From d31b5c91a27b768ee221fe677eb0b18b4cfb9df8 Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Tue, 3 Apr 2018 15:32:38 +0300 Subject: MAINTAINERS: Add drm/xen-front maintainer entry Add myself as drivers/gpu/drm/xen maintainer. Signed-off-by: Oleksandr Andrushchenko Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20180403123238.19294-1-andr2000@gmail.com --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 004d2c14ee4b..4af7f6119530 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4841,6 +4841,15 @@ S: Maintained F: drivers/gpu/drm/tinydrm/ F: include/drm/tinydrm/ +DRM DRIVERS FOR XEN +M: Oleksandr Andrushchenko +T: git git://anongit.freedesktop.org/drm/drm-misc +L: dri-devel@lists.freedesktop.org +L: xen-devel@lists.xen.org +S: Supported +F: drivers/gpu/drm/xen/ +F: Documentation/gpu/xen-front.rst + DRM TTM SUBSYSTEM M: Christian Koenig M: Roger He -- cgit v1.2.3 From cf05fb8b144dae55d094b0fa7991e985a9b4561e Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:32:59 -0400 Subject: drm/amd: Update GFXv9 SDMA MQD structure This matches what the HWS firmware expects on GFXv9 chips. 
Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- MAINTAINERS | 1 + drivers/gpu/drm/amd/include/v9_structs.h | 48 ++++++++++++++++---------------- 2 files changed, 25 insertions(+), 24 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 92be777d060a..dc929dc9ce9b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -772,6 +772,7 @@ F: drivers/gpu/drm/amd/amdkfd/ F: drivers/gpu/drm/amd/include/cik_structs.h F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h F: drivers/gpu/drm/amd/include/vi_structs.h +F: drivers/gpu/drm/amd/include/v9_structs.h F: include/uapi/linux/kfd_ioctl.h AMD SEATTLE DEVICE TREE SUPPORT diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h index 2fb25abaf7c8..ceaf4932258d 100644 --- a/drivers/gpu/drm/amd/include/v9_structs.h +++ b/drivers/gpu/drm/amd/include/v9_structs.h @@ -29,10 +29,10 @@ struct v9_sdma_mqd { uint32_t sdmax_rlcx_rb_base; uint32_t sdmax_rlcx_rb_base_hi; uint32_t sdmax_rlcx_rb_rptr; + uint32_t sdmax_rlcx_rb_rptr_hi; uint32_t sdmax_rlcx_rb_wptr; + uint32_t sdmax_rlcx_rb_wptr_hi; uint32_t sdmax_rlcx_rb_wptr_poll_cntl; - uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi; - uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo; uint32_t sdmax_rlcx_rb_rptr_addr_hi; uint32_t sdmax_rlcx_rb_rptr_addr_lo; uint32_t sdmax_rlcx_ib_cntl; @@ -44,29 +44,29 @@ struct v9_sdma_mqd { uint32_t sdmax_rlcx_skip_cntl; uint32_t sdmax_rlcx_context_status; uint32_t sdmax_rlcx_doorbell; - uint32_t sdmax_rlcx_virtual_addr; - uint32_t sdmax_rlcx_ape1_cntl; + uint32_t sdmax_rlcx_status; uint32_t sdmax_rlcx_doorbell_log; - uint32_t reserved_22; - uint32_t reserved_23; - uint32_t reserved_24; - uint32_t reserved_25; - uint32_t reserved_26; - uint32_t reserved_27; - uint32_t reserved_28; - uint32_t reserved_29; - uint32_t reserved_30; - uint32_t reserved_31; - uint32_t reserved_32; - uint32_t reserved_33; - uint32_t reserved_34; - uint32_t reserved_35; - uint32_t reserved_36; - 
uint32_t reserved_37; - uint32_t reserved_38; - uint32_t reserved_39; - uint32_t reserved_40; - uint32_t reserved_41; + uint32_t sdmax_rlcx_watermark; + uint32_t sdmax_rlcx_doorbell_offset; + uint32_t sdmax_rlcx_csa_addr_lo; + uint32_t sdmax_rlcx_csa_addr_hi; + uint32_t sdmax_rlcx_ib_sub_remain; + uint32_t sdmax_rlcx_preempt; + uint32_t sdmax_rlcx_dummy_reg; + uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi; + uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo; + uint32_t sdmax_rlcx_rb_aql_cntl; + uint32_t sdmax_rlcx_minor_ptr_update; + uint32_t sdmax_rlcx_midcmd_data0; + uint32_t sdmax_rlcx_midcmd_data1; + uint32_t sdmax_rlcx_midcmd_data2; + uint32_t sdmax_rlcx_midcmd_data3; + uint32_t sdmax_rlcx_midcmd_data4; + uint32_t sdmax_rlcx_midcmd_data5; + uint32_t sdmax_rlcx_midcmd_data6; + uint32_t sdmax_rlcx_midcmd_data7; + uint32_t sdmax_rlcx_midcmd_data8; + uint32_t sdmax_rlcx_midcmd_cntl; uint32_t reserved_42; uint32_t reserved_43; uint32_t reserved_44; -- cgit v1.2.3 From d5a114a6c5f7fa41da338e0134fccf3f25723fbd Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:01 -0400 Subject: drm/amdgpu: Add GFXv9 kfd2kgd interface functions Signed-off-by: John Bridgman Signed-off-by: Shaoyun Liu Signed-off-by: Jay Cornwall Signed-off-by: Yong Zhao Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- MAINTAINERS | 1 + drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 4 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1043 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 6 files changed, 1052 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dc929dc9ce9b..051f2fee58f9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -766,6 +766,7 @@ F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c F: 
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c F: drivers/gpu/drm/amd/amdkfd/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 2ca2b5154d52..f3002020df6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -130,7 +130,8 @@ amdgpu-y += \ amdgpu_amdkfd.o \ amdgpu_amdkfd_fence.o \ amdgpu_amdkfd_gpuvm.o \ - amdgpu_amdkfd_gfx_v8.o + amdgpu_amdkfd_gfx_v8.o \ + amdgpu_amdkfd_gfx_v9.o # add cgs amdgpu-y += amdgpu_cgs.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 4d36203ffb11..fcd10dbd121c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -92,6 +92,10 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) case CHIP_POLARIS11: kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); break; + case CHIP_VEGA10: + case CHIP_RAVEN: + kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); + break; default: dev_dbg(adev->dev, "kfd not supported on this ASIC\n"); return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index c3024b143f3d..12367a9951e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -122,6 +122,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void); struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void); +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c new file mode 100644 index 000000000000..8f37991df61b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -0,0 +1,1043 @@ +/* + * Copyright 2014-2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#define pr_fmt(fmt) "kfd2kgd: " fmt + +#include +#include +#include +#include +#include +#include "amdgpu.h" +#include "amdgpu_amdkfd.h" +#include "amdgpu_ucode.h" +#include "soc15_hw_ip.h" +#include "gc/gc_9_0_offset.h" +#include "gc/gc_9_0_sh_mask.h" +#include "vega10_enum.h" +#include "sdma0/sdma0_4_0_offset.h" +#include "sdma0/sdma0_4_0_sh_mask.h" +#include "sdma1/sdma1_4_0_offset.h" +#include "sdma1/sdma1_4_0_sh_mask.h" +#include "athub/athub_1_0_offset.h" +#include "athub/athub_1_0_sh_mask.h" +#include "oss/osssys_4_0_offset.h" +#include "oss/osssys_4_0_sh_mask.h" +#include "soc15_common.h" +#include "v9_structs.h" +#include "soc15.h" +#include "soc15d.h" + +/* HACK: MMHUB and GC both have VM-related register with the same + * names but different offsets. Define the MMHUB register we need here + * with a prefix. A proper solution would be to move the functions + * programming these registers into gfx_v9_0.c and mmhub_v1_0.c + * respectively. + */ +#define mmMMHUB_VM_INVALIDATE_ENG16_REQ 0x06f3 +#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX 0 + +#define mmMMHUB_VM_INVALIDATE_ENG16_ACK 0x0705 +#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX 0 + +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x072b +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0 +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x072c +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0 + +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x074b +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0 +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x074c +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0 + +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x076b +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0 +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x076c +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0 + +#define 
mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727 +#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0 +#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728 +#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0 + +#define V9_PIPE_PER_MEC (4) +#define V9_QUEUES_PER_PIPE_MEC (8) + +enum hqd_dequeue_request_type { + NO_ACTION = 0, + DRAIN_PIPE, + RESET_WAVES +}; + +/* + * Register access functions + */ + +static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, + uint32_t sh_mem_config, + uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, + uint32_t sh_mem_bases); +static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, + unsigned int vmid); +static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); +static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm); +static int kgd_hqd_dump(struct kgd_dev *kgd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs); +static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm); +static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs); +static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, + uint32_t pipe_id, uint32_t queue_id); +static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, + unsigned int utimeout, uint32_t pipe_id, + uint32_t queue_id); +static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, + unsigned int utimeout); +static int kgd_address_watch_disable(struct kgd_dev *kgd); +static int kgd_address_watch_execute(struct kgd_dev *kgd, + unsigned int watch_point_id, + uint32_t cntl_val, + uint32_t addr_hi, + uint32_t 
addr_lo); +static int kgd_wave_control_execute(struct kgd_dev *kgd, + uint32_t gfx_index_val, + uint32_t sq_cmd); +static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, + unsigned int watch_point_id, + unsigned int reg_offset); + +static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, + uint8_t vmid); +static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, + uint8_t vmid); +static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, + uint32_t page_table_base); +static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid); +static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); +static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); + +/* Because of REG_GET_FIELD() being used, we put this function in the + * asic specific file. + */ +static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + return 0; +} + +static const struct kfd2kgd_calls kfd2kgd = { + .init_gtt_mem_allocation = alloc_gtt_mem, + .free_gtt_mem = free_gtt_mem, + .get_local_mem_info = get_local_mem_info, + .get_gpu_clock_counter = get_gpu_clock_counter, + .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, + .alloc_pasid = amdgpu_pasid_alloc, + .free_pasid = amdgpu_pasid_free, + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = 
kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_pasid = + get_atc_vmid_pasid_mapping_pasid, + .get_atc_vmid_pasid_mapping_valid = + get_atc_vmid_pasid_mapping_valid, + .get_fw_version = get_fw_version, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = amdgpu_amdkfd_get_tile_config, + .get_cu_info = get_cu_info, + .get_vram_usage = amdgpu_amdkfd_get_vram_usage, + .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm, + .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm, + .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm, + .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu, + .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu, + .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu, + .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu, + .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory, + .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel, + .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, + .submit_ib = amdgpu_amdkfd_submit_ib, +}; + +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) +{ + return (struct kfd2kgd_calls *)&kfd2kgd; +} + +static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) +{ + return (struct 
amdgpu_device *)kgd; +} + +static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, + uint32_t queue, uint32_t vmid) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, mec, pipe, queue, vmid); +} + +static void unlock_srbm(struct kgd_dev *kgd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + soc15_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + +static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, + uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + lock_srbm(kgd, mec, pipe, queue_id, 0); +} + +static uint32_t get_queue_mask(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id) +{ + unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe + + queue_id) & 31; + + return ((uint32_t)1) << bit; +} + +static void release_queue(struct kgd_dev *kgd) +{ + unlock_srbm(kgd); +} + +static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, + uint32_t sh_mem_config, + uint32_t sh_mem_ape1_base, + uint32_t sh_mem_ape1_limit, + uint32_t sh_mem_bases) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + lock_srbm(kgd, 0, 0, 0, vmid); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); + /* APE1 no longer exists on GFX9 */ + + unlock_srbm(kgd); +} + +static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, + unsigned int vmid) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + /* + * We have to assume that there is no outstanding mapping. + * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because + * a mapping is in progress or because a mapping finished + * and the SW cleared it. + * So the protocol is to always wait & clear. 
+ */ + uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid | + ATC_VMID0_PASID_MAPPING__VALID_MASK; + + /* + * need to do this twice, once for gfx and once for mmhub + * for ATC add 16 to VMID for mmhub, for IH different registers. + * ATC_VMID0..15 registers are separate from ATC_VMID16..31. + */ + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid, + pasid_mapping); + + while (!(RREG32(SOC15_REG_OFFSET( + ATHUB, 0, + mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) & + (1U << vmid))) + cpu_relax(); + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, + mmATC_VMID_PASID_MAPPING_UPDATE_STATUS), + 1U << vmid); + + /* Mapping vmid to pasid also for IH block */ + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, + pasid_mapping); + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid, + pasid_mapping); + + while (!(RREG32(SOC15_REG_OFFSET( + ATHUB, 0, + mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) & + (1U << (vmid + 16)))) + cpu_relax(); + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, + mmATC_VMID_PASID_MAPPING_UPDATE_STATUS), + 1U << (vmid + 16)); + + /* Mapping vmid to pasid also for IH block */ + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid, + pasid_mapping); + return 0; +} + +/* TODO - RING0 form of field is obsolete, seems to date back to SI + * but still works + */ + +static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t mec; + uint32_t pipe; + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + lock_srbm(kgd, mec, pipe, 0, 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), + CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | + CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); + + unlock_srbm(kgd); + + return 0; +} + +static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, + unsigned int engine_id, + unsigned int queue_id) +{ + uint32_t base[2] = { + SOC15_REG_OFFSET(SDMA0, 
0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA1, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL + }; + uint32_t retval; + + retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - + mmSDMA0_RLC0_RB_CNTL); + + pr_debug("sdma base address: 0x%x\n", retval); + + return retval; +} + +static inline struct v9_mqd *get_mqd(void *mqd) +{ + return (struct v9_mqd *)mqd; +} + +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v9_sdma_mqd *)mqd; +} + +static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_mqd *m; + uint32_t *mqd_hqd; + uint32_t reg, hqd_base, data; + + m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + /* HIQ is set during driver init period with vmid set to 0*/ + if (m->cp_hqd_vmid == 0) { + uint32_t value, mec, pipe; + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS)); + value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, + ((mec << 5) | (pipe << 3) | queue_id | 0x80)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value); + } + + /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ + mqd_hqd = &m->cp_mqd_base_addr_lo; + hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); + + for (reg = hqd_base; + reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + WREG32(reg, mqd_hqd[reg - hqd_base]); + + + /* Activate doorbell logic before triggering WPTR poll. 
*/ + data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, + CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data); + + if (wptr) { + /* Don't read wptr with get_user because the user + * context may not be accessible (if this function + * runs in a work queue). Instead trigger a one-shot + * polling read from memory in the CP. This assumes + * that wptr is GPU-accessible in the queue's VMID via + * ATC or SVM. WPTR==RPTR before starting the poll so + * the CP starts fetching new commands from the right + * place. + * + * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit + * tricky. Assume that the queue didn't overflow. The + * number of valid bits in the 32-bit RPTR depends on + * the queue size. The remaining bits are taken from + * the saved 64-bit WPTR. If the WPTR wrapped, add the + * queue size. + */ + uint32_t queue_size = + 2 << REG_GET_FIELD(m->cp_hqd_pq_control, + CP_HQD_PQ_CONTROL, QUEUE_SIZE); + uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1); + + if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr) + guessed_wptr += queue_size; + guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); + guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; + + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), + lower_32_bits(guessed_wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), + upper_32_bits(guessed_wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), + lower_32_bits((uint64_t)wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), + upper_32_bits((uint64_t)wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), + get_queue_mask(adev, pipe_id, queue_id)); + } + + /* Start the EOP fetcher */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), + REG_SET_FIELD(m->cp_hqd_eop_rptr, + CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); + + data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, 
mmCP_HQD_ACTIVE), data); + + release_queue(kgd); + + return 0; +} + +static int kgd_hqd_dump(struct kgd_dev *kgd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t i = 0, reg; +#define HQD_N_REGS 56 +#define DUMP_REG(addr) do { \ + if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ + break; \ + (*dump)[i][0] = (addr) << 2; \ + (*dump)[i++][1] = RREG32(addr); \ + } while (0) + + *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + acquire_queue(kgd, pipe_id, queue_id); + + for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); + reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + DUMP_REG(reg); + + release_queue(kgd); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + unsigned long end_jiffies; + uint32_t data; + uint64_t data64; + uint64_t __user *wptr64 = (uint64_t __user *)wptr; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + sdmax_gfx_context_cntl = m->sdma_engine_id ? 
+ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) : + SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); + + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + return -ETIME; + usleep_range(500, 1000); + } + data = RREG32(sdmax_gfx_context_cntl); + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(sdmax_gfx_context_cntl, data); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + m->sdmax_rlcx_doorbell_offset); + + data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, + ENABLE, 1); + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + if (read_user_wptr(mm, wptr64, data64)) { + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + lower_32_bits(data64)); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + upper_32_bits(data64)); + } else { + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + } + WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + m->sdmax_rlcx_rb_base_hi); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + m->sdmax_rlcx_rb_rptr_addr_lo); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + m->sdmax_rlcx_rb_rptr_addr_hi); + + data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, + RB_ENABLE, 1); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, 
data); + + return 0; +} + +static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t i = 0, reg; +#undef HQD_N_REGS +#define HQD_N_REGS (19+6+7+10) + + *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; + reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; + reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) + DUMP_REG(sdma_base_addr + reg); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, + uint32_t pipe_id, uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t act; + bool retval = false; + uint32_t low, high; + + acquire_queue(kgd, pipe_id, queue_id); + act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + if (act) { + low = lower_32_bits(queue_address >> 8); + high = upper_32_bits(queue_address >> 8); + + if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) && + high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) + retval = true; + } + release_queue(kgd); + return retval; +} + +static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr; + uint32_t sdma_rlc_rb_cntl; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + + sdma_rlc_rb_cntl = RREG32(sdma_base_addr + 
mmSDMA0_RLC0_RB_CNTL); + + if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) + return true; + + return false; +} + +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, + unsigned int utimeout, uint32_t pipe_id, + uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + enum hqd_dequeue_request_type type; + unsigned long end_jiffies; + uint32_t temp; + struct v9_mqd *m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + if (m->cp_hqd_vmid == 0) + WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); + + switch (reset_type) { + case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: + type = DRAIN_PIPE; + break; + case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: + type = RESET_WAVES; + break; + default: + type = DRAIN_PIPE; + break; + } + + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type); + + end_jiffies = (utimeout * HZ / 1000) + jiffies; + while (true) { + temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("cp queue preemption time out.\n"); + release_queue(kgd); + return -ETIME; + } + usleep_range(500, 1000); + } + + release_queue(kgd); + return 0; +} + +static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, + unsigned int utimeout) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr; + uint32_t temp; + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + + temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + + while (true) { + temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + 
return -ETIME; + usleep_range(500, 1000); + } + + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); + + m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr_hi = + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + + return 0; +} + +static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, + uint8_t vmid) +{ + uint32_t reg; + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; +} + +static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, + uint8_t vmid) +{ + uint32_t reg; + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; +} + +static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + uint32_t req = (1 << vmid) | + (0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */ + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK | + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK | + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK | + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK | + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK; + + mutex_lock(&adev->srbm_mutex); + + /* Use legacy mode tlb invalidation. + * + * Currently on Raven the code below is broken for anything but + * legacy mode due to a MMHUB power gating problem. A workaround + * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ + * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack + * bit. + * + * TODO 1: agree on the right set of invalidation registers for + * KFD use. Use the last one for now. 
Invalidate both GC and + * MMHUB. + * + * TODO 2: support range-based invalidation, requires kfg2kgd + * interface change + */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32), + 0xffffffff); + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32), + 0x0000001f); + + WREG32(SOC15_REG_OFFSET(MMHUB, 0, + mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32), + 0xffffffff); + WREG32(SOC15_REG_OFFSET(MMHUB, 0, + mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32), + 0x0000001f); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req); + + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ), + req); + + while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) & + (1 << vmid))) + cpu_relax(); + + while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0, + mmMMHUB_VM_INVALIDATE_ENG16_ACK)) & + (1 << vmid))) + cpu_relax(); + + mutex_unlock(&adev->srbm_mutex); + +} + +static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid) +{ + signed long r; + uint32_t seq; + struct amdgpu_ring *ring = &adev->gfx.kiq.ring; + + spin_lock(&adev->gfx.kiq.ring_lock); + amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/ + amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); + amdgpu_ring_write(ring, + PACKET3_INVALIDATE_TLBS_DST_SEL(1) | + PACKET3_INVALIDATE_TLBS_ALL_HUB(1) | + PACKET3_INVALIDATE_TLBS_PASID(pasid) | + PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(0)); /* legacy */ + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + + r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); + if (r < 1) { + DRM_ERROR("wait for kiq fence error: %ld.\n", r); + return -ETIME; + } + + return 0; +} + +static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + int vmid; + struct amdgpu_ring *ring = &adev->gfx.kiq.ring; + + if (ring->ready) + return invalidate_tlbs_with_kiq(adev, pasid); 
+ + for (vmid = 0; vmid < 16; vmid++) { + if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) + continue; + if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { + if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid) + == pasid) { + write_vmid_invalidate_request(kgd, vmid); + break; + } + } + } + + return 0; +} + +static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { + pr_err("non kfd vmid %d\n", vmid); + return 0; + } + + write_vmid_invalidate_request(kgd, vmid); + return 0; +} + +static int kgd_address_watch_disable(struct kgd_dev *kgd) +{ + return 0; +} + +static int kgd_address_watch_execute(struct kgd_dev *kgd, + unsigned int watch_point_id, + uint32_t cntl_val, + uint32_t addr_hi, + uint32_t addr_lo) +{ + return 0; +} + +static int kgd_wave_control_execute(struct kgd_dev *kgd, + uint32_t gfx_index_val, + uint32_t sq_cmd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t data = 0; + + mutex_lock(&adev->grbm_idx_mutex); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); + + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + SH_BROADCAST_WRITES, 1); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + SE_BROADCAST_WRITES, 1); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data); + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, + unsigned int watch_point_id, + unsigned int reg_offset) +{ + return 0; +} + +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid) +{ + /* No longer needed on GFXv9. The scratch base address is + * passed to the shader by the CP. It's the user mode driver's + * responsibility. + */ +} + +/* FIXME: Does this need to be ASIC-specific code? 
*/ +static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + const union amdgpu_firmware_header *hdr; + + switch (type) { + case KGD_ENGINE_PFP: + hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data; + break; + + case KGD_ENGINE_ME: + hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data; + break; + + case KGD_ENGINE_CE: + hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data; + break; + + case KGD_ENGINE_MEC1: + hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data; + break; + + case KGD_ENGINE_MEC2: + hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data; + break; + + case KGD_ENGINE_RLC: + hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data; + break; + + case KGD_ENGINE_SDMA1: + hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data; + break; + + case KGD_ENGINE_SDMA2: + hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data; + break; + + default: + return 0; + } + + if (hdr == NULL) + return 0; + + /* Only 12 bit in use*/ + return hdr->common.ucode_version; +} + +static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, + uint32_t page_table_base) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint64_t base = (uint64_t)page_table_base << PAGE_SHIFT | + AMDGPU_PTE_VALID; + + if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { + pr_err("trying to set page table base for wrong VMID %u\n", + vmid); + return; + } + + /* TODO: take advantage of per-process address space size. For + * now, all processes share the same address space size, like + * on GFX8 and older. 
+ */ + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); + + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), + upper_32_bits(adev->vm_manager.max_pfn - 1)); + + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), + upper_32_bits(adev->vm_manager.max_pfn - 1)); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); +} diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9d39fd5b1822..e5962e61beb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4686,6 +4686,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; + cu_info->simd_per_cu = NUM_SIMD_PER_CU; return 0; } -- cgit v1.2.3 From 6fd8e4f4907b1f6b61e6b2e89f2f54ef9b8aa3c1 Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Sat, 10 Feb 2018 10:56:51 +0100 Subject: MAINTAINERS: 
auxdisplay: remove obsolete webpages Acked-by: Randy Dunlap Signed-off-by: Miguel Ojeda --- MAINTAINERS | 8 -------- 1 file changed, 8 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 6e950b8b4a41..f030e360f905 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2487,8 +2487,6 @@ F: kernel/audit* AUXILIARY DISPLAY DRIVERS M: Miguel Ojeda Sandonis -W: http://miguelojeda.es/auxdisplay.htm -W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm S: Maintained F: drivers/auxdisplay/ F: include/linux/cfag12864b.h @@ -3376,16 +3374,12 @@ F: include/linux/usb/wusb* CFAG12864B LCD DRIVER M: Miguel Ojeda Sandonis -W: http://miguelojeda.es/auxdisplay.htm -W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm S: Maintained F: drivers/auxdisplay/cfag12864b.c F: include/linux/cfag12864b.h CFAG12864BFB LCD FRAMEBUFFER DRIVER M: Miguel Ojeda Sandonis -W: http://miguelojeda.es/auxdisplay.htm -W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm S: Maintained F: drivers/auxdisplay/cfag12864bfb.c F: include/linux/cfag12864b.h @@ -7873,8 +7867,6 @@ F: kernel/kprobes.c KS0108 LCD CONTROLLER DRIVER M: Miguel Ojeda Sandonis -W: http://miguelojeda.es/auxdisplay.htm -W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm S: Maintained F: Documentation/auxdisplay/ks0108 F: drivers/auxdisplay/ks0108.c -- cgit v1.2.3 From 0357e488b825313db3d574137337557f404e59ed Mon Sep 17 00:00:00 2001 From: Stefan Popa Date: Wed, 11 Apr 2018 14:53:17 +0300 Subject: iio:dac:ad5686: Refactor the driver This patch restructures the existing ad5686 driver by adding a module for SPI and a header file, while the baseline module deals with the chip-logic. This is a necessary step, as this driver should support in the future similar devices which differ only in the type of interface used (I2C instead of SPI). 
Signed-off-by: Stefan Popa Signed-off-by: Jonathan Cameron --- MAINTAINERS | 7 ++ drivers/iio/dac/Kconfig | 13 ++- drivers/iio/dac/Makefile | 1 + drivers/iio/dac/ad5686-spi.c | 93 +++++++++++++++++++++ drivers/iio/dac/ad5686.c | 191 +++++++------------------------------------ drivers/iio/dac/ad5686.h | 114 ++++++++++++++++++++++++++ 6 files changed, 253 insertions(+), 166 deletions(-) create mode 100644 drivers/iio/dac/ad5686-spi.c create mode 100644 drivers/iio/dac/ad5686.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 473ac00dcfb4..637e62d5f7ee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -791,6 +791,13 @@ M: Michael Hanselmann S: Supported F: drivers/macintosh/ams/ +ANALOG DEVICES INC AD5686 DRIVER +M: Stefan Popa +L: linux-pm@vger.kernel.org +W: http://ez.analog.com/community/linux-device-drivers +S: Supported +F: drivers/iio/dac/ad5686* + ANALOG DEVICES INC AD9389B DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index 965d5c0d2468..7a81f1e11b22 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -131,16 +131,21 @@ config LTC2632 module will be called ltc2632. config AD5686 - tristate "Analog Devices AD5686R/AD5685R/AD5684R DAC SPI driver" + tristate + +config AD5686_SPI + tristate "Analog Devices AD5686 and similar multi-channel DACs (SPI)" depends on SPI + select AD5686 help - Say yes here to build support for Analog Devices AD5686R, AD5685R, - AD5684R, AD5791 Voltage Output Digital to - Analog Converter. + Say yes here to build support for Analog Devices AD5672R, AD5676, + AD5676R, AD5684, AD5684R, AD5684R, AD5685R, AD5686, AD5686R. + Voltage Output Digital to Analog Converter. To compile this driver as a module, choose M here: the module will be called ad5686. 
+ config AD5755 tristate "Analog Devices AD5755/AD5755-1/AD5757/AD5735/AD5737 DAC driver" depends on SPI_MASTER diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 81e710ed7491..07db92e19490 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_AD5761) += ad5761.o obj-$(CONFIG_AD5764) += ad5764.o obj-$(CONFIG_AD5791) += ad5791.o obj-$(CONFIG_AD5686) += ad5686.o +obj-$(CONFIG_AD5686_SPI) += ad5686-spi.o obj-$(CONFIG_AD7303) += ad7303.o obj-$(CONFIG_AD8801) += ad8801.o obj-$(CONFIG_CIO_DAC) += cio-dac.o diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c new file mode 100644 index 000000000000..6bb09e9259e6 --- /dev/null +++ b/drivers/iio/dac/ad5686-spi.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * AD5672R, AD5676, AD5676R, AD5684, AD5684R, AD5684R, AD5685R, AD5686, AD5686R + * Digital to analog converters driver + * + * Copyright 2018 Analog Devices Inc. + */ + +#include "ad5686.h" + +#include +#include + +static int ad5686_spi_write(struct ad5686_state *st, + u8 cmd, u8 addr, u16 val) +{ + struct spi_device *spi = to_spi_device(st->dev); + + st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) | + AD5686_ADDR(addr) | + val); + + return spi_write(spi, &st->data[0].d8[1], 3); +} + +static int ad5686_spi_read(struct ad5686_state *st, u8 addr) +{ + struct spi_transfer t[] = { + { + .tx_buf = &st->data[0].d8[1], + .len = 3, + .cs_change = 1, + }, { + .tx_buf = &st->data[1].d8[1], + .rx_buf = &st->data[2].d8[1], + .len = 3, + }, + }; + struct spi_device *spi = to_spi_device(st->dev); + int ret; + + st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) | + AD5686_ADDR(addr)); + st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP)); + + ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t)); + if (ret < 0) + return ret; + + return be32_to_cpu(st->data[2].d32); +} + +static int ad5686_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = 
spi_get_device_id(spi); + + return ad5686_probe(&spi->dev, id->driver_data, id->name, + ad5686_spi_write, ad5686_spi_read); +} + +static int ad5686_spi_remove(struct spi_device *spi) +{ + return ad5686_remove(&spi->dev); +} + +static const struct spi_device_id ad5686_spi_id[] = { + {"ad5672r", ID_AD5672R}, + {"ad5676", ID_AD5676}, + {"ad5676r", ID_AD5676R}, + {"ad5684", ID_AD5684}, + {"ad5684r", ID_AD5684R}, + {"ad5685", ID_AD5685R}, /* Does not exist */ + {"ad5685r", ID_AD5685R}, + {"ad5686", ID_AD5686}, + {"ad5686r", ID_AD5686R}, + {} +}; +MODULE_DEVICE_TABLE(spi, ad5686_spi_id); + +static struct spi_driver ad5686_spi_driver = { + .driver = { + .name = "ad5686", + }, + .probe = ad5686_spi_probe, + .remove = ad5686_spi_remove, + .id_table = ad5686_spi_id, +}; + +module_spi_driver(ad5686_spi_driver); + +MODULE_AUTHOR("Stefan Popa "); +MODULE_DESCRIPTION("Analog Devices AD5686 and similar multi-channel DACs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index 9e1a6ba32138..79abff55a702 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -18,121 +17,7 @@ #include #include -#define AD5686_ADDR(x) ((x) << 16) -#define AD5686_CMD(x) ((x) << 20) - -#define AD5686_ADDR_DAC(chan) (0x1 << (chan)) -#define AD5686_ADDR_ALL_DAC 0xF - -#define AD5686_CMD_NOOP 0x0 -#define AD5686_CMD_WRITE_INPUT_N 0x1 -#define AD5686_CMD_UPDATE_DAC_N 0x2 -#define AD5686_CMD_WRITE_INPUT_N_UPDATE_N 0x3 -#define AD5686_CMD_POWERDOWN_DAC 0x4 -#define AD5686_CMD_LDAC_MASK 0x5 -#define AD5686_CMD_RESET 0x6 -#define AD5686_CMD_INTERNAL_REFER_SETUP 0x7 -#define AD5686_CMD_DAISY_CHAIN_ENABLE 0x8 -#define AD5686_CMD_READBACK_ENABLE 0x9 - -#define AD5686_LDAC_PWRDN_NONE 0x0 -#define AD5686_LDAC_PWRDN_1K 0x1 -#define AD5686_LDAC_PWRDN_100K 0x2 -#define AD5686_LDAC_PWRDN_3STATE 0x3 - -/** - * struct ad5686_chip_info - chip specific information - * @int_vref_mv: 
AD5620/40/60: the internal reference voltage - * @num_channels: number of channels - * @channel: channel specification -*/ - -struct ad5686_chip_info { - u16 int_vref_mv; - unsigned int num_channels; - struct iio_chan_spec *channels; -}; - -/** - * struct ad5446_state - driver instance specific data - * @spi: spi_device - * @chip_info: chip model specific constants, available modes etc - * @reg: supply regulator - * @vref_mv: actual reference voltage used - * @pwr_down_mask: power down mask - * @pwr_down_mode: current power down mode - * @data: spi transfer buffers - */ - -struct ad5686_state { - struct spi_device *spi; - const struct ad5686_chip_info *chip_info; - struct regulator *reg; - unsigned short vref_mv; - unsigned pwr_down_mask; - unsigned pwr_down_mode; - /* - * DMA (thus cache coherency maintenance) requires the - * transfer buffers to live in their own cache lines. - */ - - union { - __be32 d32; - u8 d8[4]; - } data[3] ____cacheline_aligned; -}; - -/** - * ad5686_supported_device_ids: - */ - -enum ad5686_supported_device_ids { - ID_AD5672R, - ID_AD5676, - ID_AD5676R, - ID_AD5684, - ID_AD5684R, - ID_AD5685R, - ID_AD5686, - ID_AD5686R -}; -static int ad5686_spi_write(struct ad5686_state *st, - u8 cmd, u8 addr, u16 val, u8 shift) -{ - val <<= shift; - - st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) | - AD5686_ADDR(addr) | - val); - - return spi_write(st->spi, &st->data[0].d8[1], 3); -} - -static int ad5686_spi_read(struct ad5686_state *st, u8 addr) -{ - struct spi_transfer t[] = { - { - .tx_buf = &st->data[0].d8[1], - .len = 3, - .cs_change = 1, - }, { - .tx_buf = &st->data[1].d8[1], - .rx_buf = &st->data[2].d8[1], - .len = 3, - }, - }; - int ret; - - st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) | - AD5686_ADDR(addr)); - st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP)); - - ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t)); - if (ret < 0) - return ret; - - return be32_to_cpu(st->data[2].d32); -} +#include "ad5686.h" 
static const char * const ad5686_powerdown_modes[] = { "1kohm_to_gnd", @@ -195,8 +80,9 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev, else st->pwr_down_mask &= ~(0x3 << (chan->channel * 2)); - ret = ad5686_spi_write(st, AD5686_CMD_POWERDOWN_DAC, 0, - st->pwr_down_mask & st->pwr_down_mode, 0); + ret = st->write(st, AD5686_CMD_POWERDOWN_DAC, 0, + st->pwr_down_mask & st->pwr_down_mode); + return ret ? ret : len; } @@ -213,7 +99,7 @@ static int ad5686_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: mutex_lock(&indio_dev->mlock); - ret = ad5686_spi_read(st, chan->address); + ret = st->read(st, chan->address); mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; @@ -242,11 +128,10 @@ static int ad5686_write_raw(struct iio_dev *indio_dev, return -EINVAL; mutex_lock(&indio_dev->mlock); - ret = ad5686_spi_write(st, - AD5686_CMD_WRITE_INPUT_N_UPDATE_N, - chan->address, - val, - chan->scan_type.shift); + ret = st->write(st, + AD5686_CMD_WRITE_INPUT_N_UPDATE_N, + chan->address, + val << chan->scan_type.shift); mutex_unlock(&indio_dev->mlock); break; default: @@ -356,20 +241,27 @@ static const struct ad5686_chip_info ad5686_chip_info_tbl[] = { }, }; -static int ad5686_probe(struct spi_device *spi) +int ad5686_probe(struct device *dev, + enum ad5686_supported_device_ids chip_type, + const char *name, ad5686_write_func write, + ad5686_read_func read) { struct ad5686_state *st; struct iio_dev *indio_dev; int ret, voltage_uv = 0; - indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); if (indio_dev == NULL) return -ENOMEM; st = iio_priv(indio_dev); - spi_set_drvdata(spi, indio_dev); + dev_set_drvdata(dev, indio_dev); + + st->dev = dev; + st->write = write; + st->read = read; - st->reg = devm_regulator_get_optional(&spi->dev, "vcc"); + st->reg = devm_regulator_get_optional(dev, "vcc"); if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) @@ -382,28 +274,25 
@@ static int ad5686_probe(struct spi_device *spi) voltage_uv = ret; } - st->chip_info = - &ad5686_chip_info_tbl[spi_get_device_id(spi)->driver_data]; + st->chip_info = &ad5686_chip_info_tbl[chip_type]; if (voltage_uv) st->vref_mv = voltage_uv / 1000; else st->vref_mv = st->chip_info->int_vref_mv; - st->spi = spi; - /* Set all the power down mode for all channels to 1K pulldown */ st->pwr_down_mode = 0x55; - indio_dev->dev.parent = &spi->dev; - indio_dev->name = spi_get_device_id(spi)->name; + indio_dev->dev.parent = dev; + indio_dev->name = name; indio_dev->info = &ad5686_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = st->chip_info->num_channels; - ret = ad5686_spi_write(st, AD5686_CMD_INTERNAL_REFER_SETUP, 0, - !!voltage_uv, 0); + ret = st->write(st, AD5686_CMD_INTERNAL_REFER_SETUP, + 0, !!voltage_uv); if (ret) goto error_disable_reg; @@ -418,10 +307,11 @@ error_disable_reg: regulator_disable(st->reg); return ret; } +EXPORT_SYMBOL_GPL(ad5686_probe); -static int ad5686_remove(struct spi_device *spi) +int ad5686_remove(struct device *dev) { - struct iio_dev *indio_dev = spi_get_drvdata(spi); + struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad5686_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); @@ -430,30 +320,7 @@ static int ad5686_remove(struct spi_device *spi) return 0; } - -static const struct spi_device_id ad5686_id[] = { - {"ad5672r", ID_AD5672R}, - {"ad5676", ID_AD5676}, - {"ad5676r", ID_AD5676R}, - {"ad5684", ID_AD5684}, - {"ad5684r", ID_AD5684R}, - {"ad5685", ID_AD5685R}, /* Does not exist */ - {"ad5685r", ID_AD5685R}, - {"ad5686", ID_AD5686}, - {"ad5686r", ID_AD5686R}, - {} -}; -MODULE_DEVICE_TABLE(spi, ad5686_id); - -static struct spi_driver ad5686_driver = { - .driver = { - .name = "ad5686", - }, - .probe = ad5686_probe, - .remove = ad5686_remove, - .id_table = ad5686_id, -}; -module_spi_driver(ad5686_driver); +EXPORT_SYMBOL_GPL(ad5686_remove); 
MODULE_AUTHOR("Michael Hennerich "); MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC"); diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h new file mode 100644 index 000000000000..c8e1565391ca --- /dev/null +++ b/drivers/iio/dac/ad5686.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * This file is part of AD5686 DAC driver + * + * Copyright 2018 Analog Devices Inc. + */ + +#ifndef __DRIVERS_IIO_DAC_AD5686_H__ +#define __DRIVERS_IIO_DAC_AD5686_H__ + +#include +#include +#include +#include + +#define AD5686_ADDR(x) ((x) << 16) +#define AD5686_CMD(x) ((x) << 20) + +#define AD5686_ADDR_DAC(chan) (0x1 << (chan)) +#define AD5686_ADDR_ALL_DAC 0xF + +#define AD5686_CMD_NOOP 0x0 +#define AD5686_CMD_WRITE_INPUT_N 0x1 +#define AD5686_CMD_UPDATE_DAC_N 0x2 +#define AD5686_CMD_WRITE_INPUT_N_UPDATE_N 0x3 +#define AD5686_CMD_POWERDOWN_DAC 0x4 +#define AD5686_CMD_LDAC_MASK 0x5 +#define AD5686_CMD_RESET 0x6 +#define AD5686_CMD_INTERNAL_REFER_SETUP 0x7 +#define AD5686_CMD_DAISY_CHAIN_ENABLE 0x8 +#define AD5686_CMD_READBACK_ENABLE 0x9 + +#define AD5686_LDAC_PWRDN_NONE 0x0 +#define AD5686_LDAC_PWRDN_1K 0x1 +#define AD5686_LDAC_PWRDN_100K 0x2 +#define AD5686_LDAC_PWRDN_3STATE 0x3 + +/** + * ad5686_supported_device_ids: + */ +enum ad5686_supported_device_ids { + ID_AD5672R, + ID_AD5676, + ID_AD5676R, + ID_AD5684, + ID_AD5684R, + ID_AD5685R, + ID_AD5686, + ID_AD5686R, +}; + +struct ad5686_state; + +typedef int (*ad5686_write_func)(struct ad5686_state *st, + u8 cmd, u8 addr, u16 val); + +typedef int (*ad5686_read_func)(struct ad5686_state *st, u8 addr); + +/** + * struct ad5686_chip_info - chip specific information + * @int_vref_mv: AD5620/40/60: the internal reference voltage + * @num_channels: number of channels + * @channel: channel specification + */ + +struct ad5686_chip_info { + u16 int_vref_mv; + unsigned int num_channels; + struct iio_chan_spec *channels; +}; + +/** + * struct ad5446_state - driver instance specific data + * @spi: spi_device + * 
@chip_info: chip model specific constants, available modes etc + * @reg: supply regulator + * @vref_mv: actual reference voltage used + * @pwr_down_mask: power down mask + * @pwr_down_mode: current power down mode + * @data: spi transfer buffers + */ + +struct ad5686_state { + struct device *dev; + const struct ad5686_chip_info *chip_info; + struct regulator *reg; + unsigned short vref_mv; + unsigned int pwr_down_mask; + unsigned int pwr_down_mode; + ad5686_write_func write; + ad5686_read_func read; + + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + + union { + __be32 d32; + __be16 d16; + u8 d8[4]; + } data[3] ____cacheline_aligned; +}; + + +int ad5686_probe(struct device *dev, + enum ad5686_supported_device_ids chip_type, + const char *name, ad5686_write_func write, + ad5686_read_func read); + +int ad5686_remove(struct device *dev); + + +#endif /* __DRIVERS_IIO_DAC_AD5686_H__ */ -- cgit v1.2.3 From 4177381b440130ccb686712aaa09b45539114698 Mon Sep 17 00:00:00 2001 From: Stefan Popa Date: Wed, 11 Apr 2018 14:53:39 +0300 Subject: iio:dac:ad5686: Add AD5671R/75R/94/94R/95R/96/96R support The AD5694/AD5694R/AD5695R/AD5696/AD5696R are a family of 4 channel DACs with 12-bit, 14-bit and 16-bit precision respectively. The devices have either no built-in reference, or built-in 2.5V reference. The AD5671R/AD5675R are similar, except that they have 8 instead of 4 channels. These devices are similar to AD5672R/AD5676/AD5676R and AD5684/AD5684R/AD5684/AD5685R/AD5686/AD5686R, except that they use i2c instead of spi. 
Datasheets: http://www.analog.com/media/en/technical-documentation/data-sheets/AD5671R_5675R.pdf http://www.analog.com/media/en/technical-documentation/data-sheets/AD5696R_5695R_5694R.pdf Signed-off-by: Stefan Popa Signed-off-by: Jonathan Cameron --- MAINTAINERS | 1 + drivers/iio/dac/Kconfig | 10 +++++ drivers/iio/dac/Makefile | 1 + drivers/iio/dac/ad5686.c | 28 +++++++++++++ drivers/iio/dac/ad5686.h | 7 ++++ drivers/iio/dac/ad5696-i2c.c | 97 ++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 144 insertions(+) create mode 100644 drivers/iio/dac/ad5696-i2c.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 637e62d5f7ee..002cb013b000 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -797,6 +797,7 @@ L: linux-pm@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/iio/dac/ad5686* +F: drivers/iio/dac/ad5696* ANALOG DEVICES INC AD9389B DRIVER M: Hans Verkuil diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index 7a81f1e11b22..3ff8a32f1385 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -145,6 +145,16 @@ config AD5686_SPI To compile this driver as a module, choose M here: the module will be called ad5686. +config AD5696_I2C + tristate "Analog Devices AD5696 and similar multi-channel DACs (I2C)" + depends on I2C + select AD5686 + help + Say yes here to build support for Analog Devices AD5671R, AD5675R, + AD5694, AD5694R, AD5695R, AD5696, AD5696R Voltage Output Digital to + Analog Converter. + To compile this driver as a module, choose M here: the module will be + called ad5696. 
config AD5755 tristate "Analog Devices AD5755/AD5755-1/AD5757/AD5735/AD5737 DAC driver" diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 07db92e19490..4397e2114344 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_AD5764) += ad5764.o obj-$(CONFIG_AD5791) += ad5791.o obj-$(CONFIG_AD5686) += ad5686.o obj-$(CONFIG_AD5686_SPI) += ad5686-spi.o +obj-$(CONFIG_AD5696_I2C) += ad5696-i2c.o obj-$(CONFIG_AD7303) += ad7303.o obj-$(CONFIG_AD8801) += ad8801.o obj-$(CONFIG_CIO_DAC) += cio-dac.o diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index 79abff55a702..89c5f089ae7f 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c @@ -202,11 +202,21 @@ DECLARE_AD5686_CHANNELS(ad5685r_channels, 14, 2); DECLARE_AD5686_CHANNELS(ad5686_channels, 16, 0); static const struct ad5686_chip_info ad5686_chip_info_tbl[] = { + [ID_AD5671R] = { + .channels = ad5672_channels, + .int_vref_mv = 2500, + .num_channels = 8, + }, [ID_AD5672R] = { .channels = ad5672_channels, .int_vref_mv = 2500, .num_channels = 8, }, + [ID_AD5675R] = { + .channels = ad5676_channels, + .int_vref_mv = 2500, + .num_channels = 8, + }, [ID_AD5676] = { .channels = ad5676_channels, .num_channels = 8, @@ -239,6 +249,24 @@ static const struct ad5686_chip_info ad5686_chip_info_tbl[] = { .int_vref_mv = 2500, .num_channels = 4, }, + [ID_AD5694] = { + .channels = ad5684_channels, + .num_channels = 4, + }, + [ID_AD5694R] = { + .channels = ad5684_channels, + .int_vref_mv = 2500, + .num_channels = 4, + }, + [ID_AD5696] = { + .channels = ad5686_channels, + .num_channels = 4, + }, + [ID_AD5696R] = { + .channels = ad5686_channels, + .int_vref_mv = 2500, + .num_channels = 4, + }, }; int ad5686_probe(struct device *dev, diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h index c8e1565391ca..05f0ce9d2de1 100644 --- a/drivers/iio/dac/ad5686.h +++ b/drivers/iio/dac/ad5686.h @@ -39,7 +39,9 @@ * ad5686_supported_device_ids: */ enum 
ad5686_supported_device_ids { + ID_AD5671R, ID_AD5672R, + ID_AD5675R, ID_AD5676, ID_AD5676R, ID_AD5684, @@ -47,6 +49,11 @@ enum ad5686_supported_device_ids { ID_AD5685R, ID_AD5686, ID_AD5686R, + ID_AD5694, + ID_AD5694R, + ID_AD5695R, + ID_AD5696, + ID_AD5696R, }; struct ad5686_state; diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c new file mode 100644 index 000000000000..275e0321bcf8 --- /dev/null +++ b/drivers/iio/dac/ad5696-i2c.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * AD5671R, AD5675R, AD5694, AD5694R, AD5695R, AD5696, AD5696R + * Digital to analog converters driver + * + * Copyright 2018 Analog Devices Inc. + */ + +#include "ad5686.h" + +#include +#include + +static int ad5686_i2c_read(struct ad5686_state *st, u8 addr) +{ + struct i2c_client *i2c = to_i2c_client(st->dev); + struct i2c_msg msg[2] = { + { + .addr = i2c->addr, + .flags = i2c->flags, + .len = 3, + .buf = &st->data[0].d8[1], + }, + { + .addr = i2c->addr, + .flags = i2c->flags | I2C_M_RD, + .len = 2, + .buf = (char *)&st->data[0].d16, + }, + }; + int ret; + + st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP) | + AD5686_ADDR(addr) | + 0x00); + + ret = i2c_transfer(i2c->adapter, msg, 2); + if (ret < 0) + return ret; + + return be16_to_cpu(st->data[0].d16); +} + +static int ad5686_i2c_write(struct ad5686_state *st, + u8 cmd, u8 addr, u16 val) +{ + struct i2c_client *i2c = to_i2c_client(st->dev); + int ret; + + st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) | AD5686_ADDR(addr) + | val); + + ret = i2c_master_send(i2c, &st->data[0].d8[1], 3); + if (ret < 0) + return ret; + + return (ret != 3) ? 
-EIO : 0; +} + +static int ad5686_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + return ad5686_probe(&i2c->dev, id->driver_data, id->name, + ad5686_i2c_write, ad5686_i2c_read); +} + +static int ad5686_i2c_remove(struct i2c_client *i2c) +{ + return ad5686_remove(&i2c->dev); +} + +static const struct i2c_device_id ad5686_i2c_id[] = { + {"ad5671r", ID_AD5671R}, + {"ad5675r", ID_AD5675R}, + {"ad5694", ID_AD5694}, + {"ad5694r", ID_AD5694R}, + {"ad5695r", ID_AD5695R}, + {"ad5696", ID_AD5696}, + {"ad5696r", ID_AD5696R}, + {} +}; +MODULE_DEVICE_TABLE(i2c, ad5686_i2c_id); + +static struct i2c_driver ad5686_i2c_driver = { + .driver = { + .name = "ad5696", + }, + .probe = ad5686_i2c_probe, + .remove = ad5686_i2c_remove, + .id_table = ad5686_i2c_id, +}; + +module_i2c_driver(ad5686_i2c_driver); + +MODULE_AUTHOR("Stefan Popa "); +MODULE_DESCRIPTION("Analog Devices AD5686 and similar multi-channel DACs"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From ad56b738c5dd223a2f66685830f82194025a6138 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Wed, 21 Mar 2018 21:22:47 +0200 Subject: docs/vm: rename documentation files to .rst Signed-off-by: Mike Rapoport Signed-off-by: Jonathan Corbet --- Documentation/ABI/stable/sysfs-devices-node | 2 +- .../ABI/testing/sysfs-kernel-mm-hugepages | 2 +- Documentation/ABI/testing/sysfs-kernel-mm-ksm | 2 +- Documentation/ABI/testing/sysfs-kernel-slab | 4 +- Documentation/admin-guide/kernel-parameters.txt | 12 +- Documentation/dev-tools/kasan.rst | 2 +- Documentation/filesystems/proc.txt | 4 +- Documentation/filesystems/tmpfs.txt | 2 +- Documentation/sysctl/vm.txt | 6 +- Documentation/vm/00-INDEX | 58 +- Documentation/vm/active_mm.rst | 91 +++ Documentation/vm/active_mm.txt | 91 --- Documentation/vm/balance | 102 ---- Documentation/vm/balance.rst | 102 ++++ Documentation/vm/cleancache.rst | 296 ++++++++++ Documentation/vm/cleancache.txt | 296 ---------- Documentation/vm/frontswap.rst | 293 ++++++++++ 
Documentation/vm/frontswap.txt | 293 ---------- Documentation/vm/highmem.rst | 147 +++++ Documentation/vm/highmem.txt | 147 ----- Documentation/vm/hmm.rst | 374 +++++++++++++ Documentation/vm/hmm.txt | 374 ------------- Documentation/vm/hugetlbfs_reserv.rst | 587 ++++++++++++++++++++ Documentation/vm/hugetlbfs_reserv.txt | 587 -------------------- Documentation/vm/hugetlbpage.rst | 386 +++++++++++++ Documentation/vm/hugetlbpage.txt | 386 ------------- Documentation/vm/hwpoison.rst | 186 +++++++ Documentation/vm/hwpoison.txt | 186 ------- Documentation/vm/idle_page_tracking.rst | 115 ++++ Documentation/vm/idle_page_tracking.txt | 115 ---- Documentation/vm/ksm.rst | 183 ++++++ Documentation/vm/ksm.txt | 183 ------ Documentation/vm/mmu_notifier.rst | 99 ++++ Documentation/vm/mmu_notifier.txt | 99 ---- Documentation/vm/numa | 150 ----- Documentation/vm/numa.rst | 150 +++++ Documentation/vm/numa_memory_policy.rst | 485 ++++++++++++++++ Documentation/vm/numa_memory_policy.txt | 485 ---------------- Documentation/vm/overcommit-accounting | 87 --- Documentation/vm/overcommit-accounting.rst | 87 +++ Documentation/vm/page_frags | 45 -- Documentation/vm/page_frags.rst | 45 ++ Documentation/vm/page_migration | 257 --------- Documentation/vm/page_migration.rst | 257 +++++++++ Documentation/vm/page_owner.rst | 90 +++ Documentation/vm/page_owner.txt | 90 --- Documentation/vm/pagemap.rst | 197 +++++++ Documentation/vm/pagemap.txt | 197 ------- Documentation/vm/remap_file_pages.rst | 33 ++ Documentation/vm/remap_file_pages.txt | 33 -- Documentation/vm/slub.rst | 361 ++++++++++++ Documentation/vm/slub.txt | 361 ------------ Documentation/vm/soft-dirty.rst | 47 ++ Documentation/vm/soft-dirty.txt | 47 -- Documentation/vm/split_page_table_lock | 100 ---- Documentation/vm/split_page_table_lock.rst | 100 ++++ Documentation/vm/swap_numa.rst | 80 +++ Documentation/vm/swap_numa.txt | 80 --- Documentation/vm/transhuge.rst | 573 +++++++++++++++++++ Documentation/vm/transhuge.txt | 573 
------------------- Documentation/vm/unevictable-lru.rst | 614 +++++++++++++++++++++ Documentation/vm/unevictable-lru.txt | 614 --------------------- Documentation/vm/userfaultfd.rst | 241 ++++++++ Documentation/vm/userfaultfd.txt | 241 -------- Documentation/vm/z3fold.rst | 30 + Documentation/vm/z3fold.txt | 30 - Documentation/vm/zsmalloc.rst | 82 +++ Documentation/vm/zsmalloc.txt | 82 --- Documentation/vm/zswap.rst | 135 +++++ Documentation/vm/zswap.txt | 135 ----- MAINTAINERS | 2 +- arch/alpha/Kconfig | 2 +- arch/ia64/Kconfig | 2 +- arch/mips/Kconfig | 2 +- arch/powerpc/Kconfig | 2 +- fs/Kconfig | 2 +- fs/dax.c | 2 +- fs/proc/task_mmu.c | 4 +- include/linux/hmm.h | 2 +- include/linux/memremap.h | 4 +- include/linux/mmu_notifier.h | 2 +- include/linux/sched/mm.h | 4 +- include/linux/swap.h | 2 +- mm/Kconfig | 6 +- mm/cleancache.c | 2 +- mm/frontswap.c | 2 +- mm/hmm.c | 2 +- mm/huge_memory.c | 4 +- mm/hugetlb.c | 4 +- mm/ksm.c | 4 +- mm/mmap.c | 2 +- mm/rmap.c | 6 +- mm/util.c | 2 +- 93 files changed, 6546 insertions(+), 6546 deletions(-) create mode 100644 Documentation/vm/active_mm.rst delete mode 100644 Documentation/vm/active_mm.txt delete mode 100644 Documentation/vm/balance create mode 100644 Documentation/vm/balance.rst create mode 100644 Documentation/vm/cleancache.rst delete mode 100644 Documentation/vm/cleancache.txt create mode 100644 Documentation/vm/frontswap.rst delete mode 100644 Documentation/vm/frontswap.txt create mode 100644 Documentation/vm/highmem.rst delete mode 100644 Documentation/vm/highmem.txt create mode 100644 Documentation/vm/hmm.rst delete mode 100644 Documentation/vm/hmm.txt create mode 100644 Documentation/vm/hugetlbfs_reserv.rst delete mode 100644 Documentation/vm/hugetlbfs_reserv.txt create mode 100644 Documentation/vm/hugetlbpage.rst delete mode 100644 Documentation/vm/hugetlbpage.txt create mode 100644 Documentation/vm/hwpoison.rst delete mode 100644 Documentation/vm/hwpoison.txt create mode 100644 
Documentation/vm/idle_page_tracking.rst delete mode 100644 Documentation/vm/idle_page_tracking.txt create mode 100644 Documentation/vm/ksm.rst delete mode 100644 Documentation/vm/ksm.txt create mode 100644 Documentation/vm/mmu_notifier.rst delete mode 100644 Documentation/vm/mmu_notifier.txt delete mode 100644 Documentation/vm/numa create mode 100644 Documentation/vm/numa.rst create mode 100644 Documentation/vm/numa_memory_policy.rst delete mode 100644 Documentation/vm/numa_memory_policy.txt delete mode 100644 Documentation/vm/overcommit-accounting create mode 100644 Documentation/vm/overcommit-accounting.rst delete mode 100644 Documentation/vm/page_frags create mode 100644 Documentation/vm/page_frags.rst delete mode 100644 Documentation/vm/page_migration create mode 100644 Documentation/vm/page_migration.rst create mode 100644 Documentation/vm/page_owner.rst delete mode 100644 Documentation/vm/page_owner.txt create mode 100644 Documentation/vm/pagemap.rst delete mode 100644 Documentation/vm/pagemap.txt create mode 100644 Documentation/vm/remap_file_pages.rst delete mode 100644 Documentation/vm/remap_file_pages.txt create mode 100644 Documentation/vm/slub.rst delete mode 100644 Documentation/vm/slub.txt create mode 100644 Documentation/vm/soft-dirty.rst delete mode 100644 Documentation/vm/soft-dirty.txt delete mode 100644 Documentation/vm/split_page_table_lock create mode 100644 Documentation/vm/split_page_table_lock.rst create mode 100644 Documentation/vm/swap_numa.rst delete mode 100644 Documentation/vm/swap_numa.txt create mode 100644 Documentation/vm/transhuge.rst delete mode 100644 Documentation/vm/transhuge.txt create mode 100644 Documentation/vm/unevictable-lru.rst delete mode 100644 Documentation/vm/unevictable-lru.txt create mode 100644 Documentation/vm/userfaultfd.rst delete mode 100644 Documentation/vm/userfaultfd.txt create mode 100644 Documentation/vm/z3fold.rst delete mode 100644 Documentation/vm/z3fold.txt create mode 100644 
Documentation/vm/zsmalloc.rst delete mode 100644 Documentation/vm/zsmalloc.txt create mode 100644 Documentation/vm/zswap.rst delete mode 100644 Documentation/vm/zswap.txt (limited to 'MAINTAINERS') diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node index 5b2d0f08867c..b38f4b734567 100644 --- a/Documentation/ABI/stable/sysfs-devices-node +++ b/Documentation/ABI/stable/sysfs-devices-node @@ -90,4 +90,4 @@ Date: December 2009 Contact: Lee Schermerhorn Description: The node's huge page size control/query attributes. - See Documentation/vm/hugetlbpage.txt \ No newline at end of file + See Documentation/vm/hugetlbpage.rst \ No newline at end of file diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-hugepages b/Documentation/ABI/testing/sysfs-kernel-mm-hugepages index e21c00571cf4..5140b233356c 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-hugepages +++ b/Documentation/ABI/testing/sysfs-kernel-mm-hugepages @@ -12,4 +12,4 @@ Description: free_hugepages surplus_hugepages resv_hugepages - See Documentation/vm/hugetlbpage.txt for details. + See Documentation/vm/hugetlbpage.rst for details. diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-ksm b/Documentation/ABI/testing/sysfs-kernel-mm-ksm index 73e653ee2481..dfc13244cda3 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-ksm +++ b/Documentation/ABI/testing/sysfs-kernel-mm-ksm @@ -40,7 +40,7 @@ Description: Kernel Samepage Merging daemon sysfs interface sleep_millisecs: how many milliseconds ksm should sleep between scans. - See Documentation/vm/ksm.txt for more information. + See Documentation/vm/ksm.rst for more information. 
What: /sys/kernel/mm/ksm/merge_across_nodes Date: January 2013 diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab index 2cc0a72b64be..29601d93a1c2 100644 --- a/Documentation/ABI/testing/sysfs-kernel-slab +++ b/Documentation/ABI/testing/sysfs-kernel-slab @@ -37,7 +37,7 @@ Description: The alloc_calls file is read-only and lists the kernel code locations from which allocations for this cache were performed. The alloc_calls file only contains information if debugging is - enabled for that cache (see Documentation/vm/slub.txt). + enabled for that cache (see Documentation/vm/slub.rst). What: /sys/kernel/slab/cache/alloc_fastpath Date: February 2008 @@ -219,7 +219,7 @@ Contact: Pekka Enberg , Description: The free_calls file is read-only and lists the locations of object frees if slab debugging is enabled (see - Documentation/vm/slub.txt). + Documentation/vm/slub.rst). What: /sys/kernel/slab/cache/free_fastpath Date: February 2008 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 1d1d53f85ddd..5d6e5509c049 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3887,7 +3887,7 @@ cache (risks via metadata attacks are mostly unchanged). Debug options disable merging on their own. - For more information see Documentation/vm/slub.txt. + For more information see Documentation/vm/slub.rst. slab_max_order= [MM, SLAB] Determines the maximum allowed order for slabs. @@ -3901,7 +3901,7 @@ slub_debug can create guard zones around objects and may poison objects when not in use. Also tracks the last alloc / free. For more information see - Documentation/vm/slub.txt. + Documentation/vm/slub.rst. slub_memcg_sysfs= [MM, SLUB] Determines whether to enable sysfs directories for @@ -3915,7 +3915,7 @@ Determines the maximum allowed order for slabs. A high setting may cause OOMs due to memory fragmentation. 
For more information see - Documentation/vm/slub.txt. + Documentation/vm/slub.rst. slub_min_objects= [MM, SLUB] The minimum number of objects per slab. SLUB will @@ -3924,12 +3924,12 @@ the number of objects indicated. The higher the number of objects the smaller the overhead of tracking slabs and the less frequently locks need to be acquired. - For more information see Documentation/vm/slub.txt. + For more information see Documentation/vm/slub.rst. slub_min_order= [MM, SLUB] Determines the minimum page order for slabs. Must be lower than slub_max_order. - For more information see Documentation/vm/slub.txt. + For more information see Documentation/vm/slub.rst. slub_nomerge [MM, SLUB] Same with slab_nomerge. This is supported for legacy. @@ -4285,7 +4285,7 @@ Format: [always|madvise|never] Can be used to control the default behavior of the system with respect to transparent hugepages. - See Documentation/vm/transhuge.txt for more details. + See Documentation/vm/transhuge.rst for more details. tsc= Disable clocksource stability checks for TSC. Format: diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst index f7a18f274357..aabc8738b3d8 100644 --- a/Documentation/dev-tools/kasan.rst +++ b/Documentation/dev-tools/kasan.rst @@ -120,7 +120,7 @@ A typical out of bounds access report looks like this:: The header of the report discribe what kind of bug happened and what kind of access caused it. It's followed by the description of the accessed slub object -(see 'SLUB Debug output' section in Documentation/vm/slub.txt for details) and +(see 'SLUB Debug output' section in Documentation/vm/slub.rst for details) and the description of the accessed memory page. In the last section the report shows memory state around the accessed address. 
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 2a84bb334894..2d3984c70feb 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -515,7 +515,7 @@ guarantees: The /proc/PID/clear_refs is used to reset the PG_Referenced and ACCESSED/YOUNG bits on both physical and virtual pages associated with a process, and the -soft-dirty bit on pte (see Documentation/vm/soft-dirty.txt for details). +soft-dirty bit on pte (see Documentation/vm/soft-dirty.rst for details). To clear the bits for all the pages associated with the process > echo 1 > /proc/PID/clear_refs @@ -536,7 +536,7 @@ Any other value written to /proc/PID/clear_refs will have no effect. The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags using /proc/kpageflags and number of times a page is mapped using -/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt. +/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.rst. The /proc/pid/numa_maps is an extension based on maps, showing the memory locality and binding policy, as well as the memory usage (in pages) of diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt index a85355cf85f4..627389a34f77 100644 --- a/Documentation/filesystems/tmpfs.txt +++ b/Documentation/filesystems/tmpfs.txt @@ -105,7 +105,7 @@ policy for the file will revert to "default" policy. NUMA memory allocation policies have optional flags that can be used in conjunction with their modes. These optional flags can be specified when tmpfs is mounted by appending them to the mode before the NodeList. -See Documentation/vm/numa_memory_policy.txt for a list of all available +See Documentation/vm/numa_memory_policy.rst for a list of all available memory allocation policy mode flags and their effect on memory policy. 
=static is equivalent to MPOL_F_STATIC_NODES diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index ff234d229cbb..ef581a940439 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -516,7 +516,7 @@ nr_hugepages Change the minimum size of the hugepage pool. -See Documentation/vm/hugetlbpage.txt +See Documentation/vm/hugetlbpage.rst ============================================================== @@ -525,7 +525,7 @@ nr_overcommit_hugepages Change the maximum size of the hugepage pool. The maximum is nr_hugepages + nr_overcommit_hugepages. -See Documentation/vm/hugetlbpage.txt +See Documentation/vm/hugetlbpage.rst ============================================================== @@ -668,7 +668,7 @@ and don't use much of it. The default value is 0. -See Documentation/vm/overcommit-accounting and +See Documentation/vm/overcommit-accounting.rst and mm/mmap.c::__vm_enough_memory() for more information. ============================================================== diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX index 0278f2c85efb..cda564d55b3c 100644 --- a/Documentation/vm/00-INDEX +++ b/Documentation/vm/00-INDEX @@ -1,62 +1,62 @@ 00-INDEX - this file. -active_mm.txt +active_mm.rst - An explanation from Linus about tsk->active_mm vs tsk->mm. -balance +balance.rst - various information on memory balancing. -cleancache.txt +cleancache.rst - Intro to cleancache and page-granularity victim cache. -frontswap.txt +frontswap.rst - Outline frontswap, part of the transcendent memory frontend. -highmem.txt +highmem.rst - Outline of highmem and common issues. -hmm.txt +hmm.rst - Documentation of heterogeneous memory management -hugetlbpage.txt +hugetlbpage.rst - a brief summary of hugetlbpage support in the Linux kernel. -hugetlbfs_reserv.txt +hugetlbfs_reserv.rst - A brief overview of hugetlbfs reservation design/implementation. 
-hwpoison.txt +hwpoison.rst - explains what hwpoison is -idle_page_tracking.txt +idle_page_tracking.rst - description of the idle page tracking feature. -ksm.txt +ksm.rst - how to use the Kernel Samepage Merging feature. -mmu_notifier.txt +mmu_notifier.rst - a note about clearing pte/pmd and mmu notifications -numa +numa.rst - information about NUMA specific code in the Linux vm. -numa_memory_policy.txt +numa_memory_policy.rst - documentation of concepts and APIs of the 2.6 memory policy support. -overcommit-accounting +overcommit-accounting.rst - description of the Linux kernels overcommit handling modes. -page_frags +page_frags.rst - description of page fragments allocator -page_migration +page_migration.rst - description of page migration in NUMA systems. -pagemap.txt +pagemap.rst - pagemap, from the userspace perspective -page_owner.txt +page_owner.rst - tracking about who allocated each page -remap_file_pages.txt +remap_file_pages.rst - a note about remap_file_pages() system call -slub.txt +slub.rst - a short users guide for SLUB. -soft-dirty.txt +soft-dirty.rst - short explanation for soft-dirty PTEs -split_page_table_lock +split_page_table_lock.rst - Separate per-table lock to improve scalability of the old page_table_lock. -swap_numa.txt +swap_numa.rst - automatic binding of swap device to numa node -transhuge.txt +transhuge.rst - Transparent Hugepage Support, alternative way of using hugepages. 
-unevictable-lru.txt +unevictable-lru.rst - Unevictable LRU infrastructure -userfaultfd.txt +userfaultfd.rst - description of userfaultfd system call z3fold.txt - outline of z3fold allocator for storing compressed pages -zsmalloc.txt +zsmalloc.rst - outline of zsmalloc allocator for storing compressed pages -zswap.txt +zswap.rst - Intro to compressed cache for swap pages diff --git a/Documentation/vm/active_mm.rst b/Documentation/vm/active_mm.rst new file mode 100644 index 000000000000..c84471b180f8 --- /dev/null +++ b/Documentation/vm/active_mm.rst @@ -0,0 +1,91 @@ +.. _active_mm: + +========= +Active MM +========= + +:: + + List: linux-kernel + Subject: Re: active_mm + From: Linus Torvalds + Date: 1999-07-30 21:36:24 + + Cc'd to linux-kernel, because I don't write explanations all that often, + and when I do I feel better about more people reading them. + + On Fri, 30 Jul 1999, David Mosberger wrote: + > + > Is there a brief description someplace on how "mm" vs. "active_mm" in + > the task_struct are supposed to be used? (My apologies if this was + > discussed on the mailing lists---I just returned from vacation and + > wasn't able to follow linux-kernel for a while). + + Basically, the new setup is: + + - we have "real address spaces" and "anonymous address spaces". The + difference is that an anonymous address space doesn't care about the + user-level page tables at all, so when we do a context switch into an + anonymous address space we just leave the previous address space + active. + + The obvious use for a "anonymous address space" is any thread that + doesn't need any user mappings - all kernel threads basically fall into + this category, but even "real" threads can temporarily say that for + some amount of time they are not going to be interested in user space, + and that the scheduler might as well try to avoid wasting time on + switching the VM state around. Currently only the old-style bdflush + sync does that. 
+ + - "tsk->mm" points to the "real address space". For an anonymous process, + tsk->mm will be NULL, for the logical reason that an anonymous process + really doesn't _have_ a real address space at all. + + - however, we obviously need to keep track of which address space we + "stole" for such an anonymous user. For that, we have "tsk->active_mm", + which shows what the currently active address space is. + + The rule is that for a process with a real address space (ie tsk->mm is + non-NULL) the active_mm obviously always has to be the same as the real + one. + + For a anonymous process, tsk->mm == NULL, and tsk->active_mm is the + "borrowed" mm while the anonymous process is running. When the + anonymous process gets scheduled away, the borrowed address space is + returned and cleared. + + To support all that, the "struct mm_struct" now has two counters: a + "mm_users" counter that is how many "real address space users" there are, + and a "mm_count" counter that is the number of "lazy" users (ie anonymous + users) plus one if there are any real users. + + Usually there is at least one real user, but it could be that the real + user exited on another CPU while a lazy user was still active, so you do + actually get cases where you have a address space that is _only_ used by + lazy users. That is often a short-lived state, because once that thread + gets scheduled away in favour of a real thread, the "zombie" mm gets + released because "mm_users" becomes zero. + + Also, a new rule is that _nobody_ ever has "init_mm" as a real MM any + more. "init_mm" should be considered just a "lazy context when no other + context is available", and in fact it is mainly used just at bootup when + no real VM has yet been created. 
So code that used to check + + if (current->mm == &init_mm) + + should generally just do + + if (!current->mm) + + instead (which makes more sense anyway - the test is basically one of "do + we have a user context", and is generally done by the page fault handler + and things like that). + + Anyway, I put a pre-patch-2.3.13-1 on ftp.kernel.org just a moment ago, + because it slightly changes the interfaces to accommodate the alpha (who + would have thought it, but the alpha actually ends up having one of the + ugliest context switch codes - unlike the other architectures where the MM + and register state is separate, the alpha PALcode joins the two, and you + need to switch both together). + + (From http://marc.info/?l=linux-kernel&m=93337278602211&w=2) diff --git a/Documentation/vm/active_mm.txt b/Documentation/vm/active_mm.txt deleted file mode 100644 index c84471b180f8..000000000000 --- a/Documentation/vm/active_mm.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. _active_mm: - -========= -Active MM -========= - -:: - - List: linux-kernel - Subject: Re: active_mm - From: Linus Torvalds - Date: 1999-07-30 21:36:24 - - Cc'd to linux-kernel, because I don't write explanations all that often, - and when I do I feel better about more people reading them. - - On Fri, 30 Jul 1999, David Mosberger wrote: - > - > Is there a brief description someplace on how "mm" vs. "active_mm" in - > the task_struct are supposed to be used? (My apologies if this was - > discussed on the mailing lists---I just returned from vacation and - > wasn't able to follow linux-kernel for a while). - - Basically, the new setup is: - - - we have "real address spaces" and "anonymous address spaces". The - difference is that an anonymous address space doesn't care about the - user-level page tables at all, so when we do a context switch into an - anonymous address space we just leave the previous address space - active. 
- - The obvious use for a "anonymous address space" is any thread that - doesn't need any user mappings - all kernel threads basically fall into - this category, but even "real" threads can temporarily say that for - some amount of time they are not going to be interested in user space, - and that the scheduler might as well try to avoid wasting time on - switching the VM state around. Currently only the old-style bdflush - sync does that. - - - "tsk->mm" points to the "real address space". For an anonymous process, - tsk->mm will be NULL, for the logical reason that an anonymous process - really doesn't _have_ a real address space at all. - - - however, we obviously need to keep track of which address space we - "stole" for such an anonymous user. For that, we have "tsk->active_mm", - which shows what the currently active address space is. - - The rule is that for a process with a real address space (ie tsk->mm is - non-NULL) the active_mm obviously always has to be the same as the real - one. - - For a anonymous process, tsk->mm == NULL, and tsk->active_mm is the - "borrowed" mm while the anonymous process is running. When the - anonymous process gets scheduled away, the borrowed address space is - returned and cleared. - - To support all that, the "struct mm_struct" now has two counters: a - "mm_users" counter that is how many "real address space users" there are, - and a "mm_count" counter that is the number of "lazy" users (ie anonymous - users) plus one if there are any real users. - - Usually there is at least one real user, but it could be that the real - user exited on another CPU while a lazy user was still active, so you do - actually get cases where you have a address space that is _only_ used by - lazy users. That is often a short-lived state, because once that thread - gets scheduled away in favour of a real thread, the "zombie" mm gets - released because "mm_users" becomes zero. 
- - Also, a new rule is that _nobody_ ever has "init_mm" as a real MM any - more. "init_mm" should be considered just a "lazy context when no other - context is available", and in fact it is mainly used just at bootup when - no real VM has yet been created. So code that used to check - - if (current->mm == &init_mm) - - should generally just do - - if (!current->mm) - - instead (which makes more sense anyway - the test is basically one of "do - we have a user context", and is generally done by the page fault handler - and things like that). - - Anyway, I put a pre-patch-2.3.13-1 on ftp.kernel.org just a moment ago, - because it slightly changes the interfaces to accommodate the alpha (who - would have thought it, but the alpha actually ends up having one of the - ugliest context switch codes - unlike the other architectures where the MM - and register state is separate, the alpha PALcode joins the two, and you - need to switch both together). - - (From http://marc.info/?l=linux-kernel&m=93337278602211&w=2) diff --git a/Documentation/vm/balance b/Documentation/vm/balance deleted file mode 100644 index 6a1fadf3e173..000000000000 --- a/Documentation/vm/balance +++ /dev/null @@ -1,102 +0,0 @@ -.. _balance: - -================ -Memory Balancing -================ - -Started Jan 2000 by Kanoj Sarcar - -Memory balancing is needed for !__GFP_ATOMIC and !__GFP_KSWAPD_RECLAIM as -well as for non __GFP_IO allocations. - -The first reason why a caller may avoid reclaim is that the caller can not -sleep due to holding a spinlock or is in interrupt context. The second may -be that the caller is willing to fail the allocation without incurring the -overhead of page reclaim. This may happen for opportunistic high-order -allocation requests that have order-0 fallback options. In such cases, -the caller may also wish to avoid waking kswapd. - -__GFP_IO allocation requests are made to prevent file system deadlocks. 
- -In the absence of non sleepable allocation requests, it seems detrimental -to be doing balancing. Page reclamation can be kicked off lazily, that -is, only when needed (aka zone free memory is 0), instead of making it -a proactive process. - -That being said, the kernel should try to fulfill requests for direct -mapped pages from the direct mapped pool, instead of falling back on -the dma pool, so as to keep the dma pool filled for dma requests (atomic -or not). A similar argument applies to highmem and direct mapped pages. -OTOH, if there is a lot of free dma pages, it is preferable to satisfy -regular memory requests by allocating one from the dma pool, instead -of incurring the overhead of regular zone balancing. - -In 2.2, memory balancing/page reclamation would kick off only when the -_total_ number of free pages fell below 1/64 th of total memory. With the -right ratio of dma and regular memory, it is quite possible that balancing -would not be done even when the dma zone was completely empty. 2.2 has -been running production machines of varying memory sizes, and seems to be -doing fine even with the presence of this problem. In 2.3, due to -HIGHMEM, this problem is aggravated. - -In 2.3, zone balancing can be done in one of two ways: depending on the -zone size (and possibly of the size of lower class zones), we can decide -at init time how many free pages we should aim for while balancing any -zone. The good part is, while balancing, we do not need to look at sizes -of lower class zones, the bad part is, we might do too frequent balancing -due to ignoring possibly lower usage in the lower class zones. Also, -with a slight change in the allocation routine, it is possible to reduce -the memclass() macro to be a simple equality. - -Another possible solution is that we balance only when the free memory -of a zone _and_ all its lower class zones falls below 1/64th of the -total memory in the zone and its lower class zones. 
This fixes the 2.2 -balancing problem, and stays as close to 2.2 behavior as possible. Also, -the balancing algorithm works the same way on the various architectures, -which have different numbers and types of zones. If we wanted to get -fancy, we could assign different weights to free pages in different -zones in the future. - -Note that if the size of the regular zone is huge compared to dma zone, -it becomes less significant to consider the free dma pages while -deciding whether to balance the regular zone. The first solution -becomes more attractive then. - -The appended patch implements the second solution. It also "fixes" two -problems: first, kswapd is woken up as in 2.2 on low memory conditions -for non-sleepable allocations. Second, the HIGHMEM zone is also balanced, -so as to give a fighting chance for replace_with_highmem() to get a -HIGHMEM page, as well as to ensure that HIGHMEM allocations do not -fall back into regular zone. This also makes sure that HIGHMEM pages -are not leaked (for example, in situations where a HIGHMEM page is in -the swapcache but is not being used by anyone) - -kswapd also needs to know about the zones it should balance. kswapd is -primarily needed in a situation where balancing can not be done, -probably because all allocation requests are coming from intr context -and all process contexts are sleeping. For 2.3, kswapd does not really -need to balance the highmem zone, since intr context does not request -highmem pages. kswapd looks at the zone_wake_kswapd field in the zone -structure to decide whether a zone needs balancing. - -Page stealing from process memory and shm is done if stealing the page would -alleviate memory pressure on any zone in the page's node that has fallen below -its watermark. - -watemark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These -are per-zone fields, used to determine when a zone needs to be balanced. 
When -the number of pages falls below watermark[WMARK_MIN], the hysteric field -low_on_memory gets set. This stays set till the number of free pages becomes -watermark[WMARK_HIGH]. When low_on_memory is set, page allocation requests will -try to free some pages in the zone (providing GFP_WAIT is set in the request). -Orthogonal to this, is the decision to poke kswapd to free some zone pages. -That decision is not hysteresis based, and is done when the number of free -pages is below watermark[WMARK_LOW]; in which case zone_wake_kswapd is also set. - - -(Good) Ideas that I have heard: - -1. Dynamic experience should influence balancing: number of failed requests - for a zone can be tracked and fed into the balancing scheme (jalvo@mbay.net) -2. Implement a replace_with_highmem()-like replace_with_regular() to preserve - dma pages. (lkd@tantalophile.demon.co.uk) diff --git a/Documentation/vm/balance.rst b/Documentation/vm/balance.rst new file mode 100644 index 000000000000..6a1fadf3e173 --- /dev/null +++ b/Documentation/vm/balance.rst @@ -0,0 +1,102 @@ +.. _balance: + +================ +Memory Balancing +================ + +Started Jan 2000 by Kanoj Sarcar + +Memory balancing is needed for !__GFP_ATOMIC and !__GFP_KSWAPD_RECLAIM as +well as for non __GFP_IO allocations. + +The first reason why a caller may avoid reclaim is that the caller can not +sleep due to holding a spinlock or is in interrupt context. The second may +be that the caller is willing to fail the allocation without incurring the +overhead of page reclaim. This may happen for opportunistic high-order +allocation requests that have order-0 fallback options. In such cases, +the caller may also wish to avoid waking kswapd. + +__GFP_IO allocation requests are made to prevent file system deadlocks. + +In the absence of non sleepable allocation requests, it seems detrimental +to be doing balancing. 
Page reclamation can be kicked off lazily, that +is, only when needed (aka zone free memory is 0), instead of making it +a proactive process. + +That being said, the kernel should try to fulfill requests for direct +mapped pages from the direct mapped pool, instead of falling back on +the dma pool, so as to keep the dma pool filled for dma requests (atomic +or not). A similar argument applies to highmem and direct mapped pages. +OTOH, if there is a lot of free dma pages, it is preferable to satisfy +regular memory requests by allocating one from the dma pool, instead +of incurring the overhead of regular zone balancing. + +In 2.2, memory balancing/page reclamation would kick off only when the +_total_ number of free pages fell below 1/64 th of total memory. With the +right ratio of dma and regular memory, it is quite possible that balancing +would not be done even when the dma zone was completely empty. 2.2 has +been running production machines of varying memory sizes, and seems to be +doing fine even with the presence of this problem. In 2.3, due to +HIGHMEM, this problem is aggravated. + +In 2.3, zone balancing can be done in one of two ways: depending on the +zone size (and possibly of the size of lower class zones), we can decide +at init time how many free pages we should aim for while balancing any +zone. The good part is, while balancing, we do not need to look at sizes +of lower class zones, the bad part is, we might do too frequent balancing +due to ignoring possibly lower usage in the lower class zones. Also, +with a slight change in the allocation routine, it is possible to reduce +the memclass() macro to be a simple equality. + +Another possible solution is that we balance only when the free memory +of a zone _and_ all its lower class zones falls below 1/64th of the +total memory in the zone and its lower class zones. This fixes the 2.2 +balancing problem, and stays as close to 2.2 behavior as possible. 
Also, +the balancing algorithm works the same way on the various architectures, +which have different numbers and types of zones. If we wanted to get +fancy, we could assign different weights to free pages in different +zones in the future. + +Note that if the size of the regular zone is huge compared to dma zone, +it becomes less significant to consider the free dma pages while +deciding whether to balance the regular zone. The first solution +becomes more attractive then. + +The appended patch implements the second solution. It also "fixes" two +problems: first, kswapd is woken up as in 2.2 on low memory conditions +for non-sleepable allocations. Second, the HIGHMEM zone is also balanced, +so as to give a fighting chance for replace_with_highmem() to get a +HIGHMEM page, as well as to ensure that HIGHMEM allocations do not +fall back into regular zone. This also makes sure that HIGHMEM pages +are not leaked (for example, in situations where a HIGHMEM page is in +the swapcache but is not being used by anyone) + +kswapd also needs to know about the zones it should balance. kswapd is +primarily needed in a situation where balancing can not be done, +probably because all allocation requests are coming from intr context +and all process contexts are sleeping. For 2.3, kswapd does not really +need to balance the highmem zone, since intr context does not request +highmem pages. kswapd looks at the zone_wake_kswapd field in the zone +structure to decide whether a zone needs balancing. + +Page stealing from process memory and shm is done if stealing the page would +alleviate memory pressure on any zone in the page's node that has fallen below +its watermark. + +watermark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These +are per-zone fields, used to determine when a zone needs to be balanced. When +the number of pages falls below watermark[WMARK_MIN], the hysteric field +low_on_memory gets set.
This stays set till the number of free pages becomes +watermark[WMARK_HIGH]. When low_on_memory is set, page allocation requests will +try to free some pages in the zone (providing GFP_WAIT is set in the request). +Orthogonal to this, is the decision to poke kswapd to free some zone pages. +That decision is not hysteresis based, and is done when the number of free +pages is below watermark[WMARK_LOW]; in which case zone_wake_kswapd is also set. + + +(Good) Ideas that I have heard: + +1. Dynamic experience should influence balancing: number of failed requests + for a zone can be tracked and fed into the balancing scheme (jalvo@mbay.net) +2. Implement a replace_with_highmem()-like replace_with_regular() to preserve + dma pages. (lkd@tantalophile.demon.co.uk) diff --git a/Documentation/vm/cleancache.rst b/Documentation/vm/cleancache.rst new file mode 100644 index 000000000000..68cba9131c31 --- /dev/null +++ b/Documentation/vm/cleancache.rst @@ -0,0 +1,296 @@ +.. _cleancache: + +========== +Cleancache +========== + +Motivation +========== + +Cleancache is a new optional feature provided by the VFS layer that +potentially dramatically increases page cache effectiveness for +many workloads in many environments at a negligible cost. + +Cleancache can be thought of as a page-granularity victim cache for clean +pages that the kernel's pageframe replacement algorithm (PFRA) would like +to keep around, but can't since there isn't enough memory. So when the +PFRA "evicts" a page, it first attempts to use cleancache code to +put the data contained in that page into "transcendent memory", memory +that is not directly accessible or addressable by the kernel and is +of unknown and possibly time-varying size. + +Later, when a cleancache-enabled filesystem wishes to access a page +in a file on disk, it first checks cleancache to see if it already +contains it; if it does, the page of data is copied into the kernel +and a disk access is avoided. 
+ +Transcendent memory "drivers" for cleancache are currently implemented +in Xen (using hypervisor memory) and zcache (using in-kernel compressed +memory) and other implementations are in development. + +:ref:`FAQs ` are included below. + +Implementation Overview +======================= + +A cleancache "backend" that provides transcendent memory registers itself +to the kernel's cleancache "frontend" by calling cleancache_register_ops, +passing a pointer to a cleancache_ops structure with funcs set appropriately. +The functions provided must conform to certain semantics as follows: + +Most important, cleancache is "ephemeral". Pages which are copied into +cleancache have an indefinite lifetime which is completely unknowable +by the kernel and so may or may not still be in cleancache at any later time. +Thus, as its name implies, cleancache is not suitable for dirty pages. +Cleancache has complete discretion over what pages to preserve and what +pages to discard and when. + +Mounting a cleancache-enabled filesystem should call "init_fs" to obtain a +pool id which, if positive, must be saved in the filesystem's superblock; +a negative return value indicates failure. A "put_page" will copy a +(presumably about-to-be-evicted) page into cleancache and associate it with +the pool id, a file key, and a page index into the file. (The combination +of a pool id, a file key, and an index is sometimes called a "handle".) +A "get_page" will copy the page, if found, from cleancache into kernel memory. +An "invalidate_page" will ensure the page no longer is present in cleancache; +an "invalidate_inode" will invalidate all pages associated with the specified +file; and, when a filesystem is unmounted, an "invalidate_fs" will invalidate +all pages in all files specified by the given pool id and also surrender +the pool id. + +An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache +to treat the pool as shared using a 128-bit UUID as a key. 
On systems +that may run multiple kernels (such as hard partitioned or virtualized +systems) that may share a clustered filesystem, and where cleancache +may be shared among those kernels, calls to init_shared_fs that specify the +same UUID will receive the same pool id, thus allowing the pages to +be shared. Note that any security requirements must be imposed outside +of the kernel (e.g. by "tools" that control cleancache). Or a +cleancache implementation can simply disable shared_init by always +returning a negative value. + +If a get_page is successful on a non-shared pool, the page is invalidated +(thus making cleancache an "exclusive" cache). On a shared pool, the page +is NOT invalidated on a successful get_page so that it remains accessible to +other sharers. The kernel is responsible for ensuring coherency between +cleancache (shared or not), the page cache, and the filesystem, using +cleancache invalidate operations as required. + +Note that cleancache must enforce put-put-get coherency and get-get +coherency. For the former, if two puts are made to the same handle but +with different data, say AAA by the first put and BBB by the second, a +subsequent get can never return the stale data (AAA). For get-get coherency, +if a get for a given handle fails, subsequent gets for that handle will +never succeed unless preceded by a successful put with that handle. + +Last, cleancache provides no SMP serialization guarantees; if two +different Linux threads are simultaneously putting and invalidating a page +with the same handle, the results are indeterminate. Callers must +lock the page to ensure serial behavior. + +Cleancache Performance Metrics +============================== + +If properly configured, monitoring of cleancache is done via debugfs in +the `/sys/kernel/debug/cleancache` directory. 
The effectiveness of cleancache +can be measured (across all filesystems) with: + +``succ_gets`` + number of gets that were successful + +``failed_gets`` + number of gets that failed + +``puts`` + number of puts attempted (all "succeed") + +``invalidates`` + number of invalidates attempted + +A backend implementation may provide additional metrics. + +.. _faq: + +FAQ +=== + +* Where's the value? (Andrew Morton) + +Cleancache provides a significant performance benefit to many workloads +in many environments with negligible overhead by improving the +effectiveness of the pagecache. Clean pagecache pages are +saved in transcendent memory (RAM that is otherwise not directly +addressable to the kernel); fetching those pages later avoids "refaults" +and thus disk reads. + +Cleancache (and its sister code "frontswap") provide interfaces for +this transcendent memory (aka "tmem"), which conceptually lies between +fast kernel-directly-addressable RAM and slower DMA/asynchronous devices. +Disallowing direct kernel or userland reads/writes to tmem +is ideal when data is transformed to a different form and size (such +as with compression) or secretly moved (as might be useful for write- +balancing for some RAM-like devices). Evicted page-cache pages (and +swap pages) are a great use for this kind of slower-than-RAM-but-much- +faster-than-disk transcendent memory, and the cleancache (and frontswap) +"page-object-oriented" specification provides a nice way to read and +write -- and indirectly "name" -- the pages. + +In the virtual case, the whole point of virtualization is to statistically +multiplex physical resources across the varying demands of multiple +virtual machines. This is really hard to do with RAM and efforts to +do it well with no kernel change have essentially failed (except in some +well-publicized special-case workloads). 
Cleancache -- and frontswap -- +with a fairly small impact on the kernel, provide a huge amount +of flexibility for more dynamic, flexible RAM multiplexing. +Specifically, the Xen Transcendent Memory backend allows otherwise +"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple +virtual machines, but the pages can be compressed and deduplicated to +optimize RAM utilization. And when guest OS's are induced to surrender +underutilized RAM (e.g. with "self-ballooning"), page cache pages +are the first to go, and cleancache allows those pages to be +saved and reclaimed if overall host system memory conditions allow. + +And the identical interface used for cleancache can be used in +physical systems as well. The zcache driver acts as a memory-hungry +device that stores pages of data in a compressed state. And +the proposed "RAMster" driver shares RAM across multiple physical +systems. + +* Why does cleancache have its sticky fingers so deep inside the + filesystems and VFS? (Andrew Morton and Christoph Hellwig) + +The core hooks for cleancache in VFS are in most cases a single line +and the minimum set are placed precisely where needed to maintain +coherency (via cleancache_invalidate operations) between cleancache, +the page cache, and disk. All hooks compile into nothingness if +cleancache is config'ed off and turn into a function-pointer- +compare-to-NULL if config'ed on but no backend claims the ops +functions, or to a compare-struct-element-to-negative if a +backend claims the ops functions but a filesystem doesn't enable +cleancache. + +Some filesystems are built entirely on top of VFS and the hooks +in VFS are sufficient, so don't require an "init_fs" hook; the +initial implementation of cleancache didn't provide this hook. +But for some filesystems (such as btrfs), the VFS hooks are +incomplete and one or more hooks in fs-specific code are required. +And for some other filesystems, such as tmpfs, cleancache may +be counterproductive. 
So it seemed prudent to require a filesystem +to "opt in" to use cleancache, which requires adding a hook in +each filesystem. Not all filesystems are supported by cleancache +only because they haven't been tested. The existing set should +be sufficient to validate the concept, the opt-in approach means +that untested filesystems are not affected, and the hooks in the +existing filesystems should make it very easy to add more +filesystems in the future. + +The total impact of the hooks to existing fs and mm files is only +about 40 lines added (not counting comments and blank lines). + +* Why not make cleancache asynchronous and batched so it can more + easily interface with real devices with DMA instead of copying each + individual page? (Minchan Kim) + +The one-page-at-a-time copy semantics simplifies the implementation +on both the frontend and backend and also allows the backend to +do fancy things on-the-fly like page compression and +page deduplication. And since the data is "gone" (copied into/out +of the pageframe) before the cleancache get/put call returns, +a great deal of race conditions and potential coherency issues +are avoided. While the interface seems odd for a "real device" +or for real kernel-addressable RAM, it makes perfect sense for +transcendent memory. + +* Why is non-shared cleancache "exclusive"? And where is the + page "invalidated" after a "get"? (Minchan Kim) + +The main reason is to free up space in transcendent memory and +to avoid unnecessary cleancache_invalidate calls. If you want inclusive, +the page can be "put" immediately following the "get". If +put-after-get for inclusive becomes common, the interface could +be easily extended to add a "get_no_invalidate" call. + +The invalidate is done by the cleancache backend implementation. + +* What's the performance impact? + +Performance analysis has been presented at OLS'09 and LCA'10. 
+ +Briefly, performance gains can be significant on most workloads, +especially when memory pressure is high (e.g. when RAM is +overcommitted in a virtual workload); and because the hooks are +invoked primarily in place of or in addition to a disk read/write, +overhead is negligible even in worst case workloads. Basically +cleancache replaces I/O with memory-copy-CPU-overhead; on older +single-core systems with slow memory-copy speeds, cleancache +has little value, but in newer multicore machines, especially +consolidated/virtualized machines, it has great value. + +* How do I add cleancache support for filesystem X? (Boaz Harrash) + +Filesystems that are well-behaved and conform to certain +restrictions can utilize cleancache simply by making a call to +cleancache_init_fs at mount time. Unusual, misbehaving, or +poorly layered filesystems must either add additional hooks +and/or undergo extensive additional testing... or should just +not enable the optional cleancache. + +Some points for a filesystem to consider: + + - The FS should be block-device-based (e.g. a ram-based FS such + as tmpfs should not enable cleancache) + - To ensure coherency/correctness, the FS must ensure that all + file removal or truncation operations either go through VFS or + add hooks to do the equivalent cleancache "invalidate" operations + - To ensure coherency/correctness, either inode numbers must + be unique across the lifetime of the on-disk file OR the + FS must provide an "encode_fh" function. + - The FS must call the VFS superblock alloc and deactivate routines + or add hooks to do the equivalent cleancache calls done there. + - To maximize performance, all pages fetched from the FS should + go through the do_mpage_readpage routine or the FS should add + hooks to do the equivalent (cf. btrfs) + - Currently, the FS blocksize must be the same as PAGESIZE. This + is not an architectural restriction, but no backends currently + support anything different.
+ + - A clustered FS should invoke the "shared_init_fs" cleancache + hook to get best performance for some backends. + +* Why not use the KVA of the inode as the key? (Christoph Hellwig) + +If cleancache would use the inode virtual address instead of +inode/filehandle, the pool id could be eliminated. But, this +won't work because cleancache retains pagecache data pages +persistently even when the inode has been pruned from the +inode unused list, and only invalidates the data page if the file +gets removed/truncated. So if cleancache used the inode kva, +there would be potential coherency issues if/when the inode +kva is reused for a different file. Alternately, if cleancache +invalidated the pages when the inode kva was freed, much of the value +of cleancache would be lost because the cache of pages in cleancache +is potentially much larger than the kernel pagecache and is most +useful if the pages survive inode cache removal. + +* Why is a global variable required? + +The cleancache_enabled flag is checked in all of the frequently-used +cleancache hooks. The alternative is a function call to check a static +variable. Since cleancache is enabled dynamically at runtime, systems +that don't enable cleancache would suffer thousands (possibly +tens-of-thousands) of unnecessary function calls per second. So the +global variable allows cleancache to be enabled by default at compile +time, but have insignificant performance impact when cleancache remains +disabled at runtime. + +* Does cleancache work with KVM? + +The memory model of KVM is sufficiently different that a cleancache +backend may have less value for KVM. This remains to be tested, +especially in an overcommitted system. + +* Does cleancache work in userspace? It sounds useful for + memory hungry caches like web browsers. (Jamie Lokier) + +No plans yet, though we agree it sounds useful, at least for +apps that bypass the page cache (e.g. O_DIRECT).
+ +Last updated: Dan Magenheimer, April 13 2011 diff --git a/Documentation/vm/cleancache.txt b/Documentation/vm/cleancache.txt deleted file mode 100644 index 68cba9131c31..000000000000 --- a/Documentation/vm/cleancache.txt +++ /dev/null @@ -1,296 +0,0 @@ -.. _cleancache: - -========== -Cleancache -========== - -Motivation -========== - -Cleancache is a new optional feature provided by the VFS layer that -potentially dramatically increases page cache effectiveness for -many workloads in many environments at a negligible cost. - -Cleancache can be thought of as a page-granularity victim cache for clean -pages that the kernel's pageframe replacement algorithm (PFRA) would like -to keep around, but can't since there isn't enough memory. So when the -PFRA "evicts" a page, it first attempts to use cleancache code to -put the data contained in that page into "transcendent memory", memory -that is not directly accessible or addressable by the kernel and is -of unknown and possibly time-varying size. - -Later, when a cleancache-enabled filesystem wishes to access a page -in a file on disk, it first checks cleancache to see if it already -contains it; if it does, the page of data is copied into the kernel -and a disk access is avoided. - -Transcendent memory "drivers" for cleancache are currently implemented -in Xen (using hypervisor memory) and zcache (using in-kernel compressed -memory) and other implementations are in development. - -:ref:`FAQs ` are included below. - -Implementation Overview -======================= - -A cleancache "backend" that provides transcendent memory registers itself -to the kernel's cleancache "frontend" by calling cleancache_register_ops, -passing a pointer to a cleancache_ops structure with funcs set appropriately. -The functions provided must conform to certain semantics as follows: - -Most important, cleancache is "ephemeral". 
Pages which are copied into -cleancache have an indefinite lifetime which is completely unknowable -by the kernel and so may or may not still be in cleancache at any later time. -Thus, as its name implies, cleancache is not suitable for dirty pages. -Cleancache has complete discretion over what pages to preserve and what -pages to discard and when. - -Mounting a cleancache-enabled filesystem should call "init_fs" to obtain a -pool id which, if positive, must be saved in the filesystem's superblock; -a negative return value indicates failure. A "put_page" will copy a -(presumably about-to-be-evicted) page into cleancache and associate it with -the pool id, a file key, and a page index into the file. (The combination -of a pool id, a file key, and an index is sometimes called a "handle".) -A "get_page" will copy the page, if found, from cleancache into kernel memory. -An "invalidate_page" will ensure the page no longer is present in cleancache; -an "invalidate_inode" will invalidate all pages associated with the specified -file; and, when a filesystem is unmounted, an "invalidate_fs" will invalidate -all pages in all files specified by the given pool id and also surrender -the pool id. - -An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache -to treat the pool as shared using a 128-bit UUID as a key. On systems -that may run multiple kernels (such as hard partitioned or virtualized -systems) that may share a clustered filesystem, and where cleancache -may be shared among those kernels, calls to init_shared_fs that specify the -same UUID will receive the same pool id, thus allowing the pages to -be shared. Note that any security requirements must be imposed outside -of the kernel (e.g. by "tools" that control cleancache). Or a -cleancache implementation can simply disable shared_init by always -returning a negative value. - -If a get_page is successful on a non-shared pool, the page is invalidated -(thus making cleancache an "exclusive" cache). 
On a shared pool, the page -is NOT invalidated on a successful get_page so that it remains accessible to -other sharers. The kernel is responsible for ensuring coherency between -cleancache (shared or not), the page cache, and the filesystem, using -cleancache invalidate operations as required. - -Note that cleancache must enforce put-put-get coherency and get-get -coherency. For the former, if two puts are made to the same handle but -with different data, say AAA by the first put and BBB by the second, a -subsequent get can never return the stale data (AAA). For get-get coherency, -if a get for a given handle fails, subsequent gets for that handle will -never succeed unless preceded by a successful put with that handle. - -Last, cleancache provides no SMP serialization guarantees; if two -different Linux threads are simultaneously putting and invalidating a page -with the same handle, the results are indeterminate. Callers must -lock the page to ensure serial behavior. - -Cleancache Performance Metrics -============================== - -If properly configured, monitoring of cleancache is done via debugfs in -the `/sys/kernel/debug/cleancache` directory. The effectiveness of cleancache -can be measured (across all filesystems) with: - -``succ_gets`` - number of gets that were successful - -``failed_gets`` - number of gets that failed - -``puts`` - number of puts attempted (all "succeed") - -``invalidates`` - number of invalidates attempted - -A backend implementation may provide additional metrics. - -.. _faq: - -FAQ -=== - -* Where's the value? (Andrew Morton) - -Cleancache provides a significant performance benefit to many workloads -in many environments with negligible overhead by improving the -effectiveness of the pagecache. Clean pagecache pages are -saved in transcendent memory (RAM that is otherwise not directly -addressable to the kernel); fetching those pages later avoids "refaults" -and thus disk reads. 
- -Cleancache (and its sister code "frontswap") provide interfaces for -this transcendent memory (aka "tmem"), which conceptually lies between -fast kernel-directly-addressable RAM and slower DMA/asynchronous devices. -Disallowing direct kernel or userland reads/writes to tmem -is ideal when data is transformed to a different form and size (such -as with compression) or secretly moved (as might be useful for write- -balancing for some RAM-like devices). Evicted page-cache pages (and -swap pages) are a great use for this kind of slower-than-RAM-but-much- -faster-than-disk transcendent memory, and the cleancache (and frontswap) -"page-object-oriented" specification provides a nice way to read and -write -- and indirectly "name" -- the pages. - -In the virtual case, the whole point of virtualization is to statistically -multiplex physical resources across the varying demands of multiple -virtual machines. This is really hard to do with RAM and efforts to -do it well with no kernel change have essentially failed (except in some -well-publicized special-case workloads). Cleancache -- and frontswap -- -with a fairly small impact on the kernel, provide a huge amount -of flexibility for more dynamic, flexible RAM multiplexing. -Specifically, the Xen Transcendent Memory backend allows otherwise -"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple -virtual machines, but the pages can be compressed and deduplicated to -optimize RAM utilization. And when guest OS's are induced to surrender -underutilized RAM (e.g. with "self-ballooning"), page cache pages -are the first to go, and cleancache allows those pages to be -saved and reclaimed if overall host system memory conditions allow. - -And the identical interface used for cleancache can be used in -physical systems as well. The zcache driver acts as a memory-hungry -device that stores pages of data in a compressed state. And -the proposed "RAMster" driver shares RAM across multiple physical -systems. 
- -* Why does cleancache have its sticky fingers so deep inside the - filesystems and VFS? (Andrew Morton and Christoph Hellwig) - -The core hooks for cleancache in VFS are in most cases a single line -and the minimum set are placed precisely where needed to maintain -coherency (via cleancache_invalidate operations) between cleancache, -the page cache, and disk. All hooks compile into nothingness if -cleancache is config'ed off and turn into a function-pointer- -compare-to-NULL if config'ed on but no backend claims the ops -functions, or to a compare-struct-element-to-negative if a -backend claims the ops functions but a filesystem doesn't enable -cleancache. - -Some filesystems are built entirely on top of VFS and the hooks -in VFS are sufficient, so don't require an "init_fs" hook; the -initial implementation of cleancache didn't provide this hook. -But for some filesystems (such as btrfs), the VFS hooks are -incomplete and one or more hooks in fs-specific code are required. -And for some other filesystems, such as tmpfs, cleancache may -be counterproductive. So it seemed prudent to require a filesystem -to "opt in" to use cleancache, which requires adding a hook in -each filesystem. Not all filesystems are supported by cleancache -only because they haven't been tested. The existing set should -be sufficient to validate the concept, the opt-in approach means -that untested filesystems are not affected, and the hooks in the -existing filesystems should make it very easy to add more -filesystems in the future. - -The total impact of the hooks to existing fs and mm files is only -about 40 lines added (not counting comments and blank lines). - -* Why not make cleancache asynchronous and batched so it can more - easily interface with real devices with DMA instead of copying each - individual page? 
(Minchan Kim) - -The one-page-at-a-time copy semantics simplifies the implementation -on both the frontend and backend and also allows the backend to -do fancy things on-the-fly like page compression and -page deduplication. And since the data is "gone" (copied into/out -of the pageframe) before the cleancache get/put call returns, -a great deal of race conditions and potential coherency issues -are avoided. While the interface seems odd for a "real device" -or for real kernel-addressable RAM, it makes perfect sense for -transcendent memory. - -* Why is non-shared cleancache "exclusive"? And where is the - page "invalidated" after a "get"? (Minchan Kim) - -The main reason is to free up space in transcendent memory and -to avoid unnecessary cleancache_invalidate calls. If you want inclusive, -the page can be "put" immediately following the "get". If -put-after-get for inclusive becomes common, the interface could -be easily extended to add a "get_no_invalidate" call. - -The invalidate is done by the cleancache backend implementation. - -* What's the performance impact? - -Performance analysis has been presented at OLS'09 and LCA'10. -Briefly, performance gains can be significant on most workloads, -especially when memory pressure is high (e.g. when RAM is -overcommitted in a virtual workload); and because the hooks are -invoked primarily in place of or in addition to a disk read/write, -overhead is negligible even in worst case workloads. Basically -cleancache replaces I/O with memory-copy-CPU-overhead; on older -single-core systems with slow memory-copy speeds, cleancache -has little value, but in newer multicore machines, especially -consolidated/virtualized machines, it has great value. - -* How do I add cleancache support for filesystem X? (Boaz Harrash) - -Filesystems that are well-behaved and conform to certain -restrictions can utilize cleancache simply by making a call to -cleancache_init_fs at mount time. 
Unusual, misbehaving, or -poorly layered filesystems must either add additional hooks -and/or undergo extensive additional testing... or should just -not enable the optional cleancache. - -Some points for a filesystem to consider: - - - The FS should be block-device-based (e.g. a ram-based FS such - as tmpfs should not enable cleancache) - - To ensure coherency/correctness, the FS must ensure that all - file removal or truncation operations either go through VFS or - add hooks to do the equivalent cleancache "invalidate" operations - - To ensure coherency/correctness, either inode numbers must - be unique across the lifetime of the on-disk file OR the - FS must provide an "encode_fh" function. - - The FS must call the VFS superblock alloc and deactivate routines - or add hooks to do the equivalent cleancache calls done there. - - To maximize performance, all pages fetched from the FS should - go through the do_mpag_readpage routine or the FS should add - hooks to do the equivalent (cf. btrfs) - - Currently, the FS blocksize must be the same as PAGESIZE. This - is not an architectural restriction, but no backends currently - support anything different. - - A clustered FS should invoke the "shared_init_fs" cleancache - hook to get best performance for some backends. - -* Why not use the KVA of the inode as the key? (Christoph Hellwig) - -If cleancache would use the inode virtual address instead of -inode/filehandle, the pool id could be eliminated. But, this -won't work because cleancache retains pagecache data pages -persistently even when the inode has been pruned from the -inode unused list, and only invalidates the data page if the file -gets removed/truncated. So if cleancache used the inode kva, -there would be potential coherency issues if/when the inode -kva is reused for a different file. 
Alternately, if cleancache -invalidated the pages when the inode kva was freed, much of the value -of cleancache would be lost because the cache of pages in cleanache -is potentially much larger than the kernel pagecache and is most -useful if the pages survive inode cache removal. - -* Why is a global variable required? - -The cleancache_enabled flag is checked in all of the frequently-used -cleancache hooks. The alternative is a function call to check a static -variable. Since cleancache is enabled dynamically at runtime, systems -that don't enable cleancache would suffer thousands (possibly -tens-of-thousands) of unnecessary function calls per second. So the -global variable allows cleancache to be enabled by default at compile -time, but have insignificant performance impact when cleancache remains -disabled at runtime. - -* Does cleanache work with KVM? - -The memory model of KVM is sufficiently different that a cleancache -backend may have less value for KVM. This remains to be tested, -especially in an overcommitted system. - -* Does cleancache work in userspace? It sounds useful for - memory hungry caches like web browsers. (Jamie Lokier) - -No plans yet, though we agree it sounds useful, at least for -apps that bypass the page cache (e.g. O_DIRECT). - -Last updated: Dan Magenheimer, April 13 2011 diff --git a/Documentation/vm/frontswap.rst b/Documentation/vm/frontswap.rst new file mode 100644 index 000000000000..1979f430c1c5 --- /dev/null +++ b/Documentation/vm/frontswap.rst @@ -0,0 +1,293 @@ +.. _frontswap: + +========= +Frontswap +========= + +Frontswap provides a "transcendent memory" interface for swap pages. +In some environments, dramatic performance savings may be obtained because +swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk. 
+ +(Note, frontswap -- and :ref:`cleancache` (merged at 3.0) -- are the "frontends" +and the only necessary changes to the core kernel for transcendent memory; +all other supporting code -- the "backends" -- is implemented as drivers. +See the LWN.net article `Transcendent memory in a nutshell`_ +for a detailed overview of frontswap and related kernel parts) + +.. _Transcendent memory in a nutshell: https://lwn.net/Articles/454795/ + +Frontswap is so named because it can be thought of as the opposite of +a "backing" store for a swap device. The storage is assumed to be +a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming +to the requirements of transcendent memory (such as Xen's "tmem", or +in-kernel compressed memory, aka "zcache", or future RAM-like devices); +this pseudo-RAM device is not directly accessible or addressable by the +kernel and is of unknown and possibly time-varying size. The driver +links itself to frontswap by calling frontswap_register_ops to set the +frontswap_ops funcs appropriately and the functions it provides must +conform to certain policies as follows: + +An "init" prepares the device to receive frontswap pages associated +with the specified swap device number (aka "type"). A "store" will +copy the page to transcendent memory and associate it with the type and +offset associated with the page. A "load" will copy the page, if found, +from transcendent memory into kernel memory, but will NOT remove the page +from transcendent memory. An "invalidate_page" will remove the page +from transcendent memory and an "invalidate_area" will remove ALL pages +associated with the swap type (e.g., like swapoff) and notify the "device" +to refuse further stores with that swap type. + +Once a page is successfully stored, a matching load on the page will normally +succeed. So when the kernel finds itself in a situation where it needs +to swap out a page, it first attempts to use frontswap. 
If the store returns +success, the data has been successfully saved to transcendent memory and +a disk write and, if the data is later read back, a disk read are avoided. +If a store returns failure, transcendent memory has rejected the data, and the +page can be written to swap as usual. + +If a backend chooses, frontswap can be configured as a "writethrough +cache" by calling frontswap_writethrough(). In this mode, the reduction +in swap device writes is lost (and also a non-trivial performance advantage) +in order to allow the backend to arbitrarily "reclaim" space used to +store frontswap pages to more completely manage its memory usage. + +Note that if a page is stored and the page already exists in transcendent memory +(a "duplicate" store), either the store succeeds and the data is overwritten, +or the store fails AND the page is invalidated. This ensures stale data may +never be obtained from frontswap. + +If properly configured, monitoring of frontswap is done via debugfs in +the `/sys/kernel/debug/frontswap` directory. The effectiveness of +frontswap can be measured (across all swap devices) with: + +``failed_stores`` + how many store attempts have failed + +``loads`` + how many loads were attempted (all should succeed) + +``succ_stores`` + how many store attempts have succeeded + +``invalidates`` + how many invalidates were attempted + +A backend implementation may provide additional metrics. + +FAQ +=== + +* Where's the value? + +When a workload starts swapping, performance falls through the floor. +Frontswap significantly increases performance in many such workloads by +providing a clean, dynamic interface to read and write swap pages to +"transcendent memory" that is otherwise not directly addressable to the kernel. +This interface is ideal when data is transformed to a different form +and size (such as with compression) or secretly moved (as might be +useful for write-balancing for some RAM-like devices). 
Swap pages (and +evicted page-cache pages) are a great use for this kind of slower-than-RAM- +but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and +cleancache) interface to transcendent memory provides a nice way to read +and write -- and indirectly "name" -- the pages. + +Frontswap -- and cleancache -- with a fairly small impact on the kernel, +provides a huge amount of flexibility for more dynamic, flexible RAM +utilization in various system configurations: + +In the single kernel case, aka "zcache", pages are compressed and +stored in local memory, thus increasing the total anonymous pages +that can be safely kept in RAM. Zcache essentially trades off CPU +cycles used in compression/decompression for better memory utilization. +Benchmarks have shown little or no impact when memory pressure is +low while providing a significant performance improvement (25%+) +on some workloads under high memory pressure. + +"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory +support for clustered systems. Frontswap pages are locally compressed +as in zcache, but then "remotified" to another system's RAM. This +allows RAM to be dynamically load-balanced back-and-forth as needed, +i.e. when system A is overcommitted, it can swap to system B, and +vice versa. RAMster can also be configured as a memory server so +many servers in a cluster can swap, dynamically as needed, to a single +server configured with a large amount of RAM... without pre-configuring +how much of the RAM is available for each of the clients! + +In the virtual case, the whole point of virtualization is to statistically +multiplex physical resources across the varying demands of multiple +virtual machines. This is really hard to do with RAM and efforts to do +it well with no kernel changes have essentially failed (except in some +well-publicized special-case workloads). 
+Specifically, the Xen Transcendent Memory backend allows otherwise +"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple +virtual machines, but the pages can be compressed and deduplicated to +optimize RAM utilization. And when guest OS's are induced to surrender +underutilized RAM (e.g. with "selfballooning"), sudden unexpected +memory pressure may result in swapping; frontswap allows those pages +to be swapped to and from hypervisor RAM (if overall host system memory +conditions allow), thus mitigating the potentially awful performance impact +of unplanned swapping. + +A KVM implementation is underway and has been RFC'ed to lkml. And, +using frontswap, investigation is also underway on the use of NVM as +a memory extension technology. + +* Sure there may be performance advantages in some situations, but + what's the space/time overhead of frontswap? + +If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into +nothingness and the only overhead is a few extra bytes per swapon'ed +swap device. If CONFIG_FRONTSWAP is enabled but no frontswap "backend" +registers, there is one extra global variable compared to zero for +every swap page read or written. If CONFIG_FRONTSWAP is enabled +AND a frontswap backend registers AND the backend fails every "store" +request (i.e. provides no memory despite claiming it might), +CPU overhead is still negligible -- and since every frontswap fail +precedes a swap page write-to-disk, the system is highly likely +to be I/O bound and using a small fraction of a percent of a CPU +will be irrelevant anyway. + +As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend +registers, one bit is allocated for every swap page for every swap +device that is swapon'd. This is added to the EIGHT bits (which +was sixteen until about 2.6.34) that the kernel already allocates +for every swap page for every swap device that is swapon'd. 
(Hugh +Dickins has observed that frontswap could probably steal one of +the existing eight bits, but let's worry about that minor optimization +later.) For very large swap disks (which are rare) on a standard +4K pagesize, this is 1MB per 32GB swap. + +When swap pages are stored in transcendent memory instead of written +out to disk, there is a side effect that this may create more memory +pressure that can potentially outweigh the other advantages. A +backend, such as zcache, must implement policies to carefully (but +dynamically) manage memory limits to ensure this doesn't happen. + +* OK, how about a quick overview of what this frontswap patch does + in terms that a kernel hacker can grok? + +Let's assume that a frontswap "backend" has registered during +kernel initialization; this registration indicates that this +frontswap backend has access to some "memory" that is not directly +accessible by the kernel. Exactly how much memory it provides is +entirely dynamic and random. + +Whenever a swap-device is swapon'd frontswap_init() is called, +passing the swap device number (aka "type") as a parameter. +This notifies frontswap to expect attempts to "store" swap pages +associated with that number. + +Whenever the swap subsystem is readying a page to write to a swap +device (c.f swap_writepage()), frontswap_store is called. Frontswap +consults with the frontswap backend and if the backend says it does NOT +have room, frontswap_store returns -1 and the kernel swaps the page +to the swap device as normal. Note that the response from the frontswap +backend is unpredictable to the kernel; it may choose to never accept a +page, it could accept every ninth page, or it might accept every +page. But if the backend does accept a page, the data from the page +has already been copied and associated with the type and offset, +and the backend guarantees the persistence of the data. 
In this case, +frontswap sets a bit in the "frontswap_map" for the swap device +corresponding to the page offset on the swap device to which it would +otherwise have written the data. + +When the swap subsystem needs to swap-in a page (swap_readpage()), +it first calls frontswap_load() which checks the frontswap_map to +see if the page was earlier accepted by the frontswap backend. If +it was, the page of data is filled from the frontswap backend and +the swap-in is complete. If not, the normal swap-in code is +executed to obtain the page of data from the real swap device. + +So every time the frontswap backend accepts a page, a swap device read +and (potentially) a swap device write are replaced by a "frontswap backend +store" and (possibly) a "frontswap backend loads", which are presumably much +faster. + +* Can't frontswap be configured as a "special" swap device that is + just higher priority than any real swap device (e.g. like zswap, + or maybe swap-over-nbd/NFS)? + +No. First, the existing swap subsystem doesn't allow for any kind of +swap hierarchy. Perhaps it could be rewritten to accommodate a hierarchy, +but this would require fairly drastic changes. Even if it were +rewritten, the existing swap subsystem uses the block I/O layer which +assumes a swap device is fixed size and any page in it is linearly +addressable. Frontswap barely touches the existing swap subsystem, +and works around the constraints of the block I/O subsystem to provide +a great deal of flexibility and dynamicity. + +For example, the acceptance of any swap page by the frontswap backend is +entirely unpredictable. This is critical to the definition of frontswap +backends because it grants completely dynamic discretion to the +backend. In zcache, one cannot know a priori how compressible a page is. +"Poorly" compressible pages can be rejected, and "poorly" can itself be +defined dynamically depending on current memory constraints. 
+ +Further, frontswap is entirely synchronous whereas a real swap +device is, by definition, asynchronous and uses block I/O. The +block I/O layer is not only unnecessary, but may perform "optimizations" +that are inappropriate for a RAM-oriented device including delaying +the write of some pages for a significant amount of time. Synchrony is +required to ensure the dynamicity of the backend and to avoid thorny race +conditions that would unnecessarily and greatly complicate frontswap +and/or the block I/O subsystem. That said, only the initial "store" +and "load" operations need be synchronous. A separate asynchronous thread +is free to manipulate the pages stored by frontswap. For example, +the "remotification" thread in RAMster uses standard asynchronous +kernel sockets to move compressed frontswap pages to a remote machine. +Similarly, a KVM guest-side implementation could do in-guest compression +and use "batched" hypercalls. + +In a virtualized environment, the dynamicity allows the hypervisor +(or host OS) to do "intelligent overcommit". For example, it can +choose to accept pages only until host-swapping might be imminent, +then force guests to do their own swapping. + +There is a downside to the transcendent memory specifications for +frontswap: Since any "store" might fail, there must always be a real +slot on a real swap device to swap the page. Thus frontswap must be +implemented as a "shadow" to every swapon'd device with the potential +capability of holding every page that the swap device might have held +and the possibility that it might hold no pages at all. This means +that frontswap cannot contain more pages than the total of swapon'd +swap devices. For example, if NO swap device is configured on some +installation, frontswap is useless. Swapless portable devices +can still use frontswap but a backend for such devices must configure +some kind of "ghost" swap device and ensure that it is never used. 
+ +* Why this weird definition about "duplicate stores"? If a page + has been previously successfully stored, can't it always be + successfully overwritten? + +Nearly always it can, but no, sometimes it cannot. Consider an example +where data is compressed and the original 4K page has been compressed +to 1K. Now an attempt is made to overwrite the page with data that +is non-compressible and so would take the entire 4K. But the backend +has no more space. In this case, the store must be rejected. Whenever +frontswap rejects a store that would overwrite, it also must invalidate +the old data and ensure that it is no longer accessible. Since the +swap subsystem then writes the new data to the real swap device, +this is the correct course of action to ensure coherency. + +* What is frontswap_shrink for? + +When the (non-frontswap) swap subsystem swaps out a page to a real +swap device, that page is only taking up low-value pre-allocated disk +space. But if frontswap has placed a page in transcendent memory, that +page may be taking up valuable real estate. The frontswap_shrink +routine allows code outside of the swap subsystem to force pages out +of the memory managed by frontswap and back into kernel-addressable memory. +For example, in RAMster, a "suction driver" thread will attempt +to "repatriate" pages sent to a remote machine back to the local machine; +this is driven using the frontswap_shrink mechanism when memory pressure +subsides. + +* Why does the frontswap patch create the new include file swapfile.h? + +The frontswap code depends on some swap-subsystem-internal data +structures that have, over the years, moved back and forth between +static and global. This seemed a reasonable compromise: Define +them as global but declare them in a new include file that isn't +included by the large number of source files that include swap.h. 
+ +Dan Magenheimer, last updated April 9, 2012 diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt deleted file mode 100644 index 1979f430c1c5..000000000000 --- a/Documentation/vm/frontswap.txt +++ /dev/null @@ -1,293 +0,0 @@ -.. _frontswap: - -========= -Frontswap -========= - -Frontswap provides a "transcendent memory" interface for swap pages. -In some environments, dramatic performance savings may be obtained because -swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk. - -(Note, frontswap -- and :ref:`cleancache` (merged at 3.0) -- are the "frontends" -and the only necessary changes to the core kernel for transcendent memory; -all other supporting code -- the "backends" -- is implemented as drivers. -See the LWN.net article `Transcendent memory in a nutshell`_ -for a detailed overview of frontswap and related kernel parts) - -.. _Transcendent memory in a nutshell: https://lwn.net/Articles/454795/ - -Frontswap is so named because it can be thought of as the opposite of -a "backing" store for a swap device. The storage is assumed to be -a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming -to the requirements of transcendent memory (such as Xen's "tmem", or -in-kernel compressed memory, aka "zcache", or future RAM-like devices); -this pseudo-RAM device is not directly accessible or addressable by the -kernel and is of unknown and possibly time-varying size. The driver -links itself to frontswap by calling frontswap_register_ops to set the -frontswap_ops funcs appropriately and the functions it provides must -conform to certain policies as follows: - -An "init" prepares the device to receive frontswap pages associated -with the specified swap device number (aka "type"). A "store" will -copy the page to transcendent memory and associate it with the type and -offset associated with the page. 
A "load" will copy the page, if found, -from transcendent memory into kernel memory, but will NOT remove the page -from transcendent memory. An "invalidate_page" will remove the page -from transcendent memory and an "invalidate_area" will remove ALL pages -associated with the swap type (e.g., like swapoff) and notify the "device" -to refuse further stores with that swap type. - -Once a page is successfully stored, a matching load on the page will normally -succeed. So when the kernel finds itself in a situation where it needs -to swap out a page, it first attempts to use frontswap. If the store returns -success, the data has been successfully saved to transcendent memory and -a disk write and, if the data is later read back, a disk read are avoided. -If a store returns failure, transcendent memory has rejected the data, and the -page can be written to swap as usual. - -If a backend chooses, frontswap can be configured as a "writethrough -cache" by calling frontswap_writethrough(). In this mode, the reduction -in swap device writes is lost (and also a non-trivial performance advantage) -in order to allow the backend to arbitrarily "reclaim" space used to -store frontswap pages to more completely manage its memory usage. - -Note that if a page is stored and the page already exists in transcendent memory -(a "duplicate" store), either the store succeeds and the data is overwritten, -or the store fails AND the page is invalidated. This ensures stale data may -never be obtained from frontswap. - -If properly configured, monitoring of frontswap is done via debugfs in -the `/sys/kernel/debug/frontswap` directory. 
The effectiveness of -frontswap can be measured (across all swap devices) with: - -``failed_stores`` - how many store attempts have failed - -``loads`` - how many loads were attempted (all should succeed) - -``succ_stores`` - how many store attempts have succeeded - -``invalidates`` - how many invalidates were attempted - -A backend implementation may provide additional metrics. - -FAQ -=== - -* Where's the value? - -When a workload starts swapping, performance falls through the floor. -Frontswap significantly increases performance in many such workloads by -providing a clean, dynamic interface to read and write swap pages to -"transcendent memory" that is otherwise not directly addressable to the kernel. -This interface is ideal when data is transformed to a different form -and size (such as with compression) or secretly moved (as might be -useful for write-balancing for some RAM-like devices). Swap pages (and -evicted page-cache pages) are a great use for this kind of slower-than-RAM- -but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and -cleancache) interface to transcendent memory provides a nice way to read -and write -- and indirectly "name" -- the pages. - -Frontswap -- and cleancache -- with a fairly small impact on the kernel, -provides a huge amount of flexibility for more dynamic, flexible RAM -utilization in various system configurations: - -In the single kernel case, aka "zcache", pages are compressed and -stored in local memory, thus increasing the total anonymous pages -that can be safely kept in RAM. Zcache essentially trades off CPU -cycles used in compression/decompression for better memory utilization. -Benchmarks have shown little or no impact when memory pressure is -low while providing a significant performance improvement (25%+) -on some workloads under high memory pressure. - -"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory -support for clustered systems. 
Frontswap pages are locally compressed -as in zcache, but then "remotified" to another system's RAM. This -allows RAM to be dynamically load-balanced back-and-forth as needed, -i.e. when system A is overcommitted, it can swap to system B, and -vice versa. RAMster can also be configured as a memory server so -many servers in a cluster can swap, dynamically as needed, to a single -server configured with a large amount of RAM... without pre-configuring -how much of the RAM is available for each of the clients! - -In the virtual case, the whole point of virtualization is to statistically -multiplex physical resources across the varying demands of multiple -virtual machines. This is really hard to do with RAM and efforts to do -it well with no kernel changes have essentially failed (except in some -well-publicized special-case workloads). -Specifically, the Xen Transcendent Memory backend allows otherwise -"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple -virtual machines, but the pages can be compressed and deduplicated to -optimize RAM utilization. And when guest OS's are induced to surrender -underutilized RAM (e.g. with "selfballooning"), sudden unexpected -memory pressure may result in swapping; frontswap allows those pages -to be swapped to and from hypervisor RAM (if overall host system memory -conditions allow), thus mitigating the potentially awful performance impact -of unplanned swapping. - -A KVM implementation is underway and has been RFC'ed to lkml. And, -using frontswap, investigation is also underway on the use of NVM as -a memory extension technology. - -* Sure there may be performance advantages in some situations, but - what's the space/time overhead of frontswap? - -If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into -nothingness and the only overhead is a few extra bytes per swapon'ed -swap device. 
If CONFIG_FRONTSWAP is enabled but no frontswap "backend" -registers, there is one extra global variable compared to zero for -every swap page read or written. If CONFIG_FRONTSWAP is enabled -AND a frontswap backend registers AND the backend fails every "store" -request (i.e. provides no memory despite claiming it might), -CPU overhead is still negligible -- and since every frontswap fail -precedes a swap page write-to-disk, the system is highly likely -to be I/O bound and using a small fraction of a percent of a CPU -will be irrelevant anyway. - -As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend -registers, one bit is allocated for every swap page for every swap -device that is swapon'd. This is added to the EIGHT bits (which -was sixteen until about 2.6.34) that the kernel already allocates -for every swap page for every swap device that is swapon'd. (Hugh -Dickins has observed that frontswap could probably steal one of -the existing eight bits, but let's worry about that minor optimization -later.) For very large swap disks (which are rare) on a standard -4K pagesize, this is 1MB per 32GB swap. - -When swap pages are stored in transcendent memory instead of written -out to disk, there is a side effect that this may create more memory -pressure that can potentially outweigh the other advantages. A -backend, such as zcache, must implement policies to carefully (but -dynamically) manage memory limits to ensure this doesn't happen. - -* OK, how about a quick overview of what this frontswap patch does - in terms that a kernel hacker can grok? - -Let's assume that a frontswap "backend" has registered during -kernel initialization; this registration indicates that this -frontswap backend has access to some "memory" that is not directly -accessible by the kernel. Exactly how much memory it provides is -entirely dynamic and random. 
- -Whenever a swap-device is swapon'd frontswap_init() is called, -passing the swap device number (aka "type") as a parameter. -This notifies frontswap to expect attempts to "store" swap pages -associated with that number. - -Whenever the swap subsystem is readying a page to write to a swap -device (c.f swap_writepage()), frontswap_store is called. Frontswap -consults with the frontswap backend and if the backend says it does NOT -have room, frontswap_store returns -1 and the kernel swaps the page -to the swap device as normal. Note that the response from the frontswap -backend is unpredictable to the kernel; it may choose to never accept a -page, it could accept every ninth page, or it might accept every -page. But if the backend does accept a page, the data from the page -has already been copied and associated with the type and offset, -and the backend guarantees the persistence of the data. In this case, -frontswap sets a bit in the "frontswap_map" for the swap device -corresponding to the page offset on the swap device to which it would -otherwise have written the data. - -When the swap subsystem needs to swap-in a page (swap_readpage()), -it first calls frontswap_load() which checks the frontswap_map to -see if the page was earlier accepted by the frontswap backend. If -it was, the page of data is filled from the frontswap backend and -the swap-in is complete. If not, the normal swap-in code is -executed to obtain the page of data from the real swap device. - -So every time the frontswap backend accepts a page, a swap device read -and (potentially) a swap device write are replaced by a "frontswap backend -store" and (possibly) a "frontswap backend loads", which are presumably much -faster. - -* Can't frontswap be configured as a "special" swap device that is - just higher priority than any real swap device (e.g. like zswap, - or maybe swap-over-nbd/NFS)? - -No. First, the existing swap subsystem doesn't allow for any kind of -swap hierarchy. 
Perhaps it could be rewritten to accommodate a hierarchy, -but this would require fairly drastic changes. Even if it were -rewritten, the existing swap subsystem uses the block I/O layer which -assumes a swap device is fixed size and any page in it is linearly -addressable. Frontswap barely touches the existing swap subsystem, -and works around the constraints of the block I/O subsystem to provide -a great deal of flexibility and dynamicity. - -For example, the acceptance of any swap page by the frontswap backend is -entirely unpredictable. This is critical to the definition of frontswap -backends because it grants completely dynamic discretion to the -backend. In zcache, one cannot know a priori how compressible a page is. -"Poorly" compressible pages can be rejected, and "poorly" can itself be -defined dynamically depending on current memory constraints. - -Further, frontswap is entirely synchronous whereas a real swap -device is, by definition, asynchronous and uses block I/O. The -block I/O layer is not only unnecessary, but may perform "optimizations" -that are inappropriate for a RAM-oriented device including delaying -the write of some pages for a significant amount of time. Synchrony is -required to ensure the dynamicity of the backend and to avoid thorny race -conditions that would unnecessarily and greatly complicate frontswap -and/or the block I/O subsystem. That said, only the initial "store" -and "load" operations need be synchronous. A separate asynchronous thread -is free to manipulate the pages stored by frontswap. For example, -the "remotification" thread in RAMster uses standard asynchronous -kernel sockets to move compressed frontswap pages to a remote machine. -Similarly, a KVM guest-side implementation could do in-guest compression -and use "batched" hypercalls. - -In a virtualized environment, the dynamicity allows the hypervisor -(or host OS) to do "intelligent overcommit". 
For example, it can -choose to accept pages only until host-swapping might be imminent, -then force guests to do their own swapping. - -There is a downside to the transcendent memory specifications for -frontswap: Since any "store" might fail, there must always be a real -slot on a real swap device to swap the page. Thus frontswap must be -implemented as a "shadow" to every swapon'd device with the potential -capability of holding every page that the swap device might have held -and the possibility that it might hold no pages at all. This means -that frontswap cannot contain more pages than the total of swapon'd -swap devices. For example, if NO swap device is configured on some -installation, frontswap is useless. Swapless portable devices -can still use frontswap but a backend for such devices must configure -some kind of "ghost" swap device and ensure that it is never used. - -* Why this weird definition about "duplicate stores"? If a page - has been previously successfully stored, can't it always be - successfully overwritten? - -Nearly always it can, but no, sometimes it cannot. Consider an example -where data is compressed and the original 4K page has been compressed -to 1K. Now an attempt is made to overwrite the page with data that -is non-compressible and so would take the entire 4K. But the backend -has no more space. In this case, the store must be rejected. Whenever -frontswap rejects a store that would overwrite, it also must invalidate -the old data and ensure that it is no longer accessible. Since the -swap subsystem then writes the new data to the read swap device, -this is the correct course of action to ensure coherency. - -* What is frontswap_shrink for? - -When the (non-frontswap) swap subsystem swaps out a page to a real -swap device, that page is only taking up low-value pre-allocated disk -space. But if frontswap has placed a page in transcendent memory, that -page may be taking up valuable real estate. 
The frontswap_shrink -routine allows code outside of the swap subsystem to force pages out -of the memory managed by frontswap and back into kernel-addressable memory. -For example, in RAMster, a "suction driver" thread will attempt -to "repatriate" pages sent to a remote machine back to the local machine; -this is driven using the frontswap_shrink mechanism when memory pressure -subsides. - -* Why does the frontswap patch create the new include file swapfile.h? - -The frontswap code depends on some swap-subsystem-internal data -structures that have, over the years, moved back and forth between -static and global. This seemed a reasonable compromise: Define -them as global but declare them in a new include file that isn't -included by the large number of source files that include swap.h. - -Dan Magenheimer, last updated April 9, 2012 diff --git a/Documentation/vm/highmem.rst b/Documentation/vm/highmem.rst new file mode 100644 index 000000000000..0f69a9fec34d --- /dev/null +++ b/Documentation/vm/highmem.rst @@ -0,0 +1,147 @@ +.. _highmem: + +==================== +High Memory Handling +==================== + +By: Peter Zijlstra + +.. contents:: :local: + +What Is High Memory? +==================== + +High memory (highmem) is used when the size of physical memory approaches or +exceeds the maximum size of virtual memory. At that point it becomes +impossible for the kernel to keep all of the available physical memory mapped +at all times. This means the kernel needs to start using temporary mappings of +the pieces of physical memory that it wants to access. + +The part of (physical) memory not covered by a permanent mapping is what we +refer to as 'highmem'. There are various architecture dependent constraints on +where exactly that border lies. + +In the i386 arch, for example, we choose to map the kernel into every process's +VM space so that we don't have to pay the full TLB invalidation costs for +kernel entry/exit. 
This means the available virtual memory space (4GiB on +i386) has to be divided between user and kernel space. + +The traditional split for architectures using this approach is 3:1, 3GiB for +userspace and the top 1GiB for kernel space:: + + +--------+ 0xffffffff + | Kernel | + +--------+ 0xc0000000 + | | + | User | + | | + +--------+ 0x00000000 + +This means that the kernel can at most map 1GiB of physical memory at any one +time, but because we need virtual address space for other things - including +temporary maps to access the rest of the physical memory - the actual direct +map will typically be less (usually around ~896MiB). + +Other architectures that have mm context tagged TLBs can have separate kernel +and user maps. Some hardware (like some ARMs), however, have limited virtual +space when they use mm context tags. + + +Temporary Virtual Mappings +========================== + +The kernel contains several ways of creating temporary mappings: + +* vmap(). This can be used to make a long duration mapping of multiple + physical pages into a contiguous virtual space. It needs global + synchronization to unmap. + +* kmap(). This permits a short duration mapping of a single page. It needs + global synchronization, but is amortized somewhat. It is also prone to + deadlocks when using in a nested fashion, and so it is not recommended for + new code. + +* kmap_atomic(). This permits a very short duration mapping of a single + page. Since the mapping is restricted to the CPU that issued it, it + performs well, but the issuing task is therefore required to stay on that + CPU until it has finished, lest some other task displace its mappings. + + kmap_atomic() may also be used by interrupt contexts, since it is does not + sleep and the caller may not sleep until after kunmap_atomic() is called. + + It may be assumed that k[un]map_atomic() won't fail. + + +Using kmap_atomic +================= + +When and where to use kmap_atomic() is straightforward. 
It is used when code +wants to access the contents of a page that might be allocated from high memory +(see __GFP_HIGHMEM), for example a page in the pagecache. The API has two +functions, and they can be used in a manner similar to the following:: + + /* Find the page of interest. */ + struct page *page = find_get_page(mapping, offset); + + /* Gain access to the contents of that page. */ + void *vaddr = kmap_atomic(page); + + /* Do something to the contents of that page. */ + memset(vaddr, 0, PAGE_SIZE); + + /* Unmap that page. */ + kunmap_atomic(vaddr); + +Note that the kunmap_atomic() call takes the result of the kmap_atomic() call +not the argument. + +If you need to map two pages because you want to copy from one page to +another you need to keep the kmap_atomic calls strictly nested, like:: + + vaddr1 = kmap_atomic(page1); + vaddr2 = kmap_atomic(page2); + + memcpy(vaddr1, vaddr2, PAGE_SIZE); + + kunmap_atomic(vaddr2); + kunmap_atomic(vaddr1); + + +Cost of Temporary Mappings +========================== + +The cost of creating temporary mappings can be quite high. The arch has to +manipulate the kernel's page tables, the data TLB and/or the MMU's registers. + +If CONFIG_HIGHMEM is not set, then the kernel will try and create a mapping +simply with a bit of arithmetic that will convert the page struct address into +a pointer to the page contents rather than juggling mappings about. In such a +case, the unmap operation may be a null operation. + +If CONFIG_MMU is not set, then there can be no temporary mappings and no +highmem. In such a case, the arithmetic approach will also be used. + + +i386 PAE +======== + +The i386 arch, under some circumstances, will permit you to stick up to 64GiB +of RAM into your 32-bit machine. 
This has a number of consequences: + +* Linux needs a page-frame structure for each page in the system and the + pageframes need to live in the permanent mapping, which means: + +* you can have 896M/sizeof(struct page) page-frames at most; with struct + page being 32-bytes that would end up being something in the order of 112G + worth of pages; the kernel, however, needs to store more than just + page-frames in that memory... + +* PAE makes your page tables larger - which slows the system down as more + data has to be accessed to traverse in TLB fills and the like. One + advantage is that PAE has more PTE bits and can provide advanced features + like NX and PAT. + +The general recommendation is that you don't use more than 8GiB on a 32-bit +machine - although more might work for you and your workload, you're pretty +much on your own - don't expect kernel developers to really care much if things +come apart. diff --git a/Documentation/vm/highmem.txt b/Documentation/vm/highmem.txt deleted file mode 100644 index 0f69a9fec34d..000000000000 --- a/Documentation/vm/highmem.txt +++ /dev/null @@ -1,147 +0,0 @@ -.. _highmem: - -==================== -High Memory Handling -==================== - -By: Peter Zijlstra - -.. contents:: :local: - -What Is High Memory? -==================== - -High memory (highmem) is used when the size of physical memory approaches or -exceeds the maximum size of virtual memory. At that point it becomes -impossible for the kernel to keep all of the available physical memory mapped -at all times. This means the kernel needs to start using temporary mappings of -the pieces of physical memory that it wants to access. - -The part of (physical) memory not covered by a permanent mapping is what we -refer to as 'highmem'. There are various architecture dependent constraints on -where exactly that border lies. 
- -In the i386 arch, for example, we choose to map the kernel into every process's -VM space so that we don't have to pay the full TLB invalidation costs for -kernel entry/exit. This means the available virtual memory space (4GiB on -i386) has to be divided between user and kernel space. - -The traditional split for architectures using this approach is 3:1, 3GiB for -userspace and the top 1GiB for kernel space:: - - +--------+ 0xffffffff - | Kernel | - +--------+ 0xc0000000 - | | - | User | - | | - +--------+ 0x00000000 - -This means that the kernel can at most map 1GiB of physical memory at any one -time, but because we need virtual address space for other things - including -temporary maps to access the rest of the physical memory - the actual direct -map will typically be less (usually around ~896MiB). - -Other architectures that have mm context tagged TLBs can have separate kernel -and user maps. Some hardware (like some ARMs), however, have limited virtual -space when they use mm context tags. - - -Temporary Virtual Mappings -========================== - -The kernel contains several ways of creating temporary mappings: - -* vmap(). This can be used to make a long duration mapping of multiple - physical pages into a contiguous virtual space. It needs global - synchronization to unmap. - -* kmap(). This permits a short duration mapping of a single page. It needs - global synchronization, but is amortized somewhat. It is also prone to - deadlocks when using in a nested fashion, and so it is not recommended for - new code. - -* kmap_atomic(). This permits a very short duration mapping of a single - page. Since the mapping is restricted to the CPU that issued it, it - performs well, but the issuing task is therefore required to stay on that - CPU until it has finished, lest some other task displace its mappings. - - kmap_atomic() may also be used by interrupt contexts, since it is does not - sleep and the caller may not sleep until after kunmap_atomic() is called. 
- - It may be assumed that k[un]map_atomic() won't fail. - - -Using kmap_atomic -================= - -When and where to use kmap_atomic() is straightforward. It is used when code -wants to access the contents of a page that might be allocated from high memory -(see __GFP_HIGHMEM), for example a page in the pagecache. The API has two -functions, and they can be used in a manner similar to the following:: - - /* Find the page of interest. */ - struct page *page = find_get_page(mapping, offset); - - /* Gain access to the contents of that page. */ - void *vaddr = kmap_atomic(page); - - /* Do something to the contents of that page. */ - memset(vaddr, 0, PAGE_SIZE); - - /* Unmap that page. */ - kunmap_atomic(vaddr); - -Note that the kunmap_atomic() call takes the result of the kmap_atomic() call -not the argument. - -If you need to map two pages because you want to copy from one page to -another you need to keep the kmap_atomic calls strictly nested, like:: - - vaddr1 = kmap_atomic(page1); - vaddr2 = kmap_atomic(page2); - - memcpy(vaddr1, vaddr2, PAGE_SIZE); - - kunmap_atomic(vaddr2); - kunmap_atomic(vaddr1); - - -Cost of Temporary Mappings -========================== - -The cost of creating temporary mappings can be quite high. The arch has to -manipulate the kernel's page tables, the data TLB and/or the MMU's registers. - -If CONFIG_HIGHMEM is not set, then the kernel will try and create a mapping -simply with a bit of arithmetic that will convert the page struct address into -a pointer to the page contents rather than juggling mappings about. In such a -case, the unmap operation may be a null operation. - -If CONFIG_MMU is not set, then there can be no temporary mappings and no -highmem. In such a case, the arithmetic approach will also be used. - - -i386 PAE -======== - -The i386 arch, under some circumstances, will permit you to stick up to 64GiB -of RAM into your 32-bit machine. 
This has a number of consequences: - -* Linux needs a page-frame structure for each page in the system and the - pageframes need to live in the permanent mapping, which means: - -* you can have 896M/sizeof(struct page) page-frames at most; with struct - page being 32-bytes that would end up being something in the order of 112G - worth of pages; the kernel, however, needs to store more than just - page-frames in that memory... - -* PAE makes your page tables larger - which slows the system down as more - data has to be accessed to traverse in TLB fills and the like. One - advantage is that PAE has more PTE bits and can provide advanced features - like NX and PAT. - -The general recommendation is that you don't use more than 8GiB on a 32-bit -machine - although more might work for you and your workload, you're pretty -much on your own - don't expect kernel developers to really care much if things -come apart. diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst new file mode 100644 index 000000000000..3fafa3381730 --- /dev/null +++ b/Documentation/vm/hmm.rst @@ -0,0 +1,374 @@ +.. hmm: + +===================================== +Heterogeneous Memory Management (HMM) +===================================== + +Transparently allow any component of a program to use any memory region of said +program with a device without using device specific memory allocator. This is +becoming a requirement to simplify the use of advance heterogeneous computing +where GPU, DSP or FPGA are use to perform various computations. + +This document is divided as follow, in the first section i expose the problems +related to the use of a device specific allocator. The second section i expose +the hardware limitations that are inherent to many platforms. The third section +gives an overview of HMM designs. The fourth section explains how CPU page- +table mirroring works and what is HMM purpose in this context. Fifth section +deals with how device memory is represented inside the kernel. 
Finally, the last
+section presents the new migration helper that allows leveraging the device
+DMA engine.
+
+.. contents:: :local:
+
+Problems of using device specific memory allocator
+==================================================
+
+Devices with a large amount of on board memory (several gigabytes) like GPUs
+have historically managed their memory through dedicated driver specific APIs.
+This creates a disconnect between memory allocated and managed by the device
+driver and regular application memory (private anonymous, shared memory or
+regular file backed memory). From here on I will refer to this aspect as split
+address space. I use shared address space to refer to the opposite situation,
+i.e. one in which any memory region can be used by a device transparently.
+
+The address space is split because a device can only access memory allocated
+through the device specific API. This implies that all memory objects in a
+program are not equal from the device point of view, which complicates large
+programs that rely on a wide set of libraries.
+
+Concretely, this means that code that wants to leverage a device like a GPU
+needs to copy objects between generically allocated memory (malloc, mmap
+private/share/) and memory allocated through the device driver API (this still
+ends up with an mmap, but of the device file).
+
+For flat data sets (array, grid, image, ...) this isn't too hard to achieve
+but complex data sets (list, tree, ...) are hard to get right. Duplicating a
+complex data set needs to re-map all the pointer relations between each of its
+elements. This is error prone and programs get harder to debug because of the
+duplicated data set.
+
+Split address space also means that libraries cannot transparently use data
+they are getting from the core program or another library, and thus each
+library might have to duplicate its input data set using a specific memory
+allocator. Large projects suffer from this and waste resources because of the
+various memory copies. 
+
+Duplicating each library API to accept as input or output memory allocated by
+each device specific allocator is not a viable option. It would lead to a
+combinatorial explosion in the library entry points.
+
+Finally, with the advance of high level language constructs (in C++ but in
+other languages too) it is now possible for the compiler to leverage GPUs or
+other devices without even the programmer's knowledge. Some compiler
+identified patterns are only doable with a shared address space. It is also
+more reasonable to use a shared address space for all the other patterns.
+
+
+System bus, device memory characteristics
+=========================================
+
+System buses cripple shared address spaces due to a few limitations. Most
+system buses only allow basic memory access from device to main memory; even
+cache coherency is often optional. Access to device memory from a CPU is even
+more limited; more often than not, it is not cache coherent.
+
+If we only consider the PCIE bus, then a device can access main memory (often
+through an IOMMU) and be cache coherent with the CPUs. However, it only allows
+a limited set of atomic operations from the device on main memory. This is
+worse in the other direction: the CPUs can only access a limited range of the
+device memory and cannot perform atomic operations on it. Thus device memory
+cannot be considered like regular memory from the kernel point of view.
+
+Another crippling factor is the limited bandwidth (~32GBytes/s with PCIE 4.0
+and 16 lanes). This is 33 times less than the fastest GPU memory (1 TBytes/s).
+The final limitation is latency; access to main memory from the device has an
+order of magnitude higher latency than when the device accesses its own
+memory.
+
+Some platforms are developing new system buses or additions/modifications to
+PCIE to address some of those limitations (OpenCAPI, CCIX). They mainly allow
+two way cache coherency between CPU and device and allow all atomic operations
+the architecture supports. 
Sadly, not all platforms are following this trend and
+some major architectures are left without hardware solutions to those
+problems.
+
+So for a shared address space to make sense, not only must we allow a device
+to access any memory, but we must also permit any memory to be migrated to
+device memory while the device is using it (blocking CPU access while it
+happens).
+
+
+Share address space and migration
+=================================
+
+HMM intends to provide two main features. The first one is to share the
+address space by duplicating the CPU page table into the device page table, so
+that the same address points to the same memory for any valid main memory
+address in the process address space.
+
+To achieve this, HMM offers a set of helpers to populate the device page table
+while keeping track of CPU page table updates. Device page table updates are
+not as easy as CPU page table updates. To update the device page table you
+must allocate a buffer (or use a pool of pre-allocated buffers) and write GPU
+specific commands in it to perform the update (unmap, cache invalidations and
+flush, ...). This cannot be done through common code for all devices. Hence
+why HMM provides helpers to factor out everything that can be while leaving
+the gory details to the device driver. 
+ +Note that any CPU access to a device page trigger a page fault and a migration +back to main memory ie when a page backing an given address A is migrated from +a main memory page to a device page then any CPU access to address A trigger a +page fault and initiate a migration back to main memory. + + +With this two features, HMM not only allow a device to mirror a process address +space and keeps both CPU and device page table synchronize, but also allow to +leverage device memory by migrating part of data-set that is actively use by a +device. + + +Address space mirroring implementation and API +============================================== + +Address space mirroring main objective is to allow to duplicate range of CPU +page table into a device page table and HMM helps keeping both synchronize. A +device driver that want to mirror a process address space must start with the +registration of an hmm_mirror struct:: + + int hmm_mirror_register(struct hmm_mirror *mirror, + struct mm_struct *mm); + int hmm_mirror_register_locked(struct hmm_mirror *mirror, + struct mm_struct *mm); + +The locked variant is to be use when the driver is already holding the mmap_sem +of the mm in write mode. The mirror struct has a set of callback that are use +to propagate CPU page table:: + + struct hmm_mirror_ops { + /* sync_cpu_device_pagetables() - synchronize page tables + * + * @mirror: pointer to struct hmm_mirror + * @update_type: type of update that occurred to the CPU page table + * @start: virtual start address of the range to update + * @end: virtual end address of the range to update + * + * This callback ultimately originates from mmu_notifiers when the CPU + * page table is updated. The device driver must update its page table + * in response to this callback. The update argument tells what action + * to perform. 
+ * + * The device driver must not return from this callback until the device + * page tables are completely updated (TLBs flushed, etc); this is a + * synchronous call. + */ + void (*update)(struct hmm_mirror *mirror, + enum hmm_update action, + unsigned long start, + unsigned long end); + }; + +Device driver must perform update to the range following action (turn range +read only, or fully unmap, ...). Once driver callback returns the device must +be done with the update. + + +When device driver wants to populate a range of virtual address it can use +either:: + + int hmm_vma_get_pfns(struct vm_area_struct *vma, + struct hmm_range *range, + unsigned long start, + unsigned long end, + hmm_pfn_t *pfns); + int hmm_vma_fault(struct vm_area_struct *vma, + struct hmm_range *range, + unsigned long start, + unsigned long end, + hmm_pfn_t *pfns, + bool write, + bool block); + +First one (hmm_vma_get_pfns()) will only fetch present CPU page table entry and +will not trigger a page fault on missing or non present entry. The second one +do trigger page fault on missing or read only entry if write parameter is true. +Page fault use the generic mm page fault code path just like a CPU page fault. + +Both function copy CPU page table into their pfns array argument. Each entry in +that array correspond to an address in the virtual range. HMM provide a set of +flags to help driver identify special CPU page table entries. + +Locking with the update() callback is the most important aspect the driver must +respect in order to keep things properly synchronize. The usage pattern is:: + + int driver_populate_range(...) + { + struct hmm_range range; + ... 
+ again: + ret = hmm_vma_get_pfns(vma, &range, start, end, pfns); + if (ret) + return ret; + take_lock(driver->update); + if (!hmm_vma_range_done(vma, &range)) { + release_lock(driver->update); + goto again; + } + + // Use pfns array content to update device page table + + release_lock(driver->update); + return 0; + } + +The driver->update lock is the same lock that driver takes inside its update() +callback. That lock must be call before hmm_vma_range_done() to avoid any race +with a concurrent CPU page table update. + +HMM implements all this on top of the mmu_notifier API because we wanted to a +simpler API and also to be able to perform optimization latter own like doing +concurrent device update in multi-devices scenario. + +HMM also serve as an impedence missmatch between how CPU page table update are +done (by CPU write to the page table and TLB flushes) from how device update +their own page table. Device update is a multi-step process, first appropriate +commands are write to a buffer, then this buffer is schedule for execution on +the device. It is only once the device has executed commands in the buffer that +the update is done. Creating and scheduling update command buffer can happen +concurrently for multiple devices. Waiting for each device to report commands +as executed is serialize (there is no point in doing this concurrently). + + +Represent and manage device memory from core kernel point of view +================================================================= + +Several differents design were try to support device memory. First one use +device specific data structure to keep information about migrated memory and +HMM hooked itself in various place of mm code to handle any access to address +that were back by device memory. It turns out that this ended up replicating +most of the fields of struct page and also needed many kernel code path to be +updated to understand this new kind of memory. 
+ +Thing is most kernel code path never try to access the memory behind a page +but only care about struct page contents. Because of this HMM switchted to +directly using struct page for device memory which left most kernel code path +un-aware of the difference. We only need to make sure that no one ever try to +map those page from the CPU side. + +HMM provide a set of helpers to register and hotplug device memory as a new +region needing struct page. This is offer through a very simple API:: + + struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, + struct device *device, + unsigned long size); + void hmm_devmem_remove(struct hmm_devmem *devmem); + +The hmm_devmem_ops is where most of the important things are:: + + struct hmm_devmem_ops { + void (*free)(struct hmm_devmem *devmem, struct page *page); + int (*fault)(struct hmm_devmem *devmem, + struct vm_area_struct *vma, + unsigned long addr, + struct page *page, + unsigned flags, + pmd_t *pmdp); + }; + +The first callback (free()) happens when the last reference on a device page is +drop. This means the device page is now free and no longer use by anyone. The +second callback happens whenever CPU try to access a device page which it can +not do. This second callback must trigger a migration back to system memory. + + +Migrate to and from device memory +================================= + +Because CPU can not access device memory, migration must use device DMA engine +to perform copy from and to device memory. For this we need a new migration +helper:: + + int migrate_vma(const struct migrate_vma_ops *ops, + struct vm_area_struct *vma, + unsigned long mentries, + unsigned long start, + unsigned long end, + unsigned long *src, + unsigned long *dst, + void *private); + +Unlike other migration function it works on a range of virtual address, there +is two reasons for that. 
First device DMA copy has a high setup overhead cost +and thus batching multiple pages is needed as otherwise the migration overhead +make the whole excersie pointless. The second reason is because driver trigger +such migration base on range of address the device is actively accessing. + +The migrate_vma_ops struct define two callbacks. First one (alloc_and_copy()) +control destination memory allocation and copy operation. Second one is there +to allow device driver to perform cleanup operation after migration:: + + struct migrate_vma_ops { + void (*alloc_and_copy)(struct vm_area_struct *vma, + const unsigned long *src, + unsigned long *dst, + unsigned long start, + unsigned long end, + void *private); + void (*finalize_and_map)(struct vm_area_struct *vma, + const unsigned long *src, + const unsigned long *dst, + unsigned long start, + unsigned long end, + void *private); + }; + +It is important to stress that this migration helpers allow for hole in the +virtual address range. Some pages in the range might not be migrated for all +the usual reasons (page is pin, page is lock, ...). This helper does not fail +but just skip over those pages. + +The alloc_and_copy() might as well decide to not migrate all pages in the +range (for reasons under the callback control). For those the callback just +have to leave the corresponding dst entry empty. + +Finaly the migration of the struct page might fails (for file back page) for +various reasons (failure to freeze reference, or update page cache, ...). If +that happens then the finalize_and_map() can catch any pages that was not +migrated. Note those page were still copied to new page and thus we wasted +bandwidth but this is considered as a rare event and a price that we are +willing to pay to keep all the code simpler. 
+ + +Memory cgroup (memcg) and rss accounting +======================================== + +For now device memory is accounted as any regular page in rss counters (either +anonymous if device page is use for anonymous, file if device page is use for +file back page or shmem if device page is use for share memory). This is a +deliberate choice to keep existing application that might start using device +memory without knowing about it to keep runing unimpacted. + +Drawbacks is that OOM killer might kill an application using a lot of device +memory and not a lot of regular system memory and thus not freeing much system +memory. We want to gather more real world experience on how application and +system react under memory pressure in the presence of device memory before +deciding to account device memory differently. + + +Same decision was made for memory cgroup. Device memory page are accounted +against same memory cgroup a regular page would be accounted to. This does +simplify migration to and from device memory. This also means that migration +back from device memory to regular memory can not fail because it would +go above memory cgroup limit. We might revisit this choice latter on once we +get more experience in how device memory is use and its impact on memory +resource control. + + +Note that device memory can never be pin nor by device driver nor through GUP +and thus such memory is always free upon process exit. Or when last reference +is drop in case of share memory or file back memory. diff --git a/Documentation/vm/hmm.txt b/Documentation/vm/hmm.txt deleted file mode 100644 index 3fafa3381730..000000000000 --- a/Documentation/vm/hmm.txt +++ /dev/null @@ -1,374 +0,0 @@ -.. hmm: - -===================================== -Heterogeneous Memory Management (HMM) -===================================== - -Transparently allow any component of a program to use any memory region of said -program with a device without using device specific memory allocator. 
This is -becoming a requirement to simplify the use of advance heterogeneous computing -where GPU, DSP or FPGA are use to perform various computations. - -This document is divided as follow, in the first section i expose the problems -related to the use of a device specific allocator. The second section i expose -the hardware limitations that are inherent to many platforms. The third section -gives an overview of HMM designs. The fourth section explains how CPU page- -table mirroring works and what is HMM purpose in this context. Fifth section -deals with how device memory is represented inside the kernel. Finaly the last -section present the new migration helper that allow to leverage the device DMA -engine. - -.. contents:: :local: - -Problems of using device specific memory allocator -================================================== - -Device with large amount of on board memory (several giga bytes) like GPU have -historically manage their memory through dedicated driver specific API. This -creates a disconnect between memory allocated and managed by device driver and -regular application memory (private anonymous, share memory or regular file -back memory). From here on i will refer to this aspect as split address space. -I use share address space to refer to the opposite situation ie one in which -any memory region can be use by device transparently. - -Split address space because device can only access memory allocated through the -device specific API. This imply that all memory object in a program are not -equal from device point of view which complicate large program that rely on a -wide set of libraries. - -Concretly this means that code that wants to leverage device like GPU need to -copy object between genericly allocated memory (malloc, mmap private/share/) -and memory allocated through the device driver API (this still end up with an -mmap but of the device file). - -For flat dataset (array, grid, image, ...) 
this isn't too hard to achieve but -complex data-set (list, tree, ...) are hard to get right. Duplicating a complex -data-set need to re-map all the pointer relations between each of its elements. -This is error prone and program gets harder to debug because of the duplicate -data-set. - -Split address space also means that library can not transparently use data they -are getting from core program or other library and thus each library might have -to duplicate its input data-set using specific memory allocator. Large project -suffer from this and waste resources because of the various memory copy. - -Duplicating each library API to accept as input or output memory allocted by -each device specific allocator is not a viable option. It would lead to a -combinatorial explosions in the library entry points. - -Finaly with the advance of high level language constructs (in C++ but in other -language too) it is now possible for compiler to leverage GPU or other devices -without even the programmer knowledge. Some of compiler identified patterns are -only do-able with a share address. It is as well more reasonable to use a share -address space for all the other patterns. - - -System bus, device memory characteristics -========================================= - -System bus cripple share address due to few limitations. Most system bus only -allow basic memory access from device to main memory, even cache coherency is -often optional. Access to device memory from CPU is even more limited, most -often than not it is not cache coherent. - -If we only consider the PCIE bus than device can access main memory (often -through an IOMMU) and be cache coherent with the CPUs. However it only allows -a limited set of atomic operation from device on main memory. This is worse -in the other direction the CPUs can only access a limited range of the device -memory and can not perform atomic operations on it. 
Thus device memory can not -be consider like regular memory from kernel point of view. - -Another crippling factor is the limited bandwidth (~32GBytes/s with PCIE 4.0 -and 16 lanes). This is 33 times less that fastest GPU memory (1 TBytes/s). -The final limitation is latency, access to main memory from the device has an -order of magnitude higher latency than when the device access its own memory. - -Some platform are developing new system bus or additions/modifications to PCIE -to address some of those limitations (OpenCAPI, CCIX). They mainly allow two -way cache coherency between CPU and device and allow all atomic operations the -architecture supports. Saddly not all platform are following this trends and -some major architecture are left without hardware solutions to those problems. - -So for share address space to make sense not only we must allow device to -access any memory memory but we must also permit any memory to be migrated to -device memory while device is using it (blocking CPU access while it happens). - - -Share address space and migration -================================= - -HMM intends to provide two main features. First one is to share the address -space by duplication the CPU page table into the device page table so same -address point to same memory and this for any valid main memory address in -the process address space. - -To achieve this, HMM offer a set of helpers to populate the device page table -while keeping track of CPU page table updates. Device page table updates are -not as easy as CPU page table updates. To update the device page table you must -allow a buffer (or use a pool of pre-allocated buffer) and write GPU specifics -commands in it to perform the update (unmap, cache invalidations and flush, -...). This can not be done through common code for all device. Hence why HMM -provides helpers to factor out everything that can be while leaving the gory -details to the device driver. 
- -The second mechanism HMM provide is a new kind of ZONE_DEVICE memory that does -allow to allocate a struct page for each page of the device memory. Those page -are special because the CPU can not map them. They however allow to migrate -main memory to device memory using exhisting migration mechanism and everything -looks like if page was swap out to disk from CPU point of view. Using a struct -page gives the easiest and cleanest integration with existing mm mechanisms. -Again here HMM only provide helpers, first to hotplug new ZONE_DEVICE memory -for the device memory and second to perform migration. Policy decision of what -and when to migrate things is left to the device driver. - -Note that any CPU access to a device page trigger a page fault and a migration -back to main memory ie when a page backing an given address A is migrated from -a main memory page to a device page then any CPU access to address A trigger a -page fault and initiate a migration back to main memory. - - -With this two features, HMM not only allow a device to mirror a process address -space and keeps both CPU and device page table synchronize, but also allow to -leverage device memory by migrating part of data-set that is actively use by a -device. - - -Address space mirroring implementation and API -============================================== - -Address space mirroring main objective is to allow to duplicate range of CPU -page table into a device page table and HMM helps keeping both synchronize. A -device driver that want to mirror a process address space must start with the -registration of an hmm_mirror struct:: - - int hmm_mirror_register(struct hmm_mirror *mirror, - struct mm_struct *mm); - int hmm_mirror_register_locked(struct hmm_mirror *mirror, - struct mm_struct *mm); - -The locked variant is to be use when the driver is already holding the mmap_sem -of the mm in write mode. 
The mirror struct has a set of callback that are use -to propagate CPU page table:: - - struct hmm_mirror_ops { - /* sync_cpu_device_pagetables() - synchronize page tables - * - * @mirror: pointer to struct hmm_mirror - * @update_type: type of update that occurred to the CPU page table - * @start: virtual start address of the range to update - * @end: virtual end address of the range to update - * - * This callback ultimately originates from mmu_notifiers when the CPU - * page table is updated. The device driver must update its page table - * in response to this callback. The update argument tells what action - * to perform. - * - * The device driver must not return from this callback until the device - * page tables are completely updated (TLBs flushed, etc); this is a - * synchronous call. - */ - void (*update)(struct hmm_mirror *mirror, - enum hmm_update action, - unsigned long start, - unsigned long end); - }; - -Device driver must perform update to the range following action (turn range -read only, or fully unmap, ...). Once driver callback returns the device must -be done with the update. - - -When device driver wants to populate a range of virtual address it can use -either:: - - int hmm_vma_get_pfns(struct vm_area_struct *vma, - struct hmm_range *range, - unsigned long start, - unsigned long end, - hmm_pfn_t *pfns); - int hmm_vma_fault(struct vm_area_struct *vma, - struct hmm_range *range, - unsigned long start, - unsigned long end, - hmm_pfn_t *pfns, - bool write, - bool block); - -First one (hmm_vma_get_pfns()) will only fetch present CPU page table entry and -will not trigger a page fault on missing or non present entry. The second one -do trigger page fault on missing or read only entry if write parameter is true. -Page fault use the generic mm page fault code path just like a CPU page fault. - -Both function copy CPU page table into their pfns array argument. Each entry in -that array correspond to an address in the virtual range. 
HMM provide a set of -flags to help driver identify special CPU page table entries. - -Locking with the update() callback is the most important aspect the driver must -respect in order to keep things properly synchronize. The usage pattern is:: - - int driver_populate_range(...) - { - struct hmm_range range; - ... - again: - ret = hmm_vma_get_pfns(vma, &range, start, end, pfns); - if (ret) - return ret; - take_lock(driver->update); - if (!hmm_vma_range_done(vma, &range)) { - release_lock(driver->update); - goto again; - } - - // Use pfns array content to update device page table - - release_lock(driver->update); - return 0; - } - -The driver->update lock is the same lock that driver takes inside its update() -callback. That lock must be call before hmm_vma_range_done() to avoid any race -with a concurrent CPU page table update. - -HMM implements all this on top of the mmu_notifier API because we wanted to a -simpler API and also to be able to perform optimization latter own like doing -concurrent device update in multi-devices scenario. - -HMM also serve as an impedence missmatch between how CPU page table update are -done (by CPU write to the page table and TLB flushes) from how device update -their own page table. Device update is a multi-step process, first appropriate -commands are write to a buffer, then this buffer is schedule for execution on -the device. It is only once the device has executed commands in the buffer that -the update is done. Creating and scheduling update command buffer can happen -concurrently for multiple devices. Waiting for each device to report commands -as executed is serialize (there is no point in doing this concurrently). - - -Represent and manage device memory from core kernel point of view -================================================================= - -Several differents design were try to support device memory. 
First one use -device specific data structure to keep information about migrated memory and -HMM hooked itself in various place of mm code to handle any access to address -that were back by device memory. It turns out that this ended up replicating -most of the fields of struct page and also needed many kernel code path to be -updated to understand this new kind of memory. - -Thing is most kernel code path never try to access the memory behind a page -but only care about struct page contents. Because of this HMM switchted to -directly using struct page for device memory which left most kernel code path -un-aware of the difference. We only need to make sure that no one ever try to -map those page from the CPU side. - -HMM provide a set of helpers to register and hotplug device memory as a new -region needing struct page. This is offer through a very simple API:: - - struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, - struct device *device, - unsigned long size); - void hmm_devmem_remove(struct hmm_devmem *devmem); - -The hmm_devmem_ops is where most of the important things are:: - - struct hmm_devmem_ops { - void (*free)(struct hmm_devmem *devmem, struct page *page); - int (*fault)(struct hmm_devmem *devmem, - struct vm_area_struct *vma, - unsigned long addr, - struct page *page, - unsigned flags, - pmd_t *pmdp); - }; - -The first callback (free()) happens when the last reference on a device page is -drop. This means the device page is now free and no longer use by anyone. The -second callback happens whenever CPU try to access a device page which it can -not do. This second callback must trigger a migration back to system memory. - - -Migrate to and from device memory -================================= - -Because CPU can not access device memory, migration must use device DMA engine -to perform copy from and to device memory. 
For this we need a new migration -helper:: - - int migrate_vma(const struct migrate_vma_ops *ops, - struct vm_area_struct *vma, - unsigned long mentries, - unsigned long start, - unsigned long end, - unsigned long *src, - unsigned long *dst, - void *private); - -Unlike other migration function it works on a range of virtual address, there -is two reasons for that. First device DMA copy has a high setup overhead cost -and thus batching multiple pages is needed as otherwise the migration overhead -make the whole excersie pointless. The second reason is because driver trigger -such migration base on range of address the device is actively accessing. - -The migrate_vma_ops struct define two callbacks. First one (alloc_and_copy()) -control destination memory allocation and copy operation. Second one is there -to allow device driver to perform cleanup operation after migration:: - - struct migrate_vma_ops { - void (*alloc_and_copy)(struct vm_area_struct *vma, - const unsigned long *src, - unsigned long *dst, - unsigned long start, - unsigned long end, - void *private); - void (*finalize_and_map)(struct vm_area_struct *vma, - const unsigned long *src, - const unsigned long *dst, - unsigned long start, - unsigned long end, - void *private); - }; - -It is important to stress that this migration helpers allow for hole in the -virtual address range. Some pages in the range might not be migrated for all -the usual reasons (page is pin, page is lock, ...). This helper does not fail -but just skip over those pages. - -The alloc_and_copy() might as well decide to not migrate all pages in the -range (for reasons under the callback control). For those the callback just -have to leave the corresponding dst entry empty. - -Finaly the migration of the struct page might fails (for file back page) for -various reasons (failure to freeze reference, or update page cache, ...). If -that happens then the finalize_and_map() can catch any pages that was not -migrated. 
Note those page were still copied to new page and thus we wasted -bandwidth but this is considered as a rare event and a price that we are -willing to pay to keep all the code simpler. - - -Memory cgroup (memcg) and rss accounting -======================================== - -For now device memory is accounted as any regular page in rss counters (either -anonymous if device page is use for anonymous, file if device page is use for -file back page or shmem if device page is use for share memory). This is a -deliberate choice to keep existing application that might start using device -memory without knowing about it to keep runing unimpacted. - -Drawbacks is that OOM killer might kill an application using a lot of device -memory and not a lot of regular system memory and thus not freeing much system -memory. We want to gather more real world experience on how application and -system react under memory pressure in the presence of device memory before -deciding to account device memory differently. - - -Same decision was made for memory cgroup. Device memory page are accounted -against same memory cgroup a regular page would be accounted to. This does -simplify migration to and from device memory. This also means that migration -back from device memory to regular memory can not fail because it would -go above memory cgroup limit. We might revisit this choice latter on once we -get more experience in how device memory is use and its impact on memory -resource control. - - -Note that device memory can never be pin nor by device driver nor through GUP -and thus such memory is always free upon process exit. Or when last reference -is drop in case of share memory or file back memory. diff --git a/Documentation/vm/hugetlbfs_reserv.rst b/Documentation/vm/hugetlbfs_reserv.rst new file mode 100644 index 000000000000..36a87a2ea435 --- /dev/null +++ b/Documentation/vm/hugetlbfs_reserv.rst @@ -0,0 +1,587 @@ +.. 
_hugetlbfs_reserve: + +===================== +Hugetlbfs Reservation +===================== + +Overview +======== + +Huge pages as described at :ref:`hugetlbpage` are typically +preallocated for application use. These huge pages are instantiated in a +task's address space at page fault time if the VMA indicates huge pages are +to be used. If no huge page exists at page fault time, the task is sent +a SIGBUS and often dies an unhappy death. Shortly after huge page support +was added, it was determined that it would be better to detect a shortage +of huge pages at mmap() time. The idea is that if there were not enough +huge pages to cover the mapping, the mmap() would fail. This was first +done with a simple check in the code at mmap() time to determine if there +were enough free huge pages to cover the mapping. Like most things in the +kernel, the code has evolved over time. However, the basic idea was to +'reserve' huge pages at mmap() time to ensure that huge pages would be +available for page faults in that mapping. The description below attempts to +describe how huge page reserve processing is done in the v4.10 kernel. + + +Audience +======== +This description is primarily targeted at kernel developers who are modifying +hugetlbfs code. + + +The Data Structures +=================== + +resv_huge_pages + This is a global (per-hstate) count of reserved huge pages. Reserved + huge pages are only available to the task which reserved them. + Therefore, the number of huge pages generally available is computed + as (``free_huge_pages - resv_huge_pages``). +Reserve Map + A reserve map is described by the structure:: + + struct resv_map { + struct kref refs; + spinlock_t lock; + struct list_head regions; + long adds_in_progress; + struct list_head region_cache; + long region_cache_count; + }; + + There is one reserve map for each huge page mapping in the system. + The regions list within the resv_map describes the regions within + the mapping. 
A region is described as:: + + struct file_region { + struct list_head link; + long from; + long to; + }; + + The 'from' and 'to' fields of the file region structure are huge page + indices into the mapping. Depending on the type of mapping, a + region in the resv_map may indicate reservations exist for the + range, or reservations do not exist. +Flags for MAP_PRIVATE Reservations + These are stored in the bottom bits of the reservation map pointer. + + ``#define HPAGE_RESV_OWNER (1UL << 0)`` + Indicates this task is the owner of the reservations + associated with the mapping. + ``#define HPAGE_RESV_UNMAPPED (1UL << 1)`` + Indicates task originally mapping this range (and creating + reserves) has unmapped a page from this task (the child) + due to a failed COW. +Page Flags + The PagePrivate page flag is used to indicate that a huge page + reservation must be restored when the huge page is freed. More + details will be discussed in the "Freeing huge pages" section. + + +Reservation Map Location (Private or Shared) +============================================ + +A huge page mapping or segment is either private or shared. If private, +it is typically only available to a single address space (task). If shared, +it can be mapped into multiple address spaces (tasks). The location and +semantics of the reservation map is significantly different for two types +of mappings. Location differences are: + +- For private mappings, the reservation map hangs off the VMA structure. + Specifically, vma->vm_private_data. This reserve map is created at the + time the mapping (mmap(MAP_PRIVATE)) is created. +- For shared mappings, the reservation map hangs off the inode. Specifically, + inode->i_mapping->private_data. Since shared mappings are always backed + by files in the hugetlbfs filesystem, the hugetlbfs code ensures each inode + contains a reservation map. As a result, the reservation map is allocated + when the inode is created. 
+ + +Creating Reservations +===================== +Reservations are created when a huge page backed shared memory segment is +created (shmget(SHM_HUGETLB)) or a mapping is created via mmap(MAP_HUGETLB). +These operations result in a call to the routine hugetlb_reserve_pages():: + + int hugetlb_reserve_pages(struct inode *inode, + long from, long to, + struct vm_area_struct *vma, + vm_flags_t vm_flags) + +The first thing hugetlb_reserve_pages() does is check whether the NORESERVE +flag was specified in either the shmget() or mmap() call. If NORESERVE +was specified, then this routine returns immediately as no reservations +are desired. + +The arguments 'from' and 'to' are huge page indices into the mapping or +underlying file. For shmget(), 'from' is always 0 and 'to' corresponds to +the length of the segment/mapping. For mmap(), the offset argument could +be used to specify the offset into the underlying file. In such a case +the 'from' and 'to' arguments have been adjusted by this offset. + +One of the big differences between PRIVATE and SHARED mappings is the way +in which reservations are represented in the reservation map. + +- For shared mappings, an entry in the reservation map indicates a reservation + exists or did exist for the corresponding page. As reservations are + consumed, the reservation map is not modified. +- For private mappings, the lack of an entry in the reservation map indicates + a reservation exists for the corresponding page. As reservations are + consumed, entries are added to the reservation map. Therefore, the + reservation map can also be used to determine which reservations have + been consumed. + +For private mappings, hugetlb_reserve_pages() creates the reservation map and +hangs it off the VMA structure. In addition, the HPAGE_RESV_OWNER flag is set +to indicate this VMA owns the reservations. + +The reservation map is consulted to determine how many huge page reservations +are needed for the current mapping/segment. 
For private mappings, this is +always the value (to - from). However, for shared mappings it is possible that some reservations may already exist within the range (to - from). See the +section :ref:`Reservation Map Modifications ` +for details on how this is accomplished. + +The mapping may be associated with a subpool. If so, the subpool is consulted +to ensure there is sufficient space for the mapping. It is possible that the +subpool has set aside reservations that can be used for the mapping. See the +section :ref:`Subpool Reservations ` for more details. + +After consulting the reservation map and subpool, the number of needed new +reservations is known. The routine hugetlb_acct_memory() is called to check +for and take the requested number of reservations. hugetlb_acct_memory() +calls into routines that potentially allocate and adjust surplus page counts. +However, within those routines the code is simply checking to ensure there +are enough free huge pages to accommodate the reservation. If there are, +the global reservation count resv_huge_pages is adjusted something like the +following:: + + if (resv_needed <= (resv_huge_pages - free_huge_pages)) + resv_huge_pages += resv_needed; + +Note that the global lock hugetlb_lock is held when checking and adjusting +these counters. + +If there were enough free huge pages and the global count resv_huge_pages +was adjusted, then the reservation map associated with the mapping is +modified to reflect the reservations. In the case of a shared mapping, a +file_region will exist that includes the range 'from' 'to'. For private +mappings, no modifications are made to the reservation map as lack of an +entry indicates a reservation exists. + +If hugetlb_reserve_pages() was successful, the global reservation count and +reservation map associated with the mapping will be modified as required to +ensure reservations exist for the range 'from' - 'to'. + +.. 
_consume_resv: + +Consuming Reservations/Allocating a Huge Page +============================================= + +Reservations are consumed when huge pages associated with the reservations +are allocated and instantiated in the corresponding mapping. The allocation +is performed within the routine alloc_huge_page():: + + struct page *alloc_huge_page(struct vm_area_struct *vma, + unsigned long addr, int avoid_reserve) + +alloc_huge_page is passed a VMA pointer and a virtual address, so it can +consult the reservation map to determine if a reservation exists. In addition, +alloc_huge_page takes the argument avoid_reserve which indicates reserves +should not be used even if it appears they have been set aside for the +specified address. The avoid_reserve argument is most often used in the case +of Copy on Write and Page Migration where additional copies of an existing +page are being allocated. + +The helper routine vma_needs_reservation() is called to determine if a +reservation exists for the address within the mapping(vma). See the section +:ref:`Reservation Map Helper Routines <resv_map_helpers>` for detailed +information on what this routine does. +The value returned from vma_needs_reservation() is generally +0 or 1. 0 if a reservation exists for the address, 1 if no reservation exists. +If a reservation does not exist, and there is a subpool associated with the +mapping the subpool is consulted to determine if it contains reservations. +If the subpool contains reservations, one can be used for this allocation. +However, in every case the avoid_reserve argument overrides the use of +a reservation for the allocation. After determining whether a reservation +exists and can be used for the allocation, the routine dequeue_huge_page_vma() +is called. 
This routine takes two arguments related to reservations: + +- avoid_reserve, this is the same value/argument passed to alloc_huge_page() +- chg, even though this argument is of type long only the values 0 or 1 are + passed to dequeue_huge_page_vma. If the value is 0, it indicates a + reservation exists (see the section "Memory Policy and Reservations" for + possible issues). If the value is 1, it indicates a reservation does not + exist and the page must be taken from the global free pool if possible. + +The free lists associated with the memory policy of the VMA are searched for +a free page. If a page is found, the value free_huge_pages is decremented +when the page is removed from the free list. If there was a reservation +associated with the page, the following adjustments are made:: + + SetPagePrivate(page); /* Indicates allocating this page consumed + * a reservation, and if an error is + * encountered such that the page must be + * freed, the reservation will be restored. */ + resv_huge_pages--; /* Decrement the global reservation count */ + +Note, if no huge page can be found that satisfies the VMA's memory policy +an attempt will be made to allocate one using the buddy allocator. This +brings up the issue of surplus huge pages and overcommit which is beyond +the scope reservations. Even if a surplus page is allocated, the same +reservation based adjustments as above will be made: SetPagePrivate(page) and +resv_huge_pages--. + +After obtaining a new huge page, (page)->private is set to the value of +the subpool associated with the page if it exists. This will be used for +subpool accounting when the page is freed. + +The routine vma_commit_reservation() is then called to adjust the reserve +map based on the consumption of the reservation. In general, this involves +ensuring the page is represented within a file_region structure of the region +map. 
For shared mappings where the reservation was present, an entry +in the reserve map already existed so no change is made. However, if there +was no reservation in a shared mapping or this was a private mapping a new +entry must be created. + +It is possible that the reserve map could have been changed between the call +to vma_needs_reservation() at the beginning of alloc_huge_page() and the +call to vma_commit_reservation() after the page was allocated. This would +be possible if hugetlb_reserve_pages was called for the same page in a shared +mapping. In such cases, the reservation count and subpool free page count +will be off by one. This rare condition can be identified by comparing the +return value from vma_needs_reservation and vma_commit_reservation. If such +a race is detected, the subpool and global reserve counts are adjusted to +compensate. See the section +:ref:`Reservation Map Helper Routines <resv_map_helpers>` for more +information on these routines. + + +Instantiate Huge Pages +====================== + +After huge page allocation, the page is typically added to the page tables +of the allocating task. Before this, pages in a shared mapping are added +to the page cache and pages in private mappings are added to an anonymous +reverse mapping. In both cases, the PagePrivate flag is cleared. Therefore, +when a huge page that has been instantiated is freed no adjustment is made +to the global reservation count (resv_huge_pages). + + +Freeing Huge Pages +================== + +Huge page freeing is performed by the routine free_huge_page(). This routine +is the destructor for hugetlbfs compound pages. As a result, it is only +passed a pointer to the page struct. When a huge page is freed, reservation +accounting may need to be performed. This would be the case if the page was +associated with a subpool that contained reserves, or the page is being freed +on an error path where a global reserve count must be restored. 
+ +The page->private field points to any subpool associated with the page. +If the PagePrivate flag is set, it indicates the global reserve count should +be adjusted (see the section +:ref:`Consuming Reservations/Allocating a Huge Page <consume_resv>` +for information on how these are set). + +The routine first calls hugepage_subpool_put_pages() for the page. If this +routine returns a value of 0 (which does not equal the value passed 1) it +indicates reserves are associated with the subpool, and this newly free page +must be used to keep the number of subpool reserves above the minimum size. +Therefore, the global resv_huge_pages counter is incremented in this case. + +If the PagePrivate flag was set in the page, the global resv_huge_pages counter +will always be incremented. + +.. _sub_pool_resv: + +Subpool Reservations +==================== + +There is a struct hstate associated with each huge page size. The hstate +tracks all huge pages of the specified size. A subpool represents a subset +of pages within a hstate that is associated with a mounted hugetlbfs +filesystem. + +When a hugetlbfs filesystem is mounted a min_size option can be specified +which indicates the minimum number of huge pages required by the filesystem. +If this option is specified, the number of huge pages corresponding to +min_size are reserved for use by the filesystem. This number is tracked in +the min_hpages field of a struct hugepage_subpool. At mount time, +hugetlb_acct_memory(min_hpages) is called to reserve the specified number of +huge pages. If they can not be reserved, the mount fails. + +The routines hugepage_subpool_get/put_pages() are called when pages are +obtained from or released back to a subpool. They perform all subpool +accounting, and track any reservations associated with the subpool. +hugepage_subpool_get/put_pages are passed the number of huge pages by which +to adjust the subpool 'used page' count (down for get, up for put). 
Normally, +they return the same value that was passed or an error if not enough pages +exist in the subpool. + +However, if reserves are associated with the subpool a return value less +than the passed value may be returned. This return value indicates the +number of additional global pool adjustments which must be made. For example, +suppose a subpool contains 3 reserved huge pages and someone asks for 5. +The 3 reserved pages associated with the subpool can be used to satisfy part +of the request. But, 2 pages must be obtained from the global pools. To +relay this information to the caller, the value 2 is returned. The caller +is then responsible for attempting to obtain the additional two pages from +the global pools. + + +COW and Reservations +==================== + +Since shared mappings all point to and use the same underlying pages, the +biggest reservation concern for COW is private mappings. In this case, +two tasks can be pointing at the same previously allocated page. One task +attempts to write to the page, so a new page must be allocated so that each +task points to its own page. + +When the page was originally allocated, the reservation for that page was +consumed. When an attempt to allocate a new page is made as a result of +COW, it is possible that no free huge pages are free and the allocation +will fail. + +When the private mapping was originally created, the owner of the mapping +was noted by setting the HPAGE_RESV_OWNER bit in the pointer to the reservation +map of the owner. Since the owner created the mapping, the owner owns all +the reservations associated with the mapping. Therefore, when a write fault +occurs and there is no page available, different action is taken for the owner +and non-owner of the reservation. + +In the case where the faulting task is not the owner, the fault will fail and +the task will typically receive a SIGBUS. + +If the owner is the faulting task, we want it to succeed since it owned the +original reservation. 
To accomplish this, the page is unmapped from the +non-owning task. In this way, the only reference is from the owning task. +In addition, the HPAGE_RESV_UNMAPPED bit is set in the reservation map pointer +of the non-owning task. The non-owning task may receive a SIGBUS if it later +faults on a non-present page. But, the original owner of the +mapping/reservation will behave as expected. + + +.. _resv_map_modifications: + +Reservation Map Modifications +============================= + +The following low level routines are used to make modifications to a +reservation map. Typically, these routines are not called directly. Rather, +a reservation map helper routine is called which calls one of these low level +routines. These low level routines are fairly well documented in the source +code (mm/hugetlb.c). These routines are:: + + long region_chg(struct resv_map *resv, long f, long t); + long region_add(struct resv_map *resv, long f, long t); + void region_abort(struct resv_map *resv, long f, long t); + long region_count(struct resv_map *resv, long f, long t); + +Operations on the reservation map typically involve two operations: + +1) region_chg() is called to examine the reserve map and determine how + many pages in the specified range [f, t) are NOT currently represented. + + The calling code performs global checks and allocations to determine if + there are enough huge pages for the operation to succeed. + +2) + a) If the operation can succeed, region_add() is called to actually modify + the reservation map for the same range [f, t) previously passed to + region_chg(). + b) If the operation can not succeed, region_abort is called for the same + range [f, t) to abort the operation. + +Note that this is a two step process where region_add() and region_abort() +are guaranteed to succeed after a prior call to region_chg() for the same +range. 
region_chg() is responsible for pre-allocating any data structures +necessary to ensure the subsequent operations (specifically region_add()) +will succeed. + +As mentioned above, region_chg() determines the number of pages in the range +which are NOT currently represented in the map. This number is returned to +the caller. region_add() returns the number of pages in the range added to +the map. In most cases, the return value of region_add() is the same as the +return value of region_chg(). However, in the case of shared mappings it is +possible for changes to the reservation map to be made between the calls to +region_chg() and region_add(). In this case, the return value of region_add() +will not match the return value of region_chg(). It is likely that in such +cases global counts and subpool accounting will be incorrect and in need of +adjustment. It is the responsibility of the caller to check for this condition +and make the appropriate adjustments. + +The routine region_del() is called to remove regions from a reservation map. +It is typically called in the following situations: + +- When a file in the hugetlbfs filesystem is being removed, the inode will + be released and the reservation map freed. Before freeing the reservation + map, all the individual file_region structures must be freed. In this case + region_del is passed the range [0, LONG_MAX). +- When a hugetlbfs file is being truncated. In this case, all allocated pages + after the new file size must be freed. In addition, any file_region entries + in the reservation map past the new end of file must be deleted. In this + case, region_del is passed the range [new_end_of_file, LONG_MAX). +- When a hole is being punched in a hugetlbfs file. In this case, huge pages + are removed from the middle of the file one at a time. As the pages are + removed, region_del() is called to remove the corresponding entry from the + reservation map.
In this case, region_del is passed the range + [page_idx, page_idx + 1). + +In every case, region_del() will return the number of pages removed from the +reservation map. In VERY rare cases, region_del() can fail. This can only +happen in the hole punch case where it has to split an existing file_region +entry and can not allocate a new structure. In this error case, region_del() +will return -ENOMEM. The problem here is that the reservation map will +indicate that there is a reservation for the page. However, the subpool and +global reservation counts will not reflect the reservation. To handle this +situation, the routine hugetlb_fix_reserve_counts() is called to adjust the +counters so that they correspond with the reservation map entry that could +not be deleted. + +region_count() is called when unmapping a private huge page mapping. In +private mappings, the lack of an entry in the reservation map indicates that +a reservation exists. Therefore, by counting the number of entries in the +reservation map we know how many reservations were consumed and how many are +outstanding (outstanding = (end - start) - region_count(resv, start, end)). +Since the mapping is going away, the subpool and global reservation counts +are decremented by the number of outstanding reservations. + +.. _resv_map_helpers: + +Reservation Map Helper Routines +=============================== + +Several helper routines exist to query and modify the reservation maps. +These routines are only interested in reservations for a specific huge +page, so they just pass in an address instead of a range. In addition, +they pass in the associated VMA. From the VMA, the type of mapping (private +or shared) and the location of the reservation map (inode or VMA) can be +determined. These routines simply call the underlying routines described +in the section "Reservation Map Modifications".
However, they do take into +account the 'opposite' meaning of reservation map entries for private and +shared mappings and hide this detail from the caller:: + + long vma_needs_reservation(struct hstate *h, + struct vm_area_struct *vma, + unsigned long addr) + +This routine calls region_chg() for the specified page. If no reservation +exists, 1 is returned. If a reservation exists, 0 is returned:: + + long vma_commit_reservation(struct hstate *h, + struct vm_area_struct *vma, + unsigned long addr) + +This calls region_add() for the specified page. As in the case of region_chg +and region_add, this routine is to be called after a previous call to +vma_needs_reservation. It will add a reservation entry for the page. It +returns 1 if the reservation was added and 0 if not. The return value should +be compared with the return value of the previous call to +vma_needs_reservation. An unexpected difference indicates the reservation +map was modified between calls:: + + void vma_end_reservation(struct hstate *h, + struct vm_area_struct *vma, + unsigned long addr) + +This calls region_abort() for the specified page. As in the case of region_chg +and region_abort, this routine is to be called after a previous call to +vma_needs_reservation. It will abort/end the in progress reservation add +operation:: + + long vma_add_reservation(struct hstate *h, + struct vm_area_struct *vma, + unsigned long addr) + +This is a special wrapper routine to help facilitate reservation cleanup +on error paths. It is only called from the routine restore_reserve_on_error(). +This routine is used in conjunction with vma_needs_reservation in an attempt +to add a reservation to the reservation map. It takes into account the +different reservation map semantics for private and shared mappings. 
Hence, +region_add is called for shared mappings (as an entry present in the map +indicates a reservation), and region_del is called for private mappings (as +the absence of an entry in the map indicates a reservation). See the section +"Reservation cleanup in error paths" for more information on what needs to +be done on error paths. + + +Reservation Cleanup in Error Paths +================================== + +As mentioned in the section +:ref:`Reservation Map Helper Routines `, reservation +map modifications are performed in two steps. First vma_needs_reservation +is called before a page is allocated. If the allocation is successful, +then vma_commit_reservation is called. If not, vma_end_reservation is called. +Global and subpool reservation counts are adjusted based on success or failure +of the operation and all is well. + +Additionally, after a huge page is instantiated the PagePrivate flag is +cleared so that accounting when the page is ultimately freed is correct. + +However, there are several instances where errors are encountered after a huge +page is allocated but before it is instantiated. In this case, the page +allocation has consumed the reservation and made the appropriate subpool, +reservation map and global count adjustments. If the page is freed at this +time (before instantiation and clearing of PagePrivate), then free_huge_page +will increment the global reservation count. However, the reservation map +indicates the reservation was consumed. This resulting inconsistent state +will cause the 'leak' of a reserved huge page. The global reserve count will +be higher than it should and prevent allocation of a pre-allocated page. + +The routine restore_reserve_on_error() attempts to handle this situation. It +is fairly well documented. The intention of this routine is to restore +the reservation map to the way it was before the page allocation. 
In this +way, the state of the reservation map will correspond to the global reservation +count after the page is freed. + +The routine restore_reserve_on_error itself may encounter errors while +attempting to restore the reservation map entry. In this case, it will +simply clear the PagePrivate flag of the page. In this way, the global +reserve count will not be incremented when the page is freed. However, the +reservation map will continue to look as though the reservation was consumed. +A page can still be allocated for the address, but it will not use a reserved +page as originally intended. + +There is some code (most notably userfaultfd) which can not call +restore_reserve_on_error. In this case, it simply modifies the PagePrivate flag +so that a reservation will not be leaked when the huge page is freed. + + +Reservations and Memory Policy +============================== +Per-node huge page lists existed in struct hstate when git was first used +to manage Linux code. The concept of reservations was added some time later. +When reservations were added, no attempt was made to take memory policy +into account. While cpusets are not exactly the same as memory policy, this +comment in hugetlb_acct_memory sums up the interaction between reservations +and cpusets/memory policy:: + + /* + * When cpuset is configured, it breaks the strict hugetlb page + * reservation as the accounting is done on a global variable. Such + * reservation is completely rubbish in the presence of cpuset because + * the reservation is not checked against page availability for the + * current cpuset. Application can still potentially OOM'ed by kernel + * with lack of free htlb page in cpuset that the task is in. + * Attempt to enforce strict accounting with cpuset is almost + * impossible (or too ugly) because cpuset is too fluid that + * task or memory node can be dynamically moved between cpusets. + * + * The change of semantics for shared hugetlb mapping with cpuset is + * undesirable.
However, in order to preserve some of the semantics, + * we fall back to check against current free page availability as + * a best attempt and hopefully to minimize the impact of changing + * semantics that cpuset has. + */ + +Huge page reservations were added to prevent unexpected page allocation +failures (OOM) at page fault time. However, if an application makes use +of cpusets or memory policy there is no guarantee that huge pages will be +available on the required nodes. This is true even if there are a sufficient +number of global reservations. + + +Mike Kravetz, 7 April 2017 diff --git a/Documentation/vm/hugetlbfs_reserv.txt b/Documentation/vm/hugetlbfs_reserv.txt deleted file mode 100644 index 36a87a2ea435..000000000000 --- a/Documentation/vm/hugetlbfs_reserv.txt +++ /dev/null @@ -1,587 +0,0 @@ -.. _hugetlbfs_reserve: - -===================== -Hugetlbfs Reservation -===================== - -Overview -======== - -Huge pages as described at :ref:`hugetlbpage` are typically -preallocated for application use. These huge pages are instantiated in a -task's address space at page fault time if the VMA indicates huge pages are -to be used. If no huge page exists at page fault time, the task is sent -a SIGBUS and often dies an unhappy death. Shortly after huge page support -was added, it was determined that it would be better to detect a shortage -of huge pages at mmap() time. The idea is that if there were not enough -huge pages to cover the mapping, the mmap() would fail. This was first -done with a simple check in the code at mmap() time to determine if there -were enough free huge pages to cover the mapping. Like most things in the -kernel, the code has evolved over time. However, the basic idea was to -'reserve' huge pages at mmap() time to ensure that huge pages would be -available for page faults in that mapping. The description below attempts to -describe how huge page reserve processing is done in the v4.10 kernel. 
- - -Audience -======== -This description is primarily targeted at kernel developers who are modifying -hugetlbfs code. - - -The Data Structures -=================== - -resv_huge_pages - This is a global (per-hstate) count of reserved huge pages. Reserved - huge pages are only available to the task which reserved them. - Therefore, the number of huge pages generally available is computed - as (``free_huge_pages - resv_huge_pages``). -Reserve Map - A reserve map is described by the structure:: - - struct resv_map { - struct kref refs; - spinlock_t lock; - struct list_head regions; - long adds_in_progress; - struct list_head region_cache; - long region_cache_count; - }; - - There is one reserve map for each huge page mapping in the system. - The regions list within the resv_map describes the regions within - the mapping. A region is described as:: - - struct file_region { - struct list_head link; - long from; - long to; - }; - - The 'from' and 'to' fields of the file region structure are huge page - indices into the mapping. Depending on the type of mapping, a - region in the reserv_map may indicate reservations exist for the - range, or reservations do not exist. -Flags for MAP_PRIVATE Reservations - These are stored in the bottom bits of the reservation map pointer. - - ``#define HPAGE_RESV_OWNER (1UL << 0)`` - Indicates this task is the owner of the reservations - associated with the mapping. - ``#define HPAGE_RESV_UNMAPPED (1UL << 1)`` - Indicates task originally mapping this range (and creating - reserves) has unmapped a page from this task (the child) - due to a failed COW. -Page Flags - The PagePrivate page flag is used to indicate that a huge page - reservation must be restored when the huge page is freed. More - details will be discussed in the "Freeing huge pages" section. - - -Reservation Map Location (Private or Shared) -============================================ - -A huge page mapping or segment is either private or shared. 
If private, -it is typically only available to a single address space (task). If shared, -it can be mapped into multiple address spaces (tasks). The location and -semantics of the reservation map is significantly different for two types -of mappings. Location differences are: - -- For private mappings, the reservation map hangs off the the VMA structure. - Specifically, vma->vm_private_data. This reserve map is created at the - time the mapping (mmap(MAP_PRIVATE)) is created. -- For shared mappings, the reservation map hangs off the inode. Specifically, - inode->i_mapping->private_data. Since shared mappings are always backed - by files in the hugetlbfs filesystem, the hugetlbfs code ensures each inode - contains a reservation map. As a result, the reservation map is allocated - when the inode is created. - - -Creating Reservations -===================== -Reservations are created when a huge page backed shared memory segment is -created (shmget(SHM_HUGETLB)) or a mapping is created via mmap(MAP_HUGETLB). -These operations result in a call to the routine hugetlb_reserve_pages():: - - int hugetlb_reserve_pages(struct inode *inode, - long from, long to, - struct vm_area_struct *vma, - vm_flags_t vm_flags) - -The first thing hugetlb_reserve_pages() does is check for the NORESERVE -flag was specified in either the shmget() or mmap() call. If NORESERVE -was specified, then this routine returns immediately as no reservation -are desired. - -The arguments 'from' and 'to' are huge page indices into the mapping or -underlying file. For shmget(), 'from' is always 0 and 'to' corresponds to -the length of the segment/mapping. For mmap(), the offset argument could -be used to specify the offset into the underlying file. In such a case -the 'from' and 'to' arguments have been adjusted by this offset. - -One of the big differences between PRIVATE and SHARED mappings is the way -in which reservations are represented in the reservation map. 
- -- For shared mappings, an entry in the reservation map indicates a reservation - exists or did exist for the corresponding page. As reservations are - consumed, the reservation map is not modified. -- For private mappings, the lack of an entry in the reservation map indicates - a reservation exists for the corresponding page. As reservations are - consumed, entries are added to the reservation map. Therefore, the - reservation map can also be used to determine which reservations have - been consumed. - -For private mappings, hugetlb_reserve_pages() creates the reservation map and -hangs it off the VMA structure. In addition, the HPAGE_RESV_OWNER flag is set -to indicate this VMA owns the reservations. - -The reservation map is consulted to determine how many huge page reservations -are needed for the current mapping/segment. For private mappings, this is -always the value (to - from). However, for shared mappings it is possible that some reservations may already exist within the range (to - from). See the -section :ref:`Reservation Map Modifications ` -for details on how this is accomplished. - -The mapping may be associated with a subpool. If so, the subpool is consulted -to ensure there is sufficient space for the mapping. It is possible that the -subpool has set aside reservations that can be used for the mapping. See the -section :ref:`Subpool Reservations ` for more details. - -After consulting the reservation map and subpool, the number of needed new -reservations is known. The routine hugetlb_acct_memory() is called to check -for and take the requested number of reservations. hugetlb_acct_memory() -calls into routines that potentially allocate and adjust surplus page counts. -However, within those routines the code is simply checking to ensure there -are enough free huge pages to accommodate the reservation. 
If there are, -the global reservation count resv_huge_pages is adjusted something like the -following:: - - if (resv_needed <= (resv_huge_pages - free_huge_pages)) - resv_huge_pages += resv_needed; - -Note that the global lock hugetlb_lock is held when checking and adjusting -these counters. - -If there were enough free huge pages and the global count resv_huge_pages -was adjusted, then the reservation map associated with the mapping is -modified to reflect the reservations. In the case of a shared mapping, a -file_region will exist that includes the range 'from' 'to'. For private -mappings, no modifications are made to the reservation map as lack of an -entry indicates a reservation exists. - -If hugetlb_reserve_pages() was successful, the global reservation count and -reservation map associated with the mapping will be modified as required to -ensure reservations exist for the range 'from' - 'to'. - -.. _consume_resv: - -Consuming Reservations/Allocating a Huge Page -============================================= - -Reservations are consumed when huge pages associated with the reservations -are allocated and instantiated in the corresponding mapping. The allocation -is performed within the routine alloc_huge_page():: - - struct page *alloc_huge_page(struct vm_area_struct *vma, - unsigned long addr, int avoid_reserve) - -alloc_huge_page is passed a VMA pointer and a virtual address, so it can -consult the reservation map to determine if a reservation exists. In addition, -alloc_huge_page takes the argument avoid_reserve which indicates reserves -should not be used even if it appears they have been set aside for the -specified address. The avoid_reserve argument is most often used in the case -of Copy on Write and Page Migration where additional copies of an existing -page are being allocated. - -The helper routine vma_needs_reservation() is called to determine if a -reservation exists for the address within the mapping(vma). 
See the section -:ref:`Reservation Map Helper Routines ` for detailed -information on what this routine does. -The value returned from vma_needs_reservation() is generally -0 or 1. 0 if a reservation exists for the address, 1 if no reservation exists. -If a reservation does not exist, and there is a subpool associated with the -mapping the subpool is consulted to determine if it contains reservations. -If the subpool contains reservations, one can be used for this allocation. -However, in every case the avoid_reserve argument overrides the use of -a reservation for the allocation. After determining whether a reservation -exists and can be used for the allocation, the routine dequeue_huge_page_vma() -is called. This routine takes two arguments related to reservations: - -- avoid_reserve, this is the same value/argument passed to alloc_huge_page() -- chg, even though this argument is of type long only the values 0 or 1 are - passed to dequeue_huge_page_vma. If the value is 0, it indicates a - reservation exists (see the section "Memory Policy and Reservations" for - possible issues). If the value is 1, it indicates a reservation does not - exist and the page must be taken from the global free pool if possible. - -The free lists associated with the memory policy of the VMA are searched for -a free page. If a page is found, the value free_huge_pages is decremented -when the page is removed from the free list. If there was a reservation -associated with the page, the following adjustments are made:: - - SetPagePrivate(page); /* Indicates allocating this page consumed - * a reservation, and if an error is - * encountered such that the page must be - * freed, the reservation will be restored. */ - resv_huge_pages--; /* Decrement the global reservation count */ - -Note, if no huge page can be found that satisfies the VMA's memory policy -an attempt will be made to allocate one using the buddy allocator. 
This -brings up the issue of surplus huge pages and overcommit which is beyond -the scope reservations. Even if a surplus page is allocated, the same -reservation based adjustments as above will be made: SetPagePrivate(page) and -resv_huge_pages--. - -After obtaining a new huge page, (page)->private is set to the value of -the subpool associated with the page if it exists. This will be used for -subpool accounting when the page is freed. - -The routine vma_commit_reservation() is then called to adjust the reserve -map based on the consumption of the reservation. In general, this involves -ensuring the page is represented within a file_region structure of the region -map. For shared mappings where the the reservation was present, an entry -in the reserve map already existed so no change is made. However, if there -was no reservation in a shared mapping or this was a private mapping a new -entry must be created. - -It is possible that the reserve map could have been changed between the call -to vma_needs_reservation() at the beginning of alloc_huge_page() and the -call to vma_commit_reservation() after the page was allocated. This would -be possible if hugetlb_reserve_pages was called for the same page in a shared -mapping. In such cases, the reservation count and subpool free page count -will be off by one. This rare condition can be identified by comparing the -return value from vma_needs_reservation and vma_commit_reservation. If such -a race is detected, the subpool and global reserve counts are adjusted to -compensate. See the section -:ref:`Reservation Map Helper Routines ` for more -information on these routines. - - -Instantiate Huge Pages -====================== - -After huge page allocation, the page is typically added to the page tables -of the allocating task. Before this, pages in a shared mapping are added -to the page cache and pages in private mappings are added to an anonymous -reverse mapping. In both cases, the PagePrivate flag is cleared. 
Therefore, -when a huge page that has been instantiated is freed no adjustment is made -to the global reservation count (resv_huge_pages). - - -Freeing Huge Pages -================== - -Huge page freeing is performed by the routine free_huge_page(). This routine -is the destructor for hugetlbfs compound pages. As a result, it is only -passed a pointer to the page struct. When a huge page is freed, reservation -accounting may need to be performed. This would be the case if the page was -associated with a subpool that contained reserves, or the page is being freed -on an error path where a global reserve count must be restored. - -The page->private field points to any subpool associated with the page. -If the PagePrivate flag is set, it indicates the global reserve count should -be adjusted (see the section -:ref:`Consuming Reservations/Allocating a Huge Page ` -for information on how these are set). - -The routine first calls hugepage_subpool_put_pages() for the page. If this -routine returns a value of 0 (which does not equal the value passed 1) it -indicates reserves are associated with the subpool, and this newly free page -must be used to keep the number of subpool reserves above the minimum size. -Therefore, the global resv_huge_pages counter is incremented in this case. - -If the PagePrivate flag was set in the page, the global resv_huge_pages counter -will always be incremented. - -.. _sub_pool_resv: - -Subpool Reservations -==================== - -There is a struct hstate associated with each huge page size. The hstate -tracks all huge pages of the specified size. A subpool represents a subset -of pages within a hstate that is associated with a mounted hugetlbfs -filesystem. - -When a hugetlbfs filesystem is mounted a min_size option can be specified -which indicates the minimum number of huge pages required by the filesystem. -If this option is specified, the number of huge pages corresponding to -min_size are reserved for use by the filesystem. 
This number is tracked in -the min_hpages field of a struct hugepage_subpool. At mount time, -hugetlb_acct_memory(min_hpages) is called to reserve the specified number of -huge pages. If they can not be reserved, the mount fails. - -The routines hugepage_subpool_get/put_pages() are called when pages are -obtained from or released back to a subpool. They perform all subpool -accounting, and track any reservations associated with the subpool. -hugepage_subpool_get/put_pages are passed the number of huge pages by which -to adjust the subpool 'used page' count (down for get, up for put). Normally, -they return the same value that was passed or an error if not enough pages -exist in the subpool. - -However, if reserves are associated with the subpool a return value less -than the passed value may be returned. This return value indicates the -number of additional global pool adjustments which must be made. For example, -suppose a subpool contains 3 reserved huge pages and someone asks for 5. -The 3 reserved pages associated with the subpool can be used to satisfy part -of the request. But, 2 pages must be obtained from the global pools. To -relay this information to the caller, the value 2 is returned. The caller -is then responsible for attempting to obtain the additional two pages from -the global pools. - - -COW and Reservations -==================== - -Since shared mappings all point to and use the same underlying pages, the -biggest reservation concern for COW is private mappings. In this case, -two tasks can be pointing at the same previously allocated page. One task -attempts to write to the page, so a new page must be allocated so that each -task points to its own page. - -When the page was originally allocated, the reservation for that page was -consumed. When an attempt to allocate a new page is made as a result of -COW, it is possible that no free huge pages are free and the allocation -will fail. 
- -When the private mapping was originally created, the owner of the mapping -was noted by setting the HPAGE_RESV_OWNER bit in the pointer to the reservation -map of the owner. Since the owner created the mapping, the owner owns all -the reservations associated with the mapping. Therefore, when a write fault -occurs and there is no page available, different action is taken for the owner -and non-owner of the reservation. - -In the case where the faulting task is not the owner, the fault will fail and -the task will typically receive a SIGBUS. - -If the owner is the faulting task, we want it to succeed since it owned the -original reservation. To accomplish this, the page is unmapped from the -non-owning task. In this way, the only reference is from the owning task. -In addition, the HPAGE_RESV_UNMAPPED bit is set in the reservation map pointer -of the non-owning task. The non-owning task may receive a SIGBUS if it later -faults on a non-present page. But, the original owner of the -mapping/reservation will behave as expected. - - -.. _resv_map_modifications: - -Reservation Map Modifications -============================= - -The following low level routines are used to make modifications to a -reservation map. Typically, these routines are not called directly. Rather, -a reservation map helper routine is called which calls one of these low level -routines. These low level routines are fairly well documented in the source -code (mm/hugetlb.c). These routines are:: - - long region_chg(struct resv_map *resv, long f, long t); - long region_add(struct resv_map *resv, long f, long t); - void region_abort(struct resv_map *resv, long f, long t); - long region_count(struct resv_map *resv, long f, long t); - -Operations on the reservation map typically involve two operations: - -1) region_chg() is called to examine the reserve map and determine how - many pages in the specified range [f, t) are NOT currently represented. 
- - The calling code performs global checks and allocations to determine if - there are enough huge pages for the operation to succeed. - -2) - a) If the operation can succeed, region_add() is called to actually modify - the reservation map for the same range [f, t) previously passed to - region_chg(). - b) If the operation can not succeed, region_abort is called for the same - range [f, t) to abort the operation. - -Note that this is a two step process where region_add() and region_abort() -are guaranteed to succeed after a prior call to region_chg() for the same -range. region_chg() is responsible for pre-allocating any data structures -necessary to ensure the subsequent operations (specifically region_add())) -will succeed. - -As mentioned above, region_chg() determines the number of pages in the range -which are NOT currently represented in the map. This number is returned to -the caller. region_add() returns the number of pages in the range added to -the map. In most cases, the return value of region_add() is the same as the -return value of region_chg(). However, in the case of shared mappings it is -possible for changes to the reservation map to be made between the calls to -region_chg() and region_add(). In this case, the return value of region_add() -will not match the return value of region_chg(). It is likely that in such -cases global counts and subpool accounting will be incorrect and in need of -adjustment. It is the responsibility of the caller to check for this condition -and make the appropriate adjustments. - -The routine region_del() is called to remove regions from a reservation map. -It is typically called in the following situations: - -- When a file in the hugetlbfs filesystem is being removed, the inode will - be released and the reservation map freed. Before freeing the reservation - map, all the individual file_region structures must be freed. In this case - region_del is passed the range [0, LONG_MAX). 
-- When a hugetlbfs file is being truncated. In this case, all allocated pages - after the new file size must be freed. In addition, any file_region entries - in the reservation map past the new end of file must be deleted. In this - case, region_del is passed the range [new_end_of_file, LONG_MAX). -- When a hole is being punched in a hugetlbfs file. In this case, huge pages - are removed from the middle of the file one at a time. As the pages are - removed, region_del() is called to remove the corresponding entry from the - reservation map. In this case, region_del is passed the range - [page_idx, page_idx + 1). - -In every case, region_del() will return the number of pages removed from the -reservation map. In VERY rare cases, region_del() can fail. This can only -happen in the hole punch case where it has to split an existing file_region -entry and can not allocate a new structure. In this error case, region_del() -will return -ENOMEM. The problem here is that the reservation map will -indicate that there is a reservation for the page. However, the subpool and -global reservation counts will not reflect the reservation. To handle this -situation, the routine hugetlb_fix_reserve_counts() is called to adjust the -counters so that they correspond with the reservation map entry that could -not be deleted. - -region_count() is called when unmapping a private huge page mapping. In -private mappings, the lack of a entry in the reservation map indicates that -a reservation exists. Therefore, by counting the number of entries in the -reservation map we know how many reservations were consumed and how many are -outstanding (outstanding = (end - start) - region_count(resv, start, end)). -Since the mapping is going away, the subpool and global reservation counts -are decremented by the number of outstanding reservations. - -.. 
_resv_map_helpers: - -Reservation Map Helper Routines -=============================== - -Several helper routines exist to query and modify the reservation maps. -These routines are only interested with reservations for a specific huge -page, so they just pass in an address instead of a range. In addition, -they pass in the associated VMA. From the VMA, the type of mapping (private -or shared) and the location of the reservation map (inode or VMA) can be -determined. These routines simply call the underlying routines described -in the section "Reservation Map Modifications". However, they do take into -account the 'opposite' meaning of reservation map entries for private and -shared mappings and hide this detail from the caller:: - - long vma_needs_reservation(struct hstate *h, - struct vm_area_struct *vma, - unsigned long addr) - -This routine calls region_chg() for the specified page. If no reservation -exists, 1 is returned. If a reservation exists, 0 is returned:: - - long vma_commit_reservation(struct hstate *h, - struct vm_area_struct *vma, - unsigned long addr) - -This calls region_add() for the specified page. As in the case of region_chg -and region_add, this routine is to be called after a previous call to -vma_needs_reservation. It will add a reservation entry for the page. It -returns 1 if the reservation was added and 0 if not. The return value should -be compared with the return value of the previous call to -vma_needs_reservation. An unexpected difference indicates the reservation -map was modified between calls:: - - void vma_end_reservation(struct hstate *h, - struct vm_area_struct *vma, - unsigned long addr) - -This calls region_abort() for the specified page. As in the case of region_chg -and region_abort, this routine is to be called after a previous call to -vma_needs_reservation. 
It will abort/end the in progress reservation add -operation:: - - long vma_add_reservation(struct hstate *h, - struct vm_area_struct *vma, - unsigned long addr) - -This is a special wrapper routine to help facilitate reservation cleanup -on error paths. It is only called from the routine restore_reserve_on_error(). -This routine is used in conjunction with vma_needs_reservation in an attempt -to add a reservation to the reservation map. It takes into account the -different reservation map semantics for private and shared mappings. Hence, -region_add is called for shared mappings (as an entry present in the map -indicates a reservation), and region_del is called for private mappings (as -the absence of an entry in the map indicates a reservation). See the section -"Reservation cleanup in error paths" for more information on what needs to -be done on error paths. - - -Reservation Cleanup in Error Paths -================================== - -As mentioned in the section -:ref:`Reservation Map Helper Routines `, reservation -map modifications are performed in two steps. First vma_needs_reservation -is called before a page is allocated. If the allocation is successful, -then vma_commit_reservation is called. If not, vma_end_reservation is called. -Global and subpool reservation counts are adjusted based on success or failure -of the operation and all is well. - -Additionally, after a huge page is instantiated the PagePrivate flag is -cleared so that accounting when the page is ultimately freed is correct. - -However, there are several instances where errors are encountered after a huge -page is allocated but before it is instantiated. In this case, the page -allocation has consumed the reservation and made the appropriate subpool, -reservation map and global count adjustments. If the page is freed at this -time (before instantiation and clearing of PagePrivate), then free_huge_page -will increment the global reservation count. 
However, the reservation map -indicates the reservation was consumed. This resulting inconsistent state -will cause the 'leak' of a reserved huge page. The global reserve count will -be higher than it should and prevent allocation of a pre-allocated page. - -The routine restore_reserve_on_error() attempts to handle this situation. It -is fairly well documented. The intention of this routine is to restore -the reservation map to the way it was before the page allocation. In this -way, the state of the reservation map will correspond to the global reservation -count after the page is freed. - -The routine restore_reserve_on_error itself may encounter errors while -attempting to restore the reservation map entry. In this case, it will -simply clear the PagePrivate flag of the page. In this way, the global -reserve count will not be incremented when the page is freed. However, the -reservation map will continue to look as though the reservation was consumed. -A page can still be allocated for the address, but it will not use a reserved -page as originally intended. - -There is some code (most notably userfaultfd) which can not call -restore_reserve_on_error. In this case, it simply modifies the PagePrivate -so that a reservation will not be leaked when the huge page is freed. - - -Reservations and Memory Policy -============================== -Per-node huge page lists existed in struct hstate when git was first used -to manage Linux code. The concept of reservations was added some time later. -When reservations were added, no attempt was made to take memory policy -into account. While cpusets are not exactly the same as memory policy, this -comment in hugetlb_acct_memory sums up the interaction between reservations -and cpusets/memory policy:: - - /* - * When cpuset is configured, it breaks the strict hugetlb page - * reservation as the accounting is done on a global variable. 
Such - * reservation is completely rubbish in the presence of cpuset because - * the reservation is not checked against page availability for the - * current cpuset. Application can still potentially OOM'ed by kernel - * with lack of free htlb page in cpuset that the task is in. - * Attempt to enforce strict accounting with cpuset is almost - * impossible (or too ugly) because cpuset is too fluid that - * task or memory node can be dynamically moved between cpusets. - * - * The change of semantics for shared hugetlb mapping with cpuset is - * undesirable. However, in order to preserve some of the semantics, - * we fall back to check against current free page availability as - * a best attempt and hopefully to minimize the impact of changing - * semantics that cpuset has. - */ - -Huge page reservations were added to prevent unexpected page allocation -failures (OOM) at page fault time. However, if an application makes use -of cpusets or memory policy there is no guarantee that huge pages will be -available on the required nodes. This is true even if there are a sufficient -number of global reservations. - - -Mike Kravetz, 7 April 2017 diff --git a/Documentation/vm/hugetlbpage.rst b/Documentation/vm/hugetlbpage.rst new file mode 100644 index 000000000000..a5da14b05b4b --- /dev/null +++ b/Documentation/vm/hugetlbpage.rst @@ -0,0 +1,386 @@ +.. _hugetlbpage: + +============= +HugeTLB Pages +============= + +Overview +======== + +The intent of this file is to give a brief summary of hugetlbpage support in +the Linux kernel. This support is built on top of multiple page size support +that is provided by most modern architectures. For example, x86 CPUs normally +support 4K and 2M (1G if architecturally supported) page sizes, ia64 +architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M, +256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical +translations. Typically this is a very scarce resource on processor. 
+Operating systems try to make best use of limited number of TLB resources. +This optimization is more critical now as bigger and bigger physical memories +(several GBs) are more readily available. + +Users can use the huge page support in Linux kernel by either using the mmap +system call or standard SYSV shared memory system calls (shmget, shmat). + +First the Linux kernel needs to be built with the CONFIG_HUGETLBFS +(present under "File systems") and CONFIG_HUGETLB_PAGE (selected +automatically when CONFIG_HUGETLBFS is selected) configuration +options. + +The ``/proc/meminfo`` file provides information about the total number of +persistent hugetlb pages in the kernel's huge page pool. It also displays +default huge page size and information about the number of free, reserved +and surplus huge pages in the pool of huge pages of default size. +The huge page size is needed for generating the proper alignment and +size of the arguments to system calls that map huge page regions. + +The output of ``cat /proc/meminfo`` will include lines like:: + + HugePages_Total: uuu + HugePages_Free: vvv + HugePages_Rsvd: www + HugePages_Surp: xxx + Hugepagesize: yyy kB + Hugetlb: zzz kB + +where: + +HugePages_Total + is the size of the pool of huge pages. +HugePages_Free + is the number of huge pages in the pool that are not yet + allocated. +HugePages_Rsvd + is short for "reserved," and is the number of huge pages for + which a commitment to allocate from the pool has been made, + but no allocation has yet been made. Reserved huge pages + guarantee that an application will be able to allocate a + huge page from the pool of huge pages at fault time. +HugePages_Surp + is short for "surplus," and is the number of huge pages in + the pool above the value in ``/proc/sys/vm/nr_hugepages``. The + maximum number of surplus huge pages is controlled by + ``/proc/sys/vm/nr_overcommit_hugepages``. +Hugepagesize + is the default hugepage size (in Kb). 
+Hugetlb + is the total amount of memory (in kB), consumed by huge + pages of all sizes. + If huge pages of different sizes are in use, this number + will exceed HugePages_Total \* Hugepagesize. To get more + detailed information, please, refer to + ``/sys/kernel/mm/hugepages`` (described below). + + +``/proc/filesystems`` should also show a filesystem of type "hugetlbfs" +configured in the kernel. + +``/proc/sys/vm/nr_hugepages`` indicates the current number of "persistent" huge +pages in the kernel's huge page pool. "Persistent" huge pages will be +returned to the huge page pool when freed by a task. A user with root +privileges can dynamically allocate more or free some persistent huge pages +by increasing or decreasing the value of ``nr_hugepages``. + +Pages that are used as huge pages are reserved inside the kernel and cannot +be used for other purposes. Huge pages cannot be swapped out under +memory pressure. + +Once a number of huge pages have been pre-allocated to the kernel huge page +pool, a user with appropriate privilege can use either the mmap system call +or shared memory system calls to use the huge pages. See the discussion of +Using Huge Pages, below. + +The administrator can allocate persistent huge pages on the kernel boot +command line by specifying the "hugepages=N" parameter, where 'N' = the +number of huge pages requested. This is the most reliable method of +allocating huge pages as memory has not yet become fragmented. + +Some platforms support multiple huge page sizes. To allocate huge pages +of a specific size, one must precede the huge pages boot command parameters +with a huge page size selection parameter "hugepagesz=<size>". <size> must +be specified in bytes with optional scale suffix [kKmMgG]. The default huge +page size may be selected with the "default_hugepagesz=<size>" boot parameter. + +When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages`` +indicates the current number of pre-allocated huge pages of the default size.
+Thus, one can use the following command to dynamically allocate/deallocate +default sized persistent huge pages:: + + echo 20 > /proc/sys/vm/nr_hugepages + +This command will try to adjust the number of default sized huge pages in the +huge page pool to 20, allocating or freeing huge pages, as required. + +On a NUMA platform, the kernel will attempt to distribute the huge page pool +over all the set of allowed nodes specified by the NUMA memory policy of the +task that modifies ``nr_hugepages``. The default for the allowed nodes--when the +task has default memory policy--is all on-line nodes with memory. Allowed +nodes with insufficient available, contiguous memory for a huge page will be +silently skipped when allocating persistent huge pages. See the discussion +below of the interaction of task memory policy, cpusets and per node attributes +with the allocation and freeing of persistent huge pages. + +The success or failure of huge page allocation depends on the amount of +physically contiguous memory that is present in system at the time of the +allocation attempt. If the kernel is unable to allocate huge pages from +some nodes in a NUMA system, it will attempt to make up the difference by +allocating extra pages on other nodes with sufficient available contiguous +memory, if any. + +System administrators may want to put this command in one of the local rc +init files. This will enable the kernel to allocate huge pages early in +the boot process when the possibility of getting physical contiguous pages +is still very high. Administrators can verify the number of huge pages +actually allocated by checking the sysctl or meminfo. To check the per node +distribution of huge pages in a NUMA system, use:: + + cat /sys/devices/system/node/node*/meminfo | fgrep Huge + +``/proc/sys/vm/nr_overcommit_hugepages`` specifies how large the pool of +huge pages can grow, if more huge pages than ``/proc/sys/vm/nr_hugepages`` are +requested by applications. 
Writing any non-zero value into this file +indicates that the hugetlb subsystem is allowed to try to obtain that +number of "surplus" huge pages from the kernel's normal page pool, when the +persistent huge page pool is exhausted. As these surplus huge pages become +unused, they are freed back to the kernel's normal page pool. + +When increasing the huge page pool size via ``nr_hugepages``, any existing +surplus pages will first be promoted to persistent huge pages. Then, additional +huge pages will be allocated, if necessary and if possible, to fulfill +the new persistent huge page pool size. + +The administrator may shrink the pool of persistent huge pages for +the default huge page size by setting the ``nr_hugepages`` sysctl to a +smaller value. The kernel will attempt to balance the freeing of huge pages +across all nodes in the memory policy of the task modifying ``nr_hugepages``. +Any free huge pages on the selected nodes will be freed back to the kernel's +normal page pool. + +Caveat: Shrinking the persistent huge page pool via ``nr_hugepages`` such that +it becomes less than the number of huge pages in use will convert the balance +of the in-use huge pages to surplus huge pages. This will occur even if +the number of surplus pages it would exceed the overcommit value. As long as +this condition holds--that is, until ``nr_hugepages+nr_overcommit_hugepages`` is +increased sufficiently, or the surplus huge pages go out of use and are freed-- +no more surplus huge pages will be allowed to be allocated. + +With support for multiple huge page pools at run-time available, much of +the huge page userspace interface in ``/proc/sys/vm`` has been duplicated in +sysfs. +The ``/proc`` interfaces discussed above have been retained for backwards +compatibility. 
The root huge page control directory in sysfs is:: + + /sys/kernel/mm/hugepages + +For each huge page size supported by the running kernel, a subdirectory +will exist, of the form:: + + hugepages-${size}kB + +Inside each of these directories, the same set of files will exist:: + + nr_hugepages + nr_hugepages_mempolicy + nr_overcommit_hugepages + free_hugepages + resv_hugepages + surplus_hugepages + +which function as described above for the default huge page-sized case. + + +Interaction of Task Memory Policy with Huge Page Allocation/Freeing +=================================================================== + +Whether huge pages are allocated and freed via the ``/proc`` interface or +the ``/sysfs`` interface using the ``nr_hugepages_mempolicy`` attribute, the +NUMA nodes from which huge pages are allocated or freed are controlled by the +NUMA memory policy of the task that modifies the ``nr_hugepages_mempolicy`` +sysctl or attribute. When the ``nr_hugepages`` attribute is used, mempolicy +is ignored. + +The recommended method to allocate or free huge pages to/from the kernel +huge page pool, using the ``nr_hugepages`` example above, is:: + + numactl --interleave <node-list> echo 20 \ + >/proc/sys/vm/nr_hugepages_mempolicy + +or, more succinctly:: + + numactl -m <node-list> echo 20 >/proc/sys/vm/nr_hugepages_mempolicy + +This will allocate or free ``abs(20 - nr_hugepages)`` to or from the nodes +specified in <node-list>, depending on whether number of persistent huge pages +is initially less than or greater than 20, respectively. No huge pages will be +allocated nor freed on any node not included in the specified <node-list>. + +When adjusting the persistent hugepage count via ``nr_hugepages_mempolicy``, any +memory policy mode--bind, preferred, local or interleave--may be used. The +resulting effect on persistent huge page allocation is as follows: + +#.
Regardless of mempolicy mode [see Documentation/vm/numa_memory_policy.rst], + persistent huge pages will be distributed across the node or nodes + specified in the mempolicy as if "interleave" had been specified. + However, if a node in the policy does not contain sufficient contiguous + memory for a huge page, the allocation will not "fallback" to the nearest + neighbor node with sufficient contiguous memory. To do this would cause + undesirable imbalance in the distribution of the huge page pool, or + possibly, allocation of persistent huge pages on nodes not allowed by + the task's memory policy. + +#. One or more nodes may be specified with the bind or interleave policy. + If more than one node is specified with the preferred policy, only the + lowest numeric id will be used. Local policy will select the node where + the task is running at the time the nodes_allowed mask is constructed. + For local policy to be deterministic, the task must be bound to a cpu or + cpus in a single node. Otherwise, the task could be migrated to some + other node at any time after launch and the resulting node will be + indeterminate. Thus, local policy is not very useful for this purpose. + Any of the other mempolicy modes may be used to specify a single node. + +#. The nodes allowed mask will be derived from any non-default task mempolicy, + whether this policy was set explicitly by the task itself or one of its + ancestors, such as numactl. This means that if the task is invoked from a + shell with non-default policy, that policy will be used. One can specify a + node list of "all" with numactl --interleave or --membind [-m] to achieve + interleaving over all nodes in the system or cpuset. + +#. Any task mempolicy specified--e.g., using numactl--will be constrained by + the resource limits of any cpuset in which the task runs. 
Thus, there will + be no way for a task with non-default policy running in a cpuset with a + subset of the system nodes to allocate huge pages outside the cpuset + without first moving to a cpuset that contains all of the desired nodes. + +#. Boot-time huge page allocation attempts to distribute the requested number + of huge pages over all on-line nodes with memory. + +Per Node Hugepages Attributes +============================= + +A subset of the contents of the root huge page control directory in sysfs, +described above, will be replicated under each the system device of each +NUMA node with memory in:: + + /sys/devices/system/node/node[0-9]*/hugepages/ + +Under this directory, the subdirectory for each supported huge page size +contains the following attribute files:: + + nr_hugepages + free_hugepages + surplus_hugepages + +The free\_' and surplus\_' attribute files are read-only. They return the number +of free and surplus [overcommitted] huge pages, respectively, on the parent +node. + +The ``nr_hugepages`` attribute returns the total number of huge pages on the +specified node. When this attribute is written, the number of persistent huge +pages on the parent node will be adjusted to the specified value, if sufficient +resources exist, regardless of the task's mempolicy or cpuset constraints. + +Note that the number of overcommit and reserve pages remain global quantities, +as we don't know until fault time, when the faulting task's mempolicy is +applied, from which node the huge page allocation will be attempted. + + +Using Huge Pages +================ + +If the user applications are going to request huge pages using mmap system +call, then it is required that system administrator mount a file system of +type hugetlbfs:: + + mount -t hugetlbfs \ + -o uid=<value>,gid=<value>,mode=<value>,pagesize=<value>,size=<value>,\ + min_size=<value>,nr_inodes=<value> none /mnt/huge + +This command mounts a (pseudo) filesystem of type hugetlbfs on the directory +``/mnt/huge``.
Any files created on ``/mnt/huge`` uses huge pages. + +The ``uid`` and ``gid`` options sets the owner and group of the root of the +file system. By default the ``uid`` and ``gid`` of the current process +are taken. + +The ``mode`` option sets the mode of root of file system to value & 01777. +This value is given in octal. By default the value 0755 is picked. + +If the platform supports multiple huge page sizes, the ``pagesize`` option can +be used to specify the huge page size and associated pool. ``pagesize`` +is specified in bytes. If ``pagesize`` is not specified the platform's +default huge page size and associated pool will be used. + +The ``size`` option sets the maximum value of memory (huge pages) allowed +for that filesystem (``/mnt/huge``). The ``size`` option can be specified +in bytes, or as a percentage of the specified huge page pool (``nr_hugepages``). +The size is rounded down to HPAGE_SIZE boundary. + +The ``min_size`` option sets the minimum value of memory (huge pages) allowed +for the filesystem. ``min_size`` can be specified in the same way as ``size``, +either bytes or a percentage of the huge page pool. +At mount time, the number of huge pages specified by ``min_size`` are reserved +for use by the filesystem. +If there are not enough free huge pages available, the mount will fail. +As huge pages are allocated to the filesystem and freed, the reserve count +is adjusted so that the sum of allocated and reserved huge pages is always +at least ``min_size``. + +The option ``nr_inodes`` sets the maximum number of inodes that ``/mnt/huge`` +can use. + +If the ``size``, ``min_size`` or ``nr_inodes`` option is not provided on +command line then no limits are set. + +For ``pagesize``, ``size``, ``min_size`` and ``nr_inodes`` options, you can +use [G|g]/[M|m]/[K|k] to represent giga/mega/kilo. +For example, size=2K has the same meaning as size=2048. 
+ +While read system calls are supported on files that reside on hugetlb +file systems, write system calls are not. + +Regular chown, chgrp, and chmod commands (with right permissions) could be +used to change the file attributes on hugetlbfs. + +Also, it is important to note that no such mount command is required if +applications are going to use only shmat/shmget system calls or mmap with +MAP_HUGETLB. For an example of how to use mmap with MAP_HUGETLB see +:ref:`map_hugetlb ` below. + +Users who wish to use hugetlb memory via shared memory segment should be a +member of a supplementary group and system admin needs to configure that gid +into ``/proc/sys/vm/hugetlb_shm_group``. It is possible for same or different +applications to use any combination of mmaps and shm* calls, though the mount of +filesystem will be required for using mmap calls without MAP_HUGETLB. + +Syscalls that operate on memory backed by hugetlb pages only have their lengths +aligned to the native page size of the processor; they will normally fail with +errno set to EINVAL or exclude hugetlb pages that extend beyond the length if +not hugepage aligned. For example, munmap(2) will fail if memory is backed by +a hugetlb page and the length is smaller than the hugepage size. + + +Examples +======== + +.. _map_hugetlb: + +``map_hugetlb`` + see tools/testing/selftests/vm/map_hugetlb.c + +``hugepage-shm`` + see tools/testing/selftests/vm/hugepage-shm.c + +``hugepage-mmap`` + see tools/testing/selftests/vm/hugepage-mmap.c + +The `libhugetlbfs`_ library provides a wide range of userspace tools +to help with huge page usability, environment setup, and control. + +.. _libhugetlbfs: https://github.com/libhugetlbfs/libhugetlbfs + +Kernel development regression testing +===================================== + +The most complete set of hugetlb tests are in the libhugetlbfs repository. +If you modify any hugetlb related code, use the libhugetlbfs test suite +to check for regressions. 
In addition, if you add any new hugetlb +functionality, please add appropriate tests to libhugetlbfs. diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt deleted file mode 100644 index 3bb0d991f102..000000000000 --- a/Documentation/vm/hugetlbpage.txt +++ /dev/null @@ -1,386 +0,0 @@ -.. _hugetlbpage: - -============= -HugeTLB Pages -============= - -Overview -======== - -The intent of this file is to give a brief summary of hugetlbpage support in -the Linux kernel. This support is built on top of multiple page size support -that is provided by most modern architectures. For example, x86 CPUs normally -support 4K and 2M (1G if architecturally supported) page sizes, ia64 -architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M, -256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical -translations. Typically this is a very scarce resource on processor. -Operating systems try to make best use of limited number of TLB resources. -This optimization is more critical now as bigger and bigger physical memories -(several GBs) are more readily available. - -Users can use the huge page support in Linux kernel by either using the mmap -system call or standard SYSV shared memory system calls (shmget, shmat). - -First the Linux kernel needs to be built with the CONFIG_HUGETLBFS -(present under "File systems") and CONFIG_HUGETLB_PAGE (selected -automatically when CONFIG_HUGETLBFS is selected) configuration -options. - -The ``/proc/meminfo`` file provides information about the total number of -persistent hugetlb pages in the kernel's huge page pool. It also displays -default huge page size and information about the number of free, reserved -and surplus huge pages in the pool of huge pages of default size. -The huge page size is needed for generating the proper alignment and -size of the arguments to system calls that map huge page regions. 
- -The output of ``cat /proc/meminfo`` will include lines like:: - - HugePages_Total: uuu - HugePages_Free: vvv - HugePages_Rsvd: www - HugePages_Surp: xxx - Hugepagesize: yyy kB - Hugetlb: zzz kB - -where: - -HugePages_Total - is the size of the pool of huge pages. -HugePages_Free - is the number of huge pages in the pool that are not yet - allocated. -HugePages_Rsvd - is short for "reserved," and is the number of huge pages for - which a commitment to allocate from the pool has been made, - but no allocation has yet been made. Reserved huge pages - guarantee that an application will be able to allocate a - huge page from the pool of huge pages at fault time. -HugePages_Surp - is short for "surplus," and is the number of huge pages in - the pool above the value in ``/proc/sys/vm/nr_hugepages``. The - maximum number of surplus huge pages is controlled by - ``/proc/sys/vm/nr_overcommit_hugepages``. -Hugepagesize - is the default hugepage size (in Kb). -Hugetlb - is the total amount of memory (in kB), consumed by huge - pages of all sizes. - If huge pages of different sizes are in use, this number - will exceed HugePages_Total \* Hugepagesize. To get more - detailed information, please, refer to - ``/sys/kernel/mm/hugepages`` (described below). - - -``/proc/filesystems`` should also show a filesystem of type "hugetlbfs" -configured in the kernel. - -``/proc/sys/vm/nr_hugepages`` indicates the current number of "persistent" huge -pages in the kernel's huge page pool. "Persistent" huge pages will be -returned to the huge page pool when freed by a task. A user with root -privileges can dynamically allocate more or free some persistent huge pages -by increasing or decreasing the value of ``nr_hugepages``. - -Pages that are used as huge pages are reserved inside the kernel and cannot -be used for other purposes. Huge pages cannot be swapped out under -memory pressure. 
- -Once a number of huge pages have been pre-allocated to the kernel huge page -pool, a user with appropriate privilege can use either the mmap system call -or shared memory system calls to use the huge pages. See the discussion of -Using Huge Pages, below. - -The administrator can allocate persistent huge pages on the kernel boot -command line by specifying the "hugepages=N" parameter, where 'N' = the -number of huge pages requested. This is the most reliable method of -allocating huge pages as memory has not yet become fragmented. - -Some platforms support multiple huge page sizes. To allocate huge pages -of a specific size, one must precede the huge pages boot command parameters -with a huge page size selection parameter "hugepagesz=". must -be specified in bytes with optional scale suffix [kKmMgG]. The default huge -page size may be selected with the "default_hugepagesz=" boot parameter. - -When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages`` -indicates the current number of pre-allocated huge pages of the default size. -Thus, one can use the following command to dynamically allocate/deallocate -default sized persistent huge pages:: - - echo 20 > /proc/sys/vm/nr_hugepages - -This command will try to adjust the number of default sized huge pages in the -huge page pool to 20, allocating or freeing huge pages, as required. - -On a NUMA platform, the kernel will attempt to distribute the huge page pool -over all the set of allowed nodes specified by the NUMA memory policy of the -task that modifies ``nr_hugepages``. The default for the allowed nodes--when the -task has default memory policy--is all on-line nodes with memory. Allowed -nodes with insufficient available, contiguous memory for a huge page will be -silently skipped when allocating persistent huge pages. See the discussion -below of the interaction of task memory policy, cpusets and per node attributes -with the allocation and freeing of persistent huge pages. 
- -The success or failure of huge page allocation depends on the amount of -physically contiguous memory that is present in system at the time of the -allocation attempt. If the kernel is unable to allocate huge pages from -some nodes in a NUMA system, it will attempt to make up the difference by -allocating extra pages on other nodes with sufficient available contiguous -memory, if any. - -System administrators may want to put this command in one of the local rc -init files. This will enable the kernel to allocate huge pages early in -the boot process when the possibility of getting physical contiguous pages -is still very high. Administrators can verify the number of huge pages -actually allocated by checking the sysctl or meminfo. To check the per node -distribution of huge pages in a NUMA system, use:: - - cat /sys/devices/system/node/node*/meminfo | fgrep Huge - -``/proc/sys/vm/nr_overcommit_hugepages`` specifies how large the pool of -huge pages can grow, if more huge pages than ``/proc/sys/vm/nr_hugepages`` are -requested by applications. Writing any non-zero value into this file -indicates that the hugetlb subsystem is allowed to try to obtain that -number of "surplus" huge pages from the kernel's normal page pool, when the -persistent huge page pool is exhausted. As these surplus huge pages become -unused, they are freed back to the kernel's normal page pool. - -When increasing the huge page pool size via ``nr_hugepages``, any existing -surplus pages will first be promoted to persistent huge pages. Then, additional -huge pages will be allocated, if necessary and if possible, to fulfill -the new persistent huge page pool size. - -The administrator may shrink the pool of persistent huge pages for -the default huge page size by setting the ``nr_hugepages`` sysctl to a -smaller value. The kernel will attempt to balance the freeing of huge pages -across all nodes in the memory policy of the task modifying ``nr_hugepages``. 
-Any free huge pages on the selected nodes will be freed back to the kernel's -normal page pool. - -Caveat: Shrinking the persistent huge page pool via ``nr_hugepages`` such that -it becomes less than the number of huge pages in use will convert the balance -of the in-use huge pages to surplus huge pages. This will occur even if -the number of surplus pages it would exceed the overcommit value. As long as -this condition holds--that is, until ``nr_hugepages+nr_overcommit_hugepages`` is -increased sufficiently, or the surplus huge pages go out of use and are freed-- -no more surplus huge pages will be allowed to be allocated. - -With support for multiple huge page pools at run-time available, much of -the huge page userspace interface in ``/proc/sys/vm`` has been duplicated in -sysfs. -The ``/proc`` interfaces discussed above have been retained for backwards -compatibility. The root huge page control directory in sysfs is:: - - /sys/kernel/mm/hugepages - -For each huge page size supported by the running kernel, a subdirectory -will exist, of the form:: - - hugepages-${size}kB - -Inside each of these directories, the same set of files will exist:: - - nr_hugepages - nr_hugepages_mempolicy - nr_overcommit_hugepages - free_hugepages - resv_hugepages - surplus_hugepages - -which function as described above for the default huge page-sized case. - - -Interaction of Task Memory Policy with Huge Page Allocation/Freeing -=================================================================== - -Whether huge pages are allocated and freed via the ``/proc`` interface or -the ``/sysfs`` interface using the ``nr_hugepages_mempolicy`` attribute, the -NUMA nodes from which huge pages are allocated or freed are controlled by the -NUMA memory policy of the task that modifies the ``nr_hugepages_mempolicy`` -sysctl or attribute. When the ``nr_hugepages`` attribute is used, mempolicy -is ignored. 
- -The recommended method to allocate or free huge pages to/from the kernel -huge page pool, using the ``nr_hugepages`` example above, is:: - - numactl --interleave echo 20 \ - >/proc/sys/vm/nr_hugepages_mempolicy - -or, more succinctly:: - - numactl -m echo 20 >/proc/sys/vm/nr_hugepages_mempolicy - -This will allocate or free ``abs(20 - nr_hugepages)`` to or from the nodes -specified in , depending on whether number of persistent huge pages -is initially less than or greater than 20, respectively. No huge pages will be -allocated nor freed on any node not included in the specified . - -When adjusting the persistent hugepage count via ``nr_hugepages_mempolicy``, any -memory policy mode--bind, preferred, local or interleave--may be used. The -resulting effect on persistent huge page allocation is as follows: - -#. Regardless of mempolicy mode [see Documentation/vm/numa_memory_policy.txt], - persistent huge pages will be distributed across the node or nodes - specified in the mempolicy as if "interleave" had been specified. - However, if a node in the policy does not contain sufficient contiguous - memory for a huge page, the allocation will not "fallback" to the nearest - neighbor node with sufficient contiguous memory. To do this would cause - undesirable imbalance in the distribution of the huge page pool, or - possibly, allocation of persistent huge pages on nodes not allowed by - the task's memory policy. - -#. One or more nodes may be specified with the bind or interleave policy. - If more than one node is specified with the preferred policy, only the - lowest numeric id will be used. Local policy will select the node where - the task is running at the time the nodes_allowed mask is constructed. - For local policy to be deterministic, the task must be bound to a cpu or - cpus in a single node. Otherwise, the task could be migrated to some - other node at any time after launch and the resulting node will be - indeterminate. 
Thus, local policy is not very useful for this purpose. - Any of the other mempolicy modes may be used to specify a single node. - -#. The nodes allowed mask will be derived from any non-default task mempolicy, - whether this policy was set explicitly by the task itself or one of its - ancestors, such as numactl. This means that if the task is invoked from a - shell with non-default policy, that policy will be used. One can specify a - node list of "all" with numactl --interleave or --membind [-m] to achieve - interleaving over all nodes in the system or cpuset. - -#. Any task mempolicy specified--e.g., using numactl--will be constrained by - the resource limits of any cpuset in which the task runs. Thus, there will - be no way for a task with non-default policy running in a cpuset with a - subset of the system nodes to allocate huge pages outside the cpuset - without first moving to a cpuset that contains all of the desired nodes. - -#. Boot-time huge page allocation attempts to distribute the requested number - of huge pages over all on-lines nodes with memory. - -Per Node Hugepages Attributes -============================= - -A subset of the contents of the root huge page control directory in sysfs, -described above, will be replicated under each the system device of each -NUMA node with memory in:: - - /sys/devices/system/node/node[0-9]*/hugepages/ - -Under this directory, the subdirectory for each supported huge page size -contains the following attribute files:: - - nr_hugepages - free_hugepages - surplus_hugepages - -The free\_' and surplus\_' attribute files are read-only. They return the number -of free and surplus [overcommitted] huge pages, respectively, on the parent -node. - -The ``nr_hugepages`` attribute returns the total number of huge pages on the -specified node. 
When this attribute is written, the number of persistent huge -pages on the parent node will be adjusted to the specified value, if sufficient -resources exist, regardless of the task's mempolicy or cpuset constraints. - -Note that the number of overcommit and reserve pages remain global quantities, -as we don't know until fault time, when the faulting task's mempolicy is -applied, from which node the huge page allocation will be attempted. - - -Using Huge Pages -================ - -If the user applications are going to request huge pages using mmap system -call, then it is required that system administrator mount a file system of -type hugetlbfs:: - - mount -t hugetlbfs \ - -o uid=,gid=,mode=,pagesize=,size=,\ - min_size=,nr_inodes= none /mnt/huge - -This command mounts a (pseudo) filesystem of type hugetlbfs on the directory -``/mnt/huge``. Any files created on ``/mnt/huge`` uses huge pages. - -The ``uid`` and ``gid`` options sets the owner and group of the root of the -file system. By default the ``uid`` and ``gid`` of the current process -are taken. - -The ``mode`` option sets the mode of root of file system to value & 01777. -This value is given in octal. By default the value 0755 is picked. - -If the platform supports multiple huge page sizes, the ``pagesize`` option can -be used to specify the huge page size and associated pool. ``pagesize`` -is specified in bytes. If ``pagesize`` is not specified the platform's -default huge page size and associated pool will be used. - -The ``size`` option sets the maximum value of memory (huge pages) allowed -for that filesystem (``/mnt/huge``). The ``size`` option can be specified -in bytes, or as a percentage of the specified huge page pool (``nr_hugepages``). -The size is rounded down to HPAGE_SIZE boundary. - -The ``min_size`` option sets the minimum value of memory (huge pages) allowed -for the filesystem. ``min_size`` can be specified in the same way as ``size``, -either bytes or a percentage of the huge page pool. 
-At mount time, the number of huge pages specified by ``min_size`` are reserved -for use by the filesystem. -If there are not enough free huge pages available, the mount will fail. -As huge pages are allocated to the filesystem and freed, the reserve count -is adjusted so that the sum of allocated and reserved huge pages is always -at least ``min_size``. - -The option ``nr_inodes`` sets the maximum number of inodes that ``/mnt/huge`` -can use. - -If the ``size``, ``min_size`` or ``nr_inodes`` option is not provided on -command line then no limits are set. - -For ``pagesize``, ``size``, ``min_size`` and ``nr_inodes`` options, you can -use [G|g]/[M|m]/[K|k] to represent giga/mega/kilo. -For example, size=2K has the same meaning as size=2048. - -While read system calls are supported on files that reside on hugetlb -file systems, write system calls are not. - -Regular chown, chgrp, and chmod commands (with right permissions) could be -used to change the file attributes on hugetlbfs. - -Also, it is important to note that no such mount command is required if -applications are going to use only shmat/shmget system calls or mmap with -MAP_HUGETLB. For an example of how to use mmap with MAP_HUGETLB see -:ref:`map_hugetlb ` below. - -Users who wish to use hugetlb memory via shared memory segment should be a -member of a supplementary group and system admin needs to configure that gid -into ``/proc/sys/vm/hugetlb_shm_group``. It is possible for same or different -applications to use any combination of mmaps and shm* calls, though the mount of -filesystem will be required for using mmap calls without MAP_HUGETLB. - -Syscalls that operate on memory backed by hugetlb pages only have their lengths -aligned to the native page size of the processor; they will normally fail with -errno set to EINVAL or exclude hugetlb pages that extend beyond the length if -not hugepage aligned. 
For example, munmap(2) will fail if memory is backed by -a hugetlb page and the length is smaller than the hugepage size. - - -Examples -======== - -.. _map_hugetlb: - -``map_hugetlb`` - see tools/testing/selftests/vm/map_hugetlb.c - -``hugepage-shm`` - see tools/testing/selftests/vm/hugepage-shm.c - -``hugepage-mmap`` - see tools/testing/selftests/vm/hugepage-mmap.c - -The `libhugetlbfs`_ library provides a wide range of userspace tools -to help with huge page usability, environment setup, and control. - -.. _libhugetlbfs: https://github.com/libhugetlbfs/libhugetlbfs - -Kernel development regression testing -===================================== - -The most complete set of hugetlb tests are in the libhugetlbfs repository. -If you modify any hugetlb related code, use the libhugetlbfs test suite -to check for regressions. In addition, if you add any new hugetlb -functionality, please add appropriate tests to libhugetlbfs. diff --git a/Documentation/vm/hwpoison.rst b/Documentation/vm/hwpoison.rst new file mode 100644 index 000000000000..070aa1e716b7 --- /dev/null +++ b/Documentation/vm/hwpoison.rst @@ -0,0 +1,186 @@ +.. hwpoison: + +======== +hwpoison +======== + +What is hwpoison? +================= + +Upcoming Intel CPUs have support for recovering from some memory errors +(``MCA recovery``). This requires the OS to declare a page "poisoned", +kill the processes associated with it and avoid using it in the future. + +This patchkit implements the necessary infrastructure in the VM. + +To quote the overview comment: + + * High level machine check handler. Handles pages reported by the + * hardware as being corrupted usually due to a 2bit ECC memory or cache + * failure. + * + * This focusses on pages detected as corrupted in the background. + * When the current CPU tries to consume corruption the currently + * running process can just be killed directly instead. 
This implies + * that if the error cannot be handled for some reason it's safe to + * just ignore it because no corruption has been consumed yet. Instead + * when that happens another machine check will happen. + * + * Handles page cache pages in various states. The tricky part + * here is that we can access any page asynchronous to other VM + * users, because memory failures could happen anytime and anywhere, + * possibly violating some of their assumptions. This is why this code + * has to be extremely careful. Generally it tries to use normal locking + * rules, as in get the standard locks, even if that means the + * error handling takes potentially a long time. + * + * Some of the operations here are somewhat inefficient and have non + * linear algorithmic complexity, because the data structures have not + * been optimized for this case. This is in particular the case + * for the mapping from a vma to a process. Since this case is expected + * to be rare we hope we can get away with this. + +The code consists of the high level handler in mm/memory-failure.c, +a new page poison bit and various checks in the VM to handle poisoned +pages. + +The main target right now is KVM guests, but it works for all kinds +of applications. KVM support requires a recent qemu-kvm release. + +For the KVM use there was need for a new signal type so that +KVM can inject the machine check into the guest with the proper +address. This in theory allows other applications to handle +memory failures too. The expectation is that nearly all applications +won't do that, but some very specialized ones might. + +Failure recovery modes +====================== + +There are two (actually three) modes memory failure recovery can be in: + +vm.memory_failure_recovery sysctl set to zero: + All memory failures cause a panic. Do not attempt recovery. 
+ (on x86 this can be also affected by the tolerant level of the + MCE subsystem) + +early kill + (can be controlled globally and per process) + Send SIGBUS to the application as soon as the error is detected + This allows applications who can process memory errors in a gentle + way (e.g. drop affected object) + This is the mode used by KVM qemu. + +late kill + Send SIGBUS when the application runs into the corrupted page. + This is best for memory error unaware applications and default + Note some pages are always handled as late kill. + +User control +============ + +vm.memory_failure_recovery + See sysctl.txt + +vm.memory_failure_early_kill + Enable early kill mode globally + +PR_MCE_KILL + Set early/late kill mode/revert to system default + + arg1: PR_MCE_KILL_CLEAR: + Revert to system default + arg1: PR_MCE_KILL_SET: + arg2 defines thread specific mode + + PR_MCE_KILL_EARLY: + Early kill + PR_MCE_KILL_LATE: + Late kill + PR_MCE_KILL_DEFAULT + Use system global default + + Note that if you want to have a dedicated thread which handles + the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should + call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise, + the SIGBUS is sent to the main thread. + +PR_MCE_KILL_GET + return current mode + +Testing +======= + +* madvise(MADV_HWPOISON, ....) (as root) - Poison a page in the + process for testing + +* hwpoison-inject module through debugfs ``/sys/kernel/debug/hwpoison/`` + + corrupt-pfn + Inject hwpoison fault at PFN echoed into this file. This does + some early filtering to avoid corrupted unintended pages in test suites. + + unpoison-pfn + Software-unpoison page at PFN echoed into this file. This way + a page can be reused again. This only works for Linux + injected failures, not for real memory failures. 
+ + Note these injection interfaces are not stable and might change between + kernel versions + + corrupt-filter-dev-major, corrupt-filter-dev-minor + Only handle memory failures to pages associated with the file + system defined by block device major/minor. -1U is the + wildcard value. This should be only used for testing with + artificial injection. + + corrupt-filter-memcg + Limit injection to pages owned by memgroup. Specified by inode + number of the memcg. + + Example:: + + mkdir /sys/fs/cgroup/mem/hwpoison + + usemem -m 100 -s 1000 & + echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks + + memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ') + echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg + + page-types -p `pidof init` --hwpoison # shall do nothing + page-types -p `pidof usemem` --hwpoison # poison its pages + + corrupt-filter-flags-mask, corrupt-filter-flags-value + When specified, only poison pages if ((page_flags & mask) == + value). This allows stress testing of many kinds of + pages. The page_flags are the same as in /proc/kpageflags. The + flag bits are defined in include/linux/kernel-page-flags.h and + documented in Documentation/vm/pagemap.rst + +* Architecture specific MCE injector + + x86 has mce-inject, mce-test + + Some portable hwpoison test programs in mce-test, see below. + +References +========== + +http://halobates.de/mce-lc09-2.pdf + Overview presentation from LinuxCon 09 + +git://git.kernel.org/pub/scm/utils/cpu/mce/mce-test.git + Test suite (hwpoison specific portable tests in tsrc) + +git://git.kernel.org/pub/scm/utils/cpu/mce/mce-inject.git + x86 specific injector + + +Limitations +=========== +- Not all page types are supported and never will. Most kernel internal + objects cannot be recovered, only LRU pages for now. +- Right now hugepage support is missing. 
+ +--- +Andi Kleen, Oct 2009 diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt deleted file mode 100644 index b1a8c241d6c2..000000000000 --- a/Documentation/vm/hwpoison.txt +++ /dev/null @@ -1,186 +0,0 @@ -.. hwpoison: - -======== -hwpoison -======== - -What is hwpoison? -================= - -Upcoming Intel CPUs have support for recovering from some memory errors -(``MCA recovery``). This requires the OS to declare a page "poisoned", -kill the processes associated with it and avoid using it in the future. - -This patchkit implements the necessary infrastructure in the VM. - -To quote the overview comment: - - * High level machine check handler. Handles pages reported by the - * hardware as being corrupted usually due to a 2bit ECC memory or cache - * failure. - * - * This focusses on pages detected as corrupted in the background. - * When the current CPU tries to consume corruption the currently - * running process can just be killed directly instead. This implies - * that if the error cannot be handled for some reason it's safe to - * just ignore it because no corruption has been consumed yet. Instead - * when that happens another machine check will happen. - * - * Handles page cache pages in various states. The tricky part - * here is that we can access any page asynchronous to other VM - * users, because memory failures could happen anytime and anywhere, - * possibly violating some of their assumptions. This is why this code - * has to be extremely careful. Generally it tries to use normal locking - * rules, as in get the standard locks, even if that means the - * error handling takes potentially a long time. - * - * Some of the operations here are somewhat inefficient and have non - * linear algorithmic complexity, because the data structures have not - * been optimized for this case. This is in particular the case - * for the mapping from a vma to a process. Since this case is expected - * to be rare we hope we can get away with this. 
- -The code consists of a the high level handler in mm/memory-failure.c, -a new page poison bit and various checks in the VM to handle poisoned -pages. - -The main target right now is KVM guests, but it works for all kinds -of applications. KVM support requires a recent qemu-kvm release. - -For the KVM use there was need for a new signal type so that -KVM can inject the machine check into the guest with the proper -address. This in theory allows other applications to handle -memory failures too. The expection is that near all applications -won't do that, but some very specialized ones might. - -Failure recovery modes -====================== - -There are two (actually three) modes memory failure recovery can be in: - -vm.memory_failure_recovery sysctl set to zero: - All memory failures cause a panic. Do not attempt recovery. - (on x86 this can be also affected by the tolerant level of the - MCE subsystem) - -early kill - (can be controlled globally and per process) - Send SIGBUS to the application as soon as the error is detected - This allows applications who can process memory errors in a gentle - way (e.g. drop affected object) - This is the mode used by KVM qemu. - -late kill - Send SIGBUS when the application runs into the corrupted page. - This is best for memory error unaware applications and default - Note some pages are always handled as late kill. 
- -User control -============ - -vm.memory_failure_recovery - See sysctl.txt - -vm.memory_failure_early_kill - Enable early kill mode globally - -PR_MCE_KILL - Set early/late kill mode/revert to system default - - arg1: PR_MCE_KILL_CLEAR: - Revert to system default - arg1: PR_MCE_KILL_SET: - arg2 defines thread specific mode - - PR_MCE_KILL_EARLY: - Early kill - PR_MCE_KILL_LATE: - Late kill - PR_MCE_KILL_DEFAULT - Use system global default - - Note that if you want to have a dedicated thread which handles - the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should - call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise, - the SIGBUS is sent to the main thread. - -PR_MCE_KILL_GET - return current mode - -Testing -======= - -* madvise(MADV_HWPOISON, ....) (as root) - Poison a page in the - process for testing - -* hwpoison-inject module through debugfs ``/sys/kernel/debug/hwpoison/`` - - corrupt-pfn - Inject hwpoison fault at PFN echoed into this file. This does - some early filtering to avoid corrupted unintended pages in test suites. - - unpoison-pfn - Software-unpoison page at PFN echoed into this file. This way - a page can be reused again. This only works for Linux - injected failures, not for real memory failures. - - Note these injection interfaces are not stable and might change between - kernel versions - - corrupt-filter-dev-major, corrupt-filter-dev-minor - Only handle memory failures to pages associated with the file - system defined by block device major/minor. -1U is the - wildcard value. This should be only used for testing with - artificial injection. - - corrupt-filter-memcg - Limit injection to pages owned by memgroup. Specified by inode - number of the memcg. 
- - Example:: - - mkdir /sys/fs/cgroup/mem/hwpoison - - usemem -m 100 -s 1000 & - echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks - - memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ') - echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg - - page-types -p `pidof init` --hwpoison # shall do nothing - page-types -p `pidof usemem` --hwpoison # poison its pages - - corrupt-filter-flags-mask, corrupt-filter-flags-value - When specified, only poison pages if ((page_flags & mask) == - value). This allows stress testing of many kinds of - pages. The page_flags are the same as in /proc/kpageflags. The - flag bits are defined in include/linux/kernel-page-flags.h and - documented in Documentation/vm/pagemap.txt - -* Architecture specific MCE injector - - x86 has mce-inject, mce-test - - Some portable hwpoison test programs in mce-test, see below. - -References -========== - -http://halobates.de/mce-lc09-2.pdf - Overview presentation from LinuxCon 09 - -git://git.kernel.org/pub/scm/utils/cpu/mce/mce-test.git - Test suite (hwpoison specific portable tests in tsrc) - -git://git.kernel.org/pub/scm/utils/cpu/mce/mce-inject.git - x86 specific injector - - -Limitations -=========== -- Not all page types are supported and never will. Most kernel internal - objects cannot be recovered, only LRU pages for now. -- Right now hugepage support is missing. - ---- -Andi Kleen, Oct 2009 diff --git a/Documentation/vm/idle_page_tracking.rst b/Documentation/vm/idle_page_tracking.rst new file mode 100644 index 000000000000..d1c4609a5220 --- /dev/null +++ b/Documentation/vm/idle_page_tracking.rst @@ -0,0 +1,115 @@ +.. _idle_page_tracking: + +================== +Idle Page Tracking +================== + +Motivation +========== + +The idle page tracking feature allows to track which memory pages are being +accessed by a workload and which are idle. 
This information can be useful for +estimating the workload's working set size, which, in turn, can be taken into +account when configuring the workload parameters, setting memory cgroup limits, +or deciding where to place the workload within a compute cluster. + +It is enabled by CONFIG_IDLE_PAGE_TRACKING=y. + +.. _user_api: + +User API +======== + +The idle page tracking API is located at ``/sys/kernel/mm/page_idle``. +Currently, it consists of the only read-write file, +``/sys/kernel/mm/page_idle/bitmap``. + +The file implements a bitmap where each bit corresponds to a memory page. The +bitmap is represented by an array of 8-byte integers, and the page at PFN #i is +mapped to bit #i%64 of array element #i/64, byte order is native. When a bit is +set, the corresponding page is idle. + +A page is considered idle if it has not been accessed since it was marked idle +(for more details on what "accessed" actually means see the :ref:`Implementation +Details ` section). +To mark a page idle one has to set the bit corresponding to +the page by writing to the file. A value written to the file is OR-ed with the +current bitmap value. + +Only accesses to user memory pages are tracked. These are pages mapped to a +process address space, page cache and buffer pages, swap cache pages. For other +page types (e.g. SLAB pages) an attempt to mark a page idle is silently ignored, +and hence such pages are never reported idle. + +For huge pages the idle flag is set only on the head page, so one has to read +``/proc/kpageflags`` in order to correctly count idle huge pages. + +Reading from or writing to ``/sys/kernel/mm/page_idle/bitmap`` will return +-EINVAL if you are not starting the read/write on an 8-byte boundary, or +if the size of the read/write is not a multiple of 8 bytes. Writing to +this file beyond max PFN will return -ENXIO. + +That said, in order to estimate the amount of pages that are not used by a +workload one should: + + 1. 
Mark all the workload's pages as idle by setting corresponding bits in + ``/sys/kernel/mm/page_idle/bitmap``. The pages can be found by reading + ``/proc/pid/pagemap`` if the workload is represented by a process, or by + filtering out alien pages using ``/proc/kpagecgroup`` in case the workload + is placed in a memory cgroup. + + 2. Wait until the workload accesses its working set. + + 3. Read ``/sys/kernel/mm/page_idle/bitmap`` and count the number of bits set. + If one wants to ignore certain types of pages, e.g. mlocked pages since they + are not reclaimable, he or she can filter them out using + ``/proc/kpageflags``. + +See Documentation/vm/pagemap.rst for more information about +``/proc/pid/pagemap``, ``/proc/kpageflags``, and ``/proc/kpagecgroup``. + +.. _impl_details: + +Implementation Details +====================== + +The kernel internally keeps track of accesses to user memory pages in order to +reclaim unreferenced pages first on memory shortage conditions. A page is +considered referenced if it has been recently accessed via a process address +space, in which case one or more PTEs it is mapped to will have the Accessed bit +set, or marked accessed explicitly by the kernel (see mark_page_accessed()). The +latter happens when: + + - a userspace process reads or writes a page using a system call (e.g. read(2) + or write(2)) + + - a page that is used for storing filesystem buffers is read or written, + because a process needs filesystem metadata stored in it (e.g. lists a + directory tree) + + - a page is accessed by a device driver using get_user_pages() + +When a dirty page is written to swap or disk as a result of memory reclaim or +exceeding the dirty memory limit, it is not marked referenced. + +The idle memory tracking feature adds a new page flag, the Idle flag. This flag +is set manually, by writing to ``/sys/kernel/mm/page_idle/bitmap`` (see the +:ref:`User API ` +section), and cleared automatically whenever a page is referenced as defined +above. 
+ +When a page is marked idle, the Accessed bit must be cleared in all PTEs it is +mapped to, otherwise we will not be able to detect accesses to the page coming +from a process address space. To avoid interference with the reclaimer, which, +as noted above, uses the Accessed bit to promote actively referenced pages, one +more page flag is introduced, the Young flag. When the PTE Accessed bit is +cleared as a result of setting or updating a page's Idle flag, the Young flag +is set on the page. The reclaimer treats the Young flag as an extra PTE +Accessed bit and therefore will consider such a page as referenced. + +Since the idle memory tracking feature is based on the memory reclaimer logic, +it only works with pages that are on an LRU list, other pages are silently +ignored. That means it will ignore a user memory page if it is isolated, but +since there are usually not many of them, it should not affect the overall +result noticeably. In order not to stall scanning of the idle page bitmap, +locked pages may be skipped too. diff --git a/Documentation/vm/idle_page_tracking.txt b/Documentation/vm/idle_page_tracking.txt deleted file mode 100644 index 9cbe6f8d7a99..000000000000 --- a/Documentation/vm/idle_page_tracking.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. _idle_page_tracking: - -================== -Idle Page Tracking -================== - -Motivation -========== - -The idle page tracking feature allows to track which memory pages are being -accessed by a workload and which are idle. This information can be useful for -estimating the workload's working set size, which, in turn, can be taken into -account when configuring the workload parameters, setting memory cgroup limits, -or deciding where to place the workload within a compute cluster. - -It is enabled by CONFIG_IDLE_PAGE_TRACKING=y. - -.. _user_api: - -User API -======== - -The idle page tracking API is located at ``/sys/kernel/mm/page_idle``. 
-Currently, it consists of the only read-write file, -``/sys/kernel/mm/page_idle/bitmap``. - -The file implements a bitmap where each bit corresponds to a memory page. The -bitmap is represented by an array of 8-byte integers, and the page at PFN #i is -mapped to bit #i%64 of array element #i/64, byte order is native. When a bit is -set, the corresponding page is idle. - -A page is considered idle if it has not been accessed since it was marked idle -(for more details on what "accessed" actually means see the :ref:`Implementation -Details ` section). -To mark a page idle one has to set the bit corresponding to -the page by writing to the file. A value written to the file is OR-ed with the -current bitmap value. - -Only accesses to user memory pages are tracked. These are pages mapped to a -process address space, page cache and buffer pages, swap cache pages. For other -page types (e.g. SLAB pages) an attempt to mark a page idle is silently ignored, -and hence such pages are never reported idle. - -For huge pages the idle flag is set only on the head page, so one has to read -``/proc/kpageflags`` in order to correctly count idle huge pages. - -Reading from or writing to ``/sys/kernel/mm/page_idle/bitmap`` will return --EINVAL if you are not starting the read/write on an 8-byte boundary, or -if the size of the read/write is not a multiple of 8 bytes. Writing to -this file beyond max PFN will return -ENXIO. - -That said, in order to estimate the amount of pages that are not used by a -workload one should: - - 1. Mark all the workload's pages as idle by setting corresponding bits in - ``/sys/kernel/mm/page_idle/bitmap``. The pages can be found by reading - ``/proc/pid/pagemap`` if the workload is represented by a process, or by - filtering out alien pages using ``/proc/kpagecgroup`` in case the workload - is placed in a memory cgroup. - - 2. Wait until the workload accesses its working set. - - 3. 
Read ``/sys/kernel/mm/page_idle/bitmap`` and count the number of bits set. - If one wants to ignore certain types of pages, e.g. mlocked pages since they - are not reclaimable, he or she can filter them out using - ``/proc/kpageflags``. - -See Documentation/vm/pagemap.txt for more information about -``/proc/pid/pagemap``, ``/proc/kpageflags``, and ``/proc/kpagecgroup``. - -.. _impl_details: - -Implementation Details -====================== - -The kernel internally keeps track of accesses to user memory pages in order to -reclaim unreferenced pages first on memory shortage conditions. A page is -considered referenced if it has been recently accessed via a process address -space, in which case one or more PTEs it is mapped to will have the Accessed bit -set, or marked accessed explicitly by the kernel (see mark_page_accessed()). The -latter happens when: - - - a userspace process reads or writes a page using a system call (e.g. read(2) - or write(2)) - - - a page that is used for storing filesystem buffers is read or written, - because a process needs filesystem metadata stored in it (e.g. lists a - directory tree) - - - a page is accessed by a device driver using get_user_pages() - -When a dirty page is written to swap or disk as a result of memory reclaim or -exceeding the dirty memory limit, it is not marked referenced. - -The idle memory tracking feature adds a new page flag, the Idle flag. This flag -is set manually, by writing to ``/sys/kernel/mm/page_idle/bitmap`` (see the -:ref:`User API ` -section), and cleared automatically whenever a page is referenced as defined -above. - -When a page is marked idle, the Accessed bit must be cleared in all PTEs it is -mapped to, otherwise we will not be able to detect accesses to the page coming -from a process address space. To avoid interference with the reclaimer, which, -as noted above, uses the Accessed bit to promote actively referenced pages, one -more page flag is introduced, the Young flag. 
When the PTE Accessed bit is -cleared as a result of setting or updating a page's Idle flag, the Young flag -is set on the page. The reclaimer treats the Young flag as an extra PTE -Accessed bit and therefore will consider such a page as referenced. - -Since the idle memory tracking feature is based on the memory reclaimer logic, -it only works with pages that are on an LRU list, other pages are silently -ignored. That means it will ignore a user memory page if it is isolated, but -since there are usually not many of them, it should not affect the overall -result noticeably. In order not to stall scanning of the idle page bitmap, -locked pages may be skipped too. diff --git a/Documentation/vm/ksm.rst b/Documentation/vm/ksm.rst new file mode 100644 index 000000000000..87e7eef5ea9c --- /dev/null +++ b/Documentation/vm/ksm.rst @@ -0,0 +1,183 @@ +.. _ksm: + +======================= +Kernel Samepage Merging +======================= + +KSM is a memory-saving de-duplication feature, enabled by CONFIG_KSM=y, +added to the Linux kernel in 2.6.32. See ``mm/ksm.c`` for its implementation, +and http://lwn.net/Articles/306704/ and http://lwn.net/Articles/330589/ + +The KSM daemon ksmd periodically scans those areas of user memory which +have been registered with it, looking for pages of identical content which +can be replaced by a single write-protected page (which is automatically +copied if a process later wants to update its content). + +KSM was originally developed for use with KVM (where it was known as +Kernel Shared Memory), to fit more virtual machines into physical memory, +by sharing the data common between them. But it can be useful to any +application which generates many instances of the same data. + +KSM only merges anonymous (private) pages, never pagecache (file) pages. 
+KSM's merged pages were originally locked into kernel memory, but can now +be swapped out just like other user pages (but sharing is broken when they +are swapped back in: ksmd must rediscover their identity and merge again). + +KSM only operates on those areas of address space which an application +has advised to be likely candidates for merging, by using the madvise(2) +system call: int madvise(addr, length, MADV_MERGEABLE). + +The app may call int madvise(addr, length, MADV_UNMERGEABLE) to cancel +that advice and restore unshared pages: whereupon KSM unmerges whatever +it merged in that range. Note: this unmerging call may suddenly require +more memory than is available - possibly failing with EAGAIN, but more +probably arousing the Out-Of-Memory killer. + +If KSM is not configured into the running kernel, madvise MADV_MERGEABLE +and MADV_UNMERGEABLE simply fail with EINVAL. If the running kernel was +built with CONFIG_KSM=y, those calls will normally succeed: even if the +KSM daemon is not currently running, MADV_MERGEABLE still registers +the range for whenever the KSM daemon is started; even if the range +cannot contain any pages which KSM could actually merge; even if +MADV_UNMERGEABLE is applied to a range which was never MADV_MERGEABLE. + +If a region of memory must be split into at least one new MADV_MERGEABLE +or MADV_UNMERGEABLE region, the madvise may return ENOMEM if the process +will exceed vm.max_map_count (see Documentation/sysctl/vm.txt). + +Like other madvise calls, they are intended for use on mapped areas of +the user address space: they will report ENOMEM if the specified range +includes unmapped gaps (though working on the intervening mapped areas), +and might fail with EAGAIN if not enough memory for internal structures. + +Applications should be considerate in their use of MADV_MERGEABLE, +restricting its use to areas likely to benefit. KSM's scans may use a lot +of processing power: some installations will disable KSM for that reason. 
+ +The KSM daemon is controlled by sysfs files in ``/sys/kernel/mm/ksm/``, +readable by all but writable only by root: + +pages_to_scan + how many present pages to scan before ksmd goes to sleep + e.g. ``echo 100 > /sys/kernel/mm/ksm/pages_to_scan`` Default: 100 + (chosen for demonstration purposes) + +sleep_millisecs + how many milliseconds ksmd should sleep before next scan + e.g. ``echo 20 > /sys/kernel/mm/ksm/sleep_millisecs`` Default: 20 + (chosen for demonstration purposes) + +merge_across_nodes + specifies if pages from different numa nodes can be merged. + When set to 0, ksm merges only pages which physically reside + in the memory area of same NUMA node. That brings lower + latency to access of shared pages. Systems with more nodes, at + significant NUMA distances, are likely to benefit from the + lower latency of setting 0. Smaller systems, which need to + minimize memory usage, are likely to benefit from the greater + sharing of setting 1 (default). You may wish to compare how + your system performs under each setting, before deciding on + which to use. merge_across_nodes setting can be changed only + when there are no ksm shared pages in system: set run 2 to + unmerge pages first, then to 1 after changing + merge_across_nodes, to remerge according to the new setting. + Default: 1 (merging across nodes as in earlier releases) + +run + set 0 to stop ksmd from running but keep merged pages, + set 1 to run ksmd e.g. ``echo 1 > /sys/kernel/mm/ksm/run``, + set 2 to stop ksmd and unmerge all pages currently merged, but + leave mergeable areas registered for next run Default: 0 (must + be changed to 1 to activate KSM, except if CONFIG_SYSFS is + disabled) + +use_zero_pages + specifies whether empty pages (i.e. allocated pages that only + contain zeroes) should be treated specially. When set to 1, + empty pages are merged with the kernel zero page(s) instead of + with each other as it would happen normally. 
This can improve + the performance on architectures with coloured zero pages, + depending on the workload. Care should be taken when enabling + this setting, as it can potentially degrade the performance of + KSM for some workloads, for example if the checksums of pages + candidate for merging match the checksum of an empty + page. This setting can be changed at any time, it is only + effective for pages merged after the change. Default: 0 + (normal KSM behaviour as in earlier releases) + +max_page_sharing + Maximum sharing allowed for each KSM page. This enforces a + deduplication limit to avoid the virtual memory rmap lists to + grow too large. The minimum value is 2 as a newly created KSM + page will have at least two sharers. The rmap walk has O(N) + complexity where N is the number of rmap_items (i.e. virtual + mappings) that are sharing the page, which is in turn capped + by max_page_sharing. So this effectively spreads the linear + O(N) computational complexity from rmap walk context over + different KSM pages. The ksmd walk over the stable_node + "chains" is also O(N), but N is the number of stable_node + "dups", not the number of rmap_items, so it has not a + significant impact on ksmd performance. In practice the best + stable_node "dup" candidate will be kept and found at the head + of the "dups" list. The higher this value the faster KSM will + merge the memory (because there will be fewer stable_node dups + queued into the stable_node chain->hlist to check for pruning) + and the higher the deduplication factor will be, but the + slower the worst case rmap walk could be for any given KSM + page. Slowing down the rmap_walk means there will be higher + latency for certain virtual memory operations happening during + swapping, compaction, NUMA balancing and page migration, in + turn decreasing responsiveness for the caller of those virtual + memory operations. 
The scheduler latency of other tasks not + involved with the VM operations doing the rmap walk is not + affected by this parameter as the rmap walks are always + schedule friendly themselves. + +stable_node_chains_prune_millisecs + How frequently to walk the whole list of stable_node "dups" + linked in the stable_node "chains" in order to prune stale + stable_nodes. Smaller millisecs values will free up the KSM + metadata with lower latency, but they will make ksmd use more + CPU during the scan. This only applies to the stable_node + chains so it's a noop if not a single KSM page hit the + max_page_sharing yet (there would be no stable_node chains in + such case). + +The effectiveness of KSM and MADV_MERGEABLE is shown in ``/sys/kernel/mm/ksm/``: + +pages_shared + how many shared pages are being used +pages_sharing + how many more sites are sharing them i.e. how much saved +pages_unshared + how many pages unique but repeatedly checked for merging +pages_volatile + how many pages changing too fast to be placed in a tree +full_scans + how many times all mergeable areas have been scanned +stable_node_chains + number of stable node chains allocated, this is effectively + the number of KSM pages that hit the max_page_sharing limit +stable_node_dups + number of stable node dups queued into the stable_node chains + +A high ratio of pages_sharing to pages_shared indicates good sharing, but +a high ratio of pages_unshared to pages_sharing indicates wasted effort. +pages_volatile embraces several different kinds of activity, but a high +proportion there would also indicate poor use of madvise MADV_MERGEABLE. + +The maximum possible page_sharing/page_shared ratio is limited by the +max_page_sharing tunable. To increase the ratio max_page_sharing must +be increased accordingly. 
+ +The stable_node_dups/stable_node_chains ratio is also affected by the +max_page_sharing tunable, and a high ratio may indicate fragmentation +in the stable_node dups, which could be solved by introducing +fragmentation algorithms in ksmd which would refile rmap_items from +one stable_node dup to another stable_node dup, in order to free up +stable_node "dups" with few rmap_items in them, but that may increase +the ksmd CPU usage and possibly slow down the readonly computations on +the KSM pages of the applications. + +Izik Eidus, +Hugh Dickins, 17 Nov 2009 diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt deleted file mode 100644 index 87e7eef5ea9c..000000000000 --- a/Documentation/vm/ksm.txt +++ /dev/null @@ -1,183 +0,0 @@ -.. _ksm: - -======================= -Kernel Samepage Merging -======================= - -KSM is a memory-saving de-duplication feature, enabled by CONFIG_KSM=y, -added to the Linux kernel in 2.6.32. See ``mm/ksm.c`` for its implementation, -and http://lwn.net/Articles/306704/ and http://lwn.net/Articles/330589/ - -The KSM daemon ksmd periodically scans those areas of user memory which -have been registered with it, looking for pages of identical content which -can be replaced by a single write-protected page (which is automatically -copied if a process later wants to update its content). - -KSM was originally developed for use with KVM (where it was known as -Kernel Shared Memory), to fit more virtual machines into physical memory, -by sharing the data common between them. But it can be useful to any -application which generates many instances of the same data. - -KSM only merges anonymous (private) pages, never pagecache (file) pages. -KSM's merged pages were originally locked into kernel memory, but can now -be swapped out just like other user pages (but sharing is broken when they -are swapped back in: ksmd must rediscover their identity and merge again). 
- -KSM only operates on those areas of address space which an application -has advised to be likely candidates for merging, by using the madvise(2) -system call: int madvise(addr, length, MADV_MERGEABLE). - -The app may call int madvise(addr, length, MADV_UNMERGEABLE) to cancel -that advice and restore unshared pages: whereupon KSM unmerges whatever -it merged in that range. Note: this unmerging call may suddenly require -more memory than is available - possibly failing with EAGAIN, but more -probably arousing the Out-Of-Memory killer. - -If KSM is not configured into the running kernel, madvise MADV_MERGEABLE -and MADV_UNMERGEABLE simply fail with EINVAL. If the running kernel was -built with CONFIG_KSM=y, those calls will normally succeed: even if the -the KSM daemon is not currently running, MADV_MERGEABLE still registers -the range for whenever the KSM daemon is started; even if the range -cannot contain any pages which KSM could actually merge; even if -MADV_UNMERGEABLE is applied to a range which was never MADV_MERGEABLE. - -If a region of memory must be split into at least one new MADV_MERGEABLE -or MADV_UNMERGEABLE region, the madvise may return ENOMEM if the process -will exceed vm.max_map_count (see Documentation/sysctl/vm.txt). - -Like other madvise calls, they are intended for use on mapped areas of -the user address space: they will report ENOMEM if the specified range -includes unmapped gaps (though working on the intervening mapped areas), -and might fail with EAGAIN if not enough memory for internal structures. - -Applications should be considerate in their use of MADV_MERGEABLE, -restricting its use to areas likely to benefit. KSM's scans may use a lot -of processing power: some installations will disable KSM for that reason. - -The KSM daemon is controlled by sysfs files in ``/sys/kernel/mm/ksm/``, -readable by all but writable only by root: - -pages_to_scan - how many present pages to scan before ksmd goes to sleep - e.g. 
``echo 100 > /sys/kernel/mm/ksm/pages_to_scan`` Default: 100 - (chosen for demonstration purposes) - -sleep_millisecs - how many milliseconds ksmd should sleep before next scan - e.g. ``echo 20 > /sys/kernel/mm/ksm/sleep_millisecs`` Default: 20 - (chosen for demonstration purposes) - -merge_across_nodes - specifies if pages from different numa nodes can be merged. - When set to 0, ksm merges only pages which physically reside - in the memory area of same NUMA node. That brings lower - latency to access of shared pages. Systems with more nodes, at - significant NUMA distances, are likely to benefit from the - lower latency of setting 0. Smaller systems, which need to - minimize memory usage, are likely to benefit from the greater - sharing of setting 1 (default). You may wish to compare how - your system performs under each setting, before deciding on - which to use. merge_across_nodes setting can be changed only - when there are no ksm shared pages in system: set run 2 to - unmerge pages first, then to 1 after changing - merge_across_nodes, to remerge according to the new setting. - Default: 1 (merging across nodes as in earlier releases) - -run - set 0 to stop ksmd from running but keep merged pages, - set 1 to run ksmd e.g. ``echo 1 > /sys/kernel/mm/ksm/run``, - set 2 to stop ksmd and unmerge all pages currently merged, but - leave mergeable areas registered for next run Default: 0 (must - be changed to 1 to activate KSM, except if CONFIG_SYSFS is - disabled) - -use_zero_pages - specifies whether empty pages (i.e. allocated pages that only - contain zeroes) should be treated specially. When set to 1, - empty pages are merged with the kernel zero page(s) instead of - with each other as it would happen normally. This can improve - the performance on architectures with coloured zero pages, - depending on the workload. 
Care should be taken when enabling - this setting, as it can potentially degrade the performance of - KSM for some workloads, for example if the checksums of pages - candidate for merging match the checksum of an empty - page. This setting can be changed at any time, it is only - effective for pages merged after the change. Default: 0 - (normal KSM behaviour as in earlier releases) - -max_page_sharing - Maximum sharing allowed for each KSM page. This enforces a - deduplication limit to avoid the virtual memory rmap lists to - grow too large. The minimum value is 2 as a newly created KSM - page will have at least two sharers. The rmap walk has O(N) - complexity where N is the number of rmap_items (i.e. virtual - mappings) that are sharing the page, which is in turn capped - by max_page_sharing. So this effectively spread the the linear - O(N) computational complexity from rmap walk context over - different KSM pages. The ksmd walk over the stable_node - "chains" is also O(N), but N is the number of stable_node - "dups", not the number of rmap_items, so it has not a - significant impact on ksmd performance. In practice the best - stable_node "dup" candidate will be kept and found at the head - of the "dups" list. The higher this value the faster KSM will - merge the memory (because there will be fewer stable_node dups - queued into the stable_node chain->hlist to check for pruning) - and the higher the deduplication factor will be, but the - slowest the worst case rmap walk could be for any given KSM - page. Slowing down the rmap_walk means there will be higher - latency for certain virtual memory operations happening during - swapping, compaction, NUMA balancing and page migration, in - turn decreasing responsiveness for the caller of those virtual - memory operations. The scheduler latency of other tasks not - involved with the VM operations doing the rmap walk is not - affected by this parameter as the rmap walks are always - schedule friendly themselves. 
- -stable_node_chains_prune_millisecs - How frequently to walk the whole list of stable_node "dups" - linked in the stable_node "chains" in order to prune stale - stable_nodes. Smaller milllisecs values will free up the KSM - metadata with lower latency, but they will make ksmd use more - CPU during the scan. This only applies to the stable_node - chains so it's a noop if not a single KSM page hit the - max_page_sharing yet (there would be no stable_node chains in - such case). - -The effectiveness of KSM and MADV_MERGEABLE is shown in ``/sys/kernel/mm/ksm/``: - -pages_shared - how many shared pages are being used -pages_sharing - how many more sites are sharing them i.e. how much saved -pages_unshared - how many pages unique but repeatedly checked for merging -pages_volatile - how many pages changing too fast to be placed in a tree -full_scans - how many times all mergeable areas have been scanned -stable_node_chains - number of stable node chains allocated, this is effectively - the number of KSM pages that hit the max_page_sharing limit -stable_node_dups - number of stable node dups queued into the stable_node chains - -A high ratio of pages_sharing to pages_shared indicates good sharing, but -a high ratio of pages_unshared to pages_sharing indicates wasted effort. -pages_volatile embraces several different kinds of activity, but a high -proportion there would also indicate poor use of madvise MADV_MERGEABLE. - -The maximum possible page_sharing/page_shared ratio is limited by the -max_page_sharing tunable. To increase the ratio max_page_sharing must -be increased accordingly. 
- -The stable_node_dups/stable_node_chains ratio is also affected by the -max_page_sharing tunable, and an high ratio may indicate fragmentation -in the stable_node dups, which could be solved by introducing -fragmentation algorithms in ksmd which would refile rmap_items from -one stable_node dup to another stable_node dup, in order to freeup -stable_node "dups" with few rmap_items in them, but that may increase -the ksmd CPU usage and possibly slowdown the readonly computations on -the KSM pages of the applications. - -Izik Eidus, -Hugh Dickins, 17 Nov 2009 diff --git a/Documentation/vm/mmu_notifier.rst b/Documentation/vm/mmu_notifier.rst new file mode 100644 index 000000000000..47baa1cf28c5 --- /dev/null +++ b/Documentation/vm/mmu_notifier.rst @@ -0,0 +1,99 @@ +.. _mmu_notifier: + +When do you need to notify inside page table lock ? +=================================================== + +When clearing a pte/pmd we are given a choice to notify the event through +(notify version of \*_clear_flush call mmu_notifier_invalidate_range) under +the page table lock. But that notification is not necessary in all cases. + +For secondary TLB (non CPU TLB) like IOMMU TLB or device TLB (when device use +thing like ATS/PASID to get the IOMMU to walk the CPU page table to access a +process virtual address space). There is only 2 cases when you need to notify +those secondary TLB while holding page table lock when clearing a pte/pmd: + + A) page backing address is free before mmu_notifier_invalidate_range_end() + B) a page table entry is updated to point to a new page (COW, write fault + on zero page, __replace_page(), ...) + +Case A is obvious you do not want to take the risk for the device to write to +a page that might now be used by some completely different task. + +Case B is more subtle. 
For correctness it requires the following sequence to +happen: + + - take page table lock + - clear page table entry and notify ([pmd/pte]p_huge_clear_flush_notify()) + - set page table entry to point to new page + +If clearing the page table entry is not followed by a notify before setting +the new pte/pmd value then you can break memory model like C11 or C++11 for +the device. + +Consider the following scenario (device use a feature similar to ATS/PASID): + +Two address addrA and addrB such that \|addrA - addrB\| >= PAGE_SIZE we assume +they are write protected for COW (other case of B apply too). + +:: + + [Time N] -------------------------------------------------------------------- + CPU-thread-0 {try to write to addrA} + CPU-thread-1 {try to write to addrB} + CPU-thread-2 {} + CPU-thread-3 {} + DEV-thread-0 {read addrA and populate device TLB} + DEV-thread-2 {read addrB and populate device TLB} + [Time N+1] ------------------------------------------------------------------ + CPU-thread-0 {COW_step0: {mmu_notifier_invalidate_range_start(addrA)}} + CPU-thread-1 {COW_step0: {mmu_notifier_invalidate_range_start(addrB)}} + CPU-thread-2 {} + CPU-thread-3 {} + DEV-thread-0 {} + DEV-thread-2 {} + [Time N+2] ------------------------------------------------------------------ + CPU-thread-0 {COW_step1: {update page table to point to new page for addrA}} + CPU-thread-1 {COW_step1: {update page table to point to new page for addrB}} + CPU-thread-2 {} + CPU-thread-3 {} + DEV-thread-0 {} + DEV-thread-2 {} + [Time N+3] ------------------------------------------------------------------ + CPU-thread-0 {preempted} + CPU-thread-1 {preempted} + CPU-thread-2 {write to addrA which is a write to new page} + CPU-thread-3 {} + DEV-thread-0 {} + DEV-thread-2 {} + [Time N+3] ------------------------------------------------------------------ + CPU-thread-0 {preempted} + CPU-thread-1 {preempted} + CPU-thread-2 {} + CPU-thread-3 {write to addrB which is a write to new page} + DEV-thread-0 
{} + DEV-thread-2 {} + [Time N+4] ------------------------------------------------------------------ + CPU-thread-0 {preempted} + CPU-thread-1 {COW_step3: {mmu_notifier_invalidate_range_end(addrB)}} + CPU-thread-2 {} + CPU-thread-3 {} + DEV-thread-0 {} + DEV-thread-2 {} + [Time N+5] ------------------------------------------------------------------ + CPU-thread-0 {preempted} + CPU-thread-1 {} + CPU-thread-2 {} + CPU-thread-3 {} + DEV-thread-0 {read addrA from old page} + DEV-thread-2 {read addrB from new page} + +So here because at time N+2 the clear page table entry was not paired with a +notification to invalidate the secondary TLB, the device sees the new value for +addrB before seeing the new value for addrA. This breaks total memory ordering +for the device. + +When changing a pte to write protect or to point to a new write protected page +with same content (KSM) it is fine to delay the mmu_notifier_invalidate_range +call to mmu_notifier_invalidate_range_end() outside the page table lock. This +is true even if the thread doing the page table update is preempted right after +releasing page table lock but before calling mmu_notifier_invalidate_range_end(). diff --git a/Documentation/vm/mmu_notifier.txt b/Documentation/vm/mmu_notifier.txt deleted file mode 100644 index 47baa1cf28c5..000000000000 --- a/Documentation/vm/mmu_notifier.txt +++ /dev/null @@ -1,99 +0,0 @@ -.. _mmu_notifier: - -When do you need to notify inside page table lock ? -=================================================== - -When clearing a pte/pmd we are given a choice to notify the event through -(notify version of \*_clear_flush call mmu_notifier_invalidate_range) under -the page table lock. But that notification is not necessary in all cases. - -For secondary TLB (non CPU TLB) like IOMMU TLB or device TLB (when device use -thing like ATS/PASID to get the IOMMU to walk the CPU page table to access a -process virtual address space). 
There is only 2 cases when you need to notify -those secondary TLB while holding page table lock when clearing a pte/pmd: - - A) page backing address is free before mmu_notifier_invalidate_range_end() - B) a page table entry is updated to point to a new page (COW, write fault - on zero page, __replace_page(), ...) - -Case A is obvious you do not want to take the risk for the device to write to -a page that might now be used by some completely different task. - -Case B is more subtle. For correctness it requires the following sequence to -happen: - - - take page table lock - - clear page table entry and notify ([pmd/pte]p_huge_clear_flush_notify()) - - set page table entry to point to new page - -If clearing the page table entry is not followed by a notify before setting -the new pte/pmd value then you can break memory model like C11 or C++11 for -the device. - -Consider the following scenario (device use a feature similar to ATS/PASID): - -Two address addrA and addrB such that \|addrA - addrB\| >= PAGE_SIZE we assume -they are write protected for COW (other case of B apply too). 
- -:: - - [Time N] -------------------------------------------------------------------- - CPU-thread-0 {try to write to addrA} - CPU-thread-1 {try to write to addrB} - CPU-thread-2 {} - CPU-thread-3 {} - DEV-thread-0 {read addrA and populate device TLB} - DEV-thread-2 {read addrB and populate device TLB} - [Time N+1] ------------------------------------------------------------------ - CPU-thread-0 {COW_step0: {mmu_notifier_invalidate_range_start(addrA)}} - CPU-thread-1 {COW_step0: {mmu_notifier_invalidate_range_start(addrB)}} - CPU-thread-2 {} - CPU-thread-3 {} - DEV-thread-0 {} - DEV-thread-2 {} - [Time N+2] ------------------------------------------------------------------ - CPU-thread-0 {COW_step1: {update page table to point to new page for addrA}} - CPU-thread-1 {COW_step1: {update page table to point to new page for addrB}} - CPU-thread-2 {} - CPU-thread-3 {} - DEV-thread-0 {} - DEV-thread-2 {} - [Time N+3] ------------------------------------------------------------------ - CPU-thread-0 {preempted} - CPU-thread-1 {preempted} - CPU-thread-2 {write to addrA which is a write to new page} - CPU-thread-3 {} - DEV-thread-0 {} - DEV-thread-2 {} - [Time N+3] ------------------------------------------------------------------ - CPU-thread-0 {preempted} - CPU-thread-1 {preempted} - CPU-thread-2 {} - CPU-thread-3 {write to addrB which is a write to new page} - DEV-thread-0 {} - DEV-thread-2 {} - [Time N+4] ------------------------------------------------------------------ - CPU-thread-0 {preempted} - CPU-thread-1 {COW_step3: {mmu_notifier_invalidate_range_end(addrB)}} - CPU-thread-2 {} - CPU-thread-3 {} - DEV-thread-0 {} - DEV-thread-2 {} - [Time N+5] ------------------------------------------------------------------ - CPU-thread-0 {preempted} - CPU-thread-1 {} - CPU-thread-2 {} - CPU-thread-3 {} - DEV-thread-0 {read addrA from old page} - DEV-thread-2 {read addrB from new page} - -So here because at time N+2 the clear page table entry was not pair with a -notification 
to invalidate the secondary TLB, the device see the new value for -addrB before seing the new value for addrA. This break total memory ordering -for the device. - -When changing a pte to write protect or to point to a new write protected page -with same content (KSM) it is fine to delay the mmu_notifier_invalidate_range -call to mmu_notifier_invalidate_range_end() outside the page table lock. This -is true even if the thread doing the page table update is preempted right after -releasing page table lock but before call mmu_notifier_invalidate_range_end(). diff --git a/Documentation/vm/numa b/Documentation/vm/numa deleted file mode 100644 index c81e7c56f0f9..000000000000 --- a/Documentation/vm/numa +++ /dev/null @@ -1,150 +0,0 @@ -.. _numa: - -Started Nov 1999 by Kanoj Sarcar - -============= -What is NUMA? -============= - -This question can be answered from a couple of perspectives: the -hardware view and the Linux software view. - -From the hardware perspective, a NUMA system is a computer platform that -comprises multiple components or assemblies each of which may contain 0 -or more CPUs, local memory, and/or IO buses. For brevity and to -disambiguate the hardware view of these physical components/assemblies -from the software abstraction thereof, we'll call the components/assemblies -'cells' in this document. - -Each of the 'cells' may be viewed as an SMP [symmetric multi-processor] subset -of the system--although some components necessary for a stand-alone SMP system -may not be populated on any given cell. The cells of the NUMA system are -connected together with some sort of system interconnect--e.g., a crossbar or -point-to-point link are common types of NUMA system interconnects. Both of -these types of interconnects can be aggregated to create NUMA platforms with -cells at multiple distances from other cells. - -For Linux, the NUMA platforms of interest are primarily what is known as Cache -Coherent NUMA or ccNUMA systems. 
With ccNUMA systems, all memory is visible -to and accessible from any CPU attached to any cell and cache coherency -is handled in hardware by the processor caches and/or the system interconnect. - -Memory access time and effective memory bandwidth varies depending on how far -away the cell containing the CPU or IO bus making the memory access is from the -cell containing the target memory. For example, access to memory by CPUs -attached to the same cell will experience faster access times and higher -bandwidths than accesses to memory on other, remote cells. NUMA platforms -can have cells at multiple remote distances from any given cell. - -Platform vendors don't build NUMA systems just to make software developers' -lives interesting. Rather, this architecture is a means to provide scalable -memory bandwidth. However, to achieve scalable memory bandwidth, system and -application software must arrange for a large majority of the memory references -[cache misses] to be to "local" memory--memory on the same cell, if any--or -to the closest cell with memory. - -This leads to the Linux software view of a NUMA system: - -Linux divides the system's hardware resources into multiple software -abstractions called "nodes". Linux maps the nodes onto the physical cells -of the hardware platform, abstracting away some of the details for some -architectures. As with physical cells, software nodes may contain 0 or more -CPUs, memory and/or IO buses. And, again, memory accesses to memory on -"closer" nodes--nodes that map to closer cells--will generally experience -faster access times and higher effective bandwidth than accesses to more -remote cells. - -For some architectures, such as x86, Linux will "hide" any node representing a -physical cell that has no memory attached, and reassign any CPUs attached to -that cell to a node representing a cell that does have memory. 
Thus, on -these architectures, one cannot assume that all CPUs that Linux associates with -a given node will see the same local memory access times and bandwidth. - -In addition, for some architectures, again x86 is an example, Linux supports -the emulation of additional nodes. For NUMA emulation, linux will carve up -the existing nodes--or the system memory for non-NUMA platforms--into multiple -nodes. Each emulated node will manage a fraction of the underlying cells' -physical memory. NUMA emluation is useful for testing NUMA kernel and -application features on non-NUMA platforms, and as a sort of memory resource -management mechanism when used together with cpusets. -[see Documentation/cgroup-v1/cpusets.txt] - -For each node with memory, Linux constructs an independent memory management -subsystem, complete with its own free page lists, in-use page lists, usage -statistics and locks to mediate access. In addition, Linux constructs for -each memory zone [one or more of DMA, DMA32, NORMAL, HIGH_MEMORY, MOVABLE], -an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a -selected zone/node cannot satisfy the allocation request. This situation, -when a zone has no available memory to satisfy a request, is called -"overflow" or "fallback". - -Because some nodes contain multiple zones containing different types of -memory, Linux must decide whether to order the zonelists such that allocations -fall back to the same zone type on a different node, or to a different zone -type on the same node. This is an important consideration because some zones, -such as DMA or DMA32, represent relatively scarce resources. Linux chooses -a default Node ordered zonelist. This means it tries to fallback to other zones -from the same node before using remote nodes which are ordered by NUMA distance. - -By default, Linux will attempt to satisfy memory allocation requests from the -node to which the CPU that executes the request is assigned. 
Specifically, -Linux will attempt to allocate from the first node in the appropriate zonelist -for the node where the request originates. This is called "local allocation." -If the "local" node cannot satisfy the request, the kernel will examine other -nodes' zones in the selected zonelist looking for the first zone in the list -that can satisfy the request. - -Local allocation will tend to keep subsequent access to the allocated memory -"local" to the underlying physical resources and off the system interconnect-- -as long as the task on whose behalf the kernel allocated some memory does not -later migrate away from that memory. The Linux scheduler is aware of the -NUMA topology of the platform--embodied in the "scheduling domains" data -structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler -attempts to minimize task migration to distant scheduling domains. However, -the scheduler does not take a task's NUMA footprint into account directly. -Thus, under sufficient imbalance, tasks can migrate between nodes, remote -from their initial node and kernel data structures. - -System administrators and application designers can restrict a task's migration -to improve NUMA locality using various CPU affinity command line interfaces, -such as taskset(1) and numactl(1), and program interfaces such as -sched_setaffinity(2). Further, one can modify the kernel's default local -allocation behavior using Linux NUMA memory policy. -[see Documentation/vm/numa_memory_policy.txt.] - -System administrators can restrict the CPUs and nodes' memories that a non- -privileged user can specify in the scheduling or NUMA commands and functions -using control groups and CPUsets. [see Documentation/cgroup-v1/cpusets.txt] - -On architectures that do not hide memoryless nodes, Linux will include only -zones [nodes] with memory in the zonelists. 
This means that for a memoryless -node the "local memory node"--the node of the first zone in CPU's node's -zonelist--will not be the node itself. Rather, it will be the node that the -kernel selected as the nearest node with memory when it built the zonelists. -So, default, local allocations will succeed with the kernel supplying the -closest available memory. This is a consequence of the same mechanism that -allows such allocations to fallback to other nearby nodes when a node that -does contain memory overflows. - -Some kernel allocations do not want or cannot tolerate this allocation fallback -behavior. Rather they want to be sure they get memory from the specified node -or get notified that the node has no free memory. This is usually the case when -a subsystem allocates per CPU memory resources, for example. - -A typical model for making such an allocation is to obtain the node id of the -node to which the "current CPU" is attached using one of the kernel's -numa_node_id() or CPU_to_node() functions and then request memory from only -the node id returned. When such an allocation fails, the requesting subsystem -may revert to its own fallback path. The slab kernel memory allocator is an -example of this. Or, the subsystem may choose to disable or not to enable -itself on allocation failure. The kernel profiling subsystem is an example of -this. - -If the architecture supports--does not hide--memoryless nodes, then CPUs -attached to memoryless nodes would always incur the fallback path overhead -or some subsystems would fail to initialize if they attempted to allocated -memory exclusively from a node without memory. To support such -architectures transparently, kernel subsystems can use the numa_mem_id() -or cpu_to_mem() function to locate the "local memory node" for the calling or -specified CPU. Again, this is the same node from which default, local page -allocations will be attempted. 
diff --git a/Documentation/vm/numa.rst b/Documentation/vm/numa.rst new file mode 100644 index 000000000000..aada84bc8c46 --- /dev/null +++ b/Documentation/vm/numa.rst @@ -0,0 +1,150 @@ +.. _numa: + +Started Nov 1999 by Kanoj Sarcar + +============= +What is NUMA? +============= + +This question can be answered from a couple of perspectives: the +hardware view and the Linux software view. + +From the hardware perspective, a NUMA system is a computer platform that +comprises multiple components or assemblies each of which may contain 0 +or more CPUs, local memory, and/or IO buses. For brevity and to +disambiguate the hardware view of these physical components/assemblies +from the software abstraction thereof, we'll call the components/assemblies +'cells' in this document. + +Each of the 'cells' may be viewed as an SMP [symmetric multi-processor] subset +of the system--although some components necessary for a stand-alone SMP system +may not be populated on any given cell. The cells of the NUMA system are +connected together with some sort of system interconnect--e.g., a crossbar or +point-to-point link are common types of NUMA system interconnects. Both of +these types of interconnects can be aggregated to create NUMA platforms with +cells at multiple distances from other cells. + +For Linux, the NUMA platforms of interest are primarily what is known as Cache +Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible +to and accessible from any CPU attached to any cell and cache coherency +is handled in hardware by the processor caches and/or the system interconnect. + +Memory access time and effective memory bandwidth varies depending on how far +away the cell containing the CPU or IO bus making the memory access is from the +cell containing the target memory. For example, access to memory by CPUs +attached to the same cell will experience faster access times and higher +bandwidths than accesses to memory on other, remote cells. 
NUMA platforms +can have cells at multiple remote distances from any given cell. + +Platform vendors don't build NUMA systems just to make software developers' +lives interesting. Rather, this architecture is a means to provide scalable +memory bandwidth. However, to achieve scalable memory bandwidth, system and +application software must arrange for a large majority of the memory references +[cache misses] to be to "local" memory--memory on the same cell, if any--or +to the closest cell with memory. + +This leads to the Linux software view of a NUMA system: + +Linux divides the system's hardware resources into multiple software +abstractions called "nodes". Linux maps the nodes onto the physical cells +of the hardware platform, abstracting away some of the details for some +architectures. As with physical cells, software nodes may contain 0 or more +CPUs, memory and/or IO buses. And, again, memory accesses to memory on +"closer" nodes--nodes that map to closer cells--will generally experience +faster access times and higher effective bandwidth than accesses to more +remote cells. + +For some architectures, such as x86, Linux will "hide" any node representing a +physical cell that has no memory attached, and reassign any CPUs attached to +that cell to a node representing a cell that does have memory. Thus, on +these architectures, one cannot assume that all CPUs that Linux associates with +a given node will see the same local memory access times and bandwidth. + +In addition, for some architectures, again x86 is an example, Linux supports +the emulation of additional nodes. For NUMA emulation, linux will carve up +the existing nodes--or the system memory for non-NUMA platforms--into multiple +nodes. Each emulated node will manage a fraction of the underlying cells' +physical memory. NUMA emulation is useful for testing NUMA kernel and +application features on non-NUMA platforms, and as a sort of memory resource +management mechanism when used together with cpusets. 
+[see Documentation/cgroup-v1/cpusets.txt] + +For each node with memory, Linux constructs an independent memory management +subsystem, complete with its own free page lists, in-use page lists, usage +statistics and locks to mediate access. In addition, Linux constructs for +each memory zone [one or more of DMA, DMA32, NORMAL, HIGH_MEMORY, MOVABLE], +an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a +selected zone/node cannot satisfy the allocation request. This situation, +when a zone has no available memory to satisfy a request, is called +"overflow" or "fallback". + +Because some nodes contain multiple zones containing different types of +memory, Linux must decide whether to order the zonelists such that allocations +fall back to the same zone type on a different node, or to a different zone +type on the same node. This is an important consideration because some zones, +such as DMA or DMA32, represent relatively scarce resources. Linux chooses +a default Node ordered zonelist. This means it tries to fallback to other zones +from the same node before using remote nodes which are ordered by NUMA distance. + +By default, Linux will attempt to satisfy memory allocation requests from the +node to which the CPU that executes the request is assigned. Specifically, +Linux will attempt to allocate from the first node in the appropriate zonelist +for the node where the request originates. This is called "local allocation." +If the "local" node cannot satisfy the request, the kernel will examine other +nodes' zones in the selected zonelist looking for the first zone in the list +that can satisfy the request. + +Local allocation will tend to keep subsequent access to the allocated memory +"local" to the underlying physical resources and off the system interconnect-- +as long as the task on whose behalf the kernel allocated some memory does not +later migrate away from that memory. 
The Linux scheduler is aware of the +NUMA topology of the platform--embodied in the "scheduling domains" data +structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler +attempts to minimize task migration to distant scheduling domains. However, +the scheduler does not take a task's NUMA footprint into account directly. +Thus, under sufficient imbalance, tasks can migrate between nodes, remote +from their initial node and kernel data structures. + +System administrators and application designers can restrict a task's migration +to improve NUMA locality using various CPU affinity command line interfaces, +such as taskset(1) and numactl(1), and program interfaces such as +sched_setaffinity(2). Further, one can modify the kernel's default local +allocation behavior using Linux NUMA memory policy. +[see Documentation/vm/numa_memory_policy.rst.] + +System administrators can restrict the CPUs and nodes' memories that a non- +privileged user can specify in the scheduling or NUMA commands and functions +using control groups and CPUsets. [see Documentation/cgroup-v1/cpusets.txt] + +On architectures that do not hide memoryless nodes, Linux will include only +zones [nodes] with memory in the zonelists. This means that for a memoryless +node the "local memory node"--the node of the first zone in CPU's node's +zonelist--will not be the node itself. Rather, it will be the node that the +kernel selected as the nearest node with memory when it built the zonelists. +So, default, local allocations will succeed with the kernel supplying the +closest available memory. This is a consequence of the same mechanism that +allows such allocations to fallback to other nearby nodes when a node that +does contain memory overflows. + +Some kernel allocations do not want or cannot tolerate this allocation fallback +behavior. Rather they want to be sure they get memory from the specified node +or get notified that the node has no free memory. 
This is usually the case when +a subsystem allocates per CPU memory resources, for example. + +A typical model for making such an allocation is to obtain the node id of the +node to which the "current CPU" is attached using one of the kernel's +numa_node_id() or CPU_to_node() functions and then request memory from only +the node id returned. When such an allocation fails, the requesting subsystem +may revert to its own fallback path. The slab kernel memory allocator is an +example of this. Or, the subsystem may choose to disable or not to enable +itself on allocation failure. The kernel profiling subsystem is an example of +this. + +If the architecture supports--does not hide--memoryless nodes, then CPUs +attached to memoryless nodes would always incur the fallback path overhead +or some subsystems would fail to initialize if they attempted to allocate +memory exclusively from a node without memory. To support such +architectures transparently, kernel subsystems can use the numa_mem_id() +or cpu_to_mem() function to locate the "local memory node" for the calling or +specified CPU. Again, this is the same node from which default, local page +allocations will be attempted. diff --git a/Documentation/vm/numa_memory_policy.rst b/Documentation/vm/numa_memory_policy.rst new file mode 100644 index 000000000000..8cd942ca114e --- /dev/null +++ b/Documentation/vm/numa_memory_policy.rst @@ -0,0 +1,485 @@ +.. _numa_memory_policy: + +=================== +Linux Memory Policy +=================== + +What is Linux Memory Policy? +============================ + +In the Linux kernel, "memory policy" determines from which node the kernel will +allocate memory in a NUMA system or in an emulated NUMA system. Linux has +supported platforms with Non-Uniform Memory Access architectures since 2.4.?. +The current memory policy support was added to Linux 2.6 around May 2004. This +document attempts to describe the concepts and APIs of the 2.6 memory policy +support. 
+ +Memory policies should not be confused with cpusets +(``Documentation/cgroup-v1/cpusets.txt``) +which is an administrative mechanism for restricting the nodes from which +memory may be allocated by a set of processes. Memory policies are a +programming interface that a NUMA-aware application can take advantage of. When +both cpusets and policies are applied to a task, the restrictions of the cpuset +takes priority. See :ref:`Memory Policies and cpusets ` +below for more details. + +Memory Policy Concepts +====================== + +Scope of Memory Policies +------------------------ + +The Linux kernel supports _scopes_ of memory policy, described here from +most general to most specific: + +System Default Policy + this policy is "hard coded" into the kernel. It is the policy + that governs all page allocations that aren't controlled by + one of the more specific policy scopes discussed below. When + the system is "up and running", the system default policy will + use "local allocation" described below. However, during boot + up, the system default policy will be set to interleave + allocations across all nodes with "sufficient" memory, so as + not to overload the initial boot node with boot-time + allocations. + +Task/Process Policy + this is an optional, per-task policy. When defined for a specific task, this policy controls all page allocations made by or on behalf of the task that aren't controlled by a more specific scope. If a task does not define a task policy, then all page allocations that would have been controlled by the task policy "fall back" to the System Default Policy. + + The task policy applies to the entire address space of a task. Thus, + it is inheritable, and indeed is inherited, across both fork() + [clone() w/o the CLONE_VM flag] and exec*(). This allows a parent task + to establish the task policy for a child task exec()'d from an + executable image that has no awareness of memory policy. 
See the + MEMORY POLICY APIS section, below, for an overview of the system call + that a task may use to set/change its task/process policy. + + In a multi-threaded task, task policies apply only to the thread + [Linux kernel task] that installs the policy and any threads + subsequently created by that thread. Any sibling threads existing + at the time a new task policy is installed retain their current + policy. + + A task policy applies only to pages allocated after the policy is + installed. Any pages already faulted in by the task when the task + changes its task policy remain where they were allocated based on + the policy at the time they were allocated. + +.. _vma_policy: + +VMA Policy + A "VMA" or "Virtual Memory Area" refers to a range of a task's + virtual address space. A task may define a specific policy for a range + of its virtual address space. See the MEMORY POLICIES APIS section, + below, for an overview of the mbind() system call used to set a VMA + policy. + + A VMA policy will govern the allocation of pages that back + this region of the address space. Any regions of the task's + address space that don't have an explicit VMA policy will fall + back to the task policy, which may itself fall back to the + System Default Policy. + + VMA policies have a few complicating details: + + * VMA policy applies ONLY to anonymous pages. These include + pages allocated for anonymous segments, such as the task + stack and heap, and any regions of the address space + mmap()ed with the MAP_ANONYMOUS flag. If a VMA policy is + applied to a file mapping, it will be ignored if the mapping + used the MAP_SHARED flag. If the file mapping used the + MAP_PRIVATE flag, the VMA policy will only be applied when + an anonymous page is allocated on an attempt to write to the + mapping-- i.e., at Copy-On-Write. + + * VMA policies are shared between all tasks that share a + virtual address space--a.k.a. 
threads--independent of when + the policy is installed; and they are inherited across + fork(). However, because VMA policies refer to a specific + region of a task's address space, and because the address + space is discarded and recreated on exec*(), VMA policies + are NOT inheritable across exec(). Thus, only NUMA-aware + applications may use VMA policies. + + * A task may install a new VMA policy on a sub-range of a + previously mmap()ed region. When this happens, Linux splits + the existing virtual memory area into 2 or 3 VMAs, each with + its own policy. + + * By default, VMA policy applies only to pages allocated after + the policy is installed. Any pages already faulted into the + VMA range remain where they were allocated based on the + policy at the time they were allocated. However, since + 2.6.16, Linux supports page migration via the mbind() system + call, so that page contents can be moved to match a newly + installed policy. + +Shared Policy + Conceptually, shared policies apply to "memory objects" mapped + shared into one or more tasks' distinct address spaces. An + application installs shared policies the same way as VMA + policies--using the mbind() system call specifying a range of + virtual addresses that map the shared object. However, unlike + VMA policies, which can be considered to be an attribute of a + range of a task's address space, shared policies apply + directly to the shared object. Thus, all tasks that attach to + the object share the policy, and all pages allocated for the + shared object, by any task, will obey the shared policy. + + As of 2.6.22, only shared memory segments, created by shmget() or + mmap(MAP_ANONYMOUS|MAP_SHARED), support shared policy. When shared + policy support was added to Linux, the associated data structures were + added to hugetlbfs shmem segments. 
At the time, hugetlbfs did not + support allocation at fault time--a.k.a lazy allocation--so hugetlbfs + shmem segments were never "hooked up" to the shared policy support. + Although hugetlbfs segments now support lazy allocation, their support + for shared policy has not been completed. + + As mentioned above :ref:`VMA policies `, + allocations of page cache pages for regular files mmap()ed + with MAP_SHARED ignore any VMA policy installed on the virtual + address range backed by the shared file mapping. Rather, + shared page cache pages, including pages backing private + mappings that have not yet been written by the task, follow + task policy, if any, else System Default Policy. + + The shared policy infrastructure supports different policies on subset + ranges of the shared object. However, Linux still splits the VMA of + the task that installs the policy for each range of distinct policy. + Thus, different tasks that attach to a shared memory segment can have + different VMA configurations mapping that one shared object. This + can be seen by examining the /proc//numa_maps of tasks sharing + a shared memory region, when one task has installed shared policy on + one or more ranges of the region. + +Components of Memory Policies +----------------------------- + +A Linux memory policy consists of a "mode", optional mode flags, and +an optional set of nodes. The mode determines the behavior of the +policy, the optional mode flags determine the behavior of the mode, +and the optional set of nodes can be viewed as the arguments to the +policy behavior. + +Internally, memory policies are implemented by a reference counted +structure, struct mempolicy. Details of this structure will be +discussed in context, below, as required to explain the behavior. + +Linux memory policy supports the following 4 behavioral modes: + +Default Mode--MPOL_DEFAULT + This mode is only used in the memory policy APIs. 
Internally, + MPOL_DEFAULT is converted to the NULL memory policy in all + policy scopes. Any existing non-default policy will simply be + removed when MPOL_DEFAULT is specified. As a result, + MPOL_DEFAULT means "fall back to the next most specific policy + scope." + + For example, a NULL or default task policy will fall back to the + system default policy. A NULL or default vma policy will fall + back to the task policy. + + When specified in one of the memory policy APIs, the Default mode + does not use the optional set of nodes. + + It is an error for the set of nodes specified for this policy to + be non-empty. + +MPOL_BIND + This mode specifies that memory must come from the set of + nodes specified by the policy. Memory will be allocated from + the node in the set with sufficient free memory that is + closest to the node where the allocation takes place. + +MPOL_PREFERRED + This mode specifies that the allocation should be attempted + from the single node specified in the policy. If that + allocation fails, the kernel will search other nodes, in order + of increasing distance from the preferred node based on + information provided by the platform firmware. + + Internally, the Preferred policy uses a single node--the + preferred_node member of struct mempolicy. When the internal + mode flag MPOL_F_LOCAL is set, the preferred_node is ignored + and the policy is interpreted as local allocation. "Local" + allocation policy can be viewed as a Preferred policy that + starts at the node containing the cpu where the allocation + takes place. + + It is possible for the user to specify that local allocation + is always preferred by passing an empty nodemask with this + mode. If an empty nodemask is passed, the policy cannot use + the MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES flags + described below. + +MPOL_INTERLEAVED + This mode specifies that page allocations be interleaved, on a + page granularity, across the nodes specified in the policy. 
+ This mode also behaves slightly differently, based on the + context where it is used: + + For allocation of anonymous pages and shared memory pages, + Interleave mode indexes the set of nodes specified by the + policy using the page offset of the faulting address into the + segment [VMA] containing the address modulo the number of + nodes specified by the policy. It then attempts to allocate a + page, starting at the selected node, as if the node had been + specified by a Preferred policy or had been selected by a + local allocation. That is, allocation will follow the per + node zonelist. + + For allocation of page cache pages, Interleave mode indexes + the set of nodes specified by the policy using a node counter + maintained per task. This counter wraps around to the lowest + specified node after it reaches the highest specified node. + This will tend to spread the pages out over the nodes + specified by the policy based on the order in which they are + allocated, rather than based on any page offset into an + address range or file. During system boot up, the temporary + interleaved system default policy works in this mode. + +Linux memory policy supports the following optional mode flags: + +MPOL_F_STATIC_NODES + This flag specifies that the nodemask passed by + the user should not be remapped if the task or VMA's set of allowed + nodes changes after the memory policy has been defined. + + Without this flag, anytime a mempolicy is rebound because of a + change in the set of allowed nodes, the node (Preferred) or + nodemask (Bind, Interleave) is remapped to the new set of + allowed nodes. This may result in nodes being used that were + previously undesired. + + With this flag, if the user-specified nodes overlap with the + nodes allowed by the task's cpuset, then the memory policy is + applied to their intersection. If the two sets of nodes do not + overlap, the Default policy is used. 
+ + For example, consider a task that is attached to a cpuset with + mems 1-3 that sets an Interleave policy over the same set. If + the cpuset's mems change to 3-5, the Interleave will now occur + over nodes 3, 4, and 5. With this flag, however, since only node + 3 is allowed from the user's nodemask, the "interleave" only + occurs over that node. If no nodes from the user's nodemask are + now allowed, the Default behavior is used. + + MPOL_F_STATIC_NODES cannot be combined with the + MPOL_F_RELATIVE_NODES flag. It also cannot be used for + MPOL_PREFERRED policies that were created with an empty nodemask + (local allocation). + +MPOL_F_RELATIVE_NODES + This flag specifies that the nodemask passed + by the user will be mapped relative to the set of the task or VMA's + set of allowed nodes. The kernel stores the user-passed nodemask, + and if the allowed nodes changes, then that original nodemask will + be remapped relative to the new set of allowed nodes. + + Without this flag (and without MPOL_F_STATIC_NODES), anytime a + mempolicy is rebound because of a change in the set of allowed + nodes, the node (Preferred) or nodemask (Bind, Interleave) is + remapped to the new set of allowed nodes. That remap may not + preserve the relative nature of the user's passed nodemask to its + set of allowed nodes upon successive rebinds: a nodemask of + 1,3,5 may be remapped to 7-9 and then to 1-3 if the set of + allowed nodes is restored to its original state. + + With this flag, the remap is done so that the node numbers from + the user's passed nodemask are relative to the set of allowed + nodes. In other words, if nodes 0, 2, and 4 are set in the user's + nodemask, the policy will be effected over the first (and in the + Bind or Interleave case, the third and fifth) nodes in the set of + allowed nodes. The nodemask passed by the user represents nodes + relative to task or VMA's set of allowed nodes. 
+ + If the user's nodemask includes nodes that are outside the range + of the new set of allowed nodes (for example, node 5 is set in + the user's nodemask when the set of allowed nodes is only 0-3), + then the remap wraps around to the beginning of the nodemask and, + if not already set, sets the node in the mempolicy nodemask. + + For example, consider a task that is attached to a cpuset with + mems 2-5 that sets an Interleave policy over the same set with + MPOL_F_RELATIVE_NODES. If the cpuset's mems change to 3-7, the + interleave now occurs over nodes 3,5-7. If the cpuset's mems + then change to 0,2-3,5, then the interleave occurs over nodes + 0,2-3,5. + + Thanks to the consistent remapping, applications preparing + nodemasks to specify memory policies using this flag should + disregard their current, actual cpuset imposed memory placement + and prepare the nodemask as if they were always located on + memory nodes 0 to N-1, where N is the number of memory nodes the + policy is intended to manage. Let the kernel then remap to the + set of memory nodes allowed by the task's cpuset, as that may + change over time. + + MPOL_F_RELATIVE_NODES cannot be combined with the + MPOL_F_STATIC_NODES flag. It also cannot be used for + MPOL_PREFERRED policies that were created with an empty nodemask + (local allocation). + +Memory Policy Reference Counting +================================ + +To resolve use/free races, struct mempolicy contains an atomic reference +count field. Internal interfaces, mpol_get()/mpol_put() increment and +decrement this reference count, respectively. mpol_put() will only free +the structure back to the mempolicy kmem cache when the reference count +goes to zero. + +When a new memory policy is allocated, its reference count is initialized +to '1', representing the reference held by the task that is installing the +new policy. 
When a pointer to a memory policy structure is stored in another +structure, another reference is added, as the task's reference will be dropped +on completion of the policy installation. + +During run-time "usage" of the policy, we attempt to minimize atomic operations +on the reference count, as this can lead to cache lines bouncing between cpus +and NUMA nodes. "Usage" here means one of the following: + +1) querying of the policy, either by the task itself [using the get_mempolicy() + API discussed below] or by another task using the /proc//numa_maps + interface. + +2) examination of the policy to determine the policy mode and associated node + or node lists, if any, for page allocation. This is considered a "hot + path". Note that for MPOL_BIND, the "usage" extends across the entire + allocation process, which may sleep during page reclamation, because the + BIND policy nodemask is used, by reference, to filter ineligible nodes. + +We can avoid taking an extra reference during the usages listed above as +follows: + +1) we never need to get/free the system default policy as this is never + changed nor freed, once the system is up and running. + +2) for querying the policy, we do not need to take an extra reference on the + target task's task policy nor vma policies because we always acquire the + task's mm's mmap_sem for read during the query. The set_mempolicy() and + mbind() APIs [see below] always acquire the mmap_sem for write when + installing or replacing task or vma policies. Thus, there is no possibility + of a task or thread freeing a policy while another task or thread is + querying it. + +3) Page allocation usage of task or vma policy occurs in the fault path where + we hold the mmap_sem for read. Again, because replacing the task or vma + policy requires that the mmap_sem be held for write, the policy can't be + freed out from under us while we're using it for page allocation. + +4) Shared policies require special consideration. 
One task can replace a + shared memory policy while another task, with a distinct mmap_sem, is + querying or allocating a page based on the policy. To resolve this + potential race, the shared policy infrastructure adds an extra reference + to the shared policy during lookup while holding a spin lock on the shared + policy management structure. This requires that we drop this extra + reference when we're finished "using" the policy. We must drop the + extra reference on shared policies in the same query/allocation paths + used for non-shared policies. For this reason, shared policies are marked + as such, and the extra reference is dropped "conditionally"--i.e., only + for shared policies. + + Because of this extra reference counting, and because we must lookup + shared policies in a tree structure under spinlock, shared policies are + more expensive to use in the page allocation path. This is especially + true for shared policies on shared memory regions shared by tasks running + on different NUMA nodes. This extra overhead can be avoided by always + falling back to task or system default policy for shared memory regions, + or by prefaulting the entire shared memory region into memory and locking + it down. However, this might not be appropriate for all applications. + +Memory Policy APIs + +Linux supports 3 system calls for controlling memory policy. These APIS +always affect only the calling task, the calling task's address space, or +some shared object mapped into the calling task's address space. + +.. note:: + the headers that define these APIs and the parameter data types for + user space applications reside in a package that is not part of the + Linux kernel. The kernel system call interfaces, with the 'sys\_' + prefix, are defined in ; the mode and flag + definitions are defined in . 
+ +Set [Task] Memory Policy:: + + long set_mempolicy(int mode, const unsigned long *nmask, + unsigned long maxnode); + +Sets the calling task's "task/process memory policy" to mode +specified by the 'mode' argument and the set of nodes defined by +'nmask'. 'nmask' points to a bit mask of node ids containing at least +'maxnode' ids. Optional mode flags may be passed by combining the +'mode' argument with the flag (for example: MPOL_INTERLEAVE | +MPOL_F_STATIC_NODES). + +See the set_mempolicy(2) man page for more details + + +Get [Task] Memory Policy or Related Information:: + + long get_mempolicy(int *mode, + const unsigned long *nmask, unsigned long maxnode, + void *addr, int flags); + +Queries the "task/process memory policy" of the calling task, or the +policy or location of a specified virtual address, depending on the +'flags' argument. + +See the get_mempolicy(2) man page for more details + + +Install VMA/Shared Policy for a Range of Task's Address Space:: + + long mbind(void *start, unsigned long len, int mode, + const unsigned long *nmask, unsigned long maxnode, + unsigned flags); + +mbind() installs the policy specified by (mode, nmask, maxnodes) as a +VMA policy for the range of the calling task's address space specified +by the 'start' and 'len' arguments. Additional actions may be +requested via the 'flags' argument. + +See the mbind(2) man page for more details. + +Memory Policy Command Line Interface +==================================== + +Although not strictly part of the Linux implementation of memory policy, +a command line tool, numactl(8), exists that allows one to: + ++ set the task policy for a specified program via set_mempolicy(2), fork(2) and + exec(2) + ++ set the shared policy for a shared memory segment via mbind(2) + +The numactl(8) tool is packaged with the run-time version of the library +containing the memory policy system call wrappers. 
Some distributions +package the headers and compile-time libraries in a separate development +package. + +.. _mem_pol_and_cpusets: + +Memory Policies and cpusets +=========================== + +Memory policies work within cpusets as described above. For memory policies +that require a node or set of nodes, the nodes are restricted to the set of +nodes whose memories are allowed by the cpuset constraints. If the nodemask +specified for the policy contains nodes that are not allowed by the cpuset and +MPOL_F_RELATIVE_NODES is not used, the intersection of the set of nodes +specified for the policy and the set of nodes with memory is used. If the +result is the empty set, the policy is considered invalid and cannot be +installed. If MPOL_F_RELATIVE_NODES is used, the policy's nodes are mapped +onto and folded into the task's set of allowed nodes as previously described. + +The interaction of memory policies and cpusets can be problematic when tasks +in two cpusets share access to a memory region, such as shared memory segments +created by shmget() or mmap() with the MAP_ANONYMOUS and MAP_SHARED flags, and +any of the tasks install shared policy on the region, only nodes whose +memories are allowed in both cpusets may be used in the policies. Obtaining +this information requires "stepping outside" the memory policy APIs to use the +cpuset information and requires that one know in what cpusets other tasks might +be attaching to the shared region. Furthermore, if the cpusets' allowed +memory sets are disjoint, "local" allocation is the only valid policy. diff --git a/Documentation/vm/numa_memory_policy.txt b/Documentation/vm/numa_memory_policy.txt deleted file mode 100644 index 8cd942ca114e..000000000000 --- a/Documentation/vm/numa_memory_policy.txt +++ /dev/null @@ -1,485 +0,0 @@ -.. _numa_memory_policy: - -=================== -Linux Memory Policy -=================== - -What is Linux Memory Policy? 
-============================ - -In the Linux kernel, "memory policy" determines from which node the kernel will -allocate memory in a NUMA system or in an emulated NUMA system. Linux has -supported platforms with Non-Uniform Memory Access architectures since 2.4.?. -The current memory policy support was added to Linux 2.6 around May 2004. This -document attempts to describe the concepts and APIs of the 2.6 memory policy -support. - -Memory policies should not be confused with cpusets -(``Documentation/cgroup-v1/cpusets.txt``) -which is an administrative mechanism for restricting the nodes from which -memory may be allocated by a set of processes. Memory policies are a -programming interface that a NUMA-aware application can take advantage of. When -both cpusets and policies are applied to a task, the restrictions of the cpuset -takes priority. See :ref:`Memory Policies and cpusets ` -below for more details. - -Memory Policy Concepts -====================== - -Scope of Memory Policies ------------------------- - -The Linux kernel supports _scopes_ of memory policy, described here from -most general to most specific: - -System Default Policy - this policy is "hard coded" into the kernel. It is the policy - that governs all page allocations that aren't controlled by - one of the more specific policy scopes discussed below. When - the system is "up and running", the system default policy will - use "local allocation" described below. However, during boot - up, the system default policy will be set to interleave - allocations across all nodes with "sufficient" memory, so as - not to overload the initial boot node with boot-time - allocations. - -Task/Process Policy - this is an optional, per-task policy. When defined for a specific task, this policy controls all page allocations made by or on behalf of the task that aren't controlled by a more specific scope. 
If a task does not define a task policy, then all page allocations that would have been controlled by the task policy "fall back" to the System Default Policy. - - The task policy applies to the entire address space of a task. Thus, - it is inheritable, and indeed is inherited, across both fork() - [clone() w/o the CLONE_VM flag] and exec*(). This allows a parent task - to establish the task policy for a child task exec()'d from an - executable image that has no awareness of memory policy. See the - MEMORY POLICY APIS section, below, for an overview of the system call - that a task may use to set/change its task/process policy. - - In a multi-threaded task, task policies apply only to the thread - [Linux kernel task] that installs the policy and any threads - subsequently created by that thread. Any sibling threads existing - at the time a new task policy is installed retain their current - policy. - - A task policy applies only to pages allocated after the policy is - installed. Any pages already faulted in by the task when the task - changes its task policy remain where they were allocated based on - the policy at the time they were allocated. - -.. _vma_policy: - -VMA Policy - A "VMA" or "Virtual Memory Area" refers to a range of a task's - virtual address space. A task may define a specific policy for a range - of its virtual address space. See the MEMORY POLICIES APIS section, - below, for an overview of the mbind() system call used to set a VMA - policy. - - A VMA policy will govern the allocation of pages that back - this region ofthe address space. Any regions of the task's - address space that don't have an explicit VMA policy will fall - back to the task policy, which may itself fall back to the - System Default Policy. - - VMA policies have a few complicating details: - - * VMA policy applies ONLY to anonymous pages. 
These include - pages allocated for anonymous segments, such as the task - stack and heap, and any regions of the address space - mmap()ed with the MAP_ANONYMOUS flag. If a VMA policy is - applied to a file mapping, it will be ignored if the mapping - used the MAP_SHARED flag. If the file mapping used the - MAP_PRIVATE flag, the VMA policy will only be applied when - an anonymous page is allocated on an attempt to write to the - mapping-- i.e., at Copy-On-Write. - - * VMA policies are shared between all tasks that share a - virtual address space--a.k.a. threads--independent of when - the policy is installed; and they are inherited across - fork(). However, because VMA policies refer to a specific - region of a task's address space, and because the address - space is discarded and recreated on exec*(), VMA policies - are NOT inheritable across exec(). Thus, only NUMA-aware - applications may use VMA policies. - - * A task may install a new VMA policy on a sub-range of a - previously mmap()ed region. When this happens, Linux splits - the existing virtual memory area into 2 or 3 VMAs, each with - it's own policy. - - * By default, VMA policy applies only to pages allocated after - the policy is installed. Any pages already faulted into the - VMA range remain where they were allocated based on the - policy at the time they were allocated. However, since - 2.6.16, Linux supports page migration via the mbind() system - call, so that page contents can be moved to match a newly - installed policy. - -Shared Policy - Conceptually, shared policies apply to "memory objects" mapped - shared into one or more tasks' distinct address spaces. An - application installs a shared policies the same way as VMA - policies--using the mbind() system call specifying a range of - virtual addresses that map the shared object. 
However, unlike - VMA policies, which can be considered to be an attribute of a - range of a task's address space, shared policies apply - directly to the shared object. Thus, all tasks that attach to - the object share the policy, and all pages allocated for the - shared object, by any task, will obey the shared policy. - - As of 2.6.22, only shared memory segments, created by shmget() or - mmap(MAP_ANONYMOUS|MAP_SHARED), support shared policy. When shared - policy support was added to Linux, the associated data structures were - added to hugetlbfs shmem segments. At the time, hugetlbfs did not - support allocation at fault time--a.k.a lazy allocation--so hugetlbfs - shmem segments were never "hooked up" to the shared policy support. - Although hugetlbfs segments now support lazy allocation, their support - for shared policy has not been completed. - - As mentioned above :ref:`VMA policies `, - allocations of page cache pages for regular files mmap()ed - with MAP_SHARED ignore any VMA policy installed on the virtual - address range backed by the shared file mapping. Rather, - shared page cache pages, including pages backing private - mappings that have not yet been written by the task, follow - task policy, if any, else System Default Policy. - - The shared policy infrastructure supports different policies on subset - ranges of the shared object. However, Linux still splits the VMA of - the task that installs the policy for each range of distinct policy. - Thus, different tasks that attach to a shared memory segment can have - different VMA configurations mapping that one shared object. This - can be seen by examining the /proc//numa_maps of tasks sharing - a shared memory region, when one task has installed shared policy on - one or more ranges of the region. - -Components of Memory Policies ------------------------------ - -A Linux memory policy consists of a "mode", optional mode flags, and -an optional set of nodes. 
The mode determines the behavior of the -policy, the optional mode flags determine the behavior of the mode, -and the optional set of nodes can be viewed as the arguments to the -policy behavior. - -Internally, memory policies are implemented by a reference counted -structure, struct mempolicy. Details of this structure will be -discussed in context, below, as required to explain the behavior. - -Linux memory policy supports the following 4 behavioral modes: - -Default Mode--MPOL_DEFAULT - This mode is only used in the memory policy APIs. Internally, - MPOL_DEFAULT is converted to the NULL memory policy in all - policy scopes. Any existing non-default policy will simply be - removed when MPOL_DEFAULT is specified. As a result, - MPOL_DEFAULT means "fall back to the next most specific policy - scope." - - For example, a NULL or default task policy will fall back to the - system default policy. A NULL or default vma policy will fall - back to the task policy. - - When specified in one of the memory policy APIs, the Default mode - does not use the optional set of nodes. - - It is an error for the set of nodes specified for this policy to - be non-empty. - -MPOL_BIND - This mode specifies that memory must come from the set of - nodes specified by the policy. Memory will be allocated from - the node in the set with sufficient free memory that is - closest to the node where the allocation takes place. - -MPOL_PREFERRED - This mode specifies that the allocation should be attempted - from the single node specified in the policy. If that - allocation fails, the kernel will search other nodes, in order - of increasing distance from the preferred node based on - information provided by the platform firmware. - - Internally, the Preferred policy uses a single node--the - preferred_node member of struct mempolicy. When the internal - mode flag MPOL_F_LOCAL is set, the preferred_node is ignored - and the policy is interpreted as local allocation. 
"Local" - allocation policy can be viewed as a Preferred policy that - starts at the node containing the cpu where the allocation - takes place. - - It is possible for the user to specify that local allocation - is always preferred by passing an empty nodemask with this - mode. If an empty nodemask is passed, the policy cannot use - the MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES flags - described below. - -MPOL_INTERLEAVED - This mode specifies that page allocations be interleaved, on a - page granularity, across the nodes specified in the policy. - This mode also behaves slightly differently, based on the - context where it is used: - - For allocation of anonymous pages and shared memory pages, - Interleave mode indexes the set of nodes specified by the - policy using the page offset of the faulting address into the - segment [VMA] containing the address modulo the number of - nodes specified by the policy. It then attempts to allocate a - page, starting at the selected node, as if the node had been - specified by a Preferred policy or had been selected by a - local allocation. That is, allocation will follow the per - node zonelist. - - For allocation of page cache pages, Interleave mode indexes - the set of nodes specified by the policy using a node counter - maintained per task. This counter wraps around to the lowest - specified node after it reaches the highest specified node. - This will tend to spread the pages out over the nodes - specified by the policy based on the order in which they are - allocated, rather than based on any page offset into an - address range or file. During system boot up, the temporary - interleaved system default policy works in this mode. - -Linux memory policy supports the following optional mode flags: - -MPOL_F_STATIC_NODES - This flag specifies that the nodemask passed by - the user should not be remapped if the task or VMA's set of allowed - nodes changes after the memory policy has been defined. 
- - Without this flag, anytime a mempolicy is rebound because of a - change in the set of allowed nodes, the node (Preferred) or - nodemask (Bind, Interleave) is remapped to the new set of - allowed nodes. This may result in nodes being used that were - previously undesired. - - With this flag, if the user-specified nodes overlap with the - nodes allowed by the task's cpuset, then the memory policy is - applied to their intersection. If the two sets of nodes do not - overlap, the Default policy is used. - - For example, consider a task that is attached to a cpuset with - mems 1-3 that sets an Interleave policy over the same set. If - the cpuset's mems change to 3-5, the Interleave will now occur - over nodes 3, 4, and 5. With this flag, however, since only node - 3 is allowed from the user's nodemask, the "interleave" only - occurs over that node. If no nodes from the user's nodemask are - now allowed, the Default behavior is used. - - MPOL_F_STATIC_NODES cannot be combined with the - MPOL_F_RELATIVE_NODES flag. It also cannot be used for - MPOL_PREFERRED policies that were created with an empty nodemask - (local allocation). - -MPOL_F_RELATIVE_NODES - This flag specifies that the nodemask passed - by the user will be mapped relative to the set of the task or VMA's - set of allowed nodes. The kernel stores the user-passed nodemask, - and if the allowed nodes changes, then that original nodemask will - be remapped relative to the new set of allowed nodes. - - Without this flag (and without MPOL_F_STATIC_NODES), anytime a - mempolicy is rebound because of a change in the set of allowed - nodes, the node (Preferred) or nodemask (Bind, Interleave) is - remapped to the new set of allowed nodes. That remap may not - preserve the relative nature of the user's passed nodemask to its - set of allowed nodes upon successive rebinds: a nodemask of - 1,3,5 may be remapped to 7-9 and then to 1-3 if the set of - allowed nodes is restored to its original state. 
- - With this flag, the remap is done so that the node numbers from - the user's passed nodemask are relative to the set of allowed - nodes. In other words, if nodes 0, 2, and 4 are set in the user's - nodemask, the policy will be effected over the first (and in the - Bind or Interleave case, the third and fifth) nodes in the set of - allowed nodes. The nodemask passed by the user represents nodes - relative to task or VMA's set of allowed nodes. - - If the user's nodemask includes nodes that are outside the range - of the new set of allowed nodes (for example, node 5 is set in - the user's nodemask when the set of allowed nodes is only 0-3), - then the remap wraps around to the beginning of the nodemask and, - if not already set, sets the node in the mempolicy nodemask. - - For example, consider a task that is attached to a cpuset with - mems 2-5 that sets an Interleave policy over the same set with - MPOL_F_RELATIVE_NODES. If the cpuset's mems change to 3-7, the - interleave now occurs over nodes 3,5-7. If the cpuset's mems - then change to 0,2-3,5, then the interleave occurs over nodes - 0,2-3,5. - - Thanks to the consistent remapping, applications preparing - nodemasks to specify memory policies using this flag should - disregard their current, actual cpuset imposed memory placement - and prepare the nodemask as if they were always located on - memory nodes 0 to N-1, where N is the number of memory nodes the - policy is intended to manage. Let the kernel then remap to the - set of memory nodes allowed by the task's cpuset, as that may - change over time. - - MPOL_F_RELATIVE_NODES cannot be combined with the - MPOL_F_STATIC_NODES flag. It also cannot be used for - MPOL_PREFERRED policies that were created with an empty nodemask - (local allocation). - -Memory Policy Reference Counting -================================ - -To resolve use/free races, struct mempolicy contains an atomic reference -count field. 
Internal interfaces, mpol_get()/mpol_put() increment and -decrement this reference count, respectively. mpol_put() will only free -the structure back to the mempolicy kmem cache when the reference count -goes to zero. - -When a new memory policy is allocated, its reference count is initialized -to '1', representing the reference held by the task that is installing the -new policy. When a pointer to a memory policy structure is stored in another -structure, another reference is added, as the task's reference will be dropped -on completion of the policy installation. - -During run-time "usage" of the policy, we attempt to minimize atomic operations -on the reference count, as this can lead to cache lines bouncing between cpus -and NUMA nodes. "Usage" here means one of the following: - -1) querying of the policy, either by the task itself [using the get_mempolicy() - API discussed below] or by another task using the /proc//numa_maps - interface. - -2) examination of the policy to determine the policy mode and associated node - or node lists, if any, for page allocation. This is considered a "hot - path". Note that for MPOL_BIND, the "usage" extends across the entire - allocation process, which may sleep during page reclaimation, because the - BIND policy nodemask is used, by reference, to filter ineligible nodes. - -We can avoid taking an extra reference during the usages listed above as -follows: - -1) we never need to get/free the system default policy as this is never - changed nor freed, once the system is up and running. - -2) for querying the policy, we do not need to take an extra reference on the - target task's task policy nor vma policies because we always acquire the - task's mm's mmap_sem for read during the query. The set_mempolicy() and - mbind() APIs [see below] always acquire the mmap_sem for write when - installing or replacing task or vma policies. 
Thus, there is no possibility - of a task or thread freeing a policy while another task or thread is - querying it. - -3) Page allocation usage of task or vma policy occurs in the fault path where - we hold them mmap_sem for read. Again, because replacing the task or vma - policy requires that the mmap_sem be held for write, the policy can't be - freed out from under us while we're using it for page allocation. - -4) Shared policies require special consideration. One task can replace a - shared memory policy while another task, with a distinct mmap_sem, is - querying or allocating a page based on the policy. To resolve this - potential race, the shared policy infrastructure adds an extra reference - to the shared policy during lookup while holding a spin lock on the shared - policy management structure. This requires that we drop this extra - reference when we're finished "using" the policy. We must drop the - extra reference on shared policies in the same query/allocation paths - used for non-shared policies. For this reason, shared policies are marked - as such, and the extra reference is dropped "conditionally"--i.e., only - for shared policies. - - Because of this extra reference counting, and because we must lookup - shared policies in a tree structure under spinlock, shared policies are - more expensive to use in the page allocation path. This is especially - true for shared policies on shared memory regions shared by tasks running - on different NUMA nodes. This extra overhead can be avoided by always - falling back to task or system default policy for shared memory regions, - or by prefaulting the entire shared memory region into memory and locking - it down. However, this might not be appropriate for all applications. - -Memory Policy APIs - -Linux supports 3 system calls for controlling memory policy. These APIS -always affect only the calling task, the calling task's address space, or -some shared object mapped into the calling task's address space. 
- -.. note:: - the headers that define these APIs and the parameter data types for - user space applications reside in a package that is not part of the - Linux kernel. The kernel system call interfaces, with the 'sys\_' - prefix, are defined in ; the mode and flag - definitions are defined in . - -Set [Task] Memory Policy:: - - long set_mempolicy(int mode, const unsigned long *nmask, - unsigned long maxnode); - -Set's the calling task's "task/process memory policy" to mode -specified by the 'mode' argument and the set of nodes defined by -'nmask'. 'nmask' points to a bit mask of node ids containing at least -'maxnode' ids. Optional mode flags may be passed by combining the -'mode' argument with the flag (for example: MPOL_INTERLEAVE | -MPOL_F_STATIC_NODES). - -See the set_mempolicy(2) man page for more details - - -Get [Task] Memory Policy or Related Information:: - - long get_mempolicy(int *mode, - const unsigned long *nmask, unsigned long maxnode, - void *addr, int flags); - -Queries the "task/process memory policy" of the calling task, or the -policy or location of a specified virtual address, depending on the -'flags' argument. - -See the get_mempolicy(2) man page for more details - - -Install VMA/Shared Policy for a Range of Task's Address Space:: - - long mbind(void *start, unsigned long len, int mode, - const unsigned long *nmask, unsigned long maxnode, - unsigned flags); - -mbind() installs the policy specified by (mode, nmask, maxnodes) as a -VMA policy for the range of the calling task's address space specified -by the 'start' and 'len' arguments. Additional actions may be -requested via the 'flags' argument. - -See the mbind(2) man page for more details. 
- -Memory Policy Command Line Interface -==================================== - -Although not strictly part of the Linux implementation of memory policy, -a command line tool, numactl(8), exists that allows one to: - -+ set the task policy for a specified program via set_mempolicy(2), fork(2) and - exec(2) - -+ set the shared policy for a shared memory segment via mbind(2) - -The numactl(8) tool is packaged with the run-time version of the library -containing the memory policy system call wrappers. Some distributions -package the headers and compile-time libraries in a separate development -package. - -.. _mem_pol_and_cpusets: - -Memory Policies and cpusets -=========================== - -Memory policies work within cpusets as described above. For memory policies -that require a node or set of nodes, the nodes are restricted to the set of -nodes whose memories are allowed by the cpuset constraints. If the nodemask -specified for the policy contains nodes that are not allowed by the cpuset and -MPOL_F_RELATIVE_NODES is not used, the intersection of the set of nodes -specified for the policy and the set of nodes with memory is used. If the -result is the empty set, the policy is considered invalid and cannot be -installed. If MPOL_F_RELATIVE_NODES is used, the policy's nodes are mapped -onto and folded into the task's set of allowed nodes as previously described. - -The interaction of memory policies and cpusets can be problematic when tasks -in two cpusets share access to a memory region, such as shared memory segments -created by shmget() of mmap() with the MAP_ANONYMOUS and MAP_SHARED flags, and -any of the tasks install shared policy on the region, only nodes whose -memories are allowed in both cpusets may be used in the policies. Obtaining -this information requires "stepping outside" the memory policy APIs to use the -cpuset information and requires that one know in what cpusets other task might -be attaching to the shared region. 
Furthermore, if the cpusets' allowed -memory sets are disjoint, "local" allocation is the only valid policy. diff --git a/Documentation/vm/overcommit-accounting b/Documentation/vm/overcommit-accounting deleted file mode 100644 index 0dd54bbe4afa..000000000000 --- a/Documentation/vm/overcommit-accounting +++ /dev/null @@ -1,87 +0,0 @@ -.. _overcommit_accounting: - -===================== -Overcommit Accounting -===================== - -The Linux kernel supports the following overcommit handling modes - -0 - Heuristic overcommit handling. Obvious overcommits of address - space are refused. Used for a typical system. It ensures a - seriously wild allocation fails while allowing overcommit to - reduce swap usage. root is allowed to allocate slightly more - memory in this mode. This is the default. - -1 - Always overcommit. Appropriate for some scientific - applications. Classic example is code using sparse arrays and - just relying on the virtual memory consisting almost entirely - of zero pages. - -2 - Don't overcommit. The total address space commit for the - system is not permitted to exceed swap + a configurable amount - (default is 50%) of physical RAM. Depending on the amount you - use, in most situations this means a process will not be - killed while accessing pages but will receive errors on memory - allocation as appropriate. - - Useful for applications that want to guarantee their memory - allocations will be available in the future without having to - initialize every page. - -The overcommit policy is set via the sysctl ``vm.overcommit_memory``. - -The overcommit amount can be set via ``vm.overcommit_ratio`` (percentage) -or ``vm.overcommit_kbytes`` (absolute value). - -The current overcommit limit and amount committed are viewable in -``/proc/meminfo`` as CommitLimit and Committed_AS respectively. - -Gotchas -======= - -The C language stack growth does an implicit mremap. 
If you want absolute -guarantees and run close to the edge you MUST mmap your stack for the -largest size you think you will need. For typical stack usage this does -not matter much but it's a corner case if you really really care - -In mode 2 the MAP_NORESERVE flag is ignored. - - -How It Works -============ - -The overcommit is based on the following rules - -For a file backed map - | SHARED or READ-only - 0 cost (the file is the map not swap) - | PRIVATE WRITABLE - size of mapping per instance - -For an anonymous or ``/dev/zero`` map - | SHARED - size of mapping - | PRIVATE READ-only - 0 cost (but of little use) - | PRIVATE WRITABLE - size of mapping per instance - -Additional accounting - | Pages made writable copies by mmap - | shmfs memory drawn from the same pool - -Status -====== - -* We account mmap memory mappings -* We account mprotect changes in commit -* We account mremap changes in size -* We account brk -* We account munmap -* We report the commit status in /proc -* Account and check on fork -* Review stack handling/building on exec -* SHMfs accounting -* Implement actual limit enforcement - -To Do -===== -* Account ptrace pages (this is hard) diff --git a/Documentation/vm/overcommit-accounting.rst b/Documentation/vm/overcommit-accounting.rst new file mode 100644 index 000000000000..0dd54bbe4afa --- /dev/null +++ b/Documentation/vm/overcommit-accounting.rst @@ -0,0 +1,87 @@ +.. _overcommit_accounting: + +===================== +Overcommit Accounting +===================== + +The Linux kernel supports the following overcommit handling modes + +0 + Heuristic overcommit handling. Obvious overcommits of address + space are refused. Used for a typical system. It ensures a + seriously wild allocation fails while allowing overcommit to + reduce swap usage. root is allowed to allocate slightly more + memory in this mode. This is the default. + +1 + Always overcommit. Appropriate for some scientific + applications. 
Classic example is code using sparse arrays and + just relying on the virtual memory consisting almost entirely + of zero pages. + +2 + Don't overcommit. The total address space commit for the + system is not permitted to exceed swap + a configurable amount + (default is 50%) of physical RAM. Depending on the amount you + use, in most situations this means a process will not be + killed while accessing pages but will receive errors on memory + allocation as appropriate. + + Useful for applications that want to guarantee their memory + allocations will be available in the future without having to + initialize every page. + +The overcommit policy is set via the sysctl ``vm.overcommit_memory``. + +The overcommit amount can be set via ``vm.overcommit_ratio`` (percentage) +or ``vm.overcommit_kbytes`` (absolute value). + +The current overcommit limit and amount committed are viewable in +``/proc/meminfo`` as CommitLimit and Committed_AS respectively. + +Gotchas +======= + +The C language stack growth does an implicit mremap. If you want absolute +guarantees and run close to the edge you MUST mmap your stack for the +largest size you think you will need. For typical stack usage this does +not matter much but it's a corner case if you really really care + +In mode 2 the MAP_NORESERVE flag is ignored. 
+ + +How It Works +============ + +The overcommit is based on the following rules + +For a file backed map + | SHARED or READ-only - 0 cost (the file is the map not swap) + | PRIVATE WRITABLE - size of mapping per instance + +For an anonymous or ``/dev/zero`` map + | SHARED - size of mapping + | PRIVATE READ-only - 0 cost (but of little use) + | PRIVATE WRITABLE - size of mapping per instance + +Additional accounting + | Pages made writable copies by mmap + | shmfs memory drawn from the same pool + +Status +====== + +* We account mmap memory mappings +* We account mprotect changes in commit +* We account mremap changes in size +* We account brk +* We account munmap +* We report the commit status in /proc +* Account and check on fork +* Review stack handling/building on exec +* SHMfs accounting +* Implement actual limit enforcement + +To Do +===== +* Account ptrace pages (this is hard) diff --git a/Documentation/vm/page_frags b/Documentation/vm/page_frags deleted file mode 100644 index 637cc49d1b2f..000000000000 --- a/Documentation/vm/page_frags +++ /dev/null @@ -1,45 +0,0 @@ -.. _page_frags: - -============== -Page fragments -============== - -A page fragment is an arbitrary-length arbitrary-offset area of memory -which resides within a 0 or higher order compound page. Multiple -fragments within that page are individually refcounted, in the page's -reference counter. - -The page_frag functions, page_frag_alloc and page_frag_free, provide a -simple allocation framework for page fragments. This is used by the -network stack and network device drivers to provide a backing region of -memory for use as either an sk_buff->head, or to be used in the "frags" -portion of skb_shared_info. - -In order to make use of the page fragment APIs a backing page fragment -cache is needed. This provides a central point for the fragment allocation -and tracks allows multiple calls to make use of a cached page. 
The -advantage to doing this is that multiple calls to get_page can be avoided -which can be expensive at allocation time. However due to the nature of -this caching it is required that any calls to the cache be protected by -either a per-cpu limitation, or a per-cpu limitation and forcing interrupts -to be disabled when executing the fragment allocation. - -The network stack uses two separate caches per CPU to handle fragment -allocation. The netdev_alloc_cache is used by callers making use of the -__netdev_alloc_frag and __netdev_alloc_skb calls. The napi_alloc_cache is -used by callers of the __napi_alloc_frag and __napi_alloc_skb calls. The -main difference between these two calls is the context in which they may be -called. The "netdev" prefixed functions are usable in any context as these -functions will disable interrupts, while the "napi" prefixed functions are -only usable within the softirq context. - -Many network device drivers use a similar methodology for allocating page -fragments, but the page fragments are cached at the ring or descriptor -level. In order to enable these cases it is necessary to provide a generic -way of tearing down a page cache. For this reason __page_frag_cache_drain -was implemented. It allows for freeing multiple references from a single -page via a single call. The advantage to doing this is that it allows for -cleaning up the multiple references that were added to a page in order to -avoid calling get_page per allocation. - -Alexander Duyck, Nov 29, 2016. diff --git a/Documentation/vm/page_frags.rst b/Documentation/vm/page_frags.rst new file mode 100644 index 000000000000..637cc49d1b2f --- /dev/null +++ b/Documentation/vm/page_frags.rst @@ -0,0 +1,45 @@ +.. _page_frags: + +============== +Page fragments +============== + +A page fragment is an arbitrary-length arbitrary-offset area of memory +which resides within a 0 or higher order compound page. 
Multiple +fragments within that page are individually refcounted, in the page's +reference counter. + +The page_frag functions, page_frag_alloc and page_frag_free, provide a +simple allocation framework for page fragments. This is used by the +network stack and network device drivers to provide a backing region of +memory for use as either an sk_buff->head, or to be used in the "frags" +portion of skb_shared_info. + +In order to make use of the page fragment APIs a backing page fragment +cache is needed. This provides a central point for the fragment allocation +and allows multiple calls to make use of a cached page. The +advantage to doing this is that multiple calls to get_page can be avoided +which can be expensive at allocation time. However due to the nature of +this caching it is required that any calls to the cache be protected by +either a per-cpu limitation, or a per-cpu limitation and forcing interrupts +to be disabled when executing the fragment allocation. + +The network stack uses two separate caches per CPU to handle fragment +allocation. The netdev_alloc_cache is used by callers making use of the +__netdev_alloc_frag and __netdev_alloc_skb calls. The napi_alloc_cache is +used by callers of the __napi_alloc_frag and __napi_alloc_skb calls. The +main difference between these two calls is the context in which they may be +called. The "netdev" prefixed functions are usable in any context as these +functions will disable interrupts, while the "napi" prefixed functions are +only usable within the softirq context. + +Many network device drivers use a similar methodology for allocating page +fragments, but the page fragments are cached at the ring or descriptor +level. In order to enable these cases it is necessary to provide a generic +way of tearing down a page cache. For this reason __page_frag_cache_drain +was implemented. It allows for freeing multiple references from a single +page via a single call. 
The advantage to doing this is that it allows for +cleaning up the multiple references that were added to a page in order to +avoid calling get_page per allocation. + +Alexander Duyck, Nov 29, 2016. diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration deleted file mode 100644 index 07b67a821a12..000000000000 --- a/Documentation/vm/page_migration +++ /dev/null @@ -1,257 +0,0 @@ -.. _page_migration: - -============== -Page migration -============== - -Page migration allows the moving of the physical location of pages between -nodes in a numa system while the process is running. This means that the -virtual addresses that the process sees do not change. However, the -system rearranges the physical location of those pages. - -The main intend of page migration is to reduce the latency of memory access -by moving pages near to the processor where the process accessing that memory -is running. - -Page migration allows a process to manually relocate the node on which its -pages are located through the MF_MOVE and MF_MOVE_ALL options while setting -a new memory policy via mbind(). The pages of process can also be relocated -from another process using the sys_migrate_pages() function call. The -migrate_pages function call takes two sets of nodes and moves pages of a -process that are located on the from nodes to the destination nodes. -Page migration functions are provided by the numactl package by Andi Kleen -(a version later than 0.9.3 is required. Get it from -ftp://oss.sgi.com/www/projects/libnuma/download/). numactl provides libnuma -which provides an interface similar to other numa functionality for page -migration. cat ``/proc//numa_maps`` allows an easy review of where the -pages of a process are located. See also the numa_maps documentation in the -proc(5) man page. - -Manual migration is useful if for example the scheduler has relocated -a process to a processor on a distant node. 
A batch scheduler or an -administrator may detect the situation and move the pages of the process -nearer to the new processor. The kernel itself does only provide -manual page migration support. Automatic page migration may be implemented -through user space processes that move pages. A special function call -"move_pages" allows the moving of individual pages within a process. -A NUMA profiler may f.e. obtain a log showing frequent off node -accesses and may use the result to move pages to more advantageous -locations. - -Larger installations usually partition the system using cpusets into -sections of nodes. Paul Jackson has equipped cpusets with the ability to -move pages when a task is moved to another cpuset (See -Documentation/cgroup-v1/cpusets.txt). -Cpusets allows the automation of process locality. If a task is moved to -a new cpuset then also all its pages are moved with it so that the -performance of the process does not sink dramatically. Also the pages -of processes in a cpuset are moved if the allowed memory nodes of a -cpuset are changed. - -Page migration allows the preservation of the relative location of pages -within a group of nodes for all migration techniques which will preserve a -particular memory allocation pattern generated even after migrating a -process. This is necessary in order to preserve the memory latencies. -Processes will run with similar performance after migration. - -Page migration occurs in several steps. First a high level -description for those trying to use migrate_pages() from the kernel -(for userspace usage see the Andi Kleen's numactl package mentioned above) -and then a low level description of how the low level details work. - -In kernel use of migrate_pages() -================================ - -1. Remove pages from the LRU. - - Lists of pages to be migrated are generated by scanning over - pages and moving them into lists. This is done by - calling isolate_lru_page(). 
- Calling isolate_lru_page increases the references to the page - so that it cannot vanish while the page migration occurs. - It also prevents the swapper or other scans to encounter - the page. - -2. We need to have a function of type new_page_t that can be - passed to migrate_pages(). This function should figure out - how to allocate the correct new page given the old page. - -3. The migrate_pages() function is called which attempts - to do the migration. It will call the function to allocate - the new page for each page that is considered for - moving. - -How migrate_pages() works -========================= - -migrate_pages() does several passes over its list of pages. A page is moved -if all references to a page are removable at the time. The page has -already been removed from the LRU via isolate_lru_page() and the refcount -is increased so that the page cannot be freed while page migration occurs. - -Steps: - -1. Lock the page to be migrated - -2. Insure that writeback is complete. - -3. Lock the new page that we want to move to. It is locked so that accesses to - this (not yet uptodate) page immediately lock while the move is in progress. - -4. All the page table references to the page are converted to migration - entries. This decreases the mapcount of a page. If the resulting - mapcount is not zero then we do not migrate the page. All user space - processes that attempt to access the page will now wait on the page lock. - -5. The radix tree lock is taken. This will cause all processes trying - to access the page via the mapping to block on the radix tree spinlock. - -6. The refcount of the page is examined and we back out if references remain - otherwise we know that we are the only one referencing this page. - -7. The radix tree is checked and if it does not contain the pointer to this - page then we back out because someone else modified the radix tree. - -8. 
The new page is prepped with some settings from the old page so that - accesses to the new page will discover a page with the correct settings. - -9. The radix tree is changed to point to the new page. - -10. The reference count of the old page is dropped because the radix tree - reference is gone. A reference to the new page is established because - the new page is referenced to by the radix tree. - -11. The radix tree lock is dropped. With that lookups in the mapping - become possible again. Processes will move from spinning on the tree_lock - to sleeping on the locked new page. - -12. The page contents are copied to the new page. - -13. The remaining page flags are copied to the new page. - -14. The old page flags are cleared to indicate that the page does - not provide any information anymore. - -15. Queued up writeback on the new page is triggered. - -16. If migration entries were page then replace them with real ptes. Doing - so will enable access for user space processes not already waiting for - the page lock. - -19. The page locks are dropped from the old and new page. - Processes waiting on the page lock will redo their page faults - and will reach the new page. - -20. The new page is moved to the LRU and can be scanned by the swapper - etc again. - -Non-LRU page migration -====================== - -Although original migration aimed for reducing the latency of memory access -for NUMA, compaction who want to create high-order page is also main customer. - -Current problem of the implementation is that it is designed to migrate only -*LRU* pages. However, there are potential non-lru pages which can be migrated -in drivers, for example, zsmalloc, virtio-balloon pages. - -For virtio-balloon pages, some parts of migration code path have been hooked -up and added virtio-balloon specific functions to intercept migration logics. 
-It's too specific to a driver so other drivers who want to make their pages -movable would have to add own specific hooks in migration path. - -To overclome the problem, VM supports non-LRU page migration which provides -generic functions for non-LRU movable pages without driver specific hooks -migration path. - -If a driver want to make own pages movable, it should define three functions -which are function pointers of struct address_space_operations. - -1. ``bool (*isolate_page) (struct page *page, isolate_mode_t mode);`` - - What VM expects on isolate_page function of driver is to return *true* - if driver isolates page successfully. On returing true, VM marks the page - as PG_isolated so concurrent isolation in several CPUs skip the page - for isolation. If a driver cannot isolate the page, it should return *false*. - - Once page is successfully isolated, VM uses page.lru fields so driver - shouldn't expect to preserve values in that fields. - -2. ``int (*migratepage) (struct address_space *mapping,`` -| ``struct page *newpage, struct page *oldpage, enum migrate_mode);`` - - After isolation, VM calls migratepage of driver with isolated page. - The function of migratepage is to move content of the old page to new page - and set up fields of struct page newpage. Keep in mind that you should - indicate to the VM the oldpage is no longer movable via __ClearPageMovable() - under page_lock if you migrated the oldpage successfully and returns - MIGRATEPAGE_SUCCESS. If driver cannot migrate the page at the moment, driver - can return -EAGAIN. On -EAGAIN, VM will retry page migration in a short time - because VM interprets -EAGAIN as "temporal migration failure". On returning - any error except -EAGAIN, VM will give up the page migration without retrying - in this time. - - Driver shouldn't touch page.lru field VM using in the functions. - -3. 
``void (*putback_page)(struct page *);`` - - If migration fails on isolated page, VM should return the isolated page - to the driver so VM calls driver's putback_page with migration failed page. - In this function, driver should put the isolated page back to the own data - structure. - -4. non-lru movable page flags - - There are two page flags for supporting non-lru movable page. - - * PG_movable - - Driver should use the below function to make page movable under page_lock:: - - void __SetPageMovable(struct page *page, struct address_space *mapping) - - It needs argument of address_space for registering migration - family functions which will be called by VM. Exactly speaking, - PG_movable is not a real flag of struct page. Rather than, VM - reuses page->mapping's lower bits to represent it. - -:: - #define PAGE_MAPPING_MOVABLE 0x2 - page->mapping = page->mapping | PAGE_MAPPING_MOVABLE; - - so driver shouldn't access page->mapping directly. Instead, driver should - use page_mapping which mask off the low two bits of page->mapping under - page lock so it can get right struct address_space. - - For testing of non-lru movable page, VM supports __PageMovable function. - However, it doesn't guarantee to identify non-lru movable page because - page->mapping field is unified with other variables in struct page. - As well, if driver releases the page after isolation by VM, page->mapping - doesn't have stable value although it has PAGE_MAPPING_MOVABLE - (Look at __ClearPageMovable). But __PageMovable is cheap to catch whether - page is LRU or non-lru movable once the page has been isolated. Because - LRU pages never can have PAGE_MAPPING_MOVABLE in page->mapping. It is also - good for just peeking to test non-lru movable pages before more expensive - checking with lock_page in pfn scanning to select victim. - - For guaranteeing non-lru movable page, VM provides PageMovable function. 
- Unlike __PageMovable, PageMovable functions validates page->mapping and - mapping->a_ops->isolate_page under lock_page. The lock_page prevents sudden - destroying of page->mapping. - - Driver using __SetPageMovable should clear the flag via __ClearMovablePage - under page_lock before the releasing the page. - - * PG_isolated - - To prevent concurrent isolation among several CPUs, VM marks isolated page - as PG_isolated under lock_page. So if a CPU encounters PG_isolated non-lru - movable page, it can skip it. Driver doesn't need to manipulate the flag - because VM will set/clear it automatically. Keep in mind that if driver - sees PG_isolated page, it means the page have been isolated by VM so it - shouldn't touch page.lru field. - PG_isolated is alias with PG_reclaim flag so driver shouldn't use the flag - for own purpose. - -Christoph Lameter, May 8, 2006. -Minchan Kim, Mar 28, 2016. diff --git a/Documentation/vm/page_migration.rst b/Documentation/vm/page_migration.rst new file mode 100644 index 000000000000..07b67a821a12 --- /dev/null +++ b/Documentation/vm/page_migration.rst @@ -0,0 +1,257 @@ +.. _page_migration: + +============== +Page migration +============== + +Page migration allows the moving of the physical location of pages between +nodes in a numa system while the process is running. This means that the +virtual addresses that the process sees do not change. However, the +system rearranges the physical location of those pages. + +The main intend of page migration is to reduce the latency of memory access +by moving pages near to the processor where the process accessing that memory +is running. + +Page migration allows a process to manually relocate the node on which its +pages are located through the MF_MOVE and MF_MOVE_ALL options while setting +a new memory policy via mbind(). The pages of process can also be relocated +from another process using the sys_migrate_pages() function call. 
The +migrate_pages function call takes two sets of nodes and moves pages of a +process that are located on the from nodes to the destination nodes. +Page migration functions are provided by the numactl package by Andi Kleen +(a version later than 0.9.3 is required. Get it from +ftp://oss.sgi.com/www/projects/libnuma/download/). numactl provides libnuma +which provides an interface similar to other numa functionality for page +migration. cat ``/proc//numa_maps`` allows an easy review of where the +pages of a process are located. See also the numa_maps documentation in the +proc(5) man page. + +Manual migration is useful if for example the scheduler has relocated +a process to a processor on a distant node. A batch scheduler or an +administrator may detect the situation and move the pages of the process +nearer to the new processor. The kernel itself does only provide +manual page migration support. Automatic page migration may be implemented +through user space processes that move pages. A special function call +"move_pages" allows the moving of individual pages within a process. +A NUMA profiler may f.e. obtain a log showing frequent off node +accesses and may use the result to move pages to more advantageous +locations. + +Larger installations usually partition the system using cpusets into +sections of nodes. Paul Jackson has equipped cpusets with the ability to +move pages when a task is moved to another cpuset (See +Documentation/cgroup-v1/cpusets.txt). +Cpusets allows the automation of process locality. If a task is moved to +a new cpuset then also all its pages are moved with it so that the +performance of the process does not sink dramatically. Also the pages +of processes in a cpuset are moved if the allowed memory nodes of a +cpuset are changed. 
+ +Page migration allows the preservation of the relative location of pages +within a group of nodes for all migration techniques which will preserve a +particular memory allocation pattern generated even after migrating a +process. This is necessary in order to preserve the memory latencies. +Processes will run with similar performance after migration. + +Page migration occurs in several steps. First a high level +description for those trying to use migrate_pages() from the kernel +(for userspace usage see the Andi Kleen's numactl package mentioned above) +and then a low level description of how the low level details work. + +In kernel use of migrate_pages() +================================ + +1. Remove pages from the LRU. + + Lists of pages to be migrated are generated by scanning over + pages and moving them into lists. This is done by + calling isolate_lru_page(). + Calling isolate_lru_page increases the references to the page + so that it cannot vanish while the page migration occurs. + It also prevents the swapper or other scans to encounter + the page. + +2. We need to have a function of type new_page_t that can be + passed to migrate_pages(). This function should figure out + how to allocate the correct new page given the old page. + +3. The migrate_pages() function is called which attempts + to do the migration. It will call the function to allocate + the new page for each page that is considered for + moving. + +How migrate_pages() works +========================= + +migrate_pages() does several passes over its list of pages. A page is moved +if all references to a page are removable at the time. The page has +already been removed from the LRU via isolate_lru_page() and the refcount +is increased so that the page cannot be freed while page migration occurs. + +Steps: + +1. Lock the page to be migrated + +2. Insure that writeback is complete. + +3. Lock the new page that we want to move to. 
It is locked so that accesses to + this (not yet uptodate) page immediately lock while the move is in progress. + +4. All the page table references to the page are converted to migration + entries. This decreases the mapcount of a page. If the resulting + mapcount is not zero then we do not migrate the page. All user space + processes that attempt to access the page will now wait on the page lock. + +5. The radix tree lock is taken. This will cause all processes trying + to access the page via the mapping to block on the radix tree spinlock. + +6. The refcount of the page is examined and we back out if references remain + otherwise we know that we are the only one referencing this page. + +7. The radix tree is checked and if it does not contain the pointer to this + page then we back out because someone else modified the radix tree. + +8. The new page is prepped with some settings from the old page so that + accesses to the new page will discover a page with the correct settings. + +9. The radix tree is changed to point to the new page. + +10. The reference count of the old page is dropped because the radix tree + reference is gone. A reference to the new page is established because + the new page is referenced to by the radix tree. + +11. The radix tree lock is dropped. With that lookups in the mapping + become possible again. Processes will move from spinning on the tree_lock + to sleeping on the locked new page. + +12. The page contents are copied to the new page. + +13. The remaining page flags are copied to the new page. + +14. The old page flags are cleared to indicate that the page does + not provide any information anymore. + +15. Queued up writeback on the new page is triggered. + +16. If migration entries were page then replace them with real ptes. Doing + so will enable access for user space processes not already waiting for + the page lock. + +19. The page locks are dropped from the old and new page. 
+ Processes waiting on the page lock will redo their page faults + and will reach the new page. + +20. The new page is moved to the LRU and can be scanned by the swapper + etc again. + +Non-LRU page migration +====================== + +Although original migration aimed for reducing the latency of memory access +for NUMA, compaction who want to create high-order page is also main customer. + +Current problem of the implementation is that it is designed to migrate only +*LRU* pages. However, there are potential non-lru pages which can be migrated +in drivers, for example, zsmalloc, virtio-balloon pages. + +For virtio-balloon pages, some parts of migration code path have been hooked +up and added virtio-balloon specific functions to intercept migration logics. +It's too specific to a driver so other drivers who want to make their pages +movable would have to add own specific hooks in migration path. + +To overcome the problem, VM supports non-LRU page migration which provides +generic functions for non-LRU movable pages without driver specific hooks +migration path. + +If a driver want to make own pages movable, it should define three functions +which are function pointers of struct address_space_operations. + +1. ``bool (*isolate_page) (struct page *page, isolate_mode_t mode);`` + + What VM expects on isolate_page function of driver is to return *true* + if driver isolates page successfully. On returning true, VM marks the page + as PG_isolated so concurrent isolation in several CPUs skip the page + for isolation. If a driver cannot isolate the page, it should return *false*. + + Once page is successfully isolated, VM uses page.lru fields so driver + shouldn't expect to preserve values in that fields. + +2. ``int (*migratepage) (struct address_space *mapping,`` +| ``struct page *newpage, struct page *oldpage, enum migrate_mode);`` + + After isolation, VM calls migratepage of driver with isolated page. 
+ The function of migratepage is to move content of the old page to new page + and set up fields of struct page newpage. Keep in mind that you should + indicate to the VM the oldpage is no longer movable via __ClearPageMovable() + under page_lock if you migrated the oldpage successfully and returns + MIGRATEPAGE_SUCCESS. If driver cannot migrate the page at the moment, driver + can return -EAGAIN. On -EAGAIN, VM will retry page migration in a short time + because VM interprets -EAGAIN as "temporal migration failure". On returning + any error except -EAGAIN, VM will give up the page migration without retrying + in this time. + + Driver shouldn't touch page.lru field VM using in the functions. + +3. ``void (*putback_page)(struct page *);`` + + If migration fails on isolated page, VM should return the isolated page + to the driver so VM calls driver's putback_page with migration failed page. + In this function, driver should put the isolated page back to the own data + structure. + +4. non-lru movable page flags + + There are two page flags for supporting non-lru movable page. + + * PG_movable + + Driver should use the below function to make page movable under page_lock:: + + void __SetPageMovable(struct page *page, struct address_space *mapping) + + It needs argument of address_space for registering migration + family functions which will be called by VM. Exactly speaking, + PG_movable is not a real flag of struct page. Rather than, VM + reuses page->mapping's lower bits to represent it. + +:: + #define PAGE_MAPPING_MOVABLE 0x2 + page->mapping = page->mapping | PAGE_MAPPING_MOVABLE; + + so driver shouldn't access page->mapping directly. Instead, driver should + use page_mapping which mask off the low two bits of page->mapping under + page lock so it can get right struct address_space. + + For testing of non-lru movable page, VM supports __PageMovable function. 
+ However, it doesn't guarantee to identify non-lru movable page because + page->mapping field is unified with other variables in struct page. + As well, if driver releases the page after isolation by VM, page->mapping + doesn't have stable value although it has PAGE_MAPPING_MOVABLE + (Look at __ClearPageMovable). But __PageMovable is cheap to catch whether + page is LRU or non-lru movable once the page has been isolated. Because + LRU pages never can have PAGE_MAPPING_MOVABLE in page->mapping. It is also + good for just peeking to test non-lru movable pages before more expensive + checking with lock_page in pfn scanning to select victim. + + For guaranteeing non-lru movable page, VM provides PageMovable function. + Unlike __PageMovable, PageMovable functions validates page->mapping and + mapping->a_ops->isolate_page under lock_page. The lock_page prevents sudden + destroying of page->mapping. + + Driver using __SetPageMovable should clear the flag via __ClearMovablePage + under page_lock before the releasing the page. + + * PG_isolated + + To prevent concurrent isolation among several CPUs, VM marks isolated page + as PG_isolated under lock_page. So if a CPU encounters PG_isolated non-lru + movable page, it can skip it. Driver doesn't need to manipulate the flag + because VM will set/clear it automatically. Keep in mind that if driver + sees PG_isolated page, it means the page have been isolated by VM so it + shouldn't touch page.lru field. + PG_isolated is alias with PG_reclaim flag so driver shouldn't use the flag + for own purpose. + +Christoph Lameter, May 8, 2006. +Minchan Kim, Mar 28, 2016. diff --git a/Documentation/vm/page_owner.rst b/Documentation/vm/page_owner.rst new file mode 100644 index 000000000000..0ed5ab8c7ab4 --- /dev/null +++ b/Documentation/vm/page_owner.rst @@ -0,0 +1,90 @@ +.. 
_page_owner: + +================================================== +page owner: Tracking about who allocated each page +================================================== + +Introduction +============ + +page owner is for the tracking about who allocated each page. +It can be used to debug memory leak or to find a memory hogger. +When allocation happens, information about allocation such as call stack +and order of pages is stored into certain storage for each page. +When we need to know about status of all pages, we can get and analyze +this information. + +Although we already have tracepoint for tracing page allocation/free, +using it for analyzing who allocate each page is rather complex. We need +to enlarge the trace buffer for preventing overlapping until userspace +program launched. And, launched program continually dump out the trace +buffer for later analysis and it would change system behaviour with more +possibility rather than just keeping it in memory, so bad for debugging. + +page owner can also be used for various purposes. For example, accurate +fragmentation statistics can be obtained through gfp flag information of +each page. It is already implemented and activated if page owner is +enabled. Other usages are more than welcome. + +page owner is disabled in default. So, if you'd like to use it, you need +to add "page_owner=on" into your boot cmdline. If the kernel is built +with page owner and page owner is disabled in runtime due to no enabling +boot option, runtime overhead is marginal. If disabled in runtime, it +doesn't require memory to store owner information, so there is no runtime +memory overhead. And, page owner inserts just two unlikely branches into +the page allocator hotpath and if not enabled, then allocation is done +like as the kernel without page owner. These two unlikely branches should +not affect to allocation performance, especially if the static keys jump +label patching functionality is available. 
Following is the kernel's code +size change due to this facility. + +- Without page owner:: + + text data bss dec hex filename + 40662 1493 644 42799 a72f mm/page_alloc.o + +- With page owner:: + + text data bss dec hex filename + 40892 1493 644 43029 a815 mm/page_alloc.o + 1427 24 8 1459 5b3 mm/page_ext.o + 2722 50 0 2772 ad4 mm/page_owner.o + +Although, roughly, 4 KB code is added in total, page_alloc.o increase by +230 bytes and only half of it is in hotpath. Building the kernel with +page owner and turning it on if needed would be great option to debug +kernel memory problem. + +There is one notice that is caused by implementation detail. page owner +stores information into the memory from struct page extension. This memory +is initialized some time later than that page allocator starts in sparse +memory system, so, until initialization, many pages can be allocated and +they would have no owner information. To fix it up, these early allocated +pages are investigated and marked as allocated in initialization phase. +Although it doesn't mean that they have the right owner information, +at least, we can tell whether the page is allocated or not, +more accurately. On 2GB memory x86-64 VM box, 13343 early allocated pages +are catched and marked, although they are mostly allocated from struct +page extension feature. Anyway, after that, no page is left in +un-tracking state. + +Usage +===== + +1) Build user-space helper:: + + cd tools/vm + make page_owner_sort + +2) Enable page owner: add "page_owner=on" to boot cmdline. + +3) Do the job what you want to debug + +4) Analyze information from page owner:: + + cat /sys/kernel/debug/page_owner > page_owner_full.txt + grep -v ^PFN page_owner_full.txt > page_owner.txt + ./page_owner_sort page_owner.txt sorted_page_owner.txt + + See the result about who allocated each page + in the ``sorted_page_owner.txt``. 
diff --git a/Documentation/vm/page_owner.txt b/Documentation/vm/page_owner.txt deleted file mode 100644 index 0ed5ab8c7ab4..000000000000 --- a/Documentation/vm/page_owner.txt +++ /dev/null @@ -1,90 +0,0 @@ -.. _page_owner: - -================================================== -page owner: Tracking about who allocated each page -================================================== - -Introduction -============ - -page owner is for the tracking about who allocated each page. -It can be used to debug memory leak or to find a memory hogger. -When allocation happens, information about allocation such as call stack -and order of pages is stored into certain storage for each page. -When we need to know about status of all pages, we can get and analyze -this information. - -Although we already have tracepoint for tracing page allocation/free, -using it for analyzing who allocate each page is rather complex. We need -to enlarge the trace buffer for preventing overlapping until userspace -program launched. And, launched program continually dump out the trace -buffer for later analysis and it would change system behviour with more -possibility rather than just keeping it in memory, so bad for debugging. - -page owner can also be used for various purposes. For example, accurate -fragmentation statistics can be obtained through gfp flag information of -each page. It is already implemented and activated if page owner is -enabled. Other usages are more than welcome. - -page owner is disabled in default. So, if you'd like to use it, you need -to add "page_owner=on" into your boot cmdline. If the kernel is built -with page owner and page owner is disabled in runtime due to no enabling -boot option, runtime overhead is marginal. If disabled in runtime, it -doesn't require memory to store owner information, so there is no runtime -memory overhead. 
And, page owner inserts just two unlikely branches into -the page allocator hotpath and if not enabled, then allocation is done -like as the kernel without page owner. These two unlikely branches should -not affect to allocation performance, especially if the static keys jump -label patching functionality is available. Following is the kernel's code -size change due to this facility. - -- Without page owner:: - - text data bss dec hex filename - 40662 1493 644 42799 a72f mm/page_alloc.o - -- With page owner:: - - text data bss dec hex filename - 40892 1493 644 43029 a815 mm/page_alloc.o - 1427 24 8 1459 5b3 mm/page_ext.o - 2722 50 0 2772 ad4 mm/page_owner.o - -Although, roughly, 4 KB code is added in total, page_alloc.o increase by -230 bytes and only half of it is in hotpath. Building the kernel with -page owner and turning it on if needed would be great option to debug -kernel memory problem. - -There is one notice that is caused by implementation detail. page owner -stores information into the memory from struct page extension. This memory -is initialized some time later than that page allocator starts in sparse -memory system, so, until initialization, many pages can be allocated and -they would have no owner information. To fix it up, these early allocated -pages are investigated and marked as allocated in initialization phase. -Although it doesn't mean that they have the right owner information, -at least, we can tell whether the page is allocated or not, -more accurately. On 2GB memory x86-64 VM box, 13343 early allocated pages -are catched and marked, although they are mostly allocated from struct -page extension feature. Anyway, after that, no page is left in -un-tracking state. - -Usage -===== - -1) Build user-space helper:: - - cd tools/vm - make page_owner_sort - -2) Enable page owner: add "page_owner=on" to boot cmdline. 
- -3) Do the job what you want to debug - -4) Analyze information from page owner:: - - cat /sys/kernel/debug/page_owner > page_owner_full.txt - grep -v ^PFN page_owner_full.txt > page_owner.txt - ./page_owner_sort page_owner.txt sorted_page_owner.txt - - See the result about who allocated each page - in the ``sorted_page_owner.txt``. diff --git a/Documentation/vm/pagemap.rst b/Documentation/vm/pagemap.rst new file mode 100644 index 000000000000..d54b4bfd3043 --- /dev/null +++ b/Documentation/vm/pagemap.rst @@ -0,0 +1,197 @@ +.. _pagemap: + +====================================== +pagemap from the Userspace Perspective +====================================== + +pagemap is a new (as of 2.6.25) set of interfaces in the kernel that allow +userspace programs to examine the page tables and related information by +reading files in ``/proc``. + +There are four components to pagemap: + + * ``/proc/pid/pagemap``. This file lets a userspace process find out which + physical frame each virtual page is mapped to. It contains one 64-bit + value for each virtual page, containing the following data (from + fs/proc/task_mmu.c, above pagemap_read): + + * Bits 0-54 page frame number (PFN) if present + * Bits 0-4 swap type if swapped + * Bits 5-54 swap offset if swapped + * Bit 55 pte is soft-dirty (see Documentation/vm/soft-dirty.rst) + * Bit 56 page exclusively mapped (since 4.2) + * Bits 57-60 zero + * Bit 61 page is file-page or shared-anon (since 3.5) + * Bit 62 page swapped + * Bit 63 page present + + Since Linux 4.0 only users with the CAP_SYS_ADMIN capability can get PFNs. + In 4.0 and 4.1 opens by unprivileged fail with -EPERM. Starting from + 4.2 the PFN field is zeroed if the user does not have CAP_SYS_ADMIN. + Reason: information about PFNs helps in exploiting Rowhammer vulnerability. + + If the page is not present but in swap, then the PFN contains an + encoding of the swap file number and the page's offset into the + swap. Unmapped pages return a null PFN. 
This allows determining + precisely which pages are mapped (or in swap) and comparing mapped + pages between processes. + + Efficient users of this interface will use /proc/pid/maps to + determine which areas of memory are actually mapped and llseek to + skip over unmapped regions. + + * ``/proc/kpagecount``. This file contains a 64-bit count of the number of + times each page is mapped, indexed by PFN. + + * ``/proc/kpageflags``. This file contains a 64-bit set of flags for each + page, indexed by PFN. + + The flags are (from ``fs/proc/page.c``, above kpageflags_read): + + 0. LOCKED + 1. ERROR + 2. REFERENCED + 3. UPTODATE + 4. DIRTY + 5. LRU + 6. ACTIVE + 7. SLAB + 8. WRITEBACK + 9. RECLAIM + 10. BUDDY + 11. MMAP + 12. ANON + 13. SWAPCACHE + 14. SWAPBACKED + 15. COMPOUND_HEAD + 16. COMPOUND_TAIL + 17. HUGE + 18. UNEVICTABLE + 19. HWPOISON + 20. NOPAGE + 21. KSM + 22. THP + 23. BALLOON + 24. ZERO_PAGE + 25. IDLE + + * ``/proc/kpagecgroup``. This file contains a 64-bit inode number of the + memory cgroup each page is charged to, indexed by PFN. Only available when + CONFIG_MEMCG is set. + +Short descriptions to the page flags: +===================================== + +0 - LOCKED + page is being locked for exclusive access, eg. by undergoing read/write IO +7 - SLAB + page is managed by the SLAB/SLOB/SLUB/SLQB kernel memory allocator + When compound page is used, SLUB/SLQB will only set this flag on the head + page; SLOB will not flag it at all. +10 - BUDDY + a free memory block managed by the buddy system allocator + The buddy system organizes free memory in blocks of various orders. + An order N block has 2^N physically contiguous pages, with the BUDDY flag + set for and _only_ for the first page. +15 - COMPOUND_HEAD + A compound page with order N consists of 2^N physically contiguous pages. + A compound page with order 2 takes the form of "HTTT", where H donates its + head page and T donates its tail page(s). 
The major consumers of compound + pages are hugeTLB pages (Documentation/vm/hugetlbpage.rst), the SLUB etc. + memory allocators and various device drivers. However in this interface, + only huge/giga pages are made visible to end users. +16 - COMPOUND_TAIL + A compound page tail (see description above). +17 - HUGE + this is an integral part of a HugeTLB page +19 - HWPOISON + hardware detected memory corruption on this page: don't touch the data! +20 - NOPAGE + no page frame exists at the requested address +21 - KSM + identical memory pages dynamically shared between one or more processes +22 - THP + contiguous pages which construct transparent hugepages +23 - BALLOON + balloon compaction page +24 - ZERO_PAGE + zero page for pfn_zero or huge_zero page +25 - IDLE + page has not been accessed since it was marked idle (see + Documentation/vm/idle_page_tracking.rst). Note that this flag may be + stale in case the page was accessed via a PTE. To make sure the flag + is up-to-date one has to read ``/sys/kernel/mm/page_idle/bitmap`` first. + +IO related page flags +--------------------- + +1 - ERROR + IO error occurred +3 - UPTODATE + page has up-to-date data + ie. for file backed page: (in-memory data revision >= on-disk one) +4 - DIRTY + page has been written to, hence contains new data + ie. for file backed page: (in-memory data revision > on-disk one) +8 - WRITEBACK + page is being synced to disk + +LRU related page flags +---------------------- + +5 - LRU + page is in one of the LRU lists +6 - ACTIVE + page is in the active LRU list +18 - UNEVICTABLE + page is in the unevictable (non-)LRU list It is somehow pinned and + not a candidate for LRU page reclaims, eg. 
ramfs pages, + shmctl(SHM_LOCK) and mlock() memory segments +2 - REFERENCED + page has been referenced since last LRU list enqueue/requeue +9 - RECLAIM + page will be reclaimed soon after its pageout IO completed +11 - MMAP + a memory mapped page +12 - ANON + a memory mapped page that is not part of a file +13 - SWAPCACHE + page is mapped to swap space, ie. has an associated swap entry +14 - SWAPBACKED + page is backed by swap/RAM + +The page-types tool in the tools/vm directory can be used to query the +above flags. + +Using pagemap to do something useful +==================================== + +The general procedure for using pagemap to find out about a process' memory +usage goes like this: + + 1. Read ``/proc/pid/maps`` to determine which parts of the memory space are + mapped to what. + 2. Select the maps you are interested in -- all of them, or a particular + library, or the stack or the heap, etc. + 3. Open ``/proc/pid/pagemap`` and seek to the pages you would like to examine. + 4. Read a u64 for each page from pagemap. + 5. Open ``/proc/kpagecount`` and/or ``/proc/kpageflags``. For each PFN you + just read, seek to that entry in the file, and read the data you want. + +For example, to find the "unique set size" (USS), which is the amount of +memory that a process is using that is not shared with any other process, +you can go through every map in the process, find the PFNs, look those up +in kpagecount, and tally up the number of pages that are only referenced +once. + +Other notes +=========== + +Reading from any of the files will return -EINVAL if you are not starting +the read on an 8-byte boundary (e.g., if you sought an odd number of bytes +into the file), or if the size of the read is not a multiple of 8 bytes. + +Before Linux 3.11 pagemap bits 55-60 were used for "page-shift" (which is +always 12 at most architectures). Since Linux 3.11 their meaning changes +after first clear of soft-dirty bits. 
Since Linux 4.2 they are used for +flags unconditionally. diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt deleted file mode 100644 index bd6d71740c88..000000000000 --- a/Documentation/vm/pagemap.txt +++ /dev/null @@ -1,197 +0,0 @@ -.. _pagemap: - -====================================== -pagemap from the Userspace Perspective -====================================== - -pagemap is a new (as of 2.6.25) set of interfaces in the kernel that allow -userspace programs to examine the page tables and related information by -reading files in ``/proc``. - -There are four components to pagemap: - - * ``/proc/pid/pagemap``. This file lets a userspace process find out which - physical frame each virtual page is mapped to. It contains one 64-bit - value for each virtual page, containing the following data (from - fs/proc/task_mmu.c, above pagemap_read): - - * Bits 0-54 page frame number (PFN) if present - * Bits 0-4 swap type if swapped - * Bits 5-54 swap offset if swapped - * Bit 55 pte is soft-dirty (see Documentation/vm/soft-dirty.txt) - * Bit 56 page exclusively mapped (since 4.2) - * Bits 57-60 zero - * Bit 61 page is file-page or shared-anon (since 3.5) - * Bit 62 page swapped - * Bit 63 page present - - Since Linux 4.0 only users with the CAP_SYS_ADMIN capability can get PFNs. - In 4.0 and 4.1 opens by unprivileged fail with -EPERM. Starting from - 4.2 the PFN field is zeroed if the user does not have CAP_SYS_ADMIN. - Reason: information about PFNs helps in exploiting Rowhammer vulnerability. - - If the page is not present but in swap, then the PFN contains an - encoding of the swap file number and the page's offset into the - swap. Unmapped pages return a null PFN. This allows determining - precisely which pages are mapped (or in swap) and comparing mapped - pages between processes. - - Efficient users of this interface will use /proc/pid/maps to - determine which areas of memory are actually mapped and llseek to - skip over unmapped regions. 
- - * ``/proc/kpagecount``. This file contains a 64-bit count of the number of - times each page is mapped, indexed by PFN. - - * ``/proc/kpageflags``. This file contains a 64-bit set of flags for each - page, indexed by PFN. - - The flags are (from ``fs/proc/page.c``, above kpageflags_read): - - 0. LOCKED - 1. ERROR - 2. REFERENCED - 3. UPTODATE - 4. DIRTY - 5. LRU - 6. ACTIVE - 7. SLAB - 8. WRITEBACK - 9. RECLAIM - 10. BUDDY - 11. MMAP - 12. ANON - 13. SWAPCACHE - 14. SWAPBACKED - 15. COMPOUND_HEAD - 16. COMPOUND_TAIL - 17. HUGE - 18. UNEVICTABLE - 19. HWPOISON - 20. NOPAGE - 21. KSM - 22. THP - 23. BALLOON - 24. ZERO_PAGE - 25. IDLE - - * ``/proc/kpagecgroup``. This file contains a 64-bit inode number of the - memory cgroup each page is charged to, indexed by PFN. Only available when - CONFIG_MEMCG is set. - -Short descriptions to the page flags: -===================================== - -0 - LOCKED - page is being locked for exclusive access, eg. by undergoing read/write IO -7 - SLAB - page is managed by the SLAB/SLOB/SLUB/SLQB kernel memory allocator - When compound page is used, SLUB/SLQB will only set this flag on the head - page; SLOB will not flag it at all. -10 - BUDDY - a free memory block managed by the buddy system allocator - The buddy system organizes free memory in blocks of various orders. - An order N block has 2^N physically contiguous pages, with the BUDDY flag - set for and _only_ for the first page. -15 - COMPOUND_HEAD - A compound page with order N consists of 2^N physically contiguous pages. - A compound page with order 2 takes the form of "HTTT", where H donates its - head page and T donates its tail page(s). The major consumers of compound - pages are hugeTLB pages (Documentation/vm/hugetlbpage.txt), the SLUB etc. - memory allocators and various device drivers. However in this interface, - only huge/giga pages are made visible to end users. -16 - COMPOUND_TAIL - A compound page tail (see description above). 
-17 - HUGE - this is an integral part of a HugeTLB page -19 - HWPOISON - hardware detected memory corruption on this page: don't touch the data! -20 - NOPAGE - no page frame exists at the requested address -21 - KSM - identical memory pages dynamically shared between one or more processes -22 - THP - contiguous pages which construct transparent hugepages -23 - BALLOON - balloon compaction page -24 - ZERO_PAGE - zero page for pfn_zero or huge_zero page -25 - IDLE - page has not been accessed since it was marked idle (see - Documentation/vm/idle_page_tracking.txt). Note that this flag may be - stale in case the page was accessed via a PTE. To make sure the flag - is up-to-date one has to read ``/sys/kernel/mm/page_idle/bitmap`` first. - -IO related page flags ---------------------- - -1 - ERROR - IO error occurred -3 - UPTODATE - page has up-to-date data - ie. for file backed page: (in-memory data revision >= on-disk one) -4 - DIRTY - page has been written to, hence contains new data - ie. for file backed page: (in-memory data revision > on-disk one) -8 - WRITEBACK - page is being synced to disk - -LRU related page flags ----------------------- - -5 - LRU - page is in one of the LRU lists -6 - ACTIVE - page is in the active LRU list -18 - UNEVICTABLE - page is in the unevictable (non-)LRU list It is somehow pinned and - not a candidate for LRU page reclaims, eg. ramfs pages, - shmctl(SHM_LOCK) and mlock() memory segments -2 - REFERENCED - page has been referenced since last LRU list enqueue/requeue -9 - RECLAIM - page will be reclaimed soon after its pageout IO completed -11 - MMAP - a memory mapped page -12 - ANON - a memory mapped page that is not part of a file -13 - SWAPCACHE - page is mapped to swap space, ie. has an associated swap entry -14 - SWAPBACKED - page is backed by swap/RAM - -The page-types tool in the tools/vm directory can be used to query the -above flags. 
- -Using pagemap to do something useful -==================================== - -The general procedure for using pagemap to find out about a process' memory -usage goes like this: - - 1. Read ``/proc/pid/maps`` to determine which parts of the memory space are - mapped to what. - 2. Select the maps you are interested in -- all of them, or a particular - library, or the stack or the heap, etc. - 3. Open ``/proc/pid/pagemap`` and seek to the pages you would like to examine. - 4. Read a u64 for each page from pagemap. - 5. Open ``/proc/kpagecount`` and/or ``/proc/kpageflags``. For each PFN you - just read, seek to that entry in the file, and read the data you want. - -For example, to find the "unique set size" (USS), which is the amount of -memory that a process is using that is not shared with any other process, -you can go through every map in the process, find the PFNs, look those up -in kpagecount, and tally up the number of pages that are only referenced -once. - -Other notes -=========== - -Reading from any of the files will return -EINVAL if you are not starting -the read on an 8-byte boundary (e.g., if you sought an odd number of bytes -into the file), or if the size of the read is not a multiple of 8 bytes. - -Before Linux 3.11 pagemap bits 55-60 were used for "page-shift" (which is -always 12 at most architectures). Since Linux 3.11 their meaning changes -after first clear of soft-dirty bits. Since Linux 4.2 they are used for -flags unconditionally. diff --git a/Documentation/vm/remap_file_pages.rst b/Documentation/vm/remap_file_pages.rst new file mode 100644 index 000000000000..7bef6718e3a9 --- /dev/null +++ b/Documentation/vm/remap_file_pages.rst @@ -0,0 +1,33 @@ +.. _remap_file_pages: + +============================== +remap_file_pages() system call +============================== + +The remap_file_pages() system call is used to create a nonlinear mapping, +that is, a mapping in which the pages of the file are mapped into a +nonsequential order in memory. 
The advantage of using remap_file_pages() +over using repeated calls to mmap(2) is that the former approach does not +require the kernel to create additional VMA (Virtual Memory Area) data +structures. + +Supporting of nonlinear mapping requires significant amount of non-trivial +code in kernel virtual memory subsystem including hot paths. Also to get +nonlinear mapping work kernel need a way to distinguish normal page table +entries from entries with file offset (pte_file). Kernel reserves flag in +PTE for this purpose. PTE flags are scarce resource especially on some CPU +architectures. It would be nice to free up the flag for other usage. + +Fortunately, there are not many users of remap_file_pages() in the wild. +It's only known that one enterprise RDBMS implementation uses the syscall +on 32-bit systems to map files bigger than can linearly fit into 32-bit +virtual address space. This use-case is not critical anymore since 64-bit +systems are widely available. + +The syscall is deprecated and replaced it with an emulation now. The +emulation creates new VMAs instead of nonlinear mappings. It's going to +work slower for rare users of remap_file_pages() but ABI is preserved. + +One side effect of emulation (apart from performance) is that user can hit +vm.max_map_count limit more easily due to additional VMAs. See comment for +DEFAULT_MAX_MAP_COUNT for more details on the limit. diff --git a/Documentation/vm/remap_file_pages.txt b/Documentation/vm/remap_file_pages.txt deleted file mode 100644 index 7bef6718e3a9..000000000000 --- a/Documentation/vm/remap_file_pages.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. _remap_file_pages: - -============================== -remap_file_pages() system call -============================== - -The remap_file_pages() system call is used to create a nonlinear mapping, -that is, a mapping in which the pages of the file are mapped into a -nonsequential order in memory. 
The advantage of using remap_file_pages() -over using repeated calls to mmap(2) is that the former approach does not -require the kernel to create additional VMA (Virtual Memory Area) data -structures. - -Supporting of nonlinear mapping requires significant amount of non-trivial -code in kernel virtual memory subsystem including hot paths. Also to get -nonlinear mapping work kernel need a way to distinguish normal page table -entries from entries with file offset (pte_file). Kernel reserves flag in -PTE for this purpose. PTE flags are scarce resource especially on some CPU -architectures. It would be nice to free up the flag for other usage. - -Fortunately, there are not many users of remap_file_pages() in the wild. -It's only known that one enterprise RDBMS implementation uses the syscall -on 32-bit systems to map files bigger than can linearly fit into 32-bit -virtual address space. This use-case is not critical anymore since 64-bit -systems are widely available. - -The syscall is deprecated and replaced it with an emulation now. The -emulation creates new VMAs instead of nonlinear mappings. It's going to -work slower for rare users of remap_file_pages() but ABI is preserved. - -One side effect of emulation (apart from performance) is that user can hit -vm.max_map_count limit more easily due to additional VMAs. See comment for -DEFAULT_MAX_MAP_COUNT for more details on the limit. diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst new file mode 100644 index 000000000000..3a775fd64e2d --- /dev/null +++ b/Documentation/vm/slub.rst @@ -0,0 +1,361 @@ +.. _slub: + +========================== +Short users guide for SLUB +========================== + +The basic philosophy of SLUB is very different from SLAB. SLAB +requires rebuilding the kernel to activate debug options for all +slab caches. SLUB always includes full debugging but it is off by default. 
+SLUB can enable debugging only for selected slabs in order to avoid +an impact on overall system performance which may make a bug more +difficult to find. + +In order to switch debugging on one can add an option ``slub_debug`` +to the kernel command line. That will enable full debugging for +all slabs. + +Typically one would then use the ``slabinfo`` command to get statistical +data and perform operation on the slabs. By default ``slabinfo`` only lists +slabs that have data in them. See "slabinfo -h" for more options when +running the command. ``slabinfo`` can be compiled with +:: + + gcc -o slabinfo tools/vm/slabinfo.c + +Some of the modes of operation of ``slabinfo`` require that slub debugging +be enabled on the command line. F.e. no tracking information will be +available without debugging on and validation can only partially +be performed if debugging was not switched on. + +Some more sophisticated uses of slub_debug: +------------------------------------------- + +Parameters may be given to ``slub_debug``. If none is specified then full +debugging is enabled. Format: + +slub_debug= + Enable options for all slabs +slub_debug=, + Enable options only for select slabs + + +Possible debug options are:: + + F Sanity checks on (enables SLAB_DEBUG_CONSISTENCY_CHECKS + Sorry SLAB legacy issues) + Z Red zoning + P Poisoning (object and padding) + U User tracking (free and alloc) + T Trace (please only use on single slabs) + A Toggle failslab filter mark for the cache + O Switch debugging off for caches that would have + caused higher minimum slab orders + - Switch all debugging off (useful if the kernel is + configured with CONFIG_SLUB_DEBUG_ON) + +F.e. in order to boot just with sanity checks and red zoning one would specify:: + + slub_debug=FZ + +Trying to find an issue in the dentry cache? Try:: + + slub_debug=,dentry + +to only enable debugging on the dentry cache. + +Red zoning and tracking may realign the slab. 
We can just apply sanity checks +to the dentry cache with:: + + slub_debug=F,dentry + +Debugging options may require the minimum possible slab order to increase as +a result of storing the metadata (for example, caches with PAGE_SIZE object +sizes). This has a higher likelihood of resulting in slab allocation errors +in low memory situations or if there's high fragmentation of memory. To +switch off debugging for such caches by default, use:: + + slub_debug=O + +In case you forgot to enable debugging on the kernel command line: It is +possible to enable debugging manually when the kernel is up. Look at the +contents of:: + + /sys/kernel/slab// + +Look at the writable files. Writing 1 to them will enable the +corresponding debug option. All options can be set on a slab that does +not contain objects. If the slab already contains objects then sanity checks +and tracing may only be enabled. The other options may cause the realignment +of objects. + +Careful with tracing: It may spew out lots of information and never stop if +used on the wrong slab. + +Slab merging +============ + +If no debug options are specified then SLUB may merge similar slabs together +in order to reduce overhead and increase cache hotness of objects. +``slabinfo -a`` displays which slabs were merged together. + +Slab validation +=============== + +SLUB can validate all objects if the kernel was booted with slub_debug. In +order to do so you must have the ``slabinfo`` tool. Then you can do +:: + + slabinfo -v + +which will test all objects. Output will be generated to the syslog. + +This also works in a more limited way if boot was without slab debug. +In that case ``slabinfo -v`` simply tests all reachable objects. Usually +these are in the cpu slabs and the partial slabs. Full slabs are not +tracked by SLUB in a non debug situation.
+ +Getting more performance +======================== + +To some degree SLUB's performance is limited by the need to take the +list_lock once in a while to deal with partial slabs. That overhead is +governed by the order of the allocation for each slab. The allocations +can be influenced by kernel parameters: + +.. slub_min_objects=x (default 4) +.. slub_min_order=x (default 0) +.. slub_max_order=x (default 3 (PAGE_ALLOC_COSTLY_ORDER)) + +``slub_min_objects`` + allows to specify how many objects must at least fit into one + slab in order for the allocation order to be acceptable. In + general slub will be able to perform this number of + allocations on a slab without consulting centralized resources + (list_lock) where contention may occur. + +``slub_min_order`` + specifies a minimum order of slabs. A similar effect like + ``slub_min_objects``. + +``slub_max_order`` + specifies the order at which ``slub_min_objects`` should no + longer be checked. This is useful to avoid SLUB trying to + generate super large order pages to fit ``slub_min_objects`` + of a slab cache with large object sizes into one high order + page. Setting command line parameter + ``debug_guardpage_minorder=N`` (N > 0), forces setting + ``slub_max_order`` to 0, which causes minimum possible order of + slabs allocation. + +SLUB Debug output +================= + +Here is a sample of slub debug output:: + + ==================================================================== + BUG kmalloc-8: Redzone overwritten + -------------------------------------------------------------------- + + INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc + INFO: Slab 0xc528c530 flags=0x400000c3 inuse=61 fp=0xc90f6d58 + INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58 + INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554 + + Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ + Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005 + Redzone 0xc90f6d28: 00 cc cc cc .
+ Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ + + [] dump_trace+0x63/0x1eb + [] show_trace_log_lvl+0x1a/0x2f + [] show_trace+0x12/0x14 + [] dump_stack+0x16/0x18 + [] object_err+0x143/0x14b + [] check_object+0x66/0x234 + [] __slab_free+0x239/0x384 + [] kfree+0xa6/0xc6 + [] get_modalias+0xb9/0xf5 + [] dmi_dev_uevent+0x27/0x3c + [] dev_uevent+0x1ad/0x1da + [] kobject_uevent_env+0x20a/0x45b + [] kobject_uevent+0xa/0xf + [] store_uevent+0x4f/0x58 + [] dev_attr_store+0x29/0x2f + [] sysfs_write_file+0x16e/0x19c + [] vfs_write+0xd1/0x15a + [] sys_write+0x3d/0x72 + [] sysenter_past_esp+0x5f/0x99 + [] 0xb7f7b410 + ======================= + + FIX kmalloc-8: Restoring Redzone 0xc90f6d28-0xc90f6d2b=0xcc + +If SLUB encounters a corrupted object (full detection requires the kernel +to be booted with slub_debug) then the following output will be dumped +into the syslog: + +1. Description of the problem encountered + + This will be a message in the system log starting with:: + + =============================================== + BUG : + ----------------------------------------------- + + INFO: - + INFO: Slab
+ INFO: Object
+ INFO: Allocated in age= cpu= pid= + INFO: Freed in age= cpu= + pid= + + (Object allocation / free information is only available if SLAB_STORE_USER is + set for the slab. slub_debug sets that option) + +2. The object contents if an object was involved. + + Various types of lines can follow the BUG SLUB line: + + Bytes b4
: + Shows a few bytes before the object where the problem was detected. + Can be useful if the corruption does not stop with the start of the + object. + + Object
: + The bytes of the object. If the object is inactive then the bytes + typically contain poison values. Any non-poison value shows a + corruption by a write after free. + + Redzone
: + The Redzone following the object. The Redzone is used to detect + writes after the object. All bytes should always have the same + value. If there is any deviation then it is due to a write after + the object boundary. + + (Redzone information is only available if SLAB_RED_ZONE is set. + slub_debug sets that option) + + Padding
: + Unused data to fill up the space in order to get the next object + properly aligned. In the debug case we make sure that there are + at least 4 bytes of padding. This allows the detection of writes + before the object. + +3. A stackdump + + The stackdump describes the location where the error was detected. The cause + of the corruption is may be more likely found by looking at the function that + allocated or freed the object. + +4. Report on how the problem was dealt with in order to ensure the continued + operation of the system. + + These are messages in the system log beginning with:: + + FIX : + + In the above sample SLUB found that the Redzone of an active object has + been overwritten. Here a string of 8 characters was written into a slab that + has the length of 8 characters. However, a 8 character string needs a + terminating 0. That zero has overwritten the first byte of the Redzone field. + After reporting the details of the issue encountered the FIX SLUB message + tells us that SLUB has restored the Redzone to its proper value and then + system operations continue. + +Emergency operations +==================== + +Minimal debugging (sanity checks alone) can be enabled by booting with:: + + slub_debug=F + +This will be generally be enough to enable the resiliency features of slub +which will keep the system running even if a bad kernel component will +keep corrupting objects. This may be important for production systems. +Performance will be impacted by the sanity checks and there will be a +continual stream of error messages to the syslog but no additional memory +will be used (unlike full debugging). + +No guarantees. The kernel component still needs to be fixed. 
Performance +may be optimized further by locating the slab that experiences corruption +and enabling debugging only for that cache. + +I.e.:: + + slub_debug=F,dentry + +If the corruption occurs by writing after the end of the object then it +may be advisable to enable a Redzone to avoid corrupting the beginning +of other objects:: + + slub_debug=FZ,dentry + +Extended slabinfo mode and plotting +=================================== + +The ``slabinfo`` tool has a special 'extended' ('-X') mode that includes: + - Slabcache Totals + - Slabs sorted by size (up to -N slabs, default 1) + - Slabs sorted by loss (up to -N slabs, default 1) + +Additionally, in this mode ``slabinfo`` does not dynamically scale +sizes (G/M/K) and reports everything in bytes (this functionality is +also available to other slabinfo modes via '-B' option) which makes +reporting more precise and accurate. Moreover, in some sense the `-X' +mode also simplifies the analysis of slabs' behaviour, because its +output can be plotted using the ``slabinfo-gnuplot.sh`` script. So it +pushes the analysis from looking through the numbers (tons of numbers) +to something easier -- visual analysis. + +To generate plots: + +a) collect slabinfo extended records, for example:: + + while [ 1 ]; do slabinfo -X >> FOO_STATS; sleep 1; done + +b) pass stats file(-s) to ``slabinfo-gnuplot.sh`` script:: + + slabinfo-gnuplot.sh FOO_STATS [FOO_STATS2 .. FOO_STATSN] + + The ``slabinfo-gnuplot.sh`` script will pre-process the collected records + and generates 3 png files (and 3 pre-processing cache files) per STATS + file: + - Slabcache Totals: FOO_STATS-totals.png + - Slabs sorted by size: FOO_STATS-slabs-by-size.png + - Slabs sorted by loss: FOO_STATS-slabs-by-loss.png + +Another use case, when ``slabinfo-gnuplot.sh`` can be useful, is when you +need to compare slabs' behaviour "prior to" and "after" some code +modification. 
To help you out there, ``slabinfo-gnuplot.sh`` script +can 'merge' the `Slabcache Totals` sections from different +measurements. To visually compare N plots: + +a) Collect as many STATS1, STATS2, .. STATSN files as you need:: + + while [ 1 ]; do slabinfo -X >> STATS; sleep 1; done + +b) Pre-process those STATS files:: + + slabinfo-gnuplot.sh STATS1 STATS2 .. STATSN + +c) Execute ``slabinfo-gnuplot.sh`` in '-t' mode, passing all of the + generated pre-processed \*-totals:: + + slabinfo-gnuplot.sh -t STATS1-totals STATS2-totals .. STATSN-totals + + This will produce a single plot (png file). + + Plots, expectedly, can be large so some fluctuations or small spikes + can go unnoticed. To deal with that, ``slabinfo-gnuplot.sh`` has two + options to 'zoom-in'/'zoom-out': + + a) ``-s %d,%d`` -- overwrites the default image width and height + b) ``-r %d,%d`` -- specifies a range of samples to use (for example, + in ``slabinfo -X >> FOO_STATS; sleep 1;`` case, using a ``-r + 40,60`` range will plot only samples collected between 40th and + 60th seconds). + +Christoph Lameter, May 30, 2007 +Sergey Senozhatsky, October 23, 2015 diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt deleted file mode 100644 index 3a775fd64e2d..000000000000 --- a/Documentation/vm/slub.txt +++ /dev/null @@ -1,361 +0,0 @@ -.. _slub: - -========================== -Short users guide for SLUB -========================== - -The basic philosophy of SLUB is very different from SLAB. SLAB -requires rebuilding the kernel to activate debug options for all -slab caches. SLUB always includes full debugging but it is off by default. -SLUB can enable debugging only for selected slabs in order to avoid -an impact on overall system performance which may make a bug more -difficult to find. - -In order to switch debugging on one can add an option ``slub_debug`` -to the kernel command line. That will enable full debugging for -all slabs. 
- -Typically one would then use the ``slabinfo`` command to get statistical -data and perform operation on the slabs. By default ``slabinfo`` only lists -slabs that have data in them. See "slabinfo -h" for more options when -running the command. ``slabinfo`` can be compiled with -:: - - gcc -o slabinfo tools/vm/slabinfo.c - -Some of the modes of operation of ``slabinfo`` require that slub debugging -be enabled on the command line. F.e. no tracking information will be -available without debugging on and validation can only partially -be performed if debugging was not switched on. - -Some more sophisticated uses of slub_debug: -------------------------------------------- - -Parameters may be given to ``slub_debug``. If none is specified then full -debugging is enabled. Format: - -slub_debug= - Enable options for all slabs -slub_debug=, - Enable options only for select slabs - - -Possible debug options are:: - - F Sanity checks on (enables SLAB_DEBUG_CONSISTENCY_CHECKS - Sorry SLAB legacy issues) - Z Red zoning - P Poisoning (object and padding) - U User tracking (free and alloc) - T Trace (please only use on single slabs) - A Toggle failslab filter mark for the cache - O Switch debugging off for caches that would have - caused higher minimum slab orders - - Switch all debugging off (useful if the kernel is - configured with CONFIG_SLUB_DEBUG_ON) - -F.e. in order to boot just with sanity checks and red zoning one would specify:: - - slub_debug=FZ - -Trying to find an issue in the dentry cache? Try:: - - slub_debug=,dentry - -to only enable debugging on the dentry cache. - -Red zoning and tracking may realign the slab. We can just apply sanity checks -to the dentry cache with:: - - slub_debug=F,dentry - -Debugging options may require the minimum possible slab order to increase as -a result of storing the metadata (for example, caches with PAGE_SIZE object -sizes). 
This has a higher liklihood of resulting in slab allocation errors -in low memory situations or if there's high fragmentation of memory. To -switch off debugging for such caches by default, use:: - - slub_debug=O - -In case you forgot to enable debugging on the kernel command line: It is -possible to enable debugging manually when the kernel is up. Look at the -contents of:: - - /sys/kernel/slab// - -Look at the writable files. Writing 1 to them will enable the -corresponding debug option. All options can be set on a slab that does -not contain objects. If the slab already contains objects then sanity checks -and tracing may only be enabled. The other options may cause the realignment -of objects. - -Careful with tracing: It may spew out lots of information and never stop if -used on the wrong slab. - -Slab merging -============ - -If no debug options are specified then SLUB may merge similar slabs together -in order to reduce overhead and increase cache hotness of objects. -``slabinfo -a`` displays which slabs were merged together. - -Slab validation -=============== - -SLUB can validate all object if the kernel was booted with slub_debug. In -order to do so you must have the ``slabinfo`` tool. Then you can do -:: - - slabinfo -v - -which will test all objects. Output will be generated to the syslog. - -This also works in a more limited way if boot was without slab debug. -In that case ``slabinfo -v`` simply tests all reachable objects. Usually -these are in the cpu slabs and the partial slabs. Full slabs are not -tracked by SLUB in a non debug situation. - -Getting more performance -======================== - -To some degree SLUB's performance is limited by the need to take the -list_lock once in a while to deal with partial slabs. That overhead is -governed by the order of the allocation for each slab. The allocations -can be influenced by kernel parameters: - -.. slub_min_objects=x (default 4) -.. slub_min_order=x (default 0) -.. 
slub_max_order=x (default 3 (PAGE_ALLOC_COSTLY_ORDER)) - -``slub_min_objects`` - allows to specify how many objects must at least fit into one - slab in order for the allocation order to be acceptable. In - general slub will be able to perform this number of - allocations on a slab without consulting centralized resources - (list_lock) where contention may occur. - -``slub_min_order`` - specifies a minim order of slabs. A similar effect like - ``slub_min_objects``. - -``slub_max_order`` - specified the order at which ``slub_min_objects`` should no - longer be checked. This is useful to avoid SLUB trying to - generate super large order pages to fit ``slub_min_objects`` - of a slab cache with large object sizes into one high order - page. Setting command line parameter - ``debug_guardpage_minorder=N`` (N > 0), forces setting - ``slub_max_order`` to 0, what cause minimum possible order of - slabs allocation. - -SLUB Debug output -================= - -Here is a sample of slub debug output:: - - ==================================================================== - BUG kmalloc-8: Redzone overwritten - -------------------------------------------------------------------- - - INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc - INFO: Slab 0xc528c530 flags=0x400000c3 inuse=61 fp=0xc90f6d58 - INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58 - INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554 - - Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ - Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005 - Redzone 0xc90f6d28: 00 cc cc cc . 
- Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ - - [] dump_trace+0x63/0x1eb - [] show_trace_log_lvl+0x1a/0x2f - [] show_trace+0x12/0x14 - [] dump_stack+0x16/0x18 - [] object_err+0x143/0x14b - [] check_object+0x66/0x234 - [] __slab_free+0x239/0x384 - [] kfree+0xa6/0xc6 - [] get_modalias+0xb9/0xf5 - [] dmi_dev_uevent+0x27/0x3c - [] dev_uevent+0x1ad/0x1da - [] kobject_uevent_env+0x20a/0x45b - [] kobject_uevent+0xa/0xf - [] store_uevent+0x4f/0x58 - [] dev_attr_store+0x29/0x2f - [] sysfs_write_file+0x16e/0x19c - [] vfs_write+0xd1/0x15a - [] sys_write+0x3d/0x72 - [] sysenter_past_esp+0x5f/0x99 - [] 0xb7f7b410 - ======================= - - FIX kmalloc-8: Restoring Redzone 0xc90f6d28-0xc90f6d2b=0xcc - -If SLUB encounters a corrupted object (full detection requires the kernel -to be booted with slub_debug) then the following output will be dumped -into the syslog: - -1. Description of the problem encountered - - This will be a message in the system log starting with:: - - =============================================== - BUG : - ----------------------------------------------- - - INFO: - - INFO: Slab
- INFO: Object
- INFO: Allocated in age= cpu= pid= - INFO: Freed in age= cpu= - pid= - - (Object allocation / free information is only available if SLAB_STORE_USER is - set for the slab. slub_debug sets that option) - -2. The object contents if an object was involved. - - Various types of lines can follow the BUG SLUB line: - - Bytes b4
: - Shows a few bytes before the object where the problem was detected. - Can be useful if the corruption does not stop with the start of the - object. - - Object
: - The bytes of the object. If the object is inactive then the bytes - typically contain poison values. Any non-poison value shows a - corruption by a write after free. - - Redzone
: - The Redzone following the object. The Redzone is used to detect - writes after the object. All bytes should always have the same - value. If there is any deviation then it is due to a write after - the object boundary. - - (Redzone information is only available if SLAB_RED_ZONE is set. - slub_debug sets that option) - - Padding
: - Unused data to fill up the space in order to get the next object - properly aligned. In the debug case we make sure that there are - at least 4 bytes of padding. This allows the detection of writes - before the object. - -3. A stackdump - - The stackdump describes the location where the error was detected. The cause - of the corruption is may be more likely found by looking at the function that - allocated or freed the object. - -4. Report on how the problem was dealt with in order to ensure the continued - operation of the system. - - These are messages in the system log beginning with:: - - FIX : - - In the above sample SLUB found that the Redzone of an active object has - been overwritten. Here a string of 8 characters was written into a slab that - has the length of 8 characters. However, a 8 character string needs a - terminating 0. That zero has overwritten the first byte of the Redzone field. - After reporting the details of the issue encountered the FIX SLUB message - tells us that SLUB has restored the Redzone to its proper value and then - system operations continue. - -Emergency operations -==================== - -Minimal debugging (sanity checks alone) can be enabled by booting with:: - - slub_debug=F - -This will be generally be enough to enable the resiliency features of slub -which will keep the system running even if a bad kernel component will -keep corrupting objects. This may be important for production systems. -Performance will be impacted by the sanity checks and there will be a -continual stream of error messages to the syslog but no additional memory -will be used (unlike full debugging). - -No guarantees. The kernel component still needs to be fixed. 
Performance -may be optimized further by locating the slab that experiences corruption -and enabling debugging only for that cache - -I.e.:: - - slub_debug=F,dentry - -If the corruption occurs by writing after the end of the object then it -may be advisable to enable a Redzone to avoid corrupting the beginning -of other objects:: - - slub_debug=FZ,dentry - -Extended slabinfo mode and plotting -=================================== - -The ``slabinfo`` tool has a special 'extended' ('-X') mode that includes: - - Slabcache Totals - - Slabs sorted by size (up to -N slabs, default 1) - - Slabs sorted by loss (up to -N slabs, default 1) - -Additionally, in this mode ``slabinfo`` does not dynamically scale -sizes (G/M/K) and reports everything in bytes (this functionality is -also available to other slabinfo modes via '-B' option) which makes -reporting more precise and accurate. Moreover, in some sense the `-X' -mode also simplifies the analysis of slabs' behaviour, because its -output can be plotted using the ``slabinfo-gnuplot.sh`` script. So it -pushes the analysis from looking through the numbers (tons of numbers) -to something easier -- visual analysis. - -To generate plots: - -a) collect slabinfo extended records, for example:: - - while [ 1 ]; do slabinfo -X >> FOO_STATS; sleep 1; done - -b) pass stats file(-s) to ``slabinfo-gnuplot.sh`` script:: - - slabinfo-gnuplot.sh FOO_STATS [FOO_STATS2 .. FOO_STATSN] - - The ``slabinfo-gnuplot.sh`` script will pre-processes the collected records - and generates 3 png files (and 3 pre-processing cache files) per STATS - file: - - Slabcache Totals: FOO_STATS-totals.png - - Slabs sorted by size: FOO_STATS-slabs-by-size.png - - Slabs sorted by loss: FOO_STATS-slabs-by-loss.png - -Another use case, when ``slabinfo-gnuplot.sh`` can be useful, is when you -need to compare slabs' behaviour "prior to" and "after" some code -modification. 
To help you out there, ``slabinfo-gnuplot.sh`` script -can 'merge' the `Slabcache Totals` sections from different -measurements. To visually compare N plots: - -a) Collect as many STATS1, STATS2, .. STATSN files as you need:: - - while [ 1 ]; do slabinfo -X >> STATS; sleep 1; done - -b) Pre-process those STATS files:: - - slabinfo-gnuplot.sh STATS1 STATS2 .. STATSN - -c) Execute ``slabinfo-gnuplot.sh`` in '-t' mode, passing all of the - generated pre-processed \*-totals:: - - slabinfo-gnuplot.sh -t STATS1-totals STATS2-totals .. STATSN-totals - - This will produce a single plot (png file). - - Plots, expectedly, can be large so some fluctuations or small spikes - can go unnoticed. To deal with that, ``slabinfo-gnuplot.sh`` has two - options to 'zoom-in'/'zoom-out': - - a) ``-s %d,%d`` -- overwrites the default image width and heigh - b) ``-r %d,%d`` -- specifies a range of samples to use (for example, - in ``slabinfo -X >> FOO_STATS; sleep 1;`` case, using a ``-r - 40,60`` range will plot only samples collected between 40th and - 60th seconds). - -Christoph Lameter, May 30, 2007 -Sergey Senozhatsky, October 23, 2015 diff --git a/Documentation/vm/soft-dirty.rst b/Documentation/vm/soft-dirty.rst new file mode 100644 index 000000000000..cb0cfd6672fa --- /dev/null +++ b/Documentation/vm/soft-dirty.rst @@ -0,0 +1,47 @@ +.. _soft_dirty: + +=============== +Soft-Dirty PTEs +=============== + +The soft-dirty is a bit on a PTE which helps to track which pages a task +writes to. In order to do this tracking one should + + 1. Clear soft-dirty bits from the task's PTEs. + + This is done by writing "4" into the ``/proc/PID/clear_refs`` file of the + task in question. + + 2. Wait some time. + + 3. Read soft-dirty bits from the PTEs. + + This is done by reading from the ``/proc/PID/pagemap``. The bit 55 of the + 64-bit qword is the soft-dirty one. If set, the respective PTE was + written to since step 1. 
+ + +Internally, to do this tracking, the writable bit is cleared from PTEs +when the soft-dirty bit is cleared. So, after this, when the task tries to +modify a page at some virtual address the #PF occurs and the kernel sets +the soft-dirty bit on the respective PTE. + +Note, that although all the task's address space is marked as r/o after the +soft-dirty bits clear, the #PF-s that occur after that are processed fast. +This is so, since the pages are still mapped to physical memory, and thus all +the kernel does is finds this fact out and puts both writable and soft-dirty +bits on the PTE. + +While in most cases tracking memory changes by #PF-s is more than enough +there is still a scenario when we can lose soft dirty bits -- a task +unmaps a previously mapped memory region and then maps a new one at exactly +the same place. When unmap is called, the kernel internally clears PTE values +including soft dirty bits. To notify user space application about such +memory region renewal the kernel always marks new memory regions (and +expanded regions) as soft dirty. + +This feature is actively used by the checkpoint-restore project. You +can find more details about it on http://criu.org + + +-- Pavel Emelyanov, Apr 9, 2013 diff --git a/Documentation/vm/soft-dirty.txt b/Documentation/vm/soft-dirty.txt deleted file mode 100644 index cb0cfd6672fa..000000000000 --- a/Documentation/vm/soft-dirty.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. _soft_dirty: - -=============== -Soft-Dirty PTEs -=============== - -The soft-dirty is a bit on a PTE which helps to track which pages a task -writes to. In order to do this tracking one should - - 1. Clear soft-dirty bits from the task's PTEs. - - This is done by writing "4" into the ``/proc/PID/clear_refs`` file of the - task in question. - - 2. Wait some time. - - 3. Read soft-dirty bits from the PTEs. - - This is done by reading from the ``/proc/PID/pagemap``. The bit 55 of the - 64-bit qword is the soft-dirty one. 
If set, the respective PTE was - written to since step 1. - - -Internally, to do this tracking, the writable bit is cleared from PTEs -when the soft-dirty bit is cleared. So, after this, when the task tries to -modify a page at some virtual address the #PF occurs and the kernel sets -the soft-dirty bit on the respective PTE. - -Note, that although all the task's address space is marked as r/o after the -soft-dirty bits clear, the #PF-s that occur after that are processed fast. -This is so, since the pages are still mapped to physical memory, and thus all -the kernel does is finds this fact out and puts both writable and soft-dirty -bits on the PTE. - -While in most cases tracking memory changes by #PF-s is more than enough -there is still a scenario when we can lose soft dirty bits -- a task -unmaps a previously mapped memory region and then maps a new one at exactly -the same place. When unmap is called, the kernel internally clears PTE values -including soft dirty bits. To notify user space application about such -memory region renewal the kernel always marks new memory regions (and -expanded regions) as soft dirty. - -This feature is actively used by the checkpoint-restore project. You -can find more details about it on http://criu.org - - --- Pavel Emelyanov, Apr 9, 2013 diff --git a/Documentation/vm/split_page_table_lock b/Documentation/vm/split_page_table_lock deleted file mode 100644 index 889b00be469f..000000000000 --- a/Documentation/vm/split_page_table_lock +++ /dev/null @@ -1,100 +0,0 @@ -.. _split_page_table_lock: - -===================== -Split page table lock -===================== - -Originally, mm->page_table_lock spinlock protected all page tables of the -mm_struct. But this approach leads to poor page fault scalability of -multi-threaded applications due high contention on the lock. To improve -scalability, split page table lock was introduced. - -With split page table lock we have separate per-table lock to serialize -access to the table. 
At the moment we use split lock for PTE and PMD -tables. Access to higher level tables protected by mm->page_table_lock. - -There are helpers to lock/unlock a table and other accessor functions: - - - pte_offset_map_lock() - maps pte and takes PTE table lock, returns pointer to the taken - lock; - - pte_unmap_unlock() - unlocks and unmaps PTE table; - - pte_alloc_map_lock() - allocates PTE table if needed and take the lock, returns pointer - to taken lock or NULL if allocation failed; - - pte_lockptr() - returns pointer to PTE table lock; - - pmd_lock() - takes PMD table lock, returns pointer to taken lock; - - pmd_lockptr() - returns pointer to PMD table lock; - -Split page table lock for PTE tables is enabled compile-time if -CONFIG_SPLIT_PTLOCK_CPUS (usually 4) is less or equal to NR_CPUS. -If split lock is disabled, all tables guaded by mm->page_table_lock. - -Split page table lock for PMD tables is enabled, if it's enabled for PTE -tables and the architecture supports it (see below). - -Hugetlb and split page table lock -================================= - -Hugetlb can support several page sizes. We use split lock only for PMD -level, but not for PUD. - -Hugetlb-specific helpers: - - - huge_pte_lock() - takes pmd split lock for PMD_SIZE page, mm->page_table_lock - otherwise; - - huge_pte_lockptr() - returns pointer to table lock; - -Support of split page table lock by an architecture -=================================================== - -There's no need in special enabling of PTE split page table lock: -everything required is done by pgtable_page_ctor() and pgtable_page_dtor(), -which must be called on PTE table allocation / freeing. - -Make sure the architecture doesn't use slab allocator for page table -allocation: slab uses page->slab_cache for its pages. -This field shares storage with page->ptl. - -PMD split lock only makes sense if you have more than two page table -levels. 
- -PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table -allocation and pgtable_pmd_page_dtor() on freeing. - -Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and -pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing -paths: i.e X86_PAE preallocate few PMDs on pgd_alloc(). - -With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK. - -NOTE: pgtable_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must -be handled properly. - -page->ptl -========= - -page->ptl is used to access split page table lock, where 'page' is struct -page of page containing the table. It shares storage with page->private -(and few other fields in union). - -To avoid increasing size of struct page and have best performance, we use a -trick: - - - if spinlock_t fits into long, we use page->ptr as spinlock, so we - can avoid indirect access and save a cache line. - - if size of spinlock_t is bigger then size of long, we use page->ptl as - pointer to spinlock_t and allocate it dynamically. This allows to use - split lock with enabled DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC, but costs - one more cache line for indirect access; - -The spinlock_t allocated in pgtable_page_ctor() for PTE table and in -pgtable_pmd_page_ctor() for PMD table. - -Please, never access page->ptl directly -- use appropriate helper. diff --git a/Documentation/vm/split_page_table_lock.rst b/Documentation/vm/split_page_table_lock.rst new file mode 100644 index 000000000000..889b00be469f --- /dev/null +++ b/Documentation/vm/split_page_table_lock.rst @@ -0,0 +1,100 @@ +.. _split_page_table_lock: + +===================== +Split page table lock +===================== + +Originally, mm->page_table_lock spinlock protected all page tables of the +mm_struct. But this approach leads to poor page fault scalability of +multi-threaded applications due high contention on the lock. To improve +scalability, split page table lock was introduced. 
+ +With split page table lock we have separate per-table lock to serialize +access to the table. At the moment we use split lock for PTE and PMD +tables. Access to higher level tables protected by mm->page_table_lock. + +There are helpers to lock/unlock a table and other accessor functions: + + - pte_offset_map_lock() + maps pte and takes PTE table lock, returns pointer to the taken + lock; + - pte_unmap_unlock() + unlocks and unmaps PTE table; + - pte_alloc_map_lock() + allocates PTE table if needed and take the lock, returns pointer + to taken lock or NULL if allocation failed; + - pte_lockptr() + returns pointer to PTE table lock; + - pmd_lock() + takes PMD table lock, returns pointer to taken lock; + - pmd_lockptr() + returns pointer to PMD table lock; + +Split page table lock for PTE tables is enabled compile-time if +CONFIG_SPLIT_PTLOCK_CPUS (usually 4) is less or equal to NR_CPUS. +If split lock is disabled, all tables are guarded by mm->page_table_lock. + +Split page table lock for PMD tables is enabled, if it's enabled for PTE +tables and the architecture supports it (see below). + +Hugetlb and split page table lock +================================= + +Hugetlb can support several page sizes. We use split lock only for PMD +level, but not for PUD. + +Hugetlb-specific helpers: + + - huge_pte_lock() + takes pmd split lock for PMD_SIZE page, mm->page_table_lock + otherwise; + - huge_pte_lockptr() + returns pointer to table lock; + +Support of split page table lock by an architecture +=================================================== + +There's no need in special enabling of PTE split page table lock: +everything required is done by pgtable_page_ctor() and pgtable_page_dtor(), +which must be called on PTE table allocation / freeing. + +Make sure the architecture doesn't use slab allocator for page table +allocation: slab uses page->slab_cache for its pages. +This field shares storage with page->ptl. 
+ +PMD split lock only makes sense if you have more than two page table +levels. + +PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table +allocation and pgtable_pmd_page_dtor() on freeing. + +Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and +pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing +paths: i.e X86_PAE preallocate few PMDs on pgd_alloc(). + +With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK. + +NOTE: pgtable_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must +be handled properly. + +page->ptl +========= + +page->ptl is used to access split page table lock, where 'page' is struct +page of page containing the table. It shares storage with page->private +(and few other fields in union). + +To avoid increasing size of struct page and have best performance, we use a +trick: + + - if spinlock_t fits into long, we use page->ptr as spinlock, so we + can avoid indirect access and save a cache line. + - if size of spinlock_t is bigger then size of long, we use page->ptl as + pointer to spinlock_t and allocate it dynamically. This allows to use + split lock with enabled DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC, but costs + one more cache line for indirect access; + +The spinlock_t allocated in pgtable_page_ctor() for PTE table and in +pgtable_pmd_page_ctor() for PMD table. + +Please, never access page->ptl directly -- use appropriate helper. diff --git a/Documentation/vm/swap_numa.rst b/Documentation/vm/swap_numa.rst new file mode 100644 index 000000000000..e0466f2db8fa --- /dev/null +++ b/Documentation/vm/swap_numa.rst @@ -0,0 +1,80 @@ +.. 
_swap_numa: + +=========================================== +Automatically bind swap device to numa node +=========================================== + +If the system has more than one swap device and swap device has the node +information, we can make use of this information to decide which swap +device to use in get_swap_pages() to get better performance. + + +How to use this feature +======================= + +Swap device has priority and that decides the order of it to be used. To make +use of automatically binding, there is no need to manipulate priority settings +for swap devices. e.g. on a 2 node machine, assume 2 swap devices swapA and +swapB, with swapA attached to node 0 and swapB attached to node 1, are going +to be swapped on. Simply swapping them on by doing:: + + # swapon /dev/swapA + # swapon /dev/swapB + +Then node 0 will use the two swap devices in the order of swapA then swapB and +node 1 will use the two swap devices in the order of swapB then swapA. Note +that the order of them being swapped on doesn't matter. + +A more complex example on a 4 node machine. Assume 6 swap devices are going to +be swapped on: swapA and swapB are attached to node 0, swapC is attached to +node 1, swapD and swapE are attached to node 2 and swapF is attached to node 3. +The way to swap them on is the same as above:: + + # swapon /dev/swapA + # swapon /dev/swapB + # swapon /dev/swapC + # swapon /dev/swapD + # swapon /dev/swapE + # swapon /dev/swapF + +Then node 0 will use them in the order of:: + + swapA/swapB -> swapC -> swapD -> swapE -> swapF + +swapA and swapB will be used in a round robin mode before any other swap device. + +node 1 will use them in the order of:: + + swapC -> swapA -> swapB -> swapD -> swapE -> swapF + +node 2 will use them in the order of:: + + swapD/swapE -> swapA -> swapB -> swapC -> swapF + +Similarly, swapD and swapE will be used in a round robin mode before any +other swap devices. 
+ +node 3 will use them in the order of:: + + swapF -> swapA -> swapB -> swapC -> swapD -> swapE + + +Implementation details +====================== + +The current code uses a priority based list, swap_avail_list, to decide +which swap device to use and if multiple swap devices share the same +priority, they are used round robin. This change here replaces the single +global swap_avail_list with a per-numa-node list, i.e. for each numa node, +it sees its own priority based list of available swap devices. Swap +device's priority can be promoted on its matching node's swap_avail_list. + +The current swap device's priority is set as: user can set a >=0 value, +or the system will pick one starting from -1 then downwards. The priority +value in the swap_avail_list is the negated value of the swap device's +due to plist being sorted from low to high. The new policy doesn't change +the semantics for priority >=0 cases, the previous starting from -1 then +downwards now becomes starting from -2 then downwards and -1 is reserved +as the promoted value. So if multiple swap devices are attached to the same +node, they will all be promoted to priority -1 on that node's plist and will +be used round robin before any other swap devices. diff --git a/Documentation/vm/swap_numa.txt b/Documentation/vm/swap_numa.txt deleted file mode 100644 index e0466f2db8fa..000000000000 --- a/Documentation/vm/swap_numa.txt +++ /dev/null @@ -1,80 +0,0 @@ -.. _swap_numa: - -=========================================== -Automatically bind swap device to numa node -=========================================== - -If the system has more than one swap device and swap device has the node -information, we can make use of this information to decide which swap -device to use in get_swap_pages() to get better performance. - - -How to use this feature -======================= - -Swap device has priority and that decides the order of it to be used. 
To make -use of automatically binding, there is no need to manipulate priority settings -for swap devices. e.g. on a 2 node machine, assume 2 swap devices swapA and -swapB, with swapA attached to node 0 and swapB attached to node 1, are going -to be swapped on. Simply swapping them on by doing:: - - # swapon /dev/swapA - # swapon /dev/swapB - -Then node 0 will use the two swap devices in the order of swapA then swapB and -node 1 will use the two swap devices in the order of swapB then swapA. Note -that the order of them being swapped on doesn't matter. - -A more complex example on a 4 node machine. Assume 6 swap devices are going to -be swapped on: swapA and swapB are attached to node 0, swapC is attached to -node 1, swapD and swapE are attached to node 2 and swapF is attached to node3. -The way to swap them on is the same as above:: - - # swapon /dev/swapA - # swapon /dev/swapB - # swapon /dev/swapC - # swapon /dev/swapD - # swapon /dev/swapE - # swapon /dev/swapF - -Then node 0 will use them in the order of:: - - swapA/swapB -> swapC -> swapD -> swapE -> swapF - -swapA and swapB will be used in a round robin mode before any other swap device. - -node 1 will use them in the order of:: - - swapC -> swapA -> swapB -> swapD -> swapE -> swapF - -node 2 will use them in the order of:: - - swapD/swapE -> swapA -> swapB -> swapC -> swapF - -Similaly, swapD and swapE will be used in a round robin mode before any -other swap devices. - -node 3 will use them in the order of:: - - swapF -> swapA -> swapB -> swapC -> swapD -> swapE - - -Implementation details -====================== - -The current code uses a priority based list, swap_avail_list, to decide -which swap device to use and if multiple swap devices share the same -priority, they are used round robin. This change here replaces the single -global swap_avail_list with a per-numa-node list, i.e. for each numa node, -it sees its own priority based list of available swap devices. 
Swap -device's priority can be promoted on its matching node's swap_avail_list. - -The current swap device's priority is set as: user can set a >=0 value, -or the system will pick one starting from -1 then downwards. The priority -value in the swap_avail_list is the negated value of the swap device's -due to plist being sorted from low to high. The new policy doesn't change -the semantics for priority >=0 cases, the previous starting from -1 then -downwards now becomes starting from -2 then downwards and -1 is reserved -as the promoted value. So if multiple swap devices are attached to the same -node, they will all be promoted to priority -1 on that node's plist and will -be used round robin before any other swap devices. diff --git a/Documentation/vm/transhuge.rst b/Documentation/vm/transhuge.rst new file mode 100644 index 000000000000..569d182cc973 --- /dev/null +++ b/Documentation/vm/transhuge.rst @@ -0,0 +1,573 @@ +.. _transhuge: + +============================ +Transparent Hugepage Support +============================ + +Objective +========= + +Performance critical computing applications dealing with large memory +working sets are already running on top of libhugetlbfs and in turn +hugetlbfs. Transparent Hugepage Support is an alternative means of +using huge pages for the backing of virtual memory with huge pages +that supports the automatic promotion and demotion of page sizes and +without the shortcomings of hugetlbfs. + +Currently it only works for anonymous memory mappings and tmpfs/shmem. +But in the future it can expand to other filesystems. + +The reason applications are running faster is because of two +factors. The first factor is almost completely irrelevant and it's not +of significant interest because it'll also have the downside of +requiring larger clear-page copy-page in page faults which is a +potentially negative effect. 
The first factor consists in taking a +single page fault for each 2M virtual region touched by userland (so +reducing the enter/exit kernel frequency by a 512 times factor). This +only matters the first time the memory is accessed for the lifetime of +a memory mapping. The second long lasting and much more important +factor will affect all subsequent accesses to the memory for the whole +runtime of the application. The second factor consist of two +components: 1) the TLB miss will run faster (especially with +virtualization using nested pagetables but almost always also on bare +metal without virtualization) and 2) a single TLB entry will be +mapping a much larger amount of virtual memory in turn reducing the +number of TLB misses. With virtualization and nested pagetables the +TLB can be mapped of larger size only if both KVM and the Linux guest +are using hugepages but a significant speedup already happens if only +one of the two is using hugepages just because of the fact the TLB +miss is going to run faster. + +Design +====== + +- "graceful fallback": mm components which don't have transparent hugepage + knowledge fall back to breaking huge pmd mapping into table of ptes and, + if necessary, split a transparent hugepage. Therefore these components + can continue working on the regular pages or regular pte mappings. 
+ +- if a hugepage allocation fails because of memory fragmentation, + regular pages should be gracefully allocated instead and mixed in + the same vma without any failure or significant delay and without + userland noticing + +- if some task quits and more hugepages become available (either + immediately in the buddy or through the VM), guest physical memory + backed by regular pages should be relocated on hugepages + automatically (with khugepaged) + +- it doesn't require memory reservation and in turn it uses hugepages + whenever possible (the only possible reservation here is kernelcore= + to avoid unmovable pages to fragment all the memory but such a tweak + is not specific to transparent hugepage support and it's a generic + feature that applies to all dynamic high order allocations in the + kernel) + +Transparent Hugepage Support maximizes the usefulness of free memory +if compared to the reservation approach of hugetlbfs by allowing all +unused memory to be used as cache or other movable (or even unmovable +entities). It doesn't require reservation to prevent hugepage +allocation failures to be noticeable from userland. It allows paging +and all other advanced VM features to be available on the +hugepages. It requires no modifications for applications to take +advantage of it. + +Applications however can be further optimized to take advantage of +this feature, like for example they've been optimized before to avoid +a flood of mmap system calls for every malloc(4k). Optimizing userland +is by far not mandatory and khugepaged already can take care of long +lived page allocations even for hugepage unaware applications that +deals with large amounts of memory. + +In certain cases when hugepages are enabled system wide, application +may end up allocating more memory resources. An application may mmap a +large region but only touch 1 byte of it, in that case a 2M page might +be allocated instead of a 4k page for no good. 
This is why it's +possible to disable hugepages system-wide and to only have them inside +MADV_HUGEPAGE madvise regions. + +Embedded systems should enable hugepages only inside madvise regions +to eliminate any risk of wasting any precious byte of memory and to +only run faster. + +Applications that gets a lot of benefit from hugepages and that don't +risk to lose memory by using hugepages, should use +madvise(MADV_HUGEPAGE) on their critical mmapped regions. + +sysfs +===== + +Transparent Hugepage Support for anonymous memory can be entirely disabled +(mostly for debugging purposes) or only enabled inside MADV_HUGEPAGE +regions (to avoid the risk of consuming more memory resources) or enabled +system wide. This can be achieved with one of:: + + echo always >/sys/kernel/mm/transparent_hugepage/enabled + echo madvise >/sys/kernel/mm/transparent_hugepage/enabled + echo never >/sys/kernel/mm/transparent_hugepage/enabled + +It's also possible to limit defrag efforts in the VM to generate +anonymous hugepages in case they're not immediately free to madvise +regions or to never try to defrag memory and simply fallback to regular +pages unless hugepages are immediately available. Clearly if we spend CPU +time to defrag memory, we would expect to gain even more by the fact we +use hugepages later instead of regular pages. This isn't always +guaranteed, but it may be more likely in case the allocation is for a +MADV_HUGEPAGE region. + +:: + + echo always >/sys/kernel/mm/transparent_hugepage/defrag + echo defer >/sys/kernel/mm/transparent_hugepage/defrag + echo defer+madvise >/sys/kernel/mm/transparent_hugepage/defrag + echo madvise >/sys/kernel/mm/transparent_hugepage/defrag + echo never >/sys/kernel/mm/transparent_hugepage/defrag + +always + means that an application requesting THP will stall on + allocation failure and directly reclaim pages and compact + memory in an effort to allocate a THP immediately. 
This may be + desirable for virtual machines that benefit heavily from THP + use and are willing to delay the VM start to utilise them. + +defer + means that an application will wake kswapd in the background + to reclaim pages and wake kcompactd to compact memory so that + THP is available in the near future. It's the responsibility + of khugepaged to then install the THP pages later. + +defer+madvise + will enter direct reclaim and compaction like ``always``, but + only for regions that have used madvise(MADV_HUGEPAGE); all + other regions will wake kswapd in the background to reclaim + pages and wake kcompactd to compact memory so that THP is + available in the near future. + +madvise + will enter direct reclaim like ``always`` but only for regions + that have used madvise(MADV_HUGEPAGE). This is the default + behaviour. + +never + should be self-explanatory. + +By default kernel tries to use huge zero page on read page fault to +anonymous mapping. It's possible to disable huge zero page by writing 0 +or enable it back by writing 1:: + + echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page + echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page + +Some userspace (such as a test program, or an optimized memory allocation +library) may want to know the size (in bytes) of a transparent hugepage:: + + cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size + +khugepaged will be automatically started when +transparent_hugepage/enabled is set to "always" or "madvise", and it'll +be automatically shutdown if it's set to "never". + +khugepaged runs usually at low frequency so while one may not want to +invoke defrag algorithms synchronously during the page faults, it +should be worth invoking defrag at least in khugepaged. 
However it's +also possible to disable defrag in khugepaged by writing 0 or enable +defrag in khugepaged by writing 1:: + + echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag + echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag + +You can also control how many pages khugepaged should scan at each +pass:: + + /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan + +and how many milliseconds to wait in khugepaged between each pass (you +can set this to 0 to run khugepaged at 100% utilization of one core):: + + /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs + +and how many milliseconds to wait in khugepaged if there's an hugepage +allocation failure to throttle the next allocation attempt:: + + /sys/kernel/mm/transparent_hugepage/khugepaged/alloc_sleep_millisecs + +The khugepaged progress can be seen in the number of pages collapsed:: + + /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed + +for each pass:: + + /sys/kernel/mm/transparent_hugepage/khugepaged/full_scans + +``max_ptes_none`` specifies how many extra small pages (that are +not already mapped) can be allocated when collapsing a group +of small pages into one large page:: + + /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none + +A higher value leads to use additional memory for programs. +A lower value leads to gain less thp performance. Value of +max_ptes_none can waste cpu time very little, you can +ignore it. + +``max_ptes_swap`` specifies how many pages can be brought in from +swap when collapsing a group of pages into a transparent huge page:: + + /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap + +A higher value can cause excessive swap IO and waste +memory. A lower value can prevent THPs from being +collapsed, resulting in fewer pages being collapsed into +THPs, and lower memory access performance. 
+ +Boot parameter +============== + +You can change the sysfs boot time defaults of Transparent Hugepage +Support by passing the parameter ``transparent_hugepage=always`` or +``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` +to the kernel command line. + +Hugepages in tmpfs/shmem +======================== + +You can control hugepage allocation policy in tmpfs with mount option +``huge=``. It can have following values: + +always + Attempt to allocate huge pages every time we need a new page; + +never + Do not allocate huge pages; + +within_size + Only allocate huge page if it will be fully within i_size. + Also respect fadvise()/madvise() hints; + +advise + Only allocate huge pages if requested with fadvise()/madvise(); + +The default policy is ``never``. + +``mount -o remount,huge= /mountpoint`` works fine after mount: remounting +``huge=never`` will not attempt to break up huge pages at all, just stop more +from being allocated. + +There's also sysfs knob to control hugepage allocation policy for internal +shmem mount: /sys/kernel/mm/transparent_hugepage/shmem_enabled. The mount +is used for SysV SHM, memfds, shared anonymous mmaps (of /dev/zero or +MAP_ANONYMOUS), GPU drivers' DRM objects, Ashmem. + +In addition to policies listed above, shmem_enabled allows two further +values: + +deny + For use in emergencies, to force the huge option off from + all mounts; +force + Force the huge option on for all - very useful for testing; + +Need of application restart +=========================== + +The transparent_hugepage/enabled values and tmpfs mount option only affect +future behavior. So to make them effective you need to restart any +application that could have been using hugepages. This also applies to the +regions registered in khugepaged. + +Monitoring usage +================ + +The number of anonymous transparent huge pages currently used by the +system is available by reading the AnonHugePages field in ``/proc/meminfo``. 
+To identify what applications are using anonymous transparent huge pages, +it is necessary to read ``/proc/PID/smaps`` and count the AnonHugePages fields +for each mapping. + +The number of file transparent huge pages mapped to userspace is available +by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``. +To identify what applications are mapping file transparent huge pages, it +is necessary to read ``/proc/PID/smaps`` and count the FileHugeMapped fields +for each mapping. + +Note that reading the smaps file is expensive and reading it +frequently will incur overhead. + +There are a number of counters in ``/proc/vmstat`` that may be used to +monitor how successfully the system is providing huge pages for use. + +thp_fault_alloc + is incremented every time a huge page is successfully + allocated to handle a page fault. This applies to both the + first time a page is faulted and for COW faults. + +thp_collapse_alloc + is incremented by khugepaged when it has found + a range of pages to collapse into one huge page and has + successfully allocated a new huge page to store the data. + +thp_fault_fallback + is incremented if a page fault fails to allocate + a huge page and instead falls back to using small pages. + +thp_collapse_alloc_failed + is incremented if khugepaged found a range + of pages that should be collapsed into one huge page but failed + the allocation. + +thp_file_alloc + is incremented every time a file huge page is successfully + allocated. + +thp_file_mapped + is incremented every time a file huge page is mapped into + user address space. + +thp_split_page + is incremented every time a huge page is split into base + pages. This can happen for a variety of reasons but a common + reason is that a huge page is old and is being reclaimed. + This action implies splitting all PMD the page mapped with. + +thp_split_page_failed + is incremented if kernel fails to split huge + page. This can happen if the page was pinned by somebody. 
+ +thp_deferred_split_page + is incremented when a huge page is put onto split + queue. This happens when a huge page is partially unmapped and + splitting it would free up some memory. Pages on split queue are + going to be split under memory pressure. + +thp_split_pmd + is incremented every time a PMD split into table of PTEs. + This can happen, for instance, when application calls mprotect() or + munmap() on part of huge page. It doesn't split huge page, only + page table entry. + +thp_zero_page_alloc + is incremented every time a huge zero page is + successfully allocated. It includes allocations which were + dropped due to race with other allocation. Note, it doesn't count + every map of the huge zero page, only its allocation. + +thp_zero_page_alloc_failed + is incremented if kernel fails to allocate + huge zero page and falls back to using small pages. + +As the system ages, allocating huge pages may be expensive as the +system uses memory compaction to copy data around memory to free a +huge page for use. There are some counters in ``/proc/vmstat`` to help +monitor this overhead. + +compact_stall + is incremented every time a process stalls to run + memory compaction so that a huge page is free for use. + +compact_success + is incremented if the system compacted memory and + freed a huge page for use. + +compact_fail + is incremented if the system tries to compact memory + but failed. + +compact_pages_moved + is incremented each time a page is moved. If + this value is increasing rapidly, it implies that the system + is copying a lot of data to satisfy the huge page allocation. + It is possible that the cost of copying exceeds any savings + from reduced TLB misses. + +compact_pagemigrate_failed + is incremented when the underlying mechanism + for moving a page failed. + +compact_blocks_moved + is incremented each time memory compaction examines + a huge page aligned range of pages. 
+ +It is possible to establish how long the stalls were using the function +tracer to record how long was spent in __alloc_pages_nodemask and +using the mm_page_alloc tracepoint to identify which allocations were +for huge pages. + +get_user_pages and follow_page +============================== + +get_user_pages and follow_page if run on a hugepage, will return the +head or tail pages as usual (exactly as they would do on +hugetlbfs). Most gup users will only care about the actual physical +address of the page and its temporary pinning to release after the I/O +is complete, so they won't ever notice the fact the page is huge. But +if any driver is going to mangle over the page structure of the tail +page (like for checking page->mapping or other bits that are relevant +for the head page and not the tail page), it should be updated to jump +to check head page instead. Taking reference on any head/tail page would +prevent page from being split by anyone. + +.. note:: + these aren't new constraints to the GUP API, and they match the + same constraints that apply to hugetlbfs too, so any driver capable + of handling GUP on hugetlbfs will also work fine on transparent + hugepage backed mappings. + +In case you can't handle compound pages if they're returned by +follow_page, the FOLL_SPLIT bit can be specified as parameter to +follow_page, so that it will split the hugepages before returning +them. Migration for example passes FOLL_SPLIT as parameter to +follow_page because it's not hugepage aware and in fact it can't work +at all on hugetlbfs (but it instead works fine on transparent +hugepages thanks to FOLL_SPLIT). migration simply can't deal with +hugepages being returned (as it's not only checking the pfn of the +page and pinning it during the copy but it pretends to migrate the +memory in regular page sizes and with regular pte/pmd mappings). 
+ +Optimizing the applications +=========================== + +To be guaranteed that the kernel will map a 2M page immediately in any +memory region, the mmap region has to be hugepage naturally +aligned. posix_memalign() can provide that guarantee. + +Hugetlbfs +========= + +You can use hugetlbfs on a kernel that has transparent hugepage +support enabled just fine as always. No difference can be noted in +hugetlbfs other than there will be less overall fragmentation. All +usual features belonging to hugetlbfs are preserved and +unaffected. libhugetlbfs will also work fine as usual. + +Graceful fallback +================= + +Code walking pagetables but unaware about huge pmds can simply call +split_huge_pmd(vma, pmd, addr) where the pmd is the one returned by +pmd_offset. It's trivial to make the code transparent hugepage aware +by just grepping for "pmd_offset" and adding split_huge_pmd where +missing after pmd_offset returns the pmd. Thanks to the graceful +fallback design, with a one liner change, you can avoid to write +hundred if not thousand of lines of complex code to make your code +hugepage aware. + +If you're not walking pagetables but you run into a physical hugepage +but you can't handle it natively in your code, you can split it by +calling split_huge_page(page). This is what the Linux VM does before +it tries to swapout the hugepage for example. split_huge_page() can fail +if the page is pinned and you must handle this correctly. 
+ +Example to make mremap.c transparent hugepage aware with a one liner +change:: + + diff --git a/mm/mremap.c b/mm/mremap.c + --- a/mm/mremap.c + +++ b/mm/mremap.c + @@ -41,6 +41,7 @@ static pmd_t *get_old_pmd(struct mm_stru + return NULL; + + pmd = pmd_offset(pud, addr); + + split_huge_pmd(vma, pmd, addr); + if (pmd_none_or_clear_bad(pmd)) + return NULL; + +Locking in hugepage aware code +============================== + +We want as much code as possible hugepage aware, as calling +split_huge_page() or split_huge_pmd() has a cost. + +To make pagetable walks huge pmd aware, all you need to do is to call +pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the +mmap_sem in read (or write) mode to be sure an huge pmd cannot be +created from under you by khugepaged (khugepaged collapse_huge_page +takes the mmap_sem in write mode in addition to the anon_vma lock). If +pmd_trans_huge returns false, you just fallback in the old code +paths. If instead pmd_trans_huge returns true, you have to take the +page table lock (pmd_lock()) and re-run pmd_trans_huge. Taking the +page table lock will prevent the huge pmd to be converted into a +regular pmd from under you (split_huge_pmd can run in parallel to the +pagetable walk). If the second pmd_trans_huge returns false, you +should just drop the page table lock and fallback to the old code as +before. Otherwise you can proceed to process the huge pmd and the +hugepage natively. Once finished you can drop the page table lock. + +Refcounts and transparent huge pages +==================================== + +Refcounting on THP is mostly consistent with refcounting on other compound +pages: + + - get_page()/put_page() and GUP operate in head page's ->_refcount. + + - ->_refcount in tail pages is always zero: get_page_unless_zero() never + succeed on tail pages. + + - map/unmap of the pages with PTE entry increment/decrement ->_mapcount + on relevant sub-page of the compound page. 
+ + - map/unmap of the whole compound page accounted in compound_mapcount + (stored in first tail page). For file huge pages, we also increment + ->_mapcount of all sub-pages in order to have race-free detection of + last unmap of subpages. + +PageDoubleMap() indicates that the page is *possibly* mapped with PTEs. + +For anonymous pages PageDoubleMap() also indicates ->_mapcount in all +subpages is offset up by one. This additional reference is required to +get race-free detection of unmap of subpages when we have them mapped with +both PMDs and PTEs. + +This is optimization required to lower overhead of per-subpage mapcount +tracking. The alternative is alter ->_mapcount in all subpages on each +map/unmap of the whole compound page. + +For anonymous pages, we set PG_double_map when a PMD of the page got split +for the first time, but still have PMD mapping. The additional references +go away with last compound_mapcount. + +File pages get PG_double_map set on first map of the page with PTE and +goes away when the page gets evicted from page cache. + +split_huge_page internally has to distribute the refcounts in the head +page to the tail pages before clearing all PG_head/tail bits from the page +structures. It can be done easily for refcounts taken by page table +entries. But we don't have enough information on how to distribute any +additional pins (i.e. from get_user_pages). split_huge_page() fails any +requests to split pinned huge page: it expects page count to be equal to +sum of mapcount of all sub-pages plus one (split_huge_page caller must +have reference for head page). + +split_huge_page uses migration entries to stabilize page->_refcount and +page->_mapcount of anonymous pages. File pages just got unmapped. + +We are safe against physical memory scanners too: the only legitimate way +scanner can get reference to a page is get_page_unless_zero(). + +All tail pages have zero ->_refcount until atomic_add(). 
This prevents the +scanner from getting a reference to the tail page up to that point. After the +atomic_add() we don't care about the ->_refcount value. We already know how +many references should be uncharged from the head page. + +For head page get_page_unless_zero() will succeed and we don't mind. It's +clear where reference should go after split: it will stay on head page. + +Note that split_huge_pmd() doesn't have any limitation on refcounting: +pmd can be split at any point and never fails. + +Partial unmap and deferred_split_huge_page() +============================================ + +Unmapping part of THP (with munmap() or other way) is not going to free +memory immediately. Instead, we detect that a subpage of THP is not in use +in page_remove_rmap() and queue the THP for splitting if memory pressure +comes. Splitting will free up unused subpages. + +Splitting the page right away is not an option due to locking context in +the place where we can detect partial unmap. It also might be +counterproductive since in many cases partial unmap happens during exit(2) if +a THP crosses a VMA boundary. + +Function deferred_split_huge_page() is used to queue page for splitting. +The splitting itself will happen when we get memory pressure via shrinker +interface. diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt deleted file mode 100644 index 569d182cc973..000000000000 --- a/Documentation/vm/transhuge.txt +++ /dev/null @@ -1,573 +0,0 @@ -.. _transhuge: - -============================ -Transparent Hugepage Support -============================ - -Objective -========= - -Performance critical computing applications dealing with large memory -working sets are already running on top of libhugetlbfs and in turn -hugetlbfs. 
Transparent Hugepage Support is an alternative means of -using huge pages for the backing of virtual memory with huge pages -that supports the automatic promotion and demotion of page sizes and -without the shortcomings of hugetlbfs. - -Currently it only works for anonymous memory mappings and tmpfs/shmem. -But in the future it can expand to other filesystems. - -The reason applications are running faster is because of two -factors. The first factor is almost completely irrelevant and it's not -of significant interest because it'll also have the downside of -requiring larger clear-page copy-page in page faults which is a -potentially negative effect. The first factor consists in taking a -single page fault for each 2M virtual region touched by userland (so -reducing the enter/exit kernel frequency by a 512 times factor). This -only matters the first time the memory is accessed for the lifetime of -a memory mapping. The second long lasting and much more important -factor will affect all subsequent accesses to the memory for the whole -runtime of the application. The second factor consist of two -components: 1) the TLB miss will run faster (especially with -virtualization using nested pagetables but almost always also on bare -metal without virtualization) and 2) a single TLB entry will be -mapping a much larger amount of virtual memory in turn reducing the -number of TLB misses. With virtualization and nested pagetables the -TLB can be mapped of larger size only if both KVM and the Linux guest -are using hugepages but a significant speedup already happens if only -one of the two is using hugepages just because of the fact the TLB -miss is going to run faster. - -Design -====== - -- "graceful fallback": mm components which don't have transparent hugepage - knowledge fall back to breaking huge pmd mapping into table of ptes and, - if necessary, split a transparent hugepage. Therefore these components - can continue working on the regular pages or regular pte mappings. 
- -- if a hugepage allocation fails because of memory fragmentation, - regular pages should be gracefully allocated instead and mixed in - the same vma without any failure or significant delay and without - userland noticing - -- if some task quits and more hugepages become available (either - immediately in the buddy or through the VM), guest physical memory - backed by regular pages should be relocated on hugepages - automatically (with khugepaged) - -- it doesn't require memory reservation and in turn it uses hugepages - whenever possible (the only possible reservation here is kernelcore= - to avoid unmovable pages to fragment all the memory but such a tweak - is not specific to transparent hugepage support and it's a generic - feature that applies to all dynamic high order allocations in the - kernel) - -Transparent Hugepage Support maximizes the usefulness of free memory -if compared to the reservation approach of hugetlbfs by allowing all -unused memory to be used as cache or other movable (or even unmovable -entities). It doesn't require reservation to prevent hugepage -allocation failures to be noticeable from userland. It allows paging -and all other advanced VM features to be available on the -hugepages. It requires no modifications for applications to take -advantage of it. - -Applications however can be further optimized to take advantage of -this feature, like for example they've been optimized before to avoid -a flood of mmap system calls for every malloc(4k). Optimizing userland -is by far not mandatory and khugepaged already can take care of long -lived page allocations even for hugepage unaware applications that -deals with large amounts of memory. - -In certain cases when hugepages are enabled system wide, application -may end up allocating more memory resources. An application may mmap a -large region but only touch 1 byte of it, in that case a 2M page might -be allocated instead of a 4k page for no good. 
This is why it's -possible to disable hugepages system-wide and to only have them inside -MADV_HUGEPAGE madvise regions. - -Embedded systems should enable hugepages only inside madvise regions -to eliminate any risk of wasting any precious byte of memory and to -only run faster. - -Applications that gets a lot of benefit from hugepages and that don't -risk to lose memory by using hugepages, should use -madvise(MADV_HUGEPAGE) on their critical mmapped regions. - -sysfs -===== - -Transparent Hugepage Support for anonymous memory can be entirely disabled -(mostly for debugging purposes) or only enabled inside MADV_HUGEPAGE -regions (to avoid the risk of consuming more memory resources) or enabled -system wide. This can be achieved with one of:: - - echo always >/sys/kernel/mm/transparent_hugepage/enabled - echo madvise >/sys/kernel/mm/transparent_hugepage/enabled - echo never >/sys/kernel/mm/transparent_hugepage/enabled - -It's also possible to limit defrag efforts in the VM to generate -anonymous hugepages in case they're not immediately free to madvise -regions or to never try to defrag memory and simply fallback to regular -pages unless hugepages are immediately available. Clearly if we spend CPU -time to defrag memory, we would expect to gain even more by the fact we -use hugepages later instead of regular pages. This isn't always -guaranteed, but it may be more likely in case the allocation is for a -MADV_HUGEPAGE region. - -:: - - echo always >/sys/kernel/mm/transparent_hugepage/defrag - echo defer >/sys/kernel/mm/transparent_hugepage/defrag - echo defer+madvise >/sys/kernel/mm/transparent_hugepage/defrag - echo madvise >/sys/kernel/mm/transparent_hugepage/defrag - echo never >/sys/kernel/mm/transparent_hugepage/defrag - -always - means that an application requesting THP will stall on - allocation failure and directly reclaim pages and compact - memory in an effort to allocate a THP immediately. 
This may be - desirable for virtual machines that benefit heavily from THP - use and are willing to delay the VM start to utilise them. - -defer - means that an application will wake kswapd in the background - to reclaim pages and wake kcompactd to compact memory so that - THP is available in the near future. It's the responsibility - of khugepaged to then install the THP pages later. - -defer+madvise - will enter direct reclaim and compaction like ``always``, but - only for regions that have used madvise(MADV_HUGEPAGE); all - other regions will wake kswapd in the background to reclaim - pages and wake kcompactd to compact memory so that THP is - available in the near future. - -madvise - will enter direct reclaim like ``always`` but only for regions - that are have used madvise(MADV_HUGEPAGE). This is the default - behaviour. - -never - should be self-explanatory. - -By default kernel tries to use huge zero page on read page fault to -anonymous mapping. It's possible to disable huge zero page by writing 0 -or enable it back by writing 1:: - - echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page - echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page - -Some userspace (such as a test program, or an optimized memory allocation -library) may want to know the size (in bytes) of a transparent hugepage:: - - cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size - -khugepaged will be automatically started when -transparent_hugepage/enabled is set to "always" or "madvise, and it'll -be automatically shutdown if it's set to "never". - -khugepaged runs usually at low frequency so while one may not want to -invoke defrag algorithms synchronously during the page faults, it -should be worth invoking defrag at least in khugepaged. 
However it's -also possible to disable defrag in khugepaged by writing 0 or enable -defrag in khugepaged by writing 1:: - - echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag - echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag - -You can also control how many pages khugepaged should scan at each -pass:: - - /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan - -and how many milliseconds to wait in khugepaged between each pass (you -can set this to 0 to run khugepaged at 100% utilization of one core):: - - /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs - -and how many milliseconds to wait in khugepaged if there's an hugepage -allocation failure to throttle the next allocation attempt:: - - /sys/kernel/mm/transparent_hugepage/khugepaged/alloc_sleep_millisecs - -The khugepaged progress can be seen in the number of pages collapsed:: - - /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed - -for each pass:: - - /sys/kernel/mm/transparent_hugepage/khugepaged/full_scans - -``max_ptes_none`` specifies how many extra small pages (that are -not already mapped) can be allocated when collapsing a group -of small pages into one large page:: - - /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none - -A higher value leads to use additional memory for programs. -A lower value leads to gain less thp performance. Value of -max_ptes_none can waste cpu time very little, you can -ignore it. - -``max_ptes_swap`` specifies how many pages can be brought in from -swap when collapsing a group of pages into a transparent huge page:: - - /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap - -A higher value can cause excessive swap IO and waste -memory. A lower value can prevent THPs from being -collapsed, resulting fewer pages being collapsed into -THPs, and lower memory access performance. 
- -Boot parameter -============== - -You can change the sysfs boot time defaults of Transparent Hugepage -Support by passing the parameter ``transparent_hugepage=always`` or -``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` -to the kernel command line. - -Hugepages in tmpfs/shmem -======================== - -You can control hugepage allocation policy in tmpfs with mount option -``huge=``. It can have following values: - -always - Attempt to allocate huge pages every time we need a new page; - -never - Do not allocate huge pages; - -within_size - Only allocate huge page if it will be fully within i_size. - Also respect fadvise()/madvise() hints; - -advise - Only allocate huge pages if requested with fadvise()/madvise(); - -The default policy is ``never``. - -``mount -o remount,huge= /mountpoint`` works fine after mount: remounting -``huge=never`` will not attempt to break up huge pages at all, just stop more -from being allocated. - -There's also sysfs knob to control hugepage allocation policy for internal -shmem mount: /sys/kernel/mm/transparent_hugepage/shmem_enabled. The mount -is used for SysV SHM, memfds, shared anonymous mmaps (of /dev/zero or -MAP_ANONYMOUS), GPU drivers' DRM objects, Ashmem. - -In addition to policies listed above, shmem_enabled allows two further -values: - -deny - For use in emergencies, to force the huge option off from - all mounts; -force - Force the huge option on for all - very useful for testing; - -Need of application restart -=========================== - -The transparent_hugepage/enabled values and tmpfs mount option only affect -future behavior. So to make them effective you need to restart any -application that could have been using hugepages. This also applies to the -regions registered in khugepaged. - -Monitoring usage -================ - -The number of anonymous transparent huge pages currently used by the -system is available by reading the AnonHugePages field in ``/proc/meminfo``. 
-To identify what applications are using anonymous transparent huge pages, -it is necessary to read ``/proc/PID/smaps`` and count the AnonHugePages fields -for each mapping. - -The number of file transparent huge pages mapped to userspace is available -by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``. -To identify what applications are mapping file transparent huge pages, it -is necessary to read ``/proc/PID/smaps`` and count the FileHugeMapped fields -for each mapping. - -Note that reading the smaps file is expensive and reading it -frequently will incur overhead. - -There are a number of counters in ``/proc/vmstat`` that may be used to -monitor how successfully the system is providing huge pages for use. - -thp_fault_alloc - is incremented every time a huge page is successfully - allocated to handle a page fault. This applies to both the - first time a page is faulted and for COW faults. - -thp_collapse_alloc - is incremented by khugepaged when it has found - a range of pages to collapse into one huge page and has - successfully allocated a new huge page to store the data. - -thp_fault_fallback - is incremented if a page fault fails to allocate - a huge page and instead falls back to using small pages. - -thp_collapse_alloc_failed - is incremented if khugepaged found a range - of pages that should be collapsed into one huge page but failed - the allocation. - -thp_file_alloc - is incremented every time a file huge page is successfully - allocated. - -thp_file_mapped - is incremented every time a file huge page is mapped into - user address space. - -thp_split_page - is incremented every time a huge page is split into base - pages. This can happen for a variety of reasons but a common - reason is that a huge page is old and is being reclaimed. - This action implies splitting all PMD the page mapped with. - -thp_split_page_failed - is incremented if kernel fails to split huge - page. This can happen if the page was pinned by somebody. 
- -thp_deferred_split_page - is incremented when a huge page is put onto split - queue. This happens when a huge page is partially unmapped and - splitting it would free up some memory. Pages on split queue are - going to be split under memory pressure. - -thp_split_pmd - is incremented every time a PMD split into table of PTEs. - This can happen, for instance, when application calls mprotect() or - munmap() on part of huge page. It doesn't split huge page, only - page table entry. - -thp_zero_page_alloc - is incremented every time a huge zero page is - successfully allocated. It includes allocations which where - dropped due race with other allocation. Note, it doesn't count - every map of the huge zero page, only its allocation. - -thp_zero_page_alloc_failed - is incremented if kernel fails to allocate - huge zero page and falls back to using small pages. - -As the system ages, allocating huge pages may be expensive as the -system uses memory compaction to copy data around memory to free a -huge page for use. There are some counters in ``/proc/vmstat`` to help -monitor this overhead. - -compact_stall - is incremented every time a process stalls to run - memory compaction so that a huge page is free for use. - -compact_success - is incremented if the system compacted memory and - freed a huge page for use. - -compact_fail - is incremented if the system tries to compact memory - but failed. - -compact_pages_moved - is incremented each time a page is moved. If - this value is increasing rapidly, it implies that the system - is copying a lot of data to satisfy the huge page allocation. - It is possible that the cost of copying exceeds any savings - from reduced TLB misses. - -compact_pagemigrate_failed - is incremented when the underlying mechanism - for moving a page failed. - -compact_blocks_moved - is incremented each time memory compaction examines - a huge page aligned range of pages. 
- -It is possible to establish how long the stalls were using the function -tracer to record how long was spent in __alloc_pages_nodemask and -using the mm_page_alloc tracepoint to identify which allocations were -for huge pages. - -get_user_pages and follow_page -============================== - -get_user_pages and follow_page if run on a hugepage, will return the -head or tail pages as usual (exactly as they would do on -hugetlbfs). Most gup users will only care about the actual physical -address of the page and its temporary pinning to release after the I/O -is complete, so they won't ever notice the fact the page is huge. But -if any driver is going to mangle over the page structure of the tail -page (like for checking page->mapping or other bits that are relevant -for the head page and not the tail page), it should be updated to jump -to check head page instead. Taking reference on any head/tail page would -prevent page from being split by anyone. - -.. note:: - these aren't new constraints to the GUP API, and they match the - same constrains that applies to hugetlbfs too, so any driver capable - of handling GUP on hugetlbfs will also work fine on transparent - hugepage backed mappings. - -In case you can't handle compound pages if they're returned by -follow_page, the FOLL_SPLIT bit can be specified as parameter to -follow_page, so that it will split the hugepages before returning -them. Migration for example passes FOLL_SPLIT as parameter to -follow_page because it's not hugepage aware and in fact it can't work -at all on hugetlbfs (but it instead works fine on transparent -hugepages thanks to FOLL_SPLIT). migration simply can't deal with -hugepages being returned (as it's not only checking the pfn of the -page and pinning it during the copy but it pretends to migrate the -memory in regular page sizes and with regular pte/pmd mappings). 
- -Optimizing the applications -=========================== - -To be guaranteed that the kernel will map a 2M page immediately in any -memory region, the mmap region has to be hugepage naturally -aligned. posix_memalign() can provide that guarantee. - -Hugetlbfs -========= - -You can use hugetlbfs on a kernel that has transparent hugepage -support enabled just fine as always. No difference can be noted in -hugetlbfs other than there will be less overall fragmentation. All -usual features belonging to hugetlbfs are preserved and -unaffected. libhugetlbfs will also work fine as usual. - -Graceful fallback -================= - -Code walking pagetables but unaware about huge pmds can simply call -split_huge_pmd(vma, pmd, addr) where the pmd is the one returned by -pmd_offset. It's trivial to make the code transparent hugepage aware -by just grepping for "pmd_offset" and adding split_huge_pmd where -missing after pmd_offset returns the pmd. Thanks to the graceful -fallback design, with a one liner change, you can avoid to write -hundred if not thousand of lines of complex code to make your code -hugepage aware. - -If you're not walking pagetables but you run into a physical hugepage -but you can't handle it natively in your code, you can split it by -calling split_huge_page(page). This is what the Linux VM does before -it tries to swapout the hugepage for example. split_huge_page() can fail -if the page is pinned and you must handle this correctly. 
- -Example to make mremap.c transparent hugepage aware with a one liner -change:: - - diff --git a/mm/mremap.c b/mm/mremap.c - --- a/mm/mremap.c - +++ b/mm/mremap.c - @@ -41,6 +41,7 @@ static pmd_t *get_old_pmd(struct mm_stru - return NULL; - - pmd = pmd_offset(pud, addr); - + split_huge_pmd(vma, pmd, addr); - if (pmd_none_or_clear_bad(pmd)) - return NULL; - -Locking in hugepage aware code -============================== - -We want as much code as possible hugepage aware, as calling -split_huge_page() or split_huge_pmd() has a cost. - -To make pagetable walks huge pmd aware, all you need to do is to call -pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the -mmap_sem in read (or write) mode to be sure an huge pmd cannot be -created from under you by khugepaged (khugepaged collapse_huge_page -takes the mmap_sem in write mode in addition to the anon_vma lock). If -pmd_trans_huge returns false, you just fallback in the old code -paths. If instead pmd_trans_huge returns true, you have to take the -page table lock (pmd_lock()) and re-run pmd_trans_huge. Taking the -page table lock will prevent the huge pmd to be converted into a -regular pmd from under you (split_huge_pmd can run in parallel to the -pagetable walk). If the second pmd_trans_huge returns false, you -should just drop the page table lock and fallback to the old code as -before. Otherwise you can proceed to process the huge pmd and the -hugepage natively. Once finished you can drop the page table lock. - -Refcounts and transparent huge pages -==================================== - -Refcounting on THP is mostly consistent with refcounting on other compound -pages: - - - get_page()/put_page() and GUP operate in head page's ->_refcount. - - - ->_refcount in tail pages is always zero: get_page_unless_zero() never - succeed on tail pages. - - - map/unmap of the pages with PTE entry increment/decrement ->_mapcount - on relevant sub-page of the compound page. 
- - - map/unmap of the whole compound page accounted in compound_mapcount - (stored in first tail page). For file huge pages, we also increment - ->_mapcount of all sub-pages in order to have race-free detection of - last unmap of subpages. - -PageDoubleMap() indicates that the page is *possibly* mapped with PTEs. - -For anonymous pages PageDoubleMap() also indicates ->_mapcount in all -subpages is offset up by one. This additional reference is required to -get race-free detection of unmap of subpages when we have them mapped with -both PMDs and PTEs. - -This is optimization required to lower overhead of per-subpage mapcount -tracking. The alternative is alter ->_mapcount in all subpages on each -map/unmap of the whole compound page. - -For anonymous pages, we set PG_double_map when a PMD of the page got split -for the first time, but still have PMD mapping. The additional references -go away with last compound_mapcount. - -File pages get PG_double_map set on first map of the page with PTE and -goes away when the page gets evicted from page cache. - -split_huge_page internally has to distribute the refcounts in the head -page to the tail pages before clearing all PG_head/tail bits from the page -structures. It can be done easily for refcounts taken by page table -entries. But we don't have enough information on how to distribute any -additional pins (i.e. from get_user_pages). split_huge_page() fails any -requests to split pinned huge page: it expects page count to be equal to -sum of mapcount of all sub-pages plus one (split_huge_page caller must -have reference for head page). - -split_huge_page uses migration entries to stabilize page->_refcount and -page->_mapcount of anonymous pages. File pages just got unmapped. - -We safe against physical memory scanners too: the only legitimate way -scanner can get reference to a page is get_page_unless_zero(). - -All tail pages have zero ->_refcount until atomic_add(). 
This prevents the -scanner from getting a reference to the tail page up to that point. After the -atomic_add() we don't care about the ->_refcount value. We already known how -many references should be uncharged from the head page. - -For head page get_page_unless_zero() will succeed and we don't mind. It's -clear where reference should go after split: it will stay on head page. - -Note that split_huge_pmd() doesn't have any limitation on refcounting: -pmd can be split at any point and never fails. - -Partial unmap and deferred_split_huge_page() -============================================ - -Unmapping part of THP (with munmap() or other way) is not going to free -memory immediately. Instead, we detect that a subpage of THP is not in use -in page_remove_rmap() and queue the THP for splitting if memory pressure -comes. Splitting will free up unused subpages. - -Splitting the page right away is not an option due to locking context in -the place where we can detect partial unmap. It's also might be -counterproductive since in many cases partial unmap happens during exit(2) if -a THP crosses a VMA boundary. - -Function deferred_split_huge_page() is used to queue page for splitting. -The splitting itself will happen when we get memory pressure via shrinker -interface. diff --git a/Documentation/vm/unevictable-lru.rst b/Documentation/vm/unevictable-lru.rst new file mode 100644 index 000000000000..fdd84cb8d511 --- /dev/null +++ b/Documentation/vm/unevictable-lru.rst @@ -0,0 +1,614 @@ +.. _unevictable_lru: + +============================== +Unevictable LRU Infrastructure +============================== + +.. contents:: :local: + + +Introduction +============ + +This document describes the Linux memory manager's "Unevictable LRU" +infrastructure and the use of this to manage several types of "unevictable" +pages. 
+ +The document attempts to provide the overall rationale behind this mechanism +and the rationale for some of the design decisions that drove the +implementation. The latter design rationale is discussed in the context of an +implementation description. Admittedly, one can obtain the implementation +details - the "what does it do?" - by reading the code. One hopes that the +descriptions below add value by provide the answer to "why does it do that?". + + + +The Unevictable LRU +=================== + +The Unevictable LRU facility adds an additional LRU list to track unevictable +pages and to hide these pages from vmscan. This mechanism is based on a patch +by Larry Woodman of Red Hat to address several scalability problems with page +reclaim in Linux. The problems have been observed at customer sites on large +memory x86_64 systems. + +To illustrate this with an example, a non-NUMA x86_64 platform with 128GB of +main memory will have over 32 million 4k pages in a single zone. When a large +fraction of these pages are not evictable for any reason [see below], vmscan +will spend a lot of time scanning the LRU lists looking for the small fraction +of pages that are evictable. This can result in a situation where all CPUs are +spending 100% of their time in vmscan for hours or days on end, with the system +completely unresponsive. + +The unevictable list addresses the following classes of unevictable pages: + + * Those owned by ramfs. + + * Those mapped into SHM_LOCK'd shared memory regions. + + * Those mapped into VM_LOCKED [mlock()ed] VMAs. + +The infrastructure may also be able to handle other conditions that make pages +unevictable, either by definition or by circumstance, in the future. 
+ + +The Unevictable Page List +------------------------- + +The Unevictable LRU infrastructure consists of an additional, per-zone, LRU list +called the "unevictable" list and an associated page flag, PG_unevictable, to +indicate that the page is being managed on the unevictable list. + +The PG_unevictable flag is analogous to, and mutually exclusive with, the +PG_active flag in that it indicates on which LRU list a page resides when +PG_lru is set. + +The Unevictable LRU infrastructure maintains unevictable pages on an additional +LRU list for a few reasons: + + (1) We get to "treat unevictable pages just like we treat other pages in the + system - which means we get to use the same code to manipulate them, the + same code to isolate them (for migrate, etc.), the same code to keep track + of the statistics, etc..." [Rik van Riel] + + (2) We want to be able to migrate unevictable pages between nodes for memory + defragmentation, workload management and memory hotplug. The linux kernel + can only migrate pages that it can successfully isolate from the LRU + lists. If we were to maintain pages elsewhere than on an LRU-like list, + where they can be found by isolate_lru_page(), we would prevent their + migration, unless we reworked migration code to find the unevictable pages + itself. + + +The unevictable list does not differentiate between file-backed and anonymous, +swap-backed pages. This differentiation is only important while the pages are, +in fact, evictable. + +The unevictable list benefits from the "arrayification" of the per-zone LRU +lists and statistics originally proposed and posted by Christoph Lameter. + +The unevictable list does not use the LRU pagevec mechanism. Rather, +unevictable pages are placed directly on the page's zone's unevictable list +under the zone lru_lock. 
This allows us to prevent the stranding of pages on +the unevictable list when one task has the page isolated from the LRU and other +tasks are changing the "evictability" state of the page. + + +Memory Control Group Interaction +-------------------------------- + +The unevictable LRU facility interacts with the memory control group [aka +memory controller; see Documentation/cgroup-v1/memory.txt] by extending the +lru_list enum. + +The memory controller data structure automatically gets a per-zone unevictable +list as a result of the "arrayification" of the per-zone LRU lists (one per +lru_list enum element). The memory controller tracks the movement of pages to +and from the unevictable list. + +When a memory control group comes under memory pressure, the controller will +not attempt to reclaim pages on the unevictable list. This has a couple of +effects: + + (1) Because the pages are "hidden" from reclaim on the unevictable list, the + reclaim process can be more efficient, dealing only with pages that have a + chance of being reclaimed. + + (2) On the other hand, if too many of the pages charged to the control group + are unevictable, the evictable portion of the working set of the tasks in + the control group may not fit into the available memory. This can cause + the control group to thrash or to OOM-kill tasks. + + +.. _mark_addr_space_unevict: + +Marking Address Spaces Unevictable +---------------------------------- + +For facilities such as ramfs none of the pages attached to the address space +may be evicted. To prevent eviction of any such pages, the AS_UNEVICTABLE +address space flag is provided, and this can be manipulated by a filesystem +using a number of wrapper functions: + + * ``void mapping_set_unevictable(struct address_space *mapping);`` + + Mark the address space as being completely unevictable. + + * ``void mapping_clear_unevictable(struct address_space *mapping);`` + + Mark the address space as being evictable. 
+ + * ``int mapping_unevictable(struct address_space *mapping);`` + + Query the address space, and return true if it is completely + unevictable. + +These are currently used in two places in the kernel: + + (1) By ramfs to mark the address spaces of its inodes when they are created, + and this mark remains for the life of the inode. + + (2) By SYSV SHM to mark SHM_LOCK'd address spaces until SHM_UNLOCK is called. + + Note that SHM_LOCK is not required to page in the locked pages if they're + swapped out; the application must touch the pages manually if it wants to + ensure they're in memory. + + +Detecting Unevictable Pages +--------------------------- + +The function page_evictable() in vmscan.c determines whether a page is +evictable or not using the query function outlined above [see section +:ref:`Marking address spaces unevictable `] +to check the AS_UNEVICTABLE flag. + +For address spaces that are so marked after being populated (as SHM regions +might be), the lock action (eg: SHM_LOCK) can be lazy, and need not populate +the page tables for the region as does, for example, mlock(), nor need it make +any special effort to push any pages in the SHM_LOCK'd area to the unevictable +list. Instead, vmscan will do this if and when it encounters the pages during +a reclamation scan. + +On an unlock action (such as SHM_UNLOCK), the unlocker (eg: shmctl()) must scan +the pages in the region and "rescue" them from the unevictable list if no other +condition is keeping them unevictable. If an unevictable region is destroyed, +the pages are also "rescued" from the unevictable list in the process of +freeing them. + +page_evictable() also checks for mlocked pages by testing an additional page +flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is +faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED. 
+ + +Vmscan's Handling of Unevictable Pages +-------------------------------------- + +If unevictable pages are culled in the fault path, or moved to the unevictable +list at mlock() or mmap() time, vmscan will not encounter the pages until they +have become evictable again (via munlock() for example) and have been "rescued" +from the unevictable list. However, there may be situations where we decide, +for the sake of expediency, to leave a unevictable page on one of the regular +active/inactive LRU lists for vmscan to deal with. vmscan checks for such +pages in all of the shrink_{active|inactive|page}_list() functions and will +"cull" such pages that it encounters: that is, it diverts those pages to the +unevictable list for the zone being scanned. + +There may be situations where a page is mapped into a VM_LOCKED VMA, but the +page is not marked as PG_mlocked. Such pages will make it all the way to +shrink_page_list() where they will be detected when vmscan walks the reverse +map in try_to_unmap(). If try_to_unmap() returns SWAP_MLOCK, +shrink_page_list() will cull the page at that point. + +To "cull" an unevictable page, vmscan simply puts the page back on the LRU list +using putback_lru_page() - the inverse operation to isolate_lru_page() - after +dropping the page lock. Because the condition which makes the page unevictable +may change once the page is unlocked, putback_lru_page() will recheck the +unevictable state of a page that it places on the unevictable list. If the +page has become unevictable, putback_lru_page() removes it from the list and +retries, including the page_unevictable() test. Because such a race is a rare +event and movement of pages onto the unevictable list should be rare, these +extra evictabilty checks should not occur in the majority of calls to +putback_lru_page(). + + +MLOCKED Pages +============= + +The unevictable page list is also useful for mlock(), in addition to ramfs and +SYSV SHM. 
Note that mlock() is only available in CONFIG_MMU=y situations; in +NOMMU situations, all mappings are effectively mlocked. + + +History +------- + +The "Unevictable mlocked Pages" infrastructure is based on work originally +posted by Nick Piggin in an RFC patch entitled "mm: mlocked pages off LRU". +Nick posted his patch as an alternative to a patch posted by Christoph Lameter +to achieve the same objective: hiding mlocked pages from vmscan. + +In Nick's patch, he used one of the struct page LRU list link fields as a count +of VM_LOCKED VMAs that map the page. This use of the link field for a count +prevented the management of the pages on an LRU list, and thus mlocked pages +were not migratable as isolate_lru_page() could not find them, and the LRU list +link field was not available to the migration subsystem. + +Nick resolved this by putting mlocked pages back on the lru list before +attempting to isolate them, thus abandoning the count of VM_LOCKED VMAs. When +Nick's patch was integrated with the Unevictable LRU work, the count was +replaced by walking the reverse map to determine whether any VM_LOCKED VMAs +mapped the page. More on this below. + + +Basic Management +---------------- + +mlocked pages - pages mapped into a VM_LOCKED VMA - are a class of unevictable +pages. When such a page has been "noticed" by the memory management subsystem, +the page is marked with the PG_mlocked flag. This can be manipulated using the +PageMlocked() functions. + +A PG_mlocked page will be placed on the unevictable list when it is added to +the LRU. 
Such pages can be "noticed" by memory management in several places: + + (1) in the mlock()/mlockall() system call handlers; + + (2) in the mmap() system call handler when mmapping a region with the + MAP_LOCKED flag; + + (3) mmapping a region in a task that has called mlockall() with the MCL_FUTURE + flag + + (4) in the fault path, if mlocked pages are "culled" in the fault path, + and when a VM_LOCKED stack segment is expanded; or + + (5) as mentioned above, in vmscan:shrink_page_list() when attempting to + reclaim a page in a VM_LOCKED VMA via try_to_unmap() + +all of which result in the VM_LOCKED flag being set for the VMA if it doesn't +already have it set. + +mlocked pages become unlocked and rescued from the unevictable list when: + + (1) mapped in a range unlocked via the munlock()/munlockall() system calls; + + (2) munmap()'d out of the last VM_LOCKED VMA that maps the page, including + unmapping at task exit; + + (3) when the page is truncated from the last VM_LOCKED VMA of an mmapped file; + or + + (4) before a page is COW'd in a VM_LOCKED VMA. + + +mlock()/mlockall() System Call Handling +--------------------------------------- + +Both [do\_]mlock() and [do\_]mlockall() system call handlers call mlock_fixup() +for each VMA in the range specified by the call. In the case of mlockall(), +this is the entire active address space of the task. Note that mlock_fixup() +is used for both mlocking and munlocking a range of memory. A call to mlock() +an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED is +treated as a no-op, and mlock_fixup() simply returns. + +If the VMA passes some filtering as described in "Filtering Special Vmas" +below, mlock_fixup() will attempt to merge the VMA with its neighbors or split +off a subset of the VMA if the range does not cover the entire VMA. 
Once the +VMA has been merged or split or neither, mlock_fixup() will call +populate_vma_page_range() to fault in the pages via get_user_pages() and to +mark the pages as mlocked via mlock_vma_page(). + +Note that the VMA being mlocked might be mapped with PROT_NONE. In this case, +get_user_pages() will be unable to fault in the pages. That's okay. If pages +do end up getting faulted into this VM_LOCKED VMA, we'll handle them in the +fault path or in vmscan. + +Also note that a page returned by get_user_pages() could be truncated or +migrated out from under us, while we're trying to mlock it. To detect this, +populate_vma_page_range() checks page_mapping() after acquiring the page lock. +If the page is still associated with its mapping, we'll go ahead and call +mlock_vma_page(). If the mapping is gone, we just unlock the page and move on. +In the worst case, this will result in a page mapped in a VM_LOCKED VMA +remaining on a normal LRU list without being PageMlocked(). Again, vmscan will +detect and cull such pages. + +mlock_vma_page() will call TestSetPageMlocked() for each page returned by +get_user_pages(). We use TestSetPageMlocked() because the page might already +be mlocked by another task/VMA and we don't want to do extra work. We +especially do not want to count an mlocked page more than once in the +statistics. If the page was already mlocked, mlock_vma_page() need do nothing +more. + +If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the +page from the LRU, as it is likely on the appropriate active or inactive list +at that time. If the isolate_lru_page() succeeds, mlock_vma_page() will put +back the page - by calling putback_lru_page() - which will notice that the page +is now mlocked and divert the page to the zone's unevictable list. If +mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle +it later if and when it attempts to reclaim the page. 
+ + +Filtering Special VMAs +---------------------- + +mlock_fixup() filters several classes of "special" VMAs: + +1) VMAs with VM_IO or VM_PFNMAP set are skipped entirely. The pages behind + these mappings are inherently pinned, so we don't need to mark them as + mlocked. In any case, most of the pages have no struct page in which to so + mark the page. Because of this, get_user_pages() will fail for these VMAs, + so there is no sense in attempting to visit them. + +2) VMAs mapping hugetlbfs page are already effectively pinned into memory. We + neither need nor want to mlock() these pages. However, to preserve the + prior behavior of mlock() - before the unevictable/mlock changes - + mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to + allocate the huge pages and populate the ptes. + +3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages, + such as the VDSO page, relay channel pages, etc. These pages + are inherently unevictable and are not managed on the LRU lists. + mlock_fixup() treats these VMAs the same as hugetlbfs VMAs. It calls + make_pages_present() to populate the ptes. + +Note that for all of these special VMAs, mlock_fixup() does not set the +VM_LOCKED flag. Therefore, we won't have to deal with them later during +munlock(), munmap() or task exit. Neither does mlock_fixup() account these +VMAs against the task's "locked_vm". + +.. _munlock_munlockall_handling: + +munlock()/munlockall() System Call Handling +------------------------------------------- + +The munlock() and munlockall() system calls are handled by the same functions - +do_mlock[all]() - as the mlock() and mlockall() system calls with the unlock vs +lock operation indicated by an argument. So, these system calls are also +handled by mlock_fixup(). Again, if called for an already munlocked VMA, +mlock_fixup() simply returns. Because of the VMA filtering discussed above, +VM_LOCKED will not be set in any "special" VMAs. 
So, these VMAs will be +ignored for munlock. + +If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the +specified range. The range is then munlocked via the function +populate_vma_page_range() - the same function used to mlock a VMA range - +passing a flag to indicate that munlock() is being performed. + +Because the VMA access protections could have been changed to PROT_NONE after +faulting in and mlocking pages, get_user_pages() was unreliable for visiting +these pages for munlocking. Because we don't want to leave pages mlocked, +get_user_pages() was enhanced to accept a flag to ignore the permissions when +fetching the pages - all of which should be resident as a result of previous +mlocking. + +For munlock(), populate_vma_page_range() unlocks individual pages by calling +munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked +flag using TestClearPageMlocked(). As with mlock_vma_page(), +munlock_vma_page() uses the Test*PageMlocked() function to handle the case where +the page might have already been unlocked by another task. If the page was +mlocked, munlock_vma_page() updates the zone statistics for the number of +mlocked pages. Note, however, that at this point we haven't checked whether +the page is mapped by other VM_LOCKED VMAs. + +We can't call try_to_munlock(), the function that walks the reverse map to +check for other VM_LOCKED VMAs, without first isolating the page from the LRU. +try_to_munlock() is a variant of try_to_unmap() and thus requires that the page +not be on an LRU list [more on these below]. However, the call to +isolate_lru_page() could fail, in which case we couldn't try_to_munlock(). So, +we go ahead and clear PG_mlocked up front, as this might be the only chance we +have. If we can successfully isolate the page, we go ahead and +try_to_munlock(), which will restore the PG_mlocked flag and update the zone +page statistics if it finds another VMA holding the page mlocked. 
If we fail +to isolate the page, we'll have left a potentially mlocked page on the LRU. +This is fine, because we'll catch it later if and when vmscan tries to reclaim +the page. This should be relatively rare. + + +Migrating MLOCKED Pages +----------------------- + +A page that is being migrated has been isolated from the LRU lists and is held +locked across unmapping of the page, updating the page's address space entry +and copying the contents and state, until the page table entry has been +replaced with an entry that refers to the new page. Linux supports migration +of mlocked pages and other unevictable pages. This involves simply moving the +PG_mlocked and PG_unevictable states from the old page to the new page. + +Note that page migration can race with mlocking or munlocking of the same page. +This has been discussed from the mlock/munlock perspective in the respective +sections above. Both processes (migration and m[un]locking) hold the page +locked. This provides the first level of synchronization. Page migration +zeros out the page_mapping of the old page before unlocking it, so m[un]lock +can skip these pages by testing the page mapping under page lock. + +To complete page migration, we place the new and old pages back onto the LRU +after dropping the page lock. The "unneeded" page - old page on success, new +page on failure - will be freed when the reference count held by the migration +process is released. To ensure that we don't strand pages on the unevictable +list because of a race between munlock and migration, page migration uses the +putback_lru_page() function to add migrated pages back to the LRU. + + +Compacting MLOCKED Pages +------------------------ + +The unevictable LRU can be scanned for compactable regions and the default +behavior is to do so. /proc/sys/vm/compact_unevictable_allowed controls +this behavior (see Documentation/sysctl/vm.txt). 
Once scanning of the +unevictable LRU is enabled, the work of compaction is mostly handled by +the page migration code and the same work flow as described in MIGRATING +MLOCKED PAGES will apply. + +MLOCKING Transparent Huge Pages +------------------------------- + +A transparent huge page is represented by a single entry on an LRU list. +Therefore, we can only make unevictable an entire compound page, not +individual subpages. + +If a user tries to mlock() part of a huge page, we want the rest of the +page to be reclaimable. + +We cannot just split the page on partial mlock() as split_huge_page() can +fail and a new intermittent failure mode for the syscall is undesirable. + +We handle this by keeping PTE-mapped huge pages on normal LRU lists: the +PMD on border of VM_LOCKED VMA will be split into PTE table. + +This way the huge page is accessible for vmscan. Under memory pressure the +page will be split, subpages which belong to VM_LOCKED VMAs will be moved +to unevictable LRU and the rest can be reclaimed. + +See also comment in follow_trans_huge_pmd(). + +mmap(MAP_LOCKED) System Call Handling +------------------------------------- + +In addition to the mlock()/mlockall() system calls, an application can request +that a region of memory be mlocked supplying the MAP_LOCKED flag to the mmap() +call. There is one important and subtle difference here, though. mmap() + mlock() +will fail if the range cannot be faulted in (e.g. because mm_populate fails) +and returns with ENOMEM while mmap(MAP_LOCKED) will not fail. The mmaped +area will still have properties of the locked area - aka. pages will not get +swapped out - but major page faults to fault memory in might still happen. + +Furthermore, any mmap() call or brk() call that expands the heap by a +task that has previously called mlockall() with the MCL_FUTURE flag will result +in the newly mapped memory being mlocked. 
Before the unevictable/mlock +changes, the kernel simply called make_pages_present() to allocate pages and +populate the page table. + +To mlock a range of memory under the unevictable/mlock infrastructure, the +mmap() handler and task address space expansion functions call +populate_vma_page_range() specifying the vma and the address range to mlock. + +The callers of populate_vma_page_range() will have already added the memory range +to be mlocked to the task's "locked_vm". To account for filtered VMAs, +populate_vma_page_range() returns the number of pages NOT mlocked. All of the +callers then subtract a non-negative return value from the task's locked_vm. A +negative return value represents an error - for example, from get_user_pages() +attempting to fault in a VMA with PROT_NONE access. In this case, we leave the +memory range accounted as locked_vm, as the protections could be changed later +and pages allocated into that region. + + +munmap()/exit()/exec() System Call Handling +------------------------------------------- + +When unmapping an mlocked region of memory, whether by an explicit call to +munmap() or via an internal unmap from exit() or exec() processing, we must +munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages. +Before the unevictable/mlock changes, mlocking did not mark the pages in any +way, so unmapping them required no processing. + +To munlock a range of memory under the unevictable/mlock infrastructure, the +munmap() handler and task address space call tear down function +munlock_vma_pages_all(). The name reflects the observation that one always +specifies the entire VMA range when munlock()ing during unmap of a region. +Because of the VMA filtering when mlocking() regions, only "normal" VMAs that +actually contain mlocked pages will be passed to munlock_vma_pages_all(). 
+ +munlock_vma_pages_all() clears the VM_LOCKED VMA flag and, like mlock_fixup() +for the munlock case, calls __munlock_vma_pages_range() to walk the page table +for the VMA's memory range and munlock_vma_page() each resident page mapped by +the VMA. This effectively munlocks the page, only if this is the last +VM_LOCKED VMA that maps the page. + + +try_to_unmap() +-------------- + +Pages can, of course, be mapped into multiple VMAs. Some of these VMAs may +have VM_LOCKED flag set. It is possible for a page mapped into one or more +VM_LOCKED VMAs not to have the PG_mlocked flag set and therefore reside on one +of the active or inactive LRU lists. This could happen if, for example, a task +in the process of munlocking the page could not isolate the page from the LRU. +As a result, vmscan/shrink_page_list() might encounter such a page as described +in section "vmscan's handling of unevictable pages". To handle this situation, +try_to_unmap() checks for VM_LOCKED VMAs while it is walking a page's reverse +map. + +try_to_unmap() is always called, by either vmscan for reclaim or for page +migration, with the argument page locked and isolated from the LRU. Separate +functions handle anonymous and mapped file and KSM pages, as these types of +pages have different reverse map lookup mechanisms, with different locking. +In each case, whether rmap_walk_anon() or rmap_walk_file() or rmap_walk_ksm(), +it will call try_to_unmap_one() for every VMA which might contain the page. + +When trying to reclaim, if try_to_unmap_one() finds the page in a VM_LOCKED +VMA, it will then mlock the page via mlock_vma_page() instead of unmapping it, +and return SWAP_MLOCK to indicate that the page is unevictable: and the scan +stops there. 
+ +mlock_vma_page() is called while holding the page table's lock (in addition +to the page lock, and the rmap lock): to serialize against concurrent mlock or +munlock or munmap system calls, mm teardown (munlock_vma_pages_all), reclaim, +holepunching, and truncation of file pages and their anonymous COWed pages. + + +try_to_munlock() Reverse Map Scan +--------------------------------- + +.. warning:: + [!] TODO/FIXME: a better name might be page_mlocked() - analogous to the + page_referenced() reverse map walker. + +When munlock_vma_page() [see section :ref:`munlock()/munlockall() System Call +Handling ` above] tries to munlock a +page, it needs to determine whether or not the page is mapped by any +VM_LOCKED VMA without actually attempting to unmap all PTEs from the +page. For this purpose, the unevictable/mlock infrastructure +introduced a variant of try_to_unmap() called try_to_munlock(). + +try_to_munlock() calls the same functions as try_to_unmap() for anonymous and +mapped file and KSM pages with a flag argument specifying unlock versus unmap +processing. Again, these functions walk the respective reverse maps looking +for VM_LOCKED VMAs. When such a VMA is found, as in the try_to_unmap() case, +the functions mlock the page via mlock_vma_page() and return SWAP_MLOCK. This +undoes the pre-clearing of the page's PG_mlocked done by munlock_vma_page. + +Note that try_to_munlock()'s reverse map walk must visit every VMA in a page's +reverse map to determine that a page is NOT mapped into any VM_LOCKED VMA. +However, the scan can terminate when it encounters a VM_LOCKED VMA. +Although try_to_munlock() might be called a great many times when munlocking a +large region or tearing down a large address space that has been mlocked via +mlockall(), overall this is a fairly rare event. + + +Page Reclaim in shrink_*_list() +------------------------------- + +shrink_active_list() culls any obviously unevictable pages - i.e. 
+!page_evictable(page) - diverting these to the unevictable list. +However, shrink_active_list() only sees unevictable pages that made it onto the +active/inactive lru lists. Note that these pages do not have PageUnevictable +set - otherwise they would be on the unevictable list and shrink_active_list +would never see them. + +Some examples of these unevictable pages on the LRU lists are: + + (1) ramfs pages that have been placed on the LRU lists when first allocated. + + (2) SHM_LOCK'd shared memory pages. shmctl(SHM_LOCK) does not attempt to + allocate or fault in the pages in the shared memory region. This happens + when an application accesses the page the first time after SHM_LOCK'ing + the segment. + + (3) mlocked pages that could not be isolated from the LRU and moved to the + unevictable list in mlock_vma_page(). + +shrink_inactive_list() also diverts any unevictable pages that it finds on the +inactive lists to the appropriate zone's unevictable list. + +shrink_inactive_list() should only see SHM_LOCK'd pages that became SHM_LOCK'd +after shrink_active_list() had moved them to the inactive list, or pages mapped +into VM_LOCKED VMAs that munlock_vma_page() couldn't isolate from the LRU to +recheck via try_to_munlock(). shrink_inactive_list() won't notice the latter, +but will pass on to shrink_page_list(). + +shrink_page_list() again culls obviously unevictable pages that it could +encounter for similar reason to shrink_inactive_list(). Pages mapped into +VM_LOCKED VMAs but without PG_mlocked set will make it all the way to +try_to_unmap(). shrink_page_list() will divert them to the unevictable list +when try_to_unmap() returns SWAP_MLOCK, as discussed above. diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt deleted file mode 100644 index fdd84cb8d511..000000000000 --- a/Documentation/vm/unevictable-lru.txt +++ /dev/null @@ -1,614 +0,0 @@ -.. 
_unevictable_lru: - -============================== -Unevictable LRU Infrastructure -============================== - -.. contents:: :local: - - -Introduction -============ - -This document describes the Linux memory manager's "Unevictable LRU" -infrastructure and the use of this to manage several types of "unevictable" -pages. - -The document attempts to provide the overall rationale behind this mechanism -and the rationale for some of the design decisions that drove the -implementation. The latter design rationale is discussed in the context of an -implementation description. Admittedly, one can obtain the implementation -details - the "what does it do?" - by reading the code. One hopes that the -descriptions below add value by provide the answer to "why does it do that?". - - - -The Unevictable LRU -=================== - -The Unevictable LRU facility adds an additional LRU list to track unevictable -pages and to hide these pages from vmscan. This mechanism is based on a patch -by Larry Woodman of Red Hat to address several scalability problems with page -reclaim in Linux. The problems have been observed at customer sites on large -memory x86_64 systems. - -To illustrate this with an example, a non-NUMA x86_64 platform with 128GB of -main memory will have over 32 million 4k pages in a single zone. When a large -fraction of these pages are not evictable for any reason [see below], vmscan -will spend a lot of time scanning the LRU lists looking for the small fraction -of pages that are evictable. This can result in a situation where all CPUs are -spending 100% of their time in vmscan for hours or days on end, with the system -completely unresponsive. - -The unevictable list addresses the following classes of unevictable pages: - - * Those owned by ramfs. - - * Those mapped into SHM_LOCK'd shared memory regions. - - * Those mapped into VM_LOCKED [mlock()ed] VMAs. 
- -The infrastructure may also be able to handle other conditions that make pages -unevictable, either by definition or by circumstance, in the future. - - -The Unevictable Page List -------------------------- - -The Unevictable LRU infrastructure consists of an additional, per-zone, LRU list -called the "unevictable" list and an associated page flag, PG_unevictable, to -indicate that the page is being managed on the unevictable list. - -The PG_unevictable flag is analogous to, and mutually exclusive with, the -PG_active flag in that it indicates on which LRU list a page resides when -PG_lru is set. - -The Unevictable LRU infrastructure maintains unevictable pages on an additional -LRU list for a few reasons: - - (1) We get to "treat unevictable pages just like we treat other pages in the - system - which means we get to use the same code to manipulate them, the - same code to isolate them (for migrate, etc.), the same code to keep track - of the statistics, etc..." [Rik van Riel] - - (2) We want to be able to migrate unevictable pages between nodes for memory - defragmentation, workload management and memory hotplug. The linux kernel - can only migrate pages that it can successfully isolate from the LRU - lists. If we were to maintain pages elsewhere than on an LRU-like list, - where they can be found by isolate_lru_page(), we would prevent their - migration, unless we reworked migration code to find the unevictable pages - itself. - - -The unevictable list does not differentiate between file-backed and anonymous, -swap-backed pages. This differentiation is only important while the pages are, -in fact, evictable. - -The unevictable list benefits from the "arrayification" of the per-zone LRU -lists and statistics originally proposed and posted by Christoph Lameter. - -The unevictable list does not use the LRU pagevec mechanism. Rather, -unevictable pages are placed directly on the page's zone's unevictable list -under the zone lru_lock. 
This allows us to prevent the stranding of pages on -the unevictable list when one task has the page isolated from the LRU and other -tasks are changing the "evictability" state of the page. - - -Memory Control Group Interaction --------------------------------- - -The unevictable LRU facility interacts with the memory control group [aka -memory controller; see Documentation/cgroup-v1/memory.txt] by extending the -lru_list enum. - -The memory controller data structure automatically gets a per-zone unevictable -list as a result of the "arrayification" of the per-zone LRU lists (one per -lru_list enum element). The memory controller tracks the movement of pages to -and from the unevictable list. - -When a memory control group comes under memory pressure, the controller will -not attempt to reclaim pages on the unevictable list. This has a couple of -effects: - - (1) Because the pages are "hidden" from reclaim on the unevictable list, the - reclaim process can be more efficient, dealing only with pages that have a - chance of being reclaimed. - - (2) On the other hand, if too many of the pages charged to the control group - are unevictable, the evictable portion of the working set of the tasks in - the control group may not fit into the available memory. This can cause - the control group to thrash or to OOM-kill tasks. - - -.. _mark_addr_space_unevict: - -Marking Address Spaces Unevictable ----------------------------------- - -For facilities such as ramfs none of the pages attached to the address space -may be evicted. To prevent eviction of any such pages, the AS_UNEVICTABLE -address space flag is provided, and this can be manipulated by a filesystem -using a number of wrapper functions: - - * ``void mapping_set_unevictable(struct address_space *mapping);`` - - Mark the address space as being completely unevictable. - - * ``void mapping_clear_unevictable(struct address_space *mapping);`` - - Mark the address space as being evictable. 
- - * ``int mapping_unevictable(struct address_space *mapping);`` - - Query the address space, and return true if it is completely - unevictable. - -These are currently used in two places in the kernel: - - (1) By ramfs to mark the address spaces of its inodes when they are created, - and this mark remains for the life of the inode. - - (2) By SYSV SHM to mark SHM_LOCK'd address spaces until SHM_UNLOCK is called. - - Note that SHM_LOCK is not required to page in the locked pages if they're - swapped out; the application must touch the pages manually if it wants to - ensure they're in memory. - - -Detecting Unevictable Pages ---------------------------- - -The function page_evictable() in vmscan.c determines whether a page is -evictable or not using the query function outlined above [see section -:ref:`Marking address spaces unevictable `] -to check the AS_UNEVICTABLE flag. - -For address spaces that are so marked after being populated (as SHM regions -might be), the lock action (eg: SHM_LOCK) can be lazy, and need not populate -the page tables for the region as does, for example, mlock(), nor need it make -any special effort to push any pages in the SHM_LOCK'd area to the unevictable -list. Instead, vmscan will do this if and when it encounters the pages during -a reclamation scan. - -On an unlock action (such as SHM_UNLOCK), the unlocker (eg: shmctl()) must scan -the pages in the region and "rescue" them from the unevictable list if no other -condition is keeping them unevictable. If an unevictable region is destroyed, -the pages are also "rescued" from the unevictable list in the process of -freeing them. - -page_evictable() also checks for mlocked pages by testing an additional page -flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is -faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED. 
- - -Vmscan's Handling of Unevictable Pages --------------------------------------- - -If unevictable pages are culled in the fault path, or moved to the unevictable -list at mlock() or mmap() time, vmscan will not encounter the pages until they -have become evictable again (via munlock() for example) and have been "rescued" -from the unevictable list. However, there may be situations where we decide, -for the sake of expediency, to leave a unevictable page on one of the regular -active/inactive LRU lists for vmscan to deal with. vmscan checks for such -pages in all of the shrink_{active|inactive|page}_list() functions and will -"cull" such pages that it encounters: that is, it diverts those pages to the -unevictable list for the zone being scanned. - -There may be situations where a page is mapped into a VM_LOCKED VMA, but the -page is not marked as PG_mlocked. Such pages will make it all the way to -shrink_page_list() where they will be detected when vmscan walks the reverse -map in try_to_unmap(). If try_to_unmap() returns SWAP_MLOCK, -shrink_page_list() will cull the page at that point. - -To "cull" an unevictable page, vmscan simply puts the page back on the LRU list -using putback_lru_page() - the inverse operation to isolate_lru_page() - after -dropping the page lock. Because the condition which makes the page unevictable -may change once the page is unlocked, putback_lru_page() will recheck the -unevictable state of a page that it places on the unevictable list. If the -page has become unevictable, putback_lru_page() removes it from the list and -retries, including the page_unevictable() test. Because such a race is a rare -event and movement of pages onto the unevictable list should be rare, these -extra evictabilty checks should not occur in the majority of calls to -putback_lru_page(). - - -MLOCKED Pages -============= - -The unevictable page list is also useful for mlock(), in addition to ramfs and -SYSV SHM. 
Note that mlock() is only available in CONFIG_MMU=y situations; in -NOMMU situations, all mappings are effectively mlocked. - - -History -------- - -The "Unevictable mlocked Pages" infrastructure is based on work originally -posted by Nick Piggin in an RFC patch entitled "mm: mlocked pages off LRU". -Nick posted his patch as an alternative to a patch posted by Christoph Lameter -to achieve the same objective: hiding mlocked pages from vmscan. - -In Nick's patch, he used one of the struct page LRU list link fields as a count -of VM_LOCKED VMAs that map the page. This use of the link field for a count -prevented the management of the pages on an LRU list, and thus mlocked pages -were not migratable as isolate_lru_page() could not find them, and the LRU list -link field was not available to the migration subsystem. - -Nick resolved this by putting mlocked pages back on the lru list before -attempting to isolate them, thus abandoning the count of VM_LOCKED VMAs. When -Nick's patch was integrated with the Unevictable LRU work, the count was -replaced by walking the reverse map to determine whether any VM_LOCKED VMAs -mapped the page. More on this below. - - -Basic Management ----------------- - -mlocked pages - pages mapped into a VM_LOCKED VMA - are a class of unevictable -pages. When such a page has been "noticed" by the memory management subsystem, -the page is marked with the PG_mlocked flag. This can be manipulated using the -PageMlocked() functions. - -A PG_mlocked page will be placed on the unevictable list when it is added to -the LRU. 
Such pages can be "noticed" by memory management in several places: - - (1) in the mlock()/mlockall() system call handlers; - - (2) in the mmap() system call handler when mmapping a region with the - MAP_LOCKED flag; - - (3) mmapping a region in a task that has called mlockall() with the MCL_FUTURE - flag - - (4) in the fault path, if mlocked pages are "culled" in the fault path, - and when a VM_LOCKED stack segment is expanded; or - - (5) as mentioned above, in vmscan:shrink_page_list() when attempting to - reclaim a page in a VM_LOCKED VMA via try_to_unmap() - -all of which result in the VM_LOCKED flag being set for the VMA if it doesn't -already have it set. - -mlocked pages become unlocked and rescued from the unevictable list when: - - (1) mapped in a range unlocked via the munlock()/munlockall() system calls; - - (2) munmap()'d out of the last VM_LOCKED VMA that maps the page, including - unmapping at task exit; - - (3) when the page is truncated from the last VM_LOCKED VMA of an mmapped file; - or - - (4) before a page is COW'd in a VM_LOCKED VMA. - - -mlock()/mlockall() System Call Handling ---------------------------------------- - -Both [do\_]mlock() and [do\_]mlockall() system call handlers call mlock_fixup() -for each VMA in the range specified by the call. In the case of mlockall(), -this is the entire active address space of the task. Note that mlock_fixup() -is used for both mlocking and munlocking a range of memory. A call to mlock() -an already VM_LOCKED VMA, or to munlock() a VMA that is not VM_LOCKED is -treated as a no-op, and mlock_fixup() simply returns. - -If the VMA passes some filtering as described in "Filtering Special Vmas" -below, mlock_fixup() will attempt to merge the VMA with its neighbors or split -off a subset of the VMA if the range does not cover the entire VMA. 
Once the -VMA has been merged or split or neither, mlock_fixup() will call -populate_vma_page_range() to fault in the pages via get_user_pages() and to -mark the pages as mlocked via mlock_vma_page(). - -Note that the VMA being mlocked might be mapped with PROT_NONE. In this case, -get_user_pages() will be unable to fault in the pages. That's okay. If pages -do end up getting faulted into this VM_LOCKED VMA, we'll handle them in the -fault path or in vmscan. - -Also note that a page returned by get_user_pages() could be truncated or -migrated out from under us, while we're trying to mlock it. To detect this, -populate_vma_page_range() checks page_mapping() after acquiring the page lock. -If the page is still associated with its mapping, we'll go ahead and call -mlock_vma_page(). If the mapping is gone, we just unlock the page and move on. -In the worst case, this will result in a page mapped in a VM_LOCKED VMA -remaining on a normal LRU list without being PageMlocked(). Again, vmscan will -detect and cull such pages. - -mlock_vma_page() will call TestSetPageMlocked() for each page returned by -get_user_pages(). We use TestSetPageMlocked() because the page might already -be mlocked by another task/VMA and we don't want to do extra work. We -especially do not want to count an mlocked page more than once in the -statistics. If the page was already mlocked, mlock_vma_page() need do nothing -more. - -If the page was NOT already mlocked, mlock_vma_page() attempts to isolate the -page from the LRU, as it is likely on the appropriate active or inactive list -at that time. If the isolate_lru_page() succeeds, mlock_vma_page() will put -back the page - by calling putback_lru_page() - which will notice that the page -is now mlocked and divert the page to the zone's unevictable list. If -mlock_vma_page() is unable to isolate the page from the LRU, vmscan will handle -it later if and when it attempts to reclaim the page. 
- - -Filtering Special VMAs ----------------------- - -mlock_fixup() filters several classes of "special" VMAs: - -1) VMAs with VM_IO or VM_PFNMAP set are skipped entirely. The pages behind - these mappings are inherently pinned, so we don't need to mark them as - mlocked. In any case, most of the pages have no struct page in which to so - mark the page. Because of this, get_user_pages() will fail for these VMAs, - so there is no sense in attempting to visit them. - -2) VMAs mapping hugetlbfs page are already effectively pinned into memory. We - neither need nor want to mlock() these pages. However, to preserve the - prior behavior of mlock() - before the unevictable/mlock changes - - mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to - allocate the huge pages and populate the ptes. - -3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages, - such as the VDSO page, relay channel pages, etc. These pages - are inherently unevictable and are not managed on the LRU lists. - mlock_fixup() treats these VMAs the same as hugetlbfs VMAs. It calls - make_pages_present() to populate the ptes. - -Note that for all of these special VMAs, mlock_fixup() does not set the -VM_LOCKED flag. Therefore, we won't have to deal with them later during -munlock(), munmap() or task exit. Neither does mlock_fixup() account these -VMAs against the task's "locked_vm". - -.. _munlock_munlockall_handling: - -munlock()/munlockall() System Call Handling -------------------------------------------- - -The munlock() and munlockall() system calls are handled by the same functions - -do_mlock[all]() - as the mlock() and mlockall() system calls with the unlock vs -lock operation indicated by an argument. So, these system calls are also -handled by mlock_fixup(). Again, if called for an already munlocked VMA, -mlock_fixup() simply returns. Because of the VMA filtering discussed above, -VM_LOCKED will not be set in any "special" VMAs. 
So, these VMAs will be -ignored for munlock. - -If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the -specified range. The range is then munlocked via the function -populate_vma_page_range() - the same function used to mlock a VMA range - -passing a flag to indicate that munlock() is being performed. - -Because the VMA access protections could have been changed to PROT_NONE after -faulting in and mlocking pages, get_user_pages() was unreliable for visiting -these pages for munlocking. Because we don't want to leave pages mlocked, -get_user_pages() was enhanced to accept a flag to ignore the permissions when -fetching the pages - all of which should be resident as a result of previous -mlocking. - -For munlock(), populate_vma_page_range() unlocks individual pages by calling -munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked -flag using TestClearPageMlocked(). As with mlock_vma_page(), -munlock_vma_page() use the Test*PageMlocked() function to handle the case where -the page might have already been unlocked by another task. If the page was -mlocked, munlock_vma_page() updates that zone statistics for the number of -mlocked pages. Note, however, that at this point we haven't checked whether -the page is mapped by other VM_LOCKED VMAs. - -We can't call try_to_munlock(), the function that walks the reverse map to -check for other VM_LOCKED VMAs, without first isolating the page from the LRU. -try_to_munlock() is a variant of try_to_unmap() and thus requires that the page -not be on an LRU list [more on these below]. However, the call to -isolate_lru_page() could fail, in which case we couldn't try_to_munlock(). So, -we go ahead and clear PG_mlocked up front, as this might be the only chance we -have. If we can successfully isolate the page, we go ahead and -try_to_munlock(), which will restore the PG_mlocked flag and update the zone -page statistics if it finds another VMA holding the page mlocked. 
If we fail -to isolate the page, we'll have left a potentially mlocked page on the LRU. -This is fine, because we'll catch it later if and if vmscan tries to reclaim -the page. This should be relatively rare. - - -Migrating MLOCKED Pages ------------------------ - -A page that is being migrated has been isolated from the LRU lists and is held -locked across unmapping of the page, updating the page's address space entry -and copying the contents and state, until the page table entry has been -replaced with an entry that refers to the new page. Linux supports migration -of mlocked pages and other unevictable pages. This involves simply moving the -PG_mlocked and PG_unevictable states from the old page to the new page. - -Note that page migration can race with mlocking or munlocking of the same page. -This has been discussed from the mlock/munlock perspective in the respective -sections above. Both processes (migration and m[un]locking) hold the page -locked. This provides the first level of synchronization. Page migration -zeros out the page_mapping of the old page before unlocking it, so m[un]lock -can skip these pages by testing the page mapping under page lock. - -To complete page migration, we place the new and old pages back onto the LRU -after dropping the page lock. The "unneeded" page - old page on success, new -page on failure - will be freed when the reference count held by the migration -process is released. To ensure that we don't strand pages on the unevictable -list because of a race between munlock and migration, page migration uses the -putback_lru_page() function to add migrated pages back to the LRU. - - -Compacting MLOCKED Pages ------------------------- - -The unevictable LRU can be scanned for compactable regions and the default -behavior is to do so. /proc/sys/vm/compact_unevictable_allowed controls -this behavior (see Documentation/sysctl/vm.txt). 
Once scanning of the -unevictable LRU is enabled, the work of compaction is mostly handled by -the page migration code and the same work flow as described in MIGRATING -MLOCKED PAGES will apply. - -MLOCKING Transparent Huge Pages -------------------------------- - -A transparent huge page is represented by a single entry on an LRU list. -Therefore, we can only make unevictable an entire compound page, not -individual subpages. - -If a user tries to mlock() part of a huge page, we want the rest of the -page to be reclaimable. - -We cannot just split the page on partial mlock() as split_huge_page() can -fail and new intermittent failure mode for the syscall is undesirable. - -We handle this by keeping PTE-mapped huge pages on normal LRU lists: the -PMD on border of VM_LOCKED VMA will be split into PTE table. - -This way the huge page is accessible for vmscan. Under memory pressure the -page will be split, subpages which belong to VM_LOCKED VMAs will be moved -to unevictable LRU and the rest can be reclaimed. - -See also comment in follow_trans_huge_pmd(). - -mmap(MAP_LOCKED) System Call Handling -------------------------------------- - -In addition the mlock()/mlockall() system calls, an application can request -that a region of memory be mlocked supplying the MAP_LOCKED flag to the mmap() -call. There is one important and subtle difference here, though. mmap() + mlock() -will fail if the range cannot be faulted in (e.g. because mm_populate fails) -and returns with ENOMEM while mmap(MAP_LOCKED) will not fail. The mmaped -area will still have properties of the locked area - aka. pages will not get -swapped out - but major page faults to fault memory in might still happen. - -Furthermore, any mmap() call or brk() call that expands the heap by a -task that has previously called mlockall() with the MCL_FUTURE flag will result -in the newly mapped memory being mlocked. 
Before the unevictable/mlock -changes, the kernel simply called make_pages_present() to allocate pages and -populate the page table. - -To mlock a range of memory under the unevictable/mlock infrastructure, the -mmap() handler and task address space expansion functions call -populate_vma_page_range() specifying the vma and the address range to mlock. - -The callers of populate_vma_page_range() will have already added the memory range -to be mlocked to the task's "locked_vm". To account for filtered VMAs, -populate_vma_page_range() returns the number of pages NOT mlocked. All of the -callers then subtract a non-negative return value from the task's locked_vm. A -negative return value represent an error - for example, from get_user_pages() -attempting to fault in a VMA with PROT_NONE access. In this case, we leave the -memory range accounted as locked_vm, as the protections could be changed later -and pages allocated into that region. - - -munmap()/exit()/exec() System Call Handling -------------------------------------------- - -When unmapping an mlocked region of memory, whether by an explicit call to -munmap() or via an internal unmap from exit() or exec() processing, we must -munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages. -Before the unevictable/mlock changes, mlocking did not mark the pages in any -way, so unmapping them required no processing. - -To munlock a range of memory under the unevictable/mlock infrastructure, the -munmap() handler and task address space call tear down function -munlock_vma_pages_all(). The name reflects the observation that one always -specifies the entire VMA range when munlock()ing during unmap of a region. -Because of the VMA filtering when mlocking() regions, only "normal" VMAs that -actually contain mlocked pages will be passed to munlock_vma_pages_all(). 
- -munlock_vma_pages_all() clears the VM_LOCKED VMA flag and, like mlock_fixup() -for the munlock case, calls __munlock_vma_pages_range() to walk the page table -for the VMA's memory range and munlock_vma_page() each resident page mapped by -the VMA. This effectively munlocks the page, only if this is the last -VM_LOCKED VMA that maps the page. - - -try_to_unmap() --------------- - -Pages can, of course, be mapped into multiple VMAs. Some of these VMAs may -have VM_LOCKED flag set. It is possible for a page mapped into one or more -VM_LOCKED VMAs not to have the PG_mlocked flag set and therefore reside on one -of the active or inactive LRU lists. This could happen if, for example, a task -in the process of munlocking the page could not isolate the page from the LRU. -As a result, vmscan/shrink_page_list() might encounter such a page as described -in section "vmscan's handling of unevictable pages". To handle this situation, -try_to_unmap() checks for VM_LOCKED VMAs while it is walking a page's reverse -map. - -try_to_unmap() is always called, by either vmscan for reclaim or for page -migration, with the argument page locked and isolated from the LRU. Separate -functions handle anonymous and mapped file and KSM pages, as these types of -pages have different reverse map lookup mechanisms, with different locking. -In each case, whether rmap_walk_anon() or rmap_walk_file() or rmap_walk_ksm(), -it will call try_to_unmap_one() for every VMA which might contain the page. - -When trying to reclaim, if try_to_unmap_one() finds the page in a VM_LOCKED -VMA, it will then mlock the page via mlock_vma_page() instead of unmapping it, -and return SWAP_MLOCK to indicate that the page is unevictable: and the scan -stops there. 
- -mlock_vma_page() is called while holding the page table's lock (in addition -to the page lock, and the rmap lock): to serialize against concurrent mlock or -munlock or munmap system calls, mm teardown (munlock_vma_pages_all), reclaim, -holepunching, and truncation of file pages and their anonymous COWed pages. - - -try_to_munlock() Reverse Map Scan ---------------------------------- - -.. warning:: - [!] TODO/FIXME: a better name might be page_mlocked() - analogous to the - page_referenced() reverse map walker. - -When munlock_vma_page() [see section :ref:`munlock()/munlockall() System Call -Handling ` above] tries to munlock a -page, it needs to determine whether or not the page is mapped by any -VM_LOCKED VMA without actually attempting to unmap all PTEs from the -page. For this purpose, the unevictable/mlock infrastructure -introduced a variant of try_to_unmap() called try_to_munlock(). - -try_to_munlock() calls the same functions as try_to_unmap() for anonymous and -mapped file and KSM pages with a flag argument specifying unlock versus unmap -processing. Again, these functions walk the respective reverse maps looking -for VM_LOCKED VMAs. When such a VMA is found, as in the try_to_unmap() case, -the functions mlock the page via mlock_vma_page() and return SWAP_MLOCK. This -undoes the pre-clearing of the page's PG_mlocked done by munlock_vma_page. - -Note that try_to_munlock()'s reverse map walk must visit every VMA in a page's -reverse map to determine that a page is NOT mapped into any VM_LOCKED VMA. -However, the scan can terminate when it encounters a VM_LOCKED VMA. -Although try_to_munlock() might be called a great many times when munlocking a -large region or tearing down a large address space that has been mlocked via -mlockall(), overall this is a fairly rare event. - - -Page Reclaim in shrink_*_list() -------------------------------- - -shrink_active_list() culls any obviously unevictable pages - i.e. 
-!page_evictable(page) - diverting these to the unevictable list. -However, shrink_active_list() only sees unevictable pages that made it onto the -active/inactive lru lists. Note that these pages do not have PageUnevictable -set - otherwise they would be on the unevictable list and shrink_active_list -would never see them. - -Some examples of these unevictable pages on the LRU lists are: - - (1) ramfs pages that have been placed on the LRU lists when first allocated. - - (2) SHM_LOCK'd shared memory pages. shmctl(SHM_LOCK) does not attempt to - allocate or fault in the pages in the shared memory region. This happens - when an application accesses the page the first time after SHM_LOCK'ing - the segment. - - (3) mlocked pages that could not be isolated from the LRU and moved to the - unevictable list in mlock_vma_page(). - -shrink_inactive_list() also diverts any unevictable pages that it finds on the -inactive lists to the appropriate zone's unevictable list. - -shrink_inactive_list() should only see SHM_LOCK'd pages that became SHM_LOCK'd -after shrink_active_list() had moved them to the inactive list, or pages mapped -into VM_LOCKED VMAs that munlock_vma_page() couldn't isolate from the LRU to -recheck via try_to_munlock(). shrink_inactive_list() won't notice the latter, -but will pass on to shrink_page_list(). - -shrink_page_list() again culls obviously unevictable pages that it could -encounter for similar reason to shrink_inactive_list(). Pages mapped into -VM_LOCKED VMAs but without PG_mlocked set will make it all the way to -try_to_unmap(). shrink_page_list() will divert them to the unevictable list -when try_to_unmap() returns SWAP_MLOCK, as discussed above. diff --git a/Documentation/vm/userfaultfd.rst b/Documentation/vm/userfaultfd.rst new file mode 100644 index 000000000000..5048cf661a8a --- /dev/null +++ b/Documentation/vm/userfaultfd.rst @@ -0,0 +1,241 @@ +.. 
_userfaultfd: + +=========== +Userfaultfd +=========== + +Objective +========= + +Userfaults allow the implementation of on-demand paging from userland +and more generally they allow userland to take control of various +memory page faults, something otherwise only the kernel code could do. + +For example userfaults allows a proper and more optimal implementation +of the PROT_NONE+SIGSEGV trick. + +Design +====== + +Userfaults are delivered and resolved through the userfaultfd syscall. + +The userfaultfd (aside from registering and unregistering virtual +memory ranges) provides two primary functionalities: + +1) read/POLLIN protocol to notify a userland thread of the faults + happening + +2) various UFFDIO_* ioctls that can manage the virtual memory regions + registered in the userfaultfd that allows userland to efficiently + resolve the userfaults it receives via 1) or to manage the virtual + memory in the background + +The real advantage of userfaults if compared to regular virtual memory +management of mremap/mprotect is that the userfaults in all their +operations never involve heavyweight structures like vmas (in fact the +userfaultfd runtime load never takes the mmap_sem for writing). + +Vmas are not suitable for page- (or hugepage) granular fault tracking +when dealing with virtual address spaces that could span +Terabytes. Too many vmas would be needed for that. + +The userfaultfd once opened by invoking the syscall, can also be +passed using unix domain sockets to a manager process, so the same +manager process could handle the userfaults of a multitude of +different processes without them being aware about what is going on +(well of course unless they later try to use the userfaultfd +themselves on the same region the manager is already tracking, which +is a corner case that would currently return -EBUSY). 
+ +API +=== + +When first opened the userfaultfd must be enabled invoking the +UFFDIO_API ioctl specifying a uffdio_api.api value set to UFFD_API (or +a later API version) which will specify the read/POLLIN protocol +userland intends to speak on the UFFD and the uffdio_api.features +userland requires. The UFFDIO_API ioctl if successful (i.e. if the +requested uffdio_api.api is spoken also by the running kernel and the +requested features are going to be enabled) will return into +uffdio_api.features and uffdio_api.ioctls two 64bit bitmasks of +respectively all the available features of the read(2) protocol and +the generic ioctl available. + +The uffdio_api.features bitmask returned by the UFFDIO_API ioctl +defines what memory types are supported by the userfaultfd and what +events, except page fault notifications, may be generated. + +If the kernel supports registering userfaultfd ranges on hugetlbfs +virtual memory areas, UFFD_FEATURE_MISSING_HUGETLBFS will be set in +uffdio_api.features. Similarly, UFFD_FEATURE_MISSING_SHMEM will be +set if the kernel supports registering userfaultfd ranges on shared +memory (covering all shmem APIs, i.e. tmpfs, IPCSHM, /dev/zero +MAP_SHARED, memfd_create, etc). + +The userland application that wants to use userfaultfd with hugetlbfs +or shared memory needs to set the corresponding flag in +uffdio_api.features to enable those features. + +If the userland desires to receive notifications for events other than +page faults, it has to verify that uffdio_api.features has appropriate +UFFD_FEATURE_EVENT_* bits set. These events are described in more +detail below in "Non-cooperative userfaultfd" section. + +Once the userfaultfd has been enabled the UFFDIO_REGISTER ioctl should +be invoked (if present in the returned uffdio_api.ioctls bitmask) to +register a memory range in the userfaultfd by setting the +uffdio_register structure accordingly. 
The uffdio_register.mode +bitmask will specify to the kernel which kind of faults to track for +the range (UFFDIO_REGISTER_MODE_MISSING would track missing +pages). The UFFDIO_REGISTER ioctl will return the +uffdio_register.ioctls bitmask of ioctls that are suitable to resolve +userfaults on the range registered. Not all ioctls will necessarily be +supported for all memory types depending on the underlying virtual +memory backend (anonymous memory vs tmpfs vs real filebacked +mappings). + +Userland can use the uffdio_register.ioctls to manage the virtual +address space in the background (to add or potentially also remove +memory from the userfaultfd registered range). This means a userfault +could be triggering just before userland maps in the background the +user-faulted page. + +The primary ioctl to resolve userfaults is UFFDIO_COPY. That +atomically copies a page into the userfault registered range and wakes +up the blocked userfaults (unless uffdio_copy.mode & +UFFDIO_COPY_MODE_DONTWAKE is set). Other ioctls work similarly to +UFFDIO_COPY. They're atomic as in guaranteeing that nothing can see a +half-copied page since it'll keep userfaulting until the copy has +finished. + +QEMU/KVM +======== + +QEMU/KVM is using the userfaultfd syscall to implement postcopy live +migration. Postcopy live migration is one form of memory +externalization consisting of a virtual machine running with part or +all of its memory residing on a different node in the cloud. The +userfaultfd abstraction is generic enough that not a single line of +KVM kernel code had to be modified in order to add postcopy live +migration to QEMU. + +Guest async page faults, FOLL_NOWAIT and all other GUP features work +just fine in combination with userfaults. Userfaults trigger async +page faults in the guest scheduler so those guest processes that +aren't waiting for userfaults (i.e. network bound) can keep running in +the guest vcpus. 
+ +It is generally beneficial to run one pass of precopy live migration +just before starting postcopy live migration, in order to avoid +generating userfaults for readonly guest regions. + +The implementation of postcopy live migration currently uses one +single bidirectional socket but in the future two different sockets +will be used (to reduce the latency of the userfaults to the minimum +possible without having to decrease /proc/sys/net/ipv4/tcp_wmem). + +The QEMU in the source node writes all pages that it knows are missing +in the destination node, into the socket, and the migration thread of +the QEMU running in the destination node runs UFFDIO_COPY|ZEROPAGE +ioctls on the userfaultfd in order to map the received pages into the +guest (UFFDIO_ZEROPAGE is used if the source page was a zero page). + +A different postcopy thread in the destination node listens with +poll() to the userfaultfd in parallel. When a POLLIN event is +generated after a userfault triggers, the postcopy thread read() from +the userfaultfd and receives the fault address (or -EAGAIN in case the +userfault was already resolved and woken by a UFFDIO_COPY|ZEROPAGE run +by the parallel QEMU migration thread). + +After the QEMU postcopy thread (running in the destination node) gets +the userfault address it writes the information about the missing page +into the socket. The QEMU source node receives the information and +roughly "seeks" to that page address and continues sending all +remaining missing pages from that new page offset. Soon after that +(just the time to flush the tcp_wmem queue through the network) the +migration thread in the QEMU running in the destination node will +receive the page that triggered the userfault and it'll map it as +usual with the UFFDIO_COPY|ZEROPAGE (without actually knowing if it +was spontaneously sent by the source or if it was an urgent page +requested through a userfault). 
+ +By the time the userfaults start, the QEMU in the destination node +doesn't need to keep any per-page state bitmap relative to the live +migration around and a single per-page bitmap has to be maintained in +the QEMU running in the source node to know which pages are still +missing in the destination node. The bitmap in the source node is +checked to find which missing pages to send in round robin and we seek +over it when receiving incoming userfaults. After sending each page of +course the bitmap is updated accordingly. It's also useful to avoid +sending the same page twice (in case the userfault is read by the +postcopy thread just before UFFDIO_COPY|ZEROPAGE runs in the migration +thread). + +Non-cooperative userfaultfd +=========================== + +When the userfaultfd is monitored by an external manager, the manager +must be able to track changes in the process virtual memory +layout. Userfaultfd can notify the manager about such changes using +the same read(2) protocol as for the page fault notifications. The +manager has to explicitly enable these events by setting appropriate +bits in uffdio_api.features passed to UFFDIO_API ioctl: + +UFFD_FEATURE_EVENT_FORK + enable userfaultfd hooks for fork(). When this feature is + enabled, the userfaultfd context of the parent process is + duplicated into the newly created process. The manager + receives UFFD_EVENT_FORK with file descriptor of the new + userfaultfd context in the uffd_msg.fork. + +UFFD_FEATURE_EVENT_REMAP + enable notifications about mremap() calls. When the + non-cooperative process moves a virtual memory area to a + different location, the manager will receive + UFFD_EVENT_REMAP. The uffd_msg.remap will contain the old and + new addresses of the area and its original length. + +UFFD_FEATURE_EVENT_REMOVE + enable notifications about madvise(MADV_REMOVE) and + madvise(MADV_DONTNEED) calls. The event UFFD_EVENT_REMOVE will + be generated upon these calls to madvise. 
The uffd_msg.remove + will contain start and end addresses of the removed area. + +UFFD_FEATURE_EVENT_UNMAP + enable notifications about memory unmapping. The manager will + get UFFD_EVENT_UNMAP with uffd_msg.remove containing start and + end addresses of the unmapped area. + +Although the UFFD_FEATURE_EVENT_REMOVE and UFFD_FEATURE_EVENT_UNMAP +are pretty similar, they quite differ in the action expected from the +userfaultfd manager. In the former case, the virtual memory is +removed, but the area is not, the area remains monitored by the +userfaultfd, and if a page fault occurs in that area it will be +delivered to the manager. The proper resolution for such page fault is +to zeromap the faulting address. However, in the latter case, when an +area is unmapped, either explicitly (with munmap() system call), or +implicitly (e.g. during mremap()), the area is removed and in turn the +userfaultfd context for such area disappears too and the manager will +not get further userland page faults from the removed area. Still, the +notification is required in order to prevent manager from using +UFFDIO_COPY on the unmapped area. + +Unlike userland page faults which have to be synchronous and require +explicit or implicit wakeup, all the events are delivered +asynchronously and the non-cooperative process resumes execution as +soon as manager executes read(). The userfaultfd manager should +carefully synchronize calls to UFFDIO_COPY with the events +processing. To aid the synchronization, the UFFDIO_COPY ioctl will +return -ENOSPC when the monitored process exits at the time of +UFFDIO_COPY, and -ENOENT, when the non-cooperative process has changed +its virtual memory layout simultaneously with outstanding UFFDIO_COPY +operation. + +The current asynchronous model of the event delivery is optimal for +single threaded non-cooperative userfaultfd manager implementations. 
A +synchronous event delivery model can be added later as a new +userfaultfd feature to facilitate multithreading enhancements of the +non cooperative manager, for example to allow UFFDIO_COPY ioctls to +run in parallel to the event reception. Single threaded +implementations should continue to use the current async event +delivery model instead. diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt deleted file mode 100644 index 5048cf661a8a..000000000000 --- a/Documentation/vm/userfaultfd.txt +++ /dev/null @@ -1,241 +0,0 @@ -.. _userfaultfd: - -=========== -Userfaultfd -=========== - -Objective -========= - -Userfaults allow the implementation of on-demand paging from userland -and more generally they allow userland to take control of various -memory page faults, something otherwise only the kernel code could do. - -For example userfaults allows a proper and more optimal implementation -of the PROT_NONE+SIGSEGV trick. - -Design -====== - -Userfaults are delivered and resolved through the userfaultfd syscall. - -The userfaultfd (aside from registering and unregistering virtual -memory ranges) provides two primary functionalities: - -1) read/POLLIN protocol to notify a userland thread of the faults - happening - -2) various UFFDIO_* ioctls that can manage the virtual memory regions - registered in the userfaultfd that allows userland to efficiently - resolve the userfaults it receives via 1) or to manage the virtual - memory in the background - -The real advantage of userfaults if compared to regular virtual memory -management of mremap/mprotect is that the userfaults in all their -operations never involve heavyweight structures like vmas (in fact the -userfaultfd runtime load never takes the mmap_sem for writing). - -Vmas are not suitable for page- (or hugepage) granular fault tracking -when dealing with virtual address spaces that could span -Terabytes. Too many vmas would be needed for that. 
- -The userfaultfd once opened by invoking the syscall, can also be -passed using unix domain sockets to a manager process, so the same -manager process could handle the userfaults of a multitude of -different processes without them being aware about what is going on -(well of course unless they later try to use the userfaultfd -themselves on the same region the manager is already tracking, which -is a corner case that would currently return -EBUSY). - -API -=== - -When first opened the userfaultfd must be enabled invoking the -UFFDIO_API ioctl specifying a uffdio_api.api value set to UFFD_API (or -a later API version) which will specify the read/POLLIN protocol -userland intends to speak on the UFFD and the uffdio_api.features -userland requires. The UFFDIO_API ioctl if successful (i.e. if the -requested uffdio_api.api is spoken also by the running kernel and the -requested features are going to be enabled) will return into -uffdio_api.features and uffdio_api.ioctls two 64bit bitmasks of -respectively all the available features of the read(2) protocol and -the generic ioctl available. - -The uffdio_api.features bitmask returned by the UFFDIO_API ioctl -defines what memory types are supported by the userfaultfd and what -events, except page fault notifications, may be generated. - -If the kernel supports registering userfaultfd ranges on hugetlbfs -virtual memory areas, UFFD_FEATURE_MISSING_HUGETLBFS will be set in -uffdio_api.features. Similarly, UFFD_FEATURE_MISSING_SHMEM will be -set if the kernel supports registering userfaultfd ranges on shared -memory (covering all shmem APIs, i.e. tmpfs, IPCSHM, /dev/zero -MAP_SHARED, memfd_create, etc). - -The userland application that wants to use userfaultfd with hugetlbfs -or shared memory need to set the corresponding flag in -uffdio_api.features to enable those features. 
- -If the userland desires to receive notifications for events other than -page faults, it has to verify that uffdio_api.features has appropriate -UFFD_FEATURE_EVENT_* bits set. These events are described in more -detail below in "Non-cooperative userfaultfd" section. - -Once the userfaultfd has been enabled the UFFDIO_REGISTER ioctl should -be invoked (if present in the returned uffdio_api.ioctls bitmask) to -register a memory range in the userfaultfd by setting the -uffdio_register structure accordingly. The uffdio_register.mode -bitmask will specify to the kernel which kind of faults to track for -the range (UFFDIO_REGISTER_MODE_MISSING would track missing -pages). The UFFDIO_REGISTER ioctl will return the -uffdio_register.ioctls bitmask of ioctls that are suitable to resolve -userfaults on the range registered. Not all ioctls will necessarily be -supported for all memory types depending on the underlying virtual -memory backend (anonymous memory vs tmpfs vs real filebacked -mappings). - -Userland can use the uffdio_register.ioctls to manage the virtual -address space in the background (to add or potentially also remove -memory from the userfaultfd registered range). This means a userfault -could be triggering just before userland maps in the background the -user-faulted page. - -The primary ioctl to resolve userfaults is UFFDIO_COPY. That -atomically copies a page into the userfault registered range and wakes -up the blocked userfaults (unless uffdio_copy.mode & -UFFDIO_COPY_MODE_DONTWAKE is set). Other ioctl works similarly to -UFFDIO_COPY. They're atomic as in guaranteeing that nothing can see an -half copied page since it'll keep userfaulting until the copy has -finished. - -QEMU/KVM -======== - -QEMU/KVM is using the userfaultfd syscall to implement postcopy live -migration. Postcopy live migration is one form of memory -externalization consisting of a virtual machine running with part or -all of its memory residing on a different node in the cloud. 
The -userfaultfd abstraction is generic enough that not a single line of -KVM kernel code had to be modified in order to add postcopy live -migration to QEMU. - -Guest async page faults, FOLL_NOWAIT and all other GUP features work -just fine in combination with userfaults. Userfaults trigger async -page faults in the guest scheduler so those guest processes that -aren't waiting for userfaults (i.e. network bound) can keep running in -the guest vcpus. - -It is generally beneficial to run one pass of precopy live migration -just before starting postcopy live migration, in order to avoid -generating userfaults for readonly guest regions. - -The implementation of postcopy live migration currently uses one -single bidirectional socket but in the future two different sockets -will be used (to reduce the latency of the userfaults to the minimum -possible without having to decrease /proc/sys/net/ipv4/tcp_wmem). - -The QEMU in the source node writes all pages that it knows are missing -in the destination node, into the socket, and the migration thread of -the QEMU running in the destination node runs UFFDIO_COPY|ZEROPAGE -ioctls on the userfaultfd in order to map the received pages into the -guest (UFFDIO_ZEROCOPY is used if the source page was a zero page). - -A different postcopy thread in the destination node listens with -poll() to the userfaultfd in parallel. When a POLLIN event is -generated after a userfault triggers, the postcopy thread read() from -the userfaultfd and receives the fault address (or -EAGAIN in case the -userfault was already resolved and waken by a UFFDIO_COPY|ZEROPAGE run -by the parallel QEMU migration thread). - -After the QEMU postcopy thread (running in the destination node) gets -the userfault address it writes the information about the missing page -into the socket. The QEMU source node receives the information and -roughly "seeks" to that page address and continues sending all -remaining missing pages from that new page offset. 
Soon after that -(just the time to flush the tcp_wmem queue through the network) the -migration thread in the QEMU running in the destination node will -receive the page that triggered the userfault and it'll map it as -usual with the UFFDIO_COPY|ZEROPAGE (without actually knowing if it -was spontaneously sent by the source or if it was an urgent page -requested through a userfault). - -By the time the userfaults start, the QEMU in the destination node -doesn't need to keep any per-page state bitmap relative to the live -migration around and a single per-page bitmap has to be maintained in -the QEMU running in the source node to know which pages are still -missing in the destination node. The bitmap in the source node is -checked to find which missing pages to send in round robin and we seek -over it when receiving incoming userfaults. After sending each page of -course the bitmap is updated accordingly. It's also useful to avoid -sending the same page twice (in case the userfault is read by the -postcopy thread just before UFFDIO_COPY|ZEROPAGE runs in the migration -thread). - -Non-cooperative userfaultfd -=========================== - -When the userfaultfd is monitored by an external manager, the manager -must be able to track changes in the process virtual memory -layout. Userfaultfd can notify the manager about such changes using -the same read(2) protocol as for the page fault notifications. The -manager has to explicitly enable these events by setting appropriate -bits in uffdio_api.features passed to UFFDIO_API ioctl: - -UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When this feature is - enabled, the userfaultfd context of the parent process is - duplicated into the newly created process. The manager - receives UFFD_EVENT_FORK with file descriptor of the new - userfaultfd context in the uffd_msg.fork. - -UFFD_FEATURE_EVENT_REMAP - enable notifications about mremap() calls. 
When the - non-cooperative process moves a virtual memory area to a - different location, the manager will receive - UFFD_EVENT_REMAP. The uffd_msg.remap will contain the old and - new addresses of the area and its original length. - -UFFD_FEATURE_EVENT_REMOVE - enable notifications about madvise(MADV_REMOVE) and - madvise(MADV_DONTNEED) calls. The event UFFD_EVENT_REMOVE will - be generated upon these calls to madvise. The uffd_msg.remove - will contain start and end addresses of the removed area. - -UFFD_FEATURE_EVENT_UNMAP - enable notifications about memory unmapping. The manager will - get UFFD_EVENT_UNMAP with uffd_msg.remove containing start and - end addresses of the unmapped area. - -Although the UFFD_FEATURE_EVENT_REMOVE and UFFD_FEATURE_EVENT_UNMAP -are pretty similar, they quite differ in the action expected from the -userfaultfd manager. In the former case, the virtual memory is -removed, but the area is not, the area remains monitored by the -userfaultfd, and if a page fault occurs in that area it will be -delivered to the manager. The proper resolution for such page fault is -to zeromap the faulting address. However, in the latter case, when an -area is unmapped, either explicitly (with munmap() system call), or -implicitly (e.g. during mremap()), the area is removed and in turn the -userfaultfd context for such area disappears too and the manager will -not get further userland page faults from the removed area. Still, the -notification is required in order to prevent manager from using -UFFDIO_COPY on the unmapped area. - -Unlike userland page faults which have to be synchronous and require -explicit or implicit wakeup, all the events are delivered -asynchronously and the non-cooperative process resumes execution as -soon as manager executes read(). The userfaultfd manager should -carefully synchronize calls to UFFDIO_COPY with the events -processing. 
To aid the synchronization, the UFFDIO_COPY ioctl will -return -ENOSPC when the monitored process exits at the time of -UFFDIO_COPY, and -ENOENT, when the non-cooperative process has changed -its virtual memory layout simultaneously with outstanding UFFDIO_COPY -operation. - -The current asynchronous model of the event delivery is optimal for -single threaded non-cooperative userfaultfd manager implementations. A -synchronous event delivery model can be added later as a new -userfaultfd feature to facilitate multithreading enhancements of the -non cooperative manager, for example to allow UFFDIO_COPY ioctls to -run in parallel to the event reception. Single threaded -implementations should continue to use the current async event -delivery model instead. diff --git a/Documentation/vm/z3fold.rst b/Documentation/vm/z3fold.rst new file mode 100644 index 000000000000..224e3c61d686 --- /dev/null +++ b/Documentation/vm/z3fold.rst @@ -0,0 +1,30 @@ +.. _z3fold: + +====== +z3fold +====== + +z3fold is a special purpose allocator for storing compressed pages. +It is designed to store up to three compressed pages per physical page. +It is a zbud derivative which allows for higher compression +ratio keeping the simplicity and determinism of its predecessor. + +The main differences between z3fold and zbud are: + +* unlike zbud, z3fold allows for up to PAGE_SIZE allocations +* z3fold can hold up to 3 compressed pages in its page +* z3fold doesn't export any API itself and is thus intended to be used + via the zpool API. + +To keep the determinism and simplicity, z3fold, just like zbud, always +stores an integral number of compressed pages per page, but it can store +up to 3 pages unlike zbud which can store at most 2. Therefore the +compression ratio goes to around 2.7x while zbud's one is around 1.7x. + +Unlike zbud (but like zsmalloc for that matter) z3fold_alloc() does not +return a dereferenceable pointer. 
Instead, it returns an unsigned long +handle which encodes actual location of the allocated object. + +Keeping effective compression ratio close to zsmalloc's, z3fold doesn't +depend on MMU enabled and provides more predictable reclaim behavior +which makes it a better fit for small and response-critical systems. diff --git a/Documentation/vm/z3fold.txt b/Documentation/vm/z3fold.txt deleted file mode 100644 index 224e3c61d686..000000000000 --- a/Documentation/vm/z3fold.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. _z3fold: - -====== -z3fold -====== - -z3fold is a special purpose allocator for storing compressed pages. -It is designed to store up to three compressed pages per physical page. -It is a zbud derivative which allows for higher compression -ratio keeping the simplicity and determinism of its predecessor. - -The main differences between z3fold and zbud are: - -* unlike zbud, z3fold allows for up to PAGE_SIZE allocations -* z3fold can hold up to 3 compressed pages in its page -* z3fold doesn't export any API itself and is thus intended to be used - via the zpool API. - -To keep the determinism and simplicity, z3fold, just like zbud, always -stores an integral number of compressed pages per page, but it can store -up to 3 pages unlike zbud which can store at most 2. Therefore the -compression ratio goes to around 2.7x while zbud's one is around 1.7x. - -Unlike zbud (but like zsmalloc for that matter) z3fold_alloc() does not -return a dereferenceable pointer. Instead, it returns an unsigned long -handle which encodes actual location of the allocated object. - -Keeping effective compression ratio close to zsmalloc's, z3fold doesn't -depend on MMU enabled and provides more predictable reclaim behavior -which makes it a better fit for small and response-critical systems. diff --git a/Documentation/vm/zsmalloc.rst b/Documentation/vm/zsmalloc.rst new file mode 100644 index 000000000000..6e79893d6132 --- /dev/null +++ b/Documentation/vm/zsmalloc.rst @@ -0,0 +1,82 @@ +.. 
_zsmalloc: + +======== +zsmalloc +======== + +This allocator is designed for use with zram. Thus, the allocator is +supposed to work well under low memory conditions. In particular, it +never attempts higher order page allocation which is very likely to +fail under memory pressure. On the other hand, if we just use single +(0-order) pages, it would suffer from very high fragmentation -- +any object of size PAGE_SIZE/2 or larger would occupy an entire page. +This was one of the major issues with its predecessor (xvmalloc). + +To overcome these issues, zsmalloc allocates a bunch of 0-order pages +and links them together using various 'struct page' fields. These linked +pages act as a single higher-order page i.e. an object can span 0-order +page boundaries. The code refers to these linked pages as a single entity +called zspage. + +For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE +since this satisfies the requirements of all its current users (in the +worst case, page is incompressible and is thus stored "as-is" i.e. in +uncompressed form). For allocation requests larger than this size, failure +is returned (see zs_malloc). + +Additionally, zs_malloc() does not return a dereferenceable pointer. +Instead, it returns an opaque handle (unsigned long) which encodes actual +location of the allocated object. The reason for this indirection is that +zsmalloc does not keep zspages permanently mapped since that would cause +issues on 32-bit systems where the VA region for kernel space mappings +is very small. So, before using the allocated memory, the object has to +be mapped using zs_map_object() to get a usable pointer and subsequently +unmapped using zs_unmap_object(). + +stat +==== + +With CONFIG_ZSMALLOC_STAT, we could see zsmalloc internal information via +``/sys/kernel/debug/zsmalloc/``. 
Here is a sample of stat output:: + + # cat /sys/kernel/debug/zsmalloc/zram0/classes + + class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage + ... + ... + 9 176 0 1 186 129 8 4 + 10 192 1 0 2880 2872 135 3 + 11 208 0 1 819 795 42 2 + 12 224 0 1 219 159 12 4 + ... + ... + + +class + index +size + object size zspage stores +almost_empty + the number of ZS_ALMOST_EMPTY zspages(see below) +almost_full + the number of ZS_ALMOST_FULL zspages(see below) +obj_allocated + the number of objects allocated +obj_used + the number of objects allocated to the user +pages_used + the number of pages allocated for the class +pages_per_zspage + the number of 0-order pages to make a zspage + +We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where + +* n = number of allocated objects +* N = total number of objects zspage can store +* f = fullness_threshold_frac(ie, 4 at the moment) + +Similarly, we assign zspage to: + +* ZS_ALMOST_FULL when n > N / f +* ZS_EMPTY when n == 0 +* ZS_FULL when n == N diff --git a/Documentation/vm/zsmalloc.txt b/Documentation/vm/zsmalloc.txt deleted file mode 100644 index 6e79893d6132..000000000000 --- a/Documentation/vm/zsmalloc.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. _zsmalloc: - -======== -zsmalloc -======== - -This allocator is designed for use with zram. Thus, the allocator is -supposed to work well under low memory conditions. In particular, it -never attempts higher order page allocation which is very likely to -fail under memory pressure. On the other hand, if we just use single -(0-order) pages, it would suffer from very high fragmentation -- -any object of size PAGE_SIZE/2 or larger would occupy an entire page. -This was one of the major issues with its predecessor (xvmalloc). - -To overcome these issues, zsmalloc allocates a bunch of 0-order pages -and links them together using various 'struct page' fields. These linked -pages act as a single higher-order page i.e. 
an object can span 0-order -page boundaries. The code refers to these linked pages as a single entity -called zspage. - -For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE -since this satisfies the requirements of all its current users (in the -worst case, page is incompressible and is thus stored "as-is" i.e. in -uncompressed form). For allocation requests larger than this size, failure -is returned (see zs_malloc). - -Additionally, zs_malloc() does not return a dereferenceable pointer. -Instead, it returns an opaque handle (unsigned long) which encodes actual -location of the allocated object. The reason for this indirection is that -zsmalloc does not keep zspages permanently mapped since that would cause -issues on 32-bit systems where the VA region for kernel space mappings -is very small. So, before using the allocating memory, the object has to -be mapped using zs_map_object() to get a usable pointer and subsequently -unmapped using zs_unmap_object(). - -stat -==== - -With CONFIG_ZSMALLOC_STAT, we could see zsmalloc internal information via -``/sys/kernel/debug/zsmalloc/``. Here is a sample of stat output:: - - # cat /sys/kernel/debug/zsmalloc/zram0/classes - - class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage - ... - ... - 9 176 0 1 186 129 8 4 - 10 192 1 0 2880 2872 135 3 - 11 208 0 1 819 795 42 2 - 12 224 0 1 219 159 12 4 - ... - ... 
- - -class - index -size - object size zspage stores -almost_empty - the number of ZS_ALMOST_EMPTY zspages(see below) -almost_full - the number of ZS_ALMOST_FULL zspages(see below) -obj_allocated - the number of objects allocated -obj_used - the number of objects allocated to the user -pages_used - the number of pages allocated for the class -pages_per_zspage - the number of 0-order pages to make a zspage - -We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where - -* n = number of allocated objects -* N = total number of objects zspage can store -* f = fullness_threshold_frac(ie, 4 at the moment) - -Similarly, we assign zspage to: - -* ZS_ALMOST_FULL when n > N / f -* ZS_EMPTY when n == 0 -* ZS_FULL when n == N diff --git a/Documentation/vm/zswap.rst b/Documentation/vm/zswap.rst new file mode 100644 index 000000000000..1444ecd40911 --- /dev/null +++ b/Documentation/vm/zswap.rst @@ -0,0 +1,135 @@ +.. _zswap: + +===== +zswap +===== + +Overview +======== + +Zswap is a lightweight compressed cache for swap pages. It takes pages that are +in the process of being swapped out and attempts to compress them into a +dynamically allocated RAM-based memory pool. zswap basically trades CPU cycles +for potentially reduced swap I/O.  This trade-off can also result in a +significant performance improvement if reads from the compressed cache are +faster than reads from a swap device. + +.. note:: + Zswap is a new feature as of v3.11 and interacts heavily with memory + reclaim. This interaction has not been fully explored on the large set of + potential configurations and workloads that exist. For this reason, zswap + is a work in progress and should be considered experimental. + + Some potential benefits: + +* Desktop/laptop users with limited RAM capacities can mitigate the + performance impact of swapping. 
+* Overcommitted guests that share a common I/O resource can + dramatically reduce their swap I/O pressure, avoiding heavy handed I/O + throttling by the hypervisor. This allows more work to get done with less + impact to the guest workload and guests sharing the I/O subsystem +* Users with SSDs as swap devices can extend the life of the device by + drastically reducing life-shortening writes. + +Zswap evicts pages from compressed cache on an LRU basis to the backing swap +device when the compressed pool reaches its size limit. This requirement had +been identified in prior community discussions. + +Zswap is disabled by default but can be enabled at boot time by setting +the ``enabled`` attribute to 1, i.e. ``zswap.enabled=1``. Zswap +can also be enabled and disabled at runtime using the sysfs interface. +An example command to enable zswap at runtime, assuming sysfs is mounted +at ``/sys``, is:: + + echo 1 > /sys/module/zswap/parameters/enabled + +When zswap is disabled at runtime it will stop storing pages that are +being swapped out. However, it will _not_ immediately write out or fault +back into memory all of the pages stored in the compressed pool. The +pages stored in zswap will remain in the compressed pool until they are +either invalidated or faulted back into memory. In order to force all +pages out of the compressed pool, a swapoff on the swap device(s) will +fault back into memory all swapped out pages, including those in the +compressed pool. + +Design +====== + +Zswap receives pages for compression through the Frontswap API and is able to +evict pages from its own compressed pool on an LRU basis and write them back to +the backing swap device in the case that the compressed pool is full. + +Zswap makes use of zpool for managing the compressed memory pool. Each +allocation in zpool is not directly accessible by address. Rather, a handle is +returned by the allocation routine and that handle must be mapped before being +accessed. 
The compressed memory pool grows on demand and shrinks as compressed +pages are freed. The pool is not preallocated. By default, a zpool +of type zbud is created, but it can be selected at boot time by +setting the ``zpool`` attribute, e.g. ``zswap.zpool=zbud``. It can +also be changed at runtime using the sysfs ``zpool`` attribute, e.g.:: + + echo zbud > /sys/module/zswap/parameters/zpool + +The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which +means the compression ratio will always be 2:1 or worse (because of half-full +zbud pages). The zsmalloc type zpool has a more complex compressed page +storage method, and it can achieve greater storage densities. However, +zsmalloc does not implement compressed page eviction, so once zswap fills it +cannot evict the oldest page, it can only reject new pages. + +When a swap page is passed from frontswap to zswap, zswap maintains a mapping +of the swap entry, a combination of the swap type and swap offset, to the zpool +handle that references that compressed swap page. This mapping is achieved +with a red-black tree per swap type. The swap offset is the search key for the +tree nodes. + +During a page fault on a PTE that is a swap entry, frontswap calls the zswap +load function to decompress the page into the page allocated by the page fault +handler. + +Once there are no PTEs referencing a swap page stored in zswap (i.e. the count +in the swap_map goes to 0) the swap code calls the zswap invalidate function, +via frontswap, to free the compressed entry. + +Zswap seeks to be simple in its policies. Sysfs attributes allow for one user +controlled policy: + +* max_pool_percent - The maximum percentage of memory that the compressed + pool can occupy. + +The default compressor is lzo, but it can be selected at boot time by +setting the ``compressor`` attribute, e.g. ``zswap.compressor=lzo``. 
+It can also be changed at runtime using the sysfs "compressor" +attribute, e.g.:: + + echo lzo > /sys/module/zswap/parameters/compressor + +When the zpool and/or compressor parameter is changed at runtime, any existing +compressed pages are not modified; they are left in their own zpool. When a +request is made for a page in an old zpool, it is uncompressed using its +original compressor. Once all pages are removed from an old zpool, the zpool +and its compressor are freed. + +Some of the pages in zswap are same-value filled pages (i.e. contents of the +page have same value or repetitive pattern). These pages include zero-filled +pages and they are handled differently. During store operation, a page is +checked if it is a same-value filled page before compressing it. If true, the +compressed length of the page is set to zero and the pattern or same-filled +value is stored. + +Same-value filled pages identification feature is enabled by default and can be +disabled at boot time by setting the ``same_filled_pages_enabled`` attribute +to 0, e.g. ``zswap.same_filled_pages_enabled=0``. It can also be enabled and +disabled at runtime using the sysfs ``same_filled_pages_enabled`` +attribute, e.g.:: + + echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled + +When zswap same-filled page identification is disabled at runtime, it will stop +checking for the same-value filled pages during store operation. However, the +existing pages which are marked as same-value filled pages remain stored +unchanged in zswap until they are either loaded or invalidated. + +A debugfs interface is provided for various statistics about pool size, number +of pages stored, same-value filled pages and various counters for the reasons +pages are rejected. diff --git a/Documentation/vm/zswap.txt b/Documentation/vm/zswap.txt deleted file mode 100644 index 1444ecd40911..000000000000 --- a/Documentation/vm/zswap.txt +++ /dev/null @@ -1,135 +0,0 @@ -.. 
_zswap: - -===== -zswap -===== - -Overview -======== - -Zswap is a lightweight compressed cache for swap pages. It takes pages that are -in the process of being swapped out and attempts to compress them into a -dynamically allocated RAM-based memory pool. zswap basically trades CPU cycles -for potentially reduced swap I/O.  This trade-off can also result in a -significant performance improvement if reads from the compressed cache are -faster than reads from a swap device. - -.. note:: - Zswap is a new feature as of v3.11 and interacts heavily with memory - reclaim. This interaction has not been fully explored on the large set of - potential configurations and workloads that exist. For this reason, zswap - is a work in progress and should be considered experimental. - - Some potential benefits: - -* Desktop/laptop users with limited RAM capacities can mitigate the - performance impact of swapping. -* Overcommitted guests that share a common I/O resource can - dramatically reduce their swap I/O pressure, avoiding heavy handed I/O - throttling by the hypervisor. This allows more work to get done with less - impact to the guest workload and guests sharing the I/O subsystem -* Users with SSDs as swap devices can extend the life of the device by - drastically reducing life-shortening writes. - -Zswap evicts pages from compressed cache on an LRU basis to the backing swap -device when the compressed pool reaches its size limit. This requirement had -been identified in prior community discussions. - -Zswap is disabled by default but can be enabled at boot time by setting -the ``enabled`` attribute to 1 at boot time. ie: ``zswap.enabled=1``. Zswap -can also be enabled and disabled at runtime using the sysfs interface. -An example command to enable zswap at runtime, assuming sysfs is mounted -at ``/sys``, is:: - - echo 1 > /sys/module/zswap/parameters/enabled - -When zswap is disabled at runtime it will stop storing pages that are -being swapped out. 
However, it will _not_ immediately write out or fault -back into memory all of the pages stored in the compressed pool. The -pages stored in zswap will remain in the compressed pool until they are -either invalidated or faulted back into memory. In order to force all -pages out of the compressed pool, a swapoff on the swap device(s) will -fault back into memory all swapped out pages, including those in the -compressed pool. - -Design -====== - -Zswap receives pages for compression through the Frontswap API and is able to -evict pages from its own compressed pool on an LRU basis and write them back to -the backing swap device in the case that the compressed pool is full. - -Zswap makes use of zpool for the managing the compressed memory pool. Each -allocation in zpool is not directly accessible by address. Rather, a handle is -returned by the allocation routine and that handle must be mapped before being -accessed. The compressed memory pool grows on demand and shrinks as compressed -pages are freed. The pool is not preallocated. By default, a zpool -of type zbud is created, but it can be selected at boot time by -setting the ``zpool`` attribute, e.g. ``zswap.zpool=zbud``. It can -also be changed at runtime using the sysfs ``zpool`` attribute, e.g.:: - - echo zbud > /sys/module/zswap/parameters/zpool - -The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which -means the compression ratio will always be 2:1 or worse (because of half-full -zbud pages). The zsmalloc type zpool has a more complex compressed page -storage method, and it can achieve greater storage densities. However, -zsmalloc does not implement compressed page eviction, so once zswap fills it -cannot evict the oldest page, it can only reject new pages. - -When a swap page is passed from frontswap to zswap, zswap maintains a mapping -of the swap entry, a combination of the swap type and swap offset, to the zpool -handle that references that compressed swap page. 
This mapping is achieved -with a red-black tree per swap type. The swap offset is the search key for the -tree nodes. - -During a page fault on a PTE that is a swap entry, frontswap calls the zswap -load function to decompress the page into the page allocated by the page fault -handler. - -Once there are no PTEs referencing a swap page stored in zswap (i.e. the count -in the swap_map goes to 0) the swap code calls the zswap invalidate function, -via frontswap, to free the compressed entry. - -Zswap seeks to be simple in its policies. Sysfs attributes allow for one user -controlled policy: - -* max_pool_percent - The maximum percentage of memory that the compressed - pool can occupy. - -The default compressor is lzo, but it can be selected at boot time by -setting the ``compressor`` attribute, e.g. ``zswap.compressor=lzo``. -It can also be changed at runtime using the sysfs "compressor" -attribute, e.g.:: - - echo lzo > /sys/module/zswap/parameters/compressor - -When the zpool and/or compressor parameter is changed at runtime, any existing -compressed pages are not modified; they are left in their own zpool. When a -request is made for a page in an old zpool, it is uncompressed using its -original compressor. Once all pages are removed from an old zpool, the zpool -and its compressor are freed. - -Some of the pages in zswap are same-value filled pages (i.e. contents of the -page have same value or repetitive pattern). These pages include zero-filled -pages and they are handled differently. During store operation, a page is -checked if it is a same-value filled page before compressing it. If true, the -compressed length of the page is set to zero and the pattern or same-filled -value is stored. - -Same-value filled pages identification feature is enabled by default and can be -disabled at boot time by setting the ``same_filled_pages_enabled`` attribute -to 0, e.g. ``zswap.same_filled_pages_enabled=0``. 
It can also be enabled and -disabled at runtime using the sysfs ``same_filled_pages_enabled`` -attribute, e.g.:: - - echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled - -When zswap same-filled page identification is disabled at runtime, it will stop -checking for the same-value filled pages during store operation. However, the -existing pages which are marked as same-value filled pages remain stored -unchanged in zswap until they are either loaded or invalidated. - -A debugfs interface is provided for various statistic about pool size, number -of pages stored, same-value filled pages and various counters for the reasons -pages are rejected. diff --git a/MAINTAINERS b/MAINTAINERS index 3bdc260e36b7..575849a8343e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15406,7 +15406,7 @@ L: linux-mm@kvack.org S: Maintained F: mm/zsmalloc.c F: include/linux/zsmalloc.h -F: Documentation/vm/zsmalloc.txt +F: Documentation/vm/zsmalloc.rst ZSWAP COMPRESSED SWAP CACHING M: Seth Jennings diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index e96adcbcab41..f53e5060afe7 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -584,7 +584,7 @@ config ARCH_DISCONTIGMEM_ENABLE Say Y to support efficient handling of discontiguous physical memory, for architectures which are either NUMA (Non-Uniform Memory Access) or have huge holes in the physical address space for other reasons. - See for more. + See for more. source "mm/Kconfig" diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index bbe12a038d21..3ac9bf4cc2a0 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -397,7 +397,7 @@ config ARCH_DISCONTIGMEM_ENABLE Say Y to support efficient handling of discontiguous physical memory, for architectures which are either NUMA (Non-Uniform Memory Access) or have huge holes in the physical address space for other reasons. - See for more. + See for more. 
config ARCH_FLATMEM_ENABLE def_bool y diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8128c3b68d6b..4562810857eb 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2551,7 +2551,7 @@ config ARCH_DISCONTIGMEM_ENABLE Say Y to support efficient handling of discontiguous physical memory, for architectures which are either NUMA (Non-Uniform Memory Access) or have huge holes in the physical address space for other reasons. - See for more. + See for more. config ARCH_SPARSEMEM_ENABLE bool diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 73ce5dd07642..f8c0f10949ea 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -880,7 +880,7 @@ config PPC_MEM_KEYS page-based protections, but without requiring modification of the page tables when an application changes protection domains. - For details, see Documentation/vm/protection-keys.txt + For details, see Documentation/vm/protection-keys.rst If unsure, say y. diff --git a/fs/Kconfig b/fs/Kconfig index bc821a86d965..ba53dc2a9691 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -196,7 +196,7 @@ config HUGETLBFS help hugetlbfs is a filesystem backing for HugeTLB pages, based on ramfs. For architectures that support it, say Y here and read - for details. + for details. If unsure, say N. diff --git a/fs/dax.c b/fs/dax.c index 0276df90e86c..0eb65c34d5a6 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -618,7 +618,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, * downgrading page table protection not changing it to point * to a new page. * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ if (pmdp) { #ifdef CONFIG_FS_DAX_PMD diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index ec6d2983a5cb..91d14c4ac04a 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -956,7 +956,7 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, /* * The soft-dirty tracker uses #PF-s to catch writes * to pages, so write-protect the pte as well. 
See the - * Documentation/vm/soft-dirty.txt for full description + * Documentation/vm/soft-dirty.rst for full description * of how soft-dirty works. */ pte_t ptent = *pte; @@ -1436,7 +1436,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, * Bits 0-54 page frame number (PFN) if present * Bits 0-4 swap type if swapped * Bits 5-54 swap offset if swapped - * Bit 55 pte is soft-dirty (see Documentation/vm/soft-dirty.txt) + * Bit 55 pte is soft-dirty (see Documentation/vm/soft-dirty.rst) * Bit 56 page exclusively mapped * Bits 57-60 zero * Bit 61 page is file-page or shared-anon diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 325017ad9311..77be87c095f2 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -16,7 +16,7 @@ /* * Heterogeneous Memory Management (HMM) * - * See Documentation/vm/hmm.txt for reasons and overview of what HMM is and it + * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and it * is for. Here we focus on the HMM API description, with some explanation of * the underlying implementation. * diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 7b4899c06f49..74ea5e2310a8 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -45,7 +45,7 @@ struct vmem_altmap { * must be treated as an opaque object, rather than a "normal" struct page. * * A more complete discussion of unaddressable memory may be found in - * include/linux/hmm.h and Documentation/vm/hmm.txt. + * include/linux/hmm.h and Documentation/vm/hmm.rst. * * MEMORY_DEVICE_PUBLIC: * Device memory that is cache coherent from device and CPU point of view. This @@ -67,7 +67,7 @@ enum memory_type { * page_free() * * Additional notes about MEMORY_DEVICE_PRIVATE may be found in - * include/linux/hmm.h and Documentation/vm/hmm.txt. There is also a brief + * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief * explanation in include/linux/memory_hotplug.h. 
* * The page_fault() callback must migrate page back, from device memory to diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 2d07a1ed5a31..392e6af82701 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -174,7 +174,7 @@ struct mmu_notifier_ops { * invalidate_range_start()/end() notifiers, as * invalidate_range() alread catches the points in time when an * external TLB range needs to be flushed. For more in depth - * discussion on this see Documentation/vm/mmu_notifier.txt + * discussion on this see Documentation/vm/mmu_notifier.rst * * Note that this function might be called with just a sub-range * of what was passed to invalidate_range_start()/end(), if diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 1149533aa2fa..df2c7d11f496 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -28,7 +28,7 @@ extern struct mm_struct *mm_alloc(void); * * Use mmdrop() to release the reference acquired by mmgrab(). * - * See also for an in-depth explanation + * See also for an in-depth explanation * of &mm_struct.mm_count vs &mm_struct.mm_users. */ static inline void mmgrab(struct mm_struct *mm) @@ -51,7 +51,7 @@ extern void mmdrop(struct mm_struct *mm); * * Use mmput() to release the reference acquired by mmget(). * - * See also for an in-depth explanation + * See also for an in-depth explanation * of &mm_struct.mm_count vs &mm_struct.mm_users. */ static inline void mmget(struct mm_struct *mm) diff --git a/include/linux/swap.h b/include/linux/swap.h index 7b6a59f722a3..4003973deff4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -53,7 +53,7 @@ static inline int current_is_kswapd(void) /* * Unaddressable device memory support. See include/linux/hmm.h and - * Documentation/vm/hmm.txt. Short description is we need struct pages for + * Documentation/vm/hmm.rst. 
Short description is we need struct pages for * device memory that is unaddressable (inaccessible) by CPU, so that we can * migrate part of a process memory to device memory. * diff --git a/mm/Kconfig b/mm/Kconfig index c782e8fb7235..b9f04213a353 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -312,7 +312,7 @@ config KSM the many instances by a single page with that content, so saving memory until one or another app needs to modify the content. Recommended for use with KVM, or with other duplicative applications. - See Documentation/vm/ksm.txt for more information: KSM is inactive + See Documentation/vm/ksm.rst for more information: KSM is inactive until a program has madvised that an area is MADV_MERGEABLE, and root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). @@ -537,7 +537,7 @@ config MEM_SOFT_DIRTY into a page just as regular dirty bit, but unlike the latter it can be cleared by hands. - See Documentation/vm/soft-dirty.txt for more details. + See Documentation/vm/soft-dirty.rst for more details. config ZSWAP bool "Compressed cache for swap pages (EXPERIMENTAL)" @@ -664,7 +664,7 @@ config IDLE_PAGE_TRACKING be useful to tune memory cgroup limits and/or for job placement within a compute cluster. - See Documentation/vm/idle_page_tracking.txt for more details. + See Documentation/vm/idle_page_tracking.rst for more details. # arch_add_memory() comprehends device memory config ARCH_HAS_ZONE_DEVICE diff --git a/mm/cleancache.c b/mm/cleancache.c index f7b9fdc79d97..126548b5a292 100644 --- a/mm/cleancache.c +++ b/mm/cleancache.c @@ -3,7 +3,7 @@ * * This code provides the generic "frontend" layer to call a matching * "backend" driver implementation of cleancache. See - * Documentation/vm/cleancache.txt for more information. + * Documentation/vm/cleancache.rst for more information. * * Copyright (C) 2009-2010 Oracle Corp. All rights reserved. 
* Author: Dan Magenheimer diff --git a/mm/frontswap.c b/mm/frontswap.c index fec8b5044040..4f5476a0f955 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -3,7 +3,7 @@ * * This code provides the generic "frontend" layer to call a matching * "backend" driver implementation of frontswap. See - * Documentation/vm/frontswap.txt for more information. + * Documentation/vm/frontswap.rst for more information. * * Copyright (C) 2009-2012 Oracle Corp. All rights reserved. * Author: Dan Magenheimer diff --git a/mm/hmm.c b/mm/hmm.c index 320545b98ff5..af176c6820cf 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -37,7 +37,7 @@ #if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC) /* - * Device private memory see HMM (Documentation/vm/hmm.txt) or hmm.h + * Device private memory see HMM (Documentation/vm/hmm.rst) or hmm.h */ DEFINE_STATIC_KEY_FALSE(device_private_key); EXPORT_SYMBOL(device_private_key); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 87ab9b8f56b5..6d5911673450 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1185,7 +1185,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd, * mmu_notifier_invalidate_range_end() happens which can lead to a * device seeing memory write in different order than CPU. * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); @@ -2037,7 +2037,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, * replacing a zero pmd write protected page with a zero pte write * protected page. * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ pmdp_huge_clear_flush(vma, haddr, pmd); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7c204e3d132b..5af974abae46 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3289,7 +3289,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, * table protection not changing it to point * to a new page. 
* - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ huge_ptep_set_wrprotect(src, addr, src_pte); } @@ -4355,7 +4355,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, * No need to call mmu_notifier_invalidate_range() we are downgrading * page table protection not changing it to point to a new page. * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ i_mmap_unlock_write(vma->vm_file->f_mapping); mmu_notifier_invalidate_range_end(mm, start, end); diff --git a/mm/ksm.c b/mm/ksm.c index 293721f5da70..0b88698a9014 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1049,7 +1049,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, * No need to notify as we are downgrading page table to read * only not changing it to point to a new page. * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); /* @@ -1138,7 +1138,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, * No need to notify as we are replacing a read only page with another * read only page with the same content. * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ ptep_clear_flush(vma, addr, ptep); set_pte_at_notify(mm, addr, ptep, newpte); diff --git a/mm/mmap.c b/mm/mmap.c index 9efdc021ad22..39fc51d1639c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2769,7 +2769,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long ret = -EINVAL; struct file *file; - pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n", + pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. 
See Documentation/vm/remap_file_pages.rst.\n", current->comm, current->pid); if (prot) diff --git a/mm/rmap.c b/mm/rmap.c index 47db27f8049e..854b703fbe2a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -942,7 +942,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, * downgrading page table protection not changing it to point * to a new page. * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ if (ret) (*cleaned)++; @@ -1587,7 +1587,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * point at new page while a device still is using this * page. * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ dec_mm_counter(mm, mm_counter_file(page)); } @@ -1597,7 +1597,7 @@ discard: * done above for all cases requiring it to happen under page * table lock before mmu_notifier_invalidate_range_end() * - * See Documentation/vm/mmu_notifier.txt + * See Documentation/vm/mmu_notifier.rst */ page_remove_rmap(subpage, PageHuge(page)); put_page(page); diff --git a/mm/util.c b/mm/util.c index c1250501364f..e857c80c6f4a 100644 --- a/mm/util.c +++ b/mm/util.c @@ -609,7 +609,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed); * succeed and -ENOMEM implies there is not. * * We currently support three overcommit policies, which are set via the - * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting + * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst * * Strict overcommit modes added 2002 Feb 26 by Alan Cox. * Additional code 2002 Jul 20 by Robert Love. -- cgit v1.2.3 From 0207dd1173fe31c153ffd439c4bb33d1341829b1 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 19 Apr 2018 01:02:59 +0200 Subject: net: phy: mdio-gpio: Remove redundant platform data header The platform data header file is now unused. Remove it, but add an extra include which it brought in. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 - drivers/net/phy/mdio-gpio.c | 2 +- include/linux/platform_data/mdio-gpio.h | 23 ----------------------- 3 files changed, 1 insertion(+), 25 deletions(-) delete mode 100644 include/linux/platform_data/mdio-gpio.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b60179d948bb..a7321687cae9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5321,7 +5321,6 @@ F: include/linux/*mdio*.h F: include/linux/of_net.h F: include/linux/phy.h F: include/linux/phy_fixed.h -F: include/linux/platform_data/mdio-gpio.h F: include/linux/platform_data/mdio-bcm-unimac.h F: include/trace/events/mdio.h F: include/uapi/linux/mdio.h diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 281c905ef9fd..b501221819e1 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h deleted file mode 100644 index bd91fa98a3aa..000000000000 --- a/include/linux/platform_data/mdio-gpio.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * MDIO-GPIO bus platform data structures - * - * Copyright (C) 2008, Paulius Zaleckas - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#ifndef __LINUX_MDIO_GPIO_H -#define __LINUX_MDIO_GPIO_H - -#include - -struct mdio_gpio_platform_data { - /* GPIO numbers for bus pins */ - struct gpio_desc *mdc; - struct gpio_desc *mdio; - struct gpio_desc *mdo; -}; - -#endif /* __LINUX_MDIO_GPIO_H */ -- cgit v1.2.3 From 1827b0678863bc97a1653fdf5308762b2aefcd56 Mon Sep 17 00:00:00 2001 From: Phil Elwell Date: Thu, 19 Apr 2018 17:59:39 +0100 Subject: lan78xx: Read LED states from Device Tree Add support for DT property "microchip,led-modes", a vector of zero to four cells (u32s) in the range 0-15, each of which sets the mode for one of the LEDs. Some possible values are: 0=link/activity 1=link1000/activity 2=link100/activity 3=link10/activity 4=link100/1000/activity 5=link10/1000/activity 6=link10/100/activity 14=off 15=on These values are given symbolic constants in a dt-bindings header. Also use the presence of the DT property to indicate that the LEDs should be enabled - necessary in the event that no valid OTP or EEPROM is available. Signed-off-by: Phil Elwell Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + drivers/net/phy/microchip.c | 25 ++++++++++++++++++++++ drivers/net/usb/lan78xx.c | 32 ++++++++++++++++++++++++++++- include/dt-bindings/net/microchip-lan78xx.h | 21 +++++++++++++++++++ include/linux/microchipphy.h | 3 +++ 5 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 include/dt-bindings/net/microchip-lan78xx.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a7321687cae9..c952d3076a65 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14572,6 +14572,7 @@ M: Microchip Linux Driver Support L: netdev@vger.kernel.org S: Maintained F: drivers/net/usb/lan78xx.* +F: include/dt-bindings/net/microchip-lan78xx.h USB MASS STORAGE DRIVER M: Alan Stern diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 0f293ef28935..ef5e160fe9dc 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #define DRIVER_AUTHOR "WOOJUNG HUH " #define DRIVER_DESC "Microchip LAN88XX PHY driver" @@ -70,6 +72,8 @@ static int lan88xx_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; struct lan88xx_priv *priv; + u32 led_modes[4]; + int len; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -77,6 +81,27 @@ static int lan88xx_probe(struct phy_device *phydev) priv->wolopts = 0; + len = of_property_read_variable_u32_array(dev->of_node, + "microchip,led-modes", + led_modes, + 0, + ARRAY_SIZE(led_modes)); + if (len >= 0) { + u32 reg = 0; + int i; + + for (i = 0; i < len; i++) { + if (led_modes[i] > 15) + return -EINVAL; + reg |= led_modes[i] << (i * 4); + } + for (; i < ARRAY_SIZE(led_modes); i++) + reg |= LAN78XX_FORCE_LED_OFF << (i * 4); + (void)phy_write(phydev, LAN78XX_PHY_LED_MODE_SELECT, reg); + } else if (len == -EOVERFLOW) { + return -EINVAL; + } + /* these values can be used to identify internal PHY */ priv->chip_id = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_ID); priv->chip_rev = 
phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_REV); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index a823f010de30..6b03b973083e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include "lan78xx.h" @@ -1760,6 +1761,7 @@ done: static int lan78xx_mdio_init(struct lan78xx_net *dev) { + struct device_node *node; int ret; dev->mdiobus = mdiobus_alloc(); @@ -1788,7 +1790,13 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) break; } - ret = mdiobus_register(dev->mdiobus); + node = of_get_child_by_name(dev->udev->dev.of_node, "mdio"); + if (node) { + ret = of_mdiobus_register(dev->mdiobus, node); + of_node_put(node); + } else { + ret = mdiobus_register(dev->mdiobus); + } if (ret) { netdev_err(dev->net, "can't register MDIO bus\n"); goto exit1; @@ -2077,6 +2085,28 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + if (phydev->mdio.dev.of_node) { + u32 reg; + int len; + + len = of_property_count_elems_of_size(phydev->mdio.dev.of_node, + "microchip,led-modes", + sizeof(u32)); + if (len >= 0) { + /* Ensure the appropriate LEDs are enabled */ + lan78xx_read_reg(dev, HW_CFG, ®); + reg &= ~(HW_CFG_LED0_EN_ | + HW_CFG_LED1_EN_ | + HW_CFG_LED2_EN_ | + HW_CFG_LED3_EN_); + reg |= (len > 0) * HW_CFG_LED0_EN_ | + (len > 1) * HW_CFG_LED1_EN_ | + (len > 2) * HW_CFG_LED2_EN_ | + (len > 3) * HW_CFG_LED3_EN_; + lan78xx_write_reg(dev, HW_CFG, reg); + } + } + genphy_config_aneg(phydev); dev->fc_autoneg = phydev->autoneg; diff --git a/include/dt-bindings/net/microchip-lan78xx.h b/include/dt-bindings/net/microchip-lan78xx.h new file mode 100644 index 000000000000..0742ff075307 --- /dev/null +++ b/include/dt-bindings/net/microchip-lan78xx.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_MICROCHIP_LAN78XX_H +#define 
_DT_BINDINGS_MICROCHIP_LAN78XX_H + +/* LED modes for LAN7800/LAN7850 embedded PHY */ + +#define LAN78XX_LINK_ACTIVITY 0 +#define LAN78XX_LINK_1000_ACTIVITY 1 +#define LAN78XX_LINK_100_ACTIVITY 2 +#define LAN78XX_LINK_10_ACTIVITY 3 +#define LAN78XX_LINK_100_1000_ACTIVITY 4 +#define LAN78XX_LINK_10_1000_ACTIVITY 5 +#define LAN78XX_LINK_10_100_ACTIVITY 6 +#define LAN78XX_DUPLEX_COLLISION 8 +#define LAN78XX_COLLISION 9 +#define LAN78XX_ACTIVITY 10 +#define LAN78XX_AUTONEG_FAULT 12 +#define LAN78XX_FORCE_LED_OFF 14 +#define LAN78XX_FORCE_LED_ON 15 + +#endif diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h index eb492d47f717..8e4015e7f8d3 100644 --- a/include/linux/microchipphy.h +++ b/include/linux/microchipphy.h @@ -70,4 +70,7 @@ #define LAN88XX_MMD3_CHIP_ID (32877) #define LAN88XX_MMD3_CHIP_REV (32878) +/* Registers specific to the LAN7800/LAN7850 embedded phy */ +#define LAN78XX_PHY_LED_MODE_SELECT (0x1D) + #endif /* _MICROCHIPPHY_H */ -- cgit v1.2.3 From 01d26589dee4b23376642fba333539605c52d324 Mon Sep 17 00:00:00 2001 From: Phil Elwell Date: Thu, 19 Apr 2018 17:59:40 +0100 Subject: dt-bindings: Document the DT bindings for lan78xx The Microchip LAN78XX family of devices are Ethernet controllers with a USB interface. Despite being discoverable devices it can be useful to be able to configure them from Device Tree, particularly in low-cost applications without an EEPROM or programmed OTP. Document the supported properties in a bindings file. Signed-off-by: Phil Elwell Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- .../devicetree/bindings/net/microchip,lan78xx.txt | 54 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 55 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/microchip,lan78xx.txt (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/net/microchip,lan78xx.txt b/Documentation/devicetree/bindings/net/microchip,lan78xx.txt new file mode 100644 index 000000000000..76786a0f6d3d --- /dev/null +++ b/Documentation/devicetree/bindings/net/microchip,lan78xx.txt @@ -0,0 +1,54 @@ +Microchip LAN78xx Gigabit Ethernet controller + +The LAN78XX devices are usually configured by programming their OTP or with +an external EEPROM, but some platforms (e.g. Raspberry Pi 3 B+) have neither. +The Device Tree properties, if present, override the OTP and EEPROM. + +Required properties: +- compatible: Should be one of "usb424,7800", "usb424,7801" or "usb424,7850". + +Optional properties: +- local-mac-address: see ethernet.txt +- mac-address: see ethernet.txt + +Optional properties of the embedded PHY: +- microchip,led-modes: a 0..4 element vector, with each element configuring + the operating mode of an LED. Omitted LEDs are turned off. Allowed values + are defined in "include/dt-bindings/net/microchip-lan78xx.h". 
+ +Example: + +/* Based on the configuration for a Raspberry Pi 3 B+ */ +&usb { + usb-port@1 { + compatible = "usb424,2514"; + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + usb-port@1 { + compatible = "usb424,2514"; + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + ethernet: ethernet@1 { + compatible = "usb424,7800"; + reg = <1>; + local-mac-address = [ 00 11 22 33 44 55 ]; + + mdio { + #address-cells = <0x1>; + #size-cells = <0x0>; + eth_phy: ethernet-phy@1 { + reg = <1>; + microchip,led-modes = < + LAN78XX_LINK_1000_ACTIVITY + LAN78XX_LINK_10_100_ACTIVITY + >; + }; + }; + }; + }; + }; +}; diff --git a/MAINTAINERS b/MAINTAINERS index c952d3076a65..81465707d8a8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14571,6 +14571,7 @@ M: Woojung Huh M: Microchip Linux Driver Support L: netdev@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/net/microchip,lan78xx.txt F: drivers/net/usb/lan78xx.* F: include/dt-bindings/net/microchip-lan78xx.h -- cgit v1.2.3 From 6ae7abe370d14c62e5ae70e3f5909d31a44a560b Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Thu, 19 Apr 2018 11:20:32 -0400 Subject: MAINTAINERS: add maintainer for Qualcomm HIDMA drivers drivers/dma/qcom directory is being shared by multiple QCOM dmaengine drivers. Separate ownership by filenames. 
Signed-off-by: Sinan Kaya Signed-off-by: Vinod Koul --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..2110ad09b93d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11657,6 +11657,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.g S: Supported F: arch/hexagon/ +QUALCOMM HIDMA DRIVER +M: Sinan Kaya +L: linux-arm-kernel@lists.infradead.org +L: linux-arm-msm@vger.kernel.org +L: dmaengine@vger.kernel.org +S: Supported +F: drivers/dma/qcom/hidma* + QUALCOMM IOMMU M: Rob Clark L: iommu@lists.linux-foundation.org -- cgit v1.2.3 From 7fd899fff5907dbb02089494102ef628988f2330 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 23 Apr 2018 11:55:01 +0800 Subject: MAINTAINERS: add maintainer for the DPAA2 PTP clock driver This patch is to add maintainer for the DPAA2 PTP clock driver. Signed-off-by: Yangbo Lu Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..7733efa0db92 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4395,6 +4395,12 @@ L: linux-kernel@vger.kernel.org S: Maintained F: drivers/staging/fsl-dpaa2/ethsw +DPAA2 PTP CLOCK DRIVER +M: Yangbo Lu +L: linux-kernel@vger.kernel.org +S: Maintained +F: drivers/staging/fsl-dpaa2/rtc + DPT_I2O SCSI RAID DRIVER M: Adaptec OEM Raid Solutions L: linux-scsi@vger.kernel.org -- cgit v1.2.3 From cd3bf368aa7a352a577be7b9540c6b5b2681bf17 Mon Sep 17 00:00:00 2001 From: Alban Bedel Date: Sat, 24 Mar 2018 23:38:40 +0100 Subject: phy: Add a driver for the ATH79 USB phy The ATH79 USB phy is very simple, it only have a reset. On some SoC a second reset is used to force the phy in suspend mode regardless of the USB controller status. This driver is added to the qualcom directory as atheros is now part of qualcom and newer SoC of this familly are marketed under the qualcom name. 
Signed-off-by: Alban Bedel Signed-off-by: Kishon Vijay Abraham I --- MAINTAINERS | 8 +++ drivers/phy/qualcomm/Kconfig | 11 +++- drivers/phy/qualcomm/Makefile | 1 + drivers/phy/qualcomm/phy-ath79-usb.c | 108 +++++++++++++++++++++++++++++++++++ 4 files changed, 127 insertions(+), 1 deletion(-) create mode 100644 drivers/phy/qualcomm/phy-ath79-usb.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..7561bd48f011 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2326,6 +2326,14 @@ S: Maintained F: drivers/gpio/gpio-ath79.c F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt +ATHEROS 71XX/9XXX USB PHY DRIVER +M: Alban Bedel +W: https://github.com/AlbanBedel/linux +T: git git://github.com/AlbanBedel/linux +S: Maintained +F: drivers/phy/qualcomm/phy-ath79-usb.c +F: Documentation/devicetree/bindings/phy/phy-ath79-usb.txt + ATHEROS ATH GENERIC UTILITIES M: "Luis R. Rodriguez" L: linux-wireless@vger.kernel.org diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig index 7bfa64baf837..632a0e73ee10 100644 --- a/drivers/phy/qualcomm/Kconfig +++ b/drivers/phy/qualcomm/Kconfig @@ -1,6 +1,15 @@ # -# Phy drivers for Qualcomm platforms +# Phy drivers for Qualcomm and Atheros platforms # +config PHY_ATH79_USB + tristate "Atheros AR71XX/9XXX USB PHY driver" + depends on OF && (ATH79 || COMPILE_TEST) + default y if USB_EHCI_HCD_PLATFORM || USB_OHCI_HCD_PLATFORM + select RESET_CONTROLLER + select GENERIC_PHY + help + Enable this to support the USB PHY on Atheros AR71XX/9XXX SoCs. 
+ config PHY_QCOM_APQ8064_SATA tristate "Qualcomm APQ8064 SATA SerDes/PHY driver" depends on ARCH_QCOM diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile index 9abb7899762a..deb831f453ae 100644 --- a/drivers/phy/qualcomm/Makefile +++ b/drivers/phy/qualcomm/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PHY_ATH79_USB) += phy-ath79-usb.o obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c new file mode 100644 index 000000000000..6fd6e07ab345 --- /dev/null +++ b/drivers/phy/qualcomm/phy-ath79-usb.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Atheros AR71XX/9XXX USB PHY driver + * + * Copyright (C) 2015-2018 Alban Bedel + */ + +#include +#include +#include +#include + +struct ath79_usb_phy { + struct reset_control *reset; + /* The suspend override logic is inverted, hence the no prefix + * to make the code a bit easier to understand. 
+ */ + struct reset_control *no_suspend_override; +}; + +static int ath79_usb_phy_power_on(struct phy *phy) +{ + struct ath79_usb_phy *priv = phy_get_drvdata(phy); + int err = 0; + + if (priv->no_suspend_override) { + err = reset_control_assert(priv->no_suspend_override); + if (err) + return err; + } + + err = reset_control_deassert(priv->reset); + if (err && priv->no_suspend_override) + reset_control_assert(priv->no_suspend_override); + + return err; +} + +static int ath79_usb_phy_power_off(struct phy *phy) +{ + struct ath79_usb_phy *priv = phy_get_drvdata(phy); + int err = 0; + + err = reset_control_assert(priv->reset); + if (err) + return err; + + if (priv->no_suspend_override) { + err = reset_control_deassert(priv->no_suspend_override); + if (err) + reset_control_deassert(priv->reset); + } + + return err; +} + +static const struct phy_ops ath79_usb_phy_ops = { + .power_on = ath79_usb_phy_power_on, + .power_off = ath79_usb_phy_power_off, + .owner = THIS_MODULE, +}; + +static int ath79_usb_phy_probe(struct platform_device *pdev) +{ + struct ath79_usb_phy *priv; + struct phy *phy; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy"); + if (IS_ERR(priv->reset)) + return PTR_ERR(priv->reset); + + priv->no_suspend_override = devm_reset_control_get_optional( + &pdev->dev, "usb-suspend-override"); + if (IS_ERR(priv->no_suspend_override)) + return PTR_ERR(priv->no_suspend_override); + + phy = devm_phy_create(&pdev->dev, NULL, &ath79_usb_phy_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + phy_set_drvdata(phy, priv); + + return PTR_ERR_OR_ZERO(devm_of_phy_provider_register( + &pdev->dev, of_phy_simple_xlate)); +} + +static const struct of_device_id ath79_usb_phy_of_match[] = { + { .compatible = "qca,ar7100-usb-phy" }, + {} +}; +MODULE_DEVICE_TABLE(of, ath79_usb_phy_of_match); + +static struct platform_driver ath79_usb_phy_driver = { + .probe = ath79_usb_phy_probe, 
+ .driver = { + .of_match_table = ath79_usb_phy_of_match, + .name = "ath79-usb-phy", + } +}; +module_platform_driver(ath79_usb_phy_driver); + +MODULE_DESCRIPTION("ATH79 USB PHY driver"); +MODULE_AUTHOR("Alban Bedel "); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 91dabc54073324006d5eaba483679c47b6eb93a8 Mon Sep 17 00:00:00 2001 From: Adam Thomson Date: Mon, 23 Apr 2018 15:10:57 +0100 Subject: Documentation: power: Initial effort to document power_supply ABI This commit adds generic ABI information regarding power_supply properties. This is an initial attempt to try and align the usage of these properties between drivers. As part of this commit, common Battery and USB related properties have been listed. Signed-off-by: Adam Thomson Reviewed-by: Heikki Krogerus Reviewed-by: Sebastian Reichel Signed-off-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-class-power | 443 ++++++++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 444 insertions(+) (limited to 'MAINTAINERS') diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power index f85ce9e327b9..e046566e38cb 100644 --- a/Documentation/ABI/testing/sysfs-class-power +++ b/Documentation/ABI/testing/sysfs-class-power @@ -1,3 +1,446 @@ +===== General Properties ===== + +What: /sys/class/power_supply//manufacturer +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports the name of the device manufacturer. + + Access: Read + Valid values: Represented as string + +What: /sys/class/power_supply//model_name +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports the name of the device model. + + Access: Read + Valid values: Represented as string + +What: /sys/class/power_supply//serial_number +Date: January 2008 +Contact: linux-pm@vger.kernel.org +Description: + Reports the serial number of the device. 
+ + Access: Read + Valid values: Represented as string + +What: /sys/class/power_supply//type +Date: May 2010 +Contact: linux-pm@vger.kernel.org +Description: + Describes the main type of the supply. + + Access: Read + Valid values: "Battery", "UPS", "Mains", "USB" + +===== Battery Properties ===== + +What: /sys/class/power_supply//capacity +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Fine grain representation of battery capacity. + Access: Read + Valid values: 0 - 100 (percent) + +What: /sys/class/power_supply//capacity_alert_max +Date: July 2012 +Contact: linux-pm@vger.kernel.org +Description: + Maximum battery capacity trip-wire value where the supply will + notify user-space of the event. This is normally used for the + battery discharging scenario where user-space needs to know the + battery has dropped to an upper level so it can take + appropriate action (e.g. warning user that battery level is + low). + + Access: Read, Write + Valid values: 0 - 100 (percent) + +What: /sys/class/power_supply//capacity_alert_min +Date: July 2012 +Contact: linux-pm@vger.kernel.org +Description: + Minimum battery capacity trip-wire value where the supply will + notify user-space of the event. This is normally used for the + battery discharging scenario where user-space needs to know the + battery has dropped to a lower level so it can take + appropriate action (e.g. warning user that battery level is + critically low). + + Access: Read, Write + Valid values: 0 - 100 (percent) + +What: /sys/class/power_supply//capacity_level +Date: June 2009 +Contact: linux-pm@vger.kernel.org +Description: + Coarse representation of battery capacity. + + Access: Read + Valid values: "Unknown", "Critical", "Low", "Normal", "High", + "Full" + +What: /sys/class/power_supply//current_avg +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports an average IBAT current reading for the battery, over a + fixed period. 
Normally devices will provide a fixed interval in + which they average readings to smooth out the reported value. + + Access: Read + Valid values: Represented in microamps + +What: /sys/class/power_supply//current_max +Date: October 2010 +Contact: linux-pm@vger.kernel.org +Description: + Reports the maximum IBAT current allowed into the battery. + + Access: Read + Valid values: Represented in microamps + +What: /sys/class/power_supply//current_now +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports an instant, single IBAT current reading for the battery. + This value is not averaged/smoothed. + + Access: Read + Valid values: Represented in microamps + +What: /sys/class/power_supply//charge_type +Date: July 2009 +Contact: linux-pm@vger.kernel.org +Description: + Represents the type of charging currently being applied to the + battery. + + Access: Read + Valid values: "Unknown", "N/A", "Trickle", "Fast" + +What: /sys/class/power_supply//charge_term_current +Date: July 2014 +Contact: linux-pm@vger.kernel.org +Description: + Reports the charging current value which is used to determine + when the battery is considered full and charging should end. + + Access: Read + Valid values: Represented in microamps + +What: /sys/class/power_supply//health +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports the health of the battery or battery side of charger + functionality. + + Access: Read + Valid values: "Unknown", "Good", "Overheat", "Dead", + "Over voltage", "Unspecified failure", "Cold", + "Watchdog timer expire", "Safety timer expire" + +What: /sys/class/power_supply//precharge_current +Date: June 2017 +Contact: linux-pm@vger.kernel.org +Description: + Reports the charging current applied during pre-charging phase + for a battery charge cycle. 
+ + Access: Read + Valid values: Represented in microamps + +What: /sys/class/power_supply//present +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports whether a battery is present or not in the system. + + Access: Read + Valid values: + 0: Absent + 1: Present + +What: /sys/class/power_supply//status +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Represents the charging status of the battery. Normally this + is read-only reporting although for some supplies this can be + used to enable/disable charging to the battery. + + Access: Read, Write + Valid values: "Unknown", "Charging", "Discharging", + "Not charging", "Full" + +What: /sys/class/power_supply//technology +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Describes the battery technology supported by the supply. + + Access: Read + Valid values: "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", + "NiCd", "LiMn" + +What: /sys/class/power_supply//temp +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports the current TBAT battery temperature reading. + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//temp_alert_max +Date: July 2012 +Contact: linux-pm@vger.kernel.org +Description: + Maximum TBAT temperature trip-wire value where the supply will + notify user-space of the event. This is normally used for the + battery charging scenario where user-space needs to know the + battery temperature has crossed an upper threshold so it can + take appropriate action (e.g. warning user that battery level is + critically high, and charging has stopped). + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//temp_alert_min +Date: July 2012 +Contact: linux-pm@vger.kernel.org +Description: + Minimum TBAT temperature trip-wire value where the supply will + notify user-space of the event. 
This is normally used for the + battery charging scenario where user-space needs to know the + battery temperature has crossed a lower threshold so it can take + appropriate action (e.g. warning user that battery level is + high, and charging current has been reduced accordingly to + remedy the situation). + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//temp_max +Date: July 2014 +Contact: linux-pm@vger.kernel.org +Description: + Reports the maximum allowed TBAT battery temperature for + charging. + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//temp_min +Date: July 2014 +Contact: linux-pm@vger.kernel.org +Description: + Reports the minimum allowed TBAT battery temperature for + charging. + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//voltage_avg +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports an average VBAT voltage reading for the battery, over a + fixed period. Normally devices will provide a fixed interval in + which they average readings to smooth out the reported value. + + Access: Read + Valid values: Represented in microvolts + +What: /sys/class/power_supply//voltage_max +Date: January 2008 +Contact: linux-pm@vger.kernel.org +Description: + Reports the maximum safe VBAT voltage permitted for the battery, + during charging. + + Access: Read + Valid values: Represented in microvolts + +What: /sys/class/power_supply//voltage_min +Date: January 2008 +Contact: linux-pm@vger.kernel.org +Description: + Reports the minimum safe VBAT voltage permitted for the battery, + during discharging. + + Access: Read + Valid values: Represented in microvolts + +What: /sys/class/power_supply//voltage_now +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports an instant, single VBAT voltage reading for the battery. + This value is not averaged/smoothed.
+ + Access: Read + Valid values: Represented in microvolts + +===== USB Properties ===== + +What: /sys/class/power_supply//current_avg +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports an average IBUS current reading over a fixed period. + Normally devices will provide a fixed interval in which they + average readings to smooth out the reported value. + + Access: Read + Valid values: Represented in microamps + + +What: /sys/class/power_supply//current_max +Date: October 2010 +Contact: linux-pm@vger.kernel.org +Description: + Reports the maximum IBUS current the supply can support. + + Access: Read + Valid values: Represented in microamps + +What: /sys/class/power_supply//current_now +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports the IBUS current supplied now. This value is generally + read-only reporting, unless the 'online' state of the supply + is set to be programmable, in which case this value can be set + within the reported min/max range. + + Access: Read, Write + Valid values: Represented in microamps + +What: /sys/class/power_supply//input_current_limit +Date: July 2014 +Contact: linux-pm@vger.kernel.org +Description: + Details the incoming IBUS current limit currently set in the + supply. Normally this is configured based on the type of + connection made (e.g. A configured SDP should output a maximum + of 500mA so the input current limit is set to the same value). + + Access: Read, Write + Valid values: Represented in microamps + +What: /sys/class/power_supply//online, +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Indicates if VBUS is present for the supply. When the supply is + online, and the supply allows it, then it's possible to switch + between online states (e.g. Fixed -> Programmable for a PD_PPS + USB supply so voltage and current can be controlled). 
+ + Access: Read, Write + Valid values: + 0: Offline + 1: Online Fixed - Fixed Voltage Supply + 2: Online Programmable - Programmable Voltage Supply + +What: /sys/class/power_supply//temp +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports the current supply temperature reading. This would + normally be the internal temperature of the device itself (e.g + TJUNC temperature of an IC) + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//temp_alert_max +Date: July 2012 +Contact: linux-pm@vger.kernel.org +Description: + Maximum supply temperature trip-wire value where the supply will + notify user-space of the event. This is normally used for the + charging scenario where user-space needs to know the supply + temperature has crossed an upper threshold so it can take + appropriate action (e.g. warning user that the supply + temperature is critically high, and charging has stopped to + remedy the situation). + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//temp_alert_min +Date: July 2012 +Contact: linux-pm@vger.kernel.org +Description: + Minimum supply temperature trip-wire value where the supply will + notify user-space of the event. This is normally used for the + charging scenario where user-space needs to know the supply + temperature has crossed a lower threshold so it can take + appropriate action (e.g. warning user that the supply + temperature is high, and charging current has been reduced + accordingly to remedy the situation). + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//temp_max +Date: July 2014 +Contact: linux-pm@vger.kernel.org +Description: + Reports the maximum allowed supply temperature for operation. 
+ + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//temp_min +Date: July 2014 +Contact: linux-pm@vger.kernel.org +Description: + Reports the minimum allowed supply temperature for operation. + + Access: Read + Valid values: Represented in 1/10 Degrees Celsius + +What: /sys/class/power_supply//voltage_max +Date: January 2008 +Contact: linux-pm@vger.kernel.org +Description: + Reports the maximum VBUS voltage the supply can support. + + Access: Read + Valid values: Represented in microvolts + +What: /sys/class/power_supply//voltage_min +Date: January 2008 +Contact: linux-pm@vger.kernel.org +Description: + Reports the minimum VBUS voltage the supply can support. + + Access: Read + Valid values: Represented in microvolts + +What: /sys/class/power_supply//voltage_now +Date: May 2007 +Contact: linux-pm@vger.kernel.org +Description: + Reports the VBUS voltage supplied now. This value is generally + read-only reporting, unless the 'online' state of the supply + is set to be programmable, in which case this value can be set + within the reported min/max range.
+ + Access: Read, Write + Valid values: Represented in microvolts + +===== Device Specific Properties ===== + What: /sys/class/power/ds2760-battery.*/charge_now Date: May 2010 KernelVersion: 2.6.35 diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..700bfb7af2ee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11242,6 +11242,7 @@ M: Sebastian Reichel L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git S: Maintained +F: Documentation/ABI/testing/sysfs-class-power F: Documentation/devicetree/bindings/power/supply/ F: include/linux/power_supply.h F: drivers/power/supply/ -- cgit v1.2.3 From 483abace7bdd31c527d821d3c28cb2879c84b1c8 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 26 Apr 2018 12:14:50 +0530 Subject: ASoC: Update email address for Vinod Update the email address for compressed audio maintainer Also update .mailmap. Signed-off-by: Vinod Koul Signed-off-by: Mark Brown --- .mailmap | 3 +++ MAINTAINERS | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/.mailmap b/.mailmap index e18cab73e209..4cb8bf0a8402 100644 --- a/.mailmap +++ b/.mailmap @@ -181,6 +181,9 @@ Uwe Kleine-König Uwe Kleine-König Uwe Kleine-König Valdis Kletnieks +Vinod Koul +Vinod Koul +Vinod Koul Viresh Kumar Viresh Kumar Viresh Kumar diff --git a/MAINTAINERS b/MAINTAINERS index 98d14aee828a..2b62e6cf6b1c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12925,7 +12925,7 @@ F: include/uapi/sound/ F: sound/ SOUND - COMPRESSED AUDIO -M: Vinod Koul +M: Vinod Koul L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Supported -- cgit v1.2.3 From baa8c6ddf7be33f2b0ddeb68906d668caf646baa Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:28 +0100 Subject: MAINTAINERS: Add myself as a co-maintainer for the locking subsystem I've been heavily involved with concurrency and memory ordering stuff 
(see ATOMIC INFRASTRUCTURE and LINUX KERNEL MEMORY CONSISTENCY MODEL) and with arm64 now using qrwlock with a view to using qspinlock in the near future, I'm going to continue being involved with the core locking primitives. Reflect this by adding myself as a co-maintainer alongside Ingo and Peter. Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Thomas Gleixner Cc: Waiman Long Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-15-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dd66ae9a847e..e4585e33862c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8328,6 +8328,7 @@ F: Documentation/admin-guide/LSM/LoadPin.rst LOCKING PRIMITIVES M: Peter Zijlstra M: Ingo Molnar +M: Will Deacon L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core S: Maintained -- cgit v1.2.3 From 1351b50cc4b8abcab128febf3a27d01af44697b3 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Mon, 23 Apr 2018 23:08:06 +0200 Subject: dt-bindings: iio: afe: add binding for current-sense-shunt An ADC is often used to measure other quantities indirectly. This binding describe one cases, a current through a shunt resistor measured by the voltage over it. 
Signed-off-by: Peter Rosin Reviewed-by: Rob Herring Signed-off-by: Jonathan Cameron --- .../bindings/iio/afe/current-sense-shunt.txt | 41 ++++++++++++++++++++++ MAINTAINERS | 6 ++++ 2 files changed, 47 insertions(+) create mode 100644 Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt b/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt new file mode 100644 index 000000000000..8e7b3e408a52 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt @@ -0,0 +1,41 @@ +Current Sense Shunt +=================== + +When an io-channel measures the voltage over a current sense shunt, +the interesting measurement is almost always the current through the +shunt, not the voltage over it. This binding describes such a current +sense circuit. + +Required properties: +- compatible : "current-sense-shunt" +- io-channels : Channel node of a voltage io-channel. +- shunt-resistor-micro-ohms : The shunt resistance in microohms. + +Example: +The system current is measured by measuring the voltage over a +3.3 ohms shunt resistor. + +sysi { + compatible = "current-sense-shunt"; + io-channels = <&tiadc 0>; + + /* Divide the voltage by 3300000/1000000 (or 3.3) for the current.
 */ + shunt-resistor-micro-ohms = <3300000>; +}; + +&i2c { + tiadc: adc@48 { + compatible = "ti,ads1015"; + reg = <0x48>; + #io-channel-cells = <1>; + + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { /* IN0,IN1 differential */ + reg = <0>; + ti,gain = <1>; + ti,datarate = <4>; + }; + }; +}; diff --git a/MAINTAINERS b/MAINTAINERS index 002cb013b000..d3052bd4a752 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6898,6 +6898,12 @@ F: drivers/staging/iio/ F: include/linux/iio/ F: tools/iio/ +IIO UNIT CONVERTER +M: Peter Rosin +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt + IKANOS/ADI EAGLE ADSL USB DRIVER M: Matthieu Castet M: Stanislaw Gruszka -- cgit v1.2.3 From ff915802fb7f2bb2fa9890cb88dab9cdabb466b8 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Mon, 23 Apr 2018 23:08:07 +0200 Subject: dt-bindings: iio: afe: add binding for voltage-divider An ADC is often used to measure other quantities indirectly. This binding describes one case, a "big" voltage measured with the help of a voltage divider. Signed-off-by: Peter Rosin Reviewed-by: Rob Herring Signed-off-by: Jonathan Cameron --- .../bindings/iio/afe/voltage-divider.txt | 53 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 54 insertions(+) create mode 100644 Documentation/devicetree/bindings/iio/afe/voltage-divider.txt (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/iio/afe/voltage-divider.txt b/Documentation/devicetree/bindings/iio/afe/voltage-divider.txt new file mode 100644 index 000000000000..b452a8406107 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/afe/voltage-divider.txt @@ -0,0 +1,53 @@ +Voltage divider +=============== + +When an io-channel measures the midpoint of a voltage divider, the +interesting voltage is often the voltage over the full resistance +of the divider. This binding describes the voltage divider in such +a circuit. + + Vin ----. + | + .-----.
+ | R | + '-----' + | + +---- Vout + | + .-----. + | Rout| + '-----' + | + GND + +Required properties: +- compatible : "voltage-divider" +- io-channels : Channel node of a voltage io-channel measuring Vout. +- output-ohms : Resistance Rout over which the output voltage is measured. + See full-ohms. +- full-ohms : Resistance R + Rout for the full divider. The io-channel + is scaled by the Rout / (R + Rout) quotient. + +Example: +The system voltage is circa 12V, but divided down with a 22/222 +voltage divider (R = 200 Ohms, Rout = 22 Ohms) and fed to an ADC. + +sysv { + compatible = "voltage-divider"; + io-channels = <&maxadc 1>; + + /* Scale the system voltage by 22/222 to fit the ADC range. */ + output-ohms = <22>; + full-ohms = <222>; /* 200 + 22 */ +}; + +&spi { + maxadc: adc@0 { + compatible = "maxim,max1027"; + reg = <0>; + #io-channel-cells = <1>; + interrupt-parent = <&gpio5>; + interrupts = <15 IRQ_TYPE_EDGE_RISING>; + spi-max-frequency = <1000000>; + }; +}; diff --git a/MAINTAINERS b/MAINTAINERS index d3052bd4a752..35987f60649b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6903,6 +6903,7 @@ M: Peter Rosin L: linux-iio@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt +F: Documentation/devicetree/bindings/iio/afe/voltage-divider.txt IKANOS/ADI EAGLE ADSL USB DRIVER M: Matthieu Castet -- cgit v1.2.3 From 2e9a128f359c1baa8f0fbfdb95a1b40f84244801 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Mon, 23 Apr 2018 23:08:08 +0200 Subject: dt-bindings: iio: afe: add binding for current-sense-amplifier Similar to current sense shunts, but an amplifier enables the use of a smaller sense resistance. 
Signed-off-by: Peter Rosin Reviewed-by: Rob Herring Signed-off-by: Jonathan Cameron --- .../bindings/iio/afe/current-sense-amplifier.txt | 26 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 27 insertions(+) create mode 100644 Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt b/Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt new file mode 100644 index 000000000000..0ddbaebba8ce --- /dev/null +++ b/Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt @@ -0,0 +1,26 @@ +Current Sense Amplifier +======================= + +When an io-channel measures the output voltage from a current sense +amplifier, the interesting measurement is almost always the current +through the sense resistor, not the voltage output. This binding +describes such a current sense circuit. + +Required properties: +- compatible : "current-sense-amplifier" +- io-channels : Channel node of a voltage io-channel. +- sense-resistor-micro-ohms : The sense resistance in microohms. + +Optional properties: +- sense-gain-mult: Amplifier gain multiplier. The default is <1>. +- sense-gain-div: Amplifier gain divider. The default is <1>.
+ +Example: + +sysi { + compatible = "current-sense-amplifier"; + io-channels = <&tiadc 0>; + + sense-resistor-micro-ohms = <20000>; + sense-gain-mult = <50>; +}; diff --git a/MAINTAINERS b/MAINTAINERS index 35987f60649b..d1c0f58cf8a0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6902,6 +6902,7 @@ IIO UNIT CONVERTER M: Peter Rosin L: linux-iio@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt F: Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt F: Documentation/devicetree/bindings/iio/afe/voltage-divider.txt -- cgit v1.2.3 From 8b74816b5a9adac4629f0f072c122d57b8f0eb78 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Mon, 23 Apr 2018 23:08:09 +0200 Subject: iio: afe: rescale: new driver If an ADC channel measures the midpoint of a voltage divider, the interesting voltage is often the voltage over the full resistance. E.g. if the full voltage is too big for the ADC to handle. Likewise, if an ADC channel measures the voltage across a shunt resistor, with or without amplification, the interesting value is often the current through the resistor. This driver solves these problems by allowing to linearly scale a channel and/or by allowing changes to the type of the channel.
Signed-off-by: Peter Rosin Signed-off-by: Jonathan Cameron --- MAINTAINERS | 1 + drivers/iio/Kconfig | 1 + drivers/iio/Makefile | 1 + drivers/iio/afe/Kconfig | 19 +++ drivers/iio/afe/Makefile | 6 + drivers/iio/afe/iio-rescale.c | 359 ++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 387 insertions(+) create mode 100644 drivers/iio/afe/Kconfig create mode 100644 drivers/iio/afe/Makefile create mode 100644 drivers/iio/afe/iio-rescale.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index d1c0f58cf8a0..ad4c68af122a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6905,6 +6905,7 @@ S: Maintained F: Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt F: Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt F: Documentation/devicetree/bindings/iio/afe/voltage-divider.txt +F: drivers/iio/afe/iio-rescale.c IKANOS/ADI EAGLE ADSL USB DRIVER M: Matthieu Castet diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index b3c8c6ef0dff..d69e85a8bdc3 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig @@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT source "drivers/iio/accel/Kconfig" source "drivers/iio/adc/Kconfig" +source "drivers/iio/afe/Kconfig" source "drivers/iio/amplifiers/Kconfig" source "drivers/iio/chemical/Kconfig" source "drivers/iio/common/Kconfig" diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile index b16b2e9ddc40..d8cba9c229c0 100644 --- a/drivers/iio/Makefile +++ b/drivers/iio/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o obj-y += accel/ obj-y += adc/ +obj-y += afe/ obj-y += amplifiers/ obj-y += buffer/ obj-y += chemical/ diff --git a/drivers/iio/afe/Kconfig b/drivers/iio/afe/Kconfig new file mode 100644 index 000000000000..c91eef04825a --- /dev/null +++ b/drivers/iio/afe/Kconfig @@ -0,0 +1,19 @@ +# +# Analog Front End drivers +# +# When adding new entries keep the list in alphabetical order + +menu "Analog Front Ends" + +config IIO_RESCALE + 
tristate "IIO rescale" + depends on OF || COMPILE_TEST + help + Say yes here to build support for the IIO rescaling + that handles voltage dividers, current sense shunts and + current sense amplifiers. + + To compile this driver as a module, choose M here: the + module will be called iio-rescale. + +endmenu diff --git a/drivers/iio/afe/Makefile b/drivers/iio/afe/Makefile new file mode 100644 index 000000000000..5fabb7bcac47 --- /dev/null +++ b/drivers/iio/afe/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for industrial I/O Analog Front Ends (AFE) +# + +# When adding new entries keep the list in alphabetical order +obj-$(CONFIG_IIO_RESCALE) += iio-rescale.o diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c new file mode 100644 index 000000000000..e9ceee66d1e7 --- /dev/null +++ b/drivers/iio/afe/iio-rescale.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IIO rescale driver + * + * Copyright (C) 2018 Axentia Technologies AB + * + * Author: Peter Rosin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct rescale; + +struct rescale_cfg { + enum iio_chan_type type; + int (*props)(struct device *dev, struct rescale *rescale); +}; + +struct rescale { + const struct rescale_cfg *cfg; + struct iio_channel *source; + struct iio_chan_spec chan; + struct iio_chan_spec_ext_info *ext_info; + s32 numerator; + s32 denominator; +}; + +static int rescale_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct rescale *rescale = iio_priv(indio_dev); + unsigned long long tmp; + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + return iio_read_channel_raw(rescale->source, val); + + case IIO_CHAN_INFO_SCALE: + ret = iio_read_channel_scale(rescale->source, val, val2); + switch (ret) { + case IIO_VAL_FRACTIONAL: + *val *= rescale->numerator; + *val2 *= rescale->denominator; + return ret; + case IIO_VAL_INT: + *val *= 
rescale->numerator; + if (rescale->denominator == 1) + return ret; + *val2 = rescale->denominator; + return IIO_VAL_FRACTIONAL; + case IIO_VAL_FRACTIONAL_LOG2: + tmp = *val * 1000000000LL; + do_div(tmp, rescale->denominator); + tmp *= rescale->numerator; + do_div(tmp, 1000000000LL); + *val = tmp; + return ret; + default: + return -EOPNOTSUPP; + } + default: + return -EINVAL; + } +} + +static int rescale_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + struct rescale *rescale = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + *type = IIO_VAL_INT; + return iio_read_avail_channel_raw(rescale->source, + vals, length); + default: + return -EINVAL; + } +} + +static const struct iio_info rescale_info = { + .read_raw = rescale_read_raw, + .read_avail = rescale_read_avail, +}; + +static ssize_t rescale_read_ext_info(struct iio_dev *indio_dev, + uintptr_t private, + struct iio_chan_spec const *chan, + char *buf) +{ + struct rescale *rescale = iio_priv(indio_dev); + + return iio_read_channel_ext_info(rescale->source, + rescale->ext_info[private].name, + buf); +} + +static ssize_t rescale_write_ext_info(struct iio_dev *indio_dev, + uintptr_t private, + struct iio_chan_spec const *chan, + const char *buf, size_t len) +{ + struct rescale *rescale = iio_priv(indio_dev); + + return iio_write_channel_ext_info(rescale->source, + rescale->ext_info[private].name, + buf, len); +} + +static int rescale_configure_channel(struct device *dev, + struct rescale *rescale) +{ + struct iio_chan_spec *chan = &rescale->chan; + struct iio_chan_spec const *schan = rescale->source->channel; + + chan->indexed = 1; + chan->output = schan->output; + chan->ext_info = rescale->ext_info; + chan->type = rescale->cfg->type; + + if (!iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) || + !iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) { + dev_err(dev, "source channel does not support raw/scale\n"); + 
return -EINVAL; + } + + chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE); + + if (iio_channel_has_available(schan, IIO_CHAN_INFO_RAW)) + chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW); + + return 0; +} + +static int rescale_current_sense_amplifier_props(struct device *dev, + struct rescale *rescale) +{ + u32 sense; + u32 gain_mult = 1; + u32 gain_div = 1; + u32 factor; + int ret; + + ret = device_property_read_u32(dev, "sense-resistor-micro-ohms", + &sense); + if (ret) { + dev_err(dev, "failed to read the sense resistance: %d\n", ret); + return ret; + } + + device_property_read_u32(dev, "sense-gain-mult", &gain_mult); + device_property_read_u32(dev, "sense-gain-div", &gain_div); + + /* + * Calculate the scaling factor, 1 / (gain * sense), or + * gain_div / (gain_mult * sense), while trying to keep the + * numerator/denominator from overflowing. + */ + factor = gcd(sense, 1000000); + rescale->numerator = 1000000 / factor; + rescale->denominator = sense / factor; + + factor = gcd(rescale->numerator, gain_mult); + rescale->numerator /= factor; + rescale->denominator *= gain_mult / factor; + + factor = gcd(rescale->denominator, gain_div); + rescale->numerator *= gain_div / factor; + rescale->denominator /= factor; + + return 0; +} + +static int rescale_current_sense_shunt_props(struct device *dev, + struct rescale *rescale) +{ + u32 shunt; + u32 factor; + int ret; + + ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms", + &shunt); + if (ret) { + dev_err(dev, "failed to read the shunt resistance: %d\n", ret); + return ret; + } + + factor = gcd(shunt, 1000000); + rescale->numerator = 1000000 / factor; + rescale->denominator = shunt / factor; + + return 0; +} + +static int rescale_voltage_divider_props(struct device *dev, + struct rescale *rescale) +{ + int ret; + u32 factor; + + ret = device_property_read_u32(dev, "output-ohms", + &rescale->denominator); + if (ret) { + dev_err(dev, "failed to read output-ohms: 
%d\n", ret); + return ret; + } + + ret = device_property_read_u32(dev, "full-ohms", + &rescale->numerator); + if (ret) { + dev_err(dev, "failed to read full-ohms: %d\n", ret); + return ret; + } + + factor = gcd(rescale->numerator, rescale->denominator); + rescale->numerator /= factor; + rescale->denominator /= factor; + + return 0; +} + +enum rescale_variant { + CURRENT_SENSE_AMPLIFIER, + CURRENT_SENSE_SHUNT, + VOLTAGE_DIVIDER, +}; + +static const struct rescale_cfg rescale_cfg[] = { + [CURRENT_SENSE_AMPLIFIER] = { + .type = IIO_CURRENT, + .props = rescale_current_sense_amplifier_props, + }, + [CURRENT_SENSE_SHUNT] = { + .type = IIO_CURRENT, + .props = rescale_current_sense_shunt_props, + }, + [VOLTAGE_DIVIDER] = { + .type = IIO_VOLTAGE, + .props = rescale_voltage_divider_props, + }, +}; + +static const struct of_device_id rescale_match[] = { + { .compatible = "current-sense-amplifier", + .data = &rescale_cfg[CURRENT_SENSE_AMPLIFIER], }, + { .compatible = "current-sense-shunt", + .data = &rescale_cfg[CURRENT_SENSE_SHUNT], }, + { .compatible = "voltage-divider", + .data = &rescale_cfg[VOLTAGE_DIVIDER], }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rescale_match); + +static int rescale_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct iio_dev *indio_dev; + struct iio_channel *source; + struct rescale *rescale; + int sizeof_ext_info; + int sizeof_priv; + int i; + int ret; + + source = devm_iio_channel_get(dev, NULL); + if (IS_ERR(source)) { + if (PTR_ERR(source) != -EPROBE_DEFER) + dev_err(dev, "failed to get source channel\n"); + return PTR_ERR(source); + } + + sizeof_ext_info = iio_get_channel_ext_info_count(source); + if (sizeof_ext_info) { + sizeof_ext_info += 1; /* one extra entry for the sentinel */ + sizeof_ext_info *= sizeof(*rescale->ext_info); + } + + sizeof_priv = sizeof(*rescale) + sizeof_ext_info; + + indio_dev = devm_iio_device_alloc(dev, sizeof_priv); + if (!indio_dev) + return -ENOMEM; + + rescale = 
iio_priv(indio_dev); + + rescale->cfg = of_device_get_match_data(dev); + rescale->numerator = 1; + rescale->denominator = 1; + + ret = rescale->cfg->props(dev, rescale); + if (ret) + return ret; + + if (!rescale->numerator || !rescale->denominator) { + dev_err(dev, "invalid scaling factor.\n"); + return -EINVAL; + } + + platform_set_drvdata(pdev, indio_dev); + + rescale->source = source; + + indio_dev->name = dev_name(dev); + indio_dev->dev.parent = dev; + indio_dev->info = &rescale_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = &rescale->chan; + indio_dev->num_channels = 1; + if (sizeof_ext_info) { + rescale->ext_info = devm_kmemdup(dev, + source->channel->ext_info, + sizeof_ext_info, GFP_KERNEL); + if (!rescale->ext_info) + return -ENOMEM; + + for (i = 0; rescale->ext_info[i].name; ++i) { + struct iio_chan_spec_ext_info *ext_info = + &rescale->ext_info[i]; + + if (source->channel->ext_info[i].read) + ext_info->read = rescale_read_ext_info; + if (source->channel->ext_info[i].write) + ext_info->write = rescale_write_ext_info; + ext_info->private = i; + } + } + + ret = rescale_configure_channel(dev, rescale); + if (ret) + return ret; + + return devm_iio_device_register(dev, indio_dev); +} + +static struct platform_driver rescale_driver = { + .probe = rescale_probe, + .driver = { + .name = "iio-rescale", + .of_match_table = rescale_match, + }, +}; +module_platform_driver(rescale_driver); + +MODULE_DESCRIPTION("IIO rescale driver"); +MODULE_AUTHOR("Peter Rosin "); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 3855f66c8fdb30e0e9cc67003279af2c52451a7f Mon Sep 17 00:00:00 2001 From: Xiaolei Li Date: Mon, 16 Apr 2018 15:41:02 +0800 Subject: MAINTAINERS: Add entry for Mediatek NAND controller driver Add entry for Mediatek NAND controller driver and its bindings. 
Signed-off-by: Xiaolei Li Signed-off-by: Boris Brezillon --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..0eff9eba4d28 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8941,6 +8941,13 @@ L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mediatek/mt7601u/ +MEDIATEK NAND CONTROLLER DRIVER +M: Xiaolei Li +L: linux-mtd@lists.infradead.org +S: Maintained +F: drivers/mtd/nand/raw/mtk_* +F: Documentation/devicetree/bindings/mtd/mtk-nand.txt + MEDIATEK RANDOM NUMBER GENERATOR SUPPORT M: Sean Wang S: Maintained -- cgit v1.2.3 From 4baf6819111a3e29d13456c0e7816beec0e457f2 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Sun, 22 Apr 2018 19:55:53 +0200 Subject: mtd: nand: add myself as NAND co-maintainer I have been actively reviewing and contributing improvements to the NAND subsystem for more than 6 months now and am willing to continue doing so in the future. Formalize my new role. 
Signed-off-by: Miquel Raynal Acked-by: Richard Weinberger Signed-off-by: Boris Brezillon --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0eff9eba4d28..634abf4569c4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9581,6 +9581,7 @@ F: drivers/net/ethernet/myricom/myri10ge/ NAND FLASH SUBSYSTEM M: Boris Brezillon +M: Miquel Raynal R: Richard Weinberger L: linux-mtd@lists.infradead.org W: http://www.linux-mtd.infradead.org/ -- cgit v1.2.3 From 4d220ed0f8140c478ab7b0a14d96821da639b646 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Sat, 28 Apr 2018 19:56:37 -0700 Subject: bpf: remove tracepoints from bpf core tracepoints to bpf core were added as a way to provide introspection to bpf programs and maps, but after some time it became clear that this approach is inadequate, so prog_id, map_id and corresponding get_next_id, get_fd_by_id, get_info_by_fd, prog_query APIs were introduced and fully adopted by bpftool and other applications. The tracepoints in bpf core started to rot and causing syzbot warnings: WARNING: CPU: 0 PID: 3008 at kernel/trace/trace_event_perf.c:274 Kernel panic - not syncing: panic_on_warn set ... perf_trace_bpf_map_keyval+0x260/0xbd0 include/trace/events/bpf.h:228 trace_bpf_map_update_elem include/trace/events/bpf.h:274 [inline] map_update_elem kernel/bpf/syscall.c:597 [inline] SYSC_bpf kernel/bpf/syscall.c:1478 [inline] Hence this patch deletes tracepoints in bpf core. Reported-by: Eric Biggers Reported-by: syzbot Signed-off-by: Alexei Starovoitov Acked-by: David S. 
Miller Signed-off-by: Daniel Borkmann --- MAINTAINERS | 1 - include/linux/bpf_trace.h | 1 - include/trace/events/bpf.h | 355 --------------------------------------------- kernel/bpf/core.c | 6 - kernel/bpf/inode.c | 16 +- kernel/bpf/syscall.c | 15 +- 6 files changed, 2 insertions(+), 392 deletions(-) delete mode 100644 include/trace/events/bpf.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a52800867850..537fd17a211b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2727,7 +2727,6 @@ F: Documentation/networking/filter.txt F: Documentation/bpf/ F: include/linux/bpf* F: include/linux/filter.h -F: include/trace/events/bpf.h F: include/trace/events/xdp.h F: include/uapi/linux/bpf* F: include/uapi/linux/filter.h diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h index e6fe98ae3794..ddf896abcfb6 100644 --- a/include/linux/bpf_trace.h +++ b/include/linux/bpf_trace.h @@ -2,7 +2,6 @@ #ifndef __LINUX_BPF_TRACE_H__ #define __LINUX_BPF_TRACE_H__ -#include #include #endif /* __LINUX_BPF_TRACE_H__ */ diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h deleted file mode 100644 index 150185647e6b..000000000000 --- a/include/trace/events/bpf.h +++ /dev/null @@ -1,355 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM bpf - -#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_BPF_H - -/* These are only used within the BPF_SYSCALL code */ -#ifdef CONFIG_BPF_SYSCALL - -#include -#include -#include -#include - -#define __PROG_TYPE_MAP(FN) \ - FN(SOCKET_FILTER) \ - FN(KPROBE) \ - FN(SCHED_CLS) \ - FN(SCHED_ACT) \ - FN(TRACEPOINT) \ - FN(XDP) \ - FN(PERF_EVENT) \ - FN(CGROUP_SKB) \ - FN(CGROUP_SOCK) \ - FN(LWT_IN) \ - FN(LWT_OUT) \ - FN(LWT_XMIT) - -#define __MAP_TYPE_MAP(FN) \ - FN(HASH) \ - FN(ARRAY) \ - FN(PROG_ARRAY) \ - FN(PERF_EVENT_ARRAY) \ - FN(PERCPU_HASH) \ - FN(PERCPU_ARRAY) \ - FN(STACK_TRACE) \ - FN(CGROUP_ARRAY) \ - FN(LRU_HASH) \ - FN(LRU_PERCPU_HASH) 
\ - FN(LPM_TRIE) - -#define __PROG_TYPE_TP_FN(x) \ - TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x); -#define __PROG_TYPE_SYM_FN(x) \ - { BPF_PROG_TYPE_##x, #x }, -#define __PROG_TYPE_SYM_TAB \ - __PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 } -__PROG_TYPE_MAP(__PROG_TYPE_TP_FN) - -#define __MAP_TYPE_TP_FN(x) \ - TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x); -#define __MAP_TYPE_SYM_FN(x) \ - { BPF_MAP_TYPE_##x, #x }, -#define __MAP_TYPE_SYM_TAB \ - __MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 } -__MAP_TYPE_MAP(__MAP_TYPE_TP_FN) - -DECLARE_EVENT_CLASS(bpf_prog_event, - - TP_PROTO(const struct bpf_prog *prg), - - TP_ARGS(prg), - - TP_STRUCT__entry( - __array(u8, prog_tag, 8) - __field(u32, type) - ), - - TP_fast_assign( - BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); - memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); - __entry->type = prg->type; - ), - - TP_printk("prog=%s type=%s", - __print_hex_str(__entry->prog_tag, 8), - __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB)) -); - -DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type, - - TP_PROTO(const struct bpf_prog *prg), - - TP_ARGS(prg) -); - -DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu, - - TP_PROTO(const struct bpf_prog *prg), - - TP_ARGS(prg) -); - -TRACE_EVENT(bpf_prog_load, - - TP_PROTO(const struct bpf_prog *prg, int ufd), - - TP_ARGS(prg, ufd), - - TP_STRUCT__entry( - __array(u8, prog_tag, 8) - __field(u32, type) - __field(int, ufd) - ), - - TP_fast_assign( - BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); - memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); - __entry->type = prg->type; - __entry->ufd = ufd; - ), - - TP_printk("prog=%s type=%s ufd=%d", - __print_hex_str(__entry->prog_tag, 8), - __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB), - __entry->ufd) -); - -TRACE_EVENT(bpf_map_create, - - TP_PROTO(const struct bpf_map *map, int ufd), - - TP_ARGS(map, ufd), - - TP_STRUCT__entry( - __field(u32, type) - __field(u32, size_key) - __field(u32, size_value) - __field(u32, max_entries) - 
__field(u32, flags) - __field(int, ufd) - ), - - TP_fast_assign( - __entry->type = map->map_type; - __entry->size_key = map->key_size; - __entry->size_value = map->value_size; - __entry->max_entries = map->max_entries; - __entry->flags = map->map_flags; - __entry->ufd = ufd; - ), - - TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x", - __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), - __entry->ufd, __entry->size_key, __entry->size_value, - __entry->max_entries, __entry->flags) -); - -DECLARE_EVENT_CLASS(bpf_obj_prog, - - TP_PROTO(const struct bpf_prog *prg, int ufd, - const struct filename *pname), - - TP_ARGS(prg, ufd, pname), - - TP_STRUCT__entry( - __array(u8, prog_tag, 8) - __field(int, ufd) - __string(path, pname->name) - ), - - TP_fast_assign( - BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); - memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); - __assign_str(path, pname->name); - __entry->ufd = ufd; - ), - - TP_printk("prog=%s path=%s ufd=%d", - __print_hex_str(__entry->prog_tag, 8), - __get_str(path), __entry->ufd) -); - -DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog, - - TP_PROTO(const struct bpf_prog *prg, int ufd, - const struct filename *pname), - - TP_ARGS(prg, ufd, pname) -); - -DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog, - - TP_PROTO(const struct bpf_prog *prg, int ufd, - const struct filename *pname), - - TP_ARGS(prg, ufd, pname) -); - -DECLARE_EVENT_CLASS(bpf_obj_map, - - TP_PROTO(const struct bpf_map *map, int ufd, - const struct filename *pname), - - TP_ARGS(map, ufd, pname), - - TP_STRUCT__entry( - __field(u32, type) - __field(int, ufd) - __string(path, pname->name) - ), - - TP_fast_assign( - __assign_str(path, pname->name); - __entry->type = map->map_type; - __entry->ufd = ufd; - ), - - TP_printk("map type=%s ufd=%d path=%s", - __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), - __entry->ufd, __get_str(path)) -); - -DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map, - - TP_PROTO(const struct bpf_map *map, int ufd, - 
const struct filename *pname), - - TP_ARGS(map, ufd, pname) -); - -DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map, - - TP_PROTO(const struct bpf_map *map, int ufd, - const struct filename *pname), - - TP_ARGS(map, ufd, pname) -); - -DECLARE_EVENT_CLASS(bpf_map_keyval, - - TP_PROTO(const struct bpf_map *map, int ufd, - const void *key, const void *val), - - TP_ARGS(map, ufd, key, val), - - TP_STRUCT__entry( - __field(u32, type) - __field(u32, key_len) - __dynamic_array(u8, key, map->key_size) - __field(bool, key_trunc) - __field(u32, val_len) - __dynamic_array(u8, val, map->value_size) - __field(bool, val_trunc) - __field(int, ufd) - ), - - TP_fast_assign( - memcpy(__get_dynamic_array(key), key, map->key_size); - memcpy(__get_dynamic_array(val), val, map->value_size); - __entry->type = map->map_type; - __entry->key_len = min(map->key_size, 16U); - __entry->key_trunc = map->key_size != __entry->key_len; - __entry->val_len = min(map->value_size, 16U); - __entry->val_trunc = map->value_size != __entry->val_len; - __entry->ufd = ufd; - ), - - TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]", - __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), - __entry->ufd, - __print_hex(__get_dynamic_array(key), __entry->key_len), - __entry->key_trunc ? " ..." : "", - __print_hex(__get_dynamic_array(val), __entry->val_len), - __entry->val_trunc ? " ..." 
: "") -); - -DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem, - - TP_PROTO(const struct bpf_map *map, int ufd, - const void *key, const void *val), - - TP_ARGS(map, ufd, key, val) -); - -DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem, - - TP_PROTO(const struct bpf_map *map, int ufd, - const void *key, const void *val), - - TP_ARGS(map, ufd, key, val) -); - -TRACE_EVENT(bpf_map_delete_elem, - - TP_PROTO(const struct bpf_map *map, int ufd, - const void *key), - - TP_ARGS(map, ufd, key), - - TP_STRUCT__entry( - __field(u32, type) - __field(u32, key_len) - __dynamic_array(u8, key, map->key_size) - __field(bool, key_trunc) - __field(int, ufd) - ), - - TP_fast_assign( - memcpy(__get_dynamic_array(key), key, map->key_size); - __entry->type = map->map_type; - __entry->key_len = min(map->key_size, 16U); - __entry->key_trunc = map->key_size != __entry->key_len; - __entry->ufd = ufd; - ), - - TP_printk("map type=%s ufd=%d key=[%s%s]", - __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), - __entry->ufd, - __print_hex(__get_dynamic_array(key), __entry->key_len), - __entry->key_trunc ? " ..." : "") -); - -TRACE_EVENT(bpf_map_next_key, - - TP_PROTO(const struct bpf_map *map, int ufd, - const void *key, const void *key_next), - - TP_ARGS(map, ufd, key, key_next), - - TP_STRUCT__entry( - __field(u32, type) - __field(u32, key_len) - __dynamic_array(u8, key, map->key_size) - __dynamic_array(u8, nxt, map->key_size) - __field(bool, key_trunc) - __field(bool, key_null) - __field(int, ufd) - ), - - TP_fast_assign( - if (key) - memcpy(__get_dynamic_array(key), key, map->key_size); - __entry->key_null = !key; - memcpy(__get_dynamic_array(nxt), key_next, map->key_size); - __entry->type = map->map_type; - __entry->key_len = min(map->key_size, 16U); - __entry->key_trunc = map->key_size != __entry->key_len; - __entry->ufd = ufd; - ), - - TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]", - __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), - __entry->ufd, - __entry->key_null ? 
"NULL" : __print_hex(__get_dynamic_array(key), - __entry->key_len), - __entry->key_trunc && !__entry->key_null ? " ..." : "", - __print_hex(__get_dynamic_array(nxt), __entry->key_len), - __entry->key_trunc ? " ..." : "") -); -#endif /* CONFIG_BPF_SYSCALL */ -#endif /* _TRACE_BPF_H */ - -#include diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 9349a5db3cf2..90feeba3a1a1 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1845,9 +1845,3 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, #include EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); - -/* These are only used within the BPF_SYSCALL code */ -#ifdef CONFIG_BPF_SYSCALL -EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type); -EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu); -#endif diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index a41343009ccc..ed13645bd80c 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -429,13 +429,6 @@ int bpf_obj_pin_user(u32 ufd, const char __user *pathname) ret = bpf_obj_do_pin(pname, raw, type); if (ret != 0) bpf_any_put(raw, type); - if ((trace_bpf_obj_pin_prog_enabled() || - trace_bpf_obj_pin_map_enabled()) && !ret) { - if (type == BPF_TYPE_PROG) - trace_bpf_obj_pin_prog(raw, ufd, pname); - if (type == BPF_TYPE_MAP) - trace_bpf_obj_pin_map(raw, ufd, pname); - } out: putname(pname); return ret; @@ -502,15 +495,8 @@ int bpf_obj_get_user(const char __user *pathname, int flags) else goto out; - if (ret < 0) { + if (ret < 0) bpf_any_put(raw, type); - } else if (trace_bpf_obj_get_prog_enabled() || - trace_bpf_obj_get_map_enabled()) { - if (type == BPF_TYPE_PROG) - trace_bpf_obj_get_prog(raw, ret, pname); - if (type == BPF_TYPE_MAP) - trace_bpf_obj_get_map(raw, ret, pname); - } out: putname(pname); return ret; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0bd2944eafb9..263e13ede029 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -503,7 +503,6 @@ static int map_create(union bpf_attr *attr) return err; } - 
trace_bpf_map_create(map, err); return err; free_map: @@ -663,7 +662,6 @@ static int map_lookup_elem(union bpf_attr *attr) if (copy_to_user(uvalue, value, value_size) != 0) goto free_value; - trace_bpf_map_lookup_elem(map, ufd, key, value); err = 0; free_value: @@ -760,8 +758,6 @@ static int map_update_elem(union bpf_attr *attr) __this_cpu_dec(bpf_prog_active); preempt_enable(); out: - if (!err) - trace_bpf_map_update_elem(map, ufd, key, value); free_value: kfree(value); free_key: @@ -814,8 +810,6 @@ static int map_delete_elem(union bpf_attr *attr) __this_cpu_dec(bpf_prog_active); preempt_enable(); out: - if (!err) - trace_bpf_map_delete_elem(map, ufd, key); kfree(key); err_put: fdput(f); @@ -879,7 +873,6 @@ out: if (copy_to_user(unext_key, next_key, map->key_size) != 0) goto free_next_key; - trace_bpf_map_next_key(map, ufd, key, next_key); err = 0; free_next_key: @@ -1027,7 +1020,6 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) if (atomic_dec_and_test(&prog->aux->refcnt)) { int i; - trace_bpf_prog_put_rcu(prog); /* bpf_prog_free_id() must be called first */ bpf_prog_free_id(prog, do_idr_lock); @@ -1194,11 +1186,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd) struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, bool attach_drv) { - struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv); - - if (!IS_ERR(prog)) - trace_bpf_prog_get_type(prog); - return prog; + return __bpf_prog_get(ufd, &type, attach_drv); } EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); @@ -1373,7 +1361,6 @@ static int bpf_prog_load(union bpf_attr *attr) } bpf_prog_kallsyms_add(prog); - trace_bpf_prog_load(prog, err); return err; free_used_maps: -- cgit v1.2.3 From 840d40d941bf669669088732ef68606adb19feeb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 25 Apr 2018 19:42:53 +0200 Subject: MAINTAINERS: Add dri-devel for backlight subsystem patches For the same reasons we've added dri-devel for all fbdev patches: Most of the actively developed drivers 
using this infrastructure are in drivers/gpu/. It just makes sense to cross-post patches and keep aligned. And total activity in the backlight subsystem is miniscule compared to drm overall. Signed-off-by: Daniel Vetter Acked-by: Daniel Thompson Acked-by: Jingoo Han Reviewed-by: Jani Nikula Signed-off-by: Lee Jones --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..8ffc9273132b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2584,6 +2584,7 @@ BACKLIGHT CLASS/SUBSYSTEM M: Lee Jones M: Daniel Thompson M: Jingoo Han +L: dri-devel@lists.freedesktop.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/backlight.git S: Maintained F: drivers/video/backlight/ -- cgit v1.2.3 From a051505c7ec025772983419cc309dc593843f3d2 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 30 Apr 2018 10:16:22 +0300 Subject: MAINTAINERS: Update mlx5 innova driver maintainers Signed-off-by: Boris Pismenny Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- MAINTAINERS | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a52800867850..8f0fc25f0042 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9037,26 +9037,17 @@ W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx5/core/en_* -MELLANOX ETHERNET INNOVA DRIVER -M: Ilan Tayari -R: Boris Pismenny +MELLANOX ETHERNET INNOVA DRIVERS +M: Boris Pismenny L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ +F: drivers/net/ethernet/mellanox/mlx5/core/en_accel/* +F: drivers/net/ethernet/mellanox/mlx5/core/accel/* F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* F: include/linux/mlx5/mlx5_ifc_fpga.h -MELLANOX ETHERNET INNOVA IPSEC DRIVER -M: Ilan Tayari -R: Boris Pismenny -L: netdev@vger.kernel.org -S: Supported -W: http://www.mellanox.com -Q: http://patchwork.ozlabs.org/project/netdev/list/ -F: drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/* -F: drivers/net/ethernet/mellanox/mlx5/core/ipsec* - MELLANOX ETHERNET SWITCH DRIVERS M: Jiri Pirko M: Ido Schimmel -- cgit v1.2.3 From f9c8141fc10324cef00f7e5a3358ccdbe1bd10b4 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 30 Apr 2018 10:16:23 +0300 Subject: MAINTAINERS: Update TLS maintainers Signed-off-by: Boris Pismenny Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 8f0fc25f0042..4ee8bec9c9bd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9839,7 +9839,7 @@ F: net/netfilter/xt_CONNSECMARK.c F: net/netfilter/xt_SECMARK.c NETWORKING [TLS] -M: Ilya Lesokhin +M: Boris Pismenny M: Aviad Yehezkel M: Dave Watson L: netdev@vger.kernel.org -- cgit v1.2.3 From eb06d6bbc45a7561de78a00fb17bfbb75893ee26 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 18 Apr 2018 16:50:01 +0200 Subject: clk: Extract OF clock helpers in The use of of_clk_get_parent_{count,name}() and of_clk_init() is not limited to clock providers. Hence move these helpers into their own header file, so callers that are not clock providers no longer have to include . Suggested-by: Stephen Boyd Signed-off-by: Geert Uytterhoeven Reviewed-by: Heiko Stuebner Signed-off-by: Stephen Boyd --- MAINTAINERS | 1 + include/linux/clk-provider.h | 14 +------------- include/linux/of_clk.h | 30 ++++++++++++++++++++++++++++++ 3 files changed, 32 insertions(+), 13 deletions(-) create mode 100644 include/linux/of_clk.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..b61b2bf1eb75 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3556,6 +3556,7 @@ F: drivers/clk/ X: drivers/clk/clkdev.c F: include/linux/clk-pr* F: include/linux/clk/ +F: include/linux/of_clk.h COMMON INTERNET FILE SYSTEM (CIFS) M: Steve French diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 210a890008f9..61cb4729f22a 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -13,6 +13,7 @@ #include #include +#include #ifdef CONFIG_COMMON_CLK @@ -890,13 +891,10 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void 
*data); -unsigned int of_clk_get_parent_count(struct device_node *np); int of_clk_parent_fill(struct device_node *np, const char **parents, unsigned int size); -const char *of_clk_get_parent_name(struct device_node *np, int index); int of_clk_detect_critical(struct device_node *np, int index, unsigned long *flags); -void of_clk_init(const struct of_device_id *matches); #else /* !CONFIG_OF */ @@ -943,26 +941,16 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) { return ERR_PTR(-ENOENT); } -static inline unsigned int of_clk_get_parent_count(struct device_node *np) -{ - return 0; -} static inline int of_clk_parent_fill(struct device_node *np, const char **parents, unsigned int size) { return 0; } -static inline const char *of_clk_get_parent_name(struct device_node *np, - int index) -{ - return NULL; -} static inline int of_clk_detect_critical(struct device_node *np, int index, unsigned long *flags) { return 0; } -static inline void of_clk_init(const struct of_device_id *matches) {} #endif /* CONFIG_OF */ /* diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h new file mode 100644 index 000000000000..b27da9f164cb --- /dev/null +++ b/include/linux/of_clk.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * OF clock helpers + */ + +#ifndef __LINUX_OF_CLK_H +#define __LINUX_OF_CLK_H + +#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF) + +unsigned int of_clk_get_parent_count(struct device_node *np); +const char *of_clk_get_parent_name(struct device_node *np, int index); +void of_clk_init(const struct of_device_id *matches); + +#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */ + +static inline unsigned int of_clk_get_parent_count(struct device_node *np) +{ + return 0; +} +static inline const char *of_clk_get_parent_name(struct device_node *np, + int index) +{ + return NULL; +} +static inline void of_clk_init(const struct of_device_id *matches) {} + +#endif /* !CONFIG_COMMON_CLK || !CONFIG_OF */ + +#endif /* __LINUX_OF_CLK_H */ -- cgit 
v1.2.3 From 68e8b849b221b37a78a110a0307717d45e3593a0 Mon Sep 17 00:00:00 2001 From: Björn Töpel Date: Wed, 2 May 2018 13:01:22 +0200 Subject: net: initial AF_XDP skeleton MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Buildable skeleton of AF_XDP without any functionality. Just what it takes to register a new address family. Signed-off-by: Björn Töpel Signed-off-by: Alexei Starovoitov --- MAINTAINERS | 8 ++++++++ include/linux/socket.h | 5 ++++- net/Kconfig | 1 + net/core/sock.c | 12 ++++++++---- net/xdp/Kconfig | 7 +++++++ security/selinux/hooks.c | 4 +++- security/selinux/include/classmap.h | 4 +++- 7 files changed, 34 insertions(+), 7 deletions(-) create mode 100644 net/xdp/Kconfig (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 537fd17a211b..52d246fd29c9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15424,6 +15424,14 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/tuners/tuner-xc2028.* +XDP SOCKETS (AF_XDP) +M: Björn Töpel +M: Magnus Karlsson +L: netdev@vger.kernel.org +S: Maintained +F: kernel/bpf/xskmap.c +F: net/xdp/ + XEN BLOCK SUBSYSTEM M: Konrad Rzeszutek Wilk M: Roger Pau Monné diff --git a/include/linux/socket.h b/include/linux/socket.h index ea50f4a65816..7ed4713d5337 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -207,8 +207,9 @@ struct ucred { * PF_SMC protocol family that * reuses AF_INET address family */ +#define AF_XDP 44 /* XDP sockets */ -#define AF_MAX 44 /* For now.. */ +#define AF_MAX 45 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -257,6 +258,7 @@ struct ucred { #define PF_KCM AF_KCM #define PF_QIPCRTR AF_QIPCRTR #define PF_SMC AF_SMC +#define PF_XDP AF_XDP #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
*/ @@ -338,6 +340,7 @@ struct ucred { #define SOL_NFC 280 #define SOL_KCM 281 #define SOL_TLS 282 +#define SOL_XDP 283 /* IPX options */ #define IPX_TYPE 1 diff --git a/net/Kconfig b/net/Kconfig index 6fa1a4493b8c..86471a1c1ed4 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -59,6 +59,7 @@ source "net/tls/Kconfig" source "net/xfrm/Kconfig" source "net/iucv/Kconfig" source "net/smc/Kconfig" +source "net/xdp/Kconfig" config INET bool "TCP/IP networking" diff --git a/net/core/sock.c b/net/core/sock.c index b2c3db169ca1..e7d8b6c955c6 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -226,7 +226,8 @@ static struct lock_class_key af_family_kern_slock_keys[AF_MAX]; x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \ x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \ x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \ - x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX" + x "AF_QIPCRTR", x "AF_SMC" , x "AF_XDP" , \ + x "AF_MAX" static const char *const af_family_key_strings[AF_MAX+1] = { _sock_locks("sk_lock-") @@ -262,7 +263,8 @@ static const char *const af_family_rlock_key_strings[AF_MAX+1] = { "rlock-AF_RXRPC" , "rlock-AF_ISDN" , "rlock-AF_PHONET" , "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG" , "rlock-AF_NFC" , "rlock-AF_VSOCK" , "rlock-AF_KCM" , - "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_MAX" + "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_XDP" , + "rlock-AF_MAX" }; static const char *const af_family_wlock_key_strings[AF_MAX+1] = { "wlock-AF_UNSPEC", "wlock-AF_UNIX" , "wlock-AF_INET" , @@ -279,7 +281,8 @@ static const char *const af_family_wlock_key_strings[AF_MAX+1] = { "wlock-AF_RXRPC" , "wlock-AF_ISDN" , "wlock-AF_PHONET" , "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG" , "wlock-AF_NFC" , "wlock-AF_VSOCK" , "wlock-AF_KCM" , - "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_MAX" + "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_XDP" , + "wlock-AF_MAX" }; static const char *const af_family_elock_key_strings[AF_MAX+1] = { "elock-AF_UNSPEC", "elock-AF_UNIX" , 
"elock-AF_INET" , @@ -296,7 +299,8 @@ static const char *const af_family_elock_key_strings[AF_MAX+1] = { "elock-AF_RXRPC" , "elock-AF_ISDN" , "elock-AF_PHONET" , "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG" , "elock-AF_NFC" , "elock-AF_VSOCK" , "elock-AF_KCM" , - "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_MAX" + "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_XDP" , + "elock-AF_MAX" }; /* diff --git a/net/xdp/Kconfig b/net/xdp/Kconfig new file mode 100644 index 000000000000..90e4a7152854 --- /dev/null +++ b/net/xdp/Kconfig @@ -0,0 +1,7 @@ +config XDP_SOCKETS + bool "XDP sockets" + depends on BPF_SYSCALL + default n + help + XDP sockets allows a channel between XDP programs and + userspace applications. diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4cafe6a19167..5c508d26b367 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1471,7 +1471,9 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc return SECCLASS_QIPCRTR_SOCKET; case PF_SMC: return SECCLASS_SMC_SOCKET; -#if PF_MAX > 44 + case PF_XDP: + return SECCLASS_XDP_SOCKET; +#if PF_MAX > 45 #error New address family defined, please update this function. #endif } diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 7f0372426494..bd5fe0d3204a 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -240,9 +240,11 @@ struct security_class_mapping secclass_map[] = { { "manage_subnet", NULL } }, { "bpf", {"map_create", "map_read", "map_write", "prog_load", "prog_run"} }, + { "xdp_socket", + { COMMON_SOCK_PERMS, NULL } }, { NULL } }; -#if PF_MAX > 44 +#if PF_MAX > 45 #error New address family defined, please update secclass_map. 
#endif -- cgit v1.2.3 From 57692c94dcbe99a1e0444409a3da13fb3443562c Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 30 Apr 2018 11:10:58 -0700 Subject: drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+ This driver will be used to support Mesa on the Broadcom 7268 and 7278 platforms. V3D 3.3 introduces an MMU, which means we no longer need CMA or vc4's complicated CL/shader validation scheme. This massively changes the GEM behavior, so I've forked off to a new driver. v2: Mark SUBMIT_CL as needing DRM_AUTH. coccinelle fixes from kbuild test robot. Drop personal git link from MAINTAINERS. Don't double-map dma-buf imported BOs. Add kerneldoc about needing MMU eviction. Drop prime vmap/unmap stubs. Delay mmap offset setup to mmap time. Use drm_dev_init instead of _alloc. Use ktime_get() for wait_bo timeouts. Drop drm_can_sleep() usage, since we don't modeset. Switch page tables back to WC (debug change to coherent had slipped in). Switch drm_gem_object_unreference_unlocked() to drm_gem_object_put_unlocked(). Simplify overflow mem handling by not sharing overflow mem between jobs. v3: no changes v4: align submit_cl to 64 bits (review by airlied), check zero flags in other ioctls. 
Signed-off-by: Eric Anholt Acked-by: Daniel Vetter (v4) Acked-by: Dave Airlie (v3, requested submit_cl change) Link: https://patchwork.freedesktop.org/patch/msgid/20180430181058.30181-3-eric@anholt.net --- Documentation/gpu/drivers.rst | 1 + MAINTAINERS | 8 + drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/v3d/Kconfig | 9 + drivers/gpu/drm/v3d/Makefile | 18 + drivers/gpu/drm/v3d/v3d_bo.c | 389 +++++++++++++++++++ drivers/gpu/drm/v3d/v3d_debugfs.c | 191 ++++++++++ drivers/gpu/drm/v3d/v3d_drv.c | 371 ++++++++++++++++++ drivers/gpu/drm/v3d/v3d_drv.h | 294 +++++++++++++++ drivers/gpu/drm/v3d/v3d_fence.c | 58 +++ drivers/gpu/drm/v3d/v3d_gem.c | 668 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_irq.c | 206 ++++++++++ drivers/gpu/drm/v3d/v3d_mmu.c | 122 ++++++ drivers/gpu/drm/v3d/v3d_regs.h | 295 +++++++++++++++ drivers/gpu/drm/v3d/v3d_sched.c | 228 +++++++++++ drivers/gpu/drm/v3d/v3d_trace.h | 82 ++++ drivers/gpu/drm/v3d/v3d_trace_points.c | 9 + include/uapi/drm/v3d_drm.h | 194 ++++++++++ 19 files changed, 3146 insertions(+) create mode 100644 drivers/gpu/drm/v3d/Kconfig create mode 100644 drivers/gpu/drm/v3d/Makefile create mode 100644 drivers/gpu/drm/v3d/v3d_bo.c create mode 100644 drivers/gpu/drm/v3d/v3d_debugfs.c create mode 100644 drivers/gpu/drm/v3d/v3d_drv.c create mode 100644 drivers/gpu/drm/v3d/v3d_drv.h create mode 100644 drivers/gpu/drm/v3d/v3d_fence.c create mode 100644 drivers/gpu/drm/v3d/v3d_gem.c create mode 100644 drivers/gpu/drm/v3d/v3d_irq.c create mode 100644 drivers/gpu/drm/v3d/v3d_mmu.c create mode 100644 drivers/gpu/drm/v3d/v3d_regs.h create mode 100644 drivers/gpu/drm/v3d/v3d_sched.c create mode 100644 drivers/gpu/drm/v3d/v3d_trace.h create mode 100644 drivers/gpu/drm/v3d/v3d_trace_points.c create mode 100644 include/uapi/drm/v3d_drm.h (limited to 'MAINTAINERS') diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst index d3ab6abae838..f982558fc25d 100644 --- 
a/Documentation/gpu/drivers.rst +++ b/Documentation/gpu/drivers.rst @@ -10,6 +10,7 @@ GPU Driver Documentation tegra tinydrm tve200 + v3d vc4 bridge/dw-hdmi xen-front diff --git a/MAINTAINERS b/MAINTAINERS index 4af7f6119530..631a16f7fa19 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4795,6 +4795,14 @@ S: Maintained F: drivers/gpu/drm/omapdrm/ F: Documentation/devicetree/bindings/display/ti/ +DRM DRIVERS FOR V3D +M: Eric Anholt +S: Supported +F: drivers/gpu/drm/v3d/ +F: include/uapi/drm/v3d_drm.h +F: Documentation/devicetree/bindings/display/brcm,bcm-v3d.txt +T: git git://anongit.freedesktop.org/drm/drm-misc + DRM DRIVERS FOR VC4 M: Eric Anholt T: git git://github.com/anholt/linux diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 757825ac60df..1c73a455fdb1 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -267,6 +267,8 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig" source "drivers/gpu/drm/imx/Kconfig" +source "drivers/gpu/drm/v3d/Kconfig" + source "drivers/gpu/drm/vc4/Kconfig" source "drivers/gpu/drm/etnaviv/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 9d66657ea117..7a401edd8761 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_DRM_MGA) += mga/ obj-$(CONFIG_DRM_I810) += i810/ obj-$(CONFIG_DRM_I915) += i915/ obj-$(CONFIG_DRM_MGAG200) += mgag200/ +obj-$(CONFIG_DRM_V3D) += v3d/ obj-$(CONFIG_DRM_VC4) += vc4/ obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/ obj-$(CONFIG_DRM_SIS) += sis/ diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig new file mode 100644 index 000000000000..a0c0259355bd --- /dev/null +++ b/drivers/gpu/drm/v3d/Kconfig @@ -0,0 +1,9 @@ +config DRM_V3D + tristate "Broadcom V3D 3.x and newer" + depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST + depends on DRM + depends on COMMON_CLK + select DRM_SCHED + help + Choose this option if you have a system that has a Broadcom + V3D 3.x or newer GPU, such as BCM7268. 
diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile new file mode 100644 index 000000000000..34446e1de64f --- /dev/null +++ b/drivers/gpu/drm/v3d/Makefile @@ -0,0 +1,18 @@ +# Please keep these build lists sorted! + +# core driver code +v3d-y := \ + v3d_bo.o \ + v3d_drv.o \ + v3d_fence.o \ + v3d_gem.o \ + v3d_irq.o \ + v3d_mmu.o \ + v3d_trace_points.o \ + v3d_sched.o + +v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o + +obj-$(CONFIG_DRM_V3D) += v3d.o + +CFLAGS_v3d_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c new file mode 100644 index 000000000000..7b1e2a549a71 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +/** + * DOC: V3D GEM BO management support + * + * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the + * GPU and the bus, allowing us to use shmem objects for our storage + * instead of CMA. + * + * Physically contiguous objects may still be imported to V3D, but the + * driver doesn't allocate physically contiguous objects on its own. + * Display engines requiring physically contiguous allocations should + * look into Mesa's "renderonly" support (as used by the Mesa pl111 + * driver) for an example of how to integrate with V3D. + * + * Long term, we should support evicting pages from the MMU when under + * memory pressure (thus the v3d_bo_get_pages() refcounting), but + * that's not a high priority since our systems tend to not have swap. + */ + +#include +#include + +#include "v3d_drv.h" +#include "uapi/drm/v3d_drm.h" + +/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps + * it for DMA. 
+ */ +static int +v3d_bo_get_pages(struct v3d_bo *bo) +{ + struct drm_gem_object *obj = &bo->base; + struct drm_device *dev = obj->dev; + int npages = obj->size >> PAGE_SHIFT; + int ret = 0; + + mutex_lock(&bo->lock); + if (bo->pages_refcount++ != 0) + goto unlock; + + if (!obj->import_attach) { + bo->pages = drm_gem_get_pages(obj); + if (IS_ERR(bo->pages)) { + ret = PTR_ERR(bo->pages); + goto unlock; + } + + bo->sgt = drm_prime_pages_to_sg(bo->pages, npages); + if (IS_ERR(bo->sgt)) { + ret = PTR_ERR(bo->sgt); + goto put_pages; + } + + /* Map the pages for use by the GPU. */ + dma_map_sg(dev->dev, bo->sgt->sgl, + bo->sgt->nents, DMA_BIDIRECTIONAL); + } else { + bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); + if (!bo->pages) + goto put_pages; + + drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages, + NULL, npages); + + /* Note that dma-bufs come in mapped. */ + } + + mutex_unlock(&bo->lock); + + return 0; + +put_pages: + drm_gem_put_pages(obj, bo->pages, true, true); + bo->pages = NULL; +unlock: + bo->pages_refcount--; + mutex_unlock(&bo->lock); + return ret; +} + +static void +v3d_bo_put_pages(struct v3d_bo *bo) +{ + struct drm_gem_object *obj = &bo->base; + + mutex_lock(&bo->lock); + if (--bo->pages_refcount == 0) { + if (!obj->import_attach) { + dma_unmap_sg(obj->dev->dev, bo->sgt->sgl, + bo->sgt->nents, DMA_BIDIRECTIONAL); + sg_free_table(bo->sgt); + kfree(bo->sgt); + drm_gem_put_pages(obj, bo->pages, true, true); + } else { + kfree(bo->pages); + } + } + mutex_unlock(&bo->lock); +} + +static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev, + size_t unaligned_size) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_gem_object *obj; + struct v3d_bo *bo; + size_t size = roundup(unaligned_size, PAGE_SIZE); + int ret; + + if (size == 0) + return ERR_PTR(-EINVAL); + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return ERR_PTR(-ENOMEM); + obj = &bo->base; + + INIT_LIST_HEAD(&bo->vmas); + INIT_LIST_HEAD(&bo->unref_head); + 
mutex_init(&bo->lock); + + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto free_bo; + + spin_lock(&v3d->mm_lock); + ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, + obj->size >> PAGE_SHIFT, + GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); + spin_unlock(&v3d->mm_lock); + if (ret) + goto free_obj; + + return bo; + +free_obj: + drm_gem_object_release(obj); +free_bo: + kfree(bo); + return ERR_PTR(ret); +} + +struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, + size_t unaligned_size) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_gem_object *obj; + struct v3d_bo *bo; + int ret; + + bo = v3d_bo_create_struct(dev, unaligned_size); + if (IS_ERR(bo)) + return bo; + obj = &bo->base; + + bo->resv = &bo->_resv; + reservation_object_init(bo->resv); + + ret = v3d_bo_get_pages(bo); + if (ret) + goto free_mm; + + v3d_mmu_insert_ptes(bo); + + mutex_lock(&v3d->bo_lock); + v3d->bo_stats.num_allocated++; + v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; + mutex_unlock(&v3d->bo_lock); + + return bo; + +free_mm: + spin_lock(&v3d->mm_lock); + drm_mm_remove_node(&bo->node); + spin_unlock(&v3d->mm_lock); + + drm_gem_object_release(obj); + kfree(bo); + return ERR_PTR(ret); +} + +/* Called DRM core on the last userspace/kernel unreference of the + * BO. 
+ */ +void v3d_free_object(struct drm_gem_object *obj) +{ + struct v3d_dev *v3d = to_v3d_dev(obj->dev); + struct v3d_bo *bo = to_v3d_bo(obj); + + mutex_lock(&v3d->bo_lock); + v3d->bo_stats.num_allocated--; + v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT; + mutex_unlock(&v3d->bo_lock); + + reservation_object_fini(&bo->_resv); + + v3d_bo_put_pages(bo); + + if (obj->import_attach) + drm_prime_gem_destroy(obj, bo->sgt); + + v3d_mmu_remove_ptes(bo); + spin_lock(&v3d->mm_lock); + drm_mm_remove_node(&bo->node); + spin_unlock(&v3d->mm_lock); + + mutex_destroy(&bo->lock); + + drm_gem_object_release(obj); + kfree(bo); +} + +struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj) +{ + struct v3d_bo *bo = to_v3d_bo(obj); + + return bo->resv; +} + +static void +v3d_set_mmap_vma_flags(struct vm_area_struct *vma) +{ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); +} + +int v3d_gem_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct drm_gem_object *obj = vma->vm_private_data; + struct v3d_bo *bo = to_v3d_bo(obj); + unsigned long pfn; + pgoff_t pgoff; + int ret; + + /* We don't use vmf->pgoff since that has the fake offset: */ + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + pfn = page_to_pfn(bo->pages[pgoff]); + + ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); + + switch (ret) { + case -EAGAIN: + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job. 
+ */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } +} + +int v3d_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + v3d_set_mmap_vma_flags(vma); + + return ret; +} + +int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret; + + ret = drm_gem_mmap_obj(obj, obj->size, vma); + if (ret < 0) + return ret; + + v3d_set_mmap_vma_flags(vma); + + return 0; +} + +struct sg_table * +v3d_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct v3d_bo *bo = to_v3d_bo(obj); + int npages = obj->size >> PAGE_SHIFT; + + return drm_prime_pages_to_sg(bo->pages, npages); +} + +struct drm_gem_object * +v3d_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct drm_gem_object *obj; + struct v3d_bo *bo; + + bo = v3d_bo_create_struct(dev, attach->dmabuf->size); + if (IS_ERR(bo)) + return ERR_CAST(bo); + obj = &bo->base; + + bo->resv = attach->dmabuf->resv; + + bo->sgt = sgt; + v3d_bo_get_pages(bo); + + v3d_mmu_insert_ptes(bo); + + return obj; +} + +int v3d_create_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_create_bo *args = data; + struct v3d_bo *bo = NULL; + int ret; + + if (args->flags != 0) { + DRM_INFO("unknown create_bo flags: %d\n", args->flags); + return -EINVAL; + } + + bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size)); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + args->offset = bo->node.start << PAGE_SHIFT; + + ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle); + drm_gem_object_put_unlocked(&bo->base); + + return ret; +} + +int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_mmap_bo *args = data; + struct drm_gem_object *gem_obj; + int ret; + + if (args->flags != 0) { + DRM_INFO("unknown mmap_bo flags: %d\n", 
args->flags); + return -EINVAL; + } + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -ENOENT; + } + + ret = drm_gem_create_mmap_offset(gem_obj); + if (ret == 0) + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); + drm_gem_object_put_unlocked(gem_obj); + + return ret; +} + +int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_get_bo_offset *args = data; + struct drm_gem_object *gem_obj; + struct v3d_bo *bo; + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -ENOENT; + } + bo = to_v3d_bo(gem_obj); + + args->offset = bo->node.start << PAGE_SHIFT; + + drm_gem_object_put_unlocked(gem_obj); + return 0; +} diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c new file mode 100644 index 000000000000..4db62c545748 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +#include +#include +#include +#include +#include +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define REGDEF(reg) { reg, #reg } +struct v3d_reg_def { + u32 reg; + const char *name; +}; + +static const struct v3d_reg_def v3d_hub_reg_defs[] = { + REGDEF(V3D_HUB_AXICFG), + REGDEF(V3D_HUB_UIFCFG), + REGDEF(V3D_HUB_IDENT0), + REGDEF(V3D_HUB_IDENT1), + REGDEF(V3D_HUB_IDENT2), + REGDEF(V3D_HUB_IDENT3), + REGDEF(V3D_HUB_INT_STS), + REGDEF(V3D_HUB_INT_MSK_STS), +}; + +static const struct v3d_reg_def v3d_gca_reg_defs[] = { + REGDEF(V3D_GCA_SAFE_SHUTDOWN), + REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK), +}; + +static const struct v3d_reg_def v3d_core_reg_defs[] = { + REGDEF(V3D_CTL_IDENT0), + REGDEF(V3D_CTL_IDENT1), + REGDEF(V3D_CTL_IDENT2), + REGDEF(V3D_CTL_MISCCFG), + REGDEF(V3D_CTL_INT_STS), + REGDEF(V3D_CTL_INT_MSK_STS), + 
REGDEF(V3D_CLE_CT0CS), + REGDEF(V3D_CLE_CT0CA), + REGDEF(V3D_CLE_CT0EA), + REGDEF(V3D_CLE_CT1CS), + REGDEF(V3D_CLE_CT1CA), + REGDEF(V3D_CLE_CT1EA), + + REGDEF(V3D_PTB_BPCA), + REGDEF(V3D_PTB_BPCS), + + REGDEF(V3D_MMU_CTL), + REGDEF(V3D_MMU_VIO_ADDR), + + REGDEF(V3D_GMP_STATUS), + REGDEF(V3D_GMP_CFG), + REGDEF(V3D_GMP_VIO_ADDR), +}; + +static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + int i, core; + + for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg, + V3D_READ(v3d_hub_reg_defs[i].reg)); + } + + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg, + V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); + } + + for (core = 0; core < v3d->cores; core++) { + for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) { + seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", + core, + v3d_core_reg_defs[i].name, + v3d_core_reg_defs[i].reg, + V3D_CORE_READ(core, + v3d_core_reg_defs[i].reg)); + } + } + + return 0; +} + +static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + u32 ident0, ident1, ident2, ident3, cores; + int ret, core; + + ret = pm_runtime_get_sync(v3d->dev); + if (ret < 0) + return ret; + + ident0 = V3D_READ(V3D_HUB_IDENT0); + ident1 = V3D_READ(V3D_HUB_IDENT1); + ident2 = V3D_READ(V3D_HUB_IDENT2); + ident3 = V3D_READ(V3D_HUB_IDENT3); + cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); + + seq_printf(m, "Revision: %d.%d.%d.%d\n", + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER), + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV), + V3D_GET_FIELD(ident3, 
V3D_HUB_IDENT3_IPREV), + V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX)); + seq_printf(m, "MMU: %s\n", + (ident2 & V3D_HUB_IDENT2_WITH_MMU) ? "yes" : "no"); + seq_printf(m, "TFU: %s\n", + (ident1 & V3D_HUB_IDENT1_WITH_TFU) ? "yes" : "no"); + seq_printf(m, "TSY: %s\n", + (ident1 & V3D_HUB_IDENT1_WITH_TSY) ? "yes" : "no"); + seq_printf(m, "MSO: %s\n", + (ident1 & V3D_HUB_IDENT1_WITH_MSO) ? "yes" : "no"); + seq_printf(m, "L3C: %s (%dkb)\n", + (ident1 & V3D_HUB_IDENT1_WITH_L3C) ? "yes" : "no", + V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB)); + + for (core = 0; core < cores; core++) { + u32 misccfg; + u32 nslc, ntmu, qups; + + ident0 = V3D_CORE_READ(core, V3D_CTL_IDENT0); + ident1 = V3D_CORE_READ(core, V3D_CTL_IDENT1); + ident2 = V3D_CORE_READ(core, V3D_CTL_IDENT2); + misccfg = V3D_CORE_READ(core, V3D_CTL_MISCCFG); + + nslc = V3D_GET_FIELD(ident1, V3D_IDENT1_NSLC); + ntmu = V3D_GET_FIELD(ident1, V3D_IDENT1_NTMU); + qups = V3D_GET_FIELD(ident1, V3D_IDENT1_QUPS); + + seq_printf(m, "Core %d:\n", core); + seq_printf(m, " Revision: %d.%d\n", + V3D_GET_FIELD(ident0, V3D_IDENT0_VER), + V3D_GET_FIELD(ident1, V3D_IDENT1_REV)); + seq_printf(m, " Slices: %d\n", nslc); + seq_printf(m, " TMUs: %d\n", nslc * ntmu); + seq_printf(m, " QPUs: %d\n", nslc * qups); + seq_printf(m, " Semaphores: %d\n", + V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM)); + seq_printf(m, " BCG int: %d\n", + (ident2 & V3D_IDENT2_BCG_INT) != 0); + seq_printf(m, " Override TMU: %d\n", + (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); + } + + pm_runtime_mark_last_busy(v3d->dev); + pm_runtime_put_autosuspend(v3d->dev); + + return 0; +} + +static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + + mutex_lock(&v3d->bo_lock); + seq_printf(m, "allocated bos: %d\n", + v3d->bo_stats.num_allocated); + seq_printf(m, "allocated bo size (kb): %ld\n", + 
(long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10)); + mutex_unlock(&v3d->bo_lock); + + return 0; +} + +static const struct drm_info_list v3d_debugfs_list[] = { + {"v3d_ident", v3d_v3d_debugfs_ident, 0}, + {"v3d_regs", v3d_v3d_debugfs_regs, 0}, + {"bo_stats", v3d_debugfs_bo_stats, 0}, +}; + +int +v3d_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(v3d_debugfs_list, + ARRAY_SIZE(v3d_debugfs_list), + minor->debugfs_root, minor); +} diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c new file mode 100644 index 000000000000..38e8041b5f0c --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +/** + * DOC: Broadcom V3D Graphics Driver + * + * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs. + * For V3D 2.x support, see the VC4 driver. + * + * Currently only single-core rendering using the binner and renderer + * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD + * (compute shader dispatch) are not yet supported. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "uapi/drm/v3d_drm.h" +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define DRIVER_NAME "v3d" +#define DRIVER_DESC "Broadcom V3D graphics" +#define DRIVER_DATE "20180419" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#ifdef CONFIG_PM +static int v3d_runtime_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct v3d_dev *v3d = to_v3d_dev(drm); + + v3d_irq_disable(v3d); + + clk_disable_unprepare(v3d->clk); + + return 0; +} + +static int v3d_runtime_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct v3d_dev *v3d = to_v3d_dev(drm); + int ret; + + ret = clk_prepare_enable(v3d->clk); + if (ret != 0) + return ret; + + /* XXX: VPM base */ + + v3d_mmu_set_page_table(v3d); + v3d_irq_enable(v3d); + + return 0; +} +#endif + +static const struct dev_pm_ops v3d_v3d_pm_ops = { + SET_RUNTIME_PM_OPS(v3d_runtime_suspend, v3d_runtime_resume, NULL) +}; + +static int v3d_get_param_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_v3d_get_param *args = data; + int ret; + static const u32 reg_map[] = { + [DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_UIFCFG, + [DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_IDENT1, + [DRM_V3D_PARAM_V3D_HUB_IDENT2] = V3D_HUB_IDENT2, + [DRM_V3D_PARAM_V3D_HUB_IDENT3] = V3D_HUB_IDENT3, + [DRM_V3D_PARAM_V3D_CORE0_IDENT0] = V3D_CTL_IDENT0, + [DRM_V3D_PARAM_V3D_CORE0_IDENT1] = V3D_CTL_IDENT1, + [DRM_V3D_PARAM_V3D_CORE0_IDENT2] = V3D_CTL_IDENT2, + }; + + if (args->pad != 0) + return -EINVAL; + + /* Note that DRM_V3D_PARAM_V3D_CORE0_IDENT0 is 0, so we need + * to explicitly allow it in the "the register in our + * parameter map" check. 
+ */ + if (args->param < ARRAY_SIZE(reg_map) && + (reg_map[args->param] || + args->param == DRM_V3D_PARAM_V3D_CORE0_IDENT0)) { + u32 offset = reg_map[args->param]; + + if (args->value != 0) + return -EINVAL; + + ret = pm_runtime_get_sync(v3d->dev); + if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 && + args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) { + args->value = V3D_CORE_READ(0, offset); + } else { + args->value = V3D_READ(offset); + } + pm_runtime_mark_last_busy(v3d->dev); + pm_runtime_put_autosuspend(v3d->dev); + return 0; + } + + /* Any params that aren't just register reads would go here. */ + + DRM_DEBUG("Unknown parameter %d\n", args->param); + return -EINVAL; +} + +static int +v3d_open(struct drm_device *dev, struct drm_file *file) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv; + int i; + + v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL); + if (!v3d_priv) + return -ENOMEM; + + v3d_priv->v3d = v3d; + + for (i = 0; i < V3D_MAX_QUEUES; i++) { + drm_sched_entity_init(&v3d->queue[i].sched, + &v3d_priv->sched_entity[i], + &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], + 32, NULL); + } + + file->driver_priv = v3d_priv; + + return 0; +} + +static void +v3d_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file->driver_priv; + enum v3d_queue q; + + for (q = 0; q < V3D_MAX_QUEUES; q++) { + drm_sched_entity_fini(&v3d->queue[q].sched, + &v3d_priv->sched_entity[q]); + } + + kfree(v3d_priv); +} + +static const struct file_operations v3d_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = v3d_mmap, + .poll = drm_poll, + .read = drm_read, + .compat_ioctl = drm_compat_ioctl, + .llseek = noop_llseek, +}; + +/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP + * protection between clients. 
Note that render nodes would be be + * able to submit CLs that could access BOs from clients authenticated + * with the master node. + */ +static const struct drm_ioctl_desc v3d_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_WAIT_BO, v3d_wait_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_CREATE_BO, v3d_create_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), +}; + +static const struct vm_operations_struct v3d_vm_ops = { + .fault = v3d_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static struct drm_driver v3d_drm_driver = { + .driver_features = (DRIVER_GEM | + DRIVER_RENDER | + DRIVER_PRIME | + DRIVER_SYNCOBJ), + + .open = v3d_open, + .postclose = v3d_postclose, + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = v3d_debugfs_init, +#endif + + .gem_free_object_unlocked = v3d_free_object, + .gem_vm_ops = &v3d_vm_ops, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_res_obj = v3d_prime_res_obj, + .gem_prime_get_sg_table = v3d_prime_get_sg_table, + .gem_prime_import_sg_table = v3d_prime_import_sg_table, + .gem_prime_mmap = v3d_prime_mmap, + + .ioctls = v3d_drm_ioctls, + .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls), + .fops = &v3d_drm_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static const struct of_device_id v3d_of_match[] = { + { .compatible = "brcm,7268-v3d" }, + { .compatible = "brcm,7278-v3d" }, + {}, +}; +MODULE_DEVICE_TABLE(of, v3d_of_match); + +static int +map_regs(struct 
v3d_dev *v3d, void __iomem **regs, const char *name) +{ + struct resource *res = + platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name); + + *regs = devm_ioremap_resource(v3d->dev, res); + return PTR_ERR_OR_ZERO(*regs); +} + +static int v3d_platform_drm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct drm_device *drm; + struct v3d_dev *v3d; + int ret; + u32 ident1; + + dev->coherent_dma_mask = DMA_BIT_MASK(36); + + v3d = kzalloc(sizeof(*v3d), GFP_KERNEL); + if (!v3d) + return -ENOMEM; + v3d->dev = dev; + v3d->pdev = pdev; + drm = &v3d->drm; + + ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); + if (ret) + goto dev_free; + + ret = map_regs(v3d, &v3d->hub_regs, "hub"); + if (ret) + goto dev_free; + + ret = map_regs(v3d, &v3d->core_regs[0], "core0"); + if (ret) + goto dev_free; + + ident1 = V3D_READ(V3D_HUB_IDENT1); + v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 + + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV)); + v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); + WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ + + if (v3d->ver < 41) { + ret = map_regs(v3d, &v3d->gca_regs, "gca"); + if (ret) + goto dev_free; + } + + v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!v3d->mmu_scratch) { + dev_err(dev, "Failed to allocate MMU scratch page\n"); + ret = -ENOMEM; + goto dev_free; + } + + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, 50); + pm_runtime_enable(dev); + + ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev); + if (ret) + goto dma_free; + + platform_set_drvdata(pdev, drm); + drm->dev_private = v3d; + + ret = v3d_gem_init(drm); + if (ret) + goto dev_destroy; + + v3d_irq_init(v3d); + + ret = drm_dev_register(drm, 0); + if (ret) + goto gem_destroy; + + return 0; + +gem_destroy: + v3d_gem_destroy(drm); +dev_destroy: + drm_dev_put(drm); +dma_free: + dma_free_wc(dev, 4096, v3d->mmu_scratch, 
v3d->mmu_scratch_paddr); +dev_free: + kfree(v3d); + return ret; +} + +static int v3d_platform_drm_remove(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + struct v3d_dev *v3d = to_v3d_dev(drm); + + drm_dev_unregister(drm); + + v3d_gem_destroy(drm); + + drm_dev_put(drm); + + dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); + + return 0; +} + +static struct platform_driver v3d_platform_driver = { + .probe = v3d_platform_drm_probe, + .remove = v3d_platform_drm_remove, + .driver = { + .name = "v3d", + .of_match_table = v3d_of_match, + }, +}; + +static int __init v3d_drm_register(void) +{ + return platform_driver_register(&v3d_platform_driver); +} + +static void __exit v3d_drm_unregister(void) +{ + platform_driver_unregister(&v3d_platform_driver); +} + +module_init(v3d_drm_register); +module_exit(v3d_drm_unregister); + +MODULE_ALIAS("platform:v3d-drm"); +MODULE_DESCRIPTION("Broadcom V3D DRM Driver"); +MODULE_AUTHOR("Eric Anholt "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h new file mode 100644 index 000000000000..a043ac3aae98 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +#include +#include +#include +#include +#include + +#define GMP_GRANULARITY (128 * 1024) + +/* Enum for each of the V3D queues. We maintain various queue + * tracking as an array because at some point we'll want to support + * the TFU (texture formatting unit) as another queue. + */ +enum v3d_queue { + V3D_BIN, + V3D_RENDER, +}; + +#define V3D_MAX_QUEUES (V3D_RENDER + 1) + +struct v3d_queue_state { + struct drm_gpu_scheduler sched; + + u64 fence_context; + u64 emit_seqno; + u64 finished_seqno; +}; + +struct v3d_dev { + struct drm_device drm; + + /* Short representation (e.g. 33, 41) of the V3D tech version + * and revision. 
+ */ + int ver; + + struct device *dev; + struct platform_device *pdev; + void __iomem *hub_regs; + void __iomem *core_regs[3]; + void __iomem *bridge_regs; + void __iomem *gca_regs; + struct clk *clk; + + /* Virtual and DMA addresses of the single shared page table. */ + volatile u32 *pt; + dma_addr_t pt_paddr; + + /* Virtual and DMA addresses of the MMU's scratch page. When + * a read or write is invalid in the MMU, it will be + * redirected here. + */ + void *mmu_scratch; + dma_addr_t mmu_scratch_paddr; + + /* Number of V3D cores. */ + u32 cores; + + /* Allocator managing the address space. All units are in + * number of pages. + */ + struct drm_mm mm; + spinlock_t mm_lock; + + struct work_struct overflow_mem_work; + + struct v3d_exec_info *bin_job; + struct v3d_exec_info *render_job; + + struct v3d_queue_state queue[V3D_MAX_QUEUES]; + + /* Spinlock used to synchronize the overflow memory + * management against bin job submission. + */ + spinlock_t job_lock; + + /* Protects bo_stats */ + struct mutex bo_lock; + + /* Lock taken when resetting the GPU, to keep multiple + * processes from trying to park the scheduler threads and + * reset at once. + */ + struct mutex reset_lock; + + struct { + u32 num_allocated; + u32 pages_allocated; + } bo_stats; +}; + +static inline struct v3d_dev * +to_v3d_dev(struct drm_device *dev) +{ + return (struct v3d_dev *)dev->dev_private; +} + +/* The per-fd struct, which tracks the MMU mappings. 
*/ +struct v3d_file_priv { + struct v3d_dev *v3d; + + struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; +}; + +/* Tracks a mapping of a BO into a per-fd address space */ +struct v3d_vma { + struct v3d_page_table *pt; + struct list_head list; /* entry in v3d_bo.vmas */ +}; + +struct v3d_bo { + struct drm_gem_object base; + + struct mutex lock; + + struct drm_mm_node node; + + u32 pages_refcount; + struct page **pages; + struct sg_table *sgt; + void *vaddr; + + struct list_head vmas; /* list of v3d_vma */ + + /* List entry for the BO's position in + * v3d_exec_info->unref_list + */ + struct list_head unref_head; + + /* normally (resv == &_resv) except for imported bo's */ + struct reservation_object *resv; + struct reservation_object _resv; +}; + +static inline struct v3d_bo * +to_v3d_bo(struct drm_gem_object *bo) +{ + return (struct v3d_bo *)bo; +} + +struct v3d_fence { + struct dma_fence base; + struct drm_device *dev; + /* v3d seqno for signaled() test */ + u64 seqno; + enum v3d_queue queue; +}; + +static inline struct v3d_fence * +to_v3d_fence(struct dma_fence *fence) +{ + return (struct v3d_fence *)fence; +} + +#define V3D_READ(offset) readl(v3d->hub_regs + offset) +#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset) + +#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset) +#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset) + +#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset) +#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset) + +#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset) +#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset) + +struct v3d_job { + struct drm_sched_job base; + + struct v3d_exec_info *exec; + + /* An optional fence userspace can pass in for the job to depend on. */ + struct dma_fence *in_fence; + + /* v3d fence to be signaled by IRQ handler when the job is complete. 
*/ + struct dma_fence *done_fence; + + /* GPU virtual addresses of the start/end of the CL job. */ + u32 start, end; +}; + +struct v3d_exec_info { + struct v3d_dev *v3d; + + struct v3d_job bin, render; + + /* Fence for when the scheduler considers the binner to be + * done, for render to depend on. + */ + struct dma_fence *bin_done_fence; + + struct kref refcount; + + /* This is the array of BOs that were looked up at the start of exec. */ + struct v3d_bo **bo; + u32 bo_count; + + /* List of overflow BOs used in the job that need to be + * released once the job is complete. + */ + struct list_head unref_list; + + /* Submitted tile memory allocation start/size, tile state. */ + u32 qma, qms, qts; +}; + +/** + * _wait_for - magic (register) wait macro + * + * Does the right thing for modeset paths when run under kdgb or similar atomic + * contexts. Note that it's important that we check the condition again after + * having timed out, since the timeout could be due to preemption or similar and + * we've never had a chance to check the condition before the timeout. 
+ */ +#define wait_for(COND, MS) ({ \ + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + if (!(COND)) \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + msleep(1); \ + } \ + ret__; \ +}) + +static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) +{ + /* nsecs_to_jiffies64() does not guard against overflow */ + if (NSEC_PER_SEC % HZ && + div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) + return MAX_JIFFY_OFFSET; + + return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); +} + +/* v3d_bo.c */ +void v3d_free_object(struct drm_gem_object *gem_obj); +struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, + size_t size); +int v3d_create_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_gem_fault(struct vm_fault *vmf); +int v3d_mmap(struct file *filp, struct vm_area_struct *vma); +struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj); +int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +/* v3d_debugfs.c */ +int v3d_debugfs_init(struct drm_minor *minor); + +/* v3d_fence.c */ +extern const struct dma_fence_ops v3d_fence_ops; +struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); + +/* v3d_gem.c */ +int v3d_gem_init(struct drm_device *dev); +void v3d_gem_destroy(struct drm_device *dev); +int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct 
drm_file *file_priv); +void v3d_exec_put(struct v3d_exec_info *exec); +void v3d_reset(struct v3d_dev *v3d); +void v3d_invalidate_caches(struct v3d_dev *v3d); +void v3d_flush_caches(struct v3d_dev *v3d); + +/* v3d_irq.c */ +void v3d_irq_init(struct v3d_dev *v3d); +void v3d_irq_enable(struct v3d_dev *v3d); +void v3d_irq_disable(struct v3d_dev *v3d); +void v3d_irq_reset(struct v3d_dev *v3d); + +/* v3d_mmu.c */ +int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo, + u32 *offset); +int v3d_mmu_set_page_table(struct v3d_dev *v3d); +void v3d_mmu_insert_ptes(struct v3d_bo *bo); +void v3d_mmu_remove_ptes(struct v3d_bo *bo); + +/* v3d_sched.c */ +int v3d_sched_init(struct v3d_dev *v3d); +void v3d_sched_fini(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c new file mode 100644 index 000000000000..087d49c8cb12 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_fence.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +#include "v3d_drv.h" + +struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue) +{ + struct v3d_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return ERR_PTR(-ENOMEM); + + fence->dev = &v3d->drm; + fence->queue = queue; + fence->seqno = ++v3d->queue[queue].emit_seqno; + dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock, + v3d->queue[queue].fence_context, fence->seqno); + + return &fence->base; +} + +static const char *v3d_fence_get_driver_name(struct dma_fence *fence) +{ + return "v3d"; +} + +static const char *v3d_fence_get_timeline_name(struct dma_fence *fence) +{ + struct v3d_fence *f = to_v3d_fence(fence); + + if (f->queue == V3D_BIN) + return "v3d-bin"; + else + return "v3d-render"; +} + +static bool v3d_fence_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static bool v3d_fence_signaled(struct dma_fence *fence) +{ + struct v3d_fence *f = to_v3d_fence(fence); + struct 
v3d_dev *v3d = to_v3d_dev(f->dev); + + return v3d->queue[f->queue].finished_seqno >= f->seqno; +} + +const struct dma_fence_ops v3d_fence_ops = { + .get_driver_name = v3d_fence_get_driver_name, + .get_timeline_name = v3d_fence_get_timeline_name, + .enable_signaling = v3d_fence_enable_signaling, + .signaled = v3d_fence_signaled, + .wait = dma_fence_default_wait, + .release = dma_fence_free, +}; diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c new file mode 100644 index 000000000000..b513f9189caf --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "uapi/drm/v3d_drm.h" +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +static void +v3d_init_core(struct v3d_dev *v3d, int core) +{ + /* Set OVRTMUOUT, which means that the texture sampler uniform + * configuration's tmu output type field is used, instead of + * using the hardware default behavior based on the texture + * type. If you want the default behavior, you can still put + * "2" in the indirect texture state's output_type field. + */ + V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); + + /* Whenever we flush the L2T cache, we always want to flush + * the whole thing. + */ + V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0); + V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0); +} + +/* Sets invariant state for the HW. 
*/ +static void +v3d_init_hw_state(struct v3d_dev *v3d) +{ + v3d_init_core(v3d, 0); +} + +static void +v3d_idle_axi(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ); + + if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) & + (V3D_GMP_STATUS_RD_COUNT_MASK | + V3D_GMP_STATUS_WR_COUNT_MASK | + V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) { + DRM_ERROR("Failed to wait for safe GMP shutdown\n"); + } +} + +static void +v3d_idle_gca(struct v3d_dev *v3d) +{ + if (v3d->ver >= 41) + return; + + V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN); + + if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) & + V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) == + V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) { + DRM_ERROR("Failed to wait for safe GCA shutdown\n"); + } +} + +static void +v3d_reset_v3d(struct v3d_dev *v3d) +{ + int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION); + + if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) { + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, + V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0); + + /* GFXH-1383: The SW_INIT may cause a stray write to address 0 + * of the unit, so reset it to its power-on value here. + */ + V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK); + } else { + WARN_ON_ONCE(V3D_GET_FIELD(version, + V3D_TOP_GR_BRIDGE_MAJOR) != 7); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, + V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0); + } + + v3d_init_hw_state(v3d); +} + +void +v3d_reset(struct v3d_dev *v3d) +{ + struct drm_device *dev = &v3d->drm; + + DRM_ERROR("Resetting GPU.\n"); + trace_v3d_reset_begin(dev); + + /* XXX: only needed for safe powerdown, not reset. 
*/ + if (false) + v3d_idle_axi(v3d, 0); + + v3d_idle_gca(v3d); + v3d_reset_v3d(v3d); + + v3d_mmu_set_page_table(v3d); + v3d_irq_reset(v3d); + + trace_v3d_reset_end(dev); +} + +static void +v3d_flush_l3(struct v3d_dev *v3d) +{ + if (v3d->ver < 41) { + u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL); + + V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, + gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH); + + if (v3d->ver < 33) { + V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, + gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH); + } + } +} + +/* Invalidates the (read-only) L2 cache. */ +static void +v3d_invalidate_l2(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, + V3D_L2CACTL_L2CCLR | + V3D_L2CACTL_L2CENA); +} + +static void +v3d_invalidate_l1td(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF); + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L1T write combiner flush\n"); + } +} + +/* Invalidates texture L2 cachelines */ +static void +v3d_flush_l2t(struct v3d_dev *v3d, int core) +{ + v3d_invalidate_l1td(v3d, core); + + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, + V3D_L2TCACTL_L2TFLS | + V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM)); + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L2T flush\n"); + } +} + +/* Invalidates the slice caches. These are read-only caches. 
*/ +static void +v3d_invalidate_slices(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_CTL_SLCACTL, + V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC)); +} + +/* Invalidates texture L2 cachelines */ +static void +v3d_invalidate_l2t(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, + V3D_CTL_L2TCACTL, + V3D_L2TCACTL_L2TFLS | + V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM)); + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L2T invalidate\n"); + } +} + +void +v3d_invalidate_caches(struct v3d_dev *v3d) +{ + v3d_flush_l3(v3d); + + v3d_invalidate_l2(v3d, 0); + v3d_invalidate_slices(v3d, 0); + v3d_flush_l2t(v3d, 0); +} + +void +v3d_flush_caches(struct v3d_dev *v3d) +{ + v3d_invalidate_l1td(v3d, 0); + v3d_invalidate_l2t(v3d, 0); +} + +static void +v3d_attach_object_fences(struct v3d_exec_info *exec) +{ + struct dma_fence *out_fence = &exec->render.base.s_fence->finished; + struct v3d_bo *bo; + int i; + + for (i = 0; i < exec->bo_count; i++) { + bo = to_v3d_bo(&exec->bo[i]->base); + + /* XXX: Use shared fences for read-only objects. */ + reservation_object_add_excl_fence(bo->resv, out_fence); + } +} + +static void +v3d_unlock_bo_reservations(struct drm_device *dev, + struct v3d_exec_info *exec, + struct ww_acquire_ctx *acquire_ctx) +{ + int i; + + for (i = 0; i < exec->bo_count; i++) { + struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base); + + ww_mutex_unlock(&bo->resv->lock); + } + + ww_acquire_fini(acquire_ctx); +} + +/* Takes the reservation lock on all the BOs being referenced, so that + * at queue submit time we can update the reservations. + * + * We don't lock the RCL the tile alloc/state BOs, or overflow memory + * (all of which are on exec->unref_list). They're entirely private + * to v3d, so we don't attach dma-buf fences to them. 
+ */ +static int +v3d_lock_bo_reservations(struct drm_device *dev, + struct v3d_exec_info *exec, + struct ww_acquire_ctx *acquire_ctx) +{ + int contended_lock = -1; + int i, ret; + struct v3d_bo *bo; + + ww_acquire_init(acquire_ctx, &reservation_ww_class); + +retry: + if (contended_lock != -1) { + bo = to_v3d_bo(&exec->bo[contended_lock]->base); + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + acquire_ctx); + if (ret) { + ww_acquire_done(acquire_ctx); + return ret; + } + } + + for (i = 0; i < exec->bo_count; i++) { + if (i == contended_lock) + continue; + + bo = to_v3d_bo(&exec->bo[i]->base); + + ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx); + if (ret) { + int j; + + for (j = 0; j < i; j++) { + bo = to_v3d_bo(&exec->bo[j]->base); + ww_mutex_unlock(&bo->resv->lock); + } + + if (contended_lock != -1 && contended_lock >= i) { + bo = to_v3d_bo(&exec->bo[contended_lock]->base); + + ww_mutex_unlock(&bo->resv->lock); + } + + if (ret == -EDEADLK) { + contended_lock = i; + goto retry; + } + + ww_acquire_done(acquire_ctx); + return ret; + } + } + + ww_acquire_done(acquire_ctx); + + /* Reserve space for our shared (read-only) fence references, + * before we commit the CL to the hardware. + */ + for (i = 0; i < exec->bo_count; i++) { + bo = to_v3d_bo(&exec->bo[i]->base); + + ret = reservation_object_reserve_shared(bo->resv); + if (ret) { + v3d_unlock_bo_reservations(dev, exec, acquire_ctx); + return ret; + } + } + + return 0; +} + +/** + * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects + * referenced by the job. + * @dev: DRM device + * @file_priv: DRM file for this fd + * @exec: V3D job being set up + * + * The command validator needs to reference BOs by their index within + * the submitted job's BO list. This does the validation of the job's + * BO list and reference counting for the lifetime of the job. 
+ * + * Note that this function doesn't need to unreference the BOs on + * failure, because that will happen at v3d_exec_cleanup() time. + */ +static int +v3d_cl_lookup_bos(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_v3d_submit_cl *args, + struct v3d_exec_info *exec) +{ + u32 *handles; + int ret = 0; + int i; + + exec->bo_count = args->bo_handle_count; + + if (!exec->bo_count) { + /* See comment on bo_index for why we have to check + * this. + */ + DRM_DEBUG("Rendering requires BOs\n"); + return -EINVAL; + } + + exec->bo = kvmalloc_array(exec->bo_count, + sizeof(struct drm_gem_cma_object *), + GFP_KERNEL | __GFP_ZERO); + if (!exec->bo) { + DRM_DEBUG("Failed to allocate validated BO pointers\n"); + return -ENOMEM; + } + + handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL); + if (!handles) { + ret = -ENOMEM; + DRM_DEBUG("Failed to allocate incoming GEM handles\n"); + goto fail; + } + + if (copy_from_user(handles, + (void __user *)(uintptr_t)args->bo_handles, + exec->bo_count * sizeof(u32))) { + ret = -EFAULT; + DRM_DEBUG("Failed to copy in GEM handles\n"); + goto fail; + } + + spin_lock(&file_priv->table_lock); + for (i = 0; i < exec->bo_count; i++) { + struct drm_gem_object *bo = idr_find(&file_priv->object_idr, + handles[i]); + if (!bo) { + DRM_DEBUG("Failed to look up GEM BO %d: %d\n", + i, handles[i]); + ret = -ENOENT; + spin_unlock(&file_priv->table_lock); + goto fail; + } + drm_gem_object_get(bo); + exec->bo[i] = to_v3d_bo(bo); + } + spin_unlock(&file_priv->table_lock); + +fail: + kvfree(handles); + return ret; +} + +static void +v3d_exec_cleanup(struct kref *ref) +{ + struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info, + refcount); + struct v3d_dev *v3d = exec->v3d; + unsigned int i; + struct v3d_bo *bo, *save; + + dma_fence_put(exec->bin.in_fence); + dma_fence_put(exec->render.in_fence); + + dma_fence_put(exec->bin.done_fence); + dma_fence_put(exec->render.done_fence); + + 
dma_fence_put(exec->bin_done_fence); + + for (i = 0; i < exec->bo_count; i++) + drm_gem_object_put_unlocked(&exec->bo[i]->base); + kvfree(exec->bo); + + list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) { + drm_gem_object_put_unlocked(&bo->base); + } + + pm_runtime_mark_last_busy(v3d->dev); + pm_runtime_put_autosuspend(v3d->dev); + + kfree(exec); +} + +void v3d_exec_put(struct v3d_exec_info *exec) +{ + kref_put(&exec->refcount, v3d_exec_cleanup); +} + +int +v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_v3d_wait_bo *args = data; + struct drm_gem_object *gem_obj; + struct v3d_bo *bo; + ktime_t start = ktime_get(); + u64 delta_ns; + unsigned long timeout_jiffies = + nsecs_to_jiffies_timeout(args->timeout_ns); + + if (args->pad != 0) + return -EINVAL; + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -EINVAL; + } + bo = to_v3d_bo(gem_obj); + + ret = reservation_object_wait_timeout_rcu(bo->resv, + true, true, + timeout_jiffies); + + if (ret == 0) + ret = -ETIME; + else if (ret > 0) + ret = 0; + + /* Decrement the user's timeout, in case we got interrupted + * such that the ioctl will be restarted. + */ + delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); + if (delta_ns < args->timeout_ns) + args->timeout_ns -= delta_ns; + else + args->timeout_ns = 0; + + /* Asked to wait beyond the jiffie/scheduler precision? */ + if (ret == -ETIME && args->timeout_ns) + ret = -EAGAIN; + + drm_gem_object_put_unlocked(gem_obj); + + return ret; +} + +/** + * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. + * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * This is the main entrypoint for userspace to submit a 3D frame to + * the GPU. 
Userspace provides the binner command list (if + * applicable), and the kernel sets up the render command list to draw + * to the framebuffer described in the ioctl, using the command lists + * that the 3D engine's binner will produce. + */ +int +v3d_submit_cl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_submit_cl *args = data; + struct v3d_exec_info *exec; + struct ww_acquire_ctx acquire_ctx; + struct drm_syncobj *sync_out; + int ret = 0; + + if (args->pad != 0) { + DRM_INFO("pad must be zero: %d\n", args->pad); + return -EINVAL; + } + + exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); + if (!exec) + return -ENOMEM; + + ret = pm_runtime_get_sync(v3d->dev); + if (ret < 0) { + kfree(exec); + return ret; + } + + kref_init(&exec->refcount); + + ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl, + &exec->bin.in_fence); + if (ret == -EINVAL) + goto fail; + + ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl, + &exec->render.in_fence); + if (ret == -EINVAL) + goto fail; + + exec->qma = args->qma; + exec->qms = args->qms; + exec->qts = args->qts; + exec->bin.exec = exec; + exec->bin.start = args->bcl_start; + exec->bin.end = args->bcl_end; + exec->render.exec = exec; + exec->render.start = args->rcl_start; + exec->render.end = args->rcl_end; + exec->v3d = v3d; + INIT_LIST_HEAD(&exec->unref_list); + + ret = v3d_cl_lookup_bos(dev, file_priv, args, exec); + if (ret) + goto fail; + + ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx); + if (ret) + goto fail; + + if (exec->bin.start != exec->bin.end) { + ret = drm_sched_job_init(&exec->bin.base, + &v3d->queue[V3D_BIN].sched, + &v3d_priv->sched_entity[V3D_BIN], + v3d_priv); + if (ret) + goto fail_unreserve; + + exec->bin_done_fence = + dma_fence_get(&exec->bin.base.s_fence->finished); + + kref_get(&exec->refcount); /* put by scheduler job completion */ + 
drm_sched_entity_push_job(&exec->bin.base, + &v3d_priv->sched_entity[V3D_BIN]); + } + + ret = drm_sched_job_init(&exec->render.base, + &v3d->queue[V3D_RENDER].sched, + &v3d_priv->sched_entity[V3D_RENDER], + v3d_priv); + if (ret) + goto fail_unreserve; + + kref_get(&exec->refcount); /* put by scheduler job completion */ + drm_sched_entity_push_job(&exec->render.base, + &v3d_priv->sched_entity[V3D_RENDER]); + + v3d_attach_object_fences(exec); + + v3d_unlock_bo_reservations(dev, exec, &acquire_ctx); + + /* Update the return sync object for the */ + sync_out = drm_syncobj_find(file_priv, args->out_sync); + if (sync_out) { + drm_syncobj_replace_fence(sync_out, + &exec->render.base.s_fence->finished); + drm_syncobj_put(sync_out); + } + + v3d_exec_put(exec); + + return 0; + +fail_unreserve: + v3d_unlock_bo_reservations(dev, exec, &acquire_ctx); +fail: + v3d_exec_put(exec); + + return ret; +} + +int +v3d_gem_init(struct drm_device *dev) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + u32 pt_size = 4096 * 1024; + int ret, i; + + for (i = 0; i < V3D_MAX_QUEUES; i++) + v3d->queue[i].fence_context = dma_fence_context_alloc(1); + + spin_lock_init(&v3d->mm_lock); + spin_lock_init(&v3d->job_lock); + mutex_init(&v3d->bo_lock); + mutex_init(&v3d->reset_lock); + + /* Note: We don't allocate address 0. Various bits of HW + * treat 0 as special, such as the occlusion query counters + * where 0 means "disabled". + */ + drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1); + + v3d->pt = dma_alloc_wc(v3d->dev, pt_size, + &v3d->pt_paddr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!v3d->pt) { + drm_mm_takedown(&v3d->mm); + dev_err(v3d->dev, + "Failed to allocate page tables. 
" + "Please ensure you have CMA enabled.\n"); + return -ENOMEM; + } + + v3d_init_hw_state(v3d); + v3d_mmu_set_page_table(v3d); + + ret = v3d_sched_init(v3d); + if (ret) { + drm_mm_takedown(&v3d->mm); + dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, + v3d->pt_paddr); + } + + return 0; +} + +void +v3d_gem_destroy(struct drm_device *dev) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + enum v3d_queue q; + + v3d_sched_fini(v3d); + + /* Waiting for exec to finish would need to be done before + * unregistering V3D. + */ + for (q = 0; q < V3D_MAX_QUEUES; q++) { + WARN_ON(v3d->queue[q].emit_seqno != + v3d->queue[q].finished_seqno); + } + + drm_mm_takedown(&v3d->mm); + + dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr); +} diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c new file mode 100644 index 000000000000..77e1fa046c10 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +/** + * DOC: Interrupt management for the V3D engine + * + * When we take a binning or rendering flush done interrupt, we need + * to signal the fence for that job so that the scheduler can queue up + * the next one and unblock any waiters. + * + * When we take the binner out of memory interrupt, we need to + * allocate some new memory and pass it to the binner so that the + * current job can make progress. 
+ */ + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \ + V3D_INT_FLDONE | \ + V3D_INT_FRDONE | \ + V3D_INT_GMPV)) + +#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \ + V3D_HUB_INT_MMU_PTI | \ + V3D_HUB_INT_MMU_CAP)) + +static void +v3d_overflow_mem_work(struct work_struct *work) +{ + struct v3d_dev *v3d = + container_of(work, struct v3d_dev, overflow_mem_work); + struct drm_device *dev = &v3d->drm; + struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024); + unsigned long irqflags; + + if (IS_ERR(bo)) { + DRM_ERROR("Couldn't allocate binner overflow mem\n"); + return; + } + + /* We lost a race, and our work task came in after the bin job + * completed and exited. This can happen because the HW + * signals OOM before it's fully OOM, so the binner might just + * barely complete. + * + * If we lose the race and our work task comes in after a new + * bin job got scheduled, that's fine. We'll just give them + * some binner pool anyway. + */ + spin_lock_irqsave(&v3d->job_lock, irqflags); + if (!v3d->bin_job) { + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + goto out; + } + + drm_gem_object_get(&bo->base); + list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list); + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + + V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); + V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size); + +out: + drm_gem_object_put_unlocked(&bo->base); +} + +static irqreturn_t +v3d_irq(int irq, void *arg) +{ + struct v3d_dev *v3d = arg; + u32 intsts; + irqreturn_t status = IRQ_NONE; + + intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS); + + /* Acknowledge the interrupts we're handling here. */ + V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts); + + if (intsts & V3D_INT_OUTOMEM) { + /* Note that the OOM status is edge signaled, so the + * interrupt won't happen again until the we actually + * add more memory. 
+ */ + schedule_work(&v3d->overflow_mem_work); + status = IRQ_HANDLED; + } + + if (intsts & V3D_INT_FLDONE) { + v3d->queue[V3D_BIN].finished_seqno++; + dma_fence_signal(v3d->bin_job->bin.done_fence); + status = IRQ_HANDLED; + } + + if (intsts & V3D_INT_FRDONE) { + v3d->queue[V3D_RENDER].finished_seqno++; + dma_fence_signal(v3d->render_job->render.done_fence); + + status = IRQ_HANDLED; + } + + /* We shouldn't be triggering these if we have GMP in + * always-allowed mode. + */ + if (intsts & V3D_INT_GMPV) + dev_err(v3d->dev, "GMP violation\n"); + + return status; +} + +static irqreturn_t +v3d_hub_irq(int irq, void *arg) +{ + struct v3d_dev *v3d = arg; + u32 intsts; + irqreturn_t status = IRQ_NONE; + + intsts = V3D_READ(V3D_HUB_INT_STS); + + /* Acknowledge the interrupts we're handling here. */ + V3D_WRITE(V3D_HUB_INT_CLR, intsts); + + if (intsts & (V3D_HUB_INT_MMU_WRV | + V3D_HUB_INT_MMU_PTI | + V3D_HUB_INT_MMU_CAP)) { + u32 axi_id = V3D_READ(V3D_MMU_VIO_ID); + u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8; + + dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n", + axi_id, (long long)vio_addr, + ((intsts & V3D_HUB_INT_MMU_WRV) ? + ", write violation" : ""), + ((intsts & V3D_HUB_INT_MMU_PTI) ? + ", pte invalid" : ""), + ((intsts & V3D_HUB_INT_MMU_CAP) ? + ", cap exceeded" : "")); + status = IRQ_HANDLED; + } + + return status; +} + +void +v3d_irq_init(struct v3d_dev *v3d) +{ + int ret, core; + + INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); + + /* Clear any pending interrupts someone might have left around + * for us. 
+ */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); + + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), + v3d_hub_irq, IRQF_SHARED, + "v3d_hub", v3d); + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1), + v3d_irq, IRQF_SHARED, + "v3d_core0", v3d); + if (ret) + dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + + v3d_irq_enable(v3d); +} + +void +v3d_irq_enable(struct v3d_dev *v3d) +{ + int core; + + /* Enable our set of interrupts, masking out any others. */ + for (core = 0; core < v3d->cores; core++) { + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS); + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS); + } + + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS); + V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS); +} + +void +v3d_irq_disable(struct v3d_dev *v3d) +{ + int core; + + /* Disable all interrupts. */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); + + /* Clear any pending interrupts we might have left. */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); + + cancel_work_sync(&v3d->overflow_mem_work); +} + +/** Reinitializes interrupt registers when a GPU reset is performed. */ +void v3d_irq_reset(struct v3d_dev *v3d) +{ + v3d_irq_enable(v3d); +} diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c new file mode 100644 index 000000000000..b00f97c31b70 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +/** + * DOC: Broadcom V3D MMU + * + * The V3D 3.x hardware (compared to VC4) now includes an MMU. 
It has + * a single level of page tables for the V3D's 4GB address space to + * map to AXI bus addresses, thus it could need up to 4MB of + * physically contiguous memory to store the PTEs. + * + * Because the 4MB of contiguous memory for page tables is precious, + * and switching between them is expensive, we load all BOs into the + * same 4GB address space. + * + * To protect clients from each other, we should use the GMP to + * quickly mask out (at 128kb granularity) what pages are available to + * each client. This is not yet implemented. + */ + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define V3D_MMU_PAGE_SHIFT 12 + +/* Note: All PTEs for the 1MB superpage must be filled with the + * superpage bit set. + */ +#define V3D_PTE_SUPERPAGE BIT(31) +#define V3D_PTE_WRITEABLE BIT(29) +#define V3D_PTE_VALID BIT(28) + +static int v3d_mmu_flush_all(struct v3d_dev *v3d) +{ + int ret; + + /* Make sure that another flush isn't already running when we + * start this one. + */ + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); + if (ret) + dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n"); + + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | + V3D_MMU_CTL_TLB_CLEAR); + + V3D_WRITE(V3D_MMUC_CONTROL, + V3D_MMUC_CONTROL_FLUSH | + V3D_MMUC_CONTROL_ENABLE); + + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); + if (ret) { + dev_err(v3d->dev, "TLB clear wait idle failed\n"); + return ret; + } + + ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & + V3D_MMUC_CONTROL_FLUSHING), 100); + if (ret) + dev_err(v3d->dev, "MMUC flush wait idle failed\n"); + + return ret; +} + +int v3d_mmu_set_page_table(struct v3d_dev *v3d) +{ + V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT); + V3D_WRITE(V3D_MMU_CTL, + V3D_MMU_CTL_ENABLE | + V3D_MMU_CTL_PT_INVALID | + V3D_MMU_CTL_PT_INVALID_ABORT | + V3D_MMU_CTL_WRITE_VIOLATION_ABORT | + V3D_MMU_CTL_CAP_EXCEEDED_ABORT); + V3D_WRITE(V3D_MMU_ILLEGAL_ADDR, + 
(v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) | + V3D_MMU_ILLEGAL_ADDR_ENABLE); + V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE); + + return v3d_mmu_flush_all(v3d); +} + +void v3d_mmu_insert_ptes(struct v3d_bo *bo) +{ + struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); + u32 page = bo->node.start; + u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; + unsigned int count; + struct scatterlist *sgl; + + for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) { + u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT; + u32 pte = page_prot | page_address; + u32 i; + + BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >= + BIT(24)); + + for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++) + v3d->pt[page++] = pte + i; + } + + WARN_ON_ONCE(page - bo->node.start != + bo->base.size >> V3D_MMU_PAGE_SHIFT); + + if (v3d_mmu_flush_all(v3d)) + dev_err(v3d->dev, "MMU flush timeout\n"); +} + +void v3d_mmu_remove_ptes(struct v3d_bo *bo) +{ + struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); + u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT; + u32 page; + + for (page = bo->node.start; page < bo->node.start + npages; page++) + v3d->pt[page] = 0; + + if (v3d_mmu_flush_all(v3d)) + dev_err(v3d->dev, "MMU flush timeout\n"); +} diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h new file mode 100644 index 000000000000..fc13282dfc2f --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +#ifndef V3D_REGS_H +#define V3D_REGS_H + +#include + +#define V3D_MASK(high, low) ((u32)GENMASK(high, low)) +/* Using the GNU statement expression extension */ +#define V3D_SET_FIELD(value, field) \ + ({ \ + u32 fieldval = (value) << field##_SHIFT; \ + WARN_ON((fieldval & ~field##_MASK) != 0); \ + fieldval & field##_MASK; \ + }) + +#define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \ + field##_SHIFT) + +/* Hub registers for shared hardware between 
V3D cores. */ + +#define V3D_HUB_AXICFG 0x00000 +# define V3D_HUB_AXICFG_MAX_LEN_MASK V3D_MASK(3, 0) +# define V3D_HUB_AXICFG_MAX_LEN_SHIFT 0 +#define V3D_HUB_UIFCFG 0x00004 +#define V3D_HUB_IDENT0 0x00008 + +#define V3D_HUB_IDENT1 0x0000c +# define V3D_HUB_IDENT1_WITH_MSO BIT(19) +# define V3D_HUB_IDENT1_WITH_TSY BIT(18) +# define V3D_HUB_IDENT1_WITH_TFU BIT(17) +# define V3D_HUB_IDENT1_WITH_L3C BIT(16) +# define V3D_HUB_IDENT1_NHOSTS_MASK V3D_MASK(15, 12) +# define V3D_HUB_IDENT1_NHOSTS_SHIFT 12 +# define V3D_HUB_IDENT1_NCORES_MASK V3D_MASK(11, 8) +# define V3D_HUB_IDENT1_NCORES_SHIFT 8 +# define V3D_HUB_IDENT1_REV_MASK V3D_MASK(7, 4) +# define V3D_HUB_IDENT1_REV_SHIFT 4 +# define V3D_HUB_IDENT1_TVER_MASK V3D_MASK(3, 0) +# define V3D_HUB_IDENT1_TVER_SHIFT 0 + +#define V3D_HUB_IDENT2 0x00010 +# define V3D_HUB_IDENT2_WITH_MMU BIT(8) +# define V3D_HUB_IDENT2_L3C_NKB_MASK V3D_MASK(7, 0) +# define V3D_HUB_IDENT2_L3C_NKB_SHIFT 0 + +#define V3D_HUB_IDENT3 0x00014 +# define V3D_HUB_IDENT3_IPREV_MASK V3D_MASK(15, 8) +# define V3D_HUB_IDENT3_IPREV_SHIFT 8 +# define V3D_HUB_IDENT3_IPIDX_MASK V3D_MASK(7, 0) +# define V3D_HUB_IDENT3_IPIDX_SHIFT 0 + +#define V3D_HUB_INT_STS 0x00050 +#define V3D_HUB_INT_SET 0x00054 +#define V3D_HUB_INT_CLR 0x00058 +#define V3D_HUB_INT_MSK_STS 0x0005c +#define V3D_HUB_INT_MSK_SET 0x00060 +#define V3D_HUB_INT_MSK_CLR 0x00064 +# define V3D_HUB_INT_MMU_WRV BIT(5) +# define V3D_HUB_INT_MMU_PTI BIT(4) +# define V3D_HUB_INT_MMU_CAP BIT(3) +# define V3D_HUB_INT_MSO BIT(2) +# define V3D_HUB_INT_TFUC BIT(1) +# define V3D_HUB_INT_TFUF BIT(0) + +#define V3D_GCA_CACHE_CTRL 0x0000c +# define V3D_GCA_CACHE_CTRL_FLUSH BIT(0) + +#define V3D_GCA_SAFE_SHUTDOWN 0x000b0 +# define V3D_GCA_SAFE_SHUTDOWN_EN BIT(0) + +#define V3D_GCA_SAFE_SHUTDOWN_ACK 0x000b4 +# define V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED 3 + +# define V3D_TOP_GR_BRIDGE_REVISION 0x00000 +# define V3D_TOP_GR_BRIDGE_MAJOR_MASK V3D_MASK(15, 8) +# define V3D_TOP_GR_BRIDGE_MAJOR_SHIFT 8 +# define 
V3D_TOP_GR_BRIDGE_MINOR_MASK V3D_MASK(7, 0) +# define V3D_TOP_GR_BRIDGE_MINOR_SHIFT 0 + +/* 7268 reset reg */ +# define V3D_TOP_GR_BRIDGE_SW_INIT_0 0x00008 +# define V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT BIT(0) +/* 7278 reset reg */ +# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c +# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0) + +/* Per-MMU registers. */ + +#define V3D_MMUC_CONTROL 0x01000 +# define V3D_MMUC_CONTROL_CLEAR BIT(3) +# define V3D_MMUC_CONTROL_FLUSHING BIT(2) +# define V3D_MMUC_CONTROL_FLUSH BIT(1) +# define V3D_MMUC_CONTROL_ENABLE BIT(0) + +#define V3D_MMU_CTL 0x01200 +# define V3D_MMU_CTL_CAP_EXCEEDED BIT(27) +# define V3D_MMU_CTL_CAP_EXCEEDED_ABORT BIT(26) +# define V3D_MMU_CTL_CAP_EXCEEDED_INT BIT(25) +# define V3D_MMU_CTL_CAP_EXCEEDED_EXCEPTION BIT(24) +# define V3D_MMU_CTL_PT_INVALID BIT(20) +# define V3D_MMU_CTL_PT_INVALID_ABORT BIT(19) +# define V3D_MMU_CTL_PT_INVALID_INT BIT(18) +# define V3D_MMU_CTL_PT_INVALID_EXCEPTION BIT(17) +# define V3D_MMU_CTL_WRITE_VIOLATION BIT(16) +# define V3D_MMU_CTL_WRITE_VIOLATION_ABORT BIT(11) +# define V3D_MMU_CTL_WRITE_VIOLATION_INT BIT(10) +# define V3D_MMU_CTL_WRITE_VIOLATION_EXCEPTION BIT(9) +# define V3D_MMU_CTL_TLB_CLEARING BIT(7) +# define V3D_MMU_CTL_TLB_STATS_CLEAR BIT(3) +# define V3D_MMU_CTL_TLB_CLEAR BIT(2) +# define V3D_MMU_CTL_TLB_STATS_ENABLE BIT(1) +# define V3D_MMU_CTL_ENABLE BIT(0) + +#define V3D_MMU_PT_PA_BASE 0x01204 +#define V3D_MMU_HIT 0x01208 +#define V3D_MMU_MISSES 0x0120c +#define V3D_MMU_STALLS 0x01210 + +#define V3D_MMU_ADDR_CAP 0x01214 +# define V3D_MMU_ADDR_CAP_ENABLE BIT(31) +# define V3D_MMU_ADDR_CAP_MPAGE_MASK V3D_MASK(11, 0) +# define V3D_MMU_ADDR_CAP_MPAGE_SHIFT 0 + +#define V3D_MMU_SHOOT_DOWN 0x01218 +# define V3D_MMU_SHOOT_DOWN_SHOOTING BIT(29) +# define V3D_MMU_SHOOT_DOWN_SHOOT BIT(28) +# define V3D_MMU_SHOOT_DOWN_PAGE_MASK V3D_MASK(27, 0) +# define V3D_MMU_SHOOT_DOWN_PAGE_SHIFT 0 + +#define V3D_MMU_BYPASS_START 0x0121c +#define V3D_MMU_BYPASS_END 
0x01220 + +/* AXI ID of the access that faulted */ +#define V3D_MMU_VIO_ID 0x0122c + +/* Address for illegal PTEs to return */ +#define V3D_MMU_ILLEGAL_ADDR 0x01230 +# define V3D_MMU_ILLEGAL_ADDR_ENABLE BIT(31) + +/* Address that faulted */ +#define V3D_MMU_VIO_ADDR 0x01234 + +/* Per-V3D-core registers */ + +#define V3D_CTL_IDENT0 0x00000 +# define V3D_IDENT0_VER_MASK V3D_MASK(31, 24) +# define V3D_IDENT0_VER_SHIFT 24 + +#define V3D_CTL_IDENT1 0x00004 +/* Multiples of 1kb */ +# define V3D_IDENT1_VPM_SIZE_MASK V3D_MASK(31, 28) +# define V3D_IDENT1_VPM_SIZE_SHIFT 28 +# define V3D_IDENT1_NSEM_MASK V3D_MASK(23, 16) +# define V3D_IDENT1_NSEM_SHIFT 16 +# define V3D_IDENT1_NTMU_MASK V3D_MASK(15, 12) +# define V3D_IDENT1_NTMU_SHIFT 12 +# define V3D_IDENT1_QUPS_MASK V3D_MASK(11, 8) +# define V3D_IDENT1_QUPS_SHIFT 8 +# define V3D_IDENT1_NSLC_MASK V3D_MASK(7, 4) +# define V3D_IDENT1_NSLC_SHIFT 4 +# define V3D_IDENT1_REV_MASK V3D_MASK(3, 0) +# define V3D_IDENT1_REV_SHIFT 0 + +#define V3D_CTL_IDENT2 0x00008 +# define V3D_IDENT2_BCG_INT BIT(28) + +#define V3D_CTL_MISCCFG 0x00018 +# define V3D_MISCCFG_OVRTMUOUT BIT(0) + +#define V3D_CTL_L2CACTL 0x00020 +# define V3D_L2CACTL_L2CCLR BIT(2) +# define V3D_L2CACTL_L2CDIS BIT(1) +# define V3D_L2CACTL_L2CENA BIT(0) + +#define V3D_CTL_SLCACTL 0x00024 +# define V3D_SLCACTL_TVCCS_MASK V3D_MASK(27, 24) +# define V3D_SLCACTL_TVCCS_SHIFT 24 +# define V3D_SLCACTL_TDCCS_MASK V3D_MASK(19, 16) +# define V3D_SLCACTL_TDCCS_SHIFT 16 +# define V3D_SLCACTL_UCC_MASK V3D_MASK(11, 8) +# define V3D_SLCACTL_UCC_SHIFT 8 +# define V3D_SLCACTL_ICC_MASK V3D_MASK(3, 0) +# define V3D_SLCACTL_ICC_SHIFT 0 + +#define V3D_CTL_L2TCACTL 0x00030 +# define V3D_L2TCACTL_TMUWCF BIT(8) +# define V3D_L2TCACTL_L2T_NO_WM BIT(4) +# define V3D_L2TCACTL_FLM_FLUSH 0 +# define V3D_L2TCACTL_FLM_CLEAR 1 +# define V3D_L2TCACTL_FLM_CLEAN 2 +# define V3D_L2TCACTL_FLM_MASK V3D_MASK(2, 1) +# define V3D_L2TCACTL_FLM_SHIFT 1 +# define V3D_L2TCACTL_L2TFLS BIT(0) +#define V3D_CTL_L2TFLSTA 
0x00034 +#define V3D_CTL_L2TFLEND 0x00038 + +#define V3D_CTL_INT_STS 0x00050 +#define V3D_CTL_INT_SET 0x00054 +#define V3D_CTL_INT_CLR 0x00058 +#define V3D_CTL_INT_MSK_STS 0x0005c +#define V3D_CTL_INT_MSK_SET 0x00060 +#define V3D_CTL_INT_MSK_CLR 0x00064 +# define V3D_INT_QPU_MASK V3D_MASK(27, 16) +# define V3D_INT_QPU_SHIFT 16 +# define V3D_INT_GMPV BIT(5) +# define V3D_INT_TRFB BIT(4) +# define V3D_INT_SPILLUSE BIT(3) +# define V3D_INT_OUTOMEM BIT(2) +# define V3D_INT_FLDONE BIT(1) +# define V3D_INT_FRDONE BIT(0) + +#define V3D_CLE_CT0CS 0x00100 +#define V3D_CLE_CT1CS 0x00104 +#define V3D_CLE_CTNCS(n) (V3D_CLE_CT0CS + 4 * n) +#define V3D_CLE_CT0EA 0x00108 +#define V3D_CLE_CT1EA 0x0010c +#define V3D_CLE_CTNEA(n) (V3D_CLE_CT0EA + 4 * n) +#define V3D_CLE_CT0CA 0x00110 +#define V3D_CLE_CT1CA 0x00114 +#define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n) +#define V3D_CLE_CT0RA 0x00118 +#define V3D_CLE_CT1RA 0x0011c +#define V3D_CLE_CT0LC 0x00120 +#define V3D_CLE_CT1LC 0x00124 +#define V3D_CLE_CT0PC 0x00128 +#define V3D_CLE_CT1PC 0x0012c +#define V3D_CLE_PCS 0x00130 +#define V3D_CLE_BFC 0x00134 +#define V3D_CLE_RFC 0x00138 +#define V3D_CLE_TFBC 0x0013c +#define V3D_CLE_TFIT 0x00140 +#define V3D_CLE_CT1CFG 0x00144 +#define V3D_CLE_CT1TILECT 0x00148 +#define V3D_CLE_CT1TSKIP 0x0014c +#define V3D_CLE_CT1PTCT 0x00150 +#define V3D_CLE_CT0SYNC 0x00154 +#define V3D_CLE_CT1SYNC 0x00158 +#define V3D_CLE_CT0QTS 0x0015c +# define V3D_CLE_CT0QTS_ENABLE BIT(1) +#define V3D_CLE_CT0QBA 0x00160 +#define V3D_CLE_CT1QBA 0x00164 +#define V3D_CLE_CTNQBA(n) (V3D_CLE_CT0QBA + 4 * n) +#define V3D_CLE_CT0QEA 0x00168 +#define V3D_CLE_CT1QEA 0x0016c +#define V3D_CLE_CTNQEA(n) (V3D_CLE_CT0QEA + 4 * n) +#define V3D_CLE_CT0QMA 0x00170 +#define V3D_CLE_CT0QMS 0x00174 +#define V3D_CLE_CT1QCFG 0x00178 +/* If set without ETPROC, entirely skip tiles with no primitives. */ +# define V3D_CLE_QCFG_ETFILT BIT(7) +/* If set with ETFILT, just write the clear color to tiles with no + * primitives. 
+ */ +# define V3D_CLE_QCFG_ETPROC BIT(6) +# define V3D_CLE_QCFG_ETSFLUSH BIT(1) +# define V3D_CLE_QCFG_MCDIS BIT(0) + +#define V3D_PTB_BPCA 0x00300 +#define V3D_PTB_BPCS 0x00304 +#define V3D_PTB_BPOA 0x00308 +#define V3D_PTB_BPOS 0x0030c + +#define V3D_PTB_BXCF 0x00310 +# define V3D_PTB_BXCF_RWORDERDISA BIT(1) +# define V3D_PTB_BXCF_CLIPDISA BIT(0) + +#define V3D_GMP_STATUS 0x00800 +# define V3D_GMP_STATUS_GMPRST BIT(31) +# define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24) +# define V3D_GMP_STATUS_WR_COUNT_SHIFT 24 +# define V3D_GMP_STATUS_RD_COUNT_MASK V3D_MASK(22, 16) +# define V3D_GMP_STATUS_RD_COUNT_SHIFT 16 +# define V3D_GMP_STATUS_WR_ACTIVE BIT(5) +# define V3D_GMP_STATUS_RD_ACTIVE BIT(4) +# define V3D_GMP_STATUS_CFG_BUSY BIT(3) +# define V3D_GMP_STATUS_CNTOVF BIT(2) +# define V3D_GMP_STATUS_INVPROT BIT(1) +# define V3D_GMP_STATUS_VIO BIT(0) + +#define V3D_GMP_CFG 0x00804 +# define V3D_GMP_CFG_LBURSTEN BIT(3) +# define V3D_GMP_CFG_PGCRSEN BIT() +# define V3D_GMP_CFG_STOP_REQ BIT(1) +# define V3D_GMP_CFG_PROT_ENABLE BIT(0) + +#define V3D_GMP_VIO_ADDR 0x00808 +#define V3D_GMP_VIO_TYPE 0x0080c +#define V3D_GMP_TABLE_ADDR 0x00810 +#define V3D_GMP_CLEAR_LOAD 0x00814 +#define V3D_GMP_PRESERVE_LOAD 0x00818 +#define V3D_GMP_VALID_LINES 0x00820 + +#endif /* V3D_REGS_H */ diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c new file mode 100644 index 000000000000..b07bece9417d --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2018 Broadcom */ + +/** + * DOC: Broadcom V3D scheduling + * + * The shared DRM GPU scheduler is used to coordinate submitting jobs + * to the hardware. Each DRM fd (roughly a client process) gets its + * own scheduler entity, which will process jobs in order. The GPU + * scheduler will round-robin between clients to submit the next job. 
+ * + * For simplicity, and in order to keep latency low for interactive + * jobs when bulk background jobs are queued up, we submit a new job + * to the HW only when it has completed the last one, instead of + * filling up the CT[01]Q FIFOs with jobs. Similarly, we use + * v3d_job_dependency() to manage the dependency between bin and + * render, instead of having the clients submit jobs with using the + * HW's semaphores to interlock between them. + */ + +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +static struct v3d_job * +to_v3d_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_job, base); +} + +static void +v3d_job_free(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + + v3d_exec_put(job->exec); +} + +/** + * Returns the fences that the bin job depends on, one by one. + * v3d_job_run() won't be called until all of them have been signaled. + */ +static struct dma_fence * +v3d_job_dependency(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_exec_info *exec = job->exec; + enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER; + struct dma_fence *fence; + + fence = job->in_fence; + if (fence) { + job->in_fence = NULL; + return fence; + } + + if (q == V3D_RENDER) { + /* If we had a bin job, the render job definitely depends on + * it. We first have to wait for bin to be scheduled, so that + * its done_fence is created. + */ + fence = exec->bin_done_fence; + if (fence) { + exec->bin_done_fence = NULL; + return fence; + } + } + + /* XXX: Wait on a fence for switching the GMP if necessary, + * and then do so. + */ + + return fence; +} + +static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_exec_info *exec = job->exec; + enum v3d_queue q = job == &exec->bin ? 
V3D_BIN : V3D_RENDER; + struct v3d_dev *v3d = exec->v3d; + struct drm_device *dev = &v3d->drm; + struct dma_fence *fence; + unsigned long irqflags; + + if (unlikely(job->base.s_fence->finished.error)) + return NULL; + + /* Lock required around bin_job update vs + * v3d_overflow_mem_work(). + */ + spin_lock_irqsave(&v3d->job_lock, irqflags); + if (q == V3D_BIN) { + v3d->bin_job = job->exec; + + /* Clear out the overflow allocation, so we don't + * reuse the overflow attached to a previous job. + */ + V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0); + } else { + v3d->render_job = job->exec; + } + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + + /* Can we avoid this flush when q==RENDER? We need to be + * careful of scheduling, though -- imagine job0 rendering to + * texture and job1 reading, and them being executed as bin0, + * bin1, render0, render1, so that render1's flush at bin time + * wasn't enough. + */ + v3d_invalidate_caches(v3d); + + fence = v3d_fence_create(v3d, q); + if (!fence) + return fence; + + if (job->done_fence) + dma_fence_put(job->done_fence); + job->done_fence = dma_fence_get(fence); + + trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno, + job->start, job->end); + + if (q == V3D_BIN) { + if (exec->qma) { + V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma); + V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms); + } + if (exec->qts) { + V3D_CORE_WRITE(0, V3D_CLE_CT0QTS, + V3D_CLE_CT0QTS_ENABLE | + exec->qts); + } + } else { + /* XXX: Set the QCFG */ + } + + /* Set the current and end address of the control list. + * Writing the end register is what starts the job. 
+ */ + V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start); + V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end); + + return fence; +} + +static void +v3d_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_exec_info *exec = job->exec; + struct v3d_dev *v3d = exec->v3d; + enum v3d_queue q; + + mutex_lock(&v3d->reset_lock); + + /* block scheduler */ + for (q = 0; q < V3D_MAX_QUEUES; q++) { + struct drm_gpu_scheduler *sched = &v3d->queue[q].sched; + + kthread_park(sched->thread); + drm_sched_hw_job_reset(sched, (sched_job->sched == sched ? + sched_job : NULL)); + } + + /* get the GPU back into the init state */ + v3d_reset(v3d); + + /* Unblock schedulers and restart their jobs. */ + for (q = 0; q < V3D_MAX_QUEUES; q++) { + drm_sched_job_recovery(&v3d->queue[q].sched); + kthread_unpark(v3d->queue[q].sched.thread); + } + + mutex_unlock(&v3d->reset_lock); +} + +static const struct drm_sched_backend_ops v3d_sched_ops = { + .dependency = v3d_job_dependency, + .run_job = v3d_job_run, + .timedout_job = v3d_job_timedout, + .free_job = v3d_job_free +}; + +int +v3d_sched_init(struct v3d_dev *v3d) +{ + int hw_jobs_limit = 1; + int job_hang_limit = 0; + int hang_limit_ms = 500; + int ret; + + ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, + &v3d_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), + "v3d_bin"); + if (ret) { + dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret); + return ret; + } + + ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, + &v3d_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), + "v3d_render"); + if (ret) { + dev_err(v3d->dev, "Failed to create render scheduler: %d.", + ret); + drm_sched_fini(&v3d->queue[V3D_BIN].sched); + return ret; + } + + return 0; +} + +void +v3d_sched_fini(struct v3d_dev *v3d) +{ + enum v3d_queue q; + + for (q = 0; q < V3D_MAX_QUEUES; q++) + drm_sched_fini(&v3d->queue[q].sched); +} diff --git 
a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h new file mode 100644 index 000000000000..85dd351e1e09 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_trace.h @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +#if !defined(_V3D_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _V3D_TRACE_H_ + +#include +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM v3d +#define TRACE_INCLUDE_FILE v3d_trace + +TRACE_EVENT(v3d_submit_cl, + TP_PROTO(struct drm_device *dev, bool is_render, + uint64_t seqno, + u32 ctnqba, u32 ctnqea), + TP_ARGS(dev, is_render, seqno, ctnqba, ctnqea), + + TP_STRUCT__entry( + __field(u32, dev) + __field(bool, is_render) + __field(u64, seqno) + __field(u32, ctnqba) + __field(u32, ctnqea) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->is_render = is_render; + __entry->seqno = seqno; + __entry->ctnqba = ctnqba; + __entry->ctnqea = ctnqea; + ), + + TP_printk("dev=%u, %s, seqno=%llu, 0x%08x..0x%08x", + __entry->dev, + __entry->is_render ? "RCL" : "BCL", + __entry->seqno, + __entry->ctnqba, + __entry->ctnqea) +); + +TRACE_EVENT(v3d_reset_begin, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +TRACE_EVENT(v3d_reset_end, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +#endif /* _V3D_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#include diff --git a/drivers/gpu/drm/v3d/v3d_trace_points.c b/drivers/gpu/drm/v3d/v3d_trace_points.c new file mode 100644 index 000000000000..482922d7c7e1 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_trace_points.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015 Broadcom */ + +#include "v3d_drv.h" + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "v3d_trace.h" +#endif diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h new file mode 100644 index 000000000000..7b6627783608 --- /dev/null +++ b/include/uapi/drm/v3d_drm.h @@ -0,0 +1,194 @@ +/* + * Copyright © 2014-2018 Broadcom + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef _V3D_DRM_H_ +#define _V3D_DRM_H_ + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#define DRM_V3D_SUBMIT_CL 0x00 +#define DRM_V3D_WAIT_BO 0x01 +#define DRM_V3D_CREATE_BO 0x02 +#define DRM_V3D_MMAP_BO 0x03 +#define DRM_V3D_GET_PARAM 0x04 +#define DRM_V3D_GET_BO_OFFSET 0x05 + +#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) +#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) +#define DRM_IOCTL_V3D_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo) +#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo) +#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param) +#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) + +/** + * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D + * engine. + * + * This asks the kernel to have the GPU execute an optional binner + * command list, and a render command list. + */ +struct drm_v3d_submit_cl { + /* Pointer to the binner command list. + * + * This is the first set of commands executed, which runs the + * coordinate shader to determine where primitives land on the screen, + * then writes out the state updates and draw calls necessary per tile + * to the tile allocation BO. + */ + __u32 bcl_start; + + /** End address of the BCL (first byte after the BCL) */ + __u32 bcl_end; + + /* Offset of the render command list. + * + * This is the second set of commands executed, which will either + * execute the tiles that have been set up by the BCL, or a fixed set + * of tiles (in the case of RCL-only blits). + */ + __u32 rcl_start; + + /** End address of the RCL (first byte after the RCL) */ + __u32 rcl_end; + + /** An optional sync object to wait on before starting the BCL. 
*/ + __u32 in_sync_bcl; + /** An optional sync object to wait on before starting the RCL. */ + __u32 in_sync_rcl; + /** An optional sync object to place the completion fence in. */ + __u32 out_sync; + + /* Offset of the tile alloc memory + * + * This is optional on V3D 3.3 (where the CL can set the value) but + * required on V3D 4.1. + */ + __u32 qma; + + /** Size of the tile alloc memory. */ + __u32 qms; + + /** Offset of the tile state data array. */ + __u32 qts; + + /* Pointer to a u32 array of the BOs that are referenced by the job. + */ + __u64 bo_handles; + + /* Number of BO handles passed in (size is that times 4). */ + __u32 bo_handle_count; + + /* Pad, must be zero-filled. */ + __u32 pad; +}; + +/** + * struct drm_v3d_wait_bo - ioctl argument for waiting for + * completion of the last DRM_V3D_SUBMIT_CL on a BO. + * + * This is useful for cases where multiple processes might be + * rendering to a BO and you want to wait for all rendering to be + * completed. + */ +struct drm_v3d_wait_bo { + __u32 handle; + __u32 pad; + __u64 timeout_ns; +}; + +/** + * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs. + * + * There are currently no values for the flags argument, but it may be + * used in a future extension. + */ +struct drm_v3d_create_bo { + __u32 size; + __u32 flags; + /** Returned GEM handle for the BO. */ + __u32 handle; + /** + * Returned offset for the BO in the V3D address space. This offset + * is private to the DRM fd and is valid for the lifetime of the GEM + * handle. + * + * This offset value will always be nonzero, since various HW + * units treat 0 specially. + */ + __u32 offset; +}; + +/** + * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs. + * + * This doesn't actually perform an mmap. Instead, it returns the + * offset you need to use in an mmap on the DRM device node. This + * means that tools like valgrind end up knowing about the mapped + * memory. 
+ * + * There are currently no values for the flags argument, but it may be + * used in a future extension. + */ +struct drm_v3d_mmap_bo { + /** Handle for the object being mapped. */ + __u32 handle; + __u32 flags; + /** offset into the drm node to use for subsequent mmap call. */ + __u64 offset; +}; + +enum drm_v3d_param { + DRM_V3D_PARAM_V3D_UIFCFG, + DRM_V3D_PARAM_V3D_HUB_IDENT1, + DRM_V3D_PARAM_V3D_HUB_IDENT2, + DRM_V3D_PARAM_V3D_HUB_IDENT3, + DRM_V3D_PARAM_V3D_CORE0_IDENT0, + DRM_V3D_PARAM_V3D_CORE0_IDENT1, + DRM_V3D_PARAM_V3D_CORE0_IDENT2, +}; + +struct drm_v3d_get_param { + __u32 param; + __u32 pad; + __u64 value; +}; + +/** + * Returns the offset for the BO in the V3D address space for this DRM fd. + * This is the same value returned by drm_v3d_create_bo, if that was called + * from this DRM fd. + */ +struct drm_v3d_get_bo_offset { + __u32 handle; + __u32 offset; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* _V3D_DRM_H_ */ -- cgit v1.2.3 From 5cebaac609744414463d1ecc28fdecd26c1b9bc1 Mon Sep 17 00:00:00 2001 From: Matt Ranostay Date: Fri, 6 Apr 2018 18:52:31 -0400 Subject: media: video-i2c: add video-i2c driver There are several thermal sensors that only have a low-speed bus interface but output valid video data. This patchset enables support for the AMG88xx "Grid-Eye" sensor family. 
Signed-off-by: Matt Ranostay Acked-by: Sakari Ailus [hans.verkuil@cisco.com: split up int ret = ...->xfer(); line] Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 6 + drivers/media/i2c/Kconfig | 13 + drivers/media/i2c/Makefile | 1 + drivers/media/i2c/video-i2c.c | 564 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 584 insertions(+) create mode 100644 drivers/media/i2c/video-i2c.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..0a919a84d344 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14922,6 +14922,12 @@ L: linux-media@vger.kernel.org S: Maintained F: drivers/media/platform/video-mux.c +VIDEO I2C POLLING DRIVER +M: Matt Ranostay +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/video-i2c.c + VIDEOBUF2 FRAMEWORK M: Pawel Osciak M: Marek Szyprowski diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 541f0d28afd8..faaaceb94832 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -974,6 +974,19 @@ config VIDEO_M52790 To compile this driver as a module, choose M here: the module will be called m52790. 
+ +config VIDEO_I2C + tristate "I2C transport video support" + depends on VIDEO_V4L2 && I2C + select VIDEOBUF2_VMALLOC + ---help--- + Enable the I2C transport video support which supports the + following: + * Panasonic AMG88xx Grid-Eye Sensors + + To compile this driver as a module, choose M here: the + module will be called video-i2c + endmenu menu "Sensors used on soc_camera driver" diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index ea34aee1a85a..84cc472238ef 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -96,6 +96,7 @@ obj-$(CONFIG_VIDEO_LM3646) += lm3646.o obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o obj-$(CONFIG_VIDEO_AK881X) += ak881x.o obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o +obj-$(CONFIG_VIDEO_I2C) += video-i2c.o obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o obj-$(CONFIG_VIDEO_OV2659) += ov2659.o obj-$(CONFIG_VIDEO_TC358743) += tc358743.o diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c new file mode 100644 index 000000000000..971eb46c87f6 --- /dev/null +++ b/drivers/media/i2c/video-i2c.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * video-i2c.c - Support for I2C transport video devices + * + * Copyright (C) 2018 Matt Ranostay + * + * Supported: + * - Panasonic AMG88xx Grid-Eye Sensors + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define VIDEO_I2C_DRIVER "video-i2c" + +struct video_i2c_chip; + +struct video_i2c_buffer { + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +struct video_i2c_data { + struct i2c_client *client; + const struct video_i2c_chip *chip; + struct mutex lock; + spinlock_t slock; + unsigned int sequence; + struct mutex queue_lock; + + struct v4l2_device v4l2_dev; + struct video_device vdev; + struct vb2_queue vb_vidq; + + struct task_struct *kthread_vid_cap; + struct list_head 
vid_cap_active; +}; + +const static struct v4l2_fmtdesc amg88xx_format = { + .pixelformat = V4L2_PIX_FMT_Y12, +}; + +const static struct v4l2_frmsize_discrete amg88xx_size = { + .width = 8, + .height = 8, +}; + +struct video_i2c_chip { + /* video dimensions */ + const struct v4l2_fmtdesc *format; + const struct v4l2_frmsize_discrete *size; + + /* max frames per second */ + unsigned int max_fps; + + /* pixel buffer size */ + unsigned int buffer_size; + + /* pixel size in bits */ + unsigned int bpp; + + /* xfer function */ + int (*xfer)(struct video_i2c_data *data, char *buf); +}; + +static int amg88xx_xfer(struct video_i2c_data *data, char *buf) +{ + struct i2c_client *client = data->client; + struct i2c_msg msg[2]; + u8 reg = 0x80; + int ret; + + msg[0].addr = client->addr; + msg[0].flags = 0; + msg[0].len = 1; + msg[0].buf = (char *)® + + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = data->chip->buffer_size; + msg[1].buf = (char *)buf; + + ret = i2c_transfer(client->adapter, msg, 2); + + return (ret == 2) ? 0 : -EIO; +} + +#define AMG88XX 0 + +static const struct video_i2c_chip video_i2c_chip[] = { + [AMG88XX] = { + .size = &amg88xx_size, + .format = &amg88xx_format, + .max_fps = 10, + .buffer_size = 128, + .bpp = 16, + .xfer = &amg88xx_xfer, + }, +}; + +static const struct v4l2_file_operations video_i2c_fops = { + .owner = THIS_MODULE, + .open = v4l2_fh_open, + .release = vb2_fop_release, + .poll = vb2_fop_poll, + .read = vb2_fop_read, + .mmap = vb2_fop_mmap, + .unlocked_ioctl = video_ioctl2, +}; + +static int queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct video_i2c_data *data = vb2_get_drv_priv(vq); + unsigned int size = data->chip->buffer_size; + + if (vq->num_buffers + *nbuffers < 2) + *nbuffers = 2; + + if (*nplanes) + return sizes[0] < size ? 
-EINVAL : 0; + + *nplanes = 1; + sizes[0] = size; + + return 0; +} + +static int buffer_prepare(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct video_i2c_data *data = vb2_get_drv_priv(vb->vb2_queue); + unsigned int size = data->chip->buffer_size; + + if (vb2_plane_size(vb, 0) < size) + return -EINVAL; + + vbuf->field = V4L2_FIELD_NONE; + vb2_set_plane_payload(vb, 0, size); + + return 0; +} + +static void buffer_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct video_i2c_data *data = vb2_get_drv_priv(vb->vb2_queue); + struct video_i2c_buffer *buf = + container_of(vbuf, struct video_i2c_buffer, vb); + + spin_lock(&data->slock); + list_add_tail(&buf->list, &data->vid_cap_active); + spin_unlock(&data->slock); +} + +static int video_i2c_thread_vid_cap(void *priv) +{ + struct video_i2c_data *data = priv; + unsigned int delay = msecs_to_jiffies(1000 / data->chip->max_fps); + + set_freezable(); + + do { + unsigned long start_jiffies = jiffies; + struct video_i2c_buffer *vid_cap_buf = NULL; + int schedule_delay; + + try_to_freeze(); + + spin_lock(&data->slock); + + if (!list_empty(&data->vid_cap_active)) { + vid_cap_buf = list_last_entry(&data->vid_cap_active, + struct video_i2c_buffer, list); + list_del(&vid_cap_buf->list); + } + + spin_unlock(&data->slock); + + if (vid_cap_buf) { + struct vb2_buffer *vb2_buf = &vid_cap_buf->vb.vb2_buf; + void *vbuf = vb2_plane_vaddr(vb2_buf, 0); + int ret; + + ret = data->chip->xfer(data, vbuf); + vb2_buf->timestamp = ktime_get_ns(); + vid_cap_buf->vb.sequence = data->sequence++; + vb2_buffer_done(vb2_buf, ret ? 
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); + } + + schedule_delay = delay - (jiffies - start_jiffies); + + if (time_after(jiffies, start_jiffies + delay)) + schedule_delay = delay; + + schedule_timeout_interruptible(schedule_delay); + } while (!kthread_should_stop()); + + return 0; +} + +static void video_i2c_del_list(struct vb2_queue *vq, enum vb2_buffer_state state) +{ + struct video_i2c_data *data = vb2_get_drv_priv(vq); + struct video_i2c_buffer *buf, *tmp; + + spin_lock(&data->slock); + + list_for_each_entry_safe(buf, tmp, &data->vid_cap_active, list) { + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, state); + } + + spin_unlock(&data->slock); +} + +static int start_streaming(struct vb2_queue *vq, unsigned int count) +{ + struct video_i2c_data *data = vb2_get_drv_priv(vq); + + if (data->kthread_vid_cap) + return 0; + + data->sequence = 0; + data->kthread_vid_cap = kthread_run(video_i2c_thread_vid_cap, data, + "%s-vid-cap", data->v4l2_dev.name); + if (!IS_ERR(data->kthread_vid_cap)) + return 0; + + video_i2c_del_list(vq, VB2_BUF_STATE_QUEUED); + + return PTR_ERR(data->kthread_vid_cap); +} + +static void stop_streaming(struct vb2_queue *vq) +{ + struct video_i2c_data *data = vb2_get_drv_priv(vq); + + if (data->kthread_vid_cap == NULL) + return; + + kthread_stop(data->kthread_vid_cap); + data->kthread_vid_cap = NULL; + + video_i2c_del_list(vq, VB2_BUF_STATE_ERROR); +} + +static struct vb2_ops video_i2c_video_qops = { + .queue_setup = queue_setup, + .buf_prepare = buffer_prepare, + .buf_queue = buffer_queue, + .start_streaming = start_streaming, + .stop_streaming = stop_streaming, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +static int video_i2c_querycap(struct file *file, void *priv, + struct v4l2_capability *vcap) +{ + struct video_i2c_data *data = video_drvdata(file); + struct i2c_client *client = data->client; + + strlcpy(vcap->driver, data->v4l2_dev.name, sizeof(vcap->driver)); + strlcpy(vcap->card, 
data->vdev.name, sizeof(vcap->card)); + + sprintf(vcap->bus_info, "I2C:%d-%d", client->adapter->nr, client->addr); + + return 0; +} + +static int video_i2c_g_input(struct file *file, void *fh, unsigned int *inp) +{ + *inp = 0; + + return 0; +} + +static int video_i2c_s_input(struct file *file, void *fh, unsigned int inp) +{ + return (inp > 0) ? -EINVAL : 0; +} + +static int video_i2c_enum_input(struct file *file, void *fh, + struct v4l2_input *vin) +{ + if (vin->index > 0) + return -EINVAL; + + strlcpy(vin->name, "Camera", sizeof(vin->name)); + + vin->type = V4L2_INPUT_TYPE_CAMERA; + + return 0; +} + +static int video_i2c_enum_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_fmtdesc *fmt) +{ + struct video_i2c_data *data = video_drvdata(file); + enum v4l2_buf_type type = fmt->type; + + if (fmt->index > 0) + return -EINVAL; + + *fmt = *data->chip->format; + fmt->type = type; + + return 0; +} + +static int video_i2c_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + const struct video_i2c_data *data = video_drvdata(file); + const struct v4l2_frmsize_discrete *size = data->chip->size; + + /* currently only one frame size is allowed */ + if (fsize->index > 0) + return -EINVAL; + + if (fsize->pixel_format != data->chip->format->pixelformat) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; + fsize->discrete.width = size->width; + fsize->discrete.height = size->height; + + return 0; +} + +static int video_i2c_enum_frameintervals(struct file *file, void *priv, + struct v4l2_frmivalenum *fe) +{ + const struct video_i2c_data *data = video_drvdata(file); + const struct v4l2_frmsize_discrete *size = data->chip->size; + + if (fe->index > 0) + return -EINVAL; + + if (fe->width != size->width || fe->height != size->height) + return -EINVAL; + + fe->type = V4L2_FRMIVAL_TYPE_DISCRETE; + fe->discrete.numerator = 1; + fe->discrete.denominator = data->chip->max_fps; + + return 0; +} + +static int video_i2c_try_fmt_vid_cap(struct 
file *file, void *fh, + struct v4l2_format *fmt) +{ + const struct video_i2c_data *data = video_drvdata(file); + const struct v4l2_frmsize_discrete *size = data->chip->size; + struct v4l2_pix_format *pix = &fmt->fmt.pix; + unsigned int bpp = data->chip->bpp / 8; + + pix->width = size->width; + pix->height = size->height; + pix->pixelformat = data->chip->format->pixelformat; + pix->field = V4L2_FIELD_NONE; + pix->bytesperline = pix->width * bpp; + pix->sizeimage = pix->bytesperline * pix->height; + pix->colorspace = V4L2_COLORSPACE_RAW; + + return 0; +} + +static int video_i2c_s_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *fmt) +{ + struct video_i2c_data *data = video_drvdata(file); + + if (vb2_is_busy(&data->vb_vidq)) + return -EBUSY; + + return video_i2c_try_fmt_vid_cap(file, fh, fmt); +} + +static int video_i2c_g_parm(struct file *filp, void *priv, + struct v4l2_streamparm *parm) +{ + struct video_i2c_data *data = video_drvdata(filp); + + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + parm->parm.capture.readbuffers = 1; + parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + parm->parm.capture.timeperframe.numerator = 1; + parm->parm.capture.timeperframe.denominator = data->chip->max_fps; + + return 0; +} + +static const struct v4l2_ioctl_ops video_i2c_ioctl_ops = { + .vidioc_querycap = video_i2c_querycap, + .vidioc_g_input = video_i2c_g_input, + .vidioc_s_input = video_i2c_s_input, + .vidioc_enum_input = video_i2c_enum_input, + .vidioc_enum_fmt_vid_cap = video_i2c_enum_fmt_vid_cap, + .vidioc_enum_framesizes = video_i2c_enum_framesizes, + .vidioc_enum_frameintervals = video_i2c_enum_frameintervals, + .vidioc_g_fmt_vid_cap = video_i2c_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = video_i2c_s_fmt_vid_cap, + .vidioc_g_parm = video_i2c_g_parm, + .vidioc_s_parm = video_i2c_g_parm, + .vidioc_try_fmt_vid_cap = video_i2c_try_fmt_vid_cap, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + 
.vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, +}; + +static void video_i2c_release(struct video_device *vdev) +{ + kfree(video_get_drvdata(vdev)); +} + +static int video_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct video_i2c_data *data; + struct v4l2_device *v4l2_dev; + struct vb2_queue *queue; + int ret = -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (dev_fwnode(&client->dev)) + data->chip = device_get_match_data(&client->dev); + else if (id) + data->chip = &video_i2c_chip[id->driver_data]; + else + goto error_free_device; + + data->client = client; + v4l2_dev = &data->v4l2_dev; + strlcpy(v4l2_dev->name, VIDEO_I2C_DRIVER, sizeof(v4l2_dev->name)); + + ret = v4l2_device_register(&client->dev, v4l2_dev); + if (ret < 0) + goto error_free_device; + + mutex_init(&data->lock); + mutex_init(&data->queue_lock); + + queue = &data->vb_vidq; + queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + queue->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR | VB2_READ; + queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + queue->drv_priv = data; + queue->buf_struct_size = sizeof(struct video_i2c_buffer); + queue->min_buffers_needed = 1; + queue->ops = &video_i2c_video_qops; + queue->mem_ops = &vb2_vmalloc_memops; + + ret = vb2_queue_init(queue); + if (ret < 0) + goto error_unregister_device; + + data->vdev.queue = queue; + data->vdev.queue->lock = &data->queue_lock; + + snprintf(data->vdev.name, sizeof(data->vdev.name), + "I2C %d-%d Transport Video", + client->adapter->nr, client->addr); + + data->vdev.v4l2_dev = v4l2_dev; + data->vdev.fops = &video_i2c_fops; + data->vdev.lock = &data->lock; + data->vdev.ioctl_ops = &video_i2c_ioctl_ops; + data->vdev.release = video_i2c_release; + data->vdev.device_caps = 
V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; + + spin_lock_init(&data->slock); + INIT_LIST_HEAD(&data->vid_cap_active); + + video_set_drvdata(&data->vdev, data); + i2c_set_clientdata(client, data); + + ret = video_register_device(&data->vdev, VFL_TYPE_GRABBER, -1); + if (ret < 0) + goto error_unregister_device; + + return 0; + +error_unregister_device: + v4l2_device_unregister(v4l2_dev); + mutex_destroy(&data->lock); + mutex_destroy(&data->queue_lock); + +error_free_device: + kfree(data); + + return ret; +} + +static int video_i2c_remove(struct i2c_client *client) +{ + struct video_i2c_data *data = i2c_get_clientdata(client); + + video_unregister_device(&data->vdev); + v4l2_device_unregister(&data->v4l2_dev); + + mutex_destroy(&data->lock); + mutex_destroy(&data->queue_lock); + + return 0; +} + +static const struct i2c_device_id video_i2c_id_table[] = { + { "amg88xx", AMG88XX }, + {} +}; +MODULE_DEVICE_TABLE(i2c, video_i2c_id_table); + +static const struct of_device_id video_i2c_of_match[] = { + { .compatible = "panasonic,amg88xx", .data = &video_i2c_chip[AMG88XX] }, + {} +}; +MODULE_DEVICE_TABLE(of, video_i2c_of_match); + +static struct i2c_driver video_i2c_driver = { + .driver = { + .name = VIDEO_I2C_DRIVER, + .of_match_table = video_i2c_of_match, + }, + .probe = video_i2c_probe, + .remove = video_i2c_remove, + .id_table = video_i2c_id_table, +}; + +module_i2c_driver(video_i2c_driver); + +MODULE_AUTHOR("Matt Ranostay "); +MODULE_DESCRIPTION("I2C transport video support"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From f13678960fb594f192fbcfffa510ef9342f3ff38 Mon Sep 17 00:00:00 2001 From: Tomohiro Kusumi Date: Fri, 4 May 2018 16:45:28 -0700 Subject: scsi: mpt3sas: remove obsolete path "drivers/scsi/mpt2sas/" from MAINTAINERS drivers/scsi/mpt2sas/ no longer exists after commit c84b06a48c ("mpt3sas: Single driver module which supports both SAS 2.0 & SAS 3.0 HBAs") merged/removed it. Signed-off-by: Tomohiro Kusumi Signed-off-by: Martin K. 
Petersen --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..f2eaf4d2223b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8335,7 +8335,6 @@ L: linux-scsi@vger.kernel.org W: http://www.avagotech.com/support/ S: Supported F: drivers/message/fusion/ -F: drivers/scsi/mpt2sas/ F: drivers/scsi/mpt3sas/ LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers -- cgit v1.2.3 From 8878302ebbc580d64f390c0acc509e5e8276598c Mon Sep 17 00:00:00 2001 From: Ondrej Jirman Date: Mon, 7 May 2018 20:29:41 +0800 Subject: regulator: add support for SY8106A regulator SY8106A is an I2C attached single output regulator made by Silergy Corp, which is used on several Allwinner H3/H5 SBCs to control the power supply of the ARM cores. Add a driver for it. Signed-off-by: Ondrej Jirman [Icenowy: Change commit message, remove enable/disable code, add default ramp_delay, add comment for go bit, add code for fixed mode voltage] Signed-off-by: Icenowy Zheng Reviewed-by: Chen-Yu Tsai Signed-off-by: Mark Brown --- MAINTAINERS | 6 ++ drivers/regulator/Kconfig | 7 ++ drivers/regulator/Makefile | 2 +- drivers/regulator/sy8106a-regulator.c | 167 ++++++++++++++++++++++++++++++++++ 4 files changed, 181 insertions(+), 1 deletion(-) create mode 100644 drivers/regulator/sy8106a-regulator.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..971300930067 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13461,6 +13461,12 @@ S: Supported F: net/switchdev/ F: include/net/switchdev.h +SY8106A REGULATOR DRIVER +M: Icenowy Zheng +S: Maintained +F: drivers/regulator/sy8106a-regulator.c +F: Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt + SYNC FILE FRAMEWORK M: Sumit Semwal R: Gustavo Padovan diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 097f61784a7d..4efae3b7e746 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -801,6 +801,13 @@ 
config REGULATOR_STW481X_VMMC This driver supports the internal VMMC regulator in the STw481x PMIC chips. +config REGULATOR_SY8106A + tristate "Silergy SY8106A regulator" + depends on I2C && (OF || COMPILE_TEST) + select REGMAP_I2C + help + This driver supports SY8106A single output regulator. + config REGULATOR_TPS51632 tristate "TI TPS51632 Power Regulator" depends on I2C diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 590674fbecd7..d81fb02bd6e9 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -100,6 +100,7 @@ obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o +obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o @@ -125,5 +126,4 @@ obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o - ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c new file mode 100644 index 000000000000..65fbd1f0b612 --- /dev/null +++ b/drivers/regulator/sy8106a-regulator.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// sy8106a-regulator.c - Regulator device driver for SY8106A +// +// Copyright (C) 2016 Ondřej Jirman +// Copyright (c) 2017-2018 Icenowy Zheng + +#include +#include +#include +#include +#include +#include + +#define SY8106A_REG_VOUT1_SEL 0x01 +#define SY8106A_REG_VOUT_COM 0x02 +#define SY8106A_REG_VOUT1_SEL_MASK 0x7f +#define SY8106A_DISABLE_REG BIT(0) +/* + * The I2C controlled voltage will only work when this bit is set; otherwise + * it will behave like a fixed regulator. 
+ */ +#define SY8106A_GO_BIT BIT(7) + +struct sy8106a { + struct regulator_dev *rdev; + struct regmap *regmap; + u32 fixed_voltage; +}; + +static const struct regmap_config sy8106a_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static const struct regulator_ops sy8106a_ops = { + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear, + /* Enabling/disabling the regulator is not yet implemented */ +}; + +/* Default limits measured in millivolts */ +#define SY8106A_MIN_MV 680 +#define SY8106A_MAX_MV 1950 +#define SY8106A_STEP_MV 10 + +static const struct regulator_desc sy8106a_reg = { + .name = "SY8106A", + .id = 0, + .ops = &sy8106a_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = ((SY8106A_MAX_MV - SY8106A_MIN_MV) / SY8106A_STEP_MV) + 1, + .min_uV = (SY8106A_MIN_MV * 1000), + .uV_step = (SY8106A_STEP_MV * 1000), + .vsel_reg = SY8106A_REG_VOUT1_SEL, + .vsel_mask = SY8106A_REG_VOUT1_SEL_MASK, + /* + * This ramp_delay is a conservative default value which works on + * H3/H5 boards VDD-CPUX situations. 
+ */ + .ramp_delay = 200, + .owner = THIS_MODULE, +}; + +/* + * I2C driver interface functions + */ +static int sy8106a_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct sy8106a *chip; + struct device *dev = &i2c->dev; + struct regulator_dev *rdev = NULL; + struct regulator_config config = { }; + unsigned int reg, vsel; + int error; + + chip = devm_kzalloc(&i2c->dev, sizeof(struct sy8106a), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + error = of_property_read_u32(dev->of_node, "silergy,fixed-microvolt", + &chip->fixed_voltage); + if (error) + return error; + + if (chip->fixed_voltage < SY8106A_MIN_MV * 1000 || + chip->fixed_voltage > SY8106A_MAX_MV * 1000) + return -EINVAL; + + chip->regmap = devm_regmap_init_i2c(i2c, &sy8106a_regmap_config); + if (IS_ERR(chip->regmap)) { + error = PTR_ERR(chip->regmap); + dev_err(dev, "Failed to allocate register map: %d\n", error); + return error; + } + + config.dev = &i2c->dev; + config.regmap = chip->regmap; + config.driver_data = chip; + + config.of_node = dev->of_node; + config.init_data = of_get_regulator_init_data(dev, dev->of_node, + &sy8106a_reg); + + if (!config.init_data) + return -ENOMEM; + + /* Ensure GO_BIT is enabled when probing */ + error = regmap_read(chip->regmap, SY8106A_REG_VOUT1_SEL, ®); + if (error) + return error; + + if (!(reg & SY8106A_GO_BIT)) { + vsel = (chip->fixed_voltage / 1000 - SY8106A_MIN_MV) / + SY8106A_STEP_MV; + + error = regmap_write(chip->regmap, SY8106A_REG_VOUT1_SEL, + vsel | SY8106A_GO_BIT); + if (error) + return error; + } + + /* Probe regulator */ + rdev = devm_regulator_register(&i2c->dev, &sy8106a_reg, &config); + if (IS_ERR(rdev)) { + error = PTR_ERR(rdev); + dev_err(&i2c->dev, "Failed to register SY8106A regulator: %d\n", error); + return error; + } + + chip->rdev = rdev; + + i2c_set_clientdata(i2c, chip); + + return 0; +} + +static const struct of_device_id sy8106a_i2c_of_match[] = { + { .compatible = "silergy,sy8106a" }, + { }, +}; 
+MODULE_DEVICE_TABLE(of, sy8106a_i2c_of_match); + +static const struct i2c_device_id sy8106a_i2c_id[] = { + { "sy8106a", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, sy8106a_i2c_id); + +static struct i2c_driver sy8106a_regulator_driver = { + .driver = { + .name = "sy8106a", + .of_match_table = of_match_ptr(sy8106a_i2c_of_match), + }, + .probe = sy8106a_i2c_probe, + .id_table = sy8106a_i2c_id, +}; + +module_i2c_driver(sy8106a_regulator_driver); + +MODULE_AUTHOR("Ondřej Jirman "); +MODULE_AUTHOR("Icenowy Zheng "); +MODULE_DESCRIPTION("Regulator device driver for Silergy SY8106A"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 2ff0e41ac5a7b5c3f1ac1a82f4bbf6083ae09d8d Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Fri, 27 Apr 2018 18:31:20 -0700 Subject: MAINTAINERS: add keyword for devicetree overlay notifiers Devicetree overlay notifiers have a chance to potentially get pointers into the overlay unflattened devicetree and overlay FDT. The only protection against these pointers being accessed after the underlying data has been released by kfree() is by source code review of patches. Add a keyword line to the devicetree overlay maintainers entry to try to catch overlay notifier related patches. The keyword line is added to the devicetree overlay entry instead of the devicetree entry so that not all maintainers will receive the additional review traffic. Add Frank Rowand (already a maintainer in the devicetree entry) so that he will receive the additional review traffic. 
Signed-off-by: Frank Rowand Signed-off-by: Rob Herring --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..1dce07f4e1fc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10394,12 +10394,14 @@ F: drivers/infiniband/ulp/opa_vnic OPEN FIRMWARE AND DEVICE TREE OVERLAYS M: Pantelis Antoniou +M: Frank Rowand L: devicetree@vger.kernel.org S: Maintained F: Documentation/devicetree/dynamic-resolution-notes.txt F: Documentation/devicetree/overlay-notes.txt F: drivers/of/overlay.c F: drivers/of/resolver.c +K: of_overlay_notifier_ OPEN FIRMWARE AND FLATTENED DEVICE TREE M: Rob Herring -- cgit v1.2.3 From e7330fa032bb5e5dd393db35ab544ac8e52e9e62 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 9 May 2018 13:23:38 -0700 Subject: Input: add support for ChipOne icn8505 based touchscreens The ChipOne icn8505 is an i2c capacitive touchscreen controller typically used in cheap x86 tablets, this commit adds a driver for it. Note the icn8505 is somewhat similar to the icn8318 and I started with modifying that driver to support both, but in the end the differences were too large and I decided to write a new driver instead. 
Signed-off-by: Hans de Goede Signed-off-by: Dmitry Torokhov --- MAINTAINERS | 6 + drivers/input/touchscreen/Kconfig | 11 + drivers/input/touchscreen/Makefile | 1 + drivers/input/touchscreen/chipone_icn8505.c | 520 ++++++++++++++++++++++++++++ 4 files changed, 538 insertions(+) create mode 100644 drivers/input/touchscreen/chipone_icn8505.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index d4b0b09d2e3f..77759d6b2c3d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3441,6 +3441,12 @@ S: Maintained F: Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt F: drivers/input/touchscreen/chipone_icn8318.c +CHIPONE ICN8505 I2C TOUCHSCREEN DRIVER +M: Hans de Goede +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/input/touchscreen/chipone_icn8505.c + CHROME HARDWARE PLATFORM SUPPORT M: Benson Leung M: Olof Johansson diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 4f15496fec8b..94cc740a4203 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -164,6 +164,17 @@ config TOUCHSCREEN_CHIPONE_ICN8318 To compile this driver as a module, choose M here: the module will be called chipone_icn8318. +config TOUCHSCREEN_CHIPONE_ICN8505 + tristate "chipone icn8505 touchscreen controller" + depends on I2C && ACPI + help + Say Y here if you have a ChipOne icn8505 based I2C touchscreen. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called chipone_icn8505. 
+ config TOUCHSCREEN_CY8CTMG110 tristate "cy8ctmg110 touchscreen" depends on I2C diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index dddae7973436..fd4fd32fb73f 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o +obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o cyttsp_i2c_common.o diff --git a/drivers/input/touchscreen/chipone_icn8505.c b/drivers/input/touchscreen/chipone_icn8505.c new file mode 100644 index 000000000000..c768186ce856 --- /dev/null +++ b/drivers/input/touchscreen/chipone_icn8505.c @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for ChipOne icn8505 i2c touchscreen controller + * + * Copyright (c) 2015-2018 Red Hat Inc. + * + * Red Hat authors: + * Hans de Goede + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Normal operation mode defines */ +#define ICN8505_REG_ADDR_WIDTH 16 + +#define ICN8505_REG_POWER 0x0004 +#define ICN8505_REG_TOUCHDATA 0x1000 +#define ICN8505_REG_CONFIGDATA 0x8000 + +/* ICN8505_REG_POWER commands */ +#define ICN8505_POWER_ACTIVE 0x00 +#define ICN8505_POWER_MONITOR 0x01 +#define ICN8505_POWER_HIBERNATE 0x02 +/* + * The Android driver uses these to turn on/off the charger filter, but the + * filter is way too aggressive making e.g. onscreen keyboards unusable. 
+ */ +#define ICN8505_POWER_ENA_CHARGER_MODE 0x55 +#define ICN8505_POWER_DIS_CHARGER_MODE 0x66 + +#define ICN8505_MAX_TOUCHES 10 + +/* Programming mode defines */ +#define ICN8505_PROG_I2C_ADDR 0x30 +#define ICN8505_PROG_REG_ADDR_WIDTH 24 + +#define MAX_FW_UPLOAD_TRIES 3 + +struct icn8505_touch { + u8 slot; + u8 x[2]; + u8 y[2]; + u8 pressure; /* Seems more like finger width then pressure really */ + u8 event; +/* The difference between 2 and 3 is unclear */ +#define ICN8505_EVENT_NO_DATA 1 /* No finger seen yet since wakeup */ +#define ICN8505_EVENT_UPDATE1 2 /* New or updated coordinates */ +#define ICN8505_EVENT_UPDATE2 3 /* New or updated coordinates */ +#define ICN8505_EVENT_END 4 /* Finger lifted */ +} __packed; + +struct icn8505_touch_data { + u8 softbutton; + u8 touch_count; + struct icn8505_touch touches[ICN8505_MAX_TOUCHES]; +} __packed; + +struct icn8505_data { + struct i2c_client *client; + struct input_dev *input; + struct gpio_desc *wake_gpio; + struct touchscreen_properties prop; + char firmware_name[32]; +}; + +static int icn8505_read_xfer(struct i2c_client *client, u16 i2c_addr, + int reg_addr, int reg_addr_width, + void *data, int len, bool silent) +{ + u8 buf[3]; + int i, ret; + struct i2c_msg msg[2] = { + { + .addr = i2c_addr, + .buf = buf, + .len = reg_addr_width / 8, + }, + { + .addr = i2c_addr, + .flags = I2C_M_RD, + .buf = data, + .len = len, + } + }; + + for (i = 0; i < (reg_addr_width / 8); i++) + buf[i] = (reg_addr >> (reg_addr_width - (i + 1) * 8)) & 0xff; + + ret = i2c_transfer(client->adapter, msg, 2); + if (ret != ARRAY_SIZE(msg)) { + if (ret >= 0) + ret = -EIO; + if (!silent) + dev_err(&client->dev, + "Error reading addr %#x reg %#x: %d\n", + i2c_addr, reg_addr, ret); + return ret; + } + + return 0; +} + +static int icn8505_write_xfer(struct i2c_client *client, u16 i2c_addr, + int reg_addr, int reg_addr_width, + const void *data, int len, bool silent) +{ + u8 buf[3 + 32]; /* 3 bytes for 24 bit reg-addr + 32 bytes max len */ + int i, 
ret; + struct i2c_msg msg = { + .addr = i2c_addr, + .buf = buf, + .len = reg_addr_width / 8 + len, + }; + + if (WARN_ON(len > 32)) + return -EINVAL; + + for (i = 0; i < (reg_addr_width / 8); i++) + buf[i] = (reg_addr >> (reg_addr_width - (i + 1) * 8)) & 0xff; + + memcpy(buf + reg_addr_width / 8, data, len); + + ret = i2c_transfer(client->adapter, &msg, 1); + if (ret != 1) { + if (ret >= 0) + ret = -EIO; + if (!silent) + dev_err(&client->dev, + "Error writing addr %#x reg %#x: %d\n", + i2c_addr, reg_addr, ret); + return ret; + } + + return 0; +} + +static int icn8505_read_data(struct icn8505_data *icn8505, int reg, + void *buf, int len) +{ + return icn8505_read_xfer(icn8505->client, icn8505->client->addr, reg, + ICN8505_REG_ADDR_WIDTH, buf, len, false); +} + +static int icn8505_read_reg_silent(struct icn8505_data *icn8505, int reg) +{ + u8 buf; + int error; + + error = icn8505_read_xfer(icn8505->client, icn8505->client->addr, reg, + ICN8505_REG_ADDR_WIDTH, &buf, 1, true); + if (error) + return error; + + return buf; +} + +static int icn8505_write_reg(struct icn8505_data *icn8505, int reg, u8 val) +{ + return icn8505_write_xfer(icn8505->client, icn8505->client->addr, reg, + ICN8505_REG_ADDR_WIDTH, &val, 1, false); +} + +static int icn8505_read_prog_data(struct icn8505_data *icn8505, int reg, + void *buf, int len) +{ + return icn8505_read_xfer(icn8505->client, ICN8505_PROG_I2C_ADDR, reg, + ICN8505_PROG_REG_ADDR_WIDTH, buf, len, false); +} + +static int icn8505_write_prog_data(struct icn8505_data *icn8505, int reg, + const void *buf, int len) +{ + return icn8505_write_xfer(icn8505->client, ICN8505_PROG_I2C_ADDR, reg, + ICN8505_PROG_REG_ADDR_WIDTH, buf, len, false); +} + +static int icn8505_write_prog_reg(struct icn8505_data *icn8505, int reg, u8 val) +{ + return icn8505_write_xfer(icn8505->client, ICN8505_PROG_I2C_ADDR, reg, + ICN8505_PROG_REG_ADDR_WIDTH, &val, 1, false); +} + +/* + * Note this function uses a number of magic register addresses and values, + * there 
are deliberately no defines for these because the algorithm is taken + * from the icn85xx Android driver and I do not want to make up possibly wrong + * names for the addresses and/or values. + */ +static int icn8505_try_fw_upload(struct icn8505_data *icn8505, + const struct firmware *fw) +{ + struct device *dev = &icn8505->client->dev; + size_t offset, count; + int error; + u8 buf[4]; + u32 crc; + + /* Put the controller in programming mode */ + error = icn8505_write_prog_reg(icn8505, 0xcc3355, 0x5a); + if (error) + return error; + + usleep_range(2000, 5000); + + error = icn8505_write_prog_reg(icn8505, 0x040400, 0x01); + if (error) + return error; + + usleep_range(2000, 5000); + + error = icn8505_read_prog_data(icn8505, 0x040002, buf, 1); + if (error) + return error; + + if (buf[0] != 0x85) { + dev_err(dev, "Failed to enter programming mode\n"); + return -ENODEV; + } + + usleep_range(1000, 5000); + + /* Enable CRC mode */ + error = icn8505_write_prog_reg(icn8505, 0x40028, 1); + if (error) + return error; + + /* Send the firmware to SRAM */ + for (offset = 0; offset < fw->size; offset += count) { + count = min_t(size_t, fw->size - offset, 32); + error = icn8505_write_prog_data(icn8505, offset, + fw->data + offset, count); + if (error) + return error; + } + + /* Disable CRC mode */ + error = icn8505_write_prog_reg(icn8505, 0x40028, 0); + if (error) + return error; + + /* Get and check length and CRC */ + error = icn8505_read_prog_data(icn8505, 0x40034, buf, 2); + if (error) + return error; + + if (get_unaligned_le16(buf) != fw->size) { + dev_warn(dev, "Length mismatch after uploading fw\n"); + return -EIO; + } + + error = icn8505_read_prog_data(icn8505, 0x4002c, buf, 4); + if (error) + return error; + + crc = crc32_be(0, fw->data, fw->size); + if (get_unaligned_le32(buf) != crc) { + dev_warn(dev, "CRC mismatch after uploading fw\n"); + return -EIO; + } + + /* Boot controller from SRAM */ + error = icn8505_write_prog_reg(icn8505, 0x40400, 0x03); + if (error) + return 
error; + + usleep_range(2000, 5000); + return 0; +} + +static int icn8505_upload_fw(struct icn8505_data *icn8505) +{ + struct device *dev = &icn8505->client->dev; + const struct firmware *fw; + int i, error; + + /* + * Always load the firmware, even if we don't need it at boot, we + * we may need it at resume. Having loaded it once will make the + * firmware class code cache it at suspend/resume. + */ + error = request_firmware(&fw, icn8505->firmware_name, dev); + if (error) { + dev_err(dev, "Firmware request error %d\n", error); + return error; + } + + /* Check if the controller is not already up and running */ + if (icn8505_read_reg_silent(icn8505, 0x000a) == 0x85) + goto success; + + for (i = 1; i <= MAX_FW_UPLOAD_TRIES; i++) { + error = icn8505_try_fw_upload(icn8505, fw); + if (!error) + goto success; + + dev_err(dev, "Failed to upload firmware: %d (attempt %d/%d)\n", + error, i, MAX_FW_UPLOAD_TRIES); + usleep_range(2000, 5000); + } + +success: + release_firmware(fw); + return error; +} + +static bool icn8505_touch_active(u8 event) +{ + return event == ICN8505_EVENT_UPDATE1 || + event == ICN8505_EVENT_UPDATE2; +} + +static irqreturn_t icn8505_irq(int irq, void *dev_id) +{ + struct icn8505_data *icn8505 = dev_id; + struct device *dev = &icn8505->client->dev; + struct icn8505_touch_data touch_data; + int i, error; + + error = icn8505_read_data(icn8505, ICN8505_REG_TOUCHDATA, + &touch_data, sizeof(touch_data)); + if (error) { + dev_err(dev, "Error reading touch data: %d\n", error); + return IRQ_HANDLED; + } + + if (touch_data.touch_count > ICN8505_MAX_TOUCHES) { + dev_warn(dev, "Too many touches %d > %d\n", + touch_data.touch_count, ICN8505_MAX_TOUCHES); + touch_data.touch_count = ICN8505_MAX_TOUCHES; + } + + for (i = 0; i < touch_data.touch_count; i++) { + struct icn8505_touch *touch = &touch_data.touches[i]; + bool act = icn8505_touch_active(touch->event); + + input_mt_slot(icn8505->input, touch->slot); + input_mt_report_slot_state(icn8505->input, 
MT_TOOL_FINGER, act); + if (!act) + continue; + + touchscreen_report_pos(icn8505->input, &icn8505->prop, + get_unaligned_le16(touch->x), + get_unaligned_le16(touch->y), + true); + } + + input_mt_sync_frame(icn8505->input); + input_report_key(icn8505->input, KEY_LEFTMETA, + touch_data.softbutton == 1); + input_sync(icn8505->input); + + return IRQ_HANDLED; +} + +static int icn8505_probe_acpi(struct icn8505_data *icn8505, struct device *dev) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + const char *subsys = "unknown"; + struct acpi_device *adev; + union acpi_object *obj; + acpi_status status; + + adev = ACPI_COMPANION(dev); + if (!adev) + return -ENODEV; + + status = acpi_evaluate_object(adev->handle, "_SUB", NULL, &buffer); + if (ACPI_SUCCESS(status)) { + obj = buffer.pointer; + if (obj->type == ACPI_TYPE_STRING) + subsys = obj->string.pointer; + else + dev_warn(dev, "Warning ACPI _SUB did not return a string\n"); + } else { + dev_warn(dev, "Warning ACPI _SUB failed: %#x\n", status); + buffer.pointer = NULL; + } + + snprintf(icn8505->firmware_name, sizeof(icn8505->firmware_name), + "chipone/icn8505-%s.fw", subsys); + + kfree(buffer.pointer); + return 0; +} + +static int icn8505_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct icn8505_data *icn8505; + struct input_dev *input; + __le16 resolution[2]; + int error; + + if (!client->irq) { + dev_err(dev, "No irq specified\n"); + return -EINVAL; + } + + icn8505 = devm_kzalloc(dev, sizeof(*icn8505), GFP_KERNEL); + if (!icn8505) + return -ENOMEM; + + input = devm_input_allocate_device(dev); + if (!input) + return -ENOMEM; + + input->name = client->name; + input->id.bustype = BUS_I2C; + + input_set_capability(input, EV_ABS, ABS_MT_POSITION_X); + input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y); + input_set_capability(input, EV_KEY, KEY_LEFTMETA); + + icn8505->client = client; + icn8505->input = input; + input_set_drvdata(input, icn8505); + + error = 
icn8505_probe_acpi(icn8505, dev); + if (error) + return error; + + error = icn8505_upload_fw(icn8505); + if (error) + return error; + + error = icn8505_read_data(icn8505, ICN8505_REG_CONFIGDATA, + resolution, sizeof(resolution)); + if (error) { + dev_err(dev, "Error reading resolution: %d\n", error); + return error; + } + + input_set_abs_params(input, ABS_MT_POSITION_X, 0, + le16_to_cpu(resolution[0]) - 1, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 0, + le16_to_cpu(resolution[1]) - 1, 0, 0); + + touchscreen_parse_properties(input, true, &icn8505->prop); + if (!input_abs_get_max(input, ABS_MT_POSITION_X) || + !input_abs_get_max(input, ABS_MT_POSITION_Y)) { + dev_err(dev, "Error touchscreen-size-x and/or -y missing\n"); + return -EINVAL; + } + + error = input_mt_init_slots(input, ICN8505_MAX_TOUCHES, + INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); + if (error) + return error; + + error = devm_request_threaded_irq(dev, client->irq, NULL, icn8505_irq, + IRQF_ONESHOT, client->name, icn8505); + if (error) { + dev_err(dev, "Error requesting irq: %d\n", error); + return error; + } + + error = input_register_device(input); + if (error) + return error; + + i2c_set_clientdata(client, icn8505); + return 0; +} + +static int __maybe_unused icn8505_suspend(struct device *dev) +{ + struct icn8505_data *icn8505 = i2c_get_clientdata(to_i2c_client(dev)); + + disable_irq(icn8505->client->irq); + + icn8505_write_reg(icn8505, ICN8505_REG_POWER, ICN8505_POWER_HIBERNATE); + + return 0; +} + +static int __maybe_unused icn8505_resume(struct device *dev) +{ + struct icn8505_data *icn8505 = i2c_get_clientdata(to_i2c_client(dev)); + int error; + + error = icn8505_upload_fw(icn8505); + if (error) + return error; + + enable_irq(icn8505->client->irq); + return 0; +} + +static SIMPLE_DEV_PM_OPS(icn8505_pm_ops, icn8505_suspend, icn8505_resume); + +static const struct acpi_device_id icn8505_acpi_match[] = { + { "CHPN0001" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, icn8505_acpi_match); + +static 
struct i2c_driver icn8505_driver = { + .driver = { + .name = "chipone_icn8505", + .pm = &icn8505_pm_ops, + .acpi_match_table = icn8505_acpi_match, + }, + .probe_new = icn8505_probe, +}; + +module_i2c_driver(icn8505_driver); + +MODULE_DESCRIPTION("ChipOne icn8505 I2C Touchscreen Driver"); +MODULE_AUTHOR("Hans de Goede "); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 956cd99b35a8fb9f7564702e7fd263c27b0a8f24 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Wed, 9 May 2018 09:11:49 +0800 Subject: PCI: rockchip: Separate common code from RC driver In preparation for introducing EP driver for Rockchip PCIe controller, rename the RC driver from pcie-rockchip.c to pcie-rockchip-host.c, and only leave some common functions in pcie-rockchip.c in order to be reused for both of RC driver and EP driver. Signed-off-by: Shawn Lin Signed-off-by: Lorenzo Pieralisi Tested-by: Jeffy Chen --- MAINTAINERS | 4 +- drivers/pci/host/Kconfig | 7 +- drivers/pci/host/Makefile | 1 + drivers/pci/host/pcie-rockchip-host.c | 1368 +++++++++++++++++++++++++++ drivers/pci/host/pcie-rockchip.c | 1625 +-------------------------------- drivers/pci/host/pcie-rockchip.h | 245 +++++ 6 files changed, 1653 insertions(+), 1597 deletions(-) create mode 100644 drivers/pci/host/pcie-rockchip-host.c create mode 100644 drivers/pci/host/pcie-rockchip.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..d425fe31bcc4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10940,8 +10940,8 @@ M: Shawn Lin L: linux-pci@vger.kernel.org L: linux-rockchip@lists.infradead.org S: Maintained -F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt -F: drivers/pci/host/pcie-rockchip.c +F: Documentation/devicetree/bindings/pci/rockchip-pcie* +F: drivers/pci/host/pcie-rockchip* PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC M: Linus Walleij diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 0d0177ce436c..f2d8cab8fe5e 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ 
-179,11 +179,16 @@ config PCI_HOST_THUNDER_ECAM Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. config PCIE_ROCKCHIP - tristate "Rockchip PCIe controller" + bool + depends on PCI + +config PCIE_ROCKCHIP_HOST + tristate "Rockchip PCIe host controller" depends on ARCH_ROCKCHIP || COMPILE_TEST depends on OF depends on PCI_MSI_IRQ_DOMAIN select MFD_SYSCON + select PCIE_ROCKCHIP help Say Y here if you want internal PCI support on Rockchip SoC. There is 1 internal PCIe port available to support GEN2 with diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 3b1059190867..f7d45228490b 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o +obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o obj-$(CONFIG_VMD) += vmd.o diff --git a/drivers/pci/host/pcie-rockchip-host.c b/drivers/pci/host/pcie-rockchip-host.c new file mode 100644 index 000000000000..fae9ecc572da --- /dev/null +++ b/drivers/pci/host/pcie-rockchip-host.c @@ -0,0 +1,1368 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip AXI PCIe host controller driver + * + * Copyright (c) 2016 Rockchip, Inc. + * + * Author: Shawn Lin + * Wenrui Li + * + * Bits taken from Synopsys DesignWare Host controller driver and + * ARM PCI Host generic driver. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-rockchip.h" + +static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) +{ + u32 status; + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); +} + +static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) +{ + u32 status; + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); +} + +static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) +{ + u32 val; + + /* Update Tx credit maximum update interval */ + val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1); + val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK; + val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */ + rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1); +} + +static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, + struct pci_bus *bus, int dev) +{ + /* access only one slot on each root port */ + if (bus->number == rockchip->root_bus_nr && dev > 0) + return 0; + + /* + * do not read more than one device on the bus directly attached + * to RC's downstream side. + */ + if (bus->primary == rockchip->root_bus_nr && dev > 0) + return 0; + + return 1; +} + +static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip) +{ + u32 val; + u8 map; + + if (rockchip->legacy_phy) + return GENMASK(MAX_LANE_NUM - 1, 0); + + val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP); + map = val & PCIE_CORE_LANE_MAP_MASK; + + /* The link may be using a reverse-indexed mapping. 
*/ + if (val & PCIE_CORE_LANE_MAP_REVERSE) + map = bitrev8(map) >> 4; + + return map; +} + +static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, + int where, int size, u32 *val) +{ + void __iomem *addr; + + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (size == 4) { + *val = readl(addr); + } else if (size == 2) { + *val = readw(addr); + } else if (size == 1) { + *val = readb(addr); + } else { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, + int where, int size, u32 val) +{ + u32 mask, tmp, offset; + void __iomem *addr; + + offset = where & ~0x3; + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; + + if (size == 4) { + writel(val, addr); + return PCIBIOS_SUCCESSFUL; + } + + mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); + + /* + * N.B. This read/modify/write isn't safe in general because it can + * corrupt RW1C bits in adjacent registers. But the hardware + * doesn't support smaller writes. 
+ */ + tmp = readl(addr) & mask; + tmp |= val << ((where & 0x3) * 8); + writel(tmp, addr); + + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, + struct pci_bus *bus, u32 devfn, + int where, int size, u32 *val) +{ + u32 busdev; + + busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + if (!IS_ALIGNED(busdev, size)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (bus->parent->number == rockchip->root_bus_nr) + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + else + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE1_CFG); + + if (size == 4) { + *val = readl(rockchip->reg_base + busdev); + } else if (size == 2) { + *val = readw(rockchip->reg_base + busdev); + } else if (size == 1) { + *val = readb(rockchip->reg_base + busdev); + } else { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, + struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + u32 busdev; + + busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + if (!IS_ALIGNED(busdev, size)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (bus->parent->number == rockchip->root_bus_nr) + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + else + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE1_CFG); + + if (size == 4) + writel(val, rockchip->reg_base + busdev); + else if (size == 2) + writew(val, rockchip->reg_base + busdev); + else if (size == 1) + writeb(val, rockchip->reg_base + busdev); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct rockchip_pcie *rockchip = bus->sysdata; + + if (!rockchip_pcie_valid_device(rockchip, bus, 
PCI_SLOT(devfn))) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (bus->number == rockchip->root_bus_nr) + return rockchip_pcie_rd_own_conf(rockchip, where, size, val); + + return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, + val); +} + +static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct rockchip_pcie *rockchip = bus->sysdata; + + if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (bus->number == rockchip->root_bus_nr) + return rockchip_pcie_wr_own_conf(rockchip, where, size, val); + + return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, + val); +} + +static struct pci_ops rockchip_pcie_ops = { + .read = rockchip_pcie_rd_conf, + .write = rockchip_pcie_wr_conf, +}; + +static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) +{ + int curr; + u32 status, scale, power; + + if (IS_ERR(rockchip->vpcie3v3)) + return; + + /* + * Set RC's captured slot power limit and scale if + * vpcie3v3 available. The default values are both zero + * which means the software should set these two according + * to the actual power supply. 
+ */ + curr = regulator_get_current_limit(rockchip->vpcie3v3); + if (curr <= 0) + return; + + scale = 3; /* 0.001x */ + curr = curr / 1000; /* convert to mA */ + power = (curr * 3300) / 1000; /* milliwatt */ + while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { + if (!scale) { + dev_warn(rockchip->dev, "invalid power supply\n"); + return; + } + scale--; + power = power / 10; + } + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); + status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | + (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); +} + +/** + * rockchip_pcie_init_port - Initialize hardware + * @rockchip: PCIe port information + */ +static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err, i; + u32 status; + + gpiod_set_value_cansleep(rockchip->ep_gpio, 0); + + err = reset_control_assert(rockchip->aclk_rst); + if (err) { + dev_err(dev, "assert aclk_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pclk_rst); + if (err) { + dev_err(dev, "assert pclk_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pm_rst); + if (err) { + dev_err(dev, "assert pm_rst err %d\n", err); + return err; + } + + for (i = 0; i < MAX_LANE_NUM; i++) { + err = phy_init(rockchip->phys[i]); + if (err) { + dev_err(dev, "init phy%d err %d\n", i, err); + goto err_exit_phy; + } + } + + err = reset_control_assert(rockchip->core_rst); + if (err) { + dev_err(dev, "assert core_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_assert(rockchip->mgmt_rst); + if (err) { + dev_err(dev, "assert mgmt_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_assert(rockchip->mgmt_sticky_rst); + if (err) { + dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_assert(rockchip->pipe_rst); + if (err) { + dev_err(dev, "assert pipe_rst err %d\n", err); + 
goto err_exit_phy; + } + + udelay(10); + + err = reset_control_deassert(rockchip->pm_rst); + if (err) { + dev_err(dev, "deassert pm_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_deassert(rockchip->aclk_rst); + if (err) { + dev_err(dev, "deassert aclk_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_deassert(rockchip->pclk_rst); + if (err) { + dev_err(dev, "deassert pclk_rst err %d\n", err); + goto err_exit_phy; + } + + if (rockchip->link_gen == 2) + rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2, + PCIE_CLIENT_CONFIG); + else + rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, + PCIE_CLIENT_CONFIG); + + rockchip_pcie_write(rockchip, + PCIE_CLIENT_CONF_ENABLE | + PCIE_CLIENT_LINK_TRAIN_ENABLE | + PCIE_CLIENT_ARI_ENABLE | + PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) | + PCIE_CLIENT_MODE_RC, + PCIE_CLIENT_CONFIG); + + for (i = 0; i < MAX_LANE_NUM; i++) { + err = phy_power_on(rockchip->phys[i]); + if (err) { + dev_err(dev, "power on phy%d err %d\n", i, err); + goto err_power_off_phy; + } + } + + /* + * Please don't reorder the deassert sequence of the following + * four reset pins. + */ + err = reset_control_deassert(rockchip->mgmt_sticky_rst); + if (err) { + dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); + goto err_power_off_phy; + } + + err = reset_control_deassert(rockchip->core_rst); + if (err) { + dev_err(dev, "deassert core_rst err %d\n", err); + goto err_power_off_phy; + } + + err = reset_control_deassert(rockchip->mgmt_rst); + if (err) { + dev_err(dev, "deassert mgmt_rst err %d\n", err); + goto err_power_off_phy; + } + + err = reset_control_deassert(rockchip->pipe_rst); + if (err) { + dev_err(dev, "deassert pipe_rst err %d\n", err); + goto err_power_off_phy; + } + + /* Fix the transmitted FTS count desired to exit from L0s. 
*/ + status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); + status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | + (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); + rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); + + rockchip_pcie_set_power_limit(rockchip); + + /* Set RC's clock architecture as common clock */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKSTA_SLC << 16; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + /* Set RC's RCB to 128 */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKCTL_RCB; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + /* Enable Gen1 training */ + rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, + PCIE_CLIENT_CONFIG); + + gpiod_set_value_cansleep(rockchip->ep_gpio, 1); + + /* 500ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, + status, PCIE_LINK_UP(status), 20, + 500 * USEC_PER_MSEC); + if (err) { + dev_err(dev, "PCIe link training gen1 timeout!\n"); + goto err_power_off_phy; + } + + if (rockchip->link_gen == 2) { + /* + * Enable retrain for gen2. This should be configured only after + * gen1 finished. 
+ */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKCTL_RL; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, + status, PCIE_LINK_IS_GEN2(status), 20, + 500 * USEC_PER_MSEC); + if (err) + dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); + } + + /* Check the final link width from negotiated lane counter from MGMT */ + status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); + status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> + PCIE_CORE_PL_CONF_LANE_SHIFT); + dev_dbg(dev, "current link width is x%d\n", status); + + /* Power off unused lane(s) */ + rockchip->lanes_map = rockchip_pcie_lane_map(rockchip); + for (i = 0; i < MAX_LANE_NUM; i++) { + if (!(rockchip->lanes_map & BIT(i))) { + dev_dbg(dev, "idling lane %d\n", i); + phy_power_off(rockchip->phys[i]); + } + } + + rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, + PCIE_CORE_CONFIG_VENDOR); + rockchip_pcie_write(rockchip, + PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, + PCIE_RC_CONFIG_RID_CCR); + + /* Clear THP cap's next cap pointer to remove L1 substate cap */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP); + status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP); + + /* Clear L0s from RC's link cap */ + if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); + status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); + } + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); + status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; + status |= PCIE_RC_CONFIG_DCSR_MPS_256; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); + + return 0; +err_power_off_phy: + while (i--) + phy_power_off(rockchip->phys[i]); + i = MAX_LANE_NUM; +err_exit_phy: + while (i--) + 
phy_exit(rockchip->phys[i]); + return err; +} + +static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) +{ + struct rockchip_pcie *rockchip = arg; + struct device *dev = rockchip->dev; + u32 reg; + u32 sub_reg; + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + if (reg & PCIE_CLIENT_INT_LOCAL) { + dev_dbg(dev, "local interrupt received\n"); + sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS); + if (sub_reg & PCIE_CORE_INT_PRFPE) + dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n"); + + if (sub_reg & PCIE_CORE_INT_CRFPE) + dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n"); + + if (sub_reg & PCIE_CORE_INT_RRPE) + dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n"); + + if (sub_reg & PCIE_CORE_INT_PRFO) + dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n"); + + if (sub_reg & PCIE_CORE_INT_CRFO) + dev_dbg(dev, "overflow occurred in the completion receive FIFO\n"); + + if (sub_reg & PCIE_CORE_INT_RT) + dev_dbg(dev, "replay timer timed out\n"); + + if (sub_reg & PCIE_CORE_INT_RTR) + dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n"); + + if (sub_reg & PCIE_CORE_INT_PE) + dev_dbg(dev, "phy error detected on receive side\n"); + + if (sub_reg & PCIE_CORE_INT_MTR) + dev_dbg(dev, "malformed TLP received from the link\n"); + + if (sub_reg & PCIE_CORE_INT_UCR) + dev_dbg(dev, "malformed TLP received from the link\n"); + + if (sub_reg & PCIE_CORE_INT_FCE) + dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); + + if (sub_reg & PCIE_CORE_INT_CT) + dev_dbg(dev, "a request timed out waiting for completion\n"); + + if (sub_reg & PCIE_CORE_INT_UTC) + dev_dbg(dev, "unmapped TC error\n"); + + if (sub_reg & PCIE_CORE_INT_MMVC) + dev_dbg(dev, "MSI mask register changes\n"); + + rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS); + } else if (reg & 
PCIE_CLIENT_INT_PHY) { + dev_dbg(dev, "phy link changes\n"); + rockchip_pcie_update_txcredit_mui(rockchip); + rockchip_pcie_clr_bw_int(rockchip); + } + + rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL, + PCIE_CLIENT_INT_STATUS); + + return IRQ_HANDLED; +} + +static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) +{ + struct rockchip_pcie *rockchip = arg; + struct device *dev = rockchip->dev; + u32 reg; + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + if (reg & PCIE_CLIENT_INT_LEGACY_DONE) + dev_dbg(dev, "legacy done interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_MSG) + dev_dbg(dev, "message done interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_HOT_RST) + dev_dbg(dev, "hot reset interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_DPA) + dev_dbg(dev, "dpa interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_FATAL_ERR) + dev_dbg(dev, "fatal error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_NFATAL_ERR) + dev_dbg(dev, "no fatal error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_CORR_ERR) + dev_dbg(dev, "correctable error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_PHY) + dev_dbg(dev, "phy interrupt received\n"); + + rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE | + PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST | + PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR | + PCIE_CLIENT_INT_NFATAL_ERR | + PCIE_CLIENT_INT_CORR_ERR | + PCIE_CLIENT_INT_PHY), + PCIE_CLIENT_INT_STATUS); + + return IRQ_HANDLED; +} + +static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); + struct device *dev = rockchip->dev; + u32 reg; + u32 hwirq; + u32 virq; + + chained_irq_enter(chip, desc); + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT; + + while (reg) { + hwirq = ffs(reg) - 1; + 
reg &= ~BIT(hwirq); + + virq = irq_find_mapping(rockchip->irq_domain, hwirq); + if (virq) + generic_handle_irq(virq); + else + dev_err(dev, "unexpected IRQ, INT%d\n", hwirq); + } + + chained_irq_exit(chip, desc); +} + +static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) +{ + int irq, err; + struct device *dev = rockchip->dev; + struct platform_device *pdev = to_platform_device(dev); + + irq = platform_get_irq_byname(pdev, "sys"); + if (irq < 0) { + dev_err(dev, "missing sys IRQ resource\n"); + return irq; + } + + err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, + IRQF_SHARED, "pcie-sys", rockchip); + if (err) { + dev_err(dev, "failed to request PCIe subsystem IRQ\n"); + return err; + } + + irq = platform_get_irq_byname(pdev, "legacy"); + if (irq < 0) { + dev_err(dev, "missing legacy IRQ resource\n"); + return irq; + } + + irq_set_chained_handler_and_data(irq, + rockchip_pcie_legacy_int_handler, + rockchip); + + irq = platform_get_irq_byname(pdev, "client"); + if (irq < 0) { + dev_err(dev, "missing client IRQ resource\n"); + return irq; + } + + err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, + IRQF_SHARED, "pcie-client", rockchip); + if (err) { + dev_err(dev, "failed to request PCIe client IRQ\n"); + return err; + } + + return 0; +} + +/** + * rockchip_pcie_parse_dt - Parse Device Tree + * @rockchip: PCIe port information + * + * Return: '0' on success and error value on failure + */ +static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + struct platform_device *pdev = to_platform_device(dev); + struct device_node *node = dev->of_node; + struct resource *regs; + int err; + + regs = platform_get_resource_byname(pdev, + IORESOURCE_MEM, + "axi-base"); + rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs); + if (IS_ERR(rockchip->reg_base)) + return PTR_ERR(rockchip->reg_base); + + regs = platform_get_resource_byname(pdev, + IORESOURCE_MEM, + "apb-base"); + 
rockchip->apb_base = devm_ioremap_resource(dev, regs); + if (IS_ERR(rockchip->apb_base)) + return PTR_ERR(rockchip->apb_base); + + err = rockchip_pcie_get_phys(rockchip); + if (err) + return err; + + rockchip->lanes = 1; + err = of_property_read_u32(node, "num-lanes", &rockchip->lanes); + if (!err && (rockchip->lanes == 0 || + rockchip->lanes == 3 || + rockchip->lanes > 4)) { + dev_warn(dev, "invalid num-lanes, default to use one lane\n"); + rockchip->lanes = 1; + } + + rockchip->link_gen = of_pci_get_max_link_speed(node); + if (rockchip->link_gen < 0 || rockchip->link_gen > 2) + rockchip->link_gen = 2; + + rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core"); + if (IS_ERR(rockchip->core_rst)) { + if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) + dev_err(dev, "missing core reset property in node\n"); + return PTR_ERR(rockchip->core_rst); + } + + rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt"); + if (IS_ERR(rockchip->mgmt_rst)) { + if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) + dev_err(dev, "missing mgmt reset property in node\n"); + return PTR_ERR(rockchip->mgmt_rst); + } + + rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev, + "mgmt-sticky"); + if (IS_ERR(rockchip->mgmt_sticky_rst)) { + if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) + dev_err(dev, "missing mgmt-sticky reset property in node\n"); + return PTR_ERR(rockchip->mgmt_sticky_rst); + } + + rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe"); + if (IS_ERR(rockchip->pipe_rst)) { + if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pipe reset property in node\n"); + return PTR_ERR(rockchip->pipe_rst); + } + + rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm"); + if (IS_ERR(rockchip->pm_rst)) { + if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pm reset property in node\n"); + return PTR_ERR(rockchip->pm_rst); + } + + rockchip->pclk_rst = 
devm_reset_control_get_exclusive(dev, "pclk"); + if (IS_ERR(rockchip->pclk_rst)) { + if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pclk reset property in node\n"); + return PTR_ERR(rockchip->pclk_rst); + } + + rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk"); + if (IS_ERR(rockchip->aclk_rst)) { + if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) + dev_err(dev, "missing aclk reset property in node\n"); + return PTR_ERR(rockchip->aclk_rst); + } + + rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); + if (IS_ERR(rockchip->ep_gpio)) { + dev_err(dev, "missing ep-gpios property in node\n"); + return PTR_ERR(rockchip->ep_gpio); + } + + rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); + if (IS_ERR(rockchip->aclk_pcie)) { + dev_err(dev, "aclk clock not found\n"); + return PTR_ERR(rockchip->aclk_pcie); + } + + rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf"); + if (IS_ERR(rockchip->aclk_perf_pcie)) { + dev_err(dev, "aclk_perf clock not found\n"); + return PTR_ERR(rockchip->aclk_perf_pcie); + } + + rockchip->hclk_pcie = devm_clk_get(dev, "hclk"); + if (IS_ERR(rockchip->hclk_pcie)) { + dev_err(dev, "hclk clock not found\n"); + return PTR_ERR(rockchip->hclk_pcie); + } + + rockchip->clk_pcie_pm = devm_clk_get(dev, "pm"); + if (IS_ERR(rockchip->clk_pcie_pm)) { + dev_err(dev, "pm clock not found\n"); + return PTR_ERR(rockchip->clk_pcie_pm); + } + + err = rockchip_pcie_setup_irq(rockchip); + if (err) + return err; + + rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); + if (IS_ERR(rockchip->vpcie12v)) { + if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie12v regulator found\n"); + } + + rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); + if (IS_ERR(rockchip->vpcie3v3)) { + if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie3v3 regulator found\n"); + } + + rockchip->vpcie1v8 = 
devm_regulator_get_optional(dev, "vpcie1v8"); + if (IS_ERR(rockchip->vpcie1v8)) { + if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie1v8 regulator found\n"); + } + + rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); + if (IS_ERR(rockchip->vpcie0v9)) { + if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie0v9 regulator found\n"); + } + + return 0; +} + +static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err; + + if (!IS_ERR(rockchip->vpcie12v)) { + err = regulator_enable(rockchip->vpcie12v); + if (err) { + dev_err(dev, "fail to enable vpcie12v regulator\n"); + goto err_out; + } + } + + if (!IS_ERR(rockchip->vpcie3v3)) { + err = regulator_enable(rockchip->vpcie3v3); + if (err) { + dev_err(dev, "fail to enable vpcie3v3 regulator\n"); + goto err_disable_12v; + } + } + + if (!IS_ERR(rockchip->vpcie1v8)) { + err = regulator_enable(rockchip->vpcie1v8); + if (err) { + dev_err(dev, "fail to enable vpcie1v8 regulator\n"); + goto err_disable_3v3; + } + } + + if (!IS_ERR(rockchip->vpcie0v9)) { + err = regulator_enable(rockchip->vpcie0v9); + if (err) { + dev_err(dev, "fail to enable vpcie0v9 regulator\n"); + goto err_disable_1v8; + } + } + + return 0; + +err_disable_1v8: + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); +err_disable_3v3: + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); +err_disable_12v: + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); +err_out: + return err; +} + +static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip) +{ + rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) & + (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK); + rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT), + PCIE_CORE_INT_MASK); + + rockchip_pcie_enable_bw_int(rockchip); +} + +static int 
rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = rockchip_pcie_intx_map, +}; + +static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + struct device_node *intc = of_get_next_child(dev->of_node, NULL); + + if (!intc) { + dev_err(dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + + rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, + &intx_domain_ops, rockchip); + if (!rockchip->irq_domain) { + dev_err(dev, "failed to get a INTx IRQ domain\n"); + return -EINVAL; + } + + return 0; +} + +static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip, + int region_no, int type, u8 num_pass_bits, + u32 lower_addr, u32 upper_addr) +{ + u32 ob_addr_0; + u32 ob_addr_1; + u32 ob_desc_0; + u32 aw_offset; + + if (region_no >= MAX_AXI_WRAPPER_REGION_NUM) + return -EINVAL; + if (num_pass_bits + 1 < 8) + return -EINVAL; + if (num_pass_bits > 63) + return -EINVAL; + if (region_no == 0) { + if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits)) + return -EINVAL; + } + if (region_no != 0) { + if (AXI_REGION_SIZE < (2ULL << num_pass_bits)) + return -EINVAL; + } + + aw_offset = (region_no << OB_REG_SIZE_SHIFT); + + ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS; + ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR; + ob_addr_1 = upper_addr; + ob_desc_0 = (1 << 23 | type); + + rockchip_pcie_write(rockchip, ob_addr_0, + PCIE_CORE_OB_REGION_ADDR0 + aw_offset); + rockchip_pcie_write(rockchip, ob_addr_1, + PCIE_CORE_OB_REGION_ADDR1 + aw_offset); + rockchip_pcie_write(rockchip, ob_desc_0, + PCIE_CORE_OB_REGION_DESC0 + aw_offset); + rockchip_pcie_write(rockchip, 0, + PCIE_CORE_OB_REGION_DESC1 + aw_offset); + + return 0; +} + 
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, + int region_no, u8 num_pass_bits, + u32 lower_addr, u32 upper_addr) +{ + u32 ib_addr_0; + u32 ib_addr_1; + u32 aw_offset; + + if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM) + return -EINVAL; + if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED) + return -EINVAL; + if (num_pass_bits > 63) + return -EINVAL; + + aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT); + + ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS; + ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR; + ib_addr_1 = upper_addr; + + rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset); + rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset); + + return 0; +} + +static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int offset; + int err; + int reg_no; + + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + + for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { + err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, + AXI_WRAPPER_MEM_WRITE, + 20 - 1, + rockchip->mem_bus_addr + + (reg_no << 20), + 0); + if (err) { + dev_err(dev, "program RC mem outbound ATU failed\n"); + return err; + } + } + + err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); + if (err) { + dev_err(dev, "program RC mem inbound ATU failed\n"); + return err; + } + + offset = rockchip->mem_size >> 20; + for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) { + err = rockchip_pcie_prog_ob_atu(rockchip, + reg_no + 1 + offset, + AXI_WRAPPER_IO_WRITE, + 20 - 1, + rockchip->io_bus_addr + + (reg_no << 20), + 0); + if (err) { + dev_err(dev, "program RC io outbound ATU failed\n"); + return err; + } + } + + /* assign message regions */ + rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset, + AXI_WRAPPER_NOR_MSG, + 20 - 1, 0, 0); + + rockchip->msg_bus_addr = rockchip->mem_bus_addr + + ((reg_no + 
offset) << 20); + return err; +} + +static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip) +{ + u32 value; + int err; + + /* send PME_TURN_OFF message */ + writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF); + + /* read LTSSM and wait for falling into L2 link state */ + err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0, + value, PCIE_LINK_IS_L2(value), 20, + jiffies_to_usecs(5 * HZ)); + if (err) { + dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n"); + return err; + } + + return 0; +} + +static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) +{ + struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + int ret; + + /* disable core and cli int since we don't need to ack PME_ACK */ + rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) | + PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK); + rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK); + + ret = rockchip_pcie_wait_l2(rockchip); + if (ret) { + rockchip_pcie_enable_interrupts(rockchip); + return ret; + } + + rockchip_pcie_deinit_phys(rockchip); + + rockchip_pcie_disable_clocks(rockchip); + + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + + return ret; +} + +static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) +{ + struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + int err; + + if (!IS_ERR(rockchip->vpcie0v9)) { + err = regulator_enable(rockchip->vpcie0v9); + if (err) { + dev_err(dev, "fail to enable vpcie0v9 regulator\n"); + return err; + } + } + + err = rockchip_pcie_enable_clocks(rockchip); + if (err) + goto err_disable_0v9; + + err = rockchip_pcie_init_port(rockchip); + if (err) + goto err_pcie_resume; + + err = rockchip_pcie_cfg_atu(rockchip); + if (err) + goto err_err_deinit_port; + + /* Need this to enter L1 again */ + rockchip_pcie_update_txcredit_mui(rockchip); + rockchip_pcie_enable_interrupts(rockchip); + + return 0; + +err_err_deinit_port: + 
rockchip_pcie_deinit_phys(rockchip); +err_pcie_resume: + rockchip_pcie_disable_clocks(rockchip); +err_disable_0v9: + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + return err; +} + +static int rockchip_pcie_probe(struct platform_device *pdev) +{ + struct rockchip_pcie *rockchip; + struct device *dev = &pdev->dev; + struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; + struct resource_entry *win; + resource_size_t io_base; + struct resource *mem; + struct resource *io; + int err; + + LIST_HEAD(res); + + if (!dev->of_node) + return -ENODEV; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip)); + if (!bridge) + return -ENOMEM; + + rockchip = pci_host_bridge_priv(bridge); + + platform_set_drvdata(pdev, rockchip); + rockchip->dev = dev; + + err = rockchip_pcie_parse_dt(rockchip); + if (err) + return err; + + err = rockchip_pcie_enable_clocks(rockchip); + if (err) + return err; + + err = rockchip_pcie_set_vpcie(rockchip); + if (err) { + dev_err(dev, "failed to set vpcie regulator\n"); + goto err_set_vpcie; + } + + err = rockchip_pcie_init_port(rockchip); + if (err) + goto err_vpcie; + + rockchip_pcie_enable_interrupts(rockchip); + + err = rockchip_pcie_init_irq_domain(rockchip); + if (err < 0) + goto err_deinit_port; + + err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, + &res, &io_base); + if (err) + goto err_remove_irq_domain; + + err = devm_request_pci_bus_resources(dev, &res); + if (err) + goto err_free_res; + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry(win, &res) { + switch (resource_type(win->res)) { + case IORESOURCE_IO: + io = win->res; + io->name = "I/O"; + rockchip->io_size = resource_size(io); + rockchip->io_bus_addr = io->start - win->offset; + err = pci_remap_iospace(io, io_base); + if (err) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, io); + continue; + } + rockchip->io = io; + break; + case IORESOURCE_MEM: + mem = win->res; + mem->name = 
"MEM"; + rockchip->mem_size = resource_size(mem); + rockchip->mem_bus_addr = mem->start - win->offset; + break; + case IORESOURCE_BUS: + rockchip->root_bus_nr = win->res->start; + break; + default: + continue; + } + } + + err = rockchip_pcie_cfg_atu(rockchip); + if (err) + goto err_unmap_iospace; + + rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); + if (!rockchip->msg_region) { + err = -ENOMEM; + goto err_unmap_iospace; + } + + list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = rockchip; + bridge->busnr = 0; + bridge->ops = &rockchip_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + err = pci_scan_root_bus_bridge(bridge); + if (err < 0) + goto err_unmap_iospace; + + bus = bridge->bus; + + rockchip->root_bus = bus; + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + return 0; + +err_unmap_iospace: + pci_unmap_iospace(rockchip->io); +err_free_res: + pci_free_resource_list(&res); +err_remove_irq_domain: + irq_domain_remove(rockchip->irq_domain); +err_deinit_port: + rockchip_pcie_deinit_phys(rockchip); +err_vpcie: + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); +err_set_vpcie: + rockchip_pcie_disable_clocks(rockchip); + return err; +} + +static int rockchip_pcie_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + + pci_stop_root_bus(rockchip->root_bus); + pci_remove_root_bus(rockchip->root_bus); + pci_unmap_iospace(rockchip->io); + irq_domain_remove(rockchip->irq_domain); + + 
rockchip_pcie_deinit_phys(rockchip); + + rockchip_pcie_disable_clocks(rockchip); + + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + + return 0; +} + +static const struct dev_pm_ops rockchip_pcie_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, + rockchip_pcie_resume_noirq) +}; + +static const struct of_device_id rockchip_pcie_of_match[] = { + { .compatible = "rockchip,rk3399-pcie", }, + {} +}; +MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match); + +static struct platform_driver rockchip_pcie_driver = { + .driver = { + .name = "rockchip-pcie", + .of_match_table = rockchip_pcie_of_match, + .pm = &rockchip_pcie_pm_ops, + }, + .probe = rockchip_pcie_probe, + .remove = rockchip_pcie_remove, +}; +module_platform_driver(rockchip_pcie_driver); + +MODULE_AUTHOR("Rockchip Inc"); +MODULE_DESCRIPTION("Rockchip AXI PCIe driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index f1e8f97ea1fb..3d46da910292 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ -11,904 +11,12 @@ * ARM PCI Host generic driver. */ -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -/* - * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16 - * bits. This allows atomic updates of the register without locking. 
- */ -#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) -#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) - -#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4) -#define MAX_LANE_NUM 4 - -#define PCIE_CLIENT_BASE 0x0 -#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) -#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001) -#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002) -#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008) -#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x)) -#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) -#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0) -#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080) -#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c) -#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0) -#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18 -#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19 -#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48) -#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000 -#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000 -#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c) -#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50) -#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5) -#define PCIE_CLIENT_INTR_SHIFT 5 -#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15) -#define PCIE_CLIENT_INT_MSG BIT(14) -#define PCIE_CLIENT_INT_HOT_RST BIT(13) -#define PCIE_CLIENT_INT_DPA BIT(12) -#define PCIE_CLIENT_INT_FATAL_ERR BIT(11) -#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10) -#define PCIE_CLIENT_INT_CORR_ERR BIT(9) -#define PCIE_CLIENT_INT_INTD BIT(8) -#define PCIE_CLIENT_INT_INTC BIT(7) -#define PCIE_CLIENT_INT_INTB BIT(6) -#define PCIE_CLIENT_INT_INTA BIT(5) -#define PCIE_CLIENT_INT_LOCAL BIT(4) -#define PCIE_CLIENT_INT_UDMA BIT(3) -#define PCIE_CLIENT_INT_PHY BIT(2) -#define PCIE_CLIENT_INT_HOT_PLUG BIT(1) -#define PCIE_CLIENT_INT_PWR_STCG BIT(0) - -#define PCIE_CLIENT_INT_LEGACY \ - (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \ - PCIE_CLIENT_INT_INTC | 
PCIE_CLIENT_INT_INTD) - -#define PCIE_CLIENT_INT_CLI \ - (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \ - PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \ - PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \ - PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \ - PCIE_CLIENT_INT_PHY) - -#define PCIE_CORE_CTRL_MGMT_BASE 0x900000 -#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000) -#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008 -#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018 -#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006 -#define PCIE_CORE_PL_CONF_LANE_SHIFT 1 -#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004) -#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8) -#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8 -#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff -#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020) -#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000 -#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16 -#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \ - (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT) -#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200) -#define PCIE_CORE_LANE_MAP_MASK 0x0000000f -#define PCIE_CORE_LANE_MAP_REVERSE BIT(16) -#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c) -#define PCIE_CORE_INT_PRFPE BIT(0) -#define PCIE_CORE_INT_CRFPE BIT(1) -#define PCIE_CORE_INT_RRPE BIT(2) -#define PCIE_CORE_INT_PRFO BIT(3) -#define PCIE_CORE_INT_CRFO BIT(4) -#define PCIE_CORE_INT_RT BIT(5) -#define PCIE_CORE_INT_RTR BIT(6) -#define PCIE_CORE_INT_PE BIT(7) -#define PCIE_CORE_INT_MTR BIT(8) -#define PCIE_CORE_INT_UCR BIT(9) -#define PCIE_CORE_INT_FCE BIT(10) -#define PCIE_CORE_INT_CT BIT(11) -#define PCIE_CORE_INT_UTC BIT(18) -#define PCIE_CORE_INT_MMVC BIT(19) -#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44) -#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210) -#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300) - -#define PCIE_CORE_INT \ - 
(PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \ - PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \ - PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \ - PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \ - PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \ - PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ - PCIE_CORE_INT_MMVC) - -#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 -#define PCIE_RC_CONFIG_BASE 0xa00000 -#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) -#define PCIE_RC_CONFIG_SCC_SHIFT 16 -#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) -#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 -#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff -#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 -#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) -#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) -#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) -#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) -#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) -#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) -#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) -#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) -#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) - -#define PCIE_CORE_AXI_CONF_BASE 0xc00000 -#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0) -#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f -#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00 -#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4) -#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8) -#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc) - -#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800 -#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0) -#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f -#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00 -#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4) - -/* Size of one AXI Region (not Region 0) */ -#define AXI_REGION_SIZE BIT(20) -/* Size of Region 0, equal to sum of sizes of other regions 
*/ -#define AXI_REGION_0_SIZE (32 * (0x1 << 20)) -#define OB_REG_SIZE_SHIFT 5 -#define IB_ROOT_PORT_REG_SIZE_SHIFT 3 -#define AXI_WRAPPER_IO_WRITE 0x6 -#define AXI_WRAPPER_MEM_WRITE 0x2 -#define AXI_WRAPPER_TYPE0_CFG 0xa -#define AXI_WRAPPER_TYPE1_CFG 0xb -#define AXI_WRAPPER_NOR_MSG 0xc - -#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 -#define MIN_AXI_ADDR_BITS_PASSED 8 -#define PCIE_RC_SEND_PME_OFF 0x11960 -#define ROCKCHIP_VENDOR_ID 0x1d87 -#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20) -#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15) -#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12) -#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0) -#define PCIE_ECAM_ADDR(bus, dev, func, reg) \ - (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \ - PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg)) -#define PCIE_LINK_IS_L2(x) \ - (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2) -#define PCIE_LINK_UP(x) \ - (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP) -#define PCIE_LINK_IS_GEN2(x) \ - (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G) - -#define RC_REGION_0_ADDR_TRANS_H 0x00000000 -#define RC_REGION_0_ADDR_TRANS_L 0x00000000 -#define RC_REGION_0_PASS_BITS (25 - 1) -#define RC_REGION_0_TYPE_MASK GENMASK(3, 0) -#define MAX_AXI_WRAPPER_REGION_NUM 33 - -struct rockchip_pcie { - void __iomem *reg_base; /* DT axi-base */ - void __iomem *apb_base; /* DT apb-base */ - bool legacy_phy; - struct phy *phys[MAX_LANE_NUM]; - struct reset_control *core_rst; - struct reset_control *mgmt_rst; - struct reset_control *mgmt_sticky_rst; - struct reset_control *pipe_rst; - struct reset_control *pm_rst; - struct reset_control *aclk_rst; - struct reset_control *pclk_rst; - struct clk *aclk_pcie; - struct clk *aclk_perf_pcie; - struct clk *hclk_pcie; - struct clk *clk_pcie_pm; - struct regulator *vpcie12v; /* 12V power supply */ - struct regulator *vpcie3v3; /* 3.3V power supply */ - struct regulator *vpcie1v8; /* 1.8V power supply */ - struct regulator *vpcie0v9; /* 
0.9V power supply */ - struct gpio_desc *ep_gpio; - u32 lanes; - u8 lanes_map; - u8 root_bus_nr; - int link_gen; - struct device *dev; - struct irq_domain *irq_domain; - int offset; - struct pci_bus *root_bus; - struct resource *io; - phys_addr_t io_bus_addr; - u32 io_size; - void __iomem *msg_region; - u32 mem_size; - phys_addr_t msg_bus_addr; - phys_addr_t mem_bus_addr; -}; - -static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg) -{ - return readl(rockchip->apb_base + reg); -} - -static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val, - u32 reg) -{ - writel(val, rockchip->apb_base + reg); -} - -static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) -{ - u32 status; - - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); -} - -static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) -{ - u32 status; - - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); -} - -static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) -{ - u32 val; - - /* Update Tx credit maximum update interval */ - val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1); - val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK; - val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */ - rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1); -} - -static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, - struct pci_bus *bus, int dev) -{ - /* access only one slot on each root port */ - if (bus->number == rockchip->root_bus_nr && dev > 0) - return 0; - - /* - * do not read more than one device on the bus directly attached - * to RC's downstream side. 
- */ - if (bus->primary == rockchip->root_bus_nr && dev > 0) - return 0; - - return 1; -} - -static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip) -{ - u32 val; - u8 map; - - if (rockchip->legacy_phy) - return GENMASK(MAX_LANE_NUM - 1, 0); - - val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP); - map = val & PCIE_CORE_LANE_MAP_MASK; - - /* The link may be using a reverse-indexed mapping. */ - if (val & PCIE_CORE_LANE_MAP_REVERSE) - map = bitrev8(map) >> 4; - - return map; -} - -static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, - int where, int size, u32 *val) -{ - void __iomem *addr; - - addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; - - if (!IS_ALIGNED((uintptr_t)addr, size)) { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - if (size == 4) { - *val = readl(addr); - } else if (size == 2) { - *val = readw(addr); - } else if (size == 1) { - *val = readb(addr); - } else { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - return PCIBIOS_SUCCESSFUL; -} - -static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, - int where, int size, u32 val) -{ - u32 mask, tmp, offset; - void __iomem *addr; - - offset = where & ~0x3; - addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; - - if (size == 4) { - writel(val, addr); - return PCIBIOS_SUCCESSFUL; - } - - mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); - - /* - * N.B. This read/modify/write isn't safe in general because it can - * corrupt RW1C bits in adjacent registers. But the hardware - * doesn't support smaller writes. 
- */ - tmp = readl(addr) & mask; - tmp |= val << ((where & 0x3) * 8); - writel(tmp, addr); - - return PCIBIOS_SUCCESSFUL; -} - -static void rockchip_pcie_cfg_configuration_accesses( - struct rockchip_pcie *rockchip, u32 type) -{ - u32 ob_desc_0; - - /* Configuration Accesses for region 0 */ - rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); - - rockchip_pcie_write(rockchip, - (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), - PCIE_CORE_OB_REGION_ADDR0); - rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, - PCIE_CORE_OB_REGION_ADDR1); - ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); - ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); - ob_desc_0 |= (type | (0x1 << 23)); - rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); - rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); -} - -static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, - struct pci_bus *bus, u32 devfn, - int where, int size, u32 *val) -{ - u32 busdev; - - busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn), where); - - if (!IS_ALIGNED(busdev, size)) { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - if (bus->parent->number == rockchip->root_bus_nr) - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE0_CFG); - else - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE1_CFG); - - if (size == 4) { - *val = readl(rockchip->reg_base + busdev); - } else if (size == 2) { - *val = readw(rockchip->reg_base + busdev); - } else if (size == 1) { - *val = readb(rockchip->reg_base + busdev); - } else { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - return PCIBIOS_SUCCESSFUL; -} - -static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, - struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) -{ - u32 busdev; - - busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn), where); - if (!IS_ALIGNED(busdev, size)) - return 
PCIBIOS_BAD_REGISTER_NUMBER; - - if (bus->parent->number == rockchip->root_bus_nr) - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE0_CFG); - else - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE1_CFG); - - if (size == 4) - writel(val, rockchip->reg_base + busdev); - else if (size == 2) - writew(val, rockchip->reg_base + busdev); - else if (size == 1) - writeb(val, rockchip->reg_base + busdev); - else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - int size, u32 *val) -{ - struct rockchip_pcie *rockchip = bus->sysdata; - - if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) { - *val = 0xffffffff; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - if (bus->number == rockchip->root_bus_nr) - return rockchip_pcie_rd_own_conf(rockchip, where, size, val); - - return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val); -} - -static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) -{ - struct rockchip_pcie *rockchip = bus->sysdata; - - if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == rockchip->root_bus_nr) - return rockchip_pcie_wr_own_conf(rockchip, where, size, val); - - return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val); -} - -static struct pci_ops rockchip_pcie_ops = { - .read = rockchip_pcie_rd_conf, - .write = rockchip_pcie_wr_conf, -}; - -static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) -{ - int curr; - u32 status, scale, power; - - if (IS_ERR(rockchip->vpcie3v3)) - return; - - /* - * Set RC's captured slot power limit and scale if - * vpcie3v3 available. The default values are both zero - * which means the software should set these two according - * to the actual power supply. 
- */ - curr = regulator_get_current_limit(rockchip->vpcie3v3); - if (curr <= 0) - return; - - scale = 3; /* 0.001x */ - curr = curr / 1000; /* convert to mA */ - power = (curr * 3300) / 1000; /* milliwatt */ - while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { - if (!scale) { - dev_warn(rockchip->dev, "invalid power supply\n"); - return; - } - scale--; - power = power / 10; - } - - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); - status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | - (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); -} - -/** - * rockchip_pcie_init_port - Initialize hardware - * @rockchip: PCIe port information - */ -static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int err, i; - u32 status; - - gpiod_set_value_cansleep(rockchip->ep_gpio, 0); - - err = reset_control_assert(rockchip->aclk_rst); - if (err) { - dev_err(dev, "assert aclk_rst err %d\n", err); - return err; - } - - err = reset_control_assert(rockchip->pclk_rst); - if (err) { - dev_err(dev, "assert pclk_rst err %d\n", err); - return err; - } - - err = reset_control_assert(rockchip->pm_rst); - if (err) { - dev_err(dev, "assert pm_rst err %d\n", err); - return err; - } - - for (i = 0; i < MAX_LANE_NUM; i++) { - err = phy_init(rockchip->phys[i]); - if (err) { - dev_err(dev, "init phy%d err %d\n", i, err); - goto err_exit_phy; - } - } - - err = reset_control_assert(rockchip->core_rst); - if (err) { - dev_err(dev, "assert core_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->mgmt_rst); - if (err) { - dev_err(dev, "assert mgmt_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->mgmt_sticky_rst); - if (err) { - dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->pipe_rst); - if (err) { - dev_err(dev, "assert pipe_rst err %d\n", err); - 
goto err_exit_phy; - } - - udelay(10); - - err = reset_control_deassert(rockchip->pm_rst); - if (err) { - dev_err(dev, "deassert pm_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_deassert(rockchip->aclk_rst); - if (err) { - dev_err(dev, "deassert aclk_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_deassert(rockchip->pclk_rst); - if (err) { - dev_err(dev, "deassert pclk_rst err %d\n", err); - goto err_exit_phy; - } - - if (rockchip->link_gen == 2) - rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2, - PCIE_CLIENT_CONFIG); - else - rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, - PCIE_CLIENT_CONFIG); - - rockchip_pcie_write(rockchip, - PCIE_CLIENT_CONF_ENABLE | - PCIE_CLIENT_LINK_TRAIN_ENABLE | - PCIE_CLIENT_ARI_ENABLE | - PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) | - PCIE_CLIENT_MODE_RC, - PCIE_CLIENT_CONFIG); - - for (i = 0; i < MAX_LANE_NUM; i++) { - err = phy_power_on(rockchip->phys[i]); - if (err) { - dev_err(dev, "power on phy%d err %d\n", i, err); - goto err_power_off_phy; - } - } - - /* - * Please don't reorder the deassert sequence of the following - * four reset pins. - */ - err = reset_control_deassert(rockchip->mgmt_sticky_rst); - if (err) { - dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->core_rst); - if (err) { - dev_err(dev, "deassert core_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->mgmt_rst); - if (err) { - dev_err(dev, "deassert mgmt_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->pipe_rst); - if (err) { - dev_err(dev, "deassert pipe_rst err %d\n", err); - goto err_power_off_phy; - } - - /* Fix the transmitted FTS count desired to exit from L0s. 
*/ - status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); - status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | - (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); - rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); - - rockchip_pcie_set_power_limit(rockchip); - - /* Set RC's clock architecture as common clock */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= PCI_EXP_LNKSTA_SLC << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); - - /* Set RC's RCB to 128 */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= PCI_EXP_LNKCTL_RCB; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); - - /* Enable Gen1 training */ - rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, - PCIE_CLIENT_CONFIG); - - gpiod_set_value_cansleep(rockchip->ep_gpio, 1); - - /* 500ms timeout value should be enough for Gen1/2 training */ - err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, - status, PCIE_LINK_UP(status), 20, - 500 * USEC_PER_MSEC); - if (err) { - dev_err(dev, "PCIe link training gen1 timeout!\n"); - goto err_power_off_phy; - } - - if (rockchip->link_gen == 2) { - /* - * Enable retrain for gen2. This should be configured only after - * gen1 finished. 
- */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= PCI_EXP_LNKCTL_RL; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); - - err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, - status, PCIE_LINK_IS_GEN2(status), 20, - 500 * USEC_PER_MSEC); - if (err) - dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); - } - - /* Check the final link width from negotiated lane counter from MGMT */ - status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); - status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> - PCIE_CORE_PL_CONF_LANE_SHIFT); - dev_dbg(dev, "current link width is x%d\n", status); - - /* Power off unused lane(s) */ - rockchip->lanes_map = rockchip_pcie_lane_map(rockchip); - for (i = 0; i < MAX_LANE_NUM; i++) { - if (!(rockchip->lanes_map & BIT(i))) { - dev_dbg(dev, "idling lane %d\n", i); - phy_power_off(rockchip->phys[i]); - } - } - - rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, - PCIE_CORE_CONFIG_VENDOR); - rockchip_pcie_write(rockchip, - PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, - PCIE_RC_CONFIG_RID_CCR); - - /* Clear THP cap's next cap pointer to remove L1 substate cap */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP); - status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP); - - /* Clear L0s from RC's link cap */ - if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); - status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); - } - - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); - status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; - status |= PCIE_RC_CONFIG_DCSR_MPS_256; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); - - return 0; -err_power_off_phy: - while (i--) - phy_power_off(rockchip->phys[i]); - i = MAX_LANE_NUM; -err_exit_phy: - while (i--) - 
phy_exit(rockchip->phys[i]); - return err; -} - -static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip) -{ - int i; - - for (i = 0; i < MAX_LANE_NUM; i++) { - /* inactive lanes are already powered off */ - if (rockchip->lanes_map & BIT(i)) - phy_power_off(rockchip->phys[i]); - phy_exit(rockchip->phys[i]); - } -} - -static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) -{ - struct rockchip_pcie *rockchip = arg; - struct device *dev = rockchip->dev; - u32 reg; - u32 sub_reg; - - reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); - if (reg & PCIE_CLIENT_INT_LOCAL) { - dev_dbg(dev, "local interrupt received\n"); - sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS); - if (sub_reg & PCIE_CORE_INT_PRFPE) - dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n"); - - if (sub_reg & PCIE_CORE_INT_CRFPE) - dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n"); - - if (sub_reg & PCIE_CORE_INT_RRPE) - dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n"); - - if (sub_reg & PCIE_CORE_INT_PRFO) - dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n"); - - if (sub_reg & PCIE_CORE_INT_CRFO) - dev_dbg(dev, "overflow occurred in the completion receive FIFO\n"); - - if (sub_reg & PCIE_CORE_INT_RT) - dev_dbg(dev, "replay timer timed out\n"); - - if (sub_reg & PCIE_CORE_INT_RTR) - dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n"); - - if (sub_reg & PCIE_CORE_INT_PE) - dev_dbg(dev, "phy error detected on receive side\n"); - - if (sub_reg & PCIE_CORE_INT_MTR) - dev_dbg(dev, "malformed TLP received from the link\n"); - - if (sub_reg & PCIE_CORE_INT_UCR) - dev_dbg(dev, "malformed TLP received from the link\n"); - - if (sub_reg & PCIE_CORE_INT_FCE) - dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); - - if (sub_reg & PCIE_CORE_INT_CT) - dev_dbg(dev, "a request timed out 
waiting for completion\n"); - - if (sub_reg & PCIE_CORE_INT_UTC) - dev_dbg(dev, "unmapped TC error\n"); - - if (sub_reg & PCIE_CORE_INT_MMVC) - dev_dbg(dev, "MSI mask register changes\n"); - - rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS); - } else if (reg & PCIE_CLIENT_INT_PHY) { - dev_dbg(dev, "phy link changes\n"); - rockchip_pcie_update_txcredit_mui(rockchip); - rockchip_pcie_clr_bw_int(rockchip); - } - - rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL, - PCIE_CLIENT_INT_STATUS); - - return IRQ_HANDLED; -} - -static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) -{ - struct rockchip_pcie *rockchip = arg; - struct device *dev = rockchip->dev; - u32 reg; - - reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); - if (reg & PCIE_CLIENT_INT_LEGACY_DONE) - dev_dbg(dev, "legacy done interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_MSG) - dev_dbg(dev, "message done interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_HOT_RST) - dev_dbg(dev, "hot reset interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_DPA) - dev_dbg(dev, "dpa interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_FATAL_ERR) - dev_dbg(dev, "fatal error interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_NFATAL_ERR) - dev_dbg(dev, "no fatal error interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_CORR_ERR) - dev_dbg(dev, "correctable error interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_PHY) - dev_dbg(dev, "phy interrupt received\n"); - - rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE | - PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST | - PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR | - PCIE_CLIENT_INT_NFATAL_ERR | - PCIE_CLIENT_INT_CORR_ERR | - PCIE_CLIENT_INT_PHY), - PCIE_CLIENT_INT_STATUS); - - return IRQ_HANDLED; -} - -static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); - 
struct device *dev = rockchip->dev; - u32 reg; - u32 hwirq; - u32 virq; - - chained_irq_enter(chip, desc); - - reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); - reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT; - - while (reg) { - hwirq = ffs(reg) - 1; - reg &= ~BIT(hwirq); - - virq = irq_find_mapping(rockchip->irq_domain, hwirq); - if (virq) - generic_handle_irq(virq); - else - dev_err(dev, "unexpected IRQ, INT%d\n", hwirq); - } - - chained_irq_exit(chip, desc); -} +#include "pcie-rockchip.h" -static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip) +int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; struct phy *phy; @@ -948,452 +56,22 @@ static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip) return 0; } +EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys); -static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) -{ - int irq, err; - struct device *dev = rockchip->dev; - struct platform_device *pdev = to_platform_device(dev); - - irq = platform_get_irq_byname(pdev, "sys"); - if (irq < 0) { - dev_err(dev, "missing sys IRQ resource\n"); - return irq; - } - - err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, - IRQF_SHARED, "pcie-sys", rockchip); - if (err) { - dev_err(dev, "failed to request PCIe subsystem IRQ\n"); - return err; - } - - irq = platform_get_irq_byname(pdev, "legacy"); - if (irq < 0) { - dev_err(dev, "missing legacy IRQ resource\n"); - return irq; - } - - irq_set_chained_handler_and_data(irq, - rockchip_pcie_legacy_int_handler, - rockchip); - - irq = platform_get_irq_byname(pdev, "client"); - if (irq < 0) { - dev_err(dev, "missing client IRQ resource\n"); - return irq; - } - - err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, - IRQF_SHARED, "pcie-client", rockchip); - if (err) { - dev_err(dev, "failed to request PCIe client IRQ\n"); - return err; - } - - return 0; -} - -/** - * rockchip_pcie_parse_dt - Parse Device Tree - * 
@rockchip: PCIe port information - * - * Return: '0' on success and error value on failure - */ -static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - struct platform_device *pdev = to_platform_device(dev); - struct device_node *node = dev->of_node; - struct resource *regs; - int err; - - regs = platform_get_resource_byname(pdev, - IORESOURCE_MEM, - "axi-base"); - rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs); - if (IS_ERR(rockchip->reg_base)) - return PTR_ERR(rockchip->reg_base); - - regs = platform_get_resource_byname(pdev, - IORESOURCE_MEM, - "apb-base"); - rockchip->apb_base = devm_ioremap_resource(dev, regs); - if (IS_ERR(rockchip->apb_base)) - return PTR_ERR(rockchip->apb_base); - - err = rockchip_pcie_get_phys(rockchip); - if (err) - return err; - - rockchip->lanes = 1; - err = of_property_read_u32(node, "num-lanes", &rockchip->lanes); - if (!err && (rockchip->lanes == 0 || - rockchip->lanes == 3 || - rockchip->lanes > 4)) { - dev_warn(dev, "invalid num-lanes, default to use one lane\n"); - rockchip->lanes = 1; - } - - rockchip->link_gen = of_pci_get_max_link_speed(node); - if (rockchip->link_gen < 0 || rockchip->link_gen > 2) - rockchip->link_gen = 2; - - rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core"); - if (IS_ERR(rockchip->core_rst)) { - if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) - dev_err(dev, "missing core reset property in node\n"); - return PTR_ERR(rockchip->core_rst); - } - - rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt"); - if (IS_ERR(rockchip->mgmt_rst)) { - if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) - dev_err(dev, "missing mgmt reset property in node\n"); - return PTR_ERR(rockchip->mgmt_rst); - } - - rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev, - "mgmt-sticky"); - if (IS_ERR(rockchip->mgmt_sticky_rst)) { - if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) - dev_err(dev, "missing mgmt-sticky reset 
property in node\n"); - return PTR_ERR(rockchip->mgmt_sticky_rst); - } - - rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe"); - if (IS_ERR(rockchip->pipe_rst)) { - if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pipe reset property in node\n"); - return PTR_ERR(rockchip->pipe_rst); - } - - rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm"); - if (IS_ERR(rockchip->pm_rst)) { - if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pm reset property in node\n"); - return PTR_ERR(rockchip->pm_rst); - } - - rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk"); - if (IS_ERR(rockchip->pclk_rst)) { - if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pclk reset property in node\n"); - return PTR_ERR(rockchip->pclk_rst); - } - - rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk"); - if (IS_ERR(rockchip->aclk_rst)) { - if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) - dev_err(dev, "missing aclk reset property in node\n"); - return PTR_ERR(rockchip->aclk_rst); - } - - rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); - if (IS_ERR(rockchip->ep_gpio)) { - dev_err(dev, "missing ep-gpios property in node\n"); - return PTR_ERR(rockchip->ep_gpio); - } - - rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); - if (IS_ERR(rockchip->aclk_pcie)) { - dev_err(dev, "aclk clock not found\n"); - return PTR_ERR(rockchip->aclk_pcie); - } - - rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf"); - if (IS_ERR(rockchip->aclk_perf_pcie)) { - dev_err(dev, "aclk_perf clock not found\n"); - return PTR_ERR(rockchip->aclk_perf_pcie); - } - - rockchip->hclk_pcie = devm_clk_get(dev, "hclk"); - if (IS_ERR(rockchip->hclk_pcie)) { - dev_err(dev, "hclk clock not found\n"); - return PTR_ERR(rockchip->hclk_pcie); - } - - rockchip->clk_pcie_pm = devm_clk_get(dev, "pm"); - if (IS_ERR(rockchip->clk_pcie_pm)) { - dev_err(dev, "pm clock not found\n"); - return 
PTR_ERR(rockchip->clk_pcie_pm); - } - - err = rockchip_pcie_setup_irq(rockchip); - if (err) - return err; - - rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); - if (IS_ERR(rockchip->vpcie12v)) { - if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(dev, "no vpcie12v regulator found\n"); - } - - rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); - if (IS_ERR(rockchip->vpcie3v3)) { - if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(dev, "no vpcie3v3 regulator found\n"); - } - - rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); - if (IS_ERR(rockchip->vpcie1v8)) { - if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(dev, "no vpcie1v8 regulator found\n"); - } - - rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); - if (IS_ERR(rockchip->vpcie0v9)) { - if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(dev, "no vpcie0v9 regulator found\n"); - } - - return 0; -} - -static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int err; - - if (!IS_ERR(rockchip->vpcie12v)) { - err = regulator_enable(rockchip->vpcie12v); - if (err) { - dev_err(dev, "fail to enable vpcie12v regulator\n"); - goto err_out; - } - } - - if (!IS_ERR(rockchip->vpcie3v3)) { - err = regulator_enable(rockchip->vpcie3v3); - if (err) { - dev_err(dev, "fail to enable vpcie3v3 regulator\n"); - goto err_disable_12v; - } - } - - if (!IS_ERR(rockchip->vpcie1v8)) { - err = regulator_enable(rockchip->vpcie1v8); - if (err) { - dev_err(dev, "fail to enable vpcie1v8 regulator\n"); - goto err_disable_3v3; - } - } - - if (!IS_ERR(rockchip->vpcie0v9)) { - err = regulator_enable(rockchip->vpcie0v9); - if (err) { - dev_err(dev, "fail to enable vpcie0v9 regulator\n"); - goto err_disable_1v8; - } - } - - return 0; - -err_disable_1v8: - if (!IS_ERR(rockchip->vpcie1v8)) - 
regulator_disable(rockchip->vpcie1v8); -err_disable_3v3: - if (!IS_ERR(rockchip->vpcie3v3)) - regulator_disable(rockchip->vpcie3v3); -err_disable_12v: - if (!IS_ERR(rockchip->vpcie12v)) - regulator_disable(rockchip->vpcie12v); -err_out: - return err; -} - -static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip) -{ - rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) & - (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK); - rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT), - PCIE_CORE_INT_MASK); - - rockchip_pcie_enable_bw_int(rockchip); -} - -static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - - return 0; -} - -static const struct irq_domain_ops intx_domain_ops = { - .map = rockchip_pcie_intx_map, -}; - -static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - struct device_node *intc = of_get_next_child(dev->of_node, NULL); - - if (!intc) { - dev_err(dev, "missing child interrupt-controller node\n"); - return -EINVAL; - } - - rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &intx_domain_ops, rockchip); - if (!rockchip->irq_domain) { - dev_err(dev, "failed to get a INTx IRQ domain\n"); - return -EINVAL; - } - - return 0; -} - -static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip, - int region_no, int type, u8 num_pass_bits, - u32 lower_addr, u32 upper_addr) -{ - u32 ob_addr_0; - u32 ob_addr_1; - u32 ob_desc_0; - u32 aw_offset; - - if (region_no >= MAX_AXI_WRAPPER_REGION_NUM) - return -EINVAL; - if (num_pass_bits + 1 < 8) - return -EINVAL; - if (num_pass_bits > 63) - return -EINVAL; - if (region_no == 0) { - if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits)) - return -EINVAL; - } - if (region_no != 0) { - if (AXI_REGION_SIZE < (2ULL << num_pass_bits)) - return -EINVAL; - } - - 
aw_offset = (region_no << OB_REG_SIZE_SHIFT); - - ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS; - ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR; - ob_addr_1 = upper_addr; - ob_desc_0 = (1 << 23 | type); - - rockchip_pcie_write(rockchip, ob_addr_0, - PCIE_CORE_OB_REGION_ADDR0 + aw_offset); - rockchip_pcie_write(rockchip, ob_addr_1, - PCIE_CORE_OB_REGION_ADDR1 + aw_offset); - rockchip_pcie_write(rockchip, ob_desc_0, - PCIE_CORE_OB_REGION_DESC0 + aw_offset); - rockchip_pcie_write(rockchip, 0, - PCIE_CORE_OB_REGION_DESC1 + aw_offset); - - return 0; -} - -static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, - int region_no, u8 num_pass_bits, - u32 lower_addr, u32 upper_addr) -{ - u32 ib_addr_0; - u32 ib_addr_1; - u32 aw_offset; - - if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM) - return -EINVAL; - if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED) - return -EINVAL; - if (num_pass_bits > 63) - return -EINVAL; - - aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT); - - ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS; - ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR; - ib_addr_1 = upper_addr; - - rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset); - rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset); - - return 0; -} - -static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int offset; - int err; - int reg_no; - - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE0_CFG); - - for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { - err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, - AXI_WRAPPER_MEM_WRITE, - 20 - 1, - rockchip->mem_bus_addr + - (reg_no << 20), - 0); - if (err) { - dev_err(dev, "program RC mem outbound ATU failed\n"); - return err; - } - } - - err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); - if (err) { - dev_err(dev, "program RC mem 
inbound ATU failed\n"); - return err; - } - - offset = rockchip->mem_size >> 20; - for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) { - err = rockchip_pcie_prog_ob_atu(rockchip, - reg_no + 1 + offset, - AXI_WRAPPER_IO_WRITE, - 20 - 1, - rockchip->io_bus_addr + - (reg_no << 20), - 0); - if (err) { - dev_err(dev, "program RC io outbound ATU failed\n"); - return err; - } - } - - /* assign message regions */ - rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset, - AXI_WRAPPER_NOR_MSG, - 20 - 1, 0, 0); - - rockchip->msg_bus_addr = rockchip->mem_bus_addr + - ((reg_no + offset) << 20); - return err; -} - -static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip) +void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip) { - u32 value; - int err; - - /* send PME_TURN_OFF message */ - writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF); + int i; - /* read LTSSM and wait for falling into L2 link state */ - err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0, - value, PCIE_LINK_IS_L2(value), 20, - jiffies_to_usecs(5 * HZ)); - if (err) { - dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n"); - return err; + for (i = 0; i < MAX_LANE_NUM; i++) { + /* inactive lanes are already powered off */ + if (rockchip->lanes_map & BIT(i)) + phy_power_off(rockchip->phys[i]); + phy_exit(rockchip->phys[i]); } - - return 0; } +EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys); -static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) +int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int err; @@ -1432,8 +110,9 @@ err_aclk_perf_pcie: clk_disable_unprepare(rockchip->aclk_pcie); return err; } +EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks); -static void rockchip_pcie_disable_clocks(void *data) +void rockchip_pcie_disable_clocks(void *data) { struct rockchip_pcie *rockchip = data; @@ -1442,267 +121,25 @@ static void rockchip_pcie_disable_clocks(void *data) 
clk_disable_unprepare(rockchip->aclk_perf_pcie); clk_disable_unprepare(rockchip->aclk_pcie); } +EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks); -static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) -{ - struct rockchip_pcie *rockchip = dev_get_drvdata(dev); - int ret; - - /* disable core and cli int since we don't need to ack PME_ACK */ - rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) | - PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK); - rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK); - - ret = rockchip_pcie_wait_l2(rockchip); - if (ret) { - rockchip_pcie_enable_interrupts(rockchip); - return ret; - } - - rockchip_pcie_deinit_phys(rockchip); - - rockchip_pcie_disable_clocks(rockchip); - - if (!IS_ERR(rockchip->vpcie0v9)) - regulator_disable(rockchip->vpcie0v9); - - return ret; -} - -static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) -{ - struct rockchip_pcie *rockchip = dev_get_drvdata(dev); - int err; - - if (!IS_ERR(rockchip->vpcie0v9)) { - err = regulator_enable(rockchip->vpcie0v9); - if (err) { - dev_err(dev, "fail to enable vpcie0v9 regulator\n"); - return err; - } - } - - err = rockchip_pcie_enable_clocks(rockchip); - if (err) - goto err_disable_0v9; - - err = rockchip_pcie_init_port(rockchip); - if (err) - goto err_pcie_resume; - - err = rockchip_pcie_cfg_atu(rockchip); - if (err) - goto err_err_deinit_port; - - /* Need this to enter L1 again */ - rockchip_pcie_update_txcredit_mui(rockchip); - rockchip_pcie_enable_interrupts(rockchip); - - return 0; - -err_err_deinit_port: - rockchip_pcie_deinit_phys(rockchip); -err_pcie_resume: - rockchip_pcie_disable_clocks(rockchip); -err_disable_0v9: - if (!IS_ERR(rockchip->vpcie0v9)) - regulator_disable(rockchip->vpcie0v9); - return err; -} - -static int rockchip_pcie_probe(struct platform_device *pdev) -{ - struct rockchip_pcie *rockchip; - struct device *dev = &pdev->dev; - struct pci_bus *bus, *child; - struct pci_host_bridge *bridge; - struct 
resource_entry *win; - resource_size_t io_base; - struct resource *mem; - struct resource *io; - int err; - - LIST_HEAD(res); - - if (!dev->of_node) - return -ENODEV; - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip)); - if (!bridge) - return -ENOMEM; - - rockchip = pci_host_bridge_priv(bridge); - - platform_set_drvdata(pdev, rockchip); - rockchip->dev = dev; - - err = rockchip_pcie_parse_dt(rockchip); - if (err) - return err; - - err = rockchip_pcie_enable_clocks(rockchip); - if (err) - return err; - - err = rockchip_pcie_set_vpcie(rockchip); - if (err) { - dev_err(dev, "failed to set vpcie regulator\n"); - goto err_set_vpcie; - } - - err = rockchip_pcie_init_port(rockchip); - if (err) - goto err_vpcie; - - rockchip_pcie_enable_interrupts(rockchip); - - err = rockchip_pcie_init_irq_domain(rockchip); - if (err < 0) - goto err_deinit_port; - - err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, - &res, &io_base); - if (err) - goto err_remove_irq_domain; - - err = devm_request_pci_bus_resources(dev, &res); - if (err) - goto err_free_res; - - /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry(win, &res) { - switch (resource_type(win->res)) { - case IORESOURCE_IO: - io = win->res; - io->name = "I/O"; - rockchip->io_size = resource_size(io); - rockchip->io_bus_addr = io->start - win->offset; - err = pci_remap_iospace(io, io_base); - if (err) { - dev_warn(dev, "error %d: failed to map resource %pR\n", - err, io); - continue; - } - rockchip->io = io; - break; - case IORESOURCE_MEM: - mem = win->res; - mem->name = "MEM"; - rockchip->mem_size = resource_size(mem); - rockchip->mem_bus_addr = mem->start - win->offset; - break; - case IORESOURCE_BUS: - rockchip->root_bus_nr = win->res->start; - break; - default: - continue; - } - } - - err = rockchip_pcie_cfg_atu(rockchip); - if (err) - goto err_unmap_iospace; - - rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); - if (!rockchip->msg_region) { - err = 
-ENOMEM; - goto err_unmap_iospace; - } - - list_splice_init(&res, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = rockchip; - bridge->busnr = 0; - bridge->ops = &rockchip_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - err = pci_scan_root_bus_bridge(bridge); - if (err < 0) - goto err_unmap_iospace; - - bus = bridge->bus; - - rockchip->root_bus = bus; - - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - return 0; - -err_unmap_iospace: - pci_unmap_iospace(rockchip->io); -err_free_res: - pci_free_resource_list(&res); -err_remove_irq_domain: - irq_domain_remove(rockchip->irq_domain); -err_deinit_port: - rockchip_pcie_deinit_phys(rockchip); -err_vpcie: - if (!IS_ERR(rockchip->vpcie12v)) - regulator_disable(rockchip->vpcie12v); - if (!IS_ERR(rockchip->vpcie3v3)) - regulator_disable(rockchip->vpcie3v3); - if (!IS_ERR(rockchip->vpcie1v8)) - regulator_disable(rockchip->vpcie1v8); - if (!IS_ERR(rockchip->vpcie0v9)) - regulator_disable(rockchip->vpcie0v9); -err_set_vpcie: - rockchip_pcie_disable_clocks(rockchip); - return err; -} - -static int rockchip_pcie_remove(struct platform_device *pdev) +void rockchip_pcie_cfg_configuration_accesses( + struct rockchip_pcie *rockchip, u32 type) { - struct device *dev = &pdev->dev; - struct rockchip_pcie *rockchip = dev_get_drvdata(dev); - - pci_stop_root_bus(rockchip->root_bus); - pci_remove_root_bus(rockchip->root_bus); - pci_unmap_iospace(rockchip->io); - irq_domain_remove(rockchip->irq_domain); - - rockchip_pcie_deinit_phys(rockchip); - - rockchip_pcie_disable_clocks(rockchip); + u32 ob_desc_0; - if (!IS_ERR(rockchip->vpcie12v)) - regulator_disable(rockchip->vpcie12v); - if (!IS_ERR(rockchip->vpcie3v3)) - regulator_disable(rockchip->vpcie3v3); - if (!IS_ERR(rockchip->vpcie1v8)) - regulator_disable(rockchip->vpcie1v8); - if 
(!IS_ERR(rockchip->vpcie0v9)) - regulator_disable(rockchip->vpcie0v9); + /* Configuration Accesses for region 0 */ + rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); - return 0; + rockchip_pcie_write(rockchip, + (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), + PCIE_CORE_OB_REGION_ADDR0); + rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, + PCIE_CORE_OB_REGION_ADDR1); + ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); + ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); + ob_desc_0 |= (type | (0x1 << 23)); + rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); + rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); } - -static const struct dev_pm_ops rockchip_pcie_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, - rockchip_pcie_resume_noirq) -}; - -static const struct of_device_id rockchip_pcie_of_match[] = { - { .compatible = "rockchip,rk3399-pcie", }, - {} -}; -MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match); - -static struct platform_driver rockchip_pcie_driver = { - .driver = { - .name = "rockchip-pcie", - .of_match_table = rockchip_pcie_of_match, - .pm = &rockchip_pcie_pm_ops, - }, - .probe = rockchip_pcie_probe, - .remove = rockchip_pcie_remove, -}; -module_platform_driver(rockchip_pcie_driver); - -MODULE_AUTHOR("Rockchip Inc"); -MODULE_DESCRIPTION("Rockchip AXI PCIe driver"); -MODULE_LICENSE("GPL v2"); +EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses); diff --git a/drivers/pci/host/pcie-rockchip.h b/drivers/pci/host/pcie-rockchip.h new file mode 100644 index 000000000000..d27941e66e12 --- /dev/null +++ b/drivers/pci/host/pcie-rockchip.h @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip AXI PCIe controller driver + * + * Copyright (c) 2018 Rockchip, Inc. + * + * Author: Shawn Lin + * + */ + +#ifndef _PCIE_ROCKCHIP_H +#define _PCIE_ROCKCHIP_H + +#include +#include + +/* + * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16 + * bits. 
This allows atomic updates of the register without locking. + */ +#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) +#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) + +#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4) +#define MAX_LANE_NUM 4 + +#define PCIE_CLIENT_BASE 0x0 +#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) +#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001) +#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002) +#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008) +#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x)) +#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) +#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0) +#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080) +#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c) +#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0) +#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18 +#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19 +#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48) +#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000 +#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000 +#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c) +#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50) +#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5) +#define PCIE_CLIENT_INTR_SHIFT 5 +#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15) +#define PCIE_CLIENT_INT_MSG BIT(14) +#define PCIE_CLIENT_INT_HOT_RST BIT(13) +#define PCIE_CLIENT_INT_DPA BIT(12) +#define PCIE_CLIENT_INT_FATAL_ERR BIT(11) +#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10) +#define PCIE_CLIENT_INT_CORR_ERR BIT(9) +#define PCIE_CLIENT_INT_INTD BIT(8) +#define PCIE_CLIENT_INT_INTC BIT(7) +#define PCIE_CLIENT_INT_INTB BIT(6) +#define PCIE_CLIENT_INT_INTA BIT(5) +#define PCIE_CLIENT_INT_LOCAL BIT(4) +#define PCIE_CLIENT_INT_UDMA BIT(3) +#define PCIE_CLIENT_INT_PHY BIT(2) +#define PCIE_CLIENT_INT_HOT_PLUG BIT(1) +#define PCIE_CLIENT_INT_PWR_STCG BIT(0) + +#define PCIE_CLIENT_INT_LEGACY \ + (PCIE_CLIENT_INT_INTA 
| PCIE_CLIENT_INT_INTB | \ + PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD) + +#define PCIE_CLIENT_INT_CLI \ + (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \ + PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \ + PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \ + PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \ + PCIE_CLIENT_INT_PHY) + +#define PCIE_CORE_CTRL_MGMT_BASE 0x900000 +#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000) +#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008 +#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018 +#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006 +#define PCIE_CORE_PL_CONF_LANE_SHIFT 1 +#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004) +#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8) +#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8 +#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff +#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020) +#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000 +#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16 +#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \ + (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT) +#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200) +#define PCIE_CORE_LANE_MAP_MASK 0x0000000f +#define PCIE_CORE_LANE_MAP_REVERSE BIT(16) +#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c) +#define PCIE_CORE_INT_PRFPE BIT(0) +#define PCIE_CORE_INT_CRFPE BIT(1) +#define PCIE_CORE_INT_RRPE BIT(2) +#define PCIE_CORE_INT_PRFO BIT(3) +#define PCIE_CORE_INT_CRFO BIT(4) +#define PCIE_CORE_INT_RT BIT(5) +#define PCIE_CORE_INT_RTR BIT(6) +#define PCIE_CORE_INT_PE BIT(7) +#define PCIE_CORE_INT_MTR BIT(8) +#define PCIE_CORE_INT_UCR BIT(9) +#define PCIE_CORE_INT_FCE BIT(10) +#define PCIE_CORE_INT_CT BIT(11) +#define PCIE_CORE_INT_UTC BIT(18) +#define PCIE_CORE_INT_MMVC BIT(19) +#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44) +#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210) +#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 
0x300) + +#define PCIE_CORE_INT \ + (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \ + PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \ + PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \ + PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \ + PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \ + PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ + PCIE_CORE_INT_MMVC) + +#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 +#define PCIE_RC_CONFIG_BASE 0xa00000 +#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) +#define PCIE_RC_CONFIG_SCC_SHIFT 16 +#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) +#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 +#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff +#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 +#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) +#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) +#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) +#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) +#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) +#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) +#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) +#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) +#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) + +#define PCIE_CORE_AXI_CONF_BASE 0xc00000 +#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0) +#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f +#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00 +#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4) +#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8) +#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc) + +#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800 +#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0) +#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f +#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00 +#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4) + +/* Size of one AXI Region (not Region 0) */ +#define AXI_REGION_SIZE BIT(20) +/* Size of Region 0, 
equal to sum of sizes of other regions */ +#define AXI_REGION_0_SIZE (32 * (0x1 << 20)) +#define OB_REG_SIZE_SHIFT 5 +#define IB_ROOT_PORT_REG_SIZE_SHIFT 3 +#define AXI_WRAPPER_IO_WRITE 0x6 +#define AXI_WRAPPER_MEM_WRITE 0x2 +#define AXI_WRAPPER_TYPE0_CFG 0xa +#define AXI_WRAPPER_TYPE1_CFG 0xb +#define AXI_WRAPPER_NOR_MSG 0xc + +#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 +#define MIN_AXI_ADDR_BITS_PASSED 8 +#define PCIE_RC_SEND_PME_OFF 0x11960 +#define ROCKCHIP_VENDOR_ID 0x1d87 +#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20) +#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15) +#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12) +#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0) +#define PCIE_ECAM_ADDR(bus, dev, func, reg) \ + (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \ + PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg)) +#define PCIE_LINK_IS_L2(x) \ + (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2) +#define PCIE_LINK_UP(x) \ + (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP) +#define PCIE_LINK_IS_GEN2(x) \ + (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G) + +#define RC_REGION_0_ADDR_TRANS_H 0x00000000 +#define RC_REGION_0_ADDR_TRANS_L 0x00000000 +#define RC_REGION_0_PASS_BITS (25 - 1) +#define RC_REGION_0_TYPE_MASK GENMASK(3, 0) +#define MAX_AXI_WRAPPER_REGION_NUM 33 + +struct rockchip_pcie { + void __iomem *reg_base; /* DT axi-base */ + void __iomem *apb_base; /* DT apb-base */ + bool legacy_phy; + struct phy *phys[MAX_LANE_NUM]; + struct reset_control *core_rst; + struct reset_control *mgmt_rst; + struct reset_control *mgmt_sticky_rst; + struct reset_control *pipe_rst; + struct reset_control *pm_rst; + struct reset_control *aclk_rst; + struct reset_control *pclk_rst; + struct clk *aclk_pcie; + struct clk *aclk_perf_pcie; + struct clk *hclk_pcie; + struct clk *clk_pcie_pm; + struct regulator *vpcie12v; /* 12V power supply */ + struct regulator *vpcie3v3; /* 3.3V power supply */ + struct regulator *vpcie1v8; /* 1.8V power 
supply */ + struct regulator *vpcie0v9; /* 0.9V power supply */ + struct gpio_desc *ep_gpio; + u32 lanes; + u8 lanes_map; + u8 root_bus_nr; + int link_gen; + struct device *dev; + struct irq_domain *irq_domain; + int offset; + struct pci_bus *root_bus; + struct resource *io; + phys_addr_t io_bus_addr; + u32 io_size; + void __iomem *msg_region; + u32 mem_size; + phys_addr_t msg_bus_addr; + phys_addr_t mem_bus_addr; +}; + +static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg) +{ + return readl(rockchip->apb_base + reg); +} + +static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val, + u32 reg) +{ + writel(val, rockchip->apb_base + reg); +} + +int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip); +void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip); +int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip); +void rockchip_pcie_disable_clocks(void *data); +void rockchip_pcie_cfg_configuration_accesses( + struct rockchip_pcie *rockchip, u32 type); + +#endif /* _PCIE_ROCKCHIP_H */ -- cgit v1.2.3 From 7c3b479eece64caefe1d9230a466c2f1ed587308 Mon Sep 17 00:00:00 2001 From: Minghuan Lian Date: Thu, 29 Mar 2018 09:53:01 +0800 Subject: MAINTAINERS: Update Layerscape PCIe driver maintainers list Change Layerscape PCIe driver maintainers' email address from freescale to nxp. 
Signed-off-by: Minghuan Lian Signed-off-by: Lorenzo Pieralisi Acked-by: Roy Zang --- MAINTAINERS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..c789798666bc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10711,9 +10711,9 @@ F: Documentation/devicetree/bindings/pci/cdns,*.txt F: drivers/pci/cadence/pcie-cadence* PCI DRIVER FOR FREESCALE LAYERSCAPE -M: Minghuan Lian -M: Mingkai Hu -M: Roy Zang +M: Minghuan Lian +M: Mingkai Hu +M: Roy Zang L: linuxppc-dev@lists.ozlabs.org L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org -- cgit v1.2.3 From fce45d114270afb51771d569b50721985ca763bb Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 26 Apr 2018 12:21:37 +0530 Subject: soundwire: Update email address for Vinod Update the email address for SoundWire maintainer Signed-off-by: Vinod Koul --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..eaee45919cb9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13095,7 +13095,7 @@ F: include/uapi/sound/ F: sound/ SOUND - COMPRESSED AUDIO -M: Vinod Koul +M: Vinod Koul L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Supported -- cgit v1.2.3 From 0eca353e7ae75a56d5a10cd1c0ff1f06d89e60e3 Mon Sep 17 00:00:00 2001 From: "Bryant G. Ly" Date: Wed, 25 Apr 2018 16:32:57 -0500 Subject: misc: IBM Virtual Management Channel Driver (VMC) This driver is a logical device which provides an interface between the hypervisor and a management partition. This interface is like a message passing interface. This management partition is intended to provide an alternative to HMC-based system management. 
VMC enables the Management LPAR to provide basic logical partition functions: - Logical Partition Configuration - Boot, start, and stop actions for individual partitions - Display of partition status - Management of virtual Ethernet - Management of virtual Storage - Basic system management This driver is to be used for the POWER Virtual Management Channel Virtual Adapter on the PowerPC platform. It provides a character device which allows for both request/response and async message support through the /dev/ibmvmc node. Signed-off-by: Bryant G. Ly Reviewed-by: Steven Royer Reviewed-by: Adam Reznechek Reviewed-by: Randy Dunlap Tested-by: Taylor Jakobson Tested-by: Brad Warrum Cc: Greg Kroah-Hartman Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- Documentation/ioctl/ioctl-number.txt | 1 + Documentation/misc-devices/ibmvmc.rst | 226 +++ MAINTAINERS | 6 + arch/powerpc/include/asm/hvcall.h | 1 + drivers/misc/Kconfig | 14 + drivers/misc/Makefile | 1 + drivers/misc/ibmvmc.c | 2418 +++++++++++++++++++++++++++++++++ drivers/misc/ibmvmc.h | 209 +++ 8 files changed, 2876 insertions(+) create mode 100644 Documentation/misc-devices/ibmvmc.rst create mode 100644 drivers/misc/ibmvmc.c create mode 100644 drivers/misc/ibmvmc.h (limited to 'MAINTAINERS') diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 7f7413e597f3..be8a98b9e6c4 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -327,6 +327,7 @@ Code Seq#(hex) Include File Comments 0xCA 80-BF uapi/scsi/cxlflash_ioctl.h 0xCB 00-1F CBM serial IEC bus in development: +0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver 0xCD 01 linux/reiserfs_fs.h 0xCF 02 fs/cifs/ioctl.c 0xDB 00-0F drivers/char/mwave/mwavepub.h diff --git a/Documentation/misc-devices/ibmvmc.rst b/Documentation/misc-devices/ibmvmc.rst new file mode 100644 index 000000000000..46ded79554d4 --- /dev/null +++ 
b/Documentation/misc-devices/ibmvmc.rst @@ -0,0 +1,226 @@ +.. SPDX-License-Identifier: GPL-2.0+ +====================================================== +IBM Virtual Management Channel Kernel Driver (IBMVMC) +====================================================== + +:Authors: + Dave Engebretsen , + Adam Reznechek , + Steven Royer , + Bryant G. Ly , + +Introduction +============ + +Note: Knowledge of virtualization technology is required to understand +this document. + +A good reference document would be: + +https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf + +The Virtual Management Channel (VMC) is a logical device which provides an +interface between the hypervisor and a management partition. This interface +is like a message passing interface. This management partition is intended +to provide an alternative to systems that use a Hardware Management +Console (HMC) - based system management. + +The primary hardware management solution that is developed by IBM relies +on an appliance server named the Hardware Management Console (HMC), +packaged as an external tower or rack-mounted personal computer. In a +Power Systems environment, a single HMC can manage multiple POWER +processor-based systems. + +Management Application +---------------------- + +In the management partition, a management application exists which enables +a system administrator to configure the system’s partitioning +characteristics via a command line interface (CLI) or Representational +State Transfer Application (REST API's). + +The management application runs on a Linux logical partition on a +POWER8 or newer processor-based server that is virtualized by PowerVM. +System configuration, maintenance, and control functions which +traditionally require an HMC can be implemented in the management +application using a combination of HMC to hypervisor interfaces and +existing operating system methods. 
This tool provides a subset of the +functions implemented by the HMC and enables basic partition configuration. +The set of HMC to hypervisor messages supported by the management +application component are passed to the hypervisor over a VMC interface, +which is defined below. + +The VMC enables the management partition to provide basic partitioning +functions: + +- Logical Partitioning Configuration +- Start, and stop actions for individual partitions +- Display of partition status +- Management of virtual Ethernet +- Management of virtual Storage +- Basic system management + +Virtual Management Channel (VMC) +-------------------------------- + +A logical device, called the Virtual Management Channel (VMC), is defined +for communicating between the management application and the hypervisor. It +basically creates the pipes that enable virtualization management +software. This device is presented to a designated management partition as +a virtual device. + +This communication device uses Command/Response Queue (CRQ) and the +Remote Direct Memory Access (RDMA) interfaces. A three-way handshake is +defined that must take place to establish that both the hypervisor and +management partition sides of the channel are running prior to +sending/receiving any of the protocol messages. + +This driver also utilizes Transport Event CRQs. CRQ messages are sent +when the hypervisor detects one of the peer partitions has abnormally +terminated, or one side has called H_FREE_CRQ to close their CRQ. +Two new classes of CRQ messages are introduced for the VMC device. VMC +Administrative messages are used for each partition using the VMC to +communicate capabilities to their partner. HMC Interface messages are used +for the actual flow of HMC messages between the management partition and +the hypervisor. As most HMC messages far exceed the size of a CRQ buffer, +a virtual DMA (RMDA) of the HMC message data is done prior to each HMC +Interface CRQ message. 
Only the management partition drives RDMA +operations; hypervisors never directly cause the movement of message data. + + +Terminology +----------- +RDMA + Remote Direct Memory Access is DMA transfer from the server to its + client or from the server to its partner partition. DMA refers + to both physical I/O to and from memory operations and to memory + to memory move operations. +CRQ + Command/Response Queue a facility which is used to communicate + between partner partitions. Transport events which are signaled + from the hypervisor to partition are also reported in this queue. + +Example Management Partition VMC Driver Interface +================================================= + +This section provides an example for the management application +implementation where a device driver is used to interface to the VMC +device. This driver consists of a new device, for example /dev/ibmvmc, +which provides interfaces to open, close, read, write, and perform +ioctl’s against the VMC device. + +VMC Interface Initialization +---------------------------- + +The device driver is responsible for initializing the VMC when the driver +is loaded. It first creates and initializes the CRQ. Next, an exchange of +VMC capabilities is performed to indicate the code version and number of +resources available in both the management partition and the hypervisor. +Finally, the hypervisor requests that the management partition create an +initial pool of VMC buffers, one buffer for each possible HMC connection, +which will be used for management application session initialization. +Prior to completion of this initialization sequence, the device returns +EBUSY to open() calls. EIO is returned for all open() failures. 
+ +:: + + Management Partition Hypervisor + CRQ INIT + ----------------------------------------> + CRQ INIT COMPLETE + <---------------------------------------- + CAPABILITIES + ----------------------------------------> + CAPABILITIES RESPONSE + <---------------------------------------- + ADD BUFFER (HMC IDX=0,1,..) _ + <---------------------------------------- | + ADD BUFFER RESPONSE | - Perform # HMCs Iterations + ----------------------------------------> - + +VMC Interface Open +------------------ + +After the basic VMC channel has been initialized, an HMC session level +connection can be established. The application layer performs an open() to +the VMC device and executes an ioctl() against it, indicating the HMC ID +(32 bytes of data) for this session. If the VMC device is in an invalid +state, EIO will be returned for the ioctl(). The device driver creates a +new HMC session value (ranging from 1 to 255) and HMC index value (starting +at index 0 and ranging to 254) for this HMC ID. The driver then does an +RDMA of the HMC ID to the hypervisor, and then sends an Interface Open +message to the hypervisor to establish the session over the VMC. After the +hypervisor receives this information, it sends Add Buffer messages to the +management partition to seed an initial pool of buffers for the new HMC +connection. Finally, the hypervisor sends an Interface Open Response +message, to indicate that it is ready for normal runtime messaging. 
The +following illustrates this VMC flow: + +:: + + Management Partition Hypervisor + RDMA HMC ID + ----------------------------------------> + Interface Open + ----------------------------------------> + Add Buffer _ + <---------------------------------------- | + Add Buffer Response | - Perform N Iterations + ----------------------------------------> - + Interface Open Response + <---------------------------------------- + +VMC Interface Runtime +--------------------- + +During normal runtime, the management application and the hypervisor +exchange HMC messages via the Signal VMC message and RDMA operations. When +sending data to the hypervisor, the management application performs a +write() to the VMC device, and the driver RDMA’s the data to the hypervisor +and then sends a Signal Message. If a write() is attempted before VMC +device buffers have been made available by the hypervisor, or no buffers +are currently available, EBUSY is returned in response to the write(). A +write() will return EIO for all other errors, such as an invalid device +state. When the hypervisor sends a message to the management, the data is +put into a VMC buffer and an Signal Message is sent to the VMC driver in +the management partition. The driver RDMA’s the buffer into the partition +and passes the data up to the appropriate management application via a +read() to the VMC device. The read() request blocks if there is no buffer +available to read. The management application may use select() to wait for +the VMC device to become ready with data to read. 
+ +:: + + Management Partition Hypervisor + MSG RDMA + ----------------------------------------> + SIGNAL MSG + ----------------------------------------> + SIGNAL MSG + <---------------------------------------- + MSG RDMA + <---------------------------------------- + +VMC Interface Close +------------------- + +HMC session level connections are closed by the management partition when +the application layer performs a close() against the device. This action +results in an Interface Close message flowing to the hypervisor, which +causes the session to be terminated. The device driver must free any +storage allocated for buffers for this HMC connection. + +:: + + Management Partition Hypervisor + INTERFACE CLOSE + ----------------------------------------> + INTERFACE CLOSE RESPONSE + <---------------------------------------- + +Additional Information +====================== + +For more information on the documentation for CRQ Messages, VMC Messages, +HMC interface Buffers, and signal messages please refer to the Linux on +Power Architecture Platform Reference. Section F. diff --git a/MAINTAINERS b/MAINTAINERS index 20032feaa9e7..4f5d8932a6c8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6757,6 +6757,12 @@ L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/ibmvscsi/ibmvfc* +IBM Power Virtual Management Channel Driver +M: Bryant G. 
Ly +M: Steven Royer +S: Supported +F: drivers/misc/ibmvmc.* + IBM Power Virtual SCSI Device Drivers M: Tyrel Datwyler L: linux-scsi@vger.kernel.org diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 2e2dddab5d65..662c8347d699 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -279,6 +279,7 @@ #define H_GET_MPP_X 0x314 #define H_SET_MODE 0x31C #define H_CLEAR_HPT 0x358 +#define H_REQUEST_VMC 0x360 #define H_RESIZE_HPT_PREPARE 0x36C #define H_RESIZE_HPT_COMMIT 0x370 #define H_REGISTER_PROC_TBL 0x37C diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 5d713008749b..3726eacdf65d 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -113,6 +113,20 @@ config IBM_ASM for information on the specific driver level and support statement for your IBM server. +config IBMVMC + tristate "IBM Virtual Management Channel support" + depends on PPC_PSERIES + help + This is the IBM POWER Virtual Management Channel + + This driver is to be used for the POWER Virtual + Management Channel virtual adapter on the PowerVM + platform. It provides both request/response and + async message support through the /dev/ibmvmc node. + + To compile this driver as a module, choose M here: the + module will be called ibmvmc. 
+ config PHANTOM tristate "Sensable PHANToM (PCI)" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 20be70c3f118..af22bbc3d00c 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -4,6 +4,7 @@ # obj-$(CONFIG_IBM_ASM) += ibmasm/ +obj-$(CONFIG_IBMVMC) += ibmvmc.o obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c new file mode 100644 index 000000000000..fb83d1375638 --- /dev/null +++ b/drivers/misc/ibmvmc.c @@ -0,0 +1,2418 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * IBM Power Systems Virtual Management Channel Support. + * + * Copyright (c) 2004, 2018 IBM Corp. + * Dave Engebretsen engebret@us.ibm.com + * Steven Royer seroyer@linux.vnet.ibm.com + * Adam Reznechek adreznec@linux.vnet.ibm.com + * Bryant G. Ly + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ibmvmc.h" + +#define IBMVMC_DRIVER_VERSION "1.0" + +/* + * Static global variables + */ +static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait); + +static const char ibmvmc_driver_name[] = "ibmvmc"; + +static struct ibmvmc_struct ibmvmc; +static struct ibmvmc_hmc hmcs[MAX_HMCS]; +static struct crq_server_adapter ibmvmc_adapter; + +static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE; +static int ibmvmc_max_hmcs = DEFAULT_HMCS; +static int ibmvmc_max_mtu = DEFAULT_MTU; + +static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba, + u64 dliobn, u64 dlioba) +{ + long rc = 0; + + /* Ensure all writes to source memory are visible before hcall */ + dma_wmb(); + pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + length, sliobn, slioba, dliobn, dlioba); + rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, 
slioba, + dliobn, dlioba); + pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc); + + return rc; +} + +static inline void h_free_crq(uint32_t unit_address) +{ + long rc = 0; + + do { + if (H_IS_LONG_BUSY(rc)) + msleep(get_longbusy_msecs(rc)); + + rc = plpar_hcall_norets(H_FREE_CRQ, unit_address); + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); +} + +/** + * h_request_vmc: - request a hypervisor virtual management channel device + * @vmc_index: drc index of the vmc device created + * + * Requests the hypervisor create a new virtual management channel device, + * allowing this partition to send hypervisor virtualization control + * commands. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static inline long h_request_vmc(u32 *vmc_index) +{ + long rc = 0; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + do { + if (H_IS_LONG_BUSY(rc)) + msleep(get_longbusy_msecs(rc)); + + /* Call to request the VMC device from phyp */ + rc = plpar_hcall(H_REQUEST_VMC, retbuf); + pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc); + *vmc_index = retbuf[0]; + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); + + return rc; +} + +/* routines for managing a command/response queue */ +/** + * ibmvmc_handle_event: - Interrupt handler for crq events + * @irq: number of irq to handle, not used + * @dev_instance: crq_server_adapter that received interrupt + * + * Disables interrupts and schedules ibmvmc_task + * + * Always returns IRQ_HANDLED + */ +static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance) +{ + struct crq_server_adapter *adapter = + (struct crq_server_adapter *)dev_instance; + + vio_disable_interrupts(to_vio_dev(adapter->dev)); + tasklet_schedule(&adapter->work_task); + + return IRQ_HANDLED; +} + +/** + * ibmvmc_release_crq_queue - Release CRQ Queue + * + * @adapter: crq_server_adapter struct + * + * Return: + * 0 - Success + * Non-Zero - Failure + */ +static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter) +{ + struct vio_dev *vdev = 
to_vio_dev(adapter->dev); + struct crq_queue *queue = &adapter->queue; + + free_irq(vdev->irq, (void *)adapter); + tasklet_kill(&adapter->work_task); + + if (adapter->reset_task) + kthread_stop(adapter->reset_task); + + h_free_crq(vdev->unit_address); + dma_unmap_single(adapter->dev, + queue->msg_token, + queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); + free_page((unsigned long)queue->msgs); +} + +/** + * ibmvmc_reset_crq_queue - Reset CRQ Queue + * + * @adapter: crq_server_adapter struct + * + * This function calls h_free_crq and then calls H_REG_CRQ and does all the + * bookkeeping to get us back to where we can communicate. + * + * Return: + * 0 - Success + * Non-Zero - Failure + */ +static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct crq_queue *queue = &adapter->queue; + int rc = 0; + + /* Close the CRQ */ + h_free_crq(vdev->unit_address); + + /* Clean out the queue */ + memset(queue->msgs, 0x00, PAGE_SIZE); + queue->cur = 0; + + /* And re-open it again */ + rc = plpar_hcall_norets(H_REG_CRQ, + vdev->unit_address, + queue->msg_token, PAGE_SIZE); + if (rc == 2) + /* Adapter is good, but other end is not ready */ + dev_warn(adapter->dev, "Partner adapter not ready\n"); + else if (rc != 0) + dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc); + + return rc; +} + +/** + * crq_queue_next_crq: - Returns the next entry in message queue + * @queue: crq_queue to use + * + * Returns pointer to next entry in queue, or NULL if there are no new + * entried in the CRQ. 
+ */ +static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue) +{ + struct ibmvmc_crq_msg *crq; + unsigned long flags; + + spin_lock_irqsave(&queue->lock, flags); + crq = &queue->msgs[queue->cur]; + if (crq->valid & 0x80) { + if (++queue->cur == queue->size) + queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + dma_rmb(); + } else { + crq = NULL; + } + + spin_unlock_irqrestore(&queue->lock, flags); + + return crq; +} + +/** + * ibmvmc_send_crq - Send CRQ + * + * @adapter: crq_server_adapter struct + * @word1: Word1 Data field + * @word2: Word2 Data field + * + * Return: + * 0 - Success + * Non-Zero - Failure + */ +static long ibmvmc_send_crq(struct crq_server_adapter *adapter, + u64 word1, u64 word2) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + long rc = 0; + + dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n", + vdev->unit_address, word1, word2); + + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the other side to prevent it from fetching any stale data. + */ + dma_wmb(); + rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); + dev_dbg(adapter->dev, "rc = 0x%lx\n", rc); + + return rc; +} + +/** + * alloc_dma_buffer - Create DMA Buffer + * + * @vdev: vio_dev struct + * @size: Size field + * @dma_handle: DMA address field + * + * Allocates memory for the command queue and maps remote memory into an + * ioba. 
+ * + * Returns a pointer to the buffer + */ +static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size, + dma_addr_t *dma_handle) +{ + /* allocate memory */ + void *buffer = kzalloc(size, GFP_KERNEL); + + if (!buffer) { + *dma_handle = 0; + return NULL; + } + + /* DMA map */ + *dma_handle = dma_map_single(&vdev->dev, buffer, size, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&vdev->dev, *dma_handle)) { + *dma_handle = 0; + kzfree(buffer); + return NULL; + } + + return buffer; +} + +/** + * free_dma_buffer - Free DMA Buffer + * + * @vdev: vio_dev struct + * @size: Size field + * @vaddr: Address field + * @dma_handle: DMA address field + * + * Releases memory for a command queue and unmaps mapped remote memory. + */ +static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + /* DMA unmap */ + dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL); + + /* deallocate memory */ + kzfree(vaddr); +} + +/** + * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer + * + * @hmc_index: HMC Index Field + * + * Return: + * Pointer to ibmvmc_buffer + */ +static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_buffer *ret_buf = NULL; + unsigned long i; + + if (hmc_index > ibmvmc.max_hmc_index) + return NULL; + + buffer = hmcs[hmc_index].buffer; + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].valid && buffer[i].free && + buffer[i].owner == VMC_BUF_OWNER_ALPHA) { + buffer[i].free = 0; + ret_buf = &buffer[i]; + break; + } + } + + return ret_buf; +} + +/** + * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer + * + * @adapter: crq_server_adapter struct + * @hmc_index: Hmc Index field + * + * Return: + * Pointer to ibmvmc_buffer + */ +static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter, + u8 hmc_index) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_buffer *ret_buf = NULL; + unsigned long 
i; + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n", + hmc_index); + return NULL; + } + + buffer = hmcs[hmc_index].buffer; + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].free && + buffer[i].owner == VMC_BUF_OWNER_ALPHA) { + buffer[i].free = 0; + ret_buf = &buffer[i]; + break; + } + } + + return ret_buf; +} + +/** + * ibmvmc_free_hmc_buffer - Free an HMC Buffer + * + * @hmc: ibmvmc_hmc struct + * @buffer: ibmvmc_buffer struct + * + */ +static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc, + struct ibmvmc_buffer *buffer) +{ + unsigned long flags; + + spin_lock_irqsave(&hmc->lock, flags); + buffer->free = 1; + spin_unlock_irqrestore(&hmc->lock, flags); +} + +/** + * ibmvmc_count_hmc_buffers - Count HMC Buffers + * + * @hmc_index: HMC Index field + * @valid: Valid number of buffers field + * @free: Free number of buffers field + * + */ +static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid, + unsigned int *free) +{ + struct ibmvmc_buffer *buffer; + unsigned long i; + unsigned long flags; + + if (hmc_index > ibmvmc.max_hmc_index) + return; + + if (!valid || !free) + return; + + *valid = 0; *free = 0; + + buffer = hmcs[hmc_index].buffer; + spin_lock_irqsave(&hmcs[hmc_index].lock, flags); + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].valid) { + *valid = *valid + 1; + if (buffer[i].free) + *free = *free + 1; + } + } + + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); +} + +/** + * ibmvmc_get_free_hmc - Get Free HMC + * + * Return: + * Pointer to an available HMC Connection + * Null otherwise + */ +static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void) +{ + unsigned long i; + unsigned long flags; + + /* + * Find an available HMC connection. 
+ */ + for (i = 0; i <= ibmvmc.max_hmc_index; i++) { + spin_lock_irqsave(&hmcs[i].lock, flags); + if (hmcs[i].state == ibmhmc_state_free) { + hmcs[i].index = i; + hmcs[i].state = ibmhmc_state_initial; + spin_unlock_irqrestore(&hmcs[i].lock, flags); + return &hmcs[i]; + } + spin_unlock_irqrestore(&hmcs[i].lock, flags); + } + + return NULL; +} + +/** + * ibmvmc_return_hmc - Return an HMC Connection + * + * @hmc: ibmvmc_hmc struct + * @release_readers: Number of readers connected to session + * + * This function releases the HMC connections back into the pool. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers) +{ + struct ibmvmc_buffer *buffer; + struct crq_server_adapter *adapter; + struct vio_dev *vdev; + unsigned long i; + unsigned long flags; + + if (!hmc || !hmc->adapter) + return -EIO; + + if (release_readers) { + if (hmc->file_session) { + struct ibmvmc_file_session *session = hmc->file_session; + + session->valid = 0; + wake_up_interruptible(&ibmvmc_read_wait); + } + } + + adapter = hmc->adapter; + vdev = to_vio_dev(adapter->dev); + + spin_lock_irqsave(&hmc->lock, flags); + hmc->index = 0; + hmc->state = ibmhmc_state_free; + hmc->queue_head = 0; + hmc->queue_tail = 0; + buffer = hmc->buffer; + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].valid) { + free_dma_buffer(vdev, + ibmvmc.max_mtu, + buffer[i].real_addr_local, + buffer[i].dma_addr_local); + dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i); + } + memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer)); + + hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID; + } + + spin_unlock_irqrestore(&hmc->lock, flags); + + return 0; +} + +/** + * ibmvmc_send_open - Interface Open + * @buffer: Pointer to ibmvmc_buffer struct + * @hmc: Pointer to ibmvmc_hmc struct + * + * This command is sent by the management partition as the result of a + * management partition device request. 
It causes the hypervisor to + * prepare a set of data buffers for the management application connection + * indicated HMC idx. A unique HMC Idx would be used if multiple management + * applications running concurrently were desired. Before responding to this + * command, the hypervisor must provide the management partition with at + * least one of these new buffers via the Add Buffer. This indicates whether + * the messages are inbound or outbound from the hypervisor. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_send_open(struct ibmvmc_buffer *buffer, + struct ibmvmc_hmc *hmc) +{ + struct ibmvmc_crq_msg crq_msg; + struct crq_server_adapter *adapter; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + int rc = 0; + + if (!hmc || !hmc->adapter) + return -EIO; + + adapter = hmc->adapter; + + dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", + (unsigned long)buffer->size, (unsigned long)adapter->liobn, + (unsigned long)buffer->dma_addr_local, + (unsigned long)adapter->riobn, + (unsigned long)buffer->dma_addr_remote); + + rc = h_copy_rdma(buffer->size, + adapter->liobn, + buffer->dma_addr_local, + adapter->riobn, + buffer->dma_addr_remote); + if (rc) { + dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n", + rc); + return -EIO; + } + + hmc->state = ibmhmc_state_opening; + + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_OPEN; + crq_msg.status = 0; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc->session; + crq_msg.hmc_index = hmc->index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer->id); + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return rc; +} + +/** + * ibmvmc_send_close - Interface Close + * @hmc: Pointer to ibmvmc_hmc struct + * + * This command is sent by the management partition to terminate a + * management application to hypervisor connection. 
When this command is + * sent, the management partition has quiesced all I/O operations to all + * buffers associated with this management application connection, and + * has freed any storage for these buffers. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_send_close(struct ibmvmc_hmc *hmc) +{ + struct ibmvmc_crq_msg crq_msg; + struct crq_server_adapter *adapter; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + int rc = 0; + + if (!hmc || !hmc->adapter) + return -EIO; + + adapter = hmc->adapter; + + dev_info(adapter->dev, "CRQ send: close\n"); + + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_CLOSE; + crq_msg.status = 0; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc->session; + crq_msg.hmc_index = hmc->index; + crq_msg.var2.rsvd = 0; + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return rc; +} + +/** + * ibmvmc_send_capabilities - Send VMC Capabilities + * + * @adapter: crq_server_adapter struct + * + * The capabilities message is an administrative message sent after the CRQ + * initialization sequence of messages and is used to exchange VMC capabilities + * between the management partition and the hypervisor. The management + * partition must send this message and the hypervisor must respond with VMC + * capabilities Response message before HMC interface message can begin. Any + * HMC interface messages received before the exchange of capabilities has + * complete are dropped. 
+ * + * Return: + * 0 - Success + */ +static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter) +{ + struct ibmvmc_admin_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_CAP; + crq_msg.status = 0; + crq_msg.rsvd[0] = 0; + crq_msg.rsvd[1] = 0; + crq_msg.max_hmc = ibmvmc_max_hmcs; + crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu); + crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size); + crq_msg.crq_size = cpu_to_be16(adapter->queue.size); + crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION); + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + ibmvmc.state = ibmvmc_state_capabilities; + + return 0; +} + +/** + * ibmvmc_send_add_buffer_resp - Add Buffer Response + * + * @adapter: crq_server_adapter struct + * @status: Status field + * @hmc_session: HMC Session field + * @hmc_index: HMC Index field + * @buffer_id: Buffer Id field + * + * This command is sent by the management partition to the hypervisor in + * response to the Add Buffer message. The Status field indicates the result of + * the command. 
+ * + * Return: + * 0 - Success + */ +static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter, + u8 status, u8 hmc_session, + u8 hmc_index, u16 buffer_id) +{ + struct ibmvmc_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_ADD_BUF_RESP; + crq_msg.status = status; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc_session; + crq_msg.hmc_index = hmc_index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer_id); + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return 0; +} + +/** + * ibmvmc_send_rem_buffer_resp - Remove Buffer Response + * + * @adapter: crq_server_adapter struct + * @status: Status field + * @hmc_session: HMC Session field + * @hmc_index: HMC Index field + * @buffer_id: Buffer Id field + * + * This command is sent by the management partition to the hypervisor in + * response to the Remove Buffer message. The Buffer ID field indicates + * which buffer the management partition selected to remove. The Status + * field indicates the result of the command. 
+ * + * Return: + * 0 - Success + */ +static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter, + u8 status, u8 hmc_session, + u8 hmc_index, u16 buffer_id) +{ + struct ibmvmc_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_REM_BUF_RESP; + crq_msg.status = status; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc_session; + crq_msg.hmc_index = hmc_index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer_id); + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return 0; +} + +/** + * ibmvmc_send_msg - Signal Message + * + * @adapter: crq_server_adapter struct + * @buffer: ibmvmc_buffer struct + * @hmc: ibmvmc_hmc struct + * @msg_length: message length field + * + * This command is sent between the management partition and the hypervisor + * in order to signal the arrival of an HMC protocol message. The command + * can be sent by both the management partition and the hypervisor. It is + * used for all traffic between the management application and the hypervisor, + * regardless of who initiated the communication. + * + * There is no response to this message. 
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
+			   struct ibmvmc_buffer *buffer,
+			   struct ibmvmc_hmc *hmc, int msg_len)
+{
+	struct ibmvmc_crq_msg crq_msg;
+	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
+	int rc = 0;
+
+	dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
+	/* Push the payload into the hypervisor-side buffer before
+	 * signaling its arrival via the CRQ.
+	 */
+	rc = h_copy_rdma(msg_len,
+			 adapter->liobn,
+			 buffer->dma_addr_local,
+			 adapter->riobn,
+			 buffer->dma_addr_remote);
+	if (rc) {
+		dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
+			rc);
+		return rc;
+	}
+
+	/* Zero the message so the rsvd field (previously never written)
+	 * and any padding do not leak stack contents to the hypervisor.
+	 */
+	memset(&crq_msg, 0, sizeof(crq_msg));
+	crq_msg.valid = 0x80;
+	crq_msg.type = VMC_MSG_SIGNAL;
+	crq_msg.hmc_session = hmc->session;
+	crq_msg.hmc_index = hmc->index;
+	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
+	crq_msg.var3.msg_len = cpu_to_be32(msg_len);
+	dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
+		be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));
+
+	/* Ownership passes to the hypervisor before the doorbell rings */
+	buffer->owner = VMC_BUF_OWNER_HV;
+	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
+			be64_to_cpu(crq_as_u64[1]));
+
+	return rc;
+}
+
+/**
+ * ibmvmc_open - Open Session
+ *
+ * @inode:	inode struct
+ * @file:	file struct
+ *
+ * Return:
+ *	0 - Success
+ *	-ENOMEM - No memory for the per-open session state
+ */
+static int ibmvmc_open(struct inode *inode, struct file *file)
+{
+	struct ibmvmc_file_session *session;
+
+	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
+		 (unsigned long)inode, (unsigned long)file,
+		 ibmvmc.state);
+
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	/* Previously unchecked: a failed allocation was dereferenced below */
+	if (!session)
+		return -ENOMEM;
+
+	session->file = file;
+	file->private_data = session;
+
+	return 0;
+}
+
+/**
+ * ibmvmc_close - Close Session
+ *
+ * @inode:	inode struct
+ * @file:	file struct
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static int ibmvmc_close(struct inode *inode, struct file *file)
+{
+	struct ibmvmc_file_session *session;
+	struct ibmvmc_hmc *hmc;
+	int rc = 0;
+	unsigned long flags;
+
+	pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
+		 (unsigned long)file, ibmvmc.state);
+
+	session = file->private_data;
+	if (!session)
+		return -EIO;
+
+	hmc = session->hmc;
+	if (hmc) {
+		if (!hmc->adapter)
+			return -EIO;
+
+		if (ibmvmc.state == ibmvmc_state_failed) {
+			dev_warn(hmc->adapter->dev, "close: state_failed\n");
+			return -EIO;
+		}
+
+		/* Tell the hypervisor the session is going away if it ever
+		 * progressed past the initial state.
+		 */
+		spin_lock_irqsave(&hmc->lock, flags);
+		if (hmc->state >= ibmhmc_state_opening) {
+			rc = ibmvmc_send_close(hmc);
+			if (rc)
+				dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
+		}
+		spin_unlock_irqrestore(&hmc->lock, flags);
+	}
+
+	/* NOTE(review): file->private_data still points at the freed
+	 * session after this; no further fops should run on this file.
+	 */
+	kzfree(session);
+
+	return rc;
+}
+
+/**
+ * ibmvmc_read - Read
+ *
+ * @file:	file struct
+ * @buf:	Character buffer
+ * @nbytes:	Size in bytes
+ * @ppos:	Offset
+ *
+ * Blocks (unless O_NONBLOCK) until a buffer is queued on the HMC's
+ * outbound message queue, then copies at most one message to userspace.
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
+			   loff_t *ppos)
+{
+	struct ibmvmc_file_session *session;
+	struct ibmvmc_hmc *hmc;
+	struct crq_server_adapter *adapter;
+	struct ibmvmc_buffer *buffer;
+	ssize_t n;
+	ssize_t retval = 0;
+	unsigned long flags;
+	DEFINE_WAIT(wait);
+
+	pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
+		 (unsigned long)file, (unsigned long)buf,
+		 (unsigned long)nbytes);
+
+	if (nbytes == 0)
+		return 0;
+
+	if (nbytes > ibmvmc.max_mtu) {
+		pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
+			(unsigned int)nbytes);
+		return -EINVAL;
+	}
+
+	session = file->private_data;
+	if (!session) {
+		pr_warn("ibmvmc: read: no session\n");
+		return -EIO;
+	}
+
+	hmc = session->hmc;
+	if (!hmc) {
+		pr_warn("ibmvmc: read: no hmc\n");
+		return -EIO;
+	}
+
+	adapter = hmc->adapter;
+	if (!adapter) {
+		pr_warn("ibmvmc: read: no adapter\n");
+		return -EIO;
+	}
+
+	do {
+		prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);
+
+		spin_lock_irqsave(&hmc->lock, flags);
+		if (hmc->queue_tail != hmc->queue_head)
+			/* Data is available; break with hmc->lock HELD —
+			 * it is released after the dequeue below.
+			 */
+			break;
+
+		spin_unlock_irqrestore(&hmc->lock, flags);
+
+		if (!session->valid) {
+			retval = -EBADFD;
+			goto out;
+		}
+		if (file->f_flags & O_NONBLOCK) {
+			retval = -EAGAIN;
+			goto out;
+		}
+
+		schedule();
+
+		if (signal_pending(current)) {
+			retval = -ERESTARTSYS;
+			goto out;
+		}
+	} while (1);
+
+	/* Dequeue one buffer id from the circular outbound queue */
+	buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
+	hmc->queue_tail++;
+	if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
+		hmc->queue_tail = 0;
+	spin_unlock_irqrestore(&hmc->lock, flags);
+
+	nbytes = min_t(size_t, nbytes, buffer->msg_len);
+	/* NOTE(review): the (void *) cast discards the __user address-space
+	 * annotation on buf; the prototype should take char __user *.
+	 */
+	n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
+	dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
+	ibmvmc_free_hmc_buffer(hmc, buffer);
+	retval = nbytes;
+
+	if (n) {
+		dev_warn(adapter->dev, "read: copy to user failed.\n");
+		retval = -EFAULT;
+	}
+
+ out:
+	finish_wait(&ibmvmc_read_wait, &wait);
+	dev_dbg(adapter->dev, "read: out %ld\n", retval);
+	return retval;
+}
+
+/**
+ * ibmvmc_poll - Poll
+ *
+ * @file:	file struct
+ * @wait:	Poll Table
+ *
+ * Return:
+ *	poll.h return values
+ */
+static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
+{
+	struct ibmvmc_file_session *session;
+	struct ibmvmc_hmc *hmc;
+	unsigned int mask = 0;
+
+	session = file->private_data;
+	if (!session)
+		return 0;
+
+	hmc = session->hmc;
+	if (!hmc)
+		return 0;
+
+	poll_wait(file, &ibmvmc_read_wait, wait);
+
+	/* Readable whenever the outbound message queue is non-empty */
+	if (hmc->queue_head != hmc->queue_tail)
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+/**
+ * ibmvmc_write - Write
+ *
+ * @file:	file struct
+ * @buffer:	Character buffer
+ * @count:	Count field
+ * @ppos:	Offset
+ *
+ * Copies a userspace message into a free HMC buffer and signals the
+ * hypervisor that it has arrived.
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static ssize_t ibmvmc_write(struct file *file, const char *buffer,
+			    size_t count, loff_t *ppos)
+{
+	struct ibmvmc_buffer *vmc_buffer;
+	struct ibmvmc_file_session *session;
+	struct crq_server_adapter *adapter;
+	struct ibmvmc_hmc *hmc;
+	unsigned char *buf;
+	unsigned long flags;
+	size_t bytes;
+	const char *p = buffer;
+	size_t c = count;
+	int ret = 0;
+
+	session = file->private_data;
+	if (!session)
+		return -EIO;
+
+	hmc = session->hmc;
+	if (!hmc)
+		return -EIO;
+
+	/* NOTE(review): hmc->lock is held (with IRQs off) across
+	 * copy_from_user() below; a page fault there may sleep in atomic
+	 * context. Consider copying before taking the lock — confirm.
+	 */
+	spin_lock_irqsave(&hmc->lock, flags);
+	if (hmc->state == ibmhmc_state_free) {
+		/* HMC connection is not valid (possibly was reset under us). */
+		ret = -EIO;
+		goto out;
+	}
+
+	adapter = hmc->adapter;
+	if (!adapter) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (count > ibmvmc.max_mtu) {
+		dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
+			 (unsigned long)count);
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Waiting for the open resp message to the ioctl(1) - retry */
+	if (hmc->state == ibmhmc_state_opening) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Make sure the ioctl() was called & the open msg sent, and that
+	 * the HMC connection has not failed.
+	 */
+	if (hmc->state != ibmhmc_state_ready) {
+		ret = -EIO;
+		goto out;
+	}
+
+	vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
+	if (!vmc_buffer) {
+		/* No buffer available for the msg send, or we have not yet
+		 * completed the open/open_resp sequence.  Retry until this is
+		 * complete.
+		 */
+		ret = -EBUSY;
+		goto out;
+	}
+	if (!vmc_buffer->real_addr_local) {
+		dev_err(adapter->dev, "no buffer storage assigned\n");
+		ret = -EIO;
+		goto out;
+	}
+	buf = vmc_buffer->real_addr_local;
+
+	/* count <= max_mtu == vmc_buffer->size, so this loop runs once in
+	 * practice. NOTE(review): buf is never advanced, so if it ever did
+	 * iterate, later chunks would overwrite the first; and a *partial*
+	 * copy_from_user() is not detected (only a total failure is, via
+	 * the !bytes check). Confirm against the upstream driver.
+	 */
+	while (c > 0) {
+		bytes = min_t(size_t, c, vmc_buffer->size);
+
+		bytes -= copy_from_user(buf, p, bytes);
+		if (!bytes) {
+			ret = -EFAULT;
+			goto out;
+		}
+		c -= bytes;
+		p += bytes;
+	}
+	if (p == buffer)
+		goto out;
+
+	file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
+	mark_inode_dirty(file->f_path.dentry->d_inode);
+
+	dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
+		(unsigned long)file, (unsigned long)count);
+
+	/* NOTE(review): sends 'count' even if fewer bytes were copied —
+	 * consistent only because partial copies fall through above.
+	 */
+	ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
+	ret = p - buffer;
+ out:
+	spin_unlock_irqrestore(&hmc->lock, flags);
+	return (ssize_t)(ret);
+}
+
+/**
+ * ibmvmc_setup_hmc - Setup the HMC
+ *
+ * @session:	ibmvmc_file_session struct
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
+{
+	struct ibmvmc_hmc *hmc;
+	unsigned int valid, free, index;
+
+	if (ibmvmc.state == ibmvmc_state_failed) {
+		pr_warn("ibmvmc: Reserve HMC: state_failed\n");
+		return -EIO;
+	}
+
+	if (ibmvmc.state < ibmvmc_state_ready) {
+		pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
+		return -EAGAIN;
+	}
+
+	/* Device is busy until capabilities have been exchanged and we
+	 * have a generic buffer for each possible HMC connection.
+ */ + for (index = 0; index <= ibmvmc.max_hmc_index; index++) { + valid = 0; + ibmvmc_count_hmc_buffers(index, &valid, &free); + if (valid == 0) { + pr_warn("ibmvmc: buffers not ready for index %d\n", + index); + return -ENOBUFS; + } + } + + /* Get an hmc object, and transition to ibmhmc_state_initial */ + hmc = ibmvmc_get_free_hmc(); + if (!hmc) { + pr_warn("%s: free hmc not found\n", __func__); + return -EBUSY; + } + + hmc->session = hmc->session + 1; + if (hmc->session == 0xff) + hmc->session = 1; + + session->hmc = hmc; + hmc->adapter = &ibmvmc_adapter; + hmc->file_session = session; + session->valid = 1; + + return 0; +} + +/** + * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID + * + * @session: ibmvmc_file_session struct + * @new_hmc_id: HMC id field + * + * IOCTL command to setup the hmc id + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session, + unsigned char __user *new_hmc_id) +{ + struct ibmvmc_hmc *hmc; + struct ibmvmc_buffer *buffer; + size_t bytes; + char print_buffer[HMC_ID_LEN + 1]; + unsigned long flags; + long rc = 0; + + /* Reserve HMC session */ + hmc = session->hmc; + if (!hmc) { + rc = ibmvmc_setup_hmc(session); + if (rc) + return rc; + + hmc = session->hmc; + if (!hmc) { + pr_err("ibmvmc: setup_hmc success but no hmc\n"); + return -EIO; + } + } + + if (hmc->state != ibmhmc_state_initial) { + pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n", + hmc->state); + return -EIO; + } + + bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN); + if (bytes) + return -EFAULT; + + /* Send Open Session command */ + spin_lock_irqsave(&hmc->lock, flags); + buffer = ibmvmc_get_valid_hmc_buffer(hmc->index); + spin_unlock_irqrestore(&hmc->lock, flags); + + if (!buffer || !buffer->real_addr_local) { + pr_warn("ibmvmc: sethmcid: no buffer available\n"); + return -EIO; + } + + /* Make sure buffer is NULL terminated before trying to print it */ + memset(print_buffer, 0, 
HMC_ID_LEN + 1); + strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN); + pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer); + + memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN); + /* RDMA over ID, send open msg, change state to ibmhmc_state_opening */ + rc = ibmvmc_send_open(buffer, hmc); + + return rc; +} + +/** + * ibmvmc_ioctl_query - IOCTL Query + * + * @session: ibmvmc_file_session struct + * @ret_struct: ibmvmc_query_struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session, + struct ibmvmc_query_struct __user *ret_struct) +{ + struct ibmvmc_query_struct query_struct; + size_t bytes; + + memset(&query_struct, 0, sizeof(query_struct)); + query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial); + query_struct.state = ibmvmc.state; + query_struct.vmc_drc_index = ibmvmc.vmc_drc_index; + + bytes = copy_to_user(ret_struct, &query_struct, + sizeof(query_struct)); + if (bytes) + return -EFAULT; + + return 0; +} + +/** + * ibmvmc_ioctl_requestvmc - IOCTL Request VMC + * + * @session: ibmvmc_file_session struct + * @ret_vmc_index: VMC Index + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session, + u32 __user *ret_vmc_index) +{ + /* TODO: (adreznec) Add locking to control multiple process access */ + size_t bytes; + long rc; + u32 vmc_drc_index; + + /* Call to request the VMC device from phyp*/ + rc = h_request_vmc(&vmc_drc_index); + pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc); + + if (rc == H_SUCCESS) { + rc = 0; + } else if (rc == H_FUNCTION) { + pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n"); + return -EPERM; + } else if (rc == H_AUTHORITY) { + pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n"); + return -EPERM; + } else if (rc == H_HARDWARE) { + pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n"); + return -EIO; + } else if (rc == H_RESOURCE) 
{ + pr_err("ibmvmc: requestvmc: vmc resource unavailable\n"); + return -ENODEV; + } else if (rc == H_NOT_AVAILABLE) { + pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n"); + return -EPERM; + } else if (rc == H_PARAMETER) { + pr_err("ibmvmc: requestvmc: invalid parameter\n"); + return -EINVAL; + } + + /* Success, set the vmc index in global struct */ + ibmvmc.vmc_drc_index = vmc_drc_index; + + bytes = copy_to_user(ret_vmc_index, &vmc_drc_index, + sizeof(*ret_vmc_index)); + if (bytes) { + pr_warn("ibmvmc: requestvmc: copy to user failed.\n"); + return -EFAULT; + } + return rc; +} + +/** + * ibmvmc_ioctl - IOCTL + * + * @session: ibmvmc_file_session struct + * @cmd: cmd field + * @arg: Argument field + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct ibmvmc_file_session *session = file->private_data; + + pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n", + (unsigned long)file, cmd, arg, + (unsigned long)session); + + if (!session) { + pr_warn("ibmvmc: ioctl: no session\n"); + return -EIO; + } + + switch (cmd) { + case VMC_IOCTL_SETHMCID: + return ibmvmc_ioctl_sethmcid(session, + (unsigned char __user *)arg); + case VMC_IOCTL_QUERY: + return ibmvmc_ioctl_query(session, + (struct ibmvmc_query_struct __user *)arg); + case VMC_IOCTL_REQUESTVMC: + return ibmvmc_ioctl_requestvmc(session, + (unsigned int __user *)arg); + default: + pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd); + return -EINVAL; + } +} + +static const struct file_operations ibmvmc_fops = { + .owner = THIS_MODULE, + .read = ibmvmc_read, + .write = ibmvmc_write, + .poll = ibmvmc_poll, + .unlocked_ioctl = ibmvmc_ioctl, + .open = ibmvmc_open, + .release = ibmvmc_close, +}; + +/** + * ibmvmc_add_buffer - Add Buffer + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * This message transfers a buffer from hypervisor ownership to management + * partition 
ownership. The LIOBA is obtained from the virtual TCE table + * associated with the hypervisor side of the VMC device, and points to a + * buffer of size MTU (as established in the capabilities exchange). + * + * Typical flow for ading buffers: + * 1. A new management application connection is opened by the management + * partition. + * 2. The hypervisor assigns new buffers for the traffic associated with + * that connection. + * 3. The hypervisor sends VMC Add Buffer messages to the management + * partition, informing it of the new buffers. + * 4. The hypervisor sends an HMC protocol message (to the management + * application) notifying it of the new buffers. This informs the + * application that it has buffers available for sending HMC + * commands. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_add_buffer(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + struct ibmvmc_buffer *buffer; + u8 hmc_index; + u8 hmc_session; + u16 buffer_id; + unsigned long flags; + int rc = 0; + + if (!crq) + return -1; + + hmc_session = crq->hmc_session; + hmc_index = crq->hmc_index; + buffer_id = be16_to_cpu(crq->var2.buffer_id); + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n", + hmc_index); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX, + hmc_session, hmc_index, buffer_id); + return -1; + } + + if (buffer_id >= ibmvmc.max_buffer_pool_size) { + dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n", + buffer_id); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID, + hmc_session, hmc_index, buffer_id); + return -1; + } + + spin_lock_irqsave(&hmcs[hmc_index].lock, flags); + buffer = &hmcs[hmc_index].buffer[buffer_id]; + + if (buffer->real_addr_local || buffer->dma_addr_local) { + dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n", + (unsigned long)buffer_id); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + 
ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID, + hmc_session, hmc_index, buffer_id); + return -1; + } + + buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev), + ibmvmc.max_mtu, + &buffer->dma_addr_local); + + if (!buffer->real_addr_local) { + dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n"); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE, + hmc_session, hmc_index, buffer_id); + return -1; + } + + buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba); + buffer->size = ibmvmc.max_mtu; + buffer->owner = crq->var1.owner; + buffer->free = 1; + /* Must ensure valid==1 is observable only after all other fields are */ + dma_wmb(); + buffer->valid = 1; + buffer->id = buffer_id; + + dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n"); + dev_dbg(adapter->dev, " index: %d, session: %d, buffer: 0x%x, owner: %d\n", + hmc_index, hmc_session, buffer_id, buffer->owner); + dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n", + (u32)buffer->dma_addr_local, + (u32)buffer->dma_addr_remote); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session, + hmc_index, buffer_id); + + return rc; +} + +/** + * ibmvmc_rem_buffer - Remove Buffer + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * This message requests an HMC buffer to be transferred from management + * partition ownership to hypervisor ownership. The management partition may + * not be able to satisfy the request at a particular point in time if all its + * buffers are in use. The management partition requires a depth of at least + * one inbound buffer to allow management application commands to flow to the + * hypervisor. It is, therefore, an interface error for the hypervisor to + * attempt to remove the management partition's last buffer. 
+ * + * The hypervisor is expected to manage buffer usage with the management + * application directly and inform the management partition when buffers may be + * removed. The typical flow for removing buffers: + * + * 1. The management application no longer needs a communication path to a + * particular hypervisor function. That function is closed. + * 2. The hypervisor and the management application quiesce all traffic to that + * function. The hypervisor requests a reduction in buffer pool size. + * 3. The management application acknowledges the reduction in buffer pool size. + * 4. The hypervisor sends a Remove Buffer message to the management partition, + * informing it of the reduction in buffers. + * 5. The management partition verifies it can remove the buffer. This is + * possible if buffers have been quiesced. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +/* + * The hypervisor requested that we pick an unused buffer, and return it. + * Before sending the buffer back, we free any storage associated with the + * buffer. 
+ */ +static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + struct ibmvmc_buffer *buffer; + u8 hmc_index; + u8 hmc_session; + u16 buffer_id = 0; + unsigned long flags; + int rc = 0; + + if (!crq) + return -1; + + hmc_session = crq->hmc_session; + hmc_index = crq->hmc_index; + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n", + hmc_index); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX, + hmc_session, hmc_index, buffer_id); + return -1; + } + + spin_lock_irqsave(&hmcs[hmc_index].lock, flags); + buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index); + if (!buffer) { + dev_info(adapter->dev, "rem_buffer: no buffer to remove\n"); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER, + hmc_session, hmc_index, + VMC_INVALID_BUFFER_ID); + return -1; + } + + buffer_id = buffer->id; + + if (buffer->valid) + free_dma_buffer(to_vio_dev(adapter->dev), + ibmvmc.max_mtu, + buffer->real_addr_local, + buffer->dma_addr_local); + + memset(buffer, 0, sizeof(struct ibmvmc_buffer)); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + + dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session, + hmc_index, buffer_id); + + return rc; +} + +static int ibmvmc_recv_msg(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_hmc *hmc; + unsigned long msg_len; + u8 hmc_index; + u8 hmc_session; + u16 buffer_id; + unsigned long flags; + int rc = 0; + + if (!crq) + return -1; + + /* Hypervisor writes CRQs directly into our memory in big endian */ + dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n", + be64_to_cpu(*((unsigned long *)crq)), + be64_to_cpu(*(((unsigned long *)crq) + 1))); + + hmc_session = crq->hmc_session; + hmc_index = crq->hmc_index; 
+ buffer_id = be16_to_cpu(crq->var2.buffer_id); + msg_len = be32_to_cpu(crq->var3.msg_len); + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n", + hmc_index); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX, + hmc_session, hmc_index, buffer_id); + return -1; + } + + if (buffer_id >= ibmvmc.max_buffer_pool_size) { + dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n", + buffer_id); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID, + hmc_session, hmc_index, buffer_id); + return -1; + } + + hmc = &hmcs[hmc_index]; + spin_lock_irqsave(&hmc->lock, flags); + + if (hmc->state == ibmhmc_state_free) { + dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n", + hmc->state); + /* HMC connection is not valid (possibly was reset under us). */ + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + buffer = &hmc->buffer[buffer_id]; + + if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) { + dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n", + buffer->valid, buffer->owner); + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + /* RDMA the data into the partition. 
*/ + rc = h_copy_rdma(msg_len, + adapter->riobn, + buffer->dma_addr_remote, + adapter->liobn, + buffer->dma_addr_local); + + dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n", + (unsigned int)msg_len, (unsigned int)buffer_id, + (unsigned int)hmc->queue_head, (unsigned int)hmc_index); + buffer->msg_len = msg_len; + buffer->free = 0; + buffer->owner = VMC_BUF_OWNER_ALPHA; + + if (rc) { + dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n", + rc); + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + /* Must be locked because read operates on the same data */ + hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id; + hmc->queue_head++; + if (hmc->queue_head == ibmvmc_max_buf_pool_size) + hmc->queue_head = 0; + + if (hmc->queue_head == hmc->queue_tail) + dev_err(adapter->dev, "outbound buffer queue wrapped.\n"); + + spin_unlock_irqrestore(&hmc->lock, flags); + + wake_up_interruptible(&ibmvmc_read_wait); + + return 0; +} + +/** + * ibmvmc_process_capabilities - Process Capabilities + * + * @adapter: crq_server_adapter struct + * @crqp: ibmvmc_crq_msg struct + * + */ +static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crqp) +{ + struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp; + + if ((be16_to_cpu(crq->version) >> 8) != + (IBMVMC_PROTOCOL_VERSION >> 8)) { + dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n", + be16_to_cpu(crq->version), + IBMVMC_PROTOCOL_VERSION); + ibmvmc.state = ibmvmc_state_failed; + return; + } + + ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu)); + ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size, + be16_to_cpu(crq->pool_size)); + ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1; + ibmvmc.state = ibmvmc_state_ready; + + dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n", + ibmvmc.max_mtu, 
ibmvmc.max_buffer_pool_size, + ibmvmc.max_hmc_index); +} + +/** + * ibmvmc_validate_hmc_session - Validate HMC Session + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + unsigned char hmc_index; + + hmc_index = crq->hmc_index; + + if (crq->hmc_session == 0) + return 0; + + if (hmc_index > ibmvmc.max_hmc_index) + return -1; + + if (hmcs[hmc_index].session != crq->hmc_session) { + dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n", + hmcs[hmc_index].session, crq->hmc_session); + return -1; + } + + return 0; +} + +/** + * ibmvmc_reset - Reset + * + * @adapter: crq_server_adapter struct + * @xport_event: export_event field + * + * Closes all HMC sessions and conditionally schedules a CRQ reset. + * @xport_event: If true, the partner closed their CRQ; we don't need to reset. + * If false, we need to schedule a CRQ reset. + */ +static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event) +{ + int i; + + if (ibmvmc.state != ibmvmc_state_sched_reset) { + dev_info(adapter->dev, "*** Reset to initial state.\n"); + for (i = 0; i < ibmvmc_max_hmcs; i++) + ibmvmc_return_hmc(&hmcs[i], xport_event); + + if (xport_event) { + /* CRQ was closed by the partner. We don't need to do + * anything except set ourself to the correct state to + * handle init msgs. + */ + ibmvmc.state = ibmvmc_state_crqinit; + } else { + /* The partner did not close their CRQ - instead, we're + * closing the CRQ on our end. Need to schedule this + * for process context, because CRQ reset may require a + * sleep. + * + * Setting ibmvmc.state here immediately prevents + * ibmvmc_open from completing until the reset + * completes in process context. 
+ */ + ibmvmc.state = ibmvmc_state_sched_reset; + dev_dbg(adapter->dev, "Device reset scheduled"); + wake_up_interruptible(&adapter->reset_wait_queue); + } + } +} + +/** + * ibmvmc_reset_task - Reset Task + * + * @data: Data field + * + * Performs a CRQ reset of the VMC device in process context. + * NOTE: This function should not be called directly, use ibmvmc_reset. + */ +static int ibmvmc_reset_task(void *data) +{ + struct crq_server_adapter *adapter = data; + int rc; + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + wait_event_interruptible(adapter->reset_wait_queue, + (ibmvmc.state == ibmvmc_state_sched_reset) || + kthread_should_stop()); + + if (kthread_should_stop()) + break; + + dev_dbg(adapter->dev, "CRQ resetting in process context"); + tasklet_disable(&adapter->work_task); + + rc = ibmvmc_reset_crq_queue(adapter); + + if (rc != H_SUCCESS && rc != H_RESOURCE) { + dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n", + rc); + ibmvmc.state = ibmvmc_state_failed; + } else { + ibmvmc.state = ibmvmc_state_crqinit; + + if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) + != 0 && rc != H_RESOURCE) + dev_warn(adapter->dev, "Failed to send initialize CRQ message\n"); + } + + vio_enable_interrupts(to_vio_dev(adapter->dev)); + tasklet_enable(&adapter->work_task); + } + + return 0; +} + +/** + * ibmvmc_process_open_resp - Process Open Response + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * This command is sent by the hypervisor in response to the Interface + * Open message. When this message is received, the indicated buffer is + * again available for management partition use. + */ +static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + unsigned char hmc_index; + unsigned short buffer_id; + + hmc_index = crq->hmc_index; + if (hmc_index > ibmvmc.max_hmc_index) { + /* Why would PHYP give an index > max negotiated? 
*/ + ibmvmc_reset(adapter, false); + return; + } + + if (crq->status) { + dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n", + crq->status); + ibmvmc_return_hmc(&hmcs[hmc_index], false); + return; + } + + if (hmcs[hmc_index].state == ibmhmc_state_opening) { + buffer_id = be16_to_cpu(crq->var2.buffer_id); + if (buffer_id >= ibmvmc.max_buffer_pool_size) { + dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n", + buffer_id); + hmcs[hmc_index].state = ibmhmc_state_failed; + } else { + ibmvmc_free_hmc_buffer(&hmcs[hmc_index], + &hmcs[hmc_index].buffer[buffer_id]); + hmcs[hmc_index].state = ibmhmc_state_ready; + dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n"); + } + } else { + dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n", + hmcs[hmc_index].state); + } +} + +/** + * ibmvmc_process_close_resp - Process Close Response + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * This command is sent by the hypervisor in response to the managemant + * application Interface Close message. + * + * If the close fails, simply reset the entire driver as the state of the VMC + * must be in tough shape. + */ +static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + unsigned char hmc_index; + + hmc_index = crq->hmc_index; + if (hmc_index > ibmvmc.max_hmc_index) { + ibmvmc_reset(adapter, false); + return; + } + + if (crq->status) { + dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n", + crq->status); + ibmvmc_reset(adapter, false); + return; + } + + ibmvmc_return_hmc(&hmcs[hmc_index], false); +} + +/** + * ibmvmc_crq_process - Process CRQ + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * Process the CRQ message based upon the type of message received. 
+ * + */ +static void ibmvmc_crq_process(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + switch (crq->type) { + case VMC_MSG_CAP_RESP: + dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n", + crq->type); + if (ibmvmc.state == ibmvmc_state_capabilities) + ibmvmc_process_capabilities(adapter, crq); + else + dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n", + ibmvmc.state); + break; + case VMC_MSG_OPEN_RESP: + dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_process_open_resp(crq, adapter); + break; + case VMC_MSG_ADD_BUF: + dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_add_buffer(adapter, crq); + break; + case VMC_MSG_REM_BUF: + dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_rem_buffer(adapter, crq); + break; + case VMC_MSG_SIGNAL: + dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_recv_msg(adapter, crq); + break; + case VMC_MSG_CLOSE_RESP: + dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_process_close_resp(crq, adapter); + break; + case VMC_MSG_CAP: + case VMC_MSG_OPEN: + case VMC_MSG_CLOSE: + case VMC_MSG_ADD_BUF_RESP: + case VMC_MSG_REM_BUF_RESP: + dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n", + crq->type); + break; + default: + dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n", + crq->type); + break; + } +} + +/** + * ibmvmc_handle_crq_init - Handle CRQ Init + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * Handle the type of crq initialization based on whether + * it is a message or a response. 
+ * + */ +static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + switch (crq->type) { + case 0x01: /* Initialization message */ + dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n", + ibmvmc.state); + if (ibmvmc.state == ibmvmc_state_crqinit) { + /* Send back a response */ + if (ibmvmc_send_crq(adapter, 0xC002000000000000, + 0) == 0) + ibmvmc_send_capabilities(adapter); + else + dev_err(adapter->dev, " Unable to send init rsp\n"); + } else { + dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n", + ibmvmc.state, ibmvmc.max_mtu); + } + + break; + case 0x02: /* Initialization response */ + dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n", + ibmvmc.state); + if (ibmvmc.state == ibmvmc_state_crqinit) + ibmvmc_send_capabilities(adapter); + break; + default: + dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n", + (unsigned long)crq->type); + } +} + +/** + * ibmvmc_handle_crq - Handle CRQ + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * Read the command elements from the command queue and execute the + * requests based upon the type of crq message. 
+ * + */ +static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + switch (crq->valid) { + case 0xC0: /* initialization */ + ibmvmc_handle_crq_init(crq, adapter); + break; + case 0xFF: /* Hypervisor telling us the connection is closed */ + dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n"); + ibmvmc_reset(adapter, true); + break; + case 0x80: /* real payload */ + ibmvmc_crq_process(adapter, crq); + break; + default: + dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n", + crq->valid); + break; + } +} + +static void ibmvmc_task(unsigned long data) +{ + struct crq_server_adapter *adapter = + (struct crq_server_adapter *)data; + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct ibmvmc_crq_msg *crq; + int done = 0; + + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) { + ibmvmc_handle_crq(crq, adapter); + crq->valid = 0x00; + /* CRQ reset was requested, stop processing CRQs. + * Interrupts will be re-enabled by the reset task. + */ + if (ibmvmc.state == ibmvmc_state_sched_reset) + return; + } + + vio_enable_interrupts(vdev); + crq = crq_queue_next_crq(&adapter->queue); + if (crq) { + vio_disable_interrupts(vdev); + ibmvmc_handle_crq(crq, adapter); + crq->valid = 0x00; + /* CRQ reset was requested, stop processing CRQs. + * Interrupts will be re-enabled by the reset task. 
+ */ + if (ibmvmc.state == ibmvmc_state_sched_reset) + return; + } else { + done = 1; + } + } +} + +/** + * ibmvmc_init_crq_queue - Init CRQ Queue + * + * @adapter: crq_server_adapter struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct crq_queue *queue = &adapter->queue; + int rc = 0; + int retrc = 0; + + queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL); + + if (!queue->msgs) + goto malloc_failed; + + queue->size = PAGE_SIZE / sizeof(*queue->msgs); + + queue->msg_token = dma_map_single(adapter->dev, queue->msgs, + queue->size * sizeof(*queue->msgs), + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(adapter->dev, queue->msg_token)) + goto map_failed; + + retrc = plpar_hcall_norets(H_REG_CRQ, + vdev->unit_address, + queue->msg_token, PAGE_SIZE); + retrc = rc; + + if (rc == H_RESOURCE) + rc = ibmvmc_reset_crq_queue(adapter); + + if (rc == 2) { + dev_warn(adapter->dev, "Partner adapter not ready\n"); + retrc = 0; + } else if (rc != 0) { + dev_err(adapter->dev, "Error %d opening adapter\n", rc); + goto reg_crq_failed; + } + + queue->cur = 0; + spin_lock_init(&queue->lock); + + tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter); + + if (request_irq(vdev->irq, + ibmvmc_handle_event, + 0, "ibmvmc", (void *)adapter) != 0) { + dev_err(adapter->dev, "couldn't register irq 0x%x\n", + vdev->irq); + goto req_irq_failed; + } + + rc = vio_enable_interrupts(vdev); + if (rc != 0) { + dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc); + goto req_irq_failed; + } + + return retrc; + +req_irq_failed: + /* Cannot have any work since we either never got our IRQ registered, + * or never got interrupts enabled + */ + tasklet_kill(&adapter->work_task); + h_free_crq(vdev->unit_address); +reg_crq_failed: + dma_unmap_single(adapter->dev, + queue->msg_token, + queue->size * sizeof(*queue->msgs), 
DMA_BIDIRECTIONAL); +map_failed: + free_page((unsigned long)queue->msgs); +malloc_failed: + return -ENOMEM; +} + +/* Fill in the liobn and riobn fields on the adapter */ +static int read_dma_window(struct vio_dev *vdev, + struct crq_server_adapter *adapter) +{ + const __be32 *dma_window; + const __be32 *prop; + + /* TODO Using of_parse_dma_window would be better, but it doesn't give + * a way to read multiple windows without already knowing the size of + * a window or the number of windows + */ + dma_window = + (const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window", + NULL); + if (!dma_window) { + dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n"); + return -1; + } + + adapter->liobn = be32_to_cpu(*dma_window); + dma_window++; + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells", + NULL); + if (!prop) { + dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells", + NULL); + if (!prop) { + dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + /* dma_window should point to the second window now */ + adapter->riobn = be32_to_cpu(*dma_window); + + return 0; +} + +static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct crq_server_adapter *adapter = &ibmvmc_adapter; + int rc; + + dev_set_drvdata(&vdev->dev, NULL); + memset(adapter, 0, sizeof(*adapter)); + adapter->dev = &vdev->dev; + + dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address); + + rc = read_dma_window(vdev, adapter); + if (rc != 0) { + ibmvmc.state = ibmvmc_state_failed; + return -1; + } + + dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n", + adapter->liobn, adapter->riobn); + + init_waitqueue_head(&adapter->reset_wait_queue); + adapter->reset_task = 
kthread_run(ibmvmc_reset_task, adapter, "ibmvmc"); + if (IS_ERR(adapter->reset_task)) { + dev_err(adapter->dev, "Failed to start reset thread\n"); + ibmvmc.state = ibmvmc_state_failed; + rc = PTR_ERR(adapter->reset_task); + adapter->reset_task = NULL; + return rc; + } + + rc = ibmvmc_init_crq_queue(adapter); + if (rc != 0 && rc != H_RESOURCE) { + dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n", + rc); + ibmvmc.state = ibmvmc_state_failed; + goto crq_failed; + } + + ibmvmc.state = ibmvmc_state_crqinit; + + /* Try to send an initialization message. Note that this is allowed + * to fail if the other end is not acive. In that case we just wait + * for the other side to initialize. + */ + if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 && + rc != H_RESOURCE) + dev_warn(adapter->dev, "Failed to send initialize CRQ message\n"); + + dev_set_drvdata(&vdev->dev, adapter); + + return 0; + +crq_failed: + kthread_stop(adapter->reset_task); + adapter->reset_task = NULL; + return -EPERM; +} + +static int ibmvmc_remove(struct vio_dev *vdev) +{ + struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev); + + dev_info(adapter->dev, "Entering remove for UA 0x%x\n", + vdev->unit_address); + ibmvmc_release_crq_queue(adapter); + + return 0; +} + +static struct vio_device_id ibmvmc_device_table[] = { + { "ibm,vmc", "IBM,vmc" }, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvmc_device_table); + +static struct vio_driver ibmvmc_driver = { + .name = ibmvmc_driver_name, + .id_table = ibmvmc_device_table, + .probe = ibmvmc_probe, + .remove = ibmvmc_remove, +}; + +static void __init ibmvmc_scrub_module_parms(void) +{ + if (ibmvmc_max_mtu > MAX_MTU) { + pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU); + ibmvmc_max_mtu = MAX_MTU; + } else if (ibmvmc_max_mtu < MIN_MTU) { + pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU); + ibmvmc_max_mtu = MIN_MTU; + } + + if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) { + pr_warn("ibmvmc: Max buffer pool size reduced to 
%d\n", + MAX_BUF_POOL_SIZE); + ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE; + } else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) { + pr_warn("ibmvmc: Max buffer pool size increased to %d\n", + MIN_BUF_POOL_SIZE); + ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE; + } + + if (ibmvmc_max_hmcs > MAX_HMCS) { + pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS); + ibmvmc_max_hmcs = MAX_HMCS; + } else if (ibmvmc_max_hmcs < MIN_HMCS) { + pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS); + ibmvmc_max_hmcs = MIN_HMCS; + } +} + +static struct miscdevice ibmvmc_miscdev = { + .name = ibmvmc_driver_name, + .minor = MISC_DYNAMIC_MINOR, + .fops = &ibmvmc_fops, +}; + +static int __init ibmvmc_module_init(void) +{ + int rc, i, j; + + ibmvmc.state = ibmvmc_state_initial; + pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION); + + rc = misc_register(&ibmvmc_miscdev); + if (rc) { + pr_err("ibmvmc: misc registration failed\n"); + goto misc_register_failed; + } + pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR, + ibmvmc_miscdev.minor); + + /* Initialize data structures */ + memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS); + for (i = 0; i < MAX_HMCS; i++) { + spin_lock_init(&hmcs[i].lock); + hmcs[i].state = ibmhmc_state_free; + for (j = 0; j < MAX_BUF_POOL_SIZE; j++) + hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID; + } + + /* Sanity check module parms */ + ibmvmc_scrub_module_parms(); + + /* + * Initialize some reasonable values. Might be negotiated smaller + * values during the capabilities exchange. 
+ */ + ibmvmc.max_mtu = ibmvmc_max_mtu; + ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size; + ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1; + + rc = vio_register_driver(&ibmvmc_driver); + + if (rc) { + pr_err("ibmvmc: rc %d from vio_register_driver\n", rc); + goto vio_reg_failed; + } + + return 0; + +vio_reg_failed: + misc_deregister(&ibmvmc_miscdev); +misc_register_failed: + return rc; +} + +static void __exit ibmvmc_module_exit(void) +{ + pr_info("ibmvmc: module exit\n"); + vio_unregister_driver(&ibmvmc_driver); + misc_deregister(&ibmvmc_miscdev); +} + +module_init(ibmvmc_module_init); +module_exit(ibmvmc_module_exit); + +module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size, + int, 0644); +MODULE_PARM_DESC(buf_pool_size, "Buffer pool size"); +module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644); +MODULE_PARM_DESC(max_hmcs, "Max HMCs"); +module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644); +MODULE_PARM_DESC(max_mtu, "Max MTU"); + +MODULE_AUTHOR("Steven Royer "); +MODULE_DESCRIPTION("IBM VMC"); +MODULE_VERSION(IBMVMC_DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/ibmvmc.h b/drivers/misc/ibmvmc.h new file mode 100644 index 000000000000..e140ada8fe2c --- /dev/null +++ b/drivers/misc/ibmvmc.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * linux/drivers/misc/ibmvmc.h + * + * IBM Power Systems Virtual Management Channel Support. + * + * Copyright (c) 2004, 2018 IBM Corp. + * Dave Engebretsen engebret@us.ibm.com + * Steven Royer seroyer@linux.vnet.ibm.com + * Adam Reznechek adreznec@linux.vnet.ibm.com + * Bryant G. 
Ly + */ +#ifndef IBMVMC_H +#define IBMVMC_H + +#include +#include + +#include + +#define IBMVMC_PROTOCOL_VERSION 0x0101 + +#define MIN_BUF_POOL_SIZE 16 +#define MIN_HMCS 1 +#define MIN_MTU 4096 +#define MAX_BUF_POOL_SIZE 64 +#define MAX_HMCS 2 +#define MAX_MTU (4 * 4096) +#define DEFAULT_BUF_POOL_SIZE 32 +#define DEFAULT_HMCS 1 +#define DEFAULT_MTU 4096 +#define HMC_ID_LEN 32 + +#define VMC_INVALID_BUFFER_ID 0xFFFF + +/* ioctl numbers */ +#define VMC_BASE 0xCC +#define VMC_IOCTL_SETHMCID _IOW(VMC_BASE, 0x00, unsigned char *) +#define VMC_IOCTL_QUERY _IOR(VMC_BASE, 0x01, struct ibmvmc_query_struct) +#define VMC_IOCTL_REQUESTVMC _IOR(VMC_BASE, 0x02, u32) + +#define VMC_MSG_CAP 0x01 +#define VMC_MSG_CAP_RESP 0x81 +#define VMC_MSG_OPEN 0x02 +#define VMC_MSG_OPEN_RESP 0x82 +#define VMC_MSG_CLOSE 0x03 +#define VMC_MSG_CLOSE_RESP 0x83 +#define VMC_MSG_ADD_BUF 0x04 +#define VMC_MSG_ADD_BUF_RESP 0x84 +#define VMC_MSG_REM_BUF 0x05 +#define VMC_MSG_REM_BUF_RESP 0x85 +#define VMC_MSG_SIGNAL 0x06 + +#define VMC_MSG_SUCCESS 0 +#define VMC_MSG_INVALID_HMC_INDEX 1 +#define VMC_MSG_INVALID_BUFFER_ID 2 +#define VMC_MSG_CLOSED_HMC 3 +#define VMC_MSG_INTERFACE_FAILURE 4 +#define VMC_MSG_NO_BUFFER 5 + +#define VMC_BUF_OWNER_ALPHA 0 +#define VMC_BUF_OWNER_HV 1 + +enum ibmvmc_states { + ibmvmc_state_sched_reset = -1, + ibmvmc_state_initial = 0, + ibmvmc_state_crqinit = 1, + ibmvmc_state_capabilities = 2, + ibmvmc_state_ready = 3, + ibmvmc_state_failed = 4, +}; + +enum ibmhmc_states { + /* HMC connection not established */ + ibmhmc_state_free = 0, + + /* HMC connection established (open called) */ + ibmhmc_state_initial = 1, + + /* open msg sent to HV, due to ioctl(1) call */ + ibmhmc_state_opening = 2, + + /* HMC connection ready, open resp msg from HV */ + ibmhmc_state_ready = 3, + + /* HMC connection failure */ + ibmhmc_state_failed = 4, +}; + +struct ibmvmc_buffer { + u8 valid; /* 1 when DMA storage allocated to buffer */ + u8 free; /* 1 when buffer available for the Alpha Partition 
*/ + u8 owner; + u16 id; + u32 size; + u32 msg_len; + dma_addr_t dma_addr_local; + dma_addr_t dma_addr_remote; + void *real_addr_local; +}; + +struct ibmvmc_admin_crq_msg { + u8 valid; /* RPA Defined */ + u8 type; /* ibmvmc msg type */ + u8 status; /* Response msg status. Zero is success and on failure, + * either 1 - General Failure, or 2 - Invalid Version is + * returned. + */ + u8 rsvd[2]; + u8 max_hmc; /* Max # of independent HMC connections supported */ + __be16 pool_size; /* Maximum number of buffers supported per HMC + * connection + */ + __be32 max_mtu; /* Maximum message size supported (bytes) */ + __be16 crq_size; /* # of entries available in the CRQ for the + * source partition. The target partition must + * limit the number of outstanding messages to + * one half or less. + */ + __be16 version; /* Indicates the code level of the management partition + * or the hypervisor with the high-order byte + * indicating a major version and the low-order byte + * indicating a minor version. 
+ */ +}; + +struct ibmvmc_crq_msg { + u8 valid; /* RPA Defined */ + u8 type; /* ibmvmc msg type */ + u8 status; /* Response msg status */ + union { + u8 rsvd; /* Reserved */ + u8 owner; + } var1; + u8 hmc_session; /* Session Identifier for the current VMC connection */ + u8 hmc_index; /* A unique HMC Idx would be used if multiple management + * applications running concurrently were desired + */ + union { + __be16 rsvd; + __be16 buffer_id; + } var2; + __be32 rsvd; + union { + __be32 rsvd; + __be32 lioba; + __be32 msg_len; + } var3; +}; + +/* an RPA command/response transport queue */ +struct crq_queue { + struct ibmvmc_crq_msg *msgs; + int size, cur; + dma_addr_t msg_token; + spinlock_t lock; +}; + +/* VMC server adapter settings */ +struct crq_server_adapter { + struct device *dev; + struct crq_queue queue; + u32 liobn; + u32 riobn; + struct tasklet_struct work_task; + wait_queue_head_t reset_wait_queue; + struct task_struct *reset_task; +}; + +/* Driver wide settings */ +struct ibmvmc_struct { + u32 state; + u32 max_mtu; + u32 max_buffer_pool_size; + u32 max_hmc_index; + struct crq_server_adapter *adapter; + struct cdev cdev; + u32 vmc_drc_index; +}; + +struct ibmvmc_file_session; + +/* Connection specific settings */ +struct ibmvmc_hmc { + u8 session; + u8 index; + u32 state; + struct crq_server_adapter *adapter; + spinlock_t lock; + unsigned char hmc_id[HMC_ID_LEN]; + struct ibmvmc_buffer buffer[MAX_BUF_POOL_SIZE]; + unsigned short queue_outbound_msgs[MAX_BUF_POOL_SIZE]; + int queue_head, queue_tail; + struct ibmvmc_file_session *file_session; +}; + +struct ibmvmc_file_session { + struct file *file; + struct ibmvmc_hmc *hmc; + bool valid; +}; + +struct ibmvmc_query_struct { + int have_vmc; + int state; + int vmc_drc_index; +}; + +#endif /* __IBMVMC_H */ -- cgit v1.2.3 From 96faf246d07cb35e7cc2c926b4e32fefab0dd8a6 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Mon, 14 May 2018 10:29:56 +0100 Subject: net: stmmac: Add Jose Abreu as co-maintainer I'm offering to 
be a co-maintainer for stmmac driver. As per discussion with Alexandre, I will arrange to get STM32 boards to test patches in GMAC version 3.x and 4.1. I also have HW to test GMAC version 5. Looking forward to contribute to net-dev! Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Alexandre Torgue Cc: Giuseppe Cavallaro Acked-by: Alexandre TORGUE Signed-off-by: David S. Miller --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index cecf461678b5..c6989d04afef 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13385,6 +13385,7 @@ F: drivers/media/usb/stk1160/ STMMAC ETHERNET DRIVER M: Giuseppe Cavallaro M: Alexandre Torgue +M: Jose Abreu L: netdev@vger.kernel.org W: http://www.stlinux.com S: Supported -- cgit v1.2.3 From 5ccdb7536ebec7a5f8a3883ba1985a80cec80dd3 Mon Sep 17 00:00:00 2001 From: Andrea Parri Date: Mon, 14 May 2018 16:33:55 -0700 Subject: MAINTAINERS, tools/memory-model: Update e-mail address for Andrea Parri I moved to Amarula Solutions; switch to work e-mail address. Signed-off-by: Andrea Parri Signed-off-by: Paul E. 
McKenney Cc: Akira Yokosawa Cc: Alan Stern Cc: Andrew Morton Cc: Boqun Feng Cc: David Howells Cc: Jade Alglave Cc: Linus Torvalds Cc: Luc Maranget Cc: Nicholas Piggin Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-arch@vger.kernel.org Cc: parri.andrea@gmail.com Link: http://lkml.kernel.org/r/1526340837-12222-17-git-send-email-paulmck@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 649e782e4415..b6341e8a3587 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8203,7 +8203,7 @@ F: drivers/misc/lkdtm/* LINUX KERNEL MEMORY CONSISTENCY MODEL (LKMM) M: Alan Stern -M: Andrea Parri +M: Andrea Parri M: Will Deacon M: Peter Zijlstra M: Boqun Feng -- cgit v1.2.3 From 0ce60edd78d3efa8570362846ea76ce1beb3c2b8 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 14 May 2018 22:05:00 +0200 Subject: MAINTAINERS: Add entry for Microsemi Ethernet switches Add myself as a maintainer for the Microsemi Ethernet switches. Signed-off-by: Alexandre Belloni Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c6989d04afef..658880464b9d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9279,6 +9279,12 @@ F: include/linux/cciss*.h F: include/uapi/linux/cciss*.h F: Documentation/scsi/smartpqi.txt +MICROSEMI ETHERNET SWITCH DRIVER +M: Alexandre Belloni +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/ethernet/mscc/ + MICROSOFT SURFACE PRO 3 BUTTON DRIVER M: Chen Yu L: platform-driver-x86@vger.kernel.org -- cgit v1.2.3 From 190a5f2e084a14fe7ec50c7d0ba1693645291f13 Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Mon, 14 May 2018 09:27:42 +0300 Subject: MAINTAINERS: Add ALSA: xen-front: maintainer entry Add myself as sound/xen maintainer. 
Signed-off-by: Oleksandr Andrushchenko Signed-off-by: Takashi Iwai --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 92be777d060a..bd214e061359 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15494,6 +15494,13 @@ S: Supported F: arch/x86/xen/*swiotlb* F: drivers/xen/*swiotlb* +XEN SOUND FRONTEND DRIVER +M: Oleksandr Andrushchenko +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +S: Supported +F: sound/xen/* + XFS FILESYSTEM M: Darrick J. Wong M: linux-xfs@vger.kernel.org -- cgit v1.2.3 From 51b8dc5163d2ff2bf04019f8bf7e3bd0e75bb654 Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Wed, 9 May 2018 17:34:45 +0300 Subject: media: staging: atomisp: Remove driver The atomisp driver has a long list of todo items and little has been done to address these lately while more has been added. The driver is also not functional. In other words, the driver would not be getting out of staging in the foreseeable future. At the same time it consumes developer resources in order to maintain the flaky code base. Remove it. 
Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 7 - drivers/staging/media/Kconfig | 2 - drivers/staging/media/Makefile | 1 - drivers/staging/media/atomisp/Kconfig | 12 - drivers/staging/media/atomisp/Makefile | 6 - drivers/staging/media/atomisp/TODO | 74 - drivers/staging/media/atomisp/i2c/Kconfig | 86 - drivers/staging/media/atomisp/i2c/Makefile | 18 - drivers/staging/media/atomisp/i2c/atomisp-gc0310.c | 1392 --- drivers/staging/media/atomisp/i2c/atomisp-gc2235.c | 1124 -- .../media/atomisp/i2c/atomisp-libmsrlisthelper.c | 205 - drivers/staging/media/atomisp/i2c/atomisp-lm3554.c | 968 -- .../staging/media/atomisp/i2c/atomisp-mt9m114.c | 1908 ---- drivers/staging/media/atomisp/i2c/atomisp-ov2680.c | 1470 --- drivers/staging/media/atomisp/i2c/atomisp-ov2722.c | 1271 --- drivers/staging/media/atomisp/i2c/gc0310.h | 404 - drivers/staging/media/atomisp/i2c/gc2235.h | 677 -- drivers/staging/media/atomisp/i2c/mt9m114.h | 1788 --- drivers/staging/media/atomisp/i2c/ov2680.h | 858 -- drivers/staging/media/atomisp/i2c/ov2722.h | 1268 --- drivers/staging/media/atomisp/i2c/ov5693/Kconfig | 11 - drivers/staging/media/atomisp/i2c/ov5693/Makefile | 2 - drivers/staging/media/atomisp/i2c/ov5693/ad5823.h | 63 - .../media/atomisp/i2c/ov5693/atomisp-ov5693.c | 1993 ---- drivers/staging/media/atomisp/i2c/ov5693/ov5693.h | 1392 --- .../staging/media/atomisp/include/linux/atomisp.h | 1359 --- .../atomisp/include/linux/atomisp_gmin_platform.h | 36 - .../media/atomisp/include/linux/atomisp_platform.h | 249 - .../media/atomisp/include/linux/libmsrlisthelper.h | 28 - .../staging/media/atomisp/include/media/lm3554.h | 131 - drivers/staging/media/atomisp/pci/Kconfig | 14 - drivers/staging/media/atomisp/pci/Makefile | 5 - .../staging/media/atomisp/pci/atomisp2/Makefile | 349 - .../media/atomisp/pci/atomisp2/atomisp-regs.h | 205 - .../media/atomisp/pci/atomisp2/atomisp_acc.c | 604 - .../media/atomisp/pci/atomisp2/atomisp_acc.h | 120 - 
.../media/atomisp/pci/atomisp2/atomisp_cmd.c | 6697 ----------- .../media/atomisp/pci/atomisp2/atomisp_cmd.h | 446 - .../media/atomisp/pci/atomisp2/atomisp_common.h | 75 - .../media/atomisp/pci/atomisp2/atomisp_compat.h | 662 -- .../atomisp/pci/atomisp2/atomisp_compat_css20.c | 4704 -------- .../atomisp/pci/atomisp2/atomisp_compat_css20.h | 277 - .../atomisp/pci/atomisp2/atomisp_compat_ioctl32.c | 1225 -- .../atomisp/pci/atomisp2/atomisp_compat_ioctl32.h | 365 - .../media/atomisp/pci/atomisp2/atomisp_csi2.c | 442 - .../media/atomisp/pci/atomisp2/atomisp_csi2.h | 57 - .../atomisp/pci/atomisp2/atomisp_dfs_tables.h | 408 - .../media/atomisp/pci/atomisp2/atomisp_drvfs.c | 205 - .../media/atomisp/pci/atomisp2/atomisp_drvfs.h | 24 - .../media/atomisp/pci/atomisp2/atomisp_file.c | 225 - .../media/atomisp/pci/atomisp2/atomisp_file.h | 43 - .../media/atomisp/pci/atomisp2/atomisp_fops.c | 1302 --- .../media/atomisp/pci/atomisp2/atomisp_fops.h | 50 - .../media/atomisp/pci/atomisp2/atomisp_helper.h | 29 - .../media/atomisp/pci/atomisp2/atomisp_internal.h | 310 - .../media/atomisp/pci/atomisp2/atomisp_ioctl.c | 3123 ------ .../media/atomisp/pci/atomisp2/atomisp_ioctl.h | 69 - .../media/atomisp/pci/atomisp2/atomisp_subdev.c | 1422 --- .../media/atomisp/pci/atomisp2/atomisp_subdev.h | 467 - .../media/atomisp/pci/atomisp2/atomisp_tables.h | 187 - .../media/atomisp/pci/atomisp2/atomisp_tpg.c | 164 - .../media/atomisp/pci/atomisp2/atomisp_tpg.h | 38 - .../atomisp/pci/atomisp2/atomisp_trace_event.h | 129 - .../media/atomisp/pci/atomisp2/atomisp_v4l2.c | 1562 --- .../media/atomisp/pci/atomisp2/atomisp_v4l2.h | 40 - .../media/atomisp/pci/atomisp2/css2400/Makefile | 2 - .../base/circbuf/interface/ia_css_circbuf.h | 376 - .../base/circbuf/interface/ia_css_circbuf_comm.h | 56 - .../base/circbuf/interface/ia_css_circbuf_desc.h | 169 - .../atomisp2/css2400/base/circbuf/src/circbuf.c | 321 - .../base/refcount/interface/ia_css_refcount.h | 83 - 
.../atomisp2/css2400/base/refcount/src/refcount.c | 281 - .../camera/pipe/interface/ia_css_pipe_binarydesc.h | 297 - .../camera/pipe/interface/ia_css_pipe_stagedesc.h | 52 - .../camera/pipe/interface/ia_css_pipe_util.h | 39 - .../css2400/camera/pipe/src/pipe_binarydesc.c | 880 -- .../css2400/camera/pipe/src/pipe_stagedesc.c | 115 - .../atomisp2/css2400/camera/pipe/src/pipe_util.c | 51 - .../css2400/camera/util/interface/ia_css_util.h | 141 - .../pci/atomisp2/css2400/camera/util/src/util.c | 227 - .../ia_css_isp_configs.c | 360 - .../ia_css_isp_configs.h | 189 - .../ia_css_isp_params.c | 3221 ------ .../ia_css_isp_params.h | 399 - .../ia_css_isp_states.c | 214 - .../ia_css_isp_states.h | 72 - .../atomisp2/css2400/css_2400_system/hrt/bits.h | 104 - .../css2400/css_2400_system/hrt/cell_params.h | 42 - .../hrt/css_receiver_2400_common_defs.h | 200 - .../css_2400_system/hrt/css_receiver_2400_defs.h | 258 - .../atomisp2/css2400/css_2400_system/hrt/defs.h | 36 - .../css2400/css_2400_system/hrt/dma_v2_defs.h | 199 - .../css2400/css_2400_system/hrt/gdc_v2_defs.h | 170 - .../css2400/css_2400_system/hrt/gp_timer_defs.h | 36 - .../css2400/css_2400_system/hrt/gpio_block_defs.h | 42 - .../css_2400_system/hrt/hive_isp_css_defs.h | 416 - .../hrt/hive_isp_css_host_ids_hrt.h | 84 - .../hrt/hive_isp_css_irq_types_hrt.h | 72 - .../hrt/hive_isp_css_streaming_to_mipi_types_hrt.h | 26 - .../css2400/css_2400_system/hrt/hive_types.h | 128 - .../atomisp2/css2400/css_2400_system/hrt/if_defs.h | 22 - .../hrt/input_formatter_subsystem_defs.h | 53 - .../css_2400_system/hrt/input_selector_defs.h | 89 - .../css_2400_system/hrt/input_switch_2400_defs.h | 30 - .../css_2400_system/hrt/input_system_ctrl_defs.h | 254 - .../css_2400_system/hrt/input_system_defs.h | 126 - .../css_2400_system/hrt/irq_controller_defs.h | 28 - .../css_2400_system/hrt/isp2400_mamoiada_params.h | 254 - .../css2400/css_2400_system/hrt/isp2400_support.h | 38 - .../css_2400_system/hrt/isp_acquisition_defs.h | 234 - 
.../css2400/css_2400_system/hrt/isp_capture_defs.h | 310 - .../css2400/css_2400_system/hrt/mmu_defs.h | 23 - .../hrt/scalar_processor_2400_params.h | 20 - .../css2400/css_2400_system/hrt/str2mem_defs.h | 39 - .../css_2400_system/hrt/streaming_to_mipi_defs.h | 28 - .../css_2400_system/hrt/timed_controller_defs.h | 22 - .../pci/atomisp2/css2400/css_2400_system/hrt/var.h | 74 - .../atomisp2/css2400/css_2400_system/hrt/version.h | 20 - .../atomisp2/css2400/css_2400_system/spmem_dump.c | 3634 ------ .../css2400/css_2401_csi2p_system/csi_rx_global.h | 63 - .../ia_css_isp_configs.c | 360 - .../ia_css_isp_configs.h | 189 - .../ia_css_isp_params.c | 3220 ------ .../ia_css_isp_params.h | 399 - .../ia_css_isp_states.c | 214 - .../ia_css_isp_states.h | 72 - .../css2400/css_2401_csi2p_system/host/csi_rx.c | 41 - .../css_2401_csi2p_system/host/csi_rx_local.h | 61 - .../css_2401_csi2p_system/host/csi_rx_private.h | 282 - .../css2400/css_2401_csi2p_system/host/ibuf_ctrl.c | 22 - .../css_2401_csi2p_system/host/ibuf_ctrl_local.h | 58 - .../css_2401_csi2p_system/host/ibuf_ctrl_private.h | 233 - .../host/input_system_local.h | 106 - .../host/input_system_private.h | 128 - .../css2400/css_2401_csi2p_system/host/isys_dma.c | 40 - .../css_2401_csi2p_system/host/isys_dma_local.h | 20 - .../css_2401_csi2p_system/host/isys_dma_private.h | 60 - .../css2400/css_2401_csi2p_system/host/isys_irq.c | 39 - .../css_2401_csi2p_system/host/isys_irq_local.h | 35 - .../css_2401_csi2p_system/host/isys_irq_private.h | 108 - .../css_2401_csi2p_system/host/isys_stream2mmio.c | 21 - .../host/isys_stream2mmio_local.h | 36 - .../host/isys_stream2mmio_private.h | 168 - .../css_2401_csi2p_system/host/pixelgen_local.h | 50 - .../css_2401_csi2p_system/host/pixelgen_private.h | 164 - .../css_2401_csi2p_system/host/system_local.h | 366 - .../hrt/PixelGen_SysBlock_defs.h | 126 - .../css2400/css_2401_csi2p_system/hrt/bits.h | 104 - .../css_2401_csi2p_system/hrt/cell_params.h | 42 - 
.../hrt/css_receiver_2400_common_defs.h | 200 - .../hrt/css_receiver_2400_defs.h | 258 - .../css2400/css_2401_csi2p_system/hrt/defs.h | 36 - .../css_2401_csi2p_system/hrt/dma_v2_defs.h | 199 - .../css_2401_csi2p_system/hrt/gdc_v2_defs.h | 170 - .../css_2401_csi2p_system/hrt/gp_timer_defs.h | 36 - .../css_2401_csi2p_system/hrt/gpio_block_defs.h | 42 - .../hrt/hive_isp_css_2401_irq_types_hrt.h | 68 - .../css_2401_csi2p_system/hrt/hive_isp_css_defs.h | 435 - .../hrt/hive_isp_css_host_ids_hrt.h | 119 - .../hrt/hive_isp_css_streaming_to_mipi_types_hrt.h | 26 - .../css2400/css_2401_csi2p_system/hrt/hive_types.h | 128 - .../css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h | 138 - .../css2400/css_2401_csi2p_system/hrt/if_defs.h | 22 - .../hrt/input_formatter_subsystem_defs.h | 53 - .../hrt/input_selector_defs.h | 89 - .../hrt/input_switch_2400_defs.h | 30 - .../hrt/input_system_ctrl_defs.h | 254 - .../css_2401_csi2p_system/hrt/input_system_defs.h | 126 - .../hrt/irq_controller_defs.h | 28 - .../css_2401_csi2p_system/hrt/isp2400_support.h | 38 - .../hrt/isp2401_mamoiada_params.h | 258 - .../hrt/isp_acquisition_defs.h | 234 - .../css_2401_csi2p_system/hrt/isp_capture_defs.h | 310 - .../hrt/mipi_backend_common_defs.h | 210 - .../css_2401_csi2p_system/hrt/mipi_backend_defs.h | 215 - .../css2400/css_2401_csi2p_system/hrt/mmu_defs.h | 23 - .../css_2401_csi2p_system/hrt/rx_csi_defs.h | 175 - .../hrt/scalar_processor_2400_params.h | 20 - .../css_2401_csi2p_system/hrt/str2mem_defs.h | 39 - .../css_2401_csi2p_system/hrt/stream2mmio_defs.h | 71 - .../hrt/streaming_to_mipi_defs.h | 28 - .../hrt/timed_controller_defs.h | 22 - .../css2400/css_2401_csi2p_system/hrt/var.h | 99 - .../css2400/css_2401_csi2p_system/hrt/version.h | 20 - .../css_2401_csi2p_system/ibuf_ctrl_global.h | 80 - .../css_2401_csi2p_system/input_system_global.h | 206 - .../css_2401_csi2p_system/isys_dma_global.h | 87 - .../css_2401_csi2p_system/isys_irq_global.h | 35 - .../isys_stream2mmio_global.h | 39 - 
.../css_2401_csi2p_system/pixelgen_global.h | 91 - .../css2400/css_2401_csi2p_system/spmem_dump.c | 3686 ------ .../css2400/css_2401_csi2p_system/system_global.h | 458 - .../ia_css_isp_configs.c | 360 - .../ia_css_isp_configs.h | 189 - .../ia_css_isp_params.c | 3220 ------ .../ia_css_isp_params.h | 399 - .../ia_css_isp_states.c | 214 - .../ia_css_isp_states.h | 72 - .../atomisp2/css2400/css_2401_system/hrt/bits.h | 104 - .../css2400/css_2401_system/hrt/cell_params.h | 42 - .../hrt/css_receiver_2400_common_defs.h | 200 - .../css_2401_system/hrt/css_receiver_2400_defs.h | 258 - .../atomisp2/css2400/css_2401_system/hrt/defs.h | 36 - .../css2400/css_2401_system/hrt/dma_v2_defs.h | 199 - .../css2400/css_2401_system/hrt/gdc_v2_defs.h | 170 - .../css2400/css_2401_system/hrt/gp_timer_defs.h | 36 - .../css2400/css_2401_system/hrt/gpio_block_defs.h | 42 - .../hrt/hive_isp_css_2401_irq_types_hrt.h | 69 - .../css_2401_system/hrt/hive_isp_css_defs.h | 435 - .../hrt/hive_isp_css_host_ids_hrt.h | 119 - .../hrt/hive_isp_css_streaming_to_mipi_types_hrt.h | 26 - .../css2400/css_2401_system/hrt/hive_types.h | 128 - .../atomisp2/css2400/css_2401_system/hrt/if_defs.h | 22 - .../hrt/input_formatter_subsystem_defs.h | 53 - .../css_2401_system/hrt/input_selector_defs.h | 89 - .../css_2401_system/hrt/input_switch_2400_defs.h | 30 - .../css_2401_system/hrt/input_system_ctrl_defs.h | 254 - .../css_2401_system/hrt/input_system_defs.h | 126 - .../css_2401_system/hrt/irq_controller_defs.h | 28 - .../css2400/css_2401_system/hrt/isp2400_support.h | 38 - .../css_2401_system/hrt/isp2401_mamoiada_params.h | 258 - .../css_2401_system/hrt/isp_acquisition_defs.h | 234 - .../css2400/css_2401_system/hrt/isp_capture_defs.h | 310 - .../css2400/css_2401_system/hrt/mmu_defs.h | 23 - .../hrt/scalar_processor_2400_params.h | 20 - .../css2400/css_2401_system/hrt/str2mem_defs.h | 39 - .../css_2401_system/hrt/streaming_to_mipi_defs.h | 28 - .../css_2401_system/hrt/timed_controller_defs.h | 22 - 
.../pci/atomisp2/css2400/css_2401_system/hrt/var.h | 99 - .../atomisp2/css2400/css_2401_system/hrt/version.h | 20 - .../atomisp2/css2400/css_2401_system/spmem_dump.c | 3634 ------ .../media/atomisp/pci/atomisp2/css2400/css_trace.h | 388 - .../css2400/hive_isp_css_common/debug_global.h | 83 - .../css2400/hive_isp_css_common/dma_global.h | 255 - .../hive_isp_css_common/event_fifo_global.h | 20 - .../hive_isp_css_common/fifo_monitor_global.h | 32 - .../css2400/hive_isp_css_common/gdc_global.h | 90 - .../css2400/hive_isp_css_common/gp_device_global.h | 85 - .../css2400/hive_isp_css_common/gp_timer_global.h | 33 - .../css2400/hive_isp_css_common/gpio_global.h | 45 - .../css2400/hive_isp_css_common/hmem_global.h | 45 - .../css2400/hive_isp_css_common/host/debug.c | 72 - .../css2400/hive_isp_css_common/host/debug_local.h | 21 - .../hive_isp_css_common/host/debug_private.h | 99 - .../css2400/hive_isp_css_common/host/dma.c | 299 - .../css2400/hive_isp_css_common/host/dma_local.h | 207 - .../css2400/hive_isp_css_common/host/dma_private.h | 41 - .../css2400/hive_isp_css_common/host/event_fifo.c | 19 - .../hive_isp_css_common/host/event_fifo_local.h | 57 - .../hive_isp_css_common/host/event_fifo_private.h | 75 - .../hive_isp_css_common/host/fifo_monitor.c | 567 - .../hive_isp_css_common/host/fifo_monitor_local.h | 99 - .../host/fifo_monitor_private.h | 79 - .../css2400/hive_isp_css_common/host/gdc.c | 127 - .../css2400/hive_isp_css_common/host/gdc_local.h | 20 - .../css2400/hive_isp_css_common/host/gdc_private.h | 20 - .../css2400/hive_isp_css_common/host/gp_device.c | 108 - .../hive_isp_css_common/host/gp_device_local.h | 143 - .../hive_isp_css_common/host/gp_device_private.h | 46 - .../css2400/hive_isp_css_common/host/gp_timer.c | 70 - .../hive_isp_css_common/host/gp_timer_local.h | 45 - .../hive_isp_css_common/host/gp_timer_private.h | 22 - .../css2400/hive_isp_css_common/host/gpio_local.h | 20 - .../hive_isp_css_common/host/gpio_private.h | 44 - 
.../css2400/hive_isp_css_common/host/hmem.c | 19 - .../css2400/hive_isp_css_common/host/hmem_local.h | 20 - .../hive_isp_css_common/host/hmem_private.h | 30 - .../hive_isp_css_common/host/input_formatter.c | 228 - .../host/input_formatter_local.h | 120 - .../host/input_formatter_private.h | 46 - .../hive_isp_css_common/host/input_system.c | 1823 --- .../hive_isp_css_common/host/input_system_local.h | 533 - .../host/input_system_private.h | 116 - .../css2400/hive_isp_css_common/host/irq.c | 448 - .../css2400/hive_isp_css_common/host/irq_local.h | 136 - .../css2400/hive_isp_css_common/host/irq_private.h | 44 - .../css2400/hive_isp_css_common/host/isp.c | 129 - .../css2400/hive_isp_css_common/host/isp_local.h | 57 - .../css2400/hive_isp_css_common/host/isp_private.h | 157 - .../css2400/hive_isp_css_common/host/mmu.c | 46 - .../css2400/hive_isp_css_common/host/mmu_local.h | 20 - .../atomisp2/css2400/hive_isp_css_common/host/sp.c | 81 - .../css2400/hive_isp_css_common/host/sp_local.h | 101 - .../css2400/hive_isp_css_common/host/sp_private.h | 163 - .../hive_isp_css_common/host/system_local.h | 291 - .../css2400/hive_isp_css_common/host/timed_ctrl.c | 74 - .../hive_isp_css_common/host/timed_ctrl_local.h | 20 - .../hive_isp_css_common/host/timed_ctrl_private.h | 34 - .../css2400/hive_isp_css_common/host/vamem_local.h | 20 - .../hive_isp_css_common/host/vamem_private.h | 37 - .../css2400/hive_isp_css_common/host/vmem.c | 258 - .../css2400/hive_isp_css_common/host/vmem_local.h | 55 - .../hive_isp_css_common/host/vmem_private.h | 20 - .../hive_isp_css_common/input_formatter_global.h | 114 - .../hive_isp_css_common/input_system_global.h | 155 - .../css2400/hive_isp_css_common/irq_global.h | 45 - .../css2400/hive_isp_css_common/isp_global.h | 115 - .../css2400/hive_isp_css_common/mmu_global.h | 22 - .../css2400/hive_isp_css_common/sp_global.h | 93 - .../css2400/hive_isp_css_common/system_global.h | 348 - .../hive_isp_css_common/timed_ctrl_global.h | 56 - 
.../css2400/hive_isp_css_common/vamem_global.h | 34 - .../css2400/hive_isp_css_common/vmem_global.h | 28 - .../css2400/hive_isp_css_include/assert_support.h | 102 - .../css2400/hive_isp_css_include/bitop_support.h | 25 - .../atomisp2/css2400/hive_isp_css_include/csi_rx.h | 43 - .../atomisp2/css2400/hive_isp_css_include/debug.h | 47 - .../device_access/device_access.h | 194 - .../atomisp2/css2400/hive_isp_css_include/dma.h | 47 - .../css2400/hive_isp_css_include/error_support.h | 70 - .../css2400/hive_isp_css_include/event_fifo.h | 46 - .../css2400/hive_isp_css_include/fifo_monitor.h | 46 - .../css2400/hive_isp_css_include/gdc_device.h | 48 - .../css2400/hive_isp_css_include/gp_device.h | 46 - .../css2400/hive_isp_css_include/gp_timer.h | 46 - .../atomisp2/css2400/hive_isp_css_include/gpio.h | 46 - .../atomisp2/css2400/hive_isp_css_include/hmem.h | 46 - .../hive_isp_css_include/host/csi_rx_public.h | 135 - .../hive_isp_css_include/host/debug_public.h | 99 - .../css2400/hive_isp_css_include/host/dma_public.h | 73 - .../hive_isp_css_include/host/event_fifo_public.h | 79 - .../host/fifo_monitor_public.h | 110 - .../css2400/hive_isp_css_include/host/gdc_public.h | 59 - .../hive_isp_css_include/host/gp_device_public.h | 58 - .../hive_isp_css_include/host/gp_timer_public.h | 34 - .../hive_isp_css_include/host/gpio_public.h | 45 - .../hive_isp_css_include/host/hmem_public.h | 32 - .../hive_isp_css_include/host/ibuf_ctrl_public.h | 93 - .../host/input_formatter_public.h | 115 - .../host/input_system_public.h | 376 - .../css2400/hive_isp_css_include/host/irq_public.h | 184 - .../css2400/hive_isp_css_include/host/isp_public.h | 186 - .../hive_isp_css_include/host/isys_dma_public.h | 38 - .../hive_isp_css_include/host/isys_irq_public.h | 45 - .../hive_isp_css_include/host/isys_public.h | 37 - .../host/isys_stream2mmio_public.h | 101 - .../css2400/hive_isp_css_include/host/mmu_public.h | 96 - .../hive_isp_css_include/host/pixelgen_public.h | 79 - 
.../css2400/hive_isp_css_include/host/sp_public.h | 223 - .../css2400/hive_isp_css_include/host/tag_public.h | 41 - .../hive_isp_css_include/host/timed_ctrl_public.h | 59 - .../hive_isp_css_include/host/vamem_public.h | 20 - .../hive_isp_css_include/host/vmem_public.h | 20 - .../css2400/hive_isp_css_include/ibuf_ctrl.h | 48 - .../css2400/hive_isp_css_include/input_formatter.h | 46 - .../css2400/hive_isp_css_include/input_system.h | 46 - .../atomisp2/css2400/hive_isp_css_include/irq.h | 46 - .../atomisp2/css2400/hive_isp_css_include/isp.h | 46 - .../css2400/hive_isp_css_include/isys_dma.h | 48 - .../css2400/hive_isp_css_include/isys_irq.h | 39 - .../hive_isp_css_include/isys_stream2mmio.h | 48 - .../css2400/hive_isp_css_include/math_support.h | 218 - .../memory_access/memory_access.h | 174 - .../css2400/hive_isp_css_include/memory_realloc.h | 38 - .../css2400/hive_isp_css_include/misc_support.h | 26 - .../css2400/hive_isp_css_include/mmu_device.h | 40 - .../css2400/hive_isp_css_include/pixelgen.h | 48 - .../hive_isp_css_include/platform_support.h | 41 - .../css2400/hive_isp_css_include/print_support.h | 41 - .../atomisp2/css2400/hive_isp_css_include/queue.h | 46 - .../css2400/hive_isp_css_include/resource.h | 47 - .../atomisp2/css2400/hive_isp_css_include/socket.h | 47 - .../pci/atomisp2/css2400/hive_isp_css_include/sp.h | 46 - .../css2400/hive_isp_css_include/string_support.h | 165 - .../css2400/hive_isp_css_include/system_types.h | 25 - .../atomisp2/css2400/hive_isp_css_include/tag.h | 45 - .../css2400/hive_isp_css_include/timed_ctrl.h | 46 - .../css2400/hive_isp_css_include/type_support.h | 40 - .../atomisp2/css2400/hive_isp_css_include/vamem.h | 46 - .../atomisp2/css2400/hive_isp_css_include/vmem.h | 46 - .../css2400/hive_isp_css_shared/host/queue_local.h | 20 - .../hive_isp_css_shared/host/queue_private.h | 18 - .../css2400/hive_isp_css_shared/host/tag.c | 95 - .../css2400/hive_isp_css_shared/host/tag_local.h | 22 - 
.../css2400/hive_isp_css_shared/host/tag_private.h | 18 - .../css2400/hive_isp_css_shared/queue_global.h | 19 - .../css2400/hive_isp_css_shared/sw_event_global.h | 36 - .../css2400/hive_isp_css_shared/tag_global.h | 56 - .../media/atomisp/pci/atomisp2/css2400/ia_css.h | 57 - .../media/atomisp/pci/atomisp2/css2400/ia_css_3a.h | 188 - .../pci/atomisp2/css2400/ia_css_acc_types.h | 468 - .../atomisp/pci/atomisp2/css2400/ia_css_buffer.h | 84 - .../atomisp/pci/atomisp2/css2400/ia_css_control.h | 157 - .../pci/atomisp2/css2400/ia_css_device_access.c | 95 - .../pci/atomisp2/css2400/ia_css_device_access.h | 59 - .../atomisp/pci/atomisp2/css2400/ia_css_dvs.h | 299 - .../atomisp/pci/atomisp2/css2400/ia_css_env.h | 94 - .../atomisp/pci/atomisp2/css2400/ia_css_err.h | 63 - .../pci/atomisp2/css2400/ia_css_event_public.h | 196 - .../atomisp/pci/atomisp2/css2400/ia_css_firmware.h | 74 - .../atomisp/pci/atomisp2/css2400/ia_css_frac.h | 37 - .../pci/atomisp2/css2400/ia_css_frame_format.h | 101 - .../pci/atomisp2/css2400/ia_css_frame_public.h | 352 - .../pci/atomisp2/css2400/ia_css_host_data.h | 46 - .../pci/atomisp2/css2400/ia_css_input_port.h | 60 - .../atomisp/pci/atomisp2/css2400/ia_css_irq.h | 235 - .../pci/atomisp2/css2400/ia_css_memory_access.c | 83 - .../atomisp/pci/atomisp2/css2400/ia_css_metadata.h | 71 - .../atomisp/pci/atomisp2/css2400/ia_css_mipi.h | 82 - .../atomisp/pci/atomisp2/css2400/ia_css_mmu.h | 32 - .../pci/atomisp2/css2400/ia_css_mmu_private.h | 29 - .../atomisp/pci/atomisp2/css2400/ia_css_morph.h | 39 - .../atomisp/pci/atomisp2/css2400/ia_css_pipe.h | 195 - .../pci/atomisp2/css2400/ia_css_pipe_public.h | 579 - .../atomisp/pci/atomisp2/css2400/ia_css_prbs.h | 53 - .../pci/atomisp2/css2400/ia_css_properties.h | 41 - .../atomisp/pci/atomisp2/css2400/ia_css_shading.h | 40 - .../atomisp/pci/atomisp2/css2400/ia_css_stream.h | 110 - .../pci/atomisp2/css2400/ia_css_stream_format.h | 29 - .../pci/atomisp2/css2400/ia_css_stream_public.h | 582 - 
.../atomisp/pci/atomisp2/css2400/ia_css_timer.h | 84 - .../atomisp/pci/atomisp2/css2400/ia_css_tpg.h | 78 - .../atomisp/pci/atomisp2/css2400/ia_css_types.h | 616 - .../atomisp/pci/atomisp2/css2400/ia_css_version.h | 40 - .../pci/atomisp2/css2400/ia_css_version_data.h | 33 - .../css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c | 32 - .../css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h | 27 - .../css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h | 24 - .../css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h | 48 - .../isp/kernels/anr/anr_1.0/ia_css_anr.host.c | 60 - .../isp/kernels/anr/anr_1.0/ia_css_anr.host.h | 39 - .../isp/kernels/anr/anr_1.0/ia_css_anr_param.h | 25 - .../isp/kernels/anr/anr_1.0/ia_css_anr_types.h | 36 - .../isp/kernels/anr/anr_2/ia_css_anr2.host.c | 46 - .../isp/kernels/anr/anr_2/ia_css_anr2.host.h | 35 - .../isp/kernels/anr/anr_2/ia_css_anr2_table.host.c | 52 - .../isp/kernels/anr/anr_2/ia_css_anr2_table.host.h | 22 - .../isp/kernels/anr/anr_2/ia_css_anr2_types.h | 32 - .../isp/kernels/anr/anr_2/ia_css_anr_param.h | 27 - .../css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c | 66 - .../css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h | 32 - .../css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h | 40 - .../css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h | 37 - .../css2400/isp/kernels/bnlm/ia_css_bnlm.host.c | 183 - .../css2400/isp/kernels/bnlm/ia_css_bnlm.host.h | 40 - .../css2400/isp/kernels/bnlm/ia_css_bnlm_param.h | 63 - .../css2400/isp/kernels/bnlm/ia_css_bnlm_types.h | 106 - .../isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c | 122 - .../isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h | 35 - .../isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h | 47 - .../isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h | 71 - .../isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c | 64 - .../isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h | 34 - .../isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h | 30 - .../isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c | 28 - .../isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h | 25 - 
.../isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h | 24 - .../isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c | 76 - .../isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h | 43 - .../isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h | 32 - .../isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h | 55 - .../isp/kernels/cnr/cnr_2/ia_css_cnr_param.h | 20 - .../conversion_1.0/ia_css_conversion.host.c | 36 - .../conversion_1.0/ia_css_conversion.host.h | 33 - .../conversion_1.0/ia_css_conversion_param.h | 28 - .../conversion_1.0/ia_css_conversion_types.h | 32 - .../copy_output_1.0/ia_css_copy_output.host.c | 47 - .../copy_output_1.0/ia_css_copy_output.host.h | 34 - .../copy_output_1.0/ia_css_copy_output_param.h | 26 - .../isp/kernels/crop/crop_1.0/ia_css_crop.host.c | 64 - .../isp/kernels/crop/crop_1.0/ia_css_crop.host.h | 41 - .../isp/kernels/crop/crop_1.0/ia_css_crop_param.h | 32 - .../isp/kernels/crop/crop_1.0/ia_css_crop_types.h | 35 - .../isp/kernels/csc/csc_1.0/ia_css_csc.host.c | 132 - .../isp/kernels/csc/csc_1.0/ia_css_csc.host.h | 54 - .../isp/kernels/csc/csc_1.0/ia_css_csc_param.h | 34 - .../isp/kernels/csc/csc_1.0/ia_css_csc_types.h | 78 - .../isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c | 120 - .../isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h | 33 - .../isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h | 46 - .../isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h | 20 - .../isp/kernels/ctc/ctc2/ia_css_ctc2.host.c | 156 - .../isp/kernels/ctc/ctc2/ia_css_ctc2.host.h | 33 - .../isp/kernels/ctc/ctc2/ia_css_ctc2_param.h | 49 - .../isp/kernels/ctc/ctc2/ia_css_ctc2_types.h | 55 - .../isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c | 63 - .../isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h | 36 - .../isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h | 44 - .../kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c | 215 - .../kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h | 24 - .../isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h | 110 - .../css2400/isp/kernels/de/de_1.0/ia_css_de.host.c | 79 - .../css2400/isp/kernels/de/de_1.0/ia_css_de.host.h | 44 - 
.../isp/kernels/de/de_1.0/ia_css_de_param.h | 27 - .../isp/kernels/de/de_1.0/ia_css_de_state.h | 26 - .../isp/kernels/de/de_1.0/ia_css_de_types.h | 43 - .../css2400/isp/kernels/de/de_2/ia_css_de2.host.c | 54 - .../css2400/isp/kernels/de/de_2/ia_css_de2.host.h | 38 - .../css2400/isp/kernels/de/de_2/ia_css_de2_param.h | 30 - .../css2400/isp/kernels/de/de_2/ia_css_de2_types.h | 42 - .../css2400/isp/kernels/de/de_2/ia_css_de_param.h | 20 - .../css2400/isp/kernels/de/de_2/ia_css_de_state.h | 21 - .../css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c | 132 - .../css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h | 47 - .../isp/kernels/dp/dp_1.0/ia_css_dp_param.h | 36 - .../isp/kernels/dp/dp_1.0/ia_css_dp_types.h | 50 - .../css2400/isp/kernels/dpc2/ia_css_dpc2.host.c | 65 - .../css2400/isp/kernels/dpc2/ia_css_dpc2.host.h | 39 - .../css2400/isp/kernels/dpc2/ia_css_dpc2_param.h | 53 - .../css2400/isp/kernels/dpc2/ia_css_dpc2_types.h | 59 - .../isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c | 306 - .../isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h | 60 - .../isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h | 39 - .../isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h | 30 - .../isp/kernels/eed1_8/ia_css_eed1_8.host.c | 329 - .../isp/kernels/eed1_8/ia_css_eed1_8.host.h | 45 - .../isp/kernels/eed1_8/ia_css_eed1_8_param.h | 154 - .../isp/kernels/eed1_8/ia_css_eed1_8_types.h | 86 - .../isp/kernels/fc/fc_1.0/ia_css_formats.host.c | 62 - .../isp/kernels/fc/fc_1.0/ia_css_formats.host.h | 45 - .../isp/kernels/fc/fc_1.0/ia_css_formats_param.h | 25 - .../isp/kernels/fc/fc_1.0/ia_css_formats_types.h | 38 - .../fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h | 33 - .../fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h | 26 - .../isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c | 89 - .../isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h | 44 - .../isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h | 35 - .../isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h | 52 - .../css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c | 118 - 
.../css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h | 65 - .../isp/kernels/gc/gc_1.0/ia_css_gc_param.h | 61 - .../isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c | 214 - .../isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h | 24 - .../isp/kernels/gc/gc_1.0/ia_css_gc_types.h | 97 - .../css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.c | 110 - .../css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h | 79 - .../css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h | 43 - .../isp/kernels/gc/gc_2/ia_css_gc2_table.host.c | 132 - .../isp/kernels/gc/gc_2/ia_css_gc2_table.host.h | 26 - .../css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h | 54 - .../css2400/isp/kernels/hdr/ia_css_hdr.host.c | 41 - .../css2400/isp/kernels/hdr/ia_css_hdr.host.h | 31 - .../css2400/isp/kernels/hdr/ia_css_hdr_param.h | 53 - .../css2400/isp/kernels/hdr/ia_css_hdr_types.h | 64 - .../io_ls/bayer_io_ls/ia_css_bayer_io.host.c | 86 - .../io_ls/bayer_io_ls/ia_css_bayer_io.host.h | 31 - .../io_ls/bayer_io_ls/ia_css_bayer_io_param.h | 22 - .../io_ls/bayer_io_ls/ia_css_bayer_io_types.h | 22 - .../kernels/io_ls/common/ia_css_common_io_param.h | 22 - .../kernels/io_ls/common/ia_css_common_io_types.h | 31 - .../io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h | 22 - .../io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h | 22 - .../ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c | 86 - .../ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h | 31 - .../ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h | 22 - .../ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h | 22 - .../ipu2_io_ls/common/ia_css_common_io_param.h | 22 - .../ipu2_io_ls/common/ia_css_common_io_types.h | 31 - .../yuv444_io_ls/ia_css_yuv444_io.host.c | 86 - .../yuv444_io_ls/ia_css_yuv444_io.host.h | 31 - .../yuv444_io_ls/ia_css_yuv444_io_param.h | 22 - .../yuv444_io_ls/ia_css_yuv444_io_types.h | 22 - .../iterator/iterator_1.0/ia_css_iterator.host.c | 80 - .../iterator/iterator_1.0/ia_css_iterator.host.h | 34 - .../iterator/iterator_1.0/ia_css_iterator_param.h | 38 - 
.../isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c | 74 - .../isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h | 41 - .../kernels/macc/macc1_5/ia_css_macc1_5_param.h | 31 - .../macc/macc1_5/ia_css_macc1_5_table.host.c | 32 - .../macc/macc1_5/ia_css_macc1_5_table.host.h | 22 - .../kernels/macc/macc1_5/ia_css_macc1_5_types.h | 74 - .../isp/kernels/macc/macc_1.0/ia_css_macc.host.c | 49 - .../isp/kernels/macc/macc_1.0/ia_css_macc.host.h | 42 - .../isp/kernels/macc/macc_1.0/ia_css_macc_param.h | 25 - .../kernels/macc/macc_1.0/ia_css_macc_table.host.c | 47 - .../kernels/macc/macc_1.0/ia_css_macc_table.host.h | 23 - .../isp/kernels/macc/macc_1.0/ia_css_macc_types.h | 63 - .../isp/kernels/norm/norm_1.0/ia_css_norm.host.c | 16 - .../isp/kernels/norm/norm_1.0/ia_css_norm.host.h | 20 - .../isp/kernels/norm/norm_1.0/ia_css_norm_param.h | 19 - .../css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c | 79 - .../css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h | 40 - .../css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h | 29 - .../css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h | 45 - .../css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c | 159 - .../css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h | 53 - .../isp/kernels/ob/ob_1.0/ia_css_ob_param.h | 48 - .../isp/kernels/ob/ob_1.0/ia_css_ob_types.h | 69 - .../kernels/output/output_1.0/ia_css_output.host.c | 162 - .../kernels/output/output_1.0/ia_css_output.host.h | 75 - .../output/output_1.0/ia_css_output_param.h | 36 - .../output/output_1.0/ia_css_output_types.h | 48 - .../kernels/qplane/qplane_2/ia_css_qplane.host.c | 61 - .../kernels/qplane/qplane_2/ia_css_qplane.host.h | 43 - .../kernels/qplane/qplane_2/ia_css_qplane_param.h | 30 - .../kernels/qplane/qplane_2/ia_css_qplane_types.h | 33 - .../isp/kernels/raw/raw_1.0/ia_css_raw.host.c | 136 - .../isp/kernels/raw/raw_1.0/ia_css_raw.host.h | 38 - .../isp/kernels/raw/raw_1.0/ia_css_raw_param.h | 38 - .../isp/kernels/raw/raw_1.0/ia_css_raw_types.h | 37 - .../raw_aa_binning_1.0/ia_css_raa.host.c | 35 - 
.../raw_aa_binning_1.0/ia_css_raa.host.h | 27 - .../isp/kernels/ref/ref_1.0/ia_css_ref.host.c | 74 - .../isp/kernels/ref/ref_1.0/ia_css_ref.host.h | 41 - .../isp/kernels/ref/ref_1.0/ia_css_ref_param.h | 36 - .../isp/kernels/ref/ref_1.0/ia_css_ref_state.h | 26 - .../isp/kernels/ref/ref_1.0/ia_css_ref_types.h | 28 - .../isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c | 386 - .../isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h | 77 - .../isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h | 54 - .../isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h | 220 - .../css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c | 130 - .../css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h | 77 - .../isp/kernels/sc/sc_1.0/ia_css_sc_param.h | 71 - .../isp/kernels/sc/sc_1.0/ia_css_sc_types.h | 136 - .../kernels/sdis/common/ia_css_sdis_common.host.h | 99 - .../kernels/sdis/common/ia_css_sdis_common_types.h | 219 - .../isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c | 423 - .../isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h | 101 - .../isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h | 53 - .../isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c | 338 - .../isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h | 95 - .../isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h | 69 - .../isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c | 76 - .../isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h | 38 - .../isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h | 43 - .../isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h | 53 - .../isp/kernels/tnr/tnr3/ia_css_tnr3_types.h | 61 - .../isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c | 130 - .../isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h | 56 - .../isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h | 48 - .../isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h | 26 - .../isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h | 60 - .../isp/kernels/uds/uds_1.0/ia_css_uds_param.h | 31 - .../css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c | 140 - .../css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h | 47 - .../isp/kernels/vf/vf_1.0/ia_css_vf_param.h | 37 - 
.../isp/kernels/vf/vf_1.0/ia_css_vf_types.h | 32 - .../css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c | 89 - .../css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h | 39 - .../isp/kernels/wb/wb_1.0/ia_css_wb_param.h | 29 - .../isp/kernels/wb/wb_1.0/ia_css_wb_types.h | 47 - .../isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c | 66 - .../isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h | 47 - .../isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h | 51 - .../kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c | 81 - .../kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h | 22 - .../isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h | 71 - .../isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c | 265 - .../isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h | 42 - .../isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h | 96 - .../isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h | 98 - .../isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c | 219 - .../isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h | 60 - .../isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h | 49 - .../isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h | 26 - .../isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h | 81 - .../isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c | 125 - .../isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h | 56 - .../isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h | 45 - .../isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h | 94 - .../isp/kernels/ynr/ynr_2/ia_css_ynr_param.h | 20 - .../isp/kernels/ynr/ynr_2/ia_css_ynr_state.h | 21 - .../css2400/isp/modes/interface/input_buf.isp.h | 73 - .../css2400/isp/modes/interface/isp_const.h | 482 - .../css2400/isp/modes/interface/isp_types.h | 128 - .../atomisp/pci/atomisp2/css2400/memory_realloc.c | 81 - .../runtime/binary/interface/ia_css_binary.h | 257 - .../atomisp2/css2400/runtime/binary/src/binary.c | 1838 --- .../css2400/runtime/bufq/interface/ia_css_bufq.h | 197 - .../runtime/bufq/interface/ia_css_bufq_comm.h | 66 - .../pci/atomisp2/css2400/runtime/bufq/src/bufq.c | 589 - .../css2400/runtime/debug/interface/ia_css_debug.h | 509 - .../debug/interface/ia_css_debug_internal.h | 31 
- .../runtime/debug/interface/ia_css_debug_pipe.h | 84 - .../css2400/runtime/debug/src/ia_css_debug.c | 3596 ------ .../css2400/runtime/event/interface/ia_css_event.h | 46 - .../pci/atomisp2/css2400/runtime/event/src/event.c | 126 - .../runtime/eventq/interface/ia_css_eventq.h | 69 - .../atomisp2/css2400/runtime/eventq/src/eventq.c | 77 - .../css2400/runtime/frame/interface/ia_css_frame.h | 180 - .../runtime/frame/interface/ia_css_frame_comm.h | 132 - .../pci/atomisp2/css2400/runtime/frame/src/frame.c | 1026 -- .../css2400/runtime/ifmtr/interface/ia_css_ifmtr.h | 49 - .../pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c | 569 - .../runtime/inputfifo/interface/ia_css_inputfifo.h | 69 - .../css2400/runtime/inputfifo/src/inputfifo.c | 613 - .../runtime/isp_param/interface/ia_css_isp_param.h | 118 - .../isp_param/interface/ia_css_isp_param_types.h | 98 - .../css2400/runtime/isp_param/src/isp_param.c | 227 - .../css2400/runtime/isys/interface/ia_css_isys.h | 201 - .../runtime/isys/interface/ia_css_isys_comm.h | 69 - .../css2400/runtime/isys/src/csi_rx_rmgr.c | 179 - .../css2400/runtime/isys/src/csi_rx_rmgr.h | 43 - .../css2400/runtime/isys/src/ibuf_ctrl_rmgr.c | 140 - .../css2400/runtime/isys/src/ibuf_ctrl_rmgr.h | 55 - .../css2400/runtime/isys/src/isys_dma_rmgr.c | 103 - .../css2400/runtime/isys/src/isys_dma_rmgr.h | 41 - .../atomisp2/css2400/runtime/isys/src/isys_init.c | 139 - .../runtime/isys/src/isys_stream2mmio_rmgr.c | 105 - .../runtime/isys/src/isys_stream2mmio_rmgr.h | 41 - .../pci/atomisp2/css2400/runtime/isys/src/rx.c | 607 - .../css2400/runtime/isys/src/virtual_isys.c | 898 -- .../css2400/runtime/isys/src/virtual_isys.h | 41 - .../runtime/pipeline/interface/ia_css_pipeline.h | 302 - .../pipeline/interface/ia_css_pipeline_common.h | 42 - .../css2400/runtime/pipeline/src/pipeline.c | 805 -- .../css2400/runtime/queue/interface/ia_css_queue.h | 192 - .../runtime/queue/interface/ia_css_queue_comm.h | 69 - .../pci/atomisp2/css2400/runtime/queue/src/queue.c | 412 - 
.../css2400/runtime/queue/src/queue_access.c | 192 - .../css2400/runtime/queue/src/queue_access.h | 101 - .../css2400/runtime/rmgr/interface/ia_css_rmgr.h | 88 - .../runtime/rmgr/interface/ia_css_rmgr_vbuf.h | 115 - .../pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c | 55 - .../atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c | 330 - .../runtime/spctrl/interface/ia_css_spctrl.h | 87 - .../runtime/spctrl/interface/ia_css_spctrl_comm.h | 61 - .../atomisp2/css2400/runtime/spctrl/src/spctrl.c | 193 - .../tagger/interface/ia_css_tagger_common.h | 59 - .../pci/atomisp2/css2400/runtime/timer/src/timer.c | 48 - .../media/atomisp/pci/atomisp2/css2400/sh_css.c | 11094 ------------------- .../atomisp/pci/atomisp2/css2400/sh_css_defs.h | 410 - .../atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h | 36 - .../atomisp/pci/atomisp2/css2400/sh_css_firmware.c | 315 - .../atomisp/pci/atomisp2/css2400/sh_css_firmware.h | 54 - .../atomisp/pci/atomisp2/css2400/sh_css_frac.h | 40 - .../pci/atomisp2/css2400/sh_css_host_data.c | 42 - .../atomisp/pci/atomisp2/css2400/sh_css_hrt.c | 84 - .../atomisp/pci/atomisp2/css2400/sh_css_hrt.h | 34 - .../atomisp/pci/atomisp2/css2400/sh_css_internal.h | 1089 -- .../atomisp/pci/atomisp2/css2400/sh_css_legacy.h | 77 - .../atomisp/pci/atomisp2/css2400/sh_css_metadata.c | 16 - .../atomisp/pci/atomisp2/css2400/sh_css_metrics.c | 176 - .../atomisp/pci/atomisp2/css2400/sh_css_metrics.h | 55 - .../atomisp/pci/atomisp2/css2400/sh_css_mipi.c | 749 -- .../atomisp/pci/atomisp2/css2400/sh_css_mipi.h | 49 - .../atomisp/pci/atomisp2/css2400/sh_css_mmu.c | 56 - .../atomisp/pci/atomisp2/css2400/sh_css_morph.c | 16 - .../pci/atomisp2/css2400/sh_css_param_dvs.c | 267 - .../pci/atomisp2/css2400/sh_css_param_dvs.h | 86 - .../pci/atomisp2/css2400/sh_css_param_shading.c | 417 - .../pci/atomisp2/css2400/sh_css_param_shading.h | 39 - .../atomisp/pci/atomisp2/css2400/sh_css_params.c | 5253 --------- .../atomisp/pci/atomisp2/css2400/sh_css_params.h | 188 - 
.../pci/atomisp2/css2400/sh_css_params_internal.h | 21 - .../atomisp/pci/atomisp2/css2400/sh_css_pipe.c | 16 - .../pci/atomisp2/css2400/sh_css_properties.c | 43 - .../atomisp/pci/atomisp2/css2400/sh_css_shading.c | 16 - .../media/atomisp/pci/atomisp2/css2400/sh_css_sp.c | 1799 --- .../media/atomisp/pci/atomisp2/css2400/sh_css_sp.h | 248 - .../atomisp/pci/atomisp2/css2400/sh_css_stream.c | 16 - .../pci/atomisp2/css2400/sh_css_stream_format.c | 76 - .../pci/atomisp2/css2400/sh_css_stream_format.h | 23 - .../atomisp/pci/atomisp2/css2400/sh_css_struct.h | 80 - .../atomisp/pci/atomisp2/css2400/sh_css_uds.h | 37 - .../atomisp/pci/atomisp2/css2400/sh_css_version.c | 30 - .../staging/media/atomisp/pci/atomisp2/hmm/hmm.c | 727 -- .../media/atomisp/pci/atomisp2/hmm/hmm_bo.c | 1528 --- .../atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c | 233 - .../atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c | 252 - .../media/atomisp/pci/atomisp2/hmm/hmm_vm.c | 212 - .../atomisp2/hrt/hive_isp_css_custom_host_hrt.h | 103 - .../atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c | 127 - .../atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h | 57 - .../media/atomisp/pci/atomisp2/include/hmm/hmm.h | 102 - .../atomisp/pci/atomisp2/include/hmm/hmm_bo.h | 319 - .../atomisp/pci/atomisp2/include/hmm/hmm_common.h | 96 - .../atomisp/pci/atomisp2/include/hmm/hmm_pool.h | 115 - .../atomisp/pci/atomisp2/include/hmm/hmm_vm.h | 64 - .../atomisp/pci/atomisp2/include/mmu/isp_mmu.h | 169 - .../pci/atomisp2/include/mmu/sh_mmu_mrfld.h | 24 - .../media/atomisp/pci/atomisp2/mmu/isp_mmu.c | 584 - .../media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c | 75 - drivers/staging/media/atomisp/platform/Makefile | 5 - .../media/atomisp/platform/intel-mid/Makefile | 4 - .../platform/intel-mid/atomisp_gmin_platform.c | 779 -- 767 files changed, 168056 deletions(-) delete mode 100644 drivers/staging/media/atomisp/Kconfig delete mode 100644 drivers/staging/media/atomisp/Makefile delete mode 100644 drivers/staging/media/atomisp/TODO delete mode 
100644 drivers/staging/media/atomisp/i2c/Kconfig delete mode 100644 drivers/staging/media/atomisp/i2c/Makefile delete mode 100644 drivers/staging/media/atomisp/i2c/atomisp-gc0310.c delete mode 100644 drivers/staging/media/atomisp/i2c/atomisp-gc2235.c delete mode 100644 drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c delete mode 100644 drivers/staging/media/atomisp/i2c/atomisp-lm3554.c delete mode 100644 drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c delete mode 100644 drivers/staging/media/atomisp/i2c/atomisp-ov2680.c delete mode 100644 drivers/staging/media/atomisp/i2c/atomisp-ov2722.c delete mode 100644 drivers/staging/media/atomisp/i2c/gc0310.h delete mode 100644 drivers/staging/media/atomisp/i2c/gc2235.h delete mode 100644 drivers/staging/media/atomisp/i2c/mt9m114.h delete mode 100644 drivers/staging/media/atomisp/i2c/ov2680.h delete mode 100644 drivers/staging/media/atomisp/i2c/ov2722.h delete mode 100644 drivers/staging/media/atomisp/i2c/ov5693/Kconfig delete mode 100644 drivers/staging/media/atomisp/i2c/ov5693/Makefile delete mode 100644 drivers/staging/media/atomisp/i2c/ov5693/ad5823.h delete mode 100644 drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c delete mode 100644 drivers/staging/media/atomisp/i2c/ov5693/ov5693.h delete mode 100644 drivers/staging/media/atomisp/include/linux/atomisp.h delete mode 100644 drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h delete mode 100644 drivers/staging/media/atomisp/include/linux/atomisp_platform.h delete mode 100644 drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h delete mode 100644 drivers/staging/media/atomisp/include/media/lm3554.h delete mode 100644 drivers/staging/media/atomisp/pci/Kconfig delete mode 100644 drivers/staging/media/atomisp/pci/Makefile delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/Makefile delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_comm.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/interface/ia_css_refcount.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/src/refcount.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_stagedesc.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_stagedesc.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_util.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/bits.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/cell_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_common_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/dma_v2_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gdc_v2_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_timer_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gpio_block_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_defs.h delete 
mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_host_ids_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/if_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_selector_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_switch_2400_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_ctrl_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/irq_controller_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_mamoiada_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_acquisition_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_capture_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/mmu_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/scalar_processor_2400_params.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/str2mem_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/streaming_to_mipi_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/timed_controller_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/var.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/version.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/spmem_dump.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/csi_rx_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/system_local.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/PixelGen_SysBlock_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/bits.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/cell_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_common_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/dma_v2_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gdc_v2_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_timer_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gpio_block_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_2401_irq_types_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_host_ids_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/if_defs.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_selector_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_switch_2400_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_ctrl_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/irq_controller_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2400_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2401_mamoiada_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_acquisition_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_capture_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_common_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mmu_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/rx_csi_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/scalar_processor_2400_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/str2mem_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/stream2mmio_defs.h delete mode 
100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/streaming_to_mipi_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/timed_controller_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/var.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/version.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/ibuf_ctrl_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/input_system_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_irq_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_stream2mmio_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/spmem_dump.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/bits.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/cell_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_common_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/dma_v2_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gdc_v2_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_timer_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gpio_block_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_2401_irq_types_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_host_ids_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/if_defs.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_selector_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_switch_2400_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_ctrl_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/irq_controller_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2400_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2401_mamoiada_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_acquisition_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_capture_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/mmu_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/scalar_processor_2400_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/str2mem_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/streaming_to_mipi_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/timed_controller_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/var.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/version.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/spmem_dump.c delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/css_trace.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/debug_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/dma_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/event_fifo_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/fifo_monitor_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gdc_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_device_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_timer_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gpio_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/hmem_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h 
delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter.c delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/system_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl.c delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_formatter_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_system_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/irq_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/isp_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/mmu_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/sp_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/system_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/timed_ctrl_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vamem_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vmem_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bitop_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/device_access/device_access.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/error_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/debug_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/dma_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/event_fifo_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/fifo_monitor_public.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_device_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_timer_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gpio_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_formatter_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_system_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/irq_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_dma_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_irq_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/sp_public.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/tag_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/timed_ctrl_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vamem_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vmem_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_access/memory_access.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_realloc.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/misc_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/system_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_local.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/queue_global.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/sw_event_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/tag_global.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_host_data.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_memory_access.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version_data.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_state.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.h delete mode 
100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_state.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_types.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_param.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_state.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common.host.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/uds/uds_1.0/ia_css_uds_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h 
delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_state.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/input_buf.isp.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq_comm.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_internal.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_pipe.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/interface/ia_css_event.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/interface/ia_css_eventq.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame_comm.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/interface/ia_css_ifmtr.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/interface/ia_css_inputfifo.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys_comm.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline_common.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue_comm.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr_vbuf.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/tagger/interface/ia_css_tagger_common.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_defs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_frac.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_host_data.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metadata.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_morph.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params_internal.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_pipe.c delete mode 
100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_properties.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_shading.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_uds.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_version.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h delete mode 100644 
drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c delete mode 100644 drivers/staging/media/atomisp/platform/Makefile delete mode 100644 drivers/staging/media/atomisp/platform/intel-mid/Makefile delete mode 100644 drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 49003f77cedd..bef6cbe7adc3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13272,13 +13272,6 @@ L: stable@vger.kernel.org S: Supported F: Documentation/process/stable-kernel-rules.rst -STAGING - ATOMISP DRIVER -M: Alan Cox -M: Sakari Ailus -L: linux-media@vger.kernel.org -S: Maintained -F: drivers/staging/media/atomisp/ - STAGING - COMEDI M: Ian Abbott M: H Hartley Sweeten diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig index 4c495a10025c..494f369695d7 100644 --- a/drivers/staging/media/Kconfig +++ b/drivers/staging/media/Kconfig @@ -19,8 +19,6 @@ menuconfig STAGING_MEDIA if STAGING_MEDIA && MEDIA_SUPPORT # Please keep them in alphabetic order -source "drivers/staging/media/atomisp/Kconfig" - source "drivers/staging/media/bcm2048/Kconfig" source "drivers/staging/media/davinci_vpfe/Kconfig" diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile index 61a5765cb98f..59c184c7cfa8 100644 --- a/drivers/staging/media/Makefile +++ b/drivers/staging/media/Makefile @@ -5,5 +5,4 @@ obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074/ obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031/ obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/ obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/ -obj-$(CONFIG_INTEL_ATOMISP) += atomisp/ obj-$(CONFIG_TEGRA_VDE) += tegra-vde/ diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig 
deleted file mode 100644 index 27f078749148..000000000000 --- a/drivers/staging/media/atomisp/Kconfig +++ /dev/null @@ -1,12 +0,0 @@ -menuconfig INTEL_ATOMISP - bool "Enable support to Intel MIPI camera drivers" - depends on X86 && EFI && MEDIA_CONTROLLER && PCI && ACPI - select COMMON_CLK - help - Enable support for the Intel ISP2 camera interfaces and MIPI - sensor drivers. - -if INTEL_ATOMISP -source "drivers/staging/media/atomisp/pci/Kconfig" -source "drivers/staging/media/atomisp/i2c/Kconfig" -endif diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile deleted file mode 100644 index 403fe5edff6d..000000000000 --- a/drivers/staging/media/atomisp/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# -# Makefile for camera drivers. -# -obj-$(CONFIG_INTEL_ATOMISP) += pci/ -obj-$(CONFIG_INTEL_ATOMISP) += i2c/ -obj-$(CONFIG_INTEL_ATOMISP) += platform/ diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO deleted file mode 100644 index 255ce3630c2a..000000000000 --- a/drivers/staging/media/atomisp/TODO +++ /dev/null @@ -1,74 +0,0 @@ -1. A single AtomISP driver needs to be implemented to support both BYT and - CHT platforms. The current driver is a mechanical and hand combined merge - of the two using an ifdef ISP2401 to select the CHT version, which at the - moment is not enabled. Eventually this should become a runtime if check, - but there are some quite tricky things that need sorting out before that - will be possible. - -2. The file structure needs to get tidied up to resemble a normal Linux - driver. - -3. Lots of the midlayer glue. unused code and abstraction needs removing. - -3. The sensor drivers read MIPI settings from EFI variables or default to the - settings hard-coded in the platform data file for different platforms. - This isn't ideal but may be hard to improve as this is how existing - platforms work. - -4. The sensor drivers use the regulator framework API. 
In the ideal world it - would be using ACPI but that's not how the existing devices work. - -5. The AtomISP driver includes some special IOCTLS (ATOMISP_IOC_XXXX_XXXX) - that may need some cleaning up. - -6. Correct Coding Style. Please don't send coding style patches for this - driver until the other work is done. - -7. The ISP code depends on the exact FW version. The version defined in - BYT: - drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c - static const char *release_version = STR(irci_stable_candrpv_0415_20150521_0458); - CHT: - drivers/staging/media/atomisp/pci/atomisp2/css/sh_css_firmware.c - static const char *release_version = STR(irci_ecr-master_20150911_0724); - - At some point we may need to round up a few driver versions and see if - there are any specific things that can be done to fold in support for - multiple firmware versions. - -8. Switch to V4L2 async API to set up sensor, lens and flash devices. - Control those devices using V4L2 sub-device API without custom - extensions. - -9. Switch to standard V4L2 sub-device API for sensor and lens. In - particular, the user space API needs to support V4L2 controls as - defined in the V4L2 spec and references to atomisp must be removed from - these drivers. - -10. Use LED flash API for flash LED drivers such as LM3554 (which already - has a LED class driver). - -11. Switch from videobuf1 to videobuf2. Videobuf1 is being removed! - -Limitations: - -1. To test the patches, you also need the ISP firmware - - for BYT:/lib/firmware/shisp_2400b0_v21.bin - for CHT:/lib/firmware/shisp_2401a0_v21.bin - - The firmware files will usually be found in /etc/firmware on an Android - device but can also be extracted from the upgrade kit if you've managed - to lose them somehow. - -2. Without a 3A libary the capture behaviour is not very good. To take a good - picture, you need tune ISP parameters by IOCTL functions or use a 3A libary - such as libxcam. - -3. 
The driver is intended to drive the PCI exposed versions of the device. - It will not detect those devices enumerated via ACPI as a field of the - i915 GPU driver. - -4. The driver supports only v2 of the IPU/Camera. It will not work with the - versions of the hardware in other SoCs. - diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig deleted file mode 100644 index f7f7177b9b37..000000000000 --- a/drivers/staging/media/atomisp/i2c/Kconfig +++ /dev/null @@ -1,86 +0,0 @@ -# -# Kconfig for sensor drivers -# - -source "drivers/staging/media/atomisp/i2c/ov5693/Kconfig" - -config VIDEO_ATOMISP_OV2722 - tristate "OVT ov2722 sensor support" - depends on ACPI - depends on I2C && VIDEO_V4L2 - ---help--- - This is a Video4Linux2 sensor-level driver for the OVT - OV2722 raw camera. - - OVT is a 2M raw sensor. - - It currently only works with the atomisp driver. - -config VIDEO_ATOMISP_GC2235 - tristate "Galaxy gc2235 sensor support" - depends on ACPI - depends on I2C && VIDEO_V4L2 - ---help--- - This is a Video4Linux2 sensor-level driver for the OVT - GC2235 raw camera. - - GC2235 is a 2M raw sensor. - - It currently only works with the atomisp driver. - -config VIDEO_ATOMISP_MSRLIST_HELPER - tristate "Helper library to load, parse and apply large register lists." - depends on I2C - ---help--- - This is a helper library to be used from a sensor driver to load, parse - and apply large register lists. - - To compile this driver as a module, choose M here: the - module will be called libmsrlisthelper. - -config VIDEO_ATOMISP_MT9M114 - tristate "Aptina mt9m114 sensor support" - depends on ACPI - depends on I2C && VIDEO_V4L2 - ---help--- - This is a Video4Linux2 sensor-level driver for the Micron - mt9m114 1.3 Mpixel camera. - - mt9m114 is video camera sensor. - - It currently only works with the atomisp driver. 
- -config VIDEO_ATOMISP_GC0310 - tristate "GC0310 sensor support" - depends on ACPI - depends on I2C && VIDEO_V4L2 - ---help--- - This is a Video4Linux2 sensor-level driver for the Galaxycore - GC0310 0.3MP sensor. - -config VIDEO_ATOMISP_OV2680 - tristate "Omnivision OV2680 sensor support" - depends on ACPI - depends on I2C && VIDEO_V4L2 - ---help--- - This is a Video4Linux2 sensor-level driver for the Omnivision - OV2680 raw camera. - - ov2680 is a 2M raw sensor. - - It currently only works with the atomisp driver. - -# -# Kconfig for flash drivers -# - -config VIDEO_ATOMISP_LM3554 - tristate "LM3554 flash light driver" - depends on ACPI - depends on VIDEO_V4L2 && I2C - ---help--- - This is a Video4Linux2 sub-dev driver for the LM3554 - flash light driver. - - To compile this driver as a module, choose M here: the - module will be called lm3554 diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile deleted file mode 100644 index 8d022986e199..000000000000 --- a/drivers/staging/media/atomisp/i2c/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for sensor drivers -# - -obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += ov5693/ -obj-$(CONFIG_VIDEO_ATOMISP_MT9M114) += atomisp-mt9m114.o -obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o -obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o -obj-$(CONFIG_VIDEO_ATOMISP_OV2680) += atomisp-ov2680.o -obj-$(CONFIG_VIDEO_ATOMISP_GC0310) += atomisp-gc0310.o - -obj-$(CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER) += atomisp-libmsrlisthelper.o - -# Makefile for flash drivers -# - -obj-$(CONFIG_VIDEO_ATOMISP_LM3554) += atomisp-lm3554.o diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c deleted file mode 100644 index 3b38cbccf294..000000000000 --- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c +++ /dev/null @@ -1,1392 +0,0 @@ -/* - * Support for GalaxyCore GC0310 VGA camera sensor. 
- * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../include/linux/atomisp_gmin_platform.h" - -#include "gc0310.h" - -/* i2c read/write stuff */ -static int gc0310_read_reg(struct i2c_client *client, - u16 data_length, u8 reg, u8 *val) -{ - int err; - struct i2c_msg msg[2]; - unsigned char data[1]; - - if (!client->adapter) { - dev_err(&client->dev, "%s error, no client->adapter\n", - __func__); - return -ENODEV; - } - - if (data_length != GC0310_8BIT) { - dev_err(&client->dev, "%s error, invalid data length\n", - __func__); - return -EINVAL; - } - - memset(msg, 0, sizeof(msg)); - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = I2C_MSG_LENGTH; - msg[0].buf = data; - - /* high byte goes out first */ - data[0] = (u8)(reg & 0xff); - - msg[1].addr = client->addr; - msg[1].len = data_length; - msg[1].flags = I2C_M_RD; - msg[1].buf = data; - - err = i2c_transfer(client->adapter, msg, 2); - if (err != 2) { - if (err >= 0) - err = -EIO; - dev_err(&client->dev, - "read from offset 0x%x error %d", reg, err); - return err; - } - - *val = 0; - /* high byte comes first */ - if (data_length == GC0310_8BIT) - *val = (u8)data[0]; - - return 0; -} - -static int gc0310_i2c_write(struct i2c_client *client, u16 len, u8 *data) -{ - struct i2c_msg msg; - const int num_msg = 1; - int ret; - - msg.addr = client->addr; - msg.flags = 0; - msg.len = 
len; - msg.buf = data; - ret = i2c_transfer(client->adapter, &msg, 1); - - return ret == num_msg ? 0 : -EIO; -} - -static int gc0310_write_reg(struct i2c_client *client, u16 data_length, - u8 reg, u8 val) -{ - int ret; - unsigned char data[2] = {0}; - u8 *wreg = (u8 *)data; - const u16 len = data_length + sizeof(u8); /* 8-bit address + data */ - - if (data_length != GC0310_8BIT) { - dev_err(&client->dev, - "%s error, invalid data_length\n", __func__); - return -EINVAL; - } - - /* high byte goes out first */ - *wreg = (u8)(reg & 0xff); - - if (data_length == GC0310_8BIT) - data[1] = (u8)(val); - - ret = gc0310_i2c_write(client, len, data); - if (ret) - dev_err(&client->dev, - "write error: wrote 0x%x to offset 0x%x error %d", - val, reg, ret); - - return ret; -} - -/* - * gc0310_write_reg_array - Initializes a list of GC0310 registers - * @client: i2c driver client structure - * @reglist: list of registers to be written - * - * This function initializes a list of registers. When consecutive addresses - * are found in a row on the list, this function creates a buffer and sends - * consecutive data in a single i2c_transfer(). - * - * __gc0310_flush_reg_array, __gc0310_buf_reg_array() and - * __gc0310_write_reg_is_consecutive() are internal functions to - * gc0310_write_reg_array_fast() and should be not used anywhere else. 
- * - */ - -static int __gc0310_flush_reg_array(struct i2c_client *client, - struct gc0310_write_ctrl *ctrl) -{ - u16 size; - - if (ctrl->index == 0) - return 0; - - size = sizeof(u8) + ctrl->index; /* 8-bit address + data */ - ctrl->buffer.addr = (u8)(ctrl->buffer.addr); - ctrl->index = 0; - - return gc0310_i2c_write(client, size, (u8 *)&ctrl->buffer); -} - -static int __gc0310_buf_reg_array(struct i2c_client *client, - struct gc0310_write_ctrl *ctrl, - const struct gc0310_reg *next) -{ - int size; - - switch (next->type) { - case GC0310_8BIT: - size = 1; - ctrl->buffer.data[ctrl->index] = (u8)next->val; - break; - default: - return -EINVAL; - } - - /* When first item is added, we need to store its starting address */ - if (ctrl->index == 0) - ctrl->buffer.addr = next->reg; - - ctrl->index += size; - - /* - * Buffer cannot guarantee free space for u32? Better flush it to avoid - * possible lack of memory for next item. - */ - if (ctrl->index + sizeof(u8) >= GC0310_MAX_WRITE_BUF_SIZE) - return __gc0310_flush_reg_array(client, ctrl); - - return 0; -} - -static int __gc0310_write_reg_is_consecutive(struct i2c_client *client, - struct gc0310_write_ctrl *ctrl, - const struct gc0310_reg *next) -{ - if (ctrl->index == 0) - return 1; - - return ctrl->buffer.addr + ctrl->index == next->reg; -} - -static int gc0310_write_reg_array(struct i2c_client *client, - const struct gc0310_reg *reglist) -{ - const struct gc0310_reg *next = reglist; - struct gc0310_write_ctrl ctrl; - int err; - - ctrl.index = 0; - for (; next->type != GC0310_TOK_TERM; next++) { - switch (next->type & GC0310_TOK_MASK) { - case GC0310_TOK_DELAY: - err = __gc0310_flush_reg_array(client, &ctrl); - if (err) - return err; - msleep(next->val); - break; - default: - /* - * If next address is not consecutive, data needs to be - * flushed before proceed. 
- */ - if (!__gc0310_write_reg_is_consecutive(client, &ctrl, - next)) { - err = __gc0310_flush_reg_array(client, &ctrl); - if (err) - return err; - } - err = __gc0310_buf_reg_array(client, &ctrl, next); - if (err) { - dev_err(&client->dev, "%s: write error, aborted\n", - __func__); - return err; - } - break; - } - } - - return __gc0310_flush_reg_array(client, &ctrl); -} -static int gc0310_g_focal(struct v4l2_subdev *sd, s32 *val) -{ - *val = (GC0310_FOCAL_LENGTH_NUM << 16) | GC0310_FOCAL_LENGTH_DEM; - return 0; -} - -static int gc0310_g_fnumber(struct v4l2_subdev *sd, s32 *val) -{ - /*const f number for imx*/ - *val = (GC0310_F_NUMBER_DEFAULT_NUM << 16) | GC0310_F_NUMBER_DEM; - return 0; -} - -static int gc0310_g_fnumber_range(struct v4l2_subdev *sd, s32 *val) -{ - *val = (GC0310_F_NUMBER_DEFAULT_NUM << 24) | - (GC0310_F_NUMBER_DEM << 16) | - (GC0310_F_NUMBER_DEFAULT_NUM << 8) | GC0310_F_NUMBER_DEM; - return 0; -} - -static int gc0310_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - - *val = gc0310_res[dev->fmt_idx].bin_factor_x; - - return 0; -} - -static int gc0310_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - - *val = gc0310_res[dev->fmt_idx].bin_factor_y; - - return 0; -} - -static int gc0310_get_intg_factor(struct i2c_client *client, - struct camera_mipi_info *info, - const struct gc0310_resolution *res) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct gc0310_device *dev = to_gc0310_sensor(sd); - struct atomisp_sensor_mode_data *buf = &info->data; - u16 val; - u8 reg_val; - int ret; - unsigned int hori_blanking; - unsigned int vert_blanking; - unsigned int sh_delay; - - if (!info) - return -EINVAL; - - /* pixel clock calculattion */ - dev->vt_pix_clk_freq_mhz = 14400000; // 16.8MHz - buf->vt_pix_clk_freq_mhz = dev->vt_pix_clk_freq_mhz; - pr_info("vt_pix_clk_freq_mhz=%d\n", buf->vt_pix_clk_freq_mhz); - - /* get integration time 
*/ - buf->coarse_integration_time_min = GC0310_COARSE_INTG_TIME_MIN; - buf->coarse_integration_time_max_margin = - GC0310_COARSE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_min = GC0310_FINE_INTG_TIME_MIN; - buf->fine_integration_time_max_margin = - GC0310_FINE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_def = GC0310_FINE_INTG_TIME_MIN; - buf->read_mode = res->bin_mode; - - /* get the cropping and output resolution to ISP for this mode. */ - /* Getting crop_horizontal_start */ - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_H_CROP_START_H, ®_val); - if (ret) - return ret; - val = (reg_val & 0xFF) << 8; - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_H_CROP_START_L, ®_val); - if (ret) - return ret; - buf->crop_horizontal_start = val | (reg_val & 0xFF); - pr_info("crop_horizontal_start=%d\n", buf->crop_horizontal_start); - - /* Getting crop_vertical_start */ - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_V_CROP_START_H, ®_val); - if (ret) - return ret; - val = (reg_val & 0xFF) << 8; - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_V_CROP_START_L, ®_val); - if (ret) - return ret; - buf->crop_vertical_start = val | (reg_val & 0xFF); - pr_info("crop_vertical_start=%d\n", buf->crop_vertical_start); - - /* Getting output_width */ - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_H_OUTSIZE_H, ®_val); - if (ret) - return ret; - val = (reg_val & 0xFF) << 8; - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_H_OUTSIZE_L, ®_val); - if (ret) - return ret; - buf->output_width = val | (reg_val & 0xFF); - pr_info("output_width=%d\n", buf->output_width); - - /* Getting output_height */ - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_V_OUTSIZE_H, ®_val); - if (ret) - return ret; - val = (reg_val & 0xFF) << 8; - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_V_OUTSIZE_L, ®_val); - if (ret) - return ret; - buf->output_height = val | (reg_val & 0xFF); - pr_info("output_height=%d\n", buf->output_height); - - 
buf->crop_horizontal_end = buf->crop_horizontal_start + buf->output_width - 1; - buf->crop_vertical_end = buf->crop_vertical_start + buf->output_height - 1; - pr_info("crop_horizontal_end=%d\n", buf->crop_horizontal_end); - pr_info("crop_vertical_end=%d\n", buf->crop_vertical_end); - - /* Getting line_length_pck */ - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_H_BLANKING_H, ®_val); - if (ret) - return ret; - val = (reg_val & 0xFF) << 8; - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_H_BLANKING_L, ®_val); - if (ret) - return ret; - hori_blanking = val | (reg_val & 0xFF); - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_SH_DELAY, ®_val); - if (ret) - return ret; - sh_delay = reg_val; - buf->line_length_pck = buf->output_width + hori_blanking + sh_delay + 4; - pr_info("hori_blanking=%d sh_delay=%d line_length_pck=%d\n", hori_blanking, sh_delay, buf->line_length_pck); - - /* Getting frame_length_lines */ - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_V_BLANKING_H, ®_val); - if (ret) - return ret; - val = (reg_val & 0xFF) << 8; - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_V_BLANKING_L, ®_val); - if (ret) - return ret; - vert_blanking = val | (reg_val & 0xFF); - buf->frame_length_lines = buf->output_height + vert_blanking; - pr_info("vert_blanking=%d frame_length_lines=%d\n", vert_blanking, buf->frame_length_lines); - - buf->binning_factor_x = res->bin_factor_x ? - res->bin_factor_x : 1; - buf->binning_factor_y = res->bin_factor_y ? 
- res->bin_factor_y : 1; - return 0; -} - -static int gc0310_set_gain(struct v4l2_subdev *sd, int gain) - -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - u8 again, dgain; - - if (gain < 0x20) - gain = 0x20; - if (gain > 0x80) - gain = 0x80; - - if (gain >= 0x20 && gain < 0x40) { - again = 0x0; /* sqrt(2) */ - dgain = gain; - } else { - again = 0x2; /* 2 * sqrt(2) */ - dgain = gain / 2; - } - - pr_info("gain=0x%x again=0x%x dgain=0x%x\n", gain, again, dgain); - - /* set analog gain */ - ret = gc0310_write_reg(client, GC0310_8BIT, - GC0310_AGC_ADJ, again); - if (ret) - return ret; - - /* set digital gain */ - ret = gc0310_write_reg(client, GC0310_8BIT, - GC0310_DGC_ADJ, dgain); - if (ret) - return ret; - - return 0; -} - -static int __gc0310_set_exposure(struct v4l2_subdev *sd, int coarse_itg, - int gain, int digitgain) - -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - pr_info("coarse_itg=%d gain=%d digitgain=%d\n", coarse_itg, gain, digitgain); - - /* set exposure */ - ret = gc0310_write_reg(client, GC0310_8BIT, - GC0310_AEC_PK_EXPO_L, - coarse_itg & 0xff); - if (ret) - return ret; - - ret = gc0310_write_reg(client, GC0310_8BIT, - GC0310_AEC_PK_EXPO_H, - (coarse_itg >> 8) & 0x0f); - if (ret) - return ret; - - ret = gc0310_set_gain(sd, gain); - if (ret) - return ret; - - return ret; -} - -static int gc0310_set_exposure(struct v4l2_subdev *sd, int exposure, - int gain, int digitgain) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - int ret; - - mutex_lock(&dev->input_lock); - ret = __gc0310_set_exposure(sd, exposure, gain, digitgain); - mutex_unlock(&dev->input_lock); - - return ret; -} - -static long gc0310_s_exposure(struct v4l2_subdev *sd, - struct atomisp_exposure *exposure) -{ - int exp = exposure->integration_time[0]; - int gain = exposure->gain[0]; - int digitgain = exposure->gain[1]; - - /* we should not accept the invalid value below. 
*/ - if (gain == 0) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - v4l2_err(client, "%s: invalid value\n", __func__); - return -EINVAL; - } - - return gc0310_set_exposure(sd, exp, gain, digitgain); -} - -/* TO DO */ -static int gc0310_v_flip(struct v4l2_subdev *sd, s32 value) -{ - return 0; -} - -/* TO DO */ -static int gc0310_h_flip(struct v4l2_subdev *sd, s32 value) -{ - return 0; -} - -static long gc0310_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) -{ - - switch (cmd) { - case ATOMISP_IOC_S_EXPOSURE: - return gc0310_s_exposure(sd, arg); - default: - return -EINVAL; - } - return 0; -} - -/* This returns the exposure time being used. This should only be used - * for filling in EXIF data, not for actual image processing. - */ -static int gc0310_q_exposure(struct v4l2_subdev *sd, s32 *value) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - u8 reg_v; - int ret; - - /* get exposure */ - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_AEC_PK_EXPO_L, - ®_v); - if (ret) - goto err; - - *value = reg_v; - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_AEC_PK_EXPO_H, - ®_v); - if (ret) - goto err; - - *value = *value + (reg_v << 8); -err: - return ret; -} - -static int gc0310_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct gc0310_device *dev = - container_of(ctrl->handler, struct gc0310_device, ctrl_handler); - struct i2c_client *client = v4l2_get_subdevdata(&dev->sd); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_VFLIP: - dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n", - __func__, ctrl->val); - ret = gc0310_v_flip(&dev->sd, ctrl->val); - break; - case V4L2_CID_HFLIP: - dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n", - __func__, ctrl->val); - ret = gc0310_h_flip(&dev->sd, ctrl->val); - break; - default: - ret = -EINVAL; - } - return ret; -} - -static int gc0310_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct gc0310_device *dev = - container_of(ctrl->handler, struct gc0310_device, ctrl_handler); - int ret = 0; - - switch 
(ctrl->id) { - case V4L2_CID_EXPOSURE_ABSOLUTE: - ret = gc0310_q_exposure(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FOCAL_ABSOLUTE: - ret = gc0310_g_focal(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_ABSOLUTE: - ret = gc0310_g_fnumber(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_RANGE: - ret = gc0310_g_fnumber_range(&dev->sd, &ctrl->val); - break; - case V4L2_CID_BIN_FACTOR_HORZ: - ret = gc0310_g_bin_factor_x(&dev->sd, &ctrl->val); - break; - case V4L2_CID_BIN_FACTOR_VERT: - ret = gc0310_g_bin_factor_y(&dev->sd, &ctrl->val); - break; - default: - ret = -EINVAL; - } - - return ret; -} - -static const struct v4l2_ctrl_ops ctrl_ops = { - .s_ctrl = gc0310_s_ctrl, - .g_volatile_ctrl = gc0310_g_volatile_ctrl -}; - -static const struct v4l2_ctrl_config gc0310_controls[] = { - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "exposure", - .min = 0x0, - .max = 0xffff, - .step = 0x01, - .def = 0x00, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_VFLIP, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Flip", - .min = 0, - .max = 1, - .step = 1, - .def = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_HFLIP, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Mirror", - .min = 0, - .max = 1, - .step = 1, - .def = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCAL_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "focal length", - .min = GC0310_FOCAL_LENGTH_DEFAULT, - .max = GC0310_FOCAL_LENGTH_DEFAULT, - .step = 0x01, - .def = GC0310_FOCAL_LENGTH_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number", - .min = GC0310_F_NUMBER_DEFAULT, - .max = GC0310_F_NUMBER_DEFAULT, - .step = 0x01, - .def = GC0310_F_NUMBER_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_RANGE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number range", - .min = GC0310_F_NUMBER_RANGE, - .max = 
GC0310_F_NUMBER_RANGE, - .step = 0x01, - .def = GC0310_F_NUMBER_RANGE, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_BIN_FACTOR_HORZ, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "horizontal binning factor", - .min = 0, - .max = GC0310_BIN_FACTOR_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_BIN_FACTOR_VERT, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "vertical binning factor", - .min = 0, - .max = GC0310_BIN_FACTOR_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, -}; - -static int gc0310_init(struct v4l2_subdev *sd) -{ - int ret; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct gc0310_device *dev = to_gc0310_sensor(sd); - - pr_info("%s S\n", __func__); - mutex_lock(&dev->input_lock); - - /* set inital registers */ - ret = gc0310_write_reg_array(client, gc0310_reset_register); - - /* restore settings */ - gc0310_res = gc0310_res_preview; - N_RES = N_RES_PREVIEW; - - mutex_unlock(&dev->input_lock); - - pr_info("%s E\n", __func__); - return ret; -} - -static int power_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret = 0; - struct gc0310_device *dev = to_gc0310_sensor(sd); - if (!dev || !dev->platform_data) - return -ENODEV; - - if (flag) { - /* The upstream module driver (written to Crystal - * Cove) had this logic to pulse the rails low first. - * This appears to break things on the MRD7 with the - * X-Powers PMIC... 
- * - * ret = dev->platform_data->v1p8_ctrl(sd, 0); - * ret |= dev->platform_data->v2p8_ctrl(sd, 0); - * mdelay(50); - */ - ret |= dev->platform_data->v1p8_ctrl(sd, 1); - ret |= dev->platform_data->v2p8_ctrl(sd, 1); - usleep_range(10000, 15000); - } - - if (!flag || ret) { - ret |= dev->platform_data->v1p8_ctrl(sd, 0); - ret |= dev->platform_data->v2p8_ctrl(sd, 0); - } - return ret; -} - -static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret; - struct gc0310_device *dev = to_gc0310_sensor(sd); - - if (!dev || !dev->platform_data) - return -ENODEV; - - /* GPIO0 == "reset" (active low), GPIO1 == "power down" */ - if (flag) { - /* Pulse reset, then release power down */ - ret = dev->platform_data->gpio0_ctrl(sd, 0); - usleep_range(5000, 10000); - ret |= dev->platform_data->gpio0_ctrl(sd, 1); - usleep_range(10000, 15000); - ret |= dev->platform_data->gpio1_ctrl(sd, 0); - usleep_range(10000, 15000); - } else { - ret = dev->platform_data->gpio1_ctrl(sd, 1); - ret |= dev->platform_data->gpio0_ctrl(sd, 0); - } - return ret; -} - - -static int power_down(struct v4l2_subdev *sd); - -static int power_up(struct v4l2_subdev *sd) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - pr_info("%s S\n", __func__); - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - - /* power control */ - ret = power_ctrl(sd, 1); - if (ret) - goto fail_power; - - /* flis clock control */ - ret = dev->platform_data->flisclk_ctrl(sd, 1); - if (ret) - goto fail_clk; - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 1); - if (ret) { - ret = gpio_ctrl(sd, 1); - if (ret) - goto fail_gpio; - } - - msleep(100); - - pr_info("%s E\n", __func__); - return 0; - -fail_gpio: - dev->platform_data->flisclk_ctrl(sd, 0); -fail_clk: - power_ctrl(sd, 0); -fail_power: - dev_err(&client->dev, "sensor power-up failed\n"); - - return ret; -} - -static int power_down(struct v4l2_subdev 
*sd) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 0); - if (ret) { - ret = gpio_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "gpio failed 2\n"); - } - - ret = dev->platform_data->flisclk_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "flisclk failed\n"); - - /* power control */ - ret = power_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "vprog failed.\n"); - - return ret; -} - -static int gc0310_s_power(struct v4l2_subdev *sd, int on) -{ - int ret; - if (on == 0) - return power_down(sd); - else { - ret = power_up(sd); - if (!ret) - return gc0310_init(sd); - } - return ret; -} - -/* - * distance - calculate the distance - * @res: resolution - * @w: width - * @h: height - * - * Get the gap between resolution and w/h. - * res->width/height smaller than w/h wouldn't be considered. - * Returns the value of gap or -1 if fail. 
- */ -#define LARGEST_ALLOWED_RATIO_MISMATCH 800 -static int distance(struct gc0310_resolution *res, u32 w, u32 h) -{ - unsigned int w_ratio = (res->width << 13) / w; - unsigned int h_ratio; - int match; - - if (h == 0) - return -1; - h_ratio = (res->height << 13) / h; - if (h_ratio == 0) - return -1; - match = abs(((w_ratio << 13) / h_ratio) - ((int)8192)); - - if ((w_ratio < (int)8192) || (h_ratio < (int)8192) || - (match > LARGEST_ALLOWED_RATIO_MISMATCH)) - return -1; - - return w_ratio + h_ratio; -} - -/* Return the nearest higher resolution index */ -static int nearest_resolution_index(int w, int h) -{ - int i; - int idx = -1; - int dist; - int min_dist = INT_MAX; - struct gc0310_resolution *tmp_res = NULL; - - for (i = 0; i < N_RES; i++) { - tmp_res = &gc0310_res[i]; - dist = distance(tmp_res, w, h); - if (dist == -1) - continue; - if (dist < min_dist) { - min_dist = dist; - idx = i; - } - } - - return idx; -} - -static int get_resolution_index(int w, int h) -{ - int i; - - for (i = 0; i < N_RES; i++) { - if (w != gc0310_res[i].width) - continue; - if (h != gc0310_res[i].height) - continue; - - return i; - } - - return -1; -} - - -/* TODO: remove it. 
*/ -static int startup(struct v4l2_subdev *sd) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - pr_info("%s S\n", __func__); - - ret = gc0310_write_reg_array(client, gc0310_res[dev->fmt_idx].regs); - if (ret) { - dev_err(&client->dev, "gc0310 write register err.\n"); - return ret; - } - - pr_info("%s E\n", __func__); - return ret; -} - -static int gc0310_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct gc0310_device *dev = to_gc0310_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct camera_mipi_info *gc0310_info = NULL; - int ret = 0; - int idx = 0; - pr_info("%s S\n", __func__); - - if (format->pad) - return -EINVAL; - - if (!fmt) - return -EINVAL; - - gc0310_info = v4l2_get_subdev_hostdata(sd); - if (!gc0310_info) - return -EINVAL; - - mutex_lock(&dev->input_lock); - - idx = nearest_resolution_index(fmt->width, fmt->height); - if (idx == -1) { - /* return the largest resolution */ - fmt->width = gc0310_res[N_RES - 1].width; - fmt->height = gc0310_res[N_RES - 1].height; - } else { - fmt->width = gc0310_res[idx].width; - fmt->height = gc0310_res[idx].height; - } - fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8; - - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *fmt; - mutex_unlock(&dev->input_lock); - return 0; - } - - dev->fmt_idx = get_resolution_index(fmt->width, fmt->height); - if (dev->fmt_idx == -1) { - dev_err(&client->dev, "get resolution fail\n"); - mutex_unlock(&dev->input_lock); - return -EINVAL; - } - - printk("%s: before gc0310_write_reg_array %s\n", __FUNCTION__, - gc0310_res[dev->fmt_idx].desc); - ret = startup(sd); - if (ret) { - dev_err(&client->dev, "gc0310 startup err\n"); - goto err; - } - - ret = gc0310_get_intg_factor(client, gc0310_info, - &gc0310_res[dev->fmt_idx]); - if (ret) { - dev_err(&client->dev, "failed 
to get integration_factor\n"); - goto err; - } - - pr_info("%s E\n", __func__); -err: - mutex_unlock(&dev->input_lock); - return ret; -} - -static int gc0310_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct gc0310_device *dev = to_gc0310_sensor(sd); - - if (format->pad) - return -EINVAL; - - if (!fmt) - return -EINVAL; - - fmt->width = gc0310_res[dev->fmt_idx].width; - fmt->height = gc0310_res[dev->fmt_idx].height; - fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8; - - return 0; -} - -static int gc0310_detect(struct i2c_client *client) -{ - struct i2c_adapter *adapter = client->adapter; - u8 high, low; - int ret; - u16 id; - - pr_info("%s S\n", __func__); - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) - return -ENODEV; - - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_SC_CMMN_CHIP_ID_H, &high); - if (ret) { - dev_err(&client->dev, "read sensor_id_high failed\n"); - return -ENODEV; - } - ret = gc0310_read_reg(client, GC0310_8BIT, - GC0310_SC_CMMN_CHIP_ID_L, &low); - if (ret) { - dev_err(&client->dev, "read sensor_id_low failed\n"); - return -ENODEV; - } - id = ((((u16) high) << 8) | (u16) low); - pr_info("sensor ID = 0x%x\n", id); - - if (id != GC0310_ID) { - dev_err(&client->dev, "sensor ID error, read id = 0x%x, target id = 0x%x\n", id, GC0310_ID); - return -ENODEV; - } - - dev_dbg(&client->dev, "detect gc0310 success\n"); - - pr_info("%s E\n", __func__); - - return 0; -} - -static int gc0310_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - pr_info("%s S enable=%d\n", __func__, enable); - mutex_lock(&dev->input_lock); - - if (enable) { - /* enable per frame MIPI and sensor ctrl reset */ - ret = gc0310_write_reg(client, GC0310_8BIT, - 0xFE, 0x30); - if (ret) { - mutex_unlock(&dev->input_lock); - return ret; - } - } - - ret 
= gc0310_write_reg(client, GC0310_8BIT, - GC0310_RESET_RELATED, GC0310_REGISTER_PAGE_3); - if (ret) { - mutex_unlock(&dev->input_lock); - return ret; - } - - ret = gc0310_write_reg(client, GC0310_8BIT, GC0310_SW_STREAM, - enable ? GC0310_START_STREAMING : - GC0310_STOP_STREAMING); - if (ret) { - mutex_unlock(&dev->input_lock); - return ret; - } - - ret = gc0310_write_reg(client, GC0310_8BIT, - GC0310_RESET_RELATED, GC0310_REGISTER_PAGE_0); - if (ret) { - mutex_unlock(&dev->input_lock); - return ret; - } - - mutex_unlock(&dev->input_lock); - pr_info("%s E\n", __func__); - return ret; -} - - -static int gc0310_s_config(struct v4l2_subdev *sd, - int irq, void *platform_data) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - pr_info("%s S\n", __func__); - if (!platform_data) - return -ENODEV; - - dev->platform_data = - (struct camera_sensor_platform_data *)platform_data; - - mutex_lock(&dev->input_lock); - /* power off the module, then power on it in future - * as first power on by board may not fulfill the - * power on sequqence needed by the module - */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "gc0310 power-off err.\n"); - goto fail_power_off; - } - - ret = power_up(sd); - if (ret) { - dev_err(&client->dev, "gc0310 power-up err.\n"); - goto fail_power_on; - } - - ret = dev->platform_data->csi_cfg(sd, 1); - if (ret) - goto fail_csi_cfg; - - /* config & detect sensor */ - ret = gc0310_detect(client); - if (ret) { - dev_err(&client->dev, "gc0310_detect err s_config.\n"); - goto fail_csi_cfg; - } - - /* turn off sensor, after probed */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "gc0310 power-off err.\n"); - goto fail_csi_cfg; - } - mutex_unlock(&dev->input_lock); - - pr_info("%s E\n", __func__); - return 0; - -fail_csi_cfg: - dev->platform_data->csi_cfg(sd, 0); -fail_power_on: - power_down(sd); - dev_err(&client->dev, "sensor power-gating failed\n"); 
-fail_power_off: - mutex_unlock(&dev->input_lock); - return ret; -} - -static int gc0310_g_frame_interval(struct v4l2_subdev *sd, - struct v4l2_subdev_frame_interval *interval) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - - interval->interval.numerator = 1; - interval->interval.denominator = gc0310_res[dev->fmt_idx].fps; - - return 0; -} - -static int gc0310_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->index >= MAX_FMTS) - return -EINVAL; - - code->code = MEDIA_BUS_FMT_SGRBG8_1X8; - return 0; -} - -static int gc0310_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) -{ - int index = fse->index; - - if (index >= N_RES) - return -EINVAL; - - fse->min_width = gc0310_res[index].width; - fse->min_height = gc0310_res[index].height; - fse->max_width = gc0310_res[index].width; - fse->max_height = gc0310_res[index].height; - - return 0; - -} - - -static int gc0310_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) -{ - struct gc0310_device *dev = to_gc0310_sensor(sd); - - mutex_lock(&dev->input_lock); - *frames = gc0310_res[dev->fmt_idx].skip_frames; - mutex_unlock(&dev->input_lock); - - return 0; -} - -static const struct v4l2_subdev_sensor_ops gc0310_sensor_ops = { - .g_skip_frames = gc0310_g_skip_frames, -}; - -static const struct v4l2_subdev_video_ops gc0310_video_ops = { - .s_stream = gc0310_s_stream, - .g_frame_interval = gc0310_g_frame_interval, -}; - -static const struct v4l2_subdev_core_ops gc0310_core_ops = { - .s_power = gc0310_s_power, - .ioctl = gc0310_ioctl, -}; - -static const struct v4l2_subdev_pad_ops gc0310_pad_ops = { - .enum_mbus_code = gc0310_enum_mbus_code, - .enum_frame_size = gc0310_enum_frame_size, - .get_fmt = gc0310_get_fmt, - .set_fmt = gc0310_set_fmt, -}; - -static const struct v4l2_subdev_ops gc0310_ops = { - .core = &gc0310_core_ops, - .video = &gc0310_video_ops, - .pad = 
&gc0310_pad_ops, - .sensor = &gc0310_sensor_ops, -}; - -static int gc0310_remove(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct gc0310_device *dev = to_gc0310_sensor(sd); - dev_dbg(&client->dev, "gc0310_remove...\n"); - - dev->platform_data->csi_cfg(sd, 0); - - v4l2_device_unregister_subdev(sd); - media_entity_cleanup(&dev->sd.entity); - v4l2_ctrl_handler_free(&dev->ctrl_handler); - kfree(dev); - - return 0; -} - -static int gc0310_probe(struct i2c_client *client) -{ - struct gc0310_device *dev; - int ret; - void *pdata; - unsigned int i; - - pr_info("%s S\n", __func__); - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - mutex_init(&dev->input_lock); - - dev->fmt_idx = 0; - v4l2_i2c_subdev_init(&(dev->sd), client, &gc0310_ops); - - pdata = gmin_camera_platform_data(&dev->sd, - ATOMISP_INPUT_FORMAT_RAW_8, - atomisp_bayer_order_grbg); - if (!pdata) { - ret = -EINVAL; - goto out_free; - } - - ret = gc0310_s_config(&dev->sd, client->irq, pdata); - if (ret) - goto out_free; - - ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA); - if (ret) - goto out_free; - - dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - dev->pad.flags = MEDIA_PAD_FL_SOURCE; - dev->format.code = MEDIA_BUS_FMT_SGRBG8_1X8; - dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; - ret = - v4l2_ctrl_handler_init(&dev->ctrl_handler, - ARRAY_SIZE(gc0310_controls)); - if (ret) { - gc0310_remove(client); - return ret; - } - - for (i = 0; i < ARRAY_SIZE(gc0310_controls); i++) - v4l2_ctrl_new_custom(&dev->ctrl_handler, &gc0310_controls[i], - NULL); - - if (dev->ctrl_handler.error) { - gc0310_remove(client); - return dev->ctrl_handler.error; - } - - /* Use same lock for controls as for everything else. 
*/ - dev->ctrl_handler.lock = &dev->input_lock; - dev->sd.ctrl_handler = &dev->ctrl_handler; - - ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); - if (ret) - gc0310_remove(client); - - pr_info("%s E\n", __func__); - return ret; -out_free: - v4l2_device_unregister_subdev(&dev->sd); - kfree(dev); - return ret; -} - -static const struct acpi_device_id gc0310_acpi_match[] = { - {"XXGC0310"}, - {"INT0310"}, - {}, -}; -MODULE_DEVICE_TABLE(acpi, gc0310_acpi_match); - -static struct i2c_driver gc0310_driver = { - .driver = { - .name = "gc0310", - .acpi_match_table = gc0310_acpi_match, - }, - .probe_new = gc0310_probe, - .remove = gc0310_remove, -}; -module_i2c_driver(gc0310_driver); - -MODULE_AUTHOR("Lai, Angie "); -MODULE_DESCRIPTION("A low-level driver for GalaxyCore GC0310 sensors"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c deleted file mode 100644 index 4b6b6568b3cf..000000000000 --- a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c +++ /dev/null @@ -1,1124 +0,0 @@ -/* - * Support for GalaxyCore GC2235 2M camera sensor. - * - * Copyright (c) 2014 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../include/linux/atomisp_gmin_platform.h" -#include -#include - -#include "gc2235.h" - -/* i2c read/write stuff */ -static int gc2235_read_reg(struct i2c_client *client, - u16 data_length, u16 reg, u16 *val) -{ - int err; - struct i2c_msg msg[2]; - unsigned char data[6]; - - if (!client->adapter) { - dev_err(&client->dev, "%s error, no client->adapter\n", - __func__); - return -ENODEV; - } - - if (data_length != GC2235_8BIT) { - dev_err(&client->dev, "%s error, invalid data length\n", - __func__); - return -EINVAL; - } - - memset(msg, 0, sizeof(msg)); - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = 1; - msg[0].buf = data; - - /* high byte goes out first */ - data[0] = (u8)(reg & 0xff); - - msg[1].addr = client->addr; - msg[1].len = data_length; - msg[1].flags = I2C_M_RD; - msg[1].buf = data; - - err = i2c_transfer(client->adapter, msg, 2); - if (err != 2) { - if (err >= 0) - err = -EIO; - dev_err(&client->dev, - "read from offset 0x%x error %d", reg, err); - return err; - } - - *val = 0; - /* high byte comes first */ - if (data_length == GC2235_8BIT) - *val = (u8)data[0]; - - return 0; -} - -static int gc2235_i2c_write(struct i2c_client *client, u16 len, u8 *data) -{ - struct i2c_msg msg; - const int num_msg = 1; - int ret; - - msg.addr = client->addr; - msg.flags = 0; - msg.len = len; - msg.buf = data; - ret = i2c_transfer(client->adapter, &msg, 1); - - return ret == num_msg ? 
0 : -EIO; -} - -static int gc2235_write_reg(struct i2c_client *client, u16 data_length, - u8 reg, u8 val) -{ - int ret; - unsigned char data[4] = {0}; - const u16 len = data_length + sizeof(u8); /* 16-bit address + data */ - - if (data_length != GC2235_8BIT) { - dev_err(&client->dev, - "%s error, invalid data_length\n", __func__); - return -EINVAL; - } - - /* high byte goes out first */ - data[0] = reg; - data[1] = val; - - ret = gc2235_i2c_write(client, len, data); - if (ret) - dev_err(&client->dev, - "write error: wrote 0x%x to offset 0x%x error %d", - val, reg, ret); - - return ret; -} - -static int __gc2235_flush_reg_array(struct i2c_client *client, - struct gc2235_write_ctrl *ctrl) -{ - u16 size; - - if (ctrl->index == 0) - return 0; - - size = sizeof(u8) + ctrl->index; /* 8-bit address + data */ - ctrl->index = 0; - - return gc2235_i2c_write(client, size, (u8 *)&ctrl->buffer); -} - -static int __gc2235_buf_reg_array(struct i2c_client *client, - struct gc2235_write_ctrl *ctrl, - const struct gc2235_reg *next) -{ - int size; - - if (next->type != GC2235_8BIT) - return -EINVAL; - - size = 1; - ctrl->buffer.data[ctrl->index] = (u8)next->val; - - /* When first item is added, we need to store its starting address */ - if (ctrl->index == 0) - ctrl->buffer.addr = next->reg; - - ctrl->index += size; - - /* - * Buffer cannot guarantee free space for u32? Better flush it to avoid - * possible lack of memory for next item. 
- */ - if (ctrl->index + sizeof(u8) >= GC2235_MAX_WRITE_BUF_SIZE) - return __gc2235_flush_reg_array(client, ctrl); - - return 0; -} -static int __gc2235_write_reg_is_consecutive(struct i2c_client *client, - struct gc2235_write_ctrl *ctrl, - const struct gc2235_reg *next) -{ - if (ctrl->index == 0) - return 1; - - return ctrl->buffer.addr + ctrl->index == next->reg; -} -static int gc2235_write_reg_array(struct i2c_client *client, - const struct gc2235_reg *reglist) -{ - const struct gc2235_reg *next = reglist; - struct gc2235_write_ctrl ctrl; - int err; - - ctrl.index = 0; - for (; next->type != GC2235_TOK_TERM; next++) { - switch (next->type & GC2235_TOK_MASK) { - case GC2235_TOK_DELAY: - err = __gc2235_flush_reg_array(client, &ctrl); - if (err) - return err; - msleep(next->val); - break; - default: - /* - * If next address is not consecutive, data needs to be - * flushed before proceed. - */ - if (!__gc2235_write_reg_is_consecutive(client, &ctrl, - next)) { - err = __gc2235_flush_reg_array(client, &ctrl); - if (err) - return err; - } - err = __gc2235_buf_reg_array(client, &ctrl, next); - if (err) { - dev_err(&client->dev, "%s: write error, aborted\n", - __func__); - return err; - } - break; - } - } - - return __gc2235_flush_reg_array(client, &ctrl); -} - -static int gc2235_g_focal(struct v4l2_subdev *sd, s32 *val) -{ - *val = (GC2235_FOCAL_LENGTH_NUM << 16) | GC2235_FOCAL_LENGTH_DEM; - return 0; -} - -static int gc2235_g_fnumber(struct v4l2_subdev *sd, s32 *val) -{ - /*const f number for imx*/ - *val = (GC2235_F_NUMBER_DEFAULT_NUM << 16) | GC2235_F_NUMBER_DEM; - return 0; -} - -static int gc2235_g_fnumber_range(struct v4l2_subdev *sd, s32 *val) -{ - *val = (GC2235_F_NUMBER_DEFAULT_NUM << 24) | - (GC2235_F_NUMBER_DEM << 16) | - (GC2235_F_NUMBER_DEFAULT_NUM << 8) | GC2235_F_NUMBER_DEM; - return 0; -} - - -static int gc2235_get_intg_factor(struct i2c_client *client, - struct camera_mipi_info *info, - const struct gc2235_resolution *res) -{ - struct v4l2_subdev *sd = 
i2c_get_clientdata(client); - struct gc2235_device *dev = to_gc2235_sensor(sd); - struct atomisp_sensor_mode_data *buf = &info->data; - u16 reg_val, reg_val_h; - int ret; - - if (!info) - return -EINVAL; - - /* pixel clock calculattion */ - buf->vt_pix_clk_freq_mhz = dev->vt_pix_clk_freq_mhz = 30000000; - - /* get integration time */ - buf->coarse_integration_time_min = GC2235_COARSE_INTG_TIME_MIN; - buf->coarse_integration_time_max_margin = - GC2235_COARSE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_min = GC2235_FINE_INTG_TIME_MIN; - buf->fine_integration_time_max_margin = - GC2235_FINE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_def = GC2235_FINE_INTG_TIME_MIN; - buf->frame_length_lines = res->lines_per_frame; - buf->line_length_pck = res->pixels_per_line; - buf->read_mode = res->bin_mode; - - /* get the cropping and output resolution to ISP for this mode. */ - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_H_CROP_START_H, ®_val_h); - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_H_CROP_START_L, ®_val); - if (ret) - return ret; - - buf->crop_horizontal_start = (reg_val_h << 8) | reg_val; - - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_V_CROP_START_H, ®_val_h); - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_V_CROP_START_L, ®_val); - if (ret) - return ret; - - buf->crop_vertical_start = (reg_val_h << 8) | reg_val; - - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_H_OUTSIZE_H, ®_val_h); - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_H_OUTSIZE_L, ®_val); - if (ret) - return ret; - buf->output_width = (reg_val_h << 8) | reg_val; - - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_V_OUTSIZE_H, ®_val_h); - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_V_OUTSIZE_L, ®_val); - if (ret) - return ret; - buf->output_height = (reg_val_h << 8) | reg_val; - - buf->crop_horizontal_end = buf->crop_horizontal_start + - buf->output_width - 1; - buf->crop_vertical_end = buf->crop_vertical_start + - buf->output_height 
- 1; - - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_HB_H, ®_val_h); - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_HB_L, ®_val); - if (ret) - return ret; - -#if 0 - u16 dummy = (reg_val_h << 8) | reg_val; -#endif - - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_SH_DELAY_H, ®_val_h); - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_SH_DELAY_L, ®_val); - -#if 0 - buf->line_length_pck = buf->output_width + 16 + dummy + - (((u16)reg_val_h << 8) | (u16)reg_val) + 4; -#endif - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_VB_H, ®_val_h); - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_VB_L, ®_val); - if (ret) - return ret; - -#if 0 - buf->frame_length_lines = buf->output_height + 32 + - (((u16)reg_val_h << 8) | (u16)reg_val); -#endif - buf->binning_factor_x = res->bin_factor_x ? - res->bin_factor_x : 1; - buf->binning_factor_y = res->bin_factor_y ? - res->bin_factor_y : 1; - return 0; -} - -static long __gc2235_set_exposure(struct v4l2_subdev *sd, int coarse_itg, - int gain, int digitgain) - -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - u16 coarse_integration = (u16)coarse_itg; - int ret = 0; - u16 expo_coarse_h, expo_coarse_l, gain_val = 0xF0, gain_val2 = 0xF0; - expo_coarse_h = coarse_integration >> 8; - expo_coarse_l = coarse_integration & 0xff; - - ret = gc2235_write_reg(client, GC2235_8BIT, - GC2235_EXPOSURE_H, expo_coarse_h); - ret = gc2235_write_reg(client, GC2235_8BIT, - GC2235_EXPOSURE_L, expo_coarse_l); - - if (gain <= 0x58) { - gain_val = 0x40; - gain_val2 = 0x58; - } else if (gain < 256) { - gain_val = 0x40; - gain_val2 = gain; - } else { - gain_val2 = 64 * gain / 256; - gain_val = 0xff; - } - - ret = gc2235_write_reg(client, GC2235_8BIT, - GC2235_GLOBAL_GAIN, (u8)gain_val); - ret = gc2235_write_reg(client, GC2235_8BIT, - GC2235_PRE_GAIN, (u8)gain_val2); - - return ret; -} - - -static int gc2235_set_exposure(struct v4l2_subdev *sd, int exposure, - int gain, int digitgain) -{ - struct gc2235_device *dev = 
to_gc2235_sensor(sd); - int ret; - - mutex_lock(&dev->input_lock); - ret = __gc2235_set_exposure(sd, exposure, gain, digitgain); - mutex_unlock(&dev->input_lock); - - return ret; -} - -static long gc2235_s_exposure(struct v4l2_subdev *sd, - struct atomisp_exposure *exposure) -{ - int exp = exposure->integration_time[0]; - int gain = exposure->gain[0]; - int digitgain = exposure->gain[1]; - - /* we should not accept the invalid value below. */ - if (gain == 0) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - v4l2_err(client, "%s: invalid value\n", __func__); - return -EINVAL; - } - - return gc2235_set_exposure(sd, exp, gain, digitgain); -} -static long gc2235_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) -{ - switch (cmd) { - case ATOMISP_IOC_S_EXPOSURE: - return gc2235_s_exposure(sd, arg); - default: - return -EINVAL; - } - return 0; -} -/* This returns the exposure time being used. This should only be used - * for filling in EXIF data, not for actual image processing. 
- */ -static int gc2235_q_exposure(struct v4l2_subdev *sd, s32 *value) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - u16 reg_v, reg_v2; - int ret; - - /* get exposure */ - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_EXPOSURE_L, - ®_v); - if (ret) - goto err; - - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_EXPOSURE_H, - ®_v2); - if (ret) - goto err; - - reg_v += reg_v2 << 8; - - *value = reg_v; -err: - return ret; -} - -static int gc2235_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct gc2235_device *dev = - container_of(ctrl->handler, struct gc2235_device, ctrl_handler); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_EXPOSURE_ABSOLUTE: - ret = gc2235_q_exposure(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FOCAL_ABSOLUTE: - ret = gc2235_g_focal(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_ABSOLUTE: - ret = gc2235_g_fnumber(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_RANGE: - ret = gc2235_g_fnumber_range(&dev->sd, &ctrl->val); - break; - default: - ret = -EINVAL; - } - - return ret; -} - -static const struct v4l2_ctrl_ops ctrl_ops = { - .g_volatile_ctrl = gc2235_g_volatile_ctrl -}; - -static struct v4l2_ctrl_config gc2235_controls[] = { - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "exposure", - .min = 0x0, - .max = 0xffff, - .step = 0x01, - .def = 0x00, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCAL_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "focal length", - .min = GC2235_FOCAL_LENGTH_DEFAULT, - .max = GC2235_FOCAL_LENGTH_DEFAULT, - .step = 0x01, - .def = GC2235_FOCAL_LENGTH_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number", - .min = GC2235_F_NUMBER_DEFAULT, - .max = GC2235_F_NUMBER_DEFAULT, - .step = 0x01, - .def = GC2235_F_NUMBER_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_RANGE, - .type 
= V4L2_CTRL_TYPE_INTEGER, - .name = "f-number range", - .min = GC2235_F_NUMBER_RANGE, - .max = GC2235_F_NUMBER_RANGE, - .step = 0x01, - .def = GC2235_F_NUMBER_RANGE, - .flags = 0, - }, -}; - -static int __gc2235_init(struct v4l2_subdev *sd) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - /* restore settings */ - gc2235_res = gc2235_res_preview; - N_RES = N_RES_PREVIEW; - - return gc2235_write_reg_array(client, gc2235_init_settings); -} - -static int is_init; - -static int power_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret = -1; - struct gc2235_device *dev = to_gc2235_sensor(sd); - - if (!dev || !dev->platform_data) - return -ENODEV; - - if (flag) { - ret = dev->platform_data->v1p8_ctrl(sd, 1); - usleep_range(60, 90); - if (ret == 0) - ret |= dev->platform_data->v2p8_ctrl(sd, 1); - } else { - ret = dev->platform_data->v1p8_ctrl(sd, 0); - ret |= dev->platform_data->v2p8_ctrl(sd, 0); - } - return ret; -} - -static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) -{ - struct gc2235_device *dev = to_gc2235_sensor(sd); - int ret = -1; - - if (!dev || !dev->platform_data) - return -ENODEV; - - ret |= dev->platform_data->gpio1_ctrl(sd, !flag); - usleep_range(60, 90); - return dev->platform_data->gpio0_ctrl(sd, flag); -} - -static int power_up(struct v4l2_subdev *sd) -{ - struct gc2235_device *dev = to_gc2235_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - /* power control */ - ret = power_ctrl(sd, 1); - if (ret) - goto fail_power; - - /* according to DS, at least 5ms is needed between DOVDD and PWDN */ - usleep_range(5000, 6000); - - ret = dev->platform_data->flisclk_ctrl(sd, 1); - if (ret) - goto fail_clk; - usleep_range(5000, 6000); - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 1); - if (ret) { - ret = gpio_ctrl(sd, 1); - if (ret) - goto fail_power; - } - - msleep(5); - return 0; - -fail_clk: - gpio_ctrl(sd, 
0); -fail_power: - power_ctrl(sd, 0); - dev_err(&client->dev, "sensor power-up failed\n"); - - return ret; -} - -static int power_down(struct v4l2_subdev *sd) -{ - struct gc2235_device *dev = to_gc2235_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - /* gpio ctrl */ - ret = gpio_ctrl(sd, 0); - if (ret) { - ret = gpio_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "gpio failed 2\n"); - } - - ret = dev->platform_data->flisclk_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "flisclk failed\n"); - - /* power control */ - ret = power_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "vprog failed.\n"); - - return ret; -} - -static int gc2235_s_power(struct v4l2_subdev *sd, int on) -{ - int ret; - - if (on == 0) - ret = power_down(sd); - else { - ret = power_up(sd); - if (!ret) - ret = __gc2235_init(sd); - is_init = 1; - } - return ret; -} - -/* - * distance - calculate the distance - * @res: resolution - * @w: width - * @h: height - * - * Get the gap between resolution and w/h. - * res->width/height smaller than w/h wouldn't be considered. - * Returns the value of gap or -1 if fail. 
- */ -#define LARGEST_ALLOWED_RATIO_MISMATCH 800 -static int distance(struct gc2235_resolution *res, u32 w, u32 h) -{ - unsigned int w_ratio = (res->width << 13) / w; - unsigned int h_ratio; - int match; - - if (h == 0) - return -1; - h_ratio = (res->height << 13) / h; - if (h_ratio == 0) - return -1; - match = abs(((w_ratio << 13) / h_ratio) - 8192); - - if ((w_ratio < 8192) || (h_ratio < 8192) || - (match > LARGEST_ALLOWED_RATIO_MISMATCH)) - return -1; - - return w_ratio + h_ratio; -} - -/* Return the nearest higher resolution index */ -static int nearest_resolution_index(int w, int h) -{ - int i; - int idx = -1; - int dist; - int min_dist = INT_MAX; - struct gc2235_resolution *tmp_res = NULL; - - for (i = 0; i < N_RES; i++) { - tmp_res = &gc2235_res[i]; - dist = distance(tmp_res, w, h); - if (dist == -1) - continue; - if (dist < min_dist) { - min_dist = dist; - idx = i; - } - } - - return idx; -} - -static int get_resolution_index(int w, int h) -{ - int i; - - for (i = 0; i < N_RES; i++) { - if (w != gc2235_res[i].width) - continue; - if (h != gc2235_res[i].height) - continue; - - return i; - } - - return -1; -} - -static int startup(struct v4l2_subdev *sd) -{ - struct gc2235_device *dev = to_gc2235_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - if (is_init == 0) { - /* force gc2235 to do a reset in res change, otherwise it - * can not output normal after switching res. 
and it is not - * necessary for first time run up after power on, for the sack - * of performance - */ - power_down(sd); - power_up(sd); - gc2235_write_reg_array(client, gc2235_init_settings); - } - - ret = gc2235_write_reg_array(client, gc2235_res[dev->fmt_idx].regs); - if (ret) { - dev_err(&client->dev, "gc2235 write register err.\n"); - return ret; - } - is_init = 0; - - return ret; -} - -static int gc2235_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - - struct v4l2_mbus_framefmt *fmt = &format->format; - struct gc2235_device *dev = to_gc2235_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct camera_mipi_info *gc2235_info = NULL; - int ret = 0; - int idx; - - gc2235_info = v4l2_get_subdev_hostdata(sd); - if (!gc2235_info) - return -EINVAL; - if (format->pad) - return -EINVAL; - if (!fmt) - return -EINVAL; - mutex_lock(&dev->input_lock); - idx = nearest_resolution_index(fmt->width, fmt->height); - if (idx == -1) { - /* return the largest resolution */ - fmt->width = gc2235_res[N_RES - 1].width; - fmt->height = gc2235_res[N_RES - 1].height; - } else { - fmt->width = gc2235_res[idx].width; - fmt->height = gc2235_res[idx].height; - } - fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *fmt; - mutex_unlock(&dev->input_lock); - return 0; - } - - dev->fmt_idx = get_resolution_index(fmt->width, fmt->height); - if (dev->fmt_idx == -1) { - dev_err(&client->dev, "get resolution fail\n"); - mutex_unlock(&dev->input_lock); - return -EINVAL; - } - - ret = startup(sd); - if (ret) { - dev_err(&client->dev, "gc2235 startup err\n"); - goto err; - } - - ret = gc2235_get_intg_factor(client, gc2235_info, - &gc2235_res[dev->fmt_idx]); - if (ret) - dev_err(&client->dev, "failed to get integration_factor\n"); - -err: - mutex_unlock(&dev->input_lock); - return ret; -} - -static int gc2235_get_fmt(struct v4l2_subdev *sd, - struct 
v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct gc2235_device *dev = to_gc2235_sensor(sd); - - if (format->pad) - return -EINVAL; - - if (!fmt) - return -EINVAL; - - fmt->width = gc2235_res[dev->fmt_idx].width; - fmt->height = gc2235_res[dev->fmt_idx].height; - fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; - - return 0; -} - -static int gc2235_detect(struct i2c_client *client) -{ - struct i2c_adapter *adapter = client->adapter; - u16 high, low; - int ret; - u16 id; - - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) - return -ENODEV; - - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_SENSOR_ID_H, &high); - if (ret) { - dev_err(&client->dev, "sensor_id_high = 0x%x\n", high); - return -ENODEV; - } - ret = gc2235_read_reg(client, GC2235_8BIT, - GC2235_SENSOR_ID_L, &low); - id = ((high << 8) | low); - - if (id != GC2235_ID) { - dev_err(&client->dev, "sensor ID error, 0x%x\n", id); - return -ENODEV; - } - - dev_info(&client->dev, "detect gc2235 success\n"); - return 0; -} - -static int gc2235_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct gc2235_device *dev = to_gc2235_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - mutex_lock(&dev->input_lock); - - if (enable) - ret = gc2235_write_reg_array(client, gc2235_stream_on); - else - ret = gc2235_write_reg_array(client, gc2235_stream_off); - - mutex_unlock(&dev->input_lock); - return ret; -} - - -static int gc2235_s_config(struct v4l2_subdev *sd, - int irq, void *platform_data) -{ - struct gc2235_device *dev = to_gc2235_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - if (!platform_data) - return -ENODEV; - - dev->platform_data = - (struct camera_sensor_platform_data *)platform_data; - - mutex_lock(&dev->input_lock); - /* power off the module, then power on it in future - * as first power on by board may not fulfill the - * power on sequqence needed by the module - 
*/ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "gc2235 power-off err.\n"); - goto fail_power_off; - } - - ret = power_up(sd); - if (ret) { - dev_err(&client->dev, "gc2235 power-up err.\n"); - goto fail_power_on; - } - - ret = dev->platform_data->csi_cfg(sd, 1); - if (ret) - goto fail_csi_cfg; - - /* config & detect sensor */ - ret = gc2235_detect(client); - if (ret) { - dev_err(&client->dev, "gc2235_detect err s_config.\n"); - goto fail_csi_cfg; - } - - /* turn off sensor, after probed */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "gc2235 power-off err.\n"); - goto fail_csi_cfg; - } - mutex_unlock(&dev->input_lock); - - return 0; - -fail_csi_cfg: - dev->platform_data->csi_cfg(sd, 0); -fail_power_on: - power_down(sd); - dev_err(&client->dev, "sensor power-gating failed\n"); -fail_power_off: - mutex_unlock(&dev->input_lock); - return ret; -} - -static int gc2235_g_frame_interval(struct v4l2_subdev *sd, - struct v4l2_subdev_frame_interval *interval) -{ - struct gc2235_device *dev = to_gc2235_sensor(sd); - - interval->interval.numerator = 1; - interval->interval.denominator = gc2235_res[dev->fmt_idx].fps; - - return 0; -} - -static int gc2235_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->index >= MAX_FMTS) - return -EINVAL; - - code->code = MEDIA_BUS_FMT_SBGGR10_1X10; - return 0; -} - -static int gc2235_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) -{ - int index = fse->index; - - if (index >= N_RES) - return -EINVAL; - - fse->min_width = gc2235_res[index].width; - fse->min_height = gc2235_res[index].height; - fse->max_width = gc2235_res[index].width; - fse->max_height = gc2235_res[index].height; - - return 0; - -} - -static int gc2235_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) -{ - struct gc2235_device *dev = to_gc2235_sensor(sd); - - mutex_lock(&dev->input_lock); 
- *frames = gc2235_res[dev->fmt_idx].skip_frames; - mutex_unlock(&dev->input_lock); - - return 0; -} - -static const struct v4l2_subdev_sensor_ops gc2235_sensor_ops = { - .g_skip_frames = gc2235_g_skip_frames, -}; - -static const struct v4l2_subdev_video_ops gc2235_video_ops = { - .s_stream = gc2235_s_stream, - .g_frame_interval = gc2235_g_frame_interval, -}; - -static const struct v4l2_subdev_core_ops gc2235_core_ops = { - .s_power = gc2235_s_power, - .ioctl = gc2235_ioctl, -}; - -static const struct v4l2_subdev_pad_ops gc2235_pad_ops = { - .enum_mbus_code = gc2235_enum_mbus_code, - .enum_frame_size = gc2235_enum_frame_size, - .get_fmt = gc2235_get_fmt, - .set_fmt = gc2235_set_fmt, -}; - -static const struct v4l2_subdev_ops gc2235_ops = { - .core = &gc2235_core_ops, - .video = &gc2235_video_ops, - .pad = &gc2235_pad_ops, - .sensor = &gc2235_sensor_ops, -}; - -static int gc2235_remove(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct gc2235_device *dev = to_gc2235_sensor(sd); - dev_dbg(&client->dev, "gc2235_remove...\n"); - - dev->platform_data->csi_cfg(sd, 0); - - v4l2_device_unregister_subdev(sd); - media_entity_cleanup(&dev->sd.entity); - v4l2_ctrl_handler_free(&dev->ctrl_handler); - kfree(dev); - - return 0; -} - -static int gc2235_probe(struct i2c_client *client) -{ - struct gc2235_device *dev; - void *gcpdev; - int ret; - unsigned int i; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - mutex_init(&dev->input_lock); - - dev->fmt_idx = 0; - v4l2_i2c_subdev_init(&(dev->sd), client, &gc2235_ops); - - gcpdev = gmin_camera_platform_data(&dev->sd, - ATOMISP_INPUT_FORMAT_RAW_10, - atomisp_bayer_order_grbg); - - ret = gc2235_s_config(&dev->sd, client->irq, gcpdev); - if (ret) - goto out_free; - - dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - dev->pad.flags = MEDIA_PAD_FL_SOURCE; - dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10; - dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; - ret = - 
v4l2_ctrl_handler_init(&dev->ctrl_handler, - ARRAY_SIZE(gc2235_controls)); - if (ret) { - gc2235_remove(client); - return ret; - } - - for (i = 0; i < ARRAY_SIZE(gc2235_controls); i++) - v4l2_ctrl_new_custom(&dev->ctrl_handler, &gc2235_controls[i], - NULL); - - if (dev->ctrl_handler.error) { - gc2235_remove(client); - return dev->ctrl_handler.error; - } - - /* Use same lock for controls as for everything else. */ - dev->ctrl_handler.lock = &dev->input_lock; - dev->sd.ctrl_handler = &dev->ctrl_handler; - - ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); - if (ret) - gc2235_remove(client); - - return atomisp_register_i2c_module(&dev->sd, gcpdev, RAW_CAMERA); - -out_free: - v4l2_device_unregister_subdev(&dev->sd); - kfree(dev); - - return ret; -} - -static const struct acpi_device_id gc2235_acpi_match[] = { - { "INT33F8" }, - {}, -}; -MODULE_DEVICE_TABLE(acpi, gc2235_acpi_match); - -static struct i2c_driver gc2235_driver = { - .driver = { - .name = "gc2235", - .acpi_match_table = gc2235_acpi_match, - }, - .probe_new = gc2235_probe, - .remove = gc2235_remove, -}; -module_i2c_driver(gc2235_driver); - -MODULE_AUTHOR("Shuguang Gong "); -MODULE_DESCRIPTION("A low-level driver for GC2235 sensors"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c deleted file mode 100644 index 81e5ec0c2b64..000000000000 --- a/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * - */ -#include -#include -#include -#include -#include "../include/linux/libmsrlisthelper.h" -#include -#include - -/* Tagged binary data container structure definitions. */ -struct tbd_header { - uint32_t tag; /*!< Tag identifier, also checks endianness */ - uint32_t size; /*!< Container size including this header */ - uint32_t version; /*!< Version, format 0xYYMMDDVV */ - uint32_t revision; /*!< Revision, format 0xYYMMDDVV */ - uint32_t config_bits; /*!< Configuration flag bits set */ - uint32_t checksum; /*!< Global checksum, header included */ -} __packed; - -struct tbd_record_header { - uint32_t size; /*!< Size of record including header */ - uint8_t format_id; /*!< tbd_format_t enumeration values used */ - uint8_t packing_key; /*!< Packing method; 0 = no packing */ - uint16_t class_id; /*!< tbd_class_t enumeration values used */ -} __packed; - -struct tbd_data_record_header { - uint16_t next_offset; - uint16_t flags; - uint16_t data_offset; - uint16_t data_size; -} __packed; - -#define TBD_CLASS_DRV_ID 2 - -static int set_msr_configuration(struct i2c_client *client, uint8_t *bufptr, - unsigned int size) -{ - /* The configuration data contains any number of sequences where - * the first byte (that is, uint8_t) that marks the number of bytes - * in the sequence to follow, is indeed followed by the indicated - * number of bytes of actual data to be written to sensor. - * By convention, the first two bytes of actual data should be - * understood as an address in the sensor address space (hibyte - * followed by lobyte) where the remaining data in the sequence - * will be written. 
*/ - - uint8_t *ptr = bufptr; - while (ptr < bufptr + size) { - struct i2c_msg msg = { - .addr = client->addr, - .flags = 0, - }; - int ret; - - /* How many bytes */ - msg.len = *ptr++; - /* Where the bytes are located */ - msg.buf = ptr; - ptr += msg.len; - - if (ptr > bufptr + size) - /* Accessing data beyond bounds is not tolerated */ - return -EINVAL; - - ret = i2c_transfer(client->adapter, &msg, 1); - if (ret < 0) { - dev_err(&client->dev, "i2c write error: %d", ret); - return ret; - } - } - return 0; -} - -static int parse_and_apply(struct i2c_client *client, uint8_t *buffer, - unsigned int size) -{ - uint8_t *endptr8 = buffer + size; - struct tbd_data_record_header *header = - (struct tbd_data_record_header *)buffer; - - /* There may be any number of datasets present */ - unsigned int dataset = 0; - - do { - /* In below, four variables are read from buffer */ - if ((uint8_t *)header + sizeof(*header) > endptr8) - return -EINVAL; - - /* All data should be located within given buffer */ - if ((uint8_t *)header + header->data_offset + - header->data_size > endptr8) - return -EINVAL; - - /* We have a new valid dataset */ - dataset++; - /* See whether there is MSR data */ - /* If yes, update the reg info */ - if (header->data_size && (header->flags & 1)) { - int ret; - - dev_info(&client->dev, - "New MSR data for sensor driver (dataset %02d) size:%d\n", - dataset, header->data_size); - ret = set_msr_configuration(client, - buffer + header->data_offset, - header->data_size); - if (ret) - return ret; - } - header = (struct tbd_data_record_header *)(buffer + - header->next_offset); - } while (header->next_offset); - - return 0; -} - -int apply_msr_data(struct i2c_client *client, const struct firmware *fw) -{ - struct tbd_header *header; - struct tbd_record_header *record; - - if (!fw) { - dev_warn(&client->dev, "Drv data is not loaded.\n"); - return -EINVAL; - } - - if (sizeof(*header) > fw->size) - return -EINVAL; - - header = (struct tbd_header *)fw->data; - /* 
Check that we have drvb block. */ - if (memcmp(&header->tag, "DRVB", 4)) - return -EINVAL; - - /* Check the size */ - if (header->size != fw->size) - return -EINVAL; - - if (sizeof(*header) + sizeof(*record) > fw->size) - return -EINVAL; - - record = (struct tbd_record_header *)(header + 1); - /* Check that class id mathes tbd's drv id. */ - if (record->class_id != TBD_CLASS_DRV_ID) - return -EINVAL; - - /* Size 0 shall not be treated as an error */ - if (!record->size) - return 0; - - return parse_and_apply(client, (uint8_t *)(record + 1), record->size); -} -EXPORT_SYMBOL_GPL(apply_msr_data); - -int load_msr_list(struct i2c_client *client, char *name, - const struct firmware **fw) -{ - int ret = request_firmware(fw, name, &client->dev); - if (ret) { - dev_err(&client->dev, - "Error %d while requesting firmware %s\n", - ret, name); - return ret; - } - dev_info(&client->dev, "Received %lu bytes drv data\n", - (unsigned long)(*fw)->size); - - return 0; -} -EXPORT_SYMBOL_GPL(load_msr_list); - -void release_msr_list(struct i2c_client *client, const struct firmware *fw) -{ - release_firmware(fw); -} -EXPORT_SYMBOL_GPL(release_msr_list); - -static int init_msrlisthelper(void) -{ - return 0; -} - -static void exit_msrlisthelper(void) -{ -} - -module_init(init_msrlisthelper); -module_exit(exit_msrlisthelper); - -MODULE_AUTHOR("Jukka Kaartinen "); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c deleted file mode 100644 index 7098bf317f16..000000000000 --- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c +++ /dev/null @@ -1,968 +0,0 @@ -/* - * LED flash driver for LM3554 - * - * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#include -#include -#include -#include -#include -#include - -#include "../include/media/lm3554.h" -#include -#include -#include -#include -#include "../include/linux/atomisp_gmin_platform.h" -#include "../include/linux/atomisp.h" - -/* Registers */ - -#define LM3554_TORCH_BRIGHTNESS_REG 0xA0 -#define LM3554_TORCH_MODE_SHIFT 0 -#define LM3554_TORCH_CURRENT_SHIFT 3 -#define LM3554_INDICATOR_CURRENT_SHIFT 6 - -#define LM3554_FLASH_BRIGHTNESS_REG 0xB0 -#define LM3554_FLASH_MODE_SHIFT 0 -#define LM3554_FLASH_CURRENT_SHIFT 3 -#define LM3554_STROBE_SENSITIVITY_SHIFT 7 - -#define LM3554_FLASH_DURATION_REG 0xC0 -#define LM3554_FLASH_TIMEOUT_SHIFT 0 -#define LM3554_CURRENT_LIMIT_SHIFT 5 - -#define LM3554_FLAGS_REG 0xD0 -#define LM3554_FLAG_TIMEOUT (1 << 0) -#define LM3554_FLAG_THERMAL_SHUTDOWN (1 << 1) -#define LM3554_FLAG_LED_FAULT (1 << 2) -#define LM3554_FLAG_TX1_INTERRUPT (1 << 3) -#define LM3554_FLAG_TX2_INTERRUPT (1 << 4) -#define LM3554_FLAG_LED_THERMAL_FAULT (1 << 5) -#define LM3554_FLAG_UNUSED (1 << 6) -#define LM3554_FLAG_INPUT_VOLTAGE_LOW (1 << 7) - -#define LM3554_CONFIG_REG_1 0xE0 -#define LM3554_ENVM_TX2_SHIFT 5 -#define LM3554_TX2_POLARITY_SHIFT 6 - -struct lm3554 { - struct v4l2_subdev sd; - - struct mutex power_lock; - struct v4l2_ctrl_handler ctrl_handler; - int power_count; - - unsigned int mode; - int timeout; - u8 torch_current; - u8 indicator_current; - u8 flash_current; - - struct timer_list flash_off_delay; - struct lm3554_platform_data *pdata; -}; - -#define to_lm3554(p_sd) container_of(p_sd, struct lm3554, sd) - -/* Return negative errno else zero on success */ -static int lm3554_write(struct lm3554 *flash, u8 addr, u8 val) -{ - struct i2c_client *client = 
v4l2_get_subdevdata(&flash->sd); - int ret; - - ret = i2c_smbus_write_byte_data(client, addr, val); - - dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val, - ret < 0 ? "fail" : "ok"); - - return ret; -} - -/* Return negative errno else a data byte received from the device. */ -static int lm3554_read(struct lm3554 *flash, u8 addr) -{ - struct i2c_client *client = v4l2_get_subdevdata(&flash->sd); - int ret; - - ret = i2c_smbus_read_byte_data(client, addr); - - dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, ret, - ret < 0 ? "fail" : "ok"); - - return ret; -} - -/* ----------------------------------------------------------------------------- - * Hardware configuration - */ - -static int lm3554_set_mode(struct lm3554 *flash, unsigned int mode) -{ - u8 val; - int ret; - - val = (mode << LM3554_FLASH_MODE_SHIFT) | - (flash->flash_current << LM3554_FLASH_CURRENT_SHIFT); - - ret = lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, val); - if (ret == 0) - flash->mode = mode; - return ret; -} - -static int lm3554_set_torch(struct lm3554 *flash) -{ - u8 val; - - val = (flash->mode << LM3554_TORCH_MODE_SHIFT) | - (flash->torch_current << LM3554_TORCH_CURRENT_SHIFT) | - (flash->indicator_current << LM3554_INDICATOR_CURRENT_SHIFT); - - return lm3554_write(flash, LM3554_TORCH_BRIGHTNESS_REG, val); -} - -static int lm3554_set_flash(struct lm3554 *flash) -{ - u8 val; - - val = (flash->mode << LM3554_FLASH_MODE_SHIFT) | - (flash->flash_current << LM3554_FLASH_CURRENT_SHIFT); - - return lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, val); -} - -static int lm3554_set_duration(struct lm3554 *flash) -{ - u8 val; - - val = (flash->timeout << LM3554_FLASH_TIMEOUT_SHIFT) | - (flash->pdata->current_limit << LM3554_CURRENT_LIMIT_SHIFT); - - return lm3554_write(flash, LM3554_FLASH_DURATION_REG, val); -} - -static int lm3554_set_config1(struct lm3554 *flash) -{ - u8 val; - - val = (flash->pdata->envm_tx2 << LM3554_ENVM_TX2_SHIFT) | - (flash->pdata->tx2_polarity << 
LM3554_TX2_POLARITY_SHIFT); - return lm3554_write(flash, LM3554_CONFIG_REG_1, val); -} - -/* ----------------------------------------------------------------------------- - * Hardware trigger - */ -static void lm3554_flash_off_delay(struct timer_list *t) -{ - struct lm3554 *flash = from_timer(flash, t, flash_off_delay); - struct lm3554_platform_data *pdata = flash->pdata; - - gpio_set_value(pdata->gpio_strobe, 0); -} - -static int lm3554_hw_strobe(struct i2c_client *client, bool strobe) -{ - int ret, timer_pending; - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct lm3554 *flash = to_lm3554(sd); - struct lm3554_platform_data *pdata = flash->pdata; - - /* - * An abnormal high flash current is observed when strobe off the - * flash. Workaround here is firstly set flash current to lower level, - * wait a short moment, and then strobe off the flash. - */ - - timer_pending = del_timer_sync(&flash->flash_off_delay); - - /* Flash off */ - if (!strobe) { - /* set current to 70mA and wait a while */ - ret = lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, 0); - if (ret < 0) - goto err; - mod_timer(&flash->flash_off_delay, - jiffies + msecs_to_jiffies(LM3554_TIMER_DELAY)); - return 0; - } - - /* Flash on */ - - /* - * If timer is killed before run, flash is not strobe off, - * so must strobe off here - */ - if (timer_pending) - gpio_set_value(pdata->gpio_strobe, 0); - - /* Restore flash current settings */ - ret = lm3554_set_flash(flash); - if (ret < 0) - goto err; - - /* Strobe on Flash */ - gpio_set_value(pdata->gpio_strobe, 1); - - return 0; -err: - dev_err(&client->dev, "failed to %s flash strobe (%d)\n", - strobe ? 
"on" : "off", ret); - return ret; -} - -/* ----------------------------------------------------------------------------- - * V4L2 controls - */ - -static int lm3554_read_status(struct lm3554 *flash) -{ - int ret; - struct i2c_client *client = v4l2_get_subdevdata(&flash->sd); - - /* NOTE: reading register clear fault status */ - ret = lm3554_read(flash, LM3554_FLAGS_REG); - if (ret < 0) - return ret; - - /* - * Accordingly to datasheet we read back '1' in bit 6. - * Clear it first. - */ - ret &= ~LM3554_FLAG_UNUSED; - - /* - * Do not take TX1/TX2 signal as an error - * because MSIC will not turn off flash, but turn to - * torch mode according to gsm modem signal by hardware. - */ - ret &= ~(LM3554_FLAG_TX1_INTERRUPT | LM3554_FLAG_TX2_INTERRUPT); - - if (ret > 0) - dev_dbg(&client->dev, "LM3554 flag status: %02x\n", ret); - - return ret; -} - -static int lm3554_s_flash_timeout(struct v4l2_subdev *sd, u32 val) -{ - struct lm3554 *flash = to_lm3554(sd); - - val = clamp(val, LM3554_MIN_TIMEOUT, LM3554_MAX_TIMEOUT); - val = val / LM3554_TIMEOUT_STEPSIZE - 1; - - flash->timeout = val; - - return lm3554_set_duration(flash); -} - -static int lm3554_g_flash_timeout(struct v4l2_subdev *sd, s32 *val) -{ - struct lm3554 *flash = to_lm3554(sd); - - *val = (u32)(flash->timeout + 1) * LM3554_TIMEOUT_STEPSIZE; - - return 0; -} - -static int lm3554_s_flash_intensity(struct v4l2_subdev *sd, u32 intensity) -{ - struct lm3554 *flash = to_lm3554(sd); - - intensity = LM3554_CLAMP_PERCENTAGE(intensity); - intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_FLASH_STEP); - - flash->flash_current = intensity; - - return lm3554_set_flash(flash); -} - -static int lm3554_g_flash_intensity(struct v4l2_subdev *sd, s32 *val) -{ - struct lm3554 *flash = to_lm3554(sd); - - *val = LM3554_VALUE_TO_PERCENT((u32)flash->flash_current, - LM3554_FLASH_STEP); - - return 0; -} - -static int lm3554_s_torch_intensity(struct v4l2_subdev *sd, u32 intensity) -{ - struct lm3554 *flash = to_lm3554(sd); - - 
intensity = LM3554_CLAMP_PERCENTAGE(intensity); - intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_TORCH_STEP); - - flash->torch_current = intensity; - - return lm3554_set_torch(flash); -} - -static int lm3554_g_torch_intensity(struct v4l2_subdev *sd, s32 *val) -{ - struct lm3554 *flash = to_lm3554(sd); - - *val = LM3554_VALUE_TO_PERCENT((u32)flash->torch_current, - LM3554_TORCH_STEP); - - return 0; -} - -static int lm3554_s_indicator_intensity(struct v4l2_subdev *sd, u32 intensity) -{ - struct lm3554 *flash = to_lm3554(sd); - - intensity = LM3554_CLAMP_PERCENTAGE(intensity); - intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_INDICATOR_STEP); - - flash->indicator_current = intensity; - - return lm3554_set_torch(flash); -} - -static int lm3554_g_indicator_intensity(struct v4l2_subdev *sd, s32 *val) -{ - struct lm3554 *flash = to_lm3554(sd); - - *val = LM3554_VALUE_TO_PERCENT((u32)flash->indicator_current, - LM3554_INDICATOR_STEP); - - return 0; -} - -static int lm3554_s_flash_strobe(struct v4l2_subdev *sd, u32 val) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - return lm3554_hw_strobe(client, val); -} - -static int lm3554_s_flash_mode(struct v4l2_subdev *sd, u32 new_mode) -{ - struct lm3554 *flash = to_lm3554(sd); - unsigned int mode; - - switch (new_mode) { - case ATOMISP_FLASH_MODE_OFF: - mode = LM3554_MODE_SHUTDOWN; - break; - case ATOMISP_FLASH_MODE_FLASH: - mode = LM3554_MODE_FLASH; - break; - case ATOMISP_FLASH_MODE_INDICATOR: - mode = LM3554_MODE_INDICATOR; - break; - case ATOMISP_FLASH_MODE_TORCH: - mode = LM3554_MODE_TORCH; - break; - default: - return -EINVAL; - } - - return lm3554_set_mode(flash, mode); -} - -static int lm3554_g_flash_mode(struct v4l2_subdev *sd, s32 *val) -{ - struct lm3554 *flash = to_lm3554(sd); - *val = flash->mode; - return 0; -} - -static int lm3554_g_flash_status(struct v4l2_subdev *sd, s32 *val) -{ - struct lm3554 *flash = to_lm3554(sd); - int value; - - value = lm3554_read_status(flash); - if (value < 0) 
- return value; - - if (value & LM3554_FLAG_TIMEOUT) - *val = ATOMISP_FLASH_STATUS_TIMEOUT; - else if (value > 0) - *val = ATOMISP_FLASH_STATUS_HW_ERROR; - else - *val = ATOMISP_FLASH_STATUS_OK; - - return 0; -} - -#ifndef CSS15 -static int lm3554_g_flash_status_register(struct v4l2_subdev *sd, s32 *val) -{ - struct lm3554 *flash = to_lm3554(sd); - int ret; - - ret = lm3554_read(flash, LM3554_FLAGS_REG); - - if (ret < 0) - return ret; - - *val = ret; - return 0; -} -#endif - -static int lm3554_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct lm3554 *dev = - container_of(ctrl->handler, struct lm3554, ctrl_handler); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_FLASH_TIMEOUT: - ret = lm3554_s_flash_timeout(&dev->sd, ctrl->val); - break; - case V4L2_CID_FLASH_INTENSITY: - ret = lm3554_s_flash_intensity(&dev->sd, ctrl->val); - break; - case V4L2_CID_FLASH_TORCH_INTENSITY: - ret = lm3554_s_torch_intensity(&dev->sd, ctrl->val); - break; - case V4L2_CID_FLASH_INDICATOR_INTENSITY: - ret = lm3554_s_indicator_intensity(&dev->sd, ctrl->val); - break; - case V4L2_CID_FLASH_STROBE: - ret = lm3554_s_flash_strobe(&dev->sd, ctrl->val); - break; - case V4L2_CID_FLASH_MODE: - ret = lm3554_s_flash_mode(&dev->sd, ctrl->val); - break; - default: - ret = -EINVAL; - } - return ret; -} - -static int lm3554_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct lm3554 *dev = - container_of(ctrl->handler, struct lm3554, ctrl_handler); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_FLASH_TIMEOUT: - ret = lm3554_g_flash_timeout(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FLASH_INTENSITY: - ret = lm3554_g_flash_intensity(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FLASH_TORCH_INTENSITY: - ret = lm3554_g_torch_intensity(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FLASH_INDICATOR_INTENSITY: - ret = lm3554_g_indicator_intensity(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FLASH_MODE: - ret = lm3554_g_flash_mode(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FLASH_STATUS: - 
ret = lm3554_g_flash_status(&dev->sd, &ctrl->val); - break; -#ifndef CSS15 - case V4L2_CID_FLASH_STATUS_REGISTER: - ret = lm3554_g_flash_status_register(&dev->sd, &ctrl->val); - break; -#endif - default: - ret = -EINVAL; - } - - return ret; -} - -static const struct v4l2_ctrl_ops ctrl_ops = { - .s_ctrl = lm3554_s_ctrl, - .g_volatile_ctrl = lm3554_g_volatile_ctrl -}; - -static const struct v4l2_ctrl_config lm3554_controls[] = { - { - .ops = &ctrl_ops, - .id = V4L2_CID_FLASH_TIMEOUT, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Flash Timeout", - .min = 0x0, - .max = LM3554_MAX_TIMEOUT, - .step = 0x01, - .def = LM3554_DEFAULT_TIMEOUT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FLASH_INTENSITY, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Flash Intensity", - .min = LM3554_MIN_PERCENT, - .max = LM3554_MAX_PERCENT, - .step = 0x01, - .def = LM3554_FLASH_DEFAULT_BRIGHTNESS, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FLASH_TORCH_INTENSITY, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Torch Intensity", - .min = LM3554_MIN_PERCENT, - .max = LM3554_MAX_PERCENT, - .step = 0x01, - .def = LM3554_TORCH_DEFAULT_BRIGHTNESS, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FLASH_INDICATOR_INTENSITY, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Indicator Intensity", - .min = LM3554_MIN_PERCENT, - .max = LM3554_MAX_PERCENT, - .step = 0x01, - .def = LM3554_INDICATOR_DEFAULT_BRIGHTNESS, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FLASH_STROBE, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Flash Strobe", - .min = 0, - .max = 1, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FLASH_MODE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Flash Mode", - .min = 0, - .max = 100, - .step = 1, - .def = ATOMISP_FLASH_MODE_OFF, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FLASH_STATUS, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Flash Status", - .min = ATOMISP_FLASH_STATUS_OK, - .max = 
ATOMISP_FLASH_STATUS_TIMEOUT, - .step = 1, - .def = ATOMISP_FLASH_STATUS_OK, - .flags = 0, - }, -#ifndef CSS15 - { - .ops = &ctrl_ops, - .id = V4L2_CID_FLASH_STATUS_REGISTER, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Flash Status Register", - .min = 0, - .max = 255, - .step = 1, - .def = 0, - .flags = 0, - }, -#endif -}; - -/* ----------------------------------------------------------------------------- - * V4L2 subdev core operations - */ - -/* Put device into known state. */ -static int lm3554_setup(struct lm3554 *flash) -{ - struct i2c_client *client = v4l2_get_subdevdata(&flash->sd); - int ret; - - /* clear the flags register */ - ret = lm3554_read(flash, LM3554_FLAGS_REG); - if (ret < 0) - return ret; - - dev_dbg(&client->dev, "Fault info: %02x\n", ret); - - ret = lm3554_set_config1(flash); - if (ret < 0) - return ret; - - ret = lm3554_set_duration(flash); - if (ret < 0) - return ret; - - ret = lm3554_set_torch(flash); - if (ret < 0) - return ret; - - ret = lm3554_set_flash(flash); - if (ret < 0) - return ret; - - /* read status */ - ret = lm3554_read_status(flash); - if (ret < 0) - return ret; - - return ret ? -EIO : 0; -} - -static int __lm3554_s_power(struct lm3554 *flash, int power) -{ - struct lm3554_platform_data *pdata = flash->pdata; - int ret; - - /*initialize flash driver*/ - gpio_set_value(pdata->gpio_reset, power); - usleep_range(100, 100 + 1); - - if (power) { - /* Setup default values. This makes sure that the chip - * is in a known state. - */ - ret = lm3554_setup(flash); - if (ret < 0) { - __lm3554_s_power(flash, 0); - return ret; - } - } - - return 0; -} - -static int lm3554_s_power(struct v4l2_subdev *sd, int power) -{ - struct lm3554 *flash = to_lm3554(sd); - int ret = 0; - - mutex_lock(&flash->power_lock); - - if (flash->power_count == !power) { - ret = __lm3554_s_power(flash, !!power); - if (ret < 0) - goto done; - } - - flash->power_count += power ? 
1 : -1; - WARN_ON(flash->power_count < 0); - -done: - mutex_unlock(&flash->power_lock); - return ret; -} - -static const struct v4l2_subdev_core_ops lm3554_core_ops = { - .s_power = lm3554_s_power, -}; - -static const struct v4l2_subdev_ops lm3554_ops = { - .core = &lm3554_core_ops, -}; - -static int lm3554_detect(struct v4l2_subdev *sd) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct i2c_adapter *adapter = client->adapter; - struct lm3554 *flash = to_lm3554(sd); - int ret; - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_err(&client->dev, "lm3554_detect i2c error\n"); - return -ENODEV; - } - - /* Power up the flash driver and reset it */ - ret = lm3554_s_power(&flash->sd, 1); - if (ret < 0) { - dev_err(&client->dev, "Failed to power on lm3554 LED flash\n"); - } else { - dev_dbg(&client->dev, "Successfully detected lm3554 LED flash\n"); - lm3554_s_power(&flash->sd, 0); - } - - return ret; -} - -static int lm3554_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) -{ - return lm3554_s_power(sd, 1); -} - -static int lm3554_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) -{ - return lm3554_s_power(sd, 0); -} - -static const struct v4l2_subdev_internal_ops lm3554_internal_ops = { - .registered = lm3554_detect, - .open = lm3554_open, - .close = lm3554_close, -}; - -/* ----------------------------------------------------------------------------- - * I2C driver - */ -#ifdef CONFIG_PM - -static int lm3554_suspend(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct v4l2_subdev *subdev = i2c_get_clientdata(client); - struct lm3554 *flash = to_lm3554(subdev); - int rval; - - if (flash->power_count == 0) - return 0; - - rval = __lm3554_s_power(flash, 0); - - dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? 
"failed" : "ok"); - - return rval; -} - -static int lm3554_resume(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct v4l2_subdev *subdev = i2c_get_clientdata(client); - struct lm3554 *flash = to_lm3554(subdev); - int rval; - - if (flash->power_count == 0) - return 0; - - rval = __lm3554_s_power(flash, 1); - - dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok"); - - return rval; -} - -#else - -#define lm3554_suspend NULL -#define lm3554_resume NULL - -#endif /* CONFIG_PM */ - -static int lm3554_gpio_init(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct lm3554 *flash = to_lm3554(sd); - struct lm3554_platform_data *pdata = flash->pdata; - int ret; - - if (!gpio_is_valid(pdata->gpio_reset)) - return -EINVAL; - - ret = gpio_direction_output(pdata->gpio_reset, 0); - if (ret < 0) - goto err_gpio_reset; - dev_info(&client->dev, "flash led reset successfully\n"); - - if (!gpio_is_valid(pdata->gpio_strobe)) { - ret = -EINVAL; - goto err_gpio_dir_reset; - } - - ret = gpio_direction_output(pdata->gpio_strobe, 0); - if (ret < 0) - goto err_gpio_strobe; - - return 0; - -err_gpio_strobe: - gpio_free(pdata->gpio_strobe); -err_gpio_dir_reset: - gpio_direction_output(pdata->gpio_reset, 0); -err_gpio_reset: - gpio_free(pdata->gpio_reset); - - return ret; -} - -static int lm3554_gpio_uninit(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct lm3554 *flash = to_lm3554(sd); - struct lm3554_platform_data *pdata = flash->pdata; - int ret; - - ret = gpio_direction_output(pdata->gpio_strobe, 0); - if (ret < 0) - return ret; - - ret = gpio_direction_output(pdata->gpio_reset, 0); - if (ret < 0) - return ret; - - gpio_free(pdata->gpio_strobe); - gpio_free(pdata->gpio_reset); - return 0; -} - -static void *lm3554_platform_data_func(struct i2c_client *client) -{ - static struct lm3554_platform_data platform_data; - - platform_data.gpio_reset = - 
desc_to_gpio(gpiod_get_index(&client->dev, - NULL, 2, GPIOD_OUT_LOW)); - platform_data.gpio_strobe = - desc_to_gpio(gpiod_get_index(&client->dev, - NULL, 0, GPIOD_OUT_LOW)); - platform_data.gpio_torch = - desc_to_gpio(gpiod_get_index(&client->dev, - NULL, 1, GPIOD_OUT_LOW)); - dev_info(&client->dev, "camera pdata: lm3554: reset: %d strobe %d torch %d\n", - platform_data.gpio_reset, platform_data.gpio_strobe, - platform_data.gpio_torch); - - /* Set to TX2 mode, then ENVM/TX2 pin is a power amplifier sync input: - * ENVM/TX pin asserted, flash forced into torch; - * ENVM/TX pin desserted, flash set back; - */ - platform_data.envm_tx2 = 1; - platform_data.tx2_polarity = 0; - - /* set peak current limit to be 1000mA */ - platform_data.current_limit = 0; - - return &platform_data; -} - -static int lm3554_probe(struct i2c_client *client) -{ - int err = 0; - struct lm3554 *flash; - unsigned int i; - int ret; - - flash = kzalloc(sizeof(*flash), GFP_KERNEL); - if (!flash) - return -ENOMEM; - - flash->pdata = lm3554_platform_data_func(client); - - v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops); - flash->sd.internal_ops = &lm3554_internal_ops; - flash->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - flash->mode = ATOMISP_FLASH_MODE_OFF; - flash->timeout = LM3554_MAX_TIMEOUT / LM3554_TIMEOUT_STEPSIZE - 1; - ret = - v4l2_ctrl_handler_init(&flash->ctrl_handler, - ARRAY_SIZE(lm3554_controls)); - if (ret) { - dev_err(&client->dev, "error initialize a ctrl_handler.\n"); - goto fail2; - } - - for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++) - v4l2_ctrl_new_custom(&flash->ctrl_handler, &lm3554_controls[i], - NULL); - - if (flash->ctrl_handler.error) { - - dev_err(&client->dev, "ctrl_handler error.\n"); - goto fail2; - } - - flash->sd.ctrl_handler = &flash->ctrl_handler; - err = media_entity_pads_init(&flash->sd.entity, 0, NULL); - if (err) { - dev_err(&client->dev, "error initialize a media entity.\n"); - goto fail1; - } - - flash->sd.entity.function = MEDIA_ENT_F_FLASH; - - 
mutex_init(&flash->power_lock); - - timer_setup(&flash->flash_off_delay, lm3554_flash_off_delay, 0); - - err = lm3554_gpio_init(client); - if (err) { - dev_err(&client->dev, "gpio request/direction_output fail"); - goto fail2; - } - return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH); -fail2: - media_entity_cleanup(&flash->sd.entity); - v4l2_ctrl_handler_free(&flash->ctrl_handler); -fail1: - v4l2_device_unregister_subdev(&flash->sd); - kfree(flash); - - return err; -} - -static int lm3554_remove(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct lm3554 *flash = to_lm3554(sd); - int ret; - - media_entity_cleanup(&flash->sd.entity); - v4l2_ctrl_handler_free(&flash->ctrl_handler); - v4l2_device_unregister_subdev(sd); - - atomisp_gmin_remove_subdev(sd); - - del_timer_sync(&flash->flash_off_delay); - - ret = lm3554_gpio_uninit(client); - if (ret < 0) - goto fail; - - kfree(flash); - - return 0; -fail: - dev_err(&client->dev, "gpio request/direction_output fail"); - return ret; -} - -static const struct dev_pm_ops lm3554_pm_ops = { - .suspend = lm3554_suspend, - .resume = lm3554_resume, -}; - -static const struct acpi_device_id lm3554_acpi_match[] = { - { "INTCF1C" }, - {}, -}; -MODULE_DEVICE_TABLE(acpi, lm3554_acpi_match); - -static struct i2c_driver lm3554_driver = { - .driver = { - .name = "lm3554", - .pm = &lm3554_pm_ops, - .acpi_match_table = lm3554_acpi_match, - }, - .probe_new = lm3554_probe, - .remove = lm3554_remove, -}; -module_i2c_driver(lm3554_driver); - -MODULE_AUTHOR("Jing Tao "); -MODULE_DESCRIPTION("LED flash driver for LM3554"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c deleted file mode 100644 index 8e180f903335..000000000000 --- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c +++ /dev/null @@ -1,1908 +0,0 @@ -/* - * Support for mt9m114 Camera Sensor. 
- * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../include/linux/atomisp_gmin_platform.h" -#include - -#include "mt9m114.h" - -#define to_mt9m114_sensor(sd) container_of(sd, struct mt9m114_device, sd) - -/* - * TODO: use debug parameter to actually define when debug messages should - * be printed. - */ -static int debug; -static int aaalock; -module_param(debug, int, 0644); -MODULE_PARM_DESC(debug, "Debug level (0-1)"); - -static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value); -static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value); -static int mt9m114_wait_state(struct i2c_client *client, int timeout); - -static int -mt9m114_read_reg(struct i2c_client *client, u16 data_length, u32 reg, u32 *val) -{ - int err; - struct i2c_msg msg[2]; - unsigned char data[4]; - - if (!client->adapter) { - v4l2_err(client, "%s error, no client->adapter\n", __func__); - return -ENODEV; - } - - if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT - && data_length != MISENSOR_32BIT) { - v4l2_err(client, "%s error, invalid data length\n", __func__); - return -EINVAL; - } - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = MSG_LEN_OFFSET; - msg[0].buf = data; - - /* high byte goes out first */ - data[0] = (u16) (reg >> 8); - data[1] = (u16) (reg & 0xff); - - msg[1].addr = client->addr; - msg[1].len = data_length; - 
msg[1].flags = I2C_M_RD; - msg[1].buf = data; - - err = i2c_transfer(client->adapter, msg, 2); - - if (err >= 0) { - *val = 0; - /* high byte comes first */ - if (data_length == MISENSOR_8BIT) - *val = data[0]; - else if (data_length == MISENSOR_16BIT) - *val = data[1] + (data[0] << 8); - else - *val = data[3] + (data[2] << 8) + - (data[1] << 16) + (data[0] << 24); - - return 0; - } - - dev_err(&client->dev, "read from offset 0x%x error %d", reg, err); - return err; -} - -static int -mt9m114_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u32 val) -{ - int num_msg; - struct i2c_msg msg; - unsigned char data[6] = {0}; - __be16 *wreg; - int retry = 0; - - if (!client->adapter) { - v4l2_err(client, "%s error, no client->adapter\n", __func__); - return -ENODEV; - } - - if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT - && data_length != MISENSOR_32BIT) { - v4l2_err(client, "%s error, invalid data_length\n", __func__); - return -EINVAL; - } - - memset(&msg, 0, sizeof(msg)); - -again: - msg.addr = client->addr; - msg.flags = 0; - msg.len = 2 + data_length; - msg.buf = data; - - /* high byte goes out first */ - wreg = (void *)data; - *wreg = cpu_to_be16(reg); - - if (data_length == MISENSOR_8BIT) { - data[2] = (u8)(val); - } else if (data_length == MISENSOR_16BIT) { - u16 *wdata = (void *)&data[2]; - - *wdata = be16_to_cpu(*(__be16 *)&data[2]); - } else { - /* MISENSOR_32BIT */ - u32 *wdata = (void *)&data[2]; - - *wdata = be32_to_cpu(*(__be32 *)&data[2]); - } - - num_msg = i2c_transfer(client->adapter, &msg, 1); - - /* - * HACK: Need some delay here for Rev 2 sensors otherwise some - * registers do not seem to load correctly. - */ - mdelay(1); - - if (num_msg >= 0) - return 0; - - dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d", - val, reg, num_msg); - if (retry <= I2C_RETRY_COUNT) { - dev_dbg(&client->dev, "retrying... 
%d", retry); - retry++; - msleep(20); - goto again; - } - - return num_msg; -} - -/** - * misensor_rmw_reg - Read/Modify/Write a value to a register in the sensor - * device - * @client: i2c driver client structure - * @data_length: 8/16/32-bits length - * @reg: register address - * @mask: masked out bits - * @set: bits set - * - * Read/modify/write a value to a register in the sensor device. - * Returns zero if successful, or non-zero otherwise. - */ -static int -misensor_rmw_reg(struct i2c_client *client, u16 data_length, u16 reg, - u32 mask, u32 set) -{ - int err; - u32 val; - - /* Exit when no mask */ - if (mask == 0) - return 0; - - /* @mask must not exceed data length */ - switch (data_length) { - case MISENSOR_8BIT: - if (mask & ~0xff) - return -EINVAL; - break; - case MISENSOR_16BIT: - if (mask & ~0xffff) - return -EINVAL; - break; - case MISENSOR_32BIT: - break; - default: - /* Wrong @data_length */ - return -EINVAL; - } - - err = mt9m114_read_reg(client, data_length, reg, &val); - if (err) { - v4l2_err(client, "misensor_rmw_reg error exit, read failed\n"); - return -EINVAL; - } - - val &= ~mask; - - /* - * Perform the OR function if the @set exists. - * Shift @set value to target bit location. @set should set only - * bits included in @mask. - * - * REVISIT: This function expects @set to be non-shifted. Its shift - * value is then defined to be equal to mask's LSB position. - * How about to inform values in their right offset position and avoid - * this unneeded shift operation? 
- */ - set <<= ffs(mask) - 1; - val |= set & mask; - - err = mt9m114_write_reg(client, data_length, reg, val); - if (err) { - v4l2_err(client, "misensor_rmw_reg error exit, write failed\n"); - return -EINVAL; - } - - return 0; -} - - -static int __mt9m114_flush_reg_array(struct i2c_client *client, - struct mt9m114_write_ctrl *ctrl) -{ - struct i2c_msg msg; - const int num_msg = 1; - int ret; - int retry = 0; - __be16 *data16 = (void *)&ctrl->buffer.addr; - - if (ctrl->index == 0) - return 0; - -again: - msg.addr = client->addr; - msg.flags = 0; - msg.len = 2 + ctrl->index; - *data16 = cpu_to_be16(ctrl->buffer.addr); - msg.buf = (u8 *)&ctrl->buffer; - - ret = i2c_transfer(client->adapter, &msg, num_msg); - if (ret != num_msg) { - if (++retry <= I2C_RETRY_COUNT) { - dev_dbg(&client->dev, "retrying... %d\n", retry); - msleep(20); - goto again; - } - dev_err(&client->dev, "%s: i2c transfer error\n", __func__); - return -EIO; - } - - ctrl->index = 0; - - /* - * REVISIT: Previously we had a delay after writing data to sensor. - * But it was removed as our tests have shown it is not necessary - * anymore. - */ - - return 0; -} - -static int __mt9m114_buf_reg_array(struct i2c_client *client, - struct mt9m114_write_ctrl *ctrl, - const struct misensor_reg *next) -{ - __be16 *data16; - __be32 *data32; - int err; - - /* Insufficient buffer? Let's flush and get more free space. 
*/ - if (ctrl->index + next->length >= MT9M114_MAX_WRITE_BUF_SIZE) { - err = __mt9m114_flush_reg_array(client, ctrl); - if (err) - return err; - } - - switch (next->length) { - case MISENSOR_8BIT: - ctrl->buffer.data[ctrl->index] = (u8)next->val; - break; - case MISENSOR_16BIT: - data16 = (__be16 *)&ctrl->buffer.data[ctrl->index]; - *data16 = cpu_to_be16((u16)next->val); - break; - case MISENSOR_32BIT: - data32 = (__be32 *)&ctrl->buffer.data[ctrl->index]; - *data32 = cpu_to_be32(next->val); - break; - default: - return -EINVAL; - } - - /* When first item is added, we need to store its starting address */ - if (ctrl->index == 0) - ctrl->buffer.addr = next->reg; - - ctrl->index += next->length; - - return 0; -} - -static int -__mt9m114_write_reg_is_consecutive(struct i2c_client *client, - struct mt9m114_write_ctrl *ctrl, - const struct misensor_reg *next) -{ - if (ctrl->index == 0) - return 1; - - return ctrl->buffer.addr + ctrl->index == next->reg; -} - -/* - * mt9m114_write_reg_array - Initializes a list of mt9m114 registers - * @client: i2c driver client structure - * @reglist: list of registers to be written - * @poll: completion polling requirement - * This function initializes a list of registers. When consecutive addresses - * are found in a row on the list, this function creates a buffer and sends - * consecutive data in a single i2c_transfer(). - * - * __mt9m114_flush_reg_array, __mt9m114_buf_reg_array() and - * __mt9m114_write_reg_is_consecutive() are internal functions to - * mt9m114_write_reg_array() and should be not used anywhere else. 
- * - */ -static int mt9m114_write_reg_array(struct i2c_client *client, - const struct misensor_reg *reglist, - int poll) -{ - const struct misensor_reg *next = reglist; - struct mt9m114_write_ctrl ctrl; - int err; - - if (poll == PRE_POLLING) { - err = mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT); - if (err) - return err; - } - - ctrl.index = 0; - for (; next->length != MISENSOR_TOK_TERM; next++) { - switch (next->length & MISENSOR_TOK_MASK) { - case MISENSOR_TOK_DELAY: - err = __mt9m114_flush_reg_array(client, &ctrl); - if (err) - return err; - msleep(next->val); - break; - case MISENSOR_TOK_RMW: - err = __mt9m114_flush_reg_array(client, &ctrl); - err |= misensor_rmw_reg(client, - next->length & - ~MISENSOR_TOK_RMW, - next->reg, next->val, - next->val2); - if (err) { - dev_err(&client->dev, "%s read err. aborted\n", - __func__); - return -EINVAL; - } - break; - default: - /* - * If next address is not consecutive, data needs to be - * flushed before proceed. - */ - if (!__mt9m114_write_reg_is_consecutive(client, &ctrl, - next)) { - err = __mt9m114_flush_reg_array(client, &ctrl); - if (err) - return err; - } - err = __mt9m114_buf_reg_array(client, &ctrl, next); - if (err) { - v4l2_err(client, "%s: write error, aborted\n", - __func__); - return err; - } - break; - } - } - - err = __mt9m114_flush_reg_array(client, &ctrl); - if (err) - return err; - - if (poll == POST_POLLING) - return mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT); - - return 0; -} - -static int mt9m114_wait_state(struct i2c_client *client, int timeout) -{ - int ret; - unsigned int val; - - while (timeout-- > 0) { - ret = mt9m114_read_reg(client, MISENSOR_16BIT, 0x0080, &val); - if (ret) - return ret; - if ((val & 0x2) == 0) - return 0; - msleep(20); - } - - return -EINVAL; - -} - -static int mt9m114_set_suspend(struct v4l2_subdev *sd) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - return mt9m114_write_reg_array(client, - mt9m114_standby_reg, POST_POLLING); -} - -static 
int mt9m114_init_common(struct v4l2_subdev *sd) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - return mt9m114_write_reg_array(client, mt9m114_common, PRE_POLLING); -} - -static int power_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret; - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - - if (!dev || !dev->platform_data) - return -ENODEV; - - if (flag) { - ret = dev->platform_data->v2p8_ctrl(sd, 1); - if (ret == 0) { - ret = dev->platform_data->v1p8_ctrl(sd, 1); - if (ret) - ret = dev->platform_data->v2p8_ctrl(sd, 0); - } - } else { - ret = dev->platform_data->v2p8_ctrl(sd, 0); - ret = dev->platform_data->v1p8_ctrl(sd, 0); - } - return ret; -} - -static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret; - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - - if (!dev || !dev->platform_data) - return -ENODEV; - - /* Note: current modules wire only one GPIO signal (RESET#), - * but the schematic wires up two to the connector. BIOS - * versions have been unfortunately inconsistent with which - * ACPI index RESET# is on, so hit both */ - - if (flag) { - ret = dev->platform_data->gpio0_ctrl(sd, 0); - ret = dev->platform_data->gpio1_ctrl(sd, 0); - msleep(60); - ret |= dev->platform_data->gpio0_ctrl(sd, 1); - ret |= dev->platform_data->gpio1_ctrl(sd, 1); - } else { - ret = dev->platform_data->gpio0_ctrl(sd, 0); - ret = dev->platform_data->gpio1_ctrl(sd, 0); - } - return ret; -} - -static int power_up(struct v4l2_subdev *sd) -{ - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (NULL == dev->platform_data) { - dev_err(&client->dev, "no camera_sensor_platform_data"); - return -ENODEV; - } - - /* power control */ - ret = power_ctrl(sd, 1); - if (ret) - goto fail_power; - - /* flis clock control */ - ret = dev->platform_data->flisclk_ctrl(sd, 1); - if (ret) - goto fail_clk; - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 1); - if (ret) - dev_err(&client->dev, "gpio 
failed 1\n"); - /* - * according to DS, 44ms is needed between power up and first i2c - * commend - */ - msleep(50); - - return 0; - -fail_clk: - dev->platform_data->flisclk_ctrl(sd, 0); -fail_power: - power_ctrl(sd, 0); - dev_err(&client->dev, "sensor power-up failed\n"); - - return ret; -} - -static int power_down(struct v4l2_subdev *sd) -{ - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (NULL == dev->platform_data) { - dev_err(&client->dev, "no camera_sensor_platform_data"); - return -ENODEV; - } - - ret = dev->platform_data->flisclk_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "flisclk failed\n"); - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "gpio failed 1\n"); - - /* power control */ - ret = power_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "vprog failed.\n"); - - /*according to DS, 20ms is needed after power down*/ - msleep(20); - - return ret; -} - -static int mt9m114_s_power(struct v4l2_subdev *sd, int power) -{ - if (power == 0) - return power_down(sd); - else { - if (power_up(sd)) - return -EINVAL; - - return mt9m114_init_common(sd); - } -} - -/* - * distance - calculate the distance - * @res: resolution - * @w: width - * @h: height - * - * Get the gap between resolution and w/h. - * res->width/height smaller than w/h wouldn't be considered. - * Returns the value of gap or -1 if fail. 
- */ -#define LARGEST_ALLOWED_RATIO_MISMATCH 600 -static int distance(struct mt9m114_res_struct const *res, u32 w, u32 h) -{ - unsigned int w_ratio; - unsigned int h_ratio; - int match; - - if (w == 0) - return -1; - w_ratio = (res->width << 13) / w; - if (h == 0) - return -1; - h_ratio = (res->height << 13) / h; - if (h_ratio == 0) - return -1; - match = abs(((w_ratio << 13) / h_ratio) - 8192); - - if ((w_ratio < 8192) || (h_ratio < 8192) || - (match > LARGEST_ALLOWED_RATIO_MISMATCH)) - return -1; - - return w_ratio + h_ratio; -} - -/* Return the nearest higher resolution index */ -static int nearest_resolution_index(int w, int h) -{ - int i; - int idx = -1; - int dist; - int min_dist = INT_MAX; - const struct mt9m114_res_struct *tmp_res = NULL; - - for (i = 0; i < ARRAY_SIZE(mt9m114_res); i++) { - tmp_res = &mt9m114_res[i]; - dist = distance(tmp_res, w, h); - if (dist == -1) - continue; - if (dist < min_dist) { - min_dist = dist; - idx = i; - } - } - - return idx; -} - -static int mt9m114_try_res(u32 *w, u32 *h) -{ - int idx = 0; - - if ((*w > MT9M114_RES_960P_SIZE_H) - || (*h > MT9M114_RES_960P_SIZE_V)) { - *w = MT9M114_RES_960P_SIZE_H; - *h = MT9M114_RES_960P_SIZE_V; - } else { - idx = nearest_resolution_index(*w, *h); - - /* - * nearest_resolution_index() doesn't return smaller - * resolutions. If it fails, it means the requested - * resolution is higher than wecan support. Fallback - * to highest possible resolution in this case. 
- */ - if (idx == -1) - idx = ARRAY_SIZE(mt9m114_res) - 1; - - *w = mt9m114_res[idx].width; - *h = mt9m114_res[idx].height; - } - - return 0; -} - -static struct mt9m114_res_struct *mt9m114_to_res(u32 w, u32 h) -{ - int index; - - for (index = 0; index < N_RES; index++) { - if ((mt9m114_res[index].width == w) && - (mt9m114_res[index].height == h)) - break; - } - - /* No mode found */ - if (index >= N_RES) - return NULL; - - return &mt9m114_res[index]; -} - -static int mt9m114_res2size(struct v4l2_subdev *sd, int *h_size, int *v_size) -{ - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - unsigned short hsize; - unsigned short vsize; - - switch (dev->res) { - case MT9M114_RES_736P: - hsize = MT9M114_RES_736P_SIZE_H; - vsize = MT9M114_RES_736P_SIZE_V; - break; - case MT9M114_RES_864P: - hsize = MT9M114_RES_864P_SIZE_H; - vsize = MT9M114_RES_864P_SIZE_V; - break; - case MT9M114_RES_960P: - hsize = MT9M114_RES_960P_SIZE_H; - vsize = MT9M114_RES_960P_SIZE_V; - break; - default: - v4l2_err(sd, "%s: Resolution 0x%08x unknown\n", __func__, - dev->res); - return -EINVAL; - } - - if (h_size != NULL) - *h_size = hsize; - if (v_size != NULL) - *v_size = vsize; - - return 0; -} - -static int mt9m114_get_intg_factor(struct i2c_client *client, - struct camera_mipi_info *info, - const struct mt9m114_res_struct *res) -{ - struct atomisp_sensor_mode_data *buf = &info->data; - u32 reg_val; - int ret; - - if (info == NULL) - return -EINVAL; - - ret = mt9m114_read_reg(client, MISENSOR_32BIT, - REG_PIXEL_CLK, ®_val); - if (ret) - return ret; - buf->vt_pix_clk_freq_mhz = reg_val; - - /* get integration time */ - buf->coarse_integration_time_min = MT9M114_COARSE_INTG_TIME_MIN; - buf->coarse_integration_time_max_margin = - MT9M114_COARSE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_min = MT9M114_FINE_INTG_TIME_MIN; - buf->fine_integration_time_max_margin = - MT9M114_FINE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_def = MT9M114_FINE_INTG_TIME_MIN; - - 
buf->frame_length_lines = res->lines_per_frame; - buf->line_length_pck = res->pixels_per_line; - buf->read_mode = res->bin_mode; - - /* get the cropping and output resolution to ISP for this mode. */ - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_H_START, ®_val); - if (ret) - return ret; - buf->crop_horizontal_start = reg_val; - - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_V_START, ®_val); - if (ret) - return ret; - buf->crop_vertical_start = reg_val; - - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_H_END, ®_val); - if (ret) - return ret; - buf->crop_horizontal_end = reg_val; - - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_V_END, ®_val); - if (ret) - return ret; - buf->crop_vertical_end = reg_val; - - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_WIDTH, ®_val); - if (ret) - return ret; - buf->output_width = reg_val; - - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_HEIGHT, ®_val); - if (ret) - return ret; - buf->output_height = reg_val; - - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_TIMING_HTS, ®_val); - if (ret) - return ret; - buf->line_length_pck = reg_val; - - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_TIMING_VTS, ®_val); - if (ret) - return ret; - buf->frame_length_lines = reg_val; - - buf->binning_factor_x = res->bin_factor_x ? - res->bin_factor_x : 1; - buf->binning_factor_y = res->bin_factor_y ? 
- res->bin_factor_y : 1; - return 0; -} - -static int mt9m114_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - int width, height; - int ret; - if (format->pad) - return -EINVAL; - fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; - - ret = mt9m114_res2size(sd, &width, &height); - if (ret) - return ret; - fmt->width = width; - fmt->height = height; - - return 0; -} - -static int mt9m114_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct i2c_client *c = v4l2_get_subdevdata(sd); - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - struct mt9m114_res_struct *res_index; - u32 width = fmt->width; - u32 height = fmt->height; - struct camera_mipi_info *mt9m114_info = NULL; - - int ret; - if (format->pad) - return -EINVAL; - dev->streamon = 0; - dev->first_exp = MT9M114_DEFAULT_FIRST_EXP; - - mt9m114_info = v4l2_get_subdev_hostdata(sd); - if (mt9m114_info == NULL) - return -EINVAL; - - mt9m114_try_res(&width, &height); - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *fmt; - return 0; - } - res_index = mt9m114_to_res(width, height); - - /* Sanity check */ - if (unlikely(!res_index)) { - WARN_ON(1); - return -EINVAL; - } - - switch (res_index->res) { - case MT9M114_RES_736P: - ret = mt9m114_write_reg_array(c, mt9m114_736P_init, NO_POLLING); - ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, - MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET); - break; - case MT9M114_RES_864P: - ret = mt9m114_write_reg_array(c, mt9m114_864P_init, NO_POLLING); - ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, - MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET); - break; - case MT9M114_RES_960P: - ret = mt9m114_write_reg_array(c, mt9m114_976P_init, NO_POLLING); - /* set sensor read_mode to Normal */ - ret += misensor_rmw_reg(c, 
MISENSOR_16BIT, MISENSOR_READ_MODE, - MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET); - break; - default: - v4l2_err(sd, "set resolution: %d failed!\n", res_index->res); - return -EINVAL; - } - - if (ret) - return -EINVAL; - - ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg, POST_POLLING); - if (ret < 0) - return ret; - - if (mt9m114_set_suspend(sd)) - return -EINVAL; - - if (dev->res != res_index->res) { - int index; - - /* Switch to different size */ - if (width <= 640) { - dev->nctx = 0x00; /* Set for context A */ - } else { - /* - * Context B is used for resolutions larger than 640x480 - * Using YUV for Context B. - */ - dev->nctx = 0x01; /* set for context B */ - } - - /* - * Marked current sensor res as being "used" - * - * REVISIT: We don't need to use an "used" field on each mode - * list entry to know which mode is selected. If this - * information is really necessary, how about to use a single - * variable on sensor dev struct? - */ - for (index = 0; index < N_RES; index++) { - if ((width == mt9m114_res[index].width) && - (height == mt9m114_res[index].height)) { - mt9m114_res[index].used = true; - continue; - } - mt9m114_res[index].used = false; - } - } - ret = mt9m114_get_intg_factor(c, mt9m114_info, - &mt9m114_res[res_index->res]); - if (ret) { - dev_err(&c->dev, "failed to get integration_factor\n"); - return -EINVAL; - } - /* - * mt9m114 - we don't poll for context switch - * because it does not happen with streaming disabled. 
- */ - dev->res = res_index->res; - - fmt->width = width; - fmt->height = height; - fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; - return 0; -} - -/* TODO: Update to SOC functions, remove exposure and gain */ -static int mt9m114_g_focal(struct v4l2_subdev *sd, s32 *val) -{ - *val = (MT9M114_FOCAL_LENGTH_NUM << 16) | MT9M114_FOCAL_LENGTH_DEM; - return 0; -} - -static int mt9m114_g_fnumber(struct v4l2_subdev *sd, s32 *val) -{ - /*const f number for mt9m114*/ - *val = (MT9M114_F_NUMBER_DEFAULT_NUM << 16) | MT9M114_F_NUMBER_DEM; - return 0; -} - -static int mt9m114_g_fnumber_range(struct v4l2_subdev *sd, s32 *val) -{ - *val = (MT9M114_F_NUMBER_DEFAULT_NUM << 24) | - (MT9M114_F_NUMBER_DEM << 16) | - (MT9M114_F_NUMBER_DEFAULT_NUM << 8) | MT9M114_F_NUMBER_DEM; - return 0; -} - -/* Horizontal flip the image. */ -static int mt9m114_g_hflip(struct v4l2_subdev *sd, s32 *val) -{ - struct i2c_client *c = v4l2_get_subdevdata(sd); - int ret; - u32 data; - ret = mt9m114_read_reg(c, MISENSOR_16BIT, - (u32)MISENSOR_READ_MODE, &data); - if (ret) - return ret; - *val = !!(data & MISENSOR_HFLIP_MASK); - - return 0; -} - -static int mt9m114_g_vflip(struct v4l2_subdev *sd, s32 *val) -{ - struct i2c_client *c = v4l2_get_subdevdata(sd); - int ret; - u32 data; - - ret = mt9m114_read_reg(c, MISENSOR_16BIT, - (u32)MISENSOR_READ_MODE, &data); - if (ret) - return ret; - *val = !!(data & MISENSOR_VFLIP_MASK); - - return 0; -} - -static long mt9m114_s_exposure(struct v4l2_subdev *sd, - struct atomisp_exposure *exposure) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - int ret = 0; - unsigned int coarse_integration = 0; - unsigned int FLines = 0; - unsigned int FrameLengthLines = 0; /* ExposureTime.FrameLengthLines; */ - unsigned int AnalogGain, DigitalGain; - u32 AnalogGainToWrite = 0; - - dev_dbg(&client->dev, "%s(0x%X 0x%X 0x%X)\n", __func__, - exposure->integration_time[0], exposure->gain[0], - exposure->gain[1]); - - 
coarse_integration = exposure->integration_time[0]; - /* fine_integration = ExposureTime.FineIntegrationTime; */ - /* FrameLengthLines = ExposureTime.FrameLengthLines; */ - FLines = mt9m114_res[dev->res].lines_per_frame; - AnalogGain = exposure->gain[0]; - DigitalGain = exposure->gain[1]; - if (!dev->streamon) { - /*Save the first exposure values while stream is off*/ - dev->first_exp = coarse_integration; - dev->first_gain = AnalogGain; - dev->first_diggain = DigitalGain; - } - /* DigitalGain = 0x400 * (((u16) DigitalGain) >> 8) + - ((unsigned int)(0x400 * (((u16) DigitalGain) & 0xFF)) >>8); */ - - /* set frame length */ - if (FLines < coarse_integration + 6) - FLines = coarse_integration + 6; - if (FLines < FrameLengthLines) - FLines = FrameLengthLines; - ret = mt9m114_write_reg(client, MISENSOR_16BIT, 0x300A, FLines); - if (ret) { - v4l2_err(client, "%s: fail to set FLines\n", __func__); - return -EINVAL; - } - - /* set coarse integration */ - /* 3A provide real exposure time. - should not translate to any value here. 
*/ - ret = mt9m114_write_reg(client, MISENSOR_16BIT, - REG_EXPO_COARSE, (u16)(coarse_integration)); - if (ret) { - v4l2_err(client, "%s: fail to set exposure time\n", __func__); - return -EINVAL; - } - - /* - // set analog/digital gain - switch(AnalogGain) - { - case 0: - AnalogGainToWrite = 0x0; - break; - case 1: - AnalogGainToWrite = 0x20; - break; - case 2: - AnalogGainToWrite = 0x60; - break; - case 4: - AnalogGainToWrite = 0xA0; - break; - case 8: - AnalogGainToWrite = 0xE0; - break; - default: - AnalogGainToWrite = 0x20; - break; - } - */ - if (DigitalGain >= 16 || DigitalGain <= 1) - DigitalGain = 1; - /* AnalogGainToWrite = - (u16)((DigitalGain << 12) | AnalogGainToWrite); */ - AnalogGainToWrite = (u16)((DigitalGain << 12) | (u16)AnalogGain); - ret = mt9m114_write_reg(client, MISENSOR_16BIT, - REG_GAIN, AnalogGainToWrite); - if (ret) { - v4l2_err(client, "%s: fail to set AnalogGainToWrite\n", - __func__); - return -EINVAL; - } - - return ret; -} - -static long mt9m114_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) -{ - - switch (cmd) { - case ATOMISP_IOC_S_EXPOSURE: - return mt9m114_s_exposure(sd, arg); - default: - return -EINVAL; - } - - return 0; -} - -/* This returns the exposure time being used. This should only be used - for filling in EXIF data, not for actual image processing. */ -static int mt9m114_g_exposure(struct v4l2_subdev *sd, s32 *value) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - u32 coarse; - int ret; - - /* the fine integration time is currently not calculated */ - ret = mt9m114_read_reg(client, MISENSOR_16BIT, - REG_EXPO_COARSE, &coarse); - if (ret) - return ret; - - *value = coarse; - return 0; -} -#ifndef CSS15 -/* - * This function will return the sensor supported max exposure zone number. - * the sensor which supports max exposure zone number is 1. 
- */ -static int mt9m114_g_exposure_zone_num(struct v4l2_subdev *sd, s32 *val) -{ - *val = 1; - - return 0; -} - -/* - * set exposure metering, average/center_weighted/spot/matrix. - */ -static int mt9m114_s_exposure_metering(struct v4l2_subdev *sd, s32 val) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - switch (val) { - case V4L2_EXPOSURE_METERING_SPOT: - ret = mt9m114_write_reg_array(client, mt9m114_exp_average, - NO_POLLING); - if (ret) { - dev_err(&client->dev, "write exp_average reg err.\n"); - return ret; - } - break; - case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED: - default: - ret = mt9m114_write_reg_array(client, mt9m114_exp_center, - NO_POLLING); - if (ret) { - dev_err(&client->dev, "write exp_default reg err"); - return ret; - } - } - - return 0; -} - -/* - * This function is for touch exposure feature. - */ -static int mt9m114_s_exposure_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct misensor_reg exp_reg; - int width, height; - int grid_width, grid_height; - int grid_left, grid_top, grid_right, grid_bottom; - int win_left, win_top, win_right, win_bottom; - int i, j; - int ret; - - if (sel->which != V4L2_SUBDEV_FORMAT_TRY && - sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - grid_left = sel->r.left; - grid_top = sel->r.top; - grid_right = sel->r.left + sel->r.width - 1; - grid_bottom = sel->r.top + sel->r.height - 1; - - ret = mt9m114_res2size(sd, &width, &height); - if (ret) - return ret; - - grid_width = width / 5; - grid_height = height / 5; - - if (grid_width && grid_height) { - win_left = grid_left / grid_width; - win_top = grid_top / grid_height; - win_right = grid_right / grid_width; - win_bottom = grid_bottom / grid_height; - } else { - dev_err(&client->dev, "Incorrect exp grid.\n"); - return -EINVAL; - } - - win_left = clamp_t(int, win_left, 0, 4); - win_top = clamp_t(int, win_top, 
0, 4); - win_right = clamp_t(int, win_right, 0, 4); - win_bottom = clamp_t(int, win_bottom, 0, 4); - - ret = mt9m114_write_reg_array(client, mt9m114_exp_average, NO_POLLING); - if (ret) { - dev_err(&client->dev, "write exp_average reg err.\n"); - return ret; - } - - for (i = win_top; i <= win_bottom; i++) { - for (j = win_left; j <= win_right; j++) { - exp_reg = mt9m114_exp_win[i][j]; - - ret = mt9m114_write_reg(client, exp_reg.length, - exp_reg.reg, exp_reg.val); - if (ret) { - dev_err(&client->dev, "write exp_reg err.\n"); - return ret; - } - } - } - - return 0; -} -#endif - -static int mt9m114_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val) -{ - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - - *val = mt9m114_res[dev->res].bin_factor_x; - - return 0; -} - -static int mt9m114_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val) -{ - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - - *val = mt9m114_res[dev->res].bin_factor_y; - - return 0; -} - -static int mt9m114_s_ev(struct v4l2_subdev *sd, s32 val) -{ - struct i2c_client *c = v4l2_get_subdevdata(sd); - s32 luma = 0x37; - int err; - - /* EV value only support -2 to 2 - * 0: 0x37, 1:0x47, 2:0x57, -1:0x27, -2:0x17 - */ - if (val < -2 || val > 2) - return -EINVAL; - luma += 0x10 * val; - dev_dbg(&c->dev, "%s val:%d luma:0x%x\n", __func__, val, luma); - err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A); - if (err) { - dev_err(&c->dev, "%s logic addr access error\n", __func__); - return err; - } - err = mt9m114_write_reg(c, MISENSOR_8BIT, 0xC87A, (u32)luma); - if (err) { - dev_err(&c->dev, "%s write target_average_luma failed\n", - __func__); - return err; - } - udelay(10); - - return 0; -} - -static int mt9m114_g_ev(struct v4l2_subdev *sd, s32 *val) -{ - struct i2c_client *c = v4l2_get_subdevdata(sd); - int err; - u32 luma; - - err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A); - if (err) { - dev_err(&c->dev, "%s logic addr access error\n", __func__); - return err; - } - err = 
mt9m114_read_reg(c, MISENSOR_8BIT, 0xC87A, &luma); - if (err) { - dev_err(&c->dev, "%s read target_average_luma failed\n", - __func__); - return err; - } - luma -= 0x17; - luma /= 0x10; - *val = (s32)luma - 2; - dev_dbg(&c->dev, "%s val:%d\n", __func__, *val); - - return 0; -} - -/* Fake interface - * mt9m114 now can not support 3a_lock -*/ -static int mt9m114_s_3a_lock(struct v4l2_subdev *sd, s32 val) -{ - aaalock = val; - return 0; -} - -static int mt9m114_g_3a_lock(struct v4l2_subdev *sd, s32 *val) -{ - if (aaalock) - return V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE - | V4L2_LOCK_FOCUS; - return 0; -} - -static int mt9m114_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct mt9m114_device *dev = - container_of(ctrl->handler, struct mt9m114_device, ctrl_handler); - struct i2c_client *client = v4l2_get_subdevdata(&dev->sd); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_VFLIP: - dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n", - __func__, ctrl->val); - ret = mt9m114_t_vflip(&dev->sd, ctrl->val); - break; - case V4L2_CID_HFLIP: - dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n", - __func__, ctrl->val); - ret = mt9m114_t_hflip(&dev->sd, ctrl->val); - break; -#ifndef CSS15 - case V4L2_CID_EXPOSURE_METERING: - ret = mt9m114_s_exposure_metering(&dev->sd, ctrl->val); - break; -#endif - case V4L2_CID_EXPOSURE: - ret = mt9m114_s_ev(&dev->sd, ctrl->val); - break; - case V4L2_CID_3A_LOCK: - ret = mt9m114_s_3a_lock(&dev->sd, ctrl->val); - break; - default: - ret = -EINVAL; - } - return ret; -} - -static int mt9m114_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct mt9m114_device *dev = - container_of(ctrl->handler, struct mt9m114_device, ctrl_handler); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_VFLIP: - ret = mt9m114_g_vflip(&dev->sd, &ctrl->val); - break; - case V4L2_CID_HFLIP: - ret = mt9m114_g_hflip(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FOCAL_ABSOLUTE: - ret = mt9m114_g_focal(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_ABSOLUTE: - ret = 
mt9m114_g_fnumber(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_RANGE: - ret = mt9m114_g_fnumber_range(&dev->sd, &ctrl->val); - break; - case V4L2_CID_EXPOSURE_ABSOLUTE: - ret = mt9m114_g_exposure(&dev->sd, &ctrl->val); - break; -#ifndef CSS15 - case V4L2_CID_EXPOSURE_ZONE_NUM: - ret = mt9m114_g_exposure_zone_num(&dev->sd, &ctrl->val); - break; -#endif - case V4L2_CID_BIN_FACTOR_HORZ: - ret = mt9m114_g_bin_factor_x(&dev->sd, &ctrl->val); - break; - case V4L2_CID_BIN_FACTOR_VERT: - ret = mt9m114_g_bin_factor_y(&dev->sd, &ctrl->val); - break; - case V4L2_CID_EXPOSURE: - ret = mt9m114_g_ev(&dev->sd, &ctrl->val); - break; - case V4L2_CID_3A_LOCK: - ret = mt9m114_g_3a_lock(&dev->sd, &ctrl->val); - break; - default: - ret = -EINVAL; - } - - return ret; -} - -static const struct v4l2_ctrl_ops ctrl_ops = { - .s_ctrl = mt9m114_s_ctrl, - .g_volatile_ctrl = mt9m114_g_volatile_ctrl -}; - -static struct v4l2_ctrl_config mt9m114_controls[] = { - { - .ops = &ctrl_ops, - .id = V4L2_CID_VFLIP, - .name = "Image v-Flip", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = 0, - .max = 1, - .step = 1, - .def = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_HFLIP, - .name = "Image h-Flip", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = 0, - .max = 1, - .step = 1, - .def = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCAL_ABSOLUTE, - .name = "focal length", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = MT9M114_FOCAL_LENGTH_DEFAULT, - .max = MT9M114_FOCAL_LENGTH_DEFAULT, - .step = 1, - .def = MT9M114_FOCAL_LENGTH_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_ABSOLUTE, - .name = "f-number", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = MT9M114_F_NUMBER_DEFAULT, - .max = MT9M114_F_NUMBER_DEFAULT, - .step = 1, - .def = MT9M114_F_NUMBER_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_RANGE, - .name = "f-number range", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = MT9M114_F_NUMBER_RANGE, - .max = MT9M114_F_NUMBER_RANGE, - .step = 1, - .def 
= MT9M114_F_NUMBER_RANGE, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE_ABSOLUTE, - .name = "exposure", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = 0, - .max = 0xffff, - .step = 1, - .def = 0, - .flags = 0, - }, -#ifndef CSS15 - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE_ZONE_NUM, - .name = "one-time exposure zone number", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = 0, - .max = 0xffff, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE_METERING, - .name = "metering", - .type = V4L2_CTRL_TYPE_MENU, - .min = 0, - .max = 3, - .step = 0, - .def = 1, - .flags = 0, - }, -#endif - { - .ops = &ctrl_ops, - .id = V4L2_CID_BIN_FACTOR_HORZ, - .name = "horizontal binning factor", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = 0, - .max = MT9M114_BIN_FACTOR_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_BIN_FACTOR_VERT, - .name = "vertical binning factor", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = 0, - .max = MT9M114_BIN_FACTOR_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE, - .name = "exposure biasx", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = -2, - .max = 2, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_3A_LOCK, - .name = "3a lock", - .type = V4L2_CTRL_TYPE_BITMASK, - .min = 0, - .max = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE | V4L2_LOCK_FOCUS, - .step = 1, - .def = 0, - .flags = 0, - }, -}; - -static int mt9m114_detect(struct mt9m114_device *dev, struct i2c_client *client) -{ - struct i2c_adapter *adapter = client->adapter; - u32 retvalue; - - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { - dev_err(&client->dev, "%s: i2c error", __func__); - return -ENODEV; - } - mt9m114_read_reg(client, MISENSOR_16BIT, (u32)MT9M114_PID, &retvalue); - dev->real_model_id = retvalue; - - if (retvalue != MT9M114_MOD_ID) { - dev_err(&client->dev, "%s: failed: client->addr = %x\n", - 
__func__, client->addr); - return -ENODEV; - } - - return 0; -} - -static int -mt9m114_s_config(struct v4l2_subdev *sd, int irq, void *platform_data) -{ - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (NULL == platform_data) - return -ENODEV; - - dev->platform_data = - (struct camera_sensor_platform_data *)platform_data; - - ret = power_up(sd); - if (ret) { - v4l2_err(client, "mt9m114 power-up err"); - return ret; - } - - /* config & detect sensor */ - ret = mt9m114_detect(dev, client); - if (ret) { - v4l2_err(client, "mt9m114_detect err s_config.\n"); - goto fail_detect; - } - - ret = dev->platform_data->csi_cfg(sd, 1); - if (ret) - goto fail_csi_cfg; - - ret = mt9m114_set_suspend(sd); - if (ret) { - v4l2_err(client, "mt9m114 suspend err"); - return ret; - } - - ret = power_down(sd); - if (ret) { - v4l2_err(client, "mt9m114 power down err"); - return ret; - } - - return ret; - -fail_csi_cfg: - dev->platform_data->csi_cfg(sd, 0); -fail_detect: - power_down(sd); - dev_err(&client->dev, "sensor power-gating failed\n"); - return ret; -} - -/* Horizontal flip the image. 
*/ -static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value) -{ - struct i2c_client *c = v4l2_get_subdevdata(sd); - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - int err; - /* set for direct mode */ - err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850); - if (value) { - /* enable H flip ctx A */ - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x01); - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x01); - /* ctx B */ - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x01); - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x01); - - err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, - MISENSOR_HFLIP_MASK, MISENSOR_FLIP_EN); - - dev->bpat = MT9M114_BPAT_GRGRBGBG; - } else { - /* disable H flip ctx A */ - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x00); - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x00); - /* ctx B */ - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x00); - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x00); - - err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, - MISENSOR_HFLIP_MASK, MISENSOR_FLIP_DIS); - - dev->bpat = MT9M114_BPAT_BGBGGRGR; - } - - err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06); - udelay(10); - - return !!err; -} - -/* Vertically flip the image */ -static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value) -{ - struct i2c_client *c = v4l2_get_subdevdata(sd); - int err; - /* set for direct mode */ - err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850); - if (value >= 1) { - /* enable H flip - ctx A */ - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x01); - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x01); - /* ctx B */ - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x01); - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x01); - - err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, - MISENSOR_VFLIP_MASK, MISENSOR_FLIP_EN); - } else { 
- /* disable H flip - ctx A */ - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x00); - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x00); - /* ctx B */ - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x00); - err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x00); - - err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, - MISENSOR_VFLIP_MASK, MISENSOR_FLIP_DIS); - } - - err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06); - udelay(10); - - return !!err; -} - -static int mt9m114_g_frame_interval(struct v4l2_subdev *sd, - struct v4l2_subdev_frame_interval *interval) -{ - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - - interval->interval.numerator = 1; - interval->interval.denominator = mt9m114_res[dev->res].fps; - - return 0; -} - -static int mt9m114_s_stream(struct v4l2_subdev *sd, int enable) -{ - int ret; - struct i2c_client *c = v4l2_get_subdevdata(sd); - struct mt9m114_device *dev = to_mt9m114_sensor(sd); - struct atomisp_exposure exposure; - - if (enable) { - ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg, - POST_POLLING); - if (ret < 0) - return ret; - - if (dev->first_exp > MT9M114_MAX_FIRST_EXP) { - exposure.integration_time[0] = dev->first_exp; - exposure.gain[0] = dev->first_gain; - exposure.gain[1] = dev->first_diggain; - mt9m114_s_exposure(sd, &exposure); - } - dev->streamon = 1; - - } else { - dev->streamon = 0; - ret = mt9m114_set_suspend(sd); - } - - return ret; -} - -static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->index) - return -EINVAL; - code->code = MEDIA_BUS_FMT_SGRBG10_1X10; - - return 0; -} - -static int mt9m114_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) -{ - - unsigned int index = fse->index; - - if (index >= N_RES) - return -EINVAL; - - fse->min_width = mt9m114_res[index].width; - 
fse->min_height = mt9m114_res[index].height; - fse->max_width = mt9m114_res[index].width; - fse->max_height = mt9m114_res[index].height; - - return 0; -} - -static int mt9m114_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) -{ - int index; - struct mt9m114_device *snr = to_mt9m114_sensor(sd); - - if (frames == NULL) - return -EINVAL; - - for (index = 0; index < N_RES; index++) { - if (mt9m114_res[index].res == snr->res) - break; - } - - if (index >= N_RES) - return -EINVAL; - - *frames = mt9m114_res[index].skip_frames; - - return 0; -} - -static const struct v4l2_subdev_video_ops mt9m114_video_ops = { - .s_stream = mt9m114_s_stream, - .g_frame_interval = mt9m114_g_frame_interval, -}; - -static const struct v4l2_subdev_sensor_ops mt9m114_sensor_ops = { - .g_skip_frames = mt9m114_g_skip_frames, -}; - -static const struct v4l2_subdev_core_ops mt9m114_core_ops = { - .s_power = mt9m114_s_power, - .ioctl = mt9m114_ioctl, -}; - -/* REVISIT: Do we need pad operations? */ -static const struct v4l2_subdev_pad_ops mt9m114_pad_ops = { - .enum_mbus_code = mt9m114_enum_mbus_code, - .enum_frame_size = mt9m114_enum_frame_size, - .get_fmt = mt9m114_get_fmt, - .set_fmt = mt9m114_set_fmt, -#ifndef CSS15 - .set_selection = mt9m114_s_exposure_selection, -#endif -}; - -static const struct v4l2_subdev_ops mt9m114_ops = { - .core = &mt9m114_core_ops, - .video = &mt9m114_video_ops, - .pad = &mt9m114_pad_ops, - .sensor = &mt9m114_sensor_ops, -}; - -static int mt9m114_remove(struct i2c_client *client) -{ - struct mt9m114_device *dev; - struct v4l2_subdev *sd = i2c_get_clientdata(client); - - dev = container_of(sd, struct mt9m114_device, sd); - dev->platform_data->csi_cfg(sd, 0); - v4l2_device_unregister_subdev(sd); - media_entity_cleanup(&dev->sd.entity); - v4l2_ctrl_handler_free(&dev->ctrl_handler); - kfree(dev); - return 0; -} - -static int mt9m114_probe(struct i2c_client *client) -{ - struct mt9m114_device *dev; - int ret = 0; - unsigned int i; - void *pdata; - - /* Setup sensor 
configuration structure */ - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - v4l2_i2c_subdev_init(&dev->sd, client, &mt9m114_ops); - pdata = gmin_camera_platform_data(&dev->sd, - ATOMISP_INPUT_FORMAT_RAW_10, - atomisp_bayer_order_grbg); - if (pdata) - ret = mt9m114_s_config(&dev->sd, client->irq, pdata); - if (!pdata || ret) { - v4l2_device_unregister_subdev(&dev->sd); - kfree(dev); - return ret; - } - - ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA); - if (ret) { - v4l2_device_unregister_subdev(&dev->sd); - kfree(dev); - /* Coverity CID 298095 - return on error */ - return ret; - } - - /*TODO add format code here*/ - dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - dev->pad.flags = MEDIA_PAD_FL_SOURCE; - dev->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; - dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; - - ret = - v4l2_ctrl_handler_init(&dev->ctrl_handler, - ARRAY_SIZE(mt9m114_controls)); - if (ret) { - mt9m114_remove(client); - return ret; - } - - for (i = 0; i < ARRAY_SIZE(mt9m114_controls); i++) - v4l2_ctrl_new_custom(&dev->ctrl_handler, &mt9m114_controls[i], - NULL); - - if (dev->ctrl_handler.error) { - mt9m114_remove(client); - return dev->ctrl_handler.error; - } - - /* Use same lock for controls as for everything else. */ - dev->ctrl_handler.lock = &dev->input_lock; - dev->sd.ctrl_handler = &dev->ctrl_handler; - - /* REVISIT: Do we need media controller? 
*/ - ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); - if (ret) { - mt9m114_remove(client); - return ret; - } - return 0; -} - -static const struct acpi_device_id mt9m114_acpi_match[] = { - { "INT33F0" }, - { "CRMT1040" }, - {}, -}; -MODULE_DEVICE_TABLE(acpi, mt9m114_acpi_match); - -static struct i2c_driver mt9m114_driver = { - .driver = { - .name = "mt9m114", - .acpi_match_table = mt9m114_acpi_match, - }, - .probe_new = mt9m114_probe, - .remove = mt9m114_remove, -}; -module_i2c_driver(mt9m114_driver); - -MODULE_AUTHOR("Shuguang Gong "); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c deleted file mode 100644 index bba3d1745908..000000000000 --- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c +++ /dev/null @@ -1,1470 +0,0 @@ -/* - * Support for OmniVision OV2680 1080p HD camera sensor. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../include/linux/atomisp_gmin_platform.h" - -#include "ov2680.h" - -static int h_flag = 0; -static int v_flag = 0; -static enum atomisp_bayer_order ov2680_bayer_order_mapping[] = { - atomisp_bayer_order_bggr, - atomisp_bayer_order_grbg, - atomisp_bayer_order_gbrg, - atomisp_bayer_order_rggb, -}; - -/* i2c read/write stuff */ -static int ov2680_read_reg(struct i2c_client *client, - u16 data_length, u16 reg, u16 *val) -{ - int err; - struct i2c_msg msg[2]; - unsigned char data[6]; - - if (!client->adapter) { - dev_err(&client->dev, "%s error, no client->adapter\n", - __func__); - return -ENODEV; - } - - if (data_length != OV2680_8BIT && data_length != OV2680_16BIT - && data_length != OV2680_32BIT) { - dev_err(&client->dev, "%s error, invalid data length\n", - __func__); - return -EINVAL; - } - - memset(msg, 0 , sizeof(msg)); - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = I2C_MSG_LENGTH; - msg[0].buf = data; - - /* high byte goes out first */ - data[0] = (u8)(reg >> 8); - data[1] = (u8)(reg & 0xff); - - msg[1].addr = client->addr; - msg[1].len = data_length; - msg[1].flags = I2C_M_RD; - msg[1].buf = data; - - err = i2c_transfer(client->adapter, msg, 2); - if (err != 2) { - if (err >= 0) - err = -EIO; - dev_err(&client->dev, - "read from offset 0x%x error %d", reg, err); - return err; - } - - *val = 0; - /* high byte comes first */ - if (data_length == OV2680_8BIT) - *val = (u8)data[0]; - else if (data_length == OV2680_16BIT) - *val = be16_to_cpu(*(__be16 *)&data[0]); - else - *val = be32_to_cpu(*(__be32 *)&data[0]); - //dev_dbg(&client->dev, "++++i2c read adr%x = %x\n", reg,*val); - return 0; -} - -static int ov2680_i2c_write(struct i2c_client *client, u16 len, u8 *data) -{ - struct i2c_msg msg; - const int num_msg = 1; - int ret; - - msg.addr = client->addr; - 
msg.flags = 0; - msg.len = len; - msg.buf = data; - ret = i2c_transfer(client->adapter, &msg, 1); - //dev_dbg(&client->dev, "+++i2c write reg=%x->%x\n", data[0]*256 +data[1],data[2]); - return ret == num_msg ? 0 : -EIO; -} - -static int ov2680_write_reg(struct i2c_client *client, u16 data_length, - u16 reg, u16 val) -{ - int ret; - unsigned char data[4] = {0}; - __be16 *wreg = (void *)data; - const u16 len = data_length + sizeof(u16); /* 16-bit address + data */ - - if (data_length != OV2680_8BIT && data_length != OV2680_16BIT) { - dev_err(&client->dev, - "%s error, invalid data_length\n", __func__); - return -EINVAL; - } - - /* high byte goes out first */ - *wreg = cpu_to_be16(reg); - - if (data_length == OV2680_8BIT) { - data[2] = (u8)(val); - } else { - /* OV2680_16BIT */ - __be16 *wdata = (void *)&data[2]; - - *wdata = cpu_to_be16(val); - } - - ret = ov2680_i2c_write(client, len, data); - if (ret) - dev_err(&client->dev, - "write error: wrote 0x%x to offset 0x%x error %d", - val, reg, ret); - - return ret; -} - -/* - * ov2680_write_reg_array - Initializes a list of OV2680 registers - * @client: i2c driver client structure - * @reglist: list of registers to be written - * - * This function initializes a list of registers. When consecutive addresses - * are found in a row on the list, this function creates a buffer and sends - * consecutive data in a single i2c_transfer(). - * - * __ov2680_flush_reg_array, __ov2680_buf_reg_array() and - * __ov2680_write_reg_is_consecutive() are internal functions to - * ov2680_write_reg_array_fast() and should be not used anywhere else. 
- * - */ - -static int __ov2680_flush_reg_array(struct i2c_client *client, - struct ov2680_write_ctrl *ctrl) -{ - u16 size; - __be16 *data16 = (void *)&ctrl->buffer.addr; - - if (ctrl->index == 0) - return 0; - - size = sizeof(u16) + ctrl->index; /* 16-bit address + data */ - *data16 = cpu_to_be16(ctrl->buffer.addr); - ctrl->index = 0; - - return ov2680_i2c_write(client, size, (u8 *)&ctrl->buffer); -} - -static int __ov2680_buf_reg_array(struct i2c_client *client, - struct ov2680_write_ctrl *ctrl, - const struct ov2680_reg *next) -{ - int size; - __be16 *data16; - - switch (next->type) { - case OV2680_8BIT: - size = 1; - ctrl->buffer.data[ctrl->index] = (u8)next->val; - break; - case OV2680_16BIT: - size = 2; - data16 = (void *)&ctrl->buffer.data[ctrl->index]; - *data16 = cpu_to_be16((u16)next->val); - break; - default: - return -EINVAL; - } - - /* When first item is added, we need to store its starting address */ - if (ctrl->index == 0) - ctrl->buffer.addr = next->reg; - - ctrl->index += size; - - /* - * Buffer cannot guarantee free space for u32? Better flush it to avoid - * possible lack of memory for next item. 
- */ - if (ctrl->index + sizeof(u16) >= OV2680_MAX_WRITE_BUF_SIZE) - return __ov2680_flush_reg_array(client, ctrl); - - return 0; -} - -static int __ov2680_write_reg_is_consecutive(struct i2c_client *client, - struct ov2680_write_ctrl *ctrl, - const struct ov2680_reg *next) -{ - if (ctrl->index == 0) - return 1; - - return ctrl->buffer.addr + ctrl->index == next->reg; -} - -static int ov2680_write_reg_array(struct i2c_client *client, - const struct ov2680_reg *reglist) -{ - const struct ov2680_reg *next = reglist; - struct ov2680_write_ctrl ctrl; - int err; - dev_dbg(&client->dev, "++++write reg array\n"); - ctrl.index = 0; - for (; next->type != OV2680_TOK_TERM; next++) { - switch (next->type & OV2680_TOK_MASK) { - case OV2680_TOK_DELAY: - err = __ov2680_flush_reg_array(client, &ctrl); - if (err) - return err; - msleep(next->val); - break; - default: - /* - * If next address is not consecutive, data needs to be - * flushed before proceed. - */ - dev_dbg(&client->dev, "+++ov2680_write_reg_array reg=%x->%x\n", next->reg,next->val); - if (!__ov2680_write_reg_is_consecutive(client, &ctrl, - next)) { - err = __ov2680_flush_reg_array(client, &ctrl); - if (err) - return err; - } - err = __ov2680_buf_reg_array(client, &ctrl, next); - if (err) { - dev_err(&client->dev, "%s: write error, aborted\n", - __func__); - return err; - } - break; - } - } - - return __ov2680_flush_reg_array(client, &ctrl); -} -static int ov2680_g_focal(struct v4l2_subdev *sd, s32 *val) -{ - - *val = (OV2680_FOCAL_LENGTH_NUM << 16) | OV2680_FOCAL_LENGTH_DEM; - return 0; -} - -static int ov2680_g_fnumber(struct v4l2_subdev *sd, s32 *val) -{ - /*const f number for ov2680*/ - - *val = (OV2680_F_NUMBER_DEFAULT_NUM << 16) | OV2680_F_NUMBER_DEM; - return 0; -} - -static int ov2680_g_fnumber_range(struct v4l2_subdev *sd, s32 *val) -{ - *val = (OV2680_F_NUMBER_DEFAULT_NUM << 24) | - (OV2680_F_NUMBER_DEM << 16) | - (OV2680_F_NUMBER_DEFAULT_NUM << 8) | OV2680_F_NUMBER_DEM; - return 0; -} - -static int 
ov2680_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - dev_dbg(&client->dev, "++++ov2680_g_bin_factor_x\n"); - *val = ov2680_res[dev->fmt_idx].bin_factor_x; - - return 0; -} - -static int ov2680_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - - *val = ov2680_res[dev->fmt_idx].bin_factor_y; - dev_dbg(&client->dev, "++++ov2680_g_bin_factor_y\n"); - return 0; -} - - -static int ov2680_get_intg_factor(struct i2c_client *client, - struct camera_mipi_info *info, - const struct ov2680_resolution *res) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct atomisp_sensor_mode_data *buf = &info->data; - unsigned int pix_clk_freq_hz; - u16 reg_val; - int ret; - dev_dbg(&client->dev, "++++ov2680_get_intg_factor\n"); - if (!info) - return -EINVAL; - - /* pixel clock */ - pix_clk_freq_hz = res->pix_clk_freq * 1000000; - - dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz; - buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz; - - /* get integration time */ - buf->coarse_integration_time_min = OV2680_COARSE_INTG_TIME_MIN; - buf->coarse_integration_time_max_margin = - OV2680_COARSE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_min = OV2680_FINE_INTG_TIME_MIN; - buf->fine_integration_time_max_margin = - OV2680_FINE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_def = OV2680_FINE_INTG_TIME_MIN; - buf->frame_length_lines = res->lines_per_frame; - buf->line_length_pck = res->pixels_per_line; - buf->read_mode = res->bin_mode; - - /* get the cropping and output resolution to ISP for this mode. 
*/ - ret = ov2680_read_reg(client, OV2680_16BIT, - OV2680_HORIZONTAL_START_H, ®_val); - if (ret) - return ret; - buf->crop_horizontal_start = reg_val; - - ret = ov2680_read_reg(client, OV2680_16BIT, - OV2680_VERTICAL_START_H, ®_val); - if (ret) - return ret; - buf->crop_vertical_start = reg_val; - - ret = ov2680_read_reg(client, OV2680_16BIT, - OV2680_HORIZONTAL_END_H, ®_val); - if (ret) - return ret; - buf->crop_horizontal_end = reg_val; - - ret = ov2680_read_reg(client, OV2680_16BIT, - OV2680_VERTICAL_END_H, ®_val); - if (ret) - return ret; - buf->crop_vertical_end = reg_val; - - ret = ov2680_read_reg(client, OV2680_16BIT, - OV2680_HORIZONTAL_OUTPUT_SIZE_H, ®_val); - if (ret) - return ret; - buf->output_width = reg_val; - - ret = ov2680_read_reg(client, OV2680_16BIT, - OV2680_VERTICAL_OUTPUT_SIZE_H, ®_val); - if (ret) - return ret; - buf->output_height = reg_val; - - buf->binning_factor_x = res->bin_factor_x ? - (res->bin_factor_x * 2) : 1; - buf->binning_factor_y = res->bin_factor_y ? 
- (res->bin_factor_y * 2) : 1; - return 0; -} - -static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg, - int gain, int digitgain) - -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov2680_device *dev = to_ov2680_sensor(sd); - u16 vts; - int ret,exp_val; - - dev_dbg(&client->dev, - "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n", - coarse_itg, gain, digitgain); - - vts = ov2680_res[dev->fmt_idx].lines_per_frame; - - /* group hold */ - ret = ov2680_write_reg(client, OV2680_8BIT, - OV2680_GROUP_ACCESS, 0x00); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_GROUP_ACCESS); - return ret; - } - - /* Increase the VTS to match exposure + MARGIN */ - if (coarse_itg > vts - OV2680_INTEGRATION_TIME_MARGIN) - vts = (u16) coarse_itg + OV2680_INTEGRATION_TIME_MARGIN; - - ret = ov2680_write_reg(client, OV2680_16BIT, OV2680_TIMING_VTS_H, vts); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_TIMING_VTS_H); - return ret; - } - - /* set exposure */ - - /* Lower four bit should be 0*/ - exp_val = coarse_itg << 4; - ret = ov2680_write_reg(client, OV2680_8BIT, - OV2680_EXPOSURE_L, exp_val & 0xFF); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_EXPOSURE_L); - return ret; - } - - ret = ov2680_write_reg(client, OV2680_8BIT, - OV2680_EXPOSURE_M, (exp_val >> 8) & 0xFF); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_EXPOSURE_M); - return ret; - } - - ret = ov2680_write_reg(client, OV2680_8BIT, - OV2680_EXPOSURE_H, (exp_val >> 16) & 0x0F); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_EXPOSURE_H); - return ret; - } - - /* Analog gain */ - ret = ov2680_write_reg(client, OV2680_16BIT, OV2680_AGC_H, gain); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_AGC_H); - return ret; - } - /* 
Digital gain */ - if (digitgain) { - ret = ov2680_write_reg(client, OV2680_16BIT, - OV2680_MWB_RED_GAIN_H, digitgain); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_MWB_RED_GAIN_H); - return ret; - } - - ret = ov2680_write_reg(client, OV2680_16BIT, - OV2680_MWB_GREEN_GAIN_H, digitgain); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_MWB_RED_GAIN_H); - return ret; - } - - ret = ov2680_write_reg(client, OV2680_16BIT, - OV2680_MWB_BLUE_GAIN_H, digitgain); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV2680_MWB_RED_GAIN_H); - return ret; - } - } - - /* End group */ - ret = ov2680_write_reg(client, OV2680_8BIT, - OV2680_GROUP_ACCESS, 0x10); - if (ret) - return ret; - - /* Delay launch group */ - ret = ov2680_write_reg(client, OV2680_8BIT, - OV2680_GROUP_ACCESS, 0xa0); - if (ret) - return ret; - return ret; -} - -static int ov2680_set_exposure(struct v4l2_subdev *sd, int exposure, - int gain, int digitgain) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - int ret; - - mutex_lock(&dev->input_lock); - ret = __ov2680_set_exposure(sd, exposure, gain, digitgain); - mutex_unlock(&dev->input_lock); - - return ret; -} - -static long ov2680_s_exposure(struct v4l2_subdev *sd, - struct atomisp_exposure *exposure) -{ - u16 coarse_itg = exposure->integration_time[0]; - u16 analog_gain = exposure->gain[0]; - u16 digital_gain = exposure->gain[1]; - - /* we should not accept the invalid value below */ - if (analog_gain == 0) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - v4l2_err(client, "%s: invalid value\n", __func__); - return -EINVAL; - } - - // EXPOSURE CONTROL DISABLED FOR INITIAL CHECKIN, TUNING DOESN'T WORK - return ov2680_set_exposure(sd, coarse_itg, analog_gain, digital_gain); -} - - - - - -static long ov2680_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) -{ - - switch (cmd) { - case ATOMISP_IOC_S_EXPOSURE: - return 
ov2680_s_exposure(sd, arg); - - default: - return -EINVAL; - } - return 0; -} - -/* This returns the exposure time being used. This should only be used - * for filling in EXIF data, not for actual image processing. - */ -static int ov2680_q_exposure(struct v4l2_subdev *sd, s32 *value) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - u16 reg_v, reg_v2; - int ret; - - /* get exposure */ - ret = ov2680_read_reg(client, OV2680_8BIT, - OV2680_EXPOSURE_L, - ®_v); - if (ret) - goto err; - - ret = ov2680_read_reg(client, OV2680_8BIT, - OV2680_EXPOSURE_M, - ®_v2); - if (ret) - goto err; - - reg_v += reg_v2 << 8; - ret = ov2680_read_reg(client, OV2680_8BIT, - OV2680_EXPOSURE_H, - ®_v2); - if (ret) - goto err; - - *value = reg_v + (((u32)reg_v2 << 16)); -err: - return ret; -} - -static u32 ov2680_translate_bayer_order(enum atomisp_bayer_order code) -{ - switch (code) { - case atomisp_bayer_order_rggb: - return MEDIA_BUS_FMT_SRGGB10_1X10; - case atomisp_bayer_order_grbg: - return MEDIA_BUS_FMT_SGRBG10_1X10; - case atomisp_bayer_order_bggr: - return MEDIA_BUS_FMT_SBGGR10_1X10; - case atomisp_bayer_order_gbrg: - return MEDIA_BUS_FMT_SGBRG10_1X10; - } - return 0; -} - -static int ov2680_v_flip(struct v4l2_subdev *sd, s32 value) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct camera_mipi_info *ov2680_info = NULL; - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - u16 val; - u8 index; - dev_dbg(&client->dev, "@%s: value:%d\n", __func__, value); - ret = ov2680_read_reg(client, OV2680_8BIT, OV2680_FLIP_REG, &val); - if (ret) - return ret; - if (value) { - val |= OV2680_FLIP_MIRROR_BIT_ENABLE; - } else { - val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE; - } - ret = ov2680_write_reg(client, OV2680_8BIT, - OV2680_FLIP_REG, val); - if (ret) - return ret; - index = (v_flag>0?OV2680_FLIP_BIT:0) | (h_flag>0?OV2680_MIRROR_BIT:0); - ov2680_info = v4l2_get_subdev_hostdata(sd); - if (ov2680_info) { - ov2680_info->raw_bayer_order = 
ov2680_bayer_order_mapping[index]; - dev->format.code = ov2680_translate_bayer_order( - ov2680_info->raw_bayer_order); - } - return ret; -} - -static int ov2680_h_flip(struct v4l2_subdev *sd, s32 value) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct camera_mipi_info *ov2680_info = NULL; - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - u16 val; - u8 index; - dev_dbg(&client->dev, "@%s: value:%d\n", __func__, value); - - ret = ov2680_read_reg(client, OV2680_8BIT, OV2680_MIRROR_REG, &val); - if (ret) - return ret; - if (value) { - val |= OV2680_FLIP_MIRROR_BIT_ENABLE; - } else { - val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE; - } - ret = ov2680_write_reg(client, OV2680_8BIT, - OV2680_MIRROR_REG, val); - if (ret) - return ret; - index = (v_flag>0?OV2680_FLIP_BIT:0) | (h_flag>0?OV2680_MIRROR_BIT:0); - ov2680_info = v4l2_get_subdev_hostdata(sd); - if (ov2680_info) { - ov2680_info->raw_bayer_order = ov2680_bayer_order_mapping[index]; - dev->format.code = ov2680_translate_bayer_order( - ov2680_info->raw_bayer_order); - } - return ret; -} - -static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct ov2680_device *dev = - container_of(ctrl->handler, struct ov2680_device, ctrl_handler); - struct i2c_client *client = v4l2_get_subdevdata(&dev->sd); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_VFLIP: - dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n", - __func__, ctrl->val); - ret = ov2680_v_flip(&dev->sd, ctrl->val); - break; - case V4L2_CID_HFLIP: - dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n", - __func__, ctrl->val); - ret = ov2680_h_flip(&dev->sd, ctrl->val); - break; - default: - ret = -EINVAL; - } - return ret; -} - -static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct ov2680_device *dev = - container_of(ctrl->handler, struct ov2680_device, ctrl_handler); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_EXPOSURE_ABSOLUTE: - ret = ov2680_q_exposure(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FOCAL_ABSOLUTE: - 
ret = ov2680_g_focal(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_ABSOLUTE: - ret = ov2680_g_fnumber(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_RANGE: - ret = ov2680_g_fnumber_range(&dev->sd, &ctrl->val); - break; - case V4L2_CID_BIN_FACTOR_HORZ: - ret = ov2680_g_bin_factor_x(&dev->sd, &ctrl->val); - break; - case V4L2_CID_BIN_FACTOR_VERT: - ret = ov2680_g_bin_factor_y(&dev->sd, &ctrl->val); - break; - default: - ret = -EINVAL; - } - - return ret; -} - -static const struct v4l2_ctrl_ops ctrl_ops = { - .s_ctrl = ov2680_s_ctrl, - .g_volatile_ctrl = ov2680_g_volatile_ctrl -}; - -static const struct v4l2_ctrl_config ov2680_controls[] = { - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "exposure", - .min = 0x0, - .max = 0xffff, - .step = 0x01, - .def = 0x00, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCAL_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "focal length", - .min = OV2680_FOCAL_LENGTH_DEFAULT, - .max = OV2680_FOCAL_LENGTH_DEFAULT, - .step = 0x01, - .def = OV2680_FOCAL_LENGTH_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number", - .min = OV2680_F_NUMBER_DEFAULT, - .max = OV2680_F_NUMBER_DEFAULT, - .step = 0x01, - .def = OV2680_F_NUMBER_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_RANGE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number range", - .min = OV2680_F_NUMBER_RANGE, - .max = OV2680_F_NUMBER_RANGE, - .step = 0x01, - .def = OV2680_F_NUMBER_RANGE, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_BIN_FACTOR_HORZ, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "horizontal binning factor", - .min = 0, - .max = OV2680_BIN_FACTOR_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_BIN_FACTOR_VERT, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "vertical binning factor", - .min = 0, - .max = 
OV2680_BIN_FACTOR_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_VFLIP, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Flip", - .min = 0, - .max = 1, - .step = 1, - .def = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_HFLIP, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Mirror", - .min = 0, - .max = 1, - .step = 1, - .def = 0, - }, -}; - -static int ov2680_init_registers(struct v4l2_subdev *sd) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - ret = ov2680_write_reg(client, OV2680_8BIT, OV2680_SW_RESET, 0x01); - ret |= ov2680_write_reg_array(client, ov2680_global_setting); - - return ret; -} - -static int ov2680_init(struct v4l2_subdev *sd) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - - int ret; - - mutex_lock(&dev->input_lock); - - /* restore settings */ - ov2680_res = ov2680_res_preview; - N_RES = N_RES_PREVIEW; - - ret = ov2680_init_registers(sd); - - mutex_unlock(&dev->input_lock); - - return ret; -} - -static int power_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret = 0; - struct ov2680_device *dev = to_ov2680_sensor(sd); - if (!dev || !dev->platform_data) - return -ENODEV; - - if (flag) { - ret |= dev->platform_data->v1p8_ctrl(sd, 1); - ret |= dev->platform_data->v2p8_ctrl(sd, 1); - usleep_range(10000, 15000); - } - - if (!flag || ret) { - ret |= dev->platform_data->v1p8_ctrl(sd, 0); - ret |= dev->platform_data->v2p8_ctrl(sd, 0); - } - return ret; -} - -static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret; - struct ov2680_device *dev = to_ov2680_sensor(sd); - - if (!dev || !dev->platform_data) - return -ENODEV; - - /* The OV2680 documents only one GPIO input (#XSHUTDN), but - * existing integrations often wire two (reset/power_down) - * because that is the way other sensors work. There is no - * way to tell how it is wired internally, so existing - * firmwares expose both and we drive them symmetrically. 
*/ - if (flag) { - ret = dev->platform_data->gpio0_ctrl(sd, 1); - usleep_range(10000, 15000); - /* Ignore return from second gpio, it may not be there */ - dev->platform_data->gpio1_ctrl(sd, 1); - usleep_range(10000, 15000); - } else { - dev->platform_data->gpio1_ctrl(sd, 0); - ret = dev->platform_data->gpio0_ctrl(sd, 0); - } - return ret; -} - -static int power_up(struct v4l2_subdev *sd) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - - /* power control */ - ret = power_ctrl(sd, 1); - if (ret) - goto fail_power; - - /* according to DS, at least 5ms is needed between DOVDD and PWDN */ - usleep_range(5000, 6000); - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 1); - if (ret) { - ret = gpio_ctrl(sd, 1); - if (ret) - goto fail_power; - } - - /* flis clock control */ - ret = dev->platform_data->flisclk_ctrl(sd, 1); - if (ret) - goto fail_clk; - - /* according to DS, 20ms is needed between PWDN and i2c access */ - msleep(20); - - return 0; - -fail_clk: - gpio_ctrl(sd, 0); -fail_power: - power_ctrl(sd, 0); - dev_err(&client->dev, "sensor power-up failed\n"); - - return ret; -} - -static int power_down(struct v4l2_subdev *sd) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - h_flag = 0; - v_flag = 0; - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - - ret = dev->platform_data->flisclk_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "flisclk failed\n"); - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 0); - if (ret) { - ret = gpio_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "gpio failed 2\n"); - } - - /* power control */ - ret = power_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "vprog failed.\n"); - - return ret; -} - -static int ov2680_s_power(struct 
v4l2_subdev *sd, int on) -{ - int ret; - - if (on == 0){ - ret = power_down(sd); - } else { - ret = power_up(sd); - if (!ret) - return ov2680_init(sd); - } - return ret; -} - -/* - * distance - calculate the distance - * @res: resolution - * @w: width - * @h: height - * - * Get the gap between resolution and w/h. - * res->width/height smaller than w/h wouldn't be considered. - * Returns the value of gap or -1 if fail. - */ -#define LARGEST_ALLOWED_RATIO_MISMATCH 600 -static int distance(struct ov2680_resolution *res, u32 w, u32 h) -{ - unsigned int w_ratio = (res->width << 13) / w; - unsigned int h_ratio; - int match; - - if (h == 0) - return -1; - h_ratio = (res->height << 13) / h; - if (h_ratio == 0) - return -1; - match = abs(((w_ratio << 13) / h_ratio) - ((int)8192)); - - - if ((w_ratio < (int)8192) || (h_ratio < (int)8192) || - (match > LARGEST_ALLOWED_RATIO_MISMATCH)) - return -1; - - return w_ratio + h_ratio; -} - -/* Return the nearest higher resolution index */ -static int nearest_resolution_index(int w, int h) -{ - int i; - int idx = -1; - int dist; - int min_dist = INT_MAX; - struct ov2680_resolution *tmp_res = NULL; - - for (i = 0; i < N_RES; i++) { - tmp_res = &ov2680_res[i]; - dist = distance(tmp_res, w, h); - if (dist == -1) - continue; - if (dist < min_dist) { - min_dist = dist; - idx = i; - } - } - - return idx; -} - -static int get_resolution_index(int w, int h) -{ - int i; - - for (i = 0; i < N_RES; i++) { - if (w != ov2680_res[i].width) - continue; - if (h != ov2680_res[i].height) - continue; - - return i; - } - - return -1; -} - -static int ov2680_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct camera_mipi_info *ov2680_info = NULL; - int ret = 0; - int idx = 0; - dev_dbg(&client->dev, "+++++ov2680_s_mbus_fmt+++++l\n"); 
- if (format->pad) - return -EINVAL; - - if (!fmt) - return -EINVAL; - - ov2680_info = v4l2_get_subdev_hostdata(sd); - if (!ov2680_info) - return -EINVAL; - - mutex_lock(&dev->input_lock); - idx = nearest_resolution_index(fmt->width, fmt->height); - if (idx == -1) { - /* return the largest resolution */ - fmt->width = ov2680_res[N_RES - 1].width; - fmt->height = ov2680_res[N_RES - 1].height; - } else { - fmt->width = ov2680_res[idx].width; - fmt->height = ov2680_res[idx].height; - } - fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *fmt; - mutex_unlock(&dev->input_lock); - return 0; - } - dev->fmt_idx = get_resolution_index(fmt->width, fmt->height); - dev_dbg(&client->dev, "+++++get_resolution_index=%d+++++l\n", - dev->fmt_idx); - if (dev->fmt_idx == -1) { - dev_err(&client->dev, "get resolution fail\n"); - mutex_unlock(&dev->input_lock); - return -EINVAL; - } - v4l2_info(client, "__s_mbus_fmt i=%d, w=%d, h=%d\n", dev->fmt_idx, - fmt->width, fmt->height); - dev_dbg(&client->dev, "__s_mbus_fmt i=%d, w=%d, h=%d\n", - dev->fmt_idx, fmt->width, fmt->height); - - ret = ov2680_write_reg_array(client, ov2680_res[dev->fmt_idx].regs); - if (ret) - dev_err(&client->dev, "ov2680 write resolution register err\n"); - - ret = ov2680_get_intg_factor(client, ov2680_info, - &ov2680_res[dev->fmt_idx]); - if (ret) { - dev_err(&client->dev, "failed to get integration_factor\n"); - goto err; - } - - /*recall flip functions to avoid flip registers - * were overridden by default setting - */ - if (h_flag) - ov2680_h_flip(sd, h_flag); - if (v_flag) - ov2680_v_flip(sd, v_flag); - - v4l2_info(client, "\n%s idx %d \n", __func__, dev->fmt_idx); - - /*ret = startup(sd); - * if (ret) - * dev_err(&client->dev, "ov2680 startup err\n"); - */ -err: - mutex_unlock(&dev->input_lock); - return ret; -} - -static int ov2680_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct 
v4l2_mbus_framefmt *fmt = &format->format; - struct ov2680_device *dev = to_ov2680_sensor(sd); - - if (format->pad) - return -EINVAL; - - if (!fmt) - return -EINVAL; - - fmt->width = ov2680_res[dev->fmt_idx].width; - fmt->height = ov2680_res[dev->fmt_idx].height; - fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; - - return 0; -} - -static int ov2680_detect(struct i2c_client *client) -{ - struct i2c_adapter *adapter = client->adapter; - u16 high, low; - int ret; - u16 id; - u8 revision; - - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) - return -ENODEV; - - ret = ov2680_read_reg(client, OV2680_8BIT, - OV2680_SC_CMMN_CHIP_ID_H, &high); - if (ret) { - dev_err(&client->dev, "sensor_id_high = 0x%x\n", high); - return -ENODEV; - } - ret = ov2680_read_reg(client, OV2680_8BIT, - OV2680_SC_CMMN_CHIP_ID_L, &low); - id = ((((u16) high) << 8) | (u16) low); - - if (id != OV2680_ID) { - dev_err(&client->dev, "sensor ID error 0x%x\n", id); - return -ENODEV; - } - - ret = ov2680_read_reg(client, OV2680_8BIT, - OV2680_SC_CMMN_SUB_ID, &high); - revision = (u8) high & 0x0f; - - dev_info(&client->dev, "sensor_revision id = 0x%x, rev= %d\n", - id, revision); - - return 0; -} - -static int ov2680_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - mutex_lock(&dev->input_lock); - if(enable ) - dev_dbg(&client->dev, "ov2680_s_stream one \n"); - else - dev_dbg(&client->dev, "ov2680_s_stream off \n"); - - ret = ov2680_write_reg(client, OV2680_8BIT, OV2680_SW_STREAM, - enable ? 
OV2680_START_STREAMING : - OV2680_STOP_STREAMING); -#if 0 - /* restore settings */ - ov2680_res = ov2680_res_preview; - N_RES = N_RES_PREVIEW; -#endif - - //otp valid at stream on state - //if(!dev->otp_data) - // dev->otp_data = ov2680_otp_read(sd); - - mutex_unlock(&dev->input_lock); - - return ret; -} - - -static int ov2680_s_config(struct v4l2_subdev *sd, - int irq, void *platform_data) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - if (!platform_data) - return -ENODEV; - - dev->platform_data = - (struct camera_sensor_platform_data *)platform_data; - - mutex_lock(&dev->input_lock); - /* power off the module, then power on it in future - * as first power on by board may not fulfill the - * power on sequqence needed by the module - */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "ov2680 power-off err.\n"); - goto fail_power_off; - } - - ret = power_up(sd); - if (ret) { - dev_err(&client->dev, "ov2680 power-up err.\n"); - goto fail_power_on; - } - - ret = dev->platform_data->csi_cfg(sd, 1); - if (ret) - goto fail_csi_cfg; - - /* config & detect sensor */ - ret = ov2680_detect(client); - if (ret) { - dev_err(&client->dev, "ov2680_detect err s_config.\n"); - goto fail_csi_cfg; - } - - /* turn off sensor, after probed */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "ov2680 power-off err.\n"); - goto fail_csi_cfg; - } - mutex_unlock(&dev->input_lock); - - return 0; - -fail_csi_cfg: - dev->platform_data->csi_cfg(sd, 0); -fail_power_on: - power_down(sd); - dev_err(&client->dev, "sensor power-gating failed\n"); -fail_power_off: - mutex_unlock(&dev->input_lock); - return ret; -} - -static int ov2680_g_frame_interval(struct v4l2_subdev *sd, - struct v4l2_subdev_frame_interval *interval) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - - interval->interval.numerator = 1; - interval->interval.denominator = ov2680_res[dev->fmt_idx].fps; - - return 0; -} - 
-static int ov2680_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->index >= MAX_FMTS) - return -EINVAL; - - code->code = MEDIA_BUS_FMT_SBGGR10_1X10; - return 0; -} - -static int ov2680_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) -{ - int index = fse->index; - - if (index >= N_RES) - return -EINVAL; - - fse->min_width = ov2680_res[index].width; - fse->min_height = ov2680_res[index].height; - fse->max_width = ov2680_res[index].width; - fse->max_height = ov2680_res[index].height; - - return 0; - -} - -static int ov2680_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) -{ - struct ov2680_device *dev = to_ov2680_sensor(sd); - - mutex_lock(&dev->input_lock); - *frames = ov2680_res[dev->fmt_idx].skip_frames; - mutex_unlock(&dev->input_lock); - - return 0; -} - -static const struct v4l2_subdev_video_ops ov2680_video_ops = { - .s_stream = ov2680_s_stream, - .g_frame_interval = ov2680_g_frame_interval, -}; - -static const struct v4l2_subdev_sensor_ops ov2680_sensor_ops = { - .g_skip_frames = ov2680_g_skip_frames, -}; - -static const struct v4l2_subdev_core_ops ov2680_core_ops = { - .s_power = ov2680_s_power, - .ioctl = ov2680_ioctl, -}; - -static const struct v4l2_subdev_pad_ops ov2680_pad_ops = { - .enum_mbus_code = ov2680_enum_mbus_code, - .enum_frame_size = ov2680_enum_frame_size, - .get_fmt = ov2680_get_fmt, - .set_fmt = ov2680_set_fmt, -}; - -static const struct v4l2_subdev_ops ov2680_ops = { - .core = &ov2680_core_ops, - .video = &ov2680_video_ops, - .pad = &ov2680_pad_ops, - .sensor = &ov2680_sensor_ops, -}; - -static int ov2680_remove(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct ov2680_device *dev = to_ov2680_sensor(sd); - dev_dbg(&client->dev, "ov2680_remove...\n"); - - dev->platform_data->csi_cfg(sd, 0); - - v4l2_device_unregister_subdev(sd); - 
media_entity_cleanup(&dev->sd.entity); - v4l2_ctrl_handler_free(&dev->ctrl_handler); - kfree(dev); - - return 0; -} - -static int ov2680_probe(struct i2c_client *client) -{ - struct ov2680_device *dev; - int ret; - void *pdata; - unsigned int i; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - mutex_init(&dev->input_lock); - - dev->fmt_idx = 0; - v4l2_i2c_subdev_init(&(dev->sd), client, &ov2680_ops); - - pdata = gmin_camera_platform_data(&dev->sd, - ATOMISP_INPUT_FORMAT_RAW_10, - atomisp_bayer_order_bggr); - if (!pdata) { - ret = -EINVAL; - goto out_free; - } - - ret = ov2680_s_config(&dev->sd, client->irq, pdata); - if (ret) - goto out_free; - - ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA); - if (ret) - goto out_free; - - dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - dev->pad.flags = MEDIA_PAD_FL_SOURCE; - dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10; - dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; - ret = - v4l2_ctrl_handler_init(&dev->ctrl_handler, - ARRAY_SIZE(ov2680_controls)); - if (ret) { - ov2680_remove(client); - return ret; - } - - for (i = 0; i < ARRAY_SIZE(ov2680_controls); i++) - v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov2680_controls[i], - NULL); - - if (dev->ctrl_handler.error) { - ov2680_remove(client); - return dev->ctrl_handler.error; - } - - /* Use same lock for controls as for everything else. 
*/ - dev->ctrl_handler.lock = &dev->input_lock; - dev->sd.ctrl_handler = &dev->ctrl_handler; - - ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); - if (ret) - { - ov2680_remove(client); - dev_dbg(&client->dev, "+++ remove ov2680 \n"); - } - return ret; -out_free: - dev_dbg(&client->dev, "+++ out free \n"); - v4l2_device_unregister_subdev(&dev->sd); - kfree(dev); - return ret; -} - -static const struct acpi_device_id ov2680_acpi_match[] = { - {"XXOV2680"}, - {"OVTI2680"}, - {}, -}; -MODULE_DEVICE_TABLE(acpi, ov2680_acpi_match); - -static struct i2c_driver ov2680_driver = { - .driver = { - .name = "ov2680", - .acpi_match_table = ov2680_acpi_match, - }, - .probe_new = ov2680_probe, - .remove = ov2680_remove, -}; -module_i2c_driver(ov2680_driver); - -MODULE_AUTHOR("Jacky Wang "); -MODULE_DESCRIPTION("A low-level driver for OmniVision 2680 sensors"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c deleted file mode 100644 index a362eebd882f..000000000000 --- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c +++ /dev/null @@ -1,1271 +0,0 @@ -/* - * Support for OmniVision OV2722 1080p HD camera sensor. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../include/linux/atomisp_gmin_platform.h" -#include -#include - -#include "ov2722.h" - -/* i2c read/write stuff */ -static int ov2722_read_reg(struct i2c_client *client, - u16 data_length, u16 reg, u16 *val) -{ - int err; - struct i2c_msg msg[2]; - unsigned char data[6]; - - if (!client->adapter) { - dev_err(&client->dev, "%s error, no client->adapter\n", - __func__); - return -ENODEV; - } - - if (data_length != OV2722_8BIT && data_length != OV2722_16BIT - && data_length != OV2722_32BIT) { - dev_err(&client->dev, "%s error, invalid data length\n", - __func__); - return -EINVAL; - } - - memset(msg, 0 , sizeof(msg)); - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = I2C_MSG_LENGTH; - msg[0].buf = data; - - /* high byte goes out first */ - data[0] = (u8)(reg >> 8); - data[1] = (u8)(reg & 0xff); - - msg[1].addr = client->addr; - msg[1].len = data_length; - msg[1].flags = I2C_M_RD; - msg[1].buf = data; - - err = i2c_transfer(client->adapter, msg, 2); - if (err != 2) { - if (err >= 0) - err = -EIO; - dev_err(&client->dev, - "read from offset 0x%x error %d", reg, err); - return err; - } - - *val = 0; - /* high byte comes first */ - if (data_length == OV2722_8BIT) - *val = (u8)data[0]; - else if (data_length == OV2722_16BIT) - *val = be16_to_cpu(*(__be16 *)&data[0]); - else - *val = be32_to_cpu(*(__be32 *)&data[0]); - - return 0; -} - -static int ov2722_i2c_write(struct i2c_client *client, u16 len, u8 *data) -{ - struct i2c_msg msg; - const int num_msg = 1; - int ret; - - msg.addr = client->addr; - msg.flags = 0; - msg.len = len; - msg.buf = data; - ret = i2c_transfer(client->adapter, &msg, 1); - - return ret == num_msg ? 
0 : -EIO; -} - -static int ov2722_write_reg(struct i2c_client *client, u16 data_length, - u16 reg, u16 val) -{ - int ret; - unsigned char data[4] = {0}; - __be16 *wreg = (__be16 *)data; - const u16 len = data_length + sizeof(u16); /* 16-bit address + data */ - - if (data_length != OV2722_8BIT && data_length != OV2722_16BIT) { - dev_err(&client->dev, - "%s error, invalid data_length\n", __func__); - return -EINVAL; - } - - /* high byte goes out first */ - *wreg = cpu_to_be16(reg); - - if (data_length == OV2722_8BIT) { - data[2] = (u8)(val); - } else { - /* OV2722_16BIT */ - __be16 *wdata = (__be16 *)&data[2]; - - *wdata = cpu_to_be16(val); - } - - ret = ov2722_i2c_write(client, len, data); - if (ret) - dev_err(&client->dev, - "write error: wrote 0x%x to offset 0x%x error %d", - val, reg, ret); - - return ret; -} - -/* - * ov2722_write_reg_array - Initializes a list of OV2722 registers - * @client: i2c driver client structure - * @reglist: list of registers to be written - * - * This function initializes a list of registers. When consecutive addresses - * are found in a row on the list, this function creates a buffer and sends - * consecutive data in a single i2c_transfer(). - * - * __ov2722_flush_reg_array, __ov2722_buf_reg_array() and - * __ov2722_write_reg_is_consecutive() are internal functions to - * ov2722_write_reg_array_fast() and should be not used anywhere else. 
- * - */ - -static int __ov2722_flush_reg_array(struct i2c_client *client, - struct ov2722_write_ctrl *ctrl) -{ - u16 size; - __be16 *data16 = (void *)&ctrl->buffer.addr; - - if (ctrl->index == 0) - return 0; - - size = sizeof(u16) + ctrl->index; /* 16-bit address + data */ - *data16 = cpu_to_be16(ctrl->buffer.addr); - ctrl->index = 0; - - return ov2722_i2c_write(client, size, (u8 *)&ctrl->buffer); -} - -static int __ov2722_buf_reg_array(struct i2c_client *client, - struct ov2722_write_ctrl *ctrl, - const struct ov2722_reg *next) -{ - int size; - __be16 *data16; - - switch (next->type) { - case OV2722_8BIT: - size = 1; - ctrl->buffer.data[ctrl->index] = (u8)next->val; - break; - case OV2722_16BIT: - size = 2; - data16 = (void *)&ctrl->buffer.data[ctrl->index]; - *data16 = cpu_to_be16((u16)next->val); - break; - default: - return -EINVAL; - } - - /* When first item is added, we need to store its starting address */ - if (ctrl->index == 0) - ctrl->buffer.addr = next->reg; - - ctrl->index += size; - - /* - * Buffer cannot guarantee free space for u32? Better flush it to avoid - * possible lack of memory for next item. 
- */ - if (ctrl->index + sizeof(u16) >= OV2722_MAX_WRITE_BUF_SIZE) - return __ov2722_flush_reg_array(client, ctrl); - - return 0; -} - -static int __ov2722_write_reg_is_consecutive(struct i2c_client *client, - struct ov2722_write_ctrl *ctrl, - const struct ov2722_reg *next) -{ - if (ctrl->index == 0) - return 1; - - return ctrl->buffer.addr + ctrl->index == next->reg; -} - -static int ov2722_write_reg_array(struct i2c_client *client, - const struct ov2722_reg *reglist) -{ - const struct ov2722_reg *next = reglist; - struct ov2722_write_ctrl ctrl; - int err; - - ctrl.index = 0; - for (; next->type != OV2722_TOK_TERM; next++) { - switch (next->type & OV2722_TOK_MASK) { - case OV2722_TOK_DELAY: - err = __ov2722_flush_reg_array(client, &ctrl); - if (err) - return err; - msleep(next->val); - break; - default: - /* - * If next address is not consecutive, data needs to be - * flushed before proceed. - */ - if (!__ov2722_write_reg_is_consecutive(client, &ctrl, - next)) { - err = __ov2722_flush_reg_array(client, &ctrl); - if (err) - return err; - } - err = __ov2722_buf_reg_array(client, &ctrl, next); - if (err) { - dev_err(&client->dev, "%s: write error, aborted\n", - __func__); - return err; - } - break; - } - } - - return __ov2722_flush_reg_array(client, &ctrl); -} -static int ov2722_g_focal(struct v4l2_subdev *sd, s32 *val) -{ - *val = (OV2722_FOCAL_LENGTH_NUM << 16) | OV2722_FOCAL_LENGTH_DEM; - return 0; -} - -static int ov2722_g_fnumber(struct v4l2_subdev *sd, s32 *val) -{ - /*const f number for imx*/ - *val = (OV2722_F_NUMBER_DEFAULT_NUM << 16) | OV2722_F_NUMBER_DEM; - return 0; -} - -static int ov2722_g_fnumber_range(struct v4l2_subdev *sd, s32 *val) -{ - *val = (OV2722_F_NUMBER_DEFAULT_NUM << 24) | - (OV2722_F_NUMBER_DEM << 16) | - (OV2722_F_NUMBER_DEFAULT_NUM << 8) | OV2722_F_NUMBER_DEM; - return 0; -} - -static int ov2722_get_intg_factor(struct i2c_client *client, - struct camera_mipi_info *info, - const struct ov2722_resolution *res) -{ - struct v4l2_subdev *sd = 
i2c_get_clientdata(client); - struct ov2722_device *dev = NULL; - struct atomisp_sensor_mode_data *buf = &info->data; - const unsigned int ext_clk_freq_hz = 19200000; - const unsigned int pll_invariant_div = 10; - unsigned int pix_clk_freq_hz; - u16 pre_pll_clk_div; - u16 pll_multiplier; - u16 op_pix_clk_div; - u16 reg_val; - int ret; - - if (!info) - return -EINVAL; - - dev = to_ov2722_sensor(sd); - - /* pixel clock calculattion */ - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_PLL_CTRL3, &pre_pll_clk_div); - if (ret) - return ret; - - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_PLL_MULTIPLIER, &pll_multiplier); - if (ret) - return ret; - - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_PLL_DEBUG_OPT, &op_pix_clk_div); - if (ret) - return ret; - - pre_pll_clk_div = (pre_pll_clk_div & 0x70) >> 4; - if (0 == pre_pll_clk_div) - return -EINVAL; - - pll_multiplier = pll_multiplier & 0x7f; - op_pix_clk_div = op_pix_clk_div & 0x03; - pix_clk_freq_hz = ext_clk_freq_hz / pre_pll_clk_div * pll_multiplier - * op_pix_clk_div / pll_invariant_div; - - dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz; - buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz; - - /* get integration time */ - buf->coarse_integration_time_min = OV2722_COARSE_INTG_TIME_MIN; - buf->coarse_integration_time_max_margin = - OV2722_COARSE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_min = OV2722_FINE_INTG_TIME_MIN; - buf->fine_integration_time_max_margin = - OV2722_FINE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_def = OV2722_FINE_INTG_TIME_MIN; - buf->frame_length_lines = res->lines_per_frame; - buf->line_length_pck = res->pixels_per_line; - buf->read_mode = res->bin_mode; - - /* get the cropping and output resolution to ISP for this mode. 
*/ - ret = ov2722_read_reg(client, OV2722_16BIT, - OV2722_H_CROP_START_H, ®_val); - if (ret) - return ret; - buf->crop_horizontal_start = reg_val; - - ret = ov2722_read_reg(client, OV2722_16BIT, - OV2722_V_CROP_START_H, ®_val); - if (ret) - return ret; - buf->crop_vertical_start = reg_val; - - ret = ov2722_read_reg(client, OV2722_16BIT, - OV2722_H_CROP_END_H, ®_val); - if (ret) - return ret; - buf->crop_horizontal_end = reg_val; - - ret = ov2722_read_reg(client, OV2722_16BIT, - OV2722_V_CROP_END_H, ®_val); - if (ret) - return ret; - buf->crop_vertical_end = reg_val; - - ret = ov2722_read_reg(client, OV2722_16BIT, - OV2722_H_OUTSIZE_H, ®_val); - if (ret) - return ret; - buf->output_width = reg_val; - - ret = ov2722_read_reg(client, OV2722_16BIT, - OV2722_V_OUTSIZE_H, ®_val); - if (ret) - return ret; - buf->output_height = reg_val; - - buf->binning_factor_x = res->bin_factor_x ? - res->bin_factor_x : 1; - buf->binning_factor_y = res->bin_factor_y ? - res->bin_factor_y : 1; - return 0; -} - -static long __ov2722_set_exposure(struct v4l2_subdev *sd, int coarse_itg, - int gain, int digitgain) - -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov2722_device *dev = to_ov2722_sensor(sd); - u16 hts, vts; - int ret; - - dev_dbg(&client->dev, "set_exposure without group hold\n"); - - /* clear VTS_DIFF on manual mode */ - ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_VTS_DIFF_H, 0); - if (ret) - return ret; - - hts = dev->pixels_per_line; - vts = dev->lines_per_frame; - - if ((coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN) > vts) - vts = coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN; - - coarse_itg <<= 4; - digitgain <<= 2; - - ret = ov2722_write_reg(client, OV2722_16BIT, - OV2722_VTS_H, vts); - if (ret) - return ret; - - ret = ov2722_write_reg(client, OV2722_16BIT, - OV2722_HTS_H, hts); - if (ret) - return ret; - - /* set exposure */ - ret = ov2722_write_reg(client, OV2722_8BIT, - OV2722_AEC_PK_EXPO_L, - coarse_itg & 0xff); - if (ret) - return ret; 
- - ret = ov2722_write_reg(client, OV2722_16BIT, - OV2722_AEC_PK_EXPO_H, - (coarse_itg >> 8) & 0xfff); - if (ret) - return ret; - - /* set analog gain */ - ret = ov2722_write_reg(client, OV2722_16BIT, - OV2722_AGC_ADJ_H, gain); - if (ret) - return ret; - - /* set digital gain */ - ret = ov2722_write_reg(client, OV2722_16BIT, - OV2722_MWB_GAIN_R_H, digitgain); - if (ret) - return ret; - - ret = ov2722_write_reg(client, OV2722_16BIT, - OV2722_MWB_GAIN_G_H, digitgain); - if (ret) - return ret; - - ret = ov2722_write_reg(client, OV2722_16BIT, - OV2722_MWB_GAIN_B_H, digitgain); - - return ret; -} - -static int ov2722_set_exposure(struct v4l2_subdev *sd, int exposure, - int gain, int digitgain) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - int ret; - - mutex_lock(&dev->input_lock); - ret = __ov2722_set_exposure(sd, exposure, gain, digitgain); - mutex_unlock(&dev->input_lock); - - return ret; -} - -static long ov2722_s_exposure(struct v4l2_subdev *sd, - struct atomisp_exposure *exposure) -{ - int exp = exposure->integration_time[0]; - int gain = exposure->gain[0]; - int digitgain = exposure->gain[1]; - - /* we should not accept the invalid value below. */ - if (gain == 0) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - v4l2_err(client, "%s: invalid value\n", __func__); - return -EINVAL; - } - - return ov2722_set_exposure(sd, exp, gain, digitgain); -} - -static long ov2722_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) -{ - - switch (cmd) { - case ATOMISP_IOC_S_EXPOSURE: - return ov2722_s_exposure(sd, arg); - default: - return -EINVAL; - } - return 0; -} - -/* This returns the exposure time being used. This should only be used - * for filling in EXIF data, not for actual image processing. 
- */ -static int ov2722_q_exposure(struct v4l2_subdev *sd, s32 *value) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - u16 reg_v, reg_v2; - int ret; - - /* get exposure */ - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_AEC_PK_EXPO_L, - ®_v); - if (ret) - goto err; - - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_AEC_PK_EXPO_M, - ®_v2); - if (ret) - goto err; - - reg_v += reg_v2 << 8; - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_AEC_PK_EXPO_H, - ®_v2); - if (ret) - goto err; - - *value = reg_v + (((u32)reg_v2 << 16)); -err: - return ret; -} - -static int ov2722_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct ov2722_device *dev = - container_of(ctrl->handler, struct ov2722_device, ctrl_handler); - int ret = 0; - unsigned int val; - switch (ctrl->id) { - case V4L2_CID_EXPOSURE_ABSOLUTE: - ret = ov2722_q_exposure(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FOCAL_ABSOLUTE: - ret = ov2722_g_focal(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_ABSOLUTE: - ret = ov2722_g_fnumber(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_RANGE: - ret = ov2722_g_fnumber_range(&dev->sd, &ctrl->val); - break; - case V4L2_CID_LINK_FREQ: - val = ov2722_res[dev->fmt_idx].mipi_freq; - if (val == 0) - return -EINVAL; - - ctrl->val = val * 1000; /* To Hz */ - break; - default: - ret = -EINVAL; - } - - return ret; -} - -static const struct v4l2_ctrl_ops ctrl_ops = { - .g_volatile_ctrl = ov2722_g_volatile_ctrl -}; - -static const struct v4l2_ctrl_config ov2722_controls[] = { - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "exposure", - .min = 0x0, - .max = 0xffff, - .step = 0x01, - .def = 0x00, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCAL_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "focal length", - .min = OV2722_FOCAL_LENGTH_DEFAULT, - .max = OV2722_FOCAL_LENGTH_DEFAULT, - .step = 0x01, - .def = OV2722_FOCAL_LENGTH_DEFAULT, - .flags = 0, - }, - { - 
.ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number", - .min = OV2722_F_NUMBER_DEFAULT, - .max = OV2722_F_NUMBER_DEFAULT, - .step = 0x01, - .def = OV2722_F_NUMBER_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_RANGE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number range", - .min = OV2722_F_NUMBER_RANGE, - .max = OV2722_F_NUMBER_RANGE, - .step = 0x01, - .def = OV2722_F_NUMBER_RANGE, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_LINK_FREQ, - .name = "Link Frequency", - .type = V4L2_CTRL_TYPE_INTEGER, - .min = 1, - .max = 1500000 * 1000, - .step = 1, - .def = 1, - .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY, - }, -}; - -static int ov2722_init(struct v4l2_subdev *sd) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - - mutex_lock(&dev->input_lock); - - /* restore settings */ - ov2722_res = ov2722_res_preview; - N_RES = N_RES_PREVIEW; - - mutex_unlock(&dev->input_lock); - - return 0; -} - -static int power_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret = -1; - struct ov2722_device *dev = to_ov2722_sensor(sd); - - if (!dev || !dev->platform_data) - return -ENODEV; - - if (flag) { - ret = dev->platform_data->v1p8_ctrl(sd, 1); - if (ret == 0) { - ret = dev->platform_data->v2p8_ctrl(sd, 1); - if (ret) - dev->platform_data->v1p8_ctrl(sd, 0); - } - } else { - ret = dev->platform_data->v1p8_ctrl(sd, 0); - ret |= dev->platform_data->v2p8_ctrl(sd, 0); - } - - return ret; -} - -static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - int ret = -1; - - if (!dev || !dev->platform_data) - return -ENODEV; - - /* Note: the GPIO order is asymmetric: always RESET# - * before PWDN# when turning it on or off. 
- */ - ret = dev->platform_data->gpio0_ctrl(sd, flag); - /* - *ov2722 PWDN# active high when pull down,opposite to the convention - */ - ret |= dev->platform_data->gpio1_ctrl(sd, !flag); - return ret; -} - -static int power_up(struct v4l2_subdev *sd) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - - /* power control */ - ret = power_ctrl(sd, 1); - if (ret) - goto fail_power; - - /* according to DS, at least 5ms is needed between DOVDD and PWDN */ - usleep_range(5000, 6000); - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 1); - if (ret) { - ret = gpio_ctrl(sd, 0); - if (ret) - goto fail_power; - } - - /* flis clock control */ - ret = dev->platform_data->flisclk_ctrl(sd, 1); - if (ret) - goto fail_clk; - - /* according to DS, 20ms is needed between PWDN and i2c access */ - msleep(20); - - return 0; - -fail_clk: - gpio_ctrl(sd, 0); -fail_power: - power_ctrl(sd, 0); - dev_err(&client->dev, "sensor power-up failed\n"); - - return ret; -} - -static int power_down(struct v4l2_subdev *sd) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - - ret = dev->platform_data->flisclk_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "flisclk failed\n"); - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 0); - if (ret) { - ret = gpio_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "gpio failed 2\n"); - } - - /* power control */ - ret = power_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "vprog failed.\n"); - - return ret; -} - -static int ov2722_s_power(struct v4l2_subdev *sd, int on) -{ - int ret; - if (on == 0) - return power_down(sd); - else { - ret = power_up(sd); - if (!ret) - return ov2722_init(sd); - } - return ret; -} 
- -/* - * distance - calculate the distance - * @res: resolution - * @w: width - * @h: height - * - * Get the gap between resolution and w/h. - * res->width/height smaller than w/h wouldn't be considered. - * Returns the value of gap or -1 if fail. - */ -#define LARGEST_ALLOWED_RATIO_MISMATCH 800 -static int distance(struct ov2722_resolution *res, u32 w, u32 h) -{ - unsigned int w_ratio = (res->width << 13) / w; - unsigned int h_ratio; - int match; - - if (h == 0) - return -1; - h_ratio = (res->height << 13) / h; - if (h_ratio == 0) - return -1; - match = abs(((w_ratio << 13) / h_ratio) - 8192); - - if ((w_ratio < 8192) || (h_ratio < 8192) || - (match > LARGEST_ALLOWED_RATIO_MISMATCH)) - return -1; - - return w_ratio + h_ratio; -} - -/* Return the nearest higher resolution index */ -static int nearest_resolution_index(int w, int h) -{ - int i; - int idx = -1; - int dist; - int min_dist = INT_MAX; - struct ov2722_resolution *tmp_res = NULL; - - for (i = 0; i < N_RES; i++) { - tmp_res = &ov2722_res[i]; - dist = distance(tmp_res, w, h); - if (dist == -1) - continue; - if (dist < min_dist) { - min_dist = dist; - idx = i; - } - } - - return idx; -} - -static int get_resolution_index(int w, int h) -{ - int i; - - for (i = 0; i < N_RES; i++) { - if (w != ov2722_res[i].width) - continue; - if (h != ov2722_res[i].height) - continue; - - return i; - } - - return -1; -} - -/* TODO: remove it. 
*/ -static int startup(struct v4l2_subdev *sd) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - ret = ov2722_write_reg(client, OV2722_8BIT, - OV2722_SW_RESET, 0x01); - if (ret) { - dev_err(&client->dev, "ov2722 reset err.\n"); - return ret; - } - - ret = ov2722_write_reg_array(client, ov2722_res[dev->fmt_idx].regs); - if (ret) { - dev_err(&client->dev, "ov2722 write register err.\n"); - return ret; - } - - return ret; -} - -static int ov2722_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct ov2722_device *dev = to_ov2722_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct camera_mipi_info *ov2722_info = NULL; - int ret = 0; - int idx; - if (format->pad) - return -EINVAL; - if (!fmt) - return -EINVAL; - ov2722_info = v4l2_get_subdev_hostdata(sd); - if (!ov2722_info) - return -EINVAL; - - mutex_lock(&dev->input_lock); - idx = nearest_resolution_index(fmt->width, fmt->height); - if (idx == -1) { - /* return the largest resolution */ - fmt->width = ov2722_res[N_RES - 1].width; - fmt->height = ov2722_res[N_RES - 1].height; - } else { - fmt->width = ov2722_res[idx].width; - fmt->height = ov2722_res[idx].height; - } - fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *fmt; - mutex_unlock(&dev->input_lock); - return 0; - } - - dev->fmt_idx = get_resolution_index(fmt->width, fmt->height); - if (dev->fmt_idx == -1) { - dev_err(&client->dev, "get resolution fail\n"); - mutex_unlock(&dev->input_lock); - return -EINVAL; - } - - dev->pixels_per_line = ov2722_res[dev->fmt_idx].pixels_per_line; - dev->lines_per_frame = ov2722_res[dev->fmt_idx].lines_per_frame; - - ret = startup(sd); - if (ret) { - int i = 0; - dev_err(&client->dev, "ov2722 startup err, retry to power up\n"); - for (i = 0; i < 
OV2722_POWER_UP_RETRY_NUM; i++) { - dev_err(&client->dev, - "ov2722 retry to power up %d/%d times, result: ", - i + 1, OV2722_POWER_UP_RETRY_NUM); - power_down(sd); - ret = power_up(sd); - if (ret) { - dev_err(&client->dev, "power up failed, continue\n"); - continue; - } - ret = startup(sd); - if (ret) { - dev_err(&client->dev, " startup FAILED!\n"); - } else { - dev_err(&client->dev, " startup SUCCESS!\n"); - break; - } - } - if (ret) { - dev_err(&client->dev, "ov2722 startup err\n"); - goto err; - } - } - - ret = ov2722_get_intg_factor(client, ov2722_info, - &ov2722_res[dev->fmt_idx]); - if (ret) - dev_err(&client->dev, "failed to get integration_factor\n"); - -err: - mutex_unlock(&dev->input_lock); - return ret; -} -static int ov2722_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct ov2722_device *dev = to_ov2722_sensor(sd); - - if (format->pad) - return -EINVAL; - if (!fmt) - return -EINVAL; - - fmt->width = ov2722_res[dev->fmt_idx].width; - fmt->height = ov2722_res[dev->fmt_idx].height; - fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; - - return 0; -} - -static int ov2722_detect(struct i2c_client *client) -{ - struct i2c_adapter *adapter = client->adapter; - u16 high, low; - int ret; - u16 id; - u8 revision; - - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) - return -ENODEV; - - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_CHIP_ID_H, &high); - if (ret) { - dev_err(&client->dev, "sensor_id_high = 0x%x\n", high); - return -ENODEV; - } - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_CHIP_ID_L, &low); - id = (high << 8) | low; - - if ((id != OV2722_ID) && (id != OV2720_ID)) { - dev_err(&client->dev, "sensor ID error\n"); - return -ENODEV; - } - - ret = ov2722_read_reg(client, OV2722_8BIT, - OV2722_SC_CMMN_SUB_ID, &high); - revision = (u8) high & 0x0f; - - dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision); - 
dev_dbg(&client->dev, "detect ov2722 success\n"); - return 0; -} - -static int ov2722_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - mutex_lock(&dev->input_lock); - - ret = ov2722_write_reg(client, OV2722_8BIT, OV2722_SW_STREAM, - enable ? OV2722_START_STREAMING : - OV2722_STOP_STREAMING); - - mutex_unlock(&dev->input_lock); - return ret; -} - -static int ov2722_s_config(struct v4l2_subdev *sd, - int irq, void *platform_data) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - if (!platform_data) - return -ENODEV; - - dev->platform_data = - (struct camera_sensor_platform_data *)platform_data; - - mutex_lock(&dev->input_lock); - - /* power off the module, then power on it in future - * as first power on by board may not fulfill the - * power on sequqence needed by the module - */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "ov2722 power-off err.\n"); - goto fail_power_off; - } - - ret = power_up(sd); - if (ret) { - dev_err(&client->dev, "ov2722 power-up err.\n"); - goto fail_power_on; - } - - ret = dev->platform_data->csi_cfg(sd, 1); - if (ret) - goto fail_csi_cfg; - - /* config & detect sensor */ - ret = ov2722_detect(client); - if (ret) { - dev_err(&client->dev, "ov2722_detect err s_config.\n"); - goto fail_csi_cfg; - } - - /* turn off sensor, after probed */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "ov2722 power-off err.\n"); - goto fail_csi_cfg; - } - mutex_unlock(&dev->input_lock); - - return 0; - -fail_csi_cfg: - dev->platform_data->csi_cfg(sd, 0); -fail_power_on: - power_down(sd); - dev_err(&client->dev, "sensor power-gating failed\n"); -fail_power_off: - mutex_unlock(&dev->input_lock); - return ret; -} - -static int ov2722_g_frame_interval(struct v4l2_subdev *sd, - struct v4l2_subdev_frame_interval *interval) -{ - struct 
ov2722_device *dev = to_ov2722_sensor(sd); - - interval->interval.numerator = 1; - interval->interval.denominator = ov2722_res[dev->fmt_idx].fps; - - return 0; -} - -static int ov2722_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->index >= MAX_FMTS) - return -EINVAL; - - code->code = MEDIA_BUS_FMT_SBGGR10_1X10; - return 0; -} - -static int ov2722_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) -{ - int index = fse->index; - - if (index >= N_RES) - return -EINVAL; - - fse->min_width = ov2722_res[index].width; - fse->min_height = ov2722_res[index].height; - fse->max_width = ov2722_res[index].width; - fse->max_height = ov2722_res[index].height; - - return 0; - -} - - -static int ov2722_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) -{ - struct ov2722_device *dev = to_ov2722_sensor(sd); - - mutex_lock(&dev->input_lock); - *frames = ov2722_res[dev->fmt_idx].skip_frames; - mutex_unlock(&dev->input_lock); - - return 0; -} - -static const struct v4l2_subdev_sensor_ops ov2722_sensor_ops = { - .g_skip_frames = ov2722_g_skip_frames, -}; - -static const struct v4l2_subdev_video_ops ov2722_video_ops = { - .s_stream = ov2722_s_stream, - .g_frame_interval = ov2722_g_frame_interval, -}; - -static const struct v4l2_subdev_core_ops ov2722_core_ops = { - .s_power = ov2722_s_power, - .ioctl = ov2722_ioctl, -}; - -static const struct v4l2_subdev_pad_ops ov2722_pad_ops = { - .enum_mbus_code = ov2722_enum_mbus_code, - .enum_frame_size = ov2722_enum_frame_size, - .get_fmt = ov2722_get_fmt, - .set_fmt = ov2722_set_fmt, -}; - -static const struct v4l2_subdev_ops ov2722_ops = { - .core = &ov2722_core_ops, - .video = &ov2722_video_ops, - .pad = &ov2722_pad_ops, - .sensor = &ov2722_sensor_ops, -}; - -static int ov2722_remove(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct ov2722_device 
*dev = to_ov2722_sensor(sd); - dev_dbg(&client->dev, "ov2722_remove...\n"); - - dev->platform_data->csi_cfg(sd, 0); - v4l2_ctrl_handler_free(&dev->ctrl_handler); - v4l2_device_unregister_subdev(sd); - - atomisp_gmin_remove_subdev(sd); - - media_entity_cleanup(&dev->sd.entity); - kfree(dev); - - return 0; -} - -static int __ov2722_init_ctrl_handler(struct ov2722_device *dev) -{ - struct v4l2_ctrl_handler *hdl; - unsigned int i; - hdl = &dev->ctrl_handler; - v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ov2722_controls)); - for (i = 0; i < ARRAY_SIZE(ov2722_controls); i++) - v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov2722_controls[i], - NULL); - - dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_LINK_FREQ); - - if (dev->ctrl_handler.error || !dev->link_freq) - return dev->ctrl_handler.error; - - dev->sd.ctrl_handler = hdl; - - return 0; -} - -static int ov2722_probe(struct i2c_client *client) -{ - struct ov2722_device *dev; - void *ovpdev; - int ret; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - mutex_init(&dev->input_lock); - - dev->fmt_idx = 0; - v4l2_i2c_subdev_init(&(dev->sd), client, &ov2722_ops); - - ovpdev = gmin_camera_platform_data(&dev->sd, - ATOMISP_INPUT_FORMAT_RAW_10, - atomisp_bayer_order_grbg); - - ret = ov2722_s_config(&dev->sd, client->irq, ovpdev); - if (ret) - goto out_free; - - ret = __ov2722_init_ctrl_handler(dev); - if (ret) - goto out_ctrl_handler_free; - - dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - dev->pad.flags = MEDIA_PAD_FL_SOURCE; - dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10; - dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; - - ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); - if (ret) - ov2722_remove(client); - - return atomisp_register_i2c_module(&dev->sd, ovpdev, RAW_CAMERA); - -out_ctrl_handler_free: - v4l2_ctrl_handler_free(&dev->ctrl_handler); - -out_free: - v4l2_device_unregister_subdev(&dev->sd); - kfree(dev); - return ret; -} - -static const struct 
acpi_device_id ov2722_acpi_match[] = { - { "INT33FB" }, - {}, -}; -MODULE_DEVICE_TABLE(acpi, ov2722_acpi_match); - -static struct i2c_driver ov2722_driver = { - .driver = { - .name = "ov2722", - .acpi_match_table = ov2722_acpi_match, - }, - .probe_new = ov2722_probe, - .remove = ov2722_remove, -}; -module_i2c_driver(ov2722_driver); - -MODULE_AUTHOR("Wei Liu "); -MODULE_DESCRIPTION("A low-level driver for OmniVision 2722 sensors"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h deleted file mode 100644 index 70c252c5163c..000000000000 --- a/drivers/staging/media/atomisp/i2c/gc0310.h +++ /dev/null @@ -1,404 +0,0 @@ -/* - * Support for GalaxyCore GC0310 VGA camera sensor. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __GC0310_H__ -#define __GC0310_H__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../include/linux/atomisp_platform.h" - -/* Defines for register writes and register array processing */ -#define I2C_MSG_LENGTH 1 -#define I2C_RETRY_COUNT 5 - -#define GC0310_FOCAL_LENGTH_NUM 278 /*2.78mm*/ -#define GC0310_FOCAL_LENGTH_DEM 100 -#define GC0310_F_NUMBER_DEFAULT_NUM 26 -#define GC0310_F_NUMBER_DEM 10 - -#define MAX_FMTS 1 - -/* - * focal length bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define GC0310_FOCAL_LENGTH_DEFAULT 0x1160064 - -/* - * current f-number bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define GC0310_F_NUMBER_DEFAULT 0x1a000a - -/* - * f-number range bits definition: - * bits 31-24: max f-number numerator - * bits 23-16: max f-number denominator - * bits 15-8: min f-number numerator - * bits 7-0: min f-number denominator - */ -#define GC0310_F_NUMBER_RANGE 0x1a0a1a0a -#define GC0310_ID 0xa310 - -#define GC0310_RESET_RELATED 0xFE -#define GC0310_REGISTER_PAGE_0 0x0 -#define GC0310_REGISTER_PAGE_3 0x3 - -#define GC0310_FINE_INTG_TIME_MIN 0 -#define GC0310_FINE_INTG_TIME_MAX_MARGIN 0 -#define GC0310_COARSE_INTG_TIME_MIN 1 -#define GC0310_COARSE_INTG_TIME_MAX_MARGIN 6 - -/* - * GC0310 System control registers - */ -#define GC0310_SW_STREAM 0x10 - -#define GC0310_SC_CMMN_CHIP_ID_H 0xf0 -#define GC0310_SC_CMMN_CHIP_ID_L 0xf1 - -#define GC0310_AEC_PK_EXPO_H 0x03 -#define GC0310_AEC_PK_EXPO_L 0x04 -#define GC0310_AGC_ADJ 0x48 -#define GC0310_DGC_ADJ 0x71 -#if 0 -#define GC0310_GROUP_ACCESS 0x3208 -#endif - -#define GC0310_H_CROP_START_H 0x09 -#define GC0310_H_CROP_START_L 0x0A -#define GC0310_V_CROP_START_H 0x0B -#define GC0310_V_CROP_START_L 0x0C -#define GC0310_H_OUTSIZE_H 0x0F -#define GC0310_H_OUTSIZE_L 0x10 -#define GC0310_V_OUTSIZE_H 0x0D -#define GC0310_V_OUTSIZE_L 0x0E -#define 
GC0310_H_BLANKING_H 0x05 -#define GC0310_H_BLANKING_L 0x06 -#define GC0310_V_BLANKING_H 0x07 -#define GC0310_V_BLANKING_L 0x08 -#define GC0310_SH_DELAY 0x11 - -#define GC0310_START_STREAMING 0x94 /* 8-bit enable */ -#define GC0310_STOP_STREAMING 0x0 /* 8-bit disable */ - -#define GC0310_BIN_FACTOR_MAX 3 - -struct regval_list { - u16 reg_num; - u8 value; -}; - -struct gc0310_resolution { - u8 *desc; - const struct gc0310_reg *regs; - int res; - int width; - int height; - int fps; - int pix_clk_freq; - u32 skip_frames; - u16 pixels_per_line; - u16 lines_per_frame; - u8 bin_factor_x; - u8 bin_factor_y; - u8 bin_mode; - bool used; -}; - -struct gc0310_format { - u8 *desc; - u32 pixelformat; - struct gc0310_reg *regs; -}; - -/* - * gc0310 device structure. - */ -struct gc0310_device { - struct v4l2_subdev sd; - struct media_pad pad; - struct v4l2_mbus_framefmt format; - struct mutex input_lock; - struct v4l2_ctrl_handler ctrl_handler; - - struct camera_sensor_platform_data *platform_data; - int vt_pix_clk_freq_mhz; - int fmt_idx; - u8 res; - u8 type; -}; - -enum gc0310_tok_type { - GC0310_8BIT = 0x0001, - GC0310_TOK_TERM = 0xf000, /* terminating token for reg list */ - GC0310_TOK_DELAY = 0xfe00, /* delay token for reg list */ - GC0310_TOK_MASK = 0xfff0 -}; - -/** - * struct gc0310_reg - MI sensor register format - * @type: type of the register - * @reg: 16-bit offset to register - * @val: 8/16/32-bit register value - * - * Define a structure for sensor register initialization values - */ -struct gc0310_reg { - enum gc0310_tok_type type; - u8 reg; - u8 val; /* @set value for read/mod/write, @mask */ -}; - -#define to_gc0310_sensor(x) container_of(x, struct gc0310_device, sd) - -#define GC0310_MAX_WRITE_BUF_SIZE 30 - -struct gc0310_write_buffer { - u8 addr; - u8 data[GC0310_MAX_WRITE_BUF_SIZE]; -}; - -struct gc0310_write_ctrl { - int index; - struct gc0310_write_buffer buffer; -}; - -/* - * Register settings for various resolution - */ -static const struct gc0310_reg 
gc0310_reset_register[] = { -///////////////////////////////////////////////// -///////////////// system reg ///////////////// -///////////////////////////////////////////////// - {GC0310_8BIT, 0xfe, 0xf0}, - {GC0310_8BIT, 0xfe, 0xf0}, - {GC0310_8BIT, 0xfe, 0x00}, - - {GC0310_8BIT, 0xfc, 0x0e}, //4e - {GC0310_8BIT, 0xfc, 0x0e}, //16//4e // [0]apwd [6]regf_clk_gate - {GC0310_8BIT, 0xf2, 0x80}, //sync output - {GC0310_8BIT, 0xf3, 0x00}, //1f//01 data output - {GC0310_8BIT, 0xf7, 0x33}, //f9 - {GC0310_8BIT, 0xf8, 0x05}, //00 - {GC0310_8BIT, 0xf9, 0x0e}, // 0x8e //0f - {GC0310_8BIT, 0xfa, 0x11}, - -///////////////////////////////////////////////// -/////////////////// MIPI //////////////////// -///////////////////////////////////////////////// - {GC0310_8BIT, 0xfe, 0x03}, - {GC0310_8BIT, 0x01, 0x03}, ///mipi 1lane - {GC0310_8BIT, 0x02, 0x22}, // 0x33 - {GC0310_8BIT, 0x03, 0x94}, - {GC0310_8BIT, 0x04, 0x01}, // fifo_prog - {GC0310_8BIT, 0x05, 0x00}, //fifo_prog - {GC0310_8BIT, 0x06, 0x80}, //b0 //YUV ISP data - {GC0310_8BIT, 0x11, 0x2a},//1e //LDI set YUV422 - {GC0310_8BIT, 0x12, 0x90},//00 //04 //00 //04//00 //LWC[7:0] // - {GC0310_8BIT, 0x13, 0x02},//05 //05 //LWC[15:8] - {GC0310_8BIT, 0x15, 0x12}, // 0x10 //DPHYY_MODE read_ready - {GC0310_8BIT, 0x17, 0x01}, - {GC0310_8BIT, 0x40, 0x08}, - {GC0310_8BIT, 0x41, 0x00}, - {GC0310_8BIT, 0x42, 0x00}, - {GC0310_8BIT, 0x43, 0x00}, - {GC0310_8BIT, 0x21, 0x02}, // 0x01 - {GC0310_8BIT, 0x22, 0x02}, // 0x01 - {GC0310_8BIT, 0x23, 0x01}, // 0x05 //Nor:0x05 DOU:0x06 - {GC0310_8BIT, 0x29, 0x00}, - {GC0310_8BIT, 0x2A, 0x25}, // 0x05 //data zero 0x7a de - {GC0310_8BIT, 0x2B, 0x02}, - - {GC0310_8BIT, 0xfe, 0x00}, - -///////////////////////////////////////////////// -///////////////// CISCTL reg ///////////////// -///////////////////////////////////////////////// - {GC0310_8BIT, 0x00, 0x2f}, //2f//0f//02//01 - {GC0310_8BIT, 0x01, 0x0f}, //06 - {GC0310_8BIT, 0x02, 0x04}, - {GC0310_8BIT, 0x4f, 0x00}, //AEC 0FF - {GC0310_8BIT, 0x03, 0x01}, 
// 0x03 //04 - {GC0310_8BIT, 0x04, 0xc0}, // 0xe8 //58 - {GC0310_8BIT, 0x05, 0x00}, - {GC0310_8BIT, 0x06, 0xb2}, // 0x0a //HB - {GC0310_8BIT, 0x07, 0x00}, - {GC0310_8BIT, 0x08, 0x0c}, // 0x89 //VB - {GC0310_8BIT, 0x09, 0x00}, //row start - {GC0310_8BIT, 0x0a, 0x00}, // - {GC0310_8BIT, 0x0b, 0x00}, //col start - {GC0310_8BIT, 0x0c, 0x00}, - {GC0310_8BIT, 0x0d, 0x01}, //height - {GC0310_8BIT, 0x0e, 0xf2}, // 0xf7 //height - {GC0310_8BIT, 0x0f, 0x02}, //width - {GC0310_8BIT, 0x10, 0x94}, // 0xa0 //height - {GC0310_8BIT, 0x17, 0x14}, - {GC0310_8BIT, 0x18, 0x1a}, //0a//[4]double reset - {GC0310_8BIT, 0x19, 0x14}, //AD pipeline - {GC0310_8BIT, 0x1b, 0x48}, - {GC0310_8BIT, 0x1e, 0x6b}, //3b//col bias - {GC0310_8BIT, 0x1f, 0x28}, //20//00//08//txlow - {GC0310_8BIT, 0x20, 0x89}, //88//0c//[3:2]DA15 - {GC0310_8BIT, 0x21, 0x49}, //48//[3] txhigh - {GC0310_8BIT, 0x22, 0xb0}, - {GC0310_8BIT, 0x23, 0x04}, //[1:0]vcm_r - {GC0310_8BIT, 0x24, 0x16}, //15 - {GC0310_8BIT, 0x34, 0x20}, //[6:4] rsg high//range - -///////////////////////////////////////////////// -//////////////////// BLK //////////////////// -///////////////////////////////////////////////// - {GC0310_8BIT, 0x26, 0x23}, //[1]dark_current_en [0]offset_en - {GC0310_8BIT, 0x28, 0xff}, //BLK_limie_value - {GC0310_8BIT, 0x29, 0x00}, //global offset - {GC0310_8BIT, 0x33, 0x18}, //offset_ratio - {GC0310_8BIT, 0x37, 0x20}, //dark_current_ratio - {GC0310_8BIT, 0x2a, 0x00}, - {GC0310_8BIT, 0x2b, 0x00}, - {GC0310_8BIT, 0x2c, 0x00}, - {GC0310_8BIT, 0x2d, 0x00}, - {GC0310_8BIT, 0x2e, 0x00}, - {GC0310_8BIT, 0x2f, 0x00}, - {GC0310_8BIT, 0x30, 0x00}, - {GC0310_8BIT, 0x31, 0x00}, - {GC0310_8BIT, 0x47, 0x80}, //a7 - {GC0310_8BIT, 0x4e, 0x66}, //select_row - {GC0310_8BIT, 0xa8, 0x02}, //win_width_dark, same with crop_win_width - {GC0310_8BIT, 0xa9, 0x80}, - -///////////////////////////////////////////////// -////////////////// ISP reg /////////////////// -///////////////////////////////////////////////// - {GC0310_8BIT, 0x40, 0x06}, // 
0xff //ff //48 - {GC0310_8BIT, 0x41, 0x00}, // 0x21 //00//[0]curve_en - {GC0310_8BIT, 0x42, 0x04}, // 0xcf //0a//[1]awn_en - {GC0310_8BIT, 0x44, 0x18}, // 0x18 //02 - {GC0310_8BIT, 0x46, 0x02}, // 0x03 //sync - {GC0310_8BIT, 0x49, 0x03}, - {GC0310_8BIT, 0x4c, 0x20}, //00[5]pretect exp - {GC0310_8BIT, 0x50, 0x01}, //crop enable - {GC0310_8BIT, 0x51, 0x00}, - {GC0310_8BIT, 0x52, 0x00}, - {GC0310_8BIT, 0x53, 0x00}, - {GC0310_8BIT, 0x54, 0x01}, - {GC0310_8BIT, 0x55, 0x01}, //crop window height - {GC0310_8BIT, 0x56, 0xf0}, - {GC0310_8BIT, 0x57, 0x02}, //crop window width - {GC0310_8BIT, 0x58, 0x90}, - -///////////////////////////////////////////////// -/////////////////// GAIN //////////////////// -///////////////////////////////////////////////// - {GC0310_8BIT, 0x70, 0x70}, //70 //80//global gain - {GC0310_8BIT, 0x71, 0x20}, // pregain gain - {GC0310_8BIT, 0x72, 0x40}, // post gain - {GC0310_8BIT, 0x5a, 0x84}, //84//analog gain 0 - {GC0310_8BIT, 0x5b, 0xc9}, //c9 - {GC0310_8BIT, 0x5c, 0xed}, //ed//not use pga gain highest level - {GC0310_8BIT, 0x77, 0x40}, // R gain 0x74 //awb gain - {GC0310_8BIT, 0x78, 0x40}, // G gain - {GC0310_8BIT, 0x79, 0x40}, // B gain 0x5f - - {GC0310_8BIT, 0x48, 0x00}, - {GC0310_8BIT, 0xfe, 0x01}, - {GC0310_8BIT, 0x0a, 0x45}, //[7]col gain mode - - {GC0310_8BIT, 0x3e, 0x40}, - {GC0310_8BIT, 0x3f, 0x5c}, - {GC0310_8BIT, 0x40, 0x7b}, - {GC0310_8BIT, 0x41, 0xbd}, - {GC0310_8BIT, 0x42, 0xf6}, - {GC0310_8BIT, 0x43, 0x63}, - {GC0310_8BIT, 0x03, 0x60}, - {GC0310_8BIT, 0x44, 0x03}, - -///////////////////////////////////////////////// -///////////////// dark sun ////////////////// -///////////////////////////////////////////////// - {GC0310_8BIT, 0xfe, 0x01}, - {GC0310_8BIT, 0x45, 0xa4}, // 0xf7 - {GC0310_8BIT, 0x46, 0xf0}, // 0xff //f0//sun vaule th - {GC0310_8BIT, 0x48, 0x03}, //sun mode - {GC0310_8BIT, 0x4f, 0x60}, //sun_clamp - {GC0310_8BIT, 0xfe, 0x00}, - - {GC0310_TOK_TERM, 0, 0}, -}; - -static struct gc0310_reg const gc0310_VGA_30fps[] = { - 
{GC0310_8BIT, 0xfe, 0x00}, - {GC0310_8BIT, 0x0d, 0x01}, //height - {GC0310_8BIT, 0x0e, 0xf2}, // 0xf7 //height - {GC0310_8BIT, 0x0f, 0x02}, //width - {GC0310_8BIT, 0x10, 0x94}, // 0xa0 //height - - {GC0310_8BIT, 0x50, 0x01}, //crop enable - {GC0310_8BIT, 0x51, 0x00}, - {GC0310_8BIT, 0x52, 0x00}, - {GC0310_8BIT, 0x53, 0x00}, - {GC0310_8BIT, 0x54, 0x01}, - {GC0310_8BIT, 0x55, 0x01}, //crop window height - {GC0310_8BIT, 0x56, 0xf0}, - {GC0310_8BIT, 0x57, 0x02}, //crop window width - {GC0310_8BIT, 0x58, 0x90}, - - {GC0310_8BIT, 0xfe, 0x03}, - {GC0310_8BIT, 0x12, 0x90},//00 //04 //00 //04//00 //LWC[7:0] // - {GC0310_8BIT, 0x13, 0x02},//05 //05 //LWC[15:8] - - {GC0310_8BIT, 0xfe, 0x00}, - - {GC0310_TOK_TERM, 0, 0}, -}; - -static struct gc0310_resolution gc0310_res_preview[] = { - { - .desc = "gc0310_VGA_30fps", - .width = 656, // 648, - .height = 496, // 488, - .fps = 30, - //.pix_clk_freq = 73, - .used = 0, -#if 0 - .pixels_per_line = 0x0314, - .lines_per_frame = 0x0213, -#endif - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 2, - .regs = gc0310_VGA_30fps, - }, -}; -#define N_RES_PREVIEW (ARRAY_SIZE(gc0310_res_preview)) - -static struct gc0310_resolution *gc0310_res = gc0310_res_preview; -static unsigned long N_RES = N_RES_PREVIEW; -#endif - diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h deleted file mode 100644 index 54bf7812b27a..000000000000 --- a/drivers/staging/media/atomisp/i2c/gc2235.h +++ /dev/null @@ -1,677 +0,0 @@ -/* - * Support for GalaxyCore GC2235 2M camera sensor. - * - * Copyright (c) 2014 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. - * - */ - -#ifndef __GC2235_H__ -#define __GC2235_H__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../include/linux/atomisp_platform.h" - -/* - * FIXME: non-preview resolutions are currently broken - */ -#define ENABLE_NON_PREVIEW 0 - -/* Defines for register writes and register array processing */ -#define I2C_MSG_LENGTH 0x2 -#define I2C_RETRY_COUNT 5 - -#define GC2235_FOCAL_LENGTH_NUM 278 /*2.78mm*/ -#define GC2235_FOCAL_LENGTH_DEM 100 -#define GC2235_F_NUMBER_DEFAULT_NUM 26 -#define GC2235_F_NUMBER_DEM 10 - -#define MAX_FMTS 1 - -/* - * focal length bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define GC2235_FOCAL_LENGTH_DEFAULT 0x1160064 - -/* - * current f-number bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define GC2235_F_NUMBER_DEFAULT 0x1a000a - -/* - * f-number range bits definition: - * bits 31-24: max f-number numerator - * bits 23-16: max f-number denominator - * bits 15-8: min f-number numerator - * bits 7-0: min f-number denominator - */ -#define GC2235_F_NUMBER_RANGE 0x1a0a1a0a -#define GC2235_ID 0x2235 - -#define GC2235_FINE_INTG_TIME_MIN 0 -#define GC2235_FINE_INTG_TIME_MAX_MARGIN 0 -#define GC2235_COARSE_INTG_TIME_MIN 1 -#define GC2235_COARSE_INTG_TIME_MAX_MARGIN 6 - -/* - * GC2235 System control registers - */ -/* - * GC2235 System control registers - */ -#define GC2235_SENSOR_ID_H 0xF0 -#define GC2235_SENSOR_ID_L 0xF1 -#define GC2235_RESET_RELATED 0xFE -#define GC2235_SW_RESET 0x8 -#define GC2235_MIPI_RESET 0x3 -#define GC2235_RESET_BIT 0x4 -#define 
GC2235_REGISTER_PAGE_0 0x0 -#define GC2235_REGISTER_PAGE_3 0x3 - -#define GC2235_V_CROP_START_H 0x91 -#define GC2235_V_CROP_START_L 0x92 -#define GC2235_H_CROP_START_H 0x93 -#define GC2235_H_CROP_START_L 0x94 -#define GC2235_V_OUTSIZE_H 0x95 -#define GC2235_V_OUTSIZE_L 0x96 -#define GC2235_H_OUTSIZE_H 0x97 -#define GC2235_H_OUTSIZE_L 0x98 - -#define GC2235_HB_H 0x5 -#define GC2235_HB_L 0x6 -#define GC2235_VB_H 0x7 -#define GC2235_VB_L 0x8 -#define GC2235_SH_DELAY_H 0x11 -#define GC2235_SH_DELAY_L 0x12 - -#define GC2235_CSI2_MODE 0x10 - -#define GC2235_EXPOSURE_H 0x3 -#define GC2235_EXPOSURE_L 0x4 -#define GC2235_GLOBAL_GAIN 0xB0 -#define GC2235_PRE_GAIN 0xB1 -#define GC2235_AWB_R_GAIN 0xB3 -#define GC2235_AWB_G_GAIN 0xB4 -#define GC2235_AWB_B_GAIN 0xB5 - -#define GC2235_START_STREAMING 0x91 -#define GC2235_STOP_STREAMING 0x0 - -struct regval_list { - u16 reg_num; - u8 value; -}; - -struct gc2235_resolution { - u8 *desc; - const struct gc2235_reg *regs; - int res; - int width; - int height; - int fps; - int pix_clk_freq; - u32 skip_frames; - u16 pixels_per_line; - u16 lines_per_frame; - u8 bin_factor_x; - u8 bin_factor_y; - u8 bin_mode; - bool used; -}; - -struct gc2235_format { - u8 *desc; - u32 pixelformat; - struct gc2235_reg *regs; -}; - -/* - * gc2235 device structure. 
- */ -struct gc2235_device { - struct v4l2_subdev sd; - struct media_pad pad; - struct v4l2_mbus_framefmt format; - struct mutex input_lock; - struct v4l2_ctrl_handler ctrl_handler; - - struct camera_sensor_platform_data *platform_data; - int vt_pix_clk_freq_mhz; - int fmt_idx; - u8 res; - u8 type; -}; - -enum gc2235_tok_type { - GC2235_8BIT = 0x0001, - GC2235_16BIT = 0x0002, - GC2235_32BIT = 0x0004, - GC2235_TOK_TERM = 0xf000, /* terminating token for reg list */ - GC2235_TOK_DELAY = 0xfe00, /* delay token for reg list */ - GC2235_TOK_MASK = 0xfff0 -}; - -/** - * struct gc2235_reg - MI sensor register format - * @type: type of the register - * @reg: 8-bit offset to register - * @val: 8/16/32-bit register value - * - * Define a structure for sensor register initialization values - */ -struct gc2235_reg { - enum gc2235_tok_type type; - u8 reg; - u32 val; /* @set value for read/mod/write, @mask */ -}; - -#define to_gc2235_sensor(x) container_of(x, struct gc2235_device, sd) - -#define GC2235_MAX_WRITE_BUF_SIZE 30 - -struct gc2235_write_buffer { - u8 addr; - u8 data[GC2235_MAX_WRITE_BUF_SIZE]; -}; - -struct gc2235_write_ctrl { - int index; - struct gc2235_write_buffer buffer; -}; - -static struct gc2235_reg const gc2235_stream_on[] = { - { GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */ - { GC2235_8BIT, 0x10, 0x91}, /* start mipi */ - { GC2235_8BIT, 0xfe, 0x00}, /* switch to P0 */ - { GC2235_TOK_TERM, 0, 0 } -}; - -static struct gc2235_reg const gc2235_stream_off[] = { - { GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */ - { GC2235_8BIT, 0x10, 0x01}, /* stop mipi */ - { GC2235_8BIT, 0xfe, 0x00}, /* switch to P0 */ - { GC2235_TOK_TERM, 0, 0 } -}; - -static struct gc2235_reg const gc2235_init_settings[] = { - /* Sysytem */ - { GC2235_8BIT, 0xfe, 0x80 }, - { GC2235_8BIT, 0xfe, 0x80 }, - { GC2235_8BIT, 0xfe, 0x80 }, - { GC2235_8BIT, 0xf2, 0x00 }, - { GC2235_8BIT, 0xf6, 0x00 }, - { GC2235_8BIT, 0xfc, 0x06 }, - { GC2235_8BIT, 0xf7, 0x15 }, - { GC2235_8BIT, 0xf8, 0x84 }, - { 
GC2235_8BIT, 0xf9, 0xfe }, - { GC2235_8BIT, 0xfa, 0x00 }, - { GC2235_8BIT, 0xfe, 0x00 }, - /* Analog & cisctl */ - { GC2235_8BIT, 0x03, 0x04 }, - { GC2235_8BIT, 0x04, 0x9E }, - { GC2235_8BIT, 0x05, 0x00 }, - { GC2235_8BIT, 0x06, 0xfd }, - { GC2235_8BIT, 0x07, 0x00 }, - { GC2235_8BIT, 0x08, 0x14 }, - { GC2235_8BIT, 0x0a, 0x02 }, /* row start */ - { GC2235_8BIT, 0x0c, 0x00 }, /* col start */ - { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */ - { GC2235_8BIT, 0x0e, 0xd0 }, - { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */ - { GC2235_8BIT, 0x10, 0x60 }, - { GC2235_8BIT, 0x17, 0x15 }, /* mirror flip */ - { GC2235_8BIT, 0x18, 0x1a }, - { GC2235_8BIT, 0x19, 0x06 }, - { GC2235_8BIT, 0x1a, 0x01 }, - { GC2235_8BIT, 0x1b, 0x4d }, - { GC2235_8BIT, 0x1e, 0x88 }, - { GC2235_8BIT, 0x1f, 0x48 }, - { GC2235_8BIT, 0x20, 0x03 }, - { GC2235_8BIT, 0x21, 0x7f }, - { GC2235_8BIT, 0x22, 0x83 }, - { GC2235_8BIT, 0x23, 0x42 }, - { GC2235_8BIT, 0x24, 0x16 }, - { GC2235_8BIT, 0x26, 0x01 }, /*analog gain*/ - { GC2235_8BIT, 0x27, 0x30 }, - { GC2235_8BIT, 0x3f, 0x00 }, /* PRC */ - /* blk */ - { GC2235_8BIT, 0x40, 0xa3 }, - { GC2235_8BIT, 0x41, 0x82 }, - { GC2235_8BIT, 0x43, 0x20 }, - { GC2235_8BIT, 0x5e, 0x18 }, - { GC2235_8BIT, 0x5f, 0x18 }, - { GC2235_8BIT, 0x60, 0x18 }, - { GC2235_8BIT, 0x61, 0x18 }, - { GC2235_8BIT, 0x62, 0x18 }, - { GC2235_8BIT, 0x63, 0x18 }, - { GC2235_8BIT, 0x64, 0x18 }, - { GC2235_8BIT, 0x65, 0x18 }, - { GC2235_8BIT, 0x66, 0x20 }, - { GC2235_8BIT, 0x67, 0x20 }, - { GC2235_8BIT, 0x68, 0x20 }, - { GC2235_8BIT, 0x69, 0x20 }, - /* Gain */ - { GC2235_8BIT, 0xb2, 0x00 }, - { GC2235_8BIT, 0xb3, 0x40 }, - { GC2235_8BIT, 0xb4, 0x40 }, - { GC2235_8BIT, 0xb5, 0x40 }, - /* Dark sun */ - { GC2235_8BIT, 0xbc, 0x00 }, - - { GC2235_8BIT, 0xfe, 0x03 }, - { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */ - { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */ - { GC2235_TOK_TERM, 0, 0 } -}; -/* - * Register settings for various resolution - */ -#if ENABLE_NON_PREVIEW -static struct 
gc2235_reg const gc2235_1296_736_30fps[] = { - { GC2235_8BIT, 0x8b, 0xa0 }, - { GC2235_8BIT, 0x8c, 0x02 }, - - { GC2235_8BIT, 0x07, 0x01 }, /* VBI */ - { GC2235_8BIT, 0x08, 0x44 }, - { GC2235_8BIT, 0x09, 0x00 }, /* row start */ - { GC2235_8BIT, 0x0a, 0xf0 }, - { GC2235_8BIT, 0x0b, 0x00 }, /* col start */ - { GC2235_8BIT, 0x0c, 0xa0 }, - { GC2235_8BIT, 0x0d, 0x02 }, /* win height 736 */ - { GC2235_8BIT, 0x0e, 0xf0 }, - { GC2235_8BIT, 0x0f, 0x05 }, /* win width: 1296 */ - { GC2235_8BIT, 0x10, 0x20 }, - - { GC2235_8BIT, 0x90, 0x01 }, - { GC2235_8BIT, 0x92, 0x08 }, - { GC2235_8BIT, 0x94, 0x08 }, - { GC2235_8BIT, 0x95, 0x02 }, /* crop win height 736 */ - { GC2235_8BIT, 0x96, 0xe0 }, - { GC2235_8BIT, 0x97, 0x05 }, /* crop win width 1296 */ - { GC2235_8BIT, 0x98, 0x10 }, - /* mimi init */ - { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */ - { GC2235_8BIT, 0x01, 0x07 }, - { GC2235_8BIT, 0x02, 0x11 }, - { GC2235_8BIT, 0x03, 0x11 }, - { GC2235_8BIT, 0x06, 0x80 }, - { GC2235_8BIT, 0x11, 0x2b }, - /* set mipi buffer */ - { GC2235_8BIT, 0x12, 0x54 }, /* val_low = (width * 10 / 8) & 0xFF */ - { GC2235_8BIT, 0x13, 0x06 }, /* val_high = (width * 10 / 8) >> 8 */ - - { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/ - { GC2235_8BIT, 0x04, 0x10 }, - { GC2235_8BIT, 0x05, 0x00 }, - { GC2235_8BIT, 0x17, 0x01 }, - - { GC2235_8BIT, 0x22, 0x01 }, - { GC2235_8BIT, 0x23, 0x05 }, - { GC2235_8BIT, 0x24, 0x10 }, - { GC2235_8BIT, 0x25, 0x10 }, - { GC2235_8BIT, 0x26, 0x02 }, - { GC2235_8BIT, 0x21, 0x10 }, - { GC2235_8BIT, 0x29, 0x01 }, - { GC2235_8BIT, 0x2a, 0x02 }, - { GC2235_8BIT, 0x2b, 0x02 }, - - { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */ - { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */ - { GC2235_TOK_TERM, 0, 0 } -}; - -static struct gc2235_reg const gc2235_960_640_30fps[] = { - { GC2235_8BIT, 0x8b, 0xa0 }, - { GC2235_8BIT, 0x8c, 0x02 }, - - { GC2235_8BIT, 0x07, 0x02 }, /* VBI */ - { GC2235_8BIT, 0x08, 0xA4 }, - { GC2235_8BIT, 0x09, 0x01 }, /* row start */ - { GC2235_8BIT, 0x0a, 0x18 }, - { 
GC2235_8BIT, 0x0b, 0x01 }, /* col start */ - { GC2235_8BIT, 0x0c, 0x40 }, - { GC2235_8BIT, 0x0d, 0x02 }, /* win height 656 */ - { GC2235_8BIT, 0x0e, 0x90 }, - { GC2235_8BIT, 0x0f, 0x03 }, /* win width: 976 */ - { GC2235_8BIT, 0x10, 0xd0 }, - - { GC2235_8BIT, 0x90, 0x01 }, - { GC2235_8BIT, 0x92, 0x02 }, - { GC2235_8BIT, 0x94, 0x06 }, - { GC2235_8BIT, 0x95, 0x02 }, /* crop win height 640 */ - { GC2235_8BIT, 0x96, 0x80 }, - { GC2235_8BIT, 0x97, 0x03 }, /* crop win width 960 */ - { GC2235_8BIT, 0x98, 0xc0 }, - /* mimp init */ - { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */ - { GC2235_8BIT, 0x01, 0x07 }, - { GC2235_8BIT, 0x02, 0x11 }, - { GC2235_8BIT, 0x03, 0x11 }, - { GC2235_8BIT, 0x06, 0x80 }, - { GC2235_8BIT, 0x11, 0x2b }, - /* set mipi buffer */ - { GC2235_8BIT, 0x12, 0xb0 }, /* val_low = (width * 10 / 8) & 0xFF */ - { GC2235_8BIT, 0x13, 0x04 }, /* val_high = (width * 10 / 8) >> 8 */ - - { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/ - { GC2235_8BIT, 0x04, 0x10 }, - { GC2235_8BIT, 0x05, 0x00 }, - { GC2235_8BIT, 0x17, 0x01 }, - { GC2235_8BIT, 0x22, 0x01 }, - { GC2235_8BIT, 0x23, 0x05 }, - { GC2235_8BIT, 0x24, 0x10 }, - { GC2235_8BIT, 0x25, 0x10 }, - { GC2235_8BIT, 0x26, 0x02 }, - { GC2235_8BIT, 0x21, 0x10 }, - { GC2235_8BIT, 0x29, 0x01 }, - { GC2235_8BIT, 0x2a, 0x02 }, - { GC2235_8BIT, 0x2b, 0x02 }, - { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */ - { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */ - { GC2235_TOK_TERM, 0, 0 } -}; -#endif - -static struct gc2235_reg const gc2235_1600_900_30fps[] = { - { GC2235_8BIT, 0x8b, 0xa0 }, - { GC2235_8BIT, 0x8c, 0x02 }, - - { GC2235_8BIT, 0x0d, 0x03 }, /* win height 932 */ - { GC2235_8BIT, 0x0e, 0xa4 }, - { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1632 */ - { GC2235_8BIT, 0x10, 0x50 }, - - { GC2235_8BIT, 0x90, 0x01 }, - { GC2235_8BIT, 0x92, 0x02 }, - { GC2235_8BIT, 0x94, 0x06 }, - { GC2235_8BIT, 0x95, 0x03 }, /* crop win height 900 */ - { GC2235_8BIT, 0x96, 0x84 }, - { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1600 */ - { 
GC2235_8BIT, 0x98, 0x40 }, - /* mimi init */ - { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */ - { GC2235_8BIT, 0x01, 0x07 }, - { GC2235_8BIT, 0x02, 0x11 }, - { GC2235_8BIT, 0x03, 0x11 }, - { GC2235_8BIT, 0x06, 0x80 }, - { GC2235_8BIT, 0x11, 0x2b }, - /* set mipi buffer */ - { GC2235_8BIT, 0x12, 0xd0 }, /* val_low = (width * 10 / 8) & 0xFF */ - { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */ - - { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/ - { GC2235_8BIT, 0x04, 0x10 }, - { GC2235_8BIT, 0x05, 0x00 }, - { GC2235_8BIT, 0x17, 0x01 }, - { GC2235_8BIT, 0x22, 0x01 }, - { GC2235_8BIT, 0x23, 0x05 }, - { GC2235_8BIT, 0x24, 0x10 }, - { GC2235_8BIT, 0x25, 0x10 }, - { GC2235_8BIT, 0x26, 0x02 }, - { GC2235_8BIT, 0x21, 0x10 }, - { GC2235_8BIT, 0x29, 0x01 }, - { GC2235_8BIT, 0x2a, 0x02 }, - { GC2235_8BIT, 0x2b, 0x02 }, - { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */ - { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */ - { GC2235_TOK_TERM, 0, 0 } -}; - -static struct gc2235_reg const gc2235_1616_1082_30fps[] = { - { GC2235_8BIT, 0x8b, 0xa0 }, - { GC2235_8BIT, 0x8c, 0x02 }, - - { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */ - { GC2235_8BIT, 0x0e, 0xd0 }, - { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */ - { GC2235_8BIT, 0x10, 0x50 }, - - { GC2235_8BIT, 0x90, 0x01 }, - { GC2235_8BIT, 0x92, 0x4a }, - { GC2235_8BIT, 0x94, 0x00 }, - { GC2235_8BIT, 0x95, 0x04 }, /* crop win height 1082 */ - { GC2235_8BIT, 0x96, 0x3a }, - { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1616 */ - { GC2235_8BIT, 0x98, 0x50 }, - /* mimp init */ - { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */ - { GC2235_8BIT, 0x01, 0x07 }, - { GC2235_8BIT, 0x02, 0x11 }, - { GC2235_8BIT, 0x03, 0x11 }, - { GC2235_8BIT, 0x06, 0x80 }, - { GC2235_8BIT, 0x11, 0x2b }, - /* set mipi buffer */ - { GC2235_8BIT, 0x12, 0xe4 }, /* val_low = (width * 10 / 8) & 0xFF */ - { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */ - - { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/ - { GC2235_8BIT, 0x04, 0x10 
}, - { GC2235_8BIT, 0x05, 0x00 }, - { GC2235_8BIT, 0x17, 0x01 }, - { GC2235_8BIT, 0x22, 0x01 }, - { GC2235_8BIT, 0x23, 0x05 }, - { GC2235_8BIT, 0x24, 0x10 }, - { GC2235_8BIT, 0x25, 0x10 }, - { GC2235_8BIT, 0x26, 0x02 }, - { GC2235_8BIT, 0x21, 0x10 }, - { GC2235_8BIT, 0x29, 0x01 }, - { GC2235_8BIT, 0x2a, 0x02 }, - { GC2235_8BIT, 0x2b, 0x02 }, - { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */ - { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */ - { GC2235_TOK_TERM, 0, 0 } -}; - -static struct gc2235_reg const gc2235_1616_1216_30fps[] = { - { GC2235_8BIT, 0x8b, 0xa0 }, - { GC2235_8BIT, 0x8c, 0x02 }, - - { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */ - { GC2235_8BIT, 0x0e, 0xd0 }, - { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */ - { GC2235_8BIT, 0x10, 0x50 }, - - { GC2235_8BIT, 0x90, 0x01 }, - { GC2235_8BIT, 0x92, 0x02 }, - { GC2235_8BIT, 0x94, 0x00 }, - { GC2235_8BIT, 0x95, 0x04 }, /* crop win height 1216 */ - { GC2235_8BIT, 0x96, 0xc0 }, - { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1616 */ - { GC2235_8BIT, 0x98, 0x50 }, - /* mimi init */ - { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */ - { GC2235_8BIT, 0x01, 0x07 }, - { GC2235_8BIT, 0x02, 0x11 }, - { GC2235_8BIT, 0x03, 0x11 }, - { GC2235_8BIT, 0x06, 0x80 }, - { GC2235_8BIT, 0x11, 0x2b }, - /* set mipi buffer */ - { GC2235_8BIT, 0x12, 0xe4 }, /* val_low = (width * 10 / 8) & 0xFF */ - { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */ - { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/ - { GC2235_8BIT, 0x04, 0x10 }, - { GC2235_8BIT, 0x05, 0x00 }, - { GC2235_8BIT, 0x17, 0x01 }, - { GC2235_8BIT, 0x22, 0x01 }, - { GC2235_8BIT, 0x23, 0x05 }, - { GC2235_8BIT, 0x24, 0x10 }, - { GC2235_8BIT, 0x25, 0x10 }, - { GC2235_8BIT, 0x26, 0x02 }, - { GC2235_8BIT, 0x21, 0x10 }, - { GC2235_8BIT, 0x29, 0x01 }, - { GC2235_8BIT, 0x2a, 0x02 }, - { GC2235_8BIT, 0x2b, 0x02 }, - { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */ - { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */ - { GC2235_TOK_TERM, 0, 0 } -}; - -static struct 
gc2235_resolution gc2235_res_preview[] = { - - { - .desc = "gc2235_1600_900_30fps", - .width = 1600, - .height = 900, - .pix_clk_freq = 30, - .fps = 30, - .used = 0, - .pixels_per_line = 2132, - .lines_per_frame = 1068, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = gc2235_1600_900_30fps, - }, - - { - .desc = "gc2235_1600_1066_30fps", - .width = 1616, - .height = 1082, - .pix_clk_freq = 30, - .fps = 30, - .used = 0, - .pixels_per_line = 2132, - .lines_per_frame = 1368, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = gc2235_1616_1082_30fps, - }, - { - .desc = "gc2235_1600_1200_30fps", - .width = 1616, - .height = 1216, - .pix_clk_freq = 30, - .fps = 30, - .used = 0, - .pixels_per_line = 2132, - .lines_per_frame = 1368, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = gc2235_1616_1216_30fps, - }, - -}; -#define N_RES_PREVIEW (ARRAY_SIZE(gc2235_res_preview)) - -/* - * Disable non-preview configurations until the configuration selection is - * improved. 
- */ -#if ENABLE_NON_PREVIEW -static struct gc2235_resolution gc2235_res_still[] = { - { - .desc = "gc2235_1600_900_30fps", - .width = 1600, - .height = 900, - .pix_clk_freq = 30, - .fps = 30, - .used = 0, - .pixels_per_line = 2132, - .lines_per_frame = 1068, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = gc2235_1600_900_30fps, - }, - { - .desc = "gc2235_1600_1066_30fps", - .width = 1616, - .height = 1082, - .pix_clk_freq = 30, - .fps = 30, - .used = 0, - .pixels_per_line = 2132, - .lines_per_frame = 1368, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = gc2235_1616_1082_30fps, - }, - { - .desc = "gc2235_1600_1200_30fps", - .width = 1616, - .height = 1216, - .pix_clk_freq = 30, - .fps = 30, - .used = 0, - .pixels_per_line = 2132, - .lines_per_frame = 1368, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = gc2235_1616_1216_30fps, - }, - -}; -#define N_RES_STILL (ARRAY_SIZE(gc2235_res_still)) - -static struct gc2235_resolution gc2235_res_video[] = { - { - .desc = "gc2235_1296_736_30fps", - .width = 1296, - .height = 736, - .pix_clk_freq = 30, - .fps = 30, - .used = 0, - .pixels_per_line = 1828, - .lines_per_frame = 888, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = gc2235_1296_736_30fps, - }, - { - .desc = "gc2235_960_640_30fps", - .width = 960, - .height = 640, - .pix_clk_freq = 30, - .fps = 30, - .used = 0, - .pixels_per_line = 1492, - .lines_per_frame = 792, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = gc2235_960_640_30fps, - }, - -}; -#define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video)) -#endif - -static struct gc2235_resolution *gc2235_res = gc2235_res_preview; -static unsigned long N_RES = N_RES_PREVIEW; -#endif diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.h b/drivers/staging/media/atomisp/i2c/mt9m114.h deleted file mode 100644 index 
de39cc141308..000000000000 --- a/drivers/staging/media/atomisp/i2c/mt9m114.h +++ /dev/null @@ -1,1788 +0,0 @@ -/* - * Support for mt9m114 Camera Sensor. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __A1040_H__ -#define __A1040_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../include/linux/atomisp_platform.h" -#include "../include/linux/atomisp.h" - -#define V4L2_IDENT_MT9M114 8245 - -#define MT9P111_REV3 -#define FULLINISUPPORT - -/* #defines for register writes and register array processing */ -#define MISENSOR_8BIT 1 -#define MISENSOR_16BIT 2 -#define MISENSOR_32BIT 4 - -#define MISENSOR_FWBURST0 0x80 -#define MISENSOR_FWBURST1 0x81 -#define MISENSOR_FWBURST4 0x84 -#define MISENSOR_FWBURST 0x88 - -#define MISENSOR_TOK_TERM 0xf000 /* terminating token for reg list */ -#define MISENSOR_TOK_DELAY 0xfe00 /* delay token for reg list */ -#define MISENSOR_TOK_FWLOAD 0xfd00 /* token indicating load FW */ -#define MISENSOR_TOK_POLL 0xfc00 /* token indicating poll instruction */ -#define MISENSOR_TOK_RMW 0x0010 /* RMW operation */ -#define MISENSOR_TOK_MASK 0xfff0 -#define MISENSOR_AWB_STEADY (1<<0) /* awb steady */ -#define MISENSOR_AE_READY (1<<3) /* ae status ready */ - -/* mask to set sensor read_mode via misensor_rmw_reg */ -#define MISENSOR_R_MODE_MASK 0x0330 -/* mask to set sensor vert_flip and horz_mirror */ -#define MISENSOR_VFLIP_MASK 0x0002 -#define MISENSOR_HFLIP_MASK 0x0001 -#define 
MISENSOR_FLIP_EN 1 -#define MISENSOR_FLIP_DIS 0 - -/* bits set to set sensor read_mode via misensor_rmw_reg */ -#define MISENSOR_SKIPPING_SET 0x0011 -#define MISENSOR_SUMMING_SET 0x0033 -#define MISENSOR_NORMAL_SET 0x0000 - -/* sensor register that control sensor read-mode and mirror */ -#define MISENSOR_READ_MODE 0xC834 -/* sensor ae-track status register */ -#define MISENSOR_AE_TRACK_STATUS 0xA800 -/* sensor awb status register */ -#define MISENSOR_AWB_STATUS 0xAC00 -/* sensor coarse integration time register */ -#define MISENSOR_COARSE_INTEGRATION_TIME 0xC83C - -/* registers */ -#define REG_SW_RESET 0x301A -#define REG_SW_STREAM 0xDC00 -#define REG_SCCB_CTRL 0x3100 -#define REG_SC_CMMN_CHIP_ID 0x0000 -#define REG_V_START 0xc800 /* 16bits */ -#define REG_H_START 0xc802 /* 16bits */ -#define REG_V_END 0xc804 /* 16bits */ -#define REG_H_END 0xc806 /* 16bits */ -#define REG_PIXEL_CLK 0xc808 /* 32bits */ -#define REG_TIMING_VTS 0xc812 /* 16bits */ -#define REG_TIMING_HTS 0xc814 /* 16bits */ -#define REG_WIDTH 0xC868 /* 16bits */ -#define REG_HEIGHT 0xC86A /* 16bits */ -#define REG_EXPO_COARSE 0x3012 /* 16bits */ -#define REG_EXPO_FINE 0x3014 /* 16bits */ -#define REG_GAIN 0x305E -#define REG_ANALOGGAIN 0x305F -#define REG_ADDR_ACESSS 0x098E /* logical_address_access */ -#define REG_COMM_Register 0x0080 /* command_register */ - -#define SENSOR_DETECTED 1 -#define SENSOR_NOT_DETECTED 0 - -#define I2C_RETRY_COUNT 5 -#define MSG_LEN_OFFSET 2 - -#ifndef MIPI_CONTROL -#define MIPI_CONTROL 0x3400 /* MIPI_Control */ -#endif - -/* GPIO pin on Moorestown */ -#define GPIO_SCLK_25 44 -#define GPIO_STB_PIN 47 - -#define GPIO_STDBY_PIN 49 /* ab:new */ -#define GPIO_RESET_PIN 50 - -/* System control register for Aptina A-1040SOC*/ -#define MT9M114_PID 0x0 - -/* MT9P111_DEVICE_ID */ -#define MT9M114_MOD_ID 0x2481 - -#define MT9M114_FINE_INTG_TIME_MIN 0 -#define MT9M114_FINE_INTG_TIME_MAX_MARGIN 0 -#define MT9M114_COARSE_INTG_TIME_MIN 1 -#define MT9M114_COARSE_INTG_TIME_MAX_MARGIN 6 
- - -/* ulBPat; */ - -#define MT9M114_BPAT_RGRGGBGB (1 << 0) -#define MT9M114_BPAT_GRGRBGBG (1 << 1) -#define MT9M114_BPAT_GBGBRGRG (1 << 2) -#define MT9M114_BPAT_BGBGGRGR (1 << 3) - -#define MT9M114_FOCAL_LENGTH_NUM 208 /*2.08mm*/ -#define MT9M114_FOCAL_LENGTH_DEM 100 -#define MT9M114_F_NUMBER_DEFAULT_NUM 24 -#define MT9M114_F_NUMBER_DEM 10 -#define MT9M114_WAIT_STAT_TIMEOUT 100 -#define MT9M114_FLICKER_MODE_50HZ 1 -#define MT9M114_FLICKER_MODE_60HZ 2 -/* - * focal length bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define MT9M114_FOCAL_LENGTH_DEFAULT 0xD00064 - -/* - * current f-number bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define MT9M114_F_NUMBER_DEFAULT 0x18000a - -/* - * f-number range bits definition: - * bits 31-24: max f-number numerator - * bits 23-16: max f-number denominator - * bits 15-8: min f-number numerator - * bits 7-0: min f-number denominator - */ -#define MT9M114_F_NUMBER_RANGE 0x180a180a - -/* Supported resolutions */ -enum { - MT9M114_RES_736P, - MT9M114_RES_864P, - MT9M114_RES_960P, -}; -#define MT9M114_RES_960P_SIZE_H 1296 -#define MT9M114_RES_960P_SIZE_V 976 -#define MT9M114_RES_720P_SIZE_H 1280 -#define MT9M114_RES_720P_SIZE_V 720 -#define MT9M114_RES_576P_SIZE_H 1024 -#define MT9M114_RES_576P_SIZE_V 576 -#define MT9M114_RES_480P_SIZE_H 768 -#define MT9M114_RES_480P_SIZE_V 480 -#define MT9M114_RES_VGA_SIZE_H 640 -#define MT9M114_RES_VGA_SIZE_V 480 -#define MT9M114_RES_QVGA_SIZE_H 320 -#define MT9M114_RES_QVGA_SIZE_V 240 -#define MT9M114_RES_QCIF_SIZE_H 176 -#define MT9M114_RES_QCIF_SIZE_V 144 - -#define MT9M114_RES_720_480p_768_SIZE_H 736 -#define MT9M114_RES_720_480p_768_SIZE_V 496 -#define MT9M114_RES_736P_SIZE_H 1296 -#define MT9M114_RES_736P_SIZE_V 736 -#define MT9M114_RES_864P_SIZE_H 1296 -#define MT9M114_RES_864P_SIZE_V 864 -#define MT9M114_RES_976P_SIZE_H 1296 -#define MT9M114_RES_976P_SIZE_V 976 - -#define MT9M114_BIN_FACTOR_MAX 3 - -#define MT9M114_DEFAULT_FIRST_EXP 
0x10 -#define MT9M114_MAX_FIRST_EXP 0x302 - -/* completion status polling requirements, usage based on Aptina .INI Rev2 */ -enum poll_reg { - NO_POLLING, - PRE_POLLING, - POST_POLLING, -}; -/* - * struct misensor_reg - MI sensor register format - * @length: length of the register - * @reg: 16-bit offset to register - * @val: 8/16/32-bit register value - * Define a structure for sensor register initialization values - */ -struct misensor_reg { - u32 length; - u32 reg; - u32 val; /* value or for read/mod/write, AND mask */ - u32 val2; /* optional; for rmw, OR mask */ -}; - -/* - * struct misensor_fwreg - Firmware burst command - * @type: FW burst or 8/16 bit register - * @addr: 16-bit offset to register or other values depending on type - * @valx: data value for burst (or other commands) - * - * Define a structure for sensor register initialization values - */ -struct misensor_fwreg { - u32 type; /* type of value, register or FW burst string */ - u32 addr; /* target address */ - u32 val0; - u32 val1; - u32 val2; - u32 val3; - u32 val4; - u32 val5; - u32 val6; - u32 val7; -}; - -struct regval_list { - u16 reg_num; - u8 value; -}; - -struct mt9m114_device { - struct v4l2_subdev sd; - struct media_pad pad; - struct v4l2_mbus_framefmt format; - - struct camera_sensor_platform_data *platform_data; - struct mutex input_lock; /* serialize sensor's ioctl */ - struct v4l2_ctrl_handler ctrl_handler; - int real_model_id; - int nctx; - int power; - - unsigned int bus_width; - unsigned int mode; - unsigned int field_inv; - unsigned int field_sel; - unsigned int ycseq; - unsigned int conv422; - unsigned int bpat; - unsigned int hpol; - unsigned int vpol; - unsigned int edge; - unsigned int bls; - unsigned int gamma; - unsigned int cconv; - unsigned int res; - unsigned int dwn_sz; - unsigned int blc; - unsigned int agc; - unsigned int awb; - unsigned int aec; - /* extention SENSOR version 2 */ - unsigned int cie_profile; - - /* extention SENSOR version 3 */ - unsigned int 
flicker_freq; - - /* extension SENSOR version 4 */ - unsigned int smia_mode; - unsigned int mipi_mode; - - /* Add name here to load shared library */ - unsigned int type; - - /*Number of MIPI lanes*/ - unsigned int mipi_lanes; - /*WA for low light AE*/ - unsigned int first_exp; - unsigned int first_gain; - unsigned int first_diggain; - char name[32]; - - u8 lightfreq; - u8 streamon; -}; - -struct mt9m114_format_struct { - u8 *desc; - u32 pixelformat; - struct regval_list *regs; -}; - -struct mt9m114_res_struct { - u8 *desc; - int res; - int width; - int height; - int fps; - int skip_frames; - bool used; - struct regval_list *regs; - u16 pixels_per_line; - u16 lines_per_frame; - u8 bin_factor_x; - u8 bin_factor_y; - u8 bin_mode; -}; - -/* 2 bytes used for address: 256 bytes total */ -#define MT9M114_MAX_WRITE_BUF_SIZE 254 -struct mt9m114_write_buffer { - u16 addr; - u8 data[MT9M114_MAX_WRITE_BUF_SIZE]; -}; - -struct mt9m114_write_ctrl { - int index; - struct mt9m114_write_buffer buffer; -}; - -/* - * Modes supported by the mt9m114 driver. - * Please, keep them in ascending order. 
- */ -static struct mt9m114_res_struct mt9m114_res[] = { - { - .desc = "720P", - .res = MT9M114_RES_736P, - .width = 1296, - .height = 736, - .fps = 30, - .used = false, - .regs = NULL, - .skip_frames = 1, - - .pixels_per_line = 0x0640, - .lines_per_frame = 0x0307, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - }, - { - .desc = "848P", - .res = MT9M114_RES_864P, - .width = 1296, - .height = 864, - .fps = 30, - .used = false, - .regs = NULL, - .skip_frames = 1, - - .pixels_per_line = 0x0640, - .lines_per_frame = 0x03E8, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - }, - { - .desc = "960P", - .res = MT9M114_RES_960P, - .width = 1296, - .height = 976, - .fps = 30, - .used = false, - .regs = NULL, - .skip_frames = 1, - - .pixels_per_line = 0x0644, /* consistent with regs arrays */ - .lines_per_frame = 0x03E5, /* consistent with regs arrays */ - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - }, -}; -#define N_RES (ARRAY_SIZE(mt9m114_res)) - -#if 0 /* Currently unused */ -static struct misensor_reg const mt9m114_exitstandby[] = { - {MISENSOR_16BIT, 0x098E, 0xDC00}, - /* exit-standby */ - {MISENSOR_8BIT, 0xDC00, 0x54}, - {MISENSOR_16BIT, 0x0080, 0x8002}, - {MISENSOR_TOK_TERM, 0, 0} -}; -#endif - -static struct misensor_reg const mt9m114_exp_win[5][5] = { - { - {MISENSOR_8BIT, 0xA407, 0x64}, - {MISENSOR_8BIT, 0xA408, 0x64}, - {MISENSOR_8BIT, 0xA409, 0x64}, - {MISENSOR_8BIT, 0xA40A, 0x64}, - {MISENSOR_8BIT, 0xA40B, 0x64}, - }, - { - {MISENSOR_8BIT, 0xA40C, 0x64}, - {MISENSOR_8BIT, 0xA40D, 0x64}, - {MISENSOR_8BIT, 0xA40E, 0x64}, - {MISENSOR_8BIT, 0xA40F, 0x64}, - {MISENSOR_8BIT, 0xA410, 0x64}, - }, - { - {MISENSOR_8BIT, 0xA411, 0x64}, - {MISENSOR_8BIT, 0xA412, 0x64}, - {MISENSOR_8BIT, 0xA413, 0x64}, - {MISENSOR_8BIT, 0xA414, 0x64}, - {MISENSOR_8BIT, 0xA415, 0x64}, - }, - { - {MISENSOR_8BIT, 0xA416, 0x64}, - {MISENSOR_8BIT, 0xA417, 0x64}, - {MISENSOR_8BIT, 0xA418, 0x64}, - {MISENSOR_8BIT, 0xA419, 0x64}, - {MISENSOR_8BIT, 0xA41A, 0x64}, 
- }, - { - {MISENSOR_8BIT, 0xA41B, 0x64}, - {MISENSOR_8BIT, 0xA41C, 0x64}, - {MISENSOR_8BIT, 0xA41D, 0x64}, - {MISENSOR_8BIT, 0xA41E, 0x64}, - {MISENSOR_8BIT, 0xA41F, 0x64}, - }, -}; - -static struct misensor_reg const mt9m114_exp_average[] = { - {MISENSOR_8BIT, 0xA407, 0x00}, - {MISENSOR_8BIT, 0xA408, 0x00}, - {MISENSOR_8BIT, 0xA409, 0x00}, - {MISENSOR_8BIT, 0xA40A, 0x00}, - {MISENSOR_8BIT, 0xA40B, 0x00}, - {MISENSOR_8BIT, 0xA40C, 0x00}, - {MISENSOR_8BIT, 0xA40D, 0x00}, - {MISENSOR_8BIT, 0xA40E, 0x00}, - {MISENSOR_8BIT, 0xA40F, 0x00}, - {MISENSOR_8BIT, 0xA410, 0x00}, - {MISENSOR_8BIT, 0xA411, 0x00}, - {MISENSOR_8BIT, 0xA412, 0x00}, - {MISENSOR_8BIT, 0xA413, 0x00}, - {MISENSOR_8BIT, 0xA414, 0x00}, - {MISENSOR_8BIT, 0xA415, 0x00}, - {MISENSOR_8BIT, 0xA416, 0x00}, - {MISENSOR_8BIT, 0xA417, 0x00}, - {MISENSOR_8BIT, 0xA418, 0x00}, - {MISENSOR_8BIT, 0xA419, 0x00}, - {MISENSOR_8BIT, 0xA41A, 0x00}, - {MISENSOR_8BIT, 0xA41B, 0x00}, - {MISENSOR_8BIT, 0xA41C, 0x00}, - {MISENSOR_8BIT, 0xA41D, 0x00}, - {MISENSOR_8BIT, 0xA41E, 0x00}, - {MISENSOR_8BIT, 0xA41F, 0x00}, - {MISENSOR_TOK_TERM, 0, 0} -}; - -static struct misensor_reg const mt9m114_exp_center[] = { - {MISENSOR_8BIT, 0xA407, 0x19}, - {MISENSOR_8BIT, 0xA408, 0x19}, - {MISENSOR_8BIT, 0xA409, 0x19}, - {MISENSOR_8BIT, 0xA40A, 0x19}, - {MISENSOR_8BIT, 0xA40B, 0x19}, - {MISENSOR_8BIT, 0xA40C, 0x19}, - {MISENSOR_8BIT, 0xA40D, 0x4B}, - {MISENSOR_8BIT, 0xA40E, 0x4B}, - {MISENSOR_8BIT, 0xA40F, 0x4B}, - {MISENSOR_8BIT, 0xA410, 0x19}, - {MISENSOR_8BIT, 0xA411, 0x19}, - {MISENSOR_8BIT, 0xA412, 0x4B}, - {MISENSOR_8BIT, 0xA413, 0x64}, - {MISENSOR_8BIT, 0xA414, 0x4B}, - {MISENSOR_8BIT, 0xA415, 0x19}, - {MISENSOR_8BIT, 0xA416, 0x19}, - {MISENSOR_8BIT, 0xA417, 0x4B}, - {MISENSOR_8BIT, 0xA418, 0x4B}, - {MISENSOR_8BIT, 0xA419, 0x4B}, - {MISENSOR_8BIT, 0xA41A, 0x19}, - {MISENSOR_8BIT, 0xA41B, 0x19}, - {MISENSOR_8BIT, 0xA41C, 0x19}, - {MISENSOR_8BIT, 0xA41D, 0x19}, - {MISENSOR_8BIT, 0xA41E, 0x19}, - {MISENSOR_8BIT, 0xA41F, 0x19}, - 
{MISENSOR_TOK_TERM, 0, 0} -}; - -#if 0 /* Currently unused */ -static struct misensor_reg const mt9m114_suspend[] = { - {MISENSOR_16BIT, 0x098E, 0xDC00}, - {MISENSOR_8BIT, 0xDC00, 0x40}, - {MISENSOR_16BIT, 0x0080, 0x8002}, - {MISENSOR_TOK_TERM, 0, 0} -}; - -static struct misensor_reg const mt9m114_streaming[] = { - {MISENSOR_16BIT, 0x098E, 0xDC00}, - {MISENSOR_8BIT, 0xDC00, 0x34}, - {MISENSOR_16BIT, 0x0080, 0x8002}, - {MISENSOR_TOK_TERM, 0, 0} -}; -#endif - -static struct misensor_reg const mt9m114_standby_reg[] = { - {MISENSOR_16BIT, 0x098E, 0xDC00}, - {MISENSOR_8BIT, 0xDC00, 0x50}, - {MISENSOR_16BIT, 0x0080, 0x8002}, - {MISENSOR_TOK_TERM, 0, 0} -}; - -#if 0 /* Currently unused */ -static struct misensor_reg const mt9m114_wakeup_reg[] = { - {MISENSOR_16BIT, 0x098E, 0xDC00}, - {MISENSOR_8BIT, 0xDC00, 0x54}, - {MISENSOR_16BIT, 0x0080, 0x8002}, - {MISENSOR_TOK_TERM, 0, 0} -}; -#endif - -static struct misensor_reg const mt9m114_chgstat_reg[] = { - {MISENSOR_16BIT, 0x098E, 0xDC00}, - {MISENSOR_8BIT, 0xDC00, 0x28}, - {MISENSOR_16BIT, 0x0080, 0x8002}, - {MISENSOR_TOK_TERM, 0, 0} -}; - -/* [1296x976_30fps] - Intel */ -#if 0 -static struct misensor_reg const mt9m114_960P_init[] = { - {MISENSOR_16BIT, 0x098E, 0x1000}, - {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */ - {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */ - {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */ - {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */ - {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */ - {MISENSOR_16BIT, 0xC804, 0x03CF}, /* cam_sensor_cfg_y_addr_end = 971 */ - {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1291 */ - {MISENSOR_16BIT, 0xC808, 0x02DC}, /* cam_sensor_cfg_pixclk = 48000000 */ - {MISENSOR_16BIT, 0xC80A, 0x6C00}, - {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */ - /* cam_sensor_cfg_fine_integ_time_min = 219 */ - {MISENSOR_16BIT, 0xC80E, 
0x00DB}, - /* cam_sensor_cfg_fine_integ_time_max = 1459 */ - {MISENSOR_16BIT, 0xC810, 0x05B3}, - /* cam_sensor_cfg_frame_length_lines = 1006 */ - {MISENSOR_16BIT, 0xC812, 0x03F6}, - /* cam_sensor_cfg_line_length_pck = 1590 */ - {MISENSOR_16BIT, 0xC814, 0x063E}, - /* cam_sensor_cfg_fine_correction = 96 */ - {MISENSOR_16BIT, 0xC816, 0x0060}, - /* cam_sensor_cfg_cpipe_last_row = 963 */ - {MISENSOR_16BIT, 0xC818, 0x03C3}, - {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */ - {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */ - {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */ - {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */ - {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1280 */ - {MISENSOR_16BIT, 0xC85A, 0x03C8}, /* cam_crop_window_height = 960 */ - {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */ - {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1280 */ - {MISENSOR_16BIT, 0xC86A, 0x03C8}, /* cam_output_height = 960 */ - {MISENSOR_TOK_TERM, 0, 0}, -}; -#endif - -/* [1296x976_30fps_768Mbps] */ -static struct misensor_reg const mt9m114_976P_init[] = { - {MISENSOR_16BIT, 0x98E, 0x1000}, - {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */ - {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */ - {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */ - {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */ - {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */ - {MISENSOR_16BIT, 0xC804, 0x03CF}, /* cam_sensor_cfg_y_addr_end = 975 */ - {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */ - {MISENSOR_32BIT, 0xC808, 0x2DC6C00},/* cam_sensor_cfg_pixclk = 480000*/ - {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */ - /* cam_sensor_cfg_fine_integ_time_min = 219 */ - {MISENSOR_16BIT, 0xC80E, 0x00DB}, - /* 0x062E 
//cam_sensor_cfg_fine_integ_time_max = 1459 */ - {MISENSOR_16BIT, 0xC810, 0x05B3}, - /* 0x074C //cam_sensor_cfg_frame_length_lines = 1006 */ - {MISENSOR_16BIT, 0xC812, 0x03E5}, - /* 0x06B1 /cam_sensor_cfg_line_length_pck = 1590 */ - {MISENSOR_16BIT, 0xC814, 0x0644}, - /* cam_sensor_cfg_fine_correction = 96 */ - {MISENSOR_16BIT, 0xC816, 0x0060}, - /* cam_sensor_cfg_cpipe_last_row = 963 */ - {MISENSOR_16BIT, 0xC818, 0x03C3}, - {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */ - {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */ - {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */ - {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */ - {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */ - {MISENSOR_16BIT, 0xC85A, 0x03C8}, /* cam_crop_window_height = 968 */ - {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */ - {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */ - {MISENSOR_16BIT, 0xC86A, 0x03C8}, /* cam_output_height = 968 */ - {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */ - {MISENSOR_TOK_TERM, 0, 0} -}; - -/* [1296x864_30fps] */ -static struct misensor_reg const mt9m114_864P_init[] = { - {MISENSOR_16BIT, 0x98E, 0x1000}, - {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */ - {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */ - {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */ - {MISENSOR_16BIT, 0xC800, 0x0038}, /* cam_sensor_cfg_y_addr_start = 56 */ - {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */ - {MISENSOR_16BIT, 0xC804, 0x0397}, /* cam_sensor_cfg_y_addr_end = 919 */ - {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */ - /* cam_sensor_cfg_pixclk = 48000000 */ - {MISENSOR_32BIT, 0xC808, 0x2DC6C00}, - {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */ - /* cam_sensor_cfg_fine_integ_time_min = 219 */ - {MISENSOR_16BIT, 
0xC80E, 0x00DB}, - /* cam_sensor_cfg_fine_integ_time_max = 1469 */ - {MISENSOR_16BIT, 0xC810, 0x05BD}, - /* cam_sensor_cfg_frame_length_lines = 1000 */ - {MISENSOR_16BIT, 0xC812, 0x03E8}, - /* cam_sensor_cfg_line_length_pck = 1600 */ - {MISENSOR_16BIT, 0xC814, 0x0640}, - /* cam_sensor_cfg_fine_correction = 96 */ - {MISENSOR_16BIT, 0xC816, 0x0060}, - /* cam_sensor_cfg_cpipe_last_row = 859 */ - {MISENSOR_16BIT, 0xC818, 0x035B}, - {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */ - {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */ - {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */ - {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */ - {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */ - {MISENSOR_16BIT, 0xC85A, 0x0358}, /* cam_crop_window_height = 856 */ - {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */ - {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */ - {MISENSOR_16BIT, 0xC86A, 0x0358}, /* cam_output_height = 856 */ - {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */ - {MISENSOR_TOK_TERM, 0, 0} -}; - -/* [1296x736_30fps] */ -static struct misensor_reg const mt9m114_736P_init[] = { - {MISENSOR_16BIT, 0x98E, 0x1000}, - {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */ - {MISENSOR_16BIT, 0xC980, 0x011F}, /* cam_sysctl_pll_divider_m_n = 287 */ - {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */ - {MISENSOR_16BIT, 0xC800, 0x0078}, /* cam_sensor_cfg_y_addr_start = 120*/ - {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */ - {MISENSOR_16BIT, 0xC804, 0x0357}, /* cam_sensor_cfg_y_addr_end = 855 */ - {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */ - {MISENSOR_32BIT, 0xC808, 0x237A07F}, /* cam_sensor_cfg_pixclk=37199999*/ - {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */ - /* cam_sensor_cfg_fine_integ_time_min = 219 */ - {MISENSOR_16BIT, 
0xC80E, 0x00DB}, - /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1469 */ - {MISENSOR_16BIT, 0xC810, 0x05BD}, - /* 0x074C //cam_sensor_cfg_frame_length_lines = 775 */ - {MISENSOR_16BIT, 0xC812, 0x0307}, - /* 0x06B1 /cam_sensor_cfg_line_length_pck = 1600 */ - {MISENSOR_16BIT, 0xC814, 0x0640}, - /* cam_sensor_cfg_fine_correction = 96 */ - {MISENSOR_16BIT, 0xC816, 0x0060}, - /* cam_sensor_cfg_cpipe_last_row = 731 */ - {MISENSOR_16BIT, 0xC818, 0x02DB}, - {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */ - {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */ - {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */ - {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */ - {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */ - {MISENSOR_16BIT, 0xC85A, 0x02D8}, /* cam_crop_window_height = 728 */ - {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */ - {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */ - {MISENSOR_16BIT, 0xC86A, 0x02D8}, /* cam_output_height = 728 */ - {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */ - {MISENSOR_TOK_TERM, 0, 0} -}; - -/* [736x496_30fps_768Mbps] */ -#if 0 /* Currently unused */ -static struct misensor_reg const mt9m114_720_480P_init[] = { - {MISENSOR_16BIT, 0x98E, 0x1000}, - {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */ - {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */ - {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */ - {MISENSOR_16BIT, 0xC800, 0x00F0}, /* cam_sensor_cfg_y_addr_start = 240*/ - {MISENSOR_16BIT, 0xC802, 0x0118}, /* cam_sensor_cfg_x_addr_start = 280*/ - {MISENSOR_16BIT, 0xC804, 0x02DF}, /* cam_sensor_cfg_y_addr_end = 735 */ - {MISENSOR_16BIT, 0xC806, 0x03F7}, /* cam_sensor_cfg_x_addr_end = 1015 */ - /* cam_sensor_cfg_pixclk = 48000000 */ - {MISENSOR_32BIT, 0xC808, 0x2DC6C00}, - {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */ 
- /* cam_sensor_cfg_fine_integ_time_min = 219 */ - {MISENSOR_16BIT, 0xC80E, 0x00DB}, - /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1459 */ - {MISENSOR_16BIT, 0xC810, 0x05B3}, - /* 0x074C //cam_sensor_cfg_frame_length_lines = 997 */ - {MISENSOR_16BIT, 0xC812, 0x03E5}, - /* 0x06B1 /cam_sensor_cfg_line_length_pck = 1604 */ - {MISENSOR_16BIT, 0xC814, 0x0644}, - /* cam_sensor_cfg_fine_correction = 96 */ - {MISENSOR_16BIT, 0xC816, 0x0060}, - {MISENSOR_16BIT, 0xC818, 0x03C3}, /* cam_sensor_cfg_cpipe_last_row=963*/ - {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */ - {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0*/ - {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */ - {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */ - {MISENSOR_16BIT, 0xC858, 0x02D8}, /* cam_crop_window_width = 728 */ - {MISENSOR_16BIT, 0xC85A, 0x01E8}, /* cam_crop_window_height = 488 */ - {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */ - {MISENSOR_16BIT, 0xC868, 0x02D8}, /* cam_output_width = 728 */ - {MISENSOR_16BIT, 0xC86A, 0x01E8}, /* cam_output_height = 488 */ - {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */ - {MISENSOR_TOK_TERM, 0, 0} -}; -#endif - -static struct misensor_reg const mt9m114_common[] = { - /* reset */ - {MISENSOR_16BIT, 0x301A, 0x0234}, - /* LOAD = Step2-PLL_Timing //PLL and Timing */ - {MISENSOR_16BIT, 0x098E, 0x1000}, /* LOGICAL_ADDRESS_ACCESS */ - {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */ - {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */ - {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */ - {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 216*/ - {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 168*/ - {MISENSOR_16BIT, 0xC804, 0x03CD}, /* cam_sensor_cfg_y_addr_end = 761 */ - {MISENSOR_16BIT, 0xC806, 0x050D}, /* cam_sensor_cfg_x_addr_end = 1127 */ - {MISENSOR_16BIT, 
0xC808, 0x02DC}, /* cam_sensor_cfg_pixclk = 24000000 */ - {MISENSOR_16BIT, 0xC80A, 0x6C00}, - {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */ - /* cam_sensor_cfg_fine_integ_time_min = 219 */ - {MISENSOR_16BIT, 0xC80E, 0x01C3}, - /* cam_sensor_cfg_fine_integ_time_max = 1149 */ - {MISENSOR_16BIT, 0xC810, 0x03F7}, - /* cam_sensor_cfg_frame_length_lines = 625 */ - {MISENSOR_16BIT, 0xC812, 0x0500}, - /* cam_sensor_cfg_line_length_pck = 1280 */ - {MISENSOR_16BIT, 0xC814, 0x04E2}, - /* cam_sensor_cfg_fine_correction = 96 */ - {MISENSOR_16BIT, 0xC816, 0x00E0}, - /* cam_sensor_cfg_cpipe_last_row = 541 */ - {MISENSOR_16BIT, 0xC818, 0x01E3}, - {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */ - {MISENSOR_16BIT, 0xC834, 0x0330}, /* cam_sensor_control_read_mode = 0 */ - {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */ - {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */ - {MISENSOR_16BIT, 0xC858, 0x0280}, /* cam_crop_window_width = 952 */ - {MISENSOR_16BIT, 0xC85A, 0x01E0}, /* cam_crop_window_height = 538 */ - {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */ - {MISENSOR_16BIT, 0xC868, 0x0280}, /* cam_output_width = 952 */ - {MISENSOR_16BIT, 0xC86A, 0x01E0}, /* cam_output_height = 538 */ - /* LOAD = Step3-Recommended - * Patch,Errata and Sensor optimization Setting */ - {MISENSOR_16BIT, 0x316A, 0x8270}, /* DAC_TXLO_ROW */ - {MISENSOR_16BIT, 0x316C, 0x8270}, /* DAC_TXLO */ - {MISENSOR_16BIT, 0x3ED0, 0x2305}, /* DAC_LD_4_5 */ - {MISENSOR_16BIT, 0x3ED2, 0x77CF}, /* DAC_LD_6_7 */ - {MISENSOR_16BIT, 0x316E, 0x8202}, /* DAC_ECL */ - {MISENSOR_16BIT, 0x3180, 0x87FF}, /* DELTA_DK_CONTROL */ - {MISENSOR_16BIT, 0x30D4, 0x6080}, /* COLUMN_CORRECTION */ - {MISENSOR_16BIT, 0xA802, 0x0008}, /* AE_TRACK_MODE */ - {MISENSOR_16BIT, 0x3E14, 0xFF39}, /* SAMP_COL_PUP2 */ - {MISENSOR_16BIT, 0x31E0, 0x0003}, /* PIX_DEF_ID */ - /* LOAD = Step8-Features //Ports, special features, etc. 
*/ - {MISENSOR_16BIT, 0x098E, 0x0000}, /* LOGICAL_ADDRESS_ACCESS */ - {MISENSOR_16BIT, 0x001E, 0x0777}, /* PAD_SLEW */ - {MISENSOR_16BIT, 0x098E, 0x0000}, /* LOGICAL_ADDRESS_ACCESS */ - {MISENSOR_16BIT, 0xC984, 0x8001}, /* CAM_PORT_OUTPUT_CONTROL */ - {MISENSOR_16BIT, 0xC988, 0x0F00}, /* CAM_PORT_MIPI_TIMING_T_HS_ZERO */ - /* CAM_PORT_MIPI_TIMING_T_HS_EXIT_HS_TRAIL */ - {MISENSOR_16BIT, 0xC98A, 0x0B07}, - /* CAM_PORT_MIPI_TIMING_T_CLK_POST_CLK_PRE */ - {MISENSOR_16BIT, 0xC98C, 0x0D01}, - /* CAM_PORT_MIPI_TIMING_T_CLK_TRAIL_CLK_ZERO */ - {MISENSOR_16BIT, 0xC98E, 0x071D}, - {MISENSOR_16BIT, 0xC990, 0x0006}, /* CAM_PORT_MIPI_TIMING_T_LPX */ - {MISENSOR_16BIT, 0xC992, 0x0A0C}, /* CAM_PORT_MIPI_TIMING_INIT_TIMING */ - {MISENSOR_16BIT, 0x3C5A, 0x0009}, /* MIPI_DELAY_TRIM */ - {MISENSOR_16BIT, 0xC86C, 0x0210}, /* CAM_OUTPUT_FORMAT */ - {MISENSOR_16BIT, 0xA804, 0x0000}, /* AE_TRACK_ALGO */ - /* default exposure */ - {MISENSOR_16BIT, 0x3012, 0x0110}, /* COMMAND_REGISTER */ - {MISENSOR_TOK_TERM, 0, 0}, - -}; -#if 0 /* Currently unused */ -static struct misensor_reg const mt9m114_antiflicker_50hz[] = { - {MISENSOR_16BIT, 0x098E, 0xC88B}, - {MISENSOR_8BIT, 0xC88B, 0x32}, - {MISENSOR_8BIT, 0xDC00, 0x28}, - {MISENSOR_16BIT, 0x0080, 0x8002}, - {MISENSOR_TOK_TERM, 0, 0} -}; - -static struct misensor_reg const mt9m114_antiflicker_60hz[] = { - {MISENSOR_16BIT, 0x098E, 0xC88B}, - {MISENSOR_8BIT, 0xC88B, 0x3C}, - {MISENSOR_8BIT, 0xDC00, 0x28}, - {MISENSOR_16BIT, 0x0080, 0x8002}, - {MISENSOR_TOK_TERM, 0, 0} -}; - -static struct misensor_reg const mt9m114_iq[] = { - /* [Step3-Recommended] [Sensor optimization] */ - {MISENSOR_16BIT, 0x316A, 0x8270}, - {MISENSOR_16BIT, 0x316C, 0x8270}, - {MISENSOR_16BIT, 0x3ED0, 0x2305}, - {MISENSOR_16BIT, 0x3ED2, 0x77CF}, - {MISENSOR_16BIT, 0x316E, 0x8202}, - {MISENSOR_16BIT, 0x3180, 0x87FF}, - {MISENSOR_16BIT, 0x30D4, 0x6080}, - {MISENSOR_16BIT, 0xA802, 0x0008}, - - /* This register is from vender to avoid low light color noise */ - {MISENSOR_16BIT, 
0x31E0, 0x0001}, - - /* LOAD=Errata item 1 */ - {MISENSOR_16BIT, 0x3E14, 0xFF39}, - - /* LOAD=Errata item 2 */ - {MISENSOR_16BIT, 0x301A, 0x8234}, - - /* - * LOAD=Errata item 3 - * LOAD=Patch 0202; - * Feature Recommended; Black level correction fix - */ - {MISENSOR_16BIT, 0x0982, 0x0001}, - {MISENSOR_16BIT, 0x098A, 0x5000}, - {MISENSOR_16BIT, 0xD000, 0x70CF}, - {MISENSOR_16BIT, 0xD002, 0xFFFF}, - {MISENSOR_16BIT, 0xD004, 0xC5D4}, - {MISENSOR_16BIT, 0xD006, 0x903A}, - {MISENSOR_16BIT, 0xD008, 0x2144}, - {MISENSOR_16BIT, 0xD00A, 0x0C00}, - {MISENSOR_16BIT, 0xD00C, 0x2186}, - {MISENSOR_16BIT, 0xD00E, 0x0FF3}, - {MISENSOR_16BIT, 0xD010, 0xB844}, - {MISENSOR_16BIT, 0xD012, 0xB948}, - {MISENSOR_16BIT, 0xD014, 0xE082}, - {MISENSOR_16BIT, 0xD016, 0x20CC}, - {MISENSOR_16BIT, 0xD018, 0x80E2}, - {MISENSOR_16BIT, 0xD01A, 0x21CC}, - {MISENSOR_16BIT, 0xD01C, 0x80A2}, - {MISENSOR_16BIT, 0xD01E, 0x21CC}, - {MISENSOR_16BIT, 0xD020, 0x80E2}, - {MISENSOR_16BIT, 0xD022, 0xF404}, - {MISENSOR_16BIT, 0xD024, 0xD801}, - {MISENSOR_16BIT, 0xD026, 0xF003}, - {MISENSOR_16BIT, 0xD028, 0xD800}, - {MISENSOR_16BIT, 0xD02A, 0x7EE0}, - {MISENSOR_16BIT, 0xD02C, 0xC0F1}, - {MISENSOR_16BIT, 0xD02E, 0x08BA}, - - {MISENSOR_16BIT, 0xD030, 0x0600}, - {MISENSOR_16BIT, 0xD032, 0xC1A1}, - {MISENSOR_16BIT, 0xD034, 0x76CF}, - {MISENSOR_16BIT, 0xD036, 0xFFFF}, - {MISENSOR_16BIT, 0xD038, 0xC130}, - {MISENSOR_16BIT, 0xD03A, 0x6E04}, - {MISENSOR_16BIT, 0xD03C, 0xC040}, - {MISENSOR_16BIT, 0xD03E, 0x71CF}, - {MISENSOR_16BIT, 0xD040, 0xFFFF}, - {MISENSOR_16BIT, 0xD042, 0xC790}, - {MISENSOR_16BIT, 0xD044, 0x8103}, - {MISENSOR_16BIT, 0xD046, 0x77CF}, - {MISENSOR_16BIT, 0xD048, 0xFFFF}, - {MISENSOR_16BIT, 0xD04A, 0xC7C0}, - {MISENSOR_16BIT, 0xD04C, 0xE001}, - {MISENSOR_16BIT, 0xD04E, 0xA103}, - {MISENSOR_16BIT, 0xD050, 0xD800}, - {MISENSOR_16BIT, 0xD052, 0x0C6A}, - {MISENSOR_16BIT, 0xD054, 0x04E0}, - {MISENSOR_16BIT, 0xD056, 0xB89E}, - {MISENSOR_16BIT, 0xD058, 0x7508}, - {MISENSOR_16BIT, 0xD05A, 0x8E1C}, - 
{MISENSOR_16BIT, 0xD05C, 0x0809}, - {MISENSOR_16BIT, 0xD05E, 0x0191}, - - {MISENSOR_16BIT, 0xD060, 0xD801}, - {MISENSOR_16BIT, 0xD062, 0xAE1D}, - {MISENSOR_16BIT, 0xD064, 0xE580}, - {MISENSOR_16BIT, 0xD066, 0x20CA}, - {MISENSOR_16BIT, 0xD068, 0x0022}, - {MISENSOR_16BIT, 0xD06A, 0x20CF}, - {MISENSOR_16BIT, 0xD06C, 0x0522}, - {MISENSOR_16BIT, 0xD06E, 0x0C5C}, - {MISENSOR_16BIT, 0xD070, 0x04E2}, - {MISENSOR_16BIT, 0xD072, 0x21CA}, - {MISENSOR_16BIT, 0xD074, 0x0062}, - {MISENSOR_16BIT, 0xD076, 0xE580}, - {MISENSOR_16BIT, 0xD078, 0xD901}, - {MISENSOR_16BIT, 0xD07A, 0x79C0}, - {MISENSOR_16BIT, 0xD07C, 0xD800}, - {MISENSOR_16BIT, 0xD07E, 0x0BE6}, - {MISENSOR_16BIT, 0xD080, 0x04E0}, - {MISENSOR_16BIT, 0xD082, 0xB89E}, - {MISENSOR_16BIT, 0xD084, 0x70CF}, - {MISENSOR_16BIT, 0xD086, 0xFFFF}, - {MISENSOR_16BIT, 0xD088, 0xC8D4}, - {MISENSOR_16BIT, 0xD08A, 0x9002}, - {MISENSOR_16BIT, 0xD08C, 0x0857}, - {MISENSOR_16BIT, 0xD08E, 0x025E}, - - {MISENSOR_16BIT, 0xD090, 0xFFDC}, - {MISENSOR_16BIT, 0xD092, 0xE080}, - {MISENSOR_16BIT, 0xD094, 0x25CC}, - {MISENSOR_16BIT, 0xD096, 0x9022}, - {MISENSOR_16BIT, 0xD098, 0xF225}, - {MISENSOR_16BIT, 0xD09A, 0x1700}, - {MISENSOR_16BIT, 0xD09C, 0x108A}, - {MISENSOR_16BIT, 0xD09E, 0x73CF}, - {MISENSOR_16BIT, 0xD0A0, 0xFF00}, - {MISENSOR_16BIT, 0xD0A2, 0x3174}, - {MISENSOR_16BIT, 0xD0A4, 0x9307}, - {MISENSOR_16BIT, 0xD0A6, 0x2A04}, - {MISENSOR_16BIT, 0xD0A8, 0x103E}, - {MISENSOR_16BIT, 0xD0AA, 0x9328}, - {MISENSOR_16BIT, 0xD0AC, 0x2942}, - {MISENSOR_16BIT, 0xD0AE, 0x7140}, - {MISENSOR_16BIT, 0xD0B0, 0x2A04}, - {MISENSOR_16BIT, 0xD0B2, 0x107E}, - {MISENSOR_16BIT, 0xD0B4, 0x9349}, - {MISENSOR_16BIT, 0xD0B6, 0x2942}, - {MISENSOR_16BIT, 0xD0B8, 0x7141}, - {MISENSOR_16BIT, 0xD0BA, 0x2A04}, - {MISENSOR_16BIT, 0xD0BC, 0x10BE}, - {MISENSOR_16BIT, 0xD0BE, 0x934A}, - - {MISENSOR_16BIT, 0xD0C0, 0x2942}, - {MISENSOR_16BIT, 0xD0C2, 0x714B}, - {MISENSOR_16BIT, 0xD0C4, 0x2A04}, - {MISENSOR_16BIT, 0xD0C6, 0x10BE}, - {MISENSOR_16BIT, 0xD0C8, 0x130C}, - 
{MISENSOR_16BIT, 0xD0CA, 0x010A}, - {MISENSOR_16BIT, 0xD0CC, 0x2942}, - {MISENSOR_16BIT, 0xD0CE, 0x7142}, - {MISENSOR_16BIT, 0xD0D0, 0x2250}, - {MISENSOR_16BIT, 0xD0D2, 0x13CA}, - {MISENSOR_16BIT, 0xD0D4, 0x1B0C}, - {MISENSOR_16BIT, 0xD0D6, 0x0284}, - {MISENSOR_16BIT, 0xD0D8, 0xB307}, - {MISENSOR_16BIT, 0xD0DA, 0xB328}, - {MISENSOR_16BIT, 0xD0DC, 0x1B12}, - {MISENSOR_16BIT, 0xD0DE, 0x02C4}, - {MISENSOR_16BIT, 0xD0E0, 0xB34A}, - {MISENSOR_16BIT, 0xD0E2, 0xED88}, - {MISENSOR_16BIT, 0xD0E4, 0x71CF}, - {MISENSOR_16BIT, 0xD0E6, 0xFF00}, - {MISENSOR_16BIT, 0xD0E8, 0x3174}, - {MISENSOR_16BIT, 0xD0EA, 0x9106}, - {MISENSOR_16BIT, 0xD0EC, 0xB88F}, - {MISENSOR_16BIT, 0xD0EE, 0xB106}, - - {MISENSOR_16BIT, 0xD0F0, 0x210A}, - {MISENSOR_16BIT, 0xD0F2, 0x8340}, - {MISENSOR_16BIT, 0xD0F4, 0xC000}, - {MISENSOR_16BIT, 0xD0F6, 0x21CA}, - {MISENSOR_16BIT, 0xD0F8, 0x0062}, - {MISENSOR_16BIT, 0xD0FA, 0x20F0}, - {MISENSOR_16BIT, 0xD0FC, 0x0040}, - {MISENSOR_16BIT, 0xD0FE, 0x0B02}, - {MISENSOR_16BIT, 0xD100, 0x0320}, - {MISENSOR_16BIT, 0xD102, 0xD901}, - {MISENSOR_16BIT, 0xD104, 0x07F1}, - {MISENSOR_16BIT, 0xD106, 0x05E0}, - {MISENSOR_16BIT, 0xD108, 0xC0A1}, - {MISENSOR_16BIT, 0xD10A, 0x78E0}, - {MISENSOR_16BIT, 0xD10C, 0xC0F1}, - {MISENSOR_16BIT, 0xD10E, 0x71CF}, - {MISENSOR_16BIT, 0xD110, 0xFFFF}, - {MISENSOR_16BIT, 0xD112, 0xC7C0}, - {MISENSOR_16BIT, 0xD114, 0xD840}, - {MISENSOR_16BIT, 0xD116, 0xA900}, - {MISENSOR_16BIT, 0xD118, 0x71CF}, - {MISENSOR_16BIT, 0xD11A, 0xFFFF}, - {MISENSOR_16BIT, 0xD11C, 0xD02C}, - {MISENSOR_16BIT, 0xD11E, 0xD81E}, - - {MISENSOR_16BIT, 0xD120, 0x0A5A}, - {MISENSOR_16BIT, 0xD122, 0x04E0}, - {MISENSOR_16BIT, 0xD124, 0xDA00}, - {MISENSOR_16BIT, 0xD126, 0xD800}, - {MISENSOR_16BIT, 0xD128, 0xC0D1}, - {MISENSOR_16BIT, 0xD12A, 0x7EE0}, - - {MISENSOR_16BIT, 0x098E, 0x0000}, - {MISENSOR_16BIT, 0xE000, 0x010C}, - {MISENSOR_16BIT, 0xE002, 0x0202}, - {MISENSOR_16BIT, 0xE004, 0x4103}, - {MISENSOR_16BIT, 0xE006, 0x0202}, - {MISENSOR_16BIT, 0x0080, 0xFFF0}, - 
{MISENSOR_16BIT, 0x0080, 0xFFF1}, - - /* LOAD=Patch 0302; Feature Recommended; Adaptive Sensitivity */ - {MISENSOR_16BIT, 0x0982, 0x0001}, - {MISENSOR_16BIT, 0x098A, 0x512C}, - {MISENSOR_16BIT, 0xD12C, 0x70CF}, - {MISENSOR_16BIT, 0xD12E, 0xFFFF}, - {MISENSOR_16BIT, 0xD130, 0xC5D4}, - {MISENSOR_16BIT, 0xD132, 0x903A}, - {MISENSOR_16BIT, 0xD134, 0x2144}, - {MISENSOR_16BIT, 0xD136, 0x0C00}, - {MISENSOR_16BIT, 0xD138, 0x2186}, - {MISENSOR_16BIT, 0xD13A, 0x0FF3}, - {MISENSOR_16BIT, 0xD13C, 0xB844}, - {MISENSOR_16BIT, 0xD13E, 0x262F}, - {MISENSOR_16BIT, 0xD140, 0xF008}, - {MISENSOR_16BIT, 0xD142, 0xB948}, - {MISENSOR_16BIT, 0xD144, 0x21CC}, - {MISENSOR_16BIT, 0xD146, 0x8021}, - {MISENSOR_16BIT, 0xD148, 0xD801}, - {MISENSOR_16BIT, 0xD14A, 0xF203}, - {MISENSOR_16BIT, 0xD14C, 0xD800}, - {MISENSOR_16BIT, 0xD14E, 0x7EE0}, - {MISENSOR_16BIT, 0xD150, 0xC0F1}, - {MISENSOR_16BIT, 0xD152, 0x71CF}, - {MISENSOR_16BIT, 0xD154, 0xFFFF}, - {MISENSOR_16BIT, 0xD156, 0xC610}, - {MISENSOR_16BIT, 0xD158, 0x910E}, - {MISENSOR_16BIT, 0xD15A, 0x208C}, - {MISENSOR_16BIT, 0xD15C, 0x8014}, - {MISENSOR_16BIT, 0xD15E, 0xF418}, - {MISENSOR_16BIT, 0xD160, 0x910F}, - {MISENSOR_16BIT, 0xD162, 0x208C}, - {MISENSOR_16BIT, 0xD164, 0x800F}, - {MISENSOR_16BIT, 0xD166, 0xF414}, - {MISENSOR_16BIT, 0xD168, 0x9116}, - {MISENSOR_16BIT, 0xD16A, 0x208C}, - {MISENSOR_16BIT, 0xD16C, 0x800A}, - {MISENSOR_16BIT, 0xD16E, 0xF410}, - {MISENSOR_16BIT, 0xD170, 0x9117}, - {MISENSOR_16BIT, 0xD172, 0x208C}, - {MISENSOR_16BIT, 0xD174, 0x8807}, - {MISENSOR_16BIT, 0xD176, 0xF40C}, - {MISENSOR_16BIT, 0xD178, 0x9118}, - {MISENSOR_16BIT, 0xD17A, 0x2086}, - {MISENSOR_16BIT, 0xD17C, 0x0FF3}, - {MISENSOR_16BIT, 0xD17E, 0xB848}, - {MISENSOR_16BIT, 0xD180, 0x080D}, - {MISENSOR_16BIT, 0xD182, 0x0090}, - {MISENSOR_16BIT, 0xD184, 0xFFEA}, - {MISENSOR_16BIT, 0xD186, 0xE081}, - {MISENSOR_16BIT, 0xD188, 0xD801}, - {MISENSOR_16BIT, 0xD18A, 0xF203}, - {MISENSOR_16BIT, 0xD18C, 0xD800}, - {MISENSOR_16BIT, 0xD18E, 0xC0D1}, - {MISENSOR_16BIT, 
0xD190, 0x7EE0}, - {MISENSOR_16BIT, 0xD192, 0x78E0}, - {MISENSOR_16BIT, 0xD194, 0xC0F1}, - {MISENSOR_16BIT, 0xD196, 0x71CF}, - {MISENSOR_16BIT, 0xD198, 0xFFFF}, - {MISENSOR_16BIT, 0xD19A, 0xC610}, - {MISENSOR_16BIT, 0xD19C, 0x910E}, - {MISENSOR_16BIT, 0xD19E, 0x208C}, - {MISENSOR_16BIT, 0xD1A0, 0x800A}, - {MISENSOR_16BIT, 0xD1A2, 0xF418}, - {MISENSOR_16BIT, 0xD1A4, 0x910F}, - {MISENSOR_16BIT, 0xD1A6, 0x208C}, - {MISENSOR_16BIT, 0xD1A8, 0x8807}, - {MISENSOR_16BIT, 0xD1AA, 0xF414}, - {MISENSOR_16BIT, 0xD1AC, 0x9116}, - {MISENSOR_16BIT, 0xD1AE, 0x208C}, - {MISENSOR_16BIT, 0xD1B0, 0x800A}, - {MISENSOR_16BIT, 0xD1B2, 0xF410}, - {MISENSOR_16BIT, 0xD1B4, 0x9117}, - {MISENSOR_16BIT, 0xD1B6, 0x208C}, - {MISENSOR_16BIT, 0xD1B8, 0x8807}, - {MISENSOR_16BIT, 0xD1BA, 0xF40C}, - {MISENSOR_16BIT, 0xD1BC, 0x9118}, - {MISENSOR_16BIT, 0xD1BE, 0x2086}, - {MISENSOR_16BIT, 0xD1C0, 0x0FF3}, - {MISENSOR_16BIT, 0xD1C2, 0xB848}, - {MISENSOR_16BIT, 0xD1C4, 0x080D}, - {MISENSOR_16BIT, 0xD1C6, 0x0090}, - {MISENSOR_16BIT, 0xD1C8, 0xFFD9}, - {MISENSOR_16BIT, 0xD1CA, 0xE080}, - {MISENSOR_16BIT, 0xD1CC, 0xD801}, - {MISENSOR_16BIT, 0xD1CE, 0xF203}, - {MISENSOR_16BIT, 0xD1D0, 0xD800}, - {MISENSOR_16BIT, 0xD1D2, 0xF1DF}, - {MISENSOR_16BIT, 0xD1D4, 0x9040}, - {MISENSOR_16BIT, 0xD1D6, 0x71CF}, - {MISENSOR_16BIT, 0xD1D8, 0xFFFF}, - {MISENSOR_16BIT, 0xD1DA, 0xC5D4}, - {MISENSOR_16BIT, 0xD1DC, 0xB15A}, - {MISENSOR_16BIT, 0xD1DE, 0x9041}, - {MISENSOR_16BIT, 0xD1E0, 0x73CF}, - {MISENSOR_16BIT, 0xD1E2, 0xFFFF}, - {MISENSOR_16BIT, 0xD1E4, 0xC7D0}, - {MISENSOR_16BIT, 0xD1E6, 0xB140}, - {MISENSOR_16BIT, 0xD1E8, 0x9042}, - {MISENSOR_16BIT, 0xD1EA, 0xB141}, - {MISENSOR_16BIT, 0xD1EC, 0x9043}, - {MISENSOR_16BIT, 0xD1EE, 0xB142}, - {MISENSOR_16BIT, 0xD1F0, 0x9044}, - {MISENSOR_16BIT, 0xD1F2, 0xB143}, - {MISENSOR_16BIT, 0xD1F4, 0x9045}, - {MISENSOR_16BIT, 0xD1F6, 0xB147}, - {MISENSOR_16BIT, 0xD1F8, 0x9046}, - {MISENSOR_16BIT, 0xD1FA, 0xB148}, - {MISENSOR_16BIT, 0xD1FC, 0x9047}, - {MISENSOR_16BIT, 0xD1FE, 0xB14B}, - 
{MISENSOR_16BIT, 0xD200, 0x9048}, - {MISENSOR_16BIT, 0xD202, 0xB14C}, - {MISENSOR_16BIT, 0xD204, 0x9049}, - {MISENSOR_16BIT, 0xD206, 0x1958}, - {MISENSOR_16BIT, 0xD208, 0x0084}, - {MISENSOR_16BIT, 0xD20A, 0x904A}, - {MISENSOR_16BIT, 0xD20C, 0x195A}, - {MISENSOR_16BIT, 0xD20E, 0x0084}, - {MISENSOR_16BIT, 0xD210, 0x8856}, - {MISENSOR_16BIT, 0xD212, 0x1B36}, - {MISENSOR_16BIT, 0xD214, 0x8082}, - {MISENSOR_16BIT, 0xD216, 0x8857}, - {MISENSOR_16BIT, 0xD218, 0x1B37}, - {MISENSOR_16BIT, 0xD21A, 0x8082}, - {MISENSOR_16BIT, 0xD21C, 0x904C}, - {MISENSOR_16BIT, 0xD21E, 0x19A7}, - {MISENSOR_16BIT, 0xD220, 0x009C}, - {MISENSOR_16BIT, 0xD222, 0x881A}, - {MISENSOR_16BIT, 0xD224, 0x7FE0}, - {MISENSOR_16BIT, 0xD226, 0x1B54}, - {MISENSOR_16BIT, 0xD228, 0x8002}, - {MISENSOR_16BIT, 0xD22A, 0x78E0}, - {MISENSOR_16BIT, 0xD22C, 0x71CF}, - {MISENSOR_16BIT, 0xD22E, 0xFFFF}, - {MISENSOR_16BIT, 0xD230, 0xC350}, - {MISENSOR_16BIT, 0xD232, 0xD828}, - {MISENSOR_16BIT, 0xD234, 0xA90B}, - {MISENSOR_16BIT, 0xD236, 0x8100}, - {MISENSOR_16BIT, 0xD238, 0x01C5}, - {MISENSOR_16BIT, 0xD23A, 0x0320}, - {MISENSOR_16BIT, 0xD23C, 0xD900}, - {MISENSOR_16BIT, 0xD23E, 0x78E0}, - {MISENSOR_16BIT, 0xD240, 0x220A}, - {MISENSOR_16BIT, 0xD242, 0x1F80}, - {MISENSOR_16BIT, 0xD244, 0xFFFF}, - {MISENSOR_16BIT, 0xD246, 0xD4E0}, - {MISENSOR_16BIT, 0xD248, 0xC0F1}, - {MISENSOR_16BIT, 0xD24A, 0x0811}, - {MISENSOR_16BIT, 0xD24C, 0x0051}, - {MISENSOR_16BIT, 0xD24E, 0x2240}, - {MISENSOR_16BIT, 0xD250, 0x1200}, - {MISENSOR_16BIT, 0xD252, 0xFFE1}, - {MISENSOR_16BIT, 0xD254, 0xD801}, - {MISENSOR_16BIT, 0xD256, 0xF006}, - {MISENSOR_16BIT, 0xD258, 0x2240}, - {MISENSOR_16BIT, 0xD25A, 0x1900}, - {MISENSOR_16BIT, 0xD25C, 0xFFDE}, - {MISENSOR_16BIT, 0xD25E, 0xD802}, - {MISENSOR_16BIT, 0xD260, 0x1A05}, - {MISENSOR_16BIT, 0xD262, 0x1002}, - {MISENSOR_16BIT, 0xD264, 0xFFF2}, - {MISENSOR_16BIT, 0xD266, 0xF195}, - {MISENSOR_16BIT, 0xD268, 0xC0F1}, - {MISENSOR_16BIT, 0xD26A, 0x0E7E}, - {MISENSOR_16BIT, 0xD26C, 0x05C0}, - {MISENSOR_16BIT, 
0xD26E, 0x75CF}, - {MISENSOR_16BIT, 0xD270, 0xFFFF}, - {MISENSOR_16BIT, 0xD272, 0xC84C}, - {MISENSOR_16BIT, 0xD274, 0x9502}, - {MISENSOR_16BIT, 0xD276, 0x77CF}, - {MISENSOR_16BIT, 0xD278, 0xFFFF}, - {MISENSOR_16BIT, 0xD27A, 0xC344}, - {MISENSOR_16BIT, 0xD27C, 0x2044}, - {MISENSOR_16BIT, 0xD27E, 0x008E}, - {MISENSOR_16BIT, 0xD280, 0xB8A1}, - {MISENSOR_16BIT, 0xD282, 0x0926}, - {MISENSOR_16BIT, 0xD284, 0x03E0}, - {MISENSOR_16BIT, 0xD286, 0xB502}, - {MISENSOR_16BIT, 0xD288, 0x9502}, - {MISENSOR_16BIT, 0xD28A, 0x952E}, - {MISENSOR_16BIT, 0xD28C, 0x7E05}, - {MISENSOR_16BIT, 0xD28E, 0xB5C2}, - {MISENSOR_16BIT, 0xD290, 0x70CF}, - {MISENSOR_16BIT, 0xD292, 0xFFFF}, - {MISENSOR_16BIT, 0xD294, 0xC610}, - {MISENSOR_16BIT, 0xD296, 0x099A}, - {MISENSOR_16BIT, 0xD298, 0x04A0}, - {MISENSOR_16BIT, 0xD29A, 0xB026}, - {MISENSOR_16BIT, 0xD29C, 0x0E02}, - {MISENSOR_16BIT, 0xD29E, 0x0560}, - {MISENSOR_16BIT, 0xD2A0, 0xDE00}, - {MISENSOR_16BIT, 0xD2A2, 0x0A12}, - {MISENSOR_16BIT, 0xD2A4, 0x0320}, - {MISENSOR_16BIT, 0xD2A6, 0xB7C4}, - {MISENSOR_16BIT, 0xD2A8, 0x0B36}, - {MISENSOR_16BIT, 0xD2AA, 0x03A0}, - {MISENSOR_16BIT, 0xD2AC, 0x70C9}, - {MISENSOR_16BIT, 0xD2AE, 0x9502}, - {MISENSOR_16BIT, 0xD2B0, 0x7608}, - {MISENSOR_16BIT, 0xD2B2, 0xB8A8}, - {MISENSOR_16BIT, 0xD2B4, 0xB502}, - {MISENSOR_16BIT, 0xD2B6, 0x70CF}, - {MISENSOR_16BIT, 0xD2B8, 0x0000}, - {MISENSOR_16BIT, 0xD2BA, 0x5536}, - {MISENSOR_16BIT, 0xD2BC, 0x7860}, - {MISENSOR_16BIT, 0xD2BE, 0x2686}, - {MISENSOR_16BIT, 0xD2C0, 0x1FFB}, - {MISENSOR_16BIT, 0xD2C2, 0x9502}, - {MISENSOR_16BIT, 0xD2C4, 0x78C5}, - {MISENSOR_16BIT, 0xD2C6, 0x0631}, - {MISENSOR_16BIT, 0xD2C8, 0x05E0}, - {MISENSOR_16BIT, 0xD2CA, 0xB502}, - {MISENSOR_16BIT, 0xD2CC, 0x72CF}, - {MISENSOR_16BIT, 0xD2CE, 0xFFFF}, - {MISENSOR_16BIT, 0xD2D0, 0xC5D4}, - {MISENSOR_16BIT, 0xD2D2, 0x923A}, - {MISENSOR_16BIT, 0xD2D4, 0x73CF}, - {MISENSOR_16BIT, 0xD2D6, 0xFFFF}, - {MISENSOR_16BIT, 0xD2D8, 0xC7D0}, - {MISENSOR_16BIT, 0xD2DA, 0xB020}, - {MISENSOR_16BIT, 0xD2DC, 0x9220}, - 
{MISENSOR_16BIT, 0xD2DE, 0xB021}, - {MISENSOR_16BIT, 0xD2E0, 0x9221}, - {MISENSOR_16BIT, 0xD2E2, 0xB022}, - {MISENSOR_16BIT, 0xD2E4, 0x9222}, - {MISENSOR_16BIT, 0xD2E6, 0xB023}, - {MISENSOR_16BIT, 0xD2E8, 0x9223}, - {MISENSOR_16BIT, 0xD2EA, 0xB024}, - {MISENSOR_16BIT, 0xD2EC, 0x9227}, - {MISENSOR_16BIT, 0xD2EE, 0xB025}, - {MISENSOR_16BIT, 0xD2F0, 0x9228}, - {MISENSOR_16BIT, 0xD2F2, 0xB026}, - {MISENSOR_16BIT, 0xD2F4, 0x922B}, - {MISENSOR_16BIT, 0xD2F6, 0xB027}, - {MISENSOR_16BIT, 0xD2F8, 0x922C}, - {MISENSOR_16BIT, 0xD2FA, 0xB028}, - {MISENSOR_16BIT, 0xD2FC, 0x1258}, - {MISENSOR_16BIT, 0xD2FE, 0x0101}, - {MISENSOR_16BIT, 0xD300, 0xB029}, - {MISENSOR_16BIT, 0xD302, 0x125A}, - {MISENSOR_16BIT, 0xD304, 0x0101}, - {MISENSOR_16BIT, 0xD306, 0xB02A}, - {MISENSOR_16BIT, 0xD308, 0x1336}, - {MISENSOR_16BIT, 0xD30A, 0x8081}, - {MISENSOR_16BIT, 0xD30C, 0xA836}, - {MISENSOR_16BIT, 0xD30E, 0x1337}, - {MISENSOR_16BIT, 0xD310, 0x8081}, - {MISENSOR_16BIT, 0xD312, 0xA837}, - {MISENSOR_16BIT, 0xD314, 0x12A7}, - {MISENSOR_16BIT, 0xD316, 0x0701}, - {MISENSOR_16BIT, 0xD318, 0xB02C}, - {MISENSOR_16BIT, 0xD31A, 0x1354}, - {MISENSOR_16BIT, 0xD31C, 0x8081}, - {MISENSOR_16BIT, 0xD31E, 0x7FE0}, - {MISENSOR_16BIT, 0xD320, 0xA83A}, - {MISENSOR_16BIT, 0xD322, 0x78E0}, - {MISENSOR_16BIT, 0xD324, 0xC0F1}, - {MISENSOR_16BIT, 0xD326, 0x0DC2}, - {MISENSOR_16BIT, 0xD328, 0x05C0}, - {MISENSOR_16BIT, 0xD32A, 0x7608}, - {MISENSOR_16BIT, 0xD32C, 0x09BB}, - {MISENSOR_16BIT, 0xD32E, 0x0010}, - {MISENSOR_16BIT, 0xD330, 0x75CF}, - {MISENSOR_16BIT, 0xD332, 0xFFFF}, - {MISENSOR_16BIT, 0xD334, 0xD4E0}, - {MISENSOR_16BIT, 0xD336, 0x8D21}, - {MISENSOR_16BIT, 0xD338, 0x8D00}, - {MISENSOR_16BIT, 0xD33A, 0x2153}, - {MISENSOR_16BIT, 0xD33C, 0x0003}, - {MISENSOR_16BIT, 0xD33E, 0xB8C0}, - {MISENSOR_16BIT, 0xD340, 0x8D45}, - {MISENSOR_16BIT, 0xD342, 0x0B23}, - {MISENSOR_16BIT, 0xD344, 0x0000}, - {MISENSOR_16BIT, 0xD346, 0xEA8F}, - {MISENSOR_16BIT, 0xD348, 0x0915}, - {MISENSOR_16BIT, 0xD34A, 0x001E}, - {MISENSOR_16BIT, 
0xD34C, 0xFF81}, - {MISENSOR_16BIT, 0xD34E, 0xE808}, - {MISENSOR_16BIT, 0xD350, 0x2540}, - {MISENSOR_16BIT, 0xD352, 0x1900}, - {MISENSOR_16BIT, 0xD354, 0xFFDE}, - {MISENSOR_16BIT, 0xD356, 0x8D00}, - {MISENSOR_16BIT, 0xD358, 0xB880}, - {MISENSOR_16BIT, 0xD35A, 0xF004}, - {MISENSOR_16BIT, 0xD35C, 0x8D00}, - {MISENSOR_16BIT, 0xD35E, 0xB8A0}, - {MISENSOR_16BIT, 0xD360, 0xAD00}, - {MISENSOR_16BIT, 0xD362, 0x8D05}, - {MISENSOR_16BIT, 0xD364, 0xE081}, - {MISENSOR_16BIT, 0xD366, 0x20CC}, - {MISENSOR_16BIT, 0xD368, 0x80A2}, - {MISENSOR_16BIT, 0xD36A, 0xDF00}, - {MISENSOR_16BIT, 0xD36C, 0xF40A}, - {MISENSOR_16BIT, 0xD36E, 0x71CF}, - {MISENSOR_16BIT, 0xD370, 0xFFFF}, - {MISENSOR_16BIT, 0xD372, 0xC84C}, - {MISENSOR_16BIT, 0xD374, 0x9102}, - {MISENSOR_16BIT, 0xD376, 0x7708}, - {MISENSOR_16BIT, 0xD378, 0xB8A6}, - {MISENSOR_16BIT, 0xD37A, 0x2786}, - {MISENSOR_16BIT, 0xD37C, 0x1FFE}, - {MISENSOR_16BIT, 0xD37E, 0xB102}, - {MISENSOR_16BIT, 0xD380, 0x0B42}, - {MISENSOR_16BIT, 0xD382, 0x0180}, - {MISENSOR_16BIT, 0xD384, 0x0E3E}, - {MISENSOR_16BIT, 0xD386, 0x0180}, - {MISENSOR_16BIT, 0xD388, 0x0F4A}, - {MISENSOR_16BIT, 0xD38A, 0x0160}, - {MISENSOR_16BIT, 0xD38C, 0x70C9}, - {MISENSOR_16BIT, 0xD38E, 0x8D05}, - {MISENSOR_16BIT, 0xD390, 0xE081}, - {MISENSOR_16BIT, 0xD392, 0x20CC}, - {MISENSOR_16BIT, 0xD394, 0x80A2}, - {MISENSOR_16BIT, 0xD396, 0xF429}, - {MISENSOR_16BIT, 0xD398, 0x76CF}, - {MISENSOR_16BIT, 0xD39A, 0xFFFF}, - {MISENSOR_16BIT, 0xD39C, 0xC84C}, - {MISENSOR_16BIT, 0xD39E, 0x082D}, - {MISENSOR_16BIT, 0xD3A0, 0x0051}, - {MISENSOR_16BIT, 0xD3A2, 0x70CF}, - {MISENSOR_16BIT, 0xD3A4, 0xFFFF}, - {MISENSOR_16BIT, 0xD3A6, 0xC90C}, - {MISENSOR_16BIT, 0xD3A8, 0x8805}, - {MISENSOR_16BIT, 0xD3AA, 0x09B6}, - {MISENSOR_16BIT, 0xD3AC, 0x0360}, - {MISENSOR_16BIT, 0xD3AE, 0xD908}, - {MISENSOR_16BIT, 0xD3B0, 0x2099}, - {MISENSOR_16BIT, 0xD3B2, 0x0802}, - {MISENSOR_16BIT, 0xD3B4, 0x9634}, - {MISENSOR_16BIT, 0xD3B6, 0xB503}, - {MISENSOR_16BIT, 0xD3B8, 0x7902}, - {MISENSOR_16BIT, 0xD3BA, 0x1523}, - 
{MISENSOR_16BIT, 0xD3BC, 0x1080}, - {MISENSOR_16BIT, 0xD3BE, 0xB634}, - {MISENSOR_16BIT, 0xD3C0, 0xE001}, - {MISENSOR_16BIT, 0xD3C2, 0x1D23}, - {MISENSOR_16BIT, 0xD3C4, 0x1002}, - {MISENSOR_16BIT, 0xD3C6, 0xF00B}, - {MISENSOR_16BIT, 0xD3C8, 0x9634}, - {MISENSOR_16BIT, 0xD3CA, 0x9503}, - {MISENSOR_16BIT, 0xD3CC, 0x6038}, - {MISENSOR_16BIT, 0xD3CE, 0xB614}, - {MISENSOR_16BIT, 0xD3D0, 0x153F}, - {MISENSOR_16BIT, 0xD3D2, 0x1080}, - {MISENSOR_16BIT, 0xD3D4, 0xE001}, - {MISENSOR_16BIT, 0xD3D6, 0x1D3F}, - {MISENSOR_16BIT, 0xD3D8, 0x1002}, - {MISENSOR_16BIT, 0xD3DA, 0xFFA4}, - {MISENSOR_16BIT, 0xD3DC, 0x9602}, - {MISENSOR_16BIT, 0xD3DE, 0x7F05}, - {MISENSOR_16BIT, 0xD3E0, 0xD800}, - {MISENSOR_16BIT, 0xD3E2, 0xB6E2}, - {MISENSOR_16BIT, 0xD3E4, 0xAD05}, - {MISENSOR_16BIT, 0xD3E6, 0x0511}, - {MISENSOR_16BIT, 0xD3E8, 0x05E0}, - {MISENSOR_16BIT, 0xD3EA, 0xD800}, - {MISENSOR_16BIT, 0xD3EC, 0xC0F1}, - {MISENSOR_16BIT, 0xD3EE, 0x0CFE}, - {MISENSOR_16BIT, 0xD3F0, 0x05C0}, - {MISENSOR_16BIT, 0xD3F2, 0x0A96}, - {MISENSOR_16BIT, 0xD3F4, 0x05A0}, - {MISENSOR_16BIT, 0xD3F6, 0x7608}, - {MISENSOR_16BIT, 0xD3F8, 0x0C22}, - {MISENSOR_16BIT, 0xD3FA, 0x0240}, - {MISENSOR_16BIT, 0xD3FC, 0xE080}, - {MISENSOR_16BIT, 0xD3FE, 0x20CA}, - {MISENSOR_16BIT, 0xD400, 0x0F82}, - {MISENSOR_16BIT, 0xD402, 0x0000}, - {MISENSOR_16BIT, 0xD404, 0x190B}, - {MISENSOR_16BIT, 0xD406, 0x0C60}, - {MISENSOR_16BIT, 0xD408, 0x05A2}, - {MISENSOR_16BIT, 0xD40A, 0x21CA}, - {MISENSOR_16BIT, 0xD40C, 0x0022}, - {MISENSOR_16BIT, 0xD40E, 0x0C56}, - {MISENSOR_16BIT, 0xD410, 0x0240}, - {MISENSOR_16BIT, 0xD412, 0xE806}, - {MISENSOR_16BIT, 0xD414, 0x0E0E}, - {MISENSOR_16BIT, 0xD416, 0x0220}, - {MISENSOR_16BIT, 0xD418, 0x70C9}, - {MISENSOR_16BIT, 0xD41A, 0xF048}, - {MISENSOR_16BIT, 0xD41C, 0x0896}, - {MISENSOR_16BIT, 0xD41E, 0x0440}, - {MISENSOR_16BIT, 0xD420, 0x0E96}, - {MISENSOR_16BIT, 0xD422, 0x0400}, - {MISENSOR_16BIT, 0xD424, 0x0966}, - {MISENSOR_16BIT, 0xD426, 0x0380}, - {MISENSOR_16BIT, 0xD428, 0x75CF}, - {MISENSOR_16BIT, 
0xD42A, 0xFFFF}, - {MISENSOR_16BIT, 0xD42C, 0xD4E0}, - {MISENSOR_16BIT, 0xD42E, 0x8D00}, - {MISENSOR_16BIT, 0xD430, 0x084D}, - {MISENSOR_16BIT, 0xD432, 0x001E}, - {MISENSOR_16BIT, 0xD434, 0xFF47}, - {MISENSOR_16BIT, 0xD436, 0x080D}, - {MISENSOR_16BIT, 0xD438, 0x0050}, - {MISENSOR_16BIT, 0xD43A, 0xFF57}, - {MISENSOR_16BIT, 0xD43C, 0x0841}, - {MISENSOR_16BIT, 0xD43E, 0x0051}, - {MISENSOR_16BIT, 0xD440, 0x8D04}, - {MISENSOR_16BIT, 0xD442, 0x9521}, - {MISENSOR_16BIT, 0xD444, 0xE064}, - {MISENSOR_16BIT, 0xD446, 0x790C}, - {MISENSOR_16BIT, 0xD448, 0x702F}, - {MISENSOR_16BIT, 0xD44A, 0x0CE2}, - {MISENSOR_16BIT, 0xD44C, 0x05E0}, - {MISENSOR_16BIT, 0xD44E, 0xD964}, - {MISENSOR_16BIT, 0xD450, 0x72CF}, - {MISENSOR_16BIT, 0xD452, 0xFFFF}, - {MISENSOR_16BIT, 0xD454, 0xC700}, - {MISENSOR_16BIT, 0xD456, 0x9235}, - {MISENSOR_16BIT, 0xD458, 0x0811}, - {MISENSOR_16BIT, 0xD45A, 0x0043}, - {MISENSOR_16BIT, 0xD45C, 0xFF3D}, - {MISENSOR_16BIT, 0xD45E, 0x080D}, - {MISENSOR_16BIT, 0xD460, 0x0051}, - {MISENSOR_16BIT, 0xD462, 0xD801}, - {MISENSOR_16BIT, 0xD464, 0xFF77}, - {MISENSOR_16BIT, 0xD466, 0xF025}, - {MISENSOR_16BIT, 0xD468, 0x9501}, - {MISENSOR_16BIT, 0xD46A, 0x9235}, - {MISENSOR_16BIT, 0xD46C, 0x0911}, - {MISENSOR_16BIT, 0xD46E, 0x0003}, - {MISENSOR_16BIT, 0xD470, 0xFF49}, - {MISENSOR_16BIT, 0xD472, 0x080D}, - {MISENSOR_16BIT, 0xD474, 0x0051}, - {MISENSOR_16BIT, 0xD476, 0xD800}, - {MISENSOR_16BIT, 0xD478, 0xFF72}, - {MISENSOR_16BIT, 0xD47A, 0xF01B}, - {MISENSOR_16BIT, 0xD47C, 0x0886}, - {MISENSOR_16BIT, 0xD47E, 0x03E0}, - {MISENSOR_16BIT, 0xD480, 0xD801}, - {MISENSOR_16BIT, 0xD482, 0x0EF6}, - {MISENSOR_16BIT, 0xD484, 0x03C0}, - {MISENSOR_16BIT, 0xD486, 0x0F52}, - {MISENSOR_16BIT, 0xD488, 0x0340}, - {MISENSOR_16BIT, 0xD48A, 0x0DBA}, - {MISENSOR_16BIT, 0xD48C, 0x0200}, - {MISENSOR_16BIT, 0xD48E, 0x0AF6}, - {MISENSOR_16BIT, 0xD490, 0x0440}, - {MISENSOR_16BIT, 0xD492, 0x0C22}, - {MISENSOR_16BIT, 0xD494, 0x0400}, - {MISENSOR_16BIT, 0xD496, 0x0D72}, - {MISENSOR_16BIT, 0xD498, 0x0440}, - 
{MISENSOR_16BIT, 0xD49A, 0x0DC2}, - {MISENSOR_16BIT, 0xD49C, 0x0200}, - {MISENSOR_16BIT, 0xD49E, 0x0972}, - {MISENSOR_16BIT, 0xD4A0, 0x0440}, - {MISENSOR_16BIT, 0xD4A2, 0x0D3A}, - {MISENSOR_16BIT, 0xD4A4, 0x0220}, - {MISENSOR_16BIT, 0xD4A6, 0xD820}, - {MISENSOR_16BIT, 0xD4A8, 0x0BFA}, - {MISENSOR_16BIT, 0xD4AA, 0x0260}, - {MISENSOR_16BIT, 0xD4AC, 0x70C9}, - {MISENSOR_16BIT, 0xD4AE, 0x0451}, - {MISENSOR_16BIT, 0xD4B0, 0x05C0}, - {MISENSOR_16BIT, 0xD4B2, 0x78E0}, - {MISENSOR_16BIT, 0xD4B4, 0xD900}, - {MISENSOR_16BIT, 0xD4B6, 0xF00A}, - {MISENSOR_16BIT, 0xD4B8, 0x70CF}, - {MISENSOR_16BIT, 0xD4BA, 0xFFFF}, - {MISENSOR_16BIT, 0xD4BC, 0xD520}, - {MISENSOR_16BIT, 0xD4BE, 0x7835}, - {MISENSOR_16BIT, 0xD4C0, 0x8041}, - {MISENSOR_16BIT, 0xD4C2, 0x8000}, - {MISENSOR_16BIT, 0xD4C4, 0xE102}, - {MISENSOR_16BIT, 0xD4C6, 0xA040}, - {MISENSOR_16BIT, 0xD4C8, 0x09F1}, - {MISENSOR_16BIT, 0xD4CA, 0x8114}, - {MISENSOR_16BIT, 0xD4CC, 0x71CF}, - {MISENSOR_16BIT, 0xD4CE, 0xFFFF}, - {MISENSOR_16BIT, 0xD4D0, 0xD4E0}, - {MISENSOR_16BIT, 0xD4D2, 0x70CF}, - {MISENSOR_16BIT, 0xD4D4, 0xFFFF}, - {MISENSOR_16BIT, 0xD4D6, 0xC594}, - {MISENSOR_16BIT, 0xD4D8, 0xB03A}, - {MISENSOR_16BIT, 0xD4DA, 0x7FE0}, - {MISENSOR_16BIT, 0xD4DC, 0xD800}, - {MISENSOR_16BIT, 0xD4DE, 0x0000}, - {MISENSOR_16BIT, 0xD4E0, 0x0000}, - {MISENSOR_16BIT, 0xD4E2, 0x0500}, - {MISENSOR_16BIT, 0xD4E4, 0x0500}, - {MISENSOR_16BIT, 0xD4E6, 0x0200}, - {MISENSOR_16BIT, 0xD4E8, 0x0330}, - {MISENSOR_16BIT, 0xD4EA, 0x0000}, - {MISENSOR_16BIT, 0xD4EC, 0x0000}, - {MISENSOR_16BIT, 0xD4EE, 0x03CD}, - {MISENSOR_16BIT, 0xD4F0, 0x050D}, - {MISENSOR_16BIT, 0xD4F2, 0x01C5}, - {MISENSOR_16BIT, 0xD4F4, 0x03B3}, - {MISENSOR_16BIT, 0xD4F6, 0x00E0}, - {MISENSOR_16BIT, 0xD4F8, 0x01E3}, - {MISENSOR_16BIT, 0xD4FA, 0x0280}, - {MISENSOR_16BIT, 0xD4FC, 0x01E0}, - {MISENSOR_16BIT, 0xD4FE, 0x0109}, - {MISENSOR_16BIT, 0xD500, 0x0080}, - {MISENSOR_16BIT, 0xD502, 0x0500}, - {MISENSOR_16BIT, 0xD504, 0x0000}, - {MISENSOR_16BIT, 0xD506, 0x0000}, - {MISENSOR_16BIT, 
0xD508, 0x0000}, - {MISENSOR_16BIT, 0xD50A, 0x0000}, - {MISENSOR_16BIT, 0xD50C, 0x0000}, - {MISENSOR_16BIT, 0xD50E, 0x0000}, - {MISENSOR_16BIT, 0xD510, 0x0000}, - {MISENSOR_16BIT, 0xD512, 0x0000}, - {MISENSOR_16BIT, 0xD514, 0x0000}, - {MISENSOR_16BIT, 0xD516, 0x0000}, - {MISENSOR_16BIT, 0xD518, 0x0000}, - {MISENSOR_16BIT, 0xD51A, 0x0000}, - {MISENSOR_16BIT, 0xD51C, 0x0000}, - {MISENSOR_16BIT, 0xD51E, 0x0000}, - {MISENSOR_16BIT, 0xD520, 0xFFFF}, - {MISENSOR_16BIT, 0xD522, 0xC9B4}, - {MISENSOR_16BIT, 0xD524, 0xFFFF}, - {MISENSOR_16BIT, 0xD526, 0xD324}, - {MISENSOR_16BIT, 0xD528, 0xFFFF}, - {MISENSOR_16BIT, 0xD52A, 0xCA34}, - {MISENSOR_16BIT, 0xD52C, 0xFFFF}, - {MISENSOR_16BIT, 0xD52E, 0xD3EC}, - {MISENSOR_16BIT, 0x098E, 0x0000}, - {MISENSOR_16BIT, 0xE000, 0x04B4}, - {MISENSOR_16BIT, 0xE002, 0x0302}, - {MISENSOR_16BIT, 0xE004, 0x4103}, - {MISENSOR_16BIT, 0xE006, 0x0202}, - {MISENSOR_16BIT, 0x0080, 0xFFF0}, - {MISENSOR_16BIT, 0x0080, 0xFFF1}, - - /* PGA parameter and APGA - * [Step4-APGA] [TP101_MT9M114_APGA] - */ - {MISENSOR_16BIT, 0x098E, 0x495E}, - {MISENSOR_16BIT, 0xC95E, 0x0000}, - {MISENSOR_16BIT, 0x3640, 0x02B0}, - {MISENSOR_16BIT, 0x3642, 0x8063}, - {MISENSOR_16BIT, 0x3644, 0x78D0}, - {MISENSOR_16BIT, 0x3646, 0x50CC}, - {MISENSOR_16BIT, 0x3648, 0x3511}, - {MISENSOR_16BIT, 0x364A, 0x0110}, - {MISENSOR_16BIT, 0x364C, 0xBD8A}, - {MISENSOR_16BIT, 0x364E, 0x0CD1}, - {MISENSOR_16BIT, 0x3650, 0x24ED}, - {MISENSOR_16BIT, 0x3652, 0x7C11}, - {MISENSOR_16BIT, 0x3654, 0x0150}, - {MISENSOR_16BIT, 0x3656, 0x124C}, - {MISENSOR_16BIT, 0x3658, 0x3130}, - {MISENSOR_16BIT, 0x365A, 0x508C}, - {MISENSOR_16BIT, 0x365C, 0x21F1}, - {MISENSOR_16BIT, 0x365E, 0x0090}, - {MISENSOR_16BIT, 0x3660, 0xBFCA}, - {MISENSOR_16BIT, 0x3662, 0x0A11}, - {MISENSOR_16BIT, 0x3664, 0x4F4B}, - {MISENSOR_16BIT, 0x3666, 0x28B1}, - {MISENSOR_16BIT, 0x3680, 0x50A9}, - {MISENSOR_16BIT, 0x3682, 0xA04B}, - {MISENSOR_16BIT, 0x3684, 0x0E2D}, - {MISENSOR_16BIT, 0x3686, 0x73EC}, - {MISENSOR_16BIT, 0x3688, 0x164F}, - 
{MISENSOR_16BIT, 0x368A, 0xF829}, - {MISENSOR_16BIT, 0x368C, 0xC1A8}, - {MISENSOR_16BIT, 0x368E, 0xB0EC}, - {MISENSOR_16BIT, 0x3690, 0xE76A}, - {MISENSOR_16BIT, 0x3692, 0x69AF}, - {MISENSOR_16BIT, 0x3694, 0x378C}, - {MISENSOR_16BIT, 0x3696, 0xA70D}, - {MISENSOR_16BIT, 0x3698, 0x884F}, - {MISENSOR_16BIT, 0x369A, 0xEE8B}, - {MISENSOR_16BIT, 0x369C, 0x5DEF}, - {MISENSOR_16BIT, 0x369E, 0x27CC}, - {MISENSOR_16BIT, 0x36A0, 0xCAAC}, - {MISENSOR_16BIT, 0x36A2, 0x840E}, - {MISENSOR_16BIT, 0x36A4, 0xDAA9}, - {MISENSOR_16BIT, 0x36A6, 0xF00C}, - {MISENSOR_16BIT, 0x36C0, 0x1371}, - {MISENSOR_16BIT, 0x36C2, 0x272F}, - {MISENSOR_16BIT, 0x36C4, 0x2293}, - {MISENSOR_16BIT, 0x36C6, 0xE6D0}, - {MISENSOR_16BIT, 0x36C8, 0xEC32}, - {MISENSOR_16BIT, 0x36CA, 0x11B1}, - {MISENSOR_16BIT, 0x36CC, 0x7BAF}, - {MISENSOR_16BIT, 0x36CE, 0x5813}, - {MISENSOR_16BIT, 0x36D0, 0xB871}, - {MISENSOR_16BIT, 0x36D2, 0x8913}, - {MISENSOR_16BIT, 0x36D4, 0x4610}, - {MISENSOR_16BIT, 0x36D6, 0x7EEE}, - {MISENSOR_16BIT, 0x36D8, 0x0DF3}, - {MISENSOR_16BIT, 0x36DA, 0xB84F}, - {MISENSOR_16BIT, 0x36DC, 0xB532}, - {MISENSOR_16BIT, 0x36DE, 0x1171}, - {MISENSOR_16BIT, 0x36E0, 0x13CF}, - {MISENSOR_16BIT, 0x36E2, 0x22F3}, - {MISENSOR_16BIT, 0x36E4, 0xE090}, - {MISENSOR_16BIT, 0x36E6, 0x8133}, - {MISENSOR_16BIT, 0x3700, 0x88AE}, - {MISENSOR_16BIT, 0x3702, 0x00EA}, - {MISENSOR_16BIT, 0x3704, 0x344F}, - {MISENSOR_16BIT, 0x3706, 0xEC88}, - {MISENSOR_16BIT, 0x3708, 0x3E91}, - {MISENSOR_16BIT, 0x370A, 0xF12D}, - {MISENSOR_16BIT, 0x370C, 0xB0EF}, - {MISENSOR_16BIT, 0x370E, 0x77CD}, - {MISENSOR_16BIT, 0x3710, 0x7930}, - {MISENSOR_16BIT, 0x3712, 0x5C12}, - {MISENSOR_16BIT, 0x3714, 0x500C}, - {MISENSOR_16BIT, 0x3716, 0x22CE}, - {MISENSOR_16BIT, 0x3718, 0x2370}, - {MISENSOR_16BIT, 0x371A, 0x258F}, - {MISENSOR_16BIT, 0x371C, 0x3D30}, - {MISENSOR_16BIT, 0x371E, 0x370C}, - {MISENSOR_16BIT, 0x3720, 0x03ED}, - {MISENSOR_16BIT, 0x3722, 0x9AD0}, - {MISENSOR_16BIT, 0x3724, 0x7ECF}, - {MISENSOR_16BIT, 0x3726, 0x1093}, - {MISENSOR_16BIT, 
0x3740, 0x2391}, - {MISENSOR_16BIT, 0x3742, 0xAAD0}, - {MISENSOR_16BIT, 0x3744, 0x28F2}, - {MISENSOR_16BIT, 0x3746, 0xBA4F}, - {MISENSOR_16BIT, 0x3748, 0xC536}, - {MISENSOR_16BIT, 0x374A, 0x1472}, - {MISENSOR_16BIT, 0x374C, 0xD110}, - {MISENSOR_16BIT, 0x374E, 0x2933}, - {MISENSOR_16BIT, 0x3750, 0xD0D1}, - {MISENSOR_16BIT, 0x3752, 0x9F37}, - {MISENSOR_16BIT, 0x3754, 0x34D1}, - {MISENSOR_16BIT, 0x3756, 0x1C6C}, - {MISENSOR_16BIT, 0x3758, 0x3FD2}, - {MISENSOR_16BIT, 0x375A, 0xCB72}, - {MISENSOR_16BIT, 0x375C, 0xBA96}, - {MISENSOR_16BIT, 0x375E, 0x1551}, - {MISENSOR_16BIT, 0x3760, 0xB74F}, - {MISENSOR_16BIT, 0x3762, 0x1672}, - {MISENSOR_16BIT, 0x3764, 0x84F1}, - {MISENSOR_16BIT, 0x3766, 0xC2D6}, - {MISENSOR_16BIT, 0x3782, 0x01E0}, - {MISENSOR_16BIT, 0x3784, 0x0280}, - {MISENSOR_16BIT, 0x37C0, 0xA6EA}, - {MISENSOR_16BIT, 0x37C2, 0x874B}, - {MISENSOR_16BIT, 0x37C4, 0x85CB}, - {MISENSOR_16BIT, 0x37C6, 0x968A}, - {MISENSOR_16BIT, 0x098E, 0x0000}, - {MISENSOR_16BIT, 0xC960, 0x0AF0}, - {MISENSOR_16BIT, 0xC962, 0x79E2}, - {MISENSOR_16BIT, 0xC964, 0x5EC8}, - {MISENSOR_16BIT, 0xC966, 0x791F}, - {MISENSOR_16BIT, 0xC968, 0x76EE}, - {MISENSOR_16BIT, 0xC96A, 0x0FA0}, - {MISENSOR_16BIT, 0xC96C, 0x7DFA}, - {MISENSOR_16BIT, 0xC96E, 0x7DAF}, - {MISENSOR_16BIT, 0xC970, 0x7E02}, - {MISENSOR_16BIT, 0xC972, 0x7E0A}, - {MISENSOR_16BIT, 0xC974, 0x1964}, - {MISENSOR_16BIT, 0xC976, 0x7CDC}, - {MISENSOR_16BIT, 0xC978, 0x7838}, - {MISENSOR_16BIT, 0xC97A, 0x7C2F}, - {MISENSOR_16BIT, 0xC97C, 0x7792}, - {MISENSOR_16BIT, 0xC95E, 0x0003}, - - /* [Step4-APGA] */ - {MISENSOR_16BIT, 0x098E, 0x0000}, - {MISENSOR_16BIT, 0xC95E, 0x0003}, - - /* [Step5-AWB_CCM]1: LOAD=CCM */ - {MISENSOR_16BIT, 0xC892, 0x0267}, - {MISENSOR_16BIT, 0xC894, 0xFF1A}, - {MISENSOR_16BIT, 0xC896, 0xFFB3}, - {MISENSOR_16BIT, 0xC898, 0xFF80}, - {MISENSOR_16BIT, 0xC89A, 0x0166}, - {MISENSOR_16BIT, 0xC89C, 0x0003}, - {MISENSOR_16BIT, 0xC89E, 0xFF9A}, - {MISENSOR_16BIT, 0xC8A0, 0xFEB4}, - {MISENSOR_16BIT, 0xC8A2, 0x024D}, - 
{MISENSOR_16BIT, 0xC8A4, 0x01BF}, - {MISENSOR_16BIT, 0xC8A6, 0xFF01}, - {MISENSOR_16BIT, 0xC8A8, 0xFFF3}, - {MISENSOR_16BIT, 0xC8AA, 0xFF75}, - {MISENSOR_16BIT, 0xC8AC, 0x0198}, - {MISENSOR_16BIT, 0xC8AE, 0xFFFD}, - {MISENSOR_16BIT, 0xC8B0, 0xFF9A}, - {MISENSOR_16BIT, 0xC8B2, 0xFEE7}, - {MISENSOR_16BIT, 0xC8B4, 0x02A8}, - {MISENSOR_16BIT, 0xC8B6, 0x01D9}, - {MISENSOR_16BIT, 0xC8B8, 0xFF26}, - {MISENSOR_16BIT, 0xC8BA, 0xFFF3}, - {MISENSOR_16BIT, 0xC8BC, 0xFFB3}, - {MISENSOR_16BIT, 0xC8BE, 0x0132}, - {MISENSOR_16BIT, 0xC8C0, 0xFFE8}, - {MISENSOR_16BIT, 0xC8C2, 0xFFDA}, - {MISENSOR_16BIT, 0xC8C4, 0xFECD}, - {MISENSOR_16BIT, 0xC8C6, 0x02C2}, - {MISENSOR_16BIT, 0xC8C8, 0x0075}, - {MISENSOR_16BIT, 0xC8CA, 0x011C}, - {MISENSOR_16BIT, 0xC8CC, 0x009A}, - {MISENSOR_16BIT, 0xC8CE, 0x0105}, - {MISENSOR_16BIT, 0xC8D0, 0x00A4}, - {MISENSOR_16BIT, 0xC8D2, 0x00AC}, - {MISENSOR_16BIT, 0xC8D4, 0x0A8C}, - {MISENSOR_16BIT, 0xC8D6, 0x0F0A}, - {MISENSOR_16BIT, 0xC8D8, 0x1964}, - - /* LOAD=AWB */ - {MISENSOR_16BIT, 0xC914, 0x0000}, - {MISENSOR_16BIT, 0xC916, 0x0000}, - {MISENSOR_16BIT, 0xC918, 0x04FF}, - {MISENSOR_16BIT, 0xC91A, 0x02CF}, - {MISENSOR_16BIT, 0xC904, 0x0033}, - {MISENSOR_16BIT, 0xC906, 0x0040}, - {MISENSOR_8BIT, 0xC8F2, 0x03}, - {MISENSOR_8BIT, 0xC8F3, 0x02}, - {MISENSOR_16BIT, 0xC906, 0x003C}, - {MISENSOR_16BIT, 0xC8F4, 0x0000}, - {MISENSOR_16BIT, 0xC8F6, 0x0000}, - {MISENSOR_16BIT, 0xC8F8, 0x0000}, - {MISENSOR_16BIT, 0xC8FA, 0xE724}, - {MISENSOR_16BIT, 0xC8FC, 0x1583}, - {MISENSOR_16BIT, 0xC8FE, 0x2045}, - {MISENSOR_16BIT, 0xC900, 0x05DC}, - {MISENSOR_16BIT, 0xC902, 0x007C}, - {MISENSOR_8BIT, 0xC90C, 0x80}, - {MISENSOR_8BIT, 0xC90D, 0x80}, - {MISENSOR_8BIT, 0xC90E, 0x80}, - {MISENSOR_8BIT, 0xC90F, 0x88}, - {MISENSOR_8BIT, 0xC910, 0x80}, - {MISENSOR_8BIT, 0xC911, 0x80}, - - /* LOAD=Step7-CPIPE_Preference */ - {MISENSOR_16BIT, 0xC926, 0x0020}, - {MISENSOR_16BIT, 0xC928, 0x009A}, - {MISENSOR_16BIT, 0xC946, 0x0070}, - {MISENSOR_16BIT, 0xC948, 0x00F3}, - {MISENSOR_16BIT, 
0xC952, 0x0020}, - {MISENSOR_16BIT, 0xC954, 0x009A}, - {MISENSOR_8BIT, 0xC92A, 0x80}, - {MISENSOR_8BIT, 0xC92B, 0x4B}, - {MISENSOR_8BIT, 0xC92C, 0x00}, - {MISENSOR_8BIT, 0xC92D, 0xFF}, - {MISENSOR_8BIT, 0xC92E, 0x3C}, - {MISENSOR_8BIT, 0xC92F, 0x02}, - {MISENSOR_8BIT, 0xC930, 0x06}, - {MISENSOR_8BIT, 0xC931, 0x64}, - {MISENSOR_8BIT, 0xC932, 0x01}, - {MISENSOR_8BIT, 0xC933, 0x0C}, - {MISENSOR_8BIT, 0xC934, 0x3C}, - {MISENSOR_8BIT, 0xC935, 0x3C}, - {MISENSOR_8BIT, 0xC936, 0x3C}, - {MISENSOR_8BIT, 0xC937, 0x0F}, - {MISENSOR_8BIT, 0xC938, 0x64}, - {MISENSOR_8BIT, 0xC939, 0x64}, - {MISENSOR_8BIT, 0xC93A, 0x64}, - {MISENSOR_8BIT, 0xC93B, 0x32}, - {MISENSOR_16BIT, 0xC93C, 0x0020}, - {MISENSOR_16BIT, 0xC93E, 0x009A}, - {MISENSOR_16BIT, 0xC940, 0x00DC}, - {MISENSOR_8BIT, 0xC942, 0x38}, - {MISENSOR_8BIT, 0xC943, 0x30}, - {MISENSOR_8BIT, 0xC944, 0x50}, - {MISENSOR_8BIT, 0xC945, 0x19}, - {MISENSOR_16BIT, 0xC94A, 0x0230}, - {MISENSOR_16BIT, 0xC94C, 0x0010}, - {MISENSOR_16BIT, 0xC94E, 0x01CD}, - {MISENSOR_8BIT, 0xC950, 0x05}, - {MISENSOR_8BIT, 0xC951, 0x40}, - {MISENSOR_8BIT, 0xC87B, 0x1B}, - {MISENSOR_8BIT, 0xC878, 0x0E}, - {MISENSOR_16BIT, 0xC890, 0x0080}, - {MISENSOR_16BIT, 0xC886, 0x0100}, - {MISENSOR_16BIT, 0xC87C, 0x005A}, - {MISENSOR_8BIT, 0xB42A, 0x05}, - {MISENSOR_8BIT, 0xA80A, 0x20}, - - /* Speed up AE/AWB */ - {MISENSOR_16BIT, 0x098E, 0x2802}, - {MISENSOR_16BIT, 0xA802, 0x0008}, - {MISENSOR_8BIT, 0xC908, 0x01}, - {MISENSOR_8BIT, 0xC879, 0x01}, - {MISENSOR_8BIT, 0xC909, 0x02}, - {MISENSOR_8BIT, 0xA80A, 0x18}, - {MISENSOR_8BIT, 0xA80B, 0x18}, - {MISENSOR_8BIT, 0xAC16, 0x18}, - {MISENSOR_8BIT, 0xC878, 0x0E}, - - {MISENSOR_TOK_TERM, 0, 0} -}; - -#endif -#endif diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h deleted file mode 100644 index bde2f148184d..000000000000 --- a/drivers/staging/media/atomisp/i2c/ov2680.h +++ /dev/null @@ -1,858 +0,0 @@ -/* - * Support for OmniVision OV2680 5M camera sensor. 
- * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __OV2680_H__ -#define __OV2680_H__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../include/linux/atomisp_platform.h" - -/* Defines for register writes and register array processing */ -#define I2C_MSG_LENGTH 0x2 -#define I2C_RETRY_COUNT 5 - -#define OV2680_FOCAL_LENGTH_NUM 334 /*3.34mm*/ -#define OV2680_FOCAL_LENGTH_DEM 100 -#define OV2680_F_NUMBER_DEFAULT_NUM 24 -#define OV2680_F_NUMBER_DEM 10 - -#define OV2680_BIN_FACTOR_MAX 4 - -#define MAX_FMTS 1 - -/* sensor_mode_data read_mode adaptation */ -#define OV2680_READ_MODE_BINNING_ON 0x0400 -#define OV2680_READ_MODE_BINNING_OFF 0x00 -#define OV2680_INTEGRATION_TIME_MARGIN 8 - -#define OV2680_MAX_EXPOSURE_VALUE 0xFFF1 -#define OV2680_MAX_GAIN_VALUE 0xFF - -/* - * focal length bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define OV2680_FOCAL_LENGTH_DEFAULT 0x1B70064 - -/* - * current f-number bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define OV2680_F_NUMBER_DEFAULT 0x18000a - -/* - * f-number range bits definition: - * bits 31-24: max f-number numerator - * bits 23-16: max f-number denominator - * bits 15-8: min f-number numerator - * bits 7-0: min f-number denominator - */ -#define OV2680_F_NUMBER_RANGE 0x180a180a -#define OV2680_ID 0x2680 - -#define OV2680_FINE_INTG_TIME_MIN 0 -#define OV2680_FINE_INTG_TIME_MAX_MARGIN 0 -#define 
OV2680_COARSE_INTG_TIME_MIN 1 -#define OV2680_COARSE_INTG_TIME_MAX_MARGIN 6 - -/* - * OV2680 System control registers - */ -#define OV2680_SW_SLEEP 0x0100 -#define OV2680_SW_RESET 0x0103 -#define OV2680_SW_STREAM 0x0100 - -#define OV2680_SC_CMMN_CHIP_ID_H 0x300A -#define OV2680_SC_CMMN_CHIP_ID_L 0x300B -#define OV2680_SC_CMMN_SCCB_ID 0x302B /* 0x300C*/ -#define OV2680_SC_CMMN_SUB_ID 0x302A /* process, version*/ - -#define OV2680_GROUP_ACCESS 0x3208 /*Bit[7:4] Group control, Bit[3:0] Group ID*/ - -#define OV2680_EXPOSURE_H 0x3500 /*Bit[3:0] Bit[19:16] of exposure, remaining 16 bits lies in Reg0x3501&Reg0x3502*/ -#define OV2680_EXPOSURE_M 0x3501 -#define OV2680_EXPOSURE_L 0x3502 -#define OV2680_AGC_H 0x350A /*Bit[1:0] means Bit[9:8] of gain*/ -#define OV2680_AGC_L 0x350B /*Bit[7:0] of gain*/ - -#define OV2680_HORIZONTAL_START_H 0x3800 /*Bit[11:8]*/ -#define OV2680_HORIZONTAL_START_L 0x3801 /*Bit[7:0]*/ -#define OV2680_VERTICAL_START_H 0x3802 /*Bit[11:8]*/ -#define OV2680_VERTICAL_START_L 0x3803 /*Bit[7:0]*/ -#define OV2680_HORIZONTAL_END_H 0x3804 /*Bit[11:8]*/ -#define OV2680_HORIZONTAL_END_L 0x3805 /*Bit[7:0]*/ -#define OV2680_VERTICAL_END_H 0x3806 /*Bit[11:8]*/ -#define OV2680_VERTICAL_END_L 0x3807 /*Bit[7:0]*/ -#define OV2680_HORIZONTAL_OUTPUT_SIZE_H 0x3808 /*Bit[3:0]*/ -#define OV2680_HORIZONTAL_OUTPUT_SIZE_L 0x3809 /*Bit[7:0]*/ -#define OV2680_VERTICAL_OUTPUT_SIZE_H 0x380a /*Bit[3:0]*/ -#define OV2680_VERTICAL_OUTPUT_SIZE_L 0x380b /*Bit[7:0]*/ -#define OV2680_TIMING_HTS_H 0x380C /*High 8-bit, and low 8-bit HTS address is 0x380d*/ -#define OV2680_TIMING_HTS_L 0x380D /*High 8-bit, and low 8-bit HTS address is 0x380d*/ -#define OV2680_TIMING_VTS_H 0x380e /*High 8-bit, and low 8-bit HTS address is 0x380f*/ -#define OV2680_TIMING_VTS_L 0x380f /*High 8-bit, and low 8-bit HTS address is 0x380f*/ -#define OV2680_FRAME_OFF_NUM 0x4202 - -/*Flip/Mirror*/ -#define OV2680_FLIP_REG 0x3820 -#define OV2680_MIRROR_REG 0x3821 -#define OV2680_FLIP_BIT 1 -#define OV2680_MIRROR_BIT 
2 -#define OV2680_FLIP_MIRROR_BIT_ENABLE 4 - -#define OV2680_MWB_RED_GAIN_H 0x5004/*0x3400*/ -#define OV2680_MWB_GREEN_GAIN_H 0x5006/*0x3402*/ -#define OV2680_MWB_BLUE_GAIN_H 0x5008/*0x3404*/ -#define OV2680_MWB_GAIN_MAX 0x0fff - -#define OV2680_START_STREAMING 0x01 -#define OV2680_STOP_STREAMING 0x00 - - -#define OV2680_INVALID_CONFIG 0xffffffff - - -struct regval_list { - u16 reg_num; - u8 value; -}; - -struct ov2680_resolution { - u8 *desc; - const struct ov2680_reg *regs; - int res; - int width; - int height; - int fps; - int pix_clk_freq; - u32 skip_frames; - u16 pixels_per_line; - u16 lines_per_frame; - u8 bin_factor_x; - u8 bin_factor_y; - u8 bin_mode; - bool used; -}; - -struct ov2680_format { - u8 *desc; - u32 pixelformat; - struct ov2680_reg *regs; -}; - - /* - * ov2680 device structure. - */ - struct ov2680_device { - struct v4l2_subdev sd; - struct media_pad pad; - struct v4l2_mbus_framefmt format; - struct mutex input_lock; - struct v4l2_ctrl_handler ctrl_handler; - struct camera_sensor_platform_data *platform_data; - int vt_pix_clk_freq_mhz; - int fmt_idx; - int run_mode; - u8 res; - u8 type; - }; - - enum ov2680_tok_type { - OV2680_8BIT = 0x0001, - OV2680_16BIT = 0x0002, - OV2680_32BIT = 0x0004, - OV2680_TOK_TERM = 0xf000, /* terminating token for reg list */ - OV2680_TOK_DELAY = 0xfe00, /* delay token for reg list */ - OV2680_TOK_MASK = 0xfff0 - }; - - /** - * struct ov2680_reg - MI sensor register format - * @type: type of the register - * @reg: 16-bit offset to register - * @val: 8/16/32-bit register value - * - * Define a structure for sensor register initialization values - */ - struct ov2680_reg { - enum ov2680_tok_type type; - u16 reg; - u32 val; /* @set value for read/mod/write, @mask */ - }; - - #define to_ov2680_sensor(x) container_of(x, struct ov2680_device, sd) - - #define OV2680_MAX_WRITE_BUF_SIZE 30 - - struct ov2680_write_buffer { - u16 addr; - u8 data[OV2680_MAX_WRITE_BUF_SIZE]; - }; - - struct ov2680_write_ctrl { - int index; - 
struct ov2680_write_buffer buffer; - }; - - static struct ov2680_reg const ov2680_global_setting[] = { - {OV2680_8BIT, 0x0103, 0x01}, - {OV2680_8BIT, 0x3002, 0x00}, - {OV2680_8BIT, 0x3016, 0x1c}, - {OV2680_8BIT, 0x3018, 0x44}, - {OV2680_8BIT, 0x3020, 0x00}, - {OV2680_8BIT, 0x3080, 0x02}, - {OV2680_8BIT, 0x3082, 0x45}, - {OV2680_8BIT, 0x3084, 0x09}, - {OV2680_8BIT, 0x3085, 0x04}, - {OV2680_8BIT, 0x3503, 0x03}, - {OV2680_8BIT, 0x350b, 0x36}, - {OV2680_8BIT, 0x3600, 0xb4}, - {OV2680_8BIT, 0x3603, 0x39}, - {OV2680_8BIT, 0x3604, 0x24}, - {OV2680_8BIT, 0x3605, 0x00}, - {OV2680_8BIT, 0x3620, 0x26}, - {OV2680_8BIT, 0x3621, 0x37}, - {OV2680_8BIT, 0x3622, 0x04}, - {OV2680_8BIT, 0x3628, 0x00}, - {OV2680_8BIT, 0x3705, 0x3c}, - {OV2680_8BIT, 0x370c, 0x50}, - {OV2680_8BIT, 0x370d, 0xc0}, - {OV2680_8BIT, 0x3718, 0x88}, - {OV2680_8BIT, 0x3720, 0x00}, - {OV2680_8BIT, 0x3721, 0x00}, - {OV2680_8BIT, 0x3722, 0x00}, - {OV2680_8BIT, 0x3723, 0x00}, - {OV2680_8BIT, 0x3738, 0x00}, - {OV2680_8BIT, 0x3717, 0x58}, - {OV2680_8BIT, 0x3781, 0x80}, - {OV2680_8BIT, 0x3789, 0x60}, - {OV2680_8BIT, 0x3800, 0x00}, - {OV2680_8BIT, 0x3819, 0x04}, - {OV2680_8BIT, 0x4000, 0x81}, - {OV2680_8BIT, 0x4001, 0x40}, - {OV2680_8BIT, 0x4602, 0x02}, - {OV2680_8BIT, 0x481f, 0x36}, - {OV2680_8BIT, 0x4825, 0x36}, - {OV2680_8BIT, 0x4837, 0x18}, - {OV2680_8BIT, 0x5002, 0x30}, - {OV2680_8BIT, 0x5004, 0x04},//manual awb 1x - {OV2680_8BIT, 0x5005, 0x00}, - {OV2680_8BIT, 0x5006, 0x04}, - {OV2680_8BIT, 0x5007, 0x00}, - {OV2680_8BIT, 0x5008, 0x04}, - {OV2680_8BIT, 0x5009, 0x00}, - {OV2680_8BIT, 0x5080, 0x00}, - {OV2680_8BIT, 0x3701, 0x64}, //add on 14/05/13 - {OV2680_8BIT, 0x3784, 0x0c}, //based OV2680_R1A_AM10.ovt add on 14/06/13 - {OV2680_8BIT, 0x5780, 0x3e}, //based OV2680_R1A_AM10.ovt,Adjust DPC setting (57xx) on 14/06/13 - {OV2680_8BIT, 0x5781, 0x0f}, - {OV2680_8BIT, 0x5782, 0x04}, - {OV2680_8BIT, 0x5783, 0x02}, - {OV2680_8BIT, 0x5784, 0x01}, - {OV2680_8BIT, 0x5785, 0x01}, - {OV2680_8BIT, 0x5786, 0x00}, - {OV2680_8BIT, 
0x5787, 0x04}, - {OV2680_8BIT, 0x5788, 0x02}, - {OV2680_8BIT, 0x5789, 0x00}, - {OV2680_8BIT, 0x578a, 0x01}, - {OV2680_8BIT, 0x578b, 0x02}, - {OV2680_8BIT, 0x578c, 0x03}, - {OV2680_8BIT, 0x578d, 0x03}, - {OV2680_8BIT, 0x578e, 0x08}, - {OV2680_8BIT, 0x578f, 0x0c}, - {OV2680_8BIT, 0x5790, 0x08}, - {OV2680_8BIT, 0x5791, 0x04}, - {OV2680_8BIT, 0x5792, 0x00}, - {OV2680_8BIT, 0x5793, 0x00}, - {OV2680_8BIT, 0x5794, 0x03}, //based OV2680_R1A_AM10.ovt,Adjust DPC setting (57xx) on 14/06/13 - {OV2680_8BIT, 0x0100, 0x00}, //stream off - - {OV2680_TOK_TERM, 0, 0} - }; - - -#if 0 /* None of the definitions below are used currently */ - /* - * 176x144 30fps VBlanking 1lane 10Bit (binning) - */ - static struct ov2680_reg const ov2680_QCIF_30fps[] = { - {OV2680_8BIT, 0x3086, 0x01}, - {OV2680_8BIT, 0x3501, 0x24}, - {OV2680_8BIT, 0x3502, 0x40}, - {OV2680_8BIT, 0x370a, 0x23}, - {OV2680_8BIT, 0x3801, 0xa0}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x78}, - {OV2680_8BIT, 0x3804, 0x05}, - {OV2680_8BIT, 0x3805, 0xaf}, - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 0x47}, - {OV2680_8BIT, 0x3808, 0x00}, - {OV2680_8BIT, 0x3809, 0xC0}, - {OV2680_8BIT, 0x380a, 0x00}, - {OV2680_8BIT, 0x380b, 0xa0}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xb0}, - {OV2680_8BIT, 0x380e, 0x02}, - {OV2680_8BIT, 0x380f, 0x84}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x04}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x04}, - {OV2680_8BIT, 0x3814, 0x31}, - {OV2680_8BIT, 0x3815, 0x31}, - {OV2680_8BIT, 0x4000, 0x81}, - {OV2680_8BIT, 0x4001, 0x40}, - {OV2680_8BIT, 0x4008, 0x00}, - {OV2680_8BIT, 0x4009, 0x03}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x10}, - {OV2680_8BIT, 0x5705, 0xa0}, - {OV2680_8BIT, 0x5706, 0x0c}, - {OV2680_8BIT, 0x5707, 0x78}, - {OV2680_8BIT, 0x3820, 0xc2}, - {OV2680_8BIT, 0x3821, 0x01}, - // {OV2680_8BIT, 0x5090, 0x0c}, - {OV2680_TOK_TERM, 
0, 0} - }; - - /* - * 352x288 30fps VBlanking 1lane 10Bit (binning) - */ - static struct ov2680_reg const ov2680_CIF_30fps[] = { - {OV2680_8BIT, 0x3086, 0x01}, - {OV2680_8BIT, 0x3501, 0x24}, - {OV2680_8BIT, 0x3502, 0x40}, - {OV2680_8BIT, 0x370a, 0x23}, - {OV2680_8BIT, 0x3801, 0xa0}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x78}, - {OV2680_8BIT, 0x3804, 0x03}, - {OV2680_8BIT, 0x3805, 0x8f}, - {OV2680_8BIT, 0x3806, 0x02}, - {OV2680_8BIT, 0x3807, 0xe7}, - {OV2680_8BIT, 0x3808, 0x01}, - {OV2680_8BIT, 0x3809, 0x70}, - {OV2680_8BIT, 0x380a, 0x01}, - {OV2680_8BIT, 0x380b, 0x30}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xb0}, - {OV2680_8BIT, 0x380e, 0x02}, - {OV2680_8BIT, 0x380f, 0x84}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x04}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x04}, - {OV2680_8BIT, 0x3814, 0x31}, - {OV2680_8BIT, 0x3815, 0x31}, - {OV2680_8BIT, 0x4008, 0x00}, - {OV2680_8BIT, 0x4009, 0x03}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x10}, - {OV2680_8BIT, 0x5705, 0xa0}, - {OV2680_8BIT, 0x5706, 0x0c}, - {OV2680_8BIT, 0x5707, 0x78}, - {OV2680_8BIT, 0x3820, 0xc2}, - {OV2680_8BIT, 0x3821, 0x01}, - // {OV2680_8BIT, 0x5090, 0x0c}, - {OV2680_TOK_TERM, 0, 0} - }; - - /* - * 336x256 30fps VBlanking 1lane 10Bit (binning) - */ - static struct ov2680_reg const ov2680_QVGA_30fps[] = { - {OV2680_8BIT, 0x3086, 0x01}, - {OV2680_8BIT, 0x3501, 0x24}, - {OV2680_8BIT, 0x3502, 0x40}, - {OV2680_8BIT, 0x370a, 0x23}, - {OV2680_8BIT, 0x3801, 0xa0}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x78}, - {OV2680_8BIT, 0x3804, 0x03}, - {OV2680_8BIT, 0x3805, 0x4f}, - {OV2680_8BIT, 0x3806, 0x02}, - {OV2680_8BIT, 0x3807, 0x87}, - {OV2680_8BIT, 0x3808, 0x01}, - {OV2680_8BIT, 0x3809, 0x50}, - {OV2680_8BIT, 0x380a, 0x01}, - {OV2680_8BIT, 0x380b, 0x00}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xb0}, - {OV2680_8BIT, 
0x380e, 0x02}, - {OV2680_8BIT, 0x380f, 0x84}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x04}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x04}, - {OV2680_8BIT, 0x3814, 0x31}, - {OV2680_8BIT, 0x3815, 0x31}, - {OV2680_8BIT, 0x4008, 0x00}, - {OV2680_8BIT, 0x4009, 0x03}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x10}, - {OV2680_8BIT, 0x5705, 0xa0}, - {OV2680_8BIT, 0x5706, 0x0c}, - {OV2680_8BIT, 0x5707, 0x78}, - {OV2680_8BIT, 0x3820, 0xc2}, - {OV2680_8BIT, 0x3821, 0x01}, - // {OV2680_8BIT, 0x5090, 0x0c}, - {OV2680_TOK_TERM, 0, 0} - }; - - - /* - * 656x496 30fps VBlanking 1lane 10Bit (binning) - */ - static struct ov2680_reg const ov2680_656x496_30fps[] = { - {OV2680_8BIT, 0x3086, 0x01}, - {OV2680_8BIT, 0x3501, 0x24}, - {OV2680_8BIT, 0x3502, 0x40}, - {OV2680_8BIT, 0x370a, 0x23}, - {OV2680_8BIT, 0x3801, 0xa0}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x78}, - {OV2680_8BIT, 0x3804, 0x05}, - {OV2680_8BIT, 0x3805, 0xcf}, - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 0x67}, - {OV2680_8BIT, 0x3808, 0x02}, - {OV2680_8BIT, 0x3809, 0x90}, - {OV2680_8BIT, 0x380a, 0x01}, - {OV2680_8BIT, 0x380b, 0xf0}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xb0}, - {OV2680_8BIT, 0x380e, 0x02}, - {OV2680_8BIT, 0x380f, 0x84}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x04}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x04}, - {OV2680_8BIT, 0x3814, 0x31}, - {OV2680_8BIT, 0x3815, 0x31}, - {OV2680_8BIT, 0x4008, 0x00}, - {OV2680_8BIT, 0x4009, 0x03}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x10}, - {OV2680_8BIT, 0x5705, 0xa0}, - {OV2680_8BIT, 0x5706, 0x0c}, - {OV2680_8BIT, 0x5707, 0x78}, - {OV2680_8BIT, 0x3820, 0xc2}, - {OV2680_8BIT, 0x3821, 0x01}, - // {OV2680_8BIT, 0x5090, 0x0c}, - {OV2680_TOK_TERM, 0, 0} - }; - /* - * 
800x600 30fps VBlanking 1lane 10Bit (binning) - */ - static struct ov2680_reg const ov2680_720x592_30fps[] = { - {OV2680_8BIT, 0x3086, 0x01}, - {OV2680_8BIT, 0x3501, 0x26}, - {OV2680_8BIT, 0x3502, 0x40}, - {OV2680_8BIT, 0x370a, 0x23}, - {OV2680_8BIT, 0x3801, 0x00}, // X_ADDR_START; - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x00}, // Y_ADDR_START; - {OV2680_8BIT, 0x3804, 0x05}, - {OV2680_8BIT, 0x3805, 0xaf}, // X_ADDR_END; - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 0xaf}, // Y_ADDR_END; - {OV2680_8BIT, 0x3808, 0x02}, - {OV2680_8BIT, 0x3809, 0xd0}, // X_OUTPUT_SIZE; - {OV2680_8BIT, 0x380a, 0x02}, - {OV2680_8BIT, 0x380b, 0x50}, // Y_OUTPUT_SIZE; - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xac}, // HTS; - {OV2680_8BIT, 0x380e, 0x02}, - {OV2680_8BIT, 0x380f, 0x84}, // VTS; - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x00}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x00}, - {OV2680_8BIT, 0x3814, 0x31}, - {OV2680_8BIT, 0x3815, 0x31}, - {OV2680_8BIT, 0x4008, 0x00}, - {OV2680_8BIT, 0x4009, 0x03}, - {OV2680_8BIT, 0x5708, 0x00}, - {OV2680_8BIT, 0x5704, 0x02}, - {OV2680_8BIT, 0x5705, 0xd0}, // X_WIN; - {OV2680_8BIT, 0x5706, 0x02}, - {OV2680_8BIT, 0x5707, 0x50}, // Y_WIN; - {OV2680_8BIT, 0x3820, 0xc2}, // FLIP_FORMAT; - {OV2680_8BIT, 0x3821, 0x01}, // MIRROR_FORMAT; - {OV2680_8BIT, 0x5090, 0x00}, // PRE ISP CTRL16, default value is 0x0C; - // BIT[3]: Mirror order, BG or GB; - // BIT[2]: Flip order, BR or RB; - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_TOK_TERM, 0, 0} - }; - /* - * 800x600 30fps VBlanking 1lane 10Bit (binning) - */ - static struct ov2680_reg const ov2680_800x600_30fps[] = { - {OV2680_8BIT, 0x3086, 0x01}, - {OV2680_8BIT, 0x3501, 0x26}, - {OV2680_8BIT, 0x3502, 0x40}, - {OV2680_8BIT, 0x370a, 0x23}, - {OV2680_8BIT, 0x3801, 0x00}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x00}, - {OV2680_8BIT, 0x3804, 0x06}, - {OV2680_8BIT, 0x3805, 0x4f}, - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 
0xbf}, - {OV2680_8BIT, 0x3808, 0x03}, - {OV2680_8BIT, 0x3809, 0x20}, - {OV2680_8BIT, 0x380a, 0x02}, - {OV2680_8BIT, 0x380b, 0x58}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xac}, - {OV2680_8BIT, 0x380e, 0x02}, - {OV2680_8BIT, 0x380f, 0x84}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x00}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x00}, - {OV2680_8BIT, 0x3814, 0x31}, - {OV2680_8BIT, 0x3815, 0x31}, - {OV2680_8BIT, 0x5708, 0x00}, - {OV2680_8BIT, 0x5704, 0x03}, - {OV2680_8BIT, 0x5705, 0x20}, - {OV2680_8BIT, 0x5706, 0x02}, - {OV2680_8BIT, 0x5707, 0x58}, - {OV2680_8BIT, 0x3820, 0xc2}, - {OV2680_8BIT, 0x3821, 0x01}, - {OV2680_8BIT, 0x5090, 0x00}, - {OV2680_8BIT, 0x4008, 0x00}, - {OV2680_8BIT, 0x4009, 0x03}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_TOK_TERM, 0, 0} - }; - - /* - * 720p=1280*720 30fps VBlanking 1lane 10Bit (no-Scaling) - */ - static struct ov2680_reg const ov2680_720p_30fps[] = { - {OV2680_8BIT, 0x3086, 0x00}, - {OV2680_8BIT, 0x3501, 0x48}, - {OV2680_8BIT, 0x3502, 0xe0}, - {OV2680_8BIT, 0x370a, 0x21}, - {OV2680_8BIT, 0x3801, 0xa0}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0xf2}, - {OV2680_8BIT, 0x3804, 0x05}, - {OV2680_8BIT, 0x3805, 0xbf}, - {OV2680_8BIT, 0x3806, 0x03}, - {OV2680_8BIT, 0x3807, 0xdd}, - {OV2680_8BIT, 0x3808, 0x05}, - {OV2680_8BIT, 0x3809, 0x10}, - {OV2680_8BIT, 0x380a, 0x02}, - {OV2680_8BIT, 0x380b, 0xe0}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xa8}, - {OV2680_8BIT, 0x380e, 0x05}, - {OV2680_8BIT, 0x380f, 0x0e}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x08}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x06}, - {OV2680_8BIT, 0x3814, 0x11}, - {OV2680_8BIT, 0x3815, 0x11}, - {OV2680_8BIT, 0x4008, 0x02}, - {OV2680_8BIT, 0x4009, 0x09}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x10}, - {OV2680_8BIT, 0x5705, 0xa0}, - {OV2680_8BIT, 0x5706, 0x0c}, - 
{OV2680_8BIT, 0x5707, 0x78}, - {OV2680_8BIT, 0x3820, 0xc0}, - {OV2680_8BIT, 0x3821, 0x00}, - // {OV2680_8BIT, 0x5090, 0x0c}, - {OV2680_TOK_TERM, 0, 0} - }; - - /* - * 1296x976 30fps VBlanking 1lane 10Bit(no-scaling) - */ - static struct ov2680_reg const ov2680_1296x976_30fps[] = { - {OV2680_8BIT, 0x3086, 0x00}, - {OV2680_8BIT, 0x3501, 0x48}, - {OV2680_8BIT, 0x3502, 0xe0}, - {OV2680_8BIT, 0x370a, 0x21}, - {OV2680_8BIT, 0x3801, 0xa0}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x78}, - {OV2680_8BIT, 0x3804, 0x05}, - {OV2680_8BIT, 0x3805, 0xbf}, - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 0x57}, - {OV2680_8BIT, 0x3808, 0x05}, - {OV2680_8BIT, 0x3809, 0x10}, - {OV2680_8BIT, 0x380a, 0x03}, - {OV2680_8BIT, 0x380b, 0xd0}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xa8}, - {OV2680_8BIT, 0x380e, 0x05}, - {OV2680_8BIT, 0x380f, 0x0e}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x08}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x08}, - {OV2680_8BIT, 0x3814, 0x11}, - {OV2680_8BIT, 0x3815, 0x11}, - {OV2680_8BIT, 0x4008, 0x02}, - {OV2680_8BIT, 0x4009, 0x09}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x10}, - {OV2680_8BIT, 0x5705, 0xa0}, - {OV2680_8BIT, 0x5706, 0x0c}, - {OV2680_8BIT, 0x5707, 0x78}, - {OV2680_8BIT, 0x3820, 0xc0}, - {OV2680_8BIT, 0x3821, 0x00}, //miror/flip - // {OV2680_8BIT, 0x5090, 0x0c}, - {OV2680_TOK_TERM, 0, 0} - }; - - /* - * 1456*1096 30fps VBlanking 1lane 10bit(no-scaling) - */ - static struct ov2680_reg const ov2680_1456x1096_30fps[]= { - {OV2680_8BIT, 0x3086, 0x00}, - {OV2680_8BIT, 0x3501, 0x48}, - {OV2680_8BIT, 0x3502, 0xe0}, - {OV2680_8BIT, 0x370a, 0x21}, - {OV2680_8BIT, 0x3801, 0x90}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x78}, - {OV2680_8BIT, 0x3804, 0x06}, - {OV2680_8BIT, 0x3805, 0x4f}, - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 0xC0}, - {OV2680_8BIT, 0x3808, 0x05}, 
- {OV2680_8BIT, 0x3809, 0xb0}, - {OV2680_8BIT, 0x380a, 0x04}, - {OV2680_8BIT, 0x380b, 0x48}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xa8}, - {OV2680_8BIT, 0x380e, 0x05}, - {OV2680_8BIT, 0x380f, 0x0e}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x08}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x00}, - {OV2680_8BIT, 0x3814, 0x11}, - {OV2680_8BIT, 0x3815, 0x11}, - {OV2680_8BIT, 0x4008, 0x02}, - {OV2680_8BIT, 0x4009, 0x09}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x10}, - {OV2680_8BIT, 0x5705, 0xa0}, - {OV2680_8BIT, 0x5706, 0x0c}, - {OV2680_8BIT, 0x5707, 0x78}, - {OV2680_8BIT, 0x3820, 0xc0}, - {OV2680_8BIT, 0x3821, 0x00}, - // {OV2680_8BIT, 0x5090, 0x0c}, - {OV2680_TOK_TERM, 0, 0} - }; -#endif - - /* - *1616x916 30fps VBlanking 1lane 10bit - */ - - static struct ov2680_reg const ov2680_1616x916_30fps[] = { - {OV2680_8BIT, 0x3086, 0x00}, - {OV2680_8BIT, 0x3501, 0x48}, - {OV2680_8BIT, 0x3502, 0xe0}, - {OV2680_8BIT, 0x370a, 0x21}, - {OV2680_8BIT, 0x3801, 0x00}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x96}, - {OV2680_8BIT, 0x3804, 0x06}, - {OV2680_8BIT, 0x3805, 0x4f}, - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 0x39}, - {OV2680_8BIT, 0x3808, 0x06}, - {OV2680_8BIT, 0x3809, 0x50}, - {OV2680_8BIT, 0x380a, 0x03}, - {OV2680_8BIT, 0x380b, 0x94}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xa8}, - {OV2680_8BIT, 0x380e, 0x05}, - {OV2680_8BIT, 0x380f, 0x0e}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x00}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x08}, - {OV2680_8BIT, 0x3814, 0x11}, - {OV2680_8BIT, 0x3815, 0x11}, - {OV2680_8BIT, 0x4008, 0x02}, - {OV2680_8BIT, 0x4009, 0x09}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_8BIT, 0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x06}, - {OV2680_8BIT, 0x5705, 0x50}, - {OV2680_8BIT, 0x5706, 
0x03}, - {OV2680_8BIT, 0x5707, 0x94}, - {OV2680_8BIT, 0x3820, 0xc0}, - {OV2680_8BIT, 0x3821, 0x00}, - // {OV2680_8BIT, 0x5090, 0x0C}, - {OV2680_TOK_TERM, 0, 0} - }; - - /* - * 1612x1212 30fps VBlanking 1lane 10Bit - */ -#if 0 - static struct ov2680_reg const ov2680_1616x1082_30fps[] = { - {OV2680_8BIT, 0x3086, 0x00}, - {OV2680_8BIT, 0x3501, 0x48}, - {OV2680_8BIT, 0x3502, 0xe0}, - {OV2680_8BIT, 0x370a, 0x21}, - {OV2680_8BIT, 0x3801, 0x00}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x86}, - {OV2680_8BIT, 0x3804, 0x06}, - {OV2680_8BIT, 0x3805, 0x4f}, - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 0xbf}, - {OV2680_8BIT, 0x3808, 0x06}, - {OV2680_8BIT, 0x3809, 0x50}, - {OV2680_8BIT, 0x380a, 0x04}, - {OV2680_8BIT, 0x380b, 0x3a}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xa8}, - {OV2680_8BIT, 0x380e, 0x05}, - {OV2680_8BIT, 0x380f, 0x0e}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x00}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x00}, - {OV2680_8BIT, 0x3814, 0x11}, - {OV2680_8BIT, 0x3815, 0x11}, - {OV2680_8BIT, 0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x06}, - {OV2680_8BIT, 0x5705, 0x50}, - {OV2680_8BIT, 0x5706, 0x04}, - {OV2680_8BIT, 0x5707, 0x3a}, - {OV2680_8BIT, 0x3820, 0xc0}, - {OV2680_8BIT, 0x3821, 0x00}, - // {OV2680_8BIT, 0x5090, 0x0C}, - {OV2680_8BIT, 0x4008, 0x02}, - {OV2680_8BIT, 0x4009, 0x09}, - {OV2680_8BIT, 0x5081, 0x41}, - {OV2680_TOK_TERM, 0, 0} - }; -#endif - /* - * 1616x1216 30fps VBlanking 1lane 10Bit - */ - static struct ov2680_reg const ov2680_1616x1216_30fps[] = { - {OV2680_8BIT, 0x3086, 0x00}, - {OV2680_8BIT, 0x3501, 0x48}, - {OV2680_8BIT, 0x3502, 0xe0}, - {OV2680_8BIT, 0x370a, 0x21}, - {OV2680_8BIT, 0x3801, 0x00}, - {OV2680_8BIT, 0x3802, 0x00}, - {OV2680_8BIT, 0x3803, 0x00}, - {OV2680_8BIT, 0x3804, 0x06}, - {OV2680_8BIT, 0x3805, 0x4f}, - {OV2680_8BIT, 0x3806, 0x04}, - {OV2680_8BIT, 0x3807, 0xbf}, - {OV2680_8BIT, 0x3808, 0x06}, - 
{OV2680_8BIT, 0x3809, 0x50},//50},//4line for mirror and flip - {OV2680_8BIT, 0x380a, 0x04}, - {OV2680_8BIT, 0x380b, 0xc0},//c0}, - {OV2680_8BIT, 0x380c, 0x06}, - {OV2680_8BIT, 0x380d, 0xa8}, - {OV2680_8BIT, 0x380e, 0x05}, - {OV2680_8BIT, 0x380f, 0x0e}, - {OV2680_8BIT, 0x3810, 0x00}, - {OV2680_8BIT, 0x3811, 0x00}, - {OV2680_8BIT, 0x3812, 0x00}, - {OV2680_8BIT, 0x3813, 0x00}, - {OV2680_8BIT, 0x3814, 0x11}, - {OV2680_8BIT, 0x3815, 0x11}, - {OV2680_8BIT, 0x4008, 0x00}, - {OV2680_8BIT, 0x4009, 0x0b}, - {OV2680_8BIT, 0x5081, 0x01}, - {OV2680_8BIT, 0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11 - {OV2680_8BIT, 0x5704, 0x06}, - {OV2680_8BIT, 0x5705, 0x50}, - {OV2680_8BIT, 0x5706, 0x04}, - {OV2680_8BIT, 0x5707, 0xcc}, - {OV2680_8BIT, 0x3820, 0xc0}, - {OV2680_8BIT, 0x3821, 0x00}, - // {OV2680_8BIT, 0x5090, 0x0C}, - {OV2680_TOK_TERM, 0, 0} - }; - - static struct ov2680_resolution ov2680_res_preview[] = { - { - .desc = "ov2680_1616x1216_30fps", - .width = 1616, - .height = 1216, - .pix_clk_freq = 66, - .fps = 30, - .used = 0, - .pixels_per_line = 1698,//1704, - .lines_per_frame = 1294, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2680_1616x1216_30fps, - }, - { - .desc = "ov2680_1616x916_30fps", - .width = 1616, - .height = 916, - .fps = 30, - .pix_clk_freq = 66, - .used = 0, - .pixels_per_line = 1698,//1704, - .lines_per_frame = 1294, - .bin_factor_x = 0, - .bin_factor_y = 0, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2680_1616x916_30fps, - }, -}; -#define N_RES_PREVIEW (ARRAY_SIZE(ov2680_res_preview)) - -static struct ov2680_resolution *ov2680_res = ov2680_res_preview; -static unsigned long N_RES = N_RES_PREVIEW; - -#endif diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h deleted file mode 100644 index d99188a5c9d0..000000000000 --- a/drivers/staging/media/atomisp/i2c/ov2722.h +++ /dev/null @@ -1,1268 +0,0 @@ -/* - * Support for OmniVision OV2722 
1080p HD camera sensor. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __OV2722_H__ -#define __OV2722_H__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../include/linux/atomisp_platform.h" - -#define OV2722_POWER_UP_RETRY_NUM 5 - -/* Defines for register writes and register array processing */ -#define I2C_MSG_LENGTH 0x2 -#define I2C_RETRY_COUNT 5 - -#define OV2722_FOCAL_LENGTH_NUM 278 /*2.78mm*/ -#define OV2722_FOCAL_LENGTH_DEM 100 -#define OV2722_F_NUMBER_DEFAULT_NUM 26 -#define OV2722_F_NUMBER_DEM 10 - -#define MAX_FMTS 1 - -/* - * focal length bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define OV2722_FOCAL_LENGTH_DEFAULT 0x1160064 - -/* - * current f-number bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define OV2722_F_NUMBER_DEFAULT 0x1a000a - -/* - * f-number range bits definition: - * bits 31-24: max f-number numerator - * bits 23-16: max f-number denominator - * bits 15-8: min f-number numerator - * bits 7-0: min f-number denominator - */ -#define OV2722_F_NUMBER_RANGE 0x1a0a1a0a -#define OV2720_ID 0x2720 -#define OV2722_ID 0x2722 - -#define OV2722_FINE_INTG_TIME_MIN 0 -#define OV2722_FINE_INTG_TIME_MAX_MARGIN 0 -#define OV2722_COARSE_INTG_TIME_MIN 1 -#define OV2722_COARSE_INTG_TIME_MAX_MARGIN 4 - -/* - * OV2722 System control registers - */ -#define OV2722_SW_SLEEP 0x0100 -#define OV2722_SW_RESET 0x0103 -#define OV2722_SW_STREAM 0x0100 - 
-#define OV2722_SC_CMMN_CHIP_ID_H 0x300A -#define OV2722_SC_CMMN_CHIP_ID_L 0x300B -#define OV2722_SC_CMMN_SCCB_ID 0x300C -#define OV2722_SC_CMMN_SUB_ID 0x302A /* process, version*/ - -#define OV2722_SC_CMMN_PAD_OEN0 0x3000 -#define OV2722_SC_CMMN_PAD_OEN1 0x3001 -#define OV2722_SC_CMMN_PAD_OEN2 0x3002 -#define OV2722_SC_CMMN_PAD_OUT0 0x3008 -#define OV2722_SC_CMMN_PAD_OUT1 0x3009 -#define OV2722_SC_CMMN_PAD_OUT2 0x300D -#define OV2722_SC_CMMN_PAD_SEL0 0x300E -#define OV2722_SC_CMMN_PAD_SEL1 0x300F -#define OV2722_SC_CMMN_PAD_SEL2 0x3010 - -#define OV2722_SC_CMMN_PAD_PK 0x3011 -#define OV2722_SC_CMMN_A_PWC_PK_O_13 0x3013 -#define OV2722_SC_CMMN_A_PWC_PK_O_14 0x3014 - -#define OV2722_SC_CMMN_CLKRST0 0x301A -#define OV2722_SC_CMMN_CLKRST1 0x301B -#define OV2722_SC_CMMN_CLKRST2 0x301C -#define OV2722_SC_CMMN_CLKRST3 0x301D -#define OV2722_SC_CMMN_CLKRST4 0x301E -#define OV2722_SC_CMMN_CLKRST5 0x3005 -#define OV2722_SC_CMMN_PCLK_DIV_CTRL 0x3007 -#define OV2722_SC_CMMN_CLOCK_SEL 0x3020 -#define OV2722_SC_SOC_CLKRST5 0x3040 - -#define OV2722_SC_CMMN_PLL_CTRL0 0x3034 -#define OV2722_SC_CMMN_PLL_CTRL1 0x3035 -#define OV2722_SC_CMMN_PLL_CTRL2 0x3039 -#define OV2722_SC_CMMN_PLL_CTRL3 0x3037 -#define OV2722_SC_CMMN_PLL_MULTIPLIER 0x3036 -#define OV2722_SC_CMMN_PLL_DEBUG_OPT 0x3038 -#define OV2722_SC_CMMN_PLLS_CTRL0 0x303A -#define OV2722_SC_CMMN_PLLS_CTRL1 0x303B -#define OV2722_SC_CMMN_PLLS_CTRL2 0x303C -#define OV2722_SC_CMMN_PLLS_CTRL3 0x303D - -#define OV2722_SC_CMMN_MIPI_PHY_16 0x3016 -#define OV2722_SC_CMMN_MIPI_PHY_17 0x3017 -#define OV2722_SC_CMMN_MIPI_SC_CTRL_18 0x3018 -#define OV2722_SC_CMMN_MIPI_SC_CTRL_19 0x3019 -#define OV2722_SC_CMMN_MIPI_SC_CTRL_21 0x3021 -#define OV2722_SC_CMMN_MIPI_SC_CTRL_22 0x3022 - -#define OV2722_AEC_PK_EXPO_H 0x3500 -#define OV2722_AEC_PK_EXPO_M 0x3501 -#define OV2722_AEC_PK_EXPO_L 0x3502 -#define OV2722_AEC_MANUAL_CTRL 0x3503 -#define OV2722_AGC_ADJ_H 0x3508 -#define OV2722_AGC_ADJ_L 0x3509 -#define OV2722_VTS_DIFF_H 0x350c -#define 
OV2722_VTS_DIFF_L 0x350d -#define OV2722_GROUP_ACCESS 0x3208 -#define OV2722_HTS_H 0x380c -#define OV2722_HTS_L 0x380d -#define OV2722_VTS_H 0x380e -#define OV2722_VTS_L 0x380f - -#define OV2722_MWB_GAIN_R_H 0x5186 -#define OV2722_MWB_GAIN_R_L 0x5187 -#define OV2722_MWB_GAIN_G_H 0x5188 -#define OV2722_MWB_GAIN_G_L 0x5189 -#define OV2722_MWB_GAIN_B_H 0x518a -#define OV2722_MWB_GAIN_B_L 0x518b - -#define OV2722_H_CROP_START_H 0x3800 -#define OV2722_H_CROP_START_L 0x3801 -#define OV2722_V_CROP_START_H 0x3802 -#define OV2722_V_CROP_START_L 0x3803 -#define OV2722_H_CROP_END_H 0x3804 -#define OV2722_H_CROP_END_L 0x3805 -#define OV2722_V_CROP_END_H 0x3806 -#define OV2722_V_CROP_END_L 0x3807 -#define OV2722_H_OUTSIZE_H 0x3808 -#define OV2722_H_OUTSIZE_L 0x3809 -#define OV2722_V_OUTSIZE_H 0x380a -#define OV2722_V_OUTSIZE_L 0x380b - -#define OV2722_START_STREAMING 0x01 -#define OV2722_STOP_STREAMING 0x00 - -struct regval_list { - u16 reg_num; - u8 value; -}; - -struct ov2722_resolution { - u8 *desc; - const struct ov2722_reg *regs; - int res; - int width; - int height; - int fps; - int pix_clk_freq; - u32 skip_frames; - u16 pixels_per_line; - u16 lines_per_frame; - u8 bin_factor_x; - u8 bin_factor_y; - u8 bin_mode; - bool used; - int mipi_freq; -}; - -struct ov2722_format { - u8 *desc; - u32 pixelformat; - struct ov2722_reg *regs; -}; - -/* - * ov2722 device structure. 
- */ -struct ov2722_device { - struct v4l2_subdev sd; - struct media_pad pad; - struct v4l2_mbus_framefmt format; - struct mutex input_lock; - - struct camera_sensor_platform_data *platform_data; - int vt_pix_clk_freq_mhz; - int fmt_idx; - int run_mode; - u16 pixels_per_line; - u16 lines_per_frame; - u8 res; - u8 type; - - struct v4l2_ctrl_handler ctrl_handler; - struct v4l2_ctrl *link_freq; -}; - -enum ov2722_tok_type { - OV2722_8BIT = 0x0001, - OV2722_16BIT = 0x0002, - OV2722_32BIT = 0x0004, - OV2722_TOK_TERM = 0xf000, /* terminating token for reg list */ - OV2722_TOK_DELAY = 0xfe00, /* delay token for reg list */ - OV2722_TOK_MASK = 0xfff0 -}; - -/** - * struct ov2722_reg - MI sensor register format - * @type: type of the register - * @reg: 16-bit offset to register - * @val: 8/16/32-bit register value - * - * Define a structure for sensor register initialization values - */ -struct ov2722_reg { - enum ov2722_tok_type type; - u16 reg; - u32 val; /* @set value for read/mod/write, @mask */ -}; - -#define to_ov2722_sensor(x) container_of(x, struct ov2722_device, sd) - -#define OV2722_MAX_WRITE_BUF_SIZE 30 - -struct ov2722_write_buffer { - u16 addr; - u8 data[OV2722_MAX_WRITE_BUF_SIZE]; -}; - -struct ov2722_write_ctrl { - int index; - struct ov2722_write_buffer buffer; -}; - -/* - * Register settings for various resolution - */ -#if 0 -static struct ov2722_reg const ov2722_QVGA_30fps[] = { - {OV2722_8BIT, 0x3718, 0x10}, - {OV2722_8BIT, 0x3702, 0x0c}, - {OV2722_8BIT, 0x373a, 0x1c}, - {OV2722_8BIT, 0x3715, 0x01}, - {OV2722_8BIT, 0x3703, 0x0c}, - {OV2722_8BIT, 0x3705, 0x06}, - {OV2722_8BIT, 0x3730, 0x0e}, - {OV2722_8BIT, 0x3704, 0x1c}, - {OV2722_8BIT, 0x3f06, 0x00}, - {OV2722_8BIT, 0x371c, 0x00}, - {OV2722_8BIT, 0x371d, 0x46}, - {OV2722_8BIT, 0x371e, 0x00}, - {OV2722_8BIT, 0x371f, 0x63}, - {OV2722_8BIT, 0x3708, 0x61}, - {OV2722_8BIT, 0x3709, 0x12}, - {OV2722_8BIT, 0x3800, 0x01}, - {OV2722_8BIT, 0x3801, 0x42}, /* H crop start: 322 */ - {OV2722_8BIT, 0x3802, 0x00}, - 
{OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32 */ - {OV2722_8BIT, 0x3804, 0x06}, - {OV2722_8BIT, 0x3805, 0x95}, /* H crop end: 1685 */ - {OV2722_8BIT, 0x3806, 0x04}, - {OV2722_8BIT, 0x3807, 0x27}, /* V crop end: 1063 */ - {OV2722_8BIT, 0x3808, 0x01}, - {OV2722_8BIT, 0x3809, 0x50}, /* H output size: 336 */ - {OV2722_8BIT, 0x380a, 0x01}, - {OV2722_8BIT, 0x380b, 0x00}, /* V output size: 256 */ - - /* H blank timing */ - {OV2722_8BIT, 0x380c, 0x08}, - {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */ - {OV2722_8BIT, 0x380e, 0x04}, - {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */ - {OV2722_8BIT, 0x3810, 0x00}, - {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */ - {OV2722_8BIT, 0x3812, 0x00}, - {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */ - {OV2722_8BIT, 0x3820, 0xc0}, - {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/ - {OV2722_8BIT, 0x3814, 0x71}, - {OV2722_8BIT, 0x3815, 0x71}, - {OV2722_8BIT, 0x3612, 0x49}, - {OV2722_8BIT, 0x3618, 0x00}, - {OV2722_8BIT, 0x3a08, 0x01}, - {OV2722_8BIT, 0x3a09, 0xc3}, - {OV2722_8BIT, 0x3a0a, 0x01}, - {OV2722_8BIT, 0x3a0b, 0x77}, - {OV2722_8BIT, 0x3a0d, 0x00}, - {OV2722_8BIT, 0x3a0e, 0x00}, - {OV2722_8BIT, 0x4520, 0x09}, - {OV2722_8BIT, 0x4837, 0x1b}, - {OV2722_8BIT, 0x3000, 0xff}, - {OV2722_8BIT, 0x3001, 0xff}, - {OV2722_8BIT, 0x3002, 0xf0}, - {OV2722_8BIT, 0x3600, 0x08}, - {OV2722_8BIT, 0x3621, 0xc0}, - {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */ - {OV2722_8BIT, 0x3633, 0x63}, - {OV2722_8BIT, 0x3634, 0x24}, - {OV2722_8BIT, 0x3f01, 0x0c}, - {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */ - {OV2722_8BIT, 0x3614, 0xf0}, - {OV2722_8BIT, 0x3630, 0x2d}, - {OV2722_8BIT, 0x370b, 0x62}, - {OV2722_8BIT, 0x3706, 0x61}, - {OV2722_8BIT, 0x4000, 0x02}, - {OV2722_8BIT, 0x4002, 0xc5}, - {OV2722_8BIT, 0x4005, 0x08}, - {OV2722_8BIT, 0x404f, 0x84}, - {OV2722_8BIT, 0x4051, 0x00}, - {OV2722_8BIT, 0x5000, 0xff}, - {OV2722_8BIT, 0x3a18, 0x00}, - {OV2722_8BIT, 0x3a19, 0x80}, - {OV2722_8BIT, 0x4521, 0x00}, - {OV2722_8BIT, 
0x5183, 0xb0}, /* AWB red */ - {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */ - {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */ - {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */ - {OV2722_8BIT, 0x370c, 0x0c}, - {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */ - {OV2722_8BIT, 0x3035, 0x00}, - {OV2722_8BIT, 0x3036, 0x26}, - {OV2722_8BIT, 0x3037, 0xa1}, - {OV2722_8BIT, 0x303e, 0x19}, - {OV2722_8BIT, 0x3038, 0x06}, - {OV2722_8BIT, 0x3018, 0x04}, - - /* Added for power optimization */ - {OV2722_8BIT, 0x3000, 0x00}, - {OV2722_8BIT, 0x3001, 0x00}, - {OV2722_8BIT, 0x3002, 0x00}, - {OV2722_8BIT, 0x3a0f, 0x40}, - {OV2722_8BIT, 0x3a10, 0x38}, - {OV2722_8BIT, 0x3a1b, 0x48}, - {OV2722_8BIT, 0x3a1e, 0x30}, - {OV2722_8BIT, 0x3a11, 0x90}, - {OV2722_8BIT, 0x3a1f, 0x10}, - {OV2722_8BIT, 0x3011, 0x22}, - {OV2722_8BIT, 0x3a00, 0x58}, - {OV2722_8BIT, 0x3503, 0x17}, - {OV2722_8BIT, 0x3500, 0x00}, - {OV2722_8BIT, 0x3501, 0x46}, - {OV2722_8BIT, 0x3502, 0x00}, - {OV2722_8BIT, 0x3508, 0x00}, - {OV2722_8BIT, 0x3509, 0x10}, - {OV2722_TOK_TERM, 0, 0}, - -}; - -static struct ov2722_reg const ov2722_480P_30fps[] = { - {OV2722_8BIT, 0x3718, 0x10}, - {OV2722_8BIT, 0x3702, 0x18}, - {OV2722_8BIT, 0x373a, 0x3c}, - {OV2722_8BIT, 0x3715, 0x01}, - {OV2722_8BIT, 0x3703, 0x1d}, - {OV2722_8BIT, 0x3705, 0x12}, - {OV2722_8BIT, 0x3730, 0x1f}, - {OV2722_8BIT, 0x3704, 0x3f}, - {OV2722_8BIT, 0x3f06, 0x1d}, - {OV2722_8BIT, 0x371c, 0x00}, - {OV2722_8BIT, 0x371d, 0x83}, - {OV2722_8BIT, 0x371e, 0x00}, - {OV2722_8BIT, 0x371f, 0xbd}, - {OV2722_8BIT, 0x3708, 0x63}, - {OV2722_8BIT, 0x3709, 0x52}, - {OV2722_8BIT, 0x3800, 0x00}, - {OV2722_8BIT, 0x3801, 0xf2}, /* H crop start: 322 - 80 = 242*/ - {OV2722_8BIT, 0x3802, 0x00}, - {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32*/ - {OV2722_8BIT, 0x3804, 0x06}, - {OV2722_8BIT, 0x3805, 0xBB}, /* H crop end: 1643 + 80 = 1723*/ - {OV2722_8BIT, 0x3806, 0x04}, - {OV2722_8BIT, 0x3807, 0x03}, /* V crop end: 1027*/ - {OV2722_8BIT, 0x3808, 0x02}, - {OV2722_8BIT, 0x3809, 0xE0}, /* H 
output size: 656 +80 = 736*/ - {OV2722_8BIT, 0x380a, 0x01}, - {OV2722_8BIT, 0x380b, 0xF0}, /* V output size: 496 */ - - /* H blank timing */ - {OV2722_8BIT, 0x380c, 0x08}, - {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */ - {OV2722_8BIT, 0x380e, 0x04}, - {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */ - {OV2722_8BIT, 0x3810, 0x00}, - {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */ - {OV2722_8BIT, 0x3812, 0x00}, - {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */ - {OV2722_8BIT, 0x3820, 0x80}, - {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/ - {OV2722_8BIT, 0x3814, 0x31}, - {OV2722_8BIT, 0x3815, 0x31}, - {OV2722_8BIT, 0x3612, 0x4b}, - {OV2722_8BIT, 0x3618, 0x04}, - {OV2722_8BIT, 0x3a08, 0x02}, - {OV2722_8BIT, 0x3a09, 0x67}, - {OV2722_8BIT, 0x3a0a, 0x02}, - {OV2722_8BIT, 0x3a0b, 0x00}, - {OV2722_8BIT, 0x3a0d, 0x00}, - {OV2722_8BIT, 0x3a0e, 0x00}, - {OV2722_8BIT, 0x4520, 0x0a}, - {OV2722_8BIT, 0x4837, 0x1b}, - {OV2722_8BIT, 0x3000, 0xff}, - {OV2722_8BIT, 0x3001, 0xff}, - {OV2722_8BIT, 0x3002, 0xf0}, - {OV2722_8BIT, 0x3600, 0x08}, - {OV2722_8BIT, 0x3621, 0xc0}, - {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */ - {OV2722_8BIT, 0x3633, 0x63}, - {OV2722_8BIT, 0x3634, 0x24}, - {OV2722_8BIT, 0x3f01, 0x0c}, - {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */ - {OV2722_8BIT, 0x3614, 0xf0}, - {OV2722_8BIT, 0x3630, 0x2d}, - {OV2722_8BIT, 0x370b, 0x62}, - {OV2722_8BIT, 0x3706, 0x61}, - {OV2722_8BIT, 0x4000, 0x02}, - {OV2722_8BIT, 0x4002, 0xc5}, - {OV2722_8BIT, 0x4005, 0x08}, - {OV2722_8BIT, 0x404f, 0x84}, - {OV2722_8BIT, 0x4051, 0x00}, - {OV2722_8BIT, 0x5000, 0xff}, - {OV2722_8BIT, 0x3a18, 0x00}, - {OV2722_8BIT, 0x3a19, 0x80}, - {OV2722_8BIT, 0x4521, 0x00}, - {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */ - {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */ - {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */ - {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */ - {OV2722_8BIT, 0x370c, 0x0c}, - {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */ - {OV2722_8BIT, 
0x3035, 0x00}, - {OV2722_8BIT, 0x3036, 0x26}, - {OV2722_8BIT, 0x3037, 0xa1}, - {OV2722_8BIT, 0x303e, 0x19}, - {OV2722_8BIT, 0x3038, 0x06}, - {OV2722_8BIT, 0x3018, 0x04}, - - /* Added for power optimization */ - {OV2722_8BIT, 0x3000, 0x00}, - {OV2722_8BIT, 0x3001, 0x00}, - {OV2722_8BIT, 0x3002, 0x00}, - {OV2722_8BIT, 0x3a0f, 0x40}, - {OV2722_8BIT, 0x3a10, 0x38}, - {OV2722_8BIT, 0x3a1b, 0x48}, - {OV2722_8BIT, 0x3a1e, 0x30}, - {OV2722_8BIT, 0x3a11, 0x90}, - {OV2722_8BIT, 0x3a1f, 0x10}, - {OV2722_8BIT, 0x3011, 0x22}, - {OV2722_8BIT, 0x3a00, 0x58}, - {OV2722_8BIT, 0x3503, 0x17}, - {OV2722_8BIT, 0x3500, 0x00}, - {OV2722_8BIT, 0x3501, 0x46}, - {OV2722_8BIT, 0x3502, 0x00}, - {OV2722_8BIT, 0x3508, 0x00}, - {OV2722_8BIT, 0x3509, 0x10}, - {OV2722_TOK_TERM, 0, 0}, -}; - -static struct ov2722_reg const ov2722_VGA_30fps[] = { - {OV2722_8BIT, 0x3718, 0x10}, - {OV2722_8BIT, 0x3702, 0x18}, - {OV2722_8BIT, 0x373a, 0x3c}, - {OV2722_8BIT, 0x3715, 0x01}, - {OV2722_8BIT, 0x3703, 0x1d}, - {OV2722_8BIT, 0x3705, 0x12}, - {OV2722_8BIT, 0x3730, 0x1f}, - {OV2722_8BIT, 0x3704, 0x3f}, - {OV2722_8BIT, 0x3f06, 0x1d}, - {OV2722_8BIT, 0x371c, 0x00}, - {OV2722_8BIT, 0x371d, 0x83}, - {OV2722_8BIT, 0x371e, 0x00}, - {OV2722_8BIT, 0x371f, 0xbd}, - {OV2722_8BIT, 0x3708, 0x63}, - {OV2722_8BIT, 0x3709, 0x52}, - {OV2722_8BIT, 0x3800, 0x01}, - {OV2722_8BIT, 0x3801, 0x42}, /* H crop start: 322 */ - {OV2722_8BIT, 0x3802, 0x00}, - {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32*/ - {OV2722_8BIT, 0x3804, 0x06}, - {OV2722_8BIT, 0x3805, 0x6B}, /* H crop end: 1643*/ - {OV2722_8BIT, 0x3806, 0x04}, - {OV2722_8BIT, 0x3807, 0x03}, /* V crop end: 1027*/ - {OV2722_8BIT, 0x3808, 0x02}, - {OV2722_8BIT, 0x3809, 0x90}, /* H output size: 656 */ - {OV2722_8BIT, 0x380a, 0x01}, - {OV2722_8BIT, 0x380b, 0xF0}, /* V output size: 496 */ - - /* H blank timing */ - {OV2722_8BIT, 0x380c, 0x08}, - {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */ - {OV2722_8BIT, 0x380e, 0x04}, - {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */ 
- {OV2722_8BIT, 0x3810, 0x00}, - {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */ - {OV2722_8BIT, 0x3812, 0x00}, - {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */ - {OV2722_8BIT, 0x3820, 0x80}, - {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/ - {OV2722_8BIT, 0x3814, 0x31}, - {OV2722_8BIT, 0x3815, 0x31}, - {OV2722_8BIT, 0x3612, 0x4b}, - {OV2722_8BIT, 0x3618, 0x04}, - {OV2722_8BIT, 0x3a08, 0x02}, - {OV2722_8BIT, 0x3a09, 0x67}, - {OV2722_8BIT, 0x3a0a, 0x02}, - {OV2722_8BIT, 0x3a0b, 0x00}, - {OV2722_8BIT, 0x3a0d, 0x00}, - {OV2722_8BIT, 0x3a0e, 0x00}, - {OV2722_8BIT, 0x4520, 0x0a}, - {OV2722_8BIT, 0x4837, 0x29}, - {OV2722_8BIT, 0x3000, 0xff}, - {OV2722_8BIT, 0x3001, 0xff}, - {OV2722_8BIT, 0x3002, 0xf0}, - {OV2722_8BIT, 0x3600, 0x08}, - {OV2722_8BIT, 0x3621, 0xc0}, - {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */ - {OV2722_8BIT, 0x3633, 0x63}, - {OV2722_8BIT, 0x3634, 0x24}, - {OV2722_8BIT, 0x3f01, 0x0c}, - {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */ - {OV2722_8BIT, 0x3614, 0xf0}, - {OV2722_8BIT, 0x3630, 0x2d}, - {OV2722_8BIT, 0x370b, 0x62}, - {OV2722_8BIT, 0x3706, 0x61}, - {OV2722_8BIT, 0x4000, 0x02}, - {OV2722_8BIT, 0x4002, 0xc5}, - {OV2722_8BIT, 0x4005, 0x08}, - {OV2722_8BIT, 0x404f, 0x84}, - {OV2722_8BIT, 0x4051, 0x00}, - {OV2722_8BIT, 0x5000, 0xff}, - {OV2722_8BIT, 0x3a18, 0x00}, - {OV2722_8BIT, 0x3a19, 0x80}, - {OV2722_8BIT, 0x4521, 0x00}, - {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */ - {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */ - {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */ - {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */ - {OV2722_8BIT, 0x370c, 0x0c}, - {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */ - {OV2722_8BIT, 0x3035, 0x00}, - {OV2722_8BIT, 0x3036, 0x26}, - {OV2722_8BIT, 0x3037, 0xa1}, - {OV2722_8BIT, 0x303e, 0x19}, - {OV2722_8BIT, 0x3038, 0x06}, - {OV2722_8BIT, 0x3018, 0x04}, - - /* Added for power optimization */ - {OV2722_8BIT, 0x3000, 0x00}, - {OV2722_8BIT, 0x3001, 0x00}, - {OV2722_8BIT, 0x3002, 0x00}, - {OV2722_8BIT, 
0x3a0f, 0x40}, - {OV2722_8BIT, 0x3a10, 0x38}, - {OV2722_8BIT, 0x3a1b, 0x48}, - {OV2722_8BIT, 0x3a1e, 0x30}, - {OV2722_8BIT, 0x3a11, 0x90}, - {OV2722_8BIT, 0x3a1f, 0x10}, - {OV2722_8BIT, 0x3011, 0x22}, - {OV2722_8BIT, 0x3a00, 0x58}, - {OV2722_8BIT, 0x3503, 0x17}, - {OV2722_8BIT, 0x3500, 0x00}, - {OV2722_8BIT, 0x3501, 0x46}, - {OV2722_8BIT, 0x3502, 0x00}, - {OV2722_8BIT, 0x3508, 0x00}, - {OV2722_8BIT, 0x3509, 0x10}, - {OV2722_TOK_TERM, 0, 0}, -}; -#endif - -static struct ov2722_reg const ov2722_1632_1092_30fps[] = { - {OV2722_8BIT, 0x3021, 0x03}, /* For stand wait for - a whole frame complete.(vblank) */ - {OV2722_8BIT, 0x3718, 0x10}, - {OV2722_8BIT, 0x3702, 0x24}, - {OV2722_8BIT, 0x373a, 0x60}, - {OV2722_8BIT, 0x3715, 0x01}, - {OV2722_8BIT, 0x3703, 0x2e}, - {OV2722_8BIT, 0x3705, 0x10}, - {OV2722_8BIT, 0x3730, 0x30}, - {OV2722_8BIT, 0x3704, 0x62}, - {OV2722_8BIT, 0x3f06, 0x3a}, - {OV2722_8BIT, 0x371c, 0x00}, - {OV2722_8BIT, 0x371d, 0xc4}, - {OV2722_8BIT, 0x371e, 0x01}, - {OV2722_8BIT, 0x371f, 0x0d}, - {OV2722_8BIT, 0x3708, 0x61}, - {OV2722_8BIT, 0x3709, 0x12}, - {OV2722_8BIT, 0x3800, 0x00}, - {OV2722_8BIT, 0x3801, 0x9E}, /* H crop start: 158 */ - {OV2722_8BIT, 0x3802, 0x00}, - {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */ - {OV2722_8BIT, 0x3804, 0x07}, - {OV2722_8BIT, 0x3805, 0x05}, /* H crop end: 1797 */ - {OV2722_8BIT, 0x3806, 0x04}, - {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */ - - {OV2722_8BIT, 0x3808, 0x06}, - {OV2722_8BIT, 0x3809, 0x60}, /* H output size: 1632 */ - {OV2722_8BIT, 0x380a, 0x04}, - {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */ - {OV2722_8BIT, 0x380c, 0x08}, - {OV2722_8BIT, 0x380d, 0xd4}, /* H timing: 2260 */ - {OV2722_8BIT, 0x380e, 0x04}, - {OV2722_8BIT, 0x380f, 0xdc}, /* V timing: 1244 */ - {OV2722_8BIT, 0x3810, 0x00}, - {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */ - {OV2722_8BIT, 0x3812, 0x00}, - {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */ - {OV2722_8BIT, 0x3820, 0x80}, - {OV2722_8BIT, 0x3821, 0x06}, /* 
mirror */ - {OV2722_8BIT, 0x3814, 0x11}, - {OV2722_8BIT, 0x3815, 0x11}, - {OV2722_8BIT, 0x3612, 0x0b}, - {OV2722_8BIT, 0x3618, 0x04}, - {OV2722_8BIT, 0x3a08, 0x01}, - {OV2722_8BIT, 0x3a09, 0x50}, - {OV2722_8BIT, 0x3a0a, 0x01}, - {OV2722_8BIT, 0x3a0b, 0x18}, - {OV2722_8BIT, 0x3a0d, 0x03}, - {OV2722_8BIT, 0x3a0e, 0x03}, - {OV2722_8BIT, 0x4520, 0x00}, - {OV2722_8BIT, 0x4837, 0x1b}, - {OV2722_8BIT, 0x3600, 0x08}, - {OV2722_8BIT, 0x3621, 0xc0}, - {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */ - {OV2722_8BIT, 0x3633, 0x23}, - {OV2722_8BIT, 0x3634, 0x54}, - {OV2722_8BIT, 0x3f01, 0x0c}, - {OV2722_8BIT, 0x5001, 0xc1}, - {OV2722_8BIT, 0x3614, 0xf0}, - {OV2722_8BIT, 0x3630, 0x2d}, - {OV2722_8BIT, 0x370b, 0x62}, - {OV2722_8BIT, 0x3706, 0x61}, - {OV2722_8BIT, 0x4000, 0x02}, - {OV2722_8BIT, 0x4002, 0xc5}, - {OV2722_8BIT, 0x4005, 0x08}, - {OV2722_8BIT, 0x404f, 0x84}, - {OV2722_8BIT, 0x4051, 0x00}, - {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */ - {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */ - {OV2722_8BIT, 0x3a18, 0x00}, - {OV2722_8BIT, 0x3a19, 0x80}, - {OV2722_8BIT, 0x4521, 0x00}, - {OV2722_8BIT, 0x5183, 0xb0}, - {OV2722_8BIT, 0x5184, 0xb0}, - {OV2722_8BIT, 0x5185, 0xb0}, - {OV2722_8BIT, 0x370c, 0x0c}, - {OV2722_8BIT, 0x3035, 0x00}, - {OV2722_8BIT, 0x3036, 0x2c}, /* 422.4 MHz */ - {OV2722_8BIT, 0x3037, 0xa1}, - {OV2722_8BIT, 0x303e, 0x19}, - {OV2722_8BIT, 0x3038, 0x06}, - {OV2722_8BIT, 0x3018, 0x04}, - {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */ - {OV2722_8BIT, 0x3001, 0x00}, - {OV2722_8BIT, 0x3002, 0x00}, - {OV2722_8BIT, 0x3a0f, 0x40}, - {OV2722_8BIT, 0x3a10, 0x38}, - {OV2722_8BIT, 0x3a1b, 0x48}, - {OV2722_8BIT, 0x3a1e, 0x30}, - {OV2722_8BIT, 0x3a11, 0x90}, - {OV2722_8BIT, 0x3a1f, 0x10}, - {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */ - {OV2722_8BIT, 0x3500, 0x00}, - {OV2722_8BIT, 0x3501, 0x3F}, - {OV2722_8BIT, 0x3502, 0x00}, - {OV2722_8BIT, 0x3508, 0x00}, - {OV2722_8BIT, 0x3509, 0x00}, - {OV2722_TOK_TERM, 0, 0} -}; - -static struct ov2722_reg 
const ov2722_1452_1092_30fps[] = { - {OV2722_8BIT, 0x3021, 0x03}, /* For stand wait for - a whole frame complete.(vblank) */ - {OV2722_8BIT, 0x3718, 0x10}, - {OV2722_8BIT, 0x3702, 0x24}, - {OV2722_8BIT, 0x373a, 0x60}, - {OV2722_8BIT, 0x3715, 0x01}, - {OV2722_8BIT, 0x3703, 0x2e}, - {OV2722_8BIT, 0x3705, 0x10}, - {OV2722_8BIT, 0x3730, 0x30}, - {OV2722_8BIT, 0x3704, 0x62}, - {OV2722_8BIT, 0x3f06, 0x3a}, - {OV2722_8BIT, 0x371c, 0x00}, - {OV2722_8BIT, 0x371d, 0xc4}, - {OV2722_8BIT, 0x371e, 0x01}, - {OV2722_8BIT, 0x371f, 0x0d}, - {OV2722_8BIT, 0x3708, 0x61}, - {OV2722_8BIT, 0x3709, 0x12}, - {OV2722_8BIT, 0x3800, 0x00}, - {OV2722_8BIT, 0x3801, 0xF8}, /* H crop start: 248 */ - {OV2722_8BIT, 0x3802, 0x00}, - {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */ - {OV2722_8BIT, 0x3804, 0x06}, - {OV2722_8BIT, 0x3805, 0xab}, /* H crop end: 1707 */ - {OV2722_8BIT, 0x3806, 0x04}, - {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */ - {OV2722_8BIT, 0x3808, 0x05}, - {OV2722_8BIT, 0x3809, 0xac}, /* H output size: 1452 */ - {OV2722_8BIT, 0x380a, 0x04}, - {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */ - {OV2722_8BIT, 0x380c, 0x08}, - {OV2722_8BIT, 0x380d, 0xd4}, /* H timing: 2260 */ - {OV2722_8BIT, 0x380e, 0x04}, - {OV2722_8BIT, 0x380f, 0xdc}, /* V timing: 1244 */ - {OV2722_8BIT, 0x3810, 0x00}, - {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */ - {OV2722_8BIT, 0x3812, 0x00}, - {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */ - {OV2722_8BIT, 0x3820, 0x80}, - {OV2722_8BIT, 0x3821, 0x06}, /* mirror */ - {OV2722_8BIT, 0x3814, 0x11}, - {OV2722_8BIT, 0x3815, 0x11}, - {OV2722_8BIT, 0x3612, 0x0b}, - {OV2722_8BIT, 0x3618, 0x04}, - {OV2722_8BIT, 0x3a08, 0x01}, - {OV2722_8BIT, 0x3a09, 0x50}, - {OV2722_8BIT, 0x3a0a, 0x01}, - {OV2722_8BIT, 0x3a0b, 0x18}, - {OV2722_8BIT, 0x3a0d, 0x03}, - {OV2722_8BIT, 0x3a0e, 0x03}, - {OV2722_8BIT, 0x4520, 0x00}, - {OV2722_8BIT, 0x4837, 0x1b}, - {OV2722_8BIT, 0x3600, 0x08}, - {OV2722_8BIT, 0x3621, 0xc0}, - {OV2722_8BIT, 0x3632, 0xd2}, /* added for 
power opt */ - {OV2722_8BIT, 0x3633, 0x23}, - {OV2722_8BIT, 0x3634, 0x54}, - {OV2722_8BIT, 0x3f01, 0x0c}, - {OV2722_8BIT, 0x5001, 0xc1}, - {OV2722_8BIT, 0x3614, 0xf0}, - {OV2722_8BIT, 0x3630, 0x2d}, - {OV2722_8BIT, 0x370b, 0x62}, - {OV2722_8BIT, 0x3706, 0x61}, - {OV2722_8BIT, 0x4000, 0x02}, - {OV2722_8BIT, 0x4002, 0xc5}, - {OV2722_8BIT, 0x4005, 0x08}, - {OV2722_8BIT, 0x404f, 0x84}, - {OV2722_8BIT, 0x4051, 0x00}, - {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */ - {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */ - {OV2722_8BIT, 0x3a18, 0x00}, - {OV2722_8BIT, 0x3a19, 0x80}, - {OV2722_8BIT, 0x4521, 0x00}, - {OV2722_8BIT, 0x5183, 0xb0}, - {OV2722_8BIT, 0x5184, 0xb0}, - {OV2722_8BIT, 0x5185, 0xb0}, - {OV2722_8BIT, 0x370c, 0x0c}, - {OV2722_8BIT, 0x3035, 0x00}, - {OV2722_8BIT, 0x3036, 0x2c}, /* 422.4 MHz */ - {OV2722_8BIT, 0x3037, 0xa1}, - {OV2722_8BIT, 0x303e, 0x19}, - {OV2722_8BIT, 0x3038, 0x06}, - {OV2722_8BIT, 0x3018, 0x04}, - {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */ - {OV2722_8BIT, 0x3001, 0x00}, - {OV2722_8BIT, 0x3002, 0x00}, - {OV2722_8BIT, 0x3a0f, 0x40}, - {OV2722_8BIT, 0x3a10, 0x38}, - {OV2722_8BIT, 0x3a1b, 0x48}, - {OV2722_8BIT, 0x3a1e, 0x30}, - {OV2722_8BIT, 0x3a11, 0x90}, - {OV2722_8BIT, 0x3a1f, 0x10}, - {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */ - {OV2722_8BIT, 0x3500, 0x00}, - {OV2722_8BIT, 0x3501, 0x3F}, - {OV2722_8BIT, 0x3502, 0x00}, - {OV2722_8BIT, 0x3508, 0x00}, - {OV2722_8BIT, 0x3509, 0x00}, - {OV2722_TOK_TERM, 0, 0} -}; -#if 0 -static struct ov2722_reg const ov2722_1M3_30fps[] = { - {OV2722_8BIT, 0x3718, 0x10}, - {OV2722_8BIT, 0x3702, 0x24}, - {OV2722_8BIT, 0x373a, 0x60}, - {OV2722_8BIT, 0x3715, 0x01}, - {OV2722_8BIT, 0x3703, 0x2e}, - {OV2722_8BIT, 0x3705, 0x10}, - {OV2722_8BIT, 0x3730, 0x30}, - {OV2722_8BIT, 0x3704, 0x62}, - {OV2722_8BIT, 0x3f06, 0x3a}, - {OV2722_8BIT, 0x371c, 0x00}, - {OV2722_8BIT, 0x371d, 0xc4}, - {OV2722_8BIT, 0x371e, 0x01}, - {OV2722_8BIT, 0x371f, 0x0d}, - {OV2722_8BIT, 0x3708, 0x61}, - {OV2722_8BIT, 
0x3709, 0x12}, - {OV2722_8BIT, 0x3800, 0x01}, - {OV2722_8BIT, 0x3801, 0x4a}, /* H crop start: 330 */ - {OV2722_8BIT, 0x3802, 0x00}, - {OV2722_8BIT, 0x3803, 0x03}, /* V crop start: 3 */ - {OV2722_8BIT, 0x3804, 0x06}, - {OV2722_8BIT, 0x3805, 0xe1}, /* H crop end: 1761 */ - {OV2722_8BIT, 0x3806, 0x04}, - {OV2722_8BIT, 0x3807, 0x47}, /* V crop end: 1095 */ - {OV2722_8BIT, 0x3808, 0x05}, - {OV2722_8BIT, 0x3809, 0x88}, /* H output size: 1416 */ - {OV2722_8BIT, 0x380a, 0x04}, - {OV2722_8BIT, 0x380b, 0x0a}, /* V output size: 1034 */ - - /* H blank timing */ - {OV2722_8BIT, 0x380c, 0x08}, - {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */ - {OV2722_8BIT, 0x380e, 0x04}, - {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */ - {OV2722_8BIT, 0x3810, 0x00}, - {OV2722_8BIT, 0x3811, 0x05}, /* H window offset: 5 */ - {OV2722_8BIT, 0x3812, 0x00}, - {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */ - {OV2722_8BIT, 0x3820, 0x80}, - {OV2722_8BIT, 0x3821, 0x06}, /* flip isp */ - {OV2722_8BIT, 0x3814, 0x11}, - {OV2722_8BIT, 0x3815, 0x11}, - {OV2722_8BIT, 0x3612, 0x0b}, - {OV2722_8BIT, 0x3618, 0x04}, - {OV2722_8BIT, 0x3a08, 0x01}, - {OV2722_8BIT, 0x3a09, 0x50}, - {OV2722_8BIT, 0x3a0a, 0x01}, - {OV2722_8BIT, 0x3a0b, 0x18}, - {OV2722_8BIT, 0x3a0d, 0x03}, - {OV2722_8BIT, 0x3a0e, 0x03}, - {OV2722_8BIT, 0x4520, 0x00}, - {OV2722_8BIT, 0x4837, 0x1b}, - {OV2722_8BIT, 0x3000, 0xff}, - {OV2722_8BIT, 0x3001, 0xff}, - {OV2722_8BIT, 0x3002, 0xf0}, - {OV2722_8BIT, 0x3600, 0x08}, - {OV2722_8BIT, 0x3621, 0xc0}, - {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */ - {OV2722_8BIT, 0x3633, 0x23}, - {OV2722_8BIT, 0x3634, 0x54}, - {OV2722_8BIT, 0x3f01, 0x0c}, - {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */ - {OV2722_8BIT, 0x3614, 0xf0}, - {OV2722_8BIT, 0x3630, 0x2d}, - {OV2722_8BIT, 0x370b, 0x62}, - {OV2722_8BIT, 0x3706, 0x61}, - {OV2722_8BIT, 0x4000, 0x02}, - {OV2722_8BIT, 0x4002, 0xc5}, - {OV2722_8BIT, 0x4005, 0x08}, - {OV2722_8BIT, 0x404f, 0x84}, - {OV2722_8BIT, 0x4051, 0x00}, - 
{OV2722_8BIT, 0x5000, 0xcf}, - {OV2722_8BIT, 0x3a18, 0x00}, - {OV2722_8BIT, 0x3a19, 0x80}, - {OV2722_8BIT, 0x4521, 0x00}, - {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */ - {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */ - {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */ - {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */ - {OV2722_8BIT, 0x370c, 0x0c}, - {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */ - {OV2722_8BIT, 0x3035, 0x00}, - {OV2722_8BIT, 0x3036, 0x26}, - {OV2722_8BIT, 0x3037, 0xa1}, - {OV2722_8BIT, 0x303e, 0x19}, - {OV2722_8BIT, 0x3038, 0x06}, - {OV2722_8BIT, 0x3018, 0x04}, - - /* Added for power optimization */ - {OV2722_8BIT, 0x3000, 0x00}, - {OV2722_8BIT, 0x3001, 0x00}, - {OV2722_8BIT, 0x3002, 0x00}, - {OV2722_8BIT, 0x3a0f, 0x40}, - {OV2722_8BIT, 0x3a10, 0x38}, - {OV2722_8BIT, 0x3a1b, 0x48}, - {OV2722_8BIT, 0x3a1e, 0x30}, - {OV2722_8BIT, 0x3a11, 0x90}, - {OV2722_8BIT, 0x3a1f, 0x10}, - {OV2722_8BIT, 0x3503, 0x17}, - {OV2722_8BIT, 0x3500, 0x00}, - {OV2722_8BIT, 0x3501, 0x46}, - {OV2722_8BIT, 0x3502, 0x00}, - {OV2722_8BIT, 0x3508, 0x00}, - {OV2722_8BIT, 0x3509, 0x10}, - {OV2722_TOK_TERM, 0, 0}, -}; -#endif - -static struct ov2722_reg const ov2722_1080p_30fps[] = { - {OV2722_8BIT, 0x3021, 0x03}, /* For stand wait for a whole - frame complete.(vblank) */ - {OV2722_8BIT, 0x3718, 0x10}, - {OV2722_8BIT, 0x3702, 0x24}, - {OV2722_8BIT, 0x373a, 0x60}, - {OV2722_8BIT, 0x3715, 0x01}, - {OV2722_8BIT, 0x3703, 0x2e}, - {OV2722_8BIT, 0x3705, 0x2b}, - {OV2722_8BIT, 0x3730, 0x30}, - {OV2722_8BIT, 0x3704, 0x62}, - {OV2722_8BIT, 0x3f06, 0x3a}, - {OV2722_8BIT, 0x371c, 0x00}, - {OV2722_8BIT, 0x371d, 0xc4}, - {OV2722_8BIT, 0x371e, 0x01}, - {OV2722_8BIT, 0x371f, 0x28}, - {OV2722_8BIT, 0x3708, 0x61}, - {OV2722_8BIT, 0x3709, 0x12}, - {OV2722_8BIT, 0x3800, 0x00}, - {OV2722_8BIT, 0x3801, 0x08}, /* H crop start: 8 */ - {OV2722_8BIT, 0x3802, 0x00}, - {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */ - {OV2722_8BIT, 0x3804, 0x07}, - {OV2722_8BIT, 0x3805, 0x9b}, /* H crop end: 1947 */ - 
{OV2722_8BIT, 0x3806, 0x04}, - {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */ - {OV2722_8BIT, 0x3808, 0x07}, - {OV2722_8BIT, 0x3809, 0x8c}, /* H output size: 1932 */ - {OV2722_8BIT, 0x380a, 0x04}, - {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */ - {OV2722_8BIT, 0x380c, 0x08}, - {OV2722_8BIT, 0x380d, 0x14}, /* H timing: 2068 */ - {OV2722_8BIT, 0x380e, 0x04}, - {OV2722_8BIT, 0x380f, 0x5a}, /* V timing: 1114 */ - {OV2722_8BIT, 0x3810, 0x00}, - {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */ - {OV2722_8BIT, 0x3812, 0x00}, - {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */ - {OV2722_8BIT, 0x3820, 0x80}, - {OV2722_8BIT, 0x3821, 0x06}, /* mirror */ - {OV2722_8BIT, 0x3814, 0x11}, - {OV2722_8BIT, 0x3815, 0x11}, - {OV2722_8BIT, 0x3612, 0x4b}, - {OV2722_8BIT, 0x3618, 0x04}, - {OV2722_8BIT, 0x3a08, 0x01}, - {OV2722_8BIT, 0x3a09, 0x50}, - {OV2722_8BIT, 0x3a0a, 0x01}, - {OV2722_8BIT, 0x3a0b, 0x18}, - {OV2722_8BIT, 0x3a0d, 0x03}, - {OV2722_8BIT, 0x3a0e, 0x03}, - {OV2722_8BIT, 0x4520, 0x00}, - {OV2722_8BIT, 0x4837, 0x1b}, - {OV2722_8BIT, 0x3000, 0xff}, - {OV2722_8BIT, 0x3001, 0xff}, - {OV2722_8BIT, 0x3002, 0xf0}, - {OV2722_8BIT, 0x3600, 0x08}, - {OV2722_8BIT, 0x3621, 0xc0}, - {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */ - {OV2722_8BIT, 0x3633, 0x63}, - {OV2722_8BIT, 0x3634, 0x24}, - {OV2722_8BIT, 0x3f01, 0x0c}, - {OV2722_8BIT, 0x5001, 0xc1}, - {OV2722_8BIT, 0x3614, 0xf0}, - {OV2722_8BIT, 0x3630, 0x2d}, - {OV2722_8BIT, 0x370b, 0x62}, - {OV2722_8BIT, 0x3706, 0x61}, - {OV2722_8BIT, 0x4000, 0x02}, - {OV2722_8BIT, 0x4002, 0xc5}, - {OV2722_8BIT, 0x4005, 0x08}, - {OV2722_8BIT, 0x404f, 0x84}, - {OV2722_8BIT, 0x4051, 0x00}, - {OV2722_8BIT, 0x5000, 0xcd}, /* manual 3a */ - {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */ - {OV2722_8BIT, 0x3a18, 0x00}, - {OV2722_8BIT, 0x3a19, 0x80}, - {OV2722_8BIT, 0x3503, 0x17}, - {OV2722_8BIT, 0x4521, 0x00}, - {OV2722_8BIT, 0x5183, 0xb0}, - {OV2722_8BIT, 0x5184, 0xb0}, - {OV2722_8BIT, 0x5185, 0xb0}, - {OV2722_8BIT, 
0x370c, 0x0c}, - {OV2722_8BIT, 0x3035, 0x00}, - {OV2722_8BIT, 0x3036, 0x24}, /* 345.6 MHz */ - {OV2722_8BIT, 0x3037, 0xa1}, - {OV2722_8BIT, 0x303e, 0x19}, - {OV2722_8BIT, 0x3038, 0x06}, - {OV2722_8BIT, 0x3018, 0x04}, - {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */ - {OV2722_8BIT, 0x3001, 0x00}, - {OV2722_8BIT, 0x3002, 0x00}, - {OV2722_8BIT, 0x3a0f, 0x40}, - {OV2722_8BIT, 0x3a10, 0x38}, - {OV2722_8BIT, 0x3a1b, 0x48}, - {OV2722_8BIT, 0x3a1e, 0x30}, - {OV2722_8BIT, 0x3a11, 0x90}, - {OV2722_8BIT, 0x3a1f, 0x10}, - {OV2722_8BIT, 0x3011, 0x22}, - {OV2722_8BIT, 0x3500, 0x00}, - {OV2722_8BIT, 0x3501, 0x3F}, - {OV2722_8BIT, 0x3502, 0x00}, - {OV2722_8BIT, 0x3508, 0x00}, - {OV2722_8BIT, 0x3509, 0x00}, - {OV2722_TOK_TERM, 0, 0} -}; - -#if 0 /* Currently unused */ -static struct ov2722_reg const ov2722_720p_30fps[] = { - {OV2722_8BIT, 0x3021, 0x03}, - {OV2722_8BIT, 0x3718, 0x10}, - {OV2722_8BIT, 0x3702, 0x24}, - {OV2722_8BIT, 0x373a, 0x60}, - {OV2722_8BIT, 0x3715, 0x01}, - {OV2722_8BIT, 0x3703, 0x2e}, - {OV2722_8BIT, 0x3705, 0x10}, - {OV2722_8BIT, 0x3730, 0x30}, - {OV2722_8BIT, 0x3704, 0x62}, - {OV2722_8BIT, 0x3f06, 0x3a}, - {OV2722_8BIT, 0x371c, 0x00}, - {OV2722_8BIT, 0x371d, 0xc4}, - {OV2722_8BIT, 0x371e, 0x01}, - {OV2722_8BIT, 0x371f, 0x0d}, - {OV2722_8BIT, 0x3708, 0x61}, - {OV2722_8BIT, 0x3709, 0x12}, - {OV2722_8BIT, 0x3800, 0x01}, - {OV2722_8BIT, 0x3801, 0x40}, /* H crop start: 320 */ - {OV2722_8BIT, 0x3802, 0x00}, - {OV2722_8BIT, 0x3803, 0xb1}, /* V crop start: 177 */ - {OV2722_8BIT, 0x3804, 0x06}, - {OV2722_8BIT, 0x3805, 0x55}, /* H crop end: 1621 */ - {OV2722_8BIT, 0x3806, 0x03}, - {OV2722_8BIT, 0x3807, 0x95}, /* V crop end: 918 */ - {OV2722_8BIT, 0x3808, 0x05}, - {OV2722_8BIT, 0x3809, 0x10}, /* H output size: 0x0788==1928 */ - {OV2722_8BIT, 0x380a, 0x02}, - {OV2722_8BIT, 0x380b, 0xe0}, /* output size: 0x02DE==734 */ - {OV2722_8BIT, 0x380c, 0x08}, - {OV2722_8BIT, 0x380d, 0x00}, /* H timing: 2048 */ - {OV2722_8BIT, 0x380e, 0x04}, - {OV2722_8BIT, 0x380f, 
0xa3}, /* V timing: 1187 */ - {OV2722_8BIT, 0x3810, 0x00}, - {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */ - {OV2722_8BIT, 0x3812, 0x00}, - {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */ - {OV2722_8BIT, 0x3820, 0x80}, - {OV2722_8BIT, 0x3821, 0x06}, /* mirror */ - {OV2722_8BIT, 0x3814, 0x11}, - {OV2722_8BIT, 0x3815, 0x11}, - {OV2722_8BIT, 0x3612, 0x0b}, - {OV2722_8BIT, 0x3618, 0x04}, - {OV2722_8BIT, 0x3a08, 0x01}, - {OV2722_8BIT, 0x3a09, 0x50}, - {OV2722_8BIT, 0x3a0a, 0x01}, - {OV2722_8BIT, 0x3a0b, 0x18}, - {OV2722_8BIT, 0x3a0d, 0x03}, - {OV2722_8BIT, 0x3a0e, 0x03}, - {OV2722_8BIT, 0x4520, 0x00}, - {OV2722_8BIT, 0x4837, 0x1b}, - {OV2722_8BIT, 0x3600, 0x08}, - {OV2722_8BIT, 0x3621, 0xc0}, - {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */ - {OV2722_8BIT, 0x3633, 0x23}, - {OV2722_8BIT, 0x3634, 0x54}, - {OV2722_8BIT, 0x3f01, 0x0c}, - {OV2722_8BIT, 0x5001, 0xc1}, - {OV2722_8BIT, 0x3614, 0xf0}, - {OV2722_8BIT, 0x3630, 0x2d}, - {OV2722_8BIT, 0x370b, 0x62}, - {OV2722_8BIT, 0x3706, 0x61}, - {OV2722_8BIT, 0x4000, 0x02}, - {OV2722_8BIT, 0x4002, 0xc5}, - {OV2722_8BIT, 0x4005, 0x08}, - {OV2722_8BIT, 0x404f, 0x84}, - {OV2722_8BIT, 0x4051, 0x00}, - {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */ - {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */ - {OV2722_8BIT, 0x3a18, 0x00}, - {OV2722_8BIT, 0x3a19, 0x80}, - {OV2722_8BIT, 0x4521, 0x00}, - {OV2722_8BIT, 0x5183, 0xb0}, - {OV2722_8BIT, 0x5184, 0xb0}, - {OV2722_8BIT, 0x5185, 0xb0}, - {OV2722_8BIT, 0x370c, 0x0c}, - {OV2722_8BIT, 0x3035, 0x00}, - {OV2722_8BIT, 0x3036, 0x26}, /* {0x3036, 0x2c}, //422.4 MHz */ - {OV2722_8BIT, 0x3037, 0xa1}, - {OV2722_8BIT, 0x303e, 0x19}, - {OV2722_8BIT, 0x3038, 0x06}, - {OV2722_8BIT, 0x3018, 0x04}, - {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */ - {OV2722_8BIT, 0x3001, 0x00}, - {OV2722_8BIT, 0x3002, 0x00}, - {OV2722_8BIT, 0x3a0f, 0x40}, - {OV2722_8BIT, 0x3a10, 0x38}, - {OV2722_8BIT, 0x3a1b, 0x48}, - {OV2722_8BIT, 0x3a1e, 0x30}, - {OV2722_8BIT, 0x3a11, 0x90}, - 
{OV2722_8BIT, 0x3a1f, 0x10}, - {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */ - {OV2722_8BIT, 0x3500, 0x00}, - {OV2722_8BIT, 0x3501, 0x3F}, - {OV2722_8BIT, 0x3502, 0x00}, - {OV2722_8BIT, 0x3508, 0x00}, - {OV2722_8BIT, 0x3509, 0x00}, - {OV2722_TOK_TERM, 0, 0}, -}; -#endif - -static struct ov2722_resolution ov2722_res_preview[] = { - { - .desc = "ov2722_1632_1092_30fps", - .width = 1632, - .height = 1092, - .fps = 30, - .pix_clk_freq = 85, - .used = 0, - .pixels_per_line = 2260, - .lines_per_frame = 1244, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_1632_1092_30fps, - .mipi_freq = 422400, - }, - { - .desc = "ov2722_1452_1092_30fps", - .width = 1452, - .height = 1092, - .fps = 30, - .pix_clk_freq = 85, - .used = 0, - .pixels_per_line = 2260, - .lines_per_frame = 1244, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_1452_1092_30fps, - .mipi_freq = 422400, - }, - { - .desc = "ov2722_1080P_30fps", - .width = 1932, - .height = 1092, - .pix_clk_freq = 69, - .fps = 30, - .used = 0, - .pixels_per_line = 2068, - .lines_per_frame = 1114, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_1080p_30fps, - .mipi_freq = 345600, - }, -}; -#define N_RES_PREVIEW (ARRAY_SIZE(ov2722_res_preview)) - -/* - * Disable non-preview configurations until the configuration selection is - * improved. 
- */ -#if 0 -struct ov2722_resolution ov2722_res_still[] = { - { - .desc = "ov2722_480P_30fps", - .width = 1632, - .height = 1092, - .fps = 30, - .pix_clk_freq = 85, - .used = 0, - .pixels_per_line = 2260, - .lines_per_frame = 1244, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_1632_1092_30fps, - .mipi_freq = 422400, - }, - { - .desc = "ov2722_1452_1092_30fps", - .width = 1452, - .height = 1092, - .fps = 30, - .pix_clk_freq = 85, - .used = 0, - .pixels_per_line = 2260, - .lines_per_frame = 1244, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_1452_1092_30fps, - .mipi_freq = 422400, - }, - { - .desc = "ov2722_1080P_30fps", - .width = 1932, - .height = 1092, - .pix_clk_freq = 69, - .fps = 30, - .used = 0, - .pixels_per_line = 2068, - .lines_per_frame = 1114, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_1080p_30fps, - .mipi_freq = 345600, - }, -}; -#define N_RES_STILL (ARRAY_SIZE(ov2722_res_still)) - -struct ov2722_resolution ov2722_res_video[] = { - { - .desc = "ov2722_QVGA_30fps", - .width = 336, - .height = 256, - .fps = 30, - .pix_clk_freq = 73, - .used = 0, - .pixels_per_line = 2048, - .lines_per_frame = 1184, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_QVGA_30fps, - .mipi_freq = 364800, - }, - { - .desc = "ov2722_480P_30fps", - .width = 736, - .height = 496, - .fps = 30, - .pix_clk_freq = 73, - .used = 0, - .pixels_per_line = 2048, - .lines_per_frame = 1184, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_480P_30fps, - }, - { - .desc = "ov2722_1080P_30fps", - .width = 1932, - .height = 1092, - .pix_clk_freq = 69, - .fps = 30, - .used = 0, - .pixels_per_line = 2068, - .lines_per_frame = 1114, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .skip_frames = 3, - .regs = ov2722_1080p_30fps, - .mipi_freq = 
345600, - }, -}; -#define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video)) -#endif - -static struct ov2722_resolution *ov2722_res = ov2722_res_preview; -static unsigned long N_RES = N_RES_PREVIEW; -#endif diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig deleted file mode 100644 index 3f527f2047a7..000000000000 --- a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig +++ /dev/null @@ -1,11 +0,0 @@ -config VIDEO_ATOMISP_OV5693 - tristate "Omnivision ov5693 sensor support" - depends on ACPI - depends on I2C && VIDEO_V4L2 - ---help--- - This is a Video4Linux2 sensor-level driver for the Micron - ov5693 5 Mpixel camera. - - ov5693 is video camera sensor. - - It currently only works with the atomisp driver. diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile deleted file mode 100644 index 3275f2be229e..000000000000 --- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += atomisp-ov5693.o diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h deleted file mode 100644 index 4de44569fe54..000000000000 --- a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Support for AD5823 VCM. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __AD5823_H__ -#define __AD5823_H__ - -#include - - -#define AD5823_VCM_ADDR 0x0c - -#define AD5823_REG_RESET 0x01 -#define AD5823_REG_MODE 0x02 -#define AD5823_REG_VCM_MOVE_TIME 0x03 -#define AD5823_REG_VCM_CODE_MSB 0x04 -#define AD5823_REG_VCM_CODE_LSB 0x05 -#define AD5823_REG_VCM_THRESHOLD_MSB 0x06 -#define AD5823_REG_VCM_THRESHOLD_LSB 0x07 - -#define AD5823_REG_LENGTH 0x1 - -#define AD5823_RING_CTRL_ENABLE 0x04 -#define AD5823_RING_CTRL_DISABLE 0x00 - -#define AD5823_RESONANCE_PERIOD 100000 -#define AD5823_RESONANCE_COEF 512 -#define AD5823_HIGH_FREQ_RANGE 0x80 - -#define VCM_CODE_MSB_MASK 0xfc -#define AD5823_INIT_FOCUS_POS 350 - -enum ad5823_tok_type { - AD5823_8BIT = 0x1, - AD5823_16BIT = 0x2, -}; - -enum ad5823_vcm_mode { - AD5823_ARC_RES0 = 0x0, /* Actuator response control RES1 */ - AD5823_ARC_RES1 = 0x1, /* Actuator response control RES0.5 */ - AD5823_ARC_RES2 = 0x2, /* Actuator response control RES2 */ - AD5823_ESRC = 0x3, /* Enhanced slew rate control */ - AD5823_DIRECT = 0x4, /* Direct control */ -}; - -#define AD5823_INVALID_CONFIG 0xffffffff -#define AD5823_MAX_FOCUS_POS 1023 -#define DELAY_PER_STEP_NS 1000000 -#define DELAY_MAX_PER_STEP_NS (1000000 * 1023) -#endif diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c deleted file mode 100644 index 714297c36b3e..000000000000 --- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c +++ /dev/null @@ -1,1993 +0,0 @@ -/* - * Support for OmniVision OV5693 1080p HD camera sensor. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../../include/linux/atomisp_gmin_platform.h" - -#include "ov5693.h" -#include "ad5823.h" - -#define __cci_delay(t) \ - do { \ - if ((t) < 10) { \ - usleep_range((t) * 1000, ((t) + 1) * 1000); \ - } else { \ - msleep((t)); \ - } \ - } while (0) - -/* Value 30ms reached through experimentation on byt ecs. - * The DS specifies a much lower value but when using a smaller value - * the I2C bus sometimes locks up permanently when starting the camera. - * This issue could not be reproduced on cht, so we can reduce the - * delay value to a lower value when insmod. 
- */ -static uint up_delay = 30; -module_param(up_delay, uint, 0644); -MODULE_PARM_DESC(up_delay, "Delay prior to the first CCI transaction for ov5693"); - -static int vcm_ad_i2c_wr8(struct i2c_client *client, u8 reg, u8 val) -{ - int err; - struct i2c_msg msg; - u8 buf[2]; - - buf[0] = reg; - buf[1] = val; - - msg.addr = VCM_ADDR; - msg.flags = 0; - msg.len = 2; - msg.buf = &buf[0]; - - err = i2c_transfer(client->adapter, &msg, 1); - if (err != 1) { - dev_err(&client->dev, "%s: vcm i2c fail, err code = %d\n", - __func__, err); - return -EIO; - } - return 0; -} - -static int ad5823_i2c_write(struct i2c_client *client, u8 reg, u8 val) -{ - struct i2c_msg msg; - u8 buf[2]; - - buf[0] = reg; - buf[1] = val; - msg.addr = AD5823_VCM_ADDR; - msg.flags = 0; - msg.len = 0x02; - msg.buf = &buf[0]; - - if (i2c_transfer(client->adapter, &msg, 1) != 1) - return -EIO; - return 0; -} - -static int ad5823_i2c_read(struct i2c_client *client, u8 reg, u8 *val) -{ - struct i2c_msg msg[2]; - u8 buf[2]; - - buf[0] = reg; - buf[1] = 0; - - msg[0].addr = AD5823_VCM_ADDR; - msg[0].flags = 0; - msg[0].len = 0x01; - msg[0].buf = &buf[0]; - - msg[1].addr = 0x0c; - msg[1].flags = I2C_M_RD; - msg[1].len = 0x01; - msg[1].buf = &buf[1]; - *val = 0; - if (i2c_transfer(client->adapter, msg, 2) != 2) - return -EIO; - *val = buf[1]; - return 0; -} - - -static const uint32_t ov5693_embedded_effective_size = 28; - -/* i2c read/write stuff */ -static int ov5693_read_reg(struct i2c_client *client, - u16 data_length, u16 reg, u16 *val) -{ - int err; - struct i2c_msg msg[2]; - unsigned char data[6]; - - if (!client->adapter) { - dev_err(&client->dev, "%s error, no client->adapter\n", - __func__); - return -ENODEV; - } - - if (data_length != OV5693_8BIT && data_length != OV5693_16BIT - && data_length != OV5693_32BIT) { - dev_err(&client->dev, "%s error, invalid data length\n", - __func__); - return -EINVAL; - } - - memset(msg, 0, sizeof(msg)); - - msg[0].addr = client->addr; - msg[0].flags = 0; - 
msg[0].len = I2C_MSG_LENGTH; - msg[0].buf = data; - - /* high byte goes out first */ - data[0] = (u8)(reg >> 8); - data[1] = (u8)(reg & 0xff); - - msg[1].addr = client->addr; - msg[1].len = data_length; - msg[1].flags = I2C_M_RD; - msg[1].buf = data; - - err = i2c_transfer(client->adapter, msg, 2); - if (err != 2) { - if (err >= 0) - err = -EIO; - dev_err(&client->dev, - "read from offset 0x%x error %d", reg, err); - return err; - } - - *val = 0; - /* high byte comes first */ - if (data_length == OV5693_8BIT) - *val = (u8)data[0]; - else if (data_length == OV5693_16BIT) - *val = be16_to_cpu(*(__be16 *)&data[0]); - else - *val = be32_to_cpu(*(__be32 *)&data[0]); - - return 0; -} - -static int ov5693_i2c_write(struct i2c_client *client, u16 len, u8 *data) -{ - struct i2c_msg msg; - const int num_msg = 1; - int ret; - - msg.addr = client->addr; - msg.flags = 0; - msg.len = len; - msg.buf = data; - ret = i2c_transfer(client->adapter, &msg, 1); - - return ret == num_msg ? 0 : -EIO; -} - -static int vcm_dw_i2c_write(struct i2c_client *client, u16 data) -{ - struct i2c_msg msg; - const int num_msg = 1; - int ret; - __be16 val; - - val = cpu_to_be16(data); - msg.addr = VCM_ADDR; - msg.flags = 0; - msg.len = OV5693_16BIT; - msg.buf = (void *)&val; - - ret = i2c_transfer(client->adapter, &msg, 1); - - return ret == num_msg ? 0 : -EIO; -} - -/* - * Theory: per datasheet, the two VCMs both allow for a 2-byte read. - * The DW9714 doesn't actually specify what this does (it has a - * two-byte write-only protocol, but specifies the read sequence as - * legal), but it returns the same data (zeroes) always, after an - * undocumented initial NAK. The AD5823 has a one-byte address - * register to which all writes go, and subsequent reads will cycle - * through the 8 bytes of registers. Notably, the default values (the - * device is always power-cycled affirmatively, so we can rely on - * these) in AD5823 are not pairwise repetitions of the same 16 bit - * word. 
So all we have to do is sequentially read two bytes at a - * time and see if we detect a difference in any of the first four - * pairs. - */ -static int vcm_detect(struct i2c_client *client) -{ - int i, ret; - struct i2c_msg msg; - u16 data0 = 0, data; - - for (i = 0; i < 4; i++) { - msg.addr = VCM_ADDR; - msg.flags = I2C_M_RD; - msg.len = sizeof(data); - msg.buf = (u8 *)&data; - ret = i2c_transfer(client->adapter, &msg, 1); - - /* - * DW9714 always fails the first read and returns - * zeroes for subsequent ones - */ - if (i == 0 && ret == -EREMOTEIO) { - data0 = 0; - continue; - } - - if (i == 0) - data0 = data; - - if (data != data0) - return VCM_AD5823; - } - return ret == 1 ? VCM_DW9714 : ret; -} - -static int ov5693_write_reg(struct i2c_client *client, u16 data_length, - u16 reg, u16 val) -{ - int ret; - unsigned char data[4] = {0}; - __be16 *wreg = (void *)data; - const u16 len = data_length + sizeof(u16); /* 16-bit address + data */ - - if (data_length != OV5693_8BIT && data_length != OV5693_16BIT) { - dev_err(&client->dev, - "%s error, invalid data_length\n", __func__); - return -EINVAL; - } - - /* high byte goes out first */ - *wreg = cpu_to_be16(reg); - - if (data_length == OV5693_8BIT) { - data[2] = (u8)(val); - } else { - /* OV5693_16BIT */ - __be16 *wdata = (void *)&data[2]; - - *wdata = cpu_to_be16(val); - } - - ret = ov5693_i2c_write(client, len, data); - if (ret) - dev_err(&client->dev, - "write error: wrote 0x%x to offset 0x%x error %d", - val, reg, ret); - - return ret; -} - -/* - * ov5693_write_reg_array - Initializes a list of OV5693 registers - * @client: i2c driver client structure - * @reglist: list of registers to be written - * - * This function initializes a list of registers. When consecutive addresses - * are found in a row on the list, this function creates a buffer and sends - * consecutive data in a single i2c_transfer(). 
- * - * __ov5693_flush_reg_array, __ov5693_buf_reg_array() and - * __ov5693_write_reg_is_consecutive() are internal functions to - * ov5693_write_reg_array_fast() and should be not used anywhere else. - * - */ - -static int __ov5693_flush_reg_array(struct i2c_client *client, - struct ov5693_write_ctrl *ctrl) -{ - u16 size; - __be16 *reg = (void *)&ctrl->buffer.addr; - - if (ctrl->index == 0) - return 0; - - size = sizeof(u16) + ctrl->index; /* 16-bit address + data */ - - *reg = cpu_to_be16(ctrl->buffer.addr); - ctrl->index = 0; - - return ov5693_i2c_write(client, size, (u8 *)reg); -} - -static int __ov5693_buf_reg_array(struct i2c_client *client, - struct ov5693_write_ctrl *ctrl, - const struct ov5693_reg *next) -{ - int size; - __be16 *data16; - - switch (next->type) { - case OV5693_8BIT: - size = 1; - ctrl->buffer.data[ctrl->index] = (u8)next->val; - break; - case OV5693_16BIT: - size = 2; - - data16 = (void *)&ctrl->buffer.data[ctrl->index]; - *data16 = cpu_to_be16((u16)next->val); - break; - default: - return -EINVAL; - } - - /* When first item is added, we need to store its starting address */ - if (ctrl->index == 0) - ctrl->buffer.addr = next->reg; - - ctrl->index += size; - - /* - * Buffer cannot guarantee free space for u32? Better flush it to avoid - * possible lack of memory for next item. 
- */ - if (ctrl->index + sizeof(u16) >= OV5693_MAX_WRITE_BUF_SIZE) - return __ov5693_flush_reg_array(client, ctrl); - - return 0; -} - -static int __ov5693_write_reg_is_consecutive(struct i2c_client *client, - struct ov5693_write_ctrl *ctrl, - const struct ov5693_reg *next) -{ - if (ctrl->index == 0) - return 1; - - return ctrl->buffer.addr + ctrl->index == next->reg; -} - -static int ov5693_write_reg_array(struct i2c_client *client, - const struct ov5693_reg *reglist) -{ - const struct ov5693_reg *next = reglist; - struct ov5693_write_ctrl ctrl; - int err; - - ctrl.index = 0; - for (; next->type != OV5693_TOK_TERM; next++) { - switch (next->type & OV5693_TOK_MASK) { - case OV5693_TOK_DELAY: - err = __ov5693_flush_reg_array(client, &ctrl); - if (err) - return err; - msleep(next->val); - break; - default: - /* - * If next address is not consecutive, data needs to be - * flushed before proceed. - */ - if (!__ov5693_write_reg_is_consecutive(client, &ctrl, - next)) { - err = __ov5693_flush_reg_array(client, &ctrl); - if (err) - return err; - } - err = __ov5693_buf_reg_array(client, &ctrl, next); - if (err) { - dev_err(&client->dev, - "%s: write error, aborted\n", - __func__); - return err; - } - break; - } - } - - return __ov5693_flush_reg_array(client, &ctrl); -} -static int ov5693_g_focal(struct v4l2_subdev *sd, s32 *val) -{ - *val = (OV5693_FOCAL_LENGTH_NUM << 16) | OV5693_FOCAL_LENGTH_DEM; - return 0; -} - -static int ov5693_g_fnumber(struct v4l2_subdev *sd, s32 *val) -{ - /*const f number for imx*/ - *val = (OV5693_F_NUMBER_DEFAULT_NUM << 16) | OV5693_F_NUMBER_DEM; - return 0; -} - -static int ov5693_g_fnumber_range(struct v4l2_subdev *sd, s32 *val) -{ - *val = (OV5693_F_NUMBER_DEFAULT_NUM << 24) | - (OV5693_F_NUMBER_DEM << 16) | - (OV5693_F_NUMBER_DEFAULT_NUM << 8) | OV5693_F_NUMBER_DEM; - return 0; -} - -static int ov5693_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - - *val = 
ov5693_res[dev->fmt_idx].bin_factor_x; - - return 0; -} - -static int ov5693_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - - *val = ov5693_res[dev->fmt_idx].bin_factor_y; - - return 0; -} - -static int ov5693_get_intg_factor(struct i2c_client *client, - struct camera_mipi_info *info, - const struct ov5693_resolution *res) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct atomisp_sensor_mode_data *buf = &info->data; - unsigned int pix_clk_freq_hz; - u16 reg_val; - int ret; - - if (info == NULL) - return -EINVAL; - - /* pixel clock */ - pix_clk_freq_hz = res->pix_clk_freq * 1000000; - - dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz; - buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz; - - /* get integration time */ - buf->coarse_integration_time_min = OV5693_COARSE_INTG_TIME_MIN; - buf->coarse_integration_time_max_margin = - OV5693_COARSE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_min = OV5693_FINE_INTG_TIME_MIN; - buf->fine_integration_time_max_margin = - OV5693_FINE_INTG_TIME_MAX_MARGIN; - - buf->fine_integration_time_def = OV5693_FINE_INTG_TIME_MIN; - buf->frame_length_lines = res->lines_per_frame; - buf->line_length_pck = res->pixels_per_line; - buf->read_mode = res->bin_mode; - - /* get the cropping and output resolution to ISP for this mode. 
 */ -	ret = ov5693_read_reg(client, OV5693_16BIT, - OV5693_HORIZONTAL_START_H, &reg_val); - if (ret) - return ret; - buf->crop_horizontal_start = reg_val; - - ret = ov5693_read_reg(client, OV5693_16BIT, - OV5693_VERTICAL_START_H, &reg_val); - if (ret) - return ret; - buf->crop_vertical_start = reg_val; - - ret = ov5693_read_reg(client, OV5693_16BIT, - OV5693_HORIZONTAL_END_H, &reg_val); - if (ret) - return ret; - buf->crop_horizontal_end = reg_val; - - ret = ov5693_read_reg(client, OV5693_16BIT, - OV5693_VERTICAL_END_H, &reg_val); - if (ret) - return ret; - buf->crop_vertical_end = reg_val; - - ret = ov5693_read_reg(client, OV5693_16BIT, - OV5693_HORIZONTAL_OUTPUT_SIZE_H, &reg_val); - if (ret) - return ret; - buf->output_width = reg_val; - - ret = ov5693_read_reg(client, OV5693_16BIT, - OV5693_VERTICAL_OUTPUT_SIZE_H, &reg_val); - if (ret) - return ret; - buf->output_height = reg_val; - - buf->binning_factor_x = res->bin_factor_x ? - res->bin_factor_x : 1; - buf->binning_factor_y = res->bin_factor_y ? - res->bin_factor_y : 1; - return 0; -} - -static long __ov5693_set_exposure(struct v4l2_subdev *sd, int coarse_itg, - int gain, int digitgain) - -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov5693_device *dev = to_ov5693_sensor(sd); - u16 vts, hts; - int ret, exp_val; - - hts = ov5693_res[dev->fmt_idx].pixels_per_line; - vts = ov5693_res[dev->fmt_idx].lines_per_frame; - /* - * If coarse_itg is larger than 1<<15, can not write to reg directly. - * The way is to write coarse_itg/2 to the reg, meanwhile write 2*hts - * to the reg. 
- */ - if (coarse_itg > (1 << 15)) { - hts = hts * 2; - coarse_itg = (int)coarse_itg / 2; - } - /* group hold */ - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_GROUP_ACCESS, 0x00); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_GROUP_ACCESS); - return ret; - } - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_TIMING_HTS_H, (hts >> 8) & 0xFF); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_TIMING_HTS_H); - return ret; - } - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_TIMING_HTS_L, hts & 0xFF); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_TIMING_HTS_L); - return ret; - } - /* Increase the VTS to match exposure + MARGIN */ - if (coarse_itg > vts - OV5693_INTEGRATION_TIME_MARGIN) - vts = (u16) coarse_itg + OV5693_INTEGRATION_TIME_MARGIN; - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_TIMING_VTS_H, (vts >> 8) & 0xFF); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_TIMING_VTS_H); - return ret; - } - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_TIMING_VTS_L, vts & 0xFF); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_TIMING_VTS_L); - return ret; - } - - /* set exposure */ - - /* Lower four bit should be 0*/ - exp_val = coarse_itg << 4; - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_EXPOSURE_L, exp_val & 0xFF); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_EXPOSURE_L); - return ret; - } - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_EXPOSURE_M, (exp_val >> 8) & 0xFF); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_EXPOSURE_M); - return ret; - } - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_EXPOSURE_H, (exp_val >> 16) & 0x0F); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, 
OV5693_EXPOSURE_H); - return ret; - } - - /* Analog gain */ - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_AGC_L, gain & 0xff); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_AGC_L); - return ret; - } - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_AGC_H, (gain >> 8) & 0xff); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_AGC_H); - return ret; - } - - /* Digital gain */ - if (digitgain) { - ret = ov5693_write_reg(client, OV5693_16BIT, - OV5693_MWB_RED_GAIN_H, digitgain); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_MWB_RED_GAIN_H); - return ret; - } - - ret = ov5693_write_reg(client, OV5693_16BIT, - OV5693_MWB_GREEN_GAIN_H, digitgain); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_MWB_RED_GAIN_H); - return ret; - } - - ret = ov5693_write_reg(client, OV5693_16BIT, - OV5693_MWB_BLUE_GAIN_H, digitgain); - if (ret) { - dev_err(&client->dev, "%s: write %x error, aborted\n", - __func__, OV5693_MWB_RED_GAIN_H); - return ret; - } - } - - /* End group */ - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_GROUP_ACCESS, 0x10); - if (ret) - return ret; - - /* Delay launch group */ - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_GROUP_ACCESS, 0xa0); - if (ret) - return ret; - return ret; -} - -static int ov5693_set_exposure(struct v4l2_subdev *sd, int exposure, - int gain, int digitgain) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - int ret; - - mutex_lock(&dev->input_lock); - ret = __ov5693_set_exposure(sd, exposure, gain, digitgain); - mutex_unlock(&dev->input_lock); - - return ret; -} - -static long ov5693_s_exposure(struct v4l2_subdev *sd, - struct atomisp_exposure *exposure) -{ - u16 coarse_itg = exposure->integration_time[0]; - u16 analog_gain = exposure->gain[0]; - u16 digital_gain = exposure->gain[1]; - - /* we should not accept the invalid value below */ - if 
(analog_gain == 0) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - - v4l2_err(client, "%s: invalid value\n", __func__); - return -EINVAL; - } - return ov5693_set_exposure(sd, coarse_itg, analog_gain, digital_gain); -} - -static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size, - u16 addr, u8 *buf) -{ - u16 index; - int ret; - u16 *pVal = NULL; - - for (index = 0; index <= size; index++) { - pVal = (u16 *) (buf + index); - ret = - ov5693_read_reg(client, OV5693_8BIT, addr + index, - pVal); - if (ret) - return ret; - } - - return 0; -} - -static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 *buf) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov5693_device *dev = to_ov5693_sensor(sd); - int ret; - int i; - u8 *b = buf; - - dev->otp_size = 0; - for (i = 1; i < OV5693_OTP_BANK_MAX; i++) { - /*set bank NO and OTP read mode. */ - ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_BANK_REG, (i | 0xc0)); //[7:6] 2'b11 [5:0] bank no - if (ret) { - dev_err(&client->dev, "failed to prepare OTP page\n"); - return ret; - } - //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_BANK_REG,(i|0xc0)); - - /*enable read */ - ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_READ_REG, OV5693_OTP_MODE_READ); // enable :1 - if (ret) { - dev_err(&client->dev, - "failed to set OTP reading mode page"); - return ret; - } - //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_READ_REG,OV5693_OTP_MODE_READ); - - /* Reading the OTP data array */ - ret = ov5693_read_otp_reg_array(client, OV5693_OTP_BANK_SIZE, - OV5693_OTP_START_ADDR, - b); - if (ret) { - dev_err(&client->dev, "failed to read OTP data\n"); - return ret; - } - - //pr_debug("BANK[%2d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", i, *b, *(b+1), *(b+2), *(b+3), *(b+4), *(b+5), *(b+6), *(b+7), *(b+8), *(b+9), *(b+10), *(b+11), *(b+12), *(b+13), *(b+14), *(b+15)); - - //Intel OTP map, try to read 320byts first. 
- if (i == 21) { - if ((*b) == 0) { - dev->otp_size = 320; - break; - } else { - b = buf; - continue; - } - } else if (i == 24) { //if the first 320bytes data doesn't not exist, try to read the next 32bytes data. - if ((*b) == 0) { - dev->otp_size = 32; - break; - } else { - b = buf; - continue; - } - } else if (i == 27) { //if the prvious 32bytes data doesn't exist, try to read the next 32bytes data again. - if ((*b) == 0) { - dev->otp_size = 32; - break; - } else { - dev->otp_size = 0; // no OTP data. - break; - } - } - - b = b + OV5693_OTP_BANK_SIZE; - } - return 0; -} - -/* - * Read otp data and store it into a kmalloced buffer. - * The caller must kfree the buffer when no more needed. - * @size: set to the size of the returned otp data. - */ -static void *ov5693_otp_read(struct v4l2_subdev *sd) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - u8 *buf; - int ret; - - buf = devm_kzalloc(&client->dev, (OV5693_OTP_DATA_SIZE + 16), GFP_KERNEL); - if (!buf) - return ERR_PTR(-ENOMEM); - - //otp valid after mipi on and sw stream on - ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x00); - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_SW_STREAM, OV5693_START_STREAMING); - - ret = __ov5693_otp_read(sd, buf); - - //mipi off and sw stream off after otp read - ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x0f); - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_SW_STREAM, OV5693_STOP_STREAMING); - - /* Driver has failed to find valid data */ - if (ret) { - dev_err(&client->dev, "sensor found no valid OTP data\n"); - return ERR_PTR(ret); - } - - return buf; -} - -static int ov5693_g_priv_int_data(struct v4l2_subdev *sd, - struct v4l2_private_int_data *priv) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov5693_device *dev = to_ov5693_sensor(sd); - u8 __user *to = priv->data; - u32 read_size = priv->size; - int ret; - - /* No need to copy data if size is 0 */ - if (!read_size) - goto out; - 
- if (IS_ERR(dev->otp_data)) { - dev_err(&client->dev, "OTP data not available"); - return PTR_ERR(dev->otp_data); - } - - /* Correct read_size value only if bigger than maximum */ - if (read_size > OV5693_OTP_DATA_SIZE) - read_size = OV5693_OTP_DATA_SIZE; - - ret = copy_to_user(to, dev->otp_data, read_size); - if (ret) { - dev_err(&client->dev, "%s: failed to copy OTP data to user\n", - __func__); - return -EFAULT; - } - - pr_debug("%s read_size:%d\n", __func__, read_size); - -out: - /* Return correct size */ - priv->size = dev->otp_size; - - return 0; - -} - -static long ov5693_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) -{ - - switch (cmd) { - case ATOMISP_IOC_S_EXPOSURE: - return ov5693_s_exposure(sd, arg); - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - return ov5693_g_priv_int_data(sd, arg); - default: - return -EINVAL; - } - return 0; -} - -/* - * This returns the exposure time being used. This should only be used - * for filling in EXIF data, not for actual image processing. 
 - */ -static int ov5693_q_exposure(struct v4l2_subdev *sd, s32 *value) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - u16 reg_v, reg_v2; - int ret; - - /* get exposure */ - ret = ov5693_read_reg(client, OV5693_8BIT, - OV5693_EXPOSURE_L, - &reg_v); - if (ret) - goto err; - - ret = ov5693_read_reg(client, OV5693_8BIT, - OV5693_EXPOSURE_M, - &reg_v2); - if (ret) - goto err; - - reg_v += reg_v2 << 8; - ret = ov5693_read_reg(client, OV5693_8BIT, - OV5693_EXPOSURE_H, - &reg_v2); - if (ret) - goto err; - - *value = reg_v + (((u32)reg_v2 << 16)); -err: - return ret; -} - -static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = -EINVAL; - u8 vcm_code; - - ret = ad5823_i2c_read(client, AD5823_REG_VCM_CODE_MSB, &vcm_code); - if (ret) - return ret; - - /* set reg VCM_CODE_MSB Bit[1:0] */ - vcm_code = (vcm_code & VCM_CODE_MSB_MASK) | - ((val >> 8) & ~VCM_CODE_MSB_MASK); - ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, vcm_code); - if (ret) - return ret; - - /* set reg VCM_CODE_LSB Bit[7:0] */ - ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_LSB, (val & 0xff)); - if (ret) - return ret; - - /* set required vcm move time */ - vcm_code = AD5823_RESONANCE_PERIOD / AD5823_RESONANCE_COEF - - AD5823_HIGH_FREQ_RANGE; - ret = ad5823_i2c_write(client, AD5823_REG_VCM_MOVE_TIME, vcm_code); - - return ret; -} - -static int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value) -{ - value = min(value, AD5823_MAX_FOCUS_POS); - return ad5823_t_focus_vcm(sd, value); -} - -static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - dev_dbg(&client->dev, "%s: FOCUS_POS: 0x%x\n", __func__, value); - value = clamp(value, 0, OV5693_VCM_MAX_FOCUS_POS); - if (dev->vcm == VCM_DW9714) { - if (dev->vcm_update) { - ret = vcm_dw_i2c_write(client, VCM_PROTECTION_OFF); - if (ret) - 
return ret; - ret = vcm_dw_i2c_write(client, DIRECT_VCM); - if (ret) - return ret; - ret = vcm_dw_i2c_write(client, VCM_PROTECTION_ON); - if (ret) - return ret; - dev->vcm_update = false; - } - ret = vcm_dw_i2c_write(client, - vcm_val(value, VCM_DEFAULT_S)); - } else if (dev->vcm == VCM_AD5823) { - ad5823_t_focus_abs(sd, value); - } - if (ret == 0) { - dev->number_of_steps = value - dev->focus; - dev->focus = value; - dev->timestamp_t_focus_abs = ktime_get(); - } else - dev_err(&client->dev, - "%s: i2c failed. ret %d\n", __func__, ret); - - return ret; -} - -static int ov5693_t_focus_rel(struct v4l2_subdev *sd, s32 value) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - - return ov5693_t_focus_abs(sd, dev->focus + value); -} - -#define DELAY_PER_STEP_NS 1000000 -#define DELAY_MAX_PER_STEP_NS (1000000 * 1023) -static int ov5693_q_focus_status(struct v4l2_subdev *sd, s32 *value) -{ - u32 status = 0; - struct ov5693_device *dev = to_ov5693_sensor(sd); - ktime_t temptime; - ktime_t timedelay = ns_to_ktime(min_t(u32, - abs(dev->number_of_steps) * DELAY_PER_STEP_NS, - DELAY_MAX_PER_STEP_NS)); - - temptime = ktime_sub(ktime_get(), (dev->timestamp_t_focus_abs)); - if (ktime_compare(temptime, timedelay) <= 0) { - status |= ATOMISP_FOCUS_STATUS_MOVING; - status |= ATOMISP_FOCUS_HP_IN_PROGRESS; - } else { - status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE; - status |= ATOMISP_FOCUS_HP_COMPLETE; - } - - *value = status; - - return 0; -} - -static int ov5693_q_focus_abs(struct v4l2_subdev *sd, s32 *value) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - s32 val; - - ov5693_q_focus_status(sd, &val); - - if (val & ATOMISP_FOCUS_STATUS_MOVING) - *value = dev->focus - dev->number_of_steps; - else - *value = dev->focus; - - return 0; -} - -static int ov5693_t_vcm_slew(struct v4l2_subdev *sd, s32 value) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - - dev->number_of_steps = value; - dev->vcm_update = true; - return 0; -} - -static int 
ov5693_t_vcm_timing(struct v4l2_subdev *sd, s32 value) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - - dev->number_of_steps = value; - dev->vcm_update = true; - return 0; -} - -static int ov5693_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct ov5693_device *dev = - container_of(ctrl->handler, struct ov5693_device, ctrl_handler); - struct i2c_client *client = v4l2_get_subdevdata(&dev->sd); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_FOCUS_ABSOLUTE: - dev_dbg(&client->dev, "%s: CID_FOCUS_ABSOLUTE:%d.\n", - __func__, ctrl->val); - ret = ov5693_t_focus_abs(&dev->sd, ctrl->val); - break; - case V4L2_CID_FOCUS_RELATIVE: - dev_dbg(&client->dev, "%s: CID_FOCUS_RELATIVE:%d.\n", - __func__, ctrl->val); - ret = ov5693_t_focus_rel(&dev->sd, ctrl->val); - break; - case V4L2_CID_VCM_SLEW: - ret = ov5693_t_vcm_slew(&dev->sd, ctrl->val); - break; - case V4L2_CID_VCM_TIMEING: - ret = ov5693_t_vcm_timing(&dev->sd, ctrl->val); - break; - default: - ret = -EINVAL; - } - return ret; -} - -static int ov5693_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct ov5693_device *dev = - container_of(ctrl->handler, struct ov5693_device, ctrl_handler); - int ret = 0; - - switch (ctrl->id) { - case V4L2_CID_EXPOSURE_ABSOLUTE: - ret = ov5693_q_exposure(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FOCAL_ABSOLUTE: - ret = ov5693_g_focal(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_ABSOLUTE: - ret = ov5693_g_fnumber(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FNUMBER_RANGE: - ret = ov5693_g_fnumber_range(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FOCUS_ABSOLUTE: - ret = ov5693_q_focus_abs(&dev->sd, &ctrl->val); - break; - case V4L2_CID_FOCUS_STATUS: - ret = ov5693_q_focus_status(&dev->sd, &ctrl->val); - break; - case V4L2_CID_BIN_FACTOR_HORZ: - ret = ov5693_g_bin_factor_x(&dev->sd, &ctrl->val); - break; - case V4L2_CID_BIN_FACTOR_VERT: - ret = ov5693_g_bin_factor_y(&dev->sd, &ctrl->val); - break; - default: - ret = -EINVAL; - } - - return ret; -} - -static 
const struct v4l2_ctrl_ops ctrl_ops = { - .s_ctrl = ov5693_s_ctrl, - .g_volatile_ctrl = ov5693_g_volatile_ctrl -}; - -static const struct v4l2_ctrl_config ov5693_controls[] = { - { - .ops = &ctrl_ops, - .id = V4L2_CID_EXPOSURE_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "exposure", - .min = 0x0, - .max = 0xffff, - .step = 0x01, - .def = 0x00, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCAL_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "focal length", - .min = OV5693_FOCAL_LENGTH_DEFAULT, - .max = OV5693_FOCAL_LENGTH_DEFAULT, - .step = 0x01, - .def = OV5693_FOCAL_LENGTH_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number", - .min = OV5693_F_NUMBER_DEFAULT, - .max = OV5693_F_NUMBER_DEFAULT, - .step = 0x01, - .def = OV5693_F_NUMBER_DEFAULT, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FNUMBER_RANGE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "f-number range", - .min = OV5693_F_NUMBER_RANGE, - .max = OV5693_F_NUMBER_RANGE, - .step = 0x01, - .def = OV5693_F_NUMBER_RANGE, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCUS_ABSOLUTE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "focus move absolute", - .min = 0, - .max = OV5693_VCM_MAX_FOCUS_POS, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCUS_RELATIVE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "focus move relative", - .min = OV5693_VCM_MAX_FOCUS_NEG, - .max = OV5693_VCM_MAX_FOCUS_POS, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_FOCUS_STATUS, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "focus status", - .min = 0, - .max = 100, /* allow enum to grow in the future */ - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_VCM_SLEW, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "vcm slew", - .min = 0, - .max = OV5693_VCM_SLEW_STEP_MAX, - .step = 1, - .def = 0, - 
.flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_VCM_TIMEING, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "vcm step time", - .min = 0, - .max = OV5693_VCM_SLEW_TIME_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_BIN_FACTOR_HORZ, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "horizontal binning factor", - .min = 0, - .max = OV5693_BIN_FACTOR_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, - { - .ops = &ctrl_ops, - .id = V4L2_CID_BIN_FACTOR_VERT, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "vertical binning factor", - .min = 0, - .max = OV5693_BIN_FACTOR_MAX, - .step = 1, - .def = 0, - .flags = 0, - }, -}; - -static int ov5693_init(struct v4l2_subdev *sd) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - pr_info("%s\n", __func__); - mutex_lock(&dev->input_lock); - dev->vcm_update = false; - - if (dev->vcm == VCM_AD5823) { - ret = vcm_ad_i2c_wr8(client, 0x01, 0x01); /* vcm init test */ - if (ret) - dev_err(&client->dev, - "vcm reset failed\n"); - /*change the mode*/ - ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, - AD5823_RING_CTRL_ENABLE); - if (ret) - dev_err(&client->dev, - "vcm enable ringing failed\n"); - ret = ad5823_i2c_write(client, AD5823_REG_MODE, - AD5823_ARC_RES1); - if (ret) - dev_err(&client->dev, - "vcm change mode failed\n"); - } - - /*change initial focus value for ad5823*/ - if (dev->vcm == VCM_AD5823) { - dev->focus = AD5823_INIT_FOCUS_POS; - ov5693_t_focus_abs(sd, AD5823_INIT_FOCUS_POS); - } else { - dev->focus = 0; - ov5693_t_focus_abs(sd, 0); - } - - mutex_unlock(&dev->input_lock); - - return 0; -} - -static int power_ctrl(struct v4l2_subdev *sd, bool flag) -{ - int ret; - struct ov5693_device *dev = to_ov5693_sensor(sd); - - if (!dev || !dev->platform_data) - return -ENODEV; - - /* - * This driver assumes "internal DVDD, PWDNB tied to DOVDD". 
- * In this set up only gpio0 (XSHUTDN) should be available - * but in some products (for example ECS) gpio1 (PWDNB) is - * also available. If gpio1 is available we emulate it being - * tied to DOVDD here. - */ - if (flag) { - ret = dev->platform_data->v2p8_ctrl(sd, 1); - dev->platform_data->gpio1_ctrl(sd, 1); - if (ret == 0) { - ret = dev->platform_data->v1p8_ctrl(sd, 1); - if (ret) { - dev->platform_data->gpio1_ctrl(sd, 0); - ret = dev->platform_data->v2p8_ctrl(sd, 0); - } - } - } else { - dev->platform_data->gpio1_ctrl(sd, 0); - ret = dev->platform_data->v1p8_ctrl(sd, 0); - ret |= dev->platform_data->v2p8_ctrl(sd, 0); - } - - return ret; -} - -static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - - if (!dev || !dev->platform_data) - return -ENODEV; - - return dev->platform_data->gpio0_ctrl(sd, flag); -} - -static int __power_up(struct v4l2_subdev *sd) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - - /* power control */ - ret = power_ctrl(sd, 1); - if (ret) - goto fail_power; - - /* according to DS, at least 5ms is needed between DOVDD and PWDN */ - /* add this delay time to 10~11ms*/ - usleep_range(10000, 11000); - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 1); - if (ret) { - ret = gpio_ctrl(sd, 1); - if (ret) - goto fail_power; - } - - /* flis clock control */ - ret = dev->platform_data->flisclk_ctrl(sd, 1); - if (ret) - goto fail_clk; - - __cci_delay(up_delay); - - return 0; - -fail_clk: - gpio_ctrl(sd, 0); -fail_power: - power_ctrl(sd, 0); - dev_err(&client->dev, "sensor power-up failed\n"); - - return ret; -} - -static int power_down(struct v4l2_subdev *sd) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - dev->focus = 
OV5693_INVALID_CONFIG; - if (!dev->platform_data) { - dev_err(&client->dev, - "no camera_sensor_platform_data"); - return -ENODEV; - } - - ret = dev->platform_data->flisclk_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "flisclk failed\n"); - - /* gpio ctrl */ - ret = gpio_ctrl(sd, 0); - if (ret) { - ret = gpio_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "gpio failed 2\n"); - } - - /* power control */ - ret = power_ctrl(sd, 0); - if (ret) - dev_err(&client->dev, "vprog failed.\n"); - - return ret; -} - -static int power_up(struct v4l2_subdev *sd) -{ - static const int retry_count = 4; - int i, ret; - - for (i = 0; i < retry_count; i++) { - ret = __power_up(sd); - if (!ret) - return 0; - - power_down(sd); - } - return ret; -} - -static int ov5693_s_power(struct v4l2_subdev *sd, int on) -{ - int ret; - - pr_info("%s: on %d\n", __func__, on); - if (on == 0) - return power_down(sd); - else { - ret = power_up(sd); - if (!ret) { - ret = ov5693_init(sd); - /* restore settings */ - ov5693_res = ov5693_res_preview; - N_RES = N_RES_PREVIEW; - } - } - return ret; -} - -/* - * distance - calculate the distance - * @res: resolution - * @w: width - * @h: height - * - * Get the gap between res_w/res_h and w/h. - * distance = (res_w/res_h - w/h) / (w/h) * 8192 - * res->width/height smaller than w/h wouldn't be considered. - * The gap of ratio larger than 1/8 wouldn't be considered. - * Returns the value of gap or -1 if fail. 
- */ -#define LARGEST_ALLOWED_RATIO_MISMATCH 1024 -static int distance(struct ov5693_resolution *res, u32 w, u32 h) -{ - int ratio; - int distance; - - if (w == 0 || h == 0 || - res->width < w || res->height < h) - return -1; - - ratio = res->width << 13; - ratio /= w; - ratio *= h; - ratio /= res->height; - - distance = abs(ratio - 8192); - - if (distance > LARGEST_ALLOWED_RATIO_MISMATCH) - return -1; - - return distance; -} - -/* Return the nearest higher resolution index - * Firstly try to find the approximate aspect ratio resolution - * If we find multiple same AR resolutions, choose the - * minimal size. - */ -static int nearest_resolution_index(int w, int h) -{ - int i; - int idx = -1; - int dist; - int min_dist = INT_MAX; - int min_res_w = INT_MAX; - struct ov5693_resolution *tmp_res = NULL; - - for (i = 0; i < N_RES; i++) { - tmp_res = &ov5693_res[i]; - dist = distance(tmp_res, w, h); - if (dist == -1) - continue; - if (dist < min_dist) { - min_dist = dist; - idx = i; - min_res_w = ov5693_res[i].width; - continue; - } - if (dist == min_dist && ov5693_res[i].width < min_res_w) - idx = i; - } - - return idx; -} - -static int get_resolution_index(int w, int h) -{ - int i; - - for (i = 0; i < N_RES; i++) { - if (w != ov5693_res[i].width) - continue; - if (h != ov5693_res[i].height) - continue; - - return i; - } - - return -1; -} - -/* TODO: remove it. 
*/ -static int startup(struct v4l2_subdev *sd) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - ret = ov5693_write_reg(client, OV5693_8BIT, - OV5693_SW_RESET, 0x01); - if (ret) { - dev_err(&client->dev, "ov5693 reset err.\n"); - return ret; - } - - ret = ov5693_write_reg_array(client, ov5693_global_setting); - if (ret) { - dev_err(&client->dev, "ov5693 write register err.\n"); - return ret; - } - - ret = ov5693_write_reg_array(client, ov5693_res[dev->fmt_idx].regs); - if (ret) { - dev_err(&client->dev, "ov5693 write register err.\n"); - return ret; - } - - return ret; -} - -static int ov5693_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct camera_mipi_info *ov5693_info = NULL; - int ret = 0; - int idx; - - if (format->pad) - return -EINVAL; - if (!fmt) - return -EINVAL; - ov5693_info = v4l2_get_subdev_hostdata(sd); - if (ov5693_info == NULL) - return -EINVAL; - - mutex_lock(&dev->input_lock); - idx = nearest_resolution_index(fmt->width, fmt->height); - if (idx == -1) { - /* return the largest resolution */ - fmt->width = ov5693_res[N_RES - 1].width; - fmt->height = ov5693_res[N_RES - 1].height; - } else { - fmt->width = ov5693_res[idx].width; - fmt->height = ov5693_res[idx].height; - } - - fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *fmt; - mutex_unlock(&dev->input_lock); - return 0; - } - - dev->fmt_idx = get_resolution_index(fmt->width, fmt->height); - if (dev->fmt_idx == -1) { - dev_err(&client->dev, "get resolution fail\n"); - mutex_unlock(&dev->input_lock); - return -EINVAL; - } - - ret = startup(sd); - if (ret) { - int i = 0; - - dev_err(&client->dev, "ov5693 startup err, retry to power up\n"); 
- for (i = 0; i < OV5693_POWER_UP_RETRY_NUM; i++) { - dev_err(&client->dev, - "ov5693 retry to power up %d/%d times, result: ", - i+1, OV5693_POWER_UP_RETRY_NUM); - power_down(sd); - ret = power_up(sd); - if (!ret) { - mutex_unlock(&dev->input_lock); - ov5693_init(sd); - mutex_lock(&dev->input_lock); - } else { - dev_err(&client->dev, "power up failed, continue\n"); - continue; - } - ret = startup(sd); - if (ret) { - dev_err(&client->dev, " startup FAILED!\n"); - } else { - dev_err(&client->dev, " startup SUCCESS!\n"); - break; - } - } - } - - /* - * After sensor settings are set to HW, sometimes stream is started. - * This would cause ISP timeout because ISP is not ready to receive - * data yet. So add stop streaming here. - */ - ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM, - OV5693_STOP_STREAMING); - if (ret) - dev_warn(&client->dev, "ov5693 stream off err\n"); - - ret = ov5693_get_intg_factor(client, ov5693_info, - &ov5693_res[dev->fmt_idx]); - if (ret) { - dev_err(&client->dev, "failed to get integration_factor\n"); - goto err; - } - - ov5693_info->metadata_width = fmt->width * 10 / 8; - ov5693_info->metadata_height = 1; - ov5693_info->metadata_effective_width = &ov5693_embedded_effective_size; - -err: - mutex_unlock(&dev->input_lock); - return ret; -} -static int ov5693_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct ov5693_device *dev = to_ov5693_sensor(sd); - - if (format->pad) - return -EINVAL; - - if (!fmt) - return -EINVAL; - - fmt->width = ov5693_res[dev->fmt_idx].width; - fmt->height = ov5693_res[dev->fmt_idx].height; - fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; - - return 0; -} - -static int ov5693_detect(struct i2c_client *client) -{ - struct i2c_adapter *adapter = client->adapter; - u16 high, low; - int ret; - u16 id; - u8 revision; - - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) - return -ENODEV; - - ret = 
ov5693_read_reg(client, OV5693_8BIT, - OV5693_SC_CMMN_CHIP_ID_H, &high); - if (ret) { - dev_err(&client->dev, "sensor_id_high = 0x%x\n", high); - return -ENODEV; - } - ret = ov5693_read_reg(client, OV5693_8BIT, - OV5693_SC_CMMN_CHIP_ID_L, &low); - id = ((((u16) high) << 8) | (u16) low); - - if (id != OV5693_ID) { - dev_err(&client->dev, "sensor ID error 0x%x\n", id); - return -ENODEV; - } - - ret = ov5693_read_reg(client, OV5693_8BIT, - OV5693_SC_CMMN_SUB_ID, &high); - revision = (u8) high & 0x0f; - - dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision); - dev_dbg(&client->dev, "detect ov5693 success\n"); - return 0; -} - -static int ov5693_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - mutex_lock(&dev->input_lock); - - ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM, - enable ? OV5693_START_STREAMING : - OV5693_STOP_STREAMING); - - mutex_unlock(&dev->input_lock); - - return ret; -} - - -static int ov5693_s_config(struct v4l2_subdev *sd, - int irq, void *platform_data) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - - if (platform_data == NULL) - return -ENODEV; - - dev->platform_data = - (struct camera_sensor_platform_data *)platform_data; - - mutex_lock(&dev->input_lock); - /* power off the module, then power on it in future - * as first power on by board may not fulfill the - * power on sequqence needed by the module - */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "ov5693 power-off err.\n"); - goto fail_power_off; - } - - ret = power_up(sd); - if (ret) { - dev_err(&client->dev, "ov5693 power-up err.\n"); - goto fail_power_on; - } - - if (!dev->vcm) - dev->vcm = vcm_detect(client); - - ret = dev->platform_data->csi_cfg(sd, 1); - if (ret) - goto fail_csi_cfg; - - /* config & detect sensor */ - ret = ov5693_detect(client); - if 
(ret) { - dev_err(&client->dev, "ov5693_detect err s_config.\n"); - goto fail_csi_cfg; - } - - dev->otp_data = ov5693_otp_read(sd); - - /* turn off sensor, after probed */ - ret = power_down(sd); - if (ret) { - dev_err(&client->dev, "ov5693 power-off err.\n"); - goto fail_csi_cfg; - } - mutex_unlock(&dev->input_lock); - - return ret; - -fail_csi_cfg: - dev->platform_data->csi_cfg(sd, 0); -fail_power_on: - power_down(sd); - dev_err(&client->dev, "sensor power-gating failed\n"); -fail_power_off: - mutex_unlock(&dev->input_lock); - return ret; -} - -static int ov5693_g_frame_interval(struct v4l2_subdev *sd, - struct v4l2_subdev_frame_interval *interval) -{ - struct ov5693_device *dev = to_ov5693_sensor(sd); - - interval->interval.numerator = 1; - interval->interval.denominator = ov5693_res[dev->fmt_idx].fps; - - return 0; -} - -static int ov5693_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->index >= MAX_FMTS) - return -EINVAL; - - code->code = MEDIA_BUS_FMT_SBGGR10_1X10; - return 0; -} - -static int ov5693_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) -{ - int index = fse->index; - - if (index >= N_RES) - return -EINVAL; - - fse->min_width = ov5693_res[index].width; - fse->min_height = ov5693_res[index].height; - fse->max_width = ov5693_res[index].width; - fse->max_height = ov5693_res[index].height; - - return 0; - -} - -static const struct v4l2_subdev_video_ops ov5693_video_ops = { - .s_stream = ov5693_s_stream, - .g_frame_interval = ov5693_g_frame_interval, -}; - -static const struct v4l2_subdev_core_ops ov5693_core_ops = { - .s_power = ov5693_s_power, - .ioctl = ov5693_ioctl, -}; - -static const struct v4l2_subdev_pad_ops ov5693_pad_ops = { - .enum_mbus_code = ov5693_enum_mbus_code, - .enum_frame_size = ov5693_enum_frame_size, - .get_fmt = ov5693_get_fmt, - .set_fmt = ov5693_set_fmt, -}; - -static const 
struct v4l2_subdev_ops ov5693_ops = { - .core = &ov5693_core_ops, - .video = &ov5693_video_ops, - .pad = &ov5693_pad_ops, -}; - -static int ov5693_remove(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct ov5693_device *dev = to_ov5693_sensor(sd); - - dev_dbg(&client->dev, "ov5693_remove...\n"); - - dev->platform_data->csi_cfg(sd, 0); - - v4l2_device_unregister_subdev(sd); - - atomisp_gmin_remove_subdev(sd); - - media_entity_cleanup(&dev->sd.entity); - v4l2_ctrl_handler_free(&dev->ctrl_handler); - kfree(dev); - - return 0; -} - -static int ov5693_probe(struct i2c_client *client) -{ - struct ov5693_device *dev; - int i2c; - int ret = 0; - void *pdata; - unsigned int i; - - /* - * Firmware workaround: Some modules use a "secondary default" - * address of 0x10 which doesn't appear on schematics, and - * some BIOS versions haven't gotten the memo. Work around - * via config. - */ - i2c = gmin_get_var_int(&client->dev, "I2CAddr", -1); - if (i2c != -1) { - dev_info(&client->dev, - "Overriding firmware-provided I2C address (0x%x) with 0x%x\n", - client->addr, i2c); - client->addr = i2c; - } - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - mutex_init(&dev->input_lock); - - dev->fmt_idx = 0; - v4l2_i2c_subdev_init(&(dev->sd), client, &ov5693_ops); - - pdata = gmin_camera_platform_data(&dev->sd, - ATOMISP_INPUT_FORMAT_RAW_10, - atomisp_bayer_order_bggr); - if (!pdata) - goto out_free; - - ret = ov5693_s_config(&dev->sd, client->irq, pdata); - if (ret) - goto out_free; - - ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA); - if (ret) - goto out_free; - - dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - dev->pad.flags = MEDIA_PAD_FL_SOURCE; - dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10; - dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; - ret = - v4l2_ctrl_handler_init(&dev->ctrl_handler, - ARRAY_SIZE(ov5693_controls)); - if (ret) { - ov5693_remove(client); - return ret; - } - - for (i = 0; 
i < ARRAY_SIZE(ov5693_controls); i++) - v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov5693_controls[i], - NULL); - - if (dev->ctrl_handler.error) { - ov5693_remove(client); - return dev->ctrl_handler.error; - } - - /* Use same lock for controls as for everything else. */ - dev->ctrl_handler.lock = &dev->input_lock; - dev->sd.ctrl_handler = &dev->ctrl_handler; - - ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); - if (ret) - ov5693_remove(client); - - return ret; -out_free: - v4l2_device_unregister_subdev(&dev->sd); - kfree(dev); - return ret; -} - -static const struct acpi_device_id ov5693_acpi_match[] = { - {"INT33BE"}, - {}, -}; -MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match); - -static struct i2c_driver ov5693_driver = { - .driver = { - .name = "ov5693", - .acpi_match_table = ov5693_acpi_match, - }, - .probe_new = ov5693_probe, - .remove = ov5693_remove, -}; -module_i2c_driver(ov5693_driver); - -MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h deleted file mode 100644 index bba99406785e..000000000000 --- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h +++ /dev/null @@ -1,1392 +0,0 @@ -/* - * Support for OmniVision OV5693 5M camera sensor. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __OV5693_H__ -#define __OV5693_H__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../../include/linux/atomisp_platform.h" - -/* - * FIXME: non-preview resolutions are currently broken - */ -#define ENABLE_NON_PREVIEW 0 - - -#define OV5693_POWER_UP_RETRY_NUM 5 - -/* Defines for register writes and register array processing */ -#define I2C_MSG_LENGTH 0x2 -#define I2C_RETRY_COUNT 5 - -#define OV5693_FOCAL_LENGTH_NUM 334 /*3.34mm*/ -#define OV5693_FOCAL_LENGTH_DEM 100 -#define OV5693_F_NUMBER_DEFAULT_NUM 24 -#define OV5693_F_NUMBER_DEM 10 - -#define MAX_FMTS 1 - -/* sensor_mode_data read_mode adaptation */ -#define OV5693_READ_MODE_BINNING_ON 0x0400 -#define OV5693_READ_MODE_BINNING_OFF 0x00 -#define OV5693_INTEGRATION_TIME_MARGIN 8 - -#define OV5693_MAX_EXPOSURE_VALUE 0xFFF1 -#define OV5693_MAX_GAIN_VALUE 0xFF - -/* - * focal length bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define OV5693_FOCAL_LENGTH_DEFAULT 0x1B70064 - -/* - * current f-number bits definition: - * bits 31-16: numerator, bits 15-0: denominator - */ -#define OV5693_F_NUMBER_DEFAULT 0x18000a - -/* - * f-number range bits definition: - * bits 31-24: max f-number numerator - * bits 23-16: max f-number denominator - * bits 15-8: min f-number numerator - * bits 7-0: min f-number denominator - */ -#define OV5693_F_NUMBER_RANGE 0x180a180a -#define OV5693_ID 0x5690 - -#define OV5693_FINE_INTG_TIME_MIN 0 -#define OV5693_FINE_INTG_TIME_MAX_MARGIN 0 -#define OV5693_COARSE_INTG_TIME_MIN 1 -#define OV5693_COARSE_INTG_TIME_MAX_MARGIN 6 - -#define OV5693_BIN_FACTOR_MAX 4 -/* - * OV5693 System control registers - */ -#define OV5693_SW_SLEEP 0x0100 -#define OV5693_SW_RESET 0x0103 -#define OV5693_SW_STREAM 0x0100 - -#define OV5693_SC_CMMN_CHIP_ID_H 0x300A -#define OV5693_SC_CMMN_CHIP_ID_L 0x300B -#define OV5693_SC_CMMN_SCCB_ID 0x300C -#define OV5693_SC_CMMN_SUB_ID 0x302A /* process, 
version*/ -/*Bit[7:4] Group control, Bit[3:0] Group ID*/ -#define OV5693_GROUP_ACCESS 0x3208 -/* -*Bit[3:0] Bit[19:16] of exposure, -*remaining 16 bits lies in Reg0x3501&Reg0x3502 -*/ -#define OV5693_EXPOSURE_H 0x3500 -#define OV5693_EXPOSURE_M 0x3501 -#define OV5693_EXPOSURE_L 0x3502 -/*Bit[1:0] means Bit[9:8] of gain*/ -#define OV5693_AGC_H 0x350A -#define OV5693_AGC_L 0x350B /*Bit[7:0] of gain*/ - -#define OV5693_HORIZONTAL_START_H 0x3800 /*Bit[11:8]*/ -#define OV5693_HORIZONTAL_START_L 0x3801 /*Bit[7:0]*/ -#define OV5693_VERTICAL_START_H 0x3802 /*Bit[11:8]*/ -#define OV5693_VERTICAL_START_L 0x3803 /*Bit[7:0]*/ -#define OV5693_HORIZONTAL_END_H 0x3804 /*Bit[11:8]*/ -#define OV5693_HORIZONTAL_END_L 0x3805 /*Bit[7:0]*/ -#define OV5693_VERTICAL_END_H 0x3806 /*Bit[11:8]*/ -#define OV5693_VERTICAL_END_L 0x3807 /*Bit[7:0]*/ -#define OV5693_HORIZONTAL_OUTPUT_SIZE_H 0x3808 /*Bit[3:0]*/ -#define OV5693_HORIZONTAL_OUTPUT_SIZE_L 0x3809 /*Bit[7:0]*/ -#define OV5693_VERTICAL_OUTPUT_SIZE_H 0x380a /*Bit[3:0]*/ -#define OV5693_VERTICAL_OUTPUT_SIZE_L 0x380b /*Bit[7:0]*/ -/*High 8-bit, and low 8-bit HTS address is 0x380d*/ -#define OV5693_TIMING_HTS_H 0x380C -/*High 8-bit, and low 8-bit HTS address is 0x380d*/ -#define OV5693_TIMING_HTS_L 0x380D -/*High 8-bit, and low 8-bit HTS address is 0x380f*/ -#define OV5693_TIMING_VTS_H 0x380e -/*High 8-bit, and low 8-bit HTS address is 0x380f*/ -#define OV5693_TIMING_VTS_L 0x380f - -#define OV5693_MWB_RED_GAIN_H 0x3400 -#define OV5693_MWB_GREEN_GAIN_H 0x3402 -#define OV5693_MWB_BLUE_GAIN_H 0x3404 -#define OV5693_MWB_GAIN_MAX 0x0fff - -#define OV5693_START_STREAMING 0x01 -#define OV5693_STOP_STREAMING 0x00 - -#define VCM_ADDR 0x0c -#define VCM_CODE_MSB 0x04 - -#define OV5693_INVALID_CONFIG 0xffffffff - -#define OV5693_VCM_SLEW_STEP 0x30F0 -#define OV5693_VCM_SLEW_STEP_MAX 0x7 -#define OV5693_VCM_SLEW_STEP_MASK 0x7 -#define OV5693_VCM_CODE 0x30F2 -#define OV5693_VCM_SLEW_TIME 0x30F4 -#define OV5693_VCM_SLEW_TIME_MAX 0xffff -#define 
OV5693_VCM_ENABLE 0x8000 - -#define OV5693_VCM_MAX_FOCUS_NEG -1023 -#define OV5693_VCM_MAX_FOCUS_POS 1023 - -#define DLC_ENABLE 1 -#define DLC_DISABLE 0 -#define VCM_PROTECTION_OFF 0xeca3 -#define VCM_PROTECTION_ON 0xdc51 -#define VCM_DEFAULT_S 0x0 -#define vcm_step_s(a) (u8)(a & 0xf) -#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3) -#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104) -#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200) -#define vcm_val(data, s) (u16)(data << 4 | s) -#define DIRECT_VCM vcm_dlc_mclk(0, 0) - -/* Defines for OTP Data Registers */ -#define OV5693_FRAME_OFF_NUM 0x4202 -#define OV5693_OTP_BYTE_MAX 32 //change to 32 as needed by otpdata -#define OV5693_OTP_SHORT_MAX 16 -#define OV5693_OTP_START_ADDR 0x3D00 -#define OV5693_OTP_END_ADDR 0x3D0F -#define OV5693_OTP_DATA_SIZE 320 -#define OV5693_OTP_PROGRAM_REG 0x3D80 -#define OV5693_OTP_READ_REG 0x3D81 // 1:Enable 0:disable -#define OV5693_OTP_BANK_REG 0x3D84 //otp bank and mode -#define OV5693_OTP_READY_REG_DONE 1 -#define OV5693_OTP_BANK_MAX 28 -#define OV5693_OTP_BANK_SIZE 16 //16 bytes per bank -#define OV5693_OTP_READ_ONETIME 16 -#define OV5693_OTP_MODE_READ 1 - -struct regval_list { - u16 reg_num; - u8 value; -}; - -struct ov5693_resolution { - u8 *desc; - const struct ov5693_reg *regs; - int res; - int width; - int height; - int fps; - int pix_clk_freq; - u16 pixels_per_line; - u16 lines_per_frame; - u8 bin_factor_x; - u8 bin_factor_y; - u8 bin_mode; - bool used; -}; - -struct ov5693_format { - u8 *desc; - u32 pixelformat; - struct ov5693_reg *regs; -}; - -enum vcm_type { - VCM_UNKNOWN, - VCM_AD5823, - VCM_DW9714, -}; - -/* - * ov5693 device structure. 
- */ -struct ov5693_device { - struct v4l2_subdev sd; - struct media_pad pad; - struct v4l2_mbus_framefmt format; - struct mutex input_lock; - struct v4l2_ctrl_handler ctrl_handler; - - struct camera_sensor_platform_data *platform_data; - ktime_t timestamp_t_focus_abs; - int vt_pix_clk_freq_mhz; - int fmt_idx; - int run_mode; - int otp_size; - u8 *otp_data; - u32 focus; - s16 number_of_steps; - u8 res; - u8 type; - bool vcm_update; - enum vcm_type vcm; -}; - -enum ov5693_tok_type { - OV5693_8BIT = 0x0001, - OV5693_16BIT = 0x0002, - OV5693_32BIT = 0x0004, - OV5693_TOK_TERM = 0xf000, /* terminating token for reg list */ - OV5693_TOK_DELAY = 0xfe00, /* delay token for reg list */ - OV5693_TOK_MASK = 0xfff0 -}; - -/** - * struct ov5693_reg - MI sensor register format - * @type: type of the register - * @reg: 16-bit offset to register - * @val: 8/16/32-bit register value - * - * Define a structure for sensor register initialization values - */ -struct ov5693_reg { - enum ov5693_tok_type type; - u16 reg; - u32 val; /* @set value for read/mod/write, @mask */ -}; - -#define to_ov5693_sensor(x) container_of(x, struct ov5693_device, sd) - -#define OV5693_MAX_WRITE_BUF_SIZE 30 - -struct ov5693_write_buffer { - u16 addr; - u8 data[OV5693_MAX_WRITE_BUF_SIZE]; -}; - -struct ov5693_write_ctrl { - int index; - struct ov5693_write_buffer buffer; -}; - -static struct ov5693_reg const ov5693_global_setting[] = { - {OV5693_8BIT, 0x0103, 0x01}, - {OV5693_8BIT, 0x3001, 0x0a}, - {OV5693_8BIT, 0x3002, 0x80}, - {OV5693_8BIT, 0x3006, 0x00}, - {OV5693_8BIT, 0x3011, 0x21}, - {OV5693_8BIT, 0x3012, 0x09}, - {OV5693_8BIT, 0x3013, 0x10}, - {OV5693_8BIT, 0x3014, 0x00}, - {OV5693_8BIT, 0x3015, 0x08}, - {OV5693_8BIT, 0x3016, 0xf0}, - {OV5693_8BIT, 0x3017, 0xf0}, - {OV5693_8BIT, 0x3018, 0xf0}, - {OV5693_8BIT, 0x301b, 0xb4}, - {OV5693_8BIT, 0x301d, 0x02}, - {OV5693_8BIT, 0x3021, 0x00}, - {OV5693_8BIT, 0x3022, 0x01}, - {OV5693_8BIT, 0x3028, 0x44}, - {OV5693_8BIT, 0x3098, 0x02}, - {OV5693_8BIT, 0x3099, 
0x19}, - {OV5693_8BIT, 0x309a, 0x02}, - {OV5693_8BIT, 0x309b, 0x01}, - {OV5693_8BIT, 0x309c, 0x00}, - {OV5693_8BIT, 0x30a0, 0xd2}, - {OV5693_8BIT, 0x30a2, 0x01}, - {OV5693_8BIT, 0x30b2, 0x00}, - {OV5693_8BIT, 0x30b3, 0x7d}, - {OV5693_8BIT, 0x30b4, 0x03}, - {OV5693_8BIT, 0x30b5, 0x04}, - {OV5693_8BIT, 0x30b6, 0x01}, - {OV5693_8BIT, 0x3104, 0x21}, - {OV5693_8BIT, 0x3106, 0x00}, - {OV5693_8BIT, 0x3400, 0x04}, - {OV5693_8BIT, 0x3401, 0x00}, - {OV5693_8BIT, 0x3402, 0x04}, - {OV5693_8BIT, 0x3403, 0x00}, - {OV5693_8BIT, 0x3404, 0x04}, - {OV5693_8BIT, 0x3405, 0x00}, - {OV5693_8BIT, 0x3406, 0x01}, - {OV5693_8BIT, 0x3500, 0x00}, - {OV5693_8BIT, 0x3503, 0x07}, - {OV5693_8BIT, 0x3504, 0x00}, - {OV5693_8BIT, 0x3505, 0x00}, - {OV5693_8BIT, 0x3506, 0x00}, - {OV5693_8BIT, 0x3507, 0x02}, - {OV5693_8BIT, 0x3508, 0x00}, - {OV5693_8BIT, 0x3509, 0x10}, - {OV5693_8BIT, 0x350a, 0x00}, - {OV5693_8BIT, 0x350b, 0x40}, - {OV5693_8BIT, 0x3601, 0x0a}, - {OV5693_8BIT, 0x3602, 0x38}, - {OV5693_8BIT, 0x3612, 0x80}, - {OV5693_8BIT, 0x3620, 0x54}, - {OV5693_8BIT, 0x3621, 0xc7}, - {OV5693_8BIT, 0x3622, 0x0f}, - {OV5693_8BIT, 0x3625, 0x10}, - {OV5693_8BIT, 0x3630, 0x55}, - {OV5693_8BIT, 0x3631, 0xf4}, - {OV5693_8BIT, 0x3632, 0x00}, - {OV5693_8BIT, 0x3633, 0x34}, - {OV5693_8BIT, 0x3634, 0x02}, - {OV5693_8BIT, 0x364d, 0x0d}, - {OV5693_8BIT, 0x364f, 0xdd}, - {OV5693_8BIT, 0x3660, 0x04}, - {OV5693_8BIT, 0x3662, 0x10}, - {OV5693_8BIT, 0x3663, 0xf1}, - {OV5693_8BIT, 0x3665, 0x00}, - {OV5693_8BIT, 0x3666, 0x20}, - {OV5693_8BIT, 0x3667, 0x00}, - {OV5693_8BIT, 0x366a, 0x80}, - {OV5693_8BIT, 0x3680, 0xe0}, - {OV5693_8BIT, 0x3681, 0x00}, - {OV5693_8BIT, 0x3700, 0x42}, - {OV5693_8BIT, 0x3701, 0x14}, - {OV5693_8BIT, 0x3702, 0xa0}, - {OV5693_8BIT, 0x3703, 0xd8}, - {OV5693_8BIT, 0x3704, 0x78}, - {OV5693_8BIT, 0x3705, 0x02}, - {OV5693_8BIT, 0x370a, 0x00}, - {OV5693_8BIT, 0x370b, 0x20}, - {OV5693_8BIT, 0x370c, 0x0c}, - {OV5693_8BIT, 0x370d, 0x11}, - {OV5693_8BIT, 0x370e, 0x00}, - {OV5693_8BIT, 0x370f, 0x40}, - 
{OV5693_8BIT, 0x3710, 0x00}, - {OV5693_8BIT, 0x371a, 0x1c}, - {OV5693_8BIT, 0x371b, 0x05}, - {OV5693_8BIT, 0x371c, 0x01}, - {OV5693_8BIT, 0x371e, 0xa1}, - {OV5693_8BIT, 0x371f, 0x0c}, - {OV5693_8BIT, 0x3721, 0x00}, - {OV5693_8BIT, 0x3724, 0x10}, - {OV5693_8BIT, 0x3726, 0x00}, - {OV5693_8BIT, 0x372a, 0x01}, - {OV5693_8BIT, 0x3730, 0x10}, - {OV5693_8BIT, 0x3738, 0x22}, - {OV5693_8BIT, 0x3739, 0xe5}, - {OV5693_8BIT, 0x373a, 0x50}, - {OV5693_8BIT, 0x373b, 0x02}, - {OV5693_8BIT, 0x373c, 0x41}, - {OV5693_8BIT, 0x373f, 0x02}, - {OV5693_8BIT, 0x3740, 0x42}, - {OV5693_8BIT, 0x3741, 0x02}, - {OV5693_8BIT, 0x3742, 0x18}, - {OV5693_8BIT, 0x3743, 0x01}, - {OV5693_8BIT, 0x3744, 0x02}, - {OV5693_8BIT, 0x3747, 0x10}, - {OV5693_8BIT, 0x374c, 0x04}, - {OV5693_8BIT, 0x3751, 0xf0}, - {OV5693_8BIT, 0x3752, 0x00}, - {OV5693_8BIT, 0x3753, 0x00}, - {OV5693_8BIT, 0x3754, 0xc0}, - {OV5693_8BIT, 0x3755, 0x00}, - {OV5693_8BIT, 0x3756, 0x1a}, - {OV5693_8BIT, 0x3758, 0x00}, - {OV5693_8BIT, 0x3759, 0x0f}, - {OV5693_8BIT, 0x376b, 0x44}, - {OV5693_8BIT, 0x375c, 0x04}, - {OV5693_8BIT, 0x3774, 0x10}, - {OV5693_8BIT, 0x3776, 0x00}, - {OV5693_8BIT, 0x377f, 0x08}, - {OV5693_8BIT, 0x3780, 0x22}, - {OV5693_8BIT, 0x3781, 0x0c}, - {OV5693_8BIT, 0x3784, 0x2c}, - {OV5693_8BIT, 0x3785, 0x1e}, - {OV5693_8BIT, 0x378f, 0xf5}, - {OV5693_8BIT, 0x3791, 0xb0}, - {OV5693_8BIT, 0x3795, 0x00}, - {OV5693_8BIT, 0x3796, 0x64}, - {OV5693_8BIT, 0x3797, 0x11}, - {OV5693_8BIT, 0x3798, 0x30}, - {OV5693_8BIT, 0x3799, 0x41}, - {OV5693_8BIT, 0x379a, 0x07}, - {OV5693_8BIT, 0x379b, 0xb0}, - {OV5693_8BIT, 0x379c, 0x0c}, - {OV5693_8BIT, 0x37c5, 0x00}, - {OV5693_8BIT, 0x37c6, 0x00}, - {OV5693_8BIT, 0x37c7, 0x00}, - {OV5693_8BIT, 0x37c9, 0x00}, - {OV5693_8BIT, 0x37ca, 0x00}, - {OV5693_8BIT, 0x37cb, 0x00}, - {OV5693_8BIT, 0x37de, 0x00}, - {OV5693_8BIT, 0x37df, 0x00}, - {OV5693_8BIT, 0x3800, 0x00}, - {OV5693_8BIT, 0x3801, 0x00}, - {OV5693_8BIT, 0x3802, 0x00}, - {OV5693_8BIT, 0x3804, 0x0a}, - {OV5693_8BIT, 0x3805, 0x3f}, - {OV5693_8BIT, 
0x3810, 0x00}, - {OV5693_8BIT, 0x3812, 0x00}, - {OV5693_8BIT, 0x3823, 0x00}, - {OV5693_8BIT, 0x3824, 0x00}, - {OV5693_8BIT, 0x3825, 0x00}, - {OV5693_8BIT, 0x3826, 0x00}, - {OV5693_8BIT, 0x3827, 0x00}, - {OV5693_8BIT, 0x382a, 0x04}, - {OV5693_8BIT, 0x3a04, 0x06}, - {OV5693_8BIT, 0x3a05, 0x14}, - {OV5693_8BIT, 0x3a06, 0x00}, - {OV5693_8BIT, 0x3a07, 0xfe}, - {OV5693_8BIT, 0x3b00, 0x00}, - {OV5693_8BIT, 0x3b02, 0x00}, - {OV5693_8BIT, 0x3b03, 0x00}, - {OV5693_8BIT, 0x3b04, 0x00}, - {OV5693_8BIT, 0x3b05, 0x00}, - {OV5693_8BIT, 0x3e07, 0x20}, - {OV5693_8BIT, 0x4000, 0x08}, - {OV5693_8BIT, 0x4001, 0x04}, - {OV5693_8BIT, 0x4002, 0x45}, - {OV5693_8BIT, 0x4004, 0x08}, - {OV5693_8BIT, 0x4005, 0x18}, - {OV5693_8BIT, 0x4006, 0x20}, - {OV5693_8BIT, 0x4008, 0x24}, - {OV5693_8BIT, 0x4009, 0x10}, - {OV5693_8BIT, 0x400c, 0x00}, - {OV5693_8BIT, 0x400d, 0x00}, - {OV5693_8BIT, 0x4058, 0x00}, - {OV5693_8BIT, 0x404e, 0x37}, - {OV5693_8BIT, 0x404f, 0x8f}, - {OV5693_8BIT, 0x4058, 0x00}, - {OV5693_8BIT, 0x4101, 0xb2}, - {OV5693_8BIT, 0x4303, 0x00}, - {OV5693_8BIT, 0x4304, 0x08}, - {OV5693_8BIT, 0x4307, 0x31}, - {OV5693_8BIT, 0x4311, 0x04}, - {OV5693_8BIT, 0x4315, 0x01}, - {OV5693_8BIT, 0x4511, 0x05}, - {OV5693_8BIT, 0x4512, 0x01}, - {OV5693_8BIT, 0x4806, 0x00}, - {OV5693_8BIT, 0x4816, 0x52}, - {OV5693_8BIT, 0x481f, 0x30}, - {OV5693_8BIT, 0x4826, 0x2c}, - {OV5693_8BIT, 0x4831, 0x64}, - {OV5693_8BIT, 0x4d00, 0x04}, - {OV5693_8BIT, 0x4d01, 0x71}, - {OV5693_8BIT, 0x4d02, 0xfd}, - {OV5693_8BIT, 0x4d03, 0xf5}, - {OV5693_8BIT, 0x4d04, 0x0c}, - {OV5693_8BIT, 0x4d05, 0xcc}, - {OV5693_8BIT, 0x4837, 0x0a}, - {OV5693_8BIT, 0x5000, 0x06}, - {OV5693_8BIT, 0x5001, 0x01}, - {OV5693_8BIT, 0x5003, 0x20}, - {OV5693_8BIT, 0x5046, 0x0a}, - {OV5693_8BIT, 0x5013, 0x00}, - {OV5693_8BIT, 0x5046, 0x0a}, - {OV5693_8BIT, 0x5780, 0x1c}, - {OV5693_8BIT, 0x5786, 0x20}, - {OV5693_8BIT, 0x5787, 0x10}, - {OV5693_8BIT, 0x5788, 0x18}, - {OV5693_8BIT, 0x578a, 0x04}, - {OV5693_8BIT, 0x578b, 0x02}, - {OV5693_8BIT, 0x578c, 0x02}, 
- {OV5693_8BIT, 0x578e, 0x06}, - {OV5693_8BIT, 0x578f, 0x02}, - {OV5693_8BIT, 0x5790, 0x02}, - {OV5693_8BIT, 0x5791, 0xff}, - {OV5693_8BIT, 0x5842, 0x01}, - {OV5693_8BIT, 0x5843, 0x2b}, - {OV5693_8BIT, 0x5844, 0x01}, - {OV5693_8BIT, 0x5845, 0x92}, - {OV5693_8BIT, 0x5846, 0x01}, - {OV5693_8BIT, 0x5847, 0x8f}, - {OV5693_8BIT, 0x5848, 0x01}, - {OV5693_8BIT, 0x5849, 0x0c}, - {OV5693_8BIT, 0x5e00, 0x00}, - {OV5693_8BIT, 0x5e10, 0x0c}, - {OV5693_8BIT, 0x0100, 0x00}, - {OV5693_TOK_TERM, 0, 0} -}; - -#if ENABLE_NON_PREVIEW -/* - * 654x496 30fps 17ms VBlanking 2lane 10Bit (Scaling) - */ -static struct ov5693_reg const ov5693_654x496[] = { - {OV5693_8BIT, 0x3501, 0x3d}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe6}, - {OV5693_8BIT, 0x3709, 0xc7}, - {OV5693_8BIT, 0x3803, 0x00}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0xa3}, - {OV5693_8BIT, 0x3808, 0x02}, - {OV5693_8BIT, 0x3809, 0x90}, - {OV5693_8BIT, 0x380a, 0x01}, - {OV5693_8BIT, 0x380b, 0xf0}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x08}, - {OV5693_8BIT, 0x3813, 0x02}, - {OV5693_8BIT, 0x3814, 0x31}, - {OV5693_8BIT, 0x3815, 0x31}, - {OV5693_8BIT, 0x3820, 0x04}, - {OV5693_8BIT, 0x3821, 0x1f}, - {OV5693_8BIT, 0x5002, 0x80}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - -/* - * 1296x976 30fps 17ms VBlanking 2lane 10Bit (Scaling) -*DS from 2592x1952 -*/ -static struct ov5693_reg const ov5693_1296x976[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - - {OV5693_8BIT, 0x3800, 0x00}, - {OV5693_8BIT, 0x3801, 0x00}, - {OV5693_8BIT, 0x3802, 0x00}, - {OV5693_8BIT, 0x3803, 0x00}, - - {OV5693_8BIT, 0x3804, 0x0a}, - {OV5693_8BIT, 0x3805, 0x3f}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0xA3}, - - {OV5693_8BIT, 0x3808, 0x05}, - {OV5693_8BIT, 0x3809, 0x10}, - {OV5693_8BIT, 0x380a, 0x03}, - 
{OV5693_8BIT, 0x380b, 0xD0}, - - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - - {OV5693_8BIT, 0x3810, 0x00}, - {OV5693_8BIT, 0x3811, 0x10}, - {OV5693_8BIT, 0x3812, 0x00}, - {OV5693_8BIT, 0x3813, 0x02}, - - {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/ - {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/ - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */ - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} - -}; - - -/* - * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling) - DS from 2564x1956 - */ -static struct ov5693_reg const ov5693_336x256[] = { - {OV5693_8BIT, 0x3501, 0x3d}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe6}, - {OV5693_8BIT, 0x3709, 0xc7}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0xa3}, - {OV5693_8BIT, 0x3808, 0x01}, - {OV5693_8BIT, 0x3809, 0x50}, - {OV5693_8BIT, 0x380a, 0x01}, - {OV5693_8BIT, 0x380b, 0x00}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x1E}, - {OV5693_8BIT, 0x3814, 0x31}, - {OV5693_8BIT, 0x3815, 0x31}, - {OV5693_8BIT, 0x3820, 0x04}, - {OV5693_8BIT, 0x3821, 0x1f}, - {OV5693_8BIT, 0x5002, 0x80}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - -/* - * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling) - DS from 2368x1956 - */ -static struct ov5693_reg const ov5693_368x304[] = { - {OV5693_8BIT, 0x3501, 0x3d}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe6}, - {OV5693_8BIT, 0x3709, 0xc7}, - {OV5693_8BIT, 0x3808, 0x01}, - {OV5693_8BIT, 0x3809, 0x70}, - {OV5693_8BIT, 0x380a, 0x01}, - {OV5693_8BIT, 0x380b, 0x30}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x80}, - {OV5693_8BIT, 0x3814, 
0x31}, - {OV5693_8BIT, 0x3815, 0x31}, - {OV5693_8BIT, 0x3820, 0x04}, - {OV5693_8BIT, 0x3821, 0x1f}, - {OV5693_8BIT, 0x5002, 0x80}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - -/* - * ov5693_192x160 30fps 17ms VBlanking 2lane 10Bit (Scaling) - DS from 2460x1956 - */ -static struct ov5693_reg const ov5693_192x160[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x80}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3804, 0x0a}, - {OV5693_8BIT, 0x3805, 0x3f}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0xA3}, - {OV5693_8BIT, 0x3808, 0x00}, - {OV5693_8BIT, 0x3809, 0xC0}, - {OV5693_8BIT, 0x380a, 0x00}, - {OV5693_8BIT, 0x380b, 0xA0}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x40}, - {OV5693_8BIT, 0x3813, 0x00}, - {OV5693_8BIT, 0x3814, 0x31}, - {OV5693_8BIT, 0x3815, 0x31}, - {OV5693_8BIT, 0x3820, 0x04}, - {OV5693_8BIT, 0x3821, 0x1f}, - {OV5693_8BIT, 0x5002, 0x80}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - - -static struct ov5693_reg const ov5693_736x496[] = { - {OV5693_8BIT, 0x3501, 0x3d}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe6}, - {OV5693_8BIT, 0x3709, 0xc7}, - {OV5693_8BIT, 0x3803, 0x68}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0x3b}, - {OV5693_8BIT, 0x3808, 0x02}, - {OV5693_8BIT, 0x3809, 0xe0}, - {OV5693_8BIT, 0x380a, 0x01}, - {OV5693_8BIT, 0x380b, 0xf0}, - {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/ - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, /*vts*/ - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x08}, - {OV5693_8BIT, 0x3813, 0x02}, - {OV5693_8BIT, 0x3814, 0x31}, - {OV5693_8BIT, 0x3815, 0x31}, - {OV5693_8BIT, 0x3820, 0x04}, - {OV5693_8BIT, 0x3821, 0x1f}, - {OV5693_8BIT, 0x5002, 0x80}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; -#endif - -/* -static struct ov5693_reg const ov5693_736x496[] = { - 
{OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe6}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3803, 0x00}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0xa3}, - {OV5693_8BIT, 0x3808, 0x02}, - {OV5693_8BIT, 0x3809, 0xe0}, - {OV5693_8BIT, 0x380a, 0x01}, - {OV5693_8BIT, 0x380b, 0xf0}, - {OV5693_8BIT, 0x380c, 0x0d}, - {OV5693_8BIT, 0x380d, 0xb0}, - {OV5693_8BIT, 0x380e, 0x05}, - {OV5693_8BIT, 0x380f, 0xf2}, - {OV5693_8BIT, 0x3811, 0x08}, - {OV5693_8BIT, 0x3813, 0x02}, - {OV5693_8BIT, 0x3814, 0x31}, - {OV5693_8BIT, 0x3815, 0x31}, - {OV5693_8BIT, 0x3820, 0x01}, - {OV5693_8BIT, 0x3821, 0x1f}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; -*/ -/* - * 976x556 30fps 8.8ms VBlanking 2lane 10Bit (Scaling) - */ -#if ENABLE_NON_PREVIEW -static struct ov5693_reg const ov5693_976x556[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3803, 0xf0}, - {OV5693_8BIT, 0x3806, 0x06}, - {OV5693_8BIT, 0x3807, 0xa7}, - {OV5693_8BIT, 0x3808, 0x03}, - {OV5693_8BIT, 0x3809, 0xd0}, - {OV5693_8BIT, 0x380a, 0x02}, - {OV5693_8BIT, 0x380b, 0x2C}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x10}, - {OV5693_8BIT, 0x3813, 0x02}, - {OV5693_8BIT, 0x3814, 0x11}, - {OV5693_8BIT, 0x3815, 0x11}, - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x80}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - -/*DS from 2624x1492*/ -static struct ov5693_reg const ov5693_1296x736[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - - {OV5693_8BIT, 0x3800, 0x00}, - {OV5693_8BIT, 0x3801, 0x00}, - {OV5693_8BIT, 0x3802, 0x00}, - {OV5693_8BIT, 0x3803, 0x00}, - - {OV5693_8BIT, 0x3804, 0x0a}, - 
{OV5693_8BIT, 0x3805, 0x3f}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0xA3}, - - {OV5693_8BIT, 0x3808, 0x05}, - {OV5693_8BIT, 0x3809, 0x10}, - {OV5693_8BIT, 0x380a, 0x02}, - {OV5693_8BIT, 0x380b, 0xe0}, - - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - - {OV5693_8BIT, 0x3813, 0xE8}, - - {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/ - {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/ - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */ - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - -static struct ov5693_reg const ov5693_1636p_30fps[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3803, 0xf0}, - {OV5693_8BIT, 0x3806, 0x06}, - {OV5693_8BIT, 0x3807, 0xa7}, - {OV5693_8BIT, 0x3808, 0x06}, - {OV5693_8BIT, 0x3809, 0x64}, - {OV5693_8BIT, 0x380a, 0x04}, - {OV5693_8BIT, 0x380b, 0x48}, - {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/ - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, /*vts*/ - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x02}, - {OV5693_8BIT, 0x3813, 0x02}, - {OV5693_8BIT, 0x3814, 0x11}, - {OV5693_8BIT, 0x3815, 0x11}, - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x80}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; -#endif - -static struct ov5693_reg const ov5693_1616x1216_30fps[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x80}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3800, 0x00}, /*{3800,3801} Array X start*/ - {OV5693_8BIT, 0x3801, 0x08}, /* 04 //{3800,3801} Array X start*/ - {OV5693_8BIT, 0x3802, 0x00}, /*{3802,3803} Array Y start*/ - {OV5693_8BIT, 0x3803, 0x04}, /* 00 //{3802,3803} Array Y start*/ - {OV5693_8BIT, 0x3804, 
0x0a}, /*{3804,3805} Array X end*/ - {OV5693_8BIT, 0x3805, 0x37}, /* 3b //{3804,3805} Array X end*/ - {OV5693_8BIT, 0x3806, 0x07}, /*{3806,3807} Array Y end*/ - {OV5693_8BIT, 0x3807, 0x9f}, /* a3 //{3806,3807} Array Y end*/ - {OV5693_8BIT, 0x3808, 0x06}, /*{3808,3809} Final output H size*/ - {OV5693_8BIT, 0x3809, 0x50}, /*{3808,3809} Final output H size*/ - {OV5693_8BIT, 0x380a, 0x04}, /*{380a,380b} Final output V size*/ - {OV5693_8BIT, 0x380b, 0xc0}, /*{380a,380b} Final output V size*/ - {OV5693_8BIT, 0x380c, 0x0a}, /*{380c,380d} HTS*/ - {OV5693_8BIT, 0x380d, 0x80}, /*{380c,380d} HTS*/ - {OV5693_8BIT, 0x380e, 0x07}, /*{380e,380f} VTS*/ - {OV5693_8BIT, 0x380f, 0xc0}, /* bc //{380e,380f} VTS*/ - {OV5693_8BIT, 0x3810, 0x00}, /*{3810,3811} windowing X offset*/ - {OV5693_8BIT, 0x3811, 0x10}, /*{3810,3811} windowing X offset*/ - {OV5693_8BIT, 0x3812, 0x00}, /*{3812,3813} windowing Y offset*/ - {OV5693_8BIT, 0x3813, 0x06}, /*{3812,3813} windowing Y offset*/ - {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/ - {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/ - {OV5693_8BIT, 0x3820, 0x00}, /*FLIP/Binnning control*/ - {OV5693_8BIT, 0x3821, 0x1e}, /*MIRROR control*/ - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_8BIT, 0x5041, 0x84}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - - -/* - * 1940x1096 30fps 8.8ms VBlanking 2lane 10bit (Scaling) - */ -#if ENABLE_NON_PREVIEW -static struct ov5693_reg const ov5693_1940x1096[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3803, 0xf0}, - {OV5693_8BIT, 0x3806, 0x06}, - {OV5693_8BIT, 0x3807, 0xa7}, - {OV5693_8BIT, 0x3808, 0x07}, - {OV5693_8BIT, 0x3809, 0x94}, - {OV5693_8BIT, 0x380a, 0x04}, - {OV5693_8BIT, 0x380b, 0x48}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x02}, - {OV5693_8BIT, 0x3813, 0x02}, - {OV5693_8BIT, 
0x3814, 0x11}, - {OV5693_8BIT, 0x3815, 0x11}, - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x80}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - -static struct ov5693_reg const ov5693_2592x1456_30fps[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3800, 0x00}, - {OV5693_8BIT, 0x3801, 0x00}, - {OV5693_8BIT, 0x3802, 0x00}, - {OV5693_8BIT, 0x3803, 0xf0}, - {OV5693_8BIT, 0x3804, 0x0a}, - {OV5693_8BIT, 0x3805, 0x3f}, - {OV5693_8BIT, 0x3806, 0x06}, - {OV5693_8BIT, 0x3807, 0xa4}, - {OV5693_8BIT, 0x3808, 0x0a}, - {OV5693_8BIT, 0x3809, 0x20}, - {OV5693_8BIT, 0x380a, 0x05}, - {OV5693_8BIT, 0x380b, 0xb0}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x10}, - {OV5693_8BIT, 0x3813, 0x00}, - {OV5693_8BIT, 0x3814, 0x11}, - {OV5693_8BIT, 0x3815, 0x11}, - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_TOK_TERM, 0, 0} -}; -#endif - -static struct ov5693_reg const ov5693_2576x1456_30fps[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3800, 0x00}, - {OV5693_8BIT, 0x3801, 0x00}, - {OV5693_8BIT, 0x3802, 0x00}, - {OV5693_8BIT, 0x3803, 0xf0}, - {OV5693_8BIT, 0x3804, 0x0a}, - {OV5693_8BIT, 0x3805, 0x3f}, - {OV5693_8BIT, 0x3806, 0x06}, - {OV5693_8BIT, 0x3807, 0xa4}, - {OV5693_8BIT, 0x3808, 0x0a}, - {OV5693_8BIT, 0x3809, 0x10}, - {OV5693_8BIT, 0x380a, 0x05}, - {OV5693_8BIT, 0x380b, 0xb0}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x18}, - {OV5693_8BIT, 0x3813, 0x00}, - {OV5693_8BIT, 0x3814, 0x11}, - {OV5693_8BIT, 0x3815, 0x11}, - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 
0x1e}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_TOK_TERM, 0, 0} -}; - -/* - * 2592x1944 30fps 0.6ms VBlanking 2lane 10Bit - */ -#if ENABLE_NON_PREVIEW -static struct ov5693_reg const ov5693_2592x1944_30fps[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3803, 0x00}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0xa3}, - {OV5693_8BIT, 0x3808, 0x0a}, - {OV5693_8BIT, 0x3809, 0x20}, - {OV5693_8BIT, 0x380a, 0x07}, - {OV5693_8BIT, 0x380b, 0x98}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x10}, - {OV5693_8BIT, 0x3813, 0x00}, - {OV5693_8BIT, 0x3814, 0x11}, - {OV5693_8BIT, 0x3815, 0x11}, - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; -#endif - -/* - * 11:9 Full FOV Output, expected FOV Res: 2346x1920 - * ISP Effect Res: 1408x1152 - * Sensor out: 1424x1168, DS From: 2380x1952 - * - * WA: Left Offset: 8, Hor scal: 64 - */ -#if ENABLE_NON_PREVIEW -static struct ov5693_reg const ov5693_1424x1168_30fps[] = { - {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */ - {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */ - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */ - {OV5693_8BIT, 0x3801, 0x50}, /* 80 */ - {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */ - {OV5693_8BIT, 0x3803, 0x02}, /* 2 */ - {OV5693_8BIT, 0x3804, 0x09}, /* TIMING_X_ADDR_END */ - {OV5693_8BIT, 0x3805, 0xdd}, /* 2525 */ - {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */ - {OV5693_8BIT, 0x3807, 0xa1}, /* 1953 */ - {OV5693_8BIT, 0x3808, 0x05}, /* TIMING_X_OUTPUT_SIZE */ - {OV5693_8BIT, 0x3809, 0x90}, /* 1424 */ - {OV5693_8BIT, 0x380a, 0x04}, /* TIMING_Y_OUTPUT_SIZE */ - {OV5693_8BIT, 0x380b, 0x90}, /* 1168 */ - 
{OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */ - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */ - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */ - {OV5693_8BIT, 0x3811, 0x02}, /* 2 */ - {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */ - {OV5693_8BIT, 0x3813, 0x00}, /* 0 */ - {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */ - {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */ - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */ - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; -#endif - -/* - * 3:2 Full FOV Output, expected FOV Res: 2560x1706 - * ISP Effect Res: 720x480 - * Sensor out: 736x496, DS From 2616x1764 - */ -static struct ov5693_reg const ov5693_736x496_30fps[] = { - {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */ - {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */ - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */ - {OV5693_8BIT, 0x3801, 0x02}, /* 2 */ - {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */ - {OV5693_8BIT, 0x3803, 0x62}, /* 98 */ - {OV5693_8BIT, 0x3804, 0x0a}, /* TIMING_X_ADDR_END */ - {OV5693_8BIT, 0x3805, 0x3b}, /* 2619 */ - {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */ - {OV5693_8BIT, 0x3807, 0x43}, /* 1859 */ - {OV5693_8BIT, 0x3808, 0x02}, /* TIMING_X_OUTPUT_SIZE */ - {OV5693_8BIT, 0x3809, 0xe0}, /* 736 */ - {OV5693_8BIT, 0x380a, 0x01}, /* TIMING_Y_OUTPUT_SIZE */ - {OV5693_8BIT, 0x380b, 0xf0}, /* 496 */ - {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */ - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */ - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */ - {OV5693_8BIT, 0x3811, 0x02}, /* 2 */ - {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */ - {OV5693_8BIT, 0x3813, 0x00}, /* 0 */ - {OV5693_8BIT, 0x3814, 0x11}, /* 
TIME_X_INC */ - {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */ - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */ - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - -static struct ov5693_reg const ov5693_2576x1936_30fps[] = { - {OV5693_8BIT, 0x3501, 0x7b}, - {OV5693_8BIT, 0x3502, 0x00}, - {OV5693_8BIT, 0x3708, 0xe2}, - {OV5693_8BIT, 0x3709, 0xc3}, - {OV5693_8BIT, 0x3803, 0x00}, - {OV5693_8BIT, 0x3806, 0x07}, - {OV5693_8BIT, 0x3807, 0xa3}, - {OV5693_8BIT, 0x3808, 0x0a}, - {OV5693_8BIT, 0x3809, 0x10}, - {OV5693_8BIT, 0x380a, 0x07}, - {OV5693_8BIT, 0x380b, 0x90}, - {OV5693_8BIT, 0x380c, 0x0a}, - {OV5693_8BIT, 0x380d, 0x80}, - {OV5693_8BIT, 0x380e, 0x07}, - {OV5693_8BIT, 0x380f, 0xc0}, - {OV5693_8BIT, 0x3811, 0x18}, - {OV5693_8BIT, 0x3813, 0x00}, - {OV5693_8BIT, 0x3814, 0x11}, - {OV5693_8BIT, 0x3815, 0x11}, - {OV5693_8BIT, 0x3820, 0x00}, - {OV5693_8BIT, 0x3821, 0x1e}, - {OV5693_8BIT, 0x5002, 0x00}, - {OV5693_8BIT, 0x0100, 0x01}, - {OV5693_TOK_TERM, 0, 0} -}; - -static struct ov5693_resolution ov5693_res_preview[] = { - { - .desc = "ov5693_736x496_30fps", - .width = 736, - .height = 496, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_736x496_30fps, - }, - { - .desc = "ov5693_1616x1216_30fps", - .width = 1616, - .height = 1216, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_1616x1216_30fps, - }, - { - .desc = "ov5693_5M_30fps", - .width = 2576, - .height = 1456, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_2576x1456_30fps, - }, - { - .desc = "ov5693_5M_30fps", - .width = 
2576, - .height = 1936, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_2576x1936_30fps, - }, -}; -#define N_RES_PREVIEW (ARRAY_SIZE(ov5693_res_preview)) - -/* - * Disable non-preview configurations until the configuration selection is - * improved. - */ -#if ENABLE_NON_PREVIEW -struct ov5693_resolution ov5693_res_still[] = { - { - .desc = "ov5693_736x496_30fps", - .width = 736, - .height = 496, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_736x496_30fps, - }, - { - .desc = "ov5693_1424x1168_30fps", - .width = 1424, - .height = 1168, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_1424x1168_30fps, - }, - { - .desc = "ov5693_1616x1216_30fps", - .width = 1616, - .height = 1216, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_1616x1216_30fps, - }, - { - .desc = "ov5693_5M_30fps", - .width = 2592, - .height = 1456, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_2592x1456_30fps, - }, - { - .desc = "ov5693_5M_30fps", - .width = 2592, - .height = 1944, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_2592x1944_30fps, - }, -}; -#define N_RES_STILL (ARRAY_SIZE(ov5693_res_still)) - -struct ov5693_resolution ov5693_res_video[] = { - { - .desc = "ov5693_736x496_30fps", - .width = 736, - .height = 496, - 
.fps = 30, - .pix_clk_freq = 160, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 2, - .bin_factor_y = 2, - .bin_mode = 1, - .regs = ov5693_736x496, - }, - { - .desc = "ov5693_336x256_30fps", - .width = 336, - .height = 256, - .fps = 30, - .pix_clk_freq = 160, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 2, - .bin_factor_y = 2, - .bin_mode = 1, - .regs = ov5693_336x256, - }, - { - .desc = "ov5693_368x304_30fps", - .width = 368, - .height = 304, - .fps = 30, - .pix_clk_freq = 160, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 2, - .bin_factor_y = 2, - .bin_mode = 1, - .regs = ov5693_368x304, - }, - { - .desc = "ov5693_192x160_30fps", - .width = 192, - .height = 160, - .fps = 30, - .pix_clk_freq = 160, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 2, - .bin_factor_y = 2, - .bin_mode = 1, - .regs = ov5693_192x160, - }, - { - .desc = "ov5693_1296x736_30fps", - .width = 1296, - .height = 736, - .fps = 30, - .pix_clk_freq = 160, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 2, - .bin_factor_y = 2, - .bin_mode = 0, - .regs = ov5693_1296x736, - }, - { - .desc = "ov5693_1296x976_30fps", - .width = 1296, - .height = 976, - .fps = 30, - .pix_clk_freq = 160, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 2, - .bin_factor_y = 2, - .bin_mode = 0, - .regs = ov5693_1296x976, - }, - { - .desc = "ov5693_1636P_30fps", - .width = 1636, - .height = 1096, - .fps = 30, - .pix_clk_freq = 160, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_1636p_30fps, - }, - { - .desc = "ov5693_1080P_30fps", - .width = 1940, - .height = 1096, - .fps = 30, - .pix_clk_freq = 160, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 
1, - .bin_mode = 0, - .regs = ov5693_1940x1096, - }, - { - .desc = "ov5693_5M_30fps", - .width = 2592, - .height = 1456, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_2592x1456_30fps, - }, - { - .desc = "ov5693_5M_30fps", - .width = 2592, - .height = 1944, - .pix_clk_freq = 160, - .fps = 30, - .used = 0, - .pixels_per_line = 2688, - .lines_per_frame = 1984, - .bin_factor_x = 1, - .bin_factor_y = 1, - .bin_mode = 0, - .regs = ov5693_2592x1944_30fps, - }, -}; -#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video)) -#endif - -static struct ov5693_resolution *ov5693_res = ov5693_res_preview; -static unsigned long N_RES = N_RES_PREVIEW; -#endif diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h deleted file mode 100644 index ebe193ba3871..000000000000 --- a/drivers/staging/media/atomisp/include/linux/atomisp.h +++ /dev/null @@ -1,1359 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#ifdef CSS15 -#include -#else - -#ifndef _ATOM_ISP_H -#define _ATOM_ISP_H - -#include -#include - -/* struct media_device_info.hw_revision */ -#define ATOMISP_HW_REVISION_MASK 0x0000ff00 -#define ATOMISP_HW_REVISION_SHIFT 8 -#define ATOMISP_HW_REVISION_ISP2300 0x00 -#define ATOMISP_HW_REVISION_ISP2400 0x10 -#define ATOMISP_HW_REVISION_ISP2401_LEGACY 0x11 -#define ATOMISP_HW_REVISION_ISP2401 0x20 - -#define ATOMISP_HW_STEPPING_MASK 0x000000ff -#define ATOMISP_HW_STEPPING_A0 0x00 -#define ATOMISP_HW_STEPPING_B0 0x10 - -/*ISP binary running mode*/ -#define CI_MODE_PREVIEW 0x8000 -#define CI_MODE_VIDEO 0x4000 -#define CI_MODE_STILL_CAPTURE 0x2000 -#define CI_MODE_CONTINUOUS 0x1000 -#define CI_MODE_NONE 0x0000 - -#define OUTPUT_MODE_FILE 0x0100 -#define OUTPUT_MODE_TEXT 0x0200 - -/* - * Camera HAL sets this flag in v4l2_buffer reserved2 to indicate this - * buffer has a per-frame parameter. - */ -#define ATOMISP_BUFFER_HAS_PER_FRAME_SETTING 0x80000000 - -/* Custom format for RAW capture from M10MO 0x3130314d */ -#define V4L2_PIX_FMT_CUSTOM_M10MO_RAW v4l2_fourcc('M', '1', '0', '1') - -/* Custom media bus formats being used in atomisp */ -#define V4L2_MBUS_FMT_CUSTOM_YUV420 0x8001 -#define V4L2_MBUS_FMT_CUSTOM_YVU420 0x8002 -#define V4L2_MBUS_FMT_CUSTOM_YUV422P 0x8003 -#define V4L2_MBUS_FMT_CUSTOM_YUV444 0x8004 -#define V4L2_MBUS_FMT_CUSTOM_NV12 0x8005 -#define V4L2_MBUS_FMT_CUSTOM_NV21 0x8006 -#define V4L2_MBUS_FMT_CUSTOM_NV16 0x8007 -#define V4L2_MBUS_FMT_CUSTOM_YUYV 0x8008 -#define V4L2_MBUS_FMT_CUSTOM_SBGGR16 0x8009 -#define V4L2_MBUS_FMT_CUSTOM_RGB32 0x800a - -/* Custom media bus format for M10MO RAW capture */ -#if 0 -#define V4L2_MBUS_FMT_CUSTOM_M10MO_RAW 0x800b -#endif - -/* Configuration used by Bayer noise reduction and YCC noise reduction */ -struct atomisp_nr_config { - /* [gain] Strength of noise reduction for Bayer NR (Used by Bayer NR) */ - unsigned int bnr_gain; - /* [gain] Strength of noise reduction for YCC NR (Used by YCC NR) */ - unsigned 
int ynr_gain; - /* [intensity] Sensitivity of Edge (Used by Bayer NR) */ - unsigned int direction; - /* [intensity] coring threshold for Cb (Used by YCC NR) */ - unsigned int threshold_cb; - /* [intensity] coring threshold for Cr (Used by YCC NR) */ - unsigned int threshold_cr; -}; - -/* Temporal noise reduction configuration */ -struct atomisp_tnr_config { - unsigned int gain; /* [gain] Strength of NR */ - unsigned int threshold_y;/* [intensity] Motion sensitivity for Y */ - unsigned int threshold_uv;/* [intensity] Motion sensitivity for U/V */ -}; - -/* Histogram. This contains num_elements values of type unsigned int. - * The data pointer is a DDR pointer (virtual address). - */ -struct atomisp_histogram { - unsigned int num_elements; - void __user *data; -}; - -enum atomisp_ob_mode { - atomisp_ob_mode_none, - atomisp_ob_mode_fixed, - atomisp_ob_mode_raster -}; - -/* Optical black level configuration */ -struct atomisp_ob_config { - /* Obtical black level mode (Fixed / Raster) */ - enum atomisp_ob_mode mode; - /* [intensity] optical black level for GR (relevant for fixed mode) */ - unsigned int level_gr; - /* [intensity] optical black level for R (relevant for fixed mode) */ - unsigned int level_r; - /* [intensity] optical black level for B (relevant for fixed mode) */ - unsigned int level_b; - /* [intensity] optical black level for GB (relevant for fixed mode) */ - unsigned int level_gb; - /* [BQ] 0..63 start position of OB area (relevant for raster mode) */ - unsigned short start_position; - /* [BQ] start..63 end position of OB area (relevant for raster mode) */ - unsigned short end_position; -}; - -/* Edge enhancement (sharpen) configuration */ -struct atomisp_ee_config { - /* [gain] The strength of sharpness. u5_11 */ - unsigned int gain; - /* [intensity] The threshold that divides noises from edge. u8_8 */ - unsigned int threshold; - /* [gain] The strength of sharpness in pell-mell area. 
u5_11 */ - unsigned int detail_gain; -}; - -struct atomisp_3a_output { - int ae_y; - int awb_cnt; - int awb_gr; - int awb_r; - int awb_b; - int awb_gb; - int af_hpf1; - int af_hpf2; -}; - -enum atomisp_calibration_type { - calibration_type1, - calibration_type2, - calibration_type3 -}; - -struct atomisp_calibration_group { - unsigned int size; - unsigned int type; - unsigned short *calb_grp_values; -}; - -struct atomisp_gc_config { - __u16 gain_k1; - __u16 gain_k2; -}; - -struct atomisp_3a_config { - unsigned int ae_y_coef_r; /* [gain] Weight of R for Y */ - unsigned int ae_y_coef_g; /* [gain] Weight of G for Y */ - unsigned int ae_y_coef_b; /* [gain] Weight of B for Y */ - unsigned int awb_lg_high_raw; /* [intensity] - AWB level gate high for raw */ - unsigned int awb_lg_low; /* [intensity] AWB level gate low */ - unsigned int awb_lg_high; /* [intensity] AWB level gate high */ - int af_fir1_coef[7]; /* [factor] AF FIR coefficients of fir1 */ - int af_fir2_coef[7]; /* [factor] AF FIR coefficients of fir2 */ -}; - -struct atomisp_dvs_grid_info { - uint32_t enable; - uint32_t width; - uint32_t aligned_width; - uint32_t height; - uint32_t aligned_height; - uint32_t bqs_per_grid_cell; - uint32_t num_hor_coefs; - uint32_t num_ver_coefs; -}; - -struct atomisp_dvs_envelop { - unsigned int width; - unsigned int height; -}; - -struct atomisp_grid_info { - uint32_t enable; - uint32_t use_dmem; - uint32_t has_histogram; - uint32_t s3a_width; - uint32_t s3a_height; - uint32_t aligned_width; - uint32_t aligned_height; - uint32_t s3a_bqs_per_grid_cell; - uint32_t deci_factor_log2; - uint32_t elem_bit_depth; -}; - -struct atomisp_dis_vector { - int x; - int y; -}; - - -/* DVS 2.0 Coefficient types. This structure contains 4 pointers to - * arrays that contain the coeffients for each type. 
- */ -struct atomisp_dvs2_coef_types { - short __user *odd_real; /** real part of the odd coefficients*/ - short __user *odd_imag; /** imaginary part of the odd coefficients*/ - short __user *even_real;/** real part of the even coefficients*/ - short __user *even_imag;/** imaginary part of the even coefficients*/ -}; - -/* - * DVS 2.0 Statistic types. This structure contains 4 pointers to - * arrays that contain the statistics for each type. - */ -struct atomisp_dvs2_stat_types { - int __user *odd_real; /** real part of the odd statistics*/ - int __user *odd_imag; /** imaginary part of the odd statistics*/ - int __user *even_real;/** real part of the even statistics*/ - int __user *even_imag;/** imaginary part of the even statistics*/ -}; - -struct atomisp_dis_coefficients { - struct atomisp_dvs_grid_info grid_info; - struct atomisp_dvs2_coef_types hor_coefs; - struct atomisp_dvs2_coef_types ver_coefs; -}; - -struct atomisp_dvs2_statistics { - struct atomisp_dvs_grid_info grid_info; - struct atomisp_dvs2_stat_types hor_prod; - struct atomisp_dvs2_stat_types ver_prod; -}; - -struct atomisp_dis_statistics { - struct atomisp_dvs2_statistics dvs2_stat; - uint32_t exp_id; -}; - -struct atomisp_3a_rgby_output { - uint32_t r; - uint32_t g; - uint32_t b; - uint32_t y; -}; - -/* - * Because we have 2 pipes at max to output metadata, therefore driver will use - * ATOMISP_MAIN_METADATA to specify the metadata from the pipe which keeps - * streaming always and use ATOMISP_SEC_METADATA to specify the metadata from - * the pipe which is streaming by request like capture pipe of ZSL or SDV mode - * as secondary metadata. And for the use case which has only one pipe - * streaming like online capture, ATOMISP_MAIN_METADATA will be used. 
- */ -enum atomisp_metadata_type { - ATOMISP_MAIN_METADATA = 0, - ATOMISP_SEC_METADATA, - ATOMISP_METADATA_TYPE_NUM, -}; - -struct atomisp_metadata_with_type { - /* to specify which type of metadata to get */ - enum atomisp_metadata_type type; - void __user *data; - uint32_t width; - uint32_t height; - uint32_t stride; /* in bytes */ - uint32_t exp_id; /* exposure ID */ - uint32_t *effective_width; /* mipi packets valid data size */ -}; - -struct atomisp_metadata { - void __user *data; - uint32_t width; - uint32_t height; - uint32_t stride; /* in bytes */ - uint32_t exp_id; /* exposure ID */ - uint32_t *effective_width; /* mipi packets valid data size */ -}; - -struct atomisp_ext_isp_ctrl { - uint32_t id; - uint32_t data; -}; - -struct atomisp_3a_statistics { - struct atomisp_grid_info grid_info; - struct atomisp_3a_output __user *data; - struct atomisp_3a_rgby_output __user *rgby_data; - uint32_t exp_id; /* exposure ID */ - uint32_t isp_config_id; /* isp config ID */ -}; - -/** - * struct atomisp_cont_capture_conf - continuous capture parameters - * @num_captures: number of still images to capture - * @skip_frames: number of frames to skip between 2 captures - * @offset: offset in ring buffer to start capture - * - * For example, to capture 1 frame from past, current, and 1 from future - * and skip one frame between each capture, parameters would be: - * num_captures:3 - * skip_frames:1 - * offset:-2 - */ - -struct atomisp_cont_capture_conf { - int num_captures; - unsigned int skip_frames; - int offset; - __u32 reserved[5]; -}; - -struct atomisp_ae_window { - int x_left; - int x_right; - int y_top; - int y_bottom; - int weight; -}; - -/* White Balance (Gain Adjust) */ -struct atomisp_wb_config { - unsigned int integer_bits; - unsigned int gr; /* unsigned .<16-integer_bits> */ - unsigned int r; /* unsigned .<16-integer_bits> */ - unsigned int b; /* unsigned .<16-integer_bits> */ - unsigned int gb; /* unsigned .<16-integer_bits> */ -}; - -/* Color Space Conversion 
settings */ -struct atomisp_cc_config { - unsigned int fraction_bits; - int matrix[3 * 3]; /* RGB2YUV Color matrix, signed - <13-fraction_bits>. */ -}; - -/* De pixel noise configuration */ -struct atomisp_de_config { - unsigned int pixelnoise; - unsigned int c1_coring_threshold; - unsigned int c2_coring_threshold; -}; - -/* Chroma enhancement */ -struct atomisp_ce_config { - unsigned char uv_level_min; - unsigned char uv_level_max; -}; - -/* Defect pixel correction configuration */ -struct atomisp_dp_config { - /* [intensity] The threshold of defect Pixel Correction, representing - * the permissible difference of intensity between one pixel and its - * surrounding pixels. Smaller values result in more frequent pixel - * corrections. u0_16 - */ - unsigned int threshold; - /* [gain] The sensitivity of mis-correction. ISP will miss a lot of - * defects if the value is set too large. u8_8 - */ - unsigned int gain; - unsigned int gr; - unsigned int r; - unsigned int b; - unsigned int gb; -}; - -/* XNR threshold */ -struct atomisp_xnr_config { - __u16 threshold; -}; - -/* metadata config */ -struct atomisp_metadata_config { - uint32_t metadata_height; - uint32_t metadata_stride; -}; - -/* - * Generic resolution structure. 
- */ -struct atomisp_resolution { - uint32_t width; /** Width */ - uint32_t height; /** Height */ -}; - -/* - * This specifies the coordinates (x,y) - */ -struct atomisp_zoom_point { - int32_t x; /** x coordinate */ - int32_t y; /** y coordinate */ -}; - -/* - * This specifies the region - */ -struct atomisp_zoom_region { - struct atomisp_zoom_point origin; /* Starting point coordinates for the region */ - struct atomisp_resolution resolution; /* Region resolution */ -}; - -struct atomisp_dz_config { - uint32_t dx; /** Horizontal zoom factor */ - uint32_t dy; /** Vertical zoom factor */ - struct atomisp_zoom_region zoom_region; /** region for zoom */ -}; - -struct atomisp_parm { - struct atomisp_grid_info info; - struct atomisp_dvs_grid_info dvs_grid; - struct atomisp_dvs_envelop dvs_envelop; - struct atomisp_wb_config wb_config; - struct atomisp_cc_config cc_config; - struct atomisp_ob_config ob_config; - struct atomisp_de_config de_config; - struct atomisp_dz_config dz_config; - struct atomisp_ce_config ce_config; - struct atomisp_dp_config dp_config; - struct atomisp_nr_config nr_config; - struct atomisp_ee_config ee_config; - struct atomisp_tnr_config tnr_config; - struct atomisp_metadata_config metadata_config; -}; - -struct dvs2_bq_resolution { - int width_bq; /* width [BQ] */ - int height_bq; /* height [BQ] */ -}; - -struct atomisp_dvs2_bq_resolutions { - /* GDC source image size [BQ] */ - struct dvs2_bq_resolution source_bq; - /* GDC output image size [BQ] */ - struct dvs2_bq_resolution output_bq; - /* GDC effective envelope size [BQ] */ - struct dvs2_bq_resolution envelope_bq; - /* isp pipe filter size [BQ] */ - struct dvs2_bq_resolution ispfilter_bq; - /* GDC shit size [BQ] */ - struct dvs2_bq_resolution gdc_shift_bq; -}; - -struct atomisp_dvs_6axis_config { - uint32_t exp_id; - uint32_t width_y; - uint32_t height_y; - uint32_t width_uv; - uint32_t height_uv; - uint32_t *xcoords_y; - uint32_t *ycoords_y; - uint32_t *xcoords_uv; - uint32_t *ycoords_uv; -}; 
- -struct atomisp_formats_config { - uint32_t video_full_range_flag; -}; - -struct atomisp_parameters { - struct atomisp_wb_config *wb_config; /* White Balance config */ - struct atomisp_cc_config *cc_config; /* Color Correction config */ - struct atomisp_tnr_config *tnr_config; /* Temporal Noise Reduction */ - struct atomisp_ecd_config *ecd_config; /* Eigen Color Demosaicing */ - struct atomisp_ynr_config *ynr_config; /* Y(Luma) Noise Reduction */ - struct atomisp_fc_config *fc_config; /* Fringe Control */ - struct atomisp_formats_config *formats_config; /* Formats Control */ - struct atomisp_cnr_config *cnr_config; /* Chroma Noise Reduction */ - struct atomisp_macc_config *macc_config; /* MACC */ - struct atomisp_ctc_config *ctc_config; /* Chroma Tone Control */ - struct atomisp_aa_config *aa_config; /* Anti-Aliasing */ - struct atomisp_aa_config *baa_config; /* Anti-Aliasing */ - struct atomisp_ce_config *ce_config; - struct atomisp_dvs_6axis_config *dvs_6axis_config; - struct atomisp_ob_config *ob_config; /* Objective Black config */ - struct atomisp_dp_config *dp_config; /* Dead Pixel config */ - struct atomisp_nr_config *nr_config; /* Noise Reduction config */ - struct atomisp_ee_config *ee_config; /* Edge Enhancement config */ - struct atomisp_de_config *de_config; /* Demosaic config */ - struct atomisp_gc_config *gc_config; /* Gamma Correction config */ - struct atomisp_anr_config *anr_config; /* Advanced Noise Reduction */ - struct atomisp_3a_config *a3a_config; /* 3A Statistics config */ - struct atomisp_xnr_config *xnr_config; /* eXtra Noise Reduction */ - struct atomisp_dz_config *dz_config; /* Digital Zoom */ - struct atomisp_cc_config *yuv2rgb_cc_config; /* Color - Correction config */ - struct atomisp_cc_config *rgb2yuv_cc_config; /* Color - Correction config */ - struct atomisp_macc_table *macc_table; - struct atomisp_gamma_table *gamma_table; - struct atomisp_ctc_table *ctc_table; - struct atomisp_xnr_table *xnr_table; - struct 
atomisp_rgb_gamma_table *r_gamma_table; - struct atomisp_rgb_gamma_table *g_gamma_table; - struct atomisp_rgb_gamma_table *b_gamma_table; - struct atomisp_vector *motion_vector; /* For 2-axis DVS */ - struct atomisp_shading_table *shading_table; - struct atomisp_morph_table *morph_table; - struct atomisp_dvs_coefficients *dvs_coefs; /* DVS 1.0 coefficients */ - struct atomisp_dvs2_coefficients *dvs2_coefs; /* DVS 2.0 coefficients */ - struct atomisp_capture_config *capture_config; - struct atomisp_anr_thres *anr_thres; - - void *lin_2500_config; /* Skylake: Linearization config */ - void *obgrid_2500_config; /* Skylake: OBGRID config */ - void *bnr_2500_config; /* Skylake: bayer denoise config */ - void *shd_2500_config; /* Skylake: shading config */ - void *dm_2500_config; /* Skylake: demosaic config */ - void *rgbpp_2500_config; /* Skylake: RGBPP config */ - void *dvs_stat_2500_config; /* Skylake: DVS STAT config */ - void *lace_stat_2500_config; /* Skylake: LACE STAT config */ - void *yuvp1_2500_config; /* Skylake: yuvp1 config */ - void *yuvp2_2500_config; /* Skylake: yuvp2 config */ - void *tnr_2500_config; /* Skylake: TNR config */ - void *dpc_2500_config; /* Skylake: DPC config */ - void *awb_2500_config; /* Skylake: auto white balance config */ - void *awb_fr_2500_config; /* Skylake: auto white balance filter response config */ - void *anr_2500_config; /* Skylake: ANR config */ - void *af_2500_config; /* Skylake: auto focus config */ - void *ae_2500_config; /* Skylake: auto exposure config */ - void *bds_2500_config; /* Skylake: bayer downscaler config */ - void *dvs_2500_config; /* Skylake: digital video stabilization config */ - void *res_mgr_2500_config; - - /* - * Output frame pointer the config is to be applied to (optional), - * set to NULL to make this config is applied as global. 
- */ - void *output_frame; - /* - * Unique ID to track which config was actually applied to a particular - * frame, driver will send this id back with output frame together. - */ - uint32_t isp_config_id; - - /* - * Switch to control per_frame setting: - * 0: this is a global setting - * 1: this is a per_frame setting - * PLEASE KEEP THIS AT THE END OF THE STRUCTURE!! - */ - uint32_t per_frame_setting; -}; - -#define ATOMISP_GAMMA_TABLE_SIZE 1024 -struct atomisp_gamma_table { - unsigned short data[ATOMISP_GAMMA_TABLE_SIZE]; -}; - -/* Morphing table for advanced ISP. - * Each line of width elements takes up COORD_TABLE_EXT_WIDTH elements - * in memory. - */ -#define ATOMISP_MORPH_TABLE_NUM_PLANES 6 -struct atomisp_morph_table { - unsigned int enabled; - - unsigned int height; - unsigned int width; /* number of valid elements per line */ - unsigned short __user *coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES]; - unsigned short __user *coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES]; -}; - -#define ATOMISP_NUM_SC_COLORS 4 -#define ATOMISP_SC_FLAG_QUERY (1 << 0) - -struct atomisp_shading_table { - __u32 enable; - - __u32 sensor_width; - __u32 sensor_height; - __u32 width; - __u32 height; - __u32 fraction_bits; - - __u16 *data[ATOMISP_NUM_SC_COLORS]; -}; - -struct atomisp_makernote_info { - /* bits 31-16: numerator, bits 15-0: denominator */ - unsigned int focal_length; - /* bits 31-16: numerator, bits 15-0: denominator*/ - unsigned int f_number_curr; - /* - * bits 31-24: max f-number numerator - * bits 23-16: max f-number denominator - * bits 15-8: min f-number numerator - * bits 7-0: min f-number denominator - */ - unsigned int f_number_range; -}; - -/* parameter for MACC */ -#define ATOMISP_NUM_MACC_AXES 16 -struct atomisp_macc_table { - short data[4 * ATOMISP_NUM_MACC_AXES]; -}; - -struct atomisp_macc_config { - int color_effect; - struct atomisp_macc_table table; -}; - -/* Parameter for ctc parameter control */ -#define ATOMISP_CTC_TABLE_SIZE 1024 -struct 
atomisp_ctc_table { - unsigned short data[ATOMISP_CTC_TABLE_SIZE]; -}; - -/* Parameter for overlay image loading */ -struct atomisp_overlay { - /* the frame containing the overlay data The overlay frame width should - * be the multiples of 2*ISP_VEC_NELEMS. The overlay frame height - * should be the multiples of 2. - */ - struct v4l2_framebuffer *frame; - /* Y value of overlay background */ - unsigned char bg_y; - /* U value of overlay background */ - char bg_u; - /* V value of overlay background */ - char bg_v; - /* the blending percent of input data for Y subpixels */ - unsigned char blend_input_perc_y; - /* the blending percent of input data for U subpixels */ - unsigned char blend_input_perc_u; - /* the blending percent of input data for V subpixels */ - unsigned char blend_input_perc_v; - /* the blending percent of overlay data for Y subpixels */ - unsigned char blend_overlay_perc_y; - /* the blending percent of overlay data for U subpixels */ - unsigned char blend_overlay_perc_u; - /* the blending percent of overlay data for V subpixels */ - unsigned char blend_overlay_perc_v; - /* the overlay start x pixel position on output frame It should be the - multiples of 2*ISP_VEC_NELEMS. */ - unsigned int overlay_start_x; - /* the overlay start y pixel position on output frame It should be the - multiples of 2. */ - unsigned int overlay_start_y; -}; - -/* Sensor resolution specific data for AE calculation.*/ -struct atomisp_sensor_mode_data { - unsigned int coarse_integration_time_min; - unsigned int coarse_integration_time_max_margin; - unsigned int fine_integration_time_min; - unsigned int fine_integration_time_max_margin; - unsigned int fine_integration_time_def; - unsigned int frame_length_lines; - unsigned int line_length_pck; - unsigned int read_mode; - unsigned int vt_pix_clk_freq_mhz; - unsigned int crop_horizontal_start; /* Sensor crop start cord. (x0,y0)*/ - unsigned int crop_vertical_start; - unsigned int crop_horizontal_end; /* Sensor crop end cord. 
(x1,y1)*/ - unsigned int crop_vertical_end; - unsigned int output_width; /* input size to ISP after binning/scaling */ - unsigned int output_height; - uint8_t binning_factor_x; /* horizontal binning factor used */ - uint8_t binning_factor_y; /* vertical binning factor used */ - uint16_t hts; -}; - -struct atomisp_exposure { - unsigned int integration_time[8]; - unsigned int shutter_speed[8]; - unsigned int gain[4]; - unsigned int aperture; -}; - -/* For texture streaming. */ -struct atomisp_bc_video_package { - int ioctl_cmd; - int device_id; - int inputparam; - int outputparam; -}; - -enum atomisp_focus_hp { - ATOMISP_FOCUS_HP_IN_PROGRESS = (1U << 2), - ATOMISP_FOCUS_HP_COMPLETE = (2U << 2), - ATOMISP_FOCUS_HP_FAILED = (3U << 2) -}; - -/* Masks */ -#define ATOMISP_FOCUS_STATUS_MOVING (1U << 0) -#define ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE (1U << 1) -#define ATOMISP_FOCUS_STATUS_HOME_POSITION (3U << 2) - -enum atomisp_camera_port { - ATOMISP_CAMERA_PORT_SECONDARY, - ATOMISP_CAMERA_PORT_PRIMARY, - ATOMISP_CAMERA_PORT_TERTIARY, - ATOMISP_CAMERA_NR_PORTS -}; - -/* Flash modes. Default is off. - * Setting a flash to TORCH or INDICATOR mode will automatically - * turn it on. Setting it to FLASH mode will not turn on the flash - * until the FLASH_STROBE command is sent. */ -enum atomisp_flash_mode { - ATOMISP_FLASH_MODE_OFF, - ATOMISP_FLASH_MODE_FLASH, - ATOMISP_FLASH_MODE_TORCH, - ATOMISP_FLASH_MODE_INDICATOR, -}; - -/* Flash statuses, used by atomisp driver to check before starting - * flash and after having started flash. */ -enum atomisp_flash_status { - ATOMISP_FLASH_STATUS_OK, - ATOMISP_FLASH_STATUS_HW_ERROR, - ATOMISP_FLASH_STATUS_INTERRUPTED, - ATOMISP_FLASH_STATUS_TIMEOUT, -}; - -/* Frame status. This is used to detect corrupted frames and flash - * exposed frames. Usually, the first 2 frames coming out of the sensor - * are corrupted. When using flash, the frame before and the frame after - * the flash exposed frame may be partially exposed by flash. 
The ISP - * statistics for these frames should not be used by the 3A library. - * The frame status value can be found in the "reserved" field in the - * v4l2_buffer struct. */ -enum atomisp_frame_status { - ATOMISP_FRAME_STATUS_OK, - ATOMISP_FRAME_STATUS_CORRUPTED, - ATOMISP_FRAME_STATUS_FLASH_EXPOSED, - ATOMISP_FRAME_STATUS_FLASH_PARTIAL, - ATOMISP_FRAME_STATUS_FLASH_FAILED, -}; - -enum atomisp_acc_type { - ATOMISP_ACC_STANDALONE, /* Stand-alone acceleration */ - ATOMISP_ACC_OUTPUT, /* Accelerator stage on output frame */ - ATOMISP_ACC_VIEWFINDER /* Accelerator stage on viewfinder frame */ -}; - -enum atomisp_acc_arg_type { - ATOMISP_ACC_ARG_SCALAR_IN, /* Scalar input argument */ - ATOMISP_ACC_ARG_SCALAR_OUT, /* Scalar output argument */ - ATOMISP_ACC_ARG_SCALAR_IO, /* Scalar in/output argument */ - ATOMISP_ACC_ARG_PTR_IN, /* Pointer input argument */ - ATOMISP_ACC_ARG_PTR_OUT, /* Pointer output argument */ - ATOMISP_ACC_ARG_PTR_IO, /* Pointer in/output argument */ - ATOMISP_ARG_PTR_NOFLUSH, /* Pointer argument will not be flushed */ - ATOMISP_ARG_PTR_STABLE, /* Pointer input argument that is stable */ - ATOMISP_ACC_ARG_FRAME /* Frame argument */ -}; - -/* ISP memories, isp2400 */ -enum atomisp_acc_memory { - ATOMISP_ACC_MEMORY_PMEM0 = 0, - ATOMISP_ACC_MEMORY_DMEM0, - /* for backward compatibility */ - ATOMISP_ACC_MEMORY_DMEM = ATOMISP_ACC_MEMORY_DMEM0, - ATOMISP_ACC_MEMORY_VMEM0, - ATOMISP_ACC_MEMORY_VAMEM0, - ATOMISP_ACC_MEMORY_VAMEM1, - ATOMISP_ACC_MEMORY_VAMEM2, - ATOMISP_ACC_MEMORY_HMEM0, - ATOMISP_ACC_NR_MEMORY -}; - -enum atomisp_ext_isp_id { - EXT_ISP_CID_ISO = 0, - EXT_ISP_CID_CAPTURE_HDR, - EXT_ISP_CID_CAPTURE_LLS, - EXT_ISP_CID_FOCUS_MODE, - EXT_ISP_CID_FOCUS_EXECUTION, - EXT_ISP_CID_TOUCH_POSX, - EXT_ISP_CID_TOUCH_POSY, - EXT_ISP_CID_CAF_STATUS, - EXT_ISP_CID_AF_STATUS, - EXT_ISP_CID_GET_AF_MODE, - EXT_ISP_CID_CAPTURE_BURST, - EXT_ISP_CID_FLASH_MODE, - EXT_ISP_CID_ZOOM, - EXT_ISP_CID_SHOT_MODE -}; - -#define EXT_ISP_FOCUS_MODE_NORMAL 0 -#define 
EXT_ISP_FOCUS_MODE_MACRO 1 -#define EXT_ISP_FOCUS_MODE_TOUCH_AF 2 -#define EXT_ISP_FOCUS_MODE_PREVIEW_CAF 3 -#define EXT_ISP_FOCUS_MODE_MOVIE_CAF 4 -#define EXT_ISP_FOCUS_MODE_FACE_CAF 5 -#define EXT_ISP_FOCUS_MODE_TOUCH_MACRO 6 -#define EXT_ISP_FOCUS_MODE_TOUCH_CAF 7 - -#define EXT_ISP_FOCUS_STOP 0 -#define EXT_ISP_FOCUS_SEARCH 1 -#define EXT_ISP_PAN_FOCUSING 2 - -#define EXT_ISP_CAF_RESTART_CHECK 1 -#define EXT_ISP_CAF_STATUS_FOCUSING 2 -#define EXT_ISP_CAF_STATUS_SUCCESS 3 -#define EXT_ISP_CAF_STATUS_FAIL 4 - -#define EXT_ISP_AF_STATUS_INVALID 1 -#define EXT_ISP_AF_STATUS_FOCUSING 2 -#define EXT_ISP_AF_STATUS_SUCCESS 3 -#define EXT_ISP_AF_STATUS_FAIL 4 - -enum atomisp_burst_capture_options { - EXT_ISP_BURST_CAPTURE_CTRL_START = 0, - EXT_ISP_BURST_CAPTURE_CTRL_STOP -}; - -#define EXT_ISP_FLASH_MODE_OFF 0 -#define EXT_ISP_FLASH_MODE_ON 1 -#define EXT_ISP_FLASH_MODE_AUTO 2 -#define EXT_ISP_LED_TORCH_OFF 3 -#define EXT_ISP_LED_TORCH_ON 4 - -#define EXT_ISP_SHOT_MODE_AUTO 0 -#define EXT_ISP_SHOT_MODE_BEAUTY_FACE 1 -#define EXT_ISP_SHOT_MODE_BEST_PHOTO 2 -#define EXT_ISP_SHOT_MODE_DRAMA 3 -#define EXT_ISP_SHOT_MODE_BEST_FACE 4 -#define EXT_ISP_SHOT_MODE_ERASER 5 -#define EXT_ISP_SHOT_MODE_PANORAMA 6 -#define EXT_ISP_SHOT_MODE_RICH_TONE_HDR 7 -#define EXT_ISP_SHOT_MODE_NIGHT 8 -#define EXT_ISP_SHOT_MODE_SOUND_SHOT 9 -#define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO 10 -#define EXT_ISP_SHOT_MODE_SPORTS 11 - -struct atomisp_sp_arg { - enum atomisp_acc_arg_type type; /* Type of SP argument */ - void *value; /* Value of SP argument */ - unsigned int size; /* Size of SP argument */ -}; - -/* Acceleration API */ - -/* For CSS 1.0 only */ -struct atomisp_acc_fw_arg { - unsigned int fw_handle; - unsigned int index; - void __user *value; - size_t size; -}; - -/* - * Set arguments after first mapping with ATOMISP_IOC_ACC_S_MAPPED_ARG. 
- */ -struct atomisp_acc_s_mapped_arg { - unsigned int fw_handle; - __u32 memory; /* one of enum atomisp_acc_memory */ - size_t length; - unsigned long css_ptr; -}; - -struct atomisp_acc_fw_abort { - unsigned int fw_handle; - /* Timeout in us */ - unsigned int timeout; -}; - -struct atomisp_acc_fw_load { - unsigned int size; - unsigned int fw_handle; - void __user *data; -}; - -/* - * Load firmware to specified pipeline. - */ -struct atomisp_acc_fw_load_to_pipe { - __u32 flags; /* Flags, see below for valid values */ - unsigned int fw_handle; /* Handle, filled by kernel. */ - __u32 size; /* Firmware binary size */ - void __user *data; /* Pointer to firmware */ - __u32 type; /* Binary type */ - __u32 reserved[3]; /* Set to zero */ -}; -/* - * Set Senor run mode - */ -struct atomisp_s_runmode { - __u32 mode; -}; - -#define ATOMISP_ACC_FW_LOAD_FL_PREVIEW (1 << 0) -#define ATOMISP_ACC_FW_LOAD_FL_COPY (1 << 1) -#define ATOMISP_ACC_FW_LOAD_FL_VIDEO (1 << 2) -#define ATOMISP_ACC_FW_LOAD_FL_CAPTURE (1 << 3) -#define ATOMISP_ACC_FW_LOAD_FL_ACC (1 << 4) -#define ATOMISP_ACC_FW_LOAD_FL_ENABLE (1 << 16) - -#define ATOMISP_ACC_FW_LOAD_TYPE_NONE 0 /* Normal binary: don't use */ -#define ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT 1 /* Stage on output */ -#define ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER 2 /* Stage on viewfinder */ -#define ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE 3 /* Stand-alone acceleration */ - -struct atomisp_acc_map { - __u32 flags; /* Flags, see list below */ - __u32 length; /* Length of data in bytes */ - void __user *user_ptr; /* Pointer into user space */ - unsigned long css_ptr; /* Pointer into CSS address space */ - __u32 reserved[4]; /* Set to zero */ -}; - -#define ATOMISP_MAP_FLAG_NOFLUSH 0x0001 /* Do not flush cache */ -#define ATOMISP_MAP_FLAG_CACHED 0x0002 /* Enable cache */ - -struct atomisp_acc_state { - __u32 flags; /* Flags, see list below */ -#define ATOMISP_STATE_FLAG_ENABLE ATOMISP_ACC_FW_LOAD_FL_ENABLE - unsigned int fw_handle; -}; - -struct 
atomisp_update_exposure { - unsigned int gain; - unsigned int digi_gain; - unsigned int update_gain; - unsigned int update_digi_gain; -}; - -/* - * V4L2 private internal data interface. - * ----------------------------------------------------------------------------- - * struct v4l2_private_int_data - request private data stored in video device - * internal memory. - * @size: sanity check to ensure userspace's buffer fits whole private data. - * If not, kernel will make partial copy (or nothing if @size == 0). - * @size is always corrected for the minimum necessary if IOCTL returns - * no error. - * @data: pointer to userspace buffer. - */ -struct v4l2_private_int_data { - __u32 size; - void __user *data; - __u32 reserved[2]; -}; - -enum atomisp_sensor_ae_bracketing_mode { - SENSOR_AE_BRACKETING_MODE_OFF = 0, - SENSOR_AE_BRACKETING_MODE_SINGLE, /* back to SW standby after bracketing */ - SENSOR_AE_BRACKETING_MODE_SINGLE_TO_STREAMING, /* back to normal streaming after bracketing */ - SENSOR_AE_BRACKETING_MODE_LOOP, /* continue AE bracketing in loop mode */ -}; - -struct atomisp_sensor_ae_bracketing_info { - unsigned int modes; /* bit mask to indicate supported modes */ - unsigned int lut_depth; -}; - -struct atomisp_sensor_ae_bracketing_lut_entry { - __u16 coarse_integration_time; - __u16 analog_gain; - __u16 digital_gain; -}; - -struct atomisp_sensor_ae_bracketing_lut { - struct atomisp_sensor_ae_bracketing_lut_entry *lut; - unsigned int lut_size; -}; - -/*Private IOCTLs for ISP */ -#define ATOMISP_IOC_G_XNR \ - _IOR('v', BASE_VIDIOC_PRIVATE + 0, int) -#define ATOMISP_IOC_S_XNR \ - _IOW('v', BASE_VIDIOC_PRIVATE + 0, int) -#define ATOMISP_IOC_G_NR \ - _IOR('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config) -#define ATOMISP_IOC_S_NR \ - _IOW('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config) -#define ATOMISP_IOC_G_TNR \ - _IOR('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config) -#define ATOMISP_IOC_S_TNR \ - _IOW('v', BASE_VIDIOC_PRIVATE + 2, struct 
atomisp_tnr_config) -#define ATOMISP_IOC_G_HISTOGRAM \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram) -#define ATOMISP_IOC_S_HISTOGRAM \ - _IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram) -#define ATOMISP_IOC_G_BLACK_LEVEL_COMP \ - _IOR('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config) -#define ATOMISP_IOC_S_BLACK_LEVEL_COMP \ - _IOW('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config) -#define ATOMISP_IOC_G_EE \ - _IOR('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config) -#define ATOMISP_IOC_S_EE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config) -/* Digital Image Stabilization: - * 1. get dis statistics: reads DIS statistics from ISP (every frame) - * 2. set dis coefficients: set DIS filter coefficients (one time) - * 3. set dis motion vecotr: set motion vector (result of DIS, every frame) - */ -#define ATOMISP_IOC_G_DIS_STAT \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics) - -#define ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS \ - _IOR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs2_bq_resolutions) - -#define ATOMISP_IOC_S_DIS_COEFS \ - _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients) - -#define ATOMISP_IOC_S_DIS_VECTOR \ - _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config) - -#define ATOMISP_IOC_G_3A_STAT \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics) -#define ATOMISP_IOC_G_ISP_PARM \ - _IOR('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm) -#define ATOMISP_IOC_S_ISP_PARM \ - _IOW('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm) -#define ATOMISP_IOC_G_ISP_GAMMA \ - _IOR('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table) -#define ATOMISP_IOC_S_ISP_GAMMA \ - _IOW('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table) -#define ATOMISP_IOC_G_ISP_GDC_TAB \ - _IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table) -#define ATOMISP_IOC_S_ISP_GDC_TAB \ - _IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table) 
-#define ATOMISP_IOC_ISP_MAKERNOTE \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 11, struct atomisp_makernote_info) - -/* macc parameter control*/ -#define ATOMISP_IOC_G_ISP_MACC \ - _IOR('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config) -#define ATOMISP_IOC_S_ISP_MACC \ - _IOW('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config) - -/* Defect pixel detection & Correction */ -#define ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION \ - _IOR('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config) -#define ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION \ - _IOW('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config) - -/* False Color Correction */ -#define ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION \ - _IOR('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config) -#define ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION \ - _IOW('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config) - -/* ctc parameter control */ -#define ATOMISP_IOC_G_ISP_CTC \ - _IOR('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table) -#define ATOMISP_IOC_S_ISP_CTC \ - _IOW('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table) - -/* white balance Correction */ -#define ATOMISP_IOC_G_ISP_WHITE_BALANCE \ - _IOR('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config) -#define ATOMISP_IOC_S_ISP_WHITE_BALANCE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config) - -/* fpn table loading */ -#define ATOMISP_IOC_S_ISP_FPN_TABLE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer) - -/* overlay image loading */ -#define ATOMISP_IOC_G_ISP_OVERLAY \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay) -#define ATOMISP_IOC_S_ISP_OVERLAY \ - _IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay) - -/* bcd driver bridge */ -#define ATOMISP_IOC_CAMERA_BRIDGE \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 19, struct atomisp_bc_video_package) - -/* Sensor resolution specific info for AE */ -#define ATOMISP_IOC_G_SENSOR_MODE_DATA \ - _IOR('v', BASE_VIDIOC_PRIVATE + 20, struct 
atomisp_sensor_mode_data) - -#define ATOMISP_IOC_S_EXPOSURE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 21, struct atomisp_exposure) - -/* sensor calibration registers group */ -#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group) - -/* white balance Correction */ -#define ATOMISP_IOC_G_3A_CONFIG \ - _IOR('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config) -#define ATOMISP_IOC_S_3A_CONFIG \ - _IOW('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config) - -/* Accelerate ioctls */ -#define ATOMISP_IOC_ACC_LOAD \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load) - -#define ATOMISP_IOC_ACC_UNLOAD \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 24, unsigned int) - -/* For CSS 1.0 only */ -#define ATOMISP_IOC_ACC_S_ARG \ - _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg) - -#define ATOMISP_IOC_ACC_START \ - _IOW('v', BASE_VIDIOC_PRIVATE + 24, unsigned int) - -#define ATOMISP_IOC_ACC_WAIT \ - _IOW('v', BASE_VIDIOC_PRIVATE + 25, unsigned int) - -#define ATOMISP_IOC_ACC_ABORT \ - _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_abort) - -#define ATOMISP_IOC_ACC_DESTAB \ - _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg) - -/* sensor OTP memory read */ -#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data) - -/* LCS (shading) table write */ -#define ATOMISP_IOC_S_ISP_SHD_TAB \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table) - -/* Gamma Correction */ -#define ATOMISP_IOC_G_ISP_GAMMA_CORRECTION \ - _IOR('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config) - -#define ATOMISP_IOC_S_ISP_GAMMA_CORRECTION \ - _IOW('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config) - -/* motor internal memory read */ -#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data) - -/* - * Ioctls to map and unmap user buffers to CSS address space for 
acceleration. - * User fills fields length and user_ptr and sets other fields to zero, - * kernel may modify the flags and sets css_ptr. - */ -#define ATOMISP_IOC_ACC_MAP \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map) - -/* User fills fields length, user_ptr, and css_ptr and zeroes other fields. */ -#define ATOMISP_IOC_ACC_UNMAP \ - _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map) - -#define ATOMISP_IOC_ACC_S_MAPPED_ARG \ - _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg) - -#define ATOMISP_IOC_ACC_LOAD_TO_PIPE \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe) - -#define ATOMISP_IOC_S_PARAMETERS \ - _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters) - -#define ATOMISP_IOC_S_CONT_CAPTURE_CONFIG \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 33, struct atomisp_cont_capture_conf) - -#define ATOMISP_IOC_G_METADATA \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata) - -#define ATOMISP_IOC_G_METADATA_BY_TYPE \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata_with_type) - -#define ATOMISP_IOC_EXT_ISP_CTRL \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 35, struct atomisp_ext_isp_ctrl) - -#define ATOMISP_IOC_EXP_ID_UNLOCK \ - _IOW('v', BASE_VIDIOC_PRIVATE + 36, int) - -#define ATOMISP_IOC_EXP_ID_CAPTURE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 37, int) - -#define ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 38, unsigned int) - -#define ATOMISP_IOC_G_FORMATS_CONFIG \ - _IOR('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config) - -#define ATOMISP_IOC_S_FORMATS_CONFIG \ - _IOW('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config) - -#define ATOMISP_IOC_S_EXPOSURE_WINDOW \ - _IOW('v', BASE_VIDIOC_PRIVATE + 40, struct atomisp_ae_window) - -#define ATOMISP_IOC_S_ACC_STATE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state) - -#define ATOMISP_IOC_G_ACC_STATE \ - _IOR('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state) - -#define 
ATOMISP_IOC_INJECT_A_FAKE_EVENT \ - _IOW('v', BASE_VIDIOC_PRIVATE + 42, int) - -#define ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO \ - _IOR('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_info) - -#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 43, unsigned int) - -#define ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE \ - _IOR('v', BASE_VIDIOC_PRIVATE + 43, unsigned int) - -#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT \ - _IOW('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_lut) - -#define ATOMISP_IOC_G_INVALID_FRAME_NUM \ - _IOR('v', BASE_VIDIOC_PRIVATE + 44, unsigned int) - -#define ATOMISP_IOC_S_ARRAY_RESOLUTION \ - _IOW('v', BASE_VIDIOC_PRIVATE + 45, struct atomisp_resolution) - -/* for depth mode sensor frame sync compensation */ -#define ATOMISP_IOC_G_DEPTH_SYNC_COMP \ - _IOR('v', BASE_VIDIOC_PRIVATE + 46, unsigned int) - -#define ATOMISP_IOC_S_SENSOR_EE_CONFIG \ - _IOW('v', BASE_VIDIOC_PRIVATE + 47, unsigned int) - -#define ATOMISP_IOC_S_SENSOR_RUNMODE \ - _IOW('v', BASE_VIDIOC_PRIVATE + 48, struct atomisp_s_runmode) - -#define ATOMISP_IOC_G_UPDATE_EXPOSURE \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 49, struct atomisp_update_exposure) - -/* - * Reserved ioctls. We have customer implementing it internally. - * We can't use both numbers to not cause ABI conflict. 
- * Anyway, those ioctls are hacks and not implemented by us: - * - * #define ATOMISP_IOC_G_SENSOR_REG \ - * _IOW('v', BASE_VIDIOC_PRIVATE + 55, struct atomisp_sensor_regs) - * #define ATOMISP_IOC_S_SENSOR_REG \ - * _IOW('v', BASE_VIDIOC_PRIVATE + 56, struct atomisp_sensor_regs) - */ - -/* ISP Private control IDs */ -#define V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION \ - (V4L2_CID_PRIVATE_BASE + 0) -#define V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC \ - (V4L2_CID_PRIVATE_BASE + 1) -#define V4L2_CID_ATOMISP_VIDEO_STABLIZATION \ - (V4L2_CID_PRIVATE_BASE + 2) -#define V4L2_CID_ATOMISP_FIXED_PATTERN_NR \ - (V4L2_CID_PRIVATE_BASE + 3) -#define V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION \ - (V4L2_CID_PRIVATE_BASE + 4) -#define V4L2_CID_ATOMISP_LOW_LIGHT \ - (V4L2_CID_PRIVATE_BASE + 5) - -/* Camera class: - * Exposure, Flash and privacy (indicator) light controls, to be upstreamed */ -#define V4L2_CID_CAMERA_LASTP1 (V4L2_CID_CAMERA_CLASS_BASE + 1024) - -#define V4L2_CID_FOCAL_ABSOLUTE (V4L2_CID_CAMERA_LASTP1 + 0) -#define V4L2_CID_FNUMBER_ABSOLUTE (V4L2_CID_CAMERA_LASTP1 + 1) -#define V4L2_CID_FNUMBER_RANGE (V4L2_CID_CAMERA_LASTP1 + 2) - -/* Flash related CIDs, see also: - * http://linuxtv.org/downloads/v4l-dvb-apis/extended-controls.html\ - * #flash-controls */ - -/* Request a number of flash-exposed frames. The frame status can be - * found in the reserved field in the v4l2_buffer struct. */ -#define V4L2_CID_REQUEST_FLASH (V4L2_CID_CAMERA_LASTP1 + 3) -/* Query flash driver status. See enum atomisp_flash_status above. 
*/ -#define V4L2_CID_FLASH_STATUS (V4L2_CID_CAMERA_LASTP1 + 5) -/* Set the flash mode (see enum atomisp_flash_mode) */ -#define V4L2_CID_FLASH_MODE (V4L2_CID_CAMERA_LASTP1 + 10) - -/* VCM slew control */ -#define V4L2_CID_VCM_SLEW (V4L2_CID_CAMERA_LASTP1 + 11) -/* VCM step time */ -#define V4L2_CID_VCM_TIMEING (V4L2_CID_CAMERA_LASTP1 + 12) - -/* Query Focus Status */ -#define V4L2_CID_FOCUS_STATUS (V4L2_CID_CAMERA_LASTP1 + 14) - -/* Query sensor's binning factor */ -#define V4L2_CID_BIN_FACTOR_HORZ (V4L2_CID_CAMERA_LASTP1 + 15) -#define V4L2_CID_BIN_FACTOR_VERT (V4L2_CID_CAMERA_LASTP1 + 16) - -/* number of frames to skip at stream start */ -#define V4L2_CID_G_SKIP_FRAMES (V4L2_CID_CAMERA_LASTP1 + 17) - -/* Query sensor's 2A status */ -#define V4L2_CID_2A_STATUS (V4L2_CID_CAMERA_LASTP1 + 18) -#define V4L2_2A_STATUS_AE_READY (1 << 0) -#define V4L2_2A_STATUS_AWB_READY (1 << 1) - -#define V4L2_CID_FMT_AUTO (V4L2_CID_CAMERA_LASTP1 + 19) - -#define V4L2_CID_RUN_MODE (V4L2_CID_CAMERA_LASTP1 + 20) -#define ATOMISP_RUN_MODE_VIDEO 1 -#define ATOMISP_RUN_MODE_STILL_CAPTURE 2 -#define ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE 3 -#define ATOMISP_RUN_MODE_PREVIEW 4 -#define ATOMISP_RUN_MODE_SDV 5 - -#define V4L2_CID_ENABLE_VFPP (V4L2_CID_CAMERA_LASTP1 + 21) -#define V4L2_CID_ATOMISP_CONTINUOUS_MODE (V4L2_CID_CAMERA_LASTP1 + 22) -#define V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE \ - (V4L2_CID_CAMERA_LASTP1 + 23) -#define V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER \ - (V4L2_CID_CAMERA_LASTP1 + 24) - -#define V4L2_CID_VFPP (V4L2_CID_CAMERA_LASTP1 + 25) -#define ATOMISP_VFPP_ENABLE 0 -#define ATOMISP_VFPP_DISABLE_SCALER 1 -#define ATOMISP_VFPP_DISABLE_LOWLAT 2 - -/* Query real flash status register value */ -#define V4L2_CID_FLASH_STATUS_REGISTER (V4L2_CID_CAMERA_LASTP1 + 26) - -#define V4L2_CID_START_ZSL_CAPTURE (V4L2_CID_CAMERA_LASTP1 + 28) -/* Lock and unlock raw buffer */ -#define V4L2_CID_ENABLE_RAW_BUFFER_LOCK (V4L2_CID_CAMERA_LASTP1 + 29) - -#define V4L2_CID_DEPTH_MODE 
(V4L2_CID_CAMERA_LASTP1 + 30) - -#define V4L2_CID_EXPOSURE_ZONE_NUM (V4L2_CID_CAMERA_LASTP1 + 31) -/* Disable digital zoom */ -#define V4L2_CID_DISABLE_DZ (V4L2_CID_CAMERA_LASTP1 + 32) - -#define V4L2_CID_TEST_PATTERN_COLOR_R (V4L2_CID_CAMERA_LASTP1 + 33) -#define V4L2_CID_TEST_PATTERN_COLOR_GR (V4L2_CID_CAMERA_LASTP1 + 34) -#define V4L2_CID_TEST_PATTERN_COLOR_GB (V4L2_CID_CAMERA_LASTP1 + 35) -#define V4L2_CID_TEST_PATTERN_COLOR_B (V4L2_CID_CAMERA_LASTP1 + 36) - -#define V4L2_CID_ATOMISP_SELECT_ISP_VERSION (V4L2_CID_CAMERA_LASTP1 + 38) - -#define V4L2_BUF_FLAG_BUFFER_INVALID 0x0400 -#define V4L2_BUF_FLAG_BUFFER_VALID 0x0800 - -#define V4L2_BUF_TYPE_VIDEO_CAPTURE_ION (V4L2_BUF_TYPE_PRIVATE + 1024) - -#define V4L2_EVENT_ATOMISP_3A_STATS_READY (V4L2_EVENT_PRIVATE_START + 1) -#define V4L2_EVENT_ATOMISP_METADATA_READY (V4L2_EVENT_PRIVATE_START + 2) -#define V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE (V4L2_EVENT_PRIVATE_START + 3) -#define V4L2_EVENT_ATOMISP_ACC_COMPLETE (V4L2_EVENT_PRIVATE_START + 4) -#define V4L2_EVENT_ATOMISP_PAUSE_BUFFER (V4L2_EVENT_PRIVATE_START + 5) -#define V4L2_EVENT_ATOMISP_CSS_RESET (V4L2_EVENT_PRIVATE_START + 6) -/* Nonstandard color effects for V4L2_CID_COLORFX */ -enum { - V4L2_COLORFX_SKIN_WHITEN_LOW = 1001, - V4L2_COLORFX_SKIN_WHITEN_HIGH = 1002, - V4L2_COLORFX_WARM = 1003, - V4L2_COLORFX_COLD = 1004, - V4L2_COLORFX_WASHED = 1005, - V4L2_COLORFX_RED = 1006, - V4L2_COLORFX_GREEN = 1007, - V4L2_COLORFX_BLUE = 1008, - V4L2_COLORFX_PINK = 1009, - V4L2_COLORFX_YELLOW = 1010, - V4L2_COLORFX_PURPLE = 1011, -}; - -#endif /* _ATOM_ISP_H */ -#endif /* CSS15*/ diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h deleted file mode 100644 index c52c56a17e17..000000000000 --- a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel MID SoC Camera Imaging ISP subsystem. 
- * - * Copyright (c) 2014 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#ifndef ATOMISP_GMIN_PLATFORM_H_ -#define ATOMISP_GMIN_PLATFORM_H_ - -#include "atomisp_platform.h" - -int atomisp_register_i2c_module(struct v4l2_subdev *subdev, - struct camera_sensor_platform_data *plat_data, - enum intel_v4l2_subdev_type type); -struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter, - struct i2c_board_info *board_info); -int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd); -int gmin_get_var_int(struct device *dev, const char *var, int def); -int camera_sensor_csi(struct v4l2_subdev *sd, u32 port, - u32 lanes, u32 format, u32 bayer_order, int flag); -struct camera_sensor_platform_data *gmin_camera_platform_data( - struct v4l2_subdev *subdev, - enum atomisp_input_format csi_format, - enum atomisp_bayer_order csi_bayer); - -int atomisp_gmin_register_vcm_control(struct camera_vcm_control *); - -#endif diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h deleted file mode 100644 index aa5e294e7b7d..000000000000 --- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#ifndef ATOMISP_PLATFORM_H_ -#define ATOMISP_PLATFORM_H_ - -#include -#include -#include -#include "atomisp.h" - -#define MAX_SENSORS_PER_PORT 4 -#define MAX_STREAMS_PER_CHANNEL 2 - -#define CAMERA_MODULE_ID_LEN 64 - -enum atomisp_bayer_order { - atomisp_bayer_order_grbg, - atomisp_bayer_order_rggb, - atomisp_bayer_order_bggr, - atomisp_bayer_order_gbrg -}; - -enum atomisp_input_stream_id { - ATOMISP_INPUT_STREAM_GENERAL = 0, - ATOMISP_INPUT_STREAM_CAPTURE = 0, - ATOMISP_INPUT_STREAM_POSTVIEW, - ATOMISP_INPUT_STREAM_PREVIEW, - ATOMISP_INPUT_STREAM_VIDEO, - ATOMISP_INPUT_STREAM_NUM -}; - -enum atomisp_input_format { - ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY,/* 8 bits per subpixel (legacy) */ - ATOMISP_INPUT_FORMAT_YUV420_8, /* 8 bits per subpixel */ - ATOMISP_INPUT_FORMAT_YUV420_10,/* 10 bits per subpixel */ - ATOMISP_INPUT_FORMAT_YUV420_16,/* 16 bits per subpixel */ - ATOMISP_INPUT_FORMAT_YUV422_8, /* UYVY..UVYV, 8 bits per subpixel */ - ATOMISP_INPUT_FORMAT_YUV422_10,/* UYVY..UVYV, 10 bits per subpixel */ - ATOMISP_INPUT_FORMAT_YUV422_16,/* UYVY..UVYV, 16 bits per subpixel */ - ATOMISP_INPUT_FORMAT_RGB_444, /* BGR..BGR, 4 bits per subpixel */ - ATOMISP_INPUT_FORMAT_RGB_555, /* BGR..BGR, 5 bits per subpixel */ - ATOMISP_INPUT_FORMAT_RGB_565, /* BGR..BGR, 5 bits B and R, 6 bits G */ - ATOMISP_INPUT_FORMAT_RGB_666, /* BGR..BGR, 6 bits per subpixel */ - ATOMISP_INPUT_FORMAT_RGB_888, /* BGR..BGR, 8 bits per subpixel */ - ATOMISP_INPUT_FORMAT_RAW_6, /* RAW data, 6 bits per pixel */ - ATOMISP_INPUT_FORMAT_RAW_7, /* RAW data, 7 bits per pixel */ - ATOMISP_INPUT_FORMAT_RAW_8, /* RAW data, 8 bits per pixel */ - ATOMISP_INPUT_FORMAT_RAW_10, /* RAW data, 10 bits per pixel */ - ATOMISP_INPUT_FORMAT_RAW_12, /* RAW 
data, 12 bits per pixel */ - ATOMISP_INPUT_FORMAT_RAW_14, /* RAW data, 14 bits per pixel */ - ATOMISP_INPUT_FORMAT_RAW_16, /* RAW data, 16 bits per pixel */ - ATOMISP_INPUT_FORMAT_BINARY_8, /* Binary byte stream. */ - - /* CSI2-MIPI specific format: Generic short packet data. It is used to - * keep the timing information for the opening/closing of shutters, - * triggering of flashes and etc. - */ - ATOMISP_INPUT_FORMAT_GENERIC_SHORT1, /* Generic Short Packet Code 1 */ - ATOMISP_INPUT_FORMAT_GENERIC_SHORT2, /* Generic Short Packet Code 2 */ - ATOMISP_INPUT_FORMAT_GENERIC_SHORT3, /* Generic Short Packet Code 3 */ - ATOMISP_INPUT_FORMAT_GENERIC_SHORT4, /* Generic Short Packet Code 4 */ - ATOMISP_INPUT_FORMAT_GENERIC_SHORT5, /* Generic Short Packet Code 5 */ - ATOMISP_INPUT_FORMAT_GENERIC_SHORT6, /* Generic Short Packet Code 6 */ - ATOMISP_INPUT_FORMAT_GENERIC_SHORT7, /* Generic Short Packet Code 7 */ - ATOMISP_INPUT_FORMAT_GENERIC_SHORT8, /* Generic Short Packet Code 8 */ - - /* CSI2-MIPI specific format: YUV data. - */ - ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT, /* YUV420 8-bit (Chroma Shifted - Pixel Sampling) */ - ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT, /* YUV420 8-bit (Chroma Shifted - Pixel Sampling) */ - - /* CSI2-MIPI specific format: Generic long packet data - */ - ATOMISP_INPUT_FORMAT_EMBEDDED, /* Embedded 8-bit non Image Data */ - - /* CSI2-MIPI specific format: User defined byte-based data. For example, - * the data transmitter (e.g. the SoC sensor) can keep the JPEG data as - * the User Defined Data Type 4 and the MPEG data as the - * User Defined Data Type 7. 
- */ - ATOMISP_INPUT_FORMAT_USER_DEF1, /* User defined 8-bit data type 1 */ - ATOMISP_INPUT_FORMAT_USER_DEF2, /* User defined 8-bit data type 2 */ - ATOMISP_INPUT_FORMAT_USER_DEF3, /* User defined 8-bit data type 3 */ - ATOMISP_INPUT_FORMAT_USER_DEF4, /* User defined 8-bit data type 4 */ - ATOMISP_INPUT_FORMAT_USER_DEF5, /* User defined 8-bit data type 5 */ - ATOMISP_INPUT_FORMAT_USER_DEF6, /* User defined 8-bit data type 6 */ - ATOMISP_INPUT_FORMAT_USER_DEF7, /* User defined 8-bit data type 7 */ - ATOMISP_INPUT_FORMAT_USER_DEF8, /* User defined 8-bit data type 8 */ -}; - -#define N_ATOMISP_INPUT_FORMAT (ATOMISP_INPUT_FORMAT_USER_DEF8 + 1) - - - -enum intel_v4l2_subdev_type { - RAW_CAMERA = 1, - SOC_CAMERA = 2, - CAMERA_MOTOR = 3, - LED_FLASH = 4, - XENON_FLASH = 5, - FILE_INPUT = 6, - TEST_PATTERN = 7, -}; - -struct intel_v4l2_subdev_id { - char name[17]; - enum intel_v4l2_subdev_type type; - enum atomisp_camera_port port; -}; - -struct intel_v4l2_subdev_i2c_board_info { - struct i2c_board_info board_info; - int i2c_adapter_id; -}; - -struct intel_v4l2_subdev_table { - struct intel_v4l2_subdev_i2c_board_info v4l2_subdev; - enum intel_v4l2_subdev_type type; - enum atomisp_camera_port port; - struct v4l2_subdev *subdev; -}; - -struct atomisp_platform_data { - struct intel_v4l2_subdev_table *subdevs; -}; - -/* Describe the capacities of one single sensor. */ -struct atomisp_sensor_caps { - /* The number of streams this sensor can output. */ - int stream_num; - bool is_slave; -}; - -/* Describe the capacities of sensors connected to one camera port. */ -struct atomisp_camera_caps { - /* The number of sensors connected to this camera port. */ - int sensor_num; - /* The capacities of each sensor. */ - struct atomisp_sensor_caps sensor[MAX_SENSORS_PER_PORT]; - /* Define whether stream control is required for multiple streams. 
*/ - bool multi_stream_ctrl; -}; - -/* - * Sensor of external ISP can send multiple steams with different mipi data - * type in the same virtual channel. This information needs to come from the - * sensor or external ISP - */ -struct atomisp_isys_config_info { - u8 input_format; - u16 width; - u16 height; -}; - -struct atomisp_input_stream_info { - enum atomisp_input_stream_id stream; - u8 enable; - /* Sensor driver fills ch_id with the id - of the virtual channel. */ - u8 ch_id; - /* Tells how many streams in this virtual channel. If 0 ignore rest - * and the input format will be from mipi_info */ - u8 isys_configs; - /* - * if more isys_configs is more than 0, sensor needs to configure the - * input format differently. width and height can be 0. If width and - * height is not zero, then the corresponsing data needs to be set - */ - struct atomisp_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL]; -}; - -struct camera_vcm_control; -struct camera_vcm_ops { - int (*power_up)(struct v4l2_subdev *sd, struct camera_vcm_control *vcm); - int (*power_down)(struct v4l2_subdev *sd, - struct camera_vcm_control *vcm); - int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc, - struct camera_vcm_control *vcm); - int (*g_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl, - struct camera_vcm_control *vcm); - int (*s_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl, - struct camera_vcm_control *vcm); -}; - -struct camera_vcm_control { - char camera_module[CAMERA_MODULE_ID_LEN]; - struct camera_vcm_ops *ops; - struct list_head list; -}; - -struct camera_sensor_platform_data { - int (*flisclk_ctrl)(struct v4l2_subdev *subdev, int flag); - int (*csi_cfg)(struct v4l2_subdev *subdev, int flag); - - /* - * New G-Min power and GPIO interface to control individual - * lines as implemented on all known camera modules. 
- */ - int (*gpio0_ctrl)(struct v4l2_subdev *subdev, int on); - int (*gpio1_ctrl)(struct v4l2_subdev *subdev, int on); - int (*v1p8_ctrl)(struct v4l2_subdev *subdev, int on); - int (*v2p8_ctrl)(struct v4l2_subdev *subdev, int on); - int (*v1p2_ctrl)(struct v4l2_subdev *subdev, int on); - struct camera_vcm_control * (*get_vcm_ctrl)(struct v4l2_subdev *subdev, - char *module_id); -}; - -struct camera_mipi_info { - enum atomisp_camera_port port; - unsigned int num_lanes; - enum atomisp_input_format input_format; - enum atomisp_bayer_order raw_bayer_order; - struct atomisp_sensor_mode_data data; - enum atomisp_input_format metadata_format; - uint32_t metadata_width; - uint32_t metadata_height; - const uint32_t *metadata_effective_width; -}; - -extern const struct atomisp_platform_data *atomisp_get_platform_data(void); -extern const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void); - -/* API from old platform_camera.h, new CPUID implementation */ -#define __IS_SOC(x) (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && \ - boot_cpu_data.x86 == 6 && \ - boot_cpu_data.x86_model == x) - -#define IS_MFLD __IS_SOC(0x27) -#define IS_BYT __IS_SOC(0x37) -#define IS_CHT __IS_SOC(0x4C) -#define IS_MOFD __IS_SOC(0x5A) - -#endif /* ATOMISP_PLATFORM_H_ */ diff --git a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h deleted file mode 100644 index 8988b37943b3..000000000000 --- a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#ifndef __LIBMSRLISTHELPER_H__ -#define __LIBMSRLISTHELPER_H__ - -struct i2c_client; -struct firmware; - -extern int load_msr_list(struct i2c_client *client, char *path, - const struct firmware **fw); -extern int apply_msr_data(struct i2c_client *client, const struct firmware *fw); -extern void release_msr_list(struct i2c_client *client, - const struct firmware *fw); - - -#endif /* ifndef __LIBMSRLISTHELPER_H__ */ diff --git a/drivers/staging/media/atomisp/include/media/lm3554.h b/drivers/staging/media/atomisp/include/media/lm3554.h deleted file mode 100644 index 9276ce44d907..000000000000 --- a/drivers/staging/media/atomisp/include/media/lm3554.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * include/media/lm3554.h - * - * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#ifndef _LM3554_H_ -#define _LM3554_H_ - -#include -#include - -#define LM3554_ID 3554 - -#define v4l2_queryctrl_entry_integer(_id, _name,\ - _minimum, _maximum, _step, \ - _default_value, _flags) \ - {\ - .id = (_id), \ - .type = V4L2_CTRL_TYPE_INTEGER, \ - .name = _name, \ - .minimum = (_minimum), \ - .maximum = (_maximum), \ - .step = (_step), \ - .default_value = (_default_value),\ - .flags = (_flags),\ - } -#define v4l2_queryctrl_entry_boolean(_id, _name,\ - _default_value, _flags) \ - {\ - .id = (_id), \ - .type = V4L2_CTRL_TYPE_BOOLEAN, \ - .name = _name, \ - .minimum = 0, \ - .maximum = 1, \ - .step = 1, \ - .default_value = (_default_value),\ - .flags = (_flags),\ - } - -#define s_ctrl_id_entry_integer(_id, _name, \ - _minimum, _maximum, _step, \ - _default_value, _flags, \ - _s_ctrl, _g_ctrl) \ - {\ - .qc = v4l2_queryctrl_entry_integer(_id, _name,\ - _minimum, _maximum, _step,\ - _default_value, _flags), \ - .s_ctrl = _s_ctrl, \ - .g_ctrl = _g_ctrl, \ - } - -#define s_ctrl_id_entry_boolean(_id, _name, \ - _default_value, _flags, \ - _s_ctrl, _g_ctrl) \ - {\ - .qc = v4l2_queryctrl_entry_boolean(_id, _name,\ - _default_value, _flags), \ - .s_ctrl = _s_ctrl, \ - .g_ctrl = _g_ctrl, \ - } - -/* Value settings for Flash Time-out Duration*/ -#define LM3554_DEFAULT_TIMEOUT 512U -#define LM3554_MIN_TIMEOUT 32U -#define LM3554_MAX_TIMEOUT 1024U -#define LM3554_TIMEOUT_STEPSIZE 32U - -/* Flash modes */ -#define LM3554_MODE_SHUTDOWN 0 -#define LM3554_MODE_INDICATOR 1 -#define LM3554_MODE_TORCH 2 -#define LM3554_MODE_FLASH 3 - -/* timer delay time */ -#define LM3554_TIMER_DELAY 5 - -/* Percentage <-> value macros */ -#define LM3554_MIN_PERCENT 0U -#define LM3554_MAX_PERCENT 100U -#define LM3554_CLAMP_PERCENTAGE(val) \ - clamp(val, LM3554_MIN_PERCENT, LM3554_MAX_PERCENT) - -#define LM3554_VALUE_TO_PERCENT(v, step) (((((unsigned long)(v))*(step))+50)/100) -#define LM3554_PERCENT_TO_VALUE(p, step) (((((unsigned long)(p))*100)+(step>>1))/(step)) - -/* 
Product specific limits - * TODO: get these from platform data */ -#define LM3554_FLASH_MAX_LVL 0x0F /* 1191mA */ - -/* Flash brightness, input is percentage, output is [0..15] */ -#define LM3554_FLASH_STEP \ - ((100ul*(LM3554_MAX_PERCENT)+((LM3554_FLASH_MAX_LVL)>>1))/((LM3554_FLASH_MAX_LVL))) -#define LM3554_FLASH_DEFAULT_BRIGHTNESS \ - LM3554_VALUE_TO_PERCENT(13, LM3554_FLASH_STEP) - -/* Torch brightness, input is percentage, output is [0..7] */ -#define LM3554_TORCH_STEP 1250 -#define LM3554_TORCH_DEFAULT_BRIGHTNESS \ - LM3554_VALUE_TO_PERCENT(2, LM3554_TORCH_STEP) - -/* Indicator brightness, input is percentage, output is [0..3] */ -#define LM3554_INDICATOR_STEP 2500 -#define LM3554_INDICATOR_DEFAULT_BRIGHTNESS \ - LM3554_VALUE_TO_PERCENT(1, LM3554_INDICATOR_STEP) - -/* - * lm3554_platform_data - Flash controller platform data - */ -struct lm3554_platform_data { - int gpio_torch; - int gpio_strobe; - int gpio_reset; - - unsigned int current_limit; - unsigned int envm_tx2; - unsigned int tx2_polarity; -}; - -#endif /* _LM3554_H_ */ - diff --git a/drivers/staging/media/atomisp/pci/Kconfig b/drivers/staging/media/atomisp/pci/Kconfig deleted file mode 100644 index 41f116d52060..000000000000 --- a/drivers/staging/media/atomisp/pci/Kconfig +++ /dev/null @@ -1,14 +0,0 @@ -# -# Kconfig for ISP driver -# - -config VIDEO_ATOMISP - tristate "Intel Atom Image Signal Processor Driver" - depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API - select IOSF_MBI - select VIDEOBUF_VMALLOC - ---help--- - Say Y here if your platform supports Intel Atom SoC - camera imaging subsystem. 
- To compile this driver as a module, choose M here: the - module will be called atomisp diff --git a/drivers/staging/media/atomisp/pci/Makefile b/drivers/staging/media/atomisp/pci/Makefile deleted file mode 100644 index 61ad1fbb1ee6..000000000000 --- a/drivers/staging/media/atomisp/pci/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for ISP driver -# - -obj-$(CONFIG_VIDEO_ATOMISP) += atomisp2/ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile deleted file mode 100644 index 7fead5fc9a7d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile +++ /dev/null @@ -1,349 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -atomisp-objs += \ - atomisp_drvfs.o \ - atomisp_file.o \ - css2400/sh_css_mipi.o \ - css2400/runtime/pipeline/src/pipeline.o \ - css2400/runtime/spctrl/src/spctrl.o \ - css2400/runtime/rmgr/src/rmgr.o \ - css2400/runtime/rmgr/src/rmgr_vbuf.o \ - css2400/runtime/isp_param/src/isp_param.o \ - css2400/runtime/inputfifo/src/inputfifo.o \ - css2400/runtime/queue/src/queue_access.o \ - css2400/runtime/queue/src/queue.o \ - css2400/runtime/frame/src/frame.o \ - css2400/runtime/eventq/src/eventq.o \ - css2400/runtime/binary/src/binary.o \ - css2400/runtime/timer/src/timer.o \ - css2400/runtime/isys/src/csi_rx_rmgr.o \ - css2400/runtime/isys/src/isys_stream2mmio_rmgr.o \ - css2400/runtime/isys/src/virtual_isys.o \ - css2400/runtime/isys/src/rx.o \ - css2400/runtime/isys/src/isys_dma_rmgr.o \ - css2400/runtime/isys/src/ibuf_ctrl_rmgr.o \ - css2400/runtime/isys/src/isys_init.o \ - css2400/runtime/bufq/src/bufq.o \ - css2400/runtime/ifmtr/src/ifmtr.o \ - css2400/runtime/debug/src/ia_css_debug.o \ - css2400/runtime/event/src/event.o \ - css2400/sh_css_sp.o \ - css2400/css_2400_system/spmem_dump.o \ - css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.o \ - css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.o \ - 
css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.o \ - css2400/sh_css_stream_format.o \ - css2400/sh_css_hrt.o \ - css2400/sh_css_properties.o \ - css2400/memory_realloc.o \ - css2400/hive_isp_css_shared/host/tag.o \ - css2400/sh_css_params.o \ - css2400/sh_css.o \ - css2400/isp/kernels/hdr/ia_css_hdr.host.o \ - css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.o \ - css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.o \ - css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.o \ - css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.o \ - css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.o \ - css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.o \ - css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.o \ - css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.o \ - css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.o \ - css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.o \ - css2400/isp/kernels/output/output_1.0/ia_css_output.host.o \ - css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.o \ - css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.o \ - css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.o \ - css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.o \ - css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.o \ - css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.o \ - css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.o \ - css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.o \ - css2400/isp/kernels/dpc2/ia_css_dpc2.host.o \ - css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.o \ - css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.o \ - css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.o \ - css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.o \ - css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.o \ - css2400/isp/kernels/bh/bh_2/ia_css_bh.host.o \ - css2400/isp/kernels/bnlm/ia_css_bnlm.host.o \ - css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.o \ - css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.o \ - 
css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.o \ - css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.o \ - css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.o \ - css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.o \ - css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.o \ - css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.o \ - css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.o \ - css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.o \ - css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.o \ - css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.o \ - css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.o \ - css2400/isp/kernels/de/de_1.0/ia_css_de.host.o \ - css2400/isp/kernels/de/de_2/ia_css_de2.host.o \ - css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.o \ - css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.o \ - css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.o \ - css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.o \ - css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.o \ - css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.o \ - css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.o \ - css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.o \ - css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.o \ - css2400/isp/kernels/ob/ob2/ia_css_ob2.host.o \ - css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.o \ - css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.o \ - css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.o \ - css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.o \ - css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.o \ - css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.o \ - css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.o \ - css2400/sh_css_pipe.o \ - css2400/ia_css_device_access.o \ - css2400/sh_css_host_data.o \ - css2400/sh_css_mmu.o \ - css2400/sh_css_metadata.o \ - css2400/base/refcount/src/refcount.o \ - css2400/base/circbuf/src/circbuf.o \ - css2400/camera/pipe/src/pipe_binarydesc.o \ - css2400/camera/pipe/src/pipe_util.o 
\ - css2400/camera/pipe/src/pipe_stagedesc.o \ - css2400/camera/util/src/util.o \ - css2400/sh_css_metrics.o \ - css2400/sh_css_version.o \ - css2400/ia_css_memory_access.o \ - css2400/sh_css_param_shading.o \ - css2400/sh_css_morph.o \ - css2400/sh_css_firmware.o \ - css2400/hive_isp_css_common/host/isp.o \ - css2400/hive_isp_css_common/host/gdc.o \ - css2400/hive_isp_css_common/host/sp.o \ - css2400/hive_isp_css_common/host/vmem.o \ - css2400/hive_isp_css_common/host/dma.o \ - css2400/hive_isp_css_common/host/input_formatter.o \ - css2400/hive_isp_css_common/host/debug.o \ - css2400/hive_isp_css_common/host/hmem.o \ - css2400/hive_isp_css_common/host/gp_device.o \ - css2400/hive_isp_css_common/host/fifo_monitor.o \ - css2400/hive_isp_css_common/host/gp_timer.o \ - css2400/hive_isp_css_common/host/irq.o \ - css2400/hive_isp_css_common/host/input_system.o \ - css2400/hive_isp_css_common/host/timed_ctrl.o \ - css2400/hive_isp_css_common/host/mmu.o \ - css2400/hive_isp_css_common/host/event_fifo.o \ - css2400/sh_css_param_dvs.o \ - css2400/sh_css_shading.o \ - css2400/sh_css_stream.o \ - mmu/sh_mmu_mrfld.o \ - mmu/isp_mmu.o \ - atomisp_acc.o \ - atomisp_compat_css20.o \ - atomisp_fops.o \ - atomisp_subdev.o \ - atomisp_ioctl.o \ - atomisp_compat_ioctl32.o \ - atomisp_csi2.o \ - atomisp_cmd.o \ - atomisp_tpg.o \ - hmm/hmm_vm.o \ - hmm/hmm.o \ - hmm/hmm_bo.o \ - hmm/hmm_reserved_pool.o \ - hmm/hmm_dynamic_pool.o \ - hrt/hive_isp_css_mm_hrt.o \ - atomisp_v4l2.o - -# These will be needed when clean merge CHT support nicely into the driver -# Keep them here handy for when we get to that point -# - -obj-cht= \ - css2400/css_2401_system/spmem_dump.o \ - css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.o \ - css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.o \ - css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.o \ - css2400/css_2401_csi2p_system/spmem_dump.o \ - 
css2400/css_2401_csi2p_system/host/isys_stream2mmio.o \ - css2400/css_2401_csi2p_system/host/ibuf_ctrl.o \ - css2400/css_2401_csi2p_system/host/isys_irq.o \ - css2400/css_2401_csi2p_system/host/isys_dma.o \ - css2400/css_2401_csi2p_system/host/csi_rx.o \ - css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.o \ - css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.o \ - css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.o \ - -# -I$(atomisp)/css2400/css_2401_csi2p_system/ \ -# -I$(atomisp)/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ \ -# -I$(atomisp)/css2400/css_2401_csi2p_system/host/ \ -# -I$(atomisp)/css2400/css_2401_csi2p_system/hrt/ \ -# -I$(atomisp)/css2400/css_2401_system/hive_isp_css_2401_system_generated/ \ -# -I$(atomisp)/css2400/css_2401_system/hrt/ \ - - - -obj-$(CONFIG_VIDEO_ATOMISP) += atomisp.o - -atomisp = $(srctree)/drivers/staging/media/atomisp/pci/atomisp2 - -INCLUDES += \ - -I$(atomisp)/ \ - -I$(atomisp)/css2400/ \ - -I$(atomisp)/hrt/ \ - -I$(atomisp)/include/ \ - -I$(atomisp)/include/hmm/ \ - -I$(atomisp)/include/mmu/ \ - -I$(atomisp)/css2400/base/circbuf/interface/ \ - -I$(atomisp)/css2400/base/refcount/interface/ \ - -I$(atomisp)/css2400/camera/pipe/interface/ \ - -I$(atomisp)/css2400/camera/util/interface/ \ - -I$(atomisp)/css2400/css_2400_system/ \ - -I$(atomisp)/css2400/css_2400_system/hive_isp_css_2400_system_generated/ \ - -I$(atomisp)/css2400/css_2400_system/hrt/ \ - -I$(atomisp)/css2400/hive_isp_css_common/ \ - -I$(atomisp)/css2400/hive_isp_css_common/host/ \ - -I$(atomisp)/css2400/hive_isp_css_include/ \ - -I$(atomisp)/css2400/hive_isp_css_include/device_access/ \ - -I$(atomisp)/css2400/hive_isp_css_include/host/ \ - -I$(atomisp)/css2400/hive_isp_css_include/memory_access/ \ - -I$(atomisp)/css2400/hive_isp_css_shared/ \ - -I$(atomisp)/css2400/hive_isp_css_shared/host/ \ - 
-I$(atomisp)/css2400/isp/kernels/ \ - -I$(atomisp)/css2400/isp/kernels/aa/aa_2/ \ - -I$(atomisp)/css2400/isp/kernels/anr/anr_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/anr/anr_2/ \ - -I$(atomisp)/css2400/isp/kernels/bh/bh_2/ \ - -I$(atomisp)/css2400/isp/kernels/bnlm/ \ - -I$(atomisp)/css2400/isp/kernels/bnr/ \ - -I$(atomisp)/css2400/isp/kernels/bnr/bnr_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/bnr/bnr2_2/ \ - -I$(atomisp)/css2400/isp/kernels/cnr/ \ - -I$(atomisp)/css2400/isp/kernels/cnr/cnr_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/cnr/cnr_2/ \ - -I$(atomisp)/css2400/isp/kernels/conversion/ \ - -I$(atomisp)/css2400/isp/kernels/conversion/conversion_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/copy_output/ \ - -I$(atomisp)/css2400/isp/kernels/copy_output/copy_output_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/crop/ \ - -I$(atomisp)/css2400/isp/kernels/crop/crop_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/csc/ \ - -I$(atomisp)/css2400/isp/kernels/csc/csc_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/ctc/ \ - -I$(atomisp)/css2400/isp/kernels/ctc/ctc_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/ctc/ctc1_5/ \ - -I$(atomisp)/css2400/isp/kernels/ctc/ctc2/ \ - -I$(atomisp)/css2400/isp/kernels/de/ \ - -I$(atomisp)/css2400/isp/kernels/de/de_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/de/de_2/ \ - -I$(atomisp)/css2400/isp/kernels/dpc2/ \ - -I$(atomisp)/css2400/isp/kernels/dp/ \ - -I$(atomisp)/css2400/isp/kernels/dp/dp_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/dvs/ \ - -I$(atomisp)/css2400/isp/kernels/dvs/dvs_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/eed1_8/ \ - -I$(atomisp)/css2400/isp/kernels/fc/ \ - -I$(atomisp)/css2400/isp/kernels/fc/fc_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/fixedbds/ \ - -I$(atomisp)/css2400/isp/kernels/fixedbds/fixedbds_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/fpn/ \ - -I$(atomisp)/css2400/isp/kernels/fpn/fpn_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/gc/ \ - -I$(atomisp)/css2400/isp/kernels/gc/gc_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/gc/gc_2/ \ - 
-I$(atomisp)/css2400/isp/kernels/hdr/ \ - -I$(atomisp)/css2400/isp/kernels/io_ls/ \ - -I$(atomisp)/css2400/isp/kernels/io_ls/bayer_io_ls/ \ - -I$(atomisp)/css2400/isp/kernels/io_ls/common/ \ - -I$(atomisp)/css2400/isp/kernels/io_ls/yuv444_io_ls/ \ - -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/ \ - -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ \ - -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/common/ \ - -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ \ - -I$(atomisp)/css2400/isp/kernels/iterator/ \ - -I$(atomisp)/css2400/isp/kernels/iterator/iterator_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/macc/ \ - -I$(atomisp)/css2400/isp/kernels/macc/macc_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/macc/macc1_5/ \ - -I$(atomisp)/css2400/isp/kernels/norm/ \ - -I$(atomisp)/css2400/isp/kernels/norm/norm_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/ob/ \ - -I$(atomisp)/css2400/isp/kernels/ob/ob_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/ob/ob2/ \ - -I$(atomisp)/css2400/isp/kernels/output/ \ - -I$(atomisp)/css2400/isp/kernels/output/output_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/qplane/ \ - -I$(atomisp)/css2400/isp/kernels/qplane/qplane_2/ \ - -I$(atomisp)/css2400/isp/kernels/raw_aa_binning/ \ - -I$(atomisp)/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/raw/ \ - -I$(atomisp)/css2400/isp/kernels/raw/raw_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/ref/ \ - -I$(atomisp)/css2400/isp/kernels/ref/ref_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/s3a/ \ - -I$(atomisp)/css2400/isp/kernels/s3a/s3a_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/sc/ \ - -I$(atomisp)/css2400/isp/kernels/sc/sc_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/sdis/ \ - -I$(atomisp)/css2400/isp/kernels/sdis/common/ \ - -I$(atomisp)/css2400/isp/kernels/sdis/sdis_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/sdis/sdis_2/ \ - -I$(atomisp)/css2400/isp/kernels/tdf/ \ - -I$(atomisp)/css2400/isp/kernels/tdf/tdf_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/tnr/ \ - 
-I$(atomisp)/css2400/isp/kernels/tnr/tnr_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/tnr/tnr3/ \ - -I$(atomisp)/css2400/isp/kernels/uds/ \ - -I$(atomisp)/css2400/isp/kernels/uds/uds_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/vf/ \ - -I$(atomisp)/css2400/isp/kernels/vf/vf_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/wb/ \ - -I$(atomisp)/css2400/isp/kernels/wb/wb_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/xnr/ \ - -I$(atomisp)/css2400/isp/kernels/xnr/xnr_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/xnr/xnr_3.0/ \ - -I$(atomisp)/css2400/isp/kernels/ynr/ \ - -I$(atomisp)/css2400/isp/kernels/ynr/ynr_1.0/ \ - -I$(atomisp)/css2400/isp/kernels/ynr/ynr_2/ \ - -I$(atomisp)/css2400/isp/modes/interface/ \ - -I$(atomisp)/css2400/runtime/binary/interface/ \ - -I$(atomisp)/css2400/runtime/bufq/interface/ \ - -I$(atomisp)/css2400/runtime/debug/interface/ \ - -I$(atomisp)/css2400/runtime/event/interface/ \ - -I$(atomisp)/css2400/runtime/eventq/interface/ \ - -I$(atomisp)/css2400/runtime/frame/interface/ \ - -I$(atomisp)/css2400/runtime/ifmtr/interface/ \ - -I$(atomisp)/css2400/runtime/inputfifo/interface/ \ - -I$(atomisp)/css2400/runtime/isp_param/interface/ \ - -I$(atomisp)/css2400/runtime/isys/interface/ \ - -I$(atomisp)/css2400/runtime/isys/src/ \ - -I$(atomisp)/css2400/runtime/pipeline/interface/ \ - -I$(atomisp)/css2400/runtime/queue/interface/ \ - -I$(atomisp)/css2400/runtime/queue/src/ \ - -I$(atomisp)/css2400/runtime/rmgr/interface/ \ - -I$(atomisp)/css2400/runtime/spctrl/interface/ \ - -I$(atomisp)/css2400/runtime/tagger/interface/ - -ifeq ($(CONFIG_ION),y) -INCLUDES += -I$(srctree)/drivers/staging/android/ion -endif - -DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__ -#DEFINES += -DUSE_DYNAMIC_BIN -#DEFINES += -DISP_POWER_GATING -#DEFINES += -DUSE_INTERRUPTS -#DEFINES += -DUSE_SSSE3 -#DEFINES += -DPUNIT_CAMERA_BUSY -#DEFINES += -DUSE_KMEM_CACHE - -DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 -DEFINES += -DSYSTEM_hive_isp_css_2400_system 
-DISP2400 - -ccflags-y += $(INCLUDES) $(DEFINES) -fno-common - -# HACK! While this driver is in bad shape, don't enable several warnings -# that would be otherwise enabled with W=1 -ccflags-y += $(call cc-disable-warning, implicit-fallthrough) -ccflags-y += $(call cc-disable-warning, missing-prototypes) -ccflags-y += $(call cc-disable-warning, missing-declarations) -ccflags-y += $(call cc-disable-warning, suggest-attribute=format) -ccflags-y += $(call cc-disable-warning, unused-const-variable) -ccflags-y += $(call cc-disable-warning, unused-but-set-variable) diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h deleted file mode 100644 index 5d102a4f8aff..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2012 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef ATOMISP_REGS_H -#define ATOMISP_REGS_H - -/* common register definitions */ -#define PUNIT_PORT 0x04 -#define CCK_PORT 0x14 - -#define PCICMDSTS 0x01 -#define INTR 0x0f -#define MSI_CAPID 0x24 -#define MSI_ADDRESS 0x25 -#define MSI_DATA 0x26 -#define INTR_CTL 0x27 - -#define PCI_MSI_CAPID 0x90 -#define PCI_MSI_ADDR 0x94 -#define PCI_MSI_DATA 0x98 -#define PCI_INTERRUPT_CTRL 0x9C -#define PCI_I_CONTROL 0xfc - -/* MRFLD specific register definitions */ -#define MRFLD_CSI_AFE 0x39 -#define MRFLD_CSI_CONTROL 0x3a -#define MRFLD_CSI_RCOMP 0x3d - -#define MRFLD_PCI_PMCS 0x84 -#define MRFLD_PCI_CSI_ACCESS_CTRL_VIOL 0xd4 -#define MRFLD_PCI_CSI_AFE_HS_CONTROL 0xdc -#define MRFLD_PCI_CSI_AFE_RCOMP_CONTROL 0xe0 -#define MRFLD_PCI_CSI_CONTROL 0xe8 -#define MRFLD_PCI_CSI_AFE_TRIM_CONTROL 0xe4 -#define MRFLD_PCI_CSI_DEADLINE_CONTROL 0xec -#define MRFLD_PCI_CSI_RCOMP_CONTROL 0xf4 - -/* Select Arasan (legacy)/Intel input system */ -#define MRFLD_PCI_CSI_CONTROL_PARPATHEN BIT(24) -/* Enable CSI interface (ANN B0/K0) */ -#define MRFLD_PCI_CSI_CONTROL_CSI_READY BIT(25) - -/* - * Enables the combining of adjacent 32-byte read requests to the same - * cache line. When cleared, each 32-byte read request is sent as a - * separate request on the IB interface. - */ -#define MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING 0x1 - -/* - * Register: MRFLD_PCI_CSI_RCOMP_CONTROL - * If cleared, the high speed clock going to the digital logic is gated when - * RCOMP update is happening. The clock is gated for a minimum of 100 nsec. - * If this bit is set, then the high speed clock is not gated during the - * update cycle. - */ -#define MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE 0x800000 - -/* - * Enables the combining of adjacent 32-byte write requests to the same - * cache line. When cleared, each 32-byte write request is sent as a - * separate request on the IB interface. 
- */ -#define MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING 0x2 - -#define MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK 0xc - -#define MRFLD_PCI_CSI1_HSRXCLKTRIM 0x2 -#define MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT 16 -#define MRFLD_PCI_CSI2_HSRXCLKTRIM 0x3 -#define MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT 24 -#define MRFLD_PCI_CSI3_HSRXCLKTRIM 0x2 -#define MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT 28 -#define MRFLD_PCI_CSI_HSRXCLKTRIM_MASK 0xf - -/* - * This register is IUINT MMIO register, it is used to select the CSI - * receiver backend. - * 1: SH CSI backend - * 0: Arasan CSI backend - */ -#define MRFLD_CSI_RECEIVER_SELECTION_REG 0x8081c - -#define MRFLD_INTR_CLEAR_REG 0x50c -#define MRFLD_INTR_STATUS_REG 0x508 -#define MRFLD_INTR_ENABLE_REG 0x510 - -#define MRFLD_MAX_ZOOM_FACTOR 1024 - -/* MRFLD ISP POWER related */ -#define MRFLD_ISPSSPM0 0x39 -#define MRFLD_ISPSSPM0_ISPSSC_OFFSET 0 -#define MRFLD_ISPSSPM0_ISPSSS_OFFSET 24 -#define MRFLD_ISPSSPM0_ISPSSC_MASK 0x3 -#define MRFLD_ISPSSPM0_IUNIT_POWER_ON 0 -#define MRFLD_ISPSSPM0_IUNIT_POWER_OFF 0x3 -#define MRFLD_ISPSSDVFS 0x13F -#define MRFLD_BIT0 0x0001 -#define MRFLD_BIT1 0x0002 - -/* MRFLD CSI lane configuration related */ -#define MRFLD_PORT_CONFIG_NUM 8 -#define MRFLD_PORT_NUM 3 -#define MRFLD_PORT1_ENABLE_SHIFT 0 -#define MRFLD_PORT2_ENABLE_SHIFT 1 -#define MRFLD_PORT3_ENABLE_SHIFT 2 -#define MRFLD_PORT1_LANES_SHIFT 3 -#define MRFLD_PORT2_LANES_SHIFT 7 -#define MRFLD_PORT3_LANES_SHIFT 8 -#define MRFLD_PORT_CONFIG_MASK 0x000f03ff -#define MRFLD_PORT_CONFIGCODE_SHIFT 16 -#define MRFLD_ALL_CSI_PORTS_OFF_MASK 0x7 - -#define CHV_PORT3_LANES_SHIFT 9 -#define CHV_PORT_CONFIG_MASK 0x1f07ff - -#define ISPSSPM1 0x3a -#define ISP_FREQ_STAT_MASK (0x1f << ISP_FREQ_STAT_OFFSET) -#define ISP_REQ_FREQ_MASK 0x1f -#define ISP_FREQ_VALID_MASK (0x1 << ISP_FREQ_VALID_OFFSET) -#define ISP_FREQ_STAT_OFFSET 0x18 -#define ISP_REQ_GUAR_FREQ_OFFSET 0x8 -#define ISP_REQ_FREQ_OFFSET 0x0 -#define ISP_FREQ_VALID_OFFSET 0x7 -#define ISP_FREQ_RULE_ANY 0x0 - -#define 
ISP_FREQ_457MHZ 0x1C9 -#define ISP_FREQ_400MHZ 0x190 -#define ISP_FREQ_356MHZ 0x164 -#define ISP_FREQ_320MHZ 0x140 -#define ISP_FREQ_266MHZ 0x10a -#define ISP_FREQ_200MHZ 0xc8 -#define ISP_FREQ_100MHZ 0x64 - -#define HPLL_FREQ_800MHZ 0x320 -#define HPLL_FREQ_1600MHZ 0x640 -#define HPLL_FREQ_2000MHZ 0x7D0 - -#define CCK_FUSE_REG_0 0x08 -#define CCK_FUSE_HPLL_FREQ_MASK 0x03 - -#if defined(ISP2401) -#define ISP_FREQ_MAX ISP_FREQ_320MHZ -#else -#define ISP_FREQ_MAX ISP_FREQ_400MHZ -#endif - -/* ISP2401 CSI2+ receiver delay settings */ -#define CSI2_PORT_A_BASE 0xC0000 -#define CSI2_PORT_B_BASE 0xC2000 -#define CSI2_PORT_C_BASE 0xC4000 - -#define CSI2_LANE_CL_BASE 0x418 -#define CSI2_LANE_D0_BASE 0x420 -#define CSI2_LANE_D1_BASE 0x428 -#define CSI2_LANE_D2_BASE 0x430 -#define CSI2_LANE_D3_BASE 0x438 - -#define CSI2_REG_RX_CSI_DLY_CNT_TERMEN 0 -#define CSI2_REG_RX_CSI_DLY_CNT_SETTLE 0x4 - -#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC0418 -#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC041C -#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC0420 -#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC0424 -#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC0428 -#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC042C -#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE2 0xC0430 -#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE2 0xC0434 -#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE3 0xC0438 -#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE3 0xC043C - -#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC2418 -#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC241C -#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC2420 -#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC2424 -#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC2428 -#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC242C - -#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC4418 -#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC441C -#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC4420 
-#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC4424 -#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC4428 -#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC442C - -#define DMA_BURST_SIZE_REG 0xCD408 - -#define ISP_DFS_TRY_TIMES 2 - -#endif /* ATOMISP_REGS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c deleted file mode 100644 index 7ebcebd80b77..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c +++ /dev/null @@ -1,604 +0,0 @@ -/* - * Support for Clovertrail PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2012 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -/* - * This file implements loadable acceleration firmware API, - * including ioctls to map and unmap acceleration parameters and buffers. - */ - -#include -#include - -#include "atomisp_acc.h" -#include "atomisp_internal.h" -#include "atomisp_compat.h" -#include "atomisp_cmd.h" - -#include "hrt/hive_isp_css_mm_hrt.h" -#include "memory_access/memory_access.h" -#include "ia_css.h" - -static const struct { - unsigned int flag; - enum atomisp_css_pipe_id pipe_id; -} acc_flag_to_pipe[] = { - { ATOMISP_ACC_FW_LOAD_FL_PREVIEW, CSS_PIPE_ID_PREVIEW }, - { ATOMISP_ACC_FW_LOAD_FL_COPY, CSS_PIPE_ID_COPY }, - { ATOMISP_ACC_FW_LOAD_FL_VIDEO, CSS_PIPE_ID_VIDEO }, - { ATOMISP_ACC_FW_LOAD_FL_CAPTURE, CSS_PIPE_ID_CAPTURE }, - { ATOMISP_ACC_FW_LOAD_FL_ACC, CSS_PIPE_ID_ACC } -}; - -/* - * Allocate struct atomisp_acc_fw along with space for firmware. 
- * The returned struct atomisp_acc_fw is cleared (firmware region is not). - */ -static struct atomisp_acc_fw *acc_alloc_fw(unsigned int fw_size) -{ - struct atomisp_acc_fw *acc_fw; - - acc_fw = kzalloc(sizeof(*acc_fw), GFP_KERNEL); - if (!acc_fw) - return NULL; - - acc_fw->fw = vmalloc(fw_size); - if (!acc_fw->fw) { - kfree(acc_fw); - return NULL; - } - - return acc_fw; -} - -static void acc_free_fw(struct atomisp_acc_fw *acc_fw) -{ - vfree(acc_fw->fw); - kfree(acc_fw); -} - -static struct atomisp_acc_fw * -acc_get_fw(struct atomisp_sub_device *asd, unsigned int handle) -{ - struct atomisp_acc_fw *acc_fw; - - list_for_each_entry(acc_fw, &asd->acc.fw, list) - if (acc_fw->handle == handle) - return acc_fw; - - return NULL; -} - -static struct atomisp_map *acc_get_map(struct atomisp_sub_device *asd, - unsigned long css_ptr, size_t length) -{ - struct atomisp_map *atomisp_map; - - list_for_each_entry(atomisp_map, &asd->acc.memory_maps, list) { - if (atomisp_map->ptr == css_ptr && - atomisp_map->length == length) - return atomisp_map; - } - return NULL; -} - -static int acc_stop_acceleration(struct atomisp_sub_device *asd) -{ - int ret; - - ret = atomisp_css_stop_acc_pipe(asd); - atomisp_css_destroy_acc_pipe(asd); - - return ret; -} - -void atomisp_acc_cleanup(struct atomisp_device *isp) -{ - int i; - - for (i = 0; i < isp->num_of_streams; i++) - ida_destroy(&isp->asd[i].acc.ida); -} - -void atomisp_acc_release(struct atomisp_sub_device *asd) -{ - struct atomisp_acc_fw *acc_fw, *ta; - struct atomisp_map *atomisp_map, *tm; - - /* Stop acceleration if already running */ - if (asd->acc.pipeline) - acc_stop_acceleration(asd); - - /* Unload all loaded acceleration binaries */ - list_for_each_entry_safe(acc_fw, ta, &asd->acc.fw, list) { - list_del(&acc_fw->list); - ida_remove(&asd->acc.ida, acc_fw->handle); - acc_free_fw(acc_fw); - } - - /* Free all mapped memory blocks */ - list_for_each_entry_safe(atomisp_map, tm, &asd->acc.memory_maps, list) { - 
list_del(&atomisp_map->list); - hmm_free(atomisp_map->ptr); - kfree(atomisp_map); - } -} - -int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd, - struct atomisp_acc_fw_load_to_pipe *user_fw) -{ - static const unsigned int pipeline_flags = - ATOMISP_ACC_FW_LOAD_FL_PREVIEW | ATOMISP_ACC_FW_LOAD_FL_COPY | - ATOMISP_ACC_FW_LOAD_FL_VIDEO | - ATOMISP_ACC_FW_LOAD_FL_CAPTURE | ATOMISP_ACC_FW_LOAD_FL_ACC; - - struct atomisp_acc_fw *acc_fw; - int handle; - - if (!user_fw->data || user_fw->size < sizeof(*acc_fw->fw)) - return -EINVAL; - - /* Binary has to be enabled at least for one pipeline */ - if (!(user_fw->flags & pipeline_flags)) - return -EINVAL; - - /* We do not support other flags yet */ - if (user_fw->flags & ~pipeline_flags) - return -EINVAL; - - if (user_fw->type < ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT || - user_fw->type > ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE) - return -EINVAL; - - if (asd->acc.pipeline || asd->acc.extension_mode) - return -EBUSY; - - acc_fw = acc_alloc_fw(user_fw->size); - if (!acc_fw) - return -ENOMEM; - - if (copy_from_user(acc_fw->fw, user_fw->data, user_fw->size)) { - acc_free_fw(acc_fw); - return -EFAULT; - } - - if (!ida_pre_get(&asd->acc.ida, GFP_KERNEL) || - ida_get_new_above(&asd->acc.ida, 1, &handle)) { - acc_free_fw(acc_fw); - return -ENOSPC; - } - - user_fw->fw_handle = handle; - acc_fw->handle = handle; - acc_fw->flags = user_fw->flags; - acc_fw->type = user_fw->type; - acc_fw->fw->handle = handle; - - /* - * correct isp firmware type in order ISP firmware can be appended - * to correct pipe properly - */ - if (acc_fw->fw->type == ia_css_isp_firmware) { - static const int type_to_css[] = { - [ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT] = - IA_CSS_ACC_OUTPUT, - [ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER] = - IA_CSS_ACC_VIEWFINDER, - [ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE] = - IA_CSS_ACC_STANDALONE, - }; - acc_fw->fw->info.isp.type = type_to_css[acc_fw->type]; - } - - list_add_tail(&acc_fw->list, &asd->acc.fw); - return 0; -} - -int 
atomisp_acc_load(struct atomisp_sub_device *asd, - struct atomisp_acc_fw_load *user_fw) -{ - struct atomisp_acc_fw_load_to_pipe ltp = {0}; - int r; - - ltp.flags = ATOMISP_ACC_FW_LOAD_FL_ACC; - ltp.type = ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE; - ltp.size = user_fw->size; - ltp.data = user_fw->data; - r = atomisp_acc_load_to_pipe(asd, <p); - user_fw->fw_handle = ltp.fw_handle; - return r; -} - -int atomisp_acc_unload(struct atomisp_sub_device *asd, unsigned int *handle) -{ - struct atomisp_acc_fw *acc_fw; - - if (asd->acc.pipeline || asd->acc.extension_mode) - return -EBUSY; - - acc_fw = acc_get_fw(asd, *handle); - if (!acc_fw) - return -EINVAL; - - list_del(&acc_fw->list); - ida_remove(&asd->acc.ida, acc_fw->handle); - acc_free_fw(acc_fw); - - return 0; -} - -int atomisp_acc_start(struct atomisp_sub_device *asd, unsigned int *handle) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_acc_fw *acc_fw; - int ret; - unsigned int nbin; - - if (asd->acc.pipeline || asd->acc.extension_mode) - return -EBUSY; - - /* Invalidate caches. 
FIXME: should flush only necessary buffers */ - wbinvd(); - - ret = atomisp_css_create_acc_pipe(asd); - if (ret) - return ret; - - nbin = 0; - list_for_each_entry(acc_fw, &asd->acc.fw, list) { - if (*handle != 0 && *handle != acc_fw->handle) - continue; - - if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE) - continue; - - /* Add the binary into the pipeline */ - ret = atomisp_css_load_acc_binary(asd, acc_fw->fw, nbin); - if (ret < 0) { - dev_err(isp->dev, "acc_load_binary failed\n"); - goto err_stage; - } - - ret = atomisp_css_set_acc_parameters(acc_fw); - if (ret < 0) { - dev_err(isp->dev, "acc_set_parameters failed\n"); - goto err_stage; - } - nbin++; - } - if (nbin < 1) { - /* Refuse creating pipelines with no binaries */ - dev_err(isp->dev, "%s: no acc binary available\n", __func__); - ret = -EINVAL; - goto err_stage; - } - - ret = atomisp_css_start_acc_pipe(asd); - if (ret) { - dev_err(isp->dev, "%s: atomisp_acc_start_acc_pipe failed\n", - __func__); - goto err_stage; - } - - return 0; - -err_stage: - atomisp_css_destroy_acc_pipe(asd); - return ret; -} - -int atomisp_acc_wait(struct atomisp_sub_device *asd, unsigned int *handle) -{ - struct atomisp_device *isp = asd->isp; - int ret; - - if (!asd->acc.pipeline) - return -ENOENT; - - if (*handle && !acc_get_fw(asd, *handle)) - return -EINVAL; - - ret = atomisp_css_wait_acc_finish(asd); - if (acc_stop_acceleration(asd) == -EIO) { - atomisp_reset(isp); - return -EINVAL; - } - - return ret; -} - -void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle) -{ - struct v4l2_event event = { 0 }; - - event.type = V4L2_EVENT_ATOMISP_ACC_COMPLETE; - event.u.frame_sync.frame_sequence = atomic_read(&asd->sequence); - event.id = handle; - - v4l2_event_queue(asd->subdev.devnode, &event); -} - -int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map) -{ - struct atomisp_map *atomisp_map; - ia_css_ptr cssptr; - int pgnr; - - if (map->css_ptr) - return -EINVAL; - - if 
(asd->acc.pipeline) - return -EBUSY; - - if (map->user_ptr) { - /* Buffer to map must be page-aligned */ - if ((unsigned long)map->user_ptr & ~PAGE_MASK) { - dev_err(asd->isp->dev, - "%s: mapped buffer address %p is not page aligned\n", - __func__, map->user_ptr); - return -EINVAL; - } - - pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE); - cssptr = hrt_isp_css_mm_alloc_user_ptr(map->length, - map->user_ptr, - pgnr, HRT_USR_PTR, - (map->flags & ATOMISP_MAP_FLAG_CACHED)); - } else { - /* Allocate private buffer. */ - if (map->flags & ATOMISP_MAP_FLAG_CACHED) - cssptr = hrt_isp_css_mm_calloc_cached(map->length); - else - cssptr = hrt_isp_css_mm_calloc(map->length); - } - - if (!cssptr) - return -ENOMEM; - - atomisp_map = kmalloc(sizeof(*atomisp_map), GFP_KERNEL); - if (!atomisp_map) { - hmm_free(cssptr); - return -ENOMEM; - } - atomisp_map->ptr = cssptr; - atomisp_map->length = map->length; - list_add(&atomisp_map->list, &asd->acc.memory_maps); - - dev_dbg(asd->isp->dev, "%s: userptr %p, css_address 0x%x, size %d\n", - __func__, map->user_ptr, cssptr, map->length); - map->css_ptr = cssptr; - return 0; -} - -int atomisp_acc_unmap(struct atomisp_sub_device *asd, struct atomisp_acc_map *map) -{ - struct atomisp_map *atomisp_map; - - if (asd->acc.pipeline) - return -EBUSY; - - atomisp_map = acc_get_map(asd, map->css_ptr, map->length); - if (!atomisp_map) - return -EINVAL; - - list_del(&atomisp_map->list); - hmm_free(atomisp_map->ptr); - kfree(atomisp_map); - return 0; -} - -int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd, - struct atomisp_acc_s_mapped_arg *arg) -{ - struct atomisp_acc_fw *acc_fw; - - if (arg->memory >= ATOMISP_ACC_NR_MEMORY) - return -EINVAL; - - if (asd->acc.pipeline) - return -EBUSY; - - acc_fw = acc_get_fw(asd, arg->fw_handle); - if (!acc_fw) - return -EINVAL; - - if (arg->css_ptr != 0 || arg->length != 0) { - /* Unless the parameter is cleared, check that it exists */ - if (!acc_get_map(asd, arg->css_ptr, arg->length)) - return -EINVAL; - } - 
- acc_fw->args[arg->memory].length = arg->length; - acc_fw->args[arg->memory].css_ptr = arg->css_ptr; - - dev_dbg(asd->isp->dev, "%s: mem %d, address %p, size %ld\n", - __func__, arg->memory, (void *)arg->css_ptr, - (unsigned long)arg->length); - return 0; -} - -/* - * Appends the loaded acceleration binary extensions to the - * current ISP mode. Must be called just before sh_css_start(). - */ -int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) -{ - struct atomisp_acc_fw *acc_fw; - bool ext_loaded = false; - bool continuous = asd->continuous_mode->val && - asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW; - int ret = 0, i = -1; - struct atomisp_device *isp = asd->isp; - - if (asd->acc.pipeline || asd->acc.extension_mode) - return -EBUSY; - - /* Invalidate caches. FIXME: should flush only necessary buffers */ - wbinvd(); - - list_for_each_entry(acc_fw, &asd->acc.fw, list) { - if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && - acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) - continue; - - for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) { - /* QoS (ACC pipe) acceleration stages are currently - * allowed only in continuous mode. Skip them for - * all other modes. 
*/ - if (!continuous && - acc_flag_to_pipe[i].flag == - ATOMISP_ACC_FW_LOAD_FL_ACC) - continue; - - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - ret = atomisp_css_load_acc_extension(asd, - acc_fw->fw, - acc_flag_to_pipe[i].pipe_id, - acc_fw->type); - if (ret) - goto error; - - ext_loaded = true; - } - } - - ret = atomisp_css_set_acc_parameters(acc_fw); - if (ret < 0) - goto error; - } - - if (!ext_loaded) - return ret; - - ret = atomisp_css_update_stream(asd); - if (ret) { - dev_err(isp->dev, "%s: update stream failed.\n", __func__); - goto error; - } - - asd->acc.extension_mode = true; - return 0; - -error: - while (--i >= 0) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } - - list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) { - if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && - acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) - continue; - - for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) { - if (!continuous && - acc_flag_to_pipe[i].flag == - ATOMISP_ACC_FW_LOAD_FL_ACC) - continue; - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, - acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } - } - return ret; -} - -void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd) -{ - struct atomisp_acc_fw *acc_fw; - int i; - - if (!asd->acc.extension_mode) - return; - - list_for_each_entry_reverse(acc_fw, &asd->acc.fw, list) { - if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && - acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) - continue; - - for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, - acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } - } - - asd->acc.extension_mode = false; -} - -int atomisp_acc_set_state(struct atomisp_sub_device *asd, - struct atomisp_acc_state *arg) -{ - struct 
atomisp_acc_fw *acc_fw; - bool enable = (arg->flags & ATOMISP_STATE_FLAG_ENABLE) != 0; - struct ia_css_pipe *pipe; - enum ia_css_err r; - int i; - - if (!asd->acc.extension_mode) - return -EBUSY; - - if (arg->flags & ~ATOMISP_STATE_FLAG_ENABLE) - return -EINVAL; - - acc_fw = acc_get_fw(asd, arg->fw_handle); - if (!acc_fw) - return -EINVAL; - - if (enable) - wbinvd(); - - for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - pipe = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - pipes[acc_flag_to_pipe[i].pipe_id]; - r = ia_css_pipe_set_qos_ext_state(pipe, acc_fw->handle, - enable); - if (r != IA_CSS_SUCCESS) - return -EBADRQC; - } - } - - if (enable) - acc_fw->flags |= ATOMISP_ACC_FW_LOAD_FL_ENABLE; - else - acc_fw->flags &= ~ATOMISP_ACC_FW_LOAD_FL_ENABLE; - - return 0; -} - -int atomisp_acc_get_state(struct atomisp_sub_device *asd, - struct atomisp_acc_state *arg) -{ - struct atomisp_acc_fw *acc_fw; - - if (!asd->acc.extension_mode) - return -EBUSY; - - acc_fw = acc_get_fw(asd, arg->fw_handle); - if (!acc_fw) - return -EINVAL; - - arg->flags = acc_fw->flags; - - return 0; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h deleted file mode 100644 index 56386154643b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Support for Clovertrail PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2012 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __ATOMISP_ACC_H__ -#define __ATOMISP_ACC_H__ - -#include "../../include/linux/atomisp.h" -#include "atomisp_internal.h" - -#include "ia_css_types.h" - -/* - * Interface functions for AtomISP driver acceleration API implementation. - */ - -struct atomisp_sub_device; - -void atomisp_acc_cleanup(struct atomisp_device *isp); - -/* - * Free up any allocated resources. - * Must be called each time when the device is closed. - * Note that there isn't corresponding open() call; - * this function may be called sequentially multiple times. - * Must be called to free up resources before driver is unloaded. - */ -void atomisp_acc_release(struct atomisp_sub_device *asd); - -/* Load acceleration binary. DEPRECATED. */ -int atomisp_acc_load(struct atomisp_sub_device *asd, - struct atomisp_acc_fw_load *fw); - -/* Load acceleration binary with specified properties */ -int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd, - struct atomisp_acc_fw_load_to_pipe *fw); - -/* Unload specified acceleration binary */ -int atomisp_acc_unload(struct atomisp_sub_device *asd, - unsigned int *handle); - -/* - * Map a memory region into ISP memory space. - */ -int atomisp_acc_map(struct atomisp_sub_device *asd, - struct atomisp_acc_map *map); - -/* - * Unmap a mapped memory region. - */ -int atomisp_acc_unmap(struct atomisp_sub_device *asd, - struct atomisp_acc_map *map); - -/* - * Set acceleration binary argument to a previously mapped memory region. - */ -int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd, - struct atomisp_acc_s_mapped_arg *arg); - - -/* - * Start acceleration. - * Return immediately, acceleration is left running in background. - * Specify either acceleration binary or pipeline which to start. - */ -int atomisp_acc_start(struct atomisp_sub_device *asd, - unsigned int *handle); - -/* - * Wait until acceleration finishes. 
- * This MUST be called after each acceleration has been started. - * Specify either acceleration binary or pipeline handle. - */ -int atomisp_acc_wait(struct atomisp_sub_device *asd, - unsigned int *handle); - -/* - * Used by ISR to notify ACC stage finished. - * This is internally used and does not export as IOCTL. - */ -void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle); - -/* - * Appends the loaded acceleration binary extensions to the - * current ISP mode. Must be called just before atomisp_css_start(). - */ -int atomisp_acc_load_extensions(struct atomisp_sub_device *asd); - -/* - * Must be called after streaming is stopped: - * unloads any loaded acceleration extensions. - */ -void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd); - -/* - * Set acceleration firmware flags. - */ -int atomisp_acc_set_state(struct atomisp_sub_device *asd, - struct atomisp_acc_state *arg); - -/* - * Get acceleration firmware flags. - */ -int atomisp_acc_get_state(struct atomisp_sub_device *asd, - struct atomisp_acc_state *arg); - -#endif /* __ATOMISP_ACC_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c deleted file mode 100644 index 874165654850..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c +++ /dev/null @@ -1,6697 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * - */ -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#define CREATE_TRACE_POINTS -#include "atomisp_trace_event.h" - -#include "atomisp_cmd.h" -#include "atomisp_common.h" -#include "atomisp_fops.h" -#include "atomisp_internal.h" -#include "atomisp_ioctl.h" -#include "atomisp-regs.h" -#include "atomisp_tables.h" -#include "atomisp_acc.h" -#include "atomisp_compat.h" -#include "atomisp_subdev.h" -#include "atomisp_dfs_tables.h" - -#include "hrt/hive_isp_css_mm_hrt.h" - -#include "sh_css_hrt.h" -#include "sh_css_defs.h" -#include "system_global.h" -#include "sh_css_internal.h" -#include "sh_css_sp.h" -#include "gp_device.h" -#include "device_access.h" -#include "irq.h" - -#include "ia_css_types.h" -#include "ia_css_stream.h" -#include "error_support.h" -#include "hrt/bits.h" - - -/* We should never need to run the flash for more than 2 frames. - * At 15fps this means 133ms. We set the timeout a bit longer. - * Each flash driver is supposed to set its own timeout, but - * just in case someone else changed the timeout, we set it - * here to make sure we don't damage the flash hardware. */ -#define FLASH_TIMEOUT 800 /* ms */ - -union host { - struct { - void *kernel_ptr; - void __user *user_ptr; - int size; - } scalar; - struct { - void *hmm_ptr; - } ptr; -}; - -/* - * get sensor:dis71430/ov2720 related info from v4l2_subdev->priv data field. 
- * subdev->priv is set in mrst.c - */ -struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd) -{ - return (struct camera_mipi_info *)v4l2_get_subdev_hostdata(sd); -} - -/* - * get struct atomisp_video_pipe from v4l2 video_device - */ -struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev) -{ - return (struct atomisp_video_pipe *) - container_of(dev, struct atomisp_video_pipe, vdev); -} - -/* - * get struct atomisp_acc_pipe from v4l2 video_device - */ -struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev) -{ - return (struct atomisp_acc_pipe *) - container_of(dev, struct atomisp_acc_pipe, vdev); -} - -static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd) -{ - struct v4l2_subdev_frame_interval fi; - struct atomisp_device *isp = asd->isp; - - unsigned short fps = 0; - int ret; - - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, g_frame_interval, &fi); - - if (!ret && fi.interval.numerator) - fps = fi.interval.denominator / fi.interval.numerator; - - return fps; -} - -/* - * DFS progress is shown as follows: - * 1. Target frequency is calculated according to FPS/Resolution/ISP running - * mode. - * 2. Ratio is calculated using formula: 2 * HPLL / target frequency - 1 - * with proper rounding. - * 3. Set ratio to ISPFREQ40, 1 to FREQVALID and ISPFREQGUAR40 - * to 200MHz in ISPSSPM1. - * 4. Wait for FREQVALID to be cleared by P-Unit. - * 5. Wait for field ISPFREQSTAT40 in ISPSSPM1 turn to ratio set in 3. - */ -static int write_target_freq_to_hw(struct atomisp_device *isp, - unsigned int new_freq) -{ - unsigned int ratio, timeout, guar_ratio; - u32 isp_sspm1 = 0; - int i; - - if (!isp->hpll_freq) { - dev_err(isp->dev, "failed to get hpll_freq. 
no change to freq\n"); - return -EINVAL; - } - - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1); - if (isp_sspm1 & ISP_FREQ_VALID_MASK) { - dev_dbg(isp->dev, "clearing ISPSSPM1 valid bit.\n"); - iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1, - isp_sspm1 & ~(1 << ISP_FREQ_VALID_OFFSET)); - } - - ratio = (2 * isp->hpll_freq + new_freq / 2) / new_freq - 1; - guar_ratio = (2 * isp->hpll_freq + 200 / 2) / 200 - 1; - - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1); - isp_sspm1 &= ~(0x1F << ISP_REQ_FREQ_OFFSET); - - for (i = 0; i < ISP_DFS_TRY_TIMES; i++) { - iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1, - isp_sspm1 - | ratio << ISP_REQ_FREQ_OFFSET - | 1 << ISP_FREQ_VALID_OFFSET - | guar_ratio << ISP_REQ_GUAR_FREQ_OFFSET); - - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1); - timeout = 20; - while ((isp_sspm1 & ISP_FREQ_VALID_MASK) && timeout) { - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1); - dev_dbg(isp->dev, "waiting for ISPSSPM1 valid bit to be 0.\n"); - udelay(100); - timeout--; - } - - if (timeout != 0) - break; - } - - if (timeout == 0) { - dev_err(isp->dev, "DFS failed due to HW error.\n"); - return -EINVAL; - } - - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1); - timeout = 10; - while (((isp_sspm1 >> ISP_FREQ_STAT_OFFSET) != ratio) && timeout) { - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1); - dev_dbg(isp->dev, "waiting for ISPSSPM1 status bit to be 0x%x.\n", - new_freq); - udelay(100); - timeout--; - } - if (timeout == 0) { - dev_err(isp->dev, "DFS target freq is rejected by HW.\n"); - return -EINVAL; - } - - return 0; -} -int atomisp_freq_scaling(struct atomisp_device *isp, - enum atomisp_dfs_mode mode, - bool force) -{ - /* FIXME! 
Only use subdev[0] status yet */ - struct atomisp_sub_device *asd = &isp->asd[0]; - const struct atomisp_dfs_config *dfs; - unsigned int new_freq; - struct atomisp_freq_scaling_rule curr_rules; - int i, ret; - unsigned short fps = 0; - - if (isp->sw_contex.power_state != ATOM_ISP_POWER_UP) { - dev_err(isp->dev, "DFS cannot proceed due to no power.\n"); - return -EINVAL; - } - - if ((isp->pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) == - ATOMISP_PCI_DEVICE_SOC_CHT && ATOMISP_USE_YUVPP(asd)) - isp->dfs = &dfs_config_cht_soc; - - dfs = isp->dfs; - - if (dfs->lowest_freq == 0 || dfs->max_freq_at_vmin == 0 || - dfs->highest_freq == 0 || dfs->dfs_table_size == 0 || - !dfs->dfs_table) { - dev_err(isp->dev, "DFS configuration is invalid.\n"); - return -EINVAL; - } - - if (mode == ATOMISP_DFS_MODE_LOW) { - new_freq = dfs->lowest_freq; - goto done; - } - - if (mode == ATOMISP_DFS_MODE_MAX) { - new_freq = dfs->highest_freq; - goto done; - } - - fps = atomisp_get_sensor_fps(asd); - if (fps == 0) - return -EINVAL; - - curr_rules.width = asd->fmt[asd->capture_pad].fmt.width; - curr_rules.height = asd->fmt[asd->capture_pad].fmt.height; - curr_rules.fps = fps; - curr_rules.run_mode = asd->run_mode->val; - /* - * For continuous mode, we need to make the capture setting applied - * since preview mode, because there is no chance to do this when - * starting image capture. 
- */ - if (asd->continuous_mode->val) { - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) - curr_rules.run_mode = ATOMISP_RUN_MODE_SDV; - else - curr_rules.run_mode = - ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE; - } - - /* search for the target frequency by looping freq rules*/ - for (i = 0; i < dfs->dfs_table_size; i++) { - if (curr_rules.width != dfs->dfs_table[i].width && - dfs->dfs_table[i].width != ISP_FREQ_RULE_ANY) - continue; - if (curr_rules.height != dfs->dfs_table[i].height && - dfs->dfs_table[i].height != ISP_FREQ_RULE_ANY) - continue; - if (curr_rules.fps != dfs->dfs_table[i].fps && - dfs->dfs_table[i].fps != ISP_FREQ_RULE_ANY) - continue; - if (curr_rules.run_mode != dfs->dfs_table[i].run_mode && - dfs->dfs_table[i].run_mode != ISP_FREQ_RULE_ANY) - continue; - break; - } - - if (i == dfs->dfs_table_size) - new_freq = dfs->max_freq_at_vmin; - else - new_freq = dfs->dfs_table[i].isp_freq; - -done: - dev_dbg(isp->dev, "DFS target frequency=%d.\n", new_freq); - - if ((new_freq == isp->sw_contex.running_freq) && !force) - return 0; - - dev_dbg(isp->dev, "Programming DFS frequency to %d\n", new_freq); - - ret = write_target_freq_to_hw(isp, new_freq); - if (!ret) { - isp->sw_contex.running_freq = new_freq; - trace_ipu_pstate(new_freq, -1); - } - return ret; -} - -/* - * reset and restore ISP - */ -int atomisp_reset(struct atomisp_device *isp) -{ - /* Reset ISP by power-cycling it */ - int ret = 0; - - dev_dbg(isp->dev, "%s\n", __func__); - atomisp_css_suspend(isp); - ret = atomisp_runtime_suspend(isp->dev); - if (ret < 0) - dev_err(isp->dev, "atomisp_runtime_suspend failed, %d\n", ret); - ret = atomisp_mrfld_power_down(isp); - if (ret < 0) { - dev_err(isp->dev, "can not disable ISP power\n"); - } else { - ret = atomisp_mrfld_power_up(isp); - if (ret < 0) - dev_err(isp->dev, "can not enable ISP power\n"); - ret = atomisp_runtime_resume(isp->dev); - if (ret < 0) - dev_err(isp->dev, "atomisp_runtime_resume failed, %d\n", ret); - } - ret = atomisp_css_resume(isp); - 
if (ret) - isp->isp_fatal_error = true; - - return ret; -} - -/* - * interrupt disable functions - */ -static void disable_isp_irq(enum hrt_isp_css_irq irq) -{ - irq_disable_channel(IRQ0_ID, irq); - - if (irq != hrt_isp_css_irq_sp) - return; - - cnd_sp_irq_enable(SP0_ID, false); -} - -/* - * interrupt clean function - */ -static void clear_isp_irq(enum hrt_isp_css_irq irq) -{ - irq_clear_all(IRQ0_ID); -} - -void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev) -{ - u32 msg32; - u16 msg16; - - pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32); - msg32 |= 1 << MSI_ENABLE_BIT; - pci_write_config_dword(dev, PCI_MSI_CAPID, msg32); - - msg32 = (1 << INTR_IER) | (1 << INTR_IIR); - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32); - - pci_read_config_word(dev, PCI_COMMAND, &msg16); - msg16 |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | - PCI_COMMAND_INTX_DISABLE); - pci_write_config_word(dev, PCI_COMMAND, msg16); -} - -void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev) -{ - u32 msg32; - u16 msg16; - - pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32); - msg32 &= ~(1 << MSI_ENABLE_BIT); - pci_write_config_dword(dev, PCI_MSI_CAPID, msg32); - - msg32 = 0x0; - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32); - - pci_read_config_word(dev, PCI_COMMAND, &msg16); - msg16 &= ~(PCI_COMMAND_MASTER); - pci_write_config_word(dev, PCI_COMMAND, msg16); -} - -static void atomisp_sof_event(struct atomisp_sub_device *asd) -{ - struct v4l2_event event = {0}; - - event.type = V4L2_EVENT_FRAME_SYNC; - event.u.frame_sync.frame_sequence = atomic_read(&asd->sof_count); - - v4l2_event_queue(asd->subdev.devnode, &event); -} - -void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id) -{ - struct v4l2_event event = {0}; - - event.type = V4L2_EVENT_FRAME_END; - event.u.frame_sync.frame_sequence = exp_id; - - v4l2_event_queue(asd->subdev.devnode, &event); -} - -static void atomisp_3a_stats_ready_event(struct atomisp_sub_device 
*asd, uint8_t exp_id) -{ - struct v4l2_event event = {0}; - - event.type = V4L2_EVENT_ATOMISP_3A_STATS_READY; - event.u.frame_sync.frame_sequence = exp_id; - - v4l2_event_queue(asd->subdev.devnode, &event); -} - -static void atomisp_metadata_ready_event(struct atomisp_sub_device *asd, - enum atomisp_metadata_type md_type) -{ - struct v4l2_event event = {0}; - - event.type = V4L2_EVENT_ATOMISP_METADATA_READY; - event.u.data[0] = md_type; - - v4l2_event_queue(asd->subdev.devnode, &event); -} - -static void atomisp_reset_event(struct atomisp_sub_device *asd) -{ - struct v4l2_event event = {0}; - - event.type = V4L2_EVENT_ATOMISP_CSS_RESET; - - v4l2_event_queue(asd->subdev.devnode, &event); -} - - -static void print_csi_rx_errors(enum mipi_port_id port, - struct atomisp_device *isp) -{ - u32 infos = 0; - - atomisp_css_rx_get_irq_info(port, &infos); - - dev_err(isp->dev, "CSI Receiver port %d errors:\n", port); - if (infos & CSS_RX_IRQ_INFO_BUFFER_OVERRUN) - dev_err(isp->dev, " buffer overrun"); - if (infos & CSS_RX_IRQ_INFO_ERR_SOT) - dev_err(isp->dev, " start-of-transmission error"); - if (infos & CSS_RX_IRQ_INFO_ERR_SOT_SYNC) - dev_err(isp->dev, " start-of-transmission sync error"); - if (infos & CSS_RX_IRQ_INFO_ERR_CONTROL) - dev_err(isp->dev, " control error"); - if (infos & CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE) - dev_err(isp->dev, " 2 or more ECC errors"); - if (infos & CSS_RX_IRQ_INFO_ERR_CRC) - dev_err(isp->dev, " CRC mismatch"); - if (infos & CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID) - dev_err(isp->dev, " unknown error"); - if (infos & CSS_RX_IRQ_INFO_ERR_FRAME_SYNC) - dev_err(isp->dev, " frame sync error"); - if (infos & CSS_RX_IRQ_INFO_ERR_FRAME_DATA) - dev_err(isp->dev, " frame data error"); - if (infos & CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT) - dev_err(isp->dev, " data timeout"); - if (infos & CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC) - dev_err(isp->dev, " unknown escape command entry"); - if (infos & CSS_RX_IRQ_INFO_ERR_LINE_SYNC) - dev_err(isp->dev, " line sync error"); -} - -/* Clear 
irq reg */ -static void clear_irq_reg(struct atomisp_device *isp) -{ - u32 msg_ret; - pci_read_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, &msg_ret); - msg_ret |= 1 << INTR_IIR; - pci_write_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, msg_ret); -} - -static struct atomisp_sub_device * -__get_asd_from_port(struct atomisp_device *isp, enum mipi_port_id port) -{ - int i; - - /* Check which isp subdev to send eof */ - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - struct camera_mipi_info *mipi_info; - - mipi_info = atomisp_to_sensor_mipi_info( - isp->inputs[asd->input_curr].camera); - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED && - __get_mipi_port(isp, mipi_info->port) == port) { - return asd; - } - } - - return NULL; -} - -/* interrupt handling function*/ -irqreturn_t atomisp_isr(int irq, void *dev) -{ - struct atomisp_device *isp = (struct atomisp_device *)dev; - struct atomisp_sub_device *asd; - struct atomisp_css_event eof_event; - unsigned int irq_infos = 0; - unsigned long flags; - unsigned int i; - int err; - - spin_lock_irqsave(&isp->lock, flags); - if (isp->sw_contex.power_state != ATOM_ISP_POWER_UP || - !isp->css_initialized) { - spin_unlock_irqrestore(&isp->lock, flags); - return IRQ_HANDLED; - } - err = atomisp_css_irq_translate(isp, &irq_infos); - if (err) { - spin_unlock_irqrestore(&isp->lock, flags); - return IRQ_NONE; - } - - dev_dbg(isp->dev, "irq:0x%x\n", irq_infos); - - clear_irq_reg(isp); - - if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp)) - goto out_nowake; - - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - /* - * Current SOF only support one stream, so the SOF only valid - * either solely one stream is running - */ - if (irq_infos & CSS_IRQ_INFO_CSS_RECEIVER_SOF) { - atomic_inc(&asd->sof_count); - atomisp_sof_event(asd); - - /* If sequence_temp and sequence are the same - * 
there where no frames lost so we can increase - * sequence_temp. - * If not then processing of frame is still in progress - * and driver needs to keep old sequence_temp value. - * NOTE: There is assumption here that ISP will not - * start processing next frame from sensor before old - * one is completely done. */ - if (atomic_read(&asd->sequence) == atomic_read( - &asd->sequence_temp)) - atomic_set(&asd->sequence_temp, - atomic_read(&asd->sof_count)); - } - if (irq_infos & CSS_IRQ_INFO_EVENTS_READY) - atomic_set(&asd->sequence, - atomic_read(&asd->sequence_temp)); - } - - if (irq_infos & CSS_IRQ_INFO_CSS_RECEIVER_SOF) - irq_infos &= ~CSS_IRQ_INFO_CSS_RECEIVER_SOF; - - if ((irq_infos & CSS_IRQ_INFO_INPUT_SYSTEM_ERROR) || - (irq_infos & CSS_IRQ_INFO_IF_ERROR)) { - /* handle mipi receiver error */ - u32 rx_infos; - enum mipi_port_id port; - - for (port = MIPI_PORT0_ID; port <= MIPI_PORT2_ID; - port++) { - print_csi_rx_errors(port, isp); - atomisp_css_rx_get_irq_info(port, &rx_infos); - atomisp_css_rx_clear_irq_info(port, rx_infos); - } - } - - if (irq_infos & IA_CSS_IRQ_INFO_ISYS_EVENTS_READY) { - while (ia_css_dequeue_isys_event(&(eof_event.event)) == - IA_CSS_SUCCESS) { - /* EOF Event does not have the css_pipe returned */ - asd = __get_asd_from_port(isp, eof_event.event.port); - if (!asd) { - dev_err(isp->dev, "%s:no subdev.event:%d", __func__, - eof_event.event.type); - continue; - } - - atomisp_eof_event(asd, eof_event.event.exp_id); - dev_dbg(isp->dev, "%s EOF exp_id %d, asd %d\n", - __func__, eof_event.event.exp_id, asd->index); - } - - irq_infos &= ~IA_CSS_IRQ_INFO_ISYS_EVENTS_READY; - if (irq_infos == 0) - goto out_nowake; - } - - spin_unlock_irqrestore(&isp->lock, flags); - - return IRQ_WAKE_THREAD; - -out_nowake: - spin_unlock_irqrestore(&isp->lock, flags); - - return IRQ_HANDLED; -} - -void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd) -{ - int i; - memset(asd->s3a_bufs_in_css, 0, sizeof(asd->s3a_bufs_in_css)); - for (i = 0; i < 
ATOMISP_INPUT_STREAM_NUM; i++) - memset(asd->metadata_bufs_in_css[i], 0, - sizeof(asd->metadata_bufs_in_css[i])); - asd->dis_bufs_in_css = 0; - asd->video_out_capture.buffers_in_css = 0; - asd->video_out_vf.buffers_in_css = 0; - asd->video_out_preview.buffers_in_css = 0; - asd->video_out_video_capture.buffers_in_css = 0; -} - -#ifndef ISP2401 -bool atomisp_buffers_queued(struct atomisp_sub_device *asd) -#else -bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe) -#endif -{ -#ifndef ISP2401 - return asd->video_out_capture.buffers_in_css || - asd->video_out_vf.buffers_in_css || - asd->video_out_preview.buffers_in_css || - asd->video_out_video_capture.buffers_in_css ? - true : false; -#else - return pipe->buffers_in_css ? true : false; -#endif -} - -/* 0x100000 is the start of dmem inside SP */ -#define SP_DMEM_BASE 0x100000 - -void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr, - unsigned int size) -{ - unsigned int data = 0; - unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32)); - - dev_dbg(isp->dev, "atomisp_io_base:%p\n", atomisp_io_base); - dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__, - addr, size, size32); - if (size32 * 4 + addr > 0x4000) { - dev_err(isp->dev, "illegal size (%d) or addr (0x%x)\n", - size32, addr); - return; - } - addr += SP_DMEM_BASE; - do { - data = _hrt_master_port_uload_32(addr); - - dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data); - addr += sizeof(unsigned int); - size32 -= 1; - } while (size32 > 0); -} - -static struct videobuf_buffer *atomisp_css_frame_to_vbuf( - struct atomisp_video_pipe *pipe, struct atomisp_css_frame *frame) -{ - struct videobuf_vmalloc_memory *vm_mem; - struct atomisp_css_frame *handle; - int i; - - for (i = 0; pipe->capq.bufs[i]; i++) { - vm_mem = pipe->capq.bufs[i]->priv; - handle = vm_mem->vaddr; - if (handle && handle->data == frame->data) - return pipe->capq.bufs[i]; - } - - return NULL; -} - -static void get_buf_timestamp(struct timeval *tv) -{ 
- struct timespec ts; - ktime_get_ts(&ts); - tv->tv_sec = ts.tv_sec; - tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; -} - -static void atomisp_flush_video_pipe(struct atomisp_sub_device *asd, - struct atomisp_video_pipe *pipe) -{ - unsigned long irqflags; - int i; - - if (!pipe->users) - return; - - for (i = 0; pipe->capq.bufs[i]; i++) { - spin_lock_irqsave(&pipe->irq_lock, irqflags); - if (pipe->capq.bufs[i]->state == VIDEOBUF_ACTIVE || - pipe->capq.bufs[i]->state == VIDEOBUF_QUEUED) { - get_buf_timestamp(&pipe->capq.bufs[i]->ts); - pipe->capq.bufs[i]->field_count = - atomic_read(&asd->sequence) << 1; - dev_dbg(asd->isp->dev, "release buffers on device %s\n", - pipe->vdev.name); - if (pipe->capq.bufs[i]->state == VIDEOBUF_QUEUED) - list_del_init(&pipe->capq.bufs[i]->queue); - pipe->capq.bufs[i]->state = VIDEOBUF_ERROR; - wake_up(&pipe->capq.bufs[i]->done); - } - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - } -} - -/* Returns queued buffers back to video-core */ -void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd) -{ - atomisp_flush_video_pipe(asd, &asd->video_out_capture); - atomisp_flush_video_pipe(asd, &asd->video_out_vf); - atomisp_flush_video_pipe(asd, &asd->video_out_preview); - atomisp_flush_video_pipe(asd, &asd->video_out_video_capture); -} - -/* clean out the parameters that did not apply */ -void atomisp_flush_params_queue(struct atomisp_video_pipe *pipe) -{ - struct atomisp_css_params_with_list *param; - - while (!list_empty(&pipe->per_frame_params)) { - param = list_entry(pipe->per_frame_params.next, - struct atomisp_css_params_with_list, list); - list_del(¶m->list); - atomisp_free_css_parameters(¶m->params); - kvfree(param); - } -} - -/* Re-queue per-frame parameters */ -static void atomisp_recover_params_queue(struct atomisp_video_pipe *pipe) -{ - struct atomisp_css_params_with_list *param; - int i; - - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - param = pipe->frame_params[i]; - if (param) - list_add_tail(¶m->list, 
&pipe->per_frame_params); - pipe->frame_params[i] = NULL; - } - atomisp_handle_parameter_and_buffer(pipe); -} - -/* find atomisp_video_pipe with css pipe id, buffer type and atomisp run_mode */ -static struct atomisp_video_pipe *__atomisp_get_pipe( - struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id, - enum atomisp_css_buffer_type buf_type) -{ - struct atomisp_device *isp = asd->isp; - - if (css_pipe_id == CSS_PIPE_ID_COPY && - isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - switch (stream_id) { - case ATOMISP_INPUT_STREAM_PREVIEW: - return &asd->video_out_preview; - case ATOMISP_INPUT_STREAM_POSTVIEW: - return &asd->video_out_vf; - case ATOMISP_INPUT_STREAM_VIDEO: - return &asd->video_out_video_capture; - case ATOMISP_INPUT_STREAM_CAPTURE: - default: - return &asd->video_out_capture; - } - } - - /* video is same in online as in continuouscapture mode */ - if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { - /* - * Disable vf_pp and run CSS in still capture mode. In this - * mode, CSS does not cause extra latency with buffering, but - * scaling is not available. - */ - return &asd->video_out_capture; - } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { - /* - * Disable vf_pp and run CSS in video mode. This allows using - * ISP scaling but it has one frame delay due to CSS internal - * buffering. - */ - return &asd->video_out_video_capture; - } else if (css_pipe_id == CSS_PIPE_ID_YUVPP) { - /* - * to SOC camera, yuvpp pipe is run for capture/video/SDV/ZSL. 
- */ - if (asd->continuous_mode->val) { - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - /* SDV case */ - switch (buf_type) { - case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME: - return &asd->video_out_video_capture; - case CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: - return &asd->video_out_preview; - case CSS_BUFFER_TYPE_OUTPUT_FRAME: - return &asd->video_out_capture; - default: - return &asd->video_out_vf; - } - } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) { - /* ZSL case */ - switch (buf_type) { - case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME: - return &asd->video_out_preview; - case CSS_BUFFER_TYPE_OUTPUT_FRAME: - return &asd->video_out_capture; - default: - return &asd->video_out_vf; - } - } - } else if (buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME) { - switch (asd->run_mode->val) { - case ATOMISP_RUN_MODE_VIDEO: - return &asd->video_out_video_capture; - case ATOMISP_RUN_MODE_PREVIEW: - return &asd->video_out_preview; - default: - return &asd->video_out_capture; - } - } else if (buf_type == CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) { - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) - return &asd->video_out_preview; - else - return &asd->video_out_vf; - } - } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - /* For online video or SDV video pipe. */ - if (css_pipe_id == CSS_PIPE_ID_VIDEO || - css_pipe_id == CSS_PIPE_ID_COPY) { - if (buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME) - return &asd->video_out_video_capture; - return &asd->video_out_preview; - } - } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) { - /* For online preview or ZSL preview pipe. */ - if (css_pipe_id == CSS_PIPE_ID_PREVIEW || - css_pipe_id == CSS_PIPE_ID_COPY) - return &asd->video_out_preview; - } - /* For capture pipe. 
*/ - if (buf_type == CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) - return &asd->video_out_vf; - return &asd->video_out_capture; -} - -enum atomisp_metadata_type -atomisp_get_metadata_type(struct atomisp_sub_device *asd, - enum ia_css_pipe_id pipe_id) -{ - if (!asd->continuous_mode->val) - return ATOMISP_MAIN_METADATA; - - if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) /* online capture pipe */ - return ATOMISP_SEC_METADATA; - else - return ATOMISP_MAIN_METADATA; -} - -void atomisp_buf_done(struct atomisp_sub_device *asd, int error, - enum atomisp_css_buffer_type buf_type, - enum atomisp_css_pipe_id css_pipe_id, - bool q_buffers, enum atomisp_input_stream_id stream_id) -{ - struct videobuf_buffer *vb = NULL; - struct atomisp_video_pipe *pipe = NULL; - struct atomisp_css_buffer buffer; - bool requeue = false; - int err; - unsigned long irqflags; - struct atomisp_css_frame *frame = NULL; - struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp; - struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp; - struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp; - enum atomisp_metadata_type md_type; - struct atomisp_device *isp = asd->isp; - struct v4l2_control ctrl; -#ifdef ISP2401 - bool reset_wdt_timer = false; -#endif - - if ( - buf_type != CSS_BUFFER_TYPE_METADATA && - buf_type != CSS_BUFFER_TYPE_3A_STATISTICS && - buf_type != CSS_BUFFER_TYPE_DIS_STATISTICS && - buf_type != CSS_BUFFER_TYPE_OUTPUT_FRAME && - buf_type != CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME && - buf_type != CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME && - buf_type != CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME && - buf_type != CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) { - dev_err(isp->dev, "%s, unsupported buffer type: %d\n", - __func__, buf_type); - return; - } - - memset(&buffer, 0, sizeof(struct atomisp_css_buffer)); - buffer.css_buffer.type = buf_type; - err = atomisp_css_dequeue_buffer(asd, stream_id, css_pipe_id, - buf_type, &buffer); - if (err) { - dev_err(isp->dev, - "atomisp_css_dequeue_buffer failed: 0x%x\n", err); - return; - } - - /* need 
to know the atomisp pipe for frame buffers */ - pipe = __atomisp_get_pipe(asd, stream_id, css_pipe_id, buf_type); - if (pipe == NULL) { - dev_err(isp->dev, "error getting atomisp pipe\n"); - return; - } - - switch (buf_type) { - case CSS_BUFFER_TYPE_3A_STATISTICS: - list_for_each_entry_safe(s3a_buf, _s3a_buf_tmp, - &asd->s3a_stats_in_css, list) { - if (s3a_buf->s3a_data == - buffer.css_buffer.data.stats_3a) { - list_del_init(&s3a_buf->list); - list_add_tail(&s3a_buf->list, - &asd->s3a_stats_ready); - break; - } - } - - asd->s3a_bufs_in_css[css_pipe_id]--; - atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id); - dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n", - __func__, s3a_buf->s3a_data->exp_id); - break; - case CSS_BUFFER_TYPE_METADATA: - if (error) - break; - - md_type = atomisp_get_metadata_type(asd, css_pipe_id); - list_for_each_entry_safe(md_buf, _md_buf_tmp, - &asd->metadata_in_css[md_type], list) { - if (md_buf->metadata == - buffer.css_buffer.data.metadata) { - list_del_init(&md_buf->list); - list_add_tail(&md_buf->list, - &asd->metadata_ready[md_type]); - break; - } - } - asd->metadata_bufs_in_css[stream_id][css_pipe_id]--; - atomisp_metadata_ready_event(asd, md_type); - dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n", - __func__, md_buf->metadata->exp_id); - break; - case CSS_BUFFER_TYPE_DIS_STATISTICS: - list_for_each_entry_safe(dis_buf, _dis_buf_tmp, - &asd->dis_stats_in_css, list) { - if (dis_buf->dis_data == - buffer.css_buffer.data.stats_dvs) { - spin_lock_irqsave(&asd->dis_stats_lock, - irqflags); - list_del_init(&dis_buf->list); - list_add(&dis_buf->list, &asd->dis_stats); - asd->params.dis_proj_data_valid = true; - spin_unlock_irqrestore(&asd->dis_stats_lock, - irqflags); - break; - } - } - asd->dis_bufs_in_css--; - dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n", - __func__, dis_buf->dis_data->exp_id); - break; - case CSS_BUFFER_TYPE_VF_OUTPUT_FRAME: - case CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: -#ifdef 
ISP2401 - reset_wdt_timer = true; -#endif - pipe->buffers_in_css--; - frame = buffer.css_buffer.data.frame; - if (!frame) { - WARN_ON(1); - break; - } - if (!frame->valid) - error = true; - - /* FIXME: - * YUVPP doesn't set postview exp_id correctlly in SDV mode. - * This is a WORKAROUND to set exp_id. see HSDES-1503911606. - */ - if (IS_BYT && buf_type == CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME && - asd->continuous_mode->val && ATOMISP_USE_YUVPP(asd)) - frame->exp_id = (asd->postview_exp_id++) % - (ATOMISP_MAX_EXP_ID + 1); - - dev_dbg(isp->dev, "%s: vf frame with exp_id %d is ready\n", - __func__, frame->exp_id); - if (asd->params.flash_state == ATOMISP_FLASH_ONGOING) { - if (frame->flash_state - == CSS_FRAME_FLASH_STATE_PARTIAL) - dev_dbg(isp->dev, "%s thumb partially flashed\n", - __func__); - else if (frame->flash_state - == CSS_FRAME_FLASH_STATE_FULL) - dev_dbg(isp->dev, "%s thumb completely flashed\n", - __func__); - else - dev_dbg(isp->dev, "%s thumb no flash in this frame\n", - __func__); - } - vb = atomisp_css_frame_to_vbuf(pipe, frame); - WARN_ON(!vb); - if (vb) - pipe->frame_config_id[vb->i] = frame->isp_config_id; - if (css_pipe_id == IA_CSS_PIPE_ID_CAPTURE && - asd->pending_capture_request > 0) { - err = atomisp_css_offline_capture_configure(asd, - asd->params.offline_parm.num_captures, - asd->params.offline_parm.skip_frames, - asd->params.offline_parm.offset); -#ifndef ISP2401 - asd->pending_capture_request--; - dev_dbg(isp->dev, "Trigger capture again for new buffer. err=%d\n", - err); -#else - asd->pending_capture_request--; - asd->re_trigger_capture = false; - dev_dbg(isp->dev, "Trigger capture again for new buffer. 
err=%d\n", - err); - } else { - asd->re_trigger_capture = true; - } -#endif - } - break; - case CSS_BUFFER_TYPE_OUTPUT_FRAME: - case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME: -#ifdef ISP2401 - reset_wdt_timer = true; -#endif - pipe->buffers_in_css--; - frame = buffer.css_buffer.data.frame; - if (!frame) { - WARN_ON(1); - break; - } - - if (!frame->valid) - error = true; - - /* FIXME: - * YUVPP doesn't set preview exp_id correctlly in ZSL mode. - * This is a WORKAROUND to set exp_id. see HSDES-1503911606. - */ - if (IS_BYT && buf_type == CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME && - asd->continuous_mode->val && ATOMISP_USE_YUVPP(asd)) - frame->exp_id = (asd->preview_exp_id++) % - (ATOMISP_MAX_EXP_ID + 1); - - dev_dbg(isp->dev, "%s: main frame with exp_id %d is ready\n", - __func__, frame->exp_id); - vb = atomisp_css_frame_to_vbuf(pipe, frame); - if (!vb) { - WARN_ON(1); - break; - } - - /* free the parameters */ - if (pipe->frame_params[vb->i]) { - if (asd->params.dvs_6axis == - pipe->frame_params[vb->i]->params.dvs_6axis) - asd->params.dvs_6axis = NULL; - atomisp_free_css_parameters( - &pipe->frame_params[vb->i]->params); - kvfree(pipe->frame_params[vb->i]); - pipe->frame_params[vb->i] = NULL; - } - - pipe->frame_config_id[vb->i] = frame->isp_config_id; - ctrl.id = V4L2_CID_FLASH_MODE; - if (asd->params.flash_state == ATOMISP_FLASH_ONGOING) { - if (frame->flash_state - == CSS_FRAME_FLASH_STATE_PARTIAL) { - asd->frame_status[vb->i] = - ATOMISP_FRAME_STATUS_FLASH_PARTIAL; - dev_dbg(isp->dev, "%s partially flashed\n", - __func__); - } else if (frame->flash_state - == CSS_FRAME_FLASH_STATE_FULL) { - asd->frame_status[vb->i] = - ATOMISP_FRAME_STATUS_FLASH_EXPOSED; - asd->params.num_flash_frames--; - dev_dbg(isp->dev, "%s completely flashed\n", - __func__); - } else { - asd->frame_status[vb->i] = - ATOMISP_FRAME_STATUS_OK; - dev_dbg(isp->dev, - "%s no flash in this frame\n", - __func__); - } - - /* Check if flashing sequence is done */ - if (asd->frame_status[vb->i] == - 
ATOMISP_FRAME_STATUS_FLASH_EXPOSED) - asd->params.flash_state = ATOMISP_FLASH_DONE; - } else if (isp->flash) { - if (v4l2_g_ctrl(isp->flash->ctrl_handler, &ctrl) == - 0 && ctrl.value == ATOMISP_FLASH_MODE_TORCH) { - ctrl.id = V4L2_CID_FLASH_TORCH_INTENSITY; - if (v4l2_g_ctrl(isp->flash->ctrl_handler, &ctrl) - == 0 && ctrl.value > 0) { - asd->frame_status[vb->i] = - ATOMISP_FRAME_STATUS_FLASH_EXPOSED; - } else { - asd->frame_status[vb->i] = - ATOMISP_FRAME_STATUS_OK; - } - } else - asd->frame_status[vb->i] = - ATOMISP_FRAME_STATUS_OK; - } else { - asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK; - } - - asd->params.last_frame_status = asd->frame_status[vb->i]; - - if (asd->continuous_mode->val) { - if (css_pipe_id == CSS_PIPE_ID_PREVIEW || - css_pipe_id == CSS_PIPE_ID_VIDEO) { - asd->latest_preview_exp_id = frame->exp_id; - } else if (css_pipe_id == - CSS_PIPE_ID_CAPTURE) { - if (asd->run_mode->val == - ATOMISP_RUN_MODE_VIDEO) - dev_dbg(isp->dev, "SDV capture raw buffer id: %u\n", - frame->exp_id); - else - dev_dbg(isp->dev, "ZSL capture raw buffer id: %u\n", - frame->exp_id); - } - } - /* - * Only after enabled the raw buffer lock - * and in continuous mode. - * in preview/video pipe, each buffer will - * be locked automatically, so record it here. - */ - if (((css_pipe_id == CSS_PIPE_ID_PREVIEW) || - (css_pipe_id == CSS_PIPE_ID_VIDEO)) && - asd->enable_raw_buffer_lock->val && - asd->continuous_mode->val) { - atomisp_set_raw_buffer_bitmap(asd, frame->exp_id); - WARN_ON(frame->exp_id > ATOMISP_MAX_EXP_ID); - } - - if (asd->params.css_update_params_needed) { - atomisp_apply_css_parameters(asd, - &asd->params.css_param); - if (asd->params.css_param.update_flag.dz_config) - atomisp_css_set_dz_config(asd, - &asd->params.css_param.dz_config); - /* New global dvs 6axis config should be blocked - * here if there's a buffer with per-frame parameters - * pending in CSS frame buffer queue. 
- * This is to aviod zooming vibration since global - * parameters take effect immediately while - * per-frame parameters are taken after previous - * buffers in CSS got processed. - */ - if (asd->params.dvs_6axis) - atomisp_css_set_dvs_6axis(asd, - asd->params.dvs_6axis); - else - asd->params.css_update_params_needed = false; - /* The update flag should not be cleaned here - * since it is still going to be used to make up - * following per-frame parameters. - * This will introduce more copy work since each - * time when updating global parameters, the whole - * parameter set are applied. - * FIXME: A new set of parameter copy functions can - * be added to make up per-frame parameters based on - * solid structures stored in asd->params.css_param - * instead of using shadow pointers in update flag. - */ - atomisp_css_update_isp_params(asd); - } - break; - default: - break; - } - if (vb) { - get_buf_timestamp(&vb->ts); - vb->field_count = atomic_read(&asd->sequence) << 1; - /*mark videobuffer done for dequeue*/ - spin_lock_irqsave(&pipe->irq_lock, irqflags); - vb->state = !error ? VIDEOBUF_DONE : VIDEOBUF_ERROR; - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - - /* - * Frame capture done, wake up any process block on - * current active buffer - * possibly hold by videobuf_dqbuf() - */ - wake_up(&vb->done); - } -#ifdef ISP2401 - atomic_set(&pipe->wdt_count, 0); -#endif - /* - * Requeue should only be done for 3a and dis buffers. - * Queue/dequeue order will change if driver recycles image buffers. - */ - if (requeue) { - err = atomisp_css_queue_buffer(asd, - stream_id, css_pipe_id, - buf_type, &buffer); - if (err) - dev_err(isp->dev, "%s, q to css fails: %d\n", - __func__, err); - return; - } - if (!error && q_buffers) - atomisp_qbuffers_to_css(asd); -#ifdef ISP2401 - - /* If there are no buffers queued then - * delete wdt timer. 
*/ - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return; - if (!atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_stop_pipe(pipe, false); - else if (reset_wdt_timer) - /* SOF irq should not reset wdt timer. */ - atomisp_wdt_refresh_pipe(pipe, - ATOMISP_WDT_KEEP_CURRENT_DELAY); -#endif -} - -void atomisp_delayed_init_work(struct work_struct *work) -{ - struct atomisp_sub_device *asd = container_of(work, - struct atomisp_sub_device, - delayed_init_work); - /* - * to SOC camera, use yuvpp pipe and no support continuous mode. - */ - if (!ATOMISP_USE_YUVPP(asd)) { - struct v4l2_event event = {0}; - - atomisp_css_allocate_continuous_frames(false, asd); - atomisp_css_update_continuous_frames(asd); - - event.type = V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE; - v4l2_event_queue(asd->subdev.devnode, &event); - } - - /* signal streamon after delayed init is done */ - asd->delayed_init = ATOMISP_DELAYED_INIT_DONE; - complete(&asd->init_done); -} - -static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) -{ - enum atomisp_css_pipe_id css_pipe_id; - bool stream_restart[MAX_STREAM_NUM] = {0}; - bool depth_mode = false; - int i, ret, depth_cnt = 0; - - if (!isp->sw_contex.file_input) - atomisp_css_irq_enable(isp, - CSS_IRQ_INFO_CSS_RECEIVER_SOF, false); - - BUG_ON(isp->num_of_streams > MAX_STREAM_NUM); - - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - struct ia_css_pipeline *acc_pipeline; - struct ia_css_pipe *acc_pipe = NULL; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED && - !asd->stream_prepared) - continue; - - /* - * AtomISP::waitStageUpdate is blocked when WDT happens. - * By calling acc_done() for all loaded fw_handles, - * HAL will be unblocked. 
- */ - acc_pipe = asd->stream_env[i].pipes[CSS_PIPE_ID_ACC]; - if (acc_pipe != NULL) { - acc_pipeline = ia_css_pipe_get_pipeline(acc_pipe); - if (acc_pipeline) { - struct ia_css_pipeline_stage *stage; - for (stage = acc_pipeline->stages; stage; - stage = stage->next) { - const struct ia_css_fw_info *fw; - fw = stage->firmware; - atomisp_acc_done(asd, fw->handle); - } - } - } - - depth_cnt++; - - if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) - cancel_work_sync(&asd->delayed_init_work); - - complete(&asd->init_done); - asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED; - - stream_restart[asd->index] = true; - - asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING; - - /* stream off sensor */ - ret = v4l2_subdev_call( - isp->inputs[asd->input_curr]. - camera, video, s_stream, 0); - if (ret) - dev_warn(isp->dev, - "can't stop streaming on sensor!\n"); - - atomisp_acc_unload_extensions(asd); - - atomisp_clear_css_buffer_counters(asd); - - css_pipe_id = atomisp_get_css_pipe_id(asd); - atomisp_css_stop(asd, css_pipe_id, true); - - asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; - - asd->preview_exp_id = 1; - asd->postview_exp_id = 1; - /* notify HAL the CSS reset */ - dev_dbg(isp->dev, - "send reset event to %s\n", asd->subdev.devnode->name); - atomisp_reset_event(asd); - } - - /* clear irq */ - disable_isp_irq(hrt_isp_css_irq_sp); - clear_isp_irq(hrt_isp_css_irq_sp); - - /* Set the SRSE to 3 before resetting */ - pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control | - MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); - - /* reset ISP and restore its state */ - isp->isp_timeout = true; - atomisp_reset(isp); - isp->isp_timeout = false; - - if (!isp_timeout) { - for (i = 0; i < isp->num_of_streams; i++) { - if (isp->asd[i].depth_mode->val) - return; - } - } - - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (!stream_restart[i]) - continue; - - if (isp->inputs[asd->input_curr].type != FILE_INPUT) - 
atomisp_css_input_set_mode(asd, - CSS_INPUT_MODE_SENSOR); - - css_pipe_id = atomisp_get_css_pipe_id(asd); - if (atomisp_css_start(asd, css_pipe_id, true)) - dev_warn(isp->dev, - "start SP failed, so do not set streaming to be enable!\n"); - else - asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED; - - atomisp_csi2_configure(asd); - } - - if (!isp->sw_contex.file_input) { - atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF, - atomisp_css_valid_sof(isp)); - - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0) - dev_dbg(isp->dev, "dfs failed!\n"); - } else { - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, true) < 0) - dev_dbg(isp->dev, "dfs failed!\n"); - } - - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd; - - asd = &isp->asd[i]; - - if (!stream_restart[i]) - continue; - - if (asd->continuous_mode->val && - asd->delayed_init == ATOMISP_DELAYED_INIT_NOT_QUEUED) { - reinit_completion(&asd->init_done); - asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED; - queue_work(asd->delayed_init_workq, - &asd->delayed_init_work); - } - /* - * dequeueing buffers is not needed. CSS will recycle - * buffers that it has. - */ - atomisp_flush_bufs_and_wakeup(asd); - - /* Requeue unprocessed per-frame parameters. 
*/ - atomisp_recover_params_queue(&asd->video_out_capture); - atomisp_recover_params_queue(&asd->video_out_preview); - atomisp_recover_params_queue(&asd->video_out_video_capture); - - if ((asd->depth_mode->val) && - (depth_cnt == ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) { - depth_mode = true; - continue; - } - - ret = v4l2_subdev_call( - isp->inputs[asd->input_curr].camera, video, - s_stream, 1); - if (ret) - dev_warn(isp->dev, - "can't start streaming on sensor!\n"); - - } - - if (depth_mode) { - if (atomisp_stream_on_master_slave_sensor(isp, true)) - dev_warn(isp->dev, - "master slave sensor stream on failed!\n"); - } -} - -void atomisp_wdt_work(struct work_struct *work) -{ - struct atomisp_device *isp = container_of(work, struct atomisp_device, - wdt_work); - int i; -#ifdef ISP2401 - unsigned int pipe_wdt_cnt[MAX_STREAM_NUM][4] = { {0} }; - bool css_recover = true; -#endif - - rt_mutex_lock(&isp->mutex); - if (!atomisp_streaming_count(isp)) { - atomic_set(&isp->wdt_work_queued, 0); - rt_mutex_unlock(&isp->mutex); - return; - } - -#ifndef ISP2401 - dev_err(isp->dev, "timeout %d of %d\n", - atomic_read(&isp->wdt_count) + 1, - ATOMISP_ISP_MAX_TIMEOUT_COUNT); -#else - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - pipe_wdt_cnt[i][0] += - atomic_read(&asd->video_out_capture.wdt_count); - pipe_wdt_cnt[i][1] += - atomic_read(&asd->video_out_vf.wdt_count); - pipe_wdt_cnt[i][2] += - atomic_read(&asd->video_out_preview.wdt_count); - pipe_wdt_cnt[i][3] += - atomic_read(&asd->video_out_video_capture.wdt_count); - css_recover = - (pipe_wdt_cnt[i][0] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][1] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][2] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][3] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT) - ? 
true : false; - dev_err(isp->dev, "pipe on asd%d timeout cnt: (%d, %d, %d, %d) of %d, recover = %d\n", - asd->index, pipe_wdt_cnt[i][0], pipe_wdt_cnt[i][1], - pipe_wdt_cnt[i][2], pipe_wdt_cnt[i][3], - ATOMISP_ISP_MAX_TIMEOUT_COUNT, css_recover); - } -#endif - -#ifndef ISP2401 - if (atomic_inc_return(&isp->wdt_count) < - ATOMISP_ISP_MAX_TIMEOUT_COUNT) { -#else - if (css_recover) { -#endif - unsigned int old_dbglevel = dbg_level; - atomisp_css_debug_dump_sp_sw_debug_info(); - atomisp_css_debug_dump_debug_info(__func__); - dbg_level = old_dbglevel; - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - dev_err(isp->dev, "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_capture.vdev.name, - asd->video_out_capture. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_vf.vdev.name, - asd->video_out_vf. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_preview.vdev.name, - asd->video_out_preview. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_video_capture.vdev.name, - asd->video_out_video_capture. 
- buffers_in_css); - dev_err(isp->dev, - "%s, s3a buffers in css preview pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[CSS_PIPE_ID_PREVIEW]); - dev_err(isp->dev, - "%s, s3a buffers in css capture pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[CSS_PIPE_ID_CAPTURE]); - dev_err(isp->dev, - "%s, s3a buffers in css video pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[CSS_PIPE_ID_VIDEO]); - dev_err(isp->dev, - "%s, dis buffers in css: %d\n", - __func__, asd->dis_bufs_in_css); - dev_err(isp->dev, - "%s, metadata buffers in css preview pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [CSS_PIPE_ID_PREVIEW]); - dev_err(isp->dev, - "%s, metadata buffers in css capture pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [CSS_PIPE_ID_CAPTURE]); - dev_err(isp->dev, - "%s, metadata buffers in css video pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [CSS_PIPE_ID_VIDEO]); - if (asd->enable_raw_buffer_lock->val) { - unsigned int j; - - dev_err(isp->dev, "%s, raw_buffer_locked_count %d\n", - __func__, asd->raw_buffer_locked_count); - for (j = 0; j <= ATOMISP_MAX_EXP_ID/32; j++) - dev_err(isp->dev, "%s, raw_buffer_bitmap[%d]: 0x%x\n", - __func__, j, - asd->raw_buffer_bitmap[j]); - } - } - - /*sh_css_dump_sp_state();*/ - /*sh_css_dump_isp_state();*/ - } else { - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - if (asd->streaming == - ATOMISP_DEVICE_STREAMING_ENABLED) { - atomisp_clear_css_buffer_counters(asd); - atomisp_flush_bufs_and_wakeup(asd); - complete(&asd->init_done); - } -#ifdef ISP2401 - atomisp_wdt_stop(asd, false); -#endif - } - -#ifndef ISP2401 - atomic_set(&isp->wdt_count, 0); -#endif - isp->isp_fatal_error = true; - atomic_set(&isp->wdt_work_queued, 0); - - rt_mutex_unlock(&isp->mutex); - return; - } - - __atomisp_css_recover(isp, true); -#ifdef ISP2401 - for (i = 0; i < isp->num_of_streams; i++) { - struct 
atomisp_sub_device *asd = &isp->asd[i]; - if (asd->streaming == - ATOMISP_DEVICE_STREAMING_ENABLED) { - atomisp_wdt_refresh(asd, - isp->sw_contex.file_input ? - ATOMISP_ISP_FILE_TIMEOUT_DURATION : - ATOMISP_ISP_TIMEOUT_DURATION); - } - } -#endif - dev_err(isp->dev, "timeout recovery handling done\n"); - atomic_set(&isp->wdt_work_queued, 0); - - rt_mutex_unlock(&isp->mutex); -} - -void atomisp_css_flush(struct atomisp_device *isp) -{ - int i; - - if (!atomisp_streaming_count(isp)) - return; - - /* Disable wdt */ - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - atomisp_wdt_stop(asd, true); - } - - /* Start recover */ - __atomisp_css_recover(isp, false); - /* Restore wdt */ - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming != - ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - - atomisp_wdt_refresh(asd, - isp->sw_contex.file_input ? - ATOMISP_ISP_FILE_TIMEOUT_DURATION : - ATOMISP_ISP_TIMEOUT_DURATION); - } - dev_dbg(isp->dev, "atomisp css flush done\n"); -} - -void atomisp_wdt(struct timer_list *t) -{ -#ifndef ISP2401 - struct atomisp_sub_device *asd = from_timer(asd, t, wdt); -#else - struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt); - struct atomisp_sub_device *asd = pipe->asd; -#endif - struct atomisp_device *isp = asd->isp; - -#ifdef ISP2401 - atomic_inc(&pipe->wdt_count); - dev_warn(isp->dev, - "[WARNING]asd %d pipe %s ISP timeout %d!\n", - asd->index, pipe->vdev.name, - atomic_read(&pipe->wdt_count)); -#endif - if (atomic_read(&isp->wdt_work_queued)) { - dev_dbg(isp->dev, "ISP watchdog was put into workqueue\n"); - return; - } - atomic_set(&isp->wdt_work_queued, 1); - queue_work(isp->wdt_work_queue, &isp->wdt_work); -} - -#ifndef ISP2401 -void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay) -#else -void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, - unsigned int delay) -#endif -{ - unsigned long next; - 
- if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY) -#ifndef ISP2401 - asd->wdt_duration = delay; -#else - pipe->wdt_duration = delay; -#endif - -#ifndef ISP2401 - next = jiffies + asd->wdt_duration; -#else - next = jiffies + pipe->wdt_duration; -#endif - - /* Override next if it has been pushed beyon the "next" time */ -#ifndef ISP2401 - if (atomisp_is_wdt_running(asd) && time_after(asd->wdt_expires, next)) - next = asd->wdt_expires; -#else - if (atomisp_is_wdt_running(pipe) && time_after(pipe->wdt_expires, next)) - next = pipe->wdt_expires; -#endif - -#ifndef ISP2401 - asd->wdt_expires = next; -#else - pipe->wdt_expires = next; -#endif - -#ifndef ISP2401 - if (atomisp_is_wdt_running(asd)) - dev_dbg(asd->isp->dev, "WDT will hit after %d ms\n", - ((int)(next - jiffies) * 1000 / HZ)); -#else - if (atomisp_is_wdt_running(pipe)) - dev_dbg(pipe->asd->isp->dev, "WDT will hit after %d ms (%s)\n", - ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name); -#endif - else -#ifndef ISP2401 - dev_dbg(asd->isp->dev, "WDT starts with %d ms period\n", - ((int)(next - jiffies) * 1000 / HZ)); -#else - dev_dbg(pipe->asd->isp->dev, "WDT starts with %d ms period (%s)\n", - ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name); -#endif - -#ifndef ISP2401 - mod_timer(&asd->wdt, next); - atomic_set(&asd->isp->wdt_count, 0); -#else - mod_timer(&pipe->wdt, next); -#endif -} - -#ifndef ISP2401 -void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync) -#else -void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay) -{ - dev_dbg(asd->isp->dev, "WDT refresh all:\n"); - if (atomisp_is_wdt_running(&asd->video_out_capture)) - atomisp_wdt_refresh_pipe(&asd->video_out_capture, delay); - if (atomisp_is_wdt_running(&asd->video_out_preview)) - atomisp_wdt_refresh_pipe(&asd->video_out_preview, delay); - if (atomisp_is_wdt_running(&asd->video_out_vf)) - atomisp_wdt_refresh_pipe(&asd->video_out_vf, delay); - if (atomisp_is_wdt_running(&asd->video_out_video_capture)) - 
atomisp_wdt_refresh_pipe(&asd->video_out_video_capture, delay); -} - - -void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync) -#endif -{ -#ifndef ISP2401 - dev_dbg(asd->isp->dev, "WDT stop\n"); -#else - if (!atomisp_is_wdt_running(pipe)) - return; - - dev_dbg(pipe->asd->isp->dev, - "WDT stop asd %d (%s)\n", pipe->asd->index, pipe->vdev.name); - -#endif - if (sync) { -#ifndef ISP2401 - del_timer_sync(&asd->wdt); - cancel_work_sync(&asd->isp->wdt_work); -#else - del_timer_sync(&pipe->wdt); - cancel_work_sync(&pipe->asd->isp->wdt_work); -#endif - } else { -#ifndef ISP2401 - del_timer(&asd->wdt); -#else - del_timer(&pipe->wdt); -#endif - } -} - -#ifndef ISP2401 -void atomisp_wdt_start(struct atomisp_sub_device *asd) -#else -void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync) -{ - dev_dbg(asd->isp->dev, "WDT stop all:\n"); - atomisp_wdt_stop_pipe(&asd->video_out_capture, sync); - atomisp_wdt_stop_pipe(&asd->video_out_preview, sync); - atomisp_wdt_stop_pipe(&asd->video_out_vf, sync); - atomisp_wdt_stop_pipe(&asd->video_out_video_capture, sync); -} - -void atomisp_wdt_start(struct atomisp_video_pipe *pipe) -#endif -{ -#ifndef ISP2401 - atomisp_wdt_refresh(asd, ATOMISP_ISP_TIMEOUT_DURATION); -#else - atomisp_wdt_refresh_pipe(pipe, ATOMISP_ISP_TIMEOUT_DURATION); -#endif -} - -void atomisp_setup_flash(struct atomisp_sub_device *asd) -{ - struct atomisp_device *isp = asd->isp; - struct v4l2_control ctrl; - - if (isp->flash == NULL) - return; - - if (asd->params.flash_state != ATOMISP_FLASH_REQUESTED && - asd->params.flash_state != ATOMISP_FLASH_DONE) - return; - - if (asd->params.num_flash_frames) { - /* make sure the timeout is set before setting flash mode */ - ctrl.id = V4L2_CID_FLASH_TIMEOUT; - ctrl.value = FLASH_TIMEOUT; - - if (v4l2_s_ctrl(NULL, isp->flash->ctrl_handler, &ctrl)) { - dev_err(isp->dev, "flash timeout configure failed\n"); - return; - } - - atomisp_css_request_flash(asd); - asd->params.flash_state = ATOMISP_FLASH_ONGOING; - } 
else { - asd->params.flash_state = ATOMISP_FLASH_IDLE; - } -} - -irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr) -{ - struct atomisp_device *isp = isp_ptr; - unsigned long flags; - bool frame_done_found[MAX_STREAM_NUM] = {0}; - bool css_pipe_done[MAX_STREAM_NUM] = {0}; - unsigned int i; - struct atomisp_sub_device *asd; - - dev_dbg(isp->dev, ">%s\n", __func__); - - spin_lock_irqsave(&isp->lock, flags); - - if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp)) { - spin_unlock_irqrestore(&isp->lock, flags); - return IRQ_HANDLED; - } - - spin_unlock_irqrestore(&isp->lock, flags); - - /* - * The standard CSS2.0 API tells the following calling sequence of - * dequeue ready buffers: - * while (ia_css_dequeue_event(...)) { - * switch (event.type) { - * ... - * ia_css_pipe_dequeue_buffer() - * } - * } - * That is, dequeue event and buffer are one after another. - * - * But the following implementation is to first deuque all the event - * to a FIFO, then process the event in the FIFO. - * This will not have issue in single stream mode, but it do have some - * issue in multiple stream case. The issue is that - * ia_css_pipe_dequeue_buffer() will not return the corrent buffer in - * a specific pipe. - * - * This is due to ia_css_pipe_dequeue_buffer() does not take the - * ia_css_pipe parameter. 
- * - * So: - * For CSS2.0: we change the way to not dequeue all the event at one - * time, instead, dequue one and process one, then another - */ - rt_mutex_lock(&isp->mutex); - if (atomisp_css_isr_thread(isp, frame_done_found, css_pipe_done)) - goto out; - - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - atomisp_setup_flash(asd); - - } -out: - rt_mutex_unlock(&isp->mutex); - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED - && css_pipe_done[asd->index] - && isp->sw_contex.file_input) - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 1); - /* FIXME! FIX ACC implementation */ - if (asd->acc.pipeline && css_pipe_done[asd->index]) - atomisp_css_acc_done(asd); - } - dev_dbg(isp->dev, "<%s\n", __func__); - - return IRQ_HANDLED; -} - -/* - * utils for buffer allocation/free - */ - -int atomisp_get_frame_pgnr(struct atomisp_device *isp, - const struct atomisp_css_frame *frame, u32 *p_pgnr) -{ - if (!frame) { - dev_err(isp->dev, "%s: NULL frame pointer ERROR.\n", __func__); - return -EINVAL; - } - - *p_pgnr = DIV_ROUND_UP(frame->data_bytes, PAGE_SIZE); - return 0; -} - -/* - * Get internal fmt according to V4L2 fmt - */ -static enum atomisp_css_frame_format -v4l2_fmt_to_sh_fmt(u32 fmt) -{ - switch (fmt) { - case V4L2_PIX_FMT_YUV420: - return CSS_FRAME_FORMAT_YUV420; - case V4L2_PIX_FMT_YVU420: - return CSS_FRAME_FORMAT_YV12; - case V4L2_PIX_FMT_YUV422P: - return CSS_FRAME_FORMAT_YUV422; - case V4L2_PIX_FMT_YUV444: - return CSS_FRAME_FORMAT_YUV444; - case V4L2_PIX_FMT_NV12: - return CSS_FRAME_FORMAT_NV12; - case V4L2_PIX_FMT_NV21: - return CSS_FRAME_FORMAT_NV21; - case V4L2_PIX_FMT_NV16: - return CSS_FRAME_FORMAT_NV16; - case V4L2_PIX_FMT_NV61: - return CSS_FRAME_FORMAT_NV61; - case V4L2_PIX_FMT_UYVY: - return CSS_FRAME_FORMAT_UYVY; - case V4L2_PIX_FMT_YUYV: - return 
CSS_FRAME_FORMAT_YUYV; - case V4L2_PIX_FMT_RGB24: - return CSS_FRAME_FORMAT_PLANAR_RGB888; - case V4L2_PIX_FMT_RGB32: - return CSS_FRAME_FORMAT_RGBA888; - case V4L2_PIX_FMT_RGB565: - return CSS_FRAME_FORMAT_RGB565; - case V4L2_PIX_FMT_JPEG: - case V4L2_PIX_FMT_CUSTOM_M10MO_RAW: - return CSS_FRAME_FORMAT_BINARY_8; - case V4L2_PIX_FMT_SBGGR16: - case V4L2_PIX_FMT_SBGGR10: - case V4L2_PIX_FMT_SGBRG10: - case V4L2_PIX_FMT_SGRBG10: - case V4L2_PIX_FMT_SRGGB10: - case V4L2_PIX_FMT_SBGGR12: - case V4L2_PIX_FMT_SGBRG12: - case V4L2_PIX_FMT_SGRBG12: - case V4L2_PIX_FMT_SRGGB12: - case V4L2_PIX_FMT_SBGGR8: - case V4L2_PIX_FMT_SGBRG8: - case V4L2_PIX_FMT_SGRBG8: - case V4L2_PIX_FMT_SRGGB8: - return CSS_FRAME_FORMAT_RAW; - default: - return -EINVAL; - } -} -/* - * raw format match between SH format and V4L2 format - */ -static int raw_output_format_match_input(u32 input, u32 output) -{ - if ((input == CSS_FORMAT_RAW_12) && - ((output == V4L2_PIX_FMT_SRGGB12) || - (output == V4L2_PIX_FMT_SGRBG12) || - (output == V4L2_PIX_FMT_SBGGR12) || - (output == V4L2_PIX_FMT_SGBRG12))) - return 0; - - if ((input == CSS_FORMAT_RAW_10) && - ((output == V4L2_PIX_FMT_SRGGB10) || - (output == V4L2_PIX_FMT_SGRBG10) || - (output == V4L2_PIX_FMT_SBGGR10) || - (output == V4L2_PIX_FMT_SGBRG10))) - return 0; - - if ((input == CSS_FORMAT_RAW_8) && - ((output == V4L2_PIX_FMT_SRGGB8) || - (output == V4L2_PIX_FMT_SGRBG8) || - (output == V4L2_PIX_FMT_SBGGR8) || - (output == V4L2_PIX_FMT_SGBRG8))) - return 0; - - if ((input == CSS_FORMAT_RAW_16) && (output == V4L2_PIX_FMT_SBGGR16)) - return 0; - - return -EINVAL; -} - -static u32 get_pixel_depth(u32 pixelformat) -{ - switch (pixelformat) { - case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_YVU420: - return 12; - case V4L2_PIX_FMT_YUV422P: - case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_UYVY: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - case V4L2_PIX_FMT_RGB565: - case V4L2_PIX_FMT_SBGGR16: - case 
V4L2_PIX_FMT_SBGGR12: - case V4L2_PIX_FMT_SGBRG12: - case V4L2_PIX_FMT_SGRBG12: - case V4L2_PIX_FMT_SRGGB12: - case V4L2_PIX_FMT_SBGGR10: - case V4L2_PIX_FMT_SGBRG10: - case V4L2_PIX_FMT_SGRBG10: - case V4L2_PIX_FMT_SRGGB10: - return 16; - case V4L2_PIX_FMT_RGB24: - case V4L2_PIX_FMT_YUV444: - return 24; - case V4L2_PIX_FMT_RGB32: - return 32; - case V4L2_PIX_FMT_JPEG: - case V4L2_PIX_FMT_CUSTOM_M10MO_RAW: - case V4L2_PIX_FMT_SBGGR8: - case V4L2_PIX_FMT_SGBRG8: - case V4L2_PIX_FMT_SGRBG8: - case V4L2_PIX_FMT_SRGGB8: - return 8; - default: - return 8 * 2; /* raw type now */ - } -} - -bool atomisp_is_mbuscode_raw(uint32_t code) -{ - return code >= 0x3000 && code < 0x4000; -} - -/* - * ISP features control function - */ - -/* - * Set ISP capture mode based on current settings - */ -static void atomisp_update_capture_mode(struct atomisp_sub_device *asd) -{ - if (asd->params.gdc_cac_en) - atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_ADVANCED); - else if (asd->params.low_light) - atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_LOW_LIGHT); - else if (asd->video_out_capture.sh_fmt == CSS_FRAME_FORMAT_RAW) - atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_RAW); - else - atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_PRIMARY); -} - -#ifdef ISP2401 -int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd, - struct atomisp_s_runmode *runmode) -{ - struct atomisp_device *isp = asd->isp; - struct v4l2_ctrl *c; - struct v4l2_streamparm p = {0}; - int ret = 0; - int modes[] = { CI_MODE_NONE, - CI_MODE_VIDEO, - CI_MODE_STILL_CAPTURE, - CI_MODE_CONTINUOUS, - CI_MODE_PREVIEW }; - - if (!(runmode && (runmode->mode & RUNMODE_MASK))) - return -EINVAL; - - mutex_lock(asd->ctrl_handler.lock); - c = v4l2_ctrl_find(isp->inputs[asd->input_curr].camera->ctrl_handler, - V4L2_CID_RUN_MODE); - - if (c) - ret = v4l2_ctrl_s_ctrl(c, runmode->mode); - - mutex_unlock(asd->ctrl_handler.lock); - return ret; -} - -#endif -/* - * Function to enable/disable lens geometry distortion 
correction (GDC) and - * chromatic aberration correction (CAC) - */ -int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag, - __s32 *value) -{ - if (flag == 0) { - *value = asd->params.gdc_cac_en; - return 0; - } - - asd->params.gdc_cac_en = !!*value; - if (asd->params.gdc_cac_en) { - atomisp_css_set_morph_table(asd, - asd->params.css_param.morph_table); - } else { - atomisp_css_set_morph_table(asd, NULL); - } - asd->params.css_update_params_needed = true; - atomisp_update_capture_mode(asd); - return 0; -} - -/* - * Function to enable/disable low light mode including ANR - */ -int atomisp_low_light(struct atomisp_sub_device *asd, int flag, - __s32 *value) -{ - if (flag == 0) { - *value = asd->params.low_light; - return 0; - } - - asd->params.low_light = (*value != 0); - atomisp_update_capture_mode(asd); - return 0; -} - -/* - * Function to enable/disable extra noise reduction (XNR) in low light - * condition - */ -int atomisp_xnr(struct atomisp_sub_device *asd, int flag, - int *xnr_enable) -{ - if (flag == 0) { - *xnr_enable = asd->params.xnr_en; - return 0; - } - - atomisp_css_capture_enable_xnr(asd, !!*xnr_enable); - - return 0; -} - -/* - * Function to configure bayer noise reduction - */ -int atomisp_nr(struct atomisp_sub_device *asd, int flag, - struct atomisp_nr_config *arg) -{ - if (flag == 0) { - /* Get nr config from current setup */ - if (atomisp_css_get_nr_config(asd, arg)) - return -EINVAL; - } else { - /* Set nr config to isp parameters */ - memcpy(&asd->params.css_param.nr_config, arg, - sizeof(struct atomisp_css_nr_config)); - atomisp_css_set_nr_config(asd, &asd->params.css_param.nr_config); - asd->params.css_update_params_needed = true; - } - return 0; -} - -/* - * Function to configure temporal noise reduction (TNR) - */ -int atomisp_tnr(struct atomisp_sub_device *asd, int flag, - struct atomisp_tnr_config *config) -{ - /* Get tnr config from current setup */ - if (flag == 0) { - /* Get tnr config from current setup */ - if 
(atomisp_css_get_tnr_config(asd, config)) - return -EINVAL; - } else { - /* Set tnr config to isp parameters */ - memcpy(&asd->params.css_param.tnr_config, config, - sizeof(struct atomisp_css_tnr_config)); - atomisp_css_set_tnr_config(asd, &asd->params.css_param.tnr_config); - asd->params.css_update_params_needed = true; - } - - return 0; -} - -/* - * Function to configure black level compensation - */ -int atomisp_black_level(struct atomisp_sub_device *asd, int flag, - struct atomisp_ob_config *config) -{ - if (flag == 0) { - /* Get ob config from current setup */ - if (atomisp_css_get_ob_config(asd, config)) - return -EINVAL; - } else { - /* Set ob config to isp parameters */ - memcpy(&asd->params.css_param.ob_config, config, - sizeof(struct atomisp_css_ob_config)); - atomisp_css_set_ob_config(asd, &asd->params.css_param.ob_config); - asd->params.css_update_params_needed = true; - } - - return 0; -} - -/* - * Function to configure edge enhancement - */ -int atomisp_ee(struct atomisp_sub_device *asd, int flag, - struct atomisp_ee_config *config) -{ - if (flag == 0) { - /* Get ee config from current setup */ - if (atomisp_css_get_ee_config(asd, config)) - return -EINVAL; - } else { - /* Set ee config to isp parameters */ - memcpy(&asd->params.css_param.ee_config, config, - sizeof(asd->params.css_param.ee_config)); - atomisp_css_set_ee_config(asd, &asd->params.css_param.ee_config); - asd->params.css_update_params_needed = true; - } - - return 0; -} - -/* - * Function to update Gamma table for gamma, brightness and contrast config - */ -int atomisp_gamma(struct atomisp_sub_device *asd, int flag, - struct atomisp_gamma_table *config) -{ - if (flag == 0) { - /* Get gamma table from current setup */ - if (atomisp_css_get_gamma_table(asd, config)) - return -EINVAL; - } else { - /* Set gamma table to isp parameters */ - memcpy(&asd->params.css_param.gamma_table, config, - sizeof(asd->params.css_param.gamma_table)); - atomisp_css_set_gamma_table(asd, 
&asd->params.css_param.gamma_table); - } - - return 0; -} - -/* - * Function to update Ctc table for Chroma Enhancement - */ -int atomisp_ctc(struct atomisp_sub_device *asd, int flag, - struct atomisp_ctc_table *config) -{ - if (flag == 0) { - /* Get ctc table from current setup */ - if (atomisp_css_get_ctc_table(asd, config)) - return -EINVAL; - } else { - /* Set ctc table to isp parameters */ - memcpy(&asd->params.css_param.ctc_table, config, - sizeof(asd->params.css_param.ctc_table)); - atomisp_css_set_ctc_table(asd, &asd->params.css_param.ctc_table); - } - - return 0; -} - -/* - * Function to update gamma correction parameters - */ -int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag, - struct atomisp_gc_config *config) -{ - if (flag == 0) { - /* Get gamma correction params from current setup */ - if (atomisp_css_get_gc_config(asd, config)) - return -EINVAL; - } else { - /* Set gamma correction params to isp parameters */ - memcpy(&asd->params.css_param.gc_config, config, - sizeof(asd->params.css_param.gc_config)); - atomisp_css_set_gc_config(asd, &asd->params.css_param.gc_config); - asd->params.css_update_params_needed = true; - } - - return 0; -} - -/* - * Function to update narrow gamma flag - */ -int atomisp_formats(struct atomisp_sub_device *asd, int flag, - struct atomisp_formats_config *config) -{ - if (flag == 0) { - /* Get narrow gamma flag from current setup */ - if (atomisp_css_get_formats_config(asd, config)) - return -EINVAL; - } else { - /* Set narrow gamma flag to isp parameters */ - memcpy(&asd->params.css_param.formats_config, config, - sizeof(asd->params.css_param.formats_config)); - atomisp_css_set_formats_config(asd, &asd->params.css_param.formats_config); - } - - return 0; -} - -void atomisp_free_internal_buffers(struct atomisp_sub_device *asd) -{ - atomisp_free_css_parameters(&asd->params.css_param); - - if (asd->raw_output_frame) { - atomisp_css_frame_free(asd->raw_output_frame); - asd->raw_output_frame = NULL; - } -} - 
-static void atomisp_update_grid_info(struct atomisp_sub_device *asd, - enum atomisp_css_pipe_id pipe_id, - int source_pad) -{ - struct atomisp_device *isp = asd->isp; - int err; - uint16_t stream_id = atomisp_source_pad_to_stream_id(asd, source_pad); - - if (atomisp_css_get_grid_info(asd, pipe_id, source_pad)) - return; - - /* We must free all buffers because they no longer match - the grid size. */ - atomisp_css_free_stat_buffers(asd); - - err = atomisp_alloc_css_stat_bufs(asd, stream_id); - if (err) { - dev_err(isp->dev, "stat_buf allocate error\n"); - goto err; - } - - if (atomisp_alloc_3a_output_buf(asd)) { - /* Failure for 3A buffers does not influence DIS buffers */ - if (asd->params.s3a_output_bytes != 0) { - /* For SOC sensor happens s3a_output_bytes == 0, - * using if condition to exclude false error log */ - dev_err(isp->dev, "Failed to allocate memory for 3A statistics\n"); - } - goto err; - } - - if (atomisp_alloc_dis_coef_buf(asd)) { - dev_err(isp->dev, - "Failed to allocate memory for DIS statistics\n"); - goto err; - } - - if (atomisp_alloc_metadata_output_buf(asd)) { - dev_err(isp->dev, "Failed to allocate memory for metadata\n"); - goto err; - } - - return; - -err: - atomisp_css_free_stat_buffers(asd); - return; -} - -static void atomisp_curr_user_grid_info(struct atomisp_sub_device *asd, - struct atomisp_grid_info *info) -{ - memcpy(info, &asd->params.curr_grid_info.s3a_grid, - sizeof(struct atomisp_css_3a_grid_info)); -} - -int atomisp_compare_grid(struct atomisp_sub_device *asd, - struct atomisp_grid_info *atomgrid) -{ - struct atomisp_grid_info tmp = {0}; - - atomisp_curr_user_grid_info(asd, &tmp); - return memcmp(atomgrid, &tmp, sizeof(tmp)); -} - -/* - * Function to update Gdc table for gdc - */ -int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag, - struct atomisp_morph_table *config) -{ - int ret; - int i; - struct atomisp_device *isp = asd->isp; - - if (flag == 0) { - /* Get gdc table from current setup */ - struct 
atomisp_css_morph_table tab = {0}; - atomisp_css_get_morph_table(asd, &tab); - - config->width = tab.width; - config->height = tab.height; - - for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) { - ret = copy_to_user(config->coordinates_x[i], - tab.coordinates_x[i], tab.height * - tab.width * sizeof(*tab.coordinates_x[i])); - if (ret) { - dev_err(isp->dev, - "Failed to copy to User for x\n"); - return -EFAULT; - } - ret = copy_to_user(config->coordinates_y[i], - tab.coordinates_y[i], tab.height * - tab.width * sizeof(*tab.coordinates_y[i])); - if (ret) { - dev_err(isp->dev, - "Failed to copy to User for y\n"); - return -EFAULT; - } - } - } else { - struct atomisp_css_morph_table *tab = - asd->params.css_param.morph_table; - - /* free first if we have one */ - if (tab) { - atomisp_css_morph_table_free(tab); - asd->params.css_param.morph_table = NULL; - } - - /* allocate new one */ - tab = atomisp_css_morph_table_allocate(config->width, - config->height); - - if (!tab) { - dev_err(isp->dev, "out of memory\n"); - return -EINVAL; - } - - for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) { - ret = copy_from_user(tab->coordinates_x[i], - config->coordinates_x[i], - config->height * config->width * - sizeof(*config->coordinates_x[i])); - if (ret) { - dev_err(isp->dev, - "Failed to copy from User for x, ret %d\n", - ret); - atomisp_css_morph_table_free(tab); - return -EFAULT; - } - ret = copy_from_user(tab->coordinates_y[i], - config->coordinates_y[i], - config->height * config->width * - sizeof(*config->coordinates_y[i])); - if (ret) { - dev_err(isp->dev, - "Failed to copy from User for y, ret is %d\n", - ret); - atomisp_css_morph_table_free(tab); - return -EFAULT; - } - } - asd->params.css_param.morph_table = tab; - if (asd->params.gdc_cac_en) - atomisp_css_set_morph_table(asd, tab); - } - - return 0; -} - -int atomisp_macc_table(struct atomisp_sub_device *asd, int flag, - struct atomisp_macc_config *config) -{ - struct atomisp_css_macc_table *macc_table; - - switch 
(config->color_effect) { - case V4L2_COLORFX_NONE: - macc_table = &asd->params.css_param.macc_table; - break; - case V4L2_COLORFX_SKY_BLUE: - macc_table = &blue_macc_table; - break; - case V4L2_COLORFX_GRASS_GREEN: - macc_table = &green_macc_table; - break; - case V4L2_COLORFX_SKIN_WHITEN_LOW: - macc_table = &skin_low_macc_table; - break; - case V4L2_COLORFX_SKIN_WHITEN: - macc_table = &skin_medium_macc_table; - break; - case V4L2_COLORFX_SKIN_WHITEN_HIGH: - macc_table = &skin_high_macc_table; - break; - default: - return -EINVAL; - } - - if (flag == 0) { - /* Get macc table from current setup */ - memcpy(&config->table, macc_table, - sizeof(struct atomisp_css_macc_table)); - } else { - memcpy(macc_table, &config->table, - sizeof(struct atomisp_css_macc_table)); - if (config->color_effect == asd->params.color_effect) - atomisp_css_set_macc_table(asd, macc_table); - } - - return 0; -} - -int atomisp_set_dis_vector(struct atomisp_sub_device *asd, - struct atomisp_dis_vector *vector) -{ - atomisp_css_video_set_dis_vector(asd, vector); - - asd->params.dis_proj_data_valid = false; - asd->params.css_update_params_needed = true; - return 0; -} - -/* - * Function to set/get image stablization statistics - */ -int atomisp_get_dis_stat(struct atomisp_sub_device *asd, - struct atomisp_dis_statistics *stats) -{ - return atomisp_css_get_dis_stat(asd, stats); -} - -/* - * Function set camrea_prefiles.xml current sensor pixel array size - */ -int atomisp_set_array_res(struct atomisp_sub_device *asd, - struct atomisp_resolution *config) -{ - dev_dbg(asd->isp->dev, ">%s start\n", __func__); - if (!config) { - dev_err(asd->isp->dev, "Set sensor array size is not valid\n"); - return -EINVAL; - } - - asd->sensor_array_res.width = config->width; - asd->sensor_array_res.height = config->height; - return 0; -} - -/* - * Function to get DVS2 BQ resolution settings - */ -int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd, - struct atomisp_dvs2_bq_resolutions *bq_res) -{ - 
struct ia_css_pipe_config *pipe_cfg = NULL; - struct ia_css_stream_config *stream_cfg = NULL; - struct ia_css_stream_input_config *input_config = NULL; - - struct ia_css_stream *stream = - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream; - if (!stream) { - dev_warn(asd->isp->dev, "stream is not created"); - return -EAGAIN; - } - - pipe_cfg = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[CSS_PIPE_ID_VIDEO]; - stream_cfg = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .stream_config; - input_config = &stream_cfg->input_config; - - if (!bq_res) - return -EINVAL; - - /* the GDC output resolution */ - bq_res->output_bq.width_bq = pipe_cfg->output_info[0].res.width / 2; - bq_res->output_bq.height_bq = pipe_cfg->output_info[0].res.height / 2; - - bq_res->envelope_bq.width_bq = 0; - bq_res->envelope_bq.height_bq = 0; - /* the GDC input resolution */ - if (!asd->continuous_mode->val) { - bq_res->source_bq.width_bq = bq_res->output_bq.width_bq + - pipe_cfg->dvs_envelope.width / 2; - bq_res->source_bq.height_bq = bq_res->output_bq.height_bq + - pipe_cfg->dvs_envelope.height / 2; - /* - * Bad pixels caused by spatial filter processing - * ISP filter resolution should be given by CSS/FW, but for now - * there is not such API to query, and it is fixed value, so - * hardcoded here. 
- */ - bq_res->ispfilter_bq.width_bq = 12 / 2; - bq_res->ispfilter_bq.height_bq = 12 / 2; - /* spatial filter shift, always 4 pixels */ - bq_res->gdc_shift_bq.width_bq = 4 / 2; - bq_res->gdc_shift_bq.height_bq = 4 / 2; - - if (asd->params.video_dis_en) { - bq_res->envelope_bq.width_bq = pipe_cfg->dvs_envelope.width - / 2 - bq_res->ispfilter_bq.width_bq; - bq_res->envelope_bq.height_bq = pipe_cfg->dvs_envelope.height - / 2 - bq_res->ispfilter_bq.height_bq; - } - } else { - unsigned int w_padding; - unsigned int gdc_effective_input = 0; - - /* For GDC: - * gdc_effective_input = effective_input + envelope - * - * From the comment and formula in BZ1786, - * we see the source_bq should be: - * effective_input / bayer_ds_ratio - */ - bq_res->source_bq.width_bq = - (input_config->effective_res.width * - pipe_cfg->bayer_ds_out_res.width / - input_config->effective_res.width + 1) / 2; - bq_res->source_bq.height_bq = - (input_config->effective_res.height * - pipe_cfg->bayer_ds_out_res.height / - input_config->effective_res.height + 1) / 2; - - - if (!asd->params.video_dis_en) { - /* - * We adjust the ispfilter_bq to: - * ispfilter_bq = 128/BDS - * we still need firmware team to provide an offical - * formula for SDV. - */ - bq_res->ispfilter_bq.width_bq = 128 * - pipe_cfg->bayer_ds_out_res.width / - input_config->effective_res.width / 2; - bq_res->ispfilter_bq.height_bq = 128 * - pipe_cfg->bayer_ds_out_res.width / - input_config->effective_res.width / 2; - - if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401)) { - /* No additional left padding for ISYS2401 */ - bq_res->gdc_shift_bq.width_bq = 4 / 2; - bq_res->gdc_shift_bq.height_bq = 4 / 2; - } else { - /* - * For the w_padding and gdc_shift_bq cacluation - * Please see the BZ 1786 and 4358 for more info. - * Just test that this formula can work now, - * but we still have no offical formula. 
- * - * w_padding = ceiling(gdc_effective_input - * /128, 1) * 128 - effective_width - * gdc_shift_bq = w_padding/BDS/2 + ispfilter_bq/2 - */ - gdc_effective_input = - input_config->effective_res.width + - pipe_cfg->dvs_envelope.width; - w_padding = roundup(gdc_effective_input, 128) - - input_config->effective_res.width; - w_padding = w_padding * - pipe_cfg->bayer_ds_out_res.width / - input_config->effective_res.width + 1; - w_padding = roundup(w_padding/2, 1); - - bq_res->gdc_shift_bq.width_bq = bq_res->ispfilter_bq.width_bq / 2 - + w_padding; - bq_res->gdc_shift_bq.height_bq = 4 / 2; - } - } else { - unsigned int dvs_w, dvs_h, dvs_w_max, dvs_h_max; - - bq_res->ispfilter_bq.width_bq = 8 / 2; - bq_res->ispfilter_bq.height_bq = 8 / 2; - - if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401)) { - /* No additional left padding for ISYS2401 */ - bq_res->gdc_shift_bq.width_bq = 4 / 2; - bq_res->gdc_shift_bq.height_bq = 4 / 2; - } else { - w_padding = - roundup(input_config->effective_res.width, 128) - - input_config->effective_res.width; - if (w_padding < 12) - w_padding = 12; - bq_res->gdc_shift_bq.width_bq = 4 / 2 + - ((w_padding - 12) * - pipe_cfg->bayer_ds_out_res.width / - input_config->effective_res.width + 1) / 2; - bq_res->gdc_shift_bq.height_bq = 4 / 2; - } - - dvs_w = pipe_cfg->bayer_ds_out_res.width - - pipe_cfg->output_info[0].res.width; - dvs_h = pipe_cfg->bayer_ds_out_res.height - - pipe_cfg->output_info[0].res.height; - dvs_w_max = rounddown( - pipe_cfg->output_info[0].res.width / 5, - ATOM_ISP_STEP_WIDTH); - dvs_h_max = rounddown( - pipe_cfg->output_info[0].res.height / 5, - ATOM_ISP_STEP_HEIGHT); - bq_res->envelope_bq.width_bq = - min((dvs_w / 2), (dvs_w_max / 2)) - - bq_res->ispfilter_bq.width_bq; - bq_res->envelope_bq.height_bq = - min((dvs_h / 2), (dvs_h_max / 2)) - - bq_res->ispfilter_bq.height_bq; - } - } - - dev_dbg(asd->isp->dev, "source_bq.width_bq %d, source_bq.height_bq %d,\nispfilter_bq.width_bq %d, ispfilter_bq.height_bq 
%d,\ngdc_shift_bq.width_bq %d, gdc_shift_bq.height_bq %d,\nenvelope_bq.width_bq %d, envelope_bq.height_bq %d,\noutput_bq.width_bq %d, output_bq.height_bq %d\n", - bq_res->source_bq.width_bq, bq_res->source_bq.height_bq, - bq_res->ispfilter_bq.width_bq, bq_res->ispfilter_bq.height_bq, - bq_res->gdc_shift_bq.width_bq, bq_res->gdc_shift_bq.height_bq, - bq_res->envelope_bq.width_bq, bq_res->envelope_bq.height_bq, - bq_res->output_bq.width_bq, bq_res->output_bq.height_bq); - - return 0; -} - -int atomisp_set_dis_coefs(struct atomisp_sub_device *asd, - struct atomisp_dis_coefficients *coefs) -{ - return atomisp_css_set_dis_coefs(asd, coefs); -} - -/* - * Function to set/get 3A stat from isp - */ -int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag, - struct atomisp_3a_statistics *config) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_s3a_buf *s3a_buf; - unsigned long ret; - - if (flag != 0) - return -EINVAL; - - /* sanity check to avoid writing into unallocated memory. */ - if (asd->params.s3a_output_bytes == 0) - return -EINVAL; - - if (atomisp_compare_grid(asd, &config->grid_info) != 0) { - /* If the grid info in the argument differs from the current - grid info, we tell the caller to reset the grid size and - try again. 
*/ - return -EAGAIN; - } - - if (list_empty(&asd->s3a_stats_ready)) { - dev_err(isp->dev, "3a statistics is not valid.\n"); - return -EAGAIN; - } - - s3a_buf = list_entry(asd->s3a_stats_ready.next, - struct atomisp_s3a_buf, list); - if (s3a_buf->s3a_map) - ia_css_translate_3a_statistics( - asd->params.s3a_user_stat, s3a_buf->s3a_map); - else - ia_css_get_3a_statistics(asd->params.s3a_user_stat, - s3a_buf->s3a_data); - - config->exp_id = s3a_buf->s3a_data->exp_id; - config->isp_config_id = s3a_buf->s3a_data->isp_config_id; - - ret = copy_to_user(config->data, asd->params.s3a_user_stat->data, - asd->params.s3a_output_bytes); - if (ret) { - dev_err(isp->dev, "copy to user failed: copied %lu bytes\n", - ret); - return -EFAULT; - } - - /* Move to free buffer list */ - list_del_init(&s3a_buf->list); - list_add_tail(&s3a_buf->list, &asd->s3a_stats); - dev_dbg(isp->dev, "%s: finish getting exp_id %d 3a stat, isp_config_id %d\n", __func__, - config->exp_id, config->isp_config_id); - return 0; -} - -int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag, - struct atomisp_metadata *md) -{ - struct atomisp_device *isp = asd->isp; - struct ia_css_stream_config *stream_config; - struct ia_css_stream_info *stream_info; - struct camera_mipi_info *mipi_info; - struct atomisp_metadata_buf *md_buf; - enum atomisp_metadata_type md_type = ATOMISP_MAIN_METADATA; - int ret, i; - - if (flag != 0) - return -EINVAL; - - stream_config = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - stream_config; - stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - stream_info; - - /* We always return the resolution and stride even if there is - * no valid metadata. This allows the caller to get the information - * needed to allocate user-space buffers. 
*/ - md->width = stream_info->metadata_info.resolution.width; - md->height = stream_info->metadata_info.resolution.height; - md->stride = stream_info->metadata_info.stride; - - /* sanity check to avoid writing into unallocated memory. - * This does not return an error because it is a valid way - * for applications to detect that metadata is not enabled. */ - if (md->width == 0 || md->height == 0 || !md->data) - return 0; - - /* This is done in the atomisp_buf_done() */ - if (list_empty(&asd->metadata_ready[md_type])) { - dev_warn(isp->dev, "Metadata queue is empty now!\n"); - return -EAGAIN; - } - - mipi_info = atomisp_to_sensor_mipi_info( - isp->inputs[asd->input_curr].camera); - if (mipi_info == NULL) - return -EINVAL; - - if (mipi_info->metadata_effective_width != NULL) { - for (i = 0; i < md->height; i++) - md->effective_width[i] = - mipi_info->metadata_effective_width[i]; - } - - md_buf = list_entry(asd->metadata_ready[md_type].next, - struct atomisp_metadata_buf, list); - md->exp_id = md_buf->metadata->exp_id; - if (md_buf->md_vptr) { - ret = copy_to_user(md->data, - md_buf->md_vptr, - stream_info->metadata_info.size); - } else { - hmm_load(md_buf->metadata->address, - asd->params.metadata_user[md_type], - stream_info->metadata_info.size); - - ret = copy_to_user(md->data, - asd->params.metadata_user[md_type], - stream_info->metadata_info.size); - } - if (ret) { - dev_err(isp->dev, "copy to user failed: copied %d bytes\n", - ret); - return -EFAULT; - } - - list_del_init(&md_buf->list); - list_add_tail(&md_buf->list, &asd->metadata[md_type]); - - dev_dbg(isp->dev, "%s: HAL de-queued metadata type %d with exp_id %d\n", - __func__, md_type, md->exp_id); - return 0; -} - -int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag, - struct atomisp_metadata_with_type *md) -{ - struct atomisp_device *isp = asd->isp; - struct ia_css_stream_config *stream_config; - struct ia_css_stream_info *stream_info; - struct camera_mipi_info *mipi_info; - struct 
atomisp_metadata_buf *md_buf; - enum atomisp_metadata_type md_type; - int ret, i; - - if (flag != 0) - return -EINVAL; - - stream_config = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - stream_config; - stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - stream_info; - - /* We always return the resolution and stride even if there is - * no valid metadata. This allows the caller to get the information - * needed to allocate user-space buffers. */ - md->width = stream_info->metadata_info.resolution.width; - md->height = stream_info->metadata_info.resolution.height; - md->stride = stream_info->metadata_info.stride; - - /* sanity check to avoid writing into unallocated memory. - * This does not return an error because it is a valid way - * for applications to detect that metadata is not enabled. */ - if (md->width == 0 || md->height == 0 || !md->data) - return 0; - - md_type = md->type; - if (md_type < 0 || md_type >= ATOMISP_METADATA_TYPE_NUM) - return -EINVAL; - - /* This is done in the atomisp_buf_done() */ - if (list_empty(&asd->metadata_ready[md_type])) { - dev_warn(isp->dev, "Metadata queue is empty now!\n"); - return -EAGAIN; - } - - mipi_info = atomisp_to_sensor_mipi_info( - isp->inputs[asd->input_curr].camera); - if (mipi_info == NULL) - return -EINVAL; - - if (mipi_info->metadata_effective_width != NULL) { - for (i = 0; i < md->height; i++) - md->effective_width[i] = - mipi_info->metadata_effective_width[i]; - } - - md_buf = list_entry(asd->metadata_ready[md_type].next, - struct atomisp_metadata_buf, list); - md->exp_id = md_buf->metadata->exp_id; - if (md_buf->md_vptr) { - ret = copy_to_user(md->data, - md_buf->md_vptr, - stream_info->metadata_info.size); - } else { - hmm_load(md_buf->metadata->address, - asd->params.metadata_user[md_type], - stream_info->metadata_info.size); - - ret = copy_to_user(md->data, - asd->params.metadata_user[md_type], - stream_info->metadata_info.size); - } - if (ret) { - dev_err(isp->dev, "copy to user failed: 
copied %d bytes\n", - ret); - return -EFAULT; - } else { - list_del_init(&md_buf->list); - list_add_tail(&md_buf->list, &asd->metadata[md_type]); - } - dev_dbg(isp->dev, "%s: HAL de-queued metadata type %d with exp_id %d\n", - __func__, md_type, md->exp_id); - return 0; -} - -/* - * Function to calculate real zoom region for every pipe - */ -int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd, - struct ia_css_dz_config *dz_config, - enum atomisp_css_pipe_id css_pipe_id) - -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - struct atomisp_resolution eff_res, out_res; -#ifdef ISP2401 - int w_offset, h_offset; -#endif - - memset(&eff_res, 0, sizeof(eff_res)); - memset(&out_res, 0, sizeof(out_res)); - - if (dz_config->dx || dz_config->dy) - return 0; - - if (css_pipe_id != IA_CSS_PIPE_ID_PREVIEW - && css_pipe_id != IA_CSS_PIPE_ID_CAPTURE) { - dev_err(asd->isp->dev, "%s the set pipe no support crop region" - , __func__); - return -EINVAL; - } - - eff_res.width = - stream_env->stream_config.input_config.effective_res.width; - eff_res.height = - stream_env->stream_config.input_config.effective_res.height; - if (eff_res.width == 0 || eff_res.height == 0) { - dev_err(asd->isp->dev, "%s err effective resolution" - , __func__); - return -EINVAL; - } - - if (dz_config->zoom_region.resolution.width - == asd->sensor_array_res.width - || dz_config->zoom_region.resolution.height - == asd->sensor_array_res.height) { - /*no need crop region*/ - dz_config->zoom_region.origin.x = 0; - dz_config->zoom_region.origin.y = 0; - dz_config->zoom_region.resolution.width = eff_res.width; - dz_config->zoom_region.resolution.height = eff_res.height; - return 0; - } - - /* FIXME: - * This is not the correct implementation with Google's definition, due - * to firmware limitation. - * map real crop region base on above calculating base max crop region. 
- */ -#ifdef ISP2401 - out_res.width = - stream_env->pipe_configs[css_pipe_id].output_info[0].res.width; - out_res.height = - stream_env->pipe_configs[css_pipe_id].output_info[0].res.height; - if (out_res.width == 0 || out_res.height == 0) { - dev_err(asd->isp->dev, "%s err current pipe output resolution" - , __func__); - return -EINVAL; - } - - if (asd->sensor_array_res.width * out_res.height - < out_res.width * asd->sensor_array_res.height) { - h_offset = asd->sensor_array_res.height - - asd->sensor_array_res.width - * out_res.height / out_res.width; - h_offset = h_offset / 2; - if (dz_config->zoom_region.origin.y < h_offset) - dz_config->zoom_region.origin.y = 0; - else - dz_config->zoom_region.origin.y = - dz_config->zoom_region.origin.y - h_offset; - w_offset = 0; - } else { - w_offset = asd->sensor_array_res.width - - asd->sensor_array_res.height - * out_res.width / out_res.height; - w_offset = w_offset / 2; - if (dz_config->zoom_region.origin.x < w_offset) - dz_config->zoom_region.origin.x = 0; - else - dz_config->zoom_region.origin.x = - dz_config->zoom_region.origin.x - w_offset; - h_offset = 0; - } -#endif - dz_config->zoom_region.origin.x = - dz_config->zoom_region.origin.x - * eff_res.width -#ifndef ISP2401 - / asd->sensor_array_res.width; -#else - / (asd->sensor_array_res.width - - 2 * w_offset); -#endif - dz_config->zoom_region.origin.y = - dz_config->zoom_region.origin.y - * eff_res.height -#ifndef ISP2401 - / asd->sensor_array_res.height; -#else - / (asd->sensor_array_res.height - - 2 * h_offset); -#endif - dz_config->zoom_region.resolution.width = - dz_config->zoom_region.resolution.width - * eff_res.width -#ifndef ISP2401 - / asd->sensor_array_res.width; -#else - / (asd->sensor_array_res.width - - 2 * w_offset); -#endif - dz_config->zoom_region.resolution.height = - dz_config->zoom_region.resolution.height - * eff_res.height -#ifndef ISP2401 - / asd->sensor_array_res.height; -#else - / (asd->sensor_array_res.height - - 2 * h_offset); -#endif - - 
/* - * Set same ratio of crop region resolution and current pipe output - * resolution - */ -#ifndef ISP2401 - out_res.width = - stream_env->pipe_configs[css_pipe_id].output_info[0].res.width; - out_res.height = - stream_env->pipe_configs[css_pipe_id].output_info[0].res.height; - if (out_res.width == 0 || out_res.height == 0) { - dev_err(asd->isp->dev, "%s err current pipe output resolution" - , __func__); - return -EINVAL; - } - -#endif - if (out_res.width * dz_config->zoom_region.resolution.height - > dz_config->zoom_region.resolution.width * out_res.height) { - dz_config->zoom_region.resolution.height = - dz_config->zoom_region.resolution.width - * out_res.height / out_res.width; - } else { - dz_config->zoom_region.resolution.width = - dz_config->zoom_region.resolution.height - * out_res.width / out_res.height; - } - dev_dbg(asd->isp->dev, "%s crop region:(%d,%d),(%d,%d) eff_res(%d, %d) array_size(%d,%d) out_res(%d, %d)\n", - __func__, dz_config->zoom_region.origin.x, - dz_config->zoom_region.origin.y, - dz_config->zoom_region.resolution.width, - dz_config->zoom_region.resolution.height, - eff_res.width, eff_res.height, - asd->sensor_array_res.width, - asd->sensor_array_res.height, - out_res.width, out_res.height); - - - if ((dz_config->zoom_region.origin.x + - dz_config->zoom_region.resolution.width - > eff_res.width) || - (dz_config->zoom_region.origin.y + - dz_config->zoom_region.resolution.height - > eff_res.height)) - return -EINVAL; - - return 0; -} - - -/* - * Function to check the zoom region whether is effective - */ -static bool atomisp_check_zoom_region( - struct atomisp_sub_device *asd, - struct ia_css_dz_config *dz_config) -{ - struct atomisp_resolution config; - bool flag = false; - unsigned int w , h; - - memset(&config, 0, sizeof(struct atomisp_resolution)); - - if (dz_config->dx && dz_config->dy) - return true; - - config.width = asd->sensor_array_res.width; - config.height = asd->sensor_array_res.height; - w = dz_config->zoom_region.origin.x + 
- dz_config->zoom_region.resolution.width; - h = dz_config->zoom_region.origin.y + - dz_config->zoom_region.resolution.height; - - if ((w <= config.width) && (h <= config.height) && w > 0 && h > 0) - flag = true; - else - /* setting error zoom region */ - dev_err(asd->isp->dev, "%s zoom region ERROR:dz_config:(%d,%d),(%d,%d)array_res(%d, %d)\n", - __func__, dz_config->zoom_region.origin.x, - dz_config->zoom_region.origin.y, - dz_config->zoom_region.resolution.width, - dz_config->zoom_region.resolution.height, - config.width, config.height); - - return flag; -} - -void atomisp_apply_css_parameters( - struct atomisp_sub_device *asd, - struct atomisp_css_params *css_param) -{ - if (css_param->update_flag.wb_config) - atomisp_css_set_wb_config(asd, &css_param->wb_config); - - if (css_param->update_flag.ob_config) - atomisp_css_set_ob_config(asd, &css_param->ob_config); - - if (css_param->update_flag.dp_config) - atomisp_css_set_dp_config(asd, &css_param->dp_config); - - if (css_param->update_flag.nr_config) - atomisp_css_set_nr_config(asd, &css_param->nr_config); - - if (css_param->update_flag.ee_config) - atomisp_css_set_ee_config(asd, &css_param->ee_config); - - if (css_param->update_flag.tnr_config) - atomisp_css_set_tnr_config(asd, &css_param->tnr_config); - - if (css_param->update_flag.a3a_config) - atomisp_css_set_3a_config(asd, &css_param->s3a_config); - - if (css_param->update_flag.ctc_config) - atomisp_css_set_ctc_config(asd, &css_param->ctc_config); - - if (css_param->update_flag.cnr_config) - atomisp_css_set_cnr_config(asd, &css_param->cnr_config); - - if (css_param->update_flag.ecd_config) - atomisp_css_set_ecd_config(asd, &css_param->ecd_config); - - if (css_param->update_flag.ynr_config) - atomisp_css_set_ynr_config(asd, &css_param->ynr_config); - - if (css_param->update_flag.fc_config) - atomisp_css_set_fc_config(asd, &css_param->fc_config); - - if (css_param->update_flag.macc_config) - atomisp_css_set_macc_config(asd, &css_param->macc_config); - - if 
(css_param->update_flag.aa_config) - atomisp_css_set_aa_config(asd, &css_param->aa_config); - - if (css_param->update_flag.anr_config) - atomisp_css_set_anr_config(asd, &css_param->anr_config); - - if (css_param->update_flag.xnr_config) - atomisp_css_set_xnr_config(asd, &css_param->xnr_config); - - if (css_param->update_flag.yuv2rgb_cc_config) - atomisp_css_set_yuv2rgb_cc_config(asd, - &css_param->yuv2rgb_cc_config); - - if (css_param->update_flag.rgb2yuv_cc_config) - atomisp_css_set_rgb2yuv_cc_config(asd, - &css_param->rgb2yuv_cc_config); - - if (css_param->update_flag.macc_table) - atomisp_css_set_macc_table(asd, &css_param->macc_table); - - if (css_param->update_flag.xnr_table) - atomisp_css_set_xnr_table(asd, &css_param->xnr_table); - - if (css_param->update_flag.r_gamma_table) - atomisp_css_set_r_gamma_table(asd, &css_param->r_gamma_table); - - if (css_param->update_flag.g_gamma_table) - atomisp_css_set_g_gamma_table(asd, &css_param->g_gamma_table); - - if (css_param->update_flag.b_gamma_table) - atomisp_css_set_b_gamma_table(asd, &css_param->b_gamma_table); - - if (css_param->update_flag.anr_thres) - atomisp_css_set_anr_thres(asd, &css_param->anr_thres); - - if (css_param->update_flag.shading_table) - atomisp_css_set_shading_table(asd, css_param->shading_table); - - if (css_param->update_flag.morph_table && asd->params.gdc_cac_en) - atomisp_css_set_morph_table(asd, css_param->morph_table); - - if (css_param->update_flag.dvs2_coefs) { - struct atomisp_css_dvs_grid_info *dvs_grid_info = - atomisp_css_get_dvs_grid_info( - &asd->params.curr_grid_info); - - if (dvs_grid_info && dvs_grid_info->enable) - atomisp_css_set_dvs2_coefs(asd, css_param->dvs2_coeff); - } - - if (css_param->update_flag.dvs_6axis_config) - atomisp_css_set_dvs_6axis(asd, css_param->dvs_6axis); - - atomisp_css_set_isp_config_id(asd, css_param->isp_config_id); - /* - * These configurations are on used by ISP1.x, not for ISP2.x, - * so do not handle them. see comments of ia_css_isp_config. 
- * 1 cc_config - * 2 ce_config - * 3 de_config - * 4 gc_config - * 5 gamma_table - * 6 ctc_table - * 7 dvs_coefs - */ -} - -static unsigned int long copy_from_compatible(void *to, const void *from, - unsigned long n, bool from_user) -{ - if (from_user) - return copy_from_user(to, (void __user *)from, n); - else - memcpy(to, from, n); - return 0; -} - -int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd, - struct atomisp_parameters *arg, - struct atomisp_css_params *css_param, - bool from_user) -{ - struct atomisp_parameters *cur_config = &css_param->update_flag; - - if (!arg || !asd || !css_param) - return -EINVAL; - - if (arg->wb_config && (from_user || !cur_config->wb_config)) { - if (copy_from_compatible(&css_param->wb_config, arg->wb_config, - sizeof(struct atomisp_css_wb_config), - from_user)) - return -EFAULT; - css_param->update_flag.wb_config = - (struct atomisp_wb_config *) &css_param->wb_config; - } - - if (arg->ob_config && (from_user || !cur_config->ob_config)) { - if (copy_from_compatible(&css_param->ob_config, arg->ob_config, - sizeof(struct atomisp_css_ob_config), - from_user)) - return -EFAULT; - css_param->update_flag.ob_config = - (struct atomisp_ob_config *) &css_param->ob_config; - } - - if (arg->dp_config && (from_user || !cur_config->dp_config)) { - if (copy_from_compatible(&css_param->dp_config, arg->dp_config, - sizeof(struct atomisp_css_dp_config), - from_user)) - return -EFAULT; - css_param->update_flag.dp_config = - (struct atomisp_dp_config *) &css_param->dp_config; - } - - if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) { - if (arg->dz_config && (from_user || !cur_config->dz_config)) { - if (copy_from_compatible(&css_param->dz_config, - arg->dz_config, - sizeof(struct atomisp_css_dz_config), - from_user)) - return -EFAULT; - if (!atomisp_check_zoom_region(asd, - &css_param->dz_config)) { - dev_err(asd->isp->dev, "crop region error!"); - return -EINVAL; - } - css_param->update_flag.dz_config = - (struct 
atomisp_dz_config *) - &css_param->dz_config; - } - } - - if (arg->nr_config && (from_user || !cur_config->nr_config)) { - if (copy_from_compatible(&css_param->nr_config, arg->nr_config, - sizeof(struct atomisp_css_nr_config), - from_user)) - return -EFAULT; - css_param->update_flag.nr_config = - (struct atomisp_nr_config *) &css_param->nr_config; - } - - if (arg->ee_config && (from_user || !cur_config->ee_config)) { - if (copy_from_compatible(&css_param->ee_config, arg->ee_config, - sizeof(struct atomisp_css_ee_config), - from_user)) - return -EFAULT; - css_param->update_flag.ee_config = - (struct atomisp_ee_config *) &css_param->ee_config; - } - - if (arg->tnr_config && (from_user || !cur_config->tnr_config)) { - if (copy_from_compatible(&css_param->tnr_config, - arg->tnr_config, - sizeof(struct atomisp_css_tnr_config), - from_user)) - return -EFAULT; - css_param->update_flag.tnr_config = - (struct atomisp_tnr_config *) - &css_param->tnr_config; - } - - if (arg->a3a_config && (from_user || !cur_config->a3a_config)) { - if (copy_from_compatible(&css_param->s3a_config, - arg->a3a_config, - sizeof(struct atomisp_css_3a_config), - from_user)) - return -EFAULT; - css_param->update_flag.a3a_config = - (struct atomisp_3a_config *) &css_param->s3a_config; - } - - if (arg->ctc_config && (from_user || !cur_config->ctc_config)) { - if (copy_from_compatible(&css_param->ctc_config, - arg->ctc_config, - sizeof(struct atomisp_css_ctc_config), - from_user)) - return -EFAULT; - css_param->update_flag.ctc_config = - (struct atomisp_ctc_config *) - &css_param->ctc_config; - } - - if (arg->cnr_config && (from_user || !cur_config->cnr_config)) { - if (copy_from_compatible(&css_param->cnr_config, - arg->cnr_config, - sizeof(struct atomisp_css_cnr_config), - from_user)) - return -EFAULT; - css_param->update_flag.cnr_config = - (struct atomisp_cnr_config *) - &css_param->cnr_config; - } - - if (arg->ecd_config && (from_user || !cur_config->ecd_config)) { - if 
(copy_from_compatible(&css_param->ecd_config, - arg->ecd_config, - sizeof(struct atomisp_css_ecd_config), - from_user)) - return -EFAULT; - css_param->update_flag.ecd_config = - (struct atomisp_ecd_config *) - &css_param->ecd_config; - } - - if (arg->ynr_config && (from_user || !cur_config->ynr_config)) { - if (copy_from_compatible(&css_param->ynr_config, - arg->ynr_config, - sizeof(struct atomisp_css_ynr_config), - from_user)) - return -EFAULT; - css_param->update_flag.ynr_config = - (struct atomisp_ynr_config *) - &css_param->ynr_config; - } - - if (arg->fc_config && (from_user || !cur_config->fc_config)) { - if (copy_from_compatible(&css_param->fc_config, - arg->fc_config, - sizeof(struct atomisp_css_fc_config), - from_user)) - return -EFAULT; - css_param->update_flag.fc_config = - (struct atomisp_fc_config *) &css_param->fc_config; - } - - if (arg->macc_config && (from_user || !cur_config->macc_config)) { - if (copy_from_compatible(&css_param->macc_config, - arg->macc_config, - sizeof(struct atomisp_css_macc_config), - from_user)) - return -EFAULT; - css_param->update_flag.macc_config = - (struct atomisp_macc_config *) - &css_param->macc_config; - } - - if (arg->aa_config && (from_user || !cur_config->aa_config)) { - if (copy_from_compatible(&css_param->aa_config, arg->aa_config, - sizeof(struct atomisp_css_aa_config), - from_user)) - return -EFAULT; - css_param->update_flag.aa_config = - (struct atomisp_aa_config *) &css_param->aa_config; - } - - if (arg->anr_config && (from_user || !cur_config->anr_config)) { - if (copy_from_compatible(&css_param->anr_config, - arg->anr_config, - sizeof(struct atomisp_css_anr_config), - from_user)) - return -EFAULT; - css_param->update_flag.anr_config = - (struct atomisp_anr_config *) - &css_param->anr_config; - } - - if (arg->xnr_config && (from_user || !cur_config->xnr_config)) { - if (copy_from_compatible(&css_param->xnr_config, - arg->xnr_config, - sizeof(struct atomisp_css_xnr_config), - from_user)) - return -EFAULT; - 
css_param->update_flag.xnr_config = - (struct atomisp_xnr_config *) - &css_param->xnr_config; - } - - if (arg->yuv2rgb_cc_config && - (from_user || !cur_config->yuv2rgb_cc_config)) { - if (copy_from_compatible(&css_param->yuv2rgb_cc_config, - arg->yuv2rgb_cc_config, - sizeof(struct atomisp_css_cc_config), - from_user)) - return -EFAULT; - css_param->update_flag.yuv2rgb_cc_config = - (struct atomisp_cc_config *) - &css_param->yuv2rgb_cc_config; - } - - if (arg->rgb2yuv_cc_config && - (from_user || !cur_config->rgb2yuv_cc_config)) { - if (copy_from_compatible(&css_param->rgb2yuv_cc_config, - arg->rgb2yuv_cc_config, - sizeof(struct atomisp_css_cc_config), - from_user)) - return -EFAULT; - css_param->update_flag.rgb2yuv_cc_config = - (struct atomisp_cc_config *) - &css_param->rgb2yuv_cc_config; - } - - if (arg->macc_table && (from_user || !cur_config->macc_table)) { - if (copy_from_compatible(&css_param->macc_table, - arg->macc_table, - sizeof(struct atomisp_css_macc_table), - from_user)) - return -EFAULT; - css_param->update_flag.macc_table = - (struct atomisp_macc_table *) - &css_param->macc_table; - } - - if (arg->xnr_table && (from_user || !cur_config->xnr_table)) { - if (copy_from_compatible(&css_param->xnr_table, - arg->xnr_table, - sizeof(struct atomisp_css_xnr_table), - from_user)) - return -EFAULT; - css_param->update_flag.xnr_table = - (struct atomisp_xnr_table *) &css_param->xnr_table; - } - - if (arg->r_gamma_table && (from_user || !cur_config->r_gamma_table)) { - if (copy_from_compatible(&css_param->r_gamma_table, - arg->r_gamma_table, - sizeof(struct atomisp_css_rgb_gamma_table), - from_user)) - return -EFAULT; - css_param->update_flag.r_gamma_table = - (struct atomisp_rgb_gamma_table *) - &css_param->r_gamma_table; - } - - if (arg->g_gamma_table && (from_user || !cur_config->g_gamma_table)) { - if (copy_from_compatible(&css_param->g_gamma_table, - arg->g_gamma_table, - sizeof(struct atomisp_css_rgb_gamma_table), - from_user)) - return -EFAULT; - 
css_param->update_flag.g_gamma_table = - (struct atomisp_rgb_gamma_table *) - &css_param->g_gamma_table; - } - - if (arg->b_gamma_table && (from_user || !cur_config->b_gamma_table)) { - if (copy_from_compatible(&css_param->b_gamma_table, - arg->b_gamma_table, - sizeof(struct atomisp_css_rgb_gamma_table), - from_user)) - return -EFAULT; - css_param->update_flag.b_gamma_table = - (struct atomisp_rgb_gamma_table *) - &css_param->b_gamma_table; - } - - if (arg->anr_thres && (from_user || !cur_config->anr_thres)) { - if (copy_from_compatible(&css_param->anr_thres, arg->anr_thres, - sizeof(struct atomisp_css_anr_thres), - from_user)) - return -EFAULT; - css_param->update_flag.anr_thres = - (struct atomisp_anr_thres *) &css_param->anr_thres; - } - - if (from_user) - css_param->isp_config_id = arg->isp_config_id; - /* - * These configurations are on used by ISP1.x, not for ISP2.x, - * so do not handle them. see comments of ia_css_isp_config. - * 1 cc_config - * 2 ce_config - * 3 de_config - * 4 gc_config - * 5 gamma_table - * 6 ctc_table - * 7 dvs_coefs - */ - return 0; -} - -int atomisp_cp_lsc_table(struct atomisp_sub_device *asd, - struct atomisp_shading_table *source_st, - struct atomisp_css_params *css_param, - bool from_user) -{ - unsigned int i; - unsigned int len_table; - struct atomisp_css_shading_table *shading_table; - struct atomisp_css_shading_table *old_table; -#ifdef ISP2401 - struct atomisp_shading_table st; -#endif - - if (!source_st) - return 0; - - if (!css_param) - return -EINVAL; - - if (!from_user && css_param->update_flag.shading_table) - return 0; - -#ifdef ISP2401 - if (copy_from_compatible(&st, source_st, - sizeof(struct atomisp_shading_table), - from_user)) { - dev_err(asd->isp->dev, "copy shading table failed!"); - return -EFAULT; - } - -#endif - old_table = css_param->shading_table; - -#ifdef ISP2401 - -#endif - /* user config is to disable the shading table. 
*/ -#ifndef ISP2401 - if (!source_st->enable) { -#else - if (!st.enable) { -#endif - /* Generate a minimum table with enable = 0. */ - shading_table = atomisp_css_shading_table_alloc(1, 1); - if (!shading_table) - return -ENOMEM; - shading_table->enable = 0; - goto set_lsc; - } - - /* Setting a new table. Validate first - all tables must be set */ - for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) { -#ifndef ISP2401 - if (!source_st->data[i]) -#else - if (!st.data[i]) { - dev_err(asd->isp->dev, "shading table validate failed"); -#endif - return -EINVAL; -#ifdef ISP2401 - } -#endif - } - - /* Shading table size per color */ -#ifndef ISP2401 - if (source_st->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR || - source_st->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) -#else - if (st.width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR || - st.height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) { - dev_err(asd->isp->dev, "shading table w/h validate failed!"); -#endif - return -EINVAL; -#ifdef ISP2401 - } -#endif - -#ifndef ISP2401 - shading_table = atomisp_css_shading_table_alloc(source_st->width, - source_st->height); - if (!shading_table) - return -ENOMEM; -#else - shading_table = atomisp_css_shading_table_alloc(st.width, - st.height); - if (!shading_table) { - dev_err(asd->isp->dev, "shading table alloc failed!"); - return -ENOMEM; - } -#endif - -#ifndef ISP2401 - len_table = source_st->width * source_st->height * ATOMISP_SC_TYPE_SIZE; -#else - len_table = st.width * st.height * ATOMISP_SC_TYPE_SIZE; -#endif - for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) { - if (copy_from_compatible(shading_table->data[i], -#ifndef ISP2401 - source_st->data[i], len_table, from_user)) { -#else - st.data[i], len_table, from_user)) { -#endif - atomisp_css_shading_table_free(shading_table); - return -EFAULT; - } - - } -#ifndef ISP2401 - shading_table->sensor_width = source_st->sensor_width; - shading_table->sensor_height = source_st->sensor_height; - shading_table->fraction_bits = source_st->fraction_bits; - 
shading_table->enable = source_st->enable; -#else - shading_table->sensor_width = st.sensor_width; - shading_table->sensor_height = st.sensor_height; - shading_table->fraction_bits = st.fraction_bits; - shading_table->enable = st.enable; -#endif - - /* No need to update shading table if it is the same */ - if (old_table != NULL && - old_table->sensor_width == shading_table->sensor_width && - old_table->sensor_height == shading_table->sensor_height && - old_table->width == shading_table->width && - old_table->height == shading_table->height && - old_table->fraction_bits == shading_table->fraction_bits && - old_table->enable == shading_table->enable) { - bool data_is_same = true; - - for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) { - if (memcmp(shading_table->data[i], old_table->data[i], - len_table) != 0) { - data_is_same = false; - break; - } - } - - if (data_is_same) { - atomisp_css_shading_table_free(shading_table); - return 0; - } - } - -set_lsc: - /* set LSC to CSS */ - css_param->shading_table = shading_table; - css_param->update_flag.shading_table = - (struct atomisp_shading_table *) shading_table; - asd->params.sc_en = shading_table != NULL; - - if (old_table) - atomisp_css_shading_table_free(old_table); - - return 0; -} - -int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd, - struct ia_css_dvs2_coefficients *coefs, - struct atomisp_css_params *css_param, - bool from_user) -{ - struct atomisp_css_dvs_grid_info *cur = - atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info); - int dvs_hor_coef_bytes, dvs_ver_coef_bytes; -#ifdef ISP2401 - struct ia_css_dvs2_coefficients dvs2_coefs; -#endif - - if (!coefs || !cur) - return 0; - - if (!from_user && css_param->update_flag.dvs2_coefs) - return 0; - -#ifndef ISP2401 - if (sizeof(*cur) != sizeof(coefs->grid) || - memcmp(&coefs->grid, cur, sizeof(coefs->grid))) { -#else - if (copy_from_compatible(&dvs2_coefs, coefs, - sizeof(struct ia_css_dvs2_coefficients), - from_user)) { - dev_err(asd->isp->dev, "copy 
dvs2 coef failed"); - return -EFAULT; - } - - if (sizeof(*cur) != sizeof(dvs2_coefs.grid) || - memcmp(&dvs2_coefs.grid, cur, sizeof(dvs2_coefs.grid))) { -#endif - dev_err(asd->isp->dev, "dvs grid mis-match!\n"); - /* If the grid info in the argument differs from the current - grid info, we tell the caller to reset the grid size and - try again. */ - return -EAGAIN; - } - -#ifndef ISP2401 - if (coefs->hor_coefs.odd_real == NULL || - coefs->hor_coefs.odd_imag == NULL || - coefs->hor_coefs.even_real == NULL || - coefs->hor_coefs.even_imag == NULL || - coefs->ver_coefs.odd_real == NULL || - coefs->ver_coefs.odd_imag == NULL || - coefs->ver_coefs.even_real == NULL || - coefs->ver_coefs.even_imag == NULL) -#else - if (dvs2_coefs.hor_coefs.odd_real == NULL || - dvs2_coefs.hor_coefs.odd_imag == NULL || - dvs2_coefs.hor_coefs.even_real == NULL || - dvs2_coefs.hor_coefs.even_imag == NULL || - dvs2_coefs.ver_coefs.odd_real == NULL || - dvs2_coefs.ver_coefs.odd_imag == NULL || - dvs2_coefs.ver_coefs.even_real == NULL || - dvs2_coefs.ver_coefs.even_imag == NULL) -#endif - return -EINVAL; - - if (!css_param->dvs2_coeff) { - /* DIS coefficients. 
*/ - css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur); - if (!css_param->dvs2_coeff) - return -ENOMEM; - } - - dvs_hor_coef_bytes = asd->params.dvs_hor_coef_bytes; - dvs_ver_coef_bytes = asd->params.dvs_ver_coef_bytes; - if (copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_real, -#ifndef ISP2401 - coefs->hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) || -#else - dvs2_coefs.hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) || -#endif - copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_imag, -#ifndef ISP2401 - coefs->hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) || -#else - dvs2_coefs.hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) || -#endif - copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_real, -#ifndef ISP2401 - coefs->hor_coefs.even_real, dvs_hor_coef_bytes, from_user) || -#else - dvs2_coefs.hor_coefs.even_real, dvs_hor_coef_bytes, from_user) || -#endif - copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_imag, -#ifndef ISP2401 - coefs->hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) || -#else - dvs2_coefs.hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) || -#endif - copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_real, -#ifndef ISP2401 - coefs->ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) || -#else - dvs2_coefs.ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) || -#endif - copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_imag, -#ifndef ISP2401 - coefs->ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) || -#else - dvs2_coefs.ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) || -#endif - copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_real, -#ifndef ISP2401 - coefs->ver_coefs.even_real, dvs_ver_coef_bytes, from_user) || -#else - dvs2_coefs.ver_coefs.even_real, dvs_ver_coef_bytes, from_user) || -#endif - copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_imag, -#ifndef ISP2401 - coefs->ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) { -#else - 
dvs2_coefs.ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) { -#endif - ia_css_dvs2_coefficients_free(css_param->dvs2_coeff); - css_param->dvs2_coeff = NULL; - return -EFAULT; - } - - css_param->update_flag.dvs2_coefs = - (struct atomisp_dvs2_coefficients *)css_param->dvs2_coeff; - return 0; -} - -int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd, - struct atomisp_dvs_6axis_config *source_6axis_config, - struct atomisp_css_params *css_param, - bool from_user) -{ - struct atomisp_css_dvs_6axis_config *dvs_6axis_config; - struct atomisp_css_dvs_6axis_config *old_6axis_config; -#ifdef ISP2401 - struct atomisp_css_dvs_6axis_config t_6axis_config; -#endif - struct ia_css_stream *stream = - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream; - struct atomisp_css_dvs_grid_info *dvs_grid_info = - atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info); - int ret = -EFAULT; - - if (stream == NULL) { - dev_err(asd->isp->dev, "%s: internal error!", __func__); - return -EINVAL; - } - - if (!source_6axis_config || !dvs_grid_info) - return 0; - - if (!dvs_grid_info->enable) - return 0; - - if (!from_user && css_param->update_flag.dvs_6axis_config) - return 0; - - /* check whether need to reallocate for 6 axis config */ - old_6axis_config = css_param->dvs_6axis; - dvs_6axis_config = old_6axis_config; -#ifdef ISP2401 - - if (copy_from_compatible(&t_6axis_config, source_6axis_config, - sizeof(struct atomisp_dvs_6axis_config), - from_user)) { - dev_err(asd->isp->dev, "copy morph table failed!"); - return -EFAULT; - } - -#endif - if (old_6axis_config && -#ifndef ISP2401 - (old_6axis_config->width_y != source_6axis_config->width_y || - old_6axis_config->height_y != source_6axis_config->height_y || - old_6axis_config->width_uv != source_6axis_config->width_uv || - old_6axis_config->height_uv != source_6axis_config->height_uv)) { -#else - (old_6axis_config->width_y != t_6axis_config.width_y || - old_6axis_config->height_y != t_6axis_config.height_y || - 
old_6axis_config->width_uv != t_6axis_config.width_uv || - old_6axis_config->height_uv != t_6axis_config.height_uv)) { -#endif - ia_css_dvs2_6axis_config_free(css_param->dvs_6axis); - css_param->dvs_6axis = NULL; - - dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream); - if (!dvs_6axis_config) - return -ENOMEM; - } else if (!dvs_6axis_config) { - dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream); - if (!dvs_6axis_config) - return -ENOMEM; - } - -#ifndef ISP2401 - dvs_6axis_config->exp_id = source_6axis_config->exp_id; -#else - dvs_6axis_config->exp_id = t_6axis_config.exp_id; -#endif - - if (copy_from_compatible(dvs_6axis_config->xcoords_y, -#ifndef ISP2401 - source_6axis_config->xcoords_y, - source_6axis_config->width_y * - source_6axis_config->height_y * - sizeof(*source_6axis_config->xcoords_y), -#else - t_6axis_config.xcoords_y, - t_6axis_config.width_y * - t_6axis_config.height_y * - sizeof(*dvs_6axis_config->xcoords_y), -#endif - from_user)) - goto error; - if (copy_from_compatible(dvs_6axis_config->ycoords_y, -#ifndef ISP2401 - source_6axis_config->ycoords_y, - source_6axis_config->width_y * - source_6axis_config->height_y * - sizeof(*source_6axis_config->ycoords_y), -#else - t_6axis_config.ycoords_y, - t_6axis_config.width_y * - t_6axis_config.height_y * - sizeof(*dvs_6axis_config->ycoords_y), -#endif - from_user)) - goto error; - if (copy_from_compatible(dvs_6axis_config->xcoords_uv, -#ifndef ISP2401 - source_6axis_config->xcoords_uv, - source_6axis_config->width_uv * - source_6axis_config->height_uv * - sizeof(*source_6axis_config->xcoords_uv), -#else - t_6axis_config.xcoords_uv, - t_6axis_config.width_uv * - t_6axis_config.height_uv * - sizeof(*dvs_6axis_config->xcoords_uv), -#endif - from_user)) - goto error; - if (copy_from_compatible(dvs_6axis_config->ycoords_uv, -#ifndef ISP2401 - source_6axis_config->ycoords_uv, - source_6axis_config->width_uv * - source_6axis_config->height_uv * - sizeof(*source_6axis_config->ycoords_uv), -#else 
- t_6axis_config.ycoords_uv, - t_6axis_config.width_uv * - t_6axis_config.height_uv * - sizeof(*dvs_6axis_config->ycoords_uv), -#endif - from_user)) - goto error; - - css_param->dvs_6axis = dvs_6axis_config; - css_param->update_flag.dvs_6axis_config = - (struct atomisp_dvs_6axis_config *) dvs_6axis_config; - return 0; - -error: - if (dvs_6axis_config) - ia_css_dvs2_6axis_config_free(dvs_6axis_config); - return ret; -} - -int atomisp_cp_morph_table(struct atomisp_sub_device *asd, - struct atomisp_morph_table *source_morph_table, - struct atomisp_css_params *css_param, - bool from_user) -{ - int ret = -EFAULT; - unsigned int i; - struct atomisp_css_morph_table *morph_table; -#ifdef ISP2401 - struct atomisp_css_morph_table mtbl; -#endif - struct atomisp_css_morph_table *old_morph_table; - - if (!source_morph_table) - return 0; - - if (!from_user && css_param->update_flag.morph_table) - return 0; - - old_morph_table = css_param->morph_table; - -#ifdef ISP2401 - if (copy_from_compatible(&mtbl, source_morph_table, - sizeof(struct atomisp_morph_table), - from_user)) { - dev_err(asd->isp->dev, "copy morph table failed!"); - return -EFAULT; - } - -#endif - morph_table = atomisp_css_morph_table_allocate( -#ifndef ISP2401 - source_morph_table->width, - source_morph_table->height); -#else - mtbl.width, - mtbl.height); -#endif - if (!morph_table) - return -ENOMEM; - - for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) { - if (copy_from_compatible(morph_table->coordinates_x[i], - (__force void *)source_morph_table->coordinates_x[i], -#ifndef ISP2401 - source_morph_table->height * source_morph_table->width * - sizeof(*source_morph_table->coordinates_x[i]), -#else - mtbl.height * mtbl.width * - sizeof(*morph_table->coordinates_x[i]), -#endif - from_user)) - goto error; - - if (copy_from_compatible(morph_table->coordinates_y[i], - (__force void *)source_morph_table->coordinates_y[i], -#ifndef ISP2401 - source_morph_table->height * source_morph_table->width * - 
sizeof(*source_morph_table->coordinates_y[i]), -#else - mtbl.height * mtbl.width * - sizeof(*morph_table->coordinates_y[i]), -#endif - from_user)) - goto error; - } - - css_param->morph_table = morph_table; - if (old_morph_table) - atomisp_css_morph_table_free(old_morph_table); - css_param->update_flag.morph_table = - (struct atomisp_morph_table *) morph_table; - return 0; - -error: - if (morph_table) - atomisp_css_morph_table_free(morph_table); - return ret; -} - -int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd, - struct atomisp_parameters *arg, - struct atomisp_css_params *css_param) -{ - int ret; - - ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, false); - if (ret) - return ret; - ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, false); - if (ret) - return ret; - ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, false); - if (ret) - return ret; - ret = atomisp_css_cp_dvs2_coefs(asd, - (struct ia_css_dvs2_coefficients *) arg->dvs2_coefs, - css_param, false); - if (ret) - return ret; - ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config, - css_param, false); - return ret; -} - -void atomisp_free_css_parameters(struct atomisp_css_params *css_param) -{ - if (css_param->dvs_6axis) { - ia_css_dvs2_6axis_config_free(css_param->dvs_6axis); - css_param->dvs_6axis = NULL; - } - if (css_param->dvs2_coeff) { - ia_css_dvs2_coefficients_free(css_param->dvs2_coeff); - css_param->dvs2_coeff = NULL; - } - if (css_param->shading_table) { - ia_css_shading_table_free(css_param->shading_table); - css_param->shading_table = NULL; - } - if (css_param->morph_table) { - ia_css_morph_table_free(css_param->morph_table); - css_param->morph_table = NULL; - } -} - -/* - * Check parameter queue list and buffer queue list to find out if matched items - * and then set parameter to CSS and enqueue buffer to CSS. 
- * Of course, if the buffer in buffer waiting list is not bound to a per-frame - * parameter, it will be enqueued into CSS as long as the per-frame setting - * buffers before it get enqueued. - */ -void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe) -{ - struct atomisp_sub_device *asd = pipe->asd; - struct videobuf_buffer *vb = NULL, *vb_tmp; - struct atomisp_css_params_with_list *param = NULL, *param_tmp; - struct videobuf_vmalloc_memory *vm_mem = NULL; - unsigned long irqflags; - bool need_to_enqueue_buffer = false; - - if (atomisp_is_vf_pipe(pipe)) - return; - - /* - * CSS/FW requires set parameter and enqueue buffer happen after ISP - * is streamon. - */ - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return; - - if (list_empty(&pipe->per_frame_params) || - list_empty(&pipe->buffers_waiting_for_param)) - return; - - list_for_each_entry_safe(vb, vb_tmp, - &pipe->buffers_waiting_for_param, queue) { - if (pipe->frame_request_config_id[vb->i]) { - list_for_each_entry_safe(param, param_tmp, - &pipe->per_frame_params, list) { - if (pipe->frame_request_config_id[vb->i] != - param->params.isp_config_id) - continue; - - list_del(¶m->list); - list_del(&vb->queue); - /* - * clear the request config id as the buffer - * will be handled and enqueued into CSS soon - */ - pipe->frame_request_config_id[vb->i] = 0; - pipe->frame_params[vb->i] = param; - vm_mem = vb->priv; - BUG_ON(!vm_mem); - break; - } - - if (vm_mem) { - spin_lock_irqsave(&pipe->irq_lock, irqflags); - list_add_tail(&vb->queue, &pipe->activeq); - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - vm_mem = NULL; - need_to_enqueue_buffer = true; - } else { - /* The is the end, stop further loop */ - break; - } - } else { - list_del(&vb->queue); - pipe->frame_params[vb->i] = NULL; - spin_lock_irqsave(&pipe->irq_lock, irqflags); - list_add_tail(&vb->queue, &pipe->activeq); - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - need_to_enqueue_buffer = true; - } - } - - if 
(need_to_enqueue_buffer) { - atomisp_qbuffers_to_css(asd); -#ifndef ISP2401 - if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd)) - atomisp_wdt_start(asd); -#else - if (atomisp_buffers_queued_pipe(pipe)) { - if (!atomisp_is_wdt_running(pipe)) - atomisp_wdt_start(pipe); - else - atomisp_wdt_refresh_pipe(pipe, - ATOMISP_WDT_KEEP_CURRENT_DELAY); - } -#endif - } -} - -/* -* Function to configure ISP parameters -*/ -int atomisp_set_parameters(struct video_device *vdev, - struct atomisp_parameters *arg) -{ - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct atomisp_css_params_with_list *param = NULL; - struct atomisp_css_params *css_param = &asd->params.css_param; - int ret; - - if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream == NULL) { - dev_err(asd->isp->dev, "%s: internal error!\n", __func__); - return -EINVAL; - } - - dev_dbg(asd->isp->dev, "%s: set parameter(per_frame_setting %d) for asd%d with isp_config_id %d of %s\n", - __func__, arg->per_frame_setting, asd->index, - arg->isp_config_id, vdev->name); -#ifdef ISP2401 - - if (atomisp_is_vf_pipe(pipe) && arg->per_frame_setting) { - dev_err(asd->isp->dev, "%s: vf pipe not support per_frame_setting", - __func__); - return -EINVAL; - } - -#endif - if (arg->per_frame_setting && !atomisp_is_vf_pipe(pipe)) { - /* - * Per-frame setting enabled, we allocate a new paramter - * buffer to cache the parameters and only when frame buffers - * are ready, the parameters will be set to CSS. - * per-frame setting only works for the main output frame. 
- */ - param = kvzalloc(sizeof(*param), GFP_KERNEL); - if (!param) { - dev_err(asd->isp->dev, "%s: failed to alloc params buffer\n", - __func__); - return -ENOMEM; - } - css_param = ¶m->params; - } - - ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, true); - if (ret) - goto apply_parameter_failed; - - ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, true); - if (ret) - goto apply_parameter_failed; - - ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, true); - if (ret) - goto apply_parameter_failed; - - ret = atomisp_css_cp_dvs2_coefs(asd, - (struct ia_css_dvs2_coefficients *) arg->dvs2_coefs, - css_param, true); - if (ret) - goto apply_parameter_failed; - - ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config, - css_param, true); - if (ret) - goto apply_parameter_failed; - - if (!(arg->per_frame_setting && !atomisp_is_vf_pipe(pipe))) { - /* indicate to CSS that we have parameters to be updated */ - asd->params.css_update_params_needed = true; - } else { - list_add_tail(¶m->list, &pipe->per_frame_params); - atomisp_handle_parameter_and_buffer(pipe); - } - - return 0; - -apply_parameter_failed: - if (css_param) - atomisp_free_css_parameters(css_param); - if (param) - kvfree(param); - - return ret; -} - -/* - * Function to set/get isp parameters to isp - */ -int atomisp_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_parm *config) -{ - struct atomisp_device *isp = asd->isp; - struct ia_css_pipe_config *vp_cfg = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. 
- pipe_configs[IA_CSS_PIPE_ID_VIDEO]; - - /* Read parameter for 3A binary info */ - if (flag == 0) { - struct atomisp_css_dvs_grid_info *dvs_grid_info = - atomisp_css_get_dvs_grid_info( - &asd->params.curr_grid_info); - - if (&config->info == NULL) { - dev_err(isp->dev, "ERROR: NULL pointer in grid_info\n"); - return -EINVAL; - } - atomisp_curr_user_grid_info(asd, &config->info); - - /* We always return the resolution and stride even if there is - * no valid metadata. This allows the caller to get the - * information needed to allocate user-space buffers. */ - config->metadata_config.metadata_height = asd-> - stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info. - metadata_info.resolution.height; - config->metadata_config.metadata_stride = asd-> - stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info. - metadata_info.stride; - - /* update dvs grid info */ - if (dvs_grid_info) - memcpy(&config->dvs_grid, - dvs_grid_info, - sizeof(struct atomisp_css_dvs_grid_info)); - - if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) { - config->dvs_envelop.width = 0; - config->dvs_envelop.height = 0; - return 0; - } - - /* update dvs envelop info */ - if (!asd->continuous_mode->val) { - config->dvs_envelop.width = vp_cfg->dvs_envelope.width; - config->dvs_envelop.height = - vp_cfg->dvs_envelope.height; - } else { - unsigned int dvs_w, dvs_h, dvs_w_max, dvs_h_max; - - dvs_w = vp_cfg->bayer_ds_out_res.width - - vp_cfg->output_info[0].res.width; - dvs_h = vp_cfg->bayer_ds_out_res.height - - vp_cfg->output_info[0].res.height; - dvs_w_max = rounddown( - vp_cfg->output_info[0].res.width / 5, - ATOM_ISP_STEP_WIDTH); - dvs_h_max = rounddown( - vp_cfg->output_info[0].res.height / 5, - ATOM_ISP_STEP_HEIGHT); - - config->dvs_envelop.width = min(dvs_w, dvs_w_max); - config->dvs_envelop.height = min(dvs_h, dvs_h_max); - } - - return 0; - } - - memcpy(&asd->params.css_param.wb_config, &config->wb_config, - sizeof(struct atomisp_css_wb_config)); - memcpy(&asd->params.css_param.ob_config, 
&config->ob_config, - sizeof(struct atomisp_css_ob_config)); - memcpy(&asd->params.css_param.dp_config, &config->dp_config, - sizeof(struct atomisp_css_dp_config)); - memcpy(&asd->params.css_param.de_config, &config->de_config, - sizeof(struct atomisp_css_de_config)); - memcpy(&asd->params.css_param.dz_config, &config->dz_config, - sizeof(struct atomisp_css_dz_config)); - memcpy(&asd->params.css_param.ce_config, &config->ce_config, - sizeof(struct atomisp_css_ce_config)); - memcpy(&asd->params.css_param.nr_config, &config->nr_config, - sizeof(struct atomisp_css_nr_config)); - memcpy(&asd->params.css_param.ee_config, &config->ee_config, - sizeof(struct atomisp_css_ee_config)); - memcpy(&asd->params.css_param.tnr_config, &config->tnr_config, - sizeof(struct atomisp_css_tnr_config)); - - if (asd->params.color_effect == V4L2_COLORFX_NEGATIVE) { - asd->params.css_param.cc_config.matrix[3] = -config->cc_config.matrix[3]; - asd->params.css_param.cc_config.matrix[4] = -config->cc_config.matrix[4]; - asd->params.css_param.cc_config.matrix[5] = -config->cc_config.matrix[5]; - asd->params.css_param.cc_config.matrix[6] = -config->cc_config.matrix[6]; - asd->params.css_param.cc_config.matrix[7] = -config->cc_config.matrix[7]; - asd->params.css_param.cc_config.matrix[8] = -config->cc_config.matrix[8]; - } - - if (asd->params.color_effect != V4L2_COLORFX_SEPIA && - asd->params.color_effect != V4L2_COLORFX_BW) { - memcpy(&asd->params.css_param.cc_config, &config->cc_config, - sizeof(struct atomisp_css_cc_config)); - atomisp_css_set_cc_config(asd, &asd->params.css_param.cc_config); - } - - atomisp_css_set_wb_config(asd, &asd->params.css_param.wb_config); - atomisp_css_set_ob_config(asd, &asd->params.css_param.ob_config); - atomisp_css_set_de_config(asd, &asd->params.css_param.de_config); - atomisp_css_set_dz_config(asd, &asd->params.css_param.dz_config); - atomisp_css_set_ce_config(asd, &asd->params.css_param.ce_config); - atomisp_css_set_dp_config(asd, 
&asd->params.css_param.dp_config); - atomisp_css_set_nr_config(asd, &asd->params.css_param.nr_config); - atomisp_css_set_ee_config(asd, &asd->params.css_param.ee_config); - atomisp_css_set_tnr_config(asd, &asd->params.css_param.tnr_config); - asd->params.css_update_params_needed = true; - - return 0; -} - -/* - * Function to configure color effect of the image - */ -int atomisp_color_effect(struct atomisp_sub_device *asd, int flag, - __s32 *effect) -{ - struct atomisp_css_cc_config *cc_config = NULL; - struct atomisp_css_macc_table *macc_table = NULL; - struct atomisp_css_ctc_table *ctc_table = NULL; - int ret = 0; - struct v4l2_control control; - struct atomisp_device *isp = asd->isp; - - if (flag == 0) { - *effect = asd->params.color_effect; - return 0; - } - - - control.id = V4L2_CID_COLORFX; - control.value = *effect; - ret = - v4l2_s_ctrl(NULL, isp->inputs[asd->input_curr].camera->ctrl_handler, - &control); - /* - * if set color effect to sensor successfully, return - * 0 directly. - */ - if (!ret) { - asd->params.color_effect = (u32)*effect; - return 0; - } - - if (*effect == asd->params.color_effect) - return 0; - - /* - * isp_subdev->params.macc_en should be set to false. 
- */ - asd->params.macc_en = false; - - switch (*effect) { - case V4L2_COLORFX_NONE: - macc_table = &asd->params.css_param.macc_table; - asd->params.macc_en = true; - break; - case V4L2_COLORFX_SEPIA: - cc_config = &sepia_cc_config; - break; - case V4L2_COLORFX_NEGATIVE: - cc_config = &nega_cc_config; - break; - case V4L2_COLORFX_BW: - cc_config = &mono_cc_config; - break; - case V4L2_COLORFX_SKY_BLUE: - macc_table = &blue_macc_table; - asd->params.macc_en = true; - break; - case V4L2_COLORFX_GRASS_GREEN: - macc_table = &green_macc_table; - asd->params.macc_en = true; - break; - case V4L2_COLORFX_SKIN_WHITEN_LOW: - macc_table = &skin_low_macc_table; - asd->params.macc_en = true; - break; - case V4L2_COLORFX_SKIN_WHITEN: - macc_table = &skin_medium_macc_table; - asd->params.macc_en = true; - break; - case V4L2_COLORFX_SKIN_WHITEN_HIGH: - macc_table = &skin_high_macc_table; - asd->params.macc_en = true; - break; - case V4L2_COLORFX_VIVID: - ctc_table = &vivid_ctc_table; - break; - default: - return -EINVAL; - } - atomisp_update_capture_mode(asd); - - if (cc_config) - atomisp_css_set_cc_config(asd, cc_config); - if (macc_table) - atomisp_css_set_macc_table(asd, macc_table); - if (ctc_table) - atomisp_css_set_ctc_table(asd, ctc_table); - asd->params.color_effect = (u32)*effect; - asd->params.css_update_params_needed = true; - return 0; -} - -/* - * Function to configure bad pixel correction - */ -int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag, - __s32 *value) -{ - - if (flag == 0) { - *value = asd->params.bad_pixel_en; - return 0; - } - asd->params.bad_pixel_en = !!*value; - - return 0; -} - -/* - * Function to configure bad pixel correction params - */ -int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_dp_config *config) -{ - if (flag == 0) { - /* Get bad pixel from current setup */ - if (atomisp_css_get_dp_config(asd, config)) - return -EINVAL; - } else { - /* Set bad pixel to isp parameters */ - 
memcpy(&asd->params.css_param.dp_config, config, - sizeof(asd->params.css_param.dp_config)); - atomisp_css_set_dp_config(asd, &asd->params.css_param.dp_config); - asd->params.css_update_params_needed = true; - } - - return 0; -} - -/* - * Function to enable/disable video image stablization - */ -int atomisp_video_stable(struct atomisp_sub_device *asd, int flag, - __s32 *value) -{ - if (flag == 0) - *value = asd->params.video_dis_en; - else - asd->params.video_dis_en = !!*value; - - return 0; -} - -/* - * Function to configure fixed pattern noise - */ -int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag, - __s32 *value) -{ - - if (flag == 0) { - *value = asd->params.fpn_en; - return 0; - } - - if (*value == 0) { - asd->params.fpn_en = false; - return 0; - } - - /* Add function to get black from from sensor with shutter off */ - return 0; -} - -static unsigned int -atomisp_bytesperline_to_padded_width(unsigned int bytesperline, - enum atomisp_css_frame_format format) -{ - switch (format) { - case CSS_FRAME_FORMAT_UYVY: - case CSS_FRAME_FORMAT_YUYV: - case CSS_FRAME_FORMAT_RAW: - case CSS_FRAME_FORMAT_RGB565: - return bytesperline/2; - case CSS_FRAME_FORMAT_RGBA888: - return bytesperline/4; - /* The following cases could be removed, but we leave them - in to document the formats that are included. 
*/ - case CSS_FRAME_FORMAT_NV11: - case CSS_FRAME_FORMAT_NV12: - case CSS_FRAME_FORMAT_NV16: - case CSS_FRAME_FORMAT_NV21: - case CSS_FRAME_FORMAT_NV61: - case CSS_FRAME_FORMAT_YV12: - case CSS_FRAME_FORMAT_YV16: - case CSS_FRAME_FORMAT_YUV420: - case CSS_FRAME_FORMAT_YUV420_16: - case CSS_FRAME_FORMAT_YUV422: - case CSS_FRAME_FORMAT_YUV422_16: - case CSS_FRAME_FORMAT_YUV444: - case CSS_FRAME_FORMAT_YUV_LINE: - case CSS_FRAME_FORMAT_PLANAR_RGB888: - case CSS_FRAME_FORMAT_QPLANE6: - case CSS_FRAME_FORMAT_BINARY_8: - default: - return bytesperline; - } -} - -static int -atomisp_v4l2_framebuffer_to_css_frame(const struct v4l2_framebuffer *arg, - struct atomisp_css_frame **result) -{ - struct atomisp_css_frame *res = NULL; - unsigned int padded_width; - enum atomisp_css_frame_format sh_format; - char *tmp_buf = NULL; - int ret = 0; - - sh_format = v4l2_fmt_to_sh_fmt(arg->fmt.pixelformat); - padded_width = atomisp_bytesperline_to_padded_width( - arg->fmt.bytesperline, sh_format); - - /* Note: the padded width on an atomisp_css_frame is in elements, not in - bytes. The RAW frame we use here should always be a 16bit RAW - frame. 
This is why we bytesperline/2 is equal to the padded with */ - if (atomisp_css_frame_allocate(&res, arg->fmt.width, arg->fmt.height, - sh_format, padded_width, 0)) { - ret = -ENOMEM; - goto err; - } - - tmp_buf = vmalloc(arg->fmt.sizeimage); - if (!tmp_buf) { - ret = -ENOMEM; - goto err; - } - if (copy_from_user(tmp_buf, (void __user __force *)arg->base, - arg->fmt.sizeimage)) { - ret = -EFAULT; - goto err; - } - - if (hmm_store(res->data, tmp_buf, arg->fmt.sizeimage)) { - ret = -EINVAL; - goto err; - } - -err: - if (ret && res) - atomisp_css_frame_free(res); - if (tmp_buf) - vfree(tmp_buf); - if (ret == 0) - *result = res; - return ret; -} - -/* - * Function to configure fixed pattern noise table - */ -int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd, - struct v4l2_framebuffer *arg) -{ - struct atomisp_css_frame *raw_black_frame = NULL; - int ret; - - if (arg == NULL) - return -EINVAL; - - ret = atomisp_v4l2_framebuffer_to_css_frame(arg, &raw_black_frame); - if (ret) - return ret; - if (atomisp_css_set_black_frame(asd, raw_black_frame)) - ret = -ENOMEM; - - atomisp_css_frame_free(raw_black_frame); - return ret; -} - -/* - * Function to configure false color correction - */ -int atomisp_false_color(struct atomisp_sub_device *asd, int flag, - __s32 *value) -{ - /* Get nr config from current setup */ - if (flag == 0) { - *value = asd->params.false_color; - return 0; - } - - /* Set nr config to isp parameters */ - if (*value) { - atomisp_css_set_default_de_config(asd); - } else { - asd->params.css_param.de_config.pixelnoise = 0; - atomisp_css_set_de_config(asd, &asd->params.css_param.de_config); - } - asd->params.css_update_params_needed = true; - asd->params.false_color = *value; - return 0; -} - -/* - * Function to configure bad pixel correction params - */ -int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_de_config *config) -{ - if (flag == 0) { - /* Get false color from current setup */ - if 
(atomisp_css_get_de_config(asd, config)) - return -EINVAL; - } else { - /* Set false color to isp parameters */ - memcpy(&asd->params.css_param.de_config, config, - sizeof(asd->params.css_param.de_config)); - atomisp_css_set_de_config(asd, &asd->params.css_param.de_config); - asd->params.css_update_params_needed = true; - } - - return 0; -} - -/* - * Function to configure white balance params - */ -int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_wb_config *config) -{ - if (flag == 0) { - /* Get white balance from current setup */ - if (atomisp_css_get_wb_config(asd, config)) - return -EINVAL; - } else { - /* Set white balance to isp parameters */ - memcpy(&asd->params.css_param.wb_config, config, - sizeof(asd->params.css_param.wb_config)); - atomisp_css_set_wb_config(asd, &asd->params.css_param.wb_config); - asd->params.css_update_params_needed = true; - } - - return 0; -} - -int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_3a_config *config) -{ - struct atomisp_device *isp = asd->isp; - - dev_dbg(isp->dev, ">%s %d\n", __func__, flag); - - if (flag == 0) { - /* Get white balance from current setup */ - if (atomisp_css_get_3a_config(asd, config)) - return -EINVAL; - } else { - /* Set white balance to isp parameters */ - memcpy(&asd->params.css_param.s3a_config, config, - sizeof(asd->params.css_param.s3a_config)); - atomisp_css_set_3a_config(asd, &asd->params.css_param.s3a_config); - asd->params.css_update_params_needed = true; - } - - dev_dbg(isp->dev, "<%s %d\n", __func__, flag); - return 0; -} - -/* - * Function to setup digital zoom - */ -int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag, - __s32 *value) -{ - u32 zoom; - struct atomisp_device *isp = asd->isp; - - unsigned int max_zoom = MRFLD_MAX_ZOOM_FACTOR; - - if (flag == 0) { - atomisp_css_get_zoom_factor(asd, &zoom); - *value = max_zoom - zoom; - } else { - if (*value < 0) - return -EINVAL; - - zoom = max_zoom - 
min_t(u32, max_zoom - 1, *value); - atomisp_css_set_zoom_factor(asd, zoom); - - dev_dbg(isp->dev, "%s, zoom: %d\n", __func__, zoom); - asd->params.css_update_params_needed = true; - } - - return 0; -} - -/* - * Function to get sensor specific info for current resolution, - * which will be used for auto exposure conversion. - */ -int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd, - struct atomisp_sensor_mode_data *config) -{ - struct camera_mipi_info *mipi_info; - struct atomisp_device *isp = asd->isp; - - mipi_info = atomisp_to_sensor_mipi_info( - isp->inputs[asd->input_curr].camera); - if (mipi_info == NULL) - return -EINVAL; - - memcpy(config, &mipi_info->data, sizeof(*config)); - return 0; -} - -int atomisp_get_fmt(struct video_device *vdev, struct v4l2_format *f) -{ - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - f->fmt.pix = pipe->pix; - - return 0; -} - -static void __atomisp_update_stream_env(struct atomisp_sub_device *asd, - uint16_t stream_index, struct atomisp_input_stream_info *stream_info) -{ - int i; - -#if defined(ISP2401_NEW_INPUT_SYSTEM) - /* assign virtual channel id return from sensor driver query */ - asd->stream_env[stream_index].ch_id = stream_info->ch_id; -#endif - asd->stream_env[stream_index].isys_configs = stream_info->isys_configs; - for (i = 0; i < stream_info->isys_configs; i++) { - asd->stream_env[stream_index].isys_info[i].input_format = - stream_info->isys_info[i].input_format; - asd->stream_env[stream_index].isys_info[i].width = - stream_info->isys_info[i].width; - asd->stream_env[stream_index].isys_info[i].height = - stream_info->isys_info[i].height; - } -} - -static void __atomisp_init_stream_info(uint16_t stream_index, - struct atomisp_input_stream_info *stream_info) -{ - int i; - - stream_info->enable = 1; - stream_info->stream = stream_index; - stream_info->ch_id = 0; - stream_info->isys_configs = 0; - for (i = 0; i < MAX_STREAMS_PER_CHANNEL; i++) { - stream_info->isys_info[i].input_format = 0; 
- stream_info->isys_info[i].width = 0; - stream_info->isys_info[i].height = 0; - } -} - -/* This function looks up the closest available resolution. */ -int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f, - bool *res_overflow) -{ - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct v4l2_subdev_pad_config pad_cfg; - struct v4l2_subdev_format format = { - .which = V4L2_SUBDEV_FORMAT_TRY, - }; - - struct v4l2_mbus_framefmt *snr_mbus_fmt = &format.format; - const struct atomisp_format_bridge *fmt; - struct atomisp_input_stream_info *stream_info = - (struct atomisp_input_stream_info *)snr_mbus_fmt->reserved; - uint16_t stream_index; - int source_pad = atomisp_subdev_source_pad(vdev); - int ret; - - if (isp->inputs[asd->input_curr].camera == NULL) - return -EINVAL; - - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); - fmt = atomisp_get_format_bridge(f->fmt.pix.pixelformat); - if (fmt == NULL) { - dev_err(isp->dev, "unsupported pixelformat!\n"); - fmt = atomisp_output_fmts; - } - -#ifdef ISP2401 - if (f->fmt.pix.width <= 0 || f->fmt.pix.height <= 0) - return -EINVAL; - -#endif - snr_mbus_fmt->code = fmt->mbus_code; - snr_mbus_fmt->width = f->fmt.pix.width; - snr_mbus_fmt->height = f->fmt.pix.height; - - __atomisp_init_stream_info(stream_index, stream_info); - - dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n", - snr_mbus_fmt->width, snr_mbus_fmt->height); - - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - pad, set_fmt, &pad_cfg, &format); - if (ret) - return ret; - - dev_dbg(isp->dev, "try_mbus_fmt: got %ux%u\n", - snr_mbus_fmt->width, snr_mbus_fmt->height); - - fmt = atomisp_get_format_bridge_from_mbus(snr_mbus_fmt->code); - if (fmt == NULL) { - dev_err(isp->dev, "unknown sensor format 0x%8.8x\n", - snr_mbus_fmt->code); - return -EINVAL; - } - - f->fmt.pix.pixelformat = fmt->pixelformat; - - /* - * If the format is jpeg or custom RAW, 
then the width and height will - * not satisfy the normal atomisp requirements and no need to check - * the below conditions. So just assign to what is being returned from - * the sensor driver. - */ - if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG || - f->fmt.pix.pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW) { - f->fmt.pix.width = snr_mbus_fmt->width; - f->fmt.pix.height = snr_mbus_fmt->height; - return 0; - } - - if (snr_mbus_fmt->width < f->fmt.pix.width - && snr_mbus_fmt->height < f->fmt.pix.height) { - f->fmt.pix.width = snr_mbus_fmt->width; - f->fmt.pix.height = snr_mbus_fmt->height; - /* Set the flag when resolution requested is - * beyond the max value supported by sensor - */ - if (res_overflow != NULL) - *res_overflow = true; - } - - /* app vs isp */ - f->fmt.pix.width = rounddown( - clamp_t(u32, f->fmt.pix.width, ATOM_ISP_MIN_WIDTH, - ATOM_ISP_MAX_WIDTH), ATOM_ISP_STEP_WIDTH); - f->fmt.pix.height = rounddown( - clamp_t(u32, f->fmt.pix.height, ATOM_ISP_MIN_HEIGHT, - ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT); - - return 0; -} - -static int -atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f) -{ - u32 width = f->fmt.pix.width; - u32 height = f->fmt.pix.height; - u32 pixelformat = f->fmt.pix.pixelformat; - enum v4l2_field field = f->fmt.pix.field; - u32 depth; - - if (!atomisp_get_format_bridge(pixelformat)) { - dev_err(isp->dev, "Wrong output pixelformat\n"); - return -EINVAL; - } - - depth = get_pixel_depth(pixelformat); - - if (field == V4L2_FIELD_ANY) - field = V4L2_FIELD_NONE; - else if (field != V4L2_FIELD_NONE) { - dev_err(isp->dev, "Wrong output field\n"); - return -EINVAL; - } - - f->fmt.pix.field = field; - f->fmt.pix.width = clamp_t(u32, - rounddown(width, (u32)ATOM_ISP_STEP_WIDTH), - ATOM_ISP_MIN_WIDTH, ATOM_ISP_MAX_WIDTH); - f->fmt.pix.height = clamp_t(u32, rounddown(height, - (u32)ATOM_ISP_STEP_HEIGHT), - ATOM_ISP_MIN_HEIGHT, ATOM_ISP_MAX_HEIGHT); - f->fmt.pix.bytesperline = (width * depth) >> 3; - - return 0; -} - -enum 
mipi_port_id __get_mipi_port(struct atomisp_device *isp, - enum atomisp_camera_port port) -{ - switch (port) { - case ATOMISP_CAMERA_PORT_PRIMARY: - return MIPI_PORT0_ID; - case ATOMISP_CAMERA_PORT_SECONDARY: - return MIPI_PORT1_ID; - case ATOMISP_CAMERA_PORT_TERTIARY: - if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID) - return MIPI_PORT1_ID + 1; - /* go through down for else case */ - default: - dev_err(isp->dev, "unsupported port: %d\n", port); - return MIPI_PORT0_ID; - } -} - -static inline int atomisp_set_sensor_mipi_to_isp( - struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - struct camera_mipi_info *mipi_info) -{ - struct v4l2_control ctrl; - struct atomisp_device *isp = asd->isp; - const struct atomisp_in_fmt_conv *fc; - int mipi_freq = 0; - unsigned int input_format, bayer_order; - - ctrl.id = V4L2_CID_LINK_FREQ; - if (v4l2_g_ctrl - (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0) - mipi_freq = ctrl.value; - - if (asd->stream_env[stream_id].isys_configs == 1) { - input_format = - asd->stream_env[stream_id].isys_info[0].input_format; - atomisp_css_isys_set_format(asd, stream_id, - input_format, IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX); - } else if (asd->stream_env[stream_id].isys_configs == 2) { - atomisp_css_isys_two_stream_cfg_update_stream1( - asd, stream_id, - asd->stream_env[stream_id].isys_info[0].input_format, - asd->stream_env[stream_id].isys_info[0].width, - asd->stream_env[stream_id].isys_info[0].height); - - atomisp_css_isys_two_stream_cfg_update_stream2( - asd, stream_id, - asd->stream_env[stream_id].isys_info[1].input_format, - asd->stream_env[stream_id].isys_info[1].width, - asd->stream_env[stream_id].isys_info[1].height); - } - - /* Compatibility for sensors which provide no media bus code - * in s_mbus_framefmt() nor support pad formats. 
*/ - if (mipi_info->input_format != -1) { - bayer_order = mipi_info->raw_bayer_order; - - /* Input stream config is still needs configured */ - /* TODO: Check if this is necessary */ - fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt( - mipi_info->input_format); - if (!fc) - return -EINVAL; - input_format = fc->css_stream_fmt; - } else { - struct v4l2_mbus_framefmt *sink; - sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - fc = atomisp_find_in_fmt_conv(sink->code); - if (!fc) - return -EINVAL; - input_format = fc->css_stream_fmt; - bayer_order = fc->bayer_order; - } - - atomisp_css_input_set_format(asd, stream_id, input_format); - atomisp_css_input_set_bayer_order(asd, stream_id, bayer_order); - - fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt( - mipi_info->metadata_format); - if (!fc) - return -EINVAL; - input_format = fc->css_stream_fmt; - atomisp_css_input_configure_port(asd, - __get_mipi_port(asd->isp, mipi_info->port), - mipi_info->num_lanes, - 0xffff4, mipi_freq, - input_format, - mipi_info->metadata_width, - mipi_info->metadata_height); - return 0; -} - -static int __enable_continuous_mode(struct atomisp_sub_device *asd, - bool enable) -{ - struct atomisp_device *isp = asd->isp; - - dev_dbg(isp->dev, - "continuous mode %d, raw buffers %d, stop preview %d\n", - enable, asd->continuous_raw_buffer_size->val, - !asd->continuous_viewfinder->val); -#ifndef ISP2401 - atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_PRIMARY); -#else - atomisp_update_capture_mode(asd); -#endif - /* in case of ANR, force capture pipe to offline mode */ - atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, - asd->params.low_light ? 
false : !enable); - atomisp_css_preview_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, - !enable); - atomisp_css_enable_continuous(asd, enable); - atomisp_css_enable_cvf(asd, asd->continuous_viewfinder->val); - - if (atomisp_css_continuous_set_num_raw_frames(asd, - asd->continuous_raw_buffer_size->val)) { - dev_err(isp->dev, "css_continuous_set_num_raw_frames failed\n"); - return -EINVAL; - } - - if (!enable) { - atomisp_css_enable_raw_binning(asd, false); - atomisp_css_input_set_two_pixels_per_clock(asd, false); - } - - if (isp->inputs[asd->input_curr].type != FILE_INPUT) - atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_SENSOR); - - return atomisp_update_run_mode(asd); -} - -static int configure_pp_input_nop(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height) -{ - return 0; -} - -static int configure_output_nop(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format sh_fmt) -{ - return 0; -} - -static int get_frame_info_nop(struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *finfo) -{ - return 0; -} - -/* - * Resets CSS parameters that depend on input resolution. - * - * Update params like CSS RAW binning, 2ppc mode and pp_input - * which depend on input size, but are not automatically - * handled in CSS when the input resolution is changed. 
- */ -static int css_input_resolution_changed(struct atomisp_sub_device *asd, - struct v4l2_mbus_framefmt *ffmt) -{ - struct atomisp_metadata_buf *md_buf = NULL, *_md_buf; - unsigned int i; - - dev_dbg(asd->isp->dev, "css_input_resolution_changed to %ux%u\n", - ffmt->width, ffmt->height); - -#if defined(ISP2401_NEW_INPUT_SYSTEM) - atomisp_css_input_set_two_pixels_per_clock(asd, false); -#else - atomisp_css_input_set_two_pixels_per_clock(asd, true); -#endif - if (asd->continuous_mode->val) { - /* Note for all checks: ffmt includes pad_w+pad_h */ - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO || - (ffmt->width >= 2048 || ffmt->height >= 1536)) { - /* - * For preview pipe, enable only if resolution - * is >= 3M for ISP2400. - */ - atomisp_css_enable_raw_binning(asd, true); - } - } - /* - * If sensor input changed, which means metadata resolution changed - * together. Release all metadata buffers here to let it re-allocated - * next time in reqbufs. - */ - for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i], - list) { - atomisp_css_free_metadata_buffer(md_buf); - list_del(&md_buf->list); - kfree(md_buf); - } - } - return 0; - - /* - * TODO: atomisp_css_preview_configure_pp_input() not - * reset due to CSS bug tracked as PSI BZ 115124 - */ -} - -static int atomisp_set_fmt_to_isp(struct video_device *vdev, - struct atomisp_css_frame_info *output_info, - struct atomisp_css_frame_info *raw_output_info, - struct v4l2_pix_format *pix, - unsigned int source_pad) -{ - struct camera_mipi_info *mipi_info; - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - const struct atomisp_format_bridge *format; - struct v4l2_rect *isp_sink_crop; - enum atomisp_css_pipe_id pipe_id; - struct v4l2_subdev_fh fh; - int (*configure_output)(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum 
atomisp_css_frame_format sh_fmt) = - configure_output_nop; - int (*get_frame_info)(struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *finfo) = - get_frame_info_nop; - int (*configure_pp_input)(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height) = - configure_pp_input_nop; - uint16_t stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); - const struct atomisp_in_fmt_conv *fc; - int ret; - - v4l2_fh_init(&fh.vfh, vdev); - - isp_sink_crop = atomisp_subdev_get_rect( - &asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, V4L2_SEL_TGT_CROP); - - format = atomisp_get_format_bridge(pix->pixelformat); - if (format == NULL) - return -EINVAL; - - if (isp->inputs[asd->input_curr].type != TEST_PATTERN && - isp->inputs[asd->input_curr].type != FILE_INPUT) { - mipi_info = atomisp_to_sensor_mipi_info( - isp->inputs[asd->input_curr].camera); - if (!mipi_info) { - dev_err(isp->dev, "mipi_info is NULL\n"); - return -EINVAL; - } - if (atomisp_set_sensor_mipi_to_isp(asd, stream_index, - mipi_info)) - return -EINVAL; - fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt( - mipi_info->input_format); - if (!fc) - fc = atomisp_find_in_fmt_conv( - atomisp_subdev_get_ffmt(&asd->subdev, - NULL, V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK)->code); - if (!fc) - return -EINVAL; - if (format->sh_fmt == CSS_FRAME_FORMAT_RAW && - raw_output_format_match_input(fc->css_stream_fmt, - pix->pixelformat)) - return -EINVAL; - } - - /* - * Configure viewfinder also when vfpp is disabled: the - * CSS still requires viewfinder configuration. - */ - if (asd->fmt_auto->val || - asd->vfpp->val != ATOMISP_VFPP_ENABLE) { - struct v4l2_rect vf_size = {0}; - struct v4l2_mbus_framefmt vf_ffmt = {0}; - - if (pix->width < 640 || pix->height < 480) { - vf_size.width = pix->width; - vf_size.height = pix->height; - } else { - vf_size.width = 640; - vf_size.height = 480; - } - - /* FIXME: proper format name for this one. 
See - atomisp_output_fmts[] in atomisp_v4l2.c */ - vf_ffmt.code = V4L2_MBUS_FMT_CUSTOM_YUV420; - - atomisp_subdev_set_selection(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SOURCE_VF, - V4L2_SEL_TGT_COMPOSE, 0, &vf_size); - atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SOURCE_VF, &vf_ffmt); - asd->video_out_vf.sh_fmt = CSS_FRAME_FORMAT_NV12; - - if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { - atomisp_css_video_configure_viewfinder(asd, - vf_size.width, vf_size.height, 0, - asd->video_out_vf.sh_fmt); - } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW || - source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) - atomisp_css_video_configure_viewfinder(asd, - vf_size.width, vf_size.height, 0, - asd->video_out_vf.sh_fmt); - else - atomisp_css_capture_configure_viewfinder(asd, - vf_size.width, vf_size.height, 0, - asd->video_out_vf.sh_fmt); - } else if (source_pad != ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW || - asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { - atomisp_css_capture_configure_viewfinder(asd, - vf_size.width, vf_size.height, 0, - asd->video_out_vf.sh_fmt); - } - } - - if (asd->continuous_mode->val) { - ret = __enable_continuous_mode(asd, true); - if (ret) - return -EINVAL; - } - - atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_SENSOR); - atomisp_css_disable_vf_pp(asd, - asd->vfpp->val != ATOMISP_VFPP_ENABLE); - - /* ISP2401 new input system need to use copy pipe */ - if (asd->copy_mode) { - pipe_id = CSS_PIPE_ID_COPY; - atomisp_css_capture_enable_online(asd, stream_index, false); - } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { - /* video same in continuouscapture and online modes */ - configure_output = atomisp_css_video_configure_output; - get_frame_info = atomisp_css_video_get_output_frame_info; - pipe_id = CSS_PIPE_ID_VIDEO; - } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - if 
(!asd->continuous_mode->val) { - configure_output = atomisp_css_video_configure_output; - get_frame_info = - atomisp_css_video_get_output_frame_info; - pipe_id = CSS_PIPE_ID_VIDEO; - } else { - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW || - source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) { - configure_output = - atomisp_css_video_configure_output; - get_frame_info = - atomisp_css_video_get_output_frame_info; - configure_pp_input = - atomisp_css_video_configure_pp_input; - pipe_id = CSS_PIPE_ID_VIDEO; - } else { - configure_output = - atomisp_css_capture_configure_output; - get_frame_info = - atomisp_css_capture_get_output_frame_info; - configure_pp_input = - atomisp_css_capture_configure_pp_input; - pipe_id = CSS_PIPE_ID_CAPTURE; - - atomisp_update_capture_mode(asd); - atomisp_css_capture_enable_online(asd, stream_index, false); - } - } - } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) { - configure_output = atomisp_css_preview_configure_output; - get_frame_info = atomisp_css_preview_get_output_frame_info; - configure_pp_input = atomisp_css_preview_configure_pp_input; - pipe_id = CSS_PIPE_ID_PREVIEW; - } else { - /* CSS doesn't support low light mode on SOC cameras, so disable - * it. FIXME: if this is done elsewhere, it gives corrupted - * colors into thumbnail image. - */ - if (isp->inputs[asd->input_curr].type == SOC_CAMERA) - asd->params.low_light = false; - - if (format->sh_fmt == CSS_FRAME_FORMAT_RAW) { - atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_RAW); - atomisp_css_enable_dz(asd, false); - } else { - atomisp_update_capture_mode(asd); - } - - if (!asd->continuous_mode->val) - /* in case of ANR, force capture pipe to offline mode */ - atomisp_css_capture_enable_online(asd, stream_index, - asd->params.low_light ? 
- false : asd->params.online_process); - - configure_output = atomisp_css_capture_configure_output; - get_frame_info = atomisp_css_capture_get_output_frame_info; - configure_pp_input = atomisp_css_capture_configure_pp_input; - pipe_id = CSS_PIPE_ID_CAPTURE; - - if (!asd->params.online_process && - !asd->continuous_mode->val) { - ret = atomisp_css_capture_get_output_raw_frame_info(asd, - raw_output_info); - if (ret) - return ret; - } - if (!asd->continuous_mode->val && asd->run_mode->val - != ATOMISP_RUN_MODE_STILL_CAPTURE) { - dev_err(isp->dev, - "Need to set the running mode first\n"); - asd->run_mode->val = ATOMISP_RUN_MODE_STILL_CAPTURE; - } - } - - /* - * to SOC camera, use yuvpp pipe. - */ - if (ATOMISP_USE_YUVPP(asd)) - pipe_id = CSS_PIPE_ID_YUVPP; - - if (asd->copy_mode) - ret = atomisp_css_copy_configure_output(asd, stream_index, - pix->width, pix->height, - format->planar ? pix->bytesperline : - pix->bytesperline * 8 / format->depth, - format->sh_fmt); - else - ret = configure_output(asd, pix->width, pix->height, - format->planar ? 
pix->bytesperline : - pix->bytesperline * 8 / format->depth, - format->sh_fmt); - if (ret) { - dev_err(isp->dev, "configure_output %ux%u, format %8.8x\n", - pix->width, pix->height, format->sh_fmt); - return -EINVAL; - } - - if (asd->continuous_mode->val && - (configure_pp_input == atomisp_css_preview_configure_pp_input || - configure_pp_input == atomisp_css_video_configure_pp_input)) { - /* for isp 2.2, configure pp input is available for continuous - * mode */ - ret = configure_pp_input(asd, isp_sink_crop->width, - isp_sink_crop->height); - if (ret) { - dev_err(isp->dev, "configure_pp_input %ux%u\n", - isp_sink_crop->width, - isp_sink_crop->height); - return -EINVAL; - } - } else { - ret = configure_pp_input(asd, isp_sink_crop->width, - isp_sink_crop->height); - if (ret) { - dev_err(isp->dev, "configure_pp_input %ux%u\n", - isp_sink_crop->width, isp_sink_crop->height); - return -EINVAL; - } - } - if (asd->copy_mode) - ret = atomisp_css_copy_get_output_frame_info(asd, stream_index, - output_info); - else - ret = get_frame_info(asd, output_info); - if (ret) { - dev_err(isp->dev, "get_frame_info %ux%u (padded to %u)\n", - pix->width, pix->height, pix->bytesperline); - return -EINVAL; - } - - atomisp_update_grid_info(asd, pipe_id, source_pad); - - /* Free the raw_dump buffer first */ - atomisp_css_frame_free(asd->raw_output_frame); - asd->raw_output_frame = NULL; - - if (!asd->continuous_mode->val && - !asd->params.online_process && !isp->sw_contex.file_input && - atomisp_css_frame_allocate_from_info(&asd->raw_output_frame, - raw_output_info)) - return -ENOMEM; - - return 0; -} - -static void atomisp_get_dis_envelop(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int *dvs_env_w, unsigned int *dvs_env_h) -{ - struct atomisp_device *isp = asd->isp; - - /* if subdev type is SOC camera,we do not need to set DVS */ - if (isp->inputs[asd->input_curr].type == SOC_CAMERA) - asd->params.video_dis_en = false; - - if 
(asd->params.video_dis_en && - asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - /* envelope is 20% of the output resolution */ - /* - * dvs envelope cannot be round up. - * it would cause ISP timeout and color switch issue - */ - *dvs_env_w = rounddown(width / 5, ATOM_ISP_STEP_WIDTH); - *dvs_env_h = rounddown(height / 5, ATOM_ISP_STEP_HEIGHT); - } - - asd->params.dis_proj_data_valid = false; - asd->params.css_update_params_needed = true; -} - -static void atomisp_check_copy_mode(struct atomisp_sub_device *asd, - int source_pad, struct v4l2_format *f) -{ -#if defined(ISP2401_NEW_INPUT_SYSTEM) - struct v4l2_mbus_framefmt *sink, *src; - - sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK); - src = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, source_pad); - - if ((sink->code == src->code && - sink->width == f->fmt.pix.width && - sink->height == f->fmt.pix.height) || - ((asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) && - (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1))) - asd->copy_mode = true; - else -#endif - /* Only used for the new input system */ - asd->copy_mode = false; - - dev_dbg(asd->isp->dev, "copy_mode: %d\n", asd->copy_mode); - -} - -static int atomisp_set_fmt_to_snr(struct video_device *vdev, - struct v4l2_format *f, unsigned int pixelformat, - unsigned int padding_w, unsigned int padding_h, - unsigned int dvs_env_w, unsigned int dvs_env_h) -{ - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - const struct atomisp_format_bridge *format; - struct v4l2_subdev_pad_config pad_cfg; - struct v4l2_subdev_format vformat = { - .which = V4L2_SUBDEV_FORMAT_TRY, - }; - struct v4l2_mbus_framefmt *ffmt = &vformat.format; - struct v4l2_mbus_framefmt *req_ffmt; - struct atomisp_device *isp = asd->isp; - struct atomisp_input_stream_info *stream_info = - (struct atomisp_input_stream_info *)ffmt->reserved; - uint16_t 
stream_index = ATOMISP_INPUT_STREAM_GENERAL; - int source_pad = atomisp_subdev_source_pad(vdev); - struct v4l2_subdev_fh fh; - int ret; - - v4l2_fh_init(&fh.vfh, vdev); - - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); - - format = atomisp_get_format_bridge(pixelformat); - if (format == NULL) - return -EINVAL; - - v4l2_fill_mbus_format(ffmt, &f->fmt.pix, format->mbus_code); - ffmt->height += padding_h + dvs_env_h; - ffmt->width += padding_w + dvs_env_w; - - dev_dbg(isp->dev, "s_mbus_fmt: ask %ux%u (padding %ux%u, dvs %ux%u)\n", - ffmt->width, ffmt->height, padding_w, padding_h, - dvs_env_w, dvs_env_h); - - __atomisp_init_stream_info(stream_index, stream_info); - - req_ffmt = ffmt; - - /* Disable dvs if resolution can't be supported by sensor */ - if (asd->params.video_dis_en && - source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) { - vformat.which = V4L2_SUBDEV_FORMAT_TRY; - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - pad, set_fmt, &pad_cfg, &vformat); - if (ret) - return ret; - if (ffmt->width < req_ffmt->width || - ffmt->height < req_ffmt->height) { - req_ffmt->height -= dvs_env_h; - req_ffmt->width -= dvs_env_w; - ffmt = req_ffmt; - dev_warn(isp->dev, - "can not enable video dis due to sensor limitation."); - asd->params.video_dis_en = false; - } - } - dev_dbg(isp->dev, "sensor width: %d, height: %d\n", - ffmt->width, ffmt->height); - vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE; - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad, - set_fmt, NULL, &vformat); - if (ret) - return ret; - - __atomisp_update_stream_env(asd, stream_index, stream_info); - - dev_dbg(isp->dev, "sensor width: %d, height: %d\n", - ffmt->width, ffmt->height); - - if (ffmt->width < ATOM_ISP_STEP_WIDTH || - ffmt->height < ATOM_ISP_STEP_HEIGHT) - return -EINVAL; - - if (asd->params.video_dis_en && - source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO && - (ffmt->width < req_ffmt->width || ffmt->height < req_ffmt->height)) { - dev_warn(isp->dev, - "can not 
enable video dis due to sensor limitation."); - asd->params.video_dis_en = false; - } - - atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, ffmt); - - return css_input_resolution_changed(asd, ffmt); -} - -int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) -{ - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - const struct atomisp_format_bridge *format_bridge; - const struct atomisp_format_bridge *snr_format_bridge; - struct atomisp_css_frame_info output_info, raw_output_info; - struct v4l2_format snr_fmt = *f; - struct v4l2_format backup_fmt = *f, s_fmt = *f; - unsigned int dvs_env_w = 0, dvs_env_h = 0; - unsigned int padding_w = pad_w, padding_h = pad_h; - bool res_overflow = false, crop_needs_override = false; - struct v4l2_mbus_framefmt isp_sink_fmt; - struct v4l2_mbus_framefmt isp_source_fmt = {0}; - struct v4l2_rect isp_sink_crop; - uint16_t source_pad = atomisp_subdev_source_pad(vdev); - struct v4l2_subdev_fh fh; - int ret; - - dev_dbg(isp->dev, - "setting resolution %ux%u on pad %u for asd%d, bytesperline %u\n", - f->fmt.pix.width, f->fmt.pix.height, source_pad, - asd->index, f->fmt.pix.bytesperline); - - if (source_pad >= ATOMISP_SUBDEV_PADS_NUM) - return -EINVAL; - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { - dev_warn(isp->dev, "ISP does not support set format while at streaming!\n"); - return -EBUSY; - } - - v4l2_fh_init(&fh.vfh, vdev); - - format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); - if (format_bridge == NULL) - return -EINVAL; - - pipe->sh_fmt = format_bridge->sh_fmt; - pipe->pix.pixelformat = f->fmt.pix.pixelformat; - - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VF || - (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW - && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)) { - if (asd->fmt_auto->val) { - struct v4l2_rect 
*capture_comp; - struct v4l2_rect r = {0}; - - r.width = f->fmt.pix.width; - r.height = f->fmt.pix.height; - - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) - capture_comp = atomisp_subdev_get_rect( - &asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SOURCE_VIDEO, - V4L2_SEL_TGT_COMPOSE); - else - capture_comp = atomisp_subdev_get_rect( - &asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE, - V4L2_SEL_TGT_COMPOSE); - - if (capture_comp->width < r.width - || capture_comp->height < r.height) { - r.width = capture_comp->width; - r.height = capture_comp->height; - } - - atomisp_subdev_set_selection( - &asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, source_pad, - V4L2_SEL_TGT_COMPOSE, 0, &r); - - f->fmt.pix.width = r.width; - f->fmt.pix.height = r.height; - } - - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && - (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) && - (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1)) { - /* For M10MO outputing YUV preview images. */ - uint16_t video_index = - atomisp_source_pad_to_stream_id(asd, - ATOMISP_SUBDEV_PAD_SOURCE_VIDEO); - - ret = atomisp_css_copy_get_output_frame_info(asd, - video_index, &output_info); - if (ret) { - dev_err(isp->dev, - "copy_get_output_frame_info ret %i", ret); - return -EINVAL; - } - if (!asd->yuvpp_mode) { - /* - * If viewfinder was configured into copy_mode, - * we switch to using yuvpp pipe instead. 
- */ - asd->yuvpp_mode = true; - ret = atomisp_css_copy_configure_output( - asd, video_index, 0, 0, 0, 0); - if (ret) { - dev_err(isp->dev, - "failed to disable copy pipe"); - return -EINVAL; - } - ret = atomisp_css_yuvpp_configure_output( - asd, video_index, - output_info.res.width, - output_info.res.height, - output_info.padded_width, - output_info.format); - if (ret) { - dev_err(isp->dev, - "failed to set up yuvpp pipe\n"); - return -EINVAL; - } - atomisp_css_video_enable_online(asd, false); - atomisp_css_preview_enable_online(asd, - ATOMISP_INPUT_STREAM_GENERAL, false); - } - atomisp_css_yuvpp_configure_viewfinder(asd, video_index, - f->fmt.pix.width, f->fmt.pix.height, - format_bridge->planar ? f->fmt.pix.bytesperline - : f->fmt.pix.bytesperline * 8 - / format_bridge->depth, format_bridge->sh_fmt); - atomisp_css_yuvpp_get_viewfinder_frame_info( - asd, video_index, &output_info); - } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) { - atomisp_css_video_configure_viewfinder(asd, - f->fmt.pix.width, f->fmt.pix.height, - format_bridge->planar ? f->fmt.pix.bytesperline - : f->fmt.pix.bytesperline * 8 - / format_bridge->depth, format_bridge->sh_fmt); - atomisp_css_video_get_viewfinder_frame_info(asd, - &output_info); - asd->copy_mode = false; - } else { - atomisp_css_capture_configure_viewfinder(asd, - f->fmt.pix.width, f->fmt.pix.height, - format_bridge->planar ? f->fmt.pix.bytesperline - : f->fmt.pix.bytesperline * 8 - / format_bridge->depth, format_bridge->sh_fmt); - atomisp_css_capture_get_viewfinder_frame_info(asd, - &output_info); - asd->copy_mode = false; - } - - goto done; - } - /* - * Check whether main resolution configured smaller - * than snapshot resolution. 
If so, force main resolution - * to be the same as snapshot resolution - */ - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) { - struct v4l2_rect *r; - - r = atomisp_subdev_get_rect( - &asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SOURCE_VF, V4L2_SEL_TGT_COMPOSE); - - if (r->width && r->height - && (r->width > f->fmt.pix.width - || r->height > f->fmt.pix.height)) - dev_warn(isp->dev, - "Main Resolution config smaller then Vf Resolution. Force to be equal with Vf Resolution."); - } - - /* Pipeline configuration done through subdevs. Bail out now. */ - if (!asd->fmt_auto->val) - goto set_fmt_to_isp; - - /* get sensor resolution and format */ - ret = atomisp_try_fmt(vdev, &snr_fmt, &res_overflow); - if (ret) - return ret; - f->fmt.pix.width = snr_fmt.fmt.pix.width; - f->fmt.pix.height = snr_fmt.fmt.pix.height; - - snr_format_bridge = - atomisp_get_format_bridge(snr_fmt.fmt.pix.pixelformat); - if (!snr_format_bridge) - return -EINVAL; - - atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK)->code = - snr_format_bridge->mbus_code; - - isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - isp_source_fmt.code = format_bridge->mbus_code; - atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - source_pad, &isp_source_fmt); - - if (!atomisp_subdev_format_conversion(asd, source_pad)) { - padding_w = 0; - padding_h = 0; - } else if (IS_BYT) { - padding_w = 12; - padding_h = 12; - } - - /* construct resolution supported by isp */ - if (res_overflow && !asd->continuous_mode->val) { - f->fmt.pix.width = rounddown( - clamp_t(u32, f->fmt.pix.width - padding_w, - ATOM_ISP_MIN_WIDTH, - ATOM_ISP_MAX_WIDTH), ATOM_ISP_STEP_WIDTH); - f->fmt.pix.height = rounddown( - clamp_t(u32, f->fmt.pix.height - padding_h, - ATOM_ISP_MIN_HEIGHT, - ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT); - } - - atomisp_get_dis_envelop(asd, 
f->fmt.pix.width, f->fmt.pix.height, - &dvs_env_w, &dvs_env_h); - - if (asd->continuous_mode->val) { - struct v4l2_rect *r; - - r = atomisp_subdev_get_rect( - &asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE, - V4L2_SEL_TGT_COMPOSE); - /* - * The ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE should get resolutions - * properly set otherwise, it should not be the capture_pad. - */ - if (r->width && r->height) - asd->capture_pad = ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE; - else - asd->capture_pad = source_pad; - } else { - asd->capture_pad = source_pad; - } - /* - * set format info to sensor - * In continuous mode, resolution is set only if it is higher than - * existing value. This because preview pipe will be configured after - * capture pipe and usually has lower resolution than capture pipe. - */ - if (!asd->continuous_mode->val || - isp_sink_fmt.width < (f->fmt.pix.width + padding_w + dvs_env_w) || - isp_sink_fmt.height < (f->fmt.pix.height + padding_h + - dvs_env_h)) { - /* - * For jpeg or custom raw format the sensor will return constant - * width and height. Because we already had quried try_mbus_fmt, - * f->fmt.pix.width and f->fmt.pix.height has been changed to - * this fixed width and height. So we cannot select the correct - * resolution with that information. So use the original width - * and height while set_mbus_fmt() so actual resolutions are - * being used in while set media bus format. 
- */ - s_fmt = *f; - if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG || - f->fmt.pix.pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW) { - s_fmt.fmt.pix.width = backup_fmt.fmt.pix.width; - s_fmt.fmt.pix.height = backup_fmt.fmt.pix.height; - } - ret = atomisp_set_fmt_to_snr(vdev, &s_fmt, - f->fmt.pix.pixelformat, padding_w, - padding_h, dvs_env_w, dvs_env_h); - if (ret) - return -EINVAL; - - atomisp_csi_lane_config(isp); - crop_needs_override = true; - } - - atomisp_check_copy_mode(asd, source_pad, &backup_fmt); - asd->yuvpp_mode = false; /* Reset variable */ - - isp_sink_crop = *atomisp_subdev_get_rect(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, - V4L2_SEL_TGT_CROP); - - /* Try to enable YUV downscaling if ISP input is 10 % (either - * width or height) bigger than the desired result. */ - if (isp_sink_crop.width * 9 / 10 < f->fmt.pix.width || - isp_sink_crop.height * 9 / 10 < f->fmt.pix.height || - (atomisp_subdev_format_conversion(asd, source_pad) && - ((asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO && - !asd->continuous_mode->val) || - asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER))) { - /* for continuous mode, preview size might be smaller than - * still capture size. if preview size still needs crop, - * pick the larger one between crop size of preview and - * still capture. 
- */ - if (asd->continuous_mode->val - && source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW - && !crop_needs_override) { - isp_sink_crop.width = - max_t(unsigned int, f->fmt.pix.width, - isp_sink_crop.width); - isp_sink_crop.height = - max_t(unsigned int, f->fmt.pix.height, - isp_sink_crop.height); - } else { - isp_sink_crop.width = f->fmt.pix.width; - isp_sink_crop.height = f->fmt.pix.height; - } - - atomisp_subdev_set_selection(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, - V4L2_SEL_TGT_CROP, - V4L2_SEL_FLAG_KEEP_CONFIG, - &isp_sink_crop); - atomisp_subdev_set_selection(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - source_pad, V4L2_SEL_TGT_COMPOSE, - 0, &isp_sink_crop); - } else if (IS_MOFD) { - struct v4l2_rect main_compose = {0}; - - main_compose.width = isp_sink_crop.width; - main_compose.height = - DIV_ROUND_UP(main_compose.width * f->fmt.pix.height, - f->fmt.pix.width); - if (main_compose.height > isp_sink_crop.height) { - main_compose.height = isp_sink_crop.height; - main_compose.width = - DIV_ROUND_UP(main_compose.height * - f->fmt.pix.width, - f->fmt.pix.height); - } - - atomisp_subdev_set_selection(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - source_pad, - V4L2_SEL_TGT_COMPOSE, 0, - &main_compose); - } else { - struct v4l2_rect sink_crop = {0}; - struct v4l2_rect main_compose = {0}; - - main_compose.width = f->fmt.pix.width; - main_compose.height = f->fmt.pix.height; - -#ifndef ISP2401 - /* WORKAROUND: this override is universally enabled in - * GMIN to work around a CTS failures (GMINL-539) - * which appears to be related by a hardware - * performance limitation. It's unclear why this - * particular code triggers the issue. 
*/ - if (1 || - crop_needs_override) { -#else - if (crop_needs_override) { -#endif - if (isp_sink_crop.width * main_compose.height > - isp_sink_crop.height * main_compose.width) { - sink_crop.height = isp_sink_crop.height; - sink_crop.width = DIV_NEAREST_STEP( - sink_crop.height * - f->fmt.pix.width, - f->fmt.pix.height, - ATOM_ISP_STEP_WIDTH); - } else { - sink_crop.width = isp_sink_crop.width; - sink_crop.height = DIV_NEAREST_STEP( - sink_crop.width * - f->fmt.pix.height, - f->fmt.pix.width, - ATOM_ISP_STEP_HEIGHT); - } - atomisp_subdev_set_selection(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, - V4L2_SEL_TGT_CROP, - V4L2_SEL_FLAG_KEEP_CONFIG, - &sink_crop); - } - atomisp_subdev_set_selection(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - source_pad, - V4L2_SEL_TGT_COMPOSE, 0, - &main_compose); - } - -set_fmt_to_isp: - ret = atomisp_set_fmt_to_isp(vdev, &output_info, &raw_output_info, - &f->fmt.pix, source_pad); - if (ret) - return -EINVAL; -done: - pipe->pix.width = f->fmt.pix.width; - pipe->pix.height = f->fmt.pix.height; - pipe->pix.pixelformat = f->fmt.pix.pixelformat; - if (format_bridge->planar) { - pipe->pix.bytesperline = output_info.padded_width; - pipe->pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * - DIV_ROUND_UP(format_bridge->depth * - output_info.padded_width, 8)); - } else { - pipe->pix.bytesperline = - DIV_ROUND_UP(format_bridge->depth * - output_info.padded_width, 8); - pipe->pix.sizeimage = - PAGE_ALIGN(f->fmt.pix.height * pipe->pix.bytesperline); - - } - if (f->fmt.pix.field == V4L2_FIELD_ANY) - f->fmt.pix.field = V4L2_FIELD_NONE; - pipe->pix.field = f->fmt.pix.field; - - f->fmt.pix = pipe->pix; - f->fmt.pix.priv = PAGE_ALIGN(pipe->pix.width * - pipe->pix.height * 2); - - pipe->capq.field = f->fmt.pix.field; - - /* - * If in video 480P case, no GFX throttle - */ - if (asd->run_mode->val == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO && - f->fmt.pix.width == 720 && f->fmt.pix.height == 480) - isp->need_gfx_throttle = 
false; - else - isp->need_gfx_throttle = true; - - return 0; -} - -int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f) -{ - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct v4l2_mbus_framefmt ffmt = {0}; - const struct atomisp_format_bridge *format_bridge; - struct v4l2_subdev_fh fh; - int ret; - - v4l2_fh_init(&fh.vfh, vdev); - - dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n", - f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat); - ret = atomisp_try_fmt_file(isp, f); - if (ret) { - dev_err(isp->dev, "atomisp_try_fmt_file err: %d\n", ret); - return ret; - } - - format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); - if (format_bridge == NULL) { - dev_dbg(isp->dev, "atomisp_get_format_bridge err! fmt:0x%x\n", - f->fmt.pix.pixelformat); - return -EINVAL; - } - - pipe->pix = f->fmt.pix; - atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_FIFO); - atomisp_css_input_configure_port(asd, - __get_mipi_port(isp, ATOMISP_CAMERA_PORT_PRIMARY), 2, 0xffff4, - 0, 0, 0, 0); - ffmt.width = f->fmt.pix.width; - ffmt.height = f->fmt.pix.height; - ffmt.code = format_bridge->mbus_code; - - atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, &ffmt); - - return 0; -} - -int atomisp_set_shading_table(struct atomisp_sub_device *asd, - struct atomisp_shading_table *user_shading_table) -{ - struct atomisp_css_shading_table *shading_table; - struct atomisp_css_shading_table *free_table; - unsigned int len_table; - int i; - int ret = 0; - - if (!user_shading_table) - return -EINVAL; - - if (!user_shading_table->enable) { - atomisp_css_set_shading_table(asd, NULL); - asd->params.sc_en = false; - return 0; - } - - /* If enabling, all tables must be set */ - for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) { - if (!user_shading_table->data[i]) - return -EINVAL; - } - - /* 
Shading table size per color */ - if (user_shading_table->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR || - user_shading_table->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) - return -EINVAL; - - shading_table = atomisp_css_shading_table_alloc( - user_shading_table->width, user_shading_table->height); - if (!shading_table) - return -ENOMEM; - - len_table = user_shading_table->width * user_shading_table->height * - ATOMISP_SC_TYPE_SIZE; - for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) { - ret = copy_from_user(shading_table->data[i], - (void __user *)user_shading_table->data[i], - len_table); - if (ret) { - free_table = shading_table; - ret = -EFAULT; - goto out; - } - } - shading_table->sensor_width = user_shading_table->sensor_width; - shading_table->sensor_height = user_shading_table->sensor_height; - shading_table->fraction_bits = user_shading_table->fraction_bits; - - free_table = asd->params.css_param.shading_table; - asd->params.css_param.shading_table = shading_table; - atomisp_css_set_shading_table(asd, shading_table); - asd->params.sc_en = true; - -out: - if (free_table != NULL) - atomisp_css_shading_table_free(free_table); - - return ret; -} - -/*Turn off ISP dphy */ -int atomisp_ospm_dphy_down(struct atomisp_device *isp) -{ - unsigned long flags; - u32 reg; - - dev_dbg(isp->dev, "%s\n", __func__); - - /* if ISP timeout, we can force powerdown */ - if (isp->isp_timeout) - goto done; - - if (!atomisp_dev_users(isp)) - goto done; - - spin_lock_irqsave(&isp->lock, flags); - isp->sw_contex.power_state = ATOM_ISP_POWER_DOWN; - spin_unlock_irqrestore(&isp->lock, flags); -done: - /* - * MRFLD IUNIT DPHY is located in an always-power-on island - * MRFLD HW design need all CSI ports are disabled before - * powering down the IUNIT. 
- */ - pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, ®); - reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK; - pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, reg); - return 0; -} - -/*Turn on ISP dphy */ -int atomisp_ospm_dphy_up(struct atomisp_device *isp) -{ - unsigned long flags; - dev_dbg(isp->dev, "%s\n", __func__); - - spin_lock_irqsave(&isp->lock, flags); - isp->sw_contex.power_state = ATOM_ISP_POWER_UP; - spin_unlock_irqrestore(&isp->lock, flags); - - return 0; -} - - -int atomisp_exif_makernote(struct atomisp_sub_device *asd, - struct atomisp_makernote_info *config) -{ - struct v4l2_control ctrl; - struct atomisp_device *isp = asd->isp; - - ctrl.id = V4L2_CID_FOCAL_ABSOLUTE; - if (v4l2_g_ctrl - (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) { - dev_warn(isp->dev, "failed to g_ctrl for focal length\n"); - return -EINVAL; - } else { - config->focal_length = ctrl.value; - } - - ctrl.id = V4L2_CID_FNUMBER_ABSOLUTE; - if (v4l2_g_ctrl - (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) { - dev_warn(isp->dev, "failed to g_ctrl for f-number\n"); - return -EINVAL; - } else { - config->f_number_curr = ctrl.value; - } - - ctrl.id = V4L2_CID_FNUMBER_RANGE; - if (v4l2_g_ctrl - (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) { - dev_warn(isp->dev, "failed to g_ctrl for f number range\n"); - return -EINVAL; - } else { - config->f_number_range = ctrl.value; - } - - return 0; -} - -int atomisp_offline_capture_configure(struct atomisp_sub_device *asd, - struct atomisp_cont_capture_conf *cvf_config) -{ - struct v4l2_ctrl *c; - - /* - * In case of M10MO ZSL capture case, we need to issue a separate - * capture request to M10MO which will output captured jpeg image - */ - c = v4l2_ctrl_find( - asd->isp->inputs[asd->input_curr].camera->ctrl_handler, - V4L2_CID_START_ZSL_CAPTURE); - if (c) { - int ret; - dev_dbg(asd->isp->dev, "%s trigger ZSL capture request\n", - __func__); - /* TODO: use the cvf_config */ - ret = v4l2_ctrl_s_ctrl(c, 
1); - if (ret) - return ret; - - return v4l2_ctrl_s_ctrl(c, 0); - } - - asd->params.offline_parm = *cvf_config; - - if (asd->params.offline_parm.num_captures) { - if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED) { - unsigned int init_raw_num; - - if (asd->enable_raw_buffer_lock->val) { - init_raw_num = - ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN; - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO && - asd->params.video_dis_en) - init_raw_num += - ATOMISP_CSS2_NUM_DVS_FRAME_DELAY; - } else { - init_raw_num = - ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES; - } - - /* TODO: this can be removed once user-space - * has been updated to use control API */ - asd->continuous_raw_buffer_size->val = - max_t(int, - asd->continuous_raw_buffer_size->val, - asd->params.offline_parm. - num_captures + init_raw_num); - asd->continuous_raw_buffer_size->val = - min_t(int, ATOMISP_CONT_RAW_FRAMES, - asd->continuous_raw_buffer_size->val); - } - asd->continuous_mode->val = true; - } else { - asd->continuous_mode->val = false; - __enable_continuous_mode(asd, false); - } - - return 0; -} - -/* - * set auto exposure metering window to camera sensor - */ -int atomisp_s_ae_window(struct atomisp_sub_device *asd, - struct atomisp_ae_window *arg) -{ - struct atomisp_device *isp = asd->isp; - /* Coverity CID 298071 - initialzize struct */ - struct v4l2_subdev_selection sel = { 0 }; - - sel.r.left = arg->x_left; - sel.r.top = arg->y_top; - sel.r.width = arg->x_right - arg->x_left + 1; - sel.r.height = arg->y_bottom - arg->y_top + 1; - - if (v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - pad, set_selection, NULL, &sel)) { - dev_err(isp->dev, "failed to call sensor set_selection.\n"); - return -EINVAL; - } - - return 0; -} - -int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames) -{ - struct atomisp_device *isp = asd->isp; - - if (num_frames < 0) { - dev_dbg(isp->dev, "%s ERROR: num_frames: %d\n", __func__, - num_frames); - return -EINVAL; - } - /* 
a requested flash is still in progress. */ - if (num_frames && asd->params.flash_state != ATOMISP_FLASH_IDLE) { - dev_dbg(isp->dev, "%s flash busy: %d frames left: %d\n", - __func__, asd->params.flash_state, - asd->params.num_flash_frames); - return -EBUSY; - } - - asd->params.num_flash_frames = num_frames; - asd->params.flash_state = ATOMISP_FLASH_REQUESTED; - return 0; -} - -int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd, - uint16_t source_pad) -{ - int stream_id; - struct atomisp_device *isp = asd->isp; - - if (isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num == 1) - return ATOMISP_INPUT_STREAM_GENERAL; - - switch (source_pad) { - case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: - stream_id = ATOMISP_INPUT_STREAM_CAPTURE; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_VF: - stream_id = ATOMISP_INPUT_STREAM_POSTVIEW; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: - stream_id = ATOMISP_INPUT_STREAM_PREVIEW; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: - stream_id = ATOMISP_INPUT_STREAM_VIDEO; - break; - default: - stream_id = ATOMISP_INPUT_STREAM_GENERAL; - } - - return stream_id; -} - -bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe) -{ - struct atomisp_sub_device *asd = pipe->asd; - - if (pipe == &asd->video_out_vf) - return true; - - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO && - pipe == &asd->video_out_preview) - return true; - - return false; -} - -static int __checking_exp_id(struct atomisp_sub_device *asd, int exp_id) -{ - struct atomisp_device *isp = asd->isp; - - if (!asd->enable_raw_buffer_lock->val) { - dev_warn(isp->dev, "%s Raw Buffer Lock is disable.\n", __func__); - return -EINVAL; - } - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) { - dev_err(isp->dev, "%s streaming %d invalid exp_id %d.\n", - __func__, exp_id, asd->streaming); - return -EINVAL; - } - if ((exp_id > ATOMISP_MAX_EXP_ID) || (exp_id <= 0)) { - dev_err(isp->dev, "%s exp_id %d invalid.\n", __func__, exp_id); - return 
-EINVAL; - } - return 0; -} - -void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd) -{ - unsigned long flags; - spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags); - memset(asd->raw_buffer_bitmap, 0, sizeof(asd->raw_buffer_bitmap)); - asd->raw_buffer_locked_count = 0; - spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags); -} - -int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id) -{ - int *bitmap, bit; - unsigned long flags; - - if (__checking_exp_id(asd, exp_id)) - return -EINVAL; - - bitmap = asd->raw_buffer_bitmap + exp_id / 32; - bit = exp_id % 32; - spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags); - (*bitmap) |= (1 << bit); - asd->raw_buffer_locked_count++; - spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags); - - dev_dbg(asd->isp->dev, "%s: exp_id %d, raw_buffer_locked_count %d\n", - __func__, exp_id, asd->raw_buffer_locked_count); - - /* Check if the raw buffer after next is still locked!!! */ - exp_id += 2; - if (exp_id > ATOMISP_MAX_EXP_ID) - exp_id -= ATOMISP_MAX_EXP_ID; - bitmap = asd->raw_buffer_bitmap + exp_id / 32; - bit = exp_id % 32; - if ((*bitmap) & (1 << bit)) { - int ret; - - /* WORKAROUND unlock the raw buffer compulsively */ - ret = atomisp_css_exp_id_unlock(asd, exp_id); - if (ret) { - dev_err(asd->isp->dev, "%s exp_id is wrapping back to %d but force unlock failed,, err %d.\n", - __func__, exp_id, ret); - return ret; - } - - spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags); - (*bitmap) &= ~(1 << bit); - asd->raw_buffer_locked_count--; - spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags); - dev_warn(asd->isp->dev, "%s exp_id is wrapping back to %d but it is still locked so force unlock it, raw_buffer_locked_count %d\n", - __func__, exp_id, asd->raw_buffer_locked_count); - } - return 0; -} - -static int __is_raw_buffer_locked(struct atomisp_sub_device *asd, int exp_id) -{ - int *bitmap, bit; - unsigned long flags; - int ret; - - if (__checking_exp_id(asd, 
exp_id)) - return -EINVAL; - - bitmap = asd->raw_buffer_bitmap + exp_id / 32; - bit = exp_id % 32; - spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags); - ret = ((*bitmap) & (1 << bit)); - spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags); - return !ret; -} - -static int __clear_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id) -{ - int *bitmap, bit; - unsigned long flags; - - if (__is_raw_buffer_locked(asd, exp_id)) - return -EINVAL; - - bitmap = asd->raw_buffer_bitmap + exp_id / 32; - bit = exp_id % 32; - spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags); - (*bitmap) &= ~(1 << bit); - asd->raw_buffer_locked_count--; - spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags); - - dev_dbg(asd->isp->dev, "%s: exp_id %d, raw_buffer_locked_count %d\n", - __func__, exp_id, asd->raw_buffer_locked_count); - return 0; -} - -int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id) -{ - struct atomisp_device *isp = asd->isp; - int value = *exp_id; - int ret; - - ret = __is_raw_buffer_locked(asd, value); - if (ret) { - dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret); - return -EINVAL; - } - - dev_dbg(isp->dev, "%s exp_id %d\n", __func__, value); - ret = atomisp_css_exp_id_capture(asd, value); - if (ret) { - dev_err(isp->dev, "%s exp_id %d failed.\n", __func__, value); - return -EIO; - } - return 0; -} - -int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id) -{ - struct atomisp_device *isp = asd->isp; - int value = *exp_id; - int ret; - - ret = __clear_raw_buffer_bitmap(asd, value); - if (ret) { - dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret); - return -EINVAL; - } - - dev_dbg(isp->dev, "%s exp_id %d\n", __func__, value); - ret = atomisp_css_exp_id_unlock(asd, value); - if (ret) - dev_err(isp->dev, "%s exp_id %d failed, err %d.\n", - __func__, value, ret); - - return ret; -} - -int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd, - unsigned int *enable) 
-{ - bool value; - - if (enable == NULL) - return -EINVAL; - - value = *enable > 0 ? true : false; - - atomisp_en_dz_capt_pipe(asd, value); - - return 0; -} - -int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event) -{ - if (!event || asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return -EINVAL; - - dev_dbg(asd->isp->dev, "%s: trying to inject a fake event 0x%x\n", - __func__, *event); - - switch (*event) { - case V4L2_EVENT_FRAME_SYNC: - atomisp_sof_event(asd); - break; - case V4L2_EVENT_FRAME_END: - atomisp_eof_event(asd, 0); - break; - case V4L2_EVENT_ATOMISP_3A_STATS_READY: - atomisp_3a_stats_ready_event(asd, 0); - break; - case V4L2_EVENT_ATOMISP_METADATA_READY: - atomisp_metadata_ready_event(asd, 0); - break; - default: - return -EINVAL; - } - - return 0; -} - -static int atomisp_get_pipe_id(struct atomisp_video_pipe *pipe) -{ - struct atomisp_sub_device *asd = pipe->asd; - - if (ATOMISP_USE_YUVPP(asd)) - return CSS_PIPE_ID_YUVPP; - else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) - return CSS_PIPE_ID_VIDEO; - else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) - return CSS_PIPE_ID_CAPTURE; - else if (pipe == &asd->video_out_video_capture) - return CSS_PIPE_ID_VIDEO; - else if (pipe == &asd->video_out_vf) - return CSS_PIPE_ID_CAPTURE; - else if (pipe == &asd->video_out_preview) { - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) - return CSS_PIPE_ID_VIDEO; - else - return CSS_PIPE_ID_PREVIEW; - } else if (pipe == &asd->video_out_capture) { - if (asd->copy_mode) - return IA_CSS_PIPE_ID_COPY; - else - return CSS_PIPE_ID_CAPTURE; - } - - /* fail through */ - dev_warn(asd->isp->dev, "%s failed to find proper pipe\n", - __func__); - return CSS_PIPE_ID_CAPTURE; -} - -int atomisp_get_invalid_frame_num(struct video_device *vdev, - int *invalid_frame_num) -{ - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - enum atomisp_css_pipe_id pipe_id; - struct ia_css_pipe_info 
p_info; - int ret; - - if (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - /* External ISP */ - *invalid_frame_num = 0; - return 0; - } - - pipe_id = atomisp_get_pipe_id(pipe); - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id]) { - dev_warn(asd->isp->dev, "%s pipe %d has not been created yet, do SET_FMT first!\n", - __func__, pipe_id); - return -EINVAL; - } - - ret = ia_css_pipe_get_info( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipes[pipe_id], &p_info); - if (ret == IA_CSS_SUCCESS) { - *invalid_frame_num = p_info.num_invalid_frames; - return 0; - } else { - dev_warn(asd->isp->dev, "%s get pipe infor failed %d\n", - __func__, ret); - return -EINVAL; - } -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h deleted file mode 100644 index 79d493dba403..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h +++ /dev/null @@ -1,446 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __ATOMISP_CMD_H__ -#define __ATOMISP_CMD_H__ - -#include "../../include/linux/atomisp.h" -#include -#include - -#include - -#include "atomisp_internal.h" - -#include "ia_css_types.h" -#include "ia_css.h" - -struct atomisp_device; -struct atomisp_css_frame; - -#define MSI_ENABLE_BIT 16 -#define INTR_DISABLE_BIT 10 -#define BUS_MASTER_ENABLE 2 -#define MEMORY_SPACE_ENABLE 1 -#define INTR_IER 24 -#define INTR_IIR 16 -#ifdef ISP2401 -#define RUNMODE_MASK (ATOMISP_RUN_MODE_VIDEO | ATOMISP_RUN_MODE_STILL_CAPTURE \ - | ATOMISP_RUN_MODE_PREVIEW) - -/* FIXME: check if can go */ -extern int atomisp_punit_hpll_freq; -#endif - -/* - * Helper function - */ -void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr, - unsigned int size); -struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd); -struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev); -struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev); -int atomisp_reset(struct atomisp_device *isp); -void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd); -void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd); -#ifndef ISP2401 -bool atomisp_buffers_queued(struct atomisp_sub_device *asd); -#else -bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe); -#endif - -/* TODO:should be here instead of atomisp_helper.h -extern void __iomem *atomisp_io_base; - -static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address) -{ - void __iomem *ret = atomisp_io_base + (address & 0x003FFFFF); - return ret; -} -*/ - -/* - * Interrupt functions - */ -void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev); -void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev); -void atomisp_wdt_work(struct work_struct *work); -void atomisp_wdt(struct timer_list *t); -void atomisp_setup_flash(struct atomisp_sub_device *asd); -irqreturn_t atomisp_isr(int irq, void *dev); 
-irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr); -const struct atomisp_format_bridge *get_atomisp_format_bridge_from_mbus( - u32 mbus_code); -bool atomisp_is_mbuscode_raw(uint32_t code); -int atomisp_get_frame_pgnr(struct atomisp_device *isp, - const struct atomisp_css_frame *frame, u32 *p_pgnr); -void atomisp_delayed_init_work(struct work_struct *work); - -/* - * Get internal fmt according to V4L2 fmt - */ - -bool atomisp_is_viewfinder_support(struct atomisp_device *isp); - -/* - * ISP features control function - */ - -/* -#ifdef ISP2401 - * Function to set sensor runmode by user when - * ATOMISP_IOC_S_SENSOR_RUNMODE ioctl was called - */ -int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd, - struct atomisp_s_runmode *runmode); -/* -#endif - * Function to enable/disable lens geometry distortion correction (GDC) and - * chromatic aberration correction (CAC) - */ -int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag, - __s32 *value); - -/* - * Function to enable/disable low light mode (including ANR) - */ -int atomisp_low_light(struct atomisp_sub_device *asd, int flag, - __s32 *value); - -/* - * Function to enable/disable extra noise reduction (XNR) in low light - * condition - */ -int atomisp_xnr(struct atomisp_sub_device *asd, int flag, int *arg); - -int atomisp_formats(struct atomisp_sub_device *asd, int flag, - struct atomisp_formats_config *config); - -/* - * Function to configure noise reduction - */ -int atomisp_nr(struct atomisp_sub_device *asd, int flag, - struct atomisp_nr_config *config); - -/* - * Function to configure temporal noise reduction (TNR) - */ -int atomisp_tnr(struct atomisp_sub_device *asd, int flag, - struct atomisp_tnr_config *config); - -/* - * Function to configure black level compensation - */ -int atomisp_black_level(struct atomisp_sub_device *asd, int flag, - struct atomisp_ob_config *config); - -/* - * Function to configure edge enhancement - */ -int atomisp_ee(struct atomisp_sub_device *asd, int flag, - 
struct atomisp_ee_config *config); - -/* - * Function to update Gamma table for gamma, brightness and contrast config - */ -int atomisp_gamma(struct atomisp_sub_device *asd, int flag, - struct atomisp_gamma_table *config); -/* - * Function to update Ctc table for Chroma Enhancement - */ -int atomisp_ctc(struct atomisp_sub_device *asd, int flag, - struct atomisp_ctc_table *config); - -/* - * Function to update gamma correction parameters - */ -int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag, - struct atomisp_gc_config *config); - -/* - * Function to update Gdc table for gdc - */ -int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag, - struct atomisp_morph_table *config); - -/* - * Function to update table for macc - */ -int atomisp_macc_table(struct atomisp_sub_device *asd, int flag, - struct atomisp_macc_config *config); -/* - * Function to get DIS statistics. - */ -int atomisp_get_dis_stat(struct atomisp_sub_device *asd, - struct atomisp_dis_statistics *stats); - -/* - * Function to get DVS2 BQ resolution settings - */ -int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd, - struct atomisp_dvs2_bq_resolutions *bq_res); - -/* - * Function to set the DIS coefficients. - */ -int atomisp_set_dis_coefs(struct atomisp_sub_device *asd, - struct atomisp_dis_coefficients *coefs); - -/* - * Function to set the DIS motion vector. 
- */ -int atomisp_set_dis_vector(struct atomisp_sub_device *asd, - struct atomisp_dis_vector *vector); - -/* - * Function to set/get 3A stat from isp - */ -int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag, - struct atomisp_3a_statistics *config); - -/* - * Function to get metadata from isp - */ -int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag, - struct atomisp_metadata *config); - -int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag, - struct atomisp_metadata_with_type *config); - -int atomisp_set_parameters(struct video_device *vdev, - struct atomisp_parameters *arg); -/* - * Function to set/get isp parameters to isp - */ -int atomisp_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_parm *config); - -/* - * Function to configure color effect of the image - */ -int atomisp_color_effect(struct atomisp_sub_device *asd, int flag, - __s32 *effect); - -/* - * Function to configure bad pixel correction - */ -int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag, - __s32 *value); - -/* - * Function to configure bad pixel correction params - */ -int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_dp_config *config); - -/* - * Function to enable/disable video image stablization - */ -int atomisp_video_stable(struct atomisp_sub_device *asd, int flag, - __s32 *value); - -/* - * Function to configure fixed pattern noise - */ -int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag, - __s32 *value); - -/* - * Function to configure fixed pattern noise table - */ -int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd, - struct v4l2_framebuffer *config); - -/* - * Function to configure false color correction - */ -int atomisp_false_color(struct atomisp_sub_device *asd, int flag, - __s32 *value); - -/* - * Function to configure false color correction params - */ -int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag, - struct 
atomisp_de_config *config); - -/* - * Function to configure white balance params - */ -int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_wb_config *config); - -int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag, - struct atomisp_3a_config *config); - -/* - * Function to setup digital zoom - */ -int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag, - __s32 *value); - -/* - * Function set camera_prefiles.xml current sensor pixel array size - */ -int atomisp_set_array_res(struct atomisp_sub_device *asd, - struct atomisp_resolution *config); - -/* - * Function to calculate real zoom region for every pipe - */ -int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd, - struct atomisp_css_dz_config *dz_config, - enum atomisp_css_pipe_id css_pipe_id); - -int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd, - struct atomisp_parameters *arg, - struct atomisp_css_params *css_param, - bool from_user); - -int atomisp_cp_lsc_table(struct atomisp_sub_device *asd, - struct atomisp_shading_table *source_st, - struct atomisp_css_params *css_param, - bool from_user); - -int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd, - struct ia_css_dvs2_coefficients *coefs, - struct atomisp_css_params *css_param, - bool from_user); - -int atomisp_cp_morph_table(struct atomisp_sub_device *asd, - struct atomisp_morph_table *source_morph_table, - struct atomisp_css_params *css_param, - bool from_user); - -int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd, - struct atomisp_dvs_6axis_config *user_6axis_config, - struct atomisp_css_params *css_param, - bool from_user); - -int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd, - struct atomisp_parameters *arg, - struct atomisp_css_params *css_param); - -int atomisp_compare_grid(struct atomisp_sub_device *asd, - struct atomisp_grid_info *atomgrid); - -int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd, - 
struct atomisp_sensor_mode_data *config); - -int atomisp_get_fmt(struct video_device *vdev, struct v4l2_format *f); - - -/* This function looks up the closest available resolution. */ -int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f, - bool *res_overflow); - -int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f); -int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f); - -int atomisp_set_shading_table(struct atomisp_sub_device *asd, - struct atomisp_shading_table *shading_table); - -int atomisp_offline_capture_configure(struct atomisp_sub_device *asd, - struct atomisp_cont_capture_conf *cvf_config); - -int atomisp_ospm_dphy_down(struct atomisp_device *isp); -int atomisp_ospm_dphy_up(struct atomisp_device *isp); -int atomisp_exif_makernote(struct atomisp_sub_device *asd, - struct atomisp_makernote_info *config); - -void atomisp_free_internal_buffers(struct atomisp_sub_device *asd); - -int atomisp_s_ae_window(struct atomisp_sub_device *asd, - struct atomisp_ae_window *arg); - -int atomisp_flash_enable(struct atomisp_sub_device *asd, - int num_frames); - -int atomisp_freq_scaling(struct atomisp_device *vdev, - enum atomisp_dfs_mode mode, - bool force); - -void atomisp_buf_done(struct atomisp_sub_device *asd, int error, - enum atomisp_css_buffer_type buf_type, - enum atomisp_css_pipe_id css_pipe_id, - bool q_buffers, enum atomisp_input_stream_id stream_id); - -void atomisp_css_flush(struct atomisp_device *isp); -int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd, - uint16_t source_pad); - -/* - * Events. Only one event has to be exported for now. 
- */ -void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id); - -enum mipi_port_id __get_mipi_port(struct atomisp_device *isp, - enum atomisp_camera_port port); - -bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe); - -void atomisp_apply_css_parameters( - struct atomisp_sub_device *asd, - struct atomisp_css_params *css_param); -void atomisp_free_css_parameters(struct atomisp_css_params *css_param); - -void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe); - -void atomisp_flush_params_queue(struct atomisp_video_pipe *asd); -/* - * Function to do Raw Buffer related operation, after enable Lock Unlock Raw Buffer - */ -int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id); -int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id); - -/* - * Function to update Raw Buffer bitmap - */ -int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id); -void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd); - -/* - * Function to enable/disable zoom for capture pipe - */ -int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd, - unsigned int *enable); - -/* - * Function to get metadata type bu pipe id - */ -enum atomisp_metadata_type -atomisp_get_metadata_type(struct atomisp_sub_device *asd, - enum ia_css_pipe_id pipe_id); - -/* - * Function for HAL to inject a fake event to wake up poll thread - */ -int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event); - -/* - * Function for HAL to query how many invalid frames at the beginning of ISP - * pipeline output - */ -int atomisp_get_invalid_frame_num(struct video_device *vdev, - int *invalid_frame_num); - -int atomisp_mrfld_power_up(struct atomisp_device *isp); -int atomisp_mrfld_power_down(struct atomisp_device *isp); -int atomisp_runtime_suspend(struct device *dev); -int atomisp_runtime_resume(struct device *dev); -#endif /* __ATOMISP_CMD_H__ */ diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h deleted file mode 100644 index 2558193045a6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __ATOMISP_COMMON_H__ -#define __ATOMISP_COMMON_H__ - -#include "../../include/linux/atomisp.h" - -#include - -#include - -#include "atomisp_compat.h" - -#include "ia_css.h" - -extern int dbg_level; -extern int dbg_func; -extern int mipicsi_flag; -extern int pad_w; -extern int pad_h; - -#define CSS_DTRACE_VERBOSITY_LEVEL 5 /* Controls trace verbosity */ -#define CSS_DTRACE_VERBOSITY_TIMEOUT 9 /* Verbosity on ISP timeout */ -#define MRFLD_MAX_ZOOM_FACTOR 1024 -#ifdef ISP2401 -#define ATOMISP_CSS_ISP_PIPE_VERSION_2_2 0 -#define ATOMISP_CSS_ISP_PIPE_VERSION_2_7 1 -#endif - -#define IS_ISP2401(isp) \ - (((isp)->media_dev.hw_revision & ATOMISP_HW_REVISION_MASK) \ - >= (ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT)) - -struct atomisp_format_bridge { - unsigned int pixelformat; - unsigned int depth; - u32 mbus_code; - enum atomisp_css_frame_format sh_fmt; - unsigned char description[32]; /* the same as struct v4l2_fmtdesc */ - bool planar; -}; - -struct atomisp_fmt { - u32 pixelformat; - u32 depth; - u32 bytesperline; - u32 
framesize; - u32 imagesize; - u32 width; - u32 height; - u32 bayer_order; -}; - -struct atomisp_buffer { - struct videobuf_buffer vb; -}; - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h deleted file mode 100644 index aac0eccee798..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h +++ /dev/null @@ -1,662 +0,0 @@ -/* - * Support for Clovertrail PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2012 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __ATOMISP_COMPAT_H__ -#define __ATOMISP_COMPAT_H__ - -#include "atomisp_compat_css20.h" - -#include "../../include/linux/atomisp.h" -#include - -#define CSS_RX_IRQ_INFO_BUFFER_OVERRUN \ - CSS_ID(CSS_RX_IRQ_INFO_BUFFER_OVERRUN) -#define CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE \ - CSS_ID(CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE) -#define CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE \ - CSS_ID(CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE) -#define CSS_RX_IRQ_INFO_ECC_CORRECTED \ - CSS_ID(CSS_RX_IRQ_INFO_ECC_CORRECTED) -#define CSS_RX_IRQ_INFO_ERR_SOT \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_SOT) -#define CSS_RX_IRQ_INFO_ERR_SOT_SYNC \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_SOT_SYNC) -#define CSS_RX_IRQ_INFO_ERR_CONTROL \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_CONTROL) -#define CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE) -#define CSS_RX_IRQ_INFO_ERR_CRC \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_CRC) -#define CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID) -#define CSS_RX_IRQ_INFO_ERR_FRAME_SYNC \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_FRAME_SYNC) -#define CSS_RX_IRQ_INFO_ERR_FRAME_DATA \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_FRAME_DATA) -#define CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT) -#define CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC) -#define CSS_RX_IRQ_INFO_ERR_LINE_SYNC \ - CSS_ID(CSS_RX_IRQ_INFO_ERR_LINE_SYNC) -#define CSS_RX_IRQ_INFO_INIT_TIMEOUT \ - CSS_ID(CSS_RX_IRQ_INFO_INIT_TIMEOUT) - -#define CSS_IRQ_INFO_CSS_RECEIVER_SOF CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_SOF) -#define CSS_IRQ_INFO_CSS_RECEIVER_EOF CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_EOF) -#define CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW \ - CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW) -#define CSS_EVENT_OUTPUT_FRAME_DONE CSS_EVENT(OUTPUT_FRAME_DONE) -#define CSS_EVENT_SEC_OUTPUT_FRAME_DONE CSS_EVENT(SECOND_OUTPUT_FRAME_DONE) -#define CSS_EVENT_VF_OUTPUT_FRAME_DONE CSS_EVENT(VF_OUTPUT_FRAME_DONE) -#define CSS_EVENT_SEC_VF_OUTPUT_FRAME_DONE 
CSS_EVENT(SECOND_VF_OUTPUT_FRAME_DONE) -#define CSS_EVENT_3A_STATISTICS_DONE CSS_EVENT(3A_STATISTICS_DONE) -#define CSS_EVENT_DIS_STATISTICS_DONE CSS_EVENT(DIS_STATISTICS_DONE) -#define CSS_EVENT_PIPELINE_DONE CSS_EVENT(PIPELINE_DONE) -#define CSS_EVENT_METADATA_DONE CSS_EVENT(METADATA_DONE) -#define CSS_EVENT_ACC_STAGE_COMPLETE CSS_EVENT(ACC_STAGE_COMPLETE) -#define CSS_EVENT_TIMER CSS_EVENT(TIMER) - -#define CSS_BUFFER_TYPE_METADATA CSS_ID(CSS_BUFFER_TYPE_METADATA) -#define CSS_BUFFER_TYPE_3A_STATISTICS CSS_ID(CSS_BUFFER_TYPE_3A_STATISTICS) -#define CSS_BUFFER_TYPE_DIS_STATISTICS CSS_ID(CSS_BUFFER_TYPE_DIS_STATISTICS) -#define CSS_BUFFER_TYPE_INPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_INPUT_FRAME) -#define CSS_BUFFER_TYPE_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_OUTPUT_FRAME) -#define CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME) -#define CSS_BUFFER_TYPE_VF_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) -#define CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME) -#define CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME \ - CSS_ID(CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME) - -#define CSS_FORMAT_RAW_8 CSS_FORMAT(RAW_8) -#define CSS_FORMAT_RAW_10 CSS_FORMAT(RAW_10) -#define CSS_FORMAT_RAW_12 CSS_FORMAT(RAW_12) -#define CSS_FORMAT_RAW_16 CSS_FORMAT(RAW_16) - -#define CSS_CAPTURE_MODE_RAW CSS_ID(CSS_CAPTURE_MODE_RAW) -#define CSS_CAPTURE_MODE_BAYER CSS_ID(CSS_CAPTURE_MODE_BAYER) -#define CSS_CAPTURE_MODE_PRIMARY CSS_ID(CSS_CAPTURE_MODE_PRIMARY) -#define CSS_CAPTURE_MODE_ADVANCED CSS_ID(CSS_CAPTURE_MODE_ADVANCED) -#define CSS_CAPTURE_MODE_LOW_LIGHT CSS_ID(CSS_CAPTURE_MODE_LOW_LIGHT) - -#define CSS_MORPH_TABLE_NUM_PLANES CSS_ID(CSS_MORPH_TABLE_NUM_PLANES) - -#define CSS_FRAME_FORMAT_NV11 CSS_ID(CSS_FRAME_FORMAT_NV11) -#define CSS_FRAME_FORMAT_NV12 CSS_ID(CSS_FRAME_FORMAT_NV12) -#define CSS_FRAME_FORMAT_NV16 CSS_ID(CSS_FRAME_FORMAT_NV16) -#define CSS_FRAME_FORMAT_NV21 CSS_ID(CSS_FRAME_FORMAT_NV21) -#define CSS_FRAME_FORMAT_NV61 
CSS_ID(CSS_FRAME_FORMAT_NV61) -#define CSS_FRAME_FORMAT_YV12 CSS_ID(CSS_FRAME_FORMAT_YV12) -#define CSS_FRAME_FORMAT_YV16 CSS_ID(CSS_FRAME_FORMAT_YV16) -#define CSS_FRAME_FORMAT_YUV420 CSS_ID(CSS_FRAME_FORMAT_YUV420) -#define CSS_FRAME_FORMAT_YUV420_16 CSS_ID(CSS_FRAME_FORMAT_YUV420_16) -#define CSS_FRAME_FORMAT_YUV422 CSS_ID(CSS_FRAME_FORMAT_YUV422) -#define CSS_FRAME_FORMAT_YUV422_16 CSS_ID(CSS_FRAME_FORMAT_YUV422_16) -#define CSS_FRAME_FORMAT_UYVY CSS_ID(CSS_FRAME_FORMAT_UYVY) -#define CSS_FRAME_FORMAT_YUYV CSS_ID(CSS_FRAME_FORMAT_YUYV) -#define CSS_FRAME_FORMAT_YUV444 CSS_ID(CSS_FRAME_FORMAT_YUV444) -#define CSS_FRAME_FORMAT_YUV_LINE CSS_ID(CSS_FRAME_FORMAT_YUV_LINE) -#define CSS_FRAME_FORMAT_RAW CSS_ID(CSS_FRAME_FORMAT_RAW) -#define CSS_FRAME_FORMAT_RGB565 CSS_ID(CSS_FRAME_FORMAT_RGB565) -#define CSS_FRAME_FORMAT_PLANAR_RGB888 CSS_ID(CSS_FRAME_FORMAT_PLANAR_RGB888) -#define CSS_FRAME_FORMAT_RGBA888 CSS_ID(CSS_FRAME_FORMAT_RGBA888) -#define CSS_FRAME_FORMAT_QPLANE6 CSS_ID(CSS_FRAME_FORMAT_QPLANE6) -#define CSS_FRAME_FORMAT_BINARY_8 CSS_ID(CSS_FRAME_FORMAT_BINARY_8) - -struct atomisp_device; -struct atomisp_sub_device; -struct video_device; -enum atomisp_input_stream_id; - -struct atomisp_metadata_buf { - struct ia_css_metadata *metadata; - void *md_vptr; - struct list_head list; -}; - -void atomisp_css_debug_dump_sp_sw_debug_info(void); -void atomisp_css_debug_dump_debug_info(const char *context); -void atomisp_css_debug_set_dtrace_level(const unsigned int trace_level); - -void atomisp_store_uint32(hrt_address addr, uint32_t data); -void atomisp_load_uint32(hrt_address addr, uint32_t *data); - -int atomisp_css_init(struct atomisp_device *isp); - -void atomisp_css_uninit(struct atomisp_device *isp); - -void atomisp_css_suspend(struct atomisp_device *isp); - -int atomisp_css_resume(struct atomisp_device *isp); - -void atomisp_css_init_struct(struct atomisp_sub_device *asd); - -int atomisp_css_irq_translate(struct atomisp_device *isp, - unsigned int *infos); - 
-void atomisp_css_rx_get_irq_info(enum mipi_port_id port, - unsigned int *infos); - -void atomisp_css_rx_clear_irq_info(enum mipi_port_id port, - unsigned int infos); - -int atomisp_css_irq_enable(struct atomisp_device *isp, - enum atomisp_css_irq_info info, bool enable); - -int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd, - struct videobuf_vmalloc_memory *vm_mem, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_buffer_type css_buf_type, - enum atomisp_css_pipe_id css_pipe_id); - -int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd, - struct atomisp_s3a_buf *s3a_buf, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id); - -int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd, - struct atomisp_metadata_buf *metadata_buf, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id); - -int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd, - struct atomisp_dis_buf *dis_buf, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id); - -void atomisp_css_mmu_invalidate_cache(void); - -void atomisp_css_mmu_invalidate_tlb(void); - -int atomisp_css_start(struct atomisp_sub_device *asd, - enum atomisp_css_pipe_id pipe_id, bool in_reset); - -void atomisp_css_update_isp_params(struct atomisp_sub_device *asd); -void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd, - struct ia_css_pipe *pipe); - -int atomisp_css_queue_buffer(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id pipe_id, - enum atomisp_css_buffer_type buf_type, - struct atomisp_css_buffer *isp_css_buffer); - -int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id pipe_id, - enum atomisp_css_buffer_type buf_type, - struct atomisp_css_buffer *isp_css_buffer); - -int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd, - 
uint16_t stream_id, - struct atomisp_s3a_buf *s3a_buf, - struct atomisp_dis_buf *dis_buf, - struct atomisp_metadata_buf *md_buf); - -void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd); - -void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf); - -void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf); - -void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf *metadata_buf); - -int atomisp_css_get_grid_info(struct atomisp_sub_device *asd, - enum atomisp_css_pipe_id pipe_id, - int source_pad); - -int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd); - -int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd); - -int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd); - -void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd); - -void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd, - struct atomisp_css_buffer *isp_css_buffer, - struct ia_css_isp_dvs_statistics_map *dvs_map); - -int atomisp_css_dequeue_event(struct atomisp_css_event *current_event); - -void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd, - struct atomisp_css_event *current_event); - -int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - struct v4l2_mbus_framefmt *ffmt, - int isys_stream); - -void atomisp_css_isys_set_link(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - int link, - int isys_stream); - -void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - bool valid, - int isys_stream); - -void atomisp_css_isys_set_format(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format format, - int isys_stream); - -int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - struct v4l2_mbus_framefmt *ffmt); - -int 
atomisp_css_isys_two_stream_cfg(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format input_format); - -void atomisp_css_isys_two_stream_cfg_update_stream1( - struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format input_format, - unsigned int width, unsigned int height); - -void atomisp_css_isys_two_stream_cfg_update_stream2( - struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format input_format, - unsigned int width, unsigned int height); - -int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - struct v4l2_mbus_framefmt *ffmt); - -void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - unsigned int bin_factor); - -void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_bayer_order bayer_order); - -void atomisp_css_input_set_format(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format format); - -int atomisp_css_input_set_effective_resolution( - struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - unsigned int width, - unsigned int height); - -void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd, - unsigned int dvs_w, unsigned int dvs_h); - -void atomisp_css_input_set_two_pixels_per_clock( - struct atomisp_sub_device *asd, - bool two_ppc); - -void atomisp_css_enable_raw_binning(struct atomisp_sub_device *asd, - bool enable); - -void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable); - -void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd, - enum atomisp_css_capture_mode mode); - -void atomisp_css_input_set_mode(struct atomisp_sub_device *asd, - enum atomisp_css_input_mode mode); - -void 
atomisp_css_capture_enable_online(struct atomisp_sub_device *asd, - unsigned short stream_index, bool enable); - -void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd, - unsigned short stream_index, bool enable); - -void atomisp_css_video_enable_online(struct atomisp_sub_device *asd, - bool enable); - -void atomisp_css_enable_continuous(struct atomisp_sub_device *asd, - bool enable); - -void atomisp_css_enable_cvf(struct atomisp_sub_device *asd, - bool enable); - -int atomisp_css_input_configure_port(struct atomisp_sub_device *asd, - enum mipi_port_id port, - unsigned int num_lanes, - unsigned int timeout, - unsigned int mipi_freq, - enum atomisp_input_format metadata_format, - unsigned int metadata_width, - unsigned int metadata_height); - -int atomisp_css_frame_allocate(struct atomisp_css_frame **frame, - unsigned int width, unsigned int height, - enum atomisp_css_frame_format format, - unsigned int padded_width, - unsigned int raw_bit_depth); - -int atomisp_css_frame_allocate_from_info(struct atomisp_css_frame **frame, - const struct atomisp_css_frame_info *info); - -void atomisp_css_frame_free(struct atomisp_css_frame *frame); - -int atomisp_css_frame_map(struct atomisp_css_frame **frame, - const struct atomisp_css_frame_info *info, - const void __user *data, uint16_t attribute, - void *context); - -int atomisp_css_set_black_frame(struct atomisp_sub_device *asd, - const struct atomisp_css_frame *raw_black_frame); - -int atomisp_css_allocate_continuous_frames(bool init_time, - struct atomisp_sub_device *asd); - -void atomisp_css_update_continuous_frames(struct atomisp_sub_device *asd); - -void atomisp_create_pipes_stream(struct atomisp_sub_device *asd); -void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd); - -int atomisp_css_stop(struct atomisp_sub_device *asd, - enum atomisp_css_pipe_id pipe_id, bool in_reset); - -int atomisp_css_continuous_set_num_raw_frames( - struct atomisp_sub_device *asd, - int num_frames); - -void 
atomisp_css_disable_vf_pp(struct atomisp_sub_device *asd, - bool disable); - -int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd, - unsigned int stream_index, - unsigned int width, unsigned int height, - unsigned int padded_width, - enum atomisp_css_frame_format format); - -int atomisp_css_yuvpp_configure_output(struct atomisp_sub_device *asd, - unsigned int stream_index, - unsigned int width, unsigned int height, - unsigned int padded_width, - enum atomisp_css_frame_format format); - -int atomisp_css_yuvpp_configure_viewfinder( - struct atomisp_sub_device *asd, - unsigned int stream_index, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format); - -int atomisp_css_yuvpp_get_output_frame_info( - struct atomisp_sub_device *asd, - unsigned int stream_index, - struct atomisp_css_frame_info *info); - -int atomisp_css_yuvpp_get_viewfinder_frame_info( - struct atomisp_sub_device *asd, - unsigned int stream_index, - struct atomisp_css_frame_info *info); - -int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format); - -int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format); - -int atomisp_css_video_configure_output(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format); - -int atomisp_get_css_frame_info(struct atomisp_sub_device *asd, - uint16_t source_pad, - struct atomisp_css_frame_info *frame_info); - -int atomisp_css_video_configure_viewfinder(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format); - -int atomisp_css_capture_configure_viewfinder( - struct atomisp_sub_device *asd, - 
unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format); - -int atomisp_css_video_get_viewfinder_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info); - -int atomisp_css_capture_get_viewfinder_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info); - -int atomisp_css_copy_get_output_frame_info( - struct atomisp_sub_device *asd, - unsigned int stream_index, - struct atomisp_css_frame_info *info); - -int atomisp_css_capture_get_output_raw_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info); - -int atomisp_css_preview_get_output_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info); - -int atomisp_css_capture_get_output_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info); - -int atomisp_css_video_get_output_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info); - -int atomisp_css_preview_configure_pp_input( - struct atomisp_sub_device *asd, - unsigned int width, unsigned int height); - -int atomisp_css_capture_configure_pp_input( - struct atomisp_sub_device *asd, - unsigned int width, unsigned int height); - -int atomisp_css_video_configure_pp_input( - struct atomisp_sub_device *asd, - unsigned int width, unsigned int height); - -int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd, - int num_captures, unsigned int skip, int offset); -int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id); -int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id); - -int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd, - bool enable); - -void atomisp_css_send_input_frame(struct atomisp_sub_device *asd, - unsigned short *data, unsigned int width, - unsigned int height); - -bool atomisp_css_isp_has_started(void); - -void atomisp_css_request_flash(struct atomisp_sub_device *asd); - 
-void atomisp_css_set_wb_config(struct atomisp_sub_device *asd, - struct atomisp_css_wb_config *wb_config); - -void atomisp_css_set_ob_config(struct atomisp_sub_device *asd, - struct atomisp_css_ob_config *ob_config); - -void atomisp_css_set_dp_config(struct atomisp_sub_device *asd, - struct atomisp_css_dp_config *dp_config); - -void atomisp_css_set_de_config(struct atomisp_sub_device *asd, - struct atomisp_css_de_config *de_config); - -void atomisp_css_set_dz_config(struct atomisp_sub_device *asd, - struct atomisp_css_dz_config *dz_config); - -void atomisp_css_set_default_de_config(struct atomisp_sub_device *asd); - -void atomisp_css_set_ce_config(struct atomisp_sub_device *asd, - struct atomisp_css_ce_config *ce_config); - -void atomisp_css_set_nr_config(struct atomisp_sub_device *asd, - struct atomisp_css_nr_config *nr_config); - -void atomisp_css_set_ee_config(struct atomisp_sub_device *asd, - struct atomisp_css_ee_config *ee_config); - -void atomisp_css_set_tnr_config(struct atomisp_sub_device *asd, - struct atomisp_css_tnr_config *tnr_config); - -void atomisp_css_set_cc_config(struct atomisp_sub_device *asd, - struct atomisp_css_cc_config *cc_config); - -void atomisp_css_set_macc_table(struct atomisp_sub_device *asd, - struct atomisp_css_macc_table *macc_table); - -void atomisp_css_set_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_css_gamma_table *gamma_table); - -void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd, - struct atomisp_css_ctc_table *ctc_table); - -void atomisp_css_set_gc_config(struct atomisp_sub_device *asd, - struct atomisp_css_gc_config *gc_config); - -void atomisp_css_set_3a_config(struct atomisp_sub_device *asd, - struct atomisp_css_3a_config *s3a_config); - -void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd, - struct atomisp_dis_vector *vector); - -void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd, - struct ia_css_dvs2_coefficients *coefs); - -int atomisp_css_set_dis_coefs(struct 
atomisp_sub_device *asd, - struct atomisp_dis_coefficients *coefs); - -void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd, - unsigned int zoom); - -int atomisp_css_get_wb_config(struct atomisp_sub_device *asd, - struct atomisp_wb_config *config); - -int atomisp_css_get_ob_config(struct atomisp_sub_device *asd, - struct atomisp_ob_config *config); - -int atomisp_css_get_dp_config(struct atomisp_sub_device *asd, - struct atomisp_dp_config *config); - -int atomisp_css_get_de_config(struct atomisp_sub_device *asd, - struct atomisp_de_config *config); - -int atomisp_css_get_nr_config(struct atomisp_sub_device *asd, - struct atomisp_nr_config *config); - -int atomisp_css_get_ee_config(struct atomisp_sub_device *asd, - struct atomisp_ee_config *config); - -int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd, - struct atomisp_tnr_config *config); - -int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd, - struct atomisp_ctc_table *config); - -int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_gamma_table *config); - -int atomisp_css_get_gc_config(struct atomisp_sub_device *asd, - struct atomisp_gc_config *config); - -int atomisp_css_get_3a_config(struct atomisp_sub_device *asd, - struct atomisp_3a_config *config); - -int atomisp_css_get_formats_config(struct atomisp_sub_device *asd, - struct atomisp_formats_config *formats_config); - -void atomisp_css_set_formats_config(struct atomisp_sub_device *asd, - struct atomisp_css_formats_config *formats_config); - -int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd, - unsigned int *zoom); - -struct atomisp_css_shading_table *atomisp_css_shading_table_alloc( - unsigned int width, unsigned int height); - -void atomisp_css_set_shading_table(struct atomisp_sub_device *asd, - struct atomisp_css_shading_table *table); - -void atomisp_css_shading_table_free(struct atomisp_css_shading_table *table); - -struct atomisp_css_morph_table *atomisp_css_morph_table_allocate( 
- unsigned int width, unsigned int height); - -void atomisp_css_set_morph_table(struct atomisp_sub_device *asd, - struct atomisp_css_morph_table *table); - -void atomisp_css_get_morph_table(struct atomisp_sub_device *asd, - struct atomisp_css_morph_table *table); - -void atomisp_css_morph_table_free(struct atomisp_css_morph_table *table); - -void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp, - unsigned int overlap); - -int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, - struct atomisp_dis_statistics *stats); - -int atomisp_css_update_stream(struct atomisp_sub_device *asd); - -int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd); - -int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd); - -int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd); - -void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd); - -int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd, - struct atomisp_css_fw_info *fw, - enum atomisp_css_pipe_id pipe_id, - unsigned int type); - -void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd, - struct atomisp_css_fw_info *fw, - enum atomisp_css_pipe_id pipe_id); - -int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd); - -void atomisp_css_acc_done(struct atomisp_sub_device *asd); - -int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd, - struct atomisp_css_fw_info *fw, - unsigned int index); - -void atomisp_css_unload_acc_binary(struct atomisp_sub_device *asd); - -struct atomisp_acc_fw; -int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw); - -int atomisp_css_isr_thread(struct atomisp_device *isp, - bool *frame_done_found, - bool *css_pipe_done); - -bool atomisp_css_valid_sof(struct atomisp_device *isp); - -void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable); - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c 
b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c deleted file mode 100644 index df88d9df2027..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c +++ /dev/null @@ -1,4704 +0,0 @@ -/* - * Support for Clovertrail PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#include -#include -#include - -#include "mmu/isp_mmu.h" -#include "mmu/sh_mmu_mrfld.h" -#include "hmm/hmm_bo.h" -#include "hmm/hmm.h" - -#include "atomisp_compat.h" -#include "atomisp_internal.h" -#include "atomisp_cmd.h" -#include "atomisp-regs.h" -#include "atomisp_fops.h" -#include "atomisp_ioctl.h" -#include "atomisp_acc.h" - -#include "hrt/hive_isp_css_mm_hrt.h" - -#include - -#include "ia_css_debug.h" -#include "ia_css_isp_param.h" -#include "sh_css_hrt.h" -#include "ia_css_isys.h" - -#include - -/* Assume max number of ACC stages */ -#define MAX_ACC_STAGES 20 - -/* Ideally, this should come from CSS headers */ -#define NO_LINK -1 - -/* - * to serialize MMIO access , this is due to ISP2400 silicon issue Sighting - * #4684168, if concurrency access happened, system may hard hang. 
- */ -static DEFINE_SPINLOCK(mmio_lock); - -enum frame_info_type { - ATOMISP_CSS_VF_FRAME, - ATOMISP_CSS_SECOND_VF_FRAME, - ATOMISP_CSS_OUTPUT_FRAME, - ATOMISP_CSS_SECOND_OUTPUT_FRAME, - ATOMISP_CSS_RAW_FRAME, -}; - -struct bayer_ds_factor { - unsigned int numerator; - unsigned int denominator; -}; - -void atomisp_css_debug_dump_sp_sw_debug_info(void) -{ - ia_css_debug_dump_sp_sw_debug_info(); -} - -void atomisp_css_debug_dump_debug_info(const char *context) -{ - ia_css_debug_dump_debug_info(context); -} - -void atomisp_css_debug_set_dtrace_level(const unsigned int trace_level) -{ - ia_css_debug_set_dtrace_level(trace_level); -} - -unsigned int atomisp_css_debug_get_dtrace_level(void) -{ - return ia_css_debug_trace_level; -} - -static void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data) -{ - unsigned long flags; - - spin_lock_irqsave(&mmio_lock, flags); - _hrt_master_port_store_8(addr, data); - spin_unlock_irqrestore(&mmio_lock, flags); -} - -static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data) -{ - unsigned long flags; - - spin_lock_irqsave(&mmio_lock, flags); - _hrt_master_port_store_16(addr, data); - spin_unlock_irqrestore(&mmio_lock, flags); -} - -static void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data) -{ - unsigned long flags; - - spin_lock_irqsave(&mmio_lock, flags); - _hrt_master_port_store_32(addr, data); - spin_unlock_irqrestore(&mmio_lock, flags); -} - -static uint8_t atomisp_css2_hw_load_8(hrt_address addr) -{ - unsigned long flags; - uint8_t ret; - - spin_lock_irqsave(&mmio_lock, flags); - ret = _hrt_master_port_load_8(addr); - spin_unlock_irqrestore(&mmio_lock, flags); - return ret; -} - -static uint16_t atomisp_css2_hw_load_16(hrt_address addr) -{ - unsigned long flags; - uint16_t ret; - - spin_lock_irqsave(&mmio_lock, flags); - ret = _hrt_master_port_load_16(addr); - spin_unlock_irqrestore(&mmio_lock, flags); - return ret; -} - -static uint32_t atomisp_css2_hw_load_32(hrt_address addr) -{ - unsigned long flags; 
- uint32_t ret; - - spin_lock_irqsave(&mmio_lock, flags); - ret = _hrt_master_port_load_32(addr); - spin_unlock_irqrestore(&mmio_lock, flags); - return ret; -} - -static void atomisp_css2_hw_store(hrt_address addr, - const void *from, uint32_t n) -{ - unsigned long flags; - unsigned int i; - unsigned int _to = (unsigned int)addr; - const char *_from = (const char *)from; - - spin_lock_irqsave(&mmio_lock, flags); - for (i = 0; i < n; i++, _to++, _from++) - _hrt_master_port_store_8(_to , *_from); - spin_unlock_irqrestore(&mmio_lock, flags); -} - -static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n) -{ - unsigned long flags; - unsigned int i; - char *_to = (char *)to; - unsigned int _from = (unsigned int)addr; - - spin_lock_irqsave(&mmio_lock, flags); - for (i = 0; i < n; i++, _to++, _from++) - *_to = _hrt_master_port_load_8(_from); - spin_unlock_irqrestore(&mmio_lock, flags); -} - -static int atomisp_css2_dbg_print(const char *fmt, va_list args) -{ - vprintk(fmt, args); - return 0; -} - -static int atomisp_css2_dbg_ftrace_print(const char *fmt, va_list args) -{ - ftrace_vprintk(fmt, args); - return 0; -} - -static int atomisp_css2_err_print(const char *fmt, va_list args) -{ - vprintk(fmt, args); - return 0; -} - -void atomisp_store_uint32(hrt_address addr, uint32_t data) -{ - atomisp_css2_hw_store_32(addr, data); -} - -void atomisp_load_uint32(hrt_address addr, uint32_t *data) -{ - *data = atomisp_css2_hw_load_32(addr); -} -static int hmm_get_mmu_base_addr(unsigned int *mmu_base_addr) -{ - if (sh_mmu_mrfld.get_pd_base == NULL) { - dev_err(atomisp_dev, "get mmu base address failed.\n"); - return -EINVAL; - } - - *mmu_base_addr = sh_mmu_mrfld.get_pd_base(&bo_device.mmu, - bo_device.mmu.base_address); - return 0; -} - -static void atomisp_isp_parameters_clean_up( - struct atomisp_css_isp_config *config) -{ - /* - * Set NULL to configs pointer to avoid they are set into isp again when - * some configs are changed and need to be updated later. 
- */ - memset(config, 0, sizeof(*config)); -} - -static void __dump_pipe_config(struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env, - unsigned int pipe_id) -{ - struct atomisp_device *isp = asd->isp; - - if (stream_env->pipes[pipe_id]) { - struct ia_css_pipe_config *p_config; - struct ia_css_pipe_extra_config *pe_config; - - p_config = &stream_env->pipe_configs[pipe_id]; - pe_config = &stream_env->pipe_extra_configs[pipe_id]; - dev_dbg(isp->dev, "dumping pipe[%d] config:\n", pipe_id); - dev_dbg(isp->dev, - "pipe_config.pipe_mode:%d.\n", p_config->mode); - dev_dbg(isp->dev, - "pipe_config.output_info[0] w=%d, h=%d.\n", - p_config->output_info[0].res.width, - p_config->output_info[0].res.height); - dev_dbg(isp->dev, - "pipe_config.vf_pp_in_res w=%d, h=%d.\n", - p_config->vf_pp_in_res.width, - p_config->vf_pp_in_res.height); - dev_dbg(isp->dev, - "pipe_config.capt_pp_in_res w=%d, h=%d.\n", - p_config->capt_pp_in_res.width, - p_config->capt_pp_in_res.height); - dev_dbg(isp->dev, - "pipe_config.output.padded w=%d.\n", - p_config->output_info[0].padded_width); - dev_dbg(isp->dev, - "pipe_config.vf_output_info[0] w=%d, h=%d.\n", - p_config->vf_output_info[0].res.width, - p_config->vf_output_info[0].res.height); - dev_dbg(isp->dev, - "pipe_config.bayer_ds_out_res w=%d, h=%d.\n", - p_config->bayer_ds_out_res.width, - p_config->bayer_ds_out_res.height); - dev_dbg(isp->dev, - "pipe_config.envelope w=%d, h=%d.\n", - p_config->dvs_envelope.width, - p_config->dvs_envelope.height); - dev_dbg(isp->dev, - "pipe_config.dvs_frame_delay=%d.\n", - p_config->dvs_frame_delay); - dev_dbg(isp->dev, - "pipe_config.isp_pipe_version:%d.\n", - p_config->isp_pipe_version); - dev_dbg(isp->dev, - "pipe_config.acc_extension=%p.\n", - p_config->acc_extension); - dev_dbg(isp->dev, - "pipe_config.acc_stages=%p.\n", - p_config->acc_stages); - dev_dbg(isp->dev, - "pipe_config.num_acc_stages=%d.\n", - p_config->num_acc_stages); - dev_dbg(isp->dev, - 
"pipe_config.acc_num_execs=%d.\n", - p_config->acc_num_execs); - dev_dbg(isp->dev, - "pipe_config.default_capture_config.capture_mode=%d.\n", - p_config->default_capture_config.mode); - dev_dbg(isp->dev, - "pipe_config.enable_dz=%d.\n", - p_config->enable_dz); - dev_dbg(isp->dev, - "pipe_config.default_capture_config.enable_xnr=%d.\n", - p_config->default_capture_config.enable_xnr); - dev_dbg(isp->dev, - "dumping pipe[%d] extra config:\n", pipe_id); - dev_dbg(isp->dev, - "pipe_extra_config.enable_raw_binning:%d.\n", - pe_config->enable_raw_binning); - dev_dbg(isp->dev, - "pipe_extra_config.enable_yuv_ds:%d.\n", - pe_config->enable_yuv_ds); - dev_dbg(isp->dev, - "pipe_extra_config.enable_high_speed:%d.\n", - pe_config->enable_high_speed); - dev_dbg(isp->dev, - "pipe_extra_config.enable_dvs_6axis:%d.\n", - pe_config->enable_dvs_6axis); - dev_dbg(isp->dev, - "pipe_extra_config.enable_reduced_pipe:%d.\n", - pe_config->enable_reduced_pipe); - dev_dbg(isp->dev, - "pipe_(extra_)config.enable_dz:%d.\n", - p_config->enable_dz); - dev_dbg(isp->dev, - "pipe_extra_config.disable_vf_pp:%d.\n", - pe_config->disable_vf_pp); - } -} - -static void __dump_stream_config(struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env) -{ - struct atomisp_device *isp = asd->isp; - struct ia_css_stream_config *s_config; - int j; - bool valid_stream = false; - - for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) { - if (stream_env->pipes[j]) { - __dump_pipe_config(asd, stream_env, j); - valid_stream = true; - } - } - if (!valid_stream) - return; - s_config = &stream_env->stream_config; - dev_dbg(isp->dev, "stream_config.mode=%d.\n", s_config->mode); - - if (s_config->mode == IA_CSS_INPUT_MODE_SENSOR || - s_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { - dev_dbg(isp->dev, "stream_config.source.port.port=%d.\n", - s_config->source.port.port); - dev_dbg(isp->dev, "stream_config.source.port.num_lanes=%d.\n", - s_config->source.port.num_lanes); - dev_dbg(isp->dev, 
"stream_config.source.port.timeout=%d.\n", - s_config->source.port.timeout); - dev_dbg(isp->dev, "stream_config.source.port.rxcount=0x%x.\n", - s_config->source.port.rxcount); - dev_dbg(isp->dev, "stream_config.source.port.compression.type=%d.\n", - s_config->source.port.compression.type); - dev_dbg(isp->dev, "stream_config.source.port.compression.compressed_bits_per_pixel=%d.\n", - s_config->source.port.compression. - compressed_bits_per_pixel); - dev_dbg(isp->dev, "stream_config.source.port.compression.uncompressed_bits_per_pixel=%d.\n", - s_config->source.port.compression. - uncompressed_bits_per_pixel); - } else if (s_config->mode == IA_CSS_INPUT_MODE_TPG) { - dev_dbg(isp->dev, "stream_config.source.tpg.id=%d.\n", - s_config->source.tpg.id); - dev_dbg(isp->dev, "stream_config.source.tpg.mode=%d.\n", - s_config->source.tpg.mode); - dev_dbg(isp->dev, "stream_config.source.tpg.x_mask=%d.\n", - s_config->source.tpg.x_mask); - dev_dbg(isp->dev, "stream_config.source.tpg.x_delta=%d.\n", - s_config->source.tpg.x_delta); - dev_dbg(isp->dev, "stream_config.source.tpg.y_mask=%d.\n", - s_config->source.tpg.y_mask); - dev_dbg(isp->dev, "stream_config.source.tpg.y_delta=%d.\n", - s_config->source.tpg.y_delta); - dev_dbg(isp->dev, "stream_config.source.tpg.xy_mask=%d.\n", - s_config->source.tpg.xy_mask); - } else if (s_config->mode == IA_CSS_INPUT_MODE_PRBS) { - dev_dbg(isp->dev, "stream_config.source.prbs.id=%d.\n", - s_config->source.prbs.id); - dev_dbg(isp->dev, "stream_config.source.prbs.h_blank=%d.\n", - s_config->source.prbs.h_blank); - dev_dbg(isp->dev, "stream_config.source.prbs.v_blank=%d.\n", - s_config->source.prbs.v_blank); - dev_dbg(isp->dev, "stream_config.source.prbs.seed=%d.\n", - s_config->source.prbs.seed); - dev_dbg(isp->dev, "stream_config.source.prbs.seed1=%d.\n", - s_config->source.prbs.seed1); - } - - for (j = 0; j < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; j++) { - dev_dbg(isp->dev, "stream_configisys_config[%d].input_res w=%d, h=%d.\n", - j, - 
s_config->isys_config[j].input_res.width, - s_config->isys_config[j].input_res.height); - - dev_dbg(isp->dev, "stream_configisys_config[%d].linked_isys_stream_id=%d\n", - j, - s_config->isys_config[j].linked_isys_stream_id); - - dev_dbg(isp->dev, "stream_configisys_config[%d].format=%d\n", - j, - s_config->isys_config[j].format); - - dev_dbg(isp->dev, "stream_configisys_config[%d].valid=%d.\n", - j, - s_config->isys_config[j].valid); - } - - dev_dbg(isp->dev, "stream_config.input_config.input_res w=%d, h=%d.\n", - s_config->input_config.input_res.width, - s_config->input_config.input_res.height); - - dev_dbg(isp->dev, "stream_config.input_config.effective_res w=%d, h=%d.\n", - s_config->input_config.effective_res.width, - s_config->input_config.effective_res.height); - - dev_dbg(isp->dev, "stream_config.input_config.format=%d\n", - s_config->input_config.format); - - dev_dbg(isp->dev, "stream_config.input_config.bayer_order=%d.\n", - s_config->input_config.bayer_order); - - dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n", - s_config->pixels_per_clock); - dev_dbg(isp->dev, "stream_config.online=%d.\n", s_config->online); - dev_dbg(isp->dev, "stream_config.continuous=%d.\n", - s_config->continuous); - dev_dbg(isp->dev, "stream_config.disable_cont_viewfinder=%d.\n", - s_config->disable_cont_viewfinder); - dev_dbg(isp->dev, "stream_config.channel_id=%d.\n", - s_config->channel_id); - dev_dbg(isp->dev, "stream_config.init_num_cont_raw_buf=%d.\n", - s_config->init_num_cont_raw_buf); - dev_dbg(isp->dev, "stream_config.target_num_cont_raw_buf=%d.\n", - s_config->target_num_cont_raw_buf); - dev_dbg(isp->dev, "stream_config.left_padding=%d.\n", - s_config->left_padding); - dev_dbg(isp->dev, "stream_config.sensor_binning_factor=%d.\n", - s_config->sensor_binning_factor); - dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n", - s_config->pixels_per_clock); - dev_dbg(isp->dev, "stream_config.pack_raw_pixels=%d.\n", - s_config->pack_raw_pixels); - dev_dbg(isp->dev, 
"stream_config.flash_gpio_pin=%d.\n", - s_config->flash_gpio_pin); - dev_dbg(isp->dev, "stream_config.mipi_buffer_config.size_mem_words=%d.\n", - s_config->mipi_buffer_config.size_mem_words); - dev_dbg(isp->dev, "stream_config.mipi_buffer_config.contiguous=%d.\n", - s_config->mipi_buffer_config.contiguous); - dev_dbg(isp->dev, "stream_config.metadata_config.data_type=%d.\n", - s_config->metadata_config.data_type); - dev_dbg(isp->dev, "stream_config.metadata_config.resolution w=%d, h=%d.\n", - s_config->metadata_config.resolution.width, - s_config->metadata_config.resolution.height); -} - -static int __destroy_stream(struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env, bool force) -{ - struct atomisp_device *isp = asd->isp; - int i; - unsigned long timeout; - - if (!stream_env->stream) - return 0; - - if (!force) { - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - if (stream_env->update_pipe[i]) - break; - - if (i == IA_CSS_PIPE_ID_NUM) - return 0; - } - - if (stream_env->stream_state == CSS_STREAM_STARTED - && ia_css_stream_stop(stream_env->stream) != IA_CSS_SUCCESS) { - dev_err(isp->dev, "stop stream failed.\n"); - return -EINVAL; - } - - if (stream_env->stream_state == CSS_STREAM_STARTED) { - timeout = jiffies + msecs_to_jiffies(40); - while (1) { - if (ia_css_stream_has_stopped(stream_env->stream)) - break; - - if (time_after(jiffies, timeout)) { - dev_warn(isp->dev, "stop stream timeout.\n"); - break; - } - - usleep_range(100, 200); - } - } - - stream_env->stream_state = CSS_STREAM_STOPPED; - - if (ia_css_stream_destroy(stream_env->stream) != IA_CSS_SUCCESS) { - dev_err(isp->dev, "destroy stream failed.\n"); - return -EINVAL; - } - stream_env->stream_state = CSS_STREAM_UNINIT; - stream_env->stream = NULL; - - return 0; -} - -static int __destroy_streams(struct atomisp_sub_device *asd, bool force) -{ - int ret, i; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - ret = __destroy_stream(asd, &asd->stream_env[i], force); - if (ret) - return 
ret; - } - asd->stream_prepared = false; - return 0; -} -static int __create_stream(struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env) -{ - int pipe_index = 0, i; - struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM]; - - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - if (stream_env->pipes[i]) - multi_pipes[pipe_index++] = stream_env->pipes[i]; - } - if (pipe_index == 0) - return 0; - - stream_env->stream_config.target_num_cont_raw_buf = - asd->continuous_raw_buffer_size->val; - stream_env->stream_config.channel_id = stream_env->ch_id; - stream_env->stream_config.ia_css_enable_raw_buffer_locking = - asd->enable_raw_buffer_lock->val; - - __dump_stream_config(asd, stream_env); - if (ia_css_stream_create(&stream_env->stream_config, - pipe_index, multi_pipes, &stream_env->stream) != IA_CSS_SUCCESS) - return -EINVAL; - if (ia_css_stream_get_info(stream_env->stream, - &stream_env->stream_info) != IA_CSS_SUCCESS) { - ia_css_stream_destroy(stream_env->stream); - stream_env->stream = NULL; - return -EINVAL; - } - - stream_env->stream_state = CSS_STREAM_CREATED; - return 0; -} - -static int __create_streams(struct atomisp_sub_device *asd) -{ - int ret, i; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - ret = __create_stream(asd, &asd->stream_env[i]); - if (ret) - goto rollback; - } - asd->stream_prepared = true; - return 0; -rollback: - for (i--; i >= 0; i--) - __destroy_stream(asd, &asd->stream_env[i], true); - return ret; -} - -static int __destroy_stream_pipes(struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env, - bool force) -{ - struct atomisp_device *isp = asd->isp; - int ret = 0; - int i; - - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - if (!stream_env->pipes[i] || - !(force || stream_env->update_pipe[i])) - continue; - if (ia_css_pipe_destroy(stream_env->pipes[i]) - != IA_CSS_SUCCESS) { - dev_err(isp->dev, - "destroy pipe[%d]failed.cannot recover.\n", i); - ret = -EINVAL; - } - stream_env->pipes[i] = NULL; - 
stream_env->update_pipe[i] = false; - } - return ret; -} - -static int __destroy_pipes(struct atomisp_sub_device *asd, bool force) -{ - struct atomisp_device *isp = asd->isp; - int i; - int ret = 0; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - if (asd->stream_env[i].stream) { - - dev_err(isp->dev, - "cannot destroy css pipes for stream[%d].\n", - i); - continue; - } - - ret = __destroy_stream_pipes(asd, &asd->stream_env[i], force); - if (ret) - return ret; - } - - return 0; -} - -void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd) -{ - __destroy_streams(asd, true); - __destroy_pipes(asd, true); -} - -static void __apply_additional_pipe_config( - struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - - if (pipe_id < 0 || pipe_id >= IA_CSS_PIPE_ID_NUM) { - dev_err(isp->dev, - "wrong pipe_id for additional pipe config.\n"); - return; - } - - /* apply default pipe config */ - stream_env->pipe_configs[pipe_id].isp_pipe_version = 2; - stream_env->pipe_configs[pipe_id].enable_dz = - asd->disable_dz->val ? false : true; - /* apply isp 2.2 specific config for baytrail*/ - switch (pipe_id) { - case IA_CSS_PIPE_ID_CAPTURE: - /* enable capture pp/dz manually or digital zoom would - * fail*/ - if (stream_env->pipe_configs[pipe_id]. 
- default_capture_config.mode == CSS_CAPTURE_MODE_RAW) - stream_env->pipe_configs[pipe_id].enable_dz = false; -#ifdef ISP2401 - - /* the isp default to use ISP2.2 and the camera hal will - * control whether use isp2.7 */ - if (asd->select_isp_version->val == - ATOMISP_CSS_ISP_PIPE_VERSION_2_7) - stream_env->pipe_configs[pipe_id].isp_pipe_version = - SH_CSS_ISP_PIPE_VERSION_2_7; - else - stream_env->pipe_configs[pipe_id].isp_pipe_version = - SH_CSS_ISP_PIPE_VERSION_2_2; -#endif - break; - case IA_CSS_PIPE_ID_VIDEO: - /* enable reduced pipe to have binary - * video_dz_2_min selected*/ - stream_env->pipe_extra_configs[pipe_id] - .enable_reduced_pipe = true; - stream_env->pipe_configs[pipe_id] - .enable_dz = false; - if (ATOMISP_SOC_CAMERA(asd)) - stream_env->pipe_configs[pipe_id].enable_dz = true; - - if (asd->params.video_dis_en) { - stream_env->pipe_extra_configs[pipe_id] - .enable_dvs_6axis = true; - stream_env->pipe_configs[pipe_id] - .dvs_frame_delay = - ATOMISP_CSS2_NUM_DVS_FRAME_DELAY; - } - break; - case IA_CSS_PIPE_ID_PREVIEW: - break; - case IA_CSS_PIPE_ID_YUVPP: - case IA_CSS_PIPE_ID_COPY: - if (ATOMISP_SOC_CAMERA(asd)) - stream_env->pipe_configs[pipe_id].enable_dz = true; - else - stream_env->pipe_configs[pipe_id].enable_dz = false; - break; - case IA_CSS_PIPE_ID_ACC: - stream_env->pipe_configs[pipe_id].mode = IA_CSS_PIPE_MODE_ACC; - stream_env->pipe_configs[pipe_id].enable_dz = false; - break; - default: - break; - } -} - -static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd, - enum ia_css_pipe_id pipe_id) -{ - if (!asd) - return false; - - if (pipe_id == CSS_PIPE_ID_ACC || pipe_id == CSS_PIPE_ID_YUVPP) - return true; - - if (asd->vfpp) { - if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { - if (pipe_id == IA_CSS_PIPE_ID_VIDEO) - return true; - else - return false; - } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { - if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) - return true; - else - return false; - } - } - - if 
(!asd->run_mode) - return false; - - if (asd->copy_mode && pipe_id == IA_CSS_PIPE_ID_COPY) - return true; - - switch (asd->run_mode->val) { - case ATOMISP_RUN_MODE_STILL_CAPTURE: - if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) - return true; - else - return false; - case ATOMISP_RUN_MODE_PREVIEW: - if (!asd->continuous_mode->val) { - if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) - return true; - else - return false; - } - /* fall through to ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE */ - case ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE: - if (pipe_id == IA_CSS_PIPE_ID_CAPTURE || - pipe_id == IA_CSS_PIPE_ID_PREVIEW) - return true; - else - return false; - case ATOMISP_RUN_MODE_VIDEO: - if (!asd->continuous_mode->val) { - if (pipe_id == IA_CSS_PIPE_ID_VIDEO || - pipe_id == IA_CSS_PIPE_ID_YUVPP) - return true; - else - return false; - } - /* fall through to ATOMISP_RUN_MODE_SDV */ - case ATOMISP_RUN_MODE_SDV: - if (pipe_id == IA_CSS_PIPE_ID_CAPTURE || - pipe_id == IA_CSS_PIPE_ID_VIDEO) - return true; - else - return false; - } - - return false; -} - -static int __create_pipe(struct atomisp_sub_device *asd, - struct atomisp_stream_env *stream_env, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - struct ia_css_pipe_extra_config extra_config; - enum ia_css_err ret; - - if (pipe_id >= IA_CSS_PIPE_ID_NUM) - return -EINVAL; - - if (pipe_id != CSS_PIPE_ID_ACC && - !stream_env->pipe_configs[pipe_id].output_info[0].res.width) - return 0; - - if (pipe_id == CSS_PIPE_ID_ACC && - !stream_env->pipe_configs[pipe_id].acc_extension) - return 0; - - if (!is_pipe_valid_to_current_run_mode(asd, pipe_id)) - return 0; - - ia_css_pipe_extra_config_defaults(&extra_config); - - __apply_additional_pipe_config(asd, stream_env, pipe_id); - if (!memcmp(&extra_config, - &stream_env->pipe_extra_configs[pipe_id], - sizeof(extra_config))) - ret = ia_css_pipe_create( - &stream_env->pipe_configs[pipe_id], - &stream_env->pipes[pipe_id]); - else - ret = ia_css_pipe_create_extra( - 
&stream_env->pipe_configs[pipe_id], - &stream_env->pipe_extra_configs[pipe_id], - &stream_env->pipes[pipe_id]); - if (ret != IA_CSS_SUCCESS) - dev_err(isp->dev, "create pipe[%d] error.\n", pipe_id); - return ret; -} - -static int __create_pipes(struct atomisp_sub_device *asd) -{ - enum ia_css_err ret; - int i, j; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) { - ret = __create_pipe(asd, &asd->stream_env[i], j); - if (ret != IA_CSS_SUCCESS) - break; - } - if (j < IA_CSS_PIPE_ID_NUM) - goto pipe_err; - } - return 0; -pipe_err: - for (; i >= 0; i--) { - for (j--; j >= 0; j--) { - if (asd->stream_env[i].pipes[j]) { - ia_css_pipe_destroy(asd->stream_env[i].pipes[j]); - asd->stream_env[i].pipes[j] = NULL; - } - } - j = IA_CSS_PIPE_ID_NUM; - } - return -EINVAL; -} - -void atomisp_create_pipes_stream(struct atomisp_sub_device *asd) -{ - __create_pipes(asd); - __create_streams(asd); -} - -int atomisp_css_update_stream(struct atomisp_sub_device *asd) -{ - int ret; - struct atomisp_device *isp = asd->isp; - - if (__destroy_streams(asd, true) != IA_CSS_SUCCESS) - dev_warn(isp->dev, "destroy stream failed.\n"); - - if (__destroy_pipes(asd, true) != IA_CSS_SUCCESS) - dev_warn(isp->dev, "destroy pipe failed.\n"); - - ret = __create_pipes(asd); - if (ret != IA_CSS_SUCCESS) { - dev_err(isp->dev, "create pipe failed %d.\n", ret); - return -EIO; - } - - ret = __create_streams(asd); - if (ret != IA_CSS_SUCCESS) { - dev_warn(isp->dev, "create stream failed %d.\n", ret); - __destroy_pipes(asd, true); - return -EIO; - } - - return 0; -} - -int atomisp_css_init(struct atomisp_device *isp) -{ - unsigned int mmu_base_addr; - int ret; - enum ia_css_err err; - - ret = hmm_get_mmu_base_addr(&mmu_base_addr); - if (ret) - return ret; - - /* Init ISP */ - err = ia_css_init(&isp->css_env.isp_css_env, NULL, - (uint32_t)mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE); - if (err != IA_CSS_SUCCESS) { - dev_err(isp->dev, "css init failed --- bad firmware?\n"); - 
return -EINVAL; - } - ia_css_enable_isys_event_queue(true); - - isp->css_initialized = true; - dev_dbg(isp->dev, "sh_css_init success\n"); - - return 0; -} - -static inline int __set_css_print_env(struct atomisp_device *isp, int opt) -{ - int ret = 0; - - if (opt == 0) - isp->css_env.isp_css_env.print_env.debug_print = NULL; - else if (opt == 1) - isp->css_env.isp_css_env.print_env.debug_print = - atomisp_css2_dbg_ftrace_print; - else if (opt == 2) - isp->css_env.isp_css_env.print_env.debug_print = - atomisp_css2_dbg_print; - else - ret = -EINVAL; - - return ret; -} - -int atomisp_css_check_firmware_version(struct atomisp_device *isp) -{ - if (!sh_css_check_firmware_version((void *)isp->firmware->data)) { - dev_err(isp->dev, "Fw version check failed.\n"); - return -EINVAL; - } - return 0; -} - -int atomisp_css_load_firmware(struct atomisp_device *isp) -{ - enum ia_css_err err; - - /* set css env */ - isp->css_env.isp_css_fw.data = (void *)isp->firmware->data; - isp->css_env.isp_css_fw.bytes = isp->firmware->size; - - isp->css_env.isp_css_env.hw_access_env.store_8 = - atomisp_css2_hw_store_8; - isp->css_env.isp_css_env.hw_access_env.store_16 = - atomisp_css2_hw_store_16; - isp->css_env.isp_css_env.hw_access_env.store_32 = - atomisp_css2_hw_store_32; - - isp->css_env.isp_css_env.hw_access_env.load_8 = atomisp_css2_hw_load_8; - isp->css_env.isp_css_env.hw_access_env.load_16 = - atomisp_css2_hw_load_16; - isp->css_env.isp_css_env.hw_access_env.load_32 = - atomisp_css2_hw_load_32; - - isp->css_env.isp_css_env.hw_access_env.load = atomisp_css2_hw_load; - isp->css_env.isp_css_env.hw_access_env.store = atomisp_css2_hw_store; - - __set_css_print_env(isp, dbg_func); - - isp->css_env.isp_css_env.print_env.error_print = atomisp_css2_err_print; - - /* load isp fw into ISP memory */ - err = ia_css_load_firmware(&isp->css_env.isp_css_env, - &isp->css_env.isp_css_fw); - if (err != IA_CSS_SUCCESS) { - dev_err(isp->dev, "css load fw failed.\n"); - return -EINVAL; - } - - return 0; 
-} - -void atomisp_css_unload_firmware(struct atomisp_device *isp) -{ - ia_css_unload_firmware(); -} - -void atomisp_css_uninit(struct atomisp_device *isp) -{ - struct atomisp_sub_device *asd; - unsigned int i; - - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - atomisp_isp_parameters_clean_up(&asd->params.config); - asd->params.css_update_params_needed = false; - } - - isp->css_initialized = false; - ia_css_uninit(); -} - -void atomisp_css_suspend(struct atomisp_device *isp) -{ - isp->css_initialized = false; - ia_css_uninit(); -} - -int atomisp_css_resume(struct atomisp_device *isp) -{ - unsigned int mmu_base_addr; - int ret; - - ret = hmm_get_mmu_base_addr(&mmu_base_addr); - if (ret) { - dev_err(isp->dev, "get base address error.\n"); - return -EINVAL; - } - - ret = ia_css_init(&isp->css_env.isp_css_env, NULL, - mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE); - if (ret) { - dev_err(isp->dev, "re-init css failed.\n"); - return -EINVAL; - } - ia_css_enable_isys_event_queue(true); - - isp->css_initialized = true; - return 0; -} - -int atomisp_css_irq_translate(struct atomisp_device *isp, - unsigned int *infos) -{ - int err; - - err = ia_css_irq_translate(infos); - if (err != IA_CSS_SUCCESS) { - dev_warn(isp->dev, - "%s:failed to translate irq (err = %d,infos = %d)\n", - __func__, err, *infos); - return -EINVAL; - } - - return 0; -} - -void atomisp_css_rx_get_irq_info(enum mipi_port_id port, - unsigned int *infos) -{ -#ifndef ISP2401_NEW_INPUT_SYSTEM - ia_css_isys_rx_get_irq_info(port, infos); -#else - *infos = 0; -#endif -} - -void atomisp_css_rx_clear_irq_info(enum mipi_port_id port, - unsigned int infos) -{ -#ifndef ISP2401_NEW_INPUT_SYSTEM - ia_css_isys_rx_clear_irq_info(port, infos); -#endif -} - -int atomisp_css_irq_enable(struct atomisp_device *isp, - enum atomisp_css_irq_info info, bool enable) -{ - if (ia_css_irq_enable(info, enable) != IA_CSS_SUCCESS) { - dev_warn(isp->dev, "%s:Invalid irq info.\n", __func__); - return -EINVAL; - } - - return 0; 
-} - -void atomisp_css_init_struct(struct atomisp_sub_device *asd) -{ - int i, j; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - asd->stream_env[i].stream = NULL; - for (j = 0; j < IA_CSS_PIPE_MODE_NUM; j++) { - asd->stream_env[i].pipes[j] = NULL; - asd->stream_env[i].update_pipe[j] = false; - ia_css_pipe_config_defaults( - &asd->stream_env[i].pipe_configs[j]); - ia_css_pipe_extra_config_defaults( - &asd->stream_env[i].pipe_extra_configs[j]); - } - ia_css_stream_config_defaults(&asd->stream_env[i].stream_config); - } -} - -int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd, - struct videobuf_vmalloc_memory *vm_mem, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_buffer_type css_buf_type, - enum atomisp_css_pipe_id css_pipe_id) -{ - struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id]; - struct ia_css_buffer css_buf = {0}; - enum ia_css_err err; - - css_buf.type = css_buf_type; - css_buf.data.frame = vm_mem->vaddr; - - err = ia_css_pipe_enqueue_buffer( - stream_env->pipes[css_pipe_id], &css_buf); - if (err != IA_CSS_SUCCESS) - return -EINVAL; - - return 0; -} - -int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd, - struct atomisp_metadata_buf *metadata_buf, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id) -{ - struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id]; - struct ia_css_buffer buffer = {0}; - struct atomisp_device *isp = asd->isp; - - buffer.type = IA_CSS_BUFFER_TYPE_METADATA; - buffer.data.metadata = metadata_buf->metadata; - if (ia_css_pipe_enqueue_buffer(stream_env->pipes[css_pipe_id], - &buffer)) { - dev_err(isp->dev, "failed to q meta data buffer\n"); - return -EINVAL; - } - - return 0; -} - -int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd, - struct atomisp_s3a_buf *s3a_buf, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id) -{ - struct atomisp_stream_env *stream_env = 
&asd->stream_env[stream_id]; - struct ia_css_buffer buffer = {0}; - struct atomisp_device *isp = asd->isp; - - buffer.type = IA_CSS_BUFFER_TYPE_3A_STATISTICS; - buffer.data.stats_3a = s3a_buf->s3a_data; - if (ia_css_pipe_enqueue_buffer( - stream_env->pipes[css_pipe_id], - &buffer)) { - dev_dbg(isp->dev, "failed to q s3a stat buffer\n"); - return -EINVAL; - } - - return 0; -} - -int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd, - struct atomisp_dis_buf *dis_buf, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id) -{ - struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id]; - struct ia_css_buffer buffer = {0}; - struct atomisp_device *isp = asd->isp; - - buffer.type = IA_CSS_BUFFER_TYPE_DIS_STATISTICS; - buffer.data.stats_dvs = dis_buf->dis_data; - if (ia_css_pipe_enqueue_buffer( - stream_env->pipes[css_pipe_id], - &buffer)) { - dev_dbg(isp->dev, "failed to q dvs stat buffer\n"); - return -EINVAL; - } - - return 0; -} - -void atomisp_css_mmu_invalidate_cache(void) -{ - ia_css_mmu_invalidate_cache(); -} - -void atomisp_css_mmu_invalidate_tlb(void) -{ - ia_css_mmu_invalidate_cache(); -} - -int atomisp_css_start(struct atomisp_sub_device *asd, - enum atomisp_css_pipe_id pipe_id, bool in_reset) -{ - struct atomisp_device *isp = asd->isp; - bool sp_is_started = false; - int ret = 0, i = 0; - - if (in_reset) { - if (__destroy_streams(asd, true)) - dev_warn(isp->dev, "destroy stream failed.\n"); - - if (__destroy_pipes(asd, true)) - dev_warn(isp->dev, "destroy pipe failed.\n"); - - if (__create_pipes(asd)) { - dev_err(isp->dev, "create pipe error.\n"); - return -EINVAL; - } - if (__create_streams(asd)) { - dev_err(isp->dev, "create stream error.\n"); - ret = -EINVAL; - goto stream_err; - } - /* in_reset == true, extension firmwares are reloaded after the recovery */ - atomisp_acc_load_extensions(asd); - } - - /* - * For dual steam case, it is possible that: - * 1: for this stream, it is at the stage that: - * - after 
set_fmt is called - * - before stream on is called - * 2: for the other stream, the stream off is called which css reset - * has been done. - * - * Thus the stream created in set_fmt get destroyed and need to be - * recreated in the next stream on. - */ - if (asd->stream_prepared == false) { - if (__create_pipes(asd)) { - dev_err(isp->dev, "create pipe error.\n"); - return -EINVAL; - } - if (__create_streams(asd)) { - dev_err(isp->dev, "create stream error.\n"); - ret = -EINVAL; - goto stream_err; - } - } - /* - * SP can only be started one time - * if atomisp_subdev_streaming_count() tell there already has some - * subdev at streamming, then SP should already be started previously, - * so need to skip start sp procedure - */ - if (atomisp_streaming_count(isp)) { - dev_dbg(isp->dev, "skip start sp\n"); - } else { - if (!sh_css_hrt_system_is_idle()) - dev_err(isp->dev, "CSS HW not idle before starting SP\n"); - if (ia_css_start_sp() != IA_CSS_SUCCESS) { - dev_err(isp->dev, "start sp error.\n"); - ret = -EINVAL; - goto start_err; - } else { - sp_is_started = true; - } - } - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - if (asd->stream_env[i].stream) { - if (ia_css_stream_start(asd->stream_env[i] - .stream) != IA_CSS_SUCCESS) { - dev_err(isp->dev, "stream[%d] start error.\n", i); - ret = -EINVAL; - goto start_err; - } else { - asd->stream_env[i].stream_state = CSS_STREAM_STARTED; - dev_dbg(isp->dev, "stream[%d] started.\n", i); - } - } - } - - return 0; - -start_err: - __destroy_streams(asd, true); -stream_err: - __destroy_pipes(asd, true); - - /* css 2.0 API limitation: ia_css_stop_sp() could be only called after - * destroy all pipes - */ - /* - * SP can not be stop if other streams are in use - */ - if ((atomisp_streaming_count(isp) == 0) && sp_is_started) - ia_css_stop_sp(); - - return ret; -} - -void atomisp_css_update_isp_params(struct atomisp_sub_device *asd) -{ - /* - * FIXME! - * for ISP2401 new input system, this api is under development. 
- * Calling it would cause kernel panic. - * - * VIED BZ: 1458 - * - * Check if it is Cherry Trail and also new input system - */ - if (asd->copy_mode) { - dev_warn(asd->isp->dev, - "%s: ia_css_stream_set_isp_config() not supported in copy mode!.\n", - __func__); - return; - } - - ia_css_stream_set_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &asd->params.config); - atomisp_isp_parameters_clean_up(&asd->params.config); -} - - -void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd, - struct ia_css_pipe *pipe) -{ - enum ia_css_err ret; - - if (!pipe) { - atomisp_css_update_isp_params(asd); - return; - } - - dev_dbg(asd->isp->dev, "%s: apply parameter for ia_css_frame %p with isp_config_id %d on pipe %p.\n", - __func__, asd->params.config.output_frame, - asd->params.config.isp_config_id, pipe); - - ret = ia_css_stream_set_isp_config_on_pipe( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &asd->params.config, pipe); - if (ret != IA_CSS_SUCCESS) - dev_warn(asd->isp->dev, "%s: ia_css_stream_set_isp_config_on_pipe failed %d\n", - __func__, ret); - atomisp_isp_parameters_clean_up(&asd->params.config); -} - -int atomisp_css_queue_buffer(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id pipe_id, - enum atomisp_css_buffer_type buf_type, - struct atomisp_css_buffer *isp_css_buffer) -{ - if (ia_css_pipe_enqueue_buffer( - asd->stream_env[stream_id].pipes[pipe_id], - &isp_css_buffer->css_buffer) - != IA_CSS_SUCCESS) - return -EINVAL; - - return 0; -} - -int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id pipe_id, - enum atomisp_css_buffer_type buf_type, - struct atomisp_css_buffer *isp_css_buffer) -{ - struct atomisp_device *isp = asd->isp; - enum ia_css_err err; - - err = ia_css_pipe_dequeue_buffer( - asd->stream_env[stream_id].pipes[pipe_id], - &isp_css_buffer->css_buffer); - if (err != 
IA_CSS_SUCCESS) { - dev_err(isp->dev, - "ia_css_pipe_dequeue_buffer failed: 0x%x\n", err); - return -EINVAL; - } - - return 0; -} - -int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd, - uint16_t stream_id, - struct atomisp_s3a_buf *s3a_buf, - struct atomisp_dis_buf *dis_buf, - struct atomisp_metadata_buf *md_buf) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_css_dvs_grid_info *dvs_grid_info = - atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info); - - if (s3a_buf && asd->params.curr_grid_info.s3a_grid.enable) { - void *s3a_ptr; - - s3a_buf->s3a_data = ia_css_isp_3a_statistics_allocate( - &asd->params.curr_grid_info.s3a_grid); - if (!s3a_buf->s3a_data) { - dev_err(isp->dev, "3a buf allocation failed.\n"); - return -EINVAL; - } - - s3a_ptr = hmm_vmap(s3a_buf->s3a_data->data_ptr, true); - s3a_buf->s3a_map = ia_css_isp_3a_statistics_map_allocate( - s3a_buf->s3a_data, s3a_ptr); - } - - if (dis_buf && dvs_grid_info && dvs_grid_info->enable) { - void *dvs_ptr; - - dis_buf->dis_data = ia_css_isp_dvs2_statistics_allocate( - dvs_grid_info); - if (!dis_buf->dis_data) { - dev_err(isp->dev, "dvs buf allocation failed.\n"); - if (s3a_buf) - ia_css_isp_3a_statistics_free(s3a_buf->s3a_data); - return -EINVAL; - } - - dvs_ptr = hmm_vmap(dis_buf->dis_data->data_ptr, true); - dis_buf->dvs_map = ia_css_isp_dvs_statistics_map_allocate( - dis_buf->dis_data, dvs_ptr); - } - - if (asd->stream_env[stream_id].stream_info. 
- metadata_info.size && md_buf) { - md_buf->metadata = ia_css_metadata_allocate( - &asd->stream_env[stream_id].stream_info.metadata_info); - if (!md_buf->metadata) { - if (s3a_buf) - ia_css_isp_3a_statistics_free(s3a_buf->s3a_data); - if (dis_buf) - ia_css_isp_dvs2_statistics_free(dis_buf->dis_data); - dev_err(isp->dev, "metadata buf allocation failed.\n"); - return -EINVAL; - } - md_buf->md_vptr = hmm_vmap(md_buf->metadata->address, false); - } - - return 0; -} - -void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf) -{ - if (s3a_buf->s3a_data) - hmm_vunmap(s3a_buf->s3a_data->data_ptr); - - ia_css_isp_3a_statistics_map_free(s3a_buf->s3a_map); - s3a_buf->s3a_map = NULL; - ia_css_isp_3a_statistics_free(s3a_buf->s3a_data); -} - -void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf) -{ - if (dis_buf->dis_data) - hmm_vunmap(dis_buf->dis_data->data_ptr); - - ia_css_isp_dvs_statistics_map_free(dis_buf->dvs_map); - dis_buf->dvs_map = NULL; - ia_css_isp_dvs2_statistics_free(dis_buf->dis_data); -} - -void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf *metadata_buf) -{ - if (metadata_buf->md_vptr) { - hmm_vunmap(metadata_buf->metadata->address); - metadata_buf->md_vptr = NULL; - } - ia_css_metadata_free(metadata_buf->metadata); -} - -void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd) -{ - struct atomisp_s3a_buf *s3a_buf, *_s3a_buf; - struct atomisp_dis_buf *dis_buf, *_dis_buf; - struct atomisp_metadata_buf *md_buf, *_md_buf; - struct atomisp_css_dvs_grid_info *dvs_grid_info = - atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info); - unsigned int i; - - /* 3A statistics use vmalloc, DIS use kmalloc */ - if (dvs_grid_info && dvs_grid_info->enable) { - ia_css_dvs2_coefficients_free(asd->params.css_param.dvs2_coeff); - ia_css_dvs2_statistics_free(asd->params.dvs_stat); - asd->params.css_param.dvs2_coeff = NULL; - asd->params.dvs_stat = NULL; - asd->params.dvs_hor_proj_bytes = 0; - asd->params.dvs_ver_proj_bytes = 0; - 
asd->params.dvs_hor_coef_bytes = 0; - asd->params.dvs_ver_coef_bytes = 0; - asd->params.dis_proj_data_valid = false; - list_for_each_entry_safe(dis_buf, _dis_buf, - &asd->dis_stats, list) { - atomisp_css_free_dis_buffer(dis_buf); - list_del(&dis_buf->list); - kfree(dis_buf); - } - list_for_each_entry_safe(dis_buf, _dis_buf, - &asd->dis_stats_in_css, list) { - atomisp_css_free_dis_buffer(dis_buf); - list_del(&dis_buf->list); - kfree(dis_buf); - } - } - if (asd->params.curr_grid_info.s3a_grid.enable) { - ia_css_3a_statistics_free(asd->params.s3a_user_stat); - asd->params.s3a_user_stat = NULL; - asd->params.s3a_output_bytes = 0; - list_for_each_entry_safe(s3a_buf, _s3a_buf, - &asd->s3a_stats, list) { - atomisp_css_free_3a_buffer(s3a_buf); - list_del(&s3a_buf->list); - kfree(s3a_buf); - } - list_for_each_entry_safe(s3a_buf, _s3a_buf, - &asd->s3a_stats_in_css, list) { - atomisp_css_free_3a_buffer(s3a_buf); - list_del(&s3a_buf->list); - kfree(s3a_buf); - } - list_for_each_entry_safe(s3a_buf, _s3a_buf, - &asd->s3a_stats_ready, list) { - atomisp_css_free_3a_buffer(s3a_buf); - list_del(&s3a_buf->list); - kfree(s3a_buf); - } - } - - if (asd->params.css_param.dvs_6axis) { - ia_css_dvs2_6axis_config_free(asd->params.css_param.dvs_6axis); - asd->params.css_param.dvs_6axis = NULL; - } - - for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - list_for_each_entry_safe(md_buf, _md_buf, - &asd->metadata[i], list) { - atomisp_css_free_metadata_buffer(md_buf); - list_del(&md_buf->list); - kfree(md_buf); - } - list_for_each_entry_safe(md_buf, _md_buf, - &asd->metadata_in_css[i], list) { - atomisp_css_free_metadata_buffer(md_buf); - list_del(&md_buf->list); - kfree(md_buf); - } - list_for_each_entry_safe(md_buf, _md_buf, - &asd->metadata_ready[i], list) { - atomisp_css_free_metadata_buffer(md_buf); - list_del(&md_buf->list); - kfree(md_buf); - } - } - asd->params.metadata_width_size = 0; - atomisp_free_metadata_output_buf(asd); -} - -int atomisp_css_get_grid_info(struct atomisp_sub_device 
*asd, - enum atomisp_css_pipe_id pipe_id, - int source_pad) -{ - struct ia_css_pipe_info p_info; - struct ia_css_grid_info old_info; - struct atomisp_device *isp = asd->isp; - int stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); - int md_width = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - stream_config.metadata_config.resolution.width; - - memset(&p_info, 0, sizeof(struct ia_css_pipe_info)); - memset(&old_info, 0, sizeof(struct ia_css_grid_info)); - - if (ia_css_pipe_get_info( - asd->stream_env[stream_index].pipes[pipe_id], - &p_info) != IA_CSS_SUCCESS) { - dev_err(isp->dev, "ia_css_pipe_get_info failed\n"); - return -EINVAL; - } - - memcpy(&old_info, &asd->params.curr_grid_info, - sizeof(struct ia_css_grid_info)); - memcpy(&asd->params.curr_grid_info, &p_info.grid_info, - sizeof(struct ia_css_grid_info)); - /* - * Record which css pipe enables s3a_grid. - * Currently would have one css pipe that need it - */ - if (asd->params.curr_grid_info.s3a_grid.enable) { - if (asd->params.s3a_enabled_pipe != CSS_PIPE_ID_NUM) - dev_dbg(isp->dev, "css pipe %d enabled s3a grid replaced by: %d.\n", - asd->params.s3a_enabled_pipe, pipe_id); - asd->params.s3a_enabled_pipe = pipe_id; - } - - /* If the grid info has not changed and the buffers for 3A and - * DIS statistics buffers are allocated or buffer size would be zero - * then no need to do anything. */ - if (((!memcmp(&old_info, &asd->params.curr_grid_info, sizeof(old_info)) - && asd->params.s3a_user_stat && asd->params.dvs_stat) - || asd->params.curr_grid_info.s3a_grid.width == 0 - || asd->params.curr_grid_info.s3a_grid.height == 0) - && asd->params.metadata_width_size == md_width) { - dev_dbg(isp->dev, - "grid info change escape. 
memcmp=%d, s3a_user_stat=%d," - "dvs_stat=%d, s3a.width=%d, s3a.height=%d, metadata width =%d\n", - !memcmp(&old_info, &asd->params.curr_grid_info, - sizeof(old_info)), - !!asd->params.s3a_user_stat, !!asd->params.dvs_stat, - asd->params.curr_grid_info.s3a_grid.width, - asd->params.curr_grid_info.s3a_grid.height, - asd->params.metadata_width_size); - return -EINVAL; - } - asd->params.metadata_width_size = md_width; - - return 0; -} - -int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd) -{ - if (!asd->params.curr_grid_info.s3a_grid.width || - !asd->params.curr_grid_info.s3a_grid.height) - return 0; - - asd->params.s3a_user_stat = ia_css_3a_statistics_allocate( - &asd->params.curr_grid_info.s3a_grid); - if (!asd->params.s3a_user_stat) - return -ENOMEM; - /* 3A statistics. These can be big, so we use vmalloc. */ - asd->params.s3a_output_bytes = - asd->params.curr_grid_info.s3a_grid.width * - asd->params.curr_grid_info.s3a_grid.height * - sizeof(*asd->params.s3a_user_stat->data); - - return 0; -} - -int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd) -{ - struct atomisp_css_dvs_grid_info *dvs_grid = - atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info); - - if (!dvs_grid) - return 0; - - if (!dvs_grid->enable) { - dev_dbg(asd->isp->dev, "%s: dvs_grid not enabled.\n", __func__); - return 0; - } - - /* DIS coefficients. */ - asd->params.css_param.dvs2_coeff = ia_css_dvs2_coefficients_allocate( - dvs_grid); - if (!asd->params.css_param.dvs2_coeff) - return -ENOMEM; - - asd->params.dvs_hor_coef_bytes = dvs_grid->num_hor_coefs * - sizeof(*asd->params.css_param.dvs2_coeff->hor_coefs.odd_real); - - asd->params.dvs_ver_coef_bytes = dvs_grid->num_ver_coefs * - sizeof(*asd->params.css_param.dvs2_coeff->ver_coefs.odd_real); - - /* DIS projections. 
*/ - asd->params.dis_proj_data_valid = false; - asd->params.dvs_stat = ia_css_dvs2_statistics_allocate(dvs_grid); - if (!asd->params.dvs_stat) - return -ENOMEM; - - asd->params.dvs_hor_proj_bytes = - dvs_grid->aligned_height * dvs_grid->aligned_width * - sizeof(*asd->params.dvs_stat->hor_prod.odd_real); - - asd->params.dvs_ver_proj_bytes = - dvs_grid->aligned_height * dvs_grid->aligned_width * - sizeof(*asd->params.dvs_stat->ver_prod.odd_real); - - return 0; -} - -int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd) -{ - int i; - - /* We allocate the cpu-side buffer used for communication with user - * space */ - for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - asd->params.metadata_user[i] = kvmalloc( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. - stream_info.metadata_info.size, GFP_KERNEL); - if (!asd->params.metadata_user[i]) { - while (--i >= 0) { - kvfree(asd->params.metadata_user[i]); - asd->params.metadata_user[i] = NULL; - } - return -ENOMEM; - } - } - - return 0; -} - -void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd) -{ - unsigned int i; - - for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - if (asd->params.metadata_user[i]) { - kvfree(asd->params.metadata_user[i]); - asd->params.metadata_user[i] = NULL; - } - } -} - -void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd, - struct atomisp_css_buffer *isp_css_buffer, - struct ia_css_isp_dvs_statistics_map *dvs_map) -{ - if (asd->params.dvs_stat) { - if (dvs_map) - ia_css_translate_dvs2_statistics( - asd->params.dvs_stat, dvs_map); - else - ia_css_get_dvs2_statistics(asd->params.dvs_stat, - isp_css_buffer->css_buffer.data.stats_dvs); - - } -} - -int atomisp_css_dequeue_event(struct atomisp_css_event *current_event) -{ - if (ia_css_dequeue_event(¤t_event->event) != IA_CSS_SUCCESS) - return -EINVAL; - - return 0; -} - -void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd, - struct atomisp_css_event *current_event) -{ - /* - * FIXME! 
- * Pipe ID reported in CSS event is not correct for new system's - * copy pipe. - * VIED BZ: 1463 - */ - ia_css_temp_pipe_to_pipe_id(current_event->event.pipe, - ¤t_event->pipe); - if (asd && asd->copy_mode && - current_event->pipe == IA_CSS_PIPE_ID_CAPTURE) - current_event->pipe = IA_CSS_PIPE_ID_COPY; -} - -int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - struct v4l2_mbus_framefmt *ffmt, - int isys_stream) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - if (isys_stream >= IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH) - return -EINVAL; - - s_config->isys_config[isys_stream].input_res.width = ffmt->width; - s_config->isys_config[isys_stream].input_res.height = ffmt->height; - return 0; -} - -int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - struct v4l2_mbus_framefmt *ffmt) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - s_config->input_config.input_res.width = ffmt->width; - s_config->input_config.input_res.height = ffmt->height; - return 0; -} - -void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - unsigned int bin_factor) -{ - asd->stream_env[stream_id] - .stream_config.sensor_binning_factor = bin_factor; -} - -void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_bayer_order bayer_order) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - s_config->input_config.bayer_order = bayer_order; -} - -void atomisp_css_isys_set_link(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - int link, - int isys_stream) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - s_config->isys_config[isys_stream].linked_isys_stream_id = 
link; -} - -void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - bool valid, - int isys_stream) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - s_config->isys_config[isys_stream].valid = valid; -} - -void atomisp_css_isys_set_format(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format format, - int isys_stream) -{ - - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - s_config->isys_config[isys_stream].format = format; -} - -void atomisp_css_input_set_format(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format format) -{ - - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - s_config->input_config.format = format; -} - -int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - struct v4l2_mbus_framefmt *ffmt) -{ - int i; - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - /* - * Set all isys configs to not valid. 
- * Currently we support only one stream per channel - */ - for (i = IA_CSS_STREAM_ISYS_STREAM_0; - i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) - s_config->isys_config[i].valid = false; - - atomisp_css_isys_set_resolution(asd, stream_id, ffmt, - IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX); - atomisp_css_isys_set_format(asd, stream_id, - s_config->input_config.format, - IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX); - atomisp_css_isys_set_link(asd, stream_id, NO_LINK, - IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX); - atomisp_css_isys_set_valid(asd, stream_id, true, - IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX); - - return 0; -} - -int atomisp_css_isys_two_stream_cfg(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format input_format) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width = - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width; - - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height = - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height / 2; - - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id - = IA_CSS_STREAM_ISYS_STREAM_0; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format = - ATOMISP_INPUT_FORMAT_USER_DEF1; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format = - ATOMISP_INPUT_FORMAT_USER_DEF2; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true; - return 0; -} - -void atomisp_css_isys_two_stream_cfg_update_stream1( - struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format input_format, - unsigned int width, unsigned int height) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width = - width; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height = - 
height; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format = - input_format; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].valid = true; -} - -void atomisp_css_isys_two_stream_cfg_update_stream2( - struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_input_format input_format, - unsigned int width, unsigned int height) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width = - width; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height = - height; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id - = IA_CSS_STREAM_ISYS_STREAM_0; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format = - input_format; - s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true; -} - -int atomisp_css_input_set_effective_resolution( - struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - unsigned int width, unsigned int height) -{ - struct ia_css_stream_config *s_config = - &asd->stream_env[stream_id].stream_config; - s_config->input_config.effective_res.width = width; - s_config->input_config.effective_res.height = height; - return 0; -} - -void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd, - unsigned int dvs_w, unsigned int dvs_h) -{ - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.width = dvs_w; - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.height = dvs_h; -} - -void atomisp_css_input_set_two_pixels_per_clock( - struct atomisp_sub_device *asd, - bool two_ppc) -{ - int i; - - if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .stream_config.pixels_per_clock == (two_ppc ? 2 : 1)) - return; - - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .stream_config.pixels_per_clock = (two_ppc ? 
2 : 1); - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .update_pipe[i] = true; -} - -void atomisp_css_enable_raw_binning(struct atomisp_sub_device *asd, - bool enable) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - unsigned int pipe; - - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) - pipe = IA_CSS_PIPE_ID_VIDEO; - else - pipe = IA_CSS_PIPE_ID_PREVIEW; - - stream_env->pipe_extra_configs[pipe].enable_raw_binning = enable; - stream_env->update_pipe[pipe] = true; - if (enable) - stream_env->pipe_configs[pipe].output_info[0].padded_width = - stream_env->stream_config.input_config.effective_res.width; -} - -void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable) -{ - int i; - - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[i].enable_dz = enable; -} - -void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd, - enum atomisp_css_capture_mode mode) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - - if (stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE] - .default_capture_config.mode == mode) - return; - - stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE]. 
- default_capture_config.mode = mode; - stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true; -} - -void atomisp_css_input_set_mode(struct atomisp_sub_device *asd, - enum atomisp_css_input_mode mode) -{ - int i; - struct atomisp_device *isp = asd->isp; - unsigned int size_mem_words; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) - asd->stream_env[i].stream_config.mode = mode; - - if (isp->inputs[asd->input_curr].type == TEST_PATTERN) { - struct ia_css_stream_config *s_config = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_config; - s_config->mode = IA_CSS_INPUT_MODE_TPG; - s_config->source.tpg.mode = IA_CSS_TPG_MODE_CHECKERBOARD; - s_config->source.tpg.x_mask = (1 << 4) - 1; - s_config->source.tpg.x_delta = -2; - s_config->source.tpg.y_mask = (1 << 4) - 1; - s_config->source.tpg.y_delta = 3; - s_config->source.tpg.xy_mask = (1 << 8) - 1; - return; - } - - if (mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) - return; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - /* - * TODO: sensor needs to export the embedded_data_size_words - * information to atomisp for each setting. - * Here using a large safe value. 
- */ - struct ia_css_stream_config *s_config = - &asd->stream_env[i].stream_config; - - if (s_config->input_config.input_res.width == 0) - continue; - - if (ia_css_mipi_frame_calculate_size( - s_config->input_config.input_res.width, - s_config->input_config.input_res.height, - s_config->input_config.format, - true, - 0x13000, - &size_mem_words) != IA_CSS_SUCCESS) { - if (intel_mid_identify_cpu() == - INTEL_MID_CPU_CHIP_TANGIER) - size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2; - else - size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1; - dev_warn(asd->isp->dev, - "ia_css_mipi_frame_calculate_size failed," - "applying pre-defined MIPI buffer size %u.\n", - size_mem_words); - } - s_config->mipi_buffer_config.size_mem_words = size_mem_words; - s_config->mipi_buffer_config.nof_mipi_buffers = 2; - } -} - -void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd, - unsigned short stream_index, bool enable) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[stream_index]; - - if (stream_env->stream_config.online == !!enable) - return; - - stream_env->stream_config.online = !!enable; - stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true; -} - -void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd, - unsigned short stream_index, bool enable) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[stream_index]; - int i; - - if (stream_env->stream_config.online != !!enable) { - stream_env->stream_config.online = !!enable; - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - stream_env->update_pipe[i] = true; - } -} - -void atomisp_css_video_enable_online(struct atomisp_sub_device *asd, - bool enable) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_VIDEO]; - int i; - - if (stream_env->stream_config.online != enable) { - stream_env->stream_config.online = enable; - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - stream_env->update_pipe[i] = true; - } -} - -void atomisp_css_enable_continuous(struct 
atomisp_sub_device *asd, - bool enable) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - int i; - - /* - * To SOC camera, there is only one YUVPP pipe in any case - * including ZSL/SDV/continuous viewfinder, so always set - * stream_config.continuous to 0. - */ - if (ATOMISP_USE_YUVPP(asd)) { - stream_env->stream_config.continuous = 0; - stream_env->stream_config.online = 1; - return; - } - - if (stream_env->stream_config.continuous != !!enable) { - stream_env->stream_config.continuous = !!enable; - stream_env->stream_config.pack_raw_pixels = true; - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - stream_env->update_pipe[i] = true; - } -} - -void atomisp_css_enable_cvf(struct atomisp_sub_device *asd, - bool enable) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - int i; - - if (stream_env->stream_config.disable_cont_viewfinder != !enable) { - stream_env->stream_config.disable_cont_viewfinder = !enable; - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - stream_env->update_pipe[i] = true; - } -} - -int atomisp_css_input_configure_port( - struct atomisp_sub_device *asd, - enum mipi_port_id port, - unsigned int num_lanes, - unsigned int timeout, - unsigned int mipi_freq, - enum atomisp_input_format metadata_format, - unsigned int metadata_width, - unsigned int metadata_height) -{ - int i; - struct atomisp_stream_env *stream_env; - /* - * Calculate rx_count as follows: - * Input: mipi_freq : CSI-2 bus frequency in Hz - * UI = 1 / (2 * mipi_freq) : period of one bit on the bus - * min = 85e-9 + 6 * UI : Limits for rx_count in seconds - * max = 145e-9 + 10 * UI - * rxcount0 = min / (4 / mipi_freq) : convert seconds to byte clocks - * rxcount = rxcount0 - 2 : adjust for better results - * The formula below is simplified version of the above with - * 10-bit fixed points for improved accuracy. 
- */ - const unsigned int rxcount = - min(((mipi_freq / 46000) - 1280) >> 10, 0xffU) * 0x01010101U; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - stream_env = &asd->stream_env[i]; - stream_env->stream_config.source.port.port = port; - stream_env->stream_config.source.port.num_lanes = num_lanes; - stream_env->stream_config.source.port.timeout = timeout; - if (mipi_freq) - stream_env->stream_config.source.port.rxcount = rxcount; - stream_env->stream_config. - metadata_config.data_type = metadata_format; - stream_env->stream_config. - metadata_config.resolution.width = metadata_width; - stream_env->stream_config. - metadata_config.resolution.height = metadata_height; - } - - return 0; -} - -int atomisp_css_frame_allocate(struct atomisp_css_frame **frame, - unsigned int width, unsigned int height, - enum atomisp_css_frame_format format, - unsigned int padded_width, - unsigned int raw_bit_depth) -{ - if (ia_css_frame_allocate(frame, width, height, format, - padded_width, raw_bit_depth) != IA_CSS_SUCCESS) - return -ENOMEM; - - return 0; -} - -int atomisp_css_frame_allocate_from_info(struct atomisp_css_frame **frame, - const struct atomisp_css_frame_info *info) -{ - if (ia_css_frame_allocate_from_info(frame, info) != IA_CSS_SUCCESS) - return -ENOMEM; - - return 0; -} - -void atomisp_css_frame_free(struct atomisp_css_frame *frame) -{ - ia_css_frame_free(frame); -} - -int atomisp_css_frame_map(struct atomisp_css_frame **frame, - const struct atomisp_css_frame_info *info, - const void __user *data, uint16_t attribute, - void *context) -{ - if (ia_css_frame_map(frame, info, data, attribute, context) - != IA_CSS_SUCCESS) - return -ENOMEM; - - return 0; -} - -int atomisp_css_set_black_frame(struct atomisp_sub_device *asd, - const struct atomisp_css_frame *raw_black_frame) -{ - if (sh_css_set_black_frame( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - raw_black_frame) != IA_CSS_SUCCESS) - return -ENOMEM; - - return 0; -} - -int 
atomisp_css_allocate_continuous_frames(bool init_time, - struct atomisp_sub_device *asd) -{ - if (ia_css_alloc_continuous_frame_remain( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) - != IA_CSS_SUCCESS) - return -EINVAL; - return 0; -} - -void atomisp_css_update_continuous_frames(struct atomisp_sub_device *asd) -{ - ia_css_update_continuous_frames( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream); -} - -int atomisp_css_stop(struct atomisp_sub_device *asd, - enum atomisp_css_pipe_id pipe_id, bool in_reset) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_s3a_buf *s3a_buf; - struct atomisp_dis_buf *dis_buf; - struct atomisp_metadata_buf *md_buf; - unsigned long irqflags; - unsigned int i; - - /* if is called in atomisp_reset(), force destroy stream */ - if (__destroy_streams(asd, true)) - dev_err(isp->dev, "destroy stream failed.\n"); - - /* if is called in atomisp_reset(), force destroy all pipes */ - if (__destroy_pipes(asd, true)) - dev_err(isp->dev, "destroy pipes failed.\n"); - - atomisp_init_raw_buffer_bitmap(asd); - - /* - * SP can not be stop if other streams are in use - */ - if (atomisp_streaming_count(isp) == 0) - ia_css_stop_sp(); - - if (!in_reset) { - struct atomisp_stream_env *stream_env; - int i, j; - - for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) { - stream_env = &asd->stream_env[i]; - for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) { - ia_css_pipe_config_defaults( - &stream_env->pipe_configs[j]); - ia_css_pipe_extra_config_defaults( - &stream_env->pipe_extra_configs[j]); - } - ia_css_stream_config_defaults( - &stream_env->stream_config); - } - atomisp_isp_parameters_clean_up(&asd->params.config); - asd->params.css_update_params_needed = false; - } - - /* move stats buffers to free queue list */ - while (!list_empty(&asd->s3a_stats_in_css)) { - s3a_buf = list_entry(asd->s3a_stats_in_css.next, - struct atomisp_s3a_buf, list); - list_del(&s3a_buf->list); - list_add_tail(&s3a_buf->list, &asd->s3a_stats); - } - while 
(!list_empty(&asd->s3a_stats_ready)) { - s3a_buf = list_entry(asd->s3a_stats_ready.next, - struct atomisp_s3a_buf, list); - list_del(&s3a_buf->list); - list_add_tail(&s3a_buf->list, &asd->s3a_stats); - } - - spin_lock_irqsave(&asd->dis_stats_lock, irqflags); - while (!list_empty(&asd->dis_stats_in_css)) { - dis_buf = list_entry(asd->dis_stats_in_css.next, - struct atomisp_dis_buf, list); - list_del(&dis_buf->list); - list_add_tail(&dis_buf->list, &asd->dis_stats); - } - asd->params.dis_proj_data_valid = false; - spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags); - - for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - while (!list_empty(&asd->metadata_in_css[i])) { - md_buf = list_entry(asd->metadata_in_css[i].next, - struct atomisp_metadata_buf, list); - list_del(&md_buf->list); - list_add_tail(&md_buf->list, &asd->metadata[i]); - } - while (!list_empty(&asd->metadata_ready[i])) { - md_buf = list_entry(asd->metadata_ready[i].next, - struct atomisp_metadata_buf, list); - list_del(&md_buf->list); - list_add_tail(&md_buf->list, &asd->metadata[i]); - } - } - - atomisp_flush_params_queue(&asd->video_out_capture); - atomisp_flush_params_queue(&asd->video_out_vf); - atomisp_flush_params_queue(&asd->video_out_preview); - atomisp_flush_params_queue(&asd->video_out_video_capture); - atomisp_free_css_parameters(&asd->params.css_param); - memset(&asd->params.css_param, 0, sizeof(asd->params.css_param)); - return 0; -} - -int atomisp_css_continuous_set_num_raw_frames( - struct atomisp_sub_device *asd, - int num_frames) -{ - if (asd->enable_raw_buffer_lock->val) { - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .stream_config.init_num_cont_raw_buf = - ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN; - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO && - asd->params.video_dis_en) - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .stream_config.init_num_cont_raw_buf += - ATOMISP_CSS2_NUM_DVS_FRAME_DELAY; - } else { - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - 
.stream_config.init_num_cont_raw_buf = - ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES; - } - - if (asd->params.video_dis_en) - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .stream_config.init_num_cont_raw_buf += - ATOMISP_CSS2_NUM_DVS_FRAME_DELAY; - - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .stream_config.target_num_cont_raw_buf = num_frames; - return 0; -} - -void atomisp_css_disable_vf_pp(struct atomisp_sub_device *asd, - bool disable) -{ - int i; - - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_extra_configs[i].disable_vf_pp = !!disable; -} - -static enum ia_css_pipe_mode __pipe_id_to_pipe_mode( - struct atomisp_sub_device *asd, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - struct camera_mipi_info *mipi_info = atomisp_to_sensor_mipi_info( - isp->inputs[asd->input_curr].camera); - - switch (pipe_id) { - case IA_CSS_PIPE_ID_COPY: - /* Currently only YUVPP mode supports YUV420_Legacy format. - * Revert this when other pipe modes can support - * YUV420_Legacy format. 
- */ - if (mipi_info && mipi_info->input_format == - ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) - return IA_CSS_PIPE_MODE_YUVPP; - return IA_CSS_PIPE_MODE_COPY; - case IA_CSS_PIPE_ID_PREVIEW: - return IA_CSS_PIPE_MODE_PREVIEW; - case IA_CSS_PIPE_ID_CAPTURE: - return IA_CSS_PIPE_MODE_CAPTURE; - case IA_CSS_PIPE_ID_VIDEO: - return IA_CSS_PIPE_MODE_VIDEO; - case IA_CSS_PIPE_ID_ACC: - return IA_CSS_PIPE_MODE_ACC; - case IA_CSS_PIPE_ID_YUVPP: - return IA_CSS_PIPE_MODE_YUVPP; - default: - WARN_ON(1); - return IA_CSS_PIPE_MODE_PREVIEW; - } - -} - -static void __configure_output(struct atomisp_sub_device *asd, - unsigned int stream_index, - unsigned int width, unsigned int height, - unsigned int min_width, - enum ia_css_frame_format format, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_stream_env *stream_env = - &asd->stream_env[stream_index]; - struct ia_css_stream_config *s_config = &stream_env->stream_config; - - stream_env->pipe_configs[pipe_id].mode = - __pipe_id_to_pipe_mode(asd, pipe_id); - stream_env->update_pipe[pipe_id] = true; - - stream_env->pipe_configs[pipe_id].output_info[0].res.width = width; - stream_env->pipe_configs[pipe_id].output_info[0].res.height = height; - stream_env->pipe_configs[pipe_id].output_info[0].format = format; - stream_env->pipe_configs[pipe_id].output_info[0].padded_width = min_width; - - /* isp binary 2.2 specific setting*/ - if (width > s_config->input_config.effective_res.width || - height > s_config->input_config.effective_res.height) { - s_config->input_config.effective_res.width = width; - s_config->input_config.effective_res.height = height; - } - - dev_dbg(isp->dev, "configuring pipe[%d] output info w=%d.h=%d.f=%d.\n", - pipe_id, width, height, format); -} - -static void __configure_video_preview_output(struct atomisp_sub_device *asd, - unsigned int stream_index, - unsigned int width, unsigned int height, - unsigned int min_width, - enum ia_css_frame_format format, - enum ia_css_pipe_id 
pipe_id) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_stream_env *stream_env = - &asd->stream_env[stream_index]; - struct ia_css_frame_info *css_output_info; - struct ia_css_stream_config *stream_config = &stream_env->stream_config; - - stream_env->pipe_configs[pipe_id].mode = - __pipe_id_to_pipe_mode(asd, pipe_id); - stream_env->update_pipe[pipe_id] = true; - - /* - * second_output will be as video main output in SDV mode - * with SOC camera. output will be as video main output in - * normal video mode. - */ - if (asd->continuous_mode->val) - css_output_info = &stream_env->pipe_configs[pipe_id]. - output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX]; - else - css_output_info = &stream_env->pipe_configs[pipe_id]. - output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX]; - - css_output_info->res.width = width; - css_output_info->res.height = height; - css_output_info->format = format; - css_output_info->padded_width = min_width; - - /* isp binary 2.2 specific setting*/ - if (width > stream_config->input_config.effective_res.width || - height > stream_config->input_config.effective_res.height) { - stream_config->input_config.effective_res.width = width; - stream_config->input_config.effective_res.height = height; - } - - dev_dbg(isp->dev, "configuring pipe[%d] output info w=%d.h=%d.f=%d.\n", - pipe_id, width, height, format); -} - -/* - * For CSS2.1, capture pipe uses capture_pp_in_res to configure yuv - * downscaling input resolution. 
- */ -static void __configure_capture_pp_input(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - struct ia_css_stream_config *stream_config = &stream_env->stream_config; - struct ia_css_pipe_config *pipe_configs = - &stream_env->pipe_configs[pipe_id]; - struct ia_css_pipe_extra_config *pipe_extra_configs = - &stream_env->pipe_extra_configs[pipe_id]; - unsigned int hor_ds_factor = 0, ver_ds_factor = 0; - - if (width == 0 && height == 0) - return; - - if (width * 9 / 10 < pipe_configs->output_info[0].res.width || - height * 9 / 10 < pipe_configs->output_info[0].res.height) - return; - /* here just copy the calculation in css */ - hor_ds_factor = CEIL_DIV(width >> 1, - pipe_configs->output_info[0].res.width); - ver_ds_factor = CEIL_DIV(height >> 1, - pipe_configs->output_info[0].res.height); - - if ((asd->isp->media_dev.hw_revision < - (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) || - IS_CHT) && hor_ds_factor != ver_ds_factor) { - dev_warn(asd->isp->dev, - "Cropping for capture due to FW limitation"); - return; - } - - pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id); - stream_env->update_pipe[pipe_id] = true; - - pipe_extra_configs->enable_yuv_ds = true; - - pipe_configs->capt_pp_in_res.width = - stream_config->input_config.effective_res.width; - pipe_configs->capt_pp_in_res.height = - stream_config->input_config.effective_res.height; - - dev_dbg(isp->dev, "configuring pipe[%d]capture pp input w=%d.h=%d.\n", - pipe_id, width, height); -} - -/* - * For CSS2.1, preview pipe could support bayer downscaling, yuv decimation and - * yuv downscaling, which needs addtional configurations. 
- */ -static void __configure_preview_pp_input(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - int out_width, out_height, yuv_ds_in_width, yuv_ds_in_height; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - struct ia_css_stream_config *stream_config = &stream_env->stream_config; - struct ia_css_pipe_config *pipe_configs = - &stream_env->pipe_configs[pipe_id]; - struct ia_css_pipe_extra_config *pipe_extra_configs = - &stream_env->pipe_extra_configs[pipe_id]; - struct ia_css_resolution *bayer_ds_out_res = - &pipe_configs->bayer_ds_out_res; - struct ia_css_resolution *vf_pp_in_res = - &pipe_configs->vf_pp_in_res; - struct ia_css_resolution *effective_res = - &stream_config->input_config.effective_res; - - const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} }; - /* - * BZ201033: YUV decimation factor of 4 causes couple of rightmost - * columns to be shaded. Remove this factor to work around the CSS bug. 
- * const unsigned int yuv_dec_fct[] = {4, 2}; - */ - const unsigned int yuv_dec_fct[] = { 2 }; - unsigned int i; - - if (width == 0 && height == 0) - return; - - pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id); - stream_env->update_pipe[pipe_id] = true; - - out_width = pipe_configs->output_info[0].res.width; - out_height = pipe_configs->output_info[0].res.height; - - /* - * The ISP could do bayer downscaling, yuv decimation and yuv - * downscaling: - * 1: Bayer Downscaling: between effective resolution and - * bayer_ds_res_out; - * 2: YUV Decimation: between bayer_ds_res_out and vf_pp_in_res; - * 3: YUV Downscaling: between vf_pp_in_res and final vf output - * - * Rule for Bayer Downscaling: support factor 2, 1.5 and 1.25 - * Rule for YUV Decimation: support factor 2, 4 - * Rule for YUV Downscaling: arbitary value below 2 - * - * General rule of factor distribution among these stages: - * 1: try to do Bayer downscaling first if not in online mode. - * 2: try to do maximum of 2 for YUV downscaling - * 3: the remainling for YUV decimation - * - * Note: - * Do not configure bayer_ds_out_res if: - * online == 1 or continuous == 0 or raw_binning = 0 - */ - if (stream_config->online || !stream_config->continuous || - !pipe_extra_configs->enable_raw_binning) { - bayer_ds_out_res->width = 0; - bayer_ds_out_res->height = 0; - } else { - bayer_ds_out_res->width = effective_res->width; - bayer_ds_out_res->height = effective_res->height; - - for (i = 0; i < ARRAY_SIZE(bds_fct); i++) { - if (effective_res->width >= out_width * - bds_fct[i].numerator / bds_fct[i].denominator && - effective_res->height >= out_height * - bds_fct[i].numerator / bds_fct[i].denominator) { - bayer_ds_out_res->width = - effective_res->width * - bds_fct[i].denominator / - bds_fct[i].numerator; - bayer_ds_out_res->height = - effective_res->height * - bds_fct[i].denominator / - bds_fct[i].numerator; - break; - } - } - } - /* - * calculate YUV Decimation, YUV downscaling facor: - * YUV 
Downscaling factor must not exceed 2. - * YUV Decimation factor could be 2, 4. - */ - /* first decide the yuv_ds input resolution */ - if (bayer_ds_out_res->width == 0) { - yuv_ds_in_width = effective_res->width; - yuv_ds_in_height = effective_res->height; - } else { - yuv_ds_in_width = bayer_ds_out_res->width; - yuv_ds_in_height = bayer_ds_out_res->height; - } - - vf_pp_in_res->width = yuv_ds_in_width; - vf_pp_in_res->height = yuv_ds_in_height; - - /* find out the yuv decimation factor */ - for (i = 0; i < ARRAY_SIZE(yuv_dec_fct); i++) { - if (yuv_ds_in_width >= out_width * yuv_dec_fct[i] && - yuv_ds_in_height >= out_height * yuv_dec_fct[i]) { - vf_pp_in_res->width = yuv_ds_in_width / yuv_dec_fct[i]; - vf_pp_in_res->height = yuv_ds_in_height / yuv_dec_fct[i]; - break; - } - } - - if (vf_pp_in_res->width == out_width && - vf_pp_in_res->height == out_height) { - pipe_extra_configs->enable_yuv_ds = false; - vf_pp_in_res->width = 0; - vf_pp_in_res->height = 0; - } else { - pipe_extra_configs->enable_yuv_ds = true; - } - - dev_dbg(isp->dev, "configuring pipe[%d]preview pp input w=%d.h=%d.\n", - pipe_id, width, height); -} - -/* - * For CSS2.1, offline video pipe could support bayer decimation, and - * yuv downscaling, which needs addtional configurations. 
- */ -static void __configure_video_pp_input(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - int out_width, out_height; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - struct ia_css_stream_config *stream_config = &stream_env->stream_config; - struct ia_css_pipe_config *pipe_configs = - &stream_env->pipe_configs[pipe_id]; - struct ia_css_pipe_extra_config *pipe_extra_configs = - &stream_env->pipe_extra_configs[pipe_id]; - struct ia_css_resolution *bayer_ds_out_res = - &pipe_configs->bayer_ds_out_res; - struct ia_css_resolution *effective_res = - &stream_config->input_config.effective_res; - - const struct bayer_ds_factor bds_factors[] = { - {8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2} }; - unsigned int i; - - if (width == 0 && height == 0) - return; - - pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id); - stream_env->update_pipe[pipe_id] = true; - - pipe_extra_configs->enable_yuv_ds = false; - - /* - * If DVS is enabled, video binary will take care the dvs envelope - * and usually the bayer_ds_out_res should be larger than 120% of - * destination resolution, the extra 20% will be cropped as DVS - * envelope. But, if the bayer_ds_out_res is less than 120% of the - * destination. The ISP can still work, but DVS quality is not good. 
- */ - /* taking at least 10% as envelope */ - if (asd->params.video_dis_en) { - out_width = pipe_configs->output_info[0].res.width * 110 / 100; - out_height = pipe_configs->output_info[0].res.height * 110 / 100; - } else { - out_width = pipe_configs->output_info[0].res.width; - out_height = pipe_configs->output_info[0].res.height; - } - - /* - * calculate bayer decimate factor: - * 1: only 1.5, 2, 4 and 8 get supported - * 2: Do not configure bayer_ds_out_res if: - * online == 1 or continuous == 0 or raw_binning = 0 - */ - if (stream_config->online || !stream_config->continuous) { - bayer_ds_out_res->width = 0; - bayer_ds_out_res->height = 0; - goto done; - } - - pipe_extra_configs->enable_raw_binning = true; - bayer_ds_out_res->width = effective_res->width; - bayer_ds_out_res->height = effective_res->height; - - for (i = 0; i < sizeof(bds_factors) / sizeof(struct bayer_ds_factor); - i++) { - if (effective_res->width >= out_width * - bds_factors[i].numerator / bds_factors[i].denominator && - effective_res->height >= out_height * - bds_factors[i].numerator / bds_factors[i].denominator) { - bayer_ds_out_res->width = effective_res->width * - bds_factors[i].denominator / - bds_factors[i].numerator; - bayer_ds_out_res->height = effective_res->height * - bds_factors[i].denominator / - bds_factors[i].numerator; - break; - } - } - - /* - * DVS is cropped from BDS output, so we do not really need to set the - * envelope to 20% of output resolution here. always set it to 12x12 - * per firmware requirement. 
- */ - pipe_configs->dvs_envelope.width = 12; - pipe_configs->dvs_envelope.height = 12; - -done: - if (pipe_id == IA_CSS_PIPE_ID_YUVPP) - stream_config->left_padding = -1; - else - stream_config->left_padding = 12; - dev_dbg(isp->dev, "configuring pipe[%d]video pp input w=%d.h=%d.\n", - pipe_id, width, height); -} - -static void __configure_vf_output(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - stream_env->pipe_configs[pipe_id].mode = - __pipe_id_to_pipe_mode(asd, pipe_id); - stream_env->update_pipe[pipe_id] = true; - - stream_env->pipe_configs[pipe_id].vf_output_info[0].res.width = width; - stream_env->pipe_configs[pipe_id].vf_output_info[0].res.height = height; - stream_env->pipe_configs[pipe_id].vf_output_info[0].format = format; - stream_env->pipe_configs[pipe_id].vf_output_info[0].padded_width = - min_width; - dev_dbg(isp->dev, - "configuring pipe[%d] vf output info w=%d.h=%d.f=%d.\n", - pipe_id, width, height, format); -} - -static void __configure_video_vf_output(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - struct ia_css_frame_info *css_output_info; - - stream_env->pipe_configs[pipe_id].mode = - __pipe_id_to_pipe_mode(asd, pipe_id); - stream_env->update_pipe[pipe_id] = true; - - /* - * second_vf_output will be as video viewfinder in SDV mode - * with SOC camera. vf_output will be as video viewfinder in - * normal video mode. - */ - if (asd->continuous_mode->val) - css_output_info = &stream_env->pipe_configs[pipe_id]. 
- vf_output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX]; - else - css_output_info = &stream_env->pipe_configs[pipe_id]. - vf_output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX]; - - css_output_info->res.width = width; - css_output_info->res.height = height; - css_output_info->format = format; - css_output_info->padded_width = min_width; - dev_dbg(isp->dev, - "configuring pipe[%d] vf output info w=%d.h=%d.f=%d.\n", - pipe_id, width, height, format); -} - -static int __get_frame_info(struct atomisp_sub_device *asd, - unsigned int stream_index, - struct atomisp_css_frame_info *info, - enum frame_info_type type, - enum ia_css_pipe_id pipe_id) -{ - struct atomisp_device *isp = asd->isp; - enum ia_css_err ret; - struct ia_css_pipe_info p_info; - - /* FIXME! No need to destroy/recreate all streams */ - if (__destroy_streams(asd, true)) - dev_warn(isp->dev, "destroy stream failed.\n"); - - if (__destroy_pipes(asd, true)) - dev_warn(isp->dev, "destroy pipe failed.\n"); - - if (__create_pipes(asd)) - return -EINVAL; - - if (__create_streams(asd)) - goto stream_err; - - ret = ia_css_pipe_get_info( - asd->stream_env[stream_index] - .pipes[pipe_id], &p_info); - if (ret == IA_CSS_SUCCESS) { - switch (type) { - case ATOMISP_CSS_VF_FRAME: - *info = p_info.vf_output_info[0]; - dev_dbg(isp->dev, "getting vf frame info.\n"); - break; - case ATOMISP_CSS_SECOND_VF_FRAME: - *info = p_info.vf_output_info[1]; - dev_dbg(isp->dev, "getting second vf frame info.\n"); - break; - case ATOMISP_CSS_OUTPUT_FRAME: - *info = p_info.output_info[0]; - dev_dbg(isp->dev, "getting main frame info.\n"); - break; - case ATOMISP_CSS_SECOND_OUTPUT_FRAME: - *info = p_info.output_info[1]; - dev_dbg(isp->dev, "getting second main frame info.\n"); - break; - case ATOMISP_CSS_RAW_FRAME: - *info = p_info.raw_output_info; - dev_dbg(isp->dev, "getting raw frame info.\n"); - } - dev_dbg(isp->dev, "get frame info: w=%d, h=%d, num_invalid_frames %d.\n", - info->res.width, info->res.height, p_info.num_invalid_frames); - return 0; 
- } - -stream_err: - __destroy_pipes(asd, true); - return -EINVAL; -} - -static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd, - uint16_t source_pad) -{ - struct atomisp_device *isp = asd->isp; - /* - * to SOC camera, use yuvpp pipe. - */ - if (ATOMISP_USE_YUVPP(asd)) - return IA_CSS_PIPE_ID_YUVPP; - - switch (source_pad) { - case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: - if (asd->yuvpp_mode) - return IA_CSS_PIPE_ID_YUVPP; - if (asd->copy_mode) - return IA_CSS_PIPE_ID_COPY; - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO - || asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) - return IA_CSS_PIPE_ID_VIDEO; - else - return IA_CSS_PIPE_ID_CAPTURE; - case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: - if (asd->copy_mode) - return IA_CSS_PIPE_ID_COPY; - return IA_CSS_PIPE_ID_CAPTURE; - case ATOMISP_SUBDEV_PAD_SOURCE_VF: - if (!atomisp_is_mbuscode_raw( - asd->fmt[asd->capture_pad].fmt.code)) - return IA_CSS_PIPE_ID_CAPTURE; - case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: - if (asd->yuvpp_mode) - return IA_CSS_PIPE_ID_YUVPP; - if (asd->copy_mode) - return IA_CSS_PIPE_ID_COPY; - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) - return IA_CSS_PIPE_ID_VIDEO; - else - return IA_CSS_PIPE_ID_PREVIEW; - } - dev_warn(isp->dev, - "invalid source pad:%d, return default preview pipe index.\n", - source_pad); - return IA_CSS_PIPE_ID_PREVIEW; -} - -int atomisp_get_css_frame_info(struct atomisp_sub_device *asd, - uint16_t source_pad, - struct atomisp_css_frame_info *frame_info) -{ - struct ia_css_pipe_info info; - int pipe_index = atomisp_get_pipe_index(asd, source_pad); - int stream_index; - struct atomisp_device *isp = asd->isp; - - if (ATOMISP_SOC_CAMERA(asd)) - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); - else { - stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ? 
- ATOMISP_INPUT_STREAM_VIDEO : - atomisp_source_pad_to_stream_id(asd, source_pad); - } - - if (IA_CSS_SUCCESS != ia_css_pipe_get_info(asd->stream_env[stream_index] - .pipes[pipe_index], &info)) { - dev_err(isp->dev, "ia_css_pipe_get_info FAILED"); - return -EINVAL; - } - - switch (source_pad) { - case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: - *frame_info = info.output_info[0]; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: - if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val) - *frame_info = info. - output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX]; - else - *frame_info = info. - output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX]; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_VF: - if (stream_index == ATOMISP_INPUT_STREAM_POSTVIEW) - *frame_info = info.output_info[0]; - else - *frame_info = info.vf_output_info[0]; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO && - (pipe_index == IA_CSS_PIPE_ID_VIDEO || - pipe_index == IA_CSS_PIPE_ID_YUVPP)) - if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val) - *frame_info = info. - vf_output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX]; - else - *frame_info = info. - vf_output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX]; - else if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val) - *frame_info = - info.output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX]; - else - *frame_info = - info.output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX]; - - break; - default: - frame_info = NULL; - break; - } - return frame_info ? 0 : -EINVAL; -} - -int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd, - unsigned int stream_index, - unsigned int width, unsigned int height, - unsigned int padded_width, - enum atomisp_css_frame_format format) -{ - asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_COPY]. 
- default_capture_config.mode = - CSS_CAPTURE_MODE_RAW; - - __configure_output(asd, stream_index, width, height, padded_width, - format, IA_CSS_PIPE_ID_COPY); - return 0; -} - -int atomisp_css_yuvpp_configure_output(struct atomisp_sub_device *asd, - unsigned int stream_index, - unsigned int width, unsigned int height, - unsigned int padded_width, - enum atomisp_css_frame_format format) -{ - asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_YUVPP]. - default_capture_config.mode = - CSS_CAPTURE_MODE_RAW; - - __configure_output(asd, stream_index, width, height, padded_width, - format, IA_CSS_PIPE_ID_YUVPP); - return 0; -} - -int atomisp_css_yuvpp_configure_viewfinder( - struct atomisp_sub_device *asd, - unsigned int stream_index, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[stream_index]; - enum ia_css_pipe_id pipe_id = IA_CSS_PIPE_ID_YUVPP; - - stream_env->pipe_configs[pipe_id].mode = - __pipe_id_to_pipe_mode(asd, pipe_id); - stream_env->update_pipe[pipe_id] = true; - - stream_env->pipe_configs[pipe_id].vf_output_info[0].res.width = width; - stream_env->pipe_configs[pipe_id].vf_output_info[0].res.height = height; - stream_env->pipe_configs[pipe_id].vf_output_info[0].format = format; - stream_env->pipe_configs[pipe_id].vf_output_info[0].padded_width = - min_width; - return 0; -} - -int atomisp_css_yuvpp_get_output_frame_info( - struct atomisp_sub_device *asd, - unsigned int stream_index, - struct atomisp_css_frame_info *info) -{ - return __get_frame_info(asd, stream_index, info, - ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_YUVPP); -} - -int atomisp_css_yuvpp_get_viewfinder_frame_info( - struct atomisp_sub_device *asd, - unsigned int stream_index, - struct atomisp_css_frame_info *info) -{ - return __get_frame_info(asd, stream_index, info, - ATOMISP_CSS_VF_FRAME, IA_CSS_PIPE_ID_YUVPP); -} - -int atomisp_css_preview_configure_output(struct 
atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format) -{ - /* - * to SOC camera, use yuvpp pipe. - */ - if (ATOMISP_USE_YUVPP(asd)) - __configure_video_preview_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height, - min_width, format, IA_CSS_PIPE_ID_YUVPP); - else - __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height, - min_width, format, IA_CSS_PIPE_ID_PREVIEW); - return 0; -} - -int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format) -{ - enum ia_css_pipe_id pipe_id; - - /* - * to SOC camera, use yuvpp pipe. - */ - if (ATOMISP_USE_YUVPP(asd)) - pipe_id = IA_CSS_PIPE_ID_YUVPP; - else - pipe_id = IA_CSS_PIPE_ID_CAPTURE; - - __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height, - min_width, format, pipe_id); - return 0; -} - -int atomisp_css_video_configure_output(struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format) -{ - /* - * to SOC camera, use yuvpp pipe. - */ - if (ATOMISP_USE_YUVPP(asd)) - __configure_video_preview_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height, - min_width, format, IA_CSS_PIPE_ID_YUVPP); - else - __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height, - min_width, format, IA_CSS_PIPE_ID_VIDEO); - return 0; -} - -int atomisp_css_video_configure_viewfinder( - struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format) -{ - /* - * to SOC camera, video will use yuvpp pipe. 
- */ - if (ATOMISP_USE_YUVPP(asd)) - __configure_video_vf_output(asd, width, height, min_width, format, - IA_CSS_PIPE_ID_YUVPP); - else - __configure_vf_output(asd, width, height, min_width, format, - IA_CSS_PIPE_ID_VIDEO); - return 0; -} - -int atomisp_css_capture_configure_viewfinder( - struct atomisp_sub_device *asd, - unsigned int width, unsigned int height, - unsigned int min_width, - enum atomisp_css_frame_format format) -{ - enum ia_css_pipe_id pipe_id; - - /* - * to SOC camera, video will use yuvpp pipe. - */ - if (ATOMISP_USE_YUVPP(asd)) - pipe_id = IA_CSS_PIPE_ID_YUVPP; - else - pipe_id = IA_CSS_PIPE_ID_CAPTURE; - - __configure_vf_output(asd, width, height, min_width, format, - pipe_id); - return 0; -} - -int atomisp_css_video_get_viewfinder_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info) -{ - enum ia_css_pipe_id pipe_id; - enum frame_info_type frame_type = ATOMISP_CSS_VF_FRAME; - - if (ATOMISP_USE_YUVPP(asd)) { - pipe_id = IA_CSS_PIPE_ID_YUVPP; - if (asd->continuous_mode->val) - frame_type = ATOMISP_CSS_SECOND_VF_FRAME; - } else { - pipe_id = IA_CSS_PIPE_ID_VIDEO; - } - - return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info, - frame_type, pipe_id); -} - -int atomisp_css_capture_get_viewfinder_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info) -{ - enum ia_css_pipe_id pipe_id; - - if (ATOMISP_USE_YUVPP(asd)) - pipe_id = IA_CSS_PIPE_ID_YUVPP; - else - pipe_id = IA_CSS_PIPE_ID_CAPTURE; - - return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info, - ATOMISP_CSS_VF_FRAME, pipe_id); -} - -int atomisp_css_capture_get_output_raw_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info) -{ - if (ATOMISP_USE_YUVPP(asd)) - return 0; - - return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info, - ATOMISP_CSS_RAW_FRAME, IA_CSS_PIPE_ID_CAPTURE); -} - -int atomisp_css_copy_get_output_frame_info( - struct atomisp_sub_device *asd, - unsigned int 
stream_index, - struct atomisp_css_frame_info *info) -{ - return __get_frame_info(asd, stream_index, info, - ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_COPY); -} - -int atomisp_css_preview_get_output_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info) -{ - enum ia_css_pipe_id pipe_id; - enum frame_info_type frame_type = ATOMISP_CSS_OUTPUT_FRAME; - - if (ATOMISP_USE_YUVPP(asd)) { - pipe_id = IA_CSS_PIPE_ID_YUVPP; - if (asd->continuous_mode->val) - frame_type = ATOMISP_CSS_SECOND_OUTPUT_FRAME; - } else { - pipe_id = IA_CSS_PIPE_ID_PREVIEW; - } - - return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info, - frame_type, pipe_id); -} - -int atomisp_css_capture_get_output_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info) -{ - enum ia_css_pipe_id pipe_id; - - if (ATOMISP_USE_YUVPP(asd)) - pipe_id = IA_CSS_PIPE_ID_YUVPP; - else - pipe_id = IA_CSS_PIPE_ID_CAPTURE; - - return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info, - ATOMISP_CSS_OUTPUT_FRAME, pipe_id); -} - -int atomisp_css_video_get_output_frame_info( - struct atomisp_sub_device *asd, - struct atomisp_css_frame_info *info) -{ - enum ia_css_pipe_id pipe_id; - enum frame_info_type frame_type = ATOMISP_CSS_OUTPUT_FRAME; - - if (ATOMISP_USE_YUVPP(asd)) { - pipe_id = IA_CSS_PIPE_ID_YUVPP; - if (asd->continuous_mode->val) - frame_type = ATOMISP_CSS_SECOND_OUTPUT_FRAME; - } else { - pipe_id = IA_CSS_PIPE_ID_VIDEO; - } - - return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info, - frame_type, pipe_id); -} - -int atomisp_css_preview_configure_pp_input( - struct atomisp_sub_device *asd, - unsigned int width, unsigned int height) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - __configure_preview_pp_input(asd, width, height, - ATOMISP_USE_YUVPP(asd) ? - IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_PREVIEW); - - if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE]. 
- capt_pp_in_res.width) - __configure_capture_pp_input(asd, width, height, - ATOMISP_USE_YUVPP(asd) ? - IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE); - return 0; -} - -int atomisp_css_capture_configure_pp_input( - struct atomisp_sub_device *asd, - unsigned int width, unsigned int height) -{ - __configure_capture_pp_input(asd, width, height, - ATOMISP_USE_YUVPP(asd) ? - IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE); - return 0; -} - -int atomisp_css_video_configure_pp_input( - struct atomisp_sub_device *asd, - unsigned int width, unsigned int height) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - - __configure_video_pp_input(asd, width, height, - ATOMISP_USE_YUVPP(asd) ? - IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_VIDEO); - - if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE]. - capt_pp_in_res.width) - __configure_capture_pp_input(asd, width, height, - ATOMISP_USE_YUVPP(asd) ? - IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE); - return 0; -} - -int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd, - int num_captures, unsigned int skip, int offset) -{ - enum ia_css_err ret; - -#ifdef ISP2401 - dev_dbg(asd->isp->dev, "%s num_capture:%d skip:%d offset:%d\n", - __func__, num_captures, skip, offset); -#endif - ret = ia_css_stream_capture( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - num_captures, skip, offset); - if (ret != IA_CSS_SUCCESS) - return -EINVAL; - - return 0; -} - -int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id) -{ - enum ia_css_err ret; - - ret = ia_css_stream_capture_frame( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - exp_id); - if (ret == IA_CSS_ERR_QUEUE_IS_FULL) { - /* capture cmd queue is full */ - return -EBUSY; - } else if (ret != IA_CSS_SUCCESS) { - return -EIO; - } - - return 0; -} - -int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id) -{ - enum ia_css_err ret; - - ret = ia_css_unlock_raw_frame( - 
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - exp_id); - if (ret == IA_CSS_ERR_QUEUE_IS_FULL) - return -EAGAIN; - else if (ret != IA_CSS_SUCCESS) - return -EIO; - - return 0; -} - -int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd, - bool enable) -{ - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[IA_CSS_PIPE_ID_CAPTURE] - .default_capture_config.enable_xnr = enable; - asd->params.capture_config.enable_xnr = enable; - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true; - - return 0; -} - -void atomisp_css_send_input_frame(struct atomisp_sub_device *asd, - unsigned short *data, unsigned int width, - unsigned int height) -{ - ia_css_stream_send_input_frame( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - data, width, height); -} - -bool atomisp_css_isp_has_started(void) -{ - return ia_css_isp_has_started(); -} - -void atomisp_css_request_flash(struct atomisp_sub_device *asd) -{ - ia_css_stream_request_flash( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream); -} - -void atomisp_css_set_wb_config(struct atomisp_sub_device *asd, - struct atomisp_css_wb_config *wb_config) -{ - asd->params.config.wb_config = wb_config; -} - -void atomisp_css_set_ob_config(struct atomisp_sub_device *asd, - struct atomisp_css_ob_config *ob_config) -{ - asd->params.config.ob_config = ob_config; -} - -void atomisp_css_set_dp_config(struct atomisp_sub_device *asd, - struct atomisp_css_dp_config *dp_config) -{ - asd->params.config.dp_config = dp_config; -} - -void atomisp_css_set_de_config(struct atomisp_sub_device *asd, - struct atomisp_css_de_config *de_config) -{ - asd->params.config.de_config = de_config; -} - -void atomisp_css_set_dz_config(struct atomisp_sub_device *asd, - struct atomisp_css_dz_config *dz_config) -{ - asd->params.config.dz_config = dz_config; -} - -void atomisp_css_set_default_de_config(struct atomisp_sub_device *asd) -{ - asd->params.config.de_config = NULL; -} - -void 
atomisp_css_set_ce_config(struct atomisp_sub_device *asd, - struct atomisp_css_ce_config *ce_config) -{ - asd->params.config.ce_config = ce_config; -} - -void atomisp_css_set_nr_config(struct atomisp_sub_device *asd, - struct atomisp_css_nr_config *nr_config) -{ - asd->params.config.nr_config = nr_config; -} - -void atomisp_css_set_ee_config(struct atomisp_sub_device *asd, - struct atomisp_css_ee_config *ee_config) -{ - asd->params.config.ee_config = ee_config; -} - -void atomisp_css_set_tnr_config(struct atomisp_sub_device *asd, - struct atomisp_css_tnr_config *tnr_config) -{ - asd->params.config.tnr_config = tnr_config; -} - -void atomisp_css_set_cc_config(struct atomisp_sub_device *asd, - struct atomisp_css_cc_config *cc_config) -{ - asd->params.config.cc_config = cc_config; -} - -void atomisp_css_set_macc_table(struct atomisp_sub_device *asd, - struct atomisp_css_macc_table *macc_table) -{ - asd->params.config.macc_table = macc_table; -} - -void atomisp_css_set_macc_config(struct atomisp_sub_device *asd, - struct atomisp_css_macc_config *macc_config) -{ - asd->params.config.macc_config = macc_config; -} - -void atomisp_css_set_ecd_config(struct atomisp_sub_device *asd, - struct atomisp_css_ecd_config *ecd_config) -{ - asd->params.config.ecd_config = ecd_config; -} - -void atomisp_css_set_ynr_config(struct atomisp_sub_device *asd, - struct atomisp_css_ynr_config *ynr_config) -{ - asd->params.config.ynr_config = ynr_config; -} - -void atomisp_css_set_fc_config(struct atomisp_sub_device *asd, - struct atomisp_css_fc_config *fc_config) -{ - asd->params.config.fc_config = fc_config; -} - -void atomisp_css_set_ctc_config(struct atomisp_sub_device *asd, - struct atomisp_css_ctc_config *ctc_config) -{ - asd->params.config.ctc_config = ctc_config; -} - -void atomisp_css_set_cnr_config(struct atomisp_sub_device *asd, - struct atomisp_css_cnr_config *cnr_config) -{ - asd->params.config.cnr_config = cnr_config; -} - -void atomisp_css_set_aa_config(struct atomisp_sub_device 
*asd, - struct atomisp_css_aa_config *aa_config) -{ - asd->params.config.aa_config = aa_config; -} - -void atomisp_css_set_baa_config(struct atomisp_sub_device *asd, - struct atomisp_css_baa_config *baa_config) -{ - asd->params.config.baa_config = baa_config; -} - -void atomisp_css_set_anr_config(struct atomisp_sub_device *asd, - struct atomisp_css_anr_config *anr_config) -{ - asd->params.config.anr_config = anr_config; -} - -void atomisp_css_set_xnr_config(struct atomisp_sub_device *asd, - struct atomisp_css_xnr_config *xnr_config) -{ - asd->params.config.xnr_config = xnr_config; -} - -void atomisp_css_set_yuv2rgb_cc_config(struct atomisp_sub_device *asd, - struct atomisp_css_cc_config *yuv2rgb_cc_config) -{ - asd->params.config.yuv2rgb_cc_config = yuv2rgb_cc_config; -} - -void atomisp_css_set_rgb2yuv_cc_config(struct atomisp_sub_device *asd, - struct atomisp_css_cc_config *rgb2yuv_cc_config) -{ - asd->params.config.rgb2yuv_cc_config = rgb2yuv_cc_config; -} - -void atomisp_css_set_xnr_table(struct atomisp_sub_device *asd, - struct atomisp_css_xnr_table *xnr_table) -{ - asd->params.config.xnr_table = xnr_table; -} - -void atomisp_css_set_r_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_css_rgb_gamma_table *r_gamma_table) -{ - asd->params.config.r_gamma_table = r_gamma_table; -} - -void atomisp_css_set_g_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_css_rgb_gamma_table *g_gamma_table) -{ - asd->params.config.g_gamma_table = g_gamma_table; -} - -void atomisp_css_set_b_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_css_rgb_gamma_table *b_gamma_table) -{ - asd->params.config.b_gamma_table = b_gamma_table; -} - -void atomisp_css_set_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_css_gamma_table *gamma_table) -{ - asd->params.config.gamma_table = gamma_table; -} - -void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd, - struct atomisp_css_ctc_table *ctc_table) -{ - int i; - uint16_t *vamem_ptr = 
ctc_table->data.vamem_1; - int data_size = IA_CSS_VAMEM_1_CTC_TABLE_SIZE; - bool valid = false; - - /* workaround: if ctc_table is all 0, do not apply it */ - if (ctc_table->vamem_type == IA_CSS_VAMEM_TYPE_2) { - vamem_ptr = ctc_table->data.vamem_2; - data_size = IA_CSS_VAMEM_2_CTC_TABLE_SIZE; - } - - for (i = 0; i < data_size; i++) { - if (*(vamem_ptr + i)) { - valid = true; - break; - } - } - - if (valid) - asd->params.config.ctc_table = ctc_table; - else - dev_warn(asd->isp->dev, "Bypass the invalid ctc_table.\n"); -} - -void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd, - struct atomisp_css_anr_thres *anr_thres) -{ - asd->params.config.anr_thres = anr_thres; -} - -void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd, - struct atomisp_css_dvs_6axis *dvs_6axis) -{ - asd->params.config.dvs_6axis_config = dvs_6axis; -} - -void atomisp_css_set_gc_config(struct atomisp_sub_device *asd, - struct atomisp_css_gc_config *gc_config) -{ - asd->params.config.gc_config = gc_config; -} - -void atomisp_css_set_3a_config(struct atomisp_sub_device *asd, - struct atomisp_css_3a_config *s3a_config) -{ - asd->params.config.s3a_config = s3a_config; -} - -void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd, - struct atomisp_dis_vector *vector) -{ - if (!asd->params.config.motion_vector) - asd->params.config.motion_vector = &asd->params.css_param.motion_vector; - - memset(asd->params.config.motion_vector, - 0, sizeof(struct ia_css_vector)); - asd->params.css_param.motion_vector.x = vector->x; - asd->params.css_param.motion_vector.y = vector->y; -} - -static int atomisp_compare_dvs_grid(struct atomisp_sub_device *asd, - struct atomisp_dvs_grid_info *atomgrid) -{ - struct atomisp_css_dvs_grid_info *cur = - atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info); - - if (!cur) { - dev_err(asd->isp->dev, "dvs grid not available!\n"); - return -EINVAL; - } - - if (sizeof(*cur) != sizeof(*atomgrid)) { - dev_err(asd->isp->dev, "dvs grid 
mis-match!\n"); - return -EINVAL; - } - - if (!cur->enable) { - dev_err(asd->isp->dev, "dvs not enabled!\n"); - return -EINVAL; - } - - return memcmp(atomgrid, cur, sizeof(*cur)); -} - -void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd, - struct ia_css_dvs2_coefficients *coefs) -{ - asd->params.config.dvs2_coefs = coefs; -} - -int atomisp_css_set_dis_coefs(struct atomisp_sub_device *asd, - struct atomisp_dis_coefficients *coefs) -{ - if (atomisp_compare_dvs_grid(asd, &coefs->grid_info) != 0) - /* If the grid info in the argument differs from the current - grid info, we tell the caller to reset the grid size and - try again. */ - return -EAGAIN; - - if (coefs->hor_coefs.odd_real == NULL || - coefs->hor_coefs.odd_imag == NULL || - coefs->hor_coefs.even_real == NULL || - coefs->hor_coefs.even_imag == NULL || - coefs->ver_coefs.odd_real == NULL || - coefs->ver_coefs.odd_imag == NULL || - coefs->ver_coefs.even_real == NULL || - coefs->ver_coefs.even_imag == NULL || - asd->params.css_param.dvs2_coeff->hor_coefs.odd_real == NULL || - asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag == NULL || - asd->params.css_param.dvs2_coeff->hor_coefs.even_real == NULL || - asd->params.css_param.dvs2_coeff->hor_coefs.even_imag == NULL || - asd->params.css_param.dvs2_coeff->ver_coefs.odd_real == NULL || - asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag == NULL || - asd->params.css_param.dvs2_coeff->ver_coefs.even_real == NULL || - asd->params.css_param.dvs2_coeff->ver_coefs.even_imag == NULL) - return -EINVAL; - - if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_real, - coefs->hor_coefs.odd_real, asd->params.dvs_hor_coef_bytes)) - return -EFAULT; - if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag, - coefs->hor_coefs.odd_imag, asd->params.dvs_hor_coef_bytes)) - return -EFAULT; - if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_real, - coefs->hor_coefs.even_real, asd->params.dvs_hor_coef_bytes)) - return 
-EFAULT; - if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_imag, - coefs->hor_coefs.even_imag, asd->params.dvs_hor_coef_bytes)) - return -EFAULT; - - if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_real, - coefs->ver_coefs.odd_real, asd->params.dvs_ver_coef_bytes)) - return -EFAULT; - if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag, - coefs->ver_coefs.odd_imag, asd->params.dvs_ver_coef_bytes)) - return -EFAULT; - if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_real, - coefs->ver_coefs.even_real, asd->params.dvs_ver_coef_bytes)) - return -EFAULT; - if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_imag, - coefs->ver_coefs.even_imag, asd->params.dvs_ver_coef_bytes)) - return -EFAULT; - - asd->params.css_param.update_flag.dvs2_coefs = - (struct atomisp_dvs2_coefficients *) - asd->params.css_param.dvs2_coeff; - /* FIXME! */ -/* asd->params.dis_proj_data_valid = false; */ - asd->params.css_update_params_needed = true; - - return 0; -} - -void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd, - unsigned int zoom) -{ - struct atomisp_device *isp = asd->isp; - - if (zoom == asd->params.css_param.dz_config.dx && - zoom == asd->params.css_param.dz_config.dy) { - dev_dbg(isp->dev, "same zoom scale. 
skipped.\n"); - return; - } - - memset(&asd->params.css_param.dz_config, 0, - sizeof(struct ia_css_dz_config)); - asd->params.css_param.dz_config.dx = zoom; - asd->params.css_param.dz_config.dy = zoom; - - asd->params.css_param.update_flag.dz_config = - (struct atomisp_dz_config *) &asd->params.css_param.dz_config; - asd->params.css_update_params_needed = true; -} - -void atomisp_css_set_formats_config(struct atomisp_sub_device *asd, - struct atomisp_css_formats_config *formats_config) -{ - asd->params.config.formats_config = formats_config; -} - -int atomisp_css_get_wb_config(struct atomisp_sub_device *asd, - struct atomisp_wb_config *config) -{ - struct atomisp_css_wb_config wb_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&wb_config, 0, sizeof(struct atomisp_css_wb_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.wb_config = &wb_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, &wb_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_ob_config(struct atomisp_sub_device *asd, - struct atomisp_ob_config *config) -{ - struct atomisp_css_ob_config ob_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&ob_config, 0, sizeof(struct atomisp_css_ob_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.ob_config = &ob_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, &ob_config, sizeof(*config)); - - return 0; -} - -int 
atomisp_css_get_dp_config(struct atomisp_sub_device *asd, - struct atomisp_dp_config *config) -{ - struct atomisp_css_dp_config dp_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&dp_config, 0, sizeof(struct atomisp_css_dp_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.dp_config = &dp_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, &dp_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_de_config(struct atomisp_sub_device *asd, - struct atomisp_de_config *config) -{ - struct atomisp_css_de_config de_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&de_config, 0, sizeof(struct atomisp_css_de_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.de_config = &de_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, &de_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_nr_config(struct atomisp_sub_device *asd, - struct atomisp_nr_config *config) -{ - struct atomisp_css_nr_config nr_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&nr_config, 0, sizeof(struct atomisp_css_nr_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - - isp_config.nr_config = &nr_config; - 
ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, &nr_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_ee_config(struct atomisp_sub_device *asd, - struct atomisp_ee_config *config) -{ - struct atomisp_css_ee_config ee_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&ee_config, 0, sizeof(struct atomisp_css_ee_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.ee_config = &ee_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, &ee_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd, - struct atomisp_tnr_config *config) -{ - struct atomisp_css_tnr_config tnr_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&tnr_config, 0, sizeof(struct atomisp_css_tnr_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.tnr_config = &tnr_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, &tnr_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd, - struct atomisp_ctc_table *config) -{ - struct atomisp_css_ctc_table *tab; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - - tab = 
vzalloc(sizeof(struct atomisp_css_ctc_table)); - if (!tab) - return -ENOMEM; - - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.ctc_table = tab; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, tab, sizeof(*tab)); - vfree(tab); - - return 0; -} - -int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_gamma_table *config) -{ - struct atomisp_css_gamma_table *tab; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - - tab = vzalloc(sizeof(struct atomisp_css_gamma_table)); - if (!tab) - return -ENOMEM; - - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.gamma_table = tab; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - memcpy(config, tab, sizeof(*tab)); - vfree(tab); - - return 0; -} - -int atomisp_css_get_gc_config(struct atomisp_sub_device *asd, - struct atomisp_gc_config *config) -{ - struct atomisp_css_gc_config gc_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&gc_config, 0, sizeof(struct atomisp_css_gc_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.gc_config = &gc_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - /* Get gamma correction params from current setup */ - memcpy(config, &gc_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_3a_config(struct atomisp_sub_device *asd, - struct atomisp_3a_config *config) -{ - struct atomisp_css_3a_config 
s3a_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&s3a_config, 0, sizeof(struct atomisp_css_3a_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.s3a_config = &s3a_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - /* Get white balance from current setup */ - memcpy(config, &s3a_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_formats_config(struct atomisp_sub_device *asd, - struct atomisp_formats_config *config) -{ - struct atomisp_css_formats_config formats_config; - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&formats_config, 0, sizeof(formats_config)); - memset(&isp_config, 0, sizeof(isp_config)); - isp_config.formats_config = &formats_config; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - /* Get narrow gamma from current setup */ - memcpy(config, &formats_config, sizeof(*config)); - - return 0; -} - -int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd, - unsigned int *zoom) -{ - struct ia_css_dz_config dz_config; /** Digital Zoom */ - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, "%s called after streamoff, skipping.\n", - __func__); - return -EINVAL; - } - memset(&dz_config, 0, sizeof(struct ia_css_dz_config)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.dz_config = &dz_config; - ia_css_stream_get_isp_config( - 
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); - *zoom = dz_config.dx; - - return 0; -} - - -/* - * Function to set/get image stablization statistics - */ -int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, - struct atomisp_dis_statistics *stats) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_dis_buf *dis_buf; - unsigned long flags; - - if (asd->params.dvs_stat->hor_prod.odd_real == NULL || - asd->params.dvs_stat->hor_prod.odd_imag == NULL || - asd->params.dvs_stat->hor_prod.even_real == NULL || - asd->params.dvs_stat->hor_prod.even_imag == NULL || - asd->params.dvs_stat->ver_prod.odd_real == NULL || - asd->params.dvs_stat->ver_prod.odd_imag == NULL || - asd->params.dvs_stat->ver_prod.even_real == NULL || - asd->params.dvs_stat->ver_prod.even_imag == NULL) - return -EINVAL; - - /* isp needs to be streaming to get DIS statistics */ - spin_lock_irqsave(&isp->lock, flags); - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) { - spin_unlock_irqrestore(&isp->lock, flags); - return -EINVAL; - } - spin_unlock_irqrestore(&isp->lock, flags); - - if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0) - /* If the grid info in the argument differs from the current - grid info, we tell the caller to reset the grid size and - try again. 
*/ - return -EAGAIN; - - spin_lock_irqsave(&asd->dis_stats_lock, flags); - if (!asd->params.dis_proj_data_valid || list_empty(&asd->dis_stats)) { - spin_unlock_irqrestore(&asd->dis_stats_lock, flags); - dev_err(isp->dev, "dis statistics is not valid.\n"); - return -EAGAIN; - } - - dis_buf = list_entry(asd->dis_stats.next, - struct atomisp_dis_buf, list); - list_del_init(&dis_buf->list); - spin_unlock_irqrestore(&asd->dis_stats_lock, flags); - - if (dis_buf->dvs_map) - ia_css_translate_dvs2_statistics( - asd->params.dvs_stat, dis_buf->dvs_map); - else - ia_css_get_dvs2_statistics(asd->params.dvs_stat, - dis_buf->dis_data); - stats->exp_id = dis_buf->dis_data->exp_id; - - spin_lock_irqsave(&asd->dis_stats_lock, flags); - list_add_tail(&dis_buf->list, &asd->dis_stats); - spin_unlock_irqrestore(&asd->dis_stats_lock, flags); - - if (copy_to_user(stats->dvs2_stat.ver_prod.odd_real, - asd->params.dvs_stat->ver_prod.odd_real, - asd->params.dvs_ver_proj_bytes)) - return -EFAULT; - if (copy_to_user(stats->dvs2_stat.ver_prod.odd_imag, - asd->params.dvs_stat->ver_prod.odd_imag, - asd->params.dvs_ver_proj_bytes)) - return -EFAULT; - if (copy_to_user(stats->dvs2_stat.ver_prod.even_real, - asd->params.dvs_stat->ver_prod.even_real, - asd->params.dvs_ver_proj_bytes)) - return -EFAULT; - if (copy_to_user(stats->dvs2_stat.ver_prod.even_imag, - asd->params.dvs_stat->ver_prod.even_imag, - asd->params.dvs_ver_proj_bytes)) - return -EFAULT; - if (copy_to_user(stats->dvs2_stat.hor_prod.odd_real, - asd->params.dvs_stat->hor_prod.odd_real, - asd->params.dvs_hor_proj_bytes)) - return -EFAULT; - if (copy_to_user(stats->dvs2_stat.hor_prod.odd_imag, - asd->params.dvs_stat->hor_prod.odd_imag, - asd->params.dvs_hor_proj_bytes)) - return -EFAULT; - if (copy_to_user(stats->dvs2_stat.hor_prod.even_real, - asd->params.dvs_stat->hor_prod.even_real, - asd->params.dvs_hor_proj_bytes)) - return -EFAULT; - if (copy_to_user(stats->dvs2_stat.hor_prod.even_imag, - asd->params.dvs_stat->hor_prod.even_imag, - 
asd->params.dvs_hor_proj_bytes)) - return -EFAULT; - - return 0; -} - -struct atomisp_css_shading_table *atomisp_css_shading_table_alloc( - unsigned int width, unsigned int height) -{ - return ia_css_shading_table_alloc(width, height); -} - -void atomisp_css_set_shading_table(struct atomisp_sub_device *asd, - struct atomisp_css_shading_table *table) -{ - asd->params.config.shading_table = table; -} - -void atomisp_css_shading_table_free(struct atomisp_css_shading_table *table) -{ - ia_css_shading_table_free(table); -} - -struct atomisp_css_morph_table *atomisp_css_morph_table_allocate( - unsigned int width, unsigned int height) -{ - return ia_css_morph_table_allocate(width, height); -} - -void atomisp_css_set_morph_table(struct atomisp_sub_device *asd, - struct atomisp_css_morph_table *table) -{ - asd->params.config.morph_table = table; -} - -void atomisp_css_get_morph_table(struct atomisp_sub_device *asd, - struct atomisp_css_morph_table *table) -{ - struct ia_css_isp_config isp_config; - struct atomisp_device *isp = asd->isp; - - if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { - dev_err(isp->dev, - "%s called after streamoff, skipping.\n", __func__); - return; - } - memset(table, 0, sizeof(struct atomisp_css_morph_table)); - memset(&isp_config, 0, sizeof(struct ia_css_isp_config)); - isp_config.morph_table = table; - ia_css_stream_get_isp_config( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - &isp_config); -} - -void atomisp_css_morph_table_free(struct atomisp_css_morph_table *table) -{ - ia_css_morph_table_free(table); -} - -void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp, - unsigned int overlap) -{ - /* CSS 2.0 doesn't support this API. 
*/ - dev_dbg(isp->dev, "set cont prev start time is not supported.\n"); - return; -} - -void atomisp_css_acc_done(struct atomisp_sub_device *asd) -{ - complete(&asd->acc.acc_done); -} - -int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd) -{ - int ret = 0; - struct atomisp_device *isp = asd->isp; - - /* Unlock the isp mutex taken in IOCTL handler before sleeping! */ - rt_mutex_unlock(&isp->mutex); - if (wait_for_completion_interruptible_timeout(&asd->acc.acc_done, - ATOMISP_ISP_TIMEOUT_DURATION) == 0) { - dev_err(isp->dev, "<%s: completion timeout\n", __func__); - atomisp_css_debug_dump_sp_sw_debug_info(); - atomisp_css_debug_dump_debug_info(__func__); - ret = -EIO; - } - rt_mutex_lock(&isp->mutex); - - return ret; -} - -/* Set the ACC binary arguments */ -int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw) -{ - unsigned int mem; - - for (mem = 0; mem < ATOMISP_ACC_NR_MEMORY; mem++) { - if (acc_fw->args[mem].length == 0) - continue; - - ia_css_isp_param_set_css_mem_init(&acc_fw->fw->mem_initializers, - IA_CSS_PARAM_CLASS_PARAM, mem, - acc_fw->args[mem].css_ptr, - acc_fw->args[mem].length); - } - - return 0; -} - -/* Load acc binary extension */ -int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd, - struct atomisp_css_fw_info *fw, - enum atomisp_css_pipe_id pipe_id, - unsigned int type) -{ - struct atomisp_css_fw_info **hd; - - fw->next = NULL; - hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[pipe_id].acc_extension); - while (*hd) - hd = &(*hd)->next; - *hd = fw; - - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .update_pipe[pipe_id] = true; - return 0; -} - -/* Unload acc binary extension */ -void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd, - struct atomisp_css_fw_info *fw, - enum atomisp_css_pipe_id pipe_id) -{ - struct atomisp_css_fw_info **hd; - - hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[pipe_id].acc_extension); - while (*hd && *hd != fw) - hd = 
&(*hd)->next; - if (!*hd) { - dev_err(asd->isp->dev, "did not find acc fw for removal\n"); - return; - } - *hd = fw->next; - fw->next = NULL; - - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .update_pipe[pipe_id] = true; -} - -int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd) -{ - struct atomisp_device *isp = asd->isp; - struct ia_css_pipe_config *pipe_config; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - - if (stream_env->acc_stream) { - if (stream_env->acc_stream_state == CSS_STREAM_STARTED) { - if (ia_css_stream_stop(stream_env->acc_stream) - != IA_CSS_SUCCESS) { - dev_err(isp->dev, "stop acc_stream failed.\n"); - return -EBUSY; - } - } - - if (ia_css_stream_destroy(stream_env->acc_stream) - != IA_CSS_SUCCESS) { - dev_err(isp->dev, "destroy acc_stream failed.\n"); - return -EBUSY; - } - stream_env->acc_stream = NULL; - } - - pipe_config = &stream_env->pipe_configs[CSS_PIPE_ID_ACC]; - ia_css_pipe_config_defaults(pipe_config); - asd->acc.acc_stages = kzalloc(MAX_ACC_STAGES * - sizeof(void *), GFP_KERNEL); - if (!asd->acc.acc_stages) - return -ENOMEM; - pipe_config->acc_stages = asd->acc.acc_stages; - pipe_config->mode = IA_CSS_PIPE_MODE_ACC; - pipe_config->num_acc_stages = 0; - - /* - * We delay the ACC pipeline creation to atomisp_css_start_acc_pipe, - * because pipe configuration will soon be changed by - * atomisp_css_load_acc_binary() - */ - return 0; -} - -int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - struct ia_css_pipe_config *pipe_config = - &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC]; - - if (ia_css_pipe_create(pipe_config, - &stream_env->pipes[IA_CSS_PIPE_ID_ACC]) != IA_CSS_SUCCESS) { - dev_err(isp->dev, "%s: ia_css_pipe_create failed\n", - __func__); - return -EBADE; - } - - memset(&stream_env->acc_stream_config, 0, - sizeof(struct 
ia_css_stream_config)); - if (ia_css_stream_create(&stream_env->acc_stream_config, 1, - &stream_env->pipes[IA_CSS_PIPE_ID_ACC], - &stream_env->acc_stream) != IA_CSS_SUCCESS) { - dev_err(isp->dev, "%s: create acc_stream error.\n", __func__); - return -EINVAL; - } - stream_env->acc_stream_state = CSS_STREAM_CREATED; - - init_completion(&asd->acc.acc_done); - asd->acc.pipeline = stream_env->pipes[IA_CSS_PIPE_ID_ACC]; - - atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false); - - if (ia_css_start_sp() != IA_CSS_SUCCESS) { - dev_err(isp->dev, "start sp error.\n"); - return -EIO; - } - - if (ia_css_stream_start(stream_env->acc_stream) - != IA_CSS_SUCCESS) { - dev_err(isp->dev, "acc_stream start error.\n"); - return -EIO; - } - - stream_env->acc_stream_state = CSS_STREAM_STARTED; - return 0; -} - -int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - if (stream_env->acc_stream_state == CSS_STREAM_STARTED) { - ia_css_stream_stop(stream_env->acc_stream); - stream_env->acc_stream_state = CSS_STREAM_STOPPED; - } - return 0; -} - -void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd) -{ - struct atomisp_stream_env *stream_env = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]; - if (stream_env->acc_stream) { - if (ia_css_stream_destroy(stream_env->acc_stream) - != IA_CSS_SUCCESS) - dev_warn(asd->isp->dev, - "destroy acc_stream failed.\n"); - stream_env->acc_stream = NULL; - } - - if (stream_env->pipes[IA_CSS_PIPE_ID_ACC]) { - if (ia_css_pipe_destroy(stream_env->pipes[IA_CSS_PIPE_ID_ACC]) - != IA_CSS_SUCCESS) - dev_warn(asd->isp->dev, - "destroy ACC pipe failed.\n"); - stream_env->pipes[IA_CSS_PIPE_ID_ACC] = NULL; - stream_env->update_pipe[IA_CSS_PIPE_ID_ACC] = false; - ia_css_pipe_config_defaults( - &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC]); - ia_css_pipe_extra_config_defaults( - &stream_env->pipe_extra_configs[IA_CSS_PIPE_ID_ACC]); - } - asd->acc.pipeline 
= NULL; - - /* css 2.0 API limitation: ia_css_stop_sp() could be only called after - * destroy all pipes - */ - ia_css_stop_sp(); - - kfree(asd->acc.acc_stages); - asd->acc.acc_stages = NULL; - - atomisp_freq_scaling(asd->isp, ATOMISP_DFS_MODE_LOW, false); -} - -int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd, - struct atomisp_css_fw_info *fw, - unsigned int index) -{ - struct ia_css_pipe_config *pipe_config = - &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL] - .pipe_configs[IA_CSS_PIPE_ID_ACC]; - - if (index >= MAX_ACC_STAGES) { - dev_dbg(asd->isp->dev, "%s: index(%d) out of range\n", - __func__, index); - return -ENOMEM; - } - - pipe_config->acc_stages[index] = fw; - pipe_config->num_acc_stages = index + 1; - pipe_config->acc_num_execs = 1; - - return 0; -} - -static struct atomisp_sub_device *__get_atomisp_subdev( - struct ia_css_pipe *css_pipe, - struct atomisp_device *isp, - enum atomisp_input_stream_id *stream_id) -{ - int i, j, k; - struct atomisp_sub_device *asd; - struct atomisp_stream_env *stream_env; - - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED && - !asd->acc.pipeline) - continue; - for (j = 0; j < ATOMISP_INPUT_STREAM_NUM; j++) { - stream_env = &asd->stream_env[j]; - for (k = 0; k < IA_CSS_PIPE_ID_NUM; k++) { - if (stream_env->pipes[k] && - stream_env->pipes[k] == css_pipe) { - *stream_id = j; - return asd; - } - } - } - } - - return NULL; -} - -int atomisp_css_isr_thread(struct atomisp_device *isp, - bool *frame_done_found, - bool *css_pipe_done) -{ - enum atomisp_input_stream_id stream_id = 0; - struct atomisp_css_event current_event; - struct atomisp_sub_device *asd; -#ifndef ISP2401 - bool reset_wdt_timer[MAX_STREAM_NUM] = {false}; -#endif - int i; - - while (!atomisp_css_dequeue_event(¤t_event)) { - if (current_event.event.type == - IA_CSS_EVENT_TYPE_FW_ASSERT) { - /* - * Received FW assertion signal, - * trigger WDT to recover - */ - dev_err(isp->dev, 
"%s: ISP reports FW_ASSERT event! fw_assert_module_id %d fw_assert_line_no %d\n", - __func__, - current_event.event.fw_assert_module_id, - current_event.event.fw_assert_line_no); - for (i = 0; i < isp->num_of_streams; i++) - atomisp_wdt_stop(&isp->asd[i], 0); -#ifndef ISP2401 - atomisp_wdt(&isp->asd[0].wdt); -#else - queue_work(isp->wdt_work_queue, &isp->wdt_work); -#endif - return -EINVAL; - } else if (current_event.event.type == IA_CSS_EVENT_TYPE_FW_WARNING) { - dev_warn(isp->dev, "%s: ISP reports warning, code is %d, exp_id %d\n", - __func__, current_event.event.fw_warning, - current_event.event.exp_id); - continue; - } - - asd = __get_atomisp_subdev(current_event.event.pipe, - isp, &stream_id); - if (!asd) { - if (current_event.event.type == CSS_EVENT_TIMER) - dev_dbg(isp->dev, - "event: Timer event."); - else - dev_warn(isp->dev, "%s:no subdev.event:%d", - __func__, - current_event.event.type); - continue; - } - - atomisp_css_temp_pipe_to_pipe_id(asd, ¤t_event); - switch (current_event.event.type) { - case CSS_EVENT_OUTPUT_FRAME_DONE: - frame_done_found[asd->index] = true; - atomisp_buf_done(asd, 0, CSS_BUFFER_TYPE_OUTPUT_FRAME, - current_event.pipe, true, stream_id); -#ifndef ISP2401 - reset_wdt_timer[asd->index] = true; /* ISP running */ -#endif - break; - case CSS_EVENT_SEC_OUTPUT_FRAME_DONE: - frame_done_found[asd->index] = true; - atomisp_buf_done(asd, 0, CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME, - current_event.pipe, true, stream_id); -#ifndef ISP2401 - reset_wdt_timer[asd->index] = true; /* ISP running */ -#endif - break; - case CSS_EVENT_3A_STATISTICS_DONE: - atomisp_buf_done(asd, 0, - CSS_BUFFER_TYPE_3A_STATISTICS, - current_event.pipe, - false, stream_id); - break; - case CSS_EVENT_METADATA_DONE: - atomisp_buf_done(asd, 0, - CSS_BUFFER_TYPE_METADATA, - current_event.pipe, - false, stream_id); - break; - case CSS_EVENT_VF_OUTPUT_FRAME_DONE: - atomisp_buf_done(asd, 0, - CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, - current_event.pipe, true, stream_id); -#ifndef ISP2401 - 
reset_wdt_timer[asd->index] = true; /* ISP running */ -#endif - break; - case CSS_EVENT_SEC_VF_OUTPUT_FRAME_DONE: - atomisp_buf_done(asd, 0, - CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME, - current_event.pipe, true, stream_id); -#ifndef ISP2401 - reset_wdt_timer[asd->index] = true; /* ISP running */ -#endif - break; - case CSS_EVENT_DIS_STATISTICS_DONE: - atomisp_buf_done(asd, 0, - CSS_BUFFER_TYPE_DIS_STATISTICS, - current_event.pipe, - false, stream_id); - break; - case CSS_EVENT_PIPELINE_DONE: - css_pipe_done[asd->index] = true; - break; - case CSS_EVENT_ACC_STAGE_COMPLETE: - atomisp_acc_done(asd, current_event.event.fw_handle); - break; - default: - dev_dbg(isp->dev, "unhandled css stored event: 0x%x\n", - current_event.event.type); - break; - } - } -#ifndef ISP2401 - /* If there are no buffers queued then - * delete wdt timer. */ - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - if (!asd) - continue; - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - if (!atomisp_buffers_queued(asd)) - atomisp_wdt_stop(asd, false); - else if (reset_wdt_timer[i]) - /* SOF irq should not reset wdt timer. 
*/ - atomisp_wdt_refresh(asd, - ATOMISP_WDT_KEEP_CURRENT_DELAY); - } -#endif - - return 0; -} - -bool atomisp_css_valid_sof(struct atomisp_device *isp) -{ - unsigned int i, j; - - /* Loop for each css stream */ - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - /* Loop for each css vc stream */ - for (j = 0; j < ATOMISP_INPUT_STREAM_NUM; j++) { - if (asd->stream_env[j].stream && - asd->stream_env[j].stream_config.mode == - IA_CSS_INPUT_MODE_BUFFERED_SENSOR) - return false; - } - } - - return true; -} - -int atomisp_css_debug_dump_isp_binary(void) -{ - ia_css_debug_dump_isp_binary(); - return 0; -} - -int atomisp_css_dump_sp_raw_copy_linecount(bool reduced) -{ - sh_css_dump_sp_raw_copy_linecount(reduced); - return 0; -} - -int atomisp_css_dump_blob_infor(void) -{ - struct ia_css_blob_descr *bd = sh_css_blob_info; - unsigned int i, nm = sh_css_num_binaries; - - if (nm == 0) - return -EPERM; - if (bd == NULL) - return -EPERM; - - for (i = 1; i < sh_css_num_binaries; i++) - dev_dbg(atomisp_dev, "Num%d binary id is %d, name is %s\n", i, - bd[i-1].header.info.isp.sp.id, bd[i-1].name); - - return 0; -} - -void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd, - uint32_t isp_config_id) -{ - asd->params.config.isp_config_id = isp_config_id; -} - -void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd, - struct atomisp_css_frame *output_frame) -{ - asd->params.config.output_frame = output_frame; -} - -int atomisp_get_css_dbgfunc(void) -{ - return dbg_func; -} - -int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt) -{ - int ret; - - ret = __set_css_print_env(isp, opt); - if (ret == 0) - dbg_func = opt; - - return ret; -} -void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable) -{ - ia_css_en_dz_capt_pipe( - asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - enable); -} - -struct atomisp_css_dvs_grid_info *atomisp_css_get_dvs_grid_info( - struct 
atomisp_css_grid_info *grid_info) -{ - if (!grid_info) - return NULL; - -#ifdef IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED - return &grid_info->dvs_grid.dvs_grid_info; -#else - return &grid_info->dvs_grid; -#endif -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h deleted file mode 100644 index a06c5b6e8027..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Support for Clovertrail PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __ATOMISP_COMPAT_CSS20_H__ -#define __ATOMISP_COMPAT_CSS20_H__ - -#include - -#include "ia_css.h" -#include "ia_css_types.h" -#include "ia_css_acc_types.h" -#include "sh_css_legacy.h" - -#define ATOMISP_CSS2_PIPE_MAX 2 -#define ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES 3 -#define ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN 4 -#define ATOMISP_CSS2_NUM_DVS_FRAME_DELAY 2 - -#define atomisp_css_pipe_id ia_css_pipe_id -#define atomisp_css_pipeline ia_css_pipe -#define atomisp_css_buffer_type ia_css_buffer_type -#define atomisp_css_dis_data ia_css_isp_dvs_statistics -#define atomisp_css_irq_info ia_css_irq_info -#define atomisp_css_isp_config ia_css_isp_config -#define atomisp_css_bayer_order ia_css_bayer_order -#define atomisp_css_capture_mode ia_css_capture_mode -#define atomisp_css_input_mode ia_css_input_mode -#define atomisp_css_frame ia_css_frame -#define atomisp_css_frame_format ia_css_frame_format -#define atomisp_css_frame_info ia_css_frame_info -#define atomisp_css_dp_config ia_css_dp_config -#define atomisp_css_wb_config ia_css_wb_config -#define atomisp_css_cc_config ia_css_cc_config -#define atomisp_css_nr_config ia_css_nr_config -#define atomisp_css_ee_config ia_css_ee_config -#define atomisp_css_ob_config ia_css_ob_config -#define atomisp_css_de_config ia_css_de_config -#define atomisp_css_dz_config ia_css_dz_config -#define atomisp_css_ce_config ia_css_ce_config -#define atomisp_css_gc_config ia_css_gc_config -#define atomisp_css_tnr_config ia_css_tnr_config -#define atomisp_css_cnr_config ia_css_cnr_config -#define atomisp_css_ctc_config ia_css_ctc_config -#define atomisp_css_3a_config ia_css_3a_config -#define atomisp_css_ecd_config ia_css_ecd_config -#define atomisp_css_ynr_config ia_css_ynr_config -#define atomisp_css_fc_config ia_css_fc_config -#define atomisp_css_aa_config ia_css_aa_config -#define atomisp_css_baa_config ia_css_aa_config -#define atomisp_css_anr_config ia_css_anr_config -#define 
atomisp_css_xnr_config ia_css_xnr_config -#define atomisp_css_macc_config ia_css_macc_config -#define atomisp_css_gamma_table ia_css_gamma_table -#define atomisp_css_ctc_table ia_css_ctc_table -#define atomisp_css_macc_table ia_css_macc_table -#define atomisp_css_xnr_table ia_css_xnr_table -#define atomisp_css_rgb_gamma_table ia_css_rgb_gamma_table -#define atomisp_css_anr_thres ia_css_anr_thres -#define atomisp_css_dvs_6axis ia_css_dvs_6axis_config -#define atomisp_css_grid_info ia_css_grid_info -#define atomisp_css_3a_grid_info ia_css_3a_grid_info -#define atomisp_css_dvs_grid_info ia_css_dvs_grid_info -#define atomisp_css_shading_table ia_css_shading_table -#define atomisp_css_morph_table ia_css_morph_table -#define atomisp_css_dvs_6axis_config ia_css_dvs_6axis_config -#define atomisp_css_fw_info ia_css_fw_info -#define atomisp_css_formats_config ia_css_formats_config - -#define CSS_PIPE_ID_PREVIEW IA_CSS_PIPE_ID_PREVIEW -#define CSS_PIPE_ID_COPY IA_CSS_PIPE_ID_COPY -#define CSS_PIPE_ID_VIDEO IA_CSS_PIPE_ID_VIDEO -#define CSS_PIPE_ID_CAPTURE IA_CSS_PIPE_ID_CAPTURE -#define CSS_PIPE_ID_ACC IA_CSS_PIPE_ID_ACC -#define CSS_PIPE_ID_YUVPP IA_CSS_PIPE_ID_YUVPP -#define CSS_PIPE_ID_NUM IA_CSS_PIPE_ID_NUM - -#define CSS_INPUT_MODE_SENSOR IA_CSS_INPUT_MODE_BUFFERED_SENSOR -#define CSS_INPUT_MODE_FIFO IA_CSS_INPUT_MODE_FIFO -#define CSS_INPUT_MODE_TPG IA_CSS_INPUT_MODE_TPG -#define CSS_INPUT_MODE_PRBS IA_CSS_INPUT_MODE_PRBS -#define CSS_INPUT_MODE_MEMORY IA_CSS_INPUT_MODE_MEMORY - -#define CSS_IRQ_INFO_CSS_RECEIVER_ERROR IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR -#define CSS_IRQ_INFO_EVENTS_READY IA_CSS_IRQ_INFO_EVENTS_READY -#define CSS_IRQ_INFO_INPUT_SYSTEM_ERROR \ - IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR -#define CSS_IRQ_INFO_IF_ERROR IA_CSS_IRQ_INFO_IF_ERROR - -#define CSS_BUFFER_TYPE_NUM IA_CSS_BUFFER_TYPE_NUM - -#define CSS_FRAME_FLASH_STATE_NONE IA_CSS_FRAME_FLASH_STATE_NONE -#define CSS_FRAME_FLASH_STATE_PARTIAL IA_CSS_FRAME_FLASH_STATE_PARTIAL -#define 
CSS_FRAME_FLASH_STATE_FULL IA_CSS_FRAME_FLASH_STATE_FULL - -#define CSS_BAYER_ORDER_GRBG IA_CSS_BAYER_ORDER_GRBG -#define CSS_BAYER_ORDER_RGGB IA_CSS_BAYER_ORDER_RGGB -#define CSS_BAYER_ORDER_BGGR IA_CSS_BAYER_ORDER_BGGR -#define CSS_BAYER_ORDER_GBRG IA_CSS_BAYER_ORDER_GBRG - -/* - * Hide IA_ naming difference in otherwise common CSS macros. - */ -#define CSS_ID(val) (IA_ ## val) -#define CSS_EVENT(val) (IA_CSS_EVENT_TYPE_ ## val) -#define CSS_FORMAT(val) (ATOMISP_INPUT_FORMAT_ ## val) - -#define CSS_EVENT_PORT_EOF CSS_EVENT(PORT_EOF) -#define CSS_EVENT_FRAME_TAGGED CSS_EVENT(FRAME_TAGGED) - -#define CSS_MIPI_FRAME_BUFFER_SIZE_1 0x60000 -#define CSS_MIPI_FRAME_BUFFER_SIZE_2 0x80000 - -struct atomisp_device; -struct atomisp_sub_device; - -#define MAX_STREAMS_PER_CHANNEL 2 - -/* - * These are used to indicate the css stream state, corresponding - * stream handling can be done via judging the different state. - */ -enum atomisp_css_stream_state { - CSS_STREAM_UNINIT, - CSS_STREAM_CREATED, - CSS_STREAM_STARTED, - CSS_STREAM_STOPPED, -}; - -/* - * Sensor of external ISP can send multiple steams with different mipi data - * type in the same virtual channel. 
This information needs to come from the - * sensor or external ISP - */ -struct atomisp_css_isys_config_info { - unsigned int input_format; - unsigned int width; - unsigned int height; -}; - -struct atomisp_stream_env { - struct ia_css_stream *stream; - struct ia_css_stream_config stream_config; - struct ia_css_stream_info stream_info; - struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM]; - struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM]; - struct ia_css_pipe_config pipe_configs[IA_CSS_PIPE_ID_NUM]; - struct ia_css_pipe_extra_config pipe_extra_configs[IA_CSS_PIPE_ID_NUM]; - bool update_pipe[IA_CSS_PIPE_ID_NUM]; - enum atomisp_css_stream_state stream_state; - struct ia_css_stream *acc_stream; - enum atomisp_css_stream_state acc_stream_state; - struct ia_css_stream_config acc_stream_config; - unsigned int ch_id; /* virtual channel ID */ - unsigned int isys_configs; - struct atomisp_css_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL]; -}; - -struct atomisp_css_env { - struct ia_css_env isp_css_env; - struct ia_css_fw isp_css_fw; -}; - -struct atomisp_s3a_buf { - struct ia_css_isp_3a_statistics *s3a_data; - struct ia_css_isp_3a_statistics_map *s3a_map; - struct list_head list; -}; - -struct atomisp_dis_buf { - struct atomisp_css_dis_data *dis_data; - struct ia_css_isp_dvs_statistics_map *dvs_map; - struct list_head list; -}; - -struct atomisp_css_buffer { - struct ia_css_buffer css_buffer; -}; - -struct atomisp_css_event { - enum atomisp_css_pipe_id pipe; - struct ia_css_event event; -}; - -void atomisp_css_set_macc_config(struct atomisp_sub_device *asd, - struct atomisp_css_macc_config *macc_config); - -void atomisp_css_set_ecd_config(struct atomisp_sub_device *asd, - struct atomisp_css_ecd_config *ecd_config); - -void atomisp_css_set_ynr_config(struct atomisp_sub_device *asd, - struct atomisp_css_ynr_config *ynr_config); - -void atomisp_css_set_fc_config(struct atomisp_sub_device *asd, - struct atomisp_css_fc_config *fc_config); - -void 
atomisp_css_set_aa_config(struct atomisp_sub_device *asd, - struct atomisp_css_aa_config *aa_config); - -void atomisp_css_set_baa_config(struct atomisp_sub_device *asd, - struct atomisp_css_baa_config *baa_config); - -void atomisp_css_set_anr_config(struct atomisp_sub_device *asd, - struct atomisp_css_anr_config *anr_config); - -void atomisp_css_set_xnr_config(struct atomisp_sub_device *asd, - struct atomisp_css_xnr_config *xnr_config); - -void atomisp_css_set_cnr_config(struct atomisp_sub_device *asd, - struct atomisp_css_cnr_config *cnr_config); - -void atomisp_css_set_ctc_config(struct atomisp_sub_device *asd, - struct atomisp_css_ctc_config *ctc_config); - -void atomisp_css_set_yuv2rgb_cc_config(struct atomisp_sub_device *asd, - struct atomisp_css_cc_config *yuv2rgb_cc_config); - -void atomisp_css_set_rgb2yuv_cc_config(struct atomisp_sub_device *asd, - struct atomisp_css_cc_config *rgb2yuv_cc_config); - -void atomisp_css_set_xnr_table(struct atomisp_sub_device *asd, - struct atomisp_css_xnr_table *xnr_table); - -void atomisp_css_set_r_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_css_rgb_gamma_table *r_gamma_table); - -void atomisp_css_set_g_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_css_rgb_gamma_table *g_gamma_table); - -void atomisp_css_set_b_gamma_table(struct atomisp_sub_device *asd, - struct atomisp_css_rgb_gamma_table *b_gamma_table); - -void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd, - struct atomisp_css_anr_thres *anr_thres); - -int atomisp_css_check_firmware_version(struct atomisp_device *isp); - -int atomisp_css_load_firmware(struct atomisp_device *isp); - -void atomisp_css_unload_firmware(struct atomisp_device *isp); - -void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd, - struct atomisp_css_dvs_6axis *dvs_6axis); - -unsigned int atomisp_css_debug_get_dtrace_level(void); - -int atomisp_css_debug_dump_isp_binary(void); - -int atomisp_css_dump_sp_raw_copy_linecount(bool reduced); - -int 
atomisp_css_dump_blob_infor(void); - -void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd, - uint32_t isp_config_id); - -void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd, - struct atomisp_css_frame *output_frame); - -int atomisp_get_css_dbgfunc(void); - -int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt); -struct atomisp_css_dvs_grid_info *atomisp_css_get_dvs_grid_info( - struct atomisp_css_grid_info *grid_info); -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c deleted file mode 100644 index b86ab107a9e5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c +++ /dev/null @@ -1,1225 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#ifdef CONFIG_COMPAT -#include - -#include - -#include "atomisp_internal.h" -#include "atomisp_compat.h" -#include "atomisp_ioctl.h" -#include "atomisp_compat_ioctl32.h" - -static int get_atomisp_histogram32(struct atomisp_histogram *kp, - struct atomisp_histogram32 __user *up) -{ - compat_uptr_t tmp; - - if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_histogram32)) || - get_user(kp->num_elements, &up->num_elements) || - get_user(tmp, &up->data)) - return -EFAULT; - - kp->data = compat_ptr(tmp); - return 0; -} - -static int put_atomisp_histogram32(struct atomisp_histogram *kp, - struct atomisp_histogram32 __user *up) -{ - compat_uptr_t tmp = (compat_uptr_t)((uintptr_t)kp->data); - - if (!access_ok(VERIFY_WRITE, up, sizeof(struct atomisp_histogram32)) || - put_user(kp->num_elements, &up->num_elements) || - put_user(tmp, &up->data)) - return -EFAULT; - - return 0; -} - -static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, - struct v4l2_pix_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, - struct v4l2_pix_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format))) - return -EFAULT; - return 0; -} - -static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, - struct v4l2_framebuffer32 __user *up) -{ - compat_uptr_t tmp; - - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) || - get_user(tmp, &up->base) || - get_user(kp->capability, &up->capability) || - get_user(kp->flags, &up->flags)) - return -EFAULT; - - kp->base = (void __force *)compat_ptr(tmp); - get_v4l2_pix_format((struct v4l2_pix_format *)&kp->fmt, &up->fmt); - return 0; -} - -static int get_atomisp_dis_statistics32(struct atomisp_dis_statistics *kp, - struct atomisp_dis_statistics32 __user *up) -{ - compat_uptr_t hor_prod_odd_real; - compat_uptr_t hor_prod_odd_imag; - compat_uptr_t 
hor_prod_even_real; - compat_uptr_t hor_prod_even_imag; - compat_uptr_t ver_prod_odd_real; - compat_uptr_t ver_prod_odd_imag; - compat_uptr_t ver_prod_even_real; - compat_uptr_t ver_prod_even_imag; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_dis_statistics32)) || - copy_from_user(kp, up, sizeof(struct atomisp_dvs_grid_info)) || - get_user(hor_prod_odd_real, - &up->dvs2_stat.hor_prod.odd_real) || - get_user(hor_prod_odd_imag, - &up->dvs2_stat.hor_prod.odd_imag) || - get_user(hor_prod_even_real, - &up->dvs2_stat.hor_prod.even_real) || - get_user(hor_prod_even_imag, - &up->dvs2_stat.hor_prod.even_imag) || - get_user(ver_prod_odd_real, - &up->dvs2_stat.ver_prod.odd_real) || - get_user(ver_prod_odd_imag, - &up->dvs2_stat.ver_prod.odd_imag) || - get_user(ver_prod_even_real, - &up->dvs2_stat.ver_prod.even_real) || - get_user(ver_prod_even_imag, - &up->dvs2_stat.ver_prod.even_imag) || - get_user(kp->exp_id, &up->exp_id)) - return -EFAULT; - - kp->dvs2_stat.hor_prod.odd_real = compat_ptr(hor_prod_odd_real); - kp->dvs2_stat.hor_prod.odd_imag = compat_ptr(hor_prod_odd_imag); - kp->dvs2_stat.hor_prod.even_real = compat_ptr(hor_prod_even_real); - kp->dvs2_stat.hor_prod.even_imag = compat_ptr(hor_prod_even_imag); - kp->dvs2_stat.ver_prod.odd_real = compat_ptr(ver_prod_odd_real); - kp->dvs2_stat.ver_prod.odd_imag = compat_ptr(ver_prod_odd_imag); - kp->dvs2_stat.ver_prod.even_real = compat_ptr(ver_prod_even_real); - kp->dvs2_stat.ver_prod.even_imag = compat_ptr(ver_prod_even_imag); - return 0; -} - -static int put_atomisp_dis_statistics32(struct atomisp_dis_statistics *kp, - struct atomisp_dis_statistics32 __user *up) -{ - compat_uptr_t hor_prod_odd_real = - (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.odd_real); - compat_uptr_t hor_prod_odd_imag = - (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.odd_imag); - compat_uptr_t hor_prod_even_real = - (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.even_real); - compat_uptr_t hor_prod_even_imag = - 
(compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.even_imag); - compat_uptr_t ver_prod_odd_real = - (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.odd_real); - compat_uptr_t ver_prod_odd_imag = - (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.odd_imag); - compat_uptr_t ver_prod_even_real = - (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.even_real); - compat_uptr_t ver_prod_even_imag = - (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.even_imag); - - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_dis_statistics32)) || - copy_to_user(up, kp, sizeof(struct atomisp_dvs_grid_info)) || - put_user(hor_prod_odd_real, - &up->dvs2_stat.hor_prod.odd_real) || - put_user(hor_prod_odd_imag, - &up->dvs2_stat.hor_prod.odd_imag) || - put_user(hor_prod_even_real, - &up->dvs2_stat.hor_prod.even_real) || - put_user(hor_prod_even_imag, - &up->dvs2_stat.hor_prod.even_imag) || - put_user(ver_prod_odd_real, - &up->dvs2_stat.ver_prod.odd_real) || - put_user(ver_prod_odd_imag, - &up->dvs2_stat.ver_prod.odd_imag) || - put_user(ver_prod_even_real, - &up->dvs2_stat.ver_prod.even_real) || - put_user(ver_prod_even_imag, - &up->dvs2_stat.ver_prod.even_imag) || - put_user(kp->exp_id, &up->exp_id)) - return -EFAULT; - - return 0; -} - -static int get_atomisp_dis_coefficients32(struct atomisp_dis_coefficients *kp, - struct atomisp_dis_coefficients32 __user *up) -{ - compat_uptr_t hor_coefs_odd_real; - compat_uptr_t hor_coefs_odd_imag; - compat_uptr_t hor_coefs_even_real; - compat_uptr_t hor_coefs_even_imag; - compat_uptr_t ver_coefs_odd_real; - compat_uptr_t ver_coefs_odd_imag; - compat_uptr_t ver_coefs_even_real; - compat_uptr_t ver_coefs_even_imag; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_dis_coefficients32)) || - copy_from_user(kp, up, sizeof(struct atomisp_dvs_grid_info)) || - get_user(hor_coefs_odd_real, &up->hor_coefs.odd_real) || - get_user(hor_coefs_odd_imag, &up->hor_coefs.odd_imag) || - get_user(hor_coefs_even_real, &up->hor_coefs.even_real) || - 
get_user(hor_coefs_even_imag, &up->hor_coefs.even_imag) || - get_user(ver_coefs_odd_real, &up->ver_coefs.odd_real) || - get_user(ver_coefs_odd_imag, &up->ver_coefs.odd_imag) || - get_user(ver_coefs_even_real, &up->ver_coefs.even_real) || - get_user(ver_coefs_even_imag, &up->ver_coefs.even_imag)) - return -EFAULT; - - kp->hor_coefs.odd_real = compat_ptr(hor_coefs_odd_real); - kp->hor_coefs.odd_imag = compat_ptr(hor_coefs_odd_imag); - kp->hor_coefs.even_real = compat_ptr(hor_coefs_even_real); - kp->hor_coefs.even_imag = compat_ptr(hor_coefs_even_imag); - kp->ver_coefs.odd_real = compat_ptr(ver_coefs_odd_real); - kp->ver_coefs.odd_imag = compat_ptr(ver_coefs_odd_imag); - kp->ver_coefs.even_real = compat_ptr(ver_coefs_even_real); - kp->ver_coefs.even_imag = compat_ptr(ver_coefs_even_imag); - return 0; -} - -static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config *kp, - struct atomisp_dvs_6axis_config32 __user *up) -{ compat_uptr_t xcoords_y; - compat_uptr_t ycoords_y; - compat_uptr_t xcoords_uv; - compat_uptr_t ycoords_uv; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_dvs_6axis_config32)) || - get_user(kp->exp_id, &up->exp_id) || - get_user(kp->width_y, &up->width_y) || - get_user(kp->height_y, &up->height_y) || - get_user(kp->width_uv, &up->width_uv) || - get_user(kp->height_uv, &up->height_uv) || - get_user(xcoords_y, &up->xcoords_y) || - get_user(ycoords_y, &up->ycoords_y) || - get_user(xcoords_uv, &up->xcoords_uv) || - get_user(ycoords_uv, &up->ycoords_uv)) - return -EFAULT; - - kp->xcoords_y = (void __force *)compat_ptr(xcoords_y); - kp->ycoords_y = (void __force *)compat_ptr(ycoords_y); - kp->xcoords_uv = (void __force *)compat_ptr(xcoords_uv); - kp->ycoords_uv = (void __force *)compat_ptr(ycoords_uv); - return 0; -} - -static int get_atomisp_3a_statistics32(struct atomisp_3a_statistics *kp, - struct atomisp_3a_statistics32 __user *up) -{ - compat_uptr_t data; - compat_uptr_t rgby_data; - - if (!access_ok(VERIFY_READ, up, - 
sizeof(struct atomisp_3a_statistics32)) || - copy_from_user(kp, up, sizeof(struct atomisp_grid_info)) || - get_user(rgby_data, &up->rgby_data) || - get_user(data, &up->data) || - get_user(kp->exp_id, &up->exp_id) || - get_user(kp->isp_config_id, &up->isp_config_id)) - return -EFAULT; - - kp->data = compat_ptr(data); - kp->rgby_data = compat_ptr(rgby_data); - - return 0; -} - -static int put_atomisp_3a_statistics32(struct atomisp_3a_statistics *kp, - struct atomisp_3a_statistics32 __user *up) -{ - compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data); - compat_uptr_t rgby_data = (compat_uptr_t)((uintptr_t)kp->rgby_data); - - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_3a_statistics32)) || - copy_to_user(up, kp, sizeof(struct atomisp_grid_info)) || - put_user(rgby_data, &up->rgby_data) || - put_user(data, &up->data) || - put_user(kp->exp_id, &up->exp_id) || - put_user(kp->isp_config_id, &up->isp_config_id)) - return -EFAULT; - - return 0; -} - - -static int get_atomisp_metadata_stat32(struct atomisp_metadata *kp, - struct atomisp_metadata32 __user *up) -{ - compat_uptr_t data; - compat_uptr_t effective_width; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_metadata32)) || - get_user(data, &up->data) || - get_user(kp->width, &up->width) || - get_user(kp->height, &up->height) || - get_user(kp->stride, &up->stride) || - get_user(kp->exp_id, &up->exp_id) || - get_user(effective_width, &up->effective_width)) - return -EFAULT; - - kp->data = compat_ptr(data); - kp->effective_width = (void __force *)compat_ptr(effective_width); - return 0; -} - - -static int put_atomisp_metadata_stat32(struct atomisp_metadata *kp, - struct atomisp_metadata32 __user *up) -{ - compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data); - compat_uptr_t effective_width = - (compat_uptr_t)((uintptr_t)kp->effective_width); - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_metadata32)) || - put_user(data, &up->data) || - put_user(kp->width, &up->width) || - 
put_user(kp->height, &up->height) || - put_user(kp->stride, &up->stride) || - put_user(kp->exp_id, &up->exp_id) || - put_user(effective_width, &up->effective_width)) - return -EFAULT; - - return 0; -} - -static int put_atomisp_metadata_by_type_stat32( - struct atomisp_metadata_with_type *kp, - struct atomisp_metadata_with_type32 __user *up) -{ - compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data); - compat_uptr_t effective_width = - (compat_uptr_t)((uintptr_t)kp->effective_width); - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_metadata_with_type32)) || - put_user(data, &up->data) || - put_user(kp->width, &up->width) || - put_user(kp->height, &up->height) || - put_user(kp->stride, &up->stride) || - put_user(kp->exp_id, &up->exp_id) || - put_user(effective_width, &up->effective_width) || - put_user(kp->type, &up->type)) - return -EFAULT; - - return 0; -} - -static int get_atomisp_metadata_by_type_stat32( - struct atomisp_metadata_with_type *kp, - struct atomisp_metadata_with_type32 __user *up) -{ - compat_uptr_t data; - compat_uptr_t effective_width; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_metadata_with_type32)) || - get_user(data, &up->data) || - get_user(kp->width, &up->width) || - get_user(kp->height, &up->height) || - get_user(kp->stride, &up->stride) || - get_user(kp->exp_id, &up->exp_id) || - get_user(effective_width, &up->effective_width) || - get_user(kp->type, &up->type)) - return -EFAULT; - - kp->data = compat_ptr(data); - kp->effective_width = (void __force *)compat_ptr(effective_width); - return 0; -} - -static int get_atomisp_morph_table32(struct atomisp_morph_table *kp, - struct atomisp_morph_table32 __user *up) -{ - unsigned int n = ATOMISP_MORPH_TABLE_NUM_PLANES; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_morph_table32)) || - get_user(kp->enabled, &up->enabled) || - get_user(kp->width, &up->width) || - get_user(kp->height, &up->height)) - return -EFAULT; - - while (n-- > 0) { - uintptr_t *coord_kp 
= (uintptr_t *)&kp->coordinates_x[n]; - - if (get_user((*coord_kp), &up->coordinates_x[n])) - return -EFAULT; - - coord_kp = (uintptr_t *)&kp->coordinates_y[n]; - if (get_user((*coord_kp), &up->coordinates_y[n])) - return -EFAULT; - } - return 0; -} - -static int put_atomisp_morph_table32(struct atomisp_morph_table *kp, - struct atomisp_morph_table32 __user *up) -{ - unsigned int n = ATOMISP_MORPH_TABLE_NUM_PLANES; - - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_morph_table32)) || - put_user(kp->enabled, &up->enabled) || - put_user(kp->width, &up->width) || - put_user(kp->height, &up->height)) - return -EFAULT; - - while (n-- > 0) { - uintptr_t *coord_kp = (uintptr_t *)&kp->coordinates_x[n]; - - if (put_user((*coord_kp), &up->coordinates_x[n])) - return -EFAULT; - - coord_kp = (uintptr_t *)&kp->coordinates_y[n]; - if (put_user((*coord_kp), &up->coordinates_y[n])) - return -EFAULT; - } - return 0; -} - -static int get_atomisp_overlay32(struct atomisp_overlay *kp, - struct atomisp_overlay32 __user *up) -{ - compat_uptr_t frame; - if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_overlay32)) || - get_user(frame, &up->frame) || - get_user(kp->bg_y, &up->bg_y) || - get_user(kp->bg_u, &up->bg_u) || - get_user(kp->bg_v, &up->bg_v) || - get_user(kp->blend_input_perc_y, &up->blend_input_perc_y) || - get_user(kp->blend_input_perc_u, &up->blend_input_perc_u) || - get_user(kp->blend_input_perc_v, &up->blend_input_perc_v) || - get_user(kp->blend_overlay_perc_y, - &up->blend_overlay_perc_y) || - get_user(kp->blend_overlay_perc_u, - &up->blend_overlay_perc_u) || - get_user(kp->blend_overlay_perc_v, - &up->blend_overlay_perc_v) || - get_user(kp->blend_overlay_perc_u, - &up->blend_overlay_perc_u) || - get_user(kp->overlay_start_x, &up->overlay_start_y)) - return -EFAULT; - - kp->frame = (void __force *)compat_ptr(frame); - return 0; -} - -static int put_atomisp_overlay32(struct atomisp_overlay *kp, - struct atomisp_overlay32 __user *up) -{ - compat_uptr_t frame = 
(compat_uptr_t)((uintptr_t)kp->frame); - - if (!access_ok(VERIFY_WRITE, up, sizeof(struct atomisp_overlay32)) || - put_user(frame, &up->frame) || - put_user(kp->bg_y, &up->bg_y) || - put_user(kp->bg_u, &up->bg_u) || - put_user(kp->bg_v, &up->bg_v) || - put_user(kp->blend_input_perc_y, &up->blend_input_perc_y) || - put_user(kp->blend_input_perc_u, &up->blend_input_perc_u) || - put_user(kp->blend_input_perc_v, &up->blend_input_perc_v) || - put_user(kp->blend_overlay_perc_y, - &up->blend_overlay_perc_y) || - put_user(kp->blend_overlay_perc_u, - &up->blend_overlay_perc_u) || - put_user(kp->blend_overlay_perc_v, - &up->blend_overlay_perc_v) || - put_user(kp->blend_overlay_perc_u, - &up->blend_overlay_perc_u) || - put_user(kp->overlay_start_x, &up->overlay_start_y)) - return -EFAULT; - - return 0; -} - -static int get_atomisp_calibration_group32( - struct atomisp_calibration_group *kp, - struct atomisp_calibration_group32 __user *up) -{ - compat_uptr_t calb_grp_values; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_calibration_group32)) || - get_user(kp->size, &up->size) || - get_user(kp->type, &up->type) || - get_user(calb_grp_values, &up->calb_grp_values)) - return -EFAULT; - - kp->calb_grp_values = (void __force *)compat_ptr(calb_grp_values); - return 0; -} - -static int put_atomisp_calibration_group32( - struct atomisp_calibration_group *kp, - struct atomisp_calibration_group32 __user *up) -{ - compat_uptr_t calb_grp_values = - (compat_uptr_t)((uintptr_t)kp->calb_grp_values); - - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_calibration_group32)) || - put_user(kp->size, &up->size) || - put_user(kp->type, &up->type) || - put_user(calb_grp_values, &up->calb_grp_values)) - return -EFAULT; - - return 0; -} - -static int get_atomisp_acc_fw_load32(struct atomisp_acc_fw_load *kp, - struct atomisp_acc_fw_load32 __user *up) -{ - compat_uptr_t data; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_acc_fw_load32)) || - get_user(kp->size, 
&up->size) || - get_user(kp->fw_handle, &up->fw_handle) || - get_user(data, &up->data)) - return -EFAULT; - - kp->data = compat_ptr(data); - return 0; -} - -static int put_atomisp_acc_fw_load32(struct atomisp_acc_fw_load *kp, - struct atomisp_acc_fw_load32 __user *up) -{ - compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data); - - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_acc_fw_load32)) || - put_user(kp->size, &up->size) || - put_user(kp->fw_handle, &up->fw_handle) || - put_user(data, &up->data)) - return -EFAULT; - - return 0; -} - -static int get_atomisp_acc_fw_arg32(struct atomisp_acc_fw_arg *kp, - struct atomisp_acc_fw_arg32 __user *up) -{ - compat_uptr_t value; - - if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_acc_fw_arg32)) || - get_user(kp->fw_handle, &up->fw_handle) || - get_user(kp->index, &up->index) || - get_user(value, &up->value) || - get_user(kp->size, &up->size)) - return -EFAULT; - - kp->value = compat_ptr(value); - return 0; -} - -static int put_atomisp_acc_fw_arg32(struct atomisp_acc_fw_arg *kp, - struct atomisp_acc_fw_arg32 __user *up) -{ - compat_uptr_t value = (compat_uptr_t)((uintptr_t)kp->value); - - if (!access_ok(VERIFY_WRITE, up, sizeof(struct atomisp_acc_fw_arg32)) || - put_user(kp->fw_handle, &up->fw_handle) || - put_user(kp->index, &up->index) || - put_user(value, &up->value) || - put_user(kp->size, &up->size)) - return -EFAULT; - - return 0; -} - -static int get_v4l2_private_int_data32(struct v4l2_private_int_data *kp, - struct v4l2_private_int_data32 __user *up) -{ - compat_uptr_t data; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct v4l2_private_int_data32)) || - get_user(kp->size, &up->size) || - get_user(data, &up->data) || - get_user(kp->reserved[0], &up->reserved[0]) || - get_user(kp->reserved[1], &up->reserved[1])) - return -EFAULT; - - kp->data = compat_ptr(data); - return 0; -} - -static int put_v4l2_private_int_data32(struct v4l2_private_int_data *kp, - struct v4l2_private_int_data32 __user 
*up) -{ - compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data); - - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct v4l2_private_int_data32)) || - put_user(kp->size, &up->size) || - put_user(data, &up->data) || - put_user(kp->reserved[0], &up->reserved[0]) || - put_user(kp->reserved[1], &up->reserved[1])) - return -EFAULT; - - return 0; -} - -static int get_atomisp_shading_table32(struct atomisp_shading_table *kp, - struct atomisp_shading_table32 __user *up) -{ - unsigned int n = ATOMISP_NUM_SC_COLORS; - - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_shading_table32)) || - get_user(kp->enable, &up->enable) || - get_user(kp->sensor_width, &up->sensor_width) || - get_user(kp->sensor_height, &up->sensor_height) || - get_user(kp->width, &up->width) || - get_user(kp->height, &up->height) || - get_user(kp->fraction_bits, &up->fraction_bits)) - return -EFAULT; - - while (n-- > 0) { - uintptr_t *data_p = (uintptr_t *)&kp->data[n]; - - if (get_user((*data_p), &up->data[n])) - return -EFAULT; - } - return 0; -} - -static int get_atomisp_acc_map32(struct atomisp_acc_map *kp, - struct atomisp_acc_map32 __user *up) -{ - compat_uptr_t user_ptr; - - if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_acc_map32)) || - get_user(kp->flags, &up->flags) || - get_user(kp->length, &up->length) || - get_user(user_ptr, &up->user_ptr) || - get_user(kp->css_ptr, &up->css_ptr) || - get_user(kp->reserved[0], &up->reserved[0]) || - get_user(kp->reserved[1], &up->reserved[1]) || - get_user(kp->reserved[2], &up->reserved[2]) || - get_user(kp->reserved[3], &up->reserved[3])) - return -EFAULT; - - kp->user_ptr = compat_ptr(user_ptr); - return 0; -} - -static int put_atomisp_acc_map32(struct atomisp_acc_map *kp, - struct atomisp_acc_map32 __user *up) -{ - compat_uptr_t user_ptr = (compat_uptr_t)((uintptr_t)kp->user_ptr); - - if (!access_ok(VERIFY_WRITE, up, sizeof(struct atomisp_acc_map32)) || - put_user(kp->flags, &up->flags) || - put_user(kp->length, &up->length) || - 
put_user(user_ptr, &up->user_ptr) || - put_user(kp->css_ptr, &up->css_ptr) || - put_user(kp->reserved[0], &up->reserved[0]) || - put_user(kp->reserved[1], &up->reserved[1]) || - put_user(kp->reserved[2], &up->reserved[2]) || - put_user(kp->reserved[3], &up->reserved[3])) - return -EFAULT; - - return 0; -} - -static int get_atomisp_acc_s_mapped_arg32(struct atomisp_acc_s_mapped_arg *kp, - struct atomisp_acc_s_mapped_arg32 __user *up) -{ - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_acc_s_mapped_arg32)) || - get_user(kp->fw_handle, &up->fw_handle) || - get_user(kp->memory, &up->memory) || - get_user(kp->length, &up->length) || - get_user(kp->css_ptr, &up->css_ptr)) - return -EFAULT; - - return 0; -} - -static int put_atomisp_acc_s_mapped_arg32(struct atomisp_acc_s_mapped_arg *kp, - struct atomisp_acc_s_mapped_arg32 __user *up) -{ - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_acc_s_mapped_arg32)) || - put_user(kp->fw_handle, &up->fw_handle) || - put_user(kp->memory, &up->memory) || - put_user(kp->length, &up->length) || - put_user(kp->css_ptr, &up->css_ptr)) - return -EFAULT; - - return 0; -} - -static int get_atomisp_parameters32(struct atomisp_parameters *kp, - struct atomisp_parameters32 __user *up) -{ - int n = offsetof(struct atomisp_parameters32, output_frame) / - sizeof(compat_uptr_t); - unsigned int size, offset = 0; - void __user *user_ptr; - unsigned int stp, mtp, dcp, dscp = 0; - - if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_parameters32))) - return -EFAULT; - - while (n >= 0) { - compat_uptr_t __user *src = ((compat_uptr_t __user *)up) + n; - uintptr_t *dst = ((uintptr_t *)kp) + n; - - if (get_user((*dst), src)) - return -EFAULT; - n--; - } - if (get_user(kp->isp_config_id, &up->isp_config_id) || - get_user(kp->per_frame_setting, &up->per_frame_setting) || - get_user(stp, &up->shading_table) || - get_user(mtp, &up->morph_table) || - get_user(dcp, &up->dvs2_coefs) || - get_user(dscp, &up->dvs_6axis_config)) - return 
-EFAULT; - - { - union { - struct atomisp_shading_table shading_table; - struct atomisp_morph_table morph_table; - struct atomisp_dis_coefficients dvs2_coefs; - struct atomisp_dvs_6axis_config dvs_6axis_config; - } karg; - - size = sizeof(struct atomisp_shading_table) + - sizeof(struct atomisp_morph_table) + - sizeof(struct atomisp_dis_coefficients) + - sizeof(struct atomisp_dvs_6axis_config); - user_ptr = compat_alloc_user_space(size); - - /* handle shading table */ - if (stp != 0) { - if (get_atomisp_shading_table32(&karg.shading_table, - (struct atomisp_shading_table32 __user *) - (uintptr_t)stp)) - return -EFAULT; - - kp->shading_table = (void __force *)user_ptr + offset; - offset = sizeof(struct atomisp_shading_table); - if (!kp->shading_table) - return -EFAULT; - - if (copy_to_user((void __user *)kp->shading_table, - &karg.shading_table, - sizeof(struct atomisp_shading_table))) - return -EFAULT; - } - - /* handle morph table */ - if (mtp != 0) { - if (get_atomisp_morph_table32(&karg.morph_table, - (struct atomisp_morph_table32 __user *) - (uintptr_t)mtp)) - return -EFAULT; - - kp->morph_table = (void __force *)user_ptr + offset; - offset += sizeof(struct atomisp_morph_table); - if (!kp->morph_table) - return -EFAULT; - - if (copy_to_user((void __user *)kp->morph_table, - &karg.morph_table, - sizeof(struct atomisp_morph_table))) - return -EFAULT; - } - - /* handle dvs2 coefficients */ - if (dcp != 0) { - if (get_atomisp_dis_coefficients32(&karg.dvs2_coefs, - (struct atomisp_dis_coefficients32 __user *) - (uintptr_t)dcp)) - return -EFAULT; - - kp->dvs2_coefs = (void __force *)user_ptr + offset; - offset += sizeof(struct atomisp_dis_coefficients); - if (!kp->dvs2_coefs) - return -EFAULT; - - if (copy_to_user((void __user *)kp->dvs2_coefs, - &karg.dvs2_coefs, - sizeof(struct atomisp_dis_coefficients))) - return -EFAULT; - } - /* handle dvs 6axis configuration */ - if (dscp != 0) { - if (get_atomisp_dvs_6axis_config32(&karg.dvs_6axis_config, - (struct 
atomisp_dvs_6axis_config32 __user *) - (uintptr_t)dscp)) - return -EFAULT; - - kp->dvs_6axis_config = (void __force *)user_ptr + offset; - offset += sizeof(struct atomisp_dvs_6axis_config); - if (!kp->dvs_6axis_config) - return -EFAULT; - - if (copy_to_user((void __user *)kp->dvs_6axis_config, - &karg.dvs_6axis_config, - sizeof(struct atomisp_dvs_6axis_config))) - return -EFAULT; - } - } - return 0; -} - -static int get_atomisp_acc_fw_load_to_pipe32( - struct atomisp_acc_fw_load_to_pipe *kp, - struct atomisp_acc_fw_load_to_pipe32 __user *up) -{ - compat_uptr_t data; - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_acc_fw_load_to_pipe32)) || - get_user(kp->flags, &up->flags) || - get_user(kp->fw_handle, &up->fw_handle) || - get_user(kp->size, &up->size) || - get_user(kp->type, &up->type) || - get_user(kp->reserved[0], &up->reserved[0]) || - get_user(kp->reserved[1], &up->reserved[1]) || - get_user(kp->reserved[2], &up->reserved[2]) || - get_user(data, &up->data)) - return -EFAULT; - - kp->data = compat_ptr(data); - return 0; -} - -static int put_atomisp_acc_fw_load_to_pipe32( - struct atomisp_acc_fw_load_to_pipe *kp, - struct atomisp_acc_fw_load_to_pipe32 __user *up) -{ - compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data); - if (!access_ok(VERIFY_WRITE, up, - sizeof(struct atomisp_acc_fw_load_to_pipe32)) || - put_user(kp->flags, &up->flags) || - put_user(kp->fw_handle, &up->fw_handle) || - put_user(kp->size, &up->size) || - put_user(kp->type, &up->type) || - put_user(kp->reserved[0], &up->reserved[0]) || - put_user(kp->reserved[1], &up->reserved[1]) || - put_user(kp->reserved[2], &up->reserved[2]) || - put_user(data, &up->data)) - return -EFAULT; - - return 0; -} - -static int get_atomisp_sensor_ae_bracketing_lut( - struct atomisp_sensor_ae_bracketing_lut *kp, - struct atomisp_sensor_ae_bracketing_lut32 __user *up) -{ - compat_uptr_t lut; - if (!access_ok(VERIFY_READ, up, - sizeof(struct atomisp_sensor_ae_bracketing_lut32)) || - 
get_user(kp->lut_size, &up->lut_size) || - get_user(lut, &up->lut)) - return -EFAULT; - - kp->lut = (void __force *)compat_ptr(lut); - return 0; -} - -static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - long ret = -ENOIOCTLCMD; - - if (file->f_op->unlocked_ioctl) - ret = file->f_op->unlocked_ioctl(file, cmd, arg); - - return ret; -} - -static long atomisp_do_compat_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - union { - struct atomisp_histogram his; - struct atomisp_dis_statistics dis_s; - struct atomisp_dis_coefficients dis_c; - struct atomisp_dvs_6axis_config dvs_c; - struct atomisp_3a_statistics s3a_s; - struct atomisp_morph_table mor_t; - struct v4l2_framebuffer v4l2_buf; - struct atomisp_overlay overlay; - struct atomisp_calibration_group cal_grp; - struct atomisp_acc_fw_load acc_fw_load; - struct atomisp_acc_fw_arg acc_fw_arg; - struct v4l2_private_int_data v4l2_pri_data; - struct atomisp_shading_table shd_tbl; - struct atomisp_acc_map acc_map; - struct atomisp_acc_s_mapped_arg acc_map_arg; - struct atomisp_parameters param; - struct atomisp_acc_fw_load_to_pipe acc_fw_to_pipe; - struct atomisp_metadata md; - struct atomisp_metadata_with_type md_with_type; - struct atomisp_sensor_ae_bracketing_lut lut; - } karg; - mm_segment_t old_fs; - void __user *up = compat_ptr(arg); - long err = -ENOIOCTLCMD; - - /* First, convert the command. 
*/ - switch (cmd) { - case ATOMISP_IOC_G_HISTOGRAM32: - cmd = ATOMISP_IOC_G_HISTOGRAM; - break; - case ATOMISP_IOC_S_HISTOGRAM32: - cmd = ATOMISP_IOC_S_HISTOGRAM; - break; - case ATOMISP_IOC_G_DIS_STAT32: - cmd = ATOMISP_IOC_G_DIS_STAT; - break; - case ATOMISP_IOC_S_DIS_COEFS32: - cmd = ATOMISP_IOC_S_DIS_COEFS; - break; - case ATOMISP_IOC_S_DIS_VECTOR32: - cmd = ATOMISP_IOC_S_DIS_VECTOR; - break; - case ATOMISP_IOC_G_3A_STAT32: - cmd = ATOMISP_IOC_G_3A_STAT; - break; - case ATOMISP_IOC_G_ISP_GDC_TAB32: - cmd = ATOMISP_IOC_G_ISP_GDC_TAB; - break; - case ATOMISP_IOC_S_ISP_GDC_TAB32: - cmd = ATOMISP_IOC_S_ISP_GDC_TAB; - break; - case ATOMISP_IOC_S_ISP_FPN_TABLE32: - cmd = ATOMISP_IOC_S_ISP_FPN_TABLE; - break; - case ATOMISP_IOC_G_ISP_OVERLAY32: - cmd = ATOMISP_IOC_G_ISP_OVERLAY; - break; - case ATOMISP_IOC_S_ISP_OVERLAY32: - cmd = ATOMISP_IOC_S_ISP_OVERLAY; - break; - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32: - cmd = ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP; - break; - case ATOMISP_IOC_ACC_LOAD32: - cmd = ATOMISP_IOC_ACC_LOAD; - break; - case ATOMISP_IOC_ACC_S_ARG32: - cmd = ATOMISP_IOC_ACC_S_ARG; - break; - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32: - cmd = ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA; - break; - case ATOMISP_IOC_S_ISP_SHD_TAB32: - cmd = ATOMISP_IOC_S_ISP_SHD_TAB; - break; - case ATOMISP_IOC_ACC_DESTAB32: - cmd = ATOMISP_IOC_ACC_DESTAB; - break; - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32: - cmd = ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA; - break; - case ATOMISP_IOC_ACC_MAP32: - cmd = ATOMISP_IOC_ACC_MAP; - break; - case ATOMISP_IOC_ACC_UNMAP32: - cmd = ATOMISP_IOC_ACC_UNMAP; - break; - case ATOMISP_IOC_ACC_S_MAPPED_ARG32: - cmd = ATOMISP_IOC_ACC_S_MAPPED_ARG; - break; - case ATOMISP_IOC_S_PARAMETERS32: - cmd = ATOMISP_IOC_S_PARAMETERS; - break; - case ATOMISP_IOC_ACC_LOAD_TO_PIPE32: - cmd = ATOMISP_IOC_ACC_LOAD_TO_PIPE; - break; - case ATOMISP_IOC_G_METADATA32: - cmd = ATOMISP_IOC_G_METADATA; - break; - case ATOMISP_IOC_G_METADATA_BY_TYPE32: - cmd = 
ATOMISP_IOC_G_METADATA_BY_TYPE; - break; - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32: - cmd = ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT; - break; - } - - switch (cmd) { - case ATOMISP_IOC_G_HISTOGRAM: - case ATOMISP_IOC_S_HISTOGRAM: - err = get_atomisp_histogram32(&karg.his, up); - break; - case ATOMISP_IOC_G_DIS_STAT: - err = get_atomisp_dis_statistics32(&karg.dis_s, up); - break; - case ATOMISP_IOC_S_DIS_COEFS: - err = get_atomisp_dis_coefficients32(&karg.dis_c, up); - break; - case ATOMISP_IOC_S_DIS_VECTOR: - err = get_atomisp_dvs_6axis_config32(&karg.dvs_c, up); - break; - case ATOMISP_IOC_G_3A_STAT: - err = get_atomisp_3a_statistics32(&karg.s3a_s, up); - break; - case ATOMISP_IOC_G_ISP_GDC_TAB: - case ATOMISP_IOC_S_ISP_GDC_TAB: - err = get_atomisp_morph_table32(&karg.mor_t, up); - break; - case ATOMISP_IOC_S_ISP_FPN_TABLE: - err = get_v4l2_framebuffer32(&karg.v4l2_buf, up); - break; - case ATOMISP_IOC_G_ISP_OVERLAY: - case ATOMISP_IOC_S_ISP_OVERLAY: - err = get_atomisp_overlay32(&karg.overlay, up); - break; - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - err = get_atomisp_calibration_group32(&karg.cal_grp, up); - break; - case ATOMISP_IOC_ACC_LOAD: - err = get_atomisp_acc_fw_load32(&karg.acc_fw_load, up); - break; - case ATOMISP_IOC_ACC_S_ARG: - case ATOMISP_IOC_ACC_DESTAB: - err = get_atomisp_acc_fw_arg32(&karg.acc_fw_arg, up); - break; - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: - err = get_v4l2_private_int_data32(&karg.v4l2_pri_data, up); - break; - case ATOMISP_IOC_S_ISP_SHD_TAB: - err = get_atomisp_shading_table32(&karg.shd_tbl, up); - break; - case ATOMISP_IOC_ACC_MAP: - case ATOMISP_IOC_ACC_UNMAP: - err = get_atomisp_acc_map32(&karg.acc_map, up); - break; - case ATOMISP_IOC_ACC_S_MAPPED_ARG: - err = get_atomisp_acc_s_mapped_arg32(&karg.acc_map_arg, up); - break; - case ATOMISP_IOC_S_PARAMETERS: - err = get_atomisp_parameters32(&karg.param, up); - break; - case ATOMISP_IOC_ACC_LOAD_TO_PIPE: - err = 
get_atomisp_acc_fw_load_to_pipe32(&karg.acc_fw_to_pipe, - up); - break; - case ATOMISP_IOC_G_METADATA: - err = get_atomisp_metadata_stat32(&karg.md, up); - break; - case ATOMISP_IOC_G_METADATA_BY_TYPE: - err = get_atomisp_metadata_by_type_stat32(&karg.md_with_type, - up); - break; - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT: - err = get_atomisp_sensor_ae_bracketing_lut(&karg.lut, up); - break; - } - if (err) - return err; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - err = native_ioctl(file, cmd, (unsigned long)&karg); - set_fs(old_fs); - if (err) - return err; - - switch (cmd) { - case ATOMISP_IOC_G_HISTOGRAM: - err = put_atomisp_histogram32(&karg.his, up); - break; - case ATOMISP_IOC_G_DIS_STAT: - err = put_atomisp_dis_statistics32(&karg.dis_s, up); - break; - case ATOMISP_IOC_G_3A_STAT: - err = put_atomisp_3a_statistics32(&karg.s3a_s, up); - break; - case ATOMISP_IOC_G_ISP_GDC_TAB: - err = put_atomisp_morph_table32(&karg.mor_t, up); - break; - case ATOMISP_IOC_G_ISP_OVERLAY: - err = put_atomisp_overlay32(&karg.overlay, up); - break; - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - err = put_atomisp_calibration_group32(&karg.cal_grp, up); - break; - case ATOMISP_IOC_ACC_LOAD: - err = put_atomisp_acc_fw_load32(&karg.acc_fw_load, up); - break; - case ATOMISP_IOC_ACC_S_ARG: - case ATOMISP_IOC_ACC_DESTAB: - err = put_atomisp_acc_fw_arg32(&karg.acc_fw_arg, up); - break; - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: - err = put_v4l2_private_int_data32(&karg.v4l2_pri_data, up); - break; - case ATOMISP_IOC_ACC_MAP: - case ATOMISP_IOC_ACC_UNMAP: - err = put_atomisp_acc_map32(&karg.acc_map, up); - break; - case ATOMISP_IOC_ACC_S_MAPPED_ARG: - err = put_atomisp_acc_s_mapped_arg32(&karg.acc_map_arg, up); - break; - case ATOMISP_IOC_ACC_LOAD_TO_PIPE: - err = put_atomisp_acc_fw_load_to_pipe32(&karg.acc_fw_to_pipe, - up); - break; - case ATOMISP_IOC_G_METADATA: - err = put_atomisp_metadata_stat32(&karg.md, up); - break; - case 
ATOMISP_IOC_G_METADATA_BY_TYPE: - err = put_atomisp_metadata_by_type_stat32(&karg.md_with_type, - up); - break; - } - - return err; -} - -long atomisp_compat_ioctl32(struct file *file, - unsigned int cmd, unsigned long arg) -{ - - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - long ret = -ENOIOCTLCMD; - - if (!file->f_op->unlocked_ioctl) - return ret; - - switch (cmd) { - case ATOMISP_IOC_G_XNR: - case ATOMISP_IOC_S_XNR: - case ATOMISP_IOC_G_NR: - case ATOMISP_IOC_S_NR: - case ATOMISP_IOC_G_TNR: - case ATOMISP_IOC_S_TNR: - case ATOMISP_IOC_G_BLACK_LEVEL_COMP: - case ATOMISP_IOC_S_BLACK_LEVEL_COMP: - case ATOMISP_IOC_G_EE: - case ATOMISP_IOC_S_EE: - case ATOMISP_IOC_S_DIS_VECTOR: - case ATOMISP_IOC_G_ISP_PARM: - case ATOMISP_IOC_S_ISP_PARM: - case ATOMISP_IOC_G_ISP_GAMMA: - case ATOMISP_IOC_S_ISP_GAMMA: - case ATOMISP_IOC_ISP_MAKERNOTE: - case ATOMISP_IOC_G_ISP_MACC: - case ATOMISP_IOC_S_ISP_MACC: - case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION: - case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION: - case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION: - case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION: - case ATOMISP_IOC_G_ISP_CTC: - case ATOMISP_IOC_S_ISP_CTC: - case ATOMISP_IOC_G_ISP_WHITE_BALANCE: - case ATOMISP_IOC_S_ISP_WHITE_BALANCE: - case ATOMISP_IOC_CAMERA_BRIDGE: - case ATOMISP_IOC_G_SENSOR_MODE_DATA: - case ATOMISP_IOC_S_EXPOSURE: - case ATOMISP_IOC_G_3A_CONFIG: - case ATOMISP_IOC_S_3A_CONFIG: - case ATOMISP_IOC_ACC_UNLOAD: - case ATOMISP_IOC_ACC_START: - case ATOMISP_IOC_ACC_WAIT: - case ATOMISP_IOC_ACC_ABORT: - case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION: - case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION: - case ATOMISP_IOC_S_CONT_CAPTURE_CONFIG: - case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS: - case ATOMISP_IOC_EXT_ISP_CTRL: - case ATOMISP_IOC_EXP_ID_UNLOCK: - case ATOMISP_IOC_EXP_ID_CAPTURE: - case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE: - case ATOMISP_IOC_G_FORMATS_CONFIG: - case ATOMISP_IOC_S_FORMATS_CONFIG: - case 
ATOMISP_IOC_S_EXPOSURE_WINDOW: - case ATOMISP_IOC_S_ACC_STATE: - case ATOMISP_IOC_G_ACC_STATE: - case ATOMISP_IOC_INJECT_A_FAKE_EVENT: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_INVALID_FRAME_NUM: - case ATOMISP_IOC_S_ARRAY_RESOLUTION: -#ifdef ISP2401 - case ATOMISP_IOC_S_SENSOR_RUNMODE: - case ATOMISP_IOC_G_UPDATE_EXPOSURE: -#endif - ret = native_ioctl(file, cmd, arg); - break; - - case ATOMISP_IOC_G_HISTOGRAM32: - case ATOMISP_IOC_S_HISTOGRAM32: - case ATOMISP_IOC_G_DIS_STAT32: - case ATOMISP_IOC_S_DIS_COEFS32: - case ATOMISP_IOC_S_DIS_VECTOR32: - case ATOMISP_IOC_G_3A_STAT32: - case ATOMISP_IOC_G_ISP_GDC_TAB32: - case ATOMISP_IOC_S_ISP_GDC_TAB32: - case ATOMISP_IOC_S_ISP_FPN_TABLE32: - case ATOMISP_IOC_G_ISP_OVERLAY32: - case ATOMISP_IOC_S_ISP_OVERLAY32: - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32: - case ATOMISP_IOC_ACC_LOAD32: - case ATOMISP_IOC_ACC_S_ARG32: - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32: - case ATOMISP_IOC_S_ISP_SHD_TAB32: - case ATOMISP_IOC_ACC_DESTAB32: - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32: - case ATOMISP_IOC_ACC_MAP32: - case ATOMISP_IOC_ACC_UNMAP32: - case ATOMISP_IOC_ACC_S_MAPPED_ARG32: - case ATOMISP_IOC_S_PARAMETERS32: - case ATOMISP_IOC_ACC_LOAD_TO_PIPE32: - case ATOMISP_IOC_G_METADATA32: - case ATOMISP_IOC_G_METADATA_BY_TYPE32: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32: - ret = atomisp_do_compat_ioctl(file, cmd, arg); - break; - - default: - dev_warn(isp->dev, - "%s: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n", - __func__, _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), - cmd); - break; - } - return ret; -} -#endif /* CONFIG_COMPAT */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h deleted file mode 100644 index 95669eedaad1..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h +++ /dev/null @@ -1,365 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#ifndef __ATOMISP_COMPAT_IOCTL32_H__ -#define __ATOMISP_COMPAT_IOCTL32_H__ - -#include -#include - -#include "atomisp_compat.h" - -struct atomisp_histogram32 { - unsigned int num_elements; - compat_uptr_t data; -}; - -struct atomisp_dvs2_stat_types32 { - compat_uptr_t odd_real; /** real part of the odd statistics*/ - compat_uptr_t odd_imag; /** imaginary part of the odd statistics*/ - compat_uptr_t even_real;/** real part of the even statistics*/ - compat_uptr_t even_imag;/** imaginary part of the even statistics*/ -}; - -struct atomisp_dvs2_coef_types32 { - compat_uptr_t odd_real; /** real part of the odd coefficients*/ - compat_uptr_t odd_imag; /** imaginary part of the odd coefficients*/ - compat_uptr_t even_real;/** real part of the even coefficients*/ - compat_uptr_t even_imag;/** imaginary part of the even coefficients*/ -}; - -struct atomisp_dvs2_statistics32 { - struct atomisp_dvs_grid_info grid_info; - struct atomisp_dvs2_stat_types32 hor_prod; - struct atomisp_dvs2_stat_types32 ver_prod; -}; - -struct atomisp_dis_statistics32 { - struct atomisp_dvs2_statistics32 dvs2_stat; - uint32_t exp_id; -}; - -struct atomisp_dis_coefficients32 { - struct atomisp_dvs_grid_info grid_info; - struct atomisp_dvs2_coef_types32 hor_coefs; - struct atomisp_dvs2_coef_types32 ver_coefs; -}; - -struct 
atomisp_3a_statistics32 { - struct atomisp_grid_info grid_info; - compat_uptr_t data; - compat_uptr_t rgby_data; - uint32_t exp_id; - uint32_t isp_config_id; -}; - -struct atomisp_metadata_with_type32 { - /* to specify which type of metadata to get */ - enum atomisp_metadata_type type; - compat_uptr_t data; - uint32_t width; - uint32_t height; - uint32_t stride; /* in bytes */ - uint32_t exp_id; /* exposure ID */ - compat_uptr_t effective_width; -}; - -struct atomisp_metadata32 { - compat_uptr_t data; - uint32_t width; - uint32_t height; - uint32_t stride; - uint32_t exp_id; - compat_uptr_t effective_width; -}; - -struct atomisp_morph_table32 { - unsigned int enabled; - unsigned int height; - unsigned int width; /* number of valid elements per line */ - compat_uptr_t coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES]; - compat_uptr_t coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES]; -}; - -struct v4l2_framebuffer32 { - __u32 capability; - __u32 flags; - compat_uptr_t base; - struct v4l2_pix_format fmt; -}; - -struct atomisp_overlay32 { - /* the frame containing the overlay data The overlay frame width should - * be the multiples of 2*ISP_VEC_NELEMS. The overlay frame height - * should be the multiples of 2. 
- */ - compat_uptr_t frame; - /* Y value of overlay background */ - unsigned char bg_y; - /* U value of overlay background */ - char bg_u; - /* V value of overlay background */ - char bg_v; - /* the blending percent of input data for Y subpixels */ - unsigned char blend_input_perc_y; - /* the blending percent of input data for U subpixels */ - unsigned char blend_input_perc_u; - /* the blending percent of input data for V subpixels */ - unsigned char blend_input_perc_v; - /* the blending percent of overlay data for Y subpixels */ - unsigned char blend_overlay_perc_y; - /* the blending percent of overlay data for U subpixels */ - unsigned char blend_overlay_perc_u; - /* the blending percent of overlay data for V subpixels */ - unsigned char blend_overlay_perc_v; - /* the overlay start x pixel position on output frame It should be the - multiples of 2*ISP_VEC_NELEMS. */ - unsigned int overlay_start_x; - /* the overlay start y pixel position on output frame It should be the - multiples of 2. 
*/ - unsigned int overlay_start_y; -}; - -struct atomisp_calibration_group32 { - unsigned int size; - unsigned int type; - compat_uptr_t calb_grp_values; -}; - -struct atomisp_acc_fw_load32 { - unsigned int size; - unsigned int fw_handle; - compat_uptr_t data; -}; - -struct atomisp_acc_fw_arg32 { - unsigned int fw_handle; - unsigned int index; - compat_uptr_t value; - compat_size_t size; -}; - -struct v4l2_private_int_data32 { - __u32 size; - compat_uptr_t data; - __u32 reserved[2]; -}; - -struct atomisp_shading_table32 { - __u32 enable; - __u32 sensor_width; - __u32 sensor_height; - __u32 width; - __u32 height; - __u32 fraction_bits; - - compat_uptr_t data[ATOMISP_NUM_SC_COLORS]; -}; - -struct atomisp_acc_map32 { - __u32 flags; /* Flags, see list below */ - __u32 length; /* Length of data in bytes */ - compat_uptr_t user_ptr; /* Pointer into user space */ - compat_ulong_t css_ptr; /* Pointer into CSS address space */ - __u32 reserved[4]; /* Set to zero */ -}; - -struct atomisp_acc_s_mapped_arg32 { - unsigned int fw_handle; - __u32 memory; /* one of enum atomisp_acc_memory */ - compat_size_t length; - compat_ulong_t css_ptr; -}; - -struct atomisp_parameters32 { - compat_uptr_t wb_config; /* White Balance config */ - compat_uptr_t cc_config; /* Color Correction config */ - compat_uptr_t tnr_config; /* Temporal Noise Reduction */ - compat_uptr_t ecd_config; /* Eigen Color Demosaicing */ - compat_uptr_t ynr_config; /* Y(Luma) Noise Reduction */ - compat_uptr_t fc_config; /* Fringe Control */ - compat_uptr_t formats_config; /* Formats Control */ - compat_uptr_t cnr_config; /* Chroma Noise Reduction */ - compat_uptr_t macc_config; /* MACC */ - compat_uptr_t ctc_config; /* Chroma Tone Control */ - compat_uptr_t aa_config; /* Anti-Aliasing */ - compat_uptr_t baa_config; /* Anti-Aliasing */ - compat_uptr_t ce_config; - compat_uptr_t dvs_6axis_config; - compat_uptr_t ob_config; /* Objective Black config */ - compat_uptr_t dp_config; /* Dead Pixel config */ - compat_uptr_t 
nr_config; /* Noise Reduction config */ - compat_uptr_t ee_config; /* Edge Enhancement config */ - compat_uptr_t de_config; /* Demosaic config */ - compat_uptr_t gc_config; /* Gamma Correction config */ - compat_uptr_t anr_config; /* Advanced Noise Reduction */ - compat_uptr_t a3a_config; /* 3A Statistics config */ - compat_uptr_t xnr_config; /* eXtra Noise Reduction */ - compat_uptr_t dz_config; /* Digital Zoom */ - compat_uptr_t yuv2rgb_cc_config; /* Color - Correction config */ - compat_uptr_t rgb2yuv_cc_config; /* Color - Correction config */ - compat_uptr_t macc_table; - compat_uptr_t gamma_table; - compat_uptr_t ctc_table; - compat_uptr_t xnr_table; - compat_uptr_t r_gamma_table; - compat_uptr_t g_gamma_table; - compat_uptr_t b_gamma_table; - compat_uptr_t motion_vector; /* For 2-axis DVS */ - compat_uptr_t shading_table; - compat_uptr_t morph_table; - compat_uptr_t dvs_coefs; /* DVS 1.0 coefficients */ - compat_uptr_t dvs2_coefs; /* DVS 2.0 coefficients */ - compat_uptr_t capture_config; - compat_uptr_t anr_thres; - - compat_uptr_t lin_2500_config; /* Skylake: Linearization config */ - compat_uptr_t obgrid_2500_config; /* Skylake: OBGRID config */ - compat_uptr_t bnr_2500_config; /* Skylake: bayer denoise config */ - compat_uptr_t shd_2500_config; /* Skylake: shading config */ - compat_uptr_t dm_2500_config; /* Skylake: demosaic config */ - compat_uptr_t rgbpp_2500_config; /* Skylake: RGBPP config */ - compat_uptr_t dvs_stat_2500_config; /* Skylake: DVS STAT config */ - compat_uptr_t lace_stat_2500_config; /* Skylake: LACE STAT config */ - compat_uptr_t yuvp1_2500_config; /* Skylake: yuvp1 config */ - compat_uptr_t yuvp2_2500_config; /* Skylake: yuvp2 config */ - compat_uptr_t tnr_2500_config; /* Skylake: TNR config */ - compat_uptr_t dpc_2500_config; /* Skylake: DPC config */ - compat_uptr_t awb_2500_config; /* Skylake: auto white balance config */ - compat_uptr_t awb_fr_2500_config; /* Skylake: auto white balance filter response config */ - compat_uptr_t 
anr_2500_config; /* Skylake: ANR config */ - compat_uptr_t af_2500_config; /* Skylake: auto focus config */ - compat_uptr_t ae_2500_config; /* Skylake: auto exposure config */ - compat_uptr_t bds_2500_config; /* Skylake: bayer downscaler config */ - compat_uptr_t dvs_2500_config; /* Skylake: digital video stabilization config */ - compat_uptr_t res_mgr_2500_config; - - /* - * Output frame pointer the config is to be applied to (optional), - * set to NULL to make this config is applied as global. - */ - compat_uptr_t output_frame; - /* - * Unique ID to track which config was actually applied to a particular - * frame, driver will send this id back with output frame together. - */ - uint32_t isp_config_id; - uint32_t per_frame_setting; -}; - -struct atomisp_acc_fw_load_to_pipe32 { - __u32 flags; /* Flags, see below for valid values */ - unsigned int fw_handle; /* Handle, filled by kernel. */ - __u32 size; /* Firmware binary size */ - compat_uptr_t data; /* Pointer to firmware */ - __u32 type; /* Binary type */ - __u32 reserved[3]; /* Set to zero */ -}; - -struct atomisp_dvs_6axis_config32 { - uint32_t exp_id; - uint32_t width_y; - uint32_t height_y; - uint32_t width_uv; - uint32_t height_uv; - compat_uptr_t xcoords_y; - compat_uptr_t ycoords_y; - compat_uptr_t xcoords_uv; - compat_uptr_t ycoords_uv; -}; - -struct atomisp_sensor_ae_bracketing_lut32 { - compat_uptr_t lut; - unsigned int lut_size; -}; - -#define ATOMISP_IOC_G_HISTOGRAM32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32) -#define ATOMISP_IOC_S_HISTOGRAM32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32) - -#define ATOMISP_IOC_G_DIS_STAT32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics32) -#define ATOMISP_IOC_S_DIS_COEFS32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients32) - -#define ATOMISP_IOC_S_DIS_VECTOR32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config32) - -#define ATOMISP_IOC_G_3A_STAT32 \ - 
_IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics32) - -#define ATOMISP_IOC_G_ISP_GDC_TAB32 \ - _IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table32) -#define ATOMISP_IOC_S_ISP_GDC_TAB32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table32) - -#define ATOMISP_IOC_S_ISP_FPN_TABLE32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer32) - -#define ATOMISP_IOC_G_ISP_OVERLAY32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32) -#define ATOMISP_IOC_S_ISP_OVERLAY32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32) - -#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group32) - -#define ATOMISP_IOC_ACC_LOAD32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load32) - -#define ATOMISP_IOC_ACC_S_ARG32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg32) - -#define ATOMISP_IOC_ACC_DESTAB32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg32) - -#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data32) - -#define ATOMISP_IOC_S_ISP_SHD_TAB32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table32) - -#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data32) - -#define ATOMISP_IOC_ACC_MAP32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32) - -#define ATOMISP_IOC_ACC_UNMAP32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32) - -#define ATOMISP_IOC_ACC_S_MAPPED_ARG32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg32) - -#define ATOMISP_IOC_ACC_LOAD_TO_PIPE32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe32) - -#define ATOMISP_IOC_S_PARAMETERS32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters32) - -#define ATOMISP_IOC_G_METADATA32 \ - _IOWR('v', 
BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata32) - -#define ATOMISP_IOC_G_METADATA_BY_TYPE32 \ - _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata_with_type32) - -#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32 \ - _IOW('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_lut32) - -#endif /* __ATOMISP_COMPAT_IOCTL32_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c deleted file mode 100644 index fa03b78c3580..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#include -#include -#include "atomisp_cmd.h" -#include "atomisp_internal.h" -#include "atomisp-regs.h" - -static struct v4l2_mbus_framefmt *__csi2_get_format(struct - atomisp_mipi_csi2_device - *csi2, - struct - v4l2_subdev_pad_config *cfg, - enum - v4l2_subdev_format_whence - which, unsigned int pad) -{ - if (which == V4L2_SUBDEV_FORMAT_TRY) - return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad); - else - return &csi2->formats[pad]; -} - -/* - * csi2_enum_mbus_code - Handle pixel format enumeration - * @sd : pointer to v4l2 subdev structure - * @fh : V4L2 subdev file handle - * @code : pointer to v4l2_subdev_pad_mbus_code_enum structure - * return -EINVAL or zero on success -*/ -static int csi2_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - const struct atomisp_in_fmt_conv *ic = atomisp_in_fmt_conv; - unsigned int i = 0; - - while (ic->code) { - if (i == code->index) { - code->code = ic->code; - return 0; - } - i++, ic++; - } - - return -EINVAL; -} - -/* - * csi2_get_format - Handle get format by pads subdev method - * @sd : pointer to v4l2 subdev structure - * @fh : V4L2 subdev file handle - * @pad: pad num - * @fmt: pointer to v4l2 format structure - * return -EINVAL or zero on sucess -*/ -static int csi2_get_format(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *fmt) -{ - struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd); - struct v4l2_mbus_framefmt *format; - - format = __csi2_get_format(csi2, cfg, fmt->which, fmt->pad); - - fmt->format = *format; - - return 0; -} - -int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - unsigned int which, uint16_t pad, - struct v4l2_mbus_framefmt *ffmt) -{ - struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd); - struct v4l2_mbus_framefmt *actual_ffmt = -#ifndef ISP2401 - __csi2_get_format(csi2, cfg, which, pad); -#else - 
__csi2_get_format(csi2, cfg, which, pad); -#endif - - if (pad == CSI2_PAD_SINK) { - const struct atomisp_in_fmt_conv *ic; - struct v4l2_mbus_framefmt tmp_ffmt; - - ic = atomisp_find_in_fmt_conv(ffmt->code); - if (ic) - actual_ffmt->code = ic->code; - else - actual_ffmt->code = atomisp_in_fmt_conv[0].code; - - actual_ffmt->width = clamp_t( - u32, ffmt->width, ATOM_ISP_MIN_WIDTH, - ATOM_ISP_MAX_WIDTH); - actual_ffmt->height = clamp_t( - u32, ffmt->height, ATOM_ISP_MIN_HEIGHT, - ATOM_ISP_MAX_HEIGHT); - - tmp_ffmt = *ffmt = *actual_ffmt; - - return atomisp_csi2_set_ffmt(sd, cfg, which, CSI2_PAD_SOURCE, - &tmp_ffmt); - } - - /* FIXME: DPCM decompression */ - *actual_ffmt = *ffmt = -#ifndef ISP2401 - *__csi2_get_format(csi2, cfg, which, CSI2_PAD_SINK); -#else - *__csi2_get_format(csi2, cfg, which, CSI2_PAD_SINK); -#endif - - return 0; -} - -/* - * csi2_set_format - Handle set format by pads subdev method - * @sd : pointer to v4l2 subdev structure - * @fh : V4L2 subdev file handle - * @pad: pad num - * @fmt: pointer to v4l2 format structure - * return -EINVAL or zero on success -*/ -static int csi2_set_format(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *fmt) -{ - return atomisp_csi2_set_ffmt(sd, cfg, fmt->which, fmt->pad, - &fmt->format); -} - -/* - * csi2_set_stream - Enable/Disable streaming on the CSI2 module - * @sd: ISP CSI2 V4L2 subdevice - * @enable: Enable/disable stream (1/0) - * - * Return 0 on success or a negative error code otherwise. 
-*/ -static int csi2_set_stream(struct v4l2_subdev *sd, int enable) -{ - return 0; -} - -/* subdev core operations */ -static const struct v4l2_subdev_core_ops csi2_core_ops = { -}; - -/* subdev video operations */ -static const struct v4l2_subdev_video_ops csi2_video_ops = { - .s_stream = csi2_set_stream, -}; - -/* subdev pad operations */ -static const struct v4l2_subdev_pad_ops csi2_pad_ops = { - .enum_mbus_code = csi2_enum_mbus_code, - .get_fmt = csi2_get_format, - .set_fmt = csi2_set_format, - .link_validate = v4l2_subdev_link_validate_default, -}; - -/* subdev operations */ -static const struct v4l2_subdev_ops csi2_ops = { - .core = &csi2_core_ops, - .video = &csi2_video_ops, - .pad = &csi2_pad_ops, -}; - -#ifndef ISP2401 - -#endif -/* - * csi2_link_setup - Setup CSI2 connections. - * @entity : Pointer to media entity structure - * @local : Pointer to local pad array - * @remote : Pointer to remote pad array - * @flags : Link flags - * return -EINVAL or zero on success -*/ -static int csi2_link_setup(struct media_entity *entity, - const struct media_pad *local, - const struct media_pad *remote, u32 flags) -{ - struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); - struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd); - u32 result = local->index | is_media_entity_v4l2_subdev(remote->entity); - - switch (result) { - case CSI2_PAD_SOURCE | MEDIA_ENT_F_OLD_BASE: - /* not supported yet */ - return -EINVAL; - - case CSI2_PAD_SOURCE | MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN: - if (flags & MEDIA_LNK_FL_ENABLED) { - if (csi2->output & ~CSI2_OUTPUT_ISP_SUBDEV) - return -EBUSY; - csi2->output |= CSI2_OUTPUT_ISP_SUBDEV; - } else { - csi2->output &= ~CSI2_OUTPUT_ISP_SUBDEV; - } - break; - - default: - /* Link from camera to CSI2 is fixed... 
*/ - return -EINVAL; - } - return 0; -} - -/* media operations */ -static const struct media_entity_operations csi2_media_ops = { - .link_setup = csi2_link_setup, - .link_validate = v4l2_subdev_link_validate, -}; - -/* -* ispcsi2_init_entities - Initialize subdev and media entity. -* @csi2: Pointer to ispcsi2 structure. -* return -ENOMEM or zero on success -*/ -static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2, - int port) -{ - struct v4l2_subdev *sd = &csi2->subdev; - struct media_pad *pads = csi2->pads; - struct media_entity *me = &sd->entity; - int ret; - - v4l2_subdev_init(sd, &csi2_ops); - snprintf(sd->name, sizeof(sd->name), "ATOM ISP CSI2-port%d", port); - - v4l2_set_subdevdata(sd, csi2); - sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - - pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; - pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK; - - me->ops = &csi2_media_ops; - me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; - ret = media_entity_pads_init(me, CSI2_PADS_NUM, pads); - if (ret < 0) - return ret; - - csi2->formats[CSI2_PAD_SINK].code = - csi2->formats[CSI2_PAD_SOURCE].code = - atomisp_in_fmt_conv[0].code; - - return 0; -} - -void -atomisp_mipi_csi2_unregister_entities(struct atomisp_mipi_csi2_device *csi2) -{ - media_entity_cleanup(&csi2->subdev.entity); - v4l2_device_unregister_subdev(&csi2->subdev); -} - -int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2, - struct v4l2_device *vdev) -{ - int ret; - - /* Register the subdev and video nodes. 
*/ - ret = v4l2_device_register_subdev(vdev, &csi2->subdev); - if (ret < 0) - goto error; - - return 0; - -error: - atomisp_mipi_csi2_unregister_entities(csi2); - return ret; -} - -static const int LIMIT_SHIFT = 6; /* Limit numeric range into 31 bits */ - -static int -atomisp_csi2_configure_calc(const short int coeffs[2], int mipi_freq, int def) -{ - /* Delay counter accuracy, 1/0.0625 for ANN/CHT, 1/0.125 for BXT */ - static const int accinv = 16; /* 1 / COUNT_ACC */ - int r; - - if (mipi_freq >> LIMIT_SHIFT <= 0) - return def; - - r = accinv * coeffs[1] * (500000000 >> LIMIT_SHIFT); - r /= mipi_freq >> LIMIT_SHIFT; - r += accinv * coeffs[0]; - - return r; -} - -static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd) -{ - /* - * The ISP2401 new input system CSI2+ receiver has several - * parameters affecting the receiver timings. These depend - * on the MIPI bus frequency F in Hz (sensor transmitter rate) - * as follows: - * register value = (A/1e9 + B * UI) / COUNT_ACC - * where - * UI = 1 / (2 * F) in seconds - * COUNT_ACC = counter accuracy in seconds - * For ANN and CHV, COUNT_ACC = 0.0625 ns - * For BXT, COUNT_ACC = 0.125 ns - * A and B are coefficients from the table below, - * depending whether the register minimum or maximum value is - * calculated. - * Minimum Maximum - * Clock lane A B A B - * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 - * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 - * Data lanes - * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 - * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 - * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 - * - * We use the minimum values in the calculations below. 
- */ - static const short int coeff_clk_termen[] = { 0, 0 }; - static const short int coeff_clk_settle[] = { 95, -8 }; - static const short int coeff_dat_termen[] = { 0, 0 }; - static const short int coeff_dat_settle[] = { 85, -2 }; - static const int TERMEN_DEFAULT = 0 * 0; - static const int SETTLE_DEFAULT = 0x480; - static const hrt_address csi2_port_base[] = { - [ATOMISP_CAMERA_PORT_PRIMARY] = CSI2_PORT_A_BASE, - [ATOMISP_CAMERA_PORT_SECONDARY] = CSI2_PORT_B_BASE, - [ATOMISP_CAMERA_PORT_TERTIARY] = CSI2_PORT_C_BASE, - }; - /* Number of lanes on each port, excluding clock lane */ - static const unsigned char csi2_port_lanes[] = { - [ATOMISP_CAMERA_PORT_PRIMARY] = 4, - [ATOMISP_CAMERA_PORT_SECONDARY] = 2, - [ATOMISP_CAMERA_PORT_TERTIARY] = 2, - }; - static const hrt_address csi2_lane_base[] = { - CSI2_LANE_CL_BASE, - CSI2_LANE_D0_BASE, - CSI2_LANE_D1_BASE, - CSI2_LANE_D2_BASE, - CSI2_LANE_D3_BASE, - }; - - int clk_termen; - int clk_settle; - int dat_termen; - int dat_settle; - - struct v4l2_control ctrl; - struct atomisp_device *isp = asd->isp; - struct camera_mipi_info *mipi_info; - int mipi_freq = 0; - enum atomisp_camera_port port; - - int n; - - mipi_info = atomisp_to_sensor_mipi_info( - isp->inputs[asd->input_curr].camera); - port = mipi_info->port; - - ctrl.id = V4L2_CID_LINK_FREQ; - if (v4l2_g_ctrl - (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0) - mipi_freq = ctrl.value; - - clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen, - mipi_freq, TERMEN_DEFAULT); - clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle, - mipi_freq, SETTLE_DEFAULT); - dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen, - mipi_freq, TERMEN_DEFAULT); - dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle, - mipi_freq, SETTLE_DEFAULT); - for (n = 0; n < csi2_port_lanes[port] + 1; n++) { - hrt_address base = csi2_port_base[port] + csi2_lane_base[n]; - atomisp_store_uint32(base + CSI2_REG_RX_CSI_DLY_CNT_TERMEN, - n == 0 ? 
clk_termen : dat_termen); - atomisp_store_uint32(base + CSI2_REG_RX_CSI_DLY_CNT_SETTLE, - n == 0 ? clk_settle : dat_settle); - } -} - -void atomisp_csi2_configure(struct atomisp_sub_device *asd) -{ - if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401)) - atomisp_csi2_configure_isp2401(asd); -} - -/* - * atomisp_mipi_csi2_cleanup - Routine for module driver cleanup -*/ -void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp) -{ -} - -#ifndef ISP2401 - -#endif -int atomisp_mipi_csi2_init(struct atomisp_device *isp) -{ - struct atomisp_mipi_csi2_device *csi2_port; - unsigned int i; - int ret; - - for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) { - csi2_port = &isp->csi2_port[i]; - csi2_port->isp = isp; - ret = mipi_csi2_init_entities(csi2_port, i); - if (ret < 0) - goto fail; - } - - return 0; - -fail: - atomisp_mipi_csi2_cleanup(isp); - return ret; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h deleted file mode 100644 index 0191d28a55bc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#ifndef __ATOMISP_CSI2_H__ -#define __ATOMISP_CSI2_H__ - -#include -#include - -#define CSI2_PAD_SINK 0 -#define CSI2_PAD_SOURCE 1 -#define CSI2_PADS_NUM 2 - -#define CSI2_OUTPUT_ISP_SUBDEV (1 << 0) -#define CSI2_OUTPUT_MEMORY (1 << 1) - -struct atomisp_device; -struct v4l2_device; -struct atomisp_sub_device; - -struct atomisp_mipi_csi2_device { - struct v4l2_subdev subdev; - struct media_pad pads[CSI2_PADS_NUM]; - struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM]; - - struct v4l2_ctrl_handler ctrls; - struct atomisp_device *isp; - - u32 output; /* output direction */ -}; - -int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, - unsigned int which, uint16_t pad, - struct v4l2_mbus_framefmt *ffmt); -int atomisp_mipi_csi2_init(struct atomisp_device *isp); -void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp); -void atomisp_mipi_csi2_unregister_entities( - struct atomisp_mipi_csi2_device *csi2); -int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2, - struct v4l2_device *vdev); - -void atomisp_csi2_configure(struct atomisp_sub_device *asd); - -#endif /* __ATOMISP_CSI2_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h deleted file mode 100644 index 54e28605b5de..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * - */ -#ifndef __ATOMISP_DFS_TABLES_H__ -#define __ATOMISP_DFS_TABLES_H__ - -#include - -struct atomisp_freq_scaling_rule { - unsigned int width; - unsigned int height; - unsigned short fps; - unsigned int isp_freq; - unsigned int run_mode; -}; - - -struct atomisp_dfs_config { - unsigned int lowest_freq; - unsigned int max_freq_at_vmin; - unsigned int highest_freq; - const struct atomisp_freq_scaling_rule *dfs_table; - unsigned int dfs_table_size; -}; - -static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = { - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_PREVIEW, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_457MHZ, - .run_mode = ATOMISP_RUN_MODE_SDV, - }, -}; - -/* Merrifield and Moorefield DFS rules */ -static const struct atomisp_dfs_config dfs_config_merr = { - .lowest_freq = ISP_FREQ_200MHZ, - .max_freq_at_vmin = ISP_FREQ_400MHZ, - .highest_freq = ISP_FREQ_457MHZ, - .dfs_table = dfs_rules_merr, - .dfs_table_size = ARRAY_SIZE(dfs_rules_merr), -}; - -static const struct atomisp_freq_scaling_rule dfs_rules_merr_1179[] = { - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - 
.width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_PREVIEW, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_SDV, - }, -}; - -static const struct atomisp_dfs_config dfs_config_merr_1179 = { - .lowest_freq = ISP_FREQ_200MHZ, - .max_freq_at_vmin = ISP_FREQ_400MHZ, - .highest_freq = ISP_FREQ_400MHZ, - .dfs_table = dfs_rules_merr_1179, - .dfs_table_size = ARRAY_SIZE(dfs_rules_merr_1179), -}; - -static const struct atomisp_freq_scaling_rule dfs_rules_merr_117a[] = { - { - .width = 1920, - .height = 1080, - .fps = 30, - .isp_freq = ISP_FREQ_266MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = 1080, - .height = 1920, - .fps = 30, -#ifndef ISP2401 - .isp_freq = ISP_FREQ_266MHZ, -#else - .isp_freq = ISP_FREQ_400MHZ, -#endif - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = 1920, - .height = 1080, - .fps = 45, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = 1080, - .height = 1920, - .fps = 45, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = 60, - .isp_freq = ISP_FREQ_356MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_200MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = 
ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_200MHZ, - .run_mode = ATOMISP_RUN_MODE_PREVIEW, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_SDV, - }, -}; - -static const struct atomisp_dfs_config dfs_config_merr_117a = { - .lowest_freq = ISP_FREQ_200MHZ, - .max_freq_at_vmin = ISP_FREQ_200MHZ, - .highest_freq = ISP_FREQ_400MHZ, - .dfs_table = dfs_rules_merr_117a, - .dfs_table_size = ARRAY_SIZE(dfs_rules_merr_117a), -}; - -static const struct atomisp_freq_scaling_rule dfs_rules_byt[] = { - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_PREVIEW, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_400MHZ, - .run_mode = ATOMISP_RUN_MODE_SDV, - }, -}; - -static const struct atomisp_dfs_config dfs_config_byt = { - .lowest_freq = ISP_FREQ_200MHZ, - .max_freq_at_vmin = ISP_FREQ_400MHZ, - .highest_freq = ISP_FREQ_400MHZ, - .dfs_table = 
dfs_rules_byt, - .dfs_table_size = ARRAY_SIZE(dfs_rules_byt), -}; - -static const struct atomisp_freq_scaling_rule dfs_rules_byt_cr[] = { - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_PREVIEW, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_SDV, - }, -}; - -static const struct atomisp_dfs_config dfs_config_byt_cr = { - .lowest_freq = ISP_FREQ_200MHZ, - .max_freq_at_vmin = ISP_FREQ_320MHZ, - .highest_freq = ISP_FREQ_320MHZ, - .dfs_table = dfs_rules_byt_cr, - .dfs_table_size = ARRAY_SIZE(dfs_rules_byt_cr), -}; - -static const struct atomisp_freq_scaling_rule dfs_rules_cht[] = { - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_356MHZ, - .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = 
ATOMISP_RUN_MODE_PREVIEW, - }, - { - .width = 1280, - .height = 720, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_SDV, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_356MHZ, - .run_mode = ATOMISP_RUN_MODE_SDV, - }, -}; - -static const struct atomisp_freq_scaling_rule dfs_rules_cht_soc[] = { - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_356MHZ, - .run_mode = ATOMISP_RUN_MODE_VIDEO, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_356MHZ, - .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_320MHZ, - .run_mode = ATOMISP_RUN_MODE_PREVIEW, - }, - { - .width = ISP_FREQ_RULE_ANY, - .height = ISP_FREQ_RULE_ANY, - .fps = ISP_FREQ_RULE_ANY, - .isp_freq = ISP_FREQ_356MHZ, - .run_mode = ATOMISP_RUN_MODE_SDV, - }, -}; - -static const struct atomisp_dfs_config dfs_config_cht = { - .lowest_freq = ISP_FREQ_100MHZ, - .max_freq_at_vmin = ISP_FREQ_356MHZ, - .highest_freq = ISP_FREQ_356MHZ, - .dfs_table = dfs_rules_cht, - .dfs_table_size = ARRAY_SIZE(dfs_rules_cht), -}; - -static const struct atomisp_dfs_config dfs_config_cht_soc = { - .lowest_freq = ISP_FREQ_100MHZ, - .max_freq_at_vmin = ISP_FREQ_356MHZ, - .highest_freq = ISP_FREQ_356MHZ, - .dfs_table = dfs_rules_cht_soc, - .dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc), -}; - -#endif /* __ATOMISP_DFS_TABLES_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c deleted file mode 100644 index 
a815c768bda9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Support for atomisp driver sysfs interface - * - * Copyright (c) 2014 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#include -#include -#include - -#include "atomisp_compat.h" -#include "atomisp_internal.h" -#include "atomisp_ioctl.h" -#include "atomisp_drvfs.h" -#include "hmm/hmm.h" - -/* - * _iunit_debug: - * dbglvl: iunit css driver trace level - * dbgopt: iunit debug option: - * bit 0: binary list - * bit 1: running binary - * bit 2: memory statistic -*/ -struct _iunit_debug { - struct device_driver *drv; - struct atomisp_device *isp; - unsigned int dbglvl; - unsigned int dbgfun; - unsigned int dbgopt; -}; - -#define OPTION_BIN_LIST (1<<0) -#define OPTION_BIN_RUN (1<<1) -#define OPTION_MEM_STAT (1<<2) -#define OPTION_VALID (OPTION_BIN_LIST \ - | OPTION_BIN_RUN \ - | OPTION_MEM_STAT) - -static struct _iunit_debug iunit_debug = { - .dbglvl = 0, - .dbgopt = OPTION_BIN_LIST, -}; - -static inline int iunit_dump_dbgopt(struct atomisp_device *isp, - unsigned int opt) -{ - int ret = 0; - - if (opt & OPTION_VALID) { - if (opt & OPTION_BIN_LIST) { - ret = atomisp_css_dump_blob_infor(); - if (ret) { - dev_err(atomisp_dev, "%s dump blob infor err[ret:%d]\n", - __func__, ret); - goto opt_err; - } - } - - if (opt & OPTION_BIN_RUN) { - if (atomisp_streaming_count(isp)) { - atomisp_css_dump_sp_raw_copy_linecount(true); - atomisp_css_debug_dump_isp_binary(); - } else { - ret = -EPERM; - 
dev_err(atomisp_dev, "%s dump running bin err[ret:%d]\n", - __func__, ret); - goto opt_err; - } - } - - if (opt & OPTION_MEM_STAT) - hmm_show_mem_stat(__func__, __LINE__); - } else { - ret = -EINVAL; - dev_err(atomisp_dev, "%s dump nothing[ret=%d]\n", __func__, - ret); - } - -opt_err: - return ret; -} - -static ssize_t iunit_dbglvl_show(struct device_driver *drv, char *buf) -{ - iunit_debug.dbglvl = atomisp_css_debug_get_dtrace_level(); - return sprintf(buf, "dtrace level:%u\n", iunit_debug.dbglvl); -} - -static ssize_t iunit_dbglvl_store(struct device_driver *drv, const char *buf, - size_t size) -{ - if (kstrtouint(buf, 10, &iunit_debug.dbglvl) - || iunit_debug.dbglvl < 1 - || iunit_debug.dbglvl > 9) { - return -ERANGE; - } - atomisp_css_debug_set_dtrace_level(iunit_debug.dbglvl); - - return size; -} - -static ssize_t iunit_dbgfun_show(struct device_driver *drv, char *buf) -{ - iunit_debug.dbgfun = atomisp_get_css_dbgfunc(); - return sprintf(buf, "dbgfun opt:%u\n", iunit_debug.dbgfun); -} - -static ssize_t iunit_dbgfun_store(struct device_driver *drv, const char *buf, - size_t size) -{ - unsigned int opt; - int ret; - - ret = kstrtouint(buf, 10, &opt); - if (ret) - return ret; - - ret = atomisp_set_css_dbgfunc(iunit_debug.isp, opt); - if (ret) - return ret; - - iunit_debug.dbgfun = opt; - - return size; -} - -static ssize_t iunit_dbgopt_show(struct device_driver *drv, char *buf) -{ - return sprintf(buf, "option:0x%x\n", iunit_debug.dbgopt); -} - -static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf, - size_t size) -{ - unsigned int opt; - int ret; - - ret = kstrtouint(buf, 10, &opt); - if (ret) - return ret; - - iunit_debug.dbgopt = opt; - ret = iunit_dump_dbgopt(iunit_debug.isp, iunit_debug.dbgopt); - if (ret) - return ret; - - return size; -} - -static const struct driver_attribute iunit_drvfs_attrs[] = { - __ATTR(dbglvl, 0644, iunit_dbglvl_show, iunit_dbglvl_store), - __ATTR(dbgfun, 0644, iunit_dbgfun_show, iunit_dbgfun_store), - 
__ATTR(dbgopt, 0644, iunit_dbgopt_show, iunit_dbgopt_store), -}; - -static int iunit_drvfs_create_files(struct device_driver *drv) -{ - int i, ret = 0; - - for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++) - ret |= driver_create_file(drv, &iunit_drvfs_attrs[i]); - - return ret; -} - -static void iunit_drvfs_remove_files(struct device_driver *drv) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++) - driver_remove_file(drv, &iunit_drvfs_attrs[i]); -} - -int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp) -{ - int ret; - - iunit_debug.isp = isp; - iunit_debug.drv = drv; - - ret = iunit_drvfs_create_files(iunit_debug.drv); - if (ret) { - dev_err(atomisp_dev, "drvfs_create_files error: %d\n", ret); - iunit_drvfs_remove_files(iunit_debug.drv); - } - - return ret; -} - -void atomisp_drvfs_exit(void) -{ - iunit_drvfs_remove_files(iunit_debug.drv); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h deleted file mode 100644 index 7c99240d107a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Support for atomisp driver sysfs interface. - * - * Copyright (c) 2014 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __ATOMISP_DRVFS_H__ -#define __ATOMISP_DRVFS_H__ - -int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp); -void atomisp_drvfs_exit(void); - -#endif /* __ATOMISP_DRVFS_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c deleted file mode 100644 index c6d96987561d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#include -#include - -#include -#include - -#include "ia_css.h" - -#include "atomisp_cmd.h" -#include "atomisp_common.h" -#include "atomisp_file.h" -#include "atomisp_internal.h" -#include "atomisp_ioctl.h" - -static void file_work(struct work_struct *work) -{ - struct atomisp_file_device *file_dev = - container_of(work, struct atomisp_file_device, work); - struct atomisp_device *isp = file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - struct atomisp_video_pipe *out_pipe = &asd->video_in; - unsigned short *buf = videobuf_to_vmalloc(out_pipe->outq.bufs[0]); - struct v4l2_mbus_framefmt isp_sink_fmt; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return; - - dev_dbg(isp->dev, ">%s: ready to start streaming\n", __func__); - isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - while (!atomisp_css_isp_has_started()) - usleep_range(1000, 1500); - - atomisp_css_send_input_frame(asd, buf, isp_sink_fmt.width, - isp_sink_fmt.height); - dev_dbg(isp->dev, "<%s: streaming done\n", __func__); -} - -static int file_input_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - - dev_dbg(isp->dev, "%s: enable %d\n", __func__, enable); - if (enable) { - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return 0; - - queue_work(file_dev->work_queue, &file_dev->work); - return 0; - } - cancel_work_sync(&file_dev->work); - return 0; -} - -static int file_input_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = 
file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - struct v4l2_mbus_framefmt *isp_sink_fmt; - if (format->pad) - return -EINVAL; - isp_sink_fmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - fmt->width = isp_sink_fmt->width; - fmt->height = isp_sink_fmt->height; - fmt->code = isp_sink_fmt->code; - - return 0; -} - -static int file_input_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - if (format->pad) - return -EINVAL; - file_input_get_fmt(sd, cfg, format); - if (format->which == V4L2_SUBDEV_FORMAT_TRY) - cfg->try_fmt = *fmt; - return 0; -} - -static int file_input_log_status(struct v4l2_subdev *sd) -{ - /*to fake*/ - return 0; -} - -static int file_input_s_power(struct v4l2_subdev *sd, int on) -{ - /* to fake */ - return 0; -} - -static int file_input_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - /*to fake*/ - return 0; -} - -static int file_input_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) -{ - /*to fake*/ - return 0; -} - -static int file_input_enum_frame_ival(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_interval_enum - *fie) -{ - /*to fake*/ - return 0; -} - -static const struct v4l2_subdev_video_ops file_input_video_ops = { - .s_stream = file_input_s_stream, -}; - -static const struct v4l2_subdev_core_ops file_input_core_ops = { - .log_status = file_input_log_status, - .s_power = file_input_s_power, -}; - -static const struct v4l2_subdev_pad_ops file_input_pad_ops = { - .enum_mbus_code = file_input_enum_mbus_code, - .enum_frame_size = file_input_enum_frame_size, - .enum_frame_interval = file_input_enum_frame_ival, - .get_fmt = 
file_input_get_fmt, - .set_fmt = file_input_set_fmt, -}; - -static const struct v4l2_subdev_ops file_input_ops = { - .core = &file_input_core_ops, - .video = &file_input_video_ops, - .pad = &file_input_pad_ops, -}; - -void -atomisp_file_input_unregister_entities(struct atomisp_file_device *file_dev) -{ - media_entity_cleanup(&file_dev->sd.entity); - v4l2_device_unregister_subdev(&file_dev->sd); -} - -int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev, - struct v4l2_device *vdev) -{ - /* Register the subdev and video nodes. */ - return v4l2_device_register_subdev(vdev, &file_dev->sd); -} - -void atomisp_file_input_cleanup(struct atomisp_device *isp) -{ - struct atomisp_file_device *file_dev = &isp->file_dev; - - if (file_dev->work_queue) { - destroy_workqueue(file_dev->work_queue); - file_dev->work_queue = NULL; - } -} - -int atomisp_file_input_init(struct atomisp_device *isp) -{ - struct atomisp_file_device *file_dev = &isp->file_dev; - struct v4l2_subdev *sd = &file_dev->sd; - struct media_pad *pads = file_dev->pads; - struct media_entity *me = &sd->entity; - - file_dev->isp = isp; - file_dev->work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1); - if (file_dev->work_queue == NULL) { - dev_err(isp->dev, "Failed to initialize file inject workq\n"); - return -ENOMEM; - } - - INIT_WORK(&file_dev->work, file_work); - - v4l2_subdev_init(sd, &file_input_ops); - sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - strcpy(sd->name, "file_input_subdev"); - v4l2_set_subdevdata(sd, file_dev); - - pads[0].flags = MEDIA_PAD_FL_SINK; - me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; - - return media_entity_pads_init(me, 1, pads); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h deleted file mode 100644 index 61fdeb5ee60a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging 
ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __ATOMISP_FILE_H__ -#define __ATOMISP_FILE_H__ - -#include -#include - -struct atomisp_device; - -struct atomisp_file_device { - struct v4l2_subdev sd; - struct atomisp_device *isp; - struct media_pad pads[1]; - - struct workqueue_struct *work_queue; - struct work_struct work; -}; - -void atomisp_file_input_cleanup(struct atomisp_device *isp); -int atomisp_file_input_init(struct atomisp_device *isp); -void atomisp_file_input_unregister_entities( - struct atomisp_file_device *file_dev); -int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev, - struct v4l2_device *vdev); -#endif /* __ATOMISP_FILE_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c deleted file mode 100644 index 693b905547e4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c +++ /dev/null @@ -1,1302 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#include -#include - -#include -#include - -#include "atomisp_cmd.h" -#include "atomisp_common.h" -#include "atomisp_fops.h" -#include "atomisp_internal.h" -#include "atomisp_ioctl.h" -#include "atomisp_compat.h" -#include "atomisp_subdev.h" -#include "atomisp_v4l2.h" -#include "atomisp-regs.h" -#include "hmm/hmm.h" - -#include "hrt/hive_isp_css_mm_hrt.h" - -#include "type_support.h" -#include "device_access/device_access.h" -#include "memory_access/memory_access.h" - -#include "atomisp_acc.h" - -#define ISP_LEFT_PAD 128 /* equal to 2*NWAY */ - -/* - * input image data, and current frame resolution for test - */ -#define ISP_PARAM_MMAP_OFFSET 0xfffff000 - -#define MAGIC_CHECK(is, should) \ - do { \ - if (unlikely((is) != (should))) { \ - pr_err("magic mismatch: %x (expected %x)\n", \ - is, should); \ - BUG(); \ - } \ - } while (0) - -/* - * Videobuf ops - */ -static int atomisp_buf_setup(struct videobuf_queue *vq, unsigned int *count, - unsigned int *size) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - *size = pipe->pix.sizeimage; - - return 0; -} - -static int atomisp_buf_prepare(struct videobuf_queue *vq, - struct videobuf_buffer *vb, - enum v4l2_field field) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - vb->size = pipe->pix.sizeimage; - vb->width = pipe->pix.width; - vb->height = pipe->pix.height; - vb->field = field; - vb->state = VIDEOBUF_PREPARED; - - return 0; -} - -static int atomisp_q_one_metadata_buffer(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id) -{ - struct atomisp_metadata_buf *metadata_buf; - enum atomisp_metadata_type md_type = - atomisp_get_metadata_type(asd, css_pipe_id); - struct list_head 
*metadata_list; - - if (asd->metadata_bufs_in_css[stream_id][css_pipe_id] >= - ATOMISP_CSS_Q_DEPTH) - return 0; /* we have reached CSS queue depth */ - - if (!list_empty(&asd->metadata[md_type])) { - metadata_list = &asd->metadata[md_type]; - } else if (!list_empty(&asd->metadata_ready[md_type])) { - metadata_list = &asd->metadata_ready[md_type]; - } else { - dev_warn(asd->isp->dev, "%s: No metadata buffers available for type %d!\n", - __func__, md_type); - return -EINVAL; - } - - metadata_buf = list_entry(metadata_list->next, - struct atomisp_metadata_buf, list); - list_del_init(&metadata_buf->list); - - if (atomisp_q_metadata_buffer_to_css(asd, metadata_buf, - stream_id, css_pipe_id)) { - list_add(&metadata_buf->list, metadata_list); - return -EINVAL; - } else { - list_add_tail(&metadata_buf->list, - &asd->metadata_in_css[md_type]); - } - asd->metadata_bufs_in_css[stream_id][css_pipe_id]++; - - return 0; -} - -static int atomisp_q_one_s3a_buffer(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id) -{ - struct atomisp_s3a_buf *s3a_buf; - struct list_head *s3a_list; - unsigned int exp_id; - - if (asd->s3a_bufs_in_css[css_pipe_id] >= ATOMISP_CSS_Q_DEPTH) - return 0; /* we have reached CSS queue depth */ - - if (!list_empty(&asd->s3a_stats)) { - s3a_list = &asd->s3a_stats; - } else if (!list_empty(&asd->s3a_stats_ready)) { - s3a_list = &asd->s3a_stats_ready; - } else { - dev_warn(asd->isp->dev, "%s: No s3a buffers available!\n", - __func__); - return -EINVAL; - } - - s3a_buf = list_entry(s3a_list->next, struct atomisp_s3a_buf, list); - list_del_init(&s3a_buf->list); - exp_id = s3a_buf->s3a_data->exp_id; - - hmm_flush_vmap(s3a_buf->s3a_data->data_ptr); - if (atomisp_q_s3a_buffer_to_css(asd, s3a_buf, - stream_id, css_pipe_id)) { - /* got from head, so return back to the head */ - list_add(&s3a_buf->list, s3a_list); - return -EINVAL; - } else { - list_add_tail(&s3a_buf->list, &asd->s3a_stats_in_css); - if 
(s3a_list == &asd->s3a_stats_ready) - dev_warn(asd->isp->dev, "%s: drop one s3a stat which has exp_id %d!\n", - __func__, exp_id); - } - - asd->s3a_bufs_in_css[css_pipe_id]++; - return 0; -} - -static int atomisp_q_one_dis_buffer(struct atomisp_sub_device *asd, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_pipe_id css_pipe_id) -{ - struct atomisp_dis_buf *dis_buf; - unsigned long irqflags; - - if (asd->dis_bufs_in_css >= ATOMISP_CSS_Q_DEPTH) - return 0; /* we have reached CSS queue depth */ - - spin_lock_irqsave(&asd->dis_stats_lock, irqflags); - if (list_empty(&asd->dis_stats)) { - spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags); - dev_warn(asd->isp->dev, "%s: No dis buffers available!\n", - __func__); - return -EINVAL; - } - - dis_buf = list_entry(asd->dis_stats.prev, - struct atomisp_dis_buf, list); - list_del_init(&dis_buf->list); - spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags); - - hmm_flush_vmap(dis_buf->dis_data->data_ptr); - if (atomisp_q_dis_buffer_to_css(asd, dis_buf, - stream_id, css_pipe_id)) { - spin_lock_irqsave(&asd->dis_stats_lock, irqflags); - /* got from tail, so return back to the tail */ - list_add_tail(&dis_buf->list, &asd->dis_stats); - spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags); - return -EINVAL; - } else { - spin_lock_irqsave(&asd->dis_stats_lock, irqflags); - list_add_tail(&dis_buf->list, &asd->dis_stats_in_css); - spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags); - } - - asd->dis_bufs_in_css++; - - return 0; -} - -int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd, - struct atomisp_video_pipe *pipe, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_buffer_type css_buf_type, - enum atomisp_css_pipe_id css_pipe_id) -{ - struct videobuf_vmalloc_memory *vm_mem; - struct atomisp_css_params_with_list *param; - struct atomisp_css_dvs_grid_info *dvs_grid = - atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info); - unsigned long irqflags; - int err = 0; - - while 
(pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) { - struct videobuf_buffer *vb; - - spin_lock_irqsave(&pipe->irq_lock, irqflags); - if (list_empty(&pipe->activeq)) { - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - return -EINVAL; - } - vb = list_entry(pipe->activeq.next, - struct videobuf_buffer, queue); - list_del_init(&vb->queue); - vb->state = VIDEOBUF_ACTIVE; - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - - /* - * If there is a per_frame setting to apply on the buffer, - * do it before buffer en-queueing. - */ - vm_mem = vb->priv; - - param = pipe->frame_params[vb->i]; - if (param) { - atomisp_makeup_css_parameters(asd, - &asd->params.css_param.update_flag, - ¶m->params); - atomisp_apply_css_parameters(asd, ¶m->params); - - if (param->params.update_flag.dz_config && - asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) { - err = atomisp_calculate_real_zoom_region(asd, - ¶m->params.dz_config, css_pipe_id); - if (!err) - atomisp_css_set_dz_config(asd, - ¶m->params.dz_config); - } - atomisp_css_set_isp_config_applied_frame(asd, - vm_mem->vaddr); - atomisp_css_update_isp_params_on_pipe(asd, - asd->stream_env[stream_id].pipes[css_pipe_id]); - asd->params.dvs_6axis = (struct atomisp_css_dvs_6axis *) - param->params.dvs_6axis; - - /* - * WORKAROUND: - * Because the camera halv3 can't ensure to set zoom - * region to per_frame setting and global setting at - * same time and only set zoom region to pre_frame - * setting now.so when the pre_frame setting inculde - * zoom region,I will set it to global setting. 
- */ - if (param->params.update_flag.dz_config && - asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO - && !err) { - memcpy(&asd->params.css_param.dz_config, - ¶m->params.dz_config, - sizeof(struct ia_css_dz_config)); - asd->params.css_param.update_flag.dz_config = - (struct atomisp_dz_config *) - &asd->params.css_param.dz_config; - asd->params.css_update_params_needed = true; - } - } - /* Enqueue buffer */ - err = atomisp_q_video_buffer_to_css(asd, vm_mem, stream_id, - css_buf_type, css_pipe_id); - if (err) { - spin_lock_irqsave(&pipe->irq_lock, irqflags); - list_add_tail(&vb->queue, &pipe->activeq); - vb->state = VIDEOBUF_QUEUED; - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - dev_err(asd->isp->dev, "%s, css q fails: %d\n", - __func__, err); - return -EINVAL; - } - pipe->buffers_in_css++; - - /* enqueue 3A/DIS/metadata buffers */ - if (asd->params.curr_grid_info.s3a_grid.enable && - css_pipe_id == asd->params.s3a_enabled_pipe && - css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME) - atomisp_q_one_s3a_buffer(asd, stream_id, - css_pipe_id); - - if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info. 
- metadata_info.size && - css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME) - atomisp_q_one_metadata_buffer(asd, stream_id, - css_pipe_id); - - if (dvs_grid && dvs_grid->enable && - css_pipe_id == CSS_PIPE_ID_VIDEO && - css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME) - atomisp_q_one_dis_buffer(asd, stream_id, - css_pipe_id); - } - - return 0; -} - -static int atomisp_get_css_buf_type(struct atomisp_sub_device *asd, - enum atomisp_css_pipe_id pipe_id, - uint16_t source_pad) -{ - if (ATOMISP_USE_YUVPP(asd)) { - /* when run ZSL case */ - if (asd->continuous_mode->val && - asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) { - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) - return CSS_BUFFER_TYPE_OUTPUT_FRAME; - else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) - return CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME; - else - return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME; - } - - /*when run SDV case*/ - if (asd->continuous_mode->val && - asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) - return CSS_BUFFER_TYPE_OUTPUT_FRAME; - else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) - return CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME; - else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) - return CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME; - else - return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME; - } - - /*other case: default setting*/ - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE || - source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO || - (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && - asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO)) - return CSS_BUFFER_TYPE_OUTPUT_FRAME; - else - return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME; - } - - if (pipe_id == CSS_PIPE_ID_COPY || - source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE || - source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO || - (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && - asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO)) - return CSS_BUFFER_TYPE_OUTPUT_FRAME; - else - return 
CSS_BUFFER_TYPE_VF_OUTPUT_FRAME; -} - -static int atomisp_qbuffers_to_css_for_all_pipes(struct atomisp_sub_device *asd) -{ - enum atomisp_css_buffer_type buf_type; - enum atomisp_css_pipe_id css_capture_pipe_id = CSS_PIPE_ID_COPY; - enum atomisp_css_pipe_id css_preview_pipe_id = CSS_PIPE_ID_COPY; - enum atomisp_css_pipe_id css_video_pipe_id = CSS_PIPE_ID_COPY; - enum atomisp_input_stream_id input_stream_id; - struct atomisp_video_pipe *capture_pipe; - struct atomisp_video_pipe *preview_pipe; - struct atomisp_video_pipe *video_pipe; - - capture_pipe = &asd->video_out_capture; - preview_pipe = &asd->video_out_preview; - video_pipe = &asd->video_out_video_capture; - - buf_type = atomisp_get_css_buf_type( - asd, css_preview_pipe_id, - atomisp_subdev_source_pad(&preview_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW; - atomisp_q_video_buffers_to_css(asd, preview_pipe, - input_stream_id, - buf_type, css_preview_pipe_id); - - buf_type = atomisp_get_css_buf_type(asd, css_capture_pipe_id, - atomisp_subdev_source_pad(&capture_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_GENERAL; - atomisp_q_video_buffers_to_css(asd, capture_pipe, - input_stream_id, - buf_type, css_capture_pipe_id); - - buf_type = atomisp_get_css_buf_type(asd, css_video_pipe_id, - atomisp_subdev_source_pad(&video_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_VIDEO; - atomisp_q_video_buffers_to_css(asd, video_pipe, - input_stream_id, - buf_type, css_video_pipe_id); - return 0; -} - - -/* queue all available buffers to css */ -int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd) -{ - enum atomisp_css_buffer_type buf_type; - enum atomisp_css_pipe_id css_capture_pipe_id = CSS_PIPE_ID_NUM; - enum atomisp_css_pipe_id css_preview_pipe_id = CSS_PIPE_ID_NUM; - enum atomisp_css_pipe_id css_video_pipe_id = CSS_PIPE_ID_NUM; - enum atomisp_input_stream_id input_stream_id; - struct atomisp_video_pipe *capture_pipe = NULL; - struct atomisp_video_pipe *vf_pipe = NULL; - struct 
atomisp_video_pipe *preview_pipe = NULL; - struct atomisp_video_pipe *video_pipe = NULL; - bool raw_mode = atomisp_is_mbuscode_raw( - asd->fmt[asd->capture_pad].fmt.code); - - if (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num == 2 && - !asd->yuvpp_mode) - return atomisp_qbuffers_to_css_for_all_pipes(asd); - - if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { - video_pipe = &asd->video_out_video_capture; - css_video_pipe_id = CSS_PIPE_ID_VIDEO; - } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { - preview_pipe = &asd->video_out_capture; - css_preview_pipe_id = CSS_PIPE_ID_CAPTURE; - } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - if (asd->continuous_mode->val) { - capture_pipe = &asd->video_out_capture; - vf_pipe = &asd->video_out_vf; - css_capture_pipe_id = CSS_PIPE_ID_CAPTURE; - } - video_pipe = &asd->video_out_video_capture; - preview_pipe = &asd->video_out_preview; - css_video_pipe_id = CSS_PIPE_ID_VIDEO; - css_preview_pipe_id = CSS_PIPE_ID_VIDEO; - } else if (asd->continuous_mode->val) { - capture_pipe = &asd->video_out_capture; - vf_pipe = &asd->video_out_vf; - preview_pipe = &asd->video_out_preview; - - css_preview_pipe_id = CSS_PIPE_ID_PREVIEW; - css_capture_pipe_id = CSS_PIPE_ID_CAPTURE; - } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) { - preview_pipe = &asd->video_out_preview; - css_preview_pipe_id = CSS_PIPE_ID_PREVIEW; - } else { - /* ATOMISP_RUN_MODE_STILL_CAPTURE */ - capture_pipe = &asd->video_out_capture; - if (!raw_mode) - vf_pipe = &asd->video_out_vf; - css_capture_pipe_id = CSS_PIPE_ID_CAPTURE; - } - -#ifdef ISP2401_NEW_INPUT_SYSTEM - if (asd->copy_mode) { - css_capture_pipe_id = CSS_PIPE_ID_COPY; - css_preview_pipe_id = CSS_PIPE_ID_COPY; - css_video_pipe_id = CSS_PIPE_ID_COPY; - } -#endif - - if (asd->yuvpp_mode) { - capture_pipe = &asd->video_out_capture; - video_pipe = &asd->video_out_video_capture; - preview_pipe = &asd->video_out_preview; - css_capture_pipe_id = 
CSS_PIPE_ID_COPY; - css_video_pipe_id = CSS_PIPE_ID_YUVPP; - css_preview_pipe_id = CSS_PIPE_ID_YUVPP; - } - - if (capture_pipe) { - buf_type = atomisp_get_css_buf_type( - asd, css_capture_pipe_id, - atomisp_subdev_source_pad(&capture_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_GENERAL; - - /* - * use yuvpp pipe for SOC camera. - */ - if (ATOMISP_USE_YUVPP(asd)) - css_capture_pipe_id = CSS_PIPE_ID_YUVPP; - - atomisp_q_video_buffers_to_css(asd, capture_pipe, - input_stream_id, - buf_type, css_capture_pipe_id); - } - - if (vf_pipe) { - buf_type = atomisp_get_css_buf_type( - asd, css_capture_pipe_id, - atomisp_subdev_source_pad(&vf_pipe->vdev)); - if (asd->stream_env[ATOMISP_INPUT_STREAM_POSTVIEW].stream) - input_stream_id = ATOMISP_INPUT_STREAM_POSTVIEW; - else - input_stream_id = ATOMISP_INPUT_STREAM_GENERAL; - - /* - * use yuvpp pipe for SOC camera. - */ - if (ATOMISP_USE_YUVPP(asd)) - css_capture_pipe_id = CSS_PIPE_ID_YUVPP; - atomisp_q_video_buffers_to_css(asd, vf_pipe, - input_stream_id, - buf_type, css_capture_pipe_id); - } - - if (preview_pipe) { - buf_type = atomisp_get_css_buf_type( - asd, css_preview_pipe_id, - atomisp_subdev_source_pad(&preview_pipe->vdev)); - if (ATOMISP_SOC_CAMERA(asd) && css_preview_pipe_id == CSS_PIPE_ID_YUVPP) - input_stream_id = ATOMISP_INPUT_STREAM_GENERAL; - /* else for ext isp use case */ - else if (css_preview_pipe_id == CSS_PIPE_ID_YUVPP) - input_stream_id = ATOMISP_INPUT_STREAM_VIDEO; - else if (asd->stream_env[ATOMISP_INPUT_STREAM_PREVIEW].stream) - input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW; - else - input_stream_id = ATOMISP_INPUT_STREAM_GENERAL; - - /* - * use yuvpp pipe for SOC camera. 
- */ - if (ATOMISP_USE_YUVPP(asd)) - css_preview_pipe_id = CSS_PIPE_ID_YUVPP; - - atomisp_q_video_buffers_to_css(asd, preview_pipe, - input_stream_id, - buf_type, css_preview_pipe_id); - } - - if (video_pipe) { - buf_type = atomisp_get_css_buf_type( - asd, css_video_pipe_id, - atomisp_subdev_source_pad(&video_pipe->vdev)); - if (asd->stream_env[ATOMISP_INPUT_STREAM_VIDEO].stream) - input_stream_id = ATOMISP_INPUT_STREAM_VIDEO; - else - input_stream_id = ATOMISP_INPUT_STREAM_GENERAL; - - /* - * use yuvpp pipe for SOC camera. - */ - if (ATOMISP_USE_YUVPP(asd)) - css_video_pipe_id = CSS_PIPE_ID_YUVPP; - - atomisp_q_video_buffers_to_css(asd, video_pipe, - input_stream_id, - buf_type, css_video_pipe_id); - } - - return 0; -} - -static void atomisp_buf_queue(struct videobuf_queue *vq, - struct videobuf_buffer *vb) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - /* - * when a frame buffer meets following conditions, it should be put into - * the waiting list: - * 1. It is not a main output frame, and it has a per-frame parameter - * to go with it. - * 2. It is not a main output frame, and the waiting buffer list is not - * empty, to keep the FIFO sequence of frame buffer processing, it - * is put to waiting list until previous per-frame parameter buffers - * get enqueued. 
- */ - if (!atomisp_is_vf_pipe(pipe) && - (pipe->frame_request_config_id[vb->i] || - !list_empty(&pipe->buffers_waiting_for_param))) - list_add_tail(&vb->queue, &pipe->buffers_waiting_for_param); - else - list_add_tail(&vb->queue, &pipe->activeq); - - vb->state = VIDEOBUF_QUEUED; -} - -static void atomisp_buf_release(struct videobuf_queue *vq, - struct videobuf_buffer *vb) -{ - vb->state = VIDEOBUF_NEEDS_INIT; - atomisp_videobuf_free_buf(vb); -} - -static int atomisp_buf_setup_output(struct videobuf_queue *vq, - unsigned int *count, unsigned int *size) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - *size = pipe->pix.sizeimage; - - return 0; -} - -static int atomisp_buf_prepare_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb, - enum v4l2_field field) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - vb->size = pipe->pix.sizeimage; - vb->width = pipe->pix.width; - vb->height = pipe->pix.height; - vb->field = field; - vb->state = VIDEOBUF_PREPARED; - - return 0; -} - -static void atomisp_buf_queue_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - list_add_tail(&vb->queue, &pipe->activeq_out); - vb->state = VIDEOBUF_QUEUED; -} - -static void atomisp_buf_release_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb) -{ - videobuf_vmalloc_free(vb); - vb->state = VIDEOBUF_NEEDS_INIT; -} - -static const struct videobuf_queue_ops videobuf_qops = { - .buf_setup = atomisp_buf_setup, - .buf_prepare = atomisp_buf_prepare, - .buf_queue = atomisp_buf_queue, - .buf_release = atomisp_buf_release, -}; - -static const struct videobuf_queue_ops videobuf_qops_output = { - .buf_setup = atomisp_buf_setup_output, - .buf_prepare = atomisp_buf_prepare_output, - .buf_queue = atomisp_buf_queue_output, - .buf_release = atomisp_buf_release_output, -}; - -static int atomisp_init_pipe(struct atomisp_video_pipe *pipe) -{ - /* init locks */ - spin_lock_init(&pipe->irq_lock); - - 
videobuf_queue_vmalloc_init(&pipe->capq, &videobuf_qops, NULL, - &pipe->irq_lock, - V4L2_BUF_TYPE_VIDEO_CAPTURE, - V4L2_FIELD_NONE, - sizeof(struct atomisp_buffer), pipe, - NULL); /* ext_lock: NULL */ - - videobuf_queue_vmalloc_init(&pipe->outq, &videobuf_qops_output, NULL, - &pipe->irq_lock, - V4L2_BUF_TYPE_VIDEO_OUTPUT, - V4L2_FIELD_NONE, - sizeof(struct atomisp_buffer), pipe, - NULL); /* ext_lock: NULL */ - - INIT_LIST_HEAD(&pipe->activeq); - INIT_LIST_HEAD(&pipe->activeq_out); - INIT_LIST_HEAD(&pipe->buffers_waiting_for_param); - INIT_LIST_HEAD(&pipe->per_frame_params); - memset(pipe->frame_request_config_id, 0, - VIDEO_MAX_FRAME * sizeof(unsigned int)); - memset(pipe->frame_params, 0, - VIDEO_MAX_FRAME * - sizeof(struct atomisp_css_params_with_list *)); - - return 0; -} - -static void atomisp_dev_init_struct(struct atomisp_device *isp) -{ - unsigned int i; - - isp->sw_contex.file_input = false; - isp->need_gfx_throttle = true; - isp->isp_fatal_error = false; - isp->mipi_frame_size = 0; - - for (i = 0; i < isp->input_cnt; i++) - isp->inputs[i].asd = NULL; - /* - * For Merrifield, frequency is scalable. - * After boot-up, the default frequency is 200MHz. 
- */ - isp->sw_contex.running_freq = ISP_FREQ_200MHZ; -} - -static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd) -{ - v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_STILL_CAPTURE); - memset(&asd->params.css_param, 0, sizeof(asd->params.css_param)); - asd->params.color_effect = V4L2_COLORFX_NONE; - asd->params.bad_pixel_en = true; - asd->params.gdc_cac_en = false; - asd->params.video_dis_en = false; - asd->params.sc_en = false; - asd->params.fpn_en = false; - asd->params.xnr_en = false; - asd->params.false_color = 0; - asd->params.online_process = 1; - asd->params.yuv_ds_en = 0; - /* s3a grid not enabled for any pipe */ - asd->params.s3a_enabled_pipe = CSS_PIPE_ID_NUM; - - asd->params.offline_parm.num_captures = 1; - asd->params.offline_parm.skip_frames = 0; - asd->params.offline_parm.offset = 0; - asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED; - /* Add for channel */ - asd->input_curr = 0; - - asd->mipi_frame_size = 0; - asd->copy_mode = false; - asd->yuvpp_mode = false; - - asd->stream_prepared = false; - asd->high_speed_mode = false; - asd->sensor_array_res.height = 0; - asd->sensor_array_res.width = 0; - atomisp_css_init_struct(asd); -} -/* - * file operation functions - */ -static unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd) -{ - return asd->video_out_preview.users + - asd->video_out_vf.users + - asd->video_out_capture.users + - asd->video_out_video_capture.users + - asd->video_acc.users + - asd->video_in.users; -} - -unsigned int atomisp_dev_users(struct atomisp_device *isp) -{ - unsigned int i, sum; - for (i = 0, sum = 0; i < isp->num_of_streams; i++) - sum += atomisp_subdev_users(&isp->asd[i]); - - return sum; -} - -static int atomisp_open(struct file *file) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = NULL; - struct atomisp_acc_pipe *acc_pipe = NULL; - struct atomisp_sub_device *asd; - bool acc_node = false; - 
int ret; - - dev_dbg(isp->dev, "open device %s\n", vdev->name); - - rt_mutex_lock(&isp->mutex); - - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) { - acc_pipe = atomisp_to_acc_pipe(vdev); - asd = acc_pipe->asd; - } else { - pipe = atomisp_to_video_pipe(vdev); - asd = pipe->asd; - } - asd->subdev.devnode = vdev; - /* Deferred firmware loading case. */ - if (isp->css_env.isp_css_fw.bytes == 0) { - isp->firmware = atomisp_load_firmware(isp); - if (!isp->firmware) { - dev_err(isp->dev, "Failed to load ISP firmware.\n"); - ret = -ENOENT; - goto error; - } - ret = atomisp_css_load_firmware(isp); - if (ret) { - dev_err(isp->dev, "Failed to init css.\n"); - goto error; - } - /* No need to keep FW in memory anymore. */ - release_firmware(isp->firmware); - isp->firmware = NULL; - isp->css_env.isp_css_fw.data = NULL; - } - - if (acc_node && acc_pipe->users) { - dev_dbg(isp->dev, "acc node already opened\n"); - rt_mutex_unlock(&isp->mutex); - return -EBUSY; - } else if (acc_node) { - goto dev_init; - } - - if (!isp->input_cnt) { - dev_err(isp->dev, "no camera attached\n"); - ret = -EINVAL; - goto error; - } - - /* - * atomisp does not allow multiple open - */ - if (pipe->users) { - dev_dbg(isp->dev, "video node already opened\n"); - rt_mutex_unlock(&isp->mutex); - return -EBUSY; - } - - ret = atomisp_init_pipe(pipe); - if (ret) - goto error; - -dev_init: - if (atomisp_dev_users(isp)) { - dev_dbg(isp->dev, "skip init isp in open\n"); - goto init_subdev; - } - - /* runtime power management, turn on ISP */ - ret = pm_runtime_get_sync(vdev->v4l2_dev->dev); - if (ret < 0) { - dev_err(isp->dev, "Failed to power on device\n"); - goto error; - } - - if (dypool_enable) { - ret = hmm_pool_register(dypool_pgnr, HMM_POOL_TYPE_DYNAMIC); - if (ret) - dev_err(isp->dev, "Failed to register dynamic memory pool.\n"); - } - - /* Init ISP */ - if (atomisp_css_init(isp)) { - ret = -EINVAL; - /* Need to clean up CSS init if it fails. 
*/ - goto css_error; - } - - atomisp_dev_init_struct(isp); - - ret = v4l2_subdev_call(isp->flash, core, s_power, 1); - if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD) { - dev_err(isp->dev, "Failed to power-on flash\n"); - goto css_error; - } - -init_subdev: - if (atomisp_subdev_users(asd)) - goto done; - - atomisp_subdev_init_struct(asd); - -done: - - if (acc_node) - acc_pipe->users++; - else - pipe->users++; - rt_mutex_unlock(&isp->mutex); - return 0; - -css_error: - atomisp_css_uninit(isp); -error: - hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC); - pm_runtime_put(vdev->v4l2_dev->dev); - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_release(struct file *file) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe; - struct atomisp_acc_pipe *acc_pipe; - struct atomisp_sub_device *asd; - bool acc_node; - struct v4l2_requestbuffers req; - struct v4l2_subdev_fh fh; - struct v4l2_rect clear_compose = {0}; - int ret = 0; - - v4l2_fh_init(&fh.vfh, vdev); - - req.count = 0; - if (isp == NULL) - return -EBADF; - - mutex_lock(&isp->streamoff_mutex); - rt_mutex_lock(&isp->mutex); - - dev_dbg(isp->dev, "release device %s\n", vdev->name); - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) { - acc_pipe = atomisp_to_acc_pipe(vdev); - asd = acc_pipe->asd; - } else { - pipe = atomisp_to_video_pipe(vdev); - asd = pipe->asd; - } - asd->subdev.devnode = vdev; - if (acc_node) { - acc_pipe->users--; - goto subdev_uninit; - } - pipe->users--; - - if (pipe->capq.streaming) - dev_warn(isp->dev, - "%s: ISP still streaming while closing!", - __func__); - - if (pipe->capq.streaming && - __atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) { - dev_err(isp->dev, - "atomisp_streamoff failed on release, driver bug"); - goto done; - } - - if (pipe->users) - goto done; - - if (__atomisp_reqbufs(file, NULL, &req)) { - dev_err(isp->dev, - "atomisp_reqbufs 
failed on release, driver bug"); - goto done; - } - - if (pipe->outq.bufs[0]) { - mutex_lock(&pipe->outq.vb_lock); - videobuf_queue_cancel(&pipe->outq); - mutex_unlock(&pipe->outq.vb_lock); - } - - /* - * A little trick here: - * file injection input resolution is recorded in the sink pad, - * therefore can not be cleared when releaseing one device node. - * The sink pad setting can only be cleared when all device nodes - * get released. - */ - if (!isp->sw_contex.file_input && asd->fmt_auto->val) { - struct v4l2_mbus_framefmt isp_sink_fmt = { 0 }; - atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt); - } -subdev_uninit: - if (atomisp_subdev_users(asd)) - goto done; - - /* clear the sink pad for file input */ - if (isp->sw_contex.file_input && asd->fmt_auto->val) { - struct v4l2_mbus_framefmt isp_sink_fmt = { 0 }; - atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt); - } - - atomisp_css_free_stat_buffers(asd); - atomisp_free_internal_buffers(asd); - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - core, s_power, 0); - if (ret) - dev_warn(isp->dev, "Failed to power-off sensor\n"); - - /* clear the asd field to show this camera is not used */ - isp->inputs[asd->input_curr].asd = NULL; - asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; - - if (atomisp_dev_users(isp)) - goto done; - - atomisp_acc_release(asd); - - atomisp_destroy_pipes_stream_force(asd); - atomisp_css_uninit(isp); - - if (defer_fw_load) { - atomisp_css_unload_firmware(isp); - isp->css_env.isp_css_fw.data = NULL; - isp->css_env.isp_css_fw.bytes = 0; - } - - hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC); - - ret = v4l2_subdev_call(isp->flash, core, s_power, 0); - if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD) - dev_warn(isp->dev, "Failed to power-off flash\n"); - - if (pm_runtime_put_sync(vdev->v4l2_dev->dev) < 0) - dev_err(isp->dev, "Failed to power off 
device\n"); - -done: - if (!acc_node) { - atomisp_subdev_set_selection(&asd->subdev, fh.pad, - V4L2_SUBDEV_FORMAT_ACTIVE, - atomisp_subdev_source_pad(vdev), - V4L2_SEL_TGT_COMPOSE, 0, - &clear_compose); - } - rt_mutex_unlock(&isp->mutex); - mutex_unlock(&isp->streamoff_mutex); - - return 0; -} - -/* - * Memory help functions for image frame and private parameters - */ -static int do_isp_mm_remap(struct atomisp_device *isp, - struct vm_area_struct *vma, - ia_css_ptr isp_virt, u32 host_virt, u32 pgnr) -{ - u32 pfn; - - while (pgnr) { - pfn = hmm_virt_to_phys(isp_virt) >> PAGE_SHIFT; - if (remap_pfn_range(vma, host_virt, pfn, - PAGE_SIZE, PAGE_SHARED)) { - dev_err(isp->dev, "remap_pfn_range err.\n"); - return -EAGAIN; - } - - isp_virt += PAGE_SIZE; - host_virt += PAGE_SIZE; - pgnr--; - } - - return 0; -} - -static int frame_mmap(struct atomisp_device *isp, - const struct atomisp_css_frame *frame, struct vm_area_struct *vma) -{ - ia_css_ptr isp_virt; - u32 host_virt; - u32 pgnr; - - if (!frame) { - dev_err(isp->dev, "%s: NULL frame pointer.\n", __func__); - return -EINVAL; - } - - host_virt = vma->vm_start; - isp_virt = frame->data; - atomisp_get_frame_pgnr(isp, frame, &pgnr); - - if (do_isp_mm_remap(isp, vma, isp_virt, host_virt, pgnr)) - return -EAGAIN; - - return 0; -} - -int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q, - struct vm_area_struct *vma) -{ - u32 offset = vma->vm_pgoff << PAGE_SHIFT; - int ret = -EINVAL, i; - struct atomisp_device *isp = - ((struct atomisp_video_pipe *)(q->priv_data))->isp; - struct videobuf_vmalloc_memory *vm_mem; - struct videobuf_mapping *map; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) { - dev_err(isp->dev, "map appl bug: PROT_WRITE and MAP_SHARED are required\n"); - return -EINVAL; - } - - mutex_lock(&q->vb_lock); - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - struct videobuf_buffer *buf = q->bufs[i]; - if (buf == NULL) - continue; - - map = 
kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); - if (!map) { - mutex_unlock(&q->vb_lock); - return -ENOMEM; - } - - buf->map = map; - map->q = q; - - buf->baddr = vma->vm_start; - - if (buf && buf->memory == V4L2_MEMORY_MMAP && - buf->boff == offset) { - vm_mem = buf->priv; - ret = frame_mmap(isp, vm_mem->vaddr, vma); - vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP; - break; - } - } - mutex_unlock(&q->vb_lock); - - return ret; -} - -/* The input frame contains left and right padding that need to be removed. - * There is always ISP_LEFT_PAD padding on the left side. - * There is also padding on the right (padded_width - width). - */ -static int remove_pad_from_frame(struct atomisp_device *isp, - struct atomisp_css_frame *in_frame, __u32 width, __u32 height) -{ - unsigned int i; - unsigned short *buffer; - int ret = 0; - ia_css_ptr load = in_frame->data; - ia_css_ptr store = load; - - buffer = kmalloc(width*sizeof(load), GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - load += ISP_LEFT_PAD; - for (i = 0; i < height; i++) { - ret = hmm_load(load, buffer, width*sizeof(load)); - if (ret < 0) - goto remove_pad_error; - - ret = hmm_store(store, buffer, width*sizeof(store)); - if (ret < 0) - goto remove_pad_error; - - load += in_frame->info.padded_width; - store += width; - } - -remove_pad_error: - kfree(buffer); - return ret; -} - -static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct atomisp_css_frame *raw_virt_addr; - u32 start = vma->vm_start; - u32 end = vma->vm_end; - u32 size = end - start; - u32 origin_size, new_size; - int ret; - - if (!(vma->vm_flags & (VM_WRITE | VM_READ))) - return -EACCES; - - rt_mutex_lock(&isp->mutex); - - if (!(vma->vm_flags & VM_SHARED)) { - /* Map private buffer. 
- * Set VM_SHARED to the flags since we need - * to map the buffer page by page. - * Without VM_SHARED, remap_pfn_range() treats - * this kind of mapping as invalid. - */ - vma->vm_flags |= VM_SHARED; - ret = hmm_mmap(vma, vma->vm_pgoff << PAGE_SHIFT); - rt_mutex_unlock(&isp->mutex); - return ret; - } - - /* mmap for ISP offline raw data */ - if (atomisp_subdev_source_pad(vdev) - == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE && - vma->vm_pgoff == (ISP_PARAM_MMAP_OFFSET >> PAGE_SHIFT)) { - new_size = pipe->pix.width * pipe->pix.height * 2; - if (asd->params.online_process != 0) { - ret = -EINVAL; - goto error; - } - raw_virt_addr = asd->raw_output_frame; - if (raw_virt_addr == NULL) { - dev_err(isp->dev, "Failed to request RAW frame\n"); - ret = -EINVAL; - goto error; - } - - ret = remove_pad_from_frame(isp, raw_virt_addr, - pipe->pix.width, pipe->pix.height); - if (ret < 0) { - dev_err(isp->dev, "remove pad failed.\n"); - goto error; - } - origin_size = raw_virt_addr->data_bytes; - raw_virt_addr->data_bytes = new_size; - - if (size != PAGE_ALIGN(new_size)) { - dev_err(isp->dev, "incorrect size for mmap ISP Raw Frame\n"); - ret = -EINVAL; - goto error; - } - - if (frame_mmap(isp, raw_virt_addr, vma)) { - dev_err(isp->dev, "frame_mmap failed.\n"); - raw_virt_addr->data_bytes = origin_size; - ret = -EAGAIN; - goto error; - } - raw_virt_addr->data_bytes = origin_size; - vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP; - rt_mutex_unlock(&isp->mutex); - return 0; - } - - /* - * mmap for normal frames - */ - if (size != pipe->pix.sizeimage) { - dev_err(isp->dev, "incorrect size for mmap ISP frames\n"); - ret = -EINVAL; - goto error; - } - rt_mutex_unlock(&isp->mutex); - - return atomisp_videobuf_mmap_mapper(&pipe->capq, vma); - -error: - rt_mutex_unlock(&isp->mutex); - - return ret; -} - -static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); 
- - return videobuf_mmap_mapper(&pipe->outq, vma); -} - -static __poll_t atomisp_poll(struct file *file, - struct poll_table_struct *pt) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - rt_mutex_lock(&isp->mutex); - if (pipe->capq.streaming != 1) { - rt_mutex_unlock(&isp->mutex); - return EPOLLERR; - } - rt_mutex_unlock(&isp->mutex); - - return videobuf_poll_stream(file, &pipe->capq, pt); -} - -const struct v4l2_file_operations atomisp_fops = { - .owner = THIS_MODULE, - .open = atomisp_open, - .release = atomisp_release, - .mmap = atomisp_mmap, - .unlocked_ioctl = video_ioctl2, -#ifdef CONFIG_COMPAT - /* - * There are problems with this code. Disable this for now. - .compat_ioctl32 = atomisp_compat_ioctl32, - */ -#endif - .poll = atomisp_poll, -}; - -const struct v4l2_file_operations atomisp_file_fops = { - .owner = THIS_MODULE, - .open = atomisp_open, - .release = atomisp_release, - .mmap = atomisp_file_mmap, - .unlocked_ioctl = video_ioctl2, -#ifdef CONFIG_COMPAT - /* - * There are problems with this code. Disable this for now. - .compat_ioctl32 = atomisp_compat_ioctl32, - */ -#endif - .poll = atomisp_poll, -}; - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h deleted file mode 100644 index 2faab3429d43..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __ATOMISP_FOPS_H__ -#define __ATOMISP_FOPS_H__ -#include "atomisp_subdev.h" - -int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd, - struct atomisp_video_pipe *pipe, - enum atomisp_input_stream_id stream_id, - enum atomisp_css_buffer_type css_buf_type, - enum atomisp_css_pipe_id css_pipe_id); - -unsigned int atomisp_dev_users(struct atomisp_device *isp); -unsigned int atomisp_sub_dev_users(struct atomisp_sub_device *asd); - -/* - * Memory help functions for image frame and private parameters - */ - -int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q, - struct vm_area_struct *vma); - -int atomisp_qbuf_to_css(struct atomisp_device *isp, - struct atomisp_video_pipe *pipe, - struct videobuf_buffer *vb); - -int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd); - -extern const struct v4l2_file_operations atomisp_fops; - -extern bool defer_fw_load; - -#endif /* __ATOMISP_FOPS_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h deleted file mode 100644 index 55ba185b43a0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#ifndef _atomisp_helper_h_ -#define _atomisp_helper_h_ -extern void __iomem *atomisp_io_base; - -static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address) -{ - void __iomem *ret = atomisp_io_base + (address & 0x003FFFFF); - return ret; -} -#endif - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h deleted file mode 100644 index dc476a3dd271..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#ifndef __ATOMISP_INTERNAL_H__ -#define __ATOMISP_INTERNAL_H__ - -#include "../../include/linux/atomisp_platform.h" -#include -#include -#include -#include - -#include -#include - -#ifndef ISP2401 -#include "ia_css_types.h" -#include "sh_css_legacy.h" -#else -/*#include "ia_css_types.h"*/ -/*#include "sh_css_legacy.h"*/ -#endif - -#include "atomisp_csi2.h" -#include "atomisp_file.h" -#include "atomisp_subdev.h" -#include "atomisp_tpg.h" -#include "atomisp_compat.h" - -#include "gp_device.h" -#include "irq.h" -#include - -#define V4L2_EVENT_FRAME_END 5 - -#define IS_HWREVISION(isp, rev) \ - (((isp)->media_dev.hw_revision & ATOMISP_HW_REVISION_MASK) == \ - ((rev) << ATOMISP_HW_REVISION_SHIFT)) - -#define MAX_STREAM_NUM 2 - -#define ATOMISP_PCI_DEVICE_SOC_MASK 0xfff8 -/* MRFLD with 0x1178: ISP freq can burst to 457MHz */ -#define ATOMISP_PCI_DEVICE_SOC_MRFLD 0x1178 -/* MRFLD with 0x1179: max ISP freq limited to 400MHz */ -#define ATOMISP_PCI_DEVICE_SOC_MRFLD_1179 0x1179 -/* MRFLD with 0x117a: max ISP freq is 400MHz and max freq at Vmin is 200MHz */ -#define ATOMISP_PCI_DEVICE_SOC_MRFLD_117A 0x117a -#define ATOMISP_PCI_DEVICE_SOC_BYT 0x0f38 -#define ATOMISP_PCI_DEVICE_SOC_ANN 0x1478 -#define ATOMISP_PCI_DEVICE_SOC_CHT 0x22b8 - -#define ATOMISP_PCI_REV_MRFLD_A0_MAX 0 -#define ATOMISP_PCI_REV_BYT_A0_MAX 4 - -#define ATOM_ISP_STEP_WIDTH 2 -#define ATOM_ISP_STEP_HEIGHT 2 - -#define ATOM_ISP_MIN_WIDTH 4 -#define ATOM_ISP_MIN_HEIGHT 4 -#define ATOM_ISP_MAX_WIDTH UINT_MAX -#define ATOM_ISP_MAX_HEIGHT UINT_MAX - -/* sub-QCIF resolution */ -#define ATOM_RESOLUTION_SUBQCIF_WIDTH 128 -#define ATOM_RESOLUTION_SUBQCIF_HEIGHT 96 - -#define ATOM_ISP_MAX_WIDTH_TMP 1280 -#define ATOM_ISP_MAX_HEIGHT_TMP 720 - -#define ATOM_ISP_I2C_BUS_1 4 -#define ATOM_ISP_I2C_BUS_2 5 - -#define ATOM_ISP_POWER_DOWN 0 -#define ATOM_ISP_POWER_UP 1 - -#define ATOM_ISP_MAX_INPUTS 4 - -#define ATOMISP_SC_TYPE_SIZE 2 - -#define ATOMISP_ISP_TIMEOUT_DURATION (2 * HZ) -#define 
ATOMISP_EXT_ISP_TIMEOUT_DURATION (6 * HZ) -#define ATOMISP_ISP_FILE_TIMEOUT_DURATION (60 * HZ) -#define ATOMISP_WDT_KEEP_CURRENT_DELAY 0 -#define ATOMISP_ISP_MAX_TIMEOUT_COUNT 2 -#define ATOMISP_CSS_STOP_TIMEOUT_US 200000 - -#define ATOMISP_CSS_Q_DEPTH 3 -#define ATOMISP_CSS_EVENTS_MAX 16 -#define ATOMISP_CONT_RAW_FRAMES 15 -#define ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL 8 -#define ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL 8 - -#define ATOMISP_DELAYED_INIT_NOT_QUEUED 0 -#define ATOMISP_DELAYED_INIT_QUEUED 1 -#define ATOMISP_DELAYED_INIT_DONE 2 - -#define ATOMISP_CALC_CSS_PREV_OVERLAP(lines) \ - ((lines) * 38 / 100 & 0xfffffe) - -/* - * Define how fast CPU should be able to serve ISP interrupts. - * The bigger the value, the higher risk that the ISP is not - * triggered sufficiently fast for it to process image during - * vertical blanking time, increasing risk of dropped frames. - * 1000 us is a reasonable value considering that the processing - * time is typically ~2000 us. - */ -#define ATOMISP_MAX_ISR_LATENCY 1000 - -/* Add new YUVPP pipe for SOC sensor. */ -#define ATOMISP_CSS_SUPPORT_YUVPP 1 - -#define ATOMISP_CSS_OUTPUT_SECOND_INDEX 1 -#define ATOMISP_CSS_OUTPUT_DEFAULT_INDEX 0 - -/* - * ATOMISP_SOC_CAMERA - * This is to differentiate between ext-isp and soc camera in - * Moorefield/Baytrail platform. 
- */ -#define ATOMISP_SOC_CAMERA(asd) \ - (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA \ - && asd->isp->inputs[asd->input_curr].camera_caps-> \ - sensor[asd->sensor_curr].stream_num == 1) - -#define ATOMISP_USE_YUVPP(asd) \ - (ATOMISP_SOC_CAMERA(asd) && ATOMISP_CSS_SUPPORT_YUVPP && \ - !asd->copy_mode) - -#define ATOMISP_DEPTH_SENSOR_STREAMON_COUNT 2 - -#define ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR 0 -#define ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR 1 - -#ifdef ISP2401 -#define ATOMISP_ION_DEVICE_FD_OFFSET 16 -#define ATOMISP_ION_SHARED_FD_MASK (0xFFFF) -#define ATOMISP_ION_DEVICE_FD_MASK (~ATOMISP_ION_SHARED_FD_MASK) -#define ION_FD_UNSET (-1) - -#endif -#define DIV_NEAREST_STEP(n, d, step) \ - round_down((2 * (n) + (d) * (step))/(2 * (d)), (step)) - -struct atomisp_input_subdev { - unsigned int type; - enum atomisp_camera_port port; - struct v4l2_subdev *camera; - struct v4l2_subdev *motor; - struct v4l2_frmsizeenum frame_size; - - /* - * To show this resource is used by - * which stream, in ISP multiple stream mode - */ - struct atomisp_sub_device *asd; - - const struct atomisp_camera_caps *camera_caps; - int sensor_index; -}; - -enum atomisp_dfs_mode { - ATOMISP_DFS_MODE_AUTO = 0, - ATOMISP_DFS_MODE_LOW, - ATOMISP_DFS_MODE_MAX, -}; - -struct atomisp_regs { - /* PCI config space info */ - u16 pcicmdsts; - u32 ispmmadr; - u32 msicap; - u32 msi_addr; - u16 msi_data; - u8 intr; - u32 interrupt_control; - u32 pmcs; - u32 cg_dis; - u32 i_control; - - /* I-Unit PHY related info */ - u32 csi_rcomp_config; - u32 csi_afe_dly; - u32 csi_control; - - /* New for MRFLD */ - u32 csi_afe_rcomp_config; - u32 csi_afe_hs_control; - u32 csi_deadline_control; - u32 csi_access_viol; -}; - -struct atomisp_sw_contex { - bool file_input; - int power_state; - int running_freq; -}; - - -#define ATOMISP_DEVICE_STREAMING_DISABLED 0 -#define ATOMISP_DEVICE_STREAMING_ENABLED 1 -#define ATOMISP_DEVICE_STREAMING_STOPPING 2 - -/* - * ci device struct - */ -struct atomisp_device { - struct 
pci_dev *pdev; - struct device *dev; - struct v4l2_device v4l2_dev; - struct media_device media_dev; - struct atomisp_platform_data *pdata; - void *mmu_l1_base; - const struct firmware *firmware; - - struct pm_qos_request pm_qos; - s32 max_isr_latency; - - /* - * ISP modules - * Multiple streams are represents by multiple - * atomisp_sub_device instances - */ - struct atomisp_sub_device *asd; - /* - * this will be assiged dyanamically. - * For Merr/BTY(ISP2400), 2 streams are supported. - */ - unsigned int num_of_streams; - - struct atomisp_mipi_csi2_device csi2_port[ATOMISP_CAMERA_NR_PORTS]; - struct atomisp_tpg_device tpg; - struct atomisp_file_device file_dev; - - /* Purpose of mutex is to protect and serialize use of isp data - * structures and css API calls. */ - struct rt_mutex mutex; - /* - * Serialise streamoff: mutex is dropped during streamoff to - * cancel the watchdog queue. MUST be acquired BEFORE - * "mutex". - */ - struct mutex streamoff_mutex; - - unsigned int input_cnt; - struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS]; - struct v4l2_subdev *flash; - struct v4l2_subdev *motor; - - struct atomisp_regs saved_regs; - struct atomisp_sw_contex sw_contex; - struct atomisp_css_env css_env; - - /* isp timeout status flag */ - bool isp_timeout; - bool isp_fatal_error; - struct workqueue_struct *wdt_work_queue; - struct work_struct wdt_work; -#ifndef ISP2401 - atomic_t wdt_count; -#endif - atomic_t wdt_work_queued; - - spinlock_t lock; /* Just for streaming below */ - - bool need_gfx_throttle; - - unsigned int mipi_frame_size; - const struct atomisp_dfs_config *dfs; - unsigned int hpll_freq; - - bool css_initialized; -}; - -#define v4l2_dev_to_atomisp_device(dev) \ - container_of(dev, struct atomisp_device, v4l2_dev) - -extern struct device *atomisp_dev; - -#define atomisp_is_wdt_running(a) timer_pending(&(a)->wdt) -#ifdef ISP2401 -extern void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, - unsigned int delay); -#endif -extern void 
atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay); -#ifndef ISP2401 -extern void atomisp_wdt_start(struct atomisp_sub_device *asd); -#else -extern void atomisp_wdt_start(struct atomisp_video_pipe *pipe); -extern void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync); -#endif -extern void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync); - -#endif /* __ATOMISP_INTERNAL_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c deleted file mode 100644 index 8c67aea67b6b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c +++ /dev/null @@ -1,3123 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#include -#include - - -#include -#include -#include - -#include "atomisp_acc.h" -#include "atomisp_cmd.h" -#include "atomisp_common.h" -#include "atomisp_fops.h" -#include "atomisp_internal.h" -#include "atomisp_ioctl.h" -#include "atomisp-regs.h" -#include "atomisp_compat.h" - -#include "sh_css_hrt.h" - -#include "gp_device.h" -#include "device_access.h" -#include "irq.h" - -#include "hrt/hive_isp_css_mm_hrt.h" - -/* for v4l2_capability */ -static const char *DRIVER = "atomisp"; /* max size 15 */ -static const char *CARD = "ATOM ISP"; /* max size 31 */ -static const char *BUS_INFO = "PCI-3"; /* max size 31 */ - -/* - * FIXME: ISP should not know beforehand all CIDs supported by sensor. - * Instead, it needs to propagate to sensor unkonwn CIDs. - */ -static struct v4l2_queryctrl ci_v4l2_controls[] = { - { - .id = V4L2_CID_AUTO_WHITE_BALANCE, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Automatic White Balance", - .minimum = 0, - .maximum = 1, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_RED_BALANCE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Red Balance", - .minimum = 0x00, - .maximum = 0xff, - .step = 1, - .default_value = 0x00, - }, - { - .id = V4L2_CID_BLUE_BALANCE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Blue Balance", - .minimum = 0x00, - .maximum = 0xff, - .step = 1, - .default_value = 0x00, - }, - { - .id = V4L2_CID_GAMMA, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Gamma", - .minimum = 0x00, - .maximum = 0xff, - .step = 1, - .default_value = 0x00, - }, - { - .id = V4L2_CID_POWER_LINE_FREQUENCY, - .type = V4L2_CTRL_TYPE_MENU, - .name = "Light frequency filter", - .minimum = 1, - .maximum = 2, - .step = 1, - .default_value = 1, - }, - { - .id = V4L2_CID_COLORFX, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Image Color Effect", - .minimum = 0, - .maximum = 9, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_COLORFX_CBCR, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Image Color Effect CbCr", - .minimum = 0, - 
.maximum = 0xffff, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Bad Pixel Correction", - .minimum = 0, - .maximum = 1, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "GDC/CAC", - .minimum = 0, - .maximum = 1, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_ATOMISP_VIDEO_STABLIZATION, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Video Stablization", - .minimum = 0, - .maximum = 1, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_ATOMISP_FIXED_PATTERN_NR, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Fixed Pattern Noise Reduction", - .minimum = 0, - .maximum = 1, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "False Color Correction", - .minimum = 0, - .maximum = 1, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_REQUEST_FLASH, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Request flash frames", - .minimum = 0, - .maximum = 10, - .step = 1, - .default_value = 1, - }, - { - .id = V4L2_CID_ATOMISP_LOW_LIGHT, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Low light mode", - .minimum = 0, - .maximum = 1, - .step = 1, - .default_value = 1, - }, - { - .id = V4L2_CID_BIN_FACTOR_HORZ, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Horizontal binning factor", - .minimum = 0, - .maximum = 10, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_BIN_FACTOR_VERT, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Vertical binning factor", - .minimum = 0, - .maximum = 10, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_2A_STATUS, - .type = V4L2_CTRL_TYPE_BITMASK, - .name = "AE and AWB status", - .minimum = 0, - .maximum = V4L2_2A_STATUS_AE_READY | V4L2_2A_STATUS_AWB_READY, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_EXPOSURE, - .type = 
V4L2_CTRL_TYPE_INTEGER, - .name = "exposure", - .minimum = -4, - .maximum = 4, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_EXPOSURE_ZONE_NUM, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "one-time exposure zone number", - .minimum = 0x0, - .maximum = 0xffff, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Exposure auto priority", - .minimum = V4L2_EXPOSURE_AUTO, - .maximum = V4L2_EXPOSURE_APERTURE_PRIORITY, - .step = 1, - .default_value = V4L2_EXPOSURE_AUTO, - }, - { - .id = V4L2_CID_SCENE_MODE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "scene mode", - .minimum = 0, - .maximum = 13, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_ISO_SENSITIVITY, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "iso", - .minimum = -4, - .maximum = 4, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_ISO_SENSITIVITY_AUTO, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "iso mode", - .minimum = V4L2_ISO_SENSITIVITY_MANUAL, - .maximum = V4L2_ISO_SENSITIVITY_AUTO, - .step = 1, - .default_value = V4L2_ISO_SENSITIVITY_AUTO, - }, - { - .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "white balance", - .minimum = 0, - .maximum = 9, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_EXPOSURE_METERING, - .type = V4L2_CTRL_TYPE_MENU, - .name = "metering", - .minimum = 0, - .maximum = 3, - .step = 1, - .default_value = 1, - }, - { - .id = V4L2_CID_3A_LOCK, - .type = V4L2_CTRL_TYPE_BITMASK, - .name = "3a lock", - .minimum = 0, - .maximum = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE - | V4L2_LOCK_FOCUS, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_TEST_PATTERN, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Test Pattern", - .minimum = 0, - .maximum = 0xffff, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_TEST_PATTERN_COLOR_R, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Test Pattern Solid Color R", - 
.minimum = INT_MIN, - .maximum = INT_MAX, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_TEST_PATTERN_COLOR_GR, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Test Pattern Solid Color GR", - .minimum = INT_MIN, - .maximum = INT_MAX, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_TEST_PATTERN_COLOR_GB, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Test Pattern Solid Color GB", - .minimum = INT_MIN, - .maximum = INT_MAX, - .step = 1, - .default_value = 0, - }, - { - .id = V4L2_CID_TEST_PATTERN_COLOR_B, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Test Pattern Solid Color B", - .minimum = INT_MIN, - .maximum = INT_MAX, - .step = 1, - .default_value = 0, - }, -}; -static const u32 ctrls_num = ARRAY_SIZE(ci_v4l2_controls); - -/* - * supported V4L2 fmts and resolutions - */ -const struct atomisp_format_bridge atomisp_output_fmts[] = { - { - .pixelformat = V4L2_PIX_FMT_YUV420, - .depth = 12, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV420, - .sh_fmt = CSS_FRAME_FORMAT_YUV420, - .description = "YUV420, planar", - .planar = true - }, { - .pixelformat = V4L2_PIX_FMT_YVU420, - .depth = 12, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_YVU420, - .sh_fmt = CSS_FRAME_FORMAT_YV12, - .description = "YVU420, planar", - .planar = true - }, { - .pixelformat = V4L2_PIX_FMT_YUV422P, - .depth = 16, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV422P, - .sh_fmt = CSS_FRAME_FORMAT_YUV422, - .description = "YUV422, planar", - .planar = true - }, { - .pixelformat = V4L2_PIX_FMT_YUV444, - .depth = 24, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV444, - .sh_fmt = CSS_FRAME_FORMAT_YUV444, - .description = "YUV444" - }, { - .pixelformat = V4L2_PIX_FMT_NV12, - .depth = 12, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV12, - .sh_fmt = CSS_FRAME_FORMAT_NV12, - .description = "NV12, Y-plane, CbCr interleaved", - .planar = true - }, { - .pixelformat = V4L2_PIX_FMT_NV21, - .depth = 12, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV21, - .sh_fmt = CSS_FRAME_FORMAT_NV21, - .description = "NV21, Y-plane, CbCr interleaved", 
- .planar = true - }, { - .pixelformat = V4L2_PIX_FMT_NV16, - .depth = 16, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV16, - .sh_fmt = CSS_FRAME_FORMAT_NV16, - .description = "NV16, Y-plane, CbCr interleaved", - .planar = true - }, { - .pixelformat = V4L2_PIX_FMT_YUYV, - .depth = 16, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUYV, - .sh_fmt = CSS_FRAME_FORMAT_YUYV, - .description = "YUYV, interleaved" - }, { - .pixelformat = V4L2_PIX_FMT_UYVY, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16, - .sh_fmt = CSS_FRAME_FORMAT_UYVY, - .description = "UYVY, interleaved" - }, { /* This one is for parallel sensors! DO NOT USE! */ - .pixelformat = V4L2_PIX_FMT_UYVY, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8, - .sh_fmt = CSS_FRAME_FORMAT_UYVY, - .description = "UYVY, interleaved" - }, { - .pixelformat = V4L2_PIX_FMT_SBGGR16, - .depth = 16, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_SBGGR16, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 16" - }, { - .pixelformat = V4L2_PIX_FMT_SBGGR8, - .depth = 8, - .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 8" - }, { - .pixelformat = V4L2_PIX_FMT_SGBRG8, - .depth = 8, - .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 8" - }, { - .pixelformat = V4L2_PIX_FMT_SGRBG8, - .depth = 8, - .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 8" - }, { - .pixelformat = V4L2_PIX_FMT_SRGGB8, - .depth = 8, - .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 8" - }, { - .pixelformat = V4L2_PIX_FMT_SBGGR10, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 10" - }, { - .pixelformat = V4L2_PIX_FMT_SGBRG10, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 10" - }, { - .pixelformat = V4L2_PIX_FMT_SGRBG10, - .depth = 16, - 
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 10" - }, { - .pixelformat = V4L2_PIX_FMT_SRGGB10, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 10" - }, { - .pixelformat = V4L2_PIX_FMT_SBGGR12, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 12" - }, { - .pixelformat = V4L2_PIX_FMT_SGBRG12, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 12" - }, { - .pixelformat = V4L2_PIX_FMT_SGRBG12, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 12" - }, { - .pixelformat = V4L2_PIX_FMT_SRGGB12, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12, - .sh_fmt = CSS_FRAME_FORMAT_RAW, - .description = "Bayer 12" - }, { - .pixelformat = V4L2_PIX_FMT_RGB32, - .depth = 32, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_RGB32, - .sh_fmt = CSS_FRAME_FORMAT_RGBA888, - .description = "32 RGB 8-8-8-8" - }, { - .pixelformat = V4L2_PIX_FMT_RGB565, - .depth = 16, - .mbus_code = MEDIA_BUS_FMT_BGR565_2X8_LE, - .sh_fmt = CSS_FRAME_FORMAT_RGB565, - .description = "16 RGB 5-6-5" - }, { - .pixelformat = V4L2_PIX_FMT_JPEG, - .depth = 8, - .mbus_code = MEDIA_BUS_FMT_JPEG_1X8, - .sh_fmt = CSS_FRAME_FORMAT_BINARY_8, - .description = "JPEG" - }, -#if 0 - { - /* This is a custom format being used by M10MO to send the RAW data */ - .pixelformat = V4L2_PIX_FMT_CUSTOM_M10MO_RAW, - .depth = 8, - .mbus_code = V4L2_MBUS_FMT_CUSTOM_M10MO_RAW, - .sh_fmt = CSS_FRAME_FORMAT_BINARY_8, - .description = "Custom RAW for M10MO" - }, -#endif -}; - -const struct atomisp_format_bridge *atomisp_get_format_bridge( - unsigned int pixelformat) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) { - if (atomisp_output_fmts[i].pixelformat == pixelformat) - return &atomisp_output_fmts[i]; 
- } - - return NULL; -} - -const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus( - u32 mbus_code) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) { - if (mbus_code == atomisp_output_fmts[i].mbus_code) - return &atomisp_output_fmts[i]; - } - - return NULL; -} - -/* - * v4l2 ioctls - * return ISP capabilities - * - * FIXME: capabilities should be different for video0/video2/video3 - */ -static int atomisp_querycap(struct file *file, void *fh, - struct v4l2_capability *cap) -{ - memset(cap, 0, sizeof(struct v4l2_capability)); - - WARN_ON(sizeof(DRIVER) > sizeof(cap->driver) || - sizeof(CARD) > sizeof(cap->card) || - sizeof(BUS_INFO) > sizeof(cap->bus_info)); - - strncpy(cap->driver, DRIVER, sizeof(cap->driver) - 1); - strncpy(cap->card, CARD, sizeof(cap->card) - 1); - strncpy(cap->bus_info, BUS_INFO, sizeof(cap->card) - 1); - - cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT; - cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; - return 0; -} - -/* - * enum input are used to check primary/secondary camera - */ -static int atomisp_enum_input(struct file *file, void *fh, - struct v4l2_input *input) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int index = input->index; - - if (index >= isp->input_cnt) - return -EINVAL; - - if (!isp->inputs[index].camera) - return -EINVAL; - - memset(input, 0, sizeof(struct v4l2_input)); - strncpy(input->name, isp->inputs[index].camera->name, - sizeof(input->name) - 1); - - /* - * HACK: append actuator's name to sensor's - * As currently userspace can't talk directly to subdev nodes, this - * ioctl is the only way to enum inputs + possible external actuators - * for 3A tuning purpose. 
- */ -#ifndef ISP2401 - if (isp->inputs[index].motor && - strlen(isp->inputs[index].motor->name) > 0) { -#else - if (isp->motor && - strlen(isp->motor->name) > 0) { -#endif - const int cur_len = strlen(input->name); - const int max_size = sizeof(input->name) - cur_len - 1; - - if (max_size > 1) { - input->name[cur_len] = '+'; - strncpy(&input->name[cur_len + 1], -#ifndef ISP2401 - isp->inputs[index].motor->name, max_size - 1); -#else - isp->motor->name, max_size - 1); -#endif - } - } - - input->type = V4L2_INPUT_TYPE_CAMERA; - input->index = index; - input->reserved[0] = isp->inputs[index].type; - input->reserved[1] = isp->inputs[index].port; - - return 0; -} - -static unsigned int atomisp_subdev_streaming_count( - struct atomisp_sub_device *asd) -{ - return asd->video_out_preview.capq.streaming - + asd->video_out_capture.capq.streaming - + asd->video_out_video_capture.capq.streaming - + asd->video_out_vf.capq.streaming - + asd->video_in.capq.streaming; -} - -unsigned int atomisp_streaming_count(struct atomisp_device *isp) -{ - unsigned int i, sum; - - for (i = 0, sum = 0; i < isp->num_of_streams; i++) - sum += isp->asd[i].streaming == - ATOMISP_DEVICE_STREAMING_ENABLED; - - return sum; -} - -unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp) -{ - unsigned int i; - - for (i = 0; i < isp->num_of_streams; i++) - if (isp->asd[i].acc.pipeline) - return 1; - - return 0; -} -/* - * get input are used to get current primary/secondary camera - */ -static int atomisp_g_input(struct file *file, void *fh, unsigned int *input) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - - rt_mutex_lock(&isp->mutex); - *input = asd->input_curr; - rt_mutex_unlock(&isp->mutex); - - return 0; -} -/* - * set input are used to set current primary/secondary camera - */ -static int atomisp_s_input(struct file *file, void *fh, unsigned int input) -{ - 
struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct v4l2_subdev *camera = NULL; - int ret; - - rt_mutex_lock(&isp->mutex); - if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) { - dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt); - ret = -EINVAL; - goto error; - } - - /* - * check whether the request camera: - * 1: already in use - * 2: if in use, whether it is used by other streams - */ - if (isp->inputs[input].asd != NULL && isp->inputs[input].asd != asd) { - dev_err(isp->dev, - "%s, camera is already used by stream: %d\n", __func__, - isp->inputs[input].asd->index); - ret = -EBUSY; - goto error; - } - - camera = isp->inputs[input].camera; - if (!camera) { - dev_err(isp->dev, "%s, no camera\n", __func__); - ret = -EINVAL; - goto error; - } - - if (atomisp_subdev_streaming_count(asd)) { - dev_err(isp->dev, - "ISP is still streaming, stop first\n"); - ret = -EINVAL; - goto error; - } - - /* power off the current owned sensor, as it is not used this time */ - if (isp->inputs[asd->input_curr].asd == asd && - asd->input_curr != input) { - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - core, s_power, 0); - if (ret) - dev_warn(isp->dev, - "Failed to power-off sensor\n"); - /* clear the asd field to show this camera is not used */ - isp->inputs[asd->input_curr].asd = NULL; - } - - /* powe on the new sensor */ - ret = v4l2_subdev_call(isp->inputs[input].camera, core, s_power, 1); - if (ret) { - dev_err(isp->dev, "Failed to power-on sensor\n"); - goto error; - } - /* - * Some sensor driver resets the run mode during power-on, thus force - * update the run mode to sensor after power-on. 
- */ - atomisp_update_run_mode(asd); - - /* select operating sensor */ - ret = v4l2_subdev_call(isp->inputs[input].camera, video, s_routing, - 0, isp->inputs[input].sensor_index, 0); - if (ret && (ret != -ENOIOCTLCMD)) { - dev_err(isp->dev, "Failed to select sensor\n"); - goto error; - } - -#ifndef ISP2401 - if (!isp->sw_contex.file_input && isp->inputs[input].motor) - ret = v4l2_subdev_call(isp->inputs[input].motor, core, - init, 1); -#else - if (isp->motor) - ret = v4l2_subdev_call(isp->motor, core, s_power, 1); - - if (!isp->sw_contex.file_input && isp->motor) - ret = v4l2_subdev_call(isp->motor, core, init, 1); -#endif - - asd->input_curr = input; - /* mark this camera is used by the current stream */ - isp->inputs[input].asd = asd; - rt_mutex_unlock(&isp->mutex); - - return 0; - -error: - rt_mutex_unlock(&isp->mutex); - - return ret; -} - -static int atomisp_enum_fmt_cap(struct file *file, void *fh, - struct v4l2_fmtdesc *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct v4l2_subdev_mbus_code_enum code = { 0 }; - unsigned int i, fi = 0; - int rval; - - rt_mutex_lock(&isp->mutex); - rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad, - enum_mbus_code, NULL, &code); - if (rval == -ENOIOCTLCMD) { - dev_warn(isp->dev, "enum_mbus_code pad op not supported. Please fix your sensor driver!\n"); - // rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - // video, enum_mbus_fmt, 0, &code.code); - } - rt_mutex_unlock(&isp->mutex); - - if (rval) - return rval; - - for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) { - const struct atomisp_format_bridge *format = - &atomisp_output_fmts[i]; - - /* - * Is the atomisp-supported format is valid for the - * sensor (configuration)? If not, skip it. 
- */ - if (format->sh_fmt == CSS_FRAME_FORMAT_RAW - && format->mbus_code != code.code) - continue; - - /* Found a match. Now let's pick f->index'th one. */ - if (fi < f->index) { - fi++; - continue; - } - - strlcpy(f->description, format->description, - sizeof(f->description)); - f->pixelformat = format->pixelformat; - return 0; - } - - return -EINVAL; -} - -static int atomisp_g_fmt_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - - int ret; - - rt_mutex_lock(&isp->mutex); - ret = atomisp_get_fmt(vdev, f); - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_g_fmt_file(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - rt_mutex_lock(&isp->mutex); - f->fmt.pix = pipe->pix; - rt_mutex_unlock(&isp->mutex); - - return 0; -} - -/* This function looks up the closest available resolution. 
*/ -static int atomisp_try_fmt_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - ret = atomisp_try_fmt(vdev, f, NULL); - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_s_fmt_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - rt_mutex_unlock(&isp->mutex); - return ret; - } - ret = atomisp_set_fmt(vdev, f); - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_s_fmt_file(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - ret = atomisp_set_fmt_file(vdev, f); - rt_mutex_unlock(&isp->mutex); - return ret; -} - -/* - * Free videobuffer buffer priv data - */ -void atomisp_videobuf_free_buf(struct videobuf_buffer *vb) -{ - struct videobuf_vmalloc_memory *vm_mem; - - if (vb == NULL) - return; - - vm_mem = vb->priv; - if (vm_mem && vm_mem->vaddr) { - atomisp_css_frame_free(vm_mem->vaddr); - vm_mem->vaddr = NULL; - } -} - -/* - * this function is used to free video buffer queue - */ -static void atomisp_videobuf_free_queue(struct videobuf_queue *q) -{ - int i; - - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - atomisp_videobuf_free_buf(q->bufs[i]); - kfree(q->bufs[i]); - q->bufs[i] = NULL; - } -} - -int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd, - uint16_t stream_id) -{ - struct atomisp_device *isp = asd->isp; - struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf; - struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf; - struct atomisp_metadata_buf *md_buf = NULL, *_md_buf; - int count; - struct 
atomisp_css_dvs_grid_info *dvs_grid_info = - atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info); - unsigned int i; - - if (list_empty(&asd->s3a_stats) && - asd->params.curr_grid_info.s3a_grid.enable) { - count = ATOMISP_CSS_Q_DEPTH + - ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL; - dev_dbg(isp->dev, "allocating %d 3a buffers\n", count); - while (count--) { - s3a_buf = kzalloc(sizeof(struct atomisp_s3a_buf), GFP_KERNEL); - if (!s3a_buf) - goto error; - - if (atomisp_css_allocate_stat_buffers( - asd, stream_id, s3a_buf, NULL, NULL)) { - kfree(s3a_buf); - goto error; - } - - list_add_tail(&s3a_buf->list, &asd->s3a_stats); - } - } - - if (list_empty(&asd->dis_stats) && dvs_grid_info && - dvs_grid_info->enable) { - count = ATOMISP_CSS_Q_DEPTH + 1; - dev_dbg(isp->dev, "allocating %d dis buffers\n", count); - while (count--) { - dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL); - if (!dis_buf) { - kfree(s3a_buf); - goto error; - } - if (atomisp_css_allocate_stat_buffers( - asd, stream_id, NULL, dis_buf, NULL)) { - kfree(dis_buf); - goto error; - } - - list_add_tail(&dis_buf->list, &asd->dis_stats); - } - } - - for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - if (list_empty(&asd->metadata[i]) && - list_empty(&asd->metadata_ready[i]) && - list_empty(&asd->metadata_in_css[i])) { - count = ATOMISP_CSS_Q_DEPTH + - ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL; - dev_dbg(isp->dev, "allocating %d metadata buffers for type %d\n", - count, i); - while (count--) { - md_buf = kzalloc(sizeof(struct atomisp_metadata_buf), - GFP_KERNEL); - if (!md_buf) - goto error; - - if (atomisp_css_allocate_stat_buffers( - asd, stream_id, NULL, NULL, md_buf)) { - kfree(md_buf); - goto error; - } - list_add_tail(&md_buf->list, &asd->metadata[i]); - } - } - } - return 0; - -error: - dev_err(isp->dev, "failed to allocate statistics buffers\n"); - - list_for_each_entry_safe(dis_buf, _dis_buf, &asd->dis_stats, list) { - atomisp_css_free_dis_buffer(dis_buf); - list_del(&dis_buf->list); - 
kfree(dis_buf); - } - - list_for_each_entry_safe(s3a_buf, _s3a_buf, &asd->s3a_stats, list) { - atomisp_css_free_3a_buffer(s3a_buf); - list_del(&s3a_buf->list); - kfree(s3a_buf); - } - - for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i], - list) { - atomisp_css_free_metadata_buffer(md_buf); - list_del(&md_buf->list); - kfree(md_buf); - } - } - return -ENOMEM; -} - -/* - * Initiate Memory Mapping or User Pointer I/O - */ -int __atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct atomisp_css_frame_info frame_info; - struct atomisp_css_frame *frame; - struct videobuf_vmalloc_memory *vm_mem; - uint16_t source_pad = atomisp_subdev_source_pad(vdev); - uint16_t stream_id = atomisp_source_pad_to_stream_id(asd, source_pad); - int ret = 0, i = 0; - - if (req->count == 0) { - mutex_lock(&pipe->capq.vb_lock); - if (!list_empty(&pipe->capq.stream)) - videobuf_queue_cancel(&pipe->capq); - - atomisp_videobuf_free_queue(&pipe->capq); - mutex_unlock(&pipe->capq.vb_lock); - /* clear request config id */ - memset(pipe->frame_request_config_id, 0, - VIDEO_MAX_FRAME * sizeof(unsigned int)); - memset(pipe->frame_params, 0, - VIDEO_MAX_FRAME * - sizeof(struct atomisp_css_params_with_list *)); - return 0; - } - - ret = videobuf_reqbufs(&pipe->capq, req); - if (ret) - return ret; - - atomisp_alloc_css_stat_bufs(asd, stream_id); - - /* - * for user pointer type, buffers are not really allcated here, - * buffers are setup in QBUF operation through v4l2_buffer structure - */ - if (req->memory == V4L2_MEMORY_USERPTR) - return 0; - - ret = atomisp_get_css_frame_info(asd, source_pad, &frame_info); - if (ret) - return ret; - - /* - * Allocate the real frame here for selected node using our - * memory management function - */ - for (i = 0; 
i < req->count; i++) { - if (atomisp_css_frame_allocate_from_info(&frame, &frame_info)) - goto error; - vm_mem = pipe->capq.bufs[i]->priv; - vm_mem->vaddr = frame; - } - - return ret; - -error: - while (i--) { - vm_mem = pipe->capq.bufs[i]->priv; - atomisp_css_frame_free(vm_mem->vaddr); - } - - if (asd->vf_frame) - atomisp_css_frame_free(asd->vf_frame); - - return -ENOMEM; -} - -int atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - ret = __atomisp_reqbufs(file, fh, req); - rt_mutex_unlock(&isp->mutex); - - return ret; -} - -static int atomisp_reqbufs_file(struct file *file, void *fh, - struct v4l2_requestbuffers *req) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - if (req->count == 0) { - mutex_lock(&pipe->outq.vb_lock); - atomisp_videobuf_free_queue(&pipe->outq); - mutex_unlock(&pipe->outq.vb_lock); - return 0; - } - - return videobuf_reqbufs(&pipe->outq, req); -} - -/* application query the status of a buffer */ -static int atomisp_querybuf(struct file *file, void *fh, - struct v4l2_buffer *buf) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - return videobuf_querybuf(&pipe->capq, buf); -} - -static int atomisp_querybuf_file(struct file *file, void *fh, - struct v4l2_buffer *buf) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - return videobuf_querybuf(&pipe->outq, buf); -} - -/* - * Applications call the VIDIOC_QBUF ioctl to enqueue an empty (capturing) or - * filled (output) buffer in the drivers incoming queue. 
- */ -static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) -{ - static const int NOFLUSH_FLAGS = V4L2_BUF_FLAG_NO_CACHE_INVALIDATE | - V4L2_BUF_FLAG_NO_CACHE_CLEAN; - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct videobuf_buffer *vb; - struct videobuf_vmalloc_memory *vm_mem; - struct atomisp_css_frame_info frame_info; - struct atomisp_css_frame *handle = NULL; - u32 length; - u32 pgnr; - int ret = 0; - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto error; - } - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - dev_err(isp->dev, "%s: reject, as ISP at stopping.\n", - __func__); - ret = -EIO; - goto error; - } - - if (!buf || buf->index >= VIDEO_MAX_FRAME || - !pipe->capq.bufs[buf->index]) { - dev_err(isp->dev, "Invalid index for qbuf.\n"); - ret = -EINVAL; - goto error; - } - - /* - * For userptr type frame, we convert user space address to physic - * address and reprograme out page table properly - */ - if (buf->memory == V4L2_MEMORY_USERPTR) { - struct hrt_userbuffer_attr attributes; - vb = pipe->capq.bufs[buf->index]; - vm_mem = vb->priv; - if (!vm_mem) { - ret = -EINVAL; - goto error; - } - - length = vb->bsize; - pgnr = (length + (PAGE_SIZE - 1)) >> PAGE_SHIFT; - - if (vb->baddr == buf->m.userptr && vm_mem->vaddr) - goto done; - - if (atomisp_get_css_frame_info(asd, - atomisp_subdev_source_pad(vdev), &frame_info)) { - ret = -EIO; - goto error; - } - - attributes.pgnr = pgnr; -#ifdef CONFIG_ION -#ifndef ISP2401 - attributes.type = buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_ION - ? 
HRT_USR_ION : HRT_USR_PTR; -#else - if (buf->reserved & ATOMISP_BUFFER_TYPE_IS_ION) { - attributes.type = HRT_USR_ION; - if (asd->ion_dev_fd->val != ION_FD_UNSET) { - dev_dbg(isp->dev, "ION buffer queued, share_fd=%lddev_fd=%d.\n", - buf->m.userptr, asd->ion_dev_fd->val); - /* - * Make sure the shared fd we just got - * from user space isn't larger than - * the space we have for it. - */ - if ((buf->m.userptr & - (ATOMISP_ION_DEVICE_FD_MASK)) != 0) { - dev_err(isp->dev, - "Error: v4l2 buffer fd:0X%0lX > 0XFFFF.\n", - buf->m.userptr); - ret = -EINVAL; - goto error; - } - buf->m.userptr |= asd->ion_dev_fd->val << - ATOMISP_ION_DEVICE_FD_OFFSET; - } else { - dev_err(isp->dev, "v4l2 buffer type is ION, \ - but no dev fd set from userspace.\n"); - ret = -EINVAL; - goto error; - } - } else { - attributes.type = HRT_USR_PTR; - } -#endif -#else - attributes.type = HRT_USR_PTR; -#endif - ret = atomisp_css_frame_map(&handle, &frame_info, - (void __user *)buf->m.userptr, - 0, &attributes); - if (ret) { - dev_err(isp->dev, "Failed to map user buffer\n"); - goto error; - } - - if (vm_mem->vaddr) { - mutex_lock(&pipe->capq.vb_lock); - atomisp_css_frame_free(vm_mem->vaddr); - vm_mem->vaddr = NULL; - vb->state = VIDEOBUF_NEEDS_INIT; - mutex_unlock(&pipe->capq.vb_lock); - } - - vm_mem->vaddr = handle; - - buf->flags &= ~V4L2_BUF_FLAG_MAPPED; - buf->flags |= V4L2_BUF_FLAG_QUEUED; - buf->flags &= ~V4L2_BUF_FLAG_DONE; - } else if (buf->memory == V4L2_MEMORY_MMAP) { - buf->flags |= V4L2_BUF_FLAG_MAPPED; - buf->flags |= V4L2_BUF_FLAG_QUEUED; - buf->flags &= ~V4L2_BUF_FLAG_DONE; - } - -done: - if (!((buf->flags & NOFLUSH_FLAGS) == NOFLUSH_FLAGS)) - wbinvd(); - - if (!atomisp_is_vf_pipe(pipe) && - (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING)) { - /* this buffer will have a per-frame parameter */ - pipe->frame_request_config_id[buf->index] = buf->reserved2 & - ~ATOMISP_BUFFER_HAS_PER_FRAME_SETTING; - dev_dbg(isp->dev, "This buffer requires per_frame setting which has 
isp_config_id %d\n", - pipe->frame_request_config_id[buf->index]); - } else { - pipe->frame_request_config_id[buf->index] = 0; - } - - pipe->frame_params[buf->index] = NULL; - - rt_mutex_unlock(&isp->mutex); - - ret = videobuf_qbuf(&pipe->capq, buf); - rt_mutex_lock(&isp->mutex); - if (ret) - goto error; - - /* TODO: do this better, not best way to queue to css */ - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { - if (!list_empty(&pipe->buffers_waiting_for_param)) { - atomisp_handle_parameter_and_buffer(pipe); - } else { - atomisp_qbuffers_to_css(asd); - -#ifndef ISP2401 - if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd)) - atomisp_wdt_start(asd); -#else - if (!atomisp_is_wdt_running(pipe) && - atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_start(pipe); -#endif - } - } - - /* Workaround: Due to the design of HALv3, - * sometimes in ZSL or SDV mode HAL needs to - * capture multiple images within one streaming cycle. - * But the capture number cannot be determined by HAL. - * So HAL only sets the capture number to be 1 and queue multiple - * buffers. Atomisp driver needs to check this case and re-trigger - * CSS to do capture when new buffer is queued. 
*/ - if (asd->continuous_mode->val && - atomisp_subdev_source_pad(vdev) - == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE && - pipe->capq.streaming && - !asd->enable_raw_buffer_lock->val && - asd->params.offline_parm.num_captures == 1) { -#ifndef ISP2401 - asd->pending_capture_request++; - dev_dbg(isp->dev, "Add one pending capture request.\n"); -#else - if (asd->re_trigger_capture) { - ret = atomisp_css_offline_capture_configure(asd, - asd->params.offline_parm.num_captures, - asd->params.offline_parm.skip_frames, - asd->params.offline_parm.offset); - asd->re_trigger_capture = false; - dev_dbg(isp->dev, "%s Trigger capture again ret=%d\n", - __func__, ret); - - } else { - asd->pending_capture_request++; - asd->re_trigger_capture = false; - dev_dbg(isp->dev, "Add one pending capture request.\n"); - } -#endif - } - rt_mutex_unlock(&isp->mutex); - - dev_dbg(isp->dev, "qbuf buffer %d (%s) for asd%d\n", buf->index, - vdev->name, asd->index); - - return ret; - -error: - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_qbuf_file(struct file *file, void *fh, - struct v4l2_buffer *buf) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto error; - } - - if (!buf || buf->index >= VIDEO_MAX_FRAME || - !pipe->outq.bufs[buf->index]) { - dev_err(isp->dev, "Invalid index for qbuf.\n"); - ret = -EINVAL; - goto error; - } - - if (buf->memory != V4L2_MEMORY_MMAP) { - dev_err(isp->dev, "Unsupported memory method\n"); - ret = -EINVAL; - goto error; - } - - if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { - dev_err(isp->dev, "Unsupported buffer type\n"); - ret = -EINVAL; - goto error; - } - rt_mutex_unlock(&isp->mutex); - - return videobuf_qbuf(&pipe->outq, buf); - -error: - rt_mutex_unlock(&isp->mutex); - - return ret; -} - -static int __get_frame_exp_id(struct 
atomisp_video_pipe *pipe, - struct v4l2_buffer *buf) -{ - struct videobuf_vmalloc_memory *vm_mem; - struct atomisp_css_frame *handle; - int i; - - for (i = 0; pipe->capq.bufs[i]; i++) { - vm_mem = pipe->capq.bufs[i]->priv; - handle = vm_mem->vaddr; - if (buf->index == pipe->capq.bufs[i]->i && handle) - return handle->exp_id; - } - return -EINVAL; -} - -/* - * Applications call the VIDIOC_DQBUF ioctl to dequeue a filled (capturing) or - * displayed (output buffer)from the driver's outgoing queue - */ -static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret = 0; - - rt_mutex_lock(&isp->mutex); - - if (isp->isp_fatal_error) { - rt_mutex_unlock(&isp->mutex); - return -EIO; - } - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - rt_mutex_unlock(&isp->mutex); - dev_err(isp->dev, "%s: reject, as ISP at stopping.\n", - __func__); - return -EIO; - } - - rt_mutex_unlock(&isp->mutex); - - ret = videobuf_dqbuf(&pipe->capq, buf, file->f_flags & O_NONBLOCK); - if (ret) { - dev_dbg(isp->dev, "<%s: %d\n", __func__, ret); - return ret; - } - rt_mutex_lock(&isp->mutex); - buf->bytesused = pipe->pix.sizeimage; - buf->reserved = asd->frame_status[buf->index]; - - /* - * Hack: - * Currently frame_status in the enum type which takes no more lower - * 8 bit. 
- * use bit[31:16] for exp_id as it is only in the range of 1~255 - */ - buf->reserved &= 0x0000ffff; - if (!(buf->flags & V4L2_BUF_FLAG_ERROR)) - buf->reserved |= __get_frame_exp_id(pipe, buf) << 16; - buf->reserved2 = pipe->frame_config_id[buf->index]; - rt_mutex_unlock(&isp->mutex); - - dev_dbg(isp->dev, "dqbuf buffer %d (%s) for asd%d with exp_id %d, isp_config_id %d\n", - buf->index, vdev->name, asd->index, buf->reserved >> 16, - buf->reserved2); - return 0; -} - -enum atomisp_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd) -{ - if (ATOMISP_USE_YUVPP(asd)) - return CSS_PIPE_ID_YUVPP; - - if (asd->continuous_mode->val) { - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) - return CSS_PIPE_ID_VIDEO; - else - return CSS_PIPE_ID_PREVIEW; - } - - /* - * Disable vf_pp and run CSS in video mode. This allows using ISP - * scaling but it has one frame delay due to CSS internal buffering. - */ - if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) - return CSS_PIPE_ID_VIDEO; - - /* - * Disable vf_pp and run CSS in still capture mode. In this mode - * CSS does not cause extra latency with buffering, but scaling - * is not available. 
- */ - if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) - return CSS_PIPE_ID_CAPTURE; - - switch (asd->run_mode->val) { - case ATOMISP_RUN_MODE_PREVIEW: - return CSS_PIPE_ID_PREVIEW; - case ATOMISP_RUN_MODE_VIDEO: - return CSS_PIPE_ID_VIDEO; - case ATOMISP_RUN_MODE_STILL_CAPTURE: - /* fall through */ - default: - return CSS_PIPE_ID_CAPTURE; - } -} - -static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd) -{ - struct atomisp_device *isp = asd->isp; - - if (isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - if (asd->high_speed_mode) - return 1; - else - return 2; - } - - if (asd->vfpp->val != ATOMISP_VFPP_ENABLE || - asd->copy_mode) - return 1; - - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO || - (asd->run_mode->val == ATOMISP_RUN_MODE_STILL_CAPTURE && - !atomisp_is_mbuscode_raw( - asd->fmt[ - asd->capture_pad].fmt.code) && - !asd->continuous_mode->val)) - return 2; - else - return 1; -} - -int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp, - bool isp_timeout) -{ - unsigned int master = -1, slave = -1, delay_slave = 0; - int i, ret; - - /* - * ISP only support 2 streams now so ignore multiple master/slave - * case to reduce the delay between 2 stream_on calls. 
- */ - for (i = 0; i < isp->num_of_streams; i++) { - int sensor_index = isp->asd[i].input_curr; - if (isp->inputs[sensor_index].camera_caps-> - sensor[isp->asd[i].sensor_curr].is_slave) - slave = sensor_index; - else - master = sensor_index; - } - - if (master == -1 || slave == -1) { - master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR; - slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR; - dev_warn(isp->dev, - "depth mode use default master=%s.slave=%s.\n", - isp->inputs[master].camera->name, - isp->inputs[slave].camera->name); - } - - ret = v4l2_subdev_call(isp->inputs[master].camera, core, - ioctl, ATOMISP_IOC_G_DEPTH_SYNC_COMP, - &delay_slave); - if (ret) - dev_warn(isp->dev, - "get depth sensor %s compensation delay failed.\n", - isp->inputs[master].camera->name); - - ret = v4l2_subdev_call(isp->inputs[master].camera, - video, s_stream, 1); - if (ret) { - dev_err(isp->dev, "depth mode master sensor %s stream-on failed.\n", - isp->inputs[master].camera->name); - return -EINVAL; - } - - if (delay_slave != 0) - udelay(delay_slave); - - ret = v4l2_subdev_call(isp->inputs[slave].camera, - video, s_stream, 1); - if (ret) { - dev_err(isp->dev, "depth mode slave sensor %s stream-on failed.\n", - isp->inputs[slave].camera->name); - v4l2_subdev_call(isp->inputs[master].camera, video, s_stream, 0); - - return -EINVAL; - } - - return 0; -} - -/* FIXME! 
*/ -#ifndef ISP2401 -static void __wdt_on_master_slave_sensor(struct atomisp_device *isp, - unsigned int wdt_duration) -#else -static void __wdt_on_master_slave_sensor(struct atomisp_video_pipe *pipe, - unsigned int wdt_duration, - bool enable) -#endif -{ -#ifndef ISP2401 - if (atomisp_buffers_queued(&isp->asd[0])) - atomisp_wdt_refresh(&isp->asd[0], wdt_duration); - if (atomisp_buffers_queued(&isp->asd[1])) - atomisp_wdt_refresh(&isp->asd[1], wdt_duration); -#else - static struct atomisp_video_pipe *pipe0; - - if (enable) { - if (atomisp_buffers_queued_pipe(pipe0)) - atomisp_wdt_refresh_pipe(pipe0, wdt_duration); - if (atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_refresh_pipe(pipe, wdt_duration); - } else { - pipe0 = pipe; - } -#endif -} - -static void atomisp_pause_buffer_event(struct atomisp_device *isp) -{ - struct v4l2_event event = {0}; - int i; - - event.type = V4L2_EVENT_ATOMISP_PAUSE_BUFFER; - - for (i = 0; i < isp->num_of_streams; i++) { - int sensor_index = isp->asd[i].input_curr; - if (isp->inputs[sensor_index].camera_caps-> - sensor[isp->asd[i].sensor_curr].is_slave) { - v4l2_event_queue(isp->asd[i].subdev.devnode, &event); - break; - } - } -} - -/* Input system HW workaround */ -/* Input system address translation corrupts burst during */ -/* invalidate. SW workaround for this is to set burst length */ -/* manually to 128 in case of 13MPx snapshot and to 1 otherwise. */ -static void atomisp_dma_burst_len_cfg(struct atomisp_sub_device *asd) -{ - - struct v4l2_mbus_framefmt *sink; - sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - if (sink->width * sink->height >= 4096*3072) - atomisp_store_uint32(DMA_BURST_SIZE_REG, 0x7F); - else - atomisp_store_uint32(DMA_BURST_SIZE_REG, 0x00); -} - -/* - * This ioctl start the capture during streaming I/O. 
- */ -static int atomisp_streamon(struct file *file, void *fh, - enum v4l2_buf_type type) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct atomisp_device *isp = video_get_drvdata(vdev); - enum atomisp_css_pipe_id css_pipe_id; - unsigned int sensor_start_stream; - unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION; - int ret = 0; - unsigned long irqflags; - - dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n", - atomisp_subdev_source_pad(vdev), asd->index); - - if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { - dev_dbg(isp->dev, "unsupported v4l2 buf type\n"); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto out; - } - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - ret = -EBUSY; - goto out; - } - - if (pipe->capq.streaming) - goto out; - - /* Input system HW workaround */ - atomisp_dma_burst_len_cfg(asd); - - /* - * The number of streaming video nodes is based on which - * binary is going to be run. - */ - sensor_start_stream = atomisp_sensor_start_stream(asd); - - spin_lock_irqsave(&pipe->irq_lock, irqflags); - if (list_empty(&(pipe->capq.stream))) { - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - dev_dbg(isp->dev, "no buffer in the queue\n"); - ret = -EINVAL; - goto out; - } - spin_unlock_irqrestore(&pipe->irq_lock, irqflags); - - ret = videobuf_streamon(&pipe->capq); - if (ret) - goto out; - - /* Reset pending capture request count. 
*/ - asd->pending_capture_request = 0; -#ifdef ISP2401 - asd->re_trigger_capture = false; -#endif - - if ((atomisp_subdev_streaming_count(asd) > sensor_start_stream) && - (!isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl)) { - /* trigger still capture */ - if (asd->continuous_mode->val && - atomisp_subdev_source_pad(vdev) - == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) { - if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) - dev_dbg(isp->dev, "SDV last video raw buffer id: %u\n", - asd->latest_preview_exp_id); - else - dev_dbg(isp->dev, "ZSL last preview raw buffer id: %u\n", - asd->latest_preview_exp_id); - - if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) { - flush_work(&asd->delayed_init_work); - rt_mutex_unlock(&isp->mutex); - if (wait_for_completion_interruptible( - &asd->init_done) != 0) - return -ERESTARTSYS; - rt_mutex_lock(&isp->mutex); - } - - /* handle per_frame_setting parameter and buffers */ - atomisp_handle_parameter_and_buffer(pipe); - - /* - * only ZSL/SDV capture request will be here, raise - * the ISP freq to the highest possible to minimize - * the S2S latency. 
- */ - atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false); - /* - * When asd->enable_raw_buffer_lock->val is true, - * An extra IOCTL is needed to call - * atomisp_css_exp_id_capture and trigger real capture - */ - if (!asd->enable_raw_buffer_lock->val) { - ret = atomisp_css_offline_capture_configure(asd, - asd->params.offline_parm.num_captures, - asd->params.offline_parm.skip_frames, - asd->params.offline_parm.offset); - if (ret) { - ret = -EINVAL; - goto out; - } - if (asd->depth_mode->val) - atomisp_pause_buffer_event(isp); - } - } - atomisp_qbuffers_to_css(asd); - goto out; - } - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { - atomisp_qbuffers_to_css(asd); - goto start_sensor; - } - - css_pipe_id = atomisp_get_css_pipe_id(asd); - - ret = atomisp_acc_load_extensions(asd); - if (ret < 0) { - dev_err(isp->dev, "acc extension failed to load\n"); - goto out; - } - - if (asd->params.css_update_params_needed) { - atomisp_apply_css_parameters(asd, &asd->params.css_param); - if (asd->params.css_param.update_flag.dz_config) - atomisp_css_set_dz_config(asd, - &asd->params.css_param.dz_config); - atomisp_css_update_isp_params(asd); - asd->params.css_update_params_needed = false; - memset(&asd->params.css_param.update_flag, 0, - sizeof(struct atomisp_parameters)); - } - asd->params.dvs_6axis = NULL; - - ret = atomisp_css_start(asd, css_pipe_id, false); - if (ret) - goto out; - - asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED; - atomic_set(&asd->sof_count, -1); - atomic_set(&asd->sequence, -1); - atomic_set(&asd->sequence_temp, -1); - if (isp->sw_contex.file_input) - wdt_duration = ATOMISP_ISP_FILE_TIMEOUT_DURATION; - - asd->params.dis_proj_data_valid = false; - asd->latest_preview_exp_id = 0; - asd->postview_exp_id = 1; - asd->preview_exp_id = 1; - - /* handle per_frame_setting parameter and buffers */ - atomisp_handle_parameter_and_buffer(pipe); - - atomisp_qbuffers_to_css(asd); - - /* Only start sensor when the last streaming instance started */ - if 
(atomisp_subdev_streaming_count(asd) < sensor_start_stream) - goto out; - -start_sensor: - if (isp->flash) { - asd->params.num_flash_frames = 0; - asd->params.flash_state = ATOMISP_FLASH_IDLE; - atomisp_setup_flash(asd); - } - - if (!isp->sw_contex.file_input) { - atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF, - atomisp_css_valid_sof(isp)); - atomisp_csi2_configure(asd); - /* - * set freq to max when streaming count > 1 which indicate - * dual camera would run - */ - if (atomisp_streaming_count(isp) > 1) { - if (atomisp_freq_scaling(isp, - ATOMISP_DFS_MODE_MAX, false) < 0) - dev_dbg(isp->dev, "dfs failed!\n"); - } else { - if (atomisp_freq_scaling(isp, - ATOMISP_DFS_MODE_AUTO, false) < 0) - dev_dbg(isp->dev, "dfs failed!\n"); - } - } else { - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false) < 0) - dev_dbg(isp->dev, "dfs failed!\n"); - } - - if (asd->depth_mode->val && atomisp_streaming_count(isp) == - ATOMISP_DEPTH_SENSOR_STREAMON_COUNT) { - ret = atomisp_stream_on_master_slave_sensor(isp, false); - if (ret) { - dev_err(isp->dev, "master slave sensor stream on failed!\n"); - goto out; - } -#ifndef ISP2401 - __wdt_on_master_slave_sensor(isp, wdt_duration); -#else - __wdt_on_master_slave_sensor(pipe, wdt_duration, true); -#endif - goto start_delay_wq; - } else if (asd->depth_mode->val && (atomisp_streaming_count(isp) < - ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) { -#ifdef ISP2401 - __wdt_on_master_slave_sensor(pipe, wdt_duration, false); -#endif - goto start_delay_wq; - } - - /* Enable the CSI interface on ANN B0/K0 */ - if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << - ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) { - pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL, - isp->saved_regs.csi_control | - MRFLD_PCI_CSI_CONTROL_CSI_READY); - } - - /* stream on the sensor */ - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 1); - if (ret) { - asd->streaming = 
ATOMISP_DEVICE_STREAMING_DISABLED; - ret = -EINVAL; - goto out; - } - -#ifndef ISP2401 - if (atomisp_buffers_queued(asd)) - atomisp_wdt_refresh(asd, wdt_duration); -#else - if (atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_refresh_pipe(pipe, wdt_duration); -#endif - -start_delay_wq: - if (asd->continuous_mode->val) { - struct v4l2_mbus_framefmt *sink; - - sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - reinit_completion(&asd->init_done); - asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED; - queue_work(asd->delayed_init_workq, &asd->delayed_init_work); - atomisp_css_set_cont_prev_start_time(isp, - ATOMISP_CALC_CSS_PREV_OVERLAP(sink->height)); - } else { - asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED; - } -out: - rt_mutex_unlock(&isp->mutex); - return ret; -} - -int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct atomisp_video_pipe *capture_pipe = NULL; - struct atomisp_video_pipe *vf_pipe = NULL; - struct atomisp_video_pipe *preview_pipe = NULL; - struct atomisp_video_pipe *video_pipe = NULL; - struct videobuf_buffer *vb, *_vb; - enum atomisp_css_pipe_id css_pipe_id; - int ret; - unsigned long flags; - bool first_streamoff = false; - - dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n", - atomisp_subdev_source_pad(vdev), asd->index); - - BUG_ON(!rt_mutex_is_locked(&isp->mutex)); - BUG_ON(!mutex_is_locked(&isp->streamoff_mutex)); - - if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { - dev_dbg(isp->dev, "unsupported v4l2 buf type\n"); - return -EINVAL; - } - - /* - * do only videobuf_streamoff for capture & vf pipes in - * case of continuous capture - */ - if ((asd->continuous_mode->val || - 
isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) && - atomisp_subdev_source_pad(vdev) != - ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && - atomisp_subdev_source_pad(vdev) != - ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) { - - if (isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) { - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); - } else if (atomisp_subdev_source_pad(vdev) - == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) { - /* stop continuous still capture if needed */ - if (asd->params.offline_parm.num_captures == -1) - atomisp_css_offline_capture_configure(asd, - 0, 0, 0); - atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, false); - } - /* - * Currently there is no way to flush buffers queued to css. - * When doing videobuf_streamoff, active buffers will be - * marked as VIDEOBUF_NEEDS_INIT. HAL will be able to use - * these buffers again, and these buffers might be queued to - * css more than once! Warn here, if HAL has not dequeued all - * buffers back before calling streamoff. - */ - if (pipe->buffers_in_css != 0) { - WARN(1, "%s: buffers of vdev %s still in CSS!\n", - __func__, pipe->vdev.name); - - /* - * Buffers remained in css maybe dequeued out in the - * next stream on, while this will causes serious - * issues as buffers already get invalid after - * previous stream off. 
- * - * No way to flush buffers but to reset the whole css - */ - dev_warn(isp->dev, "Reset CSS to clean up css buffers.\n"); - atomisp_css_flush(isp); - } - - return videobuf_streamoff(&pipe->capq); - } - - if (!pipe->capq.streaming) - return 0; - - spin_lock_irqsave(&isp->lock, flags); - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { - asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING; - first_streamoff = true; - } - spin_unlock_irqrestore(&isp->lock, flags); - - if (first_streamoff) { - /* if other streams are running, should not disable watch dog */ - rt_mutex_unlock(&isp->mutex); - atomisp_wdt_stop(asd, true); - - /* - * must stop sending pixels into GP_FIFO before stop - * the pipeline. - */ - if (isp->sw_contex.file_input) - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); - - rt_mutex_lock(&isp->mutex); - atomisp_acc_unload_extensions(asd); - } - - spin_lock_irqsave(&isp->lock, flags); - if (atomisp_subdev_streaming_count(asd) == 1) - asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; - spin_unlock_irqrestore(&isp->lock, flags); - - if (!first_streamoff) { - ret = videobuf_streamoff(&pipe->capq); - if (ret) - return ret; - goto stopsensor; - } - - atomisp_clear_css_buffer_counters(asd); - - if (!isp->sw_contex.file_input) - atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF, - false); - - if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) { - cancel_work_sync(&asd->delayed_init_work); - asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED; - } - if (first_streamoff) { - css_pipe_id = atomisp_get_css_pipe_id(asd); - ret = atomisp_css_stop(asd, css_pipe_id, false); - } - /* cancel work queue*/ - if (asd->video_out_capture.users) { - capture_pipe = &asd->video_out_capture; - wake_up_interruptible(&capture_pipe->capq.wait); - } - if (asd->video_out_vf.users) { - vf_pipe = &asd->video_out_vf; - wake_up_interruptible(&vf_pipe->capq.wait); - } - if (asd->video_out_preview.users) { - preview_pipe = 
&asd->video_out_preview; - wake_up_interruptible(&preview_pipe->capq.wait); - } - if (asd->video_out_video_capture.users) { - video_pipe = &asd->video_out_video_capture; - wake_up_interruptible(&video_pipe->capq.wait); - } - ret = videobuf_streamoff(&pipe->capq); - if (ret) - return ret; - - /* cleanup css here */ - /* no need for this, as ISP will be reset anyway */ - /*atomisp_flush_bufs_in_css(isp);*/ - - spin_lock_irqsave(&pipe->irq_lock, flags); - list_for_each_entry_safe(vb, _vb, &pipe->activeq, queue) { - vb->state = VIDEOBUF_PREPARED; - list_del(&vb->queue); - } - list_for_each_entry_safe(vb, _vb, &pipe->buffers_waiting_for_param, queue) { - vb->state = VIDEOBUF_PREPARED; - list_del(&vb->queue); - pipe->frame_request_config_id[vb->i] = 0; - } - spin_unlock_irqrestore(&pipe->irq_lock, flags); - - atomisp_subdev_cleanup_pending_events(asd); -stopsensor: - if (atomisp_subdev_streaming_count(asd) + 1 - != atomisp_sensor_start_stream(asd)) - return 0; - - if (!isp->sw_contex.file_input) - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); - - if (isp->flash) { - asd->params.num_flash_frames = 0; - asd->params.flash_state = ATOMISP_FLASH_IDLE; - } - - /* if other streams are running, isp should not be powered off */ - if (atomisp_streaming_count(isp)) { - atomisp_css_flush(isp); - return 0; - } - - /* Disable the CSI interface on ANN B0/K0 */ - if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << - ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) { - pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL, - isp->saved_regs.csi_control & - ~MRFLD_PCI_CSI_CONTROL_CSI_READY); - } - - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false)) - dev_warn(isp->dev, "DFS failed.\n"); - /* - * ISP work around, need to reset isp - * Is it correct time to reset ISP when first node does streamoff? 
- */ - if (isp->sw_contex.power_state == ATOM_ISP_POWER_UP) { - unsigned int i; - bool recreate_streams[MAX_STREAM_NUM] = {0}; - if (isp->isp_timeout) - dev_err(isp->dev, "%s: Resetting with WA activated", - __func__); - /* - * It is possible that the other asd stream is in the stage - * that v4l2_setfmt is just get called on it, which will - * create css stream on that stream. But at this point, there - * is no way to destroy the css stream created on that stream. - * - * So force stream destroy here. - */ - for (i = 0; i < isp->num_of_streams; i++) { - if (isp->asd[i].stream_prepared) { - atomisp_destroy_pipes_stream_force(&isp-> - asd[i]); - recreate_streams[i] = true; - } - } - - /* disable PUNIT/ISP acknowlede/handshake - SRSE=3 */ - pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control | - MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); - dev_err(isp->dev, "atomisp_reset"); - atomisp_reset(isp); - for (i = 0; i < isp->num_of_streams; i++) { - if (recreate_streams[i]) - atomisp_create_pipes_stream(&isp->asd[i]); - } - isp->isp_timeout = false; - } - return ret; -} - -static int atomisp_streamoff(struct file *file, void *fh, - enum v4l2_buf_type type) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int rval; - - mutex_lock(&isp->streamoff_mutex); - rt_mutex_lock(&isp->mutex); - rval = __atomisp_streamoff(file, fh, type); - rt_mutex_unlock(&isp->mutex); - mutex_unlock(&isp->streamoff_mutex); - - return rval; -} - -/* - * To get the current value of a control. 
- * applications initialize the id field of a struct v4l2_control and - * call this ioctl with a pointer to this structure - */ -static int atomisp_g_ctrl(struct file *file, void *fh, - struct v4l2_control *control) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct atomisp_device *isp = video_get_drvdata(vdev); - int i, ret = -EINVAL; - - for (i = 0; i < ctrls_num; i++) { - if (ci_v4l2_controls[i].id == control->id) { - ret = 0; - break; - } - } - - if (ret) - return ret; - - rt_mutex_lock(&isp->mutex); - - switch (control->id) { - case V4L2_CID_IRIS_ABSOLUTE: - case V4L2_CID_EXPOSURE_ABSOLUTE: - case V4L2_CID_FNUMBER_ABSOLUTE: - case V4L2_CID_2A_STATUS: - case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: - case V4L2_CID_EXPOSURE: - case V4L2_CID_EXPOSURE_AUTO: - case V4L2_CID_SCENE_MODE: - case V4L2_CID_ISO_SENSITIVITY: - case V4L2_CID_ISO_SENSITIVITY_AUTO: - case V4L2_CID_CONTRAST: - case V4L2_CID_SATURATION: - case V4L2_CID_SHARPNESS: - case V4L2_CID_3A_LOCK: - case V4L2_CID_EXPOSURE_ZONE_NUM: - case V4L2_CID_TEST_PATTERN: - case V4L2_CID_TEST_PATTERN_COLOR_R: - case V4L2_CID_TEST_PATTERN_COLOR_GR: - case V4L2_CID_TEST_PATTERN_COLOR_GB: - case V4L2_CID_TEST_PATTERN_COLOR_B: - rt_mutex_unlock(&isp->mutex); - return v4l2_g_ctrl(isp->inputs[asd->input_curr].camera-> - ctrl_handler, control); - case V4L2_CID_COLORFX: - ret = atomisp_color_effect(asd, 0, &control->value); - break; - case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION: - ret = atomisp_bad_pixel(asd, 0, &control->value); - break; - case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC: - ret = atomisp_gdc_cac(asd, 0, &control->value); - break; - case V4L2_CID_ATOMISP_VIDEO_STABLIZATION: - ret = atomisp_video_stable(asd, 0, &control->value); - break; - case V4L2_CID_ATOMISP_FIXED_PATTERN_NR: - ret = atomisp_fixed_pattern(asd, 0, &control->value); - break; - case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION: - ret = atomisp_false_color(asd, 0, 
&control->value); - break; - case V4L2_CID_ATOMISP_LOW_LIGHT: - ret = atomisp_low_light(asd, 0, &control->value); - break; - default: - ret = -EINVAL; - break; - } - - rt_mutex_unlock(&isp->mutex); - return ret; -} - -/* - * To change the value of a control. - * applications initialize the id and value fields of a struct v4l2_control - * and call this ioctl. - */ -static int atomisp_s_ctrl(struct file *file, void *fh, - struct v4l2_control *control) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct atomisp_device *isp = video_get_drvdata(vdev); - int i, ret = -EINVAL; - - for (i = 0; i < ctrls_num; i++) { - if (ci_v4l2_controls[i].id == control->id) { - ret = 0; - break; - } - } - - if (ret) - return ret; - - rt_mutex_lock(&isp->mutex); - switch (control->id) { - case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: - case V4L2_CID_EXPOSURE: - case V4L2_CID_EXPOSURE_AUTO: - case V4L2_CID_EXPOSURE_AUTO_PRIORITY: - case V4L2_CID_SCENE_MODE: - case V4L2_CID_ISO_SENSITIVITY: - case V4L2_CID_ISO_SENSITIVITY_AUTO: - case V4L2_CID_POWER_LINE_FREQUENCY: - case V4L2_CID_EXPOSURE_METERING: - case V4L2_CID_CONTRAST: - case V4L2_CID_SATURATION: - case V4L2_CID_SHARPNESS: - case V4L2_CID_3A_LOCK: - case V4L2_CID_COLORFX_CBCR: - case V4L2_CID_TEST_PATTERN: - case V4L2_CID_TEST_PATTERN_COLOR_R: - case V4L2_CID_TEST_PATTERN_COLOR_GR: - case V4L2_CID_TEST_PATTERN_COLOR_GB: - case V4L2_CID_TEST_PATTERN_COLOR_B: - rt_mutex_unlock(&isp->mutex); - return v4l2_s_ctrl(NULL, - isp->inputs[asd->input_curr].camera-> - ctrl_handler, control); - case V4L2_CID_COLORFX: - ret = atomisp_color_effect(asd, 1, &control->value); - break; - case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION: - ret = atomisp_bad_pixel(asd, 1, &control->value); - break; - case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC: - ret = atomisp_gdc_cac(asd, 1, &control->value); - break; - case V4L2_CID_ATOMISP_VIDEO_STABLIZATION: - ret = atomisp_video_stable(asd, 1, 
&control->value); - break; - case V4L2_CID_ATOMISP_FIXED_PATTERN_NR: - ret = atomisp_fixed_pattern(asd, 1, &control->value); - break; - case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION: - ret = atomisp_false_color(asd, 1, &control->value); - break; - case V4L2_CID_REQUEST_FLASH: - ret = atomisp_flash_enable(asd, control->value); - break; - case V4L2_CID_ATOMISP_LOW_LIGHT: - ret = atomisp_low_light(asd, 1, &control->value); - break; - default: - ret = -EINVAL; - break; - } - rt_mutex_unlock(&isp->mutex); - return ret; -} -/* - * To query the attributes of a control. - * applications set the id field of a struct v4l2_queryctrl and call the - * this ioctl with a pointer to this structure. The driver fills - * the rest of the structure. - */ -static int atomisp_queryctl(struct file *file, void *fh, - struct v4l2_queryctrl *qc) -{ - int i, ret = -EINVAL; - struct video_device *vdev = video_devdata(file); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct atomisp_device *isp = video_get_drvdata(vdev); - - switch (qc->id) { - case V4L2_CID_FOCUS_ABSOLUTE: - case V4L2_CID_FOCUS_RELATIVE: - case V4L2_CID_FOCUS_STATUS: -#ifndef ISP2401 - return v4l2_queryctrl(isp->inputs[asd->input_curr].camera-> - ctrl_handler, qc); -#else - if (isp->motor) - return v4l2_queryctrl(isp->motor->ctrl_handler, qc); - else - return v4l2_queryctrl(isp->inputs[asd->input_curr]. 
- camera->ctrl_handler, qc); -#endif - } - - if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL) - return ret; - - for (i = 0; i < ctrls_num; i++) { - if (ci_v4l2_controls[i].id == qc->id) { - memcpy(qc, &ci_v4l2_controls[i], - sizeof(struct v4l2_queryctrl)); - qc->reserved[0] = 0; - ret = 0; - break; - } - } - if (ret != 0) - qc->flags = V4L2_CTRL_FLAG_DISABLED; - - return ret; -} - -static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh, - struct v4l2_ext_controls *c) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct atomisp_device *isp = video_get_drvdata(vdev); - struct v4l2_control ctrl; - int i; - int ret = 0; - - for (i = 0; i < c->count; i++) { - ctrl.id = c->controls[i].id; - ctrl.value = c->controls[i].value; - switch (ctrl.id) { - case V4L2_CID_EXPOSURE_ABSOLUTE: - case V4L2_CID_EXPOSURE_AUTO: - case V4L2_CID_IRIS_ABSOLUTE: - case V4L2_CID_FNUMBER_ABSOLUTE: - case V4L2_CID_BIN_FACTOR_HORZ: - case V4L2_CID_BIN_FACTOR_VERT: - case V4L2_CID_3A_LOCK: - case V4L2_CID_TEST_PATTERN: - case V4L2_CID_TEST_PATTERN_COLOR_R: - case V4L2_CID_TEST_PATTERN_COLOR_GR: - case V4L2_CID_TEST_PATTERN_COLOR_GB: - case V4L2_CID_TEST_PATTERN_COLOR_B: - /* - * Exposure related control will be handled by sensor - * driver - */ - ret = - v4l2_g_ctrl(isp->inputs[asd->input_curr].camera-> - ctrl_handler, &ctrl); - break; - case V4L2_CID_FOCUS_ABSOLUTE: - case V4L2_CID_FOCUS_RELATIVE: - case V4L2_CID_FOCUS_STATUS: - case V4L2_CID_FOCUS_AUTO: -#ifndef ISP2401 - if (isp->inputs[asd->input_curr].motor) -#else - if (isp->motor) -#endif - ret = -#ifndef ISP2401 - v4l2_g_ctrl(isp->inputs[asd->input_curr]. - motor->ctrl_handler, &ctrl); -#else - v4l2_g_ctrl(isp->motor->ctrl_handler, - &ctrl); -#endif - else - ret = - v4l2_g_ctrl(isp->inputs[asd->input_curr]. 
- camera->ctrl_handler, &ctrl); - break; - case V4L2_CID_FLASH_STATUS: - case V4L2_CID_FLASH_INTENSITY: - case V4L2_CID_FLASH_TORCH_INTENSITY: - case V4L2_CID_FLASH_INDICATOR_INTENSITY: - case V4L2_CID_FLASH_TIMEOUT: - case V4L2_CID_FLASH_STROBE: - case V4L2_CID_FLASH_MODE: - case V4L2_CID_FLASH_STATUS_REGISTER: - if (isp->flash) - ret = - v4l2_g_ctrl(isp->flash->ctrl_handler, - &ctrl); - break; - case V4L2_CID_ZOOM_ABSOLUTE: - rt_mutex_lock(&isp->mutex); - ret = atomisp_digital_zoom(asd, 0, &ctrl.value); - rt_mutex_unlock(&isp->mutex); - break; - case V4L2_CID_G_SKIP_FRAMES: - ret = v4l2_subdev_call( - isp->inputs[asd->input_curr].camera, - sensor, g_skip_frames, (u32 *)&ctrl.value); - break; - default: - ret = -EINVAL; - } - - if (ret) { - c->error_idx = i; - break; - } - c->controls[i].value = ctrl.value; - } - return ret; -} - -/* This ioctl allows the application to get multiple controls by class */ -static int atomisp_g_ext_ctrls(struct file *file, void *fh, - struct v4l2_ext_controls *c) -{ - struct v4l2_control ctrl; - int i, ret = 0; - - /* input_lock is not need for the Camera releated IOCTLs - * The input_lock downgrade the FPS of 3A*/ - ret = atomisp_camera_g_ext_ctrls(file, fh, c); - if (ret != -EINVAL) - return ret; - - for (i = 0; i < c->count; i++) { - ctrl.id = c->controls[i].id; - ctrl.value = c->controls[i].value; - ret = atomisp_g_ctrl(file, fh, &ctrl); - c->controls[i].value = ctrl.value; - if (ret) { - c->error_idx = i; - break; - } - } - return ret; -} - -static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, - struct v4l2_ext_controls *c) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct atomisp_device *isp = video_get_drvdata(vdev); - struct v4l2_control ctrl; - int i; - int ret = 0; - - for (i = 0; i < c->count; i++) { - struct v4l2_ctrl *ctr; - - ctrl.id = c->controls[i].id; - ctrl.value = c->controls[i].value; - switch (ctrl.id) { - case 
V4L2_CID_EXPOSURE_ABSOLUTE: - case V4L2_CID_EXPOSURE_AUTO: - case V4L2_CID_EXPOSURE_METERING: - case V4L2_CID_IRIS_ABSOLUTE: - case V4L2_CID_FNUMBER_ABSOLUTE: - case V4L2_CID_VCM_TIMEING: - case V4L2_CID_VCM_SLEW: - case V4L2_CID_3A_LOCK: - case V4L2_CID_TEST_PATTERN: - case V4L2_CID_TEST_PATTERN_COLOR_R: - case V4L2_CID_TEST_PATTERN_COLOR_GR: - case V4L2_CID_TEST_PATTERN_COLOR_GB: - case V4L2_CID_TEST_PATTERN_COLOR_B: - ret = v4l2_s_ctrl(NULL, - isp->inputs[asd->input_curr].camera-> - ctrl_handler, &ctrl); - break; - case V4L2_CID_FOCUS_ABSOLUTE: - case V4L2_CID_FOCUS_RELATIVE: - case V4L2_CID_FOCUS_STATUS: - case V4L2_CID_FOCUS_AUTO: -#ifndef ISP2401 - if (isp->inputs[asd->input_curr].motor) -#else - if (isp->motor) -#endif - ret = v4l2_s_ctrl(NULL, -#ifndef ISP2401 - isp->inputs[asd->input_curr]. - motor->ctrl_handler, &ctrl); -#else - isp->motor->ctrl_handler, - &ctrl); -#endif - else - ret = v4l2_s_ctrl(NULL, - isp->inputs[asd->input_curr]. - camera->ctrl_handler, &ctrl); - break; - case V4L2_CID_FLASH_STATUS: - case V4L2_CID_FLASH_INTENSITY: - case V4L2_CID_FLASH_TORCH_INTENSITY: - case V4L2_CID_FLASH_INDICATOR_INTENSITY: - case V4L2_CID_FLASH_TIMEOUT: - case V4L2_CID_FLASH_STROBE: - case V4L2_CID_FLASH_MODE: - case V4L2_CID_FLASH_STATUS_REGISTER: - rt_mutex_lock(&isp->mutex); - if (isp->flash) { - ret = - v4l2_s_ctrl(NULL, isp->flash->ctrl_handler, - &ctrl); - /* When flash mode is changed we need to reset - * flash state */ - if (ctrl.id == V4L2_CID_FLASH_MODE) { - asd->params.flash_state = - ATOMISP_FLASH_IDLE; - asd->params.num_flash_frames = 0; - } - } - rt_mutex_unlock(&isp->mutex); - break; - case V4L2_CID_ZOOM_ABSOLUTE: - rt_mutex_lock(&isp->mutex); - ret = atomisp_digital_zoom(asd, 1, &ctrl.value); - rt_mutex_unlock(&isp->mutex); - break; - default: - ctr = v4l2_ctrl_find(&asd->ctrl_handler, ctrl.id); - if (ctr) - ret = v4l2_ctrl_s_ctrl(ctr, ctrl.value); - else - ret = -EINVAL; - } - - if (ret) { - c->error_idx = i; - break; - } - 
c->controls[i].value = ctrl.value; - } - return ret; -} - -/* This ioctl allows the application to set multiple controls by class */ -static int atomisp_s_ext_ctrls(struct file *file, void *fh, - struct v4l2_ext_controls *c) -{ - struct v4l2_control ctrl; - int i, ret = 0; - - /* input_lock is not need for the Camera releated IOCTLs - * The input_lock downgrade the FPS of 3A*/ - ret = atomisp_camera_s_ext_ctrls(file, fh, c); - if (ret != -EINVAL) - return ret; - - for (i = 0; i < c->count; i++) { - ctrl.id = c->controls[i].id; - ctrl.value = c->controls[i].value; - ret = atomisp_s_ctrl(file, fh, &ctrl); - c->controls[i].value = ctrl.value; - if (ret) { - c->error_idx = i; - break; - } - } - return ret; -} - -/* - * vidioc_g/s_param are used to switch isp running mode - */ -static int atomisp_g_parm(struct file *file, void *fh, - struct v4l2_streamparm *parm) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct atomisp_device *isp = video_get_drvdata(vdev); - - if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { - dev_err(isp->dev, "unsupport v4l2 buf type\n"); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - parm->parm.capture.capturemode = asd->run_mode->val; - rt_mutex_unlock(&isp->mutex); - - return 0; -} - -static int atomisp_s_parm(struct file *file, void *fh, - struct v4l2_streamparm *parm) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - int mode; - int rval; - int fps; - - if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { - dev_err(isp->dev, "unsupport v4l2 buf type\n"); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - - asd->high_speed_mode = false; - switch (parm->parm.capture.capturemode) { - case CI_MODE_NONE: { - struct v4l2_subdev_frame_interval fi = {0}; - - fi.interval = parm->parm.capture.timeperframe; - - rval = 
v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_frame_interval, &fi); - if (!rval) - parm->parm.capture.timeperframe = fi.interval; - - if (fi.interval.numerator != 0) { - fps = fi.interval.denominator / fi.interval.numerator; - if (fps > 30) - asd->high_speed_mode = true; - } - - goto out; - } - case CI_MODE_VIDEO: - mode = ATOMISP_RUN_MODE_VIDEO; - break; - case CI_MODE_STILL_CAPTURE: - mode = ATOMISP_RUN_MODE_STILL_CAPTURE; - break; - case CI_MODE_CONTINUOUS: - mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE; - break; - case CI_MODE_PREVIEW: - mode = ATOMISP_RUN_MODE_PREVIEW; - break; - default: - rval = -EINVAL; - goto out; - } - - rval = v4l2_ctrl_s_ctrl(asd->run_mode, mode); - -out: - rt_mutex_unlock(&isp->mutex); - - return rval == -ENOIOCTLCMD ? 0 : rval; -} - -static int atomisp_s_parm_file(struct file *file, void *fh, - struct v4l2_streamparm *parm) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - - if (parm->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { - dev_err(isp->dev, "unsupport v4l2 buf type for output\n"); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - isp->sw_contex.file_input = true; - rt_mutex_unlock(&isp->mutex); - - return 0; -} - -static long atomisp_vidioc_default(struct file *file, void *fh, - bool valid_prio, unsigned int cmd, void *arg) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd; - bool acc_node; - int err; - - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) - asd = atomisp_to_acc_pipe(vdev)->asd; - else - asd = atomisp_to_video_pipe(vdev)->asd; - - switch (cmd) { - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: - case ATOMISP_IOC_S_EXPOSURE: - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_EXT_ISP_CTRL: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO: - case 
ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT: - case ATOMISP_IOC_S_SENSOR_EE_CONFIG: -#ifdef ISP2401 - case ATOMISP_IOC_G_UPDATE_EXPOSURE: -#endif - /* we do not need take isp->mutex for these IOCTLs */ - break; - default: - rt_mutex_lock(&isp->mutex); - break; - } - switch (cmd) { -#ifdef ISP2401 - case ATOMISP_IOC_S_SENSOR_RUNMODE: - err = atomisp_set_sensor_runmode(asd, arg); - break; - -#endif - case ATOMISP_IOC_G_XNR: - err = atomisp_xnr(asd, 0, arg); - break; - - case ATOMISP_IOC_S_XNR: - err = atomisp_xnr(asd, 1, arg); - break; - - case ATOMISP_IOC_G_NR: - err = atomisp_nr(asd, 0, arg); - break; - - case ATOMISP_IOC_S_NR: - err = atomisp_nr(asd, 1, arg); - break; - - case ATOMISP_IOC_G_TNR: - err = atomisp_tnr(asd, 0, arg); - break; - - case ATOMISP_IOC_S_TNR: - err = atomisp_tnr(asd, 1, arg); - break; - - case ATOMISP_IOC_G_BLACK_LEVEL_COMP: - err = atomisp_black_level(asd, 0, arg); - break; - - case ATOMISP_IOC_S_BLACK_LEVEL_COMP: - err = atomisp_black_level(asd, 1, arg); - break; - - case ATOMISP_IOC_G_EE: - err = atomisp_ee(asd, 0, arg); - break; - - case ATOMISP_IOC_S_EE: - err = atomisp_ee(asd, 1, arg); - break; - - case ATOMISP_IOC_G_DIS_STAT: - err = atomisp_get_dis_stat(asd, arg); - break; - - case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS: - err = atomisp_get_dvs2_bq_resolutions(asd, arg); - break; - - case ATOMISP_IOC_S_DIS_COEFS: - err = atomisp_css_cp_dvs2_coefs(asd, arg, - &asd->params.css_param, true); - if (!err && arg) - asd->params.css_update_params_needed = true; - break; - - case ATOMISP_IOC_S_DIS_VECTOR: - err = atomisp_cp_dvs_6axis_config(asd, arg, - &asd->params.css_param, true); - if (!err && arg) - asd->params.css_update_params_needed = true; - break; - - case ATOMISP_IOC_G_ISP_PARM: - err = atomisp_param(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_PARM: - err = atomisp_param(asd, 1, arg); - break; - - case ATOMISP_IOC_G_3A_STAT: - err = 
atomisp_3a_stat(asd, 0, arg); - break; - - case ATOMISP_IOC_G_ISP_GAMMA: - err = atomisp_gamma(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_GAMMA: - err = atomisp_gamma(asd, 1, arg); - break; - - case ATOMISP_IOC_G_ISP_GDC_TAB: - err = atomisp_gdc_cac_table(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_GDC_TAB: - err = atomisp_gdc_cac_table(asd, 1, arg); - break; - - case ATOMISP_IOC_G_ISP_MACC: - err = atomisp_macc_table(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_MACC: - err = atomisp_macc_table(asd, 1, arg); - break; - - case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION: - err = atomisp_bad_pixel_param(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION: - err = atomisp_bad_pixel_param(asd, 1, arg); - break; - - case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION: - err = atomisp_false_color_param(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION: - err = atomisp_false_color_param(asd, 1, arg); - break; - - case ATOMISP_IOC_G_ISP_CTC: - err = atomisp_ctc(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_CTC: - err = atomisp_ctc(asd, 1, arg); - break; - - case ATOMISP_IOC_G_ISP_WHITE_BALANCE: - err = atomisp_white_balance_param(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_WHITE_BALANCE: - err = atomisp_white_balance_param(asd, 1, arg); - break; - - case ATOMISP_IOC_G_3A_CONFIG: - err = atomisp_3a_config_param(asd, 0, arg); - break; - - case ATOMISP_IOC_S_3A_CONFIG: - err = atomisp_3a_config_param(asd, 1, arg); - break; - - case ATOMISP_IOC_S_ISP_FPN_TABLE: - err = atomisp_fixed_pattern_table(asd, arg); - break; - - case ATOMISP_IOC_ISP_MAKERNOTE: - err = atomisp_exif_makernote(asd, arg); - break; - - case ATOMISP_IOC_G_SENSOR_MODE_DATA: - err = atomisp_get_sensor_mode_data(asd, arg); - break; - - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: -#ifndef ISP2401 - if (isp->inputs[asd->input_curr].motor) -#else - if (isp->motor) -#endif -#ifndef ISP2401 - err = v4l2_subdev_call( - isp->inputs[asd->input_curr].motor, - core, 
ioctl, cmd, arg); -#else - err = v4l2_subdev_call( - isp->motor, - core, ioctl, cmd, arg); -#endif - else - err = v4l2_subdev_call( - isp->inputs[asd->input_curr].camera, - core, ioctl, cmd, arg); - break; - - case ATOMISP_IOC_S_EXPOSURE: - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT: -#ifdef ISP2401 - case ATOMISP_IOC_G_UPDATE_EXPOSURE: -#endif - err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - core, ioctl, cmd, arg); - break; - - case ATOMISP_IOC_ACC_LOAD: - err = atomisp_acc_load(asd, arg); - break; - - case ATOMISP_IOC_ACC_LOAD_TO_PIPE: - err = atomisp_acc_load_to_pipe(asd, arg); - break; - - case ATOMISP_IOC_ACC_UNLOAD: - err = atomisp_acc_unload(asd, arg); - break; - - case ATOMISP_IOC_ACC_START: - err = atomisp_acc_start(asd, arg); - break; - - case ATOMISP_IOC_ACC_WAIT: - err = atomisp_acc_wait(asd, arg); - break; - - case ATOMISP_IOC_ACC_MAP: - err = atomisp_acc_map(asd, arg); - break; - - case ATOMISP_IOC_ACC_UNMAP: - err = atomisp_acc_unmap(asd, arg); - break; - - case ATOMISP_IOC_ACC_S_MAPPED_ARG: - err = atomisp_acc_s_mapped_arg(asd, arg); - break; - - case ATOMISP_IOC_S_ISP_SHD_TAB: - err = atomisp_set_shading_table(asd, arg); - break; - - case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION: - err = atomisp_gamma_correction(asd, 0, arg); - break; - - case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION: - err = atomisp_gamma_correction(asd, 1, arg); - break; - - case ATOMISP_IOC_S_PARAMETERS: - err = atomisp_set_parameters(vdev, arg); - break; - - case ATOMISP_IOC_S_CONT_CAPTURE_CONFIG: - err = atomisp_offline_capture_configure(asd, arg); - break; - case ATOMISP_IOC_G_METADATA: - err = atomisp_get_metadata(asd, 0, arg); - break; - case ATOMISP_IOC_G_METADATA_BY_TYPE: - err = atomisp_get_metadata_by_type(asd, 0, arg); - break; - case 
ATOMISP_IOC_EXT_ISP_CTRL: - err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - core, ioctl, cmd, arg); - break; - case ATOMISP_IOC_EXP_ID_UNLOCK: - err = atomisp_exp_id_unlock(asd, arg); - break; - case ATOMISP_IOC_EXP_ID_CAPTURE: - err = atomisp_exp_id_capture(asd, arg); - break; - case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE: - err = atomisp_enable_dz_capt_pipe(asd, arg); - break; - case ATOMISP_IOC_G_FORMATS_CONFIG: - err = atomisp_formats(asd, 0, arg); - break; - - case ATOMISP_IOC_S_FORMATS_CONFIG: - err = atomisp_formats(asd, 1, arg); - break; - case ATOMISP_IOC_S_EXPOSURE_WINDOW: - err = atomisp_s_ae_window(asd, arg); - break; - case ATOMISP_IOC_S_ACC_STATE: - err = atomisp_acc_set_state(asd, arg); - break; - case ATOMISP_IOC_G_ACC_STATE: - err = atomisp_acc_get_state(asd, arg); - break; - case ATOMISP_IOC_INJECT_A_FAKE_EVENT: - err = atomisp_inject_a_fake_event(asd, arg); - break; - case ATOMISP_IOC_G_INVALID_FRAME_NUM: - err = atomisp_get_invalid_frame_num(vdev, arg); - break; - case ATOMISP_IOC_S_ARRAY_RESOLUTION: - err = atomisp_set_array_res(asd, arg); - break; - default: - err = -EINVAL; - break; - } - - switch (cmd) { - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: - case ATOMISP_IOC_S_EXPOSURE: - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_EXT_ISP_CTRL: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT: -#ifdef ISP2401 - case ATOMISP_IOC_G_UPDATE_EXPOSURE: -#endif - break; - default: - rt_mutex_unlock(&isp->mutex); - break; - } - return err; -} - -const struct v4l2_ioctl_ops atomisp_ioctl_ops = { - .vidioc_querycap = atomisp_querycap, - .vidioc_enum_input = atomisp_enum_input, - .vidioc_g_input = atomisp_g_input, - .vidioc_s_input = atomisp_s_input, - .vidioc_queryctrl = atomisp_queryctl, - .vidioc_s_ctrl = atomisp_s_ctrl, - .vidioc_g_ctrl = 
atomisp_g_ctrl, - .vidioc_s_ext_ctrls = atomisp_s_ext_ctrls, - .vidioc_g_ext_ctrls = atomisp_g_ext_ctrls, - .vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap, - .vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap, - .vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap, - .vidioc_s_fmt_vid_cap = atomisp_s_fmt_cap, - .vidioc_reqbufs = atomisp_reqbufs, - .vidioc_querybuf = atomisp_querybuf, - .vidioc_qbuf = atomisp_qbuf, - .vidioc_dqbuf = atomisp_dqbuf, - .vidioc_streamon = atomisp_streamon, - .vidioc_streamoff = atomisp_streamoff, - .vidioc_default = atomisp_vidioc_default, - .vidioc_s_parm = atomisp_s_parm, - .vidioc_g_parm = atomisp_g_parm, -}; - -const struct v4l2_ioctl_ops atomisp_file_ioctl_ops = { - .vidioc_querycap = atomisp_querycap, - .vidioc_g_fmt_vid_out = atomisp_g_fmt_file, - .vidioc_s_fmt_vid_out = atomisp_s_fmt_file, - .vidioc_s_parm = atomisp_s_parm_file, - .vidioc_reqbufs = atomisp_reqbufs_file, - .vidioc_querybuf = atomisp_querybuf_file, - .vidioc_qbuf = atomisp_qbuf_file, -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h deleted file mode 100644 index 0d2785b9ef99..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __ATOMISP_IOCTL_H__ -#define __ATOMISP_IOCTL_H__ - -#include "ia_css.h" - -struct atomisp_device; -struct atomisp_video_pipe; - -extern const struct atomisp_format_bridge atomisp_output_fmts[]; - -const struct atomisp_format_bridge *atomisp_get_format_bridge( - unsigned int pixelformat); -#ifndef ISP2401 -const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus( - u32 mbus_code); -#else -const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(u32 - mbus_code); -#endif - -int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd, - uint16_t stream_id); - -int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type); -int __atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req); - -int atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req); - -enum atomisp_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device - *asd); - -void atomisp_videobuf_free_buf(struct videobuf_buffer *vb); - -extern const struct v4l2_file_operations atomisp_file_fops; - -extern const struct v4l2_ioctl_ops atomisp_ioctl_ops; - -extern const struct v4l2_ioctl_ops atomisp_file_ioctl_ops; - -unsigned int atomisp_streaming_count(struct atomisp_device *isp); - -unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp); -/* compat_ioctl for 32bit userland app and 64bit kernel */ -long atomisp_compat_ioctl32(struct file *file, - unsigned int cmd, unsigned long arg); - -int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp, bool isp_timeout); -#endif /* __ATOMISP_IOCTL_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c deleted file mode 100644 index 49a9973b4289..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c +++ /dev/null @@ -1,1422 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. 
- * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include "atomisp_cmd.h" -#include "atomisp_common.h" -#include "atomisp_compat.h" -#include "atomisp_internal.h" - -const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[] = { - { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_8 }, - { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_8 }, - { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_GRBG, CSS_FORMAT_RAW_8 }, - { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_8 }, - { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_10 }, - { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_10 }, - { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_GRBG, CSS_FORMAT_RAW_10 }, - { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_10 }, - { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_12 }, - { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_12 }, - { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_GRBG, 
CSS_FORMAT_RAW_12 }, - { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_12 }, - { MEDIA_BUS_FMT_UYVY8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0, ATOMISP_INPUT_FORMAT_YUV422_8 }, - { MEDIA_BUS_FMT_YUYV8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0, ATOMISP_INPUT_FORMAT_YUV422_8 }, - { MEDIA_BUS_FMT_JPEG_1X8, 8, 8, CSS_FRAME_FORMAT_BINARY_8, 0, ATOMISP_INPUT_FORMAT_BINARY_8 }, - { V4L2_MBUS_FMT_CUSTOM_NV12, 12, 12, CSS_FRAME_FORMAT_NV12, 0, CSS_FRAME_FORMAT_NV12 }, - { V4L2_MBUS_FMT_CUSTOM_NV21, 12, 12, CSS_FRAME_FORMAT_NV21, 0, CSS_FRAME_FORMAT_NV21 }, - { V4L2_MBUS_FMT_CUSTOM_YUV420, 12, 12, ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY, 0, ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY }, -#if 0 - { V4L2_MBUS_FMT_CUSTOM_M10MO_RAW, 8, 8, CSS_FRAME_FORMAT_BINARY_8, 0, ATOMISP_INPUT_FORMAT_BINARY_8 }, -#endif - /* no valid V4L2 MBUS code for metadata format, so leave it 0. */ - { 0, 0, 0, ATOMISP_INPUT_FORMAT_EMBEDDED, 0, ATOMISP_INPUT_FORMAT_EMBEDDED }, - {} -}; - -static const struct { - u32 code; - u32 compressed; -} compressed_codes[] = { - { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 }, - { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 }, - { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 }, - { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 }, -}; - -u32 atomisp_subdev_uncompressed_code(u32 code) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(compressed_codes); i++) - if (code == compressed_codes[i].compressed) - return compressed_codes[i].code; - - return code; -} - -bool atomisp_subdev_is_compressed(u32 code) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++) - if (code == atomisp_in_fmt_conv[i].code) - return atomisp_in_fmt_conv[i].bpp != - atomisp_in_fmt_conv[i].depth; - - return false; -} - -const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++) - 
if (code == atomisp_in_fmt_conv[i].code) - return atomisp_in_fmt_conv + i; - - return NULL; -} - -const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt( - enum atomisp_input_format atomisp_in_fmt) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++) - if (atomisp_in_fmt_conv[i].atomisp_in_fmt == atomisp_in_fmt) - return atomisp_in_fmt_conv + i; - - return NULL; -} - -bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd, - unsigned int source_pad) -{ - struct v4l2_mbus_framefmt *sink, *src; - - sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - src = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, source_pad); - - return atomisp_is_mbuscode_raw(sink->code) - && !atomisp_is_mbuscode_raw(src->code); -} - -uint16_t atomisp_subdev_source_pad(struct video_device * vdev) -{ - struct media_link *link; - uint16_t ret = 0; - list_for_each_entry(link, &vdev->entity.links, list) { - if (link->source) { - ret = link->source->index; - break; - } - } - return ret; -} - -/* - * V4L2 subdev operations - */ - -/* - * isp_subdev_ioctl - CCDC module private ioctl's - * @sd: ISP V4L2 subdevice - * @cmd: ioctl command - * @arg: ioctl argument - * - * Return 0 on success or a negative error code otherwise. - */ -static long isp_subdev_ioctl(struct v4l2_subdev *sd, - unsigned int cmd, void *arg) -{ - return 0; -} - -/* - * isp_subdev_set_power - Power on/off the CCDC module - * @sd: ISP V4L2 subdevice - * @on: power on/off - * - * Return 0 on success or a negative error code otherwise. 
- */ -static int isp_subdev_set_power(struct v4l2_subdev *sd, int on) -{ - return 0; -} - -static int isp_subdev_subscribe_event(struct v4l2_subdev *sd, - struct v4l2_fh *fh, - struct v4l2_event_subscription *sub) -{ - struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = isp_sd->isp; - - if (sub->type != V4L2_EVENT_FRAME_SYNC && - sub->type != V4L2_EVENT_FRAME_END && - sub->type != V4L2_EVENT_ATOMISP_3A_STATS_READY && - sub->type != V4L2_EVENT_ATOMISP_METADATA_READY && - sub->type != V4L2_EVENT_ATOMISP_PAUSE_BUFFER && - sub->type != V4L2_EVENT_ATOMISP_CSS_RESET && - sub->type != V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE && - sub->type != V4L2_EVENT_ATOMISP_ACC_COMPLETE) - return -EINVAL; - - if (sub->type == V4L2_EVENT_FRAME_SYNC && - !atomisp_css_valid_sof(isp)) - return -EINVAL; - - return v4l2_event_subscribe(fh, sub, 16, NULL); -} - -static int isp_subdev_unsubscribe_event(struct v4l2_subdev *sd, - struct v4l2_fh *fh, - struct v4l2_event_subscription *sub) -{ - return v4l2_event_unsubscribe(fh, sub); -} - -/* - * isp_subdev_enum_mbus_code - Handle pixel format enumeration - * @sd: pointer to v4l2 subdev structure - * @fh : V4L2 subdev file handle - * @code: pointer to v4l2_subdev_pad_mbus_code_enum structure - * return -EINVAL or zero on success - */ -static int isp_subdev_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->index >= ARRAY_SIZE(atomisp_in_fmt_conv) - 1) - return -EINVAL; - - code->code = atomisp_in_fmt_conv[code->index].code; - - return 0; -} - -static int isp_subdev_validate_rect(struct v4l2_subdev *sd, uint32_t pad, - uint32_t target) -{ - switch (pad) { - case ATOMISP_SUBDEV_PAD_SINK: - switch (target) { - case V4L2_SEL_TGT_CROP: - return 0; - } - break; - default: - switch (target) { - case V4L2_SEL_TGT_COMPOSE: - return 0; - } - break; - } - - return -EINVAL; -} - -struct v4l2_rect *atomisp_subdev_get_rect(struct 
v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - uint32_t which, uint32_t pad, - uint32_t target) -{ - struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd); - - if (which == V4L2_SUBDEV_FORMAT_TRY) { - switch (target) { - case V4L2_SEL_TGT_CROP: - return v4l2_subdev_get_try_crop(sd, cfg, pad); - case V4L2_SEL_TGT_COMPOSE: - return v4l2_subdev_get_try_compose(sd, cfg, pad); - } - } - - switch (target) { - case V4L2_SEL_TGT_CROP: - return &isp_sd->fmt[pad].crop; - case V4L2_SEL_TGT_COMPOSE: - return &isp_sd->fmt[pad].compose; - } - - return NULL; -} - -struct v4l2_mbus_framefmt -*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, uint32_t which, - uint32_t pad) -{ - struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd); - - if (which == V4L2_SUBDEV_FORMAT_TRY) - return v4l2_subdev_get_try_format(sd, cfg, pad); - - return &isp_sd->fmt[pad].fmt; -} - -static void isp_get_fmt_rect(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, uint32_t which, - struct v4l2_mbus_framefmt **ffmt, - struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM], - struct v4l2_rect *comp[ATOMISP_SUBDEV_PADS_NUM]) -{ - unsigned int i; - - for (i = 0; i < ATOMISP_SUBDEV_PADS_NUM; i++) { - ffmt[i] = atomisp_subdev_get_ffmt(sd, cfg, which, i); - crop[i] = atomisp_subdev_get_rect(sd, cfg, which, i, - V4L2_SEL_TGT_CROP); - comp[i] = atomisp_subdev_get_rect(sd, cfg, which, i, - V4L2_SEL_TGT_COMPOSE); - } -} - -static void isp_subdev_propagate(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - uint32_t which, uint32_t pad, uint32_t target, - uint32_t flags) -{ - struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM]; - struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM], - *comp[ATOMISP_SUBDEV_PADS_NUM]; - - if (flags & V4L2_SEL_FLAG_KEEP_CONFIG) - return; - - isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp); - - switch (pad) { - case ATOMISP_SUBDEV_PAD_SINK: { - struct v4l2_rect r = {0}; - - /* Only crop target supported on sink 
pad. */ - r.width = ffmt[pad]->width; - r.height = ffmt[pad]->height; - - atomisp_subdev_set_selection(sd, cfg, which, pad, - target, flags, &r); - break; - } - } -} - -static int isp_subdev_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct v4l2_rect *rec; - int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target); - - if (rval) - return rval; - - rec = atomisp_subdev_get_rect(sd, cfg, sel->which, sel->pad, - sel->target); - if (!rec) - return -EINVAL; - - sel->r = *rec; - return 0; -} - -static char *atomisp_pad_str[] = { "ATOMISP_SUBDEV_PAD_SINK", - "ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE", - "ATOMISP_SUBDEV_PAD_SOURCE_VF", - "ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW", - "ATOMISP_SUBDEV_PAD_SOURCE_VIDEO"}; - -int atomisp_subdev_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - uint32_t which, uint32_t pad, uint32_t target, - uint32_t flags, struct v4l2_rect *r) -{ - struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = isp_sd->isp; - struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM]; - uint16_t vdev_pad = atomisp_subdev_source_pad(sd->devnode); - struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM], - *comp[ATOMISP_SUBDEV_PADS_NUM]; - enum atomisp_input_stream_id stream_id; - unsigned int i; - unsigned int padding_w = pad_w; - unsigned int padding_h = pad_h; - - stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad); - - isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp); - - dev_dbg(isp->dev, - "sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n", - atomisp_pad_str[pad], target == V4L2_SEL_TGT_CROP - ? "V4L2_SEL_TGT_CROP" : "V4L2_SEL_TGT_COMPOSE", - r->left, r->top, r->width, r->height, - which == V4L2_SUBDEV_FORMAT_TRY ? 
"V4L2_SUBDEV_FORMAT_TRY" - : "V4L2_SUBDEV_FORMAT_ACTIVE", flags); - - r->width = rounddown(r->width, ATOM_ISP_STEP_WIDTH); - r->height = rounddown(r->height, ATOM_ISP_STEP_HEIGHT); - - switch (pad) { - case ATOMISP_SUBDEV_PAD_SINK: { - /* Only crop target supported on sink pad. */ - unsigned int dvs_w, dvs_h; - - crop[pad]->width = ffmt[pad]->width; - crop[pad]->height = ffmt[pad]->height; - - /* Workaround for BYT 1080p perfectshot since the maxinum resolution of - * front camera ov2722 is 1932x1092 and cannot use pad_w > 12*/ - if (!strncmp(isp->inputs[isp_sd->input_curr].camera->name, - "ov2722", 6) && crop[pad]->height == 1092) { - padding_w = 12; - padding_h = 12; - } - - if (isp->inputs[isp_sd->input_curr].type == SOC_CAMERA) { - padding_w = 0; - padding_h = 0; - } - - if (atomisp_subdev_format_conversion(isp_sd, - isp_sd->capture_pad) - && crop[pad]->width && crop[pad]->height) - crop[pad]->width -= padding_w, crop[pad]->height -= padding_h; - - /* if subdev type is SOC camera,we do not need to set DVS */ - if (isp->inputs[isp_sd->input_curr].type == SOC_CAMERA) - isp_sd->params.video_dis_en = 0; - - if (isp_sd->params.video_dis_en && - isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO && - !isp_sd->continuous_mode->val) { - /* This resolution contains 20 % of DVS slack - * (of the desired captured image before - * scaling, or 1 / 6 of what we get from the - * sensor) in both width and height. Remove - * it. 
*/ - crop[pad]->width = roundup(crop[pad]->width * 5 / 6, - ATOM_ISP_STEP_WIDTH); - crop[pad]->height = roundup(crop[pad]->height * 5 / 6, - ATOM_ISP_STEP_HEIGHT); - } - - crop[pad]->width = min(crop[pad]->width, r->width); - crop[pad]->height = min(crop[pad]->height, r->height); - - if (!(flags & V4L2_SEL_FLAG_KEEP_CONFIG)) { - for (i = ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE; - i < ATOMISP_SUBDEV_PADS_NUM; i++) { - struct v4l2_rect tmp = *crop[pad]; - - atomisp_subdev_set_selection( - sd, cfg, which, i, V4L2_SEL_TGT_COMPOSE, - flags, &tmp); - } - } - - if (which == V4L2_SUBDEV_FORMAT_TRY) - break; - - if (isp_sd->params.video_dis_en && - isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO && - !isp_sd->continuous_mode->val) { - dvs_w = rounddown(crop[pad]->width / 5, - ATOM_ISP_STEP_WIDTH); - dvs_h = rounddown(crop[pad]->height / 5, - ATOM_ISP_STEP_HEIGHT); - } else if (!isp_sd->params.video_dis_en && - isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - /* - * For CSS2.0, digital zoom needs to set dvs envelope to 12 - * when dvs is disabled. - */ - dvs_w = dvs_h = 12; - } else - dvs_w = dvs_h = 0; - - atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h); - atomisp_css_input_set_effective_resolution(isp_sd, stream_id, - crop[pad]->width, crop[pad]->height); - - break; - } - case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: - case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: { - /* Only compose target is supported on source pads. 
*/ - - if (isp_sd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { - /* Scaling is disabled in this mode */ - r->width = crop[ATOMISP_SUBDEV_PAD_SINK]->width; - r->height = crop[ATOMISP_SUBDEV_PAD_SINK]->height; - } - - if (crop[ATOMISP_SUBDEV_PAD_SINK]->width == r->width - && crop[ATOMISP_SUBDEV_PAD_SINK]->height == r->height) - isp_sd->params.yuv_ds_en = false; - else - isp_sd->params.yuv_ds_en = true; - - comp[pad]->width = r->width; - comp[pad]->height = r->height; - - if (r->width == 0 || r->height == 0 || - crop[ATOMISP_SUBDEV_PAD_SINK]->width == 0 || - crop[ATOMISP_SUBDEV_PAD_SINK]->height == 0) - break; - /* - * do cropping on sensor input if ratio of required resolution - * is different with sensor output resolution ratio: - * - * ratio = width / height - * - * if ratio_output < ratio_sensor: - * effect_width = sensor_height * out_width / out_height; - * effect_height = sensor_height; - * else - * effect_width = sensor_width; - * effect_height = sensor_width * out_height / out_width; - * - */ - if (r->width * crop[ATOMISP_SUBDEV_PAD_SINK]->height < - crop[ATOMISP_SUBDEV_PAD_SINK]->width * r->height) - atomisp_css_input_set_effective_resolution(isp_sd, - stream_id, - rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]-> - height * r->width / r->height, - ATOM_ISP_STEP_WIDTH), - crop[ATOMISP_SUBDEV_PAD_SINK]->height); - else - atomisp_css_input_set_effective_resolution(isp_sd, - stream_id, - crop[ATOMISP_SUBDEV_PAD_SINK]->width, - rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]-> - width * r->height / r->width, - ATOM_ISP_STEP_WIDTH)); - - break; - } - case ATOMISP_SUBDEV_PAD_SOURCE_VF: - case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: - comp[pad]->width = r->width; - comp[pad]->height = r->height; - break; - default: - return -EINVAL; - } - - /* Set format dimensions on non-sink pads as well. 
*/ - if (pad != ATOMISP_SUBDEV_PAD_SINK) { - ffmt[pad]->width = comp[pad]->width; - ffmt[pad]->height = comp[pad]->height; - } - - if (!atomisp_subdev_get_rect(sd, cfg, which, pad, target)) - return -EINVAL; - *r = *atomisp_subdev_get_rect(sd, cfg, which, pad, target); - - dev_dbg(isp->dev, "sel actual: l %d t %d w %d h %d\n", - r->left, r->top, r->width, r->height); - - return 0; -} - -static int isp_subdev_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target); - if (rval) - return rval; - - return atomisp_subdev_set_selection(sd, cfg, sel->which, sel->pad, - sel->target, sel->flags, &sel->r); -} - -static int atomisp_get_sensor_bin_factor(struct atomisp_sub_device *asd) -{ - struct v4l2_control ctrl = {0}; - struct atomisp_device *isp = asd->isp; - int hbin, vbin; - int ret; - - if (isp->inputs[asd->input_curr].type == FILE_INPUT || - isp->inputs[asd->input_curr].type == TEST_PATTERN) - return 0; - - ctrl.id = V4L2_CID_BIN_FACTOR_HORZ; - ret = - v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->ctrl_handler, - &ctrl); - hbin = ctrl.value; - ctrl.id = V4L2_CID_BIN_FACTOR_VERT; - ret |= - v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->ctrl_handler, - &ctrl); - vbin = ctrl.value; - - /* - * ISP needs to know binning factor from sensor. - * In case horizontal and vertical sensor's binning factors - * are different or sensor does not support binning factor CID, - * ISP will apply default 0 value. 
- */ - if (ret || hbin != vbin) - hbin = 0; - - return hbin; -} - -void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, uint32_t which, - uint32_t pad, struct v4l2_mbus_framefmt *ffmt) -{ - struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = isp_sd->isp; - struct v4l2_mbus_framefmt *__ffmt = - atomisp_subdev_get_ffmt(sd, cfg, which, pad); - uint16_t vdev_pad = atomisp_subdev_source_pad(sd->devnode); - enum atomisp_input_stream_id stream_id; - - dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n", - atomisp_pad_str[pad], ffmt->width, ffmt->height, ffmt->code, - which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY" - : "V4L2_SUBDEV_FORMAT_ACTIVE"); - - stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad); - - switch (pad) { - case ATOMISP_SUBDEV_PAD_SINK: { - const struct atomisp_in_fmt_conv *fc = - atomisp_find_in_fmt_conv(ffmt->code); - - if (!fc) { - fc = atomisp_in_fmt_conv; - ffmt->code = fc->code; - dev_dbg(isp->dev, "using 0x%8.8x instead\n", - ffmt->code); - } - - *__ffmt = *ffmt; - - isp_subdev_propagate(sd, cfg, which, pad, - V4L2_SEL_TGT_CROP, 0); - - if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { - atomisp_css_input_set_resolution(isp_sd, - stream_id, ffmt); - atomisp_css_input_set_binning_factor(isp_sd, - stream_id, - atomisp_get_sensor_bin_factor(isp_sd)); - atomisp_css_input_set_bayer_order(isp_sd, stream_id, - fc->bayer_order); - atomisp_css_input_set_format(isp_sd, stream_id, - fc->css_stream_fmt); - atomisp_css_set_default_isys_config(isp_sd, stream_id, - ffmt); - } - - break; - } - case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: - case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: - case ATOMISP_SUBDEV_PAD_SOURCE_VF: - case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: - __ffmt->code = ffmt->code; - break; - } -} - -/* - * isp_subdev_get_format - Retrieve the video format on a pad - * @sd : ISP V4L2 subdevice - * @fh : V4L2 subdev file handle - * @pad: Pad number - * @fmt: Format 
- * - * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond - * to the format type. - */ -static int isp_subdev_get_format(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *fmt) -{ - fmt->format = *atomisp_subdev_get_ffmt(sd, cfg, fmt->which, fmt->pad); - - return 0; -} - -/* - * isp_subdev_set_format - Set the video format on a pad - * @sd : ISP subdev V4L2 subdevice - * @fh : V4L2 subdev file handle - * @pad: Pad number - * @fmt: Format - * - * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond - * to the format type. - */ -static int isp_subdev_set_format(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *fmt) -{ - atomisp_subdev_set_ffmt(sd, cfg, fmt->which, fmt->pad, &fmt->format); - - return 0; -} - -/* V4L2 subdev core operations */ -static const struct v4l2_subdev_core_ops isp_subdev_v4l2_core_ops = { - .ioctl = isp_subdev_ioctl, .s_power = isp_subdev_set_power, - .subscribe_event = isp_subdev_subscribe_event, - .unsubscribe_event = isp_subdev_unsubscribe_event, -}; - -/* V4L2 subdev pad operations */ -static const struct v4l2_subdev_pad_ops isp_subdev_v4l2_pad_ops = { - .enum_mbus_code = isp_subdev_enum_mbus_code, - .get_fmt = isp_subdev_get_format, - .set_fmt = isp_subdev_set_format, - .get_selection = isp_subdev_get_selection, - .set_selection = isp_subdev_set_selection, - .link_validate = v4l2_subdev_link_validate_default, -}; - -/* V4L2 subdev operations */ -static const struct v4l2_subdev_ops isp_subdev_v4l2_ops = { - .core = &isp_subdev_v4l2_core_ops, - .pad = &isp_subdev_v4l2_pad_ops, -}; - -static void isp_subdev_init_params(struct atomisp_sub_device *asd) -{ - unsigned int i; - - /* parameters initialization */ - INIT_LIST_HEAD(&asd->s3a_stats); - INIT_LIST_HEAD(&asd->s3a_stats_in_css); - INIT_LIST_HEAD(&asd->s3a_stats_ready); - INIT_LIST_HEAD(&asd->dis_stats); - INIT_LIST_HEAD(&asd->dis_stats_in_css); - 
spin_lock_init(&asd->dis_stats_lock); - for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) { - INIT_LIST_HEAD(&asd->metadata[i]); - INIT_LIST_HEAD(&asd->metadata_in_css[i]); - INIT_LIST_HEAD(&asd->metadata_ready[i]); - } -} - -/* -* isp_subdev_link_setup - Setup isp subdev connections -* @entity: ispsubdev media entity -* @local: Pad at the local end of the link -* @remote: Pad at the remote end of the link -* @flags: Link flags -* -* return -EINVAL or zero on success -*/ -static int isp_subdev_link_setup(struct media_entity *entity, - const struct media_pad *local, - const struct media_pad *remote, u32 flags) -{ - struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); - struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = isp_sd->isp; - unsigned int i; - - switch (local->index | is_media_entity_v4l2_subdev(remote->entity)) { - case ATOMISP_SUBDEV_PAD_SINK | MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN: - /* Read from the sensor CSI2-ports. */ - if (!(flags & MEDIA_LNK_FL_ENABLED)) { - isp_sd->input = ATOMISP_SUBDEV_INPUT_NONE; - break; - } - - if (isp_sd->input != ATOMISP_SUBDEV_INPUT_NONE) - return -EBUSY; - - for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) { - if (remote->entity != &isp->csi2_port[i].subdev.entity) - continue; - - isp_sd->input = ATOMISP_SUBDEV_INPUT_CSI2_PORT1 + i; - return 0; - } - - return -EINVAL; - - case ATOMISP_SUBDEV_PAD_SINK | MEDIA_ENT_F_OLD_BASE: - /* read from memory */ - if (flags & MEDIA_LNK_FL_ENABLED) { - if (isp_sd->input >= ATOMISP_SUBDEV_INPUT_CSI2_PORT1 && - isp_sd->input < (ATOMISP_SUBDEV_INPUT_CSI2_PORT1 - + ATOMISP_CAMERA_NR_PORTS)) - return -EBUSY; - isp_sd->input = ATOMISP_SUBDEV_INPUT_MEMORY; - } else { - if (isp_sd->input == ATOMISP_SUBDEV_INPUT_MEMORY) - isp_sd->input = ATOMISP_SUBDEV_INPUT_NONE; - } - break; - - case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW | MEDIA_ENT_F_OLD_BASE: - /* always write to memory */ - break; - - case ATOMISP_SUBDEV_PAD_SOURCE_VF | MEDIA_ENT_F_OLD_BASE: - /* always 
write to memory */ - break; - - case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE | MEDIA_ENT_F_OLD_BASE: - /* always write to memory */ - break; - - case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO | MEDIA_ENT_F_OLD_BASE: - /* always write to memory */ - break; - - default: - return -EINVAL; - } - - return 0; -} - -/* media operations */ -static const struct media_entity_operations isp_subdev_media_ops = { - .link_setup = isp_subdev_link_setup, - .link_validate = v4l2_subdev_link_validate, -/* .set_power = v4l2_subdev_set_power, */ -}; - -static int __atomisp_update_run_mode(struct atomisp_sub_device *asd) -{ - struct atomisp_device *isp = asd->isp; - struct v4l2_ctrl *ctrl = asd->run_mode; - struct v4l2_ctrl *c; - s32 mode; - - if (ctrl->val != ATOMISP_RUN_MODE_VIDEO && - asd->continuous_mode->val) - mode = ATOMISP_RUN_MODE_PREVIEW; - else - mode = ctrl->val; - - c = v4l2_ctrl_find( - isp->inputs[asd->input_curr].camera->ctrl_handler, - V4L2_CID_RUN_MODE); - - if (c) - return v4l2_ctrl_s_ctrl(c, mode); - - return 0; -} - -int atomisp_update_run_mode(struct atomisp_sub_device *asd) -{ - int rval; - - mutex_lock(asd->ctrl_handler.lock); - rval = __atomisp_update_run_mode(asd); - mutex_unlock(asd->ctrl_handler.lock); - - return rval; -} - -static int s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct atomisp_sub_device *asd = container_of( - ctrl->handler, struct atomisp_sub_device, ctrl_handler); - - switch (ctrl->id) { - case V4L2_CID_RUN_MODE: - return __atomisp_update_run_mode(asd); - case V4L2_CID_DEPTH_MODE: - if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) { - dev_err(asd->isp->dev, "ISP is streaming, it is not supported to change the depth mode\n"); - return -EINVAL; - } - break; - } - - return 0; -} - -static const struct v4l2_ctrl_ops ctrl_ops = { - .s_ctrl = &s_ctrl, -}; - -static const struct v4l2_ctrl_config ctrl_fmt_auto = { - .ops = &ctrl_ops, - .id = V4L2_CID_FMT_AUTO, - .name = "Automatic format guessing", - .type = V4L2_CTRL_TYPE_BOOLEAN, - .min = 0, - .max = 1, - .step 
= 1, - .def = 1, -}; - -static const char * const ctrl_run_mode_menu[] = { - NULL, - "Video", - "Still capture", - "Continuous capture", - "Preview", -}; - -static const struct v4l2_ctrl_config ctrl_run_mode = { - .ops = &ctrl_ops, - .id = V4L2_CID_RUN_MODE, - .name = "Atomisp run mode", - .type = V4L2_CTRL_TYPE_MENU, - .min = 1, - .def = 1, - .max = 4, - .qmenu = ctrl_run_mode_menu, -}; - -static const char * const ctrl_vfpp_mode_menu[] = { - "Enable", /* vfpp always enabled */ - "Disable to scaler mode", /* CSS into video mode and disable */ - "Disable to low latency mode", /* CSS into still mode and disable */ -}; - -static const struct v4l2_ctrl_config ctrl_vfpp = { - .id = V4L2_CID_VFPP, - .name = "Atomisp vf postprocess", - .type = V4L2_CTRL_TYPE_MENU, - .min = 0, - .def = 0, - .max = 2, - .qmenu = ctrl_vfpp_mode_menu, -}; - -/* - * Control for ISP continuous mode - * - * When enabled, capture processing is possible without - * stopping the preview pipeline. When disabled, ISP needs - * to be restarted between preview and capture. - */ -static const struct v4l2_ctrl_config ctrl_continuous_mode = { - .ops = &ctrl_ops, - .id = V4L2_CID_ATOMISP_CONTINUOUS_MODE, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Continuous mode", - .min = 0, - .max = 1, - .step = 1, - .def = 0, -}; - -/* - * Control for continuous mode raw buffer size - * - * The size of the RAW ringbuffer sets limit on how much - * back in time application can go when requesting capture - * frames to be rendered, and how many frames can be rendered - * in a burst at full sensor rate. - * - * Note: this setting has a big impact on memory consumption of - * the CSS subsystem. 
- */ -static const struct v4l2_ctrl_config ctrl_continuous_raw_buffer_size = { - .ops = &ctrl_ops, - .id = V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Continuous raw ringbuffer size", - .min = 1, - .max = 100, /* depends on CSS version, runtime checked */ - .step = 1, - .def = 3, -}; - -/* - * Control for enabling continuous viewfinder - * - * When enabled, and ISP is in continuous mode (see ctrl_continuous_mode ), - * preview pipeline continues concurrently with capture - * processing. When disabled, and continuous mode is used, - * preview is paused while captures are processed, but - * full pipeline restart is not needed. - * - * By setting this to disabled, capture processing is - * essentially given priority over preview, and the effective - * capture output rate may be higher than with continuous - * viewfinder enabled. - */ -static const struct v4l2_ctrl_config ctrl_continuous_viewfinder = { - .id = V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Continuous viewfinder", - .min = 0, - .max = 1, - .step = 1, - .def = 0, -}; - -/* - * Control for enabling Lock&Unlock Raw Buffer mechanism - * - * When enabled, Raw Buffer can be locked and unlocked. - * Application can hold the exp_id of Raw Buffer - * and unlock it when no longer needed. - * Note: Make sure set this configuration before creating stream. - */ -static const struct v4l2_ctrl_config ctrl_enable_raw_buffer_lock = { - .id = V4L2_CID_ENABLE_RAW_BUFFER_LOCK, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Lock Unlock Raw Buffer", - .min = 0, - .max = 1, - .step = 1, - .def = 0, -}; - -/* - * Control to disable digital zoom of the whole stream - * - * When it is true, pipe configuation enable_dz will be set to false. - * This can help get a better performance by disabling pp binary. - * - * Note: Make sure set this configuration before creating stream. 
- */ -static const struct v4l2_ctrl_config ctrl_disable_dz = { - .id = V4L2_CID_DISABLE_DZ, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Disable digital zoom", - .min = 0, - .max = 1, - .step = 1, - .def = 0, -}; - -/* - * Control for ISP depth mode - * - * When enabled, that means ISP will deal with dual streams and sensors will be - * in slave/master mode. - * slave sensor will have no output until master sensor is streamed on. - */ -static const struct v4l2_ctrl_config ctrl_depth_mode = { - .ops = &ctrl_ops, - .id = V4L2_CID_DEPTH_MODE, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Depth mode", - .min = 0, - .max = 1, - .step = 1, - .def = 0, -}; - -#ifdef ISP2401 -/* - * Control for selectting ISP version - * - * When enabled, that means ISP version will be used ISP2.7. when disable, the - * isp will default to use ISP2.2. - * Note: Make sure set this configuration before creating stream. - */ -static const struct v4l2_ctrl_config ctrl_select_isp_version = { - .ops = &ctrl_ops, - .id = V4L2_CID_ATOMISP_SELECT_ISP_VERSION, - .type = V4L2_CTRL_TYPE_BOOLEAN, - .name = "Select Isp version", - .min = 0, - .max = 1, - .step = 1, - .def = 0, -}; - -#ifdef CONFIG_ION -/* - * Control for ISP ion device fd - * - * userspace will open ion device and pass the fd to kernel. - * this fd will be used to map shared fd to buffer. 
- */ -static const struct v4l2_ctrl_config ctrl_ion_dev_fd = { - .ops = &ctrl_ops, - .id = V4L2_CID_ATOMISP_ION_DEVICE_FD, - .type = V4L2_CTRL_TYPE_INTEGER, - .name = "Ion Device Fd", - .min = -1, - .max = 1024, - .step = 1, - .def = ION_FD_UNSET -}; -#endif - -#endif -static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd, - struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type) -{ - pipe->type = buf_type; - pipe->asd = asd; - pipe->isp = asd->isp; - spin_lock_init(&pipe->irq_lock); - INIT_LIST_HEAD(&pipe->activeq); - INIT_LIST_HEAD(&pipe->activeq_out); - INIT_LIST_HEAD(&pipe->buffers_waiting_for_param); - INIT_LIST_HEAD(&pipe->per_frame_params); - memset(pipe->frame_request_config_id, - 0, VIDEO_MAX_FRAME * sizeof(unsigned int)); - memset(pipe->frame_params, - 0, VIDEO_MAX_FRAME * - sizeof(struct atomisp_css_params_with_list *)); -} - -static void atomisp_init_acc_pipe(struct atomisp_sub_device *asd, - struct atomisp_acc_pipe *pipe) -{ - pipe->asd = asd; - pipe->isp = asd->isp; - INIT_LIST_HEAD(&asd->acc.fw); - INIT_LIST_HEAD(&asd->acc.memory_maps); - ida_init(&asd->acc.ida); -} - -/* - * isp_subdev_init_entities - Initialize V4L2 subdev and media entity - * @asd: ISP CCDC module - * - * Return 0 on success and a negative error code on failure. 
- */ -static int isp_subdev_init_entities(struct atomisp_sub_device *asd) -{ - struct v4l2_subdev *sd = &asd->subdev; - struct media_pad *pads = asd->pads; - struct media_entity *me = &sd->entity; - int ret; - - asd->input = ATOMISP_SUBDEV_INPUT_NONE; - - v4l2_subdev_init(sd, &isp_subdev_v4l2_ops); - sprintf(sd->name, "ATOMISP_SUBDEV_%d", asd->index); - v4l2_set_subdevdata(sd, asd); - sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; - - pads[ATOMISP_SUBDEV_PAD_SINK].flags = MEDIA_PAD_FL_SINK; - pads[ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW].flags = MEDIA_PAD_FL_SOURCE; - pads[ATOMISP_SUBDEV_PAD_SOURCE_VF].flags = MEDIA_PAD_FL_SOURCE; - pads[ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE].flags = MEDIA_PAD_FL_SOURCE; - pads[ATOMISP_SUBDEV_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE; - - asd->fmt[ATOMISP_SUBDEV_PAD_SINK].fmt.code = - MEDIA_BUS_FMT_SBGGR10_1X10; - asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW].fmt.code = - MEDIA_BUS_FMT_SBGGR10_1X10; - asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_VF].fmt.code = - MEDIA_BUS_FMT_SBGGR10_1X10; - asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE].fmt.code = - MEDIA_BUS_FMT_SBGGR10_1X10; - asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_VIDEO].fmt.code = - MEDIA_BUS_FMT_SBGGR10_1X10; - - me->ops = &isp_subdev_media_ops; - me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; - ret = media_entity_pads_init(me, ATOMISP_SUBDEV_PADS_NUM, pads); - if (ret < 0) - return ret; - - atomisp_init_subdev_pipe(asd, &asd->video_in, - V4L2_BUF_TYPE_VIDEO_OUTPUT); - - atomisp_init_subdev_pipe(asd, &asd->video_out_preview, - V4L2_BUF_TYPE_VIDEO_CAPTURE); - - atomisp_init_subdev_pipe(asd, &asd->video_out_vf, - V4L2_BUF_TYPE_VIDEO_CAPTURE); - - atomisp_init_subdev_pipe(asd, &asd->video_out_capture, - V4L2_BUF_TYPE_VIDEO_CAPTURE); - - atomisp_init_subdev_pipe(asd, &asd->video_out_video_capture, - V4L2_BUF_TYPE_VIDEO_CAPTURE); - - atomisp_init_acc_pipe(asd, &asd->video_acc); - - ret = atomisp_video_init(&asd->video_in, "MEMORY"); - if (ret < 0) - return ret; - - ret = 
atomisp_video_init(&asd->video_out_capture, "CAPTURE"); - if (ret < 0) - return ret; - - ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER"); - if (ret < 0) - return ret; - - ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW"); - if (ret < 0) - return ret; - - ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO"); - if (ret < 0) - return ret; - - atomisp_acc_init(&asd->video_acc, "ACC"); - - ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, 1); - if (ret) - return ret; - - asd->fmt_auto = v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_fmt_auto, NULL); - asd->run_mode = v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_run_mode, NULL); - asd->vfpp = v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_vfpp, NULL); - asd->continuous_mode = v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_continuous_mode, NULL); - asd->continuous_viewfinder = v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_continuous_viewfinder, - NULL); - asd->continuous_raw_buffer_size = - v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_continuous_raw_buffer_size, - NULL); - - asd->enable_raw_buffer_lock = - v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_enable_raw_buffer_lock, - NULL); - asd->depth_mode = - v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_depth_mode, - NULL); - asd->disable_dz = - v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_disable_dz, - NULL); -#ifdef ISP2401 - asd->select_isp_version = - v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_select_isp_version, - NULL); - -#ifdef CONFIG_ION - asd->ion_dev_fd = - v4l2_ctrl_new_custom(&asd->ctrl_handler, - &ctrl_ion_dev_fd, - NULL); -#endif -#endif - - /* Make controls visible on subdev as well. 
*/ - asd->subdev.ctrl_handler = &asd->ctrl_handler; - spin_lock_init(&asd->raw_buffer_bitmap_lock); - return asd->ctrl_handler.error; -} - -int atomisp_create_pads_links(struct atomisp_device *isp) -{ - struct atomisp_sub_device *asd; - int i, j, ret = 0; - isp->num_of_streams = 2; - for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) { - for (j = 0; j < isp->num_of_streams; j++) { - ret = - media_create_pad_link(&isp->csi2_port[i].subdev. - entity, CSI2_PAD_SOURCE, - &isp->asd[j].subdev.entity, - ATOMISP_SUBDEV_PAD_SINK, 0); - if (ret < 0) - return ret; - } - } - for (i = 0; i < isp->input_cnt - 2; i++) { - ret = media_create_pad_link(&isp->inputs[i].camera->entity, 0, - &isp->csi2_port[isp->inputs[i]. - port].subdev.entity, - CSI2_PAD_SINK, - MEDIA_LNK_FL_ENABLED | - MEDIA_LNK_FL_IMMUTABLE); - if (ret < 0) - return ret; - } - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - ret = media_create_pad_link(&asd->subdev.entity, - ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW, - &asd->video_out_preview.vdev.entity, - 0, 0); - if (ret < 0) - return ret; - ret = media_create_pad_link(&asd->subdev.entity, - ATOMISP_SUBDEV_PAD_SOURCE_VF, - &asd->video_out_vf.vdev.entity, 0, - 0); - if (ret < 0) - return ret; - ret = media_create_pad_link(&asd->subdev.entity, - ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE, - &asd->video_out_capture.vdev.entity, - 0, 0); - if (ret < 0) - return ret; - ret = media_create_pad_link(&asd->subdev.entity, - ATOMISP_SUBDEV_PAD_SOURCE_VIDEO, - &asd->video_out_video_capture.vdev. 
- entity, 0, 0); - if (ret < 0) - return ret; - /* - * file input only supported on subdev0 - * so do not create pad link for subdevs other then subdev0 - */ - if (asd->index) - return 0; - ret = media_create_pad_link(&asd->video_in.vdev.entity, - 0, &asd->subdev.entity, - ATOMISP_SUBDEV_PAD_SINK, 0); - if (ret < 0) - return ret; - } - return 0; -} - -static void atomisp_subdev_cleanup_entities(struct atomisp_sub_device *asd) -{ - v4l2_ctrl_handler_free(&asd->ctrl_handler); - - media_entity_cleanup(&asd->subdev.entity); -} - -void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd) -{ - struct v4l2_fh *fh, *fh_tmp; - struct v4l2_event event; - unsigned int i, pending_event; - - list_for_each_entry_safe(fh, fh_tmp, - &asd->subdev.devnode->fh_list, list) { - pending_event = v4l2_event_pending(fh); - for (i = 0; i < pending_event; i++) - v4l2_event_dequeue(fh, &event, 1); - } -} - -void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd) -{ - atomisp_subdev_cleanup_entities(asd); - v4l2_device_unregister_subdev(&asd->subdev); - atomisp_video_unregister(&asd->video_in); - atomisp_video_unregister(&asd->video_out_preview); - atomisp_video_unregister(&asd->video_out_vf); - atomisp_video_unregister(&asd->video_out_capture); - atomisp_video_unregister(&asd->video_out_video_capture); - atomisp_acc_unregister(&asd->video_acc); -} - -int atomisp_subdev_register_entities(struct atomisp_sub_device *asd, - struct v4l2_device *vdev) -{ - int ret; - - /* Register the subdev and video node. 
*/ - ret = v4l2_device_register_subdev(vdev, &asd->subdev); - if (ret < 0) - goto error; - - ret = atomisp_video_register(&asd->video_out_capture, vdev); - if (ret < 0) - goto error; - - ret = atomisp_video_register(&asd->video_out_vf, vdev); - if (ret < 0) - goto error; - - ret = atomisp_video_register(&asd->video_out_preview, vdev); - if (ret < 0) - goto error; - - ret = atomisp_video_register(&asd->video_out_video_capture, vdev); - if (ret < 0) - goto error; - - ret = atomisp_acc_register(&asd->video_acc, vdev); - if (ret < 0) - goto error; - - /* - * file input only supported on subdev0 - * so do not create video node for subdevs other then subdev0 - */ - if (asd->index) - return 0; - ret = atomisp_video_register(&asd->video_in, vdev); - if (ret < 0) - goto error; - - return 0; - -error: - atomisp_subdev_unregister_entities(asd); - return ret; -} - -/* - * atomisp_subdev_init - ISP Subdevice initialization. - * @dev: Device pointer specific to the ATOM ISP. - * - * TODO: Get the initialisation values from platform data. - * - * Return 0 on success or a negative error code otherwise. 
- */ -int atomisp_subdev_init(struct atomisp_device *isp) -{ - struct atomisp_sub_device *asd; - int i, ret = 0; - - /* - * CSS2.0 running ISP2400 support - * multiple streams - */ - isp->num_of_streams = 2; - isp->asd = devm_kzalloc(isp->dev, sizeof(struct atomisp_sub_device) * - isp->num_of_streams, GFP_KERNEL); - if (!isp->asd) - return -ENOMEM; - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - spin_lock_init(&asd->lock); - asd->isp = isp; - isp_subdev_init_params(asd); - asd->index = i; - ret = isp_subdev_init_entities(asd); - if (ret < 0) { - atomisp_subdev_cleanup_entities(asd); - break; - } - } - - return ret; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h deleted file mode 100644 index 59ff8723c182..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h +++ /dev/null @@ -1,467 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#ifndef __ATOMISP_SUBDEV_H__ -#define __ATOMISP_SUBDEV_H__ - -#include -#include -#include -#include - -#include "atomisp_common.h" -#include "atomisp_compat.h" -#include "atomisp_v4l2.h" - -#include "ia_css.h" - -/* EXP_ID's ranger is 1 ~ 250 */ -#define ATOMISP_MAX_EXP_ID (250) -enum atomisp_subdev_input_entity { - ATOMISP_SUBDEV_INPUT_NONE, - ATOMISP_SUBDEV_INPUT_MEMORY, - ATOMISP_SUBDEV_INPUT_CSI2, - /* - * The following enum for CSI2 port must go together in one row. - * Otherwise it breaks the code logic. - */ - ATOMISP_SUBDEV_INPUT_CSI2_PORT1, - ATOMISP_SUBDEV_INPUT_CSI2_PORT2, - ATOMISP_SUBDEV_INPUT_CSI2_PORT3, -}; - -#define ATOMISP_SUBDEV_PAD_SINK 0 -/* capture output for still frames */ -#define ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE 1 -/* viewfinder output for downscaled capture output */ -#define ATOMISP_SUBDEV_PAD_SOURCE_VF 2 -/* preview output for display */ -#define ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW 3 -/* main output for video pipeline */ -#define ATOMISP_SUBDEV_PAD_SOURCE_VIDEO 4 -#define ATOMISP_SUBDEV_PADS_NUM 5 - -struct atomisp_in_fmt_conv { - u32 code; - uint8_t bpp; /* bits per pixel */ - uint8_t depth; /* uncompressed */ - enum atomisp_input_format atomisp_in_fmt; - enum atomisp_css_bayer_order bayer_order; - enum atomisp_input_format css_stream_fmt; -}; - -struct atomisp_sub_device; - -struct atomisp_video_pipe { - struct video_device vdev; - enum v4l2_buf_type type; - struct media_pad pad; - struct videobuf_queue capq; - struct videobuf_queue outq; - struct list_head activeq; - struct list_head activeq_out; - /* - * the buffers waiting for per-frame parameters, this is only valid - * in per-frame setting mode. 
- */ - struct list_head buffers_waiting_for_param; - /* the link list to store per_frame parameters */ - struct list_head per_frame_params; - - unsigned int buffers_in_css; - - /* irq_lock is used to protect video buffer state change operations and - * also to make activeq, activeq_out, capq and outq list - * operations atomic. */ - spinlock_t irq_lock; - unsigned int users; - - struct atomisp_device *isp; - struct v4l2_pix_format pix; - uint32_t sh_fmt; - - struct atomisp_sub_device *asd; - - /* - * This frame_config_id is got from CSS when dequueues buffers from CSS, - * it is used to indicate which parameter it has applied. - */ - unsigned int frame_config_id[VIDEO_MAX_FRAME]; - /* - * This config id is set when camera HAL enqueues buffer, it has a - * non-zero value to indicate which parameter it needs to applu - */ - unsigned int frame_request_config_id[VIDEO_MAX_FRAME]; - struct atomisp_css_params_with_list *frame_params[VIDEO_MAX_FRAME]; -#ifdef ISP2401 - - /* - * move wdt from asd struct to create wdt for each pipe - */ - struct timer_list wdt; - unsigned int wdt_duration; /* in jiffies */ - unsigned long wdt_expires; - atomic_t wdt_count; -#endif -}; - -struct atomisp_acc_pipe { - struct video_device vdev; - unsigned int users; - bool running; - struct atomisp_sub_device *asd; - struct atomisp_device *isp; -}; - -struct atomisp_pad_format { - struct v4l2_mbus_framefmt fmt; - struct v4l2_rect crop; - struct v4l2_rect compose; -}; - -/* Internal states for flash process */ -enum atomisp_flash_state { - ATOMISP_FLASH_IDLE, - ATOMISP_FLASH_REQUESTED, - ATOMISP_FLASH_ONGOING, - ATOMISP_FLASH_DONE -}; - -/* - * This structure is used to cache the CSS parameters, it aligns to - * struct ia_css_isp_config but without un-supported and deprecated parts. 
- */ -struct atomisp_css_params { - struct ia_css_wb_config wb_config; - struct ia_css_cc_config cc_config; - struct ia_css_tnr_config tnr_config; - struct ia_css_ecd_config ecd_config; - struct ia_css_ynr_config ynr_config; - struct ia_css_fc_config fc_config; - struct ia_css_formats_config formats_config; - struct ia_css_cnr_config cnr_config; - struct ia_css_macc_config macc_config; - struct ia_css_ctc_config ctc_config; - struct ia_css_aa_config aa_config; - struct ia_css_aa_config baa_config; - struct ia_css_ce_config ce_config; - struct ia_css_ob_config ob_config; - struct ia_css_dp_config dp_config; - struct ia_css_de_config de_config; - struct ia_css_gc_config gc_config; - struct ia_css_nr_config nr_config; - struct ia_css_ee_config ee_config; - struct ia_css_anr_config anr_config; - struct ia_css_3a_config s3a_config; - struct ia_css_xnr_config xnr_config; - struct ia_css_dz_config dz_config; - struct ia_css_cc_config yuv2rgb_cc_config; - struct ia_css_cc_config rgb2yuv_cc_config; - struct ia_css_macc_table macc_table; - struct ia_css_gamma_table gamma_table; - struct ia_css_ctc_table ctc_table; - - struct ia_css_xnr_table xnr_table; - struct ia_css_rgb_gamma_table r_gamma_table; - struct ia_css_rgb_gamma_table g_gamma_table; - struct ia_css_rgb_gamma_table b_gamma_table; - - struct ia_css_vector motion_vector; - struct ia_css_anr_thres anr_thres; - - struct ia_css_dvs_6axis_config *dvs_6axis; - struct ia_css_dvs2_coefficients *dvs2_coeff; - struct ia_css_shading_table *shading_table; - struct ia_css_morph_table *morph_table; - - /* - * Used to store the user pointer address of the frame. driver needs to - * translate to ia_css_frame * and then set to CSS. - */ - void *output_frame; - uint32_t isp_config_id; - - /* Indicates which parameters need to be updated. */ - struct atomisp_parameters update_flag; -}; - -struct atomisp_subdev_params { - /* FIXME: Determines whether raw capture buffer are being passed to - * user space. Unimplemented for now. 
*/ - int online_process; - int yuv_ds_en; - unsigned int color_effect; - bool gdc_cac_en; - bool macc_en; - bool bad_pixel_en; - bool video_dis_en; - bool sc_en; - bool fpn_en; - bool xnr_en; - bool low_light; - int false_color; - unsigned int histogram_elenum; - - /* Current grid info */ - struct atomisp_css_grid_info curr_grid_info; - enum atomisp_css_pipe_id s3a_enabled_pipe; - - int s3a_output_bytes; - - bool dis_proj_data_valid; - - struct ia_css_dz_config dz_config; /** Digital Zoom */ - struct ia_css_capture_config capture_config; - - struct atomisp_css_isp_config config; - - /* current configurations */ - struct atomisp_css_params css_param; - - /* - * Intermediate buffers used to communicate data between - * CSS and user space. - */ - struct ia_css_3a_statistics *s3a_user_stat; - - void *metadata_user[ATOMISP_METADATA_TYPE_NUM]; - uint32_t metadata_width_size; - - struct ia_css_dvs2_statistics *dvs_stat; - struct atomisp_css_dvs_6axis *dvs_6axis; - uint32_t exp_id; - int dvs_hor_coef_bytes; - int dvs_ver_coef_bytes; - int dvs_ver_proj_bytes; - int dvs_hor_proj_bytes; - - /* Flash */ - int num_flash_frames; - enum atomisp_flash_state flash_state; - enum atomisp_frame_status last_frame_status; - - /* continuous capture */ - struct atomisp_cont_capture_conf offline_parm; - /* Flag to check if driver needs to update params to css */ - bool css_update_params_needed; -}; - -struct atomisp_css_params_with_list { - /* parameters for CSS */ - struct atomisp_css_params params; - struct list_head list; -}; - -struct atomisp_acc_fw { - struct atomisp_css_fw_info *fw; - unsigned int handle; - unsigned int flags; - unsigned int type; - struct { - size_t length; - unsigned long css_ptr; - } args[ATOMISP_ACC_NR_MEMORY]; - struct list_head list; -}; - -struct atomisp_map { - ia_css_ptr ptr; - size_t length; - struct list_head list; - /* FIXME: should keep book which maps are currently used - * by binaries and not allow releasing those - * which are in use. 
Implement by reference counting. - */ -}; - -struct atomisp_sub_device { - struct v4l2_subdev subdev; - struct media_pad pads[ATOMISP_SUBDEV_PADS_NUM]; - struct atomisp_pad_format fmt[ATOMISP_SUBDEV_PADS_NUM]; - uint16_t capture_pad; /* main capture pad; defines much of isp config */ - - enum atomisp_subdev_input_entity input; - unsigned int output; - struct atomisp_video_pipe video_in; - struct atomisp_video_pipe video_out_capture; /* capture output */ - struct atomisp_video_pipe video_out_vf; /* viewfinder output */ - struct atomisp_video_pipe video_out_preview; /* preview output */ - struct atomisp_acc_pipe video_acc; - /* video pipe main output */ - struct atomisp_video_pipe video_out_video_capture; - /* struct isp_subdev_params params; */ - spinlock_t lock; - struct atomisp_device *isp; - struct v4l2_ctrl_handler ctrl_handler; - struct v4l2_ctrl *fmt_auto; - struct v4l2_ctrl *run_mode; - struct v4l2_ctrl *depth_mode; - struct v4l2_ctrl *vfpp; - struct v4l2_ctrl *continuous_mode; - struct v4l2_ctrl *continuous_raw_buffer_size; - struct v4l2_ctrl *continuous_viewfinder; - struct v4l2_ctrl *enable_raw_buffer_lock; -#ifdef ISP2401 - struct v4l2_ctrl *ion_dev_fd; -#endif - struct v4l2_ctrl *disable_dz; -#ifdef ISP2401 - struct v4l2_ctrl *select_isp_version; -#endif - - struct { - struct list_head fw; - struct list_head memory_maps; - struct atomisp_css_pipeline *pipeline; - bool extension_mode; - struct ida ida; - struct completion acc_done; - void *acc_stages; - } acc; - - struct atomisp_subdev_params params; - - struct atomisp_stream_env stream_env[ATOMISP_INPUT_STREAM_NUM]; - - struct v4l2_pix_format dvs_envelop; - unsigned int s3a_bufs_in_css[CSS_PIPE_ID_NUM]; - unsigned int dis_bufs_in_css; - - unsigned int metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_NUM][CSS_PIPE_ID_NUM]; - /* The list of free and available metadata buffers for CSS */ - struct list_head metadata[ATOMISP_METADATA_TYPE_NUM]; - /* The list of metadata buffers which have been en-queued to CSS */ 
- struct list_head metadata_in_css[ATOMISP_METADATA_TYPE_NUM]; - /* The list of metadata buffers which are ready for userspace to get */ - struct list_head metadata_ready[ATOMISP_METADATA_TYPE_NUM]; - - /* The list of free and available s3a stat buffers for CSS */ - struct list_head s3a_stats; - /* The list of s3a stat buffers which have been en-queued to CSS */ - struct list_head s3a_stats_in_css; - /* The list of s3a stat buffers which are ready for userspace to get */ - struct list_head s3a_stats_ready; - - struct list_head dis_stats; - struct list_head dis_stats_in_css; - spinlock_t dis_stats_lock; - - struct atomisp_css_frame *vf_frame; /* TODO: needed? */ - struct atomisp_css_frame *raw_output_frame; - enum atomisp_frame_status frame_status[VIDEO_MAX_FRAME]; - - /* This field specifies which camera (v4l2 input) is selected. */ - int input_curr; - /* This field specifies which sensor is being selected when there - are multiple sensors connected to the same MIPI port. */ - int sensor_curr; - - atomic_t sof_count; - atomic_t sequence; /* Sequence value that is assigned to buffer. 
*/ - atomic_t sequence_temp; - - unsigned int streaming; /* Hold both mutex and lock to change this */ - bool stream_prepared; /* whether css stream is created */ - - /* subdev index: will be used to show which subdev is holding the - * resource, like which camera is used by which subdev - */ - unsigned int index; - - /* delayed memory allocation for css */ - struct completion init_done; - struct workqueue_struct *delayed_init_workq; - unsigned int delayed_init; - struct work_struct delayed_init_work; - - unsigned int latest_preview_exp_id; /* CSS ZSL/SDV raw buffer id */ - - unsigned int mipi_frame_size; - - bool copy_mode; /* CSI2+ use copy mode */ - bool yuvpp_mode; /* CSI2+ yuvpp pipe */ - - int raw_buffer_bitmap[ATOMISP_MAX_EXP_ID/32 + 1]; /* Record each Raw Buffer lock status */ - int raw_buffer_locked_count; - spinlock_t raw_buffer_bitmap_lock; - -#ifndef ISP2401 - struct timer_list wdt; - unsigned int wdt_duration; /* in jiffies */ - unsigned long wdt_expires; - -#endif - struct atomisp_resolution sensor_array_res; - bool high_speed_mode; /* Indicate whether now is a high speed mode */ - int pending_capture_request; /* Indicates the number of pending capture requests. 
*/ -#ifndef ISP2401 - -#else - bool re_trigger_capture; -#endif - unsigned int preview_exp_id; - unsigned int postview_exp_id; -}; - -extern const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[]; - -u32 atomisp_subdev_uncompressed_code(u32 code); -bool atomisp_subdev_is_compressed(u32 code); -const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code); -#ifndef ISP2401 -const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt( - enum atomisp_input_format atomisp_in_fmt); -#else -const struct atomisp_in_fmt_conv - *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(enum atomisp_input_format - atomisp_in_fmt); -#endif -const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_compressed(u32 code); -bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd, - unsigned int source_pad); -uint16_t atomisp_subdev_source_pad(struct video_device *vdev); - -/* Get pointer to appropriate format */ -struct v4l2_mbus_framefmt -*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, uint32_t which, - uint32_t pad); -struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - uint32_t which, uint32_t pad, - uint32_t target); -int atomisp_subdev_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - uint32_t which, uint32_t pad, uint32_t target, - uint32_t flags, struct v4l2_rect *r); -/* Actually set the format */ -void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, uint32_t which, - uint32_t pad, struct v4l2_mbus_framefmt *ffmt); - -int atomisp_update_run_mode(struct atomisp_sub_device *asd); - -void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd); - -void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd); -int atomisp_subdev_register_entities(struct atomisp_sub_device *asd, - struct v4l2_device *vdev); -int atomisp_subdev_init(struct atomisp_device *isp); -void 
atomisp_subdev_cleanup(struct atomisp_device *isp); -int atomisp_create_pads_links(struct atomisp_device *isp); - -#endif /* __ATOMISP_SUBDEV_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h deleted file mode 100644 index 319ded6a96da..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#ifndef __ATOMISP_TABLES_H__ -#define __ATOMISP_TABLES_H__ - -#include "sh_css_params.h" - -/*Sepia image effect table*/ -static struct atomisp_css_cc_config sepia_cc_config = { - .fraction_bits = 8, - .matrix = {141, 18, 68, -40, -5, -19, 35, 4, 16}, -}; - -/*Negative image effect table*/ -static struct atomisp_css_cc_config nega_cc_config = { - .fraction_bits = 8, - .matrix = {255, 29, 120, 0, 374, 342, 0, 672, -301}, -}; - -/*Mono image effect table*/ -static struct atomisp_css_cc_config mono_cc_config = { - .fraction_bits = 8, - .matrix = {255, 29, 120, 0, 0, 0, 0, 0, 0}, -}; - -/*Skin whiten image effect table*/ -static struct atomisp_css_macc_table skin_low_macc_table = { - .data = { - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 7168, 0, 2048, 8192, - 5120, -1024, 2048, 8192, - 8192, 2048, -1024, 5120, - 8192, 2048, 0, 7168, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192 - } -}; - -static struct atomisp_css_macc_table skin_medium_macc_table = { - .data = { - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 5120, 0, 6144, 8192, - 3072, -1024, 2048, 6144, - 6144, 2048, -1024, 3072, - 8192, 6144, 0, 5120, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192 - } -}; - -static struct atomisp_css_macc_table skin_high_macc_table = { - .data = { - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 4096, 0, 8192, 8192, - 0, -2048, 4096, 6144, - 6144, 4096, -2048, 0, - 8192, 8192, 0, 4096, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192 - } -}; - -/*Blue enhencement image effect table*/ -static struct atomisp_css_macc_table 
blue_macc_table = { - .data = { - 9728, -3072, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 9728, 0, -3072, 8192, - 12800, 1536, -3072, 8192, - 11264, 0, 0, 11264, - 9728, -3072, 0, 11264 - } -}; - -/*Green enhencement image effect table*/ -static struct atomisp_css_macc_table green_macc_table = { - .data = { - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 10240, 4096, 0, 8192, - 10240, 4096, 0, 12288, - 12288, 0, 0, 12288, - 14336, -2048, 4096, 8192, - 10240, 0, 4096, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192, - 8192, 0, 0, 8192 - } -}; - -static struct atomisp_css_ctc_table vivid_ctc_table = { - .data.vamem_2 = { - 0, 384, 837, 957, 1011, 1062, 1083, 1080, - 1078, 1077, 1053, 1039, 1012, 992, 969, 951, - 929, 906, 886, 866, 845, 823, 809, 790, - 772, 758, 741, 726, 711, 701, 688, 675, - 666, 656, 648, 639, 633, 626, 618, 612, - 603, 594, 582, 572, 557, 545, 529, 516, - 504, 491, 480, 467, 459, 447, 438, 429, - 419, 412, 404, 397, 389, 382, 376, 368, - 363, 357, 351, 345, 340, 336, 330, 326, - 321, 318, 312, 308, 304, 300, 297, 294, - 291, 286, 284, 281, 278, 275, 271, 268, - 261, 257, 251, 245, 240, 235, 232, 225, - 223, 218, 213, 209, 206, 204, 199, 197, - 193, 189, 186, 185, 183, 179, 177, 175, - 172, 170, 169, 167, 164, 164, 162, 160, - 158, 157, 156, 154, 154, 152, 151, 150, - 149, 148, 146, 147, 146, 144, 143, 143, - 142, 141, 140, 141, 139, 138, 138, 138, - 137, 136, 136, 135, 134, 134, 134, 133, - 132, 132, 131, 130, 131, 130, 129, 128, - 129, 127, 127, 127, 127, 125, 125, 125, - 123, 123, 122, 120, 118, 115, 114, 111, - 110, 108, 106, 105, 103, 102, 100, 99, - 97, 97, 96, 95, 94, 93, 93, 91, - 91, 91, 90, 90, 89, 89, 88, 88, - 89, 88, 88, 87, 87, 87, 87, 86, 
- 87, 87, 86, 87, 86, 86, 84, 84, - 82, 80, 78, 76, 74, 72, 70, 68, - 67, 65, 62, 60, 58, 56, 55, 54, - 53, 51, 49, 49, 47, 45, 45, 45, - 41, 40, 39, 39, 34, 33, 34, 32, - 25, 23, 24, 20, 13, 9, 12, 0, - 0 - } -}; -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c deleted file mode 100644 index adc900272f6f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#include -#include -#include "atomisp_internal.h" -#include "atomisp_tpg.h" - -static int tpg_s_stream(struct v4l2_subdev *sd, int enable) -{ - return 0; -} - -static int tpg_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - /*to fake*/ - return 0; -} - -static int tpg_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - - if (format->pad) - return -EINVAL; - /* only raw8 grbg is supported by TPG */ - fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8; - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *fmt; - return 0; - } - return 0; -} - -static int tpg_log_status(struct v4l2_subdev *sd) -{ - /*to fake*/ - return 0; -} - -static int tpg_s_power(struct v4l2_subdev *sd, int on) -{ - return 0; -} - -static int tpg_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - /*to fake*/ - return 0; -} - -static int tpg_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) -{ - /*to fake*/ - return 0; -} - -static int tpg_enum_frame_ival(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_interval_enum *fie) -{ - /*to fake*/ - return 0; -} - -static const struct v4l2_subdev_video_ops tpg_video_ops = { - .s_stream = tpg_s_stream, -}; - -static const struct v4l2_subdev_core_ops tpg_core_ops = { - .log_status = tpg_log_status, - .s_power = tpg_s_power, -}; - -static const struct v4l2_subdev_pad_ops tpg_pad_ops = { - .enum_mbus_code = tpg_enum_mbus_code, - .enum_frame_size = tpg_enum_frame_size, - .enum_frame_interval = tpg_enum_frame_ival, - .get_fmt = tpg_get_fmt, - .set_fmt = tpg_set_fmt, -}; - -static const struct v4l2_subdev_ops tpg_ops = { - .core = &tpg_core_ops, - .video = &tpg_video_ops, - .pad = &tpg_pad_ops, 
-}; - -void atomisp_tpg_unregister_entities(struct atomisp_tpg_device *tpg) -{ - media_entity_cleanup(&tpg->sd.entity); - v4l2_device_unregister_subdev(&tpg->sd); -} - -int atomisp_tpg_register_entities(struct atomisp_tpg_device *tpg, - struct v4l2_device *vdev) -{ - int ret; - /* Register the subdev and video nodes. */ - ret = v4l2_device_register_subdev(vdev, &tpg->sd); - if (ret < 0) - goto error; - - return 0; - -error: - atomisp_tpg_unregister_entities(tpg); - return ret; -} - -void atomisp_tpg_cleanup(struct atomisp_device *isp) -{ - -} - -int atomisp_tpg_init(struct atomisp_device *isp) -{ - struct atomisp_tpg_device *tpg = &isp->tpg; - struct v4l2_subdev *sd = &tpg->sd; - struct media_pad *pads = tpg->pads; - struct media_entity *me = &sd->entity; - int ret; - - tpg->isp = isp; - v4l2_subdev_init(sd, &tpg_ops); - sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - strcpy(sd->name, "tpg_subdev"); - v4l2_set_subdevdata(sd, tpg); - - pads[0].flags = MEDIA_PAD_FL_SINK; - me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; - - ret = media_entity_pads_init(me, 1, pads); - if (ret < 0) - goto fail; - return 0; -fail: - atomisp_tpg_cleanup(isp); - return ret; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h deleted file mode 100644 index af354c4bfd3e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __ATOMISP_TPG_H__ -#define __ATOMISP_TPG_H__ - -#include -#include - -struct atomisp_tpg_device { - struct v4l2_subdev sd; - struct atomisp_device *isp; - struct media_pad pads[1]; -}; - -void atomisp_tpg_cleanup(struct atomisp_device *isp); -int atomisp_tpg_init(struct atomisp_device *isp); -void atomisp_tpg_unregister_entities(struct atomisp_tpg_device *tpg); -int atomisp_tpg_register_entities(struct atomisp_tpg_device *tpg, - struct v4l2_device *vdev); - -#endif /* __ATOMISP_TPG_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h deleted file mode 100644 index 462b296554c7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Support Camera Imaging tracer core. - * - * Copyright (c) 2013 Intel Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM atomisp - -#if !defined(ATOMISP_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ) -#define ATOMISP_TRACE_EVENT_H - -#include -#include -TRACE_EVENT(camera_meminfo, - - TP_PROTO(const char *name, int uptr_size, int counter, int sys_size, - int sys_res_size, int cam_sys_use, int cam_dyc_use, - int cam_res_use), - - TP_ARGS(name, uptr_size, counter, sys_size, sys_res_size, cam_sys_use, - cam_dyc_use, cam_res_use), - - TP_STRUCT__entry( - __array(char, name, 24) - __field(int, uptr_size) - __field(int, counter) - __field(int, sys_size) - __field(int, sys_res_size) - __field(int, cam_res_use) - __field(int, cam_dyc_use) - __field(int, cam_sys_use) - ), - - TP_fast_assign( - strlcpy(__entry->name, name, 24); - __entry->uptr_size = uptr_size; - __entry->counter = counter; - __entry->sys_size = sys_size; - __entry->sys_res_size = sys_res_size; - __entry->cam_res_use = cam_res_use; - __entry->cam_dyc_use = cam_dyc_use; - __entry->cam_sys_use = cam_sys_use; - ), - - TP_printk( - "<%s> User ptr memory:%d pages,\tISP private memory used:%d" - " pages:\tsysFP system size:%d,\treserved size:%d" - "\tcamFP sysUse:%d,\tdycUse:%d,\tresUse:%d.\n", - __entry->name, __entry->uptr_size, __entry->counter, - __entry->sys_size, __entry->sys_res_size, __entry->cam_sys_use, - __entry->cam_dyc_use, __entry->cam_res_use) -); - -TRACE_EVENT(camera_debug, - - TP_PROTO(const char *name, char *info, const int line), - - TP_ARGS(name, info, line), - - TP_STRUCT__entry( - __array(char, name, 24) - __array(char, info, 24) - __field(int, line) - ), - - TP_fast_assign( - strlcpy(__entry->name, name, 24); - strlcpy(__entry->info, info, 24); - __entry->line = line; - ), - - TP_printk("<%s>-<%d> %s\n", __entry->name, __entry->line, - __entry->info) -); - -TRACE_EVENT(ipu_cstate, - - TP_PROTO(int cstate), - - TP_ARGS(cstate), - - TP_STRUCT__entry( - __field(int, cstate) - ), - - TP_fast_assign( - __entry->cstate = cstate; - ), - - 
TP_printk("cstate=%d", __entry->cstate) -); - -TRACE_EVENT(ipu_pstate, - - TP_PROTO(int freq, int util), - - TP_ARGS(freq, util), - - TP_STRUCT__entry( - __field(int, freq) - __field(int, util) - ), - - TP_fast_assign( - __entry->freq = freq; - __entry->util = util; - ), - - TP_printk("freq=%d util=%d", __entry->freq, __entry->util) -); -#endif - -#undef TRACE_INCLUDE_PATH -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE atomisp_trace_event -/* This part must be outside protection */ -#include diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c deleted file mode 100644 index aaae663cc218..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c +++ /dev/null @@ -1,1562 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "../../include/linux/atomisp_gmin_platform.h" - -#include "atomisp_cmd.h" -#include "atomisp_common.h" -#include "atomisp_fops.h" -#include "atomisp_file.h" -#include "atomisp_ioctl.h" -#include "atomisp_internal.h" -#include "atomisp_acc.h" -#include "atomisp-regs.h" -#include "atomisp_dfs_tables.h" -#include "atomisp_drvfs.h" -#include "hmm/hmm.h" -#include "atomisp_trace_event.h" - -#include "hrt/hive_isp_css_mm_hrt.h" - -#include "device_access.h" - -/* G-Min addition: pull this in from intel_mid_pm.h */ -#define CSTATE_EXIT_LATENCY_C1 1 - -static uint skip_fwload; -module_param(skip_fwload, uint, 0644); -MODULE_PARM_DESC(skip_fwload, "Skip atomisp firmware load"); - -/* set reserved memory pool size in page */ -static unsigned int repool_pgnr; -module_param(repool_pgnr, uint, 0644); -MODULE_PARM_DESC(repool_pgnr, - "Set the reserved memory pool size in page (default:0)"); - -/* set dynamic memory pool size in page */ -unsigned int dypool_pgnr = UINT_MAX; -module_param(dypool_pgnr, uint, 0644); -MODULE_PARM_DESC(dypool_pgnr, - "Set the dynamic memory pool size in page (default:0)"); - -bool dypool_enable; -module_param(dypool_enable, bool, 0644); -MODULE_PARM_DESC(dypool_enable, - "dynamic memory pool enable/disable (default:disable)"); - -/* memory optimization: deferred firmware loading */ -bool defer_fw_load; -module_param(defer_fw_load, bool, 0644); -MODULE_PARM_DESC(defer_fw_load, - "Defer FW loading until device is opened (default:disable)"); - -/* cross componnet debug message flag */ -int dbg_level; -module_param(dbg_level, int, 0644); -MODULE_PARM_DESC(dbg_level, "debug message on/off (default:off)"); - -/* log function switch */ -int dbg_func = 2; -module_param(dbg_func, int, 0644); -MODULE_PARM_DESC(dbg_func, - "log function switch non/trace_printk/printk (default:printk)"); - -int mipicsi_flag; -module_param(mipicsi_flag, int, 0644); 
-MODULE_PARM_DESC(mipicsi_flag, "mipi csi compression predictor algorithm"); - -/*set to 16x16 since this is the amount of lines and pixels the sensor -exports extra. If these are kept at the 10x8 that they were on, in yuv -downscaling modes incorrect resolutions where requested to the sensor -driver with strange outcomes as a result. The proper way tot do this -would be to have a list of tables the specify the sensor res, mipi rec, -output res, and isp output res. however since we do not have this yet, -the chosen solution is the next best thing. */ -int pad_w = 16; -module_param(pad_w, int, 0644); -MODULE_PARM_DESC(pad_w, "extra data for ISP processing"); - -int pad_h = 16; -module_param(pad_h, int, 0644); -MODULE_PARM_DESC(pad_h, "extra data for ISP processing"); - -struct device *atomisp_dev; - -void __iomem *atomisp_io_base; - -int atomisp_video_init(struct atomisp_video_pipe *video, const char *name) -{ - int ret; - const char *direction; - - switch (video->type) { - case V4L2_BUF_TYPE_VIDEO_CAPTURE: - direction = "output"; - video->pad.flags = MEDIA_PAD_FL_SINK; - video->vdev.fops = &atomisp_fops; - video->vdev.ioctl_ops = &atomisp_ioctl_ops; - break; - case V4L2_BUF_TYPE_VIDEO_OUTPUT: - direction = "input"; - video->pad.flags = MEDIA_PAD_FL_SOURCE; - video->vdev.fops = &atomisp_file_fops; - video->vdev.ioctl_ops = &atomisp_file_ioctl_ops; - break; - default: - return -EINVAL; - } - - ret = media_entity_pads_init(&video->vdev.entity, 1, &video->pad); - if (ret < 0) - return ret; - - /* Initialize the video device. */ - snprintf(video->vdev.name, sizeof(video->vdev.name), - "ATOMISP ISP %s %s", name, direction); - video->vdev.release = video_device_release_empty; - video_set_drvdata(&video->vdev, video->isp); - - return 0; -} - -void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name) -{ - video->vdev.fops = &atomisp_fops; - video->vdev.ioctl_ops = &atomisp_ioctl_ops; - - /* Initialize the video device. 
*/ - snprintf(video->vdev.name, sizeof(video->vdev.name), - "ATOMISP ISP %s", name); - video->vdev.release = video_device_release_empty; - video_set_drvdata(&video->vdev, video->isp); -} - -int atomisp_video_register(struct atomisp_video_pipe *video, - struct v4l2_device *vdev) -{ - int ret; - - video->vdev.v4l2_dev = vdev; - - ret = video_register_device(&video->vdev, VFL_TYPE_GRABBER, -1); - if (ret < 0) - dev_err(vdev->dev, "%s: could not register video device (%d)\n", - __func__, ret); - - return ret; -} - -int atomisp_acc_register(struct atomisp_acc_pipe *video, - struct v4l2_device *vdev) -{ - int ret; - - video->vdev.v4l2_dev = vdev; - - ret = video_register_device(&video->vdev, VFL_TYPE_GRABBER, -1); - if (ret < 0) - dev_err(vdev->dev, "%s: could not register video device (%d)\n", - __func__, ret); - - return ret; -} - -void atomisp_video_unregister(struct atomisp_video_pipe *video) -{ - if (video_is_registered(&video->vdev)) { - media_entity_cleanup(&video->vdev.entity); - video_unregister_device(&video->vdev); - } -} - -void atomisp_acc_unregister(struct atomisp_acc_pipe *video) -{ - if (video_is_registered(&video->vdev)) - video_unregister_device(&video->vdev); -} - -static int atomisp_save_iunit_reg(struct atomisp_device *isp) -{ - struct pci_dev *dev = isp->pdev; - - dev_dbg(isp->dev, "%s\n", __func__); - - pci_read_config_word(dev, PCI_COMMAND, &isp->saved_regs.pcicmdsts); - /* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */ - pci_read_config_dword(dev, PCI_MSI_CAPID, &isp->saved_regs.msicap); - pci_read_config_dword(dev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr); - pci_read_config_word(dev, PCI_MSI_DATA, &isp->saved_regs.msi_data); - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr); - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, - &isp->saved_regs.interrupt_control); - - pci_read_config_dword(dev, MRFLD_PCI_PMCS, - &isp->saved_regs.pmcs); - /* Ensure read/write combining is enabled. 
*/ - pci_read_config_dword(dev, PCI_I_CONTROL, - &isp->saved_regs.i_control); - isp->saved_regs.i_control |= - MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING | - MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING; - pci_read_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, - &isp->saved_regs.csi_access_viol); - pci_read_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL, - &isp->saved_regs.csi_rcomp_config); - /* - * Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE. - * ANN/CHV: RCOMP updates do not happen when using CSI2+ path - * and sensor sending "continuous clock". - * TNG/ANN/CHV: MIPI packets are lost if the HS entry sequence - * is missed, and IUNIT can hang. - * For both issues, setting this bit is a workaround. - */ - isp->saved_regs.csi_rcomp_config |= - MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE; - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, - &isp->saved_regs.csi_afe_dly); - pci_read_config_dword(dev, MRFLD_PCI_CSI_CONTROL, - &isp->saved_regs.csi_control); - if (isp->media_dev.hw_revision >= - (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT)) - isp->saved_regs.csi_control |= - MRFLD_PCI_CSI_CONTROL_PARPATHEN; - /* - * On CHT CSI_READY bit should be enabled before stream on - */ - if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << - ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0))) - isp->saved_regs.csi_control |= - MRFLD_PCI_CSI_CONTROL_CSI_READY; - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, - &isp->saved_regs.csi_afe_rcomp_config); - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL, - &isp->saved_regs.csi_afe_hs_control); - pci_read_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL, - &isp->saved_regs.csi_deadline_control); - return 0; -} - -static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp) -{ - struct pci_dev *dev = isp->pdev; - - dev_dbg(isp->dev, "%s\n", __func__); - - pci_write_config_word(dev, PCI_COMMAND, isp->saved_regs.pcicmdsts); - 
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, - isp->saved_regs.ispmmadr); - pci_write_config_dword(dev, PCI_MSI_CAPID, isp->saved_regs.msicap); - pci_write_config_dword(dev, PCI_MSI_ADDR, isp->saved_regs.msi_addr); - pci_write_config_word(dev, PCI_MSI_DATA, isp->saved_regs.msi_data); - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, isp->saved_regs.intr); - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, - isp->saved_regs.interrupt_control); - pci_write_config_dword(dev, PCI_I_CONTROL, - isp->saved_regs.i_control); - - pci_write_config_dword(dev, MRFLD_PCI_PMCS, - isp->saved_regs.pmcs); - pci_write_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, - isp->saved_regs.csi_access_viol); - pci_write_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL, - isp->saved_regs.csi_rcomp_config); - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, - isp->saved_regs.csi_afe_dly); - pci_write_config_dword(dev, MRFLD_PCI_CSI_CONTROL, - isp->saved_regs.csi_control); - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, - isp->saved_regs.csi_afe_rcomp_config); - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL, - isp->saved_regs.csi_afe_hs_control); - pci_write_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL, - isp->saved_regs.csi_deadline_control); - - /* - * for MRFLD, Software/firmware needs to write a 1 to bit0 - * of the register at CSI_RECEIVER_SELECTION_REG to enable - * SH CSI backend write 0 will enable Arasan CSI backend, - * which has bugs(like sighting:4567697 and 4567699) and - * will be removed in B0 - */ - atomisp_store_uint32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1); - return 0; -} - -static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp) -{ - struct pci_dev *dev = isp->pdev; - u32 irq; - unsigned long flags; - - spin_lock_irqsave(&isp->lock, flags); - if (isp->sw_contex.power_state == ATOM_ISP_POWER_DOWN) { - spin_unlock_irqrestore(&isp->lock, flags); - dev_dbg(isp->dev, "<%s %d.\n", __func__, __LINE__); - return 0; - } - /* - * MRFLD 
HAS requirement: cannot power off i-unit if - * ISP has IRQ not serviced. - * So, here we need to check if there is any pending - * IRQ, if so, waiting for it to be served - */ - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); - irq = irq & 1 << INTR_IIR; - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); - - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); - if (!(irq & (1 << INTR_IIR))) - goto done; - - atomisp_store_uint32(MRFLD_INTR_CLEAR_REG, 0xFFFFFFFF); - atomisp_load_uint32(MRFLD_INTR_STATUS_REG, &irq); - if (irq != 0) { - dev_err(isp->dev, - "%s: fail to clear isp interrupt status reg=0x%x\n", - __func__, irq); - spin_unlock_irqrestore(&isp->lock, flags); - return -EAGAIN; - } else { - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); - irq = irq & 1 << INTR_IIR; - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); - - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); - if (!(irq & (1 << INTR_IIR))) { - atomisp_store_uint32(MRFLD_INTR_ENABLE_REG, 0x0); - goto done; - } - dev_err(isp->dev, - "%s: error in iunit interrupt. status reg=0x%x\n", - __func__, irq); - spin_unlock_irqrestore(&isp->lock, flags); - return -EAGAIN; - } -done: - /* - * MRFLD WORKAROUND: - * before powering off IUNIT, clear the pending interrupts - * and disable the interrupt. driver should avoid writing 0 - * to IIR. It could block subsequent interrupt messages. - * HW sighting:4568410. 
- */ - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); - irq &= ~(1 << INTR_IER); - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); - - atomisp_msi_irq_uninit(isp, dev); - atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true); - spin_unlock_irqrestore(&isp->lock, flags); - - return 0; -} - - - /* - * WA for DDR DVFS enable/disable - * By default, ISP will force DDR DVFS 1600MHz before disable DVFS - */ -static void punit_ddr_dvfs_enable(bool enable) -{ - int door_bell = 1 << 8; - int max_wait = 30; - int reg; - - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, ®); - if (enable) { - reg &= ~(MRFLD_BIT0 | MRFLD_BIT1); - } else { - reg |= (MRFLD_BIT1 | door_bell); - reg &= ~(MRFLD_BIT0); - } - iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSDVFS, reg); - - /* Check Req_ACK to see freq status, wait until door_bell is cleared */ - while ((reg & door_bell) && max_wait--) { - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, ®); - usleep_range(100, 500); - } - - if (max_wait == -1) - pr_info("DDR DVFS, door bell is not cleared within 3ms\n"); -} - -/* Workaround for pmu_nc_set_power_state not ready in MRFLD */ -int atomisp_mrfld_power_down(struct atomisp_device *isp) -{ - unsigned long timeout; - u32 reg_value; - - /* writing 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */ - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, ®_value); - reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK; - reg_value |= MRFLD_ISPSSPM0_IUNIT_POWER_OFF; - iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSPM0, reg_value); - - /*WA:Enable DVFS*/ - if (IS_CHT) - punit_ddr_dvfs_enable(true); - - /* - * There should be no iunit access while power-down is - * in progress HW sighting: 4567865 - * FIXME: msecs_to_jiffies(50)- experienced value - */ - timeout = jiffies + msecs_to_jiffies(50); - while (1) { - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, ®_value); - dev_dbg(isp->dev, "power-off in progress, ISPSSPM0: 0x%x\n", - 
reg_value); - /* wait until ISPSSPM0 bit[25:24] shows 0x3 */ - if ((reg_value >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) == - MRFLD_ISPSSPM0_IUNIT_POWER_OFF) { - trace_ipu_cstate(0); - return 0; - } - - if (time_after(jiffies, timeout)) { - dev_err(isp->dev, "power-off iunit timeout.\n"); - return -EBUSY; - } - /* FIXME: experienced value for delay */ - usleep_range(100, 150); - } -} - - -/* Workaround for pmu_nc_set_power_state not ready in MRFLD */ -int atomisp_mrfld_power_up(struct atomisp_device *isp) -{ - unsigned long timeout; - u32 reg_value; - - /*WA for PUNIT, if DVFS enabled, ISP timeout observed*/ - if (IS_CHT) - punit_ddr_dvfs_enable(false); - - /* - * FIXME:WA for ECS28A, with this sleep, CTS - * android.hardware.camera2.cts.CameraDeviceTest#testCameraDeviceAbort - * PASS, no impact on other platforms - */ - if (IS_BYT) - msleep(10); - - /* writing 0x0 to ISPSSPM0 bit[1:0] to power off the IUNIT */ - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, ®_value); - reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK; - iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSPM0, reg_value); - - /* FIXME: experienced value for delay */ - timeout = jiffies + msecs_to_jiffies(50); - while (1) { - iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, ®_value); - dev_dbg(isp->dev, "power-on in progress, ISPSSPM0: 0x%x\n", - reg_value); - /* wait until ISPSSPM0 bit[25:24] shows 0x0 */ - if ((reg_value >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) == - MRFLD_ISPSSPM0_IUNIT_POWER_ON) { - trace_ipu_cstate(1); - return 0; - } - - if (time_after(jiffies, timeout)) { - dev_err(isp->dev, "power-on iunit timeout.\n"); - return -EBUSY; - } - /* FIXME: experienced value for delay */ - usleep_range(100, 150); - } -} - -int atomisp_runtime_suspend(struct device *dev) -{ - struct atomisp_device *isp = (struct atomisp_device *) - dev_get_drvdata(dev); - int ret; - - ret = atomisp_mrfld_pre_power_down(isp); - if (ret) - return ret; - - /*Turn off the ISP d-phy*/ - ret = 
atomisp_ospm_dphy_down(isp); - if (ret) - return ret; - pm_qos_update_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE); - return atomisp_mrfld_power_down(isp); -} - -int atomisp_runtime_resume(struct device *dev) -{ - struct atomisp_device *isp = (struct atomisp_device *) - dev_get_drvdata(dev); - int ret; - - ret = atomisp_mrfld_power_up(isp); - if (ret) - return ret; - - pm_qos_update_request(&isp->pm_qos, isp->max_isr_latency); - if (isp->sw_contex.power_state == ATOM_ISP_POWER_DOWN) { - /*Turn on ISP d-phy */ - ret = atomisp_ospm_dphy_up(isp); - if (ret) { - dev_err(isp->dev, "Failed to power up ISP!.\n"); - return -EINVAL; - } - } - - /*restore register values for iUnit and iUnitPHY registers*/ - if (isp->saved_regs.pcicmdsts) - atomisp_restore_iunit_reg(isp); - - atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true); - return 0; -} - -static int __maybe_unused atomisp_suspend(struct device *dev) -{ - struct atomisp_device *isp = (struct atomisp_device *) - dev_get_drvdata(dev); - /* FIXME: only has one isp_subdev at present */ - struct atomisp_sub_device *asd = &isp->asd[0]; - unsigned long flags; - int ret; - - /* - * FIXME: Suspend is not supported by sensors. Abort if any video - * node was opened. 
- */ - if (atomisp_dev_users(isp)) - return -EBUSY; - - spin_lock_irqsave(&isp->lock, flags); - if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) { - spin_unlock_irqrestore(&isp->lock, flags); - dev_err(isp->dev, "atomisp cannot suspend at this time.\n"); - return -EINVAL; - } - spin_unlock_irqrestore(&isp->lock, flags); - - ret = atomisp_mrfld_pre_power_down(isp); - if (ret) - return ret; - - /*Turn off the ISP d-phy */ - ret = atomisp_ospm_dphy_down(isp); - if (ret) { - dev_err(isp->dev, "fail to power off ISP\n"); - return ret; - } - pm_qos_update_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE); - return atomisp_mrfld_power_down(isp); -} - -static int __maybe_unused atomisp_resume(struct device *dev) -{ - struct atomisp_device *isp = (struct atomisp_device *) - dev_get_drvdata(dev); - int ret; - - ret = atomisp_mrfld_power_up(isp); - if (ret) - return ret; - - pm_qos_update_request(&isp->pm_qos, isp->max_isr_latency); - - /*Turn on ISP d-phy */ - ret = atomisp_ospm_dphy_up(isp); - if (ret) { - dev_err(isp->dev, "Failed to power up ISP!.\n"); - return -EINVAL; - } - - /*restore register values for iUnit and iUnitPHY registers*/ - if (isp->saved_regs.pcicmdsts) - atomisp_restore_iunit_reg(isp); - - atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true); - return 0; -} - -int atomisp_csi_lane_config(struct atomisp_device *isp) -{ - static const struct { - u8 code; - u8 lanes[MRFLD_PORT_NUM]; - } portconfigs[] = { - /* Tangier/Merrifield available lane configurations */ - { 0x00, { 4, 1, 0 } }, /* 00000 */ - { 0x01, { 3, 1, 0 } }, /* 00001 */ - { 0x02, { 2, 1, 0 } }, /* 00010 */ - { 0x03, { 1, 1, 0 } }, /* 00011 */ - { 0x04, { 2, 1, 2 } }, /* 00100 */ - { 0x08, { 3, 1, 1 } }, /* 01000 */ - { 0x09, { 2, 1, 1 } }, /* 01001 */ - { 0x0a, { 1, 1, 1 } }, /* 01010 */ - - /* Anniedale/Moorefield only configurations */ - { 0x10, { 4, 2, 0 } }, /* 10000 */ - { 0x11, { 3, 2, 0 } }, /* 10001 */ - { 0x12, { 2, 2, 0 } }, /* 10010 */ - { 0x13, { 1, 2, 0 } }, /* 10011 */ - { 
0x14, { 2, 2, 2 } }, /* 10100 */ - { 0x18, { 3, 2, 1 } }, /* 11000 */ - { 0x19, { 2, 2, 1 } }, /* 11001 */ - { 0x1a, { 1, 2, 1 } }, /* 11010 */ - }; - - unsigned int i, j; - u8 sensor_lanes[MRFLD_PORT_NUM] = { 0 }; - u32 csi_control; - int nportconfigs; - u32 port_config_mask; - int port3_lanes_shift; - - if (isp->media_dev.hw_revision < - ATOMISP_HW_REVISION_ISP2401_LEGACY << - ATOMISP_HW_REVISION_SHIFT) { - /* Merrifield */ - port_config_mask = MRFLD_PORT_CONFIG_MASK; - port3_lanes_shift = MRFLD_PORT3_LANES_SHIFT; - } else { - /* Moorefield / Cherryview */ - port_config_mask = CHV_PORT_CONFIG_MASK; - port3_lanes_shift = CHV_PORT3_LANES_SHIFT; - } - - if (isp->media_dev.hw_revision < - ATOMISP_HW_REVISION_ISP2401 << - ATOMISP_HW_REVISION_SHIFT) { - /* Merrifield / Moorefield legacy input system */ - nportconfigs = MRFLD_PORT_CONFIG_NUM; - } else { - /* Moorefield / Cherryview new input system */ - nportconfigs = ARRAY_SIZE(portconfigs); - } - - for (i = 0; i < isp->input_cnt; i++) { - struct camera_mipi_info *mipi_info; - - if (isp->inputs[i].type != RAW_CAMERA && - isp->inputs[i].type != SOC_CAMERA) - continue; - - mipi_info = atomisp_to_sensor_mipi_info(isp->inputs[i].camera); - if (!mipi_info) - continue; - - switch (mipi_info->port) { - case ATOMISP_CAMERA_PORT_PRIMARY: - sensor_lanes[0] = mipi_info->num_lanes; - break; - case ATOMISP_CAMERA_PORT_SECONDARY: - sensor_lanes[1] = mipi_info->num_lanes; - break; - case ATOMISP_CAMERA_PORT_TERTIARY: - sensor_lanes[2] = mipi_info->num_lanes; - break; - default: - dev_err(isp->dev, - "%s: invalid port: %d for the %dth sensor\n", - __func__, mipi_info->port, i); - return -EINVAL; - } - } - - for (i = 0; i < nportconfigs; i++) { - for (j = 0; j < MRFLD_PORT_NUM; j++) - if (sensor_lanes[j] && - sensor_lanes[j] != portconfigs[i].lanes[j]) - break; - - if (j == MRFLD_PORT_NUM) - break; /* Found matching setting */ - } - - if (i >= nportconfigs) { - dev_err(isp->dev, - "%s: could not find the CSI port setting for 
%d-%d-%d\n", - __func__, - sensor_lanes[0], sensor_lanes[1], sensor_lanes[2]); - return -EINVAL; - } - - pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &csi_control); - csi_control &= ~port_config_mask; - csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT) - | (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT)) - | (portconfigs[i].lanes[1] ? 0 : (1 << MRFLD_PORT2_ENABLE_SHIFT)) - | (portconfigs[i].lanes[2] ? 0 : (1 << MRFLD_PORT3_ENABLE_SHIFT)) - | (((1 << portconfigs[i].lanes[0]) - 1) << MRFLD_PORT1_LANES_SHIFT) - | (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT) - | (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift); - - pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, csi_control); - - dev_dbg(isp->dev, - "%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n", - __func__, portconfigs[i].lanes[0], portconfigs[i].lanes[1], - portconfigs[i].lanes[2], csi_control); - - return 0; -} - -static int atomisp_subdev_probe(struct atomisp_device *isp) -{ - const struct atomisp_platform_data *pdata; - struct intel_v4l2_subdev_table *subdevs; - int ret, raw_index = -1; - - pdata = atomisp_get_platform_data(); - if (pdata == NULL) { - dev_err(isp->dev, "no platform data available\n"); - return 0; - } - - for (subdevs = pdata->subdevs; subdevs->type; ++subdevs) { - struct v4l2_subdev *subdev; - struct i2c_board_info *board_info = - &subdevs->v4l2_subdev.board_info; - struct i2c_adapter *adapter = - i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id); - int sensor_num, i; - - if (adapter == NULL) { - dev_err(isp->dev, - "Failed to find i2c adapter for subdev %s\n", - board_info->type); - break; - } - - /* In G-Min, the sensor devices will already be probed - * (via ACPI) and registered, do not create new - * ones */ - subdev = atomisp_gmin_find_subdev(adapter, board_info); - ret = v4l2_device_register_subdev(&isp->v4l2_dev, subdev); - if (ret) { - dev_warn(isp->dev, "Subdev %s detection fail\n", - 
board_info->type); - continue; - } - - if (subdev == NULL) { - dev_warn(isp->dev, "Subdev %s detection fail\n", - board_info->type); - continue; - } - - dev_info(isp->dev, "Subdev %s successfully register\n", - board_info->type); - - switch (subdevs->type) { - case RAW_CAMERA: - raw_index = isp->input_cnt; - dev_dbg(isp->dev, "raw_index: %d\n", raw_index); - case SOC_CAMERA: - dev_dbg(isp->dev, "SOC_INDEX: %d\n", isp->input_cnt); - if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) { - dev_warn(isp->dev, - "too many atomisp inputs, ignored\n"); - break; - } - - isp->inputs[isp->input_cnt].type = subdevs->type; - isp->inputs[isp->input_cnt].port = subdevs->port; - isp->inputs[isp->input_cnt].camera = subdev; - isp->inputs[isp->input_cnt].sensor_index = 0; - /* - * initialize the subdev frame size, then next we can - * judge whether frame_size store effective value via - * pixel_format. - */ - isp->inputs[isp->input_cnt].frame_size.pixel_format = 0; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); - sensor_num = isp->inputs[isp->input_cnt] - .camera_caps->sensor_num; - isp->input_cnt++; - for (i = 1; i < sensor_num; i++) { - if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) { - dev_warn(isp->dev, - "atomisp inputs out of range\n"); - break; - } - isp->inputs[isp->input_cnt] = - isp->inputs[isp->input_cnt - 1]; - isp->inputs[isp->input_cnt].sensor_index = i; - isp->input_cnt++; - } - break; - case CAMERA_MOTOR: - isp->motor = subdev; - break; - case LED_FLASH: - case XENON_FLASH: - isp->flash = subdev; - break; - default: - dev_dbg(isp->dev, "unknown subdev probed\n"); - break; - } - - } - - /* - * HACK: Currently VCM belongs to primary sensor only, but correct - * approach must be to acquire from platform code which sensor - * owns it. - */ - if (isp->motor && raw_index >= 0) - isp->inputs[raw_index].motor = isp->motor; - - /* Proceed even if no modules detected. For COS mode and no modules. 
*/ - if (!isp->inputs[0].camera) - dev_warn(isp->dev, "no camera attached or fail to detect\n"); - - return atomisp_csi_lane_config(isp); -} - -static void atomisp_unregister_entities(struct atomisp_device *isp) -{ - unsigned int i; - struct v4l2_subdev *sd, *next; - - for (i = 0; i < isp->num_of_streams; i++) - atomisp_subdev_unregister_entities(&isp->asd[i]); - atomisp_tpg_unregister_entities(&isp->tpg); - atomisp_file_input_unregister_entities(&isp->file_dev); - for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) - atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]); - - list_for_each_entry_safe(sd, next, &isp->v4l2_dev.subdevs, list) - v4l2_device_unregister_subdev(sd); - - v4l2_device_unregister(&isp->v4l2_dev); - media_device_unregister(&isp->media_dev); -} - -static int atomisp_register_entities(struct atomisp_device *isp) -{ - int ret = 0; - unsigned int i; - - isp->media_dev.dev = isp->dev; - - strlcpy(isp->media_dev.model, "Intel Atom ISP", - sizeof(isp->media_dev.model)); - - media_device_init(&isp->media_dev); - isp->v4l2_dev.mdev = &isp->media_dev; - ret = v4l2_device_register(isp->dev, &isp->v4l2_dev); - if (ret < 0) { - dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n", - __func__, ret); - goto v4l2_device_failed; - } - - ret = atomisp_subdev_probe(isp); - if (ret < 0) - goto csi_and_subdev_probe_failed; - - /* Register internal entities */ - for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) { - ret = atomisp_mipi_csi2_register_entities(&isp->csi2_port[i], - &isp->v4l2_dev); - if (ret == 0) - continue; - - /* error case */ - dev_err(isp->dev, "failed to register the CSI port: %d\n", i); - /* deregister all registered CSI ports */ - while (i--) - atomisp_mipi_csi2_unregister_entities( - &isp->csi2_port[i]); - - goto csi_and_subdev_probe_failed; - } - - ret = - atomisp_file_input_register_entities(&isp->file_dev, &isp->v4l2_dev); - if (ret < 0) { - dev_err(isp->dev, "atomisp_file_input_register_entities\n"); - goto file_input_register_failed; 
- } - - ret = atomisp_tpg_register_entities(&isp->tpg, &isp->v4l2_dev); - if (ret < 0) { - dev_err(isp->dev, "atomisp_tpg_register_entities\n"); - goto tpg_register_failed; - } - - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - ret = atomisp_subdev_register_entities(asd, &isp->v4l2_dev); - if (ret < 0) { - dev_err(isp->dev, - "atomisp_subdev_register_entities fail\n"); - for (; i > 0; i--) - atomisp_subdev_unregister_entities( - &isp->asd[i - 1]); - goto subdev_register_failed; - } - } - - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - init_completion(&asd->init_done); - - asd->delayed_init_workq = - alloc_workqueue(isp->v4l2_dev.name, WQ_CPU_INTENSIVE, - 1); - if (asd->delayed_init_workq == NULL) { - dev_err(isp->dev, - "Failed to initialize delayed init workq\n"); - ret = -ENOMEM; - - for (; i > 0; i--) - destroy_workqueue(isp->asd[i - 1]. - delayed_init_workq); - goto wq_alloc_failed; - } - INIT_WORK(&asd->delayed_init_work, atomisp_delayed_init_work); - } - - for (i = 0; i < isp->input_cnt; i++) { - if (isp->inputs[i].port >= ATOMISP_CAMERA_NR_PORTS) { - dev_err(isp->dev, "isp->inputs port %d not supported\n", - isp->inputs[i].port); - ret = -EINVAL; - goto link_failed; - } - } - - dev_dbg(isp->dev, - "FILE_INPUT enable, camera_cnt: %d\n", isp->input_cnt); - isp->inputs[isp->input_cnt].type = FILE_INPUT; - isp->inputs[isp->input_cnt].port = -1; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); - isp->inputs[isp->input_cnt++].camera = &isp->file_dev.sd; - - if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) { - dev_dbg(isp->dev, - "TPG detected, camera_cnt: %d\n", isp->input_cnt); - isp->inputs[isp->input_cnt].type = TEST_PATTERN; - isp->inputs[isp->input_cnt].port = -1; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); - isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd; - } else { - dev_warn(isp->dev, "too 
many atomisp inputs, TPG ignored.\n"); - } - - ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev); - if (ret < 0) - goto link_failed; - - return media_device_register(&isp->media_dev); - -link_failed: - for (i = 0; i < isp->num_of_streams; i++) - destroy_workqueue(isp->asd[i]. - delayed_init_workq); -wq_alloc_failed: - for (i = 0; i < isp->num_of_streams; i++) - atomisp_subdev_unregister_entities( - &isp->asd[i]); -subdev_register_failed: - atomisp_tpg_unregister_entities(&isp->tpg); -tpg_register_failed: - atomisp_file_input_unregister_entities(&isp->file_dev); -file_input_register_failed: - for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) - atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]); -csi_and_subdev_probe_failed: - v4l2_device_unregister(&isp->v4l2_dev); -v4l2_device_failed: - media_device_unregister(&isp->media_dev); - media_device_cleanup(&isp->media_dev); - return ret; -} - -static int atomisp_initialize_modules(struct atomisp_device *isp) -{ - int ret; - - ret = atomisp_mipi_csi2_init(isp); - if (ret < 0) { - dev_err(isp->dev, "mipi csi2 initialization failed\n"); - goto error_mipi_csi2; - } - - ret = atomisp_file_input_init(isp); - if (ret < 0) { - dev_err(isp->dev, - "file input device initialization failed\n"); - goto error_file_input; - } - - ret = atomisp_tpg_init(isp); - if (ret < 0) { - dev_err(isp->dev, "tpg initialization failed\n"); - goto error_tpg; - } - - ret = atomisp_subdev_init(isp); - if (ret < 0) { - dev_err(isp->dev, "ISP subdev initialization failed\n"); - goto error_isp_subdev; - } - - - return 0; - -error_isp_subdev: -error_tpg: - atomisp_tpg_cleanup(isp); -error_file_input: - atomisp_file_input_cleanup(isp); -error_mipi_csi2: - atomisp_mipi_csi2_cleanup(isp); - return ret; -} - -static void atomisp_uninitialize_modules(struct atomisp_device *isp) -{ - atomisp_tpg_cleanup(isp); - atomisp_file_input_cleanup(isp); - atomisp_mipi_csi2_cleanup(isp); -} - -const struct firmware * -atomisp_load_firmware(struct atomisp_device 
*isp) -{ - const struct firmware *fw; - int rc; - char *fw_path = NULL; - - if (skip_fwload) - return NULL; - - if (isp->media_dev.hw_revision == - ((ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) - | ATOMISP_HW_STEPPING_A0)) - fw_path = "shisp_2401a0_v21.bin"; - - if (isp->media_dev.hw_revision == - ((ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT) - | ATOMISP_HW_STEPPING_A0)) - fw_path = "shisp_2401a0_legacy_v21.bin"; - - if (isp->media_dev.hw_revision == - ((ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT) - | ATOMISP_HW_STEPPING_B0)) - fw_path = "shisp_2400b0_v21.bin"; - - if (!fw_path) { - dev_err(isp->dev, "Unsupported hw_revision 0x%x\n", - isp->media_dev.hw_revision); - return NULL; - } - - rc = request_firmware(&fw, fw_path, isp->dev); - if (rc) { - dev_err(isp->dev, - "atomisp: Error %d while requesting firmware %s\n", - rc, fw_path); - return NULL; - } - - return fw; -} - -/* - * Check for flags the driver was compiled with against the PCI - * device. Always returns true on other than ISP 2400. 
- */ -static bool is_valid_device(struct pci_dev *dev, - const struct pci_device_id *id) -{ - unsigned int a0_max_id; - - switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) { - case ATOMISP_PCI_DEVICE_SOC_MRFLD: - a0_max_id = ATOMISP_PCI_REV_MRFLD_A0_MAX; - break; - case ATOMISP_PCI_DEVICE_SOC_BYT: - a0_max_id = ATOMISP_PCI_REV_BYT_A0_MAX; - break; - default: - return true; - } - - return dev->revision > a0_max_id; -} - -static int init_atomisp_wdts(struct atomisp_device *isp) -{ - int i, err; - - atomic_set(&isp->wdt_work_queued, 0); - isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1); - if (isp->wdt_work_queue == NULL) { - dev_err(isp->dev, "Failed to initialize wdt work queue\n"); - err = -ENOMEM; - goto alloc_fail; - } - INIT_WORK(&isp->wdt_work, atomisp_wdt_work); - - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; -#ifndef ISP2401 - timer_setup(&asd->wdt, atomisp_wdt, 0); -#else - timer_setup(&asd->video_out_capture.wdt, atomisp_wdt, 0); - timer_setup(&asd->video_out_preview.wdt, atomisp_wdt, 0); - timer_setup(&asd->video_out_vf.wdt, atomisp_wdt, 0); - timer_setup(&asd->video_out_video_capture.wdt, atomisp_wdt, 0); -#endif - } - return 0; -alloc_fail: - return err; -} - -#define ATOM_ISP_PCI_BAR 0 - -static int atomisp_pci_probe(struct pci_dev *dev, - const struct pci_device_id *id) -{ - const struct atomisp_platform_data *pdata; - struct atomisp_device *isp; - unsigned int start; - void __iomem *base; - int err, val; - u32 irq; - - if (!dev) { - dev_err(&dev->dev, "atomisp: error device ptr\n"); - return -EINVAL; - } - - if (!is_valid_device(dev, id)) - return -ENODEV; - /* Pointer to struct device. 
*/ - atomisp_dev = &dev->dev; - - pdata = atomisp_get_platform_data(); - if (pdata == NULL) - dev_warn(&dev->dev, "no platform data available\n"); - - err = pcim_enable_device(dev); - if (err) { - dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n", - err); - return err; - } - - start = pci_resource_start(dev, ATOM_ISP_PCI_BAR); - dev_dbg(&dev->dev, "start: 0x%x\n", start); - - err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev)); - if (err) { - dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n", - err); - return err; - } - - base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR]; - dev_dbg(&dev->dev, "base: %p\n", base); - - atomisp_io_base = base; - - dev_dbg(&dev->dev, "atomisp_io_base: %p\n", atomisp_io_base); - - isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL); - if (!isp) { - dev_err(&dev->dev, "Failed to alloc CI ISP structure\n"); - return -ENOMEM; - } - isp->pdev = dev; - isp->dev = &dev->dev; - isp->sw_contex.power_state = ATOM_ISP_POWER_UP; - isp->saved_regs.ispmmadr = start; - - rt_mutex_init(&isp->mutex); - mutex_init(&isp->streamoff_mutex); - spin_lock_init(&isp->lock); - - /* This is not a true PCI device on SoC, so the delay is not needed. 
*/ - isp->pdev->d3_delay = 0; - - switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) { - case ATOMISP_PCI_DEVICE_SOC_MRFLD: - isp->media_dev.hw_revision = - (ATOMISP_HW_REVISION_ISP2400 - << ATOMISP_HW_REVISION_SHIFT) | - ATOMISP_HW_STEPPING_B0; - - switch (id->device) { - case ATOMISP_PCI_DEVICE_SOC_MRFLD_1179: - isp->dfs = &dfs_config_merr_1179; - break; - case ATOMISP_PCI_DEVICE_SOC_MRFLD_117A: - isp->dfs = &dfs_config_merr_117a; - break; - default: - isp->dfs = &dfs_config_merr; - break; - } - isp->hpll_freq = HPLL_FREQ_1600MHZ; - break; - case ATOMISP_PCI_DEVICE_SOC_BYT: - isp->media_dev.hw_revision = - (ATOMISP_HW_REVISION_ISP2400 - << ATOMISP_HW_REVISION_SHIFT) | - ATOMISP_HW_STEPPING_B0; -#ifdef FIXME - if (INTEL_MID_BOARD(3, TABLET, BYT, BLK, PRO, CRV2) || - INTEL_MID_BOARD(3, TABLET, BYT, BLK, ENG, CRV2)) { - isp->dfs = &dfs_config_byt_cr; - isp->hpll_freq = HPLL_FREQ_2000MHZ; - } else -#endif - { - isp->dfs = &dfs_config_byt; - isp->hpll_freq = HPLL_FREQ_1600MHZ; - } - /* HPLL frequency is known to be device-specific, but we don't - * have specs yet for exactly how it varies. Default to - * BYT-CR but let provisioning set it via EFI variable */ - isp->hpll_freq = gmin_get_var_int(&dev->dev, "HpllFreq", - HPLL_FREQ_2000MHZ); - - /* - * for BYT/CHT we are put isp into D3cold to avoid pci registers access - * in power off. Set d3cold_delay to 0 since default 100ms is not - * necessary. - */ - isp->pdev->d3cold_delay = 0; - break; - case ATOMISP_PCI_DEVICE_SOC_ANN: - isp->media_dev.hw_revision = ( -#ifdef ISP2401_NEW_INPUT_SYSTEM - ATOMISP_HW_REVISION_ISP2401 -#else - ATOMISP_HW_REVISION_ISP2401_LEGACY -#endif - << ATOMISP_HW_REVISION_SHIFT); - isp->media_dev.hw_revision |= isp->pdev->revision < 2 ? 
- ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0; - isp->dfs = &dfs_config_merr; - isp->hpll_freq = HPLL_FREQ_1600MHZ; - break; - case ATOMISP_PCI_DEVICE_SOC_CHT: - isp->media_dev.hw_revision = ( -#ifdef ISP2401_NEW_INPUT_SYSTEM - ATOMISP_HW_REVISION_ISP2401 -#else - ATOMISP_HW_REVISION_ISP2401_LEGACY -#endif - << ATOMISP_HW_REVISION_SHIFT); - isp->media_dev.hw_revision |= isp->pdev->revision < 2 ? - ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0; - - isp->dfs = &dfs_config_cht; - isp->pdev->d3cold_delay = 0; - - iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val); - switch (val & CCK_FUSE_HPLL_FREQ_MASK) { - case 0x00: - isp->hpll_freq = HPLL_FREQ_800MHZ; - break; - case 0x01: - isp->hpll_freq = HPLL_FREQ_1600MHZ; - break; - case 0x02: - isp->hpll_freq = HPLL_FREQ_2000MHZ; - break; - default: - isp->hpll_freq = HPLL_FREQ_1600MHZ; - dev_warn(isp->dev, - "read HPLL from cck failed.default 1600MHz.\n"); - } - break; - default: - dev_err(&dev->dev, "un-supported IUNIT device\n"); - return -ENODEV; - } - - dev_info(&dev->dev, "ISP HPLL frequency base = %d MHz\n", - isp->hpll_freq); - - isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY; - - /* Load isp firmware from user space */ - if (!defer_fw_load) { - isp->firmware = atomisp_load_firmware(isp); - if (!isp->firmware) { - err = -ENOENT; - goto load_fw_fail; - } - - err = atomisp_css_check_firmware_version(isp); - if (err) { - dev_dbg(&dev->dev, "Firmware version check failed\n"); - goto fw_validation_fail; - } - } - - pci_set_master(dev); - pci_set_drvdata(dev, isp); - - err = pci_enable_msi(dev); - if (err) { - dev_err(&dev->dev, "Failed to enable msi (%d)\n", err); - goto enable_msi_fail; - } - - atomisp_msi_irq_init(isp, dev); - - pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); - - /* - * for MRFLD, Software/firmware needs to write a 1 to bit 0 of - * the register at CSI_RECEIVER_SELECTION_REG to enable SH CSI - * backend write 0 will enable Arasan CSI backend, which has - * 
bugs(like sighting:4567697 and 4567699) and will be removed - * in B0 - */ - atomisp_store_uint32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1); - - if ((id->device & ATOMISP_PCI_DEVICE_SOC_MASK) == - ATOMISP_PCI_DEVICE_SOC_MRFLD) { - u32 csi_afe_trim; - - /* - * Workaround for imbalance data eye issue which is observed - * on TNG B0. - */ - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, - &csi_afe_trim); - csi_afe_trim &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << - MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) | - (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << - MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) | - (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << - MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT)); - csi_afe_trim |= (MRFLD_PCI_CSI1_HSRXCLKTRIM << - MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) | - (MRFLD_PCI_CSI2_HSRXCLKTRIM << - MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) | - (MRFLD_PCI_CSI3_HSRXCLKTRIM << - MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT); - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, - csi_afe_trim); - } - - err = atomisp_initialize_modules(isp); - if (err < 0) { - dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err); - goto initialize_modules_fail; - } - - err = atomisp_register_entities(isp); - if (err < 0) { - dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n", - err); - goto register_entities_fail; - } - err = atomisp_create_pads_links(isp); - if (err < 0) - goto register_entities_fail; - /* init atomisp wdts */ - if (init_atomisp_wdts(isp) != 0) - goto wdt_work_queue_fail; - - /* save the iunit context only once after all the values are init'ed. 
*/ - atomisp_save_iunit_reg(isp); - - pm_runtime_put_noidle(&dev->dev); - pm_runtime_allow(&dev->dev); - - hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr); - err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED); - if (err) { - dev_err(&dev->dev, "Failed to register reserved memory pool.\n"); - goto hmm_pool_fail; - } - - /* Init ISP memory management */ - hmm_init(); - - err = devm_request_threaded_irq(&dev->dev, dev->irq, - atomisp_isr, atomisp_isr_thread, - IRQF_SHARED, "isp_irq", isp); - if (err) { - dev_err(&dev->dev, "Failed to request irq (%d)\n", err); - goto request_irq_fail; - } - - /* Load firmware into ISP memory */ - if (!defer_fw_load) { - err = atomisp_css_load_firmware(isp); - if (err) { - dev_err(&dev->dev, "Failed to init css.\n"); - goto css_init_fail; - } - } else { - dev_dbg(&dev->dev, "Skip css init.\n"); - } - /* Clear FW image from memory */ - release_firmware(isp->firmware); - isp->firmware = NULL; - isp->css_env.isp_css_fw.data = NULL; - - atomisp_drvfs_init(&dev->driver->driver, isp); - - return 0; - -css_init_fail: - devm_free_irq(&dev->dev, dev->irq, isp); -request_irq_fail: - hmm_cleanup(); - hmm_pool_unregister(HMM_POOL_TYPE_RESERVED); -hmm_pool_fail: - destroy_workqueue(isp->wdt_work_queue); -wdt_work_queue_fail: - atomisp_acc_cleanup(isp); - atomisp_unregister_entities(isp); -register_entities_fail: - atomisp_uninitialize_modules(isp); -initialize_modules_fail: - pm_qos_remove_request(&isp->pm_qos); - atomisp_msi_irq_uninit(isp, dev); -enable_msi_fail: -fw_validation_fail: - release_firmware(isp->firmware); -load_fw_fail: - /* - * Switch off ISP, as keeping it powered on would prevent - * reaching S0ix states. 
- * - * The following lines have been copied from atomisp suspend path - */ - - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); - irq = irq & 1 << INTR_IIR; - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); - - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); - irq &= ~(1 << INTR_IER); - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); - - atomisp_msi_irq_uninit(isp, dev); - - atomisp_ospm_dphy_down(isp); - - /* Address later when we worry about the ...field chips */ - if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power_down(isp)) - dev_err(&dev->dev, "Failed to switch off ISP\n"); - return err; -} - -static void atomisp_pci_remove(struct pci_dev *dev) -{ - struct atomisp_device *isp = (struct atomisp_device *) - pci_get_drvdata(dev); - - atomisp_drvfs_exit(); - - atomisp_acc_cleanup(isp); - - atomisp_css_unload_firmware(isp); - hmm_cleanup(); - - pm_runtime_forbid(&dev->dev); - pm_runtime_get_noresume(&dev->dev); - pm_qos_remove_request(&isp->pm_qos); - - atomisp_msi_irq_uninit(isp, dev); - atomisp_unregister_entities(isp); - - destroy_workqueue(isp->wdt_work_queue); - atomisp_file_input_cleanup(isp); - - release_firmware(isp->firmware); - - hmm_pool_unregister(HMM_POOL_TYPE_RESERVED); -} - -static const struct pci_device_id atomisp_pci_tbl[] = { -#if defined(ISP2400) || defined(ISP2400B0) - /* Merrifield */ - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1178)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1179)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x117a)}, - /* Baytrail */ - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, -#elif defined(ISP2401) - /* Anniedale (Merrifield+ / Moorefield) */ - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1478)}, - /* Cherrytrail */ - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, -#endif - {0,} -}; - -MODULE_DEVICE_TABLE(pci, atomisp_pci_tbl); - -static const struct dev_pm_ops atomisp_pm_ops = { - .runtime_suspend = atomisp_runtime_suspend, - .runtime_resume = atomisp_runtime_resume, - .suspend = atomisp_suspend, - .resume = atomisp_resume, -}; - 
-static struct pci_driver atomisp_pci_driver = { - .driver = { - .pm = &atomisp_pm_ops, - }, - .name = "atomisp-isp2", - .id_table = atomisp_pci_tbl, - .probe = atomisp_pci_probe, - .remove = atomisp_pci_remove, -}; - -module_pci_driver(atomisp_pci_driver); - -MODULE_AUTHOR("Wen Wang "); -MODULE_AUTHOR("Xiaolin Zhang "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Intel ATOM Platform ISP Driver"); diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h deleted file mode 100644 index 944a6cf40a2f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __ATOMISP_V4L2_H__ -#define __ATOMISP_V4L2_H__ - -struct atomisp_video_pipe; -struct atomisp_acc_pipe; -struct v4l2_device; -struct atomisp_device; -struct firmware; - -int atomisp_video_init(struct atomisp_video_pipe *video, const char *name); -void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name); -void atomisp_video_unregister(struct atomisp_video_pipe *video); -int atomisp_video_register(struct atomisp_video_pipe *video, - struct v4l2_device *vdev); -void atomisp_acc_unregister(struct atomisp_acc_pipe *video); -int atomisp_acc_register(struct atomisp_acc_pipe *video, - struct v4l2_device *vdev); -const struct firmware *atomisp_load_firmware(struct atomisp_device *isp); -int atomisp_csi_lane_config(struct atomisp_device *isp); - -#endif /* __ATOMISP_V4L2_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile deleted file mode 100644 index ee5631b0e635..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -ccflags-y += -DISP2400B0 -ISP2400B0 := y diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h deleted file mode 100644 index 914aa7f98700..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_CIRCBUF_H -#define _IA_CSS_CIRCBUF_H - -#include -#include -#include -#include -#include -#include "ia_css_circbuf_comm.h" -#include "ia_css_circbuf_desc.h" - -/**************************************************************** - * - * Data structures. - * - ****************************************************************/ -/** - * @brief Data structure for the circular buffer. - */ -typedef struct ia_css_circbuf_s ia_css_circbuf_t; -struct ia_css_circbuf_s { - ia_css_circbuf_desc_t *desc; /* Pointer to the descriptor of the circbuf */ - ia_css_circbuf_elem_t *elems; /* an array of elements */ -}; - -/** - * @brief Create the circular buffer. - * - * @param cb The pointer to the circular buffer. - * @param elems An array of elements. - * @param desc The descriptor set to the size using ia_css_circbuf_desc_init(). - */ -extern void ia_css_circbuf_create( - ia_css_circbuf_t *cb, - ia_css_circbuf_elem_t *elems, - ia_css_circbuf_desc_t *desc); - -/** - * @brief Destroy the circular buffer. - * - * @param cb The pointer to the circular buffer. - */ -extern void ia_css_circbuf_destroy( - ia_css_circbuf_t *cb); - -/** - * @brief Pop a value out of the circular buffer. - * Get a value at the head of the circular buffer. - * The user should call "ia_css_circbuf_is_empty()" - * to avoid accessing to an empty buffer. - * - * @param cb The pointer to the circular buffer. - * - * @return the pop-out value. - */ -extern uint32_t ia_css_circbuf_pop( - ia_css_circbuf_t *cb); - -/** - * @brief Extract a value out of the circular buffer. - * Get a value at an arbitrary poistion in the circular - * buffer. The user should call "ia_css_circbuf_is_empty()" - * to avoid accessing to an empty buffer. 
- * - * @param cb The pointer to the circular buffer. - * @param offset The offset from "start" to the target position. - * - * @return the extracted value. - */ -extern uint32_t ia_css_circbuf_extract( - ia_css_circbuf_t *cb, - int offset); - -/**************************************************************** - * - * Inline functions. - * - ****************************************************************/ -/** - * @brief Set the "val" field in the element. - * - * @param elem The pointer to the element. - * @param val The value to be set. - */ -static inline void ia_css_circbuf_elem_set_val( - ia_css_circbuf_elem_t *elem, - uint32_t val) -{ - OP___assert(elem != NULL); - - elem->val = val; -} - -/** - * @brief Initialize the element. - * - * @param elem The pointer to the element. - */ -static inline void ia_css_circbuf_elem_init( - ia_css_circbuf_elem_t *elem) -{ - OP___assert(elem != NULL); - ia_css_circbuf_elem_set_val(elem, 0); -} - -/** - * @brief Copy an element. - * - * @param src The element as the copy source. - * @param dest The element as the copy destination. - */ -static inline void ia_css_circbuf_elem_cpy( - ia_css_circbuf_elem_t *src, - ia_css_circbuf_elem_t *dest) -{ - OP___assert(src != NULL); - OP___assert(dest != NULL); - - ia_css_circbuf_elem_set_val(dest, src->val); -} - -/** - * @brief Get position in the circular buffer. - * - * @param cb The pointer to the circular buffer. - * @param base The base position. - * @param offset The offset. - * - * @return the position at offset. 
- */ -static inline uint8_t ia_css_circbuf_get_pos_at_offset( - ia_css_circbuf_t *cb, - uint32_t base, - int offset) -{ - uint8_t dest; - - OP___assert(cb != NULL); - OP___assert(cb->desc != NULL); - OP___assert(cb->desc->size > 0); - - /* step 1: adjudst the offset */ - while (offset < 0) { - offset += cb->desc->size; - } - - /* step 2: shift and round by the upper limit */ - dest = OP_std_modadd(base, offset, cb->desc->size); - - return dest; -} - -/** - * @brief Get the offset between two positions in the circular buffer. - * Get the offset from the source position to the terminal position, - * along the direction in which the new elements come in. - * - * @param cb The pointer to the circular buffer. - * @param src_pos The source position. - * @param dest_pos The terminal position. - * - * @return the offset. - */ -static inline int ia_css_circbuf_get_offset( - ia_css_circbuf_t *cb, - uint32_t src_pos, - uint32_t dest_pos) -{ - int offset; - - OP___assert(cb != NULL); - OP___assert(cb->desc != NULL); - - offset = (int)(dest_pos - src_pos); - offset += (offset < 0) ? cb->desc->size : 0; - - return offset; -} - -/** - * @brief Get the maximum number of elements. - * - * @param cb The pointer to the circular buffer. - * - * @return the maximum number of elements. - * - * TODO: Test this API. - */ -static inline uint32_t ia_css_circbuf_get_size( - ia_css_circbuf_t *cb) -{ - OP___assert(cb != NULL); - OP___assert(cb->desc != NULL); - - return cb->desc->size; -} - -/** - * @brief Get the number of available elements. - * - * @param cb The pointer to the circular buffer. - * - * @return the number of available elements. - */ -static inline uint32_t ia_css_circbuf_get_num_elems( - ia_css_circbuf_t *cb) -{ - int num; - - OP___assert(cb != NULL); - OP___assert(cb->desc != NULL); - - num = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end); - - return (uint32_t)num; -} - -/** - * @brief Test if the circular buffer is empty. 
- * - * @param cb The pointer to the circular buffer. - * - * @return - * - true when it is empty. - * - false when it is not empty. - */ -static inline bool ia_css_circbuf_is_empty( - ia_css_circbuf_t *cb) -{ - OP___assert(cb != NULL); - OP___assert(cb->desc != NULL); - - return ia_css_circbuf_desc_is_empty(cb->desc); -} - -/** - * @brief Test if the circular buffer is full. - * - * @param cb The pointer to the circular buffer. - * - * @return - * - true when it is full. - * - false when it is not full. - */ -static inline bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb) -{ - OP___assert(cb != NULL); - OP___assert(cb->desc != NULL); - - return ia_css_circbuf_desc_is_full(cb->desc); -} - -/** - * @brief Write a new element into the circular buffer. - * Write a new element WITHOUT checking whether the - * circular buffer is full or not. So it also overwrites - * the oldest element when the buffer is full. - * - * @param cb The pointer to the circular buffer. - * @param elem The new element. - */ -static inline void ia_css_circbuf_write( - ia_css_circbuf_t *cb, - ia_css_circbuf_elem_t elem) -{ - OP___assert(cb != NULL); - OP___assert(cb->desc != NULL); - - /* Cannot continue as the queue is full*/ - assert(!ia_css_circbuf_is_full(cb)); - - ia_css_circbuf_elem_cpy(&elem, &cb->elems[cb->desc->end]); - - cb->desc->end = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, 1); -} - -/** - * @brief Push a value in the circular buffer. - * Put a new value at the tail of the circular buffer. - * The user should call "ia_css_circbuf_is_full()" - * to avoid accessing to a full buffer. - * - * @param cb The pointer to the circular buffer. - * @param val The value to be pushed in. 
- */ -static inline void ia_css_circbuf_push( - ia_css_circbuf_t *cb, - uint32_t val) -{ - ia_css_circbuf_elem_t elem; - - OP___assert(cb != NULL); - - /* set up an element */ - ia_css_circbuf_elem_init(&elem); - ia_css_circbuf_elem_set_val(&elem, val); - - /* write the element into the buffer */ - ia_css_circbuf_write(cb, elem); -} - -/** - * @brief Get the number of free elements. - * - * @param cb The pointer to the circular buffer. - * - * @return: The number of free elements. - */ -static inline uint32_t ia_css_circbuf_get_free_elems( - ia_css_circbuf_t *cb) -{ - OP___assert(cb != NULL); - OP___assert(cb->desc != NULL); - - return ia_css_circbuf_desc_get_free_elems(cb->desc); -} - -/** - * @brief Peek an element in Circular Buffer. - * - * @param cb The pointer to the circular buffer. - * @param offset Offset to the element. - * - * @return the elements value. - */ -extern uint32_t ia_css_circbuf_peek( - ia_css_circbuf_t *cb, - int offset); - -/** - * @brief Get an element in Circular Buffer. - * - * @param cb The pointer to the circular buffer. - * @param offset Offset to the element. - * - * @return the elements value. - */ -extern uint32_t ia_css_circbuf_peek_from_start( - ia_css_circbuf_t *cb, - int offset); - -/** - * @brief Increase Size of a Circular Buffer. - * Use 'CAUTION' before using this function, This was added to - * support / fix issue with increasing size for tagger only - * - * @param cb The pointer to the circular buffer. 
- * @param sz_delta delta increase for new size - * @param elems (optional) pointers to new additional elements - * cb element array size will not be increased dynamically, - * but new elements should be added at the end to existing - * cb element array which if of max_size >= new size - * - * @return true on succesfully increasing the size - * false on failure - */ -extern bool ia_css_circbuf_increase_size( - ia_css_circbuf_t *cb, - unsigned int sz_delta, - ia_css_circbuf_elem_t *elems); - -#endif /*_IA_CSS_CIRCBUF_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_comm.h deleted file mode 100644 index 3fc0330b9526..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_comm.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_CIRCBUF_COMM_H -#define _IA_CSS_CIRCBUF_COMM_H - -#include /* uint8_t, uint32_t */ - -#define IA_CSS_CIRCBUF_PADDING 1 /* The circular buffer is implemented in lock-less manner, wherein - * the head and tail can advance independently without any locks. - * But to achieve this, an extra buffer element is required to detect - * queue full & empty conditions, wherein the tail trails the head for - * full and is equal to head for empty condition. 
This causes 1 buffer - * not being available for use. - */ - -/**************************************************************** - * - * Portable Data structures - * - ****************************************************************/ -/** - * @brief Data structure for the circular descriptor. - */ -typedef struct ia_css_circbuf_desc_s ia_css_circbuf_desc_t; -struct ia_css_circbuf_desc_s { - uint8_t size; /* the maximum number of elements*/ - uint8_t step; /* number of bytes per element */ - uint8_t start; /* index of the oldest element */ - uint8_t end; /* index at which to write the new element */ -}; -#define SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT \ - (4 * sizeof(uint8_t)) - -/** - * @brief Data structure for the circular buffer element. - */ -typedef struct ia_css_circbuf_elem_s ia_css_circbuf_elem_t; -struct ia_css_circbuf_elem_s { - uint32_t val; /* the value stored in the element */ -}; -#define SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT \ - (sizeof(uint32_t)) - -#endif /*_IA_CSS_CIRCBUF_COMM_H*/ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h deleted file mode 100644 index 8dd7cd6cd3d8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _IA_CSS_CIRCBUF_DESC_H_ -#define _IA_CSS_CIRCBUF_DESC_H_ - -#include -#include -#include -#include -#include "ia_css_circbuf_comm.h" -/**************************************************************** - * - * Inline functions. - * - ****************************************************************/ -/** - * @brief Test if the circular buffer is empty. - * - * @param cb_desc The pointer to the circular buffer descriptor. - * - * @return - * - true when it is empty. - * - false when it is not empty. - */ -static inline bool ia_css_circbuf_desc_is_empty( - ia_css_circbuf_desc_t *cb_desc) -{ - OP___assert(cb_desc != NULL); - return (cb_desc->end == cb_desc->start); -} - -/** - * @brief Test if the circular buffer descriptor is full. - * - * @param cb_desc The pointer to the circular buffer - * descriptor. - * - * @return - * - true when it is full. - * - false when it is not full. - */ -static inline bool ia_css_circbuf_desc_is_full( - ia_css_circbuf_desc_t *cb_desc) -{ - OP___assert(cb_desc != NULL); - return (OP_std_modadd(cb_desc->end, 1, cb_desc->size) == cb_desc->start); -} - -/** - * @brief Initialize the circular buffer descriptor - * - * @param cb_desc The pointer circular buffer descriptor - * @param size The size of the circular buffer - */ -static inline void ia_css_circbuf_desc_init( - ia_css_circbuf_desc_t *cb_desc, - int8_t size) -{ - OP___assert(cb_desc != NULL); - cb_desc->size = size; -} - -/** - * @brief Get a position in the circular buffer descriptor. - * - * @param cb The pointer to the circular buffer descriptor. - * @param base The base position. - * @param offset The offset. - * - * @return the position in the circular buffer descriptor. 
- */ -static inline uint8_t ia_css_circbuf_desc_get_pos_at_offset( - ia_css_circbuf_desc_t *cb_desc, - uint32_t base, - int offset) -{ - uint8_t dest; - OP___assert(cb_desc != NULL); - OP___assert(cb_desc->size > 0); - - /* step 1: adjust the offset */ - while (offset < 0) { - offset += cb_desc->size; - } - - /* step 2: shift and round by the upper limit */ - dest = OP_std_modadd(base, offset, cb_desc->size); - - return dest; -} - -/** - * @brief Get the offset between two positions in the circular buffer - * descriptor. - * Get the offset from the source position to the terminal position, - * along the direction in which the new elements come in. - * - * @param cb_desc The pointer to the circular buffer descriptor. - * @param src_pos The source position. - * @param dest_pos The terminal position. - * - * @return the offset. - */ -static inline int ia_css_circbuf_desc_get_offset( - ia_css_circbuf_desc_t *cb_desc, - uint32_t src_pos, - uint32_t dest_pos) -{ - int offset; - OP___assert(cb_desc != NULL); - - offset = (int)(dest_pos - src_pos); - offset += (offset < 0) ? cb_desc->size : 0; - - return offset; -} - -/** - * @brief Get the number of available elements. - * - * @param cb_desc The pointer to the circular buffer. - * - * @return The number of available elements. - */ -static inline uint32_t ia_css_circbuf_desc_get_num_elems( - ia_css_circbuf_desc_t *cb_desc) -{ - int num; - OP___assert(cb_desc != NULL); - - num = ia_css_circbuf_desc_get_offset(cb_desc, - cb_desc->start, - cb_desc->end); - - return (uint32_t)num; -} - -/** - * @brief Get the number of free elements. - * - * @param cb_desc The pointer to the circular buffer descriptor. - * - * @return: The number of free elements. 
- */ -static inline uint32_t ia_css_circbuf_desc_get_free_elems( - ia_css_circbuf_desc_t *cb_desc) -{ - uint32_t num; - OP___assert(cb_desc != NULL); - - num = ia_css_circbuf_desc_get_offset(cb_desc, - cb_desc->start, - cb_desc->end); - - return (cb_desc->size - num); -} -#endif /*_IA_CSS_CIRCBUF_DESC_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c deleted file mode 100644 index 050d60f0894f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_circbuf.h" - -#include - -/********************************************************************** - * - * Forward declarations. - * - **********************************************************************/ -/* - * @brief Read the oldest element from the circular buffer. - * Read the oldest element WITHOUT checking whehter the - * circular buffer is empty or not. The oldest element is - * also removed out from the circular buffer. - * - * @param cb The pointer to the circular buffer. - * - * @return the oldest element. - */ -static inline ia_css_circbuf_elem_t -ia_css_circbuf_read(ia_css_circbuf_t *cb); - -/* - * @brief Shift a chunk of elements in the circular buffer. - * A chunk of elements (i.e. 
the ones from the "start" position - * to the "chunk_src" position) are shifted in the circular buffer, - * along the direction of new elements coming. - * - * @param cb The pointer to the circular buffer. - * @param chunk_src The position at which the first element in the chunk is. - * @param chunk_dest The position to which the first element in the chunk would be shift. - */ -static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb, - uint32_t chunk_src, - uint32_t chunk_dest); - -/* - * @brief Get the "val" field in the element. - * - * @param elem The pointer to the element. - * - * @return the "val" field. - */ -static inline uint32_t -ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem); - -/********************************************************************** - * - * Non-inline functions. - * - **********************************************************************/ -/* - * @brief Create the circular buffer. - * Refer to "ia_css_circbuf.h" for details. - */ -void -ia_css_circbuf_create(ia_css_circbuf_t *cb, - ia_css_circbuf_elem_t *elems, - ia_css_circbuf_desc_t *desc) -{ - uint32_t i; - - OP___assert(desc); - - cb->desc = desc; - /* Initialize to defaults */ - cb->desc->start = 0; - cb->desc->end = 0; - cb->desc->step = 0; - - for (i = 0; i < cb->desc->size; i++) - ia_css_circbuf_elem_init(&elems[i]); - - cb->elems = elems; -} - -/* - * @brief Destroy the circular buffer. - * Refer to "ia_css_circbuf.h" for details. - */ -void ia_css_circbuf_destroy(ia_css_circbuf_t *cb) -{ - cb->desc = NULL; - - cb->elems = NULL; -} - -/* - * @brief Pop a value out of the circular buffer. - * Refer to "ia_css_circbuf.h" for details. 
- */ -uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb) -{ - uint32_t ret; - ia_css_circbuf_elem_t elem; - - assert(!ia_css_circbuf_is_empty(cb)); - - /* read an element from the buffer */ - elem = ia_css_circbuf_read(cb); - ret = ia_css_circbuf_elem_get_val(&elem); - return ret; -} - -/* - * @brief Extract a value out of the circular buffer. - * Refer to "ia_css_circbuf.h" for details. - */ -uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset) -{ - int max_offset; - uint32_t val; - uint32_t pos; - uint32_t src_pos; - uint32_t dest_pos; - - /* get the maximum offest */ - max_offset = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end); - max_offset--; - - /* - * Step 1: When the target element is at the "start" position. - */ - if (offset == 0) { - val = ia_css_circbuf_pop(cb); - return val; - } - - /* - * Step 2: When the target element is out of the range. - */ - if (offset > max_offset) { - val = 0; - return val; - } - - /* - * Step 3: When the target element is between the "start" and - * "end" position. - */ - /* get the position of the target element */ - pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset); - - /* get the value from the target element */ - val = ia_css_circbuf_elem_get_val(&cb->elems[pos]); - - /* shift the elements */ - src_pos = ia_css_circbuf_get_pos_at_offset(cb, pos, -1); - dest_pos = pos; - ia_css_circbuf_shift_chunk(cb, src_pos, dest_pos); - - return val; -} - -/* - * @brief Peek an element from the circular buffer. - * Refer to "ia_css_circbuf.h" for details. - */ -uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset) -{ - int pos; - - pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, offset); - - /* get the value at the position */ - return cb->elems[pos].val; -} - -/* - * @brief Get the value of an element from the circular buffer. - * Refer to "ia_css_circbuf.h" for details. 
- */ -uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset) -{ - int pos; - - pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset); - - /* get the value at the position */ - return cb->elems[pos].val; -} - -/* @brief increase size of a circular buffer. - * Use 'CAUTION' before using this function. This was added to - * support / fix issue with increasing size for tagger only - * Please refer to "ia_css_circbuf.h" for details. - */ -bool ia_css_circbuf_increase_size( - ia_css_circbuf_t *cb, - unsigned int sz_delta, - ia_css_circbuf_elem_t *elems) -{ - uint8_t curr_size; - uint8_t curr_end; - unsigned int i = 0; - - if (!cb || sz_delta == 0) - return false; - - curr_size = cb->desc->size; - curr_end = cb->desc->end; - /* We assume cb was pre defined as global to allow - * increase in size */ - /* FM: are we sure this cannot cause size to become too big? */ - if (((uint8_t)(cb->desc->size + (uint8_t)sz_delta) > cb->desc->size) && ((uint8_t)sz_delta == sz_delta)) - cb->desc->size += (uint8_t)sz_delta; - else - return false; /* overflow in size */ - - /* If elems are passed update them else we assume its been taken - * care before calling this function */ - if (elems) { - /* cb element array size will not be increased dynamically, - * but pointers to new elements can be added at the end - * of existing pre defined cb element array of - * size >= new size if not already added */ - for (i = curr_size; i < cb->desc->size; i++) - cb->elems[i] = elems[i - curr_size]; - } - /* Fix Start / End */ - if (curr_end < cb->desc->start) { - if (curr_end == 0) { - /* Easily fix End */ - cb->desc->end = curr_size; - } else { - /* Move elements and fix Start*/ - ia_css_circbuf_shift_chunk(cb, - curr_size - 1, - curr_size + sz_delta - 1); - } - } - - return true; -} - -/**************************************************************** - * - * Inline functions. 
- * - ****************************************************************/ -/* - * @brief Get the "val" field in the element. - * Refer to "Forward declarations" for details. - */ -static inline uint32_t -ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem) -{ - return elem->val; -} - -/* - * @brief Read the oldest element from the circular buffer. - * Refer to "Forward declarations" for details. - */ -static inline ia_css_circbuf_elem_t -ia_css_circbuf_read(ia_css_circbuf_t *cb) -{ - ia_css_circbuf_elem_t elem; - - /* get the element from the target position */ - elem = cb->elems[cb->desc->start]; - - /* clear the target position */ - ia_css_circbuf_elem_init(&cb->elems[cb->desc->start]); - - /* adjust the "start" position */ - cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, 1); - return elem; -} - -/* - * @brief Shift a chunk of elements in the circular buffer. - * Refer to "Forward declarations" for details. - */ -static inline void -ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb, - uint32_t chunk_src, uint32_t chunk_dest) -{ - int chunk_offset; - int chunk_sz; - int i; - - /* get the chunk offset and size */ - chunk_offset = ia_css_circbuf_get_offset(cb, - chunk_src, chunk_dest); - chunk_sz = ia_css_circbuf_get_offset(cb, cb->desc->start, chunk_src) + 1; - - /* shift each element to its terminal position */ - for (i = 0; i < chunk_sz; i++) { - - /* copy the element from the source to the destination */ - ia_css_circbuf_elem_cpy(&cb->elems[chunk_src], - &cb->elems[chunk_dest]); - - /* clear the source position */ - ia_css_circbuf_elem_init(&cb->elems[chunk_src]); - - /* adjust the source/terminal positions */ - chunk_src = ia_css_circbuf_get_pos_at_offset(cb, chunk_src, -1); - chunk_dest = ia_css_circbuf_get_pos_at_offset(cb, chunk_dest, -1); - - } - - /* adjust the index "start" */ - cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, chunk_offset); -} - diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/interface/ia_css_refcount.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/interface/ia_css_refcount.h deleted file mode 100644 index 20db4de6beeb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/interface/ia_css_refcount.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_REFCOUNT_H_ -#define _IA_CSS_REFCOUNT_H_ - -#include -#include -#include - -typedef void (*clear_func)(hrt_vaddress ptr); - -/*! \brief Function for initializing refcount list - * - * \param[in] size Size of the refcount list. - * \return ia_css_err - */ -extern enum ia_css_err ia_css_refcount_init(uint32_t size); - -/*! \brief Function for de-initializing refcount list - * - * \return None - */ -extern void ia_css_refcount_uninit(void); - -/*! \brief Function for increasing reference by 1. - * - * \param[in] id ID of the object. - * \param[in] ptr Data of the object (ptr). - * \return hrt_vaddress (saved address) - */ -extern hrt_vaddress ia_css_refcount_increment(int32_t id, hrt_vaddress ptr); - -/*! \brief Function for decrease reference by 1. - * - * \param[in] id ID of the object. - * \param[in] ptr Data of the object (ptr). - * - * - true, if it is successful. - * - false, otherwise. - */ -extern bool ia_css_refcount_decrement(int32_t id, hrt_vaddress ptr); - -/*! \brief Function to check if reference count is 1. 
- * - * \param[in] ptr Data of the object (ptr). - * - * - true, if it is successful. - * - false, otherwise. - */ -extern bool ia_css_refcount_is_single(hrt_vaddress ptr); - -/*! \brief Function to clear reference list objects. - * - * \param[in] id ID of the object. - * \param[in] clear_func function to be run to free reference objects. - * - * return None - */ -extern void ia_css_refcount_clear(int32_t id, - clear_func clear_func_ptr); - -/*! \brief Function to verify if object is valid - * - * \param[in] ptr Data of the object (ptr) - * - * - true, if valid - * - false, if invalid - */ -extern bool ia_css_refcount_is_valid(hrt_vaddress ptr); - -#endif /* _IA_CSS_REFCOUNT_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/src/refcount.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/src/refcount.c deleted file mode 100644 index 6e3bd773ee4c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/src/refcount.c +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_refcount.h" -#include "memory_access/memory_access.h" -#include "sh_css_defs.h" - -#include "platform_support.h" - -#include "assert_support.h" - -#include "ia_css_debug.h" - -/* TODO: enable for other memory aswell - now only for hrt_vaddress */ -struct ia_css_refcount_entry { - uint32_t count; - hrt_vaddress data; - int32_t id; -}; - -struct ia_css_refcount_list { - uint32_t size; - struct ia_css_refcount_entry *items; -}; - -static struct ia_css_refcount_list myrefcount; - -static struct ia_css_refcount_entry *refcount_find_entry(hrt_vaddress ptr, - bool firstfree) -{ - uint32_t i; - - if (ptr == 0) - return NULL; - if (myrefcount.items == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "refcount_find_entry(): Ref count not initiliazed!\n"); - return NULL; - } - - for (i = 0; i < myrefcount.size; i++) { - - if ((&myrefcount.items[i])->data == 0) { - if (firstfree) { - /* for new entry */ - return &myrefcount.items[i]; - } - } - if ((&myrefcount.items[i])->data == ptr) { - /* found entry */ - return &myrefcount.items[i]; - } - } - return NULL; -} - -enum ia_css_err ia_css_refcount_init(uint32_t size) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - if (size == 0) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_init(): Size of 0 for Ref count init!\n"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - if (myrefcount.items != NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_init(): Ref count is already initialized\n"); - return IA_CSS_ERR_INTERNAL_ERROR; - } - myrefcount.items = - sh_css_malloc(sizeof(struct ia_css_refcount_entry) * size); - if (!myrefcount.items) - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - if (err == IA_CSS_SUCCESS) { - memset(myrefcount.items, 0, - sizeof(struct ia_css_refcount_entry) * size); - myrefcount.size = size; - } - return err; -} - -void ia_css_refcount_uninit(void) -{ - struct ia_css_refcount_entry *entry; - uint32_t i; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - 
"ia_css_refcount_uninit() entry\n"); - for (i = 0; i < myrefcount.size; i++) { - /* driver verifier tool has issues with &arr[i] - and prefers arr + i; as these are actually equivalent - the line below uses + i - */ - entry = myrefcount.items + i; - if (entry->data != mmgr_NULL) { - /* ia_css_debug_dtrace(IA_CSS_DBG_TRACE, - "ia_css_refcount_uninit: freeing (%x)\n", - entry->data);*/ - hmm_free(entry->data); - entry->data = mmgr_NULL; - entry->count = 0; - entry->id = 0; - } - } - sh_css_free(myrefcount.items); - myrefcount.items = NULL; - myrefcount.size = 0; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_uninit() leave\n"); -} - -hrt_vaddress ia_css_refcount_increment(int32_t id, hrt_vaddress ptr) -{ - struct ia_css_refcount_entry *entry; - - if (ptr == mmgr_NULL) - return ptr; - - entry = refcount_find_entry(ptr, false); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_increment(%x) 0x%x\n", id, ptr); - - if (!entry) { - entry = refcount_find_entry(ptr, true); - assert(entry != NULL); - if (entry == NULL) - return mmgr_NULL; - entry->id = id; - } - - if (entry->id != id) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "ia_css_refcount_increment(): Ref count IDS do not match!\n"); - return mmgr_NULL; - } - - if (entry->data == ptr) - entry->count += 1; - else if (entry->data == mmgr_NULL) { - entry->data = ptr; - entry->count = 1; - } else - return mmgr_NULL; - - return ptr; -} - -bool ia_css_refcount_decrement(int32_t id, hrt_vaddress ptr) -{ - struct ia_css_refcount_entry *entry; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_decrement(%x) 0x%x\n", id, ptr); - - if (ptr == mmgr_NULL) - return false; - - entry = refcount_find_entry(ptr, false); - - if (entry) { - if (entry->id != id) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "ia_css_refcount_decrement(): Ref count IDS do not match!\n"); - return false; - } - if (entry->count > 0) { - entry->count -= 1; - if (entry->count == 0) { - /* 
ia_css_debug_dtrace(IA_CSS_DBEUG_TRACE, - "ia_css_refcount_decrement: freeing\n");*/ - hmm_free(ptr); - entry->data = mmgr_NULL; - entry->id = 0; - } - return true; - } - } - - /* SHOULD NOT HAPPEN: ptr not managed by refcount, or not - valid anymore */ - if (entry) - IA_CSS_ERROR("id %x, ptr 0x%x entry %p entry->id %x entry->count %d\n", - id, ptr, entry, entry->id, entry->count); - else - IA_CSS_ERROR("entry NULL\n"); -#ifdef ISP2401 - assert(false); -#endif - - return false; -} - -bool ia_css_refcount_is_single(hrt_vaddress ptr) -{ - struct ia_css_refcount_entry *entry; - - if (ptr == mmgr_NULL) - return false; - - entry = refcount_find_entry(ptr, false); - - if (entry) - return (entry->count == 1); - - return true; -} - -void ia_css_refcount_clear(int32_t id, clear_func clear_func_ptr) -{ - struct ia_css_refcount_entry *entry; - uint32_t i; - uint32_t count = 0; - - assert(clear_func_ptr != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_clear(%x)\n", - id); - - for (i = 0; i < myrefcount.size; i++) { - /* driver verifier tool has issues with &arr[i] - and prefers arr + i; as these are actually equivalent - the line below uses + i - */ - entry = myrefcount.items + i; - if ((entry->data != mmgr_NULL) && (entry->id == id)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_clear:" - " %x: 0x%x\n", id, entry->data); - if (clear_func_ptr) { - /* clear using provided function */ - clear_func_ptr(entry->data); - } else { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_clear: " - "using hmm_free: " - "no clear_func\n"); - hmm_free(entry->data); - } -#ifndef ISP2401 - -#else - assert(entry->count == 0); -#endif - if (entry->count != 0) { - IA_CSS_WARNING("Ref count for entry %x is not zero!", entry->id); - } - entry->data = mmgr_NULL; - entry->count = 0; - entry->id = 0; - count++; - } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_clear(%x): cleared %d\n", id, - count); -} - -bool 
ia_css_refcount_is_valid(hrt_vaddress ptr) -{ - struct ia_css_refcount_entry *entry; - - if (ptr == mmgr_NULL) - return false; - - entry = refcount_find_entry(ptr, false); - - return entry != NULL; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h deleted file mode 100644 index a6d650a9a1f4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_PIPE_BINARYDESC_H__ -#define __IA_CSS_PIPE_BINARYDESC_H__ - -#include /* ia_css_pipe */ -#include /* ia_css_frame_info */ -#include /* ia_css_binary_descr */ - -/* @brief Get a binary descriptor for copy. - * - * @param[in] pipe - * @param[out] copy_desc - * @param[in/out] in_info - * @param[in/out] out_info - * @param[in/out] vf_info - * @return None - * - */ -extern void ia_css_pipe_get_copy_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *copy_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info); - -/* @brief Get a binary descriptor for vfpp. 
- * - * @param[in] pipe - * @param[out] vfpp_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @return None - * - */ -extern void ia_css_pipe_get_vfpp_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *vf_pp_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info); - -/* @brief Get numerator and denominator of bayer downscaling factor. - * - * @param[in] bds_factor: The bayer downscaling factor. - * (= The bds_factor member in the sh_css_bds_factor structure.) - * @param[out] bds_factor_numerator: The numerator of the bayer downscaling factor. - * (= The numerator member in the sh_css_bds_factor structure.) - * @param[out] bds_factor_denominator: The denominator of the bayer downscaling factor. - * (= The denominator member in the sh_css_bds_factor structure.) - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -extern enum ia_css_err sh_css_bds_factor_get_numerator_denominator( - unsigned int bds_factor, - unsigned int *bds_factor_numerator, - unsigned int *bds_factor_denominator); - -/* @brief Get a binary descriptor for preview stage. - * - * @param[in] pipe - * @param[out] preview_descr - * @param[in/out] in_info - * @param[in/out] bds_out_info - * @param[in/out] out_info - * @param[in/out] vf_info - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -extern enum ia_css_err ia_css_pipe_get_preview_binarydesc( - struct ia_css_pipe * const pipe, - struct ia_css_binary_descr *preview_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *bds_out_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info); - -/* @brief Get a binary descriptor for video stage. - * - * @param[in/out] pipe - * @param[out] video_descr - * @param[in/out] in_info - * @param[in/out] bds_out_info - * @param[in/out] vf_info - * @return IA_CSS_SUCCESS or error code upon error. 
- * - */ -extern enum ia_css_err ia_css_pipe_get_video_binarydesc( - struct ia_css_pipe * const pipe, - struct ia_css_binary_descr *video_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *bds_out_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info, - int stream_config_left_padding); - -/* @brief Get a binary descriptor for yuv scaler stage. - * - * @param[in/out] pipe - * @param[out] yuv_scaler_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @param[in/out] internal_out_info - * @param[in/out] vf_info - * @return None - * - */ -void ia_css_pipe_get_yuvscaler_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *yuv_scaler_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *internal_out_info, - struct ia_css_frame_info *vf_info); - -/* @brief Get a binary descriptor for capture pp stage. - * - * @param[in/out] pipe - * @param[out] capture_pp_descr - * @param[in/out] in_info - * @param[in/out] vf_info - * @return None - * - */ -extern void ia_css_pipe_get_capturepp_binarydesc( - struct ia_css_pipe * const pipe, - struct ia_css_binary_descr *capture_pp_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info); - -/* @brief Get a binary descriptor for primary capture. - * - * @param[in] pipe - * @param[out] prim_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @param[in/out] vf_info - * @return None - * - */ -extern void ia_css_pipe_get_primary_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *prim_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info, - unsigned int stage_idx); - -/* @brief Get a binary descriptor for pre gdc stage. 
- * - * @param[in] pipe - * @param[out] pre_gdc_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @return None - * - */ -extern void ia_css_pipe_get_pre_gdc_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *gdc_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info); - -/* @brief Get a binary descriptor for gdc stage. - * - * @param[in] pipe - * @param[out] gdc_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @return None - * - */ -extern void ia_css_pipe_get_gdc_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *gdc_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info); - -/* @brief Get a binary descriptor for post gdc. - * - * @param[in] pipe - * @param[out] post_gdc_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @param[in/out] vf_info - * @return None - * - */ -extern void ia_css_pipe_get_post_gdc_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *post_gdc_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info); - -/* @brief Get a binary descriptor for de. - * - * @param[in] pipe - * @param[out] pre_de_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @return None - * - */ -extern void ia_css_pipe_get_pre_de_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *pre_de_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info); - -/* @brief Get a binary descriptor for pre anr stage. 
- * - * @param[in] pipe - * @param[out] pre_anr_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @return None - * - */ -extern void ia_css_pipe_get_pre_anr_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *pre_anr_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info); - -/* @brief Get a binary descriptor for ANR stage. - * - * @param[in] pipe - * @param[out] anr_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @return None - * - */ -extern void ia_css_pipe_get_anr_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *anr_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info); - -/* @brief Get a binary descriptor for post anr stage. - * - * @param[in] pipe - * @param[out] post_anr_descr - * @param[in/out] in_info - * @param[in/out] out_info - * @param[in/out] vf_info - * @return None - * - */ -extern void ia_css_pipe_get_post_anr_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *post_anr_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info); - -/* @brief Get a binary descriptor for ldc stage. - * - * @param[in/out] pipe - * @param[out] capture_pp_descr - * @param[in/out] in_info - * @param[in/out] vf_info - * @return None - * - */ -extern void ia_css_pipe_get_ldc_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *ldc_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info); - -/* @brief Calculates the required BDS factor - * - * @param[in] input_res - * @param[in] output_res - * @param[in/out] bds_factor - * @return IA_CSS_SUCCESS or error code upon error. 
- */ -enum ia_css_err binarydesc_calculate_bds_factor( - struct ia_css_resolution input_res, - struct ia_css_resolution output_res, - unsigned int *bds_factor); - -#endif /* __IA_CSS_PIPE_BINARYDESC_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_stagedesc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_stagedesc.h deleted file mode 100644 index 38690ea093c2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_stagedesc.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_PIPE_STAGEDESC_H__ -#define __IA_CSS_PIPE_STAGEDESC_H__ - -#include /* ia_css_fw_info */ -#include -#include -#include "ia_css_pipeline.h" -#include "ia_css_pipeline_common.h" - -extern void ia_css_pipe_get_generic_stage_desc( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_binary *binary, - struct ia_css_frame *out_frame[], - struct ia_css_frame *in_frame, - struct ia_css_frame *vf_frame); - -extern void ia_css_pipe_get_firmwares_stage_desc( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_binary *binary, - struct ia_css_frame *out_frame[], - struct ia_css_frame *in_frame, - struct ia_css_frame *vf_frame, - const struct ia_css_fw_info *fw, - unsigned int mode); - -extern void ia_css_pipe_get_acc_stage_desc( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_binary *binary, - struct ia_css_fw_info *fw); - -extern void ia_css_pipe_get_sp_func_stage_desc( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_frame *out_frame, - enum ia_css_pipeline_stage_sp_func sp_func, - unsigned max_input_width); - -#endif /*__IA_CSS_PIPE_STAGEDESC__H__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h deleted file mode 100644 index 155b6fb4722b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_PIPE_UTIL_H__ -#define __IA_CSS_PIPE_UTIL_H__ - -#include -#include - -/* @brief Get Input format bits per pixel based on stream configuration of this - * pipe. - * - * @param[in] pipe - * @return bits per pixel for the underlying stream - * - */ -extern unsigned int ia_css_pipe_util_pipe_input_format_bpp( - const struct ia_css_pipe * const pipe); - -extern void ia_css_pipe_util_create_output_frames( - struct ia_css_frame *frames[]); - -extern void ia_css_pipe_util_set_output_frames( - struct ia_css_frame *frames[], - unsigned int idx, - struct ia_css_frame *frame); - -#endif /* __IA_CSS_PIPE_UTIL_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c deleted file mode 100644 index 98a2a3e9b3e6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c +++ /dev/null @@ -1,880 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_pipe_binarydesc.h" -#include "ia_css_frame_format.h" -#include "ia_css_pipe.h" -#include "ia_css_pipe_util.h" -#include "ia_css_util.h" -#include "ia_css_debug.h" -#include "sh_css_params.h" -#include -/* HRT_GDC_N */ -#include "gdc_device.h" -#include - -/* This module provides a binary descriptions to used to find a binary. Since, - * every stage is associated with a binary, it implicity helps stage - * description. Apart from providing a binary description, this module also - * populates the frame info's when required.*/ - -/* Generic descriptor for offline binaries. Internal function. */ -static void pipe_binarydesc_get_offline( - struct ia_css_pipe const * const pipe, - const int mode, - struct ia_css_binary_descr *descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info[], - struct ia_css_frame_info *vf_info) -{ - unsigned int i; - /* in_info, out_info, vf_info can be NULL */ - assert(pipe != NULL); - assert(descr != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "pipe_binarydesc_get_offline() enter:\n"); - - descr->mode = mode; - descr->online = false; - descr->continuous = pipe->stream->config.continuous; - descr->striped = false; - descr->two_ppc = false; - descr->enable_yuv_ds = false; - descr->enable_high_speed = false; - descr->enable_dvs_6axis = false; - descr->enable_reduced_pipe = false; - descr->enable_dz = true; - descr->enable_xnr = false; - descr->enable_dpc = false; -#ifdef ISP2401 - descr->enable_luma_only = false; - descr->enable_tnr = false; -#endif - descr->enable_capture_pp_bli = false; - descr->enable_fractional_ds = false; - descr->dvs_env.width = 0; - descr->dvs_env.height = 0; - descr->stream_format = pipe->stream->config.input_config.format; - descr->in_info = in_info; - descr->bds_out_info = NULL; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - descr->out_info[i] = out_info[i]; - descr->vf_info = vf_info; - descr->isp_pipe_version = pipe->config.isp_pipe_version; - 
descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00; - descr->stream_config_left_padding = -1; -} - -void ia_css_pipe_get_copy_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *copy_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info) -{ - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - unsigned int i; - /* out_info can be NULL */ - assert(pipe != NULL); - assert(in_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - *in_info = *out_info; - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_COPY, - copy_descr, in_info, out_infos, vf_info); - copy_descr->online = true; - copy_descr->continuous = false; - copy_descr->two_ppc = (pipe->stream->config.pixels_per_clock == 2); - copy_descr->enable_dz = false; - copy_descr->isp_pipe_version = IA_CSS_PIPE_VERSION_1; - IA_CSS_LEAVE_PRIVATE(""); -} -void ia_css_pipe_get_vfpp_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *vf_pp_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info) -{ - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - unsigned int i; - /* out_info can be NULL ??? 
*/ - assert(pipe != NULL); - assert(in_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - in_info->raw_bit_depth = 0; - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_VF_PP, - vf_pp_descr, in_info, out_infos, NULL); - vf_pp_descr->enable_fractional_ds = true; - IA_CSS_LEAVE_PRIVATE(""); -} - -static struct sh_css_bds_factor bds_factors_list[] = { - {1, 1, SH_CSS_BDS_FACTOR_1_00}, - {5, 4, SH_CSS_BDS_FACTOR_1_25}, - {3, 2, SH_CSS_BDS_FACTOR_1_50}, - {2, 1, SH_CSS_BDS_FACTOR_2_00}, - {9, 4, SH_CSS_BDS_FACTOR_2_25}, - {5, 2, SH_CSS_BDS_FACTOR_2_50}, - {3, 1, SH_CSS_BDS_FACTOR_3_00}, - {4, 1, SH_CSS_BDS_FACTOR_4_00}, - {9, 2, SH_CSS_BDS_FACTOR_4_50}, - {5, 1, SH_CSS_BDS_FACTOR_5_00}, - {6, 1, SH_CSS_BDS_FACTOR_6_00}, - {8, 1, SH_CSS_BDS_FACTOR_8_00} -}; - -enum ia_css_err sh_css_bds_factor_get_numerator_denominator( - unsigned int bds_factor, - unsigned int *bds_factor_numerator, - unsigned int *bds_factor_denominator) -{ - unsigned int i; - - /* Loop over all bds factors until a match is found */ - for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) { - if (bds_factors_list[i].bds_factor == bds_factor) { - *bds_factor_numerator = bds_factors_list[i].numerator; - *bds_factor_denominator = bds_factors_list[i].denominator; - return IA_CSS_SUCCESS; - } - } - - /* Throw an error since bds_factor cannot be found - in bds_factors_list */ - return IA_CSS_ERR_INVALID_ARGUMENTS; -} - -enum ia_css_err binarydesc_calculate_bds_factor( - struct ia_css_resolution input_res, - struct ia_css_resolution output_res, - unsigned int *bds_factor) -{ - unsigned int i; - unsigned int in_w = input_res.width, - in_h = input_res.height, - out_w = output_res.width, out_h = output_res.height; - - unsigned int max_bds_factor = 8; - unsigned int max_rounding_margin = 2; - /* delta in pixels to account for rounding margin in the calculation */ - unsigned int delta = max_bds_factor * 
max_rounding_margin; - - /* Assert if the resolutions are not set */ - assert(in_w != 0 && in_h != 0); - assert(out_w != 0 && out_h != 0); - - /* Loop over all bds factors until a match is found */ - for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) { - unsigned num = bds_factors_list[i].numerator; - unsigned den = bds_factors_list[i].denominator; - - /* See width-wise and height-wise if this bds_factor - * satisfies the condition */ - bool cond = (out_w * num / den + delta > in_w) && - (out_w * num / den <= in_w) && - (out_h * num / den + delta > in_h) && - (out_h * num / den <= in_h); - - if (cond) { - *bds_factor = bds_factors_list[i].bds_factor; - return IA_CSS_SUCCESS; - } - } - - /* Throw an error since a suitable bds_factor cannot be found */ - return IA_CSS_ERR_INVALID_ARGUMENTS; -} - -enum ia_css_err ia_css_pipe_get_preview_binarydesc( - struct ia_css_pipe * const pipe, - struct ia_css_binary_descr *preview_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *bds_out_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info) -{ - enum ia_css_err err; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - int mode = IA_CSS_BINARY_MODE_PREVIEW; - unsigned int i; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - assert(vf_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - /* - * Set up the info of the input frame with - * the ISP required resolution - */ - in_info->res = pipe->config.input_effective_res; - in_info->padded_width = in_info->res.width; - in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe); - - if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format)) - mode = IA_CSS_BINARY_MODE_COPY; - else - in_info->format = IA_CSS_FRAME_FORMAT_RAW; - - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, mode, - preview_descr, in_info, out_infos, 
vf_info); - if (pipe->stream->config.online) { - preview_descr->online = pipe->stream->config.online; - preview_descr->two_ppc = - (pipe->stream->config.pixels_per_clock == 2); - } - preview_descr->stream_format = pipe->stream->config.input_config.format; - - /* TODO: Remove this when bds_out_info is available! */ - *bds_out_info = *in_info; - - if (pipe->extra_config.enable_raw_binning) { - if (pipe->config.bayer_ds_out_res.width != 0 && - pipe->config.bayer_ds_out_res.height != 0) { - bds_out_info->res.width = - pipe->config.bayer_ds_out_res.width; - bds_out_info->res.height = - pipe->config.bayer_ds_out_res.height; - bds_out_info->padded_width = - pipe->config.bayer_ds_out_res.width; - err = - binarydesc_calculate_bds_factor(in_info->res, - bds_out_info->res, - &preview_descr->required_bds_factor); - if (err != IA_CSS_SUCCESS) - return err; - } else { - bds_out_info->res.width = in_info->res.width / 2; - bds_out_info->res.height = in_info->res.height / 2; - bds_out_info->padded_width = in_info->padded_width / 2; - preview_descr->required_bds_factor = - SH_CSS_BDS_FACTOR_2_00; - } - } else { - /* TODO: Remove this when bds_out_info->is available! */ - bds_out_info->res.width = in_info->res.width; - bds_out_info->res.height = in_info->res.height; - bds_out_info->padded_width = in_info->padded_width; - preview_descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00; - } - pipe->required_bds_factor = preview_descr->required_bds_factor; - - /* bayer ds and fractional ds cannot be enabled at the same time, - so we disable bds_out_info when fractional ds is used */ - if (!pipe->extra_config.enable_fractional_ds) - preview_descr->bds_out_info = bds_out_info; - else - preview_descr->bds_out_info = NULL; - /* - ----Preview binary----- - --in-->|--out->|vf_veceven|--|--->vf - ----------------------- - * Preview binary normally doesn't have a vf_port but - * instead it has an output port. 
However, the output is - * generated by vf_veceven module in which we might have - * a downscaling (by 1x, 2x, or 4x). Because the resolution - * might change, we need two different info, namely out_info - * & vf_info. In fill_binary_info we use out&vf info to - * calculate vf decimation factor. - */ - *out_info = *vf_info; - - /* In case of preview_ds binary, we can do any fractional amount - * of downscale, so there is no DS needed in vf_veceven. Therefore, - * out and vf infos will be the same. Otherwise, we set out resolution - * equal to in resolution. */ - if (!pipe->extra_config.enable_fractional_ds) { - /* TODO: Change this when bds_out_info is available! */ - out_info->res.width = bds_out_info->res.width; - out_info->res.height = bds_out_info->res.height; - out_info->padded_width = bds_out_info->padded_width; - } - preview_descr->enable_fractional_ds = - pipe->extra_config.enable_fractional_ds; - - preview_descr->enable_dpc = pipe->config.enable_dpc; - - preview_descr->isp_pipe_version = pipe->config.isp_pipe_version; - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -enum ia_css_err ia_css_pipe_get_video_binarydesc( - struct ia_css_pipe * const pipe, - struct ia_css_binary_descr *video_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *bds_out_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info, - int stream_config_left_padding) -{ - int mode = IA_CSS_BINARY_MODE_VIDEO; - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - enum ia_css_err err = IA_CSS_SUCCESS; - bool stream_dz_config = false; - - /* vf_info can be NULL */ - assert(pipe != NULL); - assert(in_info != NULL); - /* assert(vf_info != NULL); */ - IA_CSS_ENTER_PRIVATE(""); - - /* The solution below is not optimal; we should move to using ia_css_pipe_get_copy_binarydesc() - * But for now this fixes things; this code used to be there but was removed - * with gerrit 8908 as this was wrong 
for Skycam; however 240x still needs this - */ - if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format)) - mode = IA_CSS_BINARY_MODE_COPY; - - in_info->res = pipe->config.input_effective_res; - in_info->padded_width = in_info->res.width; - in_info->format = IA_CSS_FRAME_FORMAT_RAW; - in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe); - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, mode, - video_descr, in_info, out_infos, vf_info); - - if (pipe->stream->config.online) { - video_descr->online = pipe->stream->config.online; - video_descr->two_ppc = - (pipe->stream->config.pixels_per_clock == 2); - } - - if (mode == IA_CSS_BINARY_MODE_VIDEO) { - stream_dz_config = - ((pipe->stream->isp_params_configs->dz_config.dx != - HRT_GDC_N) - || (pipe->stream->isp_params_configs->dz_config.dy != - HRT_GDC_N)); - - video_descr->enable_dz = pipe->config.enable_dz - || stream_dz_config; - video_descr->dvs_env = pipe->config.dvs_envelope; - video_descr->enable_yuv_ds = pipe->extra_config.enable_yuv_ds; - video_descr->enable_high_speed = - pipe->extra_config.enable_high_speed; - video_descr->enable_dvs_6axis = - pipe->extra_config.enable_dvs_6axis; - video_descr->enable_reduced_pipe = - pipe->extra_config.enable_reduced_pipe; - video_descr->isp_pipe_version = pipe->config.isp_pipe_version; - video_descr->enable_fractional_ds = - pipe->extra_config.enable_fractional_ds; - video_descr->enable_dpc = - pipe->config.enable_dpc; -#ifdef ISP2401 - video_descr->enable_luma_only = - pipe->config.enable_luma_only; - video_descr->enable_tnr = - pipe->config.enable_tnr; -#endif - - if (pipe->extra_config.enable_raw_binning) { - if (pipe->config.bayer_ds_out_res.width != 0 && - pipe->config.bayer_ds_out_res.height != 0) { - bds_out_info->res.width = - pipe->config.bayer_ds_out_res.width; - bds_out_info->res.height = - pipe->config.bayer_ds_out_res.height; - 
bds_out_info->padded_width = - pipe->config.bayer_ds_out_res.width; - err = - binarydesc_calculate_bds_factor( - in_info->res, bds_out_info->res, - &video_descr->required_bds_factor); - if (err != IA_CSS_SUCCESS) - return err; - } else { - bds_out_info->res.width = - in_info->res.width / 2; - bds_out_info->res.height = - in_info->res.height / 2; - bds_out_info->padded_width = - in_info->padded_width / 2; - video_descr->required_bds_factor = - SH_CSS_BDS_FACTOR_2_00; - } - } else { - bds_out_info->res.width = in_info->res.width; - bds_out_info->res.height = in_info->res.height; - bds_out_info->padded_width = in_info->padded_width; - video_descr->required_bds_factor = - SH_CSS_BDS_FACTOR_1_00; - } - - pipe->required_bds_factor = video_descr->required_bds_factor; - - /* bayer ds and fractional ds cannot be enabled - at the same time, so we disable bds_out_info when - fractional ds is used */ - if (!pipe->extra_config.enable_fractional_ds) - video_descr->bds_out_info = bds_out_info; - else - video_descr->bds_out_info = NULL; - - video_descr->enable_fractional_ds = - pipe->extra_config.enable_fractional_ds; - video_descr->stream_config_left_padding = stream_config_left_padding; - } - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -void ia_css_pipe_get_yuvscaler_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *yuv_scaler_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *internal_out_info, - struct ia_css_frame_info *vf_info) -{ - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_frame_info *this_vf_info = NULL; - - assert(pipe != NULL); - assert(in_info != NULL); - /* Note: if the following assert fails, the number of ports has been - * changed; in that case an additional initializer must be added - * a few lines below after which this assert can be updated. 
- */ - assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == 2); - IA_CSS_ENTER_PRIVATE(""); - - in_info->padded_width = in_info->res.width; - in_info->raw_bit_depth = 0; - ia_css_frame_info_set_width(in_info, in_info->res.width, 0); - out_infos[0] = out_info; - out_infos[1] = internal_out_info; - /* add initializers here if - * assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == ...); - * fails - */ - - if (vf_info) { - this_vf_info = (vf_info->res.width == 0 && - vf_info->res.height == 0) ? NULL : vf_info; - } - - pipe_binarydesc_get_offline(pipe, - IA_CSS_BINARY_MODE_CAPTURE_PP, - yuv_scaler_descr, - in_info, out_infos, this_vf_info); - - yuv_scaler_descr->enable_fractional_ds = true; - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_capturepp_binarydesc( - struct ia_css_pipe * const pipe, - struct ia_css_binary_descr *capture_pp_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(vf_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - - /* the in_info is only used for resolution to enable - bayer down scaling. 
*/ - if (pipe->out_yuv_ds_input_info.res.width) - *in_info = pipe->out_yuv_ds_input_info; - else - *in_info = *out_info; - in_info->format = IA_CSS_FRAME_FORMAT_YUV420; - in_info->raw_bit_depth = 0; - ia_css_frame_info_set_width(in_info, in_info->res.width, 0); - - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, - IA_CSS_BINARY_MODE_CAPTURE_PP, - capture_pp_descr, - in_info, out_infos, vf_info); - - capture_pp_descr->enable_capture_pp_bli = - pipe->config.default_capture_config.enable_capture_pp_bli; - capture_pp_descr->enable_fractional_ds = true; - capture_pp_descr->enable_xnr = - pipe->config.default_capture_config.enable_xnr != 0; - IA_CSS_LEAVE_PRIVATE(""); -} - -/* lookup table for high quality primary binaries */ -static unsigned int primary_hq_binary_modes[NUM_PRIMARY_HQ_STAGES] = -{ - IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0, - IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1, - IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2, - IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3, - IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4, - IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5 -}; - -void ia_css_pipe_get_primary_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *prim_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info, - unsigned int stage_idx) -{ - enum ia_css_pipe_version pipe_version = pipe->config.isp_pipe_version; - int mode; - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - assert(stage_idx < NUM_PRIMARY_HQ_STAGES); - /* vf_info can be NULL - example video_binarydescr */ - /*assert(vf_info != NULL);*/ - IA_CSS_ENTER_PRIVATE(""); - - if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1) - mode = primary_hq_binary_modes[stage_idx]; - else - mode = IA_CSS_BINARY_MODE_PRIMARY; - - if 
(ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format)) - mode = IA_CSS_BINARY_MODE_COPY; - - in_info->res = pipe->config.input_effective_res; - in_info->padded_width = in_info->res.width; - -#if !defined(HAS_NO_PACKED_RAW_PIXELS) - if (pipe->stream->config.pack_raw_pixels) - in_info->format = IA_CSS_FRAME_FORMAT_RAW_PACKED; - else -#endif - in_info->format = IA_CSS_FRAME_FORMAT_RAW; - - in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe); - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, mode, - prim_descr, in_info, out_infos, vf_info); - - if (pipe->stream->config.online && - pipe->stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) { - prim_descr->online = true; - prim_descr->two_ppc = - (pipe->stream->config.pixels_per_clock == 2); - prim_descr->stream_format = pipe->stream->config.input_config.format; - } - if (mode == IA_CSS_BINARY_MODE_PRIMARY) { - prim_descr->isp_pipe_version = pipe->config.isp_pipe_version; - prim_descr->enable_fractional_ds = - pipe->extra_config.enable_fractional_ds; -#ifdef ISP2401 - prim_descr->enable_luma_only = - pipe->config.enable_luma_only; -#endif - /* We have both striped and non-striped primary binaries, - * if continuous viewfinder is required, then we must select - * a striped one. Otherwise we prefer to use a non-striped - * since it has better performance. 
*/ - if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1) - prim_descr->striped = false; - else -#ifndef ISP2401 - prim_descr->striped = prim_descr->continuous && (!pipe->stream->stop_copy_preview || !pipe->stream->disable_cont_vf); -#else - prim_descr->striped = prim_descr->continuous && !pipe->stream->disable_cont_vf; - - if ((pipe->config.default_capture_config.enable_xnr != 0) && - (pipe->extra_config.enable_dvs_6axis == true)) - prim_descr->enable_xnr = true; -#endif - } - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_pre_gdc_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *pre_gdc_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - *in_info = *out_info; - in_info->format = IA_CSS_FRAME_FORMAT_RAW; - in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe); - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP, - pre_gdc_descr, in_info, out_infos, NULL); - pre_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version; - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_gdc_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *gdc_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - *in_info = *out_info; - in_info->format = IA_CSS_FRAME_FORMAT_QPLANE6; - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, 
IA_CSS_BINARY_MODE_GDC, - gdc_descr, in_info, out_infos, NULL); - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_post_gdc_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *post_gdc_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - assert(vf_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - *in_info = *out_info; - in_info->format = IA_CSS_FRAME_FORMAT_YUV420_16; - in_info->raw_bit_depth = 16; - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP, - post_gdc_descr, in_info, out_infos, vf_info); - - post_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version; - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_pre_de_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *pre_de_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - *in_info = *out_info; - in_info->format = IA_CSS_FRAME_FORMAT_RAW; - in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe); - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1) - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP, - pre_de_descr, in_info, out_infos, NULL); - else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2) { - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_DE, - pre_de_descr, in_info, out_infos, NULL); - 
} - - if (pipe->stream->config.online) { - pre_de_descr->online = true; - pre_de_descr->two_ppc = - (pipe->stream->config.pixels_per_clock == 2); - pre_de_descr->stream_format = pipe->stream->config.input_config.format; - } - pre_de_descr->isp_pipe_version = pipe->config.isp_pipe_version; - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_pre_anr_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *pre_anr_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - *in_info = *out_info; - in_info->format = IA_CSS_FRAME_FORMAT_RAW; - in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe); - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP, - pre_anr_descr, in_info, out_infos, NULL); - - if (pipe->stream->config.online) { - pre_anr_descr->online = true; - pre_anr_descr->two_ppc = - (pipe->stream->config.pixels_per_clock == 2); - pre_anr_descr->stream_format = pipe->stream->config.input_config.format; - } - pre_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version; - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_anr_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *anr_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - *in_info = *out_info; - in_info->format = IA_CSS_FRAME_FORMAT_RAW; - in_info->raw_bit_depth = ANR_ELEMENT_BITS; - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; 
i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_ANR, - anr_descr, in_info, out_infos, NULL); - - anr_descr->isp_pipe_version = pipe->config.isp_pipe_version; - IA_CSS_LEAVE_PRIVATE(""); -} - - -void ia_css_pipe_get_post_anr_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *post_anr_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - assert(vf_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - - *in_info = *out_info; - in_info->format = IA_CSS_FRAME_FORMAT_RAW; - in_info->raw_bit_depth = ANR_ELEMENT_BITS; - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP, - post_anr_descr, in_info, out_infos, vf_info); - - post_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version; - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_ldc_binarydesc( - struct ia_css_pipe const * const pipe, - struct ia_css_binary_descr *ldc_descr, - struct ia_css_frame_info *in_info, - struct ia_css_frame_info *out_info) -{ - unsigned int i; - struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - - assert(pipe != NULL); - assert(in_info != NULL); - assert(out_info != NULL); - IA_CSS_ENTER_PRIVATE(""); - -#ifndef ISP2401 - *in_info = *out_info; -#else - if (pipe->out_yuv_ds_input_info.res.width) - *in_info = pipe->out_yuv_ds_input_info; - else - *in_info = *out_info; -#endif - in_info->format = IA_CSS_FRAME_FORMAT_YUV420; - in_info->raw_bit_depth = 0; - ia_css_frame_info_set_width(in_info, in_info->res.width, 0); - - out_infos[0] = out_info; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - out_infos[i] = NULL; - - pipe_binarydesc_get_offline(pipe, 
IA_CSS_BINARY_MODE_CAPTURE_PP, - ldc_descr, in_info, out_infos, NULL); - ldc_descr->enable_dvs_6axis = - pipe->extra_config.enable_dvs_6axis; - IA_CSS_LEAVE_PRIVATE(""); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_stagedesc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_stagedesc.c deleted file mode 100644 index 40af8daf5ad9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_stagedesc.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_pipe_stagedesc.h" -#include "assert_support.h" -#include "ia_css_debug.h" - -void ia_css_pipe_get_generic_stage_desc( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_binary *binary, - struct ia_css_frame *out_frame[], - struct ia_css_frame *in_frame, - struct ia_css_frame *vf_frame) -{ - unsigned int i; - IA_CSS_ENTER_PRIVATE("stage_desc = %p, binary = %p, out_frame = %p, in_frame = %p, vf_frame = %p", - stage_desc, binary, out_frame, in_frame, vf_frame); - - assert(stage_desc != NULL && binary != NULL && binary->info != NULL); - if (stage_desc == NULL || binary == NULL || binary->info == NULL) { - IA_CSS_ERROR("invalid arguments"); - goto ERR; - } - - stage_desc->binary = binary; - stage_desc->firmware = NULL; - stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC; - stage_desc->max_input_width = 0; - stage_desc->mode = binary->info->sp.pipeline.mode; - stage_desc->in_frame = in_frame; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - stage_desc->out_frame[i] = out_frame[i]; - } - stage_desc->vf_frame = vf_frame; -ERR: - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_pipe_get_firmwares_stage_desc( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_binary *binary, - struct ia_css_frame *out_frame[], - struct ia_css_frame *in_frame, - struct ia_css_frame *vf_frame, - const struct ia_css_fw_info *fw, - unsigned int mode) -{ - unsigned int i; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_get_firmwares_stage_desc() enter:\n"); - stage_desc->binary = binary; - stage_desc->firmware = fw; - stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC; - stage_desc->max_input_width = 0; - stage_desc->mode = mode; - stage_desc->in_frame = in_frame; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - stage_desc->out_frame[i] = out_frame[i]; - } - stage_desc->vf_frame = vf_frame; -} - -void ia_css_pipe_get_acc_stage_desc( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_binary *binary, - struct 
ia_css_fw_info *fw) -{ - unsigned int i; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_get_acc_stage_desc() enter:\n"); - stage_desc->binary = binary; - stage_desc->firmware = fw; - stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC; - stage_desc->max_input_width = 0; - stage_desc->mode = IA_CSS_BINARY_MODE_VF_PP; - stage_desc->in_frame = NULL; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - stage_desc->out_frame[i] = NULL; - } - stage_desc->vf_frame = NULL; -} - -void ia_css_pipe_get_sp_func_stage_desc( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_frame *out_frame, - enum ia_css_pipeline_stage_sp_func sp_func, - unsigned max_input_width) -{ - unsigned int i; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_get_sp_func_stage_desc() enter:\n"); - stage_desc->binary = NULL; - stage_desc->firmware = NULL; - stage_desc->sp_func = sp_func; - stage_desc->max_input_width = max_input_width; - stage_desc->mode = (unsigned int)-1; - stage_desc->in_frame = NULL; - stage_desc->out_frame[0] = out_frame; - for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - stage_desc->out_frame[i] = NULL; - } - stage_desc->vf_frame = NULL; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_util.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_util.c deleted file mode 100644 index 5fc1718cb2bd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_util.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_pipe_util.h" -#include "ia_css_frame_public.h" -#include "ia_css_pipe.h" -#include "ia_css_util.h" -#include "assert_support.h" - -unsigned int ia_css_pipe_util_pipe_input_format_bpp( - const struct ia_css_pipe * const pipe) -{ - assert(pipe != NULL); - assert(pipe->stream != NULL); - - return ia_css_util_input_format_bpp(pipe->stream->config.input_config.format, - pipe->stream->config.pixels_per_clock == 2); -} - -void ia_css_pipe_util_create_output_frames( - struct ia_css_frame *frames[]) -{ - unsigned int i; - - assert(frames != NULL); - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - frames[i] = NULL; - } -} - -void ia_css_pipe_util_set_output_frames( - struct ia_css_frame *frames[], - unsigned int idx, - struct ia_css_frame *frame) -{ - assert(idx < IA_CSS_BINARY_MAX_OUTPUT_PORTS); - - frames[idx] = frame; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h deleted file mode 100644 index 5ab48f346790..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_UTIL_H__ -#define __IA_CSS_UTIL_H__ - -#include -#include -#include -#include -#include -#include - -/* @brief convert "errno" error code to "ia_css_err" error code - * - * @param[in] "errno" error code - * @return "ia_css_err" error code - * - */ -enum ia_css_err ia_css_convert_errno( - int in_err); - -/* @brief check vf frame info. - * - * @param[in] info - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -extern enum ia_css_err ia_css_util_check_vf_info( - const struct ia_css_frame_info * const info); - -/* @brief check input configuration. - * - * @param[in] stream_config - * @param[in] must_be_raw - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -extern enum ia_css_err ia_css_util_check_input( - const struct ia_css_stream_config * const stream_config, - bool must_be_raw, - bool must_be_yuv); - -/* @brief check vf and out frame info. - * - * @param[in] out_info - * @param[in] vf_info - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -extern enum ia_css_err ia_css_util_check_vf_out_info( - const struct ia_css_frame_info * const out_info, - const struct ia_css_frame_info * const vf_info); - -/* @brief check width and height - * - * @param[in] width - * @param[in] height - * @return IA_CSS_SUCCESS or error code upon error. 
- * - */ -extern enum ia_css_err ia_css_util_check_res( - unsigned int width, - unsigned int height); - -#ifdef ISP2401 -/* @brief compare resolutions (less or equal) - * - * @param[in] a resolution - * @param[in] b resolution - * @return true if both dimensions of a are less or - * equal than those of b, false otherwise - * - */ -extern bool ia_css_util_res_leq( - struct ia_css_resolution a, - struct ia_css_resolution b); - -/** - * @brief Check if resolution is zero - * - * @param[in] resolution The resolution to check - * - * @returns true if resolution is zero - */ -extern bool ia_css_util_resolution_is_zero( - const struct ia_css_resolution resolution); - -/** - * @brief Check if resolution is even - * - * @param[in] resolution The resolution to check - * - * @returns true if resolution is even - */ -extern bool ia_css_util_resolution_is_even( - const struct ia_css_resolution resolution); - -#endif -/* @brief check width and height - * - * @param[in] stream_format - * @param[in] two_ppc - * @return bits per pixel based on given parameters. 
- * - */ -extern unsigned int ia_css_util_input_format_bpp( - enum atomisp_input_format stream_format, - bool two_ppc); - -/* @brief check if input format it raw - * - * @param[in] stream_format - * @return true if the input format is raw or false otherwise - * - */ -extern bool ia_css_util_is_input_format_raw( - enum atomisp_input_format stream_format); - -/* @brief check if input format it yuv - * - * @param[in] stream_format - * @return true if the input format is yuv or false otherwise - * - */ -extern bool ia_css_util_is_input_format_yuv( - enum atomisp_input_format stream_format); - -#endif /* __IA_CSS_UTIL_H__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c deleted file mode 100644 index 91e586112332..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_util.h" -#include -#include -#include - -/* for ia_css_binary_max_vf_width() */ -#include "ia_css_binary.h" - - -enum ia_css_err ia_css_convert_errno( - int in_err) -{ - enum ia_css_err out_err; - - switch (in_err) { - case 0: - out_err = IA_CSS_SUCCESS; - break; - case EINVAL: - out_err = IA_CSS_ERR_INVALID_ARGUMENTS; - break; - case ENODATA: - out_err = IA_CSS_ERR_QUEUE_IS_EMPTY; - break; - case ENOSYS: - case ENOTSUP: - out_err = IA_CSS_ERR_INTERNAL_ERROR; - break; - case ENOBUFS: - out_err = IA_CSS_ERR_QUEUE_IS_FULL; - break; - default: - out_err = IA_CSS_ERR_INTERNAL_ERROR; - break; - } - return out_err; -} - -/* MW: Table look-up ??? */ -unsigned int ia_css_util_input_format_bpp( - enum atomisp_input_format format, - bool two_ppc) -{ - unsigned int rval = 0; - switch (format) { - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: - case ATOMISP_INPUT_FORMAT_YUV420_8: - case ATOMISP_INPUT_FORMAT_YUV422_8: - case ATOMISP_INPUT_FORMAT_RGB_888: - case ATOMISP_INPUT_FORMAT_RAW_8: - case ATOMISP_INPUT_FORMAT_BINARY_8: - case ATOMISP_INPUT_FORMAT_EMBEDDED: - rval = 8; - break; - case ATOMISP_INPUT_FORMAT_YUV420_10: - case ATOMISP_INPUT_FORMAT_YUV422_10: - case ATOMISP_INPUT_FORMAT_RAW_10: - rval = 10; - break; - case ATOMISP_INPUT_FORMAT_YUV420_16: - case ATOMISP_INPUT_FORMAT_YUV422_16: - rval = 16; - break; - case ATOMISP_INPUT_FORMAT_RGB_444: - rval = 4; - break; - case ATOMISP_INPUT_FORMAT_RGB_555: - rval = 5; - break; - case ATOMISP_INPUT_FORMAT_RGB_565: - rval = 65; - break; - case ATOMISP_INPUT_FORMAT_RGB_666: - case ATOMISP_INPUT_FORMAT_RAW_6: - rval = 6; - break; - case ATOMISP_INPUT_FORMAT_RAW_7: - rval = 7; - break; - case ATOMISP_INPUT_FORMAT_RAW_12: - rval = 12; - break; - case ATOMISP_INPUT_FORMAT_RAW_14: - if (two_ppc) - rval = 14; - else - rval = 12; - break; - case ATOMISP_INPUT_FORMAT_RAW_16: - if (two_ppc) - rval = 16; - else - rval = 12; - break; - default: - rval = 0; - break; - - } - return rval; -} - -enum ia_css_err 
ia_css_util_check_vf_info( - const struct ia_css_frame_info * const info) -{ - enum ia_css_err err; - unsigned int max_vf_width; - assert(info != NULL); - err = ia_css_frame_check_info(info); - if (err != IA_CSS_SUCCESS) - return err; - max_vf_width = ia_css_binary_max_vf_width(); - if (max_vf_width != 0 && info->res.width > max_vf_width*2) - return IA_CSS_ERR_INVALID_ARGUMENTS; - return IA_CSS_SUCCESS; -} - -enum ia_css_err ia_css_util_check_vf_out_info( - const struct ia_css_frame_info * const out_info, - const struct ia_css_frame_info * const vf_info) -{ - enum ia_css_err err; - - assert(out_info != NULL); - assert(vf_info != NULL); - - err = ia_css_frame_check_info(out_info); - if (err != IA_CSS_SUCCESS) - return err; - err = ia_css_util_check_vf_info(vf_info); - if (err != IA_CSS_SUCCESS) - return err; - return IA_CSS_SUCCESS; -} - -enum ia_css_err ia_css_util_check_res(unsigned int width, unsigned int height) -{ - /* height can be odd number for jpeg/embedded data from ISYS2401 */ - if (((width == 0) || - (height == 0) || - IS_ODD(width))) { - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - return IA_CSS_SUCCESS; -} - -#ifdef ISP2401 -bool ia_css_util_res_leq(struct ia_css_resolution a, struct ia_css_resolution b) -{ - return a.width <= b.width && a.height <= b.height; -} - -bool ia_css_util_resolution_is_zero(const struct ia_css_resolution resolution) -{ - return (resolution.width == 0) || (resolution.height == 0); -} - -bool ia_css_util_resolution_is_even(const struct ia_css_resolution resolution) -{ - return IS_EVEN(resolution.height) && IS_EVEN(resolution.width); -} - -#endif -bool ia_css_util_is_input_format_raw(enum atomisp_input_format format) -{ - return ((format == ATOMISP_INPUT_FORMAT_RAW_6) || - (format == ATOMISP_INPUT_FORMAT_RAW_7) || - (format == ATOMISP_INPUT_FORMAT_RAW_8) || - (format == ATOMISP_INPUT_FORMAT_RAW_10) || - (format == ATOMISP_INPUT_FORMAT_RAW_12)); - /* raw_14 and raw_16 are not supported as input formats to the ISP. 
- * They can only be copied to a frame in memory using the - * copy binary. - */ -} - -bool ia_css_util_is_input_format_yuv(enum atomisp_input_format format) -{ - return format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY || - format == ATOMISP_INPUT_FORMAT_YUV420_8 || - format == ATOMISP_INPUT_FORMAT_YUV420_10 || - format == ATOMISP_INPUT_FORMAT_YUV420_16 || - format == ATOMISP_INPUT_FORMAT_YUV422_8 || - format == ATOMISP_INPUT_FORMAT_YUV422_10 || - format == ATOMISP_INPUT_FORMAT_YUV422_16; -} - -enum ia_css_err ia_css_util_check_input( - const struct ia_css_stream_config * const stream_config, - bool must_be_raw, - bool must_be_yuv) -{ - assert(stream_config != NULL); - - if (stream_config == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - -#ifdef IS_ISP_2400_SYSTEM - if (stream_config->input_config.effective_res.width == 0 || - stream_config->input_config.effective_res.height == 0) - return IA_CSS_ERR_INVALID_ARGUMENTS; -#endif - if (must_be_raw && - !ia_css_util_is_input_format_raw(stream_config->input_config.format)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - if (must_be_yuv && - !ia_css_util_is_input_format_yuv(stream_config->input_config.format)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - return IA_CSS_SUCCESS; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.c deleted file mode 100644 index 325b821f276c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.c +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Generated code: do not edit or commmit. */ - -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_pipeline.h" -#include "ia_css_isp_configs.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_iterator( - const struct ia_css_binary *binary, - const struct ia_css_iterator_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.iterator.size; - offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset; - } - if (size) { - ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_copy_output( - const struct ia_css_binary *binary, - const struct ia_css_copy_output_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size; 
- offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset; - } - if (size) { - ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_crop( - const struct ia_css_binary *binary, - const struct ia_css_crop_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.crop.size; - offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset; - } - if (size) { - ia_css_crop_config((struct sh_css_isp_crop_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_fpn( - const struct ia_css_binary *binary, - const struct ia_css_fpn_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.fpn.size; - offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset; - } - if (size) { - ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() leave:\n"); -} - 
-/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_dvs( - const struct ia_css_binary *binary, - const struct ia_css_dvs_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.dvs.size; - offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset; - } - if (size) { - ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_qplane( - const struct ia_css_binary *binary, - const struct ia_css_qplane_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.qplane.size; - offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset; - } - if (size) { - ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output0( - const struct ia_css_binary *binary, - const struct ia_css_output0_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if 
(binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output0.size; - offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset; - } - if (size) { - ia_css_output0_config((struct sh_css_isp_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output1( - const struct ia_css_binary *binary, - const struct ia_css_output1_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output1.size; - offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset; - } - if (size) { - ia_css_output1_config((struct sh_css_isp_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output( - const struct ia_css_binary *binary, - const struct ia_css_output_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output.size; - offset = binary->info->mem_offsets.offsets.config->dmem.output.offset; - } - if (size) { - ia_css_output_config((struct sh_css_isp_output_isp_config *) - 
&binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ -#ifdef ISP2401 - -void -ia_css_configure_sc( - const struct ia_css_binary *binary, - const struct ia_css_sc_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.sc.size; - offset = binary->info->mem_offsets.offsets.config->dmem.sc.offset; - } - if (size) { - ia_css_sc_config((struct sh_css_isp_sc_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ -#endif - -void -ia_css_configure_raw( - const struct ia_css_binary *binary, - const struct ia_css_raw_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.raw.size; - offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset; - } - if (size) { - ia_css_raw_config((struct sh_css_isp_raw_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_tnr( - const struct ia_css_binary *binary, - const struct 
ia_css_tnr_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.tnr.size; - offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset; - } - if (size) { - ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_ref( - const struct ia_css_binary *binary, - const struct ia_css_ref_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.ref.size; - offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset; - } - if (size) { - ia_css_ref_config((struct sh_css_isp_ref_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_vf( - const struct ia_css_binary *binary, - const struct ia_css_vf_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.vf.size; - offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset; - } - if (size) { - 
ia_css_vf_config((struct sh_css_isp_vf_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() leave:\n"); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.h deleted file mode 100644 index 8aacd3dbc05a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.h +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifdef IA_CSS_INCLUDE_CONFIGURATIONS -#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h" -#include "isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h" -#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h" -#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h" -#include "isp/kernels/output/output_1.0/ia_css_output.host.h" -#include "isp/kernels/qplane/qplane_2/ia_css_qplane.host.h" -#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h" -#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h" -#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h" -#ifdef ISP2401 -#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h" -#endif -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h" -#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h" -#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h" -#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */ -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_CONFIG_H -#define _IA_CSS_ISP_CONFIG_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_configuration_ids { - IA_CSS_ITERATOR_CONFIG_ID, - IA_CSS_COPY_OUTPUT_CONFIG_ID, - IA_CSS_CROP_CONFIG_ID, - IA_CSS_FPN_CONFIG_ID, - IA_CSS_DVS_CONFIG_ID, - IA_CSS_QPLANE_CONFIG_ID, - IA_CSS_OUTPUT0_CONFIG_ID, - IA_CSS_OUTPUT1_CONFIG_ID, - IA_CSS_OUTPUT_CONFIG_ID, -#ifdef ISP2401 - IA_CSS_SC_CONFIG_ID, -#endif - IA_CSS_RAW_CONFIG_ID, - IA_CSS_TNR_CONFIG_ID, - IA_CSS_REF_CONFIG_ID, - IA_CSS_VF_CONFIG_ID, - IA_CSS_NUM_CONFIGURATION_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_config_memory_offsets { - struct { - struct ia_css_isp_parameter iterator; - struct ia_css_isp_parameter copy_output; - struct ia_css_isp_parameter crop; - struct ia_css_isp_parameter fpn; - struct ia_css_isp_parameter dvs; - struct ia_css_isp_parameter qplane; - struct ia_css_isp_parameter output0; - struct ia_css_isp_parameter output1; - struct ia_css_isp_parameter 
output; -#ifdef ISP2401 - struct ia_css_isp_parameter sc; -#endif - struct ia_css_isp_parameter raw; - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter ref; - struct ia_css_isp_parameter vf; - } dmem; -}; - -#if defined(IA_CSS_INCLUDE_CONFIGURATIONS) - -#include "ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_iterator( - const struct ia_css_binary *binary, - const struct ia_css_iterator_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_copy_output( - const struct ia_css_binary *binary, - const struct ia_css_copy_output_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_crop( - const struct ia_css_binary *binary, - const struct ia_css_crop_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_fpn( - const struct ia_css_binary *binary, - const struct ia_css_fpn_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_dvs( - const struct ia_css_binary *binary, - const struct ia_css_dvs_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_qplane( - const struct ia_css_binary *binary, - const struct ia_css_qplane_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output0( - const struct ia_css_binary *binary, - const struct ia_css_output0_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output1( - const struct ia_css_binary *binary, - const struct ia_css_output1_configuration 
*config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output( - const struct ia_css_binary *binary, - const struct ia_css_output_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -#ifdef ISP2401 -void -ia_css_configure_sc( - const struct ia_css_binary *binary, - const struct ia_css_sc_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -#endif -void -ia_css_configure_raw( - const struct ia_css_binary *binary, - const struct ia_css_raw_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_tnr( - const struct ia_css_binary *binary, - const struct ia_css_tnr_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_ref( - const struct ia_css_binary *binary, - const struct ia_css_ref_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_vf( - const struct ia_css_binary *binary, - const struct ia_css_vf_configuration *config_dmem); - -#endif /* IA_CSS_INCLUDE_CONFIGURATION */ - -#endif /* _IA_CSS_ISP_CONFIG_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.c deleted file mode 100644 index d418e763b755..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.c +++ /dev/null @@ -1,3221 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#define IA_CSS_INCLUDE_PARAMETERS -#include "sh_css_params.h" -#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h" -#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h" -#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h" -#include "isp/kernels/bh/bh_2/ia_css_bh.host.h" -#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h" -#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h" -#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h" -#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h" -#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h" -#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h" -#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h" -#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h" -#include "isp/kernels/de/de_1.0/ia_css_de.host.h" -#include "isp/kernels/de/de_2/ia_css_de2.host.h" -#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h" -#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" -#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h" -#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h" -#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h" -#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h" -#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h" -#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h" -#include "isp/kernels/ob/ob2/ia_css_ob2.host.h" -#include "isp/kernels/output/output_1.0/ia_css_output.host.h" -#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h" -#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h" -#include 
"isp/kernels/sc/sc_1.0/ia_css_sc.host.h" -#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h" -#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h" -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h" -#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h" -#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h" -#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h" -#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h" -#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h" -#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h" -#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h" -#include "isp/kernels/dpc2/ia_css_dpc2.host.h" -#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h" -#include "isp/kernels/bnlm/ia_css_bnlm.host.h" -#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h" -/* Generated code: do not edit or commmit. */ - -#include "ia_css_pipeline.h" -#include "ia_css_isp_params.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_aa( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.aa.size; - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset; - - if (size) { - struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - t->strength = params->aa_config.strength; - } - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_anr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - 
- { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.anr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() enter:\n"); - - ia_css_anr_encode((struct sh_css_isp_anr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->anr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_anr2( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() enter:\n"); - - ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->anr_thres, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bh( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bh.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n"); - - ia_css_bh_encode((struct sh_css_isp_bh_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->s3a_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n"); - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_cnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() enter:\n"); - - ia_css_cnr_encode((struct sh_css_isp_cnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->cnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_crop( - unsigned 
pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.crop.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() enter:\n"); - - ia_css_crop_encode((struct sh_css_isp_crop_isp_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->crop_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_csc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.csc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() enter:\n"); - - ia_css_csc_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_dp( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.dp.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n"); - - ia_css_dp_encode((struct sh_css_isp_dp_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dp_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() enter:\n"); - - ia_css_bnr_encode((struct sh_css_isp_bnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->nr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_de( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.de.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.de.offset; - - if (size) { - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n"); - - ia_css_de_encode((struct sh_css_isp_de_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->de_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ecd( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() enter:\n"); - - ia_css_ecd_encode((struct sh_css_isp_ecd_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ecd_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_formats( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.formats.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() enter:\n"); - - ia_css_formats_encode((struct sh_css_isp_formats_params *) - 
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->formats_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_fpn( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() enter:\n"); - - ia_css_fpn_encode((struct sh_css_isp_fpn_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->fpn_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_gc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.gc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n"); - - ia_css_gc_encode((struct sh_css_isp_gc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->gc_config, -size); - params->isp_params_changed = true; - 
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n"); - - ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->gc_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ce( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ce.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n"); - - ia_css_ce_encode((struct sh_css_isp_ce_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ce_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_yuv2rgb( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params 
!= NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() enter:\n"); - - ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->yuv2rgb_cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_rgb2yuv( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() enter:\n"); - - ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->rgb2yuv_cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_r_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size; - - 
unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() enter:\n"); - - ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset], - ¶ms->r_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_g_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() enter:\n"); - - ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->g_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_b_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() enter:\n"); - - ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset], - ¶ms->b_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_uds( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.uds.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset; - - if (size) { - struct sh_css_sp_uds_params *p; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() enter:\n"); - - p = (struct sh_css_sp_uds_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - p->crop_pos = params->uds_config.crop_pos; - p->uds = params->uds_config.uds; - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_raa( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.raa.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() enter:\n"); - - ia_css_raa_encode((struct sh_css_isp_aa_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->raa_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_s3a( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() enter:\n"); - - ia_css_s3a_encode((struct sh_css_isp_s3a_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->s3a_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ob( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ob.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n"); - - 
ia_css_ob_encode((struct sh_css_isp_ob_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ob_config, -¶ms->stream_configs.ob, size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.ob.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n"); - - ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->ob_config, -¶ms->stream_configs.ob, size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_output( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.output.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.output.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() enter:\n"); - - ia_css_output_encode((struct sh_css_isp_output_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->output_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_process_output() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n"); - - ia_css_sc_encode((struct sh_css_isp_sc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->sc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bds( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bds.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset; - - if (size) { - struct sh_css_isp_bds_params *p; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() enter:\n"); - - p = (struct sh_css_isp_bds_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - p->baf_strength = params->bds_config.strength; - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void 
-ia_css_process_tnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() enter:\n"); - - ia_css_tnr_encode((struct sh_css_isp_tnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->tnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_macc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.macc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() enter:\n"); - - ia_css_macc_encode((struct sh_css_isp_macc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->macc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_horicoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - 
- { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() enter:\n"); - - ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_vertcoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() enter:\n"); - - ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_horiproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() enter:\n"); - - ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_vertproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() enter:\n"); - - ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_horicoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() enter:\n"); - - ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_vertcoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() enter:\n"); - - ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_horiproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() enter:\n"); - - ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_vertproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() enter:\n"); - - ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_wb( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.wb.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n"); - - ia_css_wb_encode((struct sh_css_isp_wb_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->wb_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_nr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.nr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n"); - - ia_css_nr_encode((struct sh_css_isp_ynr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->nr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_yee( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yee.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset; - - if (size) { - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() enter:\n"); - - ia_css_yee_encode((struct sh_css_isp_yee_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->yee_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ynr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() enter:\n"); - - ia_css_ynr_encode((struct sh_css_isp_yee2_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ynr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_fc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n"); - - ia_css_fc_encode((struct sh_css_isp_fc_params *) - 
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->fc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ctc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n"); - - ia_css_ctc_encode((struct sh_css_isp_ctc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ctc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n"); - - ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset], - ¶ms->ctc_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n"); - } - - } -} - -/* Code generated by 
genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr_table( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() enter:\n"); - - ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->xnr_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() enter:\n"); - - ia_css_xnr_encode((struct sh_css_isp_xnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->xnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr3( - unsigned pipe_id, - const struct 
ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n"); - - ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->xnr3_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n"); - } - - } -#ifdef ISP2401 - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n"); - - ia_css_xnr3_vmem_encode((struct sh_css_isp_xnr3_vmem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->xnr3_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n"); - } - - } -#endif -} - -/* Code generated by genparam/gencode.c:gen_param_process_table() */ - -void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) = { - ia_css_process_aa, - ia_css_process_anr, - ia_css_process_anr2, - ia_css_process_bh, - ia_css_process_cnr, - ia_css_process_crop, - ia_css_process_csc, - ia_css_process_dp, - ia_css_process_bnr, - ia_css_process_de, - ia_css_process_ecd, - 
ia_css_process_formats, - ia_css_process_fpn, - ia_css_process_gc, - ia_css_process_ce, - ia_css_process_yuv2rgb, - ia_css_process_rgb2yuv, - ia_css_process_r_gamma, - ia_css_process_g_gamma, - ia_css_process_b_gamma, - ia_css_process_uds, - ia_css_process_raa, - ia_css_process_s3a, - ia_css_process_ob, - ia_css_process_output, - ia_css_process_sc, - ia_css_process_bds, - ia_css_process_tnr, - ia_css_process_macc, - ia_css_process_sdis_horicoef, - ia_css_process_sdis_vertcoef, - ia_css_process_sdis_horiproj, - ia_css_process_sdis_vertproj, - ia_css_process_sdis2_horicoef, - ia_css_process_sdis2_vertcoef, - ia_css_process_sdis2_horiproj, - ia_css_process_sdis2_vertproj, - ia_css_process_wb, - ia_css_process_nr, - ia_css_process_yee, - ia_css_process_ynr, - ia_css_process_fc, - ia_css_process_ctc, - ia_css_process_xnr_table, - ia_css_process_xnr, - ia_css_process_xnr3, -}; - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_dp_config(const struct ia_css_isp_parameters *params, - struct ia_css_dp_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() enter: " - "config=%p\n",config); - - *config = params->dp_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() leave\n"); - ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_dp_config(struct ia_css_isp_parameters *params, - const struct ia_css_dp_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n"); - ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dp_config = *config; - params->config_changed[IA_CSS_DP_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_DP_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_set_dp_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_wb_config(const struct ia_css_isp_parameters *params, - struct ia_css_wb_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() enter: " - "config=%p\n",config); - - *config = params->wb_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() leave\n"); - ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_wb_config(struct ia_css_isp_parameters *params, - const struct ia_css_wb_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n"); - ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->wb_config = *config; - params->config_changed[IA_CSS_WB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_WB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_wb_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_tnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_tnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() enter: " - "config=%p\n",config); - - *config = params->tnr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() leave\n"); - ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_tnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_tnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n"); - ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->tnr_config = *config; - params->config_changed[IA_CSS_TNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_TNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_tnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ob_config(const struct ia_css_isp_parameters *params, - struct ia_css_ob_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() enter: " - "config=%p\n",config); - - *config = params->ob_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() leave\n"); - ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ob_config(struct ia_css_isp_parameters *params, - const struct ia_css_ob_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n"); - ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ob_config = *config; - params->config_changed[IA_CSS_OB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_OB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ob_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_de_config(const struct ia_css_isp_parameters *params, - struct ia_css_de_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() enter: " - "config=%p\n",config); - - *config = params->de_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_get_de_config() leave\n"); - ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_de_config(struct ia_css_isp_parameters *params, - const struct ia_css_de_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n"); - ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->de_config = *config; - params->config_changed[IA_CSS_DE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_DE_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_de_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_anr_config(const struct ia_css_isp_parameters *params, - struct ia_css_anr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() enter: " - "config=%p\n",config); - - *config = params->anr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() leave\n"); - ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n"); - ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->anr_config = *config; - params->config_changed[IA_CSS_ANR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ANR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_anr2_config(const 
struct ia_css_isp_parameters *params, - struct ia_css_anr_thres *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() enter: " - "config=%p\n",config); - - *config = params->anr_thres; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() leave\n"); - ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr2_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_thres *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n"); - ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->anr_thres = *config; - params->config_changed[IA_CSS_ANR2_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ANR2_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr2_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ce_config(const struct ia_css_isp_parameters *params, - struct ia_css_ce_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() enter: " - "config=%p\n",config); - - *config = params->ce_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() leave\n"); - ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ce_config(struct ia_css_isp_parameters *params, - const struct ia_css_ce_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n"); - ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ce_config = *config; - 
params->config_changed[IA_CSS_CE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CE_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ce_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ecd_config(const struct ia_css_isp_parameters *params, - struct ia_css_ecd_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() enter: " - "config=%p\n",config); - - *config = params->ecd_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() leave\n"); - ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ecd_config(struct ia_css_isp_parameters *params, - const struct ia_css_ecd_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n"); - ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ecd_config = *config; - params->config_changed[IA_CSS_ECD_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ECD_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ecd_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ynr_config(const struct ia_css_isp_parameters *params, - struct ia_css_ynr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() enter: " - "config=%p\n",config); - - *config = params->ynr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() leave\n"); - ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void 
-ia_css_set_ynr_config(struct ia_css_isp_parameters *params, - const struct ia_css_ynr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n"); - ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ynr_config = *config; - params->config_changed[IA_CSS_YNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_YNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ynr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_fc_config(const struct ia_css_isp_parameters *params, - struct ia_css_fc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() enter: " - "config=%p\n",config); - - *config = params->fc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() leave\n"); - ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_fc_config(struct ia_css_isp_parameters *params, - const struct ia_css_fc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n"); - ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->fc_config = *config; - params->config_changed[IA_CSS_FC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_FC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_fc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_cnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_cnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() enter: " - "config=%p\n",config); - - *config = params->cnr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() leave\n"); - ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_cnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_cnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n"); - ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->cnr_config = *config; - params->config_changed[IA_CSS_CNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_cnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_macc_config(const struct ia_css_isp_parameters *params, - struct ia_css_macc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() enter: " - "config=%p\n",config); - - *config = params->macc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() leave\n"); - ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_macc_config(struct ia_css_isp_parameters *params, - const struct ia_css_macc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n"); - ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->macc_config = *config; - params->config_changed[IA_CSS_MACC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_MACC_ID] = true; - -#endif - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_macc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ctc_config(const struct ia_css_isp_parameters *params, - struct ia_css_ctc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() enter: " - "config=%p\n",config); - - *config = params->ctc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() leave\n"); - ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ctc_config(struct ia_css_isp_parameters *params, - const struct ia_css_ctc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n"); - ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ctc_config = *config; - params->config_changed[IA_CSS_CTC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CTC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ctc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_aa_config(const struct ia_css_isp_parameters *params, - struct ia_css_aa_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() enter: " - "config=%p\n",config); - - *config = params->aa_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() leave\n"); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_aa_config(struct ia_css_isp_parameters *params, - const struct ia_css_aa_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n"); - params->aa_config = *config; - params->config_changed[IA_CSS_AA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_AA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_aa_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() enter: " - "config=%p\n",config); - - *config = params->yuv2rgb_cc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() leave\n"); - ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n"); - ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->yuv2rgb_cc_config = *config; - params->config_changed[IA_CSS_YUV2RGB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_YUV2RGB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_yuv2rgb_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() enter: " - "config=%p\n",config); - - *config = params->rgb2yuv_cc_config; - - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() leave\n"); - ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n"); - ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->rgb2yuv_cc_config = *config; - params->config_changed[IA_CSS_RGB2YUV_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_RGB2YUV_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_rgb2yuv_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_csc_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() enter: " - "config=%p\n",config); - - *config = params->cc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() leave\n"); - ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_csc_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n"); - ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->cc_config = *config; - params->config_changed[IA_CSS_CSC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CSC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_csc_config() leave: " - "return_void\n"); -} - -/* Code generated 
by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_nr_config(const struct ia_css_isp_parameters *params, - struct ia_css_nr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() enter: " - "config=%p\n",config); - - *config = params->nr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() leave\n"); - ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_nr_config(struct ia_css_isp_parameters *params, - const struct ia_css_nr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n"); - ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->nr_config = *config; - params->config_changed[IA_CSS_BNR_ID] = true; - params->config_changed[IA_CSS_NR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_NR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_nr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_gc_config(const struct ia_css_isp_parameters *params, - struct ia_css_gc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() enter: " - "config=%p\n",config); - - *config = params->gc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() leave\n"); - ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_gc_config(struct ia_css_isp_parameters *params, - const struct ia_css_gc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 
"ia_css_set_gc_config() enter:\n"); - ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->gc_config = *config; - params->config_changed[IA_CSS_GC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_GC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_gc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() leave\n"); - ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horicoef_config() enter:\n"); - ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horicoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ 
- if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() leave\n"); - ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertcoef_config() enter:\n"); - ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertcoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() leave\n"); - ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params, - const struct 
ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horiproj_config() enter:\n"); - ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horiproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() leave\n"); - ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertproj_config() enter:\n"); - ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - 
params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() leave\n"); - ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horicoef_config() enter:\n"); - ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horicoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() 
enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() leave\n"); - ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertcoef_config() enter:\n"); - ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertcoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() leave\n"); - ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horiproj_config() enter:\n"); - ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horiproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() leave\n"); - ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertproj_config() enter:\n"); - ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; - -#endif - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() enter: " - "config=%p\n",config); - - *config = params->r_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() leave\n"); - ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n"); - ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->r_gamma_table = *config; - params->config_changed[IA_CSS_R_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_R_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_r_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() enter: " - "config=%p\n",config); - - *config = params->g_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() leave\n"); - ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void 
-ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n"); - ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->g_gamma_table = *config; - params->config_changed[IA_CSS_G_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_G_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_g_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() enter: " - "config=%p\n",config); - - *config = params->b_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() leave\n"); - ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n"); - ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->b_gamma_table = *config; - params->config_changed[IA_CSS_B_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_B_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_b_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr_table 
*config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() enter: " - "config=%p\n",config); - - *config = params->xnr_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() leave\n"); - ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_table_config() enter:\n"); - ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr_table = *config; - params->config_changed[IA_CSS_XNR_TABLE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR_TABLE_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_table_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_formats_config(const struct ia_css_isp_parameters *params, - struct ia_css_formats_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() enter: " - "config=%p\n",config); - - *config = params->formats_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() leave\n"); - ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_formats_config(struct ia_css_isp_parameters *params, - const struct ia_css_formats_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n"); - ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - 
params->formats_config = *config; - params->config_changed[IA_CSS_FORMATS_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_FORMATS_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_formats_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() enter: " - "config=%p\n",config); - - *config = params->xnr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() leave\n"); - ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n"); - ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr_config = *config; - params->config_changed[IA_CSS_XNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr3_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() enter: " - "config=%p\n",config); - - *config = params->xnr3_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() leave\n"); - ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code 
generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr3_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr3_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n"); - ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr3_config = *config; - params->config_changed[IA_CSS_XNR3_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR3_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr3_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_s3a_config(const struct ia_css_isp_parameters *params, - struct ia_css_3a_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() enter: " - "config=%p\n",config); - - *config = params->s3a_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() leave\n"); - ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_s3a_config(struct ia_css_isp_parameters *params, - const struct ia_css_3a_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n"); - ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->s3a_config = *config; - params->config_changed[IA_CSS_BH_ID] = true; - params->config_changed[IA_CSS_S3A_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_S3A_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_s3a_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_output_config(const struct ia_css_isp_parameters *params, - 
struct ia_css_output_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() enter: " - "config=%p\n",config); - - *config = params->output_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() leave\n"); - ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_output_config(struct ia_css_isp_parameters *params, - const struct ia_css_output_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n"); - ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->output_config = *config; - params->config_changed[IA_CSS_OUTPUT_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_OUTPUT_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_output_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_get_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -{ - ia_css_get_dp_config(params, config->dp_config); - ia_css_get_wb_config(params, config->wb_config); - ia_css_get_tnr_config(params, config->tnr_config); - ia_css_get_ob_config(params, config->ob_config); - ia_css_get_de_config(params, config->de_config); - ia_css_get_anr_config(params, config->anr_config); - ia_css_get_anr2_config(params, config->anr_thres); - ia_css_get_ce_config(params, config->ce_config); - ia_css_get_ecd_config(params, config->ecd_config); - ia_css_get_ynr_config(params, config->ynr_config); - ia_css_get_fc_config(params, config->fc_config); - ia_css_get_cnr_config(params, config->cnr_config); - ia_css_get_macc_config(params, config->macc_config); - ia_css_get_ctc_config(params, config->ctc_config); - 
ia_css_get_aa_config(params, config->aa_config); - ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config); - ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config); - ia_css_get_csc_config(params, config->cc_config); - ia_css_get_nr_config(params, config->nr_config); - ia_css_get_gc_config(params, config->gc_config); - ia_css_get_sdis_horicoef_config(params, config->dvs_coefs); - ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs); - ia_css_get_sdis_horiproj_config(params, config->dvs_coefs); - ia_css_get_sdis_vertproj_config(params, config->dvs_coefs); - ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs); - ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs); - ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs); - ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs); - ia_css_get_r_gamma_config(params, config->r_gamma_table); - ia_css_get_g_gamma_config(params, config->g_gamma_table); - ia_css_get_b_gamma_config(params, config->b_gamma_table); - ia_css_get_xnr_table_config(params, config->xnr_table); - ia_css_get_formats_config(params, config->formats_config); - ia_css_get_xnr_config(params, config->xnr_config); - ia_css_get_xnr3_config(params, config->xnr3_config); - ia_css_get_s3a_config(params, config->s3a_config); - ia_css_get_output_config(params, config->output_config); -} - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_set_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -{ - ia_css_set_dp_config(params, config->dp_config); - ia_css_set_wb_config(params, config->wb_config); - ia_css_set_tnr_config(params, config->tnr_config); - ia_css_set_ob_config(params, config->ob_config); - ia_css_set_de_config(params, config->de_config); - ia_css_set_anr_config(params, config->anr_config); - ia_css_set_anr2_config(params, config->anr_thres); - ia_css_set_ce_config(params, config->ce_config); - ia_css_set_ecd_config(params, 
config->ecd_config); - ia_css_set_ynr_config(params, config->ynr_config); - ia_css_set_fc_config(params, config->fc_config); - ia_css_set_cnr_config(params, config->cnr_config); - ia_css_set_macc_config(params, config->macc_config); - ia_css_set_ctc_config(params, config->ctc_config); - ia_css_set_aa_config(params, config->aa_config); - ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config); - ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config); - ia_css_set_csc_config(params, config->cc_config); - ia_css_set_nr_config(params, config->nr_config); - ia_css_set_gc_config(params, config->gc_config); - ia_css_set_sdis_horicoef_config(params, config->dvs_coefs); - ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs); - ia_css_set_sdis_horiproj_config(params, config->dvs_coefs); - ia_css_set_sdis_vertproj_config(params, config->dvs_coefs); - ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs); - ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs); - ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs); - ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs); - ia_css_set_r_gamma_config(params, config->r_gamma_table); - ia_css_set_g_gamma_config(params, config->g_gamma_table); - ia_css_set_b_gamma_config(params, config->b_gamma_table); - ia_css_set_xnr_table_config(params, config->xnr_table); - ia_css_set_formats_config(params, config->formats_config); - ia_css_set_xnr_config(params, config->xnr_config); - ia_css_set_xnr3_config(params, config->xnr3_config); - ia_css_set_s3a_config(params, config->s3a_config); - ia_css_set_output_config(params, config->output_config); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.h deleted file mode 100644 index 5b3deb7f74ae..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.h +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_PARAM_H -#define _IA_CSS_ISP_PARAM_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_parameter_ids { - IA_CSS_AA_ID, - IA_CSS_ANR_ID, - IA_CSS_ANR2_ID, - IA_CSS_BH_ID, - IA_CSS_CNR_ID, - IA_CSS_CROP_ID, - IA_CSS_CSC_ID, - IA_CSS_DP_ID, - IA_CSS_BNR_ID, - IA_CSS_DE_ID, - IA_CSS_ECD_ID, - IA_CSS_FORMATS_ID, - IA_CSS_FPN_ID, - IA_CSS_GC_ID, - IA_CSS_CE_ID, - IA_CSS_YUV2RGB_ID, - IA_CSS_RGB2YUV_ID, - IA_CSS_R_GAMMA_ID, - IA_CSS_G_GAMMA_ID, - IA_CSS_B_GAMMA_ID, - IA_CSS_UDS_ID, - IA_CSS_RAA_ID, - IA_CSS_S3A_ID, - IA_CSS_OB_ID, - IA_CSS_OUTPUT_ID, - IA_CSS_SC_ID, - IA_CSS_BDS_ID, - IA_CSS_TNR_ID, - IA_CSS_MACC_ID, - IA_CSS_SDIS_HORICOEF_ID, - IA_CSS_SDIS_VERTCOEF_ID, - IA_CSS_SDIS_HORIPROJ_ID, - IA_CSS_SDIS_VERTPROJ_ID, - IA_CSS_SDIS2_HORICOEF_ID, - IA_CSS_SDIS2_VERTCOEF_ID, - IA_CSS_SDIS2_HORIPROJ_ID, - IA_CSS_SDIS2_VERTPROJ_ID, - IA_CSS_WB_ID, - IA_CSS_NR_ID, - IA_CSS_YEE_ID, - IA_CSS_YNR_ID, - IA_CSS_FC_ID, - IA_CSS_CTC_ID, - IA_CSS_XNR_TABLE_ID, - IA_CSS_XNR_ID, - IA_CSS_XNR3_ID, - IA_CSS_NUM_PARAMETER_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_memory_offsets { - struct { - struct ia_css_isp_parameter aa; - struct 
ia_css_isp_parameter anr; - struct ia_css_isp_parameter bh; - struct ia_css_isp_parameter cnr; - struct ia_css_isp_parameter crop; - struct ia_css_isp_parameter csc; - struct ia_css_isp_parameter dp; - struct ia_css_isp_parameter bnr; - struct ia_css_isp_parameter de; - struct ia_css_isp_parameter ecd; - struct ia_css_isp_parameter formats; - struct ia_css_isp_parameter fpn; - struct ia_css_isp_parameter gc; - struct ia_css_isp_parameter ce; - struct ia_css_isp_parameter yuv2rgb; - struct ia_css_isp_parameter rgb2yuv; - struct ia_css_isp_parameter uds; - struct ia_css_isp_parameter raa; - struct ia_css_isp_parameter s3a; - struct ia_css_isp_parameter ob; - struct ia_css_isp_parameter output; - struct ia_css_isp_parameter sc; - struct ia_css_isp_parameter bds; - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter macc; - struct ia_css_isp_parameter sdis_horiproj; - struct ia_css_isp_parameter sdis_vertproj; - struct ia_css_isp_parameter sdis2_horiproj; - struct ia_css_isp_parameter sdis2_vertproj; - struct ia_css_isp_parameter wb; - struct ia_css_isp_parameter nr; - struct ia_css_isp_parameter yee; - struct ia_css_isp_parameter ynr; - struct ia_css_isp_parameter fc; - struct ia_css_isp_parameter ctc; - struct ia_css_isp_parameter xnr; - struct ia_css_isp_parameter xnr3; - struct ia_css_isp_parameter get; - struct ia_css_isp_parameter put; - } dmem; - struct { - struct ia_css_isp_parameter anr2; - struct ia_css_isp_parameter ob; - struct ia_css_isp_parameter sdis_horicoef; - struct ia_css_isp_parameter sdis_vertcoef; - struct ia_css_isp_parameter sdis2_horicoef; - struct ia_css_isp_parameter sdis2_vertcoef; -#ifdef ISP2401 - struct ia_css_isp_parameter xnr3; -#endif - } vmem; - struct { - struct ia_css_isp_parameter bh; - } hmem0; - struct { - struct ia_css_isp_parameter gc; - struct ia_css_isp_parameter g_gamma; - struct ia_css_isp_parameter xnr_table; - } vamem1; - struct { - struct ia_css_isp_parameter r_gamma; - struct ia_css_isp_parameter ctc; - } 
vamem0; - struct { - struct ia_css_isp_parameter b_gamma; - } vamem2; -}; - -#if defined(IA_CSS_INCLUDE_PARAMETERS) - -#include "ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/gencode.c:gen_param_process_table() */ - -struct ia_css_pipeline_stage; /* forward declaration */ - -extern void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_dp_config(struct ia_css_isp_parameters *params, - const struct ia_css_dp_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_wb_config(struct ia_css_isp_parameters *params, - const struct ia_css_wb_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_tnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_tnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ob_config(struct ia_css_isp_parameters *params, - const struct ia_css_ob_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_de_config(struct ia_css_isp_parameters *params, - const struct ia_css_de_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr2_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_thres *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ce_config(struct ia_css_isp_parameters *params, - const struct ia_css_ce_config *config); - -/* Code generated by 
genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ecd_config(struct ia_css_isp_parameters *params, - const struct ia_css_ecd_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ynr_config(struct ia_css_isp_parameters *params, - const struct ia_css_ynr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_fc_config(struct ia_css_isp_parameters *params, - const struct ia_css_fc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_cnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_cnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_macc_config(struct ia_css_isp_parameters *params, - const struct ia_css_macc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ctc_config(struct ia_css_isp_parameters *params, - const struct ia_css_ctc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_aa_config(struct ia_css_isp_parameters *params, - const struct ia_css_aa_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_csc_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_nr_config(struct ia_css_isp_parameters *params, - const struct ia_css_nr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void 
-ia_css_set_gc_config(struct ia_css_isp_parameters *params, - const struct ia_css_gc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params, - const struct 
ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_formats_config(struct ia_css_isp_parameters *params, - const struct ia_css_formats_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr3_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr3_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_s3a_config(struct ia_css_isp_parameters *params, - const struct ia_css_3a_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_output_config(struct ia_css_isp_parameters *params, - const struct ia_css_output_config *config); - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_get_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -; -#ifdef ISP2401 - -#endif -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_set_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -; -#ifdef ISP2401 - -#endif -#endif /* IA_CSS_INCLUDE_PARAMETER */ - -#endif /* _IA_CSS_ISP_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.c 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.c deleted file mode 100644 index fb3ba08f69c1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -/* Generated code: do not edit or commmit. */ - -#include "ia_css_pipeline.h" -#include "ia_css_isp_states.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_aa_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.aa.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset; - - if (size) - memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], 0, size); - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_cnr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() enter:\n"); - - { - unsigned size = 
binary->info->mem_offsets.offsets.state->vmem.cnr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset; - - if (size) { - ia_css_init_cnr_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_cnr2_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset; - - if (size) { - ia_css_init_cnr2_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_dp_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.dp.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset; - - if (size) { - ia_css_init_dp_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_de_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() enter:\n"); - - { - unsigned size = 
binary->info->mem_offsets.offsets.state->vmem.de.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.de.offset; - - if (size) { - ia_css_init_de_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_tnr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->dmem.tnr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset; - - if (size) { - ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_ref_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->dmem.ref.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset; - - if (size) { - ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_ynr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() enter:\n"); - 
- { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.ynr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset; - - if (size) { - ia_css_init_ynr_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_state_init_table() */ - -void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary) = { - ia_css_initialize_aa_state, - ia_css_initialize_cnr_state, - ia_css_initialize_cnr2_state, - ia_css_initialize_dp_state, - ia_css_initialize_de_state, - ia_css_initialize_tnr_state, - ia_css_initialize_ref_state, - ia_css_initialize_ynr_state, -}; - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.h deleted file mode 100644 index 732adafb0a63..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#define IA_CSS_INCLUDE_STATES -#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h" -#include "isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h" -#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h" -#include "isp/kernels/de/de_1.0/ia_css_de.host.h" -#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h" -#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h" -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h" -#include "isp/kernels/dpc2/ia_css_dpc2.host.h" -#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h" -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_STATE_H -#define _IA_CSS_ISP_STATE_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_state_ids { - IA_CSS_AA_STATE_ID, - IA_CSS_CNR_STATE_ID, - IA_CSS_CNR2_STATE_ID, - IA_CSS_DP_STATE_ID, - IA_CSS_DE_STATE_ID, - IA_CSS_TNR_STATE_ID, - IA_CSS_REF_STATE_ID, - IA_CSS_YNR_STATE_ID, - IA_CSS_NUM_STATE_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_state_memory_offsets { - struct { - struct ia_css_isp_parameter aa; - struct ia_css_isp_parameter cnr; - struct ia_css_isp_parameter cnr2; - struct ia_css_isp_parameter dp; - struct ia_css_isp_parameter de; - struct ia_css_isp_parameter ynr; - } vmem; - struct { - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter ref; - } dmem; -}; - -#if defined(IA_CSS_INCLUDE_STATES) - -#include "ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/genstate.c:gen_state_init_table() */ - -extern void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary); - -#endif /* IA_CSS_INCLUDE_STATE */ - -#endif /* _IA_CSS_ISP_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/bits.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/bits.h deleted file mode 100644 index 
e71e33d9d143..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/bits.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HRT_BITS_H -#define _HRT_BITS_H - -#include "defs.h" - -#define _hrt_ones(n) HRTCAT(_hrt_ones_, n) -#define _hrt_ones_0x0 0x00000000U -#define _hrt_ones_0x1 0x00000001U -#define _hrt_ones_0x2 0x00000003U -#define _hrt_ones_0x3 0x00000007U -#define _hrt_ones_0x4 0x0000000FU -#define _hrt_ones_0x5 0x0000001FU -#define _hrt_ones_0x6 0x0000003FU -#define _hrt_ones_0x7 0x0000007FU -#define _hrt_ones_0x8 0x000000FFU -#define _hrt_ones_0x9 0x000001FFU -#define _hrt_ones_0xA 0x000003FFU -#define _hrt_ones_0xB 0x000007FFU -#define _hrt_ones_0xC 0x00000FFFU -#define _hrt_ones_0xD 0x00001FFFU -#define _hrt_ones_0xE 0x00003FFFU -#define _hrt_ones_0xF 0x00007FFFU -#define _hrt_ones_0x10 0x0000FFFFU -#define _hrt_ones_0x11 0x0001FFFFU -#define _hrt_ones_0x12 0x0003FFFFU -#define _hrt_ones_0x13 0x0007FFFFU -#define _hrt_ones_0x14 0x000FFFFFU -#define _hrt_ones_0x15 0x001FFFFFU -#define _hrt_ones_0x16 0x003FFFFFU -#define _hrt_ones_0x17 0x007FFFFFU -#define _hrt_ones_0x18 0x00FFFFFFU -#define _hrt_ones_0x19 0x01FFFFFFU -#define _hrt_ones_0x1A 0x03FFFFFFU -#define _hrt_ones_0x1B 0x07FFFFFFU -#define _hrt_ones_0x1C 0x0FFFFFFFU -#define _hrt_ones_0x1D 0x1FFFFFFFU -#define _hrt_ones_0x1E 0x3FFFFFFFU -#define _hrt_ones_0x1F 0x7FFFFFFFU -#define _hrt_ones_0x20 0xFFFFFFFFU - -#define 
_hrt_ones_0 _hrt_ones_0x0 -#define _hrt_ones_1 _hrt_ones_0x1 -#define _hrt_ones_2 _hrt_ones_0x2 -#define _hrt_ones_3 _hrt_ones_0x3 -#define _hrt_ones_4 _hrt_ones_0x4 -#define _hrt_ones_5 _hrt_ones_0x5 -#define _hrt_ones_6 _hrt_ones_0x6 -#define _hrt_ones_7 _hrt_ones_0x7 -#define _hrt_ones_8 _hrt_ones_0x8 -#define _hrt_ones_9 _hrt_ones_0x9 -#define _hrt_ones_10 _hrt_ones_0xA -#define _hrt_ones_11 _hrt_ones_0xB -#define _hrt_ones_12 _hrt_ones_0xC -#define _hrt_ones_13 _hrt_ones_0xD -#define _hrt_ones_14 _hrt_ones_0xE -#define _hrt_ones_15 _hrt_ones_0xF -#define _hrt_ones_16 _hrt_ones_0x10 -#define _hrt_ones_17 _hrt_ones_0x11 -#define _hrt_ones_18 _hrt_ones_0x12 -#define _hrt_ones_19 _hrt_ones_0x13 -#define _hrt_ones_20 _hrt_ones_0x14 -#define _hrt_ones_21 _hrt_ones_0x15 -#define _hrt_ones_22 _hrt_ones_0x16 -#define _hrt_ones_23 _hrt_ones_0x17 -#define _hrt_ones_24 _hrt_ones_0x18 -#define _hrt_ones_25 _hrt_ones_0x19 -#define _hrt_ones_26 _hrt_ones_0x1A -#define _hrt_ones_27 _hrt_ones_0x1B -#define _hrt_ones_28 _hrt_ones_0x1C -#define _hrt_ones_29 _hrt_ones_0x1D -#define _hrt_ones_30 _hrt_ones_0x1E -#define _hrt_ones_31 _hrt_ones_0x1F -#define _hrt_ones_32 _hrt_ones_0x20 - -#define _hrt_mask(b, n) \ - (_hrt_ones(n) << (b)) -#define _hrt_get_bits(w, b, n) \ - (((w) >> (b)) & _hrt_ones(n)) -#define _hrt_set_bits(w, b, n, v) \ - (((w) & ~_hrt_mask(b, n)) | (((v) & _hrt_ones(n)) << (b))) -#define _hrt_get_bit(w, b) \ - (((w) >> (b)) & 1) -#define _hrt_set_bit(w, b, v) \ - (((w) & (~(1 << (b)))) | (((v)&1) << (b))) -#define _hrt_set_lower_half(w, v) \ - _hrt_set_bits(w, 0, 16, v) -#define _hrt_set_upper_half(w, v) \ - _hrt_set_bits(w, 16, 16, v) - -#endif /* _HRT_BITS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/cell_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/cell_params.h deleted file mode 100644 index b5756bfe8eb6..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/cell_params.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _cell_params_h -#define _cell_params_h - -#define SP_PMEM_LOG_WIDTH_BITS 6 /*Width of PC, 64 bits, 8 bytes*/ -#define SP_ICACHE_TAG_BITS 4 /*size of tag*/ -#define SP_ICACHE_SET_BITS 8 /* 256 sets*/ -#define SP_ICACHE_BLOCKS_PER_SET_BITS 1 /* 2 way associative*/ -#define SP_ICACHE_BLOCK_ADDRESS_BITS 11 /* 2048 lines capacity*/ - -#define SP_ICACHE_ADDRESS_BITS \ - (SP_ICACHE_TAG_BITS+SP_ICACHE_BLOCK_ADDRESS_BITS) - -#define SP_PMEM_DEPTH (1< input_selector*/ -/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! 
*/ -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 43 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 30 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ? -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ? -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? 
Option 3 for depacketiser -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding - -/* definition for state machine of data FIFO for decode different type of data */ -#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9 -#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7 -#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7 - -#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN - -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */ - -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8 - -#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5 -#define 
_HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6 - - -/* packet bit definition */ -#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32 -#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1 -#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22 -#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2 -#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16 -#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0 -#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16 -#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0 -#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32 - - -/*************************************************************************************************/ -/* Custom Decoding */ -/* These Custom Defs are defined based on design-time config in "csi_be_pixel_formatter.chdl" !! */ -/*************************************************************************************************/ -#define BE_CUST_EN_IDX 0 /* 2bits */ -#define BE_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */ -#define BE_CUST_EN_WIDTH 8 -#define BE_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */ -#define BE_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */ - -/* Data State config = {get_bits(6bits), valid(1bit)} */ -#define BE_CUST_DATA_STATE_S0_IDX 0 /* 7bits */ -#define BE_CUST_DATA_STATE_S1_IDX 7 /* 7bits */ -#define BE_CUST_DATA_STATE_S2_IDX 14 /* 7bits */ -#define BE_CUST_DATA_STATE_WIDTH 21 -#define BE_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */ -#define BE_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */ - -/* Pixel Extractor config */ -#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 5bits */ -#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 5 /* 5bits */ -#define BE_CUST_PIX_EXT_PIX_MASK_IDX 10 /* 18bits */ -#define BE_CUST_PIX_EXT_PIX_EN_IDX 28 /* 1bits */ -#define BE_CUST_PIX_EXT_WIDTH 29 - -/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */ -#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */ -#define 
BE_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_WIDTH 16 -#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */ -#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */ -#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */ -#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */ - -#endif /* _mipi_backend_common_defs_h_ */ -#endif /* _css_receiver_2400_common_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_defs.h deleted file mode 100644 index 6f5b7d3d3715..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_defs.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _css_receiver_2400_defs_h_ -#define _css_receiver_2400_defs_h_ - -#include "css_receiver_2400_common_defs.h" - -#define CSS_RECEIVER_DATA_WIDTH 8 -#define CSS_RECEIVER_RX_TRIG 4 -#define CSS_RECEIVER_RF_WORD 32 -#define CSS_RECEIVER_IMG_PROC_RF_ADDR 10 -#define CSS_RECEIVER_CSI_RF_ADDR 4 -#define CSS_RECEIVER_DATA_OUT 12 -#define CSS_RECEIVER_CHN_NO 2 -#define CSS_RECEIVER_DWORD_CNT 11 -#define CSS_RECEIVER_FORMAT_TYP 5 -#define CSS_RECEIVER_HRESPONSE 2 -#define CSS_RECEIVER_STATE_WIDTH 3 -#define CSS_RECEIVER_FIFO_DAT 32 -#define CSS_RECEIVER_CNT_VAL 2 -#define CSS_RECEIVER_PRED10_VAL 10 -#define CSS_RECEIVER_PRED12_VAL 12 -#define CSS_RECEIVER_CNT_WIDTH 8 -#define CSS_RECEIVER_WORD_CNT 16 -#define CSS_RECEIVER_PIXEL_LEN 6 -#define CSS_RECEIVER_PIXEL_CNT 5 -#define CSS_RECEIVER_COMP_8_BIT 8 -#define CSS_RECEIVER_COMP_7_BIT 7 -#define CSS_RECEIVER_COMP_6_BIT 6 - -#define CSI_CONFIG_WIDTH 4 - -/* division of gen_short data, ch_id and fmt_type over streaming data interface */ -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB 0 -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB - 1) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB - 1) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH - 1) - -#define _HRT_CSS_RECEIVER_2400_REG_ALIGN 4 -#define _HRT_CSS_RECEIVER_2400_BYTES_PER_PKT 4 - -#define hrt_css_receiver_2400_4_lane_port_offset 0x100 -#define hrt_css_receiver_2400_1_lane_port_offset 0x200 -#define 
hrt_css_receiver_2400_2_lane_port_offset 0x300 -#define hrt_css_receiver_2400_backend_port_offset 0x100 - -#define _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX 0 -#define _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX 1 -#define _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX 2 -#define _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX 3 -#define _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX 4 -#define _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX 7 -#define _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX 8 -#define _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX 9 -#define _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX 10 -#define _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX 11 -#define _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX 12 -#define _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX 13 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX 14 -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX 15 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX 16 -#define _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX 17 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX 18 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX 19 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX 20 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX 21 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX 22 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX 23 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX 24 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX 25 -#define _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX 26 -#define _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX 27 -#define _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX 28 - -/* Interrupt bits for IRQ_STATUS and IRQ_ENABLE registers */ -#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT 0 -#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT 1 -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT 2 -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT 3 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT 4 -#define 
_HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT 5 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT 6 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT 7 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT 8 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT 9 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT 10 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT 11 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT 12 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT 13 -#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT 14 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT 15 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT 16 - -#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_ "Fifo Overrun" -#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_CAUSE_ "Reserved" -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_CAUSE_ "Sleep mode entry" -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_CAUSE_ "Sleep mode exit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_CAUSE_ "Error high speed SOT" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_CAUSE_ "Error high speed sync SOT" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_CAUSE_ "Error control" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_ "Error correction double bit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_CAUSE_ "Error correction single bit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_CAUSE_ "No error" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_ "Error cyclic redundancy check" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_CAUSE_ "Error id" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_CAUSE_ "Error frame sync" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_CAUSE_ "Error frame data" -#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_CAUSE_ "Data time-out" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_CAUSE_ "Error escape" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_CAUSE_ "Error line sync" - -/* Bits for CSI2_DEVICE_READY 
register */ -#define _HRT_CSS_RECEIVER_2400_CSI2_DEVICE_READY_IDX 0 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_INIT_TIME_OUT_ERR_IDX 2 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_OVER_RUN_ERR_IDX 3 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_SOT_SYNC_ERR_IDX 4 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_RECEIVE_DATA_TIME_OUT_ERR_IDX 5 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_ECC_TWO_BIT_ERR_IDX 6 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_DATA_ID_ERR_IDX 7 - - -/* Bits for CSI2_FUNC_PROG register */ -#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS 19 - -/* Bits for INIT_COUNT register */ -#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_IDX 0 -#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_BITS 16 - -/* Bits for COUNT registers */ -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_BITS 8 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_BITS 8 - -/* Bits for RAW116_18_DATAID register */ -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_BITS 6 - -/* Bits for COMP_FORMAT register, this selects the compression data format */ -#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS 8 -#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX (_HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX + _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS) -#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_BITS 8 - -/* Bits for COMP_PREDICT register, this selects the predictor algorithm */ -#define _HRT_CSS_RECEIVER_2400_PREDICT_NO_COMP 0 -#define _HRT_CSS_RECEIVER_2400_PREDICT_1 1 -#define _HRT_CSS_RECEIVER_2400_PREDICT_2 2 - -/* Number of bits used for the delay registers */ -#define _HRT_CSS_RECEIVER_2400_DELAY_BITS 8 - -/* Bits for 
COMP_SCHEME register, this selects the compression scheme for a VC */ -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD1_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD2_BITS_IDX 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD3_BITS_IDX 10 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD4_BITS_IDX 15 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD5_BITS_IDX 20 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD6_BITS_IDX 25 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD7_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD8_BITS_IDX 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_BITS_BITS 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_BITS 3 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_IDX 3 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_BITS 2 - - -/* BITS for backend RAW16 and RAW 18 registers */ - -#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_BITS 2 -#define _HRT_CSS_RECEIVER_2400_RAW18_EN_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW18_EN_BITS 1 - -#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_BITS 2 -#define _HRT_CSS_RECEIVER_2400_RAW16_EN_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW16_EN_BITS 1 - -/* These hsync and vsync values are for HSS simulation only */ -#define _HRT_CSS_RECEIVER_2400_HSYNC_VAL (1<<16) -#define _HRT_CSS_RECEIVER_2400_VSYNC_VAL (1<<17) - -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH 28 -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB 0 -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB + CSS_RECEIVER_DATA_OUT - 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT 
(_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB + CSS_RECEIVER_DATA_OUT - 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT + 1) - -// SH Backend Register IDs -#define _HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX 1 -#define _HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX 2 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX 4 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX 5 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX 7 -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX 8 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX 9 -#define _HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX 10 -#define _HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX 11 -#define _HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX 12 -#define _HRT_CSS_RECEIVER_2400_BE_CUST_EN_REG_IDX 13 -#define _HRT_CSS_RECEIVER_2400_BE_CUST_DATA_STATE_REG_IDX 14 /* Data State 0,1,2 config */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P0_REG_IDX 15 /* Pixel Extractor config for Data State 0 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P1_REG_IDX 16 /* Pixel Extractor config for Data State 0 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P2_REG_IDX 17 /* Pixel Extractor config for Data State 0 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P3_REG_IDX 18 /* Pixel Extractor config for Data State 0 & Pix 3 */ 
-#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P0_REG_IDX 19 /* Pixel Extractor config for Data State 1 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P1_REG_IDX 20 /* Pixel Extractor config for Data State 1 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P2_REG_IDX 21 /* Pixel Extractor config for Data State 1 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P3_REG_IDX 22 /* Pixel Extractor config for Data State 1 & Pix 3 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P0_REG_IDX 23 /* Pixel Extractor config for Data State 2 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P1_REG_IDX 24 /* Pixel Extractor config for Data State 2 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P2_REG_IDX 25 /* Pixel Extractor config for Data State 2 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P3_REG_IDX 26 /* Pixel Extractor config for Data State 2 & Pix 3 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_VALID_EOP_REG_IDX 27 /* Pixel Valid & EoP config for Pix 0,1,2,3 */ - -#define _HRT_CSS_RECEIVER_2400_BE_NOF_REGISTERS 28 - -#define _HRT_CSS_RECEIVER_2400_BE_SRST_HE 0 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_RCF 1 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_PF 2 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_SM 3 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_PD 4 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_SD 5 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_OT 6 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_BC 7 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_WIDTH 8 - -#endif /* _css_receiver_2400_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/defs.h deleted file mode 100644 index 47505f41790c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/defs.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HRT_DEFS_H_ -#define _HRT_DEFS_H_ - -#ifndef HRTCAT -#define _HRTCAT(m, n) m##n -#define HRTCAT(m, n) _HRTCAT(m, n) -#endif - -#ifndef HRTSTR -#define _HRTSTR(x) #x -#define HRTSTR(x) _HRTSTR(x) -#endif - -#ifndef HRTMIN -#define HRTMIN(a, b) (((a) < (b)) ? (a) : (b)) -#endif - -#ifndef HRTMAX -#define HRTMAX(a, b) (((a) > (b)) ? (a) : (b)) -#endif - -#endif /* _HRT_DEFS_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/dma_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/dma_v2_defs.h deleted file mode 100644 index d184a8b313c9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/dma_v2_defs.h +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _dma_v2_defs_h -#define _dma_v2_defs_h - -#define _DMA_V2_NUM_CHANNELS_ID MaxNumChannels -#define _DMA_V2_CONNECTIONS_ID Connections -#define _DMA_V2_DEV_ELEM_WIDTHS_ID DevElemWidths -#define _DMA_V2_DEV_FIFO_DEPTH_ID DevFifoDepth -#define _DMA_V2_DEV_FIFO_RD_LAT_ID DevFifoRdLat -#define _DMA_V2_DEV_FIFO_LAT_BYPASS_ID DevFifoRdLatBypass -#define _DMA_V2_DEV_NO_BURST_ID DevNoBurst -#define _DMA_V2_DEV_RD_ACCEPT_ID DevRdAccept -#define _DMA_V2_DEV_SRMD_ID DevSRMD -#define _DMA_V2_DEV_HAS_CRUN_ID CRunMasters -#define _DMA_V2_CTRL_ACK_FIFO_DEPTH_ID CtrlAckFifoDepth -#define _DMA_V2_CMD_FIFO_DEPTH_ID CommandFifoDepth -#define _DMA_V2_CMD_FIFO_RD_LAT_ID CommandFifoRdLat -#define _DMA_V2_CMD_FIFO_LAT_BYPASS_ID CommandFifoRdLatBypass -#define _DMA_V2_NO_PACK_ID has_no_pack - -#define _DMA_V2_REG_ALIGN 4 -#define _DMA_V2_REG_ADDR_BITS 2 - -/* Command word */ -#define _DMA_V2_CMD_IDX 0 -#define _DMA_V2_CMD_BITS 6 -#define _DMA_V2_CHANNEL_IDX (_DMA_V2_CMD_IDX + _DMA_V2_CMD_BITS) -#define _DMA_V2_CHANNEL_BITS 5 - -/* The command to set a parameter contains the PARAM field next */ -#define _DMA_V2_PARAM_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS) -#define _DMA_V2_PARAM_BITS 4 - -/* Commands to read, write or init specific blocks contain these - three values */ -#define _DMA_V2_SPEC_DEV_A_XB_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS) -#define _DMA_V2_SPEC_DEV_A_XB_BITS 8 -#define _DMA_V2_SPEC_DEV_B_XB_IDX (_DMA_V2_SPEC_DEV_A_XB_IDX + _DMA_V2_SPEC_DEV_A_XB_BITS) -#define _DMA_V2_SPEC_DEV_B_XB_BITS 8 -#define _DMA_V2_SPEC_YB_IDX (_DMA_V2_SPEC_DEV_B_XB_IDX + _DMA_V2_SPEC_DEV_B_XB_BITS) -#define _DMA_V2_SPEC_YB_BITS (32-_DMA_V2_SPEC_DEV_B_XB_BITS-_DMA_V2_SPEC_DEV_A_XB_BITS-_DMA_V2_CMD_BITS-_DMA_V2_CHANNEL_BITS) - -/* */ -#define _DMA_V2_CMD_CTRL_IDX 4 -#define _DMA_V2_CMD_CTRL_BITS 4 - -/* Packing setup word */ -#define _DMA_V2_CONNECTION_IDX 0 -#define _DMA_V2_CONNECTION_BITS 4 -#define _DMA_V2_EXTENSION_IDX (_DMA_V2_CONNECTION_IDX + 
_DMA_V2_CONNECTION_BITS) -#define _DMA_V2_EXTENSION_BITS 1 - -/* Elements packing word */ -#define _DMA_V2_ELEMENTS_IDX 0 -#define _DMA_V2_ELEMENTS_BITS 8 -#define _DMA_V2_LEFT_CROPPING_IDX (_DMA_V2_ELEMENTS_IDX + _DMA_V2_ELEMENTS_BITS) -#define _DMA_V2_LEFT_CROPPING_BITS 8 - -#define _DMA_V2_WIDTH_IDX 0 -#define _DMA_V2_WIDTH_BITS 16 - -#define _DMA_V2_HEIGHT_IDX 0 -#define _DMA_V2_HEIGHT_BITS 16 - -#define _DMA_V2_STRIDE_IDX 0 -#define _DMA_V2_STRIDE_BITS 32 - -/* Command IDs */ -#define _DMA_V2_MOVE_B2A_COMMAND 0 -#define _DMA_V2_MOVE_B2A_BLOCK_COMMAND 1 -#define _DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND 2 -#define _DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND 3 -#define _DMA_V2_MOVE_A2B_COMMAND 4 -#define _DMA_V2_MOVE_A2B_BLOCK_COMMAND 5 -#define _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND 6 -#define _DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND 7 -#define _DMA_V2_INIT_A_COMMAND 8 -#define _DMA_V2_INIT_A_BLOCK_COMMAND 9 -#define _DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND 10 -#define _DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND 11 -#define _DMA_V2_INIT_B_COMMAND 12 -#define _DMA_V2_INIT_B_BLOCK_COMMAND 13 -#define _DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND 14 -#define _DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND 15 -#define _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_A_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_B_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND + 
16) -#define _DMA_V2_CONFIG_CHANNEL_COMMAND 32 -#define _DMA_V2_SET_CHANNEL_PARAM_COMMAND 33 -#define _DMA_V2_SET_CRUN_COMMAND 62 - -/* Channel Parameter IDs */ -#define _DMA_V2_PACKING_SETUP_PARAM 0 -#define _DMA_V2_STRIDE_A_PARAM 1 -#define _DMA_V2_ELEM_CROPPING_A_PARAM 2 -#define _DMA_V2_WIDTH_A_PARAM 3 -#define _DMA_V2_STRIDE_B_PARAM 4 -#define _DMA_V2_ELEM_CROPPING_B_PARAM 5 -#define _DMA_V2_WIDTH_B_PARAM 6 -#define _DMA_V2_HEIGHT_PARAM 7 -#define _DMA_V2_QUEUED_CMDS 8 - -/* Parameter Constants */ -#define _DMA_V2_ZERO_EXTEND 0 -#define _DMA_V2_SIGN_EXTEND 1 - - /* SLAVE address map */ -#define _DMA_V2_SEL_FSM_CMD 0 -#define _DMA_V2_SEL_CH_REG 1 -#define _DMA_V2_SEL_CONN_GROUP 2 -#define _DMA_V2_SEL_DEV_INTERF 3 - -#define _DMA_V2_ADDR_SEL_COMP_IDX 12 -#define _DMA_V2_ADDR_SEL_COMP_BITS 4 -#define _DMA_V2_ADDR_SEL_CH_REG_IDX 2 -#define _DMA_V2_ADDR_SEL_CH_REG_BITS 6 -#define _DMA_V2_ADDR_SEL_PARAM_IDX (_DMA_V2_ADDR_SEL_CH_REG_BITS+_DMA_V2_ADDR_SEL_CH_REG_IDX) -#define _DMA_V2_ADDR_SEL_PARAM_BITS 4 - -#define _DMA_V2_ADDR_SEL_GROUP_COMP_IDX 2 -#define _DMA_V2_ADDR_SEL_GROUP_COMP_BITS 6 -#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX (_DMA_V2_ADDR_SEL_GROUP_COMP_BITS + _DMA_V2_ADDR_SEL_GROUP_COMP_IDX) -#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS 4 - -#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX 2 -#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS 6 -#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX (_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX+_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS) -#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS 4 - -#define _DMA_V2_FSM_GROUP_CMD_IDX 0 -#define _DMA_V2_FSM_GROUP_ADDR_SRC_IDX 1 -#define _DMA_V2_FSM_GROUP_ADDR_DEST_IDX 2 -#define _DMA_V2_FSM_GROUP_CMD_CTRL_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_IDX 4 -#define _DMA_V2_FSM_GROUP_FSM_PACK_IDX 5 -#define _DMA_V2_FSM_GROUP_FSM_REQ_IDX 6 -#define _DMA_V2_FSM_GROUP_FSM_WR_IDX 7 - -#define _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX 1 -#define 
_DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX 4 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX 5 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX 6 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX 7 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX 8 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX 9 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX 10 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX 11 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX 12 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 13 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX 14 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX 15 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_CMD_CTRL_IDX 15 - -#define _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX 3 - -#define _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_REQ_XB_REMAINING_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_BURST_IDX 4 - -#define _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_WR_XB_REMAINING_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_BURST_IDX 4 - -#define _DMA_V2_DEV_INTERF_REQ_SIDE_STATUS_IDX 0 -#define _DMA_V2_DEV_INTERF_SEND_SIDE_STATUS_IDX 1 -#define _DMA_V2_DEV_INTERF_FIFO_STATUS_IDX 2 -#define _DMA_V2_DEV_INTERF_REQ_ONLY_COMPLETE_BURST_IDX 3 -#define _DMA_V2_DEV_INTERF_MAX_BURST_IDX 4 -#define _DMA_V2_DEV_INTERF_CHK_ADDR_ALIGN 5 - -#endif /* _dma_v2_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gdc_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gdc_v2_defs.h 
deleted file mode 100644 index 77722d205701..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gdc_v2_defs.h +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef HRT_GDC_v2_defs_h_ -#define HRT_GDC_v2_defs_h_ - -#define HRT_GDC_IS_V2 - -#define HRT_GDC_N 1024 /* Top-level design constant, equal to the number of entries in the LUT */ -#define HRT_GDC_FRAC_BITS 10 /* Number of fractional bits in the GDC block, driven by the size of the LUT */ - -#define HRT_GDC_BLI_FRAC_BITS 4 /* Number of fractional bits for the bi-linear interpolation type */ -#define HRT_GDC_BLI_COEF_ONE (1 << HRT_GDC_BLI_FRAC_BITS) - -#define HRT_GDC_BCI_COEF_BITS 14 /* 14 bits per coefficient */ -#define HRT_GDC_BCI_COEF_ONE (1 << (HRT_GDC_BCI_COEF_BITS-2)) /* We represent signed 10 bit coefficients. */ - /* The supported range is [-256, .., +256] */ - /* in 14-bit signed notation, */ - /* We need all ten bits (MSB must be zero). */ - /* -s is inserted to solve this issue, and */ - /* therefore "1" is equal to +256. 
*/ -#define HRT_GDC_BCI_COEF_MASK ((1 << HRT_GDC_BCI_COEF_BITS) - 1) - -#define HRT_GDC_LUT_BYTES (HRT_GDC_N*4*2) /* 1024 addresses, 4 coefficients per address, */ - /* 2 bytes per coefficient */ - -#define _HRT_GDC_REG_ALIGN 4 - - // 31 30 29 25 24 0 - // |-----|---|--------|------------------------| - // | CMD | C | Reg_ID | Value | - - - // There are just two commands possible for the GDC block: - // 1 - Configure reg - // 0 - Data token - - // C - Reserved bit - // Used in protocol to indicate whether it is C-run or other type of runs - // In case of C-run, this bit has a value of 1, for all the other runs, it is 0. - - // Reg_ID - Address of the register to be configured - - // Value - Value to store to the addressed register, maximum of 24 bits - - // Configure reg command is not followed by any other token. - // The address of the register and the data to be filled in is contained in the same token - - // When the first data token is received, it must be: - // 1. FRX and FRY (device configured in one of the scaling modes) ***DEFAULT MODE***, or, - // 2. P0'X (device configured in one of the tetragon modes) - // After the first data token is received, pre-defined number of tokens with the following meaning follow: - // 1. two tokens: SRC address ; DST address - // 2. 
nine tokens: P0'Y, .., P3'Y ; SRC address ; DST address - -#define HRT_GDC_CONFIG_CMD 1 -#define HRT_GDC_DATA_CMD 0 - - -#define HRT_GDC_CMD_POS 31 -#define HRT_GDC_CMD_BITS 1 -#define HRT_GDC_CRUN_POS 30 -#define HRT_GDC_REG_ID_POS 25 -#define HRT_GDC_REG_ID_BITS 5 -#define HRT_GDC_DATA_POS 0 -#define HRT_GDC_DATA_BITS 25 - -#define HRT_GDC_FRYIPXFRX_BITS 26 -#define HRT_GDC_P0X_BITS 23 - - -#define HRT_GDC_MAX_OXDIM (8192-64) -#define HRT_GDC_MAX_OYDIM 4095 -#define HRT_GDC_MAX_IXDIM (8192-64) -#define HRT_GDC_MAX_IYDIM 4095 -#define HRT_GDC_MAX_DS_FAC 16 -#define HRT_GDC_MAX_DX (HRT_GDC_MAX_DS_FAC*HRT_GDC_N - 1) -#define HRT_GDC_MAX_DY HRT_GDC_MAX_DX - - -/* GDC lookup tables entries are 10 bits values, but they're - stored 2 by 2 as 32 bit values, yielding 16 bits per entry. - A GDC lookup table contains 64 * 4 elements */ - -#define HRT_GDC_PERF_1_1_pix 0 -#define HRT_GDC_PERF_2_1_pix 1 -#define HRT_GDC_PERF_1_2_pix 2 -#define HRT_GDC_PERF_2_2_pix 3 - -#define HRT_GDC_NND_MODE 0 -#define HRT_GDC_BLI_MODE 1 -#define HRT_GDC_BCI_MODE 2 -#define HRT_GDC_LUT_MODE 3 - -#define HRT_GDC_SCAN_STB 0 -#define HRT_GDC_SCAN_STR 1 - -#define HRT_GDC_MODE_SCALING 0 -#define HRT_GDC_MODE_TETRAGON 1 - -#define HRT_GDC_LUT_COEFF_OFFSET 16 -#define HRT_GDC_FRY_BIT_OFFSET 16 -// FRYIPXFRX is the only register where we store two values in one field, -// to save one token in the scaling protocol. -// Like this, we have three tokens in the scaling protocol, -// Otherwise, we would have had four. 
-// The register bit-map is: -// 31 26 25 16 15 10 9 0 -// |------|----------|------|----------| -// | XXXX | FRY | IPX | FRX | - - -#define HRT_GDC_CE_FSM0_POS 0 -#define HRT_GDC_CE_FSM0_LEN 2 -#define HRT_GDC_CE_OPY_POS 2 -#define HRT_GDC_CE_OPY_LEN 14 -#define HRT_GDC_CE_OPX_POS 16 -#define HRT_GDC_CE_OPX_LEN 16 -// CHK_ENGINE register bit-map: -// 31 16 15 2 1 0 -// |----------------|-----------|----| -// | OPX | OPY |FSM0| -// However, for the time being at least, -// this implementation is meaningless in hss model, -// So, we just return 0 - - -#define HRT_GDC_CHK_ENGINE_IDX 0 -#define HRT_GDC_WOIX_IDX 1 -#define HRT_GDC_WOIY_IDX 2 -#define HRT_GDC_BPP_IDX 3 -#define HRT_GDC_FRYIPXFRX_IDX 4 -#define HRT_GDC_OXDIM_IDX 5 -#define HRT_GDC_OYDIM_IDX 6 -#define HRT_GDC_SRC_ADDR_IDX 7 -#define HRT_GDC_SRC_END_ADDR_IDX 8 -#define HRT_GDC_SRC_WRAP_ADDR_IDX 9 -#define HRT_GDC_SRC_STRIDE_IDX 10 -#define HRT_GDC_DST_ADDR_IDX 11 -#define HRT_GDC_DST_STRIDE_IDX 12 -#define HRT_GDC_DX_IDX 13 -#define HRT_GDC_DY_IDX 14 -#define HRT_GDC_P0X_IDX 15 -#define HRT_GDC_P0Y_IDX 16 -#define HRT_GDC_P1X_IDX 17 -#define HRT_GDC_P1Y_IDX 18 -#define HRT_GDC_P2X_IDX 19 -#define HRT_GDC_P2Y_IDX 20 -#define HRT_GDC_P3X_IDX 21 -#define HRT_GDC_P3Y_IDX 22 -#define HRT_GDC_PERF_POINT_IDX 23 // 1x1 ; 1x2 ; 2x1 ; 2x2 pixels per cc -#define HRT_GDC_INTERP_TYPE_IDX 24 // NND ; BLI ; BCI ; LUT -#define HRT_GDC_SCAN_IDX 25 // 0 = STB (Slide To Bottom) ; 1 = STR (Slide To Right) -#define HRT_GDC_PROC_MODE_IDX 26 // 0 = Scaling ; 1 = Tetragon - -#define HRT_GDC_LUT_IDX 32 - - -#endif /* HRT_GDC_v2_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_timer_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_timer_defs.h deleted file mode 100644 index 3082e2f5e014..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_timer_defs.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel 
Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _gp_timer_defs_h -#define _gp_timer_defs_h - -#define _HRT_GP_TIMER_REG_ALIGN 4 - -#define HIVE_GP_TIMER_RESET_REG_IDX 0 -#define HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX 1 -#define HIVE_GP_TIMER_ENABLE_REG_IDX(timer) (HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX + 1 + timer) -#define HIVE_GP_TIMER_VALUE_REG_IDX(timer,timers) (HIVE_GP_TIMER_ENABLE_REG_IDX(timers) + timer) -#define HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer,timers) (HIVE_GP_TIMER_VALUE_REG_IDX(timers, timers) + timer) -#define HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer,timers) (HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timers, timers) + timer) -#define HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq,timers) (HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timers, timers) + irq) -#define HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irqs, timers) + irq) -#define HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irqs, timers, irqs) + irq) - -#define HIVE_GP_TIMER_COUNT_TYPE_HIGH 0 -#define HIVE_GP_TIMER_COUNT_TYPE_LOW 1 -#define HIVE_GP_TIMER_COUNT_TYPE_POSEDGE 2 -#define HIVE_GP_TIMER_COUNT_TYPE_NEGEDGE 3 -#define HIVE_GP_TIMER_COUNT_TYPES 4 - -#endif /* _gp_timer_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gpio_block_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gpio_block_defs.h deleted file mode 100644 index 
a807d4c99041..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gpio_block_defs.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _gpio_block_defs_h_ -#define _gpio_block_defs_h_ - -#define _HRT_GPIO_BLOCK_REG_ALIGN 4 - -/* R/W registers */ -#define _gpio_block_reg_do_e 0 -#define _gpio_block_reg_do_select 1 -#define _gpio_block_reg_do_0 2 -#define _gpio_block_reg_do_1 3 -#define _gpio_block_reg_do_pwm_cnt_0 4 -#define _gpio_block_reg_do_pwm_cnt_1 5 -#define _gpio_block_reg_do_pwm_cnt_2 6 -#define _gpio_block_reg_do_pwm_cnt_3 7 -#define _gpio_block_reg_do_pwm_main_cnt 8 -#define _gpio_block_reg_do_pwm_enable 9 -#define _gpio_block_reg_di_debounce_sel 10 -#define _gpio_block_reg_di_debounce_cnt_0 11 -#define _gpio_block_reg_di_debounce_cnt_1 12 -#define _gpio_block_reg_di_debounce_cnt_2 13 -#define _gpio_block_reg_di_debounce_cnt_3 14 -#define _gpio_block_reg_di_active_level 15 - - -/* read-only registers */ -#define _gpio_block_reg_di 16 - -#endif /* _gpio_block_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_defs.h deleted file mode 100644 index 39584996092e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_defs.h +++ /dev/null @@ -1,416 +0,0 @@ -/* - * Support for Intel 
Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _hive_isp_css_defs_h__ -#define _hive_isp_css_defs_h__ - -#define HIVE_ISP_CSS_IS_2400B0_SYSTEM - -#define HIVE_ISP_CTRL_DATA_WIDTH 32 -#define HIVE_ISP_CTRL_ADDRESS_WIDTH 32 -#define HIVE_ISP_CTRL_MAX_BURST_SIZE 1 -#define HIVE_ISP_DDR_ADDRESS_WIDTH 36 - -#define HIVE_ISP_HOST_MAX_BURST_SIZE 8 /* host supports bursts in order to prevent repeating DDRAM accesses */ -#define HIVE_ISP_NUM_GPIO_PINS 12 - -/* This list of vector num_elems/elem_bits pairs is valid both in C as initializer - and in the DMA parameter list */ -#define HIVE_ISP_DDR_DMA_SPECS {{32, 8}, {16, 16}, {18, 14}, {25, 10}, {21, 12}} -#define HIVE_ISP_DDR_WORD_BITS 256 -#define HIVE_ISP_DDR_WORD_BYTES (HIVE_ISP_DDR_WORD_BITS/8) -#define HIVE_ISP_DDR_BYTES (512 * 1024 * 1024) /* hss only */ -#define HIVE_ISP_DDR_BYTES_RTL (127 * 1024 * 1024) /* RTL only */ -#define HIVE_ISP_DDR_SMALL_BYTES (128 * 256 / 8) -#define HIVE_ISP_PAGE_SHIFT 12 -#define HIVE_ISP_PAGE_SIZE (1<_defs.h - */ -typedef enum hrt_isp_css_irq { - hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID , - hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID , - hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID , - hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID , - hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID , - hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID , - hrt_isp_css_irq_gpio_pin_6 = 
HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID , - hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID , - hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID , - hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID , - hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID , - hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID , - hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID , - hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID , - hrt_isp_css_irq_isys = HIVE_GP_DEV_IRQ_ISYS_BIT_ID , - hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID , - hrt_isp_css_irq_ifmt = HIVE_GP_DEV_IRQ_IFMT_BIT_ID , - hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID , - hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID , - hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID , -#ifdef _HIVE_ISP_CSS_2401_SYSTEM - hrt_isp_css_irq_is2401 = HIVE_GP_DEV_IRQ_IS2401_BIT_ID , -#else - hrt_isp_css_irq_isp_pmem_error = HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID , -#endif - hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID , - hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID , - hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID , - hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID , - hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID , - hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID , - hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID , - hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID , - hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID , - hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID , - hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID , - /* this must (obviously) be the last on in the enum */ - hrt_isp_css_irq_num_irqs -} hrt_isp_css_irq_t; - -typedef enum hrt_isp_css_irq_status { - 
hrt_isp_css_irq_status_error, - hrt_isp_css_irq_status_more_irqs, - hrt_isp_css_irq_status_success -} hrt_isp_css_irq_status_t; - -#endif /* _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h deleted file mode 100644 index b4211a0c631a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _hive_isp_css_streaming_to_mipi_types_hrt_h_ -#define _hive_isp_css_streaming_to_mipi_types_hrt_h_ - -#include - -#define _HIVE_ISP_CH_ID_MASK ((1U << HIVE_ISP_CH_ID_BITS)-1) -#define _HIVE_ISP_FMT_TYPE_MASK ((1U << HIVE_ISP_FMT_TYPE_BITS)-1) - -#define _HIVE_STR_TO_MIPI_FMT_TYPE_LSB (HIVE_STR_TO_MIPI_CH_ID_LSB + HIVE_ISP_CH_ID_BITS) -#define _HIVE_STR_TO_MIPI_DATA_B_LSB (HIVE_STR_TO_MIPI_DATA_A_LSB + HIVE_IF_PIXEL_WIDTH) - -#endif /* _hive_isp_css_streaming_to_mipi_types_hrt_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_types.h deleted file mode 100644 index 58b0e6effbd0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_types.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _HRT_HIVE_TYPES_H -#define _HRT_HIVE_TYPES_H - -#include "version.h" -#include "defs.h" - -#ifndef HRTCAT3 -#define _HRTCAT3(m,n,o) m##n##o -#define HRTCAT3(m,n,o) _HRTCAT3(m,n,o) -#endif - -#ifndef HRTCAT4 -#define _HRTCAT4(m,n,o,p) m##n##o##p -#define HRTCAT4(m,n,o,p) _HRTCAT4(m,n,o,p) -#endif - -#ifndef HRTMIN -#define HRTMIN(a,b) (((a)<(b))?(a):(b)) -#endif - -#ifndef HRTMAX -#define HRTMAX(a,b) (((a)>(b))?(a):(b)) -#endif - -/* boolean data type */ -typedef unsigned int hive_bool; -#define hive_false 0 -#define hive_true 1 - -typedef char hive_int8; -typedef short hive_int16; -typedef int hive_int32; -typedef long long hive_int64; - -typedef unsigned char hive_uint8; -typedef unsigned short hive_uint16; -typedef unsigned int hive_uint32; -typedef unsigned long long hive_uint64; - -/* by default assume 32 bit master port (both data and address) */ -#ifndef HRT_DATA_WIDTH -#define HRT_DATA_WIDTH 32 -#endif -#ifndef HRT_ADDRESS_WIDTH -#define HRT_ADDRESS_WIDTH 32 -#endif - -#define HRT_DATA_BYTES (HRT_DATA_WIDTH/8) -#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH/8) - -#if HRT_DATA_WIDTH == 64 -typedef hive_uint64 hrt_data; -#elif HRT_DATA_WIDTH == 32 -typedef hive_uint32 hrt_data; -#else -#error data width not supported -#endif - -#if HRT_ADDRESS_WIDTH == 64 -typedef hive_uint64 hrt_address; -#elif HRT_ADDRESS_WIDTH == 32 -typedef hive_uint32 hrt_address; -#else -#error adddres width not supported -#endif - -/* The SP side representation of an HMM virtual address */ -typedef hive_uint32 hrt_vaddress; - -/* use 64 bit addresses in simulation, where possible */ -typedef hive_uint64 hive_sim_address; - -/* below is for csim, not for hrt, rename and move this elsewhere */ - -typedef unsigned int hive_uint; -typedef hive_uint32 hive_address; -typedef hive_address hive_slave_address; -typedef hive_address hive_mem_address; - -/* MMIO devices */ -typedef hive_uint hive_mmio_id; -typedef hive_mmio_id hive_slave_id; -typedef hive_mmio_id hive_port_id; 
-typedef hive_mmio_id hive_master_id; -typedef hive_mmio_id hive_mem_id; -typedef hive_mmio_id hive_dev_id; -typedef hive_mmio_id hive_fifo_id; - -typedef hive_uint hive_hier_id; -typedef hive_hier_id hive_device_id; -typedef hive_device_id hive_proc_id; -typedef hive_device_id hive_cell_id; -typedef hive_device_id hive_host_id; -typedef hive_device_id hive_bus_id; -typedef hive_device_id hive_bridge_id; -typedef hive_device_id hive_fifo_adapter_id; -typedef hive_device_id hive_custom_device_id; - -typedef hive_uint hive_slot_id; -typedef hive_uint hive_fu_id; -typedef hive_uint hive_reg_file_id; -typedef hive_uint hive_reg_id; - -/* Streaming devices */ -typedef hive_uint hive_outport_id; -typedef hive_uint hive_inport_id; - -typedef hive_uint hive_msink_id; - -/* HRT specific */ -typedef char* hive_program; -typedef char* hive_function; - -#endif /* _HRT_HIVE_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/if_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/if_defs.h deleted file mode 100644 index 7d39e45796ae..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/if_defs.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _IF_DEFS_H -#define _IF_DEFS_H - -#define HIVE_IF_FRAME_REQUEST 0xA000 -#define HIVE_IF_LINES_REQUEST 0xB000 -#define HIVE_IF_VECTORS_REQUEST 0xC000 - -#endif /* _IF_DEFS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h deleted file mode 100644 index 7766f78cd123..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _if_subsystem_defs_h__ -#define _if_subsystem_defs_h__ - -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_1 1 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_2 2 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_3 3 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_4 4 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_5 5 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_6 6 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_7 7 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_FSYNC_LUT_REG 8 -#define HIVE_IFMT_GP_REGS_SRST_IDX 9 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IDX 10 - -#define HIVE_IFMT_GP_REGS_CH_ID_FMT_TYPE_IDX 11 - -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_BASE HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 - -/* order of the input bits for the ifmt irq controller */ -#define HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID 0 -#define HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID 1 -#define HIVE_IFMT_IRQ_IFT_SEC_BIT_ID 2 -#define HIVE_IFMT_IRQ_MEM_CPY_BIT_ID 3 -#define HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID 4 - -/* order of the input bits for the ifmt Soft reset register */ -#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_BIT_IDX 0 -#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_B_BIT_IDX 1 -#define HIVE_IFMT_GP_REGS_SRST_IFT_SEC_BIT_IDX 2 -#define HIVE_IFMT_GP_REGS_SRST_MEM_CPY_BIT_IDX 3 - -/* order of the input bits for the ifmt Soft reset register */ -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_BIT_IDX 0 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_B_BIT_IDX 1 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_SEC_BIT_IDX 2 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_MEM_CPY_BIT_IDX 3 - -#endif /* _if_subsystem_defs_h__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_selector_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_selector_defs.h deleted file mode 100644 index 87fbf82edb5b..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_selector_defs.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_selector_defs_h -#define _input_selector_defs_h - -#ifndef HIVE_ISP_ISEL_SEL_BITS -#define HIVE_ISP_ISEL_SEL_BITS 2 -#endif - -#ifndef HIVE_ISP_CH_ID_BITS -#define HIVE_ISP_CH_ID_BITS 2 -#endif - -#ifndef HIVE_ISP_FMT_TYPE_BITS -#define HIVE_ISP_FMT_TYPE_BITS 5 -#endif - -/* gp_register register id's -- Outputs */ -#define HIVE_ISEL_GP_REGS_SYNCGEN_ENABLE_IDX 0 -#define HIVE_ISEL_GP_REGS_SYNCGEN_FREE_RUNNING_IDX 1 -#define HIVE_ISEL_GP_REGS_SYNCGEN_PAUSE_IDX 2 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_FRAMES_IDX 3 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_PIX_IDX 4 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_LINES_IDX 5 -#define HIVE_ISEL_GP_REGS_SYNCGEN_HBLANK_CYCLES_IDX 6 -#define HIVE_ISEL_GP_REGS_SYNCGEN_VBLANK_CYCLES_IDX 7 - -#define HIVE_ISEL_GP_REGS_SOF_IDX 8 -#define HIVE_ISEL_GP_REGS_EOF_IDX 9 -#define HIVE_ISEL_GP_REGS_SOL_IDX 10 -#define HIVE_ISEL_GP_REGS_EOL_IDX 11 - -#define HIVE_ISEL_GP_REGS_PRBS_ENABLE 12 -#define HIVE_ISEL_GP_REGS_PRBS_ENABLE_PORT_B 13 -#define HIVE_ISEL_GP_REGS_PRBS_LFSR_RESET_VALUE 14 - -#define HIVE_ISEL_GP_REGS_TPG_ENABLE 15 -#define HIVE_ISEL_GP_REGS_TPG_ENABLE_PORT_B 16 -#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_MASK_IDX 17 -#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_MASK_IDX 18 -#define HIVE_ISEL_GP_REGS_TPG_XY_CNT_MASK_IDX 19 -#define 
HIVE_ISEL_GP_REGS_TPG_HOR_CNT_DELTA_IDX 20 -#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_DELTA_IDX 21 -#define HIVE_ISEL_GP_REGS_TPG_MODE_IDX 22 -#define HIVE_ISEL_GP_REGS_TPG_R1_IDX 23 -#define HIVE_ISEL_GP_REGS_TPG_G1_IDX 24 -#define HIVE_ISEL_GP_REGS_TPG_B1_IDX 25 -#define HIVE_ISEL_GP_REGS_TPG_R2_IDX 26 -#define HIVE_ISEL_GP_REGS_TPG_G2_IDX 27 -#define HIVE_ISEL_GP_REGS_TPG_B2_IDX 28 - - -#define HIVE_ISEL_GP_REGS_CH_ID_IDX 29 -#define HIVE_ISEL_GP_REGS_FMT_TYPE_IDX 30 -#define HIVE_ISEL_GP_REGS_DATA_SEL_IDX 31 -#define HIVE_ISEL_GP_REGS_SBAND_SEL_IDX 32 -#define HIVE_ISEL_GP_REGS_SYNC_SEL_IDX 33 -#define HIVE_ISEL_GP_REGS_SRST_IDX 37 - -#define HIVE_ISEL_GP_REGS_SRST_SYNCGEN_BIT 0 -#define HIVE_ISEL_GP_REGS_SRST_PRBS_BIT 1 -#define HIVE_ISEL_GP_REGS_SRST_TPG_BIT 2 -#define HIVE_ISEL_GP_REGS_SRST_FIFO_BIT 3 - -/* gp_register register id's -- Inputs */ -#define HIVE_ISEL_GP_REGS_SYNCGEN_HOR_CNT_IDX 34 -#define HIVE_ISEL_GP_REGS_SYNCGEN_VER_CNT_IDX 35 -#define HIVE_ISEL_GP_REGS_SYNCGEN_FRAMES_CNT_IDX 36 - -/* irq sources isel irq controller */ -#define HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID 0 -#define HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID 1 -#define HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID 2 -#define HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID 3 -#define HIVE_ISEL_IRQ_NUM_IRQS 4 - -#endif /* _input_selector_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_switch_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_switch_2400_defs.h deleted file mode 100644 index 20a13c4cdb56..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_switch_2400_defs.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_switch_2400_defs_h -#define _input_switch_2400_defs_h - -#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id)*2) + ((fmt_type)>=16)) -#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type) (((fmt_type)%16) * 2) - -#define HIVE_INPUT_SWITCH_SELECT_NO_OUTPUT 0 -#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1 -#define HIVE_INPUT_SWITCH_SELECT_IF_SEC 2 -#define HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM 3 -#define HIVE_INPUT_SWITCH_VSELECT_NO_OUTPUT 0 -#define HIVE_INPUT_SWITCH_VSELECT_IF_PRIM 1 -#define HIVE_INPUT_SWITCH_VSELECT_IF_SEC 2 -#define HIVE_INPUT_SWITCH_VSELECT_STR_TO_MEM 4 - -#endif /* _input_switch_2400_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_ctrl_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_ctrl_defs.h deleted file mode 100644 index a7f0ca80bc9b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_ctrl_defs.h +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_system_ctrl_defs_h -#define _input_system_ctrl_defs_h - -#define _INPUT_SYSTEM_CTRL_REG_ALIGN 4 /* assuming 32 bit control bus width */ - -/* --------------------------------------------------*/ - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -// Number of registers -#define ISYS_CTRL_NOF_REGS 23 - -// Register id's of MMIO slave accesible registers -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_ID 0 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_ID 1 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_ID 2 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID 3 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID 4 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID 5 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID 6 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID 7 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID 8 -#define ISYS_CTRL_ACQ_START_ADDR_REG_ID 9 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID 10 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID 11 -#define ISYS_CTRL_INIT_REG_ID 12 -#define ISYS_CTRL_LAST_COMMAND_REG_ID 13 -#define ISYS_CTRL_NEXT_COMMAND_REG_ID 14 -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID 15 -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID 16 -#define ISYS_CTRL_FSM_STATE_INFO_REG_ID 17 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID 18 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID 19 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID 20 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID 21 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID 22 - - -/* register reset value */ -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_RSTVAL 0 
-#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_RSTVAL 3 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_RSTVAL 3 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_RSTVAL 3 -#define ISYS_CTRL_ACQ_START_ADDR_REG_RSTVAL 0 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_RSTVAL 128 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define ISYS_CTRL_INIT_REG_RSTVAL 0 -#define ISYS_CTRL_LAST_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_NEXT_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_RSTVAL 0 - -/* register width value */ -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_START_ADDR_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_WIDTH 9 -#define ISYS_CTRL_INIT_REG_WIDTH 3 -#define 
ISYS_CTRL_LAST_COMMAND_REG_WIDTH 32 /* slave data width */ -#define ISYS_CTRL_NEXT_COMMAND_REG_WIDTH 32 -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define ISYS_CTRL_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_WIDTH 1 - -/* bit definitions */ - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ - -/* -InpSysCaptFramesAcq 1/0 [3:0] - 'b0000 -[7:4] - CaptPortId, - CaptA-'b0000 - CaptB-'b0001 - CaptC-'b0010 -[31:16] - NOF_frames -InpSysCaptFrameExt 2/0 [3:0] - 'b0001' -[7:4] - CaptPortId, - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - - 2/1 [31:0] - external capture address -InpSysAcqFrame 2/0 [3:0] - 'b0010, -[31:4] - NOF_ext_mem_words - 2/1 [31:0] - external memory read start address -InpSysOverruleON 1/0 [3:0] - 'b0011, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysOverruleOFF 1/0 [3:0] - 'b0100, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysOverruleCmd 2/0 [3:0] - 'b0101, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - - 2/1 [31:0] - command token value for port opid - - -acknowledge tokens: - -InpSysAckCFA 1/0 [3:0] - 'b0000 - [7:4] - CaptPortId, - CaptA-'b0000 - CaptB- 'b0001 - CaptC-'b0010 - [31:16] - NOF_frames -InpSysAckCFE 1/0 [3:0] - 'b0001' -[7:4] - CaptPortId, - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - -InpSysAckAF 1/0 [3:0] - 'b0010 -InpSysAckOverruleON 1/0 [3:0] - 'b0011, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB 
- 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysAckOverruleOFF 1/0 [3:0] - 'b0100, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysAckOverrule 2/0 [3:0] - 'b0101, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - - 2/1 [31:0] - acknowledge token value from port opid - - - -*/ - - -/* Command and acknowledge tokens IDs */ -#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0 /* 0000b */ -#define ISYS_CTRL_CAPT_FRAME_EXT_TOKEN_ID 1 /* 0001b */ -#define ISYS_CTRL_ACQ_FRAME_TOKEN_ID 2 /* 0010b */ -#define ISYS_CTRL_OVERRULE_ON_TOKEN_ID 3 /* 0011b */ -#define ISYS_CTRL_OVERRULE_OFF_TOKEN_ID 4 /* 0100b */ -#define ISYS_CTRL_OVERRULE_TOKEN_ID 5 /* 0101b */ - -#define ISYS_CTRL_ACK_CFA_TOKEN_ID 0 -#define ISYS_CTRL_ACK_CFE_TOKEN_ID 1 -#define ISYS_CTRL_ACK_AF_TOKEN_ID 2 -#define ISYS_CTRL_ACK_OVERRULE_ON_TOKEN_ID 3 -#define ISYS_CTRL_ACK_OVERRULE_OFF_TOKEN_ID 4 -#define ISYS_CTRL_ACK_OVERRULE_TOKEN_ID 5 -#define ISYS_CTRL_ACK_DEVICE_ERROR_TOKEN_ID 6 - -#define ISYS_CTRL_TOKEN_ID_MSB 3 -#define ISYS_CTRL_TOKEN_ID_LSB 0 -#define ISYS_CTRL_PORT_ID_TOKEN_MSB 7 -#define ISYS_CTRL_PORT_ID_TOKEN_LSB 4 -#define ISYS_CTRL_NOF_CAPT_TOKEN_MSB 31 -#define ISYS_CTRL_NOF_CAPT_TOKEN_LSB 16 -#define ISYS_CTRL_NOF_EXT_TOKEN_MSB 31 -#define ISYS_CTRL_NOF_EXT_TOKEN_LSB 8 - -#define ISYS_CTRL_TOKEN_ID_IDX 0 -#define ISYS_CTRL_TOKEN_ID_BITS (ISYS_CTRL_TOKEN_ID_MSB - ISYS_CTRL_TOKEN_ID_LSB + 1) -#define ISYS_CTRL_PORT_ID_IDX (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS) -#define ISYS_CTRL_PORT_ID_BITS (ISYS_CTRL_PORT_ID_TOKEN_MSB - ISYS_CTRL_PORT_ID_TOKEN_LSB +1) -#define ISYS_CTRL_NOF_CAPT_IDX ISYS_CTRL_NOF_CAPT_TOKEN_LSB -#define ISYS_CTRL_NOF_CAPT_BITS (ISYS_CTRL_NOF_CAPT_TOKEN_MSB - ISYS_CTRL_NOF_CAPT_TOKEN_LSB + 1) -#define ISYS_CTRL_NOF_EXT_IDX ISYS_CTRL_NOF_EXT_TOKEN_LSB -#define ISYS_CTRL_NOF_EXT_BITS (ISYS_CTRL_NOF_EXT_TOKEN_MSB - 
ISYS_CTRL_NOF_EXT_TOKEN_LSB + 1) - -#define ISYS_CTRL_PORT_ID_CAPT_A 0 /* device ID for capture unit A */ -#define ISYS_CTRL_PORT_ID_CAPT_B 1 /* device ID for capture unit B */ -#define ISYS_CTRL_PORT_ID_CAPT_C 2 /* device ID for capture unit C */ -#define ISYS_CTRL_PORT_ID_ACQUISITION 3 /* device ID for acquistion unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_A 4 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_B 5 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_C 6 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_ACQ 7 /* device ID for dma unit */ - -#define ISYS_CTRL_NO_ACQ_ACK 16 /* no ack from acquisition unit */ -#define ISYS_CTRL_NO_DMA_ACK 0 -#define ISYS_CTRL_NO_CAPT_ACK 16 - -#endif /* _input_system_ctrl_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_defs.h deleted file mode 100644 index ae62163034a6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_defs.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _input_system_defs_h -#define _input_system_defs_h - -/* csi controller modes */ -#define HIVE_CSI_CONFIG_MAIN 0 -#define HIVE_CSI_CONFIG_STEREO1 4 -#define HIVE_CSI_CONFIG_STEREO2 8 - -/* general purpose register IDs */ - -/* Stream Multicast select modes */ -#define HIVE_ISYS_GPREG_MULTICAST_A_IDX 0 -#define HIVE_ISYS_GPREG_MULTICAST_B_IDX 1 -#define HIVE_ISYS_GPREG_MULTICAST_C_IDX 2 - -/* Stream Mux select modes */ -#define HIVE_ISYS_GPREG_MUX_IDX 3 - -/* streaming monitor status and control */ -#define HIVE_ISYS_GPREG_STRMON_STAT_IDX 4 -#define HIVE_ISYS_GPREG_STRMON_COND_IDX 5 -#define HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX 6 -#define HIVE_ISYS_GPREG_SRST_IDX 7 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_IDX 8 -#define HIVE_ISYS_GPREG_REG_PORT_A_IDX 9 -#define HIVE_ISYS_GPREG_REG_PORT_B_IDX 10 - -/* Bit numbers of the soft reset register */ -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0 -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1 -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_A_BIT 3 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_B_BIT 4 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_C_BIT 5 -#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT 6 -#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT 7 -#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT 8 -#define HIVE_ISYS_GPREG_SRST_ACQ_BIT 9 -/* For ISYS_CTRL 5bits are defined to allow soft-reset per sub-controller and top-ctrl */ -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT 10 /*LSB for 5bit vector */ -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_A_BIT 10 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_B_BIT 11 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_C_BIT 12 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_ACQ_BIT 13 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_TOP_BIT 14 -/* -- */ -#define HIVE_ISYS_GPREG_SRST_STR_MUX_BIT 15 -#define HIVE_ISYS_GPREG_SRST_CIO2AHB_BIT 16 -#define HIVE_ISYS_GPREG_SRST_GEN_SHORT_FIFO_BIT 17 -#define HIVE_ISYS_GPREG_SRST_WIDE_BUS_BIT 18 // includes CIO conv -#define 
HIVE_ISYS_GPREG_SRST_DMA_BIT 19 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_A_BIT 20 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_B_BIT 21 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_C_BIT 22 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_ACQ_BIT 23 -#define HIVE_ISYS_GPREG_SRST_CSI_BE_OUT_BIT 24 - -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_A_BIT 0 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_B_BIT 1 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_C_BIT 2 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_ACQ_BIT 3 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_DMA_BIT 4 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_ISYS_CTRL_BIT 5 - -/* streaming monitor port id's */ -#define HIVE_ISYS_STR_MON_PORT_CAPA 0 -#define HIVE_ISYS_STR_MON_PORT_CAPB 1 -#define HIVE_ISYS_STR_MON_PORT_CAPC 2 -#define HIVE_ISYS_STR_MON_PORT_ACQ 3 -#define HIVE_ISYS_STR_MON_PORT_CSS_GENSH 4 -#define HIVE_ISYS_STR_MON_PORT_SF_GENSH 5 -#define HIVE_ISYS_STR_MON_PORT_SP2ISYS 6 -#define HIVE_ISYS_STR_MON_PORT_ISYS2SP 7 -#define HIVE_ISYS_STR_MON_PORT_PIXA 8 -#define HIVE_ISYS_STR_MON_PORT_PIXB 9 - -/* interrupt bit ID's */ -#define HIVE_ISYS_IRQ_CSI_SOF_BIT_ID 0 -#define HIVE_ISYS_IRQ_CSI_EOF_BIT_ID 1 -#define HIVE_ISYS_IRQ_CSI_SOL_BIT_ID 2 -#define HIVE_ISYS_IRQ_CSI_EOL_BIT_ID 3 -#define HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID 4 -#define HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID 5 -#define HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP 6 -#define HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP 7 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_A_UNDEF_PH 7*/ -#define HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP 8 -#define HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP 9 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_B_UNDEF_PH 10*/ -#define HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP 10 -#define HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP 11 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_C_UNDEF_PH 13*/ -#define HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH 12 -/*#define HIVE_ISYS_IRQ_ACQ_UNIT_UNDEF_PH 15*/ -#define HIVE_ISYS_IRQ_INP_CTRL_CAPA 13 -#define HIVE_ISYS_IRQ_INP_CTRL_CAPB 14 -#define HIVE_ISYS_IRQ_INP_CTRL_CAPC 15 -#define HIVE_ISYS_IRQ_CIO2AHB 16 -#define 
HIVE_ISYS_IRQ_DMA_BIT_ID 17 -#define HIVE_ISYS_IRQ_STREAM_MON_BIT_ID 18 -#define HIVE_ISYS_IRQ_NUM_BITS 19 - -/* DMA */ -#define HIVE_ISYS_DMA_CHANNEL 0 -#define HIVE_ISYS_DMA_IBUF_DDR_CONN 0 -#define HIVE_ISYS_DMA_HEIGHT 1 -#define HIVE_ISYS_DMA_ELEMS 1 /* both master buses of same width */ -#define HIVE_ISYS_DMA_STRIDE 0 /* no stride required as height is fixed to 1 */ -#define HIVE_ISYS_DMA_CROP 0 /* no cropping */ -#define HIVE_ISYS_DMA_EXTENSION 0 /* no extension as elem width is same on both side */ - -#endif /* _input_system_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/irq_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/irq_controller_defs.h deleted file mode 100644 index ec6dd4487158..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/irq_controller_defs.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _irq_controller_defs_h -#define _irq_controller_defs_h - -#define _HRT_IRQ_CONTROLLER_EDGE_REG_IDX 0 -#define _HRT_IRQ_CONTROLLER_MASK_REG_IDX 1 -#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2 -#define _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX 3 -#define _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX 4 -#define _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX 5 -#define _HRT_IRQ_CONTROLLER_STR_OUT_ENABLE_REG_IDX 6 - -#define _HRT_IRQ_CONTROLLER_REG_ALIGN 4 - -#endif /* _irq_controller_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_mamoiada_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_mamoiada_params.h deleted file mode 100644 index 669060d17c4f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_mamoiada_params.h +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -/* Version */ -#define RTL_VERSION - -/* Cell name */ -#define ISP_CELL_TYPE isp2400_mamoiada -#define ISP_VMEM simd_vmem -#define _HRT_ISP_VMEM isp2400_mamoiada_simd_vmem - -/* instruction pipeline depth */ -#define ISP_BRANCHDELAY 5 - -/* bus */ -#define ISP_BUS_WIDTH 32 -#define ISP_BUS_ADDR_WIDTH 32 -#define ISP_BUS_BURST_SIZE 1 - -/* data-path */ -#define ISP_SCALAR_WIDTH 32 -#define ISP_SLICE_NELEMS 4 -#define ISP_VEC_NELEMS 64 -#define ISP_VEC_ELEMBITS 14 -#define ISP_VEC_ELEM8BITS 16 -#define ISP_CLONE_DATAPATH_IS_16 1 - -/* memories */ -#define ISP_DMEM_DEPTH 4096 -#define ISP_DMEM_BSEL_DOWNSAMPLE 8 -#define ISP_VMEM_DEPTH 3072 -#define ISP_VMEM_BSEL_DOWNSAMPLE 8 -#define ISP_VMEM_ELEMBITS 14 -#define ISP_VMEM_ELEM_PRECISION 14 -#define ISP_VMEM_IS_BAMEM 1 -#if ISP_VMEM_IS_BAMEM - #define ISP_VMEM_BAMEM_MAX_BOI_HEIGHT 8 - #define ISP_VMEM_BAMEM_LATENCY 5 - #define ISP_VMEM_BAMEM_BANK_NARROWING_FACTOR 2 - #define ISP_VMEM_BAMEM_NR_DATA_PLANES 8 - #define ISP_VMEM_BAMEM_NR_CFG_REGISTERS 16 - #define ISP_VMEM_BAMEM_LININT 0 - #define ISP_VMEM_BAMEM_DAP_BITS 3 - #define ISP_VMEM_BAMEM_LININT_FRAC_BITS 0 - #define ISP_VMEM_BAMEM_PID_BITS 3 - #define ISP_VMEM_BAMEM_OFFSET_BITS 19 - #define ISP_VMEM_BAMEM_ADDRESS_BITS 25 - #define ISP_VMEM_BAMEM_RID_BITS 4 - #define ISP_VMEM_BAMEM_TRANSPOSITION 1 - #define ISP_VMEM_BAMEM_VEC_PLUS_SLICE 1 - #define ISP_VMEM_BAMEM_ARB_SERVICE_CYCLE_BITS 1 - #define ISP_VMEM_BAMEM_LUT_ELEMS 16 - #define ISP_VMEM_BAMEM_LUT_ADDR_WIDTH 14 - #define ISP_VMEM_BAMEM_HALF_BLOCK_WRITE 1 - #define ISP_VMEM_BAMEM_SMART_FETCH 1 - #define ISP_VMEM_BAMEM_BIG_ENDIANNESS 0 -#endif /* ISP_VMEM_IS_BAMEM */ -#define ISP_PMEM_DEPTH 2048 -#define ISP_PMEM_WIDTH 640 -#define ISP_VAMEM_ADDRESS_BITS 12 -#define ISP_VAMEM_ELEMBITS 12 -#define ISP_VAMEM_DEPTH 2048 -#define ISP_VAMEM_ALIGNMENT 2 -#define ISP_VA_ADDRESS_WIDTH 896 -#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS -#define ISP_HIST_ADDRESS_BITS 12 -#define ISP_HIST_ALIGNMENT 4 -#define 
ISP_HIST_COMP_IN_PREC 12 -#define ISP_HIST_DEPTH 1024 -#define ISP_HIST_WIDTH 24 -#define ISP_HIST_COMPONENTS 4 - -/* program counter */ -#define ISP_PC_WIDTH 13 - -/* Template switches */ -#define ISP_SHIELD_INPUT_DMEM 0 -#define ISP_SHIELD_OUTPUT_DMEM 1 -#define ISP_SHIELD_INPUT_VMEM 0 -#define ISP_SHIELD_OUTPUT_VMEM 0 -#define ISP_SHIELD_INPUT_PMEM 1 -#define ISP_SHIELD_OUTPUT_PMEM 1 -#define ISP_SHIELD_INPUT_HIST 1 -#define ISP_SHIELD_OUTPUT_HIST 1 -/* When LUT is select the shielding is always on */ -#define ISP_SHIELD_INPUT_VAMEM 1 -#define ISP_SHIELD_OUTPUT_VAMEM 1 - -#define ISP_HAS_IRQ 1 -#define ISP_HAS_SOFT_RESET 1 -#define ISP_HAS_VEC_DIV 0 -#define ISP_HAS_VFU_W_2O 1 -#define ISP_HAS_DEINT3 1 -#define ISP_HAS_LUT 1 -#define ISP_HAS_HIST 1 -#define ISP_HAS_VALSU 1 -#define ISP_HAS_3rdVALSU 1 -#define ISP_VRF1_HAS_2P 1 - -#define ISP_SRU_GUARDING 1 -#define ISP_VLSU_GUARDING 1 - -#define ISP_VRF_RAM 1 -#define ISP_SRF_RAM 1 - -#define ISP_SPLIT_VMUL_VADD_IS 0 -#define ISP_RFSPLIT_FPGA 0 - -/* RSN or Bus pipelining */ -#define ISP_RSN_PIPE 1 -#define ISP_VSF_BUS_PIPE 0 - -/* extra slave port to vmem */ -#define ISP_IF_VMEM 0 -#define ISP_GDC_VMEM 0 - -/* Streaming ports */ -#define ISP_IF 1 -#define ISP_IF_B 1 -#define ISP_GDC 1 -#define ISP_SCL 1 -#define ISP_GPFIFO 1 -#define ISP_SP 1 - -/* Removing Issue Slot(s) */ -#define ISP_HAS_NOT_SIMD_IS2 0 -#define ISP_HAS_NOT_SIMD_IS3 0 -#define ISP_HAS_NOT_SIMD_IS4 0 -#define ISP_HAS_NOT_SIMD_IS4_VADD 0 -#define ISP_HAS_NOT_SIMD_IS5 0 -#define ISP_HAS_NOT_SIMD_IS6 0 -#define ISP_HAS_NOT_SIMD_IS7 0 -#define ISP_HAS_NOT_SIMD_IS8 0 - -/* ICache */ -#define ISP_ICACHE 1 -#define ISP_ICACHE_ONLY 0 -#define ISP_ICACHE_PREFETCH 1 -#define ISP_ICACHE_INDEX_BITS 8 -#define ISP_ICACHE_SET_BITS 5 -#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1 - -/* Experimental Flags */ -#define ISP_EXP_1 0 -#define ISP_EXP_2 0 -#define ISP_EXP_3 0 -#define ISP_EXP_4 0 -#define ISP_EXP_5 0 -#define ISP_EXP_6 0 - -/* Derived values */ -#define 
ISP_LOG2_PMEM_WIDTH 10 -#define ISP_VEC_WIDTH 896 -#define ISP_SLICE_WIDTH 56 -#define ISP_VMEM_WIDTH 896 -#define ISP_VMEM_ALIGN 128 -#if ISP_VMEM_IS_BAMEM - #define ISP_VMEM_ALIGN_ELEM 2 -#endif /* ISP_VMEM_IS_BAMEM */ -#define ISP_SIMDLSU 1 -#define ISP_LSU_IMM_BITS 12 - -/* convenient shortcuts for software*/ -#define ISP_NWAY ISP_VEC_NELEMS -#define NBITS ISP_VEC_ELEMBITS - -#define _isp_ceil_div(a,b) (((a)+(b)-1)/(b)) - -#define ISP_VEC_ALIGN ISP_VMEM_ALIGN - -/* HRT specific vector support */ -#define isp2400_mamoiada_vector_alignment ISP_VEC_ALIGN -#define isp2400_mamoiada_vector_elem_bits ISP_VMEM_ELEMBITS -#define isp2400_mamoiada_vector_elem_precision ISP_VMEM_ELEM_PRECISION -#define isp2400_mamoiada_vector_num_elems ISP_VEC_NELEMS - -/* register file sizes */ -#define ISP_RF0_SIZE 64 -#define ISP_RF1_SIZE 16 -#define ISP_RF2_SIZE 64 -#define ISP_RF3_SIZE 4 -#define ISP_RF4_SIZE 64 -#define ISP_RF5_SIZE 16 -#define ISP_RF6_SIZE 16 -#define ISP_RF7_SIZE 16 -#define ISP_RF8_SIZE 16 -#define ISP_RF9_SIZE 16 -#define ISP_RF10_SIZE 16 -#define ISP_RF11_SIZE 16 -#define ISP_VRF1_SIZE 24 -#define ISP_VRF2_SIZE 24 -#define ISP_VRF3_SIZE 24 -#define ISP_VRF4_SIZE 24 -#define ISP_VRF5_SIZE 24 -#define ISP_VRF6_SIZE 24 -#define ISP_VRF7_SIZE 24 -#define ISP_VRF8_SIZE 24 -#define ISP_SRF1_SIZE 4 -#define ISP_SRF2_SIZE 64 -#define ISP_SRF3_SIZE 64 -#define ISP_SRF4_SIZE 32 -#define ISP_SRF5_SIZE 64 -#define ISP_FRF0_SIZE 16 -#define ISP_FRF1_SIZE 4 -#define ISP_FRF2_SIZE 16 -#define ISP_FRF3_SIZE 4 -#define ISP_FRF4_SIZE 4 -#define ISP_FRF5_SIZE 8 -#define ISP_FRF6_SIZE 4 -/* register file read latency */ -#define ISP_VRF1_READ_LAT 1 -#define ISP_VRF2_READ_LAT 1 -#define ISP_VRF3_READ_LAT 1 -#define ISP_VRF4_READ_LAT 1 -#define ISP_VRF5_READ_LAT 1 -#define ISP_VRF6_READ_LAT 1 -#define ISP_VRF7_READ_LAT 1 -#define ISP_VRF8_READ_LAT 1 -#define ISP_SRF1_READ_LAT 1 -#define ISP_SRF2_READ_LAT 1 -#define ISP_SRF3_READ_LAT 1 -#define ISP_SRF4_READ_LAT 1 -#define 
ISP_SRF5_READ_LAT 1 -#define ISP_SRF5_READ_LAT 1 -/* immediate sizes */ -#define ISP_IS1_IMM_BITS 14 -#define ISP_IS2_IMM_BITS 13 -#define ISP_IS3_IMM_BITS 14 -#define ISP_IS4_IMM_BITS 14 -#define ISP_IS5_IMM_BITS 9 -#define ISP_IS6_IMM_BITS 16 -#define ISP_IS7_IMM_BITS 9 -#define ISP_IS8_IMM_BITS 16 -#define ISP_IS9_IMM_BITS 11 -/* fifo depths */ -#define ISP_IF_FIFO_DEPTH 0 -#define ISP_IF_B_FIFO_DEPTH 0 -#define ISP_DMA_FIFO_DEPTH 0 -#define ISP_OF_FIFO_DEPTH 0 -#define ISP_GDC_FIFO_DEPTH 0 -#define ISP_SCL_FIFO_DEPTH 0 -#define ISP_GPFIFO_FIFO_DEPTH 0 -#define ISP_SP_FIFO_DEPTH 0 diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_support.h deleted file mode 100644 index e00bc841d0f0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_support.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _isp2400_support_h -#define _isp2400_support_h - -#ifndef ISP2400_VECTOR_TYPES -/* This typedef is to be able to include hive header files - in the host code which is useful in crun */ -typedef char *tmemvectors, *tmemvectoru, *tvector; -#endif - -#define hrt_isp_vamem1_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem1), addr, val) -#define hrt_isp_vamem2_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem2), addr, val) - -#define hrt_isp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _base_dmem) -#define hrt_isp_vmem(cell) HRT_PROC_TYPE_PROP(cell, _simd_vmem) - -#define hrt_isp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_dmem(cell)) -#define hrt_isp_vmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_vmem(cell)) - -#if ISP_HAS_HIST - #define hrt_isp_hist(cell) HRT_PROC_TYPE_PROP(cell, _simd_histogram) - #define hrt_isp_hist_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_hist(cell)) -#endif - -#endif /* _isp2400_support_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_acquisition_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_acquisition_defs.h deleted file mode 100644 index 593620721627..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_acquisition_defs.h +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef _isp_acquisition_defs_h -#define _isp_acquisition_defs_h - -#define _ISP_ACQUISITION_REG_ALIGN 4 /* assuming 32 bit control bus width */ -#define _ISP_ACQUISITION_BYTES_PER_ELEM 4 - -/* --------------------------------------------------*/ - -#define NOF_ACQ_IRQS 1 - -/* --------------------------------------------------*/ -/* FSM */ -/* --------------------------------------------------*/ -#define MEM2STREAM_FSM_STATE_BITS 2 -#define ACQ_SYNCHRONIZER_FSM_STATE_BITS 2 - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -#define NOF_ACQ_REGS 12 - -// Register id's of MMIO slave accesible registers -#define ACQ_START_ADDR_REG_ID 0 -#define ACQ_MEM_REGION_SIZE_REG_ID 1 -#define ACQ_NUM_MEM_REGIONS_REG_ID 2 -#define ACQ_INIT_REG_ID 3 -#define ACQ_RECEIVED_SHORT_PACKETS_REG_ID 4 -#define ACQ_RECEIVED_LONG_PACKETS_REG_ID 5 -#define ACQ_LAST_COMMAND_REG_ID 6 -#define ACQ_NEXT_COMMAND_REG_ID 7 -#define ACQ_LAST_ACKNOWLEDGE_REG_ID 8 -#define ACQ_NEXT_ACKNOWLEDGE_REG_ID 9 -#define ACQ_FSM_STATE_INFO_REG_ID 10 -#define ACQ_INT_CNTR_INFO_REG_ID 11 - -// Register width -#define ACQ_START_ADDR_REG_WIDTH 9 -#define ACQ_MEM_REGION_SIZE_REG_WIDTH 9 -#define ACQ_NUM_MEM_REGIONS_REG_WIDTH 9 -#define ACQ_INIT_REG_WIDTH 3 -#define ACQ_RECEIVED_SHORT_PACKETS_REG_WIDTH 32 -#define ACQ_RECEIVED_LONG_PACKETS_REG_WIDTH 32 -#define ACQ_LAST_COMMAND_REG_WIDTH 32 -#define ACQ_NEXT_COMMAND_REG_WIDTH 32 -#define ACQ_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define ACQ_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define ACQ_FSM_STATE_INFO_REG_WIDTH ((MEM2STREAM_FSM_STATE_BITS * 3) + (ACQ_SYNCHRONIZER_FSM_STATE_BITS *3)) -#define ACQ_INT_CNTR_INFO_REG_WIDTH 32 - -/* register reset value */ -#define ACQ_START_ADDR_REG_RSTVAL 0 -#define ACQ_MEM_REGION_SIZE_REG_RSTVAL 128 -#define ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define ACQ_INIT_REG_RSTVAL 0 -#define 
ACQ_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0 -#define ACQ_RECEIVED_LONG_PACKETS_REG_RSTVAL 0 -#define ACQ_LAST_COMMAND_REG_RSTVAL 0 -#define ACQ_NEXT_COMMAND_REG_RSTVAL 0 -#define ACQ_LAST_ACKNOWLEDGE_REG_RSTVAL 0 -#define ACQ_NEXT_ACKNOWLEDGE_REG_RSTVAL 0 -#define ACQ_FSM_STATE_INFO_REG_RSTVAL 0 -#define ACQ_INT_CNTR_INFO_REG_RSTVAL 0 - -/* bit definitions */ -#define ACQ_INIT_RST_REG_BIT 0 -#define ACQ_INIT_RESYNC_BIT 2 -#define ACQ_INIT_RST_IDX ACQ_INIT_RST_REG_BIT -#define ACQ_INIT_RST_BITS 1 -#define ACQ_INIT_RESYNC_IDX ACQ_INIT_RESYNC_BIT -#define ACQ_INIT_RESYNC_BITS 1 - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ -#define ACQ_TOKEN_ID_LSB 0 -#define ACQ_TOKEN_ID_MSB 3 -#define ACQ_TOKEN_WIDTH (ACQ_TOKEN_ID_MSB - ACQ_TOKEN_ID_LSB + 1) // 4 -#define ACQ_TOKEN_ID_IDX 0 -#define ACQ_TOKEN_ID_BITS ACQ_TOKEN_WIDTH -#define ACQ_INIT_CMD_INIT_IDX 4 -#define ACQ_INIT_CMD_INIT_BITS 3 -#define ACQ_CMD_START_ADDR_IDX 4 -#define ACQ_CMD_START_ADDR_BITS 9 -#define ACQ_CMD_NOFWORDS_IDX 13 -#define ACQ_CMD_NOFWORDS_BITS 9 -#define ACQ_MEM_REGION_ID_IDX 22 -#define ACQ_MEM_REGION_ID_BITS 9 -#define ACQ_PACKET_LENGTH_TOKEN_MSB 21 -#define ACQ_PACKET_LENGTH_TOKEN_LSB 13 -#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_MSB 9 -#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_LSB 4 -#define ACQ_PACKET_CH_ID_TOKEN_MSB 11 -#define ACQ_PACKET_CH_ID_TOKEN_LSB 10 -#define ACQ_PACKET_MEM_REGION_ID_TOKEN_MSB 12 /* only for capt_end_of_packet_written */ -#define ACQ_PACKET_MEM_REGION_ID_TOKEN_LSB 4 /* only for capt_end_of_packet_written */ - - -/* Command tokens IDs */ -#define ACQ_READ_REGION_AUTO_INCR_TOKEN_ID 0 //0000b -#define ACQ_READ_REGION_TOKEN_ID 1 //0001b -#define ACQ_READ_REGION_SOP_TOKEN_ID 2 //0010b -#define ACQ_INIT_TOKEN_ID 8 //1000b - -/* Acknowledge token IDs */ -#define ACQ_READ_REGION_ACK_TOKEN_ID 0 //0000b -#define ACQ_END_OF_PACKET_TOKEN_ID 4 //0100b -#define ACQ_END_OF_REGION_TOKEN_ID 5 //0101b 
-#define ACQ_SOP_MISMATCH_TOKEN_ID 6 //0110b -#define ACQ_UNDEF_PH_TOKEN_ID 7 //0111b - -#define ACQ_TOKEN_MEMREGIONID_MSB 30 -#define ACQ_TOKEN_MEMREGIONID_LSB 22 -#define ACQ_TOKEN_NOFWORDS_MSB 21 -#define ACQ_TOKEN_NOFWORDS_LSB 13 -#define ACQ_TOKEN_STARTADDR_MSB 12 -#define ACQ_TOKEN_STARTADDR_LSB 4 - - -/* --------------------------------------------------*/ -/* MIPI */ -/* --------------------------------------------------*/ - -#define WORD_COUNT_WIDTH 16 -#define PKT_CODE_WIDTH 6 -#define CHN_NO_WIDTH 2 -#define ERROR_INFO_WIDTH 8 - -#define LONG_PKTCODE_MAX 63 -#define LONG_PKTCODE_MIN 16 -#define SHORT_PKTCODE_MAX 15 - -#define EOF_CODE 1 - -/* --------------------------------------------------*/ -/* Packet Info */ -/* --------------------------------------------------*/ -#define ACQ_START_OF_FRAME 0 -#define ACQ_END_OF_FRAME 1 -#define ACQ_START_OF_LINE 2 -#define ACQ_END_OF_LINE 3 -#define ACQ_LINE_PAYLOAD 4 -#define ACQ_GEN_SH_PKT 5 - - -/* bit definition */ -#define ACQ_PKT_TYPE_IDX 16 -#define ACQ_PKT_TYPE_BITS 6 -#define ACQ_PKT_SOP_IDX 32 -#define ACQ_WORD_CNT_IDX 0 -#define ACQ_WORD_CNT_BITS 16 -#define ACQ_PKT_INFO_IDX 16 -#define ACQ_PKT_INFO_BITS 8 -#define ACQ_HEADER_DATA_IDX 0 -#define ACQ_HEADER_DATA_BITS 16 -#define ACQ_ACK_TOKEN_ID_IDX ACQ_TOKEN_ID_IDX -#define ACQ_ACK_TOKEN_ID_BITS ACQ_TOKEN_ID_BITS -#define ACQ_ACK_NOFWORDS_IDX 13 -#define ACQ_ACK_NOFWORDS_BITS 9 -#define ACQ_ACK_PKT_LEN_IDX 4 -#define ACQ_ACK_PKT_LEN_BITS 16 - - -/* --------------------------------------------------*/ -/* Packet Data Type */ -/* --------------------------------------------------*/ - - -#define ACQ_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */ -#define ACQ_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */ -#define ACQ_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */ -#define ACQ_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */ -#define ACQ_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */ -#define ACQ_RGB444_DATA 32 /* 10 0000 RGB444 */ -#define ACQ_RGB555_DATA 33 /* 
10 0001 RGB555 */ -#define ACQ_RGB565_DATA 34 /* 10 0010 RGB565 */ -#define ACQ_RGB666_DATA 35 /* 10 0011 RGB666 */ -#define ACQ_RGB888_DATA 36 /* 10 0100 RGB888 */ -#define ACQ_RAW6_DATA 40 /* 10 1000 RAW6 */ -#define ACQ_RAW7_DATA 41 /* 10 1001 RAW7 */ -#define ACQ_RAW8_DATA 42 /* 10 1010 RAW8 */ -#define ACQ_RAW10_DATA 43 /* 10 1011 RAW10 */ -#define ACQ_RAW12_DATA 44 /* 10 1100 RAW12 */ -#define ACQ_RAW14_DATA 45 /* 10 1101 RAW14 */ -#define ACQ_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */ -#define ACQ_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */ -#define ACQ_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */ -#define ACQ_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */ -#define ACQ_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */ -#define ACQ_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */ -#define ACQ_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */ -#define ACQ_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */ -#define ACQ_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */ -#define ACQ_SOF_DATA 0 /* 00 0000 frame start */ -#define ACQ_EOF_DATA 1 /* 00 0001 frame end */ -#define ACQ_SOL_DATA 2 /* 00 0010 line start */ -#define ACQ_EOL_DATA 3 /* 00 0011 line end */ -#define ACQ_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */ -#define ACQ_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */ -#define ACQ_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */ -#define ACQ_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */ -#define ACQ_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */ -#define ACQ_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */ -#define ACQ_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */ -#define ACQ_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */ -#define ACQ_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */ -#define 
ACQ_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */ -#define ACQ_RESERVED_DATA_TYPE_MIN 56 -#define ACQ_RESERVED_DATA_TYPE_MAX 63 -#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MIN 19 -#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MAX 23 -#define ACQ_YUV_RESERVED_DATA_TYPE 27 -#define ACQ_RGB_RESERVED_DATA_TYPE_MIN 37 -#define ACQ_RGB_RESERVED_DATA_TYPE_MAX 39 -#define ACQ_RAW_RESERVED_DATA_TYPE_MIN 46 -#define ACQ_RAW_RESERVED_DATA_TYPE_MAX 47 - -/* --------------------------------------------------*/ - -#endif /* _isp_acquisition_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_capture_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_capture_defs.h deleted file mode 100644 index 0a249ce3e589..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_capture_defs.h +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _isp_capture_defs_h -#define _isp_capture_defs_h - -#define _ISP_CAPTURE_REG_ALIGN 4 /* assuming 32 bit control bus width */ -#define _ISP_CAPTURE_BITS_PER_ELEM 32 /* only for data, not SOP */ -#define _ISP_CAPTURE_BYTES_PER_ELEM (_ISP_CAPTURE_BITS_PER_ELEM/8 ) -#define _ISP_CAPTURE_BYTES_PER_WORD 32 /* 256/8 */ -#define _ISP_CAPTURE_ELEM_PER_WORD _ISP_CAPTURE_BYTES_PER_WORD / _ISP_CAPTURE_BYTES_PER_ELEM - -//#define CAPT_RCV_ACK 1 -//#define CAPT_WRT_ACK 2 -//#define CAPT_IRQ_ACK 3 - -/* --------------------------------------------------*/ - -#define NOF_IRQS 2 - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -// Number of registers -#define CAPT_NOF_REGS 16 - -// Register id's of MMIO slave accesible registers -#define CAPT_START_MODE_REG_ID 0 -#define CAPT_START_ADDR_REG_ID 1 -#define CAPT_MEM_REGION_SIZE_REG_ID 2 -#define CAPT_NUM_MEM_REGIONS_REG_ID 3 -#define CAPT_INIT_REG_ID 4 -#define CAPT_START_REG_ID 5 -#define CAPT_STOP_REG_ID 6 - -#define CAPT_PACKET_LENGTH_REG_ID 7 -#define CAPT_RECEIVED_LENGTH_REG_ID 8 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_ID 9 -#define CAPT_RECEIVED_LONG_PACKETS_REG_ID 10 -#define CAPT_LAST_COMMAND_REG_ID 11 -#define CAPT_NEXT_COMMAND_REG_ID 12 -#define CAPT_LAST_ACKNOWLEDGE_REG_ID 13 -#define CAPT_NEXT_ACKNOWLEDGE_REG_ID 14 -#define CAPT_FSM_STATE_INFO_REG_ID 15 - -// Register width -#define CAPT_START_MODE_REG_WIDTH 1 -#define CAPT_START_ADDR_REG_WIDTH 9 -#define CAPT_MEM_REGION_SIZE_REG_WIDTH 9 -#define CAPT_NUM_MEM_REGIONS_REG_WIDTH 9 -#define CAPT_INIT_REG_WIDTH (18 + 4) - -#define CAPT_START_REG_WIDTH 1 -#define CAPT_STOP_REG_WIDTH 1 - -/* --------------------------------------------------*/ -/* FSM */ -/* --------------------------------------------------*/ -#define CAPT_WRITE2MEM_FSM_STATE_BITS 2 -#define CAPT_SYNCHRONIZER_FSM_STATE_BITS 3 - - -#define CAPT_PACKET_LENGTH_REG_WIDTH 17 -#define 
CAPT_RECEIVED_LENGTH_REG_WIDTH 17 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_WIDTH 32 -#define CAPT_RECEIVED_LONG_PACKETS_REG_WIDTH 32 -#define CAPT_LAST_COMMAND_REG_WIDTH 32 -/* #define CAPT_NEXT_COMMAND_REG_WIDTH 32 */ -#define CAPT_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define CAPT_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define CAPT_FSM_STATE_INFO_REG_WIDTH ((CAPT_WRITE2MEM_FSM_STATE_BITS * 3) + (CAPT_SYNCHRONIZER_FSM_STATE_BITS * 3)) - -#define CAPT_INIT_RESTART_MEM_ADDR_WIDTH 9 -#define CAPT_INIT_RESTART_MEM_REGION_WIDTH 9 - -/* register reset value */ -#define CAPT_START_MODE_REG_RSTVAL 0 -#define CAPT_START_ADDR_REG_RSTVAL 0 -#define CAPT_MEM_REGION_SIZE_REG_RSTVAL 128 -#define CAPT_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define CAPT_INIT_REG_RSTVAL 0 - -#define CAPT_START_REG_RSTVAL 0 -#define CAPT_STOP_REG_RSTVAL 0 - -#define CAPT_PACKET_LENGTH_REG_RSTVAL 0 -#define CAPT_RECEIVED_LENGTH_REG_RSTVAL 0 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0 -#define CAPT_RECEIVED_LONG_PACKETS_REG_RSTVAL 0 -#define CAPT_LAST_COMMAND_REG_RSTVAL 0 -#define CAPT_NEXT_COMMAND_REG_RSTVAL 0 -#define CAPT_LAST_ACKNOWLEDGE_REG_RSTVAL 0 -#define CAPT_NEXT_ACKNOWLEDGE_REG_RSTVAL 0 -#define CAPT_FSM_STATE_INFO_REG_RSTVAL 0 - -/* bit definitions */ -#define CAPT_INIT_RST_REG_BIT 0 -#define CAPT_INIT_FLUSH_BIT 1 -#define CAPT_INIT_RESYNC_BIT 2 -#define CAPT_INIT_RESTART_BIT 3 -#define CAPT_INIT_RESTART_MEM_ADDR_LSB 4 -#define CAPT_INIT_RESTART_MEM_ADDR_MSB 12 -#define CAPT_INIT_RESTART_MEM_REGION_LSB 13 -#define CAPT_INIT_RESTART_MEM_REGION_MSB 21 - - -#define CAPT_INIT_RST_REG_IDX CAPT_INIT_RST_REG_BIT -#define CAPT_INIT_RST_REG_BITS 1 -#define CAPT_INIT_FLUSH_IDX CAPT_INIT_FLUSH_BIT -#define CAPT_INIT_FLUSH_BITS 1 -#define CAPT_INIT_RESYNC_IDX CAPT_INIT_RESYNC_BIT -#define CAPT_INIT_RESYNC_BITS 1 -#define CAPT_INIT_RESTART_IDX CAPT_INIT_RESTART_BIT -#define CAPT_INIT_RESTART_BITS 1 -#define CAPT_INIT_RESTART_MEM_ADDR_IDX CAPT_INIT_RESTART_MEM_ADDR_LSB -#define CAPT_INIT_RESTART_MEM_ADDR_BITS 
(CAPT_INIT_RESTART_MEM_ADDR_MSB - CAPT_INIT_RESTART_MEM_ADDR_LSB + 1) -#define CAPT_INIT_RESTART_MEM_REGION_IDX CAPT_INIT_RESTART_MEM_REGION_LSB -#define CAPT_INIT_RESTART_MEM_REGION_BITS (CAPT_INIT_RESTART_MEM_REGION_MSB - CAPT_INIT_RESTART_MEM_REGION_LSB + 1) - - - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ -#define CAPT_TOKEN_ID_LSB 0 -#define CAPT_TOKEN_ID_MSB 3 -#define CAPT_TOKEN_WIDTH (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) /* 4 */ - -/* Command tokens IDs */ -#define CAPT_START_TOKEN_ID 0 /* 0000b */ -#define CAPT_STOP_TOKEN_ID 1 /* 0001b */ -#define CAPT_FREEZE_TOKEN_ID 2 /* 0010b */ -#define CAPT_RESUME_TOKEN_ID 3 /* 0011b */ -#define CAPT_INIT_TOKEN_ID 8 /* 1000b */ - -#define CAPT_START_TOKEN_BIT 0 -#define CAPT_STOP_TOKEN_BIT 0 -#define CAPT_FREEZE_TOKEN_BIT 0 -#define CAPT_RESUME_TOKEN_BIT 0 -#define CAPT_INIT_TOKEN_BIT 0 - -/* Acknowledge token IDs */ -#define CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID 0 /* 0000b */ -#define CAPT_END_OF_PACKET_WRITTEN_TOKEN_ID 1 /* 0001b */ -#define CAPT_END_OF_REGION_WRITTEN_TOKEN_ID 2 /* 0010b */ -#define CAPT_FLUSH_DONE_TOKEN_ID 3 /* 0011b */ -#define CAPT_PREMATURE_SOP_TOKEN_ID 4 /* 0100b */ -#define CAPT_MISSING_SOP_TOKEN_ID 5 /* 0101b */ -#define CAPT_UNDEF_PH_TOKEN_ID 6 /* 0110b */ -#define CAPT_STOP_ACK_TOKEN_ID 7 /* 0111b */ - -#define CAPT_PACKET_LENGTH_TOKEN_MSB 19 -#define CAPT_PACKET_LENGTH_TOKEN_LSB 4 -#define CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB 20 -#define CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB 4 -#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB 25 -#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB 20 -#define CAPT_PACKET_CH_ID_TOKEN_MSB 27 -#define CAPT_PACKET_CH_ID_TOKEN_LSB 26 -#define CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB 29 -#define CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB 21 - -/* bit definition */ -#define CAPT_CMD_IDX CAPT_TOKEN_ID_LSB -#define CAPT_CMD_BITS (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) -#define CAPT_SOP_IDX 32 
-#define CAPT_SOP_BITS 1 -#define CAPT_PKT_INFO_IDX 16 -#define CAPT_PKT_INFO_BITS 8 -#define CAPT_PKT_TYPE_IDX 0 -#define CAPT_PKT_TYPE_BITS 6 -#define CAPT_HEADER_DATA_IDX 0 -#define CAPT_HEADER_DATA_BITS 16 -#define CAPT_PKT_DATA_IDX 0 -#define CAPT_PKT_DATA_BITS 32 -#define CAPT_WORD_CNT_IDX 0 -#define CAPT_WORD_CNT_BITS 16 -#define CAPT_ACK_TOKEN_ID_IDX 0 -#define CAPT_ACK_TOKEN_ID_BITS 4 -//#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB -//#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1) -//#define CAPT_ACK_PKT_INFO_IDX 20 -//#define CAPT_ACK_PKT_INFO_BITS 8 -//#define CAPT_ACK_MEM_REG_ID1_IDX 20 /* for capt_end_of_packet_written */ -//#define CAPT_ACK_MEM_REG_ID2_IDX 4 /* for capt_end_of_region_written */ -#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB -#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1) -#define CAPT_ACK_SUPER_PKT_LEN_IDX CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB -#define CAPT_ACK_SUPER_PKT_LEN_BITS (CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB - CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB + 1) -#define CAPT_ACK_PKT_INFO_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB -#define CAPT_ACK_PKT_INFO_BITS (CAPT_PACKET_CH_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1) -#define CAPT_ACK_MEM_REGION_ID_IDX CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB -#define CAPT_ACK_MEM_REGION_ID_BITS (CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB - CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB + 1) -#define CAPT_ACK_PKT_TYPE_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB -#define CAPT_ACK_PKT_TYPE_BITS (CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1) -#define CAPT_INIT_TOKEN_INIT_IDX 4 -#define CAPT_INIT_TOKEN_INIT_BITS 22 - - -/* --------------------------------------------------*/ -/* MIPI */ -/* --------------------------------------------------*/ - -#define CAPT_WORD_COUNT_WIDTH 16 -#define CAPT_PKT_CODE_WIDTH 6 -#define CAPT_CHN_NO_WIDTH 2 -#define CAPT_ERROR_INFO_WIDTH 8 
- -#define LONG_PKTCODE_MAX 63 -#define LONG_PKTCODE_MIN 16 -#define SHORT_PKTCODE_MAX 15 - - -/* --------------------------------------------------*/ -/* Packet Info */ -/* --------------------------------------------------*/ -#define CAPT_START_OF_FRAME 0 -#define CAPT_END_OF_FRAME 1 -#define CAPT_START_OF_LINE 2 -#define CAPT_END_OF_LINE 3 -#define CAPT_LINE_PAYLOAD 4 -#define CAPT_GEN_SH_PKT 5 - - -/* --------------------------------------------------*/ -/* Packet Data Type */ -/* --------------------------------------------------*/ - -#define CAPT_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */ -#define CAPT_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */ -#define CAPT_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */ -#define CAPT_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */ -#define CAPT_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */ -#define CAPT_RGB444_DATA 32 /* 10 0000 RGB444 */ -#define CAPT_RGB555_DATA 33 /* 10 0001 RGB555 */ -#define CAPT_RGB565_DATA 34 /* 10 0010 RGB565 */ -#define CAPT_RGB666_DATA 35 /* 10 0011 RGB666 */ -#define CAPT_RGB888_DATA 36 /* 10 0100 RGB888 */ -#define CAPT_RAW6_DATA 40 /* 10 1000 RAW6 */ -#define CAPT_RAW7_DATA 41 /* 10 1001 RAW7 */ -#define CAPT_RAW8_DATA 42 /* 10 1010 RAW8 */ -#define CAPT_RAW10_DATA 43 /* 10 1011 RAW10 */ -#define CAPT_RAW12_DATA 44 /* 10 1100 RAW12 */ -#define CAPT_RAW14_DATA 45 /* 10 1101 RAW14 */ -#define CAPT_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */ -#define CAPT_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */ -#define CAPT_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */ -#define CAPT_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */ -#define CAPT_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */ -#define CAPT_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */ -#define CAPT_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */ -#define CAPT_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 
*/ -#define CAPT_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */ -#define CAPT_SOF_DATA 0 /* 00 0000 frame start */ -#define CAPT_EOF_DATA 1 /* 00 0001 frame end */ -#define CAPT_SOL_DATA 2 /* 00 0010 line start */ -#define CAPT_EOL_DATA 3 /* 00 0011 line end */ -#define CAPT_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */ -#define CAPT_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */ -#define CAPT_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */ -#define CAPT_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */ -#define CAPT_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */ -#define CAPT_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */ -#define CAPT_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */ -#define CAPT_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */ -#define CAPT_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */ -#define CAPT_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */ -#define CAPT_RESERVED_DATA_TYPE_MIN 56 -#define CAPT_RESERVED_DATA_TYPE_MAX 63 -#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MIN 19 -#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MAX 23 -#define CAPT_YUV_RESERVED_DATA_TYPE 27 -#define CAPT_RGB_RESERVED_DATA_TYPE_MIN 37 -#define CAPT_RGB_RESERVED_DATA_TYPE_MAX 39 -#define CAPT_RAW_RESERVED_DATA_TYPE_MIN 46 -#define CAPT_RAW_RESERVED_DATA_TYPE_MAX 47 - - -/* --------------------------------------------------*/ -/* Capture Unit State */ -/* --------------------------------------------------*/ -#define CAPT_FREE_RUN 0 -#define CAPT_NO_SYNC 1 -#define CAPT_SYNC_SWP 2 -#define CAPT_SYNC_MWP 3 -#define CAPT_SYNC_WAIT 4 -#define CAPT_FREEZE 5 -#define CAPT_RUN 6 - - -/* --------------------------------------------------*/ - -#endif /* _isp_capture_defs_h */ - - - - - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/mmu_defs.h 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/mmu_defs.h deleted file mode 100644 index c038f39ffd25..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/mmu_defs.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _mmu_defs_h -#define _mmu_defs_h - -#define _HRT_MMU_INVALIDATE_TLB_REG_IDX 0 -#define _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX 1 - -#define _HRT_MMU_REG_ALIGN 4 - -#endif /* _mmu_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/scalar_processor_2400_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/scalar_processor_2400_params.h deleted file mode 100644 index 9b6c2893d950..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/scalar_processor_2400_params.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _scalar_processor_2400_params_h -#define _scalar_processor_2400_params_h - -#include "cell_params.h" - -#endif /* _scalar_processor_2400_params_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/str2mem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/str2mem_defs.h deleted file mode 100644 index 1cb62444cf68..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/str2mem_defs.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _ST2MEM_DEFS_H -#define _ST2MEM_DEFS_H - -#define _STR2MEM_CRUN_BIT 0x100000 -#define _STR2MEM_CMD_BITS 0x0F0000 -#define _STR2MEM_COUNT_BITS 0x00FFFF - -#define _STR2MEM_BLOCKS_CMD 0xA0000 -#define _STR2MEM_PACKETS_CMD 0xB0000 -#define _STR2MEM_BYTES_CMD 0xC0000 -#define _STR2MEM_BYTES_FROM_PACKET_CMD 0xD0000 - -#define _STR2MEM_SOFT_RESET_REG_ID 0 -#define _STR2MEM_INPUT_ENDIANNESS_REG_ID 1 -#define _STR2MEM_OUTPUT_ENDIANNESS_REG_ID 2 -#define _STR2MEM_BIT_SWAPPING_REG_ID 3 -#define _STR2MEM_BLOCK_SYNC_LEVEL_REG_ID 4 -#define _STR2MEM_PACKET_SYNC_LEVEL_REG_ID 5 -#define _STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID 6 -#define _STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID 7 -#define _STR2MEM_EN_STAT_UPDATE_ID 8 - -#define _STR2MEM_REG_ALIGN 4 - -#endif /* _ST2MEM_DEFS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/streaming_to_mipi_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/streaming_to_mipi_defs.h deleted file mode 100644 index 60143b8743a2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/streaming_to_mipi_defs.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _streaming_to_mipi_defs_h -#define _streaming_to_mipi_defs_h - -#define HIVE_STR_TO_MIPI_VALID_A_BIT 0 -#define HIVE_STR_TO_MIPI_VALID_B_BIT 1 -#define HIVE_STR_TO_MIPI_SOL_BIT 2 -#define HIVE_STR_TO_MIPI_EOL_BIT 3 -#define HIVE_STR_TO_MIPI_SOF_BIT 4 -#define HIVE_STR_TO_MIPI_EOF_BIT 5 -#define HIVE_STR_TO_MIPI_CH_ID_LSB 6 - -#define HIVE_STR_TO_MIPI_DATA_A_LSB (HIVE_STR_TO_MIPI_VALID_B_BIT + 1) - -#endif /* _streaming_to_mipi_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/timed_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/timed_controller_defs.h deleted file mode 100644 index d2b8972b0d9e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/timed_controller_defs.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _timed_controller_defs_h -#define _timed_controller_defs_h - -#define _HRT_TIMED_CONTROLLER_CMD_REG_IDX 0 - -#define _HRT_TIMED_CONTROLLER_REG_ALIGN 4 - -#endif /* _timed_controller_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/var.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/var.h deleted file mode 100644 index 5bc0ad34616e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/var.h +++ /dev/null @@ -1,74 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _HRT_VAR_H -#define _HRT_VAR_H - -#include "version.h" -#include "system_api.h" -#include "hive_types.h" - -#define hrt_int_type_of_char char -#define hrt_int_type_of_uchar unsigned char -#define hrt_int_type_of_short short -#define hrt_int_type_of_ushort unsigned short -#define hrt_int_type_of_int int -#define hrt_int_type_of_uint unsigned int -#define hrt_int_type_of_long long -#define hrt_int_type_of_ulong unsigned long -#define hrt_int_type_of_ptr unsigned int - -#define hrt_host_type_of_char char -#define hrt_host_type_of_uchar unsigned char -#define hrt_host_type_of_short short -#define hrt_host_type_of_ushort unsigned short -#define hrt_host_type_of_int int -#define hrt_host_type_of_uint unsigned int -#define hrt_host_type_of_long long -#define hrt_host_type_of_ulong unsigned long -#define hrt_host_type_of_ptr void* - -#define HRT_TYPE_BYTES(cell, type) (HRT_TYPE_BITS(cell, type)/8) -#define HRT_HOST_TYPE(cell_type) HRTCAT(hrt_host_type_of_, cell_type) -#define HRT_INT_TYPE(type) HRTCAT(hrt_int_type_of_, type) - -#define hrt_scalar_store(cell, type, var, data) \ - HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\ - cell, \ - HRTCAT(HIVE_MEM_,var), \ - HRTCAT(HIVE_ADDR_,var), \ - (HRT_INT_TYPE(type))(data)) - -#define hrt_scalar_load(cell, type, var) \ - (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \ - cell, \ - HRTCAT(HIVE_MEM_,var), \ - HRTCAT(HIVE_ADDR_,var))) - -#define hrt_indexed_store(cell, type, array, index, data) \ - HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\ - cell, \ - HRTCAT(HIVE_MEM_,array), \ - (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)), \ - (HRT_INT_TYPE(type))(data)) - -#define hrt_indexed_load(cell, type, array, index) \ - (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \ - cell, \ - HRTCAT(HIVE_MEM_,array), \ - (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)))) - -#endif /* _HRT_VAR_H */ -#endif diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/version.h deleted file mode 100644 index bbc4948baea9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/version.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef HRT_VERSION_H -#define HRT_VERSION_H -#define HRT_VERSION_MAJOR 1 -#define HRT_VERSION_MINOR 4 -#define HRT_VERSION 1_4 -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/spmem_dump.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/spmem_dump.c deleted file mode 100644 index ddc7a8f05153..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/spmem_dump.c +++ /dev/null @@ -1,3634 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _sp_map_h_ -#define _sp_map_h_ - - -#ifndef _hrt_dummy_use_blob_sp -#define _hrt_dummy_use_blob_sp() -#endif - -#define _hrt_cell_load_program_sp(proc) _hrt_cell_load_program_embedded(proc, sp) - -#ifndef ISP2401 -/* function input_system_acquisition_stop: ADE */ -#else -/* function input_system_acquisition_stop: AD8 */ -#endif - -#ifndef ISP2401 -/* function longjmp: 684E */ -#else -/* function longjmp: 69C1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_HIVE_IF_SRST_MASK -#define HIVE_MEM_HIVE_IF_SRST_MASK scalar_processor_2400_dmem -#define HIVE_ADDR_HIVE_IF_SRST_MASK 0x1C8 -#define HIVE_SIZE_HIVE_IF_SRST_MASK 16 -#else -#endif -#endif -#define HIVE_MEM_sp_HIVE_IF_SRST_MASK scalar_processor_2400_dmem -#define HIVE_ADDR_sp_HIVE_IF_SRST_MASK 0x1C8 -#define HIVE_SIZE_sp_HIVE_IF_SRST_MASK 16 - -#ifndef ISP2401 -/* function tmpmem_init_dmem: 6580 */ -#else -/* function tmpmem_init_dmem: 66BB */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_receive_ack: 5EC4 */ -#else -/* function ia_css_isys_sp_token_map_receive_ack: 5FFF */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_addr_B: 332C */ -#else -/* function ia_css_dmaproxy_sp_set_addr_B: 3520 */ - -/* function ia_css_pipe_data_init_tagger_resources: A4F */ -#endif - -/* function debug_buffer_set_ddr_addr: DD */ - -#ifndef ISP2401 -/* function receiver_port_reg_load: AC2 */ -#else -/* function receiver_port_reg_load: ABC */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_mipi -#define HIVE_MEM_vbuf_mipi scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_mipi 0x631C -#else -#define HIVE_ADDR_vbuf_mipi 0x6378 -#endif -#define HIVE_SIZE_vbuf_mipi 12 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_mipi scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_mipi 0x631C -#else -#define HIVE_ADDR_sp_vbuf_mipi 0x6378 -#endif -#define HIVE_SIZE_sp_vbuf_mipi 12 - -#ifndef ISP2401 -/* function 
ia_css_event_sp_decode: 351D */ -#else -/* function ia_css_event_sp_decode: 3711 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_get_size: 48A5 */ -#else -/* function ia_css_queue_get_size: 4B2D */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_load: 4EE6 */ -#else -/* function ia_css_queue_load: 5144 */ -#endif - -#ifndef ISP2401 -/* function setjmp: 6857 */ -#else -/* function setjmp: 69CA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp2host_isys_event_queue -#define HIVE_MEM_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x4684 -#else -#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x46CC -#endif -#define HIVE_SIZE_sem_for_sp2host_isys_event_queue 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x4684 -#else -#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x46CC -#endif -#define HIVE_SIZE_sp_sem_for_sp2host_isys_event_queue 20 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_wait_for_ack: 6E07 */ -#else -/* function ia_css_dmaproxy_sp_wait_for_ack: 6F4B */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_func: 510B */ -#else -/* function ia_css_sp_rawcopy_func: 5369 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_marked: 29F7 */ -#else -/* function ia_css_tagger_buf_sp_pop_marked: 2B99 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_stage -#define HIVE_MEM_isp_stage scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_stage 0x5C00 -#else -#define HIVE_ADDR_isp_stage 0x5C60 -#endif -#define HIVE_SIZE_isp_stage 832 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_stage scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_stage 0x5C00 -#else -#define HIVE_ADDR_sp_isp_stage 0x5C60 -#endif -#define HIVE_SIZE_sp_isp_stage 832 - 
-#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_raw -#define HIVE_MEM_vbuf_raw scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_raw 0x2F4 -#else -#define HIVE_ADDR_vbuf_raw 0x30C -#endif -#define HIVE_SIZE_vbuf_raw 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_raw scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_raw 0x2F4 -#else -#define HIVE_ADDR_sp_vbuf_raw 0x30C -#endif -#define HIVE_SIZE_sp_vbuf_raw 4 - -#ifndef ISP2401 -/* function ia_css_sp_bin_copy_func: 5032 */ -#else -/* function ia_css_sp_bin_copy_func: 5290 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_item_store: 4C34 */ -#else -/* function ia_css_queue_item_store: 4E92 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AA0 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AFC -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_metadata_bufs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AA0 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AFC -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4AB4 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4B10 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_buffer_bufs 160 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem 
-#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4AB4 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4B10 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 160 - -/* function sp_start_isp: 45D */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_binary_group -#define HIVE_MEM_sp_binary_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_binary_group 0x5FF0 -#else -#define HIVE_ADDR_sp_binary_group 0x6050 -#endif -#define HIVE_SIZE_sp_binary_group 32 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_binary_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_binary_group 0x5FF0 -#else -#define HIVE_ADDR_sp_sp_binary_group 0x6050 -#endif -#define HIVE_SIZE_sp_sp_binary_group 32 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_sw_state -#define HIVE_MEM_sp_sw_state scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sw_state 0x62AC -#else -#define HIVE_ADDR_sp_sw_state 0x6308 -#endif -#define HIVE_SIZE_sp_sw_state 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_sw_state scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_sw_state 0x62AC -#else -#define HIVE_ADDR_sp_sp_sw_state 0x6308 -#endif -#define HIVE_SIZE_sp_sp_sw_state 4 - -#ifndef ISP2401 -/* function ia_css_thread_sp_main: D5B */ -#else -/* function ia_css_thread_sp_main: D50 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_internal_buffers: 3723 */ -#else -/* function ia_css_ispctrl_sp_init_internal_buffers: 3952 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp2host_psys_event_queue_handle -#define HIVE_MEM_sp2host_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x4B54 -#else -#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x4BB0 -#endif -#define HIVE_SIZE_sp2host_psys_event_queue_handle 12 -#else -#endif -#endif -#define 
HIVE_MEM_sp_sp2host_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x4B54 -#else -#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x4BB0 -#endif -#define HIVE_SIZE_sp_sp2host_psys_event_queue_handle 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp2host_psys_event_queue -#define HIVE_MEM_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x4698 -#else -#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x46E0 -#endif -#define HIVE_SIZE_sem_for_sp2host_psys_event_queue 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x4698 -#else -#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x46E0 -#endif -#define HIVE_SIZE_sp_sem_for_sp2host_psys_event_queue 20 - -#ifndef ISP2401 -/* function ia_css_tagger_sp_propagate_frame: 2410 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_stop_copy_preview -#define HIVE_MEM_sp_stop_copy_preview scalar_processor_2400_dmem -#define HIVE_ADDR_sp_stop_copy_preview 0x6290 -#define HIVE_SIZE_sp_stop_copy_preview 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_stop_copy_preview scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_stop_copy_preview 0x6290 -#define HIVE_SIZE_sp_sp_stop_copy_preview 4 -#else -/* function ia_css_tagger_sp_propagate_frame: 2460 */ -#endif - -#ifndef ISP2401 -/* function input_system_reg_load: B17 */ -#else -/* function input_system_reg_load: B11 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_handles -#define HIVE_MEM_vbuf_handles scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_handles 0x6328 -#else -#define HIVE_ADDR_vbuf_handles 0x6384 -#endif -#define HIVE_SIZE_vbuf_handles 960 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_handles scalar_processor_2400_dmem 
-#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_handles 0x6328 -#else -#define HIVE_ADDR_sp_vbuf_handles 0x6384 -#endif -#define HIVE_SIZE_sp_vbuf_handles 960 - -#ifndef ISP2401 -/* function ia_css_queue_store: 4D9A */ - -/* function ia_css_sp_flash_register: 2C2C */ -#else -/* function ia_css_queue_store: 4FF8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_dummy_function: 5652 */ -#else -/* function ia_css_sp_flash_register: 2DCE */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_create: 5B37 */ -#else -/* function ia_css_isys_sp_backend_create: 5C72 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_init: 1833 */ -#else -/* function ia_css_pipeline_sp_init: 186D */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_configure: 2300 */ -#else -/* function ia_css_tagger_sp_configure: 2350 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_end_binary: 3566 */ -#else -/* function ia_css_ispctrl_sp_end_binary: 375A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs -#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4B60 -#else -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4BBC -#endif -#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4B60 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4BBC -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20 - -#ifndef ISP2401 -/* function receiver_port_reg_store: AC9 */ -#else -/* function receiver_port_reg_store: AC3 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef 
HIVE_MEM_event_is_pending_mask -#define HIVE_MEM_event_is_pending_mask scalar_processor_2400_dmem -#define HIVE_ADDR_event_is_pending_mask 0x5C -#define HIVE_SIZE_event_is_pending_mask 44 -#else -#endif -#endif -#define HIVE_MEM_sp_event_is_pending_mask scalar_processor_2400_dmem -#define HIVE_ADDR_sp_event_is_pending_mask 0x5C -#define HIVE_SIZE_sp_event_is_pending_mask 44 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cb_elems_frame -#define HIVE_MEM_sp_all_cb_elems_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cb_elems_frame 0x46AC -#else -#define HIVE_ADDR_sp_all_cb_elems_frame 0x46F4 -#endif -#define HIVE_SIZE_sp_all_cb_elems_frame 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cb_elems_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x46AC -#else -#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x46F4 -#endif -#define HIVE_SIZE_sp_sp_all_cb_elems_frame 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp2host_isys_event_queue_handle -#define HIVE_MEM_sp2host_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x4B74 -#else -#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x4BD0 -#endif -#define HIVE_SIZE_sp2host_isys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_sp2host_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x4B74 -#else -#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x4BD0 -#endif -#define HIVE_SIZE_sp_sp2host_isys_event_queue_handle 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host_sp_com -#define HIVE_MEM_host_sp_com scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host_sp_com 0x4114 -#else -#define HIVE_ADDR_host_sp_com 0x4134 -#endif -#define HIVE_SIZE_host_sp_com 220 -#else -#endif -#endif -#define HIVE_MEM_sp_host_sp_com scalar_processor_2400_dmem -#ifndef 
ISP2401 -#define HIVE_ADDR_sp_host_sp_com 0x4114 -#else -#define HIVE_ADDR_sp_host_sp_com 0x4134 -#endif -#define HIVE_SIZE_sp_host_sp_com 220 - -#ifndef ISP2401 -/* function ia_css_queue_get_free_space: 49F9 */ -#else -/* function ia_css_queue_get_free_space: 4C57 */ -#endif - -#ifndef ISP2401 -/* function exec_image_pipe: 6C4 */ -#else -/* function exec_image_pipe: 658 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_init_dmem_data -#define HIVE_MEM_sp_init_dmem_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_init_dmem_data 0x62B0 -#else -#define HIVE_ADDR_sp_init_dmem_data 0x630C -#endif -#define HIVE_SIZE_sp_init_dmem_data 24 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_init_dmem_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_init_dmem_data 0x62B0 -#else -#define HIVE_ADDR_sp_sp_init_dmem_data 0x630C -#endif -#define HIVE_SIZE_sp_sp_init_dmem_data 24 - -#ifndef ISP2401 -/* function ia_css_sp_metadata_start: 5914 */ -#else -/* function ia_css_sp_metadata_start: 5A4F */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_init_buffer_queues: 2C9B */ -#else -/* function ia_css_bufq_sp_init_buffer_queues: 2E3D */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_stop: 1816 */ -#else -/* function ia_css_pipeline_sp_stop: 1850 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_connect_pipes: 27EA */ -#else -/* function ia_css_tagger_sp_connect_pipes: 283A */ -#endif - -#ifndef ISP2401 -/* function sp_isys_copy_wait: 70D */ -#else -/* function sp_isys_copy_wait: 6A1 */ -#endif - -/* function is_isp_debug_buffer_full: 337 */ - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_configure_channel_from_info: 32AF */ -#else -/* function ia_css_dmaproxy_sp_configure_channel_from_info: 3490 */ -#endif - -#ifndef ISP2401 -/* function encode_and_post_timer_event: A30 */ -#else -/* function encode_and_post_timer_event: 9C4 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef 
HIVE_MEM_sp_per_frame_data -#define HIVE_MEM_sp_per_frame_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_per_frame_data 0x41F0 -#else -#define HIVE_ADDR_sp_per_frame_data 0x4210 -#endif -#define HIVE_SIZE_sp_per_frame_data 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_per_frame_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_per_frame_data 0x41F0 -#else -#define HIVE_ADDR_sp_sp_per_frame_data 0x4210 -#endif -#define HIVE_SIZE_sp_sp_per_frame_data 4 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_vbuf_dequeue: 62D4 */ -#else -/* function ia_css_rmgr_sp_vbuf_dequeue: 640F */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_psys_event_queue_handle -#define HIVE_MEM_host2sp_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x4B80 -#else -#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x4BDC -#endif -#define HIVE_SIZE_host2sp_psys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x4B80 -#else -#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x4BDC -#endif -#define HIVE_SIZE_sp_host2sp_psys_event_queue_handle 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_xmem_bin_addr -#define HIVE_MEM_xmem_bin_addr scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_xmem_bin_addr 0x41F4 -#else -#define HIVE_ADDR_xmem_bin_addr 0x4214 -#endif -#define HIVE_SIZE_xmem_bin_addr 4 -#else -#endif -#endif -#define HIVE_MEM_sp_xmem_bin_addr scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_xmem_bin_addr 0x41F4 -#else -#define HIVE_ADDR_sp_xmem_bin_addr 0x4214 -#endif -#define HIVE_SIZE_sp_xmem_bin_addr 4 - -#ifndef ISP2401 -/* function tmr_clock_init: 65A0 */ -#else -/* function tmr_clock_init: 66DB */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_run: 1403 
*/ -#else -/* function ia_css_pipeline_sp_run: 1424 */ -#endif - -#ifndef ISP2401 -/* function memcpy: 68F7 */ -#else -/* function memcpy: 6A6A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GP_DEVICE_BASE -#define HIVE_MEM_GP_DEVICE_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_GP_DEVICE_BASE 0x2FC -#else -#define HIVE_ADDR_GP_DEVICE_BASE 0x314 -#endif -#define HIVE_SIZE_GP_DEVICE_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_GP_DEVICE_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x2FC -#else -#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x314 -#endif -#define HIVE_SIZE_sp_GP_DEVICE_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_thread_sp_ready_queue -#define HIVE_MEM_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x1E0 -#else -#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x1E4 -#endif -#define HIVE_SIZE_ia_css_thread_sp_ready_queue 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x1E0 -#else -#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x1E4 -#endif -#define HIVE_SIZE_sp_ia_css_thread_sp_ready_queue 12 - -#ifndef ISP2401 -/* function input_system_reg_store: B1E */ -#else -/* function input_system_reg_store: B18 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_start: 5D4D */ -#else -/* function ia_css_isys_sp_frontend_start: 5E88 */ -#endif - -#ifndef ISP2401 -/* function ia_css_uds_sp_scale_params: 6600 */ -#else -/* function ia_css_uds_sp_scale_params: 6773 */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_increase_size: E40 */ -#else -/* function ia_css_circbuf_increase_size: E35 */ -#endif - -#ifndef ISP2401 -/* function __divu: 6875 */ -#else -/* function __divu: 69E8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_get_state: 
C83 */ -#else -/* function ia_css_thread_sp_get_state: C78 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_cont_capt_stop -#define HIVE_MEM_sem_for_cont_capt_stop scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_cont_capt_stop 0x46BC -#else -#define HIVE_ADDR_sem_for_cont_capt_stop 0x4704 -#endif -#define HIVE_SIZE_sem_for_cont_capt_stop 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_cont_capt_stop scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x46BC -#else -#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x4704 -#endif -#define HIVE_SIZE_sp_sem_for_cont_capt_stop 20 - -#ifndef ISP2401 -/* function thread_fiber_sp_main: E39 */ -#else -/* function thread_fiber_sp_main: E2E */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_pipe_thread -#define HIVE_MEM_sp_isp_pipe_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_pipe_thread 0x4800 -#define HIVE_SIZE_sp_isp_pipe_thread 340 -#else -#define HIVE_ADDR_sp_isp_pipe_thread 0x4848 -#define HIVE_SIZE_sp_isp_pipe_thread 360 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_pipe_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x4800 -#define HIVE_SIZE_sp_sp_isp_pipe_thread 340 -#else -#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x4848 -#define HIVE_SIZE_sp_sp_isp_pipe_thread 360 -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_handle_parameter_sets: 128A */ -#else -/* function ia_css_parambuf_sp_handle_parameter_sets: 127F */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_set_state: 5943 */ -#else -/* function ia_css_spctrl_sp_set_state: 5A7E */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_signal: 6AF7 */ -#else -/* function ia_css_thread_sem_sp_signal: 6C6C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_IRQ_BASE -#define HIVE_MEM_IRQ_BASE scalar_processor_2400_dmem -#define 
HIVE_ADDR_IRQ_BASE 0x2C -#define HIVE_SIZE_IRQ_BASE 16 -#else -#endif -#endif -#define HIVE_MEM_sp_IRQ_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_IRQ_BASE 0x2C -#define HIVE_SIZE_sp_IRQ_BASE 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_TIMED_CTRL_BASE -#define HIVE_MEM_TIMED_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_TIMED_CTRL_BASE 0x40 -#define HIVE_SIZE_TIMED_CTRL_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_TIMED_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_TIMED_CTRL_BASE 0x40 -#define HIVE_SIZE_sp_TIMED_CTRL_BASE 4 - -#ifndef ISP2401 -/* function ia_css_isys_sp_isr: 6FDC */ - -/* function ia_css_isys_sp_generate_exp_id: 60E5 */ -#else -/* function ia_css_isys_sp_isr: 7139 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_init: 61CF */ -#else -/* function ia_css_isys_sp_generate_exp_id: 6220 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_init: 6BC8 */ -#else -/* function ia_css_rmgr_sp_init: 630A */ -#endif - -#ifndef ISP2401 -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_is_isp_requested -#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_is_isp_requested 0x308 -#define HIVE_SIZE_is_isp_requested 4 -#else -#endif -#endif -#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_sp_is_isp_requested 0x308 -#define HIVE_SIZE_sp_is_isp_requested 4 -#else -/* function ia_css_thread_sem_sp_init: 6D3B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_cb_frame -#define HIVE_MEM_sem_for_reading_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_cb_frame 0x46D0 -#else -#define HIVE_ADDR_sem_for_reading_cb_frame 0x4718 -#endif -#define HIVE_SIZE_sem_for_reading_cb_frame 40 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x46D0 -#else -#define 
HIVE_ADDR_sp_sem_for_reading_cb_frame 0x4718 -#endif -#define HIVE_SIZE_sp_sem_for_reading_cb_frame 40 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_execute: 3217 */ -#else -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_is_isp_requested -#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_is_isp_requested 0x320 -#define HIVE_SIZE_is_isp_requested 4 -#else -#endif -#endif -#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_sp_is_isp_requested 0x320 -#define HIVE_SIZE_sp_is_isp_requested 4 - -/* function ia_css_dmaproxy_sp_execute: 33F6 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_is_empty: 48E0 */ -#else -/* function ia_css_queue_is_empty: 7098 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_has_stopped: 180C */ -#else -/* function ia_css_pipeline_sp_has_stopped: 1846 */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_extract: F44 */ -#else -/* function ia_css_circbuf_extract: F39 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_is_locked_from_start: 2B0D */ -#else -/* function ia_css_tagger_buf_sp_is_locked_from_start: 2CAF */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_current_sp_thread -#define HIVE_MEM_current_sp_thread scalar_processor_2400_dmem -#define HIVE_ADDR_current_sp_thread 0x1DC -#define HIVE_SIZE_current_sp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_current_sp_thread scalar_processor_2400_dmem -#define HIVE_ADDR_sp_current_sp_thread 0x1DC -#define HIVE_SIZE_sp_current_sp_thread 4 - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_get_spid: 594A */ -#else -/* function ia_css_spctrl_sp_get_spid: 5A85 */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_reset_buffers: 2D22 */ -#else -/* function ia_css_bufq_sp_reset_buffers: 2EC4 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read_byte_addr: 6E35 */ -#else -/* function ia_css_dmaproxy_sp_read_byte_addr: 6F79 */ -#endif - -#ifndef ISP2401 -/* 
function ia_css_rmgr_sp_uninit: 61C8 */ -#else -/* function ia_css_rmgr_sp_uninit: 6303 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_stack -#define HIVE_MEM_sp_threads_stack scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_stack 0x164 -#define HIVE_SIZE_sp_threads_stack 28 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_stack scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_stack 0x164 -#define HIVE_SIZE_sp_sp_threads_stack 28 - -#ifndef ISP2401 -/* function ia_css_circbuf_peek: F26 */ -#else -/* function ia_css_circbuf_peek: F1B */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_wait_for_in_param: 1053 */ -#else -/* function ia_css_parambuf_sp_wait_for_in_param: 1048 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_get_exp_id: 5FAD */ -#else -/* function ia_css_isys_sp_token_map_get_exp_id: 60E8 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cb_elems_param -#define HIVE_MEM_sp_all_cb_elems_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cb_elems_param 0x46F8 -#else -#define HIVE_ADDR_sp_all_cb_elems_param 0x4740 -#endif -#define HIVE_SIZE_sp_all_cb_elems_param 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cb_elems_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x46F8 -#else -#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x4740 -#endif -#define HIVE_SIZE_sp_sp_all_cb_elems_param 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_pipeline_sp_curr_binary_id -#define HIVE_MEM_pipeline_sp_curr_binary_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x1EC -#else -#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x1F0 -#endif -#define HIVE_SIZE_pipeline_sp_curr_binary_id 4 -#else -#endif -#endif -#define HIVE_MEM_sp_pipeline_sp_curr_binary_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x1EC 
-#else -#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x1F0 -#endif -#define HIVE_SIZE_sp_pipeline_sp_curr_binary_id 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_frame_desc -#define HIVE_MEM_sp_all_cbs_frame_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_frame_desc 0x4708 -#else -#define HIVE_ADDR_sp_all_cbs_frame_desc 0x4750 -#endif -#define HIVE_SIZE_sp_all_cbs_frame_desc 8 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_frame_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x4708 -#else -#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x4750 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_frame_desc 8 - -#ifndef ISP2401 -/* function sp_isys_copy_func_v2: 706 */ -#else -/* function sp_isys_copy_func_v2: 69A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_cb_param -#define HIVE_MEM_sem_for_reading_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_cb_param 0x4710 -#else -#define HIVE_ADDR_sem_for_reading_cb_param 0x4758 -#endif -#define HIVE_SIZE_sem_for_reading_cb_param 40 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x4710 -#else -#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x4758 -#endif -#define HIVE_SIZE_sp_sem_for_reading_cb_param 40 - -#ifndef ISP2401 -/* function ia_css_queue_get_used_space: 49AD */ -#else -/* function ia_css_queue_get_used_space: 4C0B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_cont_capt_start -#define HIVE_MEM_sem_for_cont_capt_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_cont_capt_start 0x4738 -#else -#define HIVE_ADDR_sem_for_cont_capt_start 0x4780 -#endif -#define HIVE_SIZE_sem_for_cont_capt_start 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_cont_capt_start scalar_processor_2400_dmem -#ifndef 
ISP2401 -#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x4738 -#else -#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x4780 -#endif -#define HIVE_SIZE_sp_sem_for_cont_capt_start 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_tmp_heap -#define HIVE_MEM_tmp_heap scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_tmp_heap 0x6010 -#else -#define HIVE_ADDR_tmp_heap 0x6070 -#endif -#define HIVE_SIZE_tmp_heap 640 -#else -#endif -#endif -#define HIVE_MEM_sp_tmp_heap scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_tmp_heap 0x6010 -#else -#define HIVE_ADDR_sp_tmp_heap 0x6070 -#endif -#define HIVE_SIZE_sp_tmp_heap 640 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_get_num_vbuf: 64D8 */ -#else -/* function ia_css_rmgr_sp_get_num_vbuf: 6613 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_output_compute_dma_info: 3F49 */ -#else -/* function ia_css_ispctrl_sp_output_compute_dma_info: 418C */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_lock_exp_id: 20CD */ -#else -/* function ia_css_tagger_sp_lock_exp_id: 211D */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4B8C -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4BE8 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_s3a_bufs 60 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4B8C -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4BE8 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 60 - -#ifndef ISP2401 -/* function ia_css_queue_is_full: 4A44 */ -#else -/* function ia_css_queue_is_full: 4CA2 */ -#endif - -/* function debug_buffer_init_isp: E4 */ - -#ifndef ISP2401 -/* function 
ia_css_isys_sp_frontend_uninit: 5D07 */ -#else -/* function ia_css_isys_sp_frontend_uninit: 5E42 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_exp_id_is_locked: 2003 */ -#else -/* function ia_css_tagger_sp_exp_id_is_locked: 2053 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem -#define HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x66E8 -#else -#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x6744 -#endif -#define HIVE_SIZE_ia_css_rmgr_sp_mipi_frame_sem 60 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x66E8 -#else -#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x6744 -#endif -#define HIVE_SIZE_sp_ia_css_rmgr_sp_mipi_frame_sem 60 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_dump: 62AF */ -#else -/* function ia_css_rmgr_sp_refcount_dump: 63EA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4BC8 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4C24 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_isp_parameters_id 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4BC8 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4C24 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_pipe_threads -#define HIVE_MEM_sp_pipe_threads scalar_processor_2400_dmem -#define 
HIVE_ADDR_sp_pipe_threads 0x150 -#define HIVE_SIZE_sp_pipe_threads 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_pipe_threads scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_pipe_threads 0x150 -#define HIVE_SIZE_sp_sp_pipe_threads 20 - -#ifndef ISP2401 -/* function sp_event_proxy_func: 71B */ -#else -/* function sp_event_proxy_func: 6AF */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_isys_event_queue_handle -#define HIVE_MEM_host2sp_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x4BDC -#else -#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x4C38 -#endif -#define HIVE_SIZE_host2sp_isys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x4BDC -#else -#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x4C38 -#endif -#define HIVE_SIZE_sp_host2sp_isys_event_queue_handle 12 - -#ifndef ISP2401 -/* function ia_css_thread_sp_yield: 6A70 */ -#else -/* function ia_css_thread_sp_yield: 6BEA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_param_desc -#define HIVE_MEM_sp_all_cbs_param_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_param_desc 0x474C -#else -#define HIVE_ADDR_sp_all_cbs_param_desc 0x4794 -#endif -#define HIVE_SIZE_sp_all_cbs_param_desc 8 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_param_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x474C -#else -#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x4794 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_param_desc 8 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb -#define HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x5BF4 -#else 
-#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x5C50 -#endif -#define HIVE_SIZE_ia_css_dmaproxy_sp_invalidate_tlb 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x5BF4 -#else -#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x5C50 -#endif -#define HIVE_SIZE_sp_ia_css_dmaproxy_sp_invalidate_tlb 4 - -#ifndef ISP2401 -/* function ia_css_thread_sp_fork: D10 */ -#else -/* function ia_css_thread_sp_fork: D05 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_destroy: 27F4 */ -#else -/* function ia_css_tagger_sp_destroy: 2844 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_vmem_read: 31B7 */ -#else -/* function ia_css_dmaproxy_sp_vmem_read: 3396 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ifmtr_sp_init: 6136 */ -#else -/* function ia_css_ifmtr_sp_init: 6271 */ -#endif - -#ifndef ISP2401 -/* function initialize_sp_group: 6D4 */ -#else -/* function initialize_sp_group: 668 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_peek: 2919 */ -#else -/* function ia_css_tagger_buf_sp_peek: 2ABB */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_init: D3C */ -#else -/* function ia_css_thread_sp_init: D31 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_reset_exp_id: 60DD */ -#else -/* function ia_css_isys_sp_reset_exp_id: 6218 */ -#endif - -#ifndef ISP2401 -/* function qos_scheduler_update_fps: 65F0 */ -#else -/* function qos_scheduler_update_fps: 6763 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_set_stream_base_addr: 461E */ -#else -/* function ia_css_ispctrl_sp_set_stream_base_addr: 4879 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_DMEM_BASE -#define HIVE_MEM_ISP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_DMEM_BASE 0x10 -#define HIVE_SIZE_ISP_DMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_DMEM_BASE 
scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_DMEM_BASE 0x10 -#define HIVE_SIZE_sp_ISP_DMEM_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_SP_DMEM_BASE -#define HIVE_MEM_SP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_SP_DMEM_BASE 0x4 -#define HIVE_SIZE_SP_DMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_SP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_SP_DMEM_BASE 0x4 -#define HIVE_SIZE_sp_SP_DMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read: 322D */ -#else -/* function __ia_css_queue_is_empty_text: 4B68 */ - -/* function ia_css_dmaproxy_sp_read: 340C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_raw_copy_line_count -#define HIVE_MEM_raw_copy_line_count scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_raw_copy_line_count 0x2C8 -#else -#define HIVE_ADDR_raw_copy_line_count 0x2E0 -#endif -#define HIVE_SIZE_raw_copy_line_count 4 -#else -#endif -#endif -#define HIVE_MEM_sp_raw_copy_line_count scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_raw_copy_line_count 0x2C8 -#else -#define HIVE_ADDR_sp_raw_copy_line_count 0x2E0 -#endif -#define HIVE_SIZE_sp_raw_copy_line_count 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_tag_cmd_queue_handle -#define HIVE_MEM_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x4BE8 -#else -#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x4C44 -#endif -#define HIVE_SIZE_host2sp_tag_cmd_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x4BE8 -#else -#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x4C44 -#endif -#define HIVE_SIZE_sp_host2sp_tag_cmd_queue_handle 12 - -#ifndef ISP2401 -/* function ia_css_queue_peek: 4923 */ -#else -/* function ia_css_queue_peek: 4B81 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS 
-#ifndef HIVE_MEM_ia_css_flash_sp_frame_cnt -#define HIVE_MEM_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x4A94 -#else -#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x4AF0 -#endif -#define HIVE_SIZE_ia_css_flash_sp_frame_cnt 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x4A94 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x4AF0 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_frame_cnt 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_event_can_send_token_mask -#define HIVE_MEM_event_can_send_token_mask scalar_processor_2400_dmem -#define HIVE_ADDR_event_can_send_token_mask 0x88 -#define HIVE_SIZE_event_can_send_token_mask 44 -#else -#endif -#endif -#define HIVE_MEM_sp_event_can_send_token_mask scalar_processor_2400_dmem -#define HIVE_ADDR_sp_event_can_send_token_mask 0x88 -#define HIVE_SIZE_sp_event_can_send_token_mask 44 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_thread -#define HIVE_MEM_isp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_thread 0x5F40 -#else -#define HIVE_ADDR_isp_thread 0x5FA0 -#endif -#define HIVE_SIZE_isp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_thread 0x5F40 -#else -#define HIVE_ADDR_sp_isp_thread 0x5FA0 -#endif -#define HIVE_SIZE_sp_isp_thread 4 - -#ifndef ISP2401 -/* function encode_and_post_sp_event_non_blocking: A78 */ -#else -/* function encode_and_post_sp_event_non_blocking: A0C */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_destroy: 5DDF */ -#else -/* function ia_css_isys_sp_frontend_destroy: 5F1A */ -#endif - -/* function is_ddr_debug_buffer_full: 2CC */ - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_stop: 5D1F */ -#else -/* function ia_css_isys_sp_frontend_stop: 5E5A */ -#endif 
- -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_init: 607B */ -#else -/* function ia_css_isys_sp_token_map_init: 61B6 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 2969 */ -#else -/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 2B0B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_fiber -#define HIVE_MEM_sp_threads_fiber scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_fiber 0x19C -#define HIVE_SIZE_sp_threads_fiber 28 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_fiber scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_fiber 0x19C -#define HIVE_SIZE_sp_sp_threads_fiber 28 - -#ifndef ISP2401 -/* function encode_and_post_sp_event: A01 */ -#else -/* function encode_and_post_sp_event: 995 */ -#endif - -/* function debug_enqueue_ddr: EE */ - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_init_vbuf: 626A */ -#else -/* function ia_css_rmgr_sp_refcount_init_vbuf: 63A5 */ -#endif - -#ifndef ISP2401 -/* function dmaproxy_sp_read_write: 6EE4 */ -#else -/* function dmaproxy_sp_read_write: 7017 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer -#define HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5BF8 -#else -#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5C54 -#endif -#define HIVE_SIZE_ia_css_dmaproxy_isp_dma_cmd_buffer 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5BF8 -#else -#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5C54 -#endif -#define HIVE_SIZE_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_buffer_queue_handle -#define HIVE_MEM_host2sp_buffer_queue_handle scalar_processor_2400_dmem -#ifndef 
ISP2401 -#define HIVE_ADDR_host2sp_buffer_queue_handle 0x4BF4 -#else -#define HIVE_ADDR_host2sp_buffer_queue_handle 0x4C50 -#endif -#define HIVE_SIZE_host2sp_buffer_queue_handle 480 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x4BF4 -#else -#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x4C50 -#endif -#define HIVE_SIZE_sp_host2sp_buffer_queue_handle 480 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_in_service -#define HIVE_MEM_ia_css_flash_sp_in_service scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3178 -#else -#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3198 -#endif -#define HIVE_SIZE_ia_css_flash_sp_in_service 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_in_service scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3178 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3198 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_in_service 4 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_process: 6BF0 */ -#else -/* function ia_css_dmaproxy_sp_process: 6D63 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_mark_from_end: 2BF1 */ -#else -/* function ia_css_tagger_buf_sp_mark_from_end: 2D93 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_rcv_acquire_ack: 59EC */ -#else -/* function ia_css_isys_sp_backend_rcv_acquire_ack: 5B27 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_pre_acquire_request: 5A02 */ -#else -/* function ia_css_isys_sp_backend_pre_acquire_request: 5B3D */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_cs: 3653 */ -#else -/* function ia_css_ispctrl_sp_init_cs: 3855 */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_init: 5958 */ -#else -/* function ia_css_spctrl_sp_init: 5A93 */ -#endif - -#ifndef ISP2401 -/* function 
sp_event_proxy_init: 730 */ -#else -/* function sp_event_proxy_init: 6C4 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4DD4 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4E30 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_previous_clock_tick 40 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4DD4 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4E30 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 40 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_output -#define HIVE_MEM_sp_output scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_output 0x41F8 -#else -#define HIVE_ADDR_sp_output 0x4218 -#endif -#define HIVE_SIZE_sp_output 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_output scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_output 0x41F8 -#else -#define HIVE_ADDR_sp_sp_output 0x4218 -#endif -#define HIVE_SIZE_sp_sp_output 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues -#define HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4DFC -#else -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4E58 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 
0x4DFC -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4E58 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_CTRL_BASE -#define HIVE_MEM_ISP_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_CTRL_BASE 0x8 -#define HIVE_SIZE_ISP_CTRL_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_CTRL_BASE 0x8 -#define HIVE_SIZE_sp_ISP_CTRL_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_INPUT_FORMATTER_BASE -#define HIVE_MEM_INPUT_FORMATTER_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_INPUT_FORMATTER_BASE 0x4C -#define HIVE_SIZE_INPUT_FORMATTER_BASE 16 -#else -#endif -#endif -#define HIVE_MEM_sp_INPUT_FORMATTER_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_INPUT_FORMATTER_BASE 0x4C -#define HIVE_SIZE_sp_INPUT_FORMATTER_BASE 16 - -#ifndef ISP2401 -/* function sp_dma_proxy_reset_channels: 3487 */ -#else -/* function sp_dma_proxy_reset_channels: 367B */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_acquire: 5B0D */ -#else -/* function ia_css_isys_sp_backend_acquire: 5C48 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_update_size: 28E8 */ -#else -/* function ia_css_tagger_sp_update_size: 2A8A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_host_sp_queue -#define HIVE_MEM_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x511C -#else -#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x5178 -#endif -#define HIVE_SIZE_ia_css_bufq_host_sp_queue 2008 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x511C -#else -#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x5178 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_host_sp_queue 2008 - -#ifndef ISP2401 -/* 
function thread_fiber_sp_create: DA8 */ -#else -/* function thread_fiber_sp_create: D9D */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_increments: 3319 */ -#else -/* function ia_css_dmaproxy_sp_set_increments: 350D */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_writing_cb_frame -#define HIVE_MEM_sem_for_writing_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_writing_cb_frame 0x4754 -#else -#define HIVE_ADDR_sem_for_writing_cb_frame 0x479C -#endif -#define HIVE_SIZE_sem_for_writing_cb_frame 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_writing_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x4754 -#else -#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x479C -#endif -#define HIVE_SIZE_sp_sem_for_writing_cb_frame 20 - -#ifndef ISP2401 -/* function receiver_reg_store: AD7 */ -#else -/* function receiver_reg_store: AD1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_writing_cb_param -#define HIVE_MEM_sem_for_writing_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_writing_cb_param 0x4768 -#else -#define HIVE_ADDR_sem_for_writing_cb_param 0x47B0 -#endif -#define HIVE_SIZE_sem_for_writing_cb_param 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_writing_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x4768 -#else -#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x47B0 -#endif -#define HIVE_SIZE_sp_sem_for_writing_cb_param 20 - -/* function sp_start_isp_entry: 453 */ -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifdef HIVE_ADDR_sp_start_isp_entry -#endif -#define HIVE_ADDR_sp_start_isp_entry 0x453 -#endif -#define HIVE_ADDR_sp_sp_start_isp_entry 0x453 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_unmark_all: 2B75 */ -#else -/* function ia_css_tagger_buf_sp_unmark_all: 2D17 */ -#endif - -#ifndef ISP2401 -/* function 
ia_css_tagger_buf_sp_unmark_from_start: 2BB6 */ -#else -/* function ia_css_tagger_buf_sp_unmark_from_start: 2D58 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_channel_acquire: 34B3 */ -#else -/* function ia_css_dmaproxy_sp_channel_acquire: 36A7 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_add_num_vbuf: 64B4 */ -#else -/* function ia_css_rmgr_sp_add_num_vbuf: 65EF */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_create: 60C4 */ -#else -/* function ia_css_isys_sp_token_map_create: 61FF */ -#endif - -#ifndef ISP2401 -/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 3183 */ -#else -/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 3362 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_acquire_buf_elem: 1FDB */ -#else -/* function ia_css_tagger_sp_acquire_buf_elem: 202B */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_is_dynamic_buffer: 306C */ -#else -/* function ia_css_bufq_sp_is_dynamic_buffer: 320E */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_group -#define HIVE_MEM_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_group 0x4208 -#define HIVE_SIZE_sp_group 1144 -#else -#define HIVE_ADDR_sp_group 0x4228 -#define HIVE_SIZE_sp_group 1184 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_group 0x4208 -#define HIVE_SIZE_sp_sp_group 1144 -#else -#define HIVE_ADDR_sp_sp_group 0x4228 -#define HIVE_SIZE_sp_sp_group 1184 -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_event_proxy_thread -#define HIVE_MEM_sp_event_proxy_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_event_proxy_thread 0x4954 -#define HIVE_SIZE_sp_event_proxy_thread 68 -#else -#define HIVE_ADDR_sp_event_proxy_thread 0x49B0 -#define HIVE_SIZE_sp_event_proxy_thread 72 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_event_proxy_thread scalar_processor_2400_dmem -#ifndef 
ISP2401 -#define HIVE_ADDR_sp_sp_event_proxy_thread 0x4954 -#define HIVE_SIZE_sp_sp_event_proxy_thread 68 -#else -#define HIVE_ADDR_sp_sp_event_proxy_thread 0x49B0 -#define HIVE_SIZE_sp_sp_event_proxy_thread 72 -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_kill: CD6 */ -#else -/* function ia_css_thread_sp_kill: CCB */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_create: 28A2 */ -#else -/* function ia_css_tagger_sp_create: 2A38 */ -#endif - -#ifndef ISP2401 -/* function tmpmem_acquire_dmem: 6561 */ -#else -/* function tmpmem_acquire_dmem: 669C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_MMU_BASE -#define HIVE_MEM_MMU_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_MMU_BASE 0x24 -#define HIVE_SIZE_MMU_BASE 8 -#else -#endif -#endif -#define HIVE_MEM_sp_MMU_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_MMU_BASE 0x24 -#define HIVE_SIZE_sp_MMU_BASE 8 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_channel_release: 349F */ -#else -/* function ia_css_dmaproxy_sp_channel_release: 3693 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_is_idle: 347F */ -#else -/* function ia_css_dmaproxy_sp_is_idle: 3673 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_qos_start -#define HIVE_MEM_sem_for_qos_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_qos_start 0x477C -#else -#define HIVE_ADDR_sem_for_qos_start 0x47C4 -#endif -#define HIVE_SIZE_sem_for_qos_start 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_qos_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_qos_start 0x477C -#else -#define HIVE_ADDR_sp_sem_for_qos_start 0x47C4 -#endif -#define HIVE_SIZE_sp_sem_for_qos_start 20 - -#ifndef ISP2401 -/* function isp_hmem_load: B55 */ -#else -/* function isp_hmem_load: B4F */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_release_buf_elem: 1FB7 */ -#else -/* function ia_css_tagger_sp_release_buf_elem: 2007 */ -#endif - 
-#ifndef ISP2401 -/* function ia_css_eventq_sp_send: 34F5 */ -#else -/* function ia_css_eventq_sp_send: 36E9 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_isys_sp_error_cnt -#define HIVE_MEM_ia_css_isys_sp_error_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_isys_sp_error_cnt 0x62D4 -#else -#define HIVE_ADDR_ia_css_isys_sp_error_cnt 0x6330 -#endif -#define HIVE_SIZE_ia_css_isys_sp_error_cnt 16 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_isys_sp_error_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_isys_sp_error_cnt 0x62D4 -#else -#define HIVE_ADDR_sp_ia_css_isys_sp_error_cnt 0x6330 -#endif -#define HIVE_SIZE_sp_ia_css_isys_sp_error_cnt 16 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_unlock_from_start: 2AA5 */ -#else -/* function ia_css_tagger_buf_sp_unlock_from_start: 2C47 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_debug_buffer_ddr_address -#define HIVE_MEM_debug_buffer_ddr_address scalar_processor_2400_dmem -#define HIVE_ADDR_debug_buffer_ddr_address 0xBC -#define HIVE_SIZE_debug_buffer_ddr_address 4 -#else -#endif -#endif -#define HIVE_MEM_sp_debug_buffer_ddr_address scalar_processor_2400_dmem -#define HIVE_ADDR_sp_debug_buffer_ddr_address 0xBC -#define HIVE_SIZE_sp_debug_buffer_ddr_address 4 - -#ifndef ISP2401 -/* function sp_isys_copy_request: 714 */ -#else -/* function sp_isys_copy_request: 6A8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_retain_vbuf: 6344 */ -#else -/* function ia_css_rmgr_sp_refcount_retain_vbuf: 647F */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_set_priority: CCE */ -#else -/* function ia_css_thread_sp_set_priority: CC3 */ -#endif - -#ifndef ISP2401 -/* function sizeof_hmem: BFC */ -#else -/* function sizeof_hmem: BF6 */ -#endif - -#ifndef ISP2401 -/* function tmpmem_release_dmem: 6550 */ -#else -/* function tmpmem_release_dmem: 668B */ -#endif - -/* function cnd_input_system_cfg: 392 */ - 
-#ifndef ISP2401 -/* function __ia_css_sp_rawcopy_func_critical: 6F65 */ -#else -/* function __ia_css_sp_rawcopy_func_critical: 70C2 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_width_exception: 3304 */ -#else -/* function __ia_css_dmaproxy_sp_process_text: 3306 */ -#endif - -#ifndef ISP2401 -/* function sp_event_assert: 8B1 */ -#else -/* function ia_css_dmaproxy_sp_set_width_exception: 34F8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_flash_sp_init_internal_params: 2C90 */ -#else -/* function sp_event_assert: 845 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 29AB */ -#else -/* function ia_css_flash_sp_init_internal_params: 2E32 */ -#endif - -#ifndef ISP2401 -/* function __modu: 68BB */ -#else -/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 2B4D */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_isp_vector: 3189 */ -#else -/* function __modu: 6A2E */ - -/* function ia_css_dmaproxy_sp_init_isp_vector: 3368 */ -#endif - -/* function isp_vamem_store: 0 */ - -#ifdef ISP2401 -/* function ia_css_tagger_sp_set_copy_pipe: 2A2F */ - -#endif -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GDC_BASE -#define HIVE_MEM_GDC_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_GDC_BASE 0x44 -#define HIVE_SIZE_GDC_BASE 8 -#else -#endif -#endif -#define HIVE_MEM_sp_GDC_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_GDC_BASE 0x44 -#define HIVE_SIZE_sp_GDC_BASE 8 - -#ifndef ISP2401 -/* function ia_css_queue_local_init: 4C0E */ -#else -/* function ia_css_queue_local_init: 4E6C */ -#endif - -#ifndef ISP2401 -/* function sp_event_proxy_callout_func: 6988 */ -#else -/* function sp_event_proxy_callout_func: 6AFB */ -#endif - -#ifndef ISP2401 -/* function qos_scheduler_schedule_stage: 65C1 */ -#else -/* function qos_scheduler_schedule_stage: 670F */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_thread_sp_num_ready_threads -#define 
HIVE_MEM_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x49E0 -#else -#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x4A40 -#endif -#define HIVE_SIZE_ia_css_thread_sp_num_ready_threads 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x49E0 -#else -#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x4A40 -#endif -#define HIVE_SIZE_sp_ia_css_thread_sp_num_ready_threads 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_stack_size -#define HIVE_MEM_sp_threads_stack_size scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_stack_size 0x180 -#define HIVE_SIZE_sp_threads_stack_size 28 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_stack_size scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_stack_size 0x180 -#define HIVE_SIZE_sp_sp_threads_stack_size 28 - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_isp_done_row_striping: 3F2F */ -#else -/* function ia_css_ispctrl_sp_isp_done_row_striping: 4172 */ -#endif - -#ifndef ISP2401 -/* function __ia_css_isys_sp_isr_text: 5E09 */ -#else -/* function __ia_css_isys_sp_isr_text: 5F44 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_dequeue: 4A8C */ -#else -/* function ia_css_queue_dequeue: 4CEA */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_configure_channel: 6E4C */ -#else -/* function is_qos_standalone_mode: 66EA */ - -/* function ia_css_dmaproxy_sp_configure_channel: 6F90 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_current_thread_fiber_sp -#define HIVE_MEM_current_thread_fiber_sp scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_current_thread_fiber_sp 0x49E8 -#else -#define HIVE_ADDR_current_thread_fiber_sp 0x4A44 -#endif -#define HIVE_SIZE_current_thread_fiber_sp 4 -#else -#endif -#endif -#define 
HIVE_MEM_sp_current_thread_fiber_sp scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_current_thread_fiber_sp 0x49E8 -#else -#define HIVE_ADDR_sp_current_thread_fiber_sp 0x4A44 -#endif -#define HIVE_SIZE_sp_current_thread_fiber_sp 4 - -#ifndef ISP2401 -/* function ia_css_circbuf_pop: FD8 */ -#else -/* function ia_css_circbuf_pop: FCD */ -#endif - -#ifndef ISP2401 -/* function memset: 693A */ -#else -/* function memset: 6AAD */ -#endif - -/* function irq_raise_set_token: B6 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GPIO_BASE -#define HIVE_MEM_GPIO_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_GPIO_BASE 0x3C -#define HIVE_SIZE_GPIO_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_GPIO_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_GPIO_BASE 0x3C -#define HIVE_SIZE_sp_GPIO_BASE 4 - -#ifndef ISP2401 -/* function ia_css_pipeline_acc_stage_enable: 17D7 */ -#else -/* function ia_css_pipeline_acc_stage_enable: 17FF */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_unlock_exp_id: 2028 */ -#else -/* function ia_css_tagger_sp_unlock_exp_id: 2078 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_ph -#define HIVE_MEM_isp_ph scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_ph 0x62E4 -#else -#define HIVE_ADDR_isp_ph 0x6340 -#endif -#define HIVE_SIZE_isp_ph 28 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_ph scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_ph 0x62E4 -#else -#define HIVE_ADDR_sp_isp_ph 0x6340 -#endif -#define HIVE_SIZE_sp_isp_ph 28 - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_flush: 6009 */ -#else -/* function ia_css_isys_sp_token_map_flush: 6144 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_ds: 37B2 */ -#else -/* function ia_css_ispctrl_sp_init_ds: 39E1 */ -#endif - -#ifndef ISP2401 -/* function get_xmem_base_addr_raw: 3B5F */ -#else -/* function get_xmem_base_addr_raw: 3D9A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS 
-#ifndef HIVE_MEM_sp_all_cbs_param -#define HIVE_MEM_sp_all_cbs_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_param 0x4790 -#else -#define HIVE_ADDR_sp_all_cbs_param 0x47D8 -#endif -#define HIVE_SIZE_sp_all_cbs_param 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_param 0x4790 -#else -#define HIVE_ADDR_sp_sp_all_cbs_param 0x47D8 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_param 16 - -#ifndef ISP2401 -/* function ia_css_circbuf_create: 1026 */ -#else -/* function ia_css_circbuf_create: 101B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp_group -#define HIVE_MEM_sem_for_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp_group 0x47A0 -#else -#define HIVE_ADDR_sem_for_sp_group 0x47E8 -#endif -#define HIVE_SIZE_sem_for_sp_group 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp_group 0x47A0 -#else -#define HIVE_ADDR_sp_sem_for_sp_group 0x47E8 -#endif -#define HIVE_SIZE_sp_sem_for_sp_group 20 - -#ifndef ISP2401 -/* function ia_css_framebuf_sp_wait_for_in_frame: 64DF */ -#else -/* function __ia_css_dmaproxy_sp_configure_channel_text: 34D7 */ - -/* function ia_css_framebuf_sp_wait_for_in_frame: 661A */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_tag_frame: 556F */ -#else -/* function ia_css_sp_rawcopy_tag_frame: 57B0 */ -#endif - -#ifndef ISP2401 -/* function isp_hmem_clear: B25 */ -#else -/* function isp_hmem_clear: B1F */ -#endif - -#ifndef ISP2401 -/* function ia_css_framebuf_sp_release_in_frame: 6522 */ -#else -/* function ia_css_framebuf_sp_release_in_frame: 665D */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_snd_acquire_request: 5A5F */ -#else -/* function ia_css_isys_sp_backend_snd_acquire_request: 5B9A */ -#endif - -#ifndef ISP2401 -/* function 
ia_css_isys_sp_token_map_is_full: 5E90 */ -#else -/* function ia_css_isys_sp_token_map_is_full: 5FCB */ -#endif - -#ifndef ISP2401 -/* function input_system_acquisition_run: AF9 */ -#else -/* function input_system_acquisition_run: AF3 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_start_binary: 3631 */ -#else -/* function ia_css_ispctrl_sp_start_binary: 3833 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs -#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x58F4 -#else -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x5950 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x58F4 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x5950 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20 - -#ifndef ISP2401 -/* function ia_css_eventq_sp_recv: 34C7 */ -#else -/* function ia_css_eventq_sp_recv: 36BB */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_pool -#define HIVE_MEM_isp_pool scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_pool 0x2E8 -#else -#define HIVE_ADDR_isp_pool 0x300 -#endif -#define HIVE_SIZE_isp_pool 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_pool scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_pool 0x2E8 -#else -#define HIVE_ADDR_sp_isp_pool 0x300 -#endif -#define HIVE_SIZE_sp_isp_pool 4 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_rel_gen: 6211 */ -#else -/* function ia_css_rmgr_sp_rel_gen: 634C */ - -/* function ia_css_tagger_sp_unblock_clients: 2900 */ -#endif - -#ifndef ISP2401 -/* function css_get_frame_processing_time_end: 1FA7 */ -#else -/* function 
css_get_frame_processing_time_end: 1FF7 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_event_any_pending_mask -#define HIVE_MEM_event_any_pending_mask scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_event_any_pending_mask 0x300 -#else -#define HIVE_ADDR_event_any_pending_mask 0x318 -#endif -#define HIVE_SIZE_event_any_pending_mask 8 -#else -#endif -#endif -#define HIVE_MEM_sp_event_any_pending_mask scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_event_any_pending_mask 0x300 -#else -#define HIVE_ADDR_sp_event_any_pending_mask 0x318 -#endif -#define HIVE_SIZE_sp_event_any_pending_mask 8 - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_push: 5A16 */ -#else -/* function ia_css_isys_sp_backend_push: 5B51 */ -#endif - -/* function sh_css_decode_tag_descr: 352 */ - -/* function debug_enqueue_isp: 27B */ - -#ifndef ISP2401 -/* function qos_scheduler_update_stage_budget: 65AF */ -#else -/* function qos_scheduler_update_stage_budget: 66F2 */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_uninit: 5951 */ -#else -/* function ia_css_spctrl_sp_uninit: 5A8C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_HIVE_IF_SWITCH_CODE -#define HIVE_MEM_HIVE_IF_SWITCH_CODE scalar_processor_2400_dmem -#define HIVE_ADDR_HIVE_IF_SWITCH_CODE 0x1D8 -#define HIVE_SIZE_HIVE_IF_SWITCH_CODE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_HIVE_IF_SWITCH_CODE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_HIVE_IF_SWITCH_CODE 0x1D8 -#define HIVE_SIZE_sp_HIVE_IF_SWITCH_CODE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x5908 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x5964 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_dis_bufs 140 -#else -#endif -#endif -#define 
HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x5908 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x5964 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_dis_bufs 140 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_lock_from_start: 2AD9 */ -#else -/* function ia_css_tagger_buf_sp_lock_from_start: 2C7B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_isp_idle -#define HIVE_MEM_sem_for_isp_idle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_isp_idle 0x47B4 -#else -#define HIVE_ADDR_sem_for_isp_idle 0x47FC -#endif -#define HIVE_SIZE_sem_for_isp_idle 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_isp_idle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_isp_idle 0x47B4 -#else -#define HIVE_ADDR_sp_sem_for_isp_idle 0x47FC -#endif -#define HIVE_SIZE_sp_sem_for_isp_idle 20 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_write_byte_addr: 31E6 */ -#else -/* function ia_css_dmaproxy_sp_write_byte_addr: 33C5 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init: 315D */ -#else -/* function ia_css_dmaproxy_sp_init: 333C */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 2D62 */ -#else -/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 2F04 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_VAMEM_BASE -#define HIVE_MEM_ISP_VAMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_VAMEM_BASE 0x14 -#define HIVE_SIZE_ISP_VAMEM_BASE 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_VAMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_VAMEM_BASE 0x14 -#define HIVE_SIZE_sp_ISP_VAMEM_BASE 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_rawcopy_sp_tagger -#define HIVE_MEM_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem -#ifndef ISP2401 -#define 
HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x6294 -#else -#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x62F0 -#endif -#define HIVE_SIZE_ia_css_rawcopy_sp_tagger 24 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x6294 -#else -#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x62F0 -#endif -#define HIVE_SIZE_sp_ia_css_rawcopy_sp_tagger 24 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x5994 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x59F0 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_exp_ids 70 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x5994 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x59F0 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_exp_ids 70 - -#ifndef ISP2401 -/* function ia_css_queue_item_load: 4D00 */ -#else -/* function ia_css_queue_item_load: 4F5E */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_get_state: 593C */ -#else -/* function ia_css_spctrl_sp_get_state: 5A77 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_uninit: 6026 */ -#else -/* function ia_css_isys_sp_token_map_uninit: 6161 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_callout_sp_thread -#define HIVE_MEM_callout_sp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_callout_sp_thread 0x49DC -#else -#define HIVE_ADDR_callout_sp_thread 0x1E0 -#endif -#define HIVE_SIZE_callout_sp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_callout_sp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_callout_sp_thread 0x49DC 
-#else -#define HIVE_ADDR_sp_callout_sp_thread 0x1E0 -#endif -#define HIVE_SIZE_sp_callout_sp_thread 4 - -#ifndef ISP2401 -/* function thread_fiber_sp_init: E2F */ -#else -/* function thread_fiber_sp_init: E24 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_SP_PMEM_BASE -#define HIVE_MEM_SP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_SP_PMEM_BASE 0x0 -#define HIVE_SIZE_SP_PMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_SP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_SP_PMEM_BASE 0x0 -#define HIVE_SIZE_sp_SP_PMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_snd_acquire_req: 5F96 */ -#else -/* function ia_css_isys_sp_token_map_snd_acquire_req: 60D1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_input_stream_format -#define HIVE_MEM_sp_isp_input_stream_format scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_input_stream_format 0x40F8 -#else -#define HIVE_ADDR_sp_isp_input_stream_format 0x4118 -#endif -#define HIVE_SIZE_sp_isp_input_stream_format 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_input_stream_format scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x40F8 -#else -#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x4118 -#endif -#define HIVE_SIZE_sp_sp_isp_input_stream_format 20 - -#ifndef ISP2401 -/* function __mod: 68A7 */ -#else -/* function __mod: 6A1A */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_dmem_channel: 3247 */ -#else -/* function ia_css_dmaproxy_sp_init_dmem_channel: 3426 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_join: CFF */ -#else -/* function ia_css_thread_sp_join: CF4 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_add_command: 6F4F */ -#else -/* function ia_css_dmaproxy_sp_add_command: 7082 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_metadata_thread_func: 57F0 */ -#else -/* function ia_css_sp_metadata_thread_func: 594F */ 
-#endif - -#ifndef ISP2401 -/* function __sp_event_proxy_func_critical: 6975 */ -#else -/* function __sp_event_proxy_func_critical: 6AE8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_metadata_wait: 5903 */ -#else -/* function ia_css_sp_metadata_wait: 5A3E */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_peek_from_start: F08 */ -#else -/* function ia_css_circbuf_peek_from_start: EFD */ -#endif - -#ifndef ISP2401 -/* function ia_css_event_sp_encode: 3552 */ -#else -/* function ia_css_event_sp_encode: 3746 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_run: D72 */ -#else -/* function ia_css_thread_sp_run: D67 */ -#endif - -#ifndef ISP2401 -/* function sp_isys_copy_func: 6F6 */ -#else -/* function sp_isys_copy_func: 68A */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_flush: 5A7F */ -#else -/* function ia_css_isys_sp_backend_flush: 5BBA */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_frame_exists: 599B */ -#else -/* function ia_css_isys_sp_backend_frame_exists: 5AD6 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_init_isp_memories: 4789 */ -#else -/* function ia_css_sp_isp_param_init_isp_memories: 4A11 */ -#endif - -#ifndef ISP2401 -/* function register_isr: 8A9 */ -#else -/* function register_isr: 83D */ -#endif - -/* function irq_raise: C8 */ - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_mmu_invalidate: 3124 */ -#else -/* function ia_css_dmaproxy_sp_mmu_invalidate: 32CC */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_HIVE_IF_SRST_ADDRESS -#define HIVE_MEM_HIVE_IF_SRST_ADDRESS scalar_processor_2400_dmem -#define HIVE_ADDR_HIVE_IF_SRST_ADDRESS 0x1B8 -#define HIVE_SIZE_HIVE_IF_SRST_ADDRESS 16 -#else -#endif -#endif -#define HIVE_MEM_sp_HIVE_IF_SRST_ADDRESS scalar_processor_2400_dmem -#define HIVE_ADDR_sp_HIVE_IF_SRST_ADDRESS 0x1B8 -#define HIVE_SIZE_sp_HIVE_IF_SRST_ADDRESS 16 - -#ifndef ISP2401 -/* function pipeline_sp_initialize_stage: 190B */ -#else -/* function 
pipeline_sp_initialize_stage: 1945 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_isys_sp_frontend_states -#define HIVE_MEM_ia_css_isys_sp_frontend_states scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_isys_sp_frontend_states 0x62C8 -#else -#define HIVE_ADDR_ia_css_isys_sp_frontend_states 0x6324 -#endif -#define HIVE_SIZE_ia_css_isys_sp_frontend_states 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_isys_sp_frontend_states scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_isys_sp_frontend_states 0x62C8 -#else -#define HIVE_ADDR_sp_ia_css_isys_sp_frontend_states 0x6324 -#endif -#define HIVE_SIZE_sp_ia_css_isys_sp_frontend_states 12 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6E1E */ -#else -/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6F62 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_done_ds: 3799 */ -#else -/* function ia_css_ispctrl_sp_done_ds: 39C8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_get_mem_inits: 4764 */ -#else -/* function ia_css_sp_isp_param_get_mem_inits: 49EC */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_init_buffer_queues: 13D0 */ -#else -/* function ia_css_parambuf_sp_init_buffer_queues: 13F1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_pfp_spref -#define HIVE_MEM_vbuf_pfp_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_pfp_spref 0x2F0 -#else -#define HIVE_ADDR_vbuf_pfp_spref 0x308 -#endif -#define HIVE_SIZE_vbuf_pfp_spref 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_pfp_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_pfp_spref 0x2F0 -#else -#define HIVE_ADDR_sp_vbuf_pfp_spref 0x308 -#endif -#define HIVE_SIZE_sp_vbuf_pfp_spref 4 - -#ifndef ISP2401 -/* function input_system_cfg: ABB */ -#else -/* function input_system_cfg: AB5 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_HMEM_BASE 
-#define HIVE_MEM_ISP_HMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_HMEM_BASE 0x20 -#define HIVE_SIZE_ISP_HMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_HMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_HMEM_BASE 0x20 -#define HIVE_SIZE_sp_ISP_HMEM_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_frames -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x59DC -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x5A38 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_frames 280 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x59DC -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x5A38 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_frames 280 - -#ifndef ISP2401 -/* function qos_scheduler_init_stage_budget: 65E8 */ -#else -/* function qos_scheduler_init_stage_budget: 6750 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_release: 5AF4 */ -#else -/* function ia_css_isys_sp_backend_release: 5C2F */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_destroy: 5B1E */ -#else -/* function ia_css_isys_sp_backend_destroy: 5C59 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp2host_buffer_queue_handle -#define HIVE_MEM_sp2host_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_buffer_queue_handle 0x5AF4 -#else -#define HIVE_ADDR_sp2host_buffer_queue_handle 0x5B50 -#endif -#define HIVE_SIZE_sp2host_buffer_queue_handle 96 -#else -#endif -#endif -#define HIVE_MEM_sp_sp2host_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x5AF4 -#else -#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 
0x5B50 -#endif -#define HIVE_SIZE_sp_sp2host_buffer_queue_handle 96 - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_check_mipi_frame_size: 5F5A */ -#else -/* function ia_css_isys_sp_token_map_check_mipi_frame_size: 6095 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_isp_vars: 4483 */ -#else -/* function ia_css_ispctrl_sp_init_isp_vars: 46DE */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_has_empty_mipi_buffer_cb: 5B70 */ -#else -/* function ia_css_isys_sp_frontend_has_empty_mipi_buffer_cb: 5CAB */ -#endif - -#ifndef ISP2401 -/* function sp_warning: 8DC */ -#else -/* function sp_warning: 870 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_vbuf_enqueue: 6304 */ -#else -/* function ia_css_rmgr_sp_vbuf_enqueue: 643F */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_tag_exp_id: 2142 */ -#else -/* function ia_css_tagger_sp_tag_exp_id: 2192 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_write: 31FD */ -#else -/* function ia_css_dmaproxy_sp_write: 33DC */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_release_in_param: 1250 */ -#else -/* function ia_css_parambuf_sp_release_in_param: 1245 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_irq_sw_interrupt_token -#define HIVE_MEM_irq_sw_interrupt_token scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_irq_sw_interrupt_token 0x40F4 -#else -#define HIVE_ADDR_irq_sw_interrupt_token 0x4114 -#endif -#define HIVE_SIZE_irq_sw_interrupt_token 4 -#else -#endif -#endif -#define HIVE_MEM_sp_irq_sw_interrupt_token scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x40F4 -#else -#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x4114 -#endif -#define HIVE_SIZE_sp_irq_sw_interrupt_token 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_addresses -#define HIVE_MEM_sp_isp_addresses scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_addresses 0x5F44 -#else 
-#define HIVE_ADDR_sp_isp_addresses 0x5FA4 -#endif -#define HIVE_SIZE_sp_isp_addresses 172 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_addresses scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_addresses 0x5F44 -#else -#define HIVE_ADDR_sp_sp_isp_addresses 0x5FA4 -#endif -#define HIVE_SIZE_sp_sp_isp_addresses 172 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_acq_gen: 6229 */ -#else -/* function ia_css_rmgr_sp_acq_gen: 6364 */ -#endif - -#ifndef ISP2401 -/* function receiver_reg_load: AD0 */ -#else -/* function receiver_reg_load: ACA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isps -#define HIVE_MEM_isps scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isps 0x6300 -#else -#define HIVE_ADDR_isps 0x635C -#endif -#define HIVE_SIZE_isps 28 -#else -#endif -#endif -#define HIVE_MEM_sp_isps scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isps 0x6300 -#else -#define HIVE_ADDR_sp_isps 0x635C -#endif -#define HIVE_SIZE_sp_isps 28 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host_sp_queues_initialized -#define HIVE_MEM_host_sp_queues_initialized scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host_sp_queues_initialized 0x410C -#else -#define HIVE_ADDR_host_sp_queues_initialized 0x412C -#endif -#define HIVE_SIZE_host_sp_queues_initialized 4 -#else -#endif -#endif -#define HIVE_MEM_sp_host_sp_queues_initialized scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host_sp_queues_initialized 0x410C -#else -#define HIVE_ADDR_sp_host_sp_queues_initialized 0x412C -#endif -#define HIVE_SIZE_sp_host_sp_queues_initialized 4 - -#ifndef ISP2401 -/* function ia_css_queue_uninit: 4BCC */ -#else -/* function ia_css_queue_uninit: 4E2A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_ispctrl_sp_isp_started -#define HIVE_MEM_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x5BFC 
-#else -#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x5C58 -#endif -#define HIVE_SIZE_ia_css_ispctrl_sp_isp_started 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x5BFC -#else -#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x5C58 -#endif -#define HIVE_SIZE_sp_ia_css_ispctrl_sp_isp_started 4 - -#ifndef ISP2401 -/* function ia_css_bufq_sp_release_dynamic_buf: 2DCE */ -#else -/* function ia_css_bufq_sp_release_dynamic_buf: 2F70 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_height_exception: 32F5 */ -#else -/* function ia_css_dmaproxy_sp_set_height_exception: 34E9 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_vmem_channel: 327A */ -#else -/* function ia_css_dmaproxy_sp_init_vmem_channel: 345A */ -#endif - -#ifndef ISP2401 -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_num_ready_threads -#define HIVE_MEM_num_ready_threads scalar_processor_2400_dmem -#define HIVE_ADDR_num_ready_threads 0x49E4 -#define HIVE_SIZE_num_ready_threads 4 -#else -#endif -#endif -#define HIVE_MEM_sp_num_ready_threads scalar_processor_2400_dmem -#define HIVE_ADDR_sp_num_ready_threads 0x49E4 -#define HIVE_SIZE_sp_num_ready_threads 4 - -/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 31CF */ -#else -/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 33AE */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_spref -#define HIVE_MEM_vbuf_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_spref 0x2EC -#else -#define HIVE_ADDR_vbuf_spref 0x304 -#endif -#define HIVE_SIZE_vbuf_spref 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_spref 0x2EC -#else -#define HIVE_ADDR_sp_vbuf_spref 0x304 -#endif -#define HIVE_SIZE_sp_vbuf_spref 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_metadata_thread -#define 
HIVE_MEM_sp_metadata_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_metadata_thread 0x4998 -#define HIVE_SIZE_sp_metadata_thread 68 -#else -#define HIVE_ADDR_sp_metadata_thread 0x49F8 -#define HIVE_SIZE_sp_metadata_thread 72 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_metadata_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_metadata_thread 0x4998 -#define HIVE_SIZE_sp_sp_metadata_thread 68 -#else -#define HIVE_ADDR_sp_sp_metadata_thread 0x49F8 -#define HIVE_SIZE_sp_sp_metadata_thread 72 -#endif - -#ifndef ISP2401 -/* function ia_css_queue_enqueue: 4B16 */ -#else -/* function ia_css_queue_enqueue: 4D74 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_request -#define HIVE_MEM_ia_css_flash_sp_request scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_request 0x4A98 -#else -#define HIVE_ADDR_ia_css_flash_sp_request 0x4AF4 -#endif -#define HIVE_SIZE_ia_css_flash_sp_request 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_request scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x4A98 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x4AF4 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_request 4 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_vmem_write: 31A0 */ -#else -/* function ia_css_dmaproxy_sp_vmem_write: 337F */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_tagger_frames -#define HIVE_MEM_tagger_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_tagger_frames 0x49EC -#else -#define HIVE_ADDR_tagger_frames 0x4A48 -#endif -#define HIVE_SIZE_tagger_frames 168 -#else -#endif -#endif -#define HIVE_MEM_sp_tagger_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_tagger_frames 0x49EC -#else -#define HIVE_ADDR_sp_tagger_frames 0x4A48 -#endif -#define HIVE_SIZE_sp_tagger_frames 168 - -#ifndef ISP2401 -/* function 
ia_css_isys_sp_token_map_snd_capture_req: 5FB8 */ -#else -/* function ia_css_isys_sp_token_map_snd_capture_req: 60F3 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_if -#define HIVE_MEM_sem_for_reading_if scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_if 0x47C8 -#else -#define HIVE_ADDR_sem_for_reading_if 0x4810 -#endif -#define HIVE_SIZE_sem_for_reading_if 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_if scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_reading_if 0x47C8 -#else -#define HIVE_ADDR_sp_sem_for_reading_if 0x4810 -#endif -#define HIVE_SIZE_sp_sem_for_reading_if 20 - -#ifndef ISP2401 -/* function sp_generate_interrupts: 95B */ -#else -/* function sp_generate_interrupts: 8EF */ - -/* function ia_css_pipeline_sp_start: 1858 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_start: 181E */ -#else -/* function ia_css_thread_default_callout: 6BE3 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_init: 50F3 */ -#else -/* function ia_css_sp_rawcopy_init: 5351 */ -#endif - -#ifndef ISP2401 -/* function tmr_clock_read: 6596 */ -#else -/* function tmr_clock_read: 66D1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_BAMEM_BASE -#define HIVE_MEM_ISP_BAMEM_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ISP_BAMEM_BASE 0x2F8 -#else -#define HIVE_ADDR_ISP_BAMEM_BASE 0x310 -#endif -#define HIVE_SIZE_ISP_BAMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_BAMEM_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x2F8 -#else -#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x310 -#endif -#define HIVE_SIZE_sp_ISP_BAMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_rcv_capture_ack: 5C1F */ -#else -/* function ia_css_isys_sp_frontend_rcv_capture_ack: 5D5A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues 
-#define HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5B54 -#else -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5BB0 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5B54 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5BB0 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160 - -#ifndef ISP2401 -/* function css_get_frame_processing_time_start: 1FAF */ -#else -/* function css_get_frame_processing_time_start: 1FFF */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_frame -#define HIVE_MEM_sp_all_cbs_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_frame 0x47DC -#else -#define HIVE_ADDR_sp_all_cbs_frame 0x4824 -#endif -#define HIVE_SIZE_sp_all_cbs_frame 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_frame 0x47DC -#else -#define HIVE_ADDR_sp_sp_all_cbs_frame 0x4824 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_frame 16 - -#ifndef ISP2401 -/* function thread_sp_queue_print: D8F */ -#else -/* function thread_sp_queue_print: D84 */ -#endif - -#ifndef ISP2401 -/* function sp_notify_eof: 907 */ -#else -/* function sp_notify_eof: 89B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_str2mem -#define HIVE_MEM_sem_for_str2mem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_str2mem 0x47EC -#else -#define HIVE_ADDR_sem_for_str2mem 0x4834 -#endif -#define HIVE_SIZE_sem_for_str2mem 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_str2mem scalar_processor_2400_dmem -#ifndef ISP2401 -#define 
HIVE_ADDR_sp_sem_for_str2mem 0x47EC -#else -#define HIVE_ADDR_sp_sem_for_str2mem 0x4834 -#endif -#define HIVE_SIZE_sp_sem_for_str2mem 20 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_is_marked_from_start: 2B41 */ -#else -/* function ia_css_tagger_buf_sp_is_marked_from_start: 2CE3 */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_acquire_dynamic_buf: 2F86 */ -#else -/* function ia_css_bufq_sp_acquire_dynamic_buf: 3128 */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_destroy: 101D */ -#else -/* function ia_css_circbuf_destroy: 1012 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_PMEM_BASE -#define HIVE_MEM_ISP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_PMEM_BASE 0xC -#define HIVE_SIZE_ISP_PMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_PMEM_BASE 0xC -#define HIVE_SIZE_sp_ISP_PMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_mem_load: 46F7 */ -#else -/* function ia_css_sp_isp_param_mem_load: 497F */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_from_start: 292D */ -#else -/* function ia_css_tagger_buf_sp_pop_from_start: 2ACF */ -#endif - -#ifndef ISP2401 -/* function __div: 685F */ -#else -/* function __div: 69D2 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_create: 5DF0 */ -#else -/* function ia_css_isys_sp_frontend_create: 5F2B */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_release_vbuf: 6323 */ -#else -/* function ia_css_rmgr_sp_refcount_release_vbuf: 645E */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_in_use -#define HIVE_MEM_ia_css_flash_sp_in_use scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_in_use 0x4A9C -#else -#define HIVE_ADDR_ia_css_flash_sp_in_use 0x4AF8 -#endif -#define HIVE_SIZE_ia_css_flash_sp_in_use 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_in_use 
scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x4A9C -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x4AF8 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_in_use 4 - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_wait: 6B42 */ -#else -/* function ia_css_thread_sem_sp_wait: 6CB7 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_sleep_mode -#define HIVE_MEM_sp_sleep_mode scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sleep_mode 0x4110 -#else -#define HIVE_ADDR_sp_sleep_mode 0x4130 -#endif -#define HIVE_SIZE_sp_sleep_mode 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_sleep_mode scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_sleep_mode 0x4110 -#else -#define HIVE_ADDR_sp_sp_sleep_mode 0x4130 -#endif -#define HIVE_SIZE_sp_sp_sleep_mode 4 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_push: 2A3C */ -#else -/* function ia_css_tagger_buf_sp_push: 2BDE */ -#endif - -/* function mmu_invalidate_cache: D3 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_max_cb_elems -#define HIVE_MEM_sp_max_cb_elems scalar_processor_2400_dmem -#define HIVE_ADDR_sp_max_cb_elems 0x148 -#define HIVE_SIZE_sp_max_cb_elems 8 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_max_cb_elems scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_max_cb_elems 0x148 -#define HIVE_SIZE_sp_sp_max_cb_elems 8 - -#ifndef ISP2401 -/* function ia_css_queue_remote_init: 4BEE */ -#else -/* function ia_css_queue_remote_init: 4E4C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_stop_req -#define HIVE_MEM_isp_stop_req scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_stop_req 0x4680 -#else -#define HIVE_ADDR_isp_stop_req 0x46C8 -#endif -#define HIVE_SIZE_isp_stop_req 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_stop_req scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_stop_req 0x4680 -#else -#define HIVE_ADDR_sp_isp_stop_req 0x46C8 -#endif 
-#define HIVE_SIZE_sp_isp_stop_req 4 - -#ifndef ISP2401 -#define HIVE_ICACHE_sp_critical_SEGMENT_START 0 -#define HIVE_ICACHE_sp_critical_NUM_SEGMENTS 1 -#endif - -#endif /* _sp_map_h_ */ -#ifndef ISP2401 -extern void sh_css_dump_sp_dmem(void); -void sh_css_dump_sp_dmem(void) -{ -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/csi_rx_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/csi_rx_global.h deleted file mode 100644 index 146a578b7c74..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/csi_rx_global.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __CSI_RX_GLOBAL_H_INCLUDED__ -#define __CSI_RX_GLOBAL_H_INCLUDED__ - -#include - -typedef enum { - CSI_MIPI_PACKET_TYPE_UNDEFINED = 0, - CSI_MIPI_PACKET_TYPE_LONG, - CSI_MIPI_PACKET_TYPE_SHORT, - CSI_MIPI_PACKET_TYPE_RESERVED, - N_CSI_MIPI_PACKET_TYPE -} csi_mipi_packet_type_t; - -typedef struct csi_rx_backend_lut_entry_s csi_rx_backend_lut_entry_t; -struct csi_rx_backend_lut_entry_s { - uint32_t long_packet_entry; - uint32_t short_packet_entry; -}; - -typedef struct csi_rx_backend_cfg_s csi_rx_backend_cfg_t; -struct csi_rx_backend_cfg_s { - /* LUT entry for the packet */ - csi_rx_backend_lut_entry_t lut_entry; - - /* can be derived from the Data Type */ - csi_mipi_packet_type_t csi_mipi_packet_type; - - struct { - bool comp_enable; - uint32_t virtual_channel; - uint32_t data_type; - uint32_t comp_scheme; - uint32_t comp_predictor; - uint32_t comp_bit_idx; - } csi_mipi_cfg; -}; - -typedef struct csi_rx_frontend_cfg_s csi_rx_frontend_cfg_t; -struct csi_rx_frontend_cfg_s { - uint32_t active_lanes; -}; - -extern const uint32_t N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID]; -extern const uint32_t N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID]; -extern const uint32_t N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID]; -/* sid_width for CSI_RX_BACKEND_ID */ -extern const uint32_t N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID]; - -#endif /* __CSI_RX_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.c deleted file mode 100644 index 325b821f276c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.c +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Generated code: do not edit or commmit. */ - -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_pipeline.h" -#include "ia_css_isp_configs.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_iterator( - const struct ia_css_binary *binary, - const struct ia_css_iterator_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.iterator.size; - offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset; - } - if (size) { - ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_copy_output( - const struct ia_css_binary *binary, - const struct ia_css_copy_output_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = 
binary->info->mem_offsets.offsets.config->dmem.copy_output.size; - offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset; - } - if (size) { - ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_crop( - const struct ia_css_binary *binary, - const struct ia_css_crop_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.crop.size; - offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset; - } - if (size) { - ia_css_crop_config((struct sh_css_isp_crop_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_fpn( - const struct ia_css_binary *binary, - const struct ia_css_fpn_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.fpn.size; - offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset; - } - if (size) { - ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_dvs( - const struct ia_css_binary *binary, - const struct ia_css_dvs_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.dvs.size; - offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset; - } - if (size) { - ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_qplane( - const struct ia_css_binary *binary, - const struct ia_css_qplane_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.qplane.size; - offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset; - } - if (size) { - ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output0( - const struct ia_css_binary *binary, - const struct ia_css_output0_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_configure_output0() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output0.size; - offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset; - } - if (size) { - ia_css_output0_config((struct sh_css_isp_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output1( - const struct ia_css_binary *binary, - const struct ia_css_output1_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output1.size; - offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset; - } - if (size) { - ia_css_output1_config((struct sh_css_isp_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output( - const struct ia_css_binary *binary, - const struct ia_css_output_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output.size; - offset = binary->info->mem_offsets.offsets.config->dmem.output.offset; - } - if (size) { - ia_css_output_config((struct 
sh_css_isp_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ -#ifdef ISP2401 - -void -ia_css_configure_sc( - const struct ia_css_binary *binary, - const struct ia_css_sc_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.sc.size; - offset = binary->info->mem_offsets.offsets.config->dmem.sc.offset; - } - if (size) { - ia_css_sc_config((struct sh_css_isp_sc_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ -#endif - -void -ia_css_configure_raw( - const struct ia_css_binary *binary, - const struct ia_css_raw_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.raw.size; - offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset; - } - if (size) { - ia_css_raw_config((struct sh_css_isp_raw_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_tnr( - const struct ia_css_binary 
*binary, - const struct ia_css_tnr_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.tnr.size; - offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset; - } - if (size) { - ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_ref( - const struct ia_css_binary *binary, - const struct ia_css_ref_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.ref.size; - offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset; - } - if (size) { - ia_css_ref_config((struct sh_css_isp_ref_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_vf( - const struct ia_css_binary *binary, - const struct ia_css_vf_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.vf.size; - offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset; - } - if 
(size) { - ia_css_vf_config((struct sh_css_isp_vf_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() leave:\n"); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.h deleted file mode 100644 index 8aacd3dbc05a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.h +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifdef IA_CSS_INCLUDE_CONFIGURATIONS -#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h" -#include "isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h" -#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h" -#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h" -#include "isp/kernels/output/output_1.0/ia_css_output.host.h" -#include "isp/kernels/qplane/qplane_2/ia_css_qplane.host.h" -#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h" -#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h" -#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h" -#ifdef ISP2401 -#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h" -#endif -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h" -#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h" -#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h" -#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */ -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_CONFIG_H -#define _IA_CSS_ISP_CONFIG_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_configuration_ids { - IA_CSS_ITERATOR_CONFIG_ID, - IA_CSS_COPY_OUTPUT_CONFIG_ID, - IA_CSS_CROP_CONFIG_ID, - IA_CSS_FPN_CONFIG_ID, - IA_CSS_DVS_CONFIG_ID, - IA_CSS_QPLANE_CONFIG_ID, - IA_CSS_OUTPUT0_CONFIG_ID, - IA_CSS_OUTPUT1_CONFIG_ID, - IA_CSS_OUTPUT_CONFIG_ID, -#ifdef ISP2401 - IA_CSS_SC_CONFIG_ID, -#endif - IA_CSS_RAW_CONFIG_ID, - IA_CSS_TNR_CONFIG_ID, - IA_CSS_REF_CONFIG_ID, - IA_CSS_VF_CONFIG_ID, - IA_CSS_NUM_CONFIGURATION_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_config_memory_offsets { - struct { - struct ia_css_isp_parameter iterator; - struct ia_css_isp_parameter copy_output; - struct ia_css_isp_parameter crop; - struct ia_css_isp_parameter fpn; - struct ia_css_isp_parameter dvs; - struct ia_css_isp_parameter qplane; - struct ia_css_isp_parameter output0; - struct ia_css_isp_parameter output1; - struct ia_css_isp_parameter 
output; -#ifdef ISP2401 - struct ia_css_isp_parameter sc; -#endif - struct ia_css_isp_parameter raw; - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter ref; - struct ia_css_isp_parameter vf; - } dmem; -}; - -#if defined(IA_CSS_INCLUDE_CONFIGURATIONS) - -#include "ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_iterator( - const struct ia_css_binary *binary, - const struct ia_css_iterator_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_copy_output( - const struct ia_css_binary *binary, - const struct ia_css_copy_output_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_crop( - const struct ia_css_binary *binary, - const struct ia_css_crop_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_fpn( - const struct ia_css_binary *binary, - const struct ia_css_fpn_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_dvs( - const struct ia_css_binary *binary, - const struct ia_css_dvs_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_qplane( - const struct ia_css_binary *binary, - const struct ia_css_qplane_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output0( - const struct ia_css_binary *binary, - const struct ia_css_output0_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output1( - const struct ia_css_binary *binary, - const struct ia_css_output1_configuration 
*config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output( - const struct ia_css_binary *binary, - const struct ia_css_output_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -#ifdef ISP2401 -void -ia_css_configure_sc( - const struct ia_css_binary *binary, - const struct ia_css_sc_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -#endif -void -ia_css_configure_raw( - const struct ia_css_binary *binary, - const struct ia_css_raw_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_tnr( - const struct ia_css_binary *binary, - const struct ia_css_tnr_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_ref( - const struct ia_css_binary *binary, - const struct ia_css_ref_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_vf( - const struct ia_css_binary *binary, - const struct ia_css_vf_configuration *config_dmem); - -#endif /* IA_CSS_INCLUDE_CONFIGURATION */ - -#endif /* _IA_CSS_ISP_CONFIG_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.c deleted file mode 100644 index 11e4463ebb50..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.c +++ /dev/null @@ -1,3220 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#define IA_CSS_INCLUDE_PARAMETERS -#include "sh_css_params.h" -#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h" -#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h" -#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h" -#include "isp/kernels/bh/bh_2/ia_css_bh.host.h" -#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h" -#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h" -#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h" -#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h" -#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h" -#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h" -#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h" -#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h" -#include "isp/kernels/de/de_1.0/ia_css_de.host.h" -#include "isp/kernels/de/de_2/ia_css_de2.host.h" -#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h" -#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" -#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h" -#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h" -#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h" -#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h" -#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h" -#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h" -#include "isp/kernels/ob/ob2/ia_css_ob2.host.h" -#include "isp/kernels/output/output_1.0/ia_css_output.host.h" -#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h" -#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h" -#include 
"isp/kernels/sc/sc_1.0/ia_css_sc.host.h" -#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h" -#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h" -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h" -#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h" -#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h" -#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h" -#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h" -#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h" -#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h" -#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h" -#include "isp/kernels/dpc2/ia_css_dpc2.host.h" -#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h" -#include "isp/kernels/bnlm/ia_css_bnlm.host.h" -#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h" -/* Generated code: do not edit or commmit. */ - -#include "ia_css_pipeline.h" -#include "ia_css_isp_params.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_aa( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.aa.size; - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset; - - if (size) { - struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - t->strength = params->aa_config.strength; - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_anr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.anr.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() enter:\n"); - - ia_css_anr_encode((struct sh_css_isp_anr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->anr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_anr2( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() enter:\n"); - - ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->anr_thres, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bh( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bh.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n"); - - 
ia_css_bh_encode((struct sh_css_isp_bh_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->s3a_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n"); - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_cnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() enter:\n"); - - ia_css_cnr_encode((struct sh_css_isp_cnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->cnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_crop( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.crop.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() enter:\n"); - - ia_css_crop_encode((struct sh_css_isp_crop_isp_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->crop_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_csc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.csc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() enter:\n"); - - ia_css_csc_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_dp( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.dp.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset; - - if (size) { - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n"); - - ia_css_dp_encode((struct sh_css_isp_dp_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dp_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() enter:\n"); - - ia_css_bnr_encode((struct sh_css_isp_bnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->nr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_de( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.de.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.de.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n"); - - ia_css_de_encode((struct sh_css_isp_de_params *) - 
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->de_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ecd( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() enter:\n"); - - ia_css_ecd_encode((struct sh_css_isp_ecd_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ecd_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_formats( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.formats.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() enter:\n"); - - ia_css_formats_encode((struct sh_css_isp_formats_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->formats_config, -size); - params->isp_params_changed 
= true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_fpn( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() enter:\n"); - - ia_css_fpn_encode((struct sh_css_isp_fpn_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->fpn_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_gc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.gc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n"); - - ia_css_gc_encode((struct sh_css_isp_gc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->gc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n"); 
- } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n"); - - ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->gc_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ce( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ce.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n"); - - ia_css_ce_encode((struct sh_css_isp_ce_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ce_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_yuv2rgb( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() enter:\n"); - - ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->yuv2rgb_cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_rgb2yuv( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() enter:\n"); - - ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->rgb2yuv_cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_r_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset; - - if (size) { - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() enter:\n"); - - ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset], - ¶ms->r_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_g_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() enter:\n"); - - ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->g_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_b_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_process_b_gamma() enter:\n"); - - ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset], - ¶ms->b_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_uds( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.uds.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset; - - if (size) { - struct sh_css_sp_uds_params *p; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() enter:\n"); - - p = (struct sh_css_sp_uds_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - p->crop_pos = params->uds_config.crop_pos; - p->uds = params->uds_config.uds; - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_raa( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.raa.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() enter:\n"); - - ia_css_raa_encode((struct 
sh_css_isp_aa_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->raa_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_s3a( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() enter:\n"); - - ia_css_s3a_encode((struct sh_css_isp_s3a_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->s3a_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ob( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ob.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n"); - - ia_css_ob_encode((struct sh_css_isp_ob_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ob_config, -¶ms->stream_configs.ob, size); - 
params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.ob.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n"); - - ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->ob_config, -¶ms->stream_configs.ob, size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_output( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.output.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.output.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() enter:\n"); - - ia_css_output_encode((struct sh_css_isp_output_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->output_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sc( - unsigned pipe_id, - const struct 
ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n"); - - ia_css_sc_encode((struct sh_css_isp_sc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->sc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bds( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bds.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset; - - if (size) { - struct sh_css_isp_bds_params *p; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() enter:\n"); - - p = (struct sh_css_isp_bds_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - p->baf_strength = params->bds_config.strength; - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_tnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() enter:\n"); - - ia_css_tnr_encode((struct sh_css_isp_tnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->tnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_macc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.macc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() enter:\n"); - - ia_css_macc_encode((struct sh_css_isp_macc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->macc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_horicoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() enter:\n"); - - ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_vertcoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() enter:\n"); - - ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_horiproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() enter:\n"); - - ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_vertproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() enter:\n"); - - ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_horicoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() enter:\n"); - - ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_vertcoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() enter:\n"); - - ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_horiproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() enter:\n"); - - ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_vertproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() enter:\n"); - - ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_wb( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.wb.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n"); - - ia_css_wb_encode((struct sh_css_isp_wb_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->wb_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_nr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.nr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n"); - - ia_css_nr_encode((struct sh_css_isp_ynr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->nr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_yee( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yee.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() enter:\n"); - - 
ia_css_yee_encode((struct sh_css_isp_yee_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->yee_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ynr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() enter:\n"); - - ia_css_ynr_encode((struct sh_css_isp_yee2_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ynr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_fc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n"); - - ia_css_fc_encode((struct sh_css_isp_fc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->fc_config, -size); - 
params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ctc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n"); - - ia_css_ctc_encode((struct sh_css_isp_ctc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ctc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n"); - - ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset], - ¶ms->ctc_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr_table( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - 
struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() enter:\n"); - - ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->xnr_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() enter:\n"); - - ia_css_xnr_encode((struct sh_css_isp_xnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->xnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr3( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n"); - - ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->xnr3_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n"); - } - - } -#ifdef ISP2401 - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n"); - - ia_css_xnr3_vmem_encode((struct sh_css_isp_xnr3_vmem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->xnr3_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n"); - } - - } -#endif -} - -/* Code generated by genparam/gencode.c:gen_param_process_table() */ - -void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) = { - ia_css_process_aa, - ia_css_process_anr, - ia_css_process_anr2, - ia_css_process_bh, - ia_css_process_cnr, - ia_css_process_crop, - ia_css_process_csc, - ia_css_process_dp, - ia_css_process_bnr, - ia_css_process_de, - ia_css_process_ecd, - ia_css_process_formats, - ia_css_process_fpn, - ia_css_process_gc, - ia_css_process_ce, - ia_css_process_yuv2rgb, - ia_css_process_rgb2yuv, - 
ia_css_process_r_gamma, - ia_css_process_g_gamma, - ia_css_process_b_gamma, - ia_css_process_uds, - ia_css_process_raa, - ia_css_process_s3a, - ia_css_process_ob, - ia_css_process_output, - ia_css_process_sc, - ia_css_process_bds, - ia_css_process_tnr, - ia_css_process_macc, - ia_css_process_sdis_horicoef, - ia_css_process_sdis_vertcoef, - ia_css_process_sdis_horiproj, - ia_css_process_sdis_vertproj, - ia_css_process_sdis2_horicoef, - ia_css_process_sdis2_vertcoef, - ia_css_process_sdis2_horiproj, - ia_css_process_sdis2_vertproj, - ia_css_process_wb, - ia_css_process_nr, - ia_css_process_yee, - ia_css_process_ynr, - ia_css_process_fc, - ia_css_process_ctc, - ia_css_process_xnr_table, - ia_css_process_xnr, - ia_css_process_xnr3, -}; - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_dp_config(const struct ia_css_isp_parameters *params, - struct ia_css_dp_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() enter: " - "config=%p\n",config); - - *config = params->dp_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() leave\n"); - ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_dp_config(struct ia_css_isp_parameters *params, - const struct ia_css_dp_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n"); - ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dp_config = *config; - params->config_changed[IA_CSS_DP_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_DP_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_dp_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void 
-ia_css_get_wb_config(const struct ia_css_isp_parameters *params, - struct ia_css_wb_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() enter: " - "config=%p\n",config); - - *config = params->wb_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() leave\n"); - ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_wb_config(struct ia_css_isp_parameters *params, - const struct ia_css_wb_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n"); - ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->wb_config = *config; - params->config_changed[IA_CSS_WB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_WB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_wb_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_tnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_tnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() enter: " - "config=%p\n",config); - - *config = params->tnr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() leave\n"); - ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_tnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_tnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n"); - ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - 
params->tnr_config = *config; - params->config_changed[IA_CSS_TNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_TNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_tnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ob_config(const struct ia_css_isp_parameters *params, - struct ia_css_ob_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() enter: " - "config=%p\n",config); - - *config = params->ob_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() leave\n"); - ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ob_config(struct ia_css_isp_parameters *params, - const struct ia_css_ob_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n"); - ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ob_config = *config; - params->config_changed[IA_CSS_OB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_OB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ob_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_de_config(const struct ia_css_isp_parameters *params, - struct ia_css_de_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() enter: " - "config=%p\n",config); - - *config = params->de_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() leave\n"); - ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by 
genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_de_config(struct ia_css_isp_parameters *params, - const struct ia_css_de_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n"); - ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->de_config = *config; - params->config_changed[IA_CSS_DE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_DE_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_de_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_anr_config(const struct ia_css_isp_parameters *params, - struct ia_css_anr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() enter: " - "config=%p\n",config); - - *config = params->anr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() leave\n"); - ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n"); - ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->anr_config = *config; - params->config_changed[IA_CSS_ANR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ANR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_anr2_config(const struct ia_css_isp_parameters *params, - struct ia_css_anr_thres *config){ - if (config == NULL) - return; - - 
assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() enter: " - "config=%p\n",config); - - *config = params->anr_thres; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() leave\n"); - ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr2_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_thres *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n"); - ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->anr_thres = *config; - params->config_changed[IA_CSS_ANR2_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ANR2_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr2_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ce_config(const struct ia_css_isp_parameters *params, - struct ia_css_ce_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() enter: " - "config=%p\n",config); - - *config = params->ce_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() leave\n"); - ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ce_config(struct ia_css_isp_parameters *params, - const struct ia_css_ce_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n"); - ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ce_config = *config; - params->config_changed[IA_CSS_CE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CE_ID] = true; - 
-#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ce_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ecd_config(const struct ia_css_isp_parameters *params, - struct ia_css_ecd_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() enter: " - "config=%p\n",config); - - *config = params->ecd_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() leave\n"); - ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ecd_config(struct ia_css_isp_parameters *params, - const struct ia_css_ecd_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n"); - ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ecd_config = *config; - params->config_changed[IA_CSS_ECD_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ECD_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ecd_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ynr_config(const struct ia_css_isp_parameters *params, - struct ia_css_ynr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() enter: " - "config=%p\n",config); - - *config = params->ynr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() leave\n"); - ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ynr_config(struct ia_css_isp_parameters *params, - const struct ia_css_ynr_config *config) -{ - if 
(config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n"); - ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ynr_config = *config; - params->config_changed[IA_CSS_YNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_YNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ynr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_fc_config(const struct ia_css_isp_parameters *params, - struct ia_css_fc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() enter: " - "config=%p\n",config); - - *config = params->fc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() leave\n"); - ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_fc_config(struct ia_css_isp_parameters *params, - const struct ia_css_fc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n"); - ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->fc_config = *config; - params->config_changed[IA_CSS_FC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_FC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_fc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_cnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_cnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() enter: " - "config=%p\n",config); - - *config = params->cnr_config; - - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() leave\n"); - ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_cnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_cnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n"); - ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->cnr_config = *config; - params->config_changed[IA_CSS_CNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_cnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_macc_config(const struct ia_css_isp_parameters *params, - struct ia_css_macc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() enter: " - "config=%p\n",config); - - *config = params->macc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() leave\n"); - ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_macc_config(struct ia_css_isp_parameters *params, - const struct ia_css_macc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n"); - ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->macc_config = *config; - params->config_changed[IA_CSS_MACC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_MACC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_macc_config() leave: " - "return_void\n"); -} - -/* Code generated by 
genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ctc_config(const struct ia_css_isp_parameters *params, - struct ia_css_ctc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() enter: " - "config=%p\n",config); - - *config = params->ctc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() leave\n"); - ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ctc_config(struct ia_css_isp_parameters *params, - const struct ia_css_ctc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n"); - ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ctc_config = *config; - params->config_changed[IA_CSS_CTC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CTC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ctc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_aa_config(const struct ia_css_isp_parameters *params, - struct ia_css_aa_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() enter: " - "config=%p\n",config); - - *config = params->aa_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() leave\n"); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_aa_config(struct ia_css_isp_parameters *params, - const struct ia_css_aa_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n"); - params->aa_config = *config; - 
params->config_changed[IA_CSS_AA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_AA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_aa_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() enter: " - "config=%p\n",config); - - *config = params->yuv2rgb_cc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() leave\n"); - ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n"); - ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->yuv2rgb_cc_config = *config; - params->config_changed[IA_CSS_YUV2RGB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_YUV2RGB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_yuv2rgb_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() enter: " - "config=%p\n",config); - - *config = params->rgb2yuv_cc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() leave\n"); - ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} 
- -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n"); - ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->rgb2yuv_cc_config = *config; - params->config_changed[IA_CSS_RGB2YUV_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_RGB2YUV_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_rgb2yuv_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_csc_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() enter: " - "config=%p\n",config); - - *config = params->cc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() leave\n"); - ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_csc_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n"); - ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->cc_config = *config; - params->config_changed[IA_CSS_CSC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CSC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_csc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_nr_config(const struct ia_css_isp_parameters *params, - struct 
ia_css_nr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() enter: " - "config=%p\n",config); - - *config = params->nr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() leave\n"); - ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_nr_config(struct ia_css_isp_parameters *params, - const struct ia_css_nr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n"); - ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->nr_config = *config; - params->config_changed[IA_CSS_BNR_ID] = true; - params->config_changed[IA_CSS_NR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_NR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_nr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_gc_config(const struct ia_css_isp_parameters *params, - struct ia_css_gc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() enter: " - "config=%p\n",config); - - *config = params->gc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() leave\n"); - ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_gc_config(struct ia_css_isp_parameters *params, - const struct ia_css_gc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n"); - ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->gc_config = *config; - 
params->config_changed[IA_CSS_GC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_GC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_gc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() leave\n"); - ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horicoef_config() enter:\n"); - ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horicoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_get_sdis_vertcoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() leave\n"); - ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertcoef_config() enter:\n"); - ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertcoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() leave\n"); - ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horiproj_config() enter:\n"); - ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horiproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() leave\n"); - ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertproj_config() enter:\n"); - ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; - -#endif - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() leave\n"); - ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horicoef_config() enter:\n"); - ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horicoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; 
- - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() leave\n"); - ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertcoef_config() enter:\n"); - ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertcoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() leave\n"); - ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horiproj_config() enter:\n"); - 
ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horiproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() leave\n"); - ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertproj_config() enter:\n"); - ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertproj_config() leave: " - 
"return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() enter: " - "config=%p\n",config); - - *config = params->r_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() leave\n"); - ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n"); - ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->r_gamma_table = *config; - params->config_changed[IA_CSS_R_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_R_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_r_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() enter: " - "config=%p\n",config); - - *config = params->g_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() leave\n"); - ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if 
(config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n"); - ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->g_gamma_table = *config; - params->config_changed[IA_CSS_G_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_G_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_g_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() enter: " - "config=%p\n",config); - - *config = params->b_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() leave\n"); - ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n"); - ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->b_gamma_table = *config; - params->config_changed[IA_CSS_B_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_B_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_b_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_get_xnr_table_config() enter: " - "config=%p\n",config); - - *config = params->xnr_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() leave\n"); - ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_table_config() enter:\n"); - ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr_table = *config; - params->config_changed[IA_CSS_XNR_TABLE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR_TABLE_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_table_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_formats_config(const struct ia_css_isp_parameters *params, - struct ia_css_formats_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() enter: " - "config=%p\n",config); - - *config = params->formats_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() leave\n"); - ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_formats_config(struct ia_css_isp_parameters *params, - const struct ia_css_formats_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n"); - ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->formats_config = *config; - params->config_changed[IA_CSS_FORMATS_ID] = true; -#ifndef ISP2401 - 
params->config_changed[IA_CSS_FORMATS_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_formats_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() enter: " - "config=%p\n",config); - - *config = params->xnr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() leave\n"); - ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n"); - ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr_config = *config; - params->config_changed[IA_CSS_XNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr3_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() enter: " - "config=%p\n",config); - - *config = params->xnr3_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() leave\n"); - ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr3_config(struct 
ia_css_isp_parameters *params, - const struct ia_css_xnr3_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n"); - ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr3_config = *config; - params->config_changed[IA_CSS_XNR3_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR3_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr3_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_s3a_config(const struct ia_css_isp_parameters *params, - struct ia_css_3a_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() enter: " - "config=%p\n",config); - - *config = params->s3a_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() leave\n"); - ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_s3a_config(struct ia_css_isp_parameters *params, - const struct ia_css_3a_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n"); - ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->s3a_config = *config; - params->config_changed[IA_CSS_BH_ID] = true; - params->config_changed[IA_CSS_S3A_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_S3A_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_s3a_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_output_config(const struct ia_css_isp_parameters *params, - struct ia_css_output_config *config){ - if (config == NULL) - return; - - assert(params != 
NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() enter: " - "config=%p\n",config); - - *config = params->output_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() leave\n"); - ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_output_config(struct ia_css_isp_parameters *params, - const struct ia_css_output_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n"); - ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->output_config = *config; - params->config_changed[IA_CSS_OUTPUT_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_OUTPUT_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_output_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_get_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -{ - ia_css_get_dp_config(params, config->dp_config); - ia_css_get_wb_config(params, config->wb_config); - ia_css_get_tnr_config(params, config->tnr_config); - ia_css_get_ob_config(params, config->ob_config); - ia_css_get_de_config(params, config->de_config); - ia_css_get_anr_config(params, config->anr_config); - ia_css_get_anr2_config(params, config->anr_thres); - ia_css_get_ce_config(params, config->ce_config); - ia_css_get_ecd_config(params, config->ecd_config); - ia_css_get_ynr_config(params, config->ynr_config); - ia_css_get_fc_config(params, config->fc_config); - ia_css_get_cnr_config(params, config->cnr_config); - ia_css_get_macc_config(params, config->macc_config); - ia_css_get_ctc_config(params, config->ctc_config); - ia_css_get_aa_config(params, config->aa_config); - ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config); 
- ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config); - ia_css_get_csc_config(params, config->cc_config); - ia_css_get_nr_config(params, config->nr_config); - ia_css_get_gc_config(params, config->gc_config); - ia_css_get_sdis_horicoef_config(params, config->dvs_coefs); - ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs); - ia_css_get_sdis_horiproj_config(params, config->dvs_coefs); - ia_css_get_sdis_vertproj_config(params, config->dvs_coefs); - ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs); - ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs); - ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs); - ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs); - ia_css_get_r_gamma_config(params, config->r_gamma_table); - ia_css_get_g_gamma_config(params, config->g_gamma_table); - ia_css_get_b_gamma_config(params, config->b_gamma_table); - ia_css_get_xnr_table_config(params, config->xnr_table); - ia_css_get_formats_config(params, config->formats_config); - ia_css_get_xnr_config(params, config->xnr_config); - ia_css_get_xnr3_config(params, config->xnr3_config); - ia_css_get_s3a_config(params, config->s3a_config); - ia_css_get_output_config(params, config->output_config); -} - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_set_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -{ - ia_css_set_dp_config(params, config->dp_config); - ia_css_set_wb_config(params, config->wb_config); - ia_css_set_tnr_config(params, config->tnr_config); - ia_css_set_ob_config(params, config->ob_config); - ia_css_set_de_config(params, config->de_config); - ia_css_set_anr_config(params, config->anr_config); - ia_css_set_anr2_config(params, config->anr_thres); - ia_css_set_ce_config(params, config->ce_config); - ia_css_set_ecd_config(params, config->ecd_config); - ia_css_set_ynr_config(params, config->ynr_config); - ia_css_set_fc_config(params, 
config->fc_config); - ia_css_set_cnr_config(params, config->cnr_config); - ia_css_set_macc_config(params, config->macc_config); - ia_css_set_ctc_config(params, config->ctc_config); - ia_css_set_aa_config(params, config->aa_config); - ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config); - ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config); - ia_css_set_csc_config(params, config->cc_config); - ia_css_set_nr_config(params, config->nr_config); - ia_css_set_gc_config(params, config->gc_config); - ia_css_set_sdis_horicoef_config(params, config->dvs_coefs); - ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs); - ia_css_set_sdis_horiproj_config(params, config->dvs_coefs); - ia_css_set_sdis_vertproj_config(params, config->dvs_coefs); - ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs); - ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs); - ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs); - ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs); - ia_css_set_r_gamma_config(params, config->r_gamma_table); - ia_css_set_g_gamma_config(params, config->g_gamma_table); - ia_css_set_b_gamma_config(params, config->b_gamma_table); - ia_css_set_xnr_table_config(params, config->xnr_table); - ia_css_set_formats_config(params, config->formats_config); - ia_css_set_xnr_config(params, config->xnr_config); - ia_css_set_xnr3_config(params, config->xnr3_config); - ia_css_set_s3a_config(params, config->s3a_config); - ia_css_set_output_config(params, config->output_config); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.h deleted file mode 100644 index 5b3deb7f74ae..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.h +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_PARAM_H -#define _IA_CSS_ISP_PARAM_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_parameter_ids { - IA_CSS_AA_ID, - IA_CSS_ANR_ID, - IA_CSS_ANR2_ID, - IA_CSS_BH_ID, - IA_CSS_CNR_ID, - IA_CSS_CROP_ID, - IA_CSS_CSC_ID, - IA_CSS_DP_ID, - IA_CSS_BNR_ID, - IA_CSS_DE_ID, - IA_CSS_ECD_ID, - IA_CSS_FORMATS_ID, - IA_CSS_FPN_ID, - IA_CSS_GC_ID, - IA_CSS_CE_ID, - IA_CSS_YUV2RGB_ID, - IA_CSS_RGB2YUV_ID, - IA_CSS_R_GAMMA_ID, - IA_CSS_G_GAMMA_ID, - IA_CSS_B_GAMMA_ID, - IA_CSS_UDS_ID, - IA_CSS_RAA_ID, - IA_CSS_S3A_ID, - IA_CSS_OB_ID, - IA_CSS_OUTPUT_ID, - IA_CSS_SC_ID, - IA_CSS_BDS_ID, - IA_CSS_TNR_ID, - IA_CSS_MACC_ID, - IA_CSS_SDIS_HORICOEF_ID, - IA_CSS_SDIS_VERTCOEF_ID, - IA_CSS_SDIS_HORIPROJ_ID, - IA_CSS_SDIS_VERTPROJ_ID, - IA_CSS_SDIS2_HORICOEF_ID, - IA_CSS_SDIS2_VERTCOEF_ID, - IA_CSS_SDIS2_HORIPROJ_ID, - IA_CSS_SDIS2_VERTPROJ_ID, - IA_CSS_WB_ID, - IA_CSS_NR_ID, - IA_CSS_YEE_ID, - IA_CSS_YNR_ID, - IA_CSS_FC_ID, - IA_CSS_CTC_ID, - IA_CSS_XNR_TABLE_ID, - IA_CSS_XNR_ID, - IA_CSS_XNR3_ID, - IA_CSS_NUM_PARAMETER_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_memory_offsets { - struct { - struct ia_css_isp_parameter 
aa; - struct ia_css_isp_parameter anr; - struct ia_css_isp_parameter bh; - struct ia_css_isp_parameter cnr; - struct ia_css_isp_parameter crop; - struct ia_css_isp_parameter csc; - struct ia_css_isp_parameter dp; - struct ia_css_isp_parameter bnr; - struct ia_css_isp_parameter de; - struct ia_css_isp_parameter ecd; - struct ia_css_isp_parameter formats; - struct ia_css_isp_parameter fpn; - struct ia_css_isp_parameter gc; - struct ia_css_isp_parameter ce; - struct ia_css_isp_parameter yuv2rgb; - struct ia_css_isp_parameter rgb2yuv; - struct ia_css_isp_parameter uds; - struct ia_css_isp_parameter raa; - struct ia_css_isp_parameter s3a; - struct ia_css_isp_parameter ob; - struct ia_css_isp_parameter output; - struct ia_css_isp_parameter sc; - struct ia_css_isp_parameter bds; - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter macc; - struct ia_css_isp_parameter sdis_horiproj; - struct ia_css_isp_parameter sdis_vertproj; - struct ia_css_isp_parameter sdis2_horiproj; - struct ia_css_isp_parameter sdis2_vertproj; - struct ia_css_isp_parameter wb; - struct ia_css_isp_parameter nr; - struct ia_css_isp_parameter yee; - struct ia_css_isp_parameter ynr; - struct ia_css_isp_parameter fc; - struct ia_css_isp_parameter ctc; - struct ia_css_isp_parameter xnr; - struct ia_css_isp_parameter xnr3; - struct ia_css_isp_parameter get; - struct ia_css_isp_parameter put; - } dmem; - struct { - struct ia_css_isp_parameter anr2; - struct ia_css_isp_parameter ob; - struct ia_css_isp_parameter sdis_horicoef; - struct ia_css_isp_parameter sdis_vertcoef; - struct ia_css_isp_parameter sdis2_horicoef; - struct ia_css_isp_parameter sdis2_vertcoef; -#ifdef ISP2401 - struct ia_css_isp_parameter xnr3; -#endif - } vmem; - struct { - struct ia_css_isp_parameter bh; - } hmem0; - struct { - struct ia_css_isp_parameter gc; - struct ia_css_isp_parameter g_gamma; - struct ia_css_isp_parameter xnr_table; - } vamem1; - struct { - struct ia_css_isp_parameter r_gamma; - struct ia_css_isp_parameter 
ctc; - } vamem0; - struct { - struct ia_css_isp_parameter b_gamma; - } vamem2; -}; - -#if defined(IA_CSS_INCLUDE_PARAMETERS) - -#include "ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/gencode.c:gen_param_process_table() */ - -struct ia_css_pipeline_stage; /* forward declaration */ - -extern void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_dp_config(struct ia_css_isp_parameters *params, - const struct ia_css_dp_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_wb_config(struct ia_css_isp_parameters *params, - const struct ia_css_wb_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_tnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_tnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ob_config(struct ia_css_isp_parameters *params, - const struct ia_css_ob_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_de_config(struct ia_css_isp_parameters *params, - const struct ia_css_de_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr2_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_thres *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ce_config(struct ia_css_isp_parameters *params, - const struct ia_css_ce_config *config); - -/* Code generated by 
genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ecd_config(struct ia_css_isp_parameters *params, - const struct ia_css_ecd_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ynr_config(struct ia_css_isp_parameters *params, - const struct ia_css_ynr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_fc_config(struct ia_css_isp_parameters *params, - const struct ia_css_fc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_cnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_cnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_macc_config(struct ia_css_isp_parameters *params, - const struct ia_css_macc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ctc_config(struct ia_css_isp_parameters *params, - const struct ia_css_ctc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_aa_config(struct ia_css_isp_parameters *params, - const struct ia_css_aa_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_csc_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_nr_config(struct ia_css_isp_parameters *params, - const struct ia_css_nr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void 
-ia_css_set_gc_config(struct ia_css_isp_parameters *params, - const struct ia_css_gc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params, - const struct 
ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_formats_config(struct ia_css_isp_parameters *params, - const struct ia_css_formats_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr3_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr3_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_s3a_config(struct ia_css_isp_parameters *params, - const struct ia_css_3a_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_output_config(struct ia_css_isp_parameters *params, - const struct ia_css_output_config *config); - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_get_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -; -#ifdef ISP2401 - -#endif -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_set_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -; -#ifdef ISP2401 - -#endif -#endif /* IA_CSS_INCLUDE_PARAMETER */ - -#endif /* _IA_CSS_ISP_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.c 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.c deleted file mode 100644 index e87d05bc73ae..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Generated code: do not edit or commmit. */ - -#include "ia_css_pipeline.h" -#include "ia_css_isp_states.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_aa_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.aa.size; - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset; - - if (size) - memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], 0, size); - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_cnr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() enter:\n"); - - { - unsigned size = 
binary->info->mem_offsets.offsets.state->vmem.cnr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset; - - if (size) { - ia_css_init_cnr_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_cnr2_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset; - - if (size) { - ia_css_init_cnr2_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_dp_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.dp.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset; - - if (size) { - ia_css_init_dp_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_de_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() enter:\n"); - - { - unsigned size = 
binary->info->mem_offsets.offsets.state->vmem.de.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.de.offset; - - if (size) { - ia_css_init_de_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_tnr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->dmem.tnr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset; - - if (size) { - ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_ref_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->dmem.ref.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset; - - if (size) { - ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_ynr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() enter:\n"); - 
- { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.ynr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset; - - if (size) { - ia_css_init_ynr_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_state_init_table() */ - -void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary) = { - ia_css_initialize_aa_state, - ia_css_initialize_cnr_state, - ia_css_initialize_cnr2_state, - ia_css_initialize_dp_state, - ia_css_initialize_de_state, - ia_css_initialize_tnr_state, - ia_css_initialize_ref_state, - ia_css_initialize_ynr_state, -}; - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.h deleted file mode 100644 index 732adafb0a63..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#define IA_CSS_INCLUDE_STATES -#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h" -#include "isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h" -#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h" -#include "isp/kernels/de/de_1.0/ia_css_de.host.h" -#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h" -#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h" -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h" -#include "isp/kernels/dpc2/ia_css_dpc2.host.h" -#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h" -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_STATE_H -#define _IA_CSS_ISP_STATE_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_state_ids { - IA_CSS_AA_STATE_ID, - IA_CSS_CNR_STATE_ID, - IA_CSS_CNR2_STATE_ID, - IA_CSS_DP_STATE_ID, - IA_CSS_DE_STATE_ID, - IA_CSS_TNR_STATE_ID, - IA_CSS_REF_STATE_ID, - IA_CSS_YNR_STATE_ID, - IA_CSS_NUM_STATE_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_state_memory_offsets { - struct { - struct ia_css_isp_parameter aa; - struct ia_css_isp_parameter cnr; - struct ia_css_isp_parameter cnr2; - struct ia_css_isp_parameter dp; - struct ia_css_isp_parameter de; - struct ia_css_isp_parameter ynr; - } vmem; - struct { - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter ref; - } dmem; -}; - -#if defined(IA_CSS_INCLUDE_STATES) - -#include "ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/genstate.c:gen_state_init_table() */ - -extern void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary); - -#endif /* IA_CSS_INCLUDE_STATE */ - -#endif /* _IA_CSS_ISP_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx.c deleted file mode 
100644 index 505e2b600beb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - - -#include "system_global.h" - -const uint32_t N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = { - 4, /* 4 entries at CSI_RX_BACKEND0_ID*/ - 4, /* 4 entries at CSI_RX_BACKEND1_ID*/ - 4 /* 4 entries at CSI_RX_BACKEND2_ID*/ -}; - -const uint32_t N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = { - 8, /* 8 entries at CSI_RX_BACKEND0_ID*/ - 4, /* 4 entries at CSI_RX_BACKEND1_ID*/ - 4 /* 4 entries at CSI_RX_BACKEND2_ID*/ -}; - -const uint32_t N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID] = { - N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FR0NTEND0_ID */ - N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FR0NTEND1_ID */ - N_CSI_RX_DLANE_ID /* 4 dlanes for CSI_RX_FR0NTEND2_ID */ -}; - -/* sid_width for CSI_RX_BACKEND_ID */ -const uint32_t N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID] = { - 3, - 2, - 2 -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_local.h deleted file mode 100644 index a2e9d54a4a37..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_local.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __CSI_RX_LOCAL_H_INCLUDED__ -#define __CSI_RX_LOCAL_H_INCLUDED__ - -#include "csi_rx_global.h" -#define N_CSI_RX_BE_MIPI_COMP_FMT_REG 4 -#define N_CSI_RX_BE_MIPI_CUSTOM_PEC 12 -#define N_CSI_RX_BE_SHORT_PKT_LUT 4 -#define N_CSI_RX_BE_LONG_PKT_LUT 8 -typedef struct csi_rx_fe_ctrl_state_s csi_rx_fe_ctrl_state_t; -typedef struct csi_rx_fe_ctrl_lane_s csi_rx_fe_ctrl_lane_t; -typedef struct csi_rx_be_ctrl_state_s csi_rx_be_ctrl_state_t; -/*mipi_backend_custom_mode_pixel_extraction_config*/ -typedef struct csi_rx_be_ctrl_pec_s csi_rx_be_ctrl_pec_t; - - -struct csi_rx_fe_ctrl_lane_s { - hrt_data termen; - hrt_data settle; -}; -struct csi_rx_fe_ctrl_state_s { - hrt_data enable; - hrt_data nof_enable_lanes; - hrt_data error_handling; - hrt_data status; - hrt_data status_dlane_hs; - hrt_data status_dlane_lp; - csi_rx_fe_ctrl_lane_t clane; - csi_rx_fe_ctrl_lane_t dlane[N_CSI_RX_DLANE_ID]; -}; -struct csi_rx_be_ctrl_state_s { - hrt_data enable; - hrt_data status; - hrt_data comp_format_reg[N_CSI_RX_BE_MIPI_COMP_FMT_REG]; - hrt_data raw16; - hrt_data raw18; - hrt_data force_raw8; - hrt_data irq_status; - hrt_data custom_mode_enable; - hrt_data custom_mode_data_state; - hrt_data pec[N_CSI_RX_BE_MIPI_CUSTOM_PEC]; - hrt_data custom_mode_valid_eop_config; - hrt_data global_lut_disregard_reg; - hrt_data packet_status_stall; - hrt_data short_packet_lut_entry[N_CSI_RX_BE_SHORT_PKT_LUT]; - hrt_data long_packet_lut_entry[N_CSI_RX_BE_LONG_PKT_LUT]; -}; -#endif /* 
__CSI_RX_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h deleted file mode 100644 index 4fa74e7a96e6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __CSI_RX_PRIVATE_H_INCLUDED__ -#define __CSI_RX_PRIVATE_H_INCLUDED__ - -#include "rx_csi_defs.h" -#include "mipi_backend_defs.h" -#include "csi_rx_public.h" - -#include "device_access.h" /* ia_css_device_load_uint32 */ - -#include "assert_support.h" /* assert */ -#include "print_support.h" /* print */ - - -/***************************************************** - * - * Native command interface (NCI). - * - *****************************************************/ -/** - * @brief Get the csi rx fe state. - * Refer to "csi_rx_public.h" for details. 
- */ -static inline void csi_rx_fe_ctrl_get_state( - const csi_rx_frontend_ID_t ID, - csi_rx_fe_ctrl_state_t *state) -{ - uint32_t i; - - state->enable = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_ENABLE_REG_IDX); - state->nof_enable_lanes = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_NOF_ENABLED_LANES_REG_IDX); - state->error_handling = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_ERROR_HANDLING_REG_IDX); - state->status = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_REG_IDX); - state->status_dlane_hs = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX); - state->status_dlane_lp = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX); - state->clane.termen = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_TERMEN_CLANE_REG_IDX); - state->clane.settle = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_SETTLE_CLANE_REG_IDX); - - /* - * Get the values of the register-set per - * dlane. - */ - for (i = 0; i < N_CSI_RX_FE_CTRL_DLANES[ID]; i++) { - csi_rx_fe_ctrl_get_dlane_state( - ID, - i, - &(state->dlane[i])); - } -} - -/** - * @brief Get the state of the csi rx fe dlane process. - * Refer to "csi_rx_public.h" for details. - */ -static inline void csi_rx_fe_ctrl_get_dlane_state( - const csi_rx_frontend_ID_t ID, - const uint32_t lane, - csi_rx_fe_ctrl_lane_t *dlane_state) -{ - - dlane_state->termen = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane)); - dlane_state->settle = - csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane)); - -} -/** - * @brief dump the csi rx fe state. - * Refer to "csi_rx_public.h" for details. 
- */ -static inline void csi_rx_fe_ctrl_dump_state( - const csi_rx_frontend_ID_t ID, - csi_rx_fe_ctrl_state_t *state) -{ - uint32_t i; - - ia_css_print("CSI RX FE STATE Controller %d Enable state 0x%x \n", ID, state->enable); - ia_css_print("CSI RX FE STATE Controller %d No Of enable lanes 0x%x \n", ID, state->nof_enable_lanes); - ia_css_print("CSI RX FE STATE Controller %d Error handling 0x%x \n", ID, state->error_handling); - ia_css_print("CSI RX FE STATE Controller %d Status 0x%x \n", ID, state->status); - ia_css_print("CSI RX FE STATE Controller %d Status Dlane HS 0x%x \n", ID, state->status_dlane_hs); - ia_css_print("CSI RX FE STATE Controller %d Status Dlane LP 0x%x \n", ID, state->status_dlane_lp); - ia_css_print("CSI RX FE STATE Controller %d Status term enable LP 0x%x \n", ID, state->clane.termen); - ia_css_print("CSI RX FE STATE Controller %d Status term settle LP 0x%x \n", ID, state->clane.settle); - - /* - * Get the values of the register-set per - * dlane. - */ - for (i = 0; i < N_CSI_RX_FE_CTRL_DLANES[ID]; i++) { - ia_css_print("CSI RX FE STATE Controller %d DLANE ID %d termen 0x%x \n", ID, i, state->dlane[i].termen); - ia_css_print("CSI RX FE STATE Controller %d DLANE ID %d settle 0x%x \n", ID, i, state->dlane[i].settle); - } -} - -/** - * @brief Get the csi rx be state. - * Refer to "csi_rx_public.h" for details. 
- */ -static inline void csi_rx_be_ctrl_get_state( - const csi_rx_backend_ID_t ID, - csi_rx_be_ctrl_state_t *state) -{ - uint32_t i; - - state->enable = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_ENABLE_REG_IDX); - - state->status = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_STATUS_REG_IDX); - - for(i = 0; i comp_format_reg[i] = - csi_rx_be_ctrl_reg_load(ID, - _HRT_MIPI_BACKEND_COMP_FORMAT_REG0_IDX+i); - } - - state->raw16 = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_IDX); - - state->raw18 = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_IDX); - state->force_raw8 = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_FORCE_RAW8_REG_IDX); - state->irq_status = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_IRQ_STATUS_REG_IDX); -#if 0 /* device access error for these registers */ - /* ToDo: rootcause this failure */ - state->custom_mode_enable = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_EN_REG_IDX); - - state->custom_mode_data_state = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_DATA_STATE_REG_IDX); - for(i = 0; i pec[i] = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P0_REG_IDX + i); - } - state->custom_mode_valid_eop_config = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_REG_IDX); -#endif - state->global_lut_disregard_reg = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_IDX); - state->packet_status_stall = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_IDX); - /* - * Get the values of the register-set per - * lut. - */ - for (i = 0; i < N_SHORT_PACKET_LUT_ENTRIES[ID]; i++) { - state->short_packet_lut_entry[i] = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_IDX + i); - } - for (i = 0; i < N_LONG_PACKET_LUT_ENTRIES[ID]; i++) { - state->long_packet_lut_entry[i] = - csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_LP_LUT_ENTRY_0_REG_IDX + i); - } -} - -/** - * @brief Dump the csi rx be state. 
- * Refer to "csi_rx_public.h" for details. - */ -static inline void csi_rx_be_ctrl_dump_state( - const csi_rx_backend_ID_t ID, - csi_rx_be_ctrl_state_t *state) -{ - uint32_t i; - - ia_css_print("CSI RX BE STATE Controller %d Enable 0x%x \n", ID, state->enable); - ia_css_print("CSI RX BE STATE Controller %d Status 0x%x \n", ID, state->status); - - for(i = 0; i status); - } - ia_css_print("CSI RX BE STATE Controller %d RAW16 0x%x \n", ID, state->raw16); - ia_css_print("CSI RX BE STATE Controller %d RAW18 0x%x \n", ID, state->raw18); - ia_css_print("CSI RX BE STATE Controller %d Force RAW8 0x%x \n", ID, state->force_raw8); - ia_css_print("CSI RX BE STATE Controller %d IRQ state 0x%x \n", ID, state->irq_status); -#if 0 /* ToDo:Getting device access error for this register */ - for(i = 0; i pec[i]); - } -#endif - ia_css_print("CSI RX BE STATE Controller %d Global LUT disregard reg 0x%x \n", ID, state->global_lut_disregard_reg); - ia_css_print("CSI RX BE STATE Controller %d packet stall reg 0x%x \n", ID, state->packet_status_stall); - /* - * Get the values of the register-set per - * lut. - */ - for (i = 0; i < N_SHORT_PACKET_LUT_ENTRIES[ID]; i++) { - ia_css_print("CSI RX BE STATE Controller ID %d Short packat entry %d shart packet lut id 0x%x \n", ID, i, state->short_packet_lut_entry[i]); - } - for (i = 0; i < N_LONG_PACKET_LUT_ENTRIES[ID]; i++) { - ia_css_print("CSI RX BE STATE Controller ID %d Long packat entry %d Long packet lut id 0x%x \n", ID, i, state->long_packet_lut_entry[i]); - } -} -/* end of NCI */ -/***************************************************** - * - * Device level interface (DLI). - * - *****************************************************/ -/** - * @brief Load the register value. - * Refer to "csi_rx_public.h" for details. 
- */ -static inline hrt_data csi_rx_fe_ctrl_reg_load( - const csi_rx_frontend_ID_t ID, - const hrt_address reg) -{ - assert(ID < N_CSI_RX_FRONTEND_ID); - assert(CSI_RX_FE_CTRL_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(CSI_RX_FE_CTRL_BASE[ID] + reg*sizeof(hrt_data)); -} - - -/** - * @brief Store a value to the register. - * Refer to "ibuf_ctrl_public.h" for details. - */ -static inline void csi_rx_fe_ctrl_reg_store( - const csi_rx_frontend_ID_t ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_CSI_RX_FRONTEND_ID); - assert(CSI_RX_FE_CTRL_BASE[ID] != (hrt_address)-1); - - ia_css_device_store_uint32(CSI_RX_FE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); -} -/** - * @brief Load the register value. - * Refer to "csi_rx_public.h" for details. - */ -static inline hrt_data csi_rx_be_ctrl_reg_load( - const csi_rx_backend_ID_t ID, - const hrt_address reg) -{ - assert(ID < N_CSI_RX_BACKEND_ID); - assert(CSI_RX_BE_CTRL_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data)); -} - - -/** - * @brief Store a value to the register. - * Refer to "ibuf_ctrl_public.h" for details. - */ -static inline void csi_rx_be_ctrl_reg_store( - const csi_rx_backend_ID_t ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_CSI_RX_BACKEND_ID); - assert(CSI_RX_BE_CTRL_BASE[ID] != (hrt_address)-1); - - ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); -} -/* end of DLI */ - -#endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl.c deleted file mode 100644 index 14973d1c2756..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl.c +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include -#include "system_global.h" - -const uint32_t N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID] = { - 8, /* IBUF_CTRL0_ID supports at most 8 processes */ - 4, /* IBUF_CTRL1_ID supports at most 4 processes */ - 4 /* IBUF_CTRL2_ID supports at most 4 processes */ -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_local.h deleted file mode 100644 index ea40284623d1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_local.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IBUF_CTRL_LOCAL_H_INCLUDED__ -#define __IBUF_CTRL_LOCAL_H_INCLUDED__ - -#include "ibuf_ctrl_global.h" - -typedef struct ibuf_ctrl_proc_state_s ibuf_ctrl_proc_state_t; -typedef struct ibuf_ctrl_state_s ibuf_ctrl_state_t; - -struct ibuf_ctrl_proc_state_s { - hrt_data num_items; - hrt_data num_stores; - hrt_data dma_channel; - hrt_data dma_command; - hrt_data ibuf_st_addr; - hrt_data ibuf_stride; - hrt_data ibuf_end_addr; - hrt_data dest_st_addr; - hrt_data dest_stride; - hrt_data dest_end_addr; - hrt_data sync_frame; - hrt_data sync_command; - hrt_data store_command; - hrt_data shift_returned_items; - hrt_data elems_ibuf; - hrt_data elems_dest; - hrt_data cur_stores; - hrt_data cur_acks; - hrt_data cur_s2m_ibuf_addr; - hrt_data cur_dma_ibuf_addr; - hrt_data cur_dma_dest_addr; - hrt_data cur_isp_dest_addr; - hrt_data dma_cmds_send; - hrt_data main_cntrl_state; - hrt_data dma_sync_state; - hrt_data isp_sync_state; -}; - -struct ibuf_ctrl_state_s { - hrt_data recalc_words; - hrt_data arbiters; - ibuf_ctrl_proc_state_t proc_state[N_STREAM2MMIO_SID_ID]; -}; - -#endif /* __IBUF_CTRL_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h deleted file mode 100644 index 4d07c2fe1469..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IBUF_CTRL_PRIVATE_H_INCLUDED__ -#define __IBUF_CTRL_PRIVATE_H_INCLUDED__ - -#include "ibuf_ctrl_public.h" - -#include "device_access.h" /* ia_css_device_load_uint32 */ - -#include "assert_support.h" /* assert */ -#include "print_support.h" /* print */ - - -/***************************************************** - * - * Native command interface (NCI). - * - *****************************************************/ -/** - * @brief Get the ibuf-controller state. - * Refer to "ibuf_ctrl_public.h" for details. - */ -STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_get_state( - const ibuf_ctrl_ID_t ID, - ibuf_ctrl_state_t *state) -{ - uint32_t i; - - state->recalc_words = - ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_RECALC_WORDS_STATUS); - state->arbiters = - ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_ARBITERS_STATUS); - - /* - * Get the values of the register-set per - * ibuf-controller process. - */ - for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) { - ibuf_ctrl_get_proc_state( - ID, - i, - &(state->proc_state[i])); - } -} - -/** - * @brief Get the state of the ibuf-controller process. - * Refer to "ibuf_ctrl_public.h" for details. 
- */ -STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_get_proc_state( - const ibuf_ctrl_ID_t ID, - const uint32_t proc_id, - ibuf_ctrl_proc_state_t *state) -{ - hrt_address reg_bank_offset; - - reg_bank_offset = - _IBUF_CNTRL_PROC_REG_ALIGN * (1 + proc_id); - - state->num_items = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_ITEMS_PER_STORE); - - state->num_stores = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_STORES_PER_FRAME); - - state->dma_channel = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CHANNEL); - - state->dma_command = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CMD); - - state->ibuf_st_addr = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_START_ADDRESS); - - state->ibuf_stride = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_STRIDE); - - state->ibuf_end_addr = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_END_ADDRESS); - - state->dest_st_addr = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_START_ADDRESS); - - state->dest_stride = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_STRIDE); - - state->dest_end_addr = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_END_ADDRESS); - - state->sync_frame = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SYNC_FRAME); - - state->sync_command = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_SYNC_CMD); - - state->store_command = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_STORE_CMD); - - state->shift_returned_items = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SHIFT_ITEMS); - - state->elems_ibuf = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_IBUF); - - state->elems_dest = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_DEST); - - state->cur_stores = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_STORES); - - state->cur_acks = - ibuf_ctrl_reg_load(ID, reg_bank_offset + 
_IBUF_CNTRL_CUR_ACKS); - - state->cur_s2m_ibuf_addr = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_S2M_IBUF_ADDR); - - state->cur_dma_ibuf_addr = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_IBUF_ADDR); - - state->cur_dma_dest_addr = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_DEST_ADDR); - - state->cur_isp_dest_addr = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ISP_DEST_ADDR); - - state->dma_cmds_send = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND); - - state->main_cntrl_state = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_MAIN_CNTRL_STATE); - - state->dma_sync_state = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_SYNC_STATE); - - state->isp_sync_state = - ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ISP_SYNC_STATE); -} -/** - * @brief Dump the ibuf-controller state. - * Refer to "ibuf_ctrl_public.h" for details. - */ -STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_dump_state( - const ibuf_ctrl_ID_t ID, - ibuf_ctrl_state_t *state) -{ - uint32_t i; - ia_css_print("IBUF controller ID %d recalculate words 0x%x\n", ID, state->recalc_words); - ia_css_print("IBUF controller ID %d arbiters 0x%x\n", ID, state->arbiters); - - /* - * Dump the values of the register-set per - * ibuf-controller process. 
- */ - for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) { - ia_css_print("IBUF controller ID %d Process ID %d num_items 0x%x\n", ID, i, state->proc_state[i].num_items); - ia_css_print("IBUF controller ID %d Process ID %d num_stores 0x%x\n", ID, i, state->proc_state[i].num_stores); - ia_css_print("IBUF controller ID %d Process ID %d dma_channel 0x%x\n", ID, i, state->proc_state[i].dma_channel); - ia_css_print("IBUF controller ID %d Process ID %d dma_command 0x%x\n", ID, i, state->proc_state[i].dma_command); - ia_css_print("IBUF controller ID %d Process ID %d ibuf_st_addr 0x%x\n", ID, i, state->proc_state[i].ibuf_st_addr); - ia_css_print("IBUF controller ID %d Process ID %d ibuf_stride 0x%x\n", ID, i, state->proc_state[i].ibuf_stride); - ia_css_print("IBUF controller ID %d Process ID %d ibuf_end_addr 0x%x\n", ID, i, state->proc_state[i].ibuf_end_addr); - ia_css_print("IBUF controller ID %d Process ID %d dest_st_addr 0x%x\n", ID, i, state->proc_state[i].dest_st_addr); - ia_css_print("IBUF controller ID %d Process ID %d dest_stride 0x%x\n", ID, i, state->proc_state[i].dest_stride); - ia_css_print("IBUF controller ID %d Process ID %d dest_end_addr 0x%x\n", ID, i, state->proc_state[i].dest_end_addr); - ia_css_print("IBUF controller ID %d Process ID %d sync_frame 0x%x\n", ID, i, state->proc_state[i].sync_frame); - ia_css_print("IBUF controller ID %d Process ID %d sync_command 0x%x\n", ID, i, state->proc_state[i].sync_command); - ia_css_print("IBUF controller ID %d Process ID %d store_command 0x%x\n", ID, i, state->proc_state[i].store_command); - ia_css_print("IBUF controller ID %d Process ID %d shift_returned_items 0x%x\n", ID, i, state->proc_state[i].shift_returned_items); - ia_css_print("IBUF controller ID %d Process ID %d elems_ibuf 0x%x\n", ID, i, state->proc_state[i].elems_ibuf); - ia_css_print("IBUF controller ID %d Process ID %d elems_dest 0x%x\n", ID, i, state->proc_state[i].elems_dest); - ia_css_print("IBUF controller ID %d Process ID %d cur_stores 0x%x\n", ID, i, 
state->proc_state[i].cur_stores); - ia_css_print("IBUF controller ID %d Process ID %d cur_acks 0x%x\n", ID, i, state->proc_state[i].cur_acks); - ia_css_print("IBUF controller ID %d Process ID %d cur_s2m_ibuf_addr 0x%x\n", ID, i, state->proc_state[i].cur_s2m_ibuf_addr); - ia_css_print("IBUF controller ID %d Process ID %d cur_dma_ibuf_addr 0x%x\n", ID, i, state->proc_state[i].cur_dma_ibuf_addr); - ia_css_print("IBUF controller ID %d Process ID %d cur_dma_dest_addr 0x%x\n", ID, i, state->proc_state[i].cur_dma_dest_addr); - ia_css_print("IBUF controller ID %d Process ID %d cur_isp_dest_addr 0x%x\n", ID, i, state->proc_state[i].cur_isp_dest_addr); - ia_css_print("IBUF controller ID %d Process ID %d dma_cmds_send 0x%x\n", ID, i, state->proc_state[i].dma_cmds_send); - ia_css_print("IBUF controller ID %d Process ID %d main_cntrl_state 0x%x\n", ID, i, state->proc_state[i].main_cntrl_state); - ia_css_print("IBUF controller ID %d Process ID %d dma_sync_state 0x%x\n", ID, i, state->proc_state[i].dma_sync_state); - ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i, state->proc_state[i].isp_sync_state); - } -} -/* end of NCI */ - -/***************************************************** - * - * Device level interface (DLI). - * - *****************************************************/ -/** - * @brief Load the register value. - * Refer to "ibuf_ctrl_public.h" for details. - */ -STORAGE_CLASS_IBUF_CTRL_C hrt_data ibuf_ctrl_reg_load( - const ibuf_ctrl_ID_t ID, - const hrt_address reg) -{ - assert(ID < N_IBUF_CTRL_ID); - assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data)); -} - - -/** - * @brief Store a value to the register. - * Refer to "ibuf_ctrl_public.h" for details. 
- */ -STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_reg_store( - const ibuf_ctrl_ID_t ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_IBUF_CTRL_ID); - assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1); - - ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); -} -/* end of DLI */ - - -#endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_local.h deleted file mode 100644 index f199423e28da..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_local.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__ -#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__ - -#include "type_support.h" -#include "input_system_global.h" - -#include "ibuf_ctrl.h" -#include "csi_rx.h" -#include "pixelgen.h" -#include "isys_stream2mmio.h" -#include "isys_irq.h" - -typedef input_system_err_t input_system_error_t; - -typedef enum { - MIPI_FORMAT_SHORT1 = 0x08, - MIPI_FORMAT_SHORT2, - MIPI_FORMAT_SHORT3, - MIPI_FORMAT_SHORT4, - MIPI_FORMAT_SHORT5, - MIPI_FORMAT_SHORT6, - MIPI_FORMAT_SHORT7, - MIPI_FORMAT_SHORT8, - MIPI_FORMAT_EMBEDDED = 0x12, - MIPI_FORMAT_YUV420_8 = 0x18, - MIPI_FORMAT_YUV420_10, - MIPI_FORMAT_YUV420_8_LEGACY, - MIPI_FORMAT_YUV420_8_SHIFT = 0x1C, - MIPI_FORMAT_YUV420_10_SHIFT, - MIPI_FORMAT_YUV422_8 = 0x1E, - MIPI_FORMAT_YUV422_10, - MIPI_FORMAT_RGB444 = 0x20, - MIPI_FORMAT_RGB555, - MIPI_FORMAT_RGB565, - MIPI_FORMAT_RGB666, - MIPI_FORMAT_RGB888, - MIPI_FORMAT_RAW6 = 0x28, - MIPI_FORMAT_RAW7, - MIPI_FORMAT_RAW8, - MIPI_FORMAT_RAW10, - MIPI_FORMAT_RAW12, - MIPI_FORMAT_RAW14, - MIPI_FORMAT_CUSTOM0 = 0x30, - MIPI_FORMAT_CUSTOM1, - MIPI_FORMAT_CUSTOM2, - MIPI_FORMAT_CUSTOM3, - MIPI_FORMAT_CUSTOM4, - MIPI_FORMAT_CUSTOM5, - MIPI_FORMAT_CUSTOM6, - MIPI_FORMAT_CUSTOM7, - //MIPI_FORMAT_RAW16, /*not supported by 2401*/ - //MIPI_FORMAT_RAW18, - N_MIPI_FORMAT -} mipi_format_t; - -#define N_MIPI_FORMAT_CUSTOM 8 - -/* The number of stores for compressed format types */ -#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM) -#define UNCOMPRESSED_BITS_PER_PIXEL_10 10 -#define UNCOMPRESSED_BITS_PER_PIXEL_12 12 -#define COMPRESSED_BITS_PER_PIXEL_6 6 -#define COMPRESSED_BITS_PER_PIXEL_7 7 -#define COMPRESSED_BITS_PER_PIXEL_8 8 -enum mipi_compressor { - MIPI_COMPRESSOR_NONE = 0, - MIPI_COMPRESSOR_10_6_10, - MIPI_COMPRESSOR_10_7_10, - MIPI_COMPRESSOR_10_8_10, - MIPI_COMPRESSOR_12_6_12, - MIPI_COMPRESSOR_12_7_12, - MIPI_COMPRESSOR_12_8_12, - N_MIPI_COMPRESSOR_METHODS -}; - -typedef enum { - MIPI_PREDICTOR_NONE = 0, - 
MIPI_PREDICTOR_TYPE1, - MIPI_PREDICTOR_TYPE2, - N_MIPI_PREDICTOR_TYPES -} mipi_predictor_t; - -typedef struct input_system_state_s input_system_state_t; -struct input_system_state_s { - ibuf_ctrl_state_t ibuf_ctrl_state[N_IBUF_CTRL_ID]; - csi_rx_fe_ctrl_state_t csi_rx_fe_ctrl_state[N_CSI_RX_FRONTEND_ID]; - csi_rx_be_ctrl_state_t csi_rx_be_ctrl_state[N_CSI_RX_BACKEND_ID]; - pixelgen_ctrl_state_t pixelgen_ctrl_state[N_PIXELGEN_ID]; - stream2mmio_state_t stream2mmio_state[N_STREAM2MMIO_ID]; - isys_irqc_state_t isys_irqc_state[N_ISYS_IRQ_ID]; -}; -#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_private.h deleted file mode 100644 index 97505e436047..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_private.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ -#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ - -#include "input_system_public.h" - -STORAGE_CLASS_INPUT_SYSTEM_C input_system_err_t input_system_get_state( - const input_system_ID_t ID, - input_system_state_t *state) -{ - uint32_t i; - - (void)(ID); - - /* get the states of all CSI RX frontend devices */ - for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) { - csi_rx_fe_ctrl_get_state( - (csi_rx_frontend_ID_t)i, - &(state->csi_rx_fe_ctrl_state[i])); - } - - /* get the states of all CIS RX backend devices */ - for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) { - csi_rx_be_ctrl_get_state( - (csi_rx_backend_ID_t)i, - &(state->csi_rx_be_ctrl_state[i])); - } - - /* get the states of all pixelgen devices */ - for (i = 0; i < N_PIXELGEN_ID; i++) { - pixelgen_ctrl_get_state( - (pixelgen_ID_t)i, - &(state->pixelgen_ctrl_state[i])); - } - - /* get the states of all stream2mmio devices */ - for (i = 0; i < N_STREAM2MMIO_ID; i++) { - stream2mmio_get_state( - (stream2mmio_ID_t)i, - &(state->stream2mmio_state[i])); - } - - /* get the states of all ibuf-controller devices */ - for (i = 0; i < N_IBUF_CTRL_ID; i++) { - ibuf_ctrl_get_state( - (ibuf_ctrl_ID_t)i, - &(state->ibuf_ctrl_state[i])); - } - - /* get the states of all isys irq controllers */ - for (i = 0; i < N_ISYS_IRQ_ID; i++) { - isys_irqc_state_get((isys_irq_ID_t)i, &(state->isys_irqc_state[i])); - } - - /* TODO: get the states of all ISYS2401 DMA devices */ - for (i = 0; i < N_ISYS2401_DMA_ID; i++) { - } - - return INPUT_SYSTEM_ERR_NO_ERROR; -} -STORAGE_CLASS_INPUT_SYSTEM_C void input_system_dump_state( - const input_system_ID_t ID, - input_system_state_t *state) -{ - uint32_t i; - - (void)(ID); - - /* dump the states of all CSI RX frontend devices */ - for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) { - csi_rx_fe_ctrl_dump_state( - (csi_rx_frontend_ID_t)i, - &(state->csi_rx_fe_ctrl_state[i])); - } - - /* dump the states of all CIS RX backend devices */ - for (i = 0; i < 
N_CSI_RX_BACKEND_ID; i++) { - csi_rx_be_ctrl_dump_state( - (csi_rx_backend_ID_t)i, - &(state->csi_rx_be_ctrl_state[i])); - } - - /* dump the states of all pixelgen devices */ - for (i = 0; i < N_PIXELGEN_ID; i++) { - pixelgen_ctrl_dump_state( - (pixelgen_ID_t)i, - &(state->pixelgen_ctrl_state[i])); - } - - /* dump the states of all st2mmio devices */ - for (i = 0; i < N_STREAM2MMIO_ID; i++) { - stream2mmio_dump_state( - (stream2mmio_ID_t)i, - &(state->stream2mmio_state[i])); - } - - /* dump the states of all ibuf-controller devices */ - for (i = 0; i < N_IBUF_CTRL_ID; i++) { - ibuf_ctrl_dump_state( - (ibuf_ctrl_ID_t)i, - &(state->ibuf_ctrl_state[i])); - } - - /* dump the states of all isys irq controllers */ - for (i = 0; i < N_ISYS_IRQ_ID; i++) { - isys_irqc_state_dump((isys_irq_ID_t)i, &(state->isys_irqc_state[i])); - } - - /* TODO: dump the states of all ISYS2401 DMA devices */ - for (i = 0; i < N_ISYS2401_DMA_ID; i++) { - } - - return; -} -#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma.c deleted file mode 100644 index 77767228985e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma.c +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "isys_dma.h" -#include "assert_support.h" - -#ifndef __INLINE_ISYS2401_DMA__ -/* - * Include definitions for isys dma register access functions. isys_dma.h - * includes declarations of these functions by including isys_dma_public.h. - */ -#include "isys_dma_private.h" -#endif - -const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID] = { - N_ISYS2401_DMA_CHANNEL -}; - -void isys2401_dma_set_max_burst_size( - const isys2401_dma_ID_t dma_id, - uint32_t max_burst_size) -{ - assert(dma_id < N_ISYS2401_DMA_ID); - assert((max_burst_size > 0x00) && (max_burst_size <= 0xFF)); - - isys2401_dma_reg_store(dma_id, - DMA_DEV_INFO_REG_IDX(_DMA_V2_DEV_INTERF_MAX_BURST_IDX, HIVE_DMA_BUS_DDR_CONN), - (max_burst_size - 1)); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_local.h deleted file mode 100644 index 5c694a26386e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_local.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __ISYS_DMA_LOCAL_H_INCLUDED__ -#define __ISYS_DMA_LOCAL_H_INCLUDED__ - -#include "isys_dma_global.h" - -#endif /* __ISYS_DMA_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_private.h deleted file mode 100644 index 2cd1aeecf617..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_private.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __ISYS_DMA_PRIVATE_H_INCLUDED__ -#define __ISYS_DMA_PRIVATE_H_INCLUDED__ - -#include "isys_dma_public.h" -#include "device_access.h" -#include "assert_support.h" -#include "dma.h" -#include "dma_v2_defs.h" -#include "print_support.h" - - -STORAGE_CLASS_ISYS2401_DMA_C void isys2401_dma_reg_store( - const isys2401_dma_ID_t dma_id, - const unsigned int reg, - const hrt_data value) -{ - unsigned int reg_loc; - - assert(dma_id < N_ISYS2401_DMA_ID); - assert(ISYS2401_DMA_BASE[dma_id] != (hrt_address)-1); - - reg_loc = ISYS2401_DMA_BASE[dma_id] + (reg * sizeof(hrt_data)); - - ia_css_print("isys dma store at addr(0x%x) val(%u)\n", reg_loc, (unsigned int)value); - ia_css_device_store_uint32(reg_loc, value); -} - -STORAGE_CLASS_ISYS2401_DMA_C hrt_data isys2401_dma_reg_load( - const isys2401_dma_ID_t dma_id, - const unsigned int reg) -{ - unsigned int reg_loc; - hrt_data value; - - assert(dma_id < N_ISYS2401_DMA_ID); - assert(ISYS2401_DMA_BASE[dma_id] != (hrt_address)-1); - - reg_loc = ISYS2401_DMA_BASE[dma_id] + (reg * sizeof(hrt_data)); - - value = ia_css_device_load_uint32(reg_loc); - ia_css_print("isys dma load from addr(0x%x) val(%u)\n", reg_loc, (unsigned int)value); - - return value; -} - -#endif /* __ISYS_DMA_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c deleted file mode 100644 index 842ae340ae13..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include -#include "device_access.h" -#include "assert_support.h" -#include "ia_css_debug.h" -#include "isys_irq.h" - -#ifndef __INLINE_ISYS2401_IRQ__ -/* - * Include definitions for isys irq private functions. isys_irq.h includes - * declarations of these functions by including isys_irq_public.h. - */ -#include "isys_irq_private.h" -#endif - -/* Public interface */ -STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable( - const isys_irq_ID_t isys_irqc_id) -{ - assert(isys_irqc_id < N_ISYS_IRQ_ID); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Setting irq mask for port %u\n", isys_irqc_id); - isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX, ISYS_IRQ_MASK_REG_VALUE); - isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX, ISYS_IRQ_CLEAR_REG_VALUE); - isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX, ISYS_IRQ_ENABLE_REG_VALUE); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_local.h deleted file mode 100644 index 0bffb5680e25..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_local.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __ISYS_IRQ_LOCAL_H__ -#define __ISYS_IRQ_LOCAL_H__ - -#include - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - -typedef struct isys_irqc_state_s isys_irqc_state_t; - -struct isys_irqc_state_s { - hrt_data edge; - hrt_data mask; - hrt_data status; - hrt_data enable; - hrt_data level_no; -/*hrt_data clear; */ /* write-only register */ -}; - -#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */ - -#endif /* __ISYS_IRQ_LOCAL_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h deleted file mode 100644 index e69f39893bd2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __ISYS_IRQ_PRIVATE_H__ -#define __ISYS_IRQ_PRIVATE_H__ - -#include "isys_irq_global.h" -#include "isys_irq_local.h" - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - -/* -------------------------------------------------------+ - | Native command interface (NCI) | - + -------------------------------------------------------*/ - -/** -* @brief Get the isys irq status. -* Refer to "isys_irq.h" for details. -*/ -STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_get( - const isys_irq_ID_t isys_irqc_id, - isys_irqc_state_t *state) -{ - state->edge = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_EDGE_REG_IDX); - state->mask = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX); - state->status = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_STATUS_REG_IDX); - state->enable = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX); - state->level_no = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_LEVEL_NO_REG_IDX); - /* - ** Invalid to read/load from write-only register 'clear' - ** state->clear = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX); - */ -} - -/** -* @brief Dump the isys irq status. -* Refer to "isys_irq.h" for details. 
-*/ -STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump( - const isys_irq_ID_t isys_irqc_id, - const isys_irqc_state_t *state) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "isys irq controller id %d" - "\n\tstatus:0x%x\n\tedge:0x%x\n\tmask:0x%x" - "\n\tenable:0x%x\n\tlevel_not_pulse:0x%x\n", - isys_irqc_id, - state->status, state->edge, state->mask, state->enable, state->level_no); -} - -/* end of NCI */ - -/* -------------------------------------------------------+ - | Device level interface (DLI) | - + -------------------------------------------------------*/ - -/* Support functions */ -STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_reg_store( - const isys_irq_ID_t isys_irqc_id, - const unsigned int reg_idx, - const hrt_data value) -{ - unsigned int reg_addr; - - assert(isys_irqc_id < N_ISYS_IRQ_ID); - assert(reg_idx <= ISYS_IRQ_LEVEL_NO_REG_IDX); - - reg_addr = ISYS_IRQ_BASE[isys_irqc_id] + (reg_idx * sizeof(hrt_data)); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "isys irq store at addr(0x%x) val(%u)\n", reg_addr, (unsigned int)value); - - ia_css_device_store_uint32(reg_addr, value); -} - -STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load( - const isys_irq_ID_t isys_irqc_id, - const unsigned int reg_idx) -{ - unsigned int reg_addr; - hrt_data value; - - assert(isys_irqc_id < N_ISYS_IRQ_ID); - assert(reg_idx <= ISYS_IRQ_LEVEL_NO_REG_IDX); - - reg_addr = ISYS_IRQ_BASE[isys_irqc_id] + (reg_idx * sizeof(hrt_data)); - value = ia_css_device_load_uint32(reg_addr); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "isys irq load from addr(0x%x) val(%u)\n", reg_addr, (unsigned int)value); - - return value; -} - -/* end of DLI */ - -#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */ - -#endif /* __ISYS_IRQ_PRIVATE_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio.c deleted file mode 100644 index 
67570138ba24..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio.c +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "isys_stream2mmio.h" - -const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID] = { - N_STREAM2MMIO_SID_ID, - STREAM2MMIO_SID4_ID, - STREAM2MMIO_SID4_ID -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_local.h deleted file mode 100644 index 801523977e1d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_local.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__ -#define __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__ - -#include "isys_stream2mmio_global.h" - -typedef struct stream2mmio_state_s stream2mmio_state_t; -typedef struct stream2mmio_sid_state_s stream2mmio_sid_state_t; - -struct stream2mmio_sid_state_s { - hrt_data rcv_ack; - hrt_data pix_width_id; - hrt_data start_addr; - hrt_data end_addr; - hrt_data strides; - hrt_data num_items; - hrt_data block_when_no_cmd; -}; - -struct stream2mmio_state_s { - stream2mmio_sid_state_t sid_state[N_STREAM2MMIO_SID_ID]; -}; -#endif /* __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h deleted file mode 100644 index f946105ddf43..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ -#define __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ - -#include "isys_stream2mmio_public.h" -#include "device_access.h" /* ia_css_device_load_uint32 */ -#include "assert_support.h" /* assert */ -#include "print_support.h" /* print */ - -#define STREAM2MMIO_COMMAND_REG_ID 0 -#define STREAM2MMIO_ACKNOWLEDGE_REG_ID 1 -#define STREAM2MMIO_PIX_WIDTH_ID_REG_ID 2 -#define STREAM2MMIO_START_ADDR_REG_ID 3 /* master port address,NOT Byte */ -#define STREAM2MMIO_END_ADDR_REG_ID 4 /* master port address,NOT Byte */ -#define STREAM2MMIO_STRIDE_REG_ID 5 /* stride in master port words, increment is per packet for long sids, stride is not used for short sid's*/ -#define STREAM2MMIO_NUM_ITEMS_REG_ID 6 /* number of packets for store packets cmd, number of words for store_words cmd */ -#define STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID 7 /* if this register is 1, input will be stalled if there is no pending command for this sid */ -#define STREAM2MMIO_REGS_PER_SID 8 - -/***************************************************** - * - * Native command interface (NCI). - * - *****************************************************/ -/** - * @brief Get the stream2mmio-controller state. - * Refer to "stream2mmio_public.h" for details. - */ -STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_get_state( - const stream2mmio_ID_t ID, - stream2mmio_state_t *state) -{ - stream2mmio_sid_ID_t i; - - /* - * Get the values of the register-set per - * stream2mmio-controller sids. - */ - for (i = STREAM2MMIO_SID0_ID; i < N_STREAM2MMIO_SID_PROCS[ID]; i++) { - stream2mmio_get_sid_state(ID, i, &(state->sid_state[i])); - } -} - -/** - * @brief Get the state of the stream2mmio-controller sidess. - * Refer to "stream2mmio_public.h" for details. 
- */ -STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_get_sid_state( - const stream2mmio_ID_t ID, - const stream2mmio_sid_ID_t sid_id, - stream2mmio_sid_state_t *state) -{ - - state->rcv_ack = - stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_ACKNOWLEDGE_REG_ID); - - state->pix_width_id = - stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_PIX_WIDTH_ID_REG_ID); - - state->start_addr = - stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_START_ADDR_REG_ID); - - state->end_addr = - stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_END_ADDR_REG_ID); - - state->strides = - stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_STRIDE_REG_ID); - - state->num_items = - stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_NUM_ITEMS_REG_ID); - - state->block_when_no_cmd = - stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID); - -} - -/** - * @brief Dump the state of the stream2mmio-controller sidess. - * Refer to "stream2mmio_public.h" for details. - */ -STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_print_sid_state( - stream2mmio_sid_state_t *state) -{ - ia_css_print("\t \t Receive acks 0x%x\n", state->rcv_ack); - ia_css_print("\t \t Pixel width 0x%x\n", state->pix_width_id); - ia_css_print("\t \t Startaddr 0x%x\n", state->start_addr); - ia_css_print("\t \t Endaddr 0x%x\n", state->end_addr); - ia_css_print("\t \t Strides 0x%x\n", state->strides); - ia_css_print("\t \t Num Items 0x%x\n", state->num_items); - ia_css_print("\t \t block when no cmd 0x%x\n", state->block_when_no_cmd); - -} -/** - * @brief Dump the ibuf-controller state. - * Refer to "stream2mmio_public.h" for details. - */ -STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_dump_state( - const stream2mmio_ID_t ID, - stream2mmio_state_t *state) -{ - stream2mmio_sid_ID_t i; - - /* - * Get the values of the register-set per - * stream2mmio-controller sids. 
- */ - for (i = STREAM2MMIO_SID0_ID; i < N_STREAM2MMIO_SID_PROCS[ID]; i++) { - ia_css_print("StREAM2MMIO ID %d SID %d\n", ID, i); - stream2mmio_print_sid_state(&(state->sid_state[i])); - } -} -/* end of NCI */ - -/***************************************************** - * - * Device level interface (DLI). - * - *****************************************************/ -/** - * @brief Load the register value. - * Refer to "stream2mmio_public.h" for details. - */ -STORAGE_CLASS_STREAM2MMIO_C hrt_data stream2mmio_reg_load( - const stream2mmio_ID_t ID, - const stream2mmio_sid_ID_t sid_id, - const uint32_t reg_idx) -{ - uint32_t reg_bank_offset; - - assert(ID < N_STREAM2MMIO_ID); - - reg_bank_offset = STREAM2MMIO_REGS_PER_SID * sid_id; - return ia_css_device_load_uint32(STREAM2MMIO_CTRL_BASE[ID] + - (reg_bank_offset + reg_idx) * sizeof(hrt_data)); -} - - -/** - * @brief Store a value to the register. - * Refer to "stream2mmio_public.h" for details. - */ -STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_reg_store( - const stream2mmio_ID_t ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_STREAM2MMIO_ID); - assert(STREAM2MMIO_CTRL_BASE[ID] != (hrt_address)-1); - - ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] + - reg * sizeof(hrt_data), value); -} -/* end of DLI */ - -#endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_local.h deleted file mode 100644 index 24f4da9aef40..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_local.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __PIXELGEN_LOCAL_H_INCLUDED__ -#define __PIXELGEN_LOCAL_H_INCLUDED__ - -#include "pixelgen_global.h" - -typedef struct pixelgen_ctrl_state_s pixelgen_ctrl_state_t; -struct pixelgen_ctrl_state_s { - hrt_data com_enable; - hrt_data prbs_rstval0; - hrt_data prbs_rstval1; - hrt_data syng_sid; - hrt_data syng_free_run; - hrt_data syng_pause; - hrt_data syng_nof_frames; - hrt_data syng_nof_pixels; - hrt_data syng_nof_line; - hrt_data syng_hblank_cyc; - hrt_data syng_vblank_cyc; - hrt_data syng_stat_hcnt; - hrt_data syng_stat_vcnt; - hrt_data syng_stat_fcnt; - hrt_data syng_stat_done; - hrt_data tpg_mode; - hrt_data tpg_hcnt_mask; - hrt_data tpg_vcnt_mask; - hrt_data tpg_xycnt_mask; - hrt_data tpg_hcnt_delta; - hrt_data tpg_vcnt_delta; - hrt_data tpg_r1; - hrt_data tpg_g1; - hrt_data tpg_b1; - hrt_data tpg_r2; - hrt_data tpg_g2; - hrt_data tpg_b2; -}; -#endif /* __PIXELGEN_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h deleted file mode 100644 index c5bf540eadf1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __PIXELGEN_PRIVATE_H_INCLUDED__ -#define __PIXELGEN_PRIVATE_H_INCLUDED__ -#include "pixelgen_public.h" -#include "hive_isp_css_host_ids_hrt.h" -#include "PixelGen_SysBlock_defs.h" -#include "device_access.h" /* ia_css_device_load_uint32 */ -#include "assert_support.h" /* assert */ - - -/***************************************************** - * - * Native command interface (NCI). - * - *****************************************************/ -/** - * @brief Get the pixelgen state. - * Refer to "pixelgen_public.h" for details. 
- */ -STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_get_state( - const pixelgen_ID_t ID, - pixelgen_ctrl_state_t *state) -{ - - state->com_enable = - pixelgen_ctrl_reg_load(ID, _PXG_COM_ENABLE_REG_IDX); - state->prbs_rstval0 = - pixelgen_ctrl_reg_load(ID, _PXG_PRBS_RSTVAL_REG0_IDX); - state->prbs_rstval1 = - pixelgen_ctrl_reg_load(ID, _PXG_PRBS_RSTVAL_REG1_IDX); - state->syng_sid = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_SID_REG_IDX); - state->syng_free_run = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_FREE_RUN_REG_IDX); - state->syng_pause = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_PAUSE_REG_IDX); - state->syng_nof_frames = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_FRAME_REG_IDX); - state->syng_nof_pixels = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_PIXEL_REG_IDX); - state->syng_nof_line = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_LINE_REG_IDX); - state->syng_hblank_cyc = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_HBLANK_CYC_REG_IDX); - state->syng_vblank_cyc = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_VBLANK_CYC_REG_IDX); - state->syng_stat_hcnt = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_HCNT_REG_IDX); - state->syng_stat_vcnt = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_VCNT_REG_IDX); - state->syng_stat_fcnt = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_FCNT_REG_IDX); - state->syng_stat_done = - pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_DONE_REG_IDX); - state->tpg_mode = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_MODE_REG_IDX); - state->tpg_hcnt_mask = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_HCNT_MASK_REG_IDX); - state->tpg_vcnt_mask = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_VCNT_MASK_REG_IDX); - state->tpg_xycnt_mask = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_XYCNT_MASK_REG_IDX); - state->tpg_hcnt_delta = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_HCNT_DELTA_REG_IDX); - state->tpg_vcnt_delta = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_VCNT_DELTA_REG_IDX); - state->tpg_r1 = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_R1_REG_IDX); - state->tpg_g1 = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_G1_REG_IDX); - 
state->tpg_b1 = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_B1_REG_IDX); - state->tpg_r2 = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_R2_REG_IDX); - state->tpg_g2 = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_G2_REG_IDX); - state->tpg_b2 = - pixelgen_ctrl_reg_load(ID, _PXG_TPG_B2_REG_IDX); -} -/** - * @brief Dump the pixelgen state. - * Refer to "pixelgen_public.h" for details. - */ -STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_dump_state( - const pixelgen_ID_t ID, - pixelgen_ctrl_state_t *state) -{ - ia_css_print("Pixel Generator ID %d Enable 0x%x \n", ID, state->com_enable); - ia_css_print("Pixel Generator ID %d PRBS reset vlue 0 0x%x \n", ID, state->prbs_rstval0); - ia_css_print("Pixel Generator ID %d PRBS reset vlue 1 0x%x \n", ID, state->prbs_rstval1); - ia_css_print("Pixel Generator ID %d SYNC SID 0x%x \n", ID, state->syng_sid); - ia_css_print("Pixel Generator ID %d syng free run 0x%x \n", ID, state->syng_free_run); - ia_css_print("Pixel Generator ID %d syng pause 0x%x \n", ID, state->syng_pause); - ia_css_print("Pixel Generator ID %d syng no of frames 0x%x \n", ID, state->syng_nof_frames); - ia_css_print("Pixel Generator ID %d syng no of pixels 0x%x \n", ID, state->syng_nof_pixels); - ia_css_print("Pixel Generator ID %d syng no of line 0x%x \n", ID, state->syng_nof_line); - ia_css_print("Pixel Generator ID %d syng hblank cyc 0x%x \n", ID, state->syng_hblank_cyc); - ia_css_print("Pixel Generator ID %d syng vblank cyc 0x%x \n", ID, state->syng_vblank_cyc); - ia_css_print("Pixel Generator ID %d syng stat hcnt 0x%x \n", ID, state->syng_stat_hcnt); - ia_css_print("Pixel Generator ID %d syng stat vcnt 0x%x \n", ID, state->syng_stat_vcnt); - ia_css_print("Pixel Generator ID %d syng stat fcnt 0x%x \n", ID, state->syng_stat_fcnt); - ia_css_print("Pixel Generator ID %d syng stat done 0x%x \n", ID, state->syng_stat_done); - ia_css_print("Pixel Generator ID %d tpg modee 0x%x \n", ID, state->tpg_mode); - ia_css_print("Pixel Generator ID %d tpg hcnt mask 0x%x \n", ID, 
state->tpg_hcnt_mask); - ia_css_print("Pixel Generator ID %d tpg hcnt mask 0x%x \n", ID, state->tpg_hcnt_mask); - ia_css_print("Pixel Generator ID %d tpg xycnt mask 0x%x \n", ID, state->tpg_xycnt_mask); - ia_css_print("Pixel Generator ID %d tpg hcnt delta 0x%x \n", ID, state->tpg_hcnt_delta); - ia_css_print("Pixel Generator ID %d tpg vcnt delta 0x%x \n", ID, state->tpg_vcnt_delta); - ia_css_print("Pixel Generator ID %d tpg r1 0x%x \n", ID, state->tpg_r1); - ia_css_print("Pixel Generator ID %d tpg g1 0x%x \n", ID, state->tpg_g1); - ia_css_print("Pixel Generator ID %d tpg b1 0x%x \n", ID, state->tpg_b1); - ia_css_print("Pixel Generator ID %d tpg r2 0x%x \n", ID, state->tpg_r2); - ia_css_print("Pixel Generator ID %d tpg g2 0x%x \n", ID, state->tpg_g2); - ia_css_print("Pixel Generator ID %d tpg b2 0x%x \n", ID, state->tpg_b2); -} -/* end of NCI */ -/***************************************************** - * - * Device level interface (DLI). - * - *****************************************************/ -/** - * @brief Load the register value. - * Refer to "pixelgen_public.h" for details. - */ -STORAGE_CLASS_PIXELGEN_C hrt_data pixelgen_ctrl_reg_load( - const pixelgen_ID_t ID, - const hrt_address reg) -{ - assert(ID < N_PIXELGEN_ID); - assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data)); -} - - -/** - * @brief Store a value to the register. - * Refer to "pixelgen_ctrl_public.h" for details. 
- */ -STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store( - const pixelgen_ID_t ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_PIXELGEN_ID); - assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address)-1); - - ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); -} -/* end of DLI */ -#endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/system_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/system_local.h deleted file mode 100644 index 5600b32e29f4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/system_local.h +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __SYSTEM_LOCAL_H_INCLUDED__ -#define __SYSTEM_LOCAL_H_INCLUDED__ - -#ifdef HRT_ISP_CSS_CUSTOM_HOST -#ifndef HRT_USE_VIR_ADDRS -#define HRT_USE_VIR_ADDRS -#endif -/* This interface is deprecated */ -/*#include "hive_isp_css_custom_host_hrt.h"*/ -#endif - -#include "system_global.h" - -#ifdef __FIST__ -#define HRT_ADDRESS_WIDTH 32 /* Surprise, this is a local property and even differs per platform */ -#else -#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */ -#endif - -/* This interface is deprecated */ -#include "hrt/hive_types.h" - -/* - * Cell specific address maps - */ -#if HRT_ADDRESS_WIDTH == 64 - -#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */ - -/* DDR */ -static const hrt_address DDR_BASE[N_DDR_ID] = { - 0x0000000120000000ULL}; - -/* ISP */ -static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { - 0x0000000000020000ULL}; - -static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { - 0x0000000000200000ULL}; - -static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { - 0x0000000000100000ULL}; - -static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { - 0x00000000001C0000ULL, - 0x00000000001D0000ULL, - 0x00000000001E0000ULL}; - -static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { - 0x00000000001F0000ULL}; - -/* SP */ -static const hrt_address SP_CTRL_BASE[N_SP_ID] = { - 0x0000000000010000ULL}; - -static const hrt_address SP_DMEM_BASE[N_SP_ID] = { - 0x0000000000300000ULL}; - -/* MMU */ -#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) || defined(IS_ISP_2401_MAMOIADA_SYSTEM) -/* - * MMU0_ID: The data MMU - * MMU1_ID: The icache MMU - */ -static const hrt_address MMU_BASE[N_MMU_ID] = { - 0x0000000000070000ULL, - 0x00000000000A0000ULL}; -#else -#error "system_local.h: SYSTEM must be one of {2400, 2401 }" -#endif - -/* DMA */ -static const hrt_address DMA_BASE[N_DMA_ID] = { - 0x0000000000040000ULL}; - -static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = { - 0x00000000000CA000ULL}; - -/* IRQ */ 
-static const hrt_address IRQ_BASE[N_IRQ_ID] = { - 0x0000000000000500ULL, - 0x0000000000030A00ULL, - 0x000000000008C000ULL, - 0x0000000000090200ULL}; -/* - 0x0000000000000500ULL}; - */ - -/* GDC */ -static const hrt_address GDC_BASE[N_GDC_ID] = { - 0x0000000000050000ULL, - 0x0000000000060000ULL}; - -/* FIFO_MONITOR (not a subset of GP_DEVICE) */ -static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { - 0x0000000000000000ULL}; - -/* -static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { - 0x0000000000000000ULL}; - -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - 0x0000000000090000ULL}; -*/ - -/* GP_DEVICE (single base for all separate GP_REG instances) */ -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - 0x0000000000000000ULL}; - -/*GP TIMER , all timer registers are inter-twined, - * so, having multiple base addresses for - * different timers does not help*/ -static const hrt_address GP_TIMER_BASE = - (hrt_address)0x0000000000000600ULL; - -/* GPIO */ -static const hrt_address GPIO_BASE[N_GPIO_ID] = { - 0x0000000000000400ULL}; - -/* TIMED_CTRL */ -static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { - 0x0000000000000100ULL}; - - -/* INPUT_FORMATTER */ -static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { - 0x0000000000030000ULL, - 0x0000000000030200ULL, - 0x0000000000030400ULL, - 0x0000000000030600ULL}; /* memcpy() */ - -/* INPUT_SYSTEM */ -static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { - 0x0000000000080000ULL}; -/* 0x0000000000081000ULL, */ /* capture A */ -/* 0x0000000000082000ULL, */ /* capture B */ -/* 0x0000000000083000ULL, */ /* capture C */ -/* 0x0000000000084000ULL, */ /* Acquisition */ -/* 0x0000000000085000ULL, */ /* DMA */ -/* 0x0000000000089000ULL, */ /* ctrl */ -/* 0x000000000008A000ULL, */ /* GP regs */ -/* 0x000000000008B000ULL, */ /* FIFO */ -/* 0x000000000008C000ULL, */ /* IRQ */ - -/* RX, the MIPI lane control regs start at offset 0 */ -static const hrt_address 
RX_BASE[N_RX_ID] = { - 0x0000000000080100ULL}; - -/* IBUF_CTRL, part of the Input System 2401 */ -static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = { - 0x00000000000C1800ULL, /* ibuf controller A */ - 0x00000000000C3800ULL, /* ibuf controller B */ - 0x00000000000C5800ULL /* ibuf controller C */ -}; - -/* ISYS IRQ Controllers, part of the Input System 2401 */ -static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = { - 0x00000000000C1400ULL, /* port a */ - 0x00000000000C3400ULL, /* port b */ - 0x00000000000C5400ULL /* port c */ -}; - -/* CSI FE, part of the Input System 2401 */ -static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = { - 0x00000000000C0400ULL, /* csi fe controller A */ - 0x00000000000C2400ULL, /* csi fe controller B */ - 0x00000000000C4400ULL /* csi fe controller C */ -}; -/* CSI BE, part of the Input System 2401 */ -static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = { - 0x00000000000C0800ULL, /* csi be controller A */ - 0x00000000000C2800ULL, /* csi be controller B */ - 0x00000000000C4800ULL /* csi be controller C */ -}; -/* PIXEL Generator, part of the Input System 2401 */ -static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = { - 0x00000000000C1000ULL, /* pixel gen controller A */ - 0x00000000000C3000ULL, /* pixel gen controller B */ - 0x00000000000C5000ULL /* pixel gen controller C */ -}; -/* Stream2MMIO, part of the Input System 2401 */ -static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = { - 0x00000000000C0C00ULL, /* stream2mmio controller A */ - 0x00000000000C2C00ULL, /* stream2mmio controller B */ - 0x00000000000C4C00ULL /* stream2mmio controller C */ -}; -#elif HRT_ADDRESS_WIDTH == 32 - -#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */ - -/* DDR : Attention, this value not defined in 32-bit */ -static const hrt_address DDR_BASE[N_DDR_ID] = { - 0x00000000UL}; - -/* ISP */ -static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { - 0x00020000UL}; - -static 
const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { - 0xffffffffUL}; - -static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { - 0xffffffffUL}; - -static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { - 0xffffffffUL, - 0xffffffffUL, - 0xffffffffUL}; - -static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { - 0xffffffffUL}; - -/* SP */ -static const hrt_address SP_CTRL_BASE[N_SP_ID] = { - 0x00010000UL}; - -static const hrt_address SP_DMEM_BASE[N_SP_ID] = { - 0x00300000UL}; - -/* MMU */ -#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) || defined(IS_ISP_2401_MAMOIADA_SYSTEM) -/* - * MMU0_ID: The data MMU - * MMU1_ID: The icache MMU - */ -static const hrt_address MMU_BASE[N_MMU_ID] = { - 0x00070000UL, - 0x000A0000UL}; -#else -#error "system_local.h: SYSTEM must be one of {2400, 2401 }" -#endif - -/* DMA */ -static const hrt_address DMA_BASE[N_DMA_ID] = { - 0x00040000UL}; - -static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = { - 0x000CA000UL}; - -/* IRQ */ -static const hrt_address IRQ_BASE[N_IRQ_ID] = { - 0x00000500UL, - 0x00030A00UL, - 0x0008C000UL, - 0x00090200UL}; -/* - 0x00000500UL}; - */ - -/* GDC */ -static const hrt_address GDC_BASE[N_GDC_ID] = { - 0x00050000UL, - 0x00060000UL}; - -/* FIFO_MONITOR (not a subset of GP_DEVICE) */ -static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { - 0x00000000UL}; - -/* -static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { - 0x00000000UL}; - -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - 0x00090000UL}; -*/ - -/* GP_DEVICE (single base for all separate GP_REG instances) */ -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - 0x00000000UL}; - -/*GP TIMER , all timer registers are inter-twined, - * so, having multiple base addresses for - * different timers does not help*/ -static const hrt_address GP_TIMER_BASE = - (hrt_address)0x00000600UL; -/* GPIO */ -static const hrt_address GPIO_BASE[N_GPIO_ID] = { - 0x00000400UL}; - -/* TIMED_CTRL */ -static const hrt_address 
TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { - 0x00000100UL}; - - -/* INPUT_FORMATTER */ -static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { - 0x00030000UL, - 0x00030200UL, - 0x00030400UL}; -/* 0x00030600UL, */ /* memcpy() */ - -/* INPUT_SYSTEM */ -static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { - 0x00080000UL}; -/* 0x00081000UL, */ /* capture A */ -/* 0x00082000UL, */ /* capture B */ -/* 0x00083000UL, */ /* capture C */ -/* 0x00084000UL, */ /* Acquisition */ -/* 0x00085000UL, */ /* DMA */ -/* 0x00089000UL, */ /* ctrl */ -/* 0x0008A000UL, */ /* GP regs */ -/* 0x0008B000UL, */ /* FIFO */ -/* 0x0008C000UL, */ /* IRQ */ - -/* RX, the MIPI lane control regs start at offset 0 */ -static const hrt_address RX_BASE[N_RX_ID] = { - 0x00080100UL}; - -/* IBUF_CTRL, part of the Input System 2401 */ -static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = { - 0x000C1800UL, /* ibuf controller A */ - 0x000C3800UL, /* ibuf controller B */ - 0x000C5800UL /* ibuf controller C */ -}; - -/* ISYS IRQ Controllers, part of the Input System 2401 */ -static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = { - 0x000C1400ULL, /* port a */ - 0x000C3400ULL, /* port b */ - 0x000C5400ULL /* port c */ -}; - -/* CSI FE, part of the Input System 2401 */ -static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = { - 0x000C0400UL, /* csi fe controller A */ - 0x000C2400UL, /* csi fe controller B */ - 0x000C4400UL /* csi fe controller C */ -}; -/* CSI BE, part of the Input System 2401 */ -static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = { - 0x000C0800UL, /* csi be controller A */ - 0x000C2800UL, /* csi be controller B */ - 0x000C4800UL /* csi be controller C */ -}; -/* PIXEL Generator, part of the Input System 2401 */ -static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = { - 0x000C1000UL, /* pixel gen controller A */ - 0x000C3000UL, /* pixel gen controller B */ - 0x000C5000UL /* pixel gen controller C */ -}; -/* Stream2MMIO, part of 
the Input System 2401 */ -static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = { - 0x000C0C00UL, /* stream2mmio controller A */ - 0x000C2C00UL, /* stream2mmio controller B */ - 0x000C4C00UL /* stream2mmio controller C */ -}; - -#else -#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}" -#endif - -#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/PixelGen_SysBlock_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/PixelGen_SysBlock_defs.h deleted file mode 100644 index 1b3391c242a3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/PixelGen_SysBlock_defs.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _PixelGen_SysBlock_defs_h -#define _PixelGen_SysBlock_defs_h - -#ifdef ISYS2401_PXG_A -#else -#ifdef ISYS2401_PXG_B -#else -#ifdef ISYS2401_PXG_C -#else -#include -#endif -#endif -#endif - -/* Parematers and User_Parameters for HSS */ -#define _PXG_PPC Ppc -#define _PXG_PIXEL_BITS PixelWidth -#define _PXG_MAX_NOF_SID MaxNofSids -#define _PXG_DATA_BITS DataWidth -#define _PXG_CNT_BITS CntWidth -#define _PXG_FIFODEPTH FifoDepth -#define _PXG_DBG Dbg_device_not_included - -/* ID's and Address */ -#define _PXG_ADRRESS_ALIGN_REG 4 - -#define _PXG_COM_ENABLE_REG_IDX 0 -#define _PXG_PRBS_RSTVAL_REG0_IDX 1 -#define _PXG_PRBS_RSTVAL_REG1_IDX 2 -#define _PXG_SYNG_SID_REG_IDX 3 -#define _PXG_SYNG_FREE_RUN_REG_IDX 4 -#define _PXG_SYNG_PAUSE_REG_IDX 5 -#define _PXG_SYNG_NOF_FRAME_REG_IDX 6 -#define _PXG_SYNG_NOF_PIXEL_REG_IDX 7 -#define _PXG_SYNG_NOF_LINE_REG_IDX 8 -#define _PXG_SYNG_HBLANK_CYC_REG_IDX 9 -#define _PXG_SYNG_VBLANK_CYC_REG_IDX 10 -#define _PXG_SYNG_STAT_HCNT_REG_IDX 11 -#define _PXG_SYNG_STAT_VCNT_REG_IDX 12 -#define _PXG_SYNG_STAT_FCNT_REG_IDX 13 -#define _PXG_SYNG_STAT_DONE_REG_IDX 14 -#define _PXG_TPG_MODE_REG_IDX 15 -#define _PXG_TPG_HCNT_MASK_REG_IDX 16 -#define _PXG_TPG_VCNT_MASK_REG_IDX 17 -#define _PXG_TPG_XYCNT_MASK_REG_IDX 18 -#define _PXG_TPG_HCNT_DELTA_REG_IDX 19 -#define _PXG_TPG_VCNT_DELTA_REG_IDX 20 -#define _PXG_TPG_R1_REG_IDX 21 -#define _PXG_TPG_G1_REG_IDX 22 -#define _PXG_TPG_B1_REG_IDX 23 -#define _PXG_TPG_R2_REG_IDX 24 -#define _PXG_TPG_G2_REG_IDX 25 -#define _PXG_TPG_B2_REG_IDX 26 -/* */ -#define _PXG_SYNG_PAUSE_CYCLES 0 -/* Subblock ID's */ -#define _PXG_DISBALE_IDX 0 -#define _PXG_PRBS_IDX 0 -#define _PXG_TPG_IDX 1 -#define _PXG_SYNG_IDX 2 -#define _PXG_SMUX_IDX 3 -/* Register Widths */ -#define _PXG_COM_ENABLE_REG_WIDTH 2 -#define _PXG_COM_SRST_REG_WIDTH 4 -#define _PXG_PRBS_RSTVAL_REG0_WIDTH 31 -#define _PXG_PRBS_RSTVAL_REG1_WIDTH 31 - -#define _PXG_SYNG_SID_REG_WIDTH 3 - -#define _PXG_SYNG_FREE_RUN_REG_WIDTH 1 -#define 
_PXG_SYNG_PAUSE_REG_WIDTH 1 -/* -#define _PXG_SYNG_NOF_FRAME_REG_WIDTH -#define _PXG_SYNG_NOF_PIXEL_REG_WIDTH -#define _PXG_SYNG_NOF_LINE_REG_WIDTH -#define _PXG_SYNG_HBLANK_CYC_REG_WIDTH -#define _PXG_SYNG_VBLANK_CYC_REG_WIDTH -#define _PXG_SYNG_STAT_HCNT_REG_WIDTH -#define _PXG_SYNG_STAT_VCNT_REG_WIDTH -#define _PXG_SYNG_STAT_FCNT_REG_WIDTH -*/ -#define _PXG_SYNG_STAT_DONE_REG_WIDTH 1 -#define _PXG_TPG_MODE_REG_WIDTH 2 -/* -#define _PXG_TPG_HCNT_MASK_REG_WIDTH -#define _PXG_TPG_VCNT_MASK_REG_WIDTH -#define _PXG_TPG_XYCNT_MASK_REG_WIDTH -*/ -#define _PXG_TPG_HCNT_DELTA_REG_WIDTH 4 -#define _PXG_TPG_VCNT_DELTA_REG_WIDTH 4 -/* -#define _PXG_TPG_R1_REG_WIDTH -#define _PXG_TPG_G1_REG_WIDTH -#define _PXG_TPG_B1_REG_WIDTH -#define _PXG_TPG_R2_REG_WIDTH -#define _PXG_TPG_G2_REG_WIDTH -#define _PXG_TPG_B2_REG_WIDTH -*/ -#define _PXG_FIFO_DEPTH 2 -/* MISC */ -#define _PXG_ENABLE_REG_VAL 1 -#define _PXG_PRBS_ENABLE_REG_VAL 1 -#define _PXG_TPG_ENABLE_REG_VAL 2 -#define _PXG_SYNG_ENABLE_REG_VAL 4 -#define _PXG_FIFO_ENABLE_REG_VAL 8 -#define _PXG_PXL_BITS 14 -#define _PXG_INVALID_FLAG 0xDEADBEEF -#define _PXG_CAFE_FLAG 0xCAFEBABE - - -#endif /* _PixelGen_SysBlock_defs_h */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/bits.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/bits.h deleted file mode 100644 index e71e33d9d143..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/bits.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HRT_BITS_H -#define _HRT_BITS_H - -#include "defs.h" - -#define _hrt_ones(n) HRTCAT(_hrt_ones_, n) -#define _hrt_ones_0x0 0x00000000U -#define _hrt_ones_0x1 0x00000001U -#define _hrt_ones_0x2 0x00000003U -#define _hrt_ones_0x3 0x00000007U -#define _hrt_ones_0x4 0x0000000FU -#define _hrt_ones_0x5 0x0000001FU -#define _hrt_ones_0x6 0x0000003FU -#define _hrt_ones_0x7 0x0000007FU -#define _hrt_ones_0x8 0x000000FFU -#define _hrt_ones_0x9 0x000001FFU -#define _hrt_ones_0xA 0x000003FFU -#define _hrt_ones_0xB 0x000007FFU -#define _hrt_ones_0xC 0x00000FFFU -#define _hrt_ones_0xD 0x00001FFFU -#define _hrt_ones_0xE 0x00003FFFU -#define _hrt_ones_0xF 0x00007FFFU -#define _hrt_ones_0x10 0x0000FFFFU -#define _hrt_ones_0x11 0x0001FFFFU -#define _hrt_ones_0x12 0x0003FFFFU -#define _hrt_ones_0x13 0x0007FFFFU -#define _hrt_ones_0x14 0x000FFFFFU -#define _hrt_ones_0x15 0x001FFFFFU -#define _hrt_ones_0x16 0x003FFFFFU -#define _hrt_ones_0x17 0x007FFFFFU -#define _hrt_ones_0x18 0x00FFFFFFU -#define _hrt_ones_0x19 0x01FFFFFFU -#define _hrt_ones_0x1A 0x03FFFFFFU -#define _hrt_ones_0x1B 0x07FFFFFFU -#define _hrt_ones_0x1C 0x0FFFFFFFU -#define _hrt_ones_0x1D 0x1FFFFFFFU -#define _hrt_ones_0x1E 0x3FFFFFFFU -#define _hrt_ones_0x1F 0x7FFFFFFFU -#define _hrt_ones_0x20 0xFFFFFFFFU - -#define _hrt_ones_0 _hrt_ones_0x0 -#define _hrt_ones_1 _hrt_ones_0x1 -#define _hrt_ones_2 _hrt_ones_0x2 -#define _hrt_ones_3 _hrt_ones_0x3 -#define _hrt_ones_4 _hrt_ones_0x4 -#define _hrt_ones_5 _hrt_ones_0x5 -#define _hrt_ones_6 _hrt_ones_0x6 -#define _hrt_ones_7 _hrt_ones_0x7 -#define _hrt_ones_8 _hrt_ones_0x8 -#define _hrt_ones_9 _hrt_ones_0x9 -#define _hrt_ones_10 _hrt_ones_0xA -#define _hrt_ones_11 _hrt_ones_0xB -#define _hrt_ones_12 _hrt_ones_0xC 
-#define _hrt_ones_13 _hrt_ones_0xD -#define _hrt_ones_14 _hrt_ones_0xE -#define _hrt_ones_15 _hrt_ones_0xF -#define _hrt_ones_16 _hrt_ones_0x10 -#define _hrt_ones_17 _hrt_ones_0x11 -#define _hrt_ones_18 _hrt_ones_0x12 -#define _hrt_ones_19 _hrt_ones_0x13 -#define _hrt_ones_20 _hrt_ones_0x14 -#define _hrt_ones_21 _hrt_ones_0x15 -#define _hrt_ones_22 _hrt_ones_0x16 -#define _hrt_ones_23 _hrt_ones_0x17 -#define _hrt_ones_24 _hrt_ones_0x18 -#define _hrt_ones_25 _hrt_ones_0x19 -#define _hrt_ones_26 _hrt_ones_0x1A -#define _hrt_ones_27 _hrt_ones_0x1B -#define _hrt_ones_28 _hrt_ones_0x1C -#define _hrt_ones_29 _hrt_ones_0x1D -#define _hrt_ones_30 _hrt_ones_0x1E -#define _hrt_ones_31 _hrt_ones_0x1F -#define _hrt_ones_32 _hrt_ones_0x20 - -#define _hrt_mask(b, n) \ - (_hrt_ones(n) << (b)) -#define _hrt_get_bits(w, b, n) \ - (((w) >> (b)) & _hrt_ones(n)) -#define _hrt_set_bits(w, b, n, v) \ - (((w) & ~_hrt_mask(b, n)) | (((v) & _hrt_ones(n)) << (b))) -#define _hrt_get_bit(w, b) \ - (((w) >> (b)) & 1) -#define _hrt_set_bit(w, b, v) \ - (((w) & (~(1 << (b)))) | (((v)&1) << (b))) -#define _hrt_set_lower_half(w, v) \ - _hrt_set_bits(w, 0, 16, v) -#define _hrt_set_upper_half(w, v) \ - _hrt_set_bits(w, 16, 16, v) - -#endif /* _HRT_BITS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/cell_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/cell_params.h deleted file mode 100644 index b5756bfe8eb6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/cell_params.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _cell_params_h -#define _cell_params_h - -#define SP_PMEM_LOG_WIDTH_BITS 6 /*Width of PC, 64 bits, 8 bytes*/ -#define SP_ICACHE_TAG_BITS 4 /*size of tag*/ -#define SP_ICACHE_SET_BITS 8 /* 256 sets*/ -#define SP_ICACHE_BLOCKS_PER_SET_BITS 1 /* 2 way associative*/ -#define SP_ICACHE_BLOCK_ADDRESS_BITS 11 /* 2048 lines capacity*/ - -#define SP_ICACHE_ADDRESS_BITS \ - (SP_ICACHE_TAG_BITS+SP_ICACHE_BLOCK_ADDRESS_BITS) - -#define SP_PMEM_DEPTH (1< input_selector*/ -/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */ -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 43 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 30 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50 -#define 
_HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ? -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ? -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding - -/* definition for state machine of data FIFO for decode different type of data */ -#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9 -#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7 -#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7 - -#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN - -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1 
-#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */ - -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8 - -#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6 - - -/* packet bit definition */ -#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32 -#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1 -#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22 -#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2 -#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16 -#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0 -#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16 -#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0 -#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32 - - -/*************************************************************************************************/ -/* Custom Decoding */ -/* These Custom Defs are defined based on design-time config in "csi_be_pixel_formatter.chdl" !! 
*/ -/*************************************************************************************************/ -#define BE_CUST_EN_IDX 0 /* 2bits */ -#define BE_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */ -#define BE_CUST_EN_WIDTH 8 -#define BE_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */ -#define BE_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */ - -/* Data State config = {get_bits(6bits), valid(1bit)} */ -#define BE_CUST_DATA_STATE_S0_IDX 0 /* 7bits */ -#define BE_CUST_DATA_STATE_S1_IDX 7 /* 7bits */ -#define BE_CUST_DATA_STATE_S2_IDX 14 /* 7bits */ -#define BE_CUST_DATA_STATE_WIDTH 21 -#define BE_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */ -#define BE_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */ - -/* Pixel Extractor config */ -#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 5bits */ -#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 5 /* 5bits */ -#define BE_CUST_PIX_EXT_PIX_MASK_IDX 10 /* 18bits */ -#define BE_CUST_PIX_EXT_PIX_EN_IDX 28 /* 1bits */ -#define BE_CUST_PIX_EXT_WIDTH 29 - -/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */ -#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_WIDTH 16 -#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */ -#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */ -#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */ -#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */ - -#endif /* _mipi_backend_common_defs_h_ */ -#endif /* _css_receiver_2400_common_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_defs.h 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_defs.h deleted file mode 100644 index 6f5b7d3d3715..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_defs.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _css_receiver_2400_defs_h_ -#define _css_receiver_2400_defs_h_ - -#include "css_receiver_2400_common_defs.h" - -#define CSS_RECEIVER_DATA_WIDTH 8 -#define CSS_RECEIVER_RX_TRIG 4 -#define CSS_RECEIVER_RF_WORD 32 -#define CSS_RECEIVER_IMG_PROC_RF_ADDR 10 -#define CSS_RECEIVER_CSI_RF_ADDR 4 -#define CSS_RECEIVER_DATA_OUT 12 -#define CSS_RECEIVER_CHN_NO 2 -#define CSS_RECEIVER_DWORD_CNT 11 -#define CSS_RECEIVER_FORMAT_TYP 5 -#define CSS_RECEIVER_HRESPONSE 2 -#define CSS_RECEIVER_STATE_WIDTH 3 -#define CSS_RECEIVER_FIFO_DAT 32 -#define CSS_RECEIVER_CNT_VAL 2 -#define CSS_RECEIVER_PRED10_VAL 10 -#define CSS_RECEIVER_PRED12_VAL 12 -#define CSS_RECEIVER_CNT_WIDTH 8 -#define CSS_RECEIVER_WORD_CNT 16 -#define CSS_RECEIVER_PIXEL_LEN 6 -#define CSS_RECEIVER_PIXEL_CNT 5 -#define CSS_RECEIVER_COMP_8_BIT 8 -#define CSS_RECEIVER_COMP_7_BIT 7 -#define CSS_RECEIVER_COMP_6_BIT 6 - -#define CSI_CONFIG_WIDTH 4 - -/* division of gen_short data, ch_id and fmt_type over streaming data interface */ -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB 0 -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB 
(_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB - 1) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB - 1) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH - 1) - -#define _HRT_CSS_RECEIVER_2400_REG_ALIGN 4 -#define _HRT_CSS_RECEIVER_2400_BYTES_PER_PKT 4 - -#define hrt_css_receiver_2400_4_lane_port_offset 0x100 -#define hrt_css_receiver_2400_1_lane_port_offset 0x200 -#define hrt_css_receiver_2400_2_lane_port_offset 0x300 -#define hrt_css_receiver_2400_backend_port_offset 0x100 - -#define _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX 0 -#define _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX 1 -#define _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX 2 -#define _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX 3 -#define _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX 4 -#define _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX 7 -#define _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX 8 -#define _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX 9 -#define _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX 10 -#define _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX 11 -#define _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX 12 -#define _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX 13 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX 14 -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX 15 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX 16 -#define _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX 17 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX 18 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX 19 -#define 
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX 20 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX 21 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX 22 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX 23 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX 24 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX 25 -#define _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX 26 -#define _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX 27 -#define _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX 28 - -/* Interrupt bits for IRQ_STATUS and IRQ_ENABLE registers */ -#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT 0 -#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT 1 -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT 2 -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT 3 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT 4 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT 5 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT 6 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT 7 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT 8 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT 9 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT 10 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT 11 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT 12 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT 13 -#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT 14 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT 15 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT 16 - -#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_ "Fifo Overrun" -#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_CAUSE_ "Reserved" -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_CAUSE_ "Sleep mode entry" -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_CAUSE_ "Sleep mode exit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_CAUSE_ "Error high speed SOT" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_CAUSE_ "Error high speed sync SOT" -#define 
_HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_CAUSE_ "Error control" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_ "Error correction double bit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_CAUSE_ "Error correction single bit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_CAUSE_ "No error" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_ "Error cyclic redundancy check" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_CAUSE_ "Error id" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_CAUSE_ "Error frame sync" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_CAUSE_ "Error frame data" -#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_CAUSE_ "Data time-out" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_CAUSE_ "Error escape" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_CAUSE_ "Error line sync" - -/* Bits for CSI2_DEVICE_READY register */ -#define _HRT_CSS_RECEIVER_2400_CSI2_DEVICE_READY_IDX 0 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_INIT_TIME_OUT_ERR_IDX 2 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_OVER_RUN_ERR_IDX 3 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_SOT_SYNC_ERR_IDX 4 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_RECEIVE_DATA_TIME_OUT_ERR_IDX 5 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_ECC_TWO_BIT_ERR_IDX 6 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_DATA_ID_ERR_IDX 7 - - -/* Bits for CSI2_FUNC_PROG register */ -#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS 19 - -/* Bits for INIT_COUNT register */ -#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_IDX 0 -#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_BITS 16 - -/* Bits for COUNT registers */ -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_BITS 8 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_BITS 8 - -/* Bits for RAW116_18_DATAID register */ -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_IDX 0 -#define 
_HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_BITS 6 - -/* Bits for COMP_FORMAT register, this selects the compression data format */ -#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS 8 -#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX (_HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX + _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS) -#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_BITS 8 - -/* Bits for COMP_PREDICT register, this selects the predictor algorithm */ -#define _HRT_CSS_RECEIVER_2400_PREDICT_NO_COMP 0 -#define _HRT_CSS_RECEIVER_2400_PREDICT_1 1 -#define _HRT_CSS_RECEIVER_2400_PREDICT_2 2 - -/* Number of bits used for the delay registers */ -#define _HRT_CSS_RECEIVER_2400_DELAY_BITS 8 - -/* Bits for COMP_SCHEME register, this selects the compression scheme for a VC */ -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD1_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD2_BITS_IDX 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD3_BITS_IDX 10 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD4_BITS_IDX 15 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD5_BITS_IDX 20 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD6_BITS_IDX 25 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD7_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD8_BITS_IDX 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_BITS_BITS 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_BITS 3 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_IDX 3 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_BITS 2 - - -/* BITS for backend RAW16 and RAW 18 registers */ - -#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_IDX 6 -#define 
_HRT_CSS_RECEIVER_2400_RAW18_OPTION_BITS 2 -#define _HRT_CSS_RECEIVER_2400_RAW18_EN_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW18_EN_BITS 1 - -#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_BITS 2 -#define _HRT_CSS_RECEIVER_2400_RAW16_EN_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW16_EN_BITS 1 - -/* These hsync and vsync values are for HSS simulation only */ -#define _HRT_CSS_RECEIVER_2400_HSYNC_VAL (1<<16) -#define _HRT_CSS_RECEIVER_2400_VSYNC_VAL (1<<17) - -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH 28 -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB 0 -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB + CSS_RECEIVER_DATA_OUT - 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB + CSS_RECEIVER_DATA_OUT - 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT + 1) - -// SH Backend Register IDs -#define _HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX 1 -#define _HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX 2 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX 4 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX 5 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX 7 
-#define _HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX 8 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX 9 -#define _HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX 10 -#define _HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX 11 -#define _HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX 12 -#define _HRT_CSS_RECEIVER_2400_BE_CUST_EN_REG_IDX 13 -#define _HRT_CSS_RECEIVER_2400_BE_CUST_DATA_STATE_REG_IDX 14 /* Data State 0,1,2 config */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P0_REG_IDX 15 /* Pixel Extractor config for Data State 0 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P1_REG_IDX 16 /* Pixel Extractor config for Data State 0 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P2_REG_IDX 17 /* Pixel Extractor config for Data State 0 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P3_REG_IDX 18 /* Pixel Extractor config for Data State 0 & Pix 3 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P0_REG_IDX 19 /* Pixel Extractor config for Data State 1 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P1_REG_IDX 20 /* Pixel Extractor config for Data State 1 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P2_REG_IDX 21 /* Pixel Extractor config for Data State 1 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P3_REG_IDX 22 /* Pixel Extractor config for Data State 1 & Pix 3 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P0_REG_IDX 23 /* Pixel Extractor config for Data State 2 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P1_REG_IDX 24 /* Pixel Extractor config for Data State 2 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P2_REG_IDX 25 /* Pixel Extractor config for Data State 2 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P3_REG_IDX 26 /* Pixel Extractor config for Data State 2 & Pix 3 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_VALID_EOP_REG_IDX 27 /* Pixel Valid & EoP config for Pix 0,1,2,3 */ - -#define 
_HRT_CSS_RECEIVER_2400_BE_NOF_REGISTERS 28 - -#define _HRT_CSS_RECEIVER_2400_BE_SRST_HE 0 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_RCF 1 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_PF 2 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_SM 3 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_PD 4 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_SD 5 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_OT 6 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_BC 7 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_WIDTH 8 - -#endif /* _css_receiver_2400_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/defs.h deleted file mode 100644 index 47505f41790c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/defs.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HRT_DEFS_H_ -#define _HRT_DEFS_H_ - -#ifndef HRTCAT -#define _HRTCAT(m, n) m##n -#define HRTCAT(m, n) _HRTCAT(m, n) -#endif - -#ifndef HRTSTR -#define _HRTSTR(x) #x -#define HRTSTR(x) _HRTSTR(x) -#endif - -#ifndef HRTMIN -#define HRTMIN(a, b) (((a) < (b)) ? (a) : (b)) -#endif - -#ifndef HRTMAX -#define HRTMAX(a, b) (((a) > (b)) ? 
(a) : (b)) -#endif - -#endif /* _HRT_DEFS_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/dma_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/dma_v2_defs.h deleted file mode 100644 index d184a8b313c9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/dma_v2_defs.h +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _dma_v2_defs_h -#define _dma_v2_defs_h - -#define _DMA_V2_NUM_CHANNELS_ID MaxNumChannels -#define _DMA_V2_CONNECTIONS_ID Connections -#define _DMA_V2_DEV_ELEM_WIDTHS_ID DevElemWidths -#define _DMA_V2_DEV_FIFO_DEPTH_ID DevFifoDepth -#define _DMA_V2_DEV_FIFO_RD_LAT_ID DevFifoRdLat -#define _DMA_V2_DEV_FIFO_LAT_BYPASS_ID DevFifoRdLatBypass -#define _DMA_V2_DEV_NO_BURST_ID DevNoBurst -#define _DMA_V2_DEV_RD_ACCEPT_ID DevRdAccept -#define _DMA_V2_DEV_SRMD_ID DevSRMD -#define _DMA_V2_DEV_HAS_CRUN_ID CRunMasters -#define _DMA_V2_CTRL_ACK_FIFO_DEPTH_ID CtrlAckFifoDepth -#define _DMA_V2_CMD_FIFO_DEPTH_ID CommandFifoDepth -#define _DMA_V2_CMD_FIFO_RD_LAT_ID CommandFifoRdLat -#define _DMA_V2_CMD_FIFO_LAT_BYPASS_ID CommandFifoRdLatBypass -#define _DMA_V2_NO_PACK_ID has_no_pack - -#define _DMA_V2_REG_ALIGN 4 -#define _DMA_V2_REG_ADDR_BITS 2 - -/* Command word */ -#define _DMA_V2_CMD_IDX 0 -#define _DMA_V2_CMD_BITS 6 -#define _DMA_V2_CHANNEL_IDX (_DMA_V2_CMD_IDX + _DMA_V2_CMD_BITS) -#define 
_DMA_V2_CHANNEL_BITS 5 - -/* The command to set a parameter contains the PARAM field next */ -#define _DMA_V2_PARAM_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS) -#define _DMA_V2_PARAM_BITS 4 - -/* Commands to read, write or init specific blocks contain these - three values */ -#define _DMA_V2_SPEC_DEV_A_XB_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS) -#define _DMA_V2_SPEC_DEV_A_XB_BITS 8 -#define _DMA_V2_SPEC_DEV_B_XB_IDX (_DMA_V2_SPEC_DEV_A_XB_IDX + _DMA_V2_SPEC_DEV_A_XB_BITS) -#define _DMA_V2_SPEC_DEV_B_XB_BITS 8 -#define _DMA_V2_SPEC_YB_IDX (_DMA_V2_SPEC_DEV_B_XB_IDX + _DMA_V2_SPEC_DEV_B_XB_BITS) -#define _DMA_V2_SPEC_YB_BITS (32-_DMA_V2_SPEC_DEV_B_XB_BITS-_DMA_V2_SPEC_DEV_A_XB_BITS-_DMA_V2_CMD_BITS-_DMA_V2_CHANNEL_BITS) - -/* */ -#define _DMA_V2_CMD_CTRL_IDX 4 -#define _DMA_V2_CMD_CTRL_BITS 4 - -/* Packing setup word */ -#define _DMA_V2_CONNECTION_IDX 0 -#define _DMA_V2_CONNECTION_BITS 4 -#define _DMA_V2_EXTENSION_IDX (_DMA_V2_CONNECTION_IDX + _DMA_V2_CONNECTION_BITS) -#define _DMA_V2_EXTENSION_BITS 1 - -/* Elements packing word */ -#define _DMA_V2_ELEMENTS_IDX 0 -#define _DMA_V2_ELEMENTS_BITS 8 -#define _DMA_V2_LEFT_CROPPING_IDX (_DMA_V2_ELEMENTS_IDX + _DMA_V2_ELEMENTS_BITS) -#define _DMA_V2_LEFT_CROPPING_BITS 8 - -#define _DMA_V2_WIDTH_IDX 0 -#define _DMA_V2_WIDTH_BITS 16 - -#define _DMA_V2_HEIGHT_IDX 0 -#define _DMA_V2_HEIGHT_BITS 16 - -#define _DMA_V2_STRIDE_IDX 0 -#define _DMA_V2_STRIDE_BITS 32 - -/* Command IDs */ -#define _DMA_V2_MOVE_B2A_COMMAND 0 -#define _DMA_V2_MOVE_B2A_BLOCK_COMMAND 1 -#define _DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND 2 -#define _DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND 3 -#define _DMA_V2_MOVE_A2B_COMMAND 4 -#define _DMA_V2_MOVE_A2B_BLOCK_COMMAND 5 -#define _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND 6 -#define _DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND 7 -#define _DMA_V2_INIT_A_COMMAND 8 -#define _DMA_V2_INIT_A_BLOCK_COMMAND 9 -#define _DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND 10 -#define _DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND 11 -#define 
_DMA_V2_INIT_B_COMMAND 12 -#define _DMA_V2_INIT_B_BLOCK_COMMAND 13 -#define _DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND 14 -#define _DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND 15 -#define _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_A_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_B_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_CONFIG_CHANNEL_COMMAND 32 -#define _DMA_V2_SET_CHANNEL_PARAM_COMMAND 33 -#define _DMA_V2_SET_CRUN_COMMAND 62 - -/* Channel Parameter IDs */ -#define _DMA_V2_PACKING_SETUP_PARAM 0 -#define _DMA_V2_STRIDE_A_PARAM 1 -#define _DMA_V2_ELEM_CROPPING_A_PARAM 2 -#define _DMA_V2_WIDTH_A_PARAM 3 -#define _DMA_V2_STRIDE_B_PARAM 4 -#define _DMA_V2_ELEM_CROPPING_B_PARAM 5 -#define _DMA_V2_WIDTH_B_PARAM 6 -#define _DMA_V2_HEIGHT_PARAM 7 -#define _DMA_V2_QUEUED_CMDS 8 - -/* Parameter Constants */ -#define _DMA_V2_ZERO_EXTEND 0 -#define _DMA_V2_SIGN_EXTEND 1 - - /* SLAVE address map */ -#define _DMA_V2_SEL_FSM_CMD 0 -#define _DMA_V2_SEL_CH_REG 1 -#define _DMA_V2_SEL_CONN_GROUP 2 -#define _DMA_V2_SEL_DEV_INTERF 3 - -#define _DMA_V2_ADDR_SEL_COMP_IDX 12 -#define _DMA_V2_ADDR_SEL_COMP_BITS 4 -#define _DMA_V2_ADDR_SEL_CH_REG_IDX 2 -#define _DMA_V2_ADDR_SEL_CH_REG_BITS 6 -#define _DMA_V2_ADDR_SEL_PARAM_IDX (_DMA_V2_ADDR_SEL_CH_REG_BITS+_DMA_V2_ADDR_SEL_CH_REG_IDX) -#define _DMA_V2_ADDR_SEL_PARAM_BITS 4 - -#define 
_DMA_V2_ADDR_SEL_GROUP_COMP_IDX 2 -#define _DMA_V2_ADDR_SEL_GROUP_COMP_BITS 6 -#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX (_DMA_V2_ADDR_SEL_GROUP_COMP_BITS + _DMA_V2_ADDR_SEL_GROUP_COMP_IDX) -#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS 4 - -#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX 2 -#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS 6 -#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX (_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX+_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS) -#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS 4 - -#define _DMA_V2_FSM_GROUP_CMD_IDX 0 -#define _DMA_V2_FSM_GROUP_ADDR_SRC_IDX 1 -#define _DMA_V2_FSM_GROUP_ADDR_DEST_IDX 2 -#define _DMA_V2_FSM_GROUP_CMD_CTRL_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_IDX 4 -#define _DMA_V2_FSM_GROUP_FSM_PACK_IDX 5 -#define _DMA_V2_FSM_GROUP_FSM_REQ_IDX 6 -#define _DMA_V2_FSM_GROUP_FSM_WR_IDX 7 - -#define _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX 4 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX 5 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX 6 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX 7 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX 8 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX 9 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX 10 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX 11 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX 12 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 13 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX 14 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX 15 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_CMD_CTRL_IDX 15 - -#define _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX 3 - -#define _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX 0 -#define 
_DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_REQ_XB_REMAINING_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_BURST_IDX 4 - -#define _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_WR_XB_REMAINING_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_BURST_IDX 4 - -#define _DMA_V2_DEV_INTERF_REQ_SIDE_STATUS_IDX 0 -#define _DMA_V2_DEV_INTERF_SEND_SIDE_STATUS_IDX 1 -#define _DMA_V2_DEV_INTERF_FIFO_STATUS_IDX 2 -#define _DMA_V2_DEV_INTERF_REQ_ONLY_COMPLETE_BURST_IDX 3 -#define _DMA_V2_DEV_INTERF_MAX_BURST_IDX 4 -#define _DMA_V2_DEV_INTERF_CHK_ADDR_ALIGN 5 - -#endif /* _dma_v2_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gdc_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gdc_v2_defs.h deleted file mode 100644 index 77722d205701..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gdc_v2_defs.h +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef HRT_GDC_v2_defs_h_ -#define HRT_GDC_v2_defs_h_ - -#define HRT_GDC_IS_V2 - -#define HRT_GDC_N 1024 /* Top-level design constant, equal to the number of entries in the LUT */ -#define HRT_GDC_FRAC_BITS 10 /* Number of fractional bits in the GDC block, driven by the size of the LUT */ - -#define HRT_GDC_BLI_FRAC_BITS 4 /* Number of fractional bits for the bi-linear interpolation type */ -#define HRT_GDC_BLI_COEF_ONE (1 << HRT_GDC_BLI_FRAC_BITS) - -#define HRT_GDC_BCI_COEF_BITS 14 /* 14 bits per coefficient */ -#define HRT_GDC_BCI_COEF_ONE (1 << (HRT_GDC_BCI_COEF_BITS-2)) /* We represent signed 10 bit coefficients. */ - /* The supported range is [-256, .., +256] */ - /* in 14-bit signed notation, */ - /* We need all ten bits (MSB must be zero). */ - /* -s is inserted to solve this issue, and */ - /* therefore "1" is equal to +256. */ -#define HRT_GDC_BCI_COEF_MASK ((1 << HRT_GDC_BCI_COEF_BITS) - 1) - -#define HRT_GDC_LUT_BYTES (HRT_GDC_N*4*2) /* 1024 addresses, 4 coefficients per address, */ - /* 2 bytes per coefficient */ - -#define _HRT_GDC_REG_ALIGN 4 - - // 31 30 29 25 24 0 - // |-----|---|--------|------------------------| - // | CMD | C | Reg_ID | Value | - - - // There are just two commands possible for the GDC block: - // 1 - Configure reg - // 0 - Data token - - // C - Reserved bit - // Used in protocol to indicate whether it is C-run or other type of runs - // In case of C-run, this bit has a value of 1, for all the other runs, it is 0. - - // Reg_ID - Address of the register to be configured - - // Value - Value to store to the addressed register, maximum of 24 bits - - // Configure reg command is not followed by any other token. - // The address of the register and the data to be filled in is contained in the same token - - // When the first data token is received, it must be: - // 1. FRX and FRY (device configured in one of the scaling modes) ***DEFAULT MODE***, or, - // 2. 
P0'X (device configured in one of the tetragon modes) - // After the first data token is received, pre-defined number of tokens with the following meaning follow: - // 1. two tokens: SRC address ; DST address - // 2. nine tokens: P0'Y, .., P3'Y ; SRC address ; DST address - -#define HRT_GDC_CONFIG_CMD 1 -#define HRT_GDC_DATA_CMD 0 - - -#define HRT_GDC_CMD_POS 31 -#define HRT_GDC_CMD_BITS 1 -#define HRT_GDC_CRUN_POS 30 -#define HRT_GDC_REG_ID_POS 25 -#define HRT_GDC_REG_ID_BITS 5 -#define HRT_GDC_DATA_POS 0 -#define HRT_GDC_DATA_BITS 25 - -#define HRT_GDC_FRYIPXFRX_BITS 26 -#define HRT_GDC_P0X_BITS 23 - - -#define HRT_GDC_MAX_OXDIM (8192-64) -#define HRT_GDC_MAX_OYDIM 4095 -#define HRT_GDC_MAX_IXDIM (8192-64) -#define HRT_GDC_MAX_IYDIM 4095 -#define HRT_GDC_MAX_DS_FAC 16 -#define HRT_GDC_MAX_DX (HRT_GDC_MAX_DS_FAC*HRT_GDC_N - 1) -#define HRT_GDC_MAX_DY HRT_GDC_MAX_DX - - -/* GDC lookup tables entries are 10 bits values, but they're - stored 2 by 2 as 32 bit values, yielding 16 bits per entry. - A GDC lookup table contains 64 * 4 elements */ - -#define HRT_GDC_PERF_1_1_pix 0 -#define HRT_GDC_PERF_2_1_pix 1 -#define HRT_GDC_PERF_1_2_pix 2 -#define HRT_GDC_PERF_2_2_pix 3 - -#define HRT_GDC_NND_MODE 0 -#define HRT_GDC_BLI_MODE 1 -#define HRT_GDC_BCI_MODE 2 -#define HRT_GDC_LUT_MODE 3 - -#define HRT_GDC_SCAN_STB 0 -#define HRT_GDC_SCAN_STR 1 - -#define HRT_GDC_MODE_SCALING 0 -#define HRT_GDC_MODE_TETRAGON 1 - -#define HRT_GDC_LUT_COEFF_OFFSET 16 -#define HRT_GDC_FRY_BIT_OFFSET 16 -// FRYIPXFRX is the only register where we store two values in one field, -// to save one token in the scaling protocol. -// Like this, we have three tokens in the scaling protocol, -// Otherwise, we would have had four. 
-// The register bit-map is: -// 31 26 25 16 15 10 9 0 -// |------|----------|------|----------| -// | XXXX | FRY | IPX | FRX | - - -#define HRT_GDC_CE_FSM0_POS 0 -#define HRT_GDC_CE_FSM0_LEN 2 -#define HRT_GDC_CE_OPY_POS 2 -#define HRT_GDC_CE_OPY_LEN 14 -#define HRT_GDC_CE_OPX_POS 16 -#define HRT_GDC_CE_OPX_LEN 16 -// CHK_ENGINE register bit-map: -// 31 16 15 2 1 0 -// |----------------|-----------|----| -// | OPX | OPY |FSM0| -// However, for the time being at least, -// this implementation is meaningless in hss model, -// So, we just return 0 - - -#define HRT_GDC_CHK_ENGINE_IDX 0 -#define HRT_GDC_WOIX_IDX 1 -#define HRT_GDC_WOIY_IDX 2 -#define HRT_GDC_BPP_IDX 3 -#define HRT_GDC_FRYIPXFRX_IDX 4 -#define HRT_GDC_OXDIM_IDX 5 -#define HRT_GDC_OYDIM_IDX 6 -#define HRT_GDC_SRC_ADDR_IDX 7 -#define HRT_GDC_SRC_END_ADDR_IDX 8 -#define HRT_GDC_SRC_WRAP_ADDR_IDX 9 -#define HRT_GDC_SRC_STRIDE_IDX 10 -#define HRT_GDC_DST_ADDR_IDX 11 -#define HRT_GDC_DST_STRIDE_IDX 12 -#define HRT_GDC_DX_IDX 13 -#define HRT_GDC_DY_IDX 14 -#define HRT_GDC_P0X_IDX 15 -#define HRT_GDC_P0Y_IDX 16 -#define HRT_GDC_P1X_IDX 17 -#define HRT_GDC_P1Y_IDX 18 -#define HRT_GDC_P2X_IDX 19 -#define HRT_GDC_P2Y_IDX 20 -#define HRT_GDC_P3X_IDX 21 -#define HRT_GDC_P3Y_IDX 22 -#define HRT_GDC_PERF_POINT_IDX 23 // 1x1 ; 1x2 ; 2x1 ; 2x2 pixels per cc -#define HRT_GDC_INTERP_TYPE_IDX 24 // NND ; BLI ; BCI ; LUT -#define HRT_GDC_SCAN_IDX 25 // 0 = STB (Slide To Bottom) ; 1 = STR (Slide To Right) -#define HRT_GDC_PROC_MODE_IDX 26 // 0 = Scaling ; 1 = Tetragon - -#define HRT_GDC_LUT_IDX 32 - - -#endif /* HRT_GDC_v2_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_timer_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_timer_defs.h deleted file mode 100644 index 3082e2f5e014..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_timer_defs.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * 
Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _gp_timer_defs_h -#define _gp_timer_defs_h - -#define _HRT_GP_TIMER_REG_ALIGN 4 - -#define HIVE_GP_TIMER_RESET_REG_IDX 0 -#define HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX 1 -#define HIVE_GP_TIMER_ENABLE_REG_IDX(timer) (HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX + 1 + timer) -#define HIVE_GP_TIMER_VALUE_REG_IDX(timer,timers) (HIVE_GP_TIMER_ENABLE_REG_IDX(timers) + timer) -#define HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer,timers) (HIVE_GP_TIMER_VALUE_REG_IDX(timers, timers) + timer) -#define HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer,timers) (HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timers, timers) + timer) -#define HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq,timers) (HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timers, timers) + irq) -#define HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irqs, timers) + irq) -#define HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irqs, timers, irqs) + irq) - -#define HIVE_GP_TIMER_COUNT_TYPE_HIGH 0 -#define HIVE_GP_TIMER_COUNT_TYPE_LOW 1 -#define HIVE_GP_TIMER_COUNT_TYPE_POSEDGE 2 -#define HIVE_GP_TIMER_COUNT_TYPE_NEGEDGE 3 -#define HIVE_GP_TIMER_COUNT_TYPES 4 - -#endif /* _gp_timer_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gpio_block_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gpio_block_defs.h deleted file mode 
100644 index a807d4c99041..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gpio_block_defs.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _gpio_block_defs_h_ -#define _gpio_block_defs_h_ - -#define _HRT_GPIO_BLOCK_REG_ALIGN 4 - -/* R/W registers */ -#define _gpio_block_reg_do_e 0 -#define _gpio_block_reg_do_select 1 -#define _gpio_block_reg_do_0 2 -#define _gpio_block_reg_do_1 3 -#define _gpio_block_reg_do_pwm_cnt_0 4 -#define _gpio_block_reg_do_pwm_cnt_1 5 -#define _gpio_block_reg_do_pwm_cnt_2 6 -#define _gpio_block_reg_do_pwm_cnt_3 7 -#define _gpio_block_reg_do_pwm_main_cnt 8 -#define _gpio_block_reg_do_pwm_enable 9 -#define _gpio_block_reg_di_debounce_sel 10 -#define _gpio_block_reg_di_debounce_cnt_0 11 -#define _gpio_block_reg_di_debounce_cnt_1 12 -#define _gpio_block_reg_di_debounce_cnt_2 13 -#define _gpio_block_reg_di_debounce_cnt_3 14 -#define _gpio_block_reg_di_active_level 15 - - -/* read-only registers */ -#define _gpio_block_reg_di 16 - -#endif /* _gpio_block_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_2401_irq_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_2401_irq_types_hrt.h deleted file mode 100644 index 2f7cb2dff0e9..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_2401_irq_types_hrt.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ -#define _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ - -/* - * These are the indices of each interrupt in the interrupt - * controller's registers. these can be used as the irq_id - * argument to the hrt functions irq_controller.h. - * - * The definitions are taken from _defs.h - */ -typedef enum hrt_isp_css_irq { - hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID , - hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID , - hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID , - hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID , - hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID , - hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID , - hrt_isp_css_irq_gpio_pin_6 = HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID , - hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID , - hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID , - hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID , - hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID , - hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID , - hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID , - hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID , - hrt_isp_css_irq_isys = 
HIVE_GP_DEV_IRQ_ISYS_BIT_ID , - hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID , - hrt_isp_css_irq_ifmt = HIVE_GP_DEV_IRQ_IFMT_BIT_ID , - hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID , - hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID , - hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID , - hrt_isp_css_irq_is2401 = HIVE_GP_DEV_IRQ_IS2401_BIT_ID , - hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID , - hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID , - hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID , - hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID , - hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID , - hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID , - hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID , - hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID , - hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID , - hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID , - hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID , - /* this must (obviously) be the last on in the enum */ - hrt_isp_css_irq_num_irqs -} hrt_isp_css_irq_t; - -typedef enum hrt_isp_css_irq_status { - hrt_isp_css_irq_status_error, - hrt_isp_css_irq_status_more_irqs, - hrt_isp_css_irq_status_success -} hrt_isp_css_irq_status_t; - -#endif /* _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_defs.h deleted file mode 100644 index 5a2ce9108ae4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_defs.h +++ /dev/null @@ -1,435 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _hive_isp_css_defs_h__ -#define _hive_isp_css_defs_h__ - -#define _HIVE_ISP_CSS_2401_SYSTEM 1 -#define HIVE_ISP_CTRL_DATA_WIDTH 32 -#define HIVE_ISP_CTRL_ADDRESS_WIDTH 32 -#define HIVE_ISP_CTRL_MAX_BURST_SIZE 1 -#define HIVE_ISP_DDR_ADDRESS_WIDTH 36 - -#define HIVE_ISP_HOST_MAX_BURST_SIZE 8 /* host supports bursts in order to prevent repeating DDRAM accesses */ -#define HIVE_ISP_NUM_GPIO_PINS 12 - -/* This list of vector num_elems/elem_bits pairs is valid both in C as initializer - and in the DMA parameter list */ -#define HIVE_ISP_DDR_DMA_SPECS {{32, 8}, {16, 16}, {18, 14}, {25, 10}, {21, 12}} -#define HIVE_ISP_DDR_WORD_BITS 256 -#define HIVE_ISP_DDR_WORD_BYTES (HIVE_ISP_DDR_WORD_BITS/8) -#define HIVE_ISP_DDR_BYTES (512 * 1024 * 1024) -#define HIVE_ISP_DDR_BYTES_RTL (127 * 1024 * 1024) -#define HIVE_ISP_DDR_SMALL_BYTES (128 * 256 / 8) -#define HIVE_ISP_PAGE_SHIFT 12 -#define HIVE_ISP_PAGE_SIZE (1< - -#define _HIVE_ISP_CH_ID_MASK ((1U << HIVE_ISP_CH_ID_BITS)-1) -#define _HIVE_ISP_FMT_TYPE_MASK ((1U << HIVE_ISP_FMT_TYPE_BITS)-1) - -#define _HIVE_STR_TO_MIPI_FMT_TYPE_LSB (HIVE_STR_TO_MIPI_CH_ID_LSB + HIVE_ISP_CH_ID_BITS) -#define _HIVE_STR_TO_MIPI_DATA_B_LSB (HIVE_STR_TO_MIPI_DATA_A_LSB + HIVE_IF_PIXEL_WIDTH) - -#endif /* _hive_isp_css_streaming_to_mipi_types_hrt_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_types.h 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_types.h deleted file mode 100644 index 58b0e6effbd0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_types.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HRT_HIVE_TYPES_H -#define _HRT_HIVE_TYPES_H - -#include "version.h" -#include "defs.h" - -#ifndef HRTCAT3 -#define _HRTCAT3(m,n,o) m##n##o -#define HRTCAT3(m,n,o) _HRTCAT3(m,n,o) -#endif - -#ifndef HRTCAT4 -#define _HRTCAT4(m,n,o,p) m##n##o##p -#define HRTCAT4(m,n,o,p) _HRTCAT4(m,n,o,p) -#endif - -#ifndef HRTMIN -#define HRTMIN(a,b) (((a)<(b))?(a):(b)) -#endif - -#ifndef HRTMAX -#define HRTMAX(a,b) (((a)>(b))?(a):(b)) -#endif - -/* boolean data type */ -typedef unsigned int hive_bool; -#define hive_false 0 -#define hive_true 1 - -typedef char hive_int8; -typedef short hive_int16; -typedef int hive_int32; -typedef long long hive_int64; - -typedef unsigned char hive_uint8; -typedef unsigned short hive_uint16; -typedef unsigned int hive_uint32; -typedef unsigned long long hive_uint64; - -/* by default assume 32 bit master port (both data and address) */ -#ifndef HRT_DATA_WIDTH -#define HRT_DATA_WIDTH 32 -#endif -#ifndef HRT_ADDRESS_WIDTH -#define HRT_ADDRESS_WIDTH 32 -#endif - -#define HRT_DATA_BYTES (HRT_DATA_WIDTH/8) -#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH/8) - -#if HRT_DATA_WIDTH == 64 -typedef hive_uint64 hrt_data; -#elif 
HRT_DATA_WIDTH == 32 -typedef hive_uint32 hrt_data; -#else -#error data width not supported -#endif - -#if HRT_ADDRESS_WIDTH == 64 -typedef hive_uint64 hrt_address; -#elif HRT_ADDRESS_WIDTH == 32 -typedef hive_uint32 hrt_address; -#else -#error adddres width not supported -#endif - -/* The SP side representation of an HMM virtual address */ -typedef hive_uint32 hrt_vaddress; - -/* use 64 bit addresses in simulation, where possible */ -typedef hive_uint64 hive_sim_address; - -/* below is for csim, not for hrt, rename and move this elsewhere */ - -typedef unsigned int hive_uint; -typedef hive_uint32 hive_address; -typedef hive_address hive_slave_address; -typedef hive_address hive_mem_address; - -/* MMIO devices */ -typedef hive_uint hive_mmio_id; -typedef hive_mmio_id hive_slave_id; -typedef hive_mmio_id hive_port_id; -typedef hive_mmio_id hive_master_id; -typedef hive_mmio_id hive_mem_id; -typedef hive_mmio_id hive_dev_id; -typedef hive_mmio_id hive_fifo_id; - -typedef hive_uint hive_hier_id; -typedef hive_hier_id hive_device_id; -typedef hive_device_id hive_proc_id; -typedef hive_device_id hive_cell_id; -typedef hive_device_id hive_host_id; -typedef hive_device_id hive_bus_id; -typedef hive_device_id hive_bridge_id; -typedef hive_device_id hive_fifo_adapter_id; -typedef hive_device_id hive_custom_device_id; - -typedef hive_uint hive_slot_id; -typedef hive_uint hive_fu_id; -typedef hive_uint hive_reg_file_id; -typedef hive_uint hive_reg_id; - -/* Streaming devices */ -typedef hive_uint hive_outport_id; -typedef hive_uint hive_inport_id; - -typedef hive_uint hive_msink_id; - -/* HRT specific */ -typedef char* hive_program; -typedef char* hive_function; - -#endif /* _HRT_HIVE_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h deleted file mode 100644 index f82bb79785cf..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _ibuf_cntrl_defs_h_ -#define _ibuf_cntrl_defs_h_ - -#include -#include - -#define _IBUF_CNTRL_REG_ALIGN 4 - /* alignment of register banks, first bank are shared configuration and status registers: */ -#define _IBUF_CNTRL_PROC_REG_ALIGN 32 - - /* the actual amount of configuration registers per proc: */ -#define _IBUF_CNTRL_CONFIG_REGS_PER_PROC 18 - /* the actual amount of shared configuration registers: */ -#define _IBUF_CNTRL_CONFIG_REGS_NO_PROC 0 - - /* the actual amount of status registers per proc */ -#define _IBUF_CNTRL_STATUS_REGS_PER_PROC (_IBUF_CNTRL_CONFIG_REGS_PER_PROC + 10) - /* the actual amount shared status registers */ -#define _IBUF_CNTRL_STATUS_REGS_NO_PROC (_IBUF_CNTRL_CONFIG_REGS_NO_PROC + 2) - - /* time out bits, maximum time out value is 2^_IBUF_CNTRL_TIME_OUT_BITS - 1 */ -#define _IBUF_CNTRL_TIME_OUT_BITS 5 - -/* command token definition */ -#define _IBUF_CNTRL_CMD_TOKEN_LSB 0 -#define _IBUF_CNTRL_CMD_TOKEN_MSB 1 - -/* Str2MMIO defines */ -#define _IBUF_CNTRL_STREAM2MMIO_CMD_TOKEN_MSB _STREAM2MMIO_CMD_TOKEN_CMD_MSB -#define _IBUF_CNTRL_STREAM2MMIO_CMD_TOKEN_LSB _STREAM2MMIO_CMD_TOKEN_CMD_LSB -#define _IBUF_CNTRL_STREAM2MMIO_NUM_ITEMS_BITS _STREAM2MMIO_PACK_NUM_ITEMS_BITS -#define _IBUF_CNTRL_STREAM2MMIO_ACK_EOF_BIT _STREAM2MMIO_PACK_ACK_EOF_BIT -#define 
_IBUF_CNTRL_STREAM2MMIO_ACK_TOKEN_VALID_BIT _STREAM2MMIO_ACK_TOKEN_VALID_BIT - -/* acknowledge token definition */ -#define _IBUF_CNTRL_ACK_TOKEN_STORES_IDX 0 -#define _IBUF_CNTRL_ACK_TOKEN_STORES_BITS 15 -#define _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX (_IBUF_CNTRL_ACK_TOKEN_STORES_BITS + _IBUF_CNTRL_ACK_TOKEN_STORES_IDX) -#define _IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS _STREAM2MMIO_PACK_NUM_ITEMS_BITS -#define _IBUF_CNTRL_ACK_TOKEN_LSB _IBUF_CNTRL_ACK_TOKEN_STORES_IDX -#define _IBUF_CNTRL_ACK_TOKEN_MSB (_IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS + _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX - 1) - /* bit 31 indicates a valid ack: */ -#define _IBUF_CNTRL_ACK_TOKEN_VALID_BIT (_IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS + _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX) - - -/*shared registers:*/ -#define _IBUF_CNTRL_RECALC_WORDS_STATUS 0 -#define _IBUF_CNTRL_ARBITERS_STATUS 1 - -#define _IBUF_CNTRL_SET_CRUN 2 /* NO PHYSICAL REGISTER!! Only used in HSS model */ - - -/*register addresses for each proc: */ -#define _IBUF_CNTRL_CMD 0 -#define _IBUF_CNTRL_ACK 1 - - /* number of items (packets or words) per frame: */ -#define _IBUF_CNTRL_NUM_ITEMS_PER_STORE 2 - - /* number of stores (packets or words) per store/buffer: */ -#define _IBUF_CNTRL_NUM_STORES_PER_FRAME 3 - - /* the channel and command in the DMA */ -#define _IBUF_CNTRL_DMA_CHANNEL 4 -#define _IBUF_CNTRL_DMA_CMD 5 - - /* the start address and stride of the buffers */ -#define _IBUF_CNTRL_BUFFER_START_ADDRESS 6 -#define _IBUF_CNTRL_BUFFER_STRIDE 7 -#define _IBUF_CNTRL_BUFFER_END_ADDRESS 8 - - /* destination start address, stride and end address; should be the same as in the DMA */ -#define _IBUF_CNTRL_DEST_START_ADDRESS 9 -#define _IBUF_CNTRL_DEST_STRIDE 10 -#define _IBUF_CNTRL_DEST_END_ADDRESS 11 - - /* send a frame sync or not, default 1 */ -#define _IBUF_CNTRL_SYNC_FRAME 12 - - /* str2mmio cmds */ -#define _IBUF_CNTRL_STR2MMIO_SYNC_CMD 13 -#define _IBUF_CNTRL_STR2MMIO_STORE_CMD 14 - - /* num elems p word*/ -#define _IBUF_CNTRL_SHIFT_ITEMS 15 -#define 
_IBUF_CNTRL_ELEMS_P_WORD_IBUF 16 -#define _IBUF_CNTRL_ELEMS_P_WORD_DEST 17 - - - /* STATUS */ - /* current frame and stores in buffer */ -#define _IBUF_CNTRL_CUR_STORES 18 -#define _IBUF_CNTRL_CUR_ACKS 19 - - /* current buffer and destination address for DMA cmd's */ -#define _IBUF_CNTRL_CUR_S2M_IBUF_ADDR 20 -#define _IBUF_CNTRL_CUR_DMA_IBUF_ADDR 21 -#define _IBUF_CNTRL_CUR_DMA_DEST_ADDR 22 -#define _IBUF_CNTRL_CUR_ISP_DEST_ADDR 23 - -#define _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND 24 - -#define _IBUF_CNTRL_MAIN_CNTRL_STATE 25 -#define _IBUF_CNTRL_DMA_SYNC_STATE 26 -#define _IBUF_CNTRL_ISP_SYNC_STATE 27 - - -/*Commands: */ -#define _IBUF_CNTRL_CMD_STORE_FRAME_IDX 0 -#define _IBUF_CNTRL_CMD_ONLINE_IDX 1 - - /* initialize, copy st_addr to cur_addr etc */ -#define _IBUF_CNTRL_CMD_INITIALIZE 0 - - /* store an online frame (sync with ISP, use end cfg start, stride and end address: */ -#define _IBUF_CNTRL_CMD_STORE_ONLINE_FRAME ((1<<_IBUF_CNTRL_CMD_STORE_FRAME_IDX) | (1<<_IBUF_CNTRL_CMD_ONLINE_IDX)) - - /* store an offline frame (don't sync with ISP, requires start address as 2nd token, no end address: */ -#define _IBUF_CNTRL_CMD_STORE_OFFLINE_FRAME (1<<_IBUF_CNTRL_CMD_STORE_FRAME_IDX) - - /* false command token, should be different then commands. Use online bit, not store frame: */ -#define _IBUF_CNTRL_FALSE_ACK 2 - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/if_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/if_defs.h deleted file mode 100644 index 7d39e45796ae..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/if_defs.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IF_DEFS_H -#define _IF_DEFS_H - -#define HIVE_IF_FRAME_REQUEST 0xA000 -#define HIVE_IF_LINES_REQUEST 0xB000 -#define HIVE_IF_VECTORS_REQUEST 0xC000 - -#endif /* _IF_DEFS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h deleted file mode 100644 index 7766f78cd123..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _if_subsystem_defs_h__ -#define _if_subsystem_defs_h__ - -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_1 1 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_2 2 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_3 3 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_4 4 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_5 5 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_6 6 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_7 7 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_FSYNC_LUT_REG 8 -#define HIVE_IFMT_GP_REGS_SRST_IDX 9 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IDX 10 - -#define HIVE_IFMT_GP_REGS_CH_ID_FMT_TYPE_IDX 11 - -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_BASE HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 - -/* order of the input bits for the ifmt irq controller */ -#define HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID 0 -#define HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID 1 -#define HIVE_IFMT_IRQ_IFT_SEC_BIT_ID 2 -#define HIVE_IFMT_IRQ_MEM_CPY_BIT_ID 3 -#define HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID 4 - -/* order of the input bits for the ifmt Soft reset register */ -#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_BIT_IDX 0 -#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_B_BIT_IDX 1 -#define HIVE_IFMT_GP_REGS_SRST_IFT_SEC_BIT_IDX 2 -#define HIVE_IFMT_GP_REGS_SRST_MEM_CPY_BIT_IDX 3 - -/* order of the input bits for the ifmt Soft reset register */ -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_BIT_IDX 0 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_B_BIT_IDX 1 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_SEC_BIT_IDX 2 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_MEM_CPY_BIT_IDX 3 - -#endif /* _if_subsystem_defs_h__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_selector_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_selector_defs.h deleted file mode 100644 index 87fbf82edb5b..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_selector_defs.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_selector_defs_h -#define _input_selector_defs_h - -#ifndef HIVE_ISP_ISEL_SEL_BITS -#define HIVE_ISP_ISEL_SEL_BITS 2 -#endif - -#ifndef HIVE_ISP_CH_ID_BITS -#define HIVE_ISP_CH_ID_BITS 2 -#endif - -#ifndef HIVE_ISP_FMT_TYPE_BITS -#define HIVE_ISP_FMT_TYPE_BITS 5 -#endif - -/* gp_register register id's -- Outputs */ -#define HIVE_ISEL_GP_REGS_SYNCGEN_ENABLE_IDX 0 -#define HIVE_ISEL_GP_REGS_SYNCGEN_FREE_RUNNING_IDX 1 -#define HIVE_ISEL_GP_REGS_SYNCGEN_PAUSE_IDX 2 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_FRAMES_IDX 3 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_PIX_IDX 4 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_LINES_IDX 5 -#define HIVE_ISEL_GP_REGS_SYNCGEN_HBLANK_CYCLES_IDX 6 -#define HIVE_ISEL_GP_REGS_SYNCGEN_VBLANK_CYCLES_IDX 7 - -#define HIVE_ISEL_GP_REGS_SOF_IDX 8 -#define HIVE_ISEL_GP_REGS_EOF_IDX 9 -#define HIVE_ISEL_GP_REGS_SOL_IDX 10 -#define HIVE_ISEL_GP_REGS_EOL_IDX 11 - -#define HIVE_ISEL_GP_REGS_PRBS_ENABLE 12 -#define HIVE_ISEL_GP_REGS_PRBS_ENABLE_PORT_B 13 -#define HIVE_ISEL_GP_REGS_PRBS_LFSR_RESET_VALUE 14 - -#define HIVE_ISEL_GP_REGS_TPG_ENABLE 15 -#define HIVE_ISEL_GP_REGS_TPG_ENABLE_PORT_B 16 -#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_MASK_IDX 17 -#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_MASK_IDX 18 -#define HIVE_ISEL_GP_REGS_TPG_XY_CNT_MASK_IDX 19 -#define 
HIVE_ISEL_GP_REGS_TPG_HOR_CNT_DELTA_IDX 20 -#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_DELTA_IDX 21 -#define HIVE_ISEL_GP_REGS_TPG_MODE_IDX 22 -#define HIVE_ISEL_GP_REGS_TPG_R1_IDX 23 -#define HIVE_ISEL_GP_REGS_TPG_G1_IDX 24 -#define HIVE_ISEL_GP_REGS_TPG_B1_IDX 25 -#define HIVE_ISEL_GP_REGS_TPG_R2_IDX 26 -#define HIVE_ISEL_GP_REGS_TPG_G2_IDX 27 -#define HIVE_ISEL_GP_REGS_TPG_B2_IDX 28 - - -#define HIVE_ISEL_GP_REGS_CH_ID_IDX 29 -#define HIVE_ISEL_GP_REGS_FMT_TYPE_IDX 30 -#define HIVE_ISEL_GP_REGS_DATA_SEL_IDX 31 -#define HIVE_ISEL_GP_REGS_SBAND_SEL_IDX 32 -#define HIVE_ISEL_GP_REGS_SYNC_SEL_IDX 33 -#define HIVE_ISEL_GP_REGS_SRST_IDX 37 - -#define HIVE_ISEL_GP_REGS_SRST_SYNCGEN_BIT 0 -#define HIVE_ISEL_GP_REGS_SRST_PRBS_BIT 1 -#define HIVE_ISEL_GP_REGS_SRST_TPG_BIT 2 -#define HIVE_ISEL_GP_REGS_SRST_FIFO_BIT 3 - -/* gp_register register id's -- Inputs */ -#define HIVE_ISEL_GP_REGS_SYNCGEN_HOR_CNT_IDX 34 -#define HIVE_ISEL_GP_REGS_SYNCGEN_VER_CNT_IDX 35 -#define HIVE_ISEL_GP_REGS_SYNCGEN_FRAMES_CNT_IDX 36 - -/* irq sources isel irq controller */ -#define HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID 0 -#define HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID 1 -#define HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID 2 -#define HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID 3 -#define HIVE_ISEL_IRQ_NUM_IRQS 4 - -#endif /* _input_selector_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_switch_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_switch_2400_defs.h deleted file mode 100644 index 20a13c4cdb56..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_switch_2400_defs.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_switch_2400_defs_h -#define _input_switch_2400_defs_h - -#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id)*2) + ((fmt_type)>=16)) -#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type) (((fmt_type)%16) * 2) - -#define HIVE_INPUT_SWITCH_SELECT_NO_OUTPUT 0 -#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1 -#define HIVE_INPUT_SWITCH_SELECT_IF_SEC 2 -#define HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM 3 -#define HIVE_INPUT_SWITCH_VSELECT_NO_OUTPUT 0 -#define HIVE_INPUT_SWITCH_VSELECT_IF_PRIM 1 -#define HIVE_INPUT_SWITCH_VSELECT_IF_SEC 2 -#define HIVE_INPUT_SWITCH_VSELECT_STR_TO_MEM 4 - -#endif /* _input_switch_2400_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_ctrl_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_ctrl_defs.h deleted file mode 100644 index a7f0ca80bc9b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_ctrl_defs.h +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_system_ctrl_defs_h -#define _input_system_ctrl_defs_h - -#define _INPUT_SYSTEM_CTRL_REG_ALIGN 4 /* assuming 32 bit control bus width */ - -/* --------------------------------------------------*/ - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -// Number of registers -#define ISYS_CTRL_NOF_REGS 23 - -// Register id's of MMIO slave accesible registers -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_ID 0 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_ID 1 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_ID 2 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID 3 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID 4 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID 5 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID 6 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID 7 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID 8 -#define ISYS_CTRL_ACQ_START_ADDR_REG_ID 9 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID 10 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID 11 -#define ISYS_CTRL_INIT_REG_ID 12 -#define ISYS_CTRL_LAST_COMMAND_REG_ID 13 -#define ISYS_CTRL_NEXT_COMMAND_REG_ID 14 -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID 15 -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID 16 -#define ISYS_CTRL_FSM_STATE_INFO_REG_ID 17 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID 18 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID 19 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID 20 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID 21 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID 22 - - -/* register reset value */ -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_RSTVAL 0 
-#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_RSTVAL 3 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_RSTVAL 3 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_RSTVAL 3 -#define ISYS_CTRL_ACQ_START_ADDR_REG_RSTVAL 0 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_RSTVAL 128 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define ISYS_CTRL_INIT_REG_RSTVAL 0 -#define ISYS_CTRL_LAST_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_NEXT_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_RSTVAL 0 - -/* register width value */ -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_START_ADDR_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_WIDTH 9 -#define ISYS_CTRL_INIT_REG_WIDTH 3 -#define 
ISYS_CTRL_LAST_COMMAND_REG_WIDTH 32 /* slave data width */ -#define ISYS_CTRL_NEXT_COMMAND_REG_WIDTH 32 -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define ISYS_CTRL_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_WIDTH 1 - -/* bit definitions */ - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ - -/* -InpSysCaptFramesAcq 1/0 [3:0] - 'b0000 -[7:4] - CaptPortId, - CaptA-'b0000 - CaptB-'b0001 - CaptC-'b0010 -[31:16] - NOF_frames -InpSysCaptFrameExt 2/0 [3:0] - 'b0001' -[7:4] - CaptPortId, - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - - 2/1 [31:0] - external capture address -InpSysAcqFrame 2/0 [3:0] - 'b0010, -[31:4] - NOF_ext_mem_words - 2/1 [31:0] - external memory read start address -InpSysOverruleON 1/0 [3:0] - 'b0011, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysOverruleOFF 1/0 [3:0] - 'b0100, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysOverruleCmd 2/0 [3:0] - 'b0101, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - - 2/1 [31:0] - command token value for port opid - - -acknowledge tokens: - -InpSysAckCFA 1/0 [3:0] - 'b0000 - [7:4] - CaptPortId, - CaptA-'b0000 - CaptB- 'b0001 - CaptC-'b0010 - [31:16] - NOF_frames -InpSysAckCFE 1/0 [3:0] - 'b0001' -[7:4] - CaptPortId, - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - -InpSysAckAF 1/0 [3:0] - 'b0010 -InpSysAckOverruleON 1/0 [3:0] - 'b0011, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB 
- 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysAckOverruleOFF 1/0 [3:0] - 'b0100, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysAckOverrule 2/0 [3:0] - 'b0101, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - - 2/1 [31:0] - acknowledge token value from port opid - - - -*/ - - -/* Command and acknowledge tokens IDs */ -#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0 /* 0000b */ -#define ISYS_CTRL_CAPT_FRAME_EXT_TOKEN_ID 1 /* 0001b */ -#define ISYS_CTRL_ACQ_FRAME_TOKEN_ID 2 /* 0010b */ -#define ISYS_CTRL_OVERRULE_ON_TOKEN_ID 3 /* 0011b */ -#define ISYS_CTRL_OVERRULE_OFF_TOKEN_ID 4 /* 0100b */ -#define ISYS_CTRL_OVERRULE_TOKEN_ID 5 /* 0101b */ - -#define ISYS_CTRL_ACK_CFA_TOKEN_ID 0 -#define ISYS_CTRL_ACK_CFE_TOKEN_ID 1 -#define ISYS_CTRL_ACK_AF_TOKEN_ID 2 -#define ISYS_CTRL_ACK_OVERRULE_ON_TOKEN_ID 3 -#define ISYS_CTRL_ACK_OVERRULE_OFF_TOKEN_ID 4 -#define ISYS_CTRL_ACK_OVERRULE_TOKEN_ID 5 -#define ISYS_CTRL_ACK_DEVICE_ERROR_TOKEN_ID 6 - -#define ISYS_CTRL_TOKEN_ID_MSB 3 -#define ISYS_CTRL_TOKEN_ID_LSB 0 -#define ISYS_CTRL_PORT_ID_TOKEN_MSB 7 -#define ISYS_CTRL_PORT_ID_TOKEN_LSB 4 -#define ISYS_CTRL_NOF_CAPT_TOKEN_MSB 31 -#define ISYS_CTRL_NOF_CAPT_TOKEN_LSB 16 -#define ISYS_CTRL_NOF_EXT_TOKEN_MSB 31 -#define ISYS_CTRL_NOF_EXT_TOKEN_LSB 8 - -#define ISYS_CTRL_TOKEN_ID_IDX 0 -#define ISYS_CTRL_TOKEN_ID_BITS (ISYS_CTRL_TOKEN_ID_MSB - ISYS_CTRL_TOKEN_ID_LSB + 1) -#define ISYS_CTRL_PORT_ID_IDX (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS) -#define ISYS_CTRL_PORT_ID_BITS (ISYS_CTRL_PORT_ID_TOKEN_MSB - ISYS_CTRL_PORT_ID_TOKEN_LSB +1) -#define ISYS_CTRL_NOF_CAPT_IDX ISYS_CTRL_NOF_CAPT_TOKEN_LSB -#define ISYS_CTRL_NOF_CAPT_BITS (ISYS_CTRL_NOF_CAPT_TOKEN_MSB - ISYS_CTRL_NOF_CAPT_TOKEN_LSB + 1) -#define ISYS_CTRL_NOF_EXT_IDX ISYS_CTRL_NOF_EXT_TOKEN_LSB -#define ISYS_CTRL_NOF_EXT_BITS (ISYS_CTRL_NOF_EXT_TOKEN_MSB - 
ISYS_CTRL_NOF_EXT_TOKEN_LSB + 1) - -#define ISYS_CTRL_PORT_ID_CAPT_A 0 /* device ID for capture unit A */ -#define ISYS_CTRL_PORT_ID_CAPT_B 1 /* device ID for capture unit B */ -#define ISYS_CTRL_PORT_ID_CAPT_C 2 /* device ID for capture unit C */ -#define ISYS_CTRL_PORT_ID_ACQUISITION 3 /* device ID for acquistion unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_A 4 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_B 5 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_C 6 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_ACQ 7 /* device ID for dma unit */ - -#define ISYS_CTRL_NO_ACQ_ACK 16 /* no ack from acquisition unit */ -#define ISYS_CTRL_NO_DMA_ACK 0 -#define ISYS_CTRL_NO_CAPT_ACK 16 - -#endif /* _input_system_ctrl_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_defs.h deleted file mode 100644 index ae62163034a6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_defs.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _input_system_defs_h -#define _input_system_defs_h - -/* csi controller modes */ -#define HIVE_CSI_CONFIG_MAIN 0 -#define HIVE_CSI_CONFIG_STEREO1 4 -#define HIVE_CSI_CONFIG_STEREO2 8 - -/* general purpose register IDs */ - -/* Stream Multicast select modes */ -#define HIVE_ISYS_GPREG_MULTICAST_A_IDX 0 -#define HIVE_ISYS_GPREG_MULTICAST_B_IDX 1 -#define HIVE_ISYS_GPREG_MULTICAST_C_IDX 2 - -/* Stream Mux select modes */ -#define HIVE_ISYS_GPREG_MUX_IDX 3 - -/* streaming monitor status and control */ -#define HIVE_ISYS_GPREG_STRMON_STAT_IDX 4 -#define HIVE_ISYS_GPREG_STRMON_COND_IDX 5 -#define HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX 6 -#define HIVE_ISYS_GPREG_SRST_IDX 7 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_IDX 8 -#define HIVE_ISYS_GPREG_REG_PORT_A_IDX 9 -#define HIVE_ISYS_GPREG_REG_PORT_B_IDX 10 - -/* Bit numbers of the soft reset register */ -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0 -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1 -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_A_BIT 3 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_B_BIT 4 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_C_BIT 5 -#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT 6 -#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT 7 -#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT 8 -#define HIVE_ISYS_GPREG_SRST_ACQ_BIT 9 -/* For ISYS_CTRL 5bits are defined to allow soft-reset per sub-controller and top-ctrl */ -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT 10 /*LSB for 5bit vector */ -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_A_BIT 10 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_B_BIT 11 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_C_BIT 12 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_ACQ_BIT 13 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_TOP_BIT 14 -/* -- */ -#define HIVE_ISYS_GPREG_SRST_STR_MUX_BIT 15 -#define HIVE_ISYS_GPREG_SRST_CIO2AHB_BIT 16 -#define HIVE_ISYS_GPREG_SRST_GEN_SHORT_FIFO_BIT 17 -#define HIVE_ISYS_GPREG_SRST_WIDE_BUS_BIT 18 // includes CIO conv -#define 
HIVE_ISYS_GPREG_SRST_DMA_BIT 19 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_A_BIT 20 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_B_BIT 21 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_C_BIT 22 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_ACQ_BIT 23 -#define HIVE_ISYS_GPREG_SRST_CSI_BE_OUT_BIT 24 - -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_A_BIT 0 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_B_BIT 1 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_C_BIT 2 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_ACQ_BIT 3 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_DMA_BIT 4 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_ISYS_CTRL_BIT 5 - -/* streaming monitor port id's */ -#define HIVE_ISYS_STR_MON_PORT_CAPA 0 -#define HIVE_ISYS_STR_MON_PORT_CAPB 1 -#define HIVE_ISYS_STR_MON_PORT_CAPC 2 -#define HIVE_ISYS_STR_MON_PORT_ACQ 3 -#define HIVE_ISYS_STR_MON_PORT_CSS_GENSH 4 -#define HIVE_ISYS_STR_MON_PORT_SF_GENSH 5 -#define HIVE_ISYS_STR_MON_PORT_SP2ISYS 6 -#define HIVE_ISYS_STR_MON_PORT_ISYS2SP 7 -#define HIVE_ISYS_STR_MON_PORT_PIXA 8 -#define HIVE_ISYS_STR_MON_PORT_PIXB 9 - -/* interrupt bit ID's */ -#define HIVE_ISYS_IRQ_CSI_SOF_BIT_ID 0 -#define HIVE_ISYS_IRQ_CSI_EOF_BIT_ID 1 -#define HIVE_ISYS_IRQ_CSI_SOL_BIT_ID 2 -#define HIVE_ISYS_IRQ_CSI_EOL_BIT_ID 3 -#define HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID 4 -#define HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID 5 -#define HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP 6 -#define HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP 7 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_A_UNDEF_PH 7*/ -#define HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP 8 -#define HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP 9 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_B_UNDEF_PH 10*/ -#define HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP 10 -#define HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP 11 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_C_UNDEF_PH 13*/ -#define HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH 12 -/*#define HIVE_ISYS_IRQ_ACQ_UNIT_UNDEF_PH 15*/ -#define HIVE_ISYS_IRQ_INP_CTRL_CAPA 13 -#define HIVE_ISYS_IRQ_INP_CTRL_CAPB 14 -#define HIVE_ISYS_IRQ_INP_CTRL_CAPC 15 -#define HIVE_ISYS_IRQ_CIO2AHB 16 -#define 
HIVE_ISYS_IRQ_DMA_BIT_ID 17 -#define HIVE_ISYS_IRQ_STREAM_MON_BIT_ID 18 -#define HIVE_ISYS_IRQ_NUM_BITS 19 - -/* DMA */ -#define HIVE_ISYS_DMA_CHANNEL 0 -#define HIVE_ISYS_DMA_IBUF_DDR_CONN 0 -#define HIVE_ISYS_DMA_HEIGHT 1 -#define HIVE_ISYS_DMA_ELEMS 1 /* both master buses of same width */ -#define HIVE_ISYS_DMA_STRIDE 0 /* no stride required as height is fixed to 1 */ -#define HIVE_ISYS_DMA_CROP 0 /* no cropping */ -#define HIVE_ISYS_DMA_EXTENSION 0 /* no extension as elem width is same on both side */ - -#endif /* _input_system_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/irq_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/irq_controller_defs.h deleted file mode 100644 index ec6dd4487158..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/irq_controller_defs.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _irq_controller_defs_h -#define _irq_controller_defs_h - -#define _HRT_IRQ_CONTROLLER_EDGE_REG_IDX 0 -#define _HRT_IRQ_CONTROLLER_MASK_REG_IDX 1 -#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2 -#define _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX 3 -#define _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX 4 -#define _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX 5 -#define _HRT_IRQ_CONTROLLER_STR_OUT_ENABLE_REG_IDX 6 - -#define _HRT_IRQ_CONTROLLER_REG_ALIGN 4 - -#endif /* _irq_controller_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2400_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2400_support.h deleted file mode 100644 index e00bc841d0f0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2400_support.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _isp2400_support_h -#define _isp2400_support_h - -#ifndef ISP2400_VECTOR_TYPES -/* This typedef is to be able to include hive header files - in the host code which is useful in crun */ -typedef char *tmemvectors, *tmemvectoru, *tvector; -#endif - -#define hrt_isp_vamem1_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem1), addr, val) -#define hrt_isp_vamem2_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem2), addr, val) - -#define hrt_isp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _base_dmem) -#define hrt_isp_vmem(cell) HRT_PROC_TYPE_PROP(cell, _simd_vmem) - -#define hrt_isp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_dmem(cell)) -#define hrt_isp_vmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_vmem(cell)) - -#if ISP_HAS_HIST - #define hrt_isp_hist(cell) HRT_PROC_TYPE_PROP(cell, _simd_histogram) - #define hrt_isp_hist_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_hist(cell)) -#endif - -#endif /* _isp2400_support_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2401_mamoiada_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2401_mamoiada_params.h deleted file mode 100644 index 033e23bcf672..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2401_mamoiada_params.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Version */ -#define RTL_VERSION - -/* Cell name */ -#define ISP_CELL_TYPE isp2401_mamoiada -#define ISP_VMEM simd_vmem -#define _HRT_ISP_VMEM isp2401_mamoiada_simd_vmem - -/* instruction pipeline depth */ -#define ISP_BRANCHDELAY 5 - -/* bus */ -#define ISP_BUS_WIDTH 32 -#define ISP_BUS_ADDR_WIDTH 32 -#define ISP_BUS_BURST_SIZE 1 - -/* data-path */ -#define ISP_SCALAR_WIDTH 32 -#define ISP_SLICE_NELEMS 4 -#define ISP_VEC_NELEMS 64 -#define ISP_VEC_ELEMBITS 14 -#define ISP_VEC_ELEM8BITS 16 -#define ISP_CLONE_DATAPATH_IS_16 1 - -/* memories */ -#define ISP_DMEM_DEPTH 4096 -#define ISP_DMEM_BSEL_DOWNSAMPLE 8 -#define ISP_VMEM_DEPTH 3072 -#define ISP_VMEM_BSEL_DOWNSAMPLE 8 -#define ISP_VMEM_ELEMBITS 14 -#define ISP_VMEM_ELEM_PRECISION 14 -#define ISP_VMEM_IS_BAMEM 1 -#if ISP_VMEM_IS_BAMEM - #define ISP_VMEM_BAMEM_MAX_BOI_HEIGHT 8 - #define ISP_VMEM_BAMEM_LATENCY 5 - #define ISP_VMEM_BAMEM_BANK_NARROWING_FACTOR 2 - #define ISP_VMEM_BAMEM_NR_DATA_PLANES 8 - #define ISP_VMEM_BAMEM_NR_CFG_REGISTERS 16 - #define ISP_VMEM_BAMEM_LININT 0 - #define ISP_VMEM_BAMEM_DAP_BITS 3 - #define ISP_VMEM_BAMEM_LININT_FRAC_BITS 0 - #define ISP_VMEM_BAMEM_PID_BITS 3 - #define ISP_VMEM_BAMEM_OFFSET_BITS 19 - #define ISP_VMEM_BAMEM_ADDRESS_BITS 25 - #define ISP_VMEM_BAMEM_RID_BITS 4 - #define ISP_VMEM_BAMEM_TRANSPOSITION 1 - #define ISP_VMEM_BAMEM_VEC_PLUS_SLICE 1 - #define ISP_VMEM_BAMEM_ARB_SERVICE_CYCLE_BITS 1 - #define ISP_VMEM_BAMEM_LUT_ELEMS 16 - #define ISP_VMEM_BAMEM_LUT_ADDR_WIDTH 14 - #define ISP_VMEM_BAMEM_HALF_BLOCK_WRITE 1 - #define ISP_VMEM_BAMEM_SMART_FETCH 1 - #define ISP_VMEM_BAMEM_BIG_ENDIANNESS 0 -#endif /* ISP_VMEM_IS_BAMEM */ -#define ISP_PMEM_DEPTH 2048 -#define ISP_PMEM_WIDTH 640 -#define ISP_VAMEM_ADDRESS_BITS 12 
-#define ISP_VAMEM_ELEMBITS 12 -#define ISP_VAMEM_DEPTH 2048 -#define ISP_VAMEM_ALIGNMENT 2 -#define ISP_VA_ADDRESS_WIDTH 896 -#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS -#define ISP_HIST_ADDRESS_BITS 12 -#define ISP_HIST_ALIGNMENT 4 -#define ISP_HIST_COMP_IN_PREC 12 -#define ISP_HIST_DEPTH 1024 -#define ISP_HIST_WIDTH 24 -#define ISP_HIST_COMPONENTS 4 - -/* program counter */ -#define ISP_PC_WIDTH 13 - -/* Template switches */ -#define ISP_SHIELD_INPUT_DMEM 0 -#define ISP_SHIELD_OUTPUT_DMEM 1 -#define ISP_SHIELD_INPUT_VMEM 0 -#define ISP_SHIELD_OUTPUT_VMEM 0 -#define ISP_SHIELD_INPUT_PMEM 1 -#define ISP_SHIELD_OUTPUT_PMEM 1 -#define ISP_SHIELD_INPUT_HIST 1 -#define ISP_SHIELD_OUTPUT_HIST 1 -/* When LUT is select the shielding is always on */ -#define ISP_SHIELD_INPUT_VAMEM 1 -#define ISP_SHIELD_OUTPUT_VAMEM 1 - -#define ISP_HAS_IRQ 1 -#define ISP_HAS_SOFT_RESET 1 -#define ISP_HAS_VEC_DIV 0 -#define ISP_HAS_VFU_W_2O 1 -#define ISP_HAS_DEINT3 1 -#define ISP_HAS_LUT 1 -#define ISP_HAS_HIST 1 -#define ISP_HAS_VALSU 1 -#define ISP_HAS_3rdVALSU 1 -#define ISP_VRF1_HAS_2P 1 - -#define ISP_SRU_GUARDING 1 -#define ISP_VLSU_GUARDING 1 - -#define ISP_VRF_RAM 1 -#define ISP_SRF_RAM 1 - -#define ISP_SPLIT_VMUL_VADD_IS 0 -#define ISP_RFSPLIT_FPGA 0 - -/* RSN or Bus pipelining */ -#define ISP_RSN_PIPE 1 -#define ISP_VSF_BUS_PIPE 0 - -/* extra slave port to vmem */ -#define ISP_IF_VMEM 0 -#define ISP_GDC_VMEM 0 - -/* Streaming ports */ -#define ISP_IF 1 -#define ISP_IF_B 1 -#define ISP_GDC 1 -#define ISP_SCL 1 -#define ISP_GPFIFO 1 -#define ISP_SP 1 - -/* Removing Issue Slot(s) */ -#define ISP_HAS_NOT_SIMD_IS2 0 -#define ISP_HAS_NOT_SIMD_IS3 0 -#define ISP_HAS_NOT_SIMD_IS4 0 -#define ISP_HAS_NOT_SIMD_IS4_VADD 0 -#define ISP_HAS_NOT_SIMD_IS5 0 -#define ISP_HAS_NOT_SIMD_IS6 0 -#define ISP_HAS_NOT_SIMD_IS7 0 -#define ISP_HAS_NOT_SIMD_IS8 0 - -/* ICache */ -#define ISP_ICACHE 1 -#define ISP_ICACHE_ONLY 0 -#define ISP_ICACHE_PREFETCH 1 -#define ISP_ICACHE_INDEX_BITS 8 -#define 
ISP_ICACHE_SET_BITS 5 -#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1 - -/* Experimental Flags */ -#define ISP_EXP_1 0 -#define ISP_EXP_2 0 -#define ISP_EXP_3 0 -#define ISP_EXP_4 0 -#define ISP_EXP_5 0 -#define ISP_EXP_6 0 - -/* Derived values */ -#define ISP_LOG2_PMEM_WIDTH 10 -#define ISP_VEC_WIDTH 896 -#define ISP_SLICE_WIDTH 56 -#define ISP_VMEM_WIDTH 896 -#define ISP_VMEM_ALIGN 128 -#if ISP_VMEM_IS_BAMEM - #define ISP_VMEM_ALIGN_ELEM 2 -#endif /* ISP_VMEM_IS_BAMEM */ -#define ISP_SIMDLSU 1 -#define ISP_LSU_IMM_BITS 12 - -/* convenient shortcuts for software*/ -#define ISP_NWAY ISP_VEC_NELEMS -#define NBITS ISP_VEC_ELEMBITS - -#define _isp_ceil_div(a,b) (((a)+(b)-1)/(b)) - -#ifdef C_RUN -#define ISP_VEC_ALIGN (_isp_ceil_div(ISP_VEC_WIDTH, 64)*8) -#else -#define ISP_VEC_ALIGN ISP_VMEM_ALIGN -#endif - -/* HRT specific vector support */ -#define isp2401_mamoiada_vector_alignment ISP_VEC_ALIGN -#define isp2401_mamoiada_vector_elem_bits ISP_VMEM_ELEMBITS -#define isp2401_mamoiada_vector_elem_precision ISP_VMEM_ELEM_PRECISION -#define isp2401_mamoiada_vector_num_elems ISP_VEC_NELEMS - -/* register file sizes */ -#define ISP_RF0_SIZE 64 -#define ISP_RF1_SIZE 16 -#define ISP_RF2_SIZE 64 -#define ISP_RF3_SIZE 4 -#define ISP_RF4_SIZE 64 -#define ISP_RF5_SIZE 16 -#define ISP_RF6_SIZE 16 -#define ISP_RF7_SIZE 16 -#define ISP_RF8_SIZE 16 -#define ISP_RF9_SIZE 16 -#define ISP_RF10_SIZE 16 -#define ISP_RF11_SIZE 16 -#define ISP_VRF1_SIZE 32 -#define ISP_VRF2_SIZE 32 -#define ISP_VRF3_SIZE 32 -#define ISP_VRF4_SIZE 32 -#define ISP_VRF5_SIZE 32 -#define ISP_VRF6_SIZE 32 -#define ISP_VRF7_SIZE 32 -#define ISP_VRF8_SIZE 32 -#define ISP_SRF1_SIZE 4 -#define ISP_SRF2_SIZE 64 -#define ISP_SRF3_SIZE 64 -#define ISP_SRF4_SIZE 32 -#define ISP_SRF5_SIZE 64 -#define ISP_FRF0_SIZE 16 -#define ISP_FRF1_SIZE 4 -#define ISP_FRF2_SIZE 16 -#define ISP_FRF3_SIZE 4 -#define ISP_FRF4_SIZE 4 -#define ISP_FRF5_SIZE 8 -#define ISP_FRF6_SIZE 4 -/* register file read latency */ -#define ISP_VRF1_READ_LAT 1 
-#define ISP_VRF2_READ_LAT 1 -#define ISP_VRF3_READ_LAT 1 -#define ISP_VRF4_READ_LAT 1 -#define ISP_VRF5_READ_LAT 1 -#define ISP_VRF6_READ_LAT 1 -#define ISP_VRF7_READ_LAT 1 -#define ISP_VRF8_READ_LAT 1 -#define ISP_SRF1_READ_LAT 1 -#define ISP_SRF2_READ_LAT 1 -#define ISP_SRF3_READ_LAT 1 -#define ISP_SRF4_READ_LAT 1 -#define ISP_SRF5_READ_LAT 1 -#define ISP_SRF5_READ_LAT 1 -/* immediate sizes */ -#define ISP_IS1_IMM_BITS 14 -#define ISP_IS2_IMM_BITS 13 -#define ISP_IS3_IMM_BITS 14 -#define ISP_IS4_IMM_BITS 14 -#define ISP_IS5_IMM_BITS 9 -#define ISP_IS6_IMM_BITS 16 -#define ISP_IS7_IMM_BITS 9 -#define ISP_IS8_IMM_BITS 16 -#define ISP_IS9_IMM_BITS 11 -/* fifo depths */ -#define ISP_IF_FIFO_DEPTH 0 -#define ISP_IF_B_FIFO_DEPTH 0 -#define ISP_DMA_FIFO_DEPTH 0 -#define ISP_OF_FIFO_DEPTH 0 -#define ISP_GDC_FIFO_DEPTH 0 -#define ISP_SCL_FIFO_DEPTH 0 -#define ISP_GPFIFO_FIFO_DEPTH 0 -#define ISP_SP_FIFO_DEPTH 0 diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_acquisition_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_acquisition_defs.h deleted file mode 100644 index 593620721627..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_acquisition_defs.h +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _isp_acquisition_defs_h -#define _isp_acquisition_defs_h - -#define _ISP_ACQUISITION_REG_ALIGN 4 /* assuming 32 bit control bus width */ -#define _ISP_ACQUISITION_BYTES_PER_ELEM 4 - -/* --------------------------------------------------*/ - -#define NOF_ACQ_IRQS 1 - -/* --------------------------------------------------*/ -/* FSM */ -/* --------------------------------------------------*/ -#define MEM2STREAM_FSM_STATE_BITS 2 -#define ACQ_SYNCHRONIZER_FSM_STATE_BITS 2 - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -#define NOF_ACQ_REGS 12 - -// Register id's of MMIO slave accesible registers -#define ACQ_START_ADDR_REG_ID 0 -#define ACQ_MEM_REGION_SIZE_REG_ID 1 -#define ACQ_NUM_MEM_REGIONS_REG_ID 2 -#define ACQ_INIT_REG_ID 3 -#define ACQ_RECEIVED_SHORT_PACKETS_REG_ID 4 -#define ACQ_RECEIVED_LONG_PACKETS_REG_ID 5 -#define ACQ_LAST_COMMAND_REG_ID 6 -#define ACQ_NEXT_COMMAND_REG_ID 7 -#define ACQ_LAST_ACKNOWLEDGE_REG_ID 8 -#define ACQ_NEXT_ACKNOWLEDGE_REG_ID 9 -#define ACQ_FSM_STATE_INFO_REG_ID 10 -#define ACQ_INT_CNTR_INFO_REG_ID 11 - -// Register width -#define ACQ_START_ADDR_REG_WIDTH 9 -#define ACQ_MEM_REGION_SIZE_REG_WIDTH 9 -#define ACQ_NUM_MEM_REGIONS_REG_WIDTH 9 -#define ACQ_INIT_REG_WIDTH 3 -#define ACQ_RECEIVED_SHORT_PACKETS_REG_WIDTH 32 -#define ACQ_RECEIVED_LONG_PACKETS_REG_WIDTH 32 -#define ACQ_LAST_COMMAND_REG_WIDTH 32 -#define ACQ_NEXT_COMMAND_REG_WIDTH 32 -#define ACQ_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define ACQ_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define ACQ_FSM_STATE_INFO_REG_WIDTH ((MEM2STREAM_FSM_STATE_BITS * 3) + (ACQ_SYNCHRONIZER_FSM_STATE_BITS *3)) -#define ACQ_INT_CNTR_INFO_REG_WIDTH 32 - -/* register reset value */ -#define ACQ_START_ADDR_REG_RSTVAL 0 -#define ACQ_MEM_REGION_SIZE_REG_RSTVAL 128 -#define ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define ACQ_INIT_REG_RSTVAL 0 -#define ACQ_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0 -#define 
ACQ_RECEIVED_LONG_PACKETS_REG_RSTVAL 0 -#define ACQ_LAST_COMMAND_REG_RSTVAL 0 -#define ACQ_NEXT_COMMAND_REG_RSTVAL 0 -#define ACQ_LAST_ACKNOWLEDGE_REG_RSTVAL 0 -#define ACQ_NEXT_ACKNOWLEDGE_REG_RSTVAL 0 -#define ACQ_FSM_STATE_INFO_REG_RSTVAL 0 -#define ACQ_INT_CNTR_INFO_REG_RSTVAL 0 - -/* bit definitions */ -#define ACQ_INIT_RST_REG_BIT 0 -#define ACQ_INIT_RESYNC_BIT 2 -#define ACQ_INIT_RST_IDX ACQ_INIT_RST_REG_BIT -#define ACQ_INIT_RST_BITS 1 -#define ACQ_INIT_RESYNC_IDX ACQ_INIT_RESYNC_BIT -#define ACQ_INIT_RESYNC_BITS 1 - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ -#define ACQ_TOKEN_ID_LSB 0 -#define ACQ_TOKEN_ID_MSB 3 -#define ACQ_TOKEN_WIDTH (ACQ_TOKEN_ID_MSB - ACQ_TOKEN_ID_LSB + 1) // 4 -#define ACQ_TOKEN_ID_IDX 0 -#define ACQ_TOKEN_ID_BITS ACQ_TOKEN_WIDTH -#define ACQ_INIT_CMD_INIT_IDX 4 -#define ACQ_INIT_CMD_INIT_BITS 3 -#define ACQ_CMD_START_ADDR_IDX 4 -#define ACQ_CMD_START_ADDR_BITS 9 -#define ACQ_CMD_NOFWORDS_IDX 13 -#define ACQ_CMD_NOFWORDS_BITS 9 -#define ACQ_MEM_REGION_ID_IDX 22 -#define ACQ_MEM_REGION_ID_BITS 9 -#define ACQ_PACKET_LENGTH_TOKEN_MSB 21 -#define ACQ_PACKET_LENGTH_TOKEN_LSB 13 -#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_MSB 9 -#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_LSB 4 -#define ACQ_PACKET_CH_ID_TOKEN_MSB 11 -#define ACQ_PACKET_CH_ID_TOKEN_LSB 10 -#define ACQ_PACKET_MEM_REGION_ID_TOKEN_MSB 12 /* only for capt_end_of_packet_written */ -#define ACQ_PACKET_MEM_REGION_ID_TOKEN_LSB 4 /* only for capt_end_of_packet_written */ - - -/* Command tokens IDs */ -#define ACQ_READ_REGION_AUTO_INCR_TOKEN_ID 0 //0000b -#define ACQ_READ_REGION_TOKEN_ID 1 //0001b -#define ACQ_READ_REGION_SOP_TOKEN_ID 2 //0010b -#define ACQ_INIT_TOKEN_ID 8 //1000b - -/* Acknowledge token IDs */ -#define ACQ_READ_REGION_ACK_TOKEN_ID 0 //0000b -#define ACQ_END_OF_PACKET_TOKEN_ID 4 //0100b -#define ACQ_END_OF_REGION_TOKEN_ID 5 //0101b -#define ACQ_SOP_MISMATCH_TOKEN_ID 6 //0110b -#define 
ACQ_UNDEF_PH_TOKEN_ID 7 //0111b - -#define ACQ_TOKEN_MEMREGIONID_MSB 30 -#define ACQ_TOKEN_MEMREGIONID_LSB 22 -#define ACQ_TOKEN_NOFWORDS_MSB 21 -#define ACQ_TOKEN_NOFWORDS_LSB 13 -#define ACQ_TOKEN_STARTADDR_MSB 12 -#define ACQ_TOKEN_STARTADDR_LSB 4 - - -/* --------------------------------------------------*/ -/* MIPI */ -/* --------------------------------------------------*/ - -#define WORD_COUNT_WIDTH 16 -#define PKT_CODE_WIDTH 6 -#define CHN_NO_WIDTH 2 -#define ERROR_INFO_WIDTH 8 - -#define LONG_PKTCODE_MAX 63 -#define LONG_PKTCODE_MIN 16 -#define SHORT_PKTCODE_MAX 15 - -#define EOF_CODE 1 - -/* --------------------------------------------------*/ -/* Packet Info */ -/* --------------------------------------------------*/ -#define ACQ_START_OF_FRAME 0 -#define ACQ_END_OF_FRAME 1 -#define ACQ_START_OF_LINE 2 -#define ACQ_END_OF_LINE 3 -#define ACQ_LINE_PAYLOAD 4 -#define ACQ_GEN_SH_PKT 5 - - -/* bit definition */ -#define ACQ_PKT_TYPE_IDX 16 -#define ACQ_PKT_TYPE_BITS 6 -#define ACQ_PKT_SOP_IDX 32 -#define ACQ_WORD_CNT_IDX 0 -#define ACQ_WORD_CNT_BITS 16 -#define ACQ_PKT_INFO_IDX 16 -#define ACQ_PKT_INFO_BITS 8 -#define ACQ_HEADER_DATA_IDX 0 -#define ACQ_HEADER_DATA_BITS 16 -#define ACQ_ACK_TOKEN_ID_IDX ACQ_TOKEN_ID_IDX -#define ACQ_ACK_TOKEN_ID_BITS ACQ_TOKEN_ID_BITS -#define ACQ_ACK_NOFWORDS_IDX 13 -#define ACQ_ACK_NOFWORDS_BITS 9 -#define ACQ_ACK_PKT_LEN_IDX 4 -#define ACQ_ACK_PKT_LEN_BITS 16 - - -/* --------------------------------------------------*/ -/* Packet Data Type */ -/* --------------------------------------------------*/ - - -#define ACQ_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */ -#define ACQ_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */ -#define ACQ_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */ -#define ACQ_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */ -#define ACQ_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */ -#define ACQ_RGB444_DATA 32 /* 10 0000 RGB444 */ -#define ACQ_RGB555_DATA 33 /* 10 0001 RGB555 */ -#define ACQ_RGB565_DATA 34 /* 10 
0010 RGB565 */ -#define ACQ_RGB666_DATA 35 /* 10 0011 RGB666 */ -#define ACQ_RGB888_DATA 36 /* 10 0100 RGB888 */ -#define ACQ_RAW6_DATA 40 /* 10 1000 RAW6 */ -#define ACQ_RAW7_DATA 41 /* 10 1001 RAW7 */ -#define ACQ_RAW8_DATA 42 /* 10 1010 RAW8 */ -#define ACQ_RAW10_DATA 43 /* 10 1011 RAW10 */ -#define ACQ_RAW12_DATA 44 /* 10 1100 RAW12 */ -#define ACQ_RAW14_DATA 45 /* 10 1101 RAW14 */ -#define ACQ_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */ -#define ACQ_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */ -#define ACQ_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */ -#define ACQ_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */ -#define ACQ_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */ -#define ACQ_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */ -#define ACQ_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */ -#define ACQ_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */ -#define ACQ_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */ -#define ACQ_SOF_DATA 0 /* 00 0000 frame start */ -#define ACQ_EOF_DATA 1 /* 00 0001 frame end */ -#define ACQ_SOL_DATA 2 /* 00 0010 line start */ -#define ACQ_EOL_DATA 3 /* 00 0011 line end */ -#define ACQ_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */ -#define ACQ_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */ -#define ACQ_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */ -#define ACQ_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */ -#define ACQ_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */ -#define ACQ_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */ -#define ACQ_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */ -#define ACQ_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */ -#define ACQ_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */ -#define ACQ_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma 
Shifted Pixel Sampling) */ -#define ACQ_RESERVED_DATA_TYPE_MIN 56 -#define ACQ_RESERVED_DATA_TYPE_MAX 63 -#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MIN 19 -#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MAX 23 -#define ACQ_YUV_RESERVED_DATA_TYPE 27 -#define ACQ_RGB_RESERVED_DATA_TYPE_MIN 37 -#define ACQ_RGB_RESERVED_DATA_TYPE_MAX 39 -#define ACQ_RAW_RESERVED_DATA_TYPE_MIN 46 -#define ACQ_RAW_RESERVED_DATA_TYPE_MAX 47 - -/* --------------------------------------------------*/ - -#endif /* _isp_acquisition_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_capture_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_capture_defs.h deleted file mode 100644 index aa413df022f2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_capture_defs.h +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _isp_capture_defs_h -#define _isp_capture_defs_h - -#define _ISP_CAPTURE_REG_ALIGN 4 /* assuming 32 bit control bus width */ -#define _ISP_CAPTURE_BITS_PER_ELEM 32 /* only for data, not SOP */ -#define _ISP_CAPTURE_BYTES_PER_ELEM (_ISP_CAPTURE_BITS_PER_ELEM/8 ) -#define _ISP_CAPTURE_BYTES_PER_WORD 32 /* 256/8 */ -#define _ISP_CAPTURE_ELEM_PER_WORD _ISP_CAPTURE_BYTES_PER_WORD / _ISP_CAPTURE_BYTES_PER_ELEM - -//#define CAPT_RCV_ACK 1 -//#define CAPT_WRT_ACK 2 -//#define CAPT_IRQ_ACK 3 - -/* --------------------------------------------------*/ - -#define NOF_IRQS 2 - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -// Number of registers -#define CAPT_NOF_REGS 16 - -// Register id's of MMIO slave accesible registers -#define CAPT_START_MODE_REG_ID 0 -#define CAPT_START_ADDR_REG_ID 1 -#define CAPT_MEM_REGION_SIZE_REG_ID 2 -#define CAPT_NUM_MEM_REGIONS_REG_ID 3 -#define CAPT_INIT_REG_ID 4 -#define CAPT_START_REG_ID 5 -#define CAPT_STOP_REG_ID 6 - -#define CAPT_PACKET_LENGTH_REG_ID 7 -#define CAPT_RECEIVED_LENGTH_REG_ID 8 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_ID 9 -#define CAPT_RECEIVED_LONG_PACKETS_REG_ID 10 -#define CAPT_LAST_COMMAND_REG_ID 11 -#define CAPT_NEXT_COMMAND_REG_ID 12 -#define CAPT_LAST_ACKNOWLEDGE_REG_ID 13 -#define CAPT_NEXT_ACKNOWLEDGE_REG_ID 14 -#define CAPT_FSM_STATE_INFO_REG_ID 15 - -// Register width -#define CAPT_START_MODE_REG_WIDTH 1 -//#define CAPT_START_ADDR_REG_WIDTH 9 -//#define CAPT_MEM_REGION_SIZE_REG_WIDTH 9 -//#define CAPT_NUM_MEM_REGIONS_REG_WIDTH 9 -#define CAPT_INIT_REG_WIDTH (22 + 4) - -#define CAPT_START_REG_WIDTH 1 -#define CAPT_STOP_REG_WIDTH 1 - -/* --------------------------------------------------*/ -/* FSM */ -/* --------------------------------------------------*/ -#define CAPT_WRITE2MEM_FSM_STATE_BITS 2 -#define CAPT_SYNCHRONIZER_FSM_STATE_BITS 3 - - -#define CAPT_PACKET_LENGTH_REG_WIDTH 17 -#define 
CAPT_RECEIVED_LENGTH_REG_WIDTH 17 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_WIDTH 32 -#define CAPT_RECEIVED_LONG_PACKETS_REG_WIDTH 32 -#define CAPT_LAST_COMMAND_REG_WIDTH 32 -/* #define CAPT_NEXT_COMMAND_REG_WIDTH 32 */ -#define CAPT_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define CAPT_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define CAPT_FSM_STATE_INFO_REG_WIDTH ((CAPT_WRITE2MEM_FSM_STATE_BITS * 3) + (CAPT_SYNCHRONIZER_FSM_STATE_BITS * 3)) - -//#define CAPT_INIT_RESTART_MEM_ADDR_WIDTH 9 -//#define CAPT_INIT_RESTART_MEM_REGION_WIDTH 9 - -/* register reset value */ -#define CAPT_START_MODE_REG_RSTVAL 0 -#define CAPT_START_ADDR_REG_RSTVAL 0 -#define CAPT_MEM_REGION_SIZE_REG_RSTVAL 128 -#define CAPT_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define CAPT_INIT_REG_RSTVAL 0 - -#define CAPT_START_REG_RSTVAL 0 -#define CAPT_STOP_REG_RSTVAL 0 - -#define CAPT_PACKET_LENGTH_REG_RSTVAL 0 -#define CAPT_RECEIVED_LENGTH_REG_RSTVAL 0 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0 -#define CAPT_RECEIVED_LONG_PACKETS_REG_RSTVAL 0 -#define CAPT_LAST_COMMAND_REG_RSTVAL 0 -#define CAPT_NEXT_COMMAND_REG_RSTVAL 0 -#define CAPT_LAST_ACKNOWLEDGE_REG_RSTVAL 0 -#define CAPT_NEXT_ACKNOWLEDGE_REG_RSTVAL 0 -#define CAPT_FSM_STATE_INFO_REG_RSTVAL 0 - -/* bit definitions */ -#define CAPT_INIT_RST_REG_BIT 0 -#define CAPT_INIT_FLUSH_BIT 1 -#define CAPT_INIT_RESYNC_BIT 2 -#define CAPT_INIT_RESTART_BIT 3 -#define CAPT_INIT_RESTART_MEM_ADDR_LSB 4 -#define CAPT_INIT_RESTART_MEM_ADDR_MSB 14 -#define CAPT_INIT_RESTART_MEM_REGION_LSB 15 -#define CAPT_INIT_RESTART_MEM_REGION_MSB 25 - - -#define CAPT_INIT_RST_REG_IDX CAPT_INIT_RST_REG_BIT -#define CAPT_INIT_RST_REG_BITS 1 -#define CAPT_INIT_FLUSH_IDX CAPT_INIT_FLUSH_BIT -#define CAPT_INIT_FLUSH_BITS 1 -#define CAPT_INIT_RESYNC_IDX CAPT_INIT_RESYNC_BIT -#define CAPT_INIT_RESYNC_BITS 1 -#define CAPT_INIT_RESTART_IDX CAPT_INIT_RESTART_BIT -#define CAPT_INIT_RESTART_BITS 1 -#define CAPT_INIT_RESTART_MEM_ADDR_IDX CAPT_INIT_RESTART_MEM_ADDR_LSB -#define CAPT_INIT_RESTART_MEM_ADDR_BITS 
(CAPT_INIT_RESTART_MEM_ADDR_MSB - CAPT_INIT_RESTART_MEM_ADDR_LSB + 1) -#define CAPT_INIT_RESTART_MEM_REGION_IDX CAPT_INIT_RESTART_MEM_REGION_LSB -#define CAPT_INIT_RESTART_MEM_REGION_BITS (CAPT_INIT_RESTART_MEM_REGION_MSB - CAPT_INIT_RESTART_MEM_REGION_LSB + 1) - - - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ -#define CAPT_TOKEN_ID_LSB 0 -#define CAPT_TOKEN_ID_MSB 3 -#define CAPT_TOKEN_WIDTH (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) /* 4 */ - -/* Command tokens IDs */ -#define CAPT_START_TOKEN_ID 0 /* 0000b */ -#define CAPT_STOP_TOKEN_ID 1 /* 0001b */ -#define CAPT_FREEZE_TOKEN_ID 2 /* 0010b */ -#define CAPT_RESUME_TOKEN_ID 3 /* 0011b */ -#define CAPT_INIT_TOKEN_ID 8 /* 1000b */ - -#define CAPT_START_TOKEN_BIT 0 -#define CAPT_STOP_TOKEN_BIT 0 -#define CAPT_FREEZE_TOKEN_BIT 0 -#define CAPT_RESUME_TOKEN_BIT 0 -#define CAPT_INIT_TOKEN_BIT 0 - -/* Acknowledge token IDs */ -#define CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID 0 /* 0000b */ -#define CAPT_END_OF_PACKET_WRITTEN_TOKEN_ID 1 /* 0001b */ -#define CAPT_END_OF_REGION_WRITTEN_TOKEN_ID 2 /* 0010b */ -#define CAPT_FLUSH_DONE_TOKEN_ID 3 /* 0011b */ -#define CAPT_PREMATURE_SOP_TOKEN_ID 4 /* 0100b */ -#define CAPT_MISSING_SOP_TOKEN_ID 5 /* 0101b */ -#define CAPT_UNDEF_PH_TOKEN_ID 6 /* 0110b */ -#define CAPT_STOP_ACK_TOKEN_ID 7 /* 0111b */ - -#define CAPT_PACKET_LENGTH_TOKEN_MSB 19 -#define CAPT_PACKET_LENGTH_TOKEN_LSB 4 -#define CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB 20 -#define CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB 4 -#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB 25 -#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB 20 -#define CAPT_PACKET_CH_ID_TOKEN_MSB 27 -#define CAPT_PACKET_CH_ID_TOKEN_LSB 26 -#define CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB 29 -#define CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB 21 - -/* bit definition */ -#define CAPT_CMD_IDX CAPT_TOKEN_ID_LSB -#define CAPT_CMD_BITS (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) -#define CAPT_SOP_IDX 32 
-#define CAPT_SOP_BITS 1 -#define CAPT_PKT_INFO_IDX 16 -#define CAPT_PKT_INFO_BITS 8 -#define CAPT_PKT_TYPE_IDX 0 -#define CAPT_PKT_TYPE_BITS 6 -#define CAPT_HEADER_DATA_IDX 0 -#define CAPT_HEADER_DATA_BITS 16 -#define CAPT_PKT_DATA_IDX 0 -#define CAPT_PKT_DATA_BITS 32 -#define CAPT_WORD_CNT_IDX 0 -#define CAPT_WORD_CNT_BITS 16 -#define CAPT_ACK_TOKEN_ID_IDX 0 -#define CAPT_ACK_TOKEN_ID_BITS 4 -//#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB -//#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1) -//#define CAPT_ACK_PKT_INFO_IDX 20 -//#define CAPT_ACK_PKT_INFO_BITS 8 -//#define CAPT_ACK_MEM_REG_ID1_IDX 20 /* for capt_end_of_packet_written */ -//#define CAPT_ACK_MEM_REG_ID2_IDX 4 /* for capt_end_of_region_written */ -#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB -#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1) -#define CAPT_ACK_SUPER_PKT_LEN_IDX CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB -#define CAPT_ACK_SUPER_PKT_LEN_BITS (CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB - CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB + 1) -#define CAPT_ACK_PKT_INFO_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB -#define CAPT_ACK_PKT_INFO_BITS (CAPT_PACKET_CH_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1) -#define CAPT_ACK_MEM_REGION_ID_IDX CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB -#define CAPT_ACK_MEM_REGION_ID_BITS (CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB - CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB + 1) -#define CAPT_ACK_PKT_TYPE_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB -#define CAPT_ACK_PKT_TYPE_BITS (CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1) -#define CAPT_INIT_TOKEN_INIT_IDX 4 -#define CAPT_INIT_TOKEN_INIT_BITS 22 - - -/* --------------------------------------------------*/ -/* MIPI */ -/* --------------------------------------------------*/ - -#define CAPT_WORD_COUNT_WIDTH 16 -#define CAPT_PKT_CODE_WIDTH 6 -#define CAPT_CHN_NO_WIDTH 2 -#define CAPT_ERROR_INFO_WIDTH 8 
- -#define LONG_PKTCODE_MAX 63 -#define LONG_PKTCODE_MIN 16 -#define SHORT_PKTCODE_MAX 15 - - -/* --------------------------------------------------*/ -/* Packet Info */ -/* --------------------------------------------------*/ -#define CAPT_START_OF_FRAME 0 -#define CAPT_END_OF_FRAME 1 -#define CAPT_START_OF_LINE 2 -#define CAPT_END_OF_LINE 3 -#define CAPT_LINE_PAYLOAD 4 -#define CAPT_GEN_SH_PKT 5 - - -/* --------------------------------------------------*/ -/* Packet Data Type */ -/* --------------------------------------------------*/ - -#define CAPT_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */ -#define CAPT_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */ -#define CAPT_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */ -#define CAPT_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */ -#define CAPT_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */ -#define CAPT_RGB444_DATA 32 /* 10 0000 RGB444 */ -#define CAPT_RGB555_DATA 33 /* 10 0001 RGB555 */ -#define CAPT_RGB565_DATA 34 /* 10 0010 RGB565 */ -#define CAPT_RGB666_DATA 35 /* 10 0011 RGB666 */ -#define CAPT_RGB888_DATA 36 /* 10 0100 RGB888 */ -#define CAPT_RAW6_DATA 40 /* 10 1000 RAW6 */ -#define CAPT_RAW7_DATA 41 /* 10 1001 RAW7 */ -#define CAPT_RAW8_DATA 42 /* 10 1010 RAW8 */ -#define CAPT_RAW10_DATA 43 /* 10 1011 RAW10 */ -#define CAPT_RAW12_DATA 44 /* 10 1100 RAW12 */ -#define CAPT_RAW14_DATA 45 /* 10 1101 RAW14 */ -#define CAPT_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */ -#define CAPT_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */ -#define CAPT_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */ -#define CAPT_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */ -#define CAPT_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */ -#define CAPT_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */ -#define CAPT_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */ -#define CAPT_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 
*/ -#define CAPT_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */ -#define CAPT_SOF_DATA 0 /* 00 0000 frame start */ -#define CAPT_EOF_DATA 1 /* 00 0001 frame end */ -#define CAPT_SOL_DATA 2 /* 00 0010 line start */ -#define CAPT_EOL_DATA 3 /* 00 0011 line end */ -#define CAPT_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */ -#define CAPT_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */ -#define CAPT_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */ -#define CAPT_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */ -#define CAPT_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */ -#define CAPT_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */ -#define CAPT_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */ -#define CAPT_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */ -#define CAPT_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */ -#define CAPT_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */ -#define CAPT_RESERVED_DATA_TYPE_MIN 56 -#define CAPT_RESERVED_DATA_TYPE_MAX 63 -#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MIN 19 -#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MAX 23 -#define CAPT_YUV_RESERVED_DATA_TYPE 27 -#define CAPT_RGB_RESERVED_DATA_TYPE_MIN 37 -#define CAPT_RGB_RESERVED_DATA_TYPE_MAX 39 -#define CAPT_RAW_RESERVED_DATA_TYPE_MIN 46 -#define CAPT_RAW_RESERVED_DATA_TYPE_MAX 47 - - -/* --------------------------------------------------*/ -/* Capture Unit State */ -/* --------------------------------------------------*/ -#define CAPT_FREE_RUN 0 -#define CAPT_NO_SYNC 1 -#define CAPT_SYNC_SWP 2 -#define CAPT_SYNC_MWP 3 -#define CAPT_SYNC_WAIT 4 -#define CAPT_FREEZE 5 -#define CAPT_RUN 6 - - -/* --------------------------------------------------*/ - -#endif /* _isp_capture_defs_h */ - - - - - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_common_defs.h 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_common_defs.h deleted file mode 100644 index 76705d7a2b44..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_common_defs.h +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _css_receiver_2400_common_defs_h_ -#define _css_receiver_2400_common_defs_h_ -#ifndef _mipi_backend_common_defs_h_ -#define _mipi_backend_common_defs_h_ - -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16 -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2 -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3 -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH 32 /* use 32 to be compatibel with streaming monitor !, MSB's of interface are tied to '0' */ - -/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */ 
-#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded eight bit non image data */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */ -#define 
_HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */ -/* used reseved mipi positions for these */ -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46 -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47 -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37 -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38 - -//_HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 63 -#define _HRT_MIPI_BACKEND_FMT_TYPE_CUSTOM 63 - -#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6 - -/* Definition of format_types at the interface CSS --> input_selector*/ -/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! 
*/ -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 43 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 30 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ? -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ? -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? 
Option 3 for depacketiser -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding - -/* definition for state machine of data FIFO for decode different type of data */ -#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9 -#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7 -#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7 - -#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN - -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */ - -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8 - -#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5 -#define 
_HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6 - - -/* packet bit definition */ -#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32 -#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1 -#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22 -#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2 -#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16 -#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0 -#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16 -#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0 -#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32 - - -/*************************************************************************************************/ -/* Custom Decoding */ -/* These Custom Defs are defined based on design-time config in "mipi_backend_pixel_formatter.chdl" !! */ -/*************************************************************************************************/ -/* -#define BE_CUST_EN_IDX 0 // 2bits -#define BE_CUST_EN_DATAID_IDX 2 // 6bits MIPI DATA ID -#define BE_CUST_EN_WIDTH 8 -#define BE_CUST_MODE_ALL 1 // Enable Custom Decoding for all DATA IDs -#define BE_CUST_MODE_ONE 3 // Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID - -// Data State config = {get_bits(6bits), valid(1bit)} // -#define BE_CUST_DATA_STATE_S0_IDX 0 // 7bits -#define BE_CUST_DATA_STATE_S1_IDX 8 //7 // 7bits -#define BE_CUST_DATA_STATE_S2_IDX 16//14 // 7bits / -#define BE_CUST_DATA_STATE_WIDTH 24//21 -#define BE_CUST_DATA_STATE_VALID_IDX 0 // 1bits -#define BE_CUST_DATA_STATE_GETBITS_IDX 1 // 6bits - - - - -// Pixel Extractor config -#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 // 6bits -#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 6//5 // 5bits -#define BE_CUST_PIX_EXT_PIX_MASK_IDX 11//10 // 18bits -#define BE_CUST_PIX_EXT_PIX_EN_IDX 29 //28 // 1bits - -#define BE_CUST_PIX_EXT_WIDTH 30//29 - -// Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} -#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 // 4bits -#define 
BE_CUST_PIX_VALID_EOP_P1_IDX 4 // 4bits -#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 // 4bits -#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 // 4bits -#define BE_CUST_PIX_VALID_EOP_WIDTH 16 -#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 // Normal (NO less get_bits case) Valid - 1bits -#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 // Normal (NO less get_bits case) EoP - 1bits -#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 // Especial (less get_bits case) Valid - 1bits -#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 // Especial (less get_bits case) EoP - 1bits - -*/ - -#endif /* _mipi_backend_common_defs_h_ */ -#endif /* _css_receiver_2400_common_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_defs.h deleted file mode 100644 index db5a1d2caba0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_defs.h +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _mipi_backend_defs_h -#define _mipi_backend_defs_h - -#include "mipi_backend_common_defs.h" - -#define MIPI_BACKEND_REG_ALIGN 4 // assuming 32 bit control bus width - -#define _HRT_MIPI_BACKEND_NOF_IRQS 3 // sid_lut - -// SH Backend Register IDs -#define _HRT_MIPI_BACKEND_ENABLE_REG_IDX 0 -#define _HRT_MIPI_BACKEND_STATUS_REG_IDX 1 -//#define _HRT_MIPI_BACKEND_HIGH_PREC_REG_IDX 2 -#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG0_IDX 2 -#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG1_IDX 3 -#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG2_IDX 4 -#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG3_IDX 5 -#define _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_IDX 6 -#define _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_IDX 7 -#define _HRT_MIPI_BACKEND_FORCE_RAW8_REG_IDX 8 -#define _HRT_MIPI_BACKEND_IRQ_STATUS_REG_IDX 9 -#define _HRT_MIPI_BACKEND_IRQ_CLEAR_REG_IDX 10 -//// -#define _HRT_MIPI_BACKEND_CUST_EN_REG_IDX 11 -#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_REG_IDX 12 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P0_REG_IDX 13 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P1_REG_IDX 14 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P2_REG_IDX 15 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P3_REG_IDX 16 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P0_REG_IDX 17 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P1_REG_IDX 18 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P2_REG_IDX 19 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P3_REG_IDX 20 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P0_REG_IDX 21 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P1_REG_IDX 22 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P2_REG_IDX 23 -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P3_REG_IDX 24 -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_REG_IDX 25 -//// -#define _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_IDX 26 -#define _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_IDX 27 -//#define _HRT_MIPI_BACKEND_SP_LUT_ENABLE_REG_IDX 28 -#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_IDX 28 -#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_1_REG_IDX 29 -#define 
_HRT_MIPI_BACKEND_SP_LUT_ENTRY_2_REG_IDX 30 -#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_3_REG_IDX 31 - -#define _HRT_MIPI_BACKEND_NOF_REGISTERS 32 // excluding the LP LUT entries - -#define _HRT_MIPI_BACKEND_LP_LUT_ENTRY_0_REG_IDX 32 - - -///////////////////////////////////////////////////////////////////////////////////////////////////// -#define _HRT_MIPI_BACKEND_ENABLE_REG_WIDTH 1 -#define _HRT_MIPI_BACKEND_STATUS_REG_WIDTH 1 -//#define _HRT_MIPI_BACKEND_HIGH_PREC_REG_WIDTH 1 -#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG_WIDTH 32 -#define _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_WIDTH 7 -#define _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_WIDTH 9 -#define _HRT_MIPI_BACKEND_FORCE_RAW8_REG_WIDTH 8 -#define _HRT_MIPI_BACKEND_IRQ_STATUS_REG_WIDTH _HRT_MIPI_BACKEND_NOF_IRQS -#define _HRT_MIPI_BACKEND_IRQ_CLEAR_REG_WIDTH 0 -#define _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_WIDTH 1 -#define _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_WIDTH 1+2+6 -//#define _HRT_MIPI_BACKEND_SP_LUT_ENABLE_REG_WIDTH 1 -//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_WIDTH 7 -//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_1_REG_WIDTH 7 -//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_2_REG_WIDTH 7 -//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_3_REG_WIDTH 7 - -///////////////////////////////////////////////////////////////////////////////////////////////////// - -#define _HRT_MIPI_BACKEND_NOF_SP_LUT_ENTRIES 4 - -//#define _HRT_MIPI_BACKEND_MAX_NOF_LP_LUT_ENTRIES 16 // to satisfy hss model static array declaration - - -#define _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH 2 -#define _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH 6 -#define _HRT_MIPI_BACKEND_PACKET_ID_WIDTH _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH + _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH - -#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_LSB 0 -#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_MSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_LSB + (pix_width) - 1) -#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_VAL_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_MSB(pix_width) + 1) -#define 
_HRT_MIPI_BACKEND_STREAMING_PIX_B_LSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_VAL_BIT(pix_width) + 1) -#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_MSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_LSB(pix_width) + (pix_width) - 1) -#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_VAL_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_MSB(pix_width) + 1) -#define _HRT_MIPI_BACKEND_STREAMING_SOP_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_VAL_BIT(pix_width) + 1) -#define _HRT_MIPI_BACKEND_STREAMING_EOP_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_SOP_BIT(pix_width) + 1) -#define _HRT_MIPI_BACKEND_STREAMING_WIDTH(pix_width) (_HRT_MIPI_BACKEND_STREAMING_EOP_BIT(pix_width) + 1) - -/*************************************************************************************************/ -/* Custom Decoding */ -/* These Custom Defs are defined based on design-time config in "mipi_backend_pixel_formatter.chdl" !! */ -/*************************************************************************************************/ -#define _HRT_MIPI_BACKEND_CUST_EN_IDX 0 /* 2bits */ -#define _HRT_MIPI_BACKEND_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */ -#define _HRT_MIPI_BACKEND_CUST_EN_HIGH_PREC_IDX 8 // 1 bit -#define _HRT_MIPI_BACKEND_CUST_EN_WIDTH 9 -#define _HRT_MIPI_BACKEND_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */ -#define _HRT_MIPI_BACKEND_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */ - -#define _HRT_MIPI_BACKEND_CUST_EN_OPTION_IDX 1 - -/* Data State config = {get_bits(6bits), valid(1bit)} */ -#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S0_IDX 0 /* 7bits */ -#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S1_IDX 8 /* 7bits */ -#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S2_IDX 16 /* was 14 7bits */ -#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_WIDTH 24 /* was 21*/ -#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */ -#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */ - -/* Pixel Extractor config */ 
-#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 6bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_ALIGN_IDX 6 /* 5bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_MASK_IDX 11 /* was 10 18bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_EN_IDX 29 /* was 28 1bits */ - -#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_WIDTH 30 /* was 29 */ - -/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */ -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_WIDTH 16 -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */ -#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */ - -/*************************************************************************************************/ -/* MIPI backend output streaming interface definition */ -/* These parameters define the fields within the streaming bus. These should also be used by the */ -/* subsequent block, ie stream2mmio. 
*/ -/*************************************************************************************************/ -/* The pipe backend - stream2mmio should be design time configurable in */ -/* PixWidth - Number of bits per pixel */ -/* PPC - Pixel per Clocks */ -/* NumSids - Max number of source Ids (ifc's) and derived from that: */ -/* SidWidth - Number of bits required for the sid parameter */ -/* In order to keep this configurability, below Macro's have these as a parameter */ -/*************************************************************************************************/ - -#define HRT_MIPI_BACKEND_STREAM_EOP_BIT 0 -#define HRT_MIPI_BACKEND_STREAM_SOP_BIT 1 -#define HRT_MIPI_BACKEND_STREAM_EOF_BIT 2 -#define HRT_MIPI_BACKEND_STREAM_SOF_BIT 3 -#define HRT_MIPI_BACKEND_STREAM_CHID_LS_BIT 4 -#define HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_STREAM_CHID_LS_BIT+(sid_width)-1) -#define HRT_MIPI_BACKEND_STREAM_PIX_VAL_BIT(sid_width,p) (HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT(sid_width)+1+p) - -#define HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width,ppc,pix_width,p) (HRT_MIPI_BACKEND_STREAM_PIX_VAL_BIT(sid_width,ppc)+ ((pix_width)*p)) -#define HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(sid_width,ppc,pix_width,p) (HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width,ppc,pix_width,p) + (pix_width) - 1) - -#if 0 -//#define HRT_MIPI_BACKEND_STREAM_PIX_BITS 14 -//#define HRT_MIPI_BACKEND_STREAM_CHID_BITS 4 -//#define HRT_MIPI_BACKEND_STREAM_PPC 4 -#endif - -#define HRT_MIPI_BACKEND_STREAM_BITS(sid_width,ppc,pix_width) (HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(sid_width,ppc,pix_width,(ppc-1))+1) - - -/* SP and LP LUT BIT POSITIONS */ -#define HRT_MIPI_BACKEND_LUT_PKT_DISREGARD_BIT 0 // 0 -#define HRT_MIPI_BACKEND_LUT_SID_LS_BIT HRT_MIPI_BACKEND_LUT_PKT_DISREGARD_BIT + 1 // 1 -#define HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_LUT_SID_LS_BIT+(sid_width)-1) // 1 + (4) - 1 = 4 -#define HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_LS_BIT(sid_width) 
HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) + 1 // 5 -#define HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_LS_BIT(sid_width) + _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH - 1 // 6 -#define HRT_MIPI_BACKEND_LUT_MIPI_FMT_LS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) + 1 // 7 -#define HRT_MIPI_BACKEND_LUT_MIPI_FMT_MS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_FMT_LS_BIT(sid_width) + _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH - 1 // 12 - -/* #define HRT_MIPI_BACKEND_SP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) + 1 // 7 */ - -#define HRT_MIPI_BACKEND_SP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) + 1 -#define HRT_MIPI_BACKEND_LP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_FMT_MS_BIT(sid_width) + 1 // 13 - - -// temp solution -//#define HRT_MIPI_BACKEND_STREAM_PIXA_VAL_BIT HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT + 1 // 8 -//#define HRT_MIPI_BACKEND_STREAM_PIXB_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXA_VAL_BIT + 1 // 9 -//#define HRT_MIPI_BACKEND_STREAM_PIXC_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXB_VAL_BIT + 1 // 10 -//#define HRT_MIPI_BACKEND_STREAM_PIXD_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXC_VAL_BIT + 1 // 11 -//#define HRT_MIPI_BACKEND_STREAM_PIXA_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXD_VAL_BIT + 1 // 12 -//#define HRT_MIPI_BACKEND_STREAM_PIXA_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXA_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 25 -//#define HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXA_MS_BIT + 1 // 26 -//#define HRT_MIPI_BACKEND_STREAM_PIXB_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 39 -//#define HRT_MIPI_BACKEND_STREAM_PIXC_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXB_MS_BIT + 1 // 40 -//#define HRT_MIPI_BACKEND_STREAM_PIXC_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXC_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 53 -//#define HRT_MIPI_BACKEND_STREAM_PIXD_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXC_MS_BIT + 1 // 54 -//#define HRT_MIPI_BACKEND_STREAM_PIXD_MS_BIT 
HRT_MIPI_BACKEND_STREAM_PIXD_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 67 - -// vc hidden in pixb data (passed as raw12 the pipe) -#define HRT_MIPI_BACKEND_STREAM_VC_LS_BIT(sid_width,ppc,pix_width) HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width,ppc,pix_width,1) + 10 //HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT + 10 // 36 -#define HRT_MIPI_BACKEND_STREAM_VC_MS_BIT(sid_width,ppc,pix_width) HRT_MIPI_BACKEND_STREAM_VC_LS_BIT(sid_width,ppc,pix_width) + 1 // 37 - - - - -#endif /* _mipi_backend_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mmu_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mmu_defs.h deleted file mode 100644 index c038f39ffd25..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mmu_defs.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _mmu_defs_h -#define _mmu_defs_h - -#define _HRT_MMU_INVALIDATE_TLB_REG_IDX 0 -#define _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX 1 - -#define _HRT_MMU_REG_ALIGN 4 - -#endif /* _mmu_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/rx_csi_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/rx_csi_defs.h deleted file mode 100644 index 0aad86e2e914..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/rx_csi_defs.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _csi_rx_defs_h -#define _csi_rx_defs_h - -//#include "rx_csi_common_defs.h" - - - -#define MIPI_PKT_DATA_WIDTH 32 -//#define CLK_CROSSING_FIFO_DEPTH 16 -#define _CSI_RX_REG_ALIGN 4 - -//define number of IRQ (see below for definition of each IRQ bits) -#define CSI_RX_NOF_IRQS_BYTE_DOMAIN 11 -#define CSI_RX_NOF_IRQS_ISP_DOMAIN 15 // CSI_RX_NOF_IRQS_BYTE_DOMAIN + remaining from Dphy_rx already on ISP clock domain - -// REGISTER DESCRIPTION -//#define _HRT_CSI_RX_SOFTRESET_REG_IDX 0 -#define _HRT_CSI_RX_ENABLE_REG_IDX 0 -#define _HRT_CSI_RX_NOF_ENABLED_LANES_REG_IDX 1 -#define _HRT_CSI_RX_ERROR_HANDLING_REG_IDX 2 -#define _HRT_CSI_RX_STATUS_REG_IDX 3 -#define _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX 4 -#define _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX 5 -//#define _HRT_CSI_RX_IRQ_CONFIG_REG_IDX 6 -#define _HRT_CSI_RX_DLY_CNT_TERMEN_CLANE_REG_IDX 6 -#define _HRT_CSI_RX_DLY_CNT_SETTLE_CLANE_REG_IDX 7 -#define _HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane_idx) (8+(2*lane_idx)) -#define _HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane_idx) (8+(2*lane_idx)+1) - -#define _HRT_CSI_RX_NOF_REGISTERS(nof_dlanes) (8+2*(nof_dlanes)) - - -//#define _HRT_CSI_RX_SOFTRESET_REG_WIDTH 1 -#define _HRT_CSI_RX_ENABLE_REG_WIDTH 1 -#define _HRT_CSI_RX_NOF_ENABLED_LANES_REG_WIDTH 3 -#define _HRT_CSI_RX_ERROR_HANDLING_REG_WIDTH 4 -#define _HRT_CSI_RX_STATUS_REG_WIDTH 1 -#define _HRT_CSI_RX_STATUS_DLANE_HS_REG_WIDTH 8 -#define _HRT_CSI_RX_STATUS_DLANE_LP_REG_WIDTH 24 -#define _HRT_CSI_RX_IRQ_CONFIG_REG_WIDTH (CSI_RX_NOF_IRQS_ISP_DOMAIN) -#define _HRT_CSI_RX_DLY_CNT_REG_WIDTH 24 -//#define _HRT_CSI_RX_IRQ_STATUS_REG_WIDTH NOF_IRQS -//#define _HRT_CSI_RX_IRQ_CLEAR_REG_WIDTH 0 - - -#define ONE_LANE_ENABLED 0 -#define TWO_LANES_ENABLED 1 -#define THREE_LANES_ENABLED 2 -#define FOUR_LANES_ENABLED 3 - -// Error handling reg bit positions -#define ERR_DECISION_BIT 0 -#define DISC_RESERVED_SP_BIT 1 -#define DISC_RESERVED_LP_BIT 2 -#define DIS_INCOMP_PKT_CHK_BIT 3 - -#define 
_HRT_CSI_RX_IRQ_CONFIG_REG_VAL_POSEDGE 0 -#define _HRT_CSI_RX_IRQ_CONFIG_REG_VAL_ORIGINAL 1 - -// Interrupt bits -#define _HRT_RX_CSI_IRQ_SINGLE_PH_ERROR_CORRECTED 0 -#define _HRT_RX_CSI_IRQ_MULTIPLE_PH_ERROR_DETECTED 1 -#define _HRT_RX_CSI_IRQ_PAYLOAD_CHECKSUM_ERROR 2 -#define _HRT_RX_CSI_IRQ_FIFO_FULL_ERROR 3 -#define _HRT_RX_CSI_IRQ_RESERVED_SP_DETECTED 4 -#define _HRT_RX_CSI_IRQ_RESERVED_LP_DETECTED 5 -//#define _HRT_RX_CSI_IRQ_PREMATURE_SOP 6 -#define _HRT_RX_CSI_IRQ_INCOMPLETE_PACKET 6 -#define _HRT_RX_CSI_IRQ_FRAME_SYNC_ERROR 7 -#define _HRT_RX_CSI_IRQ_LINE_SYNC_ERROR 8 -#define _HRT_RX_CSI_IRQ_DLANE_HS_SOT_ERROR 9 -#define _HRT_RX_CSI_IRQ_DLANE_HS_SOT_SYNC_ERROR 10 - -#define _HRT_RX_CSI_IRQ_DLANE_ESC_ERROR 11 -#define _HRT_RX_CSI_IRQ_DLANE_TRIGGERESC 12 -#define _HRT_RX_CSI_IRQ_DLANE_ULPSESC 13 -#define _HRT_RX_CSI_IRQ_CLANE_ULPSCLKNOT 14 - -/* OLD ARASAN FRONTEND IRQs -#define _HRT_RX_CSI_IRQ_OVERRUN_BIT 0 -#define _HRT_RX_CSI_IRQ_RESERVED_BIT 1 -#define _HRT_RX_CSI_IRQ_SLEEP_MODE_ENTRY_BIT 2 -#define _HRT_RX_CSI_IRQ_SLEEP_MODE_EXIT_BIT 3 -#define _HRT_RX_CSI_IRQ_ERR_SOT_HS_BIT 4 -#define _HRT_RX_CSI_IRQ_ERR_SOT_SYNC_HS_BIT 5 -#define _HRT_RX_CSI_IRQ_ERR_CONTROL_BIT 6 -#define _HRT_RX_CSI_IRQ_ERR_ECC_DOUBLE_BIT 7 -#define _HRT_RX_CSI_IRQ_ERR_ECC_CORRECTED_BIT 8 -#define _HRT_RX_CSI_IRQ_ERR_ECC_NO_CORRECTION_BIT 9 -#define _HRT_RX_CSI_IRQ_ERR_CRC_BIT 10 -#define _HRT_RX_CSI_IRQ_ERR_ID_BIT 11 -#define _HRT_RX_CSI_IRQ_ERR_FRAME_SYNC_BIT 12 -#define _HRT_RX_CSI_IRQ_ERR_FRAME_DATA_BIT 13 -#define _HRT_RX_CSI_IRQ_DATA_TIMEOUT_BIT 14 -#define _HRT_RX_CSI_IRQ_ERR_ESCAPE_BIT 15 -#define _HRT_RX_CSI_IRQ_ERR_LINE_SYNC_BIT 16 -*/ - - -////Bit Description for reg _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX -#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE0 0 -#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE1 1 -#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE2 2 -#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE3 3 -#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE0 4 
-#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE1 5 -#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE2 6 -#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE3 7 - -////Bit Description for reg _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX -#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE0 0 -#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE1 1 -#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE2 2 -#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE3 3 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE0 4 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE0 5 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE0 6 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE0 7 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE1 8 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE1 9 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE1 10 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE1 11 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE2 12 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE2 13 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE2 14 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE2 15 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE3 16 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE3 17 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE3 18 -#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE3 19 -#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE0 20 -#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE1 21 -#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE2 22 -#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE3 23 - -/*********************************************************/ -/*** Relevant declarations from rx_csi_common_defs.h *****/ -/*********************************************************/ -/* packet bit definition */ -#define _HRT_RX_CSI_PKT_SOP_BITPOS 32 -#define _HRT_RX_CSI_PKT_EOP_BITPOS 33 -#define _HRT_RX_CSI_PKT_PAYLOAD_BITPOS 0 -#define _HRT_RX_CSI_PH_CH_ID_BITPOS 22 -#define 
_HRT_RX_CSI_PH_FMT_ID_BITPOS 16 -#define _HRT_RX_CSI_PH_DATA_FIELD_BITPOS 0 - -#define _HRT_RX_CSI_PKT_SOP_BITS 1 -#define _HRT_RX_CSI_PKT_EOP_BITS 1 -#define _HRT_RX_CSI_PKT_PAYLOAD_BITS 32 -#define _HRT_RX_CSI_PH_CH_ID_BITS 2 -#define _HRT_RX_CSI_PH_FMT_ID_BITS 6 -#define _HRT_RX_CSI_PH_DATA_FIELD_BITS 16 - -/* Definition of data format ID at the interface CSS_receiver units */ -#define _HRT_RX_CSI_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */ -#define _HRT_RX_CSI_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */ -#define _HRT_RX_CSI_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */ -#define _HRT_RX_CSI_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */ - - -#endif /* _csi_rx_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/scalar_processor_2400_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/scalar_processor_2400_params.h deleted file mode 100644 index 9b6c2893d950..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/scalar_processor_2400_params.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _scalar_processor_2400_params_h -#define _scalar_processor_2400_params_h - -#include "cell_params.h" - -#endif /* _scalar_processor_2400_params_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/str2mem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/str2mem_defs.h deleted file mode 100644 index 1cb62444cf68..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/str2mem_defs.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _ST2MEM_DEFS_H -#define _ST2MEM_DEFS_H - -#define _STR2MEM_CRUN_BIT 0x100000 -#define _STR2MEM_CMD_BITS 0x0F0000 -#define _STR2MEM_COUNT_BITS 0x00FFFF - -#define _STR2MEM_BLOCKS_CMD 0xA0000 -#define _STR2MEM_PACKETS_CMD 0xB0000 -#define _STR2MEM_BYTES_CMD 0xC0000 -#define _STR2MEM_BYTES_FROM_PACKET_CMD 0xD0000 - -#define _STR2MEM_SOFT_RESET_REG_ID 0 -#define _STR2MEM_INPUT_ENDIANNESS_REG_ID 1 -#define _STR2MEM_OUTPUT_ENDIANNESS_REG_ID 2 -#define _STR2MEM_BIT_SWAPPING_REG_ID 3 -#define _STR2MEM_BLOCK_SYNC_LEVEL_REG_ID 4 -#define _STR2MEM_PACKET_SYNC_LEVEL_REG_ID 5 -#define _STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID 6 -#define _STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID 7 -#define _STR2MEM_EN_STAT_UPDATE_ID 8 - -#define _STR2MEM_REG_ALIGN 4 - -#endif /* _ST2MEM_DEFS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/stream2mmio_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/stream2mmio_defs.h deleted file mode 100644 index 46b52fe5ae99..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/stream2mmio_defs.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _STREAM2MMMIO_DEFS_H -#define _STREAM2MMMIO_DEFS_H - -#include - -#define _STREAM2MMIO_REG_ALIGN 4 - -#define _STREAM2MMIO_COMMAND_REG_ID 0 -#define _STREAM2MMIO_ACKNOWLEDGE_REG_ID 1 -#define _STREAM2MMIO_PIX_WIDTH_ID_REG_ID 2 -#define _STREAM2MMIO_START_ADDR_REG_ID 3 /* master port address,NOT Byte */ -#define _STREAM2MMIO_END_ADDR_REG_ID 4 /* master port address,NOT Byte */ -#define _STREAM2MMIO_STRIDE_REG_ID 5 /* stride in master port words, increment is per packet for long sids, stride is not used for short sid's*/ -#define _STREAM2MMIO_NUM_ITEMS_REG_ID 6 /* number of packets for store packets cmd, number of words for store_words cmd */ -#define _STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID 7 /* if this register is 1, input will be stalled if there is no pending command for this sid */ -#define _STREAM2MMIO_REGS_PER_SID 8 - -#define _STREAM2MMIO_SID_REG_OFFSET 8 -#define _STREAM2MMIO_MAX_NOF_SIDS 64 /* value used in hss model */ - -/* command token definition */ -#define _STREAM2MMIO_CMD_TOKEN_CMD_LSB 0 /* bits 1-0 is for the command field */ -#define _STREAM2MMIO_CMD_TOKEN_CMD_MSB 1 - -#define _STREAM2MMIO_CMD_TOKEN_WIDTH (_STREAM2MMIO_CMD_TOKEN_CMD_MSB+1-_STREAM2MMIO_CMD_TOKEN_CMD_LSB) - -#define _STREAM2MMIO_CMD_TOKEN_STORE_WORDS 0 /* command for storing a number of output words indicated by reg _STREAM2MMIO_NUM_ITEMS */ -#define _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS 1 /* command for storing a number of packets indicated by reg _STREAM2MMIO_NUM_ITEMS */ -#define _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME 2 /* command for waiting for a frame start */ - -/* acknowledges from packer module */ -/* fields: eof - indicates whether last (short) packet received was an eof packet */ -/* eop - indicates whether command has ended due to packet end or due to no of words requested has been received */ -/* count - indicates number of words stored */ -#define _STREAM2MMIO_PACK_NUM_ITEMS_BITS 16 -#define _STREAM2MMIO_PACK_ACK_EOP_BIT _STREAM2MMIO_PACK_NUM_ITEMS_BITS -#define 
_STREAM2MMIO_PACK_ACK_EOF_BIT (_STREAM2MMIO_PACK_ACK_EOP_BIT+1) - -/* acknowledge token definition */ -#define _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_LSB 0 /* bits 3-0 is for the command field */ -#define _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_MSB (_STREAM2MMIO_PACK_NUM_ITEMS_BITS-1) -#define _STREAM2MMIO_ACK_TOKEN_EOP_BIT _STREAM2MMIO_PACK_ACK_EOP_BIT -#define _STREAM2MMIO_ACK_TOKEN_EOF_BIT _STREAM2MMIO_PACK_ACK_EOF_BIT -#define _STREAM2MMIO_ACK_TOKEN_VALID_BIT (_STREAM2MMIO_ACK_TOKEN_EOF_BIT+1) /* this bit indicates a valid ack */ - /* if there is no valid ack, a read */ - /* on the ack register returns 0 */ -#define _STREAM2MMIO_ACK_TOKEN_WIDTH (_STREAM2MMIO_ACK_TOKEN_VALID_BIT+1) - -/* commands for packer module */ -#define _STREAM2MMIO_PACK_CMD_STORE_WORDS 0 -#define _STREAM2MMIO_PACK_CMD_STORE_LONG_PACKET 1 -#define _STREAM2MMIO_PACK_CMD_STORE_SHORT_PACKET 2 - - - - -#endif /* _STREAM2MMIO_DEFS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/streaming_to_mipi_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/streaming_to_mipi_defs.h deleted file mode 100644 index 60143b8743a2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/streaming_to_mipi_defs.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _streaming_to_mipi_defs_h -#define _streaming_to_mipi_defs_h - -#define HIVE_STR_TO_MIPI_VALID_A_BIT 0 -#define HIVE_STR_TO_MIPI_VALID_B_BIT 1 -#define HIVE_STR_TO_MIPI_SOL_BIT 2 -#define HIVE_STR_TO_MIPI_EOL_BIT 3 -#define HIVE_STR_TO_MIPI_SOF_BIT 4 -#define HIVE_STR_TO_MIPI_EOF_BIT 5 -#define HIVE_STR_TO_MIPI_CH_ID_LSB 6 - -#define HIVE_STR_TO_MIPI_DATA_A_LSB (HIVE_STR_TO_MIPI_VALID_B_BIT + 1) - -#endif /* _streaming_to_mipi_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/timed_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/timed_controller_defs.h deleted file mode 100644 index d2b8972b0d9e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/timed_controller_defs.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _timed_controller_defs_h -#define _timed_controller_defs_h - -#define _HRT_TIMED_CONTROLLER_CMD_REG_IDX 0 - -#define _HRT_TIMED_CONTROLLER_REG_ALIGN 4 - -#endif /* _timed_controller_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/var.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/var.h deleted file mode 100644 index 19b19ef484f9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/var.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _HRT_VAR_H -#define _HRT_VAR_H - -#include "version.h" -#include "system_api.h" -#include "hive_types.h" - -#define hrt_int_type_of_char char -#define hrt_int_type_of_uchar unsigned char -#define hrt_int_type_of_short short -#define hrt_int_type_of_ushort unsigned short -#define hrt_int_type_of_int int -#define hrt_int_type_of_uint unsigned int -#define hrt_int_type_of_long long -#define hrt_int_type_of_ulong unsigned long -#define hrt_int_type_of_ptr unsigned int - -#define hrt_host_type_of_char char -#define hrt_host_type_of_uchar unsigned char -#define hrt_host_type_of_short short -#define hrt_host_type_of_ushort unsigned short -#define hrt_host_type_of_int int -#define hrt_host_type_of_uint unsigned int -#define hrt_host_type_of_long long -#define hrt_host_type_of_ulong unsigned long -#define hrt_host_type_of_ptr void* - -#define HRT_TYPE_BYTES(cell, type) (HRT_TYPE_BITS(cell, type)/8) -#define HRT_HOST_TYPE(cell_type) HRTCAT(hrt_host_type_of_, cell_type) -#define HRT_INT_TYPE(type) HRTCAT(hrt_int_type_of_, type) - -#ifdef C_RUN - -#ifdef C_RUN_DYNAMIC_LINK_PROGRAMS -extern void *csim_processor_get_crun_symbol(hive_proc_id p, const char *sym); -#define _hrt_cell_get_crun_symbol(cell,sym) csim_processor_get_crun_symbol(cell,HRTSTR(sym)) -#define _hrt_cell_get_crun_indexed_symbol(cell,sym) csim_processor_get_crun_symbol(cell,HRTSTR(sym)) -#else -#define _hrt_cell_get_crun_symbol(cell,sym) (&sym) -#define _hrt_cell_get_crun_indexed_symbol(cell,sym) (sym) -#endif // C_RUN_DYNAMIC_LINK_PROGRAMS - -#define hrt_scalar_store(cell, type, var, data) \ - ((*(HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_symbol(cell,var)) = (data)) -#define hrt_scalar_load(cell, type, var) \ - ((*(HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_symbol(cell,var))) - -#define hrt_indexed_store(cell, type, array, index, data) \ - ((((HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_indexed_symbol(cell,array))[index]) = (data)) -#define hrt_indexed_load(cell, type, array, index) \ - 
(((HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_indexed_symbol(cell,array))[index]) - -#else /* C_RUN */ - -#define hrt_scalar_store(cell, type, var, data) \ - HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\ - cell, \ - HRTCAT(HIVE_MEM_,var), \ - HRTCAT(HIVE_ADDR_,var), \ - (HRT_INT_TYPE(type))(data)) - -#define hrt_scalar_load(cell, type, var) \ - (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \ - cell, \ - HRTCAT(HIVE_MEM_,var), \ - HRTCAT(HIVE_ADDR_,var))) - -#define hrt_indexed_store(cell, type, array, index, data) \ - HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\ - cell, \ - HRTCAT(HIVE_MEM_,array), \ - (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)), \ - (HRT_INT_TYPE(type))(data)) - -#define hrt_indexed_load(cell, type, array, index) \ - (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \ - cell, \ - HRTCAT(HIVE_MEM_,array), \ - (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)))) - -#endif /* C_RUN */ - -#endif /* _HRT_VAR_H */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/version.h deleted file mode 100644 index bbc4948baea9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/version.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef HRT_VERSION_H -#define HRT_VERSION_H -#define HRT_VERSION_MAJOR 1 -#define HRT_VERSION_MINOR 4 -#define HRT_VERSION 1_4 -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/ibuf_ctrl_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/ibuf_ctrl_global.h deleted file mode 100644 index edb23252c48e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/ibuf_ctrl_global.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IBUF_CTRL_GLOBAL_H_INCLUDED__ -#define __IBUF_CTRL_GLOBAL_H_INCLUDED__ - -#include - -#include /* _IBUF_CNTRL_RECALC_WORDS_STATUS, - * _IBUF_CNTRL_ARBITERS_STATUS, - * _IBUF_CNTRL_PROC_REG_ALIGN, - * etc. 
- */ - -/* Definition of contents of main controller state register is lacking - * in ibuf_cntrl_defs.h, so define these here: - */ -#define _IBUF_CNTRL_MAIN_CNTRL_FSM_MASK 0xf -#define _IBUF_CNTRL_MAIN_CNTRL_FSM_NEXT_COMMAND_CHECK 0x9 -#define _IBUF_CNTRL_MAIN_CNTRL_MEM_INP_BUF_ALLOC (1 << 8) -#define _IBUF_CNTRL_DMA_SYNC_WAIT_FOR_SYNC 1 -#define _IBUF_CNTRL_DMA_SYNC_FSM_WAIT_FOR_ACK (0x3 << 1) - -typedef struct ib_buffer_s ib_buffer_t; -struct ib_buffer_s { - uint32_t start_addr; /* start address of the buffer in the - * "input-buffer hardware block" - */ - - uint32_t stride; /* stride per buffer line (in bytes) */ - uint32_t lines; /* lines in the buffer */ -}; - -typedef struct ibuf_ctrl_cfg_s ibuf_ctrl_cfg_t; -struct ibuf_ctrl_cfg_s { - - bool online; - - struct { - /* DMA configuration */ - uint32_t channel; - uint32_t cmd; /* must be _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND */ - - /* DMA reconfiguration */ - uint32_t shift_returned_items; - uint32_t elems_per_word_in_ibuf; - uint32_t elems_per_word_in_dest; - } dma_cfg; - - ib_buffer_t ib_buffer; - - struct { - uint32_t stride; - uint32_t start_addr; - uint32_t lines; - } dest_buf_cfg; - - uint32_t items_per_store; - uint32_t stores_per_frame; - - struct { - uint32_t sync_cmd; /* must be _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME */ - uint32_t store_cmd; /* must be _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS */ - } stream2mmio_cfg; -}; - -extern const uint32_t N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID]; - -#endif /* __IBUF_CTRL_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/input_system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/input_system_global.h deleted file mode 100644 index 25e3f04f374b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/input_system_global.h +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ -#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ - -#define IS_INPUT_SYSTEM_VERSION_VERSION_2401 - -/* CSI reveiver has 3 ports. */ -#define N_CSI_PORTS (3) - -#include "isys_dma.h" /* isys2401_dma_channel, - * isys2401_dma_cfg_t - */ - -#include "ibuf_ctrl.h" /* ibuf_cfg_t, - * ibuf_ctrl_cfg_t - */ - -#include "isys_stream2mmio.h" /* stream2mmio_cfg_t */ - -#include "csi_rx.h" /* csi_rx_frontend_cfg_t, - * csi_rx_backend_cfg_t, - * csi_rx_backend_lut_entry_t - */ -#include "pixelgen.h" - - -#define INPUT_SYSTEM_N_STREAM_ID 6 /* maximum number of simultaneous - virtual channels supported*/ - -typedef enum { - INPUT_SYSTEM_ERR_NO_ERROR = 0, - INPUT_SYSTEM_ERR_CREATE_CHANNEL_FAIL, - INPUT_SYSTEM_ERR_CONFIGURE_CHANNEL_FAIL, - INPUT_SYSTEM_ERR_OPEN_CHANNEL_FAIL, - INPUT_SYSTEM_ERR_TRANSFER_FAIL, - INPUT_SYSTEM_ERR_CREATE_INPUT_PORT_FAIL, - INPUT_SYSTEM_ERR_CONFIGURE_INPUT_PORT_FAIL, - INPUT_SYSTEM_ERR_OPEN_INPUT_PORT_FAIL, - N_INPUT_SYSTEM_ERR -} input_system_err_t; - -typedef enum { - INPUT_SYSTEM_SOURCE_TYPE_UNDEFINED = 0, - INPUT_SYSTEM_SOURCE_TYPE_SENSOR, - INPUT_SYSTEM_SOURCE_TYPE_TPG, - INPUT_SYSTEM_SOURCE_TYPE_PRBS, - N_INPUT_SYSTEM_SOURCE_TYPE -} input_system_source_type_t; - -typedef enum { - INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME, - INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST, -} input_system_polling_mode_t; - -typedef struct input_system_channel_s input_system_channel_t; -struct input_system_channel_s { - stream2mmio_ID_t stream2mmio_id; - stream2mmio_sid_ID_t 
stream2mmio_sid_id; - - ibuf_ctrl_ID_t ibuf_ctrl_id; - ib_buffer_t ib_buffer; - - isys2401_dma_ID_t dma_id; - isys2401_dma_channel dma_channel; -}; - -typedef struct input_system_channel_cfg_s input_system_channel_cfg_t; -struct input_system_channel_cfg_s { - stream2mmio_cfg_t stream2mmio_cfg; - ibuf_ctrl_cfg_t ibuf_ctrl_cfg; - isys2401_dma_cfg_t dma_cfg; - isys2401_dma_port_cfg_t dma_src_port_cfg; - isys2401_dma_port_cfg_t dma_dest_port_cfg; -}; - -typedef struct input_system_input_port_s input_system_input_port_t; -struct input_system_input_port_s { - input_system_source_type_t source_type; - - struct { - csi_rx_frontend_ID_t frontend_id; - csi_rx_backend_ID_t backend_id; - csi_mipi_packet_type_t packet_type; - csi_rx_backend_lut_entry_t backend_lut_entry; - } csi_rx; - - struct { - csi_mipi_packet_type_t packet_type; - csi_rx_backend_lut_entry_t backend_lut_entry; - } metadata; - - struct { - pixelgen_ID_t pixelgen_id; - } pixelgen; -}; - -typedef struct input_system_input_port_cfg_s input_system_input_port_cfg_t; -struct input_system_input_port_cfg_s { - struct { - csi_rx_frontend_cfg_t frontend_cfg; - csi_rx_backend_cfg_t backend_cfg; - csi_rx_backend_cfg_t md_backend_cfg; - } csi_rx_cfg; - - struct { - pixelgen_tpg_cfg_t tpg_cfg; - pixelgen_prbs_cfg_t prbs_cfg; - } pixelgen_cfg; -}; - -typedef struct input_system_cfg_s input_system_cfg_t; -struct input_system_cfg_s { - input_system_input_port_ID_t input_port_id; - - input_system_source_type_t mode; -#ifdef ISP2401 - input_system_polling_mode_t polling_mode; -#endif - - bool online; - bool raw_packed; - int8_t linked_isys_stream_id; - - struct { - bool comp_enable; - int32_t active_lanes; - int32_t fmt_type; - int32_t ch_id; - int32_t comp_predictor; - int32_t comp_scheme; - } csi_port_attr; - - pixelgen_tpg_cfg_t tpg_port_attr; - - pixelgen_prbs_cfg_t prbs_port_attr; - - struct { - int32_t align_req_in_bytes; - int32_t bits_per_pixel; - int32_t pixels_per_line; - int32_t lines_per_frame; - } 
input_port_resolution; - - struct { - int32_t left_padding; - int32_t max_isp_input_width; - } output_port_attr; - - struct { - bool enable; - int32_t fmt_type; - int32_t align_req_in_bytes; - int32_t bits_per_pixel; - int32_t pixels_per_line; - int32_t lines_per_frame; - } metadata; -}; - -typedef struct virtual_input_system_stream_s virtual_input_system_stream_t; -struct virtual_input_system_stream_s { - uint32_t id; /*Used when multiple MIPI data types and/or virtual channels are used. - Must be unique within one CSI RX - and lower than SH_CSS_MAX_ISYS_CHANNEL_NODES */ - uint8_t enable_metadata; - input_system_input_port_t input_port; - input_system_channel_t channel; - input_system_channel_t md_channel; /* metadata channel */ - uint8_t online; - int8_t linked_isys_stream_id; - uint8_t valid; -#ifdef ISP2401 - input_system_polling_mode_t polling_mode; - int32_t subscr_index; -#endif -}; - -typedef struct virtual_input_system_stream_cfg_s virtual_input_system_stream_cfg_t; -struct virtual_input_system_stream_cfg_s { - uint8_t enable_metadata; - input_system_input_port_cfg_t input_port_cfg; - input_system_channel_cfg_t channel_cfg; - input_system_channel_cfg_t md_channel_cfg; - uint8_t valid; -}; - -#define ISP_INPUT_BUF_START_ADDR 0 -#define NUM_OF_INPUT_BUF 2 -#define NUM_OF_LINES_PER_BUF 2 -#define LINES_OF_ISP_INPUT_BUF (NUM_OF_INPUT_BUF * NUM_OF_LINES_PER_BUF) -#define ISP_INPUT_BUF_STRIDE SH_CSS_MAX_SENSOR_WIDTH - - -#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h deleted file mode 100644 index 1be5c6956d65..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __ISYS_DMA_GLOBAL_H_INCLUDED__ -#define __ISYS_DMA_GLOBAL_H_INCLUDED__ - -#include - -#define HIVE_ISYS2401_DMA_IBUF_DDR_CONN 0 -#define HIVE_ISYS2401_DMA_IBUF_VMEM_CONN 1 -#define _DMA_V2_ZERO_EXTEND 0 -#define _DMA_V2_SIGN_EXTEND 1 - -#define _DMA_ZERO_EXTEND _DMA_V2_ZERO_EXTEND -#define _DMA_SIGN_EXTEND _DMA_V2_SIGN_EXTEND - -/******************************************************** - * - * DMA Port. - * - * The DMA port definition for the input system - * 2401 DMA is the duplication of the DMA port - * definition for the CSS system DMA. It is duplicated - * here just as the temporal step before the device libary - * is available. The device libary is suppose to provide - * the capability of reusing the control interface of the - * same device prototypes. The refactor team will work on - * this, right? - * - ********************************************************/ -typedef struct isys2401_dma_port_cfg_s isys2401_dma_port_cfg_t; -struct isys2401_dma_port_cfg_s { - uint32_t stride; - uint32_t elements; - uint32_t cropping; - uint32_t width; - }; -/* end of DMA Port */ - -/************************************************ - * - * DMA Device. - * - * The DMA device definition for the input system - * 2401 DMA is the duplicattion of the DMA device - * definition for the CSS system DMA. It is duplicated - * here just as the temporal step before the device libary - * is available. 
The device libary is suppose to provide - * the capability of reusing the control interface of the - * same device prototypes. The refactor team will work on - * this, right? - * - ************************************************/ -typedef enum { - isys2401_dma_ibuf_to_ddr_connection = HIVE_ISYS2401_DMA_IBUF_DDR_CONN, - isys2401_dma_ibuf_to_vmem_connection = HIVE_ISYS2401_DMA_IBUF_VMEM_CONN -} isys2401_dma_connection; - -typedef enum { - isys2401_dma_zero_extension = _DMA_ZERO_EXTEND, - isys2401_dma_sign_extension = _DMA_SIGN_EXTEND -} isys2401_dma_extension; - -typedef struct isys2401_dma_cfg_s isys2401_dma_cfg_t; -struct isys2401_dma_cfg_s { - isys2401_dma_channel channel; - isys2401_dma_connection connection; - isys2401_dma_extension extension; - uint32_t height; -}; -/* end of DMA Device */ - -/* isys2401_dma_channel limits per DMA ID */ -extern const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID]; - -#endif /* __ISYS_DMA_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_irq_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_irq_global.h deleted file mode 100644 index 41d051db3987..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_irq_global.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __ISYS_IRQ_GLOBAL_H__ -#define __ISYS_IRQ_GLOBAL_H__ - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - -/* Register offset/index from base location */ -#define ISYS_IRQ_EDGE_REG_IDX (0) -#define ISYS_IRQ_MASK_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 1) -#define ISYS_IRQ_STATUS_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 2) -#define ISYS_IRQ_CLEAR_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 3) -#define ISYS_IRQ_ENABLE_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 4) -#define ISYS_IRQ_LEVEL_NO_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 5) - -/* Register values */ -#define ISYS_IRQ_MASK_REG_VALUE (0xFFFF) -#define ISYS_IRQ_CLEAR_REG_VALUE (0xFFFF) -#define ISYS_IRQ_ENABLE_REG_VALUE (0xFFFF) - -#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */ - -#endif /* __ISYS_IRQ_GLOBAL_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_stream2mmio_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_stream2mmio_global.h deleted file mode 100644 index 649f44fd2408..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_stream2mmio_global.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__ -#define __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__ - -#include - -typedef struct stream2mmio_cfg_s stream2mmio_cfg_t; -struct stream2mmio_cfg_s { - uint32_t bits_per_pixel; - uint32_t enable_blocking; -}; - -/* Stream2MMIO limits per ID*/ -/* - * Stream2MMIO 0 has 8 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID]. - * - * Stream2MMIO 1 has 4 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...TREAM2MMIO_SID3_ID]. - * - * Stream2MMIO 2 has 4 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID]. - */ -extern const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID]; - -#endif /* __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h deleted file mode 100644 index 0bf2feb8bbfb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __PIXELGEN_GLOBAL_H_INCLUDED__ -#define __PIXELGEN_GLOBAL_H_INCLUDED__ - -#include - -/** - * Pixel-generator. ("pixelgen_global.h") - */ -/* - * Duplicates "sync_generator_cfg_t" in "input_system_global.h". 
- */ -typedef struct sync_generator_cfg_s sync_generator_cfg_t; -struct sync_generator_cfg_s { - uint32_t hblank_cycles; - uint32_t vblank_cycles; - uint32_t pixels_per_clock; - uint32_t nr_of_frames; - uint32_t pixels_per_line; - uint32_t lines_per_frame; -}; - -typedef enum { - PIXELGEN_TPG_MODE_RAMP = 0, - PIXELGEN_TPG_MODE_CHBO, - PIXELGEN_TPG_MODE_MONO, - N_PIXELGEN_TPG_MODE -} pixelgen_tpg_mode_t; - -/* - * "pixelgen_tpg_cfg_t" duplicates parts of - * "tpg_cfg_t" in "input_system_global.h". - */ -typedef struct pixelgen_tpg_cfg_s pixelgen_tpg_cfg_t; -struct pixelgen_tpg_cfg_s { - pixelgen_tpg_mode_t mode; /* CHBO, MONO */ - - struct { - /* be used by CHBO and MON */ - uint32_t R1; - uint32_t G1; - uint32_t B1; - - /* be used by CHBO only */ - uint32_t R2; - uint32_t G2; - uint32_t B2; - } color_cfg; - - struct { - uint32_t h_mask; /* horizontal mask */ - uint32_t v_mask; /* vertical mask */ - uint32_t hv_mask; /* horizontal+vertical mask? */ - } mask_cfg; - - struct { - int32_t h_delta; /* horizontal delta? */ - int32_t v_delta; /* vertical delta? */ - } delta_cfg; - - sync_generator_cfg_t sync_gen_cfg; -}; - -/* - * "pixelgen_prbs_cfg_t" duplicates parts of - * prbs_cfg_t" in "input_system_global.h". - */ -typedef struct pixelgen_prbs_cfg_s pixelgen_prbs_cfg_t; -struct pixelgen_prbs_cfg_s { - int32_t seed0; - int32_t seed1; - - sync_generator_cfg_t sync_gen_cfg; -}; - -/* end of Pixel-generator: TPG. ("pixelgen_global.h") */ -#endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/spmem_dump.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/spmem_dump.c deleted file mode 100644 index d733a3503a20..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/spmem_dump.c +++ /dev/null @@ -1,3686 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _sp_map_h_ -#define _sp_map_h_ - - -#ifndef _hrt_dummy_use_blob_sp -#define _hrt_dummy_use_blob_sp() -#endif - -#define _hrt_cell_load_program_sp(proc) _hrt_cell_load_program_embedded(proc, sp) - -#ifndef ISP2401 -/* function longjmp: 680D */ -#else -/* function longjmp: 6A0B */ -#endif - -#ifndef ISP2401 -/* function tmpmem_init_dmem: 6558 */ -#else -/* function tmpmem_init_dmem: 671E */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_addr_B: 3C50 */ -#else -/* function ia_css_dmaproxy_sp_set_addr_B: 3DC5 */ - -/* function ia_css_pipe_data_init_tagger_resources: AC7 */ -#endif - -/* function debug_buffer_set_ddr_addr: DD */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_mipi -#define HIVE_MEM_vbuf_mipi scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_mipi 0x7398 -#else -#define HIVE_ADDR_vbuf_mipi 0x7444 -#endif -#define HIVE_SIZE_vbuf_mipi 12 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_mipi scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_mipi 0x7398 -#else -#define HIVE_ADDR_sp_vbuf_mipi 0x7444 -#endif -#define HIVE_SIZE_sp_vbuf_mipi 12 - -#ifndef ISP2401 -/* function ia_css_event_sp_decode: 3E41 */ -#else -/* function ia_css_event_sp_decode: 3FB6 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_get_size: 51BF */ -#else -/* function ia_css_queue_get_size: 53C8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_load: 5800 */ -#else -/* function ia_css_queue_load: 59DF */ -#endif - -#ifndef ISP2401 -/* function 
setjmp: 6816 */ -#else -/* function setjmp: 6A14 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_sfi_get_current_frame: 27BF */ -#else -/* function ia_css_pipeline_sp_sfi_get_current_frame: 2790 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp2host_isys_event_queue -#define HIVE_MEM_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x5760 -#else -#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x57FC -#endif -#define HIVE_SIZE_sem_for_sp2host_isys_event_queue 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x5760 -#else -#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x57FC -#endif -#define HIVE_SIZE_sp_sem_for_sp2host_isys_event_queue 20 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_wait_for_ack: 6DA9 */ -#else -/* function ia_css_dmaproxy_sp_wait_for_ack: 6FF7 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_func: 596B */ -#else -/* function ia_css_sp_rawcopy_func: 5B4A */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_marked: 3339 */ -#else -/* function ia_css_tagger_buf_sp_pop_marked: 345C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_N_CSI_RX_BE_SID_WIDTH -#define HIVE_MEM_N_CSI_RX_BE_SID_WIDTH scalar_processor_2400_dmem -#define HIVE_ADDR_N_CSI_RX_BE_SID_WIDTH 0x1D0 -#define HIVE_SIZE_N_CSI_RX_BE_SID_WIDTH 12 -#else -#endif -#endif -#define HIVE_MEM_sp_N_CSI_RX_BE_SID_WIDTH scalar_processor_2400_dmem -#define HIVE_ADDR_sp_N_CSI_RX_BE_SID_WIDTH 0x1D0 -#define HIVE_SIZE_sp_N_CSI_RX_BE_SID_WIDTH 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_stage -#define HIVE_MEM_isp_stage scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_stage 0x6C98 -#else -#define HIVE_ADDR_isp_stage 0x6D48 -#endif -#define HIVE_SIZE_isp_stage 832 -#else 
-#endif -#endif -#define HIVE_MEM_sp_isp_stage scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_stage 0x6C98 -#else -#define HIVE_ADDR_sp_isp_stage 0x6D48 -#endif -#define HIVE_SIZE_sp_isp_stage 832 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_raw -#define HIVE_MEM_vbuf_raw scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_raw 0x37C -#else -#define HIVE_ADDR_vbuf_raw 0x394 -#endif -#define HIVE_SIZE_vbuf_raw 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_raw scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_raw 0x37C -#else -#define HIVE_ADDR_sp_vbuf_raw 0x394 -#endif -#define HIVE_SIZE_sp_vbuf_raw 4 - -#ifndef ISP2401 -/* function ia_css_sp_bin_copy_func: 594C */ -#else -/* function ia_css_sp_bin_copy_func: 5B2B */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_item_store: 554E */ -#else -/* function ia_css_queue_item_store: 572D */ -#endif - -#ifndef ISP2401 -/* function input_system_reset: 1286 */ -#else -/* function input_system_reset: 1201 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x5B38 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x5BE4 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_metadata_bufs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x5B38 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x5BE4 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem 
-#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x5B4C -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x5BF8 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_buffer_bufs 160 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x5B4C -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x5BF8 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 160 - -/* function sp_start_isp: 39C */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_binary_group -#define HIVE_MEM_sp_binary_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_binary_group 0x7088 -#else -#define HIVE_ADDR_sp_binary_group 0x7138 -#endif -#define HIVE_SIZE_sp_binary_group 32 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_binary_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_binary_group 0x7088 -#else -#define HIVE_ADDR_sp_sp_binary_group 0x7138 -#endif -#define HIVE_SIZE_sp_sp_binary_group 32 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_sw_state -#define HIVE_MEM_sp_sw_state scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sw_state 0x7344 -#else -#define HIVE_ADDR_sp_sw_state 0x73F0 -#endif -#define HIVE_SIZE_sp_sw_state 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_sw_state scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_sw_state 0x7344 -#else -#define HIVE_ADDR_sp_sp_sw_state 0x73F0 -#endif -#define HIVE_SIZE_sp_sp_sw_state 4 - -#ifndef ISP2401 -/* function ia_css_thread_sp_main: 13F7 */ -#else -/* function ia_css_thread_sp_main: 136D */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_internal_buffers: 4047 */ -#else -/* function ia_css_ispctrl_sp_init_internal_buffers: 41F7 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef 
HIVE_MEM_sp2host_psys_event_queue_handle -#define HIVE_MEM_sp2host_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x5BEC -#else -#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x5C98 -#endif -#define HIVE_SIZE_sp2host_psys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_sp2host_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x5BEC -#else -#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x5C98 -#endif -#define HIVE_SIZE_sp_sp2host_psys_event_queue_handle 12 - -#ifndef ISP2401 -/* function pixelgen_unit_test: E68 */ -#else -/* function pixelgen_unit_test: E62 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp2host_psys_event_queue -#define HIVE_MEM_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x5774 -#else -#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x5810 -#endif -#define HIVE_SIZE_sem_for_sp2host_psys_event_queue 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x5774 -#else -#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x5810 -#endif -#define HIVE_SIZE_sp_sem_for_sp2host_psys_event_queue 20 - -#ifndef ISP2401 -/* function ia_css_tagger_sp_propagate_frame: 2D52 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_stop_copy_preview -#define HIVE_MEM_sp_stop_copy_preview scalar_processor_2400_dmem -#define HIVE_ADDR_sp_stop_copy_preview 0x7328 -#define HIVE_SIZE_sp_stop_copy_preview 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_stop_copy_preview scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_stop_copy_preview 0x7328 -#define HIVE_SIZE_sp_sp_stop_copy_preview 4 -#else -/* function ia_css_tagger_sp_propagate_frame: 2D23 */ -#endif - 
-#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_handles -#define HIVE_MEM_vbuf_handles scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_handles 0x73A4 -#else -#define HIVE_ADDR_vbuf_handles 0x7450 -#endif -#define HIVE_SIZE_vbuf_handles 960 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_handles scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_handles 0x73A4 -#else -#define HIVE_ADDR_sp_vbuf_handles 0x7450 -#endif -#define HIVE_SIZE_sp_vbuf_handles 960 - -#ifndef ISP2401 -/* function ia_css_queue_store: 56B4 */ - -/* function ia_css_sp_flash_register: 356E */ -#else -/* function ia_css_queue_store: 5893 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_dummy_function: 5CF7 */ -#else -/* function ia_css_sp_flash_register: 3691 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_init: 201C */ -#else -/* function ia_css_pipeline_sp_init: 1FD7 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_configure: 2C42 */ -#else -/* function ia_css_tagger_sp_configure: 2C13 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_end_binary: 3E8A */ -#else -/* function ia_css_ispctrl_sp_end_binary: 3FFF */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs -#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x5BF8 -#else -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x5CA4 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x5BF8 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x5CA4 -#endif -#define 
HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20 - -#ifndef ISP2401 -/* function pixelgen_tpg_run: F1E */ -#else -/* function pixelgen_tpg_run: F18 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_event_is_pending_mask -#define HIVE_MEM_event_is_pending_mask scalar_processor_2400_dmem -#define HIVE_ADDR_event_is_pending_mask 0x5C -#define HIVE_SIZE_event_is_pending_mask 44 -#else -#endif -#endif -#define HIVE_MEM_sp_event_is_pending_mask scalar_processor_2400_dmem -#define HIVE_ADDR_sp_event_is_pending_mask 0x5C -#define HIVE_SIZE_sp_event_is_pending_mask 44 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cb_elems_frame -#define HIVE_MEM_sp_all_cb_elems_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cb_elems_frame 0x5788 -#else -#define HIVE_ADDR_sp_all_cb_elems_frame 0x5824 -#endif -#define HIVE_SIZE_sp_all_cb_elems_frame 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cb_elems_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x5788 -#else -#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x5824 -#endif -#define HIVE_SIZE_sp_sp_all_cb_elems_frame 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp2host_isys_event_queue_handle -#define HIVE_MEM_sp2host_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x5C0C -#else -#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x5CB8 -#endif -#define HIVE_SIZE_sp2host_isys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_sp2host_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x5C0C -#else -#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x5CB8 -#endif -#define HIVE_SIZE_sp_sp2host_isys_event_queue_handle 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host_sp_com -#define HIVE_MEM_host_sp_com scalar_processor_2400_dmem -#ifndef ISP2401 
-#define HIVE_ADDR_host_sp_com 0x3E48 -#else -#define HIVE_ADDR_host_sp_com 0x3E6C -#endif -#define HIVE_SIZE_host_sp_com 220 -#else -#endif -#endif -#define HIVE_MEM_sp_host_sp_com scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host_sp_com 0x3E48 -#else -#define HIVE_ADDR_sp_host_sp_com 0x3E6C -#endif -#define HIVE_SIZE_sp_host_sp_com 220 - -#ifndef ISP2401 -/* function ia_css_queue_get_free_space: 5313 */ -#else -/* function ia_css_queue_get_free_space: 54F2 */ -#endif - -#ifndef ISP2401 -/* function exec_image_pipe: 5E6 */ -#else -/* function exec_image_pipe: 57A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_init_dmem_data -#define HIVE_MEM_sp_init_dmem_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_init_dmem_data 0x7348 -#else -#define HIVE_ADDR_sp_init_dmem_data 0x73F4 -#endif -#define HIVE_SIZE_sp_init_dmem_data 24 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_init_dmem_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_init_dmem_data 0x7348 -#else -#define HIVE_ADDR_sp_sp_init_dmem_data 0x73F4 -#endif -#define HIVE_SIZE_sp_sp_init_dmem_data 24 - -#ifndef ISP2401 -/* function ia_css_sp_metadata_start: 5DD1 */ -#else -/* function ia_css_sp_metadata_start: 5EB3 */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_init_buffer_queues: 35BF */ -#else -/* function ia_css_bufq_sp_init_buffer_queues: 36E2 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_stop: 1FFF */ -#else -/* function ia_css_pipeline_sp_stop: 1FBA */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_connect_pipes: 312C */ -#else -/* function ia_css_tagger_sp_connect_pipes: 30FD */ -#endif - -#ifndef ISP2401 -/* function sp_isys_copy_wait: 644 */ -#else -/* function sp_isys_copy_wait: 5D8 */ -#endif - -/* function is_isp_debug_buffer_full: 337 */ - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_configure_channel_from_info: 3BD3 */ -#else -/* function 
ia_css_dmaproxy_sp_configure_channel_from_info: 3D35 */ -#endif - -#ifndef ISP2401 -/* function encode_and_post_timer_event: AA8 */ -#else -/* function encode_and_post_timer_event: A3C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_input_system_bz2788_active -#define HIVE_MEM_input_system_bz2788_active scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_input_system_bz2788_active 0x250C -#else -#define HIVE_ADDR_input_system_bz2788_active 0x2524 -#endif -#define HIVE_SIZE_input_system_bz2788_active 4 -#else -#endif -#endif -#define HIVE_MEM_sp_input_system_bz2788_active scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_input_system_bz2788_active 0x250C -#else -#define HIVE_ADDR_sp_input_system_bz2788_active 0x2524 -#endif -#define HIVE_SIZE_sp_input_system_bz2788_active 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_N_IBUF_CTRL_PROCS -#define HIVE_MEM_N_IBUF_CTRL_PROCS scalar_processor_2400_dmem -#define HIVE_ADDR_N_IBUF_CTRL_PROCS 0x1FC -#define HIVE_SIZE_N_IBUF_CTRL_PROCS 12 -#else -#endif -#endif -#define HIVE_MEM_sp_N_IBUF_CTRL_PROCS scalar_processor_2400_dmem -#define HIVE_ADDR_sp_N_IBUF_CTRL_PROCS 0x1FC -#define HIVE_SIZE_sp_N_IBUF_CTRL_PROCS 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_per_frame_data -#define HIVE_MEM_sp_per_frame_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_per_frame_data 0x3F24 -#else -#define HIVE_ADDR_sp_per_frame_data 0x3F48 -#endif -#define HIVE_SIZE_sp_per_frame_data 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_per_frame_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_per_frame_data 0x3F24 -#else -#define HIVE_ADDR_sp_sp_per_frame_data 0x3F48 -#endif -#define HIVE_SIZE_sp_sp_per_frame_data 4 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_vbuf_dequeue: 62AC */ -#else -/* function ia_css_rmgr_sp_vbuf_dequeue: 6472 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_psys_event_queue_handle -#define 
HIVE_MEM_host2sp_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x5C18 -#else -#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x5CC4 -#endif -#define HIVE_SIZE_host2sp_psys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x5C18 -#else -#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x5CC4 -#endif -#define HIVE_SIZE_sp_host2sp_psys_event_queue_handle 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_xmem_bin_addr -#define HIVE_MEM_xmem_bin_addr scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_xmem_bin_addr 0x3F28 -#else -#define HIVE_ADDR_xmem_bin_addr 0x3F4C -#endif -#define HIVE_SIZE_xmem_bin_addr 4 -#else -#endif -#endif -#define HIVE_MEM_sp_xmem_bin_addr scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_xmem_bin_addr 0x3F28 -#else -#define HIVE_ADDR_sp_xmem_bin_addr 0x3F4C -#endif -#define HIVE_SIZE_sp_xmem_bin_addr 4 - -#ifndef ISP2401 -/* function tmr_clock_init: 16F9 */ -#else -/* function tmr_clock_init: 166F */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_run: 1ABF */ -#else -/* function ia_css_pipeline_sp_run: 1A61 */ -#endif - -#ifndef ISP2401 -/* function memcpy: 68B6 */ -#else -/* function memcpy: 6AB4 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_N_ISYS2401_DMA_CHANNEL_PROCS -#define HIVE_MEM_N_ISYS2401_DMA_CHANNEL_PROCS scalar_processor_2400_dmem -#define HIVE_ADDR_N_ISYS2401_DMA_CHANNEL_PROCS 0x214 -#define HIVE_SIZE_N_ISYS2401_DMA_CHANNEL_PROCS 4 -#else -#endif -#endif -#define HIVE_MEM_sp_N_ISYS2401_DMA_CHANNEL_PROCS scalar_processor_2400_dmem -#define HIVE_ADDR_sp_N_ISYS2401_DMA_CHANNEL_PROCS 0x214 -#define HIVE_SIZE_sp_N_ISYS2401_DMA_CHANNEL_PROCS 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GP_DEVICE_BASE -#define HIVE_MEM_GP_DEVICE_BASE 
scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_GP_DEVICE_BASE 0x384 -#else -#define HIVE_ADDR_GP_DEVICE_BASE 0x39C -#endif -#define HIVE_SIZE_GP_DEVICE_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_GP_DEVICE_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x384 -#else -#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x39C -#endif -#define HIVE_SIZE_sp_GP_DEVICE_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_thread_sp_ready_queue -#define HIVE_MEM_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x278 -#else -#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x27C -#endif -#define HIVE_SIZE_ia_css_thread_sp_ready_queue 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x278 -#else -#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x27C -#endif -#define HIVE_SIZE_sp_ia_css_thread_sp_ready_queue 12 - -#ifndef ISP2401 -/* function stream2mmio_send_command: E0A */ -#else -/* function stream2mmio_send_command: E04 */ -#endif - -#ifndef ISP2401 -/* function ia_css_uds_sp_scale_params: 65BF */ -#else -/* function ia_css_uds_sp_scale_params: 67BD */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_increase_size: 14DC */ -#else -/* function ia_css_circbuf_increase_size: 1452 */ -#endif - -#ifndef ISP2401 -/* function __divu: 6834 */ -#else -/* function __divu: 6A32 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_get_state: 131F */ -#else -/* function ia_css_thread_sp_get_state: 1295 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_cont_capt_stop -#define HIVE_MEM_sem_for_cont_capt_stop scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_cont_capt_stop 0x5798 -#else -#define HIVE_ADDR_sem_for_cont_capt_stop 0x5834 -#endif -#define HIVE_SIZE_sem_for_cont_capt_stop 20 
-#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_cont_capt_stop scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x5798 -#else -#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x5834 -#endif -#define HIVE_SIZE_sp_sem_for_cont_capt_stop 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_N_SHORT_PACKET_LUT_ENTRIES -#define HIVE_MEM_N_SHORT_PACKET_LUT_ENTRIES scalar_processor_2400_dmem -#define HIVE_ADDR_N_SHORT_PACKET_LUT_ENTRIES 0x1AC -#define HIVE_SIZE_N_SHORT_PACKET_LUT_ENTRIES 12 -#else -#endif -#endif -#define HIVE_MEM_sp_N_SHORT_PACKET_LUT_ENTRIES scalar_processor_2400_dmem -#define HIVE_ADDR_sp_N_SHORT_PACKET_LUT_ENTRIES 0x1AC -#define HIVE_SIZE_sp_N_SHORT_PACKET_LUT_ENTRIES 12 - -#ifndef ISP2401 -/* function thread_fiber_sp_main: 14D5 */ -#else -/* function thread_fiber_sp_main: 144B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_pipe_thread -#define HIVE_MEM_sp_isp_pipe_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_pipe_thread 0x58DC -#define HIVE_SIZE_sp_isp_pipe_thread 340 -#else -#define HIVE_ADDR_sp_isp_pipe_thread 0x5978 -#define HIVE_SIZE_sp_isp_pipe_thread 360 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_pipe_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x58DC -#define HIVE_SIZE_sp_sp_isp_pipe_thread 340 -#else -#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x5978 -#define HIVE_SIZE_sp_sp_isp_pipe_thread 360 -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_handle_parameter_sets: 193F */ -#else -/* function ia_css_parambuf_sp_handle_parameter_sets: 18B5 */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_set_state: 5DED */ -#else -/* function ia_css_spctrl_sp_set_state: 5ECF */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_signal: 6A99 */ -#else -/* function ia_css_thread_sem_sp_signal: 6D18 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_IRQ_BASE -#define 
HIVE_MEM_IRQ_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_IRQ_BASE 0x2C -#define HIVE_SIZE_IRQ_BASE 16 -#else -#endif -#endif -#define HIVE_MEM_sp_IRQ_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_IRQ_BASE 0x2C -#define HIVE_SIZE_sp_IRQ_BASE 16 - -#ifndef ISP2401 -/* function ia_css_virtual_isys_sp_isr_init: 5E8C */ -#else -/* function ia_css_virtual_isys_sp_isr_init: 5F70 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_TIMED_CTRL_BASE -#define HIVE_MEM_TIMED_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_TIMED_CTRL_BASE 0x40 -#define HIVE_SIZE_TIMED_CTRL_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_TIMED_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_TIMED_CTRL_BASE 0x40 -#define HIVE_SIZE_sp_TIMED_CTRL_BASE 4 - -#ifndef ISP2401 -/* function ia_css_isys_sp_generate_exp_id: 613C */ - -/* function ia_css_rmgr_sp_init: 61A7 */ -#else -/* function ia_css_isys_sp_generate_exp_id: 6302 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_init: 6B6A */ -#else -/* function ia_css_rmgr_sp_init: 636D */ -#endif - -#ifndef ISP2401 -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_is_isp_requested -#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_is_isp_requested 0x390 -#define HIVE_SIZE_is_isp_requested 4 -#else -#endif -#endif -#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_sp_is_isp_requested 0x390 -#define HIVE_SIZE_sp_is_isp_requested 4 -#else -/* function ia_css_thread_sem_sp_init: 6DE7 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_cb_frame -#define HIVE_MEM_sem_for_reading_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_cb_frame 0x57AC -#else -#define HIVE_ADDR_sem_for_reading_cb_frame 0x5848 -#endif -#define HIVE_SIZE_sem_for_reading_cb_frame 40 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define 
HIVE_ADDR_sp_sem_for_reading_cb_frame 0x57AC -#else -#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x5848 -#endif -#define HIVE_SIZE_sp_sem_for_reading_cb_frame 40 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_execute: 3B3B */ -#else -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_is_isp_requested -#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_is_isp_requested 0x3A8 -#define HIVE_SIZE_is_isp_requested 4 -#else -#endif -#endif -#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_sp_is_isp_requested 0x3A8 -#define HIVE_SIZE_sp_is_isp_requested 4 - -/* function ia_css_dmaproxy_sp_execute: 3C9B */ -#endif - -#ifndef ISP2401 -/* function csi_rx_backend_rst: CE6 */ -#else -/* function csi_rx_backend_rst: CE0 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_is_empty: 51FA */ -#else -/* function ia_css_queue_is_empty: 7144 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_has_stopped: 1FF5 */ -#else -/* function ia_css_pipeline_sp_has_stopped: 1FB0 */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_extract: 15E0 */ -#else -/* function ia_css_circbuf_extract: 1556 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_is_locked_from_start: 344F */ -#else -/* function ia_css_tagger_buf_sp_is_locked_from_start: 3572 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_current_sp_thread -#define HIVE_MEM_current_sp_thread scalar_processor_2400_dmem -#define HIVE_ADDR_current_sp_thread 0x274 -#define HIVE_SIZE_current_sp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_current_sp_thread scalar_processor_2400_dmem -#define HIVE_ADDR_sp_current_sp_thread 0x274 -#define HIVE_SIZE_sp_current_sp_thread 4 - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_get_spid: 5DF4 */ -#else -/* function ia_css_spctrl_sp_get_spid: 5ED6 */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_reset_buffers: 3646 */ -#else -/* function ia_css_bufq_sp_reset_buffers: 3769 */ 
-#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read_byte_addr: 6DD7 */ -#else -/* function ia_css_dmaproxy_sp_read_byte_addr: 7025 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_uninit: 61A0 */ -#else -/* function ia_css_rmgr_sp_uninit: 6366 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_stack -#define HIVE_MEM_sp_threads_stack scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_stack 0x164 -#define HIVE_SIZE_sp_threads_stack 24 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_stack scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_stack 0x164 -#define HIVE_SIZE_sp_sp_threads_stack 24 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_N_STREAM2MMIO_SID_PROCS -#define HIVE_MEM_N_STREAM2MMIO_SID_PROCS scalar_processor_2400_dmem -#define HIVE_ADDR_N_STREAM2MMIO_SID_PROCS 0x218 -#define HIVE_SIZE_N_STREAM2MMIO_SID_PROCS 12 -#else -#endif -#endif -#define HIVE_MEM_sp_N_STREAM2MMIO_SID_PROCS scalar_processor_2400_dmem -#define HIVE_ADDR_sp_N_STREAM2MMIO_SID_PROCS 0x218 -#define HIVE_SIZE_sp_N_STREAM2MMIO_SID_PROCS 12 - -#ifndef ISP2401 -/* function ia_css_circbuf_peek: 15C2 */ -#else -/* function ia_css_circbuf_peek: 1538 */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_wait_for_in_param: 1708 */ -#else -/* function ia_css_parambuf_sp_wait_for_in_param: 167E */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cb_elems_param -#define HIVE_MEM_sp_all_cb_elems_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cb_elems_param 0x57D4 -#else -#define HIVE_ADDR_sp_all_cb_elems_param 0x5870 -#endif -#define HIVE_SIZE_sp_all_cb_elems_param 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cb_elems_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x57D4 -#else -#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x5870 -#endif -#define HIVE_SIZE_sp_sp_all_cb_elems_param 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef 
HIVE_MEM_pipeline_sp_curr_binary_id -#define HIVE_MEM_pipeline_sp_curr_binary_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x284 -#else -#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x288 -#endif -#define HIVE_SIZE_pipeline_sp_curr_binary_id 4 -#else -#endif -#endif -#define HIVE_MEM_sp_pipeline_sp_curr_binary_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x284 -#else -#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x288 -#endif -#define HIVE_SIZE_sp_pipeline_sp_curr_binary_id 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_frame_desc -#define HIVE_MEM_sp_all_cbs_frame_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_frame_desc 0x57E4 -#else -#define HIVE_ADDR_sp_all_cbs_frame_desc 0x5880 -#endif -#define HIVE_SIZE_sp_all_cbs_frame_desc 8 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_frame_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x57E4 -#else -#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x5880 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_frame_desc 8 - -#ifndef ISP2401 -/* function sp_isys_copy_func_v2: 629 */ -#else -/* function sp_isys_copy_func_v2: 5BD */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_cb_param -#define HIVE_MEM_sem_for_reading_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_cb_param 0x57EC -#else -#define HIVE_ADDR_sem_for_reading_cb_param 0x5888 -#endif -#define HIVE_SIZE_sem_for_reading_cb_param 40 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x57EC -#else -#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x5888 -#endif -#define HIVE_SIZE_sp_sem_for_reading_cb_param 40 - -#ifndef ISP2401 -/* function ia_css_queue_get_used_space: 52C7 */ -#else -/* function 
ia_css_queue_get_used_space: 54A6 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_cont_capt_start -#define HIVE_MEM_sem_for_cont_capt_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_cont_capt_start 0x5814 -#else -#define HIVE_ADDR_sem_for_cont_capt_start 0x58B0 -#endif -#define HIVE_SIZE_sem_for_cont_capt_start 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_cont_capt_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x5814 -#else -#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x58B0 -#endif -#define HIVE_SIZE_sp_sem_for_cont_capt_start 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_tmp_heap -#define HIVE_MEM_tmp_heap scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_tmp_heap 0x70A8 -#else -#define HIVE_ADDR_tmp_heap 0x7158 -#endif -#define HIVE_SIZE_tmp_heap 640 -#else -#endif -#endif -#define HIVE_MEM_sp_tmp_heap scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_tmp_heap 0x70A8 -#else -#define HIVE_ADDR_sp_tmp_heap 0x7158 -#endif -#define HIVE_SIZE_sp_tmp_heap 640 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_get_num_vbuf: 64B0 */ -#else -/* function ia_css_rmgr_sp_get_num_vbuf: 6676 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_output_compute_dma_info: 4863 */ -#else -/* function ia_css_ispctrl_sp_output_compute_dma_info: 4A27 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_lock_exp_id: 2A0F */ -#else -/* function ia_css_tagger_sp_lock_exp_id: 29E0 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x5C24 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x5CD0 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_s3a_bufs 60 -#else -#endif -#endif -#define 
HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x5C24 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x5CD0 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 60 - -#ifndef ISP2401 -/* function ia_css_queue_is_full: 535E */ -#else -/* function ia_css_queue_is_full: 553D */ -#endif - -/* function debug_buffer_init_isp: E4 */ - -#ifndef ISP2401 -/* function ia_css_tagger_sp_exp_id_is_locked: 2945 */ -#else -/* function ia_css_tagger_sp_exp_id_is_locked: 2916 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem -#define HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x7764 -#else -#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x7810 -#endif -#define HIVE_SIZE_ia_css_rmgr_sp_mipi_frame_sem 60 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x7764 -#else -#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x7810 -#endif -#define HIVE_SIZE_sp_ia_css_rmgr_sp_mipi_frame_sem 60 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_dump: 6287 */ -#else -/* function ia_css_rmgr_sp_refcount_dump: 644D */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x5C60 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x5D0C -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_isp_parameters_id 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define 
HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x5C60 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x5D0C -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_pipe_threads -#define HIVE_MEM_sp_pipe_threads scalar_processor_2400_dmem -#define HIVE_ADDR_sp_pipe_threads 0x150 -#define HIVE_SIZE_sp_pipe_threads 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_pipe_threads scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_pipe_threads 0x150 -#define HIVE_SIZE_sp_sp_pipe_threads 20 - -#ifndef ISP2401 -/* function sp_event_proxy_func: 78D */ -#else -/* function sp_event_proxy_func: 721 */ -#endif - -#ifndef ISP2401 -/* function ibuf_ctrl_run: D7F */ -#else -/* function ibuf_ctrl_run: D79 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_isys_event_queue_handle -#define HIVE_MEM_host2sp_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x5C74 -#else -#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x5D20 -#endif -#define HIVE_SIZE_host2sp_isys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x5C74 -#else -#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x5D20 -#endif -#define HIVE_SIZE_sp_host2sp_isys_event_queue_handle 12 - -#ifndef ISP2401 -/* function ia_css_thread_sp_yield: 6A12 */ -#else -/* function ia_css_thread_sp_yield: 6C96 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_param_desc -#define HIVE_MEM_sp_all_cbs_param_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_param_desc 0x5828 -#else -#define HIVE_ADDR_sp_all_cbs_param_desc 0x58C4 -#endif -#define HIVE_SIZE_sp_all_cbs_param_desc 8 -#else -#endif -#endif -#define 
HIVE_MEM_sp_sp_all_cbs_param_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x5828 -#else -#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x58C4 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_param_desc 8 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb -#define HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x6C8C -#else -#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x6D38 -#endif -#define HIVE_SIZE_ia_css_dmaproxy_sp_invalidate_tlb 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x6C8C -#else -#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x6D38 -#endif -#define HIVE_SIZE_sp_ia_css_dmaproxy_sp_invalidate_tlb 4 - -#ifndef ISP2401 -/* function ia_css_thread_sp_fork: 13AC */ -#else -/* function ia_css_thread_sp_fork: 1322 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_destroy: 3136 */ -#else -/* function ia_css_tagger_sp_destroy: 3107 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_vmem_read: 3ADB */ -#else -/* function ia_css_dmaproxy_sp_vmem_read: 3C3B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_N_LONG_PACKET_LUT_ENTRIES -#define HIVE_MEM_N_LONG_PACKET_LUT_ENTRIES scalar_processor_2400_dmem -#define HIVE_ADDR_N_LONG_PACKET_LUT_ENTRIES 0x1B8 -#define HIVE_SIZE_N_LONG_PACKET_LUT_ENTRIES 12 -#else -#endif -#endif -#define HIVE_MEM_sp_N_LONG_PACKET_LUT_ENTRIES scalar_processor_2400_dmem -#define HIVE_ADDR_sp_N_LONG_PACKET_LUT_ENTRIES 0x1B8 -#define HIVE_SIZE_sp_N_LONG_PACKET_LUT_ENTRIES 12 - -#ifndef ISP2401 -/* function initialize_sp_group: 5F6 */ -#else -/* function initialize_sp_group: 58A */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_peek: 325B */ -#else -/* function ia_css_tagger_buf_sp_peek: 
337E */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_init: 13D8 */ -#else -/* function ia_css_thread_sp_init: 134E */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_reset_exp_id: 6133 */ -#else -/* function qos_scheduler_update_fps: 67AD */ -#endif - -#ifndef ISP2401 -/* function qos_scheduler_update_fps: 65AF */ -#else -/* function ia_css_isys_sp_reset_exp_id: 62F9 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_set_stream_base_addr: 4F38 */ -#else -/* function ia_css_ispctrl_sp_set_stream_base_addr: 5114 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_DMEM_BASE -#define HIVE_MEM_ISP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_DMEM_BASE 0x10 -#define HIVE_SIZE_ISP_DMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_DMEM_BASE 0x10 -#define HIVE_SIZE_sp_ISP_DMEM_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_SP_DMEM_BASE -#define HIVE_MEM_SP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_SP_DMEM_BASE 0x4 -#define HIVE_SIZE_SP_DMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_SP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_SP_DMEM_BASE 0x4 -#define HIVE_SIZE_sp_SP_DMEM_BASE 4 - -#ifndef ISP2401 -/* function ibuf_ctrl_transfer: D67 */ -#else -/* function ibuf_ctrl_transfer: D61 */ - -/* function __ia_css_queue_is_empty_text: 5403 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read: 3B51 */ -#else -/* function ia_css_dmaproxy_sp_read: 3CB1 */ -#endif - -#ifndef ISP2401 -/* function virtual_isys_stream_is_capture_done: 5EB0 */ -#else -/* function virtual_isys_stream_is_capture_done: 5F94 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_raw_copy_line_count -#define HIVE_MEM_raw_copy_line_count scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_raw_copy_line_count 0x360 -#else -#define HIVE_ADDR_raw_copy_line_count 0x378 -#endif -#define 
HIVE_SIZE_raw_copy_line_count 4 -#else -#endif -#endif -#define HIVE_MEM_sp_raw_copy_line_count scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_raw_copy_line_count 0x360 -#else -#define HIVE_ADDR_sp_raw_copy_line_count 0x378 -#endif -#define HIVE_SIZE_sp_raw_copy_line_count 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_tag_cmd_queue_handle -#define HIVE_MEM_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x5C80 -#else -#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x5D2C -#endif -#define HIVE_SIZE_host2sp_tag_cmd_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x5C80 -#else -#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x5D2C -#endif -#define HIVE_SIZE_sp_host2sp_tag_cmd_queue_handle 12 - -#ifndef ISP2401 -/* function ia_css_queue_peek: 523D */ -#else -/* function ia_css_queue_peek: 541C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_frame_cnt -#define HIVE_MEM_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x5B2C -#else -#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x5BD8 -#endif -#define HIVE_SIZE_ia_css_flash_sp_frame_cnt 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x5B2C -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x5BD8 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_frame_cnt 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_event_can_send_token_mask -#define HIVE_MEM_event_can_send_token_mask scalar_processor_2400_dmem -#define HIVE_ADDR_event_can_send_token_mask 0x88 -#define HIVE_SIZE_event_can_send_token_mask 44 -#else -#endif -#endif -#define HIVE_MEM_sp_event_can_send_token_mask 
scalar_processor_2400_dmem -#define HIVE_ADDR_sp_event_can_send_token_mask 0x88 -#define HIVE_SIZE_sp_event_can_send_token_mask 44 - -#ifndef ISP2401 -/* function csi_rx_frontend_stop: C11 */ -#else -/* function csi_rx_frontend_stop: C0B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_thread -#define HIVE_MEM_isp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_thread 0x6FD8 -#else -#define HIVE_ADDR_isp_thread 0x7088 -#endif -#define HIVE_SIZE_isp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_thread 0x6FD8 -#else -#define HIVE_ADDR_sp_isp_thread 0x7088 -#endif -#define HIVE_SIZE_sp_isp_thread 4 - -#ifndef ISP2401 -/* function encode_and_post_sp_event_non_blocking: AF0 */ -#else -/* function encode_and_post_sp_event_non_blocking: A84 */ -#endif - -/* function is_ddr_debug_buffer_full: 2CC */ - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 32AB */ -#else -/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 33CE */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_fiber -#define HIVE_MEM_sp_threads_fiber scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_fiber 0x194 -#define HIVE_SIZE_sp_threads_fiber 24 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_fiber scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_fiber 0x194 -#define HIVE_SIZE_sp_sp_threads_fiber 24 - -#ifndef ISP2401 -/* function encode_and_post_sp_event: A79 */ -#else -/* function encode_and_post_sp_event: A0D */ -#endif - -/* function debug_enqueue_ddr: EE */ - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_init_vbuf: 6242 */ -#else -/* function ia_css_rmgr_sp_refcount_init_vbuf: 6408 */ -#endif - -#ifndef ISP2401 -/* function dmaproxy_sp_read_write: 6E86 */ -#else -/* function dmaproxy_sp_read_write: 70C3 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef 
HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer -#define HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x6C90 -#else -#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x6D3C -#endif -#define HIVE_SIZE_ia_css_dmaproxy_isp_dma_cmd_buffer 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x6C90 -#else -#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x6D3C -#endif -#define HIVE_SIZE_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_buffer_queue_handle -#define HIVE_MEM_host2sp_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_buffer_queue_handle 0x5C8C -#else -#define HIVE_ADDR_host2sp_buffer_queue_handle 0x5D38 -#endif -#define HIVE_SIZE_host2sp_buffer_queue_handle 480 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x5C8C -#else -#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x5D38 -#endif -#define HIVE_SIZE_sp_host2sp_buffer_queue_handle 480 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_in_service -#define HIVE_MEM_ia_css_flash_sp_in_service scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3054 -#else -#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3074 -#endif -#define HIVE_SIZE_ia_css_flash_sp_in_service 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_in_service scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3054 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3074 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_in_service 4 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_process: 6B92 */ 
-#else -/* function ia_css_dmaproxy_sp_process: 6E0F */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_mark_from_end: 3533 */ -#else -/* function ia_css_tagger_buf_sp_mark_from_end: 3656 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_cs: 3F77 */ -#else -/* function ia_css_ispctrl_sp_init_cs: 40FA */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_init: 5E02 */ -#else -/* function ia_css_spctrl_sp_init: 5EE4 */ -#endif - -#ifndef ISP2401 -/* function sp_event_proxy_init: 7A2 */ -#else -/* function sp_event_proxy_init: 736 */ -#endif - -#ifndef ISP2401 -/* function input_system_input_port_close: 109B */ -#else -/* function input_system_input_port_close: 1095 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x5E6C -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x5F18 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_previous_clock_tick 40 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x5E6C -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x5F18 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 40 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_output -#define HIVE_MEM_sp_output scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_output 0x3F2C -#else -#define HIVE_ADDR_sp_output 0x3F50 -#endif -#define HIVE_SIZE_sp_output 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_output scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_output 0x3F2C -#else -#define HIVE_ADDR_sp_sp_output 0x3F50 -#endif -#define 
HIVE_SIZE_sp_sp_output 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues -#define HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x5E94 -#else -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x5F40 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x5E94 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x5F40 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800 - -#ifndef ISP2401 -/* function pixelgen_prbs_config: E93 */ -#else -/* function pixelgen_prbs_config: E8D */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_CTRL_BASE -#define HIVE_MEM_ISP_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_CTRL_BASE 0x8 -#define HIVE_SIZE_ISP_CTRL_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_CTRL_BASE 0x8 -#define HIVE_SIZE_sp_ISP_CTRL_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_INPUT_FORMATTER_BASE -#define HIVE_MEM_INPUT_FORMATTER_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_INPUT_FORMATTER_BASE 0x4C -#define HIVE_SIZE_INPUT_FORMATTER_BASE 16 -#else -#endif -#endif -#define HIVE_MEM_sp_INPUT_FORMATTER_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_INPUT_FORMATTER_BASE 0x4C -#define HIVE_SIZE_sp_INPUT_FORMATTER_BASE 16 - -#ifndef ISP2401 -/* function sp_dma_proxy_reset_channels: 3DAB */ -#else -/* function sp_dma_proxy_reset_channels: 3F20 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_update_size: 322A */ -#else -/* function ia_css_tagger_sp_update_size: 334D */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS 
-#ifndef HIVE_MEM_ia_css_bufq_host_sp_queue -#define HIVE_MEM_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x61B4 -#else -#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x6260 -#endif -#define HIVE_SIZE_ia_css_bufq_host_sp_queue 2008 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x61B4 -#else -#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x6260 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_host_sp_queue 2008 - -#ifndef ISP2401 -/* function thread_fiber_sp_create: 1444 */ -#else -/* function thread_fiber_sp_create: 13BA */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_increments: 3C3D */ -#else -/* function ia_css_dmaproxy_sp_set_increments: 3DB2 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_writing_cb_frame -#define HIVE_MEM_sem_for_writing_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_writing_cb_frame 0x5830 -#else -#define HIVE_ADDR_sem_for_writing_cb_frame 0x58CC -#endif -#define HIVE_SIZE_sem_for_writing_cb_frame 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_writing_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x5830 -#else -#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x58CC -#endif -#define HIVE_SIZE_sp_sem_for_writing_cb_frame 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_writing_cb_param -#define HIVE_MEM_sem_for_writing_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_writing_cb_param 0x5844 -#else -#define HIVE_ADDR_sem_for_writing_cb_param 0x58E0 -#endif -#define HIVE_SIZE_sem_for_writing_cb_param 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_writing_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x5844 -#else -#define 
HIVE_ADDR_sp_sem_for_writing_cb_param 0x58E0 -#endif -#define HIVE_SIZE_sp_sem_for_writing_cb_param 20 - -#ifndef ISP2401 -/* function pixelgen_tpg_is_done: F0D */ -#else -/* function pixelgen_tpg_is_done: F07 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_stream_capture_indication: 5FB6 */ -#else -/* function ia_css_isys_stream_capture_indication: 60D7 */ -#endif - -/* function sp_start_isp_entry: 392 */ -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifdef HIVE_ADDR_sp_start_isp_entry -#endif -#define HIVE_ADDR_sp_start_isp_entry 0x392 -#endif -#define HIVE_ADDR_sp_sp_start_isp_entry 0x392 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_unmark_all: 34B7 */ -#else -/* function ia_css_tagger_buf_sp_unmark_all: 35DA */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_unmark_from_start: 34F8 */ -#else -/* function ia_css_tagger_buf_sp_unmark_from_start: 361B */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_channel_acquire: 3DD7 */ -#else -/* function ia_css_dmaproxy_sp_channel_acquire: 3F4C */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_add_num_vbuf: 648C */ -#else -/* function ia_css_rmgr_sp_add_num_vbuf: 6652 */ -#endif - -#ifndef ISP2401 -/* function ibuf_ctrl_config: D8B */ -#else -/* function ibuf_ctrl_config: D85 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_stream_stop: 602E */ -#else -/* function ia_css_isys_stream_stop: 61F4 */ -#endif - -#ifndef ISP2401 -/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 3AA7 */ -#else -/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 3C07 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_acquire_buf_elem: 291D */ -#else -/* function ia_css_tagger_sp_acquire_buf_elem: 28EE */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_is_dynamic_buffer: 3990 */ -#else -/* function ia_css_bufq_sp_is_dynamic_buffer: 3AB3 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_group -#define HIVE_MEM_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define 
HIVE_ADDR_sp_group 0x3F3C -#define HIVE_SIZE_sp_group 6176 -#else -#define HIVE_ADDR_sp_group 0x3F60 -#define HIVE_SIZE_sp_group 6296 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_group 0x3F3C -#define HIVE_SIZE_sp_sp_group 6176 -#else -#define HIVE_ADDR_sp_sp_group 0x3F60 -#define HIVE_SIZE_sp_sp_group 6296 -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_event_proxy_thread -#define HIVE_MEM_sp_event_proxy_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_event_proxy_thread 0x5A30 -#define HIVE_SIZE_sp_event_proxy_thread 68 -#else -#define HIVE_ADDR_sp_event_proxy_thread 0x5AE0 -#define HIVE_SIZE_sp_event_proxy_thread 72 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_event_proxy_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_event_proxy_thread 0x5A30 -#define HIVE_SIZE_sp_sp_event_proxy_thread 68 -#else -#define HIVE_ADDR_sp_sp_event_proxy_thread 0x5AE0 -#define HIVE_SIZE_sp_sp_event_proxy_thread 72 -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_kill: 1372 */ -#else -/* function ia_css_thread_sp_kill: 12E8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_create: 31E4 */ -#else -/* function ia_css_tagger_sp_create: 32FB */ -#endif - -#ifndef ISP2401 -/* function tmpmem_acquire_dmem: 6539 */ -#else -/* function tmpmem_acquire_dmem: 66FF */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_MMU_BASE -#define HIVE_MEM_MMU_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_MMU_BASE 0x24 -#define HIVE_SIZE_MMU_BASE 8 -#else -#endif -#endif -#define HIVE_MEM_sp_MMU_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_MMU_BASE 0x24 -#define HIVE_SIZE_sp_MMU_BASE 8 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_channel_release: 3DC3 */ -#else -/* function ia_css_dmaproxy_sp_channel_release: 3F38 */ -#endif - -#ifndef ISP2401 -/* function pixelgen_prbs_run: E81 */ -#else -/* function 
pixelgen_prbs_run: E7B */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_is_idle: 3DA3 */ -#else -/* function ia_css_dmaproxy_sp_is_idle: 3F18 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_qos_start -#define HIVE_MEM_sem_for_qos_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_qos_start 0x5858 -#else -#define HIVE_ADDR_sem_for_qos_start 0x58F4 -#endif -#define HIVE_SIZE_sem_for_qos_start 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_qos_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_qos_start 0x5858 -#else -#define HIVE_ADDR_sp_sem_for_qos_start 0x58F4 -#endif -#define HIVE_SIZE_sp_sem_for_qos_start 20 - -#ifndef ISP2401 -/* function isp_hmem_load: B63 */ -#else -/* function isp_hmem_load: B5D */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_release_buf_elem: 28F9 */ -#else -/* function ia_css_tagger_sp_release_buf_elem: 28CA */ -#endif - -#ifndef ISP2401 -/* function ia_css_eventq_sp_send: 3E19 */ -#else -/* function ia_css_eventq_sp_send: 3F8E */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_unlock_from_start: 33E7 */ -#else -/* function ia_css_tagger_buf_sp_unlock_from_start: 350A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_debug_buffer_ddr_address -#define HIVE_MEM_debug_buffer_ddr_address scalar_processor_2400_dmem -#define HIVE_ADDR_debug_buffer_ddr_address 0xBC -#define HIVE_SIZE_debug_buffer_ddr_address 4 -#else -#endif -#endif -#define HIVE_MEM_sp_debug_buffer_ddr_address scalar_processor_2400_dmem -#define HIVE_ADDR_sp_debug_buffer_ddr_address 0xBC -#define HIVE_SIZE_sp_debug_buffer_ddr_address 4 - -#ifndef ISP2401 -/* function sp_isys_copy_request: 6ED */ -#else -/* function sp_isys_copy_request: 681 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_retain_vbuf: 631C */ -#else -/* function ia_css_rmgr_sp_refcount_retain_vbuf: 64E2 */ -#endif - -#ifndef ISP2401 -/* function 
ia_css_thread_sp_set_priority: 136A */ -#else -/* function ia_css_thread_sp_set_priority: 12E0 */ -#endif - -#ifndef ISP2401 -/* function sizeof_hmem: C0A */ -#else -/* function sizeof_hmem: C04 */ -#endif - -#ifndef ISP2401 -/* function input_system_channel_open: 1241 */ -#else -/* function input_system_channel_open: 11BC */ -#endif - -#ifndef ISP2401 -/* function pixelgen_tpg_stop: EFB */ -#else -/* function pixelgen_tpg_stop: EF5 */ -#endif - -#ifndef ISP2401 -/* function tmpmem_release_dmem: 6528 */ -#else -/* function tmpmem_release_dmem: 66EE */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_width_exception: 3C28 */ -#else -/* function __ia_css_dmaproxy_sp_process_text: 3BAB */ -#endif - -#ifndef ISP2401 -/* function sp_event_assert: 929 */ -#else -/* function ia_css_dmaproxy_sp_set_width_exception: 3D9D */ -#endif - -#ifndef ISP2401 -/* function ia_css_flash_sp_init_internal_params: 35B4 */ -#else -/* function sp_event_assert: 8BD */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 32ED */ -#else -/* function ia_css_flash_sp_init_internal_params: 36D7 */ -#endif - -#ifndef ISP2401 -/* function __modu: 687A */ -#else -/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 3410 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_isp_vector: 3AAD */ -#else -/* function __modu: 6A78 */ -#endif - -#ifndef ISP2401 -/* function input_system_channel_transfer: 122A */ -#else -/* function ia_css_dmaproxy_sp_init_isp_vector: 3C0D */ - -/* function input_system_channel_transfer: 11A5 */ -#endif - -/* function isp_vamem_store: 0 */ - -#ifdef ISP2401 -/* function ia_css_tagger_sp_set_copy_pipe: 32F2 */ - -#endif -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GDC_BASE -#define HIVE_MEM_GDC_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_GDC_BASE 0x44 -#define HIVE_SIZE_GDC_BASE 8 -#else -#endif -#endif -#define HIVE_MEM_sp_GDC_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_GDC_BASE 0x44 
-#define HIVE_SIZE_sp_GDC_BASE 8 - -#ifndef ISP2401 -/* function ia_css_queue_local_init: 5528 */ -#else -/* function ia_css_queue_local_init: 5707 */ -#endif - -#ifndef ISP2401 -/* function sp_event_proxy_callout_func: 6947 */ -#else -/* function sp_event_proxy_callout_func: 6B45 */ -#endif - -#ifndef ISP2401 -/* function qos_scheduler_schedule_stage: 6580 */ -#else -/* function qos_scheduler_schedule_stage: 6759 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_thread_sp_num_ready_threads -#define HIVE_MEM_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x5A78 -#else -#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x5B28 -#endif -#define HIVE_SIZE_ia_css_thread_sp_num_ready_threads 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x5A78 -#else -#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x5B28 -#endif -#define HIVE_SIZE_sp_ia_css_thread_sp_num_ready_threads 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_stack_size -#define HIVE_MEM_sp_threads_stack_size scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_stack_size 0x17C -#define HIVE_SIZE_sp_threads_stack_size 24 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_stack_size scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_stack_size 0x17C -#define HIVE_SIZE_sp_sp_threads_stack_size 24 - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_isp_done_row_striping: 4849 */ -#else -/* function ia_css_ispctrl_sp_isp_done_row_striping: 4A0D */ -#endif - -#ifndef ISP2401 -/* function __ia_css_virtual_isys_sp_isr_text: 5E45 */ -#else -/* function __ia_css_virtual_isys_sp_isr_text: 5F4E */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_dequeue: 53A6 */ -#else -/* function ia_css_queue_dequeue: 5585 */ -#endif - -#ifndef ISP2401 -/* 
function ia_css_dmaproxy_sp_configure_channel: 6DEE */ -#else -/* function is_qos_standalone_mode: 6734 */ - -/* function ia_css_dmaproxy_sp_configure_channel: 703C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_current_thread_fiber_sp -#define HIVE_MEM_current_thread_fiber_sp scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_current_thread_fiber_sp 0x5A80 -#else -#define HIVE_ADDR_current_thread_fiber_sp 0x5B2C -#endif -#define HIVE_SIZE_current_thread_fiber_sp 4 -#else -#endif -#endif -#define HIVE_MEM_sp_current_thread_fiber_sp scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_current_thread_fiber_sp 0x5A80 -#else -#define HIVE_ADDR_sp_current_thread_fiber_sp 0x5B2C -#endif -#define HIVE_SIZE_sp_current_thread_fiber_sp 4 - -#ifndef ISP2401 -/* function ia_css_circbuf_pop: 1674 */ -#else -/* function ia_css_circbuf_pop: 15EA */ -#endif - -#ifndef ISP2401 -/* function memset: 68F9 */ -#else -/* function memset: 6AF7 */ -#endif - -/* function irq_raise_set_token: B6 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GPIO_BASE -#define HIVE_MEM_GPIO_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_GPIO_BASE 0x3C -#define HIVE_SIZE_GPIO_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_GPIO_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_GPIO_BASE 0x3C -#define HIVE_SIZE_sp_GPIO_BASE 4 - -#ifndef ISP2401 -/* function pixelgen_prbs_stop: E6F */ -#else -/* function pixelgen_prbs_stop: E69 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_acc_stage_enable: 1FC0 */ -#else -/* function ia_css_pipeline_acc_stage_enable: 1F69 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_unlock_exp_id: 296A */ -#else -/* function ia_css_tagger_sp_unlock_exp_id: 293B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_ph -#define HIVE_MEM_isp_ph scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_ph 0x7360 -#else -#define HIVE_ADDR_isp_ph 0x740C -#endif -#define HIVE_SIZE_isp_ph 28 
-#else -#endif -#endif -#define HIVE_MEM_sp_isp_ph scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_ph 0x7360 -#else -#define HIVE_ADDR_sp_isp_ph 0x740C -#endif -#define HIVE_SIZE_sp_isp_ph 28 - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_ds: 40D6 */ -#else -/* function ia_css_ispctrl_sp_init_ds: 4286 */ -#endif - -#ifndef ISP2401 -/* function get_xmem_base_addr_raw: 4479 */ -#else -/* function get_xmem_base_addr_raw: 4635 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_param -#define HIVE_MEM_sp_all_cbs_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_param 0x586C -#else -#define HIVE_ADDR_sp_all_cbs_param 0x5908 -#endif -#define HIVE_SIZE_sp_all_cbs_param 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_param 0x586C -#else -#define HIVE_ADDR_sp_sp_all_cbs_param 0x5908 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_param 16 - -#ifndef ISP2401 -/* function pixelgen_tpg_config: F30 */ -#else -/* function pixelgen_tpg_config: F2A */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_create: 16C2 */ -#else -/* function ia_css_circbuf_create: 1638 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp_group -#define HIVE_MEM_sem_for_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp_group 0x587C -#else -#define HIVE_ADDR_sem_for_sp_group 0x5918 -#endif -#define HIVE_SIZE_sem_for_sp_group 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp_group 0x587C -#else -#define HIVE_ADDR_sp_sem_for_sp_group 0x5918 -#endif -#define HIVE_SIZE_sp_sem_for_sp_group 20 - -#ifndef ISP2401 -/* function csi_rx_frontend_run: C22 */ -#else -/* function csi_rx_frontend_run: C1C */ - -/* function __ia_css_dmaproxy_sp_configure_channel_text: 3D7C */ -#endif - -#ifndef ISP2401 -/* 
function ia_css_framebuf_sp_wait_for_in_frame: 64B7 */ -#else -/* function ia_css_framebuf_sp_wait_for_in_frame: 667D */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_stream_open: 60E3 */ -#else -/* function ia_css_isys_stream_open: 62A9 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_tag_frame: 5C71 */ -#else -/* function ia_css_sp_rawcopy_tag_frame: 5E35 */ -#endif - -#ifndef ISP2401 -/* function input_system_channel_configure: 125D */ -#else -/* function input_system_channel_configure: 11D8 */ -#endif - -#ifndef ISP2401 -/* function isp_hmem_clear: B33 */ -#else -/* function isp_hmem_clear: B2D */ -#endif - -#ifndef ISP2401 -/* function ia_css_framebuf_sp_release_in_frame: 64FA */ -#else -/* function ia_css_framebuf_sp_release_in_frame: 66C0 */ -#endif - -#ifndef ISP2401 -/* function stream2mmio_config: E1B */ -#else -/* function stream2mmio_config: E15 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_start_binary: 3F55 */ -#else -/* function ia_css_ispctrl_sp_start_binary: 40D8 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs -#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x698C -#else -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x6A38 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x698C -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x6A38 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20 - -#ifndef ISP2401 -/* function ia_css_eventq_sp_recv: 3DEB */ -#else -/* function ia_css_eventq_sp_recv: 3F60 */ -#endif - -#ifndef ISP2401 -/* function csi_rx_frontend_config: C7A */ -#else -/* function 
csi_rx_frontend_config: C74 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_pool -#define HIVE_MEM_isp_pool scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_pool 0x370 -#else -#define HIVE_ADDR_isp_pool 0x388 -#endif -#define HIVE_SIZE_isp_pool 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_pool scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_pool 0x370 -#else -#define HIVE_ADDR_sp_isp_pool 0x388 -#endif -#define HIVE_SIZE_sp_isp_pool 4 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_rel_gen: 61E9 */ -#else -/* function ia_css_rmgr_sp_rel_gen: 63AF */ - -/* function ia_css_tagger_sp_unblock_clients: 31C3 */ -#endif - -#ifndef ISP2401 -/* function css_get_frame_processing_time_end: 28E9 */ -#else -/* function css_get_frame_processing_time_end: 28BA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_event_any_pending_mask -#define HIVE_MEM_event_any_pending_mask scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_event_any_pending_mask 0x388 -#else -#define HIVE_ADDR_event_any_pending_mask 0x3A0 -#endif -#define HIVE_SIZE_event_any_pending_mask 8 -#else -#endif -#endif -#define HIVE_MEM_sp_event_any_pending_mask scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_event_any_pending_mask 0x388 -#else -#define HIVE_ADDR_sp_event_any_pending_mask 0x3A0 -#endif -#define HIVE_SIZE_sp_event_any_pending_mask 8 - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_get_pipe_io_status: 1AB8 */ -#else -/* function ia_css_pipeline_sp_get_pipe_io_status: 1A5A */ -#endif - -/* function sh_css_decode_tag_descr: 352 */ - -/* function debug_enqueue_isp: 27B */ - -#ifndef ISP2401 -/* function qos_scheduler_update_stage_budget: 656E */ -#else -/* function qos_scheduler_update_stage_budget: 673C */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_uninit: 5DFB */ -#else -/* function ia_css_spctrl_sp_uninit: 5EDD */ -#endif - -#ifndef ISP2401 -/* function csi_rx_backend_run: C68 */ -#else 
-/* function csi_rx_backend_run: C62 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x69A0 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x6A4C -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_dis_bufs 140 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x69A0 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x6A4C -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_dis_bufs 140 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_lock_from_start: 341B */ -#else -/* function ia_css_tagger_buf_sp_lock_from_start: 353E */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_isp_idle -#define HIVE_MEM_sem_for_isp_idle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_isp_idle 0x5890 -#else -#define HIVE_ADDR_sem_for_isp_idle 0x592C -#endif -#define HIVE_SIZE_sem_for_isp_idle 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_isp_idle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_isp_idle 0x5890 -#else -#define HIVE_ADDR_sp_sem_for_isp_idle 0x592C -#endif -#define HIVE_SIZE_sp_sem_for_isp_idle 20 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_write_byte_addr: 3B0A */ -#else -/* function ia_css_dmaproxy_sp_write_byte_addr: 3C6A */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init: 3A81 */ -#else -/* function ia_css_dmaproxy_sp_init: 3BE1 */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 3686 */ -#else -/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 37A9 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_VAMEM_BASE -#define 
HIVE_MEM_ISP_VAMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_VAMEM_BASE 0x14 -#define HIVE_SIZE_ISP_VAMEM_BASE 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_VAMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_VAMEM_BASE 0x14 -#define HIVE_SIZE_sp_ISP_VAMEM_BASE 12 - -#ifndef ISP2401 -/* function input_system_channel_sync: 11A4 */ -#else -/* function input_system_channel_sync: 6C10 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_rawcopy_sp_tagger -#define HIVE_MEM_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x732C -#else -#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x73D8 -#endif -#define HIVE_SIZE_ia_css_rawcopy_sp_tagger 24 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x732C -#else -#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x73D8 -#endif -#define HIVE_SIZE_sp_ia_css_rawcopy_sp_tagger 24 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x6A2C -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x6AD8 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_exp_ids 70 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x6A2C -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x6AD8 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_exp_ids 70 - -#ifndef ISP2401 -/* function ia_css_queue_item_load: 561A */ -#else -/* function ia_css_queue_item_load: 57F9 */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_get_state: 5DE6 */ -#else -/* function ia_css_spctrl_sp_get_state: 5EC8 */ 
-#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_callout_sp_thread -#define HIVE_MEM_callout_sp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_callout_sp_thread 0x5A74 -#else -#define HIVE_ADDR_callout_sp_thread 0x278 -#endif -#define HIVE_SIZE_callout_sp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_callout_sp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_callout_sp_thread 0x5A74 -#else -#define HIVE_ADDR_sp_callout_sp_thread 0x278 -#endif -#define HIVE_SIZE_sp_callout_sp_thread 4 - -#ifndef ISP2401 -/* function thread_fiber_sp_init: 14CB */ -#else -/* function thread_fiber_sp_init: 1441 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_SP_PMEM_BASE -#define HIVE_MEM_SP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_SP_PMEM_BASE 0x0 -#define HIVE_SIZE_SP_PMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_SP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_SP_PMEM_BASE 0x0 -#define HIVE_SIZE_sp_SP_PMEM_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_input_stream_format -#define HIVE_MEM_sp_isp_input_stream_format scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_input_stream_format 0x3E2C -#else -#define HIVE_ADDR_sp_isp_input_stream_format 0x3E50 -#endif -#define HIVE_SIZE_sp_isp_input_stream_format 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_input_stream_format scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x3E2C -#else -#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x3E50 -#endif -#define HIVE_SIZE_sp_sp_isp_input_stream_format 20 - -#ifndef ISP2401 -/* function __mod: 6866 */ -#else -/* function __mod: 6A64 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_dmem_channel: 3B6B */ -#else -/* function ia_css_dmaproxy_sp_init_dmem_channel: 3CCB */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_join: 139B */ -#else -/* function 
ia_css_thread_sp_join: 1311 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_add_command: 6EF1 */ -#else -/* function ia_css_dmaproxy_sp_add_command: 712E */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_metadata_thread_func: 5DDF */ -#else -/* function ia_css_sp_metadata_thread_func: 5EC1 */ -#endif - -#ifndef ISP2401 -/* function __sp_event_proxy_func_critical: 6934 */ -#else -/* function __sp_event_proxy_func_critical: 6B32 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_wait_for_isys_stream_N: 5F53 */ -#else -/* function ia_css_pipeline_sp_wait_for_isys_stream_N: 6074 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_metadata_wait: 5DD8 */ -#else -/* function ia_css_sp_metadata_wait: 5EBA */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_peek_from_start: 15A4 */ -#else -/* function ia_css_circbuf_peek_from_start: 151A */ -#endif - -#ifndef ISP2401 -/* function ia_css_event_sp_encode: 3E76 */ -#else -/* function ia_css_event_sp_encode: 3FEB */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_run: 140E */ -#else -/* function ia_css_thread_sp_run: 1384 */ -#endif - -#ifndef ISP2401 -/* function sp_isys_copy_func: 618 */ -#else -/* function sp_isys_copy_func: 5AC */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_init_isp_memories: 50A3 */ -#else -/* function ia_css_sp_isp_param_init_isp_memories: 52AC */ -#endif - -#ifndef ISP2401 -/* function register_isr: 921 */ -#else -/* function register_isr: 8B5 */ -#endif - -/* function irq_raise: C8 */ - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_mmu_invalidate: 3A48 */ -#else -/* function ia_css_dmaproxy_sp_mmu_invalidate: 3B71 */ -#endif - -#ifndef ISP2401 -/* function csi_rx_backend_disable: C34 */ -#else -/* function csi_rx_backend_disable: C2E */ -#endif - -#ifndef ISP2401 -/* function pipeline_sp_initialize_stage: 2104 */ -#else -/* function pipeline_sp_initialize_stage: 20BF */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef 
HIVE_MEM_N_CSI_RX_FE_CTRL_DLANES -#define HIVE_MEM_N_CSI_RX_FE_CTRL_DLANES scalar_processor_2400_dmem -#define HIVE_ADDR_N_CSI_RX_FE_CTRL_DLANES 0x1C4 -#define HIVE_SIZE_N_CSI_RX_FE_CTRL_DLANES 12 -#else -#endif -#endif -#define HIVE_MEM_sp_N_CSI_RX_FE_CTRL_DLANES scalar_processor_2400_dmem -#define HIVE_ADDR_sp_N_CSI_RX_FE_CTRL_DLANES 0x1C4 -#define HIVE_SIZE_sp_N_CSI_RX_FE_CTRL_DLANES 12 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6DC0 */ -#else -/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 700E */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_done_ds: 40BD */ -#else -/* function ia_css_ispctrl_sp_done_ds: 426D */ -#endif - -#ifndef ISP2401 -/* function csi_rx_backend_config: C8B */ -#else -/* function csi_rx_backend_config: C85 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_get_mem_inits: 507E */ -#else -/* function ia_css_sp_isp_param_get_mem_inits: 5287 */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_init_buffer_queues: 1A85 */ -#else -/* function ia_css_parambuf_sp_init_buffer_queues: 1A27 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_pfp_spref -#define HIVE_MEM_vbuf_pfp_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_pfp_spref 0x378 -#else -#define HIVE_ADDR_vbuf_pfp_spref 0x390 -#endif -#define HIVE_SIZE_vbuf_pfp_spref 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_pfp_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_pfp_spref 0x378 -#else -#define HIVE_ADDR_sp_vbuf_pfp_spref 0x390 -#endif -#define HIVE_SIZE_sp_vbuf_pfp_spref 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_HMEM_BASE -#define HIVE_MEM_ISP_HMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_HMEM_BASE 0x20 -#define HIVE_SIZE_ISP_HMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_HMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_HMEM_BASE 0x20 -#define HIVE_SIZE_sp_ISP_HMEM_BASE 4 - -#ifndef 
HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_frames -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x6A74 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x6B20 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_frames 280 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x6A74 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x6B20 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_frames 280 - -#ifndef ISP2401 -/* function qos_scheduler_init_stage_budget: 65A7 */ -#else -/* function qos_scheduler_init_stage_budget: 679A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp2host_buffer_queue_handle -#define HIVE_MEM_sp2host_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_buffer_queue_handle 0x6B8C -#else -#define HIVE_ADDR_sp2host_buffer_queue_handle 0x6C38 -#endif -#define HIVE_SIZE_sp2host_buffer_queue_handle 96 -#else -#endif -#endif -#define HIVE_MEM_sp_sp2host_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x6B8C -#else -#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x6C38 -#endif -#define HIVE_SIZE_sp_sp2host_buffer_queue_handle 96 - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_isp_vars: 4D9D */ -#else -/* function ia_css_ispctrl_sp_init_isp_vars: 4F79 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_stream_start: 6010 */ -#else -/* function ia_css_isys_stream_start: 6187 */ -#endif - -#ifndef ISP2401 -/* function sp_warning: 954 */ -#else -/* function sp_warning: 8E8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_vbuf_enqueue: 62DC */ -#else -/* function ia_css_rmgr_sp_vbuf_enqueue: 64A2 */ -#endif - -#ifndef ISP2401 
-/* function ia_css_tagger_sp_tag_exp_id: 2A84 */ -#else -/* function ia_css_tagger_sp_tag_exp_id: 2A55 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_sfi_release_current_frame: 276B */ -#else -/* function ia_css_pipeline_sp_sfi_release_current_frame: 273C */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_write: 3B21 */ -#else -/* function ia_css_dmaproxy_sp_write: 3C81 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_stream_start_async: 608A */ -#else -/* function ia_css_isys_stream_start_async: 6250 */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_release_in_param: 1905 */ -#else -/* function ia_css_parambuf_sp_release_in_param: 187B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_irq_sw_interrupt_token -#define HIVE_MEM_irq_sw_interrupt_token scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_irq_sw_interrupt_token 0x3E28 -#else -#define HIVE_ADDR_irq_sw_interrupt_token 0x3E4C -#endif -#define HIVE_SIZE_irq_sw_interrupt_token 4 -#else -#endif -#endif -#define HIVE_MEM_sp_irq_sw_interrupt_token scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x3E28 -#else -#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x3E4C -#endif -#define HIVE_SIZE_sp_irq_sw_interrupt_token 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_addresses -#define HIVE_MEM_sp_isp_addresses scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_addresses 0x6FDC -#else -#define HIVE_ADDR_sp_isp_addresses 0x708C -#endif -#define HIVE_SIZE_sp_isp_addresses 172 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_addresses scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_addresses 0x6FDC -#else -#define HIVE_ADDR_sp_sp_isp_addresses 0x708C -#endif -#define HIVE_SIZE_sp_sp_isp_addresses 172 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_acq_gen: 6201 */ -#else -/* function ia_css_rmgr_sp_acq_gen: 63C7 */ -#endif - -#ifndef ISP2401 -/* function 
input_system_input_port_open: 10ED */ -#else -/* function input_system_input_port_open: 10E7 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isps -#define HIVE_MEM_isps scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isps 0x737C -#else -#define HIVE_ADDR_isps 0x7428 -#endif -#define HIVE_SIZE_isps 28 -#else -#endif -#endif -#define HIVE_MEM_sp_isps scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isps 0x737C -#else -#define HIVE_ADDR_sp_isps 0x7428 -#endif -#define HIVE_SIZE_sp_isps 28 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host_sp_queues_initialized -#define HIVE_MEM_host_sp_queues_initialized scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host_sp_queues_initialized 0x3E40 -#else -#define HIVE_ADDR_host_sp_queues_initialized 0x3E64 -#endif -#define HIVE_SIZE_host_sp_queues_initialized 4 -#else -#endif -#endif -#define HIVE_MEM_sp_host_sp_queues_initialized scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host_sp_queues_initialized 0x3E40 -#else -#define HIVE_ADDR_sp_host_sp_queues_initialized 0x3E64 -#endif -#define HIVE_SIZE_sp_host_sp_queues_initialized 4 - -#ifndef ISP2401 -/* function ia_css_queue_uninit: 54E6 */ -#else -/* function ia_css_queue_uninit: 56C5 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_ispctrl_sp_isp_started -#define HIVE_MEM_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x6C94 -#else -#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x6D40 -#endif -#define HIVE_SIZE_ia_css_ispctrl_sp_isp_started 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x6C94 -#else -#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x6D40 -#endif -#define HIVE_SIZE_sp_ia_css_ispctrl_sp_isp_started 4 - -#ifndef ISP2401 -/* function 
ia_css_bufq_sp_release_dynamic_buf: 36F2 */ -#else -/* function ia_css_bufq_sp_release_dynamic_buf: 3815 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_height_exception: 3C19 */ -#else -/* function ia_css_dmaproxy_sp_set_height_exception: 3D8E */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_vmem_channel: 3B9E */ -#else -/* function ia_css_dmaproxy_sp_init_vmem_channel: 3CFF */ -#endif - -#ifndef ISP2401 -/* function csi_rx_backend_stop: C57 */ -#else -/* function csi_rx_backend_stop: C51 */ -#endif - -#ifndef ISP2401 -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_num_ready_threads -#define HIVE_MEM_num_ready_threads scalar_processor_2400_dmem -#define HIVE_ADDR_num_ready_threads 0x5A7C -#define HIVE_SIZE_num_ready_threads 4 -#else -#endif -#endif -#define HIVE_MEM_sp_num_ready_threads scalar_processor_2400_dmem -#define HIVE_ADDR_sp_num_ready_threads 0x5A7C -#define HIVE_SIZE_sp_num_ready_threads 4 - -/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 3AF3 */ -#else -/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 3C53 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_spref -#define HIVE_MEM_vbuf_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_spref 0x374 -#else -#define HIVE_ADDR_vbuf_spref 0x38C -#endif -#define HIVE_SIZE_vbuf_spref 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_spref 0x374 -#else -#define HIVE_ADDR_sp_vbuf_spref 0x38C -#endif -#define HIVE_SIZE_sp_vbuf_spref 4 - -#ifndef ISP2401 -/* function ia_css_queue_enqueue: 5430 */ -#else -/* function ia_css_queue_enqueue: 560F */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_request -#define HIVE_MEM_ia_css_flash_sp_request scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_request 0x5B30 -#else -#define HIVE_ADDR_ia_css_flash_sp_request 0x5BDC -#endif -#define 
HIVE_SIZE_ia_css_flash_sp_request 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_request scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x5B30 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x5BDC -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_request 4 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_vmem_write: 3AC4 */ -#else -/* function ia_css_dmaproxy_sp_vmem_write: 3C24 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_tagger_frames -#define HIVE_MEM_tagger_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_tagger_frames 0x5A84 -#else -#define HIVE_ADDR_tagger_frames 0x5B30 -#endif -#define HIVE_SIZE_tagger_frames 168 -#else -#endif -#endif -#define HIVE_MEM_sp_tagger_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_tagger_frames 0x5A84 -#else -#define HIVE_ADDR_sp_tagger_frames 0x5B30 -#endif -#define HIVE_SIZE_sp_tagger_frames 168 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_if -#define HIVE_MEM_sem_for_reading_if scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_if 0x58A4 -#else -#define HIVE_ADDR_sem_for_reading_if 0x5940 -#endif -#define HIVE_SIZE_sem_for_reading_if 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_if scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_reading_if 0x58A4 -#else -#define HIVE_ADDR_sp_sem_for_reading_if 0x5940 -#endif -#define HIVE_SIZE_sp_sem_for_reading_if 20 - -#ifndef ISP2401 -/* function sp_generate_interrupts: 9D3 */ -#else -/* function sp_generate_interrupts: 967 */ - -/* function ia_css_pipeline_sp_start: 1FC2 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_start: 2007 */ -#else -/* function ia_css_thread_default_callout: 6C8F */ -#endif - -#ifndef ISP2401 -/* function csi_rx_backend_enable: C45 */ -#else -/* function csi_rx_backend_enable: C3F */ -#endif - -#ifndef ISP2401 -/* function 
ia_css_sp_rawcopy_init: 5953 */ -#else -/* function ia_css_sp_rawcopy_init: 5B32 */ -#endif - -#ifndef ISP2401 -/* function input_system_input_port_configure: 113F */ -#else -/* function input_system_input_port_configure: 1139 */ -#endif - -#ifndef ISP2401 -/* function tmr_clock_read: 16EF */ -#else -/* function tmr_clock_read: 1665 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_BAMEM_BASE -#define HIVE_MEM_ISP_BAMEM_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ISP_BAMEM_BASE 0x380 -#else -#define HIVE_ADDR_ISP_BAMEM_BASE 0x398 -#endif -#define HIVE_SIZE_ISP_BAMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_BAMEM_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x380 -#else -#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x398 -#endif -#define HIVE_SIZE_sp_ISP_BAMEM_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues -#define HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x6BEC -#else -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x6C98 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x6BEC -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x6C98 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160 - -#ifndef ISP2401 -/* function isys2401_dma_config_legacy: DE0 */ -#else -/* function isys2401_dma_config_legacy: DDA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ibuf_ctrl_master_ports -#define HIVE_MEM_ibuf_ctrl_master_ports scalar_processor_2400_dmem -#define HIVE_ADDR_ibuf_ctrl_master_ports 0x208 -#define 
HIVE_SIZE_ibuf_ctrl_master_ports 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ibuf_ctrl_master_ports scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ibuf_ctrl_master_ports 0x208 -#define HIVE_SIZE_sp_ibuf_ctrl_master_ports 12 - -#ifndef ISP2401 -/* function css_get_frame_processing_time_start: 28F1 */ -#else -/* function css_get_frame_processing_time_start: 28C2 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_frame -#define HIVE_MEM_sp_all_cbs_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_frame 0x58B8 -#else -#define HIVE_ADDR_sp_all_cbs_frame 0x5954 -#endif -#define HIVE_SIZE_sp_all_cbs_frame 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_frame 0x58B8 -#else -#define HIVE_ADDR_sp_sp_all_cbs_frame 0x5954 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_frame 16 - -#ifndef ISP2401 -/* function ia_css_virtual_isys_sp_isr: 6F07 */ -#else -/* function ia_css_virtual_isys_sp_isr: 716E */ -#endif - -#ifndef ISP2401 -/* function thread_sp_queue_print: 142B */ -#else -/* function thread_sp_queue_print: 13A1 */ -#endif - -#ifndef ISP2401 -/* function sp_notify_eof: 97F */ -#else -/* function sp_notify_eof: 913 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_str2mem -#define HIVE_MEM_sem_for_str2mem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_str2mem 0x58C8 -#else -#define HIVE_ADDR_sem_for_str2mem 0x5964 -#endif -#define HIVE_SIZE_sem_for_str2mem 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_str2mem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_str2mem 0x58C8 -#else -#define HIVE_ADDR_sp_sem_for_str2mem 0x5964 -#endif -#define HIVE_SIZE_sp_sem_for_str2mem 20 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_is_marked_from_start: 3483 */ -#else -/* function ia_css_tagger_buf_sp_is_marked_from_start: 35A6 */ -#endif - -#ifndef ISP2401 -/* 
function ia_css_bufq_sp_acquire_dynamic_buf: 38AA */ -#else -/* function ia_css_bufq_sp_acquire_dynamic_buf: 39CD */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_sfi_mode_is_enabled: 28BF */ -#else -/* function ia_css_pipeline_sp_sfi_mode_is_enabled: 2890 */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_destroy: 16B9 */ -#else -/* function ia_css_circbuf_destroy: 162F */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_PMEM_BASE -#define HIVE_MEM_ISP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_PMEM_BASE 0xC -#define HIVE_SIZE_ISP_PMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_PMEM_BASE 0xC -#define HIVE_SIZE_sp_ISP_PMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_mem_load: 5011 */ -#else -/* function ia_css_sp_isp_param_mem_load: 521A */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_from_start: 326F */ -#else -/* function ia_css_tagger_buf_sp_pop_from_start: 3392 */ -#endif - -#ifndef ISP2401 -/* function __div: 681E */ -#else -/* function __div: 6A1C */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_release_vbuf: 62FB */ -#else -/* function ia_css_rmgr_sp_refcount_release_vbuf: 64C1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_in_use -#define HIVE_MEM_ia_css_flash_sp_in_use scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_in_use 0x5B34 -#else -#define HIVE_ADDR_ia_css_flash_sp_in_use 0x5BE0 -#endif -#define HIVE_SIZE_ia_css_flash_sp_in_use 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_in_use scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x5B34 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x5BE0 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_in_use 4 - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_wait: 6AE4 */ -#else -/* function 
ia_css_thread_sem_sp_wait: 6D63 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_sleep_mode -#define HIVE_MEM_sp_sleep_mode scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sleep_mode 0x3E44 -#else -#define HIVE_ADDR_sp_sleep_mode 0x3E68 -#endif -#define HIVE_SIZE_sp_sleep_mode 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_sleep_mode scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_sleep_mode 0x3E44 -#else -#define HIVE_ADDR_sp_sp_sleep_mode 0x3E68 -#endif -#define HIVE_SIZE_sp_sp_sleep_mode 4 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_push: 337E */ -#else -/* function ia_css_tagger_buf_sp_push: 34A1 */ -#endif - -/* function mmu_invalidate_cache: D3 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_max_cb_elems -#define HIVE_MEM_sp_max_cb_elems scalar_processor_2400_dmem -#define HIVE_ADDR_sp_max_cb_elems 0x148 -#define HIVE_SIZE_sp_max_cb_elems 8 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_max_cb_elems scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_max_cb_elems 0x148 -#define HIVE_SIZE_sp_sp_max_cb_elems 8 - -#ifndef ISP2401 -/* function ia_css_queue_remote_init: 5508 */ -#else -/* function ia_css_queue_remote_init: 56E7 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_stop_req -#define HIVE_MEM_isp_stop_req scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_stop_req 0x575C -#else -#define HIVE_ADDR_isp_stop_req 0x57F8 -#endif -#define HIVE_SIZE_isp_stop_req 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_stop_req scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_stop_req 0x575C -#else -#define HIVE_ADDR_sp_isp_stop_req 0x57F8 -#endif -#define HIVE_SIZE_sp_isp_stop_req 4 - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_sfi_request_next_frame: 2781 */ -#else -/* function ia_css_pipeline_sp_sfi_request_next_frame: 2752 */ -#endif - -#ifndef ISP2401 -#define HIVE_ICACHE_sp_critical_SEGMENT_START 0 -#define 
HIVE_ICACHE_sp_critical_NUM_SEGMENTS 1 -#endif - -#endif /* _sp_map_h_ */ -#ifndef ISP2401 -extern void sh_css_dump_sp_dmem(void); -void sh_css_dump_sp_dmem(void) -{ -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h deleted file mode 100644 index 7907f0ff6d6c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h +++ /dev/null @@ -1,458 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SYSTEM_GLOBAL_H_INCLUDED__ -#define __SYSTEM_GLOBAL_H_INCLUDED__ - -#include -#include - -/* - * The longest allowed (uninteruptible) bus transfer, does not - * take stalling into account - */ -#define HIVE_ISP_MAX_BURST_LENGTH 1024 - -/* - * Maximum allowed burst length in words for the ISP DMA - * This value is set to 2 to prevent the ISP DMA from blocking - * the bus for too long; as the input system can only buffer - * 2 lines on Moorefield and Cherrytrail, the input system buffers - * may overflow if blocked for too long (BZ 2726). 
- */ -#define ISP_DMA_MAX_BURST_LENGTH 2 - -/* - * Create a list of HAS and IS properties that defines the system - * - * The configuration assumes the following - * - The system is hetereogeneous; Multiple cells and devices classes - * - The cell and device instances are homogeneous, each device type - * belongs to the same class - * - Device instances supporting a subset of the class capabilities are - * allowed - * - * We could manage different device classes through the enumerated - * lists (C) or the use of classes (C++), but that is presently not - * fully supported - * - * N.B. the 3 input formatters are of 2 different classess - */ - -#define USE_INPUT_SYSTEM_VERSION_2401 - -#define IS_ISP_2400_SYSTEM -/* - * Since this file is visible everywhere and the system definition - * macros are not, detect the separate definitions for {host, SP, ISP} - * - * The 2401 system has the nice property that it uses a vanilla 2400 SP - * so the SP will believe it is a 2400 system rather than 2401... 
- */ -/* #if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) || defined(__scalar_processor_2401) */ -#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) -#define IS_ISP_2401_MAMOIADA_SYSTEM -#define HAS_ISP_2401_MAMOIADA -#define HAS_SP_2400 -/* #elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) || defined(__scalar_processor_2400)*/ -#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) -#define IS_ISP_2400_MAMOIADA_SYSTEM -#define HAS_ISP_2400_MAMOIADA -#define HAS_SP_2400 -#else -#error "system_global.h: 2400_SYSTEM must be one of {2400, 2401 }" -#endif - -#define HAS_MMU_VERSION_2 -#define HAS_DMA_VERSION_2 -#define HAS_GDC_VERSION_2 -#define HAS_VAMEM_VERSION_2 -#define HAS_HMEM_VERSION_1 -#define HAS_BAMEM_VERSION_2 -#define HAS_IRQ_VERSION_2 -#define HAS_IRQ_MAP_VERSION_2 -#define HAS_INPUT_FORMATTER_VERSION_2 -/* 2401: HAS_INPUT_SYSTEM_VERSION_3 */ -/* 2400: HAS_INPUT_SYSTEM_VERSION_2 */ -#define HAS_INPUT_SYSTEM_VERSION_2 -#define HAS_INPUT_SYSTEM_VERSION_2401 -#define HAS_BUFFERED_SENSOR -#define HAS_FIFO_MONITORS_VERSION_2 -/* #define HAS_GP_REGS_VERSION_2 */ -#define HAS_GP_DEVICE_VERSION_2 -#define HAS_GPIO_VERSION_1 -#define HAS_TIMED_CTRL_VERSION_1 -#define HAS_RX_VERSION_2 -#define HAS_NO_INPUT_FORMATTER -/*#define HAS_NO_PACKED_RAW_PIXELS*/ -/*#define HAS_NO_DVS_6AXIS_CONFIG_UPDATE*/ - -#define DMA_DDR_TO_VAMEM_WORKAROUND -#define DMA_DDR_TO_HMEM_WORKAROUND - - -/* - * Semi global. 
"HRT" is accessible from SP, but - * the HRT types do not fully apply - */ -#define HRT_VADDRESS_WIDTH 32 -/* Surprise, this is a local property*/ -/*#define HRT_ADDRESS_WIDTH 64 */ -#define HRT_DATA_WIDTH 32 - -#define SIZEOF_HRT_REG (HRT_DATA_WIDTH>>3) -#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH/8) - -/* The main bus connecting all devices */ -#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH -#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES - -#define CSI2P_DISABLE_ISYS2401_ONLINE_MODE - -/* per-frame parameter handling support */ -#define SH_CSS_ENABLE_PER_FRAME_PARAMS - -typedef uint32_t hrt_bus_align_t; - -/* - * Enumerate the devices, device access through the API is by ID, - * through the DLI by address. The enumerator terminators are used - * to size the wiring arrays and as an exception value. - */ -typedef enum { - DDR0_ID = 0, - N_DDR_ID -} ddr_ID_t; - -typedef enum { - ISP0_ID = 0, - N_ISP_ID -} isp_ID_t; - -typedef enum { - SP0_ID = 0, - N_SP_ID -} sp_ID_t; - -#if defined(IS_ISP_2401_MAMOIADA_SYSTEM) -typedef enum { - MMU0_ID = 0, - MMU1_ID, - N_MMU_ID -} mmu_ID_t; -#elif defined(IS_ISP_2400_MAMOIADA_SYSTEM) -typedef enum { - MMU0_ID = 0, - MMU1_ID, - N_MMU_ID -} mmu_ID_t; -#else -#error "system_global.h: SYSTEM must be one of {2400, 2401}" -#endif - -typedef enum { - DMA0_ID = 0, - N_DMA_ID -} dma_ID_t; - -typedef enum { - GDC0_ID = 0, - GDC1_ID, - N_GDC_ID -} gdc_ID_t; - -/* this extra define is needed because we want to use it also - in the preprocessor, and that doesn't work with enums. 
- */ -#define N_GDC_ID_CPP 2 - -typedef enum { - VAMEM0_ID = 0, - VAMEM1_ID, - VAMEM2_ID, - N_VAMEM_ID -} vamem_ID_t; - -typedef enum { - BAMEM0_ID = 0, - N_BAMEM_ID -} bamem_ID_t; - -typedef enum { - HMEM0_ID = 0, - N_HMEM_ID -} hmem_ID_t; - -typedef enum { - ISYS_IRQ0_ID = 0, /* port a */ - ISYS_IRQ1_ID, /* port b */ - ISYS_IRQ2_ID, /* port c */ - N_ISYS_IRQ_ID -} isys_irq_ID_t; - -typedef enum { - IRQ0_ID = 0, /* GP IRQ block */ - IRQ1_ID, /* Input formatter */ - IRQ2_ID, /* input system */ - IRQ3_ID, /* input selector */ - N_IRQ_ID -} irq_ID_t; - -typedef enum { - FIFO_MONITOR0_ID = 0, - N_FIFO_MONITOR_ID -} fifo_monitor_ID_t; - -/* - * Deprecated: Since all gp_reg instances are different - * and put in the address maps of other devices we cannot - * enumerate them as that assumes the instrances are the - * same. - * - * We define a single GP_DEVICE containing all gp_regs - * w.r.t. a single base address - * -typedef enum { - GP_REGS0_ID = 0, - N_GP_REGS_ID -} gp_regs_ID_t; - */ -typedef enum { - GP_DEVICE0_ID = 0, - N_GP_DEVICE_ID -} gp_device_ID_t; - -typedef enum { - GP_TIMER0_ID = 0, - GP_TIMER1_ID, - GP_TIMER2_ID, - GP_TIMER3_ID, - GP_TIMER4_ID, - GP_TIMER5_ID, - GP_TIMER6_ID, - GP_TIMER7_ID, - N_GP_TIMER_ID -} gp_timer_ID_t; - -typedef enum { - GPIO0_ID = 0, - N_GPIO_ID -} gpio_ID_t; - -typedef enum { - TIMED_CTRL0_ID = 0, - N_TIMED_CTRL_ID -} timed_ctrl_ID_t; - -typedef enum { - INPUT_FORMATTER0_ID = 0, - INPUT_FORMATTER1_ID, - INPUT_FORMATTER2_ID, - INPUT_FORMATTER3_ID, - N_INPUT_FORMATTER_ID -} input_formatter_ID_t; - -/* The IF RST is outside the IF */ -#define INPUT_FORMATTER0_SRST_OFFSET 0x0824 -#define INPUT_FORMATTER1_SRST_OFFSET 0x0624 -#define INPUT_FORMATTER2_SRST_OFFSET 0x0424 -#define INPUT_FORMATTER3_SRST_OFFSET 0x0224 - -#define INPUT_FORMATTER0_SRST_MASK 0x0001 -#define INPUT_FORMATTER1_SRST_MASK 0x0002 -#define INPUT_FORMATTER2_SRST_MASK 0x0004 -#define INPUT_FORMATTER3_SRST_MASK 0x0008 - -typedef enum { - INPUT_SYSTEM0_ID = 0, - 
N_INPUT_SYSTEM_ID -} input_system_ID_t; - -typedef enum { - RX0_ID = 0, - N_RX_ID -} rx_ID_t; - -enum mipi_port_id { - MIPI_PORT0_ID = 0, - MIPI_PORT1_ID, - MIPI_PORT2_ID, - N_MIPI_PORT_ID -}; - -#define N_RX_CHANNEL_ID 4 - -/* Generic port enumeration with an internal port type ID */ -typedef enum { - CSI_PORT0_ID = 0, - CSI_PORT1_ID, - CSI_PORT2_ID, - TPG_PORT0_ID, - PRBS_PORT0_ID, - FIFO_PORT0_ID, - MEMORY_PORT0_ID, - N_INPUT_PORT_ID -} input_port_ID_t; - -typedef enum { - CAPTURE_UNIT0_ID = 0, - CAPTURE_UNIT1_ID, - CAPTURE_UNIT2_ID, - ACQUISITION_UNIT0_ID, - DMA_UNIT0_ID, - CTRL_UNIT0_ID, - GPREGS_UNIT0_ID, - FIFO_UNIT0_ID, - IRQ_UNIT0_ID, - N_SUB_SYSTEM_ID -} sub_system_ID_t; - -#define N_CAPTURE_UNIT_ID 3 -#define N_ACQUISITION_UNIT_ID 1 -#define N_CTRL_UNIT_ID 1 - -/* - * Input-buffer Controller. - */ -typedef enum { - IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */ - IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */ - IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */ - N_IBUF_CTRL_ID -} ibuf_ctrl_ID_t; -/* end of Input-buffer Controller */ - -/* - * Stream2MMIO. - */ -typedef enum { - STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */ - STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */ - STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */ - N_STREAM2MMIO_ID -} stream2mmio_ID_t; - -typedef enum { - /* - * Stream2MMIO 0 has 8 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID]. - * - * Stream2MMIO 1 has 4 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...TREAM2MMIO_SID3_ID]. - * - * Stream2MMIO 2 has 4 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID]. - */ - STREAM2MMIO_SID0_ID = 0, - STREAM2MMIO_SID1_ID, - STREAM2MMIO_SID2_ID, - STREAM2MMIO_SID3_ID, - STREAM2MMIO_SID4_ID, - STREAM2MMIO_SID5_ID, - STREAM2MMIO_SID6_ID, - STREAM2MMIO_SID7_ID, - N_STREAM2MMIO_SID_ID -} stream2mmio_sid_ID_t; -/* end of Stream2MMIO */ - -/** - * Input System 2401: CSI-MIPI recevier. 
- */ -typedef enum { - CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */ - CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */ - CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */ - N_CSI_RX_BACKEND_ID -} csi_rx_backend_ID_t; - -typedef enum { - CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */ - CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */ - CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */ -#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID+1) -} csi_rx_frontend_ID_t; - -typedef enum { - CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */ - CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */ - CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */ - CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */ - N_CSI_RX_DLANE_ID -} csi_rx_fe_dlane_ID_t; -/* end of CSI-MIPI receiver */ - -typedef enum { - ISYS2401_DMA0_ID = 0, - N_ISYS2401_DMA_ID -} isys2401_dma_ID_t; - -/** - * Pixel-generator. ("system_global.h") - */ -typedef enum { - PIXELGEN0_ID = 0, - PIXELGEN1_ID, - PIXELGEN2_ID, - N_PIXELGEN_ID -} pixelgen_ID_t; -/* end of pixel-generator. 
("system_global.h") */ - -typedef enum { - INPUT_SYSTEM_CSI_PORT0_ID = 0, - INPUT_SYSTEM_CSI_PORT1_ID, - INPUT_SYSTEM_CSI_PORT2_ID, - - INPUT_SYSTEM_PIXELGEN_PORT0_ID, - INPUT_SYSTEM_PIXELGEN_PORT1_ID, - INPUT_SYSTEM_PIXELGEN_PORT2_ID, - - N_INPUT_SYSTEM_INPUT_PORT_ID -} input_system_input_port_ID_t; - -#define N_INPUT_SYSTEM_CSI_PORT 3 - -typedef enum { - ISYS2401_DMA_CHANNEL_0 = 0, - ISYS2401_DMA_CHANNEL_1, - ISYS2401_DMA_CHANNEL_2, - ISYS2401_DMA_CHANNEL_3, - ISYS2401_DMA_CHANNEL_4, - ISYS2401_DMA_CHANNEL_5, - ISYS2401_DMA_CHANNEL_6, - ISYS2401_DMA_CHANNEL_7, - ISYS2401_DMA_CHANNEL_8, - ISYS2401_DMA_CHANNEL_9, - ISYS2401_DMA_CHANNEL_10, - ISYS2401_DMA_CHANNEL_11, - N_ISYS2401_DMA_CHANNEL -} isys2401_dma_channel; - -enum ia_css_isp_memories { - IA_CSS_ISP_PMEM0 = 0, - IA_CSS_ISP_DMEM0, - IA_CSS_ISP_VMEM0, - IA_CSS_ISP_VAMEM0, - IA_CSS_ISP_VAMEM1, - IA_CSS_ISP_VAMEM2, - IA_CSS_ISP_HMEM0, - IA_CSS_SP_DMEM0, - IA_CSS_DDR, - N_IA_CSS_MEMORIES -}; -#define IA_CSS_NUM_MEMORIES 9 -/* For driver compatability */ -#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES -#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES - -#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.c deleted file mode 100644 index 325b821f276c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.c +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Generated code: do not edit or commmit. */ - -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_pipeline.h" -#include "ia_css_isp_configs.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_iterator( - const struct ia_css_binary *binary, - const struct ia_css_iterator_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.iterator.size; - offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset; - } - if (size) { - ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_copy_output( - const struct ia_css_binary *binary, - const struct ia_css_copy_output_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size; - offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset; - } - if (size) { - ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *) - 
&binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_crop( - const struct ia_css_binary *binary, - const struct ia_css_crop_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.crop.size; - offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset; - } - if (size) { - ia_css_crop_config((struct sh_css_isp_crop_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_fpn( - const struct ia_css_binary *binary, - const struct ia_css_fpn_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.fpn.size; - offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset; - } - if (size) { - ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_dvs( - const struct ia_css_binary *binary, - const struct 
ia_css_dvs_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.dvs.size; - offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset; - } - if (size) { - ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_qplane( - const struct ia_css_binary *binary, - const struct ia_css_qplane_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.qplane.size; - offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset; - } - if (size) { - ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output0( - const struct ia_css_binary *binary, - const struct ia_css_output0_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output0.size; - offset = 
binary->info->mem_offsets.offsets.config->dmem.output0.offset; - } - if (size) { - ia_css_output0_config((struct sh_css_isp_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output1( - const struct ia_css_binary *binary, - const struct ia_css_output1_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output1.size; - offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset; - } - if (size) { - ia_css_output1_config((struct sh_css_isp_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output( - const struct ia_css_binary *binary, - const struct ia_css_output_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.output.size; - offset = binary->info->mem_offsets.offsets.config->dmem.output.offset; - } - if (size) { - ia_css_output_config((struct sh_css_isp_output_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_configure_output() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ -#ifdef ISP2401 - -void -ia_css_configure_sc( - const struct ia_css_binary *binary, - const struct ia_css_sc_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.sc.size; - offset = binary->info->mem_offsets.offsets.config->dmem.sc.offset; - } - if (size) { - ia_css_sc_config((struct sh_css_isp_sc_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ -#endif - -void -ia_css_configure_raw( - const struct ia_css_binary *binary, - const struct ia_css_raw_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.raw.size; - offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset; - } - if (size) { - ia_css_raw_config((struct sh_css_isp_raw_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_tnr( - const struct ia_css_binary *binary, - const struct ia_css_tnr_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; 
- if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.tnr.size; - offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset; - } - if (size) { - ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_ref( - const struct ia_css_binary *binary, - const struct ia_css_ref_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.ref.size; - offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset; - } - if (size) { - ia_css_ref_config((struct sh_css_isp_ref_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() leave:\n"); -} - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_vf( - const struct ia_css_binary *binary, - const struct ia_css_vf_configuration *config_dmem) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() enter:\n"); - - { - unsigned offset = 0; - unsigned size = 0; - if (binary->info->mem_offsets.offsets.config) { - size = binary->info->mem_offsets.offsets.config->dmem.vf.size; - offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset; - } - if (size) { - ia_css_vf_config((struct sh_css_isp_vf_isp_config *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset], - config_dmem, size); } - } - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() leave:\n"); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.h deleted file mode 100644 index 8aacd3dbc05a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.h +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifdef IA_CSS_INCLUDE_CONFIGURATIONS -#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h" -#include "isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h" -#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h" -#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h" -#include "isp/kernels/output/output_1.0/ia_css_output.host.h" -#include "isp/kernels/qplane/qplane_2/ia_css_qplane.host.h" -#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h" -#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h" -#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h" -#ifdef ISP2401 -#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h" -#endif -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h" -#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h" -#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h" -#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */ -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_CONFIG_H -#define _IA_CSS_ISP_CONFIG_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_configuration_ids { - IA_CSS_ITERATOR_CONFIG_ID, - IA_CSS_COPY_OUTPUT_CONFIG_ID, - IA_CSS_CROP_CONFIG_ID, - IA_CSS_FPN_CONFIG_ID, - IA_CSS_DVS_CONFIG_ID, - IA_CSS_QPLANE_CONFIG_ID, - IA_CSS_OUTPUT0_CONFIG_ID, - IA_CSS_OUTPUT1_CONFIG_ID, - IA_CSS_OUTPUT_CONFIG_ID, -#ifdef ISP2401 - IA_CSS_SC_CONFIG_ID, -#endif - IA_CSS_RAW_CONFIG_ID, - IA_CSS_TNR_CONFIG_ID, - IA_CSS_REF_CONFIG_ID, - IA_CSS_VF_CONFIG_ID, - IA_CSS_NUM_CONFIGURATION_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_config_memory_offsets { - struct { - struct ia_css_isp_parameter iterator; - struct ia_css_isp_parameter copy_output; - struct ia_css_isp_parameter crop; - struct ia_css_isp_parameter fpn; - struct ia_css_isp_parameter dvs; - struct ia_css_isp_parameter qplane; - struct ia_css_isp_parameter output0; - struct ia_css_isp_parameter output1; - struct ia_css_isp_parameter 
output; -#ifdef ISP2401 - struct ia_css_isp_parameter sc; -#endif - struct ia_css_isp_parameter raw; - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter ref; - struct ia_css_isp_parameter vf; - } dmem; -}; - -#if defined(IA_CSS_INCLUDE_CONFIGURATIONS) - -#include "ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_iterator( - const struct ia_css_binary *binary, - const struct ia_css_iterator_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_copy_output( - const struct ia_css_binary *binary, - const struct ia_css_copy_output_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_crop( - const struct ia_css_binary *binary, - const struct ia_css_crop_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_fpn( - const struct ia_css_binary *binary, - const struct ia_css_fpn_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_dvs( - const struct ia_css_binary *binary, - const struct ia_css_dvs_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_qplane( - const struct ia_css_binary *binary, - const struct ia_css_qplane_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output0( - const struct ia_css_binary *binary, - const struct ia_css_output0_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output1( - const struct ia_css_binary *binary, - const struct ia_css_output1_configuration 
*config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_output( - const struct ia_css_binary *binary, - const struct ia_css_output_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -#ifdef ISP2401 -void -ia_css_configure_sc( - const struct ia_css_binary *binary, - const struct ia_css_sc_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -#endif -void -ia_css_configure_raw( - const struct ia_css_binary *binary, - const struct ia_css_raw_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_tnr( - const struct ia_css_binary *binary, - const struct ia_css_tnr_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_ref( - const struct ia_css_binary *binary, - const struct ia_css_ref_configuration *config_dmem); - -/* Code generated by genparam/genconfig.c:gen_configure_function() */ - -void -ia_css_configure_vf( - const struct ia_css_binary *binary, - const struct ia_css_vf_configuration *config_dmem); - -#endif /* IA_CSS_INCLUDE_CONFIGURATION */ - -#endif /* _IA_CSS_ISP_CONFIG_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.c deleted file mode 100644 index 11e4463ebb50..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.c +++ /dev/null @@ -1,3220 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#define IA_CSS_INCLUDE_PARAMETERS -#include "sh_css_params.h" -#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h" -#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h" -#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h" -#include "isp/kernels/bh/bh_2/ia_css_bh.host.h" -#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h" -#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h" -#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h" -#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h" -#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h" -#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h" -#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h" -#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h" -#include "isp/kernels/de/de_1.0/ia_css_de.host.h" -#include "isp/kernels/de/de_2/ia_css_de2.host.h" -#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h" -#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" -#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h" -#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h" -#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h" -#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h" -#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h" -#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h" -#include "isp/kernels/ob/ob2/ia_css_ob2.host.h" -#include "isp/kernels/output/output_1.0/ia_css_output.host.h" -#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h" -#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h" -#include 
"isp/kernels/sc/sc_1.0/ia_css_sc.host.h" -#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h" -#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h" -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h" -#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h" -#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h" -#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h" -#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h" -#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h" -#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h" -#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h" -#include "isp/kernels/dpc2/ia_css_dpc2.host.h" -#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h" -#include "isp/kernels/bnlm/ia_css_bnlm.host.h" -#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h" -/* Generated code: do not edit or commmit. */ - -#include "ia_css_pipeline.h" -#include "ia_css_isp_params.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_aa( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.aa.size; - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset; - - if (size) { - struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - t->strength = params->aa_config.strength; - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_anr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.anr.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() enter:\n"); - - ia_css_anr_encode((struct sh_css_isp_anr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->anr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_anr2( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() enter:\n"); - - ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->anr_thres, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bh( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bh.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n"); - - 
ia_css_bh_encode((struct sh_css_isp_bh_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->s3a_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n"); - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_cnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() enter:\n"); - - ia_css_cnr_encode((struct sh_css_isp_cnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->cnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_crop( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.crop.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() enter:\n"); - - ia_css_crop_encode((struct sh_css_isp_crop_isp_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->crop_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_csc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.csc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() enter:\n"); - - ia_css_csc_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_dp( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.dp.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset; - - if (size) { - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n"); - - ia_css_dp_encode((struct sh_css_isp_dp_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dp_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() enter:\n"); - - ia_css_bnr_encode((struct sh_css_isp_bnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->nr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_de( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.de.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.de.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n"); - - ia_css_de_encode((struct sh_css_isp_de_params *) - 
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->de_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ecd( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() enter:\n"); - - ia_css_ecd_encode((struct sh_css_isp_ecd_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ecd_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_formats( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.formats.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() enter:\n"); - - ia_css_formats_encode((struct sh_css_isp_formats_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->formats_config, -size); - params->isp_params_changed 
= true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_fpn( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() enter:\n"); - - ia_css_fpn_encode((struct sh_css_isp_fpn_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->fpn_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_gc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.gc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n"); - - ia_css_gc_encode((struct sh_css_isp_gc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->gc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n"); 
- } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n"); - - ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->gc_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ce( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ce.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n"); - - ia_css_ce_encode((struct sh_css_isp_ce_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ce_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_yuv2rgb( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() enter:\n"); - - ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->yuv2rgb_cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_rgb2yuv( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() enter:\n"); - - ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->rgb2yuv_cc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_r_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset; - - if (size) { - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() enter:\n"); - - ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset], - ¶ms->r_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_g_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() enter:\n"); - - ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->g_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_b_gamma( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_process_b_gamma() enter:\n"); - - ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset], - ¶ms->b_gamma_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_uds( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.uds.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset; - - if (size) { - struct sh_css_sp_uds_params *p; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() enter:\n"); - - p = (struct sh_css_sp_uds_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - p->crop_pos = params->uds_config.crop_pos; - p->uds = params->uds_config.uds; - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_raa( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.raa.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() enter:\n"); - - ia_css_raa_encode((struct 
sh_css_isp_aa_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->raa_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_s3a( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() enter:\n"); - - ia_css_s3a_encode((struct sh_css_isp_s3a_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->s3a_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ob( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ob.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n"); - - ia_css_ob_encode((struct sh_css_isp_ob_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ob_config, -¶ms->stream_configs.ob, size); - 
params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.ob.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n"); - - ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->ob_config, -¶ms->stream_configs.ob, size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_output( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.output.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.output.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() enter:\n"); - - ia_css_output_encode((struct sh_css_isp_output_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->output_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sc( - unsigned pipe_id, - const struct 
ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n"); - - ia_css_sc_encode((struct sh_css_isp_sc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->sc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_bds( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bds.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset; - - if (size) { - struct sh_css_isp_bds_params *p; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() enter:\n"); - - p = (struct sh_css_isp_bds_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - p->baf_strength = params->bds_config.strength; - - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_tnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() enter:\n"); - - ia_css_tnr_encode((struct sh_css_isp_tnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->tnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_macc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.macc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() enter:\n"); - - ia_css_macc_encode((struct sh_css_isp_macc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->macc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_horicoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() enter:\n"); - - ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_vertcoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() enter:\n"); - - ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_horiproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() enter:\n"); - - ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis_vertproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() enter:\n"); - - ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_horicoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() enter:\n"); - - ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_vertcoef( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() enter:\n"); - - ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_horiproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() enter:\n"); - - ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_sdis2_vertproj( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() enter:\n"); - - ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->dvs2_coefs, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_wb( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.wb.size; - - unsigned offset = 
stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n"); - - ia_css_wb_encode((struct sh_css_isp_wb_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->wb_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_nr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.nr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n"); - - ia_css_nr_encode((struct sh_css_isp_ynr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->nr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_yee( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yee.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() enter:\n"); - - 
ia_css_yee_encode((struct sh_css_isp_yee_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->yee_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ynr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() enter:\n"); - - ia_css_ynr_encode((struct sh_css_isp_yee2_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ynr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_fc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n"); - - ia_css_fc_encode((struct sh_css_isp_fc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->fc_config, -size); - 
params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_ctc( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n"); - - ia_css_ctc_encode((struct sh_css_isp_ctc_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->ctc_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n"); - } - - } - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n"); - - ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset], - ¶ms->ctc_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr_table( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - 
struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() enter:\n"); - - ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset], - ¶ms->xnr_table, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() enter:\n"); - - ia_css_xnr_encode((struct sh_css_isp_xnr_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->xnr_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() leave:\n"); - } - - } -} - -/* Code generated by genparam/gencode.c:gen_process_function() */ - -static void -ia_css_process_xnr3( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - { - unsigned size = 
stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n"); - - ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->xnr3_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n"); - } - - } -#ifdef ISP2401 - { - unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.offset; - - if (size) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n"); - - ia_css_xnr3_vmem_encode((struct sh_css_isp_xnr3_vmem_params *) - &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset], - ¶ms->xnr3_config, -size); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n"); - } - - } -#endif -} - -/* Code generated by genparam/gencode.c:gen_param_process_table() */ - -void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) = { - ia_css_process_aa, - ia_css_process_anr, - ia_css_process_anr2, - ia_css_process_bh, - ia_css_process_cnr, - ia_css_process_crop, - ia_css_process_csc, - ia_css_process_dp, - ia_css_process_bnr, - ia_css_process_de, - ia_css_process_ecd, - ia_css_process_formats, - ia_css_process_fpn, - ia_css_process_gc, - ia_css_process_ce, - ia_css_process_yuv2rgb, - ia_css_process_rgb2yuv, - 
ia_css_process_r_gamma, - ia_css_process_g_gamma, - ia_css_process_b_gamma, - ia_css_process_uds, - ia_css_process_raa, - ia_css_process_s3a, - ia_css_process_ob, - ia_css_process_output, - ia_css_process_sc, - ia_css_process_bds, - ia_css_process_tnr, - ia_css_process_macc, - ia_css_process_sdis_horicoef, - ia_css_process_sdis_vertcoef, - ia_css_process_sdis_horiproj, - ia_css_process_sdis_vertproj, - ia_css_process_sdis2_horicoef, - ia_css_process_sdis2_vertcoef, - ia_css_process_sdis2_horiproj, - ia_css_process_sdis2_vertproj, - ia_css_process_wb, - ia_css_process_nr, - ia_css_process_yee, - ia_css_process_ynr, - ia_css_process_fc, - ia_css_process_ctc, - ia_css_process_xnr_table, - ia_css_process_xnr, - ia_css_process_xnr3, -}; - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_dp_config(const struct ia_css_isp_parameters *params, - struct ia_css_dp_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() enter: " - "config=%p\n",config); - - *config = params->dp_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() leave\n"); - ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_dp_config(struct ia_css_isp_parameters *params, - const struct ia_css_dp_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n"); - ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dp_config = *config; - params->config_changed[IA_CSS_DP_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_DP_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_dp_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void 
-ia_css_get_wb_config(const struct ia_css_isp_parameters *params, - struct ia_css_wb_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() enter: " - "config=%p\n",config); - - *config = params->wb_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() leave\n"); - ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_wb_config(struct ia_css_isp_parameters *params, - const struct ia_css_wb_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n"); - ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->wb_config = *config; - params->config_changed[IA_CSS_WB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_WB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_wb_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_tnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_tnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() enter: " - "config=%p\n",config); - - *config = params->tnr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() leave\n"); - ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_tnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_tnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n"); - ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - 
params->tnr_config = *config; - params->config_changed[IA_CSS_TNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_TNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_tnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ob_config(const struct ia_css_isp_parameters *params, - struct ia_css_ob_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() enter: " - "config=%p\n",config); - - *config = params->ob_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() leave\n"); - ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ob_config(struct ia_css_isp_parameters *params, - const struct ia_css_ob_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n"); - ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ob_config = *config; - params->config_changed[IA_CSS_OB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_OB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ob_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_de_config(const struct ia_css_isp_parameters *params, - struct ia_css_de_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() enter: " - "config=%p\n",config); - - *config = params->de_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() leave\n"); - ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by 
genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_de_config(struct ia_css_isp_parameters *params, - const struct ia_css_de_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n"); - ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->de_config = *config; - params->config_changed[IA_CSS_DE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_DE_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_de_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_anr_config(const struct ia_css_isp_parameters *params, - struct ia_css_anr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() enter: " - "config=%p\n",config); - - *config = params->anr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() leave\n"); - ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n"); - ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->anr_config = *config; - params->config_changed[IA_CSS_ANR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ANR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_anr2_config(const struct ia_css_isp_parameters *params, - struct ia_css_anr_thres *config){ - if (config == NULL) - return; - - 
assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() enter: " - "config=%p\n",config); - - *config = params->anr_thres; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() leave\n"); - ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr2_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_thres *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n"); - ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->anr_thres = *config; - params->config_changed[IA_CSS_ANR2_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ANR2_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr2_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ce_config(const struct ia_css_isp_parameters *params, - struct ia_css_ce_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() enter: " - "config=%p\n",config); - - *config = params->ce_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() leave\n"); - ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ce_config(struct ia_css_isp_parameters *params, - const struct ia_css_ce_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n"); - ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ce_config = *config; - params->config_changed[IA_CSS_CE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CE_ID] = true; - 
-#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ce_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ecd_config(const struct ia_css_isp_parameters *params, - struct ia_css_ecd_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() enter: " - "config=%p\n",config); - - *config = params->ecd_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() leave\n"); - ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ecd_config(struct ia_css_isp_parameters *params, - const struct ia_css_ecd_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n"); - ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ecd_config = *config; - params->config_changed[IA_CSS_ECD_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_ECD_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ecd_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ynr_config(const struct ia_css_isp_parameters *params, - struct ia_css_ynr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() enter: " - "config=%p\n",config); - - *config = params->ynr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() leave\n"); - ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ynr_config(struct ia_css_isp_parameters *params, - const struct ia_css_ynr_config *config) -{ - if 
(config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n"); - ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ynr_config = *config; - params->config_changed[IA_CSS_YNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_YNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ynr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_fc_config(const struct ia_css_isp_parameters *params, - struct ia_css_fc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() enter: " - "config=%p\n",config); - - *config = params->fc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() leave\n"); - ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_fc_config(struct ia_css_isp_parameters *params, - const struct ia_css_fc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n"); - ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->fc_config = *config; - params->config_changed[IA_CSS_FC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_FC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_fc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_cnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_cnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() enter: " - "config=%p\n",config); - - *config = params->cnr_config; - - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() leave\n"); - ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_cnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_cnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n"); - ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->cnr_config = *config; - params->config_changed[IA_CSS_CNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_cnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_macc_config(const struct ia_css_isp_parameters *params, - struct ia_css_macc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() enter: " - "config=%p\n",config); - - *config = params->macc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() leave\n"); - ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_macc_config(struct ia_css_isp_parameters *params, - const struct ia_css_macc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n"); - ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->macc_config = *config; - params->config_changed[IA_CSS_MACC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_MACC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_macc_config() leave: " - "return_void\n"); -} - -/* Code generated by 
genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_ctc_config(const struct ia_css_isp_parameters *params, - struct ia_css_ctc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() enter: " - "config=%p\n",config); - - *config = params->ctc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() leave\n"); - ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ctc_config(struct ia_css_isp_parameters *params, - const struct ia_css_ctc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n"); - ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->ctc_config = *config; - params->config_changed[IA_CSS_CTC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CTC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ctc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_aa_config(const struct ia_css_isp_parameters *params, - struct ia_css_aa_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() enter: " - "config=%p\n",config); - - *config = params->aa_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() leave\n"); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_aa_config(struct ia_css_isp_parameters *params, - const struct ia_css_aa_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n"); - params->aa_config = *config; - 
params->config_changed[IA_CSS_AA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_AA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_aa_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() enter: " - "config=%p\n",config); - - *config = params->yuv2rgb_cc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() leave\n"); - ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n"); - ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->yuv2rgb_cc_config = *config; - params->config_changed[IA_CSS_YUV2RGB_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_YUV2RGB_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_yuv2rgb_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() enter: " - "config=%p\n",config); - - *config = params->rgb2yuv_cc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() leave\n"); - ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} 
- -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n"); - ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->rgb2yuv_cc_config = *config; - params->config_changed[IA_CSS_RGB2YUV_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_RGB2YUV_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_rgb2yuv_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_csc_config(const struct ia_css_isp_parameters *params, - struct ia_css_cc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() enter: " - "config=%p\n",config); - - *config = params->cc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() leave\n"); - ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_csc_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n"); - ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->cc_config = *config; - params->config_changed[IA_CSS_CSC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_CSC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_csc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_nr_config(const struct ia_css_isp_parameters *params, - struct 
ia_css_nr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() enter: " - "config=%p\n",config); - - *config = params->nr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() leave\n"); - ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_nr_config(struct ia_css_isp_parameters *params, - const struct ia_css_nr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n"); - ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->nr_config = *config; - params->config_changed[IA_CSS_BNR_ID] = true; - params->config_changed[IA_CSS_NR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_NR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_nr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_gc_config(const struct ia_css_isp_parameters *params, - struct ia_css_gc_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() enter: " - "config=%p\n",config); - - *config = params->gc_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() leave\n"); - ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_gc_config(struct ia_css_isp_parameters *params, - const struct ia_css_gc_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n"); - ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->gc_config = *config; - 
params->config_changed[IA_CSS_GC_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_GC_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_gc_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() leave\n"); - ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horicoef_config() enter:\n"); - ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horicoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_get_sdis_vertcoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() leave\n"); - ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertcoef_config() enter:\n"); - ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertcoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() leave\n"); - ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horiproj_config() enter:\n"); - ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horiproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() leave\n"); - ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertproj_config() enter:\n"); - ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs_coefs = *config; - params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true; - -#endif - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() leave\n"); - ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horicoef_config() enter:\n"); - ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horicoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; 
- - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() leave\n"); - ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertcoef_config() enter:\n"); - ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertcoef_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() leave\n"); - ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horiproj_config() enter:\n"); - 
ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horiproj_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params, - struct ia_css_dvs2_coefficients *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() enter: " - "config=%p\n",config); - - *config = params->dvs2_coefs; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() leave\n"); - ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertproj_config() enter:\n"); - ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->dvs2_coefs = *config; - params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true; - params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true; - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertproj_config() leave: " - 
"return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() enter: " - "config=%p\n",config); - - *config = params->r_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() leave\n"); - ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n"); - ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->r_gamma_table = *config; - params->config_changed[IA_CSS_R_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_R_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_r_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() enter: " - "config=%p\n",config); - - *config = params->g_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() leave\n"); - ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if 
(config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n"); - ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->g_gamma_table = *config; - params->config_changed[IA_CSS_G_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_G_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_g_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params, - struct ia_css_rgb_gamma_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() enter: " - "config=%p\n",config); - - *config = params->b_gamma_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() leave\n"); - ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n"); - ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->b_gamma_table = *config; - params->config_changed[IA_CSS_B_GAMMA_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_B_GAMMA_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_b_gamma_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr_table *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, 
"ia_css_get_xnr_table_config() enter: " - "config=%p\n",config); - - *config = params->xnr_table; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() leave\n"); - ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_table *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_table_config() enter:\n"); - ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr_table = *config; - params->config_changed[IA_CSS_XNR_TABLE_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR_TABLE_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_table_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_formats_config(const struct ia_css_isp_parameters *params, - struct ia_css_formats_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() enter: " - "config=%p\n",config); - - *config = params->formats_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() leave\n"); - ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_formats_config(struct ia_css_isp_parameters *params, - const struct ia_css_formats_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n"); - ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->formats_config = *config; - params->config_changed[IA_CSS_FORMATS_ID] = true; -#ifndef ISP2401 - 
params->config_changed[IA_CSS_FORMATS_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_formats_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() enter: " - "config=%p\n",config); - - *config = params->xnr_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() leave\n"); - ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n"); - ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr_config = *config; - params->config_changed[IA_CSS_XNR_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params, - struct ia_css_xnr3_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() enter: " - "config=%p\n",config); - - *config = params->xnr3_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() leave\n"); - ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr3_config(struct 
ia_css_isp_parameters *params, - const struct ia_css_xnr3_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n"); - ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->xnr3_config = *config; - params->config_changed[IA_CSS_XNR3_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_XNR3_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr3_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_s3a_config(const struct ia_css_isp_parameters *params, - struct ia_css_3a_config *config){ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() enter: " - "config=%p\n",config); - - *config = params->s3a_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() leave\n"); - ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_s3a_config(struct ia_css_isp_parameters *params, - const struct ia_css_3a_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n"); - ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->s3a_config = *config; - params->config_changed[IA_CSS_BH_ID] = true; - params->config_changed[IA_CSS_S3A_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_S3A_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_s3a_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_get_function() */ - -static void -ia_css_get_output_config(const struct ia_css_isp_parameters *params, - struct ia_css_output_config *config){ - if (config == NULL) - return; - - assert(params != 
NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() enter: " - "config=%p\n",config); - - *config = params->output_config; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() leave\n"); - ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE); -} - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_output_config(struct ia_css_isp_parameters *params, - const struct ia_css_output_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n"); - ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE); - params->output_config = *config; - params->config_changed[IA_CSS_OUTPUT_ID] = true; -#ifndef ISP2401 - params->config_changed[IA_CSS_OUTPUT_ID] = true; - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_output_config() leave: " - "return_void\n"); -} - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_get_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -{ - ia_css_get_dp_config(params, config->dp_config); - ia_css_get_wb_config(params, config->wb_config); - ia_css_get_tnr_config(params, config->tnr_config); - ia_css_get_ob_config(params, config->ob_config); - ia_css_get_de_config(params, config->de_config); - ia_css_get_anr_config(params, config->anr_config); - ia_css_get_anr2_config(params, config->anr_thres); - ia_css_get_ce_config(params, config->ce_config); - ia_css_get_ecd_config(params, config->ecd_config); - ia_css_get_ynr_config(params, config->ynr_config); - ia_css_get_fc_config(params, config->fc_config); - ia_css_get_cnr_config(params, config->cnr_config); - ia_css_get_macc_config(params, config->macc_config); - ia_css_get_ctc_config(params, config->ctc_config); - ia_css_get_aa_config(params, config->aa_config); - ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config); 
- ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config); - ia_css_get_csc_config(params, config->cc_config); - ia_css_get_nr_config(params, config->nr_config); - ia_css_get_gc_config(params, config->gc_config); - ia_css_get_sdis_horicoef_config(params, config->dvs_coefs); - ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs); - ia_css_get_sdis_horiproj_config(params, config->dvs_coefs); - ia_css_get_sdis_vertproj_config(params, config->dvs_coefs); - ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs); - ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs); - ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs); - ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs); - ia_css_get_r_gamma_config(params, config->r_gamma_table); - ia_css_get_g_gamma_config(params, config->g_gamma_table); - ia_css_get_b_gamma_config(params, config->b_gamma_table); - ia_css_get_xnr_table_config(params, config->xnr_table); - ia_css_get_formats_config(params, config->formats_config); - ia_css_get_xnr_config(params, config->xnr_config); - ia_css_get_xnr3_config(params, config->xnr3_config); - ia_css_get_s3a_config(params, config->s3a_config); - ia_css_get_output_config(params, config->output_config); -} - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_set_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -{ - ia_css_set_dp_config(params, config->dp_config); - ia_css_set_wb_config(params, config->wb_config); - ia_css_set_tnr_config(params, config->tnr_config); - ia_css_set_ob_config(params, config->ob_config); - ia_css_set_de_config(params, config->de_config); - ia_css_set_anr_config(params, config->anr_config); - ia_css_set_anr2_config(params, config->anr_thres); - ia_css_set_ce_config(params, config->ce_config); - ia_css_set_ecd_config(params, config->ecd_config); - ia_css_set_ynr_config(params, config->ynr_config); - ia_css_set_fc_config(params, 
config->fc_config); - ia_css_set_cnr_config(params, config->cnr_config); - ia_css_set_macc_config(params, config->macc_config); - ia_css_set_ctc_config(params, config->ctc_config); - ia_css_set_aa_config(params, config->aa_config); - ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config); - ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config); - ia_css_set_csc_config(params, config->cc_config); - ia_css_set_nr_config(params, config->nr_config); - ia_css_set_gc_config(params, config->gc_config); - ia_css_set_sdis_horicoef_config(params, config->dvs_coefs); - ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs); - ia_css_set_sdis_horiproj_config(params, config->dvs_coefs); - ia_css_set_sdis_vertproj_config(params, config->dvs_coefs); - ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs); - ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs); - ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs); - ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs); - ia_css_set_r_gamma_config(params, config->r_gamma_table); - ia_css_set_g_gamma_config(params, config->g_gamma_table); - ia_css_set_b_gamma_config(params, config->b_gamma_table); - ia_css_set_xnr_table_config(params, config->xnr_table); - ia_css_set_formats_config(params, config->formats_config); - ia_css_set_xnr_config(params, config->xnr_config); - ia_css_set_xnr3_config(params, config->xnr3_config); - ia_css_set_s3a_config(params, config->s3a_config); - ia_css_set_output_config(params, config->output_config); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.h deleted file mode 100644 index 5b3deb7f74ae..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.h +++ 
/dev/null @@ -1,399 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_PARAM_H -#define _IA_CSS_ISP_PARAM_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_parameter_ids { - IA_CSS_AA_ID, - IA_CSS_ANR_ID, - IA_CSS_ANR2_ID, - IA_CSS_BH_ID, - IA_CSS_CNR_ID, - IA_CSS_CROP_ID, - IA_CSS_CSC_ID, - IA_CSS_DP_ID, - IA_CSS_BNR_ID, - IA_CSS_DE_ID, - IA_CSS_ECD_ID, - IA_CSS_FORMATS_ID, - IA_CSS_FPN_ID, - IA_CSS_GC_ID, - IA_CSS_CE_ID, - IA_CSS_YUV2RGB_ID, - IA_CSS_RGB2YUV_ID, - IA_CSS_R_GAMMA_ID, - IA_CSS_G_GAMMA_ID, - IA_CSS_B_GAMMA_ID, - IA_CSS_UDS_ID, - IA_CSS_RAA_ID, - IA_CSS_S3A_ID, - IA_CSS_OB_ID, - IA_CSS_OUTPUT_ID, - IA_CSS_SC_ID, - IA_CSS_BDS_ID, - IA_CSS_TNR_ID, - IA_CSS_MACC_ID, - IA_CSS_SDIS_HORICOEF_ID, - IA_CSS_SDIS_VERTCOEF_ID, - IA_CSS_SDIS_HORIPROJ_ID, - IA_CSS_SDIS_VERTPROJ_ID, - IA_CSS_SDIS2_HORICOEF_ID, - IA_CSS_SDIS2_VERTCOEF_ID, - IA_CSS_SDIS2_HORIPROJ_ID, - IA_CSS_SDIS2_VERTPROJ_ID, - IA_CSS_WB_ID, - IA_CSS_NR_ID, - IA_CSS_YEE_ID, - IA_CSS_YNR_ID, - IA_CSS_FC_ID, - IA_CSS_CTC_ID, - IA_CSS_XNR_TABLE_ID, - IA_CSS_XNR_ID, - IA_CSS_XNR3_ID, - IA_CSS_NUM_PARAMETER_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_memory_offsets { - struct { - struct ia_css_isp_parameter aa; - struct ia_css_isp_parameter anr; - struct ia_css_isp_parameter bh; - struct ia_css_isp_parameter cnr; - struct ia_css_isp_parameter 
crop; - struct ia_css_isp_parameter csc; - struct ia_css_isp_parameter dp; - struct ia_css_isp_parameter bnr; - struct ia_css_isp_parameter de; - struct ia_css_isp_parameter ecd; - struct ia_css_isp_parameter formats; - struct ia_css_isp_parameter fpn; - struct ia_css_isp_parameter gc; - struct ia_css_isp_parameter ce; - struct ia_css_isp_parameter yuv2rgb; - struct ia_css_isp_parameter rgb2yuv; - struct ia_css_isp_parameter uds; - struct ia_css_isp_parameter raa; - struct ia_css_isp_parameter s3a; - struct ia_css_isp_parameter ob; - struct ia_css_isp_parameter output; - struct ia_css_isp_parameter sc; - struct ia_css_isp_parameter bds; - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter macc; - struct ia_css_isp_parameter sdis_horiproj; - struct ia_css_isp_parameter sdis_vertproj; - struct ia_css_isp_parameter sdis2_horiproj; - struct ia_css_isp_parameter sdis2_vertproj; - struct ia_css_isp_parameter wb; - struct ia_css_isp_parameter nr; - struct ia_css_isp_parameter yee; - struct ia_css_isp_parameter ynr; - struct ia_css_isp_parameter fc; - struct ia_css_isp_parameter ctc; - struct ia_css_isp_parameter xnr; - struct ia_css_isp_parameter xnr3; - struct ia_css_isp_parameter get; - struct ia_css_isp_parameter put; - } dmem; - struct { - struct ia_css_isp_parameter anr2; - struct ia_css_isp_parameter ob; - struct ia_css_isp_parameter sdis_horicoef; - struct ia_css_isp_parameter sdis_vertcoef; - struct ia_css_isp_parameter sdis2_horicoef; - struct ia_css_isp_parameter sdis2_vertcoef; -#ifdef ISP2401 - struct ia_css_isp_parameter xnr3; -#endif - } vmem; - struct { - struct ia_css_isp_parameter bh; - } hmem0; - struct { - struct ia_css_isp_parameter gc; - struct ia_css_isp_parameter g_gamma; - struct ia_css_isp_parameter xnr_table; - } vamem1; - struct { - struct ia_css_isp_parameter r_gamma; - struct ia_css_isp_parameter ctc; - } vamem0; - struct { - struct ia_css_isp_parameter b_gamma; - } vamem2; -}; - -#if defined(IA_CSS_INCLUDE_PARAMETERS) - -#include 
"ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/gencode.c:gen_param_process_table() */ - -struct ia_css_pipeline_stage; /* forward declaration */ - -extern void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_dp_config(struct ia_css_isp_parameters *params, - const struct ia_css_dp_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_wb_config(struct ia_css_isp_parameters *params, - const struct ia_css_wb_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_tnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_tnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ob_config(struct ia_css_isp_parameters *params, - const struct ia_css_ob_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_de_config(struct ia_css_isp_parameters *params, - const struct ia_css_de_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_anr2_config(struct ia_css_isp_parameters *params, - const struct ia_css_anr_thres *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ce_config(struct ia_css_isp_parameters *params, - const struct ia_css_ce_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ecd_config(struct ia_css_isp_parameters *params, - const struct ia_css_ecd_config *config); - -/* 
Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ynr_config(struct ia_css_isp_parameters *params, - const struct ia_css_ynr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_fc_config(struct ia_css_isp_parameters *params, - const struct ia_css_fc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_cnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_cnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_macc_config(struct ia_css_isp_parameters *params, - const struct ia_css_macc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_ctc_config(struct ia_css_isp_parameters *params, - const struct ia_css_ctc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_aa_config(struct ia_css_isp_parameters *params, - const struct ia_css_aa_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_csc_config(struct ia_css_isp_parameters *params, - const struct ia_css_cc_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_nr_config(struct ia_css_isp_parameters *params, - const struct ia_css_nr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_gc_config(struct ia_css_isp_parameters *params, - const struct ia_css_gc_config *config); - -/* Code generated by 
genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void 
-ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params, - const struct ia_css_rgb_gamma_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_table *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_formats_config(struct ia_css_isp_parameters *params, - const struct ia_css_formats_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_xnr3_config(struct ia_css_isp_parameters *params, - const struct ia_css_xnr3_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_s3a_config(struct ia_css_isp_parameters *params, - const struct ia_css_3a_config *config); - -/* Code generated by genparam/gencode.c:gen_set_function() */ - -void -ia_css_set_output_config(struct ia_css_isp_parameters *params, - const struct ia_css_output_config *config); - -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_get_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -; -#ifdef ISP2401 - -#endif -/* Code generated by genparam/gencode.c:gen_global_access_function() */ - -void -ia_css_set_configs(struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config) -; -#ifdef ISP2401 - -#endif -#endif /* IA_CSS_INCLUDE_PARAMETER */ - -#endif /* _IA_CSS_ISP_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.c deleted file mode 100644 index 
e87d05bc73ae..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* Generated code: do not edit or commmit. */ - -#include "ia_css_pipeline.h" -#include "ia_css_isp_states.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_aa_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.aa.size; - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset; - - if (size) - memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], 0, size); - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_cnr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset; - - if (size) { - ia_css_init_cnr_state( - 
&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_cnr2_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset; - - if (size) { - ia_css_init_cnr2_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_dp_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.dp.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset; - - if (size) { - ia_css_init_dp_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_de_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.de.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.de.offset; - - if (size) { - ia_css_init_de_state( - 
&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_tnr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->dmem.tnr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset; - - if (size) { - ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_ref_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->dmem.ref.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset; - - if (size) { - ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_init_function() */ - -static void -ia_css_initialize_ynr_state( - const struct ia_css_binary *binary) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() enter:\n"); - - { - unsigned size = binary->info->mem_offsets.offsets.state->vmem.ynr.size; - - unsigned offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset; - - if (size) { - 
ia_css_init_ynr_state( - &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], - size); - } - - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() leave:\n"); -} - -/* Code generated by genparam/genstate.c:gen_state_init_table() */ - -void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary) = { - ia_css_initialize_aa_state, - ia_css_initialize_cnr_state, - ia_css_initialize_cnr2_state, - ia_css_initialize_dp_state, - ia_css_initialize_de_state, - ia_css_initialize_tnr_state, - ia_css_initialize_ref_state, - ia_css_initialize_ynr_state, -}; - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.h deleted file mode 100644 index 732adafb0a63..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#define IA_CSS_INCLUDE_STATES -#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h" -#include "isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h" -#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h" -#include "isp/kernels/de/de_1.0/ia_css_de.host.h" -#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h" -#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h" -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h" -#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h" -#include "isp/kernels/dpc2/ia_css_dpc2.host.h" -#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h" -/* Generated code: do not edit or commmit. */ - -#ifndef _IA_CSS_ISP_STATE_H -#define _IA_CSS_ISP_STATE_H - -/* Code generated by genparam/gencode.c:gen_param_enum() */ - -enum ia_css_state_ids { - IA_CSS_AA_STATE_ID, - IA_CSS_CNR_STATE_ID, - IA_CSS_CNR2_STATE_ID, - IA_CSS_DP_STATE_ID, - IA_CSS_DE_STATE_ID, - IA_CSS_TNR_STATE_ID, - IA_CSS_REF_STATE_ID, - IA_CSS_YNR_STATE_ID, - IA_CSS_NUM_STATE_IDS -}; - -/* Code generated by genparam/gencode.c:gen_param_offsets() */ - -struct ia_css_state_memory_offsets { - struct { - struct ia_css_isp_parameter aa; - struct ia_css_isp_parameter cnr; - struct ia_css_isp_parameter cnr2; - struct ia_css_isp_parameter dp; - struct ia_css_isp_parameter de; - struct ia_css_isp_parameter ynr; - } vmem; - struct { - struct ia_css_isp_parameter tnr; - struct ia_css_isp_parameter ref; - } dmem; -}; - -#if defined(IA_CSS_INCLUDE_STATES) - -#include "ia_css_stream.h" /* struct ia_css_stream */ -#include "ia_css_binary.h" /* struct ia_css_binary */ -/* Code generated by genparam/genstate.c:gen_state_init_table() */ - -extern void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary); - -#endif /* IA_CSS_INCLUDE_STATE */ - -#endif /* _IA_CSS_ISP_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/bits.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/bits.h deleted file mode 100644 index 
e71e33d9d143..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/bits.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HRT_BITS_H -#define _HRT_BITS_H - -#include "defs.h" - -#define _hrt_ones(n) HRTCAT(_hrt_ones_, n) -#define _hrt_ones_0x0 0x00000000U -#define _hrt_ones_0x1 0x00000001U -#define _hrt_ones_0x2 0x00000003U -#define _hrt_ones_0x3 0x00000007U -#define _hrt_ones_0x4 0x0000000FU -#define _hrt_ones_0x5 0x0000001FU -#define _hrt_ones_0x6 0x0000003FU -#define _hrt_ones_0x7 0x0000007FU -#define _hrt_ones_0x8 0x000000FFU -#define _hrt_ones_0x9 0x000001FFU -#define _hrt_ones_0xA 0x000003FFU -#define _hrt_ones_0xB 0x000007FFU -#define _hrt_ones_0xC 0x00000FFFU -#define _hrt_ones_0xD 0x00001FFFU -#define _hrt_ones_0xE 0x00003FFFU -#define _hrt_ones_0xF 0x00007FFFU -#define _hrt_ones_0x10 0x0000FFFFU -#define _hrt_ones_0x11 0x0001FFFFU -#define _hrt_ones_0x12 0x0003FFFFU -#define _hrt_ones_0x13 0x0007FFFFU -#define _hrt_ones_0x14 0x000FFFFFU -#define _hrt_ones_0x15 0x001FFFFFU -#define _hrt_ones_0x16 0x003FFFFFU -#define _hrt_ones_0x17 0x007FFFFFU -#define _hrt_ones_0x18 0x00FFFFFFU -#define _hrt_ones_0x19 0x01FFFFFFU -#define _hrt_ones_0x1A 0x03FFFFFFU -#define _hrt_ones_0x1B 0x07FFFFFFU -#define _hrt_ones_0x1C 0x0FFFFFFFU -#define _hrt_ones_0x1D 0x1FFFFFFFU -#define _hrt_ones_0x1E 0x3FFFFFFFU -#define _hrt_ones_0x1F 0x7FFFFFFFU -#define _hrt_ones_0x20 0xFFFFFFFFU - -#define 
_hrt_ones_0 _hrt_ones_0x0 -#define _hrt_ones_1 _hrt_ones_0x1 -#define _hrt_ones_2 _hrt_ones_0x2 -#define _hrt_ones_3 _hrt_ones_0x3 -#define _hrt_ones_4 _hrt_ones_0x4 -#define _hrt_ones_5 _hrt_ones_0x5 -#define _hrt_ones_6 _hrt_ones_0x6 -#define _hrt_ones_7 _hrt_ones_0x7 -#define _hrt_ones_8 _hrt_ones_0x8 -#define _hrt_ones_9 _hrt_ones_0x9 -#define _hrt_ones_10 _hrt_ones_0xA -#define _hrt_ones_11 _hrt_ones_0xB -#define _hrt_ones_12 _hrt_ones_0xC -#define _hrt_ones_13 _hrt_ones_0xD -#define _hrt_ones_14 _hrt_ones_0xE -#define _hrt_ones_15 _hrt_ones_0xF -#define _hrt_ones_16 _hrt_ones_0x10 -#define _hrt_ones_17 _hrt_ones_0x11 -#define _hrt_ones_18 _hrt_ones_0x12 -#define _hrt_ones_19 _hrt_ones_0x13 -#define _hrt_ones_20 _hrt_ones_0x14 -#define _hrt_ones_21 _hrt_ones_0x15 -#define _hrt_ones_22 _hrt_ones_0x16 -#define _hrt_ones_23 _hrt_ones_0x17 -#define _hrt_ones_24 _hrt_ones_0x18 -#define _hrt_ones_25 _hrt_ones_0x19 -#define _hrt_ones_26 _hrt_ones_0x1A -#define _hrt_ones_27 _hrt_ones_0x1B -#define _hrt_ones_28 _hrt_ones_0x1C -#define _hrt_ones_29 _hrt_ones_0x1D -#define _hrt_ones_30 _hrt_ones_0x1E -#define _hrt_ones_31 _hrt_ones_0x1F -#define _hrt_ones_32 _hrt_ones_0x20 - -#define _hrt_mask(b, n) \ - (_hrt_ones(n) << (b)) -#define _hrt_get_bits(w, b, n) \ - (((w) >> (b)) & _hrt_ones(n)) -#define _hrt_set_bits(w, b, n, v) \ - (((w) & ~_hrt_mask(b, n)) | (((v) & _hrt_ones(n)) << (b))) -#define _hrt_get_bit(w, b) \ - (((w) >> (b)) & 1) -#define _hrt_set_bit(w, b, v) \ - (((w) & (~(1 << (b)))) | (((v)&1) << (b))) -#define _hrt_set_lower_half(w, v) \ - _hrt_set_bits(w, 0, 16, v) -#define _hrt_set_upper_half(w, v) \ - _hrt_set_bits(w, 16, 16, v) - -#endif /* _HRT_BITS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/cell_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/cell_params.h deleted file mode 100644 index b5756bfe8eb6..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/cell_params.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _cell_params_h -#define _cell_params_h - -#define SP_PMEM_LOG_WIDTH_BITS 6 /*Width of PC, 64 bits, 8 bytes*/ -#define SP_ICACHE_TAG_BITS 4 /*size of tag*/ -#define SP_ICACHE_SET_BITS 8 /* 256 sets*/ -#define SP_ICACHE_BLOCKS_PER_SET_BITS 1 /* 2 way associative*/ -#define SP_ICACHE_BLOCK_ADDRESS_BITS 11 /* 2048 lines capacity*/ - -#define SP_ICACHE_ADDRESS_BITS \ - (SP_ICACHE_TAG_BITS+SP_ICACHE_BLOCK_ADDRESS_BITS) - -#define SP_PMEM_DEPTH (1< input_selector*/ -/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! 
*/ -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 43 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 30 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29 -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ? -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ? -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? 
Option 3 for depacketiser -#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding - -/* definition for state machine of data FIFO for decode different type of data */ -#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2 -#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9 -#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7 -#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1 -#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5 -#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3 -#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7 - -#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN - -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */ - -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8 - -#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5 -#define 
_HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6 - - -/* packet bit definition */ -#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32 -#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1 -#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22 -#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2 -#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16 -#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0 -#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16 -#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0 -#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32 - - -/*************************************************************************************************/ -/* Custom Decoding */ -/* These Custom Defs are defined based on design-time config in "csi_be_pixel_formatter.chdl" !! */ -/*************************************************************************************************/ -#define BE_CUST_EN_IDX 0 /* 2bits */ -#define BE_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */ -#define BE_CUST_EN_WIDTH 8 -#define BE_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */ -#define BE_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */ - -/* Data State config = {get_bits(6bits), valid(1bit)} */ -#define BE_CUST_DATA_STATE_S0_IDX 0 /* 7bits */ -#define BE_CUST_DATA_STATE_S1_IDX 7 /* 7bits */ -#define BE_CUST_DATA_STATE_S2_IDX 14 /* 7bits */ -#define BE_CUST_DATA_STATE_WIDTH 21 -#define BE_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */ -#define BE_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */ - -/* Pixel Extractor config */ -#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 5bits */ -#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 5 /* 5bits */ -#define BE_CUST_PIX_EXT_PIX_MASK_IDX 10 /* 18bits */ -#define BE_CUST_PIX_EXT_PIX_EN_IDX 28 /* 1bits */ -#define BE_CUST_PIX_EXT_WIDTH 29 - -/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */ -#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */ -#define 
BE_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */ -#define BE_CUST_PIX_VALID_EOP_WIDTH 16 -#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */ -#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */ -#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */ -#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */ - -#endif /* _mipi_backend_common_defs_h_ */ -#endif /* _css_receiver_2400_common_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_defs.h deleted file mode 100644 index 6f5b7d3d3715..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_defs.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _css_receiver_2400_defs_h_ -#define _css_receiver_2400_defs_h_ - -#include "css_receiver_2400_common_defs.h" - -#define CSS_RECEIVER_DATA_WIDTH 8 -#define CSS_RECEIVER_RX_TRIG 4 -#define CSS_RECEIVER_RF_WORD 32 -#define CSS_RECEIVER_IMG_PROC_RF_ADDR 10 -#define CSS_RECEIVER_CSI_RF_ADDR 4 -#define CSS_RECEIVER_DATA_OUT 12 -#define CSS_RECEIVER_CHN_NO 2 -#define CSS_RECEIVER_DWORD_CNT 11 -#define CSS_RECEIVER_FORMAT_TYP 5 -#define CSS_RECEIVER_HRESPONSE 2 -#define CSS_RECEIVER_STATE_WIDTH 3 -#define CSS_RECEIVER_FIFO_DAT 32 -#define CSS_RECEIVER_CNT_VAL 2 -#define CSS_RECEIVER_PRED10_VAL 10 -#define CSS_RECEIVER_PRED12_VAL 12 -#define CSS_RECEIVER_CNT_WIDTH 8 -#define CSS_RECEIVER_WORD_CNT 16 -#define CSS_RECEIVER_PIXEL_LEN 6 -#define CSS_RECEIVER_PIXEL_CNT 5 -#define CSS_RECEIVER_COMP_8_BIT 8 -#define CSS_RECEIVER_COMP_7_BIT 7 -#define CSS_RECEIVER_COMP_6_BIT 6 - -#define CSI_CONFIG_WIDTH 4 - -/* division of gen_short data, ch_id and fmt_type over streaming data interface */ -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB 0 -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB - 1) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB - 1) -#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH - 1) - -#define _HRT_CSS_RECEIVER_2400_REG_ALIGN 4 -#define _HRT_CSS_RECEIVER_2400_BYTES_PER_PKT 4 - -#define hrt_css_receiver_2400_4_lane_port_offset 0x100 -#define hrt_css_receiver_2400_1_lane_port_offset 0x200 -#define 
hrt_css_receiver_2400_2_lane_port_offset 0x300 -#define hrt_css_receiver_2400_backend_port_offset 0x100 - -#define _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX 0 -#define _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX 1 -#define _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX 2 -#define _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX 3 -#define _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX 4 -#define _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX 7 -#define _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX 8 -#define _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX 9 -#define _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX 10 -#define _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX 11 -#define _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX 12 -#define _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX 13 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX 14 -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX 15 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX 16 -#define _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX 17 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX 18 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX 19 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX 20 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX 21 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX 22 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX 23 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX 24 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX 25 -#define _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX 26 -#define _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX 27 -#define _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX 28 - -/* Interrupt bits for IRQ_STATUS and IRQ_ENABLE registers */ -#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT 0 -#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT 1 -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT 2 -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT 3 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT 4 -#define 
_HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT 5 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT 6 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT 7 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT 8 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT 9 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT 10 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT 11 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT 12 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT 13 -#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT 14 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT 15 -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT 16 - -#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_ "Fifo Overrun" -#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_CAUSE_ "Reserved" -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_CAUSE_ "Sleep mode entry" -#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_CAUSE_ "Sleep mode exit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_CAUSE_ "Error high speed SOT" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_CAUSE_ "Error high speed sync SOT" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_CAUSE_ "Error control" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_ "Error correction double bit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_CAUSE_ "Error correction single bit" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_CAUSE_ "No error" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_ "Error cyclic redundancy check" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_CAUSE_ "Error id" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_CAUSE_ "Error frame sync" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_CAUSE_ "Error frame data" -#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_CAUSE_ "Data time-out" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_CAUSE_ "Error escape" -#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_CAUSE_ "Error line sync" - -/* Bits for CSI2_DEVICE_READY 
register */ -#define _HRT_CSS_RECEIVER_2400_CSI2_DEVICE_READY_IDX 0 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_INIT_TIME_OUT_ERR_IDX 2 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_OVER_RUN_ERR_IDX 3 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_SOT_SYNC_ERR_IDX 4 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_RECEIVE_DATA_TIME_OUT_ERR_IDX 5 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_ECC_TWO_BIT_ERR_IDX 6 -#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_DATA_ID_ERR_IDX 7 - - -/* Bits for CSI2_FUNC_PROG register */ -#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS 19 - -/* Bits for INIT_COUNT register */ -#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_IDX 0 -#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_BITS 16 - -/* Bits for COUNT registers */ -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_BITS 8 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RX_COUNT_BITS 8 - -/* Bits for RAW116_18_DATAID register */ -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_BITS 6 - -/* Bits for COMP_FORMAT register, this selects the compression data format */ -#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS 8 -#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX (_HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX + _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS) -#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_BITS 8 - -/* Bits for COMP_PREDICT register, this selects the predictor algorithm */ -#define _HRT_CSS_RECEIVER_2400_PREDICT_NO_COMP 0 -#define _HRT_CSS_RECEIVER_2400_PREDICT_1 1 -#define _HRT_CSS_RECEIVER_2400_PREDICT_2 2 - -/* Number of bits used for the delay registers */ -#define _HRT_CSS_RECEIVER_2400_DELAY_BITS 8 - -/* Bits for 
COMP_SCHEME register, this selects the compression scheme for a VC */ -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD1_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD2_BITS_IDX 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD3_BITS_IDX 10 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD4_BITS_IDX 15 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD5_BITS_IDX 20 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD6_BITS_IDX 25 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD7_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD8_BITS_IDX 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_BITS_BITS 5 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_IDX 0 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_BITS 3 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_IDX 3 -#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_BITS 2 - - -/* BITS for backend RAW16 and RAW 18 registers */ - -#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_BITS 2 -#define _HRT_CSS_RECEIVER_2400_RAW18_EN_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW18_EN_BITS 1 - -#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_IDX 0 -#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_BITS 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_IDX 6 -#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_BITS 2 -#define _HRT_CSS_RECEIVER_2400_RAW16_EN_IDX 8 -#define _HRT_CSS_RECEIVER_2400_RAW16_EN_BITS 1 - -/* These hsync and vsync values are for HSS simulation only */ -#define _HRT_CSS_RECEIVER_2400_HSYNC_VAL (1<<16) -#define _HRT_CSS_RECEIVER_2400_VSYNC_VAL (1<<17) - -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH 28 -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB 0 -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB + CSS_RECEIVER_DATA_OUT - 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT 
(_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB + CSS_RECEIVER_DATA_OUT - 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT + 1) -#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT + 1) - -// SH Backend Register IDs -#define _HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX 0 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX 1 -#define _HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX 2 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX 3 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX 4 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX 5 -#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX 6 -#define _HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX 7 -#define _HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX 8 -#define _HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX 9 -#define _HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX 10 -#define _HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX 11 -#define _HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX 12 -#define _HRT_CSS_RECEIVER_2400_BE_CUST_EN_REG_IDX 13 -#define _HRT_CSS_RECEIVER_2400_BE_CUST_DATA_STATE_REG_IDX 14 /* Data State 0,1,2 config */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P0_REG_IDX 15 /* Pixel Extractor config for Data State 0 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P1_REG_IDX 16 /* Pixel Extractor config for Data State 0 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P2_REG_IDX 17 /* Pixel Extractor config for Data State 0 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P3_REG_IDX 18 /* Pixel Extractor config for Data State 0 & Pix 3 */ 
-#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P0_REG_IDX 19 /* Pixel Extractor config for Data State 1 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P1_REG_IDX 20 /* Pixel Extractor config for Data State 1 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P2_REG_IDX 21 /* Pixel Extractor config for Data State 1 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P3_REG_IDX 22 /* Pixel Extractor config for Data State 1 & Pix 3 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P0_REG_IDX 23 /* Pixel Extractor config for Data State 2 & Pix 0 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P1_REG_IDX 24 /* Pixel Extractor config for Data State 2 & Pix 1 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P2_REG_IDX 25 /* Pixel Extractor config for Data State 2 & Pix 2 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P3_REG_IDX 26 /* Pixel Extractor config for Data State 2 & Pix 3 */ -#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_VALID_EOP_REG_IDX 27 /* Pixel Valid & EoP config for Pix 0,1,2,3 */ - -#define _HRT_CSS_RECEIVER_2400_BE_NOF_REGISTERS 28 - -#define _HRT_CSS_RECEIVER_2400_BE_SRST_HE 0 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_RCF 1 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_PF 2 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_SM 3 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_PD 4 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_SD 5 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_OT 6 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_BC 7 -#define _HRT_CSS_RECEIVER_2400_BE_SRST_WIDTH 8 - -#endif /* _css_receiver_2400_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/defs.h deleted file mode 100644 index 47505f41790c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/defs.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HRT_DEFS_H_ -#define _HRT_DEFS_H_ - -#ifndef HRTCAT -#define _HRTCAT(m, n) m##n -#define HRTCAT(m, n) _HRTCAT(m, n) -#endif - -#ifndef HRTSTR -#define _HRTSTR(x) #x -#define HRTSTR(x) _HRTSTR(x) -#endif - -#ifndef HRTMIN -#define HRTMIN(a, b) (((a) < (b)) ? (a) : (b)) -#endif - -#ifndef HRTMAX -#define HRTMAX(a, b) (((a) > (b)) ? (a) : (b)) -#endif - -#endif /* _HRT_DEFS_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/dma_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/dma_v2_defs.h deleted file mode 100644 index d184a8b313c9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/dma_v2_defs.h +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _dma_v2_defs_h -#define _dma_v2_defs_h - -#define _DMA_V2_NUM_CHANNELS_ID MaxNumChannels -#define _DMA_V2_CONNECTIONS_ID Connections -#define _DMA_V2_DEV_ELEM_WIDTHS_ID DevElemWidths -#define _DMA_V2_DEV_FIFO_DEPTH_ID DevFifoDepth -#define _DMA_V2_DEV_FIFO_RD_LAT_ID DevFifoRdLat -#define _DMA_V2_DEV_FIFO_LAT_BYPASS_ID DevFifoRdLatBypass -#define _DMA_V2_DEV_NO_BURST_ID DevNoBurst -#define _DMA_V2_DEV_RD_ACCEPT_ID DevRdAccept -#define _DMA_V2_DEV_SRMD_ID DevSRMD -#define _DMA_V2_DEV_HAS_CRUN_ID CRunMasters -#define _DMA_V2_CTRL_ACK_FIFO_DEPTH_ID CtrlAckFifoDepth -#define _DMA_V2_CMD_FIFO_DEPTH_ID CommandFifoDepth -#define _DMA_V2_CMD_FIFO_RD_LAT_ID CommandFifoRdLat -#define _DMA_V2_CMD_FIFO_LAT_BYPASS_ID CommandFifoRdLatBypass -#define _DMA_V2_NO_PACK_ID has_no_pack - -#define _DMA_V2_REG_ALIGN 4 -#define _DMA_V2_REG_ADDR_BITS 2 - -/* Command word */ -#define _DMA_V2_CMD_IDX 0 -#define _DMA_V2_CMD_BITS 6 -#define _DMA_V2_CHANNEL_IDX (_DMA_V2_CMD_IDX + _DMA_V2_CMD_BITS) -#define _DMA_V2_CHANNEL_BITS 5 - -/* The command to set a parameter contains the PARAM field next */ -#define _DMA_V2_PARAM_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS) -#define _DMA_V2_PARAM_BITS 4 - -/* Commands to read, write or init specific blocks contain these - three values */ -#define _DMA_V2_SPEC_DEV_A_XB_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS) -#define _DMA_V2_SPEC_DEV_A_XB_BITS 8 -#define _DMA_V2_SPEC_DEV_B_XB_IDX (_DMA_V2_SPEC_DEV_A_XB_IDX + _DMA_V2_SPEC_DEV_A_XB_BITS) -#define _DMA_V2_SPEC_DEV_B_XB_BITS 8 -#define _DMA_V2_SPEC_YB_IDX (_DMA_V2_SPEC_DEV_B_XB_IDX + _DMA_V2_SPEC_DEV_B_XB_BITS) -#define _DMA_V2_SPEC_YB_BITS (32-_DMA_V2_SPEC_DEV_B_XB_BITS-_DMA_V2_SPEC_DEV_A_XB_BITS-_DMA_V2_CMD_BITS-_DMA_V2_CHANNEL_BITS) - -/* */ -#define _DMA_V2_CMD_CTRL_IDX 4 -#define _DMA_V2_CMD_CTRL_BITS 4 - -/* Packing setup word */ -#define _DMA_V2_CONNECTION_IDX 0 -#define _DMA_V2_CONNECTION_BITS 4 -#define _DMA_V2_EXTENSION_IDX (_DMA_V2_CONNECTION_IDX + 
_DMA_V2_CONNECTION_BITS) -#define _DMA_V2_EXTENSION_BITS 1 - -/* Elements packing word */ -#define _DMA_V2_ELEMENTS_IDX 0 -#define _DMA_V2_ELEMENTS_BITS 8 -#define _DMA_V2_LEFT_CROPPING_IDX (_DMA_V2_ELEMENTS_IDX + _DMA_V2_ELEMENTS_BITS) -#define _DMA_V2_LEFT_CROPPING_BITS 8 - -#define _DMA_V2_WIDTH_IDX 0 -#define _DMA_V2_WIDTH_BITS 16 - -#define _DMA_V2_HEIGHT_IDX 0 -#define _DMA_V2_HEIGHT_BITS 16 - -#define _DMA_V2_STRIDE_IDX 0 -#define _DMA_V2_STRIDE_BITS 32 - -/* Command IDs */ -#define _DMA_V2_MOVE_B2A_COMMAND 0 -#define _DMA_V2_MOVE_B2A_BLOCK_COMMAND 1 -#define _DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND 2 -#define _DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND 3 -#define _DMA_V2_MOVE_A2B_COMMAND 4 -#define _DMA_V2_MOVE_A2B_BLOCK_COMMAND 5 -#define _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND 6 -#define _DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND 7 -#define _DMA_V2_INIT_A_COMMAND 8 -#define _DMA_V2_INIT_A_BLOCK_COMMAND 9 -#define _DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND 10 -#define _DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND 11 -#define _DMA_V2_INIT_B_COMMAND 12 -#define _DMA_V2_INIT_B_BLOCK_COMMAND 13 -#define _DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND 14 -#define _DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND 15 -#define _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_A_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_B_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND + 16) -#define _DMA_V2_NO_ACK_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND + 
16) -#define _DMA_V2_CONFIG_CHANNEL_COMMAND 32 -#define _DMA_V2_SET_CHANNEL_PARAM_COMMAND 33 -#define _DMA_V2_SET_CRUN_COMMAND 62 - -/* Channel Parameter IDs */ -#define _DMA_V2_PACKING_SETUP_PARAM 0 -#define _DMA_V2_STRIDE_A_PARAM 1 -#define _DMA_V2_ELEM_CROPPING_A_PARAM 2 -#define _DMA_V2_WIDTH_A_PARAM 3 -#define _DMA_V2_STRIDE_B_PARAM 4 -#define _DMA_V2_ELEM_CROPPING_B_PARAM 5 -#define _DMA_V2_WIDTH_B_PARAM 6 -#define _DMA_V2_HEIGHT_PARAM 7 -#define _DMA_V2_QUEUED_CMDS 8 - -/* Parameter Constants */ -#define _DMA_V2_ZERO_EXTEND 0 -#define _DMA_V2_SIGN_EXTEND 1 - - /* SLAVE address map */ -#define _DMA_V2_SEL_FSM_CMD 0 -#define _DMA_V2_SEL_CH_REG 1 -#define _DMA_V2_SEL_CONN_GROUP 2 -#define _DMA_V2_SEL_DEV_INTERF 3 - -#define _DMA_V2_ADDR_SEL_COMP_IDX 12 -#define _DMA_V2_ADDR_SEL_COMP_BITS 4 -#define _DMA_V2_ADDR_SEL_CH_REG_IDX 2 -#define _DMA_V2_ADDR_SEL_CH_REG_BITS 6 -#define _DMA_V2_ADDR_SEL_PARAM_IDX (_DMA_V2_ADDR_SEL_CH_REG_BITS+_DMA_V2_ADDR_SEL_CH_REG_IDX) -#define _DMA_V2_ADDR_SEL_PARAM_BITS 4 - -#define _DMA_V2_ADDR_SEL_GROUP_COMP_IDX 2 -#define _DMA_V2_ADDR_SEL_GROUP_COMP_BITS 6 -#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX (_DMA_V2_ADDR_SEL_GROUP_COMP_BITS + _DMA_V2_ADDR_SEL_GROUP_COMP_IDX) -#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS 4 - -#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX 2 -#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS 6 -#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX (_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX+_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS) -#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS 4 - -#define _DMA_V2_FSM_GROUP_CMD_IDX 0 -#define _DMA_V2_FSM_GROUP_ADDR_SRC_IDX 1 -#define _DMA_V2_FSM_GROUP_ADDR_DEST_IDX 2 -#define _DMA_V2_FSM_GROUP_CMD_CTRL_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_IDX 4 -#define _DMA_V2_FSM_GROUP_FSM_PACK_IDX 5 -#define _DMA_V2_FSM_GROUP_FSM_REQ_IDX 6 -#define _DMA_V2_FSM_GROUP_FSM_WR_IDX 7 - -#define _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX 1 -#define 
_DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX 4 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX 5 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX 6 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX 7 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX 8 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX 9 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX 10 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX 11 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX 12 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 13 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX 14 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX 15 -#define _DMA_V2_FSM_GROUP_FSM_CTRL_CMD_CTRL_IDX 15 - -#define _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX 3 - -#define _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_REQ_XB_REMAINING_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_BURST_IDX 4 - -#define _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX 0 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX 1 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX 2 -#define _DMA_V2_FSM_GROUP_FSM_WR_XB_REMAINING_IDX 3 -#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_BURST_IDX 4 - -#define _DMA_V2_DEV_INTERF_REQ_SIDE_STATUS_IDX 0 -#define _DMA_V2_DEV_INTERF_SEND_SIDE_STATUS_IDX 1 -#define _DMA_V2_DEV_INTERF_FIFO_STATUS_IDX 2 -#define _DMA_V2_DEV_INTERF_REQ_ONLY_COMPLETE_BURST_IDX 3 -#define _DMA_V2_DEV_INTERF_MAX_BURST_IDX 4 -#define _DMA_V2_DEV_INTERF_CHK_ADDR_ALIGN 5 - -#endif /* _dma_v2_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gdc_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gdc_v2_defs.h 
deleted file mode 100644 index 77722d205701..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gdc_v2_defs.h +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef HRT_GDC_v2_defs_h_ -#define HRT_GDC_v2_defs_h_ - -#define HRT_GDC_IS_V2 - -#define HRT_GDC_N 1024 /* Top-level design constant, equal to the number of entries in the LUT */ -#define HRT_GDC_FRAC_BITS 10 /* Number of fractional bits in the GDC block, driven by the size of the LUT */ - -#define HRT_GDC_BLI_FRAC_BITS 4 /* Number of fractional bits for the bi-linear interpolation type */ -#define HRT_GDC_BLI_COEF_ONE (1 << HRT_GDC_BLI_FRAC_BITS) - -#define HRT_GDC_BCI_COEF_BITS 14 /* 14 bits per coefficient */ -#define HRT_GDC_BCI_COEF_ONE (1 << (HRT_GDC_BCI_COEF_BITS-2)) /* We represent signed 10 bit coefficients. */ - /* The supported range is [-256, .., +256] */ - /* in 14-bit signed notation, */ - /* We need all ten bits (MSB must be zero). */ - /* -s is inserted to solve this issue, and */ - /* therefore "1" is equal to +256. 
*/ -#define HRT_GDC_BCI_COEF_MASK ((1 << HRT_GDC_BCI_COEF_BITS) - 1) - -#define HRT_GDC_LUT_BYTES (HRT_GDC_N*4*2) /* 1024 addresses, 4 coefficients per address, */ - /* 2 bytes per coefficient */ - -#define _HRT_GDC_REG_ALIGN 4 - - // 31 30 29 25 24 0 - // |-----|---|--------|------------------------| - // | CMD | C | Reg_ID | Value | - - - // There are just two commands possible for the GDC block: - // 1 - Configure reg - // 0 - Data token - - // C - Reserved bit - // Used in protocol to indicate whether it is C-run or other type of runs - // In case of C-run, this bit has a value of 1, for all the other runs, it is 0. - - // Reg_ID - Address of the register to be configured - - // Value - Value to store to the addressed register, maximum of 24 bits - - // Configure reg command is not followed by any other token. - // The address of the register and the data to be filled in is contained in the same token - - // When the first data token is received, it must be: - // 1. FRX and FRY (device configured in one of the scaling modes) ***DEFAULT MODE***, or, - // 2. P0'X (device configured in one of the tetragon modes) - // After the first data token is received, pre-defined number of tokens with the following meaning follow: - // 1. two tokens: SRC address ; DST address - // 2. 
nine tokens: P0'Y, .., P3'Y ; SRC address ; DST address - -#define HRT_GDC_CONFIG_CMD 1 -#define HRT_GDC_DATA_CMD 0 - - -#define HRT_GDC_CMD_POS 31 -#define HRT_GDC_CMD_BITS 1 -#define HRT_GDC_CRUN_POS 30 -#define HRT_GDC_REG_ID_POS 25 -#define HRT_GDC_REG_ID_BITS 5 -#define HRT_GDC_DATA_POS 0 -#define HRT_GDC_DATA_BITS 25 - -#define HRT_GDC_FRYIPXFRX_BITS 26 -#define HRT_GDC_P0X_BITS 23 - - -#define HRT_GDC_MAX_OXDIM (8192-64) -#define HRT_GDC_MAX_OYDIM 4095 -#define HRT_GDC_MAX_IXDIM (8192-64) -#define HRT_GDC_MAX_IYDIM 4095 -#define HRT_GDC_MAX_DS_FAC 16 -#define HRT_GDC_MAX_DX (HRT_GDC_MAX_DS_FAC*HRT_GDC_N - 1) -#define HRT_GDC_MAX_DY HRT_GDC_MAX_DX - - -/* GDC lookup tables entries are 10 bits values, but they're - stored 2 by 2 as 32 bit values, yielding 16 bits per entry. - A GDC lookup table contains 64 * 4 elements */ - -#define HRT_GDC_PERF_1_1_pix 0 -#define HRT_GDC_PERF_2_1_pix 1 -#define HRT_GDC_PERF_1_2_pix 2 -#define HRT_GDC_PERF_2_2_pix 3 - -#define HRT_GDC_NND_MODE 0 -#define HRT_GDC_BLI_MODE 1 -#define HRT_GDC_BCI_MODE 2 -#define HRT_GDC_LUT_MODE 3 - -#define HRT_GDC_SCAN_STB 0 -#define HRT_GDC_SCAN_STR 1 - -#define HRT_GDC_MODE_SCALING 0 -#define HRT_GDC_MODE_TETRAGON 1 - -#define HRT_GDC_LUT_COEFF_OFFSET 16 -#define HRT_GDC_FRY_BIT_OFFSET 16 -// FRYIPXFRX is the only register where we store two values in one field, -// to save one token in the scaling protocol. -// Like this, we have three tokens in the scaling protocol, -// Otherwise, we would have had four. 
-// The register bit-map is: -// 31 26 25 16 15 10 9 0 -// |------|----------|------|----------| -// | XXXX | FRY | IPX | FRX | - - -#define HRT_GDC_CE_FSM0_POS 0 -#define HRT_GDC_CE_FSM0_LEN 2 -#define HRT_GDC_CE_OPY_POS 2 -#define HRT_GDC_CE_OPY_LEN 14 -#define HRT_GDC_CE_OPX_POS 16 -#define HRT_GDC_CE_OPX_LEN 16 -// CHK_ENGINE register bit-map: -// 31 16 15 2 1 0 -// |----------------|-----------|----| -// | OPX | OPY |FSM0| -// However, for the time being at least, -// this implementation is meaningless in hss model, -// So, we just return 0 - - -#define HRT_GDC_CHK_ENGINE_IDX 0 -#define HRT_GDC_WOIX_IDX 1 -#define HRT_GDC_WOIY_IDX 2 -#define HRT_GDC_BPP_IDX 3 -#define HRT_GDC_FRYIPXFRX_IDX 4 -#define HRT_GDC_OXDIM_IDX 5 -#define HRT_GDC_OYDIM_IDX 6 -#define HRT_GDC_SRC_ADDR_IDX 7 -#define HRT_GDC_SRC_END_ADDR_IDX 8 -#define HRT_GDC_SRC_WRAP_ADDR_IDX 9 -#define HRT_GDC_SRC_STRIDE_IDX 10 -#define HRT_GDC_DST_ADDR_IDX 11 -#define HRT_GDC_DST_STRIDE_IDX 12 -#define HRT_GDC_DX_IDX 13 -#define HRT_GDC_DY_IDX 14 -#define HRT_GDC_P0X_IDX 15 -#define HRT_GDC_P0Y_IDX 16 -#define HRT_GDC_P1X_IDX 17 -#define HRT_GDC_P1Y_IDX 18 -#define HRT_GDC_P2X_IDX 19 -#define HRT_GDC_P2Y_IDX 20 -#define HRT_GDC_P3X_IDX 21 -#define HRT_GDC_P3Y_IDX 22 -#define HRT_GDC_PERF_POINT_IDX 23 // 1x1 ; 1x2 ; 2x1 ; 2x2 pixels per cc -#define HRT_GDC_INTERP_TYPE_IDX 24 // NND ; BLI ; BCI ; LUT -#define HRT_GDC_SCAN_IDX 25 // 0 = STB (Slide To Bottom) ; 1 = STR (Slide To Right) -#define HRT_GDC_PROC_MODE_IDX 26 // 0 = Scaling ; 1 = Tetragon - -#define HRT_GDC_LUT_IDX 32 - - -#endif /* HRT_GDC_v2_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_timer_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_timer_defs.h deleted file mode 100644 index 3082e2f5e014..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_timer_defs.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel 
Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _gp_timer_defs_h -#define _gp_timer_defs_h - -#define _HRT_GP_TIMER_REG_ALIGN 4 - -#define HIVE_GP_TIMER_RESET_REG_IDX 0 -#define HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX 1 -#define HIVE_GP_TIMER_ENABLE_REG_IDX(timer) (HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX + 1 + timer) -#define HIVE_GP_TIMER_VALUE_REG_IDX(timer,timers) (HIVE_GP_TIMER_ENABLE_REG_IDX(timers) + timer) -#define HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer,timers) (HIVE_GP_TIMER_VALUE_REG_IDX(timers, timers) + timer) -#define HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer,timers) (HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timers, timers) + timer) -#define HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq,timers) (HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timers, timers) + irq) -#define HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irqs, timers) + irq) -#define HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irqs, timers, irqs) + irq) - -#define HIVE_GP_TIMER_COUNT_TYPE_HIGH 0 -#define HIVE_GP_TIMER_COUNT_TYPE_LOW 1 -#define HIVE_GP_TIMER_COUNT_TYPE_POSEDGE 2 -#define HIVE_GP_TIMER_COUNT_TYPE_NEGEDGE 3 -#define HIVE_GP_TIMER_COUNT_TYPES 4 - -#endif /* _gp_timer_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gpio_block_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gpio_block_defs.h deleted file mode 100644 index 
a807d4c99041..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gpio_block_defs.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _gpio_block_defs_h_ -#define _gpio_block_defs_h_ - -#define _HRT_GPIO_BLOCK_REG_ALIGN 4 - -/* R/W registers */ -#define _gpio_block_reg_do_e 0 -#define _gpio_block_reg_do_select 1 -#define _gpio_block_reg_do_0 2 -#define _gpio_block_reg_do_1 3 -#define _gpio_block_reg_do_pwm_cnt_0 4 -#define _gpio_block_reg_do_pwm_cnt_1 5 -#define _gpio_block_reg_do_pwm_cnt_2 6 -#define _gpio_block_reg_do_pwm_cnt_3 7 -#define _gpio_block_reg_do_pwm_main_cnt 8 -#define _gpio_block_reg_do_pwm_enable 9 -#define _gpio_block_reg_di_debounce_sel 10 -#define _gpio_block_reg_di_debounce_cnt_0 11 -#define _gpio_block_reg_di_debounce_cnt_1 12 -#define _gpio_block_reg_di_debounce_cnt_2 13 -#define _gpio_block_reg_di_debounce_cnt_3 14 -#define _gpio_block_reg_di_active_level 15 - - -/* read-only registers */ -#define _gpio_block_reg_di 16 - -#endif /* _gpio_block_defs_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_2401_irq_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_2401_irq_types_hrt.h deleted file mode 100644 index 7a94c1d85b08..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_2401_irq_types_hrt.h +++ /dev/null @@ 
-1,69 +0,0 @@ -/* -#ifndef ISP2401 - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ -#define _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ - -/* - * These are the indices of each interrupt in the interrupt - * controller's registers. these can be used as the irq_id - * argument to the hrt functions irq_controller.h. - * - * The definitions are taken from _defs.h - */ -typedef enum hrt_isp_css_irq { - hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID , - hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID , - hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID , - hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID , - hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID , - hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID , - hrt_isp_css_irq_gpio_pin_6 = HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID , - hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID , - hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID , - hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID , - hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID , - hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID , - hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID , - hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID , - hrt_isp_css_irq_isys = HIVE_GP_DEV_IRQ_ISYS_BIT_ID , - hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID , - hrt_isp_css_irq_ifmt = 
HIVE_GP_DEV_IRQ_IFMT_BIT_ID , - hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID , - hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID , - hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID , - hrt_isp_css_irq_is2401 = HIVE_GP_DEV_IRQ_IS2401_BIT_ID , - hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID , - hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID , - hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID , - hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID , - hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID , - hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID , - hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID , - hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID , - hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID , - hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID , - hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID , - /* this must (obviously) be the last on in the enum */ - hrt_isp_css_irq_num_irqs -} hrt_isp_css_irq_t; - -typedef enum hrt_isp_css_irq_status { - hrt_isp_css_irq_status_error, - hrt_isp_css_irq_status_more_irqs, - hrt_isp_css_irq_status_success -} hrt_isp_css_irq_status_t; - -#endif /* _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_defs.h deleted file mode 100644 index 5a2ce9108ae4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_defs.h +++ /dev/null @@ -1,435 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _hive_isp_css_defs_h__ -#define _hive_isp_css_defs_h__ - -#define _HIVE_ISP_CSS_2401_SYSTEM 1 -#define HIVE_ISP_CTRL_DATA_WIDTH 32 -#define HIVE_ISP_CTRL_ADDRESS_WIDTH 32 -#define HIVE_ISP_CTRL_MAX_BURST_SIZE 1 -#define HIVE_ISP_DDR_ADDRESS_WIDTH 36 - -#define HIVE_ISP_HOST_MAX_BURST_SIZE 8 /* host supports bursts in order to prevent repeating DDRAM accesses */ -#define HIVE_ISP_NUM_GPIO_PINS 12 - -/* This list of vector num_elems/elem_bits pairs is valid both in C as initializer - and in the DMA parameter list */ -#define HIVE_ISP_DDR_DMA_SPECS {{32, 8}, {16, 16}, {18, 14}, {25, 10}, {21, 12}} -#define HIVE_ISP_DDR_WORD_BITS 256 -#define HIVE_ISP_DDR_WORD_BYTES (HIVE_ISP_DDR_WORD_BITS/8) -#define HIVE_ISP_DDR_BYTES (512 * 1024 * 1024) -#define HIVE_ISP_DDR_BYTES_RTL (127 * 1024 * 1024) -#define HIVE_ISP_DDR_SMALL_BYTES (128 * 256 / 8) -#define HIVE_ISP_PAGE_SHIFT 12 -#define HIVE_ISP_PAGE_SIZE (1< - -#define _HIVE_ISP_CH_ID_MASK ((1U << HIVE_ISP_CH_ID_BITS)-1) -#define _HIVE_ISP_FMT_TYPE_MASK ((1U << HIVE_ISP_FMT_TYPE_BITS)-1) - -#define _HIVE_STR_TO_MIPI_FMT_TYPE_LSB (HIVE_STR_TO_MIPI_CH_ID_LSB + HIVE_ISP_CH_ID_BITS) -#define _HIVE_STR_TO_MIPI_DATA_B_LSB (HIVE_STR_TO_MIPI_DATA_A_LSB + HIVE_IF_PIXEL_WIDTH) - -#endif /* _hive_isp_css_streaming_to_mipi_types_hrt_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_types.h deleted file mode 100644 index 
58b0e6effbd0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_types.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _HRT_HIVE_TYPES_H -#define _HRT_HIVE_TYPES_H - -#include "version.h" -#include "defs.h" - -#ifndef HRTCAT3 -#define _HRTCAT3(m,n,o) m##n##o -#define HRTCAT3(m,n,o) _HRTCAT3(m,n,o) -#endif - -#ifndef HRTCAT4 -#define _HRTCAT4(m,n,o,p) m##n##o##p -#define HRTCAT4(m,n,o,p) _HRTCAT4(m,n,o,p) -#endif - -#ifndef HRTMIN -#define HRTMIN(a,b) (((a)<(b))?(a):(b)) -#endif - -#ifndef HRTMAX -#define HRTMAX(a,b) (((a)>(b))?(a):(b)) -#endif - -/* boolean data type */ -typedef unsigned int hive_bool; -#define hive_false 0 -#define hive_true 1 - -typedef char hive_int8; -typedef short hive_int16; -typedef int hive_int32; -typedef long long hive_int64; - -typedef unsigned char hive_uint8; -typedef unsigned short hive_uint16; -typedef unsigned int hive_uint32; -typedef unsigned long long hive_uint64; - -/* by default assume 32 bit master port (both data and address) */ -#ifndef HRT_DATA_WIDTH -#define HRT_DATA_WIDTH 32 -#endif -#ifndef HRT_ADDRESS_WIDTH -#define HRT_ADDRESS_WIDTH 32 -#endif - -#define HRT_DATA_BYTES (HRT_DATA_WIDTH/8) -#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH/8) - -#if HRT_DATA_WIDTH == 64 -typedef hive_uint64 hrt_data; -#elif HRT_DATA_WIDTH == 32 -typedef hive_uint32 hrt_data; -#else -#error data width not supported -#endif - -#if HRT_ADDRESS_WIDTH == 
64 -typedef hive_uint64 hrt_address; -#elif HRT_ADDRESS_WIDTH == 32 -typedef hive_uint32 hrt_address; -#else -#error adddres width not supported -#endif - -/* The SP side representation of an HMM virtual address */ -typedef hive_uint32 hrt_vaddress; - -/* use 64 bit addresses in simulation, where possible */ -typedef hive_uint64 hive_sim_address; - -/* below is for csim, not for hrt, rename and move this elsewhere */ - -typedef unsigned int hive_uint; -typedef hive_uint32 hive_address; -typedef hive_address hive_slave_address; -typedef hive_address hive_mem_address; - -/* MMIO devices */ -typedef hive_uint hive_mmio_id; -typedef hive_mmio_id hive_slave_id; -typedef hive_mmio_id hive_port_id; -typedef hive_mmio_id hive_master_id; -typedef hive_mmio_id hive_mem_id; -typedef hive_mmio_id hive_dev_id; -typedef hive_mmio_id hive_fifo_id; - -typedef hive_uint hive_hier_id; -typedef hive_hier_id hive_device_id; -typedef hive_device_id hive_proc_id; -typedef hive_device_id hive_cell_id; -typedef hive_device_id hive_host_id; -typedef hive_device_id hive_bus_id; -typedef hive_device_id hive_bridge_id; -typedef hive_device_id hive_fifo_adapter_id; -typedef hive_device_id hive_custom_device_id; - -typedef hive_uint hive_slot_id; -typedef hive_uint hive_fu_id; -typedef hive_uint hive_reg_file_id; -typedef hive_uint hive_reg_id; - -/* Streaming devices */ -typedef hive_uint hive_outport_id; -typedef hive_uint hive_inport_id; - -typedef hive_uint hive_msink_id; - -/* HRT specific */ -typedef char* hive_program; -typedef char* hive_function; - -#endif /* _HRT_HIVE_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/if_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/if_defs.h deleted file mode 100644 index 7d39e45796ae..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/if_defs.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IF_DEFS_H -#define _IF_DEFS_H - -#define HIVE_IF_FRAME_REQUEST 0xA000 -#define HIVE_IF_LINES_REQUEST 0xB000 -#define HIVE_IF_VECTORS_REQUEST 0xC000 - -#endif /* _IF_DEFS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h deleted file mode 100644 index 7766f78cd123..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _if_subsystem_defs_h__ -#define _if_subsystem_defs_h__ - -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_1 1 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_2 2 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_3 3 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_4 4 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_5 5 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_6 6 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_7 7 -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_FSYNC_LUT_REG 8 -#define HIVE_IFMT_GP_REGS_SRST_IDX 9 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IDX 10 - -#define HIVE_IFMT_GP_REGS_CH_ID_FMT_TYPE_IDX 11 - -#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_BASE HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 - -/* order of the input bits for the ifmt irq controller */ -#define HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID 0 -#define HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID 1 -#define HIVE_IFMT_IRQ_IFT_SEC_BIT_ID 2 -#define HIVE_IFMT_IRQ_MEM_CPY_BIT_ID 3 -#define HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID 4 - -/* order of the input bits for the ifmt Soft reset register */ -#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_BIT_IDX 0 -#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_B_BIT_IDX 1 -#define HIVE_IFMT_GP_REGS_SRST_IFT_SEC_BIT_IDX 2 -#define HIVE_IFMT_GP_REGS_SRST_MEM_CPY_BIT_IDX 3 - -/* order of the input bits for the ifmt Soft reset register */ -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_BIT_IDX 0 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_B_BIT_IDX 1 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_SEC_BIT_IDX 2 -#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_MEM_CPY_BIT_IDX 3 - -#endif /* _if_subsystem_defs_h__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_selector_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_selector_defs.h deleted file mode 100644 index 87fbf82edb5b..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_selector_defs.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_selector_defs_h -#define _input_selector_defs_h - -#ifndef HIVE_ISP_ISEL_SEL_BITS -#define HIVE_ISP_ISEL_SEL_BITS 2 -#endif - -#ifndef HIVE_ISP_CH_ID_BITS -#define HIVE_ISP_CH_ID_BITS 2 -#endif - -#ifndef HIVE_ISP_FMT_TYPE_BITS -#define HIVE_ISP_FMT_TYPE_BITS 5 -#endif - -/* gp_register register id's -- Outputs */ -#define HIVE_ISEL_GP_REGS_SYNCGEN_ENABLE_IDX 0 -#define HIVE_ISEL_GP_REGS_SYNCGEN_FREE_RUNNING_IDX 1 -#define HIVE_ISEL_GP_REGS_SYNCGEN_PAUSE_IDX 2 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_FRAMES_IDX 3 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_PIX_IDX 4 -#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_LINES_IDX 5 -#define HIVE_ISEL_GP_REGS_SYNCGEN_HBLANK_CYCLES_IDX 6 -#define HIVE_ISEL_GP_REGS_SYNCGEN_VBLANK_CYCLES_IDX 7 - -#define HIVE_ISEL_GP_REGS_SOF_IDX 8 -#define HIVE_ISEL_GP_REGS_EOF_IDX 9 -#define HIVE_ISEL_GP_REGS_SOL_IDX 10 -#define HIVE_ISEL_GP_REGS_EOL_IDX 11 - -#define HIVE_ISEL_GP_REGS_PRBS_ENABLE 12 -#define HIVE_ISEL_GP_REGS_PRBS_ENABLE_PORT_B 13 -#define HIVE_ISEL_GP_REGS_PRBS_LFSR_RESET_VALUE 14 - -#define HIVE_ISEL_GP_REGS_TPG_ENABLE 15 -#define HIVE_ISEL_GP_REGS_TPG_ENABLE_PORT_B 16 -#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_MASK_IDX 17 -#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_MASK_IDX 18 -#define HIVE_ISEL_GP_REGS_TPG_XY_CNT_MASK_IDX 19 -#define 
HIVE_ISEL_GP_REGS_TPG_HOR_CNT_DELTA_IDX 20 -#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_DELTA_IDX 21 -#define HIVE_ISEL_GP_REGS_TPG_MODE_IDX 22 -#define HIVE_ISEL_GP_REGS_TPG_R1_IDX 23 -#define HIVE_ISEL_GP_REGS_TPG_G1_IDX 24 -#define HIVE_ISEL_GP_REGS_TPG_B1_IDX 25 -#define HIVE_ISEL_GP_REGS_TPG_R2_IDX 26 -#define HIVE_ISEL_GP_REGS_TPG_G2_IDX 27 -#define HIVE_ISEL_GP_REGS_TPG_B2_IDX 28 - - -#define HIVE_ISEL_GP_REGS_CH_ID_IDX 29 -#define HIVE_ISEL_GP_REGS_FMT_TYPE_IDX 30 -#define HIVE_ISEL_GP_REGS_DATA_SEL_IDX 31 -#define HIVE_ISEL_GP_REGS_SBAND_SEL_IDX 32 -#define HIVE_ISEL_GP_REGS_SYNC_SEL_IDX 33 -#define HIVE_ISEL_GP_REGS_SRST_IDX 37 - -#define HIVE_ISEL_GP_REGS_SRST_SYNCGEN_BIT 0 -#define HIVE_ISEL_GP_REGS_SRST_PRBS_BIT 1 -#define HIVE_ISEL_GP_REGS_SRST_TPG_BIT 2 -#define HIVE_ISEL_GP_REGS_SRST_FIFO_BIT 3 - -/* gp_register register id's -- Inputs */ -#define HIVE_ISEL_GP_REGS_SYNCGEN_HOR_CNT_IDX 34 -#define HIVE_ISEL_GP_REGS_SYNCGEN_VER_CNT_IDX 35 -#define HIVE_ISEL_GP_REGS_SYNCGEN_FRAMES_CNT_IDX 36 - -/* irq sources isel irq controller */ -#define HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID 0 -#define HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID 1 -#define HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID 2 -#define HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID 3 -#define HIVE_ISEL_IRQ_NUM_IRQS 4 - -#endif /* _input_selector_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_switch_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_switch_2400_defs.h deleted file mode 100644 index 20a13c4cdb56..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_switch_2400_defs.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_switch_2400_defs_h -#define _input_switch_2400_defs_h - -#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id)*2) + ((fmt_type)>=16)) -#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type) (((fmt_type)%16) * 2) - -#define HIVE_INPUT_SWITCH_SELECT_NO_OUTPUT 0 -#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1 -#define HIVE_INPUT_SWITCH_SELECT_IF_SEC 2 -#define HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM 3 -#define HIVE_INPUT_SWITCH_VSELECT_NO_OUTPUT 0 -#define HIVE_INPUT_SWITCH_VSELECT_IF_PRIM 1 -#define HIVE_INPUT_SWITCH_VSELECT_IF_SEC 2 -#define HIVE_INPUT_SWITCH_VSELECT_STR_TO_MEM 4 - -#endif /* _input_switch_2400_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_ctrl_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_ctrl_defs.h deleted file mode 100644 index a7f0ca80bc9b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_ctrl_defs.h +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _input_system_ctrl_defs_h -#define _input_system_ctrl_defs_h - -#define _INPUT_SYSTEM_CTRL_REG_ALIGN 4 /* assuming 32 bit control bus width */ - -/* --------------------------------------------------*/ - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -// Number of registers -#define ISYS_CTRL_NOF_REGS 23 - -// Register id's of MMIO slave accesible registers -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_ID 0 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_ID 1 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_ID 2 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID 3 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID 4 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID 5 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID 6 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID 7 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID 8 -#define ISYS_CTRL_ACQ_START_ADDR_REG_ID 9 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID 10 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID 11 -#define ISYS_CTRL_INIT_REG_ID 12 -#define ISYS_CTRL_LAST_COMMAND_REG_ID 13 -#define ISYS_CTRL_NEXT_COMMAND_REG_ID 14 -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID 15 -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID 16 -#define ISYS_CTRL_FSM_STATE_INFO_REG_ID 17 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID 18 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID 19 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID 20 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID 21 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID 22 - - -/* register reset value */ -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_RSTVAL 0 
-#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_RSTVAL 128 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_RSTVAL 3 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_RSTVAL 3 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_RSTVAL 3 -#define ISYS_CTRL_ACQ_START_ADDR_REG_RSTVAL 0 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_RSTVAL 128 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define ISYS_CTRL_INIT_REG_RSTVAL 0 -#define ISYS_CTRL_LAST_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_NEXT_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) -#define ISYS_CTRL_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_RSTVAL 0 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_RSTVAL 0 - -/* register width value */ -#define ISYS_CTRL_CAPT_START_ADDR_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_START_ADDR_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_START_ADDR_C_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_WIDTH 9 -#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_START_ADDR_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_WIDTH 9 -#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_WIDTH 9 -#define ISYS_CTRL_INIT_REG_WIDTH 3 -#define 
ISYS_CTRL_LAST_COMMAND_REG_WIDTH 32 /* slave data width */ -#define ISYS_CTRL_NEXT_COMMAND_REG_WIDTH 32 -#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define ISYS_CTRL_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_WIDTH 32 -#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_WIDTH 1 - -/* bit definitions */ - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ - -/* -InpSysCaptFramesAcq 1/0 [3:0] - 'b0000 -[7:4] - CaptPortId, - CaptA-'b0000 - CaptB-'b0001 - CaptC-'b0010 -[31:16] - NOF_frames -InpSysCaptFrameExt 2/0 [3:0] - 'b0001' -[7:4] - CaptPortId, - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - - 2/1 [31:0] - external capture address -InpSysAcqFrame 2/0 [3:0] - 'b0010, -[31:4] - NOF_ext_mem_words - 2/1 [31:0] - external memory read start address -InpSysOverruleON 1/0 [3:0] - 'b0011, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysOverruleOFF 1/0 [3:0] - 'b0100, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysOverruleCmd 2/0 [3:0] - 'b0101, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - - 2/1 [31:0] - command token value for port opid - - -acknowledge tokens: - -InpSysAckCFA 1/0 [3:0] - 'b0000 - [7:4] - CaptPortId, - CaptA-'b0000 - CaptB- 'b0001 - CaptC-'b0010 - [31:16] - NOF_frames -InpSysAckCFE 1/0 [3:0] - 'b0001' -[7:4] - CaptPortId, - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - -InpSysAckAF 1/0 [3:0] - 'b0010 -InpSysAckOverruleON 1/0 [3:0] - 'b0011, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB 
- 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysAckOverruleOFF 1/0 [3:0] - 'b0100, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - -InpSysAckOverrule 2/0 [3:0] - 'b0101, -[7:4] - overrule port id (opid) - 'b0000 - CaptA - 'b0001 - CaptB - 'b0010 - CaptC - 'b0011 - Acq - 'b0100 - DMA - - - 2/1 [31:0] - acknowledge token value from port opid - - - -*/ - - -/* Command and acknowledge tokens IDs */ -#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0 /* 0000b */ -#define ISYS_CTRL_CAPT_FRAME_EXT_TOKEN_ID 1 /* 0001b */ -#define ISYS_CTRL_ACQ_FRAME_TOKEN_ID 2 /* 0010b */ -#define ISYS_CTRL_OVERRULE_ON_TOKEN_ID 3 /* 0011b */ -#define ISYS_CTRL_OVERRULE_OFF_TOKEN_ID 4 /* 0100b */ -#define ISYS_CTRL_OVERRULE_TOKEN_ID 5 /* 0101b */ - -#define ISYS_CTRL_ACK_CFA_TOKEN_ID 0 -#define ISYS_CTRL_ACK_CFE_TOKEN_ID 1 -#define ISYS_CTRL_ACK_AF_TOKEN_ID 2 -#define ISYS_CTRL_ACK_OVERRULE_ON_TOKEN_ID 3 -#define ISYS_CTRL_ACK_OVERRULE_OFF_TOKEN_ID 4 -#define ISYS_CTRL_ACK_OVERRULE_TOKEN_ID 5 -#define ISYS_CTRL_ACK_DEVICE_ERROR_TOKEN_ID 6 - -#define ISYS_CTRL_TOKEN_ID_MSB 3 -#define ISYS_CTRL_TOKEN_ID_LSB 0 -#define ISYS_CTRL_PORT_ID_TOKEN_MSB 7 -#define ISYS_CTRL_PORT_ID_TOKEN_LSB 4 -#define ISYS_CTRL_NOF_CAPT_TOKEN_MSB 31 -#define ISYS_CTRL_NOF_CAPT_TOKEN_LSB 16 -#define ISYS_CTRL_NOF_EXT_TOKEN_MSB 31 -#define ISYS_CTRL_NOF_EXT_TOKEN_LSB 8 - -#define ISYS_CTRL_TOKEN_ID_IDX 0 -#define ISYS_CTRL_TOKEN_ID_BITS (ISYS_CTRL_TOKEN_ID_MSB - ISYS_CTRL_TOKEN_ID_LSB + 1) -#define ISYS_CTRL_PORT_ID_IDX (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS) -#define ISYS_CTRL_PORT_ID_BITS (ISYS_CTRL_PORT_ID_TOKEN_MSB - ISYS_CTRL_PORT_ID_TOKEN_LSB +1) -#define ISYS_CTRL_NOF_CAPT_IDX ISYS_CTRL_NOF_CAPT_TOKEN_LSB -#define ISYS_CTRL_NOF_CAPT_BITS (ISYS_CTRL_NOF_CAPT_TOKEN_MSB - ISYS_CTRL_NOF_CAPT_TOKEN_LSB + 1) -#define ISYS_CTRL_NOF_EXT_IDX ISYS_CTRL_NOF_EXT_TOKEN_LSB -#define ISYS_CTRL_NOF_EXT_BITS (ISYS_CTRL_NOF_EXT_TOKEN_MSB - 
ISYS_CTRL_NOF_EXT_TOKEN_LSB + 1) - -#define ISYS_CTRL_PORT_ID_CAPT_A 0 /* device ID for capture unit A */ -#define ISYS_CTRL_PORT_ID_CAPT_B 1 /* device ID for capture unit B */ -#define ISYS_CTRL_PORT_ID_CAPT_C 2 /* device ID for capture unit C */ -#define ISYS_CTRL_PORT_ID_ACQUISITION 3 /* device ID for acquistion unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_A 4 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_B 5 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_CAPT_C 6 /* device ID for dma unit */ -#define ISYS_CTRL_PORT_ID_DMA_ACQ 7 /* device ID for dma unit */ - -#define ISYS_CTRL_NO_ACQ_ACK 16 /* no ack from acquisition unit */ -#define ISYS_CTRL_NO_DMA_ACK 0 -#define ISYS_CTRL_NO_CAPT_ACK 16 - -#endif /* _input_system_ctrl_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_defs.h deleted file mode 100644 index ae62163034a6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_defs.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _input_system_defs_h -#define _input_system_defs_h - -/* csi controller modes */ -#define HIVE_CSI_CONFIG_MAIN 0 -#define HIVE_CSI_CONFIG_STEREO1 4 -#define HIVE_CSI_CONFIG_STEREO2 8 - -/* general purpose register IDs */ - -/* Stream Multicast select modes */ -#define HIVE_ISYS_GPREG_MULTICAST_A_IDX 0 -#define HIVE_ISYS_GPREG_MULTICAST_B_IDX 1 -#define HIVE_ISYS_GPREG_MULTICAST_C_IDX 2 - -/* Stream Mux select modes */ -#define HIVE_ISYS_GPREG_MUX_IDX 3 - -/* streaming monitor status and control */ -#define HIVE_ISYS_GPREG_STRMON_STAT_IDX 4 -#define HIVE_ISYS_GPREG_STRMON_COND_IDX 5 -#define HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX 6 -#define HIVE_ISYS_GPREG_SRST_IDX 7 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_IDX 8 -#define HIVE_ISYS_GPREG_REG_PORT_A_IDX 9 -#define HIVE_ISYS_GPREG_REG_PORT_B_IDX 10 - -/* Bit numbers of the soft reset register */ -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0 -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1 -#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_A_BIT 3 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_B_BIT 4 -#define HIVE_ISYS_GPREG_SRST_MULTICAST_C_BIT 5 -#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT 6 -#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT 7 -#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT 8 -#define HIVE_ISYS_GPREG_SRST_ACQ_BIT 9 -/* For ISYS_CTRL 5bits are defined to allow soft-reset per sub-controller and top-ctrl */ -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT 10 /*LSB for 5bit vector */ -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_A_BIT 10 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_B_BIT 11 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_C_BIT 12 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_ACQ_BIT 13 -#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_TOP_BIT 14 -/* -- */ -#define HIVE_ISYS_GPREG_SRST_STR_MUX_BIT 15 -#define HIVE_ISYS_GPREG_SRST_CIO2AHB_BIT 16 -#define HIVE_ISYS_GPREG_SRST_GEN_SHORT_FIFO_BIT 17 -#define HIVE_ISYS_GPREG_SRST_WIDE_BUS_BIT 18 // includes CIO conv -#define 
HIVE_ISYS_GPREG_SRST_DMA_BIT 19 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_A_BIT 20 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_B_BIT 21 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_C_BIT 22 -#define HIVE_ISYS_GPREG_SRST_SF_CTRL_ACQ_BIT 23 -#define HIVE_ISYS_GPREG_SRST_CSI_BE_OUT_BIT 24 - -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_A_BIT 0 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_B_BIT 1 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_C_BIT 2 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_ACQ_BIT 3 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_DMA_BIT 4 -#define HIVE_ISYS_GPREG_SLV_REG_SRST_ISYS_CTRL_BIT 5 - -/* streaming monitor port id's */ -#define HIVE_ISYS_STR_MON_PORT_CAPA 0 -#define HIVE_ISYS_STR_MON_PORT_CAPB 1 -#define HIVE_ISYS_STR_MON_PORT_CAPC 2 -#define HIVE_ISYS_STR_MON_PORT_ACQ 3 -#define HIVE_ISYS_STR_MON_PORT_CSS_GENSH 4 -#define HIVE_ISYS_STR_MON_PORT_SF_GENSH 5 -#define HIVE_ISYS_STR_MON_PORT_SP2ISYS 6 -#define HIVE_ISYS_STR_MON_PORT_ISYS2SP 7 -#define HIVE_ISYS_STR_MON_PORT_PIXA 8 -#define HIVE_ISYS_STR_MON_PORT_PIXB 9 - -/* interrupt bit ID's */ -#define HIVE_ISYS_IRQ_CSI_SOF_BIT_ID 0 -#define HIVE_ISYS_IRQ_CSI_EOF_BIT_ID 1 -#define HIVE_ISYS_IRQ_CSI_SOL_BIT_ID 2 -#define HIVE_ISYS_IRQ_CSI_EOL_BIT_ID 3 -#define HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID 4 -#define HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID 5 -#define HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP 6 -#define HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP 7 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_A_UNDEF_PH 7*/ -#define HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP 8 -#define HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP 9 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_B_UNDEF_PH 10*/ -#define HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP 10 -#define HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP 11 -/*#define HIVE_ISYS_IRQ_CAP_UNIT_C_UNDEF_PH 13*/ -#define HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH 12 -/*#define HIVE_ISYS_IRQ_ACQ_UNIT_UNDEF_PH 15*/ -#define HIVE_ISYS_IRQ_INP_CTRL_CAPA 13 -#define HIVE_ISYS_IRQ_INP_CTRL_CAPB 14 -#define HIVE_ISYS_IRQ_INP_CTRL_CAPC 15 -#define HIVE_ISYS_IRQ_CIO2AHB 16 -#define 
HIVE_ISYS_IRQ_DMA_BIT_ID 17 -#define HIVE_ISYS_IRQ_STREAM_MON_BIT_ID 18 -#define HIVE_ISYS_IRQ_NUM_BITS 19 - -/* DMA */ -#define HIVE_ISYS_DMA_CHANNEL 0 -#define HIVE_ISYS_DMA_IBUF_DDR_CONN 0 -#define HIVE_ISYS_DMA_HEIGHT 1 -#define HIVE_ISYS_DMA_ELEMS 1 /* both master buses of same width */ -#define HIVE_ISYS_DMA_STRIDE 0 /* no stride required as height is fixed to 1 */ -#define HIVE_ISYS_DMA_CROP 0 /* no cropping */ -#define HIVE_ISYS_DMA_EXTENSION 0 /* no extension as elem width is same on both side */ - -#endif /* _input_system_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/irq_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/irq_controller_defs.h deleted file mode 100644 index ec6dd4487158..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/irq_controller_defs.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _irq_controller_defs_h -#define _irq_controller_defs_h - -#define _HRT_IRQ_CONTROLLER_EDGE_REG_IDX 0 -#define _HRT_IRQ_CONTROLLER_MASK_REG_IDX 1 -#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2 -#define _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX 3 -#define _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX 4 -#define _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX 5 -#define _HRT_IRQ_CONTROLLER_STR_OUT_ENABLE_REG_IDX 6 - -#define _HRT_IRQ_CONTROLLER_REG_ALIGN 4 - -#endif /* _irq_controller_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2400_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2400_support.h deleted file mode 100644 index e00bc841d0f0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2400_support.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _isp2400_support_h -#define _isp2400_support_h - -#ifndef ISP2400_VECTOR_TYPES -/* This typedef is to be able to include hive header files - in the host code which is useful in crun */ -typedef char *tmemvectors, *tmemvectoru, *tvector; -#endif - -#define hrt_isp_vamem1_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem1), addr, val) -#define hrt_isp_vamem2_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem2), addr, val) - -#define hrt_isp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _base_dmem) -#define hrt_isp_vmem(cell) HRT_PROC_TYPE_PROP(cell, _simd_vmem) - -#define hrt_isp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_dmem(cell)) -#define hrt_isp_vmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_vmem(cell)) - -#if ISP_HAS_HIST - #define hrt_isp_hist(cell) HRT_PROC_TYPE_PROP(cell, _simd_histogram) - #define hrt_isp_hist_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_hist(cell)) -#endif - -#endif /* _isp2400_support_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2401_mamoiada_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2401_mamoiada_params.h deleted file mode 100644 index 033e23bcf672..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2401_mamoiada_params.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -/* Version */ -#define RTL_VERSION - -/* Cell name */ -#define ISP_CELL_TYPE isp2401_mamoiada -#define ISP_VMEM simd_vmem -#define _HRT_ISP_VMEM isp2401_mamoiada_simd_vmem - -/* instruction pipeline depth */ -#define ISP_BRANCHDELAY 5 - -/* bus */ -#define ISP_BUS_WIDTH 32 -#define ISP_BUS_ADDR_WIDTH 32 -#define ISP_BUS_BURST_SIZE 1 - -/* data-path */ -#define ISP_SCALAR_WIDTH 32 -#define ISP_SLICE_NELEMS 4 -#define ISP_VEC_NELEMS 64 -#define ISP_VEC_ELEMBITS 14 -#define ISP_VEC_ELEM8BITS 16 -#define ISP_CLONE_DATAPATH_IS_16 1 - -/* memories */ -#define ISP_DMEM_DEPTH 4096 -#define ISP_DMEM_BSEL_DOWNSAMPLE 8 -#define ISP_VMEM_DEPTH 3072 -#define ISP_VMEM_BSEL_DOWNSAMPLE 8 -#define ISP_VMEM_ELEMBITS 14 -#define ISP_VMEM_ELEM_PRECISION 14 -#define ISP_VMEM_IS_BAMEM 1 -#if ISP_VMEM_IS_BAMEM - #define ISP_VMEM_BAMEM_MAX_BOI_HEIGHT 8 - #define ISP_VMEM_BAMEM_LATENCY 5 - #define ISP_VMEM_BAMEM_BANK_NARROWING_FACTOR 2 - #define ISP_VMEM_BAMEM_NR_DATA_PLANES 8 - #define ISP_VMEM_BAMEM_NR_CFG_REGISTERS 16 - #define ISP_VMEM_BAMEM_LININT 0 - #define ISP_VMEM_BAMEM_DAP_BITS 3 - #define ISP_VMEM_BAMEM_LININT_FRAC_BITS 0 - #define ISP_VMEM_BAMEM_PID_BITS 3 - #define ISP_VMEM_BAMEM_OFFSET_BITS 19 - #define ISP_VMEM_BAMEM_ADDRESS_BITS 25 - #define ISP_VMEM_BAMEM_RID_BITS 4 - #define ISP_VMEM_BAMEM_TRANSPOSITION 1 - #define ISP_VMEM_BAMEM_VEC_PLUS_SLICE 1 - #define ISP_VMEM_BAMEM_ARB_SERVICE_CYCLE_BITS 1 - #define ISP_VMEM_BAMEM_LUT_ELEMS 16 - #define ISP_VMEM_BAMEM_LUT_ADDR_WIDTH 14 - #define ISP_VMEM_BAMEM_HALF_BLOCK_WRITE 1 - #define ISP_VMEM_BAMEM_SMART_FETCH 1 - #define ISP_VMEM_BAMEM_BIG_ENDIANNESS 0 -#endif /* ISP_VMEM_IS_BAMEM */ -#define ISP_PMEM_DEPTH 2048 -#define ISP_PMEM_WIDTH 640 -#define ISP_VAMEM_ADDRESS_BITS 12 -#define ISP_VAMEM_ELEMBITS 12 -#define ISP_VAMEM_DEPTH 2048 -#define ISP_VAMEM_ALIGNMENT 2 -#define ISP_VA_ADDRESS_WIDTH 896 -#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS -#define 
ISP_HIST_ADDRESS_BITS 12 -#define ISP_HIST_ALIGNMENT 4 -#define ISP_HIST_COMP_IN_PREC 12 -#define ISP_HIST_DEPTH 1024 -#define ISP_HIST_WIDTH 24 -#define ISP_HIST_COMPONENTS 4 - -/* program counter */ -#define ISP_PC_WIDTH 13 - -/* Template switches */ -#define ISP_SHIELD_INPUT_DMEM 0 -#define ISP_SHIELD_OUTPUT_DMEM 1 -#define ISP_SHIELD_INPUT_VMEM 0 -#define ISP_SHIELD_OUTPUT_VMEM 0 -#define ISP_SHIELD_INPUT_PMEM 1 -#define ISP_SHIELD_OUTPUT_PMEM 1 -#define ISP_SHIELD_INPUT_HIST 1 -#define ISP_SHIELD_OUTPUT_HIST 1 -/* When LUT is select the shielding is always on */ -#define ISP_SHIELD_INPUT_VAMEM 1 -#define ISP_SHIELD_OUTPUT_VAMEM 1 - -#define ISP_HAS_IRQ 1 -#define ISP_HAS_SOFT_RESET 1 -#define ISP_HAS_VEC_DIV 0 -#define ISP_HAS_VFU_W_2O 1 -#define ISP_HAS_DEINT3 1 -#define ISP_HAS_LUT 1 -#define ISP_HAS_HIST 1 -#define ISP_HAS_VALSU 1 -#define ISP_HAS_3rdVALSU 1 -#define ISP_VRF1_HAS_2P 1 - -#define ISP_SRU_GUARDING 1 -#define ISP_VLSU_GUARDING 1 - -#define ISP_VRF_RAM 1 -#define ISP_SRF_RAM 1 - -#define ISP_SPLIT_VMUL_VADD_IS 0 -#define ISP_RFSPLIT_FPGA 0 - -/* RSN or Bus pipelining */ -#define ISP_RSN_PIPE 1 -#define ISP_VSF_BUS_PIPE 0 - -/* extra slave port to vmem */ -#define ISP_IF_VMEM 0 -#define ISP_GDC_VMEM 0 - -/* Streaming ports */ -#define ISP_IF 1 -#define ISP_IF_B 1 -#define ISP_GDC 1 -#define ISP_SCL 1 -#define ISP_GPFIFO 1 -#define ISP_SP 1 - -/* Removing Issue Slot(s) */ -#define ISP_HAS_NOT_SIMD_IS2 0 -#define ISP_HAS_NOT_SIMD_IS3 0 -#define ISP_HAS_NOT_SIMD_IS4 0 -#define ISP_HAS_NOT_SIMD_IS4_VADD 0 -#define ISP_HAS_NOT_SIMD_IS5 0 -#define ISP_HAS_NOT_SIMD_IS6 0 -#define ISP_HAS_NOT_SIMD_IS7 0 -#define ISP_HAS_NOT_SIMD_IS8 0 - -/* ICache */ -#define ISP_ICACHE 1 -#define ISP_ICACHE_ONLY 0 -#define ISP_ICACHE_PREFETCH 1 -#define ISP_ICACHE_INDEX_BITS 8 -#define ISP_ICACHE_SET_BITS 5 -#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1 - -/* Experimental Flags */ -#define ISP_EXP_1 0 -#define ISP_EXP_2 0 -#define ISP_EXP_3 0 -#define ISP_EXP_4 0 -#define 
ISP_EXP_5 0 -#define ISP_EXP_6 0 - -/* Derived values */ -#define ISP_LOG2_PMEM_WIDTH 10 -#define ISP_VEC_WIDTH 896 -#define ISP_SLICE_WIDTH 56 -#define ISP_VMEM_WIDTH 896 -#define ISP_VMEM_ALIGN 128 -#if ISP_VMEM_IS_BAMEM - #define ISP_VMEM_ALIGN_ELEM 2 -#endif /* ISP_VMEM_IS_BAMEM */ -#define ISP_SIMDLSU 1 -#define ISP_LSU_IMM_BITS 12 - -/* convenient shortcuts for software*/ -#define ISP_NWAY ISP_VEC_NELEMS -#define NBITS ISP_VEC_ELEMBITS - -#define _isp_ceil_div(a,b) (((a)+(b)-1)/(b)) - -#ifdef C_RUN -#define ISP_VEC_ALIGN (_isp_ceil_div(ISP_VEC_WIDTH, 64)*8) -#else -#define ISP_VEC_ALIGN ISP_VMEM_ALIGN -#endif - -/* HRT specific vector support */ -#define isp2401_mamoiada_vector_alignment ISP_VEC_ALIGN -#define isp2401_mamoiada_vector_elem_bits ISP_VMEM_ELEMBITS -#define isp2401_mamoiada_vector_elem_precision ISP_VMEM_ELEM_PRECISION -#define isp2401_mamoiada_vector_num_elems ISP_VEC_NELEMS - -/* register file sizes */ -#define ISP_RF0_SIZE 64 -#define ISP_RF1_SIZE 16 -#define ISP_RF2_SIZE 64 -#define ISP_RF3_SIZE 4 -#define ISP_RF4_SIZE 64 -#define ISP_RF5_SIZE 16 -#define ISP_RF6_SIZE 16 -#define ISP_RF7_SIZE 16 -#define ISP_RF8_SIZE 16 -#define ISP_RF9_SIZE 16 -#define ISP_RF10_SIZE 16 -#define ISP_RF11_SIZE 16 -#define ISP_VRF1_SIZE 32 -#define ISP_VRF2_SIZE 32 -#define ISP_VRF3_SIZE 32 -#define ISP_VRF4_SIZE 32 -#define ISP_VRF5_SIZE 32 -#define ISP_VRF6_SIZE 32 -#define ISP_VRF7_SIZE 32 -#define ISP_VRF8_SIZE 32 -#define ISP_SRF1_SIZE 4 -#define ISP_SRF2_SIZE 64 -#define ISP_SRF3_SIZE 64 -#define ISP_SRF4_SIZE 32 -#define ISP_SRF5_SIZE 64 -#define ISP_FRF0_SIZE 16 -#define ISP_FRF1_SIZE 4 -#define ISP_FRF2_SIZE 16 -#define ISP_FRF3_SIZE 4 -#define ISP_FRF4_SIZE 4 -#define ISP_FRF5_SIZE 8 -#define ISP_FRF6_SIZE 4 -/* register file read latency */ -#define ISP_VRF1_READ_LAT 1 -#define ISP_VRF2_READ_LAT 1 -#define ISP_VRF3_READ_LAT 1 -#define ISP_VRF4_READ_LAT 1 -#define ISP_VRF5_READ_LAT 1 -#define ISP_VRF6_READ_LAT 1 -#define ISP_VRF7_READ_LAT 1 -#define 
ISP_VRF8_READ_LAT 1 -#define ISP_SRF1_READ_LAT 1 -#define ISP_SRF2_READ_LAT 1 -#define ISP_SRF3_READ_LAT 1 -#define ISP_SRF4_READ_LAT 1 -#define ISP_SRF5_READ_LAT 1 -#define ISP_SRF5_READ_LAT 1 -/* immediate sizes */ -#define ISP_IS1_IMM_BITS 14 -#define ISP_IS2_IMM_BITS 13 -#define ISP_IS3_IMM_BITS 14 -#define ISP_IS4_IMM_BITS 14 -#define ISP_IS5_IMM_BITS 9 -#define ISP_IS6_IMM_BITS 16 -#define ISP_IS7_IMM_BITS 9 -#define ISP_IS8_IMM_BITS 16 -#define ISP_IS9_IMM_BITS 11 -/* fifo depths */ -#define ISP_IF_FIFO_DEPTH 0 -#define ISP_IF_B_FIFO_DEPTH 0 -#define ISP_DMA_FIFO_DEPTH 0 -#define ISP_OF_FIFO_DEPTH 0 -#define ISP_GDC_FIFO_DEPTH 0 -#define ISP_SCL_FIFO_DEPTH 0 -#define ISP_GPFIFO_FIFO_DEPTH 0 -#define ISP_SP_FIFO_DEPTH 0 diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_acquisition_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_acquisition_defs.h deleted file mode 100644 index 593620721627..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_acquisition_defs.h +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _isp_acquisition_defs_h -#define _isp_acquisition_defs_h - -#define _ISP_ACQUISITION_REG_ALIGN 4 /* assuming 32 bit control bus width */ -#define _ISP_ACQUISITION_BYTES_PER_ELEM 4 - -/* --------------------------------------------------*/ - -#define NOF_ACQ_IRQS 1 - -/* --------------------------------------------------*/ -/* FSM */ -/* --------------------------------------------------*/ -#define MEM2STREAM_FSM_STATE_BITS 2 -#define ACQ_SYNCHRONIZER_FSM_STATE_BITS 2 - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -#define NOF_ACQ_REGS 12 - -// Register id's of MMIO slave accesible registers -#define ACQ_START_ADDR_REG_ID 0 -#define ACQ_MEM_REGION_SIZE_REG_ID 1 -#define ACQ_NUM_MEM_REGIONS_REG_ID 2 -#define ACQ_INIT_REG_ID 3 -#define ACQ_RECEIVED_SHORT_PACKETS_REG_ID 4 -#define ACQ_RECEIVED_LONG_PACKETS_REG_ID 5 -#define ACQ_LAST_COMMAND_REG_ID 6 -#define ACQ_NEXT_COMMAND_REG_ID 7 -#define ACQ_LAST_ACKNOWLEDGE_REG_ID 8 -#define ACQ_NEXT_ACKNOWLEDGE_REG_ID 9 -#define ACQ_FSM_STATE_INFO_REG_ID 10 -#define ACQ_INT_CNTR_INFO_REG_ID 11 - -// Register width -#define ACQ_START_ADDR_REG_WIDTH 9 -#define ACQ_MEM_REGION_SIZE_REG_WIDTH 9 -#define ACQ_NUM_MEM_REGIONS_REG_WIDTH 9 -#define ACQ_INIT_REG_WIDTH 3 -#define ACQ_RECEIVED_SHORT_PACKETS_REG_WIDTH 32 -#define ACQ_RECEIVED_LONG_PACKETS_REG_WIDTH 32 -#define ACQ_LAST_COMMAND_REG_WIDTH 32 -#define ACQ_NEXT_COMMAND_REG_WIDTH 32 -#define ACQ_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define ACQ_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define ACQ_FSM_STATE_INFO_REG_WIDTH ((MEM2STREAM_FSM_STATE_BITS * 3) + (ACQ_SYNCHRONIZER_FSM_STATE_BITS *3)) -#define ACQ_INT_CNTR_INFO_REG_WIDTH 32 - -/* register reset value */ -#define ACQ_START_ADDR_REG_RSTVAL 0 -#define ACQ_MEM_REGION_SIZE_REG_RSTVAL 128 -#define ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define ACQ_INIT_REG_RSTVAL 0 -#define ACQ_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0 -#define 
ACQ_RECEIVED_LONG_PACKETS_REG_RSTVAL 0 -#define ACQ_LAST_COMMAND_REG_RSTVAL 0 -#define ACQ_NEXT_COMMAND_REG_RSTVAL 0 -#define ACQ_LAST_ACKNOWLEDGE_REG_RSTVAL 0 -#define ACQ_NEXT_ACKNOWLEDGE_REG_RSTVAL 0 -#define ACQ_FSM_STATE_INFO_REG_RSTVAL 0 -#define ACQ_INT_CNTR_INFO_REG_RSTVAL 0 - -/* bit definitions */ -#define ACQ_INIT_RST_REG_BIT 0 -#define ACQ_INIT_RESYNC_BIT 2 -#define ACQ_INIT_RST_IDX ACQ_INIT_RST_REG_BIT -#define ACQ_INIT_RST_BITS 1 -#define ACQ_INIT_RESYNC_IDX ACQ_INIT_RESYNC_BIT -#define ACQ_INIT_RESYNC_BITS 1 - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ -#define ACQ_TOKEN_ID_LSB 0 -#define ACQ_TOKEN_ID_MSB 3 -#define ACQ_TOKEN_WIDTH (ACQ_TOKEN_ID_MSB - ACQ_TOKEN_ID_LSB + 1) // 4 -#define ACQ_TOKEN_ID_IDX 0 -#define ACQ_TOKEN_ID_BITS ACQ_TOKEN_WIDTH -#define ACQ_INIT_CMD_INIT_IDX 4 -#define ACQ_INIT_CMD_INIT_BITS 3 -#define ACQ_CMD_START_ADDR_IDX 4 -#define ACQ_CMD_START_ADDR_BITS 9 -#define ACQ_CMD_NOFWORDS_IDX 13 -#define ACQ_CMD_NOFWORDS_BITS 9 -#define ACQ_MEM_REGION_ID_IDX 22 -#define ACQ_MEM_REGION_ID_BITS 9 -#define ACQ_PACKET_LENGTH_TOKEN_MSB 21 -#define ACQ_PACKET_LENGTH_TOKEN_LSB 13 -#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_MSB 9 -#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_LSB 4 -#define ACQ_PACKET_CH_ID_TOKEN_MSB 11 -#define ACQ_PACKET_CH_ID_TOKEN_LSB 10 -#define ACQ_PACKET_MEM_REGION_ID_TOKEN_MSB 12 /* only for capt_end_of_packet_written */ -#define ACQ_PACKET_MEM_REGION_ID_TOKEN_LSB 4 /* only for capt_end_of_packet_written */ - - -/* Command tokens IDs */ -#define ACQ_READ_REGION_AUTO_INCR_TOKEN_ID 0 //0000b -#define ACQ_READ_REGION_TOKEN_ID 1 //0001b -#define ACQ_READ_REGION_SOP_TOKEN_ID 2 //0010b -#define ACQ_INIT_TOKEN_ID 8 //1000b - -/* Acknowledge token IDs */ -#define ACQ_READ_REGION_ACK_TOKEN_ID 0 //0000b -#define ACQ_END_OF_PACKET_TOKEN_ID 4 //0100b -#define ACQ_END_OF_REGION_TOKEN_ID 5 //0101b -#define ACQ_SOP_MISMATCH_TOKEN_ID 6 //0110b -#define 
ACQ_UNDEF_PH_TOKEN_ID 7 //0111b - -#define ACQ_TOKEN_MEMREGIONID_MSB 30 -#define ACQ_TOKEN_MEMREGIONID_LSB 22 -#define ACQ_TOKEN_NOFWORDS_MSB 21 -#define ACQ_TOKEN_NOFWORDS_LSB 13 -#define ACQ_TOKEN_STARTADDR_MSB 12 -#define ACQ_TOKEN_STARTADDR_LSB 4 - - -/* --------------------------------------------------*/ -/* MIPI */ -/* --------------------------------------------------*/ - -#define WORD_COUNT_WIDTH 16 -#define PKT_CODE_WIDTH 6 -#define CHN_NO_WIDTH 2 -#define ERROR_INFO_WIDTH 8 - -#define LONG_PKTCODE_MAX 63 -#define LONG_PKTCODE_MIN 16 -#define SHORT_PKTCODE_MAX 15 - -#define EOF_CODE 1 - -/* --------------------------------------------------*/ -/* Packet Info */ -/* --------------------------------------------------*/ -#define ACQ_START_OF_FRAME 0 -#define ACQ_END_OF_FRAME 1 -#define ACQ_START_OF_LINE 2 -#define ACQ_END_OF_LINE 3 -#define ACQ_LINE_PAYLOAD 4 -#define ACQ_GEN_SH_PKT 5 - - -/* bit definition */ -#define ACQ_PKT_TYPE_IDX 16 -#define ACQ_PKT_TYPE_BITS 6 -#define ACQ_PKT_SOP_IDX 32 -#define ACQ_WORD_CNT_IDX 0 -#define ACQ_WORD_CNT_BITS 16 -#define ACQ_PKT_INFO_IDX 16 -#define ACQ_PKT_INFO_BITS 8 -#define ACQ_HEADER_DATA_IDX 0 -#define ACQ_HEADER_DATA_BITS 16 -#define ACQ_ACK_TOKEN_ID_IDX ACQ_TOKEN_ID_IDX -#define ACQ_ACK_TOKEN_ID_BITS ACQ_TOKEN_ID_BITS -#define ACQ_ACK_NOFWORDS_IDX 13 -#define ACQ_ACK_NOFWORDS_BITS 9 -#define ACQ_ACK_PKT_LEN_IDX 4 -#define ACQ_ACK_PKT_LEN_BITS 16 - - -/* --------------------------------------------------*/ -/* Packet Data Type */ -/* --------------------------------------------------*/ - - -#define ACQ_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */ -#define ACQ_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */ -#define ACQ_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */ -#define ACQ_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */ -#define ACQ_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */ -#define ACQ_RGB444_DATA 32 /* 10 0000 RGB444 */ -#define ACQ_RGB555_DATA 33 /* 10 0001 RGB555 */ -#define ACQ_RGB565_DATA 34 /* 10 
0010 RGB565 */ -#define ACQ_RGB666_DATA 35 /* 10 0011 RGB666 */ -#define ACQ_RGB888_DATA 36 /* 10 0100 RGB888 */ -#define ACQ_RAW6_DATA 40 /* 10 1000 RAW6 */ -#define ACQ_RAW7_DATA 41 /* 10 1001 RAW7 */ -#define ACQ_RAW8_DATA 42 /* 10 1010 RAW8 */ -#define ACQ_RAW10_DATA 43 /* 10 1011 RAW10 */ -#define ACQ_RAW12_DATA 44 /* 10 1100 RAW12 */ -#define ACQ_RAW14_DATA 45 /* 10 1101 RAW14 */ -#define ACQ_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */ -#define ACQ_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */ -#define ACQ_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */ -#define ACQ_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */ -#define ACQ_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */ -#define ACQ_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */ -#define ACQ_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */ -#define ACQ_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */ -#define ACQ_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */ -#define ACQ_SOF_DATA 0 /* 00 0000 frame start */ -#define ACQ_EOF_DATA 1 /* 00 0001 frame end */ -#define ACQ_SOL_DATA 2 /* 00 0010 line start */ -#define ACQ_EOL_DATA 3 /* 00 0011 line end */ -#define ACQ_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */ -#define ACQ_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */ -#define ACQ_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */ -#define ACQ_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */ -#define ACQ_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */ -#define ACQ_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */ -#define ACQ_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */ -#define ACQ_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */ -#define ACQ_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */ -#define ACQ_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma 
Shifted Pixel Sampling) */ -#define ACQ_RESERVED_DATA_TYPE_MIN 56 -#define ACQ_RESERVED_DATA_TYPE_MAX 63 -#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MIN 19 -#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MAX 23 -#define ACQ_YUV_RESERVED_DATA_TYPE 27 -#define ACQ_RGB_RESERVED_DATA_TYPE_MIN 37 -#define ACQ_RGB_RESERVED_DATA_TYPE_MAX 39 -#define ACQ_RAW_RESERVED_DATA_TYPE_MIN 46 -#define ACQ_RAW_RESERVED_DATA_TYPE_MAX 47 - -/* --------------------------------------------------*/ - -#endif /* _isp_acquisition_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_capture_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_capture_defs.h deleted file mode 100644 index aa413df022f2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_capture_defs.h +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _isp_capture_defs_h -#define _isp_capture_defs_h - -#define _ISP_CAPTURE_REG_ALIGN 4 /* assuming 32 bit control bus width */ -#define _ISP_CAPTURE_BITS_PER_ELEM 32 /* only for data, not SOP */ -#define _ISP_CAPTURE_BYTES_PER_ELEM (_ISP_CAPTURE_BITS_PER_ELEM/8 ) -#define _ISP_CAPTURE_BYTES_PER_WORD 32 /* 256/8 */ -#define _ISP_CAPTURE_ELEM_PER_WORD _ISP_CAPTURE_BYTES_PER_WORD / _ISP_CAPTURE_BYTES_PER_ELEM - -//#define CAPT_RCV_ACK 1 -//#define CAPT_WRT_ACK 2 -//#define CAPT_IRQ_ACK 3 - -/* --------------------------------------------------*/ - -#define NOF_IRQS 2 - -/* --------------------------------------------------*/ -/* REGISTER INFO */ -/* --------------------------------------------------*/ - -// Number of registers -#define CAPT_NOF_REGS 16 - -// Register id's of MMIO slave accesible registers -#define CAPT_START_MODE_REG_ID 0 -#define CAPT_START_ADDR_REG_ID 1 -#define CAPT_MEM_REGION_SIZE_REG_ID 2 -#define CAPT_NUM_MEM_REGIONS_REG_ID 3 -#define CAPT_INIT_REG_ID 4 -#define CAPT_START_REG_ID 5 -#define CAPT_STOP_REG_ID 6 - -#define CAPT_PACKET_LENGTH_REG_ID 7 -#define CAPT_RECEIVED_LENGTH_REG_ID 8 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_ID 9 -#define CAPT_RECEIVED_LONG_PACKETS_REG_ID 10 -#define CAPT_LAST_COMMAND_REG_ID 11 -#define CAPT_NEXT_COMMAND_REG_ID 12 -#define CAPT_LAST_ACKNOWLEDGE_REG_ID 13 -#define CAPT_NEXT_ACKNOWLEDGE_REG_ID 14 -#define CAPT_FSM_STATE_INFO_REG_ID 15 - -// Register width -#define CAPT_START_MODE_REG_WIDTH 1 -//#define CAPT_START_ADDR_REG_WIDTH 9 -//#define CAPT_MEM_REGION_SIZE_REG_WIDTH 9 -//#define CAPT_NUM_MEM_REGIONS_REG_WIDTH 9 -#define CAPT_INIT_REG_WIDTH (22 + 4) - -#define CAPT_START_REG_WIDTH 1 -#define CAPT_STOP_REG_WIDTH 1 - -/* --------------------------------------------------*/ -/* FSM */ -/* --------------------------------------------------*/ -#define CAPT_WRITE2MEM_FSM_STATE_BITS 2 -#define CAPT_SYNCHRONIZER_FSM_STATE_BITS 3 - - -#define CAPT_PACKET_LENGTH_REG_WIDTH 17 -#define 
CAPT_RECEIVED_LENGTH_REG_WIDTH 17 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_WIDTH 32 -#define CAPT_RECEIVED_LONG_PACKETS_REG_WIDTH 32 -#define CAPT_LAST_COMMAND_REG_WIDTH 32 -/* #define CAPT_NEXT_COMMAND_REG_WIDTH 32 */ -#define CAPT_LAST_ACKNOWLEDGE_REG_WIDTH 32 -#define CAPT_NEXT_ACKNOWLEDGE_REG_WIDTH 32 -#define CAPT_FSM_STATE_INFO_REG_WIDTH ((CAPT_WRITE2MEM_FSM_STATE_BITS * 3) + (CAPT_SYNCHRONIZER_FSM_STATE_BITS * 3)) - -//#define CAPT_INIT_RESTART_MEM_ADDR_WIDTH 9 -//#define CAPT_INIT_RESTART_MEM_REGION_WIDTH 9 - -/* register reset value */ -#define CAPT_START_MODE_REG_RSTVAL 0 -#define CAPT_START_ADDR_REG_RSTVAL 0 -#define CAPT_MEM_REGION_SIZE_REG_RSTVAL 128 -#define CAPT_NUM_MEM_REGIONS_REG_RSTVAL 3 -#define CAPT_INIT_REG_RSTVAL 0 - -#define CAPT_START_REG_RSTVAL 0 -#define CAPT_STOP_REG_RSTVAL 0 - -#define CAPT_PACKET_LENGTH_REG_RSTVAL 0 -#define CAPT_RECEIVED_LENGTH_REG_RSTVAL 0 -#define CAPT_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0 -#define CAPT_RECEIVED_LONG_PACKETS_REG_RSTVAL 0 -#define CAPT_LAST_COMMAND_REG_RSTVAL 0 -#define CAPT_NEXT_COMMAND_REG_RSTVAL 0 -#define CAPT_LAST_ACKNOWLEDGE_REG_RSTVAL 0 -#define CAPT_NEXT_ACKNOWLEDGE_REG_RSTVAL 0 -#define CAPT_FSM_STATE_INFO_REG_RSTVAL 0 - -/* bit definitions */ -#define CAPT_INIT_RST_REG_BIT 0 -#define CAPT_INIT_FLUSH_BIT 1 -#define CAPT_INIT_RESYNC_BIT 2 -#define CAPT_INIT_RESTART_BIT 3 -#define CAPT_INIT_RESTART_MEM_ADDR_LSB 4 -#define CAPT_INIT_RESTART_MEM_ADDR_MSB 14 -#define CAPT_INIT_RESTART_MEM_REGION_LSB 15 -#define CAPT_INIT_RESTART_MEM_REGION_MSB 25 - - -#define CAPT_INIT_RST_REG_IDX CAPT_INIT_RST_REG_BIT -#define CAPT_INIT_RST_REG_BITS 1 -#define CAPT_INIT_FLUSH_IDX CAPT_INIT_FLUSH_BIT -#define CAPT_INIT_FLUSH_BITS 1 -#define CAPT_INIT_RESYNC_IDX CAPT_INIT_RESYNC_BIT -#define CAPT_INIT_RESYNC_BITS 1 -#define CAPT_INIT_RESTART_IDX CAPT_INIT_RESTART_BIT -#define CAPT_INIT_RESTART_BITS 1 -#define CAPT_INIT_RESTART_MEM_ADDR_IDX CAPT_INIT_RESTART_MEM_ADDR_LSB -#define CAPT_INIT_RESTART_MEM_ADDR_BITS 
(CAPT_INIT_RESTART_MEM_ADDR_MSB - CAPT_INIT_RESTART_MEM_ADDR_LSB + 1) -#define CAPT_INIT_RESTART_MEM_REGION_IDX CAPT_INIT_RESTART_MEM_REGION_LSB -#define CAPT_INIT_RESTART_MEM_REGION_BITS (CAPT_INIT_RESTART_MEM_REGION_MSB - CAPT_INIT_RESTART_MEM_REGION_LSB + 1) - - - -/* --------------------------------------------------*/ -/* TOKEN INFO */ -/* --------------------------------------------------*/ -#define CAPT_TOKEN_ID_LSB 0 -#define CAPT_TOKEN_ID_MSB 3 -#define CAPT_TOKEN_WIDTH (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) /* 4 */ - -/* Command tokens IDs */ -#define CAPT_START_TOKEN_ID 0 /* 0000b */ -#define CAPT_STOP_TOKEN_ID 1 /* 0001b */ -#define CAPT_FREEZE_TOKEN_ID 2 /* 0010b */ -#define CAPT_RESUME_TOKEN_ID 3 /* 0011b */ -#define CAPT_INIT_TOKEN_ID 8 /* 1000b */ - -#define CAPT_START_TOKEN_BIT 0 -#define CAPT_STOP_TOKEN_BIT 0 -#define CAPT_FREEZE_TOKEN_BIT 0 -#define CAPT_RESUME_TOKEN_BIT 0 -#define CAPT_INIT_TOKEN_BIT 0 - -/* Acknowledge token IDs */ -#define CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID 0 /* 0000b */ -#define CAPT_END_OF_PACKET_WRITTEN_TOKEN_ID 1 /* 0001b */ -#define CAPT_END_OF_REGION_WRITTEN_TOKEN_ID 2 /* 0010b */ -#define CAPT_FLUSH_DONE_TOKEN_ID 3 /* 0011b */ -#define CAPT_PREMATURE_SOP_TOKEN_ID 4 /* 0100b */ -#define CAPT_MISSING_SOP_TOKEN_ID 5 /* 0101b */ -#define CAPT_UNDEF_PH_TOKEN_ID 6 /* 0110b */ -#define CAPT_STOP_ACK_TOKEN_ID 7 /* 0111b */ - -#define CAPT_PACKET_LENGTH_TOKEN_MSB 19 -#define CAPT_PACKET_LENGTH_TOKEN_LSB 4 -#define CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB 20 -#define CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB 4 -#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB 25 -#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB 20 -#define CAPT_PACKET_CH_ID_TOKEN_MSB 27 -#define CAPT_PACKET_CH_ID_TOKEN_LSB 26 -#define CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB 29 -#define CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB 21 - -/* bit definition */ -#define CAPT_CMD_IDX CAPT_TOKEN_ID_LSB -#define CAPT_CMD_BITS (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) -#define CAPT_SOP_IDX 32 
-#define CAPT_SOP_BITS 1 -#define CAPT_PKT_INFO_IDX 16 -#define CAPT_PKT_INFO_BITS 8 -#define CAPT_PKT_TYPE_IDX 0 -#define CAPT_PKT_TYPE_BITS 6 -#define CAPT_HEADER_DATA_IDX 0 -#define CAPT_HEADER_DATA_BITS 16 -#define CAPT_PKT_DATA_IDX 0 -#define CAPT_PKT_DATA_BITS 32 -#define CAPT_WORD_CNT_IDX 0 -#define CAPT_WORD_CNT_BITS 16 -#define CAPT_ACK_TOKEN_ID_IDX 0 -#define CAPT_ACK_TOKEN_ID_BITS 4 -//#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB -//#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1) -//#define CAPT_ACK_PKT_INFO_IDX 20 -//#define CAPT_ACK_PKT_INFO_BITS 8 -//#define CAPT_ACK_MEM_REG_ID1_IDX 20 /* for capt_end_of_packet_written */ -//#define CAPT_ACK_MEM_REG_ID2_IDX 4 /* for capt_end_of_region_written */ -#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB -#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1) -#define CAPT_ACK_SUPER_PKT_LEN_IDX CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB -#define CAPT_ACK_SUPER_PKT_LEN_BITS (CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB - CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB + 1) -#define CAPT_ACK_PKT_INFO_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB -#define CAPT_ACK_PKT_INFO_BITS (CAPT_PACKET_CH_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1) -#define CAPT_ACK_MEM_REGION_ID_IDX CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB -#define CAPT_ACK_MEM_REGION_ID_BITS (CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB - CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB + 1) -#define CAPT_ACK_PKT_TYPE_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB -#define CAPT_ACK_PKT_TYPE_BITS (CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1) -#define CAPT_INIT_TOKEN_INIT_IDX 4 -#define CAPT_INIT_TOKEN_INIT_BITS 22 - - -/* --------------------------------------------------*/ -/* MIPI */ -/* --------------------------------------------------*/ - -#define CAPT_WORD_COUNT_WIDTH 16 -#define CAPT_PKT_CODE_WIDTH 6 -#define CAPT_CHN_NO_WIDTH 2 -#define CAPT_ERROR_INFO_WIDTH 8 
- -#define LONG_PKTCODE_MAX 63 -#define LONG_PKTCODE_MIN 16 -#define SHORT_PKTCODE_MAX 15 - - -/* --------------------------------------------------*/ -/* Packet Info */ -/* --------------------------------------------------*/ -#define CAPT_START_OF_FRAME 0 -#define CAPT_END_OF_FRAME 1 -#define CAPT_START_OF_LINE 2 -#define CAPT_END_OF_LINE 3 -#define CAPT_LINE_PAYLOAD 4 -#define CAPT_GEN_SH_PKT 5 - - -/* --------------------------------------------------*/ -/* Packet Data Type */ -/* --------------------------------------------------*/ - -#define CAPT_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */ -#define CAPT_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */ -#define CAPT_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */ -#define CAPT_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */ -#define CAPT_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */ -#define CAPT_RGB444_DATA 32 /* 10 0000 RGB444 */ -#define CAPT_RGB555_DATA 33 /* 10 0001 RGB555 */ -#define CAPT_RGB565_DATA 34 /* 10 0010 RGB565 */ -#define CAPT_RGB666_DATA 35 /* 10 0011 RGB666 */ -#define CAPT_RGB888_DATA 36 /* 10 0100 RGB888 */ -#define CAPT_RAW6_DATA 40 /* 10 1000 RAW6 */ -#define CAPT_RAW7_DATA 41 /* 10 1001 RAW7 */ -#define CAPT_RAW8_DATA 42 /* 10 1010 RAW8 */ -#define CAPT_RAW10_DATA 43 /* 10 1011 RAW10 */ -#define CAPT_RAW12_DATA 44 /* 10 1100 RAW12 */ -#define CAPT_RAW14_DATA 45 /* 10 1101 RAW14 */ -#define CAPT_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */ -#define CAPT_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */ -#define CAPT_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */ -#define CAPT_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */ -#define CAPT_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */ -#define CAPT_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */ -#define CAPT_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */ -#define CAPT_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 
*/ -#define CAPT_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */ -#define CAPT_SOF_DATA 0 /* 00 0000 frame start */ -#define CAPT_EOF_DATA 1 /* 00 0001 frame end */ -#define CAPT_SOL_DATA 2 /* 00 0010 line start */ -#define CAPT_EOL_DATA 3 /* 00 0011 line end */ -#define CAPT_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */ -#define CAPT_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */ -#define CAPT_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */ -#define CAPT_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */ -#define CAPT_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */ -#define CAPT_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */ -#define CAPT_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */ -#define CAPT_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */ -#define CAPT_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */ -#define CAPT_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */ -#define CAPT_RESERVED_DATA_TYPE_MIN 56 -#define CAPT_RESERVED_DATA_TYPE_MAX 63 -#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MIN 19 -#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MAX 23 -#define CAPT_YUV_RESERVED_DATA_TYPE 27 -#define CAPT_RGB_RESERVED_DATA_TYPE_MIN 37 -#define CAPT_RGB_RESERVED_DATA_TYPE_MAX 39 -#define CAPT_RAW_RESERVED_DATA_TYPE_MIN 46 -#define CAPT_RAW_RESERVED_DATA_TYPE_MAX 47 - - -/* --------------------------------------------------*/ -/* Capture Unit State */ -/* --------------------------------------------------*/ -#define CAPT_FREE_RUN 0 -#define CAPT_NO_SYNC 1 -#define CAPT_SYNC_SWP 2 -#define CAPT_SYNC_MWP 3 -#define CAPT_SYNC_WAIT 4 -#define CAPT_FREEZE 5 -#define CAPT_RUN 6 - - -/* --------------------------------------------------*/ - -#endif /* _isp_capture_defs_h */ - - - - - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/mmu_defs.h 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/mmu_defs.h deleted file mode 100644 index c038f39ffd25..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/mmu_defs.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _mmu_defs_h -#define _mmu_defs_h - -#define _HRT_MMU_INVALIDATE_TLB_REG_IDX 0 -#define _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX 1 - -#define _HRT_MMU_REG_ALIGN 4 - -#endif /* _mmu_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/scalar_processor_2400_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/scalar_processor_2400_params.h deleted file mode 100644 index 9b6c2893d950..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/scalar_processor_2400_params.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _scalar_processor_2400_params_h -#define _scalar_processor_2400_params_h - -#include "cell_params.h" - -#endif /* _scalar_processor_2400_params_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/str2mem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/str2mem_defs.h deleted file mode 100644 index 1cb62444cf68..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/str2mem_defs.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _ST2MEM_DEFS_H -#define _ST2MEM_DEFS_H - -#define _STR2MEM_CRUN_BIT 0x100000 -#define _STR2MEM_CMD_BITS 0x0F0000 -#define _STR2MEM_COUNT_BITS 0x00FFFF - -#define _STR2MEM_BLOCKS_CMD 0xA0000 -#define _STR2MEM_PACKETS_CMD 0xB0000 -#define _STR2MEM_BYTES_CMD 0xC0000 -#define _STR2MEM_BYTES_FROM_PACKET_CMD 0xD0000 - -#define _STR2MEM_SOFT_RESET_REG_ID 0 -#define _STR2MEM_INPUT_ENDIANNESS_REG_ID 1 -#define _STR2MEM_OUTPUT_ENDIANNESS_REG_ID 2 -#define _STR2MEM_BIT_SWAPPING_REG_ID 3 -#define _STR2MEM_BLOCK_SYNC_LEVEL_REG_ID 4 -#define _STR2MEM_PACKET_SYNC_LEVEL_REG_ID 5 -#define _STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID 6 -#define _STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID 7 -#define _STR2MEM_EN_STAT_UPDATE_ID 8 - -#define _STR2MEM_REG_ALIGN 4 - -#endif /* _ST2MEM_DEFS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/streaming_to_mipi_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/streaming_to_mipi_defs.h deleted file mode 100644 index 60143b8743a2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/streaming_to_mipi_defs.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _streaming_to_mipi_defs_h -#define _streaming_to_mipi_defs_h - -#define HIVE_STR_TO_MIPI_VALID_A_BIT 0 -#define HIVE_STR_TO_MIPI_VALID_B_BIT 1 -#define HIVE_STR_TO_MIPI_SOL_BIT 2 -#define HIVE_STR_TO_MIPI_EOL_BIT 3 -#define HIVE_STR_TO_MIPI_SOF_BIT 4 -#define HIVE_STR_TO_MIPI_EOF_BIT 5 -#define HIVE_STR_TO_MIPI_CH_ID_LSB 6 - -#define HIVE_STR_TO_MIPI_DATA_A_LSB (HIVE_STR_TO_MIPI_VALID_B_BIT + 1) - -#endif /* _streaming_to_mipi_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/timed_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/timed_controller_defs.h deleted file mode 100644 index d2b8972b0d9e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/timed_controller_defs.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _timed_controller_defs_h -#define _timed_controller_defs_h - -#define _HRT_TIMED_CONTROLLER_CMD_REG_IDX 0 - -#define _HRT_TIMED_CONTROLLER_REG_ALIGN 4 - -#endif /* _timed_controller_defs_h */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/var.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/var.h deleted file mode 100644 index 19b19ef484f9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/var.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _HRT_VAR_H -#define _HRT_VAR_H - -#include "version.h" -#include "system_api.h" -#include "hive_types.h" - -#define hrt_int_type_of_char char -#define hrt_int_type_of_uchar unsigned char -#define hrt_int_type_of_short short -#define hrt_int_type_of_ushort unsigned short -#define hrt_int_type_of_int int -#define hrt_int_type_of_uint unsigned int -#define hrt_int_type_of_long long -#define hrt_int_type_of_ulong unsigned long -#define hrt_int_type_of_ptr unsigned int - -#define hrt_host_type_of_char char -#define hrt_host_type_of_uchar unsigned char -#define hrt_host_type_of_short short -#define hrt_host_type_of_ushort unsigned short -#define hrt_host_type_of_int int -#define hrt_host_type_of_uint unsigned int -#define hrt_host_type_of_long long -#define hrt_host_type_of_ulong unsigned long -#define hrt_host_type_of_ptr void* - -#define HRT_TYPE_BYTES(cell, type) (HRT_TYPE_BITS(cell, type)/8) -#define HRT_HOST_TYPE(cell_type) HRTCAT(hrt_host_type_of_, cell_type) -#define HRT_INT_TYPE(type) HRTCAT(hrt_int_type_of_, type) - -#ifdef C_RUN - -#ifdef C_RUN_DYNAMIC_LINK_PROGRAMS -extern void *csim_processor_get_crun_symbol(hive_proc_id p, const char *sym); -#define _hrt_cell_get_crun_symbol(cell,sym) csim_processor_get_crun_symbol(cell,HRTSTR(sym)) -#define _hrt_cell_get_crun_indexed_symbol(cell,sym) csim_processor_get_crun_symbol(cell,HRTSTR(sym)) -#else -#define _hrt_cell_get_crun_symbol(cell,sym) (&sym) -#define _hrt_cell_get_crun_indexed_symbol(cell,sym) (sym) -#endif // C_RUN_DYNAMIC_LINK_PROGRAMS - -#define hrt_scalar_store(cell, type, var, data) \ - ((*(HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_symbol(cell,var)) = (data)) -#define hrt_scalar_load(cell, type, var) \ - ((*(HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_symbol(cell,var))) - -#define hrt_indexed_store(cell, type, array, index, data) \ - ((((HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_indexed_symbol(cell,array))[index]) = (data)) -#define hrt_indexed_load(cell, type, array, index) \ - 
(((HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_indexed_symbol(cell,array))[index]) - -#else /* C_RUN */ - -#define hrt_scalar_store(cell, type, var, data) \ - HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\ - cell, \ - HRTCAT(HIVE_MEM_,var), \ - HRTCAT(HIVE_ADDR_,var), \ - (HRT_INT_TYPE(type))(data)) - -#define hrt_scalar_load(cell, type, var) \ - (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \ - cell, \ - HRTCAT(HIVE_MEM_,var), \ - HRTCAT(HIVE_ADDR_,var))) - -#define hrt_indexed_store(cell, type, array, index, data) \ - HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\ - cell, \ - HRTCAT(HIVE_MEM_,array), \ - (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)), \ - (HRT_INT_TYPE(type))(data)) - -#define hrt_indexed_load(cell, type, array, index) \ - (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \ - cell, \ - HRTCAT(HIVE_MEM_,array), \ - (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)))) - -#endif /* C_RUN */ - -#endif /* _HRT_VAR_H */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/version.h deleted file mode 100644 index bbc4948baea9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/version.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef HRT_VERSION_H -#define HRT_VERSION_H -#define HRT_VERSION_MAJOR 1 -#define HRT_VERSION_MINOR 4 -#define HRT_VERSION 1_4 -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/spmem_dump.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/spmem_dump.c deleted file mode 100644 index 09f0780f0c80..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/spmem_dump.c +++ /dev/null @@ -1,3634 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _sp_map_h_ -#define _sp_map_h_ - - -#ifndef _hrt_dummy_use_blob_sp -#define _hrt_dummy_use_blob_sp() -#endif - -#define _hrt_cell_load_program_sp(proc) _hrt_cell_load_program_embedded(proc, sp) - -#ifndef ISP2401 -/* function input_system_acquisition_stop: ADE */ -#else -/* function input_system_acquisition_stop: AD8 */ -#endif - -#ifndef ISP2401 -/* function longjmp: 684E */ -#else -/* function longjmp: 69C1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_HIVE_IF_SRST_MASK -#define HIVE_MEM_HIVE_IF_SRST_MASK scalar_processor_2400_dmem -#define HIVE_ADDR_HIVE_IF_SRST_MASK 0x1C8 -#define HIVE_SIZE_HIVE_IF_SRST_MASK 16 -#else -#endif -#endif -#define HIVE_MEM_sp_HIVE_IF_SRST_MASK scalar_processor_2400_dmem -#define HIVE_ADDR_sp_HIVE_IF_SRST_MASK 0x1C8 -#define HIVE_SIZE_sp_HIVE_IF_SRST_MASK 16 - -#ifndef ISP2401 -/* function tmpmem_init_dmem: 6599 */ -#else -/* function tmpmem_init_dmem: 66D4 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_receive_ack: 5EDD */ -#else -/* function ia_css_isys_sp_token_map_receive_ack: 6018 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_addr_B: 3345 */ -#else -/* function ia_css_dmaproxy_sp_set_addr_B: 3539 */ - -/* function ia_css_pipe_data_init_tagger_resources: A4F */ -#endif - -/* function debug_buffer_set_ddr_addr: DD */ - -#ifndef ISP2401 -/* function receiver_port_reg_load: AC2 */ -#else -/* function receiver_port_reg_load: ABC */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_mipi -#define HIVE_MEM_vbuf_mipi scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_mipi 0x631C -#else -#define HIVE_ADDR_vbuf_mipi 0x6378 -#endif -#define HIVE_SIZE_vbuf_mipi 12 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_mipi scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_mipi 0x631C -#else -#define HIVE_ADDR_sp_vbuf_mipi 0x6378 -#endif -#define HIVE_SIZE_sp_vbuf_mipi 12 - -#ifndef ISP2401 -/* function 
ia_css_event_sp_decode: 3536 */ -#else -/* function ia_css_event_sp_decode: 372A */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_get_size: 48BE */ -#else -/* function ia_css_queue_get_size: 4B46 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_load: 4EFF */ -#else -/* function ia_css_queue_load: 515D */ -#endif - -#ifndef ISP2401 -/* function setjmp: 6857 */ -#else -/* function setjmp: 69CA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp2host_isys_event_queue -#define HIVE_MEM_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x4684 -#else -#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x46CC -#endif -#define HIVE_SIZE_sem_for_sp2host_isys_event_queue 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x4684 -#else -#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x46CC -#endif -#define HIVE_SIZE_sp_sem_for_sp2host_isys_event_queue 20 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_wait_for_ack: 6E07 */ -#else -/* function ia_css_dmaproxy_sp_wait_for_ack: 6F4B */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_func: 5124 */ -#else -/* function ia_css_sp_rawcopy_func: 5382 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_marked: 2A10 */ -#else -/* function ia_css_tagger_buf_sp_pop_marked: 2BB2 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_stage -#define HIVE_MEM_isp_stage scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_stage 0x5C00 -#else -#define HIVE_ADDR_isp_stage 0x5C60 -#endif -#define HIVE_SIZE_isp_stage 832 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_stage scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_stage 0x5C00 -#else -#define HIVE_ADDR_sp_isp_stage 0x5C60 -#endif -#define HIVE_SIZE_sp_isp_stage 832 - 
-#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_raw -#define HIVE_MEM_vbuf_raw scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_raw 0x2F4 -#else -#define HIVE_ADDR_vbuf_raw 0x30C -#endif -#define HIVE_SIZE_vbuf_raw 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_raw scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_raw 0x2F4 -#else -#define HIVE_ADDR_sp_vbuf_raw 0x30C -#endif -#define HIVE_SIZE_sp_vbuf_raw 4 - -#ifndef ISP2401 -/* function ia_css_sp_bin_copy_func: 504B */ -#else -/* function ia_css_sp_bin_copy_func: 52A9 */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_item_store: 4C4D */ -#else -/* function ia_css_queue_item_store: 4EAB */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AA0 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AFC -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_metadata_bufs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AA0 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AFC -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4AB4 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4B10 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_buffer_bufs 160 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem 
-#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4AB4 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4B10 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 160 - -/* function sp_start_isp: 45D */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_binary_group -#define HIVE_MEM_sp_binary_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_binary_group 0x5FF0 -#else -#define HIVE_ADDR_sp_binary_group 0x6050 -#endif -#define HIVE_SIZE_sp_binary_group 32 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_binary_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_binary_group 0x5FF0 -#else -#define HIVE_ADDR_sp_sp_binary_group 0x6050 -#endif -#define HIVE_SIZE_sp_sp_binary_group 32 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_sw_state -#define HIVE_MEM_sp_sw_state scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sw_state 0x62AC -#else -#define HIVE_ADDR_sp_sw_state 0x6308 -#endif -#define HIVE_SIZE_sp_sw_state 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_sw_state scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_sw_state 0x62AC -#else -#define HIVE_ADDR_sp_sp_sw_state 0x6308 -#endif -#define HIVE_SIZE_sp_sp_sw_state 4 - -#ifndef ISP2401 -/* function ia_css_thread_sp_main: D5B */ -#else -/* function ia_css_thread_sp_main: D50 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_internal_buffers: 373C */ -#else -/* function ia_css_ispctrl_sp_init_internal_buffers: 396B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp2host_psys_event_queue_handle -#define HIVE_MEM_sp2host_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x4B54 -#else -#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x4BB0 -#endif -#define HIVE_SIZE_sp2host_psys_event_queue_handle 12 -#else -#endif -#endif -#define 
HIVE_MEM_sp_sp2host_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x4B54 -#else -#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x4BB0 -#endif -#define HIVE_SIZE_sp_sp2host_psys_event_queue_handle 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp2host_psys_event_queue -#define HIVE_MEM_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x4698 -#else -#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x46E0 -#endif -#define HIVE_SIZE_sem_for_sp2host_psys_event_queue 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x4698 -#else -#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x46E0 -#endif -#define HIVE_SIZE_sp_sem_for_sp2host_psys_event_queue 20 - -#ifndef ISP2401 -/* function ia_css_tagger_sp_propagate_frame: 2429 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_stop_copy_preview -#define HIVE_MEM_sp_stop_copy_preview scalar_processor_2400_dmem -#define HIVE_ADDR_sp_stop_copy_preview 0x6290 -#define HIVE_SIZE_sp_stop_copy_preview 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_stop_copy_preview scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_stop_copy_preview 0x6290 -#define HIVE_SIZE_sp_sp_stop_copy_preview 4 -#else -/* function ia_css_tagger_sp_propagate_frame: 2479 */ -#endif - -#ifndef ISP2401 -/* function input_system_reg_load: B17 */ -#else -/* function input_system_reg_load: B11 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_handles -#define HIVE_MEM_vbuf_handles scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_handles 0x6328 -#else -#define HIVE_ADDR_vbuf_handles 0x6384 -#endif -#define HIVE_SIZE_vbuf_handles 960 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_handles scalar_processor_2400_dmem 
-#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_handles 0x6328 -#else -#define HIVE_ADDR_sp_vbuf_handles 0x6384 -#endif -#define HIVE_SIZE_sp_vbuf_handles 960 - -#ifndef ISP2401 -/* function ia_css_queue_store: 4DB3 */ - -/* function ia_css_sp_flash_register: 2C45 */ -#else -/* function ia_css_queue_store: 5011 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_dummy_function: 566B */ -#else -/* function ia_css_sp_flash_register: 2DE7 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_create: 5B50 */ -#else -/* function ia_css_isys_sp_backend_create: 5C8B */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_init: 184C */ -#else -/* function ia_css_pipeline_sp_init: 1886 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_configure: 2319 */ -#else -/* function ia_css_tagger_sp_configure: 2369 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_end_binary: 357F */ -#else -/* function ia_css_ispctrl_sp_end_binary: 3773 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs -#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4B60 -#else -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4BBC -#endif -#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4B60 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4BBC -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20 - -#ifndef ISP2401 -/* function receiver_port_reg_store: AC9 */ -#else -/* function receiver_port_reg_store: AC3 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef 
HIVE_MEM_event_is_pending_mask -#define HIVE_MEM_event_is_pending_mask scalar_processor_2400_dmem -#define HIVE_ADDR_event_is_pending_mask 0x5C -#define HIVE_SIZE_event_is_pending_mask 44 -#else -#endif -#endif -#define HIVE_MEM_sp_event_is_pending_mask scalar_processor_2400_dmem -#define HIVE_ADDR_sp_event_is_pending_mask 0x5C -#define HIVE_SIZE_sp_event_is_pending_mask 44 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cb_elems_frame -#define HIVE_MEM_sp_all_cb_elems_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cb_elems_frame 0x46AC -#else -#define HIVE_ADDR_sp_all_cb_elems_frame 0x46F4 -#endif -#define HIVE_SIZE_sp_all_cb_elems_frame 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cb_elems_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x46AC -#else -#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x46F4 -#endif -#define HIVE_SIZE_sp_sp_all_cb_elems_frame 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp2host_isys_event_queue_handle -#define HIVE_MEM_sp2host_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x4B74 -#else -#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x4BD0 -#endif -#define HIVE_SIZE_sp2host_isys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_sp2host_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x4B74 -#else -#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x4BD0 -#endif -#define HIVE_SIZE_sp_sp2host_isys_event_queue_handle 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host_sp_com -#define HIVE_MEM_host_sp_com scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host_sp_com 0x4114 -#else -#define HIVE_ADDR_host_sp_com 0x4134 -#endif -#define HIVE_SIZE_host_sp_com 220 -#else -#endif -#endif -#define HIVE_MEM_sp_host_sp_com scalar_processor_2400_dmem -#ifndef 
ISP2401 -#define HIVE_ADDR_sp_host_sp_com 0x4114 -#else -#define HIVE_ADDR_sp_host_sp_com 0x4134 -#endif -#define HIVE_SIZE_sp_host_sp_com 220 - -#ifndef ISP2401 -/* function ia_css_queue_get_free_space: 4A12 */ -#else -/* function ia_css_queue_get_free_space: 4C70 */ -#endif - -#ifndef ISP2401 -/* function exec_image_pipe: 6C4 */ -#else -/* function exec_image_pipe: 658 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_init_dmem_data -#define HIVE_MEM_sp_init_dmem_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_init_dmem_data 0x62B0 -#else -#define HIVE_ADDR_sp_init_dmem_data 0x630C -#endif -#define HIVE_SIZE_sp_init_dmem_data 24 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_init_dmem_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_init_dmem_data 0x62B0 -#else -#define HIVE_ADDR_sp_sp_init_dmem_data 0x630C -#endif -#define HIVE_SIZE_sp_sp_init_dmem_data 24 - -#ifndef ISP2401 -/* function ia_css_sp_metadata_start: 592D */ -#else -/* function ia_css_sp_metadata_start: 5A68 */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_init_buffer_queues: 2CB4 */ -#else -/* function ia_css_bufq_sp_init_buffer_queues: 2E56 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_stop: 182F */ -#else -/* function ia_css_pipeline_sp_stop: 1869 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_connect_pipes: 2803 */ -#else -/* function ia_css_tagger_sp_connect_pipes: 2853 */ -#endif - -#ifndef ISP2401 -/* function sp_isys_copy_wait: 70D */ -#else -/* function sp_isys_copy_wait: 6A1 */ -#endif - -/* function is_isp_debug_buffer_full: 337 */ - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_configure_channel_from_info: 32C8 */ -#else -/* function ia_css_dmaproxy_sp_configure_channel_from_info: 34A9 */ -#endif - -#ifndef ISP2401 -/* function encode_and_post_timer_event: A30 */ -#else -/* function encode_and_post_timer_event: 9C4 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef 
HIVE_MEM_sp_per_frame_data -#define HIVE_MEM_sp_per_frame_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_per_frame_data 0x41F0 -#else -#define HIVE_ADDR_sp_per_frame_data 0x4210 -#endif -#define HIVE_SIZE_sp_per_frame_data 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_per_frame_data scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_per_frame_data 0x41F0 -#else -#define HIVE_ADDR_sp_sp_per_frame_data 0x4210 -#endif -#define HIVE_SIZE_sp_sp_per_frame_data 4 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_vbuf_dequeue: 62ED */ -#else -/* function ia_css_rmgr_sp_vbuf_dequeue: 6428 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_psys_event_queue_handle -#define HIVE_MEM_host2sp_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x4B80 -#else -#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x4BDC -#endif -#define HIVE_SIZE_host2sp_psys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_psys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x4B80 -#else -#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x4BDC -#endif -#define HIVE_SIZE_sp_host2sp_psys_event_queue_handle 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_xmem_bin_addr -#define HIVE_MEM_xmem_bin_addr scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_xmem_bin_addr 0x41F4 -#else -#define HIVE_ADDR_xmem_bin_addr 0x4214 -#endif -#define HIVE_SIZE_xmem_bin_addr 4 -#else -#endif -#endif -#define HIVE_MEM_sp_xmem_bin_addr scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_xmem_bin_addr 0x41F4 -#else -#define HIVE_ADDR_sp_xmem_bin_addr 0x4214 -#endif -#define HIVE_SIZE_sp_xmem_bin_addr 4 - -#ifndef ISP2401 -/* function tmr_clock_init: 13FB */ -#else -/* function tmr_clock_init: 141C */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_run: 141C 
*/ -#else -/* function ia_css_pipeline_sp_run: 143D */ -#endif - -#ifndef ISP2401 -/* function memcpy: 68F7 */ -#else -/* function memcpy: 6A6A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GP_DEVICE_BASE -#define HIVE_MEM_GP_DEVICE_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_GP_DEVICE_BASE 0x2FC -#else -#define HIVE_ADDR_GP_DEVICE_BASE 0x314 -#endif -#define HIVE_SIZE_GP_DEVICE_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_GP_DEVICE_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x2FC -#else -#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x314 -#endif -#define HIVE_SIZE_sp_GP_DEVICE_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_thread_sp_ready_queue -#define HIVE_MEM_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x1E0 -#else -#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x1E4 -#endif -#define HIVE_SIZE_ia_css_thread_sp_ready_queue 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x1E0 -#else -#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x1E4 -#endif -#define HIVE_SIZE_sp_ia_css_thread_sp_ready_queue 12 - -#ifndef ISP2401 -/* function input_system_reg_store: B1E */ -#else -/* function input_system_reg_store: B18 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_start: 5D66 */ -#else -/* function ia_css_isys_sp_frontend_start: 5EA1 */ -#endif - -#ifndef ISP2401 -/* function ia_css_uds_sp_scale_params: 6600 */ -#else -/* function ia_css_uds_sp_scale_params: 6773 */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_increase_size: E40 */ -#else -/* function ia_css_circbuf_increase_size: E35 */ -#endif - -#ifndef ISP2401 -/* function __divu: 6875 */ -#else -/* function __divu: 69E8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_get_state: 
C83 */ -#else -/* function ia_css_thread_sp_get_state: C78 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_cont_capt_stop -#define HIVE_MEM_sem_for_cont_capt_stop scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_cont_capt_stop 0x46BC -#else -#define HIVE_ADDR_sem_for_cont_capt_stop 0x4704 -#endif -#define HIVE_SIZE_sem_for_cont_capt_stop 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_cont_capt_stop scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x46BC -#else -#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x4704 -#endif -#define HIVE_SIZE_sp_sem_for_cont_capt_stop 20 - -#ifndef ISP2401 -/* function thread_fiber_sp_main: E39 */ -#else -/* function thread_fiber_sp_main: E2E */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_pipe_thread -#define HIVE_MEM_sp_isp_pipe_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_pipe_thread 0x4800 -#define HIVE_SIZE_sp_isp_pipe_thread 340 -#else -#define HIVE_ADDR_sp_isp_pipe_thread 0x4848 -#define HIVE_SIZE_sp_isp_pipe_thread 360 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_pipe_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x4800 -#define HIVE_SIZE_sp_sp_isp_pipe_thread 340 -#else -#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x4848 -#define HIVE_SIZE_sp_sp_isp_pipe_thread 360 -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_handle_parameter_sets: 128A */ -#else -/* function ia_css_parambuf_sp_handle_parameter_sets: 127F */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_set_state: 595C */ -#else -/* function ia_css_spctrl_sp_set_state: 5A97 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_signal: 6AF7 */ -#else -/* function ia_css_thread_sem_sp_signal: 6C6C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_IRQ_BASE -#define HIVE_MEM_IRQ_BASE scalar_processor_2400_dmem -#define 
HIVE_ADDR_IRQ_BASE 0x2C -#define HIVE_SIZE_IRQ_BASE 16 -#else -#endif -#endif -#define HIVE_MEM_sp_IRQ_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_IRQ_BASE 0x2C -#define HIVE_SIZE_sp_IRQ_BASE 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_TIMED_CTRL_BASE -#define HIVE_MEM_TIMED_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_TIMED_CTRL_BASE 0x40 -#define HIVE_SIZE_TIMED_CTRL_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_TIMED_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_TIMED_CTRL_BASE 0x40 -#define HIVE_SIZE_sp_TIMED_CTRL_BASE 4 - -#ifndef ISP2401 -/* function ia_css_isys_sp_isr: 6FDC */ - -/* function ia_css_isys_sp_generate_exp_id: 60FE */ -#else -/* function ia_css_isys_sp_isr: 7139 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_init: 61E8 */ -#else -/* function ia_css_isys_sp_generate_exp_id: 6239 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_init: 6BC8 */ -#else -/* function ia_css_rmgr_sp_init: 6323 */ -#endif - -#ifndef ISP2401 -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_is_isp_requested -#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_is_isp_requested 0x308 -#define HIVE_SIZE_is_isp_requested 4 -#else -#endif -#endif -#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_sp_is_isp_requested 0x308 -#define HIVE_SIZE_sp_is_isp_requested 4 -#else -/* function ia_css_thread_sem_sp_init: 6D3B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_cb_frame -#define HIVE_MEM_sem_for_reading_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_cb_frame 0x46D0 -#else -#define HIVE_ADDR_sem_for_reading_cb_frame 0x4718 -#endif -#define HIVE_SIZE_sem_for_reading_cb_frame 40 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x46D0 -#else -#define 
HIVE_ADDR_sp_sem_for_reading_cb_frame 0x4718 -#endif -#define HIVE_SIZE_sp_sem_for_reading_cb_frame 40 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_execute: 3230 */ -#else -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_is_isp_requested -#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_is_isp_requested 0x320 -#define HIVE_SIZE_is_isp_requested 4 -#else -#endif -#endif -#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem -#define HIVE_ADDR_sp_is_isp_requested 0x320 -#define HIVE_SIZE_sp_is_isp_requested 4 - -/* function ia_css_dmaproxy_sp_execute: 340F */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_is_empty: 48F9 */ -#else -/* function ia_css_queue_is_empty: 7098 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_has_stopped: 1825 */ -#else -/* function ia_css_pipeline_sp_has_stopped: 185F */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_extract: F44 */ -#else -/* function ia_css_circbuf_extract: F39 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_is_locked_from_start: 2B26 */ -#else -/* function ia_css_tagger_buf_sp_is_locked_from_start: 2CC8 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_current_sp_thread -#define HIVE_MEM_current_sp_thread scalar_processor_2400_dmem -#define HIVE_ADDR_current_sp_thread 0x1DC -#define HIVE_SIZE_current_sp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_current_sp_thread scalar_processor_2400_dmem -#define HIVE_ADDR_sp_current_sp_thread 0x1DC -#define HIVE_SIZE_sp_current_sp_thread 4 - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_get_spid: 5963 */ -#else -/* function ia_css_spctrl_sp_get_spid: 5A9E */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_reset_buffers: 2D3B */ -#else -/* function ia_css_bufq_sp_reset_buffers: 2EDD */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read_byte_addr: 6E35 */ -#else -/* function ia_css_dmaproxy_sp_read_byte_addr: 6F79 */ -#endif - -#ifndef ISP2401 -/* 
function ia_css_rmgr_sp_uninit: 61E1 */ -#else -/* function ia_css_rmgr_sp_uninit: 631C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_stack -#define HIVE_MEM_sp_threads_stack scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_stack 0x164 -#define HIVE_SIZE_sp_threads_stack 28 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_stack scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_stack 0x164 -#define HIVE_SIZE_sp_sp_threads_stack 28 - -#ifndef ISP2401 -/* function ia_css_circbuf_peek: F26 */ -#else -/* function ia_css_circbuf_peek: F1B */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_wait_for_in_param: 1053 */ -#else -/* function ia_css_parambuf_sp_wait_for_in_param: 1048 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_get_exp_id: 5FC6 */ -#else -/* function ia_css_isys_sp_token_map_get_exp_id: 6101 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cb_elems_param -#define HIVE_MEM_sp_all_cb_elems_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cb_elems_param 0x46F8 -#else -#define HIVE_ADDR_sp_all_cb_elems_param 0x4740 -#endif -#define HIVE_SIZE_sp_all_cb_elems_param 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cb_elems_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x46F8 -#else -#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x4740 -#endif -#define HIVE_SIZE_sp_sp_all_cb_elems_param 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_pipeline_sp_curr_binary_id -#define HIVE_MEM_pipeline_sp_curr_binary_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x1EC -#else -#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x1F0 -#endif -#define HIVE_SIZE_pipeline_sp_curr_binary_id 4 -#else -#endif -#endif -#define HIVE_MEM_sp_pipeline_sp_curr_binary_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x1EC 
-#else -#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x1F0 -#endif -#define HIVE_SIZE_sp_pipeline_sp_curr_binary_id 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_frame_desc -#define HIVE_MEM_sp_all_cbs_frame_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_frame_desc 0x4708 -#else -#define HIVE_ADDR_sp_all_cbs_frame_desc 0x4750 -#endif -#define HIVE_SIZE_sp_all_cbs_frame_desc 8 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_frame_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x4708 -#else -#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x4750 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_frame_desc 8 - -#ifndef ISP2401 -/* function sp_isys_copy_func_v2: 706 */ -#else -/* function sp_isys_copy_func_v2: 69A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_cb_param -#define HIVE_MEM_sem_for_reading_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_cb_param 0x4710 -#else -#define HIVE_ADDR_sem_for_reading_cb_param 0x4758 -#endif -#define HIVE_SIZE_sem_for_reading_cb_param 40 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x4710 -#else -#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x4758 -#endif -#define HIVE_SIZE_sp_sem_for_reading_cb_param 40 - -#ifndef ISP2401 -/* function ia_css_queue_get_used_space: 49C6 */ -#else -/* function ia_css_queue_get_used_space: 4C24 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_cont_capt_start -#define HIVE_MEM_sem_for_cont_capt_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_cont_capt_start 0x4738 -#else -#define HIVE_ADDR_sem_for_cont_capt_start 0x4780 -#endif -#define HIVE_SIZE_sem_for_cont_capt_start 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_cont_capt_start scalar_processor_2400_dmem -#ifndef 
ISP2401 -#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x4738 -#else -#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x4780 -#endif -#define HIVE_SIZE_sp_sem_for_cont_capt_start 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_tmp_heap -#define HIVE_MEM_tmp_heap scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_tmp_heap 0x6010 -#else -#define HIVE_ADDR_tmp_heap 0x6070 -#endif -#define HIVE_SIZE_tmp_heap 640 -#else -#endif -#endif -#define HIVE_MEM_sp_tmp_heap scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_tmp_heap 0x6010 -#else -#define HIVE_ADDR_sp_tmp_heap 0x6070 -#endif -#define HIVE_SIZE_sp_tmp_heap 640 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_get_num_vbuf: 64F1 */ -#else -/* function ia_css_rmgr_sp_get_num_vbuf: 662C */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_output_compute_dma_info: 3F62 */ -#else -/* function ia_css_ispctrl_sp_output_compute_dma_info: 41A5 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_lock_exp_id: 20E6 */ -#else -/* function ia_css_tagger_sp_lock_exp_id: 2136 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4B8C -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4BE8 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_s3a_bufs 60 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4B8C -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4BE8 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 60 - -#ifndef ISP2401 -/* function ia_css_queue_is_full: 4A5D */ -#else -/* function ia_css_queue_is_full: 4CBB */ -#endif - -/* function debug_buffer_init_isp: E4 */ - -#ifndef ISP2401 -/* function 
ia_css_isys_sp_frontend_uninit: 5D20 */ -#else -/* function ia_css_isys_sp_frontend_uninit: 5E5B */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_exp_id_is_locked: 201C */ -#else -/* function ia_css_tagger_sp_exp_id_is_locked: 206C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem -#define HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x66E8 -#else -#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x6744 -#endif -#define HIVE_SIZE_ia_css_rmgr_sp_mipi_frame_sem 60 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x66E8 -#else -#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x6744 -#endif -#define HIVE_SIZE_sp_ia_css_rmgr_sp_mipi_frame_sem 60 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_dump: 62C8 */ -#else -/* function ia_css_rmgr_sp_refcount_dump: 6403 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4BC8 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4C24 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_isp_parameters_id 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4BC8 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4C24 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 20 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_pipe_threads -#define HIVE_MEM_sp_pipe_threads scalar_processor_2400_dmem -#define 
HIVE_ADDR_sp_pipe_threads 0x150 -#define HIVE_SIZE_sp_pipe_threads 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_pipe_threads scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_pipe_threads 0x150 -#define HIVE_SIZE_sp_sp_pipe_threads 20 - -#ifndef ISP2401 -/* function sp_event_proxy_func: 71B */ -#else -/* function sp_event_proxy_func: 6AF */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_isys_event_queue_handle -#define HIVE_MEM_host2sp_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x4BDC -#else -#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x4C38 -#endif -#define HIVE_SIZE_host2sp_isys_event_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_isys_event_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x4BDC -#else -#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x4C38 -#endif -#define HIVE_SIZE_sp_host2sp_isys_event_queue_handle 12 - -#ifndef ISP2401 -/* function ia_css_thread_sp_yield: 6A70 */ -#else -/* function ia_css_thread_sp_yield: 6BEA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_param_desc -#define HIVE_MEM_sp_all_cbs_param_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_param_desc 0x474C -#else -#define HIVE_ADDR_sp_all_cbs_param_desc 0x4794 -#endif -#define HIVE_SIZE_sp_all_cbs_param_desc 8 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_param_desc scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x474C -#else -#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x4794 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_param_desc 8 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb -#define HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x5BF4 -#else 
-#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x5C50 -#endif -#define HIVE_SIZE_ia_css_dmaproxy_sp_invalidate_tlb 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x5BF4 -#else -#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x5C50 -#endif -#define HIVE_SIZE_sp_ia_css_dmaproxy_sp_invalidate_tlb 4 - -#ifndef ISP2401 -/* function ia_css_thread_sp_fork: D10 */ -#else -/* function ia_css_thread_sp_fork: D05 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_destroy: 280D */ -#else -/* function ia_css_tagger_sp_destroy: 285D */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_vmem_read: 31D0 */ -#else -/* function ia_css_dmaproxy_sp_vmem_read: 33AF */ -#endif - -#ifndef ISP2401 -/* function ia_css_ifmtr_sp_init: 614F */ -#else -/* function ia_css_ifmtr_sp_init: 628A */ -#endif - -#ifndef ISP2401 -/* function initialize_sp_group: 6D4 */ -#else -/* function initialize_sp_group: 668 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_peek: 2932 */ -#else -/* function ia_css_tagger_buf_sp_peek: 2AD4 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_init: D3C */ -#else -/* function ia_css_thread_sp_init: D31 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_reset_exp_id: 60F6 */ -#else -/* function ia_css_isys_sp_reset_exp_id: 6231 */ -#endif - -#ifndef ISP2401 -/* function qos_scheduler_update_fps: 65F0 */ -#else -/* function qos_scheduler_update_fps: 6763 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_set_stream_base_addr: 4637 */ -#else -/* function ia_css_ispctrl_sp_set_stream_base_addr: 4892 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_DMEM_BASE -#define HIVE_MEM_ISP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_DMEM_BASE 0x10 -#define HIVE_SIZE_ISP_DMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_DMEM_BASE 
scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_DMEM_BASE 0x10 -#define HIVE_SIZE_sp_ISP_DMEM_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_SP_DMEM_BASE -#define HIVE_MEM_SP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_SP_DMEM_BASE 0x4 -#define HIVE_SIZE_SP_DMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_SP_DMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_SP_DMEM_BASE 0x4 -#define HIVE_SIZE_sp_SP_DMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read: 3246 */ -#else -/* function __ia_css_queue_is_empty_text: 4B81 */ - -/* function ia_css_dmaproxy_sp_read: 3425 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_raw_copy_line_count -#define HIVE_MEM_raw_copy_line_count scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_raw_copy_line_count 0x2C8 -#else -#define HIVE_ADDR_raw_copy_line_count 0x2E0 -#endif -#define HIVE_SIZE_raw_copy_line_count 4 -#else -#endif -#endif -#define HIVE_MEM_sp_raw_copy_line_count scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_raw_copy_line_count 0x2C8 -#else -#define HIVE_ADDR_sp_raw_copy_line_count 0x2E0 -#endif -#define HIVE_SIZE_sp_raw_copy_line_count 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_tag_cmd_queue_handle -#define HIVE_MEM_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x4BE8 -#else -#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x4C44 -#endif -#define HIVE_SIZE_host2sp_tag_cmd_queue_handle 12 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x4BE8 -#else -#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x4C44 -#endif -#define HIVE_SIZE_sp_host2sp_tag_cmd_queue_handle 12 - -#ifndef ISP2401 -/* function ia_css_queue_peek: 493C */ -#else -/* function ia_css_queue_peek: 4B9A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS 
-#ifndef HIVE_MEM_ia_css_flash_sp_frame_cnt -#define HIVE_MEM_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x4A94 -#else -#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x4AF0 -#endif -#define HIVE_SIZE_ia_css_flash_sp_frame_cnt 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x4A94 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x4AF0 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_frame_cnt 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_event_can_send_token_mask -#define HIVE_MEM_event_can_send_token_mask scalar_processor_2400_dmem -#define HIVE_ADDR_event_can_send_token_mask 0x88 -#define HIVE_SIZE_event_can_send_token_mask 44 -#else -#endif -#endif -#define HIVE_MEM_sp_event_can_send_token_mask scalar_processor_2400_dmem -#define HIVE_ADDR_sp_event_can_send_token_mask 0x88 -#define HIVE_SIZE_sp_event_can_send_token_mask 44 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_thread -#define HIVE_MEM_isp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_thread 0x5F40 -#else -#define HIVE_ADDR_isp_thread 0x5FA0 -#endif -#define HIVE_SIZE_isp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_thread 0x5F40 -#else -#define HIVE_ADDR_sp_isp_thread 0x5FA0 -#endif -#define HIVE_SIZE_sp_isp_thread 4 - -#ifndef ISP2401 -/* function encode_and_post_sp_event_non_blocking: A78 */ -#else -/* function encode_and_post_sp_event_non_blocking: A0C */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_destroy: 5DF8 */ -#else -/* function ia_css_isys_sp_frontend_destroy: 5F33 */ -#endif - -/* function is_ddr_debug_buffer_full: 2CC */ - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_stop: 5D38 */ -#else -/* function ia_css_isys_sp_frontend_stop: 5E73 */ -#endif 
- -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_init: 6094 */ -#else -/* function ia_css_isys_sp_token_map_init: 61CF */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 2982 */ -#else -/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 2B24 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_fiber -#define HIVE_MEM_sp_threads_fiber scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_fiber 0x19C -#define HIVE_SIZE_sp_threads_fiber 28 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_fiber scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_fiber 0x19C -#define HIVE_SIZE_sp_sp_threads_fiber 28 - -#ifndef ISP2401 -/* function encode_and_post_sp_event: A01 */ -#else -/* function encode_and_post_sp_event: 995 */ -#endif - -/* function debug_enqueue_ddr: EE */ - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_init_vbuf: 6283 */ -#else -/* function ia_css_rmgr_sp_refcount_init_vbuf: 63BE */ -#endif - -#ifndef ISP2401 -/* function dmaproxy_sp_read_write: 6EE4 */ -#else -/* function dmaproxy_sp_read_write: 7017 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer -#define HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5BF8 -#else -#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5C54 -#endif -#define HIVE_SIZE_ia_css_dmaproxy_isp_dma_cmd_buffer 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5BF8 -#else -#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5C54 -#endif -#define HIVE_SIZE_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host2sp_buffer_queue_handle -#define HIVE_MEM_host2sp_buffer_queue_handle scalar_processor_2400_dmem -#ifndef 
ISP2401 -#define HIVE_ADDR_host2sp_buffer_queue_handle 0x4BF4 -#else -#define HIVE_ADDR_host2sp_buffer_queue_handle 0x4C50 -#endif -#define HIVE_SIZE_host2sp_buffer_queue_handle 480 -#else -#endif -#endif -#define HIVE_MEM_sp_host2sp_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x4BF4 -#else -#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x4C50 -#endif -#define HIVE_SIZE_sp_host2sp_buffer_queue_handle 480 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_in_service -#define HIVE_MEM_ia_css_flash_sp_in_service scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3178 -#else -#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3198 -#endif -#define HIVE_SIZE_ia_css_flash_sp_in_service 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_in_service scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3178 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3198 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_in_service 4 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_process: 6BF0 */ -#else -/* function ia_css_dmaproxy_sp_process: 6D63 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_mark_from_end: 2C0A */ -#else -/* function ia_css_tagger_buf_sp_mark_from_end: 2DAC */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_rcv_acquire_ack: 5A05 */ -#else -/* function ia_css_isys_sp_backend_rcv_acquire_ack: 5B40 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_pre_acquire_request: 5A1B */ -#else -/* function ia_css_isys_sp_backend_pre_acquire_request: 5B56 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_cs: 366C */ -#else -/* function ia_css_ispctrl_sp_init_cs: 386E */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_init: 5971 */ -#else -/* function ia_css_spctrl_sp_init: 5AAC */ -#endif - -#ifndef ISP2401 -/* function 
sp_event_proxy_init: 730 */ -#else -/* function sp_event_proxy_init: 6C4 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4DD4 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4E30 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_previous_clock_tick 40 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4DD4 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4E30 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 40 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_output -#define HIVE_MEM_sp_output scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_output 0x41F8 -#else -#define HIVE_ADDR_sp_output 0x4218 -#endif -#define HIVE_SIZE_sp_output 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_output scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_output 0x41F8 -#else -#define HIVE_ADDR_sp_sp_output 0x4218 -#endif -#define HIVE_SIZE_sp_sp_output 16 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues -#define HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4DFC -#else -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4E58 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 
0x4DFC -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4E58 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_CTRL_BASE -#define HIVE_MEM_ISP_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_CTRL_BASE 0x8 -#define HIVE_SIZE_ISP_CTRL_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_CTRL_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_CTRL_BASE 0x8 -#define HIVE_SIZE_sp_ISP_CTRL_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_INPUT_FORMATTER_BASE -#define HIVE_MEM_INPUT_FORMATTER_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_INPUT_FORMATTER_BASE 0x4C -#define HIVE_SIZE_INPUT_FORMATTER_BASE 16 -#else -#endif -#endif -#define HIVE_MEM_sp_INPUT_FORMATTER_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_INPUT_FORMATTER_BASE 0x4C -#define HIVE_SIZE_sp_INPUT_FORMATTER_BASE 16 - -#ifndef ISP2401 -/* function sp_dma_proxy_reset_channels: 34A0 */ -#else -/* function sp_dma_proxy_reset_channels: 3694 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_acquire: 5B26 */ -#else -/* function ia_css_isys_sp_backend_acquire: 5C61 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_update_size: 2901 */ -#else -/* function ia_css_tagger_sp_update_size: 2AA3 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_host_sp_queue -#define HIVE_MEM_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x511C -#else -#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x5178 -#endif -#define HIVE_SIZE_ia_css_bufq_host_sp_queue 2008 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x511C -#else -#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x5178 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_host_sp_queue 2008 - -#ifndef ISP2401 -/* 
function thread_fiber_sp_create: DA8 */ -#else -/* function thread_fiber_sp_create: D9D */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_increments: 3332 */ -#else -/* function ia_css_dmaproxy_sp_set_increments: 3526 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_writing_cb_frame -#define HIVE_MEM_sem_for_writing_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_writing_cb_frame 0x4754 -#else -#define HIVE_ADDR_sem_for_writing_cb_frame 0x479C -#endif -#define HIVE_SIZE_sem_for_writing_cb_frame 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_writing_cb_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x4754 -#else -#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x479C -#endif -#define HIVE_SIZE_sp_sem_for_writing_cb_frame 20 - -#ifndef ISP2401 -/* function receiver_reg_store: AD7 */ -#else -/* function receiver_reg_store: AD1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_writing_cb_param -#define HIVE_MEM_sem_for_writing_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_writing_cb_param 0x4768 -#else -#define HIVE_ADDR_sem_for_writing_cb_param 0x47B0 -#endif -#define HIVE_SIZE_sem_for_writing_cb_param 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_writing_cb_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x4768 -#else -#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x47B0 -#endif -#define HIVE_SIZE_sp_sem_for_writing_cb_param 20 - -/* function sp_start_isp_entry: 453 */ -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifdef HIVE_ADDR_sp_start_isp_entry -#endif -#define HIVE_ADDR_sp_start_isp_entry 0x453 -#endif -#define HIVE_ADDR_sp_sp_start_isp_entry 0x453 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_unmark_all: 2B8E */ -#else -/* function ia_css_tagger_buf_sp_unmark_all: 2D30 */ -#endif - -#ifndef ISP2401 -/* function 
ia_css_tagger_buf_sp_unmark_from_start: 2BCF */ -#else -/* function ia_css_tagger_buf_sp_unmark_from_start: 2D71 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_channel_acquire: 34CC */ -#else -/* function ia_css_dmaproxy_sp_channel_acquire: 36C0 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_add_num_vbuf: 64CD */ -#else -/* function ia_css_rmgr_sp_add_num_vbuf: 6608 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_create: 60DD */ -#else -/* function ia_css_isys_sp_token_map_create: 6218 */ -#endif - -#ifndef ISP2401 -/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 319C */ -#else -/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 337B */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_acquire_buf_elem: 1FF4 */ -#else -/* function ia_css_tagger_sp_acquire_buf_elem: 2044 */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_is_dynamic_buffer: 3085 */ -#else -/* function ia_css_bufq_sp_is_dynamic_buffer: 3227 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_group -#define HIVE_MEM_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_group 0x4208 -#define HIVE_SIZE_sp_group 1144 -#else -#define HIVE_ADDR_sp_group 0x4228 -#define HIVE_SIZE_sp_group 1184 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_group 0x4208 -#define HIVE_SIZE_sp_sp_group 1144 -#else -#define HIVE_ADDR_sp_sp_group 0x4228 -#define HIVE_SIZE_sp_sp_group 1184 -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_event_proxy_thread -#define HIVE_MEM_sp_event_proxy_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_event_proxy_thread 0x4954 -#define HIVE_SIZE_sp_event_proxy_thread 68 -#else -#define HIVE_ADDR_sp_event_proxy_thread 0x49B0 -#define HIVE_SIZE_sp_event_proxy_thread 72 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_event_proxy_thread scalar_processor_2400_dmem -#ifndef 
ISP2401 -#define HIVE_ADDR_sp_sp_event_proxy_thread 0x4954 -#define HIVE_SIZE_sp_sp_event_proxy_thread 68 -#else -#define HIVE_ADDR_sp_sp_event_proxy_thread 0x49B0 -#define HIVE_SIZE_sp_sp_event_proxy_thread 72 -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_kill: CD6 */ -#else -/* function ia_css_thread_sp_kill: CCB */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_create: 28BB */ -#else -/* function ia_css_tagger_sp_create: 2A51 */ -#endif - -#ifndef ISP2401 -/* function tmpmem_acquire_dmem: 657A */ -#else -/* function tmpmem_acquire_dmem: 66B5 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_MMU_BASE -#define HIVE_MEM_MMU_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_MMU_BASE 0x24 -#define HIVE_SIZE_MMU_BASE 8 -#else -#endif -#endif -#define HIVE_MEM_sp_MMU_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_MMU_BASE 0x24 -#define HIVE_SIZE_sp_MMU_BASE 8 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_channel_release: 34B8 */ -#else -/* function ia_css_dmaproxy_sp_channel_release: 36AC */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_is_idle: 3498 */ -#else -/* function ia_css_dmaproxy_sp_is_idle: 368C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_qos_start -#define HIVE_MEM_sem_for_qos_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_qos_start 0x477C -#else -#define HIVE_ADDR_sem_for_qos_start 0x47C4 -#endif -#define HIVE_SIZE_sem_for_qos_start 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_qos_start scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_qos_start 0x477C -#else -#define HIVE_ADDR_sp_sem_for_qos_start 0x47C4 -#endif -#define HIVE_SIZE_sp_sem_for_qos_start 20 - -#ifndef ISP2401 -/* function isp_hmem_load: B55 */ -#else -/* function isp_hmem_load: B4F */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_release_buf_elem: 1FD0 */ -#else -/* function ia_css_tagger_sp_release_buf_elem: 2020 */ -#endif - 
-#ifndef ISP2401 -/* function ia_css_eventq_sp_send: 350E */ -#else -/* function ia_css_eventq_sp_send: 3702 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_isys_sp_error_cnt -#define HIVE_MEM_ia_css_isys_sp_error_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_isys_sp_error_cnt 0x62D4 -#else -#define HIVE_ADDR_ia_css_isys_sp_error_cnt 0x6330 -#endif -#define HIVE_SIZE_ia_css_isys_sp_error_cnt 16 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_isys_sp_error_cnt scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_isys_sp_error_cnt 0x62D4 -#else -#define HIVE_ADDR_sp_ia_css_isys_sp_error_cnt 0x6330 -#endif -#define HIVE_SIZE_sp_ia_css_isys_sp_error_cnt 16 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_unlock_from_start: 2ABE */ -#else -/* function ia_css_tagger_buf_sp_unlock_from_start: 2C60 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_debug_buffer_ddr_address -#define HIVE_MEM_debug_buffer_ddr_address scalar_processor_2400_dmem -#define HIVE_ADDR_debug_buffer_ddr_address 0xBC -#define HIVE_SIZE_debug_buffer_ddr_address 4 -#else -#endif -#endif -#define HIVE_MEM_sp_debug_buffer_ddr_address scalar_processor_2400_dmem -#define HIVE_ADDR_sp_debug_buffer_ddr_address 0xBC -#define HIVE_SIZE_sp_debug_buffer_ddr_address 4 - -#ifndef ISP2401 -/* function sp_isys_copy_request: 714 */ -#else -/* function sp_isys_copy_request: 6A8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_retain_vbuf: 635D */ -#else -/* function ia_css_rmgr_sp_refcount_retain_vbuf: 6498 */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_set_priority: CCE */ -#else -/* function ia_css_thread_sp_set_priority: CC3 */ -#endif - -#ifndef ISP2401 -/* function sizeof_hmem: BFC */ -#else -/* function sizeof_hmem: BF6 */ -#endif - -#ifndef ISP2401 -/* function tmpmem_release_dmem: 6569 */ -#else -/* function tmpmem_release_dmem: 66A4 */ -#endif - -/* function cnd_input_system_cfg: 392 */ - 
-#ifndef ISP2401 -/* function __ia_css_sp_rawcopy_func_critical: 6F65 */ -#else -/* function __ia_css_sp_rawcopy_func_critical: 70C2 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_width_exception: 331D */ -#else -/* function __ia_css_dmaproxy_sp_process_text: 331F */ -#endif - -#ifndef ISP2401 -/* function sp_event_assert: 8B1 */ -#else -/* function ia_css_dmaproxy_sp_set_width_exception: 3511 */ -#endif - -#ifndef ISP2401 -/* function ia_css_flash_sp_init_internal_params: 2CA9 */ -#else -/* function sp_event_assert: 845 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 29C4 */ -#else -/* function ia_css_flash_sp_init_internal_params: 2E4B */ -#endif - -#ifndef ISP2401 -/* function __modu: 68BB */ -#else -/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 2B66 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_isp_vector: 31A2 */ -#else -/* function __modu: 6A2E */ - -/* function ia_css_dmaproxy_sp_init_isp_vector: 3381 */ -#endif - -/* function isp_vamem_store: 0 */ - -#ifdef ISP2401 -/* function ia_css_tagger_sp_set_copy_pipe: 2A48 */ - -#endif -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GDC_BASE -#define HIVE_MEM_GDC_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_GDC_BASE 0x44 -#define HIVE_SIZE_GDC_BASE 8 -#else -#endif -#endif -#define HIVE_MEM_sp_GDC_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_GDC_BASE 0x44 -#define HIVE_SIZE_sp_GDC_BASE 8 - -#ifndef ISP2401 -/* function ia_css_queue_local_init: 4C27 */ -#else -/* function ia_css_queue_local_init: 4E85 */ -#endif - -#ifndef ISP2401 -/* function sp_event_proxy_callout_func: 6988 */ -#else -/* function sp_event_proxy_callout_func: 6AFB */ -#endif - -#ifndef ISP2401 -/* function qos_scheduler_schedule_stage: 65C1 */ -#else -/* function qos_scheduler_schedule_stage: 670F */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_thread_sp_num_ready_threads -#define 
HIVE_MEM_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x49E0 -#else -#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x4A40 -#endif -#define HIVE_SIZE_ia_css_thread_sp_num_ready_threads 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x49E0 -#else -#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x4A40 -#endif -#define HIVE_SIZE_sp_ia_css_thread_sp_num_ready_threads 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_threads_stack_size -#define HIVE_MEM_sp_threads_stack_size scalar_processor_2400_dmem -#define HIVE_ADDR_sp_threads_stack_size 0x180 -#define HIVE_SIZE_sp_threads_stack_size 28 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_threads_stack_size scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_threads_stack_size 0x180 -#define HIVE_SIZE_sp_sp_threads_stack_size 28 - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_isp_done_row_striping: 3F48 */ -#else -/* function ia_css_ispctrl_sp_isp_done_row_striping: 418B */ -#endif - -#ifndef ISP2401 -/* function __ia_css_isys_sp_isr_text: 5E22 */ -#else -/* function __ia_css_isys_sp_isr_text: 5F5D */ -#endif - -#ifndef ISP2401 -/* function ia_css_queue_dequeue: 4AA5 */ -#else -/* function ia_css_queue_dequeue: 4D03 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_configure_channel: 6E4C */ -#else -/* function is_qos_standalone_mode: 66EA */ - -/* function ia_css_dmaproxy_sp_configure_channel: 6F90 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_current_thread_fiber_sp -#define HIVE_MEM_current_thread_fiber_sp scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_current_thread_fiber_sp 0x49E8 -#else -#define HIVE_ADDR_current_thread_fiber_sp 0x4A44 -#endif -#define HIVE_SIZE_current_thread_fiber_sp 4 -#else -#endif -#endif -#define 
HIVE_MEM_sp_current_thread_fiber_sp scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_current_thread_fiber_sp 0x49E8 -#else -#define HIVE_ADDR_sp_current_thread_fiber_sp 0x4A44 -#endif -#define HIVE_SIZE_sp_current_thread_fiber_sp 4 - -#ifndef ISP2401 -/* function ia_css_circbuf_pop: FD8 */ -#else -/* function ia_css_circbuf_pop: FCD */ -#endif - -#ifndef ISP2401 -/* function memset: 693A */ -#else -/* function memset: 6AAD */ -#endif - -/* function irq_raise_set_token: B6 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_GPIO_BASE -#define HIVE_MEM_GPIO_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_GPIO_BASE 0x3C -#define HIVE_SIZE_GPIO_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_GPIO_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_GPIO_BASE 0x3C -#define HIVE_SIZE_sp_GPIO_BASE 4 - -#ifndef ISP2401 -/* function ia_css_pipeline_acc_stage_enable: 17F0 */ -#else -/* function ia_css_pipeline_acc_stage_enable: 1818 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_unlock_exp_id: 2041 */ -#else -/* function ia_css_tagger_sp_unlock_exp_id: 2091 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_ph -#define HIVE_MEM_isp_ph scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_ph 0x62E4 -#else -#define HIVE_ADDR_isp_ph 0x6340 -#endif -#define HIVE_SIZE_isp_ph 28 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_ph scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_ph 0x62E4 -#else -#define HIVE_ADDR_sp_isp_ph 0x6340 -#endif -#define HIVE_SIZE_sp_isp_ph 28 - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_flush: 6022 */ -#else -/* function ia_css_isys_sp_token_map_flush: 615D */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_ds: 37CB */ -#else -/* function ia_css_ispctrl_sp_init_ds: 39FA */ -#endif - -#ifndef ISP2401 -/* function get_xmem_base_addr_raw: 3B78 */ -#else -/* function get_xmem_base_addr_raw: 3DB3 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS 
-#ifndef HIVE_MEM_sp_all_cbs_param -#define HIVE_MEM_sp_all_cbs_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_param 0x4790 -#else -#define HIVE_ADDR_sp_all_cbs_param 0x47D8 -#endif -#define HIVE_SIZE_sp_all_cbs_param 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_param scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_param 0x4790 -#else -#define HIVE_ADDR_sp_sp_all_cbs_param 0x47D8 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_param 16 - -#ifndef ISP2401 -/* function ia_css_circbuf_create: 1026 */ -#else -/* function ia_css_circbuf_create: 101B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_sp_group -#define HIVE_MEM_sem_for_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_sp_group 0x47A0 -#else -#define HIVE_ADDR_sem_for_sp_group 0x47E8 -#endif -#define HIVE_SIZE_sem_for_sp_group 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_sp_group scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_sp_group 0x47A0 -#else -#define HIVE_ADDR_sp_sem_for_sp_group 0x47E8 -#endif -#define HIVE_SIZE_sp_sem_for_sp_group 20 - -#ifndef ISP2401 -/* function ia_css_framebuf_sp_wait_for_in_frame: 64F8 */ -#else -/* function __ia_css_dmaproxy_sp_configure_channel_text: 34F0 */ - -/* function ia_css_framebuf_sp_wait_for_in_frame: 6633 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_tag_frame: 5588 */ -#else -/* function ia_css_sp_rawcopy_tag_frame: 57C9 */ -#endif - -#ifndef ISP2401 -/* function isp_hmem_clear: B25 */ -#else -/* function isp_hmem_clear: B1F */ -#endif - -#ifndef ISP2401 -/* function ia_css_framebuf_sp_release_in_frame: 653B */ -#else -/* function ia_css_framebuf_sp_release_in_frame: 6676 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_snd_acquire_request: 5A78 */ -#else -/* function ia_css_isys_sp_backend_snd_acquire_request: 5BB3 */ -#endif - -#ifndef ISP2401 -/* function 
ia_css_isys_sp_token_map_is_full: 5EA9 */ -#else -/* function ia_css_isys_sp_token_map_is_full: 5FE4 */ -#endif - -#ifndef ISP2401 -/* function input_system_acquisition_run: AF9 */ -#else -/* function input_system_acquisition_run: AF3 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_start_binary: 364A */ -#else -/* function ia_css_ispctrl_sp_start_binary: 384C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs -#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x58F4 -#else -#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x5950 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x58F4 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x5950 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20 - -#ifndef ISP2401 -/* function ia_css_eventq_sp_recv: 34E0 */ -#else -/* function ia_css_eventq_sp_recv: 36D4 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_pool -#define HIVE_MEM_isp_pool scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_pool 0x2E8 -#else -#define HIVE_ADDR_isp_pool 0x300 -#endif -#define HIVE_SIZE_isp_pool 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_pool scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_pool 0x2E8 -#else -#define HIVE_ADDR_sp_isp_pool 0x300 -#endif -#define HIVE_SIZE_sp_isp_pool 4 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_rel_gen: 622A */ -#else -/* function ia_css_rmgr_sp_rel_gen: 6365 */ - -/* function ia_css_tagger_sp_unblock_clients: 2919 */ -#endif - -#ifndef ISP2401 -/* function css_get_frame_processing_time_end: 1FC0 */ -#else -/* function 
css_get_frame_processing_time_end: 2010 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_event_any_pending_mask -#define HIVE_MEM_event_any_pending_mask scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_event_any_pending_mask 0x300 -#else -#define HIVE_ADDR_event_any_pending_mask 0x318 -#endif -#define HIVE_SIZE_event_any_pending_mask 8 -#else -#endif -#endif -#define HIVE_MEM_sp_event_any_pending_mask scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_event_any_pending_mask 0x300 -#else -#define HIVE_ADDR_sp_event_any_pending_mask 0x318 -#endif -#define HIVE_SIZE_sp_event_any_pending_mask 8 - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_push: 5A2F */ -#else -/* function ia_css_isys_sp_backend_push: 5B6A */ -#endif - -/* function sh_css_decode_tag_descr: 352 */ - -/* function debug_enqueue_isp: 27B */ - -#ifndef ISP2401 -/* function qos_scheduler_update_stage_budget: 65AF */ -#else -/* function qos_scheduler_update_stage_budget: 66F2 */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_uninit: 596A */ -#else -/* function ia_css_spctrl_sp_uninit: 5AA5 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_HIVE_IF_SWITCH_CODE -#define HIVE_MEM_HIVE_IF_SWITCH_CODE scalar_processor_2400_dmem -#define HIVE_ADDR_HIVE_IF_SWITCH_CODE 0x1D8 -#define HIVE_SIZE_HIVE_IF_SWITCH_CODE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_HIVE_IF_SWITCH_CODE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_HIVE_IF_SWITCH_CODE 0x1D8 -#define HIVE_SIZE_sp_HIVE_IF_SWITCH_CODE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x5908 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x5964 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_dis_bufs 140 -#else -#endif -#endif -#define 
HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x5908 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x5964 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_dis_bufs 140 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_lock_from_start: 2AF2 */ -#else -/* function ia_css_tagger_buf_sp_lock_from_start: 2C94 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_isp_idle -#define HIVE_MEM_sem_for_isp_idle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_isp_idle 0x47B4 -#else -#define HIVE_ADDR_sem_for_isp_idle 0x47FC -#endif -#define HIVE_SIZE_sem_for_isp_idle 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_isp_idle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_isp_idle 0x47B4 -#else -#define HIVE_ADDR_sp_sem_for_isp_idle 0x47FC -#endif -#define HIVE_SIZE_sp_sem_for_isp_idle 20 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_write_byte_addr: 31FF */ -#else -/* function ia_css_dmaproxy_sp_write_byte_addr: 33DE */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init: 3176 */ -#else -/* function ia_css_dmaproxy_sp_init: 3355 */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 2D7B */ -#else -/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 2F1D */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_VAMEM_BASE -#define HIVE_MEM_ISP_VAMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_VAMEM_BASE 0x14 -#define HIVE_SIZE_ISP_VAMEM_BASE 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_VAMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_VAMEM_BASE 0x14 -#define HIVE_SIZE_sp_ISP_VAMEM_BASE 12 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_rawcopy_sp_tagger -#define HIVE_MEM_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem -#ifndef ISP2401 -#define 
HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x6294 -#else -#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x62F0 -#endif -#define HIVE_SIZE_ia_css_rawcopy_sp_tagger 24 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x6294 -#else -#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x62F0 -#endif -#define HIVE_SIZE_sp_ia_css_rawcopy_sp_tagger 24 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x5994 -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x59F0 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_exp_ids 70 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x5994 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x59F0 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_exp_ids 70 - -#ifndef ISP2401 -/* function ia_css_queue_item_load: 4D19 */ -#else -/* function ia_css_queue_item_load: 4F77 */ -#endif - -#ifndef ISP2401 -/* function ia_css_spctrl_sp_get_state: 5955 */ -#else -/* function ia_css_spctrl_sp_get_state: 5A90 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_uninit: 603F */ -#else -/* function ia_css_isys_sp_token_map_uninit: 617A */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_callout_sp_thread -#define HIVE_MEM_callout_sp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_callout_sp_thread 0x49DC -#else -#define HIVE_ADDR_callout_sp_thread 0x1E0 -#endif -#define HIVE_SIZE_callout_sp_thread 4 -#else -#endif -#endif -#define HIVE_MEM_sp_callout_sp_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_callout_sp_thread 0x49DC 
-#else -#define HIVE_ADDR_sp_callout_sp_thread 0x1E0 -#endif -#define HIVE_SIZE_sp_callout_sp_thread 4 - -#ifndef ISP2401 -/* function thread_fiber_sp_init: E2F */ -#else -/* function thread_fiber_sp_init: E24 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_SP_PMEM_BASE -#define HIVE_MEM_SP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_SP_PMEM_BASE 0x0 -#define HIVE_SIZE_SP_PMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_SP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_SP_PMEM_BASE 0x0 -#define HIVE_SIZE_sp_SP_PMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_snd_acquire_req: 5FAF */ -#else -/* function ia_css_isys_sp_token_map_snd_acquire_req: 60EA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_input_stream_format -#define HIVE_MEM_sp_isp_input_stream_format scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_input_stream_format 0x40F8 -#else -#define HIVE_ADDR_sp_isp_input_stream_format 0x4118 -#endif -#define HIVE_SIZE_sp_isp_input_stream_format 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_input_stream_format scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x40F8 -#else -#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x4118 -#endif -#define HIVE_SIZE_sp_sp_isp_input_stream_format 20 - -#ifndef ISP2401 -/* function __mod: 68A7 */ -#else -/* function __mod: 6A1A */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_dmem_channel: 3260 */ -#else -/* function ia_css_dmaproxy_sp_init_dmem_channel: 343F */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_join: CFF */ -#else -/* function ia_css_thread_sp_join: CF4 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_add_command: 6F4F */ -#else -/* function ia_css_dmaproxy_sp_add_command: 7082 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_metadata_thread_func: 5809 */ -#else -/* function ia_css_sp_metadata_thread_func: 5968 */ 
-#endif - -#ifndef ISP2401 -/* function __sp_event_proxy_func_critical: 6975 */ -#else -/* function __sp_event_proxy_func_critical: 6AE8 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_metadata_wait: 591C */ -#else -/* function ia_css_sp_metadata_wait: 5A57 */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_peek_from_start: F08 */ -#else -/* function ia_css_circbuf_peek_from_start: EFD */ -#endif - -#ifndef ISP2401 -/* function ia_css_event_sp_encode: 356B */ -#else -/* function ia_css_event_sp_encode: 375F */ -#endif - -#ifndef ISP2401 -/* function ia_css_thread_sp_run: D72 */ -#else -/* function ia_css_thread_sp_run: D67 */ -#endif - -#ifndef ISP2401 -/* function sp_isys_copy_func: 6F6 */ -#else -/* function sp_isys_copy_func: 68A */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_flush: 5A98 */ -#else -/* function ia_css_isys_sp_backend_flush: 5BD3 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_frame_exists: 59B4 */ -#else -/* function ia_css_isys_sp_backend_frame_exists: 5AEF */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_init_isp_memories: 47A2 */ -#else -/* function ia_css_sp_isp_param_init_isp_memories: 4A2A */ -#endif - -#ifndef ISP2401 -/* function register_isr: 8A9 */ -#else -/* function register_isr: 83D */ -#endif - -/* function irq_raise: C8 */ - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_mmu_invalidate: 313D */ -#else -/* function ia_css_dmaproxy_sp_mmu_invalidate: 32E5 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_HIVE_IF_SRST_ADDRESS -#define HIVE_MEM_HIVE_IF_SRST_ADDRESS scalar_processor_2400_dmem -#define HIVE_ADDR_HIVE_IF_SRST_ADDRESS 0x1B8 -#define HIVE_SIZE_HIVE_IF_SRST_ADDRESS 16 -#else -#endif -#endif -#define HIVE_MEM_sp_HIVE_IF_SRST_ADDRESS scalar_processor_2400_dmem -#define HIVE_ADDR_sp_HIVE_IF_SRST_ADDRESS 0x1B8 -#define HIVE_SIZE_sp_HIVE_IF_SRST_ADDRESS 16 - -#ifndef ISP2401 -/* function pipeline_sp_initialize_stage: 1924 */ -#else -/* function 
pipeline_sp_initialize_stage: 195E */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_isys_sp_frontend_states -#define HIVE_MEM_ia_css_isys_sp_frontend_states scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_isys_sp_frontend_states 0x62C8 -#else -#define HIVE_ADDR_ia_css_isys_sp_frontend_states 0x6324 -#endif -#define HIVE_SIZE_ia_css_isys_sp_frontend_states 12 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_isys_sp_frontend_states scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_isys_sp_frontend_states 0x62C8 -#else -#define HIVE_ADDR_sp_ia_css_isys_sp_frontend_states 0x6324 -#endif -#define HIVE_SIZE_sp_ia_css_isys_sp_frontend_states 12 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6E1E */ -#else -/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6F62 */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_done_ds: 37B2 */ -#else -/* function ia_css_ispctrl_sp_done_ds: 39E1 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_get_mem_inits: 477D */ -#else -/* function ia_css_sp_isp_param_get_mem_inits: 4A05 */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_init_buffer_queues: 13D0 */ -#else -/* function ia_css_parambuf_sp_init_buffer_queues: 13F1 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_pfp_spref -#define HIVE_MEM_vbuf_pfp_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_pfp_spref 0x2F0 -#else -#define HIVE_ADDR_vbuf_pfp_spref 0x308 -#endif -#define HIVE_SIZE_vbuf_pfp_spref 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_pfp_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_pfp_spref 0x2F0 -#else -#define HIVE_ADDR_sp_vbuf_pfp_spref 0x308 -#endif -#define HIVE_SIZE_sp_vbuf_pfp_spref 4 - -#ifndef ISP2401 -/* function input_system_cfg: ABB */ -#else -/* function input_system_cfg: AB5 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_HMEM_BASE 
-#define HIVE_MEM_ISP_HMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_HMEM_BASE 0x20 -#define HIVE_SIZE_ISP_HMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_HMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_HMEM_BASE 0x20 -#define HIVE_SIZE_sp_ISP_HMEM_BASE 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_frames -#define HIVE_MEM_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x59DC -#else -#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x5A38 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_frames 280 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x59DC -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x5A38 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_frames 280 - -#ifndef ISP2401 -/* function qos_scheduler_init_stage_budget: 65E8 */ -#else -/* function qos_scheduler_init_stage_budget: 6750 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_release: 5B0D */ -#else -/* function ia_css_isys_sp_backend_release: 5C48 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_backend_destroy: 5B37 */ -#else -/* function ia_css_isys_sp_backend_destroy: 5C72 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp2host_buffer_queue_handle -#define HIVE_MEM_sp2host_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp2host_buffer_queue_handle 0x5AF4 -#else -#define HIVE_ADDR_sp2host_buffer_queue_handle 0x5B50 -#endif -#define HIVE_SIZE_sp2host_buffer_queue_handle 96 -#else -#endif -#endif -#define HIVE_MEM_sp_sp2host_buffer_queue_handle scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x5AF4 -#else -#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 
0x5B50 -#endif -#define HIVE_SIZE_sp_sp2host_buffer_queue_handle 96 - -#ifndef ISP2401 -/* function ia_css_isys_sp_token_map_check_mipi_frame_size: 5F73 */ -#else -/* function ia_css_isys_sp_token_map_check_mipi_frame_size: 60AE */ -#endif - -#ifndef ISP2401 -/* function ia_css_ispctrl_sp_init_isp_vars: 449C */ -#else -/* function ia_css_ispctrl_sp_init_isp_vars: 46F7 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_has_empty_mipi_buffer_cb: 5B89 */ -#else -/* function ia_css_isys_sp_frontend_has_empty_mipi_buffer_cb: 5CC4 */ -#endif - -#ifndef ISP2401 -/* function sp_warning: 8DC */ -#else -/* function sp_warning: 870 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_vbuf_enqueue: 631D */ -#else -/* function ia_css_rmgr_sp_vbuf_enqueue: 6458 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_sp_tag_exp_id: 215B */ -#else -/* function ia_css_tagger_sp_tag_exp_id: 21AB */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_write: 3216 */ -#else -/* function ia_css_dmaproxy_sp_write: 33F5 */ -#endif - -#ifndef ISP2401 -/* function ia_css_parambuf_sp_release_in_param: 1250 */ -#else -/* function ia_css_parambuf_sp_release_in_param: 1245 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_irq_sw_interrupt_token -#define HIVE_MEM_irq_sw_interrupt_token scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_irq_sw_interrupt_token 0x40F4 -#else -#define HIVE_ADDR_irq_sw_interrupt_token 0x4114 -#endif -#define HIVE_SIZE_irq_sw_interrupt_token 4 -#else -#endif -#endif -#define HIVE_MEM_sp_irq_sw_interrupt_token scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x40F4 -#else -#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x4114 -#endif -#define HIVE_SIZE_sp_irq_sw_interrupt_token 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_isp_addresses -#define HIVE_MEM_sp_isp_addresses scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_addresses 0x5F44 -#else 
-#define HIVE_ADDR_sp_isp_addresses 0x5FA4 -#endif -#define HIVE_SIZE_sp_isp_addresses 172 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_isp_addresses scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_isp_addresses 0x5F44 -#else -#define HIVE_ADDR_sp_sp_isp_addresses 0x5FA4 -#endif -#define HIVE_SIZE_sp_sp_isp_addresses 172 - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_acq_gen: 6242 */ -#else -/* function ia_css_rmgr_sp_acq_gen: 637D */ -#endif - -#ifndef ISP2401 -/* function receiver_reg_load: AD0 */ -#else -/* function receiver_reg_load: ACA */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isps -#define HIVE_MEM_isps scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isps 0x6300 -#else -#define HIVE_ADDR_isps 0x635C -#endif -#define HIVE_SIZE_isps 28 -#else -#endif -#endif -#define HIVE_MEM_sp_isps scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isps 0x6300 -#else -#define HIVE_ADDR_sp_isps 0x635C -#endif -#define HIVE_SIZE_sp_isps 28 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_host_sp_queues_initialized -#define HIVE_MEM_host_sp_queues_initialized scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_host_sp_queues_initialized 0x410C -#else -#define HIVE_ADDR_host_sp_queues_initialized 0x412C -#endif -#define HIVE_SIZE_host_sp_queues_initialized 4 -#else -#endif -#endif -#define HIVE_MEM_sp_host_sp_queues_initialized scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_host_sp_queues_initialized 0x410C -#else -#define HIVE_ADDR_sp_host_sp_queues_initialized 0x412C -#endif -#define HIVE_SIZE_sp_host_sp_queues_initialized 4 - -#ifndef ISP2401 -/* function ia_css_queue_uninit: 4BE5 */ -#else -/* function ia_css_queue_uninit: 4E43 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_ispctrl_sp_isp_started -#define HIVE_MEM_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x5BFC 
-#else -#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x5C58 -#endif -#define HIVE_SIZE_ia_css_ispctrl_sp_isp_started 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x5BFC -#else -#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x5C58 -#endif -#define HIVE_SIZE_sp_ia_css_ispctrl_sp_isp_started 4 - -#ifndef ISP2401 -/* function ia_css_bufq_sp_release_dynamic_buf: 2DE7 */ -#else -/* function ia_css_bufq_sp_release_dynamic_buf: 2F89 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_set_height_exception: 330E */ -#else -/* function ia_css_dmaproxy_sp_set_height_exception: 3502 */ -#endif - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_init_vmem_channel: 3293 */ -#else -/* function ia_css_dmaproxy_sp_init_vmem_channel: 3473 */ -#endif - -#ifndef ISP2401 -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_num_ready_threads -#define HIVE_MEM_num_ready_threads scalar_processor_2400_dmem -#define HIVE_ADDR_num_ready_threads 0x49E4 -#define HIVE_SIZE_num_ready_threads 4 -#else -#endif -#endif -#define HIVE_MEM_sp_num_ready_threads scalar_processor_2400_dmem -#define HIVE_ADDR_sp_num_ready_threads 0x49E4 -#define HIVE_SIZE_sp_num_ready_threads 4 - -/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 31E8 */ -#else -/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 33C7 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_vbuf_spref -#define HIVE_MEM_vbuf_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_vbuf_spref 0x2EC -#else -#define HIVE_ADDR_vbuf_spref 0x304 -#endif -#define HIVE_SIZE_vbuf_spref 4 -#else -#endif -#endif -#define HIVE_MEM_sp_vbuf_spref scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_vbuf_spref 0x2EC -#else -#define HIVE_ADDR_sp_vbuf_spref 0x304 -#endif -#define HIVE_SIZE_sp_vbuf_spref 4 - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_metadata_thread -#define 
HIVE_MEM_sp_metadata_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_metadata_thread 0x4998 -#define HIVE_SIZE_sp_metadata_thread 68 -#else -#define HIVE_ADDR_sp_metadata_thread 0x49F8 -#define HIVE_SIZE_sp_metadata_thread 72 -#endif -#else -#endif -#endif -#define HIVE_MEM_sp_sp_metadata_thread scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_metadata_thread 0x4998 -#define HIVE_SIZE_sp_sp_metadata_thread 68 -#else -#define HIVE_ADDR_sp_sp_metadata_thread 0x49F8 -#define HIVE_SIZE_sp_sp_metadata_thread 72 -#endif - -#ifndef ISP2401 -/* function ia_css_queue_enqueue: 4B2F */ -#else -/* function ia_css_queue_enqueue: 4D8D */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_request -#define HIVE_MEM_ia_css_flash_sp_request scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_request 0x4A98 -#else -#define HIVE_ADDR_ia_css_flash_sp_request 0x4AF4 -#endif -#define HIVE_SIZE_ia_css_flash_sp_request 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_request scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x4A98 -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x4AF4 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_request 4 - -#ifndef ISP2401 -/* function ia_css_dmaproxy_sp_vmem_write: 31B9 */ -#else -/* function ia_css_dmaproxy_sp_vmem_write: 3398 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_tagger_frames -#define HIVE_MEM_tagger_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_tagger_frames 0x49EC -#else -#define HIVE_ADDR_tagger_frames 0x4A48 -#endif -#define HIVE_SIZE_tagger_frames 168 -#else -#endif -#endif -#define HIVE_MEM_sp_tagger_frames scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_tagger_frames 0x49EC -#else -#define HIVE_ADDR_sp_tagger_frames 0x4A48 -#endif -#define HIVE_SIZE_sp_tagger_frames 168 - -#ifndef ISP2401 -/* function 
ia_css_isys_sp_token_map_snd_capture_req: 5FD1 */ -#else -/* function ia_css_isys_sp_token_map_snd_capture_req: 610C */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_reading_if -#define HIVE_MEM_sem_for_reading_if scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_reading_if 0x47C8 -#else -#define HIVE_ADDR_sem_for_reading_if 0x4810 -#endif -#define HIVE_SIZE_sem_for_reading_if 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_reading_if scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sem_for_reading_if 0x47C8 -#else -#define HIVE_ADDR_sp_sem_for_reading_if 0x4810 -#endif -#define HIVE_SIZE_sp_sem_for_reading_if 20 - -#ifndef ISP2401 -/* function sp_generate_interrupts: 95B */ -#else -/* function sp_generate_interrupts: 8EF */ - -/* function ia_css_pipeline_sp_start: 1871 */ -#endif - -#ifndef ISP2401 -/* function ia_css_pipeline_sp_start: 1837 */ -#else -/* function ia_css_thread_default_callout: 6BE3 */ -#endif - -#ifndef ISP2401 -/* function ia_css_sp_rawcopy_init: 510C */ -#else -/* function ia_css_sp_rawcopy_init: 536A */ -#endif - -#ifndef ISP2401 -/* function tmr_clock_read: 13F1 */ -#else -/* function tmr_clock_read: 1412 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_BAMEM_BASE -#define HIVE_MEM_ISP_BAMEM_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ISP_BAMEM_BASE 0x2F8 -#else -#define HIVE_ADDR_ISP_BAMEM_BASE 0x310 -#endif -#define HIVE_SIZE_ISP_BAMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_BAMEM_BASE scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x2F8 -#else -#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x310 -#endif -#define HIVE_SIZE_sp_ISP_BAMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_rcv_capture_ack: 5C38 */ -#else -/* function ia_css_isys_sp_frontend_rcv_capture_ack: 5D73 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues 
-#define HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5B54 -#else -#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5BB0 -#endif -#define HIVE_SIZE_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5B54 -#else -#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5BB0 -#endif -#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160 - -#ifndef ISP2401 -/* function css_get_frame_processing_time_start: 1FC8 */ -#else -/* function css_get_frame_processing_time_start: 2018 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_all_cbs_frame -#define HIVE_MEM_sp_all_cbs_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_all_cbs_frame 0x47DC -#else -#define HIVE_ADDR_sp_all_cbs_frame 0x4824 -#endif -#define HIVE_SIZE_sp_all_cbs_frame 16 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_all_cbs_frame scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_all_cbs_frame 0x47DC -#else -#define HIVE_ADDR_sp_sp_all_cbs_frame 0x4824 -#endif -#define HIVE_SIZE_sp_sp_all_cbs_frame 16 - -#ifndef ISP2401 -/* function thread_sp_queue_print: D8F */ -#else -/* function thread_sp_queue_print: D84 */ -#endif - -#ifndef ISP2401 -/* function sp_notify_eof: 907 */ -#else -/* function sp_notify_eof: 89B */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sem_for_str2mem -#define HIVE_MEM_sem_for_str2mem scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sem_for_str2mem 0x47EC -#else -#define HIVE_ADDR_sem_for_str2mem 0x4834 -#endif -#define HIVE_SIZE_sem_for_str2mem 20 -#else -#endif -#endif -#define HIVE_MEM_sp_sem_for_str2mem scalar_processor_2400_dmem -#ifndef ISP2401 -#define 
HIVE_ADDR_sp_sem_for_str2mem 0x47EC -#else -#define HIVE_ADDR_sp_sem_for_str2mem 0x4834 -#endif -#define HIVE_SIZE_sp_sem_for_str2mem 20 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_is_marked_from_start: 2B5A */ -#else -/* function ia_css_tagger_buf_sp_is_marked_from_start: 2CFC */ -#endif - -#ifndef ISP2401 -/* function ia_css_bufq_sp_acquire_dynamic_buf: 2F9F */ -#else -/* function ia_css_bufq_sp_acquire_dynamic_buf: 3141 */ -#endif - -#ifndef ISP2401 -/* function ia_css_circbuf_destroy: 101D */ -#else -/* function ia_css_circbuf_destroy: 1012 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ISP_PMEM_BASE -#define HIVE_MEM_ISP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_ISP_PMEM_BASE 0xC -#define HIVE_SIZE_ISP_PMEM_BASE 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ISP_PMEM_BASE scalar_processor_2400_dmem -#define HIVE_ADDR_sp_ISP_PMEM_BASE 0xC -#define HIVE_SIZE_sp_ISP_PMEM_BASE 4 - -#ifndef ISP2401 -/* function ia_css_sp_isp_param_mem_load: 4710 */ -#else -/* function ia_css_sp_isp_param_mem_load: 4998 */ -#endif - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_pop_from_start: 2946 */ -#else -/* function ia_css_tagger_buf_sp_pop_from_start: 2AE8 */ -#endif - -#ifndef ISP2401 -/* function __div: 685F */ -#else -/* function __div: 69D2 */ -#endif - -#ifndef ISP2401 -/* function ia_css_isys_sp_frontend_create: 5E09 */ -#else -/* function ia_css_isys_sp_frontend_create: 5F44 */ -#endif - -#ifndef ISP2401 -/* function ia_css_rmgr_sp_refcount_release_vbuf: 633C */ -#else -/* function ia_css_rmgr_sp_refcount_release_vbuf: 6477 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_ia_css_flash_sp_in_use -#define HIVE_MEM_ia_css_flash_sp_in_use scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_ia_css_flash_sp_in_use 0x4A9C -#else -#define HIVE_ADDR_ia_css_flash_sp_in_use 0x4AF8 -#endif -#define HIVE_SIZE_ia_css_flash_sp_in_use 4 -#else -#endif -#endif -#define HIVE_MEM_sp_ia_css_flash_sp_in_use 
scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x4A9C -#else -#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x4AF8 -#endif -#define HIVE_SIZE_sp_ia_css_flash_sp_in_use 4 - -#ifndef ISP2401 -/* function ia_css_thread_sem_sp_wait: 6B42 */ -#else -/* function ia_css_thread_sem_sp_wait: 6CB7 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_sleep_mode -#define HIVE_MEM_sp_sleep_mode scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sleep_mode 0x4110 -#else -#define HIVE_ADDR_sp_sleep_mode 0x4130 -#endif -#define HIVE_SIZE_sp_sleep_mode 4 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_sleep_mode scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_sp_sleep_mode 0x4110 -#else -#define HIVE_ADDR_sp_sp_sleep_mode 0x4130 -#endif -#define HIVE_SIZE_sp_sp_sleep_mode 4 - -#ifndef ISP2401 -/* function ia_css_tagger_buf_sp_push: 2A55 */ -#else -/* function ia_css_tagger_buf_sp_push: 2BF7 */ -#endif - -/* function mmu_invalidate_cache: D3 */ - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_sp_max_cb_elems -#define HIVE_MEM_sp_max_cb_elems scalar_processor_2400_dmem -#define HIVE_ADDR_sp_max_cb_elems 0x148 -#define HIVE_SIZE_sp_max_cb_elems 8 -#else -#endif -#endif -#define HIVE_MEM_sp_sp_max_cb_elems scalar_processor_2400_dmem -#define HIVE_ADDR_sp_sp_max_cb_elems 0x148 -#define HIVE_SIZE_sp_sp_max_cb_elems 8 - -#ifndef ISP2401 -/* function ia_css_queue_remote_init: 4C07 */ -#else -/* function ia_css_queue_remote_init: 4E65 */ -#endif - -#ifndef HIVE_MULTIPLE_PROGRAMS -#ifndef HIVE_MEM_isp_stop_req -#define HIVE_MEM_isp_stop_req scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_isp_stop_req 0x4680 -#else -#define HIVE_ADDR_isp_stop_req 0x46C8 -#endif -#define HIVE_SIZE_isp_stop_req 4 -#else -#endif -#endif -#define HIVE_MEM_sp_isp_stop_req scalar_processor_2400_dmem -#ifndef ISP2401 -#define HIVE_ADDR_sp_isp_stop_req 0x4680 -#else -#define HIVE_ADDR_sp_isp_stop_req 0x46C8 -#endif 
-#define HIVE_SIZE_sp_isp_stop_req 4 - -#ifndef ISP2401 -#define HIVE_ICACHE_sp_critical_SEGMENT_START 0 -#define HIVE_ICACHE_sp_critical_NUM_SEGMENTS 1 -#endif - -#endif /* _sp_map_h_ */ -#ifndef ISP2401 -extern void sh_css_dump_sp_dmem(void); -void sh_css_dump_sp_dmem(void) -{ -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_trace.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_trace.h deleted file mode 100644 index 01f7c33b5b40..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_trace.h +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __CSS_TRACE_H_ -#define __CSS_TRACE_H_ - -#include -#ifdef ISP2401 -#include "sh_css_internal.h" /* for SH_CSS_MAX_SP_THREADS */ -#endif - -/* - structs and constants for tracing -*/ - -/* one tracer item: major, minor and counter. The counter value can be used for GP data */ -struct trace_item_t { - uint8_t major; - uint8_t minor; - uint16_t counter; -}; - -#ifdef ISP2401 -#define MAX_SCRATCH_DATA 4 -#define MAX_CMD_DATA 2 - -#endif -/* trace header: holds the version and the topology of the tracer. 
*/ -struct trace_header_t { -#ifndef ISP2401 - /* 1st dword */ -#else - /* 1st dword: descriptor */ -#endif - uint8_t version; - uint8_t max_threads; - uint16_t max_tracer_points; -#ifdef ISP2401 - /* 2nd field: command + data */ -#endif - /* 2nd dword */ - uint32_t command; - /* 3rd & 4th dword */ -#ifndef ISP2401 - uint32_t data[2]; -#else - uint32_t data[MAX_CMD_DATA]; - /* 3rd field: debug pointer */ -#endif - /* 5th & 6th dword: debug pointer mechanism */ - uint32_t debug_ptr_signature; - uint32_t debug_ptr_value; -#ifdef ISP2401 - /* Rest of the header: status & scratch data */ - uint8_t thr_status_byte[SH_CSS_MAX_SP_THREADS]; - uint16_t thr_status_word[SH_CSS_MAX_SP_THREADS]; - uint32_t thr_status_dword[SH_CSS_MAX_SP_THREADS]; - uint32_t scratch_debug[MAX_SCRATCH_DATA]; -#endif -}; - -#ifndef ISP2401 -#define TRACER_VER 2 -#else -/* offsets for master_port read/write */ -#define HDR_HDR_OFFSET 0 /* offset of the header */ -#define HDR_COMMAND_OFFSET offsetof(struct trace_header_t, command) -#define HDR_DATA_OFFSET offsetof(struct trace_header_t, data) -#define HDR_DEBUG_SIGNATURE_OFFSET offsetof(struct trace_header_t, debug_ptr_signature) -#define HDR_DEBUG_POINTER_OFFSET offsetof(struct trace_header_t, debug_ptr_value) -#define HDR_STATUS_OFFSET offsetof(struct trace_header_t, thr_status_byte) -#define HDR_STATUS_OFFSET_BYTE offsetof(struct trace_header_t, thr_status_byte) -#define HDR_STATUS_OFFSET_WORD offsetof(struct trace_header_t, thr_status_word) -#define HDR_STATUS_OFFSET_DWORD offsetof(struct trace_header_t, thr_status_dword) -#define HDR_STATUS_OFFSET_SCRATCH offsetof(struct trace_header_t, scratch_debug) - -/* -Trace version history: - 1: initial version, hdr = descr, command & ptr. - 2: added ISP + 24-bit fields. - 3: added thread ID. - 4: added status in header. 
-*/ -#define TRACER_VER 4 - -#endif -#define TRACE_BUFF_ADDR 0xA000 -#define TRACE_BUFF_SIZE 0x1000 /* 4K allocated */ - -#define TRACE_ENABLE_SP0 0 -#define TRACE_ENABLE_SP1 0 -#define TRACE_ENABLE_ISP 0 - -#ifndef ISP2401 -typedef enum { -#else -enum TRACE_CORE_ID { -#endif - TRACE_SP0_ID, - TRACE_SP1_ID, - TRACE_ISP_ID -#ifndef ISP2401 -} TRACE_CORE_ID; -#else -}; -#endif - -/* TODO: add timing format? */ -#ifndef ISP2401 -typedef enum { - TRACE_DUMP_FORMAT_POINT, - TRACE_DUMP_FORMAT_VALUE24_HEX, - TRACE_DUMP_FORMAT_VALUE24_DEC, -#else -enum TRACE_DUMP_FORMAT { - TRACE_DUMP_FORMAT_POINT_NO_TID, - TRACE_DUMP_FORMAT_VALUE24, -#endif - TRACE_DUMP_FORMAT_VALUE24_TIMING, -#ifndef ISP2401 - TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA -} TRACE_DUMP_FORMAT; -#else - TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA, - TRACE_DUMP_FORMAT_POINT -}; -#endif - - -/* currently divided as follows:*/ -#if (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 3) -/* can be divided as needed */ -#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE/4) -#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE/4) -#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE/2) -#elif (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 2) -#if TRACE_ENABLE_SP0 -#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE/2) -#else -#define TRACE_SP0_SIZE (0) -#endif -#if TRACE_ENABLE_SP1 -#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE/2) -#else -#define TRACE_SP1_SIZE (0) -#endif -#if TRACE_ENABLE_ISP -#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE/2) -#else -#define TRACE_ISP_SIZE (0) -#endif -#elif (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 1) -#if TRACE_ENABLE_SP0 -#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE) -#else -#define TRACE_SP0_SIZE (0) -#endif -#if TRACE_ENABLE_SP1 -#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE) -#else -#define TRACE_SP1_SIZE (0) -#endif -#if TRACE_ENABLE_ISP -#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE) -#else -#define TRACE_ISP_SIZE (0) -#endif -#else -#define TRACE_SP0_SIZE (0) -#define TRACE_SP1_SIZE (0) -#define TRACE_ISP_SIZE (0) 
-#endif - -#define TRACE_SP0_ADDR (TRACE_BUFF_ADDR) -#define TRACE_SP1_ADDR (TRACE_SP0_ADDR + TRACE_SP0_SIZE) -#define TRACE_ISP_ADDR (TRACE_SP1_ADDR + TRACE_SP1_SIZE) - -/* check if it's a legal division */ -#if (TRACE_BUFF_SIZE < TRACE_SP0_SIZE + TRACE_SP1_SIZE + TRACE_ISP_SIZE) -#error trace sizes are not divided correctly and are above limit -#endif - -#define TRACE_SP0_HEADER_ADDR (TRACE_SP0_ADDR) -#define TRACE_SP0_HEADER_SIZE (sizeof(struct trace_header_t)) -#ifndef ISP2401 -#define TRACE_SP0_ITEM_SIZE (sizeof(struct trace_item_t)) -#define TRACE_SP0_DATA_ADDR (TRACE_SP0_HEADER_ADDR + TRACE_SP0_HEADER_SIZE) -#define TRACE_SP0_DATA_SIZE (TRACE_SP0_SIZE - TRACE_SP0_HEADER_SIZE) -#define TRACE_SP0_MAX_POINTS (TRACE_SP0_DATA_SIZE / TRACE_SP0_ITEM_SIZE) -#else -#define TRACE_SP0_ITEM_SIZE (sizeof(struct trace_item_t)) -#define TRACE_SP0_DATA_ADDR (TRACE_SP0_HEADER_ADDR + TRACE_SP0_HEADER_SIZE) -#define TRACE_SP0_DATA_SIZE (TRACE_SP0_SIZE - TRACE_SP0_HEADER_SIZE) -#define TRACE_SP0_MAX_POINTS (TRACE_SP0_DATA_SIZE / TRACE_SP0_ITEM_SIZE) -#endif - -#define TRACE_SP1_HEADER_ADDR (TRACE_SP1_ADDR) -#define TRACE_SP1_HEADER_SIZE (sizeof(struct trace_header_t)) -#ifndef ISP2401 -#define TRACE_SP1_ITEM_SIZE (sizeof(struct trace_item_t)) -#define TRACE_SP1_DATA_ADDR (TRACE_SP1_HEADER_ADDR + TRACE_SP1_HEADER_SIZE) -#define TRACE_SP1_DATA_SIZE (TRACE_SP1_SIZE - TRACE_SP1_HEADER_SIZE) -#define TRACE_SP1_MAX_POINTS (TRACE_SP1_DATA_SIZE / TRACE_SP1_ITEM_SIZE) -#else -#define TRACE_SP1_ITEM_SIZE (sizeof(struct trace_item_t)) -#define TRACE_SP1_DATA_ADDR (TRACE_SP1_HEADER_ADDR + TRACE_SP1_HEADER_SIZE) -#define TRACE_SP1_DATA_SIZE (TRACE_SP1_SIZE - TRACE_SP1_HEADER_SIZE) -#define TRACE_SP1_MAX_POINTS (TRACE_SP1_DATA_SIZE / TRACE_SP1_ITEM_SIZE) -#endif - -#define TRACE_ISP_HEADER_ADDR (TRACE_ISP_ADDR) -#define TRACE_ISP_HEADER_SIZE (sizeof(struct trace_header_t)) -#ifndef ISP2401 -#define TRACE_ISP_ITEM_SIZE (sizeof(struct trace_item_t)) -#define TRACE_ISP_DATA_ADDR 
(TRACE_ISP_HEADER_ADDR + TRACE_ISP_HEADER_SIZE) -#define TRACE_ISP_DATA_SIZE (TRACE_ISP_SIZE - TRACE_ISP_HEADER_SIZE) -#define TRACE_ISP_MAX_POINTS (TRACE_ISP_DATA_SIZE / TRACE_ISP_ITEM_SIZE) - -#else -#define TRACE_ISP_ITEM_SIZE (sizeof(struct trace_item_t)) -#define TRACE_ISP_DATA_ADDR (TRACE_ISP_HEADER_ADDR + TRACE_ISP_HEADER_SIZE) -#define TRACE_ISP_DATA_SIZE (TRACE_ISP_SIZE - TRACE_ISP_HEADER_SIZE) -#define TRACE_ISP_MAX_POINTS (TRACE_ISP_DATA_SIZE / TRACE_ISP_ITEM_SIZE) -#endif - -#ifndef ISP2401 -/* offsets for master_port read/write */ -#define HDR_HDR_OFFSET 0 /* offset of the header */ -#define HDR_COMMAND_OFFSET 4 /* offset of the command */ -#define HDR_DATA_OFFSET 8 /* offset of the command data */ -#define HDR_DEBUG_SIGNATURE_OFFSET 16 /* offset of the param debug signature in trace_header_t */ -#define HDR_DEBUG_POINTER_OFFSET 20 /* offset of the param debug pointer in trace_header_t */ -#endif - -/* common majors */ -#ifdef ISP2401 -/* SP0 */ -#endif -#define MAJOR_MAIN 1 -#define MAJOR_ISP_STAGE_ENTRY 2 -#define MAJOR_DMA_PRXY 3 -#define MAJOR_START_ISP 4 -#ifdef ISP2401 -/* SP1 */ -#define MAJOR_OBSERVER_ISP0_EVENT 21 -#define MAJOR_OBSERVER_OUTPUT_FORM_EVENT 22 -#define MAJOR_OBSERVER_OUTPUT_SCAL_EVENT 23 -#define MAJOR_OBSERVER_IF_ACK 24 -#define MAJOR_OBSERVER_SP0_EVENT 25 -#define MAJOR_OBSERVER_SP_TERMINATE_EVENT 26 -#define MAJOR_OBSERVER_DMA_ACK 27 -#define MAJOR_OBSERVER_ACC_ACK 28 -#endif - -#define DEBUG_PTR_SIGNATURE 0xABCD /* signature for the debug parameter pointer */ - -/* command codes (1st byte) */ -typedef enum { - CMD_SET_ONE_MAJOR = 1, /* mask in one major. 2nd byte in the command is the major code */ - CMD_UNSET_ONE_MAJOR = 2, /* mask out one major. 2nd byte in the command is the major code */ - CMD_SET_ALL_MAJORS = 3, /* set the major print mask. 
the full mask is in the data DWORD */ - CMD_SET_VERBOSITY = 4 /* set verbosity level */ -} DBG_commands; - -/* command signature */ -#define CMD_SIGNATURE 0xAABBCC00 - -/* shared macros in traces infrastructure */ -/* increment the pointer cyclicly */ -#define DBG_NEXT_ITEM(x, max_items) (((x+1) >= max_items) ? 0 : x+1) -#define DBG_PREV_ITEM(x, max_items) ((x) ? x-1 : max_items-1) - -#define FIELD_MASK(width) (((1 << (width)) - 1)) -#define FIELD_PACK(value,mask,offset) (((value) & (mask)) << (offset)) -#define FIELD_UNPACK(value,mask,offset) (((value) >> (offset)) & (mask)) - - -#define FIELD_VALUE_OFFSET (0) -#define FIELD_VALUE_WIDTH (16) -#define FIELD_VALUE_MASK FIELD_MASK(FIELD_VALUE_WIDTH) -#define FIELD_VALUE_PACK(f) FIELD_PACK(f,FIELD_VALUE_MASK,FIELD_VALUE_OFFSET) -#ifndef ISP2401 -#define FIELD_VALUE_UNPACK(f) FIELD_UNPACK(f,FIELD_VALUE_MASK,FIELD_VALUE_OFFSET) -#else -#define FIELD_VALUE_UNPACK(f) FIELD_UNPACK(f,FIELD_VALUE_MASK,FIELD_VALUE_OFFSET) -#endif - -#define FIELD_MINOR_OFFSET (FIELD_VALUE_OFFSET + FIELD_VALUE_WIDTH) -#define FIELD_MINOR_WIDTH (8) -#define FIELD_MINOR_MASK FIELD_MASK(FIELD_MINOR_WIDTH) -#define FIELD_MINOR_PACK(f) FIELD_PACK(f,FIELD_MINOR_MASK,FIELD_MINOR_OFFSET) -#ifndef ISP2401 -#define FIELD_MINOR_UNPACK(f) FIELD_UNPACK(f,FIELD_MINOR_MASK,FIELD_MINOR_OFFSET) -#else -#define FIELD_MINOR_UNPACK(f) FIELD_UNPACK(f,FIELD_MINOR_MASK,FIELD_MINOR_OFFSET) -#endif - -#define FIELD_MAJOR_OFFSET (FIELD_MINOR_OFFSET + FIELD_MINOR_WIDTH) -#define FIELD_MAJOR_WIDTH (5) -#define FIELD_MAJOR_MASK FIELD_MASK(FIELD_MAJOR_WIDTH) -#define FIELD_MAJOR_PACK(f) FIELD_PACK(f,FIELD_MAJOR_MASK,FIELD_MAJOR_OFFSET) -#ifndef ISP2401 -#define FIELD_MAJOR_UNPACK(f) FIELD_UNPACK(f,FIELD_MAJOR_MASK,FIELD_MAJOR_OFFSET) -#else -#define FIELD_MAJOR_UNPACK(f) FIELD_UNPACK(f,FIELD_MAJOR_MASK,FIELD_MAJOR_OFFSET) -#endif - -#ifndef ISP2401 -#define FIELD_FORMAT_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_WIDTH) -#define FIELD_FORMAT_WIDTH (3) -#define 
FIELD_FORMAT_MASK FIELD_MASK(FIELD_FORMAT_WIDTH) -#define FIELD_FORMAT_PACK(f) FIELD_PACK(f,FIELD_FORMAT_MASK,FIELD_FORMAT_OFFSET) -#define FIELD_FORMAT_UNPACK(f) FIELD_UNPACK(f,FIELD_FORMAT_MASK,FIELD_FORMAT_OFFSET) -#else -/* for quick traces - only insertion, compatible with the regular point */ -#define FIELD_FULL_MAJOR_WIDTH (8) -#define FIELD_FULL_MAJOR_MASK FIELD_MASK(FIELD_FULL_MAJOR_WIDTH) -#define FIELD_FULL_MAJOR_PACK(f) FIELD_PACK(f,FIELD_FULL_MAJOR_MASK,FIELD_MAJOR_OFFSET) - -/* The following 2 fields are used only when FIELD_TID value is 111b. - * it means we don't want to use thread id, but format. In this case, - * the last 2 MSB bits of the major field will indicates the format - */ -#define FIELD_MAJOR_W_FMT_OFFSET FIELD_MAJOR_OFFSET -#define FIELD_MAJOR_W_FMT_WIDTH (3) -#define FIELD_MAJOR_W_FMT_MASK FIELD_MASK(FIELD_MAJOR_W_FMT_WIDTH) -#define FIELD_MAJOR_W_FMT_PACK(f) FIELD_PACK(f,FIELD_MAJOR_W_FMT_MASK,FIELD_MAJOR_W_FMT_OFFSET) -#define FIELD_MAJOR_W_FMT_UNPACK(f) FIELD_UNPACK(f,FIELD_MAJOR_W_FMT_MASK,FIELD_MAJOR_W_FMT_OFFSET) - -#define FIELD_FORMAT_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_W_FMT_WIDTH) -#define FIELD_FORMAT_WIDTH (2) -#define FIELD_FORMAT_MASK FIELD_MASK(FIELD_MAJOR_W_FMT_WIDTH) -#define FIELD_FORMAT_PACK(f) FIELD_PACK(f,FIELD_FORMAT_MASK,FIELD_FORMAT_OFFSET) -#define FIELD_FORMAT_UNPACK(f) FIELD_UNPACK(f,FIELD_FORMAT_MASK,FIELD_FORMAT_OFFSET) - -#define FIELD_TID_SEL_FORMAT_PAT (7) - -#define FIELD_TID_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_WIDTH) -#define FIELD_TID_WIDTH (3) -#define FIELD_TID_MASK FIELD_MASK(FIELD_TID_WIDTH) -#define FIELD_TID_PACK(f) FIELD_PACK(f,FIELD_TID_MASK,FIELD_TID_OFFSET) -#define FIELD_TID_UNPACK(f) FIELD_UNPACK(f,FIELD_TID_MASK,FIELD_TID_OFFSET) -#endif - -#define FIELD_VALUE_24_OFFSET (0) -#define FIELD_VALUE_24_WIDTH (24) -#ifndef ISP2401 -#define FIELD_VALUE_24_MASK FIELD_MASK(FIELD_VALUE_24_WIDTH) -#else -#define FIELD_VALUE_24_MASK FIELD_MASK(FIELD_VALUE_24_WIDTH) -#endif -#define 
FIELD_VALUE_24_PACK(f) FIELD_PACK(f,FIELD_VALUE_24_MASK,FIELD_VALUE_24_OFFSET) -#define FIELD_VALUE_24_UNPACK(f) FIELD_UNPACK(f,FIELD_VALUE_24_MASK,FIELD_VALUE_24_OFFSET) - -#ifndef ISP2401 -#define PACK_TRACEPOINT(format,major, minor, value) \ - (FIELD_FORMAT_PACK(format) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value)) -#else -#define PACK_TRACEPOINT(tid, major, minor, value) \ - (FIELD_TID_PACK(tid) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value)) - -#define PACK_QUICK_TRACEPOINT(major, minor) \ - (FIELD_FULL_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor)) - -#define PACK_FORMATTED_TRACEPOINT(format, major, minor, value) \ - (FIELD_TID_PACK(FIELD_TID_SEL_FORMAT_PAT) | FIELD_FORMAT_PACK(format) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value)) -#endif - -#ifndef ISP2401 -#define PACK_TRACE_VALUE24(format, major, value) \ - (FIELD_FORMAT_PACK(format) | FIELD_MAJOR_PACK(major) | FIELD_VALUE_24_PACK(value)) -#else -#define PACK_TRACE_VALUE24(major, value) \ - (FIELD_TID_PACK(FIELD_TID_SEL_FORMAT_PAT) | FIELD_MAJOR_PACK(major) | FIELD_VALUE_24_PACK(value)) -#endif - -#endif /* __CSS_TRACE_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/debug_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/debug_global.h deleted file mode 100644 index 076c4ba76175..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/debug_global.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __DEBUG_GLOBAL_H_INCLUDED__ -#define __DEBUG_GLOBAL_H_INCLUDED__ - -#include - -#define DEBUG_BUF_SIZE 1024 -#define DEBUG_BUF_MASK (DEBUG_BUF_SIZE - 1) - -#define DEBUG_DATA_ENABLE_ADDR 0x00 -#define DEBUG_DATA_BUF_MODE_ADDR 0x04 -#define DEBUG_DATA_HEAD_ADDR 0x08 -#define DEBUG_DATA_TAIL_ADDR 0x0C -#define DEBUG_DATA_BUF_ADDR 0x10 - -#define DEBUG_DATA_ENABLE_DDR_ADDR 0x00 -#define DEBUG_DATA_BUF_MODE_DDR_ADDR HIVE_ISP_DDR_WORD_BYTES -#define DEBUG_DATA_HEAD_DDR_ADDR (2 * HIVE_ISP_DDR_WORD_BYTES) -#define DEBUG_DATA_TAIL_DDR_ADDR (3 * HIVE_ISP_DDR_WORD_BYTES) -#define DEBUG_DATA_BUF_DDR_ADDR (4 * HIVE_ISP_DDR_WORD_BYTES) - -#define DEBUG_BUFFER_ISP_DMEM_ADDR 0x0 - -/* - * Enable HAS_WATCHDOG_SP_THREAD_DEBUG for additional SP thread and - * pipe information on watchdog output - * #undef HAS_WATCHDOG_SP_THREAD_DEBUG - * #define HAS_WATCHDOG_SP_THREAD_DEBUG - */ - - -/* - * The linear buffer mode will accept data until the first - * overflow and then stop accepting new data - * The circular buffer mode will accept if there is place - * and discard the data if the buffer is full - */ -typedef enum { - DEBUG_BUFFER_MODE_LINEAR = 0, - DEBUG_BUFFER_MODE_CIRCULAR, - N_DEBUG_BUFFER_MODE -} debug_buf_mode_t; - -struct debug_data_s { - uint32_t enable; - uint32_t bufmode; - uint32_t head; - uint32_t tail; - uint32_t buf[DEBUG_BUF_SIZE]; -}; - -/* thread.sp.c doesn't have a notion of HIVE_ISP_DDR_WORD_BYTES - still one point of control is needed for debug purposes */ - -#ifdef HIVE_ISP_DDR_WORD_BYTES -struct debug_data_ddr_s { - uint32_t enable; - int8_t padding1[HIVE_ISP_DDR_WORD_BYTES-sizeof(uint32_t)]; - uint32_t bufmode; - int8_t padding2[HIVE_ISP_DDR_WORD_BYTES-sizeof(uint32_t)]; - uint32_t head; - int8_t 
padding3[HIVE_ISP_DDR_WORD_BYTES-sizeof(uint32_t)]; - uint32_t tail; - int8_t padding4[HIVE_ISP_DDR_WORD_BYTES-sizeof(uint32_t)]; - uint32_t buf[DEBUG_BUF_SIZE]; -}; -#endif - -#endif /* __DEBUG_GLOBAL_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/dma_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/dma_global.h deleted file mode 100644 index 60d6de1332cd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/dma_global.h +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __DMA_GLOBAL_H_INCLUDED__ -#define __DMA_GLOBAL_H_INCLUDED__ - -#include - -#define IS_DMA_VERSION_2 - -#define HIVE_ISP_NUM_DMA_CONNS 3 -#define HIVE_ISP_NUM_DMA_CHANNELS 32 - -#define N_DMA_CHANNEL_ID HIVE_ISP_NUM_DMA_CHANNELS - -#include "dma_v2_defs.h" - -/* - * Command token bit mappings - * - * transfer / config - * param id[4] channel id[5] cmd id[6] - * | b14 .. b11 | b10 ... b6 | b5 ... b0 | - * - * - * fast transfer: - * height[5] width[8] width[8] channel id[5] cmd id[6] - * | b31 .. b26 | b25 .. b18 | b17 .. b11 | b10 ... b6 | b5 ... 
b0 | - * - */ - -#define _DMA_PACKING_SETUP_PARAM _DMA_V2_PACKING_SETUP_PARAM -#define _DMA_HEIGHT_PARAM _DMA_V2_HEIGHT_PARAM -#define _DMA_STRIDE_A_PARAM _DMA_V2_STRIDE_A_PARAM -#define _DMA_ELEM_CROPPING_A_PARAM _DMA_V2_ELEM_CROPPING_A_PARAM -#define _DMA_WIDTH_A_PARAM _DMA_V2_WIDTH_A_PARAM -#define _DMA_STRIDE_B_PARAM _DMA_V2_STRIDE_B_PARAM -#define _DMA_ELEM_CROPPING_B_PARAM _DMA_V2_ELEM_CROPPING_B_PARAM -#define _DMA_WIDTH_B_PARAM _DMA_V2_WIDTH_B_PARAM - -#define _DMA_ZERO_EXTEND _DMA_V2_ZERO_EXTEND -#define _DMA_SIGN_EXTEND _DMA_V2_SIGN_EXTEND - - -typedef unsigned int dma_channel; - -typedef enum { - dma_isp_to_bus_connection = HIVE_DMA_ISP_BUS_CONN, - dma_isp_to_ddr_connection = HIVE_DMA_ISP_DDR_CONN, - dma_bus_to_ddr_connection = HIVE_DMA_BUS_DDR_CONN, -} dma_connection; - -typedef enum { - dma_zero_extension = _DMA_ZERO_EXTEND, - dma_sign_extension = _DMA_SIGN_EXTEND -} dma_extension; - - -#define DMA_PROP_SHIFT(val, param) ((val) << _DMA_V2_ ## param ## _IDX) -#define DMA_PROP_MASK(param) ((1U << _DMA_V2_ ## param ## _BITS)-1) -#define DMA_PACK(val, param) DMA_PROP_SHIFT((val) & DMA_PROP_MASK(param), param) - -#define DMA_PACK_COMMAND(cmd) DMA_PACK(cmd, CMD) -#define DMA_PACK_CHANNEL(ch) DMA_PACK(ch, CHANNEL) -#define DMA_PACK_PARAM(par) DMA_PACK(par, PARAM) -#define DMA_PACK_EXTENSION(ext) DMA_PACK(ext, EXTENSION) -#define DMA_PACK_LEFT_CROPPING(lc) DMA_PACK(lc, LEFT_CROPPING) -#define DMA_PACK_WIDTH_A(w) DMA_PACK(w, SPEC_DEV_A_XB) -#define DMA_PACK_WIDTH_B(w) DMA_PACK(w, SPEC_DEV_B_XB) -#define DMA_PACK_HEIGHT(h) DMA_PACK(h, SPEC_YB) - -#define DMA_PACK_CMD_CHANNEL(cmd, ch) (DMA_PACK_COMMAND(cmd) | DMA_PACK_CHANNEL(ch)) -#define DMA_PACK_SETUP(conn, ext) ((conn) | DMA_PACK_EXTENSION(ext)) -#define DMA_PACK_CROP_ELEMS(elems, crop) ((elems) | DMA_PACK_LEFT_CROPPING(crop)) - -#define hive_dma_snd(dma_id, token) OP_std_snd(dma_id, (unsigned int)(token)) - -#define DMA_PACK_BLOCK_CMD(cmd, ch, width_a, width_b, height) \ - (DMA_PACK_COMMAND(cmd) | \ - 
DMA_PACK_CHANNEL(ch) | \ - DMA_PACK_WIDTH_A(width_a) | \ - DMA_PACK_WIDTH_B(width_b) | \ - DMA_PACK_HEIGHT(height)) - -#define hive_dma_move_data(dma_id, read, channel, addr_a, addr_b, to_is_var, from_is_var) \ -{ \ - hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \ - hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(read?_DMA_V2_MOVE_B2A_COMMAND:_DMA_V2_MOVE_A2B_COMMAND, channel)); \ - hive_dma_snd(dma_id, read?(unsigned)(addr_b):(unsigned)(addr_a)); \ - hive_dma_snd(dma_id, read?(unsigned)(addr_a):(unsigned)(addr_b)); \ - hive_dma_snd(dma_id, to_is_var); \ - hive_dma_snd(dma_id, from_is_var); \ -} -#define hive_dma_move_data_no_ack(dma_id, read, channel, addr_a, addr_b, to_is_var, from_is_var) \ -{ \ - hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \ - hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(read?_DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND:_DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND, channel)); \ - hive_dma_snd(dma_id, read?(unsigned)(addr_b):(unsigned)(addr_a)); \ - hive_dma_snd(dma_id, read?(unsigned)(addr_a):(unsigned)(addr_b)); \ - hive_dma_snd(dma_id, to_is_var); \ - hive_dma_snd(dma_id, from_is_var); \ -} - -#define hive_dma_move_b2a_data(dma_id, channel, to_addr, from_addr, to_is_var, from_is_var) \ -{ \ - hive_dma_move_data(dma_id, true, channel, to_addr, from_addr, to_is_var, from_is_var) \ -} - -#define hive_dma_move_a2b_data(dma_id, channel, from_addr, to_addr, from_is_var, to_is_var) \ -{ \ - hive_dma_move_data(dma_id, false, channel, from_addr, to_addr, from_is_var, to_is_var) \ -} - -#define hive_dma_set_data(dma_id, channel, address, value, is_var) \ -{ \ - hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \ - hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_INIT_A_COMMAND, channel)); \ - hive_dma_snd(dma_id, value); \ - hive_dma_snd(dma_id, address); \ - hive_dma_snd(dma_id, is_var); \ -} - -#define hive_dma_clear_data(dma_id, channel, address, is_var) hive_dma_set_data(dma_id, channel, address, 0, is_var) - 
-#define hive_dma_configure(dma_id, channel, connection, extension, height, \ - stride_A, elems_A, cropping_A, width_A, \ - stride_B, elems_B, cropping_B, width_B) \ -{ \ - hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_CONFIG_CHANNEL_COMMAND, channel)); \ - hive_dma_snd(dma_id, DMA_PACK_SETUP(connection, extension)); \ - hive_dma_snd(dma_id, stride_A); \ - hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_A, cropping_A)); \ - hive_dma_snd(dma_id, width_A); \ - hive_dma_snd(dma_id, stride_B); \ - hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_B, cropping_B)); \ - hive_dma_snd(dma_id, width_B); \ - hive_dma_snd(dma_id, height); \ -} - -#define hive_dma_execute(dma_id, channel, cmd, to_addr, from_addr_value, to_is_var, from_is_var) \ -{ \ - hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \ - hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(cmd, channel)); \ - hive_dma_snd(dma_id, to_addr); \ - hive_dma_snd(dma_id, from_addr_value); \ - hive_dma_snd(dma_id, to_is_var); \ - if ((cmd & DMA_CLEAR_CMDBIT) == 0) { \ - hive_dma_snd(dma_id, from_is_var); \ - } \ -} - -#define hive_dma_configure_fast(dma_id, channel, connection, extension, elems_A, elems_B) \ -{ \ - hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_CONFIG_CHANNEL_COMMAND, channel)); \ - hive_dma_snd(dma_id, DMA_PACK_SETUP(connection, extension)); \ - hive_dma_snd(dma_id, 0); \ - hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_A, 0)); \ - hive_dma_snd(dma_id, 0); \ - hive_dma_snd(dma_id, 0); \ - hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_B, 0)); \ - hive_dma_snd(dma_id, 0); \ - hive_dma_snd(dma_id, 1); \ -} - -#define hive_dma_set_parameter(dma_id, channel, param, value) \ -{ \ - hive_dma_snd(dma_id, _DMA_V2_SET_CHANNEL_PARAM_COMMAND | DMA_PACK_CHANNEL(channel) | DMA_PACK_PARAM(param)); \ - hive_dma_snd(dma_id, value); \ -} - -#define DMA_SPECIFIC_CMDBIT 0x01 -#define DMA_CHECK_CMDBIT 0x02 -#define DMA_RW_CMDBIT 0x04 -#define DMA_CLEAR_CMDBIT 0x08 -#define DMA_ACK_CMDBIT 0x10 -#define DMA_CFG_CMDBIT 
0x20 -#define DMA_PARAM_CMDBIT 0x01 - -/* Write complete check not necessary if there's no ack */ -#define DMA_NOACK_CMD (DMA_ACK_CMDBIT | DMA_CHECK_CMDBIT) -#define DMA_CFG_CMD (DMA_CFG_CMDBIT) -#define DMA_CFGPARAM_CMD (DMA_CFG_CMDBIT | DMA_PARAM_CMDBIT) - -#define DMA_CMD_NEEDS_ACK(cmd) ((cmd & DMA_NOACK_CMD) == 0) -#define DMA_CMD_IS_TRANSFER(cmd) ((cmd & DMA_CFG_CMDBIT) == 0) -#define DMA_CMD_IS_WR(cmd) ((cmd & DMA_RW_CMDBIT) != 0) -#define DMA_CMD_IS_RD(cmd) ((cmd & DMA_RW_CMDBIT) == 0) -#define DMA_CMD_IS_CLR(cmd) ((cmd & DMA_CLEAR_CMDBIT) != 0) -#define DMA_CMD_IS_CFG(cmd) ((cmd & DMA_CFG_CMDBIT) != 0) -#define DMA_CMD_IS_PARAMCFG(cmd) ((cmd & DMA_CFGPARAM_CMD) == DMA_CFGPARAM_CMD) - -/* As a matter of convention */ -#define DMA_TRANSFER_READ DMA_TRANSFER_B2A -#define DMA_TRANSFER_WRITE DMA_TRANSFER_A2B -/* store/load from the PoV of the system(memory) */ -#define DMA_TRANSFER_STORE DMA_TRANSFER_B2A -#define DMA_TRANSFER_LOAD DMA_TRANSFER_A2B -#define DMA_TRANSFER_CLEAR DMA_TRANSFER_CLEAR_A - -typedef enum { - DMA_TRANSFER_CLEAR_A = DMA_CLEAR_CMDBIT, /* 8 */ - DMA_TRANSFER_CLEAR_B = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT, /* 12 */ - DMA_TRANSFER_A2B = DMA_RW_CMDBIT, /* 4 */ - DMA_TRANSFER_B2A = 0, /* 0 */ - DMA_TRANSFER_CLEAR_A_NOACK = DMA_CLEAR_CMDBIT | DMA_NOACK_CMD, /* 26 */ - DMA_TRANSFER_CLEAR_B_NOACK = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_NOACK_CMD, /* 30 */ - DMA_TRANSFER_A2B_NOACK = DMA_RW_CMDBIT | DMA_NOACK_CMD, /* 22 */ - DMA_TRANSFER_B2A_NOACK = DMA_NOACK_CMD, /* 18 */ - DMA_FASTTRANSFER_CLEAR_A = DMA_CLEAR_CMDBIT | DMA_SPECIFIC_CMDBIT, - DMA_FASTTRANSFER_CLEAR_B = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_SPECIFIC_CMDBIT, - DMA_FASTTRANSFER_A2B = DMA_RW_CMDBIT | DMA_SPECIFIC_CMDBIT, - DMA_FASTTRANSFER_B2A = DMA_SPECIFIC_CMDBIT, - DMA_FASTTRANSFER_CLEAR_A_NOACK = DMA_CLEAR_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT, - DMA_FASTTRANSFER_CLEAR_B_NOACK = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT, - DMA_FASTTRANSFER_A2B_NOACK = 
DMA_RW_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT, - DMA_FASTTRANSFER_B2A_NOACK = DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT, -} dma_transfer_type_t; - -typedef enum { - DMA_CONFIG_SETUP = _DMA_V2_PACKING_SETUP_PARAM, - DMA_CONFIG_HEIGHT = _DMA_V2_HEIGHT_PARAM, - DMA_CONFIG_STRIDE_A_ = _DMA_V2_STRIDE_A_PARAM, - DMA_CONFIG_CROP_ELEM_A = _DMA_V2_ELEM_CROPPING_A_PARAM, - DMA_CONFIG_WIDTH_A = _DMA_V2_WIDTH_A_PARAM, - DMA_CONFIG_STRIDE_B_ = _DMA_V2_STRIDE_B_PARAM, - DMA_CONFIG_CROP_ELEM_B = _DMA_V2_ELEM_CROPPING_B_PARAM, - DMA_CONFIG_WIDTH_B = _DMA_V2_WIDTH_B_PARAM, -} dma_config_type_t; - -struct dma_port_config { - uint8_t crop, elems; - uint16_t width; - uint32_t stride; -}; - -/* Descriptor for dma configuration */ -struct dma_channel_config { - uint8_t connection; - uint8_t extension; - uint8_t height; - struct dma_port_config a, b; -}; - -#endif /* __DMA_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/event_fifo_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/event_fifo_global.h deleted file mode 100644 index 4df7a405cdcf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/event_fifo_global.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __EVENT_FIFO_GLOBAL_H -#define __EVENT_FIFO_GLOBAL_H - -/*#error "event_global.h: No global event information permitted"*/ - -#endif /* __EVENT_FIFO_GLOBAL_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/fifo_monitor_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/fifo_monitor_global.h deleted file mode 100644 index f43bf0ad2468..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/fifo_monitor_global.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __FIFO_MONITOR_GLOBAL_H_INCLUDED__ -#define __FIFO_MONITOR_GLOBAL_H_INCLUDED__ - -#define IS_FIFO_MONITOR_VERSION_2 - -/* -#define HIVE_ISP_CSS_STREAM_SWITCH_NONE 0 -#define HIVE_ISP_CSS_STREAM_SWITCH_SP 1 -#define HIVE_ISP_CSS_STREAM_SWITCH_ISP 2 - * - * Actually, "HIVE_ISP_CSS_STREAM_SWITCH_SP = 1", "HIVE_ISP_CSS_STREAM_SWITCH_ISP = 0" - * "hive_isp_css_stream_switch_hrt.h" - */ -#define HIVE_ISP_CSS_STREAM_SWITCH_ISP 0 -#define HIVE_ISP_CSS_STREAM_SWITCH_SP 1 -#define HIVE_ISP_CSS_STREAM_SWITCH_NONE 2 - -#endif /* __FIFO_MONITOR_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gdc_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gdc_global.h deleted file mode 100644 index 4505775b224c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gdc_global.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __GDC_GLOBAL_H_INCLUDED__ -#define __GDC_GLOBAL_H_INCLUDED__ - -#define IS_GDC_VERSION_2 - -#include -#include "gdc_v2_defs.h" - -/* - * Storage addresses for packed data transfer - */ -#define GDC_PARAM_ICX_LEFT_ROUNDED_IDX 0 -#define GDC_PARAM_OXDIM_FLOORED_IDX 1 -#define GDC_PARAM_OXDIM_LAST_IDX 2 -#define GDC_PARAM_WOIX_LAST_IDX 3 -#define GDC_PARAM_IY_TOPLEFT_IDX 4 -#define GDC_PARAM_CHUNK_CNT_IDX 5 -/*#define GDC_PARAM_ELEMENTS_PER_XMEM_ADDR_IDX 6 */ /* Derived from bpp */ -#define GDC_PARAM_BPP_IDX 6 -#define GDC_PARAM_BLOCK_HEIGHT_IDX 7 -/*#define GDC_PARAM_DMA_CHANNEL_STRIDE_A_IDX 8*/ /* The DMA stride == the GDC buffer stride */ -#define GDC_PARAM_WOIX_IDX 8 -#define GDC_PARAM_DMA_CHANNEL_STRIDE_B_IDX 9 -#define GDC_PARAM_DMA_CHANNEL_WIDTH_A_IDX 10 -#define GDC_PARAM_DMA_CHANNEL_WIDTH_B_IDX 11 -#define GDC_PARAM_VECTORS_PER_LINE_IN_IDX 12 -#define GDC_PARAM_VECTORS_PER_LINE_OUT_IDX 13 -#define GDC_PARAM_VMEM_IN_DIMY_IDX 14 -#define GDC_PARAM_COMMAND_IDX 15 -#define N_GDC_PARAM 16 - -/* Because of the packed parameter transfer max(params) == max(fragments) */ -#define N_GDC_FRAGMENTS N_GDC_PARAM - -/* The GDC is capable of higher internal precision than the parameter data structures */ -#define HRT_GDC_COORD_SCALE_BITS 6 -#define HRT_GDC_COORD_SCALE (1 << HRT_GDC_COORD_SCALE_BITS) - -typedef enum { - GDC_CH0_ID = 0, - N_GDC_CHANNEL_ID -} gdc_channel_ID_t; - -typedef enum { - gdc_8_bpp = 8, - gdc_10_bpp = 10, - gdc_12_bpp = 12, - gdc_14_bpp = 14 -} gdc_bits_per_pixel_t; - -typedef struct gdc_scale_param_mem_s { - uint16_t params[N_GDC_PARAM]; - uint16_t ipx_start_array[N_GDC_PARAM]; - uint16_t ibuf_offset[N_GDC_PARAM]; - uint16_t obuf_offset[N_GDC_PARAM]; -} gdc_scale_param_mem_t; - -typedef struct gdc_warp_param_mem_s { - uint32_t origin_x; - uint32_t origin_y; - uint32_t in_addr_offset; - uint32_t in_block_width; - uint32_t in_block_height; - uint32_t p0_x; - uint32_t p0_y; - uint32_t p1_x; - uint32_t p1_y; - uint32_t p2_x; - uint32_t 
p2_y; - uint32_t p3_x; - uint32_t p3_y; - uint32_t padding[3]; -} gdc_warp_param_mem_t; - - -#endif /* __GDC_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_device_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_device_global.h deleted file mode 100644 index 30ad77059d93..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_device_global.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __GP_DEVICE_GLOBAL_H_INCLUDED__ -#define __GP_DEVICE_GLOBAL_H_INCLUDED__ - -#define IS_GP_DEVICE_VERSION_2 - -#define _REG_GP_IRQ_REQ0_ADDR 0x08 -#define _REG_GP_IRQ_REQ1_ADDR 0x0C -/* The SP sends SW interrupt info to this register */ -#define _REG_GP_IRQ_REQUEST0_ADDR _REG_GP_IRQ_REQ0_ADDR -#define _REG_GP_IRQ_REQUEST1_ADDR _REG_GP_IRQ_REQ1_ADDR - -/* The SP configures FIFO switches in these registers */ -#define _REG_GP_SWITCH_IF_ADDR 0x40 -#define _REG_GP_SWITCH_GDC1_ADDR 0x44 -#define _REG_GP_SWITCH_GDC2_ADDR 0x48 -/* @ INPUT_FORMATTER_BASE -> GP_DEVICE_BASE */ -#define _REG_GP_IFMT_input_switch_lut_reg0 0x00030800 -#define _REG_GP_IFMT_input_switch_lut_reg1 0x00030804 -#define _REG_GP_IFMT_input_switch_lut_reg2 0x00030808 -#define _REG_GP_IFMT_input_switch_lut_reg3 0x0003080C -#define _REG_GP_IFMT_input_switch_lut_reg4 0x00030810 -#define _REG_GP_IFMT_input_switch_lut_reg5 0x00030814 -#define _REG_GP_IFMT_input_switch_lut_reg6 0x00030818 -#define _REG_GP_IFMT_input_switch_lut_reg7 0x0003081C -#define _REG_GP_IFMT_input_switch_fsync_lut 0x00030820 -#define _REG_GP_IFMT_srst 0x00030824 -#define _REG_GP_IFMT_slv_reg_srst 0x00030828 -#define _REG_GP_IFMT_input_switch_ch_id_fmt_type 0x0003082C - -/* @ GP_DEVICE_BASE */ -#define _REG_GP_SYNCGEN_ENABLE_ADDR 0x00090000 -#define _REG_GP_SYNCGEN_FREE_RUNNING_ADDR 0x00090004 -#define _REG_GP_SYNCGEN_PAUSE_ADDR 0x00090008 -#define _REG_GP_NR_FRAMES_ADDR 0x0009000C -#define _REG_GP_SYNGEN_NR_PIX_ADDR 0x00090010 -#define _REG_GP_SYNGEN_NR_LINES_ADDR 0x00090014 -#define _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR 0x00090018 -#define _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR 0x0009001C -#define _REG_GP_ISEL_SOF_ADDR 0x00090020 -#define _REG_GP_ISEL_EOF_ADDR 0x00090024 -#define _REG_GP_ISEL_SOL_ADDR 0x00090028 -#define _REG_GP_ISEL_EOL_ADDR 0x0009002C -#define _REG_GP_ISEL_LFSR_ENABLE_ADDR 0x00090030 -#define _REG_GP_ISEL_LFSR_ENABLE_B_ADDR 0x00090034 -#define _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR 0x00090038 -#define 
_REG_GP_ISEL_TPG_ENABLE_ADDR 0x0009003C -#define _REG_GP_ISEL_TPG_ENABLE_B_ADDR 0x00090040 -#define _REG_GP_ISEL_HOR_CNT_MASK_ADDR 0x00090044 -#define _REG_GP_ISEL_VER_CNT_MASK_ADDR 0x00090048 -#define _REG_GP_ISEL_XY_CNT_MASK_ADDR 0x0009004C -#define _REG_GP_ISEL_HOR_CNT_DELTA_ADDR 0x00090050 -#define _REG_GP_ISEL_VER_CNT_DELTA_ADDR 0x00090054 -#define _REG_GP_ISEL_TPG_MODE_ADDR 0x00090058 -#define _REG_GP_ISEL_TPG_RED1_ADDR 0x0009005C -#define _REG_GP_ISEL_TPG_GREEN1_ADDR 0x00090060 -#define _REG_GP_ISEL_TPG_BLUE1_ADDR 0x00090064 -#define _REG_GP_ISEL_TPG_RED2_ADDR 0x00090068 -#define _REG_GP_ISEL_TPG_GREEN2_ADDR 0x0009006C -#define _REG_GP_ISEL_TPG_BLUE2_ADDR 0x00090070 -#define _REG_GP_ISEL_CH_ID_ADDR 0x00090074 -#define _REG_GP_ISEL_FMT_TYPE_ADDR 0x00090078 -#define _REG_GP_ISEL_DATA_SEL_ADDR 0x0009007C -#define _REG_GP_ISEL_SBAND_SEL_ADDR 0x00090080 -#define _REG_GP_ISEL_SYNC_SEL_ADDR 0x00090084 -#define _REG_GP_SYNCGEN_HOR_CNT_ADDR 0x00090088 -#define _REG_GP_SYNCGEN_VER_CNT_ADDR 0x0009008C -#define _REG_GP_SYNCGEN_FRAME_CNT_ADDR 0x00090090 -#define _REG_GP_SOFT_RESET_ADDR 0x00090094 - - -#endif /* __GP_DEVICE_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_timer_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_timer_global.h deleted file mode 100644 index ee636ad6c5b3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_timer_global.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_TIMER_GLOBAL_H_INCLUDED__ -#define __GP_TIMER_GLOBAL_H_INCLUDED__ - -#include "hive_isp_css_defs.h" /*HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ */ - -/* from gp_timer_defs.h*/ -#define GP_TIMER_COUNT_TYPE_HIGH 0 -#define GP_TIMER_COUNT_TYPE_LOW 1 -#define GP_TIMER_COUNT_TYPE_POSEDGE 2 -#define GP_TIMER_COUNT_TYPE_NEGEDGE 3 -#define GP_TIMER_COUNT_TYPE_TYPES 4 - -/* timer - 3 is selected */ -#define GP_TIMER_SEL 3 - -/*HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ is selected*/ -#define GP_TIMER_SIGNAL_SELECT HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ - -#endif /* __GP_TIMER_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gpio_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gpio_global.h deleted file mode 100644 index a82ca2a8cada..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gpio_global.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GPIO_GLOBAL_H_INCLUDED__ -#define __GPIO_GLOBAL_H_INCLUDED__ - -#define IS_GPIO_VERSION_1 - -#include - -/* pqiao: following part only defines in hive_isp_css_defs.h in fpga system. 
- port it here -*/ - -/* GPIO pin defines */ -/*#define HIVE_GPIO_CAMERA_BOARD_RESET_PIN_NR 0 -#define HIVE_GPIO_LCD_CLOCK_SELECT_PIN_NR 7 -#define HIVE_GPIO_HDMI_CLOCK_SELECT_PIN_NR 8 -#define HIVE_GPIO_LCD_VERT_FLIP_PIN_NR 8 -#define HIVE_GPIO_LCD_HOR_FLIP_PIN_NR 9 -#define HIVE_GPIO_AS3683_GPIO_P0_PIN_NR 1 -#define HIVE_GPIO_AS3683_DATA_P1_PIN_NR 2 -#define HIVE_GPIO_AS3683_CLK_P2_PIN_NR 3 -#define HIVE_GPIO_AS3683_T1_F0_PIN_NR 4 -#define HIVE_GPIO_AS3683_SFL_F1_PIN_NR 5 -#define HIVE_GPIO_AS3683_STROBE_F2_PIN_NR 6 -#define HIVE_GPIO_MAX1577_EN1_PIN_NR 1 -#define HIVE_GPIO_MAX1577_EN2_PIN_NR 2 -#define HIVE_GPIO_MAX8685A_EN_PIN_NR 3 -#define HIVE_GPIO_MAX8685A_TRIG_PIN_NR 4*/ - -#define HIVE_GPIO_STROBE_TRIGGER_PIN 2 - -#endif /* __GPIO_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/hmem_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/hmem_global.h deleted file mode 100644 index 7e05d7d880d1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/hmem_global.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __HMEM_GLOBAL_H_INCLUDED__ -#define __HMEM_GLOBAL_H_INCLUDED__ - -#include - -#define IS_HMEM_VERSION_1 - -#include "isp.h" - -/* -#define ISP_HIST_ADDRESS_BITS 12 -#define ISP_HIST_ALIGNMENT 4 -#define ISP_HIST_COMP_IN_PREC 12 -#define ISP_HIST_DEPTH 1024 -#define ISP_HIST_WIDTH 24 -#define ISP_HIST_COMPONENTS 4 -*/ -#define ISP_HIST_ALIGNMENT_LOG2 2 - -#define HMEM_SIZE_LOG2 (ISP_HIST_ADDRESS_BITS-ISP_HIST_ALIGNMENT_LOG2) -#define HMEM_SIZE ISP_HIST_DEPTH - -#define HMEM_UNIT_SIZE (HMEM_SIZE/ISP_HIST_COMPONENTS) -#define HMEM_UNIT_COUNT ISP_HIST_COMPONENTS - -#define HMEM_RANGE_LOG2 ISP_HIST_WIDTH -#define HMEM_RANGE (1UL<head == debug_data_ptr->tail); -} - -STORAGE_CLASS_DEBUG_C hrt_data debug_dequeue(void) -{ - hrt_data value = 0; - - assert(debug_buffer_address != ((hrt_address)-1)); - - debug_synch_queue(); - - if (!is_debug_buffer_empty()) { - value = debug_data_ptr->buf[debug_data_ptr->head]; - debug_data_ptr->head = (debug_data_ptr->head + 1) & DEBUG_BUF_MASK; - sp_dmem_store_uint32(SP0_ID, debug_buffer_address + DEBUG_DATA_HEAD_ADDR, debug_data_ptr->head); - } - - return value; -} - -STORAGE_CLASS_DEBUG_C void debug_synch_queue(void) -{ - uint32_t remote_tail = sp_dmem_load_uint32(SP0_ID, debug_buffer_address + DEBUG_DATA_TAIL_ADDR); -/* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. 
This is easier */ - if (remote_tail > debug_data_ptr->tail) { - size_t delta = remote_tail - debug_data_ptr->tail; - sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t)); - } else if (remote_tail < debug_data_ptr->tail) { - size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail; - sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t)); - sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR, (void *)&(debug_data_ptr->buf[0]), remote_tail*sizeof(uint32_t)); - } /* else we are up to date */ - debug_data_ptr->tail = remote_tail; -} - -STORAGE_CLASS_DEBUG_C void debug_synch_queue_isp(void) -{ - uint32_t remote_tail = isp_dmem_load_uint32(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_TAIL_ADDR); -/* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. 
This is easier */ - if (remote_tail > debug_data_ptr->tail) { - size_t delta = remote_tail - debug_data_ptr->tail; - isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t)); - } else if (remote_tail < debug_data_ptr->tail) { - size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail; - isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t)); - isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR, (void *)&(debug_data_ptr->buf[0]), remote_tail*sizeof(uint32_t)); - } /* else we are up to date */ - debug_data_ptr->tail = remote_tail; -} - -STORAGE_CLASS_DEBUG_C void debug_synch_queue_ddr(void) -{ - uint32_t remote_tail; - - mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_TAIL_DDR_ADDR, &remote_tail, sizeof(uint32_t)); -/* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. 
This is easier */ - if (remote_tail > debug_data_ptr->tail) { - size_t delta = remote_tail - debug_data_ptr->tail; - mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t)); - } else if (remote_tail < debug_data_ptr->tail) { - size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail; - mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t)); - mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR, (void *)&(debug_data_ptr->buf[0]), remote_tail*sizeof(uint32_t)); - } /* else we are up to date */ - debug_data_ptr->tail = remote_tail; -} - -#endif /* __DEBUG_PRIVATE_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c deleted file mode 100644 index 770db7dff5d3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2016, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include - -#include "dma.h" - -#include "assert_support.h" - -#ifndef __INLINE_DMA__ -#include "dma_private.h" -#endif /* __INLINE_DMA__ */ - -void dma_get_state(const dma_ID_t ID, dma_state_t *state) -{ - int i; - hrt_data tmp; - - assert(ID < N_DMA_ID); - assert(state != NULL); - - tmp = dma_reg_load(ID, DMA_COMMAND_FSM_REG_IDX); - //reg [3:0] : flags error [3], stall, run, idle [0] - //reg [9:4] : command - //reg[14:10] : channel - //reg [23:15] : param - state->fsm_command_idle = tmp & 0x1; - state->fsm_command_run = tmp & 0x2; - state->fsm_command_stalling = tmp & 0x4; - state->fsm_command_error = tmp & 0x8; - state->last_command_channel = (tmp>>10 & 0x1F); - state->last_command_param = (tmp>>15 & 0x0F); - tmp = (tmp>>4) & 0x3F; -/* state->last_command = (dma_commands_t)tmp; */ -/* if the enumerator is made non-linear */ - /* AM: the list below does not cover all the cases*/ - /* and these are not correct */ - /* therefore for just dumpinmg this command*/ - state->last_command = tmp; - -/* - if (tmp == 0) - state->last_command = DMA_COMMAND_READ; - if (tmp == 1) - state->last_command = DMA_COMMAND_WRITE; - if (tmp == 2) - state->last_command = DMA_COMMAND_SET_CHANNEL; - if (tmp == 3) - state->last_command = DMA_COMMAND_SET_PARAM; - if (tmp == 4) - state->last_command = DMA_COMMAND_READ_SPECIFIC; - if (tmp == 5) - state->last_command = DMA_COMMAND_WRITE_SPECIFIC; - if (tmp == 8) - state->last_command = DMA_COMMAND_INIT; - if (tmp == 12) - state->last_command = DMA_COMMAND_INIT_SPECIFIC; - if (tmp == 15) - state->last_command = DMA_COMMAND_RST; -*/ - -/* No sub-fields, idx = 0 */ - state->current_command = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_CMD_IDX)); - state->current_addr_a = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_A_IDX)); - state->current_addr_b = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_B_IDX)); - - tmp = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX, - 
_DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_idle = tmp & 0x1; - state->fsm_ctrl_run = tmp & 0x2; - state->fsm_ctrl_stalling = tmp & 0x4; - state->fsm_ctrl_error = tmp & 0x8; - tmp = tmp >> 4; -/* state->fsm_ctrl_state = (dma_ctrl_states_t)tmp; */ - if (tmp == 0) - state->fsm_ctrl_state = DMA_CTRL_STATE_IDLE; - if (tmp == 1) - state->fsm_ctrl_state = DMA_CTRL_STATE_REQ_RCV; - if (tmp == 2) - state->fsm_ctrl_state = DMA_CTRL_STATE_RCV; - if (tmp == 3) - state->fsm_ctrl_state = DMA_CTRL_STATE_RCV_REQ; - if (tmp == 4) - state->fsm_ctrl_state = DMA_CTRL_STATE_INIT; - state->fsm_ctrl_source_dev = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_source_addr = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_source_stride = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_source_width = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_source_height = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_pack_source_dev = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_pack_dest_dev = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_dest_addr = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_dest_stride = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_pack_source_width = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - 
_DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_pack_dest_height = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_pack_dest_width = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_pack_source_elems = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_pack_dest_elems = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - state->fsm_ctrl_pack_extension = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX, - _DMA_FSM_GROUP_FSM_CTRL_IDX)); - - tmp = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_PACK_STATE_IDX, - _DMA_FSM_GROUP_FSM_PACK_IDX)); - state->pack_idle = tmp & 0x1; - state->pack_run = tmp & 0x2; - state->pack_stalling = tmp & 0x4; - state->pack_error = tmp & 0x8; - state->pack_cnt_height = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX, - _DMA_FSM_GROUP_FSM_PACK_IDX)); - state->pack_src_cnt_width = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX, - _DMA_FSM_GROUP_FSM_PACK_IDX)); - state->pack_dest_cnt_width = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX, - _DMA_FSM_GROUP_FSM_PACK_IDX)); - - tmp = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_REQ_STATE_IDX, - _DMA_FSM_GROUP_FSM_REQ_IDX)); -/* state->read_state = (dma_rw_states_t)tmp; */ - if (tmp == 0) - state->read_state = DMA_RW_STATE_IDLE; - if (tmp == 1) - state->read_state = DMA_RW_STATE_REQ; - if (tmp == 2) - state->read_state = DMA_RW_STATE_NEXT_LINE; - if (tmp == 3) - state->read_state = DMA_RW_STATE_UNLOCK_CHANNEL; - state->read_cnt_height = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - 
_DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX, - _DMA_FSM_GROUP_FSM_REQ_IDX)); - state->read_cnt_width = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX, - _DMA_FSM_GROUP_FSM_REQ_IDX)); - - tmp = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_WR_STATE_IDX, - _DMA_FSM_GROUP_FSM_WR_IDX)); -/* state->write_state = (dma_rw_states_t)tmp; */ - if (tmp == 0) - state->write_state = DMA_RW_STATE_IDLE; - if (tmp == 1) - state->write_state = DMA_RW_STATE_REQ; - if (tmp == 2) - state->write_state = DMA_RW_STATE_NEXT_LINE; - if (tmp == 3) - state->write_state = DMA_RW_STATE_UNLOCK_CHANNEL; - state->write_height = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX, - _DMA_FSM_GROUP_FSM_WR_IDX)); - state->write_width = dma_reg_load(ID, - DMA_CG_INFO_REG_IDX( - _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX, - _DMA_FSM_GROUP_FSM_WR_IDX)); - - for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) { - dma_port_state_t *port = &(state->port_states[i]); - - tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(0, i)); - port->req_cs = ((tmp & 0x1) != 0); - port->req_we_n = ((tmp & 0x2) != 0); - port->req_run = ((tmp & 0x4) != 0); - port->req_ack = ((tmp & 0x8) != 0); - - tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(1, i)); - port->send_cs = ((tmp & 0x1) != 0); - port->send_we_n = ((tmp & 0x2) != 0); - port->send_run = ((tmp & 0x4) != 0); - port->send_ack = ((tmp & 0x8) != 0); - - tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(2, i)); - if (tmp & 0x1) - port->fifo_state = DMA_FIFO_STATE_WILL_BE_FULL; - if (tmp & 0x2) - port->fifo_state = DMA_FIFO_STATE_FULL; - if (tmp & 0x4) - port->fifo_state = DMA_FIFO_STATE_EMPTY; - port->fifo_counter = tmp >> 3; - } - - for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) { - dma_channel_state_t *ch = &(state->channel_states[i]); - - ch->connection = DMA_GET_CONNECTION(dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_PACKING_SETUP_PARAM))); - ch->sign_extend = DMA_GET_EXTENSION(dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - 
_DMA_PACKING_SETUP_PARAM))); - ch->height = dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_HEIGHT_PARAM)); - ch->stride_a = dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_STRIDE_A_PARAM)); - ch->elems_a = DMA_GET_ELEMENTS(dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_ELEM_CROPPING_A_PARAM))); - ch->cropping_a = DMA_GET_CROPPING(dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_ELEM_CROPPING_A_PARAM))); - ch->width_a = dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_WIDTH_A_PARAM)); - ch->stride_b = dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_STRIDE_B_PARAM)); - ch->elems_b = DMA_GET_ELEMENTS(dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_ELEM_CROPPING_B_PARAM))); - ch->cropping_b = DMA_GET_CROPPING(dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_ELEM_CROPPING_B_PARAM))); - ch->width_b = dma_reg_load(ID, - DMA_CHANNEL_PARAM_REG_IDX(i, - _DMA_WIDTH_B_PARAM)); - } -} - -void -dma_set_max_burst_size(const dma_ID_t ID, dma_connection conn, - uint32_t max_burst_size) -{ - assert(ID < N_DMA_ID); - assert(max_burst_size > 0); - dma_reg_store(ID, DMA_DEV_INFO_REG_IDX(_DMA_DEV_INTERF_MAX_BURST_IDX, conn), - max_burst_size - 1); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_local.h deleted file mode 100644 index ab631e6f64b5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_local.h +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __DMA_LOCAL_H_INCLUDED__ -#define __DMA_LOCAL_H_INCLUDED__ - -#include -#include "dma_global.h" - -#include /* HRTCAT() */ -#include /* _hrt_get_bits() */ -#include /* HIVE_DMA_NUM_CHANNELS */ -#include - -#define _DMA_FSM_GROUP_CMD_IDX _DMA_V2_FSM_GROUP_CMD_IDX -#define _DMA_FSM_GROUP_ADDR_A_IDX _DMA_V2_FSM_GROUP_ADDR_SRC_IDX -#define _DMA_FSM_GROUP_ADDR_B_IDX _DMA_V2_FSM_GROUP_ADDR_DEST_IDX - -#define _DMA_FSM_GROUP_CMD_CTRL_IDX _DMA_V2_FSM_GROUP_CMD_CTRL_IDX - -#define _DMA_FSM_GROUP_FSM_CTRL_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 
_DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX -#define _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX - -#define _DMA_FSM_GROUP_FSM_PACK_IDX _DMA_V2_FSM_GROUP_FSM_PACK_IDX -#define _DMA_FSM_GROUP_FSM_PACK_STATE_IDX _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX -#define _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX -#define _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX -#define _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX - -#define _DMA_FSM_GROUP_FSM_REQ_IDX _DMA_V2_FSM_GROUP_FSM_REQ_IDX -#define _DMA_FSM_GROUP_FSM_REQ_STATE_IDX _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX -#define _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX -#define _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX - -#define _DMA_FSM_GROUP_FSM_WR_IDX _DMA_V2_FSM_GROUP_FSM_WR_IDX -#define _DMA_FSM_GROUP_FSM_WR_STATE_IDX _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX -#define _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX -#define _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX - -#define _DMA_DEV_INTERF_MAX_BURST_IDX _DMA_V2_DEV_INTERF_MAX_BURST_IDX - -/* - * Macro's to compute the DMA parameter register indices - */ -#define DMA_SEL_COMP(comp) (((comp) & _hrt_ones(_DMA_V2_ADDR_SEL_COMP_BITS)) << _DMA_V2_ADDR_SEL_COMP_IDX) -#define DMA_SEL_CH(ch) (((ch) & _hrt_ones(_DMA_V2_ADDR_SEL_CH_REG_BITS)) << _DMA_V2_ADDR_SEL_CH_REG_IDX) -#define DMA_SEL_PARAM(param) (((param) & _hrt_ones(_DMA_V2_ADDR_SEL_PARAM_BITS)) << _DMA_V2_ADDR_SEL_PARAM_IDX) -/* CG = Connection Group */ -#define DMA_SEL_CG_INFO(info) (((info) & _hrt_ones(_DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS)) << _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX) -#define DMA_SEL_CG_COMP(comp) (((comp) & _hrt_ones(_DMA_V2_ADDR_SEL_GROUP_COMP_BITS)) << _DMA_V2_ADDR_SEL_GROUP_COMP_IDX) 
-#define DMA_SEL_DEV_INFO(info) (((info) & _hrt_ones(_DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS)) << _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX) -#define DMA_SEL_DEV_ID(dev) (((dev) & _hrt_ones(_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)) << _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX) - -#define DMA_COMMAND_FSM_REG_IDX (DMA_SEL_COMP(_DMA_V2_SEL_FSM_CMD) >> 2) -#define DMA_CHANNEL_PARAM_REG_IDX(ch, param) ((DMA_SEL_COMP(_DMA_V2_SEL_CH_REG) | DMA_SEL_CH(ch) | DMA_SEL_PARAM(param)) >> 2) -#define DMA_CG_INFO_REG_IDX(info_id, comp_id) ((DMA_SEL_COMP(_DMA_V2_SEL_CONN_GROUP) | DMA_SEL_CG_INFO(info_id) | DMA_SEL_CG_COMP(comp_id)) >> 2) -#define DMA_DEV_INFO_REG_IDX(info_id, dev_id) ((DMA_SEL_COMP(_DMA_V2_SEL_DEV_INTERF) | DMA_SEL_DEV_INFO(info_id) | DMA_SEL_DEV_ID(dev_id)) >> 2) -#define DMA_RST_REG_IDX (DMA_SEL_COMP(_DMA_V2_SEL_RESET) >> 2) - -#define DMA_GET_CONNECTION(val) _hrt_get_bits(val, _DMA_V2_CONNECTION_IDX, _DMA_V2_CONNECTION_BITS) -#define DMA_GET_EXTENSION(val) _hrt_get_bits(val, _DMA_V2_EXTENSION_IDX, _DMA_V2_EXTENSION_BITS) -#define DMA_GET_ELEMENTS(val) _hrt_get_bits(val, _DMA_V2_ELEMENTS_IDX, _DMA_V2_ELEMENTS_BITS) -#define DMA_GET_CROPPING(val) _hrt_get_bits(val, _DMA_V2_LEFT_CROPPING_IDX, _DMA_V2_LEFT_CROPPING_BITS) - -typedef enum { - DMA_CTRL_STATE_IDLE, - DMA_CTRL_STATE_REQ_RCV, - DMA_CTRL_STATE_RCV, - DMA_CTRL_STATE_RCV_REQ, - DMA_CTRL_STATE_INIT, - N_DMA_CTRL_STATES -} dma_ctrl_states_t; - -typedef enum { - DMA_COMMAND_READ, - DMA_COMMAND_WRITE, - DMA_COMMAND_SET_CHANNEL, - DMA_COMMAND_SET_PARAM, - DMA_COMMAND_READ_SPECIFIC, - DMA_COMMAND_WRITE_SPECIFIC, - DMA_COMMAND_INIT, - DMA_COMMAND_INIT_SPECIFIC, - DMA_COMMAND_RST, - N_DMA_COMMANDS -} dma_commands_t; - -typedef enum { - DMA_RW_STATE_IDLE, - DMA_RW_STATE_REQ, - DMA_RW_STATE_NEXT_LINE, - DMA_RW_STATE_UNLOCK_CHANNEL, - N_DMA_RW_STATES -} dma_rw_states_t; - -typedef enum { - DMA_FIFO_STATE_WILL_BE_FULL, - DMA_FIFO_STATE_FULL, - DMA_FIFO_STATE_EMPTY, - N_DMA_FIFO_STATES -} dma_fifo_states_t; - -/* typedef struct 
dma_state_s dma_state_t; */ -typedef struct dma_channel_state_s dma_channel_state_t; -typedef struct dma_port_state_s dma_port_state_t; - -struct dma_port_state_s { - bool req_cs; - bool req_we_n; - bool req_run; - bool req_ack; - bool send_cs; - bool send_we_n; - bool send_run; - bool send_ack; - dma_fifo_states_t fifo_state; - int fifo_counter; -}; - -struct dma_channel_state_s { - int connection; - bool sign_extend; - int height; - int stride_a; - int elems_a; - int cropping_a; - int width_a; - int stride_b; - int elems_b; - int cropping_b; - int width_b; -}; - -struct dma_state_s { - bool fsm_command_idle; - bool fsm_command_run; - bool fsm_command_stalling; - bool fsm_command_error; - dma_commands_t last_command; - int last_command_channel; - int last_command_param; - dma_commands_t current_command; - int current_addr_a; - int current_addr_b; - bool fsm_ctrl_idle; - bool fsm_ctrl_run; - bool fsm_ctrl_stalling; - bool fsm_ctrl_error; - dma_ctrl_states_t fsm_ctrl_state; - int fsm_ctrl_source_dev; - int fsm_ctrl_source_addr; - int fsm_ctrl_source_stride; - int fsm_ctrl_source_width; - int fsm_ctrl_source_height; - int fsm_ctrl_pack_source_dev; - int fsm_ctrl_pack_dest_dev; - int fsm_ctrl_dest_addr; - int fsm_ctrl_dest_stride; - int fsm_ctrl_pack_source_width; - int fsm_ctrl_pack_dest_height; - int fsm_ctrl_pack_dest_width; - int fsm_ctrl_pack_source_elems; - int fsm_ctrl_pack_dest_elems; - int fsm_ctrl_pack_extension; - int pack_idle; - int pack_run; - int pack_stalling; - int pack_error; - int pack_cnt_height; - int pack_src_cnt_width; - int pack_dest_cnt_width; - dma_rw_states_t read_state; - int read_cnt_height; - int read_cnt_width; - dma_rw_states_t write_state; - int write_height; - int write_width; - dma_port_state_t port_states[HIVE_ISP_NUM_DMA_CONNS]; - dma_channel_state_t channel_states[HIVE_DMA_NUM_CHANNELS]; -}; - -#endif /* __DMA_LOCAL_H_INCLUDED__ */ diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_private.h deleted file mode 100644 index ba54b1f0467b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_private.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __DMA_PRIVATE_H_INCLUDED__ -#define __DMA_PRIVATE_H_INCLUDED__ - -#include "dma_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -STORAGE_CLASS_DMA_C void dma_reg_store(const dma_ID_t ID, - const unsigned int reg, - const hrt_data value) -{ - assert(ID < N_DMA_ID); - assert(DMA_BASE[ID] != (hrt_address)-1); - ia_css_device_store_uint32(DMA_BASE[ID] + reg*sizeof(hrt_data), value); -} - -STORAGE_CLASS_DMA_C hrt_data dma_reg_load(const dma_ID_t ID, - const unsigned int reg) -{ - assert(ID < N_DMA_ID); - assert(DMA_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(DMA_BASE[ID] + reg*sizeof(hrt_data)); -} - -#endif /* __DMA_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo.c deleted file mode 100644 index 777670948d6f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo.c +++ /dev/null 
@@ -1,19 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "event_fifo.h" - -#ifndef __INLINE_EVENT__ -#include "event_fifo_private.h" -#endif /* __INLINE_EVENT__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_local.h deleted file mode 100644 index c595692c6ea9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_local.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _EVENT_FIFO_LOCAL_H -#define _EVENT_FIFO_LOCAL_H - -/* - * All events come from connections mapped on the system - * bus but do not use a global IRQ - */ -#include "event_fifo_global.h" - -typedef enum { - SP0_EVENT_ID, - ISP0_EVENT_ID, - STR2MIPI_EVENT_ID, - N_EVENT_ID -} event_ID_t; - -#define EVENT_QUERY_BIT 0 - -/* Events are read from FIFO */ -static const hrt_address event_source_addr[N_EVENT_ID] = { - 0x0000000000380000ULL, - 0x0000000000380004ULL, - 0xffffffffffffffffULL}; - -/* Read from FIFO are blocking, query data availability */ -static const hrt_address event_source_query_addr[N_EVENT_ID] = { - 0x0000000000380010ULL, - 0x0000000000380014ULL, - 0xffffffffffffffffULL}; - -/* Events are written to FIFO */ -static const hrt_address event_sink_addr[N_EVENT_ID] = { - 0x0000000000380008ULL, - 0x000000000038000CULL, - 0x0000000000090104ULL}; - -/* Writes to FIFO are blocking, query data space */ -static const hrt_address event_sink_query_addr[N_EVENT_ID] = { - 0x0000000000380018ULL, - 0x000000000038001CULL, - 0x000000000009010CULL}; - -#endif /* _EVENT_FIFO_LOCAL_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h deleted file mode 100644 index bcfb734c2ed3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __EVENT_FIFO_PRIVATE_H -#define __EVENT_FIFO_PRIVATE_H - -#include "event_fifo_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -#include /* _hrt_get_bits() */ - -STORAGE_CLASS_EVENT_C void event_wait_for(const event_ID_t ID) -{ - assert(ID < N_EVENT_ID); - assert(event_source_addr[ID] != ((hrt_address)-1)); - (void)ia_css_device_load_uint32(event_source_addr[ID]); - return; -} - -STORAGE_CLASS_EVENT_C void cnd_event_wait_for(const event_ID_t ID, - const bool cnd) -{ - if (cnd) { - event_wait_for(ID); - } -} - -STORAGE_CLASS_EVENT_C hrt_data event_receive_token(const event_ID_t ID) -{ - assert(ID < N_EVENT_ID); - assert(event_source_addr[ID] != ((hrt_address)-1)); - return ia_css_device_load_uint32(event_source_addr[ID]); -} - -STORAGE_CLASS_EVENT_C void event_send_token(const event_ID_t ID, - const hrt_data token) -{ - assert(ID < N_EVENT_ID); - assert(event_sink_addr[ID] != ((hrt_address)-1)); - ia_css_device_store_uint32(event_sink_addr[ID], token); -} - -STORAGE_CLASS_EVENT_C bool is_event_pending(const event_ID_t ID) -{ - hrt_data value; - assert(ID < N_EVENT_ID); - assert(event_source_query_addr[ID] != ((hrt_address)-1)); - value = ia_css_device_load_uint32(event_source_query_addr[ID]); - return !_hrt_get_bit(value, EVENT_QUERY_BIT); -} - -STORAGE_CLASS_EVENT_C bool can_event_send_token(const event_ID_t ID) -{ - hrt_data value; - assert(ID < N_EVENT_ID); - assert(event_sink_query_addr[ID] != ((hrt_address)-1)); - value = ia_css_device_load_uint32(event_sink_query_addr[ID]); - return !_hrt_get_bit(value, EVENT_QUERY_BIT); -} - -#endif /* __EVENT_FIFO_PRIVATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c deleted file mode 100644 index 1bf292401adc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c +++ /dev/null @@ -1,567 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "fifo_monitor.h" - -#include -#include "device_access.h" - -#include - -#include "gp_device.h" - -#include "assert_support.h" - -#ifndef __INLINE_FIFO_MONITOR__ -#define STORAGE_CLASS_FIFO_MONITOR_DATA static const -#else -#define STORAGE_CLASS_FIFO_MONITOR_DATA const -#endif /* __INLINE_FIFO_MONITOR__ */ - -STORAGE_CLASS_FIFO_MONITOR_DATA unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH] = { - _REG_GP_SWITCH_IF_ADDR, - _REG_GP_SWITCH_GDC1_ADDR, - _REG_GP_SWITCH_GDC2_ADDR}; - -#ifndef __INLINE_FIFO_MONITOR__ -#include "fifo_monitor_private.h" -#endif /* __INLINE_FIFO_MONITOR__ */ - -static inline bool fifo_monitor_status_valid ( - const fifo_monitor_ID_t ID, - const unsigned int reg, - const unsigned int port_id); - -static inline bool fifo_monitor_status_accept( - const fifo_monitor_ID_t ID, - const unsigned int reg, - const unsigned int port_id); - - -void fifo_channel_get_state( - const fifo_monitor_ID_t ID, - const fifo_channel_t channel_id, - fifo_channel_state_t *state) -{ - assert(channel_id < N_FIFO_CHANNEL); - assert(state != NULL); - - switch (channel_id) { - case FIFO_CHANNEL_ISP0_TO_SP0: - state->src_valid = 
fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_SP); /* ISP_STR_MON_PORT_ISP2SP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_SP); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_ISP); /* ISP_STR_MON_PORT_SP2ISP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_ISP); - break; - case FIFO_CHANNEL_SP0_TO_ISP0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_ISP); /* ISP_STR_MON_PORT_SP2ISP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_ISP); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_SP); /* ISP_STR_MON_PORT_ISP2SP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_SP); - break; - case FIFO_CHANNEL_ISP0_TO_IF0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_PIF_A); /* ISP_STR_MON_PORT_ISP2PIFA */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_PIF_A); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_PIF_A); - break; - case FIFO_CHANNEL_IF0_TO_ISP0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_PIF_A); - state->fifo_valid = 
fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_PIF_A); /* ISP_STR_MON_PORT_PIFA2ISP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_PIF_A); - break; - case FIFO_CHANNEL_ISP0_TO_IF1: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_PIF_B); /* ISP_STR_MON_PORT_ISP2PIFA */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_PIF_B); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_PIF_B); - break; - case FIFO_CHANNEL_IF1_TO_ISP0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_PIF_B); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_PIF_B); /* ISP_STR_MON_PORT_PIFB2ISP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_PIF_B); - break; - case FIFO_CHANNEL_ISP0_TO_DMA0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_DMA); /* ISP_STR_MON_PORT_ISP2DMA */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_DMA); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_DMA_FR_ISP); /* MOD_STR_MON_PORT_ISP2DMA */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - 
MOD_STR_MON_PORT_RCV_DMA_FR_ISP); - break; - case FIFO_CHANNEL_DMA0_TO_ISP0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_DMA2ISP); /* MOD_STR_MON_PORT_DMA2ISP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_DMA2ISP); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_DMA); /* ISP_STR_MON_PORT_DMA2ISP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_DMA); - break; - case FIFO_CHANNEL_ISP0_TO_GDC0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_GDC); /* ISP_STR_MON_PORT_ISP2GDC1 */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_GDC); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_GDC); /* MOD_STR_MON_PORT_CELLS2GDC1 */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_GDC); - break; - case FIFO_CHANNEL_GDC0_TO_ISP0: - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_GDC); /* MOD_STR_MON_PORT_GDC12CELLS */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_GDC); - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_GDC); /* ISP_STR_MON_PORT_GDC12ISP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_GDC); - break; - case FIFO_CHANNEL_ISP0_TO_GDC1: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_ISP2GDC2); - state->fifo_accept = fifo_monitor_status_accept(ID, - 
HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_ISP2GDC2); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_CELLS2GDC2); - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_CELLS2GDC2); - break; - case FIFO_CHANNEL_GDC1_TO_ISP0: - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_GDC22CELLS); - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_GDC22CELLS); - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_GDC22ISP); - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_GDC22ISP); - break; - case FIFO_CHANNEL_ISP0_TO_HOST0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_GPD); /* ISP_STR_MON_PORT_ISP2GPD */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_SND_GPD); - { - hrt_data value = ia_css_device_load_uint32(0x0000000000380014ULL); - state->fifo_valid = !_hrt_get_bit(value, 0); - state->sink_accept = false; /* no monitor connected */ - } - break; - case FIFO_CHANNEL_HOST0_TO_ISP0: - { - hrt_data value = ia_css_device_load_uint32(0x000000000038001CULL); - state->fifo_valid = false; /* no monitor connected */ - state->sink_accept = !_hrt_get_bit(value, 0); - } - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_GPD); /* ISP_STR_MON_PORT_FA2ISP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_ISP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_GPD); - break; - case FIFO_CHANNEL_SP0_TO_IF0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_PIF_A); /* SP_STR_MON_PORT_SP2PIFA */ - 
state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_PIF_A); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_PIF_A); - break; - case FIFO_CHANNEL_IF0_TO_SP0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_PIF_A); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_PIF_A); /* SP_STR_MON_PORT_PIFA2SP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_PIF_A); - break; - case FIFO_CHANNEL_SP0_TO_IF1: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_PIF_B); /* SP_STR_MON_PORT_SP2PIFB */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_PIF_B); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_PIF_B); - break; - case FIFO_CHANNEL_IF1_TO_SP0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_PIF_B); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_PIF_B); /* 
SP_STR_MON_PORT_PIFB2SP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - ISP_STR_MON_PORT_RCV_PIF_B); - break; - case FIFO_CHANNEL_SP0_TO_IF2: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_SIF); /* SP_STR_MON_PORT_SP2SIF */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_SIF); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_SIF); /* MOD_STR_MON_PORT_SP2SIF */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_SIF); - break; - case FIFO_CHANNEL_IF2_TO_SP0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_SIF); /* MOD_STR_MON_PORT_SIF2SP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_SIF); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_SIF); /* SP_STR_MON_PORT_SIF2SP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_SIF); - break; - case FIFO_CHANNEL_SP0_TO_DMA0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_DMA); /* SP_STR_MON_PORT_SP2DMA */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_DMA); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_DMA_FR_SP); /* MOD_STR_MON_PORT_SP2DMA */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_DMA_FR_SP); - break; - case FIFO_CHANNEL_DMA0_TO_SP0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - 
MOD_STR_MON_PORT_SND_DMA2SP); /* MOD_STR_MON_PORT_DMA2SP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_DMA2SP); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_DMA); /* SP_STR_MON_PORT_DMA2SP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_DMA); - break; - case FIFO_CHANNEL_SP0_TO_GDC0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, - SP_STR_MON_PORT_B_SP2GDC1); - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, - SP_STR_MON_PORT_B_SP2GDC1); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_CELLS2GDC1); - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_CELLS2GDC1); - break; - case FIFO_CHANNEL_GDC0_TO_SP0: - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_GDC12CELLS); - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_GDC12CELLS); - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, - SP_STR_MON_PORT_B_GDC12SP); - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, - SP_STR_MON_PORT_B_GDC12SP); - break; - case FIFO_CHANNEL_SP0_TO_GDC1: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, - SP_STR_MON_PORT_B_SP2GDC2); - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, - SP_STR_MON_PORT_B_SP2GDC2); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_CELLS2GDC2); - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - 
MOD_STR_MON_PORT_CELLS2GDC2); - break; - case FIFO_CHANNEL_GDC1_TO_SP0: - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_GDC22CELLS); - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_GDC22CELLS); - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, - SP_STR_MON_PORT_B_GDC22SP); - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, - SP_STR_MON_PORT_B_GDC22SP); - break; - case FIFO_CHANNEL_SP0_TO_HOST0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_GPD); /* SP_STR_MON_PORT_SP2GPD */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_GPD); - { - hrt_data value = ia_css_device_load_uint32(0x0000000000380010ULL); - state->fifo_valid = !_hrt_get_bit(value, 0); - state->sink_accept = false; /* no monitor connected */ - } - break; - case FIFO_CHANNEL_HOST0_TO_SP0: - { - hrt_data value = ia_css_device_load_uint32(0x0000000000380018ULL); - state->fifo_valid = false; /* no monitor connected */ - state->sink_accept = !_hrt_get_bit(value, 0); - } - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_GPD); /* SP_STR_MON_PORT_FA2SP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_GPD); - break; - case FIFO_CHANNEL_SP0_TO_STREAM2MEM0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_SP2MC */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SND_MC); - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_SP2MC */ - state->sink_accept = 
fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_RCV_MC); - break; - case FIFO_CHANNEL_STREAM2MEM0_TO_SP0: - state->fifo_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_MC2SP */ - state->sink_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_MOD_STREAM_STAT_IDX, - MOD_STR_MON_PORT_SND_MC); - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_MC2SP */ - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_RCV_MC); - break; - case FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0: - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SP2ISYS); - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_SP2ISYS); - state->fifo_valid = false; - state->sink_accept = false; - break; - case FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0: - state->fifo_valid = false; - state->sink_accept = false; - state->src_valid = fifo_monitor_status_valid(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_ISYS2SP); - state->fifo_accept = fifo_monitor_status_accept(ID, - HIVE_GP_REGS_SP_STREAM_STAT_IDX, - SP_STR_MON_PORT_ISYS2SP); - break; - default: - assert(0); - break; - } - - return; -} - -void fifo_switch_get_state( - const fifo_monitor_ID_t ID, - const fifo_switch_t switch_id, - fifo_switch_state_t *state) -{ - hrt_data data = (hrt_data)-1; - - assert(ID == FIFO_MONITOR0_ID); - assert(switch_id < N_FIFO_SWITCH); - assert(state != NULL); - - (void)ID; - - data = gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]); - - state->is_none = (data == HIVE_ISP_CSS_STREAM_SWITCH_NONE); - state->is_sp = (data == HIVE_ISP_CSS_STREAM_SWITCH_SP); - state->is_isp = (data == HIVE_ISP_CSS_STREAM_SWITCH_ISP); - - return; -} - -void fifo_monitor_get_state( - const 
fifo_monitor_ID_t ID, - fifo_monitor_state_t *state) -{ - fifo_channel_t ch_id; - fifo_switch_t sw_id; - - assert(ID < N_FIFO_MONITOR_ID); - assert(state != NULL); - - for (ch_id = 0; ch_id < N_FIFO_CHANNEL; ch_id++) { - fifo_channel_get_state(ID, ch_id, - &(state->fifo_channels[ch_id])); - } - - for (sw_id = 0; sw_id < N_FIFO_SWITCH; sw_id++) { - fifo_switch_get_state(ID, sw_id, - &(state->fifo_switches[sw_id])); - } - return; -} - -static inline bool fifo_monitor_status_valid ( - const fifo_monitor_ID_t ID, - const unsigned int reg, - const unsigned int port_id) -{ - hrt_data data = fifo_monitor_reg_load(ID, reg); - - return (data >> (((port_id * 2) + _hive_str_mon_valid_offset))) & 0x1; -} - -static inline bool fifo_monitor_status_accept( - const fifo_monitor_ID_t ID, - const unsigned int reg, - const unsigned int port_id) -{ - hrt_data data = fifo_monitor_reg_load(ID, reg); - - return (data >> (((port_id * 2) + _hive_str_mon_accept_offset))) & 0x1; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_local.h deleted file mode 100644 index ed2f86181788..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_local.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __FIFO_MONITOR_LOCAL_H_INCLUDED__ -#define __FIFO_MONITOR_LOCAL_H_INCLUDED__ - -#include -#include "fifo_monitor_global.h" - -#include "hive_isp_css_defs.h" /* ISP_STR_MON_PORT_SND_SP, ... */ - -#define _hive_str_mon_valid_offset 0 -#define _hive_str_mon_accept_offset 1 - -#define FIFO_CHANNEL_SP_VALID_MASK 0x55555555 -#define FIFO_CHANNEL_SP_VALID_B_MASK 0x00000055 -#define FIFO_CHANNEL_ISP_VALID_MASK 0x15555555 -#define FIFO_CHANNEL_MOD_VALID_MASK 0x55555555 - -typedef enum fifo_switch { - FIFO_SWITCH_IF, - FIFO_SWITCH_GDC0, - FIFO_SWITCH_GDC1, - N_FIFO_SWITCH -} fifo_switch_t; - -typedef enum fifo_channel { - FIFO_CHANNEL_ISP0_TO_SP0, - FIFO_CHANNEL_SP0_TO_ISP0, - FIFO_CHANNEL_ISP0_TO_IF0, - FIFO_CHANNEL_IF0_TO_ISP0, - FIFO_CHANNEL_ISP0_TO_IF1, - FIFO_CHANNEL_IF1_TO_ISP0, - FIFO_CHANNEL_ISP0_TO_DMA0, - FIFO_CHANNEL_DMA0_TO_ISP0, - FIFO_CHANNEL_ISP0_TO_GDC0, - FIFO_CHANNEL_GDC0_TO_ISP0, - FIFO_CHANNEL_ISP0_TO_GDC1, - FIFO_CHANNEL_GDC1_TO_ISP0, - FIFO_CHANNEL_ISP0_TO_HOST0, - FIFO_CHANNEL_HOST0_TO_ISP0, - FIFO_CHANNEL_SP0_TO_IF0, - FIFO_CHANNEL_IF0_TO_SP0, - FIFO_CHANNEL_SP0_TO_IF1, - FIFO_CHANNEL_IF1_TO_SP0, - FIFO_CHANNEL_SP0_TO_IF2, - FIFO_CHANNEL_IF2_TO_SP0, - FIFO_CHANNEL_SP0_TO_DMA0, - FIFO_CHANNEL_DMA0_TO_SP0, - FIFO_CHANNEL_SP0_TO_GDC0, - FIFO_CHANNEL_GDC0_TO_SP0, - FIFO_CHANNEL_SP0_TO_GDC1, - FIFO_CHANNEL_GDC1_TO_SP0, - FIFO_CHANNEL_SP0_TO_HOST0, - FIFO_CHANNEL_HOST0_TO_SP0, - FIFO_CHANNEL_SP0_TO_STREAM2MEM0, - FIFO_CHANNEL_STREAM2MEM0_TO_SP0, - FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0, - FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0, -/* - * No clue what this is - * - FIFO_CHANNEL_SP0_TO_IRQ0, - FIFO_CHANNEL_IRQ0_TO_SP0, - */ - N_FIFO_CHANNEL -} fifo_channel_t; - -struct fifo_channel_state_s { - bool src_valid; - bool fifo_accept; - bool fifo_valid; - bool sink_accept; -}; - -/* The switch is tri-state */ -struct fifo_switch_state_s { - bool is_none; - bool is_isp; - bool is_sp; -}; - -struct fifo_monitor_state_s { - struct fifo_channel_state_s 
fifo_channels[N_FIFO_CHANNEL]; - struct fifo_switch_state_s fifo_switches[N_FIFO_SWITCH]; -}; - -#endif /* __FIFO_MONITOR_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h deleted file mode 100644 index d58cd7d1828d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __FIFO_MONITOR_PRIVATE_H_INCLUDED__ -#define __FIFO_MONITOR_PRIVATE_H_INCLUDED__ - -#include "fifo_monitor_public.h" - -#define __INLINE_GP_DEVICE__ -#include "gp_device.h" - -#include "device_access.h" - -#include "assert_support.h" - -#ifdef __INLINE_FIFO_MONITOR__ -extern const unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH]; -#endif - -STORAGE_CLASS_FIFO_MONITOR_C void fifo_switch_set( - const fifo_monitor_ID_t ID, - const fifo_switch_t switch_id, - const hrt_data sel) -{ - assert(ID == FIFO_MONITOR0_ID); - assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1); - assert(switch_id < N_FIFO_SWITCH); - (void)ID; - - gp_device_reg_store(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id], sel); - - return; -} - -STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_switch_get( - const fifo_monitor_ID_t ID, - const fifo_switch_t switch_id) -{ - assert(ID == FIFO_MONITOR0_ID); - assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1); - assert(switch_id < N_FIFO_SWITCH); - (void)ID; - - return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]); -} - - -STORAGE_CLASS_FIFO_MONITOR_C void fifo_monitor_reg_store( - const fifo_monitor_ID_t ID, - const unsigned int reg, - const hrt_data value) -{ - assert(ID < N_FIFO_MONITOR_ID); - assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1); - ia_css_device_store_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data), value); - return; -} - -STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_monitor_reg_load( - const fifo_monitor_ID_t ID, - const unsigned int reg) -{ - assert(ID < N_FIFO_MONITOR_ID); - assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data)); -} - -#endif /* __FIFO_MONITOR_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c deleted file mode 100644 index 1966b147f8ab..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* The name "gdc.h is already taken" */ -#include "gdc_device.h" - -#include "device_access.h" - -#include "assert_support.h" - -/* - * Local function declarations - */ -static inline void gdc_reg_store( - const gdc_ID_t ID, - const unsigned int reg, - const hrt_data value); - -static inline hrt_data gdc_reg_load( - const gdc_ID_t ID, - const unsigned int reg); - - -#ifndef __INLINE_GDC__ -#include "gdc_private.h" -#endif /* __INLINE_GDC__ */ - -/* - * Exported function implementations - */ -void gdc_lut_store( - const gdc_ID_t ID, - const int data[4][HRT_GDC_N]) -{ - unsigned int i, lut_offset = HRT_GDC_LUT_IDX; - - assert(ID < N_GDC_ID); - assert(HRT_GDC_LUT_COEFF_OFFSET <= (4*sizeof(hrt_data))); - - for (i = 0; i < HRT_GDC_N; i++) { - hrt_data entry_0 = data[0][i] & HRT_GDC_BCI_COEF_MASK; - hrt_data entry_1 = data[1][i] & HRT_GDC_BCI_COEF_MASK; - hrt_data entry_2 = data[2][i] & HRT_GDC_BCI_COEF_MASK; - hrt_data entry_3 = data[3][i] & HRT_GDC_BCI_COEF_MASK; - - hrt_data word_0 = entry_0 | - (entry_1 << HRT_GDC_LUT_COEFF_OFFSET); - hrt_data word_1 = entry_2 | - (entry_3 << HRT_GDC_LUT_COEFF_OFFSET); - - gdc_reg_store(ID, lut_offset++, word_0); - gdc_reg_store(ID, lut_offset++, word_1); - } - return; -} - -/* - * Input LUT format: - * c0[0-1023], c1[0-1023], c2[0-1023] c3[0-1023] - * - * 
Output LUT format (interleaved): - * c0[0], c1[0], c2[0], c3[0], c0[1], c1[1], c2[1], c3[1], .... - * c0[1023], c1[1023], c2[1023], c3[1023] - * - * The first format needs c0[0], c1[0] (which are 1024 words apart) - * to program gdc LUT registers. This makes it difficult to do piecemeal - * reads in SP side gdc_lut_store - * - * Interleaved format allows use of contiguous bytes to store into - * gdc LUT registers. - * - * See gdc_lut_store() definition in host/gdc.c vs sp/gdc_private.h - * - */ -void gdc_lut_convert_to_isp_format(const int in_lut[4][HRT_GDC_N], - int out_lut[4][HRT_GDC_N]) -{ - unsigned int i; - int *out = (int *)out_lut; - - for (i = 0; i < HRT_GDC_N; i++) { - out[0] = in_lut[0][i]; - out[1] = in_lut[1][i]; - out[2] = in_lut[2][i]; - out[3] = in_lut[3][i]; - out += 4; - } -} - -int gdc_get_unity( - const gdc_ID_t ID) -{ - assert(ID < N_GDC_ID); - (void)ID; - return (int)(1UL << HRT_GDC_FRAC_BITS); -} - - -/* - * Local function implementations - */ -static inline void gdc_reg_store( - const gdc_ID_t ID, - const unsigned int reg, - const hrt_data value) -{ - ia_css_device_store_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data), value); - return; -} - -static inline hrt_data gdc_reg_load( - const gdc_ID_t ID, - const unsigned int reg) -{ - return ia_css_device_load_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data)); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_local.h deleted file mode 100644 index 0c6de867e012..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_local.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GDC_LOCAL_H_INCLUDED__ -#define __GDC_LOCAL_H_INCLUDED__ - -#include "gdc_global.h" - -#endif /* __GDC_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_private.h deleted file mode 100644 index f7dec75adf78..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_private.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __GDC_PRIVATE_H_INCLUDED__ -#define __GDC_PRIVATE_H_INCLUDED__ - -#include "gdc_public.h" - -#endif /* __GDC_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c deleted file mode 100644 index da88aa3af664..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "assert_support.h" -#include "gp_device.h" - -#ifndef __INLINE_GP_DEVICE__ -#include "gp_device_private.h" -#endif /* __INLINE_GP_DEVICE__ */ - -void gp_device_get_state( - const gp_device_ID_t ID, - gp_device_state_t *state) -{ - assert(ID < N_GP_DEVICE_ID); - assert(state != NULL); - - state->syncgen_enable = gp_device_reg_load(ID, - _REG_GP_SYNCGEN_ENABLE_ADDR); - state->syncgen_free_running = gp_device_reg_load(ID, - _REG_GP_SYNCGEN_FREE_RUNNING_ADDR); - state->syncgen_pause = gp_device_reg_load(ID, - _REG_GP_SYNCGEN_PAUSE_ADDR); - state->nr_frames = gp_device_reg_load(ID, - _REG_GP_NR_FRAMES_ADDR); - state->syngen_nr_pix = gp_device_reg_load(ID, - _REG_GP_SYNGEN_NR_PIX_ADDR); - state->syngen_nr_pix = gp_device_reg_load(ID, - _REG_GP_SYNGEN_NR_PIX_ADDR); - state->syngen_nr_lines = gp_device_reg_load(ID, - _REG_GP_SYNGEN_NR_LINES_ADDR); - state->syngen_hblank_cycles = gp_device_reg_load(ID, - _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR); - state->syngen_vblank_cycles = gp_device_reg_load(ID, - _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR); - state->isel_sof = gp_device_reg_load(ID, - _REG_GP_ISEL_SOF_ADDR); - state->isel_eof = gp_device_reg_load(ID, - _REG_GP_ISEL_EOF_ADDR); - state->isel_sol = gp_device_reg_load(ID, - _REG_GP_ISEL_SOL_ADDR); - state->isel_eol = gp_device_reg_load(ID, - _REG_GP_ISEL_EOL_ADDR); - state->isel_lfsr_enable = gp_device_reg_load(ID, - _REG_GP_ISEL_LFSR_ENABLE_ADDR); - state->isel_lfsr_enable_b = gp_device_reg_load(ID, - _REG_GP_ISEL_LFSR_ENABLE_B_ADDR); - state->isel_lfsr_reset_value = gp_device_reg_load(ID, - _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR); - state->isel_tpg_enable = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_ENABLE_ADDR); - state->isel_tpg_enable_b = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_ENABLE_B_ADDR); - state->isel_hor_cnt_mask = gp_device_reg_load(ID, - _REG_GP_ISEL_HOR_CNT_MASK_ADDR); - state->isel_ver_cnt_mask = gp_device_reg_load(ID, - _REG_GP_ISEL_VER_CNT_MASK_ADDR); - state->isel_xy_cnt_mask = gp_device_reg_load(ID, - 
_REG_GP_ISEL_XY_CNT_MASK_ADDR); - state->isel_hor_cnt_delta = gp_device_reg_load(ID, - _REG_GP_ISEL_HOR_CNT_DELTA_ADDR); - state->isel_ver_cnt_delta = gp_device_reg_load(ID, - _REG_GP_ISEL_VER_CNT_DELTA_ADDR); - state->isel_tpg_mode = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_MODE_ADDR); - state->isel_tpg_red1 = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_RED1_ADDR); - state->isel_tpg_green1 = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_GREEN1_ADDR); - state->isel_tpg_blue1 = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_BLUE1_ADDR); - state->isel_tpg_red2 = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_RED2_ADDR); - state->isel_tpg_green2 = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_GREEN2_ADDR); - state->isel_tpg_blue2 = gp_device_reg_load(ID, - _REG_GP_ISEL_TPG_BLUE2_ADDR); - state->isel_ch_id = gp_device_reg_load(ID, - _REG_GP_ISEL_CH_ID_ADDR); - state->isel_fmt_type = gp_device_reg_load(ID, - _REG_GP_ISEL_FMT_TYPE_ADDR); - state->isel_data_sel = gp_device_reg_load(ID, - _REG_GP_ISEL_DATA_SEL_ADDR); - state->isel_sband_sel = gp_device_reg_load(ID, - _REG_GP_ISEL_SBAND_SEL_ADDR); - state->isel_sync_sel = gp_device_reg_load(ID, - _REG_GP_ISEL_SYNC_SEL_ADDR); - state->syncgen_hor_cnt = gp_device_reg_load(ID, - _REG_GP_SYNCGEN_HOR_CNT_ADDR); - state->syncgen_ver_cnt = gp_device_reg_load(ID, - _REG_GP_SYNCGEN_VER_CNT_ADDR); - state->syncgen_frame_cnt = gp_device_reg_load(ID, - _REG_GP_SYNCGEN_FRAME_CNT_ADDR); - state->soft_reset = gp_device_reg_load(ID, - _REG_GP_SOFT_RESET_ADDR); - return; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_local.h deleted file mode 100644 index 113d5ed32d42..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_local.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_DEVICE_LOCAL_H_INCLUDED__ -#define __GP_DEVICE_LOCAL_H_INCLUDED__ - -#include "gp_device_global.h" - -/* @ GP_REGS_BASE -> GP_DEVICE_BASE */ -#define _REG_GP_SDRAM_WAKEUP_ADDR 0x00 -#define _REG_GP_IDLE_ADDR 0x04 -/* #define _REG_GP_IRQ_REQ0_ADDR 0x08 */ -/* #define _REG_GP_IRQ_REQ1_ADDR 0x0C */ -#define _REG_GP_SP_STREAM_STAT_ADDR 0x10 -#define _REG_GP_SP_STREAM_STAT_B_ADDR 0x14 -#define _REG_GP_ISP_STREAM_STAT_ADDR 0x18 -#define _REG_GP_MOD_STREAM_STAT_ADDR 0x1C -#define _REG_GP_SP_STREAM_STAT_IRQ_COND_ADDR 0x20 -#define _REG_GP_SP_STREAM_STAT_B_IRQ_COND_ADDR 0x24 -#define _REG_GP_ISP_STREAM_STAT_IRQ_COND_ADDR 0x28 -#define _REG_GP_MOD_STREAM_STAT_IRQ_COND_ADDR 0x2C -#define _REG_GP_SP_STREAM_STAT_IRQ_ENABLE_ADDR 0x30 -#define _REG_GP_SP_STREAM_STAT_B_IRQ_ENABLE_ADDR 0x34 -#define _REG_GP_ISP_STREAM_STAT_IRQ_ENABLE_ADDR 0x38 -#define _REG_GP_MOD_STREAM_STAT_IRQ_ENABLE_ADDR 0x3C -/* -#define _REG_GP_SWITCH_IF_ADDR 0x40 -#define _REG_GP_SWITCH_GDC1_ADDR 0x44 -#define _REG_GP_SWITCH_GDC2_ADDR 0x48 -*/ -#define _REG_GP_SLV_REG_RST_ADDR 0x50 -#define _REG_GP_SWITCH_ISYS2401_ADDR 0x54 - -/* @ INPUT_FORMATTER_BASE -> GP_DEVICE_BASE */ -/* -#define _REG_GP_IFMT_input_switch_lut_reg0 0x00030800 -#define _REG_GP_IFMT_input_switch_lut_reg1 0x00030804 -#define _REG_GP_IFMT_input_switch_lut_reg2 0x00030808 -#define _REG_GP_IFMT_input_switch_lut_reg3 0x0003080C -#define _REG_GP_IFMT_input_switch_lut_reg4 0x00030810 -#define _REG_GP_IFMT_input_switch_lut_reg5 
0x00030814 -#define _REG_GP_IFMT_input_switch_lut_reg6 0x00030818 -#define _REG_GP_IFMT_input_switch_lut_reg7 0x0003081C -#define _REG_GP_IFMT_input_switch_fsync_lut 0x00030820 -#define _REG_GP_IFMT_srst 0x00030824 -#define _REG_GP_IFMT_slv_reg_srst 0x00030828 -#define _REG_GP_IFMT_input_switch_ch_id_fmt_type 0x0003082C -*/ -/* @ GP_DEVICE_BASE */ -/* -#define _REG_GP_SYNCGEN_ENABLE_ADDR 0x00090000 -#define _REG_GP_SYNCGEN_FREE_RUNNING_ADDR 0x00090004 -#define _REG_GP_SYNCGEN_PAUSE_ADDR 0x00090008 -#define _REG_GP_NR_FRAMES_ADDR 0x0009000C -#define _REG_GP_SYNGEN_NR_PIX_ADDR 0x00090010 -#define _REG_GP_SYNGEN_NR_LINES_ADDR 0x00090014 -#define _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR 0x00090018 -#define _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR 0x0009001C -#define _REG_GP_ISEL_SOF_ADDR 0x00090020 -#define _REG_GP_ISEL_EOF_ADDR 0x00090024 -#define _REG_GP_ISEL_SOL_ADDR 0x00090028 -#define _REG_GP_ISEL_EOL_ADDR 0x0009002C -#define _REG_GP_ISEL_LFSR_ENABLE_ADDR 0x00090030 -#define _REG_GP_ISEL_LFSR_ENABLE_B_ADDR 0x00090034 -#define _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR 0x00090038 -#define _REG_GP_ISEL_TPG_ENABLE_ADDR 0x0009003C -#define _REG_GP_ISEL_TPG_ENABLE_B_ADDR 0x00090040 -#define _REG_GP_ISEL_HOR_CNT_MASK_ADDR 0x00090044 -#define _REG_GP_ISEL_VER_CNT_MASK_ADDR 0x00090048 -#define _REG_GP_ISEL_XY_CNT_MASK_ADDR 0x0009004C -#define _REG_GP_ISEL_HOR_CNT_DELTA_ADDR 0x00090050 -#define _REG_GP_ISEL_VER_CNT_DELTA_ADDR 0x00090054 -#define _REG_GP_ISEL_TPG_MODE_ADDR 0x00090058 -#define _REG_GP_ISEL_TPG_RED1_ADDR 0x0009005C -#define _REG_GP_ISEL_TPG_GREEN1_ADDR 0x00090060 -#define _REG_GP_ISEL_TPG_BLUE1_ADDR 0x00090064 -#define _REG_GP_ISEL_TPG_RED2_ADDR 0x00090068 -#define _REG_GP_ISEL_TPG_GREEN2_ADDR 0x0009006C -#define _REG_GP_ISEL_TPG_BLUE2_ADDR 0x00090070 -#define _REG_GP_ISEL_CH_ID_ADDR 0x00090074 -#define _REG_GP_ISEL_FMT_TYPE_ADDR 0x00090078 -#define _REG_GP_ISEL_DATA_SEL_ADDR 0x0009007C -#define _REG_GP_ISEL_SBAND_SEL_ADDR 0x00090080 -#define _REG_GP_ISEL_SYNC_SEL_ADDR 0x00090084 
-#define _REG_GP_SYNCGEN_HOR_CNT_ADDR 0x00090088 -#define _REG_GP_SYNCGEN_VER_CNT_ADDR 0x0009008C -#define _REG_GP_SYNCGEN_FRAME_CNT_ADDR 0x00090090 -#define _REG_GP_SOFT_RESET_ADDR 0x00090094 -*/ - -struct gp_device_state_s { - int syncgen_enable; - int syncgen_free_running; - int syncgen_pause; - int nr_frames; - int syngen_nr_pix; - int syngen_nr_lines; - int syngen_hblank_cycles; - int syngen_vblank_cycles; - int isel_sof; - int isel_eof; - int isel_sol; - int isel_eol; - int isel_lfsr_enable; - int isel_lfsr_enable_b; - int isel_lfsr_reset_value; - int isel_tpg_enable; - int isel_tpg_enable_b; - int isel_hor_cnt_mask; - int isel_ver_cnt_mask; - int isel_xy_cnt_mask; - int isel_hor_cnt_delta; - int isel_ver_cnt_delta; - int isel_tpg_mode; - int isel_tpg_red1; - int isel_tpg_green1; - int isel_tpg_blue1; - int isel_tpg_red2; - int isel_tpg_green2; - int isel_tpg_blue2; - int isel_ch_id; - int isel_fmt_type; - int isel_data_sel; - int isel_sband_sel; - int isel_sync_sel; - int syncgen_hor_cnt; - int syncgen_ver_cnt; - int syncgen_frame_cnt; - int soft_reset; -}; - -#endif /* __GP_DEVICE_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h deleted file mode 100644 index 7c0362c29411..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_DEVICE_PRIVATE_H_INCLUDED__ -#define __GP_DEVICE_PRIVATE_H_INCLUDED__ - -#include "gp_device_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -STORAGE_CLASS_GP_DEVICE_C void gp_device_reg_store( - const gp_device_ID_t ID, - const unsigned int reg_addr, - const hrt_data value) -{ - assert(ID < N_GP_DEVICE_ID); - assert(GP_DEVICE_BASE[ID] != (hrt_address)-1); - assert((reg_addr % sizeof(hrt_data)) == 0); - ia_css_device_store_uint32(GP_DEVICE_BASE[ID] + reg_addr, value); - return; -} - -STORAGE_CLASS_GP_DEVICE_C hrt_data gp_device_reg_load( - const gp_device_ID_t ID, - const hrt_address reg_addr) -{ - assert(ID < N_GP_DEVICE_ID); - assert(GP_DEVICE_BASE[ID] != (hrt_address)-1); - assert((reg_addr % sizeof(hrt_data)) == 0); - return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr); -} - -#endif /* __GP_DEVICE_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c deleted file mode 100644 index b6b1344786b1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include /*uint32_t */ -#include "gp_timer.h" /*system_local.h, - gp_timer_public.h*/ - -#ifndef __INLINE_GP_TIMER__ -#include "gp_timer_private.h" /*device_access.h*/ -#endif /* __INLINE_GP_TIMER__ */ -#include "system_local.h" - -/* FIXME: not sure if reg_load(), reg_store() should be API. - */ -static uint32_t -gp_timer_reg_load(uint32_t reg); - -static void -gp_timer_reg_store(uint32_t reg, uint32_t value); - -static uint32_t -gp_timer_reg_load(uint32_t reg) -{ - return ia_css_device_load_uint32( - GP_TIMER_BASE + - (reg * sizeof(uint32_t))); -} - -static void -gp_timer_reg_store(uint32_t reg, uint32_t value) -{ - ia_css_device_store_uint32((GP_TIMER_BASE + - (reg * sizeof(uint32_t))), - value); -} - -void gp_timer_init(gp_timer_ID_t ID) -{ - /* set_overall_enable*/ - gp_timer_reg_store(_REG_GP_TIMER_OVERALL_ENABLE, 1); - - /*set enable*/ - gp_timer_reg_store(_REG_GP_TIMER_ENABLE_ID(ID), 1); - - /* set signal select */ - gp_timer_reg_store(_REG_GP_TIMER_SIGNAL_SELECT_ID(ID), GP_TIMER_SIGNAL_SELECT); - - /*set count type */ - gp_timer_reg_store(_REG_GP_TIMER_COUNT_TYPE_ID(ID), GP_TIMER_COUNT_TYPE_LOW); - - /*reset gp timer */ - gp_timer_reg_store(_REG_GP_TIMER_RESET_REG, 0xFF); -} - -uint32_t -gp_timer_read(gp_timer_ID_t ID) -{ - return gp_timer_reg_load(_REG_GP_TIMER_VALUE_ID(ID)); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_local.h deleted file mode 100644 index 19ce35d87291..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_local.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP 
subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_TIMER_LOCAL_H_INCLUDED__ -#define __GP_TIMER_LOCAL_H_INCLUDED__ - -#include "gp_timer_global.h" /*GP_TIMER_SEL - GP_TIMER_SIGNAL_SELECT*/ - -#include "gp_timer_defs.h" /*HIVE_GP_TIMER_xxx registers*/ -#include "hive_isp_css_defs.h" /*HIVE_GP_TIMER_NUM_COUNTERS - HIVE_GP_TIMER_NUM_IRQS*/ - -#define _REG_GP_TIMER_RESET_REG HIVE_GP_TIMER_RESET_REG_IDX -#define _REG_GP_TIMER_OVERALL_ENABLE HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX - -/*Register offsets for timers [1,7] can be obtained - * by adding (GP_TIMERx_ID * sizeof(uint32_t))*/ -#define _REG_GP_TIMER_ENABLE_ID(timer_id) HIVE_GP_TIMER_ENABLE_REG_IDX(timer_id) -#define _REG_GP_TIMER_VALUE_ID(timer_id) HIVE_GP_TIMER_VALUE_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS) -#define _REG_GP_TIMER_COUNT_TYPE_ID(timer_id) HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS) -#define _REG_GP_TIMER_SIGNAL_SELECT_ID(timer_id) HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS) - - -#define _REG_GP_TIMER_IRQ_TRIGGER_VALUE_ID(irq_id) HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS) - -#define _REG_GP_TIMER_IRQ_TIMER_SELECT_ID(irq_id) \ - HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS, HIVE_GP_TIMER_NUM_IRQS) - -#define _REG_GP_TIMER_IRQ_ENABLE_ID(irq_id) \ - HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS, HIVE_GP_TIMER_NUM_IRQS) - - -#endif /*__GP_TIMER_LOCAL_H_INCLUDED__*/ diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_private.h deleted file mode 100644 index 705be5e5cc70..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_private.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_TIMER_PRIVATE_H_INCLUDED__ -#define __GP_TIMER_PRIVATE_H_INCLUDED__ - -#include "gp_timer_public.h" -#include "device_access.h" -#include "assert_support.h" - -#endif /* __GP_TIMER_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_local.h deleted file mode 100644 index f4652b79734d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_local.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GPIO_LOCAL_H_INCLUDED__ -#define __GPIO_LOCAL_H_INCLUDED__ - -#include "gpio_global.h" - -#endif /* __GPIO_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h deleted file mode 100644 index b6ebf34eaa9d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __GPIO_PRIVATE_H_INCLUDED__ -#define __GPIO_PRIVATE_H_INCLUDED__ - -#include "gpio_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -STORAGE_CLASS_GPIO_C void gpio_reg_store( - const gpio_ID_t ID, - const unsigned int reg, - const hrt_data value) -{ -OP___assert(ID < N_GPIO_ID); -OP___assert(GPIO_BASE[ID] != (hrt_address)-1); - ia_css_device_store_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data), value); - return; -} - -STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load( - const gpio_ID_t ID, - const unsigned int reg) -{ -OP___assert(ID < N_GPIO_ID); -OP___assert(GPIO_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data)); -} - -#endif /* __GPIO_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem.c deleted file mode 100644 index e48f180c9507..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem.c +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "hmem.h" - -#ifndef __INLINE_HMEM__ -#include "hmem_private.h" -#endif /* __INLINE_HMEM__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_local.h deleted file mode 100644 index 499f55f07253..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_local.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __HMEM_LOCAL_H_INCLUDED__ -#define __HMEM_LOCAL_H_INCLUDED__ - -#include "hmem_global.h" - -#endif /* __HMEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h deleted file mode 100644 index 32a780380e11..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __HMEM_PRIVATE_H_INCLUDED__ -#define __HMEM_PRIVATE_H_INCLUDED__ - -#include "hmem_public.h" - -#include "assert_support.h" - -STORAGE_CLASS_HMEM_C size_t sizeof_hmem( - const hmem_ID_t ID) -{ - assert(ID < N_HMEM_ID); - (void)ID; - return HMEM_SIZE*sizeof(hmem_data_t); -} - -#endif /* __HMEM_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter.c deleted file mode 100644 index 0e1ca995fb06..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter.c +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "system_global.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2 - -#include "input_formatter.h" -#include -#include "gp_device.h" - -#include "assert_support.h" - -#ifndef __INLINE_INPUT_FORMATTER__ -#include "input_formatter_private.h" -#endif /* __INLINE_INPUT_FORMATTER__ */ - -const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID] = { - INPUT_FORMATTER0_SRST_OFFSET, - INPUT_FORMATTER1_SRST_OFFSET, - INPUT_FORMATTER2_SRST_OFFSET, - INPUT_FORMATTER3_SRST_OFFSET}; - -const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID] = { - INPUT_FORMATTER0_SRST_MASK, - INPUT_FORMATTER1_SRST_MASK, - INPUT_FORMATTER2_SRST_MASK, - INPUT_FORMATTER3_SRST_MASK}; - -const uint8_t HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID] = { - HIVE_INPUT_SWITCH_SELECT_IF_PRIM, - HIVE_INPUT_SWITCH_SELECT_IF_PRIM, - HIVE_INPUT_SWITCH_SELECT_IF_SEC, - HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM}; - -/* MW Should be part of system_global.h, where we have the main enumeration */ -static const bool HIVE_IF_BIN_COPY[N_INPUT_FORMATTER_ID] = { - false, false, false, true -}; - -void input_formatter_rst( - const input_formatter_ID_t ID) -{ - hrt_address addr; - hrt_data rst; - - assert(ID < N_INPUT_FORMATTER_ID); - - addr = HIVE_IF_SRST_ADDRESS[ID]; - rst = HIVE_IF_SRST_MASK[ID]; - - /* TEMPORARY HACK: THIS RESET BREAKS THE METADATA FEATURE - * WICH USES THE STREAM2MEMRY BLOCK. 
- * MUST BE FIXED PROPERLY - */ - if (!HIVE_IF_BIN_COPY[ID]) { - input_formatter_reg_store(ID, addr, rst); - } - - return; -} - -unsigned int input_formatter_get_alignment( - const input_formatter_ID_t ID) -{ - assert(ID < N_INPUT_FORMATTER_ID); - - return input_formatter_alignment[ID]; -} - -void input_formatter_set_fifo_blocking_mode( - const input_formatter_ID_t ID, - const bool enable) -{ - assert(ID < N_INPUT_FORMATTER_ID); - - /* cnd_input_formatter_reg_store() */ - if (!HIVE_IF_BIN_COPY[ID]) { - input_formatter_reg_store(ID, - HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS, enable); - } - return; -} - -void input_formatter_get_switch_state( - const input_formatter_ID_t ID, - input_formatter_switch_state_t *state) -{ - assert(ID < N_INPUT_FORMATTER_ID); - assert(state != NULL); - - /* We'll change this into an intelligent function to get switch info per IF */ - (void)ID; - - state->if_input_switch_lut_reg[0] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg0); - state->if_input_switch_lut_reg[1] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg1); - state->if_input_switch_lut_reg[2] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg2); - state->if_input_switch_lut_reg[3] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg3); - state->if_input_switch_lut_reg[4] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg4); - state->if_input_switch_lut_reg[5] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg5); - state->if_input_switch_lut_reg[6] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg6); - state->if_input_switch_lut_reg[7] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg7); - state->if_input_switch_fsync_lut = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_fsync_lut); - state->if_input_switch_ch_id_fmt_type = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_ch_id_fmt_type); - - return; -} - 
-void input_formatter_get_state( - const input_formatter_ID_t ID, - input_formatter_state_t *state) -{ - assert(ID < N_INPUT_FORMATTER_ID); - assert(state != NULL); -/* - state->reset = input_formatter_reg_load(ID, - HIVE_IF_RESET_ADDRESS); - */ - state->start_line = input_formatter_reg_load(ID, - HIVE_IF_START_LINE_ADDRESS); - state->start_column = input_formatter_reg_load(ID, - HIVE_IF_START_COLUMN_ADDRESS); - state->cropped_height = input_formatter_reg_load(ID, - HIVE_IF_CROPPED_HEIGHT_ADDRESS); - state->cropped_width = input_formatter_reg_load(ID, - HIVE_IF_CROPPED_WIDTH_ADDRESS); - state->ver_decimation = input_formatter_reg_load(ID, - HIVE_IF_VERTICAL_DECIMATION_ADDRESS); - state->hor_decimation = input_formatter_reg_load(ID, - HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS); - state->hor_deinterleaving = input_formatter_reg_load(ID, - HIVE_IF_H_DEINTERLEAVING_ADDRESS); - state->left_padding = input_formatter_reg_load(ID, - HIVE_IF_LEFTPADDING_WIDTH_ADDRESS); - state->eol_offset = input_formatter_reg_load(ID, - HIVE_IF_END_OF_LINE_OFFSET_ADDRESS); - state->vmem_start_address = input_formatter_reg_load(ID, - HIVE_IF_VMEM_START_ADDRESS_ADDRESS); - state->vmem_end_address = input_formatter_reg_load(ID, - HIVE_IF_VMEM_END_ADDRESS_ADDRESS); - state->vmem_increment = input_formatter_reg_load(ID, - HIVE_IF_VMEM_INCREMENT_ADDRESS); - state->is_yuv420 = input_formatter_reg_load(ID, - HIVE_IF_YUV_420_FORMAT_ADDRESS); - state->vsync_active_low = input_formatter_reg_load(ID, - HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS); - state->hsync_active_low = input_formatter_reg_load(ID, - HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS); - state->allow_fifo_overflow = input_formatter_reg_load(ID, - HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS); - state->block_fifo_when_no_req = input_formatter_reg_load(ID, - HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS); - state->ver_deinterleaving = input_formatter_reg_load(ID, - HIVE_IF_V_DEINTERLEAVING_ADDRESS); -/* FSM */ - state->fsm_sync_status = input_formatter_reg_load(ID, - 
HIVE_IF_FSM_SYNC_STATUS); - state->fsm_sync_counter = input_formatter_reg_load(ID, - HIVE_IF_FSM_SYNC_COUNTER); - state->fsm_crop_status = input_formatter_reg_load(ID, - HIVE_IF_FSM_CROP_STATUS); - state->fsm_crop_line_counter = input_formatter_reg_load(ID, - HIVE_IF_FSM_CROP_LINE_COUNTER); - state->fsm_crop_pixel_counter = input_formatter_reg_load(ID, - HIVE_IF_FSM_CROP_PIXEL_COUNTER); - state->fsm_deinterleaving_index = input_formatter_reg_load(ID, - HIVE_IF_FSM_DEINTERLEAVING_IDX); - state->fsm_dec_h_counter = input_formatter_reg_load(ID, - HIVE_IF_FSM_DECIMATION_H_COUNTER); - state->fsm_dec_v_counter = input_formatter_reg_load(ID, - HIVE_IF_FSM_DECIMATION_V_COUNTER); - state->fsm_dec_block_v_counter = input_formatter_reg_load(ID, - HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER); - state->fsm_padding_status = input_formatter_reg_load(ID, - HIVE_IF_FSM_PADDING_STATUS); - state->fsm_padding_elem_counter = input_formatter_reg_load(ID, - HIVE_IF_FSM_PADDING_ELEMENT_COUNTER); - state->fsm_vector_support_error = input_formatter_reg_load(ID, - HIVE_IF_FSM_VECTOR_SUPPORT_ERROR); - state->fsm_vector_buffer_full = input_formatter_reg_load(ID, - HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL); - state->vector_support = input_formatter_reg_load(ID, - HIVE_IF_FSM_VECTOR_SUPPORT); - state->sensor_data_lost = input_formatter_reg_load(ID, - HIVE_IF_FIFO_SENSOR_STATUS); - - return; -} - -void input_formatter_bin_get_state( - const input_formatter_ID_t ID, - input_formatter_bin_state_t *state) -{ - assert(ID < N_INPUT_FORMATTER_ID); - assert(state != NULL); - - state->reset = input_formatter_reg_load(ID, - HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS); - state->input_endianness = input_formatter_reg_load(ID, - HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS); - state->output_endianness = input_formatter_reg_load(ID, - HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS); - state->bitswap = input_formatter_reg_load(ID, - HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS); - state->block_synch = input_formatter_reg_load(ID, - 
HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS); - state->packet_synch = input_formatter_reg_load(ID, - HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS); - state->readpostwrite_synch = input_formatter_reg_load(ID, - HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS); - state->is_2ppc = input_formatter_reg_load(ID, - HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS); - state->en_status_update = input_formatter_reg_load(ID, - HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS); - return; -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_local.h deleted file mode 100644 index 3e00b5e6bad7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_local.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __INPUT_FORMATTER_LOCAL_H_INCLUDED__ -#define __INPUT_FORMATTER_LOCAL_H_INCLUDED__ - -#include "input_formatter_global.h" - -#include "isp.h" /* ISP_VEC_ALIGN */ - -typedef struct input_formatter_switch_state_s input_formatter_switch_state_t; -typedef struct input_formatter_state_s input_formatter_state_t; -typedef struct input_formatter_bin_state_s input_formatter_bin_state_t; - -#define HIVE_IF_FSM_SYNC_STATUS 0x100 -#define HIVE_IF_FSM_SYNC_COUNTER 0x104 -#define HIVE_IF_FSM_DEINTERLEAVING_IDX 0x114 -#define HIVE_IF_FSM_DECIMATION_H_COUNTER 0x118 -#define HIVE_IF_FSM_DECIMATION_V_COUNTER 0x11C -#define HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER 0x120 -#define HIVE_IF_FSM_PADDING_STATUS 0x124 -#define HIVE_IF_FSM_PADDING_ELEMENT_COUNTER 0x128 -#define HIVE_IF_FSM_VECTOR_SUPPORT_ERROR 0x12C -#define HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL 0x130 -#define HIVE_IF_FSM_VECTOR_SUPPORT 0x134 -#define HIVE_IF_FIFO_SENSOR_STATUS 0x138 - -/* - * The switch LUT's coding defines a sink for each - * single channel ID + channel format type. Conversely - * the sink (i.e. an input formatter) can be reached - * from multiple channel & format type combinations - * - * LUT[0,1] channel=0, format type {0,1,...31} - * LUT[2,3] channel=1, format type {0,1,...31} - * LUT[4,5] channel=2, format type {0,1,...31} - * LUT[6,7] channel=3, format type {0,1,...31} - * - * Each register hold 16 2-bit fields encoding the sink - * {0,1,2,3}, "0" means unconnected. - * - * The single FSYNCH register uses four 3-bit fields of 1-hot - * encoded sink information, "0" means unconnected. - * - * The encoding is redundant. The FSYNCH setting will connect - * a channel to a sink. At that point the LUT's belonging to - * that channel can be directed to another sink. 
Thus the data - * goes to another place than the synch - */ -struct input_formatter_switch_state_s { - int if_input_switch_lut_reg[8]; - int if_input_switch_fsync_lut; - int if_input_switch_ch_id_fmt_type; - bool if_input_switch_map[HIVE_SWITCH_N_CHANNELS][HIVE_SWITCH_N_FORMATTYPES]; -}; - -struct input_formatter_state_s { -/* int reset; */ - int start_line; - int start_column; - int cropped_height; - int cropped_width; - int ver_decimation; - int hor_decimation; - int ver_deinterleaving; - int hor_deinterleaving; - int left_padding; - int eol_offset; - int vmem_start_address; - int vmem_end_address; - int vmem_increment; - int is_yuv420; - int vsync_active_low; - int hsync_active_low; - int allow_fifo_overflow; - int block_fifo_when_no_req; - int fsm_sync_status; - int fsm_sync_counter; - int fsm_crop_status; - int fsm_crop_line_counter; - int fsm_crop_pixel_counter; - int fsm_deinterleaving_index; - int fsm_dec_h_counter; - int fsm_dec_v_counter; - int fsm_dec_block_v_counter; - int fsm_padding_status; - int fsm_padding_elem_counter; - int fsm_vector_support_error; - int fsm_vector_buffer_full; - int vector_support; - int sensor_data_lost; -}; - -struct input_formatter_bin_state_s { - uint32_t reset; - uint32_t input_endianness; - uint32_t output_endianness; - uint32_t bitswap; - uint32_t block_synch; - uint32_t packet_synch; - uint32_t readpostwrite_synch; - uint32_t is_2ppc; - uint32_t en_status_update; -}; - -static const unsigned int input_formatter_alignment[N_INPUT_FORMATTER_ID] = { - ISP_VEC_ALIGN, ISP_VEC_ALIGN, HIVE_ISP_CTRL_DATA_BYTES}; - -#endif /* __INPUT_FORMATTER_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h deleted file mode 100644 index 2f42a9c2771c..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ -#define __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ - -#include "input_formatter_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -STORAGE_CLASS_INPUT_FORMATTER_C void input_formatter_reg_store( - const input_formatter_ID_t ID, - const hrt_address reg_addr, - const hrt_data value) -{ - assert(ID < N_INPUT_FORMATTER_ID); - assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1); - assert((reg_addr % sizeof(hrt_data)) == 0); - ia_css_device_store_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr, value); - return; -} - -STORAGE_CLASS_INPUT_FORMATTER_C hrt_data input_formatter_reg_load( - const input_formatter_ID_t ID, - const unsigned int reg_addr) -{ - assert(ID < N_INPUT_FORMATTER_ID); - assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1); - assert((reg_addr % sizeof(hrt_data)) == 0); - return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr); -} - -#endif /* __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c deleted file mode 100644 index 2515e162828f..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c +++ /dev/null @@ -1,1823 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "system_global.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2 - -#include "input_system.h" -#include -#include "gp_device.h" - -#include "assert_support.h" - -#ifndef __INLINE_INPUT_SYSTEM__ -#include "input_system_private.h" -#endif /* __INLINE_INPUT_SYSTEM__ */ - -#define ZERO (0x0) -#define ONE (1U) - -static const ib_buffer_t IB_BUFFER_NULL = {0 ,0, 0 }; - -static input_system_error_t input_system_configure_channel( - const channel_cfg_t channel); - -static input_system_error_t input_system_configure_channel_sensor( - const channel_cfg_t channel); - -static input_system_error_t input_buffer_configuration(void); - -static input_system_error_t configuration_to_registers(void); - -static void receiver_rst(const rx_ID_t ID); -static void input_system_network_rst(const input_system_ID_t ID); - -static void capture_unit_configure( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - const ib_buffer_t* const cfg); - -static void acquisition_unit_configure( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - const ib_buffer_t* const cfg); - -static void ctrl_unit_configure( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - const ctrl_unit_cfg_t* const cfg); - -static void input_system_network_configure( - const input_system_ID_t ID, - const 
input_system_network_cfg_t * const cfg); - -// MW: CSI is previously named as "rx" short for "receiver" -static input_system_error_t set_csi_cfg( - csi_cfg_t* const lhs, - const csi_cfg_t* const rhs, - input_system_config_flags_t* const flags); - -static input_system_error_t set_source_type( - input_system_source_t* const lhs, - const input_system_source_t rhs, - input_system_config_flags_t* const flags); - -static input_system_error_t input_system_multiplexer_cfg( - input_system_multiplex_t* const lhs, - const input_system_multiplex_t rhs, - input_system_config_flags_t* const flags); - - - -static inline void capture_unit_get_state( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - capture_unit_state_t *state); - -static inline void acquisition_unit_get_state( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - acquisition_unit_state_t *state); - -static inline void ctrl_unit_get_state( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - ctrl_unit_state_t *state); - -static inline void mipi_port_get_state( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - mipi_port_state_t *state); - -static inline void rx_channel_get_state( - const rx_ID_t ID, - const unsigned int ch_id, - rx_channel_state_t *state); - -static void gp_device_rst(const gp_device_ID_t ID); - -static void input_selector_cfg_for_sensor(const gp_device_ID_t ID); - -static void input_switch_rst(const gp_device_ID_t ID); - -static void input_switch_cfg( - const gp_device_ID_t ID, - const input_switch_cfg_t * const cfg -); - -void input_system_get_state( - const input_system_ID_t ID, - input_system_state_t *state) -{ - sub_system_ID_t sub_id; - - assert(ID < N_INPUT_SYSTEM_ID); - assert(state != NULL); - - state->str_multicastA_sel = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_A_IDX); - state->str_multicastB_sel = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_B_IDX); - 
state->str_multicastC_sel = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_C_IDX); - state->str_mux_sel = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MUX_IDX); - state->str_mon_status = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_STRMON_STAT_IDX); - state->str_mon_irq_cond = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_STRMON_COND_IDX); - state->str_mon_irq_en = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX); - state->isys_srst = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_SRST_IDX); - state->isys_slv_reg_srst = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_SLV_REG_SRST_IDX); - state->str_deint_portA_cnt = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_REG_PORT_A_IDX); - state->str_deint_portB_cnt = input_system_sub_system_reg_load(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_REG_PORT_B_IDX); - - for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) { - capture_unit_get_state(ID, sub_id, - &(state->capture_unit[sub_id - CAPTURE_UNIT0_ID])); - } - for (sub_id = ACQUISITION_UNIT0_ID; sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) { - acquisition_unit_get_state(ID, sub_id, - &(state->acquisition_unit[sub_id - ACQUISITION_UNIT0_ID])); - } - for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID; sub_id++) { - ctrl_unit_get_state(ID, sub_id, - &(state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID])); - } - - return; -} - -void receiver_get_state( - const rx_ID_t ID, - receiver_state_t *state) -{ - enum mipi_port_id port_id; - unsigned int ch_id; - - assert(ID < N_RX_ID); - assert(state != NULL); - - state->fs_to_ls_delay = (uint8_t)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX); - state->ls_to_data_delay = (uint8_t)receiver_reg_load(ID, - 
_HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX); - state->data_to_le_delay = (uint8_t)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX); - state->le_to_fe_delay = (uint8_t)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX); - state->fe_to_fs_delay = (uint8_t)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX); - state->le_to_fs_delay = (uint8_t)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX); - state->is_two_ppc = (bool)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX); - state->backend_rst = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX); - state->raw18 = (uint16_t)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_RAW18_REG_IDX); - state->force_raw8 = (bool)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX); - state->raw16 = (uint16_t)receiver_reg_load(ID, - _HRT_CSS_RECEIVER_RAW16_REG_IDX); - - for (port_id = (enum mipi_port_id)0; port_id < N_MIPI_PORT_ID; port_id++) { - mipi_port_get_state(ID, port_id, - &(state->mipi_port_state[port_id])); - } - for (ch_id = (unsigned int)0; ch_id < N_RX_CHANNEL_ID; ch_id++) { - rx_channel_get_state(ID, ch_id, - &(state->rx_channel_state[ch_id])); - } - - state->be_gsp_acc_ovl = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX); - state->be_srst = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_SRST_REG_IDX); - state->be_is_two_ppc = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX); - state->be_comp_format0 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX); - state->be_comp_format1 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX); - state->be_comp_format2 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX); - state->be_comp_format3 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX); - state->be_sel = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_SEL_REG_IDX); - state->be_raw16_config = receiver_reg_load(ID, - 
_HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX); - state->be_raw18_config = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX); - state->be_force_raw8 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX); - state->be_irq_status = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX); - state->be_irq_clear = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX); - - return; -} - -bool is_mipi_format_yuv420( - const mipi_format_t mipi_format) -{ - bool is_yuv420 = ( - (mipi_format == MIPI_FORMAT_YUV420_8) || - (mipi_format == MIPI_FORMAT_YUV420_10) || - (mipi_format == MIPI_FORMAT_YUV420_8_SHIFT) || - (mipi_format == MIPI_FORMAT_YUV420_10_SHIFT)); -/* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */ - - return is_yuv420; -} - -void receiver_set_compression( - const rx_ID_t ID, - const unsigned int cfg_ID, - const mipi_compressor_t comp, - const mipi_predictor_t pred) -{ - const unsigned int field_id = cfg_ID % N_MIPI_FORMAT_CUSTOM; - const unsigned int ch_id = cfg_ID / N_MIPI_FORMAT_CUSTOM; - hrt_data val; - hrt_address addr = 0; - hrt_data reg; - - assert(ID < N_RX_ID); - assert(cfg_ID < N_MIPI_COMPRESSOR_CONTEXT); - assert(field_id < N_MIPI_FORMAT_CUSTOM); - assert(ch_id < N_RX_CHANNEL_ID); - assert(comp < N_MIPI_COMPRESSOR_METHODS); - assert(pred < N_MIPI_PREDICTOR_TYPES); - - val = (((uint8_t)pred) << 3) | comp; - - switch (ch_id) { - case 0: addr = ((field_id<6)?_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX:_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX); - break; - case 1: addr = ((field_id<6)?_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX:_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX); - break; - case 2: addr = ((field_id<6)?_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX:_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX); - break; - case 3: addr = ((field_id<6)?_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX:_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX); - break; - default: - /* should not happen */ - 
assert(false); - return; - } - - reg = ((field_id < 6)?(val << (field_id * 5)):(val << ((field_id - 6) * 5))); - receiver_reg_store(ID, addr, reg); - - return; -} - -void receiver_port_enable( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const bool cnd) -{ - hrt_data reg = receiver_port_reg_load(ID, port_ID, - _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX); - - if (cnd) { - reg |= 0x01; - } else { - reg &= ~0x01; - } - - receiver_port_reg_store(ID, port_ID, - _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, reg); - return; -} - -bool is_receiver_port_enabled( - const rx_ID_t ID, - const enum mipi_port_id port_ID) -{ - hrt_data reg = receiver_port_reg_load(ID, port_ID, - _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX); - return ((reg & 0x01) != 0); -} - -void receiver_irq_enable( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const rx_irq_info_t irq_info) -{ - receiver_port_reg_store(ID, - port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, irq_info); - return; -} - -rx_irq_info_t receiver_get_irq_info( - const rx_ID_t ID, - const enum mipi_port_id port_ID) -{ - return receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX); -} - -void receiver_irq_clear( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const rx_irq_info_t irq_info) -{ - receiver_port_reg_store(ID, - port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info); - return; -} - -static inline void capture_unit_get_state( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - capture_unit_state_t *state) -{ - assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <= CAPTURE_UNIT2_ID)); - assert(state != NULL); - - state->StartMode = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_START_MODE_REG_ID); - state->Start_Addr = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_START_ADDR_REG_ID); - state->Mem_Region_Size = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_MEM_REGION_SIZE_REG_ID); - state->Num_Mem_Regions = input_system_sub_system_reg_load(ID, - sub_id, - 
CAPT_NUM_MEM_REGIONS_REG_ID); -// AM: Illegal read from following registers. -/* state->Init = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_INIT_REG_ID); - state->Start = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_START_REG_ID); - state->Stop = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_STOP_REG_ID); -*/ - state->Packet_Length = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_PACKET_LENGTH_REG_ID); - state->Received_Length = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_RECEIVED_LENGTH_REG_ID); - state->Received_Short_Packets = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_RECEIVED_SHORT_PACKETS_REG_ID); - state->Received_Long_Packets = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_RECEIVED_LONG_PACKETS_REG_ID); - state->Last_Command = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_LAST_COMMAND_REG_ID); - state->Next_Command = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_NEXT_COMMAND_REG_ID); - state->Last_Acknowledge = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_LAST_ACKNOWLEDGE_REG_ID); - state->Next_Acknowledge = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_NEXT_ACKNOWLEDGE_REG_ID); - state->FSM_State_Info = input_system_sub_system_reg_load(ID, - sub_id, - CAPT_FSM_STATE_INFO_REG_ID); - - return; -} - -static inline void acquisition_unit_get_state( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - acquisition_unit_state_t *state) -{ - assert(sub_id == ACQUISITION_UNIT0_ID); - assert(state != NULL); - - state->Start_Addr = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_START_ADDR_REG_ID); - state->Mem_Region_Size = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_MEM_REGION_SIZE_REG_ID); - state->Num_Mem_Regions = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_NUM_MEM_REGIONS_REG_ID); -// AM: Illegal read from following registers. 
-/* state->Init = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_INIT_REG_ID); -*/ - state->Received_Short_Packets = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_RECEIVED_SHORT_PACKETS_REG_ID); - state->Received_Long_Packets = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_RECEIVED_LONG_PACKETS_REG_ID); - state->Last_Command = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_LAST_COMMAND_REG_ID); - state->Next_Command = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_NEXT_COMMAND_REG_ID); - state->Last_Acknowledge = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_LAST_ACKNOWLEDGE_REG_ID); - state->Next_Acknowledge = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_NEXT_ACKNOWLEDGE_REG_ID); - state->FSM_State_Info = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_FSM_STATE_INFO_REG_ID); - state->Int_Cntr_Info = input_system_sub_system_reg_load(ID, - sub_id, - ACQ_INT_CNTR_INFO_REG_ID); - - return; -} - -static inline void ctrl_unit_get_state( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - ctrl_unit_state_t *state) -{ - assert(sub_id == CTRL_UNIT0_ID); - assert(state != NULL); - - state->captA_start_addr = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_START_ADDR_A_REG_ID); - state->captB_start_addr = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_START_ADDR_B_REG_ID); - state->captC_start_addr = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_START_ADDR_C_REG_ID); - state->captA_mem_region_size = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID); - state->captB_mem_region_size = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID); - state->captC_mem_region_size = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID); - state->captA_num_mem_regions = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID); - 
state->captB_num_mem_regions = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID); - state->captC_num_mem_regions = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID); - state->acq_start_addr = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_ACQ_START_ADDR_REG_ID); - state->acq_mem_region_size = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID); - state->acq_num_mem_regions = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID); -// AM: Illegal read from following registers. -/* state->ctrl_init = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_INIT_REG_ID); -*/ - state->last_cmd = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_LAST_COMMAND_REG_ID); - state->next_cmd = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_NEXT_COMMAND_REG_ID); - state->last_ack = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID); - state->next_ack = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID); - state->top_fsm_state = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_FSM_STATE_INFO_REG_ID); - state->captA_fsm_state = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID); - state->captB_fsm_state = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID); - state->captC_fsm_state = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID); - state->acq_fsm_state = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID); - state->capt_reserve_one_mem_region = input_system_sub_system_reg_load(ID, - sub_id, - ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID); - - return; -} - -static inline void mipi_port_get_state( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - mipi_port_state_t *state) 
-{ - int i; - - assert(ID < N_RX_ID); - assert(port_ID < N_MIPI_PORT_ID); - assert(state != NULL); - - state->device_ready = receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX); - state->irq_status = receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX); - state->irq_enable = receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX); - state->timeout_count = receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX); - state->init_count = (uint16_t)receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX); - state->raw16_18 = (uint16_t)receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX); - state->sync_count = receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX); - state->rx_count = receiver_port_reg_load(ID, - port_ID, _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX); - - for (i = 0; i < MIPI_4LANE_CFG ; i++) { - state->lane_sync_count[i] = (uint8_t)((state->sync_count)>>(i*8)); - state->lane_rx_count[i] = (uint8_t)((state->rx_count)>>(i*8)); - } - - return; -} - -static inline void rx_channel_get_state( - const rx_ID_t ID, - const unsigned int ch_id, - rx_channel_state_t *state) -{ - int i; - - assert(ID < N_RX_ID); - assert(ch_id < N_RX_CHANNEL_ID); - assert(state != NULL); - - switch (ch_id) { - case 0: - state->comp_scheme0 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX); - state->comp_scheme1 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX); - break; - case 1: - state->comp_scheme0 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX); - state->comp_scheme1 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX); - break; - case 2: - state->comp_scheme0 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX); - state->comp_scheme1 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX); - 
break; - case 3: - state->comp_scheme0 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX); - state->comp_scheme1 = receiver_reg_load(ID, - _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX); - break; - } - -/* See Table 7.1.17,..., 7.1.24 */ - for (i = 0; i < 6; i++) { - uint8_t val = (uint8_t)((state->comp_scheme0)>>(i*5)) & 0x1f; - state->comp[i] = (mipi_compressor_t)(val & 0x07); - state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3); - } - for (i = 6; i < N_MIPI_FORMAT_CUSTOM; i++) { - uint8_t val = (uint8_t)((state->comp_scheme0)>>((i-6)*5)) & 0x1f; - state->comp[i] = (mipi_compressor_t)(val & 0x07); - state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3); - } - - return; -} - -// MW: "2400" in the name is not good, but this is to avoid a naming conflict -static input_system_cfg2400_t config; - -static void receiver_rst( - const rx_ID_t ID) -{ - enum mipi_port_id port_id; - - assert(ID < N_RX_ID); - -// Disable all ports. - for (port_id = MIPI_PORT0_ID; port_id < N_MIPI_PORT_ID; port_id++) { - receiver_port_enable(ID, port_id, false); - } - - // AM: Additional actions for stopping receiver? - - return; -} - -//Single function to reset all the devices mapped via GP_DEVICE. -static void gp_device_rst(const gp_device_ID_t ID) -{ - assert(ID < N_GP_DEVICE_ID); - - gp_device_reg_store(ID, _REG_GP_SYNCGEN_ENABLE_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FREE_RUNNING_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNCGEN_PAUSE_ADDR, ONE); - // gp_device_reg_store(ID, _REG_GP_NR_FRAMES_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_LINES_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR, ZERO); -// AM: Following calls cause strange warnings. Probably they should not be initialized. 
-// gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ZERO); -// gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ZERO); -// gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ZERO); -// gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_B_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_B_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_MASK_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_MASK_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_XY_CNT_MASK_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_DELTA_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_DELTA_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_MODE_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED1_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN1_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE1_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED2_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN2_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE2_ADDR, ZERO); - //gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO); - //gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNCGEN_HOR_CNT_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNCGEN_VER_CNT_ADDR, ZERO); - // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FRAME_CNT_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR, ZERO); // AM: Maybe this soft reset is not safe. 
- - return; -} - -static void input_selector_cfg_for_sensor(const gp_device_ID_t ID) -{ - assert(ID < N_GP_DEVICE_ID); - - gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ONE); - gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ONE); - gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ONE); - gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ONE); - gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO); - gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR, ZERO); - - return; -} - -static void input_switch_rst(const gp_device_ID_t ID) -{ - int addr; - - assert(ID < N_GP_DEVICE_ID); - - // Initialize the data&hsync LUT. - for (addr = _REG_GP_IFMT_input_switch_lut_reg0; - addr <= _REG_GP_IFMT_input_switch_lut_reg7; addr += SIZEOF_HRT_REG) { - - gp_device_reg_store(ID, addr, ZERO); - } - - // Initialize the vsync LUT. - gp_device_reg_store(ID, - _REG_GP_IFMT_input_switch_fsync_lut, - ZERO); - - return; -} - -static void input_switch_cfg( - const gp_device_ID_t ID, - const input_switch_cfg_t * const cfg) -{ - int addr_offset; - - assert(ID < N_GP_DEVICE_ID); - assert(cfg != NULL); - - // Initialize the data&hsync LUT. - for (addr_offset = 0; addr_offset < N_RX_CHANNEL_ID * 2; addr_offset++) { - assert(addr_offset * SIZEOF_HRT_REG + _REG_GP_IFMT_input_switch_lut_reg0 <= _REG_GP_IFMT_input_switch_lut_reg7); - gp_device_reg_store(ID, - _REG_GP_IFMT_input_switch_lut_reg0 + addr_offset * SIZEOF_HRT_REG, - cfg->hsync_data_reg[addr_offset]); - } - - // Initialize the vsync LUT. - gp_device_reg_store(ID, - _REG_GP_IFMT_input_switch_fsync_lut, - cfg->vsync_data_reg); - - return; -} - - -static void input_system_network_rst(const input_system_ID_t ID) -{ - unsigned int sub_id; - - // Reset all 3 multicasts. 
- input_system_sub_system_reg_store(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_A_IDX, - INPUT_SYSTEM_DISCARD_ALL); - input_system_sub_system_reg_store(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_B_IDX, - INPUT_SYSTEM_DISCARD_ALL); - input_system_sub_system_reg_store(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_C_IDX, - INPUT_SYSTEM_DISCARD_ALL); - - // Reset stream mux. - input_system_sub_system_reg_store(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MUX_IDX, - N_INPUT_SYSTEM_MULTIPLEX); - - // Reset 3 capture units. - for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) { - input_system_sub_system_reg_store(ID, - sub_id, - CAPT_INIT_REG_ID, - 1U << CAPT_INIT_RST_REG_BIT); - } - - // Reset acquisition unit. - for (sub_id = ACQUISITION_UNIT0_ID; sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) { - input_system_sub_system_reg_store(ID, - sub_id, - ACQ_INIT_REG_ID, - 1U << ACQ_INIT_RST_REG_BIT); - } - - // DMA unit reset is not needed. - - // Reset controller units. - // NB: In future we need to keep part of ctrl_state for split capture and - for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID; sub_id++) { - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_INIT_REG_ID, - 1U); //AM: Is there any named constant? - } - - return; -} - -// Function that resets current configuration. -input_system_error_t input_system_configuration_reset(void) -{ - unsigned int i; - - receiver_rst(RX0_ID); - - input_system_network_rst(INPUT_SYSTEM0_ID); - - gp_device_rst(INPUT_SYSTEM0_ID); - - input_switch_rst(INPUT_SYSTEM0_ID); - - //target_rst(); - - // Reset IRQ_CTRLs. - - // Reset configuration data structures. 
- for (i = 0; i < N_CHANNELS; i++ ) { - config.ch_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET; - config.target_isp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET; - config.target_sp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET; - config.target_strm2mem_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET; - } - - for (i = 0; i < N_CSI_PORTS; i++ ) { - config.csi_buffer_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET; - config.multicast[i] = INPUT_SYSTEM_CFG_FLAG_RESET; - } - - config.source_type_flags = INPUT_SYSTEM_CFG_FLAG_RESET; - config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_RESET; - config.unallocated_ib_mem_words = IB_CAPACITY_IN_WORDS; - //config.acq_allocated_ib_mem_words = 0; - - // Set the start of the session cofiguration. - config.session_flags = INPUT_SYSTEM_CFG_FLAG_REQUIRED; - - return INPUT_SYSTEM_ERR_NO_ERROR; -} - -// MW: Comments are good, but doxygen is required, place it at the declaration -// Function that appends the channel to current configuration. -static input_system_error_t input_system_configure_channel( - const channel_cfg_t channel) -{ - input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR; - // Check if channel is not already configured. - if (config.ch_flags[channel.ch_id] & INPUT_SYSTEM_CFG_FLAG_SET){ - return INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET; - } else { - switch (channel.source_type){ - case INPUT_SYSTEM_SOURCE_SENSOR : - error = input_system_configure_channel_sensor(channel); - break; - case INPUT_SYSTEM_SOURCE_TPG : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - case INPUT_SYSTEM_SOURCE_PRBS : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - case INPUT_SYSTEM_SOURCE_FIFO : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - default : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - } - - if (error != INPUT_SYSTEM_ERR_NO_ERROR) return error; - // Input switch channel configurations must be combined in united config. 
- config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2] = - channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[0]; - config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2 + 1] = - channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[1]; - config.input_switch_cfg.vsync_data_reg |= - (channel.target_cfg.input_switch_channel_cfg.vsync_data_reg & 0x7) << (channel.source_cfg.csi_cfg.csi_port * 3); - - // Other targets are just copied and marked as set. - config.target_isp[channel.source_cfg.csi_cfg.csi_port] = channel.target_cfg.target_isp_cfg; - config.target_sp[channel.source_cfg.csi_cfg.csi_port] = channel.target_cfg.target_sp_cfg; - config.target_strm2mem[channel.source_cfg.csi_cfg.csi_port] = channel.target_cfg.target_strm2mem_cfg; - config.target_isp_flags[channel.source_cfg.csi_cfg.csi_port] |= INPUT_SYSTEM_CFG_FLAG_SET; - config.target_sp_flags[channel.source_cfg.csi_cfg.csi_port] |= INPUT_SYSTEM_CFG_FLAG_SET; - config.target_strm2mem_flags[channel.source_cfg.csi_cfg.csi_port] |= INPUT_SYSTEM_CFG_FLAG_SET; - - config.ch_flags[channel.ch_id] = INPUT_SYSTEM_CFG_FLAG_SET; - } - return INPUT_SYSTEM_ERR_NO_ERROR; -} - -// Function that partitions input buffer space with determining addresses. -static input_system_error_t input_buffer_configuration(void) -{ - uint32_t current_address = 0; - uint32_t unallocated_memory = IB_CAPACITY_IN_WORDS; - - ib_buffer_t candidate_buffer_acq = IB_BUFFER_NULL; - uint32_t size_requested; - input_system_config_flags_t acq_already_specified = INPUT_SYSTEM_CFG_FLAG_RESET; - input_system_csi_port_t port; - for (port = INPUT_SYSTEM_PORT_A; port < N_INPUT_SYSTEM_PORTS; port++) { - - csi_cfg_t source = config.csi_value[port];//.csi_cfg; - - if ( config.csi_flags[port] & INPUT_SYSTEM_CFG_FLAG_SET) { - - // Check and set csi buffer in input buffer. 
- switch (source.buffering_mode) { - case INPUT_SYSTEM_FIFO_CAPTURE : - case INPUT_SYSTEM_XMEM_ACQUIRE : - config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_BLOCKED; // Well, not used. - break; - - case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING : - case INPUT_SYSTEM_SRAM_BUFFERING : - case INPUT_SYSTEM_XMEM_BUFFERING : - case INPUT_SYSTEM_XMEM_CAPTURE : - size_requested = source.csi_buffer.mem_reg_size * source.csi_buffer.nof_mem_regs; - if (source.csi_buffer.mem_reg_size > 0 - && source.csi_buffer.nof_mem_regs >0 - && size_requested <= unallocated_memory - ) { - config.csi_buffer[port].mem_reg_addr = current_address; - config.csi_buffer[port].mem_reg_size = source.csi_buffer.mem_reg_size; - config.csi_buffer[port].nof_mem_regs = source.csi_buffer.nof_mem_regs; - current_address += size_requested; - unallocated_memory -= size_requested; - config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_SET; - } else { - config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - break; - - default : - config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - } - - // Check acquisition buffer specified but set it later since it has to be unique. - switch (source.buffering_mode) { - case INPUT_SYSTEM_FIFO_CAPTURE : - case INPUT_SYSTEM_SRAM_BUFFERING : - case INPUT_SYSTEM_XMEM_CAPTURE : - // Nothing to do. 
- break; - - case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING : - case INPUT_SYSTEM_XMEM_BUFFERING : - case INPUT_SYSTEM_XMEM_ACQUIRE : - if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_RESET) { - size_requested = source.acquisition_buffer.mem_reg_size - * source.acquisition_buffer.nof_mem_regs; - if (source.acquisition_buffer.mem_reg_size > 0 - && source.acquisition_buffer.nof_mem_regs >0 - && size_requested <= unallocated_memory - ) { - candidate_buffer_acq = source.acquisition_buffer; - acq_already_specified = INPUT_SYSTEM_CFG_FLAG_SET; - } - } else { - // Check if specified acquisition buffer is the same as specified before. - if (source.acquisition_buffer.mem_reg_size != candidate_buffer_acq.mem_reg_size - || source.acquisition_buffer.nof_mem_regs != candidate_buffer_acq.nof_mem_regs - ) { - config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - } - break; - - default : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - } - } else { - config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_BLOCKED; - } - } // end of for ( port ) - - // Set the acquisition buffer at the end. 
- size_requested = candidate_buffer_acq.mem_reg_size * candidate_buffer_acq.nof_mem_regs; - if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_SET - && size_requested <= unallocated_memory) { - config.acquisition_buffer_unique.mem_reg_addr = current_address; - config.acquisition_buffer_unique.mem_reg_size = candidate_buffer_acq.mem_reg_size; - config.acquisition_buffer_unique.nof_mem_regs = candidate_buffer_acq.nof_mem_regs; - current_address += size_requested; - unallocated_memory -= size_requested; - config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_SET; - - assert(current_address <= IB_CAPACITY_IN_WORDS); - } - - return INPUT_SYSTEM_ERR_NO_ERROR; -} - -static void capture_unit_configure( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - const ib_buffer_t* const cfg) -{ - assert(ID < N_INPUT_SYSTEM_ID); - assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <= CAPTURE_UNIT2_ID)); // Commented part is always true. - assert(cfg != NULL); - - input_system_sub_system_reg_store(ID, - sub_id, - CAPT_START_ADDR_REG_ID, - cfg->mem_reg_addr); - input_system_sub_system_reg_store(ID, - sub_id, - CAPT_MEM_REGION_SIZE_REG_ID, - cfg->mem_reg_size); - input_system_sub_system_reg_store(ID, - sub_id, - CAPT_NUM_MEM_REGIONS_REG_ID, - cfg->nof_mem_regs); - - return; -} - - -static void acquisition_unit_configure( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - const ib_buffer_t* const cfg) -{ - assert(ID < N_INPUT_SYSTEM_ID); - assert(sub_id == ACQUISITION_UNIT0_ID); - assert(cfg != NULL); - - input_system_sub_system_reg_store(ID, - sub_id, - ACQ_START_ADDR_REG_ID, - cfg->mem_reg_addr); - input_system_sub_system_reg_store(ID, - sub_id, - ACQ_NUM_MEM_REGIONS_REG_ID, - cfg->nof_mem_regs); - input_system_sub_system_reg_store(ID, - sub_id, - ACQ_MEM_REGION_SIZE_REG_ID, - cfg->mem_reg_size); - - return; -} - - -static void ctrl_unit_configure( - const input_system_ID_t ID, - const sub_system_ID_t sub_id, - const ctrl_unit_cfg_t* const cfg) -{ - 
assert(ID < N_INPUT_SYSTEM_ID); - assert(sub_id == CTRL_UNIT0_ID); - assert(cfg != NULL); - - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_START_ADDR_A_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_addr); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_size); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT0_ID].nof_mem_regs); - - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_START_ADDR_B_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_addr); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_size); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT1_ID].nof_mem_regs); - - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_START_ADDR_C_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_addr); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_size); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID, - cfg->buffer_mipi[CAPTURE_UNIT2_ID].nof_mem_regs); - - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_ACQ_START_ADDR_REG_ID, - cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_addr); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID, - cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_size); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID, - cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].nof_mem_regs); - input_system_sub_system_reg_store(ID, - sub_id, - ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID, - 
0); - return; -} - -static void input_system_network_configure( - const input_system_ID_t ID, - const input_system_network_cfg_t * const cfg) -{ - uint32_t sub_id; - - assert(ID < N_INPUT_SYSTEM_ID); - assert(cfg != NULL); - - // Set all 3 multicasts. - input_system_sub_system_reg_store(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_A_IDX, - cfg->multicast_cfg[CAPTURE_UNIT0_ID]); - input_system_sub_system_reg_store(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_B_IDX, - cfg->multicast_cfg[CAPTURE_UNIT1_ID]); - input_system_sub_system_reg_store(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_C_IDX, - cfg->multicast_cfg[CAPTURE_UNIT2_ID]); - - // Set stream mux. - input_system_sub_system_reg_store(ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MUX_IDX, - cfg->mux_cfg); - - // Set capture units. - for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) { - capture_unit_configure(ID, - sub_id, - &(cfg->ctrl_unit_cfg[ID].buffer_mipi[sub_id - CAPTURE_UNIT0_ID])); - } - - // Set acquisition units. - for (sub_id = ACQUISITION_UNIT0_ID; sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) { - acquisition_unit_configure(ID, - sub_id, - &(cfg->ctrl_unit_cfg[sub_id - ACQUISITION_UNIT0_ID].buffer_acquire[sub_id - ACQUISITION_UNIT0_ID])); - } - - // No DMA configuration needed. Ctrl_unit will fully control it. - - // Set controller units. - for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID; sub_id++) { - ctrl_unit_configure(ID, - sub_id, - &(cfg->ctrl_unit_cfg[sub_id - CTRL_UNIT0_ID])); - } - - return; -} - -static input_system_error_t configuration_to_registers(void) -{ - input_system_network_cfg_t input_system_network_cfg; - int i; - - assert(config.source_type_flags & INPUT_SYSTEM_CFG_FLAG_SET); - - switch (config.source_type) { - case INPUT_SYSTEM_SOURCE_SENSOR : - - // Determine stream multicasts setting based on the mode of csi_cfg_t. - // AM: This should be moved towards earlier function call, e.g. 
in - // the commit function. - for (i = MIPI_PORT0_ID; i < N_MIPI_PORT_ID; i++) { - if (config.csi_flags[i] & INPUT_SYSTEM_CFG_FLAG_SET) { - - switch (config.csi_value[i].buffering_mode) { - - case INPUT_SYSTEM_FIFO_CAPTURE: - config.multicast[i] = INPUT_SYSTEM_CSI_BACKEND; - break; - - case INPUT_SYSTEM_XMEM_CAPTURE: - case INPUT_SYSTEM_SRAM_BUFFERING: - case INPUT_SYSTEM_XMEM_BUFFERING: - config.multicast[i] = INPUT_SYSTEM_INPUT_BUFFER; - break; - - case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING: - config.multicast[i] = INPUT_SYSTEM_MULTICAST; - break; - - case INPUT_SYSTEM_XMEM_ACQUIRE: - config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL; - break; - - default: - config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL; - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - //break; - } - } else { - config.multicast[i]= INPUT_SYSTEM_DISCARD_ALL; - } - - input_system_network_cfg.multicast_cfg[i] = config.multicast[i]; - - } // for - - input_system_network_cfg.mux_cfg = config.multiplexer; - - input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID - CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT0_ID] = config.csi_buffer[MIPI_PORT0_ID]; - input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID - CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT1_ID] = config.csi_buffer[MIPI_PORT1_ID]; - input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID - CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT2_ID] = config.csi_buffer[MIPI_PORT2_ID]; - input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID - CTRL_UNIT0_ID].buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID] = - config.acquisition_buffer_unique; - - // First set input network around CSI receiver. - input_system_network_configure(INPUT_SYSTEM0_ID, &input_system_network_cfg); - - // Set the CSI receiver. - //... 
- break; - - case INPUT_SYSTEM_SOURCE_TPG : - - break; - - case INPUT_SYSTEM_SOURCE_PRBS : - - break; - - case INPUT_SYSTEM_SOURCE_FIFO : - break; - - default : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - - } // end of switch (source_type) - - // Set input selector. - input_selector_cfg_for_sensor(INPUT_SYSTEM0_ID); - - // Set input switch. - input_switch_cfg(INPUT_SYSTEM0_ID, &config.input_switch_cfg); - - // Set input formatters. - // AM: IF are set dynamically. - return INPUT_SYSTEM_ERR_NO_ERROR; -} - - -// Function that applies the whole configuration. -input_system_error_t input_system_configuration_commit(void) -{ - // The last configuration step is to configure the input buffer. - input_system_error_t error = input_buffer_configuration(); - if (error != INPUT_SYSTEM_ERR_NO_ERROR) { - return error; - } - - // Translate the whole configuration into registers. - error = configuration_to_registers(); - if (error != INPUT_SYSTEM_ERR_NO_ERROR) { - return error; - } - - // Translate the whole configuration into ctrl commands etc. 
- - return INPUT_SYSTEM_ERR_NO_ERROR; -} - - - -// FIFO - -input_system_error_t input_system_csi_fifo_channel_cfg( - uint32_t ch_id, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - target_cfg2400_t target -) -{ - channel_cfg_t channel; - - channel.ch_id = ch_id; - channel.backend_ch = backend_ch; - channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR; - //channel.source - channel.source_cfg.csi_cfg.csi_port = port; - channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_FIFO_CAPTURE; - channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL; - channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL; - channel.source_cfg.csi_cfg.nof_xmem_buffers = 0; - - channel.target_cfg = target; - return input_system_configure_channel(channel); -} - - -input_system_error_t input_system_csi_fifo_channel_with_counting_cfg( - uint32_t ch_id, - uint32_t nof_frames, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - uint32_t csi_mem_reg_size, - uint32_t csi_nof_mem_regs, - target_cfg2400_t target -) -{ - channel_cfg_t channel; - - channel.ch_id = ch_id; - channel.backend_ch = backend_ch; - channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR; - //channel.source - channel.source_cfg.csi_cfg.csi_port = port; - channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING; - channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size; - channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs; - channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0; - channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL; - channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames; - - channel.target_cfg = target; - return input_system_configure_channel(channel); -} - - -// SRAM - -input_system_error_t input_system_csi_sram_channel_cfg( - uint32_t ch_id, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - uint32_t csi_mem_reg_size, - uint32_t csi_nof_mem_regs, - // uint32_t acq_mem_reg_size, 
- // uint32_t acq_nof_mem_regs, - target_cfg2400_t target -) -{ - channel_cfg_t channel; - - channel.ch_id = ch_id; - channel.backend_ch = backend_ch; - channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR; - //channel.source - channel.source_cfg.csi_cfg.csi_port = port; - channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_SRAM_BUFFERING; - channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size; - channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs; - channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0; - channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL; - channel.source_cfg.csi_cfg.nof_xmem_buffers = 0; - - channel.target_cfg = target; - return input_system_configure_channel(channel); -} - - -//XMEM - -// Collects all parameters and puts them in channel_cfg_t. -input_system_error_t input_system_csi_xmem_channel_cfg( - uint32_t ch_id, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - uint32_t csi_mem_reg_size, - uint32_t csi_nof_mem_regs, - uint32_t acq_mem_reg_size, - uint32_t acq_nof_mem_regs, - target_cfg2400_t target, - uint32_t nof_xmem_buffers -) -{ - channel_cfg_t channel; - - channel.ch_id = ch_id; - channel.backend_ch = backend_ch; - channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR; - //channel.source - channel.source_cfg.csi_cfg.csi_port = port; - channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_BUFFERING; - channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size; - channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs; - channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0; - channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size; - channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs; - channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0; - channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_xmem_buffers; - - channel.target_cfg = target; - return 
input_system_configure_channel(channel); -} - - - - -input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - uint32_t acq_mem_reg_size, - uint32_t acq_nof_mem_regs, - target_cfg2400_t target) -{ - channel_cfg_t channel; - - channel.ch_id = ch_id; - channel.backend_ch = backend_ch; - channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR; - //channel.source - channel.source_cfg.csi_cfg.csi_port = port; - channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_ACQUIRE; - channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL; - channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size; - channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs; - channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0; - channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames; - - channel.target_cfg = target; - return input_system_configure_channel(channel); -} - - -input_system_error_t input_system_csi_xmem_capture_only_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames, - input_system_csi_port_t port, - uint32_t csi_mem_reg_size, - uint32_t csi_nof_mem_regs, - uint32_t acq_mem_reg_size, - uint32_t acq_nof_mem_regs, - target_cfg2400_t target) -{ - channel_cfg_t channel; - - channel.ch_id = ch_id; - //channel.backend_ch = backend_ch; - channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR; - //channel.source - channel.source_cfg.csi_cfg.csi_port = port; - //channel.source_cfg.csi_cfg.backend_ch = backend_ch; - channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_CAPTURE; - channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size; - channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs; - channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0; - channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size; - 
channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs; - channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0; - channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames; - - channel.target_cfg = target; - return input_system_configure_channel(channel); -} - - - -// Non - CSI - -input_system_error_t input_system_prbs_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames,//not used yet - uint32_t seed, - uint32_t sync_gen_width, - uint32_t sync_gen_height, - uint32_t sync_gen_hblank_cycles, - uint32_t sync_gen_vblank_cycles, - target_cfg2400_t target -) -{ - channel_cfg_t channel; - - (void)nof_frames; - - channel.ch_id = ch_id; - channel.source_type= INPUT_SYSTEM_SOURCE_PRBS; - - channel.source_cfg.prbs_cfg.seed = seed; - channel.source_cfg.prbs_cfg.sync_gen_cfg.width = sync_gen_width; - channel.source_cfg.prbs_cfg.sync_gen_cfg.height = sync_gen_height; - channel.source_cfg.prbs_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles; - channel.source_cfg.prbs_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles; - - channel.target_cfg = target; - - return input_system_configure_channel(channel); -} - - - -input_system_error_t input_system_tpg_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames,//not used yet - uint32_t x_mask, - uint32_t y_mask, - uint32_t x_delta, - uint32_t y_delta, - uint32_t xy_mask, - uint32_t sync_gen_width, - uint32_t sync_gen_height, - uint32_t sync_gen_hblank_cycles, - uint32_t sync_gen_vblank_cycles, - target_cfg2400_t target -) -{ - channel_cfg_t channel; - - (void)nof_frames; - - channel.ch_id = ch_id; - channel.source_type = INPUT_SYSTEM_SOURCE_TPG; - - channel.source_cfg.tpg_cfg.x_mask = x_mask; - channel.source_cfg.tpg_cfg.y_mask = y_mask; - channel.source_cfg.tpg_cfg.x_delta = x_delta; - channel.source_cfg.tpg_cfg.y_delta = y_delta; - channel.source_cfg.tpg_cfg.xy_mask = xy_mask; - channel.source_cfg.tpg_cfg.sync_gen_cfg.width = sync_gen_width; - channel.source_cfg.tpg_cfg.sync_gen_cfg.height = 
sync_gen_height; - channel.source_cfg.tpg_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles; - channel.source_cfg.tpg_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles; - - channel.target_cfg = target; - return input_system_configure_channel(channel); -} - -// MW: Don't use system specific names, (even in system specific files) "cfg2400" -> cfg -input_system_error_t input_system_gpfifo_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames, //not used yet - target_cfg2400_t target) -{ - channel_cfg_t channel; - - (void)nof_frames; - - channel.ch_id = ch_id; - channel.source_type = INPUT_SYSTEM_SOURCE_FIFO; - - channel.target_cfg = target; - return input_system_configure_channel(channel); -} - -/////////////////////////////////////////////////////////////////////////// -// -// Private specialized functions for channel setting. -// -/////////////////////////////////////////////////////////////////////////// - -// Fills the parameters to config.csi_value[port] -static input_system_error_t input_system_configure_channel_sensor( - const channel_cfg_t channel) -{ - const uint32_t port = channel.source_cfg.csi_cfg.csi_port; - input_system_error_t status = INPUT_SYSTEM_ERR_NO_ERROR; - - input_system_multiplex_t mux; - - if (port >= N_INPUT_SYSTEM_PORTS) - return INPUT_SYSTEM_ERR_GENERIC; - - //check if port > N_INPUT_SYSTEM_MULTIPLEX - - status = set_source_type(&(config.source_type), channel.source_type, &config.source_type_flags); - if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status; - - // Check for conflicts on source (implicitly on multicast, capture unit and input buffer). - - status = set_csi_cfg(&(config.csi_value[port]), &channel.source_cfg.csi_cfg, &(config.csi_flags[port])); - if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status; - - - switch (channel.source_cfg.csi_cfg.buffering_mode){ - case INPUT_SYSTEM_FIFO_CAPTURE: - - // Check for conflicts on mux. 
- mux = INPUT_SYSTEM_MIPI_PORT0 + port; - status = input_system_multiplexer_cfg(&config.multiplexer, mux, &config.multiplexer_flags); - if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status; - config.multicast[port] = INPUT_SYSTEM_CSI_BACKEND; - - // Shared resource, so it should be blocked. - //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - - break; - case INPUT_SYSTEM_SRAM_BUFFERING : - - // Check for conflicts on mux. - mux = INPUT_SYSTEM_ACQUISITION_UNIT; - status = input_system_multiplexer_cfg(&config.multiplexer, mux, &config.multiplexer_flags); - if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status; - config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER; - - // Shared resource, so it should be blocked. - //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - - break; - case INPUT_SYSTEM_XMEM_BUFFERING : - - // Check for conflicts on mux. - mux = INPUT_SYSTEM_ACQUISITION_UNIT; - status = input_system_multiplexer_cfg(&config.multiplexer, mux, &config.multiplexer_flags); - if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status; - config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER; - - // Shared resource, so it should be blocked. 
- //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED; - - break; - case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - case INPUT_SYSTEM_XMEM_CAPTURE : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - case INPUT_SYSTEM_XMEM_ACQUIRE : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - default : - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - break; - } - return INPUT_SYSTEM_ERR_NO_ERROR; -} - -// Test flags and set structure. -static input_system_error_t set_source_type( - input_system_source_t * const lhs, - const input_system_source_t rhs, - input_system_config_flags_t * const flags) -{ - // MW: Not enough asserts - assert(lhs != NULL); - assert(flags != NULL); - - if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - - if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) { - // Check for consistency with already set value. - if ((*lhs) == (rhs)) { - return INPUT_SYSTEM_ERR_NO_ERROR; - } - else { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - } - // Check the value (individually). - if (rhs >= N_INPUT_SYSTEM_SOURCE) { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - // Set the value. - *lhs = rhs; - - *flags |= INPUT_SYSTEM_CFG_FLAG_SET; - return INPUT_SYSTEM_ERR_NO_ERROR; -} - - -// Test flags and set structure. 
-static input_system_error_t set_csi_cfg( - csi_cfg_t* const lhs, - const csi_cfg_t* const rhs, - input_system_config_flags_t * const flags) -{ - uint32_t memory_required; - uint32_t acq_memory_required; - - assert(lhs != NULL); - assert(flags != NULL); - - if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - - if (*flags & INPUT_SYSTEM_CFG_FLAG_SET) { - // check for consistency with already set value. - if (/*lhs->backend_ch == rhs.backend_ch - &&*/ lhs->buffering_mode == rhs->buffering_mode - && lhs->csi_buffer.mem_reg_size == rhs->csi_buffer.mem_reg_size - && lhs->csi_buffer.nof_mem_regs == rhs->csi_buffer.nof_mem_regs - && lhs->acquisition_buffer.mem_reg_size == rhs->acquisition_buffer.mem_reg_size - && lhs->acquisition_buffer.nof_mem_regs == rhs->acquisition_buffer.nof_mem_regs - && lhs->nof_xmem_buffers == rhs->nof_xmem_buffers - ) { - return INPUT_SYSTEM_ERR_NO_ERROR; - } - else { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - } - // Check the value (individually). - // no check for backend_ch - // no check for nof_xmem_buffers - memory_required = rhs->csi_buffer.mem_reg_size * rhs->csi_buffer.nof_mem_regs; - acq_memory_required = rhs->acquisition_buffer.mem_reg_size * rhs->acquisition_buffer.nof_mem_regs; - if (rhs->buffering_mode >= N_INPUT_SYSTEM_BUFFERING_MODE - || - // Check if required memory is available in input buffer (SRAM). - (memory_required + acq_memory_required )> config.unallocated_ib_mem_words - - ) { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - // Set the value. 
- //lhs[port]->backend_ch = rhs.backend_ch; - lhs->buffering_mode = rhs->buffering_mode; - lhs->nof_xmem_buffers = rhs->nof_xmem_buffers; - - lhs->csi_buffer.mem_reg_size = rhs->csi_buffer.mem_reg_size; - lhs->csi_buffer.nof_mem_regs = rhs->csi_buffer.nof_mem_regs; - lhs->acquisition_buffer.mem_reg_size = rhs->acquisition_buffer.mem_reg_size; - lhs->acquisition_buffer.nof_mem_regs = rhs->acquisition_buffer.nof_mem_regs; - // ALX: NB: Here we just set buffer parameters, but still not allocate it - // (no addresses determined). That will be done during commit. - - // FIXIT: acq_memory_required is not deducted, since it can be allocated multiple times. - config.unallocated_ib_mem_words -= memory_required; -//assert(config.unallocated_ib_mem_words >=0); - *flags |= INPUT_SYSTEM_CFG_FLAG_SET; - return INPUT_SYSTEM_ERR_NO_ERROR; -} - - -// Test flags and set structure. -static input_system_error_t input_system_multiplexer_cfg( - input_system_multiplex_t* const lhs, - const input_system_multiplex_t rhs, - input_system_config_flags_t* const flags) -{ - assert(lhs != NULL); - assert(flags != NULL); - - if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - - if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) { - // Check for consistency with already set value. - if ((*lhs) == (rhs)) { - return INPUT_SYSTEM_ERR_NO_ERROR; - } - else { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE; - } - } - // Check the value (individually). - if (rhs >= N_INPUT_SYSTEM_MULTIPLEX) { - *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT; - return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; - } - // Set the value. 
- *lhs = rhs; - - *flags |= INPUT_SYSTEM_CFG_FLAG_SET; - return INPUT_SYSTEM_ERR_NO_ERROR; -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_local.h deleted file mode 100644 index bf9230fd08f2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_local.h +++ /dev/null @@ -1,533 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__ -#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__ - -#include - -#include "input_system_global.h" - -#include "input_system_defs.h" /* HIVE_ISYS_GPREG_MULTICAST_A_IDX,... */ -#include "css_receiver_2400_defs.h" /* _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX, _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX,... 
*/ -#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) -#include "isp_capture_defs.h" -#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM) -/* Same name, but keep the distinction,it is a different device */ -#include "isp_capture_defs.h" -#else -#error "input_system_local.h: 2400_SYSTEM must be one of {2400, 2401 }" -#endif -#include "isp_acquisition_defs.h" -#include "input_system_ctrl_defs.h" - - -typedef enum { - INPUT_SYSTEM_ERR_NO_ERROR = 0, - INPUT_SYSTEM_ERR_GENERIC, - INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET, - INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE, - INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED, - N_INPUT_SYSTEM_ERR -} input_system_error_t; - -typedef enum { - INPUT_SYSTEM_PORT_A = 0, - INPUT_SYSTEM_PORT_B, - INPUT_SYSTEM_PORT_C, - N_INPUT_SYSTEM_PORTS -} input_system_csi_port_t; - -typedef struct ctrl_unit_cfg_s ctrl_unit_cfg_t; -typedef struct input_system_network_cfg_s input_system_network_cfg_t; -typedef struct target_cfg2400_s target_cfg2400_t; -typedef struct channel_cfg_s channel_cfg_t; -typedef struct backend_channel_cfg_s backend_channel_cfg_t; -typedef struct input_system_cfg2400_s input_system_cfg2400_t; -typedef struct mipi_port_state_s mipi_port_state_t; -typedef struct rx_channel_state_s rx_channel_state_t; -typedef struct input_switch_cfg_channel_s input_switch_cfg_channel_t; -typedef struct input_switch_cfg_s input_switch_cfg_t; - -struct ctrl_unit_cfg_s { - ib_buffer_t buffer_mipi[N_CAPTURE_UNIT_ID]; - ib_buffer_t buffer_acquire[N_ACQUISITION_UNIT_ID]; -}; - -struct input_system_network_cfg_s { - input_system_connection_t multicast_cfg[N_CAPTURE_UNIT_ID]; - input_system_multiplex_t mux_cfg; - ctrl_unit_cfg_t ctrl_unit_cfg[N_CTRL_UNIT_ID]; -}; - -typedef struct { -// TBD. - uint32_t dummy_parameter; -} target_isp_cfg_t; - - -typedef struct { -// TBD. - uint32_t dummy_parameter; -} target_sp_cfg_t; - - -typedef struct { -// TBD. 
- uint32_t dummy_parameter; -} target_strm2mem_cfg_t; - -struct input_switch_cfg_channel_s { - uint32_t hsync_data_reg[2]; - uint32_t vsync_data_reg; -}; - -struct target_cfg2400_s { - input_switch_cfg_channel_t input_switch_channel_cfg; - target_isp_cfg_t target_isp_cfg; - target_sp_cfg_t target_sp_cfg; - target_strm2mem_cfg_t target_strm2mem_cfg; -}; - -struct backend_channel_cfg_s { - uint32_t fmt_control_word_1; // Format config. - uint32_t fmt_control_word_2; - uint32_t no_side_band; -}; - -typedef union { - csi_cfg_t csi_cfg; - tpg_cfg_t tpg_cfg; - prbs_cfg_t prbs_cfg; - gpfifo_cfg_t gpfifo_cfg; -} source_cfg_t; - - -struct input_switch_cfg_s { - uint32_t hsync_data_reg[N_RX_CHANNEL_ID * 2]; - uint32_t vsync_data_reg; -}; - -// Configuration of a channel. -struct channel_cfg_s { - uint32_t ch_id; - backend_channel_cfg_t backend_ch; - input_system_source_t source_type; - source_cfg_t source_cfg; - target_cfg2400_t target_cfg; -}; - - -// Complete configuration for input system. -struct input_system_cfg2400_s { - - input_system_source_t source_type; input_system_config_flags_t source_type_flags; - //channel_cfg_t channel[N_CHANNELS]; - input_system_config_flags_t ch_flags[N_CHANNELS]; - // This is the place where the buffers' settings are collected, as given. - csi_cfg_t csi_value[N_CSI_PORTS]; input_system_config_flags_t csi_flags[N_CSI_PORTS]; - - // Possible another struct for ib. - // This buffers set at the end, based on the all configurations. - ib_buffer_t csi_buffer[N_CSI_PORTS]; input_system_config_flags_t csi_buffer_flags[N_CSI_PORTS]; - ib_buffer_t acquisition_buffer_unique; input_system_config_flags_t acquisition_buffer_unique_flags; - uint32_t unallocated_ib_mem_words; // Used for check.DEFAULT = IB_CAPACITY_IN_WORDS. 
- //uint32_t acq_allocated_ib_mem_words; - - input_system_connection_t multicast[N_CSI_PORTS]; - input_system_multiplex_t multiplexer; input_system_config_flags_t multiplexer_flags; - - - tpg_cfg_t tpg_value; input_system_config_flags_t tpg_flags; - prbs_cfg_t prbs_value; input_system_config_flags_t prbs_flags; - gpfifo_cfg_t gpfifo_value; input_system_config_flags_t gpfifo_flags; - - - input_switch_cfg_t input_switch_cfg; - - - target_isp_cfg_t target_isp [N_CHANNELS]; input_system_config_flags_t target_isp_flags [N_CHANNELS]; - target_sp_cfg_t target_sp [N_CHANNELS]; input_system_config_flags_t target_sp_flags [N_CHANNELS]; - target_strm2mem_cfg_t target_strm2mem [N_CHANNELS]; input_system_config_flags_t target_strm2mem_flags [N_CHANNELS]; - - input_system_config_flags_t session_flags; - -}; - -/* - * For each MIPI port - */ -#define _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX -#define _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX -#define _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX -#define _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX -#define _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX -/* new regs for each MIPI port w.r.t. 
2300 */ -#define _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX -#define _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX -#define _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX - -/* _HRT_CSS_RECEIVER_2400_COMP_FORMAT_REG_IDX is not defined per MIPI port but per channel */ -/* _HRT_CSS_RECEIVER_2400_COMP_PREDICT_REG_IDX is not defined per MIPI port but per channel */ -#define _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX -#define _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX -#define _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX -#define _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX -#define _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX -#define _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX -#define _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX -#define _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX -#define _HRT_CSS_RECEIVER_RAW18_REG_IDX _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX -#define _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX -#define _HRT_CSS_RECEIVER_RAW16_REG_IDX _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX - -/* Previously MIPI port regs, now 2x2 logical channel regs */ -#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC0_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX -#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC0_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX -#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC1_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX -#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC1_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX -#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC2_REG0_IDX 
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX -#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC2_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX -#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC3_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX -#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC3_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX - -/* Second backend is at offset 0x0700 w.r.t. the first port at offset 0x0100 */ -#define _HRT_CSS_BE_OFFSET 448 -#define _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_SRST_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_SEL_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX + _HRT_CSS_BE_OFFSET) -#define _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX + _HRT_CSS_BE_OFFSET) - - -#define 
_HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT -#define _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT -#define _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT -#define _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT -#define _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT -#define _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT - -#define _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX -#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_IDX _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX -#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_BITS _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS - -typedef struct capture_unit_state_s capture_unit_state_t; -typedef struct acquisition_unit_state_s acquisition_unit_state_t; -typedef struct 
ctrl_unit_state_s ctrl_unit_state_t; - -/* - * In 2300 ports can be configured independently and stream - * formats need to be specified. In 2400, there are only 8 - * supported configurations but the HW is fused to support - * only a single one. - * - * In 2300 the compressed format types are programmed by the - * user. In 2400 all stream formats are encoded on the stream. - * - * Use the enum to check validity of a user configuration - */ -typedef enum { - MONO_4L_1L_0L = 0, - MONO_3L_1L_0L, - MONO_2L_1L_0L, - MONO_1L_1L_0L, - STEREO_2L_1L_2L, - STEREO_3L_1L_1L, - STEREO_2L_1L_1L, - STEREO_1L_1L_1L, - N_RX_MODE -} rx_mode_t; - -typedef enum { - MIPI_PREDICTOR_NONE = 0, - MIPI_PREDICTOR_TYPE1, - MIPI_PREDICTOR_TYPE2, - N_MIPI_PREDICTOR_TYPES -} mipi_predictor_t; - -typedef enum { - MIPI_COMPRESSOR_NONE = 0, - MIPI_COMPRESSOR_10_6_10, - MIPI_COMPRESSOR_10_7_10, - MIPI_COMPRESSOR_10_8_10, - MIPI_COMPRESSOR_12_6_12, - MIPI_COMPRESSOR_12_7_12, - MIPI_COMPRESSOR_12_8_12, - N_MIPI_COMPRESSOR_METHODS -} mipi_compressor_t; - -typedef enum { - MIPI_FORMAT_RGB888 = 0, - MIPI_FORMAT_RGB555, - MIPI_FORMAT_RGB444, - MIPI_FORMAT_RGB565, - MIPI_FORMAT_RGB666, - MIPI_FORMAT_RAW8, /* 5 */ - MIPI_FORMAT_RAW10, - MIPI_FORMAT_RAW6, - MIPI_FORMAT_RAW7, - MIPI_FORMAT_RAW12, - MIPI_FORMAT_RAW14, /* 10 */ - MIPI_FORMAT_YUV420_8, - MIPI_FORMAT_YUV420_10, - MIPI_FORMAT_YUV422_8, - MIPI_FORMAT_YUV422_10, - MIPI_FORMAT_CUSTOM0, /* 15 */ - MIPI_FORMAT_YUV420_8_LEGACY, - MIPI_FORMAT_EMBEDDED, - MIPI_FORMAT_CUSTOM1, - MIPI_FORMAT_CUSTOM2, - MIPI_FORMAT_CUSTOM3, /* 20 */ - MIPI_FORMAT_CUSTOM4, - MIPI_FORMAT_CUSTOM5, - MIPI_FORMAT_CUSTOM6, - MIPI_FORMAT_CUSTOM7, - MIPI_FORMAT_YUV420_8_SHIFT, /* 25 */ - MIPI_FORMAT_YUV420_10_SHIFT, - MIPI_FORMAT_RAW16, - MIPI_FORMAT_RAW18, - N_MIPI_FORMAT, -} mipi_format_t; - -#define MIPI_FORMAT_JPEG MIPI_FORMAT_CUSTOM0 -#define MIPI_FORMAT_BINARY_8 MIPI_FORMAT_CUSTOM0 -#define N_MIPI_FORMAT_CUSTOM 8 - -/* The number of stores for compressed format types */ 
-#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM) - -typedef enum { - RX_IRQ_INFO_BUFFER_OVERRUN = 1UL << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT, - RX_IRQ_INFO_INIT_TIMEOUT = 1UL << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT, - RX_IRQ_INFO_ENTER_SLEEP_MODE = 1UL << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT, - RX_IRQ_INFO_EXIT_SLEEP_MODE = 1UL << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT, - RX_IRQ_INFO_ECC_CORRECTED = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT, - RX_IRQ_INFO_ERR_SOT = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT, - RX_IRQ_INFO_ERR_SOT_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT, - RX_IRQ_INFO_ERR_CONTROL = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT, - RX_IRQ_INFO_ERR_ECC_DOUBLE = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT, -/* RX_IRQ_INFO_NO_ERR = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT, */ - RX_IRQ_INFO_ERR_CRC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT, - RX_IRQ_INFO_ERR_UNKNOWN_ID = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT, - RX_IRQ_INFO_ERR_FRAME_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT, - RX_IRQ_INFO_ERR_FRAME_DATA = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT, - RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1UL << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT, - RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT, - RX_IRQ_INFO_ERR_LINE_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT, -} rx_irq_info_t; - -typedef struct rx_cfg_s rx_cfg_t; - -/* - * Applied per port - */ -struct rx_cfg_s { - rx_mode_t mode; /* The HW config */ - enum mipi_port_id port; /* The port ID to apply the control on */ - unsigned int timeout; - unsigned int initcount; - unsigned int synccount; - unsigned int rxcount; - mipi_predictor_t comp; /* Just for backward compatibility */ - bool is_two_ppc; -}; - -/* NOTE: The base has already an offset of 0x0100 */ -static const hrt_address MIPI_PORT_OFFSET[N_MIPI_PORT_ID] = { - 0x00000000UL, - 0x00000100UL, - 0x00000200UL}; - -static const 
mipi_lane_cfg_t MIPI_PORT_MAXLANES[N_MIPI_PORT_ID] = { - MIPI_4LANE_CFG, - MIPI_1LANE_CFG, - MIPI_2LANE_CFG}; - -static const bool MIPI_PORT_ACTIVE[N_RX_MODE][N_MIPI_PORT_ID] = { - {true, true, false}, - {true, true, false}, - {true, true, false}, - {true, true, false}, - {true, true, true}, - {true, true, true}, - {true, true, true}, - {true, true, true}}; - -static const mipi_lane_cfg_t MIPI_PORT_LANES[N_RX_MODE][N_MIPI_PORT_ID] = { - {MIPI_4LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG}, - {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG}, - {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG}, - {MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG}, - {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_2LANE_CFG}, - {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG}, - {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG}, - {MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG}}; - -static const hrt_address SUB_SYSTEM_OFFSET[N_SUB_SYSTEM_ID] = { - 0x00001000UL, - 0x00002000UL, - 0x00003000UL, - 0x00004000UL, - 0x00005000UL, - 0x00009000UL, - 0x0000A000UL, - 0x0000B000UL, - 0x0000C000UL}; - -struct capture_unit_state_s { - int Packet_Length; - int Received_Length; - int Received_Short_Packets; - int Received_Long_Packets; - int Last_Command; - int Next_Command; - int Last_Acknowledge; - int Next_Acknowledge; - int FSM_State_Info; - int StartMode; - int Start_Addr; - int Mem_Region_Size; - int Num_Mem_Regions; -/* int Init; write-only registers - int Start; - int Stop; */ -}; - -struct acquisition_unit_state_s { -/* int Init; write-only register */ - int Received_Short_Packets; - int Received_Long_Packets; - int Last_Command; - int Next_Command; - int Last_Acknowledge; - int Next_Acknowledge; - int FSM_State_Info; - int Int_Cntr_Info; - int Start_Addr; - int Mem_Region_Size; - int Num_Mem_Regions; -}; - -struct ctrl_unit_state_s { - int last_cmd; - int next_cmd; - int last_ack; - int next_ack; - int top_fsm_state; - int captA_fsm_state; - int captB_fsm_state; - int captC_fsm_state; - int acq_fsm_state; 
- int captA_start_addr; - int captB_start_addr; - int captC_start_addr; - int captA_mem_region_size; - int captB_mem_region_size; - int captC_mem_region_size; - int captA_num_mem_regions; - int captB_num_mem_regions; - int captC_num_mem_regions; - int acq_start_addr; - int acq_mem_region_size; - int acq_num_mem_regions; -/* int ctrl_init; write only register */ - int capt_reserve_one_mem_region; -}; - -struct input_system_state_s { - int str_multicastA_sel; - int str_multicastB_sel; - int str_multicastC_sel; - int str_mux_sel; - int str_mon_status; - int str_mon_irq_cond; - int str_mon_irq_en; - int isys_srst; - int isys_slv_reg_srst; - int str_deint_portA_cnt; - int str_deint_portB_cnt; - struct capture_unit_state_s capture_unit[N_CAPTURE_UNIT_ID]; - struct acquisition_unit_state_s acquisition_unit[N_ACQUISITION_UNIT_ID]; - struct ctrl_unit_state_s ctrl_unit_state[N_CTRL_UNIT_ID]; -}; - -struct mipi_port_state_s { - int device_ready; - int irq_status; - int irq_enable; - uint32_t timeout_count; - uint16_t init_count; - uint16_t raw16_18; - uint32_t sync_count; /*4 x uint8_t */ - uint32_t rx_count; /*4 x uint8_t */ - uint8_t lane_sync_count[MIPI_4LANE_CFG]; - uint8_t lane_rx_count[MIPI_4LANE_CFG]; -}; - -struct rx_channel_state_s { - uint32_t comp_scheme0; - uint32_t comp_scheme1; - mipi_predictor_t pred[N_MIPI_FORMAT_CUSTOM]; - mipi_compressor_t comp[N_MIPI_FORMAT_CUSTOM]; -}; - -struct receiver_state_s { - uint8_t fs_to_ls_delay; - uint8_t ls_to_data_delay; - uint8_t data_to_le_delay; - uint8_t le_to_fe_delay; - uint8_t fe_to_fs_delay; - uint8_t le_to_fs_delay; - bool is_two_ppc; - int backend_rst; - uint16_t raw18; - bool force_raw8; - uint16_t raw16; - struct mipi_port_state_s mipi_port_state[N_MIPI_PORT_ID]; - struct rx_channel_state_s rx_channel_state[N_RX_CHANNEL_ID]; - int be_gsp_acc_ovl; - int be_srst; - int be_is_two_ppc; - int be_comp_format0; - int be_comp_format1; - int be_comp_format2; - int be_comp_format3; - int be_sel; - int be_raw16_config; - int 
be_raw18_config; - int be_force_raw8; - int be_irq_status; - int be_irq_clear; -}; - -#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h deleted file mode 100644 index 48876bb08b70..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ -#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ - -#include "input_system_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -STORAGE_CLASS_INPUT_SYSTEM_C void input_system_reg_store( - const input_system_ID_t ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_INPUT_SYSTEM_ID); - assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1); - ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data), value); - return; -} - -STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_reg_load( - const input_system_ID_t ID, - const hrt_address reg) -{ - assert(ID < N_INPUT_SYSTEM_ID); - assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data)); -} - -STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store( - const rx_ID_t ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_RX_ID); - assert(RX_BASE[ID] != (hrt_address)-1); - ia_css_device_store_uint32(RX_BASE[ID] + reg*sizeof(hrt_data), value); - return; -} - -STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_reg_load( - const rx_ID_t ID, - const hrt_address reg) -{ - assert(ID < N_RX_ID); - assert(RX_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(RX_BASE[ID] + reg*sizeof(hrt_data)); -} - -STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_RX_ID); - assert(port_ID < N_MIPI_PORT_ID); - assert(RX_BASE[ID] != (hrt_address)-1); - assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1); - ia_css_device_store_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data), value); - return; -} - -STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const hrt_address reg) -{ - assert(ID < N_RX_ID); - assert(port_ID < N_MIPI_PORT_ID); - 
assert(RX_BASE[ID] != (hrt_address)-1); - assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1); - return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data)); -} - -STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store( - const input_system_ID_t ID, - const sub_system_ID_t sub_ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_INPUT_SYSTEM_ID); - assert(sub_ID < N_SUB_SYSTEM_ID); - assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1); - assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1); - ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data), value); - return; -} - -STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load( - const input_system_ID_t ID, - const sub_system_ID_t sub_ID, - const hrt_address reg) -{ - assert(ID < N_INPUT_SYSTEM_ID); - assert(sub_ID < N_SUB_SYSTEM_ID); - assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1); - assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1); - return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data)); -} - -#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c deleted file mode 100644 index 51daf76c2aea..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c +++ /dev/null @@ -1,448 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "assert_support.h" -#include "irq.h" - -#ifndef __INLINE_GP_DEVICE__ -#define __INLINE_GP_DEVICE__ -#endif -#include "gp_device.h" /* _REG_GP_IRQ_REQUEST_ADDR */ - -#include "platform_support.h" /* hrt_sleep() */ - -static inline void irq_wait_for_write_complete( - const irq_ID_t ID); - -static inline bool any_irq_channel_enabled( - const irq_ID_t ID); - -static inline irq_ID_t virq_get_irq_id( - const virq_id_t irq_ID, - unsigned int *channel_ID); - -#ifndef __INLINE_IRQ__ -#include "irq_private.h" -#endif /* __INLINE_IRQ__ */ - -static unsigned short IRQ_N_CHANNEL[N_IRQ_ID] = { - IRQ0_ID_N_CHANNEL, - IRQ1_ID_N_CHANNEL, - IRQ2_ID_N_CHANNEL, - IRQ3_ID_N_CHANNEL}; - -static unsigned short IRQ_N_ID_OFFSET[N_IRQ_ID + 1] = { - IRQ0_ID_OFFSET, - IRQ1_ID_OFFSET, - IRQ2_ID_OFFSET, - IRQ3_ID_OFFSET, - IRQ_END_OFFSET}; - -static virq_id_t IRQ_NESTING_ID[N_IRQ_ID] = { - N_virq_id, - virq_ifmt, - virq_isys, - virq_isel}; - -void irq_clear_all( - const irq_ID_t ID) -{ - hrt_data mask = 0xFFFFFFFF; - - assert(ID < N_IRQ_ID); - assert(IRQ_N_CHANNEL[ID] <= HRT_DATA_WIDTH); - - if (IRQ_N_CHANNEL[ID] < HRT_DATA_WIDTH) { - mask = ~((~(hrt_data)0)>>IRQ_N_CHANNEL[ID]); - } - - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, mask); - return; -} - -/* - * Do we want the user to be able to set the signalling method ? 
- */ -void irq_enable_channel( - const irq_ID_t ID, - const unsigned int irq_id) -{ - unsigned int mask = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_MASK_REG_IDX); - unsigned int enable = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX); - unsigned int edge_in = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_EDGE_REG_IDX); - unsigned int me = 1U << irq_id; - - assert(ID < N_IRQ_ID); - assert(irq_id < IRQ_N_CHANNEL[ID]); - - mask |= me; - enable |= me; - edge_in |= me; /* rising edge */ - -/* to avoid mishaps configuration must follow the following order */ - -/* mask this interrupt */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask & ~me); -/* rising edge at input */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_EDGE_REG_IDX, edge_in); -/* enable interrupt to output */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable); -/* clear current irq only */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me); -/* unmask interrupt from input */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask); - - irq_wait_for_write_complete(ID); - - return; -} - -void irq_enable_pulse( - const irq_ID_t ID, - bool pulse) -{ - unsigned int edge_out = 0x0; - - if (pulse) { - edge_out = 0xffffffff; - } - /* output is given as edge, not pulse */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX, edge_out); - return; -} - -void irq_disable_channel( - const irq_ID_t ID, - const unsigned int irq_id) -{ - unsigned int mask = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_MASK_REG_IDX); - unsigned int enable = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX); - unsigned int me = 1U << irq_id; - - assert(ID < N_IRQ_ID); - assert(irq_id < IRQ_N_CHANNEL[ID]); - - mask &= ~me; - enable &= ~me; - -/* enable interrupt to output */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable); -/* unmask interrupt from input */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask); -/* clear current irq only */ - irq_reg_store(ID, - 
_HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me); - - irq_wait_for_write_complete(ID); - - return; -} - -enum hrt_isp_css_irq_status irq_get_channel_id( - const irq_ID_t ID, - unsigned int *irq_id) -{ - unsigned int irq_status = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_STATUS_REG_IDX); - unsigned int idx; - enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success; - - assert(ID < N_IRQ_ID); - assert(irq_id != NULL); - -/* find the first irq bit */ - for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) { - if (irq_status & (1U << idx)) - break; - } - if (idx == IRQ_N_CHANNEL[ID]) - return hrt_isp_css_irq_status_error; - -/* now check whether there are more bits set */ - if (irq_status != (1U << idx)) - status = hrt_isp_css_irq_status_more_irqs; - - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx); - - irq_wait_for_write_complete(ID); - - if (irq_id != NULL) - *irq_id = (unsigned int)idx; - - return status; -} - -static const hrt_address IRQ_REQUEST_ADDR[N_IRQ_SW_CHANNEL_ID] = { - _REG_GP_IRQ_REQUEST0_ADDR, - _REG_GP_IRQ_REQUEST1_ADDR}; - -void irq_raise( - const irq_ID_t ID, - const irq_sw_channel_id_t irq_id) -{ - hrt_address addr; - - OP___assert(ID == IRQ0_ID); - OP___assert(IRQ_BASE[ID] != (hrt_address)-1); - OP___assert(irq_id < N_IRQ_SW_CHANNEL_ID); - - (void)ID; - - addr = IRQ_REQUEST_ADDR[irq_id]; -/* The SW IRQ pins are remapped to offset zero */ - gp_device_reg_store(GP_DEVICE0_ID, - (unsigned int)addr, 1); - gp_device_reg_store(GP_DEVICE0_ID, - (unsigned int)addr, 0); - return; -} - -void irq_controller_get_state( - const irq_ID_t ID, - irq_controller_state_t *state) -{ - assert(ID < N_IRQ_ID); - assert(state != NULL); - - state->irq_edge = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_EDGE_REG_IDX); - state->irq_mask = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_MASK_REG_IDX); - state->irq_status = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_STATUS_REG_IDX); - state->irq_enable = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX); - 
state->irq_level_not_pulse = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX); - return; -} - -bool any_virq_signal(void) -{ - unsigned int irq_status = irq_reg_load(IRQ0_ID, - _HRT_IRQ_CONTROLLER_STATUS_REG_IDX); - - return (irq_status != 0); -} - -void cnd_virq_enable_channel( - const virq_id_t irq_ID, - const bool en) -{ - irq_ID_t i; - unsigned int channel_ID; - irq_ID_t ID = virq_get_irq_id(irq_ID, &channel_ID); - - assert(ID < N_IRQ_ID); - - for (i=IRQ1_ID;iirq_status_reg[ID] |= irq_data; - - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, irq_data); - - irq_wait_for_write_complete(ID); - } - } - - return irq_status; -} - -void virq_clear_info( - virq_info_t *irq_info) -{ - irq_ID_t ID; - - assert(irq_info != NULL); - - for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) { - irq_info->irq_status_reg[ID] = 0; - } - return; -} - -enum hrt_isp_css_irq_status virq_get_channel_id( - virq_id_t *irq_id) -{ - unsigned int irq_status = irq_reg_load(IRQ0_ID, - _HRT_IRQ_CONTROLLER_STATUS_REG_IDX); - unsigned int idx; - enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success; - irq_ID_t ID; - - assert(irq_id != NULL); - -/* find the first irq bit on device 0 */ - for (idx = 0; idx < IRQ_N_CHANNEL[IRQ0_ID]; idx++) { - if (irq_status & (1U << idx)) - break; - } - - if (idx == IRQ_N_CHANNEL[IRQ0_ID]) { - return hrt_isp_css_irq_status_error; - } - -/* Check whether there are more bits set on device 0 */ - if (irq_status != (1U << idx)) { - status = hrt_isp_css_irq_status_more_irqs; - } - -/* Check whether we have an IRQ on one of the nested devices */ - for (ID = N_IRQ_ID-1 ; ID > (irq_ID_t)0; ID--) { - if (IRQ_NESTING_ID[ID] == (virq_id_t)idx) { - break; - } - } - -/* If we have a nested IRQ, load that state, discard the device 0 state */ - if (ID != IRQ0_ID) { - irq_status = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_STATUS_REG_IDX); -/* find the first irq bit on device "id" */ - for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) { - if (irq_status & (1U 
<< idx)) - break; - } - - if (idx == IRQ_N_CHANNEL[ID]) { - return hrt_isp_css_irq_status_error; - } - -/* Alternatively check whether there are more bits set on this device */ - if (irq_status != (1U << idx)) { - status = hrt_isp_css_irq_status_more_irqs; - } else { -/* If this device is empty, clear the state on device 0 */ - irq_reg_store(IRQ0_ID, - _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << IRQ_NESTING_ID[ID]); - } - } /* if (ID != IRQ0_ID) */ - -/* Here we proceed to clear the IRQ on detected device, if no nested IRQ, this is device 0 */ - irq_reg_store(ID, - _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx); - - irq_wait_for_write_complete(ID); - - idx += IRQ_N_ID_OFFSET[ID]; - if (irq_id != NULL) - *irq_id = (virq_id_t)idx; - - return status; -} - -static inline void irq_wait_for_write_complete( - const irq_ID_t ID) -{ - assert(ID < N_IRQ_ID); - assert(IRQ_BASE[ID] != (hrt_address)-1); - (void)ia_css_device_load_uint32(IRQ_BASE[ID] + - _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX*sizeof(hrt_data)); -} - -static inline bool any_irq_channel_enabled( - const irq_ID_t ID) -{ - hrt_data en_reg; - - assert(ID < N_IRQ_ID); - - en_reg = irq_reg_load(ID, - _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX); - - return (en_reg != 0); -} - -static inline irq_ID_t virq_get_irq_id( - const virq_id_t irq_ID, - unsigned int *channel_ID) -{ - irq_ID_t ID; - - assert(channel_ID != NULL); - - for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) { - if (irq_ID < IRQ_N_ID_OFFSET[ID + 1]) { - break; - } - } - - *channel_ID = (unsigned int)irq_ID - IRQ_N_ID_OFFSET[ID]; - - return ID; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_local.h deleted file mode 100644 index f522dfd1a9f1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_local.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IRQ_LOCAL_H_INCLUDED__ -#define __IRQ_LOCAL_H_INCLUDED__ - -#include "irq_global.h" - -#include - -/* IRQ0_ID */ -#include "hive_isp_css_defs.h" -#define HIVE_GP_DEV_IRQ_NUM_IRQS 32 -/* IRQ1_ID */ -#include "input_formatter_subsystem_defs.h" -#define HIVE_IFMT_IRQ_NUM_IRQS 5 -/* IRQ2_ID */ -#include "input_system_defs.h" -/* IRQ3_ID */ -#include "input_selector_defs.h" - - -#define IRQ_ID_OFFSET 32 -#define IRQ0_ID_OFFSET 0 -#define IRQ1_ID_OFFSET IRQ_ID_OFFSET -#define IRQ2_ID_OFFSET (2*IRQ_ID_OFFSET) -#define IRQ3_ID_OFFSET (3*IRQ_ID_OFFSET) -#define IRQ_END_OFFSET (4*IRQ_ID_OFFSET) - -#define IRQ0_ID_N_CHANNEL HIVE_GP_DEV_IRQ_NUM_IRQS -#define IRQ1_ID_N_CHANNEL HIVE_IFMT_IRQ_NUM_IRQS -#define IRQ2_ID_N_CHANNEL HIVE_ISYS_IRQ_NUM_BITS -#define IRQ3_ID_N_CHANNEL HIVE_ISEL_IRQ_NUM_IRQS - -typedef struct virq_info_s virq_info_t; -typedef struct irq_controller_state_s irq_controller_state_t; - - -typedef enum { - virq_gpio_pin_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID, - virq_gpio_pin_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID, - virq_gpio_pin_2 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID, - virq_gpio_pin_3 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID, - virq_gpio_pin_4 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID, - virq_gpio_pin_5 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID, - virq_gpio_pin_6 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID, - virq_gpio_pin_7 = IRQ0_ID_OFFSET + 
HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID, - virq_gpio_pin_8 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID, - virq_gpio_pin_9 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID, - virq_gpio_pin_10 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID, - virq_gpio_pin_11 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID, - virq_sp = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_BIT_ID, - virq_isp = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_BIT_ID, - virq_isys = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISYS_BIT_ID, - virq_isel = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISEL_BIT_ID, - virq_ifmt = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_IFMT_BIT_ID, - virq_sp_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID, - virq_isp_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID, - virq_mod_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID, -#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) - virq_isp_pmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID, -#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM) - virq_isys_2401 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_IS2401_BIT_ID, -#else -#error "irq_local.h: 2400_SYSTEM must be one of {2400, 2401 }" -#endif - virq_isp_bamem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID, - virq_isp_dmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID, - virq_sp_icache_mem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID, - virq_sp_dmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID, - virq_mmu_cache_mem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID, - virq_gp_timer_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID, - virq_gp_timer_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID, - virq_sw_pin_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID, - virq_sw_pin_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID, - virq_dma = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_DMA_BIT_ID, - virq_sp_stream_mon_b = IRQ0_ID_OFFSET + 
HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID, - - virq_ifmt0_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID, - virq_ifmt1_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID, - virq_ifmt2_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_SEC_BIT_ID, - virq_ifmt3_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_MEM_CPY_BIT_ID, - virq_ifmt_sideband_changed = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID, - - virq_isys_sof = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_SOF_BIT_ID, - virq_isys_eof = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_EOF_BIT_ID, - virq_isys_sol = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_SOL_BIT_ID, - virq_isys_eol = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_EOL_BIT_ID, - virq_isys_csi = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID, - virq_isys_csi_be = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID, - virq_isys_capt0_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP, - virq_isys_capt0_id_late_sop= IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP, - virq_isys_capt1_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP, - virq_isys_capt1_id_late_sop= IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP, - virq_isys_capt2_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP, - virq_isys_capt2_id_late_sop= IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP, - virq_isys_acq_sop_mismatch = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH, - virq_isys_ctrl_capt0 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPA, - virq_isys_ctrl_capt1 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPB, - virq_isys_ctrl_capt2 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPC, - virq_isys_cio_to_ahb = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CIO2AHB, - virq_isys_dma = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_DMA_BIT_ID, - virq_isys_fifo_monitor = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_STREAM_MON_BIT_ID, - - virq_isel_sof = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID, - virq_isel_eof = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID, - virq_isel_sol = IRQ3_ID_OFFSET + 
HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID, - virq_isel_eol = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID, - - N_virq_id = IRQ_END_OFFSET -} virq_id_t; - -struct virq_info_s { - hrt_data irq_status_reg[N_IRQ_ID]; -}; - -struct irq_controller_state_s { - unsigned int irq_edge; - unsigned int irq_mask; - unsigned int irq_status; - unsigned int irq_enable; - unsigned int irq_level_not_pulse; -}; - -#endif /* __IRQ_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h deleted file mode 100644 index 23a13ac696c2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IRQ_PRIVATE_H_INCLUDED__ -#define __IRQ_PRIVATE_H_INCLUDED__ - -#include "irq_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -STORAGE_CLASS_IRQ_C void irq_reg_store( - const irq_ID_t ID, - const unsigned int reg, - const hrt_data value) -{ - assert(ID < N_IRQ_ID); - assert(IRQ_BASE[ID] != (hrt_address)-1); - ia_css_device_store_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data), value); - return; -} - -STORAGE_CLASS_IRQ_C hrt_data irq_reg_load( - const irq_ID_t ID, - const unsigned int reg) -{ - assert(ID < N_IRQ_ID); - assert(IRQ_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data)); -} - -#endif /* __IRQ_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c deleted file mode 100644 index 531c932a48f5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include "isp.h" - -#ifndef __INLINE_ISP__ -#include "isp_private.h" -#endif /* __INLINE_ISP__ */ - -#include "assert_support.h" -#include "platform_support.h" /* hrt_sleep() */ - -void cnd_isp_irq_enable( - const isp_ID_t ID, - const bool cnd) -{ - if (cnd) { - isp_ctrl_setbit(ID, ISP_IRQ_READY_REG, ISP_IRQ_READY_BIT); -/* Enabling the IRQ immediately triggers an interrupt, clear it */ - isp_ctrl_setbit(ID, ISP_IRQ_CLEAR_REG, ISP_IRQ_CLEAR_BIT); - } else { - isp_ctrl_clearbit(ID, ISP_IRQ_READY_REG, - ISP_IRQ_READY_BIT); - } - return; -} - -void isp_get_state( - const isp_ID_t ID, - isp_state_t *state, - isp_stall_t *stall) -{ - hrt_data sc = isp_ctrl_load(ID, ISP_SC_REG); - - assert(state != NULL); - assert(stall != NULL); - -#if defined(_hrt_sysmem_ident_address) - /* Patch to avoid compiler unused symbol warning in C_RUN build */ - (void)__hrt_sysmem_ident_address; - (void)_hrt_sysmem_map_var; -#endif - - state->pc = isp_ctrl_load(ID, ISP_PC_REG); - state->status_register = sc; - state->is_broken = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_BROKEN_BIT); - state->is_idle = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT); - state->is_sleeping = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT); - state->is_stalling = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_STALLING_BIT); - stall->stat_ctrl = - !isp_ctrl_getbit(ID, ISP_CTRL_SINK_REG, ISP_CTRL_SINK_BIT); - stall->pmem = - !isp_ctrl_getbit(ID, ISP_PMEM_SINK_REG, ISP_PMEM_SINK_BIT); - stall->dmem = - !isp_ctrl_getbit(ID, ISP_DMEM_SINK_REG, ISP_DMEM_SINK_BIT); - stall->vmem = - !isp_ctrl_getbit(ID, ISP_VMEM_SINK_REG, ISP_VMEM_SINK_BIT); - stall->fifo0 = - !isp_ctrl_getbit(ID, ISP_FIFO0_SINK_REG, ISP_FIFO0_SINK_BIT); - stall->fifo1 = - !isp_ctrl_getbit(ID, ISP_FIFO1_SINK_REG, ISP_FIFO1_SINK_BIT); - stall->fifo2 = - !isp_ctrl_getbit(ID, ISP_FIFO2_SINK_REG, ISP_FIFO2_SINK_BIT); - stall->fifo3 = - !isp_ctrl_getbit(ID, ISP_FIFO3_SINK_REG, ISP_FIFO3_SINK_BIT); - stall->fifo4 = - !isp_ctrl_getbit(ID, ISP_FIFO4_SINK_REG, 
ISP_FIFO4_SINK_BIT); - stall->fifo5 = - !isp_ctrl_getbit(ID, ISP_FIFO5_SINK_REG, ISP_FIFO5_SINK_BIT); - stall->fifo6 = - !isp_ctrl_getbit(ID, ISP_FIFO6_SINK_REG, ISP_FIFO6_SINK_BIT); - stall->vamem1 = - !isp_ctrl_getbit(ID, ISP_VAMEM1_SINK_REG, ISP_VAMEM1_SINK_BIT); - stall->vamem2 = - !isp_ctrl_getbit(ID, ISP_VAMEM2_SINK_REG, ISP_VAMEM2_SINK_BIT); - stall->vamem3 = - !isp_ctrl_getbit(ID, ISP_VAMEM3_SINK_REG, ISP_VAMEM3_SINK_BIT); - stall->hmem = - !isp_ctrl_getbit(ID, ISP_HMEM_SINK_REG, ISP_HMEM_SINK_BIT); -/* - stall->icache_master = - !isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG, - ISP_ICACHE_MT_SINK_BIT); - */ - return; -} - -/* ISP functions to control the ISP state from the host, even in crun. */ - -/* Inspect readiness of an ISP indexed by ID */ -unsigned isp_is_ready(isp_ID_t ID) -{ - assert (ID < N_ISP_ID); - return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT); -} - -/* Inspect sleeping of an ISP indexed by ID */ -unsigned isp_is_sleeping(isp_ID_t ID) -{ - assert (ID < N_ISP_ID); - return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT); -} - -/* To be called by the host immediately before starting ISP ID. */ -void isp_start(isp_ID_t ID) -{ - assert (ID < N_ISP_ID); -} - -/* Wake up ISP ID. */ -void isp_wake(isp_ID_t ID) -{ - assert (ID < N_ISP_ID); - isp_ctrl_setbit(ID, ISP_SC_REG, ISP_START_BIT); - hrt_sleep(); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_local.h deleted file mode 100644 index 5dcc52dff3dd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_local.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __ISP_LOCAL_H_INCLUDED__ -#define __ISP_LOCAL_H_INCLUDED__ - -#include - -#include "isp_global.h" - -#include - -#define HIVE_ISP_VMEM_MASK ((1U< -#endif - -#include "isp_public.h" - -#include "device_access.h" - -#include "assert_support.h" -#include "type_support.h" - -STORAGE_CLASS_ISP_C void isp_ctrl_store( - const isp_ID_t ID, - const unsigned int reg, - const hrt_data value) -{ - assert(ID < N_ISP_ID); - assert(ISP_CTRL_BASE[ID] != (hrt_address)-1); -#if !defined(HRT_MEMORY_ACCESS) - ia_css_device_store_uint32(ISP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); -#else - hrt_master_port_store_32(ISP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); -#endif - return; -} - -STORAGE_CLASS_ISP_C hrt_data isp_ctrl_load( - const isp_ID_t ID, - const unsigned int reg) -{ - assert(ID < N_ISP_ID); - assert(ISP_CTRL_BASE[ID] != (hrt_address)-1); -#if !defined(HRT_MEMORY_ACCESS) - return ia_css_device_load_uint32(ISP_CTRL_BASE[ID] + reg*sizeof(hrt_data)); -#else - return hrt_master_port_uload_32(ISP_CTRL_BASE[ID] + reg*sizeof(hrt_data)); -#endif -} - -STORAGE_CLASS_ISP_C bool isp_ctrl_getbit( - const isp_ID_t ID, - const unsigned int reg, - const unsigned int bit) -{ - hrt_data val = isp_ctrl_load(ID, reg); - return (val & (1UL << bit)) != 0; -} - -STORAGE_CLASS_ISP_C void isp_ctrl_setbit( - const isp_ID_t ID, - const unsigned int reg, - const unsigned int bit) -{ - hrt_data data = isp_ctrl_load(ID, reg); - isp_ctrl_store(ID, reg, (data | (1UL << bit))); - return; -} - -STORAGE_CLASS_ISP_C void 
isp_ctrl_clearbit( - const isp_ID_t ID, - const unsigned int reg, - const unsigned int bit) -{ - hrt_data data = isp_ctrl_load(ID, reg); - isp_ctrl_store(ID, reg, (data & ~(1UL << bit))); - return; -} - -STORAGE_CLASS_ISP_C void isp_dmem_store( - const isp_ID_t ID, - unsigned int addr, - const void *data, - const size_t size) -{ - assert(ID < N_ISP_ID); - assert(ISP_DMEM_BASE[ID] != (hrt_address)-1); -#if !defined(HRT_MEMORY_ACCESS) - ia_css_device_store(ISP_DMEM_BASE[ID] + addr, data, size); -#else - hrt_master_port_store(ISP_DMEM_BASE[ID] + addr, data, size); -#endif - return; -} - -STORAGE_CLASS_ISP_C void isp_dmem_load( - const isp_ID_t ID, - const unsigned int addr, - void *data, - const size_t size) -{ - assert(ID < N_ISP_ID); - assert(ISP_DMEM_BASE[ID] != (hrt_address)-1); -#if !defined(HRT_MEMORY_ACCESS) - ia_css_device_load(ISP_DMEM_BASE[ID] + addr, data, size); -#else - hrt_master_port_load(ISP_DMEM_BASE[ID] + addr, data, size); -#endif - return; -} - -STORAGE_CLASS_ISP_C void isp_dmem_store_uint32( - const isp_ID_t ID, - unsigned int addr, - const uint32_t data) -{ - assert(ID < N_ISP_ID); - assert(ISP_DMEM_BASE[ID] != (hrt_address)-1); - (void)ID; -#if !defined(HRT_MEMORY_ACCESS) - ia_css_device_store_uint32(ISP_DMEM_BASE[ID] + addr, data); -#else - hrt_master_port_store_32(ISP_DMEM_BASE[ID] + addr, data); -#endif - return; -} - -STORAGE_CLASS_ISP_C uint32_t isp_dmem_load_uint32( - const isp_ID_t ID, - const unsigned int addr) -{ - assert(ID < N_ISP_ID); - assert(ISP_DMEM_BASE[ID] != (hrt_address)-1); - (void)ID; -#if !defined(HRT_MEMORY_ACCESS) - return ia_css_device_load_uint32(ISP_DMEM_BASE[ID] + addr); -#else - return hrt_master_port_uload_32(ISP_DMEM_BASE[ID] + addr); -#endif -} - -STORAGE_CLASS_ISP_C uint32_t isp_2w_cat_1w( - const uint16_t x0, - const uint16_t x1) -{ - uint32_t out = ((uint32_t)(x1 & HIVE_ISP_VMEM_MASK) << ISP_VMEM_ELEMBITS) - | (x0 & HIVE_ISP_VMEM_MASK); - return out; -} - -#endif /* __ISP_PRIVATE_H_INCLUDED__ */ diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c deleted file mode 100644 index 1a1719d3e745..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* The name "mmu.h is already taken" */ -#include "mmu_device.h" - -void mmu_set_page_table_base_index( - const mmu_ID_t ID, - const hrt_data base_index) -{ - mmu_reg_store(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX, base_index); - return; -} - -hrt_data mmu_get_page_table_base_index( - const mmu_ID_t ID) -{ - return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX); -} - -void mmu_invalidate_cache( - const mmu_ID_t ID) -{ - mmu_reg_store(ID, _HRT_MMU_INVALIDATE_TLB_REG_IDX, 1); - return; -} - -void mmu_invalidate_cache_all(void) -{ - mmu_ID_t mmu_id; - for (mmu_id = (mmu_ID_t)0;mmu_id < N_MMU_ID; mmu_id++) { - mmu_invalidate_cache(mmu_id); - } -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_local.h deleted file mode 100644 index 7c3ad157189f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_local.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging 
ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __MMU_LOCAL_H_INCLUDED__ -#define __MMU_LOCAL_H_INCLUDED__ - -#include "mmu_global.h" - -#endif /* __MMU_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp.c deleted file mode 100644 index db694d3a6fbb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "sp.h" - -#ifndef __INLINE_SP__ -#include "sp_private.h" -#endif /* __INLINE_SP__ */ - -#include "assert_support.h" - -void cnd_sp_irq_enable( - const sp_ID_t ID, - const bool cnd) -{ - if (cnd) { - sp_ctrl_setbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT); -/* Enabling the IRQ immediately triggers an interrupt, clear it */ - sp_ctrl_setbit(ID, SP_IRQ_CLEAR_REG, SP_IRQ_CLEAR_BIT); - } else { - sp_ctrl_clearbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT); - } -} - -void sp_get_state( - const sp_ID_t ID, - sp_state_t *state, - sp_stall_t *stall) -{ - hrt_data sc = sp_ctrl_load(ID, SP_SC_REG); - - assert(state != NULL); - assert(stall != NULL); - - state->pc = sp_ctrl_load(ID, SP_PC_REG); - state->status_register = sc; - state->is_broken = (sc & (1U << SP_BROKEN_BIT)) != 0; - state->is_idle = (sc & (1U << SP_IDLE_BIT)) != 0; - state->is_sleeping = (sc & (1U << SP_SLEEPING_BIT)) != 0; - state->is_stalling = (sc & (1U << SP_STALLING_BIT)) != 0; - stall->fifo0 = - !sp_ctrl_getbit(ID, SP_FIFO0_SINK_REG, SP_FIFO0_SINK_BIT); - stall->fifo1 = - !sp_ctrl_getbit(ID, SP_FIFO1_SINK_REG, SP_FIFO1_SINK_BIT); - stall->fifo2 = - !sp_ctrl_getbit(ID, SP_FIFO2_SINK_REG, SP_FIFO2_SINK_BIT); - stall->fifo3 = - !sp_ctrl_getbit(ID, SP_FIFO3_SINK_REG, SP_FIFO3_SINK_BIT); - stall->fifo4 = - !sp_ctrl_getbit(ID, SP_FIFO4_SINK_REG, SP_FIFO4_SINK_BIT); - stall->fifo5 = - !sp_ctrl_getbit(ID, SP_FIFO5_SINK_REG, SP_FIFO5_SINK_BIT); - stall->fifo6 = - !sp_ctrl_getbit(ID, SP_FIFO6_SINK_REG, SP_FIFO6_SINK_BIT); - stall->fifo7 = - !sp_ctrl_getbit(ID, SP_FIFO7_SINK_REG, SP_FIFO7_SINK_BIT); - stall->fifo8 = - !sp_ctrl_getbit(ID, SP_FIFO8_SINK_REG, SP_FIFO8_SINK_BIT); - stall->fifo9 = - !sp_ctrl_getbit(ID, SP_FIFO9_SINK_REG, SP_FIFO9_SINK_BIT); - stall->fifoa = - !sp_ctrl_getbit(ID, SP_FIFOA_SINK_REG, SP_FIFOA_SINK_BIT); - stall->dmem = - !sp_ctrl_getbit(ID, SP_DMEM_SINK_REG, SP_DMEM_SINK_BIT); - stall->control_master = - !sp_ctrl_getbit(ID, SP_CTRL_MT_SINK_REG, SP_CTRL_MT_SINK_BIT); - 
stall->icache_master = - !sp_ctrl_getbit(ID, SP_ICACHE_MT_SINK_REG, - SP_ICACHE_MT_SINK_BIT); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_local.h deleted file mode 100644 index 3c70b8fdb532..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_local.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __SP_LOCAL_H_INCLUDED__ -#define __SP_LOCAL_H_INCLUDED__ - -#include -#include "sp_global.h" - -struct sp_state_s { - int pc; - int status_register; - bool is_broken; - bool is_idle; - bool is_sleeping; - bool is_stalling; -}; - -struct sp_stall_s { - bool fifo0; - bool fifo1; - bool fifo2; - bool fifo3; - bool fifo4; - bool fifo5; - bool fifo6; - bool fifo7; - bool fifo8; - bool fifo9; - bool fifoa; - bool dmem; - bool control_master; - bool icache_master; -}; - -#define sp_address_of(var) (HIVE_ADDR_ ## var) - -/* - * deprecated - */ -#define store_sp_int(var, value) \ - sp_dmem_store_uint32(SP0_ID, (unsigned)sp_address_of(var), \ - (uint32_t)(value)) - -#define store_sp_ptr(var, value) \ - sp_dmem_store_uint32(SP0_ID, (unsigned)sp_address_of(var), \ - (uint32_t)(value)) - -#define load_sp_uint(var) \ - sp_dmem_load_uint32(SP0_ID, (unsigned)sp_address_of(var)) - -#define load_sp_array_uint8(array_name, index) \ - sp_dmem_load_uint8(SP0_ID, (unsigned)sp_address_of(array_name) + \ - (index)*sizeof(uint8_t)) - -#define load_sp_array_uint16(array_name, index) \ - sp_dmem_load_uint16(SP0_ID, (unsigned)sp_address_of(array_name) + \ - (index)*sizeof(uint16_t)) - -#define load_sp_array_uint(array_name, index) \ - sp_dmem_load_uint32(SP0_ID, (unsigned)sp_address_of(array_name) + \ - (index)*sizeof(uint32_t)) - -#define store_sp_var(var, data, bytes) \ - sp_dmem_store(SP0_ID, (unsigned)sp_address_of(var), data, bytes) - -#define store_sp_array_uint8(array_name, index, value) \ - sp_dmem_store_uint8(SP0_ID, (unsigned)sp_address_of(array_name) + \ - (index)*sizeof(uint8_t), value) - -#define store_sp_array_uint16(array_name, index, value) \ - sp_dmem_store_uint16(SP0_ID, (unsigned)sp_address_of(array_name) + \ - (index)*sizeof(uint16_t), value) - -#define store_sp_array_uint(array_name, index, value) \ - sp_dmem_store_uint32(SP0_ID, (unsigned)sp_address_of(array_name) + \ - (index)*sizeof(uint32_t), value) - -#define store_sp_var_with_offset(var, offset, 
data, bytes) \ - sp_dmem_store(SP0_ID, (unsigned)sp_address_of(var) + \ - offset, data, bytes) - -#define load_sp_var(var, data, bytes) \ - sp_dmem_load(SP0_ID, (unsigned)sp_address_of(var), data, bytes) - -#define load_sp_var_with_offset(var, offset, data, bytes) \ - sp_dmem_load(SP0_ID, (unsigned)sp_address_of(var) + offset, \ - data, bytes) - -#endif /* __SP_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h deleted file mode 100644 index 5ea81c0e82d1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __SP_PRIVATE_H_INCLUDED__ -#define __SP_PRIVATE_H_INCLUDED__ - -#include "sp_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -STORAGE_CLASS_SP_C void sp_ctrl_store( - const sp_ID_t ID, - const hrt_address reg, - const hrt_data value) -{ - assert(ID < N_SP_ID); - assert(SP_CTRL_BASE[ID] != (hrt_address)-1); - ia_css_device_store_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); - return; -} - -STORAGE_CLASS_SP_C hrt_data sp_ctrl_load( - const sp_ID_t ID, - const hrt_address reg) -{ - assert(ID < N_SP_ID); - assert(SP_CTRL_BASE[ID] != (hrt_address)-1); - return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data)); -} - -STORAGE_CLASS_SP_C bool sp_ctrl_getbit( - const sp_ID_t ID, - const hrt_address reg, - const unsigned int bit) -{ - hrt_data val = sp_ctrl_load(ID, reg); - return (val & (1UL << bit)) != 0; -} - -STORAGE_CLASS_SP_C void sp_ctrl_setbit( - const sp_ID_t ID, - const hrt_address reg, - const unsigned int bit) -{ - hrt_data data = sp_ctrl_load(ID, reg); - sp_ctrl_store(ID, reg, (data | (1UL << bit))); - return; -} - -STORAGE_CLASS_SP_C void sp_ctrl_clearbit( - const sp_ID_t ID, - const hrt_address reg, - const unsigned int bit) -{ - hrt_data data = sp_ctrl_load(ID, reg); - sp_ctrl_store(ID, reg, (data & ~(1UL << bit))); - return; -} - -STORAGE_CLASS_SP_C void sp_dmem_store( - const sp_ID_t ID, - hrt_address addr, - const void *data, - const size_t size) -{ - assert(ID < N_SP_ID); - assert(SP_DMEM_BASE[ID] != (hrt_address)-1); - ia_css_device_store(SP_DMEM_BASE[ID] + addr, data, size); - return; -} - -STORAGE_CLASS_SP_C void sp_dmem_load( - const sp_ID_t ID, - const hrt_address addr, - void *data, - const size_t size) -{ - assert(ID < N_SP_ID); - assert(SP_DMEM_BASE[ID] != (hrt_address)-1); - ia_css_device_load(SP_DMEM_BASE[ID] + addr, data, size); - return; -} - -STORAGE_CLASS_SP_C void sp_dmem_store_uint8( - const sp_ID_t ID, - hrt_address addr, - const uint8_t data) -{ - assert(ID < N_SP_ID); - 
assert(SP_DMEM_BASE[ID] != (hrt_address)-1); - (void)ID; - ia_css_device_store_uint8(SP_DMEM_BASE[SP0_ID] + addr, data); - return; -} - -STORAGE_CLASS_SP_C void sp_dmem_store_uint16( - const sp_ID_t ID, - hrt_address addr, - const uint16_t data) -{ - assert(ID < N_SP_ID); - assert(SP_DMEM_BASE[ID] != (hrt_address)-1); - (void)ID; - ia_css_device_store_uint16(SP_DMEM_BASE[SP0_ID] + addr, data); - return; -} - -STORAGE_CLASS_SP_C void sp_dmem_store_uint32( - const sp_ID_t ID, - hrt_address addr, - const uint32_t data) -{ - assert(ID < N_SP_ID); - assert(SP_DMEM_BASE[ID] != (hrt_address)-1); - (void)ID; - ia_css_device_store_uint32(SP_DMEM_BASE[SP0_ID] + addr, data); - return; -} - -STORAGE_CLASS_SP_C uint8_t sp_dmem_load_uint8( - const sp_ID_t ID, - const hrt_address addr) -{ - assert(ID < N_SP_ID); - assert(SP_DMEM_BASE[ID] != (hrt_address)-1); - (void)ID; - return ia_css_device_load_uint8(SP_DMEM_BASE[SP0_ID] + addr); -} - -STORAGE_CLASS_SP_C uint16_t sp_dmem_load_uint16( - const sp_ID_t ID, - const hrt_address addr) -{ - assert(ID < N_SP_ID); - assert(SP_DMEM_BASE[ID] != (hrt_address)-1); - (void)ID; - return ia_css_device_load_uint16(SP_DMEM_BASE[SP0_ID] + addr); -} - -STORAGE_CLASS_SP_C uint32_t sp_dmem_load_uint32( - const sp_ID_t ID, - const hrt_address addr) -{ - assert(ID < N_SP_ID); - assert(SP_DMEM_BASE[ID] != (hrt_address)-1); - (void)ID; - return ia_css_device_load_uint32(SP_DMEM_BASE[SP0_ID] + addr); -} - -#endif /* __SP_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/system_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/system_local.h deleted file mode 100644 index 8be1cd020bf4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/system_local.h +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SYSTEM_LOCAL_H_INCLUDED__ -#define __SYSTEM_LOCAL_H_INCLUDED__ - -#ifdef HRT_ISP_CSS_CUSTOM_HOST -#ifndef HRT_USE_VIR_ADDRS -#define HRT_USE_VIR_ADDRS -#endif -/* This interface is deprecated */ -/*#include "hive_isp_css_custom_host_hrt.h"*/ -#endif - -#include "system_global.h" - -#ifdef __FIST__ -#define HRT_ADDRESS_WIDTH 32 /* Surprise, this is a local property and even differs per platform */ -#else -/* HRT assumes 32 by default (see Linux/include/hrt/hive_types.h), overrule it in case it is different */ -#undef HRT_ADDRESS_WIDTH -#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */ -#endif - -/* This interface is deprecated */ -#include "hrt/hive_types.h" - -/* - * Cell specific address maps - */ -#if HRT_ADDRESS_WIDTH==64 - -#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */ - -/* DDR */ -static const hrt_address DDR_BASE[N_DDR_ID] = { - (hrt_address)0x0000000120000000ULL}; - -/* ISP */ -static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { - (hrt_address)0x0000000000020000ULL}; - -static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { - (hrt_address)0x0000000000200000ULL}; - -static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { - (hrt_address)0x0000000000100000ULL}; - -static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { - (hrt_address)0x00000000001C0000ULL, - (hrt_address)0x00000000001D0000ULL, - (hrt_address)0x00000000001E0000ULL}; - -static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { - (hrt_address)0x00000000001F0000ULL}; - -/* 
SP */ -static const hrt_address SP_CTRL_BASE[N_SP_ID] = { - (hrt_address)0x0000000000010000ULL}; - -static const hrt_address SP_DMEM_BASE[N_SP_ID] = { - (hrt_address)0x0000000000300000ULL}; - -static const hrt_address SP_PMEM_BASE[N_SP_ID] = { - (hrt_address)0x00000000000B0000ULL}; - -/* MMU */ -#if defined (IS_ISP_2400_MAMOIADA_SYSTEM) || defined (IS_ISP_2401_MAMOIADA_SYSTEM) -/* - * MMU0_ID: The data MMU - * MMU1_ID: The icache MMU - */ -static const hrt_address MMU_BASE[N_MMU_ID] = { - (hrt_address)0x0000000000070000ULL, - (hrt_address)0x00000000000A0000ULL}; -#else -#error "system_local.h: SYSTEM must be one of {2400, 2401 }" -#endif - -/* DMA */ -static const hrt_address DMA_BASE[N_DMA_ID] = { - (hrt_address)0x0000000000040000ULL}; - -/* IRQ */ -static const hrt_address IRQ_BASE[N_IRQ_ID] = { - (hrt_address)0x0000000000000500ULL, - (hrt_address)0x0000000000030A00ULL, - (hrt_address)0x000000000008C000ULL, - (hrt_address)0x0000000000090200ULL}; -/* - (hrt_address)0x0000000000000500ULL}; - */ - -/* GDC */ -static const hrt_address GDC_BASE[N_GDC_ID] = { - (hrt_address)0x0000000000050000ULL, - (hrt_address)0x0000000000060000ULL}; - -/* FIFO_MONITOR (not a subset of GP_DEVICE) */ -static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { - (hrt_address)0x0000000000000000ULL}; - -/* -static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { - (hrt_address)0x0000000000000000ULL}; - -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - (hrt_address)0x0000000000090000ULL}; -*/ - -/* GP_DEVICE (single base for all separate GP_REG instances) */ -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - (hrt_address)0x0000000000000000ULL}; - -/*GP TIMER , all timer registers are inter-twined, - * so, having multiple base addresses for - * different timers does not help*/ -static const hrt_address GP_TIMER_BASE = - (hrt_address)0x0000000000000600ULL; -/* GPIO */ -static const hrt_address GPIO_BASE[N_GPIO_ID] = { - (hrt_address)0x0000000000000400ULL}; - 
-/* TIMED_CTRL */ -static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { - (hrt_address)0x0000000000000100ULL}; - - -/* INPUT_FORMATTER */ -static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { - (hrt_address)0x0000000000030000ULL, - (hrt_address)0x0000000000030200ULL, - (hrt_address)0x0000000000030400ULL, - (hrt_address)0x0000000000030600ULL}; /* memcpy() */ - -/* INPUT_SYSTEM */ -static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { - (hrt_address)0x0000000000080000ULL}; -/* (hrt_address)0x0000000000081000ULL, */ /* capture A */ -/* (hrt_address)0x0000000000082000ULL, */ /* capture B */ -/* (hrt_address)0x0000000000083000ULL, */ /* capture C */ -/* (hrt_address)0x0000000000084000ULL, */ /* Acquisition */ -/* (hrt_address)0x0000000000085000ULL, */ /* DMA */ -/* (hrt_address)0x0000000000089000ULL, */ /* ctrl */ -/* (hrt_address)0x000000000008A000ULL, */ /* GP regs */ -/* (hrt_address)0x000000000008B000ULL, */ /* FIFO */ -/* (hrt_address)0x000000000008C000ULL, */ /* IRQ */ - -/* RX, the MIPI lane control regs start at offset 0 */ -static const hrt_address RX_BASE[N_RX_ID] = { - (hrt_address)0x0000000000080100ULL}; - -#elif HRT_ADDRESS_WIDTH==32 - -#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */ - -/* DDR : Attention, this value not defined in 32-bit */ -static const hrt_address DDR_BASE[N_DDR_ID] = { - (hrt_address)0x00000000UL}; - -/* ISP */ -static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { - (hrt_address)0x00020000UL}; - -static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { - (hrt_address)0x00200000UL}; - -static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { - (hrt_address)0x100000UL}; - -static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { - (hrt_address)0xffffffffUL, - (hrt_address)0xffffffffUL, - (hrt_address)0xffffffffUL}; - -static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { - (hrt_address)0xffffffffUL}; - -/* SP */ -static const hrt_address SP_CTRL_BASE[N_SP_ID] = { - 
(hrt_address)0x00010000UL}; - -static const hrt_address SP_DMEM_BASE[N_SP_ID] = { - (hrt_address)0x00300000UL}; - -static const hrt_address SP_PMEM_BASE[N_SP_ID] = { - (hrt_address)0x000B0000UL}; - -/* MMU */ -#if defined (IS_ISP_2400_MAMOIADA_SYSTEM) || defined (IS_ISP_2401_MAMOIADA_SYSTEM) -/* - * MMU0_ID: The data MMU - * MMU1_ID: The icache MMU - */ -static const hrt_address MMU_BASE[N_MMU_ID] = { - (hrt_address)0x00070000UL, - (hrt_address)0x000A0000UL}; -#else -#error "system_local.h: SYSTEM must be one of {2400, 2401 }" -#endif - -/* DMA */ -static const hrt_address DMA_BASE[N_DMA_ID] = { - (hrt_address)0x00040000UL}; - -/* IRQ */ -static const hrt_address IRQ_BASE[N_IRQ_ID] = { - (hrt_address)0x00000500UL, - (hrt_address)0x00030A00UL, - (hrt_address)0x0008C000UL, - (hrt_address)0x00090200UL}; -/* - (hrt_address)0x00000500UL}; - */ - -/* GDC */ -static const hrt_address GDC_BASE[N_GDC_ID] = { - (hrt_address)0x00050000UL, - (hrt_address)0x00060000UL}; - -/* FIFO_MONITOR (not a subset of GP_DEVICE) */ -static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { - (hrt_address)0x00000000UL}; - -/* -static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { - (hrt_address)0x00000000UL}; - -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - (hrt_address)0x00090000UL}; -*/ - -/* GP_DEVICE (single base for all separate GP_REG instances) */ -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - (hrt_address)0x00000000UL}; - -/*GP TIMER , all timer registers are inter-twined, - * so, having multiple base addresses for - * different timers does not help*/ -static const hrt_address GP_TIMER_BASE = - (hrt_address)0x00000600UL; - -/* GPIO */ -static const hrt_address GPIO_BASE[N_GPIO_ID] = { - (hrt_address)0x00000400UL}; - -/* TIMED_CTRL */ -static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { - (hrt_address)0x00000100UL}; - - -/* INPUT_FORMATTER */ -static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { - 
(hrt_address)0x00030000UL, - (hrt_address)0x00030200UL, - (hrt_address)0x00030400UL}; -/* (hrt_address)0x00030600UL, */ /* memcpy() */ - -/* INPUT_SYSTEM */ -static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { - (hrt_address)0x00080000UL}; -/* (hrt_address)0x00081000UL, */ /* capture A */ -/* (hrt_address)0x00082000UL, */ /* capture B */ -/* (hrt_address)0x00083000UL, */ /* capture C */ -/* (hrt_address)0x00084000UL, */ /* Acquisition */ -/* (hrt_address)0x00085000UL, */ /* DMA */ -/* (hrt_address)0x00089000UL, */ /* ctrl */ -/* (hrt_address)0x0008A000UL, */ /* GP regs */ -/* (hrt_address)0x0008B000UL, */ /* FIFO */ -/* (hrt_address)0x0008C000UL, */ /* IRQ */ - -/* RX, the MIPI lane control regs start at offset 0 */ -static const hrt_address RX_BASE[N_RX_ID] = { - (hrt_address)0x00080100UL}; - -#else -#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}" -#endif - -#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl.c deleted file mode 100644 index cd12d74024f7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "timed_ctrl.h" - -#ifndef __INLINE_TIMED_CTRL__ -#include "timed_ctrl_private.h" -#endif /* __INLINE_TIMED_CTRL__ */ - -#include "assert_support.h" - -void timed_ctrl_snd_commnd( - const timed_ctrl_ID_t ID, - hrt_data mask, - hrt_data condition, - hrt_data counter, - hrt_address addr, - hrt_data value) -{ - OP___assert(ID == TIMED_CTRL0_ID); - OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address)-1); - - timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, mask); - timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, condition); - timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, counter); - timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, (hrt_data)addr); - timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, value); -} - -/* pqiao TODO: make sure the following commands get - correct BASE address both for csim and android */ - -void timed_ctrl_snd_sp_commnd( - const timed_ctrl_ID_t ID, - hrt_data mask, - hrt_data condition, - hrt_data counter, - const sp_ID_t SP_ID, - hrt_address offset, - hrt_data value) -{ - OP___assert(SP_ID < N_SP_ID); - OP___assert(SP_DMEM_BASE[SP_ID] != (hrt_address)-1); - - timed_ctrl_snd_commnd(ID, mask, condition, counter, - SP_DMEM_BASE[SP_ID]+offset, value); -} - -void timed_ctrl_snd_gpio_commnd( - const timed_ctrl_ID_t ID, - hrt_data mask, - hrt_data condition, - hrt_data counter, - const gpio_ID_t GPIO_ID, - hrt_address offset, - hrt_data value) -{ - OP___assert(GPIO_ID < N_GPIO_ID); - OP___assert(GPIO_BASE[GPIO_ID] != (hrt_address)-1); - - timed_ctrl_snd_commnd(ID, mask, condition, counter, - GPIO_BASE[GPIO_ID]+offset, value); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_local.h deleted file mode 100644 index e570813af28d..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_local.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __TIMED_CTRL_LOCAL_H_INCLUDED__ -#define __TIMED_CTRL_LOCAL_H_INCLUDED__ - -#include "timed_ctrl_global.h" - -#endif /* __TIMED_CTRL_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_private.h deleted file mode 100644 index fb0fdbb88435..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_private.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __TIMED_CTRL_PRIVATE_H_INCLUDED__ -#define __TIMED_CTRL_PRIVATE_H_INCLUDED__ - -#include "timed_ctrl_public.h" - -#include "device_access.h" - -#include "assert_support.h" - -STORAGE_CLASS_TIMED_CTRL_C void timed_ctrl_reg_store( - const timed_ctrl_ID_t ID, - const unsigned int reg, - const hrt_data value) -{ -OP___assert(ID < N_TIMED_CTRL_ID); -OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address)-1); - ia_css_device_store_uint32(TIMED_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); -} - -#endif /* __GP_DEVICE_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_local.h deleted file mode 100644 index c4e99afe0d29..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_local.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __VAMEM_LOCAL_H_INCLUDED__ -#define __VAMEM_LOCAL_H_INCLUDED__ - -#include "vamem_global.h" - -#endif /* __VAMEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_private.h deleted file mode 100644 index 5e05258673d5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_private.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __VAMEM_PRIVATE_H_INCLUDED__ -#define __VAMEM_PRIVATE_H_INCLUDED__ - -#include "vamem_public.h" - -#include - -#include "assert_support.h" - - -STORAGE_CLASS_ISP_C void isp_vamem_store( - const vamem_ID_t ID, - vamem_data_t *addr, - const vamem_data_t *data, - const size_t size) /* in vamem_data_t */ -{ - assert(ID < N_VAMEM_ID); - assert(ISP_VAMEM_BASE[ID] != (hrt_address)-1); - hrt_master_port_store(ISP_VAMEM_BASE[ID] + (unsigned)addr, data, size * sizeof(vamem_data_t)); -} - - -#endif /* __VAMEM_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem.c deleted file mode 100644 index ea22c23fc7a4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010 - 2016, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "isp.h" -#include "vmem.h" -#include "vmem_local.h" - -#if !defined(HRT_MEMORY_ACCESS) -#include "ia_css_device_access.h" -#endif -#include "assert_support.h" -#include "platform_support.h" /* hrt_sleep() */ - -typedef unsigned long long hive_uedge; -typedef hive_uedge *hive_wide; - -/* Copied from SDK: sim_semantics.c */ - -/* subword bits move like this: MSB[____xxxx____]LSB -> MSB[00000000xxxx]LSB */ -#define SUBWORD(w, start, end) (((w) & (((1ULL << ((end)-1))-1) << 1 | 1)) >> (start)) - -/* inverse subword bits move like this: MSB[xxxx____xxxx]LSB -> MSB[xxxx0000xxxx]LSB */ -#define INV_SUBWORD(w, start, end) ((w) & (~(((1ULL << ((end)-1))-1) << 1 | 1) | ((1ULL << (start))-1)) ) - -#define uedge_bits (8*sizeof(hive_uedge)) -#define move_lower_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, 0, src_bit) -#define move_upper_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, src_bit, uedge_bits) -#define move_word(target, target_bit, src) move_subword(target, target_bit, src, 0, uedge_bits) - -static void -move_subword ( - hive_uedge *target, - unsigned target_bit, - hive_uedge src, - unsigned src_start, - unsigned src_end) -{ - unsigned int start_elem = target_bit / uedge_bits; - unsigned int start_bit = target_bit % uedge_bits; - unsigned subword_width = src_end - src_start; - - hive_uedge src_subword = SUBWORD(src, src_start, src_end); - - if (subword_width + start_bit > uedge_bits) { /* overlap */ - hive_uedge old_val1; - hive_uedge old_val0 = INV_SUBWORD(target[start_elem], start_bit, uedge_bits); - target[start_elem] = old_val0 | (src_subword << start_bit); - old_val1 = INV_SUBWORD(target[start_elem+1], 0, subword_width + start_bit - uedge_bits); - target[start_elem+1] = old_val1 | (src_subword >> ( uedge_bits - start_bit)); - } else { - hive_uedge old_val = INV_SUBWORD(target[start_elem], start_bit, start_bit + subword_width); - target[start_elem] = old_val | (src_subword << start_bit); 
- } -} - -static void -hive_sim_wide_unpack( - hive_wide vector, - hive_wide elem, - hive_uint elem_bits, - hive_uint index) -{ - /* pointers into wide_type: */ - unsigned int start_elem = (elem_bits * index) / uedge_bits; - unsigned int start_bit = (elem_bits * index) % uedge_bits; - unsigned int end_elem = (elem_bits * (index + 1) - 1) / uedge_bits; - unsigned int end_bit = ((elem_bits * (index + 1) - 1) % uedge_bits) + 1; - - if (elem_bits == uedge_bits) { - /* easy case for speedup: */ - elem[0] = vector[index]; - } else if (start_elem == end_elem) { - /* only one (<=64 bits) element needs to be (partly) copied: */ - move_subword(elem, 0, vector[start_elem], start_bit, end_bit); - } else { - /* general case: handles edge spanning cases (includes >64bit elements) */ - unsigned int bits_written = 0; - unsigned int i; - move_upper_bits(elem, bits_written, vector[start_elem], start_bit); - bits_written += (64 - start_bit); - for(i = start_elem+1; i < end_elem; i++) { - move_word(elem, bits_written, vector[i]); - bits_written += uedge_bits; - } - move_lower_bits(elem, bits_written , vector[end_elem], end_bit); - } -} - -static void -hive_sim_wide_pack( - hive_wide vector, - hive_wide elem, - hive_uint elem_bits, - hive_uint index) -{ - /* pointers into wide_type: */ - unsigned int start_elem = (elem_bits * index) / uedge_bits; - - /* easy case for speedup: */ - if (elem_bits == uedge_bits) { - vector[start_elem] = elem[0]; - } else if (elem_bits > uedge_bits) { - unsigned bits_to_write = elem_bits; - unsigned start_bit = elem_bits * index; - unsigned i = 0; - for(; bits_to_write > uedge_bits; bits_to_write -= uedge_bits, i++, start_bit += uedge_bits) { - move_word(vector, start_bit, elem[i]); - } - move_lower_bits(vector, start_bit, elem[i], bits_to_write); - } else { - /* only one element needs to be (partly) copied: */ - move_lower_bits(vector, elem_bits * index, elem[0], elem_bits); - } -} - -static void load_vector ( - const isp_ID_t ID, - t_vmem_elem *to, - 
const t_vmem_elem *from) -{ - unsigned i; - hive_uedge *data; - unsigned size = sizeof(short)*ISP_NWAY; - VMEM_ARRAY(v, 2*ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */ - assert(ISP_BAMEM_BASE[ID] != (hrt_address)-1); -#if !defined(HRT_MEMORY_ACCESS) - ia_css_device_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size); -#else - hrt_master_port_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size); -#endif - data = (hive_uedge *)v; - for (i = 0; i < ISP_NWAY; i++) { - hive_uedge elem = 0; - hive_sim_wide_unpack(data, &elem, ISP_VEC_ELEMBITS, i); - to[i] = elem; - } - hrt_sleep(); /* Spend at least 1 cycles per vector */ -} - -static void store_vector ( - const isp_ID_t ID, - t_vmem_elem *to, - const t_vmem_elem *from) -{ - unsigned i; - unsigned size = sizeof(short)*ISP_NWAY; - VMEM_ARRAY(v, 2*ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */ - //load_vector (&v[1][0], &to[ISP_NWAY]); /* Fetch the next vector, since it will be overwritten. */ - hive_uedge *data = (hive_uedge *)v; - for (i = 0; i < ISP_NWAY; i++) { - hive_sim_wide_pack(data, (hive_wide)&from[i], ISP_VEC_ELEMBITS, i); - } - assert(ISP_BAMEM_BASE[ID] != (hrt_address)-1); -#if !defined(HRT_MEMORY_ACCESS) - ia_css_device_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size); -#else - //hrt_mem_store (ISP, VMEM, (unsigned)to, &v, siz); /* This will overwrite the next vector as well */ - hrt_master_port_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size); -#endif - hrt_sleep(); /* Spend at least 1 cycles per vector */ -} - -void isp_vmem_load( - const isp_ID_t ID, - const t_vmem_elem *from, - t_vmem_elem *to, - unsigned elems) /* In t_vmem_elem */ -{ - unsigned c; - const t_vmem_elem *vp = from; - assert(ID < N_ISP_ID); - assert((unsigned long)from % ISP_VEC_ALIGN == 0); - assert(elems % ISP_NWAY == 0); - for (c = 0; c < elems; c += ISP_NWAY) { - load_vector(ID, &to[c], vp); - vp = (t_vmem_elem *)((char*)vp + ISP_VEC_ALIGN); - } -} - -void isp_vmem_store( - 
const isp_ID_t ID, - t_vmem_elem *to, - const t_vmem_elem *from, - unsigned elems) /* In t_vmem_elem */ -{ - unsigned c; - t_vmem_elem *vp = to; - assert(ID < N_ISP_ID); - assert((unsigned long)to % ISP_VEC_ALIGN == 0); - assert(elems % ISP_NWAY == 0); - for (c = 0; c < elems; c += ISP_NWAY) { - store_vector (ID, vp, &from[c]); - vp = (t_vmem_elem *)((char*)vp + ISP_VEC_ALIGN); - } -} - -void isp_vmem_2d_load ( - const isp_ID_t ID, - const t_vmem_elem *from, - t_vmem_elem *to, - unsigned height, - unsigned width, - unsigned stride_to, /* In t_vmem_elem */ - unsigned stride_from /* In t_vmem_elem */) -{ - unsigned h; - - assert(ID < N_ISP_ID); - assert((unsigned long)from % ISP_VEC_ALIGN == 0); - assert(width % ISP_NWAY == 0); - assert(stride_from % ISP_NWAY == 0); - for (h = 0; h < height; h++) { - unsigned c; - const t_vmem_elem *vp = from; - for (c = 0; c < width; c += ISP_NWAY) { - load_vector(ID, &to[stride_to*h + c], vp); - vp = (t_vmem_elem *)((char*)vp + ISP_VEC_ALIGN); - } - from = (const t_vmem_elem *)((const char *)from + stride_from/ISP_NWAY*ISP_VEC_ALIGN); - } -} - -void isp_vmem_2d_store ( - const isp_ID_t ID, - t_vmem_elem *to, - const t_vmem_elem *from, - unsigned height, - unsigned width, - unsigned stride_to, /* In t_vmem_elem */ - unsigned stride_from /* In t_vmem_elem */) -{ - unsigned h; - - assert(ID < N_ISP_ID); - assert((unsigned long)to % ISP_VEC_ALIGN == 0); - assert(width % ISP_NWAY == 0); - assert(stride_to % ISP_NWAY == 0); - for (h = 0; h < height; h++) { - unsigned c; - t_vmem_elem *vp = to; - for (c = 0; c < width; c += ISP_NWAY) { - store_vector (ID, vp, &from[stride_from*h + c]); - vp = (t_vmem_elem *)((char*)vp + ISP_VEC_ALIGN); - } - to = (t_vmem_elem *)((char *)to + stride_to/ISP_NWAY*ISP_VEC_ALIGN); - } -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_local.h deleted file mode 100644 index 
de85644b885e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_local.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __VMEM_LOCAL_H_INCLUDED__ -#define __VMEM_LOCAL_H_INCLUDED__ - -#include "type_support.h" -#include "vmem_global.h" - -typedef uint16_t t_vmem_elem; - -#define VMEM_ARRAY(x,s) t_vmem_elem x[s/ISP_NWAY][ISP_NWAY] - -void isp_vmem_load( - const isp_ID_t ID, - const t_vmem_elem *from, - t_vmem_elem *to, - unsigned elems); /* In t_vmem_elem */ - -void isp_vmem_store( - const isp_ID_t ID, - t_vmem_elem *to, - const t_vmem_elem *from, - unsigned elems); /* In t_vmem_elem */ - -void isp_vmem_2d_load ( - const isp_ID_t ID, - const t_vmem_elem *from, - t_vmem_elem *to, - unsigned height, - unsigned width, - unsigned stride_to, /* In t_vmem_elem */ - unsigned stride_from /* In t_vmem_elem */); - -void isp_vmem_2d_store ( - const isp_ID_t ID, - t_vmem_elem *to, - const t_vmem_elem *from, - unsigned height, - unsigned width, - unsigned stride_to, /* In t_vmem_elem */ - unsigned stride_from /* In t_vmem_elem */); - -#endif /* __VMEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_private.h deleted file mode 100644 index f48d1281b5a7..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_private.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __VMEM_PRIVATE_H_INCLUDED__ -#define __VMEM_PRIVATE_H_INCLUDED__ - -#include "vmem_public.h" - -#endif /* __VMEM_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_formatter_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_formatter_global.h deleted file mode 100644 index 7558f4964313..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_formatter_global.h +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __INPUT_FORMATTER_GLOBAL_H_INCLUDED__ -#define __INPUT_FORMATTER_GLOBAL_H_INCLUDED__ - -#define IS_INPUT_FORMATTER_VERSION2 -#define IS_INPUT_SWITCH_VERSION2 - -#include -#include -#include "if_defs.h" -#include "str2mem_defs.h" -#include "input_switch_2400_defs.h" - -#define _HIVE_INPUT_SWITCH_GET_FSYNC_REG_LSB(ch_id) ((ch_id) * 3) - -#define HIVE_SWITCH_N_CHANNELS 4 -#define HIVE_SWITCH_N_FORMATTYPES 32 -#define HIVE_SWITCH_N_SWITCH_CODE 4 -#define HIVE_SWITCH_M_CHANNELS 0x00000003 -#define HIVE_SWITCH_M_FORMATTYPES 0x0000001f -#define HIVE_SWITCH_M_SWITCH_CODE 0x00000003 -#define HIVE_SWITCH_M_FSYNC 0x00000007 - -#define HIVE_SWITCH_ENCODE_FSYNC(x) \ - (1U<<(((x)-1)&HIVE_SWITCH_M_CHANNELS)) - -#define _HIVE_INPUT_SWITCH_GET_LUT_FIELD(reg, bit_index) \ - (((reg) >> (bit_index)) & HIVE_SWITCH_M_SWITCH_CODE) -#define _HIVE_INPUT_SWITCH_SET_LUT_FIELD(reg, bit_index, val) \ - (((reg) & ~(HIVE_SWITCH_M_SWITCH_CODE<<(bit_index))) | (((hrt_data)(val)&HIVE_SWITCH_M_SWITCH_CODE)<<(bit_index))) -#define _HIVE_INPUT_SWITCH_GET_FSYNC_FIELD(reg, bit_index) \ - (((reg) >> (bit_index)) & HIVE_SWITCH_M_FSYNC) -#define _HIVE_INPUT_SWITCH_SET_FSYNC_FIELD(reg, bit_index, val) \ - (((reg) & ~(HIVE_SWITCH_M_FSYNC<<(bit_index))) | (((hrt_data)(val)&HIVE_SWITCH_M_FSYNC)<<(bit_index))) - -typedef struct input_formatter_cfg_s input_formatter_cfg_t; - -/* Hardware registers */ -/*#define HIVE_IF_RESET_ADDRESS 0x000*/ /* deprecated */ -#define HIVE_IF_START_LINE_ADDRESS 0x004 -#define HIVE_IF_START_COLUMN_ADDRESS 0x008 -#define HIVE_IF_CROPPED_HEIGHT_ADDRESS 0x00C -#define HIVE_IF_CROPPED_WIDTH_ADDRESS 0x010 -#define HIVE_IF_VERTICAL_DECIMATION_ADDRESS 0x014 -#define HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS 0x018 -#define HIVE_IF_H_DEINTERLEAVING_ADDRESS 0x01C -#define HIVE_IF_LEFTPADDING_WIDTH_ADDRESS 0x020 -#define HIVE_IF_END_OF_LINE_OFFSET_ADDRESS 0x024 -#define HIVE_IF_VMEM_START_ADDRESS_ADDRESS 0x028 -#define HIVE_IF_VMEM_END_ADDRESS_ADDRESS 0x02C -#define 
HIVE_IF_VMEM_INCREMENT_ADDRESS 0x030 -#define HIVE_IF_YUV_420_FORMAT_ADDRESS 0x034 -#define HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS 0x038 -#define HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS 0x03C -#define HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS 0x040 -#define HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS 0x044 -#define HIVE_IF_V_DEINTERLEAVING_ADDRESS 0x048 -#define HIVE_IF_FSM_CROP_PIXEL_COUNTER 0x110 -#define HIVE_IF_FSM_CROP_LINE_COUNTER 0x10C -#define HIVE_IF_FSM_CROP_STATUS 0x108 - -/* Registers only for simulation */ -#define HIVE_IF_CRUN_MODE_ADDRESS 0x04C -#define HIVE_IF_DUMP_OUTPUT_ADDRESS 0x050 - -/* Follow the DMA syntax, "cmd" last */ -#define IF_PACK(val, cmd) ((val & 0x0fff) | (cmd /*& 0xf000*/)) - -#define HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS (_STR2MEM_SOFT_RESET_REG_ID * _STR2MEM_REG_ALIGN) -#define HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS (_STR2MEM_INPUT_ENDIANNESS_REG_ID * _STR2MEM_REG_ALIGN) -#define HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS (_STR2MEM_OUTPUT_ENDIANNESS_REG_ID * _STR2MEM_REG_ALIGN) -#define HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS (_STR2MEM_BIT_SWAPPING_REG_ID * _STR2MEM_REG_ALIGN) -#define HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS (_STR2MEM_BLOCK_SYNC_LEVEL_REG_ID * _STR2MEM_REG_ALIGN) -#define HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS (_STR2MEM_PACKET_SYNC_LEVEL_REG_ID * _STR2MEM_REG_ALIGN) -#define HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS (_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID * _STR2MEM_REG_ALIGN) -#define HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS (_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID * _STR2MEM_REG_ALIGN) -#define HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS (_STR2MEM_EN_STAT_UPDATE_ID * _STR2MEM_REG_ALIGN) - -/* - * This data structure is shared between host and SP - */ -struct input_formatter_cfg_s { - uint32_t start_line; - uint32_t start_column; - uint32_t left_padding; - uint32_t cropped_height; - uint32_t cropped_width; - uint32_t deinterleaving; - uint32_t buf_vecs; - uint32_t buf_start_index; - uint32_t buf_increment; - 
uint32_t buf_eol_offset; - uint32_t is_yuv420_format; - uint32_t block_no_reqs; -}; - -extern const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID]; -extern const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID]; -extern const uint8_t HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID]; - -#endif /* __INPUT_FORMATTER_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_system_global.h deleted file mode 100644 index 9ba36525e8d3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_system_global.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ -#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ - -#define IS_INPUT_SYSTEM_VERSION_2 - -#include - -//CSI reveiver has 3 ports. -#define N_CSI_PORTS (3) -//AM: Use previous define for this. - -//MIPI allows upto 4 channels. 
-#define N_CHANNELS (4) -// 12KB = 256bit x 384 words -#define IB_CAPACITY_IN_WORDS (384) - -typedef enum { - MIPI_0LANE_CFG = 0, - MIPI_1LANE_CFG = 1, - MIPI_2LANE_CFG = 2, - MIPI_3LANE_CFG = 3, - MIPI_4LANE_CFG = 4 -} mipi_lane_cfg_t; - -typedef enum { - INPUT_SYSTEM_SOURCE_SENSOR = 0, - INPUT_SYSTEM_SOURCE_FIFO, - INPUT_SYSTEM_SOURCE_TPG, - INPUT_SYSTEM_SOURCE_PRBS, - INPUT_SYSTEM_SOURCE_MEMORY, - N_INPUT_SYSTEM_SOURCE -} input_system_source_t; - -/* internal routing configuration */ -typedef enum { - INPUT_SYSTEM_DISCARD_ALL = 0, - INPUT_SYSTEM_CSI_BACKEND = 1, - INPUT_SYSTEM_INPUT_BUFFER = 2, - INPUT_SYSTEM_MULTICAST = 3, - N_INPUT_SYSTEM_CONNECTION -} input_system_connection_t; - -typedef enum { - INPUT_SYSTEM_MIPI_PORT0, - INPUT_SYSTEM_MIPI_PORT1, - INPUT_SYSTEM_MIPI_PORT2, - INPUT_SYSTEM_ACQUISITION_UNIT, - N_INPUT_SYSTEM_MULTIPLEX -} input_system_multiplex_t; - -typedef enum { - INPUT_SYSTEM_SINK_MEMORY = 0, - INPUT_SYSTEM_SINK_ISP, - INPUT_SYSTEM_SINK_SP, - N_INPUT_SYSTEM_SINK -} input_system_sink_t; - -typedef enum { - INPUT_SYSTEM_FIFO_CAPTURE = 0, - INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING, - INPUT_SYSTEM_SRAM_BUFFERING, - INPUT_SYSTEM_XMEM_BUFFERING, - INPUT_SYSTEM_XMEM_CAPTURE, - INPUT_SYSTEM_XMEM_ACQUIRE, - N_INPUT_SYSTEM_BUFFERING_MODE -} buffering_mode_t; - -typedef struct input_system_cfg_s input_system_cfg_t; -typedef struct sync_generator_cfg_s sync_generator_cfg_t; -typedef struct tpg_cfg_s tpg_cfg_t; -typedef struct prbs_cfg_s prbs_cfg_t; - -/* MW: uint16_t should be sufficient */ -struct input_system_cfg_s { - uint32_t no_side_band; - uint32_t fmt_type; - uint32_t ch_id; - uint32_t input_mode; -}; - -struct sync_generator_cfg_s { - uint32_t width; - uint32_t height; - uint32_t hblank_cycles; - uint32_t vblank_cycles; -}; - -/* MW: tpg & prbs are exclusive */ -struct tpg_cfg_s { - uint32_t x_mask; - uint32_t y_mask; - uint32_t x_delta; - uint32_t y_delta; - uint32_t xy_mask; - sync_generator_cfg_t sync_gen_cfg; -}; - -struct prbs_cfg_s { - 
uint32_t seed; - sync_generator_cfg_t sync_gen_cfg; -}; - -struct gpfifo_cfg_s { -// TBD. - sync_generator_cfg_t sync_gen_cfg; -}; - -typedef struct gpfifo_cfg_s gpfifo_cfg_t; - -//ALX:Commented out to pass the compilation. -//typedef struct input_system_cfg_s input_system_cfg_t; - -struct ib_buffer_s { - uint32_t mem_reg_size; - uint32_t nof_mem_regs; - uint32_t mem_reg_addr; -}; - -typedef struct ib_buffer_s ib_buffer_t; - -struct csi_cfg_s { - uint32_t csi_port; - buffering_mode_t buffering_mode; - ib_buffer_t csi_buffer; - ib_buffer_t acquisition_buffer; - uint32_t nof_xmem_buffers; -}; - -typedef struct csi_cfg_s csi_cfg_t; - -typedef enum { - INPUT_SYSTEM_CFG_FLAG_RESET = 0, - INPUT_SYSTEM_CFG_FLAG_SET = 1U << 0, - INPUT_SYSTEM_CFG_FLAG_BLOCKED = 1U << 1, - INPUT_SYSTEM_CFG_FLAG_REQUIRED = 1U << 2, - INPUT_SYSTEM_CFG_FLAG_CONFLICT = 1U << 3 // To mark a conflicting configuration. -} input_system_cfg_flag_t; - -typedef uint32_t input_system_config_flags_t; - -#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/irq_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/irq_global.h deleted file mode 100644 index 64554d80dc0b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/irq_global.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IRQ_GLOBAL_H_INCLUDED__ -#define __IRQ_GLOBAL_H_INCLUDED__ - -#include - -#define IS_IRQ_VERSION_2 -#define IS_IRQ_MAP_VERSION_2 - -/* We cannot include the (hrt host ID) file defining the "CSS_RECEIVER" property without side effects */ -#ifndef HAS_NO_RX -#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) -/*#define CSS_RECEIVER testbench_isp_inp_sys_csi_receiver*/ -#include "hive_isp_css_irq_types_hrt.h" /* enum hrt_isp_css_irq */ -#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM) -/*#define CSS_RECEIVER testbench_isp_is_2400_inp_sys_csi_receiver*/ -#include "hive_isp_css_2401_irq_types_hrt.h" /* enum hrt_isp_css_irq */ -#else -#error "irq_global.h: 2400_SYSTEM must be one of {2400, 2401 }" -#endif -#endif - -/* The IRQ is not mapped uniformly on its related interfaces */ -#define IRQ_SW_CHANNEL_OFFSET hrt_isp_css_irq_sw_pin_0 - -typedef enum { - IRQ_SW_CHANNEL0_ID = hrt_isp_css_irq_sw_pin_0 - IRQ_SW_CHANNEL_OFFSET, - IRQ_SW_CHANNEL1_ID = hrt_isp_css_irq_sw_pin_1 - IRQ_SW_CHANNEL_OFFSET, - N_IRQ_SW_CHANNEL_ID -} irq_sw_channel_id_t; - -#endif /* __IRQ_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/isp_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/isp_global.h deleted file mode 100644 index 14d574849a5b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/isp_global.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef __ISP_GLOBAL_H_INCLUDED__ -#define __ISP_GLOBAL_H_INCLUDED__ - -#include - -#if defined (HAS_ISP_2401_MAMOIADA) -#define IS_ISP_2401_MAMOIADA - -#include "isp2401_mamoiada_params.h" -#elif defined (HAS_ISP_2400_MAMOIADA) -#define IS_ISP_2400_MAMOIADA - -#include "isp2400_mamoiada_params.h" -#else -#error "isp_global_h: ISP_2400_MAMOIDA must be one of {2400, 2401 }" -#endif - -#define ISP_PMEM_WIDTH_LOG2 ISP_LOG2_PMEM_WIDTH -#define ISP_PMEM_SIZE ISP_PMEM_DEPTH - -#define ISP_NWAY_LOG2 6 -#define ISP_VEC_NELEMS_LOG2 ISP_NWAY_LOG2 - -#ifdef ISP2401 -#ifdef PIPE_GENERATION -#define PIPEMEM(x) MEM(x) -#define ISP_NWAY (1< - -#endif /* __MMU_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/sp_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/sp_global.h deleted file mode 100644 index 6ec4e590e3b4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/sp_global.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __SP_GLOBAL_H_INCLUDED__ -#define __SP_GLOBAL_H_INCLUDED__ - -#include - -#if defined(HAS_SP_2401) -#define IS_SP_2401 -/* 2401 uses 2400 */ -#include -#elif defined(HAS_SP_2400) -#define IS_SP_2400 - -#include -#else -#error "sp_global.h: SP_2400 must be one of {2400, 2401 }" -#endif - -#define SP_PMEM_WIDTH_LOG2 SP_PMEM_LOG_WIDTH_BITS -#define SP_PMEM_SIZE SP_PMEM_DEPTH - -#define SP_DMEM_SIZE 0x4000 - -/* SP Registers */ -#define SP_PC_REG 0x09 -#define SP_SC_REG 0x00 -#define SP_START_ADDR_REG 0x01 -#define SP_ICACHE_ADDR_REG 0x05 -#define SP_IRQ_READY_REG 0x00 -#define SP_IRQ_CLEAR_REG 0x00 -#define SP_ICACHE_INV_REG 0x00 -#define SP_CTRL_SINK_REG 0x0A - -/* SP Register bits */ -#define SP_RST_BIT 0x00 -#define SP_START_BIT 0x01 -#define SP_BREAK_BIT 0x02 -#define SP_RUN_BIT 0x03 -#define SP_BROKEN_BIT 0x04 -#define SP_IDLE_BIT 0x05 /* READY */ -#define SP_SLEEPING_BIT 0x06 -#define SP_STALLING_BIT 0x07 -#define SP_IRQ_CLEAR_BIT 0x08 -#define SP_IRQ_READY_BIT 0x0A -#define SP_IRQ_SLEEPING_BIT 0x0B - -#define SP_ICACHE_INV_BIT 0x0C -#define SP_IPREFETCH_EN_BIT 0x0D - -#define SP_FIFO0_SINK_BIT 0x00 -#define SP_FIFO1_SINK_BIT 0x01 -#define SP_FIFO2_SINK_BIT 0x02 -#define SP_FIFO3_SINK_BIT 0x03 -#define SP_FIFO4_SINK_BIT 0x04 -#define SP_FIFO5_SINK_BIT 0x05 -#define SP_FIFO6_SINK_BIT 0x06 -#define SP_FIFO7_SINK_BIT 0x07 -#define SP_FIFO8_SINK_BIT 0x08 -#define SP_FIFO9_SINK_BIT 0x09 -#define SP_FIFOA_SINK_BIT 0x0A -#define SP_DMEM_SINK_BIT 0x0B -#define SP_CTRL_MT_SINK_BIT 0x0C -#define SP_ICACHE_MT_SINK_BIT 0x0D - -#define SP_FIFO0_SINK_REG 0x0A -#define SP_FIFO1_SINK_REG 0x0A -#define SP_FIFO2_SINK_REG 0x0A -#define SP_FIFO3_SINK_REG 0x0A -#define SP_FIFO4_SINK_REG 0x0A -#define SP_FIFO5_SINK_REG 0x0A -#define SP_FIFO6_SINK_REG 0x0A -#define SP_FIFO7_SINK_REG 0x0A -#define SP_FIFO8_SINK_REG 0x0A -#define SP_FIFO9_SINK_REG 0x0A -#define SP_FIFOA_SINK_REG 0x0A -#define SP_DMEM_SINK_REG 0x0A -#define SP_CTRL_MT_SINK_REG 0x0A -#define 
SP_ICACHE_MT_SINK_REG 0x0A - -#endif /* __SP_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/system_global.h deleted file mode 100644 index 6f63962a54e8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/system_global.h +++ /dev/null @@ -1,348 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SYSTEM_GLOBAL_H_INCLUDED__ -#define __SYSTEM_GLOBAL_H_INCLUDED__ - -#include -#include - -/* - * The longest allowed (uninteruptible) bus transfer, does not - * take stalling into account - */ -#define HIVE_ISP_MAX_BURST_LENGTH 1024 - -/* - * Maximum allowed burst length in words for the ISP DMA - */ -#define ISP_DMA_MAX_BURST_LENGTH 128 - -/* - * Create a list of HAS and IS properties that defines the system - * - * The configuration assumes the following - * - The system is hetereogeneous; Multiple cells and devices classes - * - The cell and device instances are homogeneous, each device type - * belongs to the same class - * - Device instances supporting a subset of the class capabilities are - * allowed - * - * We could manage different device classes through the enumerated - * lists (C) or the use of classes (C++), but that is presently not - * fully supported - * - * N.B. 
the 3 input formatters are of 2 different classess - */ - -#define IS_ISP_2400_SYSTEM -/* - * Since this file is visible everywhere and the system definition - * macros are not, detect the separate definitions for {host, SP, ISP} - * - * The 2401 system has the nice property that it uses a vanilla 2400 SP - * so the SP will believe it is a 2400 system rather than 2401... - */ -//#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) || defined(__scalar_processor_2401) -#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) -#define IS_ISP_2401_MAMOIADA_SYSTEM -#define HAS_ISP_2401_MAMOIADA -#define HAS_SP_2400 -//#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) || defined(__scalar_processor_2400) -#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) -#define IS_ISP_2400_MAMOIADA_SYSTEM -#define HAS_ISP_2400_MAMOIADA -#define HAS_SP_2400 -#else -#error "system_global.h: 2400_SYSTEM must be one of {2400, 2401 }" -#endif - -#define USE_INPUT_SYSTEM_VERSION_2 - -#define HAS_MMU_VERSION_2 -#define HAS_DMA_VERSION_2 -#define HAS_GDC_VERSION_2 -#define HAS_VAMEM_VERSION_2 -#define HAS_HMEM_VERSION_1 -#define HAS_BAMEM_VERSION_2 -#define HAS_IRQ_VERSION_2 -#define HAS_IRQ_MAP_VERSION_2 -#define HAS_INPUT_FORMATTER_VERSION_2 -/* 2401: HAS_INPUT_SYSTEM_VERSION_2401 */ -#define HAS_INPUT_SYSTEM_VERSION_2 -#define HAS_BUFFERED_SENSOR -#define HAS_FIFO_MONITORS_VERSION_2 -/* #define HAS_GP_REGS_VERSION_2 */ -#define HAS_GP_DEVICE_VERSION_2 -#define HAS_GPIO_VERSION_1 -#define HAS_TIMED_CTRL_VERSION_1 -#define HAS_RX_VERSION_2 - -#define DMA_DDR_TO_VAMEM_WORKAROUND -#define DMA_DDR_TO_HMEM_WORKAROUND - -/* - * Semi global. 
"HRT" is accessible from SP, but the HRT types do not fully apply - */ -#define HRT_VADDRESS_WIDTH 32 -//#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property*/ -#define HRT_DATA_WIDTH 32 - -#define SIZEOF_HRT_REG (HRT_DATA_WIDTH>>3) -#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH/8) - -/* The main bus connecting all devices */ -#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH -#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES - -/* per-frame parameter handling support */ -#define SH_CSS_ENABLE_PER_FRAME_PARAMS - -typedef uint32_t hrt_bus_align_t; - -/* - * Enumerate the devices, device access through the API is by ID, through the DLI by address - * The enumerator terminators are used to size the wiring arrays and as an exception value. - */ -typedef enum { - DDR0_ID = 0, - N_DDR_ID -} ddr_ID_t; - -typedef enum { - ISP0_ID = 0, - N_ISP_ID -} isp_ID_t; - -typedef enum { - SP0_ID = 0, - N_SP_ID -} sp_ID_t; - -#if defined (IS_ISP_2401_MAMOIADA_SYSTEM) -typedef enum { - MMU0_ID = 0, - MMU1_ID, - N_MMU_ID -} mmu_ID_t; -#elif defined (IS_ISP_2400_MAMOIADA_SYSTEM) -typedef enum { - MMU0_ID = 0, - MMU1_ID, - N_MMU_ID -} mmu_ID_t; -#else -#error "system_global.h: SYSTEM must be one of {2400, 2401}" -#endif - -typedef enum { - DMA0_ID = 0, - N_DMA_ID -} dma_ID_t; - -typedef enum { - GDC0_ID = 0, - GDC1_ID, - N_GDC_ID -} gdc_ID_t; - -#define N_GDC_ID_CPP 2 // this extra define is needed because we want to use it also in the preprocessor, and that doesn't work with enums. 
- -typedef enum { - VAMEM0_ID = 0, - VAMEM1_ID, - VAMEM2_ID, - N_VAMEM_ID -} vamem_ID_t; - -typedef enum { - BAMEM0_ID = 0, - N_BAMEM_ID -} bamem_ID_t; - -typedef enum { - HMEM0_ID = 0, - N_HMEM_ID -} hmem_ID_t; - -/* -typedef enum { - IRQ0_ID = 0, - N_IRQ_ID -} irq_ID_t; -*/ - -typedef enum { - IRQ0_ID = 0, // GP IRQ block - IRQ1_ID, // Input formatter - IRQ2_ID, // input system - IRQ3_ID, // input selector - N_IRQ_ID -} irq_ID_t; - -typedef enum { - FIFO_MONITOR0_ID = 0, - N_FIFO_MONITOR_ID -} fifo_monitor_ID_t; - -/* - * Deprecated: Since all gp_reg instances are different - * and put in the address maps of other devices we cannot - * enumerate them as that assumes the instrances are the - * same. - * - * We define a single GP_DEVICE containing all gp_regs - * w.r.t. a single base address - * -typedef enum { - GP_REGS0_ID = 0, - N_GP_REGS_ID -} gp_regs_ID_t; - */ -typedef enum { - GP_DEVICE0_ID = 0, - N_GP_DEVICE_ID -} gp_device_ID_t; - -typedef enum { - GP_TIMER0_ID = 0, - GP_TIMER1_ID, - GP_TIMER2_ID, - GP_TIMER3_ID, - GP_TIMER4_ID, - GP_TIMER5_ID, - GP_TIMER6_ID, - GP_TIMER7_ID, - N_GP_TIMER_ID -} gp_timer_ID_t; - -typedef enum { - GPIO0_ID = 0, - N_GPIO_ID -} gpio_ID_t; - -typedef enum { - TIMED_CTRL0_ID = 0, - N_TIMED_CTRL_ID -} timed_ctrl_ID_t; - -typedef enum { - INPUT_FORMATTER0_ID = 0, - INPUT_FORMATTER1_ID, - INPUT_FORMATTER2_ID, - INPUT_FORMATTER3_ID, - N_INPUT_FORMATTER_ID -} input_formatter_ID_t; - -/* The IF RST is outside the IF */ -#define INPUT_FORMATTER0_SRST_OFFSET 0x0824 -#define INPUT_FORMATTER1_SRST_OFFSET 0x0624 -#define INPUT_FORMATTER2_SRST_OFFSET 0x0424 -#define INPUT_FORMATTER3_SRST_OFFSET 0x0224 - -#define INPUT_FORMATTER0_SRST_MASK 0x0001 -#define INPUT_FORMATTER1_SRST_MASK 0x0002 -#define INPUT_FORMATTER2_SRST_MASK 0x0004 -#define INPUT_FORMATTER3_SRST_MASK 0x0008 - -typedef enum { - INPUT_SYSTEM0_ID = 0, - N_INPUT_SYSTEM_ID -} input_system_ID_t; - -typedef enum { - RX0_ID = 0, - N_RX_ID -} rx_ID_t; - -enum mipi_port_id { - 
MIPI_PORT0_ID = 0, - MIPI_PORT1_ID, - MIPI_PORT2_ID, - N_MIPI_PORT_ID -}; - -#define N_RX_CHANNEL_ID 4 - -/* Generic port enumeration with an internal port type ID */ -typedef enum { - CSI_PORT0_ID = 0, - CSI_PORT1_ID, - CSI_PORT2_ID, - TPG_PORT0_ID, - PRBS_PORT0_ID, - FIFO_PORT0_ID, - MEMORY_PORT0_ID, - N_INPUT_PORT_ID -} input_port_ID_t; - -typedef enum { - CAPTURE_UNIT0_ID = 0, - CAPTURE_UNIT1_ID, - CAPTURE_UNIT2_ID, - ACQUISITION_UNIT0_ID, - DMA_UNIT0_ID, - CTRL_UNIT0_ID, - GPREGS_UNIT0_ID, - FIFO_UNIT0_ID, - IRQ_UNIT0_ID, - N_SUB_SYSTEM_ID -} sub_system_ID_t; - -#define N_CAPTURE_UNIT_ID 3 -#define N_ACQUISITION_UNIT_ID 1 -#define N_CTRL_UNIT_ID 1 - -enum ia_css_isp_memories { - IA_CSS_ISP_PMEM0 = 0, - IA_CSS_ISP_DMEM0, - IA_CSS_ISP_VMEM0, - IA_CSS_ISP_VAMEM0, - IA_CSS_ISP_VAMEM1, - IA_CSS_ISP_VAMEM2, - IA_CSS_ISP_HMEM0, - IA_CSS_SP_DMEM0, - IA_CSS_DDR, - N_IA_CSS_MEMORIES -}; -#define IA_CSS_NUM_MEMORIES 9 -/* For driver compatability */ -#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES -#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES - -#if 0 -typedef enum { - dev_chn, /* device channels, external resource */ - ext_mem, /* external memories */ - int_mem, /* internal memories */ - int_chn /* internal channels, user defined */ -} resource_type_t; - -/* if this enum is extended with other memory resources, pls also extend the function resource_to_memptr() */ -typedef enum { - vied_nci_dev_chn_dma_ext0, - int_mem_vmem0, - int_mem_dmem0 -} resource_id_t; - -/* enum listing the different memories within a program group. 
- This enum is used in the mem_ptr_t type */ -typedef enum { - buf_mem_invalid = 0, - buf_mem_vmem_prog0, - buf_mem_dmem_prog0 -} buf_mem_t; - -#endif -#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/timed_ctrl_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/timed_ctrl_global.h deleted file mode 100644 index c3e8a0104092..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/timed_ctrl_global.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __TIMED_CTRL_GLOBAL_H_INCLUDED__ -#define __TIMED_CTRL_GLOBAL_H_INCLUDED__ - -#define IS_TIMED_CTRL_VERSION_1 - -#include - -/** - * Order of the input bits for the timed controller taken from - * ISP_CSS_2401 System Architecture Description valid for - * 2400, 2401. - * - * Check for other systems. 
- */ -#define HIVE_TIMED_CTRL_GPIO_PIN_0_BIT_ID 0 -#define HIVE_TIMED_CTRL_GPIO_PIN_1_BIT_ID 1 -#define HIVE_TIMED_CTRL_GPIO_PIN_2_BIT_ID 2 -#define HIVE_TIMED_CTRL_GPIO_PIN_3_BIT_ID 3 -#define HIVE_TIMED_CTRL_GPIO_PIN_4_BIT_ID 4 -#define HIVE_TIMED_CTRL_GPIO_PIN_5_BIT_ID 5 -#define HIVE_TIMED_CTRL_GPIO_PIN_6_BIT_ID 6 -#define HIVE_TIMED_CTRL_GPIO_PIN_7_BIT_ID 7 -#define HIVE_TIMED_CTRL_GPIO_PIN_8_BIT_ID 8 -#define HIVE_TIMED_CTRL_GPIO_PIN_9_BIT_ID 9 -#define HIVE_TIMED_CTRL_GPIO_PIN_10_BIT_ID 10 -#define HIVE_TIMED_CTRL_GPIO_PIN_11_BIT_ID 11 -#define HIVE_TIMED_CTRL_IRQ_SP_BIT_ID 12 -#define HIVE_TIMED_CTRL_IRQ_ISP_BIT_ID 13 -#define HIVE_TIMED_CTRL_IRQ_INPUT_SYSTEM_BIT_ID 14 -#define HIVE_TIMED_CTRL_IRQ_INPUT_SELECTOR_BIT_ID 15 -#define HIVE_TIMED_CTRL_IRQ_IF_BLOCK_BIT_ID 16 -#define HIVE_TIMED_CTRL_IRQ_GP_TIMER_0_BIT_ID 17 -#define HIVE_TIMED_CTRL_IRQ_GP_TIMER_1_BIT_ID 18 -#define HIVE_TIMED_CTRL_CSI_SOL_BIT_ID 19 -#define HIVE_TIMED_CTRL_CSI_EOL_BIT_ID 20 -#define HIVE_TIMED_CTRL_CSI_SOF_BIT_ID 21 -#define HIVE_TIMED_CTRL_CSI_EOF_BIT_ID 22 -#define HIVE_TIMED_CTRL_IRQ_IS_STREAMING_MONITOR_BIT_ID 23 - - - -#endif /* __TIMED_CTRL_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vamem_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vamem_global.h deleted file mode 100644 index 58713c6583b9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vamem_global.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __VAMEM_GLOBAL_H_INCLUDED__ -#define __VAMEM_GLOBAL_H_INCLUDED__ - -#include - -#define IS_VAMEM_VERSION_2 - -/* (log) stepsize of linear interpolation */ -#define VAMEM_INTERP_STEP_LOG2 4 -#define VAMEM_INTERP_STEP (1< -#define assert(cnd) ASSERT(cnd) -#else -/* Windows usermode compilation */ -#include -#endif - -#elif defined(__KERNEL__) -#include - -/* TODO: it would be cleaner to use this: - * #define assert(cnd) BUG_ON(cnd) - * but that causes many compiler warnings (==errors) under Android - * because it seems that the BUG_ON() macro is not seen as a check by - * gcc like the BUG() macro is. */ -#define assert(cnd) \ - do { \ - if (!(cnd)) \ - BUG(); \ - } while (0) - -#elif defined(__FIST__) || defined(__GNUC__) - -/* enable assert for crun */ -#include "assert.h" - -#else /* default for unknown environments */ -#define assert(cnd) ((void)0) -#endif - -#endif /* NDEBUG */ - -#ifndef PIPE_GENERATION -/* Deprecated OP___assert, this is still used in ~1000 places - * in the code. This will be removed over time. 
- * The implemenation for the pipe generation tool is in see support.isp.h */ -#define OP___assert(cnd) assert(cnd) - -static inline void compile_time_assert (unsigned cond) -{ - /* Call undefined function if cond is false */ - extern void _compile_time_assert (void); - if (!cond) _compile_time_assert(); -} -#endif /* PIPE_GENERATION */ - -#endif /* __ASSERT_SUPPORT_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bitop_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bitop_support.h deleted file mode 100644 index 1b271c3c6a25..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bitop_support.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __BITOP_SUPPORT_H_INCLUDED__ -#define __BITOP_SUPPORT_H_INCLUDED__ - -#define bitop_setbit(a, b) ((a) |= (1UL << (b))) - -#define bitop_getbit(a, b) (((a) & (1UL << (b))) != 0) - -#define bitop_clearbit(a, b) ((a) &= ~(1UL << (b))) - -#endif /* __BITOP_SUPPORT_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h deleted file mode 100644 index 917ee8cdb1d9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __CSI_RX_H_INCLUDED__ -#define __CSI_RX_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the input system device(s). It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. 
- * - * - system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & - * inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "csi_rx_local.h" - -#ifndef __INLINE_CSI_RX__ -#include "csi_rx_public.h" -#else /* __INLINE_CSI_RX__ */ -#include "csi_rx_private.h" -#endif /* __INLINE_CSI_RX__ */ - -#endif /* __CSI_RX_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h deleted file mode 100644 index 0aa22446e27e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __DEBUG_H_INCLUDED__ -#define __DEBUG_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the DMA device. It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . 
system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - * - */ - - -#include "system_local.h" -#include "debug_local.h" - -#ifndef __INLINE_DEBUG__ -#define STORAGE_CLASS_DEBUG_H extern -#define STORAGE_CLASS_DEBUG_C -#include "debug_public.h" -#else /* __INLINE_DEBUG__ */ -#define STORAGE_CLASS_DEBUG_H static inline -#define STORAGE_CLASS_DEBUG_C static inline -#include "debug_private.h" -#endif /* __INLINE_DEBUG__ */ - -#endif /* __DEBUG_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/device_access/device_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/device_access/device_access.h deleted file mode 100644 index 834e7c3e0814..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/device_access/device_access.h +++ /dev/null @@ -1,194 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. 
- -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __DEVICE_ACCESS_H_INCLUDED__ -#define __DEVICE_ACCESS_H_INCLUDED__ - -/*! - * \brief - * Define the public interface for physical system - * access functions to SRAM and registers. Access - * types are limited to those defined in - * All accesses are aligned - * - * The address representation is private to the system - * and represented as/stored in "hrt_address". - * - * The system global address can differ by an offset; - * The device base address. This offset must be added - * by the implementation of the access function - * - * "store" is a transfer to the device - * "load" is a transfer from the device - */ - -#include - -/* - * User provided file that defines the system address types: - * - hrt_address a type that can hold the (sub)system address range - */ -#include "system_types.h" -/* - * We cannot assume that the global system address size is the size of - * a pointer because a (say) 64-bit host can be simulated in a 32-bit - * environment. Only if the host environment is modelled as on the target - * we could use a pointer. Even then, prototyping may need to be done - * before the target environment is available. AS we cannot wait for that - * we are stuck with integer addresses - */ - -/*typedef char *sys_address;*/ -typedef hrt_address sys_address; - -/*! Set the (sub)system base address - - \param base_addr[in] The offset on which the (sub)system is located - in the global address map - - \return none, - */ -extern void device_set_base_address( - const sys_address base_addr); - - -/*! 
Get the (sub)system base address - - \return base_address, - */ -extern sys_address device_get_base_address(void); - -/*! Read an 8-bit value from a device register or memory in the device - - \param addr[in] Local address - - \return device[addr] - */ -extern uint8_t ia_css_device_load_uint8( - const hrt_address addr); - -/*! Read a 16-bit value from a device register or memory in the device - - \param addr[in] Local address - - \return device[addr] - */ -extern uint16_t ia_css_device_load_uint16( - const hrt_address addr); - -/*! Read a 32-bit value from a device register or memory in the device - - \param addr[in] Local address - - \return device[addr] - */ -extern uint32_t ia_css_device_load_uint32( - const hrt_address addr); - -/*! Read a 64-bit value from a device register or memory in the device - - \param addr[in] Local address - - \return device[addr] - */ -extern uint64_t ia_css_device_load_uint64( - const hrt_address addr); - -/*! Write an 8-bit value to a device register or memory in the device - - \param addr[in] Local address - \param data[in] value - - \return none, device[addr] = value - */ -extern void ia_css_device_store_uint8( - const hrt_address addr, - const uint8_t data); - -/*! Write a 16-bit value to a device register or memory in the device - - \param addr[in] Local address - \param data[in] value - - \return none, device[addr] = value - */ -extern void ia_css_device_store_uint16( - const hrt_address addr, - const uint16_t data); - -/*! Write a 32-bit value to a device register or memory in the device - - \param addr[in] Local address - \param data[in] value - - \return none, device[addr] = value - */ -extern void ia_css_device_store_uint32( - const hrt_address addr, - const uint32_t data); - -/*! 
Write a 64-bit value to a device register or memory in the device - - \param addr[in] Local address - \param data[in] value - - \return none, device[addr] = value - */ -extern void ia_css_device_store_uint64( - const hrt_address addr, - const uint64_t data); - -/*! Read an array of bytes from device registers or memory in the device - - \param addr[in] Local address - \param data[out] pointer to the destination array - \param size[in] number of bytes to read - - \return none - */ -extern void ia_css_device_load( - const hrt_address addr, - void *data, - const size_t size); - -/*! Write an array of bytes to device registers or memory in the device - - \param addr[in] Local address - \param data[in] pointer to the source array - \param size[in] number of bytes to write - - \return none - */ -extern void ia_css_device_store( - const hrt_address addr, - const void *data, - const size_t size); - -#endif /* __DEVICE_ACCESS_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h deleted file mode 100644 index d9dee691e3f8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __DMA_H_INCLUDED__ -#define __DMA_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the DMA device. It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - * - */ - - -#include "system_local.h" -#include "dma_local.h" - -#ifndef __INLINE_DMA__ -#define STORAGE_CLASS_DMA_H extern -#define STORAGE_CLASS_DMA_C -#include "dma_public.h" -#else /* __INLINE_DMA__ */ -#define STORAGE_CLASS_DMA_H static inline -#define STORAGE_CLASS_DMA_C static inline -#include "dma_private.h" -#endif /* __INLINE_DMA__ */ - -#endif /* __DMA_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/error_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/error_support.h deleted file mode 100644 index 6e5e5dd4107d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/error_support.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __ERROR_SUPPORT_H_INCLUDED__ -#define __ERROR_SUPPORT_H_INCLUDED__ - -#if defined(_MSC_VER) -#include -/* - * Put here everything _MSC_VER specific not covered in - * "errno.h" - */ -#define EINVAL 22 -#define EBADE 52 -#define ENODATA 61 -#define ENOTCONN 107 -#define ENOTSUP 252 -#define ENOBUFS 233 - - -#elif defined(__KERNEL__) -#include -/* - * Put here everything __KERNEL__ specific not covered in - * "errno.h" - */ -#define ENOTSUP 252 - -#elif defined(__GNUC__) -#include -/* - * Put here everything __GNUC__ specific not covered in - * "errno.h" - */ - -#else /* default is for the FIST environment */ -#include -/* - * Put here everything FIST specific not covered in - * "errno.h" - */ - -#endif - -#define verifexit(cond,error_tag) \ -do { \ - if (!(cond)){ \ - goto EXIT; \ - } \ -} while(0) - -#define verifjmpexit(cond) \ -do { \ - if (!(cond)){ \ - goto EXIT; \ - } \ -} while(0) - -#endif /* __ERROR_SUPPORT_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h deleted file mode 100644 index df579e902796..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __EVENT_FIFO_H -#define __EVENT_FIFO_H - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the IRQ device. It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "event_fifo_local.h" - -#ifndef __INLINE_EVENT__ -#define STORAGE_CLASS_EVENT_H extern -#define STORAGE_CLASS_EVENT_C -#include "event_fifo_public.h" -#else /* __INLINE_EVENT__ */ -#define STORAGE_CLASS_EVENT_H static inline -#define STORAGE_CLASS_EVENT_C static inline -#include "event_fifo_private.h" -#endif /* __INLINE_EVENT__ */ - -#endif /* __EVENT_FIFO_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h deleted file mode 100644 index f10c4fa2e32b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __FIFO_MONITOR_H_INCLUDED__ -#define __FIFO_MONITOR_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the input system device(s). It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "fifo_monitor_local.h" - -#ifndef __INLINE_FIFO_MONITOR__ -#define STORAGE_CLASS_FIFO_MONITOR_H extern -#define STORAGE_CLASS_FIFO_MONITOR_C -#include "fifo_monitor_public.h" -#else /* __INLINE_FIFO_MONITOR__ */ -#define STORAGE_CLASS_FIFO_MONITOR_H static inline -#define STORAGE_CLASS_FIFO_MONITOR_C static inline -#include "fifo_monitor_private.h" -#endif /* __INLINE_FIFO_MONITOR__ */ - -#endif /* __FIFO_MONITOR_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h deleted file mode 100644 index 75c6854c8e7b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GDC_DEVICE_H_INCLUDED__ -#define __GDC_DEVICE_H_INCLUDED__ - -/* The file gdc.h already exists */ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the GDC device. It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "gdc_local.h" - -#ifndef __INLINE_GDC__ -#define STORAGE_CLASS_GDC_H extern -#define STORAGE_CLASS_GDC_C -#include "gdc_public.h" -#else /* __INLINE_GDC__ */ -#define STORAGE_CLASS_GDC_H static inline -#define STORAGE_CLASS_GDC_C static inline -#include "gdc_private.h" -#endif /* __INLINE_GDC__ */ - -#endif /* __GDC_DEVICE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h deleted file mode 100644 index aba94e623043..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_DEVICE_H_INCLUDED__ -#define __GP_DEVICE_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the input system device(s). It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "gp_device_local.h" - -#ifndef __INLINE_GP_DEVICE__ -#define STORAGE_CLASS_GP_DEVICE_H extern -#define STORAGE_CLASS_GP_DEVICE_C -#include "gp_device_public.h" -#else /* __INLINE_GP_DEVICE__ */ -#define STORAGE_CLASS_GP_DEVICE_H static inline -#define STORAGE_CLASS_GP_DEVICE_C static inline -#include "gp_device_private.h" -#endif /* __INLINE_GP_DEVICE__ */ - -#endif /* __GP_DEVICE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h deleted file mode 100644 index d5d2df24e11a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP 
subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_TIMER_H_INCLUDED__ -#define __GP_TIMER_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the input system device(s). It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" /*GP_TIMER_BASE address */ -#include "gp_timer_local.h" /*GP_TIMER register offsets */ - -#ifndef __INLINE_GP_TIMER__ -#define STORAGE_CLASS_GP_TIMER_H extern -#define STORAGE_CLASS_GP_TIMER_C -#include "gp_timer_public.h" /* functions*/ -#else /* __INLINE_GP_TIMER__ */ -#define STORAGE_CLASS_GP_TIMER_H static inline -#define STORAGE_CLASS_GP_TIMER_C static inline -#include "gp_timer_private.h" /* inline functions*/ -#endif /* __INLINE_GP_TIMER__ */ - -#endif /* __GP_TIMER_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h deleted file mode 100644 index d37f7166aa4a..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GPIO_H_INCLUDED__ -#define __GPIO_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the input system device(s). It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . 
system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "gpio_local.h" - -#ifndef __INLINE_GPIO__ -#define STORAGE_CLASS_GPIO_H extern -#define STORAGE_CLASS_GPIO_C -#include "gpio_public.h" -#else /* __INLINE_GPIO__ */ -#define STORAGE_CLASS_GPIO_H static inline -#define STORAGE_CLASS_GPIO_C static inline -#include "gpio_private.h" -#endif /* __INLINE_GPIO__ */ - -#endif /* __GPIO_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h deleted file mode 100644 index a82fd3a21e98..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __HMEM_H_INCLUDED__ -#define __HMEM_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the HMEM device. It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . 
system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "hmem_local.h" - -#ifndef __INLINE_HMEM__ -#define STORAGE_CLASS_HMEM_H extern -#define STORAGE_CLASS_HMEM_C -#include "hmem_public.h" -#else /* __INLINE_HMEM__ */ -#define STORAGE_CLASS_HMEM_H static inline -#define STORAGE_CLASS_HMEM_C static inline -#include "hmem_private.h" -#endif /* __INLINE_HMEM__ */ - -#endif /* __HMEM_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h deleted file mode 100644 index 426d022d3a26..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __CSI_RX_PUBLIC_H_INCLUDED__ -#define __CSI_RX_PUBLIC_H_INCLUDED__ - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -/***************************************************** - * - * Native command interface (NCI). - * - *****************************************************/ -/** - * @brief Get the csi rx frontend state. 
- * Get the state of the csi rx frontend regiester-set. - * - * @param[in] id The global unique ID of the csi rx fe controller. - * @param[out] state Point to the register-state. - */ -extern void csi_rx_fe_ctrl_get_state( - const csi_rx_frontend_ID_t ID, - csi_rx_fe_ctrl_state_t *state); -/** - * @brief Dump the csi rx frontend state. - * Dump the state of the csi rx frontend regiester-set. - * - * @param[in] id The global unique ID of the csi rx fe controller. - * @param[in] state Point to the register-state. - */ -extern void csi_rx_fe_ctrl_dump_state( - const csi_rx_frontend_ID_t ID, - csi_rx_fe_ctrl_state_t *state); -/** - * @brief Get the state of the csi rx fe dlane. - * Get the state of the register set per dlane process. - * - * @param[in] id The global unique ID of the input-buffer controller. - * @param[in] lane The lane ID. - * @param[out] state Point to the dlane state. - */ -extern void csi_rx_fe_ctrl_get_dlane_state( - const csi_rx_frontend_ID_t ID, - const uint32_t lane, - csi_rx_fe_ctrl_lane_t *dlane_state); -/** - * @brief Get the csi rx backend state. - * Get the state of the csi rx backend regiester-set. - * - * @param[in] id The global unique ID of the csi rx be controller. - * @param[out] state Point to the register-state. - */ -extern void csi_rx_be_ctrl_get_state( - const csi_rx_backend_ID_t ID, - csi_rx_be_ctrl_state_t *state); -/** - * @brief Dump the csi rx backend state. - * Dump the state of the csi rx backend regiester-set. - * - * @param[in] id The global unique ID of the csi rx be controller. - * @param[in] state Point to the register-state. - */ -extern void csi_rx_be_ctrl_dump_state( - const csi_rx_backend_ID_t ID, - csi_rx_be_ctrl_state_t *state); -/* end of NCI */ - -/***************************************************** - * - * Device level interface (DLI). - * - *****************************************************/ -/** - * @brief Load the register value. - * Load the value of the register of the csi rx fe. 
- * - * @param[in] ID The global unique ID for the ibuf-controller instance. - * @param[in] reg The offet address of the register. - * - * @return the value of the register. - */ -extern hrt_data csi_rx_fe_ctrl_reg_load( - const csi_rx_frontend_ID_t ID, - const hrt_address reg); -/** - * @brief Store a value to the register. - * Store a value to the registe of the csi rx fe. - * - * @param[in] ID The global unique ID for the ibuf-controller instance. - * @param[in] reg The offet address of the register. - * @param[in] value The value to be stored. - * - */ -extern void csi_rx_fe_ctrl_reg_store( - const csi_rx_frontend_ID_t ID, - const hrt_address reg, - const hrt_data value); -/** - * @brief Load the register value. - * Load the value of the register of the csirx be. - * - * @param[in] ID The global unique ID for the ibuf-controller instance. - * @param[in] reg The offet address of the register. - * - * @return the value of the register. - */ -extern hrt_data csi_rx_be_ctrl_reg_load( - const csi_rx_backend_ID_t ID, - const hrt_address reg); -/** - * @brief Store a value to the register. - * Store a value to the registe of the csi rx be. - * - * @param[in] ID The global unique ID for the ibuf-controller instance. - * @param[in] reg The offet address of the register. - * @param[in] value The value to be stored. 
- * - */ -extern void csi_rx_be_ctrl_reg_store( - const csi_rx_backend_ID_t ID, - const hrt_address reg, - const hrt_data value); -/* end of DLI */ -#endif /* USE_INPUT_SYSTEM_VERSION_2401 */ -#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/debug_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/debug_public.h deleted file mode 100644 index 90b4ba7e023f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/debug_public.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __DEBUG_PUBLIC_H_INCLUDED__ -#define __DEBUG_PUBLIC_H_INCLUDED__ - -#include -#include "system_types.h" - -/*! brief - * - * Simple queuing trace buffer for debug data - * instantiatable in SP DMEM - * - * The buffer has a remote and and a local store - * which contain duplicate data (when in sync). 
- * The buffers are automatically synched when the - * user dequeues, or manualy using the synch function - * - * An alternative (storage efficient) implementation - * could manage the buffers to contain unique data - * - * The buffer empty status is computed from local - * state which does not reflect the presence of data - * in the remote buffer (unless the alternative - * implementation is followed) - */ - -typedef struct debug_data_s debug_data_t; -typedef struct debug_data_ddr_s debug_data_ddr_t; - -extern debug_data_t *debug_data_ptr; -extern hrt_address debug_buffer_address; -extern hrt_vaddress debug_buffer_ddr_address; - -/*! Check the empty state of the local debug data buffer - - \return isEmpty(buffer) - */ -STORAGE_CLASS_DEBUG_H bool is_debug_buffer_empty(void); - -/*! Dequeue a token from the debug data buffer - - \return isEmpty(buffer)?0:buffer[head] - */ -STORAGE_CLASS_DEBUG_H hrt_data debug_dequeue(void); - -/*! Synchronise the remote buffer to the local buffer - - \return none - */ -STORAGE_CLASS_DEBUG_H void debug_synch_queue(void); - -/*! Synchronise the remote buffer to the local buffer - - \return none - */ -STORAGE_CLASS_DEBUG_H void debug_synch_queue_isp(void); - - -/*! Synchronise the remote buffer to the local buffer - - \return none - */ -STORAGE_CLASS_DEBUG_H void debug_synch_queue_ddr(void); - -/*! Set the offset/address of the (remote) debug buffer - - \return none - */ -extern void debug_buffer_init( - const hrt_address addr); - -/*! Set the offset/address of the (remote) debug buffer - - \return none - */ -extern void debug_buffer_ddr_init( - const hrt_vaddress addr); - -/*! 
Set the (remote) operating mode of the debug buffer - - \return none - */ -extern void debug_buffer_setmode( - const debug_buf_mode_t mode); - -#endif /* __DEBUG_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/dma_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/dma_public.h deleted file mode 100644 index 1d5e38ffe938..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/dma_public.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __DMA_PUBLIC_H_INCLUDED__ -#define __DMA_PUBLIC_H_INCLUDED__ - -#include "system_types.h" - -typedef struct dma_state_s dma_state_t; - -/*! Read the control registers of DMA[ID] - - \param ID[in] DMA identifier - \param state[out] input formatter state structure - - \return none, state = DMA[ID].state - */ -extern void dma_get_state( - const dma_ID_t ID, - dma_state_t *state); - -/*! Write to a control register of DMA[ID] - - \param ID[in] DMA identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, DMA[ID].ctrl[reg] = value - */ -STORAGE_CLASS_DMA_H void dma_reg_store( - const dma_ID_t ID, - const unsigned int reg, - const hrt_data value); - -/*! 
Read from a control register of DMA[ID] - - \param ID[in] DMA identifier - \param reg[in] register index - \param value[in] The data to be written - - \return DMA[ID].ctrl[reg] - */ -STORAGE_CLASS_DMA_H hrt_data dma_reg_load( - const dma_ID_t ID, - const unsigned int reg); - - -/*! Set maximum burst size of DMA[ID] - - \param ID[in] DMA identifier - \param conn[in] Connection to set max burst size for - \param max_burst_size[in] Maximum burst size in words - - \return none -*/ -void -dma_set_max_burst_size( - dma_ID_t ID, - dma_connection conn, - uint32_t max_burst_size); - -#endif /* __DMA_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/event_fifo_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/event_fifo_public.h deleted file mode 100644 index d95bc7070f4c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/event_fifo_public.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __EVENT_FIFO_PUBLIC_H -#define __EVENT_FIFO_PUBLIC_H - -#include -#include "system_types.h" - -/*! Blocking read from an event source EVENT[ID] - - \param ID[in] EVENT identifier - - \return none, dequeue(event_queue[ID]) - */ -STORAGE_CLASS_EVENT_H void event_wait_for( - const event_ID_t ID); - -/*! 
Conditional blocking wait for an event source EVENT[ID] - - \param ID[in] EVENT identifier - \param cnd[in] predicate - - \return none, if(cnd) dequeue(event_queue[ID]) - */ -STORAGE_CLASS_EVENT_H void cnd_event_wait_for( - const event_ID_t ID, - const bool cnd); - -/*! Blocking read from an event source EVENT[ID] - - \param ID[in] EVENT identifier - - \return dequeue(event_queue[ID]) - */ -STORAGE_CLASS_EVENT_H hrt_data event_receive_token( - const event_ID_t ID); - -/*! Blocking write to an event sink EVENT[ID] - - \param ID[in] EVENT identifier - \param token[in] token to be written on the event - - \return none, enqueue(event_queue[ID]) - */ -STORAGE_CLASS_EVENT_H void event_send_token( - const event_ID_t ID, - const hrt_data token); - -/*! Query an event source EVENT[ID] - - \param ID[in] EVENT identifier - - \return !isempty(event_queue[ID]) - */ -STORAGE_CLASS_EVENT_H bool is_event_pending( - const event_ID_t ID); - -/*! Query an event sink EVENT[ID] - - \param ID[in] EVENT identifier - - \return !isfull(event_queue[ID]) - */ -STORAGE_CLASS_EVENT_H bool can_event_send_token( - const event_ID_t ID); - -#endif /* __EVENT_FIFO_PUBLIC_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/fifo_monitor_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/fifo_monitor_public.h deleted file mode 100644 index 329f5d5049f2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/fifo_monitor_public.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __FIFO_MONITOR_PUBLIC_H_INCLUDED__ -#define __FIFO_MONITOR_PUBLIC_H_INCLUDED__ - -#include "system_types.h" - -typedef struct fifo_channel_state_s fifo_channel_state_t; -typedef struct fifo_switch_state_s fifo_switch_state_t; -typedef struct fifo_monitor_state_s fifo_monitor_state_t; - -/*! Set a fifo switch multiplex - - \param ID[in] FIFO_MONITOR identifier - \param switch_id[in] fifo switch identifier - \param sel[in] fifo switch selector - - \return none, fifo_switch[switch_id].sel = sel - */ -STORAGE_CLASS_FIFO_MONITOR_H void fifo_switch_set( - const fifo_monitor_ID_t ID, - const fifo_switch_t switch_id, - const hrt_data sel); - -/*! Get a fifo switch multiplex - - \param ID[in] FIFO_MONITOR identifier - \param switch_id[in] fifo switch identifier - - \return fifo_switch[switch_id].sel - */ -STORAGE_CLASS_FIFO_MONITOR_H hrt_data fifo_switch_get( - const fifo_monitor_ID_t ID, - const fifo_switch_t switch_id); - -/*! Read the state of FIFO_MONITOR[ID] - - \param ID[in] FIFO_MONITOR identifier - \param state[out] fifo monitor state structure - - \return none, state = FIFO_MONITOR[ID].state - */ -extern void fifo_monitor_get_state( - const fifo_monitor_ID_t ID, - fifo_monitor_state_t *state); - -/*! Read the state of a fifo channel - - \param ID[in] FIFO_MONITOR identifier - \param channel_id[in] fifo channel identifier - \param state[out] fifo channel state structure - - \return none, state = fifo_channel[channel_id].state - */ -extern void fifo_channel_get_state( - const fifo_monitor_ID_t ID, - const fifo_channel_t channel_id, - fifo_channel_state_t *state); - -/*! 
Read the state of a fifo switch - - \param ID[in] FIFO_MONITOR identifier - \param switch_id[in] fifo switch identifier - \param state[out] fifo switch state structure - - \return none, state = fifo_switch[switch_id].state - */ -extern void fifo_switch_get_state( - const fifo_monitor_ID_t ID, - const fifo_switch_t switch_id, - fifo_switch_state_t *state); - -/*! Write to a control register of FIFO_MONITOR[ID] - - \param ID[in] FIFO_MONITOR identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, FIFO_MONITOR[ID].ctrl[reg] = value - */ -STORAGE_CLASS_FIFO_MONITOR_H void fifo_monitor_reg_store( - const fifo_monitor_ID_t ID, - const unsigned int reg, - const hrt_data value); - -/*! Read from a control register of FIFO_MONITOR[ID] - - \param ID[in] FIFO_MONITOR identifier - \param reg[in] register index - \param value[in] The data to be written - - \return FIFO_MONITOR[ID].ctrl[reg] - */ -STORAGE_CLASS_FIFO_MONITOR_H hrt_data fifo_monitor_reg_load( - const fifo_monitor_ID_t ID, - const unsigned int reg); - -#endif /* __FIFO_MONITOR_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h deleted file mode 100644 index d09d1e320306..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef __GDC_PUBLIC_H_INCLUDED__ -#define __GDC_PUBLIC_H_INCLUDED__ - -/*! Write the bicubic interpolation table of GDC[ID] - - \param ID[in] GDC identifier - \param data[in] The data matrix to be written - - \pre - - data must point to a matrix[4][HRT_GDC_N] - - \implementation dependent - - The value of "HRT_GDC_N" is device specific - - The LUT should not be partially written - - The LUT format is a quadri-phase interpolation - table. The layout is device specific - - The range of the values data[n][m] is device - specific - - \return none, GDC[ID].lut[0...3][0...HRT_GDC_N-1] = data - */ -extern void gdc_lut_store( - const gdc_ID_t ID, - const int data[4][HRT_GDC_N]); - -/*! Convert the bicubic interpolation table of GDC[ID] to the ISP-specific format - - \param ID[in] GDC identifier - \param in_lut[in] The data matrix to be converted - \param out_lut[out] The data matrix as the output of conversion - */ -extern void gdc_lut_convert_to_isp_format( - const int in_lut[4][HRT_GDC_N], - int out_lut[4][HRT_GDC_N]); - -/*! Return the integer representation of 1.0 of GDC[ID] - - \param ID[in] GDC identifier - - \return unity - */ -extern int gdc_get_unity( - const gdc_ID_t ID); - -#endif /* __GDC_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_device_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_device_public.h deleted file mode 100644 index acbce0fd658f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_device_public.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_DEVICE_PUBLIC_H_INCLUDED__ -#define __GP_DEVICE_PUBLIC_H_INCLUDED__ - -#include "system_types.h" - -typedef struct gp_device_state_s gp_device_state_t; - -/*! Read the state of GP_DEVICE[ID] - - \param ID[in] GP_DEVICE identifier - \param state[out] gp device state structure - - \return none, state = GP_DEVICE[ID].state - */ -extern void gp_device_get_state( - const gp_device_ID_t ID, - gp_device_state_t *state); - -/*! Write to a control register of GP_DEVICE[ID] - - \param ID[in] GP_DEVICE identifier - \param reg_addr[in] register byte address - \param value[in] The data to be written - - \return none, GP_DEVICE[ID].ctrl[reg] = value - */ -STORAGE_CLASS_GP_DEVICE_H void gp_device_reg_store( - const gp_device_ID_t ID, - const unsigned int reg_addr, - const hrt_data value); - -/*! 
Read from a control register of GP_DEVICE[ID] - - \param ID[in] GP_DEVICE identifier - \param reg_addr[in] register byte address - \param value[in] The data to be written - - \return GP_DEVICE[ID].ctrl[reg] - */ -STORAGE_CLASS_GP_DEVICE_H hrt_data gp_device_reg_load( - const gp_device_ID_t ID, - const hrt_address reg_addr); - -#endif /* __GP_DEVICE_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_timer_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_timer_public.h deleted file mode 100644 index 276e2fa9b1e7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_timer_public.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GP_TIMER_PUBLIC_H_INCLUDED__ -#define __GP_TIMER_PUBLIC_H_INCLUDED__ - -#include "system_types.h" - -/*! initialize mentioned timer -param ID timer_id -*/ -extern void -gp_timer_init(gp_timer_ID_t ID); - - -/*! read timer value for (platform selected)selected timer. 
-param ID timer_id - \return uint32_t 32 bit timer value -*/ -extern uint32_t -gp_timer_read(gp_timer_ID_t ID); - -#endif /* __GP_TIMER_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gpio_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gpio_public.h deleted file mode 100644 index 82eaa0d48bee..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gpio_public.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __GPIO_PUBLIC_H_INCLUDED__ -#define __GPIO_PUBLIC_H_INCLUDED__ - -#include "system_types.h" - -/*! Write to a control register of GPIO[ID] - - \param ID[in] GPIO identifier - \param reg_addr[in] register byte address - \param value[in] The data to be written - - \return none, GPIO[ID].ctrl[reg] = value - */ -STORAGE_CLASS_GPIO_H void gpio_reg_store( - const gpio_ID_t ID, - const unsigned int reg_addr, - const hrt_data value); - -/*! 
Read from a control register of GPIO[ID] - - \param ID[in] GPIO identifier - \param reg_addr[in] register byte address - \param value[in] The data to be written - - \return GPIO[ID].ctrl[reg] - */ -STORAGE_CLASS_GPIO_H hrt_data gpio_reg_load( - const gpio_ID_t ID, - const unsigned int reg_addr); - -#endif /* __GPIO_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h deleted file mode 100644 index 8538f86ab5e6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __HMEM_PUBLIC_H_INCLUDED__ -#define __HMEM_PUBLIC_H_INCLUDED__ - -#include /* size_t */ - -/*! Return the size of HMEM[ID] - - \param ID[in] HMEM identifier - - \Note: The size is the byte size of the area it occupies - in the address map. I.e. 
disregarding internal structure - - \return sizeof(HMEM[ID]) - */ -STORAGE_CLASS_HMEM_H size_t sizeof_hmem( - const hmem_ID_t ID); - -#endif /* __HMEM_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h deleted file mode 100644 index 98ee9947fb8e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IBUF_CTRL_PUBLIC_H_INCLUDED__ -#define __IBUF_CTRL_PUBLIC_H_INCLUDED__ - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -/***************************************************** - * - * Native command interface (NCI). - * - *****************************************************/ -/** - * @brief Get the ibuf-controller state. - * Get the state of the ibuf-controller regiester-set. - * - * @param[in] id The global unique ID of the input-buffer controller. - * @param[out] state Point to the register-state. - */ -STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_state( - const ibuf_ctrl_ID_t ID, - ibuf_ctrl_state_t *state); - -/** - * @brief Get the state of the ibuf-controller process. - * Get the state of the register set per buf-controller process. - * - * @param[in] id The global unique ID of the input-buffer controller. 
- * @param[in] proc_id The process ID. - * @param[out] state Point to the process state. - */ -STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_proc_state( - const ibuf_ctrl_ID_t ID, - const uint32_t proc_id, - ibuf_ctrl_proc_state_t *state); -/** - * @brief Dump the ibuf-controller state. - * Dump the state of the ibuf-controller regiester-set. - * - * @param[in] id The global unique ID of the input-buffer controller. - * @param[in] state Pointer to the register-state. - */ -STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state( - const ibuf_ctrl_ID_t ID, - ibuf_ctrl_state_t *state); -/* end of NCI */ - -/***************************************************** - * - * Device level interface (DLI). - * - *****************************************************/ -/** - * @brief Load the register value. - * Load the value of the register of the ibuf-controller. - * - * @param[in] ID The global unique ID for the ibuf-controller instance. - * @param[in] reg The offet address of the register. - * - * @return the value of the register. - */ -STORAGE_CLASS_IBUF_CTRL_H hrt_data ibuf_ctrl_reg_load( - const ibuf_ctrl_ID_t ID, - const hrt_address reg); - -/** - * @brief Store a value to the register. - * Store a value to the registe of the ibuf-controller. - * - * @param[in] ID The global unique ID for the ibuf-controller instance. - * @param[in] reg The offet address of the register. - * @param[in] value The value to be stored. 
- * - */ -STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_reg_store( - const ibuf_ctrl_ID_t ID, - const hrt_address reg, - const hrt_data value); -/* end of DLI */ - -#endif /* USE_INPUT_SYSTEM_VERSION_2401 */ -#endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_formatter_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_formatter_public.h deleted file mode 100644 index 2db70893daf9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_formatter_public.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __INPUT_FORMATTER_PUBLIC_H_INCLUDED__ -#define __INPUT_FORMATTER_PUBLIC_H_INCLUDED__ - -#include -#include "system_types.h" - -/*! Reset INPUT_FORMATTER[ID] - - \param ID[in] INPUT_FORMATTER identifier - - \return none, reset(INPUT_FORMATTER[ID]) - */ -extern void input_formatter_rst( - const input_formatter_ID_t ID); - -/*! Set the blocking mode of INPUT_FORMATTER[ID] - - \param ID[in] INPUT_FORMATTER identifier - \param enable[in] blocking enable flag - - \use - - In HW, the capture unit will deliver an infinite stream of frames, - the input formatter will synchronise on the first SOF. In simulation - there are only a fixed number of frames, presented only once. 
By - enabling blocking the inputformatter will wait on the first presented - frame, thus avoiding race in the simulation setup. - - \return none, INPUT_FORMATTER[ID].blocking_mode = enable - */ -extern void input_formatter_set_fifo_blocking_mode( - const input_formatter_ID_t ID, - const bool enable); - -/*! Return the data alignment of INPUT_FORMATTER[ID] - - \param ID[in] INPUT_FORMATTER identifier - - \return alignment(INPUT_FORMATTER[ID].data) - */ -extern unsigned int input_formatter_get_alignment( - const input_formatter_ID_t ID); - -/*! Read the source switch state into INPUT_FORMATTER[ID] - - \param ID[in] INPUT_FORMATTER identifier - \param state[out] input formatter switch state structure - - \return none, state = INPUT_FORMATTER[ID].switch_state - */ -extern void input_formatter_get_switch_state( - const input_formatter_ID_t ID, - input_formatter_switch_state_t *state); - -/*! Read the control registers of INPUT_FORMATTER[ID] - - \param ID[in] INPUT_FORMATTER identifier - \param state[out] input formatter state structure - - \return none, state = INPUT_FORMATTER[ID].state - */ -extern void input_formatter_get_state( - const input_formatter_ID_t ID, - input_formatter_state_t *state); - -/*! Read the control registers of bin copy INPUT_FORMATTER[ID] - - \param ID[in] INPUT_FORMATTER identifier - \param state[out] input formatter state structure - - \return none, state = INPUT_FORMATTER[ID].state - */ -extern void input_formatter_bin_get_state( - const input_formatter_ID_t ID, - input_formatter_bin_state_t *state); - -/*! Write to a control register of INPUT_FORMATTER[ID] - - \param ID[in] INPUT_FORMATTER identifier - \param reg_addr[in] register byte address - \param value[in] The data to be written - - \return none, INPUT_FORMATTER[ID].ctrl[reg] = value - */ -STORAGE_CLASS_INPUT_FORMATTER_H void input_formatter_reg_store( - const input_formatter_ID_t ID, - const hrt_address reg_addr, - const hrt_data value); - -/*! 
Read from a control register of INPUT_FORMATTER[ID] - - \param ID[in] INPUT_FORMATTER identifier - \param reg_addr[in] register byte address - \param value[in] The data to be written - - \return INPUT_FORMATTER[ID].ctrl[reg] - */ -STORAGE_CLASS_INPUT_FORMATTER_H hrt_data input_formatter_reg_load( - const input_formatter_ID_t ID, - const unsigned int reg_addr); - -#endif /* __INPUT_FORMATTER_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_system_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_system_public.h deleted file mode 100644 index 6e37ff0fe0f9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_system_public.h +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ -#define __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ - -#include -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -#include "isys_public.h" -#else - -typedef struct input_system_state_s input_system_state_t; -typedef struct receiver_state_s receiver_state_t; - -/*! Read the state of INPUT_SYSTEM[ID] - - \param ID[in] INPUT_SYSTEM identifier - \param state[out] input system state structure - - \return none, state = INPUT_SYSTEM[ID].state - */ -extern void input_system_get_state( - const input_system_ID_t ID, - input_system_state_t *state); - -/*! 
Read the state of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param state[out] receiver state structure - - \return none, state = RECEIVER[ID].state - */ -extern void receiver_get_state( - const rx_ID_t ID, - receiver_state_t *state); - -/*! Flag whether a MIPI format is YUV420 - - \param mipi_format[in] MIPI format - - \return mipi_format == YUV420 - */ -extern bool is_mipi_format_yuv420( - const mipi_format_t mipi_format); - -/*! Set compression parameters for cfg[cfg_ID] of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param cfg_ID[in] Configuration identifier - \param comp[in] Compression method - \param pred[in] Predictor method - - \NOTE: the storage of compression configuration is - implementation specific. The config can be - carried either on MIPI ports or on MIPI channels - - \return none, RECEIVER[ID].cfg[cfg_ID] = {comp, pred} - */ -extern void receiver_set_compression( - const rx_ID_t ID, - const unsigned int cfg_ID, - const mipi_compressor_t comp, - const mipi_predictor_t pred); - -/*! Enable PORT[port_ID] of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param port_ID[in] mipi PORT identifier - \param cnd[in] irq predicate - - \return None, enable(RECEIVER[ID].PORT[port_ID]) - */ -extern void receiver_port_enable( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const bool cnd); - -/*! Flag if PORT[port_ID] of RECEIVER[ID] is enabled - - \param ID[in] RECEIVER identifier - \param port_ID[in] mipi PORT identifier - - \return enable(RECEIVER[ID].PORT[port_ID]) == true - */ -extern bool is_receiver_port_enabled( - const rx_ID_t ID, - const enum mipi_port_id port_ID); - -/*! 
Enable the IRQ channels of PORT[port_ID] of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param port_ID[in] mipi PORT identifier - \param irq_info[in] irq channels - - \return None, enable(RECEIVER[ID].PORT[port_ID].irq_info) - */ -extern void receiver_irq_enable( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const rx_irq_info_t irq_info); - -/*! Return the IRQ status of PORT[port_ID] of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param port_ID[in] mipi PORT identifier - - \return RECEIVER[ID].PORT[port_ID].irq_info - */ -extern rx_irq_info_t receiver_get_irq_info( - const rx_ID_t ID, - const enum mipi_port_id port_ID); - -/*! Clear the IRQ status of PORT[port_ID] of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param port_ID[in] mipi PORT identifier - \param irq_info[in] irq status - - \return None, clear(RECEIVER[ID].PORT[port_ID].irq_info) - */ -extern void receiver_irq_clear( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const rx_irq_info_t irq_info); - -/*! Write to a control register of INPUT_SYSTEM[ID] - - \param ID[in] INPUT_SYSTEM identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, INPUT_SYSTEM[ID].ctrl[reg] = value - */ -STORAGE_CLASS_INPUT_SYSTEM_H void input_system_reg_store( - const input_system_ID_t ID, - const hrt_address reg, - const hrt_data value); - -/*! Read from a control register of INPUT_SYSTEM[ID] - - \param ID[in] INPUT_SYSTEM identifier - \param reg[in] register index - \param value[in] The data to be written - - \return INPUT_SYSTEM[ID].ctrl[reg] - */ -STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_reg_load( - const input_system_ID_t ID, - const hrt_address reg); - -/*! 
Write to a control register of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, RECEIVER[ID].ctrl[reg] = value - */ -STORAGE_CLASS_INPUT_SYSTEM_H void receiver_reg_store( - const rx_ID_t ID, - const hrt_address reg, - const hrt_data value); - -/*! Read from a control register of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param reg[in] register index - \param value[in] The data to be written - - \return RECEIVER[ID].ctrl[reg] - */ -STORAGE_CLASS_INPUT_SYSTEM_H hrt_data receiver_reg_load( - const rx_ID_t ID, - const hrt_address reg); - -/*! Write to a control register of PORT[port_ID] of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param port_ID[in] mipi PORT identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, RECEIVER[ID].PORT[port_ID].ctrl[reg] = value - */ -STORAGE_CLASS_INPUT_SYSTEM_H void receiver_port_reg_store( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const hrt_address reg, - const hrt_data value); - -/*! Read from a control register PORT[port_ID] of of RECEIVER[ID] - - \param ID[in] RECEIVER identifier - \param port_ID[in] mipi PORT identifier - \param reg[in] register index - \param value[in] The data to be written - - \return RECEIVER[ID].PORT[port_ID].ctrl[reg] - */ -STORAGE_CLASS_INPUT_SYSTEM_H hrt_data receiver_port_reg_load( - const rx_ID_t ID, - const enum mipi_port_id port_ID, - const hrt_address reg); - -/*! 
Write to a control register of SUB_SYSTEM[sub_ID] of INPUT_SYSTEM[ID] - - \param ID[in] INPUT_SYSTEM identifier - \param port_ID[in] sub system identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, INPUT_SYSTEM[ID].SUB_SYSTEM[sub_ID].ctrl[reg] = value - */ -STORAGE_CLASS_INPUT_SYSTEM_H void input_system_sub_system_reg_store( - const input_system_ID_t ID, - const sub_system_ID_t sub_ID, - const hrt_address reg, - const hrt_data value); - -/*! Read from a control register SUB_SYSTEM[sub_ID] of INPUT_SYSTEM[ID] - - \param ID[in] INPUT_SYSTEM identifier - \param port_ID[in] sub system identifier - \param reg[in] register index - \param value[in] The data to be written - - \return INPUT_SYSTEM[ID].SUB_SYSTEM[sub_ID].ctrl[reg] - */ -STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_sub_system_reg_load( - const input_system_ID_t ID, - const sub_system_ID_t sub_ID, - const hrt_address reg); - - - -/////////////////////////////////////////////////////////////////////////// -// -// Functions for configuration phase on input system. -// -/////////////////////////////////////////////////////////////////////////// - -// Function that resets current configuration. -// remove the argument since it should be private. -input_system_error_t input_system_configuration_reset(void); - -// Function that commits current configuration. -// remove the argument since it should be private. -input_system_error_t input_system_configuration_commit(void); - -/////////////////////////////////////////////////////////////////////////// -// -// User functions: -// (encoded generic function) -// - no checking -// - decoding name and agruments into the generic (channel) configuration -// function. 
-// -/////////////////////////////////////////////////////////////////////////// - - -// FIFO channel config function user - -input_system_error_t input_system_csi_fifo_channel_cfg( - uint32_t ch_id, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - target_cfg2400_t target -); - -input_system_error_t input_system_csi_fifo_channel_with_counting_cfg( - uint32_t ch_id, - uint32_t nof_frame, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - uint32_t mem_region_size, - uint32_t nof_mem_regions, - target_cfg2400_t target -); - - -// SRAM channel config function user - -input_system_error_t input_system_csi_sram_channel_cfg( - uint32_t ch_id, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - uint32_t csi_mem_region_size, - uint32_t csi_nof_mem_regions, - target_cfg2400_t target -); - - -//XMEM channel config function user - -input_system_error_t input_system_csi_xmem_channel_cfg( - uint32_t ch_id, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - uint32_t mem_region_size, - uint32_t nof_mem_regions, - uint32_t acq_mem_region_size, - uint32_t acq_nof_mem_regions, - target_cfg2400_t target, - uint32_t nof_xmem_buffers -); - -input_system_error_t input_system_csi_xmem_capture_only_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames, - input_system_csi_port_t port, - uint32_t csi_mem_region_size, - uint32_t csi_nof_mem_regions, - uint32_t acq_mem_region_size, - uint32_t acq_nof_mem_regions, - target_cfg2400_t target -); - -input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames, - input_system_csi_port_t port, - backend_channel_cfg_t backend_ch, - uint32_t acq_mem_region_size, - uint32_t acq_nof_mem_regions, - target_cfg2400_t target -); - -// Non - CSI channel config function user - -input_system_error_t input_system_prbs_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames, - uint32_t seed, - uint32_t sync_gen_width, - uint32_t 
sync_gen_height, - uint32_t sync_gen_hblank_cycles, - uint32_t sync_gen_vblank_cycles, - target_cfg2400_t target -); - - -input_system_error_t input_system_tpg_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames,//not used yet - uint32_t x_mask, - uint32_t y_mask, - uint32_t x_delta, - uint32_t y_delta, - uint32_t xy_mask, - uint32_t sync_gen_width, - uint32_t sync_gen_height, - uint32_t sync_gen_hblank_cycles, - uint32_t sync_gen_vblank_cycles, - target_cfg2400_t target -); - - -input_system_error_t input_system_gpfifo_channel_cfg( - uint32_t ch_id, - uint32_t nof_frames, - target_cfg2400_t target -); -#endif /* #ifdef USE_INPUT_SYSTEM_VERSION_2401 */ - -#endif /* __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/irq_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/irq_public.h deleted file mode 100644 index 9aeaf8f082d2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/irq_public.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IRQ_PUBLIC_H_INCLUDED__ -#define __IRQ_PUBLIC_H_INCLUDED__ - -#include -#include "system_types.h" - -/*! 
Read the control registers of IRQ[ID] - - \param ID[in] IRQ identifier - \param state[out] irq controller state structure - - \return none, state = IRQ[ID].state - */ -extern void irq_controller_get_state( - const irq_ID_t ID, - irq_controller_state_t *state); - -/*! Write to a control register of IRQ[ID] - - \param ID[in] IRQ identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, IRQ[ID].ctrl[reg] = value - */ -STORAGE_CLASS_IRQ_H void irq_reg_store( - const irq_ID_t ID, - const unsigned int reg, - const hrt_data value); - -/*! Read from a control register of IRQ[ID] - - \param ID[in] IRQ identifier - \param reg[in] register index - \param value[in] The data to be written - - \return IRQ[ID].ctrl[reg] - */ -STORAGE_CLASS_IRQ_H hrt_data irq_reg_load( - const irq_ID_t ID, - const unsigned int reg); - -/*! Enable an IRQ channel of IRQ[ID] with a mode - - \param ID[in] IRQ (device) identifier - \param irq[in] IRQ (channel) identifier - - \return none, enable(IRQ[ID].channel[irq_ID]) - */ -extern void irq_enable_channel( - const irq_ID_t ID, - const unsigned int irq_ID); - -/*! Enable pulse interrupts for IRQ[ID] with a mode - - \param ID[in] IRQ (device) identifier - \param enable enable/disable pulse interrupts - - \return none - */ -extern void irq_enable_pulse( - const irq_ID_t ID, - bool pulse); - -/*! Disable an IRQ channel of IRQ[ID] - - \param ID[in] IRQ (device) identifier - \param irq[in] IRQ (channel) identifier - - \return none, disable(IRQ[ID].channel[irq_ID]) - */ -extern void irq_disable_channel( - const irq_ID_t ID, - const unsigned int irq); - -/*! Clear the state of all IRQ channels of IRQ[ID] - - \param ID[in] IRQ (device) identifier - - \return none, clear(IRQ[ID].channel[]) - */ -extern void irq_clear_all( - const irq_ID_t ID); - -/*! 
Return the ID of a signalling IRQ channel of IRQ[ID] - - \param ID[in] IRQ (device) identifier - \param irq_id[out] active IRQ (channel) identifier - - \Note: This function operates as strtok(), based on the return - state the user is informed if there are additional signalling - channels - - \return state(IRQ[ID]) - */ -extern enum hrt_isp_css_irq_status irq_get_channel_id( - const irq_ID_t ID, - unsigned int *irq_id); - -/*! Raise an interrupt on channel irq_id of device IRQ[ID] - - \param ID[in] IRQ (device) identifier - \param irq_id[in] IRQ (channel) identifier - - \return none, signal(IRQ[ID].channel[irq_id]) - */ -extern void irq_raise( - const irq_ID_t ID, - const irq_sw_channel_id_t irq_id); - -/*! Test if any IRQ channel of the virtual super IRQ has raised a signal - - \return any(VIRQ.channel[irq_ID] != 0) - */ -extern bool any_virq_signal(void); - -/*! Enable an IRQ channel of the virtual super IRQ - - \param irq[in] IRQ (channel) identifier - \param en[in] predicate channel enable - - \return none, VIRQ.channel[irq_ID].enable = en - */ -extern void cnd_virq_enable_channel( - const virq_id_t irq_ID, - const bool en); - -/*! Clear the state of all IRQ channels of the virtual super IRQ - - \return none, clear(VIRQ.channel[]) - */ -extern void virq_clear_all(void); - -/*! Clear the IRQ info state of the virtual super IRQ - - \param irq_info[in/out] The IRQ (channel) state - - \return none - */ -extern void virq_clear_info( - virq_info_t *irq_info); - -/*! Return the ID of a signalling IRQ channel of the virtual super IRQ - - \param irq_id[out] active IRQ (channel) identifier - - \Note: This function operates as strtok(), based on the return - state the user is informed if there are additional signalling - channels - - \return state(IRQ[...]) - */ -extern enum hrt_isp_css_irq_status virq_get_channel_id( - virq_id_t *irq_id); - -/*! 
Return the IDs of all signaling IRQ channels of the virtual super IRQ - - \param irq_info[out] all active IRQ (channel) identifiers - - \Note: Unlike "irq_get_channel_id()" this function returns all - channel signaling info. The new info is OR'd with the current - info state. N.B. this is the same as repeatedly calling the function - "irq_get_channel_id()" in a (non-blocked) handler routine - - \return (error(state(IRQ[...])) - */ -extern enum hrt_isp_css_irq_status virq_get_channel_signals( - virq_info_t *irq_info); - -#endif /* __IRQ_PUBLIC_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_public.h deleted file mode 100644 index 808ec050efc0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_public.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __ISP_PUBLIC_H_INCLUDED__ -#define __ISP_PUBLIC_H_INCLUDED__ - -#include -#include "system_types.h" - -/*! Enable or disable the program complete irq signal of ISP[ID] - - \param ID[in] SP identifier - \param cnd[in] predicate - - \return none, if(cnd) enable(ISP[ID].irq) else disable(ISP[ID].irq) - */ -extern void cnd_isp_irq_enable( - const isp_ID_t ID, - const bool cnd); - -/*! 
Read the state of cell ISP[ID] - - \param ID[in] ISP identifier - \param state[out] isp state structure - \param stall[out] isp stall conditions - - \return none, state = ISP[ID].state, stall = ISP[ID].stall - */ -extern void isp_get_state( - const isp_ID_t ID, - isp_state_t *state, - isp_stall_t *stall); - - -/*! Write to the status and control register of ISP[ID] - - \param ID[in] ISP identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, ISP[ID].sc[reg] = value - */ -STORAGE_CLASS_ISP_H void isp_ctrl_store( - const isp_ID_t ID, - const unsigned int reg, - const hrt_data value); - -/*! Read from the status and control register of ISP[ID] - - \param ID[in] ISP identifier - \param reg[in] register index - \param value[in] The data to be written - - \return ISP[ID].sc[reg] - */ -STORAGE_CLASS_ISP_H hrt_data isp_ctrl_load( - const isp_ID_t ID, - const unsigned int reg); - -/*! Get the status of a bitfield in the control register of ISP[ID] - - \param ID[in] ISP identifier - \param reg[in] register index - \param bit[in] The bit index to be checked - - \return (ISP[ID].sc[reg] & (1< -#include "system_types.h" - -typedef struct sp_state_s sp_state_t; -typedef struct sp_stall_s sp_stall_t; - -/*! Enable or disable the program complete irq signal of SP[ID] - - \param ID[in] SP identifier - \param cnd[in] predicate - - \return none, if(cnd) enable(SP[ID].irq) else disable(SP[ID].irq) - */ -extern void cnd_sp_irq_enable( - const sp_ID_t ID, - const bool cnd); - -/*! Read the state of cell SP[ID] - - \param ID[in] SP identifier - \param state[out] sp state structure - \param stall[out] isp stall conditions - - \return none, state = SP[ID].state, stall = SP[ID].stall - */ -extern void sp_get_state( - const sp_ID_t ID, - sp_state_t *state, - sp_stall_t *stall); - -/*! 
Write to the status and control register of SP[ID] - - \param ID[in] SP identifier - \param reg[in] register index - \param value[in] The data to be written - - \return none, SP[ID].sc[reg] = value - */ -STORAGE_CLASS_SP_H void sp_ctrl_store( - const sp_ID_t ID, - const hrt_address reg, - const hrt_data value); - -/*! Read from the status and control register of SP[ID] - - \param ID[in] SP identifier - \param reg[in] register index - \param value[in] The data to be written - - \return SP[ID].sc[reg] - */ -STORAGE_CLASS_SP_H hrt_data sp_ctrl_load( - const sp_ID_t ID, - const hrt_address reg); - -/*! Get the status of a bitfield in the control register of SP[ID] - - \param ID[in] SP identifier - \param reg[in] register index - \param bit[in] The bit index to be checked - - \return (SP[ID].sc[reg] & (1< -#include - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - -#ifndef __INLINE_ISYS2401_IRQ__ - -#define STORAGE_CLASS_ISYS2401_IRQ_H extern -#define STORAGE_CLASS_ISYS2401_IRQ_C extern -#include "isys_irq_public.h" - -#else /* __INLINE_ISYS2401_IRQ__ */ - -#define STORAGE_CLASS_ISYS2401_IRQ_H static inline -#define STORAGE_CLASS_ISYS2401_IRQ_C static inline -#include "isys_irq_private.h" - -#endif /* __INLINE_ISYS2401_IRQ__ */ - -#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */ - -#endif /* __IA_CSS_ISYS_IRQ_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h deleted file mode 100644 index 16fbf9d25eba..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __ISYS_STREAM2MMIO_H_INCLUDED__ -#define __ISYS_STREAM2MMIO_H_INCLUDED__ - - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the input system device(s). It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & - * inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "isys_stream2mmio_local.h" - -#ifndef __INLINE_STREAM2MMIO__ -#define STORAGE_CLASS_STREAM2MMIO_H extern -#define STORAGE_CLASS_STREAM2MMIO_C -#include "isys_stream2mmio_public.h" -#else /* __INLINE_STREAM2MMIO__ */ -#define STORAGE_CLASS_STREAM2MMIO_H static inline -#define STORAGE_CLASS_STREAM2MMIO_C static inline -#include "isys_stream2mmio_private.h" -#endif /* __INLINE_STREAM2MMIO__ */ - -#endif /* __ISYS_STREAM2MMIO_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h deleted file mode 100644 index 7c52ba54fcf1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h +++ 
/dev/null @@ -1,218 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __MATH_SUPPORT_H -#define __MATH_SUPPORT_H - -#include /* Override the definition of max/min from linux kernel*/ - -#if defined(_MSC_VER) -#include /* Override the definition of max/min from stdlib.h*/ -#endif /* _MSC_VER */ - -/* in case we have min/max/MIN/MAX macro's undefine them */ -#ifdef min -#undef min -#endif -#ifdef max -#undef max -#endif -#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */ -#undef MIN -#endif -#ifdef MAX -#undef MAX -#endif -#ifdef ABS -#undef ABS -#endif - -#define IS_ODD(a) ((a) & 0x1) -#define IS_EVEN(a) (!IS_ODD(a)) - -/* force a value to a lower even value */ -#define EVEN_FLOOR(x) ((x) & ~1) - -#ifdef ISP2401 -/* If the number is odd, find the next even number */ -#define EVEN_CEIL(x) ((IS_ODD(x)) ? ((x) + 1) : (x)) - -#endif -/* A => B */ -#define IMPLIES(a, b) (!(a) || (b)) - -#define ABS(a) ((a) >= 0 ? (a) : -(a)) - -/* for preprocessor and array sizing use MIN and MAX - otherwise use min and max */ -#define MAX(a, b) (((a) > (b)) ? (a) : (b)) -#define MIN(a, b) (((a) < (b)) ? (a) : (b)) -#ifdef ISP2401 -#define ROUND_DIV(a, b) (((b) != 0) ? ((a) + ((b) >> 1)) / (b) : 0) -#endif -#define CEIL_DIV(a, b) (((b) != 0) ? 
((a) + (b) - 1) / (b) : 0) -#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b)) -#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1)) -#define CEIL_SHIFT(a, b) (((a) + (1 << (b)) - 1)>>(b)) -#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b)) -#ifdef ISP2401 -#define ROUND_HALF_DOWN_DIV(a, b) (((b) != 0) ? ((a) + (b / 2) - 1) / (b) : 0) -#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b)) -#endif - - -/*To Find next power of 2 number from x */ -#define bit2(x) ((x) | ((x) >> 1)) -#define bit4(x) (bit2(x) | (bit2(x) >> 2)) -#define bit8(x) (bit4(x) | (bit4(x) >> 4)) -#define bit16(x) (bit8(x) | (bit8(x) >> 8)) -#define bit32(x) (bit16(x) | (bit16(x) >> 16)) -#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1) - - -/* min and max should not be macros as they will evaluate their arguments twice. - if you really need a macro (e.g. for CPP or for initializing an array) - use MIN() and MAX(), otherwise use min() and max(). - - -*/ - -#if !defined(PIPE_GENERATION) - -#ifndef INLINE_MATH_SUPPORT_UTILS -/* -This macro versions are added back as we are mixing types in usage of inline. -This causes corner cases of calculations to be incorrect due to conversions -between signed and unsigned variables or overflows. -Before the addition of the inline functions, max, min and ceil_div were macros -and therefore adding them back. 
- -Leaving out the other math utility functions as they are newly added -*/ - -#define max(a, b) (MAX(a, b)) -#define min(a, b) (MIN(a, b)) -#define ceil_div(a, b) (CEIL_DIV(a, b)) - -#else /* !defined(INLINE_MATH_SUPPORT_UTILS) */ - -static inline int max(int a, int b) -{ - return MAX(a, b); -} - -static inline int min(int a, int b) -{ - return MIN(a, b); -} - -static inline unsigned int ceil_div(unsigned int a, unsigned int b) -{ - return CEIL_DIV(a, b); -} -#endif /* !defined(INLINE_MATH_SUPPORT_UTILS) */ - -static inline unsigned int umax(unsigned int a, unsigned int b) -{ - return MAX(a, b); -} - -static inline unsigned int umin(unsigned int a, unsigned int b) -{ - return MIN(a, b); -} - - -static inline unsigned int ceil_mul(unsigned int a, unsigned int b) -{ - return CEIL_MUL(a, b); -} - -static inline unsigned int ceil_mul2(unsigned int a, unsigned int b) -{ - return CEIL_MUL2(a, b); -} - -static inline unsigned int ceil_shift(unsigned int a, unsigned int b) -{ - return CEIL_SHIFT(a, b); -} - -static inline unsigned int ceil_shift_mul(unsigned int a, unsigned int b) -{ - return CEIL_SHIFT_MUL(a, b); -} - -#ifdef ISP2401 -static inline unsigned int round_half_down_div(unsigned int a, unsigned int b) -{ - return ROUND_HALF_DOWN_DIV(a, b); -} - -static inline unsigned int round_half_down_mul(unsigned int a, unsigned int b) -{ - return ROUND_HALF_DOWN_MUL(a, b); -} -#endif - -/* @brief Next Power of Two - * - * @param[in] unsigned number - * - * @return next power of two - * - * This function rounds input to the nearest power of 2 (2^x) - * towards infinity - * - * Input Range: 0 .. 
2^(8*sizeof(int)-1) - * - * IF input is a power of 2 - * out = in - * OTHERWISE - * out = 2^(ceil(log2(in)) - * - */ - -static inline unsigned int ceil_pow2(unsigned int a) -{ - if (a == 0) { - return 1; - } - /* IF input is already a power of two*/ - else if ((!((a)&((a)-1)))) { - return a; - } - else { - unsigned int v = a; - v |= v>>1; - v |= v>>2; - v |= v>>4; - v |= v>>8; - v |= v>>16; - return (v+1); - } -} - -#endif /* !defined(PIPE_GENERATION) */ - -#if !defined(__ISP) -/* - * For SP and ISP, SDK provides the definition of OP_std_modadd. - * We need it only for host - */ -#define OP_std_modadd(base, offset, size) ((base+offset)%(size)) -#endif /* !defined(__ISP) */ - - -#endif /* __MATH_SUPPORT_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_access/memory_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_access/memory_access.h deleted file mode 100644 index d2387812f3a6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_access/memory_access.h +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015-2017, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __MEMORY_ACCESS_H_INCLUDED__ -#define __MEMORY_ACCESS_H_INCLUDED__ - -/*! - * \brief - * Define the public interface for virtual memory - * access functions. 
Access types are limited to - * those defined in - * - * The address representation is private to the system - * and represented as "hrt_vaddress" rather than a - * pointer, as the memory allocation cannot be accessed - * by dereferencing but reaquires load and store access - * functions - * - * The page table selection or virtual memory context; - * The page table base index; Is implicit. This page - * table base index must be set by the implementation - * of the access function - * - * "store" is a transfer to the system - * "load" is a transfer from the system - * - * Allocation properties can be specified by setting - * attributes (see below) in case of multiple physical - * memories the memory ID is encoded on the attribute - * - * Allocations in the same physical memory, but in a - * different (set of) page tables can be shared through - * a page table information mapping function - */ - -#include -#include "platform_support.h" /* for __func__ */ - -/* - * User provided file that defines the (sub)system address types: - * - hrt_vaddress a type that can hold the (sub)system virtual address range - */ -#include "system_types.h" - -/* - * The MMU base address is a physical address, thus the same type is used - * as for the device base address - */ -#include "device_access.h" - -#include "hmm/hmm.h" - -/*! - * \brief - * Bit masks for specialised allocation functions - * the default is "uncached", "not contiguous", - * "not page aligned" and "not cleared" - * - * Forcing alignment (usually) returns a pointer - * at an alignment boundary that is offset from - * the allocated pointer. Without storing this - * pointer/offset, we cannot free it. The memory - * manager is responsible for the bookkeeping, e.g. - * the allocation function creates a sentinel - * within the allocation referencable from the - * returned pointer/address. 
- */ -#define MMGR_ATTRIBUTE_MASK 0x000f -#define MMGR_ATTRIBUTE_CACHED 0x0001 -#define MMGR_ATTRIBUTE_CONTIGUOUS 0x0002 -#define MMGR_ATTRIBUTE_PAGEALIGN 0x0004 -#define MMGR_ATTRIBUTE_CLEARED 0x0008 -#define MMGR_ATTRIBUTE_UNUSED 0xfff0 - -/* #define MMGR_ATTRIBUTE_DEFAULT (MMGR_ATTRIBUTE_CACHED) */ -#define MMGR_ATTRIBUTE_DEFAULT 0 - -extern const hrt_vaddress mmgr_NULL; -extern const hrt_vaddress mmgr_EXCEPTION; - -/*! Return the address of an allocation in memory - - \param size[in] Size in bytes of the allocation - \param caller_func[in] Caller function name - \param caller_line[in] Caller function line number - - \return vaddress - */ -extern hrt_vaddress mmgr_malloc(const size_t size); - -/*! Return the address of a zero initialised allocation in memory - - \param N[in] Horizontal dimension of array - \param size[in] Vertical dimension of array Total size is N*size - - \return vaddress - */ -extern hrt_vaddress mmgr_calloc(const size_t N, const size_t size); - -/*! Return the address of an allocation in memory - - \param size[in] Size in bytes of the allocation - \param attribute[in] Bit vector specifying the properties - of the allocation including zero initialisation - - \return vaddress - */ - -extern hrt_vaddress mmgr_alloc_attr(const size_t size, const uint16_t attribute); - -/*! 
Return the address of a mapped existing allocation in memory - - \param ptr[in] Pointer to an allocation in a different - virtual memory page table, but the same - physical memory - \param size[in] Size of the memory of the pointer - \param attribute[in] Bit vector specifying the properties - of the allocation - \param context Pointer of a context provided by - client/driver for additonal parameters - needed by the implementation - \Note - This interface is tentative, limited to the desired function - the actual interface may require furhter parameters - - \return vaddress - */ -extern hrt_vaddress mmgr_mmap( - const void __user *ptr, - const size_t size, - uint16_t attribute, - void *context); - -/*! Zero initialise an allocation in memory - - \param vaddr[in] Address of an allocation - \param size[in] Size in bytes of the area to be cleared - - \return none - */ -extern void mmgr_clear(hrt_vaddress vaddr, const size_t size); - -/*! Read an array of bytes from a virtual memory address - - \param vaddr[in] Address of an allocation - \param data[out] pointer to the destination array - \param size[in] number of bytes to read - - \return none - */ -extern void mmgr_load(const hrt_vaddress vaddr, void *data, const size_t size); - -/*! 
Write an array of bytes to device registers or memory in the device - - \param vaddr[in] Address of an allocation - \param data[in] pointer to the source array - \param size[in] number of bytes to write - - \return none - */ -extern void mmgr_store(const hrt_vaddress vaddr, const void *data, const size_t size); - -#endif /* __MEMORY_ACCESS_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_realloc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_realloc.h deleted file mode 100644 index f3b7273fed1b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_realloc.h +++ /dev/null @@ -1,38 +0,0 @@ -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#ifndef __MEMORY_REALLOC_H_INCLUDED__ -#define __MEMORY_REALLOC_H_INCLUDED__ - -/*! 
- * \brief - * Define the internal reallocation of private css memory - * - */ - -#include -/* - * User provided file that defines the (sub)system address types: - * - hrt_vaddress a type that can hold the (sub)system virtual address range - */ -#include "system_types.h" -#include "ia_css_err.h" - -bool reallocate_buffer( - hrt_vaddress *curr_buf, - size_t *curr_size, - size_t needed_size, - bool force, - enum ia_css_err *err); - -#endif /*__MEMORY_REALLOC_H_INCLUDED__*/ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/misc_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/misc_support.h deleted file mode 100644 index 38db1ecef3c8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/misc_support.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __MISC_SUPPORT_H_INCLUDED__ -#define __MISC_SUPPORT_H_INCLUDED__ - -/* suppress compiler warnings on unused variables */ -#ifndef NOT_USED -#define NOT_USED(a) ((void)(a)) -#endif - -/* Calculate the total bytes for pow(2) byte alignment */ -#define tot_bytes_for_pow2_align(pow2, cur_bytes) ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1)) - -#endif /* __MISC_SUPPORT_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h deleted file mode 100644 index 8f6f1dc40095..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __MMU_DEVICE_H_INCLUDED__ -#define __MMU_DEVICE_H_INCLUDED__ - -/* The file mmu.h already exists */ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the MMU device. It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . 
system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "mmu_local.h" - -#include "mmu_public.h" - -#endif /* __MMU_DEVICE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h deleted file mode 100644 index 418d02382d76..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __PIXELGEN_H_INCLUDED__ -#define __PIXELGEN_H_INCLUDED__ - - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the input system device(s). It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. 
- * - * - system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & - * inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "pixelgen_local.h" - -#ifndef __INLINE_PIXELGEN__ -#define STORAGE_CLASS_PIXELGEN_H extern -#define STORAGE_CLASS_PIXELGEN_C -#include "pixelgen_public.h" -#else /* __INLINE_PIXELGEN__ */ -#define STORAGE_CLASS_PIXELGEN_H static inline -#define STORAGE_CLASS_PIXELGEN_C static inline -#include "pixelgen_private.h" -#endif /* __INLINE_PIXELGEN__ */ - -#endif /* __PIXELGEN_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h deleted file mode 100644 index 39a125ba563d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __PLATFORM_SUPPORT_H_INCLUDED__ -#define __PLATFORM_SUPPORT_H_INCLUDED__ - -/** -* @file -* Platform specific includes and functionality. 
-*/ - -#include -#include -#include - -/* For definition of hrt_sleep() */ -#include "hive_isp_css_custom_host_hrt.h" - -#define UINT16_MAX USHRT_MAX -#define UINT32_MAX UINT_MAX -#define UCHAR_MAX (255) - -#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) - -/* - * Put here everything __KERNEL__ specific not covered in - * "assert_support.h", "math_support.h", etc - */ - -#endif /* __PLATFORM_SUPPORT_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h deleted file mode 100644 index 37e8116b74a4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __PRINT_SUPPORT_H_INCLUDED__ -#define __PRINT_SUPPORT_H_INCLUDED__ - - -#include - -extern int (*sh_css_printf) (const char *fmt, va_list args); -/* depends on host supplied print function in ia_css_init() */ -static inline void ia_css_print(const char *fmt, ...) -{ - va_list ap; - if (sh_css_printf) { - va_start(ap, fmt); - sh_css_printf(fmt, ap); - va_end(ap); - } -} - -/* Start adding support for bxt tracing functions for poc. From - * bxt_sandbox/support/print_support.h. */ -/* TODO: support these macros in userspace. */ -#define PWARN(format, ...) 
ia_css_print("warning: ", ##__VA_ARGS__) -#define PRINT(format, ...) ia_css_print(format, ##__VA_ARGS__) -#define PERROR(format, ...) ia_css_print("error: " format, ##__VA_ARGS__) -#define PDEBUG(format, ...) ia_css_print("debug: " format, ##__VA_ARGS__) - -#endif /* __PRINT_SUPPORT_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h deleted file mode 100644 index aa5fadf5aadb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __QUEUE_H_INCLUDED__ -#define __QUEUE_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and is system agnostic - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. 
- * - * - system and cell agnostic interfaces, constants and identifiers - * - public: cell specific interfaces - * - private: cell specific inline implementations - * - global: inter cell constants and identifiers - * - local: cell specific constants and identifiers - * - */ - - -#include "queue_local.h" - -#ifndef __INLINE_QUEUE__ -#define STORAGE_CLASS_QUEUE_H extern -#define STORAGE_CLASS_QUEUE_C -/* #include "queue_public.h" */ -#include "ia_css_queue.h" -#else /* __INLINE_QUEUE__ */ -#define STORAGE_CLASS_QUEUE_H static inline -#define STORAGE_CLASS_QUEUE_C static inline -#include "queue_private.h" -#endif /* __INLINE_QUEUE__ */ - -#endif /* __QUEUE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h deleted file mode 100644 index bd9f53e6b680..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __RESOURCE_H_INCLUDED__ -#define __RESOURCE_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses a RESOURCE manager. It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . 
system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - * - */ - - -#include "system_local.h" -#include "resource_local.h" - -#ifndef __INLINE_RESOURCE__ -#define STORAGE_CLASS_RESOURCE_H extern -#define STORAGE_CLASS_RESOURCE_C -#include "resource_public.h" -#else /* __INLINE_RESOURCE__ */ -#define STORAGE_CLASS_RESOURCE_H static inline -#define STORAGE_CLASS_RESOURCE_C static inline -#include "resource_private.h" -#endif /* __INLINE_RESOURCE__ */ - -#endif /* __RESOURCE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h deleted file mode 100644 index 43cfb0cb4aa8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SOCKET_H_INCLUDED__ -#define __SOCKET_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the DMA device. 
It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - * - */ - - -#include "system_local.h" -#include "socket_local.h" - -#ifndef __INLINE_SOCKET__ -#define STORAGE_CLASS_SOCKET_H extern -#define STORAGE_CLASS_SOCKET_C -#include "socket_public.h" -#else /* __INLINE_SOCKET__ */ -#define STORAGE_CLASS_SOCKET_H static inline -#define STORAGE_CLASS_SOCKET_C static inline -#include "socket_private.h" -#endif /* __INLINE_SOCKET__ */ - -#endif /* __SOCKET_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h deleted file mode 100644 index 8f57f2060791..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SP_H_INCLUDED__ -#define __SP_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the SP cell. 
It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "sp_local.h" - -#ifndef __INLINE_SP__ -#define STORAGE_CLASS_SP_H extern -#define STORAGE_CLASS_SP_C -#include "sp_public.h" -#else /* __INLINE_SP__ */ -#define STORAGE_CLASS_SP_H static inline -#define STORAGE_CLASS_SP_C static inline -#include "sp_private.h" -#endif /* __INLINE_SP__ */ - -#endif /* __SP_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h deleted file mode 100644 index f4d9674cdab6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __STRING_SUPPORT_H_INCLUDED__ -#define __STRING_SUPPORT_H_INCLUDED__ -#include -#include - -#if !defined(_MSC_VER) -/* - * For all non microsoft cases, we need the following functions - */ - - -/* @brief Copy from src_buf to dest_buf. - * - * @param[out] dest_buf. Destination buffer to copy to - * @param[in] dest_size. The size of the destination buffer in bytes - * @param[in] src_buf. The source buffer - * @param[in] src_size. The size of the source buffer in bytes - * @return 0 on success, error code on failure - * @return EINVAL on Invalid arguments - * @return ERANGE on Destination size too small - */ -static inline int memcpy_s( - void* dest_buf, - size_t dest_size, - const void* src_buf, - size_t src_size) -{ - if ((src_buf == NULL) || (dest_buf == NULL)) { - /* Invalid arguments*/ - return EINVAL; - } - - if ((dest_size < src_size) || (src_size == 0)) { - /* Destination too small*/ - return ERANGE; - } - - memcpy(dest_buf, src_buf, src_size); - return 0; -} - -/* @brief Get the length of the string, excluding the null terminator - * - * @param[in] src_str. The source string - * @param[in] max_len. Look only for max_len bytes in the string - * @return Return the string length excluding null character - * @return Return max_len if no null character in the first max_len bytes - * @return Returns 0 if src_str is NULL - */ -static size_t strnlen_s( - const char* src_str, - size_t max_len) -{ - size_t ix; - if (src_str == NULL) { - /* Invalid arguments*/ - return 0; - } - - for (ix = 0; ix < max_len && src_str[ix] != '\0'; ix++) - ; - - /* On Error, it will return src_size == max_len*/ - return ix; -} - -/* @brief Copy string from src_str to dest_str - * - * @param[out] dest_str. Destination buffer to copy to - * @param[in] dest_size. The size of the destination buffer in bytes - * @param[in] src_str. The source buffer - * @param[in] src_size. 
The size of the source buffer in bytes - * @return Returns 0 on success - * @return Returns EINVAL on invalid arguments - * @return Returns ERANGE on destination size too small - */ -static inline int strncpy_s( - char* dest_str, - size_t dest_size, - const char* src_str, - size_t src_size) -{ - size_t len; - if (dest_str == NULL) { - /* Invalid arguments*/ - return EINVAL; - } - - if ((src_str == NULL) || (dest_size == 0)) { - /* Invalid arguments*/ - dest_str[0] = '\0'; - return EINVAL; - } - - len = strnlen_s(src_str, src_size); - - if (len >= dest_size) { - /* Destination too small*/ - dest_str[0] = '\0'; - return ERANGE; - } - - /* dest_str is big enough for the len */ - strncpy(dest_str, src_str, len); - dest_str[len] = '\0'; - return 0; -} - -/* @brief Copy string from src_str to dest_str - * - * @param[out] dest_str. Destination buffer to copy to - * @param[in] dest_size. The size of the destination buffer in bytes - * @param[in] src_str. The source buffer - * @return Returns 0 on success - * @return Returns EINVAL on invalid arguments - * @return Returns ERANGE on destination size too small - */ -static inline int strcpy_s( - char* dest_str, - size_t dest_size, - const char* src_str) -{ - size_t len; - if (dest_str == NULL) { - /* Invalid arguments*/ - return EINVAL; - } - - if ((src_str == NULL) || (dest_size == 0)) { - /* Invalid arguments*/ - dest_str[0] = '\0'; - return EINVAL; - } - - len = strnlen_s(src_str, dest_size); - - if (len >= dest_size) { - /* Destination too small*/ - dest_str[0] = '\0'; - return ERANGE; - } - - /* dest_str is big enough for the len */ - strncpy(dest_str, src_str, len); - dest_str[len] = '\0'; - return 0; -} - -#endif /*!defined(_MSC_VER)*/ - -#endif /* __STRING_SUPPORT_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/system_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/system_types.h deleted file mode 100644 index 
a8c19cee17da..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/system_types.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#ifndef __SYSTEM_TYPES_H_INCLUDED__ -#define __SYSTEM_TYPES_H_INCLUDED__ - -/** -* @file -* Platform specific types. -*/ - - -#include "system_local.h" - -#endif /* __SYSTEM_TYPES_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h deleted file mode 100644 index ace695643369..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __TAG_H_INCLUDED__ -#define __TAG_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and is system agnostic - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: cell specific interfaces - * - private: cell specific inline implementations - * - global: inter cell constants and identifiers - * - local: cell specific constants and identifiers - * - */ - - -#include "tag_local.h" - -#ifndef __INLINE_TAG__ -#define STORAGE_CLASS_TAG_H extern -#define STORAGE_CLASS_TAG_C -#include "tag_public.h" -#else /* __INLINE_TAG__ */ -#define STORAGE_CLASS_TAG_H static inline -#define STORAGE_CLASS_TAG_C static inline -#include "tag_private.h" -#endif /* __INLINE_TAG__ */ - -#endif /* __TAG_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h deleted file mode 100644 index f6bc1c47553f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __TIMED_CTRL_H_INCLUDED__ -#define __TIMED_CTRL_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the input system device(s). It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "timed_ctrl_local.h" - -#ifndef __INLINE_TIMED_CTRL__ -#define STORAGE_CLASS_TIMED_CTRL_H extern -#define STORAGE_CLASS_TIMED_CTRL_C -#include "timed_ctrl_public.h" -#else /* __INLINE_TIMED_CTRL__ */ -#define STORAGE_CLASS_TIMED_CTRL_H static inline -#define STORAGE_CLASS_TIMED_CTRL_C static inline -#include "timed_ctrl_private.h" -#endif /* __INLINE_TIMED_CTRL__ */ - -#endif /* __TIMED_CTRL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h deleted file mode 100644 index bc77537fa73a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __TYPE_SUPPORT_H_INCLUDED__ -#define __TYPE_SUPPORT_H_INCLUDED__ - -/** -* @file -* Platform specific types. -* -* Per the DLI spec, types are in "type_support.h" and -* "platform_support.h" is for unclassified/to be refactored -* platform specific definitions. -*/ - -#define IA_CSS_UINT8_T_BITS 8 -#define IA_CSS_UINT16_T_BITS 16 -#define IA_CSS_UINT32_T_BITS 32 -#define IA_CSS_INT32_T_BITS 32 -#define IA_CSS_UINT64_T_BITS 64 - -#define CHAR_BIT (8) - -#include -#include -#include -#define HOST_ADDRESS(x) (unsigned long)(x) - -#endif /* __TYPE_SUPPORT_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h deleted file mode 100644 index 82d447bf9704..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __VAMEM_H_INCLUDED__ -#define __VAMEM_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the VAMEM device. 
It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "vamem_local.h" - -#ifndef __INLINE_VAMEM__ -#define STORAGE_CLASS_VAMEM_H extern -#define STORAGE_CLASS_VAMEM_C -#include "vamem_public.h" -#else /* __INLINE_VAMEM__ */ -#define STORAGE_CLASS_VAMEM_H static inline -#define STORAGE_CLASS_VAMEM_C static inline -#include "vamem_private.h" -#endif /* __INLINE_VAMEM__ */ - -#endif /* __VAMEM_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h deleted file mode 100644 index d3375729c441..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __VMEM_H_INCLUDED__ -#define __VMEM_H_INCLUDED__ - -/* - * This file is included on every cell {SP,ISP,host} and on every system - * that uses the VMEM device. 
It defines the API to DLI bridge - * - * System and cell specific interfaces and inline code are included - * conditionally through Makefile path settings. - * - * - . system and cell agnostic interfaces, constants and identifiers - * - public: system agnostic, cell specific interfaces - * - private: system dependent, cell specific interfaces & inline implementations - * - global: system specific constants and identifiers - * - local: system and cell specific constants and identifiers - */ - - -#include "system_local.h" -#include "vmem_local.h" - -#ifndef __INLINE_VMEM__ -#define STORAGE_CLASS_VMEM_H extern -#define STORAGE_CLASS_VMEM_C -#include "vmem_public.h" -#else /* __INLINE_VMEM__ */ -#define STORAGE_CLASS_VMEM_H static inline -#define STORAGE_CLASS_VMEM_C static inline -#include "vmem_private.h" -#endif /* __INLINE_VMEM__ */ - -#endif /* __VMEM_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_local.h deleted file mode 100644 index 9f4060319b4b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_local.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __QUEUE_LOCAL_H_INCLUDED__ -#define __QUEUE_LOCAL_H_INCLUDED__ - -#include "queue_global.h" - -#endif /* __QUEUE_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_private.h deleted file mode 100644 index 2b396955cdad..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_private.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __QUEUE_PRIVATE_H_INCLUDED__ -#define __QUEUE_PRIVATE_H_INCLUDED__ - -#endif /* __QUEUE_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c deleted file mode 100644 index 2cf1d58941bf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "tag.h" -#include /* NULL */ -#include -#include "tag_local.h" - -/* - * @brief Creates the tag description from the given parameters. - * @param[in] num_captures - * @param[in] skip - * @param[in] offset - * @param[out] tag_descr - */ -void -sh_css_create_tag_descr(int num_captures, - unsigned int skip, - int offset, - unsigned int exp_id, - struct sh_css_tag_descr *tag_descr) -{ - assert(tag_descr != NULL); - - tag_descr->num_captures = num_captures; - tag_descr->skip = skip; - tag_descr->offset = offset; - tag_descr->exp_id = exp_id; -} - -/* - * @brief Encodes the members of tag description into a 32-bit value. - * @param[in] tag Pointer to the tag description - * @return (unsigned int) Encoded 32-bit tag-info - */ -unsigned int -sh_css_encode_tag_descr(struct sh_css_tag_descr *tag) -{ - int num_captures; - unsigned int num_captures_sign; - unsigned int skip; - int offset; - unsigned int offset_sign; - unsigned int exp_id; - unsigned int encoded_tag; - - assert(tag != NULL); - - if (tag->num_captures < 0) { - num_captures = -tag->num_captures; - num_captures_sign = 1; - } else { - num_captures = tag->num_captures; - num_captures_sign = 0; - } - skip = tag->skip; - if (tag->offset < 0) { - offset = -tag->offset; - offset_sign = 1; - } else { - offset = tag->offset; - offset_sign = 0; - } - exp_id = tag->exp_id; - - if (exp_id != 0) - { - /* we encode either an exp_id or capture data */ - assert((num_captures == 0) && (skip == 0) && (offset == 0)); - - encoded_tag = TAG_EXP | (exp_id & 0xFF) << TAG_EXP_ID_SHIFT; - } - else - { - encoded_tag = TAG_CAP - | ((num_captures_sign & 0x00000001) << TAG_NUM_CAPTURES_SIGN_SHIFT) - | ((offset_sign & 0x00000001) << TAG_OFFSET_SIGN_SHIFT) - | ((num_captures & 
0x000000FF) << TAG_NUM_CAPTURES_SHIFT) - | ((skip & 0x000000FF) << TAG_OFFSET_SHIFT) - | ((offset & 0x000000FF) << TAG_SKIP_SHIFT); - - } - return encoded_tag; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_local.h deleted file mode 100644 index 01a8977c189e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_local.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __TAG_LOCAL_H_INCLUDED__ -#define __TAG_LOCAL_H_INCLUDED__ - -#include "tag_global.h" - -#define SH_CSS_MINIMUM_TAG_ID (-1) - -#endif /* __TAG_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_private.h deleted file mode 100644 index 0570a95ec5bf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_private.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __TAG_PRIVATE_H_INCLUDED__ -#define __TAG_PRIVATE_H_INCLUDED__ - -#endif /* __TAG_PRIVATE_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/queue_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/queue_global.h deleted file mode 100644 index 61330daab734..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/queue_global.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __QUEUE_GLOBAL_H_INCLUDED__ -#define __QUEUE_GLOBAL_H_INCLUDED__ - -#endif /* __QUEUE_GLOBAL_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/sw_event_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/sw_event_global.h deleted file mode 100644 index c0d2efadbbe3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/sw_event_global.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SW_EVENT_GLOBAL_H_INCLUDED__ -#define __SW_EVENT_GLOBAL_H_INCLUDED__ - -#define MAX_NR_OF_PAYLOADS_PER_SW_EVENT 4 - -enum ia_css_psys_sw_event { - IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED, /* from host to SP */ - IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED, /* from SP to host */ - IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, /* from SP to host, one way only */ - IA_CSS_PSYS_SW_EVENT_START_STREAM, - IA_CSS_PSYS_SW_EVENT_STOP_STREAM, - IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY, - IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER, - IA_CSS_PSYS_SW_EVENT_STAGE_ENABLE_DISABLE /* for extension state change enable/disable */ -}; - -enum ia_css_isys_sw_event { - IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED -}; - -#endif /* __SW_EVENT_GLOBAL_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/tag_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/tag_global.h deleted file mode 100644 index fda457792c9c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/tag_global.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __TAG_GLOBAL_H_INCLUDED__ -#define __TAG_GLOBAL_H_INCLUDED__ - -/* offsets for encoding/decoding the tag into an uint32_t */ - -#define TAG_CAP 1 -#define TAG_EXP 2 - -#define TAG_NUM_CAPTURES_SIGN_SHIFT 6 -#define TAG_OFFSET_SIGN_SHIFT 7 -#define TAG_NUM_CAPTURES_SHIFT 8 -#define TAG_OFFSET_SHIFT 16 -#define TAG_SKIP_SHIFT 24 - -#define TAG_EXP_ID_SHIFT 8 - -/* Data structure containing the tagging information which is used in - * continuous mode to specify which frames should be captured. - * num_captures The number of RAW frames to be processed to - * YUV. Setting this to -1 will make continuous - * capture run until it is stopped. - * skip Skip N frames in between captures. This can be - * used to select a slower capture frame rate than - * the sensor output frame rate. - * offset Start the RAW-to-YUV processing at RAW buffer - * with this offset. This allows the user to - * process RAW frames that were captured in the - * past or future. - * exp_id Exposure id of the RAW frame to tag. - * - * NOTE: Either exp_id = 0 or all other fields are 0 - * (so yeah, this could be a union) - */ - -struct sh_css_tag_descr { - int num_captures; - unsigned int skip; - int offset; - unsigned int exp_id; -}; - -#endif /* __TAG_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h deleted file mode 100644 index e44df6916d90..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h +++ /dev/null @@ -1,57 +0,0 @@ -/* Release Version: irci_stable_candrpv_0415_20150521_0458 */ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_H_ -#define _IA_CSS_H_ - -/* @file - * This file is the starting point of the CSS-API. It includes all CSS-API - * header files. - */ - -#include "ia_css_3a.h" -#include "ia_css_acc_types.h" -#include "ia_css_buffer.h" -#include "ia_css_control.h" -#include "ia_css_device_access.h" -#include "ia_css_dvs.h" -#include "ia_css_env.h" -#include "ia_css_err.h" -#include "ia_css_event_public.h" -#include "ia_css_firmware.h" -#include "ia_css_frame_public.h" -#include "ia_css_input_port.h" -#include "ia_css_irq.h" -#include "ia_css_metadata.h" -#include "ia_css_mipi.h" -#include "ia_css_pipe_public.h" -#include "ia_css_prbs.h" -#include "ia_css_properties.h" -#include "ia_css_stream_format.h" -#include "ia_css_stream_public.h" -#include "ia_css_tpg.h" -#include "ia_css_version.h" -#include "ia_css_mmu.h" -#include "ia_css_morph.h" -#include "ia_css_shading.h" -#include "ia_css_timer.h" - -/* - Please do not add code to this file. Public functionality is to be - exposed in a function/data type specific header file. - Please add to the appropriate header file or create a new one. - */ - -#endif /* _IA_CSS_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h deleted file mode 100644 index 080198796ad0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_3A_H -#define __IA_CSS_3A_H - -/* @file - * This file contains types used for 3A statistics - */ - -#include -#include "ia_css_types.h" -#include "ia_css_err.h" -#include "system_global.h" - -enum ia_css_3a_tables { - IA_CSS_S3A_TBL_HI, - IA_CSS_S3A_TBL_LO, - IA_CSS_RGBY_TBL, - IA_CSS_NUM_3A_TABLES -}; - -/* Structure that holds 3A statistics in the ISP internal - * format. Use ia_css_get_3a_statistics() to translate - * this to the format used on the host (3A library). - * */ -struct ia_css_isp_3a_statistics { - union { - struct { - ia_css_ptr s3a_tbl; - } dmem; - struct { - ia_css_ptr s3a_tbl_hi; - ia_css_ptr s3a_tbl_lo; - } vmem; - } data; - struct { - ia_css_ptr rgby_tbl; - } data_hmem; - uint32_t exp_id; /** exposure id, to match statistics to a frame, - see ia_css_event_public.h for more detail. 
*/ - uint32_t isp_config_id;/** Unique ID to track which config was actually applied to a particular frame */ - ia_css_ptr data_ptr; /** pointer to base of all data */ - uint32_t size; /** total size of all data */ - uint32_t dmem_size; - uint32_t vmem_size; /** both lo and hi have this size */ - uint32_t hmem_size; -}; -#define SIZE_OF_DMEM_STRUCT \ - (SIZE_OF_IA_CSS_PTR) - -#define SIZE_OF_VMEM_STRUCT \ - (2 * SIZE_OF_IA_CSS_PTR) - -#define SIZE_OF_DATA_UNION \ - (MAX(SIZE_OF_DMEM_STRUCT, SIZE_OF_VMEM_STRUCT)) - -#define SIZE_OF_DATA_HMEM_STRUCT \ - (SIZE_OF_IA_CSS_PTR) - -#define SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT \ - (SIZE_OF_DATA_UNION + \ - SIZE_OF_DATA_HMEM_STRUCT + \ - sizeof(uint32_t) + \ - sizeof(uint32_t) + \ - SIZE_OF_IA_CSS_PTR + \ - 4 * sizeof(uint32_t)) - -/* Map with host-side pointers to ISP-format statistics. - * These pointers can either be copies of ISP data or memory mapped - * ISP pointers. - * All of the data behind these pointers is allocated contiguously, the - * allocated pointer is stored in the data_ptr field. The other fields - * point into this one block of data. - */ -struct ia_css_isp_3a_statistics_map { - void *data_ptr; /** Pointer to start of memory */ - struct ia_css_3a_output *dmem_stats; - uint16_t *vmem_stats_hi; - uint16_t *vmem_stats_lo; - struct ia_css_bh_table *hmem_stats; - uint32_t size; /** total size in bytes of data_ptr */ - uint32_t data_allocated; /** indicate whether data_ptr - was allocated or not. */ -}; - -/* @brief Copy and translate 3A statistics from an ISP buffer to a host buffer - * @param[out] host_stats Host buffer. - * @param[in] isp_stats ISP buffer. - * @return error value if temporary memory cannot be allocated - * - * This copies 3a statistics from an ISP pointer to a host pointer and then - * translates some of the statistics, details depend on which ISP binary is - * used. - * Always use this function, never copy the buffer directly. 
- */ -enum ia_css_err -ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats, - const struct ia_css_isp_3a_statistics *isp_stats); - -/* @brief Translate 3A statistics from ISP format to host format. - * @param[out] host_stats host-format statistics - * @param[in] isp_stats ISP-format statistics - * @return None - * - * This function translates statistics from the internal ISP-format to - * the host-format. This function does not include an additional copy - * step. - * */ -void -ia_css_translate_3a_statistics( - struct ia_css_3a_statistics *host_stats, - const struct ia_css_isp_3a_statistics_map *isp_stats); - -/* Convenience functions for alloc/free of certain datatypes */ - -/* @brief Allocate memory for the 3a statistics on the ISP - * @param[in] grid The grid. - * @return Pointer to the allocated 3a statistics buffer on the ISP -*/ -struct ia_css_isp_3a_statistics * -ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid); - -/* @brief Free the 3a statistics memory on the isp - * @param[in] me Pointer to the 3a statistics buffer on the ISP. - * @return None -*/ -void -ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me); - -/* @brief Allocate memory for the 3a statistics on the host - * @param[in] grid The grid. - * @return Pointer to the allocated 3a statistics buffer on the host -*/ -struct ia_css_3a_statistics * -ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid); - -/* @brief Free the 3a statistics memory on the host - * @param[in] me Pointer to the 3a statistics buffer on the host. - * @return None - */ -void -ia_css_3a_statistics_free(struct ia_css_3a_statistics *me); - -/* @brief Allocate a 3a statistics map structure - * @param[in] isp_stats pointer to ISP 3a statistis struct - * @param[in] data_ptr host-side pointer to ISP 3a statistics. 
- * @return Pointer to the allocated 3a statistics map - * - * This function allocates the ISP 3a statistics map structure - * and uses the data_ptr as base pointer to set the appropriate - * pointers to all relevant subsets of the 3a statistics (dmem, - * vmem, hmem). - * If the data_ptr is NULL, this function will allocate the host-side - * memory. This information is stored in the struct and used in the - * ia_css_isp_3a_statistics_map_free() function to determine whether - * the memory should be freed or not. - * Note that this function does not allocate or map any ISP - * memory. -*/ -struct ia_css_isp_3a_statistics_map * -ia_css_isp_3a_statistics_map_allocate( - const struct ia_css_isp_3a_statistics *isp_stats, - void *data_ptr); - -/* @brief Free the 3a statistics map - * @param[in] me Pointer to the 3a statistics map - * @return None - * - * This function frees the map struct. If the data_ptr inside it - * was allocated inside ia_css_isp_3a_statistics_map_allocate(), it - * will be freed in this function. Otherwise it will not be freed. - */ -void -ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me); - -#endif /* __IA_CSS_3A_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h deleted file mode 100644 index 138bc3bb4627..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h +++ /dev/null @@ -1,468 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_ACC_TYPES_H -#define _IA_CSS_ACC_TYPES_H - -/* @file - * This file contains types used for acceleration - */ - -#include /* HAS_IRQ_MAP_VERSION_# */ -#include -#include -#include - -#include "ia_css_types.h" -#include "ia_css_frame_format.h" - -/* Should be included without the path. - However, that requires adding the path to numerous makefiles - that have nothing to do with isp parameters. - */ -#include "runtime/isp_param/interface/ia_css_isp_param_types.h" - -/* Types for the acceleration API. - * These should be moved to sh_css_internal.h once the old acceleration - * argument handling has been completed. - * After that, interpretation of these structures is no longer needed - * in the kernel and HAL. -*/ - -/* Type of acceleration. - */ -enum ia_css_acc_type { - IA_CSS_ACC_NONE, /** Normal binary */ - IA_CSS_ACC_OUTPUT, /** Accelerator stage on output frame */ - IA_CSS_ACC_VIEWFINDER, /** Accelerator stage on viewfinder frame */ - IA_CSS_ACC_STANDALONE, /** Stand-alone acceleration */ -}; - -/* Cells types - */ -enum ia_css_cell_type { - IA_CSS_SP0 = 0, - IA_CSS_SP1, - IA_CSS_ISP, - MAX_NUM_OF_CELLS -}; - -/* Firmware types. - */ -enum ia_css_fw_type { - ia_css_sp_firmware, /** Firmware for the SP */ - ia_css_isp_firmware, /** Firmware for the ISP */ - ia_css_bootloader_firmware, /** Firmware for the BootLoader */ - ia_css_acc_firmware /** Firmware for accelrations */ -}; - -struct ia_css_blob_descr; - -/* Blob descriptor. - * This structure describes an SP or ISP blob. - * It describes the test, data and bss sections as well as position in a - * firmware file. - * For convenience, it contains dynamic data after loading. 
- */ -struct ia_css_blob_info { - /** Static blob data */ - uint32_t offset; /** Blob offset in fw file */ - struct ia_css_isp_param_memory_offsets memory_offsets; /** offset wrt hdr in bytes */ - uint32_t prog_name_offset; /** offset wrt hdr in bytes */ - uint32_t size; /** Size of blob */ - uint32_t padding_size; /** total cummulative of bytes added due to section alignment */ - uint32_t icache_source; /** Position of icache in blob */ - uint32_t icache_size; /** Size of icache section */ - uint32_t icache_padding;/** bytes added due to icache section alignment */ - uint32_t text_source; /** Position of text in blob */ - uint32_t text_size; /** Size of text section */ - uint32_t text_padding; /** bytes added due to text section alignment */ - uint32_t data_source; /** Position of data in blob */ - uint32_t data_target; /** Start of data in SP dmem */ - uint32_t data_size; /** Size of text section */ - uint32_t data_padding; /** bytes added due to data section alignment */ - uint32_t bss_target; /** Start position of bss in SP dmem */ - uint32_t bss_size; /** Size of bss section */ - /** Dynamic data filled by loader */ - CSS_ALIGN(const void *code, 8); /** Code section absolute pointer within fw, code = icache + text */ - CSS_ALIGN(const void *data, 8); /** Data section absolute pointer within fw, data = data + bss */ -}; - -struct ia_css_binary_input_info { - uint32_t min_width; - uint32_t min_height; - uint32_t max_width; - uint32_t max_height; - uint32_t source; /* memory, sensor, variable */ -}; - -struct ia_css_binary_output_info { - uint32_t min_width; - uint32_t min_height; - uint32_t max_width; - uint32_t max_height; - uint32_t num_chunks; - uint32_t variable_format; -}; - -struct ia_css_binary_internal_info { - uint32_t max_width; - uint32_t max_height; -}; - -struct ia_css_binary_bds_info { - uint32_t supported_bds_factors; -}; - -struct ia_css_binary_dvs_info { - uint32_t max_envelope_width; - uint32_t max_envelope_height; -}; - -struct 
ia_css_binary_vf_dec_info { - uint32_t is_variable; - uint32_t max_log_downscale; -}; - -struct ia_css_binary_s3a_info { - uint32_t s3atbl_use_dmem; - uint32_t fixed_s3a_deci_log; -}; - -/* DPC related binary info */ -struct ia_css_binary_dpc_info { - uint32_t bnr_lite; /** bnr lite enable flag */ -}; - -struct ia_css_binary_iterator_info { - uint32_t num_stripes; - uint32_t row_stripes_height; - uint32_t row_stripes_overlap_lines; -}; - -struct ia_css_binary_address_info { - uint32_t isp_addresses; /* Address in ISP dmem */ - uint32_t main_entry; /* Address of entry fct */ - uint32_t in_frame; /* Address in ISP dmem */ - uint32_t out_frame; /* Address in ISP dmem */ - uint32_t in_data; /* Address in ISP dmem */ - uint32_t out_data; /* Address in ISP dmem */ - uint32_t sh_dma_cmd_ptr; /* In ISP dmem */ -}; - -struct ia_css_binary_uds_info { - uint16_t bpp; - uint16_t use_bci; - uint16_t use_str; - uint16_t woix; - uint16_t woiy; - uint16_t extra_out_vecs; - uint16_t vectors_per_line_in; - uint16_t vectors_per_line_out; - uint16_t vectors_c_per_line_in; - uint16_t vectors_c_per_line_out; - uint16_t vmem_gdc_in_block_height_y; - uint16_t vmem_gdc_in_block_height_c; - /* uint16_t padding; */ -}; - -struct ia_css_binary_pipeline_info { - uint32_t mode; - uint32_t isp_pipe_version; - uint32_t pipelining; - uint32_t c_subsampling; - uint32_t top_cropping; - uint32_t left_cropping; - uint32_t variable_resolution; -}; - -struct ia_css_binary_block_info { - uint32_t block_width; - uint32_t block_height; - uint32_t output_block_height; -}; - -/* Structure describing an ISP binary. - * It describes the capabilities of a binary, like the maximum resolution, - * support features, dma channels, uds features, etc. - * This part is to be used by the SP. - * Future refactoring should move binary properties to ia_css_binary_xinfo, - * thereby making the SP code more binary independent. 
- */ -struct ia_css_binary_info { - CSS_ALIGN(uint32_t id, 8); /* IA_CSS_BINARY_ID_* */ - struct ia_css_binary_pipeline_info pipeline; - struct ia_css_binary_input_info input; - struct ia_css_binary_output_info output; - struct ia_css_binary_internal_info internal; - struct ia_css_binary_bds_info bds; - struct ia_css_binary_dvs_info dvs; - struct ia_css_binary_vf_dec_info vf_dec; - struct ia_css_binary_s3a_info s3a; - struct ia_css_binary_dpc_info dpc_bnr; /** DPC related binary info */ - struct ia_css_binary_iterator_info iterator; - struct ia_css_binary_address_info addresses; - struct ia_css_binary_uds_info uds; - struct ia_css_binary_block_info block; - struct ia_css_isp_param_isp_segments mem_initializers; -/* MW: Packing (related) bools in an integer ?? */ - struct { -#ifdef ISP2401 - uint8_t luma_only; - uint8_t input_yuv; - uint8_t input_raw; -#endif - uint8_t reduced_pipe; - uint8_t vf_veceven; - uint8_t dis; - uint8_t dvs_envelope; - uint8_t uds; - uint8_t dvs_6axis; - uint8_t block_output; - uint8_t streaming_dma; - uint8_t ds; - uint8_t bayer_fir_6db; - uint8_t raw_binning; - uint8_t continuous; - uint8_t s3a; - uint8_t fpnr; - uint8_t sc; - uint8_t macc; - uint8_t output; - uint8_t ref_frame; - uint8_t tnr; - uint8_t xnr; - uint8_t params; - uint8_t ca_gdc; - uint8_t isp_addresses; - uint8_t in_frame; - uint8_t out_frame; - uint8_t high_speed; - uint8_t dpc; - uint8_t padding[2]; - } enable; - struct { -/* DMA channel ID: [0,...,HIVE_ISP_NUM_DMA_CHANNELS> */ - uint8_t ref_y_channel; - uint8_t ref_c_channel; - uint8_t tnr_channel; - uint8_t tnr_out_channel; - uint8_t dvs_coords_channel; - uint8_t output_channel; - uint8_t c_channel; - uint8_t vfout_channel; - uint8_t vfout_c_channel; - uint8_t vfdec_bits_per_pixel; - uint8_t claimed_by_isp; - uint8_t padding[2]; - } dma; -}; - -/* Structure describing an ISP binary. - * It describes the capabilities of a binary, like the maximum resolution, - * support features, dma channels, uds features, etc. 
- */ -struct ia_css_binary_xinfo { - /* Part that is of interest to the SP. */ - struct ia_css_binary_info sp; - - /* Rest of the binary info, only interesting to the host. */ - enum ia_css_acc_type type; - CSS_ALIGN(int32_t num_output_formats, 8); - enum ia_css_frame_format output_formats[IA_CSS_FRAME_FORMAT_NUM]; - CSS_ALIGN(int32_t num_vf_formats, 8); /** number of supported vf formats */ - enum ia_css_frame_format vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /** types of supported vf formats */ - uint8_t num_output_pins; - ia_css_ptr xmem_addr; - CSS_ALIGN(const struct ia_css_blob_descr *blob, 8); - CSS_ALIGN(uint32_t blob_index, 8); - CSS_ALIGN(union ia_css_all_memory_offsets mem_offsets, 8); - CSS_ALIGN(struct ia_css_binary_xinfo *next, 8); -}; - -/* Structure describing the Bootloader (an ISP binary). - * It contains several address, either in ddr, isp_dmem or - * the entry function in icache. - */ -struct ia_css_bl_info { - uint32_t num_dma_cmds; /** Number of cmds sent by CSS */ - uint32_t dma_cmd_list; /** Dma command list sent by CSS */ - uint32_t sw_state; /** Polled from css */ - /* Entry functions */ - uint32_t bl_entry; /** The SP entry function */ -}; - -/* Structure describing the SP binary. - * It contains several address, either in ddr, sp_dmem or - * the entry function in pmem. 
- */ -struct ia_css_sp_info { - uint32_t init_dmem_data; /** data sect config, stored to dmem */ - uint32_t per_frame_data; /** Per frame data, stored to dmem */ - uint32_t group; /** Per pipeline data, loaded by dma */ - uint32_t output; /** SP output data, loaded by dmem */ - uint32_t host_sp_queue; /** Host <-> SP queues */ - uint32_t host_sp_com;/** Host <-> SP commands */ - uint32_t isp_started; /** Polled from sensor thread, csim only */ - uint32_t sw_state; /** Polled from css */ - uint32_t host_sp_queues_initialized; /** Polled from the SP */ - uint32_t sleep_mode; /** different mode to halt SP */ - uint32_t invalidate_tlb; /** inform SP to invalidate mmu TLB */ -#ifndef ISP2401 - uint32_t stop_copy_preview; /** suspend copy and preview pipe when capture */ -#endif - uint32_t debug_buffer_ddr_address; /** inform SP the address - of DDR debug queue */ - uint32_t perf_counter_input_system_error; /** input system perf - counter array */ -#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG - uint32_t debug_wait; /** thread/pipe post mortem debug */ - uint32_t debug_stage; /** thread/pipe post mortem debug */ - uint32_t debug_stripe; /** thread/pipe post mortem debug */ -#endif - uint32_t threads_stack; /** sp thread's stack pointers */ - uint32_t threads_stack_size; /** sp thread's stack sizes */ - uint32_t curr_binary_id; /** current binary id */ - uint32_t raw_copy_line_count; /** raw copy line counter */ - uint32_t ddr_parameter_address; /** acc param ddrptr, sp dmem */ - uint32_t ddr_parameter_size; /** acc param size, sp dmem */ - /* Entry functions */ - uint32_t sp_entry; /** The SP entry function */ - uint32_t tagger_frames_addr; /** Base address of tagger state */ -}; - -/* The following #if is there because this header file is also included - by SP and ISP code but they do not need this data and HIVECC has alignment - issue with the firmware struct/union's. - More permanent solution will be to refactor this include. 
-*/ -#if !defined(__ISP) -/* Accelerator firmware information. - */ -struct ia_css_acc_info { - uint32_t per_frame_data; /** Dummy for now */ -}; - -/* Firmware information. - */ -union ia_css_fw_union { - struct ia_css_binary_xinfo isp; /** ISP info */ - struct ia_css_sp_info sp; /** SP info */ - struct ia_css_bl_info bl; /** Bootloader info */ - struct ia_css_acc_info acc; /** Accelerator info */ -}; - -/* Firmware information. - */ -struct ia_css_fw_info { - size_t header_size; /** size of fw header */ - CSS_ALIGN(uint32_t type, 8); - union ia_css_fw_union info; /** Binary info */ - struct ia_css_blob_info blob; /** Blob info */ - /* Dynamic part */ - struct ia_css_fw_info *next; - CSS_ALIGN(uint32_t loaded, 8); /** Firmware has been loaded */ - CSS_ALIGN(const uint8_t *isp_code, 8); /** ISP pointer to code */ - /** Firmware handle between user space and kernel */ - CSS_ALIGN(uint32_t handle, 8); - /** Sections to copy from/to ISP */ - struct ia_css_isp_param_css_segments mem_initializers; - /** Initializer for local ISP memories */ -}; - -struct ia_css_blob_descr { - const unsigned char *blob; - struct ia_css_fw_info header; - const char *name; - union ia_css_all_memory_offsets mem_offsets; -}; - -struct ia_css_acc_fw; - -/* Structure describing the SP binary of a stand-alone accelerator. - */ -struct ia_css_acc_sp { - void (*init)(struct ia_css_acc_fw *); /** init for crun */ - uint32_t sp_prog_name_offset; /** program name offset wrt hdr in bytes */ - uint32_t sp_blob_offset; /** blob offset wrt hdr in bytes */ - void *entry; /** Address of sp entry point */ - uint32_t *css_abort; /** SP dmem abort flag */ - void *isp_code; /** SP dmem address holding xmem - address of isp code */ - struct ia_css_fw_info fw; /** SP fw descriptor */ - const uint8_t *code; /** ISP pointer of allocated SP code */ -}; - -/* Acceleration firmware descriptor. - * This descriptor descibes either SP code (stand-alone), or - * ISP code (a separate pipeline stage). 
- */ -struct ia_css_acc_fw_hdr { - enum ia_css_acc_type type; /** Type of accelerator */ - uint32_t isp_prog_name_offset; /** program name offset wrt - header in bytes */ - uint32_t isp_blob_offset; /** blob offset wrt header - in bytes */ - uint32_t isp_size; /** Size of isp blob */ - const uint8_t *isp_code; /** ISP pointer to code */ - struct ia_css_acc_sp sp; /** Standalone sp code */ - /** Firmware handle between user space and kernel */ - uint32_t handle; - struct ia_css_data parameters; /** Current SP parameters */ -}; - -/* Firmware structure. - * This contains the header and actual blobs. - * For standalone, it contains SP and ISP blob. - * For a pipeline stage accelerator, it contains ISP code only. - * Since its members are variable size, their offsets are described in the - * header and computed using the access macros below. - */ -struct ia_css_acc_fw { - struct ia_css_acc_fw_hdr header; /** firmware header */ - /* - int8_t isp_progname[]; **< ISP program name - int8_t sp_progname[]; **< SP program name, stand-alone only - uint8_t sp_code[]; **< SP blob, stand-alone only - uint8_t isp_code[]; **< ISP blob - */ -}; - -/* Access macros for firmware */ -#define IA_CSS_ACC_OFFSET(t, f, n) ((t)((uint8_t *)(f)+(f->header.n))) -#define IA_CSS_ACC_SP_PROG_NAME(f) IA_CSS_ACC_OFFSET(const char *, f, \ - sp.sp_prog_name_offset) -#define IA_CSS_ACC_ISP_PROG_NAME(f) IA_CSS_ACC_OFFSET(const char *, f, \ - isp_prog_name_offset) -#define IA_CSS_ACC_SP_CODE(f) IA_CSS_ACC_OFFSET(uint8_t *, f, \ - sp.sp_blob_offset) -#define IA_CSS_ACC_SP_DATA(f) (IA_CSS_ACC_SP_CODE(f) + \ - (f)->header.sp.fw.blob.data_source) -#define IA_CSS_ACC_ISP_CODE(f) IA_CSS_ACC_OFFSET(uint8_t*, f,\ - isp_blob_offset) -#define IA_CSS_ACC_ISP_SIZE(f) ((f)->header.isp_size) - -/* Binary name follows header immediately */ -#define IA_CSS_EXT_ISP_PROG_NAME(f) ((const char *)(f)+(f)->blob.prog_name_offset) -#define IA_CSS_EXT_ISP_MEM_OFFSETS(f) \ - ((const struct ia_css_memory_offsets *)((const char 
*)(f)+(f)->blob.mem_offsets)) - -#endif /* !defined(__ISP) */ - -enum ia_css_sp_sleep_mode { - SP_DISABLE_SLEEP_MODE = 0, - SP_SLEEP_AFTER_FRAME = 1 << 0, - SP_SLEEP_AFTER_IRQ = 1 << 1 -}; -#endif /* _IA_CSS_ACC_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h deleted file mode 100644 index a0058eac7d5a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_BUFFER_H -#define __IA_CSS_BUFFER_H - -/* @file - * This file contains datastructures and types for buffers used in CSS - */ - -#include -#include "ia_css_types.h" -#include "ia_css_timer.h" - -/* Enumeration of buffer types. Buffers can be queued and de-queued - * to hand them over between IA and ISP. 
- */ -enum ia_css_buffer_type { - IA_CSS_BUFFER_TYPE_INVALID = -1, - IA_CSS_BUFFER_TYPE_3A_STATISTICS = 0, - IA_CSS_BUFFER_TYPE_DIS_STATISTICS, - IA_CSS_BUFFER_TYPE_LACE_STATISTICS, - IA_CSS_BUFFER_TYPE_INPUT_FRAME, - IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, - IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME, - IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, - IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME, - IA_CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME, - IA_CSS_BUFFER_TYPE_CUSTOM_INPUT, - IA_CSS_BUFFER_TYPE_CUSTOM_OUTPUT, - IA_CSS_BUFFER_TYPE_METADATA, - IA_CSS_BUFFER_TYPE_PARAMETER_SET, - IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, - IA_CSS_NUM_DYNAMIC_BUFFER_TYPE, - IA_CSS_NUM_BUFFER_TYPE -}; - -/* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */ -#if !defined(__ISP) -/* Buffer structure. This is a container structure that enables content - * independent buffer queues and access functions. - */ -struct ia_css_buffer { - enum ia_css_buffer_type type; /** Buffer type. */ - unsigned int exp_id; - /** exposure id for this buffer; 0 = not available - see ia_css_event_public.h for more detail. */ - union { - struct ia_css_isp_3a_statistics *stats_3a; /** 3A statistics & optionally RGBY statistics. */ - struct ia_css_isp_dvs_statistics *stats_dvs; /** DVS statistics. */ - struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs; /** SKC DVS statistics. */ - struct ia_css_frame *frame; /** Frame buffer. */ - struct ia_css_acc_param *custom_data; /** Custom buffer. */ - struct ia_css_metadata *metadata; /** Sensor metadata. */ - } data; /** Buffer data pointer. */ - uint64_t driver_cookie; /** cookie for the driver */ - struct ia_css_time_meas timing_data; /** timing data (readings from the timer) */ - struct ia_css_clock_tick isys_eof_clock_tick; /** ISYS's end of frame timer tick*/ -}; - -/* @brief Dequeue param buffers from sp2host_queue - * - * @return None - * - * This function must be called at every driver interrupt handler to prevent - * overflow of sp2host_queue. 
- */ -void -ia_css_dequeue_param_buffers(void); - -#endif /* !__ISP */ - -#endif /* __IA_CSS_BUFFER_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h deleted file mode 100644 index 021a313fab85..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CONTROL_H -#define __IA_CSS_CONTROL_H - -/* @file - * This file contains functionality for starting and controlling CSS - */ - -#include -#include -#include -#include - -/* @brief Initialize the CSS API. - * @param[in] env Environment, provides functions to access the - * environment in which the CSS code runs. This is - * used for host side memory access and message - * printing. May not be NULL. - * @param[in] fw Firmware package containing the firmware for all - * predefined ISP binaries. - * if fw is NULL the firmware must be loaded before - * through a call of ia_css_load_firmware - * @param[in] l1_base Base index (isp2400) - * of the L1 page table. This is a physical - * address or index. - * @param[in] irq_type The type of interrupt to be used (edge or level) - * @return Returns IA_CSS_ERR_INTERNAL_ERROR in case of any - * errors and IA_CSS_SUCCESS otherwise. - * - * This function initializes the API which includes allocating and initializing - * internal data structures. 
This also interprets the firmware package. All - * contents of this firmware package are copied into local data structures, so - * the fw pointer could be freed after this function completes. - */ -enum ia_css_err ia_css_init( - const struct ia_css_env *env, - const struct ia_css_fw *fw, - uint32_t l1_base, - enum ia_css_irq_type irq_type); - -/* @brief Un-initialize the CSS API. - * @return None - * - * This function deallocates all memory that has been allocated by the CSS API - * Exception: if you explicitly loaded firmware through ia_css_load_firmware - * you need to call ia_css_unload_firmware to deallocate the memory reserved - * for the firmware. - * After this function is called, no other CSS functions should be called - * with the exception of ia_css_init which will re-initialize the CSS code, - * ia_css_unload_firmware to unload the firmware or ia_css_load_firmware - * to load new firmware - */ -void -ia_css_uninit(void); - -/* @brief Suspend CSS API for power down - * @return success or faulure code - * - * suspend shuts down the system by: - * unloading all the streams - * stopping SP - * performing uninit - * - * Currently stream memory is deallocated because of rmmgr issues. - * Need to come up with a bypass that will leave the streams intact. - */ -enum ia_css_err -ia_css_suspend(void); - -/* @brief Resume CSS API from power down - * @return success or failure code - * - * After a power cycle, this function will bring the CSS API back into - * a state where it can be started. - * This will re-initialize the hardware and all the streams. - * Call this function only after ia_css_suspend() has been called. - */ -enum ia_css_err -ia_css_resume(void); - -/* @brief Enable use of a separate queue for ISYS events. - * - * @param[in] enable: enable or disable use of separate ISYS event queues. - * @return error if called when SP is running. 
- * - * @deprecated{This is a temporary function that allows drivers to migrate to - * the use of the separate ISYS event queue. Once all drivers supports this, it - * will be made the default and this function will be removed. - * This function should only be called when the SP is not running, calling it - * when the SP is running will result in an error value being returned. } - */ -enum ia_css_err -ia_css_enable_isys_event_queue(bool enable); - -/* @brief Test whether the ISP has started. - * - * @return Boolean flag true if the ISP has started or false otherwise. - * - * Temporary function to poll whether the ISP has been started. Once it has, - * the sensor can also be started. */ -bool -ia_css_isp_has_started(void); - -/* @brief Test whether the SP has initialized. - * - * @return Boolean flag true if the SP has initialized or false otherwise. - * - * Temporary function to poll whether the SP has been initialized. Once it has, - * we can enqueue buffers. */ -bool -ia_css_sp_has_initialized(void); - -/* @brief Test whether the SP has terminated. - * - * @return Boolean flag true if the SP has terminated or false otherwise. - * - * Temporary function to poll whether the SP has been terminated. Once it has, - * we can switch mode. */ -bool -ia_css_sp_has_terminated(void); - -/* @brief start SP hardware - * - * @return IA_CSS_SUCCESS or error code upon error. - * - * It will boot the SP hardware and start multi-threading infrastructure. - * All threads will be started and blocked by semaphore. This function should - * be called before any ia_css_stream_start(). - */ -enum ia_css_err -ia_css_start_sp(void); - - -/* @brief stop SP hardware - * - * @return IA_CSS_SUCCESS or error code upon error. - * - * This function will terminate all threads and shut down SP. It should be - * called after all ia_css_stream_stop(). 
- */ -enum ia_css_err -ia_css_stop_sp(void); - -#endif /* __IA_CSS_CONTROL_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.c deleted file mode 100644 index 21b842379acc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_device_access.h" -#include /* for uint*, size_t */ -#include /* for hrt_address */ -#include /* for ia_css_hw_access_env */ -#include /* for assert */ - -static struct ia_css_hw_access_env my_env; - -void -ia_css_device_access_init(const struct ia_css_hw_access_env *env) -{ - assert(env != NULL); - - my_env = *env; -} - -uint8_t -ia_css_device_load_uint8(const hrt_address addr) -{ - return my_env.load_8(addr); -} - -uint16_t -ia_css_device_load_uint16(const hrt_address addr) -{ - return my_env.load_16(addr); -} - -uint32_t -ia_css_device_load_uint32(const hrt_address addr) -{ - return my_env.load_32(addr); -} - -uint64_t -ia_css_device_load_uint64(const hrt_address addr) -{ - assert(0); - - (void)addr; - return 0; -} - -void -ia_css_device_store_uint8(const hrt_address addr, const uint8_t data) -{ - my_env.store_8(addr, data); -} - -void -ia_css_device_store_uint16(const hrt_address addr, const uint16_t data) -{ - my_env.store_16(addr, data); -} - -void -ia_css_device_store_uint32(const 
hrt_address addr, const uint32_t data) -{ - my_env.store_32(addr, data); -} - -void -ia_css_device_store_uint64(const hrt_address addr, const uint64_t data) -{ - assert(0); - - (void)addr; - (void)data; -} - -void -ia_css_device_load(const hrt_address addr, void *data, const size_t size) -{ - my_env.load(addr, data, (uint32_t)size); -} - -void -ia_css_device_store(const hrt_address addr, const void *data, const size_t size) -{ - my_env.store(addr, data, (uint32_t)size); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h deleted file mode 100644 index 84a960b7abbc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_DEVICE_ACCESS_H -#define _IA_CSS_DEVICE_ACCESS_H - -/* @file - * File containing internal functions for the CSS-API to access the CSS device. 
- */ - -#include /* for uint*, size_t */ -#include /* for hrt_address */ -#include /* for ia_css_hw_access_env */ - -void -ia_css_device_access_init(const struct ia_css_hw_access_env *env); - -uint8_t -ia_css_device_load_uint8(const hrt_address addr); - -uint16_t -ia_css_device_load_uint16(const hrt_address addr); - -uint32_t -ia_css_device_load_uint32(const hrt_address addr); - -uint64_t -ia_css_device_load_uint64(const hrt_address addr); - -void -ia_css_device_store_uint8(const hrt_address addr, const uint8_t data); - -void -ia_css_device_store_uint16(const hrt_address addr, const uint16_t data); - -void -ia_css_device_store_uint32(const hrt_address addr, const uint32_t data); - -void -ia_css_device_store_uint64(const hrt_address addr, const uint64_t data); - -void -ia_css_device_load(const hrt_address addr, void *data, const size_t size); - -void -ia_css_device_store(const hrt_address addr, const void *data, const size_t size); - -#endif /* _IA_CSS_DEVICE_ACCESS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h deleted file mode 100644 index 1f01534964e3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_DVS_H -#define __IA_CSS_DVS_H - -/* @file - * This file contains types for DVS statistics - */ - -#include -#include "ia_css_types.h" -#include "ia_css_err.h" -#include "ia_css_stream_public.h" - -enum dvs_statistics_type { - DVS_STATISTICS, - DVS2_STATISTICS, - SKC_DVS_STATISTICS -}; - - -/* Structure that holds DVS statistics in the ISP internal - * format. Use ia_css_get_dvs_statistics() to translate - * this to the format used on the host (DVS engine). - * */ -struct ia_css_isp_dvs_statistics { - ia_css_ptr hor_proj; - ia_css_ptr ver_proj; - uint32_t hor_size; - uint32_t ver_size; - uint32_t exp_id; /** see ia_css_event_public.h for more detail */ - ia_css_ptr data_ptr; /* base pointer containing all memory */ - uint32_t size; /* size of allocated memory in data_ptr */ -}; - -/* Structure that holds SKC DVS statistics in the ISP internal - * format. Use ia_css_dvs_statistics_get() to translate this to - * the format used on the host. - * */ -struct ia_css_isp_skc_dvs_statistics; - - -#define SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT \ - ((3 * SIZE_OF_IA_CSS_PTR) + \ - (4 * sizeof(uint32_t))) - -/* Map with host-side pointers to ISP-format statistics. - * These pointers can either be copies of ISP data or memory mapped - * ISP pointers. - * All of the data behind these pointers is allocatd contiguously, the - * allocated pointer is stored in the data_ptr field. The other fields - * point into this one block of data. 
- */ -struct ia_css_isp_dvs_statistics_map { - void *data_ptr; - int32_t *hor_proj; - int32_t *ver_proj; - uint32_t size; /* total size in bytes */ - uint32_t data_allocated; /* indicate whether data was allocated */ -}; - -union ia_css_dvs_statistics_isp { - struct ia_css_isp_dvs_statistics *p_dvs_statistics_isp; - struct ia_css_isp_skc_dvs_statistics *p_skc_dvs_statistics_isp; -}; - -union ia_css_dvs_statistics_host { - struct ia_css_dvs_statistics *p_dvs_statistics_host; - struct ia_css_dvs2_statistics *p_dvs2_statistics_host; - struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host; -}; - -/* @brief Copy DVS statistics from an ISP buffer to a host buffer. - * @param[in] host_stats Host buffer - * @param[in] isp_stats ISP buffer - * @return error value if temporary memory cannot be allocated - * - * This may include a translation step as well depending - * on the ISP version. - * Always use this function, never copy the buffer directly. - * Note that this function uses the mem_load function from the CSS - * environment struct. - * In certain environments this may be slow. In those cases it is - * advised to map the ISP memory into a host-side pointer and use - * the ia_css_translate_dvs_statistics() function instead. - */ -enum ia_css_err -ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats, - const struct ia_css_isp_dvs_statistics *isp_stats); - -/* @brief Translate DVS statistics from ISP format to host format - * @param[in] host_stats Host buffer - * @param[in] isp_stats ISP buffer - * @return None - * - * This function translates the dvs statistics from the ISP-internal - * format to the format used by the DVS library on the CPU. - * This function takes a host-side pointer as input. This can either - * point to a copy of the data or be a memory mapped pointer to the - * ISP memory pages. 
- */ -void -ia_css_translate_dvs_statistics( - struct ia_css_dvs_statistics *host_stats, - const struct ia_css_isp_dvs_statistics_map *isp_stats); - -/* @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer. - * @param[in] host_stats Host buffer - * @param[in] isp_stats ISP buffer - * @return error value if temporary memory cannot be allocated - * - * This may include a translation step as well depending - * on the ISP version. - * Always use this function, never copy the buffer directly. - * Note that this function uses the mem_load function from the CSS - * environment struct. - * In certain environments this may be slow. In those cases it is - * advised to map the ISP memory into a host-side pointer and use - * the ia_css_translate_dvs2_statistics() function instead. - */ -enum ia_css_err -ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats, - const struct ia_css_isp_dvs_statistics *isp_stats); - -/* @brief Translate DVS2 statistics from ISP format to host format - * @param[in] host_stats Host buffer - * @param[in] isp_stats ISP buffer - * @return None - * - * This function translates the dvs2 statistics from the ISP-internal - * format to the format used by the DVS2 library on the CPU. - * This function takes a host-side pointer as input. This can either - * point to a copy of the data or be a memory mapped pointer to the - * ISP memory pages. - */ -void -ia_css_translate_dvs2_statistics( - struct ia_css_dvs2_statistics *host_stats, - const struct ia_css_isp_dvs_statistics_map *isp_stats); - -/* @brief Copy DVS statistics from an ISP buffer to a host buffer. 
- * @param[in] type - DVS statistics type - * @param[in] host_stats Host buffer - * @param[in] isp_stats ISP buffer - * @return None - */ -void -ia_css_dvs_statistics_get(enum dvs_statistics_type type, - union ia_css_dvs_statistics_host *host_stats, - const union ia_css_dvs_statistics_isp *isp_stats); - -/* @brief Allocate the DVS statistics memory on the ISP - * @param[in] grid The grid. - * @return Pointer to the allocated DVS statistics buffer on the ISP -*/ -struct ia_css_isp_dvs_statistics * -ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid); - -/* @brief Free the DVS statistics memory on the ISP - * @param[in] me Pointer to the DVS statistics buffer on the ISP. - * @return None -*/ -void -ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me); - -/* @brief Allocate the DVS 2.0 statistics memory - * @param[in] grid The grid. - * @return Pointer to the allocated DVS statistics buffer on the ISP -*/ -struct ia_css_isp_dvs_statistics * -ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid); - -/* @brief Free the DVS 2.0 statistics memory - * @param[in] me Pointer to the DVS statistics buffer on the ISP. - * @return None -*/ -void -ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me); - -/* @brief Allocate the DVS statistics memory on the host - * @param[in] grid The grid. - * @return Pointer to the allocated DVS statistics buffer on the host -*/ -struct ia_css_dvs_statistics * -ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid); - -/* @brief Free the DVS statistics memory on the host - * @param[in] me Pointer to the DVS statistics buffer on the host. - * @return None -*/ -void -ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me); - -/* @brief Allocate the DVS coefficients memory - * @param[in] grid The grid. 
- * @return Pointer to the allocated DVS coefficients buffer -*/ -struct ia_css_dvs_coefficients * -ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid); - -/* @brief Free the DVS coefficients memory - * @param[in] me Pointer to the DVS coefficients buffer. - * @return None - */ -void -ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me); - -/* @brief Allocate the DVS 2.0 statistics memory on the host - * @param[in] grid The grid. - * @return Pointer to the allocated DVS 2.0 statistics buffer on the host - */ -struct ia_css_dvs2_statistics * -ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid); - -/* @brief Free the DVS 2.0 statistics memory - * @param[in] me Pointer to the DVS 2.0 statistics buffer on the host. - * @return None -*/ -void -ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me); - -/* @brief Allocate the DVS 2.0 coefficients memory - * @param[in] grid The grid. - * @return Pointer to the allocated DVS 2.0 coefficients buffer -*/ -struct ia_css_dvs2_coefficients * -ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid); - -/* @brief Free the DVS 2.0 coefficients memory - * @param[in] me Pointer to the DVS 2.0 coefficients buffer. - * @return None -*/ -void -ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me); - -/* @brief Allocate the DVS 2.0 6-axis config memory - * @param[in] stream The stream. 
- * @return Pointer to the allocated DVS 6axis configuration buffer -*/ -struct ia_css_dvs_6axis_config * -ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream); - -/* @brief Free the DVS 2.0 6-axis config memory - * @param[in] dvs_6axis_config Pointer to the DVS 6axis configuration buffer - * @return None - */ -void -ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config); - -/* @brief Allocate a dvs statistics map structure - * @param[in] isp_stats pointer to ISP dvs statistis struct - * @param[in] data_ptr host-side pointer to ISP dvs statistics. - * @return Pointer to the allocated dvs statistics map - * - * This function allocates the ISP dvs statistics map structure - * and uses the data_ptr as base pointer to set the appropriate - * pointers to all relevant subsets of the dvs statistics (dmem, - * vmem, hmem). - * If the data_ptr is NULL, this function will allocate the host-side - * memory. This information is stored in the struct and used in the - * ia_css_isp_dvs_statistics_map_free() function to determine whether - * the memory should be freed or not. - * Note that this function does not allocate or map any ISP - * memory. -*/ -struct ia_css_isp_dvs_statistics_map * -ia_css_isp_dvs_statistics_map_allocate( - const struct ia_css_isp_dvs_statistics *isp_stats, - void *data_ptr); - -/* @brief Free the dvs statistics map - * @param[in] me Pointer to the dvs statistics map - * @return None - * - * This function frees the map struct. If the data_ptr inside it - * was allocated inside ia_css_isp_dvs_statistics_map_allocate(), it - * will be freed in this function. Otherwise it will not be freed. 
- */ -void -ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me); - -/* @brief Allocate memory for the SKC DVS statistics on the ISP - * @return Pointer to the allocated ACC DVS statistics buffer on the ISP -*/ -struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void); - -#endif /* __IA_CSS_DVS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h deleted file mode 100644 index 8b0218ee658d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_ENV_H -#define __IA_CSS_ENV_H - -#include -#include /* va_list */ -#include "ia_css_types.h" -#include "ia_css_acc_types.h" - -/* @file - * This file contains prototypes for functions that need to be provided to the - * CSS-API host-code by the environment in which the CSS-API code runs. - */ - -/* Memory allocation attributes, for use in ia_css_css_mem_env. */ -enum ia_css_mem_attr { - IA_CSS_MEM_ATTR_CACHED = 1 << 0, - IA_CSS_MEM_ATTR_ZEROED = 1 << 1, - IA_CSS_MEM_ATTR_PAGEALIGN = 1 << 2, - IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3, -}; - -/* Environment with function pointers for local IA memory allocation. - * This provides the CSS code with environment specific functionality - * for memory allocation of small local buffers such as local data structures. 
- * This is never expected to allocate more than one page of memory (4K bytes). - */ -struct ia_css_cpu_mem_env { - void (*flush)(struct ia_css_acc_fw *fw); - /** Flush function to flush the cache for given accelerator. */ -}; - -/* Environment with function pointers to access the CSS hardware. This includes - * registers and local memories. - */ -struct ia_css_hw_access_env { - void (*store_8)(hrt_address addr, uint8_t data); - /** Store an 8 bit value into an address in the CSS HW address space. - The address must be an 8 bit aligned address. */ - void (*store_16)(hrt_address addr, uint16_t data); - /** Store a 16 bit value into an address in the CSS HW address space. - The address must be a 16 bit aligned address. */ - void (*store_32)(hrt_address addr, uint32_t data); - /** Store a 32 bit value into an address in the CSS HW address space. - The address must be a 32 bit aligned address. */ - uint8_t (*load_8)(hrt_address addr); - /** Load an 8 bit value from an address in the CSS HW address - space. The address must be an 8 bit aligned address. */ - uint16_t (*load_16)(hrt_address addr); - /** Load a 16 bit value from an address in the CSS HW address - space. The address must be a 16 bit aligned address. */ - uint32_t (*load_32)(hrt_address addr); - /** Load a 32 bit value from an address in the CSS HW address - space. The address must be a 32 bit aligned address. */ - void (*store)(hrt_address addr, const void *data, uint32_t bytes); - /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */ - void (*load)(hrt_address addr, void *data, uint32_t bytes); - /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */ -}; - -/* Environment with function pointers to print error and debug messages. - */ -struct ia_css_print_env { - int (*debug_print)(const char *fmt, va_list args); - /** Print a debug message. 
*/ - int (*error_print)(const char *fmt, va_list args); - /** Print an error message.*/ -}; - -/* Environment structure. This includes function pointers to access several - * features provided by the environment in which the CSS API is used. - * This is used to run the camera IP in multiple platforms such as Linux, - * Windows and several simulation environments. - */ -struct ia_css_env { - struct ia_css_cpu_mem_env cpu_mem_env; /** local flush. */ - struct ia_css_hw_access_env hw_access_env; /** CSS HW access functions */ - struct ia_css_print_env print_env; /** Message printing env. */ -}; - -#endif /* __IA_CSS_ENV_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h deleted file mode 100644 index cf895815ea31..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_ERR_H -#define __IA_CSS_ERR_H - -/* @file - * This file contains possible return values for most - * functions in the CSS-API. - */ - -/* Errors, these values are used as the return value for most - * functions in this API. 
- */ -enum ia_css_err { - IA_CSS_SUCCESS, - IA_CSS_ERR_INTERNAL_ERROR, - IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY, - IA_CSS_ERR_INVALID_ARGUMENTS, - IA_CSS_ERR_SYSTEM_NOT_IDLE, - IA_CSS_ERR_MODE_HAS_NO_VIEWFINDER, - IA_CSS_ERR_QUEUE_IS_FULL, - IA_CSS_ERR_QUEUE_IS_EMPTY, - IA_CSS_ERR_RESOURCE_NOT_AVAILABLE, - IA_CSS_ERR_RESOURCE_LIST_TO_SMALL, - IA_CSS_ERR_RESOURCE_ITEMS_STILL_ALLOCATED, - IA_CSS_ERR_RESOURCE_EXHAUSTED, - IA_CSS_ERR_RESOURCE_ALREADY_ALLOCATED, - IA_CSS_ERR_VERSION_MISMATCH, - IA_CSS_ERR_NOT_SUPPORTED -}; - -/* FW warnings. This enum contains a value for each warning that - * the SP FW could indicate potential performance issue - */ -enum ia_css_fw_warning { - IA_CSS_FW_WARNING_NONE, - IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the ISys queue. - This warning can be avoided by de-queing ISYS buffers more timely. */ - IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the PSys queue. - This warning can be avoided by de-queing PSYS buffers more timely. */ - IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /* < CSS system delayed because of insufficient available buffers. - This warning can be avoided by unlocking locked frame-buffers more timely. */ - IA_CSS_FW_WARNING_EXP_ID_LOCKED, /* < Exposure ID skipped because the frame associated to it was still locked. - This warning can be avoided by unlocking locked frame-buffers more timely. */ - IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /* < Exposure ID cannot be found on the circular buffer. - This warning can be avoided by unlocking locked frame-buffers more timely. */ - IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /* < Frame and param pair mismatched in tagger. - This warning can be avoided by providing a param set for each frame. 
*/ -}; - -#endif /* __IA_CSS_ERR_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h deleted file mode 100644 index 036a2f03d3bd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_EVENT_PUBLIC_H -#define __IA_CSS_EVENT_PUBLIC_H - -/* @file - * This file contains CSS-API events functionality - */ - -#include /* uint8_t */ -#include /* ia_css_err */ -#include /* ia_css_pipe */ -#include /* ia_css_timer */ - -/* The event type, distinguishes the kind of events that - * can are generated by the CSS system. - * - * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC: - * 1) "enum ia_css_event_type" (ia_css_event_public.h) - * 2) "enum sh_css_sp_event_type" (sh_css_internal.h) - * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c) - * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c) - */ -enum ia_css_event_type { - IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = 1 << 0, - /** Output frame ready. */ - IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = 1 << 1, - /** Second output frame ready. */ - IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = 1 << 2, - /** Viewfinder Output frame ready. */ - IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = 1 << 3, - /** Second viewfinder Output frame ready. 
*/ - IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = 1 << 4, - /** Indication that 3A statistics are available. */ - IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = 1 << 5, - /** Indication that DIS statistics are available. */ - IA_CSS_EVENT_TYPE_PIPELINE_DONE = 1 << 6, - /** Pipeline Done event, sent after last pipeline stage. */ - IA_CSS_EVENT_TYPE_FRAME_TAGGED = 1 << 7, - /** Frame tagged. */ - IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = 1 << 8, - /** Input frame ready. */ - IA_CSS_EVENT_TYPE_METADATA_DONE = 1 << 9, - /** Metadata ready. */ - IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = 1 << 10, - /** Indication that LACE statistics are available. */ - IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = 1 << 11, - /** Extension stage complete. */ - IA_CSS_EVENT_TYPE_TIMER = 1 << 12, - /** Timer event for measuring the SP side latencies. It contains the - 32-bit timer value from the SP */ - IA_CSS_EVENT_TYPE_PORT_EOF = 1 << 13, - /** End Of Frame event, sent when in buffered sensor mode. */ - IA_CSS_EVENT_TYPE_FW_WARNING = 1 << 14, - /** Performance warning encounter by FW */ - IA_CSS_EVENT_TYPE_FW_ASSERT = 1 << 15, - /** Assertion hit by FW */ -}; - -#define IA_CSS_EVENT_TYPE_NONE 0 - -/* IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events. - * The other events (such as PORT_EOF) cannot be enabled/disabled - * and are hence excluded from this macro. - */ -#define IA_CSS_EVENT_TYPE_ALL \ - (IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE | \ - IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE | \ - IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE | \ - IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE | \ - IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE | \ - IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE | \ - IA_CSS_EVENT_TYPE_PIPELINE_DONE | \ - IA_CSS_EVENT_TYPE_FRAME_TAGGED | \ - IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE | \ - IA_CSS_EVENT_TYPE_METADATA_DONE | \ - IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE | \ - IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE) - -/* The event struct, container for the event type and its related values. 
- * Depending on the event type, either pipe or port will be filled. - * Pipeline related events (like buffer/frame events) will return a valid and filled pipe handle. - * For non pipeline related events (but i.e. stream specific, like EOF event), the port will be - * filled. - */ -struct ia_css_event { - struct ia_css_pipe *pipe; - /** Pipe handle on which event happened, NULL for non pipe related - events. */ - enum ia_css_event_type type; - /** Type of Event, always valid/filled. */ - uint8_t port; - /** Port number for EOF event (not valid for other events). */ - uint8_t exp_id; - /** Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events) - The exposure ID is unique only within a logical stream and it is - only generated on systems that have an input system (such as 2400 - and 2401). - Most outputs produced by the CSS are tagged with an exposure ID. - This allows users of the CSS API to keep track of which buffer - was generated from which sensor output frame. This includes: - EOF event, output frames, 3A statistics, DVS statistics and - sensor metadata. - Exposure IDs start at IA_CSS_MIN_EXPOSURE_ID, increment by one - until IA_CSS_MAX_EXPOSURE_ID is reached, after that they wrap - around to IA_CSS_MIN_EXPOSURE_ID again. - Note that in case frames are dropped, this will not be reflected - in the exposure IDs. Therefor applications should not use this - to detect frame drops. */ - uint32_t fw_handle; - /** Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other - events). */ - enum ia_css_fw_warning fw_warning; - /** Firmware warning code, only for WARNING events. */ - uint8_t fw_assert_module_id; - /** Firmware module id, only for ASSERT events, should be logged by driver. */ - uint16_t fw_assert_line_no; - /** Firmware line number, only for ASSERT events, should be logged by driver. */ - clock_value_t timer_data; - /** For storing the full 32-bit of the timer value. 
Valid only for TIMER - event */ - uint8_t timer_code; - /** For storing the code of the TIMER event. Valid only for - TIMER event */ - uint8_t timer_subcode; - /** For storing the subcode of the TIMER event. Valid only - for TIMER event */ -}; - -/* @brief Dequeue a PSYS event from the CSS system. - * - * @param[out] event Pointer to the event struct which will be filled by - * this function if an event is available. - * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are - * available or - * IA_CSS_SUCCESS otherwise. - * - * This function dequeues an event from the PSYS event queue. The queue is - * between the Host CPU and the CSS system. This function can be - * called after an interrupt has been generated that signalled that a new event - * was available and can be used in a polling-like situation where the NO_EVENT - * return value is used to determine whether an event was available or not. - */ -enum ia_css_err -ia_css_dequeue_psys_event(struct ia_css_event *event); - -/* @brief Dequeue an event from the CSS system. - * - * @param[out] event Pointer to the event struct which will be filled by - * this function if an event is available. - * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are - * available or - * IA_CSS_SUCCESS otherwise. - * - * deprecated{Use ia_css_dequeue_psys_event instead}. - * Unless the isys event queue is explicitly enabled, this function will - * dequeue both isys (EOF) and psys events (all others). - */ -enum ia_css_err -ia_css_dequeue_event(struct ia_css_event *event); - -/* @brief Dequeue an ISYS event from the CSS system. - * - * @param[out] event Pointer to the event struct which will be filled by - * this function if an event is available. - * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are - * available or - * IA_CSS_SUCCESS otherwise. - * - * This function dequeues an event from the ISYS event queue. The queue is - * between host and the CSS system. 
- * Unlike the ia_css_dequeue_event() function, this function can be called - * directly from an interrupt service routine (ISR) and it is safe to call - * this function in parallel with other CSS API functions (but only one - * call to this function should be in flight at any point in time). - * - * The reason for having the ISYS events separate is to prevent them from - * incurring additional latency due to locks being held by other CSS API - * functions. - */ -enum ia_css_err -ia_css_dequeue_isys_event(struct ia_css_event *event); - -#endif /* __IA_CSS_EVENT_PUBLIC_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h deleted file mode 100644 index d7d7f0a995e5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_FIRMWARE_H -#define __IA_CSS_FIRMWARE_H - -/* @file - * This file contains firmware loading/unloading support functionality - */ - -#include "ia_css_err.h" -#include "ia_css_env.h" - -/* CSS firmware package structure. - */ -struct ia_css_fw { - void *data; /** pointer to the firmware data */ - unsigned int bytes; /** length in bytes of firmware data */ -}; - -/* @brief Loads the firmware - * @param[in] env Environment, provides functions to access the - * environment in which the CSS code runs. 
This is - * used for host side memory access and message - * printing. - * @param[in] fw Firmware package containing the firmware for all - * predefined ISP binaries. - * @return Returns IA_CSS_ERR_INTERNAL_ERROR in case of any - * errors and IA_CSS_SUCCESS otherwise. - * - * This function interprets the firmware package. All - * contents of this firmware package are copied into local data structures, so - * the fw pointer could be freed after this function completes. - * - * Rationale for this function is that it can be called before ia_css_init, and thus - * speeds up ia_css_init (ia_css_init is called each time a stream is created but the - * firmware only needs to be loaded once). - */ -enum ia_css_err -ia_css_load_firmware(const struct ia_css_env *env, - const struct ia_css_fw *fw); - -/* @brief Unloads the firmware - * @return None - * - * This function unloads the firmware loaded by ia_css_load_firmware. - * It is pointless to call this function if no firmware is loaded, - * but it won't harm. Use this to deallocate all memory associated with the firmware. - */ -void -ia_css_unload_firmware(void); - -/* @brief Checks firmware version - * @param[in] fw Firmware package containing the firmware for all - * predefined ISP binaries. - * @return Returns true when the firmware version matches with the CSS - * host code version and returns false otherwise. - * This function checks if the firmware package version matches with the CSS host code version. - */ -bool -ia_css_check_firmware_version(const struct ia_css_fw *fw); - -#endif /* __IA_CSS_FIRMWARE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h deleted file mode 100644 index e5ffc579aef1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_FRAC_H -#define _IA_CSS_FRAC_H - -/* @file - * This file contains typedefs used for fractional numbers - */ - -#include - -/* Fixed point types. - * NOTE: the 16 bit fixed point types actually occupy 32 bits - * to save on extension operations in the ISP code. - */ -/* Unsigned fixed point value, 0 integer bits, 16 fractional bits */ -typedef uint32_t ia_css_u0_16; -/* Unsigned fixed point value, 5 integer bits, 11 fractional bits */ -typedef uint32_t ia_css_u5_11; -/* Unsigned fixed point value, 8 integer bits, 8 fractional bits */ -typedef uint32_t ia_css_u8_8; -/* Signed fixed point value, 0 integer bits, 15 fractional bits */ -typedef int32_t ia_css_s0_15; - -#endif /* _IA_CSS_FRAC_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h deleted file mode 100644 index 2f177edc36ac..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_FRAME_FORMAT_H -#define __IA_CSS_FRAME_FORMAT_H - -/* @file - * This file contains information about formats supported in the ISP - */ - -/* Frame formats, some of these come from fourcc.org, others are - better explained by video4linux2. The NV11 seems to be described only - on MSDN pages, but even those seem to be gone now. - Frames can come in many forms, the main categories are RAW, RGB and YUV - (or YCbCr). The YUV frames come in 4 flavors, determined by how the U and V - values are subsampled: - 1. YUV420: hor = 2, ver = 2 - 2. YUV411: hor = 4, ver = 1 - 3. YUV422: hor = 2, ver = 1 - 4. YUV444: hor = 1, ver = 1 - - Warning: not all frame formats are supported as input or output to/from ISP. - Some of these formats are therefore not defined in the output table module. - Modifications in below frame format enum can require modifications in the - output table module. - - Warning2: Throughout the CSS code assumptions are made on the order - of formats in this enumeration type, or some sort of copy is maintained. 
- The following files are identified: - - FileSupport.h - - css/isp/kernels/fc/fc_1.0/formats.isp.c - - css/isp/kernels/output/output_1.0/output_table.isp.c - - css/isp/kernels/output/sc_output_1.0/formats.hive.c - - css/isp/modes/interface/isp_formats.isp.h - - css/bxt_sandbox/psyspoc/interface/ia_css_pg_info.h - - css/bxt_sandbox/psysapi/data/interface/ia_css_program_group_data.h - - css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h -*/ -enum ia_css_frame_format { - IA_CSS_FRAME_FORMAT_NV11 = 0, /** 12 bit YUV 411, Y, UV plane */ - IA_CSS_FRAME_FORMAT_NV12, /** 12 bit YUV 420, Y, UV plane */ - IA_CSS_FRAME_FORMAT_NV12_16, /** 16 bit YUV 420, Y, UV plane */ - IA_CSS_FRAME_FORMAT_NV12_TILEY, /** 12 bit YUV 420, Intel proprietary tiled format, TileY */ - IA_CSS_FRAME_FORMAT_NV16, /** 16 bit YUV 422, Y, UV plane */ - IA_CSS_FRAME_FORMAT_NV21, /** 12 bit YUV 420, Y, VU plane */ - IA_CSS_FRAME_FORMAT_NV61, /** 16 bit YUV 422, Y, VU plane */ - IA_CSS_FRAME_FORMAT_YV12, /** 12 bit YUV 420, Y, V, U plane */ - IA_CSS_FRAME_FORMAT_YV16, /** 16 bit YUV 422, Y, V, U plane */ - IA_CSS_FRAME_FORMAT_YUV420, /** 12 bit YUV 420, Y, U, V plane */ - IA_CSS_FRAME_FORMAT_YUV420_16, /** yuv420, 16 bits per subpixel */ - IA_CSS_FRAME_FORMAT_YUV422, /** 16 bit YUV 422, Y, U, V plane */ - IA_CSS_FRAME_FORMAT_YUV422_16, /** yuv422, 16 bits per subpixel */ - IA_CSS_FRAME_FORMAT_UYVY, /** 16 bit YUV 422, UYVY interleaved */ - IA_CSS_FRAME_FORMAT_YUYV, /** 16 bit YUV 422, YUYV interleaved */ - IA_CSS_FRAME_FORMAT_YUV444, /** 24 bit YUV 444, Y, U, V plane */ - IA_CSS_FRAME_FORMAT_YUV_LINE, /** Internal format, 2 y lines followed - by a uvinterleaved line */ - IA_CSS_FRAME_FORMAT_RAW, /** RAW, 1 plane */ - IA_CSS_FRAME_FORMAT_RGB565, /** 16 bit RGB, 1 plane. Each 3 sub - pixels are packed into one 16 bit - value, 5 bits for R, 6 bits for G - and 5 bits for B. 
*/ - IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /** 24 bit RGB, 3 planes */ - IA_CSS_FRAME_FORMAT_RGBA888, /** 32 bit RGBA, 1 plane, A=Alpha - (alpha is unused) */ - IA_CSS_FRAME_FORMAT_QPLANE6, /** Internal, for advanced ISP */ - IA_CSS_FRAME_FORMAT_BINARY_8, /** byte stream, used for jpeg. For - frames of this type, we set the - height to 1 and the width to the - number of allocated bytes. */ - IA_CSS_FRAME_FORMAT_MIPI, /** MIPI frame, 1 plane */ - IA_CSS_FRAME_FORMAT_RAW_PACKED, /** RAW, 1 plane, packed */ - IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, /** 8 bit per Y/U/V. - Y odd line; UYVY - interleaved even line */ - IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /** Legacy YUV420. UY odd - line; VY even line */ - IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 /** 10 bit per Y/U/V. Y odd - line; UYVY interleaved - even line */ -}; - -/* NOTE: IA_CSS_FRAME_FORMAT_NUM was purposely defined outside of enum type ia_css_frame_format, */ -/* because of issues this would cause with the Clockwork code checking tool. */ -#define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1) - -/* Number of valid output frame formats for ISP **/ -#define IA_CSS_FRAME_OUT_FORMAT_NUM (IA_CSS_FRAME_FORMAT_RGBA888 + 1) - -#endif /* __IA_CSS_FRAME_FORMAT_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h deleted file mode 100644 index 89943e8bf180..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_FRAME_PUBLIC_H -#define __IA_CSS_FRAME_PUBLIC_H - -/* @file - * This file contains structs to describe various frame-formats supported by the ISP. - */ - -#include -#include "ia_css_err.h" -#include "ia_css_types.h" -#include "ia_css_frame_format.h" -#include "ia_css_buffer.h" - -/* For RAW input, the bayer order needs to be specified separately. There - * are 4 possible orders. The name is constructed by taking the first two - * colors on the first line and the first two colors from the second line. - */ -enum ia_css_bayer_order { - IA_CSS_BAYER_ORDER_GRBG, /** GRGRGRGRGR .. BGBGBGBGBG */ - IA_CSS_BAYER_ORDER_RGGB, /** RGRGRGRGRG .. GBGBGBGBGB */ - IA_CSS_BAYER_ORDER_BGGR, /** BGBGBGBGBG .. GRGRGRGRGR */ - IA_CSS_BAYER_ORDER_GBRG, /** GBGBGBGBGB .. RGRGRGRGRG */ -}; -#define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1) - -/* Frame plane structure. This describes one plane in an image - * frame buffer. - */ -struct ia_css_frame_plane { - unsigned int height; /** height of a plane in lines */ - unsigned int width; /** width of a line, in DMA elements, note that - for RGB565 the three subpixels are stored in - one element. For all other formats this is - the number of subpixels per line. */ - unsigned int stride; /** stride of a line in bytes */ - unsigned int offset; /** offset in bytes to start of frame data. - offset is wrt data field in ia_css_frame */ -}; - -/* Binary "plane". This is used to story binary streams such as jpeg - * images. This is not actually a real plane. - */ -struct ia_css_frame_binary_plane { - unsigned int size; /** number of bytes in the stream */ - struct ia_css_frame_plane data; /** plane */ -}; - -/* Container for planar YUV frames. This contains 3 planes. 
- */ -struct ia_css_frame_yuv_planes { - struct ia_css_frame_plane y; /** Y plane */ - struct ia_css_frame_plane u; /** U plane */ - struct ia_css_frame_plane v; /** V plane */ -}; - -/* Container for semi-planar YUV frames. - */ -struct ia_css_frame_nv_planes { - struct ia_css_frame_plane y; /** Y plane */ - struct ia_css_frame_plane uv; /** UV plane */ -}; - -/* Container for planar RGB frames. Each color has its own plane. - */ -struct ia_css_frame_rgb_planes { - struct ia_css_frame_plane r; /** Red plane */ - struct ia_css_frame_plane g; /** Green plane */ - struct ia_css_frame_plane b; /** Blue plane */ -}; - -/* Container for 6-plane frames. These frames are used internally - * in the advanced ISP only. - */ -struct ia_css_frame_plane6_planes { - struct ia_css_frame_plane r; /** Red plane */ - struct ia_css_frame_plane r_at_b; /** Red at blue plane */ - struct ia_css_frame_plane gr; /** Red-green plane */ - struct ia_css_frame_plane gb; /** Blue-green plane */ - struct ia_css_frame_plane b; /** Blue plane */ - struct ia_css_frame_plane b_at_r; /** Blue at red plane */ -}; - -/* Crop info struct - stores the lines to be cropped in isp */ -struct ia_css_crop_info { - /* the final start column and start line - * sum of lines to be cropped + bayer offset - */ - unsigned int start_column; - unsigned int start_line; -}; - -/* Frame info struct. This describes the contents of an image frame buffer. 
- */ -struct ia_css_frame_info { - struct ia_css_resolution res; /** Frame resolution (valid data) */ - unsigned int padded_width; /** stride of line in memory (in pixels) */ - enum ia_css_frame_format format; /** format of the frame data */ - unsigned int raw_bit_depth; /** number of valid bits per pixel, - only valid for RAW bayer frames */ - enum ia_css_bayer_order raw_bayer_order; /** bayer order, only valid - for RAW bayer frames */ - /* the params below are computed based on bayer_order - * we can remove the raw_bayer_order if it is redundant - * keeping it for now as bxt and fpn code seem to use it - */ - struct ia_css_crop_info crop_info; -}; - -#define IA_CSS_BINARY_DEFAULT_FRAME_INFO \ -(struct ia_css_frame_info) { \ - .format = IA_CSS_FRAME_FORMAT_NUM, \ - .raw_bayer_order = IA_CSS_BAYER_ORDER_NUM, \ -} - -/** - * Specifies the DVS loop delay in "frame periods" - */ -enum ia_css_frame_delay { - IA_CSS_FRAME_DELAY_0, /** Frame delay = 0 */ - IA_CSS_FRAME_DELAY_1, /** Frame delay = 1 */ - IA_CSS_FRAME_DELAY_2 /** Frame delay = 2 */ -}; - -enum ia_css_frame_flash_state { - IA_CSS_FRAME_FLASH_STATE_NONE, - IA_CSS_FRAME_FLASH_STATE_PARTIAL, - IA_CSS_FRAME_FLASH_STATE_FULL -}; - -/* Frame structure. This structure describes an image buffer or frame. - * This is the main structure used for all input and output images. - */ -struct ia_css_frame { - struct ia_css_frame_info info; /** info struct describing the frame */ - ia_css_ptr data; /** pointer to start of image data */ - unsigned int data_bytes; /** size of image data in bytes */ - /* LA: move this to ia_css_buffer */ - /* - * -1 if data address is static during life time of pipeline - * >=0 if data address can change per pipeline/frame iteration - * index to dynamic data: ia_css_frame_in, ia_css_frame_out - * ia_css_frame_out_vf - * index to host-sp queue id: queue_0, queue_1 etc. 
- */ - int dynamic_queue_id; - /* - * if it is dynamic frame, buf_type indicates which buffer type it - * should use for event generation. we have this because in vf_pp - * binary, we use output port, but we expect VF_OUTPUT_DONE event - */ - enum ia_css_buffer_type buf_type; - enum ia_css_frame_flash_state flash_state; - unsigned int exp_id; - /** exposure id, see ia_css_event_public.h for more detail */ - uint32_t isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */ - bool valid; /** First video output frame is not valid */ - bool contiguous; /** memory is allocated physically contiguously */ - union { - unsigned int _initialisation_dummy; - struct ia_css_frame_plane raw; - struct ia_css_frame_plane rgb; - struct ia_css_frame_rgb_planes planar_rgb; - struct ia_css_frame_plane yuyv; - struct ia_css_frame_yuv_planes yuv; - struct ia_css_frame_nv_planes nv; - struct ia_css_frame_plane6_planes plane6; - struct ia_css_frame_binary_plane binary; - } planes; /** frame planes, select the right one based on - info.format */ -}; - -#define DEFAULT_FRAME \ -(struct ia_css_frame) { \ - .info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \ - .dynamic_queue_id = SH_CSS_INVALID_QUEUE_ID, \ - .buf_type = IA_CSS_BUFFER_TYPE_INVALID, \ - .flash_state = IA_CSS_FRAME_FLASH_STATE_NONE, \ -} - -/* @brief Fill a frame with zeros - * - * @param frame The frame. - * @return None - * - * Fill a frame with pixel values of zero - */ -void ia_css_frame_zero(struct ia_css_frame *frame); - -/* @brief Allocate a CSS frame structure - * - * @param frame The allocated frame. - * @param width The width (in pixels) of the frame. - * @param height The height (in lines) of the frame. - * @param format The frame format. - * @param stride The padded stride, in pixels. - * @param raw_bit_depth The raw bit depth, in bits. - * @return The error code. - * - * Allocate a CSS frame structure. The memory for the frame data will be - * allocated in the CSS address space. 
- */ -enum ia_css_err -ia_css_frame_allocate(struct ia_css_frame **frame, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int stride, - unsigned int raw_bit_depth); - -/* @brief Allocate a CSS frame structure using a frame info structure. - * - * @param frame The allocated frame. - * @param[in] info The frame info structure. - * @return The error code. - * - * Allocate a frame using the resolution and format from a frame info struct. - * This is a convenience function, implemented on top of - * ia_css_frame_allocate(). - */ -enum ia_css_err -ia_css_frame_allocate_from_info(struct ia_css_frame **frame, - const struct ia_css_frame_info *info); -/* @brief Free a CSS frame structure. - * - * @param[in] frame Pointer to the frame. - * @return None - * - * Free a CSS frame structure. This will free both the frame structure - * and the pixel data pointer contained within the frame structure. - */ -void -ia_css_frame_free(struct ia_css_frame *frame); - -/* @brief Allocate a contiguous CSS frame structure - * - * @param frame The allocated frame. - * @param width The width (in pixels) of the frame. - * @param height The height (in lines) of the frame. - * @param format The frame format. - * @param stride The padded stride, in pixels. - * @param raw_bit_depth The raw bit depth, in bits. - * @return The error code. - * - * Contiguous frame allocation, only for FPGA display driver which needs - * physically contiguous memory. - * Deprecated. - */ -enum ia_css_err -ia_css_frame_allocate_contiguous(struct ia_css_frame **frame, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int stride, - unsigned int raw_bit_depth); - -/* @brief Allocate a contiguous CSS frame from a frame info structure. - * - * @param frame The allocated frame. - * @param[in] info The frame info structure. - * @return The error code. - * - * Allocate a frame using the resolution and format from a frame info struct. 
- * This is a convenience function, implemented on top of - * ia_css_frame_allocate_contiguous(). - * Only for FPGA display driver which needs physically contiguous memory. - * Deprecated. - */ -enum ia_css_err -ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame, - const struct ia_css_frame_info *info); - -/* @brief Allocate a CSS frame structure using a frame info structure. - * - * @param frame The allocated frame. - * @param[in] info The frame info structure. - * @return The error code. - * - * Allocate an empty CSS frame with no data buffer using the parameters - * in the frame info. - */ -enum ia_css_err -ia_css_frame_create_from_info(struct ia_css_frame **frame, - const struct ia_css_frame_info *info); - -/* @brief Set a mapped data buffer to a CSS frame - * - * @param[in] frame Valid CSS frame pointer - * @param[in] mapped_data Mapped data buffer to be assigned to the CSS frame - * @param[in] data_size_bytes Size of the mapped_data in bytes - * @return The error code. - * - * Sets a mapped data buffer to this frame. This function can be called multiple - * times with different buffers or NULL to reset the data pointer. This API - * would not try free the mapped_data and its the callers responsiblity to - * free the mapped_data buffer. However if ia_css_frame_free() is called and - * the frame had a valid data buffer, it would be freed along with the frame. - */ -enum ia_css_err -ia_css_frame_set_data(struct ia_css_frame *frame, - const ia_css_ptr mapped_data, - size_t data_size_bytes); - -/* @brief Map an existing frame data pointer to a CSS frame. - * - * @param frame Pointer to the frame to be initialized - * @param[in] info The frame info. - * @param[in] data Pointer to the allocated frame data. - * @param[in] attribute Attributes to be passed to mmgr_mmap. - * @param[in] context Pointer to the a context to be passed to mmgr_mmap. - * @return The allocated frame structure. 
- * - * This function maps a pre-allocated pointer into a CSS frame. This can be - * used when an upper software layer is responsible for allocating the frame - * data and it wants to share that frame pointer with the CSS code. - * This function will fill the CSS frame structure just like - * ia_css_frame_allocate() does, but instead of allocating the memory, it will - * map the pre-allocated memory into the CSS address space. - */ -enum ia_css_err -ia_css_frame_map(struct ia_css_frame **frame, - const struct ia_css_frame_info *info, - const void __user *data, - uint16_t attribute, - void *context); - -/* @brief Unmap a CSS frame structure. - * - * @param[in] frame Pointer to the CSS frame. - * @return None - * - * This function unmaps the frame data pointer within a CSS frame and - * then frees the CSS frame structure. Use this for frame pointers created - * using ia_css_frame_map(). - */ -void -ia_css_frame_unmap(struct ia_css_frame *frame); - -#endif /* __IA_CSS_FRAME_PUBLIC_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_host_data.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_host_data.h deleted file mode 100644 index 4557e66891df..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_host_data.h +++ /dev/null @@ -1,46 +0,0 @@ -/* Release Version: irci_stable_candrpv_0415_20150521_0458 */ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __SH_CSS_HOST_DATA_H -#define __SH_CSS_HOST_DATA_H - -#include /* ia_css_pipe */ - -/** - * @brief Allocate structure ia_css_host_data. - * - * @param[in] size Size of the requested host data - * - * @return - * - NULL, can't allocate requested size - * - pointer to structure, field address points to host data with size bytes - */ -struct ia_css_host_data * -ia_css_host_data_allocate(size_t size); - -/** - * @brief Free structure ia_css_host_data. - * - * @param[in] me Pointer to structure, if a NULL is passed functions - * returns without error. Otherwise a valid pointer to - * structure must be passed and a related memory - * is freed. - * - * @return - */ -void ia_css_host_data_free(struct ia_css_host_data *me); - -#endif /* __SH_CSS_HOST_DATA_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h deleted file mode 100644 index ad9ca5449369..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -/* For MIPI_PORT0_ID to MIPI_PORT2_ID */ -#include "system_global.h" - -#ifndef __IA_CSS_INPUT_PORT_H -#define __IA_CSS_INPUT_PORT_H - -/* @file - * This file contains information about the possible input ports for CSS - */ - -/* Backward compatible for CSS API 2.0 only - * TO BE REMOVED when all drivers move to CSS API 2.1 - */ -#define IA_CSS_CSI2_PORT_4LANE MIPI_PORT0_ID -#define IA_CSS_CSI2_PORT_1LANE MIPI_PORT1_ID -#define IA_CSS_CSI2_PORT_2LANE MIPI_PORT2_ID - -/* The CSI2 interface supports 2 types of compression or can - * be run without compression. - */ -enum ia_css_csi2_compression_type { - IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /** No compression */ - IA_CSS_CSI2_COMPRESSION_TYPE_1, /** Compression scheme 1 */ - IA_CSS_CSI2_COMPRESSION_TYPE_2 /** Compression scheme 2 */ -}; - -struct ia_css_csi2_compression { - enum ia_css_csi2_compression_type type; - /** Compression used */ - unsigned int compressed_bits_per_pixel; - /** Compressed bits per pixel (only when compression is enabled) */ - unsigned int uncompressed_bits_per_pixel; - /** Uncompressed bits per pixel (only when compression is enabled) */ -}; - -/* Input port structure. - */ -struct ia_css_input_port { - enum mipi_port_id port; /** Physical CSI-2 port */ - unsigned int num_lanes; /** Number of lanes used (4-lane port only) */ - unsigned int timeout; /** Timeout value */ - unsigned int rxcount; /** Register value, should include all lanes */ - struct ia_css_csi2_compression compression; /** Compression used */ -}; - -#endif /* __IA_CSS_INPUT_PORT_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h deleted file mode 100644 index c8840138899a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_IRQ_H -#define __IA_CSS_IRQ_H - -/* @file - * This file contains information for Interrupts/IRQs from CSS - */ - -#include "ia_css_err.h" -#include "ia_css_pipe_public.h" -#include "ia_css_input_port.h" - -/* Interrupt types, these enumerate all supported interrupt types. - */ -enum ia_css_irq_type { - IA_CSS_IRQ_TYPE_EDGE, /** Edge (level) sensitive interrupt */ - IA_CSS_IRQ_TYPE_PULSE /** Pulse-shaped interrupt */ -}; - -/* Interrupt request type. - * When the CSS hardware generates an interrupt, a function in this API - * needs to be called to retrieve information about the interrupt. - * This interrupt type is part of this information and indicates what - * type of information the interrupt signals. - * - * Note that one interrupt can carry multiple interrupt types. For - * example: the online video ISP will generate only 2 interrupts, one to - * signal that the statistics (3a and DIS) are ready and one to signal - * that all output frames are done (output and viewfinder). 
- * - * DEPRECATED, this interface is not portable it should only define user - * (SW) interrupts - */ -enum ia_css_irq_info { - IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = 1 << 0, - /** the css receiver has encountered an error */ - IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = 1 << 1, - /** the FIFO in the csi receiver has overflown */ - IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = 1 << 2, - /** the css receiver received the start of frame */ - IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = 1 << 3, - /** the css receiver received the end of frame */ - IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = 1 << 4, - /** the css receiver received the start of line */ - IA_CSS_IRQ_INFO_PSYS_EVENTS_READY = 1 << 5, - /** One or more events are available in the PSYS event queue */ - IA_CSS_IRQ_INFO_EVENTS_READY = IA_CSS_IRQ_INFO_PSYS_EVENTS_READY, - /** deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY, - * same functionality.} */ - IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = 1 << 6, - /** the css receiver received the end of line */ - IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7, - /** the css receiver received a change in side band signals */ - IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = 1 << 8, - /** generic short packets (0) */ - IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = 1 << 9, - /** generic short packets (1) */ - IA_CSS_IRQ_INFO_IF_PRIM_ERROR = 1 << 10, - /** the primary input formatter (A) has encountered an error */ - IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = 1 << 11, - /** the primary input formatter (B) has encountered an error */ - IA_CSS_IRQ_INFO_IF_SEC_ERROR = 1 << 12, - /** the secondary input formatter has encountered an error */ - IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = 1 << 13, - /** the stream-to-memory device has encountered an error */ - IA_CSS_IRQ_INFO_SW_0 = 1 << 14, - /** software interrupt 0 */ - IA_CSS_IRQ_INFO_SW_1 = 1 << 15, - /** software interrupt 1 */ - IA_CSS_IRQ_INFO_SW_2 = 1 << 16, - /** software interrupt 2 */ - IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = 1 << 17, - /** ISP 
binary statistics are ready */ - IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = 1 << 18, - /** the input system in in error */ - IA_CSS_IRQ_INFO_IF_ERROR = 1 << 19, - /** the input formatter in in error */ - IA_CSS_IRQ_INFO_DMA_ERROR = 1 << 20, - /** the dma in in error */ - IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = 1 << 21, - /** end-of-frame events are ready in the isys_event queue */ -}; - -/* CSS receiver error types. Whenever the CSS receiver has encountered - * an error, this enumeration is used to indicate which errors have occurred. - * - * Note that multiple error flags can be enabled at once and that this is in - * fact common (whenever an error occurs, it usually results in multiple - * errors). - * - * DEPRECATED: This interface is not portable, different systems have - * different receiver types, or possibly none in case of tests systems. - */ -enum ia_css_rx_irq_info { - IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = 1U << 0, /** buffer overrun */ - IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /** entering sleep mode */ - IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = 1U << 2, /** exited sleep mode */ - IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = 1U << 3, /** ECC corrected */ - IA_CSS_RX_IRQ_INFO_ERR_SOT = 1U << 4, - /** Start of transmission */ - IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = 1U << 5, /** SOT sync (??) */ - IA_CSS_RX_IRQ_INFO_ERR_CONTROL = 1U << 6, /** Control (??) */ - IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = 1U << 7, /** Double ECC */ - IA_CSS_RX_IRQ_INFO_ERR_CRC = 1U << 8, /** CRC error */ - IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = 1U << 9, /** Unknown ID */ - IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = 1U << 10,/** Frame sync error */ - IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = 1U << 11,/** Frame data error */ - IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/** Timeout occurred */ - IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1U << 13,/** Unknown escape seq. */ - IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = 1U << 14,/** Line Sync error */ - IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = 1U << 15, -}; - -/* Interrupt info structure. 
This structure contains information about an - * interrupt. This needs to be used after an interrupt is received on the IA - * to perform the correct action. - */ -struct ia_css_irq { - enum ia_css_irq_info type; /** Interrupt type. */ - unsigned int sw_irq_0_val; /** In case of SW interrupt 0, value. */ - unsigned int sw_irq_1_val; /** In case of SW interrupt 1, value. */ - unsigned int sw_irq_2_val; /** In case of SW interrupt 2, value. */ - struct ia_css_pipe *pipe; - /** The image pipe that generated the interrupt. */ -}; - -/* @brief Obtain interrupt information. - * - * @param[out] info Pointer to the interrupt info. The interrupt - * information wil be written to this info. - * @return If an error is encountered during the interrupt info - * and no interrupt could be translated successfully, this - * will return IA_CSS_INTERNAL_ERROR. Otherwise - * IA_CSS_SUCCESS. - * - * This function is expected to be executed after an interrupt has been sent - * to the IA from the CSS. This function returns information about the interrupt - * which is needed by the IA code to properly handle the interrupt. This - * information includes the image pipe, buffer type etc. - */ -enum ia_css_err -ia_css_irq_translate(unsigned int *info); - -/* @brief Get CSI receiver error info. - * - * @param[out] irq_bits Pointer to the interrupt bits. The interrupt - * bits will be written this info. - * This will be the error bits that are enabled in the CSI - * receiver error register. - * @return None - * - * This function should be used whenever a CSI receiver error interrupt is - * generated. It provides the detailed information (bits) on the exact error - * that occurred. - * - *@deprecated {this function is DEPRECATED since it only works on CSI port 1. - * Use the function below instead and specify the appropriate port.} - */ -void -ia_css_rx_get_irq_info(unsigned int *irq_bits); - -/* @brief Get CSI receiver error info. - * - * @param[in] port Input port identifier. 
- * @param[out] irq_bits Pointer to the interrupt bits. The interrupt - * bits will be written this info. - * This will be the error bits that are enabled in the CSI - * receiver error register. - * @return None - * - * This function should be used whenever a CSI receiver error interrupt is - * generated. It provides the detailed information (bits) on the exact error - * that occurred. - */ -void -ia_css_rx_port_get_irq_info(enum mipi_port_id port, unsigned int *irq_bits); - -/* @brief Clear CSI receiver error info. - * - * @param[in] irq_bits The bits that should be cleared from the CSI receiver - * interrupt bits register. - * @return None - * - * This function should be called after ia_css_rx_get_irq_info has been called - * and the error bits have been interpreted. It is advised to use the return - * value of that function as the argument to this function to make sure no new - * error bits get overwritten. - * - * @deprecated{this function is DEPRECATED since it only works on CSI port 1. - * Use the function below instead and specify the appropriate port.} - */ -void -ia_css_rx_clear_irq_info(unsigned int irq_bits); - -/* @brief Clear CSI receiver error info. - * - * @param[in] port Input port identifier. - * @param[in] irq_bits The bits that should be cleared from the CSI receiver - * interrupt bits register. - * @return None - * - * This function should be called after ia_css_rx_get_irq_info has been called - * and the error bits have been interpreted. It is advised to use the return - * value of that function as the argument to this function to make sure no new - * error bits get overwritten. - */ -void -ia_css_rx_port_clear_irq_info(enum mipi_port_id port, unsigned int irq_bits); - -/* @brief Enable or disable specific interrupts. - * - * @param[in] type The interrupt type that will be enabled/disabled. - * @param[in] enable enable or disable. 
- * @return Returns IA_CSS_INTERNAL_ERROR if this interrupt - * type cannot be enabled/disabled which is true for - * CSS internal interrupts. Otherwise returns - * IA_CSS_SUCCESS. - */ -enum ia_css_err -ia_css_irq_enable(enum ia_css_irq_info type, bool enable); - -#endif /* __IA_CSS_IRQ_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_memory_access.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_memory_access.c deleted file mode 100644 index 8222dd0a41f2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_memory_access.c +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015-2017, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include -#include -#include -#include -#include - -const hrt_vaddress mmgr_NULL = (hrt_vaddress)0; -const hrt_vaddress mmgr_EXCEPTION = (hrt_vaddress)-1; - -hrt_vaddress -mmgr_malloc(const size_t size) -{ - return mmgr_alloc_attr(size, 0); -} - -hrt_vaddress mmgr_alloc_attr(const size_t size, const uint16_t attrs) -{ - uint16_t masked_attrs = attrs & MMGR_ATTRIBUTE_MASK; - WARN_ON(attrs & MMGR_ATTRIBUTE_CONTIGUOUS); - - if (masked_attrs & MMGR_ATTRIBUTE_CLEARED) { - if (masked_attrs & MMGR_ATTRIBUTE_CACHED) - return (ia_css_ptr) hrt_isp_css_mm_calloc_cached(size); - else - return (ia_css_ptr) hrt_isp_css_mm_calloc(size); - } else { - if (masked_attrs & MMGR_ATTRIBUTE_CACHED) - return (ia_css_ptr) hrt_isp_css_mm_alloc_cached(size); - else - return (ia_css_ptr) hrt_isp_css_mm_alloc(size); - } -} - -hrt_vaddress -mmgr_calloc(const size_t N, const size_t size) -{ - return mmgr_alloc_attr(size * N, MMGR_ATTRIBUTE_CLEARED); -} - -void mmgr_clear(hrt_vaddress vaddr, const size_t size) -{ - if (vaddr) - hmm_set(vaddr, 0, size); -} - -void mmgr_load(const hrt_vaddress vaddr, void *data, const size_t size) -{ - if (vaddr && data) - hmm_load(vaddr, data, size); -} - -void -mmgr_store(const hrt_vaddress vaddr, const void *data, const size_t size) -{ - if (vaddr && data) - hmm_store(vaddr, data, size); -} - -hrt_vaddress -mmgr_mmap(const void __user *ptr, const size_t size, - uint16_t attribute, void *context) -{ - struct hrt_userbuffer_attr *userbuffer_attr = context; - return hrt_isp_css_mm_alloc_user_ptr( - size, ptr, userbuffer_attr->pgnr, - userbuffer_attr->type, - attribute & HRT_BUF_FLAG_CACHED); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h deleted file mode 100644 index ed0b6ab371da..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Support for Intel Camera Imaging 
ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_METADATA_H -#define __IA_CSS_METADATA_H - -/* @file - * This file contains structure for processing sensor metadata. - */ - -#include -#include "ia_css_types.h" -#include "ia_css_stream_format.h" - -/* Metadata configuration. This data structure contains necessary info - * to process sensor metadata. - */ -struct ia_css_metadata_config { - enum atomisp_input_format data_type; /** Data type of CSI-2 embedded - data. The default value is ATOMISP_INPUT_FORMAT_EMBEDDED. For - certain sensors, user can choose non-default data type for embedded - data. */ - struct ia_css_resolution resolution; /** Resolution */ -}; - -struct ia_css_metadata_info { - struct ia_css_resolution resolution; /** Resolution */ - uint32_t stride; /** Stride in bytes */ - uint32_t size; /** Total size in bytes */ -}; - -struct ia_css_metadata { - struct ia_css_metadata_info info; /** Layout info */ - ia_css_ptr address; /** CSS virtual address */ - uint32_t exp_id; - /** Exposure ID, see ia_css_event_public.h for more detail */ -}; -#define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata) - -/* @brief Allocate a metadata buffer. - * @param[in] metadata_info Metadata info struct, contains details on metadata buffers. - * @return Pointer of metadata buffer or NULL (if error) - * - * This function allocates a metadata buffer according to the properties - * specified in the metadata_info struct. 
- */ -struct ia_css_metadata * -ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info); - -/* @brief Free a metadata buffer. - * - * @param[in] metadata Pointer of metadata buffer. - * @return None - * - * This function frees a metadata buffer. - */ -void -ia_css_metadata_free(struct ia_css_metadata *metadata); - -#endif /* __IA_CSS_METADATA_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h deleted file mode 100644 index 367b2aafa5e8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_MIPI_H -#define __IA_CSS_MIPI_H - -/* @file - * This file contains MIPI support functionality - */ - -#include -#include "ia_css_err.h" -#include "ia_css_stream_format.h" -#include "ia_css_input_port.h" - -/* Backward compatible for CSS API 2.0 only - * TO BE REMOVED when all drivers move to CSS API 2.1. - */ -/* @brief Specify a CSS MIPI frame buffer. - * - * @param[in] size_mem_words The frame size in memory words (32B). - * @param[in] contiguous Allocate memory physically contiguously or not. - * @return The error code. - * - * \deprecated{Use ia_css_mipi_buffer_config instead.} - * - * Specifies a CSS MIPI frame buffer: size in memory words (32B). 
- */ -enum ia_css_err -ia_css_mipi_frame_specify(const unsigned int size_mem_words, - const bool contiguous); - -#if !defined(HAS_NO_INPUT_SYSTEM) -/* @brief Register size of a CSS MIPI frame for check during capturing. - * - * @param[in] port CSI-2 port this check is registered. - * @param[in] size_mem_words The frame size in memory words (32B). - * @return Return the error in case of failure. E.g. MAX_NOF_ENTRIES REACHED - * - * Register size of a CSS MIPI frame to check during capturing. Up to - * IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES entries per port allowed. Entries are reset - * when stream is stopped. - * - * - */ -enum ia_css_err -ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port, - const unsigned int size_mem_words); -#endif - -/* @brief Calculate the size of a mipi frame. - * - * @param[in] width The width (in pixels) of the frame. - * @param[in] height The height (in lines) of the frame. - * @param[in] format The frame (MIPI) format. - * @param[in] hasSOLandEOL Whether frame (MIPI) contains (optional) SOL and EOF packets. - * @param[in] embedded_data_size_words Embedded data size in memory words. - * @param size_mem_words The mipi frame size in memory words (32B). - * @return The error code. - * - * Calculate the size of a mipi frame, based on the resolution and format. - */ -enum ia_css_err -ia_css_mipi_frame_calculate_size(const unsigned int width, - const unsigned int height, - const enum atomisp_input_format format, - const bool hasSOLandEOL, - const unsigned int embedded_data_size_words, - unsigned int *size_mem_words); - -#endif /* __IA_CSS_MIPI_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h deleted file mode 100644 index 13c21056bfbf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_MMU_H -#define __IA_CSS_MMU_H - -/* @file - * This file contains one support function for invalidating the CSS MMU cache - */ - -/* @brief Invalidate the MMU internal cache. - * @return None - * - * This function triggers an invalidation of the translate-look-aside - * buffer (TLB) that's inside the CSS MMU. This function should be called - * every time the page tables used by the MMU change. - */ -void -ia_css_mmu_invalidate_cache(void); - -#endif /* __IA_CSS_MMU_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h deleted file mode 100644 index 1021e4f380a5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_MMU_PRIVATE_H -#define __IA_CSS_MMU_PRIVATE_H - -#include "system_local.h" - -/* - * This function sets the L1 pagetable address. - * After power-up of the ISP the L1 pagetable can be set. - * Once being set the L1 pagetable is protected against - * further modifications. - */ -void -sh_css_mmu_set_page_table_base_index(hrt_data base_index); - -#endif /* __IA_CSS_MMU_PRIVATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h deleted file mode 100644 index de409638d009..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_MORPH_H -#define __IA_CSS_MORPH_H - -/* @file - * This file contains supporting for morphing table - */ - -#include - -/* @brief Morphing table - * @param[in] width Width of the morphing table. - * @param[in] height Height of the morphing table. - * @return Pointer to the morphing table -*/ -struct ia_css_morph_table * -ia_css_morph_table_allocate(unsigned int width, unsigned int height); - -/* @brief Free the morph table - * @param[in] me Pointer to the morph table. 
- * @return None -*/ -void -ia_css_morph_table_free(struct ia_css_morph_table *me); - -#endif /* __IA_CSS_MORPH_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe.h deleted file mode 100644 index f6870fa7a18c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe.h +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_PIPE_H__ -#define __IA_CSS_PIPE_H__ - -#include -#include "ia_css_stream.h" -#include "ia_css_frame.h" -#include "ia_css_pipeline.h" -#include "ia_css_binary.h" -#include "sh_css_legacy.h" - -#define PIPE_ENTRY_EMPTY_TOKEN (~0U) -#define PIPE_ENTRY_RESERVED_TOKEN (0x1) - -struct ia_css_preview_settings { - struct ia_css_binary copy_binary; - struct ia_css_binary preview_binary; - struct ia_css_binary vf_pp_binary; - - /* 2401 only for these two - do we in fact use them for anything real */ - struct ia_css_frame *delay_frames[MAX_NUM_DELAY_FRAMES]; - struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES]; - - struct ia_css_pipe *copy_pipe; - struct ia_css_pipe *capture_pipe; - struct ia_css_pipe *acc_pipe; -}; - -#define IA_CSS_DEFAULT_PREVIEW_SETTINGS \ -(struct ia_css_preview_settings) { \ - .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .preview_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ -} - -struct ia_css_capture_settings { - 
struct ia_css_binary copy_binary; - /* we extend primary binary to multiple stages because in ISP2.6.1 - * the computation load is too high to fit in one single binary. */ - struct ia_css_binary primary_binary[MAX_NUM_PRIMARY_STAGES]; - unsigned int num_primary_stage; - struct ia_css_binary pre_isp_binary; - struct ia_css_binary anr_gdc_binary; - struct ia_css_binary post_isp_binary; - struct ia_css_binary capture_pp_binary; - struct ia_css_binary vf_pp_binary; - struct ia_css_binary capture_ldc_binary; - struct ia_css_binary *yuv_scaler_binary; - struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; - bool *is_output_stage; - unsigned int num_yuv_scaler; -}; - -#define IA_CSS_DEFAULT_CAPTURE_SETTINGS \ -(struct ia_css_capture_settings) { \ - .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .primary_binary = {IA_CSS_BINARY_DEFAULT_SETTINGS}, \ - .pre_isp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .anr_gdc_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .post_isp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .capture_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .capture_ldc_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ -} - -struct ia_css_video_settings { - struct ia_css_binary copy_binary; - struct ia_css_binary video_binary; - struct ia_css_binary vf_pp_binary; - struct ia_css_binary *yuv_scaler_binary; - struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; -#ifndef ISP2401 - struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES]; -#else - struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES]; -#endif - struct ia_css_frame *vf_pp_in_frame; - struct ia_css_pipe *copy_pipe; - struct ia_css_pipe *capture_pipe; - bool *is_output_stage; - unsigned int num_yuv_scaler; -}; - -#define IA_CSS_DEFAULT_VIDEO_SETTINGS \ -(struct ia_css_video_settings) { \ - .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .video_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ - .vf_pp_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ -} - 
-struct ia_css_yuvpp_settings { - struct ia_css_binary copy_binary; - struct ia_css_binary *yuv_scaler_binary; - struct ia_css_binary *vf_pp_binary; - bool *is_output_stage; - unsigned int num_yuv_scaler; - unsigned int num_vf_pp; - unsigned int num_output; -}; - -#define IA_CSS_DEFAULT_YUVPP_SETTINGS \ -(struct ia_css_yuvpp_settings) { \ - .copy_binary = IA_CSS_BINARY_DEFAULT_SETTINGS, \ -} - -struct osys_object; - -struct ia_css_pipe { - /* TODO: Remove stop_requested and use stop_requested in the pipeline */ - bool stop_requested; - struct ia_css_pipe_config config; - struct ia_css_pipe_extra_config extra_config; - struct ia_css_pipe_info info; - enum ia_css_pipe_id mode; - struct ia_css_shading_table *shading_table; - struct ia_css_pipeline pipeline; - struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct ia_css_frame_info bds_output_info; - struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct ia_css_frame_info out_yuv_ds_input_info; - struct ia_css_frame_info vf_yuv_ds_input_info; - struct ia_css_fw_info *output_stage; /* extra output stage */ - struct ia_css_fw_info *vf_stage; /* extra vf_stage */ - unsigned int required_bds_factor; - unsigned int dvs_frame_delay; - int num_invalid_frames; - bool enable_viewfinder[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct ia_css_stream *stream; - struct ia_css_frame in_frame_struct; - struct ia_css_frame out_frame_struct; - struct ia_css_frame vf_frame_struct; - struct ia_css_frame *continuous_frames[NUM_CONTINUOUS_FRAMES]; - struct ia_css_metadata *cont_md_buffers[NUM_CONTINUOUS_FRAMES]; - union { - struct ia_css_preview_settings preview; - struct ia_css_video_settings video; - struct ia_css_capture_settings capture; - struct ia_css_yuvpp_settings yuvpp; - } pipe_settings; - hrt_vaddress scaler_pp_lut; - struct osys_object *osys_obj; - - /* This number is unique per pipe each instance of css. This number is - * reused as pipeline number also. 
There is a 1-1 mapping between pipe_num - * and sp thread id. Current logic limits pipe_num to - * SH_CSS_MAX_SP_THREADS */ - unsigned int pipe_num; -}; - -#define IA_CSS_DEFAULT_PIPE \ -(struct ia_css_pipe) { \ - .config = DEFAULT_PIPE_CONFIG, \ - .info = DEFAULT_PIPE_INFO, \ - .mode = IA_CSS_PIPE_ID_ACC, /* (pipe_id) */ \ - .pipeline = DEFAULT_PIPELINE, \ - .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \ - .bds_output_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \ - .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \ - .out_yuv_ds_input_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \ - .vf_yuv_ds_input_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \ - .required_bds_factor = SH_CSS_BDS_FACTOR_1_00, \ - .dvs_frame_delay = 1, \ - .enable_viewfinder = {true}, \ - .in_frame_struct = DEFAULT_FRAME, \ - .out_frame_struct = DEFAULT_FRAME, \ - .vf_frame_struct = DEFAULT_FRAME, \ - .pipe_settings = { \ - .preview = IA_CSS_DEFAULT_PREVIEW_SETTINGS \ - }, \ - .pipe_num = PIPE_ENTRY_EMPTY_TOKEN, \ -} - -void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map); - -enum ia_css_err -sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe, - struct ia_css_isp_parameters *params, - bool commit, struct ia_css_pipe *pipe); - - - -#endif /* __IA_CSS_PIPE_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h deleted file mode 100644 index 11225d5ac442..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h +++ /dev/null @@ -1,579 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_PIPE_PUBLIC_H -#define __IA_CSS_PIPE_PUBLIC_H - -/* @file - * This file contains the public interface for CSS pipes. - */ - -#include -#include -#include -#include -#include -#ifdef ISP2401 -#include -#endif - -enum { - IA_CSS_PIPE_OUTPUT_STAGE_0 = 0, - IA_CSS_PIPE_OUTPUT_STAGE_1, - IA_CSS_PIPE_MAX_OUTPUT_STAGE, -}; - -/* Enumeration of pipe modes. This mode can be used to create - * an image pipe for this mode. These pipes can be combined - * to configure and run streams on the ISP. - * - * For example, one can create a preview and capture pipe to - * create a continuous capture stream. - */ -enum ia_css_pipe_mode { - IA_CSS_PIPE_MODE_PREVIEW, /** Preview pipe */ - IA_CSS_PIPE_MODE_VIDEO, /** Video pipe */ - IA_CSS_PIPE_MODE_CAPTURE, /** Still capture pipe */ - IA_CSS_PIPE_MODE_ACC, /** Accelerated pipe */ - IA_CSS_PIPE_MODE_COPY, /** Copy pipe, only used for embedded/image data copying */ - IA_CSS_PIPE_MODE_YUVPP, /** YUV post processing pipe, used for all use cases with YUV input, - for SoC sensor and external ISP */ -}; -/* Temporary define */ -#define IA_CSS_PIPE_MODE_NUM (IA_CSS_PIPE_MODE_YUVPP + 1) - -/** - * Enumeration of pipe versions. - * the order should match with definition in sh_css_defs.h - */ -enum ia_css_pipe_version { - IA_CSS_PIPE_VERSION_1 = 1, /** ISP1.0 pipe */ - IA_CSS_PIPE_VERSION_2_2 = 2, /** ISP2.2 pipe */ - IA_CSS_PIPE_VERSION_2_6_1 = 3, /** ISP2.6.1 pipe */ - IA_CSS_PIPE_VERSION_2_7 = 4 /** ISP2.7 pipe */ -}; - -/** - * Pipe configuration structure. - * Resolution properties are filled by Driver, kernel configurations are - * set by AIC - */ -struct ia_css_pipe_config { - enum ia_css_pipe_mode mode; - /** mode, indicates which mode the pipe should use. 
*/ - enum ia_css_pipe_version isp_pipe_version; - /** pipe version, indicates which imaging pipeline the pipe should use. */ - struct ia_css_resolution input_effective_res; - /** input effective resolution */ - struct ia_css_resolution bayer_ds_out_res; - /** bayer down scaling */ - struct ia_css_resolution capt_pp_in_res; -#ifndef ISP2401 - /** bayer down scaling */ -#else - /** capture post processing input resolution */ -#endif - struct ia_css_resolution vf_pp_in_res; -#ifndef ISP2401 - /** bayer down scaling */ -#else - /** view finder post processing input resolution */ - struct ia_css_resolution output_system_in_res; - /** For IPU3 only: use output_system_in_res to specify what input resolution - will OSYS receive, this resolution is equal to the output resolution of GDC - if not determined CSS will set output_system_in_res with main osys output pin resolution - All other IPUs may ignore this property */ -#endif - struct ia_css_resolution dvs_crop_out_res; - /** dvs crop, video only, not in use yet. Use dvs_envelope below. */ - struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - /** output of YUV scaling */ - struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - /** output of VF YUV scaling */ - struct ia_css_fw_info *acc_extension; - /** Pipeline extension accelerator */ - struct ia_css_fw_info **acc_stages; - /** Standalone accelerator stages */ - uint32_t num_acc_stages; - /** Number of standalone accelerator stages */ - struct ia_css_capture_config default_capture_config; - /** Default capture config for initial capture pipe configuration. */ - struct ia_css_resolution dvs_envelope; /** temporary */ - enum ia_css_frame_delay dvs_frame_delay; - /** indicates the DVS loop delay in frame periods */ - int acc_num_execs; - /** For acceleration pipes only: determine how many times the pipe - should be run. Setting this to -1 means it will run until - stopped. 
*/ - bool enable_dz; - /** Disabling digital zoom for a pipeline, if this is set to false, - then setting a zoom factor will have no effect. - In some use cases this provides better performance. */ - bool enable_dpc; - /** Disabling "Defect Pixel Correction" for a pipeline, if this is set - to false. In some use cases this provides better performance. */ - bool enable_vfpp_bci; - /** Enabling BCI mode will cause yuv_scale binary to be picked up - instead of vf_pp. This only applies to viewfinder post - processing stages. */ -#ifdef ISP2401 - bool enable_luma_only; - /** Enabling of monochrome mode for a pipeline. If enabled only luma processing - will be done. */ - bool enable_tnr; - /** Enabling of TNR (temporal noise reduction). This is only applicable to video - pipes. Non video-pipes should always set this parameter to false. */ -#endif - struct ia_css_isp_config *p_isp_config; - /** Pointer to ISP configuration */ - struct ia_css_resolution gdc_in_buffer_res; - /** GDC in buffer resolution. */ - struct ia_css_point gdc_in_buffer_offset; - /** GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */ -#ifdef ISP2401 - struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl; - /** Origin of internal frame positioned on shading table at shading correction in ISP. - NOTE: Shading table is larger than or equal to internal frame. - Shading table has shading gains and internal frame has bayer data. - The origin of internal frame is used in shading correction in ISP - to retrieve shading gains which correspond to bayer data. */ -#endif -}; - - -/** - * Default settings for newly created pipe configurations. 
- */ -#define DEFAULT_PIPE_CONFIG \ -(struct ia_css_pipe_config) { \ - .mode = IA_CSS_PIPE_MODE_PREVIEW, \ - .isp_pipe_version = 1, \ - .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \ - .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \ - .default_capture_config = DEFAULT_CAPTURE_CONFIG, \ - .dvs_frame_delay = IA_CSS_FRAME_DELAY_1, \ - .acc_num_execs = -1, \ -} - -/* Pipe info, this struct describes properties of a pipe after it's stream has - * been created. - * ~~~** DO NOT ADD NEW FIELD **~~~ This structure will be deprecated. - * - On the Behalf of CSS-API Committee. - */ -struct ia_css_pipe_info { - struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - /** Info about output resolution. This contains the stride which - should be used for memory allocation. */ - struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - /** Info about viewfinder output resolution (optional). This contains - the stride that should be used for memory allocation. */ - struct ia_css_frame_info raw_output_info; - /** Raw output resolution. This indicates the resolution of the - RAW bayer output for pipes that support this. Currently, only the - still capture pipes support this feature. When this resolution is - smaller than the input resolution, cropping will be performed by - the ISP. The first cropping that will be performed is on the upper - left corner where we crop 8 lines and 8 columns to remove the - pixels normally used to initialize the ISP filters. - This is why the raw output resolution should normally be set to - the input resolution - 8x8. */ -#ifdef ISP2401 - struct ia_css_resolution output_system_in_res_info; - /** For IPU3 only. Info about output system in resolution which is considered - as gdc out resolution. */ -#endif - struct ia_css_shading_info shading_info; - /** After an image pipe is created, this field will contain the info - for the shading correction. 
*/ - struct ia_css_grid_info grid_info; - /** After an image pipe is created, this field will contain the grid - info for 3A and DVS. */ - int num_invalid_frames; - /** The very first frames in a started stream do not contain valid data. - In this field, the CSS-firmware communicates to the host-driver how - many initial frames will contain invalid data; this allows the - host-driver to discard those initial invalid frames and start it's - output at the first valid frame. */ -}; - -/** - * Defaults for ia_css_pipe_info structs. - */ -#define DEFAULT_PIPE_INFO \ -(struct ia_css_pipe_info) { \ - .output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \ - .vf_output_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \ - .raw_output_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \ - .shading_info = DEFAULT_SHADING_INFO, \ - .grid_info = DEFAULT_GRID_INFO, \ -} - -/* @brief Load default pipe configuration - * @param[out] pipe_config The pipe configuration. - * @return None - * - * This function will load the default pipe configuration: -@code - struct ia_css_pipe_config def_config = { - IA_CSS_PIPE_MODE_PREVIEW, // mode - 1, // isp_pipe_version - {0, 0}, // bayer_ds_out_res - {0, 0}, // capt_pp_in_res - {0, 0}, // vf_pp_in_res - {0, 0}, // dvs_crop_out_res - {{0, 0}, 0, 0, 0, 0}, // output_info - {{0, 0}, 0, 0, 0, 0}, // second_output_info - {{0, 0}, 0, 0, 0, 0}, // vf_output_info - {{0, 0}, 0, 0, 0, 0}, // second_vf_output_info - NULL, // acc_extension - NULL, // acc_stages - 0, // num_acc_stages - { - IA_CSS_CAPTURE_MODE_RAW, // mode - false, // enable_xnr - false // enable_raw_output - }, // default_capture_config - {0, 0}, // dvs_envelope - 1, // dvs_frame_delay - -1, // acc_num_execs - true, // enable_dz - NULL, // p_isp_config - }; -@endcode - */ -void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config); - -/* @brief Create a pipe - * @param[in] config The pipe configuration. - * @param[out] pipe The pipe. - * @return IA_CSS_SUCCESS or the error code. 
- * - * This function will create a pipe with the given - * configuration. - */ -enum ia_css_err -ia_css_pipe_create(const struct ia_css_pipe_config *config, - struct ia_css_pipe **pipe); - -/* @brief Destroy a pipe - * @param[in] pipe The pipe. - * @return IA_CSS_SUCCESS or the error code. - * - * This function will destroy a given pipe. - */ -enum ia_css_err -ia_css_pipe_destroy(struct ia_css_pipe *pipe); - -/* @brief Provides information about a pipe - * @param[in] pipe The pipe. - * @param[out] pipe_info The pipe information. - * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS. - * - * This function will provide information about a given pipe. - */ -enum ia_css_err -ia_css_pipe_get_info(const struct ia_css_pipe *pipe, - struct ia_css_pipe_info *pipe_info); - -/* @brief Configure a pipe with filter coefficients. - * @param[in] pipe The pipe. - * @param[in] config The pointer to ISP configuration. - * @return IA_CSS_SUCCESS or error code upon error. - * - * This function configures the filter coefficients for an image - * pipe. - */ -enum ia_css_err -ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe, - struct ia_css_isp_config *config); - -/* @brief Controls when the Event generator raises an IRQ to the Host. - * - * @param[in] pipe The pipe. - * @param[in] or_mask Binary or of enum ia_css_event_irq_mask_type. Each pipe - related event that is part of this mask will directly - raise an IRQ to the Host when the event occurs in the - CSS. - * @param[in] and_mask Binary or of enum ia_css_event_irq_mask_type. An event - IRQ for the Host is only raised after all pipe related - events have occurred at least once for all the active - pipes. Events are remembered and don't need to occure - at the same moment in time. There is no control over - the order of these events. Once an IRQ has been raised - all remembered events are reset. - * @return IA_CSS_SUCCESS. - * - Controls when the Event generator in the CSS raises an IRQ to the Host. 
- The main purpose of this function is to reduce the amount of interrupts - between the CSS and the Host. This will help saving power as it wakes up the - Host less often. In case both or_mask and and_mask are - IA_CSS_EVENT_TYPE_NONE for all pipes, no event IRQ's will be raised. An - exception holds for IA_CSS_EVENT_TYPE_PORT_EOF, for this event an IRQ is always - raised. - Note that events are still queued and the Host can poll for them. The - or_mask and and_mask may be active at the same time\n - \n - Default values, for all pipe id's, after ia_css_init:\n - or_mask = IA_CSS_EVENT_TYPE_ALL\n - and_mask = IA_CSS_EVENT_TYPE_NONE\n - \n - Examples\n - \code - ia_css_pipe_set_irq_mask(h_pipe, - IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE | - IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE , - IA_CSS_EVENT_TYPE_NONE); - \endcode - The event generator will only raise an interrupt to the Host when there are - 3A or DIS statistics available from the preview pipe. It will not generate - an interrupt for any other event of the preview pipe e.g when there is an - output frame available. - - \code - ia_css_pipe_set_irq_mask(h_pipe_preview, - IA_CSS_EVENT_TYPE_NONE, - IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE | - IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE ); - - ia_css_pipe_set_irq_mask(h_pipe_capture, - IA_CSS_EVENT_TYPE_NONE, - IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE ); - \endcode - The event generator will only raise an interrupt to the Host when there is - both a frame done and 3A event available from the preview pipe AND when there - is a frame done available from the capture pipe. Note that these events - may occur at different moments in time. Also the order of the events is not - relevant. 
- - \code - ia_css_pipe_set_irq_mask(h_pipe_preview, - IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, - IA_CSS_EVENT_TYPE_ALL ); - - ia_css_pipe_set_irq_mask(h_pipe_capture, - IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, - IA_CSS_EVENT_TYPE_ALL ); - \endcode - The event generator will only raise an interrupt to the Host when there is an - output frame from the preview pipe OR an output frame from the capture pipe. - All other events (3A, VF output, pipeline done) will not raise an interrupt - to the Host. These events are not lost but always stored in the event queue. - */ -enum ia_css_err -ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe, - unsigned int or_mask, - unsigned int and_mask); - -/* @brief Reads the current event IRQ mask from the CSS. - * - * @param[in] pipe The pipe. - * @param[out] or_mask Current or_mask. The bits in this mask are a binary or - of enum ia_css_event_irq_mask_type. Pointer may be NULL. - * @param[out] and_mask Current and_mask.The bits in this mask are a binary or - of enum ia_css_event_irq_mask_type. Pointer may be NULL. - * @return IA_CSS_SUCCESS. - * - Reads the current event IRQ mask from the CSS. Reading returns the actual - values as used by the SP and not any mirrored values stored at the Host.\n -\n -Precondition:\n -SP must be running.\n - -*/ -enum ia_css_err -ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe, - unsigned int *or_mask, - unsigned int *and_mask); - -/* @brief Queue a buffer for an image pipe. - * - * @param[in] pipe The pipe that will own the buffer. - * @param[in] buffer Pointer to the buffer. - * Note that the caller remains owner of the buffer - * structure. Only the data pointer within it will - * be passed into the internal queues. - * @return IA_CSS_INTERNAL_ERROR in case of unexpected errors, - * IA_CSS_SUCCESS otherwise. - * - * This function adds a buffer (which has a certain buffer type) to the queue - * for this type. This queue is owned by the image pipe. 
After this function - * completes successfully, the buffer is now owned by the image pipe and should - * no longer be accessed by any other code until it gets dequeued. The image - * pipe will dequeue buffers from this queue, use them and return them to the - * host code via an interrupt. Buffers will be consumed in the same order they - * get queued, but may be returned to the host out of order. - */ -enum ia_css_err -ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe, - const struct ia_css_buffer *buffer); - -/* @brief Dequeue a buffer from an image pipe. - * - * @param[in] pipe The pipeline that the buffer queue belongs to. - * @param[in,out] buffer The buffer is used to lookup the type which determines - * which internal queue to use. - * The resulting buffer pointer is written into the dta - * field. - * @return IA_CSS_ERR_NO_BUFFER if the queue is empty or - * IA_CSS_SUCCESS otherwise. - * - * This function dequeues a buffer from a buffer queue. The queue is indicated - * by the buffer type argument. This function can be called after an interrupt - * has been generated that signalled that a new buffer was available and can - * be used in a polling-like situation where the NO_BUFFER return value is used - * to determine whether a buffer was available or not. - */ -enum ia_css_err -ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe, - struct ia_css_buffer *buffer); - - -/* @brief Set the state (Enable or Disable) of the Extension stage in the - * given pipe. - * @param[in] pipe Pipe handle. - * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle) - * @param[in] enable Enable Flag (1 to enable ; 0 to disable) - * - * @return - * IA_CSS_SUCCESS : Success - * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters - * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe - * (No active stream with this pipe) - * - * This function will request state change (enable or disable) for the Extension - * stage (firmware handle) in the given pipe. 
- * - * Note: - * 1. Extension can be enabled/disabled only on QOS Extensions - * 2. Extension can be enabled/disabled only with an active QOS Pipe - * 3. Initial(Default) state of QOS Extensions is Disabled - * 4. State change cannot be guaranteed immediately OR on frame boundary - * - */ -enum ia_css_err -ia_css_pipe_set_qos_ext_state (struct ia_css_pipe *pipe, - uint32_t fw_handle, - bool enable); - -/* @brief Get the state (Enable or Disable) of the Extension stage in the - * given pipe. - * @param[in] pipe Pipe handle. - * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle) - * @param[out] *enable Enable Flag - * - * @return - * IA_CSS_SUCCESS : Success - * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters - * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe - * (No active stream with this pipe) - * - * This function will query the state of the Extension stage (firmware handle) - * in the given Pipe. - * - * Note: - * 1. Extension state can be queried only on QOS Extensions - * 2. Extension can be enabled/disabled only with an active QOS Pipe - * 3. Initial(Default) state of QOS Extensions is Disabled. - * - */ -enum ia_css_err -ia_css_pipe_get_qos_ext_state (struct ia_css_pipe *pipe, - uint32_t fw_handle, - bool * enable); - -#ifdef ISP2401 -/* @brief Update mapped CSS and ISP arguments for QoS pipe during SP runtime. - * @param[in] pipe Pipe handle. - * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle). - * @param[in] css_seg Parameter memory descriptors for CSS segments. - * @param[in] isp_seg Parameter memory descriptors for ISP segments. - * - * @return - * IA_CSS_SUCCESS : Success - * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters - * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe - * (No active stream with this pipe) - * - * \deprecated{This interface is used to temporarily support a late-developed, - * specific use-case on a specific IPU2 platform. 
It will not be supported or - * maintained on IPU3 or further.} - */ -enum ia_css_err -ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe, uint32_t fw_handle, - struct ia_css_isp_param_css_segments *css_seg, - struct ia_css_isp_param_isp_segments *isp_seg); - -#endif -/* @brief Get selected configuration settings - * @param[in] pipe The pipe. - * @param[out] config Configuration settings. - * @return None - */ -void -ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe, - struct ia_css_isp_config *config); - -/* @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit - * address space. So the LUT can be freed by caller. - * @param[in] pipe Pipe handle. - * @param[in] lut Look up tabel - * - * @return - * IA_CSS_SUCCESS : Success - * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters - * - * Note: - * 1) Note that both GDC's are programmed with the same table. - * 2) Current implementation ignores the pipe and overrides the - * global lut. This will be fixed in the future - * 3) This function must be called before stream start - * - */ -enum ia_css_err -ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe, - const void *lut); -/* @brief Checking of DVS statistics ability - * @param[in] pipe_info The pipe info. - * @return true - has DVS statistics ability - * false - otherwise - */ -bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info); - -#ifdef ISP2401 -/* @brief Override the frameformat set on the output pins. - * @param[in] pipe Pipe handle. - * @param[in] output_pin Pin index to set the format on - * 0 - main output pin - * 1 - display output pin - * @param[in] format Format to set - * - * @return - * IA_CSS_SUCCESS : Success - * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters - * IA_CSS_ERR_INTERNAL_ERROR : Pipe misses binary info - * - * Note: - * 1) This is an optional function to override the formats set in the pipe. - * 2) Only overriding with IA_CSS_FRAME_FORMAT_NV12_TILEY is currently allowed. 
- * 3) This function is only to be used on pipes that use the output system. - * 4) If this function is used, it MUST be called after ia_css_pipe_create. - * 5) If this function is used, this function MUST be called before ia_css_stream_start. - */ -enum ia_css_err -ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe, - int output_pin, - enum ia_css_frame_format format); - -#endif -#endif /* __IA_CSS_PIPE_PUBLIC_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h deleted file mode 100644 index 6f24656b6cb4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_PRBS_H -#define __IA_CSS_PRBS_H - -/* @file - * This file contains support for Pseudo Random Bit Sequence (PRBS) inputs - */ - -/* Enumerate the PRBS IDs. - */ -enum ia_css_prbs_id { - IA_CSS_PRBS_ID0, - IA_CSS_PRBS_ID1, - IA_CSS_PRBS_ID2 -}; - -/** - * Maximum number of PRBS IDs. - * - * Make sure the value of this define gets changed to reflect the correct - * number of ia_css_prbs_id enum if you add/delete an item in the enum. - */ -#define N_CSS_PRBS_IDS (IA_CSS_PRBS_ID2+1) - -/** - * PRBS configuration structure. - * - * Seed the for the Pseudo Random Bit Sequence. 
- * - * @deprecated{This interface is deprecated, it is not portable -> move to input system API} - */ -struct ia_css_prbs_config { - enum ia_css_prbs_id id; - unsigned int h_blank; /** horizontal blank */ - unsigned int v_blank; /** vertical blank */ - int seed; /** random seed for the 1st 2-pixel-components/clock */ - int seed1; /** random seed for the 2nd 2-pixel-components/clock */ -}; - -#endif /* __IA_CSS_PRBS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h deleted file mode 100644 index 9a167306611c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_PROPERTIES_H -#define __IA_CSS_PROPERTIES_H - -/* @file - * This file contains support for retrieving properties of some hardware the CSS system - */ - -#include /* bool */ -#include /* ia_css_vamem_type */ - -struct ia_css_properties { - int gdc_coord_one; - bool l1_base_is_index; /** Indicate whether the L1 page base - is a page index or a byte address. */ - enum ia_css_vamem_type vamem_type; -}; - -/* @brief Get hardware properties - * @param[in,out] properties The hardware properties - * @return None - * - * This function returns a number of hardware properties. 
- */ -void -ia_css_get_properties(struct ia_css_properties *properties); - -#endif /* __IA_CSS_PROPERTIES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h deleted file mode 100644 index 588f53d32b72..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SHADING_H -#define __IA_CSS_SHADING_H - -/* @file - * This file contains support for setting the shading table for CSS - */ - -#include - -/* @brief Shading table - * @param[in] width Width of the shading table. - * @param[in] height Height of the shading table. - * @return Pointer to the shading table -*/ -struct ia_css_shading_table * -ia_css_shading_table_alloc(unsigned int width, - unsigned int height); - -/* @brief Free shading table - * @param[in] table Pointer to the shading table. - * @return None -*/ -void -ia_css_shading_table_free(struct ia_css_shading_table *table); - -#endif /* __IA_CSS_SHADING_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h deleted file mode 100644 index fb6e8c2ca8bf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_STREAM_H_ -#define _IA_CSS_STREAM_H_ - -#include -#include -#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401) -#include -#endif -#include "ia_css_types.h" -#include "ia_css_stream_public.h" - -/** - * structure to hold all internal stream related information - */ -struct ia_css_stream { - struct ia_css_stream_config config; - struct ia_css_stream_info info; -#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401) - rx_cfg_t csi_rx_config; -#endif - bool reconfigure_css_rx; - struct ia_css_pipe *last_pipe; - int num_pipes; - struct ia_css_pipe **pipes; - struct ia_css_pipe *continuous_pipe; - struct ia_css_isp_parameters *isp_params_configs; - struct ia_css_isp_parameters *per_frame_isp_params_configs; - - bool cont_capt; - bool disable_cont_vf; -#ifndef ISP2401 - bool stop_copy_preview; -#endif - bool started; -}; - -/* @brief Get a binary in the stream, which binary has the shading correction. - * - * @param[in] stream: The stream. - * @return The binary which has the shading correction. 
- * - */ -struct ia_css_binary * -ia_css_stream_get_shading_correction_binary(const struct ia_css_stream *stream); - -struct ia_css_binary * -ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream); - -struct ia_css_binary * -ia_css_stream_get_3a_binary(const struct ia_css_stream *stream); - -unsigned int -ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream); - -bool -sh_css_params_set_binning_factor(struct ia_css_stream *stream, unsigned int sensor_binning); - -void -sh_css_invalidate_params(struct ia_css_stream *stream); - -/* The following functions are used for testing purposes only */ -const struct ia_css_fpn_table * -ia_css_get_fpn_table(struct ia_css_stream *stream); - -/* @brief Get a pointer to the shading table. - * - * @param[in] stream: The stream. - * @return The pointer to the shading table. - * - */ -struct ia_css_shading_table * -ia_css_get_shading_table(struct ia_css_stream *stream); - -void -ia_css_get_isp_dis_coefficients(struct ia_css_stream *stream, - short *horizontal_coefficients, - short *vertical_coefficients); - -void -ia_css_get_isp_dvs2_coefficients(struct ia_css_stream *stream, - short *hor_coefs_odd_real, - short *hor_coefs_odd_imag, - short *hor_coefs_even_real, - short *hor_coefs_even_imag, - short *ver_coefs_odd_real, - short *ver_coefs_odd_imag, - short *ver_coefs_even_real, - short *ver_coefs_even_imag); - -enum ia_css_err -ia_css_stream_isp_parameters_init(struct ia_css_stream *stream); - -void -ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream); - -#endif /*_IA_CSS_STREAM_H_*/ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h deleted file mode 100644 index f97b9eb2b19c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_STREAM_FORMAT_H -#define __IA_CSS_STREAM_FORMAT_H - -/* @file - * This file contains formats usable for ISP streaming input - */ - -#include /* bool */ -#include "../../../include/linux/atomisp_platform.h" - -unsigned int ia_css_util_input_format_bpp( - enum atomisp_input_format format, - bool two_ppc); - -#endif /* __ATOMISP_INPUT_FORMAT_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h deleted file mode 100644 index ddefad330db7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h +++ /dev/null @@ -1,582 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_STREAM_PUBLIC_H -#define __IA_CSS_STREAM_PUBLIC_H - -/* @file - * This file contains support for configuring and controlling streams - */ - -#include -#include "ia_css_types.h" -#include "ia_css_pipe_public.h" -#include "ia_css_metadata.h" -#include "ia_css_tpg.h" -#include "ia_css_prbs.h" -#include "ia_css_input_port.h" - -/* Input modes, these enumerate all supported input modes. - * Note that not all ISP modes support all input modes. - */ -enum ia_css_input_mode { - IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */ - IA_CSS_INPUT_MODE_FIFO, /** data from input-fifo */ - IA_CSS_INPUT_MODE_TPG, /** data from test-pattern generator */ - IA_CSS_INPUT_MODE_PRBS, /** data from pseudo-random bit stream */ - IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */ - IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */ -}; - -/* Structure of the MIPI buffer configuration - */ -struct ia_css_mipi_buffer_config { - unsigned int size_mem_words; /** The frame size in the system memory - words (32B) */ - bool contiguous; /** Allocated memory physically - contiguously or not. \deprecated{Will be false always.}*/ - unsigned int nof_mipi_buffers; /** The number of MIPI buffers required for this - stream */ -}; - -enum { - IA_CSS_STREAM_ISYS_STREAM_0 = 0, - IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX = IA_CSS_STREAM_ISYS_STREAM_0, - IA_CSS_STREAM_ISYS_STREAM_1, - IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH -}; - -/* This is input data configuration for one MIPI data type. We can have - * multiple of this in one virtual channel. - */ -struct ia_css_stream_isys_stream_config { - struct ia_css_resolution input_res; /** Resolution of input data */ - enum atomisp_input_format format; /** Format of input stream. This data - format will be mapped to MIPI data - type internally. 
*/ - int linked_isys_stream_id; /** default value is -1, other value means - current isys_stream shares the same buffer with - indicated isys_stream*/ - bool valid; /** indicate whether other fields have valid value */ -}; - -struct ia_css_stream_input_config { - struct ia_css_resolution input_res; /** Resolution of input data */ - struct ia_css_resolution effective_res; /** Resolution of input data. - Used for CSS 2400/1 System and deprecated for other - systems (replaced by input_effective_res in - ia_css_pipe_config) */ - enum atomisp_input_format format; /** Format of input stream. This data - format will be mapped to MIPI data - type internally. */ - enum ia_css_bayer_order bayer_order; /** Bayer order for RAW streams */ -}; - - -/* Input stream description. This describes how input will flow into the - * CSS. This is used to program the CSS hardware. - */ -struct ia_css_stream_config { - enum ia_css_input_mode mode; /** Input mode */ - union { - struct ia_css_input_port port; /** Port, for sensor only. */ - struct ia_css_tpg_config tpg; /** TPG configuration */ - struct ia_css_prbs_config prbs; /** PRBS configuration */ - } source; /** Source of input data */ - unsigned int channel_id; /** Channel on which input data - will arrive. Use this field - to specify virtual channel id. - Valid values are: 0, 1, 2, 3 */ - struct ia_css_stream_isys_stream_config isys_config[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH]; - struct ia_css_stream_input_config input_config; - -#ifdef ISP2401 - /* Currently, Android and Windows platforms interpret the binning_factor parameter - * differently. In Android, the binning factor is expressed in the form - * 2^N * 2^N, whereas in Windows platform, the binning factor is N*N - * To use the Windows method of specification, the caller has to define - * macro USE_WINDOWS_BINNING_FACTOR. This is for backward compatibility only - * and will be deprecated. 
In the future,all platforms will use the N*N method - */ -#endif - unsigned int sensor_binning_factor; /** Binning factor used by sensor - to produce image data. This is - used for shading correction. */ - unsigned int pixels_per_clock; /** Number of pixels per clock, which can be - 1, 2 or 4. */ - bool online; /** offline will activate RAW copy on SP, use this for - continuous capture. */ - /* ISYS2401 usage: ISP receives data directly from sensor, no copy. */ - unsigned init_num_cont_raw_buf; /** initial number of raw buffers to - allocate */ - unsigned target_num_cont_raw_buf; /** total number of raw buffers to - allocate */ - bool pack_raw_pixels; /** Pack pixels in the raw buffers */ - bool continuous; /** Use SP copy feature to continuously capture frames - to system memory and run pipes in offline mode */ - bool disable_cont_viewfinder; /** disable continous viewfinder for ZSL use case */ - int32_t flash_gpio_pin; /** pin on which the flash is connected, -1 for no flash */ - int left_padding; /** The number of input-formatter left-paddings, -1 for default from binary.*/ - struct ia_css_mipi_buffer_config mipi_buffer_config; /** mipi buffer configuration */ - struct ia_css_metadata_config metadata_config; /** Metadata configuration. */ - bool ia_css_enable_raw_buffer_locking; /** Enable Raw Buffer Locking for HALv3 Support */ - bool lock_all; - /** Lock all RAW buffers (true) or lock only buffers processed by - video or preview pipe (false). - This setting needs to be enabled to allow raw buffer locking - without continuous viewfinder. */ -}; - -struct ia_css_stream; - -/* Stream info, this struct describes properties of a stream after it has been - * created. - */ -struct ia_css_stream_info { - struct ia_css_metadata_info metadata_info; - /** Info about the metadata layout, this contains the stride. */ -}; - -/* @brief Load default stream configuration - * @param[in,out] stream_config The stream configuration. 
- * @return None - * - * This function will reset the stream configuration to the default state: -@code - memset(stream_config, 0, sizeof(*stream_config)); - stream_config->online = true; - stream_config->left_padding = -1; -@endcode - */ -void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config); - -/* - * create the internal structures and fill in the configuration data and pipes - */ - - /* @brief Creates a stream - * @param[in] stream_config The stream configuration. - * @param[in] num_pipes The number of pipes to incorporate in the stream. - * @param[in] pipes The pipes. - * @param[out] stream The stream. - * @return IA_CSS_SUCCESS or the error code. - * - * This function will create a stream with a given configuration and given pipes. - */ -enum ia_css_err -ia_css_stream_create(const struct ia_css_stream_config *stream_config, - int num_pipes, - struct ia_css_pipe *pipes[], - struct ia_css_stream **stream); - -/* @brief Destroys a stream - * @param[in] stream The stream. - * @return IA_CSS_SUCCESS or the error code. - * - * This function will destroy a given stream. - */ -enum ia_css_err -ia_css_stream_destroy(struct ia_css_stream *stream); - -/* @brief Provides information about a stream - * @param[in] stream The stream. - * @param[out] stream_info The information about the stream. - * @return IA_CSS_SUCCESS or the error code. - * - * This function will destroy a given stream. - */ -enum ia_css_err -ia_css_stream_get_info(const struct ia_css_stream *stream, - struct ia_css_stream_info *stream_info); - -/* @brief load (rebuild) a stream that was unloaded. - * @param[in] stream The stream - * @return IA_CSS_SUCCESS or the error code - * - * Rebuild a stream, including allocating structs, setting configuration and - * building the required pipes. - */ -enum ia_css_err -ia_css_stream_load(struct ia_css_stream *stream); - -/* @brief Starts the stream. - * @param[in] stream The stream. - * @return IA_CSS_SUCCESS or the error code. 
- * - * The dynamic data in - * the buffers are not used and need to be queued with a separate call - * to ia_css_pipe_enqueue_buffer. - * NOTE: this function will only send start event to corresponding - * thread and will not start SP any more. - */ -enum ia_css_err -ia_css_stream_start(struct ia_css_stream *stream); - -/* @brief Stop the stream. - * @param[in] stream The stream. - * @return IA_CSS_SUCCESS or the error code. - * - * NOTE: this function will send stop event to pipes belong to this - * stream but will not terminate threads. - */ -enum ia_css_err -ia_css_stream_stop(struct ia_css_stream *stream); - -/* @brief Check if a stream has stopped - * @param[in] stream The stream. - * @return boolean flag - * - * This function will check if the stream has stopped and return the correspondent boolean flag. - */ -bool -ia_css_stream_has_stopped(struct ia_css_stream *stream); - -/* @brief destroy a stream according to the stream seed previosly saved in the seed array. - * @param[in] stream The stream. - * @return IA_CSS_SUCCESS (no other errors are generated now) - * - * Destroy the stream and all the pipes related to it. - */ -enum ia_css_err -ia_css_stream_unload(struct ia_css_stream *stream); - -/* @brief Returns stream format - * @param[in] stream The stream. - * @return format of the string - * - * This function will return the stream format. - */ -enum atomisp_input_format -ia_css_stream_get_format(const struct ia_css_stream *stream); - -/* @brief Check if the stream is configured for 2 pixels per clock - * @param[in] stream The stream. - * @return boolean flag - * - * This function will check if the stream is configured for 2 pixels per clock and - * return the correspondent boolean flag. - */ -bool -ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream); - -/* @brief Sets the output frame stride (at the last pipe) - * @param[in] stream The stream - * @param[in] output_padded_width - the output buffer stride. 
- * @return ia_css_err - * - * This function will Set the output frame stride (at the last pipe) - */ -enum ia_css_err -ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width); - -/* @brief Return max number of continuous RAW frames. - * @param[in] stream The stream. - * @param[out] buffer_depth The maximum number of continuous RAW frames. - * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS - * - * This function will return the maximum number of continuous RAW frames - * the system can support. - */ -enum ia_css_err -ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth); - -/* @brief Set nr of continuous RAW frames to use. - * - * @param[in] stream The stream. - * @param[in] buffer_depth Number of frames to set. - * @return IA_CSS_SUCCESS or error code upon error. - * - * Set the number of continuous frames to use during continuous modes. - */ -enum ia_css_err -ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth); - -/* @brief Get number of continuous RAW frames to use. - * @param[in] stream The stream. - * @param[out] buffer_depth The number of frames to use - * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS - * - * Get the currently set number of continuous frames - * to use during continuous modes. - */ -enum ia_css_err -ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth); - -/* ===== CAPTURE ===== */ - -/* @brief Configure the continuous capture - * - * @param[in] stream The stream. - * @param[in] num_captures The number of RAW frames to be processed to - * YUV. Setting this to -1 will make continuous - * capture run until it is stopped. - * This number will also be used to allocate RAW - * buffers. To allow the viewfinder to also - * keep operating, 2 extra buffers will always be - * allocated. - * If the offset is negative and the skip setting - * is greater than 0, additional buffers may be - * needed. 
- * @param[in] skip Skip N frames in between captures. This can be - * used to select a slower capture frame rate than - * the sensor output frame rate. - * @param[in] offset Start the RAW-to-YUV processing at RAW buffer - * with this offset. This allows the user to - * process RAW frames that were captured in the - * past or future. - * @return IA_CSS_SUCCESS or error code upon error. - * - * For example, to capture the current frame plus the 2 previous - * frames and 2 subsequent frames, you would call - * ia_css_stream_capture(5, 0, -2). - */ -enum ia_css_err -ia_css_stream_capture(struct ia_css_stream *stream, - int num_captures, - unsigned int skip, - int offset); - -/* @brief Specify which raw frame to tag based on exp_id found in frame info - * - * @param[in] stream The stream. - * @param[in] exp_id The exposure id of the raw frame to tag. - * - * @return IA_CSS_SUCCESS or error code upon error. - * - * This function allows the user to tag a raw frame based on the exposure id - * found in the viewfinder frames' frame info. - */ -enum ia_css_err -ia_css_stream_capture_frame(struct ia_css_stream *stream, - unsigned int exp_id); - -/* ===== VIDEO ===== */ - -/* @brief Send streaming data into the css input FIFO - * - * @param[in] stream The stream. - * @param[in] data Pointer to the pixels to be send. - * @param[in] width Width of the input frame. - * @param[in] height Height of the input frame. - * @return None - * - * Send streaming data into the css input FIFO. This is for testing purposes - * only. This uses the channel ID and input format as set by the user with - * the regular functions for this. - * This function blocks until the entire frame has been written into the - * input FIFO. 
- * - * Note: - * For higher flexibility the ia_css_stream_send_input_frame is replaced by - * three separate functions: - * 1) ia_css_stream_start_input_frame - * 2) ia_css_stream_send_input_line - * 3) ia_css_stream_end_input_frame - * In this way it is possible to stream multiple frames on different - * channel ID's on a line basis. It will be possible to simulate - * line-interleaved Stereo 3D muxed on 1 mipi port. - * These 3 functions are for testing purpose only and can be used in - * conjunction with ia_css_stream_send_input_frame - */ -void -ia_css_stream_send_input_frame(const struct ia_css_stream *stream, - const unsigned short *data, - unsigned int width, - unsigned int height); - -/* @brief Start an input frame on the CSS input FIFO. - * - * @param[in] stream The stream. - * @return None - * - * Starts the streaming to mipi frame by sending SoF for channel channel_id. - * It will use the input_format and two_pixels_per_clock as provided by - * the user. - * For the "correct" use-case, input_format and two_pixels_per_clock must match - * with the values as set by the user with the regular functions. - * To simulate an error, the user can provide "incorrect" values for - * input_format and/or two_pixels_per_clock. - */ -void -ia_css_stream_start_input_frame(const struct ia_css_stream *stream); - -/* @brief Send a line of input data into the CSS input FIFO. - * - * @param[in] stream The stream. - * @param[in] data Array of the first line of image data. - * @param width The width (in pixels) of the first line. - * @param[in] data2 Array of the second line of image data. - * @param width2 The width (in pixels) of the second line. - * @return None - * - * Sends 1 frame line. Start with SoL followed by width bytes of data, followed - * by width2 bytes of data2 and followed by and EoL - * It will use the input_format and two_pixels_per_clock settings as provided - * with the ia_css_stream_start_input_frame function call. 
- * - * This function blocks until the entire line has been written into the - * input FIFO. - */ -void -ia_css_stream_send_input_line(const struct ia_css_stream *stream, - const unsigned short *data, - unsigned int width, - const unsigned short *data2, - unsigned int width2); - -/* @brief Send a line of input embedded data into the CSS input FIFO. - * - * @param[in] stream Pointer of the stream. - * @param[in] format Format of the embedded data. - * @param[in] data Pointer of the embedded data line. - * @param[in] width The width (in pixels) of the line. - * @return None - * - * Sends one embedded data line to input fifo. Start with SoL followed by - * width bytes of data, and followed by and EoL. - * It will use the two_pixels_per_clock settings as provided with the - * ia_css_stream_start_input_frame function call. - * - * This function blocks until the entire line has been written into the - * input FIFO. - */ -void -ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream, - enum atomisp_input_format format, - const unsigned short *data, - unsigned int width); - -/* @brief End an input frame on the CSS input FIFO. - * - * @param[in] stream The stream. - * @return None - * - * Send the end-of-frame signal into the CSS input FIFO. - */ -void -ia_css_stream_end_input_frame(const struct ia_css_stream *stream); - -/* @brief send a request flash command to SP - * - * @param[in] stream The stream. - * @return None - * - * Driver needs to call this function to send a flash request command - * to SP, SP will be responsible for switching on/off the flash at proper - * time. Due to the SP multi-threading environment, this request may have - * one-frame delay, the driver needs to check the flashed flag in frame info - * to determine which frame is being flashed. - */ -void -ia_css_stream_request_flash(struct ia_css_stream *stream); - -/* @brief Configure a stream with filter coefficients. 
- * @deprecated {Replaced by - * ia_css_pipe_set_isp_config_on_pipe()} - * - * @param[in] stream The stream. - * @param[in] config The set of filter coefficients. - * @param[in] pipe Pipe to be updated when set isp config, NULL means to - * update all pipes in the stream. - * @return IA_CSS_SUCCESS or error code upon error. - * - * This function configures the filter coefficients for an image - * stream. For image pipes that do not execute any ISP filters, this - * function will have no effect. - * It is safe to call this function while the image stream is running, - * in fact this is the expected behavior most of the time. Proper - * resource locking and double buffering is in place to allow for this. - */ -enum ia_css_err -ia_css_stream_set_isp_config_on_pipe(struct ia_css_stream *stream, - const struct ia_css_isp_config *config, - struct ia_css_pipe *pipe); - -/* @brief Configure a stream with filter coefficients. - * @deprecated {Replaced by - * ia_css_pipe_set_isp_config()} - * @param[in] stream The stream. - * @param[in] config The set of filter coefficients. - * @return IA_CSS_SUCCESS or error code upon error. - * - * This function configures the filter coefficients for an image - * stream. For image pipes that do not execute any ISP filters, this - * function will have no effect. All pipes of a stream will be updated. - * See ::ia_css_stream_set_isp_config_on_pipe() for the per-pipe alternative. - * It is safe to call this function while the image stream is running, - * in fact this is the expected behaviour most of the time. Proper - * resource locking and double buffering is in place to allow for this. - */ -enum ia_css_err -ia_css_stream_set_isp_config( - struct ia_css_stream *stream, - const struct ia_css_isp_config *config); - -/* @brief Get selected configuration settings - * @param[in] stream The stream. - * @param[out] config Configuration settings. 
- * @return None - */ -void -ia_css_stream_get_isp_config(const struct ia_css_stream *stream, - struct ia_css_isp_config *config); - -/* @brief allocate continuous raw frames for continuous capture - * @param[in] stream The stream. - * @return IA_CSS_SUCCESS or error code. - * - * because this allocation takes a long time (around 120ms per frame), - * we separate the allocation part and update part to let driver call - * this function without locking. This function is the allocation part - * and next one is update part - */ -enum ia_css_err -ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream); - -/* @brief allocate continuous raw frames for continuous capture - * @param[in] stream The stream. - * @return IA_CSS_SUCCESS or error code. - * - * because this allocation takes a long time (around 120ms per frame), - * we separate the allocation part and update part to let driver call - * this function without locking. This function is the update part - */ -enum ia_css_err -ia_css_update_continuous_frames(struct ia_css_stream *stream); - -/* @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support) - * @param[in] stream The stream. - * @param[in] exp_id exposure id that uniquely identifies the locked Raw Frame Buffer - * @return ia_css_err IA_CSS_SUCCESS or error code - * - * As part of HALv3 Feature requirement, SP locks raw buffer until the Application - * releases its reference to a raw buffer (which are managed by SP), this function allows - * application to explicitly unlock that buffer in SP. - */ -enum ia_css_err -ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id); - -/* @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe - * @param[in] stream The stream. - * @param[in] enable - true, disable - false - * @return None - * - * Enables or disables digital zoom for capture pipe in provided stream, if capture pipe - * exists. This function sets enable_zoom flag in CAPTURE_PP stage of the capture pipe. 
- * In process_zoom_and_motion(), decision to enable or disable zoom for every stage depends - * on this flag. - */ -void -ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable); -#endif /* __IA_CSS_STREAM_PUBLIC_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h deleted file mode 100644 index b256d7c88716..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h +++ /dev/null @@ -1,84 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __IA_CSS_TIMER_H -#define __IA_CSS_TIMER_H - -/* @file - * Timer interface definitions - */ -#include /* for uint32_t */ -#include "ia_css_err.h" - -/* @brief timer reading definition */ -typedef uint32_t clock_value_t; - -/* @brief 32 bit clock tick,(timestamp based on timer-value of CSS-internal timer)*/ -struct ia_css_clock_tick { - clock_value_t ticks; /** measured time in ticks.*/ -}; - -/* @brief TIMER event codes */ -enum ia_css_tm_event { - IA_CSS_TM_EVENT_AFTER_INIT, - /** Timer Event after Initialization */ - IA_CSS_TM_EVENT_MAIN_END, - /** Timer Event after end of Main */ - IA_CSS_TM_EVENT_THREAD_START, - /** Timer Event after thread start */ - IA_CSS_TM_EVENT_FRAME_PROC_START, - /** Timer Event after Frame Process Start */ - IA_CSS_TM_EVENT_FRAME_PROC_END - /** Timer Event after Frame Process End */ -}; - -/* @brief code measurement common struct */ -struct ia_css_time_meas { - clock_value_t start_timer_value; /** measured time in ticks */ - clock_value_t end_timer_value; /** measured time in ticks */ -}; - -/**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */ -#define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t) -/* @brief checks to ensure correct alignment for ia_css_time_meas. 
*/ -#define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \ - + sizeof(clock_value_t)) - -/* @brief API to fetch timer count directly -* -* @param curr_ts [out] measured count value -* @return IA_CSS_SUCCESS if success -* -*/ -enum ia_css_err -ia_css_timer_get_current_tick( - struct ia_css_clock_tick *curr_ts); - -#endif /* __IA_CSS_TIMER_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h deleted file mode 100644 index 81498bd7485b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_TPG_H -#define __IA_CSS_TPG_H - -/* @file - * This file contains support for the test pattern generator (TPG) - */ - -/* Enumerate the TPG IDs. - */ -enum ia_css_tpg_id { - IA_CSS_TPG_ID0, - IA_CSS_TPG_ID1, - IA_CSS_TPG_ID2 -}; - -/** - * Maximum number of TPG IDs. - * - * Make sure the value of this define gets changed to reflect the correct - * number of ia_css_tpg_id enum if you add/delete an item in the enum. - */ -#define N_CSS_TPG_IDS (IA_CSS_TPG_ID2+1) - -/* Enumerate the TPG modes. - */ -enum ia_css_tpg_mode { - IA_CSS_TPG_MODE_RAMP, - IA_CSS_TPG_MODE_CHECKERBOARD, - IA_CSS_TPG_MODE_FRAME_BASED_COLOR, - IA_CSS_TPG_MODE_MONO -}; - -/* @brief Configure the test pattern generator. 
- * - * Configure the Test Pattern Generator, the way these values are used to - * generate the pattern can be seen in the HRT extension for the test pattern - * generator: - * devices/test_pat_gen/hrt/include/test_pat_gen.h: hrt_calc_tpg_data(). - * - * This interface is deprecated, it is not portable -> move to input system API - * -@code -unsigned int test_pattern_value(unsigned int x, unsigned int y) -{ - unsigned int x_val, y_val; - if (x_delta > 0) (x_val = (x << x_delta) & x_mask; - else (x_val = (x >> -x_delta) & x_mask; - if (y_delta > 0) (y_val = (y << y_delta) & y_mask; - else (y_val = (y >> -y_delta) & x_mask; - return (x_val + y_val) & xy_mask; -} -@endcode - */ -struct ia_css_tpg_config { - enum ia_css_tpg_id id; - enum ia_css_tpg_mode mode; - unsigned int x_mask; - int x_delta; - unsigned int y_mask; - int y_delta; - unsigned int xy_mask; -}; - -#endif /* __IA_CSS_TPG_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h deleted file mode 100644 index 259ab3f074ba..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h +++ /dev/null @@ -1,616 +0,0 @@ -/* Release Version: irci_stable_candrpv_0415_20150521_0458 */ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_TYPES_H -#define _IA_CSS_TYPES_H - -/* @file - * This file contains types used for the ia_css parameters. 
- * These types are in a separate file because they are expected - * to be used in software layers that do not access the CSS API - * directly but still need to forward parameters for it. - */ - -#include - -#include "ia_css_frac.h" - -#include "isp/kernels/aa/aa_2/ia_css_aa2_types.h" -#include "isp/kernels/anr/anr_1.0/ia_css_anr_types.h" -#include "isp/kernels/anr/anr_2/ia_css_anr2_types.h" -#include "isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h" -#include "isp/kernels/csc/csc_1.0/ia_css_csc_types.h" -#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" -#include "isp/kernels/dp/dp_1.0/ia_css_dp_types.h" -#include "isp/kernels/de/de_1.0/ia_css_de_types.h" -#include "isp/kernels/de/de_2/ia_css_de2_types.h" -#include "isp/kernels/fc/fc_1.0/ia_css_formats_types.h" -#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h" -#include "isp/kernels/gc/gc_1.0/ia_css_gc_types.h" -#include "isp/kernels/gc/gc_2/ia_css_gc2_types.h" -#include "isp/kernels/macc/macc_1.0/ia_css_macc_types.h" -#include "isp/kernels/ob/ob_1.0/ia_css_ob_types.h" -#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h" -#include "isp/kernels/sc/sc_1.0/ia_css_sc_types.h" -#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h" -#include "isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h" -#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h" -#include "isp/kernels/wb/wb_1.0/ia_css_wb_types.h" -#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h" -#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h" -#ifdef ISP2401 -#include "isp/kernels/tnr/tnr3/ia_css_tnr3_types.h" -#endif -#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h" -#include "isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h" -#include "isp/kernels/output/output_1.0/ia_css_output_types.h" - -#define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED -/** Should be removed after Driver adaptation will be done */ - -#define IA_CSS_VERSION_MAJOR 2 -#define IA_CSS_VERSION_MINOR 0 -#define IA_CSS_VERSION_REVISION 2 - -#define IA_CSS_MORPH_TABLE_NUM_PLANES 6 - -/* Min and 
max exposure IDs. These macros are here to allow - * the drivers to get this information. Changing these macros - * constitutes a CSS API change. */ -#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1 /** Minimum exposure ID */ -#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /** Maximum exposure ID */ - -/* opaque types */ -struct ia_css_isp_parameters; -struct ia_css_pipe; -struct ia_css_memory_offsets; -struct ia_css_config_memory_offsets; -struct ia_css_state_memory_offsets; - -/* Virtual address within the CSS address space. */ -typedef uint32_t ia_css_ptr; - -/* Generic resolution structure. - */ -struct ia_css_resolution { - uint32_t width; /** Width */ - uint32_t height; /** Height */ -}; - -/* Generic coordinate structure. - */ -struct ia_css_coordinate { - int32_t x; /** Value of a coordinate on the horizontal axis */ - int32_t y; /** Value of a coordinate on the vertical axis */ -}; - -/* Vector with signed values. This is used to indicate motion for - * Digital Image Stabilization. - */ -struct ia_css_vector { - int32_t x; /** horizontal motion (in pixels) */ - int32_t y; /** vertical motion (in pixels) */ -}; - -/* Short hands */ -#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0 -#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0 - -/* CSS data descriptor */ -struct ia_css_data { - ia_css_ptr address; /** CSS virtual address */ - uint32_t size; /** Disabled if 0 */ -}; - -/* Host data descriptor */ -struct ia_css_host_data { - char *address; /** Host address */ - uint32_t size; /** Disabled if 0 */ -}; - -/* ISP data descriptor */ -struct ia_css_isp_data { - uint32_t address; /** ISP address */ - uint32_t size; /** Disabled if 0 */ -}; - -/* Shading Correction types. */ -enum ia_css_shading_correction_type { -#ifndef ISP2401 - IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */ -#else - IA_CSS_SHADING_CORRECTION_NONE, /** Shading Correction is not processed in the pipe. 
*/ - IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */ -#endif - - /** More shading correction types can be added in the future. */ -}; - -/* Shading Correction information. */ -struct ia_css_shading_info { - enum ia_css_shading_correction_type type; /** Shading Correction type. */ - - union { /* Shading Correction information of each Shading Correction types. */ - - /* Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1. - * - * This structure contains the information necessary to generate - * the shading table required in the isp. - * This structure is filled in the css, - * and the driver needs to get it to generate the shading table. - * - * Before the shading correction is applied, NxN-filter and/or scaling - * are applied in the isp, depending on the isp binaries. - * Then, these should be considered in generating the shading table. - * - Bad pixels on left/top sides generated by NxN-filter - * (Bad pixels are NOT considered currently, - * because they are subtle.) - * - Down-scaling/Up-scaling factor - * - * Shading correction is applied to the area - * which has real sensor data and margin. - * Then, the shading table should cover the area including margin. - * This structure has this information. - * - Origin coordinate of bayer (real sensor data) - * on the shading table - * - * ------------------------ISP 2401----------------------- - * - * the shading table directly required from ISP. - * This structure is filled in CSS, and the driver needs to get it to generate the shading table. - * - * The shading correction is applied to the bayer area which contains sensor data and padding data. - * The shading table should cover this bayer area. - * - * The shading table size directly required from ISP is expressed by these parameters. - * 1. uint32_t num_hor_grids; - * 2. uint32_t num_ver_grids; - * 3. 
uint32_t bqs_per_grid_cell; - * - * In some isp binaries, the bayer scaling is applied before the shading correction is applied. - * Then, this scaling factor should be considered in generating the shading table. - * The scaling factor is expressed by these parameters. - * 4. uint32_t bayer_scale_hor_ratio_in; - * 5. uint32_t bayer_scale_hor_ratio_out; - * 6. uint32_t bayer_scale_ver_ratio_in; - * 7. uint32_t bayer_scale_ver_ratio_out; - * - * The sensor data size inputted to ISP is expressed by this parameter. - * This is the size BEFORE the bayer scaling is applied. - * 8. struct ia_css_resolution isp_input_sensor_data_res_bqs; - * - * The origin of the sensor data area positioned on the shading table at the shading correction - * is expressed by this parameter. - * The size of this area assumes the size AFTER the bayer scaling is applied - * to the isp_input_sensor_data_resolution_bqs. - * 9. struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl; - * - * ****** Definitions of the shading table and the sensor data at the shading correction ****** - * - * (0,0)--------------------- TW ------------------------------- - * | shading table | - * | (ox,oy)---------- W -------------------------- | - * | | sensor data | | - * | | | | - * TH H sensor data center | | - * | | (cx,cy) | | - * | | | | - * | | | | - * | | | | - * | ------------------------------------------- | - * | | - * ---------------------------------------------------------- - * - * Example of still mode for output 1080p: - * - * num_hor_grids = 66 - * num_ver_grids = 37 - * bqs_per_grid_cell = 16 - * bayer_scale_hor_ratio_in = 1 - * bayer_scale_hor_ratio_out = 1 - * bayer_scale_ver_ratio_in = 1 - * bayer_scale_ver_ratio_out = 1 - * isp_input_sensor_data_resolution_bqs = {966, 546} - * sensor_data_origin_bqs_on_sctbl = {61, 15} - * - * TW, TH [bqs]: width and height of shading table - * TW = (num_hor_grids - 1) * bqs_per_grid_cell = (66 - 1) * 16 = 1040 - * TH = (num_ver_grids - 1) * bqs_per_grid_cell 
= (37 - 1) * 16 = 576 - * - * W, H [bqs]: width and height of sensor data at shading correction - * W = sensor_data_res_bqs.width - * = isp_input_sensor_data_res_bqs.width - * * bayer_scale_hor_ratio_out / bayer_scale_hor_ratio_in + 0.5 = 966 - * H = sensor_data_res_bqs.height - * = isp_input_sensor_data_res_bqs.height - * * bayer_scale_ver_ratio_out / bayer_scale_ver_ratio_in + 0.5 = 546 - * - * (ox, oy) [bqs]: origin of sensor data positioned on shading table at shading correction - * ox = sensor_data_origin_bqs_on_sctbl.x = 61 - * oy = sensor_data_origin_bqs_on_sctbl.y = 15 - * - * (cx, cy) [bqs]: center of sensor data positioned on shading table at shading correction - * cx = ox + W/2 = 61 + 966/2 = 544 - * cy = oy + H/2 = 15 + 546/2 = 288 - * - * ****** Relation between the shading table and the sensor data ****** - * - * The origin of the sensor data should be on the shading table. - * 0 <= ox < TW, 0 <= oy < TH - * - * ****** How to center the shading table on the sensor data ****** - * - * To center the shading table on the sensor data, - * CSS decides the shading table size so that a certain grid point is positioned - * on the center of the sensor data at the shading correction. - * CSS expects the shading center is set on this grid point - * when the shading table data is calculated in AIC. - * - * W, H [bqs]: width and height of sensor data at shading correction - * W = sensor_data_res_bqs.width - * H = sensor_data_res_bqs.height - * - * (cx, cy) [bqs]: center of sensor data positioned on shading table at shading correction - * cx = sensor_data_origin_bqs_on_sctbl.x + W/2 - * cy = sensor_data_origin_bqs_on_sctbl.y + H/2 - * - * CSS decides the shading table size and the sensor data position - * so that the (cx, cy) satisfies this condition. - * mod(cx, bqs_per_grid_cell) = 0 - * mod(cy, bqs_per_grid_cell) = 0 - * - * ****** How to change the sensor data size by processes in the driver and ISP ****** - * - * 1. 
sensor data size: Physical sensor size - * (The struct ia_css_shading_info does not have this information.) - * 2. process: Driver applies the sensor cropping/binning/scaling to physical sensor size. - * 3. sensor data size: ISP input size (== shading_info.isp_input_sensor_data_res_bqs) - * (ISP assumes the ISP input sensor data is centered on the physical sensor.) - * 4. process: ISP applies the bayer scaling by the factor of shading_info.bayer_scale_*. - * 5. sensor data size: Scaling factor * ISP input size (== shading_info.sensor_data_res_bqs) - * 6. process: ISP applies the shading correction. - * - * ISP block: SC1 - * ISP1: SC1 is used. - * ISP2: SC1 is used. - */ - struct { -#ifndef ISP2401 - uint32_t enable; /** Shading correction enabled. - 0:disabled, 1:enabled */ - uint32_t num_hor_grids; /** Number of data points per line - per color on shading table. */ - uint32_t num_ver_grids; /** Number of lines of data points - per color on shading table. */ - uint32_t bqs_per_grid_cell; /** Grid cell size - in BQ(Bayer Quad) unit. - (1BQ means {Gr,R,B,Gb}(2x2 pixels).) - Valid values are 8,16,32,64. */ -#else - uint32_t num_hor_grids; /** Number of data points per line per color on shading table. */ - uint32_t num_ver_grids; /** Number of lines of data points per color on shading table. */ - uint32_t bqs_per_grid_cell; /** Grid cell size in BQ unit. - NOTE: bqs = size in BQ(Bayer Quad) unit. - 1BQ means {Gr,R,B,Gb} (2x2 pixels). - Horizontal 1 bqs corresponds to horizontal 2 pixels. - Vertical 1 bqs corresponds to vertical 2 pixels. */ -#endif - uint32_t bayer_scale_hor_ratio_in; - uint32_t bayer_scale_hor_ratio_out; -#ifndef ISP2401 - /** Horizontal ratio of bayer scaling - between input width and output width, for the scaling - which should be done before shading correction. 
- output_width = input_width * bayer_scale_hor_ratio_out - / bayer_scale_hor_ratio_in */ -#else - /** Horizontal ratio of bayer scaling between input width and output width, - for the scaling which should be done before shading correction. - output_width = input_width * bayer_scale_hor_ratio_out - / bayer_scale_hor_ratio_in + 0.5 */ -#endif - uint32_t bayer_scale_ver_ratio_in; - uint32_t bayer_scale_ver_ratio_out; -#ifndef ISP2401 - /** Vertical ratio of bayer scaling - between input height and output height, for the scaling - which should be done before shading correction. - output_height = input_height * bayer_scale_ver_ratio_out - / bayer_scale_ver_ratio_in */ - uint32_t sc_bayer_origin_x_bqs_on_shading_table; - /** X coordinate (in bqs) of bayer origin on shading table. - This indicates the left-most pixel of bayer - (not include margin) inputted to the shading correction. - This corresponds to the left-most pixel of bayer - inputted to isp from sensor. */ - uint32_t sc_bayer_origin_y_bqs_on_shading_table; - /** Y coordinate (in bqs) of bayer origin on shading table. - This indicates the top pixel of bayer - (not include margin) inputted to the shading correction. - This corresponds to the top pixel of bayer - inputted to isp from sensor. */ -#else - /** Vertical ratio of bayer scaling between input height and output height, - for the scaling which should be done before shading correction. - output_height = input_height * bayer_scale_ver_ratio_out - / bayer_scale_ver_ratio_in + 0.5 */ - struct ia_css_resolution isp_input_sensor_data_res_bqs; - /** Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling. - NOTE: This is NOT the size of the physical sensor size. - CSS requests the driver that ISP inputs sensor data - by the size of isp_input_sensor_data_res_bqs. - The driver sends the sensor data to ISP, - after the adequate cropping/binning/scaling - are applied to the physical sensor data area. 
- ISP assumes the area of isp_input_sensor_data_res_bqs - is centered on the physical sensor. */ - struct ia_css_resolution sensor_data_res_bqs; - /** Sensor data size (in bqs) at shading correction. - This is the size AFTER bayer scaling. */ - struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl; - /** Origin of sensor data area positioned on shading table at shading correction. - The coordinate x,y should be positive values. */ -#endif - } type_1; - - /** More structures can be added here when more shading correction types will be added - in the future. */ - } info; -}; - -/* Default Shading Correction information of Shading Correction Type 1. */ -#define DEFAULT_SHADING_INFO_TYPE_1 \ -(struct ia_css_shading_info) { \ - .type = IA_CSS_SHADING_CORRECTION_TYPE_1, \ - .info = { \ - .type_1 = { \ - .bayer_scale_hor_ratio_in = 1, \ - .bayer_scale_hor_ratio_out = 1, \ - .bayer_scale_ver_ratio_in = 1, \ - .bayer_scale_ver_ratio_out = 1, \ - } \ - } \ -} - -/* Default Shading Correction information. */ -#define DEFAULT_SHADING_INFO DEFAULT_SHADING_INFO_TYPE_1 - -/* structure that describes the 3A and DIS grids */ -struct ia_css_grid_info { - /* \name ISP input size - * that is visible for user - * @{ - */ - uint32_t isp_in_width; - uint32_t isp_in_height; - /* @}*/ - - struct ia_css_3a_grid_info s3a_grid; /** 3A grid info */ - union ia_css_dvs_grid_u dvs_grid; - /** All types of DVS statistics grid info union */ - - enum ia_css_vamem_type vamem_type; -}; - -/* defaults for ia_css_grid_info structs */ -#define DEFAULT_GRID_INFO \ -(struct ia_css_grid_info) { \ - .dvs_grid = DEFAULT_DVS_GRID_INFO, \ - .vamem_type = IA_CSS_VAMEM_TYPE_1 \ -} - -/* Morphing table, used for geometric distortion and chromatic abberration - * correction (GDCAC, also called GDC). - * This table describes the imperfections introduced by the lens, the - * advanced ISP can correct for these imperfections using this table. 
- */ -struct ia_css_morph_table { - uint32_t enable; /** To disable GDC, set this field to false. The - coordinates fields can be set to NULL in this case. */ - uint32_t height; /** Table height */ - uint32_t width; /** Table width */ - uint16_t *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES]; - /** X coordinates that describe the sensor imperfection */ - uint16_t *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES]; - /** Y coordinates that describe the sensor imperfection */ -}; - -struct ia_css_dvs_6axis_config { - unsigned int exp_id; - /** Exposure ID, see ia_css_event_public.h for more detail */ - uint32_t width_y; - uint32_t height_y; - uint32_t width_uv; - uint32_t height_uv; - uint32_t *xcoords_y; - uint32_t *ycoords_y; - uint32_t *xcoords_uv; - uint32_t *ycoords_uv; -}; - -/** - * This specifies the coordinates (x,y) - */ -struct ia_css_point { - int32_t x; /** x coordinate */ - int32_t y; /** y coordinate */ -}; - -/** - * This specifies the region - */ -struct ia_css_region { - struct ia_css_point origin; /** Starting point coordinates for the region */ - struct ia_css_resolution resolution; /** Region resolution */ -}; - -/** - * Digital zoom: - * This feature is currently available only for video, but will become - * available for preview and capture as well. - * Set the digital zoom factor, this is a logarithmic scale. The actual zoom - * factor will be 64/x. - * Setting dx or dy to 0 disables digital zoom for that direction. - * New API change for Digital zoom:(added struct ia_css_region zoom_region) - * zoom_region specifies the origin of the zoom region and width and - * height of that region. - * origin : This is the coordinate (x,y) within the effective input resolution - * of the stream. where, x >= 0 and y >= 0. (0,0) maps to the upper left of the - * effective input resolution. - * resolution : This is resolution of zoom region. 
- * where, x + width <= effective input width - * y + height <= effective input height - */ -struct ia_css_dz_config { - uint32_t dx; /** Horizontal zoom factor */ - uint32_t dy; /** Vertical zoom factor */ - struct ia_css_region zoom_region; /** region for zoom */ -}; - -/* The still capture mode, this can be RAW (simply copy sensor input to DDR), - * Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR). - */ -enum ia_css_capture_mode { - IA_CSS_CAPTURE_MODE_RAW, /** no processing, copy data only */ - IA_CSS_CAPTURE_MODE_BAYER, /** bayer processing, up to demosaic */ - IA_CSS_CAPTURE_MODE_PRIMARY, /** primary ISP */ - IA_CSS_CAPTURE_MODE_ADVANCED, /** advanced ISP (GDC) */ - IA_CSS_CAPTURE_MODE_LOW_LIGHT /** low light ISP (ANR) */ -}; - -struct ia_css_capture_config { - enum ia_css_capture_mode mode; /** Still capture mode */ - uint32_t enable_xnr; /** Enable/disable XNR */ - uint32_t enable_raw_output; - bool enable_capture_pp_bli; /** Enable capture_pp_bli mode */ -}; - -/* default settings for ia_css_capture_config structs */ -#define DEFAULT_CAPTURE_CONFIG \ -(struct ia_css_capture_config) { \ - .mode = IA_CSS_CAPTURE_MODE_PRIMARY, \ -} - - -/* ISP filter configuration. This is a collection of configurations - * for each of the ISP filters (modules). - * - * NOTE! The contents of all pointers is copied when get or set with the - * exception of the shading and morph tables. For these we only copy the - * pointer, so the caller must make sure the memory contents of these pointers - * remain valid as long as they are used by the CSS. This will be fixed in the - * future by copying the contents instead of just the pointer. - * - * Comment: - * ["ISP block", 1&2] : ISP block is used both for ISP1 and ISP2. - * ["ISP block", 1only] : ISP block is used only for ISP1. - * ["ISP block", 2only] : ISP block is used only for ISP2. 
- */ -struct ia_css_isp_config { - struct ia_css_wb_config *wb_config; /** White Balance - [WB1, 1&2] */ - struct ia_css_cc_config *cc_config; /** Color Correction - [CSC1, 1only] */ - struct ia_css_tnr_config *tnr_config; /** Temporal Noise Reduction - [TNR1, 1&2] */ - struct ia_css_ecd_config *ecd_config; /** Eigen Color Demosaicing - [DE2, 2only] */ - struct ia_css_ynr_config *ynr_config; /** Y(Luma) Noise Reduction - [YNR2&YEE2, 2only] */ - struct ia_css_fc_config *fc_config; /** Fringe Control - [FC2, 2only] */ - struct ia_css_formats_config *formats_config; /** Formats Control for main output - [FORMATS, 1&2] */ - struct ia_css_cnr_config *cnr_config; /** Chroma Noise Reduction - [CNR2, 2only] */ - struct ia_css_macc_config *macc_config; /** MACC - [MACC2, 2only] */ - struct ia_css_ctc_config *ctc_config; /** Chroma Tone Control - [CTC2, 2only] */ - struct ia_css_aa_config *aa_config; /** YUV Anti-Aliasing - [AA2, 2only] - (not used currently) */ - struct ia_css_aa_config *baa_config; /** Bayer Anti-Aliasing - [BAA2, 1&2] */ - struct ia_css_ce_config *ce_config; /** Chroma Enhancement - [CE1, 1only] */ - struct ia_css_dvs_6axis_config *dvs_6axis_config; - struct ia_css_ob_config *ob_config; /** Objective Black - [OB1, 1&2] */ - struct ia_css_dp_config *dp_config; /** Defect Pixel Correction - [DPC1/DPC2, 1&2] */ - struct ia_css_nr_config *nr_config; /** Noise Reduction - [BNR1&YNR1&CNR1, 1&2]*/ - struct ia_css_ee_config *ee_config; /** Edge Enhancement - [YEE1, 1&2] */ - struct ia_css_de_config *de_config; /** Demosaic - [DE1, 1only] */ - struct ia_css_gc_config *gc_config; /** Gamma Correction (for YUV) - [GC1, 1only] */ - struct ia_css_anr_config *anr_config; /** Advanced Noise Reduction */ - struct ia_css_3a_config *s3a_config; /** 3A Statistics config */ - struct ia_css_xnr_config *xnr_config; /** eXtra Noise Reduction */ - struct ia_css_dz_config *dz_config; /** Digital Zoom */ - struct ia_css_cc_config *yuv2rgb_cc_config; /** Color Correction - [CCM2, 
2only] */ - struct ia_css_cc_config *rgb2yuv_cc_config; /** Color Correction - [CSC2, 2only] */ - struct ia_css_macc_table *macc_table; /** MACC - [MACC1/MACC2, 1&2]*/ - struct ia_css_gamma_table *gamma_table; /** Gamma Correction (for YUV) - [GC1, 1only] */ - struct ia_css_ctc_table *ctc_table; /** Chroma Tone Control - [CTC1, 1only] */ - - /* \deprecated */ - struct ia_css_xnr_table *xnr_table; /** eXtra Noise Reduction - [XNR1, 1&2] */ - struct ia_css_rgb_gamma_table *r_gamma_table;/** sRGB Gamma Correction - [GC2, 2only] */ - struct ia_css_rgb_gamma_table *g_gamma_table;/** sRGB Gamma Correction - [GC2, 2only] */ - struct ia_css_rgb_gamma_table *b_gamma_table;/** sRGB Gamma Correction - [GC2, 2only] */ - struct ia_css_vector *motion_vector; /** For 2-axis DVS */ - struct ia_css_shading_table *shading_table; - struct ia_css_morph_table *morph_table; - struct ia_css_dvs_coefficients *dvs_coefs; /** DVS 1.0 coefficients */ - struct ia_css_dvs2_coefficients *dvs2_coefs; /** DVS 2.0 coefficients */ - struct ia_css_capture_config *capture_config; - struct ia_css_anr_thres *anr_thres; - /* @deprecated{Old shading settings, see bugzilla bz675 for details} */ - struct ia_css_shading_settings *shading_settings; - struct ia_css_xnr3_config *xnr3_config; /** eXtreme Noise Reduction v3 */ - /* comment from Lasse: Be aware how this feature will affect coordinate - * normalization in different parts of the system. (e.g. face detection, - * touch focus, 3A statistics and windows of interest, shading correction, - * DVS, GDC) from IQ tool level and application level down-to ISP FW level. - * the risk for regression is not in the individual blocks, but how they - * integrate together. 
*/ - struct ia_css_output_config *output_config; /** Main Output Mirroring, flipping */ - -#ifdef ISP2401 - struct ia_css_tnr3_kernel_config *tnr3_config; /** TNR3 config */ -#endif - struct ia_css_scaler_config *scaler_config; /** Skylake: scaler config (optional) */ - struct ia_css_formats_config *formats_config_display;/** Formats control for viewfinder/display output (optional) - [OSYS, n/a] */ - struct ia_css_output_config *output_config_display; /** Viewfinder/display output mirroring, flipping (optional) */ - - struct ia_css_frame *output_frame; /** Output frame the config is to be applied to (optional) */ - uint32_t isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */ -}; - -#endif /* _IA_CSS_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h deleted file mode 100644 index 1e88901e0b82..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_VERSION_H -#define __IA_CSS_VERSION_H - -/* @file - * This file contains functions to retrieve CSS-API version information - */ - -#include - -/* a common size for the version arrays */ -#define MAX_VERSION_SIZE 500 - -/* @brief Retrieves the current CSS version - * @param[out] version A pointer to a buffer where to put the generated - * version string. NULL is ignored. - * @param[in] max_size Size of the version buffer. If version string - * would be larger than max_size, an error is - * returned by this function. - * - * This function generates and returns the version string. If FW is loaded, it - * attaches the FW version. - */ -enum ia_css_err -ia_css_get_version(char *version, int max_size); - -#endif /* __IA_CSS_VERSION_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version_data.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version_data.h deleted file mode 100644 index aad592cb86ef..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version_data.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -// -// This file contains the version data for the CSS -// -// === Do not change - automatically generated === -// - -#ifndef __IA_CSS_VERSION_DATA_H -#define __IA_CSS_VERSION_DATA_H - - -#ifndef ISP2401 -#define CSS_VERSION_STRING "REL:20150521_21.4_0539; API:2.1.15.3; GIT:irci_candrpv_0415_20150504_35b345#35b345be52ac575f8934abb3a88fea26a94e7343; SDK:/nfs/iir/disks/iir_hivepackages_003/iir_hivepkgs_disk017/Css_Mizuchi/packages/Css_Mizuchi/int_css_mizuchi_20140829_1053; USER:viedifw; " -#else -#define CSS_VERSION_STRING "REL:20150911_37.5_1652; API:2.1.20.9; GIT:irci___#ebf437d53a8951bb7ff6d13fdb7270dab393a92a; SDK:; USER:viedifw; " -#endif - - -#endif - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c deleted file mode 100644 index f7dd256b6f7a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" -#endif - -#include "ia_css_aa2.host.h" - -/* YUV Anti-Aliasing configuration. */ -const struct ia_css_aa_config default_aa_config = { - 8191 /* default should be 0 */ -}; - -/* Bayer Anti-Aliasing configuration. 
*/ -const struct ia_css_aa_config default_baa_config = { - 8191 /* default should be 0 */ -}; - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h deleted file mode 100644 index 71587d85ff2d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_AA_HOST_H -#define __IA_CSS_AA_HOST_H - -#include "ia_css_aa2_types.h" -#include "ia_css_aa2_param.h" - -/* YUV Anti-Aliasing configuration. */ -extern const struct ia_css_aa_config default_aa_config; - -/* Bayer Anti-Aliasing configuration. */ -extern const struct ia_css_aa_config default_baa_config; - -#endif /* __IA_CSS_AA_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h deleted file mode 100644 index dbab4d6c6cd5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_AA_PARAM_H -#define __IA_CSS_AA_PARAM_H - -#include "type_support.h" - -struct sh_css_isp_aa_params { - int32_t strength; -}; - -#endif /* __IA_CSS_AA_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h deleted file mode 100644 index 0b95bf9b9aaf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_AA2_TYPES_H -#define __IA_CSS_AA2_TYPES_H - -/* @file -* CSS-API header file for Anti-Aliasing parameters. -*/ - - -/* Anti-Aliasing configuration. - * - * This structure is used both for YUV AA and Bayer AA. - * - * 1. YUV Anti-Aliasing - * struct ia_css_aa_config *aa_config - * - * ISP block: AA2 - * (ISP1: AA2 is not used.) - * ISP2: AA2 should be used. But, AA2 is not used currently. 
- * - * 2. Bayer Anti-Aliasing - * struct ia_css_aa_config *baa_config - * - * ISP block: BAA2 - * ISP1: BAA2 is used. - * ISP2: BAA2 is used. - */ -struct ia_css_aa_config { - uint16_t strength; /** Strength of the filter. - u0.13, [0,8191], - default/ineffective 0 */ -}; - -#endif /* __IA_CSS_AA2_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.c deleted file mode 100644 index edc4f1ae6d5e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.c +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" - -#include "ia_css_anr.host.h" - -const struct ia_css_anr_config default_anr_config = { - 10, - { 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4, - 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4, - 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4, - 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4}, - {10, 20, 30} -}; - -void -ia_css_anr_encode( - struct sh_css_isp_anr_params *to, - const struct ia_css_anr_config *from, - unsigned size) -{ - (void)size; - to->threshold = from->threshold; -} - -void -ia_css_anr_dump( - const struct sh_css_isp_anr_params *anr, - unsigned level) -{ - if (!anr) return; - ia_css_debug_dtrace(level, "Advance Noise Reduction:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "anr_threshold", anr->threshold); -} - -void -ia_css_anr_debug_dtrace( - const struct ia_css_anr_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.threshold=%d\n", - config->threshold); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.h deleted file mode 100644 index 29566c07653c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_ANR_HOST_H -#define __IA_CSS_ANR_HOST_H - -#include "ia_css_anr_types.h" -#include "ia_css_anr_param.h" - -extern const struct ia_css_anr_config default_anr_config; - -void -ia_css_anr_encode( - struct sh_css_isp_anr_params *to, - const struct ia_css_anr_config *from, - unsigned size); - -void -ia_css_anr_dump( - const struct sh_css_isp_anr_params *anr, - unsigned level); - -void -ia_css_anr_debug_dtrace( - const struct ia_css_anr_config *config, unsigned level) -; - -#endif /* __IA_CSS_ANR_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_param.h deleted file mode 100644 index 2621b920c3dc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_param.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_ANR_PARAM_H -#define __IA_CSS_ANR_PARAM_H - -#include "type_support.h" - -/* ANR (Advanced Noise Reduction) */ -struct sh_css_isp_anr_params { - int32_t threshold; -}; - -#endif /* __IA_CSS_ANR_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h deleted file mode 100644 index dc317a857369..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_ANR_TYPES_H -#define __IA_CSS_ANR_TYPES_H - -/* @file -* CSS-API header file for Advanced Noise Reduction kernel v1 -*/ - -/* Application specific DMA settings */ -#define ANR_BPP 10 -#define ANR_ELEMENT_BITS ((CEIL_DIV(ANR_BPP, 8))*8) - -/* Advanced Noise Reduction configuration. - * This is also known as Low-Light. 
- */ -struct ia_css_anr_config { - int32_t threshold; /** Threshold */ - int32_t thresholds[4*4*4]; - int32_t factors[3]; -}; - -#endif /* __IA_CSS_ANR_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.c deleted file mode 100644 index b338c434453e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.c +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" - -#include "ia_css_anr2.host.h" - -void -ia_css_anr2_vmem_encode( - struct ia_css_isp_anr2_params *to, - const struct ia_css_anr_thres *from, - size_t size) -{ - unsigned i; - - (void)size; - for (i = 0; i < ANR_PARAM_SIZE; i++) { - unsigned j; - for (j = 0; j < ISP_VEC_NELEMS; j++) { - to->data[i][j] = from->data[i*ISP_VEC_NELEMS+j]; - } - } -} - -void -ia_css_anr2_debug_dtrace( - const struct ia_css_anr_thres *config, - unsigned level) -{ - (void)config; - (void)level; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.h deleted file mode 100644 index 83c37e328591..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_ANR2_HOST_H -#define __IA_CSS_ANR2_HOST_H - -#include "sh_css_params.h" - -#include "ia_css_anr2_types.h" -#include "ia_css_anr_param.h" -#include "ia_css_anr2_table.host.h" - -void -ia_css_anr2_vmem_encode( - struct ia_css_isp_anr2_params *to, - const struct ia_css_anr_thres *from, - size_t size); - -void -ia_css_anr2_debug_dtrace( - const struct ia_css_anr_thres *config, unsigned level) -; - -#endif /* __IA_CSS_ANR2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c deleted file mode 100644 index 2de51fe45623..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "system_global.h" -#include "ia_css_types.h" -#include "ia_css_anr2_table.host.h" - -#if 1 -const struct ia_css_anr_thres default_anr_thres = { -{128, 384, 640, 896, 896, 640, 384, 128, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 128, 384, 640, 896, 896, 640, 384, 128, -0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, -0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, -0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, -30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, -60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, -90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 
120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, -10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, -20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, -30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, -20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, -40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, -60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120} -}; -#else -const struct ia_css_anr_thres default_anr_thres = { -{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} -}; -#endif - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h deleted file mode 100644 index 534119e064c1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_ANR2_TABLE_HOST_H -#define __IA_CSS_ANR2_TABLE_HOST_H - -#include "ia_css_anr2_types.h" - -extern const struct ia_css_anr_thres default_anr_thres; - -#endif /* __IA_CSS_ANR2_TABLE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h deleted file mode 100644 index 9b611315392c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_ANR2_TYPES_H -#define __IA_CSS_ANR2_TYPES_H - -/* @file -* CSS-API header file for Advanced Noise Reduction kernel v2 -*/ - -#include "type_support.h" - -#define ANR_PARAM_SIZE 13 - -/* Advanced Noise Reduction (ANR) thresholds */ -struct ia_css_anr_thres { - int16_t data[13*64]; -}; - -#endif /* __IA_CSS_ANR2_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h deleted file mode 100644 index 312141793fd2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_ANR2_PARAM_H -#define __IA_CSS_ANR2_PARAM_H - -#include "vmem.h" -#include "ia_css_anr2_types.h" - -/* Advanced Noise Reduction (ANR) thresholds */ - -struct ia_css_isp_anr2_params { - VMEM_ARRAY(data, ANR_PARAM_SIZE*ISP_VEC_NELEMS); -}; - -#endif /* __IA_CSS_ANR2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c deleted file mode 100644 index 99c80d2d8f11..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#if !defined(HAS_NO_HMEM) - -#include "memory_access.h" -#include "ia_css_types.h" -#include "sh_css_internal.h" -#include "assert_support.h" -#include "sh_css_frac.h" - -#include "ia_css_bh.host.h" - -void -ia_css_bh_hmem_decode( - struct ia_css_3a_rgby_output *out_ptr, - const struct ia_css_bh_table *hmem_buf) -{ - int i; - - /* - * No weighted histogram, hence no grid definition - */ - if(!hmem_buf) - return; - assert(sizeof_hmem(HMEM0_ID) == sizeof(*hmem_buf)); - - /* Deinterleave */ - for (i = 0; i < HMEM_UNIT_SIZE; i++) { - out_ptr[i].r = hmem_buf->hmem[BH_COLOR_R][i]; - out_ptr[i].g = hmem_buf->hmem[BH_COLOR_G][i]; - out_ptr[i].b = hmem_buf->hmem[BH_COLOR_B][i]; - out_ptr[i].y = hmem_buf->hmem[BH_COLOR_Y][i]; - /* sh_css_print ("hmem[%d] = %d, %d, %d, %d\n", - i, out_ptr[i].r, out_ptr[i].g, out_ptr[i].b, out_ptr[i].y); */ - } -} - -void -ia_css_bh_encode( - struct sh_css_isp_bh_params *to, - const struct ia_css_3a_config *from, - unsigned size) -{ - (void)size; - /* coefficients to calculate Y */ - to->y_coef_r = - uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT); - to->y_coef_g = - uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT); - to->y_coef_b = - uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT); -} - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h deleted file mode 100644 index cbb09299cf21..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_BH_HOST_H -#define __IA_CSS_BH_HOST_H - -#include "ia_css_bh_param.h" -#include "s3a/s3a_1.0/ia_css_s3a_types.h" - -void -ia_css_bh_hmem_decode( - struct ia_css_3a_rgby_output *out_ptr, - const struct ia_css_bh_table *hmem_buf); - -void -ia_css_bh_encode( - struct sh_css_isp_bh_params *to, - const struct ia_css_3a_config *from, - unsigned size); - -#endif /* __IA_CSS_BH_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h deleted file mode 100644 index b0a8ef3862e0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_HB_PARAM_H -#define __IA_CSS_HB_PARAM_H - -#include "type_support.h" - -#ifndef PIPE_GENERATION -#define __INLINE_HMEM__ -#include "hmem.h" -#endif - -#include "ia_css_bh_types.h" - -/* AE (3A Support) */ -struct sh_css_isp_bh_params { - /* coefficients to calculate Y */ - int32_t y_coef_r; - int32_t y_coef_g; - int32_t y_coef_b; -}; - -/* This should be hmem_data_t, but that breaks the pipe generator */ -struct sh_css_isp_bh_hmem_params { - uint32_t bh[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE]; -}; - -#endif /* __IA_CSS_HB_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h deleted file mode 100644 index ec1688e7352d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_BH_TYPES_H -#define __IA_CSS_BH_TYPES_H - -/* Number of elements in the BH table. 
- * Should be consistent with hmem.h - */ -#define IA_CSS_HMEM_BH_TABLE_SIZE ISP_HIST_DEPTH -#define IA_CSS_HMEM_BH_UNIT_SIZE (ISP_HIST_DEPTH/ISP_HIST_COMPONENTS) - -#define BH_COLOR_R (0) -#define BH_COLOR_G (1) -#define BH_COLOR_B (2) -#define BH_COLOR_Y (3) -#define BH_COLOR_NUM (4) - -/* BH table */ -struct ia_css_bh_table { - uint32_t hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE]; -}; - -#endif /* __IA_CSS_BH_TYPES_H */ - - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.c deleted file mode 100644 index 6d12e031e6fc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "type_support.h" -#include "ia_css_bnlm.host.h" - -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" /* ia_css_debug_dtrace() */ -#endif -#include - -#define BNLM_DIV_LUT_SIZE (12) -static const int32_t div_lut_nearests[BNLM_DIV_LUT_SIZE] = { - 0, 454, 948, 1484, 2070, 2710, 3412, 4184, 5035, 5978, 7025, 8191 -}; - -static const int32_t div_lut_slopes[BNLM_DIV_LUT_SIZE] = { - -7760, -6960, -6216, -5536, -4912, -4344, -3832, -3360, -2936, -2552, -2208, -2208 -}; - -static const int32_t div_lut_intercepts[BNLM_DIV_LUT_SIZE] = { - 8184, 7752, 7336, 6928, 6536, 6152, 5776, 5416, 5064, 4728, 4408, 4408 -}; - -/* Encodes a look-up table from BNLM public parameters to vmem parameters. - * Input: - * lut : bnlm_lut struct containing encoded vmem parameters look-up table - * lut_thr : array containing threshold values for lut - * lut_val : array containing output values related to lut_thr - * lut_size: Size of lut_val array - */ -static inline void -bnlm_lut_encode(struct bnlm_lut *lut, const int32_t *lut_thr, const int32_t *lut_val, const uint32_t lut_size) -{ - u32 blk, i; - const u32 block_size = 16; - const u32 total_blocks = ISP_VEC_NELEMS / block_size; - - /* Create VMEM LUTs from the threshold and value arrays. - * - * Min size of the LUT is 2 entries. - * - * Max size of the LUT is 16 entries, so that the LUT can fit into a - * single group of 16 elements inside a vector. - * Then these elements are copied into other groups inside the same - * vector. If the LUT size is less than 16, then remaining elements are - * set to 0. 
- */ - assert((lut_size >= 2) && (lut_size <= block_size)); - /* array lut_thr has (lut_size-1) entries */ - for (i = 0; i < lut_size-2; i++) { - /* Check if the lut_thr is monotonically increasing */ - assert(lut_thr[i] <= lut_thr[i+1]); - } - - /* Initialize */ - for (i = 0; i < total_blocks * block_size; i++) { - lut->thr[0][i] = 0; - lut->val[0][i] = 0; - } - - /* Copy all data */ - for (i = 0; i < lut_size - 1; i++) { - lut->thr[0][i] = lut_thr[i]; - lut->val[0][i] = lut_val[i]; - } - lut->val[0][i] = lut_val[i]; /* val has one more element than thr */ - - /* Copy data from first block to all blocks */ - for (blk = 1; blk < total_blocks; blk++) { - u32 blk_offset = blk * block_size; - for (i = 1; i < lut_size; i++) { - lut->thr[0][blk_offset + i] = lut->thr[0][i]; - lut->val[0][blk_offset + i] = lut->val[0][i]; - } - } -} - -/* - * - Encodes BNLM public parameters into VMEM parameters - * - Generates VMEM parameters which will needed internally ISP - */ -void -ia_css_bnlm_vmem_encode( - struct bnlm_vmem_params *to, - const struct ia_css_bnlm_config *from, - size_t size) -{ - int i; - (void)size; - - /* Initialize LUTs in VMEM parameters */ - bnlm_lut_encode(&to->mu_root_lut, from->mu_root_lut_thr, from->mu_root_lut_val, 16); - bnlm_lut_encode(&to->sad_norm_lut, from->sad_norm_lut_thr, from->sad_norm_lut_val, 16); - bnlm_lut_encode(&to->sig_detail_lut, from->sig_detail_lut_thr, from->sig_detail_lut_val, 16); - bnlm_lut_encode(&to->sig_rad_lut, from->sig_rad_lut_thr, from->sig_rad_lut_val, 16); - bnlm_lut_encode(&to->rad_pow_lut, from->rad_pow_lut_thr, from->rad_pow_lut_val, 16); - bnlm_lut_encode(&to->nl_0_lut, from->nl_0_lut_thr, from->nl_0_lut_val, 16); - bnlm_lut_encode(&to->nl_1_lut, from->nl_1_lut_thr, from->nl_1_lut_val, 16); - bnlm_lut_encode(&to->nl_2_lut, from->nl_2_lut_thr, from->nl_2_lut_val, 16); - bnlm_lut_encode(&to->nl_3_lut, from->nl_3_lut_thr, from->nl_3_lut_val, 16); - - /* Initialize arrays in VMEM parameters */ - memset(to->nl_th, 0, 
sizeof(to->nl_th)); - to->nl_th[0][0] = from->nl_th[0]; - to->nl_th[0][1] = from->nl_th[1]; - to->nl_th[0][2] = from->nl_th[2]; - - memset(to->match_quality_max_idx, 0, sizeof(to->match_quality_max_idx)); - to->match_quality_max_idx[0][0] = from->match_quality_max_idx[0]; - to->match_quality_max_idx[0][1] = from->match_quality_max_idx[1]; - to->match_quality_max_idx[0][2] = from->match_quality_max_idx[2]; - to->match_quality_max_idx[0][3] = from->match_quality_max_idx[3]; - - bnlm_lut_encode(&to->div_lut, div_lut_nearests, div_lut_slopes, BNLM_DIV_LUT_SIZE); - memset(to->div_lut_intercepts, 0, sizeof(to->div_lut_intercepts)); - for(i = 0; i < BNLM_DIV_LUT_SIZE; i++) { - to->div_lut_intercepts[0][i] = div_lut_intercepts[i]; - } - - memset(to->power_of_2, 0, sizeof(to->power_of_2)); - for (i = 0; i < (ISP_VEC_ELEMBITS-1); i++) { - to->power_of_2[0][i] = 1 << i; - } -} - -/* - Encodes BNLM public parameters into DMEM parameters */ -void -ia_css_bnlm_encode( - struct bnlm_dmem_params *to, - const struct ia_css_bnlm_config *from, - size_t size) -{ - (void)size; - to->rad_enable = from->rad_enable; - to->rad_x_origin = from->rad_x_origin; - to->rad_y_origin = from->rad_y_origin; - to->avg_min_th = from->avg_min_th; - to->max_min_th = from->max_min_th; - - to->exp_coeff_a = from->exp_coeff_a; - to->exp_coeff_b = from->exp_coeff_b; - to->exp_coeff_c = from->exp_coeff_c; - to->exp_exponent = from->exp_exponent; -} - -/* Prints debug traces for BNLM public parameters */ -void -ia_css_bnlm_debug_trace( - const struct ia_css_bnlm_config *config, - unsigned level) -{ - if (!config) - return; - -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(level, "BNLM:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_enable", config->rad_enable); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_x_origin", config->rad_x_origin); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_y_origin", config->rad_y_origin); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "avg_min_th", 
config->avg_min_th); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "max_min_th", config->max_min_th); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_a", config->exp_coeff_a); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_b", config->exp_coeff_b); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_c", config->exp_coeff_c); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_exponent", config->exp_exponent); - - /* ToDo: print traces for LUTs */ -#endif /* IA_CSS_NO_DEBUG */ - -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.h deleted file mode 100644 index 675f6e539b3f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_BNLM_HOST_H -#define __IA_CSS_BNLM_HOST_H - -#include "ia_css_bnlm_types.h" -#include "ia_css_bnlm_param.h" - -void -ia_css_bnlm_vmem_encode( - struct bnlm_vmem_params *to, - const struct ia_css_bnlm_config *from, - size_t size); - -void -ia_css_bnlm_encode( - struct bnlm_dmem_params *to, - const struct ia_css_bnlm_config *from, - size_t size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_bnlm_debug_trace( - const struct ia_css_bnlm_config *config, - unsigned level); -#endif - -#endif /* __IA_CSS_BNLM_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_param.h deleted file mode 100644 index 2f4be43e594e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_param.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_BNLM_PARAM_H -#define __IA_CSS_BNLM_PARAM_H - -#include "type_support.h" -#include "vmem.h" /* needed for VMEM_ARRAY */ - -struct bnlm_lut { - VMEM_ARRAY(thr, ISP_VEC_NELEMS); /* thresholds */ - VMEM_ARRAY(val, ISP_VEC_NELEMS); /* values */ -}; - -struct bnlm_vmem_params { - VMEM_ARRAY(nl_th, ISP_VEC_NELEMS); - VMEM_ARRAY(match_quality_max_idx, ISP_VEC_NELEMS); - struct bnlm_lut mu_root_lut; - struct bnlm_lut sad_norm_lut; - struct bnlm_lut sig_detail_lut; - struct bnlm_lut sig_rad_lut; - struct bnlm_lut rad_pow_lut; - struct bnlm_lut nl_0_lut; - struct bnlm_lut nl_1_lut; - struct bnlm_lut nl_2_lut; - struct bnlm_lut nl_3_lut; - - /* LUTs used for division approximiation */ - struct bnlm_lut div_lut; - VMEM_ARRAY(div_lut_intercepts, ISP_VEC_NELEMS); - - /* 240x does not have an ISP instruction to left shift each element of a - * vector by different shift value. Hence it will be simulated by multiplying - * the elements by required 2^shift. */ - VMEM_ARRAY(power_of_2, ISP_VEC_NELEMS); -}; - -/* BNLM ISP parameters */ -struct bnlm_dmem_params { - bool rad_enable; - int32_t rad_x_origin; - int32_t rad_y_origin; - int32_t avg_min_th; - int32_t max_min_th; - - int32_t exp_coeff_a; - uint32_t exp_coeff_b; - int32_t exp_coeff_c; - uint32_t exp_exponent; -}; - -#endif /* __IA_CSS_BNLM_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h deleted file mode 100644 index 87e0f19c856b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_BNLM_TYPES_H -#define __IA_CSS_BNLM_TYPES_H - -/* @file -* CSS-API header file for Bayer Non-Linear Mean parameters. -*/ - -#include "type_support.h" /* int32_t */ - -/* Bayer Non-Linear Mean configuration - * - * \brief BNLM public parameters. - * \details Struct with all parameters for the BNLM kernel that can be set - * from the CSS API. - * - * ISP2.6.1: BNLM is used. - */ -struct ia_css_bnlm_config { - bool rad_enable; /** Enable a radial dependency in a weight calculation */ - int32_t rad_x_origin; /** Initial x coordinate for a radius calculation */ - int32_t rad_y_origin; /** Initial x coordinate for a radius calculation */ - /* a threshold for average of weights if this < Th, do not denoise pixel */ - int32_t avg_min_th; - /* minimum weight for denoising if max < th, do not denoise pixel */ - int32_t max_min_th; - - /**@{*/ - /* Coefficient for approximation, in the form of (1 + x / N)^N, - * that fits the first-order exp() to default exp_lut in BNLM sheet - * */ - int32_t exp_coeff_a; - uint32_t exp_coeff_b; - int32_t exp_coeff_c; - uint32_t exp_exponent; - /**@}*/ - - int32_t nl_th[3]; /** Detail thresholds */ - - /* Index for n-th maximum candidate weight for each detail group */ - int32_t match_quality_max_idx[4]; - - /**@{*/ - /* A lookup table for 1/sqrt(1+mu) approximation */ - int32_t mu_root_lut_thr[15]; - int32_t mu_root_lut_val[16]; - /**@}*/ - /**@{*/ - /* A lookup table for SAD normalization */ - int32_t sad_norm_lut_thr[15]; - int32_t sad_norm_lut_val[16]; - /**@}*/ - 
/**@{*/ - /* A lookup table that models a weight's dependency on textures */ - int32_t sig_detail_lut_thr[15]; - int32_t sig_detail_lut_val[16]; - /**@}*/ - /**@{*/ - /* A lookup table that models a weight's dependency on a pixel's radial distance */ - int32_t sig_rad_lut_thr[15]; - int32_t sig_rad_lut_val[16]; - /**@}*/ - /**@{*/ - /* A lookup table to control denoise power depending on a pixel's radial distance */ - int32_t rad_pow_lut_thr[15]; - int32_t rad_pow_lut_val[16]; - /**@}*/ - /**@{*/ - /* Non linear transfer functions to calculate the blending coefficient depending on detail group */ - /* detail group 0 */ - /**@{*/ - int32_t nl_0_lut_thr[15]; - int32_t nl_0_lut_val[16]; - /**@}*/ - /**@{*/ - /* detail group 1 */ - int32_t nl_1_lut_thr[15]; - int32_t nl_1_lut_val[16]; - /**@}*/ - /**@{*/ - /* detail group 2 */ - int32_t nl_2_lut_thr[15]; - int32_t nl_2_lut_val[16]; - /**@}*/ - /**@{*/ - /* detail group 3 */ - int32_t nl_3_lut_thr[15]; - int32_t nl_3_lut_val[16]; - /**@}*/ - /**@}*/ -}; - -#endif /* __IA_CSS_BNLM_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c deleted file mode 100644 index a7de6ecb950d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#include "type_support.h" -#include "ia_css_bnr2_2.host.h" - -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" /* ia_css_debug_dtrace() */ -#endif - -/* Default kernel parameters. */ -const struct ia_css_bnr2_2_config default_bnr2_2_config = { - 200, - 200, - 200, - 0, - 0, - 0, - 200, - 200, - 200, - 0, - 0, - 0, - 0, - 4096, - 8191, - 128, - 1, - 0, - 0, - 0, - 8191, - 0, - 8191 -}; - -void -ia_css_bnr2_2_encode( - struct sh_css_isp_bnr2_2_params *to, - const struct ia_css_bnr2_2_config *from, - size_t size) -{ - (void)size; - to->d_var_gain_r = from->d_var_gain_r; - to->d_var_gain_g = from->d_var_gain_g; - to->d_var_gain_b = from->d_var_gain_b; - to->d_var_gain_slope_r = from->d_var_gain_slope_r; - to->d_var_gain_slope_g = from->d_var_gain_slope_g; - to->d_var_gain_slope_b = from->d_var_gain_slope_b; - - to->n_var_gain_r = from->n_var_gain_r; - to->n_var_gain_g = from->n_var_gain_g; - to->n_var_gain_b = from->n_var_gain_b; - to->n_var_gain_slope_r = from->n_var_gain_slope_r; - to->n_var_gain_slope_g = from->n_var_gain_slope_g; - to->n_var_gain_slope_b = from->n_var_gain_slope_b; - - to->dir_thres = from->dir_thres; - to->dir_thres_w = from->dir_thres_w; - to->var_offset_coef = from->var_offset_coef; - - to->dir_gain = from->dir_gain; - to->detail_gain = from->detail_gain; - to->detail_gain_divisor = from->detail_gain_divisor; - to->detail_level_offset = from->detail_level_offset; - - to->d_var_th_min = from->d_var_th_min; - to->d_var_th_max = from->d_var_th_max; - to->n_var_th_min = from->n_var_th_min; - to->n_var_th_max = from->n_var_th_max; -} - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_bnr2_2_debug_dtrace( - const struct ia_css_bnr2_2_config *bnr, - unsigned level) -{ - if (!bnr) - return; - - ia_css_debug_dtrace(level, "Bayer Noise Reduction 2.2:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_r", bnr->d_var_gain_r); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_g", 
bnr->d_var_gain_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_b", bnr->d_var_gain_b); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_r", bnr->d_var_gain_slope_r); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_g", bnr->d_var_gain_slope_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_b", bnr->d_var_gain_slope_b); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_r", bnr->n_var_gain_r); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_g", bnr->n_var_gain_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_b", bnr->n_var_gain_b); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_r", bnr->n_var_gain_slope_r); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_g", bnr->n_var_gain_slope_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_b", bnr->n_var_gain_slope_b); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres", bnr->dir_thres); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres_w", bnr->dir_thres_w); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "var_offset_coef", bnr->var_offset_coef); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_gain", bnr->dir_gain); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain", bnr->detail_gain); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain_divisor", bnr->detail_gain_divisor); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_level_offset", bnr->detail_level_offset); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_min", bnr->d_var_th_min); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_max", bnr->d_var_th_max); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_min", bnr->n_var_th_min); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_max", bnr->n_var_th_max); -} -#endif /* IA_CSS_NO_DEBUG */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h deleted file mode 100644 index c94b366b8142..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#ifndef __IA_CSS_BNR2_2_HOST_H -#define __IA_CSS_BNR2_2_HOST_H - -#include "ia_css_bnr2_2_types.h" -#include "ia_css_bnr2_2_param.h" - -extern const struct ia_css_bnr2_2_config default_bnr2_2_config; - -void -ia_css_bnr2_2_encode( - struct sh_css_isp_bnr2_2_params *to, - const struct ia_css_bnr2_2_config *from, - size_t size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_bnr2_2_debug_dtrace( - const struct ia_css_bnr2_2_config *config, - unsigned level); -#endif - -#endif /* __IA_CSS_BNR2_2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h deleted file mode 100644 index 6dec27a99d8f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_BNR2_2_PARAM_H -#define __IA_CSS_BNR2_2_PARAM_H - -#include "type_support.h" - -/* BNR (Bayer Noise Reduction) ISP parameters */ -struct sh_css_isp_bnr2_2_params { - int32_t d_var_gain_r; - int32_t d_var_gain_g; - int32_t d_var_gain_b; - int32_t d_var_gain_slope_r; - int32_t d_var_gain_slope_g; - int32_t d_var_gain_slope_b; - int32_t n_var_gain_r; - int32_t n_var_gain_g; - int32_t n_var_gain_b; - int32_t n_var_gain_slope_r; - int32_t n_var_gain_slope_g; - int32_t n_var_gain_slope_b; - int32_t dir_thres; - int32_t dir_thres_w; - int32_t var_offset_coef; - int32_t dir_gain; - int32_t detail_gain; - int32_t detail_gain_divisor; - int32_t detail_level_offset; - int32_t d_var_th_min; - int32_t d_var_th_max; - int32_t n_var_th_min; - int32_t n_var_th_max; -}; - -#endif /* __IA_CSS_BNR2_2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h deleted file mode 100644 index 551bd0ed3bac..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_BNR2_2_TYPES_H -#define __IA_CSS_BNR2_2_TYPES_H - -/* @file -* CSS-API header file for Bayer Noise Reduction parameters. -*/ - -#include "type_support.h" /* int32_t */ - -/* Bayer Noise Reduction 2.2 configuration - * - * \brief BNR2_2 public parameters. - * \details Struct with all parameters for the BNR2.2 kernel that can be set - * from the CSS API. - * - * ISP2.6.1: BNR2.2 is used. - */ -struct ia_css_bnr2_2_config { - /**@{*/ - /* Directional variance gain for R/G/B components in dark region */ - int32_t d_var_gain_r; - int32_t d_var_gain_g; - int32_t d_var_gain_b; - /**@}*/ - /**@{*/ - /* Slope of Directional variance gain between dark and bright region */ - int32_t d_var_gain_slope_r; - int32_t d_var_gain_slope_g; - int32_t d_var_gain_slope_b; - /**@}*/ - /**@{*/ - /* Non-Directional variance gain for R/G/B components in dark region */ - int32_t n_var_gain_r; - int32_t n_var_gain_g; - int32_t n_var_gain_b; - /**@}*/ - /**@{*/ - /* Slope of Non-Directional variance gain between dark and bright region */ - int32_t n_var_gain_slope_r; - int32_t n_var_gain_slope_g; - int32_t n_var_gain_slope_b; - /**@}*/ - - int32_t dir_thres; /** Threshold for directional filtering */ - int32_t dir_thres_w; /** Threshold width for directional filtering */ - int32_t var_offset_coef; /** Variance offset coefficient */ - int32_t dir_gain; /** Gain for directional coefficient */ - int32_t detail_gain; /** Gain for low contrast texture control */ - int32_t detail_gain_divisor; /** Gain divisor for low contrast texture control */ - int32_t detail_level_offset; /** Bias value for low contrast texture control */ - int32_t d_var_th_min; /** Minimum clipping value for directional variance*/ - int32_t d_var_th_max; /** 
Maximum clipping value for diretional variance*/ - int32_t n_var_th_min; /** Minimum clipping value for non-directional variance*/ - int32_t n_var_th_max; /** Maximum clipping value for non-directional variance*/ -}; - -#endif /* __IA_CSS_BNR2_2_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c deleted file mode 100644 index d1baca54c3ad..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "sh_css_frac.h" - -#include "ia_css_bnr.host.h" - -void -ia_css_bnr_encode( - struct sh_css_isp_bnr_params *to, - const struct ia_css_nr_config *from, - unsigned size) -{ - (void)size; - /* BNR (Bayer Noise Reduction) */ - to->threshold_low = - uDIGIT_FITTING(from->direction, 16, SH_CSS_BAYER_BITS); - to->threshold_width_log2 = uFRACTION_BITS_FITTING(8); - to->threshold_width = - 1 << to->threshold_width_log2; - to->gain_all = - uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT); - to->gain_dir = - uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT); - to->clip = uDIGIT_FITTING((unsigned)16384, 16, SH_CSS_BAYER_BITS); -} - -void -ia_css_bnr_dump( - const struct sh_css_isp_bnr_params *bnr, - unsigned level) -{ - if (!bnr) return; - ia_css_debug_dtrace(level, "Bayer Noise Reduction:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "bnr_gain_all", bnr->gain_all); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "bnr_gain_dir", bnr->gain_dir); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "bnr_threshold_low", - bnr->threshold_low); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "bnr_threshold_width_log2", - bnr->threshold_width_log2); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "bnr_threshold_width", - bnr->threshold_width); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "bnr_clip", bnr->clip); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h deleted file mode 100644 index ccd2abc60537..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_BNR_HOST_H -#define __IA_CSS_BNR_HOST_H - -#include "sh_css_params.h" - -#include "ynr/ynr_1.0/ia_css_ynr_types.h" -#include "ia_css_bnr_param.h" - -void -ia_css_bnr_encode( - struct sh_css_isp_bnr_params *to, - const struct ia_css_nr_config *from, - unsigned size); - -void -ia_css_bnr_dump( - const struct sh_css_isp_bnr_params *bnr, - unsigned level); - -#endif /* __IA_CSS_DP_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h deleted file mode 100644 index 331e05885ef4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_BNR_PARAM_H -#define __IA_CSS_BNR_PARAM_H - -#include "type_support.h" - -/* BNR (Bayer Noise Reduction) */ -struct sh_css_isp_bnr_params { - int32_t gain_all; - int32_t gain_dir; - int32_t threshold_low; - int32_t threshold_width_log2; - int32_t threshold_width; - int32_t clip; -}; - -#endif /* __IA_CSS_BNR_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c deleted file mode 100644 index d14fd8fc08b1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" - -#include "ia_css_cnr.host.h" - -/* keep the interface here, it is not enabled yet because host doesn't know the size of individual state */ -void -ia_css_init_cnr_state( - void/*struct sh_css_isp_cnr_vmem_state*/ *state, - size_t size) -{ - memset(state, 0, size); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h deleted file mode 100644 index 6f00d280b7d6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CNR_HOST_H -#define __IA_CSS_CNR_HOST_H - -#include "ia_css_cnr_param.h" - -void -ia_css_init_cnr_state( - void/*struct sh_css_isp_cnr_vmem_state*/ *state, - size_t size); - -#endif /* __IA_CSS_CNR_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h deleted file mode 100644 index c1af207cbf9a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CNR_PARAM_H -#define __IA_CSS_CNR_PARAM_H - -#include "type_support.h" - -/* CNR (Chroma Noise Reduction) */ -/* Reuse YNR1 param structure */ -#include "../../ynr/ynr_1.0/ia_css_ynr_param.h" - -#endif /* __IA_CSS_CNR_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c deleted file mode 100644 index 4b4b2b715407..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" - -#include "ia_css_cnr2.host.h" - -const struct ia_css_cnr_config default_cnr_config = { - 0, - 0, - 100, - 100, - 100, - 50, - 50, - 50 -}; - -void -ia_css_cnr_encode( - struct sh_css_isp_cnr_params *to, - const struct ia_css_cnr_config *from, - unsigned size) -{ - (void)size; - to->coring_u = from->coring_u; - to->coring_v = from->coring_v; - to->sense_gain_vy = from->sense_gain_vy; - to->sense_gain_vu = from->sense_gain_vu; - to->sense_gain_vv = from->sense_gain_vv; - to->sense_gain_hy = from->sense_gain_hy; - to->sense_gain_hu = from->sense_gain_hu; - to->sense_gain_hv = from->sense_gain_hv; -} - -void -ia_css_cnr_dump( - const struct sh_css_isp_cnr_params *cnr, - unsigned level); - -void -ia_css_cnr_debug_dtrace( - const struct ia_css_cnr_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.coring_u=%d, config.coring_v=%d, " - "config.sense_gain_vy=%d, config.sense_gain_hy=%d, " - "config.sense_gain_vu=%d, config.sense_gain_hu=%d, " - "config.sense_gain_vv=%d, config.sense_gain_hv=%d\n", - config->coring_u, config->coring_v, - config->sense_gain_vy, config->sense_gain_hy, - config->sense_gain_vu, config->sense_gain_hu, - config->sense_gain_vv, config->sense_gain_hv); -} - -void -ia_css_init_cnr2_state( - void/*struct sh_css_isp_cnr_vmem_state*/ *state, - size_t size) -{ - memset(state, 0, size); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h deleted file mode 100644 index abcf0eba706f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CNR2_HOST_H -#define __IA_CSS_CNR2_HOST_H - -#include "ia_css_cnr2_types.h" -#include "ia_css_cnr2_param.h" - -extern const struct ia_css_cnr_config default_cnr_config; - -void -ia_css_cnr_encode( - struct sh_css_isp_cnr_params *to, - const struct ia_css_cnr_config *from, - unsigned size); - -void -ia_css_cnr_dump( - const struct sh_css_isp_cnr_params *cnr, - unsigned level); - -void -ia_css_cnr_debug_dtrace( - const struct ia_css_cnr_config *config, - unsigned level); - -void -ia_css_init_cnr2_state( - void/*struct sh_css_isp_cnr_vmem_state*/ *state, - size_t size); -#endif /* __IA_CSS_CNR2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h deleted file mode 100644 index d6f490e26c94..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CNR2_PARAM_H -#define __IA_CSS_CNR2_PARAM_H - -#include "type_support.h" - -/* CNR (Chroma Noise Reduction) */ -struct sh_css_isp_cnr_params { - int32_t coring_u; - int32_t coring_v; - int32_t sense_gain_vy; - int32_t sense_gain_vu; - int32_t sense_gain_vv; - int32_t sense_gain_hy; - int32_t sense_gain_hu; - int32_t sense_gain_hv; -}; - -#endif /* __IA_CSS_CNR2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h deleted file mode 100644 index 3ebc069d8ada..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CNR2_TYPES_H -#define __IA_CSS_CNR2_TYPES_H - -/* @file -* CSS-API header file for Chroma Noise Reduction (CNR) parameters -*/ - -/* Chroma Noise Reduction configuration. - * - * Small sensitivity of edge means strong smoothness and NR performance. - * If you see blurred color on vertical edges, - * set higher values on sense_gain_h*. - * If you see blurred color on horizontal edges, - * set higher values on sense_gain_v*. - * - * ISP block: CNR2 - * (ISP1: CNR1 is used.) - * (ISP2: CNR1 is used for Preview/Video.) - * ISP2: CNR2 is used for Still. 
- */ -struct ia_css_cnr_config { - uint16_t coring_u; /** Coring level of U. - u0.13, [0,8191], default/ineffective 0 */ - uint16_t coring_v; /** Coring level of V. - u0.13, [0,8191], default/ineffective 0 */ - uint16_t sense_gain_vy; /** Sensitivity of horizontal edge of Y. - u13.0, [0,8191], default 100, ineffective 8191 */ - uint16_t sense_gain_vu; /** Sensitivity of horizontal edge of U. - u13.0, [0,8191], default 100, ineffective 8191 */ - uint16_t sense_gain_vv; /** Sensitivity of horizontal edge of V. - u13.0, [0,8191], default 100, ineffective 8191 */ - uint16_t sense_gain_hy; /** Sensitivity of vertical edge of Y. - u13.0, [0,8191], default 50, ineffective 8191 */ - uint16_t sense_gain_hu; /** Sensitivity of vertical edge of U. - u13.0, [0,8191], default 50, ineffective 8191 */ - uint16_t sense_gain_hv; /** Sensitivity of vertical edge of V. - u13.0, [0,8191], default 50, ineffective 8191 */ -}; - -#endif /* __IA_CSS_CNR2_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_param.h deleted file mode 100644 index 56651ba62598..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_param.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CNRX_PARAM_H -#define __IA_CSS_CNRX_PARAM_H - -#include "ia_css_cnr2_param.h" - -#endif /* __IA_CSS_CNRX_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c deleted file mode 100644 index 8f25ee180cda..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "ia_css_conversion.host.h" - -const struct ia_css_conversion_config default_conversion_config = { - 0, - 0, - 0, - 0, -}; - -void -ia_css_conversion_encode( - struct sh_css_isp_conversion_params *to, - const struct ia_css_conversion_config *from, - unsigned size) -{ - (void)size; - to->en = from->en; - to->dummy0 = from->dummy0; - to->dummy1 = from->dummy1; - to->dummy2 = from->dummy2; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h deleted file mode 100644 index da7a0a034a71..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CONVERSION_HOST_H -#define __IA_CSS_CONVERSION_HOST_H - -#include "ia_css_conversion_types.h" -#include "ia_css_conversion_param.h" - -extern const struct ia_css_conversion_config default_conversion_config; - -void -ia_css_conversion_encode( - struct sh_css_isp_conversion_params *to, - const struct ia_css_conversion_config *from, - unsigned size); - -#ifdef ISP2401 -/* workaround until code generation in isp_kernelparameters.host.c is fixed */ -#define ia_css_conversion_par_encode(to, from, size) ia_css_conversion_encode(to, from, size) -#endif -#endif /* __IA_CSS_CONVERSION_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h deleted file mode 100644 index 301d506f447e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CONVERSION_PARAM_H -#define __IA_CSS_CONVERSION_PARAM_H - -#include "type_support.h" - -/* CONVERSION */ -struct sh_css_isp_conversion_params { - uint32_t en; - uint32_t dummy0; - uint32_t dummy1; - uint32_t dummy2; -}; - -#endif /* __IA_CSS_CONVERSION_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h deleted file mode 100644 index 47a38fd65950..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CONVERSION_TYPES_H -#define __IA_CSS_CONVERSION_TYPES_H - -/** - * Conversion Kernel parameters. 
- * Deinterleave bayer quad into isys format - * - * ISP block: CONVERSION - * - */ -struct ia_css_conversion_config { - uint32_t en; /** en parameter */ - uint32_t dummy0; /** dummy0 dummy parameter 0 */ - uint32_t dummy1; /** dummy1 dummy parameter 1 */ - uint32_t dummy2; /** dummy2 dummy parameter 2 */ -}; - -#endif /* __IA_CSS_CONVERSION_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c deleted file mode 100644 index 45e1ea8b1fb0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_copy_output.host.h" -#include "ia_css_binary.h" -#include "type_support.h" -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#include "isp.h" - -static const struct ia_css_copy_output_configuration default_config = { - .enable = false, -}; - -void -ia_css_copy_output_config( - struct sh_css_isp_copy_output_isp_config *to, - const struct ia_css_copy_output_configuration *from, - unsigned size) -{ - (void)size; - to->enable = from->enable; -} - -void -ia_css_copy_output_configure( - const struct ia_css_binary *binary, - bool enable) -{ - struct ia_css_copy_output_configuration config = default_config; - - config.enable = enable; - - ia_css_configure_copy_output(binary, &config); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h deleted file mode 100644 index 3eb77365f8d0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_COPY_OUTPUT_HOST_H -#define __IA_CSS_COPY_OUTPUT_HOST_H - -#include "type_support.h" -#include "ia_css_binary.h" - -#include "ia_css_copy_output_param.h" - -void -ia_css_copy_output_config( - struct sh_css_isp_copy_output_isp_config *to, - const struct ia_css_copy_output_configuration *from, - unsigned size); - -void -ia_css_copy_output_configure( - const struct ia_css_binary *binary, - bool enable); - -#endif /* __IA_CSS_COPY_OUTPUT_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h deleted file mode 100644 index 622d9181e13f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_COPY_PARAM_H -#define __IA_CSS_COPY_PARAM_H - -struct ia_css_copy_output_configuration { - bool enable; -}; - -struct sh_css_isp_copy_output_isp_config { - uint32_t enable; -}; - -#endif /* __IA_CSS_COPY_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.c deleted file mode 100644 index 92905220d862..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include -#include -#include -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#include "isp.h" -#include "ia_css_crop.host.h" - -static const struct ia_css_crop_configuration default_config = { - .info = (struct ia_css_frame_info *)NULL, -}; - -void -ia_css_crop_encode( - struct sh_css_isp_crop_isp_params *to, - const struct ia_css_crop_config *from, - unsigned size) -{ - (void)size; - to->crop_pos = from->crop_pos; -} - -void -ia_css_crop_config( - struct sh_css_isp_crop_isp_config *to, - const struct ia_css_crop_configuration *from, - unsigned size) -{ - unsigned elems_a = ISP_VEC_NELEMS; - - (void)size; - ia_css_dma_configure_from_info(&to->port_b, from->info); - to->width_a_over_b = elems_a / to->port_b.elems; - - /* Assume divisiblity here, may need to generalize to fixed point. */ - assert (elems_a % to->port_b.elems == 0); -} - -void -ia_css_crop_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *info) -{ - struct ia_css_crop_configuration config = default_config; - - config.info = info; - - ia_css_configure_crop(binary, &config); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.h deleted file mode 100644 index 9c1a4c7cac98..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CROP_HOST_H -#define __IA_CSS_CROP_HOST_H - -#include -#include - -#include "ia_css_crop_types.h" -#include "ia_css_crop_param.h" - -void -ia_css_crop_encode( - struct sh_css_isp_crop_isp_params *to, - const struct ia_css_crop_config *from, - unsigned size); - -void -ia_css_crop_config( - struct sh_css_isp_crop_isp_config *to, - const struct ia_css_crop_configuration *from, - unsigned size); - -void -ia_css_crop_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *from); - -#endif /* __IA_CSS_CROP_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h deleted file mode 100644 index 0f1812cdd92a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CROP_PARAM_H -#define __IA_CSS_CROP_PARAM_H - -#include -#include "dma.h" -#include "sh_css_internal.h" /* sh_css_crop_pos */ - -/* Crop frame */ -struct sh_css_isp_crop_isp_config { - uint32_t width_a_over_b; - struct dma_port_config port_b; -}; - -struct sh_css_isp_crop_isp_params { - struct sh_css_crop_pos crop_pos; -}; - -#endif /* __IA_CSS_CROP_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h deleted file mode 100644 index b5d454225f89..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CROP_TYPES_H -#define __IA_CSS_CROP_TYPES_H - -/* Crop frame - * - * ISP block: crop frame - */ - -#include -#include "sh_css_uds.h" /* sh_css_crop_pos */ - -struct ia_css_crop_config { - struct sh_css_crop_pos crop_pos; -}; - -struct ia_css_crop_configuration { - const struct ia_css_frame_info *info; -}; - -#endif /* __IA_CSS_CROP_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.c deleted file mode 100644 index 9f94ef1de572..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.c +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#ifndef IA_CSS_NO_DEBUG -/* FIXME: See BZ 4427 */ -#include "ia_css_debug.h" -#endif - -#include "ia_css_csc.host.h" - -const struct ia_css_cc_config default_cc_config = { - 8, - {255, 29, 120, 0, -374, -342, 0, -672, 301}, -}; - -void -ia_css_encode_cc( - struct sh_css_isp_csc_params *to, - const struct ia_css_cc_config *from, - unsigned size) -{ - (void)size; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() enter:\n"); -#endif - - to->m_shift = (int16_t) from->fraction_bits; - to->m00 = (int16_t) from->matrix[0]; - to->m01 = (int16_t) from->matrix[1]; - to->m02 = (int16_t) from->matrix[2]; - to->m10 = (int16_t) from->matrix[3]; - to->m11 = (int16_t) from->matrix[4]; - to->m12 = (int16_t) from->matrix[5]; - to->m20 = (int16_t) from->matrix[6]; - to->m21 = (int16_t) from->matrix[7]; - to->m22 = (int16_t) from->matrix[8]; - -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() leave:\n"); -#endif -} - -void -ia_css_csc_encode( - struct sh_css_isp_csc_params *to, - const struct ia_css_cc_config *from, - unsigned size) -{ - ia_css_encode_cc(to, from, size); -} - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_cc_dump( - const struct sh_css_isp_csc_params *csc, - unsigned level, - const char *name) -{ - if (!csc) return; - ia_css_debug_dtrace(level, "%s\n", name); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m_shift", - csc->m_shift); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m00", - csc->m00); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m01", - csc->m01); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m02", - csc->m02); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m10", - csc->m10); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m11", - csc->m11); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m12", - csc->m12); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m20", - csc->m20); - 
ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m21", - csc->m21); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "m22", - csc->m22); -} - -void -ia_css_csc_dump( - const struct sh_css_isp_csc_params *csc, - unsigned level) -{ - ia_css_cc_dump(csc, level, "Color Space Conversion"); -} - -void -ia_css_cc_config_debug_dtrace( - const struct ia_css_cc_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.m[0]=%d, " - "config.m[1]=%d, config.m[2]=%d, " - "config.m[3]=%d, config.m[4]=%d, " - "config.m[5]=%d, config.m[6]=%d, " - "config.m[7]=%d, config.m[8]=%d\n", - config->matrix[0], - config->matrix[1], config->matrix[2], - config->matrix[3], config->matrix[4], - config->matrix[5], config->matrix[6], - config->matrix[7], config->matrix[8]); -} -#endif - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.h deleted file mode 100644 index eb10d8a5709d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CSC_HOST_H -#define __IA_CSS_CSC_HOST_H - -#include "ia_css_csc_types.h" -#include "ia_css_csc_param.h" - -extern const struct ia_css_cc_config default_cc_config; - -void -ia_css_encode_cc( - struct sh_css_isp_csc_params *to, - const struct ia_css_cc_config *from, - unsigned size); - -void -ia_css_csc_encode( - struct sh_css_isp_csc_params *to, - const struct ia_css_cc_config *from, - unsigned size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_cc_dump( - const struct sh_css_isp_csc_params *csc, unsigned level, - const char *name); - -void -ia_css_csc_dump( - const struct sh_css_isp_csc_params *csc, - unsigned level); - -void -ia_css_cc_config_debug_dtrace( - const struct ia_css_cc_config *config, - unsigned level); - -#define ia_css_csc_debug_dtrace ia_css_cc_config_debug_dtrace -#endif - -#endif /* __IA_CSS_CSC_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_param.h deleted file mode 100644 index 0b054a939baf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_param.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CSC_PARAM_H -#define __IA_CSS_CSC_PARAM_H - -#include "type_support.h" -/* CSC (Color Space Conversion) */ -struct sh_css_isp_csc_params { - uint16_t m_shift; - int16_t m00; - int16_t m01; - int16_t m02; - int16_t m10; - int16_t m11; - int16_t m12; - int16_t m20; - int16_t m21; - int16_t m22; -}; - - -#endif /* __IA_CSS_CSC_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h deleted file mode 100644 index 10404380c637..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CSC_TYPES_H -#define __IA_CSS_CSC_TYPES_H - -/* @file -* CSS-API header file for Color Space Conversion parameters. -*/ - -/* Color Correction configuration. - * - * This structure is used for 3 cases. - * ("YCgCo" is the output format of Demosaic.) - * - * 1. Color Space Conversion (YCgCo to YUV) for ISP1. - * ISP block: CSC1 (Color Space Conversion) - * struct ia_css_cc_config *cc_config - * - * 2. Color Correction Matrix (YCgCo to RGB) for ISP2. - * ISP block: CCM2 (Color Correction Matrix) - * struct ia_css_cc_config *yuv2rgb_cc_config - * - * 3. Color Space Conversion (RGB to YUV) for ISP2. 
- * ISP block: CSC2 (Color Space Conversion) - * struct ia_css_cc_config *rgb2yuv_cc_config - * - * default/ineffective: - * 1. YCgCo -> YUV - * 1 0.174 0.185 - * 0 -0.66252 -0.66874 - * 0 -0.83738 0.58131 - * - * fraction_bits = 12 - * 4096 713 758 - * 0 -2714 -2739 - * 0 -3430 2381 - * - * 2. YCgCo -> RGB - * 1 -1 1 - * 1 1 0 - * 1 -1 -1 - * - * fraction_bits = 12 - * 4096 -4096 4096 - * 4096 4096 0 - * 4096 -4096 -4096 - * - * 3. RGB -> YUV - * 0.299 0.587 0.114 - * -0.16874 -0.33126 0.5 - * 0.5 -0.41869 -0.08131 - * - * fraction_bits = 13 - * 2449 4809 934 - * -1382 -2714 4096 - * 4096 -3430 -666 - */ -struct ia_css_cc_config { - uint32_t fraction_bits;/** Fractional bits of matrix. - u8.0, [0,13] */ - int32_t matrix[3 * 3]; /** Conversion matrix. - s[13-fraction_bits].[fraction_bits], - [-8192,8191] */ -}; - -#endif /* __IA_CSS_CSC_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c deleted file mode 100644 index e27648c46a25..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -#include "ctc/ctc_1.0/ia_css_ctc.host.h" -#include "ia_css_ctc1_5.host.h" - -static void ctc_gradient( - int *dydx, int *shift, - int y1, int y0, int x1, int x0) -{ - int frc_bits = max(IA_CSS_CTC_COEF_SHIFT, 16); - int dy = y1 - y0; - int dx = x1 - x0; - int dydx_int; - int dydx_frc; - int sft; - /* max_dydx = the maxinum gradient = the maximum y (gain) */ - int max_dydx = (1 << IA_CSS_CTC_COEF_SHIFT) - 1; - - if (dx == 0) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() error, illegal division operation\n"); - return; - } else { - dydx_int = dy / dx; - dydx_frc = ((dy - dydx_int * dx) << frc_bits) / dx; - } - - assert(y0 >= 0 && y0 <= max_dydx); - assert(y1 >= 0 && y1 <= max_dydx); - assert(x0 < x1); - assert(dydx != NULL); - assert(shift != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() enter:\n"); - - /* search "sft" which meets this condition: - (1 << (IA_CSS_CTC_COEF_SHIFT - 1)) - <= (((float)dy / (float)dx) * (1 << sft)) - <= ((1 << IA_CSS_CTC_COEF_SHIFT) - 1) */ - for (sft = 0; sft <= IA_CSS_CTC_COEF_SHIFT; sft++) { - int tmp_dydx = (dydx_int << sft) - + (dydx_frc >> (frc_bits - sft)); - if (tmp_dydx <= max_dydx) { - *dydx = tmp_dydx; - *shift = sft; - } - if (tmp_dydx >= max_dydx) - break; - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() leave:\n"); -} - -void -ia_css_ctc_encode( - struct sh_css_isp_ctc_params *to, - const struct ia_css_ctc_config *from, - unsigned size) -{ - (void)size; - to->y0 = from->y0; - to->y1 = from->y1; - to->y2 = from->y2; - to->y3 = from->y3; - to->y4 = from->y4; - to->y5 = from->y5; - - to->ce_gain_exp = from->ce_gain_exp; - - to->x1 = from->x1; - to->x2 = from->x2; - to->x3 = from->x3; - to->x4 = from->x4; - - ctc_gradient(&(to->dydx0), - &(to->dydx0_shift), - from->y1, from->y0, - from->x1, 0); - - ctc_gradient(&(to->dydx1), - 
&(to->dydx1_shift), - from->y2, from->y1, - from->x2, from->x1); - - ctc_gradient(&to->dydx2, - &to->dydx2_shift, - from->y3, from->y2, - from->x3, from->x2); - - ctc_gradient(&to->dydx3, - &to->dydx3_shift, - from->y4, from->y3, - from->x4, from->x3); - - ctc_gradient(&(to->dydx4), - &(to->dydx4_shift), - from->y5, from->y4, - SH_CSS_BAYER_MAXVAL, from->x4); -} - -void -ia_css_ctc_dump( - const struct sh_css_isp_ctc_params *ctc, - unsigned level); diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h deleted file mode 100644 index d943aff28152..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CTC1_5_HOST_H -#define __IA_CSS_CTC1_5_HOST_H - -#include "sh_css_params.h" - -#include "ia_css_ctc1_5_param.h" - -void -ia_css_ctc_encode( - struct sh_css_isp_ctc_params *to, - const struct ia_css_ctc_config *from, - unsigned size); - -void -ia_css_ctc_dump( - const struct sh_css_isp_ctc_params *ctc, - unsigned level); - -#endif /* __IA_CSS_CTC1_5_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h deleted file mode 100644 index 8d9ac2b1832c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CTC1_5_PARAM_H -#define __IA_CSS_CTC1_5_PARAM_H - -#include "type_support.h" -#include "ctc/ctc_1.0/ia_css_ctc_param.h" /* vamem params */ - -/* CTC (Color Tone Control) */ -struct sh_css_isp_ctc_params { - int32_t y0; - int32_t y1; - int32_t y2; - int32_t y3; - int32_t y4; - int32_t y5; - int32_t ce_gain_exp; - int32_t x1; - int32_t x2; - int32_t x3; - int32_t x4; - int32_t dydx0; - int32_t dydx0_shift; - int32_t dydx1; - int32_t dydx1_shift; - int32_t dydx2; - int32_t dydx2_shift; - int32_t dydx3; - int32_t dydx3_shift; - int32_t dydx4; - int32_t dydx4_shift; -}; - -#endif /* __IA_CSS_CTC1_5_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h deleted file mode 100644 index dcd471f9bd66..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CTCX_PARAM_H -#define __IA_CSS_CTCX_PARAM_H - -#include "ia_css_ctc1_5_param.h" - -#endif /* __IA_CSS_CTCX_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c deleted file mode 100644 index 07bd24edc7bf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "assert_support.h" - -#include "ia_css_ctc2.host.h" - -#define INEFFECTIVE_VAL 4096 -#define BASIC_VAL 819 - -/*Default configuration of parameters for Ctc2*/ -const struct ia_css_ctc2_config default_ctc2_config = { - INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL, - INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL, - BASIC_VAL * 2, BASIC_VAL * 4, BASIC_VAL * 6, - BASIC_VAL * 8, INEFFECTIVE_VAL, INEFFECTIVE_VAL, - BASIC_VAL >> 1, BASIC_VAL}; - -/* (dydx) = ctc2_slope(y1, y0, x1, x0) - * ----------------------------------------------- - * Calculation of the Slope of a Line = ((y1 - y0) >> 8)/(x1 - x0) - * - * Note: y1, y0 , x1 & x0 must lie within the range 0 <-> 8191 - */ -static int ctc2_slope(int y1, int y0, int x1, int x0) -{ - const int shift_val = 8; - const int max_slope = (1 << IA_CSS_CTC_COEF_SHIFT) - 1; - int dy = y1 - y0; - int dx = x1 - x0; - int rounding = (dx + 1) >> 1; - int dy_shift = dy << shift_val; - int slope, dydx; - - /*Protection for paramater values, & avoiding zero divisions*/ - assert(y0 >= 0 && y0 <= max_slope); - assert(y1 >= 0 && y1 <= max_slope); - assert(x0 >= 0 && x0 <= max_slope); - assert(x1 > 0 && x1 <= max_slope); - assert(dx > 0); - - if (dy < 0) - rounding = -rounding; - slope = (int) (dy_shift + rounding) / dx; - - /*the slope must lie within the range - (-max_slope-1) >= (dydx) >= (max_slope) - */ - if (slope <= -max_slope-1) { - dydx = -max_slope-1; - } else if (slope >= max_slope) { - dydx = max_slope; - } else { - dydx = slope; - } - - return dydx; -} - -/* (void) = ia_css_ctc2_vmem_encode(*to, *from) - * ----------------------------------------------- - * VMEM Encode Function to translate Y parameters from userspace into ISP space - */ -void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to, - const struct ia_css_ctc2_config *from, - size_t size) -{ - unsigned i, j; - const unsigned shffl_blck = 4; - const unsigned lenght_zeros = 11; - 
short dydx0, dydx1, dydx2, dydx3, dydx4; - - (void)size; - /* - * Calculation of slopes of lines interconnecting - * 0.0 -> y_x1 -> y_x2 -> y _x3 -> y_x4 -> 1.0 - */ - dydx0 = ctc2_slope(from->y_y1, from->y_y0, - from->y_x1, 0); - dydx1 = ctc2_slope(from->y_y2, from->y_y1, - from->y_x2, from->y_x1); - dydx2 = ctc2_slope(from->y_y3, from->y_y2, - from->y_x3, from->y_x2); - dydx3 = ctc2_slope(from->y_y4, from->y_y3, - from->y_x4, from->y_x3); - dydx4 = ctc2_slope(from->y_y5, from->y_y4, - SH_CSS_BAYER_MAXVAL, from->y_x4); - - /*Fill 3 arrays with: - * - Luma input gain values y_y0, y_y1, y_y2, y_3, y_y4 - * - Luma kneepoints 0, y_x1, y_x2, y_x3, y_x4 - * - Calculated slopes dydx0, dyxd1, dydx2, dydx3, dydx4 - * - * - Each 64-element array is divided in blocks of 16 elements: - * the 5 parameters + zeros in the remaining 11 positions - * - All blocks of the same array will contain the same data - */ - for (i = 0; i < shffl_blck; i++) { - to->y_x[0][(i << shffl_blck)] = 0; - to->y_x[0][(i << shffl_blck) + 1] = from->y_x1; - to->y_x[0][(i << shffl_blck) + 2] = from->y_x2; - to->y_x[0][(i << shffl_blck) + 3] = from->y_x3; - to->y_x[0][(i << shffl_blck) + 4] = from->y_x4; - - to->y_y[0][(i << shffl_blck)] = from->y_y0; - to->y_y[0][(i << shffl_blck) + 1] = from->y_y1; - to->y_y[0][(i << shffl_blck) + 2] = from->y_y2; - to->y_y[0][(i << shffl_blck) + 3] = from->y_y3; - to->y_y[0][(i << shffl_blck) + 4] = from->y_y4; - - to->e_y_slope[0][(i << shffl_blck)] = dydx0; - to->e_y_slope[0][(i << shffl_blck) + 1] = dydx1; - to->e_y_slope[0][(i << shffl_blck) + 2] = dydx2; - to->e_y_slope[0][(i << shffl_blck) + 3] = dydx3; - to->e_y_slope[0][(i << shffl_blck) + 4] = dydx4; - - for (j = 0; j < lenght_zeros; j++) { - to->y_x[0][(i << shffl_blck) + 5 + j] = 0; - to->y_y[0][(i << shffl_blck) + 5 + j] = 0; - to->e_y_slope[0][(i << shffl_blck)+ 5 + j] = 0; - } - } -} - -/* (void) = ia_css_ctc2_encode(*to, *from) - * ----------------------------------------------- - * DMEM Encode Function 
to translate UV parameters from userspace into ISP space - */ -void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to, - struct ia_css_ctc2_config *from, - size_t size) -{ - (void)size; - - to->uv_y0 = from->uv_y0; - to->uv_y1 = from->uv_y1; - to->uv_x0 = from->uv_x0; - to->uv_x1 = from->uv_x1; - - /*Slope Calculation*/ - to->uv_dydx = ctc2_slope(from->uv_y1, from->uv_y0, - from->uv_x1, from->uv_x0); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h deleted file mode 100644 index 3733aee24dcd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CTC2_HOST_H -#define __IA_CSS_CTC2_HOST_H - -#include "ia_css_ctc2_param.h" -#include "ia_css_ctc2_types.h" - -extern const struct ia_css_ctc2_config default_ctc2_config; - -/*Encode Functions to translate parameters from userspace into ISP space*/ - -void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to, - const struct ia_css_ctc2_config *from, - size_t size); - -void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to, - struct ia_css_ctc2_config *from, - size_t size); - -#endif /* __IA_CSS_CTC2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h deleted file mode 100644 index ad7040c9d7cb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CTC2_PARAM_H -#define __IA_CSS_CTC2_PARAM_H - -#define IA_CSS_CTC_COEF_SHIFT 13 -#include "vmem.h" /* needed for VMEM_ARRAY */ - -/* CTC (Chroma Tone Control)ISP Parameters */ - -/*VMEM Luma params*/ -struct ia_css_isp_ctc2_vmem_params { - /** Gains by Y(Luma) at Y = 0.0,Y_X1, Y_X2, Y_X3, Y_X4*/ - VMEM_ARRAY(y_x, ISP_VEC_NELEMS); - /* kneepoints by Y(Luma) 0.0, y_x1, y_x2, y _x3, y_x4*/ - VMEM_ARRAY(y_y, ISP_VEC_NELEMS); - /* Slopes of lines interconnecting - * 0.0 -> y_x1 -> y_x2 -> y _x3 -> y_x4 -> 1.0*/ - VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS); -}; - -/*DMEM Chroma params*/ -struct ia_css_isp_ctc2_dmem_params { - - /* Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/ - int32_t uv_y0; - int32_t uv_y1; - - /* Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/ - int32_t uv_x0; - int32_t uv_x1; - - /* Slope of line interconnecting uv_x0 -> uv_x1*/ - int32_t uv_dydx; - -}; -#endif /* __IA_CSS_CTC2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h deleted file mode 100644 index 1222cf33e851..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CTC2_TYPES_H -#define __IA_CSS_CTC2_TYPES_H - -/* Chroma Tone Control configuration. 
-* -* ISP block: CTC2 (CTC by polygonal approximation) -* (ISP1: CTC1 (CTC by look-up table) is used.) -* ISP2: CTC2 is used. -* ISP261: CTC2 (CTC by Fast Approximate Distance) -*/ -struct ia_css_ctc2_config { - - /** Gains by Y(Luma) at Y =0.0,Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5 - * --default/ineffective value: 4096(0.5f) - */ - int32_t y_y0; - int32_t y_y1; - int32_t y_y2; - int32_t y_y3; - int32_t y_y4; - int32_t y_y5; - /* 1st-4th kneepoints by Y(Luma) --default/ineffective value:n/a - * requirement: 0.0 < y_x1 < y_x2 ctc, &from->data, sizeof(to->ctc)); -} - -void -ia_css_ctc_debug_dtrace( - const struct ia_css_ctc_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.ce_gain_exp=%d, config.y0=%d, " - "config.x1=%d, config.y1=%d, " - "config.x2=%d, config.y2=%d, " - "config.x3=%d, config.y3=%d, " - "config.x4=%d, config.y4=%d\n", - config->ce_gain_exp, config->y0, - config->x1, config->y1, - config->x2, config->y2, - config->x3, config->y3, - config->x4, config->y4); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h deleted file mode 100644 index bec52a6519f9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CTC_HOST_H -#define __IA_CSS_CTC_HOST_H - -#include "sh_css_params.h" - -#include "ia_css_ctc_param.h" -#include "ia_css_ctc_table.host.h" - -extern const struct ia_css_ctc_config default_ctc_config; - -void -ia_css_ctc_vamem_encode( - struct sh_css_isp_ctc_vamem_params *to, - const struct ia_css_ctc_table *from, - unsigned size); - -void -ia_css_ctc_debug_dtrace( - const struct ia_css_ctc_config *config, unsigned level) -; - -#endif /* __IA_CSS_CTC_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h deleted file mode 100644 index 6e88ad3d2420..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_CTC_PARAM_H -#define __IA_CSS_CTC_PARAM_H - -#include "type_support.h" -#include - -#include "ia_css_ctc_types.h" - -#ifndef PIPE_GENERATION -#if defined(HAS_VAMEM_VERSION_2) -#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2 -#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_2_CTC_TABLE_SIZE -#elif defined(HAS_VAMEM_VERSION_1) -#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2 -#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_1_CTC_TABLE_SIZE -#else -#error "VAMEM should be {VERSION1, VERSION2}" -#endif - -#else -/* For pipe generation, the size is not relevant */ -#define SH_CSS_ISP_CTC_TABLE_SIZE 0 -#endif - -/* This should be vamem_data_t, but that breaks the pipe generator */ -struct sh_css_isp_ctc_vamem_params { - uint16_t ctc[SH_CSS_ISP_CTC_TABLE_SIZE]; -}; - -#endif /* __IA_CSS_CTC_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c deleted file mode 100644 index edf85aba7716..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include /* memcpy */ -#include "system_global.h" -#include "vamem.h" -#include "ia_css_types.h" -#include "ia_css_ctc_table.host.h" - -struct ia_css_ctc_table default_ctc_table; - -#if defined(HAS_VAMEM_VERSION_2) - -static const uint16_t -default_ctc_table_data[IA_CSS_VAMEM_2_CTC_TABLE_SIZE] = { - 0, 384, 837, 957, 1011, 1062, 1083, 1080, -1078, 1077, 1053, 1039, 1012, 992, 969, 951, - 929, 906, 886, 866, 845, 823, 809, 790, - 772, 758, 741, 726, 711, 701, 688, 675, - 666, 656, 648, 639, 633, 626, 618, 612, - 603, 594, 582, 572, 557, 545, 529, 516, - 504, 491, 480, 467, 459, 447, 438, 429, - 419, 412, 404, 397, 389, 382, 376, 368, - 363, 357, 351, 345, 340, 336, 330, 326, - 321, 318, 312, 308, 304, 300, 297, 294, - 291, 286, 284, 281, 278, 275, 271, 268, - 261, 257, 251, 245, 240, 235, 232, 225, - 223, 218, 213, 209, 206, 204, 199, 197, - 193, 189, 186, 185, 183, 179, 177, 175, - 172, 170, 169, 167, 164, 164, 162, 160, - 158, 157, 156, 154, 154, 152, 151, 150, - 149, 148, 146, 147, 146, 144, 143, 143, - 142, 141, 140, 141, 139, 138, 138, 138, - 137, 136, 136, 135, 134, 134, 134, 133, - 132, 132, 131, 130, 131, 130, 129, 128, - 129, 127, 127, 127, 127, 125, 125, 125, - 123, 123, 122, 120, 118, 115, 114, 111, - 110, 108, 106, 105, 103, 102, 100, 99, - 97, 97, 96, 95, 94, 93, 93, 91, - 91, 91, 90, 90, 89, 89, 88, 88, - 89, 88, 88, 87, 87, 87, 87, 86, - 87, 87, 86, 87, 86, 86, 84, 84, - 82, 80, 78, 76, 74, 72, 70, 68, - 67, 65, 62, 60, 58, 56, 55, 54, - 53, 51, 49, 49, 47, 45, 45, 45, - 41, 40, 39, 39, 34, 33, 34, 32, - 25, 23, 24, 20, 13, 9, 12, 0, - 0 -}; - -#elif defined(HAS_VAMEM_VERSION_1) - -/* Default Parameters */ -static const uint16_t -default_ctc_table_data[IA_CSS_VAMEM_1_CTC_TABLE_SIZE] = { - 0, 0, 256, 384, 384, 497, 765, 806, - 837, 851, 888, 901, 957, 981, 993, 1001, - 1011, 1029, 1028, 1039, 1062, 1059, 1073, 1080, - 1083, 1085, 1085, 1098, 1080, 1084, 1085, 1093, - 1078, 1073, 1070, 1069, 1077, 1066, 1072, 1063, - 1053, 1044, 1046, 
1053, 1039, 1028, 1025, 1024, - 1012, 1013, 1016, 996, 992, 990, 990, 980, - 969, 968, 961, 955, 951, 949, 933, 930, - 929, 925, 921, 916, 906, 901, 895, 893, - 886, 877, 872, 869, 866, 861, 857, 849, - 845, 838, 836, 832, 823, 821, 815, 813, - 809, 805, 796, 793, 790, 785, 784, 778, - 772, 768, 766, 763, 758, 752, 749, 745, - 741, 740, 736, 730, 726, 724, 723, 718, - 711, 709, 706, 704, 701, 698, 691, 689, - 688, 683, 683, 678, 675, 673, 671, 669, - 666, 663, 661, 660, 656, 656, 653, 650, - 648, 647, 646, 643, 639, 638, 637, 635, - 633, 632, 629, 627, 626, 625, 622, 621, - 618, 618, 614, 614, 612, 609, 606, 606, - 603, 600, 600, 597, 594, 591, 590, 586, - 582, 581, 578, 575, 572, 569, 563, 560, - 557, 554, 551, 548, 545, 539, 536, 533, - 529, 527, 524, 519, 516, 513, 510, 507, - 504, 501, 498, 493, 491, 488, 485, 484, - 480, 476, 474, 471, 467, 466, 464, 460, - 459, 455, 453, 449, 447, 446, 443, 441, - 438, 435, 432, 432, 429, 427, 426, 422, - 419, 418, 416, 414, 412, 410, 408, 406, - 404, 402, 401, 398, 397, 395, 393, 390, - 389, 388, 387, 384, 382, 380, 378, 377, - 376, 375, 372, 370, 368, 368, 366, 364, - 363, 361, 360, 358, 357, 355, 354, 352, - 351, 350, 349, 346, 345, 344, 344, 342, - 340, 339, 337, 337, 336, 335, 333, 331, - 330, 329, 328, 326, 326, 324, 324, 322, - 321, 320, 318, 318, 318, 317, 315, 313, - 312, 311, 311, 310, 308, 307, 306, 306, - 304, 304, 302, 301, 300, 300, 299, 297, - 297, 296, 296, 294, 294, 292, 291, 291, - 291, 290, 288, 287, 286, 286, 287, 285, - 284, 283, 282, 282, 281, 281, 279, 278, - 278, 278, 276, 276, 275, 274, 274, 273, - 271, 270, 269, 268, 268, 267, 265, 262, - 261, 260, 260, 259, 257, 254, 252, 252, - 251, 251, 249, 246, 245, 244, 243, 242, - 240, 239, 239, 237, 235, 235, 233, 231, - 232, 230, 229, 226, 225, 224, 225, 224, - 223, 220, 219, 219, 218, 217, 217, 214, - 213, 213, 212, 211, 209, 209, 209, 208, - 206, 205, 204, 203, 204, 203, 201, 200, - 199, 197, 198, 198, 197, 195, 194, 194, - 193, 192, 192, 191, 189, 190, 
189, 188, - 186, 187, 186, 185, 185, 184, 183, 181, - 183, 182, 181, 180, 179, 178, 178, 178, - 177, 176, 175, 176, 175, 174, 174, 173, - 172, 173, 172, 171, 170, 170, 169, 169, - 169, 168, 167, 166, 167, 167, 166, 165, - 164, 164, 164, 163, 164, 163, 162, 163, - 162, 161, 160, 161, 160, 160, 160, 159, - 158, 157, 158, 158, 157, 157, 156, 156, - 156, 156, 155, 155, 154, 154, 154, 154, - 154, 153, 152, 153, 152, 152, 151, 152, - 151, 152, 151, 150, 150, 149, 149, 150, - 149, 149, 148, 148, 148, 149, 148, 147, - 146, 146, 147, 146, 147, 146, 145, 146, - 146, 145, 144, 145, 144, 145, 144, 144, - 143, 143, 143, 144, 143, 142, 142, 142, - 142, 142, 142, 141, 141, 141, 141, 140, - 140, 141, 140, 140, 141, 140, 139, 139, - 139, 140, 139, 139, 138, 138, 137, 139, - 138, 138, 138, 137, 138, 137, 137, 137, - 137, 136, 137, 136, 136, 136, 136, 135, - 136, 135, 135, 135, 135, 136, 135, 135, - 134, 134, 133, 135, 134, 134, 134, 133, - 134, 133, 134, 133, 133, 132, 133, 133, - 132, 133, 132, 132, 132, 132, 131, 131, - 131, 132, 131, 131, 130, 131, 130, 132, - 131, 130, 130, 129, 130, 129, 130, 129, - 129, 129, 130, 129, 128, 128, 128, 128, - 129, 128, 128, 127, 127, 128, 128, 127, - 127, 126, 126, 127, 127, 126, 126, 126, - 127, 126, 126, 126, 125, 125, 126, 125, - 125, 124, 124, 124, 125, 125, 124, 124, - 123, 124, 124, 123, 123, 122, 122, 122, - 122, 122, 121, 120, 120, 119, 118, 118, - 118, 117, 117, 116, 115, 115, 115, 114, - 114, 113, 113, 112, 111, 111, 111, 110, - 110, 109, 109, 108, 108, 108, 107, 107, - 106, 106, 105, 105, 105, 104, 104, 103, - 103, 102, 102, 102, 102, 101, 101, 100, - 100, 99, 99, 99, 99, 99, 99, 98, - 97, 98, 97, 97, 97, 96, 96, 95, - 96, 95, 96, 95, 95, 94, 94, 95, - 94, 94, 94, 93, 93, 92, 93, 93, - 93, 93, 92, 92, 91, 92, 92, 92, - 91, 91, 90, 90, 91, 91, 91, 90, - 90, 90, 90, 91, 90, 90, 90, 89, - 89, 89, 90, 89, 89, 89, 89, 89, - 88, 89, 89, 88, 88, 88, 88, 87, - 89, 88, 88, 88, 88, 88, 87, 88, - 88, 88, 87, 87, 87, 87, 87, 88, - 87, 87, 87, 87, 
87, 87, 88, 87, - 87, 87, 87, 86, 86, 87, 87, 87, - 87, 86, 86, 86, 87, 87, 86, 87, - 86, 86, 86, 87, 87, 86, 86, 86, - 86, 86, 87, 87, 86, 85, 85, 85, - 84, 85, 85, 84, 84, 83, 83, 82, - 82, 82, 81, 81, 80, 79, 79, 79, - 78, 77, 77, 76, 76, 76, 75, 74, - 74, 74, 73, 73, 72, 71, 71, 71, - 70, 70, 69, 69, 68, 68, 67, 67, - 67, 66, 66, 65, 65, 64, 64, 63, - 62, 62, 62, 61, 60, 60, 59, 59, - 58, 58, 57, 57, 56, 56, 56, 55, - 55, 54, 55, 55, 54, 53, 53, 52, - 53, 53, 52, 51, 51, 50, 51, 50, - 49, 49, 50, 49, 49, 48, 48, 47, - 47, 48, 46, 45, 45, 45, 46, 45, - 45, 44, 45, 45, 45, 43, 42, 42, - 41, 43, 41, 40, 40, 39, 40, 41, - 39, 39, 39, 39, 39, 38, 35, 35, - 34, 37, 36, 34, 33, 33, 33, 35, - 34, 32, 32, 31, 32, 30, 29, 26, - 25, 25, 27, 26, 23, 23, 23, 25, - 24, 24, 22, 21, 20, 19, 16, 14, - 13, 13, 13, 10, 9, 7, 7, 7, - 12, 12, 12, 7, 0, 0, 0, 0 -}; - -#else -#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}" -#endif - -void -ia_css_config_ctc_table(void) -{ -#if defined(HAS_VAMEM_VERSION_2) - memcpy(default_ctc_table.data.vamem_2, default_ctc_table_data, - sizeof(default_ctc_table_data)); - default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_2; -#else - memcpy(default_ctc_table.data.vamem_1, default_ctc_table_data, - sizeof(default_ctc_table_data)); - default_ctc_table.vamem_type = 1IA_CSS_VAMEM_TYPE_1; -#endif -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h deleted file mode 100644 index a350dec8b4ad..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CTC_TABLE_HOST_H -#define __IA_CSS_CTC_TABLE_HOST_H - -#include "ia_css_ctc_types.h" - -extern struct ia_css_ctc_table default_ctc_table; - -void ia_css_config_ctc_table(void); - -#endif /* __IA_CSS_CTC_TABLE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h deleted file mode 100644 index 4ac47ce10566..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_CTC_TYPES_H -#define __IA_CSS_CTC_TYPES_H - -/* @file -* CSS-API header file for Chroma Tone Control parameters. -*/ - -/* Fractional bits for CTC gain (used only for ISP1). 
- * - * IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits - * of gain(=8), but also the bits(=5) to convert chroma - * from 13bit precision to 8bit precision. - * - * Gain (struct ia_css_ctc_table) : u5.8 - * Input(Chorma) : s0.12 (13bit precision) - * Output(Chorma): s0.7 (8bit precision) - * Output = (Input * Gain) >> IA_CSS_CTC_COEF_SHIFT - */ -#define IA_CSS_CTC_COEF_SHIFT 13 - -/* Number of elements in the CTC table. */ -#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2 10 -/* Number of elements in the CTC table. */ -#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE (1U<pixelnoise = - uDIGIT_FITTING(from->pixelnoise, 16, SH_CSS_BAYER_BITS); - to->c1_coring_threshold = - uDIGIT_FITTING(from->c1_coring_threshold, 16, - SH_CSS_BAYER_BITS); - to->c2_coring_threshold = - uDIGIT_FITTING(from->c2_coring_threshold, 16, - SH_CSS_BAYER_BITS); -} - -void -ia_css_de_dump( - const struct sh_css_isp_de_params *de, - unsigned level) -{ - if (!de) return; - ia_css_debug_dtrace(level, "Demosaic:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "de_pixelnoise", de->pixelnoise); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "de_c1_coring_threshold", - de->c1_coring_threshold); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "de_c2_coring_threshold", - de->c2_coring_threshold); -} - -void -ia_css_de_debug_dtrace( - const struct ia_css_de_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.pixelnoise=%d, " - "config.c1_coring_threshold=%d, config.c2_coring_threshold=%d\n", - config->pixelnoise, - config->c1_coring_threshold, config->c2_coring_threshold); -} - -void -ia_css_init_de_state( - void/*struct sh_css_isp_de_vmem_state*/ *state, - size_t size) -{ - memset(state, 0, size); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.h deleted file mode 100644 index 5dd6f06f2bf1..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DE_HOST_H -#define __IA_CSS_DE_HOST_H - -#include "ia_css_de_types.h" -#include "ia_css_de_param.h" - -extern const struct ia_css_de_config default_de_config; - -void -ia_css_de_encode( - struct sh_css_isp_de_params *to, - const struct ia_css_de_config *from, - unsigned size); - -void -ia_css_de_dump( - const struct sh_css_isp_de_params *de, - unsigned level); - -void -ia_css_de_debug_dtrace( - const struct ia_css_de_config *config, - unsigned level); - -void -ia_css_init_de_state( - void/*struct sh_css_isp_de_vmem_state*/ *state, - size_t size); - -#endif /* __IA_CSS_DE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_param.h deleted file mode 100644 index 833c80afc7a8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_param.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DE_PARAM_H -#define __IA_CSS_DE_PARAM_H - -#include "type_support.h" - -/* DE (Demosaic) */ -struct sh_css_isp_de_params { - int32_t pixelnoise; - int32_t c1_coring_threshold; - int32_t c2_coring_threshold; -}; - -#endif /* __IA_CSS_DE_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_state.h deleted file mode 100644 index d64511763436..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_state.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_DE_STATE_H -#define __IA_CSS_DE_STATE_H - -#include "type_support.h" -#include "vmem.h" - -/* DE (Demosaic) */ -struct sh_css_isp_de_vmem_state { - VMEM_ARRAY(de_buf[4], MAX_VECTORS_PER_BUF_LINE*ISP_NWAY); -}; - -#endif /* __IA_CSS_DE_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h deleted file mode 100644 index 803be68abc54..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DE_TYPES_H -#define __IA_CSS_DE_TYPES_H - -/* @file -* CSS-API header file for Demosaic (bayer-to-YCgCo) parameters. -*/ - -/* Demosaic (bayer-to-YCgCo) configuration. - * - * ISP block: DE1 - * ISP1: DE1 is used. - * (ISP2: DE2 is used.) - */ -struct ia_css_de_config { - ia_css_u0_16 pixelnoise; /** Pixel noise used in moire elimination. - u0.16, [0,65535], - default 0, ineffective 0 */ - ia_css_u0_16 c1_coring_threshold; /** Coring threshold for C1. - This is the same as nr_config.threshold_cb. - u0.16, [0,65535], - default 128(0.001953125), ineffective 0 */ - ia_css_u0_16 c2_coring_threshold; /** Coring threshold for C2. - This is the same as nr_config.threshold_cr. 
- u0.16, [0,65535], - default 128(0.001953125), ineffective 0 */ -}; - -#endif /* __IA_CSS_DE_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.c deleted file mode 100644 index a5247a57bafb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" - -#include "ia_css_de2.host.h" - -const struct ia_css_ecd_config default_ecd_config = { - (1 << (ISP_VEC_ELEMBITS - 1)) * 2 / 3, /* 2/3 */ - (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1.0 */ - 0, /* 0.0 */ -}; - -void -ia_css_ecd_encode( - struct sh_css_isp_ecd_params *to, - const struct ia_css_ecd_config *from, - unsigned size) -{ - (void)size; - to->zip_strength = from->zip_strength; - to->fc_strength = from->fc_strength; - to->fc_debias = from->fc_debias; -} - -void -ia_css_ecd_dump( - const struct sh_css_isp_ecd_params *ecd, - unsigned level); - -void -ia_css_ecd_debug_dtrace( - const struct ia_css_ecd_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.zip_strength=%d, " - "config.fc_strength=%d, config.fc_debias=%d\n", - config->zip_strength, - config->fc_strength, config->fc_debias); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.h deleted file mode 100644 index f7cd8448cb30..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_DE2_HOST_H -#define __IA_CSS_DE2_HOST_H - -#include "ia_css_de2_types.h" -#include "ia_css_de2_param.h" - -extern const struct ia_css_ecd_config default_ecd_config; - -void -ia_css_ecd_encode( - struct sh_css_isp_ecd_params *to, - const struct ia_css_ecd_config *from, - unsigned size); - -void -ia_css_ecd_dump( - const struct sh_css_isp_ecd_params *ecd, - unsigned level); - -void -ia_css_ecd_debug_dtrace( - const struct ia_css_ecd_config *config, unsigned level); - -#endif /* __IA_CSS_DE2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_param.h deleted file mode 100644 index ea2da73a4927..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_param.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_DE2_PARAM_H -#define __IA_CSS_DE2_PARAM_H - -#include "type_support.h" - -/* Reuse DE1 params and extend them */ -#include "../de_1.0/ia_css_de_param.h" - -/* DE (Demosaic) */ -struct sh_css_isp_ecd_params { - int32_t zip_strength; - int32_t fc_strength; - int32_t fc_debias; -}; - -#endif /* __IA_CSS_DE2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h deleted file mode 100644 index 50bdde419bb1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DE2_TYPES_H -#define __IA_CSS_DE2_TYPES_H - -/* @file -* CSS-API header file for Demosaicing parameters. -*/ - -/* Eigen Color Demosaicing configuration. - * - * ISP block: DE2 - * (ISP1: DE1 is used.) - * ISP2: DE2 is used. - */ -struct ia_css_ecd_config { - uint16_t zip_strength; /** Strength of zipper reduction. - u0.13, [0,8191], - default 5489(0.67), ineffective 0 */ - uint16_t fc_strength; /** Strength of false color reduction. - u0.13, [0,8191], - default 8191(almost 1.0), ineffective 0 */ - uint16_t fc_debias; /** Prevent color change - on noise or Gr/Gb imbalance. 
- u0.13, [0,8191], - default 0, ineffective 0 */ -}; - -#endif /* __IA_CSS_DE2_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_param.h deleted file mode 100644 index 59af9523604d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_param.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DEX_PARAM_H -#define __IA_CSS_DEX_PARAM_H - -#include "ia_css_de2_param.h" - -#endif /* __IA_CSS_DEX_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_state.h deleted file mode 100644 index f2c65ba58983..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_state.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DE2_STATE_H -#define __IA_CSS_DE2_STATE_H - -/* Reuse DE1 states */ -#include "../de_1.0/ia_css_de_state.h" - -#endif /* __IA_CSS_DE2_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c deleted file mode 100644 index b1f9dc8d662d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "sh_css_frac.h" - -#include "ia_css_dp.host.h" - -#ifdef ISP2401 -/* We use a different set of DPC configuration parameters when - * DPC is used before OBC and NORM. Currently these parameters - * are used in usecases which selects both BDS and DPC. 
- **/ -const struct ia_css_dp_config default_dp_10bpp_config = { - 1024, - 2048, - 32768, - 32768, - 32768, - 32768 -}; -#endif -const struct ia_css_dp_config default_dp_config = { - 8192, - 2048, - 32768, - 32768, - 32768, - 32768 -}; - -void -ia_css_dp_encode( - struct sh_css_isp_dp_params *to, - const struct ia_css_dp_config *from, - unsigned size) -{ - int gain = from->gain; - int gr = from->gr; - int r = from->r; - int b = from->b; - int gb = from->gb; - - (void)size; - to->threshold_single = - SH_CSS_BAYER_MAXVAL; - to->threshold_2adjacent = - uDIGIT_FITTING(from->threshold, 16, SH_CSS_BAYER_BITS); - to->gain = - uDIGIT_FITTING(from->gain, 8, SH_CSS_DP_GAIN_SHIFT); - - to->coef_rr_gr = - uDIGIT_FITTING (gain * gr / r, 8, SH_CSS_DP_GAIN_SHIFT); - to->coef_rr_gb = - uDIGIT_FITTING (gain * gb / r, 8, SH_CSS_DP_GAIN_SHIFT); - to->coef_bb_gb = - uDIGIT_FITTING (gain * gb / b, 8, SH_CSS_DP_GAIN_SHIFT); - to->coef_bb_gr = - uDIGIT_FITTING (gain * gr / b, 8, SH_CSS_DP_GAIN_SHIFT); - to->coef_gr_rr = - uDIGIT_FITTING (gain * r / gr, 8, SH_CSS_DP_GAIN_SHIFT); - to->coef_gr_bb = - uDIGIT_FITTING (gain * b / gr, 8, SH_CSS_DP_GAIN_SHIFT); - to->coef_gb_bb = - uDIGIT_FITTING (gain * b / gb, 8, SH_CSS_DP_GAIN_SHIFT); - to->coef_gb_rr = - uDIGIT_FITTING (gain * r / gb, 8, SH_CSS_DP_GAIN_SHIFT); -} - -void -ia_css_dp_dump( - const struct sh_css_isp_dp_params *dp, - unsigned level) -{ - if (!dp) return; - ia_css_debug_dtrace(level, "Defect Pixel Correction:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dp_threshold_single_w_2adj_on", - dp->threshold_single); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dp_threshold_2adj_w_2adj_on", - dp->threshold_2adjacent); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dp_gain", dp->gain); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dpc_coef_rr_gr", dp->coef_rr_gr); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dpc_coef_rr_gb", dp->coef_rr_gb); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dpc_coef_bb_gb", 
dp->coef_bb_gb); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dpc_coef_bb_gr", dp->coef_bb_gr); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dpc_coef_gr_rr", dp->coef_gr_rr); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dpc_coef_gr_bb", dp->coef_gr_bb); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dpc_coef_gb_bb", dp->coef_gb_bb); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "dpc_coef_gb_rr", dp->coef_gb_rr); -} - -void -ia_css_dp_debug_dtrace( - const struct ia_css_dp_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.threshold=%d, config.gain=%d\n", - config->threshold, config->gain); -} - -void -ia_css_init_dp_state( - void/*struct sh_css_isp_dp_vmem_state*/ *state, - size_t size) -{ - memset(state, 0, size); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h deleted file mode 100644 index db21814ad3db..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_DP_HOST_H -#define __IA_CSS_DP_HOST_H - -#include "ia_css_dp_types.h" -#include "ia_css_dp_param.h" - -extern const struct ia_css_dp_config default_dp_config; -#ifdef ISP2401 -extern const struct ia_css_dp_config default_dp_10bpp_config; -#endif - -void -ia_css_dp_encode( - struct sh_css_isp_dp_params *to, - const struct ia_css_dp_config *from, - unsigned size); - -void -ia_css_dp_dump( - const struct sh_css_isp_dp_params *dp, - unsigned level); - -void -ia_css_dp_debug_dtrace( - const struct ia_css_dp_config *config, - unsigned level); - -void -ia_css_init_dp_state( - void/*struct sh_css_isp_dp_vmem_state*/ *state, - size_t size); - -#endif /* __IA_CSS_DP_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_param.h deleted file mode 100644 index fc9035a98d92..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_param.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_DP_PARAM_H -#define __IA_CSS_DP_PARAM_H - -#include "type_support.h" -#include "bnr/bnr_1.0/ia_css_bnr_param.h" - -/* DP (Defect Pixel Correction) */ -struct sh_css_isp_dp_params { - int32_t threshold_single; - int32_t threshold_2adjacent; - int32_t gain; - int32_t coef_rr_gr; - int32_t coef_rr_gb; - int32_t coef_bb_gb; - int32_t coef_bb_gr; - int32_t coef_gr_rr; - int32_t coef_gr_bb; - int32_t coef_gb_bb; - int32_t coef_gb_rr; -}; - -#endif /* __IA_CSS_DP_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h deleted file mode 100644 index 1bf6dcef7dc7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DP_TYPES_H -#define __IA_CSS_DP_TYPES_H - -/* @file -* CSS-API header file for Defect Pixel Correction (DPC) parameters. -*/ - - -/* Defect Pixel Correction configuration. - * - * ISP block: DPC1 (DPC after WB) - * DPC2 (DPC before WB) - * ISP1: DPC1 is used. - * ISP2: DPC2 is used. - */ -struct ia_css_dp_config { - ia_css_u0_16 threshold; /** The threshold of defect pixel correction, - representing the permissible difference of - intensity between one pixel and its - surrounding pixels. 
Smaller values result - in more frequent pixel corrections. - u0.16, [0,65535], - default 8192, ineffective 65535 */ - ia_css_u8_8 gain; /** The sensitivity of mis-correction. ISP will - miss a lot of defects if the value is set - too large. - u8.8, [0,65535], - default 4096, ineffective 65535 */ - uint32_t gr; /* unsigned .<16-integer_bits> */ - uint32_t r; /* unsigned .<16-integer_bits> */ - uint32_t b; /* unsigned .<16-integer_bits> */ - uint32_t gb; /* unsigned .<16-integer_bits> */ -}; - -#endif /* __IA_CSS_DP_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.c deleted file mode 100644 index bc14b85cf952..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.c +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_dpc2.host.h" -#include "assert_support.h" - -void -ia_css_dpc2_encode( - struct ia_css_isp_dpc2_params *to, - const struct ia_css_dpc2_config *from, - size_t size) -{ - (void)size; - - assert ((from->metric1 >= 0) && (from->metric1 <= METRIC1_ONE_FP)); - assert ((from->metric3 >= 0) && (from->metric3 <= METRIC3_ONE_FP)); - assert ((from->metric2 >= METRIC2_ONE_FP) && - (from->metric2 < 256*METRIC2_ONE_FP)); - assert ((from->wb_gain_gr > 0) && (from->wb_gain_gr < 16*WBGAIN_ONE_FP)); - assert ((from->wb_gain_r > 0) && (from->wb_gain_r < 16*WBGAIN_ONE_FP)); - assert ((from->wb_gain_b > 0) && (from->wb_gain_b < 16*WBGAIN_ONE_FP)); - assert ((from->wb_gain_gb > 0) && (from->wb_gain_gb < 16*WBGAIN_ONE_FP)); - - to->metric1 = from->metric1; - to->metric2 = from->metric2; - to->metric3 = from->metric3; - - to->wb_gain_gr = from->wb_gain_gr; - to->wb_gain_r = from->wb_gain_r; - to->wb_gain_b = from->wb_gain_b; - to->wb_gain_gb = from->wb_gain_gb; -} - -/* TODO: AM: This needs a proper implementation. */ -void -ia_css_init_dpc2_state( - void *state, - size_t size) -{ - (void)state; - (void)size; -} - -#ifndef IA_CSS_NO_DEBUG -/* TODO: AM: This needs a proper implementation. */ -void -ia_css_dpc2_debug_dtrace( - const struct ia_css_dpc2_config *config, - unsigned level) -{ - (void)config; - (void)level; -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.h deleted file mode 100644 index 38d10a5237c6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DPC2_HOST_H -#define __IA_CSS_DPC2_HOST_H - -#include "ia_css_dpc2_types.h" -#include "ia_css_dpc2_param.h" - -void -ia_css_dpc2_encode( - struct ia_css_isp_dpc2_params *to, - const struct ia_css_dpc2_config *from, - size_t size); - -void -ia_css_init_dpc2_state( - void *state, - size_t size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_dpc2_debug_dtrace( - const struct ia_css_dpc2_config *config, - unsigned level); -#endif - -#endif /* __IA_CSS_DPC2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_param.h deleted file mode 100644 index ef668d54fe16..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_param.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_DPC2_PARAM_H -#define __IA_CSS_DPC2_PARAM_H - -#include "type_support.h" -#include "vmem.h" /* for VMEM_ARRAY*/ - - -/* 4 planes : GR, R, B, GB */ -#define NUM_PLANES 4 - -/* ToDo: Move this to testsetup */ -#define MAX_FRAME_SIMDWIDTH 30 - -/* 3 lines state per color plane input_line_state */ -#define DPC2_STATE_INPUT_BUFFER_HEIGHT (3 * NUM_PLANES) -/* Each plane has width equal to half frame line */ -#define DPC2_STATE_INPUT_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 1 line state per color plane for local deviation state*/ -#define DPC2_STATE_LOCAL_DEVIATION_BUFFER_HEIGHT (1 * NUM_PLANES) -/* Each plane has width equal to half frame line */ -#define DPC2_STATE_LOCAL_DEVIATION_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* MINMAX state buffer stores 1 full input line (GR-R color line) */ -#define DPC2_STATE_SECOND_MINMAX_BUFFER_HEIGHT 1 -#define DPC2_STATE_SECOND_MINMAX_BUFFER_WIDTH MAX_FRAME_SIMDWIDTH - - -struct ia_css_isp_dpc2_params { - int32_t metric1; - int32_t metric2; - int32_t metric3; - int32_t wb_gain_gr; - int32_t wb_gain_r; - int32_t wb_gain_b; - int32_t wb_gain_gb; -}; - -#endif /* __IA_CSS_DPC2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h deleted file mode 100644 index 6727682d287f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DPC2_TYPES_H -#define __IA_CSS_DPC2_TYPES_H - -/* @file -* CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters. -*/ - -#include "type_support.h" - -/**@{*/ -/* Floating point constants for different metrics. */ -#define METRIC1_ONE_FP (1<<12) -#define METRIC2_ONE_FP (1<<5) -#define METRIC3_ONE_FP (1<<12) -#define WBGAIN_ONE_FP (1<<9) -/**@}*/ - -/**@{*/ -/* Defect Pixel Correction 2 configuration. - * - * \brief DPC2 public parameters. - * \details Struct with all parameters for the Defect Pixel Correction 2 - * kernel that can be set from the CSS API. - * - * ISP block: DPC1 (DPC after WB) - * DPC2 (DPC before WB) - * ISP1: DPC1 is used. - * ISP2: DPC2 is used. - * - */ -struct ia_css_dpc2_config { - /**@{*/ - int32_t metric1; - int32_t metric2; - int32_t metric3; - int32_t wb_gain_gr; - int32_t wb_gain_r; - int32_t wb_gain_b; - int32_t wb_gain_gb; - /**@}*/ -}; -/**@}*/ - -#endif /* __IA_CSS_DPC2_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c deleted file mode 100644 index 955adc4d6ab0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_frame_public.h" -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" - -#include "ia_css_types.h" -#include "ia_css_host_data.h" -#include "sh_css_param_dvs.h" -#include "sh_css_params.h" -#include "ia_css_binary.h" -#include "ia_css_debug.h" -#include "memory_access.h" -#include "assert_support.h" - -#include "ia_css_dvs.host.h" - -static const struct ia_css_dvs_configuration default_config = { - .info = (struct ia_css_frame_info *)NULL, -}; - -void -ia_css_dvs_config( - struct sh_css_isp_dvs_isp_config *to, - const struct ia_css_dvs_configuration *from, - unsigned size) -{ - (void)size; - to->num_horizontal_blocks = - DVS_NUM_BLOCKS_X(from->info->res.width); - to->num_vertical_blocks = - DVS_NUM_BLOCKS_Y(from->info->res.height); -} - -void -ia_css_dvs_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *info) -{ - struct ia_css_dvs_configuration config = default_config; - - config.info = info; - - ia_css_configure_dvs(binary, &config); -} - -static void -convert_coords_to_ispparams( - struct ia_css_host_data *gdc_warp_table, - const struct ia_css_dvs_6axis_config *config, - unsigned int i_stride, - unsigned int o_width, - unsigned int o_height, - unsigned int uv_flag) -{ - unsigned int i, j; -#ifndef ISP2401 - /* Coverity CID 298073 - initialize */ -#endif - gdc_warp_param_mem_t s = { 0 }; - unsigned int x00, x01, x10, x11, - y00, y01, y10, y11; - - unsigned int xmin, ymin, xmax, ymax; - unsigned int topleft_x, topleft_y, bottom_x, bottom_y, - topleft_x_frac, topleft_y_frac; - unsigned int dvs_interp_envelope = (DVS_GDC_INTERP_METHOD == HRT_GDC_BLI_MODE ? 
- DVS_GDC_BLI_INTERP_ENVELOPE : DVS_GDC_BCI_INTERP_ENVELOPE); - - /* number of blocks per height and width */ - unsigned int num_blocks_y = (uv_flag ? DVS_NUM_BLOCKS_Y_CHROMA(o_height) : DVS_NUM_BLOCKS_Y(o_height) ); - unsigned int num_blocks_x = (uv_flag ? DVS_NUM_BLOCKS_X_CHROMA(o_width) : DVS_NUM_BLOCKS_X(o_width) ); // round num_x up to blockdim_x, if it concerns the Y0Y1 block (uv_flag==0) round up to even - - - unsigned int in_stride = i_stride * DVS_INPUT_BYTES_PER_PIXEL; - unsigned width, height; - unsigned int *xbuff = NULL; - unsigned int *ybuff = NULL; - struct gdc_warp_param_mem_s *ptr; - - assert(config != NULL); - assert(gdc_warp_table != NULL); - assert(gdc_warp_table->address != NULL); - - ptr = (struct gdc_warp_param_mem_s *)gdc_warp_table->address; - - ptr += (2 * uv_flag); /* format is Y0 Y1 UV, so UV starts at 3rd position */ - - if(uv_flag == 0) - { - xbuff = config->xcoords_y; - ybuff = config->ycoords_y; - width = config->width_y; - height = config->height_y; - } - else - { - xbuff = config->xcoords_uv; - ybuff = config->ycoords_uv; - width = config->width_uv; - height = config->height_uv; - } - - IA_CSS_LOG("blockdim_x %d blockdim_y %d", - DVS_BLOCKDIM_X, DVS_BLOCKDIM_Y_LUMA >> uv_flag); - IA_CSS_LOG("num_blocks_x %d num_blocks_y %d", num_blocks_x,num_blocks_y); - IA_CSS_LOG("width %d height %d", width, height); - - assert(width == num_blocks_x + 1); // the width and height of the provided morphing table should be 1 more than the number of blocks - assert(height == num_blocks_y + 1); - - for (j = 0; j < num_blocks_y; j++) { - for (i = 0; i < num_blocks_x; i++) { - - x00 = xbuff[j * width + i]; - x01 = xbuff[j * width + (i+1)]; - x10 = xbuff[(j+1) * width + i]; - x11 = xbuff[(j+1) * width + (i+1)]; - - y00 = ybuff[j * width + i]; - y01 = ybuff[j * width + (i+1)]; - y10 = ybuff[(j+1) * width + i]; - y11 = ybuff[(j+1) * width + (i+1)]; - - xmin = min(x00, x10); - xmax = max(x01, x11); - ymin = min(y00, y01); - ymax = max(y10, y11); - - /* 
Assert that right column's X is greater */ - assert ( x01 >= xmin); - assert ( x11 >= xmin); - /* Assert that bottom row's Y is greater */ - assert ( y10 >= ymin); - assert ( y11 >= ymin); - - topleft_y = ymin >> DVS_COORD_FRAC_BITS; - topleft_x = ((xmin >> DVS_COORD_FRAC_BITS) - >> XMEM_ALIGN_LOG2) - << (XMEM_ALIGN_LOG2); - s.in_addr_offset = topleft_y * in_stride + topleft_x; - - /* similar to topleft_y calculation, but round up if ymax - * has any fraction bits */ - bottom_y = CEIL_DIV(ymax, 1 << DVS_COORD_FRAC_BITS); - s.in_block_height = bottom_y - topleft_y + dvs_interp_envelope; - - bottom_x = CEIL_DIV(xmax, 1 << DVS_COORD_FRAC_BITS); - s.in_block_width = bottom_x - topleft_x + dvs_interp_envelope; - - topleft_x_frac = topleft_x << (DVS_COORD_FRAC_BITS); - topleft_y_frac = topleft_y << (DVS_COORD_FRAC_BITS); - - s.p0_x = x00 - topleft_x_frac; - s.p1_x = x01 - topleft_x_frac; - s.p2_x = x10 - topleft_x_frac; - s.p3_x = x11 - topleft_x_frac; - - s.p0_y = y00 - topleft_y_frac; - s.p1_y = y01 - topleft_y_frac; - s.p2_y = y10 - topleft_y_frac; - s.p3_y = y11 - topleft_y_frac; - - // block should fit within the boundingbox. - assert(s.p0_x < (s.in_block_width << DVS_COORD_FRAC_BITS)); - assert(s.p1_x < (s.in_block_width << DVS_COORD_FRAC_BITS)); - assert(s.p2_x < (s.in_block_width << DVS_COORD_FRAC_BITS)); - assert(s.p3_x < (s.in_block_width << DVS_COORD_FRAC_BITS)); - assert(s.p0_y < (s.in_block_height << DVS_COORD_FRAC_BITS)); - assert(s.p1_y < (s.in_block_height << DVS_COORD_FRAC_BITS)); - assert(s.p2_y < (s.in_block_height << DVS_COORD_FRAC_BITS)); - assert(s.p3_y < (s.in_block_height << DVS_COORD_FRAC_BITS)); - - // block size should be greater than zero. 
- assert(s.p0_x < s.p1_x); - assert(s.p2_x < s.p3_x); - assert(s.p0_y < s.p2_y); - assert(s.p1_y < s.p3_y); - -#if 0 - printf("j: %d\ti:%d\n", j, i); - printf("offset: %d\n", s.in_addr_offset); - printf("p0_x: %d\n", s.p0_x); - printf("p0_y: %d\n", s.p0_y); - printf("p1_x: %d\n", s.p1_x); - printf("p1_y: %d\n", s.p1_y); - printf("p2_x: %d\n", s.p2_x); - printf("p2_y: %d\n", s.p2_y); - printf("p3_x: %d\n", s.p3_x); - printf("p3_y: %d\n", s.p3_y); - - printf("p0_x_nofrac[0]: %d\n", s.p0_x>>DVS_COORD_FRAC_BITS); - printf("p0_y_nofrac[1]: %d\n", s.p0_y>>DVS_COORD_FRAC_BITS); - printf("p1_x_nofrac[2]: %d\n", s.p1_x>>DVS_COORD_FRAC_BITS); - printf("p1_y_nofrac[3]: %d\n", s.p1_y>>DVS_COORD_FRAC_BITS); - printf("p2_x_nofrac[0]: %d\n", s.p2_x>>DVS_COORD_FRAC_BITS); - printf("p2_y_nofrac[1]: %d\n", s.p2_y>>DVS_COORD_FRAC_BITS); - printf("p3_x_nofrac[2]: %d\n", s.p3_x>>DVS_COORD_FRAC_BITS); - printf("p3_y_nofrac[3]: %d\n", s.p3_y>>DVS_COORD_FRAC_BITS); - printf("\n"); -#endif - - *ptr = s; - - // storage format: - // Y0 Y1 UV0 Y2 Y3 UV1 - /* if uv_flag equals true increment with 2 incase x is odd, this to - skip the uv position. */ - if (uv_flag) - ptr += 3; - else - ptr += (1 + (i&1)); - } - } -} - -struct ia_css_host_data * -convert_allocate_dvs_6axis_config( - const struct ia_css_dvs_6axis_config *dvs_6axis_config, - const struct ia_css_binary *binary, - const struct ia_css_frame_info *dvs_in_frame_info) -{ - unsigned int i_stride; - unsigned int o_width; - unsigned int o_height; - struct ia_css_host_data *me; - struct gdc_warp_param_mem_s *isp_data_ptr; - - assert(binary != NULL); - assert(dvs_6axis_config != NULL); - assert(dvs_in_frame_info != NULL); - - me = ia_css_host_data_allocate((size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3)); - - if (!me) - return NULL; - - /*DVS only supports input frame of YUV420 or NV12. 
Fail for all other cases*/ - assert((dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_NV12) - || (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420)); - - isp_data_ptr = (struct gdc_warp_param_mem_s *)me->address; - - i_stride = dvs_in_frame_info->padded_width; - - o_width = binary->out_frame_info[0].res.width; - o_height = binary->out_frame_info[0].res.height; - - /* Y plane */ - convert_coords_to_ispparams(me, dvs_6axis_config, - i_stride, o_width, o_height, 0); - - if (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420) { - /*YUV420 has half the stride for U/V plane*/ - i_stride /=2; - } - - /* UV plane (packed inside the y plane) */ - convert_coords_to_ispparams(me, dvs_6axis_config, - i_stride, o_width/2, o_height/2, 1); - - return me; -} - -enum ia_css_err -store_dvs_6axis_config( - const struct ia_css_dvs_6axis_config *dvs_6axis_config, - const struct ia_css_binary *binary, - const struct ia_css_frame_info *dvs_in_frame_info, - hrt_vaddress ddr_addr_y) -{ - - struct ia_css_host_data *me; - assert(dvs_6axis_config != NULL); - assert(ddr_addr_y != mmgr_NULL); - assert(dvs_in_frame_info != NULL); - - me = convert_allocate_dvs_6axis_config(dvs_6axis_config, - binary, - dvs_in_frame_info); - - if (!me) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - - ia_css_params_store_ia_css_host_data( - ddr_addr_y, - me); - ia_css_host_data_free(me); - - return IA_CSS_SUCCESS; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h deleted file mode 100644 index 2f513e29d88c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DVS_HOST_H -#define __IA_CSS_DVS_HOST_H - -#include "ia_css_frame_public.h" -#include "ia_css_binary.h" -#include "sh_css_params.h" - -#include "ia_css_types.h" -#include "ia_css_dvs_types.h" -#include "ia_css_dvs_param.h" - -/* For bilinear interpolation, we need to add +1 to input block height calculation. - * For bicubic interpolation, we will need to add +3 instaed */ -#define DVS_GDC_BLI_INTERP_ENVELOPE 1 -#define DVS_GDC_BCI_INTERP_ENVELOPE 3 - -void -ia_css_dvs_config( - struct sh_css_isp_dvs_isp_config *to, - const struct ia_css_dvs_configuration *from, - unsigned size); - -void -ia_css_dvs_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *from); - -void -convert_dvs_6axis_config( - struct ia_css_isp_parameters *params, - const struct ia_css_binary *binary); - -struct ia_css_host_data * -convert_allocate_dvs_6axis_config( - const struct ia_css_dvs_6axis_config *dvs_6axis_config, - const struct ia_css_binary *binary, - const struct ia_css_frame_info *dvs_in_frame_info); - -enum ia_css_err -store_dvs_6axis_config( - const struct ia_css_dvs_6axis_config *dvs_6axis_config, - const struct ia_css_binary *binary, - const struct ia_css_frame_info *dvs_in_frame_info, - hrt_vaddress ddr_addr_y); - -#endif /* __IA_CSS_DVS_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h deleted file mode 100644 index 
66a7e58659c0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DVS_PARAM_H -#define __IA_CSS_DVS_PARAM_H - -#include -#ifdef ISP2401 - -#if !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) && !defined(PARAMBIN_GENERATION) -#endif -#include "dma.h" -#ifdef ISP2401 -#endif /* !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) */ - -#endif -#include "uds/uds_1.0/ia_css_uds_param.h" - -#ifdef ISP2401 - -#endif -/* dvserence frame */ -struct sh_css_isp_dvs_isp_config { - uint32_t num_horizontal_blocks; - uint32_t num_vertical_blocks; -}; - -#endif /* __IA_CSS_DVS_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h deleted file mode 100644 index 30772d217fb2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_DVS_TYPES_H -#define __IA_CSS_DVS_TYPES_H - -/* DVS frame - * - * ISP block: dvs frame - */ - -#include "ia_css_frame_public.h" - -struct ia_css_dvs_configuration { - const struct ia_css_frame_info *info; -}; - -#endif /* __IA_CSS_DVS_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.c deleted file mode 100644 index 8f2178bf9e68..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.c +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" -#endif - -#include "type_support.h" -#include "assert_support.h" -#include "math_support.h" /* for min and max */ - -#include "ia_css_eed1_8.host.h" - -/* WARNING1: Number of inv points should be less or equal to 16, - * due to implementation limitation. See kernel design document - * for more details. - * WARNING2: Do not modify the number of inv points without correcting - * the EED1_8 kernel implementation assumptions. 
- */ -#define NUMBER_OF_CHGRINV_POINTS 15 -#define NUMBER_OF_TCINV_POINTS 9 -#define NUMBER_OF_FCINV_POINTS 9 - -static const int16_t chgrinv_x[NUMBER_OF_CHGRINV_POINTS] = { -0, 16, 64, 144, 272, 448, 672, 976, -1376, 1888, 2528, 3312, 4256, 5376, 6688}; - -static const int16_t chgrinv_a[NUMBER_OF_CHGRINV_POINTS] = { --7171, -256, -29, -3456, -1071, -475, -189, -102, --48, -38, -10, -9, -7, -6, 0}; - -static const int16_t chgrinv_b[NUMBER_OF_CHGRINV_POINTS] = { -8191, 1021, 256, 114, 60, 37, 24, 17, -12, 9, 6, 5, 4, 3, 2}; - -static const int16_t chgrinv_c[NUMBER_OF_CHGRINV_POINTS] = { -1, 1, 1, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0}; - -static const int16_t tcinv_x[NUMBER_OF_TCINV_POINTS] = { -0, 4, 11, 23, 42, 68, 102, 148, 205}; - -static const int16_t tcinv_a[NUMBER_OF_TCINV_POINTS] = { --6364, -631, -126, -34, -13, -6, -4452, -2156, 0}; - -static const int16_t tcinv_b[NUMBER_OF_TCINV_POINTS] = { -8191, 1828, 726, 352, 197, 121, 80, 55, 40}; - -static const int16_t tcinv_c[NUMBER_OF_TCINV_POINTS] = { -1, 1, 1, 1, 1, 1, 0, 0, 0}; - -static const int16_t fcinv_x[NUMBER_OF_FCINV_POINTS] = { -0, 80, 216, 456, 824, 1344, 2040, 2952, 4096}; - -static const int16_t fcinv_a[NUMBER_OF_FCINV_POINTS] = { --5244, -486, -86, -2849, -961, -400, -180, -86, 0}; - -static const int16_t fcinv_b[NUMBER_OF_FCINV_POINTS] = { -8191, 1637, 607, 287, 159, 98, 64, 44, 32}; - -static const int16_t fcinv_c[NUMBER_OF_FCINV_POINTS] = { -1, 1, 1, 0, 0, 0, 0, 0, 0}; - - -void -ia_css_eed1_8_vmem_encode( - struct eed1_8_vmem_params *to, - const struct ia_css_eed1_8_config *from, - size_t size) -{ - unsigned i, j, base; - const unsigned total_blocks = 4; - const unsigned shuffle_block = 16; - - (void)size; - - /* Init */ - for (i = 0; i < ISP_VEC_NELEMS; i++) { - to->e_dew_enh_x[0][i] = 0; - to->e_dew_enh_y[0][i] = 0; - to->e_dew_enh_a[0][i] = 0; - to->e_dew_enh_f[0][i] = 0; - to->chgrinv_x[0][i] = 0; - to->chgrinv_a[0][i] = 0; - to->chgrinv_b[0][i] = 0; - to->chgrinv_c[0][i] = 0; - 
to->tcinv_x[0][i] = 0; - to->tcinv_a[0][i] = 0; - to->tcinv_b[0][i] = 0; - to->tcinv_c[0][i] = 0; - to->fcinv_x[0][i] = 0; - to->fcinv_a[0][i] = 0; - to->fcinv_b[0][i] = 0; - to->fcinv_c[0][i] = 0; - } - - /* Constraints on dew_enhance_seg_x and dew_enhance_seg_y: - * - values should be greater or equal to 0. - * - values should be ascending. - * - value of index zero is equal to 0. - */ - - /* Checking constraints: */ - /* TODO: investigate if an assert is the right way to report that - * the constraints are violated. - */ - for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) { - assert(from->dew_enhance_seg_x[j] > -1); - assert(from->dew_enhance_seg_y[j] > -1); - } - - for (j = 1; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) { - assert(from->dew_enhance_seg_x[j] > from->dew_enhance_seg_x[j-1]); - assert(from->dew_enhance_seg_y[j] > from->dew_enhance_seg_y[j-1]); - } - - assert(from->dew_enhance_seg_x[0] == 0); - assert(from->dew_enhance_seg_y[0] == 0); - - /* Constraints on chgrinv_x, tcinv_x and fcinv_x: - * - values should be greater or equal to 0. - * - values should be ascending. - * - value of index zero is equal to 0. - */ - assert(chgrinv_x[0] == 0); - assert(tcinv_x[0] == 0); - assert(fcinv_x[0] == 0); - - for (j = 1; j < NUMBER_OF_CHGRINV_POINTS; j++) { - assert(chgrinv_x[j] > chgrinv_x[j-1]); - } - - for (j = 1; j < NUMBER_OF_TCINV_POINTS; j++) { - assert(tcinv_x[j] > tcinv_x[j-1]); - } - - for (j = 1; j < NUMBER_OF_FCINV_POINTS; j++) { - assert(fcinv_x[j] > fcinv_x[j-1]); - } - - /* The implementation of the calulating 1/x is based on the availability - * of the OP_vec_shuffle16 operation. - * A 64 element vector is split up in 4 blocks of 16 element. Each array is copied to - * a vector 4 times, (starting at 0, 16, 32 and 48). All array elements are copied or - * initialised as described in the KFS. The remaining elements of a vector are set to 0. 
- */ - /* TODO: guard this code with above assumptions */ - for(i = 0; i < total_blocks; i++) { - base = shuffle_block * i; - - for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) { - to->e_dew_enh_x[0][base + j] = min_t(int, max_t(int, - from->dew_enhance_seg_x[j], 0), - 8191); - to->e_dew_enh_y[0][base + j] = min_t(int, max_t(int, - from->dew_enhance_seg_y[j], -8192), - 8191); - } - - for (j = 0; j < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); j++) { - to->e_dew_enh_a[0][base + j] = min_t(int, max_t(int, - from->dew_enhance_seg_slope[j], - -8192), 8191); - /* Convert dew_enhance_seg_exp to flag: - * 0 -> 0 - * 1...13 -> 1 - */ - to->e_dew_enh_f[0][base + j] = (min_t(int, max_t(int, - from->dew_enhance_seg_exp[j], - 0), 13) > 0); - } - - /* Hard-coded to 0, in order to be able to handle out of - * range input in the same way as the other segments. - * See KFS for more details. - */ - to->e_dew_enh_a[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0; - to->e_dew_enh_f[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0; - - for (j = 0; j < NUMBER_OF_CHGRINV_POINTS; j++) { - to->chgrinv_x[0][base + j] = chgrinv_x[j]; - to->chgrinv_a[0][base + j] = chgrinv_a[j]; - to->chgrinv_b[0][base + j] = chgrinv_b[j]; - to->chgrinv_c[0][base + j] = chgrinv_c[j]; - } - - for (j = 0; j < NUMBER_OF_TCINV_POINTS; j++) { - to->tcinv_x[0][base + j] = tcinv_x[j]; - to->tcinv_a[0][base + j] = tcinv_a[j]; - to->tcinv_b[0][base + j] = tcinv_b[j]; - to->tcinv_c[0][base + j] = tcinv_c[j]; - } - - for (j = 0; j < NUMBER_OF_FCINV_POINTS; j++) { - to->fcinv_x[0][base + j] = fcinv_x[j]; - to->fcinv_a[0][base + j] = fcinv_a[j]; - to->fcinv_b[0][base + j] = fcinv_b[j]; - to->fcinv_c[0][base + j] = fcinv_c[j]; - } - } -} - - -void -ia_css_eed1_8_encode( - struct eed1_8_dmem_params *to, - const struct ia_css_eed1_8_config *from, - size_t size) -{ - int i; - int min_exp = 0; - - (void)size; - - to->rbzp_strength = from->rbzp_strength; - - to->fcstrength = from->fcstrength; - 
to->fcthres_0 = from->fcthres_0; - to->fc_sat_coef = from->fc_sat_coef; - to->fc_coring_prm = from->fc_coring_prm; - to->fc_slope = from->fcthres_1 - from->fcthres_0; - - to->aerel_thres0 = from->aerel_thres0; - to->aerel_gain0 = from->aerel_gain0; - to->aerel_thres_diff = from->aerel_thres1 - from->aerel_thres0; - to->aerel_gain_diff = from->aerel_gain1 - from->aerel_gain0; - - to->derel_thres0 = from->derel_thres0; - to->derel_gain0 = from->derel_gain0; - to->derel_thres_diff = (from->derel_thres1 - from->derel_thres0); - to->derel_gain_diff = (from->derel_gain1 - from->derel_gain0); - - to->coring_pos0 = from->coring_pos0; - to->coring_pos_diff = (from->coring_pos1 - from->coring_pos0); - to->coring_neg0 = from->coring_neg0; - to->coring_neg_diff = (from->coring_neg1 - from->coring_neg0); - - /* Note: (ISP_VEC_ELEMBITS -1) - * TODO: currently the testbench does not support to use - * ISP_VEC_ELEMBITS. Investigate how to fix this - */ - to->gain_exp = (13 - from->gain_exp); - to->gain_pos0 = from->gain_pos0; - to->gain_pos_diff = (from->gain_pos1 - from->gain_pos0); - to->gain_neg0 = from->gain_neg0; - to->gain_neg_diff = (from->gain_neg1 - from->gain_neg0); - - to->margin_pos0 = from->pos_margin0; - to->margin_pos_diff = (from->pos_margin1 - from->pos_margin0); - to->margin_neg0 = from->neg_margin0; - to->margin_neg_diff = (from->neg_margin1 - from->neg_margin0); - - /* Encode DEWEnhance exp (e_dew_enh_asr) */ - for (i = 0; i < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); i++) { - min_exp = max(min_exp, from->dew_enhance_seg_exp[i]); - } - to->e_dew_enh_asr = 13 - min(max(min_exp, 0), 13); - - to->dedgew_max = from->dedgew_max; -} - - -void -ia_css_init_eed1_8_state( - void *state, - size_t size) -{ - memset(state, 0, size); -} - - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_eed1_8_debug_dtrace( - const struct ia_css_eed1_8_config *eed, - unsigned level) -{ - if (!eed) - return; - - ia_css_debug_dtrace(level, "Edge Enhancing Demosaic 1.8:\n"); - 
ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rbzp_strength", eed->rbzp_strength); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcstrength", eed->fcstrength); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcthres_0", eed->fcthres_0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcthres_1", eed->fcthres_1); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fc_sat_coef", eed->fc_sat_coef); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fc_coring_prm", eed->fc_coring_prm); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_thres0", eed->aerel_thres0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_gain0", eed->aerel_gain0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_thres1", eed->aerel_thres1); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_gain1", eed->aerel_gain1); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_thres0", eed->derel_thres0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_gain0", eed->derel_gain0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_thres1", eed->derel_thres1); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_gain1", eed->derel_gain1); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_pos0", eed->coring_pos0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_pos1", eed->coring_pos1); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_neg0", eed->coring_neg0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_neg1", eed->coring_neg1); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_exp", eed->gain_exp); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_pos0", eed->gain_pos0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_pos1", eed->gain_pos1); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_neg0", eed->gain_neg0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_neg1", eed->gain_neg1); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "pos_margin0", eed->pos_margin0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "pos_margin1", 
eed->pos_margin1); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "neg_margin0", eed->neg_margin0); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "neg_margin1", eed->neg_margin1); - - ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dedgew_max", eed->dedgew_max); -} -#endif - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.h deleted file mode 100644 index fff932c1364e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_EED1_8_HOST_H -#define __IA_CSS_EED1_8_HOST_H - -#include "ia_css_eed1_8_types.h" -#include "ia_css_eed1_8_param.h" - -void -ia_css_eed1_8_vmem_encode( - struct eed1_8_vmem_params *to, - const struct ia_css_eed1_8_config *from, - size_t size); - -void -ia_css_eed1_8_encode( - struct eed1_8_dmem_params *to, - const struct ia_css_eed1_8_config *from, - size_t size); - -void -ia_css_init_eed1_8_state( - void *state, - size_t size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_eed1_8_debug_dtrace( - const struct ia_css_eed1_8_config *config, - unsigned level); -#endif - -#endif /* __IA_CSS_EED1_8_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_param.h deleted file mode 100644 index bc3a07fd07eb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_param.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_EED1_8_PARAM_H -#define __IA_CSS_EED1_8_PARAM_H - -#include "type_support.h" -#include "vmem.h" /* needed for VMEM_ARRAY */ - -#include "ia_css_eed1_8_types.h" /* IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS */ - - -/* Configuration parameters: */ - -/* Enable median for false color correction - * 0: Do not use median - * 1: Use median - * Default: 1 - */ -#define EED1_8_FC_ENABLE_MEDIAN 1 - -/* Coring Threshold minima - * Used in Tint color suppression. - * Default: 1 - */ -#define EED1_8_CORINGTHMIN 1 - -/* Define size of the state..... TODO: check if this is the correct place */ -/* 4 planes : GR, R, B, GB */ -#define NUM_PLANES 4 - -/* 5 lines state per color plane input_line_state */ -#define EED1_8_STATE_INPUT_BUFFER_HEIGHT (5 * NUM_PLANES) - -/* Each plane has width equal to half frame line */ -#define EED1_8_STATE_INPUT_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 1 line state per color plane LD_H state */ -#define EED1_8_STATE_LD_H_HEIGHT (1 * NUM_PLANES) -#define EED1_8_STATE_LD_H_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 1 line state per color plane LD_V state */ -#define EED1_8_STATE_LD_V_HEIGHT (1 * NUM_PLANES) -#define EED1_8_STATE_LD_V_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 1 line (single plane) state for D_Hr state */ -#define EED1_8_STATE_D_HR_HEIGHT 1 -#define EED1_8_STATE_D_HR_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 1 line (single plane) state for D_Hb state */ -#define EED1_8_STATE_D_HB_HEIGHT 1 -#define EED1_8_STATE_D_HB_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 2 lines (single plane) state for D_Vr state */ -#define EED1_8_STATE_D_VR_HEIGHT 2 -#define EED1_8_STATE_D_VR_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 2 line (single plane) state for D_Vb state */ -#define EED1_8_STATE_D_VB_HEIGHT 2 -#define EED1_8_STATE_D_VB_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 2 lines state for R and B (= 2 planes) rb_zipped_state */ -#define EED1_8_STATE_RB_ZIPPED_HEIGHT (2 * 2) -#define 
EED1_8_STATE_RB_ZIPPED_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -#if EED1_8_FC_ENABLE_MEDIAN -/* 1 full input line (GR-R color line) for Yc state */ -#define EED1_8_STATE_YC_HEIGHT 1 -#define EED1_8_STATE_YC_WIDTH MAX_FRAME_SIMDWIDTH - -/* 1 line state per color plane Cg_state */ -#define EED1_8_STATE_CG_HEIGHT (1 * NUM_PLANES) -#define EED1_8_STATE_CG_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 1 line state per color plane Co_state */ -#define EED1_8_STATE_CO_HEIGHT (1 * NUM_PLANES) -#define EED1_8_STATE_CO_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2) - -/* 1 full input line (GR-R color line) for AbsK state */ -#define EED1_8_STATE_ABSK_HEIGHT 1 -#define EED1_8_STATE_ABSK_WIDTH MAX_FRAME_SIMDWIDTH -#endif - -struct eed1_8_vmem_params { - VMEM_ARRAY(e_dew_enh_x, ISP_VEC_NELEMS); - VMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS); - VMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS); - VMEM_ARRAY(e_dew_enh_f, ISP_VEC_NELEMS); - VMEM_ARRAY(chgrinv_x, ISP_VEC_NELEMS); - VMEM_ARRAY(chgrinv_a, ISP_VEC_NELEMS); - VMEM_ARRAY(chgrinv_b, ISP_VEC_NELEMS); - VMEM_ARRAY(chgrinv_c, ISP_VEC_NELEMS); - VMEM_ARRAY(fcinv_x, ISP_VEC_NELEMS); - VMEM_ARRAY(fcinv_a, ISP_VEC_NELEMS); - VMEM_ARRAY(fcinv_b, ISP_VEC_NELEMS); - VMEM_ARRAY(fcinv_c, ISP_VEC_NELEMS); - VMEM_ARRAY(tcinv_x, ISP_VEC_NELEMS); - VMEM_ARRAY(tcinv_a, ISP_VEC_NELEMS); - VMEM_ARRAY(tcinv_b, ISP_VEC_NELEMS); - VMEM_ARRAY(tcinv_c, ISP_VEC_NELEMS); -}; - -/* EED (Edge Enhancing Demosaic) ISP parameters */ -struct eed1_8_dmem_params { - int32_t rbzp_strength; - - int32_t fcstrength; - int32_t fcthres_0; - int32_t fc_sat_coef; - int32_t fc_coring_prm; - int32_t fc_slope; - - int32_t aerel_thres0; - int32_t aerel_gain0; - int32_t aerel_thres_diff; - int32_t aerel_gain_diff; - - int32_t derel_thres0; - int32_t derel_gain0; - int32_t derel_thres_diff; - int32_t derel_gain_diff; - - int32_t coring_pos0; - int32_t coring_pos_diff; - int32_t coring_neg0; - int32_t coring_neg_diff; - - int32_t gain_exp; - int32_t gain_pos0; - int32_t gain_pos_diff; - 
int32_t gain_neg0; - int32_t gain_neg_diff; - - int32_t margin_pos0; - int32_t margin_pos_diff; - int32_t margin_neg0; - int32_t margin_neg_diff; - - int32_t e_dew_enh_asr; - int32_t dedgew_max; -}; - -#endif /* __IA_CSS_EED1_8_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h deleted file mode 100644 index 32e91824a5e5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_EED1_8_TYPES_H -#define __IA_CSS_EED1_8_TYPES_H - -/* @file -* CSS-API header file for Edge Enhanced Demosaic parameters. -*/ - - -#include "type_support.h" - -/** - * \brief EED1_8 public parameters. - * \details Struct with all parameters for the EED1.8 kernel that can be set - * from the CSS API. - */ - -/* parameter list is based on ISP261 CSS API public parameter list_all.xlsx from 28-01-2015 */ - -/* Number of segments + 1 segment used in edge reliability enhancement - * Ineffective: N/A - * Default: 9 - */ -#define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS 9 - -/* Edge Enhanced Demosaic configuration - * - * ISP2.6.1: EED1_8 is used. - */ - -struct ia_css_eed1_8_config { - int32_t rbzp_strength; /** Strength of zipper reduction. 
*/ - - int32_t fcstrength; /** Strength of false color reduction. */ - int32_t fcthres_0; /** Threshold to prevent chroma coring due to noise or green disparity in dark region. */ - int32_t fcthres_1; /** Threshold to prevent chroma coring due to noise or green disparity in bright region. */ - int32_t fc_sat_coef; /** How much color saturation to maintain in high color saturation region. */ - int32_t fc_coring_prm; /** Chroma coring coefficient for tint color suppression. */ - - int32_t aerel_thres0; /** Threshold for Non-Directional Reliability at dark region. */ - int32_t aerel_gain0; /** Gain for Non-Directional Reliability at dark region. */ - int32_t aerel_thres1; /** Threshold for Non-Directional Reliability at bright region. */ - int32_t aerel_gain1; /** Gain for Non-Directional Reliability at bright region. */ - - int32_t derel_thres0; /** Threshold for Directional Reliability at dark region. */ - int32_t derel_gain0; /** Gain for Directional Reliability at dark region. */ - int32_t derel_thres1; /** Threshold for Directional Reliability at bright region. */ - int32_t derel_gain1; /** Gain for Directional Reliability at bright region. */ - - int32_t coring_pos0; /** Positive Edge Coring Threshold in dark region. */ - int32_t coring_pos1; /** Positive Edge Coring Threshold in bright region. */ - int32_t coring_neg0; /** Negative Edge Coring Threshold in dark region. */ - int32_t coring_neg1; /** Negative Edge Coring Threshold in bright region. */ - - int32_t gain_exp; /** Common Exponent of Gain. */ - int32_t gain_pos0; /** Gain for Positive Edge in dark region. */ - int32_t gain_pos1; /** Gain for Positive Edge in bright region. */ - int32_t gain_neg0; /** Gain for Negative Edge in dark region. */ - int32_t gain_neg1; /** Gain for Negative Edge in bright region. */ - - int32_t pos_margin0; /** Margin for Positive Edge in dark region. */ - int32_t pos_margin1; /** Margin for Positive Edge in bright region. 
*/ - int32_t neg_margin0; /** Margin for Negative Edge in dark region. */ - int32_t neg_margin1; /** Margin for Negative Edge in bright region. */ - - int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: X. */ - int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: Y. */ - int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /** Segment data for directional edge weight: Slope. */ - int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /** Segment data for directional edge weight: Exponent. */ - int32_t dedgew_max; /** Max Weight for Directional Edge. */ -}; - -#endif /* __IA_CSS_EED1_8_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.c deleted file mode 100644 index 94631eee8614..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.c +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_formats.host.h" -#include "ia_css_types.h" -#include "sh_css_defs.h" - -/*#include "sh_css_frac.h"*/ -#ifndef IA_CSS_NO_DEBUG -/* FIXME: See BZ 4427 */ -#include "ia_css_debug.h" -#endif - -const struct ia_css_formats_config default_formats_config = { - 1 -}; - -void -ia_css_formats_encode( - struct sh_css_isp_formats_params *to, - const struct ia_css_formats_config *from, - unsigned size) -{ - (void)size; - to->video_full_range_flag = from->video_full_range_flag; -} -#ifndef IA_CSS_NO_DEBUG -/* FIXME: See BZ 4427 */ -void -ia_css_formats_dump( - const struct sh_css_isp_formats_params *formats, - unsigned level) -{ - if (!formats) return; - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "video_full_range_flag", formats->video_full_range_flag); -} -#endif - -#ifndef IA_CSS_NO_DEBUG -/* FIXME: See BZ 4427 */ -void -ia_css_formats_debug_dtrace( - const struct ia_css_formats_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.video_full_range_flag=%d\n", - config->video_full_range_flag); -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.h deleted file mode 100644 index 8a90cd83b248..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_FORMATS_HOST_H -#define __IA_CSS_FORMATS_HOST_H - -#include "ia_css_formats_types.h" -#include "ia_css_formats_param.h" - -extern const struct ia_css_formats_config default_formats_config; - -void -ia_css_formats_encode( - struct sh_css_isp_formats_params *to, - const struct ia_css_formats_config *from, - unsigned size); -#ifndef IA_CSS_NO_DEBUG -/* FIXME: See BZ 4427 */ -void -ia_css_formats_dump( - const struct sh_css_isp_formats_params *formats, - unsigned level); -#endif - -#ifndef IA_CSS_NO_DEBUG -/* FIXME: See BZ 4427 */ -void -ia_css_formats_debug_dtrace( - const struct ia_css_formats_config *formats, - unsigned level); -#endif /*IA_CSS_NO_DEBUG*/ - -#endif /* __IA_CSS_FORMATS_HOST_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_param.h deleted file mode 100644 index 2eb6030b6081..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_param.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_FORMATS_PARAM_H -#define __IA_CSS_FORMATS_PARAM_H - -#include "type_support.h" - -/* FORMATS (Format conversion) */ -struct sh_css_isp_formats_params { - int32_t video_full_range_flag; -}; - -#endif /* __IA_CSS_FORMATS_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h deleted file mode 100644 index 49479572b40d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_FORMATS_TYPES_H -#define __IA_CSS_FORMATS_TYPES_H - -/* @file -* CSS-API header file for output format parameters. -*/ - -#include "type_support.h" - -/* Formats configuration. - * - * ISP block: FORMATS - * ISP1: FORMATS is used. - * ISP2: FORMATS is used. - */ -struct ia_css_formats_config { - uint32_t video_full_range_flag; /** selects the range of YUV output. 
- u8.0, [0,1], - default 1, ineffective n/a\n - 1 - full range, luma 0-255, chroma 0-255\n - 0 - reduced range, luma 16-235, chroma 16-240 */ -}; - -#endif /* __IA_CSS_FORMATS_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h deleted file mode 100644 index cc8dd1a7007f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_FIXEDBDS_PARAM_H -#define __IA_CSS_FIXEDBDS_PARAM_H - -#include "type_support.h" - -#ifdef ISP2401 -#define BDS_UNIT 8 -#define FRAC_LOG 3 -#define FRAC_ACC (1< -#include -#include -#include -#include -#include -#include - -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#include "isp.h" - -#include "ia_css_fpn.host.h" - -void -ia_css_fpn_encode( - struct sh_css_isp_fpn_params *to, - const struct ia_css_fpn_table *from, - unsigned size) -{ - (void)size; - to->shift = from->shift; - to->enabled = from->data != NULL; -} - -void -ia_css_fpn_dump( - const struct sh_css_isp_fpn_params *fpn, - unsigned level) -{ - if (!fpn) return; - ia_css_debug_dtrace(level, "Fixed Pattern Noise Reduction:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "fpn_shift", fpn->shift); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "fpn_enabled", fpn->enabled); -} - -void -ia_css_fpn_config( - struct sh_css_isp_fpn_isp_config *to, - const struct ia_css_fpn_configuration *from, - unsigned size) -{ - unsigned elems_a = ISP_VEC_NELEMS; - - (void)size; - ia_css_dma_configure_from_info(&to->port_b, from->info); - to->width_a_over_b = elems_a / to->port_b.elems; - - /* Assume divisiblity here, may need to generalize to fixed point. 
*/ - assert (elems_a % to->port_b.elems == 0); -} - -void -ia_css_fpn_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *info) -{ - struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO; - const struct ia_css_fpn_configuration config = { - &my_info - }; - - my_info.res.width = CEIL_DIV(info->res.width, 2); /* Packed by 2x */ - my_info.res.height = info->res.height; - my_info.padded_width = CEIL_DIV(info->padded_width, 2); /* Packed by 2x */ - my_info.format = info->format; - my_info.raw_bit_depth = FPN_BITS_PER_PIXEL; - my_info.raw_bayer_order = info->raw_bayer_order; - my_info.crop_info = info->crop_info; - - ia_css_configure_fpn(binary, &config); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h deleted file mode 100644 index bb905c8db8c8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_FPN_HOST_H -#define __IA_CSS_FPN_HOST_H - -#include "ia_css_binary.h" -#include "ia_css_fpn_types.h" -#include "ia_css_fpn_param.h" - -void -ia_css_fpn_encode( - struct sh_css_isp_fpn_params *to, - const struct ia_css_fpn_table *from, - unsigned size); - -void -ia_css_fpn_dump( - const struct sh_css_isp_fpn_params *fpn, - unsigned level); - -void -ia_css_fpn_config( - struct sh_css_isp_fpn_isp_config *to, - const struct ia_css_fpn_configuration *from, - unsigned size); - -void -ia_css_fpn_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *from); - -#endif /* __IA_CSS_FPN_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h deleted file mode 100644 index 68765c3f3bf7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_FPN_PARAM_H -#define __IA_CSS_FPN_PARAM_H - -#include "type_support.h" - -#include "dma.h" - -#define FPN_BITS_PER_PIXEL 16 - -/* FPNR (Fixed Pattern Noise Reduction) */ -struct sh_css_isp_fpn_params { - int32_t shift; - int32_t enabled; -}; - -struct sh_css_isp_fpn_isp_config { - uint32_t width_a_over_b; - struct dma_port_config port_b; -}; - -#endif /* __IA_CSS_FPN_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h deleted file mode 100644 index ef287fa3c428..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_FPN_TYPES_H -#define __IA_CSS_FPN_TYPES_H - -/* @file -* CSS-API header file for Fixed Pattern Noise parameters. -*/ - -/* Fixed Pattern Noise table. - * - * This contains the fixed patterns noise values - * obtained from a black frame capture. - * - * "shift" should be set as the smallest value - * which satisfies the requirement the maximum data is less than 64. - * - * ISP block: FPN1 - * ISP1: FPN1 is used. - * ISP2: FPN1 is used. - */ - -struct ia_css_fpn_table { - int16_t *data; /** Table content (fixed patterns noise). - u0.[13-shift], [0,63] */ - uint32_t width; /** Table width (in pixels). 
- This is the input frame width. */ - uint32_t height; /** Table height (in pixels). - This is the input frame height. */ - uint32_t shift; /** Common exponent of table content. - u8.0, [0,13] */ - uint32_t enabled; /** Fpn is enabled. - bool */ -}; - -struct ia_css_fpn_configuration { - const struct ia_css_frame_info *info; -}; - -#endif /* __IA_CSS_FPN_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c deleted file mode 100644 index 0cfb5c94447f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#ifndef IA_CSS_NO_DEBUG -/* FIXME: See BZ 4427 */ -#include "ia_css_debug.h" -#endif -#include "sh_css_frac.h" -#include "vamem.h" - -#include "ia_css_gc.host.h" - -const struct ia_css_gc_config default_gc_config = { - 0, - 0 -}; - -const struct ia_css_ce_config default_ce_config = { - 0, - 255 -}; - -void -ia_css_gc_encode( - struct sh_css_isp_gc_params *to, - const struct ia_css_gc_config *from, - unsigned size) -{ - (void)size; - to->gain_k1 = - uDIGIT_FITTING((int)from->gain_k1, 16, - IA_CSS_GAMMA_GAIN_K_SHIFT); - to->gain_k2 = - uDIGIT_FITTING((int)from->gain_k2, 16, - IA_CSS_GAMMA_GAIN_K_SHIFT); -} - -void -ia_css_ce_encode( - struct sh_css_isp_ce_params *to, - const struct ia_css_ce_config *from, - unsigned size) -{ - (void)size; - to->uv_level_min = from->uv_level_min; - to->uv_level_max = from->uv_level_max; -} - -void -ia_css_gc_vamem_encode( - struct sh_css_isp_gc_vamem_params *to, - const struct ia_css_gamma_table *from, - unsigned size) -{ - (void)size; - memcpy (&to->gc, &from->data, sizeof(to->gc)); -} - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_gc_dump( - const struct sh_css_isp_gc_params *gc, - unsigned level) -{ - if (!gc) return; - ia_css_debug_dtrace(level, "Gamma Correction:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "gamma_gain_k1", gc->gain_k1); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "gamma_gain_k2", gc->gain_k2); -} - -void -ia_css_ce_dump( - const struct sh_css_isp_ce_params *ce, - unsigned level) -{ - ia_css_debug_dtrace(level, "Chroma Enhancement:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ce_uv_level_min", ce->uv_level_min); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ce_uv_level_max", ce->uv_level_max); -} - -void -ia_css_gc_debug_dtrace( - const struct ia_css_gc_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.gain_k1=%d, config.gain_k2=%d\n", - config->gain_k1, config->gain_k2); -} - -void 
-ia_css_ce_debug_dtrace( - const struct ia_css_ce_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.uv_level_min=%d, config.uv_level_max=%d\n", - config->uv_level_min, config->uv_level_max); -} -#endif - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h deleted file mode 100644 index 06f08840563e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_GC_HOST_H -#define __IA_CSS_GC_HOST_H - -#include "ia_css_gc_param.h" -#include "ia_css_gc_table.host.h" - -extern const struct ia_css_gc_config default_gc_config; -extern const struct ia_css_ce_config default_ce_config; - -void -ia_css_gc_encode( - struct sh_css_isp_gc_params *to, - const struct ia_css_gc_config *from, - unsigned size); - -void -ia_css_gc_vamem_encode( - struct sh_css_isp_gc_vamem_params *to, - const struct ia_css_gamma_table *from, - unsigned size); - -void -ia_css_ce_encode( - struct sh_css_isp_ce_params *to, - const struct ia_css_ce_config *from, - unsigned size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_gc_dump( - const struct sh_css_isp_gc_params *gc, - unsigned level); - -void -ia_css_ce_dump( - const struct sh_css_isp_ce_params *ce, - unsigned level); - -void -ia_css_gc_debug_dtrace( - const struct ia_css_gc_config *config, - unsigned level); - -void -ia_css_ce_debug_dtrace( - const struct ia_css_ce_config *config, - unsigned level); - -#endif - -#endif /* __IA_CSS_GC_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_param.h deleted file mode 100644 index 52972b1a07ff..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_param.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_GC_PARAM_H -#define __IA_CSS_GC_PARAM_H - -#include "type_support.h" -#ifndef PIPE_GENERATION -#ifdef __ISP -#define __INLINE_VAMEM__ -#endif -#include "vamem.h" -#include "ia_css_gc_types.h" - -#if defined(IS_VAMEM_VERSION_1) -#define SH_CSS_ISP_GAMMA_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2 -#define SH_CSS_ISP_GC_TABLE_SIZE IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE -#elif defined(IS_VAMEM_VERSION_2) -#define SH_CSS_ISP_GAMMA_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2 -#define SH_CSS_ISP_GC_TABLE_SIZE IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE -#else -#error "Undefined vamem version" -#endif - -#else -/* For pipe generation, the size is not relevant */ -#define SH_CSS_ISP_GC_TABLE_SIZE 0 -#endif - -#define GAMMA_OUTPUT_BITS 8 -#define GAMMA_OUTPUT_MAX_VAL ((1< -#include /* memcpy */ -#include "system_global.h" -#include "vamem.h" -#include "ia_css_types.h" -#include "ia_css_gc_table.host.h" - -#if defined(HAS_VAMEM_VERSION_2) - -struct ia_css_gamma_table default_gamma_table; - -static const uint16_t -default_gamma_table_data[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE] = { - 0, 4, 8, 12, 17, 21, 27, 32, - 38, 44, 49, 55, 61, 66, 71, 76, - 80, 84, 88, 92, 95, 98, 102, 105, -108, 110, 113, 116, 118, 121, 123, 126, -128, 130, 132, 135, 137, 139, 141, 143, -145, 146, 148, 150, 152, 153, 155, 156, -158, 160, 161, 162, 164, 165, 166, 168, -169, 170, 171, 172, 174, 175, 176, 177, -178, 179, 180, 181, 182, 183, 184, 184, -185, 186, 187, 188, 189, 189, 190, 191, -192, 192, 193, 194, 195, 195, 196, 197, -197, 198, 198, 199, 200, 200, 201, 201, -202, 203, 203, 204, 204, 205, 205, 206, -206, 207, 207, 208, 208, 209, 209, 210, -210, 210, 211, 211, 212, 212, 213, 213, -214, 214, 214, 215, 215, 216, 216, 216, -217, 217, 218, 218, 218, 219, 219, 220, -220, 220, 221, 221, 222, 222, 222, 223, -223, 223, 224, 224, 225, 225, 225, 226, -226, 226, 227, 227, 227, 228, 228, 228, -229, 229, 229, 230, 230, 230, 231, 231, -231, 232, 232, 232, 233, 233, 233, 234, -234, 234, 
234, 235, 235, 235, 236, 236, -236, 237, 237, 237, 237, 238, 238, 238, -239, 239, 239, 239, 240, 240, 240, 241, -241, 241, 241, 242, 242, 242, 242, 243, -243, 243, 243, 244, 244, 244, 245, 245, -245, 245, 246, 246, 246, 246, 247, 247, -247, 247, 248, 248, 248, 248, 249, 249, -249, 249, 250, 250, 250, 250, 251, 251, -251, 251, 252, 252, 252, 252, 253, 253, -253, 253, 254, 254, 254, 254, 255, 255, -255 -}; - -#elif defined(HAS_VAMEM_VERSION_1) - -static const uint16_t -default_gamma_table_data[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE] = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 16, - 17, 18, 19, 20, 21, 23, 24, 25, - 27, 28, 29, 31, 32, 33, 35, 36, - 38, 39, 41, 42, 44, 45, 47, 48, - 49, 51, 52, 54, 55, 57, 58, 60, - 61, 62, 64, 65, 66, 68, 69, 70, - 71, 72, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 93, 94, - 95, 96, 97, 98, 98, 99, 100, 101, - 102, 102, 103, 104, 105, 105, 106, 107, - 108, 108, 109, 110, 110, 111, 112, 112, - 113, 114, 114, 115, 116, 116, 117, 118, - 118, 119, 120, 120, 121, 121, 122, 123, - 123, 124, 125, 125, 126, 126, 127, 127, /* 128 */ - 128, 129, 129, 130, 130, 131, 131, 132, - 132, 133, 134, 134, 135, 135, 136, 136, - 137, 137, 138, 138, 139, 139, 140, 140, - 141, 141, 142, 142, 143, 143, 144, 144, - 145, 145, 145, 146, 146, 147, 147, 148, - 148, 149, 149, 150, 150, 150, 151, 151, - 152, 152, 152, 153, 153, 154, 154, 155, - 155, 155, 156, 156, 156, 157, 157, 158, - 158, 158, 159, 159, 160, 160, 160, 161, - 161, 161, 162, 162, 162, 163, 163, 163, - 164, 164, 164, 165, 165, 165, 166, 166, - 166, 167, 167, 167, 168, 168, 168, 169, - 169, 169, 170, 170, 170, 170, 171, 171, - 171, 172, 172, 172, 172, 173, 173, 173, - 174, 174, 174, 174, 175, 175, 175, 176, - 176, 176, 176, 177, 177, 177, 177, 178, /* 256 */ - 178, 178, 178, 179, 179, 179, 179, 180, - 180, 180, 180, 181, 181, 181, 181, 182, - 182, 182, 182, 182, 183, 183, 183, 183, - 184, 184, 184, 184, 184, 185, 185, 185, - 185, 186, 186, 186, 186, 
186, 187, 187, - 187, 187, 187, 188, 188, 188, 188, 188, - 189, 189, 189, 189, 189, 190, 190, 190, - 190, 190, 191, 191, 191, 191, 191, 192, - 192, 192, 192, 192, 192, 193, 193, 193, - 193, 193, 194, 194, 194, 194, 194, 194, - 195, 195, 195, 195, 195, 195, 196, 196, - 196, 196, 196, 196, 197, 197, 197, 197, - 197, 197, 198, 198, 198, 198, 198, 198, - 198, 199, 199, 199, 199, 199, 199, 200, - 200, 200, 200, 200, 200, 200, 201, 201, - 201, 201, 201, 201, 201, 202, 202, 202, /* 384 */ - 202, 202, 202, 202, 203, 203, 203, 203, - 203, 203, 203, 204, 204, 204, 204, 204, - 204, 204, 204, 205, 205, 205, 205, 205, - 205, 205, 205, 206, 206, 206, 206, 206, - 206, 206, 206, 207, 207, 207, 207, 207, - 207, 207, 207, 208, 208, 208, 208, 208, - 208, 208, 208, 209, 209, 209, 209, 209, - 209, 209, 209, 209, 210, 210, 210, 210, - 210, 210, 210, 210, 210, 211, 211, 211, - 211, 211, 211, 211, 211, 211, 212, 212, - 212, 212, 212, 212, 212, 212, 212, 213, - 213, 213, 213, 213, 213, 213, 213, 213, - 214, 214, 214, 214, 214, 214, 214, 214, - 214, 214, 215, 215, 215, 215, 215, 215, - 215, 215, 215, 216, 216, 216, 216, 216, - 216, 216, 216, 216, 216, 217, 217, 217, /* 512 */ - 217, 217, 217, 217, 217, 217, 217, 218, - 218, 218, 218, 218, 218, 218, 218, 218, - 218, 219, 219, 219, 219, 219, 219, 219, - 219, 219, 219, 220, 220, 220, 220, 220, - 220, 220, 220, 220, 220, 221, 221, 221, - 221, 221, 221, 221, 221, 221, 221, 221, - 222, 222, 222, 222, 222, 222, 222, 222, - 222, 222, 223, 223, 223, 223, 223, 223, - 223, 223, 223, 223, 223, 224, 224, 224, - 224, 224, 224, 224, 224, 224, 224, 224, - 225, 225, 225, 225, 225, 225, 225, 225, - 225, 225, 225, 226, 226, 226, 226, 226, - 226, 226, 226, 226, 226, 226, 226, 227, - 227, 227, 227, 227, 227, 227, 227, 227, - 227, 227, 228, 228, 228, 228, 228, 228, - 228, 228, 228, 228, 228, 228, 229, 229, - 229, 229, 229, 229, 229, 229, 229, 229, - 229, 229, 230, 230, 230, 230, 230, 230, - 230, 230, 230, 230, 230, 230, 231, 231, - 231, 231, 231, 231, 231, 231, 
231, 231, - 231, 231, 231, 232, 232, 232, 232, 232, - 232, 232, 232, 232, 232, 232, 232, 233, - 233, 233, 233, 233, 233, 233, 233, 233, - 233, 233, 233, 233, 234, 234, 234, 234, - 234, 234, 234, 234, 234, 234, 234, 234, - 234, 235, 235, 235, 235, 235, 235, 235, - 235, 235, 235, 235, 235, 235, 236, 236, - 236, 236, 236, 236, 236, 236, 236, 236, - 236, 236, 236, 236, 237, 237, 237, 237, - 237, 237, 237, 237, 237, 237, 237, 237, - 237, 237, 238, 238, 238, 238, 238, 238, - 238, 238, 238, 238, 238, 238, 238, 238, - 239, 239, 239, 239, 239, 239, 239, 239, - 239, 239, 239, 239, 239, 239, 240, 240, - 240, 240, 240, 240, 240, 240, 240, 240, - 240, 240, 240, 240, 241, 241, 241, 241, - 241, 241, 241, 241, 241, 241, 241, 241, - 241, 241, 241, 242, 242, 242, 242, 242, - 242, 242, 242, 242, 242, 242, 242, 242, - 242, 242, 243, 243, 243, 243, 243, 243, - 243, 243, 243, 243, 243, 243, 243, 243, - 243, 244, 244, 244, 244, 244, 244, 244, - 244, 244, 244, 244, 244, 244, 244, 244, - 245, 245, 245, 245, 245, 245, 245, 245, - 245, 245, 245, 245, 245, 245, 245, 246, - 246, 246, 246, 246, 246, 246, 246, 246, - 246, 246, 246, 246, 246, 246, 246, 247, - 247, 247, 247, 247, 247, 247, 247, 247, - 247, 247, 247, 247, 247, 247, 247, 248, - 248, 248, 248, 248, 248, 248, 248, 248, - 248, 248, 248, 248, 248, 248, 248, 249, - 249, 249, 249, 249, 249, 249, 249, 249, - 249, 249, 249, 249, 249, 249, 249, 250, - 250, 250, 250, 250, 250, 250, 250, 250, - 250, 250, 250, 250, 250, 250, 250, 251, - 251, 251, 251, 251, 251, 251, 251, 251, - 251, 251, 251, 251, 251, 251, 251, 252, - 252, 252, 252, 252, 252, 252, 252, 252, - 252, 252, 252, 252, 252, 252, 252, 253, - 253, 253, 253, 253, 253, 253, 253, 253, - 253, 253, 253, 253, 253, 253, 253, 253, - 254, 254, 254, 254, 254, 254, 254, 254, - 254, 254, 254, 254, 254, 254, 254, 254, - 255, 255, 255, 255, 255, 255, 255, 255 -}; - -#else -#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}" -#endif - -void -ia_css_config_gamma_table(void) -{ 
-#if defined(HAS_VAMEM_VERSION_2) - memcpy(default_gamma_table.data.vamem_2, default_gamma_table_data, - sizeof(default_gamma_table_data)); - default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2; -#else - memcpy(default_gamma_table.data.vamem_1, default_gamma_table_data, - sizeof(default_gamma_table_data)); - default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1; -#endif -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h deleted file mode 100644 index 9686623d9cdd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_GC_TABLE_HOST_H -#define __IA_CSS_GC_TABLE_HOST_H - -#include "ia_css_gc_types.h" - -extern struct ia_css_gamma_table default_gamma_table; - -void ia_css_config_gamma_table(void); - -#endif /* __IA_CSS_GC_TABLE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h deleted file mode 100644 index 594807fe2925..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_GC_TYPES_H -#define __IA_CSS_GC_TYPES_H - -/* @file -* CSS-API header file for Gamma Correction parameters. -*/ - -#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: Needed for ia_css_vamem_type */ - -/* Fractional bits for GAMMA gain */ -#define IA_CSS_GAMMA_GAIN_K_SHIFT 13 - -/* Number of elements in the gamma table. 
*/ -#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2 10 -#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE (1U<gc, &from->data, sizeof(to->gc)); -} - -void -ia_css_g_gamma_vamem_encode( - struct sh_css_isp_rgb_gamma_vamem_params *to, - const struct ia_css_rgb_gamma_table *from, - unsigned size) -{ - (void)size; - memcpy (&to->gc, &from->data, sizeof(to->gc)); -} - -void -ia_css_b_gamma_vamem_encode( - struct sh_css_isp_rgb_gamma_vamem_params *to, - const struct ia_css_rgb_gamma_table *from, - unsigned size) -{ - (void)size; - memcpy (&to->gc, &from->data, sizeof(to->gc)); -} - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_yuv2rgb_dump( - const struct sh_css_isp_csc_params *yuv2rgb, - unsigned level) -{ - ia_css_cc_dump(yuv2rgb, level, "YUV to RGB Conversion"); -} - -void -ia_css_rgb2yuv_dump( - const struct sh_css_isp_csc_params *rgb2yuv, - unsigned level) -{ - ia_css_cc_dump(rgb2yuv, level, "RGB to YUV Conversion"); -} - -void -ia_css_rgb_gamma_table_debug_dtrace( - const struct ia_css_rgb_gamma_table *config, - unsigned level) -{ - (void)config; - (void)level; -} -#endif - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h deleted file mode 100644 index ba140eefd525..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_GC2_HOST_H -#define __IA_CSS_GC2_HOST_H - -#include "ia_css_gc2_types.h" -#include "ia_css_gc2_param.h" -#include "ia_css_gc2_table.host.h" - -extern const struct ia_css_cc_config default_yuv2rgb_cc_config; -extern const struct ia_css_cc_config default_rgb2yuv_cc_config; - -void -ia_css_yuv2rgb_encode( - struct sh_css_isp_csc_params *to, - const struct ia_css_cc_config *from, - unsigned size); - -void -ia_css_rgb2yuv_encode( - struct sh_css_isp_csc_params *to, - const struct ia_css_cc_config *from, - unsigned size); - -void -ia_css_r_gamma_vamem_encode( - struct sh_css_isp_rgb_gamma_vamem_params *to, - const struct ia_css_rgb_gamma_table *from, - unsigned size); - -void -ia_css_g_gamma_vamem_encode( - struct sh_css_isp_rgb_gamma_vamem_params *to, - const struct ia_css_rgb_gamma_table *from, - unsigned size); - -void -ia_css_b_gamma_vamem_encode( - struct sh_css_isp_rgb_gamma_vamem_params *to, - const struct ia_css_rgb_gamma_table *from, - unsigned size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_yuv2rgb_dump( - const struct sh_css_isp_csc_params *yuv2rgb, - unsigned level); - -void -ia_css_rgb2yuv_dump( - const struct sh_css_isp_csc_params *rgb2yuv, - unsigned level); - -void -ia_css_rgb_gamma_table_debug_dtrace( - const struct ia_css_rgb_gamma_table *config, - unsigned level); - -#define ia_css_yuv2rgb_debug_dtrace ia_css_cc_config_debug_dtrace -#define ia_css_rgb2yuv_debug_dtrace ia_css_cc_config_debug_dtrace -#define ia_css_r_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace -#define ia_css_g_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace -#define ia_css_b_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace - -#endif - -#endif /* __IA_CSS_GC2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h deleted file mode 
100644 index d25239f4d86f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_GC2_PARAM_H -#define __IA_CSS_GC2_PARAM_H - -#include "type_support.h" -/* Extend GC1 */ -#include "ia_css_gc2_types.h" -#include "gc/gc_1.0/ia_css_gc_param.h" -#include "csc/csc_1.0/ia_css_csc_param.h" - -#ifndef PIPE_GENERATION -#if defined(IS_VAMEM_VERSION_1) -#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE -#elif defined(IS_VAMEM_VERSION_2) -#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE -#else -#error "Undefined vamem version" -#endif - -#else -/* For pipe generation, the size is not relevant */ -#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE 0 -#endif - -/* This should be vamem_data_t, but that breaks the pipe generator */ -struct sh_css_isp_rgb_gamma_vamem_params { - uint16_t gc[SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE]; -}; - -#endif /* __IA_CSS_GC2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c deleted file mode 100644 index f14a66b78714..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Support for Intel Camera 
Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include -#include /* memcpy */ -#include "system_global.h" -#include "vamem.h" -#include "ia_css_types.h" -#include "ia_css_gc2_table.host.h" - -struct ia_css_rgb_gamma_table default_r_gamma_table; -struct ia_css_rgb_gamma_table default_g_gamma_table; -struct ia_css_rgb_gamma_table default_b_gamma_table; - -/* Identical default gamma table for R, G, and B. */ - -#if defined(HAS_VAMEM_VERSION_2) - -static const uint16_t -default_gamma_table_data[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE] = { - 0, 72, 144, 216, 288, 360, 426, 486, - 541, 592, 641, 687, 730, 772, 812, 850, - 887, 923, 958, 991, 1024, 1055, 1086, 1117, -1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335, -1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525, -1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694, -1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848, -1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990, -2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122, -2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247, -2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364, -2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476, -2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583, -2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685, -2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783, -2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878, -2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970, -2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058, -3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144, -3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228, -3238, 3248, 
3258, 3268, 3279, 3289, 3299, 3309, -3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388, -3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465, -3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540, -3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614, -3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686, -3694, 3703, 3712, 3721, 3730, 3739, 3747, 3756, -3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825, -3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893, -3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959, -3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024, -4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088, -4095 -}; -#elif defined(HAS_VAMEM_VERSION_1) - -static const uint16_t -default_gamma_table_data[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE] = { - 0, 72, 144, 216, 288, 360, 426, 486, - 541, 592, 641, 687, 730, 772, 812, 850, - 887, 923, 958, 991, 1024, 1055, 1086, 1117, -1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335, -1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525, -1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694, -1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848, -1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990, -2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122, -2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247, -2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364, -2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476, -2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583, -2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685, -2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783, -2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878, -2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970, -2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058, -3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144, -3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228, -3238, 3248, 3258, 3268, 3279, 3289, 3299, 3309, -3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388, -3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465, -3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540, -3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614, -3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686, -3694, 3703, 3712, 3721, 3730, 
3739, 3747, 3756, -3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825, -3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893, -3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959, -3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024, -4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088 -}; -#else -#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}" -#endif - -void -ia_css_config_rgb_gamma_tables(void) -{ -#if defined(HAS_VAMEM_VERSION_2) - default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2; - default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2; - default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2; - memcpy(default_r_gamma_table.data.vamem_2, default_gamma_table_data, - sizeof(default_gamma_table_data)); - memcpy(default_g_gamma_table.data.vamem_2, default_gamma_table_data, - sizeof(default_gamma_table_data)); - memcpy(default_b_gamma_table.data.vamem_2, default_gamma_table_data, - sizeof(default_gamma_table_data)); -#else - memcpy(default_r_gamma_table.data.vamem_1, default_gamma_table_data, - sizeof(default_gamma_table_data)); - memcpy(default_g_gamma_table.data.vamem_1, default_gamma_table_data, - sizeof(default_gamma_table_data)); - memcpy(default_b_gamma_table.data.vamem_1, default_gamma_table_data, - sizeof(default_gamma_table_data)); - default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1; - default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1; - default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1; -#endif -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h deleted file mode 100644 index 8686e6e3586c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_GC2_TABLE_HOST_H -#define __IA_CSS_GC2_TABLE_HOST_H - -#include "ia_css_gc2_types.h" - -extern struct ia_css_rgb_gamma_table default_r_gamma_table; -extern struct ia_css_rgb_gamma_table default_g_gamma_table; -extern struct ia_css_rgb_gamma_table default_b_gamma_table; - -void ia_css_config_rgb_gamma_tables(void); - -#endif /* __IA_CSS_GC2_TABLE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h deleted file mode 100644 index fab7467d30a5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_GC2_TYPES_H -#define __IA_CSS_GC2_TYPES_H - -#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: needed for ia_css_vamem_type */ - -/* @file -* CSS-API header file for Gamma Correction parameters. -*/ - -/* sRGB Gamma table, used for sRGB Gamma Correction. - * - * ISP block: GC2 (sRGB Gamma Correction) - * (ISP1: GC1(YUV Gamma Correction) is used.) - * ISP2: GC2 is used. - */ - -/* Number of elements in the sRGB gamma table. */ -#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8 -#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE (1U<irradiance.match_shift[i] = from->irradiance.match_shift[i]; - to->irradiance.match_mul[i] = from->irradiance.match_mul[i]; - to->irradiance.thr_low[i] = from->irradiance.thr_low[i]; - to->irradiance.thr_high[i] = from->irradiance.thr_high[i]; - to->irradiance.thr_coeff[i] = from->irradiance.thr_coeff[i]; - to->irradiance.thr_shift[i] = from->irradiance.thr_shift[i]; - } - to->irradiance.test_irr = from->irradiance.test_irr; - to->irradiance.weight_bpp = from->irradiance.weight_bpp; - - to->deghost.test_deg = from->deghost.test_deg; - to->exclusion.test_excl = from->exclusion.test_excl; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.h deleted file mode 100644 index 8f89bc8f1ca2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.h +++ /dev/null @@ -1,31 +0,0 @@ -/* Release Version: irci_stable_candrpv_0415_20150521_0458 */ -/* Release Version: irci_ecr-master_20150911_0724 */ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_HDR_HOST_H -#define __IA_CSS_HDR_HOST_H - -#include "ia_css_hdr_param.h" -#include "ia_css_hdr_types.h" - -extern const struct ia_css_hdr_config default_hdr_config; - -void -ia_css_hdr_init_config( - struct sh_css_isp_hdr_params *to, - const struct ia_css_hdr_config *from, - unsigned size); - -#endif /* __IA_CSS_HDR_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_param.h deleted file mode 100644 index 1c053af7d0d3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_param.h +++ /dev/null @@ -1,53 +0,0 @@ -/* Release Version: irci_stable_candrpv_0415_20150521_0458 */ -/* Release Version: irci_ecr-master_20150911_0724 */ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_HDR_PARAMS_H -#define __IA_CSS_HDR_PARAMS_H - -#include "type_support.h" - -#define HDR_NUM_INPUT_FRAMES (3) - -/* HDR irradiance map parameters on ISP. 
*/ -struct sh_css_hdr_irradiance_params { - int32_t test_irr; - int32_t match_shift[HDR_NUM_INPUT_FRAMES - 1]; /* Histogram matching shift parameter */ - int32_t match_mul[HDR_NUM_INPUT_FRAMES - 1]; /* Histogram matching multiplication parameter */ - int32_t thr_low[HDR_NUM_INPUT_FRAMES - 1]; /* Weight map soft threshold low bound parameter */ - int32_t thr_high[HDR_NUM_INPUT_FRAMES - 1]; /* Weight map soft threshold high bound parameter */ - int32_t thr_coeff[HDR_NUM_INPUT_FRAMES - 1]; /* Soft threshold linear function coefficient */ - int32_t thr_shift[HDR_NUM_INPUT_FRAMES - 1]; /* Soft threshold precision shift parameter */ - int32_t weight_bpp; /* Weight map bits per pixel */ -}; - -/* HDR deghosting parameters on ISP */ -struct sh_css_hdr_deghost_params { - int32_t test_deg; -}; - -/* HDR exclusion parameters on ISP */ -struct sh_css_hdr_exclusion_params { - int32_t test_excl; -}; - -/* HDR ISP parameters */ -struct sh_css_isp_hdr_params { - struct sh_css_hdr_irradiance_params irradiance; - struct sh_css_hdr_deghost_params deghost; - struct sh_css_hdr_exclusion_params exclusion; -}; - -#endif /* __IA_CSS_HDR_PARAMS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h deleted file mode 100644 index 26464421b077..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Release Version: irci_stable_candrpv_0415_20150521_0458 */ -/* Release Version: irci_ecr-master_20150911_0724 */ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_HDR_TYPES_H -#define __IA_CSS_HDR_TYPES_H - -#define IA_CSS_HDR_MAX_NUM_INPUT_FRAMES (3) - -/** - * \brief HDR Irradiance Parameters - * \detail Currently HDR paramters are used only for testing purposes - */ -struct ia_css_hdr_irradiance_params { - int test_irr; /** Test parameter */ - int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Histogram matching shift parameter */ - int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Histogram matching multiplication parameter */ - int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Weight map soft threshold low bound parameter */ - int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Weight map soft threshold high bound parameter */ - int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Soft threshold linear function coefficien */ - int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Soft threshold precision shift parameter */ - int weight_bpp; /** Weight map bits per pixel */ -}; - -/** - * \brief HDR Deghosting Parameters - * \detail Currently HDR paramters are used only for testing purposes - */ -struct ia_css_hdr_deghost_params { - int test_deg; /** Test parameter */ -}; - -/** - * \brief HDR Exclusion Parameters - * \detail Currently HDR paramters are used only for testing purposes - */ -struct ia_css_hdr_exclusion_params { - int test_excl; /** Test parameter */ -}; - -/** - * \brief HDR public paramterers. - * \details Struct with all paramters for HDR that can be seet from - * the CSS API. Currenly, only test paramters are defined. 
- */ -struct ia_css_hdr_config { - struct ia_css_hdr_irradiance_params irradiance; /** HDR irradiance paramaters */ - struct ia_css_hdr_deghost_params deghost; /** HDR deghosting parameters */ - struct ia_css_hdr_exclusion_params exclusion; /** HDR exclusion parameters */ -}; - -#endif /* __IA_CSS_HDR_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.c deleted file mode 100644 index a31c9e828e22..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.c +++ /dev/null @@ -1,86 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_bayer_io.host.h" -#include "dma.h" -#include "math_support.h" -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" -#endif -#include "ia_css_isp_params.h" -#include "ia_css_frame.h" - -void -ia_css_bayer_io_config( - const struct ia_css_binary *binary, - const struct sh_css_binary_args *args) -{ - const struct ia_css_frame *in_frame = args->in_frame; - const struct ia_css_frame **out_frames = (const struct ia_css_frame **)& args->out_frame; - const struct ia_css_frame_info *in_frame_info = (in_frame) ? 
&in_frame->info : &binary->in_frame_info; - - const unsigned ddr_bits_per_element = sizeof(short) * 8; - const unsigned ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, ddr_bits_per_element); - unsigned size_get = 0, size_put = 0; - unsigned offset = 0; - - if (binary->info->mem_offsets.offsets.param) { - size_get = binary->info->mem_offsets.offsets.param->dmem.get.size; - offset = binary->info->mem_offsets.offsets.param->dmem.get.offset; - } - - if (size_get) { - struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - struct dma_port_config config; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part enter:\n"); -#endif - - ia_css_dma_configure_from_info(&config, in_frame_info); - // The base_address of the input frame will be set in the ISP - to->width = in_frame_info->res.width; - to->height = in_frame_info->res.height; - to->stride = config.stride; - to->ddr_elems_per_word = ddr_elems_per_word; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part leave:\n"); -#endif - } - - if (binary->info->mem_offsets.offsets.param) { - size_put = binary->info->mem_offsets.offsets.param->dmem.put.size; - offset = binary->info->mem_offsets.offsets.param->dmem.put.offset; - } - - if (size_put) { - struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - struct dma_port_config config; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part enter:\n"); -#endif - - ia_css_dma_configure_from_info(&config, &out_frames[0]->info); - to->base_address = out_frames[0]->data; - to->width = out_frames[0]->info.res.width; - to->height = out_frames[0]->info.res.height; - to->stride = config.stride; - 
to->ddr_elems_per_word = ddr_elems_per_word; - -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part leave:\n"); -#endif - } -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h deleted file mode 100644 index 7e5d4cfe3454..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __BAYER_IO_HOST_H -#define __BAYER_IO_HOST_H - -#include "ia_css_bayer_io_param.h" -#include "ia_css_bayer_io_types.h" -#include "ia_css_binary.h" -#include "sh_css_internal.h" - - -void -ia_css_bayer_io_config( - const struct ia_css_binary *binary, - const struct sh_css_binary_args *args); - -#endif /*__BAYER_IO_HOST_H */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_param.h deleted file mode 100644 index 7b6f581c4a80..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_param.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_BAYER_IO_PARAM -#define __IA_CSS_BAYER_IO_PARAM - -#include "../common/ia_css_common_io_param.h" - -#endif /* __IA_CSS_BAYER_IO_PARAM */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_types.h deleted file mode 100644 index 2291b01452f8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_types.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_BAYER_IO_TYPES_H -#define __IA_CSS_BAYER_IO_TYPES_H - -#include "../common/ia_css_common_io_types.h" - -#endif /* __IA_CSS_BAYER_IO_TYPES_H */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_param.h deleted file mode 100644 index f1ce03aa7951..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_param.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_COMMON_IO_PARAM -#define __IA_CSS_COMMON_IO_PARAM - -#include "../common/ia_css_common_io_types.h" - -#endif /* __IA_CSS_COMMON_IO_PARAM */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_types.h deleted file mode 100644 index 8a9a97063264..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_types.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_COMMON_IO_TYPES -#define __IA_CSS_COMMON_IO_TYPES - -#define MAX_IO_DMA_CHANNELS 2 - -struct ia_css_common_io_config { - unsigned base_address; - unsigned width; - unsigned height; - unsigned stride; - unsigned ddr_elems_per_word; - unsigned dma_channel[MAX_IO_DMA_CHANNELS]; -}; - -#endif /* __IA_CSS_COMMON_IO_TYPES */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h deleted file mode 100644 index 91fb5168c357..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_YUV444_IO_PARAM -#define __IA_CSS_YUV444_IO_PARAM - -#include "../common/ia_css_common_io_param.h" - -#endif -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h deleted file mode 100644 index dac440309394..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_YUV444_IO_TYPES -#define __IA_CSS_YUV444_IO_TYPES - -#include "../common/ia_css_common_io_types.h" - -#endif -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c deleted file mode 100644 index f80480cf9de2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c +++ /dev/null @@ -1,86 +0,0 @@ -#ifdef ISP2401 -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. 
- -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ - -#include "ia_css_bayer_io.host.h" -#include "dma.h" -#include "math_support.h" -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" -#endif -#include "ia_css_isp_params.h" -#include "ia_css_frame.h" - -void -ia_css_bayer_io_config( - const struct ia_css_binary *binary, - const struct sh_css_binary_args *args) -{ - const struct ia_css_frame *in_frame = args->in_frame; - const struct ia_css_frame **out_frames = (const struct ia_css_frame **)& args->out_frame; - const struct ia_css_frame_info *in_frame_info = (in_frame) ? &in_frame->info : &binary->in_frame_info; - - const unsigned ddr_bits_per_element = sizeof(short) * 8; - const unsigned ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, ddr_bits_per_element); - unsigned size_get = 0, size_put = 0; - unsigned offset = 0; - - if (binary->info->mem_offsets.offsets.param) { - size_get = binary->info->mem_offsets.offsets.param->dmem.get.size; - offset = binary->info->mem_offsets.offsets.param->dmem.get.offset; - } - - if (size_get) { - struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - struct dma_port_config config; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part enter:\n"); -#endif - - ia_css_dma_configure_from_info(&config, in_frame_info); - // The base_address of the input frame will be set in the ISP - to->width = in_frame_info->res.width; - to->height = in_frame_info->res.height; - to->stride = config.stride; - 
to->ddr_elems_per_word = ddr_elems_per_word; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part leave:\n"); -#endif - } - - if (binary->info->mem_offsets.offsets.param) { - size_put = binary->info->mem_offsets.offsets.param->dmem.put.size; - offset = binary->info->mem_offsets.offsets.param->dmem.put.offset; - } - - if (size_put) { - struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - struct dma_port_config config; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part enter:\n"); -#endif - - ia_css_dma_configure_from_info(&config, &out_frames[0]->info); - to->base_address = out_frames[0]->data; - to->width = out_frames[0]->info.res.width; - to->height = out_frames[0]->info.res.height; - to->stride = config.stride; - to->ddr_elems_per_word = ddr_elems_per_word; - -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part leave:\n"); -#endif - } -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h deleted file mode 100644 index ab9fa31bfc5e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. 
- -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ - -#ifndef __BAYER_IO_HOST_H -#define __BAYER_IO_HOST_H - -#include "ia_css_bayer_io_param.h" -#include "ia_css_bayer_io_types.h" -#include "ia_css_binary.h" -#include "sh_css_internal.h" - - -void -ia_css_bayer_io_config( - const struct ia_css_binary *binary, - const struct sh_css_binary_args *args); - -#endif /*__BAYER_IO_HOST_H */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h deleted file mode 100644 index bf5a3eccb330..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ - -#ifndef __IA_CSS_BAYER_IO_PARAM -#define __IA_CSS_BAYER_IO_PARAM - -#include "../common/ia_css_common_io_param.h" - -#endif /* __IA_CSS_BAYER_IO_PARAM */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h deleted file mode 100644 index 9e3c622db4d4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ - -#ifndef __IA_CSS_BAYER_IO_TYPES_H -#define __IA_CSS_BAYER_IO_TYPES_H - -#include "../common/ia_css_common_io_types.h" - -#endif /* __IA_CSS_BAYER_IO_TYPES_H */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h deleted file mode 100644 index e5fdcfff0cf7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. 
- -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ - -#ifndef __IA_CSS_COMMON_IO_PARAM -#define __IA_CSS_COMMON_IO_PARAM - -#include "../common/ia_css_common_io_types.h" - -#endif /* __IA_CSS_COMMON_IO_PARAM */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h deleted file mode 100644 index 0a19e2d1aff4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ - -#ifndef __IA_CSS_COMMON_IO_TYPES -#define __IA_CSS_COMMON_IO_TYPES - -#define MAX_IO_DMA_CHANNELS 3 - -struct ia_css_common_io_config { - unsigned base_address; - unsigned width; - unsigned height; - unsigned stride; - unsigned ddr_elems_per_word; - unsigned dma_channel[MAX_IO_DMA_CHANNELS]; -}; - -#endif /* __IA_CSS_COMMON_IO_TYPES */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c deleted file mode 100644 index eb9e9439cc21..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c +++ /dev/null @@ -1,86 +0,0 @@ -#ifdef ISP2401 -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ - -#include "ia_css_yuv444_io.host.h" -#include "dma.h" -#include "math_support.h" -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" -#endif -#include "ia_css_isp_params.h" -#include "ia_css_frame.h" - -void -ia_css_yuv444_io_config( - const struct ia_css_binary *binary, - const struct sh_css_binary_args *args) -{ - const struct ia_css_frame *in_frame = args->in_frame; - const struct ia_css_frame **out_frames = (const struct ia_css_frame **)& args->out_frame; - const struct ia_css_frame_info *in_frame_info = (in_frame) ? 
&in_frame->info : &binary->in_frame_info; - - const unsigned ddr_bits_per_element = sizeof(short) * 8; - const unsigned ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, ddr_bits_per_element); - unsigned size_get = 0, size_put = 0; - unsigned offset = 0; - - if (binary->info->mem_offsets.offsets.param) { - size_get = binary->info->mem_offsets.offsets.param->dmem.get.size; - offset = binary->info->mem_offsets.offsets.param->dmem.get.offset; - } - - if (size_get) { - struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - struct dma_port_config config; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_yuv444_io_config() get part enter:\n"); -#endif - - ia_css_dma_configure_from_info(&config, in_frame_info); - // The base_address of the input frame will be set in the ISP - to->width = in_frame_info->res.width; - to->height = in_frame_info->res.height; - to->stride = config.stride; - to->ddr_elems_per_word = ddr_elems_per_word; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_yuv444_io_config() get part leave:\n"); -#endif - } - - if (binary->info->mem_offsets.offsets.param) { - size_put = binary->info->mem_offsets.offsets.param->dmem.put.size; - offset = binary->info->mem_offsets.offsets.param->dmem.put.offset; - } - - if (size_put) { - struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; - struct dma_port_config config; -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_yuv444_io_config() put part enter:\n"); -#endif - - ia_css_dma_configure_from_info(&config, &out_frames[0]->info); - to->base_address = out_frames[0]->data; - to->width = out_frames[0]->info.res.width; - to->height = out_frames[0]->info.res.height; - to->stride = config.stride; - 
to->ddr_elems_per_word = ddr_elems_per_word; - -#ifndef IA_CSS_NO_DEBUG - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_yuv444_io_config() put part leave:\n"); -#endif - } -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h deleted file mode 100644 index 480172d39aee..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ - -#ifndef __YUV444_IO_HOST_H -#define __YUV444_IO_HOST_H - -#include "ia_css_yuv444_io_param.h" -#include "ia_css_yuv444_io_types.h" -#include "ia_css_binary.h" -#include "sh_css_internal.h" - - -void -ia_css_yuv444_io_config( - const struct ia_css_binary *binary, - const struct sh_css_binary_args *args); - -#endif /*__YUV44_IO_HOST_H */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h deleted file mode 100644 index cc8eda19c6e8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ - -#ifndef __IA_CSS_YUV444_IO_PARAM -#define __IA_CSS_YUV444_IO_PARAM - -#include "../common/ia_css_common_io_param.h" - -#endif -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h deleted file mode 100644 index 343325a111e1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ - -#ifndef __IA_CSS_YUV444_IO_TYPES -#define __IA_CSS_YUV444_IO_TYPES - -#include "../common/ia_css_common_io_types.h" - -#endif -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c deleted file mode 100644 index 9e41cc0a307f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_iterator.host.h" -#include "ia_css_frame_public.h" -#include "ia_css_binary.h" -#include "ia_css_err.h" -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" - -static const struct ia_css_iterator_configuration default_config = { - .input_info = (struct ia_css_frame_info *)NULL, -}; - -void -ia_css_iterator_config( - struct sh_css_isp_iterator_isp_config *to, - const struct ia_css_iterator_configuration *from, - unsigned size) -{ - (void)size; - ia_css_frame_info_to_frame_sp_info(&to->input_info, from->input_info); - ia_css_frame_info_to_frame_sp_info(&to->internal_info, from->internal_info); - ia_css_frame_info_to_frame_sp_info(&to->output_info, from->output_info); - ia_css_frame_info_to_frame_sp_info(&to->vf_info, from->vf_info); - ia_css_resolution_to_sp_resolution(&to->dvs_envelope, from->dvs_envelope); -} - -enum ia_css_err -ia_css_iterator_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *in_info) -{ - struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO; - struct ia_css_iterator_configuration config = default_config; - - config.input_info = &binary->in_frame_info; - config.internal_info = &binary->internal_frame_info; - config.output_info = &binary->out_frame_info[0]; - config.vf_info = &binary->vf_frame_info; - config.dvs_envelope = &binary->dvs_envelope; - - /* Use in_info iso binary->in_frame_info. - * They can differ in padded width in case of scaling, e.g. for capture_pp. - * Find out why. 
- */ - if (in_info) - config.input_info = in_info; - if (binary->out_frame_info[0].res.width == 0) - config.output_info = &binary->out_frame_info[1]; - my_info = *config.output_info; - config.output_info = &my_info; - /* we do this only for preview pipe because in fill_binary_info function - * we assign vf_out res to out res, but for ISP internal processing, we need - * the original out res. for video pipe, it has two output pins --- out and - * vf_out, so it can keep these two resolutions already. */ - if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW && - binary->vf_downscale_log2 > 0) { - /* TODO: Remove this after preview output decimation is fixed - * by configuring out&vf info files properly */ - my_info.padded_width <<= binary->vf_downscale_log2; - my_info.res.width <<= binary->vf_downscale_log2; - my_info.res.height <<= binary->vf_downscale_log2; - } - - ia_css_configure_iterator(binary, &config); - - return IA_CSS_SUCCESS; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h deleted file mode 100644 index d8f249c5a53b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_ITERATOR_HOST_H -#define __IA_CSS_ITERATOR_HOST_H - -#include "ia_css_frame_public.h" -#include "ia_css_binary.h" -#include "ia_css_err.h" -#include "ia_css_iterator_param.h" - -void -ia_css_iterator_config( - struct sh_css_isp_iterator_isp_config *to, - const struct ia_css_iterator_configuration *from, - unsigned size); - -enum ia_css_err -ia_css_iterator_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *in_info); - -#endif /* __IA_CSS_ITERATOR_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h deleted file mode 100644 index d308126e41d3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_ITERATOR_PARAM_H -#define __IA_CSS_ITERATOR_PARAM_H - -#include "ia_css_types.h" /* ia_css_resolution */ -#include "ia_css_frame_public.h" /* ia_css_frame_info */ -#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */ - -struct ia_css_iterator_configuration { - const struct ia_css_frame_info *input_info; - const struct ia_css_frame_info *internal_info; - const struct ia_css_frame_info *output_info; - const struct ia_css_frame_info *vf_info; - const struct ia_css_resolution *dvs_envelope; -}; - -struct sh_css_isp_iterator_isp_config { - struct ia_css_frame_sp_info input_info; - struct ia_css_frame_sp_info internal_info; - struct ia_css_frame_sp_info output_info; - struct ia_css_frame_sp_info vf_info; - struct ia_css_sp_resolution dvs_envelope; -}; - -#endif /* __IA_CSS_ITERATOR_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c deleted file mode 100644 index 5ddf61fc95fa..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" - -#ifndef IA_CSS_NO_DEBUG -/* FIXME: See BZ 4427 */ -#include "ia_css_debug.h" -#endif - -#include "ia_css_macc1_5.host.h" - -const struct ia_css_macc1_5_config default_macc1_5_config = { - 1 -}; - -void -ia_css_macc1_5_encode( - struct sh_css_isp_macc1_5_params *to, - const struct ia_css_macc1_5_config *from, - unsigned int size) -{ - (void)size; - to->exp = from->exp; -} - -void -ia_css_macc1_5_vmem_encode( - struct sh_css_isp_macc1_5_vmem_params *params, - const struct ia_css_macc1_5_table *from, - unsigned int size) -{ - unsigned int i, j, k, idx; - unsigned int idx_map[] = { - 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8}; - - (void)size; - - for (k = 0; k < 4; k++) - for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) { - idx = idx_map[i] + (k * IA_CSS_MACC_NUM_AXES); - j = 4 * i; - - params->data[0][(idx)] = from->data[j]; - params->data[1][(idx)] = from->data[j + 1]; - params->data[2][(idx)] = from->data[j + 2]; - params->data[3][(idx)] = from->data[j + 3]; - } - -} - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_macc1_5_debug_dtrace( - const struct ia_css_macc1_5_config *config, - unsigned int level) -{ - ia_css_debug_dtrace(level, - "config.exp=%d\n", - config->exp); -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h deleted file mode 100644 index 53ef18f7e912..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_MACC1_5_HOST_H -#define __IA_CSS_MACC1_5_HOST_H - -#include "ia_css_macc1_5_param.h" -#include "ia_css_macc1_5_table.host.h" - -extern const struct ia_css_macc1_5_config default_macc1_5_config; - -void -ia_css_macc1_5_encode( - struct sh_css_isp_macc1_5_params *to, - const struct ia_css_macc1_5_config *from, - unsigned int size); - -void -ia_css_macc1_5_vmem_encode( - struct sh_css_isp_macc1_5_vmem_params *params, - const struct ia_css_macc1_5_table *from, - unsigned int size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_macc1_5_debug_dtrace( - const struct ia_css_macc1_5_config *config, - unsigned int level); -#endif -#endif /* __IA_CSS_MACC1_5_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h deleted file mode 100644 index 41a2da460dcf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_MACC1_5_PARAM_H -#define __IA_CSS_MACC1_5_PARAM_H - -#include "type_support.h" -#include "vmem.h" -#include "ia_css_macc1_5_types.h" - -/* MACC */ -struct sh_css_isp_macc1_5_params { - int32_t exp; -}; - -struct sh_css_isp_macc1_5_vmem_params { - VMEM_ARRAY(data, IA_CSS_MACC_NUM_COEFS*ISP_NWAY); -}; - -#endif /* __IA_CSS_MACC1_5_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c deleted file mode 100644 index 89714bf87b52..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "system_global.h" -#include "ia_css_types.h" -#include "ia_css_macc1_5_table.host.h" - -/* Multi-Axes Color Correction table for ISP2. 
- * 64values = 2x2matrix for 16area, [s1.12] - * ineffective: 16 of "identity 2x2 matix" {4096,0,0,4096} - */ -const struct ia_css_macc1_5_table default_macc1_5_table = { - { 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096 } -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h deleted file mode 100644 index 10a50aa82be8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_MACC1_5_TABLE_HOST_H -#define __IA_CSS_MACC1_5_TABLE_HOST_H - -#include "macc/macc1_5/ia_css_macc1_5_types.h" - -extern const struct ia_css_macc1_5_table default_macc1_5_table; - -#endif /* __IA_CSS_MACC1_5_TABLE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h deleted file mode 100644 index 9cd31c2c0253..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_MACC1_5_TYPES_H -#define __IA_CSS_MACC1_5_TYPES_H - -/* @file -* CSS-API header file for Multi-Axis Color Conversion algorithm parameters. -*/ - -/* Multi-Axis Color Conversion configuration - * - * ISP2.6.1: MACC1_5 is used. - */ - - -/* Number of axes in the MACC table. */ -#define IA_CSS_MACC_NUM_AXES 16 -/* Number of coefficients per MACC axes. */ -#define IA_CSS_MACC_NUM_COEFS 4 - -/* Multi-Axes Color Correction (MACC) table. - * - * ISP block: MACC (MACC by only matrix) - * MACC1_5 (MACC by matrix and exponent(ia_css_macc_config)) - * ISP1: MACC is used. - * ISP2: MACC1_5 is used. 
- * - * [MACC] - * OutU = (data00 * InU + data01 * InV) >> 13 - * OutV = (data10 * InU + data11 * InV) >> 13 - * - * default/ineffective: - * OutU = (8192 * InU + 0 * InV) >> 13 - * OutV = ( 0 * InU + 8192 * InV) >> 13 - * - * [MACC1_5] - * OutU = (data00 * InU + data01 * InV) >> (13 - exp) - * OutV = (data10 * InU + data11 * InV) >> (13 - exp) - * - * default/ineffective: (exp=1) - * OutU = (4096 * InU + 0 * InV) >> (13 - 1) - * OutV = ( 0 * InU + 4096 * InV) >> (13 - 1) - */ -struct ia_css_macc1_5_table { - int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES]; - /** 16 of 2x2 matix - MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191] - default/ineffective: (s1.12) - 16 of "identity 2x2 matix" {4096,0,0,4096} */ -}; - -/* Multi-Axes Color Correction (MACC) configuration. - * - * ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config)) - * ISP2: MACC1_5 is used. - */ -struct ia_css_macc1_5_config { - uint8_t exp; /** Common exponent of ia_css_macc_table. - u8.0, [0,13], default 1, ineffective 1 */ -}; - -#endif /* __IA_CSS_MACC1_5_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.c deleted file mode 100644 index 1f7e9e4eec3c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.c +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "sh_css_frac.h" - -#include "ia_css_macc.host.h" - -const struct ia_css_macc_config default_macc_config = { - 1, -}; - -void -ia_css_macc_encode( - struct sh_css_isp_macc_params *to, - const struct ia_css_macc_config *from, - unsigned size) -{ - (void)size; - to->exp = from->exp; -} - -void -ia_css_macc_dump( - const struct sh_css_isp_macc_params *macc, - unsigned level); - -void -ia_css_macc_debug_dtrace( - const struct ia_css_macc_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.exp=%d\n", - config->exp); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.h deleted file mode 100644 index 044b01d38ad6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_MACC_HOST_H -#define __IA_CSS_MACC_HOST_H - -#include "sh_css_params.h" - -#include "ia_css_macc_param.h" -#include "ia_css_macc_table.host.h" - -extern const struct ia_css_macc_config default_macc_config; - -void -ia_css_macc_encode( - struct sh_css_isp_macc_params *to, - const struct ia_css_macc_config *from, - unsigned size); - - -void -ia_css_macc_dump( - const struct sh_css_isp_macc_params *macc, - unsigned level); - -void -ia_css_macc_debug_dtrace( - const struct ia_css_macc_config *config, - unsigned level); - -#endif /* __IA_CSS_MACC_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_param.h deleted file mode 100644 index 6a12b922c485..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_param.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_MACC_PARAM_H -#define __IA_CSS_MACC_PARAM_H - -#include "type_support.h" - -/* MACC */ -struct sh_css_isp_macc_params { - int32_t exp; -}; - -#endif /* __IA_CSS_MACC_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c deleted file mode 100644 index 8a6c3cafabdc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "system_global.h" -#include "ia_css_types.h" -#include "ia_css_macc_table.host.h" - -/* Multi-Axes Color Correction table for ISP1. - * 64values = 2x2matrix for 16area, [s2.13] - * ineffective: 16 of "identity 2x2 matix" {8192,0,0,8192} - */ -const struct ia_css_macc_table default_macc_table = { - { 8192, 0, 0, 8192, 8192, 0, 0, 8192, - 8192, 0, 0, 8192, 8192, 0, 0, 8192, - 8192, 0, 0, 8192, 8192, 0, 0, 8192, - 8192, 0, 0, 8192, 8192, 0, 0, 8192, - 8192, 0, 0, 8192, 8192, 0, 0, 8192, - 8192, 0, 0, 8192, 8192, 0, 0, 8192, - 8192, 0, 0, 8192, 8192, 0, 0, 8192, - 8192, 0, 0, 8192, 8192, 0, 0, 8192 } -}; - -/* Multi-Axes Color Correction table for ISP2. 
- * 64values = 2x2matrix for 16area, [s1.12] - * ineffective: 16 of "identity 2x2 matix" {4096,0,0,4096} - */ -const struct ia_css_macc_table default_macc2_table = { - { 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096, - 4096, 0, 0, 4096, 4096, 0, 0, 4096 } -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h deleted file mode 100644 index 96d62c9912b8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_MACC_TABLE_HOST_H -#define __IA_CSS_MACC_TABLE_HOST_H - -#include "ia_css_macc_types.h" - -extern const struct ia_css_macc_table default_macc_table; -extern const struct ia_css_macc_table default_macc2_table; - -#endif /* __IA_CSS_MACC_TABLE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h deleted file mode 100644 index 2c9e5a8ceb98..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_MACC_TYPES_H -#define __IA_CSS_MACC_TYPES_H - -/* @file -* CSS-API header file for Multi-Axis Color Correction (MACC) parameters. -*/ - -/* Number of axes in the MACC table. */ -#define IA_CSS_MACC_NUM_AXES 16 -/* Number of coefficients per MACC axes. */ -#define IA_CSS_MACC_NUM_COEFS 4 -/* The number of planes in the morphing table. */ - -/* Multi-Axis Color Correction (MACC) table. - * - * ISP block: MACC1 (MACC by only matrix) - * MACC2 (MACC by matrix and exponent(ia_css_macc_config)) - * ISP1: MACC1 is used. - * ISP2: MACC2 is used. 
- * - * [MACC1] - * OutU = (data00 * InU + data01 * InV) >> 13 - * OutV = (data10 * InU + data11 * InV) >> 13 - * - * default/ineffective: - * OutU = (8192 * InU + 0 * InV) >> 13 - * OutV = ( 0 * InU + 8192 * InV) >> 13 - * - * [MACC2] - * OutU = (data00 * InU + data01 * InV) >> (13 - exp) - * OutV = (data10 * InU + data11 * InV) >> (13 - exp) - * - * default/ineffective: (exp=1) - * OutU = (4096 * InU + 0 * InV) >> (13 - 1) - * OutV = ( 0 * InU + 4096 * InV) >> (13 - 1) - */ - -struct ia_css_macc_table { - int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES]; - /** 16 of 2x2 matix - MACC1: s2.13, [-65536,65535] - default/ineffective: - 16 of "identity 2x2 matix" {8192,0,0,8192} - MACC2: s[macc_config.exp].[13-macc_config.exp], [-8192,8191] - default/ineffective: (s1.12) - 16 of "identity 2x2 matix" {4096,0,0,4096} */ -}; - -#endif /* __IA_CSS_MACC_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.c deleted file mode 100644 index 2c2c5a5854a0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.c +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_norm.host.h" - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.h deleted file mode 100644 index 42b5143ef78f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_NORM_HOST_H -#define __IA_CSS_NORM_HOST_H - -#include "ia_css_norm_param.h" - -#endif /* __IA_CSS_NORM_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_param.h deleted file mode 100644 index 85dc6fc0a56b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_param.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_NORM_PARAM_H -#define __IA_CSS_NORM_PARAM_H - - -#endif /* __IA_CSS_NORM_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c deleted file mode 100644 index f77aff13f8e3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "sh_css_frac.h" -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" -#endif -#include "isp.h" -#include "ia_css_ob2.host.h" - -const struct ia_css_ob2_config default_ob2_config = { - 0, - 0, - 0, - 0 -}; - -void -ia_css_ob2_encode( - struct sh_css_isp_ob2_params *to, - const struct ia_css_ob2_config *from, - unsigned size) -{ - (void)size; - - /* Blacklevels types are u0_16 */ - to->blacklevel_gr = uDIGIT_FITTING(from->level_gr, 16, SH_CSS_BAYER_BITS); - to->blacklevel_r = uDIGIT_FITTING(from->level_r, 16, SH_CSS_BAYER_BITS); - to->blacklevel_b = uDIGIT_FITTING(from->level_b, 16, SH_CSS_BAYER_BITS); - to->blacklevel_gb = uDIGIT_FITTING(from->level_gb, 16, SH_CSS_BAYER_BITS); -} - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_ob2_dump( - const struct sh_css_isp_ob2_params *ob2, - unsigned level) -{ - if (!ob2) - return; - - ia_css_debug_dtrace(level, "Optical Black 2:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ob2_blacklevel_gr", ob2->blacklevel_gr); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ob2_blacklevel_r", ob2->blacklevel_r); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ob2_blacklevel_b", ob2->blacklevel_b); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ob2_blacklevel_gb", ob2->blacklevel_gb); - -} - - -void -ia_css_ob2_debug_dtrace( - const struct ia_css_ob2_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.level_gr=%d, config.level_r=%d, " - "config.level_b=%d, config.level_gb=%d, ", - config->level_gr, config->level_r, - config->level_b, config->level_gb); -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h deleted file mode 100644 index 06846502eca3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * 
Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_OB2_HOST_H -#define __IA_CSS_OB2_HOST_H - -#include "ia_css_ob2_types.h" -#include "ia_css_ob2_param.h" - -extern const struct ia_css_ob2_config default_ob2_config; - -void -ia_css_ob2_encode( - struct sh_css_isp_ob2_params *to, - const struct ia_css_ob2_config *from, - unsigned size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_ob2_dump( - const struct sh_css_isp_ob2_params *ob2, - unsigned level); - -void -ia_css_ob2_debug_dtrace( - const struct ia_css_ob2_config *config, unsigned level); -#endif - -#endif /* __IA_CSS_OB2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h deleted file mode 100644 index 5c21d6a3911b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_OB2_PARAM_H -#define __IA_CSS_OB2_PARAM_H - -#include "type_support.h" - - -/* OB2 (Optical Black) */ -struct sh_css_isp_ob2_params { - int32_t blacklevel_gr; - int32_t blacklevel_r; - int32_t blacklevel_b; - int32_t blacklevel_gb; -}; - -#endif /* __IA_CSS_OB2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h deleted file mode 100644 index d981394c1c11..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_OB2_TYPES_H -#define __IA_CSS_OB2_TYPES_H - -/* @file -* CSS-API header file for Optical Black algorithm parameters. -*/ - -/* Optical Black configuration - * - * ISP2.6.1: OB2 is used. - */ - -#include "ia_css_frac.h" - -struct ia_css_ob2_config { - ia_css_u0_16 level_gr; /** Black level for GR pixels. - u0.16, [0,65535], - default/ineffective 0 */ - ia_css_u0_16 level_r; /** Black level for R pixels. - u0.16, [0,65535], - default/ineffective 0 */ - ia_css_u0_16 level_b; /** Black level for B pixels. - u0.16, [0,65535], - default/ineffective 0 */ - ia_css_u0_16 level_gb; /** Black level for GB pixels. 
- u0.16, [0,65535], - default/ineffective 0 */ -}; - -#endif /* __IA_CSS_OB2_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c deleted file mode 100644 index fd891ac092ed..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "isp.h" - -#include "ia_css_ob.host.h" - -const struct ia_css_ob_config default_ob_config = { - IA_CSS_OB_MODE_NONE, - 0, - 0, - 0, - 0, - 0, - 0 -}; - -/* TODO: include ob.isp.h to get isp knowledge and - add assert on platform restrictions */ - -void -ia_css_ob_configure( - struct sh_css_isp_ob_stream_config *config, - unsigned int isp_pipe_version, - unsigned int raw_bit_depth) -{ - config->isp_pipe_version = isp_pipe_version; - config->raw_bit_depth = raw_bit_depth; -} - -void -ia_css_ob_encode( - struct sh_css_isp_ob_params *to, - const struct ia_css_ob_config *from, - const struct sh_css_isp_ob_stream_config *config, - unsigned size) -{ - unsigned int ob_bit_depth - = config->isp_pipe_version == 2 ? 
SH_CSS_BAYER_BITS : config->raw_bit_depth; - unsigned int scale = 16 - ob_bit_depth; - - (void)size; - switch (from->mode) { - case IA_CSS_OB_MODE_FIXED: - to->blacklevel_gr = from->level_gr >> scale; - to->blacklevel_r = from->level_r >> scale; - to->blacklevel_b = from->level_b >> scale; - to->blacklevel_gb = from->level_gb >> scale; - to->area_start_bq = 0; - to->area_length_bq = 0; - to->area_length_bq_inverse = 0; - break; - case IA_CSS_OB_MODE_RASTER: - to->blacklevel_gr = 0; - to->blacklevel_r = 0; - to->blacklevel_b = 0; - to->blacklevel_gb = 0; - to->area_start_bq = from->start_position; - to->area_length_bq = - (from->end_position - from->start_position) + 1; - to->area_length_bq_inverse = AREA_LENGTH_UNIT / to->area_length_bq; - break; - default: - to->blacklevel_gr = 0; - to->blacklevel_r = 0; - to->blacklevel_b = 0; - to->blacklevel_gb = 0; - to->area_start_bq = 0; - to->area_length_bq = 0; - to->area_length_bq_inverse = 0; - break; - } -} - -void -ia_css_ob_vmem_encode( - struct sh_css_isp_ob_vmem_params *to, - const struct ia_css_ob_config *from, - const struct sh_css_isp_ob_stream_config *config, - unsigned size) -{ - struct sh_css_isp_ob_params tmp; - struct sh_css_isp_ob_params *ob = &tmp; - - (void)size; - ia_css_ob_encode(&tmp, from, config, sizeof(tmp)); - - { - unsigned i; - unsigned sp_obarea_start_bq = ob->area_start_bq; - unsigned sp_obarea_length_bq = ob->area_length_bq; - unsigned low = sp_obarea_start_bq; - unsigned high = low + sp_obarea_length_bq; - uint16_t all_ones = ~0; - - for (i = 0; i < OBAREA_MASK_SIZE; i++) { - if (i >= low && i < high) - to->vmask[i/ISP_VEC_NELEMS][i%ISP_VEC_NELEMS] = all_ones; - else - to->vmask[i/ISP_VEC_NELEMS][i%ISP_VEC_NELEMS] = 0; - } - } -} - -void -ia_css_ob_dump( - const struct sh_css_isp_ob_params *ob, - unsigned level) -{ - if (!ob) return; - ia_css_debug_dtrace(level, "Optical Black:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ob_blacklevel_gr", ob->blacklevel_gr); - 
ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ob_blacklevel_r", ob->blacklevel_r); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ob_blacklevel_b", ob->blacklevel_b); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ob_blacklevel_gb", ob->blacklevel_gb); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "obarea_start_bq", ob->area_start_bq); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "obarea_length_bq", ob->area_length_bq); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "obarea_length_bq_inverse", - ob->area_length_bq_inverse); -} - - -void -ia_css_ob_debug_dtrace( - const struct ia_css_ob_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.mode=%d, " - "config.level_gr=%d, config.level_r=%d, " - "config.level_b=%d, config.level_gb=%d, " - "config.start_position=%d, config.end_position=%d\n", - config->mode, - config->level_gr, config->level_r, - config->level_b, config->level_gb, - config->start_position, config->end_position); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h deleted file mode 100644 index 4af181470f8d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_OB_HOST_H -#define __IA_CSS_OB_HOST_H - -#include "ia_css_ob_types.h" -#include "ia_css_ob_param.h" - -extern const struct ia_css_ob_config default_ob_config; - -void -ia_css_ob_configure( - struct sh_css_isp_ob_stream_config *config, - unsigned int isp_pipe_version, - unsigned int raw_bit_depth); - -void -ia_css_ob_encode( - struct sh_css_isp_ob_params *to, - const struct ia_css_ob_config *from, - const struct sh_css_isp_ob_stream_config *config, - unsigned size); - -void -ia_css_ob_vmem_encode( - struct sh_css_isp_ob_vmem_params *to, - const struct ia_css_ob_config *from, - const struct sh_css_isp_ob_stream_config *config, - unsigned size); - -void -ia_css_ob_dump( - const struct sh_css_isp_ob_params *ob, - unsigned level); - -void -ia_css_ob_debug_dtrace( - const struct ia_css_ob_config *config, unsigned level) -; - -#endif /* __IA_CSS_OB_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_param.h deleted file mode 100644 index a60a644bb4ff..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_param.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_OB_PARAM_H -#define __IA_CSS_OB_PARAM_H - -#include "type_support.h" -#include "vmem.h" - -#define OBAREA_MASK_SIZE 64 -#define OBAREA_LENGTHBQ_INVERSE_SHIFT 12 - -/* AREA_LENGTH_UNIT is dependent on NWAY, requires rewrite */ -#define AREA_LENGTH_UNIT (1<<12) - - -/* OB (Optical Black) */ -struct sh_css_isp_ob_stream_config { - unsigned isp_pipe_version; - unsigned raw_bit_depth; -}; - -struct sh_css_isp_ob_params { - int32_t blacklevel_gr; - int32_t blacklevel_r; - int32_t blacklevel_b; - int32_t blacklevel_gb; - int32_t area_start_bq; - int32_t area_length_bq; - int32_t area_length_bq_inverse; -}; - -struct sh_css_isp_ob_vmem_params { - VMEM_ARRAY(vmask, OBAREA_MASK_SIZE); -}; - -#endif /* __IA_CSS_OB_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h deleted file mode 100644 index a9717b8f44ac..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_OB_TYPES_H -#define __IA_CSS_OB_TYPES_H - -/* @file -* CSS-API header file for Optical Black level parameters. -*/ - -#include "ia_css_frac.h" - -/* Optical black mode. - */ -enum ia_css_ob_mode { - IA_CSS_OB_MODE_NONE, /** OB has no effect. 
*/ - IA_CSS_OB_MODE_FIXED, /** Fixed OB */ - IA_CSS_OB_MODE_RASTER /** Raster OB */ -}; - -/* Optical Black level configuration. - * - * ISP block: OB1 - * ISP1: OB1 is used. - * ISP2: OB1 is used. - */ -struct ia_css_ob_config { - enum ia_css_ob_mode mode; /** Mode (None / Fixed / Raster). - enum, [0,2], - default 1, ineffective 0 */ - ia_css_u0_16 level_gr; /** Black level for GR pixels - (used for Fixed Mode only). - u0.16, [0,65535], - default/ineffective 0 */ - ia_css_u0_16 level_r; /** Black level for R pixels - (used for Fixed Mode only). - u0.16, [0,65535], - default/ineffective 0 */ - ia_css_u0_16 level_b; /** Black level for B pixels - (used for Fixed Mode only). - u0.16, [0,65535], - default/ineffective 0 */ - ia_css_u0_16 level_gb; /** Black level for GB pixels - (used for Fixed Mode only). - u0.16, [0,65535], - default/ineffective 0 */ - uint16_t start_position; /** Start position of OB area - (used for Raster Mode only). - u16.0, [0,63], - default/ineffective 0 */ - uint16_t end_position; /** End position of OB area - (used for Raster Mode only). - u16.0, [0,63], - default/ineffective 0 */ -}; - -#endif /* __IA_CSS_OB_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.c deleted file mode 100644 index 9efe5e5e4e06..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.c +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_frame.h" -#include "ia_css_debug.h" -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#include "ia_css_output.host.h" -#include "isp.h" - -#include "assert_support.h" - -const struct ia_css_output_config default_output_config = { - 0, - 0 -}; - -static const struct ia_css_output_configuration default_output_configuration = { - .info = (struct ia_css_frame_info *)NULL, -}; - -static const struct ia_css_output0_configuration default_output0_configuration = { - .info = (struct ia_css_frame_info *)NULL, -}; - -static const struct ia_css_output1_configuration default_output1_configuration = { - .info = (struct ia_css_frame_info *)NULL, -}; - -void -ia_css_output_encode( - struct sh_css_isp_output_params *to, - const struct ia_css_output_config *from, - unsigned size) -{ - (void)size; - to->enable_hflip = from->enable_hflip; - to->enable_vflip = from->enable_vflip; -} - -void -ia_css_output_config( - struct sh_css_isp_output_isp_config *to, - const struct ia_css_output_configuration *from, - unsigned size) -{ - unsigned elems_a = ISP_VEC_NELEMS; - - (void)size; - ia_css_dma_configure_from_info(&to->port_b, from->info); - to->width_a_over_b = elems_a / to->port_b.elems; - to->height = from->info ? from->info->res.height : 0; - to->enable = from->info != NULL; - ia_css_frame_info_to_frame_sp_info(&to->info, from->info); - - /* Assume divisiblity here, may need to generalize to fixed point. 
*/ - assert (elems_a % to->port_b.elems == 0); -} - -void -ia_css_output0_config( - struct sh_css_isp_output_isp_config *to, - const struct ia_css_output0_configuration *from, - unsigned size) -{ - ia_css_output_config ( - to, (const struct ia_css_output_configuration *)from, size); -} - -void -ia_css_output1_config( - struct sh_css_isp_output_isp_config *to, - const struct ia_css_output1_configuration *from, - unsigned size) -{ - ia_css_output_config ( - to, (const struct ia_css_output_configuration *)from, size); -} - -void -ia_css_output_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *info) -{ - if (NULL != info) { - struct ia_css_output_configuration config = - default_output_configuration; - - config.info = info; - - ia_css_configure_output(binary, &config); - } -} - -void -ia_css_output0_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *info) -{ - if (NULL != info) { - struct ia_css_output0_configuration config = - default_output0_configuration; - - config.info = info; - - ia_css_configure_output0(binary, &config); - } -} - -void -ia_css_output1_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *info) -{ - - if (NULL != info) { - struct ia_css_output1_configuration config = - default_output1_configuration; - - config.info = info; - - ia_css_configure_output1(binary, &config); - } -} - -void -ia_css_output_dump( - const struct sh_css_isp_output_params *output, - unsigned level) -{ - if (!output) return; - ia_css_debug_dtrace(level, "Horizontal Output Flip:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "enable", output->enable_hflip); - ia_css_debug_dtrace(level, "Vertical Output Flip:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "enable", output->enable_vflip); -} - -void -ia_css_output_debug_dtrace( - const struct ia_css_output_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.enable_hflip=%d", - config->enable_hflip); 
- ia_css_debug_dtrace(level, - "config.enable_vflip=%d", - config->enable_vflip); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.h deleted file mode 100644 index 530f934ce81e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_OUTPUT_HOST_H -#define __IA_CSS_OUTPUT_HOST_H - -#include "ia_css_frame_public.h" -#include "ia_css_binary.h" - -#include "ia_css_output_types.h" -#include "ia_css_output_param.h" - -extern const struct ia_css_output_config default_output_config; - -void -ia_css_output_encode( - struct sh_css_isp_output_params *to, - const struct ia_css_output_config *from, - unsigned size); - -void -ia_css_output_config( - struct sh_css_isp_output_isp_config *to, - const struct ia_css_output_configuration *from, - unsigned size); - -void -ia_css_output0_config( - struct sh_css_isp_output_isp_config *to, - const struct ia_css_output0_configuration *from, - unsigned size); - -void -ia_css_output1_config( - struct sh_css_isp_output_isp_config *to, - const struct ia_css_output1_configuration *from, - unsigned size); - -void -ia_css_output_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *from); - -void -ia_css_output0_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *from); - -void -ia_css_output1_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *from); - -void -ia_css_output_dump( - const struct sh_css_isp_output_params *output, - unsigned level); - -void -ia_css_output_debug_dtrace( - const struct ia_css_output_config *config, - unsigned level); - -#endif /* __IA_CSS_OUTPUT_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h deleted file mode 100644 index eb7defa41145..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_OUTPUT_PARAM_H -#define __IA_CSS_OUTPUT_PARAM_H - -#include -#include "dma.h" -#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */ - -/* output frame */ -struct sh_css_isp_output_isp_config { - uint32_t width_a_over_b; - uint32_t height; - uint32_t enable; - struct ia_css_frame_sp_info info; - struct dma_port_config port_b; -}; - -struct sh_css_isp_output_params { - uint8_t enable_hflip; - uint8_t enable_vflip; -}; - -#endif /* __IA_CSS_OUTPUT_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h deleted file mode 100644 index 9c7342fb8145..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_OUTPUT_TYPES_H -#define __IA_CSS_OUTPUT_TYPES_H - -/* @file -* CSS-API header file for parameters of output frames. -*/ - -/* Output frame - * - * ISP block: output frame - */ - -//#include "ia_css_frame_public.h" -struct ia_css_frame_info; - -struct ia_css_output_configuration { - const struct ia_css_frame_info *info; -}; - -struct ia_css_output0_configuration { - const struct ia_css_frame_info *info; -}; - -struct ia_css_output1_configuration { - const struct ia_css_frame_info *info; -}; - -struct ia_css_output_config { - uint8_t enable_hflip; /** enable horizontal output mirroring */ - uint8_t enable_vflip; /** enable vertical output mirroring */ -}; - -#endif /* __IA_CSS_OUTPUT_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c deleted file mode 100644 index d1fb4b116003..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_frame.h" -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "assert_support.h" -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#include "isp.h" - -#include "ia_css_qplane.host.h" - -static const struct ia_css_qplane_configuration default_config = { - .pipe = (struct sh_css_sp_pipeline *)NULL, -}; - -void -ia_css_qplane_config( - struct sh_css_isp_qplane_isp_config *to, - const struct ia_css_qplane_configuration *from, - unsigned size) -{ - unsigned elems_a = ISP_VEC_NELEMS; - - (void)size; - ia_css_dma_configure_from_info(&to->port_b, from->info); - to->width_a_over_b = elems_a / to->port_b.elems; - - /* Assume divisiblity here, may need to generalize to fixed point. */ - assert (elems_a % to->port_b.elems == 0); - - to->inout_port_config = from->pipe->inout_port_config; - to->format = from->info->format; -} - -void -ia_css_qplane_configure( - const struct sh_css_sp_pipeline *pipe, - const struct ia_css_binary *binary, - const struct ia_css_frame_info *info) -{ - struct ia_css_qplane_configuration config = default_config; - - config.pipe = pipe; - config.info = info; - - ia_css_configure_qplane(binary, &config); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h deleted file mode 100644 index c41e9e5e0fd7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_QPLANE_HOST_H -#define __IA_CSS_QPLANE_HOST_H - -#include -#include - -#if 0 -/* Cannot be included, since sh_css_internal.h is too generic - * e.g. for FW generation. -*/ -#include "sh_css_internal.h" /* sh_css_sp_pipeline */ -#endif - -#include "ia_css_qplane_types.h" -#include "ia_css_qplane_param.h" - -void -ia_css_qplane_config( - struct sh_css_isp_qplane_isp_config *to, - const struct ia_css_qplane_configuration *from, - unsigned size); - -void -ia_css_qplane_configure( - const struct sh_css_sp_pipeline *pipe, - const struct ia_css_binary *binary, - const struct ia_css_frame_info *from); - -#endif /* __IA_CSS_QPLANE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h deleted file mode 100644 index 5885f621de88..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_QPLANE_PARAM_H -#define __IA_CSS_QPLANE_PARAM_H - -#include -#include "dma.h" - -/* qplane channel */ -struct sh_css_isp_qplane_isp_config { - uint32_t width_a_over_b; - struct dma_port_config port_b; - uint32_t inout_port_config; - uint32_t input_needs_raw_binning; - uint32_t format; /* enum ia_css_frame_format */ -}; - -#endif /* __IA_CSS_QPLANE_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h deleted file mode 100644 index 62d371841619..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_QPLANE_TYPES_H -#define __IA_CSS_QPLANE_TYPES_H - -#include -#include "sh_css_internal.h" - -/* qplane frame - * - * ISP block: qplane frame - */ - - -struct ia_css_qplane_configuration { - const struct sh_css_sp_pipeline *pipe; - const struct ia_css_frame_info *info; -}; - -#endif /* __IA_CSS_QPLANE_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.c deleted file mode 100644 index fa9ce0fedf23..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_frame.h" -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "assert_support.h" -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#include "isp.h" -#include "isp/modes/interface/isp_types.h" - -#include "ia_css_raw.host.h" - - -static const struct ia_css_raw_configuration default_config = { - .pipe = (struct sh_css_sp_pipeline *)NULL, -}; - -static inline unsigned -sh_css_elems_bytes_from_info (unsigned raw_bit_depth) -{ - return CEIL_DIV(raw_bit_depth,8); -} - -/* MW: These areMIPI / ISYS properties, not camera function properties */ -static enum sh_stream_format -css2isp_stream_format(enum atomisp_input_format from) -{ - switch (from) { - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: - return sh_stream_format_yuv420_legacy; - case ATOMISP_INPUT_FORMAT_YUV420_8: - case ATOMISP_INPUT_FORMAT_YUV420_10: - case ATOMISP_INPUT_FORMAT_YUV420_16: - return sh_stream_format_yuv420; - case ATOMISP_INPUT_FORMAT_YUV422_8: - case ATOMISP_INPUT_FORMAT_YUV422_10: - case ATOMISP_INPUT_FORMAT_YUV422_16: - return sh_stream_format_yuv422; - case ATOMISP_INPUT_FORMAT_RGB_444: - case ATOMISP_INPUT_FORMAT_RGB_555: - case ATOMISP_INPUT_FORMAT_RGB_565: - case ATOMISP_INPUT_FORMAT_RGB_666: - case ATOMISP_INPUT_FORMAT_RGB_888: - return sh_stream_format_rgb; - case ATOMISP_INPUT_FORMAT_RAW_6: - case ATOMISP_INPUT_FORMAT_RAW_7: - case ATOMISP_INPUT_FORMAT_RAW_8: - case ATOMISP_INPUT_FORMAT_RAW_10: - case ATOMISP_INPUT_FORMAT_RAW_12: - case ATOMISP_INPUT_FORMAT_RAW_14: - case ATOMISP_INPUT_FORMAT_RAW_16: - return sh_stream_format_raw; - case ATOMISP_INPUT_FORMAT_BINARY_8: - default: - return sh_stream_format_raw; - } -} - -void -ia_css_raw_config( - struct sh_css_isp_raw_isp_config *to, - const struct ia_css_raw_configuration *from, - unsigned size) -{ - unsigned elems_a = ISP_VEC_NELEMS; - const struct ia_css_frame_info *in_info = from->in_info; - const struct ia_css_frame_info *internal_info = 
from->internal_info; - - (void)size; -#if !defined(USE_INPUT_SYSTEM_VERSION_2401) - /* 2401 input system uses input width width */ - in_info = internal_info; -#else - /*in some cases, in_info is NULL*/ - if (in_info) - (void)internal_info; - else - in_info = internal_info; - -#endif - ia_css_dma_configure_from_info(&to->port_b, in_info); - - /* Assume divisiblity here, may need to generalize to fixed point. */ - assert((in_info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) || - (elems_a % to->port_b.elems == 0)); - - to->width_a_over_b = elems_a / to->port_b.elems; - to->inout_port_config = from->pipe->inout_port_config; - to->format = in_info->format; - to->required_bds_factor = from->pipe->required_bds_factor; - to->two_ppc = from->two_ppc; - to->stream_format = css2isp_stream_format(from->stream_format); - to->deinterleaved = from->deinterleaved; -#if (defined(USE_INPUT_SYSTEM_VERSION_2401) || defined(CONFIG_CSI2_PLUS)) - to->start_column = in_info->crop_info.start_column; - to->start_line = in_info->crop_info.start_line; - to->enable_left_padding = from->enable_left_padding; -#endif -} - -void -ia_css_raw_configure( - const struct sh_css_sp_pipeline *pipe, - const struct ia_css_binary *binary, - const struct ia_css_frame_info *in_info, - const struct ia_css_frame_info *internal_info, - bool two_ppc, - bool deinterleaved) -{ - uint8_t enable_left_padding = (uint8_t)((binary->left_padding) ? 
1 : 0); - struct ia_css_raw_configuration config = default_config; - - config.pipe = pipe; - config.in_info = in_info; - config.internal_info = internal_info; - config.two_ppc = two_ppc; - config.stream_format = binary->input_format; - config.deinterleaved = deinterleaved; - config.enable_left_padding = enable_left_padding; - - ia_css_configure_raw(binary, &config); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.h deleted file mode 100644 index ac6b7f6b59c6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_RAW_HOST_H -#define __IA_CSS_RAW_HOST_H - -#include "ia_css_binary.h" - -#include "ia_css_raw_types.h" -#include "ia_css_raw_param.h" - -void -ia_css_raw_config( - struct sh_css_isp_raw_isp_config *to, - const struct ia_css_raw_configuration *from, - unsigned size); - -void -ia_css_raw_configure( - const struct sh_css_sp_pipeline *pipe, - const struct ia_css_binary *binary, - const struct ia_css_frame_info *in_info, - const struct ia_css_frame_info *internal_info, - bool two_ppc, - bool deinterleaved); - -#endif /* __IA_CSS_RAW_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_param.h deleted file mode 100644 index 12168b2dec2d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_param.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_RAW_PARAM_H -#define __IA_CSS_RAW_PARAM_H - -#include "type_support.h" - -#include "dma.h" - -/* Raw channel */ -struct sh_css_isp_raw_isp_config { - uint32_t width_a_over_b; - struct dma_port_config port_b; - uint32_t inout_port_config; - uint32_t input_needs_raw_binning; - uint32_t format; /* enum ia_css_frame_format */ - uint32_t required_bds_factor; - uint32_t two_ppc; - uint32_t stream_format; /* enum sh_stream_format */ - uint32_t deinterleaved; - uint32_t start_column; /*left crop offset*/ - uint32_t start_line; /*top crop offset*/ - uint8_t enable_left_padding; /*need this for multiple binary case*/ -}; - -#endif /* __IA_CSS_RAW_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h deleted file mode 100644 index ae868eb5e10f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_RAW_TYPES_H -#define __IA_CSS_RAW_TYPES_H - -#include -#include "sh_css_internal.h" - -/* Raw frame - * - * ISP block: Raw frame - */ - -struct ia_css_raw_configuration { - const struct sh_css_sp_pipeline *pipe; - const struct ia_css_frame_info *in_info; - const struct ia_css_frame_info *internal_info; - bool two_ppc; - enum atomisp_input_format stream_format; - bool deinterleaved; - uint8_t enable_left_padding; -}; - -#endif /* __IA_CSS_RAW_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c deleted file mode 100644 index 92168211683d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#if !defined(HAS_NO_HMEM) - -#include "memory_access.h" -#include "ia_css_types.h" -#include "sh_css_internal.h" -#include "sh_css_frac.h" - -#include "ia_css_raa.host.h" - -void -ia_css_raa_encode( - struct sh_css_isp_aa_params *to, - const struct ia_css_aa_config *from, - unsigned size) -{ - (void)size; - (void)to; - (void)from; -} - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h deleted file mode 100644 index b4f245c19f18..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_RAA_HOST_H -#define __IA_CSS_RAA_HOST_H - -#include "aa/aa_2/ia_css_aa2_types.h" -#include "aa/aa_2/ia_css_aa2_param.h" - -void -ia_css_raa_encode( - struct sh_css_isp_aa_params *to, - const struct ia_css_aa_config *from, - unsigned size); - -#endif /* __IA_CSS_RAA_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.c deleted file mode 100644 index 4c0ed5d4d971..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include -#include -#include -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#include "isp.h" -#include "ia_css_ref.host.h" - -void -ia_css_ref_config( - struct sh_css_isp_ref_isp_config *to, - const struct ia_css_ref_configuration *from, - unsigned size) -{ - unsigned elems_a = ISP_VEC_NELEMS, i; - - (void)size; - ia_css_dma_configure_from_info(&to->port_b, &(from->ref_frames[0]->info)); - to->width_a_over_b = elems_a / to->port_b.elems; - to->dvs_frame_delay = from->dvs_frame_delay; - for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) { - if (from->ref_frames[i]) { - to->ref_frame_addr_y[i] = from->ref_frames[i]->data + from->ref_frames[i]->planes.yuv.y.offset; - to->ref_frame_addr_c[i] = from->ref_frames[i]->data + from->ref_frames[i]->planes.yuv.u.offset; - } else { - to->ref_frame_addr_y[i] = 0; - to->ref_frame_addr_c[i] = 0; - } - } - - /* Assume divisiblity here, may need to generalize to fixed point. */ - assert (elems_a % to->port_b.elems == 0); -} - -void -ia_css_ref_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame **ref_frames, - const uint32_t dvs_frame_delay) -{ - struct ia_css_ref_configuration config; - unsigned i; - - for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) - config.ref_frames[i] = ref_frames[i]; - config.dvs_frame_delay = dvs_frame_delay; - ia_css_configure_ref(binary, &config); -} - -void -ia_css_init_ref_state( - struct sh_css_isp_ref_dmem_state *state, - unsigned size) -{ - (void)size; - assert(MAX_NUM_VIDEO_DELAY_FRAMES >= 2); - state->ref_in_buf_idx = 0; - state->ref_out_buf_idx = 1; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.h deleted file mode 100644 index 3c6d728d49ec..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.h +++ /dev/null @@ -1,41 +0,0 
@@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_REF_HOST_H -#define __IA_CSS_REF_HOST_H - -#include -#include - -#include "ia_css_ref_types.h" -#include "ia_css_ref_param.h" -#include "ia_css_ref_state.h" - -void -ia_css_ref_config( - struct sh_css_isp_ref_isp_config *to, - const struct ia_css_ref_configuration *from, - unsigned size); - -void -ia_css_ref_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame **ref_frames, - const uint32_t dvs_frame_delay); - -void -ia_css_init_ref_state( - struct sh_css_isp_ref_dmem_state *state, - unsigned size); -#endif /* __IA_CSS_REF_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h deleted file mode 100644 index 026443b999a6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_REF_PARAM_H -#define __IA_CSS_REF_PARAM_H - -#include -#include "sh_css_defs.h" -#include "dma.h" - -/* Reference frame */ -struct ia_css_ref_configuration { - const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; - uint32_t dvs_frame_delay; -}; - -struct sh_css_isp_ref_isp_config { - uint32_t width_a_over_b; - struct dma_port_config port_b; - hrt_vaddress ref_frame_addr_y[MAX_NUM_VIDEO_DELAY_FRAMES]; - hrt_vaddress ref_frame_addr_c[MAX_NUM_VIDEO_DELAY_FRAMES]; - uint32_t dvs_frame_delay; -}; - -#endif /* __IA_CSS_REF_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_state.h deleted file mode 100644 index 7867be8a7958..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_state.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_REF_STATE_H -#define __IA_CSS_REF_STATE_H - -#include "type_support.h" - -/* REF (temporal noise reduction) */ -struct sh_css_isp_ref_dmem_state { - int32_t ref_in_buf_idx; - int32_t ref_out_buf_idx; -}; - -#endif /* __IA_CSS_REF_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h deleted file mode 100644 index 4750fba268b9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_REF_TYPES_H -#define __IA_CSS_REF_TYPES_H - -/* Reference frame - * - * ISP block: reference frame - */ - -#include - - - -#endif /* __IA_CSS_REF_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c deleted file mode 100644 index aa733674f42b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c +++ /dev/null @@ -1,386 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" -#endif -#include "sh_css_frac.h" -#include "assert_support.h" - -#include "bh/bh_2/ia_css_bh.host.h" -#include "ia_css_s3a.host.h" - -const struct ia_css_3a_config default_3a_config = { - 25559, - 32768, - 7209, - 65535, - 0, - 65535, - {-3344, -6104, -19143, 19143, 6104, 3344, 0}, - {1027, 0, -9219, 16384, -9219, 1027, 0} -}; - -static unsigned int s3a_raw_bit_depth; - -void -ia_css_s3a_configure(unsigned int raw_bit_depth) -{ - s3a_raw_bit_depth = raw_bit_depth; -} - -static void -ia_css_ae_encode( - struct sh_css_isp_ae_params *to, - const struct ia_css_3a_config *from, - unsigned size) -{ - (void)size; - /* coefficients to calculate Y */ - to->y_coef_r = - uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT); - to->y_coef_g = - uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT); - to->y_coef_b = - uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT); -} - -static void -ia_css_awb_encode( - struct sh_css_isp_awb_params *to, - const struct ia_css_3a_config *from, - unsigned size) -{ - (void)size; - /* AWB level gate */ - to->lg_high_raw = - uDIGIT_FITTING(from->awb_lg_high_raw, 16, s3a_raw_bit_depth); - to->lg_low = - uDIGIT_FITTING(from->awb_lg_low, 16, SH_CSS_BAYER_BITS); - to->lg_high = - uDIGIT_FITTING(from->awb_lg_high, 16, SH_CSS_BAYER_BITS); -} - -static void -ia_css_af_encode( - struct sh_css_isp_af_params *to, - const struct ia_css_3a_config *from, - unsigned size) -{ 
- unsigned int i; - (void)size; - - /* af fir coefficients */ - for (i = 0; i < 7; ++i) { - to->fir1[i] = - sDIGIT_FITTING(from->af_fir1_coef[i], 15, - SH_CSS_AF_FIR_SHIFT); - to->fir2[i] = - sDIGIT_FITTING(from->af_fir2_coef[i], 15, - SH_CSS_AF_FIR_SHIFT); - } -} - -void -ia_css_s3a_encode( - struct sh_css_isp_s3a_params *to, - const struct ia_css_3a_config *from, - unsigned size) -{ - (void)size; - - ia_css_ae_encode(&to->ae, from, sizeof(to->ae)); - ia_css_awb_encode(&to->awb, from, sizeof(to->awb)); - ia_css_af_encode(&to->af, from, sizeof(to->af)); -} - -#if 0 -void -ia_css_process_s3a( - unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params) -{ - short dmem_offset = stage->binary->info->mem_offsets->dmem.s3a; - - assert(params != NULL); - - if (dmem_offset >= 0) { - ia_css_s3a_encode((struct sh_css_isp_s3a_params *) - &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset], - ¶ms->s3a_config); - ia_css_bh_encode((struct sh_css_isp_bh_params *) - &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset], - ¶ms->s3a_config); - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM0] = true; - } - - params->isp_params_changed = true; -} -#endif - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_ae_dump( - const struct sh_css_isp_ae_params *ae, - unsigned level) -{ - if (!ae) return; - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ae_y_coef_r", ae->y_coef_r); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ae_y_coef_g", ae->y_coef_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ae_y_coef_b", ae->y_coef_b); -} - -void -ia_css_awb_dump( - const struct sh_css_isp_awb_params *awb, - unsigned level) -{ - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "awb_lg_high_raw", awb->lg_high_raw); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "awb_lg_low", awb->lg_low); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "awb_lg_high", awb->lg_high); -} - -void 
-ia_css_af_dump( - const struct sh_css_isp_af_params *af, - unsigned level) -{ - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir1[0]", af->fir1[0]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir1[1]", af->fir1[1]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir1[2]", af->fir1[2]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir1[3]", af->fir1[3]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir1[4]", af->fir1[4]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir1[5]", af->fir1[5]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir1[6]", af->fir1[6]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir2[0]", af->fir2[0]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir2[1]", af->fir2[1]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir2[2]", af->fir2[2]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir2[3]", af->fir2[3]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir2[4]", af->fir2[4]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir2[5]", af->fir2[5]); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "af_fir2[6]", af->fir2[6]); -} - -void -ia_css_s3a_dump( - const struct sh_css_isp_s3a_params *s3a, - unsigned level) -{ - ia_css_debug_dtrace(level, "S3A Support:\n"); - ia_css_ae_dump (&s3a->ae, level); - ia_css_awb_dump (&s3a->awb, level); - ia_css_af_dump (&s3a->af, level); -} - -void -ia_css_s3a_debug_dtrace( - const struct ia_css_3a_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.ae_y_coef_r=%d, config.ae_y_coef_g=%d, " - "config.ae_y_coef_b=%d, config.awb_lg_high_raw=%d, " - "config.awb_lg_low=%d, config.awb_lg_high=%d\n", - config->ae_y_coef_r, config->ae_y_coef_g, - config->ae_y_coef_b, config->awb_lg_high_raw, - config->awb_lg_low, config->awb_lg_high); -} -#endif - -void -ia_css_s3a_hmem_decode( - struct ia_css_3a_statistics *host_stats, - const struct ia_css_bh_table *hmem_buf) -{ -#if defined(HAS_NO_HMEM) - 
(void)host_stats; - (void)hmem_buf; -#else - struct ia_css_3a_rgby_output *out_ptr; - int i; - - /* pixel counts(BQ) for 3A area */ - int count_for_3a; - int sum_r, diff; - - assert(host_stats != NULL); - assert(host_stats->rgby_data != NULL); - assert(hmem_buf != NULL); - - count_for_3a = host_stats->grid.width * host_stats->grid.height - * host_stats->grid.bqs_per_grid_cell - * host_stats->grid.bqs_per_grid_cell; - - out_ptr = host_stats->rgby_data; - - ia_css_bh_hmem_decode(out_ptr, hmem_buf); - - /* Calculate sum of histogram of R, - which should not be less than count_for_3a */ - sum_r = 0; - for (i = 0; i < HMEM_UNIT_SIZE; i++) { - sum_r += out_ptr[i].r; - } - if (sum_r < count_for_3a) { - /* histogram is invalid */ - return; - } - - /* Verify for sum of histogram of R/G/B/Y */ -#if 0 - { - int sum_g = 0; - int sum_b = 0; - int sum_y = 0; - for (i = 0; i < HMEM_UNIT_SIZE; i++) { - sum_g += out_ptr[i].g; - sum_b += out_ptr[i].b; - sum_y += out_ptr[i].y; - } - if (sum_g != sum_r || sum_b != sum_r || sum_y != sum_r) { - /* histogram is invalid */ - return; - } - } -#endif - - /* - * Limit the histogram area only to 3A area. - * In DSP, the histogram of 0 is incremented for pixels - * which are outside of 3A area. That amount should be subtracted here. 
- * hist[0] = hist[0] - ((sum of all hist[]) - (pixel count for 3A area)) - */ - diff = sum_r - count_for_3a; - out_ptr[0].r -= diff; - out_ptr[0].g -= diff; - out_ptr[0].b -= diff; - out_ptr[0].y -= diff; -#endif -} - -void -ia_css_s3a_dmem_decode( - struct ia_css_3a_statistics *host_stats, - const struct ia_css_3a_output *isp_stats) -{ - int isp_width, host_width, height, i; - struct ia_css_3a_output *host_ptr; - - assert(host_stats != NULL); - assert(host_stats->data != NULL); - assert(isp_stats != NULL); - - isp_width = host_stats->grid.aligned_width; - host_width = host_stats->grid.width; - height = host_stats->grid.height; - host_ptr = host_stats->data; - - /* Getting 3A statistics from DMEM does not involve any - * transformation (like the VMEM version), we just copy the data - * using a different output width. */ - for (i = 0; i < height; i++) { - memcpy(host_ptr, isp_stats, host_width * sizeof(*host_ptr)); - isp_stats += isp_width; - host_ptr += host_width; - } -} - -/* MW: this is an ISP function */ -static inline int -merge_hi_lo_14(unsigned short hi, unsigned short lo) -{ - int val = (int) ((((unsigned int) hi << 14) & 0xfffc000) | - ((unsigned int) lo & 0x3fff)); - return val; -} - -void -ia_css_s3a_vmem_decode( - struct ia_css_3a_statistics *host_stats, - const uint16_t *isp_stats_hi, - const uint16_t *isp_stats_lo) -{ - int out_width, out_height, chunk, rest, kmax, y, x, k, elm_start, elm, ofs; - const uint16_t *hi, *lo; - struct ia_css_3a_output *output; - - assert(host_stats!= NULL); - assert(host_stats->data != NULL); - assert(isp_stats_hi != NULL); - assert(isp_stats_lo != NULL); - - output = host_stats->data; - out_width = host_stats->grid.width; - out_height = host_stats->grid.height; - hi = isp_stats_hi; - lo = isp_stats_lo; - - chunk = ISP_VEC_NELEMS >> host_stats->grid.deci_factor_log2; - chunk = max(chunk, 1); - - for (y = 0; y < out_height; y++) { - elm_start = y * ISP_S3ATBL_HI_LO_STRIDE; - rest = out_width; - x = 0; - while (x < 
out_width) { - kmax = (rest > chunk) ? chunk : rest; - ofs = y * out_width + x; - elm = elm_start + x * sizeof(*output) / sizeof(int32_t); - for (k = 0; k < kmax; k++, elm++) { - output[ofs + k].ae_y = merge_hi_lo_14( - hi[elm + chunk * 0], lo[elm + chunk * 0]); - output[ofs + k].awb_cnt = merge_hi_lo_14( - hi[elm + chunk * 1], lo[elm + chunk * 1]); - output[ofs + k].awb_gr = merge_hi_lo_14( - hi[elm + chunk * 2], lo[elm + chunk * 2]); - output[ofs + k].awb_r = merge_hi_lo_14( - hi[elm + chunk * 3], lo[elm + chunk * 3]); - output[ofs + k].awb_b = merge_hi_lo_14( - hi[elm + chunk * 4], lo[elm + chunk * 4]); - output[ofs + k].awb_gb = merge_hi_lo_14( - hi[elm + chunk * 5], lo[elm + chunk * 5]); - output[ofs + k].af_hpf1 = merge_hi_lo_14( - hi[elm + chunk * 6], lo[elm + chunk * 6]); - output[ofs + k].af_hpf2 = merge_hi_lo_14( - hi[elm + chunk * 7], lo[elm + chunk * 7]); - } - x += chunk; - rest -= chunk; - } - } -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h deleted file mode 100644 index 4bc6c0bf478f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_S3A_HOST_H -#define __IA_CSS_S3A_HOST_H - -#include "ia_css_s3a_types.h" -#include "ia_css_s3a_param.h" -#include "bh/bh_2/ia_css_bh.host.h" - -extern const struct ia_css_3a_config default_3a_config; - -void -ia_css_s3a_configure( - unsigned int raw_bit_depth); - -void -ia_css_s3a_encode( - struct sh_css_isp_s3a_params *to, - const struct ia_css_3a_config *from, - unsigned size); - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_ae_dump( - const struct sh_css_isp_ae_params *ae, - unsigned level); - -void -ia_css_awb_dump( - const struct sh_css_isp_awb_params *awb, - unsigned level); - -void -ia_css_af_dump( - const struct sh_css_isp_af_params *af, - unsigned level); - -void -ia_css_s3a_dump( - const struct sh_css_isp_s3a_params *s3a, - unsigned level); - -void -ia_css_s3a_debug_dtrace( - const struct ia_css_3a_config *config, - unsigned level); -#endif - -void -ia_css_s3a_hmem_decode( - struct ia_css_3a_statistics *host_stats, - const struct ia_css_bh_table *hmem_buf); - -void -ia_css_s3a_dmem_decode( - struct ia_css_3a_statistics *host_stats, - const struct ia_css_3a_output *isp_stats); - -void -ia_css_s3a_vmem_decode( - struct ia_css_3a_statistics *host_stats, - const uint16_t *isp_stats_hi, - const uint16_t *isp_stats_lo); - -#endif /* __IA_CSS_S3A_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h deleted file mode 100644 index 35fb0a2c921a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_S3A_PARAM_H -#define __IA_CSS_S3A_PARAM_H - -#include "type_support.h" - -/* AE (3A Support) */ -struct sh_css_isp_ae_params { - /* coefficients to calculate Y */ - int32_t y_coef_r; - int32_t y_coef_g; - int32_t y_coef_b; -}; - -/* AWB (3A Support) */ -struct sh_css_isp_awb_params { - int32_t lg_high_raw; - int32_t lg_low; - int32_t lg_high; -}; - -/* AF (3A Support) */ -struct sh_css_isp_af_params { - int32_t fir1[7]; - int32_t fir2[7]; -}; - -/* S3A (3A Support) */ -struct sh_css_isp_s3a_params { - /* coefficients to calculate Y */ - struct sh_css_isp_ae_params ae; - - /* AWB level gate */ - struct sh_css_isp_awb_params awb; - - /* af fir coefficients */ - struct sh_css_isp_af_params af; -}; - - -#endif /* __IA_CSS_S3A_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h deleted file mode 100644 index 63e70669f085..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_S3A_TYPES_H -#define __IA_CSS_S3A_TYPES_H - -/* @file -* CSS-API header file for 3A statistics parameters. -*/ - -#include - -#if (defined(SYSTEM_css_skycam_c0_system)) && (! defined(PIPE_GENERATION) ) -#include "../../../../components/stats_3a/src/stats_3a_public.h" -#endif - -/* 3A configuration. This configures the 3A statistics collection - * module. - */ - -/* 3A statistics grid - * - * ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE)) - * S3A2 (3A Support for 3A ver.2 (Histogram is used for AE)) - * ISP1: S3A1 is used. - * ISP2: S3A2 is used. - */ -struct ia_css_3a_grid_info { - -#if defined(SYSTEM_css_skycam_c0_system) - uint32_t ae_enable; /** ae enabled in binary, - 0:disabled, 1:enabled */ - struct ae_public_config_grid_config ae_grd_info; /** see description in ae_public.h*/ - - uint32_t awb_enable; /** awb enabled in binary, - 0:disabled, 1:enabled */ - struct awb_public_config_grid_config awb_grd_info; /** see description in awb_public.h*/ - - uint32_t af_enable; /** af enabled in binary, - 0:disabled, 1:enabled */ - struct af_public_grid_config af_grd_info; /** see description in af_public.h*/ - - uint32_t awb_fr_enable; /** awb_fr enabled in binary, - 0:disabled, 1:enabled */ - struct awb_fr_public_grid_config awb_fr_grd_info;/** see description in awb_fr_public.h*/ - - uint32_t elem_bit_depth; /** TODO:Taken from BYT - need input from AIQ - if needed for SKC - Bit depth of element used - to calculate 3A statistics. - This is 13, which is the normalized - bayer bit depth in DSP. */ - -#else - uint32_t enable; /** 3A statistics enabled. - 0:disabled, 1:enabled */ - uint32_t use_dmem; /** DMEM or VMEM determines layout. - 0:3A statistics are stored to VMEM, - 1:3A statistics are stored to DMEM */ - uint32_t has_histogram; /** Statistics include histogram. - 0:no histogram, 1:has histogram */ - uint32_t width; /** Width of 3A grid table. 
- (= Horizontal number of grid cells - in table, which cells have effective - statistics.) */ - uint32_t height; /** Height of 3A grid table. - (= Vertical number of grid cells - in table, which cells have effective - statistics.) */ - uint32_t aligned_width; /** Horizontal stride (for alloc). - (= Horizontal number of grid cells - in table, which means - the allocated width.) */ - uint32_t aligned_height; /** Vertical stride (for alloc). - (= Vertical number of grid cells - in table, which means - the allocated height.) */ - uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit. - (1BQ means {Gr,R,B,Gb}(2x2 pixels).) - Valid values are 8,16,32,64. */ - uint32_t deci_factor_log2; /** log2 of bqs_per_grid_cell. */ - uint32_t elem_bit_depth; /** Bit depth of element used - to calculate 3A statistics. - This is 13, which is the normalized - bayer bit depth in DSP. */ -#endif -}; - - -/* This struct should be split into 3, for AE, AWB and AF. - * However, that will require driver/ 3A lib modifications. - */ - -/* 3A configuration. This configures the 3A statistics collection - * module. - * - * ae_y_*: Coefficients to calculate luminance from bayer. - * awb_lg_*: Thresholds to check the saturated bayer pixels for AWB. - * Condition of effective pixel for AWB level gate check: - * bayer(sensor) <= awb_lg_high_raw && - * bayer(when AWB statisitcs is calculated) >= awb_lg_low && - * bayer(when AWB statisitcs is calculated) <= awb_lg_high - * af_fir*: Coefficients of high pass filter to calculate AF statistics. - * - * ISP block: S3A1(ae_y_* for AE/AF, awb_lg_* for AWB) - * S3A2(ae_y_* for AF, awb_lg_* for AWB) - * SDVS1(ae_y_*) - * SDVS2(ae_y_*) - * ISP1: S3A1 and SDVS1 are used. - * ISP2: S3A2 and SDVS2 are used. - */ -struct ia_css_3a_config { - ia_css_u0_16 ae_y_coef_r; /** Weight of R for Y. - u0.16, [0,65535], - default/ineffective 25559 */ - ia_css_u0_16 ae_y_coef_g; /** Weight of G for Y. 
- u0.16, [0,65535], - default/ineffective 32768 */ - ia_css_u0_16 ae_y_coef_b; /** Weight of B for Y. - u0.16, [0,65535], - default/ineffective 7209 */ - ia_css_u0_16 awb_lg_high_raw; /** AWB level gate high for raw. - u0.16, [0,65535], - default 65472(=1023*64), - ineffective 65535 */ - ia_css_u0_16 awb_lg_low; /** AWB level gate low. - u0.16, [0,65535], - default 64(=1*64), - ineffective 0 */ - ia_css_u0_16 awb_lg_high; /** AWB level gate high. - u0.16, [0,65535], - default 65535, - ineffective 65535 */ - ia_css_s0_15 af_fir1_coef[7]; /** AF FIR coefficients of fir1. - s0.15, [-32768,32767], - default/ineffective - -6689,-12207,-32768,32767,12207,6689,0 */ - ia_css_s0_15 af_fir2_coef[7]; /** AF FIR coefficients of fir2. - s0.15, [-32768,32767], - default/ineffective - 2053,0,-18437,32767,-18437,2053,0 */ -}; - -/* 3A statistics. This structure describes the data stored - * in each 3A grid point. - * - * ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE) - * S3A2 (3A Support for 3A ver.2) (Histogram is used for AE) - * - ae_y is used only for S3A1. - * - awb_* and af_* are used both for S3A1 and S3A2. - * ISP1: S3A1 is used. - * ISP2: S3A2 is used. - */ -struct ia_css_3a_output { - int32_t ae_y; /** Sum of Y in a statistics window, for AE. - (u19.13) */ - int32_t awb_cnt; /** Number of effective pixels - in a statistics window. - Pixels passed by the AWB level gate check are - judged as "effective". (u32) */ - int32_t awb_gr; /** Sum of Gr in a statistics window, for AWB. - All Gr pixels (not only for effective pixels) - are summed. (u19.13) */ - int32_t awb_r; /** Sum of R in a statistics window, for AWB. - All R pixels (not only for effective pixels) - are summed. (u19.13) */ - int32_t awb_b; /** Sum of B in a statistics window, for AWB. - All B pixels (not only for effective pixels) - are summed. (u19.13) */ - int32_t awb_gb; /** Sum of Gb in a statistics window, for AWB. - All Gb pixels (not only for effective pixels) - are summed. 
(u19.13) */ - int32_t af_hpf1; /** Sum of |Y| following high pass filter af_fir1 - within a statistics window, for AF. (u19.13) */ - int32_t af_hpf2; /** Sum of |Y| following high pass filter af_fir2 - within a statistics window, for AF. (u19.13) */ -}; - - -/* 3A Statistics. This structure describes the statistics that are generated - * using the provided configuration (ia_css_3a_config). - */ -struct ia_css_3a_statistics { - struct ia_css_3a_grid_info grid; /** grid info contains the dimensions of the 3A grid */ - struct ia_css_3a_output *data; /** the pointer to 3a_output[grid.width * grid.height] - containing the 3A statistics */ - struct ia_css_3a_rgby_output *rgby_data;/** the pointer to 3a_rgby_output[256] - containing the histogram */ -}; - -/* Histogram (Statistics for AE). - * - * 4 histograms(r,g,b,y), - * 256 bins for each histogram, unsigned 24bit value for each bin. - * struct ia_css_3a_rgby_output data[256]; - - * ISP block: HIST2 - * (ISP1: HIST2 is not used.) - * ISP2: HIST2 is used. - */ -struct ia_css_3a_rgby_output { - uint32_t r; /** Number of R of one bin of the histogram R. (u24) */ - uint32_t g; /** Number of G of one bin of the histogram G. (u24) */ - uint32_t b; /** Number of B of one bin of the histogram B. (u24) */ - uint32_t y; /** Number of Y of one bin of the histogram Y. (u24) */ -}; - -#endif /* __IA_CSS_S3A_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c deleted file mode 100644 index 565ae45b7541..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "assert_support.h" -#ifdef ISP2401 -#include "math_support.h" /* min() */ - -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#endif - -#include "ia_css_sc.host.h" - -void -ia_css_sc_encode( - struct sh_css_isp_sc_params *to, - struct ia_css_shading_table **from, - unsigned size) -{ - (void)size; - to->gain_shift = (*from)->fraction_bits; -} - -void -ia_css_sc_dump( - const struct sh_css_isp_sc_params *sc, - unsigned level) -{ - if (!sc) return; - ia_css_debug_dtrace(level, "Shading Correction:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "sc_gain_shift", sc->gain_shift); -} - -#ifdef ISP2401 -void -ia_css_sc_config( - struct sh_css_isp_sc_isp_config *to, - const struct ia_css_sc_configuration *from, - unsigned size) -{ - uint32_t internal_org_x_bqs = from->internal_frame_origin_x_bqs_on_sctbl; - uint32_t internal_org_y_bqs = from->internal_frame_origin_y_bqs_on_sctbl; - uint32_t slice, rest, i; - - (void)size; - - /* The internal_frame_origin_x_bqs_on_sctbl is separated to 8 times of slice_vec. 
*/ - rest = internal_org_x_bqs; - for (i = 0; i < SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES; i++) { - slice = min(rest, ((uint32_t)ISP_SLICE_NELEMS)); - rest = rest - slice; - to->interped_gain_hor_slice_bqs[i] = slice; - } - - to->internal_frame_origin_y_bqs_on_sctbl = internal_org_y_bqs; -} - -void -ia_css_sc_configure( - const struct ia_css_binary *binary, - uint32_t internal_frame_origin_x_bqs_on_sctbl, - uint32_t internal_frame_origin_y_bqs_on_sctbl) -{ - const struct ia_css_sc_configuration config = { - internal_frame_origin_x_bqs_on_sctbl, - internal_frame_origin_y_bqs_on_sctbl }; - - ia_css_configure_sc(binary, &config); -} - -#endif -/* ------ deprecated(bz675) : from ------ */ -/* It looks like @parameter{} (in *.pipe) is used to generate the process/get/set functions, - for parameters which should be used in the isp kernels. - However, the ia_css_shading_settings structure has a parameter which is used only in the css, - and does not have a parameter which is used in the isp kernels. - Then, I did not use @parameter{} to generate the get/set function - for the ia_css_shading_settings structure. 
(michie) */ -void -sh_css_get_shading_settings(const struct ia_css_isp_parameters *params, - struct ia_css_shading_settings *settings) -{ - if (settings == NULL) - return; - assert(params != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_get_shading_settings() enter: settings=%p\n", settings); - - *settings = params->shading_settings; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_get_shading_settings() leave: settings.enable_shading_table_conversion=%d\n", - settings->enable_shading_table_conversion); -} - -void -sh_css_set_shading_settings(struct ia_css_isp_parameters *params, - const struct ia_css_shading_settings *settings) -{ - if (settings == NULL) - return; - assert(params != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_set_shading_settings() enter: settings.enable_shading_table_conversion=%d\n", - settings->enable_shading_table_conversion); - - params->shading_settings = *settings; - params->shading_settings_changed = true; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_set_shading_settings() leave: return_void\n"); -} -/* ------ deprecated(bz675) : to ------ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h deleted file mode 100644 index b35ac3e4009b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SC_HOST_H -#define __IA_CSS_SC_HOST_H - -#include "sh_css_params.h" - -#include "ia_css_sc_types.h" -#include "ia_css_sc_param.h" - -void -ia_css_sc_encode( - struct sh_css_isp_sc_params *to, - struct ia_css_shading_table **from, - unsigned size); - -void -ia_css_sc_dump( - const struct sh_css_isp_sc_params *sc, - unsigned level); - -#ifdef ISP2401 -/* @brief Configure the shading correction. - * @param[out] to Parameters used in the shading correction kernel in the isp. - * @param[in] from Parameters passed from the host. - * @param[in] size Size of the sh_css_isp_sc_isp_config structure. - * - * This function passes the parameters for the shading correction from the host to the isp. - */ -void -ia_css_sc_config( - struct sh_css_isp_sc_isp_config *to, - const struct ia_css_sc_configuration *from, - unsigned size); - -/* @brief Configure the shading correction. - * @param[in] binary The binary, which has the shading correction. - * @param[in] internal_frame_origin_x_bqs_on_sctbl - * X coordinate (in bqs) of the origin of the internal frame on the shading table. - * @param[in] internal_frame_origin_y_bqs_on_sctbl - * Y coordinate (in bqs) of the origin of the internal frame on the shading table. - * - * This function calls the ia_css_configure_sc() function. - * (The ia_css_configure_sc() function is automatically generated in ia_css_isp.configs.c.) - * The ia_css_configure_sc() function calls the ia_css_sc_config() function - * to pass the parameters for the shading correction from the host to the isp. 
- */ -void -ia_css_sc_configure( - const struct ia_css_binary *binary, - uint32_t internal_frame_origin_x_bqs_on_sctbl, - uint32_t internal_frame_origin_y_bqs_on_sctbl); - -#endif -/* ------ deprecated(bz675) : from ------ */ -void -sh_css_get_shading_settings(const struct ia_css_isp_parameters *params, - struct ia_css_shading_settings *settings); - -void -sh_css_set_shading_settings(struct ia_css_isp_parameters *params, - const struct ia_css_shading_settings *settings); -/* ------ deprecated(bz675) : to ------ */ - -#endif /* __IA_CSS_SC_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_param.h deleted file mode 100644 index d997d5137634..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_param.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SC_PARAM_H -#define __IA_CSS_SC_PARAM_H - -#include "type_support.h" - -#ifdef ISP2401 -/* To position the shading center grid point on the center of output image, - * one more grid cell is needed as margin. */ -#define SH_CSS_SCTBL_CENTERING_MARGIN 1 - -/* The shading table width and height are the number of grids, not cells. The last grid should be counted. */ -#define SH_CSS_SCTBL_LAST_GRID_COUNT 1 - -/* Number of horizontal grids per color in the shading table. 
*/ -#define _ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \ - (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + \ - SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT) - -/* Number of vertical grids per color in the shading table. */ -#define _ISP_SCTBL_HEIGHT(input_height, deci_factor_log2) \ - (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + \ - SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT) - -/* Legacy API: Number of horizontal grids per color in the shading table. */ -#define _ISP_SCTBL_LEGACY_WIDTH_PER_COLOR(input_width, deci_factor_log2) \ - (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + SH_CSS_SCTBL_LAST_GRID_COUNT) - -/* Legacy API: Number of vertical grids per color in the shading table. */ -#define _ISP_SCTBL_LEGACY_HEIGHT(input_height, deci_factor_log2) \ - (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + SH_CSS_SCTBL_LAST_GRID_COUNT) - -#endif -/* SC (Shading Corrction) */ -struct sh_css_isp_sc_params { - int32_t gain_shift; -}; - -#ifdef ISP2401 -/* Number of horizontal slice times for interpolated gain: - * - * The start position of the internal frame does not match the start position of the shading table. - * To get a vector of shading gains (interpolated horizontally and vertically) - * which matches a vector on the internal frame, - * vec_slice is used for 2 adjacent vectors of shading gains. - * The number of shift times by vec_slice is 8. 
- * Max grid cell bqs to support the shading table centerting: N = 32 - * CEIL_DIV(N-1, ISP_SLICE_NELEMS) = CEIL_DIV(31, 4) = 8 - */ -#define SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES 8 - -struct sh_css_isp_sc_isp_config { - uint32_t interped_gain_hor_slice_bqs[SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES]; - uint32_t internal_frame_origin_y_bqs_on_sctbl; -}; - -#endif -#endif /* __IA_CSS_SC_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h deleted file mode 100644 index 30ce499ac8cf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SC_TYPES_H -#define __IA_CSS_SC_TYPES_H - -/* @file -* CSS-API header file for Lens Shading Correction (SC) parameters. -*/ - - -/* Number of color planes in the shading table. */ -#define IA_CSS_SC_NUM_COLORS 4 - -/* The 4 colors that a shading table consists of. - * For each color we store a grid of values. - */ -enum ia_css_sc_color { - IA_CSS_SC_COLOR_GR, /** Green on a green-red line */ - IA_CSS_SC_COLOR_R, /** Red */ - IA_CSS_SC_COLOR_B, /** Blue */ - IA_CSS_SC_COLOR_GB /** Green on a green-blue line */ -}; - -/* Lens Shading Correction table. 
- * - * This describes the color shading artefacts - * introduced by lens imperfections. To correct artefacts, - * bayer values should be multiplied by gains in this table. - * - *------------ deprecated(bz675) : from --------------------------- - * When shading_settings.enable_shading_table_conversion is set as 0, - * this shading table is directly sent to the isp. This table should contain - * the data based on the ia_css_shading_info information filled in the css. - * So, the driver needs to get the ia_css_shading_info information - * from the css, prior to generating the shading table. - * - * When shading_settings.enable_shading_table_conversion is set as 1, - * this shading table is converted in the legacy way in the css - * before it is sent to the isp. - * The driver does not need to get the ia_css_shading_info information. - * - * NOTE: - * The shading table conversion will be removed from the css in the near future, - * because it does not support the bayer scaling by sensor. - * Also, we had better generate the shading table only in one place(AIC). - * At the moment, to support the old driver which assumes the conversion is done in the css, - * shading_settings.enable_shading_table_conversion is set as 1 by default. - *------------ deprecated(bz675) : to --------------------------- - * - * ISP block: SC1 - * ISP1: SC1 is used. - * ISP2: SC1 is used. - */ -struct ia_css_shading_table { - uint32_t enable; /** Set to false for no shading correction. - The data field can be NULL when enable == true */ -/* ------ deprecated(bz675) : from ------ */ - uint32_t sensor_width; /** Native sensor width in pixels. */ - uint32_t sensor_height; /** Native sensor height in lines. - When shading_settings.enable_shading_table_conversion is set - as 0, sensor_width and sensor_height are NOT used. - These are used only in the legacy shading table conversion - in the css, when shading_settings. - enable_shading_table_conversion is set as 1. 
*/ -/* ------ deprecated(bz675) : to ------ */ - uint32_t width; /** Number of data points per line per color. - u8.0, [0,81] */ - uint32_t height; /** Number of lines of data points per color. - u8.0, [0,61] */ - uint32_t fraction_bits; /** Bits of fractional part in the data - points. - u8.0, [0,13] */ - uint16_t *data[IA_CSS_SC_NUM_COLORS]; - /** Table data, one array for each color. - Use ia_css_sc_color to index this array. - u[13-fraction_bits].[fraction_bits], [0,8191] */ -}; - -/* ------ deprecated(bz675) : from ------ */ -/* Shading Correction settings. - * - * NOTE: - * This structure should be removed when the shading table conversion is - * removed from the css. - */ -struct ia_css_shading_settings { - uint32_t enable_shading_table_conversion; /** Set to 0, - if the conversion of the shading table should be disabled - in the css. (default 1) - 0: The shading table is directly sent to the isp. - The shading table should contain the data based on the - ia_css_shading_info information filled in the css. - 1: The shading table is converted in the css, to be fitted - to the shading table definition required in the isp. - NOTE: - Previously, the shading table was always converted in the css - before it was sent to the isp, and this config was not defined. - Currently, the driver is supposed to pass the shading table - which should be directly sent to the isp. - However, some drivers may still pass the shading table which - needs the conversion without setting this config as 1. - To support such an unexpected case for the time being, - enable_shading_table_conversion is set as 1 by default - in the css. */ -}; -/* ------ deprecated(bz675) : to ------ */ - -#ifdef ISP2401 - -/* Shading Correction configuration. - * - * NOTE: The shading table size is larger than or equal to the internal frame size. - */ -struct ia_css_sc_configuration { - uint32_t internal_frame_origin_x_bqs_on_sctbl; /** Origin X (in bqs) of internal frame on shading table. 
*/ - uint32_t internal_frame_origin_y_bqs_on_sctbl; /** Origin Y (in bqs) of internal frame on shading table. */ - /** NOTE: bqs = size in BQ(Bayer Quad) unit. - 1BQ means {Gr,R,B,Gb}(2x2 pixels). - Horizontal 1 bqs corresponds to horizontal 2 pixels. - Vertical 1 bqs corresponds to vertical 2 pixels. */ -}; -#endif - -#endif /* __IA_CSS_SC_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common.host.h deleted file mode 100644 index 4eb4910798fa..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common.host.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _IA_CSS_SDIS_COMMON_HOST_H -#define _IA_CSS_SDIS_COMMON_HOST_H - -#define ISP_MAX_SDIS_HOR_PROJ_NUM_ISP \ - __ISP_SDIS_HOR_PROJ_NUM_ISP(ISP_MAX_INTERNAL_WIDTH, ISP_MAX_INTERNAL_HEIGHT, \ - SH_CSS_DIS_DECI_FACTOR_LOG2, ISP_PIPE_VERSION) -#define ISP_MAX_SDIS_VER_PROJ_NUM_ISP \ - __ISP_SDIS_VER_PROJ_NUM_ISP(ISP_MAX_INTERNAL_WIDTH, \ - SH_CSS_DIS_DECI_FACTOR_LOG2) - -#define _ISP_SDIS_HOR_COEF_NUM_VECS \ - __ISP_SDIS_HOR_COEF_NUM_VECS(ISP_INTERNAL_WIDTH) -#define ISP_MAX_SDIS_HOR_COEF_NUM_VECS \ - __ISP_SDIS_HOR_COEF_NUM_VECS(ISP_MAX_INTERNAL_WIDTH) -#define ISP_MAX_SDIS_VER_COEF_NUM_VECS \ - __ISP_SDIS_VER_COEF_NUM_VECS(ISP_MAX_INTERNAL_HEIGHT) - -/* SDIS Coefficients: */ -/* The ISP uses vectors to store the coefficients, so we round - the number of coefficients up to vectors. */ -#define __ISP_SDIS_HOR_COEF_NUM_VECS(in_width) _ISP_VECS(_ISP_BQS(in_width)) -#define __ISP_SDIS_VER_COEF_NUM_VECS(in_height) _ISP_VECS(_ISP_BQS(in_height)) - -/* SDIS Projections: - * SDIS1: Horizontal projections are calculated for each line. - * Vertical projections are calculated for each column. - * SDIS2: Projections are calculated for each grid cell. - * Grid cells that do not fall completely within the image are not - * valid. The host needs to use the bigger one for the stride but - * should only return the valid ones to the 3A. */ -#define __ISP_SDIS_HOR_PROJ_NUM_ISP(in_width, in_height, deci_factor_log2, \ - isp_pipe_version) \ - ((isp_pipe_version == 1) ? \ - CEIL_SHIFT(_ISP_BQS(in_height), deci_factor_log2) : \ - CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2)) - -#define __ISP_SDIS_VER_PROJ_NUM_ISP(in_width, deci_factor_log2) \ - CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2) - -#define SH_CSS_DIS_VER_NUM_COEF_TYPES(b) \ - (((b)->info->sp.pipeline.isp_pipe_version == 2) ? 
\ - IA_CSS_DVS2_NUM_COEF_TYPES : \ - IA_CSS_DVS_NUM_COEF_TYPES) - -#ifndef PIPE_GENERATION -#if defined(__ISP) || defined (MK_FIRMWARE) - -/* Array cannot be 2-dimensional, since driver ddr allocation does not know stride */ -struct sh_css_isp_sdis_hori_proj_tbl { - int32_t tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_HOR_PROJ_NUM_ISP]; -#if DVS2_PROJ_MARGIN > 0 - int32_t margin[DVS2_PROJ_MARGIN]; -#endif -}; - -struct sh_css_isp_sdis_vert_proj_tbl { - int32_t tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_VER_PROJ_NUM_ISP]; -#if DVS2_PROJ_MARGIN > 0 - int32_t margin[DVS2_PROJ_MARGIN]; -#endif -}; - -struct sh_css_isp_sdis_hori_coef_tbl { - VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES], ISP_MAX_SDIS_HOR_COEF_NUM_VECS*ISP_NWAY); -}; - -struct sh_css_isp_sdis_vert_coef_tbl { - VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES], ISP_MAX_SDIS_VER_COEF_NUM_VECS*ISP_NWAY); -}; - -#endif /* defined(__ISP) || defined (MK_FIRMWARE) */ -#endif /* PIPE_GENERATION */ - -#ifndef PIPE_GENERATION -struct s_sdis_config { - unsigned horicoef_vectors; - unsigned vertcoef_vectors; - unsigned horiproj_num; - unsigned vertproj_num; -}; - -extern struct s_sdis_config sdis_config; -#endif - -#endif /* _IA_CSS_SDIS_COMMON_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h deleted file mode 100644 index 381e5730d405..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SDIS_COMMON_TYPES_H -#define __IA_CSS_SDIS_COMMON_TYPES_H - -/* @file -* CSS-API header file for DVS statistics parameters. -*/ - -#include - -/* DVS statistics grid dimensions in number of cells. - */ - -struct ia_css_dvs_grid_dim { - uint32_t width; /** Width of DVS grid table in cells */ - uint32_t height; /** Height of DVS grid table in cells */ -}; - -/* DVS statistics dimensions in number of cells for - * grid, coeffieicient and projection. - */ - -struct ia_css_sdis_info { - struct { - struct ia_css_dvs_grid_dim dim; /* Dimensions */ - struct ia_css_dvs_grid_dim pad; /* Padded dimensions */ - } grid, coef, proj; - uint32_t deci_factor_log2; -}; - -/* DVS statistics grid - * - * ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes)) - * SDVS2 (DVS Support for DVS ver.2 (6-axes)) - * ISP1: SDVS1 is used. - * ISP2: SDVS2 is used. - */ -struct ia_css_dvs_grid_res { - uint32_t width; /** Width of DVS grid table. - (= Horizontal number of grid cells - in table, which cells have effective - statistics.) - For DVS1, this is equal to - the number of vertical statistics. */ - uint32_t aligned_width; /** Stride of each grid line. - (= Horizontal number of grid cells - in table, which means - the allocated width.) */ - uint32_t height; /** Height of DVS grid table. - (= Vertical number of grid cells - in table, which cells have effective - statistics.) - For DVS1, This is equal to - the number of horizontal statistics. */ - uint32_t aligned_height;/** Stride of each grid column. - (= Vertical number of grid cells - in table, which means - the allocated height.) */ -}; - -/* TODO: use ia_css_dvs_grid_res in here. 
- * However, that implies driver I/F changes - */ -struct ia_css_dvs_grid_info { - uint32_t enable; /** DVS statistics enabled. - 0:disabled, 1:enabled */ - uint32_t width; /** Width of DVS grid table. - (= Horizontal number of grid cells - in table, which cells have effective - statistics.) - For DVS1, this is equal to - the number of vertical statistics. */ - uint32_t aligned_width; /** Stride of each grid line. - (= Horizontal number of grid cells - in table, which means - the allocated width.) */ - uint32_t height; /** Height of DVS grid table. - (= Vertical number of grid cells - in table, which cells have effective - statistics.) - For DVS1, This is equal to - the number of horizontal statistics. */ - uint32_t aligned_height;/** Stride of each grid column. - (= Vertical number of grid cells - in table, which means - the allocated height.) */ - uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit. - (1BQ means {Gr,R,B,Gb}(2x2 pixels).) - For DVS1, valid value is 64. - For DVS2, valid value is only 64, - currently. */ - uint32_t num_hor_coefs; /** Number of horizontal coefficients. */ - uint32_t num_ver_coefs; /** Number of vertical coefficients. 
*/ -}; - -/* Number of DVS statistics levels - */ -#define IA_CSS_DVS_STAT_NUM_OF_LEVELS 3 - -/* DVS statistics generated by accelerator global configuration - */ -struct dvs_stat_public_dvs_global_cfg { - unsigned char kappa; - /** DVS statistics global configuration - kappa */ - unsigned char match_shift; - /** DVS statistics global configuration - match_shift */ - unsigned char ybin_mode; - /** DVS statistics global configuration - y binning mode */ -}; - -/* DVS statistics generated by accelerator level grid - * configuration - */ -struct dvs_stat_public_dvs_level_grid_cfg { - unsigned char grid_width; - /** DVS statistics grid width */ - unsigned char grid_height; - /** DVS statistics grid height */ - unsigned char block_width; - /** DVS statistics block width */ - unsigned char block_height; - /** DVS statistics block height */ -}; - -/* DVS statistics generated by accelerator level grid start - * configuration - */ -struct dvs_stat_public_dvs_level_grid_start { - unsigned short x_start; - /** DVS statistics level x start */ - unsigned short y_start; - /** DVS statistics level y start */ - unsigned char enable; - /** DVS statistics level enable */ -}; - -/* DVS statistics generated by accelerator level grid end - * configuration - */ -struct dvs_stat_public_dvs_level_grid_end { - unsigned short x_end; - /** DVS statistics level x end */ - unsigned short y_end; - /** DVS statistics level y end */ -}; - -/* DVS statistics generated by accelerator Feature Extraction - * Region Of Interest (FE-ROI) configuration - */ -struct dvs_stat_public_dvs_level_fe_roi_cfg { - unsigned char x_start; - /** DVS statistics fe-roi level x start */ - unsigned char y_start; - /** DVS statistics fe-roi level y start */ - unsigned char x_end; - /** DVS statistics fe-roi level x end */ - unsigned char y_end; - /** DVS statistics fe-roi level y end */ -}; - -/* DVS statistics generated by accelerator public configuration - */ -struct dvs_stat_public_dvs_grd_cfg { - struct 
dvs_stat_public_dvs_level_grid_cfg grd_cfg; - /** DVS statistics level grid configuration */ - struct dvs_stat_public_dvs_level_grid_start grd_start; - /** DVS statistics level grid start configuration */ - struct dvs_stat_public_dvs_level_grid_end grd_end; - /** DVS statistics level grid end configuration */ -}; - -/* DVS statistics grid generated by accelerator - */ -struct ia_css_dvs_stat_grid_info { - struct dvs_stat_public_dvs_global_cfg dvs_gbl_cfg; - /** DVS statistics global configuration (kappa, match, binning) */ - struct dvs_stat_public_dvs_grd_cfg grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS]; - /** DVS statistics grid configuration (blocks and grids) */ - struct dvs_stat_public_dvs_level_fe_roi_cfg fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS]; - /** DVS statistics FE ROI (region of interest) configuration */ -}; - -/* DVS statistics generated by accelerator default grid info - */ -#define DEFAULT_DVS_GRID_INFO \ -(union ia_css_dvs_grid_u) { \ - .dvs_stat_grid_info = (struct ia_css_dvs_stat_grid_info) { \ - .fe_roi_cfg = { \ - [1] = (struct dvs_stat_public_dvs_level_fe_roi_cfg) { \ - .x_start = 4 \ - } \ - } \ - } \ -} - -/* Union that holds all types of DVS statistics grid info in - * CSS format - * */ -union ia_css_dvs_grid_u { - struct ia_css_dvs_stat_grid_info dvs_stat_grid_info; - /** DVS statistics produced by accelerator grid info */ - struct ia_css_dvs_grid_info dvs_grid_info; - /** DVS (DVS1/DVS2) grid info */ -}; - -#endif /* __IA_CSS_SDIS_COMMON_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c deleted file mode 100644 index 0fdd696bf654..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "memory_access.h" -#include "assert_support.h" -#include "ia_css_debug.h" -#include "ia_css_sdis_types.h" -#include "sdis/common/ia_css_sdis_common.host.h" -#include "ia_css_sdis.host.h" - -const struct ia_css_dvs_coefficients default_sdis_config = { - .grid = { 0, 0, 0, 0, 0, 0, 0, 0 }, - .hor_coefs = NULL, - .ver_coefs = NULL -}; - -static void -fill_row(short *private, const short *public, unsigned width, unsigned padding) -{ - assert((int)width >= 0); - assert((int)padding >= 0); - memcpy (private, public, width*sizeof(short)); - memset (&private[width], 0, padding*sizeof(short)); -} - -void ia_css_sdis_horicoef_vmem_encode ( - struct sh_css_isp_sdis_hori_coef_tbl *to, - const struct ia_css_dvs_coefficients *from, - unsigned size) -{ - unsigned aligned_width = from->grid.aligned_width * from->grid.bqs_per_grid_cell; - unsigned width = from->grid.num_hor_coefs; - int padding = aligned_width-width; - unsigned stride = size/IA_CSS_DVS_NUM_COEF_TYPES/sizeof(short); - unsigned total_bytes = aligned_width*IA_CSS_DVS_NUM_COEF_TYPES*sizeof(short); - short *public = from->hor_coefs; - short *private = (short*)to; - unsigned type; - - /* Copy the table, add padding */ - assert(padding >= 0); - assert(total_bytes <= size); - assert(size % (IA_CSS_DVS_NUM_COEF_TYPES*ISP_VEC_NELEMS*sizeof(short)) == 0); - - for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) { - fill_row(&private[type*stride], &public[type*width], width, padding); - } -} - -void ia_css_sdis_vertcoef_vmem_encode ( - struct 
sh_css_isp_sdis_vert_coef_tbl *to, - const struct ia_css_dvs_coefficients *from, - unsigned size) -{ - unsigned aligned_height = from->grid.aligned_height * from->grid.bqs_per_grid_cell; - unsigned height = from->grid.num_ver_coefs; - int padding = aligned_height-height; - unsigned stride = size/IA_CSS_DVS_NUM_COEF_TYPES/sizeof(short); - unsigned total_bytes = aligned_height*IA_CSS_DVS_NUM_COEF_TYPES*sizeof(short); - short *public = from->ver_coefs; - short *private = (short*)to; - unsigned type; - - /* Copy the table, add padding */ - assert(padding >= 0); - assert(total_bytes <= size); - assert(size % (IA_CSS_DVS_NUM_COEF_TYPES*ISP_VEC_NELEMS*sizeof(short)) == 0); - - for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) { - fill_row(&private[type*stride], &public[type*height], height, padding); - } -} - -void ia_css_sdis_horiproj_encode ( - struct sh_css_isp_sdis_hori_proj_tbl *to, - const struct ia_css_dvs_coefficients *from, - unsigned size) -{ - (void)to; - (void)from; - (void)size; -} - -void ia_css_sdis_vertproj_encode ( - struct sh_css_isp_sdis_vert_proj_tbl *to, - const struct ia_css_dvs_coefficients *from, - unsigned size) -{ - (void)to; - (void)from; - (void)size; -} - -void ia_css_get_isp_dis_coefficients( - struct ia_css_stream *stream, - short *horizontal_coefficients, - short *vertical_coefficients) -{ - struct ia_css_isp_parameters *params; - unsigned int hor_num_isp, ver_num_isp; - unsigned int hor_num_3a, ver_num_3a; - int i; - struct ia_css_binary *dvs_binary; - - IA_CSS_ENTER("void"); - - assert(horizontal_coefficients != NULL); - assert(vertical_coefficients != NULL); - - params = stream->isp_params_configs; - - /* Only video pipe supports DVS */ - dvs_binary = ia_css_stream_get_dvs_binary(stream); - if (!dvs_binary) - return; - - hor_num_isp = dvs_binary->dis.coef.pad.width; - ver_num_isp = dvs_binary->dis.coef.pad.height; - hor_num_3a = dvs_binary->dis.coef.dim.width; - ver_num_3a = dvs_binary->dis.coef.dim.height; - - for (i = 0; i < 
IA_CSS_DVS_NUM_COEF_TYPES; i++) { - fill_row(&horizontal_coefficients[i*hor_num_isp], - ¶ms->dvs_coefs.hor_coefs[i*hor_num_3a], hor_num_3a, hor_num_isp-hor_num_3a); - } - for (i = 0; i < SH_CSS_DIS_VER_NUM_COEF_TYPES(dvs_binary); i++) { - fill_row(&vertical_coefficients[i*ver_num_isp], - ¶ms->dvs_coefs.ver_coefs[i*ver_num_3a], ver_num_3a, ver_num_isp-ver_num_3a); - } - - IA_CSS_LEAVE("void"); -} - -size_t -ia_css_sdis_hor_coef_tbl_bytes( - const struct ia_css_binary *binary) -{ - if (binary->info->sp.pipeline.isp_pipe_version == 1) - return sizeof(short) * IA_CSS_DVS_NUM_COEF_TYPES * binary->dis.coef.pad.width; - else - return sizeof(short) * IA_CSS_DVS2_NUM_COEF_TYPES * binary->dis.coef.pad.width; -} - -size_t -ia_css_sdis_ver_coef_tbl_bytes( - const struct ia_css_binary *binary) -{ - return sizeof(short) * SH_CSS_DIS_VER_NUM_COEF_TYPES(binary) * binary->dis.coef.pad.height; -} - -void -ia_css_sdis_init_info( - struct ia_css_sdis_info *dis, - unsigned sc_3a_dis_width, - unsigned sc_3a_dis_padded_width, - unsigned sc_3a_dis_height, - unsigned isp_pipe_version, - unsigned enabled) -{ - if (!enabled) { - *dis = (struct ia_css_sdis_info) { }; - return; - } - - dis->deci_factor_log2 = SH_CSS_DIS_DECI_FACTOR_LOG2; - - dis->grid.dim.width = - _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2; - dis->grid.dim.height = - _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2; - dis->grid.pad.width = - CEIL_SHIFT(_ISP_BQS(sc_3a_dis_padded_width), SH_CSS_DIS_DECI_FACTOR_LOG2); - dis->grid.pad.height = - CEIL_SHIFT(_ISP_BQS(sc_3a_dis_height), SH_CSS_DIS_DECI_FACTOR_LOG2); - - dis->coef.dim.width = - (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) << SH_CSS_DIS_DECI_FACTOR_LOG2; - dis->coef.dim.height = - (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2) << SH_CSS_DIS_DECI_FACTOR_LOG2; - dis->coef.pad.width = - __ISP_SDIS_HOR_COEF_NUM_VECS(sc_3a_dis_padded_width) * ISP_VEC_NELEMS; - dis->coef.pad.height = - 
__ISP_SDIS_VER_COEF_NUM_VECS(sc_3a_dis_height) * ISP_VEC_NELEMS; - if (isp_pipe_version == 1) { - dis->proj.dim.width = - _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2; - dis->proj.dim.height = - _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2; - } else { - dis->proj.dim.width = - (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) * - (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2); - dis->proj.dim.height = - (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) * - (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2); - } - dis->proj.pad.width = - __ISP_SDIS_HOR_PROJ_NUM_ISP(sc_3a_dis_padded_width, - sc_3a_dis_height, - SH_CSS_DIS_DECI_FACTOR_LOG2, - isp_pipe_version); - dis->proj.pad.height = - __ISP_SDIS_VER_PROJ_NUM_ISP(sc_3a_dis_padded_width, - SH_CSS_DIS_DECI_FACTOR_LOG2); -} - -void ia_css_sdis_clear_coefficients( - struct ia_css_dvs_coefficients *dvs_coefs) -{ - dvs_coefs->hor_coefs = NULL; - dvs_coefs->ver_coefs = NULL; -} - -enum ia_css_err -ia_css_get_dvs_statistics( - struct ia_css_dvs_statistics *host_stats, - const struct ia_css_isp_dvs_statistics *isp_stats) -{ - struct ia_css_isp_dvs_statistics_map *map; - enum ia_css_err ret = IA_CSS_SUCCESS; - - IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats); - - assert(host_stats != NULL); - assert(isp_stats != NULL); - - map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL); - if (map) { - mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size); - ia_css_translate_dvs_statistics(host_stats, map); - ia_css_isp_dvs_statistics_map_free(map); - } else { - IA_CSS_ERROR("out of memory"); - ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - - IA_CSS_LEAVE_ERR(ret); - return ret; -} - -void -ia_css_translate_dvs_statistics( - struct ia_css_dvs_statistics *host_stats, - const struct ia_css_isp_dvs_statistics_map *isp_stats) -{ - unsigned int hor_num_isp, ver_num_isp, hor_num_dvs, ver_num_dvs, i; - int32_t *hor_ptr_dvs, *ver_ptr_dvs, 
*hor_ptr_isp, *ver_ptr_isp; - - assert(host_stats != NULL); - assert(host_stats->hor_proj != NULL); - assert(host_stats->ver_proj != NULL); - assert(isp_stats != NULL); - assert(isp_stats->hor_proj != NULL); - assert(isp_stats->ver_proj != NULL); - - IA_CSS_ENTER("hproj=%p, vproj=%p, haddr=%p, vaddr=%p", - host_stats->hor_proj, host_stats->ver_proj, - isp_stats->hor_proj, isp_stats->ver_proj); - - hor_num_isp = host_stats->grid.aligned_height; - ver_num_isp = host_stats->grid.aligned_width; - hor_ptr_isp = isp_stats->hor_proj; - ver_ptr_isp = isp_stats->ver_proj; - hor_num_dvs = host_stats->grid.height; - ver_num_dvs = host_stats->grid.width; - hor_ptr_dvs = host_stats->hor_proj; - ver_ptr_dvs = host_stats->ver_proj; - - for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) { - memcpy(hor_ptr_dvs, hor_ptr_isp, hor_num_dvs * sizeof(int32_t)); - hor_ptr_isp += hor_num_isp; - hor_ptr_dvs += hor_num_dvs; - - memcpy(ver_ptr_dvs, ver_ptr_isp, ver_num_dvs * sizeof(int32_t)); - ver_ptr_isp += ver_num_isp; - ver_ptr_dvs += ver_num_dvs; - } - - IA_CSS_LEAVE("void"); -} - -struct ia_css_isp_dvs_statistics * -ia_css_isp_dvs_statistics_allocate( - const struct ia_css_dvs_grid_info *grid) -{ - struct ia_css_isp_dvs_statistics *me; - int hor_size, ver_size; - - assert(grid != NULL); - - IA_CSS_ENTER("grid=%p", grid); - - if (!grid->enable) - return NULL; - - me = sh_css_calloc(1,sizeof(*me)); - if (!me) - goto err; - - hor_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES * grid->aligned_height, - HIVE_ISP_DDR_WORD_BYTES); - ver_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES * grid->aligned_width, - HIVE_ISP_DDR_WORD_BYTES); - - - me->size = hor_size + ver_size; - me->data_ptr = mmgr_malloc(me->size); - if (me->data_ptr == mmgr_NULL) - goto err; - me->hor_size = hor_size; - me->hor_proj = me->data_ptr; - me->ver_size = ver_size; - me->ver_proj = me->data_ptr + hor_size; - - IA_CSS_LEAVE("return=%p", me); - - return me; -err: - ia_css_isp_dvs_statistics_free(me); - - 
IA_CSS_LEAVE("return=%p", NULL); - - return NULL; -} - -struct ia_css_isp_dvs_statistics_map * -ia_css_isp_dvs_statistics_map_allocate( - const struct ia_css_isp_dvs_statistics *isp_stats, - void *data_ptr) -{ - struct ia_css_isp_dvs_statistics_map *me; - /* Windows compiler does not like adding sizes to a void * - * so we use a local char * instead. */ - char *base_ptr; - - me = sh_css_malloc(sizeof(*me)); - if (!me) { - IA_CSS_LOG("cannot allocate memory"); - goto err; - } - - me->data_ptr = data_ptr; - me->data_allocated = data_ptr == NULL; - - if (!me->data_ptr) { - me->data_ptr = sh_css_malloc(isp_stats->size); - if (!me->data_ptr) { - IA_CSS_LOG("cannot allocate memory"); - goto err; - } - } - base_ptr = me->data_ptr; - - me->size = isp_stats->size; - /* GCC complains when we assign a char * to a void *, so these - * casts are necessary unfortunately. */ - me->hor_proj = (void*)base_ptr; - me->ver_proj = (void*)(base_ptr + isp_stats->hor_size); - - return me; -err: - if (me) - sh_css_free(me); - return NULL; -} - -void -ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me) -{ - if (me) { - if (me->data_allocated) - sh_css_free(me->data_ptr); - sh_css_free(me); - } -} - -void -ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me) -{ - if (me != NULL) { - hmm_free(me->data_ptr); - sh_css_free(me); - } -} - -void ia_css_sdis_horicoef_debug_dtrace( - const struct ia_css_dvs_coefficients *config, unsigned level) -{ - (void)config; - (void)level; -} - -void ia_css_sdis_vertcoef_debug_dtrace( - const struct ia_css_dvs_coefficients *config, unsigned level) -{ - (void)config; - (void)level; -} - -void ia_css_sdis_horiproj_debug_dtrace( - const struct ia_css_dvs_coefficients *config, unsigned level) -{ - (void)config; - (void)level; -} - -void ia_css_sdis_vertproj_debug_dtrace( - const struct ia_css_dvs_coefficients *config, unsigned level) -{ - (void)config; - (void)level; -} diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h deleted file mode 100644 index 95e2c61bbcba..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SDIS_HOST_H -#define __IA_CSS_SDIS_HOST_H - -#include "ia_css_sdis_types.h" -#include "ia_css_binary.h" -#include "ia_css_stream.h" -#include "sh_css_params.h" - -extern const struct ia_css_dvs_coefficients default_sdis_config; - -/* Opaque here, since size is binary dependent. 
*/ -struct sh_css_isp_sdis_hori_coef_tbl; -struct sh_css_isp_sdis_vert_coef_tbl; -struct sh_css_isp_sdis_hori_proj_tbl; -struct sh_css_isp_sdis_vert_proj_tbl; - -void ia_css_sdis_horicoef_vmem_encode ( - struct sh_css_isp_sdis_hori_coef_tbl *to, - const struct ia_css_dvs_coefficients *from, - unsigned size); - -void ia_css_sdis_vertcoef_vmem_encode ( - struct sh_css_isp_sdis_vert_coef_tbl *to, - const struct ia_css_dvs_coefficients *from, - unsigned size); - -void ia_css_sdis_horiproj_encode ( - struct sh_css_isp_sdis_hori_proj_tbl *to, - const struct ia_css_dvs_coefficients *from, - unsigned size); - -void ia_css_sdis_vertproj_encode ( - struct sh_css_isp_sdis_vert_proj_tbl *to, - const struct ia_css_dvs_coefficients *from, - unsigned size); - -void ia_css_get_isp_dis_coefficients( - struct ia_css_stream *stream, - short *horizontal_coefficients, - short *vertical_coefficients); - -enum ia_css_err -ia_css_get_dvs_statistics( - struct ia_css_dvs_statistics *host_stats, - const struct ia_css_isp_dvs_statistics *isp_stats); - -void -ia_css_translate_dvs_statistics( - struct ia_css_dvs_statistics *host_stats, - const struct ia_css_isp_dvs_statistics_map *isp_stats); - -struct ia_css_isp_dvs_statistics * -ia_css_isp_dvs_statistics_allocate( - const struct ia_css_dvs_grid_info *grid); - -void -ia_css_isp_dvs_statistics_free( - struct ia_css_isp_dvs_statistics *me); - -size_t ia_css_sdis_hor_coef_tbl_bytes(const struct ia_css_binary *binary); -size_t ia_css_sdis_ver_coef_tbl_bytes(const struct ia_css_binary *binary); - -void -ia_css_sdis_init_info( - struct ia_css_sdis_info *dis, - unsigned sc_3a_dis_width, - unsigned sc_3a_dis_padded_width, - unsigned sc_3a_dis_height, - unsigned isp_pipe_version, - unsigned enabled); - -void ia_css_sdis_clear_coefficients( - struct ia_css_dvs_coefficients *dvs_coefs); - -void ia_css_sdis_horicoef_debug_dtrace( - const struct ia_css_dvs_coefficients *config, unsigned level); - -void ia_css_sdis_vertcoef_debug_dtrace( - const struct 
ia_css_dvs_coefficients *config, unsigned level); - -void ia_css_sdis_horiproj_debug_dtrace( - const struct ia_css_dvs_coefficients *config, unsigned level); - -void ia_css_sdis_vertproj_debug_dtrace( - const struct ia_css_dvs_coefficients *config, unsigned level); - -#endif /* __IA_CSS_SDIS_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h deleted file mode 100644 index d2ee57008fb6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SDIS_TYPES_H -#define __IA_CSS_SDIS_TYPES_H - -/* @file -* CSS-API header file for DVS statistics parameters. -*/ - -/* Number of DVS coefficient types */ -#define IA_CSS_DVS_NUM_COEF_TYPES 6 - -#ifndef PIPE_GENERATION -#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h" -#endif - -/* DVS 1.0 Coefficients. - * This structure describes the coefficients that are needed for the dvs statistics. 
- */ - -struct ia_css_dvs_coefficients { - struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */ - int16_t *hor_coefs; /** the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES] - containing the horizontal coefficients */ - int16_t *ver_coefs; /** the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES] - containing the vertical coefficients */ -}; - -/* DVS 1.0 Statistics. - * This structure describes the statistics that are generated using the provided coefficients. - */ - -struct ia_css_dvs_statistics { - struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */ - int32_t *hor_proj; /** the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES] - containing the horizontal projections */ - int32_t *ver_proj; /** the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES] - containing the vertical projections */ -}; - -#endif /* __IA_CSS_SDIS_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c deleted file mode 100644 index 9bccb6473154..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c +++ /dev/null @@ -1,338 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include "memory_access.h" -#include "ia_css_debug.h" -#include "ia_css_sdis2.host.h" - -const struct ia_css_dvs2_coefficients default_sdis2_config = { - .grid = { 0, 0, 0, 0, 0, 0, 0, 0 }, - .hor_coefs = { NULL, NULL, NULL, NULL }, - .ver_coefs = { NULL, NULL, NULL, NULL }, -}; - -static void -fill_row(short *private, const short *public, unsigned width, unsigned padding) -{ - memcpy (private, public, width*sizeof(short)); - memset (&private[width], 0, padding*sizeof(short)); -} - -void ia_css_sdis2_horicoef_vmem_encode ( - struct sh_css_isp_sdis_hori_coef_tbl *to, - const struct ia_css_dvs2_coefficients *from, - unsigned size) -{ - unsigned aligned_width = from->grid.aligned_width * from->grid.bqs_per_grid_cell; - unsigned width = from->grid.num_hor_coefs; - int padding = aligned_width-width; - unsigned stride = size/IA_CSS_DVS2_NUM_COEF_TYPES/sizeof(short); - unsigned total_bytes = aligned_width*IA_CSS_DVS2_NUM_COEF_TYPES*sizeof(short); - short *private = (short*)to; - - - /* Copy the table, add padding */ - assert(padding >= 0); - assert(total_bytes <= size); - assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES*ISP_VEC_NELEMS*sizeof(short)) == 0); - fill_row(&private[0*stride], from->hor_coefs.odd_real, width, padding); - fill_row(&private[1*stride], from->hor_coefs.odd_imag, width, padding); - fill_row(&private[2*stride], from->hor_coefs.even_real, width, padding); - fill_row(&private[3*stride], from->hor_coefs.even_imag, width, padding); -} - -void ia_css_sdis2_vertcoef_vmem_encode ( - struct sh_css_isp_sdis_vert_coef_tbl *to, - const struct ia_css_dvs2_coefficients *from, - unsigned size) -{ - unsigned aligned_height = from->grid.aligned_height * from->grid.bqs_per_grid_cell; - unsigned height = from->grid.num_ver_coefs; - int padding = aligned_height-height; - unsigned stride = size/IA_CSS_DVS2_NUM_COEF_TYPES/sizeof(short); - unsigned total_bytes = aligned_height*IA_CSS_DVS2_NUM_COEF_TYPES*sizeof(short); - short *private = (short*)to; - - /* Copy 
the table, add padding */ - assert(padding >= 0); - assert(total_bytes <= size); - assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES*ISP_VEC_NELEMS*sizeof(short)) == 0); - fill_row(&private[0*stride], from->ver_coefs.odd_real, height, padding); - fill_row(&private[1*stride], from->ver_coefs.odd_imag, height, padding); - fill_row(&private[2*stride], from->ver_coefs.even_real, height, padding); - fill_row(&private[3*stride], from->ver_coefs.even_imag, height, padding); -} - -void ia_css_sdis2_horiproj_encode ( - struct sh_css_isp_sdis_hori_proj_tbl *to, - const struct ia_css_dvs2_coefficients *from, - unsigned size) -{ - (void)to; - (void)from; - (void)size; -} - -void ia_css_sdis2_vertproj_encode ( - struct sh_css_isp_sdis_vert_proj_tbl *to, - const struct ia_css_dvs2_coefficients *from, - unsigned size) -{ - (void)to; - (void)from; - (void)size; -} - -void ia_css_get_isp_dvs2_coefficients( - struct ia_css_stream *stream, - short *hor_coefs_odd_real, - short *hor_coefs_odd_imag, - short *hor_coefs_even_real, - short *hor_coefs_even_imag, - short *ver_coefs_odd_real, - short *ver_coefs_odd_imag, - short *ver_coefs_even_real, - short *ver_coefs_even_imag) -{ - struct ia_css_isp_parameters *params; - unsigned int hor_num_3a, ver_num_3a; - unsigned int hor_num_isp, ver_num_isp; - struct ia_css_binary *dvs_binary; - - IA_CSS_ENTER("void"); - - assert(stream != NULL); - assert(hor_coefs_odd_real != NULL); - assert(hor_coefs_odd_imag != NULL); - assert(hor_coefs_even_real != NULL); - assert(hor_coefs_even_imag != NULL); - assert(ver_coefs_odd_real != NULL); - assert(ver_coefs_odd_imag != NULL); - assert(ver_coefs_even_real != NULL); - assert(ver_coefs_even_imag != NULL); - - params = stream->isp_params_configs; - - /* Only video pipe supports DVS */ - dvs_binary = ia_css_stream_get_dvs_binary(stream); - if (!dvs_binary) - return; - - hor_num_3a = dvs_binary->dis.coef.dim.width; - ver_num_3a = dvs_binary->dis.coef.dim.height; - hor_num_isp = dvs_binary->dis.coef.pad.width; - 
ver_num_isp = dvs_binary->dis.coef.pad.height; - - memcpy (hor_coefs_odd_real, params->dvs2_coefs.hor_coefs.odd_real, hor_num_3a * sizeof(short)); - memcpy (hor_coefs_odd_imag, params->dvs2_coefs.hor_coefs.odd_imag, hor_num_3a * sizeof(short)); - memcpy (hor_coefs_even_real, params->dvs2_coefs.hor_coefs.even_real, hor_num_3a * sizeof(short)); - memcpy (hor_coefs_even_imag, params->dvs2_coefs.hor_coefs.even_imag, hor_num_3a * sizeof(short)); - memcpy (ver_coefs_odd_real, params->dvs2_coefs.ver_coefs.odd_real, ver_num_3a * sizeof(short)); - memcpy (ver_coefs_odd_imag, params->dvs2_coefs.ver_coefs.odd_imag, ver_num_3a * sizeof(short)); - memcpy (ver_coefs_even_real, params->dvs2_coefs.ver_coefs.even_real, ver_num_3a * sizeof(short)); - memcpy (ver_coefs_even_imag, params->dvs2_coefs.ver_coefs.even_imag, ver_num_3a * sizeof(short)); - - IA_CSS_LEAVE("void"); -} - -void ia_css_sdis2_clear_coefficients( - struct ia_css_dvs2_coefficients *dvs2_coefs) -{ - dvs2_coefs->hor_coefs.odd_real = NULL; - dvs2_coefs->hor_coefs.odd_imag = NULL; - dvs2_coefs->hor_coefs.even_real = NULL; - dvs2_coefs->hor_coefs.even_imag = NULL; - dvs2_coefs->ver_coefs.odd_real = NULL; - dvs2_coefs->ver_coefs.odd_imag = NULL; - dvs2_coefs->ver_coefs.even_real = NULL; - dvs2_coefs->ver_coefs.even_imag = NULL; -} - -enum ia_css_err -ia_css_get_dvs2_statistics( - struct ia_css_dvs2_statistics *host_stats, - const struct ia_css_isp_dvs_statistics *isp_stats) -{ - struct ia_css_isp_dvs_statistics_map *map; - enum ia_css_err ret = IA_CSS_SUCCESS; - - IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats); - - assert(host_stats != NULL); - assert(isp_stats != NULL); - - map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL); - if (map) { - mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size); - ia_css_translate_dvs2_statistics(host_stats, map); - ia_css_isp_dvs_statistics_map_free(map); - } else { - IA_CSS_ERROR("out of memory"); - ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - - 
IA_CSS_LEAVE_ERR(ret); - return ret; -} - -void -ia_css_translate_dvs2_statistics( - struct ia_css_dvs2_statistics *host_stats, - const struct ia_css_isp_dvs_statistics_map *isp_stats) -{ - unsigned int size_bytes, table_width, table_size, height; - unsigned int src_offset = 0, dst_offset = 0; - int32_t *htemp_ptr, *vtemp_ptr; - - assert(host_stats != NULL); - assert(host_stats->hor_prod.odd_real != NULL); - assert(host_stats->hor_prod.odd_imag != NULL); - assert(host_stats->hor_prod.even_real != NULL); - assert(host_stats->hor_prod.even_imag != NULL); - assert(host_stats->ver_prod.odd_real != NULL); - assert(host_stats->ver_prod.odd_imag != NULL); - assert(host_stats->ver_prod.even_real != NULL); - assert(host_stats->ver_prod.even_imag != NULL); - assert(isp_stats != NULL); - assert(isp_stats->hor_proj != NULL); - assert(isp_stats->ver_proj != NULL); - - IA_CSS_ENTER("hor_coefs.odd_real=%p, hor_coefs.odd_imag=%p, " - "hor_coefs.even_real=%p, hor_coefs.even_imag=%p, " - "ver_coefs.odd_real=%p, ver_coefs.odd_imag=%p, " - "ver_coefs.even_real=%p, ver_coefs.even_imag=%p, " - "haddr=%p, vaddr=%p", - host_stats->hor_prod.odd_real, host_stats->hor_prod.odd_imag, - host_stats->hor_prod.even_real, host_stats->hor_prod.even_imag, - host_stats->ver_prod.odd_real, host_stats->ver_prod.odd_imag, - host_stats->ver_prod.even_real, host_stats->ver_prod.even_imag, - isp_stats->hor_proj, isp_stats->ver_proj); - - /* Host side: reflecting the true width in bytes */ - size_bytes = host_stats->grid.aligned_width * sizeof(*htemp_ptr); - - /* DDR side: need to be aligned to the system bus width */ - /* statistics table width in terms of 32-bit words*/ - table_width = CEIL_MUL(size_bytes, HIVE_ISP_DDR_WORD_BYTES) / sizeof(*htemp_ptr); - table_size = table_width * host_stats->grid.aligned_height; - - htemp_ptr = isp_stats->hor_proj; /* horizontal stats */ - vtemp_ptr = isp_stats->ver_proj; /* vertical stats */ - for (height = 0; height < host_stats->grid.aligned_height; height++) { - /* 
hor stats */ - memcpy(host_stats->hor_prod.odd_real + dst_offset, - &htemp_ptr[0*table_size+src_offset], size_bytes); - memcpy(host_stats->hor_prod.odd_imag + dst_offset, - &htemp_ptr[1*table_size+src_offset], size_bytes); - memcpy(host_stats->hor_prod.even_real + dst_offset, - &htemp_ptr[2*table_size+src_offset], size_bytes); - memcpy(host_stats->hor_prod.even_imag + dst_offset, - &htemp_ptr[3*table_size+src_offset], size_bytes); - - /* ver stats */ - memcpy(host_stats->ver_prod.odd_real + dst_offset, - &vtemp_ptr[0*table_size+src_offset], size_bytes); - memcpy(host_stats->ver_prod.odd_imag + dst_offset, - &vtemp_ptr[1*table_size+src_offset], size_bytes); - memcpy(host_stats->ver_prod.even_real + dst_offset, - &vtemp_ptr[2*table_size+src_offset], size_bytes); - memcpy(host_stats->ver_prod.even_imag + dst_offset, - &vtemp_ptr[3*table_size+src_offset], size_bytes); - - src_offset += table_width; /* aligned table width */ - dst_offset += host_stats->grid.aligned_width; - } - - IA_CSS_LEAVE("void"); -} - -struct ia_css_isp_dvs_statistics * -ia_css_isp_dvs2_statistics_allocate( - const struct ia_css_dvs_grid_info *grid) -{ - struct ia_css_isp_dvs_statistics *me; - int size; - - assert(grid != NULL); - - IA_CSS_ENTER("grid=%p", grid); - - if (!grid->enable) - return NULL; - - me = sh_css_calloc(1,sizeof(*me)); - if (!me) - goto err; - - /* on ISP 2 SDIS DMA model, every row of projection table width must be - aligned to HIVE_ISP_DDR_WORD_BYTES - */ - size = CEIL_MUL(sizeof(int) * grid->aligned_width, HIVE_ISP_DDR_WORD_BYTES) - * grid->aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES; - - me->size = 2*size; - me->data_ptr = mmgr_malloc(me->size); - if (me->data_ptr == mmgr_NULL) - goto err; - me->hor_proj = me->data_ptr; - me->hor_size = size; - me->ver_proj = me->data_ptr + size; - me->ver_size = size; - - IA_CSS_LEAVE("return=%p", me); - return me; -err: - ia_css_isp_dvs2_statistics_free(me); - IA_CSS_LEAVE("return=%p", NULL); - - return NULL; -} - -void 
-ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me) -{ - if (me != NULL) { - hmm_free(me->data_ptr); - sh_css_free(me); - } -} - -void ia_css_sdis2_horicoef_debug_dtrace( - const struct ia_css_dvs2_coefficients *config, unsigned level) -{ - (void)config; - (void)level; -} - -void ia_css_sdis2_vertcoef_debug_dtrace( - const struct ia_css_dvs2_coefficients *config, unsigned level) -{ - (void)config; - (void)level; -} - -void ia_css_sdis2_horiproj_debug_dtrace( - const struct ia_css_dvs2_coefficients *config, unsigned level) -{ - (void)config; - (void)level; -} - -void ia_css_sdis2_vertproj_debug_dtrace( - const struct ia_css_dvs2_coefficients *config, unsigned level) -{ - (void)config; - (void)level; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h deleted file mode 100644 index 60198d4279b4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SDIS2_HOST_H -#define __IA_CSS_SDIS2_HOST_H - -#include "ia_css_sdis2_types.h" -#include "ia_css_binary.h" -#include "ia_css_stream.h" -#include "sh_css_params.h" - -extern const struct ia_css_dvs2_coefficients default_sdis2_config; - -/* Opaque here, since size is binary dependent. 
*/ -struct sh_css_isp_sdis_hori_coef_tbl; -struct sh_css_isp_sdis_vert_coef_tbl; -struct sh_css_isp_sdis_hori_proj_tbl; -struct sh_css_isp_sdis_vert_proj_tbl; - -void ia_css_sdis2_horicoef_vmem_encode ( - struct sh_css_isp_sdis_hori_coef_tbl *to, - const struct ia_css_dvs2_coefficients *from, - unsigned size); - -void ia_css_sdis2_vertcoef_vmem_encode ( - struct sh_css_isp_sdis_vert_coef_tbl *to, - const struct ia_css_dvs2_coefficients *from, - unsigned size); - -void ia_css_sdis2_horiproj_encode ( - struct sh_css_isp_sdis_hori_proj_tbl *to, - const struct ia_css_dvs2_coefficients *from, - unsigned size); - -void ia_css_sdis2_vertproj_encode ( - struct sh_css_isp_sdis_vert_proj_tbl *to, - const struct ia_css_dvs2_coefficients *from, - unsigned size); - -void ia_css_get_isp_dvs2_coefficients( - struct ia_css_stream *stream, - short *hor_coefs_odd_real, - short *hor_coefs_odd_imag, - short *hor_coefs_even_real, - short *hor_coefs_even_imag, - short *ver_coefs_odd_real, - short *ver_coefs_odd_imag, - short *ver_coefs_even_real, - short *ver_coefs_even_imag); - -void ia_css_sdis2_clear_coefficients( - struct ia_css_dvs2_coefficients *dvs2_coefs); - -enum ia_css_err -ia_css_get_dvs2_statistics( - struct ia_css_dvs2_statistics *host_stats, - const struct ia_css_isp_dvs_statistics *isp_stats); - -void -ia_css_translate_dvs2_statistics( - struct ia_css_dvs2_statistics *host_stats, - const struct ia_css_isp_dvs_statistics_map *isp_stats); - -struct ia_css_isp_dvs_statistics * -ia_css_isp_dvs2_statistics_allocate( - const struct ia_css_dvs_grid_info *grid); - -void -ia_css_isp_dvs2_statistics_free( - struct ia_css_isp_dvs_statistics *me); - -void ia_css_sdis2_horicoef_debug_dtrace( - const struct ia_css_dvs2_coefficients *config, unsigned level); - -void ia_css_sdis2_vertcoef_debug_dtrace( - const struct ia_css_dvs2_coefficients *config, unsigned level); - -void ia_css_sdis2_horiproj_debug_dtrace( - const struct ia_css_dvs2_coefficients *config, unsigned level); - -void 
ia_css_sdis2_vertproj_debug_dtrace( - const struct ia_css_dvs2_coefficients *config, unsigned level); - -#endif /* __IA_CSS_SDIS2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h deleted file mode 100644 index 2a0bc4031746..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_SDIS2_TYPES_H -#define __IA_CSS_SDIS2_TYPES_H - -/* @file -* CSS-API header file for DVS statistics parameters. -*/ - -/* Number of DVS coefficient types */ -#define IA_CSS_DVS2_NUM_COEF_TYPES 4 - -#ifndef PIPE_GENERATION -#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h" -#endif - -/* DVS 2.0 Coefficient types. This structure contains 4 pointers to - * arrays that contain the coeffients for each type. - */ -struct ia_css_dvs2_coef_types { - int16_t *odd_real; /** real part of the odd coefficients*/ - int16_t *odd_imag; /** imaginary part of the odd coefficients*/ - int16_t *even_real;/** real part of the even coefficients*/ - int16_t *even_imag;/** imaginary part of the even coefficients*/ -}; - -/* DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics. - * e.g. 
hor_coefs.odd_real is the pointer to int16_t[grid.num_hor_coefs] containing the horizontal odd real - * coefficients. - */ -struct ia_css_dvs2_coefficients { - struct ia_css_dvs_grid_info grid; /** grid info contains the dimensions of the dvs grid */ - struct ia_css_dvs2_coef_types hor_coefs; /** struct with pointers that contain the horizontal coefficients */ - struct ia_css_dvs2_coef_types ver_coefs; /** struct with pointers that contain the vertical coefficients */ -}; - -/* DVS 2.0 Statistic types. This structure contains 4 pointers to - * arrays that contain the statistics for each type. - */ -struct ia_css_dvs2_stat_types { - int32_t *odd_real; /** real part of the odd statistics*/ - int32_t *odd_imag; /** imaginary part of the odd statistics*/ - int32_t *even_real;/** real part of the even statistics*/ - int32_t *even_imag;/** imaginary part of the even statistics*/ -}; - -/* DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients. - * e.g. hor_prod.odd_real is the pointer to int16_t[grid.aligned_height][grid.aligned_width] containing - * the horizontal odd real statistics. 
Valid statistics data area is int16_t[0..grid.height-1][0..grid.width-1] - */ -struct ia_css_dvs2_statistics { - struct ia_css_dvs_grid_info grid; /** grid info contains the dimensions of the dvs grid */ - struct ia_css_dvs2_stat_types hor_prod; /** struct with pointers that contain the horizontal statistics */ - struct ia_css_dvs2_stat_types ver_prod; /** struct with pointers that contain the vertical statistics */ -}; - -#endif /* __IA_CSS_SDIS2_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c deleted file mode 100644 index 78a113bfe8f1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_debug.h" -#include "ia_css_tdf.host.h" - -static const int16_t g_pyramid[8][8] = { -{128, 384, 640, 896, 896, 640, 384, 128}, -{384, 1152, 1920, 2688, 2688, 1920, 1152, 384}, -{640, 1920, 3200, 4480, 4480, 3200, 1920, 640}, -{896, 2688, 4480, 6272, 6272, 4480, 2688, 896}, -{896, 2688, 4480, 6272, 6272, 4480, 2688, 896}, -{640, 1920, 3200, 4480, 4480, 3200, 1920, 640}, -{384, 1152, 1920, 2688, 2688, 1920, 1152, 384}, -{128, 384, 640, 896, 896, 640, 384, 128} -}; - -void -ia_css_tdf_vmem_encode( - struct ia_css_isp_tdf_vmem_params *to, - const struct ia_css_tdf_config *from, - size_t size) -{ - unsigned i; - (void)size; - - for (i = 0; i < ISP_VEC_NELEMS; i++) { - to->pyramid[0][i] = g_pyramid[i/8][i%8]; - to->threshold_flat[0][i] = from->thres_flat_table[i]; - to->threshold_detail[0][i] = from->thres_detail_table[i]; - } - -} - -void -ia_css_tdf_encode( - struct ia_css_isp_tdf_dmem_params *to, - const struct ia_css_tdf_config *from, - size_t size) -{ - (void)size; - to->Epsilon_0 = from->epsilon_0; - to->Epsilon_1 = from->epsilon_1; - to->EpsScaleText = from->eps_scale_text; - to->EpsScaleEdge = from->eps_scale_edge; - to->Sepa_flat = from->sepa_flat; - to->Sepa_Edge = from->sepa_edge; - to->Blend_Flat = from->blend_flat; - to->Blend_Text = from->blend_text; - to->Blend_Edge = from->blend_edge; - to->Shading_Gain = from->shading_gain; - to->Shading_baseGain = from->shading_base_gain; - to->LocalY_Gain = from->local_y_gain; - to->LocalY_baseGain = from->local_y_base_gain; -} - -void -ia_css_tdf_debug_dtrace( - const struct ia_css_tdf_config *config, - unsigned level) -{ - (void)config; - (void)level; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h deleted file mode 100644 index bd628a18e839..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_TDF_HOST_H -#define __IA_CSS_TDF_HOST_H - -#include "ia_css_tdf_types.h" -#include "ia_css_tdf_param.h" - -void -ia_css_tdf_vmem_encode( - struct ia_css_isp_tdf_vmem_params *to, - const struct ia_css_tdf_config *from, - size_t size); - -void -ia_css_tdf_encode( - struct ia_css_isp_tdf_dmem_params *to, - const struct ia_css_tdf_config *from, - size_t size); - -void -ia_css_tdf_debug_dtrace( - const struct ia_css_tdf_config *config, unsigned level) -; - -#endif /* __IA_CSS_TDF_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h deleted file mode 100644 index 9334f2e0698b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_TDF_PARAM_H -#define __IA_CSS_TDF_PARAM_H - -#include "type_support.h" -#include "vmem.h" /* needed for VMEM_ARRAY */ - -struct ia_css_isp_tdf_vmem_params { - VMEM_ARRAY(pyramid, ISP_VEC_NELEMS); - VMEM_ARRAY(threshold_flat, ISP_VEC_NELEMS); - VMEM_ARRAY(threshold_detail, ISP_VEC_NELEMS); -}; - -struct ia_css_isp_tdf_dmem_params { - int32_t Epsilon_0; - int32_t Epsilon_1; - int32_t EpsScaleText; - int32_t EpsScaleEdge; - int32_t Sepa_flat; - int32_t Sepa_Edge; - int32_t Blend_Flat; - int32_t Blend_Text; - int32_t Blend_Edge; - int32_t Shading_Gain; - int32_t Shading_baseGain; - int32_t LocalY_Gain; - int32_t LocalY_baseGain; -}; - -#endif /* __IA_CSS_TDF_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h deleted file mode 100644 index 91ea8dd4651d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_TDF_TYPES_H -#define __IA_CSS_TDF_TYPES_H - -/* @file -* CSS-API header file for Transform Domain Filter parameters. -*/ - -#include "type_support.h" - -/* Transform Domain Filter configuration - * - * \brief TDF public parameters. - * \details Struct with all parameters for the TDF kernel that can be set - * from the CSS API. - * - * ISP2.6.1: TDF is used. - */ -struct ia_css_tdf_config { - int32_t thres_flat_table[64]; /** Final optimized strength table of NR for flat region. */ - int32_t thres_detail_table[64]; /** Final optimized strength table of NR for detail region. */ - int32_t epsilon_0; /** Coefficient to control variance for dark area (for flat region). */ - int32_t epsilon_1; /** Coefficient to control variance for bright area (for flat region). */ - int32_t eps_scale_text; /** Epsilon scaling coefficient for texture region. */ - int32_t eps_scale_edge; /** Epsilon scaling coefficient for edge region. */ - int32_t sepa_flat; /** Threshold to judge flat (edge < m_Flat_thre). */ - int32_t sepa_edge; /** Threshold to judge edge (edge > m_Edge_thre). */ - int32_t blend_flat; /** Blending ratio at flat region. */ - int32_t blend_text; /** Blending ratio at texture region. */ - int32_t blend_edge; /** Blending ratio at edge region. */ - int32_t shading_gain; /** Gain of Shading control. */ - int32_t shading_base_gain; /** Base Gain of Shading control. */ - int32_t local_y_gain; /** Gain of local luminance control. */ - int32_t local_y_base_gain; /** Base gain of local luminance control. */ - int32_t rad_x_origin; /** Initial x coord. for radius computation. */ - int32_t rad_y_origin; /** Initial y coord. for radius computation. 
*/ -}; - -#endif /* __IA_CSS_TDF_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h deleted file mode 100644 index 223423f8c40b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h +++ /dev/null @@ -1,61 +0,0 @@ -#ifdef ISP2401 -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ - -#ifndef _IA_CSS_TNR3_TYPES_H -#define _IA_CSS_TNR3_TYPES_H - -/* @file -* CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel -*/ - -/** - * \brief Number of piecewise linear segments. - * \details The parameters to TNR3 are specified as a piecewise linear segment. - * The number of such segments is fixed at 3. - */ -#define TNR3_NUM_SEGMENTS 3 - -/* Temporal Noise Reduction v3 (TNR3) configuration. - * The parameter to this kernel is fourfold - * 1. Three piecewise linear graphs (one for each plane) with three segments - * each. Each line graph has Luma values on the x axis and sigma values for - * each plane on the y axis. The three linear segments may have a different - * slope and the point of Luma value which where the slope may change is called - * a "Knee" point. As there are three such segments, four points need to be - * specified each on the Luma axis and the per plane Sigma axis. 
On the Luma - * axis two points are fixed (namely 0 and maximum luma value - depending on - * ISP bit depth). The other two points are the points where the slope may - * change its value. These two points are called knee points. The four points on - * the per plane sigma axis are also specified at the interface. - * 2. One rounding adjustment parameter for each plane - * 3. One maximum feedback threshold value for each plane - * 4. Selection of the reference frame buffer to be used for noise reduction. - */ -struct ia_css_tnr3_kernel_config { - unsigned int maxfb_y; /** Maximum Feedback Gain for Y */ - unsigned int maxfb_u; /** Maximum Feedback Gain for U */ - unsigned int maxfb_v; /** Maximum Feedback Gain for V */ - unsigned int round_adj_y; /** Rounding Adjust for Y */ - unsigned int round_adj_u; /** Rounding Adjust for U */ - unsigned int round_adj_v; /** Rounding Adjust for V */ - unsigned int knee_y[TNR3_NUM_SEGMENTS - 1]; /** Knee points */ - unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for Y at points Y0, Y1, Y2, Y3 */ - unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for U at points U0, U1, U2, U3 */ - unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for V at points V0, V1, V2, V3 */ - unsigned int ref_buf_select; /** Selection of the reference buffer */ -}; - -#endif -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c deleted file mode 100644 index 222a7bd7f176..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "ia_css_frame.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "sh_css_frac.h" -#include "assert_support.h" -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#include "isp.h" - -#include "ia_css_tnr.host.h" -const struct ia_css_tnr_config default_tnr_config = { - 32768, - 32, - 32, -}; - -void -ia_css_tnr_encode( - struct sh_css_isp_tnr_params *to, - const struct ia_css_tnr_config *from, - unsigned size) -{ - (void)size; - to->coef = - uDIGIT_FITTING(from->gain, 16, SH_CSS_TNR_COEF_SHIFT); - to->threshold_Y = - uDIGIT_FITTING(from->threshold_y, 16, SH_CSS_ISP_YUV_BITS); - to->threshold_C = - uDIGIT_FITTING(from->threshold_uv, 16, SH_CSS_ISP_YUV_BITS); -} - -void -ia_css_tnr_dump( - const struct sh_css_isp_tnr_params *tnr, - unsigned level) -{ - if (!tnr) return; - ia_css_debug_dtrace(level, "Temporal Noise Reduction:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "tnr_coef", tnr->coef); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "tnr_threshold_Y", tnr->threshold_Y); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "tnr_threshold_C", tnr->threshold_C); -} - -void -ia_css_tnr_debug_dtrace( - const struct ia_css_tnr_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.gain=%d, " - "config.threshold_y=%d, config.threshold_uv=%d\n", - config->gain, - config->threshold_y, config->threshold_uv); -} - -void -ia_css_tnr_config( - struct sh_css_isp_tnr_isp_config *to, - const struct ia_css_tnr_configuration 
*from, - unsigned size) -{ - unsigned elems_a = ISP_VEC_NELEMS; - unsigned i; - - (void)size; - ia_css_dma_configure_from_info(&to->port_b, &from->tnr_frames[0]->info); - to->width_a_over_b = elems_a / to->port_b.elems; - to->frame_height = from->tnr_frames[0]->info.res.height; -#ifndef ISP2401 - for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) { -#else - for (i = 0; i < NUM_TNR_FRAMES; i++) { -#endif - to->tnr_frame_addr[i] = from->tnr_frames[i]->data + from->tnr_frames[i]->planes.yuyv.offset; - } - - /* Assume divisiblity here, may need to generalize to fixed point. */ - assert (elems_a % to->port_b.elems == 0); -} - -void -ia_css_tnr_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame **frames) -{ - struct ia_css_tnr_configuration config; - unsigned i; - -#ifndef ISP2401 - for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) -#else - for (i = 0; i < NUM_TNR_FRAMES; i++) -#endif - config.tnr_frames[i] = frames[i]; - - ia_css_configure_tnr(binary, &config); -} - -void -ia_css_init_tnr_state( - struct sh_css_isp_tnr_dmem_state *state, - size_t size) -{ - (void)size; - -#ifndef ISP2401 - assert(NUM_VIDEO_TNR_FRAMES >= 2); -#endif - assert(sizeof(*state) == size); - state->tnr_in_buf_idx = 0; - state->tnr_out_buf_idx = 1; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h deleted file mode 100644 index 9290dfad574e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_TNR_HOST_H -#define __IA_CSS_TNR_HOST_H - -#include "ia_css_binary.h" -#include "ia_css_tnr_state.h" -#include "ia_css_tnr_types.h" -#include "ia_css_tnr_param.h" - -extern const struct ia_css_tnr_config default_tnr_config; - -void -ia_css_tnr_encode( - struct sh_css_isp_tnr_params *to, - const struct ia_css_tnr_config *from, - unsigned size); - -void -ia_css_tnr_dump( - const struct sh_css_isp_tnr_params *tnr, - unsigned level); - -void -ia_css_tnr_debug_dtrace( - const struct ia_css_tnr_config *config, - unsigned level); - -void -ia_css_tnr_config( - struct sh_css_isp_tnr_isp_config *to, - const struct ia_css_tnr_configuration *from, - unsigned size); - -void -ia_css_tnr_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame **frames); - -void -ia_css_init_tnr_state( - struct sh_css_isp_tnr_dmem_state *state, - size_t size); -#endif /* __IA_CSS_TNR_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h deleted file mode 100644 index db4a7cced264..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_TNR_PARAM_H -#define __IA_CSS_TNR_PARAM_H - -#include "type_support.h" -#include "sh_css_defs.h" -#include "dma.h" - -/* TNR (Temporal Noise Reduction) */ -struct sh_css_isp_tnr_params { - int32_t coef; - int32_t threshold_Y; - int32_t threshold_C; -}; - -struct ia_css_tnr_configuration { -#ifndef ISP2401 - const struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES]; -#else - const struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES]; -#endif -}; - -struct sh_css_isp_tnr_isp_config { - uint32_t width_a_over_b; - uint32_t frame_height; - struct dma_port_config port_b; -#ifndef ISP2401 - hrt_vaddress tnr_frame_addr[NUM_VIDEO_TNR_FRAMES]; -#else - hrt_vaddress tnr_frame_addr[NUM_TNR_FRAMES]; -#endif -}; - -#endif /* __IA_CSS_TNR_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h deleted file mode 100644 index 8b1218f7235d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_TNR_STATE_H -#define __IA_CSS_TNR_STATE_H - -#include "type_support.h" - -/* TNR (temporal noise reduction) */ -struct sh_css_isp_tnr_dmem_state { - uint32_t tnr_in_buf_idx; - uint32_t tnr_out_buf_idx; -}; - -#endif /* __IA_CSS_TNR_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h deleted file mode 100644 index 9bbc9ab2e6c0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_TNR_TYPES_H -#define __IA_CSS_TNR_TYPES_H - -/* @file -* CSS-API header file for Temporal Noise Reduction (TNR) parameters. -*/ - -/* Temporal Noise Reduction (TNR) configuration. - * - * When difference between current frame and previous frame is less than or - * equal to threshold, TNR works and current frame is mixed - * with previous frame. - * When difference between current frame and previous frame is greater - * than threshold, we judge motion is detected. Then, TNR does not work and - * current frame is outputted as it is. - * Therefore, when threshold_y and threshold_uv are set as 0, TNR can be disabled. - * - * ISP block: TNR1 - * ISP1: TNR1 is used. - * ISP2: TNR1 is used. 
- */ - - -struct ia_css_tnr_config { - ia_css_u0_16 gain; /** Interpolation ratio of current frame - and previous frame. - gain=0.0 -> previous frame is outputted. - gain=1.0 -> current frame is outputted. - u0.16, [0,65535], - default 32768(0.5), ineffective 65535(almost 1.0) */ - ia_css_u0_16 threshold_y; /** Threshold to enable interpolation of Y. - If difference between current frame and - previous frame is greater than threshold_y, - TNR for Y is disabled. - u0.16, [0,65535], default/ineffective 0 */ - ia_css_u0_16 threshold_uv; /** Threshold to enable interpolation of - U/V. - If difference between current frame and - previous frame is greater than threshold_uv, - TNR for UV is disabled. - u0.16, [0,65535], default/ineffective 0 */ -}; - - -#endif /* __IA_CSS_TNR_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/uds/uds_1.0/ia_css_uds_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/uds/uds_1.0/ia_css_uds_param.h deleted file mode 100644 index 26b7b5bc9391..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/uds/uds_1.0/ia_css_uds_param.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_UDS_PARAM_H -#define __IA_CSS_UDS_PARAM_H - -#include "sh_css_uds.h" - -/* uds (Up and Down scaling) */ -struct ia_css_uds_config { - struct sh_css_crop_pos crop_pos; - struct sh_css_uds_info uds; -}; - -struct sh_css_sp_uds_params { - struct sh_css_crop_pos crop_pos; - struct sh_css_uds_info uds; -}; - -#endif /* __IA_CSS_UDS_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c deleted file mode 100644 index c2076e412410..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_vf.host.h" -#include -#include -#include -#include -#include -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" - -#include "isp.h" - -void -ia_css_vf_config( - struct sh_css_isp_vf_isp_config *to, - const struct ia_css_vf_configuration *from, - unsigned size) -{ - unsigned elems_a = ISP_VEC_NELEMS; - - (void)size; - to->vf_downscale_bits = from->vf_downscale_bits; - to->enable = from->info != NULL; - - if (from->info) { - ia_css_frame_info_to_frame_sp_info(&to->info, from->info); - ia_css_dma_configure_from_info(&to->dma.port_b, from->info); - to->dma.width_a_over_b = elems_a / to->dma.port_b.elems; - - /* Assume divisiblity here, may need to generalize to fixed point. */ - assert (elems_a % to->dma.port_b.elems == 0); - } -} - -/* compute the log2 of the downscale factor needed to get closest - * to the requested viewfinder resolution on the upper side. The output cannot - * be smaller than the requested viewfinder resolution. - */ -enum ia_css_err -sh_css_vf_downscale_log2( - const struct ia_css_frame_info *out_info, - const struct ia_css_frame_info *vf_info, - unsigned int *downscale_log2) -{ - unsigned int ds_log2 = 0; - unsigned int out_width; - - if ((out_info == NULL) | (vf_info == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - out_width = out_info->res.width; - - if (out_width == 0) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - /* downscale until width smaller than the viewfinder width. We don't - * test for the height since the vmem buffers only put restrictions on - * the width of a line, not on the number of lines in a frame. 
- */ - while (out_width >= vf_info->res.width) { - ds_log2++; - out_width /= 2; - } - /* now width is smaller, so we go up one step */ - if ((ds_log2 > 0) && (out_width < ia_css_binary_max_vf_width())) - ds_log2--; - /* TODO: use actual max input resolution of vf_pp binary */ - if ((out_info->res.width >> ds_log2) >= 2 * ia_css_binary_max_vf_width()) - return IA_CSS_ERR_INVALID_ARGUMENTS; - *downscale_log2 = ds_log2; - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -configure_kernel( - const struct ia_css_binary_info *info, - const struct ia_css_frame_info *out_info, - const struct ia_css_frame_info *vf_info, - unsigned int *downscale_log2, - struct ia_css_vf_configuration *config) -{ - enum ia_css_err err; - unsigned vf_log_ds = 0; - - /* First compute value */ - if (vf_info) { - err = sh_css_vf_downscale_log2(out_info, vf_info, &vf_log_ds); - if (err != IA_CSS_SUCCESS) - return err; - } - vf_log_ds = min(vf_log_ds, info->vf_dec.max_log_downscale); - *downscale_log2 = vf_log_ds; - - /* Then store it in isp config section */ - config->vf_downscale_bits = vf_log_ds; - return IA_CSS_SUCCESS; -} - -static void -configure_dma( - struct ia_css_vf_configuration *config, - const struct ia_css_frame_info *vf_info) -{ - config->info = vf_info; -} - -enum ia_css_err -ia_css_vf_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info, - unsigned int *downscale_log2) -{ - enum ia_css_err err; - struct ia_css_vf_configuration config; - const struct ia_css_binary_info *info = &binary->info->sp; - - err = configure_kernel(info, out_info, vf_info, downscale_log2, &config); - configure_dma(&config, vf_info); - - if (vf_info) - vf_info->raw_bit_depth = info->dma.vfdec_bits_per_pixel; - ia_css_configure_vf (binary, &config); - - return IA_CSS_SUCCESS; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h deleted file mode 100644 index c7c3625a9a96..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_VF_HOST_H -#define __IA_CSS_VF_HOST_H - -#include "ia_css_frame_public.h" -#include "ia_css_binary.h" - -#include "ia_css_vf_types.h" -#include "ia_css_vf_param.h" - -/* compute the log2 of the downscale factor needed to get closest - * to the requested viewfinder resolution on the upper side. The output cannot - * be smaller than the requested viewfinder resolution. 
- */ -enum ia_css_err -sh_css_vf_downscale_log2( - const struct ia_css_frame_info *out_info, - const struct ia_css_frame_info *vf_info, - unsigned int *downscale_log2); - -void -ia_css_vf_config( - struct sh_css_isp_vf_isp_config *to, - const struct ia_css_vf_configuration *from, - unsigned size); - -enum ia_css_err -ia_css_vf_configure( - const struct ia_css_binary *binary, - const struct ia_css_frame_info *out_info, - struct ia_css_frame_info *vf_info, - unsigned int *downscale_log2); - -#endif /* __IA_CSS_VF_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h deleted file mode 100644 index 9df4e12f6c2c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_VF_PARAM_H -#define __IA_CSS_VF_PARAM_H - -#include "type_support.h" -#include "dma.h" -#include "gc/gc_1.0/ia_css_gc_param.h" /* GAMMA_OUTPUT_BITS */ -#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */ -#include "ia_css_vf_types.h" - -#define VFDEC_BITS_PER_PIXEL GAMMA_OUTPUT_BITS - -/* Viewfinder decimation */ -struct sh_css_isp_vf_isp_config { - uint32_t vf_downscale_bits; /** Log VF downscale value */ - uint32_t enable; - struct ia_css_frame_sp_info info; - struct { - uint32_t width_a_over_b; - struct dma_port_config port_b; - } dma; -}; - -#endif /* __IA_CSS_VF_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h deleted file mode 100644 index e3efafa279ff..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_VF_TYPES_H -#define __IA_CSS_VF_TYPES_H - -/* Viewfinder decimation - * - * ISP block: vfeven_horizontal_downscale - */ - -#include -#include - -struct ia_css_vf_configuration { - uint32_t vf_downscale_bits; /** Log VF downscale value */ - const struct ia_css_frame_info *info; -}; - -#endif /* __IA_CSS_VF_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c deleted file mode 100644 index b43cb88c6ae4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#ifndef IA_CSS_NO_DEBUG -#include "ia_css_debug.h" -#endif -#include "sh_css_frac.h" - -#include "ia_css_wb.host.h" - -const struct ia_css_wb_config default_wb_config = { - 1, - 32768, - 32768, - 32768, - 32768 -}; - -void -ia_css_wb_encode( - struct sh_css_isp_wb_params *to, - const struct ia_css_wb_config *from, - unsigned size) -{ - (void)size; - to->gain_shift = - uISP_REG_BIT - from->integer_bits; - to->gain_gr = - uDIGIT_FITTING(from->gr, 16 - from->integer_bits, - to->gain_shift); - to->gain_r = - uDIGIT_FITTING(from->r, 16 - from->integer_bits, - to->gain_shift); - to->gain_b = - uDIGIT_FITTING(from->b, 16 - from->integer_bits, - to->gain_shift); - to->gain_gb = - uDIGIT_FITTING(from->gb, 16 - from->integer_bits, - to->gain_shift); -} - -#ifndef IA_CSS_NO_DEBUG -void -ia_css_wb_dump( - const struct sh_css_isp_wb_params *wb, - unsigned level) -{ - if (!wb) return; - ia_css_debug_dtrace(level, "White Balance:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "wb_gain_shift", wb->gain_shift); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "wb_gain_gr", wb->gain_gr); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "wb_gain_r", wb->gain_r); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "wb_gain_b", wb->gain_b); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "wb_gain_gb", wb->gain_gb); -} - -void -ia_css_wb_debug_dtrace( - const struct ia_css_wb_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.integer_bits=%d, " - "config.gr=%d, config.r=%d, " - "config.b=%d, config.gb=%d\n", - config->integer_bits, - config->gr, config->r, - config->b, config->gb); -} -#endif - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h deleted file mode 100644 index 18666baf9f76..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_WB_HOST_H -#define __IA_CSS_WB_HOST_H - -#include "ia_css_wb_types.h" -#include "ia_css_wb_param.h" - -extern const struct ia_css_wb_config default_wb_config; - -void -ia_css_wb_encode( - struct sh_css_isp_wb_params *to, - const struct ia_css_wb_config *from, - unsigned size); - -void -ia_css_wb_dump( - const struct sh_css_isp_wb_params *wb, - unsigned level); - -void -ia_css_wb_debug_dtrace( - const struct ia_css_wb_config *wb, - unsigned level); - -#endif /* __IA_CSS_WB_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_param.h deleted file mode 100644 index c95c53a24067..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_param.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_WB_PARAM_H -#define __IA_CSS_WB_PARAM_H - -#include "type_support.h" - -/* WB (White Balance) */ -struct sh_css_isp_wb_params { - int32_t gain_shift; - int32_t gain_gr; - int32_t gain_r; - int32_t gain_b; - int32_t gain_gb; -}; - -#endif /* __IA_CSS_WB_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h deleted file mode 100644 index bf98734d057e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_WB_TYPES_H -#define __IA_CSS_WB_TYPES_H - -/* @file -* CSS-API header file for White Balance parameters. -*/ - - -/* White Balance configuration (Gain Adjust). - * - * ISP block: WB1 - * ISP1: WB1 is used. - * ISP2: WB1 is used. - */ -struct ia_css_wb_config { - uint32_t integer_bits; /** Common exponent of gains. - u8.0, [0,3], - default 1, ineffective 1 */ - uint32_t gr; /** Significand of Gr gain. 
- u[integer_bits].[16-integer_bits], [0,65535], - default/ineffective 32768(u1.15, 1.0) */ - uint32_t r; /** Significand of R gain. - u[integer_bits].[16-integer_bits], [0,65535], - default/ineffective 32768(u1.15, 1.0) */ - uint32_t b; /** Significand of B gain. - u[integer_bits].[16-integer_bits], [0,65535], - default/ineffective 32768(u1.15, 1.0) */ - uint32_t gb; /** Significand of Gb gain. - u[integer_bits].[16-integer_bits], [0,65535], - default/ineffective 32768(u1.15, 1.0) */ -}; - -#endif /* __IA_CSS_WB_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c deleted file mode 100644 index abcb531f51cc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "sh_css_frac.h" - -#include "ia_css_xnr.host.h" - -const struct ia_css_xnr_config default_xnr_config = { - /* default threshold 6400 translates to 25 on ISP. 
*/ - 6400 -}; - -void -ia_css_xnr_table_vamem_encode( - struct sh_css_isp_xnr_vamem_params *to, - const struct ia_css_xnr_table *from, - unsigned size) -{ - (void)size; - memcpy (&to->xnr, &from->data, sizeof(to->xnr)); -} - -void -ia_css_xnr_encode( - struct sh_css_isp_xnr_params *to, - const struct ia_css_xnr_config *from, - unsigned size) -{ - (void)size; - - to->threshold = - (uint16_t)uDIGIT_FITTING(from->threshold, 16, SH_CSS_ISP_YUV_BITS); -} - -void -ia_css_xnr_table_debug_dtrace( - const struct ia_css_xnr_table *config, - unsigned level) -{ - (void)config; - (void)level; -} - -void -ia_css_xnr_debug_dtrace( - const struct ia_css_xnr_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.threshold=%d\n", config->threshold); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h deleted file mode 100644 index eb3425eafbbe..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_XNR_HOST_H -#define __IA_CSS_XNR_HOST_H - -#include "sh_css_params.h" - -#include "ia_css_xnr_param.h" -#include "ia_css_xnr_table.host.h" - -extern const struct ia_css_xnr_config default_xnr_config; - -void -ia_css_xnr_table_vamem_encode( - struct sh_css_isp_xnr_vamem_params *to, - const struct ia_css_xnr_table *from, - unsigned size); - -void -ia_css_xnr_encode( - struct sh_css_isp_xnr_params *to, - const struct ia_css_xnr_config *from, - unsigned size); - -void -ia_css_xnr_table_debug_dtrace( - const struct ia_css_xnr_table *s3a, - unsigned level); - -void -ia_css_xnr_debug_dtrace( - const struct ia_css_xnr_config *config, - unsigned level); - -#endif /* __IA_CSS_XNR_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h deleted file mode 100644 index a5caebbe2f84..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_XNR_PARAM_H -#define __IA_CSS_XNR_PARAM_H - -#include "type_support.h" -#include - -#ifndef PIPE_GENERATION -#if defined(HAS_VAMEM_VERSION_2) -#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2 -#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_2_XNR_TABLE_SIZE -#elif defined(HAS_VAMEM_VERSION_1) -#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2 -#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_1_XNR_TABLE_SIZE -#else -#error "Unknown vamem type" -#endif - - -#else -/* For pipe generation, the size is not relevant */ -#define SH_CSS_ISP_XNR_TABLE_SIZE 0 -#endif - -/* This should be vamem_data_t, but that breaks the pipe generator */ -struct sh_css_isp_xnr_vamem_params { - uint16_t xnr[SH_CSS_ISP_XNR_TABLE_SIZE]; -}; - -struct sh_css_isp_xnr_params { - /* XNR threshold. - * type:u0.16 but actual valid range is:[0,255] - * valid range is dependent on SH_CSS_ISP_YUV_BITS (currently 8bits) - * default: 25 */ - uint16_t threshold; -}; - -#endif /* __IA_CSS_XNR_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c deleted file mode 100644 index cd5fb72fce3f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#include -#include /* memcpy */ -#include "system_global.h" -#include "vamem.h" -#include "ia_css_types.h" -#include "ia_css_xnr_table.host.h" - -struct ia_css_xnr_table default_xnr_table; - -#if defined(HAS_VAMEM_VERSION_2) - -static const uint16_t -default_xnr_table_data[IA_CSS_VAMEM_2_XNR_TABLE_SIZE] = { - /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */ - 8191>>1, 4096>>1, 2730>>1, 2048>>1, 1638>>1, 1365>>1, 1170>>1, 1024>>1, 910>>1, 819>>1, 744>>1, 682>>1, 630>>1, 585>>1, - 546>>1, 512>>1, - - /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */ - 481>>1, 455>>1, 431>>1, 409>>1, 390>>1, 372>>1, 356>>1, 341>>1, 327>>1, 315>>1, 303>>1, 292>>1, 282>>1, 273>>1, 264>>1, - 256>>1, - - /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */ - 248>>1, 240>>1, 234>>1, 227>>1, 221>>1, 215>>1, 210>>1, 204>>1, 199>>1, 195>>1, 190>>1, 186>>1, 182>>1, 178>>1, 174>>1, - 170>>1, - - /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */ - 167>>1, 163>>1, 160>>1, 157>>1, 154>>1, 151>>1, 148>>1, 146>>1, 143>>1, 141>>1, 138>>1, 136>>1, 134>>1, 132>>1, 130>>1, 128>>1 -}; - -#elif defined(HAS_VAMEM_VERSION_1) - -static const uint16_t -default_xnr_table_data[IA_CSS_VAMEM_1_XNR_TABLE_SIZE] = { - /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */ - 8191>>1, 4096>>1, 2730>>1, 2048>>1, 1638>>1, 1365>>1, 1170>>1, 1024>>1, 910>>1, 819>>1, 744>>1, 682>>1, 630>>1, 585>>1, - 546>>1, 512>>1, - - /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */ - 481>>1, 455>>1, 431>>1, 409>>1, 390>>1, 372>>1, 356>>1, 341>>1, 327>>1, 315>>1, 303>>1, 292>>1, 282>>1, 273>>1, 264>>1, - 256>>1, - - /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */ - 248>>1, 240>>1, 234>>1, 227>>1, 221>>1, 215>>1, 210>>1, 204>>1, 199>>1, 195>>1, 190>>1, 186>>1, 182>>1, 178>>1, 174>>1, - 170>>1, - - /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */ - 167>>1, 163>>1, 160>>1, 157>>1, 154>>1, 151>>1, 148>>1, 146>>1, 143>>1, 141>>1, 138>>1, 136>>1, 134>>1, 132>>1, 
130>>1, 128>>1 -}; - -#else -#error "sh_css_params.c: VAMEM version must \ - be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}" -#endif - -void -ia_css_config_xnr_table(void) -{ -#if defined(HAS_VAMEM_VERSION_2) - memcpy(default_xnr_table.data.vamem_2, default_xnr_table_data, - sizeof(default_xnr_table_data)); - default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_2; -#else - memcpy(default_xnr_table.data.vamem_1, default_xnr_table_data, - sizeof(default_xnr_table_data)); - default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_1; -#endif -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h deleted file mode 100644 index 130086713a7f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_XNR_TABLE_HOST_H -#define __IA_CSS_XNR_TABLE_HOST_H - -extern struct ia_css_xnr_table default_xnr_table; - -void ia_css_config_xnr_table(void); - -#endif /* __IA_CSS_XNR_TABLE_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h deleted file mode 100644 index d2b634211a3f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_XNR_TYPES_H -#define __IA_CSS_XNR_TYPES_H - -/* @file -* CSS-API header file for Extra Noise Reduction (XNR) parameters. -*/ - -/* XNR table. - * - * NOTE: The driver does not need to set this table, - * because the default values are set inside the css. - * - * This table contains coefficients used for division in XNR. - * - * u0.12, [0,4095], - * {4095, 2048, 1365, .........., 65, 64} - * ({1/1, 1/2, 1/3, ............., 1/63, 1/64}) - * - * ISP block: XNR1 - * ISP1: XNR1 is used. - * ISP2: XNR1 is used. - * - */ - -/* Number of elements in the xnr table. */ -#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2 6 -/* Number of elements in the xnr table. 
*/ -#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE (1U< XNR_MAX_ALPHA) - alpha = XNR_MAX_ALPHA; - } - - return alpha; -} - -/* - * Compute the scaled coring value for the ISP kernel from the value on the - * host parameter interface. - */ -static int32_t -compute_coring(int coring) -{ - int32_t isp_coring; - int32_t isp_scale = XNR_CORING_SCALE_FACTOR; - int32_t host_scale = IA_CSS_XNR3_CORING_SCALE; - int32_t offset = host_scale / 2; /* fixed-point 0.5 */ - - /* Convert from public host-side scale factor to isp-side scale - * factor. Clip to [0, isp_scale-1). - */ - isp_coring = ((coring * isp_scale) + offset) / host_scale; - return min(max(isp_coring, 0), isp_scale - 1); -} - -/* - * Compute the scaled blending strength for the ISP kernel from the value on - * the host parameter interface. - */ -static int32_t -compute_blending(int strength) -{ - int32_t isp_strength; - int32_t isp_scale = XNR_BLENDING_SCALE_FACTOR; - int32_t host_scale = IA_CSS_XNR3_BLENDING_SCALE; - int32_t offset = host_scale / 2; /* fixed-point 0.5 */ - - /* Convert from public host-side scale factor to isp-side scale - * factor. The blending factor is positive on the host side, but - * negative on the ISP side because +1.0 cannot be represented - * exactly as s0.11 fixed point, but -1.0 can. - */ - isp_strength = -(((strength * isp_scale) + offset) / host_scale); - return max(min(isp_strength, 0), -XNR_BLENDING_SCALE_FACTOR); -} - -void -ia_css_xnr3_encode( - struct sh_css_isp_xnr3_params *to, - const struct ia_css_xnr3_config *from, - unsigned size) -{ - int kernel_size = XNR_FILTER_SIZE; - /* The adjust factor is the next power of 2 - w.r.t. 
the kernel size*/ - int adjust_factor = ceil_pow2(kernel_size); - int32_t max_diff = (1 << (ISP_VEC_ELEMBITS - 1)) - 1; - int32_t min_diff = -(1 << (ISP_VEC_ELEMBITS - 1)); - - int32_t alpha_y0 = compute_alpha(from->sigma.y0); - int32_t alpha_y1 = compute_alpha(from->sigma.y1); - int32_t alpha_u0 = compute_alpha(from->sigma.u0); - int32_t alpha_u1 = compute_alpha(from->sigma.u1); - int32_t alpha_v0 = compute_alpha(from->sigma.v0); - int32_t alpha_v1 = compute_alpha(from->sigma.v1); - int32_t alpha_ydiff = (alpha_y1 - alpha_y0) * adjust_factor / kernel_size; - int32_t alpha_udiff = (alpha_u1 - alpha_u0) * adjust_factor / kernel_size; - int32_t alpha_vdiff = (alpha_v1 - alpha_v0) * adjust_factor / kernel_size; - - int32_t coring_u0 = compute_coring(from->coring.u0); - int32_t coring_u1 = compute_coring(from->coring.u1); - int32_t coring_v0 = compute_coring(from->coring.v0); - int32_t coring_v1 = compute_coring(from->coring.v1); - int32_t coring_udiff = (coring_u1 - coring_u0) * adjust_factor / kernel_size; - int32_t coring_vdiff = (coring_v1 - coring_v0) * adjust_factor / kernel_size; - - int32_t blending = compute_blending(from->blending.strength); - - (void)size; - - /* alpha's are represented in qN.5 format */ - to->alpha.y0 = alpha_y0; - to->alpha.u0 = alpha_u0; - to->alpha.v0 = alpha_v0; - to->alpha.ydiff = min(max(alpha_ydiff, min_diff), max_diff); - to->alpha.udiff = min(max(alpha_udiff, min_diff), max_diff); - to->alpha.vdiff = min(max(alpha_vdiff, min_diff), max_diff); - - /* coring parameters are expressed in q1.NN format */ - to->coring.u0 = coring_u0; - to->coring.v0 = coring_v0; - to->coring.udiff = min(max(coring_udiff, min_diff), max_diff); - to->coring.vdiff = min(max(coring_vdiff, min_diff), max_diff); - - /* blending strength is expressed in q1.NN format */ - to->blending.strength = blending; -} - -#ifdef ISP2401 -/* (void) = ia_css_xnr3_vmem_encode(*to, *from) - * ----------------------------------------------- - * VMEM Encode Function to translate 
UV parameters from userspace into ISP space -*/ -void -ia_css_xnr3_vmem_encode( - struct sh_css_isp_xnr3_vmem_params *to, - const struct ia_css_xnr3_config *from, - unsigned size) -{ - unsigned i, j, base; - const unsigned total_blocks = 4; - const unsigned shuffle_block = 16; - - (void)from; - (void)size; - - /* Init */ - for (i = 0; i < ISP_VEC_NELEMS; i++) { - to->x[0][i] = 0; - to->a[0][i] = 0; - to->b[0][i] = 0; - to->c[0][i] = 0; - } - - /* Constraints on "x": - * - values should be greater or equal to 0. - * - values should be ascending. - */ - assert(x[0] >= 0); - - for (j = 1; j < XNR3_LOOK_UP_TABLE_POINTS; j++) { - assert(x[j] >= 0); - assert(x[j] > x[j - 1]); - - } - - /* The implementation of the calulating 1/x is based on the availability - * of the OP_vec_shuffle16 operation. - * A 64 element vector is split up in 4 blocks of 16 element. Each array is copied to - * a vector 4 times, (starting at 0, 16, 32 and 48). All array elements are copied or - * initialised as described in the KFS. The remaining elements of a vector are set to 0. 
- */ - /* TODO: guard this code with above assumptions */ - for (i = 0; i < total_blocks; i++) { - base = shuffle_block * i; - - for (j = 0; j < XNR3_LOOK_UP_TABLE_POINTS; j++) { - to->x[0][base + j] = x[j]; - to->a[0][base + j] = a[j]; - to->b[0][base + j] = b[j]; - to->c[0][base + j] = c[j]; - } - } -} - -#endif -/* Dummy Function added as the tool expects it*/ -void -ia_css_xnr3_debug_dtrace( - const struct ia_css_xnr3_config *config, - unsigned level) -{ - (void)config; - (void)level; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h deleted file mode 100644 index 6a86924a71fe..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_XNR3_HOST_H -#define __IA_CSS_XNR3_HOST_H - -#include "ia_css_xnr3_param.h" -#include "ia_css_xnr3_types.h" - -extern const struct ia_css_xnr3_config default_xnr3_config; - -void -ia_css_xnr3_encode( - struct sh_css_isp_xnr3_params *to, - const struct ia_css_xnr3_config *from, - unsigned size); - -#ifdef ISP2401 -void -ia_css_xnr3_vmem_encode( - struct sh_css_isp_xnr3_vmem_params *to, - const struct ia_css_xnr3_config *from, - unsigned size); - -#endif -void -ia_css_xnr3_debug_dtrace( - const struct ia_css_xnr3_config *config, - unsigned level); - -#endif /* __IA_CSS_XNR3_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h deleted file mode 100644 index 06c24e848234..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_XNR3_PARAM_H -#define __IA_CSS_XNR3_PARAM_H - -#include "type_support.h" -#ifdef ISP2401 -#include "vmem.h" /* needed for VMEM_ARRAY */ - -#endif - -/* Scaling factor of the alpha values: which fixed-point value represents 1.0? - * It must be chosen such that 1/min_sigma still fits in an ISP vector - * element. 
*/ -#define XNR_ALPHA_SCALE_LOG2 5 -#define XNR_ALPHA_SCALE_FACTOR (1 << XNR_ALPHA_SCALE_LOG2) - -/* Scaling factor of the coring values on the ISP. */ -#define XNR_CORING_SCALE_LOG2 (ISP_VEC_ELEMBITS-1) -#define XNR_CORING_SCALE_FACTOR (1 << XNR_CORING_SCALE_LOG2) - -/* Scaling factor of the blending strength on the ISP. */ -#define XNR_BLENDING_SCALE_LOG2 (ISP_VEC_ELEMBITS-1) -#define XNR_BLENDING_SCALE_FACTOR (1 << XNR_BLENDING_SCALE_LOG2) - -/* XNR3 filter size. Must be 11x11, 9x9 or 5x5. */ -#ifdef FLT_KERNEL_9x9 -#define XNR_FILTER_SIZE 9 -#else -#ifdef FLT_KERNEL_11x11 -#define XNR_FILTER_SIZE 11 -#else -#define XNR_FILTER_SIZE 5 -#endif -#endif - -/* XNR3 alpha (1/sigma) parameters on the ISP, expressed as a base (0) value - * for dark areas, and a scaled diff towards the value for bright areas. */ -struct sh_css_xnr3_alpha_params { - int32_t y0; - int32_t u0; - int32_t v0; - int32_t ydiff; - int32_t udiff; - int32_t vdiff; -}; - -/* XNR3 coring parameters on the ISP, expressed as a base (0) value - * for dark areas, and a scaled diff towards the value for bright areas. */ -struct sh_css_xnr3_coring_params { - int32_t u0; - int32_t v0; - int32_t udiff; - int32_t vdiff; -}; - -/* XNR3 blending strength on the ISP. 
*/ -struct sh_css_xnr3_blending_params { - int32_t strength; -}; - -/* XNR3 ISP parameters */ -struct sh_css_isp_xnr3_params { - struct sh_css_xnr3_alpha_params alpha; - struct sh_css_xnr3_coring_params coring; - struct sh_css_xnr3_blending_params blending; -}; - -#ifdef ISP2401 -/* - * STRUCT sh_css_isp_xnr3_vmem_params - * ----------------------------------------------- - * ISP VMEM parameters - */ -struct sh_css_isp_xnr3_vmem_params { - VMEM_ARRAY(x, ISP_VEC_NELEMS); - VMEM_ARRAY(a, ISP_VEC_NELEMS); - VMEM_ARRAY(b, ISP_VEC_NELEMS); - VMEM_ARRAY(c, ISP_VEC_NELEMS); -}; - - -#endif -#endif /*__IA_CSS_XNR3_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h deleted file mode 100644 index 669200caf72e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_XNR3_TYPES_H -#define __IA_CSS_XNR3_TYPES_H - -/* @file -* CSS-API header file for Extra Noise Reduction (XNR) parameters. -*/ - -/** - * \brief Scale of the XNR sigma parameters. - * \details The define specifies which fixed-point value represents 1.0. - */ -#define IA_CSS_XNR3_SIGMA_SCALE (1 << 10) - -/** - * \brief Scale of the XNR coring parameters. 
- * \details The define specifies which fixed-point value represents 1.0. - */ -#define IA_CSS_XNR3_CORING_SCALE (1 << 15) - -/** - * \brief Scale of the XNR blending parameter. - * \details The define specifies which fixed-point value represents 1.0. - */ -#define IA_CSS_XNR3_BLENDING_SCALE (1 << 11) - - -/** - * \brief XNR3 Sigma Parameters. - * \details Sigma parameters define the strength of the XNR filter. - * A higher number means stronger filtering. There are two values for each of - * the three YUV planes: one for dark areas and one for bright areas. All - * sigma parameters are fixed-point values between 0.0 and 1.0, scaled with - * IA_CSS_XNR3_SIGMA_SCALE. - */ -struct ia_css_xnr3_sigma_params { - int y0; /** Sigma for Y range similarity in dark area */ - int y1; /** Sigma for Y range similarity in bright area */ - int u0; /** Sigma for U range similarity in dark area */ - int u1; /** Sigma for U range similarity in bright area */ - int v0; /** Sigma for V range similarity in dark area */ - int v1; /** Sigma for V range similarity in bright area */ -}; - -/** - * \brief XNR3 Coring Parameters - * \details Coring parameters define the "coring" strength, which is a soft - * thresholding technique to avoid false coloring. There are two values for - * each of the two chroma planes: one for dark areas and one for bright areas. - * All coring parameters are fixed-point values between 0.0 and 1.0, scaled - * with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0. - */ -struct ia_css_xnr3_coring_params { - int u0; /** Coring threshold of U channel in dark area */ - int u1; /** Coring threshold of U channel in bright area */ - int v0; /** Coring threshold of V channel in dark area */ - int v1; /** Coring threshold of V channel in bright area */ -}; - -/** - * \brief XNR3 Blending Parameters - * \details Blending parameters define the blending strength of filtered - * output pixels with the original chroma pixels from before xnr3. 
The - * blending strength is a fixed-point value between 0.0 and 1.0 (inclusive), - * scaled with IA_CSS_XNR3_BLENDING_SCALE. - * A higher number applies xnr filtering more strongly. A value of 1.0 - * disables the blending and returns the xnr3 filtered output, while a - * value of 0.0 bypasses the entire xnr3 filter. - */ -struct ia_css_xnr3_blending_params { - int strength; /** Blending strength */ -}; - -/** - * \brief XNR3 public parameters. - * \details Struct with all parameters for the XNR3 kernel that can be set - * from the CSS API. - */ -struct ia_css_xnr3_config { - struct ia_css_xnr3_sigma_params sigma; /** XNR3 sigma parameters */ - struct ia_css_xnr3_coring_params coring; /** XNR3 coring parameters */ - struct ia_css_xnr3_blending_params blending; /** XNR3 blending parameters */ -}; - -#endif /* __IA_CSS_XNR3_TYPES_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c deleted file mode 100644 index d8dccce772a9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "sh_css_frac.h" - -#include "bnr/bnr_1.0/ia_css_bnr.host.h" -#include "ia_css_ynr.host.h" - -const struct ia_css_nr_config default_nr_config = { - 16384, - 8192, - 1280, - 0, - 0 -}; - -const struct ia_css_ee_config default_ee_config = { - 8192, - 128, - 2048 -}; - -void -ia_css_nr_encode( - struct sh_css_isp_ynr_params *to, - const struct ia_css_nr_config *from, - unsigned size) -{ - (void)size; - /* YNR (Y Noise Reduction) */ - to->threshold = - uDIGIT_FITTING((unsigned)8192, 16, SH_CSS_BAYER_BITS); - to->gain_all = - uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT); - to->gain_dir = - uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT); - to->threshold_cb = - uDIGIT_FITTING(from->threshold_cb, 16, SH_CSS_BAYER_BITS); - to->threshold_cr = - uDIGIT_FITTING(from->threshold_cr, 16, SH_CSS_BAYER_BITS); -} - -void -ia_css_yee_encode( - struct sh_css_isp_yee_params *to, - const struct ia_css_yee_config *from, - unsigned size) -{ - int asiWk1 = (int) from->ee.gain; - int asiWk2 = asiWk1 / 8; - int asiWk3 = asiWk1 / 4; - - (void)size; - /* YEE (Y Edge Enhancement) */ - to->dirthreshold_s = - min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS) - << 1), - SH_CSS_BAYER_MAXVAL); - to->dirthreshold_g = - min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS) - << 4), - SH_CSS_BAYER_MAXVAL); - to->dirthreshold_width_log2 = - uFRACTION_BITS_FITTING(8); - to->dirthreshold_width = - 1 << to->dirthreshold_width_log2; - to->detailgain = - uDIGIT_FITTING(from->ee.detail_gain, 11, - SH_CSS_YEE_DETAIL_GAIN_SHIFT); - to->coring_s = - (uDIGIT_FITTING((unsigned)56, 16, SH_CSS_BAYER_BITS) * - from->ee.threshold) >> 8; - to->coring_g = - (uDIGIT_FITTING((unsigned)224, 16, SH_CSS_BAYER_BITS) * - from->ee.threshold) >> 8; - /* 8; // *1.125 ->[s4.8] */ - to->scale_plus_s = - (asiWk1 + asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT); - /* 8; // ( * -.25)->[s4.8] */ - 
to->scale_plus_g = - (0 - asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT); - /* 8; // *0.875 ->[s4.8] */ - to->scale_minus_s = - (asiWk1 - asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT); - /* 8; // ( *.25 ) ->[s4.8] */ - to->scale_minus_g = - (asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT); - to->clip_plus_s = - uDIGIT_FITTING((unsigned)32760, 16, SH_CSS_BAYER_BITS); - to->clip_plus_g = 0; - to->clip_minus_s = - uDIGIT_FITTING((unsigned)504, 16, SH_CSS_BAYER_BITS); - to->clip_minus_g = - uDIGIT_FITTING((unsigned)32256, 16, SH_CSS_BAYER_BITS); - to->Yclip = SH_CSS_BAYER_MAXVAL; -} - -void -ia_css_nr_dump( - const struct sh_css_isp_ynr_params *ynr, - unsigned level) -{ - if (!ynr) return; - ia_css_debug_dtrace(level, - "Y Noise Reduction:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynr_threshold", ynr->threshold); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynr_gain_all", ynr->gain_all); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynr_gain_dir", ynr->gain_dir); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynr_threshold_cb", ynr->threshold_cb); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynr_threshold_cr", ynr->threshold_cr); -} - -void -ia_css_yee_dump( - const struct sh_css_isp_yee_params *yee, - unsigned level) -{ - ia_css_debug_dtrace(level, - "Y Edge Enhancement:\n"); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynryee_dirthreshold_s", - yee->dirthreshold_s); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynryee_dirthreshold_g", - yee->dirthreshold_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynryee_dirthreshold_width_log2", - yee->dirthreshold_width_log2); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynryee_dirthreshold_width", - yee->dirthreshold_width); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_detailgain", - yee->detailgain); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_coring_s", - yee->coring_s); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_coring_g", - yee->coring_g); - 
ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_scale_plus_s", - yee->scale_plus_s); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_scale_plus_g", - yee->scale_plus_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_scale_minus_s", - yee->scale_minus_s); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_scale_minus_g", - yee->scale_minus_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_clip_plus_s", - yee->clip_plus_s); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_clip_plus_g", - yee->clip_plus_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_clip_minus_s", - yee->clip_minus_s); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "yee_clip_minus_g", - yee->clip_minus_g); - ia_css_debug_dtrace(level, "\t%-32s = %d\n", - "ynryee_Yclip", - yee->Yclip); -} - -void -ia_css_nr_debug_dtrace( - const struct ia_css_nr_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.direction=%d, " - "config.bnr_gain=%d, config.ynr_gain=%d, " - "config.threshold_cb=%d, config.threshold_cr=%d\n", - config->direction, - config->bnr_gain, config->ynr_gain, - config->threshold_cb, config->threshold_cr); -} - -void -ia_css_ee_debug_dtrace( - const struct ia_css_ee_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.threshold=%d, config.gain=%d, config.detail_gain=%d\n", - config->threshold, config->gain, config->detail_gain); -} - -void -ia_css_init_ynr_state( - void/*struct sh_css_isp_ynr_vmem_state*/ *state, - size_t size) -{ - memset(state, 0, size); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h deleted file mode 100644 index b5730df313ef..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_YNR_HOST_H -#define __IA_CSS_YNR_HOST_H - -#include "ia_css_ynr_types.h" -#include "ia_css_ynr_param.h" - -extern const struct ia_css_nr_config default_nr_config; -extern const struct ia_css_ee_config default_ee_config; - -void -ia_css_nr_encode( - struct sh_css_isp_ynr_params *to, - const struct ia_css_nr_config *from, - unsigned size); - -void -ia_css_yee_encode( - struct sh_css_isp_yee_params *to, - const struct ia_css_yee_config *from, - unsigned size); - -void -ia_css_nr_dump( - const struct sh_css_isp_ynr_params *ynr, - unsigned level); - -void -ia_css_yee_dump( - const struct sh_css_isp_yee_params *yee, - unsigned level); - -void -ia_css_nr_debug_dtrace( - const struct ia_css_nr_config *config, - unsigned level); - -void -ia_css_ee_debug_dtrace( - const struct ia_css_ee_config *config, - unsigned level); - -void -ia_css_init_ynr_state( - void/*struct sh_css_isp_ynr_vmem_state*/ *state, - size_t size); -#endif /* __IA_CSS_YNR_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h deleted file mode 100644 index ad61ec1211e8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_YNR_PARAM_H -#define __IA_CSS_YNR_PARAM_H - -#include "type_support.h" - -/* YNR (Y Noise Reduction) */ -struct sh_css_isp_ynr_params { - int32_t threshold; - int32_t gain_all; - int32_t gain_dir; - int32_t threshold_cb; - int32_t threshold_cr; -}; - -/* YEE (Y Edge Enhancement) */ -struct sh_css_isp_yee_params { - int32_t dirthreshold_s; - int32_t dirthreshold_g; - int32_t dirthreshold_width_log2; - int32_t dirthreshold_width; - int32_t detailgain; - int32_t coring_s; - int32_t coring_g; - int32_t scale_plus_s; - int32_t scale_plus_g; - int32_t scale_minus_s; - int32_t scale_minus_g; - int32_t clip_plus_s; - int32_t clip_plus_g; - int32_t clip_minus_s; - int32_t clip_minus_g; - int32_t Yclip; -}; - -#endif /* __IA_CSS_YNR_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h deleted file mode 100644 index b2348b19c3cd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_YNR_STATE_H -#define __IA_CSS_YNR_STATE_H - -#include "type_support.h" -#include "vmem.h" - -/* YNR (luminance noise reduction) */ -struct sh_css_isp_ynr_vmem_state { - VMEM_ARRAY(ynr_buf[4], MAX_VECTORS_PER_BUF_LINE*ISP_NWAY); -}; - -#endif /* __IA_CSS_YNR_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h deleted file mode 100644 index 3f8589a5a43a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_YNR_TYPES_H -#define __IA_CSS_YNR_TYPES_H - -/* @file -* CSS-API header file for Noise Reduction (BNR) and YCC Noise Reduction (YNR,CNR). -*/ - -/* Configuration used by Bayer Noise Reduction (BNR) and - * YCC Noise Reduction (YNR,CNR). - * - * ISP block: BNR1, YNR1, CNR1 - * ISP1: BNR1,YNR1,CNR1 are used. - * ISP2: BNR1,YNR1,CNR1 are used for Preview/Video. - * BNR1,YNR2,CNR2 are used for Still. 
- */ -struct ia_css_nr_config { - ia_css_u0_16 bnr_gain; /** Strength of noise reduction (BNR). - u0.16, [0,65535], - default 14336(0.21875), ineffective 0 */ - ia_css_u0_16 ynr_gain; /** Strength of noise reduction (YNR). - u0.16, [0,65535], - default 14336(0.21875), ineffective 0 */ - ia_css_u0_16 direction; /** Sensitivity of edge (BNR). - u0.16, [0,65535], - default 512(0.0078125), ineffective 0 */ - ia_css_u0_16 threshold_cb; /** Coring threshold for Cb (CNR). - This is the same as - de_config.c1_coring_threshold. - u0.16, [0,65535], - default 0(0), ineffective 0 */ - ia_css_u0_16 threshold_cr; /** Coring threshold for Cr (CNR). - This is the same as - de_config.c2_coring_threshold. - u0.16, [0,65535], - default 0(0), ineffective 0 */ -}; - -/* Edge Enhancement (sharpen) configuration. - * - * ISP block: YEE1 - * ISP1: YEE1 is used. - * ISP2: YEE1 is used for Preview/Video. - * (YEE2 is used for Still.) - */ -struct ia_css_ee_config { - ia_css_u5_11 gain; /** The strength of sharpness. - u5.11, [0,65535], - default 8192(4.0), ineffective 0 */ - ia_css_u8_8 threshold; /** The threshold that divides noises from - edge. - u8.8, [0,65535], - default 256(1.0), ineffective 65535 */ - ia_css_u5_11 detail_gain; /** The strength of sharpness in pell-mell - area. - u5.11, [0,65535], - default 2048(1.0), ineffective 0 */ -}; - -/* YNR and YEE (sharpen) configuration. - */ -struct ia_css_yee_config { - struct ia_css_nr_config nr; /** The NR configuration. */ - struct ia_css_ee_config ee; /** The EE configuration. 
*/ -}; - -#endif /* __IA_CSS_YNR_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c deleted file mode 100644 index 44b005004238..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "assert_support.h" - -#include "ia_css_ynr2.host.h" - -const struct ia_css_ynr_config default_ynr_config = { - 0, - 0, - 0, - 0, -}; - -const struct ia_css_fc_config default_fc_config = { - 1, - 0, /* 0 -> ineffective */ - 0, /* 0 -> ineffective */ - 0, /* 0 -> ineffective */ - 0, /* 0 -> ineffective */ - (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */ - (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */ - (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */ - (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */ - (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */ - (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */ - (int16_t)- (1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */ - (int16_t)- (1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */ -}; - -void -ia_css_ynr_encode( - struct sh_css_isp_yee2_params *to, - const struct ia_css_ynr_config *from, - unsigned size) -{ - (void)size; - to->edge_sense_gain_0 = from->edge_sense_gain_0; - to->edge_sense_gain_1 = 
from->edge_sense_gain_1; - to->corner_sense_gain_0 = from->corner_sense_gain_0; - to->corner_sense_gain_1 = from->corner_sense_gain_1; -} - -void -ia_css_fc_encode( - struct sh_css_isp_fc_params *to, - const struct ia_css_fc_config *from, - unsigned size) -{ - (void)size; - to->gain_exp = from->gain_exp; - - to->coring_pos_0 = from->coring_pos_0; - to->coring_pos_1 = from->coring_pos_1; - to->coring_neg_0 = from->coring_neg_0; - to->coring_neg_1 = from->coring_neg_1; - - to->gain_pos_0 = from->gain_pos_0; - to->gain_pos_1 = from->gain_pos_1; - to->gain_neg_0 = from->gain_neg_0; - to->gain_neg_1 = from->gain_neg_1; - - to->crop_pos_0 = from->crop_pos_0; - to->crop_pos_1 = from->crop_pos_1; - to->crop_neg_0 = from->crop_neg_0; - to->crop_neg_1 = from->crop_neg_1; -} - -void -ia_css_ynr_dump( - const struct sh_css_isp_yee2_params *yee2, - unsigned level); - -void -ia_css_fc_dump( - const struct sh_css_isp_fc_params *fc, - unsigned level); - -void -ia_css_fc_debug_dtrace( - const struct ia_css_fc_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.gain_exp=%d, " - "config.coring_pos_0=%d, config.coring_pos_1=%d, " - "config.coring_neg_0=%d, config.coring_neg_1=%d, " - "config.gain_pos_0=%d, config.gain_pos_1=%d, " - "config.gain_neg_0=%d, config.gain_neg_1=%d, " - "config.crop_pos_0=%d, config.crop_pos_1=%d, " - "config.crop_neg_0=%d, config.crop_neg_1=%d\n", - config->gain_exp, - config->coring_pos_0, config->coring_pos_1, - config->coring_neg_0, config->coring_neg_1, - config->gain_pos_0, config->gain_pos_1, - config->gain_neg_0, config->gain_neg_1, - config->crop_pos_0, config->crop_pos_1, - config->crop_neg_0, config->crop_neg_1); -} - -void -ia_css_ynr_debug_dtrace( - const struct ia_css_ynr_config *config, - unsigned level) -{ - ia_css_debug_dtrace(level, - "config.edge_sense_gain_0=%d, config.edge_sense_gain_1=%d, " - "config.corner_sense_gain_0=%d, config.corner_sense_gain_1=%d\n", - config->edge_sense_gain_0, config->edge_sense_gain_1, 
- config->corner_sense_gain_0, config->corner_sense_gain_1); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h deleted file mode 100644 index 71e89c469e4c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_YNR2_HOST_H -#define __IA_CSS_YNR2_HOST_H - -#include "ia_css_ynr2_types.h" -#include "ia_css_ynr2_param.h" - -extern const struct ia_css_ynr_config default_ynr_config; -extern const struct ia_css_fc_config default_fc_config; - -void -ia_css_ynr_encode( - struct sh_css_isp_yee2_params *to, - const struct ia_css_ynr_config *from, - unsigned size); - -void -ia_css_fc_encode( - struct sh_css_isp_fc_params *to, - const struct ia_css_fc_config *from, - unsigned size); - -void -ia_css_ynr_dump( - const struct sh_css_isp_yee2_params *yee2, - unsigned level); - -void -ia_css_fc_dump( - const struct sh_css_isp_fc_params *fc, - unsigned level); - -void -ia_css_fc_debug_dtrace( - const struct ia_css_fc_config *config, - unsigned level); - -void -ia_css_ynr_debug_dtrace( - const struct ia_css_ynr_config *config, - unsigned level); - -#endif /* __IA_CSS_YNR2_HOST_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h deleted file mode 100644 index e56b695bef27..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_YNR2_PARAM_H -#define __IA_CSS_YNR2_PARAM_H - -#include "type_support.h" - -/* YNR (Y Noise Reduction), YEE (Y Edge Enhancement) */ -struct sh_css_isp_yee2_params { - int32_t edge_sense_gain_0; - int32_t edge_sense_gain_1; - int32_t corner_sense_gain_0; - int32_t corner_sense_gain_1; -}; - -/* Fringe Control */ -struct sh_css_isp_fc_params { - int32_t gain_exp; - uint16_t coring_pos_0; - uint16_t coring_pos_1; - uint16_t coring_neg_0; - uint16_t coring_neg_1; - int32_t gain_pos_0; - int32_t gain_pos_1; - int32_t gain_neg_0; - int32_t gain_neg_1; - int32_t crop_pos_0; - int32_t crop_pos_1; - int32_t crop_neg_0; - int32_t crop_neg_1; -}; - -#endif /* __IA_CSS_YNR2_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h deleted file mode 100644 index 83161a24207d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_YNR2_TYPES_H -#define __IA_CSS_YNR2_TYPES_H - -/* @file -* CSS-API header file for Y(Luma) Noise Reduction. -*/ - -/* Y(Luma) Noise Reduction configuration. - * - * ISP block: YNR2 & YEE2 - * (ISP1: YNR1 and YEE1 are used.) - * (ISP2: YNR1 and YEE1 are used for Preview/Video.) 
- * ISP2: YNR2 and YEE2 are used for Still. - */ -struct ia_css_ynr_config { - uint16_t edge_sense_gain_0; /** Sensitivity of edge in dark area. - u13.0, [0,8191], - default 1000, ineffective 0 */ - uint16_t edge_sense_gain_1; /** Sensitivity of edge in bright area. - u13.0, [0,8191], - default 1000, ineffective 0 */ - uint16_t corner_sense_gain_0; /** Sensitivity of corner in dark area. - u13.0, [0,8191], - default 1000, ineffective 0 */ - uint16_t corner_sense_gain_1; /** Sensitivity of corner in bright area. - u13.0, [0,8191], - default 1000, ineffective 0 */ -}; - -/* Fringe Control configuration. - * - * ISP block: FC2 (FC2 is used with YNR2/YEE2.) - * (ISP1: FC2 is not used.) - * (ISP2: FC2 is not for Preview/Video.) - * ISP2: FC2 is used for Still. - */ -struct ia_css_fc_config { - uint8_t gain_exp; /** Common exponent of gains. - u8.0, [0,13], - default 1, ineffective 0 */ - uint16_t coring_pos_0; /** Coring threshold for positive edge in dark area. - u0.13, [0,8191], - default 0(0), ineffective 0 */ - uint16_t coring_pos_1; /** Coring threshold for positive edge in bright area. - u0.13, [0,8191], - default 0(0), ineffective 0 */ - uint16_t coring_neg_0; /** Coring threshold for negative edge in dark area. - u0.13, [0,8191], - default 0(0), ineffective 0 */ - uint16_t coring_neg_1; /** Coring threshold for negative edge in bright area. - u0.13, [0,8191], - default 0(0), ineffective 0 */ - uint16_t gain_pos_0; /** Gain for positive edge in dark area. - u0.13, [0,8191], - default 4096(0.5), ineffective 0 */ - uint16_t gain_pos_1; /** Gain for positive edge in bright area. - u0.13, [0,8191], - default 4096(0.5), ineffective 0 */ - uint16_t gain_neg_0; /** Gain for negative edge in dark area. - u0.13, [0,8191], - default 4096(0.5), ineffective 0 */ - uint16_t gain_neg_1; /** Gain for negative edge in bright area. - u0.13, [0,8191], - default 4096(0.5), ineffective 0 */ - uint16_t crop_pos_0; /** Limit for positive edge in dark area. 
- u0.13, [0,8191], - default/ineffective 8191(almost 1.0) */ - uint16_t crop_pos_1; /** Limit for positive edge in bright area. - u0.13, [0,8191], - default/ineffective 8191(almost 1.0) */ - int16_t crop_neg_0; /** Limit for negative edge in dark area. - s0.13, [-8192,0], - default/ineffective -8192(-1.0) */ - int16_t crop_neg_1; /** Limit for negative edge in bright area. - s0.13, [-8192,0], - default/ineffective -8192(-1.0) */ -}; - -#endif /* __IA_CSS_YNR2_TYPES_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_param.h deleted file mode 100644 index 48fb7d22d7c1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_param.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __IA_CSS_YNRX_PARAM_H -#define __IA_CSS_YNRX_PARAM_H - -#include "ia_css_ynr2_param.h" - -#endif /* __IA_CSS_YNRX_PARAM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_state.h deleted file mode 100644 index 2516dd3dc12b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_state.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __IA_CSS_YNR2_STATE_H -#define __IA_CSS_YNR2_STATE_H - -/* Reuse YNR1 states */ -#include "../ynr_1.0/ia_css_ynr_state.h" - -#endif /* __IA_CSS_YNR2_STATE_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/input_buf.isp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/input_buf.isp.h deleted file mode 100644 index 32714d5870cf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/input_buf.isp.h +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _INPUT_BUF_ISP_H_ -#define _INPUT_BUF_ISP_H_ - -/* Temporary include, since IA_CSS_BINARY_MODE_COPY is still needed */ -#include "sh_css_defs.h" -#include "isp_const.h" /* MAX_VECTORS_PER_INPUT_LINE */ - -#define INPUT_BUF_HEIGHT 2 /* double buffer */ -#define INPUT_BUF_LINES 2 - -#ifndef ENABLE_CONTINUOUS -#define ENABLE_CONTINUOUS 0 -#endif - -/* In continuous mode, the input buffer must be a fixed size for all binaries - * and at a fixed address since it will be used by the SP. */ -#define EXTRA_INPUT_VECTORS 2 /* For left padding */ -#define MAX_VECTORS_PER_INPUT_LINE_CONT (CEIL_DIV(SH_CSS_MAX_SENSOR_WIDTH, ISP_NWAY) + EXTRA_INPUT_VECTORS) - -/* The input buffer should be on a fixed address in vmem, for continuous capture */ -#define INPUT_BUF_ADDR 0x0 -#if (defined(__ISP) && (!defined(MODE) || MODE != IA_CSS_BINARY_MODE_COPY)) - -#if ENABLE_CONTINUOUS -typedef struct { - tmemvectoru raw[INPUT_BUF_HEIGHT][INPUT_BUF_LINES][MAX_VECTORS_PER_INPUT_LINE_CONT]; /* 2 bayer lines */ - /* Two more lines for SP raw copy efficiency */ -#ifndef ENABLE_REDUCED_INPUT_BUFFER - /* "Workaround" solution in the case that space needed vmem exceeds the size of the vmem. 
*/ - /* Since in theory this buffer is not needed for IPU 2.2/2.3, */ - /* the workaround solution will not be needed (and the whole buffer) after the code refactoring. */ - tmemvectoru _raw[INPUT_BUF_HEIGHT][INPUT_BUF_LINES][MAX_VECTORS_PER_INPUT_LINE_CONT]; /* 2 bayer lines */ -#endif -} input_line_type; -#else /* ENABLE CONTINUOUS == 0 */ -typedef struct { - tmemvectoru raw[INPUT_BUF_HEIGHT][INPUT_BUF_LINES][MAX_VECTORS_PER_INPUT_LINE]; /* 2 bayer lines */ -} input_line_type; -#endif /* ENABLE_CONTINUOUS */ - -#endif /*MODE*/ - -#endif /* _INPUT_BUF_ISP_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h deleted file mode 100644 index 2f215dc2ac32..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h +++ /dev/null @@ -1,482 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _COMMON_ISP_CONST_H_ -#define _COMMON_ISP_CONST_H_ - -/*#include "isp.h"*/ /* ISP_VEC_NELEMS */ - -/* Binary independent constants */ - -#ifndef NO_HOIST -# define NO_HOIST HIVE_ATTRIBUTE (( no_hoist )) -#endif - -#define NO_HOIST_CSE HIVE_ATTRIBUTE ((no_hoist, no_cse)) - -#define UNION struct /* Union constructors not allowed in C++ */ - -/* ISP binary identifiers. - These determine the order in which the binaries are looked up, do not change - this! - Also, the SP firmware uses this same order (isp_loader.hive.c). - Also, gen_firmware.c uses this order in its firmware_header. -*/ -/* The binary id is used in pre-processor expressions so we cannot - * use an enum here. */ - /* 24xx pipelines*/ -#define SH_CSS_BINARY_ID_COPY 0 -#define SH_CSS_BINARY_ID_BAYER_DS 1 -#define SH_CSS_BINARY_ID_VF_PP_FULL 2 -#define SH_CSS_BINARY_ID_VF_PP_OPT 3 -#define SH_CSS_BINARY_ID_YUV_SCALE 4 -#define SH_CSS_BINARY_ID_CAPTURE_PP 5 -#define SH_CSS_BINARY_ID_PRE_ISP 6 -#define SH_CSS_BINARY_ID_PRE_ISP_ISP2 7 -#define SH_CSS_BINARY_ID_GDC 8 -#define SH_CSS_BINARY_ID_POST_ISP 9 -#define SH_CSS_BINARY_ID_POST_ISP_ISP2 10 -#define SH_CSS_BINARY_ID_ANR 11 -#define SH_CSS_BINARY_ID_ANR_ISP2 12 -#define SH_CSS_BINARY_ID_PREVIEW_CONT_DS 13 -#define SH_CSS_BINARY_ID_PREVIEW_DS 14 -#define SH_CSS_BINARY_ID_PREVIEW_DEC 15 -#define SH_CSS_BINARY_ID_PREVIEW_CONT_BDS125_ISP2 16 -#define SH_CSS_BINARY_ID_PREVIEW_CONT_DPC_BDS150_ISP2 17 -#define SH_CSS_BINARY_ID_PREVIEW_CONT_BDS150_ISP2 18 -#define SH_CSS_BINARY_ID_PREVIEW_CONT_DPC_BDS200_ISP2 19 -#define SH_CSS_BINARY_ID_PREVIEW_CONT_BDS200_ISP2 20 -#define SH_CSS_BINARY_ID_PREVIEW_DZ 21 -#define SH_CSS_BINARY_ID_PREVIEW_DZ_ISP2 22 -#define SH_CSS_BINARY_ID_PRIMARY_DS 23 -#define SH_CSS_BINARY_ID_PRIMARY_VAR 24 -#define SH_CSS_BINARY_ID_PRIMARY_VAR_ISP2 25 -#define SH_CSS_BINARY_ID_PRIMARY_SMALL 26 -#define SH_CSS_BINARY_ID_PRIMARY_STRIPED 27 -#define 
SH_CSS_BINARY_ID_PRIMARY_STRIPED_ISP2 28 -#define SH_CSS_BINARY_ID_PRIMARY_8MP 29 -#define SH_CSS_BINARY_ID_PRIMARY_14MP 30 -#define SH_CSS_BINARY_ID_PRIMARY_16MP 31 -#define SH_CSS_BINARY_ID_PRIMARY_REF 32 -#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE0 33 -#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE1 34 -#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE2 35 -#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE3 36 -#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE4 37 -#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE5 38 -#define SH_CSS_BINARY_ID_VIDEO_OFFLINE 39 -#define SH_CSS_BINARY_ID_VIDEO_DS 40 -#define SH_CSS_BINARY_ID_VIDEO_YUV_DS 41 -#define SH_CSS_BINARY_ID_VIDEO_DZ 42 -#define SH_CSS_BINARY_ID_VIDEO_DZ_2400_ONLY 43 -#define SH_CSS_BINARY_ID_VIDEO_HIGH 44 -#define SH_CSS_BINARY_ID_VIDEO_NODZ 45 -#define SH_CSS_BINARY_ID_VIDEO_CONT_MULTIBDS_ISP2_MIN 46 -#define SH_CSS_BINARY_ID_VIDEO_CONT_BDS_300_600_ISP2_MIN 47 -#define SH_CSS_BINARY_ID_VIDEO_CONT_DPC_BDS150_ISP2_MIN 48 -#define SH_CSS_BINARY_ID_VIDEO_CONT_BDS150_ISP2_MIN 49 -#define SH_CSS_BINARY_ID_VIDEO_CONT_DPC_BDS200_ISP2_MIN 50 -#define SH_CSS_BINARY_ID_VIDEO_CONT_BDS200_ISP2_MIN 51 -#define SH_CSS_BINARY_ID_VIDEO_CONT_NOBDS_ISP2_MIN 52 -#define SH_CSS_BINARY_ID_VIDEO_DZ_ISP2_MIN 53 -#define SH_CSS_BINARY_ID_VIDEO_DZ_ISP2 54 -#define SH_CSS_BINARY_ID_VIDEO_LP_ISP2 55 -#define SH_CSS_BINARY_ID_RESERVED1 56 -#define SH_CSS_BINARY_ID_ACCELERATION 57 -#define SH_CSS_BINARY_ID_PRE_DE_ISP2 58 -#define SH_CSS_BINARY_ID_KERNEL_TEST_LOAD_STORE 59 -#define SH_CSS_BINARY_ID_CAPTURE_PP_BLI 60 -#define SH_CSS_BINARY_ID_CAPTURE_PP_LDC 61 -#ifdef ISP2401 -#define SH_CSS_BINARY_ID_PRIMARY_STRIPED_ISP2_XNR 62 -#endif - -/* skycam kerneltest pipelines */ -#ifndef ISP2401 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_NORM 120 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_NORM_STRIPED 121 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_LIN 122 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_LIN_STRIPED 123 -#define 
SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_SHD 124 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_SHD_STRIPED 125 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AWB 126 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_3A 127 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_3A_STRIPED 128 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AF 129 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OBGRID 130 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_BAYER_DENOISE 131 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_BAYER_DENOISE_STRIPED 132 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_DEMOSAIC 133 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP1_C0 134 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP2 135 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_REF 136 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_REF_STRIPED 137 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_REF 138 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DVS 139 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR 140 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_STRIPED 141 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_BLENDING 142 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR_BLOCK 143 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AE 144 -#define SH_CSS_BINARY_ID_VIDEO_RAW 145 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AWB_FR 146 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DM_RGBPP 147 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DM_RGBPP_STRIPED 148 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_ANR 149 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_IF 150 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_IF_STRIPED 151 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SYSTEM 152 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR_STRIPED 153 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DVS_STRIPED 154 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OBGRID_STRIPED 155 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV 156 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV_BLOCK 157 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV16_BLOCK 158 -#define 
SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV16_STRIPED 159 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_BLOCK_STRIPED 160 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_INPUT_YUV 161 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_YUV 162 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_YUV_16 163 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SPLIT 164 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SYSTEM_STRIPED 165 - -#else -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_NORM 121 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_NORM_STRIPED 122 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OBGRID 123 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OBGRID_STRIPED 124 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_LIN 125 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_LIN_STRIPED 126 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_SHD 127 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_SHD_STRIPED 128 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AE 129 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AWB 130 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AF 131 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AWB_FR 132 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_3A 133 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_3A_STRIPED 134 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_BAYER_DENOISE 135 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_BAYER_DENOISE_STRIPED 136 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_ANR 137 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_ANR_STRIPED 138 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_DEMOSAIC 139 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DM_RGBPP 140 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DM_RGBPP_STRIPED 141 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP1_C0 142 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP2 143 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP2_STRIPED 144 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_REF 145 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR 146 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_STRIPED 147 -#define 
SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_BLENDING 148 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_REF 149 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_REF_STRIPED 150 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DVS 151 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DVS_STRIPED 152 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_DVS_STAT_C0 153 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR_BLOCK 154 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR_STRIPED 155 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SYSTEM 156 -#define SH_CSS_BINARY_ID_VIDEO_RAW 157 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV 158 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV_BLOCK 159 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV16_BLOCK 160 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV16_STRIPED 161 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_BLOCK_STRIPED 162 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_INPUT_YUV 163 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_YUV 164 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_YUV_16 165 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SPLIT 166 -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SYSTEM_STRIPED 167 -#define SH_CSS_BINARY_ID_COPY_KERNELTEST_OUTPUT_SYSTEM 168 -#endif - -/* skycam partial test pipelines*/ -#ifndef ISP2401 -#define SH_CSS_BINARY_ID_IF_TO_DPC 201 -#define SH_CSS_BINARY_ID_IF_TO_BDS 202 -#else -#define SH_CSS_BINARY_ID_IF_TO_BDS 201 -#define SH_CSS_BINARY_ID_IF_TO_BDS_STRIPED 202 -#endif -#define SH_CSS_BINARY_ID_IF_TO_NORM 203 -#ifndef ISP2401 -#define SH_CSS_BINARY_ID_IF_TO_OB 204 -#define SH_CSS_BINARY_ID_IF_TO_LIN 205 -#define SH_CSS_BINARY_ID_IF_TO_SHD 206 -#define SH_CSS_BINARY_ID_IF_TO_BNR 207 -#define SH_CSS_BINARY_ID_IF_TO_RGBPP_NV12_16 208 -#define SH_CSS_BINARY_ID_IF_TO_RGBPP 210 -#define SH_CSS_BINARY_ID_IF_TO_YUVP1 211 -#define SH_CSS_BINARY_ID_IF_TO_DM 214 -#define SH_CSS_BINARY_ID_IF_TO_YUVP2_C0 216 -#define SH_CSS_BINARY_ID_IF_TO_YUVP2_ANR_VIA_ISP 217 -#define 
SH_CSS_BINARY_ID_VIDEO_IF_TO_DVS 218 -#define SH_CSS_BINARY_ID_VIDEO_IF_TO_TNR 219 -#define SH_CSS_BINARY_ID_IF_TO_BDS_STRIPED 224 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_ANR_STRIPED 225 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP2_STRIPED 227 -#define SH_CSS_BINARY_ID_IF_TO_BDS_RGBP_DVS_STAT_C0 228 -#define SH_CSS_BINARY_ID_IF_TO_BDS_RGBP_DVS_STAT_C0_STRIPED 229 -#define SH_CSS_BINARY_ID_IF_TO_REF 236 -#define SH_CSS_BINARY_ID_IF_TO_DVS_STRIPED 237 -#define SH_CSS_BINARY_ID_IF_TO_YUVP2_STRIPED 238 -#define SH_CSS_BINARY_ID_IF_TO_YUVP1_STRIPED 239 -#define SH_CSS_BINARY_ID_IF_TO_RGBPP_STRIPED 240 -#define SH_CSS_BINARY_ID_IF_TO_ANR_STRIPED 241 -#define SH_CSS_BINARY_ID_IF_TO_BNR_STRIPED 242 -#define SH_CSS_BINARY_ID_IF_TO_SHD_STRIPED 243 -#define SH_CSS_BINARY_ID_IF_TO_LIN_STRIPED 244 -#define SH_CSS_BINARY_ID_IF_TO_OB_STRIPED 245 -#define SH_CSS_BINARY_ID_IF_TO_NORM_STRIPED 248 -#define SH_CSS_BINARY_ID_COPY_KERNELTEST_OUTPUT_SYSTEM 253 -#define SH_CSS_BINARY_ID_IF_TO_XNR 256 -#define SH_CSS_BINARY_ID_IF_TO_XNR_STRIPED 257 -#define SH_CSS_BINARY_ID_IF_TO_REF_STRIPED 258 -#define SH_CSS_BINARY_ID_VIDEO_IF_TO_OSYS 259 -#define SH_CSS_BINARY_ID_IF_TO_YUVP1_C0 262 -#define SH_CSS_BINARY_ID_IF_TO_XNR_PRIMARY 263 -#define SH_CSS_BINARY_ID_IF_TO_XNR_PRIMARY_STRIPED 264 -#define SH_CSS_BINARY_ID_IF_TO_ANR 265 -#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_DVS_STAT_C0 266 -#define SH_CSS_BINARY_ID_VIDEO_IF_TO_OSYS_STRIPED 270 -#define SH_CSS_BINARY_ID_IF_TO_OSYS_PRIMARY 276 -#define SH_CSS_BINARY_ID_IF_TO_OSYS_PRIMARY_STRIPED 277 -#define SH_CSS_BINARY_ID_IF_TO_YUVP1_C0_STRIPED 278 -#else -#define SH_CSS_BINARY_ID_IF_TO_NORM_STRIPED 204 -#define SH_CSS_BINARY_ID_IF_TO_OB 205 -#define SH_CSS_BINARY_ID_IF_TO_OB_STRIPED 206 -#define SH_CSS_BINARY_ID_IF_TO_LIN 207 -#define SH_CSS_BINARY_ID_IF_TO_LIN_STRIPED 208 -#define SH_CSS_BINARY_ID_IF_TO_SHD 209 -#define SH_CSS_BINARY_ID_IF_TO_SHD_STRIPED 210 -#define SH_CSS_BINARY_ID_IF_TO_BNR 211 -#define SH_CSS_BINARY_ID_IF_TO_BNR_STRIPED 
212 -#define SH_CSS_BINARY_ID_IF_TO_ANR 213 -#define SH_CSS_BINARY_ID_IF_TO_ANR_STRIPED 214 -#define SH_CSS_BINARY_ID_IF_TO_DM 215 -#define SH_CSS_BINARY_ID_IF_TO_BDS_RGBP_DVS_STAT_C0 216 -#define SH_CSS_BINARY_ID_IF_TO_BDS_RGBP_DVS_STAT_C0_STRIPED 217 -#define SH_CSS_BINARY_ID_IF_TO_RGBPP 218 -#define SH_CSS_BINARY_ID_IF_TO_RGBPP_NV12_16 219 -#define SH_CSS_BINARY_ID_IF_TO_RGBPP_STRIPED 220 -#define SH_CSS_BINARY_ID_IF_TO_YUVP1 221 -#define SH_CSS_BINARY_ID_IF_TO_YUVP1_STRIPED 222 -#define SH_CSS_BINARY_ID_IF_TO_YUVP1_C0 223 -#define SH_CSS_BINARY_ID_IF_TO_YUVP2_C0 224 -#define SH_CSS_BINARY_ID_IF_TO_YUVP2_STRIPED 225 -#define SH_CSS_BINARY_ID_IF_TO_XNR 226 -#define SH_CSS_BINARY_ID_IF_TO_XNR_STRIPED 227 -#define SH_CSS_BINARY_ID_IF_TO_XNR_PRIMARY 228 -#define SH_CSS_BINARY_ID_IF_TO_XNR_PRIMARY_STRIPED 229 -#define SH_CSS_BINARY_ID_IF_TO_REF 230 -#define SH_CSS_BINARY_ID_IF_TO_REF_STRIPED 231 -#define SH_CSS_BINARY_ID_VIDEO_IF_TO_DVS 232 -#define SH_CSS_BINARY_ID_IF_TO_DVS_STRIPED 233 -#define SH_CSS_BINARY_ID_VIDEO_IF_TO_TNR 234 -#define SH_CSS_BINARY_ID_VIDEO_IF_TO_OSYS 235 -#define SH_CSS_BINARY_ID_VIDEO_IF_TO_OSYS_STRIPED 236 -#define SH_CSS_BINARY_ID_IF_TO_OSYS_PRIMARY 237 -#define SH_CSS_BINARY_ID_IF_TO_OSYS_PRIMARY_STRIPED 238 -#define SH_CSS_BINARY_ID_IF_TO_YUVP1_C0_STRIPED 239 -#define SH_CSS_BINARY_ID_VIDEO_YUVP1_TO_OSYS 240 -#define SH_CSS_BINARY_ID_IF_TO_OSYS_PREVIEW 241 -#define SH_CSS_BINARY_ID_IF_TO_OSYS_PREVIEW_STRIPED 242 -#endif - -/* Skycam IR camera binaries */ -#ifndef ISP2401 -#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS_NO_XNR 300 -#define SH_CSS_BINARY_ID_VIDEO_IR_IF_TO_OSYS_NO_DVS_NO_TNR_NO_XNR 301 -#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS_NO_XNR_NO_DVS_PRIMARY 302 -#else -#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS 300 -#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS_NO_TNR3 301 -#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS_PRIMARY 302 - -/* Binaries under development */ -#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR3 401 -#define 
SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR3_STRIPED 402 - -#endif - -#define XMEM_WIDTH_BITS HIVE_ISP_DDR_WORD_BITS -#define XMEM_SHORTS_PER_WORD (HIVE_ISP_DDR_WORD_BITS/16) -#define XMEM_INTS_PER_WORD (HIVE_ISP_DDR_WORD_BITS/32) -#define XMEM_POW2_BYTES_PER_WORD HIVE_ISP_DDR_WORD_BYTES - -#define BITS8_ELEMENTS_PER_XMEM_ADDR CEIL_DIV(XMEM_WIDTH_BITS, 8) -#define BITS16_ELEMENTS_PER_XMEM_ADDR CEIL_DIV(XMEM_WIDTH_BITS, 16) - -#if ISP_VEC_NELEMS == 64 -#define ISP_NWAY_LOG2 6 -#elif ISP_VEC_NELEMS == 32 -#define ISP_NWAY_LOG2 5 -#elif ISP_VEC_NELEMS == 16 -#define ISP_NWAY_LOG2 4 -#elif ISP_VEC_NELEMS == 8 -#define ISP_NWAY_LOG2 3 -#else -#error "isp_const.h ISP_VEC_NELEMS must be one of {8, 16, 32, 64}" -#endif - -/* ***************************** - * ISP input/output buffer sizes - * ****************************/ -/* input image */ -#define INPUT_BUF_DMA_HEIGHT 2 -#define INPUT_BUF_HEIGHT 2 /* double buffer */ -#define OUTPUT_BUF_DMA_HEIGHT 2 -#define OUTPUT_BUF_HEIGHT 2 /* double buffer */ -#define OUTPUT_NUM_TRANSFERS 4 - -/* GDC accelerator: Up/Down Scaling */ -/* These should be moved to the gdc_defs.h in the device */ -#define UDS_SCALING_N HRT_GDC_N -/* AB: This should cover the zooming up to 16MP */ -#define UDS_MAX_OXDIM 5000 -/* We support maximally 2 planes with different parameters - - luma and chroma (YUV420) */ -#define UDS_MAX_PLANES 2 -#define UDS_BLI_BLOCK_HEIGHT 2 -#define UDS_BCI_BLOCK_HEIGHT 4 -#define UDS_BLI_INTERP_ENVELOPE 1 -#define UDS_BCI_INTERP_ENVELOPE 3 -#define UDS_MAX_ZOOM_FAC 64 -/* Make it always one FPGA vector. 
- Four FPGA vectors are required and - four of them fit in one ASIC vector.*/ -#define UDS_MAX_CHUNKS 16 - -#define ISP_LEFT_PADDING _ISP_LEFT_CROP_EXTRA(ISP_LEFT_CROPPING) -#define ISP_LEFT_PADDING_VECS CEIL_DIV(ISP_LEFT_PADDING, ISP_VEC_NELEMS) -/* in case of continuous the croppong of the current binary doesn't matter for the buffer calculation, but the cropping of the sp copy should be used */ -#define ISP_LEFT_PADDING_CONT _ISP_LEFT_CROP_EXTRA(SH_CSS_MAX_LEFT_CROPPING) -#define ISP_LEFT_PADDING_VECS_CONT CEIL_DIV(ISP_LEFT_PADDING_CONT, ISP_VEC_NELEMS) - -#define CEIL_ROUND_DIV_STRIPE(width, stripe, padding) \ - CEIL_MUL(padding + CEIL_DIV(width - padding, stripe), ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS)?4:2)) - -/* output (Y,U,V) image, 4:2:0 */ -#define MAX_VECTORS_PER_LINE \ - CEIL_ROUND_DIV_STRIPE(CEIL_DIV(ISP_MAX_INTERNAL_WIDTH, ISP_VEC_NELEMS), \ - ISP_NUM_STRIPES, \ - ISP_LEFT_PADDING_VECS) - -/* - * ITERATOR_VECTOR_INCREMENT' explanation: - * when striping an even number of iterations, one of the stripes is - * one iteration wider than the other to account for overlap - * so the calc for the output buffer vmem size is: - * ((width[vectors]/num_of_stripes) + 2[vectors]) - */ -#define MAX_VECTORS_PER_OUTPUT_LINE \ - CEIL_DIV(CEIL_DIV(ISP_MAX_OUTPUT_WIDTH, ISP_NUM_STRIPES) + ISP_LEFT_PADDING, ISP_VEC_NELEMS) - -/* Must be even due to interlaced bayer input */ -#define MAX_VECTORS_PER_INPUT_LINE CEIL_MUL((CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS) + ISP_LEFT_PADDING_VECS), 2) -#define MAX_VECTORS_PER_INPUT_STRIPE CEIL_ROUND_DIV_STRIPE(MAX_VECTORS_PER_INPUT_LINE, \ - ISP_NUM_STRIPES, \ - ISP_LEFT_PADDING_VECS) - - -/* Add 2 for left croppping */ -#define MAX_SP_RAW_COPY_VECTORS_PER_INPUT_LINE (CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS) + 2) - -#define MAX_VECTORS_PER_BUF_LINE \ - (MAX_VECTORS_PER_LINE + DUMMY_BUF_VECTORS) -#define MAX_VECTORS_PER_BUF_INPUT_LINE \ - (MAX_VECTORS_PER_INPUT_STRIPE + DUMMY_BUF_VECTORS) -#define 
MAX_OUTPUT_Y_FRAME_WIDTH \ - (MAX_VECTORS_PER_LINE * ISP_VEC_NELEMS) -#define MAX_OUTPUT_Y_FRAME_SIMDWIDTH \ - MAX_VECTORS_PER_LINE -#define MAX_OUTPUT_C_FRAME_WIDTH \ - (MAX_OUTPUT_Y_FRAME_WIDTH / 2) -#define MAX_OUTPUT_C_FRAME_SIMDWIDTH \ - CEIL_DIV(MAX_OUTPUT_C_FRAME_WIDTH, ISP_VEC_NELEMS) - -/* should be even */ -#define NO_CHUNKING (OUTPUT_NUM_CHUNKS == 1) - -#define MAX_VECTORS_PER_CHUNK \ - (NO_CHUNKING ? MAX_VECTORS_PER_LINE \ - : 2*CEIL_DIV(MAX_VECTORS_PER_LINE, \ - 2*OUTPUT_NUM_CHUNKS)) - -#define MAX_C_VECTORS_PER_CHUNK \ - (MAX_VECTORS_PER_CHUNK/2) - -/* should be even */ -#define MAX_VECTORS_PER_OUTPUT_CHUNK \ - (NO_CHUNKING ? MAX_VECTORS_PER_OUTPUT_LINE \ - : 2*CEIL_DIV(MAX_VECTORS_PER_OUTPUT_LINE, \ - 2*OUTPUT_NUM_CHUNKS)) - -#define MAX_C_VECTORS_PER_OUTPUT_CHUNK \ - (MAX_VECTORS_PER_OUTPUT_CHUNK/2) - - - -/* should be even */ -#define MAX_VECTORS_PER_INPUT_CHUNK \ - (INPUT_NUM_CHUNKS == 1 ? MAX_VECTORS_PER_INPUT_STRIPE \ - : 2*CEIL_DIV(MAX_VECTORS_PER_INPUT_STRIPE, \ - 2*OUTPUT_NUM_CHUNKS)) - -#define DEFAULT_C_SUBSAMPLING 2 - -/****** DMA buffer properties */ - -#define RAW_BUF_LINES ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS) ? 4 : 2) - -#define RAW_BUF_STRIDE \ - (BINARY_ID == SH_CSS_BINARY_ID_POST_ISP ? MAX_VECTORS_PER_INPUT_CHUNK : \ - ISP_NUM_STRIPES > 1 ? MAX_VECTORS_PER_INPUT_STRIPE+_ISP_EXTRA_PADDING_VECS : \ - !ENABLE_CONTINUOUS ? 
MAX_VECTORS_PER_INPUT_LINE : \ - MAX_VECTORS_PER_INPUT_CHUNK) - -/* [isp vmem] table size[vectors] per line per color (GR,R,B,GB), - multiples of NWAY */ -#define SCTBL_VECTORS_PER_LINE_PER_COLOR \ - CEIL_DIV(SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS) -/* [isp vmem] table size[vectors] per line for 4colors (GR,R,B,GB), - multiples of NWAY */ -#define SCTBL_VECTORS_PER_LINE \ - (SCTBL_VECTORS_PER_LINE_PER_COLOR * IA_CSS_SC_NUM_COLORS) - -/*************/ - -/* Format for fixed primaries */ - -#define ISP_FIXED_PRIMARY_FORMAT IA_CSS_FRAME_FORMAT_NV12 - -#endif /* _COMMON_ISP_CONST_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_types.h deleted file mode 100644 index 37a7d28f6d9f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_types.h +++ /dev/null @@ -1,128 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. 
- -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _ISP_TYPES_H_ -#define _ISP_TYPES_H_ - -/* Workaround: hivecc complains about "tag "sh_css_3a_output" already declared" - without this extra decl. */ -struct ia_css_3a_output; - -#if defined(__ISP) -struct isp_uds_config { - int hive_dx; - int hive_dy; - unsigned hive_woix; - unsigned hive_bpp; /* gdc_bits_per_pixel */ - unsigned hive_bci; -}; - -struct s_isp_gdcac_config { - unsigned nbx; - unsigned nby; -}; - -/* output.hive.c request information */ -typedef enum { - output_y_channel, - output_c_channel, - OUTPUT_NUM_CHANNELS -} output_channel_type; - -typedef struct s_output_dma_info { - unsigned cond; /* Condition for transfer */ - output_channel_type channel_type; - dma_channel channel; - unsigned width_a; - unsigned width_b; - unsigned stride; - unsigned v_delta; /* Offset for v address to do cropping */ - char *x_base; /* X base address */ -} output_dma_info_type; -#endif - -/* Input stream formats, these correspond to the MIPI formats and the way - * the CSS receiver sends these to the input formatter. - * The bit depth of each pixel element is stored in the global variable - * isp_bits_per_pixel. - * NOTE: for rgb565, we set isp_bits_per_pixel to 565, for all other rgb - * formats it's the actual depth (4, for 444, 8 for 888 etc). - */ -enum sh_stream_format { - sh_stream_format_yuv420_legacy, - sh_stream_format_yuv420, - sh_stream_format_yuv422, - sh_stream_format_rgb, - sh_stream_format_raw, - sh_stream_format_binary, /* bytestream such as jpeg */ -}; - -struct s_isp_frames { - /* global variables that are written to by either the SP or the host, - every ISP binary needs these. 
*/ - /* output frame */ - char *xmem_base_addr_y; - char *xmem_base_addr_uv; - char *xmem_base_addr_u; - char *xmem_base_addr_v; - /* 2nd output frame */ - char *xmem_base_addr_second_out_y; - char *xmem_base_addr_second_out_u; - char *xmem_base_addr_second_out_v; - /* input yuv frame */ - char *xmem_base_addr_y_in; - char *xmem_base_addr_u_in; - char *xmem_base_addr_v_in; - /* input raw frame */ - char *xmem_base_addr_raw; - /* output raw frame */ - char *xmem_base_addr_raw_out; - /* viewfinder output (vf_veceven) */ - char *xmem_base_addr_vfout_y; - char *xmem_base_addr_vfout_u; - char *xmem_base_addr_vfout_v; - /* overlay frame (for vf_pp) */ - char *xmem_base_addr_overlay_y; - char *xmem_base_addr_overlay_u; - char *xmem_base_addr_overlay_v; - /* pre-gdc output frame (gdc input) */ - char *xmem_base_addr_qplane_r; - char *xmem_base_addr_qplane_ratb; - char *xmem_base_addr_qplane_gr; - char *xmem_base_addr_qplane_gb; - char *xmem_base_addr_qplane_b; - char *xmem_base_addr_qplane_batr; - /* YUV as input, used by postisp binary */ - char *xmem_base_addr_yuv_16_y; - char *xmem_base_addr_yuv_16_u; - char *xmem_base_addr_yuv_16_v; -}; - -#endif /* _ISP_TYPES_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c deleted file mode 100644 index 6512a1ceb9d3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c +++ /dev/null @@ -1,81 +0,0 @@ -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for -more details. -*/ -#include "memory_realloc.h" -#include "ia_css_debug.h" -#include "ia_css_refcount.h" -#include "memory_access.h" - -static bool realloc_isp_css_mm_buf( - hrt_vaddress *curr_buf, - size_t *curr_size, - size_t needed_size, - bool force, - enum ia_css_err *err, - uint16_t mmgr_attribute); - - -bool reallocate_buffer( - hrt_vaddress *curr_buf, - size_t *curr_size, - size_t needed_size, - bool force, - enum ia_css_err *err) -{ - bool ret; - uint16_t mmgr_attribute = MMGR_ATTRIBUTE_DEFAULT; - - IA_CSS_ENTER_PRIVATE("void"); - - ret = realloc_isp_css_mm_buf(curr_buf, - curr_size, needed_size, force, err, mmgr_attribute); - - IA_CSS_LEAVE_PRIVATE("ret=%d", ret); - return ret; -} - -static bool realloc_isp_css_mm_buf( - hrt_vaddress *curr_buf, - size_t *curr_size, - size_t needed_size, - bool force, - enum ia_css_err *err, - uint16_t mmgr_attribute) -{ - int32_t id; - - *err = IA_CSS_SUCCESS; - /* Possible optimization: add a function sh_css_isp_css_mm_realloc() - * and implement on top of hmm. 
*/ - - IA_CSS_ENTER_PRIVATE("void"); - - if (ia_css_refcount_is_single(*curr_buf) && !force && *curr_size >= needed_size) { - IA_CSS_LEAVE_PRIVATE("false"); - return false; - } - - id = IA_CSS_REFCOUNT_PARAM_BUFFER; - ia_css_refcount_decrement(id, *curr_buf); - *curr_buf = ia_css_refcount_increment(id, mmgr_alloc_attr(needed_size, - mmgr_attribute)); - - if (!*curr_buf) { - *err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - *curr_size = 0; - } else { - *curr_size = needed_size; - } - IA_CSS_LEAVE_PRIVATE("true"); - return true; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h deleted file mode 100644 index b62c4d321a4e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h +++ /dev/null @@ -1,257 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _IA_CSS_BINARY_H_ -#define _IA_CSS_BINARY_H_ - -#include -#include "ia_css_types.h" -#include "ia_css_err.h" -#include "ia_css_stream_format.h" -#include "ia_css_stream_public.h" -#include "ia_css_frame_public.h" -#include "sh_css_metrics.h" -#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h" - -/* The binary mode is used in pre-processor expressions so we cannot - * use an enum here. */ -#define IA_CSS_BINARY_MODE_COPY 0 -#define IA_CSS_BINARY_MODE_PREVIEW 1 -#define IA_CSS_BINARY_MODE_PRIMARY 2 -#define IA_CSS_BINARY_MODE_VIDEO 3 -#define IA_CSS_BINARY_MODE_PRE_ISP 4 -#define IA_CSS_BINARY_MODE_GDC 5 -#define IA_CSS_BINARY_MODE_POST_ISP 6 -#define IA_CSS_BINARY_MODE_ANR 7 -#define IA_CSS_BINARY_MODE_CAPTURE_PP 8 -#define IA_CSS_BINARY_MODE_VF_PP 9 -#define IA_CSS_BINARY_MODE_PRE_DE 10 -#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0 11 -#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1 12 -#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2 13 -#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3 14 -#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4 15 -#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5 16 -#define IA_CSS_BINARY_NUM_MODES 17 - -#define MAX_NUM_PRIMARY_STAGES 6 -#define NUM_PRIMARY_HQ_STAGES 6 /* number of primary stages for ISP2.6.1 high quality pipe */ -#define NUM_PRIMARY_STAGES 1 /* number of primary satges for ISP1/ISP2.2 pipe */ - -/* Indicate where binaries can read input from */ -#define IA_CSS_BINARY_INPUT_SENSOR 0 -#define IA_CSS_BINARY_INPUT_MEMORY 1 -#define IA_CSS_BINARY_INPUT_VARIABLE 2 - -/* Should be included without the path. - However, that requires adding the path to numerous makefiles - that have nothing to do with isp parameters. 
- */ -#include "runtime/isp_param/interface/ia_css_isp_param_types.h" - -/* now these ports only include output ports but not vf output ports */ -enum { - IA_CSS_BINARY_OUTPUT_PORT_0 = 0, - IA_CSS_BINARY_OUTPUT_PORT_1 = 1, - IA_CSS_BINARY_MAX_OUTPUT_PORTS = 2 -}; - -struct ia_css_cas_binary_descr { - unsigned int num_stage; - unsigned int num_output_stage; - struct ia_css_frame_info *in_info; - struct ia_css_frame_info *internal_out_info; - struct ia_css_frame_info *out_info; - struct ia_css_frame_info *vf_info; - bool *is_output_stage; -}; - -struct ia_css_binary_descr { - int mode; - bool online; - bool continuous; - bool striped; - bool two_ppc; - bool enable_yuv_ds; - bool enable_high_speed; - bool enable_dvs_6axis; - bool enable_reduced_pipe; - bool enable_dz; - bool enable_xnr; - bool enable_fractional_ds; - bool enable_dpc; -#ifdef ISP2401 - bool enable_luma_only; - bool enable_tnr; -#endif - bool enable_capture_pp_bli; - struct ia_css_resolution dvs_env; - enum atomisp_input_format stream_format; - struct ia_css_frame_info *in_info; /* the info of the input-frame with the - ISP required resolution. 
*/ - struct ia_css_frame_info *bds_out_info; - struct ia_css_frame_info *out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_frame_info *vf_info; - unsigned int isp_pipe_version; - unsigned int required_bds_factor; - int stream_config_left_padding; -}; - -struct ia_css_binary { - const struct ia_css_binary_xinfo *info; - enum atomisp_input_format input_format; - struct ia_css_frame_info in_frame_info; - struct ia_css_frame_info internal_frame_info; - struct ia_css_frame_info out_frame_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_resolution effective_in_frame_res; - struct ia_css_frame_info vf_frame_info; - int input_buf_vectors; - int deci_factor_log2; - int vf_downscale_log2; - int s3atbl_width; - int s3atbl_height; - int s3atbl_isp_width; - int s3atbl_isp_height; - unsigned int morph_tbl_width; - unsigned int morph_tbl_aligned_width; - unsigned int morph_tbl_height; - int sctbl_width_per_color; - int sctbl_aligned_width_per_color; - int sctbl_height; -#ifdef ISP2401 - int sctbl_legacy_width_per_color; - int sctbl_legacy_height; -#endif - struct ia_css_sdis_info dis; - struct ia_css_resolution dvs_envelope; - bool online; - unsigned int uds_xc; - unsigned int uds_yc; - unsigned int left_padding; - struct sh_css_binary_metrics metrics; - struct ia_css_isp_param_host_segments mem_params; - struct ia_css_isp_param_css_segments css_params; -}; - -#define IA_CSS_BINARY_DEFAULT_SETTINGS \ -(struct ia_css_binary) { \ - .input_format = ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY, \ - .in_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \ - .internal_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \ - .out_frame_info = {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \ - .vf_frame_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO, \ -} - -enum ia_css_err -ia_css_binary_init_infos(void); - -enum ia_css_err -ia_css_binary_uninit(void); - -enum ia_css_err -ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo, - bool online, - bool two_ppc, - enum atomisp_input_format stream_format, 
- const struct ia_css_frame_info *in_info, - const struct ia_css_frame_info *bds_out_info, - const struct ia_css_frame_info *out_info[], - const struct ia_css_frame_info *vf_info, - struct ia_css_binary *binary, - struct ia_css_resolution *dvs_env, - int stream_config_left_padding, - bool accelerator); - -enum ia_css_err -ia_css_binary_find(struct ia_css_binary_descr *descr, - struct ia_css_binary *binary); - -/* @brief Get the shading information of the specified shading correction type. - * - * @param[in] binary: The isp binary which has the shading correction. - * @param[in] type: The shading correction type. - * @param[in] required_bds_factor: The bayer downscaling factor required in the pipe. - * @param[in] stream_config: The stream configuration. -#ifndef ISP2401 - * @param[out] info: The shading information. -#else - * @param[out] shading_info: The shading information. - * The shading information necessary as API is stored in the shading_info. -#endif - * The driver needs to get this information to generate -#ifndef ISP2401 - * the shading table directly required in the isp. -#else - * the shading table directly required from ISP. - * @param[out] pipe_config: The pipe configuration. - * The shading information related to ISP (but, not necessary as API) is stored in the pipe_config. -#endif - * @return IA_CSS_SUCCESS or error code upon error. 
- * - */ -enum ia_css_err -ia_css_binary_get_shading_info(const struct ia_css_binary *binary, - enum ia_css_shading_correction_type type, - unsigned int required_bds_factor, - const struct ia_css_stream_config *stream_config, -#ifndef ISP2401 - struct ia_css_shading_info *info); -#else - struct ia_css_shading_info *shading_info, - struct ia_css_pipe_config *pipe_config); -#endif - -enum ia_css_err -ia_css_binary_3a_grid_info(const struct ia_css_binary *binary, - struct ia_css_grid_info *info, - struct ia_css_pipe *pipe); - -void -ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary, - struct ia_css_grid_info *info, - struct ia_css_pipe *pipe); - -void -ia_css_binary_dvs_stat_grid_info( - const struct ia_css_binary *binary, - struct ia_css_grid_info *info, - struct ia_css_pipe *pipe); - -unsigned -ia_css_binary_max_vf_width(void); - -void -ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary); - -void -ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries, - uint32_t *num_isp_binaries); - -#endif /* _IA_CSS_BINARY_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c deleted file mode 100644 index 0cd6e1da43cf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c +++ /dev/null @@ -1,1838 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include /* HR_GDC_N */ -#include "isp.h" /* ISP_VEC_NELEMS */ - -#include "ia_css_binary.h" -#include "ia_css_debug.h" -#include "ia_css_util.h" -#include "ia_css_isp_param.h" -#include "sh_css_internal.h" -#include "sh_css_sp.h" -#include "sh_css_firmware.h" -#include "sh_css_defs.h" -#include "sh_css_legacy.h" - -#include "vf/vf_1.0/ia_css_vf.host.h" -#ifdef ISP2401 -#include "sc/sc_1.0/ia_css_sc.host.h" -#endif -#include "sdis/sdis_1.0/ia_css_sdis.host.h" -#ifdef ISP2401 -#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" /* FRAC_ACC */ -#endif - -#include "camera/pipe/interface/ia_css_pipe_binarydesc.h" - -#include "memory_access.h" - -#include "assert_support.h" - -#define IMPLIES(a, b) (!(a) || (b)) /* A => B */ - -static struct ia_css_binary_xinfo *all_binaries; /* ISP binaries only (no SP) */ -static struct ia_css_binary_xinfo - *binary_infos[IA_CSS_BINARY_NUM_MODES] = { NULL, }; - -static void -ia_css_binary_dvs_env(const struct ia_css_binary_info *info, - const struct ia_css_resolution *dvs_env, - struct ia_css_resolution *binary_dvs_env) -{ - if (info->enable.dvs_envelope) { - assert(dvs_env != NULL); - binary_dvs_env->width = max(dvs_env->width, SH_CSS_MIN_DVS_ENVELOPE); - binary_dvs_env->height = max(dvs_env->height, SH_CSS_MIN_DVS_ENVELOPE); - } -} - -static void -ia_css_binary_internal_res(const struct ia_css_frame_info *in_info, - const struct ia_css_frame_info *bds_out_info, - const struct ia_css_frame_info *out_info, - const struct ia_css_resolution *dvs_env, - const struct ia_css_binary_info *info, - struct ia_css_resolution *internal_res) -{ - unsigned int isp_tmp_internal_width = 0, - isp_tmp_internal_height = 0; - bool binary_supports_yuv_ds = info->enable.ds & 2; - struct ia_css_resolution binary_dvs_env; - - binary_dvs_env.width = 0; - binary_dvs_env.height = 0; - ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env); - - if (binary_supports_yuv_ds) { - if (in_info != NULL) { - isp_tmp_internal_width = 
in_info->res.width - + info->pipeline.left_cropping + binary_dvs_env.width; - isp_tmp_internal_height = in_info->res.height - + info->pipeline.top_cropping + binary_dvs_env.height; - } - } else if ((bds_out_info != NULL) && (out_info != NULL) && - /* TODO: hack to make video_us case work. this should be reverted after - a nice solution in ISP */ - (bds_out_info->res.width >= out_info->res.width)) { - isp_tmp_internal_width = bds_out_info->padded_width; - isp_tmp_internal_height = bds_out_info->res.height; - } else { - if (out_info != NULL) { - isp_tmp_internal_width = out_info->padded_width; - isp_tmp_internal_height = out_info->res.height; - } - } - - /* We first calculate the resolutions used by the ISP. After that, - * we use those resolutions to compute sizes for tables etc. */ - internal_res->width = __ISP_INTERNAL_WIDTH(isp_tmp_internal_width, - (int)binary_dvs_env.width, - info->pipeline.left_cropping, info->pipeline.mode, - info->pipeline.c_subsampling, - info->output.num_chunks, info->pipeline.pipelining); - internal_res->height = __ISP_INTERNAL_HEIGHT(isp_tmp_internal_height, - info->pipeline.top_cropping, - binary_dvs_env.height); -} - -#ifndef ISP2401 -/* Computation results of the origin coordinate of bayer on the shading table. */ -struct sh_css_shading_table_bayer_origin_compute_results { - uint32_t bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of bayer scaling. */ - uint32_t bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of bayer scaling. */ - uint32_t bayer_scale_ver_ratio_in; /* Vertical ratio (in) of bayer scaling. */ - uint32_t bayer_scale_ver_ratio_out; /* Vertical ratio (out) of bayer scaling. */ - uint32_t sc_bayer_origin_x_bqs_on_shading_table; /* X coordinate (in bqs) of bayer origin on shading table. */ - uint32_t sc_bayer_origin_y_bqs_on_shading_table; /* Y coordinate (in bqs) of bayer origin on shading table. */ -#else -/* Requirements for the shading correction. 
*/ -struct sh_css_binary_sc_requirements { - /* Bayer scaling factor, for the scaling which is applied before shading correction. */ - uint32_t bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of scaling applied BEFORE shading correction. */ - uint32_t bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of scaling applied BEFORE shading correction. */ - uint32_t bayer_scale_ver_ratio_in; /* Vertical ratio (in) of scaling applied BEFORE shading correction. */ - uint32_t bayer_scale_ver_ratio_out; /* Vertical ratio (out) of scaling applied BEFORE shading correction. */ - - /* ISP internal frame is composed of the real sensor data and the padding data. */ - uint32_t sensor_data_origin_x_bqs_on_internal; /* X origin (in bqs) of sensor data on internal frame - at shading correction. */ - uint32_t sensor_data_origin_y_bqs_on_internal; /* Y origin (in bqs) of sensor data on internal frame - at shading correction. */ -#endif -}; - -/* Get the requirements for the shading correction. */ -static enum ia_css_err -#ifndef ISP2401 -ia_css_binary_compute_shading_table_bayer_origin( - const struct ia_css_binary *binary, /* [in] */ - unsigned int required_bds_factor, /* [in] */ - const struct ia_css_stream_config *stream_config, /* [in] */ - struct sh_css_shading_table_bayer_origin_compute_results *res) /* [out] */ -#else -sh_css_binary_get_sc_requirements( - const struct ia_css_binary *binary, /* [in] */ - unsigned int required_bds_factor, /* [in] */ - const struct ia_css_stream_config *stream_config, /* [in] */ - struct sh_css_binary_sc_requirements *scr) /* [out] */ -#endif -{ - enum ia_css_err err; - -#ifndef ISP2401 - /* Numerator and denominator of the fixed bayer downscaling factor. - (numerator >= denominator) */ -#else - /* Numerator and denominator of the fixed bayer downscaling factor. (numerator >= denominator) */ -#endif - unsigned int bds_num, bds_den; - -#ifndef ISP2401 - /* Horizontal/Vertical ratio of bayer scaling - between input area and output area. 
*/ - unsigned int bs_hor_ratio_in; - unsigned int bs_hor_ratio_out; - unsigned int bs_ver_ratio_in; - unsigned int bs_ver_ratio_out; -#else - /* Horizontal/Vertical ratio of bayer scaling between input area and output area. */ - unsigned int bs_hor_ratio_in, bs_hor_ratio_out, bs_ver_ratio_in, bs_ver_ratio_out; -#endif - - /* Left padding set by InputFormatter. */ -#ifndef ISP2401 - unsigned int left_padding_bqs; /* in bqs */ -#else - unsigned int left_padding_bqs; -#endif - -#ifndef ISP2401 - /* Flag for the NEED_BDS_FACTOR_2_00 macro defined in isp kernels. */ - unsigned int need_bds_factor_2_00; - - /* Left padding adjusted inside the isp. */ - unsigned int left_padding_adjusted_bqs; /* in bqs */ - - /* Bad pixels caused by filters. - NxN-filter (before/after bayer scaling) moves the image position - to right/bottom directions by a few pixels. - It causes bad pixels at left/top sides, - and effective bayer size decreases. */ - unsigned int bad_bqs_on_left_before_bs; /* in bqs */ - unsigned int bad_bqs_on_left_after_bs; /* in bqs */ - unsigned int bad_bqs_on_top_before_bs; /* in bqs */ - unsigned int bad_bqs_on_top_after_bs; /* in bqs */ - - /* Get the numerator and denominator of bayer downscaling factor. */ - err = sh_css_bds_factor_get_numerator_denominator - (required_bds_factor, &bds_num, &bds_den); - if (err != IA_CSS_SUCCESS) -#else - /* Flags corresponding to NEED_BDS_FACTOR_2_00/NEED_BDS_FACTOR_1_50/NEED_BDS_FACTOR_1_25 macros - * defined in isp kernels. */ - unsigned int need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25; - - /* Left padding adjusted inside the isp kernels. */ - unsigned int left_padding_adjusted_bqs; - - /* Top padding padded inside the isp kernel for bayer downscaling binaries. */ - unsigned int top_padding_bqs; - - /* Bayer downscaling factor 1.0 by fixed-point. */ - int bds_frac_acc = FRAC_ACC; /* FRAC_ACC is defined in ia_css_fixedbds_param.h. 
*/ - - /* Right/Down shift amount caused by filters applied BEFORE shading corrertion. */ - unsigned int right_shift_bqs_before_bs; /* right shift before bayer scaling */ - unsigned int right_shift_bqs_after_bs; /* right shift after bayer scaling */ - unsigned int down_shift_bqs_before_bs; /* down shift before bayer scaling */ - unsigned int down_shift_bqs_after_bs; /* down shift after bayer scaling */ - - /* Origin of the real sensor data area on the internal frame at shading correction. */ - unsigned int sensor_data_origin_x_bqs_on_internal; - unsigned int sensor_data_origin_y_bqs_on_internal; - - IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p", - binary, required_bds_factor, stream_config); - - /* Get the numerator and denominator of the required bayer downscaling factor. */ - err = sh_css_bds_factor_get_numerator_denominator(required_bds_factor, &bds_num, &bds_den); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); -#endif - return err; -#ifdef ISP2401 - } -#endif - -#ifndef ISP2401 - /* Set the horizontal/vertical ratio of bayer scaling - between input area and output area. */ -#else - IA_CSS_LOG("bds_num=%d, bds_den=%d", bds_num, bds_den); - - /* Set the horizontal/vertical ratio of bayer scaling between input area and output area. */ -#endif - bs_hor_ratio_in = bds_num; - bs_hor_ratio_out = bds_den; - bs_ver_ratio_in = bds_num; - bs_ver_ratio_out = bds_den; - -#ifndef ISP2401 - /* Set the left padding set by InputFormatter. (ifmtr.c) */ -#else - /* Set the left padding set by InputFormatter. 
(ia_css_ifmtr_configure() in ifmtr.c) */ -#endif - if (stream_config->left_padding == -1) - left_padding_bqs = _ISP_BQS(binary->left_padding); - else -#ifndef ISP2401 - left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS - - _ISP_BQS(stream_config->left_padding)); -#else - left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS - _ISP_BQS(stream_config->left_padding)); -#endif - -#ifndef ISP2401 - /* Set the left padding adjusted inside the isp. - When bds_factor 2.00 is needed, some padding is added to left_padding - inside the isp, before bayer downscaling. (raw.isp.c) - (Hopefully, left_crop/left_padding/top_crop should be defined in css - appropriately, depending on bds_factor.) - */ -#else - IA_CSS_LOG("stream.left_padding=%d, binary.left_padding=%d, left_padding_bqs=%d", - stream_config->left_padding, binary->left_padding, left_padding_bqs); - - /* Set the left padding adjusted inside the isp kernels. - * When the bds_factor isn't 1.00, the left padding size is adjusted inside the isp, - * before bayer downscaling. 
(scaled_hor_plane_index(), raw_compute_hphase() in raw.isp.c) - */ -#endif - need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors & - (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0); - -#ifndef ISP2401 - if (need_bds_factor_2_00 && binary->info->sp.pipeline.left_cropping > 0) - left_padding_adjusted_bqs = left_padding_bqs + ISP_VEC_NELEMS; - else -#else - need_bds_factor_1_50 = ((binary->info->sp.bds.supported_bds_factors & - (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_50) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_25) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00))) != 0); - - need_bds_factor_1_25 = ((binary->info->sp.bds.supported_bds_factors & - (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_25) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) | - PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00))) != 0); - - if (binary->info->sp.pipeline.left_cropping > 0 && - (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25)) { - /* - * downscale 2.0 -> first_vec_adjusted_bqs = 128 - * downscale 1.5 -> first_vec_adjusted_bqs = 96 - * downscale 1.25 -> first_vec_adjusted_bqs = 80 - */ - unsigned int first_vec_adjusted_bqs - = ISP_VEC_NELEMS * bs_hor_ratio_in / bs_hor_ratio_out; - left_padding_adjusted_bqs = first_vec_adjusted_bqs - - _ISP_BQS(binary->info->sp.pipeline.left_cropping); - } else -#endif - left_padding_adjusted_bqs = left_padding_bqs; - -#ifndef ISP2401 - /* Currently, the bad pixel caused by filters before bayer scaling - is NOT considered, because the bad pixel is subtle. - When some large filter is used in the future, - we need to consider the bad pixel. 
- - Currently, when bds_factor isn't 1.00, 3x3 anti-alias filter is applied - to each color plane(Gr/R/B/Gb) before bayer downscaling. - This filter moves each color plane to right/bottom directions - by 1 pixel at the most, depending on downscaling factor. - */ - bad_bqs_on_left_before_bs = 0; - bad_bqs_on_top_before_bs = 0; -#else - IA_CSS_LOG("supported_bds_factors=%d, need_bds_factor:2_00=%d, 1_50=%d, 1_25=%d", - binary->info->sp.bds.supported_bds_factors, - need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25); - IA_CSS_LOG("left_cropping=%d, left_padding_adjusted_bqs=%d", - binary->info->sp.pipeline.left_cropping, left_padding_adjusted_bqs); - - /* Set the top padding padded inside the isp kernel for bayer downscaling binaries. - * When the bds_factor isn't 1.00, the top padding is padded inside the isp - * before bayer downscaling, because the top cropping size (input margin) is not enough. - * (calculate_input_line(), raw_compute_vphase(), dma_read_raw() in raw.isp.c) - * NOTE: In dma_read_raw(), the factor passed to raw_compute_vphase() is got by get_bds_factor_for_dma_read(). - * This factor is BDS_FPVAL_100/BDS_FPVAL_125/BDS_FPVAL_150/BDS_FPVAL_200. - */ - top_padding_bqs = 0; - if (binary->info->sp.pipeline.top_cropping > 0 && - (required_bds_factor == SH_CSS_BDS_FACTOR_1_25 || - required_bds_factor == SH_CSS_BDS_FACTOR_1_50 || - required_bds_factor == SH_CSS_BDS_FACTOR_2_00)) { - /* Calculation from calculate_input_line() and raw_compute_vphase() in raw.isp.c. 
*/ - int top_cropping_bqs = _ISP_BQS(binary->info->sp.pipeline.top_cropping); - /* top cropping (in bqs) */ - int factor = bds_num * bds_frac_acc / bds_den; /* downscaling factor by fixed-point */ - int top_padding_bqsxfrac_acc = (top_cropping_bqs * factor - top_cropping_bqs * bds_frac_acc) - + (2 * bds_frac_acc - factor); /* top padding by fixed-point (in bqs) */ - - top_padding_bqs = (unsigned int)((top_padding_bqsxfrac_acc + bds_frac_acc/2 - 1) / bds_frac_acc); - } - - IA_CSS_LOG("top_cropping=%d, top_padding_bqs=%d", binary->info->sp.pipeline.top_cropping, top_padding_bqs); - - /* Set the right/down shift amount caused by filters applied BEFORE bayer scaling, - * which scaling is applied BEFORE shading corrertion. - * - * When the bds_factor isn't 1.00, 3x3 anti-alias filter is applied to each color plane(Gr/R/B/Gb) - * before bayer downscaling. - * This filter shifts each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel. - */ - right_shift_bqs_before_bs = 0; - down_shift_bqs_before_bs = 0; -#endif - -#ifndef ISP2401 - /* Currently, the bad pixel caused by filters after bayer scaling - is NOT considered, because the bad pixel is subtle. - When some large filter is used in the future, - we need to consider the bad pixel. - - Currently, when DPC&BNR is processed between bayer scaling and - shading correction, DPC&BNR moves each color plane to - right/bottom directions by 1 pixel. - */ - bad_bqs_on_left_after_bs = 0; - bad_bqs_on_top_after_bs = 0; -#else - if (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25) { - right_shift_bqs_before_bs = 1; - down_shift_bqs_before_bs = 1; - } - - IA_CSS_LOG("right_shift_bqs_before_bs=%d, down_shift_bqs_before_bs=%d", - right_shift_bqs_before_bs, down_shift_bqs_before_bs); - - /* Set the right/down shift amount caused by filters applied AFTER bayer scaling, - * which scaling is applied BEFORE shading corrertion. 
- * - * When DPC&BNR is processed between bayer scaling and shading correction, - * DPC&BNR moves each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel. - */ - right_shift_bqs_after_bs = 0; - down_shift_bqs_after_bs = 0; -#endif - -#ifndef ISP2401 - /* Calculate the origin of bayer (real sensor data area) - located on the shading table during the shading correction. */ - res->sc_bayer_origin_x_bqs_on_shading_table - = ((left_padding_adjusted_bqs + bad_bqs_on_left_before_bs) - * bs_hor_ratio_out + bs_hor_ratio_in/2) / bs_hor_ratio_in - + bad_bqs_on_left_after_bs; - /* "+ bs_hor_ratio_in/2": rounding for division by bs_hor_ratio_in */ - res->sc_bayer_origin_y_bqs_on_shading_table - = (bad_bqs_on_top_before_bs - * bs_ver_ratio_out + bs_ver_ratio_in/2) / bs_ver_ratio_in - + bad_bqs_on_top_after_bs; - /* "+ bs_ver_ratio_in/2": rounding for division by bs_ver_ratio_in */ - - res->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in; - res->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out; - res->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in; - res->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out; -#else - if (binary->info->mem_offsets.offsets.param->dmem.dp.size != 0) { /* if DPC&BNR is enabled in the binary */ - right_shift_bqs_after_bs = 1; - down_shift_bqs_after_bs = 1; - } - - IA_CSS_LOG("right_shift_bqs_after_bs=%d, down_shift_bqs_after_bs=%d", - right_shift_bqs_after_bs, down_shift_bqs_after_bs); - - /* Set the origin of the sensor data area on the internal frame at shading correction. 
*/ - { - unsigned int bs_frac = bds_frac_acc; /* scaling factor 1.0 in fixed point */ - unsigned int bs_out, bs_in; /* scaling ratio in fixed point */ - - bs_out = bs_hor_ratio_out * bs_frac; - bs_in = bs_hor_ratio_in * bs_frac; - sensor_data_origin_x_bqs_on_internal - = ((left_padding_adjusted_bqs + right_shift_bqs_before_bs) * bs_out + bs_in/2) / bs_in - + right_shift_bqs_after_bs; /* "+ bs_in/2": rounding */ - - bs_out = bs_ver_ratio_out * bs_frac; - bs_in = bs_ver_ratio_in * bs_frac; - sensor_data_origin_y_bqs_on_internal - = ((top_padding_bqs + down_shift_bqs_before_bs) * bs_out + bs_in/2) / bs_in - + down_shift_bqs_after_bs; /* "+ bs_in/2": rounding */ - } - - scr->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in; - scr->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out; - scr->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in; - scr->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out; - scr->sensor_data_origin_x_bqs_on_internal = (uint32_t)sensor_data_origin_x_bqs_on_internal; - scr->sensor_data_origin_y_bqs_on_internal = (uint32_t)sensor_data_origin_y_bqs_on_internal; - - IA_CSS_LOG("sc_requirements: %d, %d, %d, %d, %d, %d", - scr->bayer_scale_hor_ratio_in, scr->bayer_scale_hor_ratio_out, - scr->bayer_scale_ver_ratio_in, scr->bayer_scale_ver_ratio_out, - scr->sensor_data_origin_x_bqs_on_internal, scr->sensor_data_origin_y_bqs_on_internal); -#endif - -#ifdef ISP2401 - IA_CSS_LEAVE_ERR_PRIVATE(err); -#endif - return err; -} - -/* Get the shading information of Shading Correction Type 1. 
*/ -static enum ia_css_err -ia_css_binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */ - unsigned int required_bds_factor, /* [in] */ - const struct ia_css_stream_config *stream_config, /* [in] */ -#ifndef ISP2401 - struct ia_css_shading_info *info) /* [out] */ -#else - struct ia_css_shading_info *shading_info, /* [out] */ - struct ia_css_pipe_config *pipe_config) /* [out] */ -#endif -{ - enum ia_css_err err; -#ifndef ISP2401 - struct sh_css_shading_table_bayer_origin_compute_results res; -#else - struct sh_css_binary_sc_requirements scr; -#endif - -#ifndef ISP2401 - assert(binary != NULL); - assert(info != NULL); -#else - uint32_t in_width_bqs, in_height_bqs, internal_width_bqs, internal_height_bqs; - uint32_t num_hor_grids, num_ver_grids, bqs_per_grid_cell, tbl_width_bqs, tbl_height_bqs; - uint32_t sensor_org_x_bqs_on_internal, sensor_org_y_bqs_on_internal, sensor_width_bqs, sensor_height_bqs; - uint32_t sensor_center_x_bqs_on_internal, sensor_center_y_bqs_on_internal; - uint32_t left, right, upper, lower; - uint32_t adjust_left, adjust_right, adjust_upper, adjust_lower, adjust_width_bqs, adjust_height_bqs; - uint32_t internal_org_x_bqs_on_tbl, internal_org_y_bqs_on_tbl; - uint32_t sensor_org_x_bqs_on_tbl, sensor_org_y_bqs_on_tbl; -#endif - -#ifndef ISP2401 - info->type = IA_CSS_SHADING_CORRECTION_TYPE_1; -#else - assert(binary != NULL); - assert(stream_config != NULL); - assert(shading_info != NULL); - assert(pipe_config != NULL); -#endif - -#ifndef ISP2401 - info->info.type_1.enable = binary->info->sp.enable.sc; - info->info.type_1.num_hor_grids = binary->sctbl_width_per_color; - info->info.type_1.num_ver_grids = binary->sctbl_height; - info->info.type_1.bqs_per_grid_cell = (1 << binary->deci_factor_log2); -#else - IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p", - binary, required_bds_factor, stream_config); -#endif - - /* Initialize by default values. 
*/ -#ifndef ISP2401 - info->info.type_1.bayer_scale_hor_ratio_in = 1; - info->info.type_1.bayer_scale_hor_ratio_out = 1; - info->info.type_1.bayer_scale_ver_ratio_in = 1; - info->info.type_1.bayer_scale_ver_ratio_out = 1; - info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = 0; - info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = 0; - - err = ia_css_binary_compute_shading_table_bayer_origin( - binary, - required_bds_factor, - stream_config, - &res); - if (err != IA_CSS_SUCCESS) -#else - *shading_info = DEFAULT_SHADING_INFO_TYPE_1; - - err = sh_css_binary_get_sc_requirements(binary, required_bds_factor, stream_config, &scr); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); -#endif - return err; -#ifdef ISP2401 - } - - IA_CSS_LOG("binary: id=%d, sctbl=%dx%d, deci=%d", - binary->info->sp.id, binary->sctbl_width_per_color, binary->sctbl_height, binary->deci_factor_log2); - IA_CSS_LOG("binary: in=%dx%d, in_padded_w=%d, int=%dx%d, int_padded_w=%d, out=%dx%d, out_padded_w=%d", - binary->in_frame_info.res.width, binary->in_frame_info.res.height, binary->in_frame_info.padded_width, - binary->internal_frame_info.res.width, binary->internal_frame_info.res.height, - binary->internal_frame_info.padded_width, - binary->out_frame_info[0].res.width, binary->out_frame_info[0].res.height, - binary->out_frame_info[0].padded_width); - - /* Set the input size from sensor, which includes left/top crop size. */ - in_width_bqs = _ISP_BQS(binary->in_frame_info.res.width); - in_height_bqs = _ISP_BQS(binary->in_frame_info.res.height); - - /* Frame size internally used in ISP, including sensor data and padding. - * This is the frame size, to which the shading correction is applied. - */ - internal_width_bqs = _ISP_BQS(binary->internal_frame_info.res.width); - internal_height_bqs = _ISP_BQS(binary->internal_frame_info.res.height); - - /* Shading table. 
*/ - num_hor_grids = binary->sctbl_width_per_color; - num_ver_grids = binary->sctbl_height; - bqs_per_grid_cell = (1 << binary->deci_factor_log2); - tbl_width_bqs = (num_hor_grids - 1) * bqs_per_grid_cell; - tbl_height_bqs = (num_ver_grids - 1) * bqs_per_grid_cell; -#endif - -#ifndef ISP2401 - info->info.type_1.bayer_scale_hor_ratio_in = res.bayer_scale_hor_ratio_in; - info->info.type_1.bayer_scale_hor_ratio_out = res.bayer_scale_hor_ratio_out; - info->info.type_1.bayer_scale_ver_ratio_in = res.bayer_scale_ver_ratio_in; - info->info.type_1.bayer_scale_ver_ratio_out = res.bayer_scale_ver_ratio_out; - info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = res.sc_bayer_origin_x_bqs_on_shading_table; - info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = res.sc_bayer_origin_y_bqs_on_shading_table; -#else - IA_CSS_LOG("tbl_width_bqs=%d, tbl_height_bqs=%d", tbl_width_bqs, tbl_height_bqs); -#endif - -#ifdef ISP2401 - /* Real sensor data area on the internal frame at shading correction. - * Filters and scaling are applied to the internal frame before shading correction, depending on the binary. - */ - sensor_org_x_bqs_on_internal = scr.sensor_data_origin_x_bqs_on_internal; - sensor_org_y_bqs_on_internal = scr.sensor_data_origin_y_bqs_on_internal; - { - unsigned int bs_frac = 8; /* scaling factor 1.0 in fixed point (8 == FRAC_ACC macro in ISP) */ - unsigned int bs_out, bs_in; /* scaling ratio in fixed point */ - - bs_out = scr.bayer_scale_hor_ratio_out * bs_frac; - bs_in = scr.bayer_scale_hor_ratio_in * bs_frac; - sensor_width_bqs = (in_width_bqs * bs_out + bs_in/2) / bs_in; /* "+ bs_in/2": rounding */ - - bs_out = scr.bayer_scale_ver_ratio_out * bs_frac; - bs_in = scr.bayer_scale_ver_ratio_in * bs_frac; - sensor_height_bqs = (in_height_bqs * bs_out + bs_in/2) / bs_in; /* "+ bs_in/2": rounding */ - } - - /* Center of the sensor data on the internal frame at shading correction. 
*/ - sensor_center_x_bqs_on_internal = sensor_org_x_bqs_on_internal + sensor_width_bqs / 2; - sensor_center_y_bqs_on_internal = sensor_org_y_bqs_on_internal + sensor_height_bqs / 2; - - /* Size of left/right/upper/lower sides of the sensor center on the internal frame. */ - left = sensor_center_x_bqs_on_internal; - right = internal_width_bqs - sensor_center_x_bqs_on_internal; - upper = sensor_center_y_bqs_on_internal; - lower = internal_height_bqs - sensor_center_y_bqs_on_internal; - - /* Align the size of left/right/upper/lower sides to a multiple of the grid cell size. */ - adjust_left = CEIL_MUL(left, bqs_per_grid_cell); - adjust_right = CEIL_MUL(right, bqs_per_grid_cell); - adjust_upper = CEIL_MUL(upper, bqs_per_grid_cell); - adjust_lower = CEIL_MUL(lower, bqs_per_grid_cell); - - /* Shading table should cover the adjusted frame size. */ - adjust_width_bqs = adjust_left + adjust_right; - adjust_height_bqs = adjust_upper + adjust_lower; - - IA_CSS_LOG("adjust_width_bqs=%d, adjust_height_bqs=%d", adjust_width_bqs, adjust_height_bqs); - - if (adjust_width_bqs > tbl_width_bqs || adjust_height_bqs > tbl_height_bqs) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - - /* Origin of the internal frame on the shading table. */ - internal_org_x_bqs_on_tbl = adjust_left - left; - internal_org_y_bqs_on_tbl = adjust_upper - upper; - - /* Origin of the real sensor data area on the shading table. */ - sensor_org_x_bqs_on_tbl = internal_org_x_bqs_on_tbl + sensor_org_x_bqs_on_internal; - sensor_org_y_bqs_on_tbl = internal_org_y_bqs_on_tbl + sensor_org_y_bqs_on_internal; - - /* The shading information necessary as API is stored in the shading_info. 
*/ - shading_info->info.type_1.num_hor_grids = num_hor_grids; - shading_info->info.type_1.num_ver_grids = num_ver_grids; - shading_info->info.type_1.bqs_per_grid_cell = bqs_per_grid_cell; - - shading_info->info.type_1.bayer_scale_hor_ratio_in = scr.bayer_scale_hor_ratio_in; - shading_info->info.type_1.bayer_scale_hor_ratio_out = scr.bayer_scale_hor_ratio_out; - shading_info->info.type_1.bayer_scale_ver_ratio_in = scr.bayer_scale_ver_ratio_in; - shading_info->info.type_1.bayer_scale_ver_ratio_out = scr.bayer_scale_ver_ratio_out; - - shading_info->info.type_1.isp_input_sensor_data_res_bqs.width = in_width_bqs; - shading_info->info.type_1.isp_input_sensor_data_res_bqs.height = in_height_bqs; - - shading_info->info.type_1.sensor_data_res_bqs.width = sensor_width_bqs; - shading_info->info.type_1.sensor_data_res_bqs.height = sensor_height_bqs; - - shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x = (int32_t)sensor_org_x_bqs_on_tbl; - shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y = (int32_t)sensor_org_y_bqs_on_tbl; - - /* The shading information related to ISP (but, not necessary as API) is stored in the pipe_config. 
*/ - pipe_config->internal_frame_origin_bqs_on_sctbl.x = (int32_t)internal_org_x_bqs_on_tbl; - pipe_config->internal_frame_origin_bqs_on_sctbl.y = (int32_t)internal_org_y_bqs_on_tbl; - - IA_CSS_LOG("shading_info: grids=%dx%d, cell=%d, scale=%d,%d,%d,%d, input=%dx%d, data=%dx%d, origin=(%d,%d)", - shading_info->info.type_1.num_hor_grids, - shading_info->info.type_1.num_ver_grids, - shading_info->info.type_1.bqs_per_grid_cell, - shading_info->info.type_1.bayer_scale_hor_ratio_in, - shading_info->info.type_1.bayer_scale_hor_ratio_out, - shading_info->info.type_1.bayer_scale_ver_ratio_in, - shading_info->info.type_1.bayer_scale_ver_ratio_out, - shading_info->info.type_1.isp_input_sensor_data_res_bqs.width, - shading_info->info.type_1.isp_input_sensor_data_res_bqs.height, - shading_info->info.type_1.sensor_data_res_bqs.width, - shading_info->info.type_1.sensor_data_res_bqs.height, - shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x, - shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y); - - IA_CSS_LOG("pipe_config: origin=(%d,%d)", - pipe_config->internal_frame_origin_bqs_on_sctbl.x, - pipe_config->internal_frame_origin_bqs_on_sctbl.y); - - IA_CSS_LEAVE_ERR_PRIVATE(err); -#endif - return err; -} - -enum ia_css_err -ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */ - enum ia_css_shading_correction_type type, /* [in] */ - unsigned int required_bds_factor, /* [in] */ - const struct ia_css_stream_config *stream_config, /* [in] */ -#ifndef ISP2401 - struct ia_css_shading_info *info) /* [out] */ -#else - struct ia_css_shading_info *shading_info, /* [out] */ - struct ia_css_pipe_config *pipe_config) /* [out] */ -#endif -{ - enum ia_css_err err; - - assert(binary != NULL); -#ifndef ISP2401 - assert(info != NULL); -#else - assert(shading_info != NULL); - - IA_CSS_ENTER_PRIVATE("binary=%p, type=%d, required_bds_factor=%d, stream_config=%p", - binary, type, required_bds_factor, stream_config); -#endif - - if (type == 
IA_CSS_SHADING_CORRECTION_TYPE_1) -#ifndef ISP2401 - err = ia_css_binary_get_shading_info_type_1(binary, required_bds_factor, stream_config, info); -#else - err = ia_css_binary_get_shading_info_type_1(binary, required_bds_factor, stream_config, - shading_info, pipe_config); -#endif - - /* Other function calls can be added here when other shading correction types will be added in the future. */ - - else - err = IA_CSS_ERR_NOT_SUPPORTED; - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static void sh_css_binary_common_grid_info(const struct ia_css_binary *binary, - struct ia_css_grid_info *info) -{ - assert(binary != NULL); - assert(info != NULL); - - info->isp_in_width = binary->internal_frame_info.res.width; - info->isp_in_height = binary->internal_frame_info.res.height; - - info->vamem_type = IA_CSS_VAMEM_TYPE_2; -} - -void -ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary, - struct ia_css_grid_info *info, - struct ia_css_pipe *pipe) -{ - struct ia_css_dvs_grid_info *dvs_info; - - (void)pipe; - assert(binary != NULL); - assert(info != NULL); - - dvs_info = &info->dvs_grid.dvs_grid_info; - - /* for DIS, we use a division instead of a ceil_div. If this is smaller - * than the 3a grid size, it indicates that the outer values are not - * valid for DIS. 
- */ - dvs_info->enable = binary->info->sp.enable.dis; - dvs_info->width = binary->dis.grid.dim.width; - dvs_info->height = binary->dis.grid.dim.height; - dvs_info->aligned_width = binary->dis.grid.pad.width; - dvs_info->aligned_height = binary->dis.grid.pad.height; - dvs_info->bqs_per_grid_cell = 1 << binary->dis.deci_factor_log2; - dvs_info->num_hor_coefs = binary->dis.coef.dim.width; - dvs_info->num_ver_coefs = binary->dis.coef.dim.height; - - sh_css_binary_common_grid_info(binary, info); -} - -void -ia_css_binary_dvs_stat_grid_info( - const struct ia_css_binary *binary, - struct ia_css_grid_info *info, - struct ia_css_pipe *pipe) -{ - (void)pipe; - sh_css_binary_common_grid_info(binary, info); - return; -} - -enum ia_css_err -ia_css_binary_3a_grid_info(const struct ia_css_binary *binary, - struct ia_css_grid_info *info, - struct ia_css_pipe *pipe) -{ - struct ia_css_3a_grid_info *s3a_info; - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE("binary=%p, info=%p, pipe=%p", - binary, info, pipe); - - assert(binary != NULL); - assert(info != NULL); - s3a_info = &info->s3a_grid; - - - /* 3A statistics grid */ - s3a_info->enable = binary->info->sp.enable.s3a; - s3a_info->width = binary->s3atbl_width; - s3a_info->height = binary->s3atbl_height; - s3a_info->aligned_width = binary->s3atbl_isp_width; - s3a_info->aligned_height = binary->s3atbl_isp_height; - s3a_info->bqs_per_grid_cell = (1 << binary->deci_factor_log2); - s3a_info->deci_factor_log2 = binary->deci_factor_log2; - s3a_info->elem_bit_depth = SH_CSS_BAYER_BITS; - s3a_info->use_dmem = binary->info->sp.s3a.s3atbl_use_dmem; -#if defined(HAS_NO_HMEM) - s3a_info->has_histogram = 1; -#else - s3a_info->has_histogram = 0; -#endif - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static void -binary_init_pc_histogram(struct sh_css_pc_histogram *histo) -{ - assert(histo != NULL); - - histo->length = 0; - histo->run = NULL; - histo->stall = NULL; -} - -static void -binary_init_metrics(struct 
sh_css_binary_metrics *metrics, - const struct ia_css_binary_info *info) -{ - assert(metrics != NULL); - assert(info != NULL); - - metrics->mode = info->pipeline.mode; - metrics->id = info->id; - metrics->next = NULL; - binary_init_pc_histogram(&metrics->isp_histogram); - binary_init_pc_histogram(&metrics->sp_histogram); -} - -/* move to host part of output module */ -static bool -binary_supports_output_format(const struct ia_css_binary_xinfo *info, - enum ia_css_frame_format format) -{ - int i; - - assert(info != NULL); - - for (i = 0; i < info->num_output_formats; i++) { - if (info->output_formats[i] == format) - return true; - } - return false; -} - -#ifdef ISP2401 -static bool -binary_supports_input_format(const struct ia_css_binary_xinfo *info, - enum atomisp_input_format format) -{ - - assert(info != NULL); - (void)format; - - return true; -} -#endif - -static bool -binary_supports_vf_format(const struct ia_css_binary_xinfo *info, - enum ia_css_frame_format format) -{ - int i; - - assert(info != NULL); - - for (i = 0; i < info->num_vf_formats; i++) { - if (info->vf_formats[i] == format) - return true; - } - return false; -} - -/* move to host part of bds module */ -static bool -supports_bds_factor(uint32_t supported_factors, - uint32_t bds_factor) -{ - return ((supported_factors & PACK_BDS_FACTOR(bds_factor)) != 0); -} - -static enum ia_css_err -binary_init_info(struct ia_css_binary_xinfo *info, unsigned int i, - bool *binary_found) -{ - const unsigned char *blob = sh_css_blob_info[i].blob; - unsigned size = sh_css_blob_info[i].header.blob.size; - - if ((info == NULL) || (binary_found == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - *info = sh_css_blob_info[i].header.info.isp; - *binary_found = blob != NULL; - info->blob_index = i; - /* we don't have this binary, skip it */ - if (!size) - return IA_CSS_SUCCESS; - - info->xmem_addr = sh_css_load_blob(blob, size); - if (!info->xmem_addr) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - return IA_CSS_SUCCESS; 
-} - -/* When binaries are put at the beginning, they will only - * be selected if no other primary matches. - */ -enum ia_css_err -ia_css_binary_init_infos(void) -{ - unsigned int i; - unsigned int num_of_isp_binaries = sh_css_num_binaries - NUM_OF_SPS - NUM_OF_BLS; - - if (num_of_isp_binaries == 0) - return IA_CSS_SUCCESS; - - all_binaries = sh_css_malloc(num_of_isp_binaries * - sizeof(*all_binaries)); - if (all_binaries == NULL) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - for (i = 0; i < num_of_isp_binaries; i++) { - enum ia_css_err ret; - struct ia_css_binary_xinfo *binary = &all_binaries[i]; - bool binary_found; - - ret = binary_init_info(binary, i, &binary_found); - if (ret != IA_CSS_SUCCESS) - return ret; - if (!binary_found) - continue; - /* Prepend new binary information */ - binary->next = binary_infos[binary->sp.pipeline.mode]; - binary_infos[binary->sp.pipeline.mode] = binary; - binary->blob = &sh_css_blob_info[i]; - binary->mem_offsets = sh_css_blob_info[i].mem_offsets; - } - return IA_CSS_SUCCESS; -} - -enum ia_css_err -ia_css_binary_uninit(void) -{ - unsigned int i; - struct ia_css_binary_xinfo *b; - - for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) { - for (b = binary_infos[i]; b; b = b->next) { - if (b->xmem_addr) - hmm_free(b->xmem_addr); - b->xmem_addr = mmgr_NULL; - } - binary_infos[i] = NULL; - } - sh_css_free(all_binaries); - return IA_CSS_SUCCESS; -} - -/* @brief Compute decimation factor for 3A statistics and shading correction. - * - * @param[in] width Frame width in pixels. - * @param[in] height Frame height in pixels. - * @return Log2 of decimation factor (= grid cell size) in bayer quads. 
- */ -static int -binary_grid_deci_factor_log2(int width, int height) -{ -/* 3A/Shading decimation factor spcification (at August 2008) - * ------------------------------------------------------------------ - * [Image Width (BQ)] [Decimation Factor (BQ)] [Resulting grid cells] -#ifndef ISP2401 - * 1280 ?c 32 40 ?c - * 640 ?c 1279 16 40 ?c 80 - * ?c 639 8 ?c 80 -#else - * from 1280 32 from 40 - * from 640 to 1279 16 from 40 to 80 - * to 639 8 to 80 -#endif - * ------------------------------------------------------------------ - */ -/* Maximum and minimum decimation factor by the specification */ -#define MAX_SPEC_DECI_FACT_LOG2 5 -#define MIN_SPEC_DECI_FACT_LOG2 3 -/* the smallest frame width in bayer quads when decimation factor (log2) is 5 or 4, by the specification */ -#define DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ 1280 -#define DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ 640 - - int smallest_factor; /* the smallest factor (log2) where the number of cells does not exceed the limitation */ - int spec_factor; /* the factor (log2) which satisfies the specification */ - - /* Currently supported maximum width and height are 5120(=80*64) and 3840(=60*64). */ - assert(ISP_BQ_GRID_WIDTH(width, MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_WIDTH); - assert(ISP_BQ_GRID_HEIGHT(height, MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_HEIGHT); - - /* Compute the smallest factor. */ - smallest_factor = MAX_SPEC_DECI_FACT_LOG2; - while (ISP_BQ_GRID_WIDTH(width, smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_WIDTH && - ISP_BQ_GRID_HEIGHT(height, smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_HEIGHT - && smallest_factor > MIN_SPEC_DECI_FACT_LOG2) - smallest_factor--; - - /* Get the factor by the specification. 
*/ - if (_ISP_BQS(width) >= DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ) - spec_factor = 5; - else if (_ISP_BQS(width) >= DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ) - spec_factor = 4; - else - spec_factor = 3; - - /* If smallest_factor is smaller than or equal to spec_factor, choose spec_factor to follow the specification. - If smallest_factor is larger than spec_factor, choose smallest_factor. - - ex. width=2560, height=1920 - smallest_factor=4, spec_factor=5 - smallest_factor < spec_factor -> return spec_factor - - ex. width=300, height=3000 - smallest_factor=5, spec_factor=3 - smallest_factor > spec_factor -> return smallest_factor - */ - return max(smallest_factor, spec_factor); - -#undef MAX_SPEC_DECI_FACT_LOG2 -#undef MIN_SPEC_DECI_FACT_LOG2 -#undef DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ -#undef DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ -} - -static int -binary_in_frame_padded_width(int in_frame_width, - int isp_internal_width, - int dvs_env_width, - int stream_config_left_padding, - int left_cropping, - bool need_scaling) -{ - int rval; - int nr_of_left_paddings; /* number of paddings pixels on the left of an image line */ - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - /* the output image line of Input System 2401 does not have the left paddings */ - nr_of_left_paddings = 0; -#else - /* in other cases, the left padding pixels are always 128 */ - nr_of_left_paddings = 2*ISP_VEC_NELEMS; -#endif - if (need_scaling) { - /* In SDV use-case, we need to match left-padding of - * primary and the video binary. */ - if (stream_config_left_padding != -1) { - /* Different than before, we do left&right padding. */ - rval = - CEIL_MUL(in_frame_width + nr_of_left_paddings, - 2*ISP_VEC_NELEMS); - } else { - /* Different than before, we do left&right padding. */ - in_frame_width += dvs_env_width; - rval = - CEIL_MUL(in_frame_width + - (left_cropping ? 
nr_of_left_paddings : 0), - 2*ISP_VEC_NELEMS); - } - } else { - rval = isp_internal_width; - } - - return rval; -} - - -enum ia_css_err -ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo, - bool online, - bool two_ppc, - enum atomisp_input_format stream_format, - const struct ia_css_frame_info *in_info, /* can be NULL */ - const struct ia_css_frame_info *bds_out_info, /* can be NULL */ - const struct ia_css_frame_info *out_info[], /* can be NULL */ - const struct ia_css_frame_info *vf_info, /* can be NULL */ - struct ia_css_binary *binary, - struct ia_css_resolution *dvs_env, - int stream_config_left_padding, - bool accelerator) -{ - const struct ia_css_binary_info *info = &xinfo->sp; - unsigned int dvs_env_width = 0, - dvs_env_height = 0, - vf_log_ds = 0, - s3a_log_deci = 0, - bits_per_pixel = 0, - /* Resolution at SC/3A/DIS kernel. */ - sc_3a_dis_width = 0, - /* Resolution at SC/3A/DIS kernel. */ - sc_3a_dis_padded_width = 0, - /* Resolution at SC/3A/DIS kernel. */ - sc_3a_dis_height = 0, - isp_internal_width = 0, - isp_internal_height = 0, - s3a_isp_width = 0; - - bool need_scaling = false; - struct ia_css_resolution binary_dvs_env, internal_res; - enum ia_css_err err; - unsigned int i; - const struct ia_css_frame_info *bin_out_info = NULL; - - assert(info != NULL); - assert(binary != NULL); - - binary->info = xinfo; - if (!accelerator) { - /* binary->css_params has been filled by accelerator itself. 
*/ - err = ia_css_isp_param_allocate_isp_parameters( - &binary->mem_params, &binary->css_params, - &info->mem_initializers); - if (err != IA_CSS_SUCCESS) { - return err; - } - } - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - if (out_info[i] && (out_info[i]->res.width != 0)) { - bin_out_info = out_info[i]; - break; - } - } - if (in_info != NULL && bin_out_info != NULL) { - need_scaling = (in_info->res.width != bin_out_info->res.width) || - (in_info->res.height != bin_out_info->res.height); - } - - - /* binary_dvs_env has to be equal or larger than SH_CSS_MIN_DVS_ENVELOPE */ - binary_dvs_env.width = 0; - binary_dvs_env.height = 0; - ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env); - dvs_env_width = binary_dvs_env.width; - dvs_env_height = binary_dvs_env.height; - binary->dvs_envelope.width = dvs_env_width; - binary->dvs_envelope.height = dvs_env_height; - - /* internal resolution calculation */ - internal_res.width = 0; - internal_res.height = 0; - ia_css_binary_internal_res(in_info, bds_out_info, bin_out_info, dvs_env, - info, &internal_res); - isp_internal_width = internal_res.width; - isp_internal_height = internal_res.height; - - /* internal frame info */ - if (bin_out_info != NULL) /* { */ - binary->internal_frame_info.format = bin_out_info->format; - /* } */ - binary->internal_frame_info.res.width = isp_internal_width; - binary->internal_frame_info.padded_width = CEIL_MUL(isp_internal_width, 2*ISP_VEC_NELEMS); - binary->internal_frame_info.res.height = isp_internal_height; - binary->internal_frame_info.raw_bit_depth = bits_per_pixel; - - if (in_info != NULL) { - binary->effective_in_frame_res.width = in_info->res.width; - binary->effective_in_frame_res.height = in_info->res.height; - - bits_per_pixel = in_info->raw_bit_depth; - - /* input info */ - binary->in_frame_info.res.width = in_info->res.width + info->pipeline.left_cropping; - binary->in_frame_info.res.height = in_info->res.height + info->pipeline.top_cropping; - - 
binary->in_frame_info.res.width += dvs_env_width; - binary->in_frame_info.res.height += dvs_env_height; - - binary->in_frame_info.padded_width = - binary_in_frame_padded_width(in_info->res.width, - isp_internal_width, - dvs_env_width, - stream_config_left_padding, - info->pipeline.left_cropping, - need_scaling); - - binary->in_frame_info.format = in_info->format; - binary->in_frame_info.raw_bayer_order = in_info->raw_bayer_order; - binary->in_frame_info.crop_info = in_info->crop_info; - } - - if (online) { - bits_per_pixel = ia_css_util_input_format_bpp( - stream_format, two_ppc); - } - binary->in_frame_info.raw_bit_depth = bits_per_pixel; - - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - if (out_info[i] != NULL) { - binary->out_frame_info[i].res.width = out_info[i]->res.width; - binary->out_frame_info[i].res.height = out_info[i]->res.height; - binary->out_frame_info[i].padded_width = out_info[i]->padded_width; - if (info->pipeline.mode == IA_CSS_BINARY_MODE_COPY) { - binary->out_frame_info[i].raw_bit_depth = bits_per_pixel; - } else { - /* Only relevant for RAW format. - * At the moment, all outputs are raw, 16 bit per pixel, except for copy. - * To do this cleanly, the binary should specify in its info - * the bit depth per output channel. 
- */ - binary->out_frame_info[i].raw_bit_depth = 16; - } - binary->out_frame_info[i].format = out_info[i]->format; - } - } - - if (vf_info && (vf_info->res.width != 0)) { - err = ia_css_vf_configure(binary, bin_out_info, (struct ia_css_frame_info *)vf_info, &vf_log_ds); - if (err != IA_CSS_SUCCESS) { - if (!accelerator) { - ia_css_isp_param_destroy_isp_parameters( - &binary->mem_params, - &binary->css_params); - } - return err; - } - } - binary->vf_downscale_log2 = vf_log_ds; - - binary->online = online; - binary->input_format = stream_format; - - /* viewfinder output info */ - if ((vf_info != NULL) && (vf_info->res.width != 0)) { - unsigned int vf_out_vecs, vf_out_width, vf_out_height; - binary->vf_frame_info.format = vf_info->format; - if (bin_out_info == NULL) - return IA_CSS_ERR_INTERNAL_ERROR; - vf_out_vecs = __ISP_VF_OUTPUT_WIDTH_VECS(bin_out_info->padded_width, - vf_log_ds); - vf_out_width = _ISP_VF_OUTPUT_WIDTH(vf_out_vecs); - vf_out_height = _ISP_VF_OUTPUT_HEIGHT(bin_out_info->res.height, - vf_log_ds); - - /* For preview mode, output pin is used instead of vf. */ - if (info->pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW) { - binary->out_frame_info[0].res.width = - (bin_out_info->res.width >> vf_log_ds); - binary->out_frame_info[0].padded_width = vf_out_width; - binary->out_frame_info[0].res.height = vf_out_height; - - binary->vf_frame_info.res.width = 0; - binary->vf_frame_info.padded_width = 0; - binary->vf_frame_info.res.height = 0; - } else { - /* we also store the raw downscaled width. This is - * used for digital zoom in preview to zoom only on - * the width that we actually want to keep, not on - * the aligned width. 
*/ - binary->vf_frame_info.res.width = - (bin_out_info->res.width >> vf_log_ds); - binary->vf_frame_info.padded_width = vf_out_width; - binary->vf_frame_info.res.height = vf_out_height; - } - } else { - binary->vf_frame_info.res.width = 0; - binary->vf_frame_info.padded_width = 0; - binary->vf_frame_info.res.height = 0; - } - - if (info->enable.ca_gdc) { - binary->morph_tbl_width = - _ISP_MORPH_TABLE_WIDTH(isp_internal_width); - binary->morph_tbl_aligned_width = - _ISP_MORPH_TABLE_ALIGNED_WIDTH(isp_internal_width); - binary->morph_tbl_height = - _ISP_MORPH_TABLE_HEIGHT(isp_internal_height); - } else { - binary->morph_tbl_width = 0; - binary->morph_tbl_aligned_width = 0; - binary->morph_tbl_height = 0; - } - - sc_3a_dis_width = binary->in_frame_info.res.width; - sc_3a_dis_padded_width = binary->in_frame_info.padded_width; - sc_3a_dis_height = binary->in_frame_info.res.height; - if (bds_out_info != NULL && in_info != NULL && - bds_out_info->res.width != in_info->res.width) { - /* TODO: Next, "internal_frame_info" should be derived from - * bds_out. So this part will change once it is in place! 
*/ - sc_3a_dis_width = bds_out_info->res.width + info->pipeline.left_cropping; - sc_3a_dis_padded_width = isp_internal_width; - sc_3a_dis_height = isp_internal_height; - } - - - s3a_isp_width = _ISP_S3A_ELEMS_ISP_WIDTH(sc_3a_dis_padded_width, - info->pipeline.left_cropping); - if (info->s3a.fixed_s3a_deci_log) { - s3a_log_deci = info->s3a.fixed_s3a_deci_log; - } else { - s3a_log_deci = binary_grid_deci_factor_log2(s3a_isp_width, - sc_3a_dis_height); - } - binary->deci_factor_log2 = s3a_log_deci; - - if (info->enable.s3a) { - binary->s3atbl_width = - _ISP_S3ATBL_WIDTH(sc_3a_dis_width, - s3a_log_deci); - binary->s3atbl_height = - _ISP_S3ATBL_HEIGHT(sc_3a_dis_height, - s3a_log_deci); - binary->s3atbl_isp_width = - _ISP_S3ATBL_ISP_WIDTH(s3a_isp_width, - s3a_log_deci); - binary->s3atbl_isp_height = - _ISP_S3ATBL_ISP_HEIGHT(sc_3a_dis_height, - s3a_log_deci); - } else { - binary->s3atbl_width = 0; - binary->s3atbl_height = 0; - binary->s3atbl_isp_width = 0; - binary->s3atbl_isp_height = 0; - } - - if (info->enable.sc) { - binary->sctbl_width_per_color = -#ifndef ISP2401 - _ISP_SCTBL_WIDTH_PER_COLOR(sc_3a_dis_padded_width, - s3a_log_deci); -#else - _ISP_SCTBL_WIDTH_PER_COLOR(isp_internal_width, s3a_log_deci); -#endif - binary->sctbl_aligned_width_per_color = - SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR; - binary->sctbl_height = -#ifndef ISP2401 - _ISP_SCTBL_HEIGHT(sc_3a_dis_height, s3a_log_deci); -#else - _ISP_SCTBL_HEIGHT(isp_internal_height, s3a_log_deci); - binary->sctbl_legacy_width_per_color = - _ISP_SCTBL_LEGACY_WIDTH_PER_COLOR(sc_3a_dis_padded_width, s3a_log_deci); - binary->sctbl_legacy_height = - _ISP_SCTBL_LEGACY_HEIGHT(sc_3a_dis_height, s3a_log_deci); -#endif - } else { - binary->sctbl_width_per_color = 0; - binary->sctbl_aligned_width_per_color = 0; - binary->sctbl_height = 0; -#ifdef ISP2401 - binary->sctbl_legacy_width_per_color = 0; - binary->sctbl_legacy_height = 0; -#endif - } - ia_css_sdis_init_info(&binary->dis, - sc_3a_dis_width, - sc_3a_dis_padded_width, 
- sc_3a_dis_height, - info->pipeline.isp_pipe_version, - info->enable.dis); - if (info->pipeline.left_cropping) - binary->left_padding = 2 * ISP_VEC_NELEMS - info->pipeline.left_cropping; - else - binary->left_padding = 0; - - return IA_CSS_SUCCESS; -} - -enum ia_css_err -ia_css_binary_find(struct ia_css_binary_descr *descr, - struct ia_css_binary *binary) -{ - int mode; - bool online; - bool two_ppc; - enum atomisp_input_format stream_format; - const struct ia_css_frame_info *req_in_info, - *req_bds_out_info, - *req_out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS], - *req_bin_out_info = NULL, - *req_vf_info; - - struct ia_css_binary_xinfo *xcandidate; -#ifndef ISP2401 - bool need_ds, need_dz, need_dvs, need_xnr, need_dpc; -#else - bool need_ds, need_dz, need_dvs, need_xnr, need_dpc, need_tnr; -#endif - bool striped; - bool enable_yuv_ds; - bool enable_high_speed; - bool enable_dvs_6axis; - bool enable_reduced_pipe; - bool enable_capture_pp_bli; -#ifdef ISP2401 - bool enable_luma_only; -#endif - enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR; - bool continuous; - unsigned int isp_pipe_version; - struct ia_css_resolution dvs_env, internal_res; - unsigned int i; - - assert(descr != NULL); - /* MW: used after an error check, may accept NULL, but doubtfull */ - assert(binary != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n", - descr, descr->mode, - binary); - - mode = descr->mode; - online = descr->online; - two_ppc = descr->two_ppc; - stream_format = descr->stream_format; - req_in_info = descr->in_info; - req_bds_out_info = descr->bds_out_info; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - req_out_info[i] = descr->out_info[i]; - if (req_out_info[i] && (req_out_info[i]->res.width != 0)) - req_bin_out_info = req_out_info[i]; - } - if (req_bin_out_info == NULL) - return IA_CSS_ERR_INTERNAL_ERROR; -#ifndef ISP2401 - req_vf_info = descr->vf_info; -#else - - if ((descr->vf_info != NULL) && 
(descr->vf_info->res.width == 0)) - /* width==0 means that there is no vf pin (e.g. in SkyCam preview case) */ - req_vf_info = NULL; - else - req_vf_info = descr->vf_info; -#endif - - need_xnr = descr->enable_xnr; - need_ds = descr->enable_fractional_ds; - need_dz = false; - need_dvs = false; - need_dpc = descr->enable_dpc; -#ifdef ISP2401 - need_tnr = descr->enable_tnr; -#endif - enable_yuv_ds = descr->enable_yuv_ds; - enable_high_speed = descr->enable_high_speed; - enable_dvs_6axis = descr->enable_dvs_6axis; - enable_reduced_pipe = descr->enable_reduced_pipe; - enable_capture_pp_bli = descr->enable_capture_pp_bli; -#ifdef ISP2401 - enable_luma_only = descr->enable_luma_only; -#endif - continuous = descr->continuous; - striped = descr->striped; - isp_pipe_version = descr->isp_pipe_version; - - dvs_env.width = 0; - dvs_env.height = 0; - internal_res.width = 0; - internal_res.height = 0; - - - if (mode == IA_CSS_BINARY_MODE_VIDEO) { - dvs_env = descr->dvs_env; - need_dz = descr->enable_dz; - /* Video is the only mode that has a nodz variant. 
*/ - need_dvs = dvs_env.width || dvs_env.height; - } - - /* print a map of the binary file */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "BINARY INFO:\n"); - for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) { - xcandidate = binary_infos[i]; - if (xcandidate) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%d:\n", i); - while (xcandidate) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " Name:%s Type:%d Cont:%d\n", - xcandidate->blob->name, xcandidate->type, - xcandidate->sp.enable.continuous); - xcandidate = xcandidate->next; - } - } - } - - /* printf("sh_css_binary_find: pipe version %d\n", isp_pipe_version); */ - for (xcandidate = binary_infos[mode]; xcandidate; - xcandidate = xcandidate->next) { - struct ia_css_binary_info *candidate = &xcandidate->sp; - /* printf("sh_css_binary_find: evaluating candidate: - * %d\n",candidate->id); */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n", - candidate, candidate->pipeline.mode, candidate->id); - - /* - * MW: Only a limited set of jointly configured binaries can - * be used in a continuous preview/video mode unless it is - * the copy mode and runs on SP. 
- */ - if (!candidate->enable.continuous && - continuous && (mode != IA_CSS_BINARY_MODE_COPY)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n", - __LINE__, candidate->enable.continuous, - continuous, mode, - IA_CSS_BINARY_MODE_COPY); - continue; - } - if (striped && candidate->iterator.num_stripes == 1) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: binary is not striped\n", - __LINE__); - continue; - } - - if (candidate->pipeline.isp_pipe_version != isp_pipe_version && - (mode != IA_CSS_BINARY_MODE_COPY) && - (mode != IA_CSS_BINARY_MODE_CAPTURE_PP) && - (mode != IA_CSS_BINARY_MODE_VF_PP)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d != %d)\n", - __LINE__, - candidate->pipeline.isp_pipe_version, isp_pipe_version); - continue; - } - if (!candidate->enable.reduced_pipe && enable_reduced_pipe) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, - candidate->enable.reduced_pipe, - enable_reduced_pipe); - continue; - } - if (!candidate->enable.dvs_6axis && enable_dvs_6axis) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, - candidate->enable.dvs_6axis, - enable_dvs_6axis); - continue; - } - if (candidate->enable.high_speed && !enable_high_speed) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d && !%d\n", - __LINE__, - candidate->enable.high_speed, - enable_high_speed); - continue; - } - if (!candidate->enable.xnr && need_xnr) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d && !%d\n", - __LINE__, - candidate->enable.xnr, - need_xnr); - continue; - } - if (!(candidate->enable.ds & 2) && enable_yuv_ds) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, - ((candidate->enable.ds & 2) != 0), - 
enable_yuv_ds); - continue; - } - if ((candidate->enable.ds & 2) && !enable_yuv_ds) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d && !%d\n", - __LINE__, - ((candidate->enable.ds & 2) != 0), - enable_yuv_ds); - continue; - } - - if (mode == IA_CSS_BINARY_MODE_VIDEO && - candidate->enable.ds && need_ds) - need_dz = false; - - /* when we require vf output, we need to have vf_veceven */ - if ((req_vf_info != NULL) && !(candidate->enable.vf_veceven || - /* or variable vf vec even */ - candidate->vf_dec.is_variable || - /* or more than one output pin. */ - xcandidate->num_output_pins > 1)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n", - __LINE__, req_vf_info, - candidate->enable.vf_veceven, - candidate->vf_dec.is_variable, - xcandidate->num_output_pins, 1); - continue; - } - if (!candidate->enable.dvs_envelope && need_dvs) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, - candidate->enable.dvs_envelope, (int)need_dvs); - continue; - } - /* internal_res check considers input, output, and dvs envelope sizes */ - ia_css_binary_internal_res(req_in_info, req_bds_out_info, - req_bin_out_info, &dvs_env, candidate, &internal_res); - if (internal_res.width > candidate->internal.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d)\n", - __LINE__, internal_res.width, - candidate->internal.max_width); - continue; - } - if (internal_res.height > candidate->internal.max_height) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d)\n", - __LINE__, internal_res.height, - candidate->internal.max_height); - continue; - } - if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, 
candidate->enable.ds, (int)need_ds); - continue; - } - if (!candidate->enable.uds && !candidate->enable.dvs_6axis && need_dz) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n", - __LINE__, candidate->enable.uds, - candidate->enable.dvs_6axis, (int)need_dz); - continue; - } - if (online && candidate->input.source == IA_CSS_BINARY_INPUT_MEMORY) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n", - __LINE__, online, candidate->input.source, - IA_CSS_BINARY_INPUT_MEMORY); - continue; - } - if (!online && candidate->input.source == IA_CSS_BINARY_INPUT_SENSOR) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n", - __LINE__, online, candidate->input.source, - IA_CSS_BINARY_INPUT_SENSOR); - continue; - } - if (req_bin_out_info->res.width < candidate->output.min_width || - req_bin_out_info->res.width > candidate->output.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n", - __LINE__, - req_bin_out_info->padded_width, - candidate->output.min_width, - req_bin_out_info->padded_width, - candidate->output.max_width); - continue; - } - if (xcandidate->num_output_pins > 1 && /* in case we have a second output pin, */ - req_vf_info) { /* and we need vf output. 
*/ - if (req_vf_info->res.width > candidate->output.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d < %d)\n", - __LINE__, - req_vf_info->res.width, - candidate->output.max_width); - continue; - } - } - if (req_in_info->padded_width > candidate->input.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d)\n", - __LINE__, req_in_info->padded_width, - candidate->input.max_width); - continue; - } - if (!binary_supports_output_format(xcandidate, req_bin_out_info->format)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d\n", - __LINE__, - binary_supports_output_format(xcandidate, req_bin_out_info->format)); - continue; - } -#ifdef ISP2401 - if (!binary_supports_input_format(xcandidate, descr->stream_format)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d\n", - __LINE__, - binary_supports_input_format(xcandidate, req_in_info->format)); - continue; - } -#endif - if (xcandidate->num_output_pins > 1 && /* in case we have a second output pin, */ - req_vf_info && /* and we need vf output. */ - /* check if the required vf format - is supported. 
*/ - !binary_supports_output_format(xcandidate, req_vf_info->format)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n", - __LINE__, xcandidate->num_output_pins, 1, - req_vf_info, - binary_supports_output_format(xcandidate, req_vf_info->format)); - continue; - } - - /* Check if vf_veceven supports the requested vf format */ - if (xcandidate->num_output_pins == 1 && - req_vf_info && candidate->enable.vf_veceven && - !binary_supports_vf_format(xcandidate, req_vf_info->format)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n", - __LINE__, xcandidate->num_output_pins, 1, - req_vf_info, candidate->enable.vf_veceven, - binary_supports_vf_format(xcandidate, req_vf_info->format)); - continue; - } - - /* Check if vf_veceven supports the requested vf width */ - if (xcandidate->num_output_pins == 1 && - req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. 
*/ - if (req_vf_info->res.width > candidate->output.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d < %d)\n", - __LINE__, - req_vf_info->res.width, - candidate->output.max_width); - continue; - } - } - - if (!supports_bds_factor(candidate->bds.supported_bds_factors, - descr->required_bds_factor)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", - __LINE__, candidate->bds.supported_bds_factors, - descr->required_bds_factor); - continue; - } - - if (!candidate->enable.dpc && need_dpc) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", - __LINE__, candidate->enable.dpc, - descr->enable_dpc); - continue; - } - - if (candidate->uds.use_bci && enable_capture_pp_bli) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", - __LINE__, candidate->uds.use_bci, - descr->enable_capture_pp_bli); - continue; - } - -#ifdef ISP2401 - if (candidate->enable.luma_only != enable_luma_only) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d != %d\n", - __LINE__, candidate->enable.luma_only, - descr->enable_luma_only); - continue; - } - - if(!candidate->enable.tnr && need_tnr) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, candidate->enable.tnr, - descr->enable_tnr); - continue; - } - -#endif - /* reconfigure any variable properties of the binary */ - err = ia_css_binary_fill_info(xcandidate, online, two_ppc, - stream_format, req_in_info, - req_bds_out_info, - req_out_info, req_vf_info, - binary, &dvs_env, - descr->stream_config_left_padding, - false); - - if (err != IA_CSS_SUCCESS) - break; - binary_init_metrics(&binary->metrics, &binary->info->sp); - break; - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() selected = %p, mode = %d ID = %d\n", - xcandidate, xcandidate ? 
xcandidate->sp.pipeline.mode : 0, xcandidate ? xcandidate->sp.id : 0); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() leave: return_err=%d\n", err); - - return err; -} - -unsigned -ia_css_binary_max_vf_width(void) -{ - /* This is (should be) true for IPU1 and IPU2 */ - /* For IPU3 (SkyCam) this pointer is guarenteed to be NULL simply because such a binary does not exist */ - if (binary_infos[IA_CSS_BINARY_MODE_VF_PP]) - return binary_infos[IA_CSS_BINARY_MODE_VF_PP]->sp.output.max_width; - return 0; -} - -void -ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary) -{ - if (binary) { - ia_css_isp_param_destroy_isp_parameters(&binary->mem_params, - &binary->css_params); - } -} - -void -ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries, - uint32_t *num_isp_binaries) -{ - assert(binaries != NULL); - - if (num_isp_binaries) - *num_isp_binaries = 0; - - *binaries = all_binaries; - if (all_binaries && num_isp_binaries) { - /* -1 to account for sp binary which is not stored in all_binaries */ - if (sh_css_num_binaries > 0) - *num_isp_binaries = sh_css_num_binaries - 1; - } -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq.h deleted file mode 100644 index 034ec15ec4a1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq.h +++ /dev/null @@ -1,197 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _IA_CSS_BUFQ_H -#define _IA_CSS_BUFQ_H - -#include -#include "ia_css_bufq_comm.h" -#include "ia_css_buffer.h" -#include "ia_css_err.h" -#define BUFQ_EVENT_SIZE 4 - - -/** - * @brief Query the internal frame ID. - * - * @param[in] key The query key. - * @param[out] val The query value. - * - * @return - * true, if the query succeeds; - * false, if the query fails. - */ -bool ia_css_query_internal_queue_id( - enum ia_css_buffer_type buf_type, - unsigned int thread_id, - enum sh_css_queue_id *val - ); - - -/** - * @brief Map buffer type to a internal queue id. - * - * @param[in] thread id Thread in which the buffer type has to be mapped or unmapped - * @param[in] buf_type buffer type. 
- * @param[in] map boolean flag to specify map or unmap - * @return none - */ -void ia_css_queue_map( - unsigned int thread_id, - enum ia_css_buffer_type buf_type, - bool map - ); - - -/** - * @brief Initilize buffer type to a queue id mapping - * @return none - */ -void ia_css_queue_map_init(void); - - -/** - * @brief initializes bufq module - * It create instances of - * -host to SP buffer queue which is a list with predefined size, - * MxN queues where M is the number threads and N is the number queues per thread - *-SP to host buffer queue , is a list with N queues - *-host to SP event communication queue - * -SP to host event communication queue - * -queue for tagger commands - * @return none - */ -void ia_css_bufq_init(void); - - -/** -* @brief Enqueues an item into host to SP buffer queue - * - * @param thread_index[in] Thread in which the item to be enqueued - * - * @param queue_id[in] Index of the queue in the specified thread - * @param item[in] Object to enqueue. - * @return IA_CSS_SUCCESS or error code upon error. - * -*/ -enum ia_css_err ia_css_bufq_enqueue_buffer( - int thread_index, - int queue_id, - uint32_t item); - -/** -* @brief Dequeues an item from SP to host buffer queue. - * - * @param queue_id[in] Specifies the index of the queue in the list where - * the item has to be read. - * @paramitem [out] Object to be dequeued into this item. - * @return IA_CSS_SUCCESS or error code upon error. - * -*/ -enum ia_css_err ia_css_bufq_dequeue_buffer( - int queue_id, - uint32_t *item); - -/** -* @brief Enqueue an event item into host to SP communication event queue. - * - * @param[in] evt_id The event ID. - * @param[in] evt_payload_0 The event payload. - * @param[in] evt_payload_1 The event payload. - * @param[in] evt_payload_2 The event payload. - * @return IA_CSS_SUCCESS or error code upon error. 
- * -*/ -enum ia_css_err ia_css_bufq_enqueue_psys_event( - uint8_t evt_id, - uint8_t evt_payload_0, - uint8_t evt_payload_1, - uint8_t evt_payload_2 - ); - -/** - * @brief Dequeue an item from SP to host communication event queue. - * - * @param item Object to be dequeued into this item. - * @return IA_CSS_SUCCESS or error code upon error. - * -*/ -enum ia_css_err ia_css_bufq_dequeue_psys_event( - uint8_t item[BUFQ_EVENT_SIZE] - ); - -/** - * @brief Enqueue an event item into host to SP EOF event queue. - * - * @param[in] evt_id The event ID. - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -enum ia_css_err ia_css_bufq_enqueue_isys_event( - uint8_t evt_id); - -/** -* @brief Dequeue an item from SP to host communication EOF event queue. - - * - * @param item Object to be dequeued into this item. - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -enum ia_css_err ia_css_bufq_dequeue_isys_event( - uint8_t item[BUFQ_EVENT_SIZE]); - -/** -* @brief Enqueue a tagger command item into tagger command queue.. - * - * @param item Object to be enqueue. - * @return IA_CSS_SUCCESS or error code upon error. - * -*/ -enum ia_css_err ia_css_bufq_enqueue_tag_cmd( - uint32_t item); - -/** -* @brief Uninitializes bufq module. - * - * @return IA_CSS_SUCCESS or error code upon error. - * -*/ -enum ia_css_err ia_css_bufq_deinit(void); - -/** -* @brief Dump queue states - * - * @return None - * -*/ -void ia_css_bufq_dump_queue_info(void); - -#endif /* _IA_CSS_BUFQ_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq_comm.h deleted file mode 100644 index bb77080591b9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq_comm.h +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef _IA_CSS_BUFQ_COMM_H -#define _IA_CSS_BUFQ_COMM_H - -#include "system_global.h" - -enum sh_css_queue_id { - SH_CSS_INVALID_QUEUE_ID = -1, - SH_CSS_QUEUE_A_ID = 0, - SH_CSS_QUEUE_B_ID, - SH_CSS_QUEUE_C_ID, - SH_CSS_QUEUE_D_ID, - SH_CSS_QUEUE_E_ID, - SH_CSS_QUEUE_F_ID, - SH_CSS_QUEUE_G_ID, -#if defined(HAS_NO_INPUT_SYSTEM) - /* input frame queue for skycam */ - SH_CSS_QUEUE_H_ID, -#endif -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - SH_CSS_QUEUE_H_ID, /* for metadata */ -#endif - -#if defined(HAS_NO_INPUT_SYSTEM) || defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_H_ID+1) -#else -#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_G_ID+1) -#endif - -}; - -#define SH_CSS_MAX_DYNAMIC_BUFFERS_PER_THREAD SH_CSS_MAX_NUM_QUEUES -/* for now we staticaly assign queue 0 & 1 to parameter sets */ -#define IA_CSS_PARAMETER_SET_QUEUE_ID SH_CSS_QUEUE_A_ID -#define IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID SH_CSS_QUEUE_B_ID - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c deleted file mode 100644 index ffbcdd80d934..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c +++ /dev/null @@ -1,589 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "assert_support.h" /* assert */ -#include "ia_css_buffer.h" -#include "sp.h" -#include "ia_css_bufq.h" /* Bufq API's */ -#include "ia_css_queue.h" /* ia_css_queue_t */ -#include "sw_event_global.h" /* Event IDs.*/ -#include "ia_css_eventq.h" /* ia_css_eventq_recv()*/ -#include "ia_css_debug.h" /* ia_css_debug_dtrace*/ -#include "sh_css_internal.h" /* sh_css_queue_type */ -#include "sp_local.h" /* sp_address_of */ -#include "ia_css_util.h" /* ia_css_convert_errno()*/ -#include "sh_css_firmware.h" /* sh_css_sp_fw*/ - -#define BUFQ_DUMP_FILE_NAME_PREFIX_SIZE 256 - -static char prefix[BUFQ_DUMP_FILE_NAME_PREFIX_SIZE] = {0}; - -/*********************************************************/ -/* Global Queue objects used by CSS */ -/*********************************************************/ - -#ifndef ISP2401 - -struct sh_css_queues { - /* Host2SP buffer queue */ - ia_css_queue_t host2sp_buffer_queue_handles - [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES]; - /* SP2Host buffer queue */ - ia_css_queue_t sp2host_buffer_queue_handles - [SH_CSS_MAX_NUM_QUEUES]; - - /* Host2SP event queue */ - ia_css_queue_t host2sp_psys_event_queue_handle; - - /* SP2Host event queue */ - ia_css_queue_t sp2host_psys_event_queue_handle; - -#if !defined(HAS_NO_INPUT_SYSTEM) - /* Host2SP ISYS event queue */ - ia_css_queue_t host2sp_isys_event_queue_handle; - - /* SP2Host ISYS event queue */ - ia_css_queue_t sp2host_isys_event_queue_handle; -#endif - /* Tagger command queue */ - ia_css_queue_t host2sp_tag_cmd_queue_handle; -}; - -#else - -struct sh_css_queues { - /* Host2SP buffer queue */ - ia_css_queue_t host2sp_buffer_queue_handles - [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES]; - /* SP2Host buffer queue */ - ia_css_queue_t sp2host_buffer_queue_handles - [SH_CSS_MAX_NUM_QUEUES]; - - /* Host2SP event queue */ - ia_css_queue_t host2sp_psys_event_queue_handle; - - /* SP2Host event queue */ - ia_css_queue_t sp2host_psys_event_queue_handle; - -#if !defined(HAS_NO_INPUT_SYSTEM) - /* 
Host2SP ISYS event queue */ - ia_css_queue_t host2sp_isys_event_queue_handle; - - /* SP2Host ISYS event queue */ - ia_css_queue_t sp2host_isys_event_queue_handle; - - /* Tagger command queue */ - ia_css_queue_t host2sp_tag_cmd_queue_handle; -#endif -}; - -#endif - -/******************************************************* -*** Static variables -********************************************************/ -static struct sh_css_queues css_queues; - -static int buffer_type_to_queue_id_map[SH_CSS_MAX_SP_THREADS][IA_CSS_NUM_DYNAMIC_BUFFER_TYPE]; -static bool queue_availability[SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES]; - -/******************************************************* -*** Static functions -********************************************************/ -static void map_buffer_type_to_queue_id( - unsigned int thread_id, - enum ia_css_buffer_type buf_type - ); -static void unmap_buffer_type_to_queue_id( - unsigned int thread_id, - enum ia_css_buffer_type buf_type - ); - -static ia_css_queue_t *bufq_get_qhandle( - enum sh_css_queue_type type, - enum sh_css_queue_id id, - int thread - ); - -/******************************************************* -*** Public functions -********************************************************/ -void ia_css_queue_map_init(void) -{ - unsigned int i, j; - - for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) { - for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) - queue_availability[i][j] = true; - } - - for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) { - for (j = 0; j < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE; j++) - buffer_type_to_queue_id_map[i][j] = SH_CSS_INVALID_QUEUE_ID; - } -} - -void ia_css_queue_map( - unsigned int thread_id, - enum ia_css_buffer_type buf_type, - bool map) -{ - assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE); - assert(thread_id < SH_CSS_MAX_SP_THREADS); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_queue_map() enter: buf_type=%d, thread_id=%d\n", buf_type, thread_id); - - if (map) - map_buffer_type_to_queue_id(thread_id, 
buf_type); - else - unmap_buffer_type_to_queue_id(thread_id, buf_type); -} - -/* - * @brief Query the internal queue ID. - */ -bool ia_css_query_internal_queue_id( - enum ia_css_buffer_type buf_type, - unsigned int thread_id, - enum sh_css_queue_id *val) -{ - IA_CSS_ENTER("buf_type=%d, thread_id=%d, val = %p", buf_type, thread_id, val); - - if ((val == NULL) || (thread_id >= SH_CSS_MAX_SP_THREADS) || (buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE)) { - IA_CSS_LEAVE("return_val = false"); - return false; - } - - *val = buffer_type_to_queue_id_map[thread_id][buf_type]; - if ((*val == SH_CSS_INVALID_QUEUE_ID) || (*val >= SH_CSS_MAX_NUM_QUEUES)) { - IA_CSS_LOG("INVALID queue ID MAP = %d\n", *val); - IA_CSS_LEAVE("return_val = false"); - return false; - } - IA_CSS_LEAVE("return_val = true"); - return true; -} - -/******************************************************* -*** Static functions -********************************************************/ -static void map_buffer_type_to_queue_id( - unsigned int thread_id, - enum ia_css_buffer_type buf_type) -{ - unsigned int i; - - assert(thread_id < SH_CSS_MAX_SP_THREADS); - assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE); - assert(buffer_type_to_queue_id_map[thread_id][buf_type] == SH_CSS_INVALID_QUEUE_ID); - - /* queue 0 is reserved for parameters because it doesn't depend on events */ - if (buf_type == IA_CSS_BUFFER_TYPE_PARAMETER_SET) { - assert(queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID]); - queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID] = false; - buffer_type_to_queue_id_map[thread_id][buf_type] = IA_CSS_PARAMETER_SET_QUEUE_ID; - return; - } - - /* queue 1 is reserved for per frame parameters because it doesn't depend on events */ - if (buf_type == IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET) { - assert(queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID]); - queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID] = false; - 
buffer_type_to_queue_id_map[thread_id][buf_type] = IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID; - return; - } - - for (i = SH_CSS_QUEUE_C_ID; i < SH_CSS_MAX_NUM_QUEUES; i++) { - if (queue_availability[thread_id][i]) { - queue_availability[thread_id][i] = false; - buffer_type_to_queue_id_map[thread_id][buf_type] = i; - break; - } - } - - assert(i != SH_CSS_MAX_NUM_QUEUES); - return; -} - -static void unmap_buffer_type_to_queue_id( - unsigned int thread_id, - enum ia_css_buffer_type buf_type) -{ - int queue_id; - - assert(thread_id < SH_CSS_MAX_SP_THREADS); - assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE); - assert(buffer_type_to_queue_id_map[thread_id][buf_type] != SH_CSS_INVALID_QUEUE_ID); - - queue_id = buffer_type_to_queue_id_map[thread_id][buf_type]; - buffer_type_to_queue_id_map[thread_id][buf_type] = SH_CSS_INVALID_QUEUE_ID; - queue_availability[thread_id][queue_id] = true; -} - - -static ia_css_queue_t *bufq_get_qhandle( - enum sh_css_queue_type type, - enum sh_css_queue_id id, - int thread) -{ - ia_css_queue_t *q = NULL; - - switch (type) { - case sh_css_host2sp_buffer_queue: - if ((thread >= SH_CSS_MAX_SP_THREADS) || (thread < 0) || - (id == SH_CSS_INVALID_QUEUE_ID)) - break; - q = &css_queues.host2sp_buffer_queue_handles[thread][id]; - break; - case sh_css_sp2host_buffer_queue: - if (id == SH_CSS_INVALID_QUEUE_ID) - break; - q = &css_queues.sp2host_buffer_queue_handles[id]; - break; - case sh_css_host2sp_psys_event_queue: - q = &css_queues.host2sp_psys_event_queue_handle; - break; - case sh_css_sp2host_psys_event_queue: - q = &css_queues.sp2host_psys_event_queue_handle; - break; -#if !defined(HAS_NO_INPUT_SYSTEM) - case sh_css_host2sp_isys_event_queue: - q = &css_queues.host2sp_isys_event_queue_handle; - break; - case sh_css_sp2host_isys_event_queue: - q = &css_queues.sp2host_isys_event_queue_handle; - break; -#endif - case sh_css_host2sp_tag_cmd_queue: - q = &css_queues.host2sp_tag_cmd_queue_handle; - break; - default: - break; - } - - return q; -} - -/* 
Local function to initialize a buffer queue. This reduces - * the chances of copy-paste errors or typos. - */ -static inline void -init_bufq(unsigned int desc_offset, - unsigned int elems_offset, - ia_css_queue_t *handle) -{ - const struct ia_css_fw_info *fw; - unsigned int q_base_addr; - ia_css_queue_remote_t remoteq; - - fw = &sh_css_sp_fw; - q_base_addr = fw->info.sp.host_sp_queue; - - /* Setup queue location as SP and proc id as SP0_ID */ - remoteq.location = IA_CSS_QUEUE_LOC_SP; - remoteq.proc_id = SP0_ID; - remoteq.cb_desc_addr = q_base_addr + desc_offset; - remoteq.cb_elems_addr = q_base_addr + elems_offset; - /* Initialize the queue instance and obtain handle */ - ia_css_queue_remote_init(handle, &remoteq); -} - -void ia_css_bufq_init(void) -{ - int i, j; - - IA_CSS_ENTER_PRIVATE(""); - - /* Setup all the local queue descriptors for Host2SP Buffer Queues */ - for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) - for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) { - init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_buffer_queues_desc[i][j]), - (uint32_t)offsetof(struct host_sp_queues, host2sp_buffer_queues_elems[i][j]), - &css_queues.host2sp_buffer_queue_handles[i][j]); - } - - /* Setup all the local queue descriptors for SP2Host Buffer Queues */ - for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) { - init_bufq(offsetof(struct host_sp_queues, sp2host_buffer_queues_desc[i]), - offsetof(struct host_sp_queues, sp2host_buffer_queues_elems[i]), - &css_queues.sp2host_buffer_queue_handles[i]); - } - - /* Host2SP event queue*/ - init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_psys_event_queue_desc), - (uint32_t)offsetof(struct host_sp_queues, host2sp_psys_event_queue_elems), - &css_queues.host2sp_psys_event_queue_handle); - - /* SP2Host event queue */ - init_bufq((uint32_t)offsetof(struct host_sp_queues, sp2host_psys_event_queue_desc), - (uint32_t)offsetof(struct host_sp_queues, sp2host_psys_event_queue_elems), - &css_queues.sp2host_psys_event_queue_handle); - 
-#if !defined(HAS_NO_INPUT_SYSTEM) - /* Host2SP ISYS event queue */ - init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_isys_event_queue_desc), - (uint32_t)offsetof(struct host_sp_queues, host2sp_isys_event_queue_elems), - &css_queues.host2sp_isys_event_queue_handle); - - /* SP2Host ISYS event queue*/ - init_bufq((uint32_t)offsetof(struct host_sp_queues, sp2host_isys_event_queue_desc), - (uint32_t)offsetof(struct host_sp_queues, sp2host_isys_event_queue_elems), - &css_queues.sp2host_isys_event_queue_handle); - - /* Host2SP tagger command queue */ - init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_desc), - (uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_elems), - &css_queues.host2sp_tag_cmd_queue_handle); -#endif - - IA_CSS_LEAVE_PRIVATE(""); -} - -enum ia_css_err ia_css_bufq_enqueue_buffer( - int thread_index, - int queue_id, - uint32_t item) -{ - enum ia_css_err return_err = IA_CSS_SUCCESS; - ia_css_queue_t *q; - int error; - - IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id); - if ((thread_index >= SH_CSS_MAX_SP_THREADS) || (thread_index < 0) || - (queue_id == SH_CSS_INVALID_QUEUE_ID)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - /* Get the queue for communication */ - q = bufq_get_qhandle(sh_css_host2sp_buffer_queue, - queue_id, - thread_index); - if (q != NULL) { - error = ia_css_queue_enqueue(q, item); - return_err = ia_css_convert_errno(error); - } else { - IA_CSS_ERROR("queue is not initialized"); - return_err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - IA_CSS_LEAVE_ERR_PRIVATE(return_err); - return return_err; -} - -enum ia_css_err ia_css_bufq_dequeue_buffer( - int queue_id, - uint32_t *item) -{ - enum ia_css_err return_err; - int error = 0; - ia_css_queue_t *q; - - IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id); - if ((item == NULL) || - (queue_id <= SH_CSS_INVALID_QUEUE_ID) || - (queue_id >= SH_CSS_MAX_NUM_QUEUES) - ) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - q = bufq_get_qhandle(sh_css_sp2host_buffer_queue, 
- queue_id, - -1); - if (q != NULL) { - error = ia_css_queue_dequeue(q, item); - return_err = ia_css_convert_errno(error); - } else { - IA_CSS_ERROR("queue is not initialized"); - return_err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - IA_CSS_LEAVE_ERR_PRIVATE(return_err); - return return_err; -} - -enum ia_css_err ia_css_bufq_enqueue_psys_event( - uint8_t evt_id, - uint8_t evt_payload_0, - uint8_t evt_payload_1, - uint8_t evt_payload_2) -{ - enum ia_css_err return_err; - int error = 0; - ia_css_queue_t *q; - - IA_CSS_ENTER_PRIVATE("evt_id=%d", evt_id); - q = bufq_get_qhandle(sh_css_host2sp_psys_event_queue, -1, -1); - if (NULL == q) { - IA_CSS_ERROR("queue is not initialized"); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - error = ia_css_eventq_send(q, - evt_id, evt_payload_0, evt_payload_1, evt_payload_2); - - return_err = ia_css_convert_errno(error); - IA_CSS_LEAVE_ERR_PRIVATE(return_err); - return return_err; -} - -enum ia_css_err ia_css_bufq_dequeue_psys_event( - uint8_t item[BUFQ_EVENT_SIZE]) -{ - enum ia_css_err; - int error = 0; - ia_css_queue_t *q; - - /* No ENTER/LEAVE in this function since this is polled - * by some test apps. Enablign logging here floods the log - * files which may cause timeouts. */ - if (item == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - q = bufq_get_qhandle(sh_css_sp2host_psys_event_queue, -1, -1); - if (NULL == q) { - IA_CSS_ERROR("queue is not initialized"); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - error = ia_css_eventq_recv(q, item); - - return ia_css_convert_errno(error); - -} - -enum ia_css_err ia_css_bufq_dequeue_isys_event( - uint8_t item[BUFQ_EVENT_SIZE]) -{ -#if !defined(HAS_NO_INPUT_SYSTEM) - enum ia_css_err; - int error = 0; - ia_css_queue_t *q; - - /* No ENTER/LEAVE in this function since this is polled - * by some test apps. Enablign logging here floods the log - * files which may cause timeouts. 
*/ - if (item == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - q = bufq_get_qhandle(sh_css_sp2host_isys_event_queue, -1, -1); - if (q == NULL) { - IA_CSS_ERROR("queue is not initialized"); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - error = ia_css_eventq_recv(q, item); - return ia_css_convert_errno(error); -#else - (void)item; - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; -#endif -} - -enum ia_css_err ia_css_bufq_enqueue_isys_event(uint8_t evt_id) -{ -#if !defined(HAS_NO_INPUT_SYSTEM) - enum ia_css_err return_err; - int error = 0; - ia_css_queue_t *q; - - IA_CSS_ENTER_PRIVATE("event_id=%d", evt_id); - q = bufq_get_qhandle(sh_css_host2sp_isys_event_queue, -1, -1); - if (q == NULL) { - IA_CSS_ERROR("queue is not initialized"); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - error = ia_css_eventq_send(q, evt_id, 0, 0, 0); - return_err = ia_css_convert_errno(error); - IA_CSS_LEAVE_ERR_PRIVATE(return_err); - return return_err; -#else - (void)evt_id; - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; -#endif -} - -enum ia_css_err ia_css_bufq_enqueue_tag_cmd( - uint32_t item) -{ -#if !defined(HAS_NO_INPUT_SYSTEM) - enum ia_css_err return_err; - int error = 0; - ia_css_queue_t *q; - - IA_CSS_ENTER_PRIVATE("item=%d", item); - q = bufq_get_qhandle(sh_css_host2sp_tag_cmd_queue, -1, -1); - if (NULL == q) { - IA_CSS_ERROR("queue is not initialized"); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - error = ia_css_queue_enqueue(q, item); - - return_err = ia_css_convert_errno(error); - IA_CSS_LEAVE_ERR_PRIVATE(return_err); - return return_err; -#else - (void)item; - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; -#endif -} - -enum ia_css_err ia_css_bufq_deinit(void) -{ - return IA_CSS_SUCCESS; -} - -static void bufq_dump_queue_info(const char *prefix, ia_css_queue_t *qhandle) -{ - uint32_t free = 0, used = 0; - assert(prefix != NULL && qhandle != NULL); - ia_css_queue_get_used_space(qhandle, &used); - ia_css_queue_get_free_space(qhandle, &free); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: used=%u free=%u\n", - prefix, used, free); - -} - -void ia_css_bufq_dump_queue_info(void) -{ - int i, j; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Queue Information:\n"); - - for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) { - for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) { - snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE, - "host2sp_buffer_queue[%u][%u]", i, j); - bufq_dump_queue_info(prefix, - &css_queues.host2sp_buffer_queue_handles[i][j]); - } - } - - for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) { - snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE, - "sp2host_buffer_queue[%u]", i); - bufq_dump_queue_info(prefix, - &css_queues.sp2host_buffer_queue_handles[i]); - } - bufq_dump_queue_info("host2sp_psys_event", - &css_queues.host2sp_psys_event_queue_handle); - bufq_dump_queue_info("sp2host_psys_event", - &css_queues.sp2host_psys_event_queue_handle); - -#if !defined(HAS_NO_INPUT_SYSTEM) - bufq_dump_queue_info("host2sp_isys_event", - &css_queues.host2sp_isys_event_queue_handle); - bufq_dump_queue_info("sp2host_isys_event", - &css_queues.sp2host_isys_event_queue_handle); - bufq_dump_queue_info("host2sp_tag_cmd", - &css_queues.host2sp_tag_cmd_queue_handle); -#endif -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h deleted file mode 100644 index 4b28b2a0863a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _IA_CSS_DEBUG_H_ -#define _IA_CSS_DEBUG_H_ - -/*! \file */ - -#include -#include -#include "ia_css_types.h" -#include "ia_css_binary.h" -#include "ia_css_frame_public.h" -#include "ia_css_pipe_public.h" -#include "ia_css_stream_public.h" -#include "ia_css_metadata.h" -#include "sh_css_internal.h" -#ifdef ISP2401 -#if defined(IS_ISP_2500_SYSTEM) -#include "ia_css_pipe.h" -#endif -#endif - -/* available levels */ -/*! Level for tracing errors */ -#define IA_CSS_DEBUG_ERROR 1 -/*! Level for tracing warnings */ -#define IA_CSS_DEBUG_WARNING 3 -/*! Level for tracing debug messages */ -#define IA_CSS_DEBUG_VERBOSE 5 -/*! Level for tracing trace messages a.o. ia_css public function calls */ -#define IA_CSS_DEBUG_TRACE 6 -/*! Level for tracing trace messages a.o. ia_css private function calls */ -#define IA_CSS_DEBUG_TRACE_PRIVATE 7 -/*! Level for tracing parameter messages e.g. in and out params of functions */ -#define IA_CSS_DEBUG_PARAM 8 -/*! Level for tracing info messages */ -#define IA_CSS_DEBUG_INFO 9 -/* Global variable which controls the verbosity levels of the debug tracing */ -extern unsigned int ia_css_debug_trace_level; - -/*! @brief Enum defining the different isp parameters to dump. - * Values can be combined to dump a combination of sets. 
- */ -enum ia_css_debug_enable_param_dump { - IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /** FPN table */ - IA_CSS_DEBUG_DUMP_OB = 1 << 1, /** OB table */ - IA_CSS_DEBUG_DUMP_SC = 1 << 2, /** Shading table */ - IA_CSS_DEBUG_DUMP_WB = 1 << 3, /** White balance */ - IA_CSS_DEBUG_DUMP_DP = 1 << 4, /** Defect Pixel */ - IA_CSS_DEBUG_DUMP_BNR = 1 << 5, /** Bayer Noise Reductions */ - IA_CSS_DEBUG_DUMP_S3A = 1 << 6, /** 3A Statistics */ - IA_CSS_DEBUG_DUMP_DE = 1 << 7, /** De Mosaicing */ - IA_CSS_DEBUG_DUMP_YNR = 1 << 8, /** Luma Noise Reduction */ - IA_CSS_DEBUG_DUMP_CSC = 1 << 9, /** Color Space Conversion */ - IA_CSS_DEBUG_DUMP_GC = 1 << 10, /** Gamma Correction */ - IA_CSS_DEBUG_DUMP_TNR = 1 << 11, /** Temporal Noise Reduction */ - IA_CSS_DEBUG_DUMP_ANR = 1 << 12, /** Advanced Noise Reduction */ - IA_CSS_DEBUG_DUMP_CE = 1 << 13, /** Chroma Enhancement */ - IA_CSS_DEBUG_DUMP_ALL = 1 << 14 /** Dump all device parameters */ -}; - -#define IA_CSS_ERROR(fmt, ...) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, \ - "%s() %d: error: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__) - -#define IA_CSS_WARNING(fmt, ...) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_WARNING, \ - "%s() %d: warning: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__) - -/* Logging macros for public functions (API functions) */ -#define IA_CSS_ENTER(fmt, ...) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \ - "%s(): enter: " fmt "\n", __func__, ##__VA_ARGS__) - -/* Use this macro for small functions that do not call other functions. */ -#define IA_CSS_ENTER_LEAVE(fmt, ...) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \ - "%s(): enter: leave: " fmt "\n", __func__, ##__VA_ARGS__) - -#define IA_CSS_LEAVE(fmt, ...) 
\ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \ - "%s(): leave: " fmt "\n", __func__, ##__VA_ARGS__) - -/* Shorthand for returning an enum ia_css_err return value */ -#define IA_CSS_LEAVE_ERR(__err) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \ - "%s() %d: leave: return_err=%d\n", __func__, __LINE__, __err) - -/* Use this macro for logging other than enter/leave. - * Note that this macro always uses the PRIVATE logging level. - */ -#define IA_CSS_LOG(fmt, ...) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \ - "%s(): " fmt "\n", __func__, ##__VA_ARGS__) - -/* Logging macros for non-API functions. These have a lower trace level */ -#define IA_CSS_ENTER_PRIVATE(fmt, ...) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \ - "%s(): enter: " fmt "\n", __func__, ##__VA_ARGS__) - -#define IA_CSS_LEAVE_PRIVATE(fmt, ...) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \ - "%s(): leave: " fmt "\n", __func__, ##__VA_ARGS__) - -/* Shorthand for returning an enum ia_css_err return value */ -#define IA_CSS_LEAVE_ERR_PRIVATE(__err) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \ - "%s() %d: leave: return_err=%d\n", __func__, __LINE__, __err) - -/* Use this macro for small functions that do not call other functions. */ -#define IA_CSS_ENTER_LEAVE_PRIVATE(fmt, ...) \ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \ - "%s(): enter: leave: " fmt "\n", __func__, ##__VA_ARGS__) - -/*! @brief Function for tracing to the provided printf function in the - * environment. - * @param[in] level Level of the message. - * @param[in] fmt printf like format string - * @param[in] args arguments for the format string - */ -static inline void -ia_css_debug_vdtrace(unsigned int level, const char *fmt, va_list args) -{ - if (ia_css_debug_trace_level >= level) - sh_css_vprint(fmt, args); -} - -__printf(2, 3) -extern void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...); - -/*! @brief Dump sp thread's stack contents - * SP thread's stack contents are set to 0xcafecafe. 
This function dumps the - * stack to inspect if the stack's boundaries are compromised. - * @return None - */ -void ia_css_debug_dump_sp_stack_info(void); - -/*! @brief Function to set the global dtrace verbosity level. - * @param[in] trace_level Maximum level of the messages to be traced. - * @return None - */ -void ia_css_debug_set_dtrace_level( - const unsigned int trace_level); - -/*! @brief Function to get the global dtrace verbosity level. - * @return global dtrace verbosity level - */ -unsigned int ia_css_debug_get_dtrace_level(void); - -/*! @brief Dump input formatter state. - * Dumps the input formatter state to tracing output. - * @return None - */ -void ia_css_debug_dump_if_state(void); - -/*! @brief Dump isp hardware state. - * Dumps the isp hardware state to tracing output. - * @return None - */ -void ia_css_debug_dump_isp_state(void); - -/*! @brief Dump sp hardware state. - * Dumps the sp hardware state to tracing output. - * @return None - */ -void ia_css_debug_dump_sp_state(void); - -#ifdef ISP2401 -/*! @brief Dump GAC hardware state. - * Dumps the GAC ACB hardware registers. may be useful for - * detecting a GAC which got hang. - * @return None - */ -void ia_css_debug_dump_gac_state(void); - -#endif -/*! @brief Dump dma controller state. - * Dumps the dma controller state to tracing output. - * @return None - */ -void ia_css_debug_dump_dma_state(void); - -/*! @brief Dump internal sp software state. - * Dumps the sp software state to tracing output. - * @return None - */ -void ia_css_debug_dump_sp_sw_debug_info(void); - -/*! @brief Dump all related hardware state to the trace output - * @param[in] context String to identify context in output. - * @return None - */ -void ia_css_debug_dump_debug_info( - const char *context); - -#if SP_DEBUG != SP_DEBUG_NONE -void ia_css_debug_print_sp_debug_state( - const struct sh_css_sp_debug_state *state); -#endif - -/*! @brief Dump all related binary info data - * @param[in] bi Binary info struct. 
- * @return None - */ -void ia_css_debug_binary_print( - const struct ia_css_binary *bi); - -void ia_css_debug_sp_dump_mipi_fifo_high_water(void); - -/*! @brief Dump isp gdc fifo state to the trace output - * Dumps the isp gdc fifo state to tracing output. - * @return None - */ -void ia_css_debug_dump_isp_gdc_fifo_state(void); - -/*! @brief Dump dma isp fifo state - * Dumps the dma isp fifo state to tracing output. - * @return None - */ -void ia_css_debug_dump_dma_isp_fifo_state(void); - -/*! @brief Dump dma sp fifo state - * Dumps the dma sp fifo state to tracing output. - * @return None - */ -void ia_css_debug_dump_dma_sp_fifo_state(void); - -/*! \brief Dump pif A isp fifo state - * Dumps the primary input formatter state to tracing output. - * @return None - */ -void ia_css_debug_dump_pif_a_isp_fifo_state(void); - -/*! \brief Dump pif B isp fifo state - * Dumps the primary input formatter state to tracing output. - * \return None - */ -void ia_css_debug_dump_pif_b_isp_fifo_state(void); - -/*! @brief Dump stream-to-memory sp fifo state - * Dumps the stream-to-memory block state to tracing output. - * @return None - */ -void ia_css_debug_dump_str2mem_sp_fifo_state(void); - -/*! @brief Dump isp sp fifo state - * Dumps the isp sp fifo state to tracing output. - * @return None - */ -void ia_css_debug_dump_isp_sp_fifo_state(void); - -/*! @brief Dump all fifo state info to the output - * Dumps all fifo state to tracing output. - * @return None - */ -void ia_css_debug_dump_all_fifo_state(void); - -/*! @brief Dump the rx state to the output - * Dumps the rx state to tracing output. - * @return None - */ -void ia_css_debug_dump_rx_state(void); - -/*! @brief Dump the input system state to the output - * Dumps the input system state to tracing output. - * @return None - */ -void ia_css_debug_dump_isys_state(void); - -/*! @brief Dump the frame info to the trace output - * Dumps the frame info to tracing output. 
- * @param[in] frame pointer to struct ia_css_frame - * @param[in] descr description output along with the frame info - * @return None - */ -void ia_css_debug_frame_print( - const struct ia_css_frame *frame, - const char *descr); - -/*! @brief Function to enable sp sleep mode. - * Function that enables sp sleep mode - * @param[in] mode indicates when to put sp to sleep - * @return None - */ -void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode); - -/*! @brief Function to wake up sp when in sleep mode. - * After sp has been put to sleep, use this function to let it continue - * to run again. - * @return None - */ -void ia_css_debug_wake_up_sp(void); - -/*! @brief Function to dump isp parameters. - * Dump isp parameters to tracing output - * @param[in] stream pointer to ia_css_stream struct - * @param[in] enable flag indicating which parameters to dump. - * @return None - */ -void ia_css_debug_dump_isp_params(struct ia_css_stream *stream, unsigned int enable); - -/*! @brief Function to dump some sp performance counters. - * Dump sp performance counters, currently input system errors. - * @return None - */ -void ia_css_debug_dump_perf_counters(void); - -#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG -void sh_css_dump_thread_wait_info(void); -void sh_css_dump_pipe_stage_info(void); -void sh_css_dump_pipe_stripe_info(void); -#endif - -void ia_css_debug_dump_isp_binary(void); - -void sh_css_dump_sp_raw_copy_linecount(bool reduced); - -/*! @brief Dump the resolution info to the trace output - * Dumps the resolution info to the trace output. - * @param[in] res pointer to struct ia_css_resolution - * @param[in] label description of resolution output - * @return None - */ -void ia_css_debug_dump_resolution( - const struct ia_css_resolution *res, - const char *label); - -/*! @brief Dump the frame info to the trace output - * Dumps the frame info to the trace output. 
- * @param[in] info pointer to struct ia_css_frame_info - * @param[in] label description of frame_info output - * @return None - */ -void ia_css_debug_dump_frame_info( - const struct ia_css_frame_info *info, - const char *label); - -/*! @brief Dump the capture config info to the trace output - * Dumps the capture config info to the trace output. - * @param[in] config pointer to struct ia_css_capture_config - * @return None - */ -void ia_css_debug_dump_capture_config( - const struct ia_css_capture_config *config); - -/*! @brief Dump the pipe extra config info to the trace output - * Dumps the pipe extra config info to the trace output. - * @param[in] extra_config pointer to struct ia_css_pipe_extra_config - * @return None - */ -void ia_css_debug_dump_pipe_extra_config( - const struct ia_css_pipe_extra_config *extra_config); - -/*! @brief Dump the pipe config info to the trace output - * Dumps the pipe config info to the trace output. - * @param[in] config pointer to struct ia_css_pipe_config - * @return None - */ -void ia_css_debug_dump_pipe_config( - const struct ia_css_pipe_config *config); - - -/*! @brief Dump the stream config source info to the trace output - * Dumps the stream config source info to the trace output. - * @param[in] config pointer to struct ia_css_stream_config - * @return None - */ -void ia_css_debug_dump_stream_config_source( - const struct ia_css_stream_config *config); - -/*! @brief Dump the mipi buffer config info to the trace output - * Dumps the mipi buffer config info to the trace output. - * @param[in] config pointer to struct ia_css_mipi_buffer_config - * @return None - */ -void ia_css_debug_dump_mipi_buffer_config( - const struct ia_css_mipi_buffer_config *config); - -/*! @brief Dump the metadata config info to the trace output - * Dumps the metadata config info to the trace output. 
- * @param[in] config pointer to struct ia_css_metadata_config - * @return None - */ -void ia_css_debug_dump_metadata_config( - const struct ia_css_metadata_config *config); - -/*! @brief Dump the stream config info to the trace output - * Dumps the stream config info to the trace output. - * @param[in] config pointer to struct ia_css_stream_config - * @param[in] num_pipes number of pipes for the stream - * @return None - */ -void ia_css_debug_dump_stream_config( - const struct ia_css_stream_config *config, - int num_pipes); - -/*! @brief Dump the state of the SP tagger - * Dumps the internal state of the SP tagger - * @return None - */ -void ia_css_debug_tagger_state(void); - -/** - * @brief Initialize the debug mode. - * - * WARNING: - * This API should be called ONLY once in the debug mode. - * - * @return - * - true, if it is successful. - * - false, otherwise. - */ -bool ia_css_debug_mode_init(void); - -/** - * @brief Disable the DMA channel. - * - * @param[in] dma_ID The ID of the target DMA. - * @param[in] channel_id The ID of the target DMA channel. - * @param[in] request_type The type of the DMA request. - * For example: - * - "0" indicates the writing request. - * - "1" indicates the reading request. - * - * This is part of the DMA API -> dma.h - * - * @return - * - true, if it is successful. - * - false, otherwise. - */ -bool ia_css_debug_mode_disable_dma_channel( - int dma_ID, - int channel_id, - int request_type); -/** - * @brief Enable the DMA channel. - * - * @param[in] dma_ID The ID of the target DMA. - * @param[in] channel_id The ID of the target DMA channel. - * @param[in] request_type The type of the DMA request. - * For example: - * - "0" indicates the writing request. - * - "1" indicates the reading request. - * - * @return - * - true, if it is successful. - * - false, otherwise. - */ -bool ia_css_debug_mode_enable_dma_channel( - int dma_ID, - int channel_id, - int request_type); - -/** - * @brief Dump tracer data. 
- * [Currently support is only for SKC] - * - * @return - * - none. - */ -void ia_css_debug_dump_trace(void); - -#ifdef ISP2401 -/** - * @brief Program counter dumping (in loop) - * - * @param[in] id The ID of the SP - * @param[in] num_of_dumps The number of dumps - * - * @return - * - none - */ -void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps); - -#if defined(IS_ISP_2500_SYSTEM) -/*! @brief Dump all states for ISP hang case. - * Dumps the ISP previous and current configurations - * GACs status, SP0/1 statuses. - * - * @param[in] pipe The current pipe - * - * @return None - */ -void ia_css_debug_dump_hang_status( - struct ia_css_pipe *pipe); - -/*! @brief External command handler - * External command handler - * - * @return None - */ -void ia_css_debug_ext_command_handler(void); - -#endif -#endif - -#endif /* _IA_CSS_DEBUG_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_internal.h deleted file mode 100644 index 88d025807201..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_internal.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. 
- -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -/* TO DO: Move debug related code from ia_css_internal.h in */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_pipe.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_pipe.h deleted file mode 100644 index 72ac0e32ebf7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_pipe.h +++ /dev/null @@ -1,84 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _IA_CSS_DEBUG_PIPE_H_ -#define _IA_CSS_DEBUG_PIPE_H_ - -/*! \file */ - -#include -#include -#include "ia_css_pipeline.h" - -/** - * @brief Internal debug support for constructing a pipe graph. - * - * @return None - */ -extern void ia_css_debug_pipe_graph_dump_prologue(void); - -/** - * @brief Internal debug support for constructing a pipe graph. - * - * @return None - */ -extern void ia_css_debug_pipe_graph_dump_epilogue(void); - -/** - * @brief Internal debug support for constructing a pipe graph. - * @param[in] stage Pipeline stage. - * @param[in] id Pipe id. - * - * @return None - */ -extern void ia_css_debug_pipe_graph_dump_stage( - struct ia_css_pipeline_stage *stage, - enum ia_css_pipe_id id); - -/** - * @brief Internal debug support for constructing a pipe graph. - * @param[in] out_frame Output frame of SP raw copy. - * - * @return None - */ -extern void ia_css_debug_pipe_graph_dump_sp_raw_copy( - struct ia_css_frame *out_frame); - - -/** - * @brief Internal debug support for constructing a pipe graph. - * @param[in] stream_config info about sensor and input formatter. - * - * @return None - */ -extern void ia_css_debug_pipe_graph_dump_stream_config( - const struct ia_css_stream_config *stream_config); - -#endif /* _IA_CSS_DEBUG_PIPE_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c deleted file mode 100644 index 4607a76dc78a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c +++ /dev/null @@ -1,3596 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "debug.h" -#include "memory_access.h" - -#ifndef __INLINE_INPUT_SYSTEM__ -#define __INLINE_INPUT_SYSTEM__ -#endif -#ifndef __INLINE_IBUF_CTRL__ -#define __INLINE_IBUF_CTRL__ -#endif -#ifndef __INLINE_CSI_RX__ -#define __INLINE_CSI_RX__ -#endif -#ifndef __INLINE_PIXELGEN__ -#define __INLINE_PIXELGEN__ -#endif -#ifndef __INLINE_STREAM2MMIO__ -#define __INLINE_STREAM2MMIO__ -#endif - -#include "ia_css_debug.h" -#include "ia_css_debug_pipe.h" -#include "ia_css_irq.h" -#include "ia_css_stream.h" -#include "ia_css_pipeline.h" -#include "ia_css_isp_param.h" -#include "sh_css_params.h" -#include "ia_css_bufq.h" -#ifdef ISP2401 -#include "ia_css_queue.h" -#endif - -#include "ia_css_isp_params.h" - -#include "system_local.h" -#include "assert_support.h" -#include "print_support.h" -#include "string_support.h" -#ifdef ISP2401 -#include "ia_css_system_ctrl.h" -#endif - -#include "fifo_monitor.h" - -#if !defined(HAS_NO_INPUT_FORMATTER) -#include "input_formatter.h" -#endif -#include "dma.h" -#include "irq.h" -#include "gp_device.h" -#include "sp.h" -#include "isp.h" -#include "type_support.h" -#include "math_support.h" /* CEIL_DIV */ -#if defined(HAS_INPUT_FORMATTER_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -#include "input_system.h" /* input_formatter_reg_load */ -#endif -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -#include "ia_css_tagger_common.h" -#endif - -#include "sh_css_internal.h" -#if !defined(HAS_NO_INPUT_SYSTEM) -#include "ia_css_isys.h" -#endif 
-#include "sh_css_sp.h" /* sh_css_sp_get_debug_state() */ - -#include "css_trace.h" /* tracer */ - -#include "device_access.h" /* for ia_css_device_load_uint32 */ - -/* Include all kernel host interfaces for ISP1 */ -#include "anr/anr_1.0/ia_css_anr.host.h" -#include "cnr/cnr_1.0/ia_css_cnr.host.h" -#include "csc/csc_1.0/ia_css_csc.host.h" -#include "de/de_1.0/ia_css_de.host.h" -#include "dp/dp_1.0/ia_css_dp.host.h" -#include "bnr/bnr_1.0/ia_css_bnr.host.h" -#include "fpn/fpn_1.0/ia_css_fpn.host.h" -#include "gc/gc_1.0/ia_css_gc.host.h" -#include "ob/ob_1.0/ia_css_ob.host.h" -#include "s3a/s3a_1.0/ia_css_s3a.host.h" -#include "sc/sc_1.0/ia_css_sc.host.h" -#include "tnr/tnr_1.0/ia_css_tnr.host.h" -#include "uds/uds_1.0/ia_css_uds_param.h" -#include "wb/wb_1.0/ia_css_wb.host.h" -#include "ynr/ynr_1.0/ia_css_ynr.host.h" - -/* Include additional kernel host interfaces for ISP2 */ -#include "aa/aa_2/ia_css_aa2.host.h" -#include "anr/anr_2/ia_css_anr2.host.h" -#include "cnr/cnr_2/ia_css_cnr2.host.h" -#include "de/de_2/ia_css_de2.host.h" -#include "gc/gc_2/ia_css_gc2.host.h" -#include "ynr/ynr_2/ia_css_ynr2.host.h" - -/* Global variable to store the dtrace verbosity level */ -unsigned int ia_css_debug_trace_level = IA_CSS_DEBUG_WARNING; - -#define DPG_START "ia_css_debug_pipe_graph_dump_start " -#define DPG_END " ia_css_debug_pipe_graph_dump_end\n" - -#define ENABLE_LINE_MAX_LENGTH (25) - -#ifdef ISP2401 -#define DBG_EXT_CMD_TRACE_PNTS_DUMP (1 << 8) -#define DBG_EXT_CMD_PUB_CFG_DUMP (1 << 9) -#define DBG_EXT_CMD_GAC_REG_DUMP (1 << 10) -#define DBG_EXT_CMD_GAC_ACB_REG_DUMP (1 << 11) -#define DBG_EXT_CMD_FIFO_DUMP (1 << 12) -#define DBG_EXT_CMD_QUEUE_DUMP (1 << 13) -#define DBG_EXT_CMD_DMA_DUMP (1 << 14) -#define DBG_EXT_CMD_MASK 0xAB0000CD - -#endif -/* - * TODO:SH_CSS_MAX_SP_THREADS is not the max number of sp threads - * future rework should fix this and remove the define MAX_THREAD_NUM - */ -#define MAX_THREAD_NUM (SH_CSS_MAX_SP_THREADS + SH_CSS_MAX_SP_INTERNAL_THREADS) 
- -static struct pipe_graph_class { - bool do_init; - int height; - int width; - int eff_height; - int eff_width; - enum atomisp_input_format stream_format; -} pg_inst = {true, 0, 0, 0, 0, N_ATOMISP_INPUT_FORMAT}; - -static const char * const queue_id_to_str[] = { - /* [SH_CSS_QUEUE_A_ID] =*/ "queue_A", - /* [SH_CSS_QUEUE_B_ID] =*/ "queue_B", - /* [SH_CSS_QUEUE_C_ID] =*/ "queue_C", - /* [SH_CSS_QUEUE_D_ID] =*/ "queue_D", - /* [SH_CSS_QUEUE_E_ID] =*/ "queue_E", - /* [SH_CSS_QUEUE_F_ID] =*/ "queue_F", - /* [SH_CSS_QUEUE_G_ID] =*/ "queue_G", - /* [SH_CSS_QUEUE_H_ID] =*/ "queue_H" -}; - -static const char * const pipe_id_to_str[] = { - /* [IA_CSS_PIPE_ID_PREVIEW] =*/ "preview", - /* [IA_CSS_PIPE_ID_COPY] =*/ "copy", - /* [IA_CSS_PIPE_ID_VIDEO] =*/ "video", - /* [IA_CSS_PIPE_ID_CAPTURE] =*/ "capture", - /* [IA_CSS_PIPE_ID_YUVPP] =*/ "yuvpp", - /* [IA_CSS_PIPE_ID_ACC] =*/ "accelerator" -}; - -static char dot_id_input_bin[SH_CSS_MAX_BINARY_NAME+10]; -static char ring_buffer[200]; - -void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...) 
-{ - va_list ap; - - va_start(ap, fmt); - ia_css_debug_vdtrace(level, fmt, ap); - va_end(ap); -} - -static void debug_dump_long_array_formatted( - const sp_ID_t sp_id, - hrt_address stack_sp_addr, - unsigned stack_size) -{ - unsigned int i; - uint32_t val; - uint32_t addr = (uint32_t) stack_sp_addr; - uint32_t stack_size_words = CEIL_DIV(stack_size, sizeof(uint32_t)); - - /* When size is not multiple of four, last word is only relevant for - * remaining bytes */ - for (i = 0; i < stack_size_words; i++) { - val = sp_dmem_load_uint32(sp_id, (hrt_address)addr); - if ((i%8) == 0) - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n"); - - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "0x%08x ", val); - addr += sizeof(uint32_t); - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n"); -} - -static void debug_dump_sp_stack_info( - const sp_ID_t sp_id) -{ - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_sp_threads_stack; - unsigned int HIVE_ADDR_sp_threads_stack_size; - uint32_t stack_sizes[MAX_THREAD_NUM]; - uint32_t stack_sp_addr[MAX_THREAD_NUM]; - unsigned int i; - - fw = &sh_css_sp_fw; - - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "sp_id(%u) stack info\n", sp_id); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "from objects stack_addr_offset:0x%x stack_size_offset:0x%x\n", - fw->info.sp.threads_stack, - fw->info.sp.threads_stack_size); - - HIVE_ADDR_sp_threads_stack = fw->info.sp.threads_stack; - HIVE_ADDR_sp_threads_stack_size = fw->info.sp.threads_stack_size; - - if (fw->info.sp.threads_stack == 0 || - fw->info.sp.threads_stack_size == 0) - return; - - (void) HIVE_ADDR_sp_threads_stack; - (void) HIVE_ADDR_sp_threads_stack_size; - - sp_dmem_load(sp_id, - (unsigned int)sp_address_of(sp_threads_stack), - &stack_sp_addr, sizeof(stack_sp_addr)); - sp_dmem_load(sp_id, - (unsigned int)sp_address_of(sp_threads_stack_size), - &stack_sizes, sizeof(stack_sizes)); - - for (i = 0 ; i < MAX_THREAD_NUM; i++) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "thread: %u stack_addr: 
0x%08x stack_size: %u\n", - i, stack_sp_addr[i], stack_sizes[i]); - debug_dump_long_array_formatted(sp_id, (hrt_address)stack_sp_addr[i], - stack_sizes[i]); - } -} - -void ia_css_debug_dump_sp_stack_info(void) -{ - debug_dump_sp_stack_info(SP0_ID); -} - - -void ia_css_debug_set_dtrace_level(const unsigned int trace_level) -{ - ia_css_debug_trace_level = trace_level; - return; -} - -unsigned int ia_css_debug_get_dtrace_level(void) -{ - return ia_css_debug_trace_level; -} - -static const char *debug_stream_format2str(const enum atomisp_input_format stream_format) -{ - switch (stream_format) { - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: - return "yuv420-8-legacy"; - case ATOMISP_INPUT_FORMAT_YUV420_8: - return "yuv420-8"; - case ATOMISP_INPUT_FORMAT_YUV420_10: - return "yuv420-10"; - case ATOMISP_INPUT_FORMAT_YUV420_16: - return "yuv420-16"; - case ATOMISP_INPUT_FORMAT_YUV422_8: - return "yuv422-8"; - case ATOMISP_INPUT_FORMAT_YUV422_10: - return "yuv422-10"; - case ATOMISP_INPUT_FORMAT_YUV422_16: - return "yuv422-16"; - case ATOMISP_INPUT_FORMAT_RGB_444: - return "rgb444"; - case ATOMISP_INPUT_FORMAT_RGB_555: - return "rgb555"; - case ATOMISP_INPUT_FORMAT_RGB_565: - return "rgb565"; - case ATOMISP_INPUT_FORMAT_RGB_666: - return "rgb666"; - case ATOMISP_INPUT_FORMAT_RGB_888: - return "rgb888"; - case ATOMISP_INPUT_FORMAT_RAW_6: - return "raw6"; - case ATOMISP_INPUT_FORMAT_RAW_7: - return "raw7"; - case ATOMISP_INPUT_FORMAT_RAW_8: - return "raw8"; - case ATOMISP_INPUT_FORMAT_RAW_10: - return "raw10"; - case ATOMISP_INPUT_FORMAT_RAW_12: - return "raw12"; - case ATOMISP_INPUT_FORMAT_RAW_14: - return "raw14"; - case ATOMISP_INPUT_FORMAT_RAW_16: - return "raw16"; - case ATOMISP_INPUT_FORMAT_BINARY_8: - return "binary8"; - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT1: - return "generic-short1"; - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT2: - return "generic-short2"; - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT3: - return "generic-short3"; - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT4: 
- return "generic-short4"; - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT5: - return "generic-short5"; - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT6: - return "generic-short6"; - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT7: - return "generic-short7"; - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT8: - return "generic-short8"; - case ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT: - return "yuv420-8-shift"; - case ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT: - return "yuv420-10-shift"; - case ATOMISP_INPUT_FORMAT_EMBEDDED: - return "embedded-8"; - case ATOMISP_INPUT_FORMAT_USER_DEF1: - return "user-def-8-type-1"; - case ATOMISP_INPUT_FORMAT_USER_DEF2: - return "user-def-8-type-2"; - case ATOMISP_INPUT_FORMAT_USER_DEF3: - return "user-def-8-type-3"; - case ATOMISP_INPUT_FORMAT_USER_DEF4: - return "user-def-8-type-4"; - case ATOMISP_INPUT_FORMAT_USER_DEF5: - return "user-def-8-type-5"; - case ATOMISP_INPUT_FORMAT_USER_DEF6: - return "user-def-8-type-6"; - case ATOMISP_INPUT_FORMAT_USER_DEF7: - return "user-def-8-type-7"; - case ATOMISP_INPUT_FORMAT_USER_DEF8: - return "user-def-8-type-8"; - - default: - assert(!"Unknown stream format"); - return "unknown-stream-format"; - } -}; - -static const char *debug_frame_format2str(const enum ia_css_frame_format frame_format) -{ - switch (frame_format) { - - case IA_CSS_FRAME_FORMAT_NV11: - return "NV11"; - case IA_CSS_FRAME_FORMAT_NV12: - return "NV12"; - case IA_CSS_FRAME_FORMAT_NV12_16: - return "NV12_16"; - case IA_CSS_FRAME_FORMAT_NV12_TILEY: - return "NV12_TILEY"; - case IA_CSS_FRAME_FORMAT_NV16: - return "NV16"; - case IA_CSS_FRAME_FORMAT_NV21: - return "NV21"; - case IA_CSS_FRAME_FORMAT_NV61: - return "NV61"; - case IA_CSS_FRAME_FORMAT_YV12: - return "YV12"; - case IA_CSS_FRAME_FORMAT_YV16: - return "YV16"; - case IA_CSS_FRAME_FORMAT_YUV420: - return "YUV420"; - case IA_CSS_FRAME_FORMAT_YUV420_16: - return "YUV420_16"; - case IA_CSS_FRAME_FORMAT_YUV422: - return "YUV422"; - case IA_CSS_FRAME_FORMAT_YUV422_16: - return "YUV422_16"; - case 
IA_CSS_FRAME_FORMAT_UYVY: - return "UYVY"; - case IA_CSS_FRAME_FORMAT_YUYV: - return "YUYV"; - case IA_CSS_FRAME_FORMAT_YUV444: - return "YUV444"; - case IA_CSS_FRAME_FORMAT_YUV_LINE: - return "YUV_LINE"; - case IA_CSS_FRAME_FORMAT_RAW: - return "RAW"; - case IA_CSS_FRAME_FORMAT_RGB565: - return "RGB565"; - case IA_CSS_FRAME_FORMAT_PLANAR_RGB888: - return "PLANAR_RGB888"; - case IA_CSS_FRAME_FORMAT_RGBA888: - return "RGBA888"; - case IA_CSS_FRAME_FORMAT_QPLANE6: - return "QPLANE6"; - case IA_CSS_FRAME_FORMAT_BINARY_8: - return "BINARY_8"; - case IA_CSS_FRAME_FORMAT_MIPI: - return "MIPI"; - case IA_CSS_FRAME_FORMAT_RAW_PACKED: - return "RAW_PACKED"; - case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8: - return "CSI_MIPI_YUV420_8"; - case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8: - return "CSI_MIPI_LEGACY_YUV420_8"; - case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10: - return "CSI_MIPI_YUV420_10"; - - default: - assert(!"Unknown frame format"); - return "unknown-frame-format"; - } -} - -static void debug_print_sp_state(const sp_state_t *state, const char *cell) -{ - assert(cell != NULL); - assert(state != NULL); - - ia_css_debug_dtrace(2, "%s state:\n", cell); - ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc); - ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register", - state->status_register); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping", - state->is_sleeping); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling", - state->is_stalling); - return; -} - -static void debug_print_isp_state(const isp_state_t *state, const char *cell) -{ - assert(state != NULL); - assert(cell != NULL); - - ia_css_debug_dtrace(2, "%s state:\n", cell); - ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc); - ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register", - state->status_register); - ia_css_debug_dtrace(2, 
"\t%-32s: %d\n", "Is broken", state->is_broken); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping", - state->is_sleeping); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling", - state->is_stalling); - return; -} - -void ia_css_debug_dump_isp_state(void) -{ - isp_state_t state; - isp_stall_t stall; - - isp_get_state(ISP0_ID, &state, &stall); - - debug_print_isp_state(&state, "ISP"); - - if (state.is_stalling) { -#if !defined(HAS_NO_INPUT_FORMATTER) - ia_css_debug_dtrace(2, "\t%-32s: %d\n", - "[0] if_prim_a_FIFO stalled", stall.fifo0); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", - "[1] if_prim_b_FIFO stalled", stall.fifo1); -#endif - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[2] dma_FIFO stalled", - stall.fifo2); -#if defined(HAS_ISP_2400_MAMOIADA) || defined(HAS_ISP_2401_MAMOIADA) || defined(IS_ISP_2500_SYSTEM) - - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[3] gdc0_FIFO stalled", - stall.fifo3); -#if !defined(IS_ISP_2500_SYSTEM) - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[4] gdc1_FIFO stalled", - stall.fifo4); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[5] gpio_FIFO stalled", - stall.fifo5); -#endif - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[6] sp_FIFO stalled", - stall.fifo6); -#else -#error "ia_css_debug: ISP cell must be one of {2400_MAMOIADA,, 2401_MAMOIADA, 2500_SKYCAM}" -#endif - ia_css_debug_dtrace(2, "\t%-32s: %d\n", - "status & control stalled", - stall.stat_ctrl); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled", - stall.dmem); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vmem stalled", - stall.vmem); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem1 stalled", - stall.vamem1); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem2 stalled", - stall.vamem2); -#if defined(HAS_ISP_2400_MAMOIADA) || defined(HAS_ISP_2401_MAMOIADA) - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem3 stalled", - stall.vamem3); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "hmem stalled", - stall.hmem); - 
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "pmem stalled", - stall.pmem); -#endif - } - return; -} - -void ia_css_debug_dump_sp_state(void) -{ - sp_state_t state; - sp_stall_t stall; - sp_get_state(SP0_ID, &state, &stall); - debug_print_sp_state(&state, "SP"); - if (state.is_stalling) { -#if defined(HAS_SP_2400) || defined(IS_ISP_2500_SYSTEM) -#if !defined(HAS_NO_INPUT_SYSTEM) - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isys_FIFO stalled", - stall.fifo0); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_sec_FIFO stalled", - stall.fifo1); -#endif - ia_css_debug_dtrace(2, "\t%-32s: %d\n", - "str_to_mem_FIFO stalled", stall.fifo2); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dma_FIFO stalled", - stall.fifo3); -#if !defined(HAS_NO_INPUT_FORMATTER) - ia_css_debug_dtrace(2, "\t%-32s: %d\n", - "if_prim_a_FIFO stalled", stall.fifo4); -#endif - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isp_FIFO stalled", - stall.fifo5); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gp_FIFO stalled", - stall.fifo6); -#if !defined(HAS_NO_INPUT_FORMATTER) - ia_css_debug_dtrace(2, "\t%-32s: %d\n", - "if_prim_b_FIFO stalled", stall.fifo7); -#endif - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc0_FIFO stalled", - stall.fifo8); -#if !defined(IS_ISP_2500_SYSTEM) - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc1_FIFO stalled", - stall.fifo9); -#endif - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "irq FIFO stalled", - stall.fifoa); -#else -#error "ia_css_debug: SP cell must be one of {SP2400, SP2500}" -#endif - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled", - stall.dmem); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", - "control master stalled", - stall.control_master); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", - "i-cache master stalled", - stall.icache_master); - } - ia_css_debug_dump_trace(); - return; -} - -static void debug_print_fifo_channel_state(const fifo_channel_state_t *state, - const char *descr) -{ - assert(state != NULL); - assert(descr != NULL); - - ia_css_debug_dtrace(2, "FIFO channel: %s\n", descr); 
- ia_css_debug_dtrace(2, "\t%-32s: %d\n", "source valid", - state->src_valid); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo accept", - state->fifo_accept); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo valid", - state->fifo_valid); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "sink accept", - state->sink_accept); - return; -} - -#if !defined(HAS_NO_INPUT_FORMATTER) && defined(USE_INPUT_SYSTEM_VERSION_2) -void ia_css_debug_dump_pif_a_isp_fifo_state(void) -{ - fifo_channel_state_t pif_to_isp, isp_to_pif; - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_IF0_TO_ISP0, &pif_to_isp); - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_ISP0_TO_IF0, &isp_to_pif); - debug_print_fifo_channel_state(&pif_to_isp, "Primary IF A to ISP"); - debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF A"); -} - -void ia_css_debug_dump_pif_b_isp_fifo_state(void) -{ - fifo_channel_state_t pif_to_isp, isp_to_pif; - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_IF1_TO_ISP0, &pif_to_isp); - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_ISP0_TO_IF1, &isp_to_pif); - debug_print_fifo_channel_state(&pif_to_isp, "Primary IF B to ISP"); - debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF B"); -} - -void ia_css_debug_dump_str2mem_sp_fifo_state(void) -{ - fifo_channel_state_t s2m_to_sp, sp_to_s2m; - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_STREAM2MEM0_TO_SP0, &s2m_to_sp); - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_SP0_TO_STREAM2MEM0, &sp_to_s2m); - debug_print_fifo_channel_state(&s2m_to_sp, "Stream-to-memory to SP"); - debug_print_fifo_channel_state(&sp_to_s2m, "SP to stream-to-memory"); -} - -static void debug_print_if_state(input_formatter_state_t *state, const char *id) -{ - unsigned int val; - -#if defined(HAS_INPUT_FORMATTER_VERSION_1) - const char *st_reset = (state->reset ? "Active" : "Not active"); -#endif - const char *st_vsync_active_low = - (state->vsync_active_low ? 
"low" : "high"); - const char *st_hsync_active_low = - (state->hsync_active_low ? "low" : "high"); - - const char *fsm_sync_status_str = "unknown"; - const char *fsm_crop_status_str = "unknown"; - const char *fsm_padding_status_str = "unknown"; - - int st_stline = state->start_line; - int st_stcol = state->start_column; - int st_crpht = state->cropped_height; - int st_crpwd = state->cropped_width; - int st_verdcm = state->ver_decimation; - int st_hordcm = state->hor_decimation; - int st_ver_deinterleaving = state->ver_deinterleaving; - int st_hor_deinterleaving = state->hor_deinterleaving; - int st_leftpd = state->left_padding; - int st_eoloff = state->eol_offset; - int st_vmstartaddr = state->vmem_start_address; - int st_vmendaddr = state->vmem_end_address; - int st_vmincr = state->vmem_increment; - int st_yuv420 = state->is_yuv420; - int st_allow_fifo_overflow = state->allow_fifo_overflow; - int st_block_fifo_when_no_req = state->block_fifo_when_no_req; - - assert(state != NULL); - ia_css_debug_dtrace(2, "InputFormatter State (%s):\n", id); - - ia_css_debug_dtrace(2, "\tConfiguration:\n"); - -#if defined(HAS_INPUT_FORMATTER_VERSION_1) - ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "Software reset", st_reset); -#endif - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start line", st_stline); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start column", st_stcol); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped height", st_crpht); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped width", st_crpwd); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Ver decimation", st_verdcm); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Hor decimation", st_hordcm); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Ver deinterleaving", st_ver_deinterleaving); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Hor deinterleaving", st_hor_deinterleaving); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Left padding", st_leftpd); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "EOL offset (bytes)", 
st_eoloff); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n", - "VMEM start address", st_vmstartaddr); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n", - "VMEM end address", st_vmendaddr); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n", - "VMEM increment", st_vmincr); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "YUV 420 format", st_yuv420); - ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n", - "Vsync", st_vsync_active_low); - ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n", - "Hsync", st_hsync_active_low); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Allow FIFO overflow", st_allow_fifo_overflow); -/* Flag that tells whether the IF gives backpressure on frames */ -/* - * FYI, this is only on the frame request (indicate), when the IF has - * synch'd on a frame it will always give back pressure - */ - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Block when no request", st_block_fifo_when_no_req); - -#if defined(HAS_INPUT_FORMATTER_VERSION_2) - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "IF_BLOCKED_FIFO_NO_REQ_ADDRESS", - input_formatter_reg_load(INPUT_FORMATTER0_ID, - HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS) - ); - - ia_css_debug_dtrace(2, "\t%-32s:\n", "InputSwitch State"); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_lut_reg0", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_lut_reg0)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_lut_reg1", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_lut_reg1)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_lut_reg2", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_lut_reg2)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_lut_reg3", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_lut_reg3)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_lut_reg4", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_lut_reg4)); - - 
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_lut_reg5", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_lut_reg5)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_lut_reg6", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_lut_reg6)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_lut_reg7", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_lut_reg7)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_input_switch_fsync_lut", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_input_switch_fsync_lut)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_srst", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_srst)); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "_REG_GP_IFMT_slv_reg_srst", - gp_device_reg_load(GP_DEVICE0_ID, - _REG_GP_IFMT_slv_reg_srst)); -#endif - - ia_css_debug_dtrace(2, "\tFSM Status:\n"); - - val = state->fsm_sync_status; - - if (val > 7) - fsm_sync_status_str = "ERROR"; - - switch (val & 0x7) { - case 0: - fsm_sync_status_str = "idle"; - break; - case 1: - fsm_sync_status_str = "request frame"; - break; - case 2: - fsm_sync_status_str = "request lines"; - break; - case 3: - fsm_sync_status_str = "request vectors"; - break; - case 4: - fsm_sync_status_str = "send acknowledge"; - break; - default: - fsm_sync_status_str = "unknown"; - break; - } - - ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", - "FSM Synchronization Status", val, - fsm_sync_status_str); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM Synchronization Counter", - state->fsm_sync_counter); - - val = state->fsm_crop_status; - - if (val > 7) - fsm_crop_status_str = "ERROR"; - - switch (val & 0x7) { - case 0: - fsm_crop_status_str = "idle"; - break; - case 1: - fsm_crop_status_str = "wait line"; - break; - case 2: - fsm_crop_status_str = "crop line"; - break; - case 3: - fsm_crop_status_str = "crop pixel"; - break; - case 4: - 
fsm_crop_status_str = "pass pixel"; - break; - case 5: - fsm_crop_status_str = "pass line"; - break; - case 6: - fsm_crop_status_str = "lost line"; - break; - default: - fsm_crop_status_str = "unknown"; - break; - } - ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", - "FSM Crop Status", val, fsm_crop_status_str); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM Crop Line Counter", - state->fsm_crop_line_counter); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM Crop Pixel Counter", - state->fsm_crop_pixel_counter); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM Deinterleaving idx buffer", - state->fsm_deinterleaving_index); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM H decimation counter", - state->fsm_dec_h_counter); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM V decimation counter", - state->fsm_dec_v_counter); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM block V decimation counter", - state->fsm_dec_block_v_counter); - - val = state->fsm_padding_status; - - if (val > 7) - fsm_padding_status_str = "ERROR"; - - switch (val & 0x7) { - case 0: - fsm_padding_status_str = "idle"; - break; - case 1: - fsm_padding_status_str = "left pad"; - break; - case 2: - fsm_padding_status_str = "write"; - break; - case 3: - fsm_padding_status_str = "right pad"; - break; - case 4: - fsm_padding_status_str = "send end of line"; - break; - default: - fsm_padding_status_str = "unknown"; - break; - } - - ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", "FSM Padding Status", - val, fsm_padding_status_str); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM Padding element idx counter", - state->fsm_padding_elem_counter); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support error", - state->fsm_vector_support_error); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support buf full", - state->fsm_vector_buffer_full); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support", - state->vector_support); - ia_css_debug_dtrace(2, 
"\t\t%-32s: %d\n", "Fifo sensor data lost", - state->sensor_data_lost); - return; -} - -static void debug_print_if_bin_state(input_formatter_bin_state_t *state) -{ - ia_css_debug_dtrace(2, "Stream-to-memory state:\n"); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "reset", state->reset); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "input endianness", - state->input_endianness); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "output endianness", - state->output_endianness); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "bitswap", state->bitswap); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "block_synch", - state->block_synch); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "packet_synch", - state->packet_synch); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "readpostwrite_sync", - state->readpostwrite_synch); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "is_2ppc", state->is_2ppc); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "en_status_update", - state->en_status_update); -} - -void ia_css_debug_dump_if_state(void) -{ - input_formatter_state_t if_state; - input_formatter_bin_state_t if_bin_state; - - input_formatter_get_state(INPUT_FORMATTER0_ID, &if_state); - debug_print_if_state(&if_state, "Primary IF A"); - ia_css_debug_dump_pif_a_isp_fifo_state(); - - input_formatter_get_state(INPUT_FORMATTER1_ID, &if_state); - debug_print_if_state(&if_state, "Primary IF B"); - ia_css_debug_dump_pif_b_isp_fifo_state(); - - input_formatter_bin_get_state(INPUT_FORMATTER3_ID, &if_bin_state); - debug_print_if_bin_state(&if_bin_state); - ia_css_debug_dump_str2mem_sp_fifo_state(); -} -#endif - -void ia_css_debug_dump_dma_state(void) -{ - /* note: the var below is made static as it is quite large; - if it is not static it ends up on the stack which could - cause issues for drivers - */ - static dma_state_t state; - int i, ch_id; - - const char *fsm_cmd_st_lbl = "FSM Command flag state"; - const char *fsm_ctl_st_lbl = "FSM Control flag state"; - const char *fsm_ctl_state = NULL; - const char *fsm_ctl_flag = NULL; - 
const char *fsm_pack_st = NULL; - const char *fsm_read_st = NULL; - const char *fsm_write_st = NULL; - char last_cmd_str[64]; - - dma_get_state(DMA0_ID, &state); - /* Print header for DMA dump status */ - ia_css_debug_dtrace(2, "DMA dump status:\n"); - - /* Print FSM command flag state */ - if (state.fsm_command_idle) - ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "IDLE"); - if (state.fsm_command_run) - ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "RUN"); - if (state.fsm_command_stalling) - ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, - "STALL"); - if (state.fsm_command_error) - ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, - "ERROR"); - - /* Print last command along with the channel */ - ch_id = state.last_command_channel; - - switch (state.last_command) { - case DMA_COMMAND_READ: - snprintf(last_cmd_str, 64, - "Read 2D Block [Channel: %d]", ch_id); - break; - case DMA_COMMAND_WRITE: - snprintf(last_cmd_str, 64, - "Write 2D Block [Channel: %d]", ch_id); - break; - case DMA_COMMAND_SET_CHANNEL: - snprintf(last_cmd_str, 64, "Set Channel [Channel: %d]", ch_id); - break; - case DMA_COMMAND_SET_PARAM: - snprintf(last_cmd_str, 64, - "Set Param: %d [Channel: %d]", - state.last_command_param, ch_id); - break; - case DMA_COMMAND_READ_SPECIFIC: - snprintf(last_cmd_str, 64, - "Read Specific 2D Block [Channel: %d]", ch_id); - break; - case DMA_COMMAND_WRITE_SPECIFIC: - snprintf(last_cmd_str, 64, - "Write Specific 2D Block [Channel: %d]", ch_id); - break; - case DMA_COMMAND_INIT: - snprintf(last_cmd_str, 64, - "Init 2D Block on Device A [Channel: %d]", ch_id); - break; - case DMA_COMMAND_INIT_SPECIFIC: - snprintf(last_cmd_str, 64, - "Init Specific 2D Block [Channel: %d]", ch_id); - break; - case DMA_COMMAND_RST: - snprintf(last_cmd_str, 64, "DMA SW Reset"); - break; - case N_DMA_COMMANDS: - snprintf(last_cmd_str, 64, "UNKNOWN"); - break; - default: - snprintf(last_cmd_str, 64, - "unknown [Channel: %d]", ch_id); - break; - } - 
ia_css_debug_dtrace(2, "\t%-32s: (0x%X : %s)\n", - "last command received", state.last_command, - last_cmd_str); - - /* Print DMA registers */ - ia_css_debug_dtrace(2, "\t%-32s\n", - "DMA registers, connection group 0"); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Command", - state.current_command); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address A", - state.current_addr_a); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address B", - state.current_addr_b); - - if (state.fsm_ctrl_idle) - fsm_ctl_flag = "IDLE"; - else if (state.fsm_ctrl_run) - fsm_ctl_flag = "RUN"; - else if (state.fsm_ctrl_stalling) - fsm_ctl_flag = "STAL"; - else if (state.fsm_ctrl_error) - fsm_ctl_flag = "ERROR"; - else - fsm_ctl_flag = "UNKNOWN"; - - switch (state.fsm_ctrl_state) { - case DMA_CTRL_STATE_IDLE: - fsm_ctl_state = "Idle state"; - break; - case DMA_CTRL_STATE_REQ_RCV: - fsm_ctl_state = "Req Rcv state"; - break; - case DMA_CTRL_STATE_RCV: - fsm_ctl_state = "Rcv state"; - break; - case DMA_CTRL_STATE_RCV_REQ: - fsm_ctl_state = "Rcv Req state"; - break; - case DMA_CTRL_STATE_INIT: - fsm_ctl_state = "Init state"; - break; - case N_DMA_CTRL_STATES: - fsm_ctl_state = "Unknown"; - break; - } - - ia_css_debug_dtrace(2, "\t\t%-32s: %s -> %s\n", fsm_ctl_st_lbl, - fsm_ctl_flag, fsm_ctl_state); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source dev", - state.fsm_ctrl_source_dev); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source addr", - state.fsm_ctrl_source_addr); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source stride", - state.fsm_ctrl_source_stride); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source width", - state.fsm_ctrl_source_width); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source height", - state.fsm_ctrl_source_height); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source dev", - state.fsm_ctrl_pack_source_dev); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack 
dest dev", - state.fsm_ctrl_pack_dest_dev); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest addr", - state.fsm_ctrl_dest_addr); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest stride", - state.fsm_ctrl_dest_stride); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source width", - state.fsm_ctrl_pack_source_width); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest height", - state.fsm_ctrl_pack_dest_height); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest width", - state.fsm_ctrl_pack_dest_width); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source elems", - state.fsm_ctrl_pack_source_elems); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest elems", - state.fsm_ctrl_pack_dest_elems); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack extension", - state.fsm_ctrl_pack_extension); - - if (state.pack_idle) - fsm_pack_st = "IDLE"; - if (state.pack_run) - fsm_pack_st = "RUN"; - if (state.pack_stalling) - fsm_pack_st = "STALL"; - if (state.pack_error) - fsm_pack_st = "ERROR"; - - ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Pack flag state", - fsm_pack_st); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack cnt height", - state.pack_cnt_height); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack src cnt width", - state.pack_src_cnt_width); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack dest cnt width", - state.pack_dest_cnt_width); - - if (state.read_state == DMA_RW_STATE_IDLE) - fsm_read_st = "Idle state"; - if (state.read_state == DMA_RW_STATE_REQ) - fsm_read_st = "Req state"; - if (state.read_state == DMA_RW_STATE_NEXT_LINE) - fsm_read_st = "Next line"; - if (state.read_state == DMA_RW_STATE_UNLOCK_CHANNEL) - fsm_read_st = "Unlock channel"; - - ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Read state", - fsm_read_st); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt height", - state.read_cnt_height); - ia_css_debug_dtrace(2, "\t\t%-32s: 
%d\n", "FSM Read cnt width", - state.read_cnt_width); - - if (state.write_state == DMA_RW_STATE_IDLE) - fsm_write_st = "Idle state"; - if (state.write_state == DMA_RW_STATE_REQ) - fsm_write_st = "Req state"; - if (state.write_state == DMA_RW_STATE_NEXT_LINE) - fsm_write_st = "Next line"; - if (state.write_state == DMA_RW_STATE_UNLOCK_CHANNEL) - fsm_write_st = "Unlock channel"; - - ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Write state", - fsm_write_st); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write height", - state.write_height); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write width", - state.write_width); - - for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) { - dma_port_state_t *port = &(state.port_states[i]); - ia_css_debug_dtrace(2, "\tDMA device interface %d\n", i); - ia_css_debug_dtrace(2, "\t\tDMA internal side state\n"); - ia_css_debug_dtrace(2, - "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n", - port->req_cs, port->req_we_n, port->req_run, - port->req_ack); - ia_css_debug_dtrace(2, "\t\tMaster Output side state\n"); - ia_css_debug_dtrace(2, - "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n", - port->send_cs, port->send_we_n, - port->send_run, port->send_ack); - ia_css_debug_dtrace(2, "\t\tFifo state\n"); - if (port->fifo_state == DMA_FIFO_STATE_WILL_BE_FULL) - ia_css_debug_dtrace(2, "\t\t\tFiFo will be full\n"); - else if (port->fifo_state == DMA_FIFO_STATE_FULL) - ia_css_debug_dtrace(2, "\t\t\tFifo Full\n"); - else if (port->fifo_state == DMA_FIFO_STATE_EMPTY) - ia_css_debug_dtrace(2, "\t\t\tFifo Empty\n"); - else - ia_css_debug_dtrace(2, "\t\t\tFifo state unknown\n"); - - ia_css_debug_dtrace(2, "\t\tFifo counter %d\n\n", - port->fifo_counter); - } - - for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) { - dma_channel_state_t *ch = &(state.channel_states[i]); - ia_css_debug_dtrace(2, "\t%-32s: %d\n", "DMA channel register", - i); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Connection", - ch->connection); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", 
"Sign extend", - ch->sign_extend); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev A", - ch->stride_a); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev A", - ch->elems_a); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev A", - ch->cropping_a); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev A", - ch->width_a); - ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev B", - ch->stride_b); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev B", - ch->elems_b); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev B", - ch->cropping_b); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev B", - ch->width_b); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Height", ch->height); - } - ia_css_debug_dtrace(2, "\n"); - return; -} - -void ia_css_debug_dump_dma_sp_fifo_state(void) -{ - fifo_channel_state_t dma_to_sp, sp_to_dma; - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_DMA0_TO_SP0, &dma_to_sp); - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_SP0_TO_DMA0, &sp_to_dma); - debug_print_fifo_channel_state(&dma_to_sp, "DMA to SP"); - debug_print_fifo_channel_state(&sp_to_dma, "SP to DMA"); - return; -} - -void ia_css_debug_dump_dma_isp_fifo_state(void) -{ - fifo_channel_state_t dma_to_isp, isp_to_dma; - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_DMA0_TO_ISP0, &dma_to_isp); - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_ISP0_TO_DMA0, &isp_to_dma); - debug_print_fifo_channel_state(&dma_to_isp, "DMA to ISP"); - debug_print_fifo_channel_state(&isp_to_dma, "ISP to DMA"); - return; -} - -void ia_css_debug_dump_isp_sp_fifo_state(void) -{ - fifo_channel_state_t sp_to_isp, isp_to_sp; - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_SP0_TO_ISP0, &sp_to_isp); - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_ISP0_TO_SP0, &isp_to_sp); - debug_print_fifo_channel_state(&sp_to_isp, "SP to ISP"); - debug_print_fifo_channel_state(&isp_to_sp, "ISP to SP"); - return; -} - -void 
ia_css_debug_dump_isp_gdc_fifo_state(void) -{ - fifo_channel_state_t gdc_to_isp, isp_to_gdc; - - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_GDC0_TO_ISP0, &gdc_to_isp); - fifo_channel_get_state(FIFO_MONITOR0_ID, - FIFO_CHANNEL_ISP0_TO_GDC0, &isp_to_gdc); - debug_print_fifo_channel_state(&gdc_to_isp, "GDC to ISP"); - debug_print_fifo_channel_state(&isp_to_gdc, "ISP to GDC"); - return; -} - -void ia_css_debug_dump_all_fifo_state(void) -{ - int i; - fifo_monitor_state_t state; - fifo_monitor_get_state(FIFO_MONITOR0_ID, &state); - - for (i = 0; i < N_FIFO_CHANNEL; i++) - debug_print_fifo_channel_state(&(state.fifo_channels[i]), - "squepfstqkt"); - return; -} - -static void debug_binary_info_print(const struct ia_css_binary_xinfo *info) -{ - assert(info != NULL); - ia_css_debug_dtrace(2, "id = %d\n", info->sp.id); - ia_css_debug_dtrace(2, "mode = %d\n", info->sp.pipeline.mode); - ia_css_debug_dtrace(2, "max_input_width = %d\n", info->sp.input.max_width); - ia_css_debug_dtrace(2, "min_output_width = %d\n", - info->sp.output.min_width); - ia_css_debug_dtrace(2, "max_output_width = %d\n", - info->sp.output.max_width); - ia_css_debug_dtrace(2, "top_cropping = %d\n", info->sp.pipeline.top_cropping); - ia_css_debug_dtrace(2, "left_cropping = %d\n", info->sp.pipeline.left_cropping); - ia_css_debug_dtrace(2, "xmem_addr = %d\n", info->xmem_addr); - ia_css_debug_dtrace(2, "enable_vf_veceven = %d\n", - info->sp.enable.vf_veceven); - ia_css_debug_dtrace(2, "enable_dis = %d\n", info->sp.enable.dis); - ia_css_debug_dtrace(2, "enable_uds = %d\n", info->sp.enable.uds); - ia_css_debug_dtrace(2, "enable ds = %d\n", info->sp.enable.ds); - ia_css_debug_dtrace(2, "s3atbl_use_dmem = %d\n", info->sp.s3a.s3atbl_use_dmem); - return; -} - -void ia_css_debug_binary_print(const struct ia_css_binary *bi) -{ - unsigned int i; - debug_binary_info_print(bi->info); - ia_css_debug_dtrace(2, - "input: %dx%d, format = %d, padded width = %d\n", - bi->in_frame_info.res.width, - 
bi->in_frame_info.res.height, - bi->in_frame_info.format, - bi->in_frame_info.padded_width); - ia_css_debug_dtrace(2, - "internal :%dx%d, format = %d, padded width = %d\n", - bi->internal_frame_info.res.width, - bi->internal_frame_info.res.height, - bi->internal_frame_info.format, - bi->internal_frame_info.padded_width); - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - if (bi->out_frame_info[i].res.width != 0) { - ia_css_debug_dtrace(2, - "out%d: %dx%d, format = %d, padded width = %d\n", - i, - bi->out_frame_info[i].res.width, - bi->out_frame_info[i].res.height, - bi->out_frame_info[i].format, - bi->out_frame_info[i].padded_width); - } - } - ia_css_debug_dtrace(2, - "vf out: %dx%d, format = %d, padded width = %d\n", - bi->vf_frame_info.res.width, - bi->vf_frame_info.res.height, - bi->vf_frame_info.format, - bi->vf_frame_info.padded_width); - ia_css_debug_dtrace(2, "online = %d\n", bi->online); - ia_css_debug_dtrace(2, "input_buf_vectors = %d\n", - bi->input_buf_vectors); - ia_css_debug_dtrace(2, "deci_factor_log2 = %d\n", bi->deci_factor_log2); - ia_css_debug_dtrace(2, "vf_downscale_log2 = %d\n", - bi->vf_downscale_log2); - ia_css_debug_dtrace(2, "dis_deci_factor_log2 = %d\n", - bi->dis.deci_factor_log2); - ia_css_debug_dtrace(2, "dis hor coef num = %d\n", - bi->dis.coef.pad.width); - ia_css_debug_dtrace(2, "dis ver coef num = %d\n", - bi->dis.coef.pad.height); - ia_css_debug_dtrace(2, "dis hor proj num = %d\n", - bi->dis.proj.pad.height); - ia_css_debug_dtrace(2, "sctbl_width_per_color = %d\n", - bi->sctbl_width_per_color); - ia_css_debug_dtrace(2, "s3atbl_width = %d\n", bi->s3atbl_width); - ia_css_debug_dtrace(2, "s3atbl_height = %d\n", bi->s3atbl_height); - return; -} - -void ia_css_debug_frame_print(const struct ia_css_frame *frame, - const char *descr) -{ - char *data = NULL; - - assert(frame != NULL); - assert(descr != NULL); - - data = (char *)HOST_ADDRESS(frame->data); - ia_css_debug_dtrace(2, "frame %s (%p):\n", descr, frame); - 
ia_css_debug_dtrace(2, " resolution = %dx%d\n", - frame->info.res.width, frame->info.res.height); - ia_css_debug_dtrace(2, " padded width = %d\n", - frame->info.padded_width); - ia_css_debug_dtrace(2, " format = %d\n", frame->info.format); - ia_css_debug_dtrace(2, " is contiguous = %s\n", - frame->contiguous ? "yes" : "no"); - switch (frame->info.format) { - case IA_CSS_FRAME_FORMAT_NV12: - case IA_CSS_FRAME_FORMAT_NV16: - case IA_CSS_FRAME_FORMAT_NV21: - case IA_CSS_FRAME_FORMAT_NV61: - ia_css_debug_dtrace(2, " Y = %p\n", - data + frame->planes.nv.y.offset); - ia_css_debug_dtrace(2, " UV = %p\n", - data + frame->planes.nv.uv.offset); - break; - case IA_CSS_FRAME_FORMAT_YUYV: - case IA_CSS_FRAME_FORMAT_UYVY: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8: - case IA_CSS_FRAME_FORMAT_YUV_LINE: - ia_css_debug_dtrace(2, " YUYV = %p\n", - data + frame->planes.yuyv.offset); - break; - case IA_CSS_FRAME_FORMAT_YUV420: - case IA_CSS_FRAME_FORMAT_YUV422: - case IA_CSS_FRAME_FORMAT_YUV444: - case IA_CSS_FRAME_FORMAT_YV12: - case IA_CSS_FRAME_FORMAT_YV16: - case IA_CSS_FRAME_FORMAT_YUV420_16: - case IA_CSS_FRAME_FORMAT_YUV422_16: - ia_css_debug_dtrace(2, " Y = %p\n", - data + frame->planes.yuv.y.offset); - ia_css_debug_dtrace(2, " U = %p\n", - data + frame->planes.yuv.u.offset); - ia_css_debug_dtrace(2, " V = %p\n", - data + frame->planes.yuv.v.offset); - break; - case IA_CSS_FRAME_FORMAT_RAW_PACKED: - ia_css_debug_dtrace(2, " RAW PACKED = %p\n", - data + frame->planes.raw.offset); - break; - case IA_CSS_FRAME_FORMAT_RAW: - ia_css_debug_dtrace(2, " RAW = %p\n", - data + frame->planes.raw.offset); - break; - case IA_CSS_FRAME_FORMAT_RGBA888: - case IA_CSS_FRAME_FORMAT_RGB565: - ia_css_debug_dtrace(2, " RGB = %p\n", - data + frame->planes.rgb.offset); - break; - case IA_CSS_FRAME_FORMAT_QPLANE6: - ia_css_debug_dtrace(2, " R = %p\n", - data + frame->planes.plane6.r.offset); - ia_css_debug_dtrace(2, " RatB = %p\n", - data + 
frame->planes.plane6.r_at_b.offset); - ia_css_debug_dtrace(2, " Gr = %p\n", - data + frame->planes.plane6.gr.offset); - ia_css_debug_dtrace(2, " Gb = %p\n", - data + frame->planes.plane6.gb.offset); - ia_css_debug_dtrace(2, " B = %p\n", - data + frame->planes.plane6.b.offset); - ia_css_debug_dtrace(2, " BatR = %p\n", - data + frame->planes.plane6.b_at_r.offset); - break; - case IA_CSS_FRAME_FORMAT_BINARY_8: - ia_css_debug_dtrace(2, " Binary data = %p\n", - data + frame->planes.binary.data.offset); - break; - default: - ia_css_debug_dtrace(2, " unknown frame type\n"); - break; - } - return; -} - -#if SP_DEBUG != SP_DEBUG_NONE - -void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state - *state) -{ - -#endif - -#if SP_DEBUG == SP_DEBUG_DUMP - - assert(state != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "current SP software counter: %d\n", - state->debug[0]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty output buffer queue head: 0x%x\n", - state->debug[1]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty output buffer queue tail: 0x%x\n", - state->debug[2]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty s3a buffer queue head: 0x%x\n", - state->debug[3]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty s3a buffer queue tail: 0x%x\n", - state->debug[4]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "full output buffer queue head: 0x%x\n", - state->debug[5]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "full output buffer queue tail: 0x%x\n", - state->debug[6]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "full s3a buffer queue head: 0x%x\n", - state->debug[7]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "full s3a buffer queue tail: 0x%x\n", - state->debug[8]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue head: 0x%x\n", - state->debug[9]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue tail: 0x%x\n", - state->debug[10]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "num of stages of 
current pipeline: 0x%x\n", - state->debug[11]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 1: 0x%x\n", - state->debug[12]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 2: 0x%x\n", - state->debug[13]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "current stage out_vf buffer idx: 0x%x\n", - state->debug[14]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "current stage output buffer idx: 0x%x\n", - state->debug[15]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "current stage s3a buffer idx: 0x%x\n", - state->debug[16]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first char of current stage name: 0x%x\n", - state->debug[17]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "current SP thread id: 0x%x\n", - state->debug[18]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty output buffer address 1: 0x%x\n", - state->debug[19]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty output buffer address 2: 0x%x\n", - state->debug[20]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty out_vf buffer address 1: 0x%x\n", - state->debug[21]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty out_vf buffer address 2: 0x%x\n", - state->debug[22]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty s3a_hi buffer address 1: 0x%x\n", - state->debug[23]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty s3a_hi buffer address 2: 0x%x\n", - state->debug[24]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty s3a_lo buffer address 1: 0x%x\n", - state->debug[25]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty s3a_lo buffer address 2: 0x%x\n", - state->debug[26]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty dis_hor buffer address 1: 0x%x\n", - state->debug[27]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty dis_hor buffer address 2: 0x%x\n", - state->debug[28]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty dis_ver buffer address 1: 0x%x\n", - state->debug[29]); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty dis_ver buffer address 2: 0x%x\n", - state->debug[30]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "empty param buffer address: 0x%x\n", - state->debug[31]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect frame address: 0x%x\n", - state->debug[32]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect frame container address: 0x%x\n", - state->debug[33]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect frame container payload: 0x%x\n", - state->debug[34]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect s3a_hi address: 0x%x\n", - state->debug[35]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect s3a_hi container address: 0x%x\n", - state->debug[36]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect s3a_hi container payload: 0x%x\n", - state->debug[37]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect s3a_lo address: 0x%x\n", - state->debug[38]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect s3a_lo container address: 0x%x\n", - state->debug[39]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "first incorrect s3a_lo container payload: 0x%x\n", - state->debug[40]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "number of calling flash start function: 0x%x\n", - state->debug[41]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "number of calling flash close function: 0x%x\n", - state->debug[42]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "number of flashed frame: 0x%x\n", - state->debug[43]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "flash in use flag: 0x%x\n", - state->debug[44]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "number of update frame flashed flag: 0x%x\n", - state->debug[46]); - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "number of active threads: 0x%x\n", - state->debug[45]); - -#elif SP_DEBUG == SP_DEBUG_COPY - - /* Remember last_index because we only want to print new entries 
*/ - static int last_index; - int sp_index = state->index; - int n; - - assert(state != NULL); - if (sp_index < last_index) { - /* SP has been reset */ - last_index = 0; - } - - if (last_index == 0) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "copy-trace init: sp_dbg_if_start_line=%d, " - "sp_dbg_if_start_column=%d, " - "sp_dbg_if_cropped_height=%d, " - "sp_debg_if_cropped_width=%d\n", - state->if_start_line, - state->if_start_column, - state->if_cropped_height, - state->if_cropped_width); - } - - if ((last_index + SH_CSS_SP_DBG_TRACE_DEPTH) < sp_index) { - /* last index can be multiple rounds behind */ - /* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */ - last_index = sp_index - SH_CSS_SP_DBG_TRACE_DEPTH; - } - - for (n = last_index; n < sp_index; n++) { - int i = n % SH_CSS_SP_DBG_TRACE_DEPTH; - if (state->trace[i].frame != 0) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "copy-trace: frame=%d, line=%d, " - "pixel_distance=%d, " - "mipi_used_dword=%d, " - "sp_index=%d\n", - state->trace[i].frame, - state->trace[i].line, - state->trace[i].pixel_distance, - state->trace[i].mipi_used_dword, - state->trace[i].sp_index); - } - } - - last_index = sp_index; - -#elif SP_DEBUG == SP_DEBUG_TRACE - -/* - * This is just an example how TRACE_FILE_ID (see ia_css_debug.sp.h) will - * me mapped on the file name string. - * - * Adjust this to your trace case! 
- */ - static char const * const id2filename[8] = { - "param_buffer.sp.c | tagger.sp.c | pipe_data.sp.c", - "isp_init.sp.c", - "sp_raw_copy.hive.c", - "dma_configure.sp.c", - "sp.hive.c", - "event_proxy_sp.hive.c", - "circular_buffer.sp.c", - "frame_buffer.sp.c" - }; - -#if 1 - /* Example SH_CSS_SP_DBG_NR_OF_TRACES==1 */ - /* Adjust this to your trace case */ - static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = { - "default" - }; -#else - /* Example SH_CSS_SP_DBG_NR_OF_TRACES==4 */ - /* Adjust this to your trace case */ - static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = { - "copy", "preview/video", "capture", "acceleration" - }; -#endif - - /* Remember host_index_last because we only want to print new entries */ - static int host_index_last[SH_CSS_SP_DBG_NR_OF_TRACES] = { 0 }; - int t, n; - - assert(state != NULL); - - for (t = 0; t < SH_CSS_SP_DBG_NR_OF_TRACES; t++) { - int sp_index_last = state->index_last[t]; - - if (sp_index_last < host_index_last[t]) { - /* SP has been reset */ - host_index_last[t] = 0; - } - - if ((host_index_last[t] + SH_CSS_SP_DBG_TRACE_DEPTH) < - sp_index_last) { - /* last index can be multiple rounds behind */ - /* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */ - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "Warning: trace %s has gap of %d " - "traces\n", - trace_name[t], - (sp_index_last - - (host_index_last[t] + - SH_CSS_SP_DBG_TRACE_DEPTH))); - - host_index_last[t] = - sp_index_last - SH_CSS_SP_DBG_TRACE_DEPTH; - } - - for (n = host_index_last[t]; n < sp_index_last; n++) { - int i = n % SH_CSS_SP_DBG_TRACE_DEPTH; - int l = state->trace[t][i].location & - ((1 << SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS) - 1); - int fid = state->trace[t][i].location >> - SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS; - int ts = state->trace[t][i].time_stamp; - - if (ts) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "%05d trace=%s, file=%s:%d, " - "data=0x%08x\n", - ts, - trace_name[t], - id2filename[fid], l, - state->trace[t][i].data); - } - 
} - host_index_last[t] = sp_index_last; - } - -#elif SP_DEBUG == SP_DEBUG_MINIMAL - int i; - int base = 0; - int limit = SH_CSS_NUM_SP_DEBUG; - int step = 1; - - assert(state != NULL); - - for (i = base; i < limit; i += step) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "sp_dbg_trace[%d] = %d\n", - i, state->debug[i]); - } -#endif - -#if SP_DEBUG != SP_DEBUG_NONE - - return; -} -#endif - -#if defined(HAS_INPUT_FORMATTER_VERSION_2) && !defined(HAS_NO_INPUT_FORMATTER) -static void debug_print_rx_mipi_port_state(mipi_port_state_t *state) -{ - int i; - unsigned int bits, infos; - - assert(state != NULL); - - bits = state->irq_status; - infos = ia_css_isys_rx_translate_irq_infos(bits); - - ia_css_debug_dtrace(2, "\t\t%-32s: (irq reg = 0x%X)\n", - "receiver errors", bits); - - if (infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN) - ia_css_debug_dtrace(2, "\t\t\tbuffer overrun\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT) - ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission error\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC) - ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission sync error\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL) - ia_css_debug_dtrace(2, "\t\t\tcontrol error\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE) - ia_css_debug_dtrace(2, "\t\t\t2 or more ECC errors\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC) - ia_css_debug_dtrace(2, "\t\t\tCRC mismatch\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID) - ia_css_debug_dtrace(2, "\t\t\tunknown error\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC) - ia_css_debug_dtrace(2, "\t\t\tframe sync error\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA) - ia_css_debug_dtrace(2, "\t\t\tframe data error\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT) - ia_css_debug_dtrace(2, "\t\t\tdata timeout\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC) - ia_css_debug_dtrace(2, "\t\t\tunknown escape command entry\n"); - if (infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC) - ia_css_debug_dtrace(2, 
"\t\t\tline sync error\n"); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "device_ready", state->device_ready); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "irq_status", state->irq_status); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "irq_enable", state->irq_enable); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "timeout_count", state->timeout_count); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "init_count", state->init_count); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16_18", state->raw16_18); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "sync_count", state->sync_count); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "rx_count", state->rx_count); - - for (i = 0; i < MIPI_4LANE_CFG; i++) { - ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n", - "lane_sync_count[", i, "]", - state->lane_sync_count[i]); - } - - for (i = 0; i < MIPI_4LANE_CFG; i++) { - ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n", - "lane_rx_count[", i, "]", - state->lane_rx_count[i]); - } - - return; -} - -static void debug_print_rx_channel_state(rx_channel_state_t *state) -{ - int i; - - assert(state != NULL); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "compression_scheme0", state->comp_scheme0); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "compression_scheme1", state->comp_scheme1); - - for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) { - ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n", - "MIPI Predictor ", i, state->pred[i]); - } - - for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) { - ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n", - "MIPI Compressor ", i, state->comp[i]); - } - - return; -} - -static void debug_print_rx_state(receiver_state_t *state) -{ - int i; - - assert(state != NULL); - ia_css_debug_dtrace(2, "CSI Receiver State:\n"); - - ia_css_debug_dtrace(2, "\tConfiguration:\n"); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "fs_to_ls_delay", state->fs_to_ls_delay); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "ls_to_data_delay", state->ls_to_data_delay); - - 
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "data_to_le_delay", state->data_to_le_delay); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "le_to_fe_delay", state->le_to_fe_delay); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "fe_to_fs_delay", state->fe_to_fs_delay); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "le_to_fs_delay", state->le_to_fs_delay); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "is_two_ppc", state->is_two_ppc); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "backend_rst", state->backend_rst); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw18", state->raw18); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "force_raw8", state->force_raw8); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16", state->raw16); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_gsp_acc_ovl", state->be_gsp_acc_ovl); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_srst", state->be_srst); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_is_two_ppc", state->be_is_two_ppc); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_comp_format0", state->be_comp_format0); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_comp_format1", state->be_comp_format1); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_comp_format2", state->be_comp_format2); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_comp_format3", state->be_comp_format3); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_sel", state->be_sel); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_raw16_config", state->be_raw16_config); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_raw18_config", state->be_raw18_config); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_force_raw8", state->be_force_raw8); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_irq_status", state->be_irq_status); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "be_irq_clear", state->be_irq_clear); - - /* mipi port state */ - for (i = 0; i < N_MIPI_PORT_ID; i++) { - ia_css_debug_dtrace(2, "\tMIPI Port %d 
State:\n", i); - - debug_print_rx_mipi_port_state(&state->mipi_port_state[i]); - } - /* end of mipi port state */ - - /* rx channel state */ - for (i = 0; i < N_RX_CHANNEL_ID; i++) { - ia_css_debug_dtrace(2, "\tRX Channel %d State:\n", i); - - debug_print_rx_channel_state(&state->rx_channel_state[i]); - } - /* end of rx channel state */ - - return; -} -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) -void ia_css_debug_dump_rx_state(void) -{ -#if defined(HAS_INPUT_FORMATTER_VERSION_2) && !defined(HAS_NO_INPUT_FORMATTER) - receiver_state_t state; - - receiver_get_state(RX0_ID, &state); - debug_print_rx_state(&state); -#endif -} -#endif - -void ia_css_debug_dump_sp_sw_debug_info(void) -{ -#if SP_DEBUG != SP_DEBUG_NONE - struct sh_css_sp_debug_state state; - - sh_css_sp_get_debug_state(&state); - ia_css_debug_print_sp_debug_state(&state); -#endif - ia_css_bufq_dump_queue_info(); - ia_css_pipeline_dump_thread_map_info(); - return; -} - -#if defined(USE_INPUT_SYSTEM_VERSION_2) -static void debug_print_isys_capture_unit_state(capture_unit_state_t *state) -{ - assert(state != NULL); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Packet_Length", state->Packet_Length); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Received_Length", state->Received_Length); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Received_Short_Packets", - state->Received_Short_Packets); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Received_Long_Packets", - state->Received_Long_Packets); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Last_Command", state->Last_Command); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Next_Command", state->Next_Command); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Last_Acknowledge", state->Last_Acknowledge); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Next_Acknowledge", state->Next_Acknowledge); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM_State_Info", state->FSM_State_Info); - - ia_css_debug_dtrace(2, 
"\t\t%-32s: %d\n", - "StartMode", state->StartMode); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Start_Addr", state->Start_Addr); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Mem_Region_Size", state->Mem_Region_Size); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Num_Mem_Regions", state->Num_Mem_Regions); - return; -} - -static void debug_print_isys_acquisition_unit_state( - acquisition_unit_state_t *state) -{ - assert(state != NULL); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Received_Short_Packets", - state->Received_Short_Packets); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Received_Long_Packets", - state->Received_Long_Packets); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Last_Command", state->Last_Command); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Next_Command", state->Next_Command); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Last_Acknowledge", state->Last_Acknowledge); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Next_Acknowledge", state->Next_Acknowledge); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "FSM_State_Info", state->FSM_State_Info); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Int_Cntr_Info", state->Int_Cntr_Info); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Start_Addr", state->Start_Addr); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Mem_Region_Size", state->Mem_Region_Size); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "Num_Mem_Regions", state->Num_Mem_Regions); -} - -static void debug_print_isys_ctrl_unit_state(ctrl_unit_state_t *state) -{ - assert(state != NULL); - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_cmd", state->last_cmd); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_cmd", state->next_cmd); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_ack", state->last_ack); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_ack", state->next_ack); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "top_fsm_state", state->top_fsm_state); - - ia_css_debug_dtrace(2, 
"\t\t%-32s: %d\n", - "captA_fsm_state", state->captA_fsm_state); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captB_fsm_state", state->captB_fsm_state); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captC_fsm_state", state->captC_fsm_state); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "acq_fsm_state", state->acq_fsm_state); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captA_start_addr", state->captA_start_addr); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captB_start_addr", state->captB_start_addr); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captC_start_addr", state->captC_start_addr); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captA_mem_region_size", - state->captA_mem_region_size); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captB_mem_region_size", - state->captB_mem_region_size); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captC_mem_region_size", - state->captC_mem_region_size); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captA_num_mem_regions", - state->captA_num_mem_regions); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captB_num_mem_regions", - state->captB_num_mem_regions); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "captC_num_mem_regions", - state->captC_num_mem_regions); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "acq_start_addr", state->acq_start_addr); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "acq_mem_region_size", state->acq_mem_region_size); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "acq_num_mem_regions", state->acq_num_mem_regions); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "capt_reserve_one_mem_region", - state->capt_reserve_one_mem_region); - - return; -} - -static void debug_print_isys_state(input_system_state_t *state) -{ - int i; - - assert(state != NULL); - ia_css_debug_dtrace(2, "InputSystem State:\n"); - - /* configuration */ - ia_css_debug_dtrace(2, "\tConfiguration:\n"); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_multiCastA_sel", 
state->str_multicastA_sel); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_multicastB_sel", state->str_multicastB_sel); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_multicastC_sel", state->str_multicastC_sel); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_mux_sel", state->str_mux_sel); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_mon_status", state->str_mon_status); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_mon_irq_cond", state->str_mon_irq_cond); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_mon_irq_en", state->str_mon_irq_en); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "isys_srst", state->isys_srst); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "isys_slv_reg_srst", state->isys_slv_reg_srst); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_deint_portA_cnt", state->str_deint_portA_cnt); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "str_deint_portB_cnd", state->str_deint_portB_cnt); - /* end of configuration */ - - /* capture unit state */ - for (i = 0; i < N_CAPTURE_UNIT_ID; i++) { - capture_unit_state_t *capture_unit_state; - - ia_css_debug_dtrace(2, "\tCaptureUnit %d State:\n", i); - - capture_unit_state = &state->capture_unit[i]; - debug_print_isys_capture_unit_state(capture_unit_state); - } - /* end of capture unit state */ - - /* acquisition unit state */ - for (i = 0; i < N_ACQUISITION_UNIT_ID; i++) { - acquisition_unit_state_t *acquisition_unit_state; - - ia_css_debug_dtrace(2, "\tAcquisitionUnit %d State:\n", i); - - acquisition_unit_state = &state->acquisition_unit[i]; - debug_print_isys_acquisition_unit_state(acquisition_unit_state); - } - /* end of acquisition unit state */ - - /* control unit state */ - for (i = 0; i < N_CTRL_UNIT_ID; i++) { - ia_css_debug_dtrace(2, "\tControlUnit %d State:\n", i); - - debug_print_isys_ctrl_unit_state(&state->ctrl_unit_state[i]); - } - /* end of control unit state */ -} - -void ia_css_debug_dump_isys_state(void) -{ - input_system_state_t state; - - 
input_system_get_state(INPUT_SYSTEM0_ID, &state); - debug_print_isys_state(&state); - - return; -} -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401) -void ia_css_debug_dump_isys_state(void) -{ - /* Android compilation fails if made a local variable - stack size on android is limited to 2k and this structure - is around 3.5K, in place of static malloc can be done but - if this call is made too often it will lead to fragment memory - versus a fixed allocation */ - static input_system_state_t state; - - input_system_get_state(INPUT_SYSTEM0_ID, &state); - input_system_dump_state(INPUT_SYSTEM0_ID, &state); -} -#endif - -void ia_css_debug_dump_debug_info(const char *context) -{ - if (context == NULL) - context = "No Context provided"; - - ia_css_debug_dtrace(2, "CSS Debug Info dump [Context = %s]\n", context); -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) - ia_css_debug_dump_rx_state(); -#endif -#if !defined(HAS_NO_INPUT_FORMATTER) && defined(USE_INPUT_SYSTEM_VERSION_2) - ia_css_debug_dump_if_state(); -#endif - ia_css_debug_dump_isp_state(); - ia_css_debug_dump_isp_sp_fifo_state(); - ia_css_debug_dump_isp_gdc_fifo_state(); - ia_css_debug_dump_sp_state(); - ia_css_debug_dump_perf_counters(); - -#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG - sh_css_dump_thread_wait_info(); - sh_css_dump_pipe_stage_info(); - sh_css_dump_pipe_stripe_info(); -#endif - ia_css_debug_dump_dma_isp_fifo_state(); - ia_css_debug_dump_dma_sp_fifo_state(); - ia_css_debug_dump_dma_state(); -#if defined(USE_INPUT_SYSTEM_VERSION_2) - ia_css_debug_dump_isys_state(); - - { - irq_controller_state_t state; - irq_controller_get_state(IRQ2_ID, &state); - - ia_css_debug_dtrace(2, "\t%-32s:\n", - "Input System IRQ Controller State"); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "irq_edge", state.irq_edge); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "irq_mask", state.irq_mask); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "irq_status", 
state.irq_status); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "irq_enable", state.irq_enable); - - ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", - "irq_level_not_pulse", - state.irq_level_not_pulse); - } -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401) - ia_css_debug_dump_isys_state(); -#endif -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - ia_css_debug_tagger_state(); -#endif - return; -} - -/* this function is for debug use, it can make SP go to sleep - state after each frame, then user can dump the stable SP dmem. - this function can be called after ia_css_start_sp() - and before sh_css_init_buffer_queues() -*/ -void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode) -{ - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_sp_sleep_mode; - - fw = &sh_css_sp_fw; - HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode; - - (void)HIVE_ADDR_sp_sleep_mode; /* Suppres warnings in CRUN */ - - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(sp_sleep_mode), - (uint32_t) mode); -} - -void ia_css_debug_wake_up_sp(void) -{ - /*hrt_ctl_start(SP); */ - sp_ctrl_setbit(SP0_ID, SP_SC_REG, SP_START_BIT); -} - -#if !defined(IS_ISP_2500_SYSTEM) -#define FIND_DMEM_PARAMS_TYPE(stream, kernel, type) \ - (struct HRTCAT(HRTCAT(sh_css_isp_, type), _params) *) \ - findf_dmem_params(stream, offsetof(struct ia_css_memory_offsets, dmem.kernel)) - -#define FIND_DMEM_PARAMS(stream, kernel) FIND_DMEM_PARAMS_TYPE(stream, kernel, kernel) - -/* Find a stage that support the kernel and return the parameters for that kernel */ -static char * -findf_dmem_params(struct ia_css_stream *stream, short idx) -{ - int i; - for (i = 0; i < stream->num_pipes; i++) { - struct ia_css_pipe *pipe = stream->pipes[i]; - struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe); - struct ia_css_pipeline_stage *stage; - for (stage = pipeline->stages; stage; stage = stage->next) { - struct ia_css_binary *binary 
= stage->binary; - short *offsets = (short *)&binary->info->mem_offsets.offsets.param->dmem; - short dmem_offset = offsets[idx]; - const struct ia_css_host_data *isp_data = - ia_css_isp_param_get_mem_init(&binary->mem_params, - IA_CSS_PARAM_CLASS_PARAM, IA_CSS_ISP_DMEM0); - if (dmem_offset < 0) - continue; - return &isp_data->address[dmem_offset]; - } - } - return NULL; -} -#endif - -void ia_css_debug_dump_isp_params(struct ia_css_stream *stream, - unsigned int enable) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "ISP PARAMETERS:\n"); -#if defined(IS_ISP_2500_SYSTEM) - (void)enable; - (void)stream; -#else - - assert(stream != NULL); - if ((enable & IA_CSS_DEBUG_DUMP_FPN) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_fpn_dump(FIND_DMEM_PARAMS(stream, fpn), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_OB) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_ob_dump(FIND_DMEM_PARAMS(stream, ob), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_SC) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_sc_dump(FIND_DMEM_PARAMS(stream, sc), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_WB) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_wb_dump(FIND_DMEM_PARAMS(stream, wb), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_DP) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_dp_dump(FIND_DMEM_PARAMS(stream, dp), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_BNR) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_bnr_dump(FIND_DMEM_PARAMS(stream, bnr), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_S3A) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_s3a_dump(FIND_DMEM_PARAMS(stream, s3a), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_DE) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_de_dump(FIND_DMEM_PARAMS(stream, de), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_YNR) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - 
ia_css_nr_dump(FIND_DMEM_PARAMS_TYPE(stream, nr, ynr), IA_CSS_DEBUG_VERBOSE); - ia_css_yee_dump(FIND_DMEM_PARAMS(stream, yee), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_CSC) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_csc_dump(FIND_DMEM_PARAMS(stream, csc), IA_CSS_DEBUG_VERBOSE); - ia_css_yuv2rgb_dump(FIND_DMEM_PARAMS_TYPE(stream, yuv2rgb, csc), IA_CSS_DEBUG_VERBOSE); - ia_css_rgb2yuv_dump(FIND_DMEM_PARAMS_TYPE(stream, rgb2yuv, csc), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_GC) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_gc_dump(FIND_DMEM_PARAMS(stream, gc), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_TNR) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_tnr_dump(FIND_DMEM_PARAMS(stream, tnr), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_ANR) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_anr_dump(FIND_DMEM_PARAMS(stream, anr), IA_CSS_DEBUG_VERBOSE); - } - if ((enable & IA_CSS_DEBUG_DUMP_CE) - || (enable & IA_CSS_DEBUG_DUMP_ALL)) { - ia_css_ce_dump(FIND_DMEM_PARAMS(stream, ce), IA_CSS_DEBUG_VERBOSE); - } -#endif -} - -void sh_css_dump_sp_raw_copy_linecount(bool reduced) -{ - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_raw_copy_line_count; - int32_t raw_copy_line_count; - static int32_t prev_raw_copy_line_count = -1; - - fw = &sh_css_sp_fw; - HIVE_ADDR_raw_copy_line_count = - fw->info.sp.raw_copy_line_count; - - (void)HIVE_ADDR_raw_copy_line_count; - - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(raw_copy_line_count), - &raw_copy_line_count, - sizeof(raw_copy_line_count)); - - /* only indicate if copy loop is active */ - if (reduced) - raw_copy_line_count = (raw_copy_line_count < 0)?raw_copy_line_count:1; - /* do the handling */ - if (prev_raw_copy_line_count != raw_copy_line_count) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "sh_css_dump_sp_raw_copy_linecount() " - "line_count=%d\n", - raw_copy_line_count); - prev_raw_copy_line_count = 
raw_copy_line_count; - } -} - -void ia_css_debug_dump_isp_binary(void) -{ - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_pipeline_sp_curr_binary_id; - uint32_t curr_binary_id; - static uint32_t prev_binary_id = 0xFFFFFFFF; - static uint32_t sample_count; - - fw = &sh_css_sp_fw; - HIVE_ADDR_pipeline_sp_curr_binary_id = fw->info.sp.curr_binary_id; - - (void)HIVE_ADDR_pipeline_sp_curr_binary_id; - - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(pipeline_sp_curr_binary_id), - &curr_binary_id, - sizeof(curr_binary_id)); - - /* do the handling */ - sample_count++; - if (prev_binary_id != curr_binary_id) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "sh_css_dump_isp_binary() " - "pipe_id=%d, binary_id=%d, sample_count=%d\n", - (curr_binary_id >> 16), - (curr_binary_id & 0x0ffff), - sample_count); - sample_count = 0; - prev_binary_id = curr_binary_id; - } -} - -void ia_css_debug_dump_perf_counters(void) -{ -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) - const struct ia_css_fw_info *fw; - int i; - unsigned int HIVE_ADDR_ia_css_isys_sp_error_cnt; - int32_t ia_css_sp_input_system_error_cnt[N_MIPI_PORT_ID + 1]; /* 3 Capture Units and 1 Acquire Unit. 
*/ - - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "Input System Error Counters:\n"); - - fw = &sh_css_sp_fw; - HIVE_ADDR_ia_css_isys_sp_error_cnt = fw->info.sp.perf_counter_input_system_error; - - (void)HIVE_ADDR_ia_css_isys_sp_error_cnt; - - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(ia_css_isys_sp_error_cnt), - &ia_css_sp_input_system_error_cnt, - sizeof(ia_css_sp_input_system_error_cnt)); - - for (i = 0; i < N_MIPI_PORT_ID + 1; i++) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\tport[%d] = %d\n", - i, ia_css_sp_input_system_error_cnt[i]); - } -#endif -} - -/* - -void sh_css_init_ddr_debug_queue(void) -{ - hrt_vaddress ddr_debug_queue_addr = - mmgr_malloc(sizeof(debug_data_ddr_t)); - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_debug_buffer_ddr_address; - - fw = &sh_css_sp_fw; - HIVE_ADDR_debug_buffer_ddr_address = - fw->info.sp.debug_buffer_ddr_address; - - (void)HIVE_ADDR_debug_buffer_ddr_address; - - debug_buffer_ddr_init(ddr_debug_queue_addr); - - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(debug_buffer_ddr_address), - (uint32_t)(ddr_debug_queue_addr)); -} - -void sh_css_load_ddr_debug_queue(void) -{ - debug_synch_queue_ddr(); -} - -void ia_css_debug_dump_ddr_debug_queue(void) -{ - int i; - sh_css_load_ddr_debug_queue(); - for (i = 0; i < DEBUG_BUF_SIZE; i++) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "ddr_debug_queue[%d] = 0x%x\n", - i, debug_data_ptr->buf[i]); - } -} -*/ - -/* - * @brief Initialize the debug mode. - * Refer to "ia_css_debug.h" for more details. - */ -bool ia_css_debug_mode_init(void) -{ - bool rc; - rc = sh_css_sp_init_dma_sw_reg(0); - return rc; -} - -/* - * @brief Disable the DMA channel. - * Refer to "ia_css_debug.h" for more details. - */ -bool -ia_css_debug_mode_disable_dma_channel(int dma_id, - int channel_id, int request_type) -{ - bool rc; - - rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, false); - - return rc; -} - -/* - * @brief Enable the DMA channel. 
- * Refer to "ia_css_debug.h" for more details. - */ -bool -ia_css_debug_mode_enable_dma_channel(int dma_id, - int channel_id, int request_type) -{ - bool rc; - - rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, true); - - return rc; -} - -static -void dtrace_dot(const char *fmt, ...) -{ - va_list ap; - - assert(fmt != NULL); - va_start(ap, fmt); - - ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_START); - ia_css_debug_vdtrace(IA_CSS_DEBUG_INFO, fmt, ap); - ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_END); - va_end(ap); -} -#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG -void sh_css_dump_thread_wait_info(void) -{ - const struct ia_css_fw_info *fw; - int i; - unsigned int HIVE_ADDR_sp_thread_wait; - int32_t sp_thread_wait[MAX_THREAD_NUM]; - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "SEM WAITS:\n"); - - fw = &sh_css_sp_fw; - HIVE_ADDR_sp_thread_wait = - fw->info.sp.debug_wait; - - (void)HIVE_ADDR_sp_thread_wait; - - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(sp_thread_wait), - &sp_thread_wait, - sizeof(sp_thread_wait)); - for (i = 0; i < MAX_THREAD_NUM; i++) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "\twait[%d] = 0x%X\n", - i, sp_thread_wait[i]); - } - -} - -void sh_css_dump_pipe_stage_info(void) -{ - const struct ia_css_fw_info *fw; - int i; - unsigned int HIVE_ADDR_sp_pipe_stage; - int32_t sp_pipe_stage[MAX_THREAD_NUM]; - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STAGE:\n"); - - fw = &sh_css_sp_fw; - HIVE_ADDR_sp_pipe_stage = - fw->info.sp.debug_stage; - - (void)HIVE_ADDR_sp_pipe_stage; - - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(sp_pipe_stage), - &sp_pipe_stage, - sizeof(sp_pipe_stage)); - for (i = 0; i < MAX_THREAD_NUM; i++) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "\tstage[%d] = %d\n", - i, sp_pipe_stage[i]); - } - -} - -void sh_css_dump_pipe_stripe_info(void) -{ - const struct ia_css_fw_info *fw; - int i; - unsigned int HIVE_ADDR_sp_pipe_stripe; - int32_t sp_pipe_stripe[MAX_THREAD_NUM]; - 
ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STRIPE:\n"); - - fw = &sh_css_sp_fw; - HIVE_ADDR_sp_pipe_stripe = - fw->info.sp.debug_stripe; - - (void)HIVE_ADDR_sp_pipe_stripe; - - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(sp_pipe_stripe), - &sp_pipe_stripe, - sizeof(sp_pipe_stripe)); - for (i = 0; i < MAX_THREAD_NUM; i++) { - ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, - "\tstripe[%d] = %d\n", - i, sp_pipe_stripe[i]); - } - -} -#endif - -static void -ia_css_debug_pipe_graph_dump_frame( - struct ia_css_frame *frame, - enum ia_css_pipe_id id, - char const *blob_name, - char const *frame_name, - bool in_frame) -{ - char bufinfo[100]; - - if (frame->dynamic_queue_id == SH_CSS_INVALID_QUEUE_ID) { - snprintf(bufinfo, sizeof(bufinfo), "Internal"); - } else { - snprintf(bufinfo, sizeof(bufinfo), "Queue: %s %s", - pipe_id_to_str[id], - queue_id_to_str[frame->dynamic_queue_id]); - } - dtrace_dot( - "node [shape = box, " - "fixedsize=true, width=2, height=0.7]; \"%p\" " - "[label = \"%s\\n%d(%d) x %d, %dbpp\\n%s\"];", - frame, - debug_frame_format2str(frame->info.format), - frame->info.res.width, - frame->info.padded_width, - frame->info.res.height, - frame->info.raw_bit_depth, - bufinfo); - - if (in_frame) { - dtrace_dot( - "\"%p\"->\"%s(pipe%d)\" " - "[label = %s_frame];", - frame, - blob_name, id, frame_name); - } else { - dtrace_dot( - "\"%s(pipe%d)\"->\"%p\" " - "[label = %s_frame];", - blob_name, id, - frame, - frame_name); - } -} - -void -ia_css_debug_pipe_graph_dump_prologue(void) -{ - dtrace_dot("digraph sh_css_pipe_graph {"); - dtrace_dot("rankdir=LR;"); - - dtrace_dot("fontsize=9;"); - dtrace_dot("label = \"\\nEnable options: rp=reduced pipe, vfve=vf_veceven, " - "dvse=dvs_envelope, dvs6=dvs_6axis, bo=block_out, " - "fbds=fixed_bayer_ds, bf6=bayer_fir_6db, " - "rawb=raw_binning, cont=continuous, disc=dis_crop\\n" - "dp2a=dp_2adjacent, outp=output, outt=out_table, " - "reff=ref_frame, par=params, gam=gamma, " - "cagdc=ca_gdc, ispa=isp_addresses, 
inf=in_frame, " - "outf=out_frame, hs=high_speed, inpc=input_chunking\""); -} - -void ia_css_debug_pipe_graph_dump_epilogue(void) -{ - - if (strlen(ring_buffer) > 0) { - dtrace_dot(ring_buffer); - } - - - if (pg_inst.stream_format != N_ATOMISP_INPUT_FORMAT) { - /* An input stream format has been set so assume we have - * an input system and sensor - */ - - - dtrace_dot( - "node [shape = doublecircle, " - "fixedsize=true, width=2.5]; \"input_system\" " - "[label = \"Input system\"];"); - - dtrace_dot( - "\"input_system\"->\"%s\" " - "[label = \"%s\"];", - dot_id_input_bin, debug_stream_format2str(pg_inst.stream_format)); - - dtrace_dot( - "node [shape = doublecircle, " - "fixedsize=true, width=2.5]; \"sensor\" " - "[label = \"Sensor\"];"); - - dtrace_dot( - "\"sensor\"->\"input_system\" " - "[label = \"%s\\n%d x %d\\n(%d x %d)\"];", - debug_stream_format2str(pg_inst.stream_format), - pg_inst.width, pg_inst.height, - pg_inst.eff_width, pg_inst.eff_height); - } - - dtrace_dot("}"); - - /* Reset temp strings */ - memset(dot_id_input_bin, 0, sizeof(dot_id_input_bin)); - memset(ring_buffer, 0, sizeof(ring_buffer)); - - pg_inst.do_init = true; - pg_inst.width = 0; - pg_inst.height = 0; - pg_inst.eff_width = 0; - pg_inst.eff_height = 0; - pg_inst.stream_format = N_ATOMISP_INPUT_FORMAT; -} - -void -ia_css_debug_pipe_graph_dump_stage( - struct ia_css_pipeline_stage *stage, - enum ia_css_pipe_id id) -{ - char blob_name[SH_CSS_MAX_BINARY_NAME+10] = ""; - char const *bin_type = ""; - int i; - - assert(stage != NULL); - if (stage->sp_func != IA_CSS_PIPELINE_NO_FUNC) - return; - - if (pg_inst.do_init) { - ia_css_debug_pipe_graph_dump_prologue(); - pg_inst.do_init = false; - } - - if (stage->binary) { - bin_type = "binary"; - if (stage->binary->info->blob) - snprintf(blob_name, sizeof(blob_name), "%s_stage%d", - stage->binary->info->blob->name, stage->stage_num); - } else if (stage->firmware) { - bin_type = "firmware"; - strncpy_s(blob_name, sizeof(blob_name), 
IA_CSS_EXT_ISP_PROG_NAME(stage->firmware), sizeof(blob_name)); - } - - /* Guard in case of binaries that don't have any binary_info */ - if (stage->binary_info != NULL) { - char enable_info1[100]; - char enable_info2[100]; - char enable_info3[100]; - char enable_info[200]; - struct ia_css_binary_info *bi = stage->binary_info; - - /* Split it in 2 function-calls to keep the amount of - * parameters per call "reasonable" - */ - snprintf(enable_info1, sizeof(enable_info1), - "%s%s%s%s%s%s%s%s%s%s%s%s%s%s", - bi->enable.reduced_pipe ? "rp," : "", - bi->enable.vf_veceven ? "vfve," : "", - bi->enable.dis ? "dis," : "", - bi->enable.dvs_envelope ? "dvse," : "", - bi->enable.uds ? "uds," : "", - bi->enable.dvs_6axis ? "dvs6," : "", - bi->enable.block_output ? "bo," : "", - bi->enable.ds ? "ds," : "", - bi->enable.bayer_fir_6db ? "bf6," : "", - bi->enable.raw_binning ? "rawb," : "", - bi->enable.continuous ? "cont," : "", - bi->enable.s3a ? "s3a," : "", - bi->enable.fpnr ? "fpnr," : "", - bi->enable.sc ? "sc," : "" - ); - - snprintf(enable_info2, sizeof(enable_info2), - "%s%s%s%s%s%s%s%s%s%s%s", - bi->enable.macc ? "macc," : "", - bi->enable.output ? "outp," : "", - bi->enable.ref_frame ? "reff," : "", - bi->enable.tnr ? "tnr," : "", - bi->enable.xnr ? "xnr," : "", - bi->enable.params ? "par," : "", - bi->enable.ca_gdc ? "cagdc," : "", - bi->enable.isp_addresses ? "ispa," : "", - bi->enable.in_frame ? "inf," : "", - bi->enable.out_frame ? "outf," : "", - bi->enable.high_speed ? 
"hs," : "" - ); - - /* And merge them into one string */ - snprintf(enable_info, sizeof(enable_info), "%s%s", - enable_info1, enable_info2); - { - int l, p; - char *ei = enable_info; - - l = strlen(ei); - - /* Replace last ',' with \0 if present */ - if (l && enable_info[l-1] == ',') - enable_info[--l] = '\0'; - - if (l > ENABLE_LINE_MAX_LENGTH) { - /* Too big for one line, find last comma */ - p = ENABLE_LINE_MAX_LENGTH; - while (ei[p] != ',') - p--; - /* Last comma found, copy till that comma */ - strncpy_s(enable_info1, - sizeof(enable_info1), - ei, p); - enable_info1[p] = '\0'; - - ei += p+1; - l = strlen(ei); - - if (l <= ENABLE_LINE_MAX_LENGTH) { - /* The 2nd line fits */ - /* we cannot use ei as argument because - * it is not guarenteed dword aligned - */ - strncpy_s(enable_info2, - sizeof(enable_info2), - ei, l); - enable_info2[l] = '\0'; - snprintf(enable_info, sizeof(enable_info), "%s\\n%s", - enable_info1, enable_info2); - - } else { - /* 2nd line is still too long */ - p = ENABLE_LINE_MAX_LENGTH; - while (ei[p] != ',') - p--; - strncpy_s(enable_info2, - sizeof(enable_info2), - ei, p); - enable_info2[p] = '\0'; - ei += p+1; - l = strlen(ei); - - if (l <= ENABLE_LINE_MAX_LENGTH) { - /* The 3rd line fits */ - /* we cannot use ei as argument because - * it is not guarenteed dword aligned - */ - strcpy_s(enable_info3, - sizeof(enable_info3), ei); - enable_info3[l] = '\0'; - snprintf(enable_info, sizeof(enable_info), - "%s\\n%s\\n%s", - enable_info1, enable_info2, - enable_info3); - } else { - /* 3rd line is still too long */ - p = ENABLE_LINE_MAX_LENGTH; - while (ei[p] != ',') - p--; - strncpy_s(enable_info3, - sizeof(enable_info3), - ei, p); - enable_info3[p] = '\0'; - ei += p+1; - strcpy_s(enable_info3, - sizeof(enable_info3), ei); - snprintf(enable_info, sizeof(enable_info), - "%s\\n%s\\n%s", - enable_info1, enable_info2, - enable_info3); - } - } - } - } - - dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, " - "label=\"%s\\n%s\\n\\n%s\"]; 
\"%s(pipe%d)\"", - bin_type, blob_name, enable_info, blob_name, id); - - } - else { - dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, " - "label=\"%s\\n%s\\n\"]; \"%s(pipe%d)\"", - bin_type, blob_name, blob_name, id); - } - - if (stage->stage_num == 0) { - /* - * There are some implicite assumptions about which bin is the - * input binary e.g. which one is connected to the input system - * Priority: - * 1) sp_raw_copy bin has highest priority - * 2) First stage==0 binary of preview, video or capture - */ - if (strlen(dot_id_input_bin) == 0) { - snprintf(dot_id_input_bin, sizeof(dot_id_input_bin), - "%s(pipe%d)", blob_name, id); - } - } - - if (stage->args.in_frame) { - ia_css_debug_pipe_graph_dump_frame( - stage->args.in_frame, id, blob_name, - "in", true); - } - -#ifndef ISP2401 - for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) { -#else - for (i = 0; i < NUM_TNR_FRAMES; i++) { -#endif - if (stage->args.tnr_frames[i]) { - ia_css_debug_pipe_graph_dump_frame( - stage->args.tnr_frames[i], id, - blob_name, "tnr_frame", true); - } - } - - for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) { - if (stage->args.delay_frames[i]) { - ia_css_debug_pipe_graph_dump_frame( - stage->args.delay_frames[i], id, - blob_name, "delay_frame", true); - } - } - - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - if (stage->args.out_frame[i]) { - ia_css_debug_pipe_graph_dump_frame( - stage->args.out_frame[i], id, blob_name, - "out", false); - } - } - - if (stage->args.out_vf_frame) { - ia_css_debug_pipe_graph_dump_frame( - stage->args.out_vf_frame, id, blob_name, - "out_vf", false); - } -} - -void -ia_css_debug_pipe_graph_dump_sp_raw_copy( - struct ia_css_frame *out_frame) -{ - assert(out_frame != NULL); - if (pg_inst.do_init) { - ia_css_debug_pipe_graph_dump_prologue(); - pg_inst.do_init = false; - } - - dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, " - "label=\"%s\\n%s\"]; \"%s(pipe%d)\"", - "sp-binary", "sp_raw_copy", "sp_raw_copy", 1); - - 
snprintf(ring_buffer, sizeof(ring_buffer), - "node [shape = box, " - "fixedsize=true, width=2, height=0.7]; \"%p\" " - "[label = \"%s\\n%d(%d) x %d\\nRingbuffer\"];", - out_frame, - debug_frame_format2str(out_frame->info.format), - out_frame->info.res.width, - out_frame->info.padded_width, - out_frame->info.res.height); - - dtrace_dot(ring_buffer); - - dtrace_dot( - "\"%s(pipe%d)\"->\"%p\" " - "[label = out_frame];", - "sp_raw_copy", 1, out_frame); - - snprintf(dot_id_input_bin, sizeof(dot_id_input_bin), "%s(pipe%d)", "sp_raw_copy", 1); -} - -void -ia_css_debug_pipe_graph_dump_stream_config( - const struct ia_css_stream_config *stream_config) -{ - pg_inst.width = stream_config->input_config.input_res.width; - pg_inst.height = stream_config->input_config.input_res.height; - pg_inst.eff_width = stream_config->input_config.effective_res.width; - pg_inst.eff_height = stream_config->input_config.effective_res.height; - pg_inst.stream_format = stream_config->input_config.format; -} - -void -ia_css_debug_dump_resolution( - const struct ia_css_resolution *res, - const char *label) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: =%d x =%d\n", - label, res->width, res->height); -} - -void -ia_css_debug_dump_frame_info( - const struct ia_css_frame_info *info, - const char *label) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", label); - ia_css_debug_dump_resolution(&info->res, "res"); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "padded_width: %d\n", - info->padded_width); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n", info->format); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bit_depth: %d\n", - info->raw_bit_depth); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bayer_order: %d\n", - info->raw_bayer_order); -} - -void -ia_css_debug_dump_capture_config( - const struct ia_css_capture_config *config) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode); - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_xnr: %d\n", - config->enable_xnr); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_raw_output: %d\n", - config->enable_raw_output); -} - -void -ia_css_debug_dump_pipe_extra_config( - const struct ia_css_pipe_extra_config *extra_config) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__); - if (extra_config) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "enable_raw_binning: %d\n", - extra_config->enable_raw_binning); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_yuv_ds: %d\n", - extra_config->enable_yuv_ds); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "enable_high_speed: %d\n", - extra_config->enable_high_speed); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "enable_dvs_6axis: %d\n", - extra_config->enable_dvs_6axis); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "enable_reduced_pipe: %d\n", - extra_config->enable_reduced_pipe); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "enable_fractional_ds: %d\n", - extra_config->enable_fractional_ds); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "disable_vf_pp: %d\n", - extra_config->disable_vf_pp); - } -} - -void -ia_css_debug_dump_pipe_config( - const struct ia_css_pipe_config *config) -{ - unsigned int i; - - IA_CSS_ENTER_PRIVATE("config = %p", config); - if (!config) { - IA_CSS_ERROR("NULL input parameter"); - IA_CSS_LEAVE_PRIVATE(""); - return; - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "isp_pipe_version: %d\n", - config->isp_pipe_version); - ia_css_debug_dump_resolution(&config->bayer_ds_out_res, - "bayer_ds_out_res"); - ia_css_debug_dump_resolution(&config->capt_pp_in_res, - "capt_pp_in_res"); - ia_css_debug_dump_resolution(&config->vf_pp_in_res, "vf_pp_in_res"); -#ifdef ISP2401 - ia_css_debug_dump_resolution(&config->output_system_in_res, - "output_system_in_res"); -#endif - ia_css_debug_dump_resolution(&config->dvs_crop_out_res, - "dvs_crop_out_res"); - for (i = 0; i < 
IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - ia_css_debug_dump_frame_info(&config->output_info[i], "output_info"); - ia_css_debug_dump_frame_info(&config->vf_output_info[i], - "vf_output_info"); - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "acc_extension: %p\n", - config->acc_extension); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_acc_stages: %d\n", - config->num_acc_stages); - ia_css_debug_dump_capture_config(&config->default_capture_config); - ia_css_debug_dump_resolution(&config->dvs_envelope, "dvs_envelope"); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "dvs_frame_delay: %d\n", - config->dvs_frame_delay); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "acc_num_execs: %d\n", - config->acc_num_execs); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_dz: %d\n", - config->enable_dz); - IA_CSS_LEAVE_PRIVATE(""); -} - -void -ia_css_debug_dump_stream_config_source( - const struct ia_css_stream_config *config) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__); - switch (config->mode) { - case IA_CSS_INPUT_MODE_SENSOR: - case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.port\n"); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "port: %d\n", - config->source.port.port); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_lanes: %d\n", - config->source.port.num_lanes); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "timeout: %d\n", - config->source.port.timeout); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "compression: %d\n", - config->source.port.compression.type); - break; - case IA_CSS_INPUT_MODE_TPG: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.tpg\n"); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n", - config->source.tpg.id); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", - config->source.tpg.mode); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x_mask: 0x%x\n", - config->source.tpg.x_mask); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x_delta: %d\n", - config->source.tpg.x_delta); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, 
"y_mask: 0x%x\n", - config->source.tpg.y_mask); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "y_delta: %d\n", - config->source.tpg.y_delta); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "xy_mask: 0x%x\n", - config->source.tpg.xy_mask); - break; - case IA_CSS_INPUT_MODE_PRBS: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.prbs\n"); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n", - config->source.prbs.id); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "h_blank: %d\n", - config->source.prbs.h_blank); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "v_blank: %d\n", - config->source.prbs.v_blank); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed: 0x%x\n", - config->source.prbs.seed); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed1: 0x%x\n", - config->source.prbs.seed1); - break; - default: - case IA_CSS_INPUT_MODE_FIFO: - case IA_CSS_INPUT_MODE_MEMORY: - break; - } -} - -void -ia_css_debug_dump_mipi_buffer_config( - const struct ia_css_mipi_buffer_config *config) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "size_mem_words: %d\n", - config->size_mem_words); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "nof_mipi_buffers: %d\n", - config->nof_mipi_buffers); -} - -void -ia_css_debug_dump_metadata_config( - const struct ia_css_metadata_config *config) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "data_type: %d\n", - config->data_type); - ia_css_debug_dump_resolution(&config->resolution, "resolution"); -} - -void -ia_css_debug_dump_stream_config( - const struct ia_css_stream_config *config, - int num_pipes) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_pipes: %d\n", num_pipes); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode); - ia_css_debug_dump_stream_config_source(config); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "channel_id: %d\n", - config->channel_id); - 
ia_css_debug_dump_resolution(&config->input_config.input_res, "input_res"); - ia_css_debug_dump_resolution(&config->input_config.effective_res, "effective_res"); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n", - config->input_config.format); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "bayer_order: %d\n", - config->input_config.bayer_order); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sensor_binning_factor: %d\n", - config->sensor_binning_factor); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pixels_per_clock: %d\n", - config->pixels_per_clock); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "online: %d\n", - config->online); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "init_num_cont_raw_buf: %d\n", - config->init_num_cont_raw_buf); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "target_num_cont_raw_buf: %d\n", - config->target_num_cont_raw_buf); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pack_raw_pixels: %d\n", - config->pack_raw_pixels); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "continuous: %d\n", - config->continuous); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "flash_gpio_pin: %d\n", - config->flash_gpio_pin); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "left_padding: %d\n", - config->left_padding); - ia_css_debug_dump_mipi_buffer_config(&config->mipi_buffer_config); - ia_css_debug_dump_metadata_config(&config->metadata_config); -} - -/* - Trace support. - - This tracer is using a buffer to trace the flow of the FW and dump misc values (see below for details). - Currently, support is only for SKC. - To enable support for other platforms: - - Allocate a buffer for tracing in DMEM. The longer the better. - - Use the DBG_init routine in sp.hive.c to initiatilize the tracer with the address and size selected. - - Add trace points in the SP code wherever needed. - - Enable the dump below with the required address and required adjustments. - Dump is called at the end of ia_css_debug_dump_sp_state(). -*/ - -/* - dump_trace() : dump the trace points from DMEM2. 
- for every trace point, the following are printed: index, major:minor and the 16-bit attached value. - The routine looks for the first 0, and then prints from it cyclically. - Data forma in DMEM2: - first 4 DWORDS: header - DWORD 0: data description - byte 0: version - byte 1: number of threads (for future use) - byte 2+3: number ot TPs - DWORD 1: command byte + data (for future use) - byte 0: command - byte 1-3: command signature - DWORD 2-3: additional data (for future use) - Following data is 4-byte oriented: - byte 0: major - byte 1: minor - byte 2-3: data -*/ -#if TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP -#ifndef ISP2401 -static void debug_dump_one_trace(TRACE_CORE_ID proc_id) -#else -static void debug_dump_one_trace(enum TRACE_CORE_ID proc_id) -#endif -{ -#if defined(HAS_TRACER_V2) - uint32_t start_addr; - uint32_t start_addr_data; - uint32_t item_size; -#ifndef ISP2401 - uint32_t tmp; -#else - uint8_t tid_val; - enum TRACE_DUMP_FORMAT dump_format; -#endif - int i, j, max_trace_points, point_num, limit = -1; - /* using a static buffer here as the driver has issues allocating memory */ - static uint32_t trace_read_buf[TRACE_BUFF_SIZE] = {0}; -#ifdef ISP2401 - static struct trace_header_t header; - uint8_t *header_arr; -#endif - - /* read the header and parse it */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "~~~ Tracer "); - switch (proc_id) - { - case TRACE_SP0_ID: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP0"); - start_addr = TRACE_SP0_ADDR; - start_addr_data = TRACE_SP0_DATA_ADDR; - item_size = TRACE_SP0_ITEM_SIZE; - max_trace_points = TRACE_SP0_MAX_POINTS; - break; - case TRACE_SP1_ID: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP1"); - start_addr = TRACE_SP1_ADDR; - start_addr_data = TRACE_SP1_DATA_ADDR; - item_size = TRACE_SP1_ITEM_SIZE; - max_trace_points = TRACE_SP1_MAX_POINTS; - break; - case TRACE_ISP_ID: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ISP"); - start_addr = TRACE_ISP_ADDR; - start_addr_data = TRACE_ISP_DATA_ADDR; - 
item_size = TRACE_ISP_ITEM_SIZE; - max_trace_points = TRACE_ISP_MAX_POINTS; - break; - default: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\ttraces are not supported for this processor ID - exiting\n"); - return; - } -#ifndef ISP2401 - tmp = ia_css_device_load_uint32(start_addr); - point_num = (tmp >> 16) & 0xFFFF; -#endif - -#ifndef ISP2401 - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n", tmp & 0xFF, point_num); - if ((tmp & 0xFF) != TRACER_VER) { -#else - /* Loading byte-by-byte as using the master routine had issues */ - header_arr = (uint8_t *)&header; - for (i = 0; i < (int)sizeof(struct trace_header_t); i++) - header_arr[i] = ia_css_device_load_uint8(start_addr + (i)); - - point_num = header.max_tracer_points; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n", header.version, point_num); - if ((header.version & 0xFF) != TRACER_VER) { -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tUnknown version - exiting\n"); - return; - } - if (point_num > max_trace_points) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tToo many points - exiting\n"); - return; - } - /* copy the TPs and find the first 0 */ - for (i = 0; i < point_num; i++) { - trace_read_buf[i] = ia_css_device_load_uint32(start_addr_data + (i * item_size)); - if ((limit == (-1)) && (trace_read_buf[i] == 0)) - limit = i; - } -#ifdef ISP2401 - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Status:\n"); - for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\tT%d: %3d (%02x) %6d (%04x) %10d (%08x)\n", i, - header.thr_status_byte[i], header.thr_status_byte[i], - header.thr_status_word[i], header.thr_status_word[i], - header.thr_status_dword[i], header.thr_status_dword[i]); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Scratch:\n"); - for (i = 0; i < MAX_SCRATCH_DATA; i++) - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%10d (%08x) ", - header.scratch_debug[i], header.scratch_debug[i]); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\n"); - -#endif - /* 
two 0s in the beginning: empty buffer */ - if ((trace_read_buf[0] == 0) && (trace_read_buf[1] == 0)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tEmpty tracer - exiting\n"); - return; - } - /* no overrun: start from 0 */ - if ((limit == point_num-1) || /* first 0 is at the end - border case */ - (trace_read_buf[limit+1] == 0)) /* did not make a full cycle after the memset */ - limit = 0; - /* overrun: limit is the first non-zero after the first zero */ - else - limit++; - - /* print the TPs */ - for (i = 0; i < point_num; i++) { - j = (limit + i) % point_num; - if (trace_read_buf[j]) - { -#ifndef ISP2401 - TRACE_DUMP_FORMAT dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]); -#else - - tid_val = FIELD_TID_UNPACK(trace_read_buf[j]); - dump_format = TRACE_DUMP_FORMAT_POINT; - - /* - * When tid value is 111b, the data will be interpreted differently: - * tid val is ignored, major field contains 2 bits (msb) for format type - */ - if (tid_val == FIELD_TID_SEL_FORMAT_PAT) { - dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]); - } -#endif - switch (dump_format) - { - case TRACE_DUMP_FORMAT_POINT: - ia_css_debug_dtrace( -#ifndef ISP2401 - IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %d\n", - j, FIELD_MAJOR_UNPACK(trace_read_buf[j]), -#else - IA_CSS_DEBUG_TRACE, "\t\t%d T%d %d:%d value - %x (%d)\n", - j, - tid_val, - FIELD_MAJOR_UNPACK(trace_read_buf[j]), -#endif - FIELD_MINOR_UNPACK(trace_read_buf[j]), -#ifdef ISP2401 - FIELD_VALUE_UNPACK(trace_read_buf[j]), -#endif - FIELD_VALUE_UNPACK(trace_read_buf[j])); - break; -#ifndef ISP2401 - case TRACE_DUMP_FORMAT_VALUE24_HEX: -#else - case TRACE_DUMP_FORMAT_POINT_NO_TID: -#endif - ia_css_debug_dtrace( -#ifndef ISP2401 - IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x H\n", -#else - IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %x (%d)\n", -#endif - j, -#ifndef ISP2401 - FIELD_MAJOR_UNPACK(trace_read_buf[j]), - FIELD_VALUE_24_UNPACK(trace_read_buf[j])); -#else - FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]), - 
FIELD_MINOR_UNPACK(trace_read_buf[j]), - FIELD_VALUE_UNPACK(trace_read_buf[j]), - FIELD_VALUE_UNPACK(trace_read_buf[j])); -#endif - break; -#ifndef ISP2401 - case TRACE_DUMP_FORMAT_VALUE24_DEC: -#else - case TRACE_DUMP_FORMAT_VALUE24: -#endif - ia_css_debug_dtrace( -#ifndef ISP2401 - IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %d D\n", -#else - IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x (%d)\n", -#endif - j, - FIELD_MAJOR_UNPACK(trace_read_buf[j]), -#ifdef ISP2401 - FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]), - FIELD_VALUE_24_UNPACK(trace_read_buf[j]), -#endif - FIELD_VALUE_24_UNPACK(trace_read_buf[j])); - break; -#ifdef ISP2401 - -#endif - case TRACE_DUMP_FORMAT_VALUE24_TIMING: - ia_css_debug_dtrace( - IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing %x\n", - j, -#ifndef ISP2401 - FIELD_MAJOR_UNPACK(trace_read_buf[j]), -#else - FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]), -#endif - FIELD_VALUE_24_UNPACK(trace_read_buf[j])); - break; - case TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA: - ia_css_debug_dtrace( - IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing delta %x\n", - j, -#ifndef ISP2401 - FIELD_MAJOR_UNPACK(trace_read_buf[j]), -#else - FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]), -#endif - FIELD_VALUE_24_UNPACK(trace_read_buf[j])); - break; - default: - ia_css_debug_dtrace( - IA_CSS_DEBUG_TRACE, - "no such trace dump format %d", -#ifndef ISP2401 - FIELD_FORMAT_UNPACK(trace_read_buf[j])); -#else - dump_format); -#endif - break; - } - } - } -#else - (void)proc_id; -#endif /* HAS_TRACER_V2 */ -} -#endif /* TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP */ - -void ia_css_debug_dump_trace(void) -{ -#if TRACE_ENABLE_SP0 - debug_dump_one_trace(TRACE_SP0_ID); -#endif -#if TRACE_ENABLE_SP1 - debug_dump_one_trace(TRACE_SP1_ID); -#endif -#if TRACE_ENABLE_ISP - debug_dump_one_trace(TRACE_ISP_ID); -#endif -} - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -/* Tagger state dump function. 
The tagger is only available when the CSS - * contains an input system (2400 or 2401). */ -void ia_css_debug_tagger_state(void) -{ - unsigned int i; - unsigned int HIVE_ADDR_tagger_frames; - ia_css_tagger_buf_sp_elem_t tbuf_frames[MAX_CB_ELEMS_FOR_TAGGER]; - - HIVE_ADDR_tagger_frames = sh_css_sp_fw.info.sp.tagger_frames_addr; - - /* This variable is not used in crun */ - (void)HIVE_ADDR_tagger_frames; - - /* 2400 and 2401 only have 1 SP, so the tagger lives on SP0 */ - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(tagger_frames), - tbuf_frames, - sizeof(tbuf_frames)); - - ia_css_debug_dtrace(2, "Tagger Info:\n"); - for (i = 0; i < MAX_CB_ELEMS_FOR_TAGGER; i++) { - ia_css_debug_dtrace(2, "\t tagger frame[%d]: exp_id=%d, marked=%d, locked=%d\n", - i, tbuf_frames[i].exp_id, tbuf_frames[i].mark, tbuf_frames[i].lock); - } - -} -#endif /* defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) */ - -#ifdef ISP2401 -void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps) -{ - unsigned int pc; - unsigned int i; - hrt_data sc = sp_ctrl_load(id, SP_SC_REG); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Status reg: 0x%X\n", id, sc); - sc = sp_ctrl_load(id, SP_CTRL_SINK_REG); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Stall reg: 0x%X\n", id, sc); - for (i = 0; i < num_of_dumps; i++) { - pc = sp_ctrl_load(id, SP_PC_REG); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d PC: 0x%X\n", id, pc); - } -} -#endif - -#if defined(HRT_SCHED) || defined(SH_CSS_DEBUG_SPMEM_DUMP_SUPPORT) -#include "spmem_dump.c" -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/interface/ia_css_event.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/interface/ia_css_event.h deleted file mode 100644 index ab1d9bed9fd8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/interface/ia_css_event.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera 
Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _IA_CSS_EVENT_H -#define _IA_CSS_EVENT_H - -#include -#include "sw_event_global.h" /*event macros.TODO : Change File Name..???*/ - -bool ia_css_event_encode( - uint8_t *in, - uint8_t nr, - uint32_t *out); - -void ia_css_event_decode( - uint32_t event, - uint8_t *payload); - -#endif /*_IA_CSS_EVENT_H*/ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c deleted file mode 100644 index 239c06730bf4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c +++ /dev/null @@ -1,126 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include "sh_css_sp.h" - -#include "dma.h" /* N_DMA_CHANNEL_ID */ - -#include -#include "ia_css_binary.h" -#include "sh_css_hrt.h" -#include "sh_css_defs.h" -#include "sh_css_internal.h" -#include "ia_css_debug.h" -#include "ia_css_debug_internal.h" -#include "sh_css_legacy.h" - -#include "gdc_device.h" /* HRT_GDC_N */ - -/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */ - -#include "memory_access.h" - -#include "assert_support.h" -#include "platform_support.h" /* hrt_sleep() */ - -#include "ia_css_queue.h" /* host_sp_enqueue_XXX */ -#include "ia_css_event.h" /* ia_css_event_encode */ -/* - * @brief Encode the information into the software-event. - * Refer to "sw_event_public.h" for details. 
- */ -bool ia_css_event_encode( - uint8_t *in, - uint8_t nr, - uint32_t *out) -{ - bool ret; - uint32_t nr_of_bits; - uint32_t i; - assert(in != NULL); - assert(out != NULL); - OP___assert(nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT); - - /* initialize the output */ - *out = 0; - - /* get the number of bits per information */ - nr_of_bits = sizeof(uint32_t) * 8 / nr; - - /* compress the all inputs into a signle output */ - for (i = 0; i < nr; i++) { - *out <<= nr_of_bits; - *out |= in[i]; - } - - /* get the return value */ - ret = (nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT); - - return ret; -} - -void ia_css_event_decode( - uint32_t event, - uint8_t *payload) -{ - assert(payload[1] == 0); - assert(payload[2] == 0); - assert(payload[3] == 0); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_event_decode() enter:\n"); - - /* First decode according to the common case - * In case of a PORT_EOF event we overwrite with - * the specific values - * This is somewhat ugly but probably somewhat efficient - * (and it avoids some code duplication) - */ - payload[0] = event & 0xff; /*event_code */ - payload[1] = (event >> 8) & 0xff; - payload[2] = (event >> 16) & 0xff; - payload[3] = 0; - - switch (payload[0]) { - case SH_CSS_SP_EVENT_PORT_EOF: - payload[2] = 0; - payload[3] = (event >> 24) & 0xff; - break; - - case SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE: - case SH_CSS_SP_EVENT_TIMER: - case SH_CSS_SP_EVENT_FRAME_TAGGED: - case SH_CSS_SP_EVENT_FW_WARNING: - case SH_CSS_SP_EVENT_FW_ASSERT: - payload[3] = (event >> 24) & 0xff; - break; - default: - break; - } -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/interface/ia_css_eventq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/interface/ia_css_eventq.h deleted file mode 100644 index 67eb8fdb33c5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/interface/ia_css_eventq.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef ISP2401 -/* - 
* Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _IA_CSS_EVENTQ_H -#define _IA_CSS_EVENTQ_H - -#include "ia_css_queue.h" /* queue APIs */ - -/** - * @brief HOST receives event from SP. - * - * @param[in] eventq_handle eventq_handle. - * @param[in] payload The event payload. - * @return 0 - Successfully dequeue. - * @return EINVAL - Invalid argument. - * @return ENODATA - Queue is empty. - */ -int ia_css_eventq_recv( - ia_css_queue_t *eventq_handle, - uint8_t *payload); - -/** - * @brief The Host sends the event to SP. - * The caller of this API will be blocked until the event - * is sent. - * - * @param[in] eventq_handle eventq_handle. - * @param[in] evt_id The event ID. - * @param[in] evt_payload_0 The event payload. - * @param[in] evt_payload_1 The event payload. - * @param[in] evt_payload_2 The event payload. - * @return 0 - Successfully enqueue. - * @return EINVAL - Invalid argument. 
- * @return ENOBUFS - Queue is full. - */ -int ia_css_eventq_send( - ia_css_queue_t *eventq_handle, - uint8_t evt_id, - uint8_t evt_payload_0, - uint8_t evt_payload_1, - uint8_t evt_payload_2); -#endif /* _IA_CSS_EVENTQ_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c deleted file mode 100644 index 913a4bf7a34f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_types.h" -#include "assert_support.h" -#include "ia_css_queue.h" /* sp2host_dequeue_irq_event() */ -#include "ia_css_eventq.h" -#include "ia_css_event.h" /* ia_css_event_encode() - ia_css_event_decode() - */ -#include "platform_support.h" /* hrt_sleep() */ - -int ia_css_eventq_recv( - ia_css_queue_t *eventq_handle, - uint8_t *payload) -{ - uint32_t sp_event; - int error; - - /* dequeue the IRQ event */ - error = ia_css_queue_dequeue(eventq_handle, &sp_event); - - /* check whether the IRQ event is available or not */ - if (!error) - ia_css_event_decode(sp_event, payload); - return error; -} - -/* - * @brief The Host sends the event to the SP. - * Refer to "sh_css_sp.h" for details. 
- */ -int ia_css_eventq_send( - ia_css_queue_t *eventq_handle, - uint8_t evt_id, - uint8_t evt_payload_0, - uint8_t evt_payload_1, - uint8_t evt_payload_2) -{ - uint8_t tmp[4]; - uint32_t sw_event; - int error = ENOSYS; - - /* - * Encode the queue type, the thread ID and - * the queue ID into the event. - */ - tmp[0] = evt_id; - tmp[1] = evt_payload_0; - tmp[2] = evt_payload_1; - tmp[3] = evt_payload_2; - ia_css_event_encode(tmp, 4, &sw_event); - - /* queue the software event (busy-waiting) */ - for ( ; ; ) { - error = ia_css_queue_enqueue(eventq_handle, sw_event); - if (ENOBUFS != error) { - /* We were able to successfully send the event - or had a real failure. return the status*/ - break; - } - /* Wait for the queue to be not full and try again*/ - hrt_sleep(); - } - return error; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h deleted file mode 100644 index 89ad8080ceb1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h +++ /dev/null @@ -1,180 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. 
- -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __IA_CSS_FRAME_H__ -#define __IA_CSS_FRAME_H__ - -#ifdef ISP2401 -#include -#endif -#include -#include -#include "dma.h" - -/********************************************************************* -**** Frame INFO APIs -**********************************************************************/ -/* @brief Sets the given width and alignment to the frame info - * - * @param - * @param[in] info The info to which parameters would set - * @param[in] width The width to be set to info - * @param[in] aligned The aligned to be set to info - * @return - */ -void ia_css_frame_info_set_width(struct ia_css_frame_info *info, - unsigned int width, - unsigned int min_padded_width); - -/* @brief Sets the given format to the frame info - * - * @param - * @param[in] info The info to which parameters would set - * @param[in] format The format to be set to info - * @return - */ -void ia_css_frame_info_set_format(struct ia_css_frame_info *info, - enum ia_css_frame_format format); - -/* @brief Sets the frame info with the given parameters - * - * @param - * @param[in] info The info to which parameters would set - * @param[in] width The width to be set to info - * @param[in] height The height to be set to info - * @param[in] format The format to be set to info - * @param[in] aligned The aligned to be set to info - * @return - */ -void ia_css_frame_info_init(struct ia_css_frame_info *info, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int aligned); - -/* @brief Checks whether 2 frame infos has the same 
resolution - * - * @param - * @param[in] frame_a The first frame to be compared - * @param[in] frame_b The second frame to be compared - * @return Returns true if the frames are equal - */ -bool ia_css_frame_info_is_same_resolution( - const struct ia_css_frame_info *info_a, - const struct ia_css_frame_info *info_b); - -/* @brief Check the frame info is valid - * - * @param - * @param[in] info The frame attributes to be initialized - * @return The error code. - */ -enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info); - -/********************************************************************* -**** Frame APIs -**********************************************************************/ - -/* @brief Initialize the plane depending on the frame type - * - * @param - * @param[in] frame The frame attributes to be initialized - * @return The error code. - */ -enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame); - -/* @brief Free an array of frames - * - * @param - * @param[in] num_frames The number of frames to be freed in the array - * @param[in] **frames_array The array of frames to be removed - * @return - */ -void ia_css_frame_free_multiple(unsigned int num_frames, - struct ia_css_frame **frames_array); - -/* @brief Allocate a CSS frame structure of given size in bytes.. - * - * @param frame The allocated frame. - * @param[in] size_bytes The frame size in bytes. - * @param[in] contiguous Allocate memory physically contiguously or not. - * @return The error code. - * - * Allocate a frame using the given size in bytes. - * The frame structure is partially null initialized. 
- */ -enum ia_css_err ia_css_frame_allocate_with_buffer_size( - struct ia_css_frame **frame, - const unsigned int size_bytes, - const bool contiguous); - -/* @brief Check whether 2 frames are same type - * - * @param - * @param[in] frame_a The first frame to be compared - * @param[in] frame_b The second frame to be compared - * @return Returns true if the frames are equal - */ -bool ia_css_frame_is_same_type( - const struct ia_css_frame *frame_a, - const struct ia_css_frame *frame_b); - -/* @brief Configure a dma port from frame info - * - * @param - * @param[in] config The DAM port configuration - * @param[in] info The frame info - * @return - */ -void ia_css_dma_configure_from_info( - struct dma_port_config *config, - const struct ia_css_frame_info *info); - -#ifdef ISP2401 -/* @brief Finds the cropping resolution - * This function finds the maximum cropping resolution in an input image keeping - * the aspect ratio for the given output resolution.Calculates the coordinates - * for cropping from the center and returns the starting pixel location of the - * region in the input image. Also returns the dimension of the cropping - * resolution. 
- * - * @param - * @param[in] in_res Resolution of input image - * @param[in] out_res Resolution of output image - * @param[out] crop_res Crop resolution of input image - * @return Returns IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS on error - */ -enum ia_css_err -ia_css_frame_find_crop_resolution(const struct ia_css_resolution *in_res, - const struct ia_css_resolution *out_res, - struct ia_css_resolution *crop_res); - -#endif -#endif /* __IA_CSS_FRAME_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame_comm.h deleted file mode 100644 index a469e0afb2b5..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame_comm.h +++ /dev/null @@ -1,132 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __IA_CSS_FRAME_COMM_H__ -#define __IA_CSS_FRAME_COMM_H__ - -#include "type_support.h" -#include "platform_support.h" -#include "runtime/bufq/interface/ia_css_bufq_comm.h" -#include /* hrt_vaddress */ - -/* - * These structs are derived from structs defined in ia_css_types.h - * (just take out the "_sp" from the struct name to get the "original") - * All the fields that are not needed by the SP are removed. - */ -struct ia_css_frame_sp_plane { - unsigned int offset; /* offset in bytes to start of frame data */ - /* offset is wrt data in sh_css_sp_sp_frame */ -}; - -struct ia_css_frame_sp_binary_plane { - unsigned int size; - struct ia_css_frame_sp_plane data; -}; - -struct ia_css_frame_sp_yuv_planes { - struct ia_css_frame_sp_plane y; - struct ia_css_frame_sp_plane u; - struct ia_css_frame_sp_plane v; -}; - -struct ia_css_frame_sp_nv_planes { - struct ia_css_frame_sp_plane y; - struct ia_css_frame_sp_plane uv; -}; - -struct ia_css_frame_sp_rgb_planes { - struct ia_css_frame_sp_plane r; - struct ia_css_frame_sp_plane g; - struct ia_css_frame_sp_plane b; -}; - -struct ia_css_frame_sp_plane6 { - struct ia_css_frame_sp_plane r; - struct ia_css_frame_sp_plane r_at_b; - struct ia_css_frame_sp_plane gr; - struct ia_css_frame_sp_plane gb; - struct ia_css_frame_sp_plane b; - struct ia_css_frame_sp_plane b_at_r; -}; - -struct ia_css_sp_resolution { - uint16_t width; /* width of valid data in pixels */ - uint16_t height; /* Height of valid data in lines */ -}; - -/* - * Frame info struct. This describes the contents of an image frame buffer. 
- */ -struct ia_css_frame_sp_info { - struct ia_css_sp_resolution res; - uint16_t padded_width; /* stride of line in memory - (in pixels) */ - unsigned char format; /* format of the frame data */ - unsigned char raw_bit_depth; /* number of valid bits per pixel, - only valid for RAW bayer frames */ - unsigned char raw_bayer_order; /* bayer order, only valid - for RAW bayer frames */ - unsigned char padding[3]; /* Extend to 32 bit multiple */ -}; - -struct ia_css_buffer_sp { - union { - hrt_vaddress xmem_addr; - enum sh_css_queue_id queue_id; - } buf_src; - enum ia_css_buffer_type buf_type; -}; - -struct ia_css_frame_sp { - struct ia_css_frame_sp_info info; - struct ia_css_buffer_sp buf_attr; - union { - struct ia_css_frame_sp_plane raw; - struct ia_css_frame_sp_plane rgb; - struct ia_css_frame_sp_rgb_planes planar_rgb; - struct ia_css_frame_sp_plane yuyv; - struct ia_css_frame_sp_yuv_planes yuv; - struct ia_css_frame_sp_nv_planes nv; - struct ia_css_frame_sp_plane6 plane6; - struct ia_css_frame_sp_binary_plane binary; - } planes; -}; - -void ia_css_frame_info_to_frame_sp_info( - struct ia_css_frame_sp_info *sp_info, - const struct ia_css_frame_info *info); - -void ia_css_resolution_to_sp_resolution( - struct ia_css_sp_resolution *sp_info, - const struct ia_css_resolution *info); - -#endif /*__IA_CSS_FRAME_COMM_H__*/ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c deleted file mode 100644 index fd8e6fda5db4..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c +++ /dev/null @@ -1,1026 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#include "ia_css_frame.h" -#include -#include "assert_support.h" -#include "ia_css_debug.h" -#include "isp.h" -#include "sh_css_internal.h" -#include "memory_access.h" - - -#define NV12_TILEY_TILE_WIDTH 128 -#define NV12_TILEY_TILE_HEIGHT 32 - -/************************************************************************** -** Static functions declarations -**************************************************************************/ -static void frame_init_plane(struct ia_css_frame_plane *plane, - unsigned int width, - unsigned int stride, - unsigned int height, - unsigned int offset); - -static void frame_init_single_plane(struct ia_css_frame *frame, - struct ia_css_frame_plane *plane, - unsigned int height, - unsigned int subpixels_per_line, - unsigned int bytes_per_pixel); - -static void frame_init_raw_single_plane( - struct ia_css_frame *frame, - struct ia_css_frame_plane *plane, - unsigned int height, - unsigned int subpixels_per_line, - unsigned int bits_per_pixel); - -static void frame_init_mipi_plane(struct ia_css_frame *frame, - struct ia_css_frame_plane *plane, - unsigned int height, - unsigned int subpixels_per_line, - unsigned int bytes_per_pixel); - -static void frame_init_nv_planes(struct ia_css_frame *frame, - unsigned int horizontal_decimation, - unsigned int vertical_decimation, - unsigned int bytes_per_element); - -static void frame_init_yuv_planes(struct ia_css_frame *frame, - unsigned int horizontal_decimation, - unsigned int vertical_decimation, - bool swap_uv, - unsigned int bytes_per_element); - -static void frame_init_rgb_planes(struct ia_css_frame *frame, - unsigned int bytes_per_element); - -static void frame_init_qplane6_planes(struct ia_css_frame *frame); - -static enum ia_css_err frame_allocate_buffer_data(struct ia_css_frame *frame); - -static enum ia_css_err frame_allocate_with_data(struct ia_css_frame **frame, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int padded_width, - 
unsigned int raw_bit_depth, - bool contiguous); - -static struct ia_css_frame *frame_create(unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int padded_width, - unsigned int raw_bit_depth, - bool contiguous, - bool valid); - -static unsigned -ia_css_elems_bytes_from_info( - const struct ia_css_frame_info *info); - -/************************************************************************** -** CSS API functions, exposed by ia_css.h -**************************************************************************/ - -void ia_css_frame_zero(struct ia_css_frame *frame) -{ - assert(frame != NULL); - mmgr_clear(frame->data, frame->data_bytes); -} - -enum ia_css_err ia_css_frame_allocate_from_info(struct ia_css_frame **frame, - const struct ia_css_frame_info *info) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - if (frame == NULL || info == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_from_info() enter:\n"); - err = - ia_css_frame_allocate(frame, info->res.width, info->res.height, - info->format, info->padded_width, - info->raw_bit_depth); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_from_info() leave:\n"); - return err; -} - -enum ia_css_err ia_css_frame_allocate(struct ia_css_frame **frame, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int padded_width, - unsigned int raw_bit_depth) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - if (frame == NULL || width == 0 || height == 0) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, -#ifndef ISP2401 - "ia_css_frame_allocate() enter: width=%d, height=%d, format=%d\n", - width, height, format); -#else - "ia_css_frame_allocate() enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n", - width, height, format, padded_width, raw_bit_depth); -#endif - - err = frame_allocate_with_data(frame, width, height, format, - 
padded_width, raw_bit_depth, false); - -#ifndef ISP2401 - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate() leave: frame=%p\n", *frame); -#else - if ((*frame != NULL) && err == IA_CSS_SUCCESS) - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n", *frame, (*frame)->data); - else - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n", - (void *)-1, (unsigned int)-1); -#endif - - return err; -} - -enum ia_css_err ia_css_frame_map(struct ia_css_frame **frame, - const struct ia_css_frame_info *info, - const void __user *data, - uint16_t attribute, - void *context) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_frame *me; - assert(frame != NULL); - - /* Create the frame structure */ - err = ia_css_frame_create_from_info(&me, info); - - if (err != IA_CSS_SUCCESS) - return err; - - if (err == IA_CSS_SUCCESS) { - /* use mmgr_mmap to map */ - me->data = (ia_css_ptr) mmgr_mmap(data, - me->data_bytes, - attribute, context); - if (me->data == mmgr_NULL) - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if (err != IA_CSS_SUCCESS) { - sh_css_free(me); -#ifndef ISP2401 - return err; -#else - me = NULL; -#endif - } - - *frame = me; - - return err; -} - -enum ia_css_err ia_css_frame_create_from_info(struct ia_css_frame **frame, - const struct ia_css_frame_info *info) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_frame *me; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_create_from_info() enter:\n"); - if (frame == NULL || info == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_create_from_info() leave:" - " invalid arguments\n"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - me = frame_create(info->res.width, - info->res.height, - info->format, - info->padded_width, - info->raw_bit_depth, - false, - false); - if (me == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_create_from_info() 
leave:" - " frame create failed\n"); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - - err = ia_css_frame_init_planes(me); - -#ifndef ISP2401 - if (err == IA_CSS_SUCCESS) - *frame = me; - else -#else - if (err != IA_CSS_SUCCESS) { -#endif - sh_css_free(me); -#ifdef ISP2401 - me = NULL; - } - - *frame = me; -#endif - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_frame_create_from_info() leave:\n"); - - return err; -} - -enum ia_css_err ia_css_frame_set_data(struct ia_css_frame *frame, - const ia_css_ptr mapped_data, - size_t data_bytes) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_set_data() enter:\n"); - if (frame == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_set_data() leave: NULL frame\n"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - /* If we are setting a valid data. - * Make sure that there is enough - * room for the expected frame format - */ - if ((mapped_data != mmgr_NULL) && (frame->data_bytes > data_bytes)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_set_data() leave: invalid arguments\n"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - frame->data = mapped_data; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_frame_set_data() leave:\n"); - - return err; -} - -enum ia_css_err ia_css_frame_allocate_contiguous(struct ia_css_frame **frame, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int padded_width, - unsigned int raw_bit_depth) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_contiguous() " -#ifndef ISP2401 - "enter: width=%d, height=%d, format=%d\n", - width, height, format); -#else - "enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n", - width, height, format, padded_width, raw_bit_depth); -#endif - - err = frame_allocate_with_data(frame, width, height, format, - padded_width, raw_bit_depth, true); - - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_contiguous() leave: frame=%p\n", - frame ? *frame : (void *)-1); - - return err; -} - -enum ia_css_err ia_css_frame_allocate_contiguous_from_info( - struct ia_css_frame **frame, - const struct ia_css_frame_info *info) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - assert(frame != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_contiguous_from_info() enter:\n"); - err = ia_css_frame_allocate_contiguous(frame, - info->res.width, - info->res.height, - info->format, - info->padded_width, - info->raw_bit_depth); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_allocate_contiguous_from_info() leave:\n"); - return err; -} - -void ia_css_frame_free(struct ia_css_frame *frame) -{ - IA_CSS_ENTER_PRIVATE("frame = %p", frame); - - if (frame != NULL) { - hmm_free(frame->data); - sh_css_free(frame); - } - - IA_CSS_LEAVE_PRIVATE("void"); -} - -/************************************************************************** -** Module public functions -**************************************************************************/ - -enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info) -{ - assert(info != NULL); - if (info->res.width == 0 || info->res.height == 0) - return IA_CSS_ERR_INVALID_ARGUMENTS; - return IA_CSS_SUCCESS; -} - -enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame) -{ - assert(frame != NULL); - - switch (frame->info.format) { - case IA_CSS_FRAME_FORMAT_MIPI: - frame_init_mipi_plane(frame, &frame->planes.raw, - frame->info.res.height, - frame->info.padded_width, - frame->info.raw_bit_depth <= 8 ? 
1 : 2); - break; - case IA_CSS_FRAME_FORMAT_RAW_PACKED: - frame_init_raw_single_plane(frame, &frame->planes.raw, - frame->info.res.height, - frame->info.padded_width, - frame->info.raw_bit_depth); - break; - case IA_CSS_FRAME_FORMAT_RAW: - frame_init_single_plane(frame, &frame->planes.raw, - frame->info.res.height, - frame->info.padded_width, - frame->info.raw_bit_depth <= 8 ? 1 : 2); - break; - case IA_CSS_FRAME_FORMAT_RGB565: - frame_init_single_plane(frame, &frame->planes.rgb, - frame->info.res.height, - frame->info.padded_width, 2); - break; - case IA_CSS_FRAME_FORMAT_RGBA888: - frame_init_single_plane(frame, &frame->planes.rgb, - frame->info.res.height, - frame->info.padded_width * 4, 1); - break; - case IA_CSS_FRAME_FORMAT_PLANAR_RGB888: - frame_init_rgb_planes(frame, 1); - break; - /* yuyv and uyvu have the same frame layout, only the data - * positioning differs. - */ - case IA_CSS_FRAME_FORMAT_YUYV: - case IA_CSS_FRAME_FORMAT_UYVY: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8: - frame_init_single_plane(frame, &frame->planes.yuyv, - frame->info.res.height, - frame->info.padded_width * 2, 1); - break; - case IA_CSS_FRAME_FORMAT_YUV_LINE: - /* Needs 3 extra lines to allow vf_pp prefetching */ - frame_init_single_plane(frame, &frame->planes.yuyv, - frame->info.res.height * 3 / 2 + 3, - frame->info.padded_width, 1); - break; - case IA_CSS_FRAME_FORMAT_NV11: - frame_init_nv_planes(frame, 4, 1, 1); - break; - /* nv12 and nv21 have the same frame layout, only the data - * positioning differs. - */ - case IA_CSS_FRAME_FORMAT_NV12: - case IA_CSS_FRAME_FORMAT_NV21: - case IA_CSS_FRAME_FORMAT_NV12_TILEY: - frame_init_nv_planes(frame, 2, 2, 1); - break; - case IA_CSS_FRAME_FORMAT_NV12_16: - frame_init_nv_planes(frame, 2, 2, 2); - break; - /* nv16 and nv61 have the same frame layout, only the data - * positioning differs. 
- */ - case IA_CSS_FRAME_FORMAT_NV16: - case IA_CSS_FRAME_FORMAT_NV61: - frame_init_nv_planes(frame, 2, 1, 1); - break; - case IA_CSS_FRAME_FORMAT_YUV420: - frame_init_yuv_planes(frame, 2, 2, false, 1); - break; - case IA_CSS_FRAME_FORMAT_YUV422: - frame_init_yuv_planes(frame, 2, 1, false, 1); - break; - case IA_CSS_FRAME_FORMAT_YUV444: - frame_init_yuv_planes(frame, 1, 1, false, 1); - break; - case IA_CSS_FRAME_FORMAT_YUV420_16: - frame_init_yuv_planes(frame, 2, 2, false, 2); - break; - case IA_CSS_FRAME_FORMAT_YUV422_16: - frame_init_yuv_planes(frame, 2, 1, false, 2); - break; - case IA_CSS_FRAME_FORMAT_YV12: - frame_init_yuv_planes(frame, 2, 2, true, 1); - break; - case IA_CSS_FRAME_FORMAT_YV16: - frame_init_yuv_planes(frame, 2, 1, true, 1); - break; - case IA_CSS_FRAME_FORMAT_QPLANE6: - frame_init_qplane6_planes(frame); - break; - case IA_CSS_FRAME_FORMAT_BINARY_8: - frame_init_single_plane(frame, &frame->planes.binary.data, - frame->info.res.height, - frame->info.padded_width, 1); - frame->planes.binary.size = 0; - break; - default: - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - return IA_CSS_SUCCESS; -} - -void ia_css_frame_info_set_width(struct ia_css_frame_info *info, - unsigned int width, - unsigned int min_padded_width) -{ - unsigned int align; - - IA_CSS_ENTER_PRIVATE("info = %p,width = %d, minimum padded width = %d", - info, width, min_padded_width); - if (info == NULL) { - IA_CSS_ERROR("NULL input parameter"); - IA_CSS_LEAVE_PRIVATE(""); - return; - } - if (min_padded_width > width) - align = min_padded_width; - else - align = width; - - info->res.width = width; - /* frames with a U and V plane of 8 bits per pixel need to have - all planes aligned, this means double the alignment for the - Y plane if the horizontal decimation is 2. 
*/ - if (info->format == IA_CSS_FRAME_FORMAT_YUV420 || - info->format == IA_CSS_FRAME_FORMAT_YV12 || - info->format == IA_CSS_FRAME_FORMAT_NV12 || - info->format == IA_CSS_FRAME_FORMAT_NV21 || - info->format == IA_CSS_FRAME_FORMAT_BINARY_8 || - info->format == IA_CSS_FRAME_FORMAT_YUV_LINE) - info->padded_width = - CEIL_MUL(align, 2 * HIVE_ISP_DDR_WORD_BYTES); - else if (info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY) - info->padded_width = CEIL_MUL(align, NV12_TILEY_TILE_WIDTH); - else if (info->format == IA_CSS_FRAME_FORMAT_RAW || - info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) - info->padded_width = CEIL_MUL(align, 2 * ISP_VEC_NELEMS); - else { - info->padded_width = CEIL_MUL(align, HIVE_ISP_DDR_WORD_BYTES); - } - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_frame_info_set_format(struct ia_css_frame_info *info, - enum ia_css_frame_format format) -{ - assert(info != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_info_set_format() enter:\n"); - info->format = format; -} - -void ia_css_frame_info_init(struct ia_css_frame_info *info, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int aligned) -{ - IA_CSS_ENTER_PRIVATE("info = %p, width = %d, height = %d, format = %d, aligned = %d", - info, width, height, format, aligned); - if (info == NULL) { - IA_CSS_ERROR("NULL input parameter"); - IA_CSS_LEAVE_PRIVATE(""); - return; - } - info->res.height = height; - info->format = format; - ia_css_frame_info_set_width(info, width, aligned); - IA_CSS_LEAVE_PRIVATE(""); -} - -void ia_css_frame_free_multiple(unsigned int num_frames, - struct ia_css_frame **frames_array) -{ - unsigned int i; - for (i = 0; i < num_frames; i++) { - if (frames_array[i]) { - ia_css_frame_free(frames_array[i]); - frames_array[i] = NULL; - } - } -} - -enum ia_css_err ia_css_frame_allocate_with_buffer_size( - struct ia_css_frame **frame, - const unsigned int buffer_size_bytes, - const bool contiguous) -{ - /* AM: Body coppied from 
frame_allocate_with_data(). */ - enum ia_css_err err; - struct ia_css_frame *me = frame_create(0, 0, - IA_CSS_FRAME_FORMAT_NUM,/* Not valid format yet */ - 0, 0, contiguous, false); - - if (me == NULL) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - /* Get the data size */ - me->data_bytes = buffer_size_bytes; - - err = frame_allocate_buffer_data(me); - - if (err != IA_CSS_SUCCESS) { - sh_css_free(me); -#ifndef ISP2401 - return err; -#else - me = NULL; -#endif - } - - *frame = me; - - return err; -} - -bool ia_css_frame_info_is_same_resolution( - const struct ia_css_frame_info *info_a, - const struct ia_css_frame_info *info_b) -{ - if (!info_a || !info_b) - return false; - return (info_a->res.width == info_b->res.width) && - (info_a->res.height == info_b->res.height); -} - -bool ia_css_frame_is_same_type(const struct ia_css_frame *frame_a, - const struct ia_css_frame *frame_b) -{ - bool is_equal = false; - const struct ia_css_frame_info *info_a = &frame_a->info, - *info_b = &frame_b->info; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_is_same_type() enter:\n"); - - if (!info_a || !info_b) - return false; - if (info_a->format != info_b->format) - return false; - if (info_a->padded_width != info_b->padded_width) - return false; - is_equal = ia_css_frame_info_is_same_resolution(info_a, info_b); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_frame_is_same_type() leave:\n"); - - return is_equal; -} - -void -ia_css_dma_configure_from_info( - struct dma_port_config *config, - const struct ia_css_frame_info *info) -{ - unsigned is_raw_packed = info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED; - unsigned bits_per_pixel = is_raw_packed ? 
info->raw_bit_depth : ia_css_elems_bytes_from_info(info)*8; - unsigned pix_per_ddrword = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel; - unsigned words_per_line = CEIL_DIV(info->padded_width, pix_per_ddrword); - unsigned elems_b = pix_per_ddrword; - - config->stride = HIVE_ISP_DDR_WORD_BYTES * words_per_line; - config->elems = (uint8_t)elems_b; - config->width = (uint16_t)info->res.width; - config->crop = 0; - assert(config->width <= info->padded_width); -} - -/************************************************************************** -** Static functions -**************************************************************************/ - -static void frame_init_plane(struct ia_css_frame_plane *plane, - unsigned int width, - unsigned int stride, - unsigned int height, - unsigned int offset) -{ - plane->height = height; - plane->width = width; - plane->stride = stride; - plane->offset = offset; -} - -static void frame_init_single_plane(struct ia_css_frame *frame, - struct ia_css_frame_plane *plane, - unsigned int height, - unsigned int subpixels_per_line, - unsigned int bytes_per_pixel) -{ - unsigned int stride; - - stride = subpixels_per_line * bytes_per_pixel; - /* Frame height needs to be even number - needed by hw ISYS2401 - In case of odd number, round up to even. - Images won't be impacted by this round up, - only needed by jpeg/embedded data. - As long as buffer allocation and release are using data_bytes, - there won't be memory leak. 
*/ - frame->data_bytes = stride * CEIL_MUL2(height, 2); - frame_init_plane(plane, subpixels_per_line, stride, height, 0); - return; -} - -static void frame_init_raw_single_plane( - struct ia_css_frame *frame, - struct ia_css_frame_plane *plane, - unsigned int height, - unsigned int subpixels_per_line, - unsigned int bits_per_pixel) -{ - unsigned int stride; - assert(frame != NULL); - - stride = HIVE_ISP_DDR_WORD_BYTES * - CEIL_DIV(subpixels_per_line, - HIVE_ISP_DDR_WORD_BITS / bits_per_pixel); - frame->data_bytes = stride * height; - frame_init_plane(plane, subpixels_per_line, stride, height, 0); - return; -} - -static void frame_init_mipi_plane(struct ia_css_frame *frame, - struct ia_css_frame_plane *plane, - unsigned int height, - unsigned int subpixels_per_line, - unsigned int bytes_per_pixel) -{ - unsigned int stride; - - stride = subpixels_per_line * bytes_per_pixel; - frame->data_bytes = 8388608; /* 8*1024*1024 */ - frame->valid = false; - frame->contiguous = true; - frame_init_plane(plane, subpixels_per_line, stride, height, 0); - return; -} - -static void frame_init_nv_planes(struct ia_css_frame *frame, - unsigned int horizontal_decimation, - unsigned int vertical_decimation, - unsigned int bytes_per_element) -{ - unsigned int y_width = frame->info.padded_width; - unsigned int y_height = frame->info.res.height; - unsigned int uv_width; - unsigned int uv_height; - unsigned int y_bytes; - unsigned int uv_bytes; - unsigned int y_stride; - unsigned int uv_stride; - - assert(horizontal_decimation != 0 && vertical_decimation != 0); - - uv_width = 2 * (y_width / horizontal_decimation); - uv_height = y_height / vertical_decimation; - - if (IA_CSS_FRAME_FORMAT_NV12_TILEY == frame->info.format) { - y_width = CEIL_MUL(y_width, NV12_TILEY_TILE_WIDTH); - uv_width = CEIL_MUL(uv_width, NV12_TILEY_TILE_WIDTH); - y_height = CEIL_MUL(y_height, NV12_TILEY_TILE_HEIGHT); - uv_height = CEIL_MUL(uv_height, NV12_TILEY_TILE_HEIGHT); - } - - y_stride = y_width * bytes_per_element; - 
uv_stride = uv_width * bytes_per_element; - y_bytes = y_stride * y_height; - uv_bytes = uv_stride * uv_height; - - frame->data_bytes = y_bytes + uv_bytes; - frame_init_plane(&frame->planes.nv.y, y_width, y_stride, y_height, 0); - frame_init_plane(&frame->planes.nv.uv, uv_width, - uv_stride, uv_height, y_bytes); - return; -} - -static void frame_init_yuv_planes(struct ia_css_frame *frame, - unsigned int horizontal_decimation, - unsigned int vertical_decimation, - bool swap_uv, - unsigned int bytes_per_element) -{ - unsigned int y_width = frame->info.padded_width, - y_height = frame->info.res.height, - uv_width = y_width / horizontal_decimation, - uv_height = y_height / vertical_decimation, - y_stride, y_bytes, uv_bytes, uv_stride; - - y_stride = y_width * bytes_per_element; - uv_stride = uv_width * bytes_per_element; - y_bytes = y_stride * y_height; - uv_bytes = uv_stride * uv_height; - - frame->data_bytes = y_bytes + 2 * uv_bytes; - frame_init_plane(&frame->planes.yuv.y, y_width, y_stride, y_height, 0); - if (swap_uv) { - frame_init_plane(&frame->planes.yuv.v, uv_width, uv_stride, - uv_height, y_bytes); - frame_init_plane(&frame->planes.yuv.u, uv_width, uv_stride, - uv_height, y_bytes + uv_bytes); - } else { - frame_init_plane(&frame->planes.yuv.u, uv_width, uv_stride, - uv_height, y_bytes); - frame_init_plane(&frame->planes.yuv.v, uv_width, uv_stride, - uv_height, y_bytes + uv_bytes); - } - return; -} - -static void frame_init_rgb_planes(struct ia_css_frame *frame, - unsigned int bytes_per_element) -{ - unsigned int width = frame->info.res.width, - height = frame->info.res.height, stride, bytes; - - stride = width * bytes_per_element; - bytes = stride * height; - frame->data_bytes = 3 * bytes; - frame_init_plane(&frame->planes.planar_rgb.r, width, stride, height, 0); - frame_init_plane(&frame->planes.planar_rgb.g, - width, stride, height, 1 * bytes); - frame_init_plane(&frame->planes.planar_rgb.b, - width, stride, height, 2 * bytes); - return; -} - -static void 
frame_init_qplane6_planes(struct ia_css_frame *frame) -{ - unsigned int width = frame->info.padded_width / 2, - height = frame->info.res.height / 2, bytes, stride; - - stride = width * 2; - bytes = stride * height; - - frame->data_bytes = 6 * bytes; - frame_init_plane(&frame->planes.plane6.r, - width, stride, height, 0 * bytes); - frame_init_plane(&frame->planes.plane6.r_at_b, - width, stride, height, 1 * bytes); - frame_init_plane(&frame->planes.plane6.gr, - width, stride, height, 2 * bytes); - frame_init_plane(&frame->planes.plane6.gb, - width, stride, height, 3 * bytes); - frame_init_plane(&frame->planes.plane6.b, - width, stride, height, 4 * bytes); - frame_init_plane(&frame->planes.plane6.b_at_r, - width, stride, height, 5 * bytes); - return; -} - -static enum ia_css_err frame_allocate_buffer_data(struct ia_css_frame *frame) -{ -#ifdef ISP2401 - IA_CSS_ENTER_LEAVE_PRIVATE("frame->data_bytes=%d\n", frame->data_bytes); -#endif - frame->data = mmgr_alloc_attr(frame->data_bytes, - frame->contiguous ? 
- MMGR_ATTRIBUTE_CONTIGUOUS : - MMGR_ATTRIBUTE_DEFAULT); - - if (frame->data == mmgr_NULL) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - return IA_CSS_SUCCESS; -} - -static enum ia_css_err frame_allocate_with_data(struct ia_css_frame **frame, - unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int padded_width, - unsigned int raw_bit_depth, - bool contiguous) -{ - enum ia_css_err err; - struct ia_css_frame *me = frame_create(width, - height, - format, - padded_width, - raw_bit_depth, - contiguous, - true); - - if (me == NULL) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - err = ia_css_frame_init_planes(me); - - if (err == IA_CSS_SUCCESS) - err = frame_allocate_buffer_data(me); - - if (err != IA_CSS_SUCCESS) { - sh_css_free(me); -#ifndef ISP2401 - return err; -#else - me = NULL; -#endif - } - - *frame = me; - - return err; -} - -static struct ia_css_frame *frame_create(unsigned int width, - unsigned int height, - enum ia_css_frame_format format, - unsigned int padded_width, - unsigned int raw_bit_depth, - bool contiguous, - bool valid) -{ - struct ia_css_frame *me = sh_css_malloc(sizeof(*me)); - - if (me == NULL) - return NULL; - - memset(me, 0, sizeof(*me)); - me->info.res.width = width; - me->info.res.height = height; - me->info.format = format; - me->info.padded_width = padded_width; - me->info.raw_bit_depth = raw_bit_depth; - me->contiguous = contiguous; - me->valid = valid; - me->data_bytes = 0; - me->data = mmgr_NULL; - /* To indicate it is not valid frame. 
*/ - me->dynamic_queue_id = (int)SH_CSS_INVALID_QUEUE_ID; - me->buf_type = IA_CSS_BUFFER_TYPE_INVALID; - - return me; -} - -static unsigned -ia_css_elems_bytes_from_info(const struct ia_css_frame_info *info) -{ - if (info->format == IA_CSS_FRAME_FORMAT_RGB565) - return 2; /* bytes per pixel */ - if (info->format == IA_CSS_FRAME_FORMAT_YUV420_16) - return 2; /* bytes per pixel */ - if (info->format == IA_CSS_FRAME_FORMAT_YUV422_16) - return 2; /* bytes per pixel */ - /* Note: Essentially NV12_16 is a 2 bytes per pixel format, this return value is used - * to configure DMA for the output buffer, - * At least in SKC this data is overwriten by isp_output_init.sp.c except for elements(elems), - * which is configured from this return value, - * NV12_16 is implemented by a double buffer of 8 bit elements hence elems should be configured as 8 */ - if (info->format == IA_CSS_FRAME_FORMAT_NV12_16) - return 1; /* bytes per pixel */ - - if (info->format == IA_CSS_FRAME_FORMAT_RAW - || (info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)) { - if (info->raw_bit_depth) - return CEIL_DIV(info->raw_bit_depth,8); - else - return 2; /* bytes per pixel */ - } - if (info->format == IA_CSS_FRAME_FORMAT_PLANAR_RGB888) - return 3; /* bytes per pixel */ - if (info->format == IA_CSS_FRAME_FORMAT_RGBA888) - return 4; /* bytes per pixel */ - if (info->format == IA_CSS_FRAME_FORMAT_QPLANE6) - return 2; /* bytes per pixel */ - return 1; /* Default is 1 byte per pixel */ -} - -void ia_css_frame_info_to_frame_sp_info( - struct ia_css_frame_sp_info *to, - const struct ia_css_frame_info *from) -{ - ia_css_resolution_to_sp_resolution(&to->res, &from->res); - to->padded_width = (uint16_t)from->padded_width; - to->format = (uint8_t)from->format; - to->raw_bit_depth = (uint8_t)from->raw_bit_depth; - to->raw_bayer_order = from->raw_bayer_order; -} - -void ia_css_resolution_to_sp_resolution( - struct ia_css_sp_resolution *to, - const struct ia_css_resolution *from) -{ - to->width = (uint16_t)from->width; - 
to->height = (uint16_t)from->height; -} -#ifdef ISP2401 - -enum ia_css_err -ia_css_frame_find_crop_resolution(const struct ia_css_resolution *in_res, - const struct ia_css_resolution *out_res, - struct ia_css_resolution *crop_res) -{ - uint32_t wd_even_ceil, ht_even_ceil; - uint32_t in_ratio, out_ratio; - - if ((in_res == NULL) || (out_res == NULL) || (crop_res == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - IA_CSS_ENTER_PRIVATE("in(%ux%u) -> out(%ux%u)", in_res->width, - in_res->height, out_res->width, out_res->height); - - if ((in_res->width == 0) - || (in_res->height == 0) - || (out_res->width == 0) - || (out_res->height == 0)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - if ((out_res->width > in_res->width) || - (out_res->height > in_res->height)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - /* If aspect ratio (width/height) of out_res is higher than the aspect - * ratio of the in_res, then we crop vertically, otherwise we crop - * horizontally. - */ - in_ratio = in_res->width * out_res->height; - out_ratio = out_res->width * in_res->height; - - if (in_ratio == out_ratio) { - crop_res->width = in_res->width; - crop_res->height = in_res->height; - } else if (out_ratio > in_ratio) { - crop_res->width = in_res->width; - crop_res->height = ROUND_DIV(out_res->height * crop_res->width, - out_res->width); - } else { - crop_res->height = in_res->height; - crop_res->width = ROUND_DIV(out_res->width * crop_res->height, - out_res->height); - } - - /* Round new (cropped) width and height to an even number. - * binarydesc_calculate_bds_factor is such that we should consider as - * much of the input as possible. This is different only when we end up - * with an odd number in the last step. So, we take the next even number - * if it falls within the input, otherwise take the previous even no. 
- */ - wd_even_ceil = EVEN_CEIL(crop_res->width); - ht_even_ceil = EVEN_CEIL(crop_res->height); - if ((wd_even_ceil > in_res->width) || (ht_even_ceil > in_res->height)) { - crop_res->width = EVEN_FLOOR(crop_res->width); - crop_res->height = EVEN_FLOOR(crop_res->height); - } else { - crop_res->width = wd_even_ceil; - crop_res->height = ht_even_ceil; - } - - IA_CSS_LEAVE_PRIVATE("in(%ux%u) -> out(%ux%u)", crop_res->width, - crop_res->height, out_res->width, out_res->height); - return IA_CSS_SUCCESS; -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/interface/ia_css_ifmtr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/interface/ia_css_ifmtr.h deleted file mode 100644 index d02bff1bbf46..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/interface/ia_css_ifmtr.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __IA_CSS_IFMTR_H__ -#define __IA_CSS_IFMTR_H__ - -#include -#include -#include - -extern bool ifmtr_set_if_blocking_mode_reset; - -unsigned int ia_css_ifmtr_lines_needed_for_bayer_order( - const struct ia_css_stream_config *config); - -unsigned int ia_css_ifmtr_columns_needed_for_bayer_order( - const struct ia_css_stream_config *config); - -enum ia_css_err ia_css_ifmtr_configure(struct ia_css_stream_config *config, - struct ia_css_binary *binary); - -#endif /* __IA_CSS_IFMTR_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c deleted file mode 100644 index 1bed027435fd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c +++ /dev/null @@ -1,569 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for -more details. -*/ -#endif - -#include "system_global.h" -#include - -#ifdef USE_INPUT_SYSTEM_VERSION_2 - -#include "ia_css_ifmtr.h" -#include -#include "sh_css_internal.h" -#include "input_formatter.h" -#include "assert_support.h" -#include "sh_css_sp.h" -#include "isp/modes/interface/input_buf.isp.h" - -/************************************************************ - * Static functions declarations - ************************************************************/ -static enum ia_css_err ifmtr_start_column( - const struct ia_css_stream_config *config, - unsigned int bin_in, - unsigned int *start_column); - -static enum ia_css_err ifmtr_input_start_line( - const struct ia_css_stream_config *config, - unsigned int bin_in, - unsigned int *start_line); - -static void ifmtr_set_if_blocking_mode( - const input_formatter_cfg_t * const config_a, - const input_formatter_cfg_t * const config_b); - -/************************************************************ - * Public functions - ************************************************************/ - -/* ISP expects GRBG bayer order, we skip one line and/or one row - * to correct in case the input bayer order is different. 
- */ -unsigned int ia_css_ifmtr_lines_needed_for_bayer_order( - const struct ia_css_stream_config *config) -{ - assert(config != NULL); - if ((IA_CSS_BAYER_ORDER_BGGR == config->input_config.bayer_order) - || (IA_CSS_BAYER_ORDER_GBRG == config->input_config.bayer_order)) - return 1; - - return 0; -} - -unsigned int ia_css_ifmtr_columns_needed_for_bayer_order( - const struct ia_css_stream_config *config) -{ - assert(config != NULL); - if ((IA_CSS_BAYER_ORDER_RGGB == config->input_config.bayer_order) - || (IA_CSS_BAYER_ORDER_GBRG == config->input_config.bayer_order)) - return 1; - - return 0; -} - -enum ia_css_err ia_css_ifmtr_configure(struct ia_css_stream_config *config, - struct ia_css_binary *binary) -{ - unsigned int start_line, start_column = 0, - cropped_height, - cropped_width, - num_vectors, - buffer_height = 2, - buffer_width, - two_ppc, - vmem_increment = 0, - deinterleaving = 0, - deinterleaving_b = 0, - width_a = 0, - width_b = 0, - bits_per_pixel, - vectors_per_buffer, - vectors_per_line = 0, - buffers_per_line = 0, - buf_offset_a = 0, - buf_offset_b = 0, - line_width = 0, - width_b_factor = 1, start_column_b, - left_padding = 0; - input_formatter_cfg_t if_a_config, if_b_config; - enum atomisp_input_format input_format; - enum ia_css_err err = IA_CSS_SUCCESS; - uint8_t if_config_index; - - /* Determine which input formatter config set is targeted. */ - /* Index is equal to the CSI-2 port used. 
*/ - enum mipi_port_id port; - - if (binary) { - cropped_height = binary->in_frame_info.res.height; - cropped_width = binary->in_frame_info.res.width; - /* This should correspond to the input buffer definition for - ISP binaries in input_buf.isp.h */ - if (binary->info->sp.enable.continuous && binary->info->sp.pipeline.mode != IA_CSS_BINARY_MODE_COPY) - buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS; - else - buffer_width = binary->info->sp.input.max_width; - input_format = binary->input_format; - } else { - /* sp raw copy pipe (IA_CSS_PIPE_MODE_COPY): binary is NULL */ - cropped_height = config->input_config.input_res.height; - cropped_width = config->input_config.input_res.width; - buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS; - input_format = config->input_config.format; - } - two_ppc = config->pixels_per_clock == 2; - if (config->mode == IA_CSS_INPUT_MODE_SENSOR - || config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { - port = config->source.port.port; - if_config_index = (uint8_t) (port - MIPI_PORT0_ID); - } else if (config->mode == IA_CSS_INPUT_MODE_MEMORY) { - if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED; - } else { - if_config_index = 0; - } - - assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS - || if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED); - - /* TODO: check to see if input is RAW and if current mode interprets - * RAW data in any particular bayer order. copy binary with output - * format other than raw should not result in dropping lines and/or - * columns. 
- */ - err = ifmtr_input_start_line(config, cropped_height, &start_line); - if (err != IA_CSS_SUCCESS) - return err; - err = ifmtr_start_column(config, cropped_width, &start_column); - if (err != IA_CSS_SUCCESS) - return err; - - if (config->left_padding == -1) - if (!binary) - /* sp raw copy pipe: set left_padding value */ - left_padding = 0; - else - left_padding = binary->left_padding; - else - left_padding = 2*ISP_VEC_NELEMS - config->left_padding; - - - if (left_padding) { - num_vectors = CEIL_DIV(cropped_width + left_padding, - ISP_VEC_NELEMS); - } else { - num_vectors = CEIL_DIV(cropped_width, ISP_VEC_NELEMS); - num_vectors *= buffer_height; - /* todo: in case of left padding, - num_vectors is vectors per line, - otherwise vectors per line * buffer_height. */ - } - - start_column_b = start_column; - - bits_per_pixel = input_formatter_get_alignment(INPUT_FORMATTER0_ID) - * 8 / ISP_VEC_NELEMS; - switch (input_format) { - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: - if (two_ppc) { - vmem_increment = 1; - deinterleaving = 1; - deinterleaving_b = 1; - /* half lines */ - width_a = cropped_width * deinterleaving / 2; - width_b_factor = 2; - /* full lines */ - width_b = width_a * width_b_factor; - buffer_width *= deinterleaving * 2; - /* Patch from bayer to yuv */ - num_vectors *= deinterleaving; - buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS; - vectors_per_line = num_vectors / buffer_height; - /* Even lines are half size */ - line_width = vectors_per_line * - input_formatter_get_alignment(INPUT_FORMATTER0_ID) / - 2; - start_column /= 2; - } else { - vmem_increment = 1; - deinterleaving = 3; - width_a = cropped_width * deinterleaving / 2; - buffer_width = buffer_width * deinterleaving / 2; - /* Patch from bayer to yuv */ - num_vectors = num_vectors / 2 * deinterleaving; - start_column = start_column * deinterleaving / 2; - } - break; - case ATOMISP_INPUT_FORMAT_YUV420_8: - case ATOMISP_INPUT_FORMAT_YUV420_10: - case ATOMISP_INPUT_FORMAT_YUV420_16: - if (two_ppc) 
{ - vmem_increment = 1; - deinterleaving = 1; - width_a = width_b = cropped_width * deinterleaving / 2; - buffer_width *= deinterleaving * 2; - num_vectors *= deinterleaving; - buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS; - vectors_per_line = num_vectors / buffer_height; - /* Even lines are half size */ - line_width = vectors_per_line * - input_formatter_get_alignment(INPUT_FORMATTER0_ID) / - 2; - start_column *= deinterleaving; - start_column /= 2; - start_column_b = start_column; - } else { - vmem_increment = 1; - deinterleaving = 1; - width_a = cropped_width * deinterleaving; - buffer_width *= deinterleaving * 2; - num_vectors *= deinterleaving; - start_column *= deinterleaving; - } - break; - case ATOMISP_INPUT_FORMAT_YUV422_8: - case ATOMISP_INPUT_FORMAT_YUV422_10: - case ATOMISP_INPUT_FORMAT_YUV422_16: - if (two_ppc) { - vmem_increment = 1; - deinterleaving = 1; - width_a = width_b = cropped_width * deinterleaving; - buffer_width *= deinterleaving * 2; - num_vectors *= deinterleaving; - start_column *= deinterleaving; - buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS; - start_column_b = start_column; - } else { - vmem_increment = 1; - deinterleaving = 2; - width_a = cropped_width * deinterleaving; - buffer_width *= deinterleaving; - num_vectors *= deinterleaving; - start_column *= deinterleaving; - } - break; - case ATOMISP_INPUT_FORMAT_RGB_444: - case ATOMISP_INPUT_FORMAT_RGB_555: - case ATOMISP_INPUT_FORMAT_RGB_565: - case ATOMISP_INPUT_FORMAT_RGB_666: - case ATOMISP_INPUT_FORMAT_RGB_888: - num_vectors *= 2; - if (two_ppc) { - deinterleaving = 2; /* BR in if_a, G in if_b */ - deinterleaving_b = 1; /* BR in if_a, G in if_b */ - buffers_per_line = 4; - start_column_b = start_column; - start_column *= deinterleaving; - start_column_b *= deinterleaving_b; - } else { - deinterleaving = 3; /* BGR */ - buffers_per_line = 3; - start_column *= deinterleaving; - } - vmem_increment = 1; - width_a = cropped_width * deinterleaving; - width_b = cropped_width * 
deinterleaving_b; - buffer_width *= buffers_per_line; - /* Patch from bayer to rgb */ - num_vectors = num_vectors / 2 * deinterleaving; - buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS; - break; - case ATOMISP_INPUT_FORMAT_RAW_6: - case ATOMISP_INPUT_FORMAT_RAW_7: - case ATOMISP_INPUT_FORMAT_RAW_8: - case ATOMISP_INPUT_FORMAT_RAW_10: - case ATOMISP_INPUT_FORMAT_RAW_12: - if (two_ppc) { - int crop_col = (start_column % 2) == 1; - vmem_increment = 2; - deinterleaving = 1; - width_a = width_b = cropped_width / 2; - - /* When two_ppc is enabled AND we need to crop one extra - * column, if_a crops by one extra and we swap the - * output offsets to interleave the bayer pattern in - * the correct order. - */ - buf_offset_a = crop_col ? 1 : 0; - buf_offset_b = crop_col ? 0 : 1; - start_column_b = start_column / 2; - start_column = start_column / 2 + crop_col; - } else { - vmem_increment = 1; - deinterleaving = 2; - if ((!binary) || (config->continuous && binary - && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)) { - /* !binary -> sp raw copy pipe, no deinterleaving */ - deinterleaving = 1; - } - width_a = cropped_width; - /* Must be multiple of deinterleaving */ - num_vectors = CEIL_MUL(num_vectors, deinterleaving); - } - buffer_height *= 2; - if ((!binary) || config->continuous) - /* !binary -> sp raw copy pipe */ - buffer_height *= 2; - vectors_per_line = CEIL_DIV(cropped_width, ISP_VEC_NELEMS); - vectors_per_line = CEIL_MUL(vectors_per_line, deinterleaving); - break; - case ATOMISP_INPUT_FORMAT_RAW_14: - case ATOMISP_INPUT_FORMAT_RAW_16: - if (two_ppc) { - num_vectors *= 2; - vmem_increment = 1; - deinterleaving = 2; - width_a = width_b = cropped_width; - /* B buffer is one line further */ - buf_offset_b = buffer_width / ISP_VEC_NELEMS; - bits_per_pixel *= 2; - } else { - vmem_increment = 1; - deinterleaving = 2; - width_a = cropped_width; - start_column /= deinterleaving; - } - buffer_height *= 2; - break; - case ATOMISP_INPUT_FORMAT_BINARY_8: - case 
ATOMISP_INPUT_FORMAT_GENERIC_SHORT1: - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT2: - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT3: - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT4: - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT5: - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT6: - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT7: - case ATOMISP_INPUT_FORMAT_GENERIC_SHORT8: - case ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT: - case ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT: - case ATOMISP_INPUT_FORMAT_EMBEDDED: - case ATOMISP_INPUT_FORMAT_USER_DEF1: - case ATOMISP_INPUT_FORMAT_USER_DEF2: - case ATOMISP_INPUT_FORMAT_USER_DEF3: - case ATOMISP_INPUT_FORMAT_USER_DEF4: - case ATOMISP_INPUT_FORMAT_USER_DEF5: - case ATOMISP_INPUT_FORMAT_USER_DEF6: - case ATOMISP_INPUT_FORMAT_USER_DEF7: - case ATOMISP_INPUT_FORMAT_USER_DEF8: - break; - } - if (width_a == 0) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - if (two_ppc) - left_padding /= 2; - - /* Default values */ - if (left_padding) - vectors_per_line = num_vectors; - if (!vectors_per_line) { - vectors_per_line = CEIL_MUL(num_vectors / buffer_height, - deinterleaving); - line_width = 0; - } - if (!line_width) - line_width = vectors_per_line * - input_formatter_get_alignment(INPUT_FORMATTER0_ID); - if (!buffers_per_line) - buffers_per_line = deinterleaving; - line_width = CEIL_MUL(line_width, - input_formatter_get_alignment(INPUT_FORMATTER0_ID) - * vmem_increment); - - vectors_per_buffer = buffer_height * buffer_width / ISP_VEC_NELEMS; - - if (config->mode == IA_CSS_INPUT_MODE_TPG && - ((binary && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_VIDEO) || - (!binary))) { - /* !binary -> sp raw copy pipe */ - /* workaround for TPG in video mode */ - start_line = 0; - start_column = 0; - cropped_height -= start_line; - width_a -= start_column; - } - - if_a_config.start_line = start_line; - if_a_config.start_column = start_column; - if_a_config.left_padding = left_padding / deinterleaving; - if_a_config.cropped_height = cropped_height; - if_a_config.cropped_width = 
width_a; - if_a_config.deinterleaving = deinterleaving; - if_a_config.buf_vecs = vectors_per_buffer; - if_a_config.buf_start_index = buf_offset_a; - if_a_config.buf_increment = vmem_increment; - if_a_config.buf_eol_offset = - buffer_width * bits_per_pixel / 8 - line_width; - if_a_config.is_yuv420_format = - (input_format == ATOMISP_INPUT_FORMAT_YUV420_8) - || (input_format == ATOMISP_INPUT_FORMAT_YUV420_10) - || (input_format == ATOMISP_INPUT_FORMAT_YUV420_16); - if_a_config.block_no_reqs = (config->mode != IA_CSS_INPUT_MODE_SENSOR); - - if (two_ppc) { - if (deinterleaving_b) { - deinterleaving = deinterleaving_b; - width_b = cropped_width * deinterleaving; - buffer_width *= deinterleaving; - /* Patch from bayer to rgb */ - num_vectors = num_vectors / 2 * - deinterleaving * width_b_factor; - vectors_per_line = num_vectors / buffer_height; - line_width = vectors_per_line * - input_formatter_get_alignment(INPUT_FORMATTER0_ID); - } - if_b_config.start_line = start_line; - if_b_config.start_column = start_column_b; - if_b_config.left_padding = left_padding / deinterleaving; - if_b_config.cropped_height = cropped_height; - if_b_config.cropped_width = width_b; - if_b_config.deinterleaving = deinterleaving; - if_b_config.buf_vecs = vectors_per_buffer; - if_b_config.buf_start_index = buf_offset_b; - if_b_config.buf_increment = vmem_increment; - if_b_config.buf_eol_offset = - buffer_width * bits_per_pixel / 8 - line_width; - if_b_config.is_yuv420_format = - input_format == ATOMISP_INPUT_FORMAT_YUV420_8 - || input_format == ATOMISP_INPUT_FORMAT_YUV420_10 - || input_format == ATOMISP_INPUT_FORMAT_YUV420_16; - if_b_config.block_no_reqs = - (config->mode != IA_CSS_INPUT_MODE_SENSOR); - - if (SH_CSS_IF_CONFIG_NOT_NEEDED != if_config_index) { - assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS); - - ifmtr_set_if_blocking_mode(&if_a_config, &if_b_config); - /* Set the ifconfigs to SP group */ - sh_css_sp_set_if_configs(&if_a_config, &if_b_config, - if_config_index); - } - } else { - 
if (SH_CSS_IF_CONFIG_NOT_NEEDED != if_config_index) { - assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS); - - ifmtr_set_if_blocking_mode(&if_a_config, NULL); - /* Set the ifconfigs to SP group */ - sh_css_sp_set_if_configs(&if_a_config, NULL, - if_config_index); - } - } - - return IA_CSS_SUCCESS; -} - -bool ifmtr_set_if_blocking_mode_reset = true; - -/************************************************************ - * Static functions - ************************************************************/ -static void ifmtr_set_if_blocking_mode( - const input_formatter_cfg_t * const config_a, - const input_formatter_cfg_t * const config_b) -{ - int i; - bool block[] = { false, false, false, false }; - assert(N_INPUT_FORMATTER_ID <= (ARRAY_SIZE(block))); - -#if !defined(IS_ISP_2400_SYSTEM) -#error "ifmtr_set_if_blocking_mode: ISP_SYSTEM must be one of {IS_ISP_2400_SYSTEM}" -#endif - - block[INPUT_FORMATTER0_ID] = (bool)config_a->block_no_reqs; - if (NULL != config_b) - block[INPUT_FORMATTER1_ID] = (bool)config_b->block_no_reqs; - - /* TODO: next could cause issues when streams are started after - * eachother. */ - /*IF should not be reconfigured/reset from host */ - if (ifmtr_set_if_blocking_mode_reset) { - ifmtr_set_if_blocking_mode_reset = false; - for (i = 0; i < N_INPUT_FORMATTER_ID; i++) { - input_formatter_ID_t id = (input_formatter_ID_t) i; - input_formatter_rst(id); - input_formatter_set_fifo_blocking_mode(id, block[id]); - } - } - - return; -} - -static enum ia_css_err ifmtr_start_column( - const struct ia_css_stream_config *config, - unsigned int bin_in, - unsigned int *start_column) -{ - unsigned int in = config->input_config.input_res.width, start, - for_bayer = ia_css_ifmtr_columns_needed_for_bayer_order(config); - - if (bin_in + 2 * for_bayer > in) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - /* On the hardware, we want to use the middle of the input, so we - * divide the start column by 2. 
*/ - start = (in - bin_in) / 2; - /* in case the number of extra columns is 2 or odd, we round the start - * column down */ - start &= ~0x1; - - /* now we add the one column (if needed) to correct for the bayer - * order). - */ - start += for_bayer; - *start_column = start; - return IA_CSS_SUCCESS; -} - -static enum ia_css_err ifmtr_input_start_line( - const struct ia_css_stream_config *config, - unsigned int bin_in, - unsigned int *start_line) -{ - unsigned int in = config->input_config.input_res.height, start, - for_bayer = ia_css_ifmtr_lines_needed_for_bayer_order(config); - - if (bin_in + 2 * for_bayer > in) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - /* On the hardware, we want to use the middle of the input, so we - * divide the start line by 2. On the simulator, we cannot handle extra - * lines at the end of the frame. - */ - start = (in - bin_in) / 2; - /* in case the number of extra lines is 2 or odd, we round the start - * line down. - */ - start &= ~0x1; - - /* now we add the one line (if needed) to correct for the bayer order */ - start += for_bayer; - *start_line = start; - return IA_CSS_SUCCESS; -} - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/interface/ia_css_inputfifo.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/interface/ia_css_inputfifo.h deleted file mode 100644 index 545f9e2da59e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/interface/ia_css_inputfifo.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _IA_CSS_INPUTFIFO_H -#define _IA_CSS_INPUTFIFO_H - -#include -#include - -#include "ia_css_stream_format.h" - -/* SP access */ -void ia_css_inputfifo_send_input_frame( - const unsigned short *data, - unsigned int width, - unsigned int height, - unsigned int ch_id, - enum atomisp_input_format input_format, - bool two_ppc); - -void ia_css_inputfifo_start_frame( - unsigned int ch_id, - enum atomisp_input_format input_format, - bool two_ppc); - -void ia_css_inputfifo_send_line( - unsigned int ch_id, - const unsigned short *data, - unsigned int width, - const unsigned short *data2, - unsigned int width2); - -void ia_css_inputfifo_send_embedded_line( - unsigned int ch_id, - enum atomisp_input_format data_type, - const unsigned short *data, - unsigned int width); - -void ia_css_inputfifo_end_frame( - unsigned int ch_id); - -#endif /* _IA_CSS_INPUTFIFO_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c deleted file mode 100644 index 24ca4aaf8df1..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c +++ /dev/null @@ -1,613 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include "platform_support.h" - -#include "ia_css_inputfifo.h" - -#include "device_access.h" - -#define __INLINE_SP__ -#include "sp.h" -#define __INLINE_ISP__ -#include "isp.h" -#define __INLINE_IRQ__ -#include "irq.h" -#define __INLINE_FIFO_MONITOR__ -#include "fifo_monitor.h" - -#define __INLINE_EVENT__ -#include "event_fifo.h" -#define __INLINE_SP__ - -#if !defined(HAS_NO_INPUT_SYSTEM) -#include "input_system.h" /* MIPI_PREDICTOR_NONE,... 
*/ -#endif - -#include "assert_support.h" - -/* System independent */ -#include "sh_css_internal.h" -#if !defined(HAS_NO_INPUT_SYSTEM) -#include "ia_css_isys.h" -#endif - -#define HBLANK_CYCLES (187) -#define MARKER_CYCLES (6) - -#if !defined(HAS_NO_INPUT_SYSTEM) -#include -#endif - -/* The data type is used to send special cases: - * yuv420: odd lines (1, 3 etc) are twice as wide as even - * lines (0, 2, 4 etc). - * rgb: for two pixels per clock, the R and B values are sent - * to output_0 while only G is sent to output_1. This means - * that output_1 only gets half the number of values of output_0. - * WARNING: This type should also be used for Legacy YUV420. - * regular: used for all other data types (RAW, YUV422, etc) - */ -enum inputfifo_mipi_data_type { - inputfifo_mipi_data_type_regular, - inputfifo_mipi_data_type_yuv420, - inputfifo_mipi_data_type_yuv420_legacy, - inputfifo_mipi_data_type_rgb, -}; -#if !defined(HAS_NO_INPUT_SYSTEM) -static unsigned int inputfifo_curr_ch_id, inputfifo_curr_fmt_type; -#endif -struct inputfifo_instance { - unsigned int ch_id; - enum atomisp_input_format input_format; - bool two_ppc; - bool streaming; - unsigned int hblank_cycles; - unsigned int marker_cycles; - unsigned int fmt_type; - enum inputfifo_mipi_data_type type; -}; -#if !defined(HAS_NO_INPUT_SYSTEM) -/* - * Maintain a basic streaming to Mipi administration with ch_id as index - * ch_id maps on the "Mipi virtual channel ID" and can have value 0..3 - */ -#define INPUTFIFO_NR_OF_S2M_CHANNELS (4) -static struct inputfifo_instance - inputfifo_inst_admin[INPUTFIFO_NR_OF_S2M_CHANNELS]; - -/* Streaming to MIPI */ -static unsigned inputfifo_wrap_marker( -/* static inline unsigned inputfifo_wrap_marker( */ - unsigned marker) -{ - return marker | - (inputfifo_curr_ch_id << HIVE_STR_TO_MIPI_CH_ID_LSB) | - (inputfifo_curr_fmt_type << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB); -} - -static inline void -_sh_css_fifo_snd(unsigned token) -{ - while (!can_event_send_token(STR2MIPI_EVENT_ID)) - 
hrt_sleep(); - event_send_token(STR2MIPI_EVENT_ID, token); - return; -} - -static void inputfifo_send_data_a( -/* static inline void inputfifo_send_data_a( */ -unsigned int data) -{ - unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) | - (data << HIVE_STR_TO_MIPI_DATA_A_LSB); - _sh_css_fifo_snd(token); - return; -} - - - -static void inputfifo_send_data_b( -/* static inline void inputfifo_send_data_b( */ - unsigned int data) -{ - unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) | - (data << _HIVE_STR_TO_MIPI_DATA_B_LSB); - _sh_css_fifo_snd(token); - return; -} - - - -static void inputfifo_send_data( -/* static inline void inputfifo_send_data( */ - unsigned int a, - unsigned int b) -{ - unsigned int token = ((1 << HIVE_STR_TO_MIPI_VALID_A_BIT) | - (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) | - (a << HIVE_STR_TO_MIPI_DATA_A_LSB) | - (b << _HIVE_STR_TO_MIPI_DATA_B_LSB)); - _sh_css_fifo_snd(token); - return; -} - - - -static void inputfifo_send_sol(void) -/* static inline void inputfifo_send_sol(void) */ -{ - hrt_data token = inputfifo_wrap_marker( - 1 << HIVE_STR_TO_MIPI_SOL_BIT); - - _sh_css_fifo_snd(token); - return; -} - - - -static void inputfifo_send_eol(void) -/* static inline void inputfifo_send_eol(void) */ -{ - hrt_data token = inputfifo_wrap_marker( - 1 << HIVE_STR_TO_MIPI_EOL_BIT); - _sh_css_fifo_snd(token); - return; -} - - - -static void inputfifo_send_sof(void) -/* static inline void inputfifo_send_sof(void) */ -{ - hrt_data token = inputfifo_wrap_marker( - 1 << HIVE_STR_TO_MIPI_SOF_BIT); - - _sh_css_fifo_snd(token); - return; -} - - - -static void inputfifo_send_eof(void) -/* static inline void inputfifo_send_eof(void) */ -{ - hrt_data token = inputfifo_wrap_marker( - 1 << HIVE_STR_TO_MIPI_EOF_BIT); - _sh_css_fifo_snd(token); - return; -} - - - -#ifdef __ON__ -static void inputfifo_send_ch_id( -/* static inline void inputfifo_send_ch_id( */ - unsigned int ch_id) -{ - hrt_data token; - inputfifo_curr_ch_id = ch_id & _HIVE_ISP_CH_ID_MASK; - /* we 
send an zero marker, this will wrap the ch_id and - * fmt_type automatically. - */ - token = inputfifo_wrap_marker(0); - _sh_css_fifo_snd(token); - return; -} - -static void inputfifo_send_fmt_type( -/* static inline void inputfifo_send_fmt_type( */ - unsigned int fmt_type) -{ - hrt_data token; - inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK; - /* we send an zero marker, this will wrap the ch_id and - * fmt_type automatically. - */ - token = inputfifo_wrap_marker(0); - _sh_css_fifo_snd(token); - return; -} -#endif /* __ON__ */ - - - -static void inputfifo_send_ch_id_and_fmt_type( -/* static inline -void inputfifo_send_ch_id_and_fmt_type( */ - unsigned int ch_id, - unsigned int fmt_type) -{ - hrt_data token; - inputfifo_curr_ch_id = ch_id & _HIVE_ISP_CH_ID_MASK; - inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK; - /* we send an zero marker, this will wrap the ch_id and - * fmt_type automatically. - */ - token = inputfifo_wrap_marker(0); - _sh_css_fifo_snd(token); - return; -} - - - -static void inputfifo_send_empty_token(void) -/* static inline void inputfifo_send_empty_token(void) */ -{ - hrt_data token = inputfifo_wrap_marker(0); - _sh_css_fifo_snd(token); - return; -} - - - -static void inputfifo_start_frame( -/* static inline void inputfifo_start_frame( */ - unsigned int ch_id, - unsigned int fmt_type) -{ - inputfifo_send_ch_id_and_fmt_type(ch_id, fmt_type); - inputfifo_send_sof(); - return; -} - - - -static void inputfifo_end_frame( - unsigned int marker_cycles) -{ - unsigned int i; - for (i = 0; i < marker_cycles; i++) - inputfifo_send_empty_token(); - inputfifo_send_eof(); - return; -} - - - -static void inputfifo_send_line2( - const unsigned short *data, - unsigned int width, - const unsigned short *data2, - unsigned int width2, - unsigned int hblank_cycles, - unsigned int marker_cycles, - unsigned int two_ppc, - enum inputfifo_mipi_data_type type) -{ - unsigned int i, is_rgb = 0, is_legacy = 0; - - assert(data != NULL); - 
assert((data2 != NULL) || (width2 == 0)); - if (type == inputfifo_mipi_data_type_rgb) - is_rgb = 1; - - if (type == inputfifo_mipi_data_type_yuv420_legacy) - is_legacy = 1; - - for (i = 0; i < hblank_cycles; i++) - inputfifo_send_empty_token(); - inputfifo_send_sol(); - for (i = 0; i < marker_cycles; i++) - inputfifo_send_empty_token(); - for (i = 0; i < width; i++, data++) { - /* for RGB in two_ppc, we only actually send 2 pixels per - * clock in the even pixels (0, 2 etc). In the other cycles, - * we only send 1 pixel, to data[0]. - */ - unsigned int send_two_pixels = two_ppc; - if ((is_rgb || is_legacy) && (i % 3 == 2)) - send_two_pixels = 0; - if (send_two_pixels) { - if (i + 1 == width) { - /* for jpg (binary) copy, this can occur - * if the file contains an odd number of bytes. - */ - inputfifo_send_data( - data[0], 0); - } else { - inputfifo_send_data( - data[0], data[1]); - } - /* Additional increment because we send 2 pixels */ - data++; - i++; - } else if (two_ppc && is_legacy) { - inputfifo_send_data_b(data[0]); - } else { - inputfifo_send_data_a(data[0]); - } - } - - for (i = 0; i < width2; i++, data2++) { - /* for RGB in two_ppc, we only actually send 2 pixels per - * clock in the even pixels (0, 2 etc). In the other cycles, - * we only send 1 pixel, to data2[0]. - */ - unsigned int send_two_pixels = two_ppc; - if ((is_rgb || is_legacy) && (i % 3 == 2)) - send_two_pixels = 0; - if (send_two_pixels) { - if (i + 1 == width2) { - /* for jpg (binary) copy, this can occur - * if the file contains an odd number of bytes. 
- */ - inputfifo_send_data( - data2[0], 0); - } else { - inputfifo_send_data( - data2[0], data2[1]); - } - /* Additional increment because we send 2 pixels */ - data2++; - i++; - } else if (two_ppc && is_legacy) { - inputfifo_send_data_b(data2[0]); - } else { - inputfifo_send_data_a(data2[0]); - } - } - for (i = 0; i < hblank_cycles; i++) - inputfifo_send_empty_token(); - inputfifo_send_eol(); - return; -} - - - -static void -inputfifo_send_line(const unsigned short *data, - unsigned int width, - unsigned int hblank_cycles, - unsigned int marker_cycles, - unsigned int two_ppc, - enum inputfifo_mipi_data_type type) -{ - assert(data != NULL); - inputfifo_send_line2(data, width, NULL, 0, - hblank_cycles, - marker_cycles, - two_ppc, - type); -} - - -/* Send a frame of data into the input network via the GP FIFO. - * Parameters: - * - data: array of 16 bit values that contains all data for the frame. - * - width: width of a line in number of subpixels, for yuv420 it is the - * number of Y components per line. - * - height: height of the frame in number of lines. - * - ch_id: channel ID. - * - fmt_type: format type. - * - hblank_cycles: length of horizontal blanking in cycles. - * - marker_cycles: number of empty cycles after start-of-line and before - * end-of-frame. - * - two_ppc: boolean, describes whether to send one or two pixels per clock - * cycle. In this mode, we sent pixels N and N+1 in the same cycle, - * to IF_PRIM_A and IF_PRIM_B respectively. The caller must make - * sure the input data has been formatted correctly for this. - * For example, for RGB formats this means that unused values - * must be inserted. - * - yuv420: boolean, describes whether (non-legacy) yuv420 data is used. In - * this mode, the odd lines (1,3,5 etc) are half as long as the - * even lines (2,4,6 etc). - * Note that the first line is odd (1) and the second line is even - * (2). 
- * - * This function does not do any reordering of pixels, the caller must make - * sure the data is in the righ format. Please refer to the CSS receiver - * documentation for details on the data formats. - */ - -static void inputfifo_send_frame( - const unsigned short *data, - unsigned int width, - unsigned int height, - unsigned int ch_id, - unsigned int fmt_type, - unsigned int hblank_cycles, - unsigned int marker_cycles, - unsigned int two_ppc, - enum inputfifo_mipi_data_type type) -{ - unsigned int i; - - assert(data != NULL); - inputfifo_start_frame(ch_id, fmt_type); - - for (i = 0; i < height; i++) { - if ((type == inputfifo_mipi_data_type_yuv420) && - (i & 1) == 1) { - inputfifo_send_line(data, 2 * width, - hblank_cycles, - marker_cycles, - two_ppc, type); - data += 2 * width; - } else { - inputfifo_send_line(data, width, - hblank_cycles, - marker_cycles, - two_ppc, type); - data += width; - } - } - inputfifo_end_frame(marker_cycles); - return; -} - - - -static enum inputfifo_mipi_data_type inputfifo_determine_type( - enum atomisp_input_format input_format) -{ - enum inputfifo_mipi_data_type type; - - type = inputfifo_mipi_data_type_regular; - if (input_format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) { - type = - inputfifo_mipi_data_type_yuv420_legacy; - } else if (input_format == ATOMISP_INPUT_FORMAT_YUV420_8 || - input_format == ATOMISP_INPUT_FORMAT_YUV420_10 || - input_format == ATOMISP_INPUT_FORMAT_YUV420_16) { - type = - inputfifo_mipi_data_type_yuv420; - } else if (input_format >= ATOMISP_INPUT_FORMAT_RGB_444 && - input_format <= ATOMISP_INPUT_FORMAT_RGB_888) { - type = - inputfifo_mipi_data_type_rgb; - } - return type; -} - - - -static struct inputfifo_instance *inputfifo_get_inst( - unsigned int ch_id) -{ - return &inputfifo_inst_admin[ch_id]; -} - -void ia_css_inputfifo_send_input_frame( - const unsigned short *data, - unsigned int width, - unsigned int height, - unsigned int ch_id, - enum atomisp_input_format input_format, - bool two_ppc) -{ - 
unsigned int fmt_type, hblank_cycles, marker_cycles; - enum inputfifo_mipi_data_type type; - - assert(data != NULL); - hblank_cycles = HBLANK_CYCLES; - marker_cycles = MARKER_CYCLES; - ia_css_isys_convert_stream_format_to_mipi_format(input_format, - MIPI_PREDICTOR_NONE, - &fmt_type); - - type = inputfifo_determine_type(input_format); - - inputfifo_send_frame(data, width, height, - ch_id, fmt_type, hblank_cycles, marker_cycles, - two_ppc, type); -} - - - -void ia_css_inputfifo_start_frame( - unsigned int ch_id, - enum atomisp_input_format input_format, - bool two_ppc) -{ - struct inputfifo_instance *s2mi; - s2mi = inputfifo_get_inst(ch_id); - - s2mi->ch_id = ch_id; - ia_css_isys_convert_stream_format_to_mipi_format(input_format, - MIPI_PREDICTOR_NONE, - &s2mi->fmt_type); - s2mi->two_ppc = two_ppc; - s2mi->type = inputfifo_determine_type(input_format); - s2mi->hblank_cycles = HBLANK_CYCLES; - s2mi->marker_cycles = MARKER_CYCLES; - s2mi->streaming = true; - - inputfifo_start_frame(ch_id, s2mi->fmt_type); - return; -} - - - -void ia_css_inputfifo_send_line( - unsigned int ch_id, - const unsigned short *data, - unsigned int width, - const unsigned short *data2, - unsigned int width2) -{ - struct inputfifo_instance *s2mi; - - assert(data != NULL); - assert((data2 != NULL) || (width2 == 0)); - s2mi = inputfifo_get_inst(ch_id); - - - /* Set global variables that indicate channel_id and format_type */ - inputfifo_curr_ch_id = (s2mi->ch_id) & _HIVE_ISP_CH_ID_MASK; - inputfifo_curr_fmt_type = (s2mi->fmt_type) & _HIVE_ISP_FMT_TYPE_MASK; - - inputfifo_send_line2(data, width, data2, width2, - s2mi->hblank_cycles, - s2mi->marker_cycles, - s2mi->two_ppc, - s2mi->type); -} - - -void ia_css_inputfifo_send_embedded_line( - unsigned int ch_id, - enum atomisp_input_format data_type, - const unsigned short *data, - unsigned int width) -{ - struct inputfifo_instance *s2mi; - unsigned int fmt_type; - - assert(data != NULL); - s2mi = inputfifo_get_inst(ch_id); - 
ia_css_isys_convert_stream_format_to_mipi_format(data_type, - MIPI_PREDICTOR_NONE, &fmt_type); - - /* Set format_type for metadata line. */ - inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK; - - inputfifo_send_line(data, width, s2mi->hblank_cycles, s2mi->marker_cycles, - s2mi->two_ppc, inputfifo_mipi_data_type_regular); -} - - -void ia_css_inputfifo_end_frame( - unsigned int ch_id) -{ - struct inputfifo_instance *s2mi; - s2mi = inputfifo_get_inst(ch_id); - - /* Set global variables that indicate channel_id and format_type */ - inputfifo_curr_ch_id = (s2mi->ch_id) & _HIVE_ISP_CH_ID_MASK; - inputfifo_curr_fmt_type = (s2mi->fmt_type) & _HIVE_ISP_FMT_TYPE_MASK; - - /* Call existing HRT function */ - inputfifo_end_frame(s2mi->marker_cycles); - - s2mi->streaming = false; - return; -} -#endif /* #if !defined(HAS_NO_INPUT_SYSTEM) */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param.h deleted file mode 100644 index 285749885105..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param.h +++ /dev/null @@ -1,118 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. 
- -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _IA_CSS_ISP_PARAM_H_ -#define _IA_CSS_ISP_PARAM_H_ - -#include -#include "ia_css_isp_param_types.h" - -/* Set functions for parameter memory descriptors */ -void -ia_css_isp_param_set_mem_init( - struct ia_css_isp_param_host_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem, - char *address, size_t size); - -void -ia_css_isp_param_set_css_mem_init( - struct ia_css_isp_param_css_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem, - hrt_vaddress address, size_t size); - -void -ia_css_isp_param_set_isp_mem_init( - struct ia_css_isp_param_isp_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem, - uint32_t address, size_t size); - -/* Get functions for parameter memory descriptors */ -const struct ia_css_host_data* -ia_css_isp_param_get_mem_init( - const struct ia_css_isp_param_host_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem); - -const struct ia_css_data* -ia_css_isp_param_get_css_mem_init( - const struct ia_css_isp_param_css_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem); - -const struct ia_css_isp_data* -ia_css_isp_param_get_isp_mem_init( - const struct ia_css_isp_param_isp_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem); - -/* Initialize the memory interface sizes and addresses */ -void -ia_css_init_memory_interface( - struct ia_css_isp_param_css_segments *isp_mem_if, - const struct 
ia_css_isp_param_host_segments *mem_params, - const struct ia_css_isp_param_css_segments *css_params); - -/* Allocate memory parameters */ -enum ia_css_err -ia_css_isp_param_allocate_isp_parameters( - struct ia_css_isp_param_host_segments *mem_params, - struct ia_css_isp_param_css_segments *css_params, - const struct ia_css_isp_param_isp_segments *mem_initializers); - -/* Destroy memory parameters */ -void -ia_css_isp_param_destroy_isp_parameters( - struct ia_css_isp_param_host_segments *mem_params, - struct ia_css_isp_param_css_segments *css_params); - -/* Load fw parameters */ -void -ia_css_isp_param_load_fw_params( - const char *fw, - union ia_css_all_memory_offsets *mem_offsets, - const struct ia_css_isp_param_memory_offsets *memory_offsets, - bool init); - -/* Copy host parameter images to ddr */ -enum ia_css_err -ia_css_isp_param_copy_isp_mem_if_to_ddr( - struct ia_css_isp_param_css_segments *ddr, - const struct ia_css_isp_param_host_segments *host, - enum ia_css_param_class pclass); - -/* Enable a pipeline by setting the control field in the isp dmem parameters */ -void -ia_css_isp_param_enable_pipeline( - const struct ia_css_isp_param_host_segments *mem_params); - -#endif /* _IA_CSS_ISP_PARAM_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h deleted file mode 100644 index 9d111793bb65..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h +++ /dev/null @@ -1,98 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef _IA_CSS_ISP_PARAM_TYPES_H_ -#define _IA_CSS_ISP_PARAM_TYPES_H_ - -#include "ia_css_types.h" -#include -#include - -/* Short hands */ -#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0 -#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0 - -/* The driver depends on this, to be removed later. */ -#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES - -/* Explicit member numbering to avoid fish type checker bug */ -enum ia_css_param_class { - IA_CSS_PARAM_CLASS_PARAM = 0, /* Late binding parameters, like 3A */ - IA_CSS_PARAM_CLASS_CONFIG = 1, /* Pipe config time parameters, like resolution */ - IA_CSS_PARAM_CLASS_STATE = 2, /* State parameters, like tnr buffer index */ -#if 0 /* Not yet implemented */ - IA_CSS_PARAM_CLASS_FRAME = 3, /* Frame time parameters, like output buffer */ -#endif -}; -#define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1) - -/* ISP parameter descriptor */ -struct ia_css_isp_parameter { - uint32_t offset; /* Offset in isp_)parameters, etc. 
*/ - uint32_t size; /* Disabled if 0 */ -}; - - -/* Address/size of each parameter class in each isp memory, host memory pointers */ -struct ia_css_isp_param_host_segments { - struct ia_css_host_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES]; -}; - -/* Address/size of each parameter class in each isp memory, css memory pointers */ -struct ia_css_isp_param_css_segments { - struct ia_css_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES]; -}; - -/* Address/size of each parameter class in each isp memory, isp memory pointers */ -struct ia_css_isp_param_isp_segments { - struct ia_css_isp_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES]; -}; - -/* Memory offsets in binary info */ -struct ia_css_isp_param_memory_offsets { - uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES]; /** offset wrt hdr in bytes */ -}; - -/* Offsets for ISP kernel parameters per isp memory. - * Only relevant for standard ISP binaries, not ACC or SP. - */ -union ia_css_all_memory_offsets { - struct { - CSS_ALIGN(struct ia_css_memory_offsets *param, 8); - CSS_ALIGN(struct ia_css_config_memory_offsets *config, 8); - CSS_ALIGN(struct ia_css_state_memory_offsets *state, 8); - } offsets; - struct { - CSS_ALIGN(void *ptr, 8); - } array[IA_CSS_NUM_PARAM_CLASSES]; -}; - -#endif /* _IA_CSS_ISP_PARAM_TYPES_H_ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c deleted file mode 100644 index f793ce125f02..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c +++ /dev/null @@ -1,227 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#include "memory_access.h" -#include "ia_css_pipeline.h" -#include "ia_css_isp_param.h" - -/* Set functions for parameter memory descriptors */ - -void -ia_css_isp_param_set_mem_init( - struct ia_css_isp_param_host_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem, - char *address, size_t size) -{ - mem_init->params[pclass][mem].address = address; - mem_init->params[pclass][mem].size = (uint32_t)size; -} - -void -ia_css_isp_param_set_css_mem_init( - struct ia_css_isp_param_css_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem, - hrt_vaddress address, size_t size) -{ - mem_init->params[pclass][mem].address = address; - mem_init->params[pclass][mem].size = (uint32_t)size; -} - -void -ia_css_isp_param_set_isp_mem_init( - struct ia_css_isp_param_isp_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem, - uint32_t address, size_t size) -{ - mem_init->params[pclass][mem].address = address; - mem_init->params[pclass][mem].size = (uint32_t)size; -} - -/* Get functions for parameter memory descriptors */ -const struct ia_css_host_data* -ia_css_isp_param_get_mem_init( - const struct ia_css_isp_param_host_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem) -{ - return &mem_init->params[pclass][mem]; -} - -const struct ia_css_data* -ia_css_isp_param_get_css_mem_init( - const struct ia_css_isp_param_css_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem) -{ - return &mem_init->params[pclass][mem]; -} - -const struct ia_css_isp_data* -ia_css_isp_param_get_isp_mem_init( - const struct ia_css_isp_param_isp_segments *mem_init, - enum ia_css_param_class pclass, - enum ia_css_isp_memories mem) -{ - return &mem_init->params[pclass][mem]; -} - -void -ia_css_init_memory_interface( - struct ia_css_isp_param_css_segments *isp_mem_if, - const struct ia_css_isp_param_host_segments *mem_params, - const struct 
ia_css_isp_param_css_segments *css_params) -{ - unsigned pclass, mem; - for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) { - memset(isp_mem_if->params[pclass], 0, sizeof(isp_mem_if->params[pclass])); - for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) { - if (!mem_params->params[pclass][mem].address) - continue; - isp_mem_if->params[pclass][mem].size = mem_params->params[pclass][mem].size; - if (pclass != IA_CSS_PARAM_CLASS_PARAM) - isp_mem_if->params[pclass][mem].address = css_params->params[pclass][mem].address; - } - } -} - -enum ia_css_err -ia_css_isp_param_allocate_isp_parameters( - struct ia_css_isp_param_host_segments *mem_params, - struct ia_css_isp_param_css_segments *css_params, - const struct ia_css_isp_param_isp_segments *mem_initializers) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned mem, pclass; - - pclass = IA_CSS_PARAM_CLASS_PARAM; - for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) { - for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) { - uint32_t size = 0; - if (mem_initializers) - size = mem_initializers->params[pclass][mem].size; - mem_params->params[pclass][mem].size = size; - mem_params->params[pclass][mem].address = NULL; - css_params->params[pclass][mem].size = size; - css_params->params[pclass][mem].address = 0x0; - if (size) { - mem_params->params[pclass][mem].address = sh_css_calloc(1, size); - if (!mem_params->params[pclass][mem].address) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto cleanup; - } - if (pclass != IA_CSS_PARAM_CLASS_PARAM) { - css_params->params[pclass][mem].address = mmgr_malloc(size); - if (!css_params->params[pclass][mem].address) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto cleanup; - } - } - } - } - } - return err; -cleanup: - ia_css_isp_param_destroy_isp_parameters(mem_params, css_params); - return err; -} - -void -ia_css_isp_param_destroy_isp_parameters( - struct ia_css_isp_param_host_segments *mem_params, - struct ia_css_isp_param_css_segments *css_params) -{ - unsigned 
mem, pclass; - - for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) { - for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) { - if (mem_params->params[pclass][mem].address) - sh_css_free(mem_params->params[pclass][mem].address); - if (css_params->params[pclass][mem].address) - hmm_free(css_params->params[pclass][mem].address); - mem_params->params[pclass][mem].address = NULL; - css_params->params[pclass][mem].address = 0x0; - } - } -} - -void -ia_css_isp_param_load_fw_params( - const char *fw, - union ia_css_all_memory_offsets *mem_offsets, - const struct ia_css_isp_param_memory_offsets *memory_offsets, - bool init) -{ - unsigned pclass; - for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) { - mem_offsets->array[pclass].ptr = NULL; - if (init) - mem_offsets->array[pclass].ptr = (void *)(fw + memory_offsets->offsets[pclass]); - } -} - -enum ia_css_err -ia_css_isp_param_copy_isp_mem_if_to_ddr( - struct ia_css_isp_param_css_segments *ddr, - const struct ia_css_isp_param_host_segments *host, - enum ia_css_param_class pclass) -{ - unsigned mem; - - for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) { - size_t size = host->params[pclass][mem].size; - hrt_vaddress ddr_mem_ptr = ddr->params[pclass][mem].address; - char *host_mem_ptr = host->params[pclass][mem].address; - if (size != ddr->params[pclass][mem].size) - return IA_CSS_ERR_INTERNAL_ERROR; - if (!size) - continue; - mmgr_store(ddr_mem_ptr, host_mem_ptr, size); - } - return IA_CSS_SUCCESS; -} - -void -ia_css_isp_param_enable_pipeline( - const struct ia_css_isp_param_host_segments *mem_params) -{ - /* By protocol b0 of the mandatory uint32_t first field of the - input parameter is a disable bit*/ - short dmem_offset = 0; - - if (mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].size == 0) - return; - - *(uint32_t *)&mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].address[dmem_offset] = 0x0; -} - - diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h deleted file mode 100644 index 8c005db9766e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h +++ /dev/null @@ -1,201 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __IA_CSS_ISYS_H__ -#define __IA_CSS_ISYS_H__ - -#include -#include -#include -#include -#include -#include -#include "ia_css_isys_comm.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -/** - * Virtual Input System. 
(Input System 2401) - */ -typedef input_system_cfg_t ia_css_isys_descr_t; -/* end of Virtual Input System */ -#endif - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -input_system_error_t ia_css_isys_init(void); -void ia_css_isys_uninit(void); -enum mipi_port_id ia_css_isys_port_to_mipi_port( - enum mipi_port_id api_port); -#endif - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - -/** - * @brief Register one (virtual) stream. This is used to track when all - * virtual streams are configured inside the input system. The CSI RX is - * only started when all registered streams are configured. - * - * @param[in] port CSI port - * @param[in] isys_stream_id Stream handle generated with ia_css_isys_generate_stream_id() - * Must be lower than SH_CSS_MAX_ISYS_CHANNEL_NODES - * @return IA_CSS_SUCCESS if successful, IA_CSS_ERR_INTERNAL_ERROR if - * there is already a stream registered with the same handle - */ -enum ia_css_err ia_css_isys_csi_rx_register_stream( - enum mipi_port_id port, - uint32_t isys_stream_id); - -/** - * @brief Unregister one (virtual) stream. This is used to track when all - * virtual streams are configured inside the input system. The CSI RX is - * only started when all registered streams are configured. 
- * - * @param[in] port CSI port - * @param[in] isys_stream_id Stream handle generated with ia_css_isys_generate_stream_id() - * Must be lower than SH_CSS_MAX_ISYS_CHANNEL_NODES - * @return IA_CSS_SUCCESS if successful, IA_CSS_ERR_INTERNAL_ERROR if - * there is no stream registered with that handle - */ -enum ia_css_err ia_css_isys_csi_rx_unregister_stream( - enum mipi_port_id port, - uint32_t isys_stream_id); - -enum ia_css_err ia_css_isys_convert_compressed_format( - struct ia_css_csi2_compression *comp, - struct input_system_cfg_s *cfg); -unsigned int ia_css_csi2_calculate_input_system_alignment( - enum atomisp_input_format fmt_type); -#endif - -#if !defined(USE_INPUT_SYSTEM_VERSION_2401) -/* CSS Receiver */ -void ia_css_isys_rx_configure( - const rx_cfg_t *config, - const enum ia_css_input_mode input_mode); - -void ia_css_isys_rx_disable(void); - -void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port); - -unsigned int ia_css_isys_rx_get_interrupt_reg(enum mipi_port_id port); -void ia_css_isys_rx_get_irq_info(enum mipi_port_id port, - unsigned int *irq_infos); -void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port, - unsigned int irq_infos); -unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits); - -#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */ - -/* @brief Translate format and compression to format type. - * - * @param[in] input_format The input format. - * @param[in] compression The compression scheme. - * @param[out] fmt_type Pointer to the resulting format type. - * @return Error code. - * - * Translate an input format and mipi compression pair to the fmt_type. - * This is normally done by the sensor, but when using the input fifo, this - * format type must be sumitted correctly by the application. 
- */ -enum ia_css_err ia_css_isys_convert_stream_format_to_mipi_format( - enum atomisp_input_format input_format, - mipi_predictor_t compression, - unsigned int *fmt_type); - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -/** - * Virtual Input System. (Input System 2401) - */ -extern ia_css_isys_error_t ia_css_isys_stream_create( - ia_css_isys_descr_t *isys_stream_descr, - ia_css_isys_stream_h isys_stream, - uint32_t isys_stream_id); - -extern void ia_css_isys_stream_destroy( - ia_css_isys_stream_h isys_stream); - -extern ia_css_isys_error_t ia_css_isys_stream_calculate_cfg( - ia_css_isys_stream_h isys_stream, - ia_css_isys_descr_t *isys_stream_descr, - ia_css_isys_stream_cfg_t *isys_stream_cfg); - -extern void ia_css_isys_csi_rx_lut_rmgr_init(void); - -extern void ia_css_isys_csi_rx_lut_rmgr_uninit(void); - -extern bool ia_css_isys_csi_rx_lut_rmgr_acquire( - csi_rx_backend_ID_t backend, - csi_mipi_packet_type_t packet_type, - csi_rx_backend_lut_entry_t *entry); - -extern void ia_css_isys_csi_rx_lut_rmgr_release( - csi_rx_backend_ID_t backend, - csi_mipi_packet_type_t packet_type, - csi_rx_backend_lut_entry_t *entry); - - -extern void ia_css_isys_ibuf_rmgr_init(void); - -extern void ia_css_isys_ibuf_rmgr_uninit(void); - -extern bool ia_css_isys_ibuf_rmgr_acquire( - uint32_t size, - uint32_t *start_addr); - -extern void ia_css_isys_ibuf_rmgr_release( - uint32_t *start_addr); - -extern void ia_css_isys_dma_channel_rmgr_init(void); - -extern void ia_css_isys_dma_channel_rmgr_uninit(void); - -extern bool ia_css_isys_dma_channel_rmgr_acquire( - isys2401_dma_ID_t dma_id, - isys2401_dma_channel *channel); - -extern void ia_css_isys_dma_channel_rmgr_release( - isys2401_dma_ID_t dma_id, - isys2401_dma_channel *channel); - -extern void ia_css_isys_stream2mmio_sid_rmgr_init(void); - -extern void ia_css_isys_stream2mmio_sid_rmgr_uninit(void); - -extern bool ia_css_isys_stream2mmio_sid_rmgr_acquire( - stream2mmio_ID_t stream2mmio, - stream2mmio_sid_ID_t *sid); - -extern void 
ia_css_isys_stream2mmio_sid_rmgr_release( - stream2mmio_ID_t stream2mmio, - stream2mmio_sid_ID_t *sid); - -/* end of Virtual Input System */ -#endif - -#endif /* __IA_CSS_ISYS_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys_comm.h deleted file mode 100644 index 0c3434ad0613..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys_comm.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __IA_CSS_ISYS_COMM_H -#define __IA_CSS_ISYS_COMM_H - -#include -#include - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -#include /* inline */ -#include -#include /* IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH */ - -#define SH_CSS_NODES_PER_THREAD 2 -#define SH_CSS_MAX_ISYS_CHANNEL_NODES (SH_CSS_MAX_SP_THREADS * SH_CSS_NODES_PER_THREAD) - -/* - * a) ia_css_isys_stream_h & ia_css_isys_stream_cfg_t come from host. - * - * b) Here it is better to use actual structures for stream handle - * instead of opaque handles. Otherwise, we need to have another - * communication channel to interpret that opaque handle(this handle is - * maintained by host and needs to be populated to sp for every stream open) - * */ -typedef virtual_input_system_stream_t *ia_css_isys_stream_h; -typedef virtual_input_system_stream_cfg_t ia_css_isys_stream_cfg_t; - -/* - * error check for ISYS APIs. - * */ -typedef bool ia_css_isys_error_t; - -static inline uint32_t ia_css_isys_generate_stream_id( - uint32_t sp_thread_id, - uint32_t stream_id) -{ - return sp_thread_id * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH + stream_id; -} - -#endif /* USE_INPUT_SYSTEM_VERSION_2401*/ -#endif /*_IA_CSS_ISYS_COMM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c deleted file mode 100644 index a914ce5532ec..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c +++ /dev/null @@ -1,179 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include "system_global.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - -#include "assert_support.h" -#include "platform_support.h" -#include "ia_css_isys.h" -#include "bitop_support.h" -#include "ia_css_pipeline.h" /* ia_css_pipeline_get_pipe_io_status() */ -#include "sh_css_internal.h" /* sh_css_sp_pipeline_io_status - * SH_CSS_MAX_SP_THREADS - */ -#include "csi_rx_rmgr.h" - -static isys_csi_rx_rsrc_t isys_csi_rx_rsrc[N_CSI_RX_BACKEND_ID]; - -void ia_css_isys_csi_rx_lut_rmgr_init(void) -{ - memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc)); -} - -void ia_css_isys_csi_rx_lut_rmgr_uninit(void) -{ - memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc)); -} - -bool ia_css_isys_csi_rx_lut_rmgr_acquire( - csi_rx_backend_ID_t backend, - csi_mipi_packet_type_t packet_type, - csi_rx_backend_lut_entry_t *entry) -{ - bool retval = false; - uint32_t max_num_packets_of_type; - uint32_t num_active_of_type; - isys_csi_rx_rsrc_t *cur_rsrc = NULL; - uint16_t i; - - assert(backend < N_CSI_RX_BACKEND_ID); - assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) || (packet_type == CSI_MIPI_PACKET_TYPE_SHORT)); - assert(entry != NULL); - - if ((backend < N_CSI_RX_BACKEND_ID) && (entry != NULL)) 
{ - cur_rsrc = &isys_csi_rx_rsrc[backend]; - if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) { - max_num_packets_of_type = N_LONG_PACKET_LUT_ENTRIES[backend]; - num_active_of_type = cur_rsrc->num_long_packets; - } else { - max_num_packets_of_type = N_SHORT_PACKET_LUT_ENTRIES[backend]; - num_active_of_type = cur_rsrc->num_short_packets; - } - - if (num_active_of_type < max_num_packets_of_type) { - for (i = 0; i < max_num_packets_of_type; i++) { - if (bitop_getbit(cur_rsrc->active_table, i) == 0) { - bitop_setbit(cur_rsrc->active_table, i); - - if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) { - entry->long_packet_entry = i; - entry->short_packet_entry = 0; - cur_rsrc->num_long_packets++; - } else { - entry->long_packet_entry = 0; - entry->short_packet_entry = i; - cur_rsrc->num_short_packets++; - } - cur_rsrc->num_active++; - retval = true; - break; - } - } - } - } - return retval; -} - -void ia_css_isys_csi_rx_lut_rmgr_release( - csi_rx_backend_ID_t backend, - csi_mipi_packet_type_t packet_type, - csi_rx_backend_lut_entry_t *entry) -{ - uint32_t max_num_packets; - isys_csi_rx_rsrc_t *cur_rsrc = NULL; - uint32_t packet_entry = 0; - - assert(backend < N_CSI_RX_BACKEND_ID); - assert(entry != NULL); - assert((packet_type >= CSI_MIPI_PACKET_TYPE_LONG) || (packet_type <= CSI_MIPI_PACKET_TYPE_SHORT)); - - if ((backend < N_CSI_RX_BACKEND_ID) && (entry != NULL)) { - if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) { - max_num_packets = N_LONG_PACKET_LUT_ENTRIES[backend]; - packet_entry = entry->long_packet_entry; - } else { - max_num_packets = N_SHORT_PACKET_LUT_ENTRIES[backend]; - packet_entry = entry->short_packet_entry; - } - - cur_rsrc = &isys_csi_rx_rsrc[backend]; - if ((packet_entry < max_num_packets) && (cur_rsrc->num_active > 0)) { - if (bitop_getbit(cur_rsrc->active_table, packet_entry) == 1) { - bitop_clearbit(cur_rsrc->active_table, packet_entry); - - if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) - cur_rsrc->num_long_packets--; - else - cur_rsrc->num_short_packets--; - 
cur_rsrc->num_active--; - } - } - } -} - -enum ia_css_err ia_css_isys_csi_rx_register_stream( - enum mipi_port_id port, - uint32_t isys_stream_id) -{ - enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR; - - if ((port < N_INPUT_SYSTEM_CSI_PORT) && - (isys_stream_id < SH_CSS_MAX_ISYS_CHANNEL_NODES)) { - struct sh_css_sp_pipeline_io_status *pipe_io_status; - pipe_io_status = ia_css_pipeline_get_pipe_io_status(); - if (bitop_getbit(pipe_io_status->active[port], isys_stream_id) == 0) { - bitop_setbit(pipe_io_status->active[port], isys_stream_id); - pipe_io_status->running[port] = 0; - retval = IA_CSS_SUCCESS; - } - } - return retval; -} - -enum ia_css_err ia_css_isys_csi_rx_unregister_stream( - enum mipi_port_id port, - uint32_t isys_stream_id) -{ - enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR; - - if ((port < N_INPUT_SYSTEM_CSI_PORT) && - (isys_stream_id < SH_CSS_MAX_ISYS_CHANNEL_NODES)) { - struct sh_css_sp_pipeline_io_status *pipe_io_status; - pipe_io_status = ia_css_pipeline_get_pipe_io_status(); - if (bitop_getbit(pipe_io_status->active[port], isys_stream_id) == 1) { - bitop_clearbit(pipe_io_status->active[port], isys_stream_id); - retval = IA_CSS_SUCCESS; - } - } - return retval; -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.h deleted file mode 100644 index c27b0ab83c93..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __CSI_RX_RMGR_H_INCLUDED__ -#define __CSI_RX_RMGR_H_INCLUDED__ - -typedef struct isys_csi_rx_rsrc_s isys_csi_rx_rsrc_t; -struct isys_csi_rx_rsrc_s { - uint32_t active_table; - uint32_t num_active; - uint16_t num_long_packets; - uint16_t num_short_packets; -}; - -#endif /* __CSI_RX_RMGR_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c deleted file mode 100644 index d8c3b75d7fac..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c +++ /dev/null @@ -1,140 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010 - 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#endif - -#include "system_global.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - -#include "assert_support.h" -#include "platform_support.h" -#include "ia_css_isys.h" -#include "ibuf_ctrl_rmgr.h" - -static ibuf_rsrc_t ibuf_rsrc; - -static ibuf_handle_t *getHandle(uint16_t index) -{ - ibuf_handle_t *handle = NULL; - - if (index < MAX_IBUF_HANDLES) - handle = &ibuf_rsrc.handles[index]; - return handle; -} - -void ia_css_isys_ibuf_rmgr_init(void) -{ - memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc)); - ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE; -} - -void ia_css_isys_ibuf_rmgr_uninit(void) -{ - memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc)); - ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE; -} - -bool ia_css_isys_ibuf_rmgr_acquire( - uint32_t size, - uint32_t *start_addr) -{ - bool retval = false; - bool input_buffer_found = false; - uint32_t aligned_size; - ibuf_handle_t *handle = NULL; - uint16_t i; - - assert(start_addr != NULL); - assert(size > 0); - - aligned_size = (size + (IBUF_ALIGN - 1)) & ~(IBUF_ALIGN - 1); - - /* Check if there is an available un-used handle with the size - * that will fulfill the request. 
- */ - if (ibuf_rsrc.num_active < ibuf_rsrc.num_allocated) { - for (i = 0; i < ibuf_rsrc.num_allocated; i++) { - handle = getHandle(i); - if (!handle->active) { - if (handle->size >= aligned_size) { - handle->active = true; - input_buffer_found = true; - ibuf_rsrc.num_active++; - break; - } - } - } - } - - if (!input_buffer_found) { - /* There were no available handles that fulfilled the - * request. Allocate a new handle with the requested size. - */ - if ((ibuf_rsrc.num_allocated < MAX_IBUF_HANDLES) && - (ibuf_rsrc.free_size >= aligned_size)) { - handle = getHandle(ibuf_rsrc.num_allocated); - handle->start_addr = ibuf_rsrc.free_start_addr; - handle->size = aligned_size; - handle->active = true; - - ibuf_rsrc.free_start_addr += aligned_size; - ibuf_rsrc.free_size -= aligned_size; - ibuf_rsrc.num_active++; - ibuf_rsrc.num_allocated++; - - input_buffer_found = true; - } - } - - if (input_buffer_found && handle) { - *start_addr = handle->start_addr; - retval = true; - } - - return retval; -} - -void ia_css_isys_ibuf_rmgr_release( - uint32_t *start_addr) -{ - uint16_t i; - ibuf_handle_t *handle = NULL; - - assert(start_addr != NULL); - - for (i = 0; i < ibuf_rsrc.num_allocated; i++) { - handle = getHandle(i); - if (handle->active && handle->start_addr == *start_addr) { - handle->active = false; - ibuf_rsrc.num_active--; - break; - } - } -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.h deleted file mode 100644 index 424cfe9f3b2a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __IBUF_CTRL_RMGR_H_INCLUDED__ -#define __IBUF_CTRL_RMGR_H_INCLUDED__ - -#define MAX_IBUF_HANDLES 24 -#define MAX_INPUT_BUFFER_SIZE (64 * 1024) -#define IBUF_ALIGN 8 - -typedef struct ibuf_handle_s ibuf_handle_t; -struct ibuf_handle_s { - uint32_t start_addr; - uint32_t size; - bool active; -}; - -typedef struct ibuf_rsrc_s ibuf_rsrc_t; -struct ibuf_rsrc_s { - uint32_t free_start_addr; - uint32_t free_size; - uint16_t num_active; - uint16_t num_allocated; - ibuf_handle_t handles[MAX_IBUF_HANDLES]; -}; - -#endif /* __IBUF_CTRL_RMGR_H_INCLUDED */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c deleted file mode 100644 index 4def4a542b7d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c +++ /dev/null @@ -1,103 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. 
- -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include "system_global.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - -#include "assert_support.h" -#include "platform_support.h" -#include "ia_css_isys.h" -#include "bitop_support.h" -#include "isys_dma_rmgr.h" - -static isys_dma_rsrc_t isys_dma_rsrc[N_ISYS2401_DMA_ID]; - -void ia_css_isys_dma_channel_rmgr_init(void) -{ - memset(&isys_dma_rsrc, 0, sizeof(isys_dma_rsrc_t)); -} - -void ia_css_isys_dma_channel_rmgr_uninit(void) -{ - memset(&isys_dma_rsrc, 0, sizeof(isys_dma_rsrc_t)); -} - -bool ia_css_isys_dma_channel_rmgr_acquire( - isys2401_dma_ID_t dma_id, - isys2401_dma_channel *channel) -{ - bool retval = false; - isys2401_dma_channel i; - isys2401_dma_channel max_dma_channel; - isys_dma_rsrc_t *cur_rsrc = NULL; - - assert(dma_id < N_ISYS2401_DMA_ID); - assert(channel != NULL); - - max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id]; - cur_rsrc = &isys_dma_rsrc[dma_id]; - - if (cur_rsrc->num_active < max_dma_channel) { - for (i = ISYS2401_DMA_CHANNEL_0; i < N_ISYS2401_DMA_CHANNEL; i++) { - if (bitop_getbit(cur_rsrc->active_table, i) == 0) { - bitop_setbit(cur_rsrc->active_table, i); - *channel = i; - cur_rsrc->num_active++; - retval = true; - break; - } - } - } - - return retval; -} - -void ia_css_isys_dma_channel_rmgr_release( - isys2401_dma_ID_t dma_id, - isys2401_dma_channel *channel) -{ - isys2401_dma_channel max_dma_channel; - isys_dma_rsrc_t *cur_rsrc = NULL; - - assert(dma_id < N_ISYS2401_DMA_ID); - assert(channel != NULL); - - max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id]; - cur_rsrc = &isys_dma_rsrc[dma_id]; - - if ((*channel < max_dma_channel) && (cur_rsrc->num_active > 0)) { - if (bitop_getbit(cur_rsrc->active_table, *channel) == 1) { - bitop_clearbit(cur_rsrc->active_table, *channel); - 
cur_rsrc->num_active--; - } - } -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.h deleted file mode 100644 index b2c286537774..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __ISYS_DMA_RMGR_H_INCLUDED__ -#define __ISYS_DMA_RMGR_H_INCLUDED__ - -typedef struct isys_dma_rsrc_s isys_dma_rsrc_t; -struct isys_dma_rsrc_s { - uint32_t active_table; - uint16_t num_active; -}; - -#endif /* __ISYS_DMA_RMGR_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c deleted file mode 100644 index 2ae5e59d5e31..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c +++ /dev/null @@ -1,139 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#include "input_system.h" - -#ifdef HAS_INPUT_SYSTEM_VERSION_2 -#include "ia_css_isys.h" -#include "platform_support.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -#include "isys_dma.h" /* isys2401_dma_set_max_burst_size() */ -#include "isys_irq.h" -#endif - -#if defined(USE_INPUT_SYSTEM_VERSION_2) -input_system_error_t ia_css_isys_init(void) -{ - backend_channel_cfg_t backend_ch0; - backend_channel_cfg_t backend_ch1; - target_cfg2400_t targetB; - target_cfg2400_t targetC; - uint32_t acq_mem_region_size = 24; - uint32_t acq_nof_mem_regions = 2; - input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR; - - memset(&backend_ch0, 0, sizeof(backend_channel_cfg_t)); - memset(&backend_ch1, 0, sizeof(backend_channel_cfg_t)); - memset(&targetB, 0, sizeof(targetB)); - memset(&targetC, 0, sizeof(targetC)); - - error = input_system_configuration_reset(); - if (error != INPUT_SYSTEM_ERR_NO_ERROR) - return error; - - error = input_system_csi_xmem_channel_cfg( - 0, /*ch_id */ - INPUT_SYSTEM_PORT_A, /*port */ - backend_ch0, /*backend_ch */ - 32, /*mem_region_size */ - 6, /*nof_mem_regions */ - acq_mem_region_size, /*acq_mem_region_size */ - acq_nof_mem_regions, /*acq_nof_mem_regions */ - targetB, /*target */ - 3); /*nof_xmem_buffers */ - if (error != INPUT_SYSTEM_ERR_NO_ERROR) - return error; - - error = input_system_csi_xmem_channel_cfg( - 1, /*ch_id */ - INPUT_SYSTEM_PORT_B, /*port */ - backend_ch0, /*backend_ch */ - 16, /*mem_region_size */ - 3, /*nof_mem_regions */ - acq_mem_region_size, /*acq_mem_region_size */ - acq_nof_mem_regions, /*acq_nof_mem_regions */ - targetB, /*target */ - 3); /*nof_xmem_buffers */ - if (error != INPUT_SYSTEM_ERR_NO_ERROR) - return error; - - error = input_system_csi_xmem_channel_cfg( - 2, /*ch_id */ - INPUT_SYSTEM_PORT_C, /*port */ - backend_ch1, /*backend_ch */ - 32, /*mem_region_size */ - 3, /*nof_mem_regions */ - acq_mem_region_size, /*acq_mem_region_size */ - acq_nof_mem_regions, /*acq_nof_mem_regions */ - targetC, /*target */ - 2); 
/*nof_xmem_buffers */ - if (error != INPUT_SYSTEM_ERR_NO_ERROR) - return error; - - error = input_system_configuration_commit(); - - return error; -} -#elif defined(USE_INPUT_SYSTEM_VERSION_2401) -input_system_error_t ia_css_isys_init(void) -{ - ia_css_isys_csi_rx_lut_rmgr_init(); - ia_css_isys_ibuf_rmgr_init(); - ia_css_isys_dma_channel_rmgr_init(); - ia_css_isys_stream2mmio_sid_rmgr_init(); - - isys2401_dma_set_max_burst_size(ISYS2401_DMA0_ID, - 1 /* Non Burst DMA transactions */); - - /* Enable 2401 input system IRQ status for driver to retrieve */ - isys_irqc_status_enable(ISYS_IRQ0_ID); - isys_irqc_status_enable(ISYS_IRQ1_ID); - isys_irqc_status_enable(ISYS_IRQ2_ID); - - return INPUT_SYSTEM_ERR_NO_ERROR; -} -#endif - -#if defined(USE_INPUT_SYSTEM_VERSION_2) -void ia_css_isys_uninit(void) -{ -} -#elif defined(USE_INPUT_SYSTEM_VERSION_2401) -void ia_css_isys_uninit(void) -{ - ia_css_isys_csi_rx_lut_rmgr_uninit(); - ia_css_isys_ibuf_rmgr_uninit(); - ia_css_isys_dma_channel_rmgr_uninit(); - ia_css_isys_stream2mmio_sid_rmgr_uninit(); -} -#endif - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c deleted file mode 100644 index 222b294c0ab0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include "system_global.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - -#include "assert_support.h" -#include "platform_support.h" -#include "ia_css_isys.h" -#include "bitop_support.h" -#include "isys_stream2mmio_rmgr.h" - -static isys_stream2mmio_rsrc_t isys_stream2mmio_rsrc[N_STREAM2MMIO_ID]; - -void ia_css_isys_stream2mmio_sid_rmgr_init(void) -{ - memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc)); -} - -void ia_css_isys_stream2mmio_sid_rmgr_uninit(void) -{ - memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc)); -} - -bool ia_css_isys_stream2mmio_sid_rmgr_acquire( - stream2mmio_ID_t stream2mmio, - stream2mmio_sid_ID_t *sid) -{ - bool retval = false; - stream2mmio_sid_ID_t max_sid; - isys_stream2mmio_rsrc_t *cur_rsrc = NULL; - stream2mmio_sid_ID_t i; - - assert(stream2mmio < N_STREAM2MMIO_ID); - assert(sid != NULL); - - if ((stream2mmio < N_STREAM2MMIO_ID) && (sid != NULL)) { - max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio]; - cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio]; - - if (cur_rsrc->num_active < max_sid) { - for (i = STREAM2MMIO_SID0_ID; i < max_sid; i++) { - if (bitop_getbit(cur_rsrc->active_table, i) == 0) { - bitop_setbit(cur_rsrc->active_table, i); - *sid = i; - cur_rsrc->num_active++; - retval = true; - break; - } - } - } - } - return retval; -} - -void ia_css_isys_stream2mmio_sid_rmgr_release( - stream2mmio_ID_t 
stream2mmio, - stream2mmio_sid_ID_t *sid) -{ - stream2mmio_sid_ID_t max_sid; - isys_stream2mmio_rsrc_t *cur_rsrc = NULL; - - assert(stream2mmio < N_STREAM2MMIO_ID); - assert(sid != NULL); - - if ((stream2mmio < N_STREAM2MMIO_ID) && (sid != NULL)) { - max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio]; - cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio]; - if ((*sid < max_sid) && (cur_rsrc->num_active > 0)) { - if (bitop_getbit(cur_rsrc->active_table, *sid) == 1) { - bitop_clearbit(cur_rsrc->active_table, *sid); - cur_rsrc->num_active--; - } - } - } -} -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.h deleted file mode 100644 index 4f63005b1071..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. 
- -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__ -#define __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__ - -typedef struct isys_stream2mmio_rsrc_s isys_stream2mmio_rsrc_t; -struct isys_stream2mmio_rsrc_s { - uint32_t active_table; - uint16_t num_active; -}; - -#endif /* __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c deleted file mode 100644 index 425bd3cc3f34..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c +++ /dev/null @@ -1,607 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#define __INLINE_INPUT_SYSTEM__ -#include "input_system.h" -#include "assert_support.h" -#include "ia_css_isys.h" -#include "ia_css_irq.h" -#include "sh_css_internal.h" - -#if !defined(USE_INPUT_SYSTEM_VERSION_2401) -void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port) -{ - hrt_data bits = receiver_port_reg_load(RX0_ID, - port, - _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX); - - bits |= (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT) | -#if defined(HAS_RX_VERSION_2) - (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT) | -#endif - (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT) | - /*(1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT) | */ - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT) | - (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT); - /*(1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT); */ - - receiver_port_reg_store(RX0_ID, - port, - _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits); - - /* - * The CSI is nested into the Iunit IRQ's - */ - ia_css_irq_enable(IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR, true); - - return; -} - -/* This function converts between the enum used on the CSS API and the - * internal DLI enum type. - * We do not use an array for this since we cannot use named array - * initializers in Windows. Without that there is no easy way to guarantee - * that the array values would be in the correct order. 
- * */ -enum mipi_port_id ia_css_isys_port_to_mipi_port(enum mipi_port_id api_port) -{ - /* In this module the validity of the inptu variable should - * have been checked already, so we do not check for erroneous - * values. */ - enum mipi_port_id port = MIPI_PORT0_ID; - - if (api_port == MIPI_PORT1_ID) - port = MIPI_PORT1_ID; - else if (api_port == MIPI_PORT2_ID) - port = MIPI_PORT2_ID; - - return port; -} - -unsigned int ia_css_isys_rx_get_interrupt_reg(enum mipi_port_id port) -{ - return receiver_port_reg_load(RX0_ID, - port, - _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX); -} - -void ia_css_rx_get_irq_info(unsigned int *irq_infos) -{ - ia_css_rx_port_get_irq_info(MIPI_PORT1_ID, irq_infos); -} - -void ia_css_rx_port_get_irq_info(enum mipi_port_id api_port, - unsigned int *irq_infos) -{ - enum mipi_port_id port = ia_css_isys_port_to_mipi_port(api_port); - ia_css_isys_rx_get_irq_info(port, irq_infos); -} - -void ia_css_isys_rx_get_irq_info(enum mipi_port_id port, - unsigned int *irq_infos) -{ - unsigned int bits; - - assert(irq_infos != NULL); - bits = ia_css_isys_rx_get_interrupt_reg(port); - *irq_infos = ia_css_isys_rx_translate_irq_infos(bits); -} - -/* Translate register bits to CSS API enum mask */ -unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits) -{ - unsigned int infos = 0; - - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN; -#if defined(HAS_RX_VERSION_2) - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT; -#endif - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ECC_CORRECTED; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_SOT; - if 
(bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_CONTROL; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_CRC; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC; - if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT)) - infos |= IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC; - - return infos; -} - -void ia_css_rx_clear_irq_info(unsigned int irq_infos) -{ - ia_css_rx_port_clear_irq_info(MIPI_PORT1_ID, irq_infos); -} - -void ia_css_rx_port_clear_irq_info(enum mipi_port_id api_port, unsigned int irq_infos) -{ - enum mipi_port_id port = ia_css_isys_port_to_mipi_port(api_port); - ia_css_isys_rx_clear_irq_info(port, irq_infos); -} - -void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port, unsigned int irq_infos) -{ - hrt_data bits = receiver_port_reg_load(RX0_ID, - port, - _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX); - - /* MW: Why do we remap the receiver bitmap */ - if (irq_infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT; -#if defined(HAS_RX_VERSION_2) - if (irq_infos & IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT; -#endif - if (irq_infos & IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE) - bits |= 1U << 
_HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ECC_CORRECTED) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CRC) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT; - if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC) - bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT; - - receiver_port_reg_store(RX0_ID, - port, - _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits); - - return; -} -#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */ - -enum ia_css_err ia_css_isys_convert_stream_format_to_mipi_format( - enum atomisp_input_format input_format, - mipi_predictor_t compression, - unsigned int *fmt_type) -{ - assert(fmt_type != NULL); - /* - * Custom (user defined) modes. 
Used for compressed - * MIPI transfers - * - * Checkpatch thinks the indent before "if" is suspect - * I think the only suspect part is the missing "else" - * because of the return. - */ - if (compression != MIPI_PREDICTOR_NONE) { - switch (input_format) { - case ATOMISP_INPUT_FORMAT_RAW_6: - *fmt_type = 6; - break; - case ATOMISP_INPUT_FORMAT_RAW_7: - *fmt_type = 7; - break; - case ATOMISP_INPUT_FORMAT_RAW_8: - *fmt_type = 8; - break; - case ATOMISP_INPUT_FORMAT_RAW_10: - *fmt_type = 10; - break; - case ATOMISP_INPUT_FORMAT_RAW_12: - *fmt_type = 12; - break; - case ATOMISP_INPUT_FORMAT_RAW_14: - *fmt_type = 14; - break; - case ATOMISP_INPUT_FORMAT_RAW_16: - *fmt_type = 16; - break; - default: - return IA_CSS_ERR_INTERNAL_ERROR; - } - return IA_CSS_SUCCESS; - } - /* - * This mapping comes from the Arasan CSS function spec - * (CSS_func_spec1.08_ahb_sep29_08.pdf). - * - * MW: For some reason the mapping is not 1-to-1 - */ - switch (input_format) { - case ATOMISP_INPUT_FORMAT_RGB_888: - *fmt_type = MIPI_FORMAT_RGB888; - break; - case ATOMISP_INPUT_FORMAT_RGB_555: - *fmt_type = MIPI_FORMAT_RGB555; - break; - case ATOMISP_INPUT_FORMAT_RGB_444: - *fmt_type = MIPI_FORMAT_RGB444; - break; - case ATOMISP_INPUT_FORMAT_RGB_565: - *fmt_type = MIPI_FORMAT_RGB565; - break; - case ATOMISP_INPUT_FORMAT_RGB_666: - *fmt_type = MIPI_FORMAT_RGB666; - break; - case ATOMISP_INPUT_FORMAT_RAW_8: - *fmt_type = MIPI_FORMAT_RAW8; - break; - case ATOMISP_INPUT_FORMAT_RAW_10: - *fmt_type = MIPI_FORMAT_RAW10; - break; - case ATOMISP_INPUT_FORMAT_RAW_6: - *fmt_type = MIPI_FORMAT_RAW6; - break; - case ATOMISP_INPUT_FORMAT_RAW_7: - *fmt_type = MIPI_FORMAT_RAW7; - break; - case ATOMISP_INPUT_FORMAT_RAW_12: - *fmt_type = MIPI_FORMAT_RAW12; - break; - case ATOMISP_INPUT_FORMAT_RAW_14: - *fmt_type = MIPI_FORMAT_RAW14; - break; - case ATOMISP_INPUT_FORMAT_YUV420_8: - *fmt_type = MIPI_FORMAT_YUV420_8; - break; - case ATOMISP_INPUT_FORMAT_YUV420_10: - *fmt_type = MIPI_FORMAT_YUV420_10; - break; - case 
ATOMISP_INPUT_FORMAT_YUV422_8: - *fmt_type = MIPI_FORMAT_YUV422_8; - break; - case ATOMISP_INPUT_FORMAT_YUV422_10: - *fmt_type = MIPI_FORMAT_YUV422_10; - break; - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: - *fmt_type = MIPI_FORMAT_YUV420_8_LEGACY; - break; - case ATOMISP_INPUT_FORMAT_EMBEDDED: - *fmt_type = MIPI_FORMAT_EMBEDDED; - break; -#ifndef USE_INPUT_SYSTEM_VERSION_2401 - case ATOMISP_INPUT_FORMAT_RAW_16: - /* This is not specified by Arasan, so we use - * 17 for now. - */ - *fmt_type = MIPI_FORMAT_RAW16; - break; - case ATOMISP_INPUT_FORMAT_BINARY_8: - *fmt_type = MIPI_FORMAT_BINARY_8; - break; -#else - case ATOMISP_INPUT_FORMAT_USER_DEF1: - *fmt_type = MIPI_FORMAT_CUSTOM0; - break; - case ATOMISP_INPUT_FORMAT_USER_DEF2: - *fmt_type = MIPI_FORMAT_CUSTOM1; - break; - case ATOMISP_INPUT_FORMAT_USER_DEF3: - *fmt_type = MIPI_FORMAT_CUSTOM2; - break; - case ATOMISP_INPUT_FORMAT_USER_DEF4: - *fmt_type = MIPI_FORMAT_CUSTOM3; - break; - case ATOMISP_INPUT_FORMAT_USER_DEF5: - *fmt_type = MIPI_FORMAT_CUSTOM4; - break; - case ATOMISP_INPUT_FORMAT_USER_DEF6: - *fmt_type = MIPI_FORMAT_CUSTOM5; - break; - case ATOMISP_INPUT_FORMAT_USER_DEF7: - *fmt_type = MIPI_FORMAT_CUSTOM6; - break; - case ATOMISP_INPUT_FORMAT_USER_DEF8: - *fmt_type = MIPI_FORMAT_CUSTOM7; - break; -#endif - - case ATOMISP_INPUT_FORMAT_YUV420_16: - case ATOMISP_INPUT_FORMAT_YUV422_16: - default: - return IA_CSS_ERR_INTERNAL_ERROR; - } - return IA_CSS_SUCCESS; -} -#if defined(USE_INPUT_SYSTEM_VERSION_2401) -static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(enum ia_css_csi2_compression_type type) -{ - mipi_predictor_t predictor = MIPI_PREDICTOR_NONE; - - switch (type) { - case IA_CSS_CSI2_COMPRESSION_TYPE_1: - predictor = MIPI_PREDICTOR_TYPE1-1; - break; - case IA_CSS_CSI2_COMPRESSION_TYPE_2: - predictor = MIPI_PREDICTOR_TYPE2-1; - default: - break; - } - return predictor; -} -enum ia_css_err ia_css_isys_convert_compressed_format( - struct ia_css_csi2_compression *comp, - struct 
input_system_cfg_s *cfg) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - assert(comp != NULL); - assert(cfg != NULL); - - if (comp->type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) { - /* compression register bit slicing - 4 bit for each user defined data type - 3 bit indicate compression scheme - 000 No compression - 001 10-6-10 - 010 10-7-10 - 011 10-8-10 - 100 12-6-12 - 101 12-6-12 - 100 12-7-12 - 110 12-8-12 - 1 bit indicate predictor - */ - if (comp->uncompressed_bits_per_pixel == UNCOMPRESSED_BITS_PER_PIXEL_10) { - switch (comp->compressed_bits_per_pixel) { - case COMPRESSED_BITS_PER_PIXEL_6: - cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_6_10; - break; - case COMPRESSED_BITS_PER_PIXEL_7: - cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_7_10; - break; - case COMPRESSED_BITS_PER_PIXEL_8: - cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_8_10; - break; - default: - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - } else if (comp->uncompressed_bits_per_pixel == UNCOMPRESSED_BITS_PER_PIXEL_12) { - switch (comp->compressed_bits_per_pixel) { - case COMPRESSED_BITS_PER_PIXEL_6: - cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_6_12; - break; - case COMPRESSED_BITS_PER_PIXEL_7: - cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_7_12; - break; - case COMPRESSED_BITS_PER_PIXEL_8: - cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_8_12; - break; - default: - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - } else - err = IA_CSS_ERR_INVALID_ARGUMENTS; - cfg->csi_port_attr.comp_predictor = sh_css_csi2_compression_type_2_mipi_predictor(comp->type); - cfg->csi_port_attr.comp_enable = true; - } else /* No compression */ - cfg->csi_port_attr.comp_enable = false; - return err; -} - -unsigned int ia_css_csi2_calculate_input_system_alignment( - enum atomisp_input_format fmt_type) -{ - unsigned int memory_alignment_in_bytes = HIVE_ISP_DDR_WORD_BYTES; - - switch (fmt_type) { - case ATOMISP_INPUT_FORMAT_RAW_6: - case ATOMISP_INPUT_FORMAT_RAW_7: - case ATOMISP_INPUT_FORMAT_RAW_8: 
- case ATOMISP_INPUT_FORMAT_RAW_10: - case ATOMISP_INPUT_FORMAT_RAW_12: - case ATOMISP_INPUT_FORMAT_RAW_14: - memory_alignment_in_bytes = 2 * ISP_VEC_NELEMS; - break; - case ATOMISP_INPUT_FORMAT_YUV420_8: - case ATOMISP_INPUT_FORMAT_YUV422_8: - case ATOMISP_INPUT_FORMAT_USER_DEF1: - case ATOMISP_INPUT_FORMAT_USER_DEF2: - case ATOMISP_INPUT_FORMAT_USER_DEF3: - case ATOMISP_INPUT_FORMAT_USER_DEF4: - case ATOMISP_INPUT_FORMAT_USER_DEF5: - case ATOMISP_INPUT_FORMAT_USER_DEF6: - case ATOMISP_INPUT_FORMAT_USER_DEF7: - case ATOMISP_INPUT_FORMAT_USER_DEF8: - /* Planar YUV formats need to have all planes aligned, this means - * double the alignment for the Y plane if the horizontal decimation is 2. */ - memory_alignment_in_bytes = 2 * HIVE_ISP_DDR_WORD_BYTES; - break; - case ATOMISP_INPUT_FORMAT_EMBEDDED: - default: - memory_alignment_in_bytes = HIVE_ISP_DDR_WORD_BYTES; - break; - } - return memory_alignment_in_bytes; -} - -#endif - -#if !defined(USE_INPUT_SYSTEM_VERSION_2401) -void ia_css_isys_rx_configure(const rx_cfg_t *config, - const enum ia_css_input_mode input_mode) -{ -#if defined(HAS_RX_VERSION_2) - bool port_enabled[N_MIPI_PORT_ID]; - bool any_port_enabled = false; - enum mipi_port_id port; - - if ((config == NULL) - || (config->mode >= N_RX_MODE) - || (config->port >= N_MIPI_PORT_ID)) { - assert(0); - return; - } - for (port = (enum mipi_port_id) 0; port < N_MIPI_PORT_ID; port++) { - if (is_receiver_port_enabled(RX0_ID, port)) - any_port_enabled = true; - } - /* AM: Check whether this is a problem with multiple - * streams. MS: This is the case. */ - - port = config->port; - receiver_port_enable(RX0_ID, port, false); - - port = config->port; - - /* AM: Check whether this is a problem with multiple streams. 
*/ - if (MIPI_PORT_LANES[config->mode][port] != MIPI_0LANE_CFG) { - receiver_port_reg_store(RX0_ID, port, - _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX, - config->timeout); - receiver_port_reg_store(RX0_ID, port, - _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX, - config->initcount); - receiver_port_reg_store(RX0_ID, port, - _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX, - config->synccount); - receiver_port_reg_store(RX0_ID, port, - _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX, - config->rxcount); - - port_enabled[port] = true; - - if (input_mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { - - /* MW: A bit of a hack, straight wiring of the capture - * units,assuming they are linearly enumerated. */ - input_system_sub_system_reg_store(INPUT_SYSTEM0_ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_A_IDX - + (unsigned int)port, - INPUT_SYSTEM_CSI_BACKEND); - /* MW: Like the integration test example we overwite, - * the GPREG_MUX register */ - input_system_sub_system_reg_store(INPUT_SYSTEM0_ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MUX_IDX, - (input_system_multiplex_t) port); - } else { - /* - * AM: A bit of a hack, wiring the input system. - */ - input_system_sub_system_reg_store(INPUT_SYSTEM0_ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MULTICAST_A_IDX - + (unsigned int)port, - INPUT_SYSTEM_INPUT_BUFFER); - input_system_sub_system_reg_store(INPUT_SYSTEM0_ID, - GPREGS_UNIT0_ID, - HIVE_ISYS_GPREG_MUX_IDX, - INPUT_SYSTEM_ACQUISITION_UNIT); - } - } - /* - * The 2ppc is shared for all ports, so we cannot - * disable->configure->enable individual ports - */ - /* AM: Check whether this is a problem with multiple streams. */ - /* MS: 2ppc should be a property per binary and should be - * enabled/disabled per binary. - * Currently it is implemented as a system wide setting due - * to effort and risks. 
*/ - if (!any_port_enabled) { - receiver_reg_store(RX0_ID, - _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX, - config->is_two_ppc); - receiver_reg_store(RX0_ID, _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX, - config->is_two_ppc); - } - receiver_port_enable(RX0_ID, port, true); - /* TODO: JB: need to add the beneath used define to mizuchi */ - /* sh_css_sw_hive_isp_css_2400_system_20121224_0125\css - * \hrt\input_system_defs.h - * #define INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG 0X207 - */ - /* TODO: need better name for define - * input_system_reg_store(INPUT_SYSTEM0_ID, - * INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG, 1); - */ - input_system_reg_store(INPUT_SYSTEM0_ID, 0x207, 1); -#else -#error "rx.c: RX version must be one of {RX_VERSION_2}" -#endif - - return; -} - -void ia_css_isys_rx_disable(void) -{ - enum mipi_port_id port; - for (port = (enum mipi_port_id) 0; port < N_MIPI_PORT_ID; port++) { - receiver_port_reg_store(RX0_ID, port, - _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, - false); - } - return; -} -#endif /* if !defined(USE_INPUT_SYSTEM_VERSION_2401) */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c deleted file mode 100644 index 2484949453b7..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c +++ /dev/null @@ -1,898 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include "system_global.h" - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - -#include "ia_css_isys.h" -#include "ia_css_debug.h" -#include "math_support.h" -#include "string_support.h" -#include "virtual_isys.h" -#include "isp.h" -#include "sh_css_defs.h" - -/************************************************* - * - * Forwarded Declaration - * - *************************************************/ -#ifndef ISP2401 - -#endif -static bool create_input_system_channel( - input_system_cfg_t *cfg, - bool metadata, - input_system_channel_t *channel); - -static void destroy_input_system_channel( - input_system_channel_t *channel); - -static bool create_input_system_input_port( - input_system_cfg_t *cfg, - input_system_input_port_t *input_port); - -static void destroy_input_system_input_port( - input_system_input_port_t *input_port); - -static bool calculate_input_system_channel_cfg( - input_system_channel_t *channel, - input_system_input_port_t *input_port, - input_system_cfg_t *isys_cfg, - input_system_channel_cfg_t *channel_cfg, - bool metadata); - -static bool calculate_input_system_input_port_cfg( - input_system_channel_t *channel, - input_system_input_port_t *input_port, - input_system_cfg_t *isys_cfg, - input_system_input_port_cfg_t *input_port_cfg); - -static bool acquire_sid( - stream2mmio_ID_t stream2mmio, - stream2mmio_sid_ID_t *sid); - -static void release_sid( - stream2mmio_ID_t stream2mmio, - stream2mmio_sid_ID_t *sid); 
- -static bool acquire_ib_buffer( - int32_t bits_per_pixel, - int32_t pixels_per_line, - int32_t lines_per_frame, - int32_t align_in_bytes, - bool online, - ib_buffer_t *buf); - -static void release_ib_buffer( - ib_buffer_t *buf); - -static bool acquire_dma_channel( - isys2401_dma_ID_t dma_id, - isys2401_dma_channel *channel); - -static void release_dma_channel( - isys2401_dma_ID_t dma_id, - isys2401_dma_channel *channel); - -static bool acquire_be_lut_entry( - csi_rx_backend_ID_t backend, - csi_mipi_packet_type_t packet_type, - csi_rx_backend_lut_entry_t *entry); - -static void release_be_lut_entry( - csi_rx_backend_ID_t backend, - csi_mipi_packet_type_t packet_type, - csi_rx_backend_lut_entry_t *entry); - -static bool calculate_tpg_cfg( - input_system_channel_t *channel, - input_system_input_port_t *input_port, - input_system_cfg_t *isys_cfg, - pixelgen_tpg_cfg_t *cfg); - -static bool calculate_prbs_cfg( - input_system_channel_t *channel, - input_system_input_port_t *input_port, - input_system_cfg_t *isys_cfg, - pixelgen_prbs_cfg_t *cfg); - -static bool calculate_fe_cfg( - const input_system_cfg_t *isys_cfg, - csi_rx_frontend_cfg_t *cfg); - -static bool calculate_be_cfg( - const input_system_input_port_t *input_port, - const input_system_cfg_t *isys_cfg, - bool metadata, - csi_rx_backend_cfg_t *cfg); - -static bool calculate_stream2mmio_cfg( - const input_system_cfg_t *isys_cfg, - bool metadata, - stream2mmio_cfg_t *cfg); - -static bool calculate_ibuf_ctrl_cfg( - const input_system_channel_t *channel, - const input_system_input_port_t *input_port, - const input_system_cfg_t *isys_cfg, - ibuf_ctrl_cfg_t *cfg); - -static bool calculate_isys2401_dma_cfg( - const input_system_channel_t *channel, - const input_system_cfg_t *isys_cfg, - isys2401_dma_cfg_t *cfg); - -static bool calculate_isys2401_dma_port_cfg( - const input_system_cfg_t *isys_cfg, - bool raw_packed, - bool metadata, - isys2401_dma_port_cfg_t *cfg); - -static csi_mipi_packet_type_t 
get_csi_mipi_packet_type( - int32_t data_type); - -static int32_t calculate_stride( - int32_t bits_per_pixel, - int32_t pixels_per_line, - bool raw_packed, - int32_t align_in_bytes); - -/* end of Forwarded Declaration */ - -/************************************************** - * - * Public Methods - * - **************************************************/ -ia_css_isys_error_t ia_css_isys_stream_create( - ia_css_isys_descr_t *isys_stream_descr, - ia_css_isys_stream_h isys_stream, - uint32_t isys_stream_id) -{ - ia_css_isys_error_t rc; - - if (isys_stream_descr == NULL || isys_stream == NULL || - isys_stream_id >= SH_CSS_MAX_ISYS_CHANNEL_NODES) - return false; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "ia_css_isys_stream_create() enter:\n"); - - /*Reset isys_stream to 0*/ - memset(isys_stream, 0, sizeof(*isys_stream)); - isys_stream->enable_metadata = isys_stream_descr->metadata.enable; - isys_stream->id = isys_stream_id; - - isys_stream->linked_isys_stream_id = isys_stream_descr->linked_isys_stream_id; - rc = create_input_system_input_port(isys_stream_descr, &(isys_stream->input_port)); - if (rc == false) - return false; - - rc = create_input_system_channel(isys_stream_descr, false, &(isys_stream->channel)); - if (rc == false) { - destroy_input_system_input_port(&isys_stream->input_port); - return false; - } - -#ifdef ISP2401 - /* - * Early polling is required for timestamp accuracy in certain cause. 
- * The ISYS HW polling is started on - * ia_css_isys_stream_capture_indication() instead of - * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of - * capture takes longer than getting an ISYS frame - */ - isys_stream->polling_mode = isys_stream_descr->polling_mode; - -#endif - /* create metadata channel */ - if (isys_stream_descr->metadata.enable) { - rc = create_input_system_channel(isys_stream_descr, true, &isys_stream->md_channel); - if (rc == false) { - destroy_input_system_input_port(&isys_stream->input_port); - destroy_input_system_channel(&isys_stream->channel); - return false; - } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "ia_css_isys_stream_create() leave:\n"); - - return true; -} - -void ia_css_isys_stream_destroy( - ia_css_isys_stream_h isys_stream) -{ - destroy_input_system_input_port(&isys_stream->input_port); - destroy_input_system_channel(&(isys_stream->channel)); - if (isys_stream->enable_metadata) { - /* Destroy metadata channel only if its allocated*/ - destroy_input_system_channel(&isys_stream->md_channel); - } -} - -ia_css_isys_error_t ia_css_isys_stream_calculate_cfg( - ia_css_isys_stream_h isys_stream, - ia_css_isys_descr_t *isys_stream_descr, - ia_css_isys_stream_cfg_t *isys_stream_cfg) -{ - ia_css_isys_error_t rc; - - if (isys_stream_cfg == NULL || - isys_stream_descr == NULL || - isys_stream == NULL) - return false; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "ia_css_isys_stream_calculate_cfg() enter:\n"); - - rc = calculate_input_system_channel_cfg( - &(isys_stream->channel), - &(isys_stream->input_port), - isys_stream_descr, - &(isys_stream_cfg->channel_cfg), - false); - if (rc == false) - return false; - - /* configure metadata channel */ - if (isys_stream_descr->metadata.enable) { - isys_stream_cfg->enable_metadata = true; - rc = calculate_input_system_channel_cfg( - &isys_stream->md_channel, - &isys_stream->input_port, - isys_stream_descr, - &isys_stream_cfg->md_channel_cfg, - true); - if (rc == 
false) - return false; - } - - rc = calculate_input_system_input_port_cfg( - &(isys_stream->channel), - &(isys_stream->input_port), - isys_stream_descr, - &(isys_stream_cfg->input_port_cfg)); - if (rc == false) - return false; - - isys_stream->valid = 1; - isys_stream_cfg->valid = 1; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "ia_css_isys_stream_calculate_cfg() leave:\n"); - return rc; -} - -/* end of Public Methods */ - -/************************************************** - * - * Private Methods - * - **************************************************/ -static bool create_input_system_channel( - input_system_cfg_t *cfg, - bool metadata, - input_system_channel_t *me) -{ - bool rc = true; - - me->dma_id = ISYS2401_DMA0_ID; - - switch (cfg->input_port_id) { - case INPUT_SYSTEM_CSI_PORT0_ID: - case INPUT_SYSTEM_PIXELGEN_PORT0_ID: - me->stream2mmio_id = STREAM2MMIO0_ID; - me->ibuf_ctrl_id = IBUF_CTRL0_ID; - break; - - case INPUT_SYSTEM_CSI_PORT1_ID: - case INPUT_SYSTEM_PIXELGEN_PORT1_ID: - me->stream2mmio_id = STREAM2MMIO1_ID; - me->ibuf_ctrl_id = IBUF_CTRL1_ID; - break; - - case INPUT_SYSTEM_CSI_PORT2_ID: - case INPUT_SYSTEM_PIXELGEN_PORT2_ID: - me->stream2mmio_id = STREAM2MMIO2_ID; - me->ibuf_ctrl_id = IBUF_CTRL2_ID; - break; - default: - rc = false; - break; - } - - if (!rc) - return false; - - if (!acquire_sid(me->stream2mmio_id, &(me->stream2mmio_sid_id))) { - return false; - } - - if (!acquire_ib_buffer( - metadata ? cfg->metadata.bits_per_pixel : cfg->input_port_resolution.bits_per_pixel, - metadata ? cfg->metadata.pixels_per_line : cfg->input_port_resolution.pixels_per_line, - metadata ? cfg->metadata.lines_per_frame : cfg->input_port_resolution.lines_per_frame, - metadata ? 
cfg->metadata.align_req_in_bytes : cfg->input_port_resolution.align_req_in_bytes, - cfg->online, - &(me->ib_buffer))) { - release_sid(me->stream2mmio_id, &(me->stream2mmio_sid_id)); - return false; - } - - if (!acquire_dma_channel(me->dma_id, &(me->dma_channel))) { - release_sid(me->stream2mmio_id, &(me->stream2mmio_sid_id)); - release_ib_buffer(&(me->ib_buffer)); - return false; - } - - return true; -} - -static void destroy_input_system_channel( - input_system_channel_t *me) -{ - release_sid(me->stream2mmio_id, - &(me->stream2mmio_sid_id)); - - release_ib_buffer(&(me->ib_buffer)); - - release_dma_channel(me->dma_id, &(me->dma_channel)); -} - -static bool create_input_system_input_port( - input_system_cfg_t *cfg, - input_system_input_port_t *me) -{ - csi_mipi_packet_type_t packet_type; - bool rc = true; - - switch (cfg->input_port_id) { - case INPUT_SYSTEM_CSI_PORT0_ID: - me->csi_rx.frontend_id = CSI_RX_FRONTEND0_ID; - me->csi_rx.backend_id = CSI_RX_BACKEND0_ID; - - packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type); - me->csi_rx.packet_type = packet_type; - - rc = acquire_be_lut_entry( - me->csi_rx.backend_id, - packet_type, - &(me->csi_rx.backend_lut_entry)); - break; - case INPUT_SYSTEM_PIXELGEN_PORT0_ID: - me->pixelgen.pixelgen_id = PIXELGEN0_ID; - break; - case INPUT_SYSTEM_CSI_PORT1_ID: - me->csi_rx.frontend_id = CSI_RX_FRONTEND1_ID; - me->csi_rx.backend_id = CSI_RX_BACKEND1_ID; - - packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type); - me->csi_rx.packet_type = packet_type; - - rc = acquire_be_lut_entry( - me->csi_rx.backend_id, - packet_type, - &(me->csi_rx.backend_lut_entry)); - break; - case INPUT_SYSTEM_PIXELGEN_PORT1_ID: - me->pixelgen.pixelgen_id = PIXELGEN1_ID; - - break; - case INPUT_SYSTEM_CSI_PORT2_ID: - me->csi_rx.frontend_id = CSI_RX_FRONTEND2_ID; - me->csi_rx.backend_id = CSI_RX_BACKEND2_ID; - - packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type); - me->csi_rx.packet_type = packet_type; - - rc = 
acquire_be_lut_entry( - me->csi_rx.backend_id, - packet_type, - &(me->csi_rx.backend_lut_entry)); - break; - case INPUT_SYSTEM_PIXELGEN_PORT2_ID: - me->pixelgen.pixelgen_id = PIXELGEN2_ID; - break; - default: - rc = false; - break; - } - - me->source_type = cfg->mode; - - /* for metadata */ - me->metadata.packet_type = CSI_MIPI_PACKET_TYPE_UNDEFINED; - if (rc && cfg->metadata.enable) { - me->metadata.packet_type = get_csi_mipi_packet_type( - cfg->metadata.fmt_type); - rc = acquire_be_lut_entry( - me->csi_rx.backend_id, - me->metadata.packet_type, - &me->metadata.backend_lut_entry); - } - - return rc; -} - -static void destroy_input_system_input_port( - input_system_input_port_t *me) -{ - if (me->source_type == INPUT_SYSTEM_SOURCE_TYPE_SENSOR) { - release_be_lut_entry( - me->csi_rx.backend_id, - me->csi_rx.packet_type, - &me->csi_rx.backend_lut_entry); - } - - if (me->metadata.packet_type != CSI_MIPI_PACKET_TYPE_UNDEFINED) { - /*Free the backend lut allocated for metadata*/ - release_be_lut_entry( - me->csi_rx.backend_id, - me->metadata.packet_type, - &me->metadata.backend_lut_entry); - } -} - -static bool calculate_input_system_channel_cfg( - input_system_channel_t *channel, - input_system_input_port_t *input_port, - input_system_cfg_t *isys_cfg, - input_system_channel_cfg_t *channel_cfg, - bool metadata) -{ - bool rc; - - rc = calculate_stream2mmio_cfg(isys_cfg, metadata, - &(channel_cfg->stream2mmio_cfg)); - if (!rc) - return false; - - rc = calculate_ibuf_ctrl_cfg( - channel, - input_port, - isys_cfg, - &(channel_cfg->ibuf_ctrl_cfg)); - if (!rc) - return false; - if (metadata) - channel_cfg->ibuf_ctrl_cfg.stores_per_frame = isys_cfg->metadata.lines_per_frame; - - rc = calculate_isys2401_dma_cfg( - channel, - isys_cfg, - &(channel_cfg->dma_cfg)); - if (!rc) - return false; - - rc = calculate_isys2401_dma_port_cfg( - isys_cfg, - false, - metadata, - &(channel_cfg->dma_src_port_cfg)); - if (!rc) - return false; - - rc = calculate_isys2401_dma_port_cfg( - isys_cfg, 
- isys_cfg->raw_packed, - metadata, - &(channel_cfg->dma_dest_port_cfg)); - if (!rc) - return false; - - return true; -} - -static bool calculate_input_system_input_port_cfg( - input_system_channel_t *channel, - input_system_input_port_t *input_port, - input_system_cfg_t *isys_cfg, - input_system_input_port_cfg_t *input_port_cfg) -{ - bool rc; - - switch (input_port->source_type) { - case INPUT_SYSTEM_SOURCE_TYPE_SENSOR: - rc = calculate_fe_cfg( - isys_cfg, - &(input_port_cfg->csi_rx_cfg.frontend_cfg)); - - rc &= calculate_be_cfg( - input_port, - isys_cfg, - false, - &(input_port_cfg->csi_rx_cfg.backend_cfg)); - - if (rc && isys_cfg->metadata.enable) - rc &= calculate_be_cfg(input_port, isys_cfg, true, - &input_port_cfg->csi_rx_cfg.md_backend_cfg); - break; - case INPUT_SYSTEM_SOURCE_TYPE_TPG: - rc = calculate_tpg_cfg( - channel, - input_port, - isys_cfg, - &(input_port_cfg->pixelgen_cfg.tpg_cfg)); - break; - case INPUT_SYSTEM_SOURCE_TYPE_PRBS: - rc = calculate_prbs_cfg( - channel, - input_port, - isys_cfg, - &(input_port_cfg->pixelgen_cfg.prbs_cfg)); - break; - default: - rc = false; - break; - } - - return rc; -} - -static bool acquire_sid( - stream2mmio_ID_t stream2mmio, - stream2mmio_sid_ID_t *sid) -{ - return ia_css_isys_stream2mmio_sid_rmgr_acquire(stream2mmio, sid); -} - -static void release_sid( - stream2mmio_ID_t stream2mmio, - stream2mmio_sid_ID_t *sid) -{ - ia_css_isys_stream2mmio_sid_rmgr_release(stream2mmio, sid); -} - -/* See also: ia_css_dma_configure_from_info() */ -static int32_t calculate_stride( - int32_t bits_per_pixel, - int32_t pixels_per_line, - bool raw_packed, - int32_t align_in_bytes) -{ - int32_t bytes_per_line; - int32_t pixels_per_word; - int32_t words_per_line; - int32_t pixels_per_line_padded; - - pixels_per_line_padded = CEIL_MUL(pixels_per_line, align_in_bytes); - - if (!raw_packed) - bits_per_pixel = CEIL_MUL(bits_per_pixel, 8); - - pixels_per_word = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel; - words_per_line = 
ceil_div(pixels_per_line_padded, pixels_per_word); - bytes_per_line = HIVE_ISP_DDR_WORD_BYTES * words_per_line; - - return bytes_per_line; -} - -static bool acquire_ib_buffer( - int32_t bits_per_pixel, - int32_t pixels_per_line, - int32_t lines_per_frame, - int32_t align_in_bytes, - bool online, - ib_buffer_t *buf) -{ - buf->stride = calculate_stride(bits_per_pixel, pixels_per_line, false, align_in_bytes); - if (online) - buf->lines = 4; /* use double buffering for online usecases */ - else - buf->lines = 2; - - (void)(lines_per_frame); - return ia_css_isys_ibuf_rmgr_acquire(buf->stride * buf->lines, &buf->start_addr); -} - -static void release_ib_buffer( - ib_buffer_t *buf) -{ - ia_css_isys_ibuf_rmgr_release(&buf->start_addr); -} - -static bool acquire_dma_channel( - isys2401_dma_ID_t dma_id, - isys2401_dma_channel *channel) -{ - return ia_css_isys_dma_channel_rmgr_acquire(dma_id, channel); -} - -static void release_dma_channel( - isys2401_dma_ID_t dma_id, - isys2401_dma_channel *channel) -{ - ia_css_isys_dma_channel_rmgr_release(dma_id, channel); -} - -static bool acquire_be_lut_entry( - csi_rx_backend_ID_t backend, - csi_mipi_packet_type_t packet_type, - csi_rx_backend_lut_entry_t *entry) -{ - return ia_css_isys_csi_rx_lut_rmgr_acquire(backend, packet_type, entry); -} - -static void release_be_lut_entry( - csi_rx_backend_ID_t backend, - csi_mipi_packet_type_t packet_type, - csi_rx_backend_lut_entry_t *entry) -{ - ia_css_isys_csi_rx_lut_rmgr_release(backend, packet_type, entry); -} - -static bool calculate_tpg_cfg( - input_system_channel_t *channel, - input_system_input_port_t *input_port, - input_system_cfg_t *isys_cfg, - pixelgen_tpg_cfg_t *cfg) -{ - (void)channel; - (void)input_port; - - memcpy_s( - (void *)cfg, - sizeof(pixelgen_tpg_cfg_t), - (void *)(&(isys_cfg->tpg_port_attr)), - sizeof(pixelgen_tpg_cfg_t)); - return true; -} - -static bool calculate_prbs_cfg( - input_system_channel_t *channel, - input_system_input_port_t *input_port, - input_system_cfg_t 
*isys_cfg, - pixelgen_prbs_cfg_t *cfg) -{ - (void)channel; - (void)input_port; - - memcpy_s( - (void *)cfg, - sizeof(pixelgen_prbs_cfg_t), - (void *)(&(isys_cfg->prbs_port_attr)), - sizeof(pixelgen_prbs_cfg_t)); - return true; -} - -static bool calculate_fe_cfg( - const input_system_cfg_t *isys_cfg, - csi_rx_frontend_cfg_t *cfg) -{ - cfg->active_lanes = isys_cfg->csi_port_attr.active_lanes; - return true; -} - -static bool calculate_be_cfg( - const input_system_input_port_t *input_port, - const input_system_cfg_t *isys_cfg, - bool metadata, - csi_rx_backend_cfg_t *cfg) -{ - - memcpy_s( - (void *)(&cfg->lut_entry), - sizeof(csi_rx_backend_lut_entry_t), - metadata ? (void *)(&input_port->metadata.backend_lut_entry) : - (void *)(&input_port->csi_rx.backend_lut_entry), - sizeof(csi_rx_backend_lut_entry_t)); - - cfg->csi_mipi_cfg.virtual_channel = isys_cfg->csi_port_attr.ch_id; - if (metadata) { - cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(isys_cfg->metadata.fmt_type); - cfg->csi_mipi_cfg.comp_enable = false; - cfg->csi_mipi_cfg.data_type = isys_cfg->metadata.fmt_type; - } - else { - cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(isys_cfg->csi_port_attr.fmt_type); - cfg->csi_mipi_cfg.data_type = isys_cfg->csi_port_attr.fmt_type; - cfg->csi_mipi_cfg.comp_enable = isys_cfg->csi_port_attr.comp_enable; - cfg->csi_mipi_cfg.comp_scheme = isys_cfg->csi_port_attr.comp_scheme; - cfg->csi_mipi_cfg.comp_predictor = isys_cfg->csi_port_attr.comp_predictor; - cfg->csi_mipi_cfg.comp_bit_idx = cfg->csi_mipi_cfg.data_type - MIPI_FORMAT_CUSTOM0; - } - - return true; -} - -static bool calculate_stream2mmio_cfg( - const input_system_cfg_t *isys_cfg, - bool metadata, - stream2mmio_cfg_t *cfg -) -{ - cfg->bits_per_pixel = metadata ? 
isys_cfg->metadata.bits_per_pixel : - isys_cfg->input_port_resolution.bits_per_pixel; - - cfg->enable_blocking = - ((isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_TPG) || - (isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_PRBS)); - - return true; -} - -static bool calculate_ibuf_ctrl_cfg( - const input_system_channel_t *channel, - const input_system_input_port_t *input_port, - const input_system_cfg_t *isys_cfg, - ibuf_ctrl_cfg_t *cfg) -{ - const int32_t bits_per_byte = 8; - int32_t bits_per_pixel; - int32_t bytes_per_pixel; - int32_t left_padding; - - (void)input_port; - - bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel; - bytes_per_pixel = ceil_div(bits_per_pixel, bits_per_byte); - - left_padding = CEIL_MUL(isys_cfg->output_port_attr.left_padding, ISP_VEC_NELEMS) - * bytes_per_pixel; - - cfg->online = isys_cfg->online; - - cfg->dma_cfg.channel = channel->dma_channel; - cfg->dma_cfg.cmd = _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND; - - cfg->dma_cfg.shift_returned_items = 0; - cfg->dma_cfg.elems_per_word_in_ibuf = 0; - cfg->dma_cfg.elems_per_word_in_dest = 0; - - cfg->ib_buffer.start_addr = channel->ib_buffer.start_addr; - cfg->ib_buffer.stride = channel->ib_buffer.stride; - cfg->ib_buffer.lines = channel->ib_buffer.lines; - - /* -#ifndef ISP2401 - * zhengjie.lu@intel.com: -#endif - * "dest_buf_cfg" should be part of the input system output - * port configuration. - * - * TODO: move "dest_buf_cfg" to the input system output - * port configuration. 
- */ - - /* input_buf addr only available in sched mode; - this buffer is allocated in isp, crun mode addr - can be passed by after ISP allocation */ - if (cfg->online) { - cfg->dest_buf_cfg.start_addr = ISP_INPUT_BUF_START_ADDR + left_padding; - cfg->dest_buf_cfg.stride = bytes_per_pixel - * isys_cfg->output_port_attr.max_isp_input_width; - cfg->dest_buf_cfg.lines = LINES_OF_ISP_INPUT_BUF; - } else if (isys_cfg->raw_packed) { - cfg->dest_buf_cfg.stride = calculate_stride(bits_per_pixel, - isys_cfg->input_port_resolution.pixels_per_line, - isys_cfg->raw_packed, - isys_cfg->input_port_resolution.align_req_in_bytes); - } else { - cfg->dest_buf_cfg.stride = channel->ib_buffer.stride; - } - - /* -#ifndef ISP2401 - * zhengjie.lu@intel.com: -#endif - * "items_per_store" is hard coded as "1", which is ONLY valid - * when the CSI-MIPI long packet is transferred. - * - * TODO: After the 1st stage of MERR+, make the proper solution to - * configure "items_per_store" so that it can also handle the CSI-MIPI - * short packet. 
- */ - cfg->items_per_store = 1; - - cfg->stores_per_frame = isys_cfg->input_port_resolution.lines_per_frame; - - - cfg->stream2mmio_cfg.sync_cmd = _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME; - - /* TODO: Define conditions as when to use store words vs store packets */ - cfg->stream2mmio_cfg.store_cmd = _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS; - - return true; -} - -static bool calculate_isys2401_dma_cfg( - const input_system_channel_t *channel, - const input_system_cfg_t *isys_cfg, - isys2401_dma_cfg_t *cfg) -{ - cfg->channel = channel->dma_channel; - - /* only online/sensor mode goto vmem - offline/buffered_sensor, tpg and prbs will go to ddr */ - if (isys_cfg->online) - cfg->connection = isys2401_dma_ibuf_to_vmem_connection; - else - cfg->connection = isys2401_dma_ibuf_to_ddr_connection; - - cfg->extension = isys2401_dma_zero_extension; - cfg->height = 1; - - return true; -} - -/* See also: ia_css_dma_configure_from_info() */ -static bool calculate_isys2401_dma_port_cfg( - const input_system_cfg_t *isys_cfg, - bool raw_packed, - bool metadata, - isys2401_dma_port_cfg_t *cfg) -{ - int32_t bits_per_pixel; - int32_t pixels_per_line; - int32_t align_req_in_bytes; - - /* TODO: Move metadata away from isys_cfg to application layer */ - if (metadata) { - bits_per_pixel = isys_cfg->metadata.bits_per_pixel; - pixels_per_line = isys_cfg->metadata.pixels_per_line; - align_req_in_bytes = isys_cfg->metadata.align_req_in_bytes; - } else { - bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel; - pixels_per_line = isys_cfg->input_port_resolution.pixels_per_line; - align_req_in_bytes = isys_cfg->input_port_resolution.align_req_in_bytes; - } - - cfg->stride = calculate_stride(bits_per_pixel, pixels_per_line, raw_packed, align_req_in_bytes); - - if (!raw_packed) - bits_per_pixel = CEIL_MUL(bits_per_pixel, 8); - - cfg->elements = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel; - cfg->cropping = 0; - cfg->width = CEIL_DIV(cfg->stride, HIVE_ISP_DDR_WORD_BYTES); - - return true; -} - -static 
csi_mipi_packet_type_t get_csi_mipi_packet_type( - int32_t data_type) -{ - csi_mipi_packet_type_t packet_type; - - packet_type = CSI_MIPI_PACKET_TYPE_RESERVED; - - if (data_type >= 0 && data_type <= MIPI_FORMAT_SHORT8) - packet_type = CSI_MIPI_PACKET_TYPE_SHORT; - - if (data_type > MIPI_FORMAT_SHORT8 && data_type <= N_MIPI_FORMAT) - packet_type = CSI_MIPI_PACKET_TYPE_LONG; - - return packet_type; -} -/* end of Private Methods */ -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.h deleted file mode 100644 index 66c7293c0a93..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __VIRTUAL_ISYS_H_INCLUDED__ -#define __VIRTUAL_ISYS_H_INCLUDED__ - -/* cmd for storing a number of packets indicated by reg _STREAM2MMIO_NUM_ITEMS*/ -#define _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS 1 - -/* command for waiting for a frame start */ -#define _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME 2 - -#endif /* __VIRTUAL_ISYS_H_INCLUDED__ */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h deleted file mode 100644 index 85ed7db0af55..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h +++ /dev/null @@ -1,302 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __IA_CSS_PIPELINE_H__ -#define __IA_CSS_PIPELINE_H__ - -#include "sh_css_internal.h" -#include "ia_css_pipe_public.h" -#include "ia_css_pipeline_common.h" - -#define IA_CSS_PIPELINE_NUM_MAX (20) - - -/* Pipeline stage to be executed on SP/ISP */ -struct ia_css_pipeline_stage { - unsigned int stage_num; - struct ia_css_binary *binary; /* built-in binary */ - struct ia_css_binary_info *binary_info; - const struct ia_css_fw_info *firmware; /* acceleration binary */ - /* SP function for SP stage */ - enum ia_css_pipeline_stage_sp_func sp_func; - unsigned max_input_width; /* For SP raw copy */ - struct sh_css_binary_args args; - int mode; - bool out_frame_allocated[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - bool vf_frame_allocated; - struct ia_css_pipeline_stage *next; - bool enable_zoom; -}; - -/* Pipeline of n stages to be executed on SP/ISP per stage */ -struct ia_css_pipeline { - enum ia_css_pipe_id pipe_id; - uint8_t pipe_num; - bool stop_requested; - struct ia_css_pipeline_stage *stages; - struct ia_css_pipeline_stage *current_stage; - unsigned num_stages; - struct ia_css_frame in_frame; - struct ia_css_frame out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct ia_css_frame vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - unsigned int dvs_frame_delay; - unsigned inout_port_config; - int num_execs; - bool acquire_isp_each_stage; - uint32_t pipe_qos_config; -}; - -#define DEFAULT_PIPELINE \ -(struct ia_css_pipeline) { \ - .pipe_id = IA_CSS_PIPE_ID_PREVIEW, \ - .in_frame = DEFAULT_FRAME, \ - .out_frame = {DEFAULT_FRAME}, \ - .vf_frame = {DEFAULT_FRAME}, \ - .dvs_frame_delay = IA_CSS_FRAME_DELAY_1, \ - .num_execs = -1, \ - .acquire_isp_each_stage = true, \ - .pipe_qos_config = QOS_INVALID \ -} - -/* Stage descriptor used to create a new stage in the pipeline */ -struct ia_css_pipeline_stage_desc { - struct ia_css_binary *binary; - const struct ia_css_fw_info *firmware; - enum ia_css_pipeline_stage_sp_func sp_func; - unsigned max_input_width; - unsigned int mode; 
- struct ia_css_frame *in_frame; - struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_frame *vf_frame; -}; - -/* @brief initialize the pipeline module - * - * @return None - * - * Initializes the pipeline module. This API has to be called - * before any operation on the pipeline module is done - */ -void ia_css_pipeline_init(void); - -/* @brief initialize the pipeline structure with default values - * - * @param[out] pipeline structure to be initialized with defaults - * @param[in] pipe_id - * @param[in] pipe_num Number that uniquely identifies a pipeline. - * @return IA_CSS_SUCCESS or error code upon error. - * - * Initializes the pipeline structure with a set of default values. - * This API is expected to be used when a pipeline structure is allocated - * externally and needs sane defaults - */ -enum ia_css_err ia_css_pipeline_create( - struct ia_css_pipeline *pipeline, - enum ia_css_pipe_id pipe_id, - unsigned int pipe_num, - unsigned int dvs_frame_delay); - -/* @brief destroy a pipeline - * - * @param[in] pipeline - * @return None - * - */ -void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline); - - -/* @brief Starts a pipeline - * - * @param[in] pipe_id - * @param[in] pipeline - * @return None - * - */ -void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id, - struct ia_css_pipeline *pipeline); - -/* @brief Request to stop a pipeline - * - * @param[in] pipeline - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline); - -/* @brief Check whether pipeline has stopped - * - * @param[in] pipeline - * @return true if the pipeline has stopped - * - */ -bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe); - -/* @brief clean all the stages pipeline and make it as new - * - * @param[in] pipeline - * @return None - * - */ -void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline); - -/* @brief Add a stage to pipeline. 
- * - * @param pipeline Pointer to the pipeline to be added to. - * @param[in] stage_desc The description of the stage - * @param[out] stage The successor of the stage. - * @return IA_CSS_SUCCESS or error code upon error. - * - * Add a new stage to a non-NULL pipeline. - * The stage consists of an ISP binary or firmware and input and output - * arguments. -*/ -enum ia_css_err ia_css_pipeline_create_and_add_stage( - struct ia_css_pipeline *pipeline, - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_pipeline_stage **stage); - -/* @brief Finalize the stages in a pipeline - * - * @param pipeline Pointer to the pipeline to be added to. - * @return None - * - * This API is expected to be called after adding all stages -*/ -void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline, - bool continuous); - -/* @brief gets a stage from the pipeline - * - * @param[in] pipeline - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline, - int mode, - struct ia_css_pipeline_stage **stage); - -/* @brief Gets a pipeline stage corresponding Firmware handle from the pipeline - * - * @param[in] pipeline - * @param[in] fw_handle - * @param[out] stage Pointer to Stage - * - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline *pipeline, - uint32_t fw_handle, - struct ia_css_pipeline_stage **stage); - -/* @brief Gets the Firmware handle correponding the stage num from the pipeline - * - * @param[in] pipeline - * @param[in] stage_num - * @param[out] fw_handle - * - * @return IA_CSS_SUCCESS or error code upon error. - * - */ -enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline *pipeline, - uint32_t stage_num, - uint32_t *fw_handle); - -/* @brief gets the output stage from the pipeline - * - * @param[in] pipeline - * @return IA_CSS_SUCCESS or error code upon error. 
- * - */ -enum ia_css_err ia_css_pipeline_get_output_stage( - struct ia_css_pipeline *pipeline, - int mode, - struct ia_css_pipeline_stage **stage); - -/* @brief Checks whether the pipeline uses params - * - * @param[in] pipeline - * @return true if the pipeline uses params - * - */ -bool ia_css_pipeline_uses_params(struct ia_css_pipeline *pipeline); - -/** - * @brief get the SP thread ID. - * - * @param[in] key The query key, typical use is pipe_num. - * @param[out] val The query value. - * - * @return - * true, if the query succeeds; - * false, if the query fails. - */ -bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val); - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) -/** - * @brief Get the pipeline io status - * - * @param[in] None - * @return - * Pointer to pipe_io_status - */ -struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void); -#endif - -/** - * @brief Map an SP thread to this pipeline - * - * @param[in] pipe_num - * @param[in] map true for mapping and false for unmapping sp threads. 
- * - */ -void ia_css_pipeline_map(unsigned int pipe_num, bool map); - -/** - * @brief Checks whether the pipeline is mapped to SP threads - * - * @param[in] Query key, typical use is pipe_num - * - * return - * true, pipeline is mapped to SP threads - * false, pipeline is not mapped to SP threads - */ -bool ia_css_pipeline_is_mapped(unsigned int key); - -/** - * @brief Print pipeline thread mapping - * - * @param[in] none - * - * return none - */ -void ia_css_pipeline_dump_thread_map_info(void); - -#endif /*__IA_CSS_PIPELINE_H__*/ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline_common.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline_common.h deleted file mode 100644 index a7e6edf41cdb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline_common.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. 
- -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __IA_CSS_PIPELINE_COMMON_H__ -#define __IA_CSS_PIPELINE_COMMON_H__ - -enum ia_css_pipeline_stage_sp_func { - IA_CSS_PIPELINE_RAW_COPY = 0, - IA_CSS_PIPELINE_BIN_COPY = 1, - IA_CSS_PIPELINE_ISYS_COPY = 2, - IA_CSS_PIPELINE_NO_FUNC = 3, -}; -#define IA_CSS_PIPELINE_NUM_STAGE_FUNCS 3 - -#endif /*__IA_CSS_PIPELINE_COMMON_H__*/ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c deleted file mode 100644 index 4746620ca212..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c +++ /dev/null @@ -1,805 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. 
- -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include "ia_css_debug.h" -#include "sw_event_global.h" /* encode_sw_event */ -#include "sp.h" /* cnd_sp_irq_enable() */ -#include "assert_support.h" -#include "memory_access.h" -#include "sh_css_sp.h" -#include "ia_css_pipeline.h" -#include "ia_css_isp_param.h" -#include "ia_css_bufq.h" - -#define PIPELINE_NUM_UNMAPPED (~0U) -#define PIPELINE_SP_THREAD_EMPTY_TOKEN (0x0) -#define PIPELINE_SP_THREAD_RESERVED_TOKEN (0x1) - - -/******************************************************* -*** Static variables -********************************************************/ -static unsigned int pipeline_num_to_sp_thread_map[IA_CSS_PIPELINE_NUM_MAX]; -static unsigned int pipeline_sp_thread_list[SH_CSS_MAX_SP_THREADS]; - -/******************************************************* -*** Static functions -********************************************************/ -static void pipeline_init_sp_thread_map(void); -static void pipeline_map_num_to_sp_thread(unsigned int pipe_num); -static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num); -static void pipeline_init_defaults( - struct ia_css_pipeline *pipeline, - enum ia_css_pipe_id pipe_id, - unsigned int pipe_num, - unsigned int dvs_frame_delay); - -static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage); -static enum ia_css_err pipeline_stage_create( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_pipeline_stage **new_stage); -static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline); -static void ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me, - bool continuous); - -/******************************************************* -*** Public functions -********************************************************/ -void 
ia_css_pipeline_init(void) -{ - pipeline_init_sp_thread_map(); -} - -enum ia_css_err ia_css_pipeline_create( - struct ia_css_pipeline *pipeline, - enum ia_css_pipe_id pipe_id, - unsigned int pipe_num, - unsigned int dvs_frame_delay) -{ - assert(pipeline != NULL); - IA_CSS_ENTER_PRIVATE("pipeline = %p, pipe_id = %d, pipe_num = %d, dvs_frame_delay = %d", - pipeline, pipe_id, pipe_num, dvs_frame_delay); - if (pipeline == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - pipeline_init_defaults(pipeline, pipe_id, pipe_num, dvs_frame_delay); - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -void ia_css_pipeline_map(unsigned int pipe_num, bool map) -{ - assert(pipe_num < IA_CSS_PIPELINE_NUM_MAX); - IA_CSS_ENTER_PRIVATE("pipe_num = %d, map = %d", pipe_num, map); - - if (pipe_num >= IA_CSS_PIPELINE_NUM_MAX) { - IA_CSS_ERROR("Invalid pipe number"); - IA_CSS_LEAVE_PRIVATE("void"); - return; - } - if (map) - pipeline_map_num_to_sp_thread(pipe_num); - else - pipeline_unmap_num_to_sp_thread(pipe_num); - IA_CSS_LEAVE_PRIVATE("void"); -} - -/* @brief destroy a pipeline - * - * @param[in] pipeline - * @return None - * - */ -void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline) -{ - assert(pipeline != NULL); - IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline); - - if (pipeline == NULL) { - IA_CSS_ERROR("NULL input parameter"); - IA_CSS_LEAVE_PRIVATE("void"); - return; - } - - IA_CSS_LOG("pipe_num = %d", pipeline->pipe_num); - - /* Free the pipeline number */ - ia_css_pipeline_clean(pipeline); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -/* Run a pipeline and wait till it completes. 
*/ -void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id, - struct ia_css_pipeline *pipeline) -{ - uint8_t pipe_num = 0; - unsigned int thread_id; - - assert(pipeline != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_start() enter: pipe_id=%d, pipeline=%p\n", - pipe_id, pipeline); - pipeline->pipe_id = pipe_id; - sh_css_sp_init_pipeline(pipeline, pipe_id, pipe_num, - false, false, false, true, SH_CSS_BDS_FACTOR_1_00, - SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD, -#ifndef ISP2401 - IA_CSS_INPUT_MODE_MEMORY, NULL, NULL -#else - IA_CSS_INPUT_MODE_MEMORY, NULL, NULL, -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) -#ifndef ISP2401 - , (enum mipi_port_id) 0 -#else - (enum mipi_port_id) 0, -#endif -#endif -#ifndef ISP2401 - ); -#else - NULL, NULL); -#endif - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - if (!sh_css_sp_is_running()) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_start() error,leaving\n"); - /* queues are invalid*/ - return; - } - ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM, - (uint8_t)thread_id, - 0, - 0); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_start() leave: return_void\n"); -} - -/* - * @brief Query the SP thread ID. - * Refer to "sh_css_internal.h" for details. 
- */ -bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val) -{ - - IA_CSS_ENTER("key=%d, val=%p", key, val); - - if ((val == NULL) || (key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) { - IA_CSS_LEAVE("return value = false"); - return false; - } - - *val = pipeline_num_to_sp_thread_map[key]; - - if (*val == (unsigned)PIPELINE_NUM_UNMAPPED) { - IA_CSS_LOG("unmapped pipeline number"); - IA_CSS_LEAVE("return value = false"); - return false; - } - IA_CSS_LEAVE("return value = true"); - return true; -} - -void ia_css_pipeline_dump_thread_map_info(void) -{ - unsigned int i; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "pipeline_num_to_sp_thread_map:\n"); - for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "pipe_num: %u, tid: 0x%x\n", i, pipeline_num_to_sp_thread_map[i]); - } -} - -enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned int thread_id; - - assert(pipeline != NULL); - - if (pipeline == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_request_stop() enter: pipeline=%p\n", - pipeline); - pipeline->stop_requested = true; - - /* Send stop event to the sp*/ - /* This needs improvement, stop on all the pipes available - * in the stream*/ - ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id); - if (!sh_css_sp_is_running()) - { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_request_stop() leaving\n"); - /* queues are invalid */ - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_STOP_STREAM, - (uint8_t)thread_id, - 0, - 0); - sh_css_sp_uninit_pipeline(pipeline->pipe_num); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_request_stop() leave: return_err=%d\n", - err); - return err; -} - -void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline) -{ - struct 
ia_css_pipeline_stage *s; - - assert(pipeline != NULL); - IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline); - - if (pipeline == NULL) { - IA_CSS_ERROR("NULL input parameter"); - IA_CSS_LEAVE_PRIVATE("void"); - return; - } - s = pipeline->stages; - - while (s) { - struct ia_css_pipeline_stage *next = s->next; - pipeline_stage_destroy(s); - s = next; - } - pipeline_init_defaults(pipeline, pipeline->pipe_id, pipeline->pipe_num, pipeline->dvs_frame_delay); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -/* @brief Add a stage to pipeline. - * - * @param pipeline Pointer to the pipeline to be added to. - * @param[in] stage_desc The description of the stage - * @param[out] stage The successor of the stage. - * @return IA_CSS_SUCCESS or error code upon error. - * - * Add a new stage to a non-NULL pipeline. - * The stage consists of an ISP binary or firmware and input and - * output arguments. -*/ -enum ia_css_err ia_css_pipeline_create_and_add_stage( - struct ia_css_pipeline *pipeline, - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_pipeline_stage **stage) -{ - struct ia_css_pipeline_stage *last, *new_stage = NULL; - enum ia_css_err err; - - /* other arguments can be NULL */ - assert(pipeline != NULL); - assert(stage_desc != NULL); - last = pipeline->stages; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_create_and_add_stage() enter:\n"); - if (!stage_desc->binary && !stage_desc->firmware - && (stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_create_and_add_stage() done:" - " Invalid args\n"); - - return IA_CSS_ERR_INTERNAL_ERROR; - } - - /* Find the last stage */ - while (last && last->next) - last = last->next; - - /* if in_frame is not set, we use the out_frame from the previous - * stage, if no previous stage, it's an error. 
- */ - if ((stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC) - && (!stage_desc->in_frame) - && (!stage_desc->firmware) - && (!stage_desc->binary->online)) { - - /* Do this only for ISP stages*/ - if (last && last->args.out_frame[0]) - stage_desc->in_frame = last->args.out_frame[0]; - - if (!stage_desc->in_frame) - return IA_CSS_ERR_INTERNAL_ERROR; - } - - /* Create the new stage */ - err = pipeline_stage_create(stage_desc, &new_stage); - if (err != IA_CSS_SUCCESS) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_create_and_add_stage() done:" - " stage_create_failed\n"); - return err; - } - - if (last) - last->next = new_stage; - else - pipeline->stages = new_stage; - - /* Output the new stage */ - if (stage) - *stage = new_stage; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_create_and_add_stage() done:\n"); - return IA_CSS_SUCCESS; -} - -void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline, - bool continuous) -{ - unsigned i = 0; - struct ia_css_pipeline_stage *stage; - - assert(pipeline != NULL); - for (stage = pipeline->stages; stage; stage = stage->next) { - stage->stage_num = i; - i++; - } - pipeline->num_stages = i; - - ia_css_pipeline_set_zoom_stage(pipeline); - ia_css_pipeline_configure_inout_port(pipeline, continuous); -} - -enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline, - int mode, - struct ia_css_pipeline_stage **stage) -{ - struct ia_css_pipeline_stage *s; - assert(pipeline != NULL); - assert(stage != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_get_stage() enter:\n"); - for (s = pipeline->stages; s; s = s->next) { - if (s->mode == mode) { - *stage = s; - return IA_CSS_SUCCESS; - } - } - return IA_CSS_ERR_INTERNAL_ERROR; -} - -enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline *pipeline, - uint32_t fw_handle, - struct ia_css_pipeline_stage **stage) -{ - struct ia_css_pipeline_stage *s; - assert(pipeline != NULL); - assert(stage != 
NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,"%s() \n",__func__); - for (s = pipeline->stages; s; s = s->next) { - if ((s->firmware) && (s->firmware->handle == fw_handle)) { - *stage = s; - return IA_CSS_SUCCESS; - } - } - return IA_CSS_ERR_INTERNAL_ERROR; -} - -enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline *pipeline, - uint32_t stage_num, - uint32_t *fw_handle) -{ - struct ia_css_pipeline_stage *s; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,"%s() \n",__func__); - if ((pipeline == NULL) || (fw_handle == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - for (s = pipeline->stages; s; s = s->next) { - if((s->stage_num == stage_num) && (s->firmware)) { - *fw_handle = s->firmware->handle; - return IA_CSS_SUCCESS; - } - } - return IA_CSS_ERR_INTERNAL_ERROR; -} - -enum ia_css_err ia_css_pipeline_get_output_stage( - struct ia_css_pipeline *pipeline, - int mode, - struct ia_css_pipeline_stage **stage) -{ - struct ia_css_pipeline_stage *s; - assert(pipeline != NULL); - assert(stage != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_get_output_stage() enter:\n"); - - *stage = NULL; - /* First find acceleration firmware at end of pipe */ - for (s = pipeline->stages; s; s = s->next) { - if (s->firmware && s->mode == mode && - s->firmware->info.isp.sp.enable.output) - *stage = s; - } - if (*stage) - return IA_CSS_SUCCESS; - /* If no firmware, find binary in pipe */ - return ia_css_pipeline_get_stage(pipeline, mode, stage); -} - -bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipeline) -{ - /* Android compilation files if made an local variable - stack size on android is limited to 2k and this structure - is around 2.5K, in place of static malloc can be done but - if this call is made too often it will lead to fragment memory - versus a fixed allocation */ - static struct sh_css_sp_group sp_group; - unsigned int thread_id; - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_sp_group; - - fw = &sh_css_sp_fw; - 
HIVE_ADDR_sp_group = fw->info.sp.group; - - ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id); - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(sp_group), - &sp_group, sizeof(struct sh_css_sp_group)); - return sp_group.pipe[thread_id].num_stages == 0; -} - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) -struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void) -{ - return(&sh_css_sp_group.pipe_io_status); -} -#endif - -bool ia_css_pipeline_is_mapped(unsigned int key) -{ - bool ret = false; - - IA_CSS_ENTER_PRIVATE("key = %d", key); - - if ((key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) { - IA_CSS_ERROR("Invalid key!!"); - IA_CSS_LEAVE_PRIVATE("return = %d", false); - return false; - } - - ret = (bool)(pipeline_num_to_sp_thread_map[key] != (unsigned)PIPELINE_NUM_UNMAPPED); - - IA_CSS_LEAVE_PRIVATE("return = %d", ret); - return ret; -} - -/******************************************************* -*** Static functions -********************************************************/ - -/* Pipeline: - * To organize the several different binaries for each type of mode, - * we use a pipeline. A pipeline contains a number of stages, each with - * their own binary and frame pointers. - * When stages are added to a pipeline, output frames that are not passed - * from outside are automatically allocated. - * When input frames are not passed from outside, each stage will use the - * output frame of the previous stage as input (the full resolution output, - * not the viewfinder output). - * Pipelines must be cleaned and re-created when settings of the binaries - * change. 
- */ -static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage) -{ - unsigned int i; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - if (stage->out_frame_allocated[i]) { - ia_css_frame_free(stage->args.out_frame[i]); - stage->args.out_frame[i] = NULL; - } - } - if (stage->vf_frame_allocated) { - ia_css_frame_free(stage->args.out_vf_frame); - stage->args.out_vf_frame = NULL; - } - sh_css_free(stage); -} - -static void pipeline_init_sp_thread_map(void) -{ - unsigned int i; - - for (i = 1; i < SH_CSS_MAX_SP_THREADS; i++) - pipeline_sp_thread_list[i] = PIPELINE_SP_THREAD_EMPTY_TOKEN; - - for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) - pipeline_num_to_sp_thread_map[i] = PIPELINE_NUM_UNMAPPED; -} - -static void pipeline_map_num_to_sp_thread(unsigned int pipe_num) -{ - unsigned int i; - bool found_sp_thread = false; - - /* pipe is not mapped to any thread */ - assert(pipeline_num_to_sp_thread_map[pipe_num] - == (unsigned)PIPELINE_NUM_UNMAPPED); - - for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) { - if (pipeline_sp_thread_list[i] == - PIPELINE_SP_THREAD_EMPTY_TOKEN) { - pipeline_sp_thread_list[i] = - PIPELINE_SP_THREAD_RESERVED_TOKEN; - pipeline_num_to_sp_thread_map[pipe_num] = i; - found_sp_thread = true; - break; - } - } - - /* Make sure a mapping is found */ - /* I could do: - assert(i < SH_CSS_MAX_SP_THREADS); - - But the below is more descriptive. 
- */ - assert(found_sp_thread); -} - -static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num) -{ - unsigned int thread_id; - assert(pipeline_num_to_sp_thread_map[pipe_num] - != (unsigned)PIPELINE_NUM_UNMAPPED); - - thread_id = pipeline_num_to_sp_thread_map[pipe_num]; - pipeline_num_to_sp_thread_map[pipe_num] = PIPELINE_NUM_UNMAPPED; - pipeline_sp_thread_list[thread_id] = PIPELINE_SP_THREAD_EMPTY_TOKEN; -} - -static enum ia_css_err pipeline_stage_create( - struct ia_css_pipeline_stage_desc *stage_desc, - struct ia_css_pipeline_stage **new_stage) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipeline_stage *stage = NULL; - struct ia_css_binary *binary; - struct ia_css_frame *vf_frame; - struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - const struct ia_css_fw_info *firmware; - unsigned int i; - - /* Verify input parameters*/ - if (!(stage_desc->in_frame) && !(stage_desc->firmware) - && (stage_desc->binary) && !(stage_desc->binary->online)) { - err = IA_CSS_ERR_INTERNAL_ERROR; - goto ERR; - } - - binary = stage_desc->binary; - firmware = stage_desc->firmware; - vf_frame = stage_desc->vf_frame; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - out_frame[i] = stage_desc->out_frame[i]; - } - - stage = sh_css_malloc(sizeof(*stage)); - if (stage == NULL) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - memset(stage, 0, sizeof(*stage)); - - if (firmware) { - stage->binary = NULL; - stage->binary_info = - (struct ia_css_binary_info *)&firmware->info.isp; - } else { - stage->binary = binary; - if (binary) - stage->binary_info = - (struct ia_css_binary_info *)binary->info; - else - stage->binary_info = NULL; - } - - stage->firmware = firmware; - stage->sp_func = stage_desc->sp_func; - stage->max_input_width = stage_desc->max_input_width; - stage->mode = stage_desc->mode; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - stage->out_frame_allocated[i] = false; - stage->vf_frame_allocated = false; - stage->next 
= NULL; - sh_css_binary_args_reset(&stage->args); - - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - if (!(out_frame[i]) && (binary) - && (binary->out_frame_info[i].res.width)) { - err = ia_css_frame_allocate_from_info(&out_frame[i], - &binary->out_frame_info[i]); - if (err != IA_CSS_SUCCESS) - goto ERR; - stage->out_frame_allocated[i] = true; - } - } - /* VF frame is not needed in case of need_pp - However, the capture binary needs a vf frame to write to. - */ - if (!vf_frame) { - if ((binary && binary->vf_frame_info.res.width) || - (firmware && firmware->info.isp.sp.enable.vf_veceven) - ) { - err = ia_css_frame_allocate_from_info(&vf_frame, - &binary->vf_frame_info); - if (err != IA_CSS_SUCCESS) - goto ERR; - stage->vf_frame_allocated = true; - } - } else if (vf_frame && binary && binary->vf_frame_info.res.width - && !firmware) { - /* only mark as allocated if buffer pointer available */ - if (vf_frame->data != mmgr_NULL) - stage->vf_frame_allocated = true; - } - - stage->args.in_frame = stage_desc->in_frame; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - stage->args.out_frame[i] = out_frame[i]; - stage->args.out_vf_frame = vf_frame; - *new_stage = stage; - return err; -ERR: - if (stage != NULL) - pipeline_stage_destroy(stage); - return err; -} - -static void pipeline_init_defaults( - struct ia_css_pipeline *pipeline, - enum ia_css_pipe_id pipe_id, - unsigned int pipe_num, - unsigned int dvs_frame_delay) -{ - unsigned int i; - - pipeline->pipe_id = pipe_id; - pipeline->stages = NULL; - pipeline->stop_requested = false; - pipeline->current_stage = NULL; - pipeline->in_frame = DEFAULT_FRAME; - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - pipeline->out_frame[i] = DEFAULT_FRAME; - pipeline->vf_frame[i] = DEFAULT_FRAME; - } - pipeline->num_execs = -1; - pipeline->acquire_isp_each_stage = true; - pipeline->pipe_num = (uint8_t)pipe_num; - pipeline->dvs_frame_delay = dvs_frame_delay; -} - -static void ia_css_pipeline_set_zoom_stage(struct 
ia_css_pipeline *pipeline) -{ - struct ia_css_pipeline_stage *stage = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - - assert(pipeline != NULL); - if (pipeline->pipe_id == IA_CSS_PIPE_ID_PREVIEW) { - /* in preview pipeline, vf_pp stage should do zoom */ - err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_VF_PP, &stage); - if (err == IA_CSS_SUCCESS) - stage->enable_zoom = true; - } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_CAPTURE) { - /* in capture pipeline, capture_pp stage should do zoom */ - err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP, &stage); - if (err == IA_CSS_SUCCESS) - stage->enable_zoom = true; - } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_VIDEO) { - /* in video pipeline, video stage should do zoom */ - err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_VIDEO, &stage); - if (err == IA_CSS_SUCCESS) - stage->enable_zoom = true; - } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_YUVPP) { - /* in yuvpp pipeline, first yuv_scaler stage should do zoom */ - err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP, &stage); - if (err == IA_CSS_SUCCESS) - stage->enable_zoom = true; - } -} - -static void -ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me, bool continuous) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "ia_css_pipeline_configure_inout_port() enter: pipe_id(%d) continuous(%d)\n", - me->pipe_id, continuous); - switch (me->pipe_id) { - case IA_CSS_PIPE_ID_PREVIEW: - case IA_CSS_PIPE_ID_VIDEO: - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_INPUT, - (uint8_t)(continuous ? 
SH_CSS_COPYSINK_TYPE : SH_CSS_HOST_TYPE), 1); - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - break; - case IA_CSS_PIPE_ID_COPY: /*Copy pipe ports configured to "offline" mode*/ - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_INPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - if (continuous) { - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_COPYSINK_TYPE, 1); - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_TAGGERSINK_TYPE, 1); - } else { - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - } - break; - case IA_CSS_PIPE_ID_CAPTURE: - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_INPUT, - (uint8_t)(continuous ? SH_CSS_TAGGERSINK_TYPE : SH_CSS_HOST_TYPE), - 1); - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - break; - case IA_CSS_PIPE_ID_YUVPP: - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_INPUT, - (uint8_t)(SH_CSS_HOST_TYPE), 1); - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - break; - case IA_CSS_PIPE_ID_ACC: - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_INPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - break; - default: - break; - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "ia_css_pipeline_configure_inout_port() leave: inout_port_config(%x)\n", - me->inout_port_config); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h 
deleted file mode 100644 index aaf2e247cafb..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h +++ /dev/null @@ -1,192 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __IA_CSS_QUEUE_H -#define __IA_CSS_QUEUE_H - -#include -#include - -#include "ia_css_queue_comm.h" -#include "../src/queue_access.h" - -/* Local Queue object descriptor */ -struct ia_css_queue_local { - ia_css_circbuf_desc_t *cb_desc; /*Circbuf desc for local queues*/ - ia_css_circbuf_elem_t *cb_elems; /*Circbuf elements*/ -}; -typedef struct ia_css_queue_local ia_css_queue_local_t; - -/* Handle for queue object*/ -typedef struct ia_css_queue ia_css_queue_t; - - -/***************************************************************************** - * Queue Public APIs - *****************************************************************************/ -/* @brief Initialize a local queue instance. - * - * @param[out] qhandle. Handle to queue instance for use with API - * @param[in] desc. Descriptor with queue properties filled-in - * @return 0 - Successful init of local queue instance. - * @return EINVAL - Invalid argument. - * - */ -extern int ia_css_queue_local_init( - ia_css_queue_t *qhandle, - ia_css_queue_local_t *desc); - -/* @brief Initialize a remote queue instance - * - * @param[out] qhandle. Handle to queue instance for use with API - * @param[in] desc. Descriptor with queue properties filled-in - * @return 0 - Successful init of remote queue instance. - * @return EINVAL - Invalid argument. - */ -extern int ia_css_queue_remote_init( - ia_css_queue_t *qhandle, - ia_css_queue_remote_t *desc); - -/* @brief Uninitialize a queue instance - * - * @param[in] qhandle. Handle to queue instance - * @return 0 - Successful uninit. - * - */ -extern int ia_css_queue_uninit( - ia_css_queue_t *qhandle); - -/* @brief Enqueue an item in the queue instance - * - * @param[in] qhandle. Handle to queue instance - * @param[in] item. Object to be enqueued. - * @return 0 - Successful enqueue. - * @return EINVAL - Invalid argument. - * @return ENOBUFS - Queue is full. 
- * - */ -extern int ia_css_queue_enqueue( - ia_css_queue_t *qhandle, - uint32_t item); - -/* @brief Dequeue an item from the queue instance - * - * @param[in] qhandle. Handle to queue instance - * @param[out] item. Object to be dequeued into this item. - - * @return 0 - Successful dequeue. - * @return EINVAL - Invalid argument. - * @return ENODATA - Queue is empty. - * - */ -extern int ia_css_queue_dequeue( - ia_css_queue_t *qhandle, - uint32_t *item); - -/* @brief Check if the queue is empty - * - * @param[in] qhandle. Handle to queue instance - * @param[in] is_empty True if empty, False if not. - * @return 0 - Successful access state. - * @return EINVAL - Invalid argument. - * @return ENOSYS - Function not implemented. - * - */ -extern int ia_css_queue_is_empty( - ia_css_queue_t *qhandle, - bool *is_empty); - -/* @brief Check if the queue is full - * - * @param[in] qhandle. Handle to queue instance - * @param[in] is_full True if Full, False if not. - * @return 0 - Successfully access state. - * @return EINVAL - Invalid argument. - * @return ENOSYS - Function not implemented. - * - */ -extern int ia_css_queue_is_full( - ia_css_queue_t *qhandle, - bool *is_full); - -/* @brief Get used space in the queue - * - * @param[in] qhandle. Handle to queue instance - * @param[in] size Number of available elements in the queue - * @return 0 - Successfully access state. - * @return EINVAL - Invalid argument. - * - */ -extern int ia_css_queue_get_used_space( - ia_css_queue_t *qhandle, - uint32_t *size); - -/* @brief Get free space in the queue - * - * @param[in] qhandle. Handle to queue instance - * @param[in] size Number of free elements in the queue - * @return 0 - Successfully access state. - * @return EINVAL - Invalid argument. - * - */ -extern int ia_css_queue_get_free_space( - ia_css_queue_t *qhandle, - uint32_t *size); - -/* @brief Peek at an element in the queue - * - * @param[in] qhandle. 
Handle to queue instance - * @param[in] offset Offset of element to peek, - * starting from head of queue - * @param[in] element Value of element returned - * @return 0 - Successfully access state. - * @return EINVAL - Invalid argument. - * - */ -extern int ia_css_queue_peek( - ia_css_queue_t *qhandle, - uint32_t offset, - uint32_t *element); - -/* @brief Get the usable size for the queue - * - * @param[in] qhandle. Handle to queue instance - * @param[out] size Size value to be returned here. - * @return 0 - Successful get size. - * @return EINVAL - Invalid argument. - * @return ENOSYS - Function not implemented. - * - */ -extern int ia_css_queue_get_size( - ia_css_queue_t *qhandle, - uint32_t *size); - -#endif /* __IA_CSS_QUEUE_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue_comm.h deleted file mode 100644 index 4ebaeb0c1847..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue_comm.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. 
- -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __IA_CSS_QUEUE_COMM_H -#define __IA_CSS_QUEUE_COMM_H - -#include "type_support.h" -#include "ia_css_circbuf.h" -/***************************************************************************** - * Queue Public Data Structures - *****************************************************************************/ - -/* Queue location specifier */ -/* Avoiding enums to save space */ -#define IA_CSS_QUEUE_LOC_HOST 0 -#define IA_CSS_QUEUE_LOC_SP 1 -#define IA_CSS_QUEUE_LOC_ISP 2 - -/* Queue type specifier */ -/* Avoiding enums to save space */ -#define IA_CSS_QUEUE_TYPE_LOCAL 0 -#define IA_CSS_QUEUE_TYPE_REMOTE 1 - -/* for DDR Allocated queues, -allocate minimum these many elements. -DDR->SP' DMEM DMA transfer needs 32byte aligned address. 
-Since each element size is 4 bytes, 8 elements need to be -DMAed to access single element.*/ -#define IA_CSS_MIN_ELEM_COUNT 8 -#define IA_CSS_DMA_XFER_MASK (IA_CSS_MIN_ELEM_COUNT - 1) - -/* Remote Queue object descriptor */ -struct ia_css_queue_remote { - uint32_t cb_desc_addr; /*Circbuf desc address for remote queues*/ - uint32_t cb_elems_addr; /*Circbuf elements addr for remote queue*/ - uint8_t location; /* Cell location for queue */ - uint8_t proc_id; /* Processor id for queue access */ -}; -typedef struct ia_css_queue_remote ia_css_queue_remote_t; - - -#endif /* __IA_CSS_QUEUE_COMM_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue.c deleted file mode 100644 index 606376fdf0ba..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue.c +++ /dev/null @@ -1,412 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_queue.h" -#include -#include -#include -#include "queue_access.h" - -/***************************************************************************** - * Queue Public APIs - *****************************************************************************/ -int ia_css_queue_local_init( - ia_css_queue_t *qhandle, - ia_css_queue_local_t *desc) -{ - if (NULL == qhandle || NULL == desc - || NULL == desc->cb_elems || NULL == desc->cb_desc) { - /* Invalid parameters, return error*/ - return EINVAL; - } - - /* Mark the queue as Local */ - qhandle->type = IA_CSS_QUEUE_TYPE_LOCAL; - - /* Create a local circular buffer queue*/ - ia_css_circbuf_create(&qhandle->desc.cb_local, - desc->cb_elems, - desc->cb_desc); - - return 0; -} - -int ia_css_queue_remote_init( - ia_css_queue_t *qhandle, - ia_css_queue_remote_t *desc) -{ - if (NULL == qhandle || NULL == desc) { - /* Invalid parameters, return error*/ - return EINVAL; - } - - /* Mark the queue as remote*/ - qhandle->type = IA_CSS_QUEUE_TYPE_REMOTE; - - /* Copy over the local queue descriptor*/ - qhandle->location = desc->location; - qhandle->proc_id = desc->proc_id; - qhandle->desc.remote.cb_desc_addr = desc->cb_desc_addr; - qhandle->desc.remote.cb_elems_addr = desc->cb_elems_addr; - - /* If queue is remote, we let the local processor - * do its init, before using it. This is just to get us - * started, we can remove this restriction as we go ahead - */ - - return 0; -} - -int ia_css_queue_uninit( - ia_css_queue_t *qhandle) -{ - if (!qhandle) - return EINVAL; - - /* Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Local queues are created. Destroy it*/ - ia_css_circbuf_destroy(&qhandle->desc.cb_local); - } - - return 0; -} - -int ia_css_queue_enqueue( - ia_css_queue_t *qhandle, - uint32_t item) -{ - int error = 0; - if (NULL == qhandle) - return EINVAL; - - /* 1. 
Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Directly de-ref the object and - * operate on the queue - */ - if (ia_css_circbuf_is_full(&qhandle->desc.cb_local)) { - /* Cannot push the element. Return*/ - return ENOBUFS; - } - - /* Push the element*/ - ia_css_circbuf_push(&qhandle->desc.cb_local, item); - } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) { - ia_css_circbuf_desc_t cb_desc; - ia_css_circbuf_elem_t cb_elem; - uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG; - - /* a. Load the queue cb_desc from remote */ - QUEUE_CB_DESC_INIT(&cb_desc); - error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - - /* b. Operate on the queue */ - if (ia_css_circbuf_desc_is_full(&cb_desc)) - return ENOBUFS; - - cb_elem.val = item; - - error = ia_css_queue_item_store(qhandle, cb_desc.end, &cb_elem); - if (error != 0) - return error; - - cb_desc.end = (cb_desc.end + 1) % cb_desc.size; - - /* c. Store the queue object */ - /* Set only fields requiring update with - * valid value. Avoids uncessary calls - * to load/store functions - */ - ignore_desc_flags = QUEUE_IGNORE_SIZE_START_STEP_FLAGS; - - error = ia_css_queue_store(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - } - - return 0; -} - -int ia_css_queue_dequeue( - ia_css_queue_t *qhandle, - uint32_t *item) -{ - int error = 0; - if (qhandle == NULL || NULL == item) - return EINVAL; - - /* 1. Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Directly de-ref the object and - * operate on the queue - */ - if (ia_css_circbuf_is_empty(&qhandle->desc.cb_local)) { - /* Nothing to pop. Return empty queue*/ - return ENODATA; - } - - *item = ia_css_circbuf_pop(&qhandle->desc.cb_local); - } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) { - /* a. 
Load the queue from remote */ - ia_css_circbuf_desc_t cb_desc; - ia_css_circbuf_elem_t cb_elem; - uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG; - - QUEUE_CB_DESC_INIT(&cb_desc); - - error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - - /* b. Operate on the queue */ - if (ia_css_circbuf_desc_is_empty(&cb_desc)) - return ENODATA; - - error = ia_css_queue_item_load(qhandle, cb_desc.start, &cb_elem); - if (error != 0) - return error; - - *item = cb_elem.val; - - cb_desc.start = OP_std_modadd(cb_desc.start, 1, cb_desc.size); - - /* c. Store the queue object */ - /* Set only fields requiring update with - * valid value. Avoids uncessary calls - * to load/store functions - */ - ignore_desc_flags = QUEUE_IGNORE_SIZE_END_STEP_FLAGS; - error = ia_css_queue_store(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - } - return 0; -} - -int ia_css_queue_is_full( - ia_css_queue_t *qhandle, - bool *is_full) -{ - int error = 0; - if ((qhandle == NULL) || (is_full == NULL)) - return EINVAL; - - /* 1. Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Directly de-ref the object and - * operate on the queue - */ - *is_full = ia_css_circbuf_is_full(&qhandle->desc.cb_local); - return 0; - } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) { - /* a. Load the queue from remote */ - ia_css_circbuf_desc_t cb_desc; - uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG; - QUEUE_CB_DESC_INIT(&cb_desc); - error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - - /* b. Operate on the queue */ - *is_full = ia_css_circbuf_desc_is_full(&cb_desc); - return 0; - } - - return EINVAL; -} - -int ia_css_queue_get_free_space( - ia_css_queue_t *qhandle, - uint32_t *size) -{ - int error = 0; - if ((qhandle == NULL) || (size == NULL)) - return EINVAL; - - /* 1. 
Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Directly de-ref the object and - * operate on the queue - */ - *size = ia_css_circbuf_get_free_elems(&qhandle->desc.cb_local); - return 0; - } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) { - /* a. Load the queue from remote */ - ia_css_circbuf_desc_t cb_desc; - uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG; - QUEUE_CB_DESC_INIT(&cb_desc); - error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - - /* b. Operate on the queue */ - *size = ia_css_circbuf_desc_get_free_elems(&cb_desc); - return 0; - } - - return EINVAL; -} - -int ia_css_queue_get_used_space( - ia_css_queue_t *qhandle, - uint32_t *size) -{ - int error = 0; - if ((qhandle == NULL) || (size == NULL)) - return EINVAL; - - /* 1. Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Directly de-ref the object and - * operate on the queue - */ - *size = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local); - return 0; - } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) { - /* a. Load the queue from remote */ - ia_css_circbuf_desc_t cb_desc; - uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG; - QUEUE_CB_DESC_INIT(&cb_desc); - error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - - /* b. Operate on the queue */ - *size = ia_css_circbuf_desc_get_num_elems(&cb_desc); - return 0; - } - - return EINVAL; -} - -int ia_css_queue_peek( - ia_css_queue_t *qhandle, - uint32_t offset, - uint32_t *element) -{ - uint32_t num_elems = 0; - int error = 0; - - if ((qhandle == NULL) || (element == NULL)) - return EINVAL; - - /* 1. 
Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Directly de-ref the object and - * operate on the queue - */ - /* Check if offset is valid */ - num_elems = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local); - if (offset > num_elems) - return EINVAL; - - *element = ia_css_circbuf_peek_from_start(&qhandle->desc.cb_local, (int) offset); - return 0; - } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) { - /* a. Load the queue from remote */ - ia_css_circbuf_desc_t cb_desc; - ia_css_circbuf_elem_t cb_elem; - uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG; - - QUEUE_CB_DESC_INIT(&cb_desc); - - error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - - /* Check if offset is valid */ - num_elems = ia_css_circbuf_desc_get_num_elems(&cb_desc); - if (offset > num_elems) - return EINVAL; - - offset = OP_std_modadd(cb_desc.start, offset, cb_desc.size); - error = ia_css_queue_item_load(qhandle, (uint8_t)offset, &cb_elem); - if (error != 0) - return error; - - *element = cb_elem.val; - return 0; - } - - return EINVAL; -} - -int ia_css_queue_is_empty( - ia_css_queue_t *qhandle, - bool *is_empty) -{ - int error = 0; - if ((qhandle == NULL) || (is_empty == NULL)) - return EINVAL; - - /* 1. Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Directly de-ref the object and - * operate on the queue - */ - *is_empty = ia_css_circbuf_is_empty(&qhandle->desc.cb_local); - return 0; - } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) { - /* a. Load the queue from remote */ - ia_css_circbuf_desc_t cb_desc; - uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG; - - QUEUE_CB_DESC_INIT(&cb_desc); - error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - - /* b. 
Operate on the queue */ - *is_empty = ia_css_circbuf_desc_is_empty(&cb_desc); - return 0; - } - - return EINVAL; -} - -int ia_css_queue_get_size( - ia_css_queue_t *qhandle, - uint32_t *size) -{ - int error = 0; - if ((qhandle == NULL) || (size == NULL)) - return EINVAL; - - /* 1. Load the required queue object */ - if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) { - /* Directly de-ref the object and - * operate on the queue - */ - /* Return maximum usable capacity */ - *size = ia_css_circbuf_get_size(&qhandle->desc.cb_local); - } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) { - /* a. Load the queue from remote */ - ia_css_circbuf_desc_t cb_desc; - uint32_t ignore_desc_flags = QUEUE_IGNORE_START_END_STEP_FLAGS; - - QUEUE_CB_DESC_INIT(&cb_desc); - - error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags); - if (error != 0) - return error; - - /* Return maximum usable capacity */ - *size = cb_desc.size; - } - - return 0; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c deleted file mode 100644 index 7bb2b494836e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c +++ /dev/null @@ -1,192 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. 
- -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include "type_support.h" -#include "queue_access.h" -#include "ia_css_circbuf.h" -#include "sp.h" -#include "memory_access.h" -#include "assert_support.h" - -int ia_css_queue_load( - struct ia_css_queue *rdesc, - ia_css_circbuf_desc_t *cb_desc, - uint32_t ignore_desc_flags) -{ - if (rdesc == NULL || cb_desc == NULL) - return EINVAL; - - if (rdesc->location == IA_CSS_QUEUE_LOC_SP) { - assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX); - - if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG)) { - cb_desc->size = sp_dmem_load_uint8(rdesc->proc_id, - rdesc->desc.remote.cb_desc_addr - + offsetof(ia_css_circbuf_desc_t, size)); - - if (0 == cb_desc->size) { - /* Adding back the workaround which was removed - while refactoring queues. When reading size - through sp_dmem_load_*, sometimes we get back - the value as zero. This causes division by 0 - exception as the size is used in a modular - division operation. 
*/ - return EDOM; - } - } - - if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG)) - cb_desc->start = sp_dmem_load_uint8(rdesc->proc_id, - rdesc->desc.remote.cb_desc_addr - + offsetof(ia_css_circbuf_desc_t, start)); - - if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG)) - cb_desc->end = sp_dmem_load_uint8(rdesc->proc_id, - rdesc->desc.remote.cb_desc_addr - + offsetof(ia_css_circbuf_desc_t, end)); - - if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG)) - cb_desc->step = sp_dmem_load_uint8(rdesc->proc_id, - rdesc->desc.remote.cb_desc_addr - + offsetof(ia_css_circbuf_desc_t, step)); - - } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) { - /* doing DMA transfer of entire structure */ - mmgr_load(rdesc->desc.remote.cb_desc_addr, - (void *)cb_desc, - sizeof(ia_css_circbuf_desc_t)); - } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) { - /* Not supported yet */ - return ENOTSUP; - } - - return 0; -} - -int ia_css_queue_store( - struct ia_css_queue *rdesc, - ia_css_circbuf_desc_t *cb_desc, - uint32_t ignore_desc_flags) -{ - if (rdesc == NULL || cb_desc == NULL) - return EINVAL; - - if (rdesc->location == IA_CSS_QUEUE_LOC_SP) { - assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX); - - if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG)) - sp_dmem_store_uint8(rdesc->proc_id, - rdesc->desc.remote.cb_desc_addr - + offsetof(ia_css_circbuf_desc_t, size), - cb_desc->size); - - if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG)) - sp_dmem_store_uint8(rdesc->proc_id, - rdesc->desc.remote.cb_desc_addr - + offsetof(ia_css_circbuf_desc_t, start), - cb_desc->start); - - if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG)) - sp_dmem_store_uint8(rdesc->proc_id, - rdesc->desc.remote.cb_desc_addr - + offsetof(ia_css_circbuf_desc_t, end), - cb_desc->end); - - if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG)) - sp_dmem_store_uint8(rdesc->proc_id, - rdesc->desc.remote.cb_desc_addr - + offsetof(ia_css_circbuf_desc_t, step), - cb_desc->step); - } else if 
(rdesc->location == IA_CSS_QUEUE_LOC_HOST) { - /* doing DMA transfer of entire structure */ - mmgr_store(rdesc->desc.remote.cb_desc_addr, - (void *)cb_desc, - sizeof(ia_css_circbuf_desc_t)); - } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) { - /* Not supported yet */ - return ENOTSUP; - } - - return 0; -} - -int ia_css_queue_item_load( - struct ia_css_queue *rdesc, - uint8_t position, - ia_css_circbuf_elem_t *item) -{ - if (rdesc == NULL || item == NULL) - return EINVAL; - - if (rdesc->location == IA_CSS_QUEUE_LOC_SP) { - sp_dmem_load(rdesc->proc_id, - rdesc->desc.remote.cb_elems_addr - + position * sizeof(ia_css_circbuf_elem_t), - item, - sizeof(ia_css_circbuf_elem_t)); - } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) { - mmgr_load(rdesc->desc.remote.cb_elems_addr - + position * sizeof(ia_css_circbuf_elem_t), - (void *)item, - sizeof(ia_css_circbuf_elem_t)); - } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) { - /* Not supported yet */ - return ENOTSUP; - } - - return 0; -} - -int ia_css_queue_item_store( - struct ia_css_queue *rdesc, - uint8_t position, - ia_css_circbuf_elem_t *item) -{ - if (rdesc == NULL || item == NULL) - return EINVAL; - - if (rdesc->location == IA_CSS_QUEUE_LOC_SP) { - sp_dmem_store(rdesc->proc_id, - rdesc->desc.remote.cb_elems_addr - + position * sizeof(ia_css_circbuf_elem_t), - item, - sizeof(ia_css_circbuf_elem_t)); - } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) { - mmgr_store(rdesc->desc.remote.cb_elems_addr - + position * sizeof(ia_css_circbuf_elem_t), - (void *)item, - sizeof(ia_css_circbuf_elem_t)); - } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) { - /* Not supported yet */ - return ENOTSUP; - } - - return 0; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.h deleted file mode 100644 index 4775513f54cf..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.h +++ /dev/null @@ -1,101 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __QUEUE_ACCESS_H -#define __QUEUE_ACCESS_H - -#include -#include -#include -#include - -#define QUEUE_IGNORE_START_FLAG 0x0001 -#define QUEUE_IGNORE_END_FLAG 0x0002 -#define QUEUE_IGNORE_SIZE_FLAG 0x0004 -#define QUEUE_IGNORE_STEP_FLAG 0x0008 -#define QUEUE_IGNORE_DESC_FLAGS_MAX 0x000f - -#define QUEUE_IGNORE_SIZE_START_STEP_FLAGS \ - (QUEUE_IGNORE_SIZE_FLAG | \ - QUEUE_IGNORE_START_FLAG | \ - QUEUE_IGNORE_STEP_FLAG) - -#define QUEUE_IGNORE_SIZE_END_STEP_FLAGS \ - (QUEUE_IGNORE_SIZE_FLAG | \ - QUEUE_IGNORE_END_FLAG | \ - QUEUE_IGNORE_STEP_FLAG) - -#define QUEUE_IGNORE_START_END_STEP_FLAGS \ - (QUEUE_IGNORE_START_FLAG | \ - QUEUE_IGNORE_END_FLAG | \ - QUEUE_IGNORE_STEP_FLAG) - -#define QUEUE_CB_DESC_INIT(cb_desc) \ - do { \ - (cb_desc)->size = 0; \ - (cb_desc)->step = 0; \ - (cb_desc)->start = 0; \ - (cb_desc)->end = 0; \ - } while(0) - -struct ia_css_queue { - uint8_t type; /* Specify remote/local type of access */ - uint8_t location; /* Cell location for queue */ - uint8_t proc_id; /* Processor id for queue access */ - union { - ia_css_circbuf_t cb_local; - struct { - uint32_t cb_desc_addr; /*Circbuf desc address for remote queues*/ - uint32_t cb_elems_addr; /*Circbuf elements addr for remote queue*/ - } remote; - } desc; -}; - -extern int ia_css_queue_load( - struct ia_css_queue *rdesc, - ia_css_circbuf_desc_t *cb_desc, - uint32_t ignore_desc_flags); - -extern int ia_css_queue_store( - struct ia_css_queue *rdesc, - ia_css_circbuf_desc_t *cb_desc, - uint32_t ignore_desc_flags); - -extern int ia_css_queue_item_load( - struct ia_css_queue *rdesc, - uint8_t position, - ia_css_circbuf_elem_t *item); - -extern int ia_css_queue_item_store( - struct ia_css_queue *rdesc, - uint8_t position, - ia_css_circbuf_elem_t *item); - -#endif /* __QUEUE_ACCESS_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h deleted 
file mode 100644 index 9f78e709b3d0..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h +++ /dev/null @@ -1,88 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef _IA_CSS_RMGR_H -#define _IA_CSS_RMGR_H - -#include - -#ifndef __INLINE_RMGR__ -#define STORAGE_CLASS_RMGR_H extern -#define STORAGE_CLASS_RMGR_C -#else /* __INLINE_RMGR__ */ -#define STORAGE_CLASS_RMGR_H static inline -#define STORAGE_CLASS_RMGR_C static inline -#endif /* __INLINE_RMGR__ */ - -/** - * @brief Initialize resource manager (host/common) - */ -enum ia_css_err ia_css_rmgr_init(void); - -/** - * @brief Uninitialize resource manager (host/common) - */ -void ia_css_rmgr_uninit(void); - -/***************************************************************** - * Interface definition - resource type (host/common) - ***************************************************************** - * - * struct ia_css_rmgr__pool; - * struct ia_css_rmgr__handle; - * - * STORAGE_CLASS_RMGR_H void ia_css_rmgr_init_( - * struct ia_css_rmgr__pool *pool); - * - * STORAGE_CLASS_RMGR_H void ia_css_rmgr_uninit_( - * struct ia_css_rmgr__pool *pool); - * - * STORAGE_CLASS_RMGR_H void ia_css_rmgr_acq_( - * struct ia_css_rmgr__pool *pool, - * struct ia_css_rmgr__handle **handle); - * - * STORAGE_CLASS_RMGR_H void ia_css_rmgr_rel_( - * struct ia_css_rmgr__pool *pool, - * struct ia_css_rmgr__handle **handle); - * - ***************************************************************** - * Interface definition - refcounting (host/common) - ***************************************************************** - * - * void ia_css_rmgr_refcount_retain_( - * struct ia_css_rmgr__handle **handle); - * - * void ia_css_rmgr_refcount_release_( - * struct ia_css_rmgr__handle **handle); - */ - -#include "ia_css_rmgr_vbuf.h" - -#endif /* _IA_CSS_RMGR_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr_vbuf.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr_vbuf.h deleted file mode 100644 index 90ac27cf02cf..000000000000 --- 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr_vbuf.h +++ /dev/null @@ -1,115 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef _IA_CSS_RMGR_VBUF_H -#define _IA_CSS_RMGR_VBUF_H - -#include "ia_css_rmgr.h" -#include -#include - -/** - * @brief Data structure for the resource handle (host, vbuf) - */ -struct ia_css_rmgr_vbuf_handle { - hrt_vaddress vptr; - uint8_t count; - uint32_t size; -}; - -/** - * @brief Data structure for the resource pool (host, vbuf) - */ -struct ia_css_rmgr_vbuf_pool { - uint8_t copy_on_write; - uint8_t recycle; - uint32_t size; - uint32_t index; - struct ia_css_rmgr_vbuf_handle **handles; -}; - -/** - * @brief VBUF resource pools - */ -extern struct ia_css_rmgr_vbuf_pool *vbuf_ref; -extern struct ia_css_rmgr_vbuf_pool *vbuf_write; -extern struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool; - -/** - * @brief Initialize the resource pool (host, vbuf) - * - * @param pool The pointer to the pool - */ -STORAGE_CLASS_RMGR_H enum ia_css_err ia_css_rmgr_init_vbuf( - struct ia_css_rmgr_vbuf_pool *pool); - -/** - * @brief Uninitialize the resource pool (host, vbuf) - * - * @param pool The pointer to the pool - */ -STORAGE_CLASS_RMGR_H void ia_css_rmgr_uninit_vbuf( - struct ia_css_rmgr_vbuf_pool *pool); - -/** - * @brief Acquire a handle from the pool (host, vbuf) - * - * @param pool The pointer to the pool - * @param handle The pointer to the handle - */ -STORAGE_CLASS_RMGR_H void ia_css_rmgr_acq_vbuf( - struct ia_css_rmgr_vbuf_pool *pool, - struct ia_css_rmgr_vbuf_handle **handle); - -/** - * @brief Release a handle to the pool (host, vbuf) - * - * @param pool The pointer to the pool - * @param handle The pointer to the handle - */ -STORAGE_CLASS_RMGR_H void ia_css_rmgr_rel_vbuf( - struct ia_css_rmgr_vbuf_pool *pool, - struct ia_css_rmgr_vbuf_handle **handle); - -/** - * @brief Retain the reference count for a handle (host, vbuf) - * - * @param handle The pointer to the handle - */ -void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle); - -/** - * @brief Release the reference count for a handle (host, vbuf) - * - * @param handle The 
pointer to the handle - */ -void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle); - -#endif /* _IA_CSS_RMGR_VBUF_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c deleted file mode 100644 index 370ff3816dbe..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#include "ia_css_rmgr.h" - -enum ia_css_err ia_css_rmgr_init(void) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - err = ia_css_rmgr_init_vbuf(vbuf_ref); - if (err == IA_CSS_SUCCESS) - err = ia_css_rmgr_init_vbuf(vbuf_write); - if (err == IA_CSS_SUCCESS) - err = ia_css_rmgr_init_vbuf(hmm_buffer_pool); - if (err != IA_CSS_SUCCESS) - ia_css_rmgr_uninit(); - return err; -} - -/* - * @brief Uninitialize resource pool (host) - */ -void ia_css_rmgr_uninit(void) -{ - ia_css_rmgr_uninit_vbuf(hmm_buffer_pool); - ia_css_rmgr_uninit_vbuf(vbuf_write); - ia_css_rmgr_uninit_vbuf(vbuf_ref); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c deleted file mode 100644 index a4d8a48f95ba..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_rmgr.h" - -#include -#include -#include /* memset */ -#include /* mmmgr_malloc, mhmm_free */ -#include - -/* - * @brief VBUF resource handles - */ -#define NUM_HANDLES 1000 -static struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES]; - -/* - * @brief VBUF resource pool - refpool - */ -static struct ia_css_rmgr_vbuf_pool refpool = { - false, /* copy_on_write */ - false, /* recycle */ - 0, /* size */ - 0, /* index */ - NULL, /* handles */ -}; - -/* - * @brief VBUF resource pool - writepool - */ -static struct ia_css_rmgr_vbuf_pool writepool = { - true, /* copy_on_write */ - false, /* recycle */ - 0, /* size */ - 0, /* index */ - NULL, /* handles */ -}; - -/* - * @brief VBUF resource pool - hmmbufferpool - */ -static struct ia_css_rmgr_vbuf_pool hmmbufferpool = { - true, /* copy_on_write */ - true, /* recycle */ - 32, /* size */ - 0, /* index */ - NULL, /* handles */ -}; - -struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool; -struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool; -struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool; - -/* - * @brief Initialize the reference count (host, vbuf) - */ -static void rmgr_refcount_init_vbuf(void) -{ - /* initialize the refcount table */ - memset(&handle_table, 0, sizeof(handle_table)); -} - -/* - * @brief Retain the reference count for a handle (host, vbuf) - * - * @param handle The pointer to the handle - */ -void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle) -{ - int i; - struct ia_css_rmgr_vbuf_handle *h; - if ((handle == NULL) || (*handle == NULL)) { - IA_CSS_LOG("Invalid inputs"); - return; - } - /* new vbuf to count on */ - if ((*handle)->count == 0) { - h = *handle; - *handle = NULL; - for (i = 0; i < NUM_HANDLES; i++) { - if (handle_table[i].count == 0) { - *handle = &handle_table[i]; - break; - } - } - /* if the loop dus not break and *handle == NULL - this is an error handle and report it. 
- */ - if (*handle == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "ia_css_i_host_refcount_retain_vbuf() failed to find empty slot!\n"); - return; - } - (*handle)->vptr = h->vptr; - (*handle)->size = h->size; - } - (*handle)->count++; -} - -/* - * @brief Release the reference count for a handle (host, vbuf) - * - * @param handle The pointer to the handle - */ -void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle) -{ - if ((handle == NULL) || ((*handle) == NULL) || (((*handle)->count) == 0)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "ia_css_rmgr_refcount_release_vbuf() invalid arguments!\n"); - return; - } - /* decrease reference count */ - (*handle)->count--; - /* remove from admin */ - if ((*handle)->count == 0) { - (*handle)->vptr = 0x0; - (*handle)->size = 0; - *handle = NULL; - } -} - -/* - * @brief Initialize the resource pool (host, vbuf) - * - * @param pool The pointer to the pool - */ -enum ia_css_err ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - size_t bytes_needed; - rmgr_refcount_init_vbuf(); - assert(pool != NULL); - if (pool == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - /* initialize the recycle pool if used */ - if (pool->recycle && pool->size) { - /* allocate memory for storing the handles */ - bytes_needed = - sizeof(void *) * - pool->size; - pool->handles = sh_css_malloc(bytes_needed); - if (pool->handles != NULL) - memset(pool->handles, 0, bytes_needed); - else - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } else { - /* just in case, set the size to 0 */ - pool->size = 0; - pool->handles = NULL; - } - return err; -} - -/* - * @brief Uninitialize the resource pool (host, vbuf) - * - * @param pool The pointer to the pool - */ -void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool) -{ - uint32_t i; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_rmgr_uninit_vbuf()\n"); - if (pool == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, 
"ia_css_rmgr_uninit_vbuf(): NULL argument\n"); - return; - } - if (pool->handles != NULL) { - /* free the hmm buffers */ - for (i = 0; i < pool->size; i++) { - if (pool->handles[i] != NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - " freeing/releasing %x (count=%d)\n", - pool->handles[i]->vptr, - pool->handles[i]->count); - /* free memory */ - hmm_free(pool->handles[i]->vptr); - /* remove from refcount admin */ - ia_css_rmgr_refcount_release_vbuf( - &pool->handles[i]); - } - } - /* now free the pool handles list */ - sh_css_free(pool->handles); - pool->handles = NULL; - } -} - -/* - * @brief Push a handle to the pool - * - * @param pool The pointer to the pool - * @param handle The pointer to the handle - */ -static -void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool, - struct ia_css_rmgr_vbuf_handle **handle) -{ - uint32_t i; - bool succes = false; - assert(pool != NULL); - assert(pool->recycle); - assert(pool->handles != NULL); - assert(handle != NULL); - for (i = 0; i < pool->size; i++) { - if (pool->handles[i] == NULL) { - ia_css_rmgr_refcount_retain_vbuf(handle); - pool->handles[i] = *handle; - succes = true; - break; - } - } - assert(succes); -} - -/* - * @brief Pop a handle from the pool - * - * @param pool The pointer to the pool - * @param handle The pointer to the handle - */ -static -void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool, - struct ia_css_rmgr_vbuf_handle **handle) -{ - uint32_t i; - bool succes = false; - assert(pool != NULL); - assert(pool->recycle); - assert(pool->handles != NULL); - assert(handle != NULL); - assert(*handle != NULL); - for (i = 0; i < pool->size; i++) { - if ((pool->handles[i] != NULL) && - (pool->handles[i]->size == (*handle)->size)) { - *handle = pool->handles[i]; - pool->handles[i] = NULL; - /* dont release, we are returning it... 
- ia_css_rmgr_refcount_release_vbuf(handle); */ - succes = true; - break; - } - } -} - -/* - * @brief Acquire a handle from the pool (host, vbuf) - * - * @param pool The pointer to the pool - * @param handle The pointer to the handle - */ -void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool, - struct ia_css_rmgr_vbuf_handle **handle) -{ - struct ia_css_rmgr_vbuf_handle h; - - if ((pool == NULL) || (handle == NULL) || (*handle == NULL)) { - IA_CSS_LOG("Invalid inputs"); - return; - } - - if (pool->copy_on_write) { - /* only one reference, reuse (no new retain) */ - if ((*handle)->count == 1) - return; - /* more than one reference, release current buffer */ - if ((*handle)->count > 1) { - /* store current values */ - h.vptr = 0x0; - h.size = (*handle)->size; - /* release ref to current buffer */ - ia_css_rmgr_refcount_release_vbuf(handle); - *handle = &h; - } - /* get new buffer for needed size */ - if ((*handle)->vptr == 0x0) { - if (pool->recycle) { - /* try and pop from pool */ - rmgr_pop_handle(pool, handle); - } - if ((*handle)->vptr == 0x0) { - /* we need to allocate */ - (*handle)->vptr = mmgr_malloc((*handle)->size); - } else { - /* we popped a buffer */ - return; - } - } - } - /* Note that handle will change to an internally maintained one */ - ia_css_rmgr_refcount_retain_vbuf(handle); -} - -/* - * @brief Release a handle to the pool (host, vbuf) - * - * @param pool The pointer to the pool - * @param handle The pointer to the handle - */ -void ia_css_rmgr_rel_vbuf(struct ia_css_rmgr_vbuf_pool *pool, - struct ia_css_rmgr_vbuf_handle **handle) -{ - if ((pool == NULL) || (handle == NULL) || (*handle == NULL)) { - IA_CSS_LOG("Invalid inputs"); - return; - } - /* release the handle */ - if ((*handle)->count == 1) { - if (!pool->recycle) { - /* non recycling pool, free mem */ - hmm_free((*handle)->vptr); - } else { - /* recycle to pool */ - rmgr_push_handle(pool, handle); - } - } - ia_css_rmgr_refcount_release_vbuf(handle); - *handle = NULL; -} diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h deleted file mode 100644 index bc4b1723369e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h +++ /dev/null @@ -1,87 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ -#endif - -#ifndef __IA_CSS_SPCTRL_H__ -#define __IA_CSS_SPCTRL_H__ - -#include -#include -#include "ia_css_spctrl_comm.h" - - -typedef struct { - uint32_t ddr_data_offset; /** posistion of data in DDR */ - uint32_t dmem_data_addr; /** data segment address in dmem */ - uint32_t dmem_bss_addr; /** bss segment address in dmem */ - uint32_t data_size; /** data segment size */ - uint32_t bss_size; /** bss segment size */ - uint32_t spctrl_config_dmem_addr; /* - -/* state of SP */ -typedef enum { - IA_CSS_SP_SW_TERMINATED = 0, - IA_CSS_SP_SW_INITIALIZED, - IA_CSS_SP_SW_CONNECTED, - IA_CSS_SP_SW_RUNNING -} ia_css_spctrl_sp_sw_state; - -/* Structure to encapsulate required arguments for - * initialization of SP DMEM using the SP itself - */ -struct ia_css_sp_init_dmem_cfg { - ia_css_ptr ddr_data_addr; /** data segment address in ddr */ - uint32_t dmem_data_addr; /** data segment address in dmem */ - uint32_t dmem_bss_addr; /** bss segment address in dmem */ - uint32_t data_size; /** data segment size */ - uint32_t bss_size; /** bss segment size */ - sp_ID_t sp_id; /* = N_SP_ID) || (spctrl_cfg == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - spctrl_cofig_info[sp_id].code_addr = mmgr_NULL; - - init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config; - init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr; - init_dmem_cfg->dmem_bss_addr = spctrl_cfg->dmem_bss_addr; - init_dmem_cfg->data_size = spctrl_cfg->data_size; - init_dmem_cfg->bss_size = spctrl_cfg->bss_size; - init_dmem_cfg->sp_id = sp_id; - - spctrl_cofig_info[sp_id].spctrl_config_dmem_addr = spctrl_cfg->spctrl_config_dmem_addr; - spctrl_cofig_info[sp_id].spctrl_state_dmem_addr = spctrl_cfg->spctrl_state_dmem_addr; - - /* store code (text + icache) and data to DDR - * - * Data used to be stored separately, because of access alignment constraints, - * fix the FW generation instead - */ - code_addr = mmgr_malloc(spctrl_cfg->code_size); - if (code_addr == mmgr_NULL) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; 
- mmgr_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size); - - if (sizeof(hrt_vaddress) > sizeof(hrt_data)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "size of hrt_vaddress can not be greater than hrt_data\n"); - hmm_free(code_addr); - code_addr = mmgr_NULL; - return IA_CSS_ERR_INTERNAL_ERROR; - } - - init_dmem_cfg->ddr_data_addr = code_addr + spctrl_cfg->ddr_data_offset; - if ((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) != 0) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "DDR address pointer is not properly aligned for DMA transfer\n"); - hmm_free(code_addr); - code_addr = mmgr_NULL; - return IA_CSS_ERR_INTERNAL_ERROR; - } - - spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry; - spctrl_cofig_info[sp_id].code_addr = code_addr; - spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name; - - /* now we program the base address into the icache and - * invalidate the cache. - */ - sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG, (hrt_data)spctrl_cofig_info[sp_id].code_addr); - sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT); - spctrl_loaded[sp_id] = true; - return IA_CSS_SUCCESS; -} - -#ifdef ISP2401 -/* reload pre-loaded FW */ -void sh_css_spctrl_reload_fw(sp_ID_t sp_id) -{ - /* now we program the base address into the icache and - * invalidate the cache. 
- */ - sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG, (hrt_data)spctrl_cofig_info[sp_id].code_addr); - sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT); - spctrl_loaded[sp_id] = true; -} -#endif - -hrt_vaddress get_sp_code_addr(sp_ID_t sp_id) -{ - return spctrl_cofig_info[sp_id].code_addr; -} - -enum ia_css_err ia_css_spctrl_unload_fw(sp_ID_t sp_id) -{ - if ((sp_id >= N_SP_ID) || ((sp_id < N_SP_ID) && (!spctrl_loaded[sp_id]))) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - /* freeup the resource */ - if (spctrl_cofig_info[sp_id].code_addr) - hmm_free(spctrl_cofig_info[sp_id].code_addr); - spctrl_loaded[sp_id] = false; - return IA_CSS_SUCCESS; -} - -/* Initialize dmem_cfg in SP dmem and start SP program*/ -enum ia_css_err ia_css_spctrl_start(sp_ID_t sp_id) -{ - if ((sp_id >= N_SP_ID) || ((sp_id < N_SP_ID) && (!spctrl_loaded[sp_id]))) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - /* Set descr in the SP to initialize the SP DMEM */ - /* - * The FW stores user-space pointers to the FW, the ISP pointer - * is only available here - * - */ - assert(sizeof(unsigned int) <= sizeof(hrt_data)); - - sp_dmem_store(sp_id, - spctrl_cofig_info[sp_id].spctrl_config_dmem_addr, - &spctrl_cofig_info[sp_id].dmem_config, - sizeof(spctrl_cofig_info[sp_id].dmem_config)); - /* set the start address */ - sp_ctrl_store(sp_id, SP_START_ADDR_REG, (hrt_data)spctrl_cofig_info[sp_id].sp_entry); - sp_ctrl_setbit(sp_id, SP_SC_REG, SP_RUN_BIT); - sp_ctrl_setbit(sp_id, SP_SC_REG, SP_START_BIT); - return IA_CSS_SUCCESS; -} - -/* Query the state of SP1 */ -ia_css_spctrl_sp_sw_state ia_css_spctrl_get_state(sp_ID_t sp_id) -{ - ia_css_spctrl_sp_sw_state state = 0; - unsigned int HIVE_ADDR_sp_sw_state; - if (sp_id >= N_SP_ID) - return IA_CSS_SP_SW_TERMINATED; - - HIVE_ADDR_sp_sw_state = spctrl_cofig_info[sp_id].spctrl_state_dmem_addr; - (void)HIVE_ADDR_sp_sw_state; /* Suppres warnings in CRUN */ - if (sp_id == SP0_ID) - state = sp_dmem_load_uint32(sp_id, (unsigned)sp_address_of(sp_sw_state)); - return state; 
-} - -int ia_css_spctrl_is_idle(sp_ID_t sp_id) -{ - int state = 0; - assert (sp_id < N_SP_ID); - - state = sp_ctrl_getbit(sp_id, SP_SC_REG, SP_IDLE_BIT); - return state; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/tagger/interface/ia_css_tagger_common.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/tagger/interface/ia_css_tagger_common.h deleted file mode 100644 index d0d74957358b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/tagger/interface/ia_css_tagger_common.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#ifndef __IA_CSS_TAGGER_COMMON_H__ -#define __IA_CSS_TAGGER_COMMON_H__ - -#include -#include - -/** - * @brief The tagger's circular buffer. 
- * - * Should be one less than NUM_CONTINUOUS_FRAMES in sh_css_internal.h - */ -#if defined(HAS_SP_2400) -#define MAX_CB_ELEMS_FOR_TAGGER 14 -#else -#define MAX_CB_ELEMS_FOR_TAGGER 9 -#endif - -/** - * @brief Data structure for the tagger buffer element. - */ -typedef struct { - uint32_t frame; /* the frame value stored in the element */ - uint32_t param; /* the param value stored in the element */ - uint8_t mark; /* the mark on the element */ - uint8_t lock; /* the lock on the element */ - uint8_t exp_id; /* exp_id of frame, for debugging only */ -} ia_css_tagger_buf_sp_elem_t; - -#endif /* __IA_CSS_TAGGER_COMMON_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c deleted file mode 100644 index b7dd18492a91..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef ISP2401 -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ -#else -/* -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. 
- -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. -*/ -#endif - -#include /* for uint32_t */ -#include "ia_css_timer.h" /*struct ia_css_clock_tick */ -#include "sh_css_legacy.h" /* IA_CSS_PIPE_ID_NUM*/ -#include "gp_timer.h" /*gp_timer_read()*/ -#include "assert_support.h" - -enum ia_css_err -ia_css_timer_get_current_tick( - struct ia_css_clock_tick *curr_ts) { - - assert(curr_ts != NULL); - if (curr_ts == NULL) { - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - curr_ts->ticks = (clock_value_t)gp_timer_read(GP_TIMER_SEL); - return IA_CSS_SUCCESS; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c deleted file mode 100644 index 4bcc835880cf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c +++ /dev/null @@ -1,11094 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/*! 
\file */ -#include -#include -#include - -#include "ia_css.h" -#include "sh_css_hrt.h" /* only for file 2 MIPI */ -#include "ia_css_buffer.h" -#include "ia_css_binary.h" -#include "sh_css_internal.h" -#include "sh_css_mipi.h" -#include "sh_css_sp.h" /* sh_css_sp_group */ -#if !defined(HAS_NO_INPUT_SYSTEM) -#include "ia_css_isys.h" -#endif -#include "ia_css_frame.h" -#include "sh_css_defs.h" -#include "sh_css_firmware.h" -#include "sh_css_params.h" -#include "sh_css_params_internal.h" -#include "sh_css_param_shading.h" -#include "ia_css_refcount.h" -#include "ia_css_rmgr.h" -#include "ia_css_debug.h" -#include "ia_css_debug_pipe.h" -#include "ia_css_device_access.h" -#include "device_access.h" -#include "sh_css_legacy.h" -#include "ia_css_pipeline.h" -#include "ia_css_stream.h" -#include "sh_css_stream_format.h" -#include "ia_css_pipe.h" -#include "ia_css_util.h" -#include "ia_css_pipe_util.h" -#include "ia_css_pipe_binarydesc.h" -#include "ia_css_pipe_stagedesc.h" -#ifdef USE_INPUT_SYSTEM_VERSION_2 -#include "ia_css_isys.h" -#endif - -#include "memory_access.h" -#include "tag.h" -#include "assert_support.h" -#include "math_support.h" -#include "sw_event_global.h" /* Event IDs.*/ -#if !defined(HAS_NO_INPUT_FORMATTER) -#include "ia_css_ifmtr.h" -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) -#include "input_system.h" -#endif -#include "mmu_device.h" /* mmu_set_page_table_base_index(), ... 
*/ -#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */ -#include "gdc_device.h" /* HRT_GDC_N */ -#include "dma.h" /* dma_set_max_burst_size() */ -#include "irq.h" /* virq */ -#include "sp.h" /* cnd_sp_irq_enable() */ -#include "isp.h" /* cnd_isp_irq_enable, ISP_VEC_NELEMS */ -#include "gp_device.h" /* gp_device_reg_store() */ -#define __INLINE_GPIO__ -#include "gpio.h" -#include "timed_ctrl.h" -#include "platform_support.h" /* hrt_sleep(), inline */ -#include "ia_css_inputfifo.h" -#define WITH_PC_MONITORING 0 - -#define SH_CSS_VIDEO_BUFFER_ALIGNMENT 0 - -#if WITH_PC_MONITORING -#define MULTIPLE_SAMPLES 1 -#define NOF_SAMPLES 60 -#include "linux/kthread.h" -#include "linux/sched.h" -#include "linux/delay.h" -#include "sh_css_metrics.h" -static int thread_alive; -#endif /* WITH_PC_MONITORING */ - -#include "ia_css_spctrl.h" -#include "ia_css_version_data.h" -#include "sh_css_struct.h" -#include "ia_css_bufq.h" -#include "ia_css_timer.h" /* clock_value_t */ - -#include "isp/modes/interface/input_buf.isp.h" - -/* Name of the sp program: should not be built-in */ -#define SP_PROG_NAME "sp" -/* Size of Refcount List */ -#define REFCOUNT_SIZE 1000 - -/* for JPEG, we don't know the length of the image upfront, - * but since we support sensor upto 16MP, we take this as - * upper limit. - */ -#define JPEG_BYTES (16 * 1024 * 1024) - -#define STATS_ENABLED(stage) (stage && stage->binary && stage->binary->info && \ - (stage->binary->info->sp.enable.s3a || stage->binary->info->sp.enable.dis)) - -struct sh_css my_css; - -int (*sh_css_printf) (const char *fmt, va_list args) = NULL; - -/* modes of work: stream_create and stream_destroy will update the save/restore data - only when in working mode, not suspend/resume -*/ -enum ia_sh_css_modes { - sh_css_mode_none = 0, - sh_css_mode_working, - sh_css_mode_suspend, - sh_css_mode_resume -}; - -/* a stream seed, to save and restore the stream data. 
- the stream seed contains all the data required to "grow" the seed again after it was closed. -*/ -struct sh_css_stream_seed { - struct ia_css_stream **orig_stream; /* pointer to restore the original handle */ - struct ia_css_stream *stream; /* handle, used as ID too.*/ - struct ia_css_stream_config stream_config; /* stream config struct */ - int num_pipes; - struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM]; /* pipe handles */ - struct ia_css_pipe **orig_pipes[IA_CSS_PIPE_ID_NUM]; /* pointer to restore original handle */ - struct ia_css_pipe_config pipe_config[IA_CSS_PIPE_ID_NUM]; /* pipe config structs */ -}; - -#define MAX_ACTIVE_STREAMS 5 -/* A global struct for save/restore to hold all the data that should sustain power-down: - MMU base, IRQ type, env for routines, binary loaded FW and the stream seeds. -*/ -struct sh_css_save { - enum ia_sh_css_modes mode; - uint32_t mmu_base; /* the last mmu_base */ - enum ia_css_irq_type irq_type; - struct sh_css_stream_seed stream_seeds[MAX_ACTIVE_STREAMS]; - struct ia_css_fw *loaded_fw; /* fw struct previously loaded */ - struct ia_css_env driver_env; /* driver-supplied env copy */ -}; - -static bool my_css_save_initialized; /* if my_css_save was initialized */ -static struct sh_css_save my_css_save; - -/* pqiao NOTICE: this is for css internal buffer recycling when stopping pipeline, - this array is temporary and will be replaced by resource manager*/ -/* Taking the biggest Size for number of Elements */ -#define MAX_HMM_BUFFER_NUM \ - (SH_CSS_MAX_NUM_QUEUES * (IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE + 2)) - -struct sh_css_hmm_buffer_record { - bool in_use; - enum ia_css_buffer_type type; - struct ia_css_rmgr_vbuf_handle *h_vbuf; - hrt_address kernel_ptr; -}; - -static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM]; - -#define GPIO_FLASH_PIN_MASK (1 << HIVE_GPIO_STROBE_TRIGGER_PIN) - -static bool fw_explicitly_loaded = false; - -/* - * Local prototypes - */ - -static enum ia_css_err 
-allocate_delay_frames(struct ia_css_pipe *pipe); - -static enum ia_css_err -sh_css_pipe_start(struct ia_css_stream *stream); - -#ifdef ISP2401 -/* - * @brief Stop all "ia_css_pipe" instances in the target - * "ia_css_stream" instance. - * - * @param[in] stream Point to the target "ia_css_stream" instance. - * - * @return - * - IA_CSS_SUCCESS, if the "stop" requests have been successfully sent out. - * - CSS error code, otherwise. - * - * - * NOTE - * This API sends the "stop" requests to the "ia_css_pipe" - * instances in the same "ia_css_stream" instance. It will - * return without waiting for all "ia_css_pipe" instatnces - * being stopped. - */ -static enum ia_css_err -sh_css_pipes_stop(struct ia_css_stream *stream); - -/* - * @brief Check if all "ia_css_pipe" instances in the target - * "ia_css_stream" instance have stopped. - * - * @param[in] stream Point to the target "ia_css_stream" instance. - * - * @return - * - true, if all "ia_css_pipe" instances in the target "ia_css_stream" - * instance have ben stopped. - * - false, otherwise. 
- */ -static bool -sh_css_pipes_have_stopped(struct ia_css_stream *stream); - -static enum ia_css_err -ia_css_pipe_check_format(struct ia_css_pipe *pipe, enum ia_css_frame_format format); - -static enum ia_css_err -check_pipe_resolutions(const struct ia_css_pipe *pipe); - -#endif - -static enum ia_css_err -ia_css_pipe_load_extension(struct ia_css_pipe *pipe, - struct ia_css_fw_info *firmware); - -static void -ia_css_pipe_unload_extension(struct ia_css_pipe *pipe, - struct ia_css_fw_info *firmware); -static void -ia_css_reset_defaults(struct sh_css* css); - -static void -sh_css_init_host_sp_control_vars(void); - -static enum ia_css_err set_num_primary_stages(unsigned int *num, enum ia_css_pipe_version version); - -static bool -need_capture_pp(const struct ia_css_pipe *pipe); - -static bool -need_yuv_scaler_stage(const struct ia_css_pipe *pipe); - -static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output( - struct ia_css_frame_info *cas_scaler_in_info, - struct ia_css_frame_info *cas_scaler_out_info, - struct ia_css_frame_info *cas_scaler_vf_info, - struct ia_css_cas_binary_descr *descr); - -static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr *descr); - -static bool -need_downscaling(const struct ia_css_resolution in_res, - const struct ia_css_resolution out_res); - -static bool need_capt_ldc(const struct ia_css_pipe *pipe); - -static enum ia_css_err -sh_css_pipe_load_binaries(struct ia_css_pipe *pipe); - -static -enum ia_css_err sh_css_pipe_get_viewfinder_frame_info( - struct ia_css_pipe *pipe, - struct ia_css_frame_info *info, - unsigned int idx); - -static enum ia_css_err -sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe, - struct ia_css_frame_info *info, - unsigned int idx); - -static enum ia_css_err -capture_start(struct ia_css_pipe *pipe); - -static enum ia_css_err -video_start(struct ia_css_pipe *pipe); - -static enum ia_css_err -preview_start(struct ia_css_pipe *pipe); - -static enum ia_css_err 
-yuvpp_start(struct ia_css_pipe *pipe); - -static bool copy_on_sp(struct ia_css_pipe *pipe); - -static enum ia_css_err -init_vf_frameinfo_defaults(struct ia_css_pipe *pipe, - struct ia_css_frame *vf_frame, unsigned int idx); - -static enum ia_css_err -init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe, - struct ia_css_frame *frame, enum ia_css_frame_format format); - -static enum ia_css_err -init_out_frameinfo_defaults(struct ia_css_pipe *pipe, - struct ia_css_frame *out_frame, unsigned int idx); - -static enum ia_css_err -sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline, - const void *acc_fw); - -static enum ia_css_err -alloc_continuous_frames( - struct ia_css_pipe *pipe, bool init_time); - -static void -pipe_global_init(void); - -static enum ia_css_err -pipe_generate_pipe_num(const struct ia_css_pipe *pipe, unsigned int *pipe_number); - -static void -pipe_release_pipe_num(unsigned int pipe_num); - -static enum ia_css_err -create_host_pipeline_structure(struct ia_css_stream *stream); - -static enum ia_css_err -create_host_pipeline(struct ia_css_stream *stream); - -static enum ia_css_err -create_host_preview_pipeline(struct ia_css_pipe *pipe); - -static enum ia_css_err -create_host_video_pipeline(struct ia_css_pipe *pipe); - -static enum ia_css_err -create_host_copy_pipeline(struct ia_css_pipe *pipe, - unsigned max_input_width, - struct ia_css_frame *out_frame); - -static enum ia_css_err -create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe); - -static enum ia_css_err -create_host_capture_pipeline(struct ia_css_pipe *pipe); - -static enum ia_css_err -create_host_yuvpp_pipeline(struct ia_css_pipe *pipe); - -static enum ia_css_err -create_host_acc_pipeline(struct ia_css_pipe *pipe); - -static unsigned int -sh_css_get_sw_interrupt_value(unsigned int irq); - -static struct ia_css_binary *ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe); - -static struct ia_css_binary * -ia_css_pipe_get_s3a_binary(const struct 
ia_css_pipe *pipe); - -static struct ia_css_binary * -ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe); - -static void -sh_css_hmm_buffer_record_init(void); - -static void -sh_css_hmm_buffer_record_uninit(void); - -static void -sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record); - -static struct sh_css_hmm_buffer_record -*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf, - enum ia_css_buffer_type type, - hrt_address kernel_ptr); - -static struct sh_css_hmm_buffer_record -*sh_css_hmm_buffer_record_validate(hrt_vaddress ddr_buffer_addr, - enum ia_css_buffer_type type); - -void -ia_css_get_acc_configs( - struct ia_css_pipe *pipe, - struct ia_css_isp_config *config); - - -#if CONFIG_ON_FRAME_ENQUEUE() -static enum ia_css_err set_config_on_frame_enqueue(struct ia_css_frame_info *info, struct frame_data_wrapper *frame); -#endif - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -static unsigned int get_crop_lines_for_bayer_order(const struct ia_css_stream_config *config); -static unsigned int get_crop_columns_for_bayer_order(const struct ia_css_stream_config *config); -static void get_pipe_extra_pixel(struct ia_css_pipe *pipe, - unsigned int *extra_row, unsigned int *extra_column); -#endif - -#ifdef ISP2401 -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -static enum ia_css_err -aspect_ratio_crop_init(struct ia_css_stream *curr_stream, - struct ia_css_pipe *pipes[], - bool *do_crop_status); - -static bool -aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe); - -static enum ia_css_err -aspect_ratio_crop(struct ia_css_pipe *curr_pipe, - struct ia_css_resolution *effective_res); -#endif - -#endif -static void -sh_css_pipe_free_shading_table(struct ia_css_pipe *pipe) -{ - assert(pipe != NULL); - if (pipe == NULL) { - IA_CSS_ERROR("NULL input parameter"); - return; - } - - if (pipe->shading_table) - ia_css_shading_table_free(pipe->shading_table); - pipe->shading_table = NULL; -} - -static enum ia_css_frame_format 
yuv420_copy_formats[] = { - IA_CSS_FRAME_FORMAT_NV12, - IA_CSS_FRAME_FORMAT_NV21, - IA_CSS_FRAME_FORMAT_YV12, - IA_CSS_FRAME_FORMAT_YUV420, - IA_CSS_FRAME_FORMAT_YUV420_16, - IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, - IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8 -}; - -static enum ia_css_frame_format yuv422_copy_formats[] = { - IA_CSS_FRAME_FORMAT_NV12, - IA_CSS_FRAME_FORMAT_NV16, - IA_CSS_FRAME_FORMAT_NV21, - IA_CSS_FRAME_FORMAT_NV61, - IA_CSS_FRAME_FORMAT_YV12, - IA_CSS_FRAME_FORMAT_YV16, - IA_CSS_FRAME_FORMAT_YUV420, - IA_CSS_FRAME_FORMAT_YUV420_16, - IA_CSS_FRAME_FORMAT_YUV422, - IA_CSS_FRAME_FORMAT_YUV422_16, - IA_CSS_FRAME_FORMAT_UYVY, - IA_CSS_FRAME_FORMAT_YUYV -}; - -/* Verify whether the selected output format is can be produced - * by the copy binary given the stream format. - * */ -static enum ia_css_err -verify_copy_out_frame_format(struct ia_css_pipe *pipe) -{ - enum ia_css_frame_format out_fmt = pipe->output_info[0].format; - unsigned int i, found = 0; - - assert(pipe != NULL); - assert(pipe->stream != NULL); - - switch (pipe->stream->config.input_config.format) { - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: - case ATOMISP_INPUT_FORMAT_YUV420_8: - for (i=0; iconfig.input_config.format, - stream->config.pixels_per_clock == 2); - - return bpp; -} - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) -static enum ia_css_err -sh_css_config_input_network(struct ia_css_stream *stream) -{ - unsigned int fmt_type; - struct ia_css_pipe *pipe = stream->last_pipe; - struct ia_css_binary *binary = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - - assert(stream != NULL); - assert(pipe != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_config_input_network() enter:\n"); - - if (pipe->pipeline.stages) - binary = pipe->pipeline.stages->binary; - - err = ia_css_isys_convert_stream_format_to_mipi_format( - stream->config.input_config.format, - stream->csi_rx_config.comp, - &fmt_type); - if (err != IA_CSS_SUCCESS) - return err; - 
sh_css_sp_program_input_circuit(fmt_type, - stream->config.channel_id, - stream->config.mode); - - if ((binary && (binary->online || stream->config.continuous)) || - pipe->config.mode == IA_CSS_PIPE_MODE_COPY) { - err = ia_css_ifmtr_configure(&stream->config, - binary); - if (err != IA_CSS_SUCCESS) - return err; - } - - if (stream->config.mode == IA_CSS_INPUT_MODE_TPG || - stream->config.mode == IA_CSS_INPUT_MODE_PRBS) { - unsigned int hblank_cycles = 100, - vblank_lines = 6, - width, - height, - vblank_cycles; - width = (stream->config.input_config.input_res.width) / (1 + (stream->config.pixels_per_clock == 2)); - height = stream->config.input_config.input_res.height; - vblank_cycles = vblank_lines * (width + hblank_cycles); - sh_css_sp_configure_sync_gen(width, height, hblank_cycles, - vblank_cycles); -#if defined(IS_ISP_2400_SYSTEM) - if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG) { - /* TODO: move define to proper file in tools */ - #define GP_ISEL_TPG_MODE 0x90058 - ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0); - } -#endif - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_config_input_network() leave:\n"); - return IA_CSS_SUCCESS; -} -#elif !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401) -static unsigned int csi2_protocol_calculate_max_subpixels_per_line( - enum atomisp_input_format format, - unsigned int pixels_per_line) -{ - unsigned int rval; - - switch (format) { - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: - /* - * The frame format layout is shown below. - * - * Line 0: UYY0 UYY0 ... UYY0 - * Line 1: VYY0 VYY0 ... VYY0 - * Line 2: UYY0 UYY0 ... UYY0 - * Line 3: VYY0 VYY0 ... VYY0 - * ... - * Line (n-2): UYY0 UYY0 ... UYY0 - * Line (n-1): VYY0 VYY0 ... VYY0 - * - * In this frame format, the even-line is - * as wide as the odd-line. - * The 0 is introduced by the input system - * (mipi backend). 
- */ - rval = pixels_per_line * 2; - break; - case ATOMISP_INPUT_FORMAT_YUV420_8: - case ATOMISP_INPUT_FORMAT_YUV420_10: - case ATOMISP_INPUT_FORMAT_YUV420_16: - /* - * The frame format layout is shown below. - * - * Line 0: YYYY YYYY ... YYYY - * Line 1: UYVY UYVY ... UYVY UYVY - * Line 2: YYYY YYYY ... YYYY - * Line 3: UYVY UYVY ... UYVY UYVY - * ... - * Line (n-2): YYYY YYYY ... YYYY - * Line (n-1): UYVY UYVY ... UYVY UYVY - * - * In this frame format, the odd-line is twice - * wider than the even-line. - */ - rval = pixels_per_line * 2; - break; - case ATOMISP_INPUT_FORMAT_YUV422_8: - case ATOMISP_INPUT_FORMAT_YUV422_10: - case ATOMISP_INPUT_FORMAT_YUV422_16: - /* - * The frame format layout is shown below. - * - * Line 0: UYVY UYVY ... UYVY - * Line 1: UYVY UYVY ... UYVY - * Line 2: UYVY UYVY ... UYVY - * Line 3: UYVY UYVY ... UYVY - * ... - * Line (n-2): UYVY UYVY ... UYVY - * Line (n-1): UYVY UYVY ... UYVY - * - * In this frame format, the even-line is - * as wide as the odd-line. - */ - rval = pixels_per_line * 2; - break; - case ATOMISP_INPUT_FORMAT_RGB_444: - case ATOMISP_INPUT_FORMAT_RGB_555: - case ATOMISP_INPUT_FORMAT_RGB_565: - case ATOMISP_INPUT_FORMAT_RGB_666: - case ATOMISP_INPUT_FORMAT_RGB_888: - /* - * The frame format layout is shown below. - * - * Line 0: ABGR ABGR ... ABGR - * Line 1: ABGR ABGR ... ABGR - * Line 2: ABGR ABGR ... ABGR - * Line 3: ABGR ABGR ... ABGR - * ... - * Line (n-2): ABGR ABGR ... ABGR - * Line (n-1): ABGR ABGR ... ABGR - * - * In this frame format, the even-line is - * as wide as the odd-line. 
- */ - rval = pixels_per_line * 4; - break; - case ATOMISP_INPUT_FORMAT_RAW_6: - case ATOMISP_INPUT_FORMAT_RAW_7: - case ATOMISP_INPUT_FORMAT_RAW_8: - case ATOMISP_INPUT_FORMAT_RAW_10: - case ATOMISP_INPUT_FORMAT_RAW_12: - case ATOMISP_INPUT_FORMAT_RAW_14: - case ATOMISP_INPUT_FORMAT_RAW_16: - case ATOMISP_INPUT_FORMAT_BINARY_8: - case ATOMISP_INPUT_FORMAT_USER_DEF1: - case ATOMISP_INPUT_FORMAT_USER_DEF2: - case ATOMISP_INPUT_FORMAT_USER_DEF3: - case ATOMISP_INPUT_FORMAT_USER_DEF4: - case ATOMISP_INPUT_FORMAT_USER_DEF5: - case ATOMISP_INPUT_FORMAT_USER_DEF6: - case ATOMISP_INPUT_FORMAT_USER_DEF7: - case ATOMISP_INPUT_FORMAT_USER_DEF8: - /* - * The frame format layout is shown below. - * - * Line 0: Pixel Pixel ... Pixel - * Line 1: Pixel Pixel ... Pixel - * Line 2: Pixel Pixel ... Pixel - * Line 3: Pixel Pixel ... Pixel - * ... - * Line (n-2): Pixel Pixel ... Pixel - * Line (n-1): Pixel Pixel ... Pixel - * - * In this frame format, the even-line is - * as wide as the odd-line. - */ - rval = pixels_per_line; - break; - default: - rval = 0; - break; - } - - return rval; -} - -static bool sh_css_translate_stream_cfg_to_input_system_input_port_id( - struct ia_css_stream_config *stream_cfg, - ia_css_isys_descr_t *isys_stream_descr) -{ - bool rc; - - rc = true; - switch (stream_cfg->mode) { - case IA_CSS_INPUT_MODE_TPG: - - if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID0) { - isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID; - } else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID1) { - isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID; - } else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID2) { - isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID; - } - - break; - case IA_CSS_INPUT_MODE_PRBS: - - if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID0) { - isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID; - } else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID1) { - isys_stream_descr->input_port_id = 
INPUT_SYSTEM_PIXELGEN_PORT1_ID; - } else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID2) { - isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID; - } - - break; - case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: - - if (stream_cfg->source.port.port == MIPI_PORT0_ID) { - isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT0_ID; - } else if (stream_cfg->source.port.port == MIPI_PORT1_ID) { - isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT1_ID; - } else if (stream_cfg->source.port.port == MIPI_PORT2_ID) { - isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT2_ID; - } - - break; - default: - rc = false; - break; - } - - return rc; -} - -static bool sh_css_translate_stream_cfg_to_input_system_input_port_type( - struct ia_css_stream_config *stream_cfg, - ia_css_isys_descr_t *isys_stream_descr) -{ - bool rc; - - rc = true; - switch (stream_cfg->mode) { - case IA_CSS_INPUT_MODE_TPG: - - isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_TPG; - - break; - case IA_CSS_INPUT_MODE_PRBS: - - isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_PRBS; - - break; - case IA_CSS_INPUT_MODE_SENSOR: - case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: - - isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_SENSOR; - break; - - default: - rc = false; - break; - } - - return rc; -} - -static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr( - struct ia_css_stream_config *stream_cfg, - ia_css_isys_descr_t *isys_stream_descr, - int isys_stream_idx) -{ - bool rc; - - rc = true; - switch (stream_cfg->mode) { - case IA_CSS_INPUT_MODE_TPG: - if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_RAMP) { - isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_RAMP; - } else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_CHECKERBOARD) { - isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_CHBO; - } else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_MONO) { - isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_MONO; - } else { - rc = false; - } - 
- /* - * TODO - * - Make "color_cfg" as part of "ia_css_tpg_config". - */ - isys_stream_descr->tpg_port_attr.color_cfg.R1 = 51; - isys_stream_descr->tpg_port_attr.color_cfg.G1 = 102; - isys_stream_descr->tpg_port_attr.color_cfg.B1 = 255; - isys_stream_descr->tpg_port_attr.color_cfg.R2 = 0; - isys_stream_descr->tpg_port_attr.color_cfg.G2 = 100; - isys_stream_descr->tpg_port_attr.color_cfg.B2 = 160; - - isys_stream_descr->tpg_port_attr.mask_cfg.h_mask = stream_cfg->source.tpg.x_mask; - isys_stream_descr->tpg_port_attr.mask_cfg.v_mask = stream_cfg->source.tpg.y_mask; - isys_stream_descr->tpg_port_attr.mask_cfg.hv_mask = stream_cfg->source.tpg.xy_mask; - - isys_stream_descr->tpg_port_attr.delta_cfg.h_delta = stream_cfg->source.tpg.x_delta; - isys_stream_descr->tpg_port_attr.delta_cfg.v_delta = stream_cfg->source.tpg.y_delta; - - /* - * TODO - * - Make "sync_gen_cfg" as part of "ia_css_tpg_config". - */ - isys_stream_descr->tpg_port_attr.sync_gen_cfg.hblank_cycles = 100; - isys_stream_descr->tpg_port_attr.sync_gen_cfg.vblank_cycles = 100; - isys_stream_descr->tpg_port_attr.sync_gen_cfg.pixels_per_clock = stream_cfg->pixels_per_clock; - isys_stream_descr->tpg_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t) ~(0x0); - isys_stream_descr->tpg_port_attr.sync_gen_cfg.pixels_per_line = stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width; - isys_stream_descr->tpg_port_attr.sync_gen_cfg.lines_per_frame = stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height; - - break; - case IA_CSS_INPUT_MODE_PRBS: - - isys_stream_descr->prbs_port_attr.seed0 = stream_cfg->source.prbs.seed; - isys_stream_descr->prbs_port_attr.seed1 = stream_cfg->source.prbs.seed1; - - /* - * TODO - * - Make "sync_gen_cfg" as part of "ia_css_prbs_config". 
- */ - isys_stream_descr->prbs_port_attr.sync_gen_cfg.hblank_cycles = 100; - isys_stream_descr->prbs_port_attr.sync_gen_cfg.vblank_cycles = 100; - isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_clock = stream_cfg->pixels_per_clock; - isys_stream_descr->prbs_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t) ~(0x0); - isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_line = stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width; - isys_stream_descr->prbs_port_attr.sync_gen_cfg.lines_per_frame = stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height; - - break; - case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: - { - enum ia_css_err err; - unsigned int fmt_type; - - err = ia_css_isys_convert_stream_format_to_mipi_format( - stream_cfg->isys_config[isys_stream_idx].format, - MIPI_PREDICTOR_NONE, - &fmt_type); - if (err != IA_CSS_SUCCESS) - rc = false; - - isys_stream_descr->csi_port_attr.active_lanes = stream_cfg->source.port.num_lanes; - isys_stream_descr->csi_port_attr.fmt_type = fmt_type; - isys_stream_descr->csi_port_attr.ch_id = stream_cfg->channel_id; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - isys_stream_descr->online = stream_cfg->online; -#endif - err |= ia_css_isys_convert_compressed_format( - &stream_cfg->source.port.compression, - isys_stream_descr); - if (err != IA_CSS_SUCCESS) - rc = false; - - /* metadata */ - isys_stream_descr->metadata.enable = false; - if (stream_cfg->metadata_config.resolution.height > 0) { - err = ia_css_isys_convert_stream_format_to_mipi_format( - stream_cfg->metadata_config.data_type, - MIPI_PREDICTOR_NONE, - &fmt_type); - if (err != IA_CSS_SUCCESS) - rc = false; - isys_stream_descr->metadata.fmt_type = fmt_type; - isys_stream_descr->metadata.bits_per_pixel = - ia_css_util_input_format_bpp(stream_cfg->metadata_config.data_type, true); - isys_stream_descr->metadata.pixels_per_line = stream_cfg->metadata_config.resolution.width; - isys_stream_descr->metadata.lines_per_frame = 
stream_cfg->metadata_config.resolution.height; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* For new input system, number of str2mmio requests must be even. - * So we round up number of metadata lines to be even. */ - if (isys_stream_descr->metadata.lines_per_frame > 0) - isys_stream_descr->metadata.lines_per_frame += - (isys_stream_descr->metadata.lines_per_frame & 1); -#endif - isys_stream_descr->metadata.align_req_in_bytes = - ia_css_csi2_calculate_input_system_alignment(stream_cfg->metadata_config.data_type); - isys_stream_descr->metadata.enable = true; - } - - break; - } - default: - rc = false; - break; - } - - return rc; -} - -static bool sh_css_translate_stream_cfg_to_input_system_input_port_resolution( - struct ia_css_stream_config *stream_cfg, - ia_css_isys_descr_t *isys_stream_descr, - int isys_stream_idx) -{ - unsigned int bits_per_subpixel; - unsigned int max_subpixels_per_line; - unsigned int lines_per_frame; - unsigned int align_req_in_bytes; - enum atomisp_input_format fmt_type; - - fmt_type = stream_cfg->isys_config[isys_stream_idx].format; - if ((stream_cfg->mode == IA_CSS_INPUT_MODE_SENSOR || - stream_cfg->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) && - stream_cfg->source.port.compression.type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) { - - if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel == - UNCOMPRESSED_BITS_PER_PIXEL_10) { - fmt_type = ATOMISP_INPUT_FORMAT_RAW_10; - } - else if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel == - UNCOMPRESSED_BITS_PER_PIXEL_12) { - fmt_type = ATOMISP_INPUT_FORMAT_RAW_12; - } - else - return false; - } - - bits_per_subpixel = - sh_css_stream_format_2_bits_per_subpixel(fmt_type); - if (bits_per_subpixel == 0) - return false; - - max_subpixels_per_line = - csi2_protocol_calculate_max_subpixels_per_line(fmt_type, - stream_cfg->isys_config[isys_stream_idx].input_res.width); - if (max_subpixels_per_line == 0) - return false; - - lines_per_frame = 
stream_cfg->isys_config[isys_stream_idx].input_res.height; - if (lines_per_frame == 0) - return false; - - align_req_in_bytes = ia_css_csi2_calculate_input_system_alignment(fmt_type); - - /* HW needs subpixel info for their settings */ - isys_stream_descr->input_port_resolution.bits_per_pixel = bits_per_subpixel; - isys_stream_descr->input_port_resolution.pixels_per_line = max_subpixels_per_line; - isys_stream_descr->input_port_resolution.lines_per_frame = lines_per_frame; - isys_stream_descr->input_port_resolution.align_req_in_bytes = align_req_in_bytes; - - return true; -} - -static bool sh_css_translate_stream_cfg_to_isys_stream_descr( - struct ia_css_stream_config *stream_cfg, - bool early_polling, - ia_css_isys_descr_t *isys_stream_descr, - int isys_stream_idx) -{ - bool rc; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_translate_stream_cfg_to_isys_stream_descr() enter:\n"); - rc = sh_css_translate_stream_cfg_to_input_system_input_port_id(stream_cfg, isys_stream_descr); - rc &= sh_css_translate_stream_cfg_to_input_system_input_port_type(stream_cfg, isys_stream_descr); - rc &= sh_css_translate_stream_cfg_to_input_system_input_port_attr(stream_cfg, isys_stream_descr, isys_stream_idx); - rc &= sh_css_translate_stream_cfg_to_input_system_input_port_resolution(stream_cfg, isys_stream_descr, isys_stream_idx); - - isys_stream_descr->raw_packed = stream_cfg->pack_raw_pixels; - isys_stream_descr->linked_isys_stream_id = (int8_t) stream_cfg->isys_config[isys_stream_idx].linked_isys_stream_id; - /* - * Early polling is required for timestamp accuracy in certain case. - * The ISYS HW polling is started on - * ia_css_isys_stream_capture_indication() instead of - * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of - * capture takes longer than getting an ISYS frame - * - * Only 2401 relevant ?? - */ - isys_stream_descr->polling_mode - = early_polling ? 
INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST - : INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n"); - - return rc; -} - -static bool sh_css_translate_binary_info_to_input_system_output_port_attr( - struct ia_css_binary *binary, - ia_css_isys_descr_t *isys_stream_descr) -{ - if (!binary) - return false; - - isys_stream_descr->output_port_attr.left_padding = binary->left_padding; - isys_stream_descr->output_port_attr.max_isp_input_width = binary->info->sp.input.max_width; - - return true; -} - -static enum ia_css_err -sh_css_config_input_network(struct ia_css_stream *stream) -{ - bool rc; - ia_css_isys_descr_t isys_stream_descr; - unsigned int sp_thread_id; - struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal; - struct ia_css_pipe *pipe = NULL; - struct ia_css_binary *binary = NULL; - int i; - uint32_t isys_stream_id; - bool early_polling = false; - - assert(stream != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_config_input_network() enter 0x%p:\n", stream); - - if (stream->config.continuous == true) { - if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE) { - pipe = stream->last_pipe; - } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_YUVPP) { - pipe = stream->last_pipe; - } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) { - pipe = stream->last_pipe->pipe_settings.preview.copy_pipe; - } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) { - pipe = stream->last_pipe->pipe_settings.video.copy_pipe; - } - } else { - pipe = stream->last_pipe; - if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE) { - /* - * We need to poll the ISYS HW in capture_indication itself - * for "non-continuous" capture usecase for getting accurate - * isys frame capture timestamps. - * This is because the capturepipe propcessing takes longer - * to execute than the input system frame capture. 
- * 2401 specific - */ - early_polling = true; - } - } - - assert(pipe != NULL); - if (pipe == NULL) - return IA_CSS_ERR_INTERNAL_ERROR; - - if (pipe->pipeline.stages != NULL) - if (pipe->pipeline.stages->binary != NULL) - binary = pipe->pipeline.stages->binary; - - - - if (binary) { - /* this was being done in ifmtr in 2400. - * online and cont bypass the init_in_frameinfo_memory_defaults - * so need to do it here - */ - ia_css_get_crop_offsets(pipe, &binary->in_frame_info); - } - - /* get the SP thread id */ - rc = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &sp_thread_id); - if (!rc) - return IA_CSS_ERR_INTERNAL_ERROR; - /* get the target input terminal */ - sp_pipeline_input_terminal = &(sh_css_sp_group.pipe_io[sp_thread_id].input); - - for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) { - /* initialization */ - memset((void*)(&isys_stream_descr), 0, sizeof(ia_css_isys_descr_t)); - sp_pipeline_input_terminal->context.virtual_input_system_stream[i].valid = 0; - sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i].valid = 0; - - if (!stream->config.isys_config[i].valid) - continue; - - /* translate the stream configuration to the Input System (2401) configuration */ - rc = sh_css_translate_stream_cfg_to_isys_stream_descr( - &(stream->config), - early_polling, - &(isys_stream_descr), i); - - if (stream->config.online) { - rc &= sh_css_translate_binary_info_to_input_system_output_port_attr( - binary, - &(isys_stream_descr)); - } - - if (!rc) - return IA_CSS_ERR_INTERNAL_ERROR; - - isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, i); - - /* create the virtual Input System (2401) */ - rc = ia_css_isys_stream_create( - &(isys_stream_descr), - &(sp_pipeline_input_terminal->context.virtual_input_system_stream[i]), - isys_stream_id); - if (!rc) - return IA_CSS_ERR_INTERNAL_ERROR; - - /* calculate the configuration of the virtual Input System (2401) */ - rc = ia_css_isys_stream_calculate_cfg( - 
&(sp_pipeline_input_terminal->context.virtual_input_system_stream[i]), - &(isys_stream_descr), - &(sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i])); - if (!rc) { - ia_css_isys_stream_destroy(&(sp_pipeline_input_terminal->context.virtual_input_system_stream[i])); - return IA_CSS_ERR_INTERNAL_ERROR; - } - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_config_input_network() leave:\n"); - - return IA_CSS_SUCCESS; -} - -static inline struct ia_css_pipe *stream_get_last_pipe( - struct ia_css_stream *stream) -{ - struct ia_css_pipe *last_pipe = NULL; - if (stream != NULL) - last_pipe = stream->last_pipe; - - return last_pipe; -} - -static inline struct ia_css_pipe *stream_get_copy_pipe( - struct ia_css_stream *stream) -{ - struct ia_css_pipe *copy_pipe = NULL; - struct ia_css_pipe *last_pipe = NULL; - enum ia_css_pipe_id pipe_id; - - last_pipe = stream_get_last_pipe(stream); - - if ((stream != NULL) && - (last_pipe != NULL) && - (stream->config.continuous)) { - - pipe_id = last_pipe->mode; - switch (pipe_id) { - case IA_CSS_PIPE_ID_PREVIEW: - copy_pipe = last_pipe->pipe_settings.preview.copy_pipe; - break; - case IA_CSS_PIPE_ID_VIDEO: - copy_pipe = last_pipe->pipe_settings.video.copy_pipe; - break; - default: - copy_pipe = NULL; - break; - } - } - - return copy_pipe; -} - -static inline struct ia_css_pipe *stream_get_target_pipe( - struct ia_css_stream *stream) -{ - struct ia_css_pipe *target_pipe; - - /* get the pipe that consumes the stream */ - if (stream->config.continuous) { - target_pipe = stream_get_copy_pipe(stream); - } else { - target_pipe = stream_get_last_pipe(stream); - } - - return target_pipe; -} - -static enum ia_css_err stream_csi_rx_helper( - struct ia_css_stream *stream, - enum ia_css_err (*func)(enum mipi_port_id, uint32_t)) -{ - enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR; - uint32_t sp_thread_id, stream_id; - bool rc; - struct ia_css_pipe *target_pipe = NULL; - - if ((stream == NULL) || 
(stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR)) - goto exit; - - target_pipe = stream_get_target_pipe(stream); - - if (target_pipe == NULL) - goto exit; - - rc = ia_css_pipeline_get_sp_thread_id( - ia_css_pipe_get_pipe_num(target_pipe), - &sp_thread_id); - - if (!rc) - goto exit; - - /* (un)register all valid "virtual isys streams" within the ia_css_stream */ - stream_id = 0; - do { - if (stream->config.isys_config[stream_id].valid) { - uint32_t isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, stream_id); - retval = func(stream->config.source.port.port, isys_stream_id); - } - stream_id++; - } while ((retval == IA_CSS_SUCCESS) && - (stream_id < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH)); - -exit: - return retval; -} - -static inline enum ia_css_err stream_register_with_csi_rx( - struct ia_css_stream *stream) -{ - return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_register_stream); -} - -static inline enum ia_css_err stream_unregister_with_csi_rx( - struct ia_css_stream *stream) -{ - return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_unregister_stream); -} -#endif - -#if WITH_PC_MONITORING -static struct task_struct *my_kthread; /* Handle for the monitoring thread */ -static int sh_binary_running; /* Enable sampling in the thread */ - -static void print_pc_histo(char *core_name, struct sh_css_pc_histogram *hist) -{ - unsigned i; - unsigned cnt_run = 0; - unsigned cnt_stall = 0; - - if (hist == NULL) - return; - - sh_css_print("%s histogram length = %d\n", core_name, hist->length); - sh_css_print("%s PC\trun\tstall\n", core_name); - - for (i = 0; i < hist->length; i++) { - if ((hist->run[i] == 0) && (hist->run[i] == hist->stall[i])) - continue; - sh_css_print("%s %d\t%d\t%d\n", - core_name, i, hist->run[i], hist->stall[i]); - cnt_run += hist->run[i]; - cnt_stall += hist->stall[i]; - } - - sh_css_print(" Statistics for %s, cnt_run = %d, cnt_stall = %d, " - "hist->length = %d\n", - core_name, cnt_run, cnt_stall, hist->length); -} - -static 
void print_pc_histogram(void) -{ - struct ia_css_binary_metrics *metrics; - - for (metrics = sh_css_metrics.binary_metrics; - metrics; - metrics = metrics->next) { - if (metrics->mode == IA_CSS_BINARY_MODE_PREVIEW || - metrics->mode == IA_CSS_BINARY_MODE_VF_PP) { - sh_css_print("pc_histogram for binary %d is SKIPPED\n", - metrics->id); - continue; - } - - sh_css_print(" pc_histogram for binary %d\n", metrics->id); - print_pc_histo(" ISP", &metrics->isp_histogram); - print_pc_histo(" SP", &metrics->sp_histogram); - sh_css_print("print_pc_histogram() done for binay->id = %d, " - "done.\n", metrics->id); - } - - sh_css_print("PC_MONITORING:print_pc_histogram() -- DONE\n"); -} - -static int pc_monitoring(void *data) -{ - int i = 0; - - (void)data; - while (true) { - if (sh_binary_running) { - sh_css_metrics_sample_pcs(); -#if MULTIPLE_SAMPLES - for (i = 0; i < NOF_SAMPLES; i++) - sh_css_metrics_sample_pcs(); -#endif - } - usleep_range(10, 50); - } - return 0; -} - -static void spying_thread_create(void) -{ - my_kthread = kthread_run(pc_monitoring, NULL, "sh_pc_monitor"); - sh_css_metrics_enable_pc_histogram(1); -} - -static void input_frame_info(struct ia_css_frame_info frame_info) -{ - sh_css_print("SH_CSS:input_frame_info() -- frame->info.res.width = %d, " - "frame->info.res.height = %d, format = %d\n", - frame_info.res.width, frame_info.res.height, frame_info.format); -} -#endif /* WITH_PC_MONITORING */ - -static void -start_binary(struct ia_css_pipe *pipe, - struct ia_css_binary *binary) -{ - struct ia_css_stream *stream; - - assert(pipe != NULL); - /* Acceleration uses firmware, the binary thus can be NULL */ - /* assert(binary != NULL); */ - - (void)binary; - -#if !defined(HAS_NO_INPUT_SYSTEM) - stream = pipe->stream; -#else - (void)pipe; - (void)stream; -#endif - - if (binary) - sh_css_metrics_start_binary(&binary->metrics); - -#if WITH_PC_MONITORING - sh_css_print("PC_MONITORING: %s() -- binary id = %d , " - "enable_dvs_envelope = %d\n", - __func__, 
binary->info->sp.id, - binary->info->sp.enable.dvs_envelope); - input_frame_info(binary->in_frame_info); - - if (binary && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_VIDEO) - sh_binary_running = true; -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401) - if (stream->reconfigure_css_rx) { - ia_css_isys_rx_configure(&pipe->stream->csi_rx_config, - pipe->stream->config.mode); - stream->reconfigure_css_rx = false; - } -#endif -} - -/* start the copy function on the SP */ -static enum ia_css_err -start_copy_on_sp(struct ia_css_pipe *pipe, - struct ia_css_frame *out_frame) -{ - - (void)out_frame; - assert(pipe != NULL); - assert(pipe->stream != NULL); - - if ((pipe == NULL) || (pipe->stream == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - -#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401) - if (pipe->stream->reconfigure_css_rx) - ia_css_isys_rx_disable(); -#endif - - if (pipe->stream->config.input_config.format != ATOMISP_INPUT_FORMAT_BINARY_8) - return IA_CSS_ERR_INTERNAL_ERROR; - sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame, pipe->stream->config.pixels_per_clock == 2); - -#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401) - if (pipe->stream->reconfigure_css_rx) { - ia_css_isys_rx_configure(&pipe->stream->csi_rx_config, pipe->stream->config.mode); - pipe->stream->reconfigure_css_rx = false; - } -#endif - - return IA_CSS_SUCCESS; -} - -void sh_css_binary_args_reset(struct sh_css_binary_args *args) -{ - unsigned int i; - -#ifndef ISP2401 - for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) -#else - for (i = 0; i < NUM_TNR_FRAMES; i++) -#endif - args->tnr_frames[i] = NULL; - for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) - args->delay_frames[i] = NULL; - args->in_frame = NULL; - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) - args->out_frame[i] = NULL; - args->out_vf_frame = NULL; - args->copy_vf = false; - args->copy_output = true; - 
args->vf_downscale_log2 = 0; -} - -static void start_pipe( - struct ia_css_pipe *me, - enum sh_css_pipe_config_override copy_ovrd, - enum ia_css_input_mode input_mode) -{ -#if defined(HAS_NO_INPUT_SYSTEM) - (void)input_mode; -#endif - - IA_CSS_ENTER_PRIVATE("me = %p, copy_ovrd = %d, input_mode = %d", - me, copy_ovrd, input_mode); - - assert(me != NULL); /* all callers are in this file and call with non null argument */ - - sh_css_sp_init_pipeline(&me->pipeline, - me->mode, - (uint8_t)ia_css_pipe_get_pipe_num(me), - me->config.default_capture_config.enable_xnr != 0, - me->stream->config.pixels_per_clock == 2, - me->stream->config.continuous, - false, - me->required_bds_factor, - copy_ovrd, - input_mode, - &me->stream->config.metadata_config, - &me->stream->info.metadata_info -#if !defined(HAS_NO_INPUT_SYSTEM) - ,(input_mode==IA_CSS_INPUT_MODE_MEMORY) ? - (enum mipi_port_id)0 : - me->stream->config.source.port.port -#endif -#ifdef ISP2401 - ,&me->config.internal_frame_origin_bqs_on_sctbl, - me->stream->isp_params_configs -#endif - ); - - if (me->config.mode != IA_CSS_PIPE_MODE_COPY) { - struct ia_css_pipeline_stage *stage; - stage = me->pipeline.stages; - if (stage) { - me->pipeline.current_stage = stage; - start_binary(me, stage->binary); - } - } - IA_CSS_LEAVE_PRIVATE("void"); -} - -void -sh_css_invalidate_shading_tables(struct ia_css_stream *stream) -{ - int i; - assert(stream != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "sh_css_invalidate_shading_tables() enter:\n"); - - for (i=0; inum_pipes; i++) { - assert(stream->pipes[i] != NULL); - sh_css_pipe_free_shading_table(stream->pipes[i]); - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "sh_css_invalidate_shading_tables() leave: return_void\n"); -} - -#ifndef ISP2401 -static void -enable_interrupts(enum ia_css_irq_type irq_type) -{ -#ifdef USE_INPUT_SYSTEM_VERSION_2 - enum mipi_port_id port; -#endif - bool enable_pulse = irq_type != IA_CSS_IRQ_TYPE_EDGE; - IA_CSS_ENTER_PRIVATE(""); - /* Enable IRQ on the 
SP which signals that SP goes to idle - * (aka ready state) */ - cnd_sp_irq_enable(SP0_ID, true); - /* Set the IRQ device 0 to either level or pulse */ - irq_enable_pulse(IRQ0_ID, enable_pulse); - - cnd_virq_enable_channel(virq_sp, true); - - /* Enable SW interrupt 0, this is used to signal ISYS events */ - cnd_virq_enable_channel( - (virq_id_t)(IRQ_SW_CHANNEL0_ID + IRQ_SW_CHANNEL_OFFSET), - true); - /* Enable SW interrupt 1, this is used to signal PSYS events */ - cnd_virq_enable_channel( - (virq_id_t)(IRQ_SW_CHANNEL1_ID + IRQ_SW_CHANNEL_OFFSET), - true); -#if !defined(HAS_IRQ_MAP_VERSION_2) - /* IRQ_SW_CHANNEL2_ID does not exist on 240x systems */ - cnd_virq_enable_channel( - (virq_id_t)(IRQ_SW_CHANNEL2_ID + IRQ_SW_CHANNEL_OFFSET), - true); - virq_clear_all(); -#endif - -#ifdef USE_INPUT_SYSTEM_VERSION_2 - for (port = 0; port < N_MIPI_PORT_ID; port++) - ia_css_isys_rx_enable_all_interrupts(port); -#endif - - IA_CSS_LEAVE_PRIVATE(""); -} - -#endif - -static bool sh_css_setup_spctrl_config(const struct ia_css_fw_info *fw, - const char * program, - ia_css_spctrl_cfg *spctrl_cfg) -{ - if((fw == NULL)||(spctrl_cfg == NULL)) - return false; - spctrl_cfg->sp_entry = 0; - spctrl_cfg->program_name = (char *)(program); - - spctrl_cfg->ddr_data_offset = fw->blob.data_source; - spctrl_cfg->dmem_data_addr = fw->blob.data_target; - spctrl_cfg->dmem_bss_addr = fw->blob.bss_target; - spctrl_cfg->data_size = fw->blob.data_size ; - spctrl_cfg->bss_size = fw->blob.bss_size; - - spctrl_cfg->spctrl_config_dmem_addr = fw->info.sp.init_dmem_data; - spctrl_cfg->spctrl_state_dmem_addr = fw->info.sp.sw_state; - - spctrl_cfg->code_size = fw->blob.size; - spctrl_cfg->code = fw->blob.code; - spctrl_cfg->sp_entry = fw->info.sp.sp_entry; /* entry function ptr on SP */ - - return true; -} -void -ia_css_unload_firmware(void) -{ - if (sh_css_num_binaries) - { - /* we have already loaded before so get rid of the old stuff */ - ia_css_binary_uninit(); - sh_css_unload_firmware(); - } - 
fw_explicitly_loaded = false; -} - -static void -ia_css_reset_defaults(struct sh_css* css) -{ - struct sh_css default_css; - - /* Reset everything to zero */ - memset(&default_css, 0, sizeof(default_css)); - - /* Initialize the non zero values*/ - default_css.check_system_idle = true; - default_css.num_cont_raw_frames = NUM_CONTINUOUS_FRAMES; - - /* All should be 0: but memset does it already. - * default_css.num_mipi_frames[N_CSI_PORTS] = 0; - */ - - default_css.irq_type = IA_CSS_IRQ_TYPE_EDGE; - - /*Set the defaults to the output */ - *css = default_css; -} - -bool -ia_css_check_firmware_version(const struct ia_css_fw *fw) -{ - bool retval = false; - - if (fw != NULL) { - retval = sh_css_check_firmware_version(fw->data); - } - return retval; -} - -enum ia_css_err -ia_css_load_firmware(const struct ia_css_env *env, - const struct ia_css_fw *fw) -{ - enum ia_css_err err; - - if (env == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - if (fw == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() enter\n"); - - /* make sure we initialize my_css */ - if (my_css.flush != env->cpu_mem_env.flush) { - ia_css_reset_defaults(&my_css); - my_css.flush = env->cpu_mem_env.flush; - } - - ia_css_unload_firmware(); /* in case we are called twice */ - err = sh_css_load_firmware(fw->data, fw->bytes); - if (err == IA_CSS_SUCCESS) { - err = ia_css_binary_init_infos(); - if (err == IA_CSS_SUCCESS) - fw_explicitly_loaded = true; - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() leave \n"); - return err; -} - -enum ia_css_err -ia_css_init(const struct ia_css_env *env, - const struct ia_css_fw *fw, - uint32_t mmu_l1_base, - enum ia_css_irq_type irq_type) -{ - enum ia_css_err err; - ia_css_spctrl_cfg spctrl_cfg; - - void (*flush_func)(struct ia_css_acc_fw *fw); - hrt_data select, enable; - - /* - * The C99 standard does not specify the exact object representation of structs; - * the representation is compiler 
dependent. - * - * The structs that are communicated between host and SP/ISP should have the - * exact same object representation. The compiler that is used to compile the - * firmware is hivecc. - * - * To check if a different compiler, used to compile a host application, uses - * another object representation, macros are defined specifying the size of - * the structs as expected by the firmware. - * - * A host application shall verify that a sizeof( ) of the struct is equal to - * the SIZE_OF_XXX macro of the corresponding struct. If they are not - * equal, functionality will break. - */ - /* Check struct sh_css_ddr_address_map */ - COMPILATION_ERROR_IF( sizeof(struct sh_css_ddr_address_map) != SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT ); - /* Check struct host_sp_queues */ - COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != SIZE_OF_HOST_SP_QUEUES_STRUCT ); - COMPILATION_ERROR_IF( sizeof(struct ia_css_circbuf_desc_s) != SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT ); - COMPILATION_ERROR_IF( sizeof(struct ia_css_circbuf_elem_s) != SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT ); - - /* Check struct host_sp_communication */ - COMPILATION_ERROR_IF( sizeof(struct host_sp_communication) != SIZE_OF_HOST_SP_COMMUNICATION_STRUCT ); - COMPILATION_ERROR_IF( sizeof(struct sh_css_event_irq_mask) != SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT ); - - /* Check struct sh_css_hmm_buffer */ - COMPILATION_ERROR_IF( sizeof(struct sh_css_hmm_buffer) != SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT ); - COMPILATION_ERROR_IF( sizeof(struct ia_css_isp_3a_statistics) != SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT ); - COMPILATION_ERROR_IF( sizeof(struct ia_css_isp_dvs_statistics) != SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT ); - COMPILATION_ERROR_IF( sizeof(struct ia_css_metadata) != SIZE_OF_IA_CSS_METADATA_STRUCT ); - - /* Check struct ia_css_init_dmem_cfg */ - COMPILATION_ERROR_IF( sizeof(struct ia_css_sp_init_dmem_cfg) != SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT ); - - if (fw == NULL && !fw_explicitly_loaded) - return 
IA_CSS_ERR_INVALID_ARGUMENTS; - if (env == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - sh_css_printf = env->print_env.debug_print; - - IA_CSS_ENTER("void"); - - flush_func = env->cpu_mem_env.flush; - - pipe_global_init(); - ia_css_pipeline_init(); - ia_css_queue_map_init(); - - ia_css_device_access_init(&env->hw_access_env); - - select = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_select) - & (~GPIO_FLASH_PIN_MASK); - enable = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_e) - | GPIO_FLASH_PIN_MASK; - sh_css_mmu_set_page_table_base_index(mmu_l1_base); -#ifndef ISP2401 - my_css_save.mmu_base = mmu_l1_base; -#else - ia_css_save_mmu_base_addr(mmu_l1_base); -#endif - - ia_css_reset_defaults(&my_css); - - my_css_save.driver_env = *env; - my_css.flush = flush_func; - - err = ia_css_rmgr_init(); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } - -#ifndef ISP2401 - IA_CSS_LOG("init: %d", my_css_save_initialized); -#else - ia_css_save_restore_data_init(); -#endif - -#ifndef ISP2401 - if (!my_css_save_initialized) - { - my_css_save_initialized = true; - my_css_save.mode = sh_css_mode_working; - memset(my_css_save.stream_seeds, 0, sizeof(struct sh_css_stream_seed) * MAX_ACTIVE_STREAMS); - IA_CSS_LOG("init: %d mode=%d", my_css_save_initialized, my_css_save.mode); - } -#endif - mipi_init(); - -#ifndef ISP2401 - /* In case this has been programmed already, update internal - data structure ... 
DEPRECATED */ - my_css.page_table_base_index = mmu_get_page_table_base_index(MMU0_ID); - -#endif - my_css.irq_type = irq_type; -#ifndef ISP2401 - my_css_save.irq_type = irq_type; -#else - ia_css_save_irq_type(irq_type); -#endif - enable_interrupts(my_css.irq_type); - - /* configure GPIO to output mode */ - gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_select, select); - gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_e, enable); - gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_0, 0); - - err = ia_css_refcount_init(REFCOUNT_SIZE); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } - err = sh_css_params_init(); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } - if (fw) - { - ia_css_unload_firmware(); /* in case we already had firmware loaded */ - err = sh_css_load_firmware(fw->data, fw->bytes); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } - err = ia_css_binary_init_infos(); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } - fw_explicitly_loaded = false; -#ifndef ISP2401 - my_css_save.loaded_fw = (struct ia_css_fw *)fw; -#endif - } - if(!sh_css_setup_spctrl_config(&sh_css_sp_fw,SP_PROG_NAME,&spctrl_cfg)) - return IA_CSS_ERR_INTERNAL_ERROR; - - err = ia_css_spctrl_load_fw(SP0_ID, &spctrl_cfg); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } - -#if WITH_PC_MONITORING - if (!thread_alive) { - thread_alive++; - sh_css_print("PC_MONITORING: %s() -- create thread DISABLED\n", - __func__); - spying_thread_create(); - } -#endif - if (!sh_css_hrt_system_is_idle()) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_SYSTEM_NOT_IDLE); - return IA_CSS_ERR_SYSTEM_NOT_IDLE; - } - /* can be called here, queuing works, but: - - when sp is started later, it will wipe queued items - so for now we leave it for later and make sure - updates are not called to frequently. 
- sh_css_init_buffer_queues(); - */ - -#if defined(HAS_INPUT_SYSTEM_VERSION_2) && defined(HAS_INPUT_SYSTEM_VERSION_2401) -#if defined(USE_INPUT_SYSTEM_VERSION_2) - gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 0); -#elif defined (USE_INPUT_SYSTEM_VERSION_2401) - gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1); -#endif -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) - dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN, - ISP_DMA_MAX_BURST_LENGTH); - - if(ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR) - err = IA_CSS_ERR_INVALID_ARGUMENTS; -#endif - - sh_css_params_map_and_store_default_gdc_lut(); - - IA_CSS_LEAVE_ERR(err); - return err; -} - -enum ia_css_err ia_css_suspend(void) -{ - int i; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_suspend() enter\n"); - my_css_save.mode = sh_css_mode_suspend; - for(i=0;i unloading seed %d (%p)\n", i, my_css_save.stream_seeds[i].stream); - ia_css_stream_unload(my_css_save.stream_seeds[i].stream); - } - my_css_save.mode = sh_css_mode_working; - ia_css_stop_sp(); - ia_css_uninit(); - for(i=0;i after 1: seed %d (%p)\n", i, my_css_save.stream_seeds[i].stream); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_suspend() leave\n"); - return IA_CSS_SUCCESS; -} - -enum ia_css_err -ia_css_resume(void) -{ - int i, j; - enum ia_css_err err; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_resume() enter: void\n"); - - err = ia_css_init(&(my_css_save.driver_env), my_css_save.loaded_fw, my_css_save.mmu_base, my_css_save.irq_type); - if (err != IA_CSS_SUCCESS) - return err; - err = ia_css_start_sp(); - if (err != IA_CSS_SUCCESS) - return err; - my_css_save.mode = sh_css_mode_resume; - for(i=0;i seed stream %p\n", my_css_save.stream_seeds[i].stream); - if (my_css_save.stream_seeds[i].stream != NULL) - { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "==*> loading seed %d\n", i); - err = ia_css_stream_load(my_css_save.stream_seeds[i].stream); - if (err != IA_CSS_SUCCESS) - { - if (i) - for(j=0;j 
PAGE_SIZE) - return vmalloc(size); - return kmalloc(size, GFP_KERNEL); -} - -void *sh_css_calloc(size_t N, size_t size) -{ - void *p; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_calloc() enter: N=%zu, size=%zu\n",N,size); - - /* FIXME: this test can probably go away */ - if (size > 0) { - p = sh_css_malloc(N*size); - if (p) - memset(p, 0, size); - return p; - } - return NULL; -} - -void sh_css_free(void *ptr) -{ - if (is_vmalloc_addr(ptr)) - vfree(ptr); - else - kfree(ptr); -} - -/* For Acceleration API: Flush FW (shared buffer pointer) arguments */ -void -sh_css_flush(struct ia_css_acc_fw *fw) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_flush() enter:\n"); - if ((fw != NULL) && (my_css.flush != NULL)) - my_css.flush(fw); -} - -/* Mapping sp threads. Currently, this is done when a stream is created and - * pipelines are ready to be converted to sp pipelines. Be careful if you are - * doing it from stream_create since we could run out of sp threads due to - * allocation on inactive pipelines. */ -static enum ia_css_err -map_sp_threads(struct ia_css_stream *stream, bool map) -{ - struct ia_css_pipe *main_pipe = NULL; - struct ia_css_pipe *copy_pipe = NULL; - struct ia_css_pipe *capture_pipe = NULL; - struct ia_css_pipe *acc_pipe = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - enum ia_css_pipe_id pipe_id; - - assert(stream != NULL); - IA_CSS_ENTER_PRIVATE("stream = %p, map = %s", - stream, map ? 
"true" : "false"); - - if (stream == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - main_pipe = stream->last_pipe; - pipe_id = main_pipe->mode; - - ia_css_pipeline_map(main_pipe->pipe_num, map); - - switch (pipe_id) { - case IA_CSS_PIPE_ID_PREVIEW: - copy_pipe = main_pipe->pipe_settings.preview.copy_pipe; - capture_pipe = main_pipe->pipe_settings.preview.capture_pipe; - acc_pipe = main_pipe->pipe_settings.preview.acc_pipe; - break; - - case IA_CSS_PIPE_ID_VIDEO: - copy_pipe = main_pipe->pipe_settings.video.copy_pipe; - capture_pipe = main_pipe->pipe_settings.video.capture_pipe; - break; - - case IA_CSS_PIPE_ID_CAPTURE: - case IA_CSS_PIPE_ID_ACC: - default: - break; - } - - if (acc_pipe) { - ia_css_pipeline_map(acc_pipe->pipe_num, map); - } - - if(capture_pipe) { - ia_css_pipeline_map(capture_pipe->pipe_num, map); - } - - /* Firmware expects copy pipe to be the last pipe mapped. (if needed) */ - if(copy_pipe) { - ia_css_pipeline_map(copy_pipe->pipe_num, map); - } - /* DH regular multi pipe - not continuous mode: map the next pipes too */ - if (!stream->config.continuous) { - int i; - for (i = 1; i < stream->num_pipes; i++) - ia_css_pipeline_map(stream->pipes[i]->pipe_num, map); - } - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -/* creates a host pipeline skeleton for all pipes in a stream. Called during - * stream_create. 
*/ -static enum ia_css_err -create_host_pipeline_structure(struct ia_css_stream *stream) -{ - struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL; - struct ia_css_pipe *acc_pipe = NULL; - enum ia_css_pipe_id pipe_id; - struct ia_css_pipe *main_pipe = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned int copy_pipe_delay = 0, - capture_pipe_delay = 0; - - assert(stream != NULL); - IA_CSS_ENTER_PRIVATE("stream = %p", stream); - - if (stream == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - main_pipe = stream->last_pipe; - assert(main_pipe != NULL); - if (main_pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - pipe_id = main_pipe->mode; - - switch (pipe_id) { - case IA_CSS_PIPE_ID_PREVIEW: - copy_pipe = main_pipe->pipe_settings.preview.copy_pipe; - copy_pipe_delay = main_pipe->dvs_frame_delay; - capture_pipe = main_pipe->pipe_settings.preview.capture_pipe; - capture_pipe_delay = IA_CSS_FRAME_DELAY_0; - acc_pipe = main_pipe->pipe_settings.preview.acc_pipe; - err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode, main_pipe->pipe_num, main_pipe->dvs_frame_delay); - break; - - case IA_CSS_PIPE_ID_VIDEO: - copy_pipe = main_pipe->pipe_settings.video.copy_pipe; - copy_pipe_delay = main_pipe->dvs_frame_delay; - capture_pipe = main_pipe->pipe_settings.video.capture_pipe; - capture_pipe_delay = IA_CSS_FRAME_DELAY_0; - err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode, main_pipe->pipe_num, main_pipe->dvs_frame_delay); - break; - - case IA_CSS_PIPE_ID_CAPTURE: - capture_pipe = main_pipe; - capture_pipe_delay = main_pipe->dvs_frame_delay; - break; - - case IA_CSS_PIPE_ID_YUVPP: - err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode, - main_pipe->pipe_num, main_pipe->dvs_frame_delay); - break; - - case IA_CSS_PIPE_ID_ACC: - err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode, 
main_pipe->pipe_num, main_pipe->dvs_frame_delay); - break; - - default: - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if ((IA_CSS_SUCCESS == err) && copy_pipe) { - err = ia_css_pipeline_create(©_pipe->pipeline, - copy_pipe->mode, - copy_pipe->pipe_num, - copy_pipe_delay); - } - - if ((IA_CSS_SUCCESS == err) && capture_pipe) { - err = ia_css_pipeline_create(&capture_pipe->pipeline, - capture_pipe->mode, - capture_pipe->pipe_num, - capture_pipe_delay); - } - - if ((IA_CSS_SUCCESS == err) && acc_pipe) { - err = ia_css_pipeline_create(&acc_pipe->pipeline, acc_pipe->mode, acc_pipe->pipe_num, main_pipe->dvs_frame_delay); - } - - /* DH regular multi pipe - not continuous mode: create the next pipelines too */ - if (!stream->config.continuous) { - int i; - for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err; i++) { - main_pipe = stream->pipes[i]; - err = ia_css_pipeline_create(&main_pipe->pipeline, - main_pipe->mode, - main_pipe->pipe_num, - main_pipe->dvs_frame_delay); - } - } - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -/* creates a host pipeline for all pipes in a stream. Called during - * stream_start. */ -static enum ia_css_err -create_host_pipeline(struct ia_css_stream *stream) -{ - struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL; - struct ia_css_pipe *acc_pipe = NULL; - enum ia_css_pipe_id pipe_id; - struct ia_css_pipe *main_pipe = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned max_input_width = 0; - - IA_CSS_ENTER_PRIVATE("stream = %p", stream); - if (stream == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - main_pipe = stream->last_pipe; - pipe_id = main_pipe->mode; - - /* No continuous frame allocation for capture pipe. It uses the - * "main" pipe's frames. 
*/ - if ((pipe_id == IA_CSS_PIPE_ID_PREVIEW) || - (pipe_id == IA_CSS_PIPE_ID_VIDEO)) { - /* About pipe_id == IA_CSS_PIPE_ID_PREVIEW && stream->config.mode != IA_CSS_INPUT_MODE_MEMORY: - * The original condition pipe_id == IA_CSS_PIPE_ID_PREVIEW is too strong. E.g. in SkyCam (with memory - * based input frames) there is no continuous mode and thus no need for allocated continuous frames - * This is not only for SkyCam but for all preview cases that use DDR based input frames. For this - * reason the stream->config.mode != IA_CSS_INPUT_MODE_MEMORY has beed added. - */ - if (stream->config.continuous || - (pipe_id == IA_CSS_PIPE_ID_PREVIEW && stream->config.mode != IA_CSS_INPUT_MODE_MEMORY)) { - err = alloc_continuous_frames(main_pipe, true); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - } - -#if defined(USE_INPUT_SYSTEM_VERSION_2) - /* old isys: need to allocate_mipi_frames() even in IA_CSS_PIPE_MODE_COPY */ - if (pipe_id != IA_CSS_PIPE_ID_ACC) { - err = allocate_mipi_frames(main_pipe, &stream->info); - if (err != IA_CSS_SUCCESS) - goto ERR; - } -#elif defined(USE_INPUT_SYSTEM_VERSION_2401) - if ((pipe_id != IA_CSS_PIPE_ID_ACC) && - (main_pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) { - err = allocate_mipi_frames(main_pipe, &stream->info); - if (err != IA_CSS_SUCCESS) - goto ERR; - } -#endif - - switch (pipe_id) { - case IA_CSS_PIPE_ID_PREVIEW: - copy_pipe = main_pipe->pipe_settings.preview.copy_pipe; - capture_pipe = main_pipe->pipe_settings.preview.capture_pipe; - acc_pipe = main_pipe->pipe_settings.preview.acc_pipe; - max_input_width = - main_pipe->pipe_settings.preview.preview_binary.info->sp.input.max_width; - - err = create_host_preview_pipeline(main_pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - - break; - - case IA_CSS_PIPE_ID_VIDEO: - copy_pipe = main_pipe->pipe_settings.video.copy_pipe; - capture_pipe = main_pipe->pipe_settings.video.capture_pipe; - max_input_width = - main_pipe->pipe_settings.video.video_binary.info->sp.input.max_width; - - err = 
create_host_video_pipeline(main_pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - - break; - - case IA_CSS_PIPE_ID_CAPTURE: - capture_pipe = main_pipe; - - break; - - case IA_CSS_PIPE_ID_YUVPP: - err = create_host_yuvpp_pipeline(main_pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - - break; - - case IA_CSS_PIPE_ID_ACC: - err = create_host_acc_pipeline(main_pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - - break; - default: - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - if (err != IA_CSS_SUCCESS) - goto ERR; - - if(copy_pipe) { - err = create_host_copy_pipeline(copy_pipe, max_input_width, - main_pipe->continuous_frames[0]); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - if(capture_pipe) { - err = create_host_capture_pipeline(capture_pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - if (acc_pipe) { - err = create_host_acc_pipeline(acc_pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - /* DH regular multi pipe - not continuous mode: create the next pipelines too */ - if (!stream->config.continuous) { - int i; - for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err; i++) { - switch (stream->pipes[i]->mode) { - case IA_CSS_PIPE_ID_PREVIEW: - err = create_host_preview_pipeline(stream->pipes[i]); - break; - case IA_CSS_PIPE_ID_VIDEO: - err = create_host_video_pipeline(stream->pipes[i]); - break; - case IA_CSS_PIPE_ID_CAPTURE: - err = create_host_capture_pipeline(stream->pipes[i]); - break; - case IA_CSS_PIPE_ID_YUVPP: - err = create_host_yuvpp_pipeline(stream->pipes[i]); - break; - case IA_CSS_PIPE_ID_ACC: - err = create_host_acc_pipeline(stream->pipes[i]); - break; - default: - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - if (err != IA_CSS_SUCCESS) - goto ERR; - } - } - -ERR: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static enum ia_css_err -init_pipe_defaults(enum ia_css_pipe_mode mode, - struct ia_css_pipe *pipe, - bool copy_pipe) -{ - if (pipe == NULL) { - IA_CSS_ERROR("NULL pipe parameter"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - /* 
Initialize pipe to pre-defined defaults */ - *pipe = IA_CSS_DEFAULT_PIPE; - - /* TODO: JB should not be needed, but temporary backward reference */ - switch (mode) { - case IA_CSS_PIPE_MODE_PREVIEW: - pipe->mode = IA_CSS_PIPE_ID_PREVIEW; - pipe->pipe_settings.preview = IA_CSS_DEFAULT_PREVIEW_SETTINGS; - break; - case IA_CSS_PIPE_MODE_CAPTURE: - if (copy_pipe) { - pipe->mode = IA_CSS_PIPE_ID_COPY; - } else { - pipe->mode = IA_CSS_PIPE_ID_CAPTURE; - } - pipe->pipe_settings.capture = IA_CSS_DEFAULT_CAPTURE_SETTINGS; - break; - case IA_CSS_PIPE_MODE_VIDEO: - pipe->mode = IA_CSS_PIPE_ID_VIDEO; - pipe->pipe_settings.video = IA_CSS_DEFAULT_VIDEO_SETTINGS; - break; - case IA_CSS_PIPE_MODE_ACC: - pipe->mode = IA_CSS_PIPE_ID_ACC; - break; - case IA_CSS_PIPE_MODE_COPY: - pipe->mode = IA_CSS_PIPE_ID_CAPTURE; - break; - case IA_CSS_PIPE_MODE_YUVPP: - pipe->mode = IA_CSS_PIPE_ID_YUVPP; - pipe->pipe_settings.yuvpp = IA_CSS_DEFAULT_YUVPP_SETTINGS; - break; - default: - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - return IA_CSS_SUCCESS; -} - -static void -pipe_global_init(void) -{ - uint8_t i; - - my_css.pipe_counter = 0; - for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) { - my_css.all_pipes[i] = NULL; - } -} - -static enum ia_css_err -pipe_generate_pipe_num(const struct ia_css_pipe *pipe, unsigned int *pipe_number) -{ - const uint8_t INVALID_PIPE_NUM = (uint8_t)~(0); - uint8_t pipe_num = INVALID_PIPE_NUM; - uint8_t i; - - if (pipe == NULL) { - IA_CSS_ERROR("NULL pipe parameter"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - /* Assign a new pipe_num .... 
search for empty place */ - for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) { - if (my_css.all_pipes[i] == NULL) { - /*position is reserved */ - my_css.all_pipes[i] = (struct ia_css_pipe *)pipe; - pipe_num = i; - break; - } - } - if (pipe_num == INVALID_PIPE_NUM) { - /* Max number of pipes already allocated */ - IA_CSS_ERROR("Max number of pipes already created"); - return IA_CSS_ERR_RESOURCE_EXHAUSTED; - } - - my_css.pipe_counter++; - - IA_CSS_LOG("pipe_num (%d)", pipe_num); - - *pipe_number = pipe_num; - return IA_CSS_SUCCESS; -} - -static void -pipe_release_pipe_num(unsigned int pipe_num) -{ - my_css.all_pipes[pipe_num] = NULL; - my_css.pipe_counter--; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "pipe_release_pipe_num (%d)\n", pipe_num); -} - -static enum ia_css_err -create_pipe(enum ia_css_pipe_mode mode, - struct ia_css_pipe **pipe, - bool copy_pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipe *me; - - if (pipe == NULL) { - IA_CSS_ERROR("NULL pipe parameter"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - me = kmalloc(sizeof(*me), GFP_KERNEL); - if (!me) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - err = init_pipe_defaults(mode, me, copy_pipe); - if (err != IA_CSS_SUCCESS) { - kfree(me); - return err; - } - - err = pipe_generate_pipe_num(me, &(me->pipe_num)); - if (err != IA_CSS_SUCCESS) { - kfree(me); - return err; - } - - *pipe = me; - return IA_CSS_SUCCESS; -} - -struct ia_css_pipe * -find_pipe_by_num(uint32_t pipe_num) -{ - unsigned int i; - for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++){ - if (my_css.all_pipes[i] && - ia_css_pipe_get_pipe_num(my_css.all_pipes[i]) == pipe_num) { - return my_css.all_pipes[i]; - } - } - return NULL; -} - -static void sh_css_pipe_free_acc_binaries ( - struct ia_css_pipe *pipe) -{ - struct ia_css_pipeline *pipeline; - struct ia_css_pipeline_stage *stage; - - assert(pipe != NULL); - if (pipe == NULL) { - IA_CSS_ERROR("NULL input pointer"); - return; - } - pipeline = &pipe->pipeline; - - /* loop through the 
stages and unload them */ - for (stage = pipeline->stages; stage; stage = stage->next) { - struct ia_css_fw_info *firmware = (struct ia_css_fw_info *) - stage->firmware; - if (firmware) - ia_css_pipe_unload_extension(pipe, firmware); - } -} - -enum ia_css_err -ia_css_pipe_destroy(struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - IA_CSS_ENTER("pipe = %p", pipe); - - if (pipe == NULL) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if (pipe->stream != NULL) { - IA_CSS_LOG("ia_css_stream_destroy not called!"); - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - switch (pipe->config.mode) { - case IA_CSS_PIPE_MODE_PREVIEW: - /* need to take into account that this function is also called - on the internal copy pipe */ - if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) { - ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES, - pipe->continuous_frames); - ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES, - pipe->cont_md_buffers); - if (pipe->pipe_settings.preview.copy_pipe) { - err = ia_css_pipe_destroy(pipe->pipe_settings.preview.copy_pipe); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_destroy(): " - "destroyed internal copy pipe err=%d\n", err); - } - } - break; - case IA_CSS_PIPE_MODE_VIDEO: - if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) { - ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES, - pipe->continuous_frames); - ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES, - pipe->cont_md_buffers); - if (pipe->pipe_settings.video.copy_pipe) { - err = ia_css_pipe_destroy(pipe->pipe_settings.video.copy_pipe); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_destroy(): " - "destroyed internal copy pipe err=%d\n", err); - } - } -#ifndef ISP2401 - ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES, pipe->pipe_settings.video.tnr_frames); -#else - ia_css_frame_free_multiple(NUM_TNR_FRAMES, pipe->pipe_settings.video.tnr_frames); -#endif - 
ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES, pipe->pipe_settings.video.delay_frames); - break; - case IA_CSS_PIPE_MODE_CAPTURE: - ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES, pipe->pipe_settings.capture.delay_frames); - break; - case IA_CSS_PIPE_MODE_ACC: - sh_css_pipe_free_acc_binaries(pipe); - break; - case IA_CSS_PIPE_MODE_COPY: - break; - case IA_CSS_PIPE_MODE_YUVPP: - break; - } - - sh_css_params_free_gdc_lut(pipe->scaler_pp_lut); - pipe->scaler_pp_lut = mmgr_NULL; - - my_css.active_pipes[ia_css_pipe_get_pipe_num(pipe)] = NULL; - sh_css_pipe_free_shading_table(pipe); - - ia_css_pipeline_destroy(&pipe->pipeline); - pipe_release_pipe_num(ia_css_pipe_get_pipe_num(pipe)); - - /* Temporarily, not every sh_css_pipe has an acc_extension. */ - if (pipe->config.acc_extension) { - ia_css_pipe_unload_extension(pipe, pipe->config.acc_extension); - } - kfree(pipe); - IA_CSS_LEAVE("err = %d", err); - return err; -} - -void -ia_css_uninit(void) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() enter: void\n"); -#if WITH_PC_MONITORING - sh_css_print("PC_MONITORING: %s() -- started\n", __func__); - print_pc_histogram(); -#endif - - sh_css_params_free_default_gdc_lut(); - - - /* TODO: JB: implement decent check and handling of freeing mipi frames */ - //assert(ref_count_mipi_allocation == 0); //mipi frames are not freed - /* cleanup generic data */ - sh_css_params_uninit(); - ia_css_refcount_uninit(); - - ia_css_rmgr_uninit(); - -#if !defined(HAS_NO_INPUT_FORMATTER) - /* needed for reprogramming the inputformatter after power cycle of css */ - ifmtr_set_if_blocking_mode_reset = true; -#endif - - if (!fw_explicitly_loaded) { - ia_css_unload_firmware(); - } - ia_css_spctrl_unload_fw(SP0_ID); - sh_css_sp_set_sp_running(false); -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - /* check and free any remaining mipi frames */ - free_mipi_frames(NULL); -#endif - - sh_css_sp_reset_global_vars(); - -#if 
!defined(HAS_NO_INPUT_SYSTEM) - ia_css_isys_uninit(); -#endif - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() leave: return_void\n"); -} - -#if defined(HAS_IRQ_MAP_VERSION_2) -enum ia_css_err ia_css_irq_translate( - unsigned int *irq_infos) -{ - virq_id_t irq; - enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_more_irqs; - unsigned int infos = 0; - -/* irq_infos can be NULL, but that would make the function useless */ -/* assert(irq_infos != NULL); */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_irq_translate() enter: irq_infos=%p\n",irq_infos); - - while (status == hrt_isp_css_irq_status_more_irqs) { - status = virq_get_channel_id(&irq); - if (status == hrt_isp_css_irq_status_error) - return IA_CSS_ERR_INTERNAL_ERROR; - -#if WITH_PC_MONITORING - sh_css_print("PC_MONITORING: %s() irq = %d, " - "sh_binary_running set to 0\n", __func__, irq); - sh_binary_running = 0 ; -#endif - - switch (irq) { - case virq_sp: - /* When SP goes to idle, info is available in the - * event queue. 
*/ - infos |= IA_CSS_IRQ_INFO_EVENTS_READY; - break; - case virq_isp: - break; -#if !defined(HAS_NO_INPUT_SYSTEM) - case virq_isys_sof: - infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF; - break; - case virq_isys_eof: - infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF; - break; - case virq_isys_csi: - infos |= IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR; - break; -#endif -#if !defined(HAS_NO_INPUT_FORMATTER) - case virq_ifmt0_id: - infos |= IA_CSS_IRQ_INFO_IF_ERROR; - break; -#endif - case virq_dma: - infos |= IA_CSS_IRQ_INFO_DMA_ERROR; - break; - case virq_sw_pin_0: - infos |= sh_css_get_sw_interrupt_value(0); - break; - case virq_sw_pin_1: - infos |= sh_css_get_sw_interrupt_value(1); - /* pqiao TODO: also assumption here */ - break; - default: - break; - } - } - - if (irq_infos) - *irq_infos = infos; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_irq_translate() " - "leave: irq_infos=%u\n", infos); - - return IA_CSS_SUCCESS; -} - -enum ia_css_err ia_css_irq_enable( - enum ia_css_irq_info info, - bool enable) -{ - virq_id_t irq = N_virq_id; - IA_CSS_ENTER("info=%d, enable=%d", info, enable); - - switch (info) { -#if !defined(HAS_NO_INPUT_FORMATTER) - case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF: - irq = virq_isys_sof; - break; - case IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF: - irq = virq_isys_eof; - break; - case IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR: - irq = virq_isys_csi; - break; -#endif -#if !defined(HAS_NO_INPUT_FORMATTER) - case IA_CSS_IRQ_INFO_IF_ERROR: - irq = virq_ifmt0_id; - break; -#endif - case IA_CSS_IRQ_INFO_DMA_ERROR: - irq = virq_dma; - break; - case IA_CSS_IRQ_INFO_SW_0: - irq = virq_sw_pin_0; - break; - case IA_CSS_IRQ_INFO_SW_1: - irq = virq_sw_pin_1; - break; - default: - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - cnd_virq_enable_channel(irq, enable); - - IA_CSS_LEAVE_ERR(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -#else -#error "sh_css.c: IRQ MAP must be one of \ - {IRQ_MAP_VERSION_2}" -#endif - -static unsigned int 
-sh_css_get_sw_interrupt_value(unsigned int irq) -{ - unsigned int irq_value; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_get_sw_interrupt_value() enter: irq=%d\n",irq); - irq_value = sh_css_sp_get_sw_interrupt_value(irq); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_get_sw_interrupt_value() leave: irq_value=%d\n",irq_value); - return irq_value; -} - -/* configure and load the copy binary, the next binary is used to - determine whether the copy binary needs to do left padding. */ -static enum ia_css_err load_copy_binary( - struct ia_css_pipe *pipe, - struct ia_css_binary *copy_binary, - struct ia_css_binary *next_binary) -{ - struct ia_css_frame_info copy_out_info, copy_in_info, copy_vf_info; - unsigned int left_padding; - enum ia_css_err err; - struct ia_css_binary_descr copy_descr; - - /* next_binary can be NULL */ - assert(pipe != NULL); - assert(copy_binary != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "load_copy_binary() enter:\n"); - - if (next_binary != NULL) { - copy_out_info = next_binary->in_frame_info; - left_padding = next_binary->left_padding; - } else { - copy_out_info = pipe->output_info[0]; - copy_vf_info = pipe->vf_output_info[0]; - ia_css_frame_info_set_format(©_vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE); - left_padding = 0; - } - - ia_css_pipe_get_copy_binarydesc(pipe, ©_descr, - ©_in_info, ©_out_info, (next_binary != NULL) ? 
NULL : NULL/*TODO: ©_vf_info*/); - err = ia_css_binary_find(©_descr, copy_binary); - if (err != IA_CSS_SUCCESS) - return err; - copy_binary->left_padding = left_padding; - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -alloc_continuous_frames( - struct ia_css_pipe *pipe, bool init_time) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_frame_info ref_info; - enum ia_css_pipe_id pipe_id; - bool continuous; - unsigned int i, idx; - unsigned int num_frames; - struct ia_css_pipe *capture_pipe = NULL; - - IA_CSS_ENTER_PRIVATE("pipe = %p, init_time = %d", pipe, init_time); - - if ((pipe == NULL) || (pipe->stream == NULL)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - pipe_id = pipe->mode; - continuous = pipe->stream->config.continuous; - - if (continuous) { - if (init_time) { - num_frames = pipe->stream->config.init_num_cont_raw_buf; - pipe->stream->continuous_pipe = pipe; - } else - num_frames = pipe->stream->config.target_num_cont_raw_buf; - } else { - num_frames = NUM_ONLINE_INIT_CONTINUOUS_FRAMES; - } - - if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) { - ref_info = pipe->pipe_settings.preview.preview_binary.in_frame_info; - } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) { - ref_info = pipe->pipe_settings.video.video_binary.in_frame_info; - } - else { - /* should not happen */ - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - /* For CSI2+, the continuous frame will hold the full input frame */ - ref_info.res.width = pipe->stream->config.input_config.input_res.width; - ref_info.res.height = pipe->stream->config.input_config.input_res.height; - - /* Ensure padded width is aligned for 2401 */ - ref_info.padded_width = CEIL_MUL(ref_info.res.width, 2 * ISP_VEC_NELEMS); -#endif - -#if !defined(HAS_NO_PACKED_RAW_PIXELS) - if (pipe->stream->config.pack_raw_pixels) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - 
"alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW_PACKED\n"); - ref_info.format = IA_CSS_FRAME_FORMAT_RAW_PACKED; - } else -#endif - { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW\n"); - ref_info.format = IA_CSS_FRAME_FORMAT_RAW; - } - - /* Write format back to binary */ - if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) { - pipe->pipe_settings.preview.preview_binary.in_frame_info.format = ref_info.format; - capture_pipe = pipe->pipe_settings.preview.capture_pipe; - } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) { - pipe->pipe_settings.video.video_binary.in_frame_info.format = ref_info.format; - capture_pipe = pipe->pipe_settings.video.capture_pipe; - } else { - /* should not happen */ - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - - if (init_time) - idx = 0; - else - idx = pipe->stream->config.init_num_cont_raw_buf; - - for (i = idx; i < NUM_CONTINUOUS_FRAMES; i++) { - /* free previous frame */ - if (pipe->continuous_frames[i]) { - ia_css_frame_free(pipe->continuous_frames[i]); - pipe->continuous_frames[i] = NULL; - } - /* free previous metadata buffer */ - ia_css_metadata_free(pipe->cont_md_buffers[i]); - pipe->cont_md_buffers[i] = NULL; - - /* check if new frame needed */ - if (i < num_frames) { - /* allocate new frame */ - err = ia_css_frame_allocate_from_info( - &pipe->continuous_frames[i], - &ref_info); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - /* allocate metadata buffer */ - pipe->cont_md_buffers[i] = ia_css_metadata_allocate( - &pipe->stream->info.metadata_info); - } - } - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -enum ia_css_err -ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream) -{ - if (stream == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - return alloc_continuous_frames(stream->continuous_pipe, false); -} - -static enum ia_css_err -load_preview_binaries(struct 
ia_css_pipe *pipe) -{ - struct ia_css_frame_info prev_in_info, - prev_bds_out_info, - prev_out_info, - prev_vf_info; - struct ia_css_binary_descr preview_descr; - bool online; - enum ia_css_err err = IA_CSS_SUCCESS; - bool continuous, need_vf_pp = false; - bool need_isp_copy_binary = false; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - bool sensor = false; -#endif - /* preview only have 1 output pin now */ - struct ia_css_frame_info *pipe_out_info = &pipe->output_info[0]; - struct ia_css_preview_settings *mycs = &pipe->pipe_settings.preview; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->stream != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_PREVIEW); - - online = pipe->stream->config.online; - continuous = pipe->stream->config.continuous; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR; -#endif - - if (mycs->preview_binary.info) - return IA_CSS_SUCCESS; - - err = ia_css_util_check_input(&pipe->stream->config, false, false); - if (err != IA_CSS_SUCCESS) - return err; - err = ia_css_frame_check_info(pipe_out_info); - if (err != IA_CSS_SUCCESS) - return err; - - /* Note: the current selection of vf_pp binary and - * parameterization of the preview binary contains a few pieces - * of hardcoded knowledge. This needs to be cleaned up such that - * the binary selection becomes more generic. - * The vf_pp binary is needed if one or more of the following features - * are required: - * 1. YUV downscaling. - * 2. Digital zoom. - * 3. An output format that is not supported by the preview binary. - * In practice this means something other than yuv_line or nv12. - * The decision if the vf_pp binary is needed for YUV downscaling is - * made after the preview binary selection, since some preview binaries - * can perform the requested YUV downscaling. 
- * */ - need_vf_pp = pipe->config.enable_dz; - need_vf_pp |= pipe_out_info->format != IA_CSS_FRAME_FORMAT_YUV_LINE && - !(pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12 || - pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_16 || - pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY); - - /* Preview step 1 */ - if (pipe->vf_yuv_ds_input_info.res.width) - prev_vf_info = pipe->vf_yuv_ds_input_info; - else - prev_vf_info = *pipe_out_info; - /* If vf_pp is needed, then preview must output yuv_line. - * The exception is when vf_pp is manually disabled, that is only - * used in combination with a pipeline extension that requires - * yuv_line as input. - * */ - if (need_vf_pp) - ia_css_frame_info_set_format(&prev_vf_info, - IA_CSS_FRAME_FORMAT_YUV_LINE); - - err = ia_css_pipe_get_preview_binarydesc( - pipe, - &preview_descr, - &prev_in_info, - &prev_bds_out_info, - &prev_out_info, - &prev_vf_info); - if (err != IA_CSS_SUCCESS) - return err; - err = ia_css_binary_find(&preview_descr, &mycs->preview_binary); - if (err != IA_CSS_SUCCESS) - return err; - -#ifdef ISP2401 - /* The delay latency determines the number of invalid frames after - * a stream is started. */ - pipe->num_invalid_frames = pipe->dvs_frame_delay; - pipe->info.num_invalid_frames = pipe->num_invalid_frames; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "load_preview_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n", - pipe->num_invalid_frames, pipe->dvs_frame_delay); - -#endif - /* The vf_pp binary is needed when (further) YUV downscaling is required */ - need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width; - need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height; - - /* When vf_pp is needed, then the output format of the selected - * preview binary must be yuv_line. If this is not the case, - * then the preview binary selection is done again. 
- */ - if (need_vf_pp && - (mycs->preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE)) { - - /* Preview step 2 */ - if (pipe->vf_yuv_ds_input_info.res.width) - prev_vf_info = pipe->vf_yuv_ds_input_info; - else - prev_vf_info = *pipe_out_info; - - ia_css_frame_info_set_format(&prev_vf_info, - IA_CSS_FRAME_FORMAT_YUV_LINE); - - err = ia_css_pipe_get_preview_binarydesc( - pipe, - &preview_descr, - &prev_in_info, - &prev_bds_out_info, - &prev_out_info, - &prev_vf_info); - if (err != IA_CSS_SUCCESS) - return err; - err = ia_css_binary_find(&preview_descr, - &mycs->preview_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - - if (need_vf_pp) { - struct ia_css_binary_descr vf_pp_descr; - - /* Viewfinder post-processing */ - ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr, - &mycs->preview_binary.out_frame_info[0], - pipe_out_info); - err = ia_css_binary_find(&vf_pp_descr, - &mycs->vf_pp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* When the input system is 2401, only the Direct Sensor Mode - * Offline Preview uses the ISP copy binary. - */ - need_isp_copy_binary = !online && sensor; -#else -#ifndef ISP2401 - need_isp_copy_binary = !online && !continuous; -#else - /* About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY: - * This is typical the case with SkyCam (which has no input system) but it also applies to all cases - * where the driver chooses for memory based input frames. In these cases, a copy binary (which typical - * copies sensor data to DDR) does not have much use. 
- */ - need_isp_copy_binary = !online && !continuous && !(pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY); -#endif -#endif - - /* Copy */ - if (need_isp_copy_binary) { - err = load_copy_binary(pipe, - &mycs->copy_binary, - &mycs->preview_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - - if (pipe->shading_table) { - ia_css_shading_table_free(pipe->shading_table); - pipe->shading_table = NULL; - } - - return IA_CSS_SUCCESS; -} - -static void -ia_css_binary_unload(struct ia_css_binary *binary) -{ - ia_css_binary_destroy_isp_parameters(binary); -} - -static enum ia_css_err -unload_preview_binaries(struct ia_css_pipe *pipe) -{ - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - - if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ia_css_binary_unload(&pipe->pipe_settings.preview.copy_binary); - ia_css_binary_unload(&pipe->pipe_settings.preview.preview_binary); - ia_css_binary_unload(&pipe->pipe_settings.preview.vf_pp_binary); - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -static const struct ia_css_fw_info *last_output_firmware( - const struct ia_css_fw_info *fw) -{ - const struct ia_css_fw_info *last_fw = NULL; -/* fw can be NULL */ - IA_CSS_ENTER_LEAVE_PRIVATE(""); - - for (; fw; fw = fw->next) { - const struct ia_css_fw_info *info = fw; - if (info->info.isp.sp.enable.output) - last_fw = fw; - } - return last_fw; -} - -static enum ia_css_err add_firmwares( - struct ia_css_pipeline *me, - struct ia_css_binary *binary, - const struct ia_css_fw_info *fw, - const struct ia_css_fw_info *last_fw, - unsigned int binary_mode, - struct ia_css_frame *in_frame, - struct ia_css_frame *out_frame, - struct ia_css_frame *vf_frame, - struct ia_css_pipeline_stage **my_stage, - struct ia_css_pipeline_stage **vf_stage) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipeline_stage *extra_stage = NULL; - struct 
ia_css_pipeline_stage_desc stage_desc; - -/* all args can be NULL ??? */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "add_firmwares() enter:\n"); - - for (; fw; fw = fw->next) { - struct ia_css_frame *out[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL}; - struct ia_css_frame *in = NULL; - struct ia_css_frame *vf = NULL; - if ((fw == last_fw) && (fw->info.isp.sp.enable.out_frame != 0)) { - out[0] = out_frame; - } - if (fw->info.isp.sp.enable.in_frame != 0) { - in = in_frame; - } - if (fw->info.isp.sp.enable.out_frame != 0) { - vf = vf_frame; - } - ia_css_pipe_get_firmwares_stage_desc(&stage_desc, binary, - out, in, vf, fw, binary_mode); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - &extra_stage); - if (err != IA_CSS_SUCCESS) - return err; - if (fw->info.isp.sp.enable.output != 0) - in_frame = extra_stage->args.out_frame[0]; - if (my_stage && !*my_stage && extra_stage) - *my_stage = extra_stage; - if (vf_stage && !*vf_stage && extra_stage && - fw->info.isp.sp.enable.vf_veceven) - *vf_stage = extra_stage; - } - return err; -} - -static enum ia_css_err add_vf_pp_stage( - struct ia_css_pipe *pipe, - struct ia_css_frame *in_frame, - struct ia_css_frame *out_frame, - struct ia_css_binary *vf_pp_binary, - struct ia_css_pipeline_stage **vf_pp_stage) -{ - - struct ia_css_pipeline *me = NULL; - const struct ia_css_fw_info *last_fw = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_pipeline_stage_desc stage_desc; - -/* out_frame can be NULL ??? 
*/ - - if (pipe == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - if (in_frame == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - if (vf_pp_binary == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - if (vf_pp_stage == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - ia_css_pipe_util_create_output_frames(out_frames); - me = &pipe->pipeline; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "add_vf_pp_stage() enter:\n"); - - *vf_pp_stage = NULL; - - last_fw = last_output_firmware(pipe->vf_stage); - if (!pipe->extra_config.disable_vf_pp) { - if (last_fw) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary, - out_frames, in_frame, NULL); - } else{ - ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame); - ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary, - out_frames, in_frame, NULL); - } - err = ia_css_pipeline_create_and_add_stage(me, &stage_desc, vf_pp_stage); - if (err != IA_CSS_SUCCESS) - return err; - in_frame = (*vf_pp_stage)->args.out_frame[0]; - } - err = add_firmwares(me, vf_pp_binary, pipe->vf_stage, last_fw, - IA_CSS_BINARY_MODE_VF_PP, - in_frame, out_frame, NULL, - vf_pp_stage, NULL); - return err; -} - -static enum ia_css_err add_yuv_scaler_stage( - struct ia_css_pipe *pipe, - struct ia_css_pipeline *me, - struct ia_css_frame *in_frame, - struct ia_css_frame *out_frame, - struct ia_css_frame *internal_out_frame, - struct ia_css_binary *yuv_scaler_binary, - struct ia_css_pipeline_stage **pre_vf_pp_stage) -{ - const struct ia_css_fw_info *last_fw; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_frame *vf_frame = NULL; - struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_pipeline_stage_desc stage_desc; - - /* out_frame can be NULL ??? 
*/ - assert(in_frame != NULL); - assert(pipe != NULL); - assert(me != NULL); - assert(yuv_scaler_binary != NULL); - assert(pre_vf_pp_stage != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "add_yuv_scaler_stage() enter:\n"); - - *pre_vf_pp_stage = NULL; - ia_css_pipe_util_create_output_frames(out_frames); - - last_fw = last_output_firmware(pipe->output_stage); - - if(last_fw) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, - yuv_scaler_binary, out_frames, in_frame, vf_frame); - } else { - ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame); - ia_css_pipe_util_set_output_frames(out_frames, 1, internal_out_frame); - ia_css_pipe_get_generic_stage_desc(&stage_desc, - yuv_scaler_binary, out_frames, in_frame, vf_frame); - } - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - pre_vf_pp_stage); - if (err != IA_CSS_SUCCESS) - return err; - in_frame = (*pre_vf_pp_stage)->args.out_frame[0]; - - err = add_firmwares(me, yuv_scaler_binary, pipe->output_stage, last_fw, - IA_CSS_BINARY_MODE_CAPTURE_PP, - in_frame, out_frame, vf_frame, - NULL, pre_vf_pp_stage); - /* If a firmware produce vf_pp output, we set that as vf_pp input */ - (*pre_vf_pp_stage)->args.vf_downscale_log2 = yuv_scaler_binary->vf_downscale_log2; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "add_yuv_scaler_stage() leave:\n"); - return err; -} - -static enum ia_css_err add_capture_pp_stage( - struct ia_css_pipe *pipe, - struct ia_css_pipeline *me, - struct ia_css_frame *in_frame, - struct ia_css_frame *out_frame, - struct ia_css_binary *capture_pp_binary, - struct ia_css_pipeline_stage **capture_pp_stage) -{ - const struct ia_css_fw_info *last_fw = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_frame *vf_frame = NULL; - struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_pipeline_stage_desc stage_desc; - - /* out_frame can be NULL ??? 
*/ - assert(in_frame != NULL); - assert(pipe != NULL); - assert(me != NULL); - assert(capture_pp_binary != NULL); - assert(capture_pp_stage != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "add_capture_pp_stage() enter:\n"); - - *capture_pp_stage = NULL; - ia_css_pipe_util_create_output_frames(out_frames); - - last_fw = last_output_firmware(pipe->output_stage); - err = ia_css_frame_allocate_from_info(&vf_frame, - &capture_pp_binary->vf_frame_info); - if (err != IA_CSS_SUCCESS) - return err; - if(last_fw) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, - capture_pp_binary, out_frames, NULL, vf_frame); - } else { - ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame); - ia_css_pipe_get_generic_stage_desc(&stage_desc, - capture_pp_binary, out_frames, NULL, vf_frame); - } - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - capture_pp_stage); - if (err != IA_CSS_SUCCESS) - return err; - err = add_firmwares(me, capture_pp_binary, pipe->output_stage, last_fw, - IA_CSS_BINARY_MODE_CAPTURE_PP, - in_frame, out_frame, vf_frame, - NULL, capture_pp_stage); - /* If a firmware produce vf_pp output, we set that as vf_pp input */ - if (*capture_pp_stage) { - (*capture_pp_stage)->args.vf_downscale_log2 = - capture_pp_binary->vf_downscale_log2; - } - return err; -} - -static void sh_css_setup_queues(void) -{ - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_host_sp_queues_initialized; - - sh_css_hmm_buffer_record_init(); - - sh_css_event_init_irq_mask(); - - fw = &sh_css_sp_fw; - HIVE_ADDR_host_sp_queues_initialized = - fw->info.sp.host_sp_queues_initialized; - - ia_css_bufq_init(); - - /* set "host_sp_queues_initialized" to "true" */ - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(host_sp_queues_initialized), - (uint32_t)(1)); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_setup_queues() leave:\n"); -} - -static enum ia_css_err 
-init_vf_frameinfo_defaults(struct ia_css_pipe *pipe, - struct ia_css_frame *vf_frame, unsigned int idx) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned int thread_id; - enum sh_css_queue_id queue_id; - - assert(vf_frame != NULL); - - sh_css_pipe_get_viewfinder_frame_info(pipe, &vf_frame->info, idx); - vf_frame->contiguous = false; - vf_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, thread_id, &queue_id); - vf_frame->dynamic_queue_id = queue_id; - vf_frame->buf_type = IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx; - - err = ia_css_frame_init_planes(vf_frame); - return err; -} - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -static unsigned int -get_crop_lines_for_bayer_order ( - const struct ia_css_stream_config *config) -{ - assert(config != NULL); - if ((IA_CSS_BAYER_ORDER_BGGR == config->input_config.bayer_order) - || (IA_CSS_BAYER_ORDER_GBRG == config->input_config.bayer_order)) - return 1; - - return 0; -} - -static unsigned int -get_crop_columns_for_bayer_order ( - const struct ia_css_stream_config *config) -{ - assert(config != NULL); - if ((IA_CSS_BAYER_ORDER_RGGB == config->input_config.bayer_order) - || (IA_CSS_BAYER_ORDER_GBRG == config->input_config.bayer_order)) - return 1; - - return 0; -} - -/* This function is to get the sum of all extra pixels in addition to the effective - * input, it includes dvs envelop and filter run-in */ -static void get_pipe_extra_pixel(struct ia_css_pipe *pipe, - unsigned int *extra_row, unsigned int *extra_column) -{ - enum ia_css_pipe_id pipe_id = pipe->mode; - unsigned int left_cropping = 0, top_cropping = 0; - unsigned int i; - struct ia_css_resolution dvs_env = pipe->config.dvs_envelope; - - /* The dvs envelope info may not be correctly sent down via pipe config - * The check is made and the correct value is populated in the binary info - * Use this value when 
computing crop, else excess lines may get trimmed - */ - switch (pipe_id) { - case IA_CSS_PIPE_ID_PREVIEW: - if (pipe->pipe_settings.preview.preview_binary.info) { - left_cropping = pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.left_cropping; - top_cropping = pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.top_cropping; - } - dvs_env = pipe->pipe_settings.preview.preview_binary.dvs_envelope; - break; - case IA_CSS_PIPE_ID_VIDEO: - if (pipe->pipe_settings.video.video_binary.info) { - left_cropping = pipe->pipe_settings.video.video_binary.info->sp.pipeline.left_cropping; - top_cropping = pipe->pipe_settings.video.video_binary.info->sp.pipeline.top_cropping; - } - dvs_env = pipe->pipe_settings.video.video_binary.dvs_envelope; - break; - case IA_CSS_PIPE_ID_CAPTURE: - for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) { - if (pipe->pipe_settings.capture.primary_binary[i].info) { - left_cropping += pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.left_cropping; - top_cropping += pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.top_cropping; - } - dvs_env.width += pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.width; - dvs_env.height += pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.height; - } - break; - default: - break; - } - - *extra_row = top_cropping + dvs_env.height; - *extra_column = left_cropping + dvs_env.width; -} - -void -ia_css_get_crop_offsets ( - struct ia_css_pipe *pipe, - struct ia_css_frame_info *in_frame) -{ - unsigned int row = 0; - unsigned int column = 0; - struct ia_css_resolution *input_res; - struct ia_css_resolution *effective_res; - unsigned int extra_row = 0, extra_col = 0; - unsigned int min_reqd_height, min_reqd_width; - - assert(pipe != NULL); - assert(pipe->stream != NULL); - assert(in_frame != NULL); - - IA_CSS_ENTER_PRIVATE("pipe = %p effective_wd = %u effective_ht = %u", - pipe, pipe->config.input_effective_res.width, - 
pipe->config.input_effective_res.height); - - input_res = &pipe->stream->config.input_config.input_res; -#ifndef ISP2401 - effective_res = &pipe->stream->config.input_config.effective_res; -#else - effective_res = &pipe->config.input_effective_res; -#endif - - get_pipe_extra_pixel(pipe, &extra_row, &extra_col); - - in_frame->raw_bayer_order = pipe->stream->config.input_config.bayer_order; - - min_reqd_height = effective_res->height + extra_row; - min_reqd_width = effective_res->width + extra_col; - - if (input_res->height > min_reqd_height) { - row = (input_res->height - min_reqd_height) / 2; - row &= ~0x1; - } - if (input_res->width > min_reqd_width) { - column = (input_res->width - min_reqd_width) / 2; - column &= ~0x1; - } - - /* - * TODO: - * 1. Require the special support for RAW10 packed mode. - * 2. Require the special support for the online use cases. - */ - - /* ISP expects GRBG bayer order, we skip one line and/or one row - * to correct in case the input bayer order is different. - */ - column += get_crop_columns_for_bayer_order(&pipe->stream->config); - row += get_crop_lines_for_bayer_order(&pipe->stream->config); - - in_frame->crop_info.start_column = column; - in_frame->crop_info.start_line = row; - - IA_CSS_LEAVE_PRIVATE("void start_col: %u start_row: %u", column, row); - - return; -} -#endif - -static enum ia_css_err -init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe, - struct ia_css_frame *frame, enum ia_css_frame_format format) -{ - struct ia_css_frame *in_frame; - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned int thread_id; - enum sh_css_queue_id queue_id; - - assert(frame != NULL); - in_frame = frame; - - in_frame->info.format = format; - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - if (format == IA_CSS_FRAME_FORMAT_RAW) - in_frame->info.format = (pipe->stream->config.pack_raw_pixels) ? 
- IA_CSS_FRAME_FORMAT_RAW_PACKED : IA_CSS_FRAME_FORMAT_RAW; -#endif - - - in_frame->info.res.width = pipe->stream->config.input_config.input_res.width; - in_frame->info.res.height = pipe->stream->config.input_config.input_res.height; - in_frame->info.raw_bit_depth = - ia_css_pipe_util_pipe_input_format_bpp(pipe); - ia_css_frame_info_set_width(&in_frame->info, pipe->stream->config.input_config.input_res.width, 0); - in_frame->contiguous = false; - in_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id); - in_frame->dynamic_queue_id = queue_id; - in_frame->buf_type = IA_CSS_BUFFER_TYPE_INPUT_FRAME; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - ia_css_get_crop_offsets(pipe, &in_frame->info); -#endif - err = ia_css_frame_init_planes(in_frame); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "init_in_frameinfo_memory_defaults() bayer_order = %d:\n", in_frame->info.raw_bayer_order); - - return err; -} - -static enum ia_css_err -init_out_frameinfo_defaults(struct ia_css_pipe *pipe, - struct ia_css_frame *out_frame, unsigned int idx) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned int thread_id; - enum sh_css_queue_id queue_id; - - assert(out_frame != NULL); - - sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, idx); - out_frame->contiguous = false; - out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, thread_id, &queue_id); - out_frame->dynamic_queue_id = queue_id; - out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx; - err = ia_css_frame_init_planes(out_frame); - - return err; -} - -/* Create stages for video pipe */ -static enum ia_css_err create_host_video_pipeline(struct ia_css_pipe *pipe) -{ - struct ia_css_pipeline_stage_desc 
stage_desc; - struct ia_css_binary *copy_binary, *video_binary, - *yuv_scaler_binary, *vf_pp_binary; - struct ia_css_pipeline_stage *copy_stage = NULL; - struct ia_css_pipeline_stage *video_stage = NULL; - struct ia_css_pipeline_stage *yuv_scaler_stage = NULL; - struct ia_css_pipeline_stage *vf_pp_stage = NULL; - struct ia_css_pipeline *me; - struct ia_css_frame *in_frame = NULL; - struct ia_css_frame *out_frame; - struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_frame *vf_frame = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - bool need_copy = false; - bool need_vf_pp = false; - bool need_yuv_pp = false; - unsigned num_output_pins; - bool need_in_frameinfo_memory = false; - - unsigned int i, num_yuv_scaler; - bool *is_output_stage = NULL; - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - if ((pipe == NULL) || (pipe->stream == NULL) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ia_css_pipe_util_create_output_frames(out_frames); - out_frame = &pipe->out_frame_struct; - - /* pipeline already created as part of create_host_pipeline_structure */ - me = &pipe->pipeline; - ia_css_pipeline_clean(me); - - me->dvs_frame_delay = pipe->dvs_frame_delay; - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* When the input system is 2401, always enable 'in_frameinfo_memory' - * except for the following: online or continuous - */ - need_in_frameinfo_memory = !(pipe->stream->config.online || pipe->stream->config.continuous); -#else - /* Construct in_frame info (only in case we have dynamic input */ - need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY; -#endif - - /* Construct in_frame info (only in case we have dynamic input */ - if (need_in_frameinfo_memory) { - in_frame = &pipe->in_frame_struct; - err = init_in_frameinfo_memory_defaults(pipe, in_frame, IA_CSS_FRAME_FORMAT_RAW); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - 
out_frame->data = 0; - err = init_out_frameinfo_defaults(pipe, out_frame, 0); - if (err != IA_CSS_SUCCESS) - goto ERR; - - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) { - vf_frame = &pipe->vf_frame_struct; - vf_frame->data = 0; - err = init_vf_frameinfo_defaults(pipe, vf_frame, 0); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - copy_binary = &pipe->pipe_settings.video.copy_binary; - video_binary = &pipe->pipe_settings.video.video_binary; - vf_pp_binary = &pipe->pipe_settings.video.vf_pp_binary; - num_output_pins = video_binary->info->num_output_pins; - - yuv_scaler_binary = pipe->pipe_settings.video.yuv_scaler_binary; - num_yuv_scaler = pipe->pipe_settings.video.num_yuv_scaler; - is_output_stage = pipe->pipe_settings.video.is_output_stage; - - need_copy = (copy_binary != NULL && copy_binary->info != NULL); - need_vf_pp = (vf_pp_binary != NULL && vf_pp_binary->info != NULL); - need_yuv_pp = (yuv_scaler_binary != NULL && yuv_scaler_binary->info != NULL); - - if (need_copy) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary, - out_frames, NULL, NULL); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - ©_stage); - if (err != IA_CSS_SUCCESS) - goto ERR; - in_frame = me->stages->args.out_frame[0]; - } else if (pipe->stream->config.continuous) { -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* When continuous is enabled, configure in_frame with the - * last pipe, which is the copy pipe. - */ - in_frame = pipe->stream->last_pipe->continuous_frames[0]; -#else - in_frame = pipe->continuous_frames[0]; -#endif - } - - ia_css_pipe_util_set_output_frames(out_frames, 0, need_yuv_pp ? NULL : out_frame); - - /* when the video binary supports a second output pin, - it can directly produce the vf_frame. 
*/ - if(need_vf_pp) { - ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary, - out_frames, in_frame, NULL); - } else { - ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary, - out_frames, in_frame, vf_frame); - } - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - &video_stage); - if (err != IA_CSS_SUCCESS) - goto ERR; - - /* If we use copy iso video, the input must be yuv iso raw */ - if(video_stage) { - video_stage->args.copy_vf = - video_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY; - video_stage->args.copy_output = video_stage->args.copy_vf; - } - - /* when the video binary supports only 1 output pin, vf_pp is needed to - produce the vf_frame.*/ - if (need_vf_pp && video_stage) { - in_frame = video_stage->args.out_vf_frame; - err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary, - &vf_pp_stage); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - if (video_stage) { - int frm; -#ifndef ISP2401 - for (frm = 0; frm < NUM_VIDEO_TNR_FRAMES; frm++) { -#else - for (frm = 0; frm < NUM_TNR_FRAMES; frm++) { -#endif - video_stage->args.tnr_frames[frm] = - pipe->pipe_settings.video.tnr_frames[frm]; - } - for (frm = 0; frm < MAX_NUM_VIDEO_DELAY_FRAMES; frm++) { - video_stage->args.delay_frames[frm] = - pipe->pipe_settings.video.delay_frames[frm]; - } - } - - /* Append Extension on Video out, if enabled */ - if (!need_vf_pp && video_stage && pipe->config.acc_extension && - (pipe->config.acc_extension->info.isp.type == IA_CSS_ACC_OUTPUT)) - { - struct ia_css_frame *out = NULL; - struct ia_css_frame *in = NULL; - - if ((pipe->config.acc_extension->info.isp.sp.enable.output) && - (pipe->config.acc_extension->info.isp.sp.enable.in_frame) && - (pipe->config.acc_extension->info.isp.sp.enable.out_frame)) { - - /* In/Out Frame mapping to support output frame extension.*/ - out = video_stage->args.out_frame[0]; - err = ia_css_frame_allocate_from_info(&in, &(pipe->output_info[0])); - if (err != IA_CSS_SUCCESS) - goto ERR; - 
video_stage->args.out_frame[0] = in; - } - - err = add_firmwares( me, video_binary, pipe->output_stage, - last_output_firmware(pipe->output_stage), - IA_CSS_BINARY_MODE_VIDEO, - in, out, NULL, &video_stage, NULL); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - if (need_yuv_pp && video_stage) { - struct ia_css_frame *tmp_in_frame = video_stage->args.out_frame[0]; - struct ia_css_frame *tmp_out_frame = NULL; - - for (i = 0; i < num_yuv_scaler; i++) { - if (is_output_stage[i] == true) { - tmp_out_frame = out_frame; - } else { - tmp_out_frame = NULL; - } - err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame, - NULL, - &yuv_scaler_binary[i], - &yuv_scaler_stage); - - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - /* we use output port 1 as internal output port */ - if (yuv_scaler_stage) - tmp_in_frame = yuv_scaler_stage->args.out_frame[1]; - } - } - - pipe->pipeline.acquire_isp_each_stage = false; - ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous); - -ERR: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static enum ia_css_err -create_host_acc_pipeline(struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - const struct ia_css_fw_info *fw; - unsigned int i; - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - if ((pipe == NULL) || (pipe->stream == NULL)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - pipe->pipeline.num_execs = pipe->config.acc_num_execs; - /* Reset pipe_qos_config to default disable all QOS extension stages */ - if (pipe->config.acc_extension) - pipe->pipeline.pipe_qos_config = 0; - - fw = pipe->vf_stage; - for (i = 0; fw; fw = fw->next){ - err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - for (i=0; iconfig.num_acc_stages; i++) { - struct ia_css_fw_info *fw = pipe->config.acc_stages[i]; - err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, 
fw); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous); - -ERR: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -/* Create stages for preview */ -static enum ia_css_err -create_host_preview_pipeline(struct ia_css_pipe *pipe) -{ - struct ia_css_pipeline_stage *copy_stage = NULL; - struct ia_css_pipeline_stage *preview_stage = NULL; - struct ia_css_pipeline_stage *vf_pp_stage = NULL; - struct ia_css_pipeline_stage_desc stage_desc; - struct ia_css_pipeline *me = NULL; - struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL; - struct ia_css_frame *in_frame = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_frame *out_frame; - struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - bool need_in_frameinfo_memory = false; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - bool sensor = false; - bool buffered_sensor = false; - bool online = false; - bool continuous = false; -#endif - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - if ((pipe == NULL) || (pipe->stream == NULL) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - - ia_css_pipe_util_create_output_frames(out_frames); - /* pipeline already created as part of create_host_pipeline_structure */ - me = &pipe->pipeline; - ia_css_pipeline_clean(me); - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* When the input system is 2401, always enable 'in_frameinfo_memory' - * except for the following: - * - Direct Sensor Mode Online Preview - * - Buffered Sensor Mode Online Preview - * - Direct Sensor Mode Continuous Preview - * - Buffered Sensor Mode Continuous Preview - */ - sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR); - buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR); - online = pipe->stream->config.online; - continuous = pipe->stream->config.continuous; - 
need_in_frameinfo_memory = - !((sensor && (online || continuous)) || (buffered_sensor && (online || continuous))); -#else - /* Construct in_frame info (only in case we have dynamic input */ - need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY; -#endif - if (need_in_frameinfo_memory) { - err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame, IA_CSS_FRAME_FORMAT_RAW); - if (err != IA_CSS_SUCCESS) - goto ERR; - - in_frame = &me->in_frame; - } else { - in_frame = NULL; - } - - err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0); - if (err != IA_CSS_SUCCESS) - goto ERR; - out_frame = &me->out_frame[0]; - - copy_binary = &pipe->pipe_settings.preview.copy_binary; - preview_binary = &pipe->pipe_settings.preview.preview_binary; - if (pipe->pipe_settings.preview.vf_pp_binary.info) - vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary; - - if (pipe->pipe_settings.preview.copy_binary.info) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary, - out_frames, NULL, NULL); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - ©_stage); - if (err != IA_CSS_SUCCESS) - goto ERR; - in_frame = me->stages->args.out_frame[0]; -#ifndef ISP2401 - } else { -#else - } else if (pipe->stream->config.continuous) { -#endif -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* When continuous is enabled, configure in_frame with the - * last pipe, which is the copy pipe. 
- */ - if (continuous || !online){ - in_frame = pipe->stream->last_pipe->continuous_frames[0]; - } -#else - in_frame = pipe->continuous_frames[0]; -#endif - } - - if (vf_pp_binary) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary, - out_frames, in_frame, NULL); - } else { - ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame); - ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary, - out_frames, in_frame, NULL); - } - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - &preview_stage); - if (err != IA_CSS_SUCCESS) - goto ERR; - /* If we use copy iso preview, the input must be yuv iso raw */ - preview_stage->args.copy_vf = - preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY; - preview_stage->args.copy_output = !preview_stage->args.copy_vf; - if (preview_stage->args.copy_vf && !preview_stage->args.out_vf_frame) { - /* in case of copy, use the vf frame as output frame */ - preview_stage->args.out_vf_frame = - preview_stage->args.out_frame[0]; - } - if (vf_pp_binary) { - if (preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY) - in_frame = preview_stage->args.out_vf_frame; - else - in_frame = preview_stage->args.out_frame[0]; - err = add_vf_pp_stage(pipe, in_frame, out_frame, vf_pp_binary, - &vf_pp_stage); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - - pipe->pipeline.acquire_isp_each_stage = false; - ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous); - -ERR: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static void send_raw_frames(struct ia_css_pipe *pipe) -{ - if (pipe->stream->config.continuous) { - unsigned int i; - - sh_css_update_host2sp_cont_num_raw_frames - (pipe->stream->config.init_num_cont_raw_buf, true); - sh_css_update_host2sp_cont_num_raw_frames - (pipe->stream->config.target_num_cont_raw_buf, false); - - /* Hand-over all the SP-internal buffers */ - for (i = 0; i < 
pipe->stream->config.init_num_cont_raw_buf; i++) { - sh_css_update_host2sp_offline_frame(i, - pipe->continuous_frames[i], pipe->cont_md_buffers[i]); - } - } - - return; -} - -static enum ia_css_err -preview_start(struct ia_css_pipe *pipe) -{ - struct ia_css_pipeline *me ; - struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipe *copy_pipe, *capture_pipe; - struct ia_css_pipe *acc_pipe; - enum sh_css_pipe_config_override copy_ovrd; - enum ia_css_input_mode preview_pipe_input_mode; - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - if ((pipe == NULL) || (pipe->stream == NULL) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - me = &pipe->pipeline; - - preview_pipe_input_mode = pipe->stream->config.mode; - - copy_pipe = pipe->pipe_settings.preview.copy_pipe; - capture_pipe = pipe->pipe_settings.preview.capture_pipe; - acc_pipe = pipe->pipe_settings.preview.acc_pipe; - - copy_binary = &pipe->pipe_settings.preview.copy_binary; - preview_binary = &pipe->pipe_settings.preview.preview_binary; - if (pipe->pipe_settings.preview.vf_pp_binary.info) - vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary; - - sh_css_metrics_start_frame(); - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - /* multi stream video needs mipi buffers */ - err = send_mipi_frames(pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; -#endif - send_raw_frames(pipe); - - { - unsigned int thread_id; - - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - copy_ovrd = 1 << thread_id; - - if (pipe->stream->cont_capt) { - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe), &thread_id); - copy_ovrd |= 1 << thread_id; - } - } - - /* Construct and load the copy pipe */ - if (pipe->stream->config.continuous) { - sh_css_sp_init_pipeline(©_pipe->pipeline, - 
IA_CSS_PIPE_ID_COPY, - (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe), - false, - pipe->stream->config.pixels_per_clock == 2, false, - false, pipe->required_bds_factor, - copy_ovrd, - pipe->stream->config.mode, - &pipe->stream->config.metadata_config, -#ifndef ISP2401 - &pipe->stream->info.metadata_info -#else - &pipe->stream->info.metadata_info, -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) -#ifndef ISP2401 - , pipe->stream->config.source.port.port -#else - pipe->stream->config.source.port.port, -#endif -#endif -#ifndef ISP2401 - ); -#else - &pipe->config.internal_frame_origin_bqs_on_sctbl, - pipe->stream->isp_params_configs); -#endif - - /* make the preview pipe start with mem mode input, copy handles - the actual mode */ - preview_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY; - } - - /* Construct and load the capture pipe */ - if (pipe->stream->cont_capt) { - sh_css_sp_init_pipeline(&capture_pipe->pipeline, - IA_CSS_PIPE_ID_CAPTURE, - (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe), - capture_pipe->config.default_capture_config.enable_xnr != 0, - capture_pipe->stream->config.pixels_per_clock == 2, - true, /* continuous */ - false, /* offline */ - capture_pipe->required_bds_factor, - 0, - IA_CSS_INPUT_MODE_MEMORY, - &pipe->stream->config.metadata_config, -#ifndef ISP2401 - &pipe->stream->info.metadata_info -#else - &pipe->stream->info.metadata_info, -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) -#ifndef ISP2401 - , (enum mipi_port_id)0 -#else - (enum mipi_port_id)0, -#endif -#endif -#ifndef ISP2401 - ); -#else - &capture_pipe->config.internal_frame_origin_bqs_on_sctbl, - capture_pipe->stream->isp_params_configs); -#endif - } - - if (acc_pipe) { - sh_css_sp_init_pipeline(&acc_pipe->pipeline, - IA_CSS_PIPE_ID_ACC, - (uint8_t) ia_css_pipe_get_pipe_num(acc_pipe), - false, - pipe->stream->config.pixels_per_clock == 2, - false, /* continuous */ - false, /* offline */ - pipe->required_bds_factor, - 0, - IA_CSS_INPUT_MODE_MEMORY, - NULL, -#ifndef ISP2401 - NULL -#else - NULL, 
-#endif -#if !defined(HAS_NO_INPUT_SYSTEM) -#ifndef ISP2401 - , (enum mipi_port_id) 0 -#else - (enum mipi_port_id) 0, -#endif -#endif -#ifndef ISP2401 - ); -#else - &pipe->config.internal_frame_origin_bqs_on_sctbl, - pipe->stream->isp_params_configs); -#endif - } - - start_pipe(pipe, copy_ovrd, preview_pipe_input_mode); - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -ERR: -#endif - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -enum ia_css_err -ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe, - const struct ia_css_buffer *buffer) -{ - enum ia_css_err return_err = IA_CSS_SUCCESS; - unsigned int thread_id; - enum sh_css_queue_id queue_id; - struct ia_css_pipeline *pipeline; - struct ia_css_pipeline_stage *stage; - struct ia_css_rmgr_vbuf_handle p_vbuf; - struct ia_css_rmgr_vbuf_handle *h_vbuf; - struct sh_css_hmm_buffer ddr_buffer; - enum ia_css_buffer_type buf_type; - enum ia_css_pipe_id pipe_id; - bool ret_err; - - IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer); - - if ((pipe == NULL) || (buffer == NULL)) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - buf_type = buffer->type; - /* following code will be enabled when IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME - is removed */ -#if 0 - if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) { - bool found_pipe = false; - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - if ((buffer->data.frame->info.res.width == pipe->output_info[i].res.width) && - (buffer->data.frame->info.res.height == pipe->output_info[i].res.height)) { - buf_type += i; - found_pipe = true; - break; - } - } - if (!found_pipe) - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - if (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) { - bool found_pipe = false; - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - if ((buffer->data.frame->info.res.width == pipe->vf_output_info[i].res.width) && - (buffer->data.frame->info.res.height == 
pipe->vf_output_info[i].res.height)) { - buf_type += i; - found_pipe = true; - break; - } - } - if (!found_pipe) - return IA_CSS_ERR_INVALID_ARGUMENTS; - } -#endif - pipe_id = pipe->mode; - - IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type); - - - assert(pipe_id < IA_CSS_PIPE_ID_NUM); - assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE); - if ((buf_type == IA_CSS_BUFFER_TYPE_INVALID) || - (buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE) || - (pipe_id >= IA_CSS_PIPE_ID_NUM)) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - - ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - if (!ret_err) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id); - if (!ret_err) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if (!sh_css_sp_is_running()) { - IA_CSS_LOG("SP is not running!"); - IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE); - /* SP is not running. 
The queues are not valid */ - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - - pipeline = &pipe->pipeline; - - assert(pipeline != NULL || - pipe_id == IA_CSS_PIPE_ID_COPY || - pipe_id == IA_CSS_PIPE_ID_ACC); - - assert(sizeof(NULL) <= sizeof(ddr_buffer.kernel_ptr)); - ddr_buffer.kernel_ptr = HOST_ADDRESS(NULL); - ddr_buffer.cookie_ptr = buffer->driver_cookie; - ddr_buffer.timing_data = buffer->timing_data; - - if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS) { - if (buffer->data.stats_3a == NULL) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_3a); - ddr_buffer.payload.s3a = *buffer->data.stats_3a; - } else if (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS) { - if (buffer->data.stats_dvs == NULL) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_dvs); - ddr_buffer.payload.dis = *buffer->data.stats_dvs; - } else if (buf_type == IA_CSS_BUFFER_TYPE_METADATA) { - if (buffer->data.metadata == NULL) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.metadata); - ddr_buffer.payload.metadata = *buffer->data.metadata; - } else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)) { - if (buffer->data.frame == NULL) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.frame); - ddr_buffer.payload.frame.frame_data = buffer->data.frame->data; - ddr_buffer.payload.frame.flashed = 0; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipe_enqueue_buffer() buf_type=%d, 
data(DDR address)=0x%x\n", - buf_type, buffer->data.frame->data); - - -#if CONFIG_ON_FRAME_ENQUEUE() - return_err = set_config_on_frame_enqueue( - &buffer->data.frame->info, - &ddr_buffer.payload.frame); - if (IA_CSS_SUCCESS != return_err) { - IA_CSS_LEAVE_ERR(return_err); - return return_err; - } -#endif - } - - /* start of test for using rmgr for acq/rel memory */ - p_vbuf.vptr = 0; - p_vbuf.count = 0; - p_vbuf.size = sizeof(struct sh_css_hmm_buffer); - h_vbuf = &p_vbuf; - /* TODO: change next to correct pool for optimization */ - ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &h_vbuf); - - assert(h_vbuf != NULL); - assert(h_vbuf->vptr != 0x0); - - if ((h_vbuf == NULL) || (h_vbuf->vptr == 0x0)) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - - mmgr_store(h_vbuf->vptr, - (void *)(&ddr_buffer), - sizeof(struct sh_css_hmm_buffer)); - if ((buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS) - || (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS) - || (buf_type == IA_CSS_BUFFER_TYPE_LACE_STATISTICS)) { - if (pipeline == NULL) { - ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf); - IA_CSS_LOG("pipeline is empty!"); - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - - for (stage = pipeline->stages; stage; stage = stage->next) { - /* The SP will read the params - after it got empty 3a and dis */ - if (STATS_ENABLED(stage)) { - /* there is a stage that needs it */ - return_err = ia_css_bufq_enqueue_buffer(thread_id, - queue_id, - (uint32_t)h_vbuf->vptr); - } - } - } else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME) - || (buf_type == IA_CSS_BUFFER_TYPE_METADATA)) { - - return_err = ia_css_bufq_enqueue_buffer(thread_id, - queue_id, - (uint32_t)h_vbuf->vptr); -#if 
defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - if ((return_err == IA_CSS_SUCCESS) && (IA_CSS_BUFFER_TYPE_OUTPUT_FRAME == buf_type)) { - IA_CSS_LOG("pfp: enqueued OF %d to q %d thread %d", - ddr_buffer.payload.frame.frame_data, - queue_id, thread_id); - } -#endif - - } - - if (return_err == IA_CSS_SUCCESS) { - if (sh_css_hmm_buffer_record_acquire( - h_vbuf, buf_type, - HOST_ADDRESS(ddr_buffer.kernel_ptr))) { - IA_CSS_LOG("send vbuf=%p", h_vbuf); - } else { - return_err = IA_CSS_ERR_INTERNAL_ERROR; - IA_CSS_ERROR("hmm_buffer_record[]: no available slots\n"); - } - } - - /* - * Tell the SP which queues are not empty, - * by sending the software event. - */ - if (return_err == IA_CSS_SUCCESS) { - if (!sh_css_sp_is_running()) { - /* SP is not running. The queues are not valid */ - IA_CSS_LOG("SP is not running!"); - IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - return_err = ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED, - (uint8_t)thread_id, - queue_id, - 0); - } else { - ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf); - IA_CSS_ERROR("buffer not enqueued"); - } - - IA_CSS_LEAVE("return value = %d", return_err); - - return return_err; -} - -/* - * TODO: Free up the hmm memory space. 
- */ -enum ia_css_err -ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe, - struct ia_css_buffer *buffer) -{ - enum ia_css_err return_err; - enum sh_css_queue_id queue_id; - hrt_vaddress ddr_buffer_addr = (hrt_vaddress)0; - struct sh_css_hmm_buffer ddr_buffer; - enum ia_css_buffer_type buf_type; - enum ia_css_pipe_id pipe_id; - unsigned int thread_id; - hrt_address kernel_ptr = 0; - bool ret_err; - - IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer); - - if ((pipe == NULL) || (buffer == NULL)) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - pipe_id = pipe->mode; - - buf_type = buffer->type; - - IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type); - - ddr_buffer.kernel_ptr = 0; - - ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - if (!ret_err) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id); - if (!ret_err) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if (!sh_css_sp_is_running()) { - IA_CSS_LOG("SP is not running!"); - IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE); - /* SP is not running. 
The queues are not valid */ - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - return_err = ia_css_bufq_dequeue_buffer(queue_id, - (uint32_t *)&ddr_buffer_addr); - - if (return_err == IA_CSS_SUCCESS) { - struct ia_css_frame *frame; - struct sh_css_hmm_buffer_record *hmm_buffer_record = NULL; - - IA_CSS_LOG("receive vbuf=%x", (int)ddr_buffer_addr); - - /* Validate the ddr_buffer_addr and buf_type */ - hmm_buffer_record = sh_css_hmm_buffer_record_validate( - ddr_buffer_addr, buf_type); - if (hmm_buffer_record != NULL) { - /* valid hmm_buffer_record found. Save the kernel_ptr - * for validation after performing mmgr_load. The - * vbuf handle and buffer_record can be released. - */ - kernel_ptr = hmm_buffer_record->kernel_ptr; - ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &hmm_buffer_record->h_vbuf); - sh_css_hmm_buffer_record_reset(hmm_buffer_record); - } else { - IA_CSS_ERROR("hmm_buffer_record not found (0x%x) buf_type(%d)", - ddr_buffer_addr, buf_type); - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - - mmgr_load(ddr_buffer_addr, - &ddr_buffer, - sizeof(struct sh_css_hmm_buffer)); - - /* if the kernel_ptr is 0 or an invalid, return an error. - * do not access the buffer via the kernal_ptr. - */ - if ((ddr_buffer.kernel_ptr == 0) || - (kernel_ptr != HOST_ADDRESS(ddr_buffer.kernel_ptr))) { - IA_CSS_ERROR("kernel_ptr invalid"); - IA_CSS_ERROR("expected: (0x%llx)", (u64)kernel_ptr); - IA_CSS_ERROR("actual: (0x%llx)", (u64)HOST_ADDRESS(ddr_buffer.kernel_ptr)); - IA_CSS_ERROR("buf_type: %d\n", buf_type); - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - - if (ddr_buffer.kernel_ptr != 0) { - /* buffer->exp_id : all instances to be removed later once the driver change - * is completed. 
See patch #5758 for reference */ - buffer->exp_id = 0; - buffer->driver_cookie = ddr_buffer.cookie_ptr; - buffer->timing_data = ddr_buffer.timing_data; - - if ((buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) || - (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)) { - buffer->isys_eof_clock_tick.ticks = ddr_buffer.isys_eof_clock_tick; - } - - switch (buf_type) { - case IA_CSS_BUFFER_TYPE_INPUT_FRAME: - case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME: - case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME: - if ((pipe) && (pipe->stop_requested == true)) - { - -#if defined(USE_INPUT_SYSTEM_VERSION_2) - /* free mipi frames only for old input system - * for 2401 it is done in ia_css_stream_destroy call - */ - return_err = free_mipi_frames(pipe); - if (return_err != IA_CSS_SUCCESS) { - IA_CSS_LOG("free_mipi_frames() failed"); - IA_CSS_LEAVE_ERR(return_err); - return return_err; - } -#endif - pipe->stop_requested = false; - } - case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME: - case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: - frame = (struct ia_css_frame*)HOST_ADDRESS(ddr_buffer.kernel_ptr); - buffer->data.frame = frame; - buffer->exp_id = ddr_buffer.payload.frame.exp_id; - frame->exp_id = ddr_buffer.payload.frame.exp_id; - frame->isp_config_id = ddr_buffer.payload.frame.isp_parameters_id; - if (ddr_buffer.payload.frame.flashed == 1) - frame->flash_state = - IA_CSS_FRAME_FLASH_STATE_PARTIAL; - if (ddr_buffer.payload.frame.flashed == 2) - frame->flash_state = - IA_CSS_FRAME_FLASH_STATE_FULL; - frame->valid = pipe->num_invalid_frames == 0; - if (!frame->valid) - pipe->num_invalid_frames--; - - if (frame->info.format == IA_CSS_FRAME_FORMAT_BINARY_8) { -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - frame->planes.binary.size = frame->data_bytes; -#else - frame->planes.binary.size = - sh_css_sp_get_binary_copy_size(); -#endif - } -#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - if (IA_CSS_BUFFER_TYPE_OUTPUT_FRAME == buf_type) { - IA_CSS_LOG("pfp: dequeued OF %d with config id %d thread %d", - frame->data, 
frame->isp_config_id, thread_id); - } -#endif - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipe_dequeue_buffer() buf_type=%d, data(DDR address)=0x%x\n", - buf_type, buffer->data.frame->data); - - break; - case IA_CSS_BUFFER_TYPE_3A_STATISTICS: - buffer->data.stats_3a = - (struct ia_css_isp_3a_statistics*)HOST_ADDRESS(ddr_buffer.kernel_ptr); - buffer->exp_id = ddr_buffer.payload.s3a.exp_id; - buffer->data.stats_3a->exp_id = ddr_buffer.payload.s3a.exp_id; - buffer->data.stats_3a->isp_config_id = ddr_buffer.payload.s3a.isp_config_id; - break; - case IA_CSS_BUFFER_TYPE_DIS_STATISTICS: - buffer->data.stats_dvs = - (struct ia_css_isp_dvs_statistics*) - HOST_ADDRESS(ddr_buffer.kernel_ptr); - buffer->exp_id = ddr_buffer.payload.dis.exp_id; - buffer->data.stats_dvs->exp_id = ddr_buffer.payload.dis.exp_id; - break; - case IA_CSS_BUFFER_TYPE_LACE_STATISTICS: - break; - case IA_CSS_BUFFER_TYPE_METADATA: - buffer->data.metadata = - (struct ia_css_metadata*)HOST_ADDRESS(ddr_buffer.kernel_ptr); - buffer->exp_id = ddr_buffer.payload.metadata.exp_id; - buffer->data.metadata->exp_id = ddr_buffer.payload.metadata.exp_id; - break; - default: - return_err = IA_CSS_ERR_INTERNAL_ERROR; - break; - } - } - } - - /* - * Tell the SP which queues are not full, - * by sending the software event. - */ - if (return_err == IA_CSS_SUCCESS){ - if (!sh_css_sp_is_running()) { - IA_CSS_LOG("SP is not running!"); - IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE); - /* SP is not running. The queues are not valid */ - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED, - 0, - queue_id, - 0); - } - IA_CSS_LEAVE("buffer=%p", buffer); - - return return_err; -} - -/* - * Cannot Move this to event module as it is of ia_css_event_type which is declared in ia_css.h - * TODO: modify and move it if possible. - * - * !!!IMPORTANT!!! 
KEEP THE FOLLOWING IN SYNC: - * 1) "enum ia_css_event_type" (ia_css_event_public.h) - * 2) "enum sh_css_sp_event_type" (sh_css_internal.h) - * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c) - * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c) - */ -static enum ia_css_event_type convert_event_sp_to_host_domain[] = { - IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /** Output frame ready. */ - IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /** Second output frame ready. */ - IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /** Viewfinder Output frame ready. */ - IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /** Second viewfinder Output frame ready. */ - IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /** Indication that 3A statistics are available. */ - IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /** Indication that DIS statistics are available. */ - IA_CSS_EVENT_TYPE_PIPELINE_DONE, /** Pipeline Done event, sent after last pipeline stage. */ - IA_CSS_EVENT_TYPE_FRAME_TAGGED, /** Frame tagged. */ - IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /** Input frame ready. */ - IA_CSS_EVENT_TYPE_METADATA_DONE, /** Metadata ready. */ - IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /** Indication that LACE statistics are available. */ - IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /** Extension stage executed. */ - IA_CSS_EVENT_TYPE_TIMER, /** Timing measurement data. */ - IA_CSS_EVENT_TYPE_PORT_EOF, /** End Of Frame event, sent when in buffered sensor mode. */ - IA_CSS_EVENT_TYPE_FW_WARNING, /** Performance warning encountered by FW */ - IA_CSS_EVENT_TYPE_FW_ASSERT, /** Assertion hit by FW */ - 0, /* error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. 
*/ -}; - -enum ia_css_err -ia_css_dequeue_event(struct ia_css_event *event) -{ - return ia_css_dequeue_psys_event(event); -} - -enum ia_css_err -ia_css_dequeue_psys_event(struct ia_css_event *event) -{ - enum ia_css_pipe_id pipe_id = 0; - uint8_t payload[4] = {0,0,0,0}; - enum ia_css_err ret_err; - - /*TODO: - * a) use generic decoding function , same as the one used by sp. - * b) group decode and dequeue into eventQueue module - * - * We skip the IA_CSS_ENTER logging call - * to avoid flooding the logs when the host application - * uses polling. */ - if (event == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - if (!sh_css_sp_is_running()) { - /* SP is not running. The queues are not valid */ - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - /* dequeue the event (if any) from the psys event queue */ - ret_err = ia_css_bufq_dequeue_psys_event(payload); - if (ret_err != IA_CSS_SUCCESS) - return ret_err; - - IA_CSS_LOG("event dequeued from psys event queue"); - - /* Tell the SP that we dequeued an event from the event queue. */ - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0); - - /* Events are decoded into 4 bytes of payload, the first byte - * contains the sp event type. This is converted to a host enum. - * TODO: can this enum conversion be eliminated */ - event->type = convert_event_sp_to_host_domain[payload[0]]; - /* Some sane default values since not all events use all fields. */ - event->pipe = NULL; - event->port = MIPI_PORT0_ID; - event->exp_id = 0; - event->fw_warning = IA_CSS_FW_WARNING_NONE; - event->fw_handle = 0; - event->timer_data = 0; - event->timer_code = 0; - event->timer_subcode = 0; - - if (event->type == IA_CSS_EVENT_TYPE_TIMER) { - /* timer event ??? 
get the 2nd event and decode the data into the event struct */ - uint32_t tmp_data; - /* 1st event: LSB 16-bit timer data and code */ - event->timer_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8)); - event->timer_code = payload[2]; - payload[0] = payload[1] = payload[2] = payload[3] = 0; - ret_err = ia_css_bufq_dequeue_psys_event(payload); - if (ret_err != IA_CSS_SUCCESS) { - /* no 2nd event ??? an error */ - /* Putting IA_CSS_ERROR is resulting in failures in - * Merrifield smoke testing */ - IA_CSS_WARNING("Timer: Error de-queuing the 2nd TIMER event!!!\n"); - return ret_err; - } - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0); - event->type = convert_event_sp_to_host_domain[payload[0]]; - /* It's a timer */ - if (event->type == IA_CSS_EVENT_TYPE_TIMER) { - /* 2nd event data: MSB 16-bit timer and subcode */ - tmp_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8)); - event->timer_data |= (tmp_data << 16); - event->timer_subcode = payload[2]; - } - /* It's a non timer event. So clear first half of the timer event data. - * If the second part of the TIMER event is not received, we discard - * the first half of the timer data and process the non timer event without - * affecting the flow. So the non timer event falls through - * the code. */ - else { - event->timer_data = 0; - event->timer_code = 0; - event->timer_subcode = 0; - IA_CSS_ERROR("Missing 2nd timer event. 
Timer event discarded"); - } - } - if (event->type == IA_CSS_EVENT_TYPE_PORT_EOF) { - event->port = (enum mipi_port_id)payload[1]; - event->exp_id = payload[3]; - } else if (event->type == IA_CSS_EVENT_TYPE_FW_WARNING) { - event->fw_warning = (enum ia_css_fw_warning)payload[1]; - /* exp_id is only available in these warning types */ - if (event->fw_warning == IA_CSS_FW_WARNING_EXP_ID_LOCKED || - event->fw_warning == IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED) - event->exp_id = payload[3]; - } else if (event->type == IA_CSS_EVENT_TYPE_FW_ASSERT) { - event->fw_assert_module_id = payload[1]; /* module */ - event->fw_assert_line_no = (payload[2] << 8) + payload[3]; - /* payload[2] is line_no>>8, payload[3] is line_no&0xff */ - } else if (event->type != IA_CSS_EVENT_TYPE_TIMER) { - /* pipe related events. - * payload[1] contains the pipe_num, - * payload[2] contains the pipe_id. These are different. */ - event->pipe = find_pipe_by_num(payload[1]); - pipe_id = (enum ia_css_pipe_id)payload[2]; - /* Check to see if pipe still exists */ - if (!event->pipe) - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - - if (event->type == IA_CSS_EVENT_TYPE_FRAME_TAGGED) { - /* find the capture pipe that goes with this */ - int i, n; - n = event->pipe->stream->num_pipes; - for (i = 0; i < n; i++) { - struct ia_css_pipe *p = - event->pipe->stream->pipes[i]; - if (p->config.mode == IA_CSS_PIPE_MODE_CAPTURE) { - event->pipe = p; - break; - } - } - event->exp_id = payload[3]; - } - if (event->type == IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE) { - /* payload[3] contains the acc fw handle. */ - uint32_t stage_num = (uint32_t)payload[3]; - ret_err = ia_css_pipeline_get_fw_from_stage( - &(event->pipe->pipeline), - stage_num, - &(event->fw_handle)); - if (ret_err != IA_CSS_SUCCESS) { - IA_CSS_ERROR("Invalid stage num received for ACC event. 
stage_num:%u", - stage_num); - return ret_err; - } - } - } - - if (event->pipe) - IA_CSS_LEAVE("event_id=%d, pipe_id=%d", event->type, pipe_id); - else - IA_CSS_LEAVE("event_id=%d", event->type); - - return IA_CSS_SUCCESS; -} - -enum ia_css_err -ia_css_dequeue_isys_event(struct ia_css_event *event) -{ - uint8_t payload[4] = {0, 0, 0, 0}; - enum ia_css_err err = IA_CSS_SUCCESS; - - /* We skip the IA_CSS_ENTER logging call - * to avoid flooding the logs when the host application - * uses polling. */ - if (event == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - if (!sh_css_sp_is_running()) { - /* SP is not running. The queues are not valid */ - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - err = ia_css_bufq_dequeue_isys_event(payload); - if (err != IA_CSS_SUCCESS) - return err; - - IA_CSS_LOG("event dequeued from isys event queue"); - - /* Update SP state to indicate that element was dequeued. */ - ia_css_bufq_enqueue_isys_event(IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED); - - /* Fill return struct with appropriate info */ - event->type = IA_CSS_EVENT_TYPE_PORT_EOF; - /* EOF events are associated with a CSI port, not with a pipe */ - event->pipe = NULL; - event->port = payload[1]; - event->exp_id = payload[3]; - - IA_CSS_LEAVE_ERR(err); - return err; -} - -static void -acc_start(struct ia_css_pipe *pipe) -{ - assert(pipe != NULL); - assert(pipe->stream != NULL); - - start_pipe(pipe, SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD, - pipe->stream->config.mode); -} - -static enum ia_css_err -sh_css_pipe_start(struct ia_css_stream *stream) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - struct ia_css_pipe *pipe; - enum ia_css_pipe_id pipe_id; - unsigned int thread_id; - - IA_CSS_ENTER_PRIVATE("stream = %p", stream); - - if (stream == NULL) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - pipe = stream->last_pipe; - if (pipe == NULL) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - pipe_id = 
pipe->mode; - - if(stream->started == true) { - IA_CSS_WARNING("Cannot start stream that is already started"); - IA_CSS_LEAVE_ERR(err); - return err; - } - - pipe->stop_requested = false; - - switch (pipe_id) { - case IA_CSS_PIPE_ID_PREVIEW: - err = preview_start(pipe); - break; - case IA_CSS_PIPE_ID_VIDEO: - err = video_start(pipe); - break; - case IA_CSS_PIPE_ID_CAPTURE: - err = capture_start(pipe); - break; - case IA_CSS_PIPE_ID_YUVPP: - err = yuvpp_start(pipe); - break; - case IA_CSS_PIPE_ID_ACC: - acc_start(pipe); - break; - default: - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - /* DH regular multi pipe - not continuous mode: start the next pipes too */ - if (!stream->config.continuous) { - int i; - for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err ; i++) { - switch (stream->pipes[i]->mode) { - case IA_CSS_PIPE_ID_PREVIEW: - stream->pipes[i]->stop_requested = false; - err = preview_start(stream->pipes[i]); - break; - case IA_CSS_PIPE_ID_VIDEO: - stream->pipes[i]->stop_requested = false; - err = video_start(stream->pipes[i]); - break; - case IA_CSS_PIPE_ID_CAPTURE: - stream->pipes[i]->stop_requested = false; - err = capture_start(stream->pipes[i]); - break; - case IA_CSS_PIPE_ID_YUVPP: - stream->pipes[i]->stop_requested = false; - err = yuvpp_start(stream->pipes[i]); - break; - case IA_CSS_PIPE_ID_ACC: - stream->pipes[i]->stop_requested = false; - acc_start(stream->pipes[i]); - break; - default: - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - } - } - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - /* Force ISP parameter calculation after a mode change - * Acceleration API examples pass NULL for stream but they - * don't use ISP parameters anyway. So this should be okay. - * The SP binary (jpeg) copy does not use any parameters. 
- */ - if (!copy_on_sp(pipe)) { - sh_css_invalidate_params(stream); - err = sh_css_param_update_isp_params(pipe, - stream->isp_params_configs, true, NULL); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - - ia_css_debug_pipe_graph_dump_epilogue(); - - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - - if (!sh_css_sp_is_running()) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE); - /* SP is not running. The queues are not valid */ - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM, - (uint8_t)thread_id, 0, 0); - - /* DH regular multi pipe - not continuous mode: enqueue event to the next pipes too */ - if (!stream->config.continuous) { - int i; - for (i = 1; i < stream->num_pipes; i++) { - ia_css_pipeline_get_sp_thread_id( - ia_css_pipe_get_pipe_num(stream->pipes[i]), - &thread_id); - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_START_STREAM, - (uint8_t)thread_id, 0, 0); - } - } - - /* in case of continuous capture mode, we also start capture thread and copy thread*/ - if (pipe->stream->config.continuous) { - struct ia_css_pipe *copy_pipe = NULL; - - if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) - copy_pipe = pipe->pipe_settings.preview.copy_pipe; - else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) - copy_pipe = pipe->pipe_settings.video.copy_pipe; - - if (copy_pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(copy_pipe), &thread_id); - /* by the time we reach here q is initialized and handle is available.*/ - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_START_STREAM, - (uint8_t)thread_id, 0, 0); - } - if (pipe->stream->cont_capt) { - struct ia_css_pipe *capture_pipe = NULL; - if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) - capture_pipe = pipe->pipe_settings.preview.capture_pipe; - else if (pipe_id == 
IA_CSS_PIPE_ID_VIDEO) - capture_pipe = pipe->pipe_settings.video.capture_pipe; - - if (capture_pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe), &thread_id); - /* by the time we reach here q is initialized and handle is available.*/ - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_START_STREAM, - (uint8_t)thread_id, 0, 0); - } - - /* in case of PREVIEW mode, check whether QOS acc_pipe is available, then start the qos pipe */ - if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) { - struct ia_css_pipe *acc_pipe = NULL; - acc_pipe = pipe->pipe_settings.preview.acc_pipe; - - if (acc_pipe){ - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(acc_pipe), &thread_id); - /* by the time we reach here q is initialized and handle is available.*/ - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_START_STREAM, - (uint8_t) thread_id, 0, 0); - } - } - - stream->started = true; - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -#ifndef ISP2401 -void -sh_css_enable_cont_capt(bool enable, bool stop_copy_preview) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "sh_css_enable_cont_capt() enter: enable=%d\n", enable); -//my_css.cont_capt = enable; - my_css.stop_copy_preview = stop_copy_preview; -} - -bool -sh_css_continuous_is_enabled(uint8_t pipe_num) -#else -/* - * @brief Stop all "ia_css_pipe" instances in the target - * "ia_css_stream" instance. - * - * Refer to "Local prototypes" for more info. 
- */ -static enum ia_css_err -sh_css_pipes_stop(struct ia_css_stream *stream) -#endif -{ -#ifndef ISP2401 - struct ia_css_pipe *pipe; - bool continuous; -#else - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipe *main_pipe; - enum ia_css_pipe_id main_pipe_id; - int i; -#endif - -#ifndef ISP2401 - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_continuous_is_enabled() enter: pipe_num=%d\n", pipe_num); -#else - assert(stream != NULL); - if (stream == NULL) { - IA_CSS_LOG("stream does NOT exist!"); - err = IA_CSS_ERR_INTERNAL_ERROR; - goto ERR; - } -#endif - -#ifndef ISP2401 - pipe = find_pipe_by_num(pipe_num); - continuous = pipe && pipe->stream->config.continuous; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "sh_css_continuous_is_enabled() leave: enable=%d\n", - continuous); - return continuous; -} -#else - main_pipe = stream->last_pipe; - assert(main_pipe != NULL); - if (main_pipe == NULL) { - IA_CSS_LOG("main_pipe does NOT exist!"); - err = IA_CSS_ERR_INTERNAL_ERROR; - goto ERR; - } -#endif - -#ifndef ISP2401 -enum ia_css_err -ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth) -{ - if (buffer_depth == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_max_buffer_depth() enter: void\n"); - (void)stream; - *buffer_depth = NUM_CONTINUOUS_FRAMES; - return IA_CSS_SUCCESS; -} -#else - main_pipe_id = main_pipe->mode; - IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id); -#endif - -#ifndef ISP2401 -enum ia_css_err -ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_set_buffer_depth() enter: num_frames=%d\n",buffer_depth); - (void)stream; - if (buffer_depth > NUM_CONTINUOUS_FRAMES || buffer_depth < 1) - return IA_CSS_ERR_INVALID_ARGUMENTS; - /* ok, value allowed */ - stream->config.target_num_cont_raw_buf = buffer_depth; - /* TODO: check what to regarding initialization */ - return 
IA_CSS_SUCCESS; -} -#else - /* - * Stop all "ia_css_pipe" instances in this target - * "ia_css_stream" instance. - */ - for (i = 0; i < stream->num_pipes; i++) { - /* send the "stop" request to the "ia_css_pipe" instance */ - IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d", - stream->pipes[i]->pipeline.pipe_id); - err = ia_css_pipeline_request_stop(&stream->pipes[i]->pipeline); -#endif - -#ifndef ISP2401 -enum ia_css_err -ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth) -{ - if (buffer_depth == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_buffer_depth() enter: void\n"); -#else - /* - * Exit this loop if "ia_css_pipeline_request_stop()" - * returns the error code. - * - * The error code would be generated in the following - * two cases: - * (1) The Scalar Processor has already been stopped. - * (2) The "Host->SP" event queue is full. - * - * As the convention of using CSS API 2.0/2.1, such CSS - * error code would be propogated from the CSS-internal - * API returned value to the CSS API returned value. Then - * the CSS driver should capture these error code and - * handle it in the driver exception handling mechanism. - */ - if (err != IA_CSS_SUCCESS) { - goto ERR; - } - } - - /* - * In the CSS firmware use scenario "Continuous Preview" - * as well as "Continuous Video", the "ia_css_pipe" instance - * "Copy Pipe" is activated. This "Copy Pipe" is private to - * the CSS firmware so that it is not listed in the target - * "ia_css_stream" instance. - * - * We need to stop this "Copy Pipe", as well. 
- */ - if (main_pipe->stream->config.continuous) { - struct ia_css_pipe *copy_pipe = NULL; - - /* get the reference to "Copy Pipe" */ - if (main_pipe_id == IA_CSS_PIPE_ID_PREVIEW) - copy_pipe = main_pipe->pipe_settings.preview.copy_pipe; - else if (main_pipe_id == IA_CSS_PIPE_ID_VIDEO) - copy_pipe = main_pipe->pipe_settings.video.copy_pipe; - - /* return the error code if "Copy Pipe" does NOT exist */ - assert(copy_pipe != NULL); - if (copy_pipe == NULL) { - IA_CSS_LOG("Copy Pipe does NOT exist!"); - err = IA_CSS_ERR_INTERNAL_ERROR; - goto ERR; - } - - /* send the "stop" request to "Copy Pipe" */ - IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d", - copy_pipe->pipeline.pipe_id); - err = ia_css_pipeline_request_stop(&copy_pipe->pipeline); - } - -ERR: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -/* - * @brief Check if all "ia_css_pipe" instances in the target - * "ia_css_stream" instance have stopped. - * - * Refer to "Local prototypes" for more info. - */ -static bool -sh_css_pipes_have_stopped(struct ia_css_stream *stream) -{ - bool rval = true; - - struct ia_css_pipe *main_pipe; - enum ia_css_pipe_id main_pipe_id; - - int i; - - assert(stream != NULL); - if (stream == NULL) { - IA_CSS_LOG("stream does NOT exist!"); - rval = false; - goto RET; - } - - main_pipe = stream->last_pipe; - assert(main_pipe != NULL); - - if (main_pipe == NULL) { - IA_CSS_LOG("main_pipe does NOT exist!"); - rval = false; - goto RET; - } - - main_pipe_id = main_pipe->mode; - IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id); - - /* - * Check if every "ia_css_pipe" instance in this target - * "ia_css_stream" instance has stopped. 
- */ - for (i = 0; i < stream->num_pipes; i++) { - rval = rval && ia_css_pipeline_has_stopped(&stream->pipes[i]->pipeline); - IA_CSS_LOG("Pipe has stopped: pipe_id=%d, stopped=%d", - stream->pipes[i]->pipeline.pipe_id, - rval); - } - - /* - * In the CSS firmware use scenario "Continuous Preview" - * as well as "Continuous Video", the "ia_css_pipe" instance - * "Copy Pipe" is activated. This "Copy Pipe" is private to - * the CSS firmware so that it is not listed in the target - * "ia_css_stream" instance. - * - * We need to check if this "Copy Pipe" has stopped, as well. - */ - if (main_pipe->stream->config.continuous) { - struct ia_css_pipe *copy_pipe = NULL; - - /* get the reference to "Copy Pipe" */ - if (main_pipe_id == IA_CSS_PIPE_ID_PREVIEW) - copy_pipe = main_pipe->pipe_settings.preview.copy_pipe; - else if (main_pipe_id == IA_CSS_PIPE_ID_VIDEO) - copy_pipe = main_pipe->pipe_settings.video.copy_pipe; - - /* return if "Copy Pipe" does NOT exist */ - assert(copy_pipe != NULL); - if (copy_pipe == NULL) { - IA_CSS_LOG("Copy Pipe does NOT exist!"); - - rval = false; - goto RET; - } - - /* check if "Copy Pipe" has stopped or not */ - rval = rval && ia_css_pipeline_has_stopped(&copy_pipe->pipeline); - IA_CSS_LOG("Pipe has stopped: pipe_id=%d, stopped=%d", - copy_pipe->pipeline.pipe_id, - rval); - } - -RET: - IA_CSS_LEAVE_PRIVATE("rval=%d", rval); - return rval; -} - -bool -sh_css_continuous_is_enabled(uint8_t pipe_num) -{ - struct ia_css_pipe *pipe; - bool continuous; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_continuous_is_enabled() enter: pipe_num=%d\n", pipe_num); - - pipe = find_pipe_by_num(pipe_num); - continuous = pipe && pipe->stream->config.continuous; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "sh_css_continuous_is_enabled() leave: enable=%d\n", - continuous); - return continuous; -} - -enum ia_css_err -ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth) -{ - if (buffer_depth == NULL) - return 
IA_CSS_ERR_INVALID_ARGUMENTS; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_max_buffer_depth() enter: void\n"); - (void)stream; - *buffer_depth = NUM_CONTINUOUS_FRAMES; - return IA_CSS_SUCCESS; -} - -enum ia_css_err -ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_set_buffer_depth() enter: num_frames=%d\n",buffer_depth); - (void)stream; - if (buffer_depth > NUM_CONTINUOUS_FRAMES || buffer_depth < 1) - return IA_CSS_ERR_INVALID_ARGUMENTS; - /* ok, value allowed */ - stream->config.target_num_cont_raw_buf = buffer_depth; - /* TODO: check what to regarding initialization */ - return IA_CSS_SUCCESS; -} - -enum ia_css_err -ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth) -{ - if (buffer_depth == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_buffer_depth() enter: void\n"); -#endif - (void)stream; - *buffer_depth = stream->config.target_num_cont_raw_buf; - return IA_CSS_SUCCESS; -} - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) -unsigned int -sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx) -{ - OP___assert(port < N_CSI_PORTS); - OP___assert(idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_get_mipi_sizes_for_check(port %d, idx %d): %d\n", - port, idx, my_css.mipi_sizes_for_check[port][idx]); - return my_css.mipi_sizes_for_check[port][idx]; -} -#endif - -static enum ia_css_err sh_css_pipe_configure_output( - struct ia_css_pipe *pipe, - unsigned int width, - unsigned int height, - unsigned int padded_width, - enum ia_css_frame_format format, - unsigned int idx) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, paddaed width = %d, format = %d, idx = %d", - pipe, width, height, padded_width, format, idx); 
- if (pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - err = ia_css_util_check_res(width, height); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - if (pipe->output_info[idx].res.width != width || - pipe->output_info[idx].res.height != height || - pipe->output_info[idx].format != format) - { - ia_css_frame_info_init( - &pipe->output_info[idx], - width, - height, - format, - padded_width); - } - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe, -#ifndef ISP2401 - struct ia_css_shading_info *info) -#else - struct ia_css_shading_info *shading_info, - struct ia_css_pipe_config *pipe_config) -#endif -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_binary *binary = NULL; - - assert(pipe != NULL); -#ifndef ISP2401 - assert(info != NULL); -#else - assert(shading_info != NULL); - assert(pipe_config != NULL); -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_pipe_get_shading_info() enter:\n"); - - binary = ia_css_pipe_get_shading_correction_binary(pipe); - - if (binary) { - err = ia_css_binary_get_shading_info(binary, - IA_CSS_SHADING_CORRECTION_TYPE_1, - pipe->required_bds_factor, - (const struct ia_css_stream_config *)&pipe->stream->config, -#ifndef ISP2401 - info); -#else - shading_info, pipe_config); -#endif - /* Other function calls can be added here when other shading correction types will be added - * in the future. - */ - } else { - /* When the pipe does not have a binary which has the shading - * correction, this function does not need to fill the shading - * information. It is not a error case, and then - * this function should return IA_CSS_SUCCESS. 
- */ -#ifndef ISP2401 - memset(info, 0, sizeof(*info)); -#else - memset(shading_info, 0, sizeof(*shading_info)); -#endif - } - return err; -} - -static enum ia_css_err -sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe, - struct ia_css_grid_info *info) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_binary *binary = NULL; - - assert(pipe != NULL); - assert(info != NULL); - - IA_CSS_ENTER_PRIVATE(""); - - binary = ia_css_pipe_get_s3a_binary(pipe); - - if (binary) { - err = ia_css_binary_3a_grid_info(binary, info, pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - } else - memset(&info->s3a_grid, 0, sizeof(info->s3a_grid)); - - binary = ia_css_pipe_get_sdis_binary(pipe); - - if (binary) { - ia_css_binary_dvs_grid_info(binary, info, pipe); - ia_css_binary_dvs_stat_grid_info(binary, info, pipe); - } else { - memset(&info->dvs_grid.dvs_grid_info, 0, - sizeof(info->dvs_grid.dvs_grid_info)); - memset(&info->dvs_grid.dvs_stat_grid_info, 0, - sizeof(info->dvs_grid.dvs_stat_grid_info)); - } - - if (binary != NULL) { - /* copy pipe does not have ISP binary*/ - info->isp_in_width = binary->internal_frame_info.res.width; - info->isp_in_height = binary->internal_frame_info.res.height; - } - -#if defined(HAS_VAMEM_VERSION_2) - info->vamem_type = IA_CSS_VAMEM_TYPE_2; -#elif defined(HAS_VAMEM_VERSION_1) - info->vamem_type = IA_CSS_VAMEM_TYPE_1; -#else -#error "Unknown VAMEM version" -#endif - -ERR: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -#ifdef ISP2401 -/* - * @brief Check if a format is supported by the pipe. 
- * - */ -static enum ia_css_err -ia_css_pipe_check_format(struct ia_css_pipe *pipe, enum ia_css_frame_format format) -{ - const enum ia_css_frame_format *supported_formats; - int number_of_formats; - int found = 0; - int i; - - IA_CSS_ENTER_PRIVATE(""); - - if (NULL == pipe || NULL == pipe->pipe_settings.video.video_binary.info) { - IA_CSS_ERROR("Pipe or binary info is not set"); - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - supported_formats = pipe->pipe_settings.video.video_binary.info->output_formats; - number_of_formats = sizeof(pipe->pipe_settings.video.video_binary.info->output_formats)/sizeof(enum ia_css_frame_format); - - for (i = 0; i < number_of_formats && !found; i++) { - if (supported_formats[i] == format) { - found = 1; - break; - } - } - if (!found) { - IA_CSS_ERROR("Requested format is not supported by binary"); - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } else { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; - } -} -#endif - -static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe) -{ - struct ia_css_frame_info video_in_info, tnr_info, - *video_vf_info, video_bds_out_info, *pipe_out_info, *pipe_vf_out_info; - bool online; - enum ia_css_err err = IA_CSS_SUCCESS; - bool continuous = pipe->stream->config.continuous; - unsigned int i; - unsigned num_output_pins; - struct ia_css_frame_info video_bin_out_info; - bool need_scaler = false; - bool vf_res_different_than_output = false; - bool need_vf_pp = false; - int vf_ds_log2; - struct ia_css_video_settings *mycs = &pipe->pipe_settings.video; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_VIDEO); - /* we only test the video_binary because offline video doesn't need a - * vf_pp binary and online does not (always use) the copy_binary. - * All are always reset at the same time anyway. 
- */ - if (mycs->video_binary.info) - return IA_CSS_SUCCESS; - - online = pipe->stream->config.online; - pipe_out_info = &pipe->output_info[0]; - pipe_vf_out_info = &pipe->vf_output_info[0]; - - assert(pipe_out_info != NULL); - - /* - * There is no explicit input format requirement for raw or yuv - * What matters is that there is a binary that supports the stream format. - * This is checked in the binary_find(), so no need to check it here - */ - err = ia_css_util_check_input(&pipe->stream->config, false, false); - if (err != IA_CSS_SUCCESS) - return err; - /* cannot have online video and input_mode memory */ - if (online && pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY) - return IA_CSS_ERR_INVALID_ARGUMENTS; - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) { - err = ia_css_util_check_vf_out_info(pipe_out_info, - pipe_vf_out_info); - if (err != IA_CSS_SUCCESS) - return err; - } else { - err = ia_css_frame_check_info(pipe_out_info); - if (err != IA_CSS_SUCCESS) - return err; - } - - if (pipe->out_yuv_ds_input_info.res.width) - video_bin_out_info = pipe->out_yuv_ds_input_info; - else - video_bin_out_info = *pipe_out_info; - - /* Video */ - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]){ - video_vf_info = pipe_vf_out_info; - vf_res_different_than_output = (video_vf_info->res.width != video_bin_out_info.res.width) || - (video_vf_info->res.height != video_bin_out_info.res.height); - } - else { - video_vf_info = NULL; - } - - need_scaler = need_downscaling(video_bin_out_info.res, pipe_out_info->res); - - /* we build up the pipeline starting at the end */ - /* YUV post-processing if needed */ - if (need_scaler) { - struct ia_css_cas_binary_descr cas_scaler_descr = { }; - - /* NV12 is the common format that is supported by both */ - /* yuv_scaler and the video_xx_isp2_min binaries. 
*/ - video_bin_out_info.format = IA_CSS_FRAME_FORMAT_NV12; - - err = ia_css_pipe_create_cas_scaler_desc_single_output( - &video_bin_out_info, - pipe_out_info, - NULL, - &cas_scaler_descr); - if (err != IA_CSS_SUCCESS) - return err; - mycs->num_yuv_scaler = cas_scaler_descr.num_stage; - mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage * - sizeof(struct ia_css_binary), GFP_KERNEL); - if (!mycs->yuv_scaler_binary) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - return err; - } - mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage - * sizeof(bool), GFP_KERNEL); - if (!mycs->is_output_stage) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - return err; - } - for (i = 0; i < cas_scaler_descr.num_stage; i++) { - struct ia_css_binary_descr yuv_scaler_descr; - mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i]; - ia_css_pipe_get_yuvscaler_binarydesc(pipe, - &yuv_scaler_descr, &cas_scaler_descr.in_info[i], - &cas_scaler_descr.out_info[i], - &cas_scaler_descr.internal_out_info[i], - &cas_scaler_descr.vf_info[i]); - err = ia_css_binary_find(&yuv_scaler_descr, - &mycs->yuv_scaler_binary[i]); - if (err != IA_CSS_SUCCESS) { - kfree(mycs->is_output_stage); - mycs->is_output_stage = NULL; - return err; - } - } - ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr); - } - - - { - struct ia_css_binary_descr video_descr; - enum ia_css_frame_format vf_info_format; - - err = ia_css_pipe_get_video_binarydesc(pipe, - &video_descr, &video_in_info, &video_bds_out_info, &video_bin_out_info, video_vf_info, - pipe->stream->config.left_padding); - if (err != IA_CSS_SUCCESS) - return err; - - /* In the case where video_vf_info is not NULL, this allows - * us to find a potential video library with desired vf format. - * If success, no vf_pp binary is needed. 
- * If failed, we will look up video binary with YUV_LINE vf format - */ - err = ia_css_binary_find(&video_descr, - &mycs->video_binary); - - if (err != IA_CSS_SUCCESS) { - if (video_vf_info) { - /* This will do another video binary lookup later for YUV_LINE format*/ - need_vf_pp = true; - } else - return err; - } else if (video_vf_info) { - /* The first video binary lookup is successful, but we may - * still need vf_pp binary based on additiona check */ - num_output_pins = mycs->video_binary.info->num_output_pins; - vf_ds_log2 = mycs->video_binary.vf_downscale_log2; - - /* If the binary has dual output pins, we need vf_pp if the resolution - * is different. */ - need_vf_pp |= ((num_output_pins == 2) && vf_res_different_than_output); - - /* If the binary has single output pin, we need vf_pp if additional - * scaling is needed for vf */ - need_vf_pp |= ((num_output_pins == 1) && - ((video_vf_info->res.width << vf_ds_log2 != pipe_out_info->res.width) || - (video_vf_info->res.height << vf_ds_log2 != pipe_out_info->res.height))); - } - - if (need_vf_pp) { - /* save the current vf_info format for restoration later */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "load_video_binaries() need_vf_pp; find video binary with YUV_LINE again\n"); - - vf_info_format = video_vf_info->format; - - if (!pipe->config.enable_vfpp_bci) - ia_css_frame_info_set_format(video_vf_info, - IA_CSS_FRAME_FORMAT_YUV_LINE); - - ia_css_binary_destroy_isp_parameters(&mycs->video_binary); - - err = ia_css_binary_find(&video_descr, - &mycs->video_binary); - - /* restore original vf_info format */ - ia_css_frame_info_set_format(video_vf_info, - vf_info_format); - if (err != IA_CSS_SUCCESS) - return err; - } - } - - /* If a video binary does not use a ref_frame, we set the frame delay - * to 0. This is the case for the 1-stage low-power video binary. 
*/ - if (!mycs->video_binary.info->sp.enable.ref_frame) - pipe->dvs_frame_delay = 0; - - /* The delay latency determines the number of invalid frames after - * a stream is started. */ - pipe->num_invalid_frames = pipe->dvs_frame_delay; - pipe->info.num_invalid_frames = pipe->num_invalid_frames; - - /* Viewfinder frames also decrement num_invalid_frames. If the pipe - * outputs a viewfinder output, then we need double the number of - * invalid frames */ - if (video_vf_info) - pipe->num_invalid_frames *= 2; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "load_video_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n", - pipe->num_invalid_frames, pipe->dvs_frame_delay); - -/* pqiao TODO: temp hack for PO, should be removed after offline YUVPP is enabled */ -#if !defined(USE_INPUT_SYSTEM_VERSION_2401) - /* Copy */ - if (!online && !continuous) { - /* TODO: what exactly needs doing, prepend the copy binary to - * video base this only on !online? - */ - err = load_copy_binary(pipe, - &mycs->copy_binary, - &mycs->video_binary); - if (err != IA_CSS_SUCCESS) - return err; - } -#else - (void)continuous; -#endif - -#if !defined(HAS_OUTPUT_SYSTEM) - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && need_vf_pp) { - struct ia_css_binary_descr vf_pp_descr; - - if (mycs->video_binary.vf_frame_info.format - == IA_CSS_FRAME_FORMAT_YUV_LINE) { - ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr, - &mycs->video_binary.vf_frame_info, - pipe_vf_out_info); - } else { - /* output from main binary is not yuv line. 
currently this is - * possible only when bci is enabled on vfpp output */ - assert(pipe->config.enable_vfpp_bci == true); - ia_css_pipe_get_yuvscaler_binarydesc(pipe, &vf_pp_descr, - &mycs->video_binary.vf_frame_info, - pipe_vf_out_info, NULL, NULL); - } - - err = ia_css_binary_find(&vf_pp_descr, - &mycs->vf_pp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } -#endif - - err = allocate_delay_frames(pipe); - - if (err != IA_CSS_SUCCESS) - return err; - - if (mycs->video_binary.info->sp.enable.block_output) { -#ifdef ISP2401 - unsigned int tnr_width; - unsigned int tnr_height; -#endif - tnr_info = mycs->video_binary.out_frame_info[0]; -#ifdef ISP2401 - - /* Select resolution for TNR. If - * output_system_in_resolution(GDC_out_resolution) is - * being used, then select that as it will also be in resolution for - * TNR. At present, it only make sense for Skycam */ - if (pipe->config.output_system_in_res.width && pipe->config.output_system_in_res.height) { - tnr_width = pipe->config.output_system_in_res.width; - tnr_height = pipe->config.output_system_in_res.height; - } else { - tnr_width = tnr_info.res.width; - tnr_height = tnr_info.res.height; - } - - /* Make tnr reference buffers output block width(in pix) align */ - tnr_info.res.width = - CEIL_MUL(tnr_width, - (mycs->video_binary.info->sp.block.block_width * ISP_NWAY)); - tnr_info.padded_width = tnr_info.res.width; - -#endif - /* Make tnr reference buffers output block height align */ -#ifndef ISP2401 - tnr_info.res.height = - CEIL_MUL(tnr_info.res.height, - mycs->video_binary.info->sp.block.output_block_height); -#else - tnr_info.res.height = - CEIL_MUL(tnr_height, - mycs->video_binary.info->sp.block.output_block_height); -#endif - } else { - tnr_info = mycs->video_binary.internal_frame_info; - } - tnr_info.format = IA_CSS_FRAME_FORMAT_YUV_LINE; - tnr_info.raw_bit_depth = SH_CSS_TNR_BIT_DEPTH; - -#ifndef ISP2401 - for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) { -#else - for (i = 0; i < NUM_TNR_FRAMES; i++) { 
-#endif - if (mycs->tnr_frames[i]) { - ia_css_frame_free(mycs->tnr_frames[i]); - mycs->tnr_frames[i] = NULL; - } - err = ia_css_frame_allocate_from_info( - &mycs->tnr_frames[i], - &tnr_info); - if (err != IA_CSS_SUCCESS) - return err; - } - IA_CSS_LEAVE_PRIVATE(""); - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -unload_video_binaries(struct ia_css_pipe *pipe) -{ - unsigned int i; - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - - if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ia_css_binary_unload(&pipe->pipe_settings.video.copy_binary); - ia_css_binary_unload(&pipe->pipe_settings.video.video_binary); - ia_css_binary_unload(&pipe->pipe_settings.video.vf_pp_binary); -#ifndef ISP2401 - ia_css_binary_unload(&pipe->pipe_settings.video.vf_pp_binary); -#endif - - for (i = 0; i < pipe->pipe_settings.video.num_yuv_scaler; i++) - ia_css_binary_unload(&pipe->pipe_settings.video.yuv_scaler_binary[i]); - - kfree(pipe->pipe_settings.video.is_output_stage); - pipe->pipe_settings.video.is_output_stage = NULL; - kfree(pipe->pipe_settings.video.yuv_scaler_binary); - pipe->pipe_settings.video.yuv_scaler_binary = NULL; - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -static enum ia_css_err video_start(struct ia_css_pipe *pipe) -{ - struct ia_css_binary *copy_binary; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipe *copy_pipe, *capture_pipe; - enum sh_css_pipe_config_override copy_ovrd; - enum ia_css_input_mode video_pipe_input_mode; - - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - video_pipe_input_mode = pipe->stream->config.mode; - - copy_pipe = pipe->pipe_settings.video.copy_pipe; - capture_pipe = pipe->pipe_settings.video.capture_pipe; - - copy_binary = 
&pipe->pipe_settings.video.copy_binary; - - sh_css_metrics_start_frame(); - - /* multi stream video needs mipi buffers */ - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - err = send_mipi_frames(pipe); - if (err != IA_CSS_SUCCESS) - return err; -#endif - - send_raw_frames(pipe); - { - unsigned int thread_id; - - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - copy_ovrd = 1 << thread_id; - - if (pipe->stream->cont_capt) { - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe), &thread_id); - copy_ovrd |= 1 << thread_id; - } - } - - /* Construct and load the copy pipe */ - if (pipe->stream->config.continuous) { - sh_css_sp_init_pipeline(&copy_pipe->pipeline, - IA_CSS_PIPE_ID_COPY, - (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe), - false, - pipe->stream->config.pixels_per_clock == 2, false, - false, pipe->required_bds_factor, - copy_ovrd, - pipe->stream->config.mode, - &pipe->stream->config.metadata_config, -#ifndef ISP2401 - &pipe->stream->info.metadata_info -#else - &pipe->stream->info.metadata_info, -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) -#ifndef ISP2401 - , pipe->stream->config.source.port.port -#else - pipe->stream->config.source.port.port, -#endif -#endif -#ifndef ISP2401 - ); -#else - &copy_pipe->config.internal_frame_origin_bqs_on_sctbl, - copy_pipe->stream->isp_params_configs); -#endif - - /* make the video pipe start with mem mode input, copy handles - the actual mode */ - video_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY; - } - - /* Construct and load the capture pipe */ - if (pipe->stream->cont_capt) { - sh_css_sp_init_pipeline(&capture_pipe->pipeline, - IA_CSS_PIPE_ID_CAPTURE, - (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe), - capture_pipe->config.default_capture_config.enable_xnr != 0, - capture_pipe->stream->config.pixels_per_clock == 2, - true, /* continuous */ - false, /* offline */ - capture_pipe->required_bds_factor, - 0, - IA_CSS_INPUT_MODE_MEMORY, - 
&pipe->stream->config.metadata_config, -#ifndef ISP2401 - &pipe->stream->info.metadata_info -#else - &pipe->stream->info.metadata_info, -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) -#ifndef ISP2401 - , (enum mipi_port_id)0 -#else - (enum mipi_port_id)0, -#endif -#endif -#ifndef ISP2401 - ); -#else - &capture_pipe->config.internal_frame_origin_bqs_on_sctbl, - capture_pipe->stream->isp_params_configs); -#endif - } - - start_pipe(pipe, copy_ovrd, video_pipe_input_mode); - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static -enum ia_css_err sh_css_pipe_get_viewfinder_frame_info( - struct ia_css_pipe *pipe, - struct ia_css_frame_info *info, - unsigned int idx) -{ - assert(pipe != NULL); - assert(info != NULL); - -/* We could print the pointer as input arg, and the values as output */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_pipe_get_viewfinder_frame_info() enter: void\n"); - - if ( pipe->mode == IA_CSS_PIPE_ID_CAPTURE && - (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW || - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER)) - return IA_CSS_ERR_MODE_HAS_NO_VIEWFINDER; - /* offline video does not generate viewfinder output */ - *info = pipe->vf_output_info[idx]; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_pipe_get_viewfinder_frame_info() leave: \ - info.res.width=%d, info.res.height=%d, \ - info.padded_width=%d, info.format=%d, \ - info.raw_bit_depth=%d, info.raw_bayer_order=%d\n", - info->res.width,info->res.height, - info->padded_width,info->format, - info->raw_bit_depth,info->raw_bayer_order); - - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width, - unsigned int height, unsigned int min_width, - enum ia_css_frame_format format, - unsigned int idx) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, min_width = %d, format = %d, idx = %d\n", - pipe, width, 
height, min_width, format, idx); - - if (pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - - err = ia_css_util_check_res(width, height); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - if (pipe->vf_output_info[idx].res.width != width || - pipe->vf_output_info[idx].res.height != height || - pipe->vf_output_info[idx].format != format) { - ia_css_frame_info_init(&pipe->vf_output_info[idx], width, height, - format, min_width); - } - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -static enum ia_css_err load_copy_binaries(struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - assert(pipe != NULL); - IA_CSS_ENTER_PRIVATE(""); - - assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY); - if (pipe->pipe_settings.capture.copy_binary.info) - return IA_CSS_SUCCESS; - - err = ia_css_frame_check_info(&pipe->output_info[0]); - if (err != IA_CSS_SUCCESS) - goto ERR; - - err = verify_copy_out_frame_format(pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - - err = load_copy_binary(pipe, - &pipe->pipe_settings.capture.copy_binary, - NULL); - -ERR: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static bool need_capture_pp( - const struct ia_css_pipe *pipe) -{ - const struct ia_css_frame_info *out_info = &pipe->output_info[0]; - IA_CSS_ENTER_LEAVE_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE); -#ifdef ISP2401 - - /* ldc and capture_pp are not supported in the same pipeline */ - if (need_capt_ldc(pipe) == true) - return false; -#endif - /* determine whether we need to use the capture_pp binary. - * This is needed for: - * 1. XNR or - * 2. Digital Zoom or - * 3. 
YUV downscaling - */ - if (pipe->out_yuv_ds_input_info.res.width && - ((pipe->out_yuv_ds_input_info.res.width != out_info->res.width) || - (pipe->out_yuv_ds_input_info.res.height != out_info->res.height))) - return true; - - if (pipe->config.default_capture_config.enable_xnr != 0) - return true; - - if ((pipe->stream->isp_params_configs->dz_config.dx < HRT_GDC_N) || - (pipe->stream->isp_params_configs->dz_config.dy < HRT_GDC_N) || - pipe->config.enable_dz) - return true; - - return false; -} - -static bool need_capt_ldc( - const struct ia_css_pipe *pipe) -{ - IA_CSS_ENTER_LEAVE_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE); - return (pipe->extra_config.enable_dvs_6axis) ? true:false; -} - -static enum ia_css_err set_num_primary_stages(unsigned int *num, enum ia_css_pipe_version version) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - if (num == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - switch (version) { - case IA_CSS_PIPE_VERSION_2_6_1: - *num = NUM_PRIMARY_HQ_STAGES; - break; - case IA_CSS_PIPE_VERSION_2_2: - case IA_CSS_PIPE_VERSION_1: - *num = NUM_PRIMARY_STAGES; - break; - default: - err = IA_CSS_ERR_INVALID_ARGUMENTS; - break; - } - - return err; -} - -static enum ia_css_err load_primary_binaries( - struct ia_css_pipe *pipe) -{ - bool online = false; - bool memory = false; - bool continuous = false; - bool need_pp = false; - bool need_isp_copy_binary = false; - bool need_ldc = false; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - bool sensor = false; -#endif - struct ia_css_frame_info prim_in_info, - prim_out_info, - capt_pp_out_info, vf_info, - *vf_pp_in_info, *pipe_out_info, -#ifndef ISP2401 - *pipe_vf_out_info, *capt_pp_in_info, - capt_ldc_out_info; -#else - *pipe_vf_out_info; -#endif - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_capture_settings *mycs; - unsigned int i; - bool need_extra_yuv_scaler = false; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->stream != NULL); - 
assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY); - - online = pipe->stream->config.online; - memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY; - continuous = pipe->stream->config.continuous; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR); -#endif - - mycs = &pipe->pipe_settings.capture; - pipe_out_info = &pipe->output_info[0]; - pipe_vf_out_info = &pipe->vf_output_info[0]; - - if (mycs->primary_binary[0].info) - return IA_CSS_SUCCESS; - - err = set_num_primary_stages(&mycs->num_primary_stage, pipe->config.isp_pipe_version); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) { - err = ia_css_util_check_vf_out_info(pipe_out_info, pipe_vf_out_info); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - else{ - err = ia_css_frame_check_info(pipe_out_info); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - need_pp = need_capture_pp(pipe); - - /* we use the vf output info to get the primary/capture_pp binary - configured for vf_veceven. It will select the closest downscaling - factor. */ - vf_info = *pipe_vf_out_info; - -/* - * WARNING: The #if def flag has been added below as a - * temporary solution to solve the problem of enabling the - * view finder in a single binary in a capture flow. The - * vf-pp stage has been removed for Skycam in the solution - * provided. The vf-pp stage should be re-introduced when - * required. This should not be considered as a clean solution. - * Proper investigation should be done to come up with the clean - * solution. - * */ - ia_css_frame_info_set_format(&vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE); - - /* TODO: All this yuv_scaler and capturepp calculation logic - * can be shared later. Capture_pp is also a yuv_scale binary - * with extra XNR funcionality. 
Therefore, it can be made as the - * first step of the cascade. */ - capt_pp_out_info = pipe->out_yuv_ds_input_info; - capt_pp_out_info.format = IA_CSS_FRAME_FORMAT_YUV420; - capt_pp_out_info.res.width /= MAX_PREFERRED_YUV_DS_PER_STEP; - capt_pp_out_info.res.height /= MAX_PREFERRED_YUV_DS_PER_STEP; - ia_css_frame_info_set_width(&capt_pp_out_info, capt_pp_out_info.res.width, 0); - -/* - * WARNING: The #if def flag has been added below as a - * temporary solution to solve the problem of enabling the - * view finder in a single binary in a capture flow. The - * vf-pp stage has been removed for Skycam in the solution - * provided. The vf-pp stage should be re-introduced when - * required. This should not be considered as a clean solution. - * Proper investigation should be done to come up with the clean - * solution. - * */ - need_extra_yuv_scaler = need_downscaling(capt_pp_out_info.res, - pipe_out_info->res); - - if (need_extra_yuv_scaler) { - struct ia_css_cas_binary_descr cas_scaler_descr = { }; - - err = ia_css_pipe_create_cas_scaler_desc_single_output( - &capt_pp_out_info, - pipe_out_info, - NULL, - &cas_scaler_descr); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - mycs->num_yuv_scaler = cas_scaler_descr.num_stage; - mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage * - sizeof(struct ia_css_binary), GFP_KERNEL); - if (!mycs->yuv_scaler_binary) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage * - sizeof(bool), GFP_KERNEL); - if (!mycs->is_output_stage) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - for (i = 0; i < cas_scaler_descr.num_stage; i++) { - struct ia_css_binary_descr yuv_scaler_descr; - mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i]; - ia_css_pipe_get_yuvscaler_binarydesc(pipe, - &yuv_scaler_descr, &cas_scaler_descr.in_info[i], - 
&cas_scaler_descr.out_info[i], - &cas_scaler_descr.internal_out_info[i], - &cas_scaler_descr.vf_info[i]); - err = ia_css_binary_find(&yuv_scaler_descr, - &mycs->yuv_scaler_binary[i]); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr); - - } else { - capt_pp_out_info = pipe->output_info[0]; - } - - /* TODO Do we disable ldc for skycam */ - need_ldc = need_capt_ldc(pipe); -#ifdef ISP2401 - /* ldc and capt_pp are not supported in the same pipeline */ - if (need_ldc) { - struct ia_css_binary_descr capt_ldc_descr; - ia_css_pipe_get_ldc_binarydesc(pipe, - &capt_ldc_descr, &prim_out_info, - &capt_pp_out_info); -#endif - -#ifdef ISP2401 - err = ia_css_binary_find(&capt_ldc_descr, - &mycs->capture_ldc_binary); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } else if (need_pp) { -#endif - /* we build up the pipeline starting at the end */ - /* Capture post-processing */ -#ifndef ISP2401 - if (need_pp) { -#endif - struct ia_css_binary_descr capture_pp_descr; -#ifndef ISP2401 - capt_pp_in_info = need_ldc ? 
&capt_ldc_out_info : &prim_out_info; -#endif - - ia_css_pipe_get_capturepp_binarydesc(pipe, -#ifndef ISP2401 - &capture_pp_descr, capt_pp_in_info, -#else - &capture_pp_descr, &prim_out_info, -#endif - &capt_pp_out_info, &vf_info); - err = ia_css_binary_find(&capture_pp_descr, - &mycs->capture_pp_binary); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } -#ifndef ISP2401 - - if(need_ldc) { - struct ia_css_binary_descr capt_ldc_descr; - ia_css_pipe_get_ldc_binarydesc(pipe, - &capt_ldc_descr, &prim_out_info, - &capt_ldc_out_info); - - err = ia_css_binary_find(&capt_ldc_descr, - &mycs->capture_ldc_binary); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } -#endif - } else { - prim_out_info = *pipe_out_info; - } - - /* Primary */ - { - struct ia_css_binary_descr prim_descr[MAX_NUM_PRIMARY_STAGES]; - - for (i = 0; i < mycs->num_primary_stage; i++) { - struct ia_css_frame_info *local_vf_info = NULL; - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && (i == mycs->num_primary_stage - 1)) - local_vf_info = &vf_info; - ia_css_pipe_get_primary_binarydesc(pipe, &prim_descr[i], &prim_in_info, &prim_out_info, local_vf_info, i); - err = ia_css_binary_find(&prim_descr[i], &mycs->primary_binary[i]); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - } - - /* Viewfinder post-processing */ - if (need_pp) { - vf_pp_in_info = - &mycs->capture_pp_binary.vf_frame_info; - } else { - vf_pp_in_info = - &mycs->primary_binary[mycs->num_primary_stage - 1].vf_frame_info; - } - -/* - * WARNING: The #if def flag has been added below as a - * temporary solution to solve the problem of enabling the - * view finder in a single binary in a capture flow. The - * vf-pp stage has been removed for Skycam in the solution - * provided. The vf-pp stage should be re-introduced when - * required. Thisshould not be considered as a clean solution. 
- * Proper * investigation should be done to come up with the clean - * solution. - * */ - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) - { - struct ia_css_binary_descr vf_pp_descr; - - ia_css_pipe_get_vfpp_binarydesc(pipe, - &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info); - err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - err = allocate_delay_frames(pipe); - - if (err != IA_CSS_SUCCESS) - return err; - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* When the input system is 2401, only the Direct Sensor Mode - * Offline Capture uses the ISP copy binary. - */ - need_isp_copy_binary = !online && sensor; -#else - need_isp_copy_binary = !online && !continuous && !memory; -#endif - - /* ISP Copy */ - if (need_isp_copy_binary) { - err = load_copy_binary(pipe, - &mycs->copy_binary, - &mycs->primary_binary[0]); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -allocate_delay_frames(struct ia_css_pipe *pipe) -{ - unsigned int num_delay_frames = 0, i = 0; - unsigned int dvs_frame_delay = 0; - struct ia_css_frame_info ref_info; - enum ia_css_err err = IA_CSS_SUCCESS; - enum ia_css_pipe_id mode = IA_CSS_PIPE_ID_VIDEO; - struct ia_css_frame **delay_frames = NULL; - - IA_CSS_ENTER_PRIVATE(""); - - if (pipe == NULL) { - IA_CSS_ERROR("Invalid args - pipe %p", pipe); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - mode = pipe->mode; - dvs_frame_delay = pipe->dvs_frame_delay; - - if (dvs_frame_delay > 0) - num_delay_frames = dvs_frame_delay + 1; - - switch (mode) { - case IA_CSS_PIPE_ID_CAPTURE: - { - struct ia_css_capture_settings *mycs_capture = &pipe->pipe_settings.capture; - (void)mycs_capture; - return err; - } - break; - case IA_CSS_PIPE_ID_VIDEO: - { - struct ia_css_video_settings *mycs_video = &pipe->pipe_settings.video; - ref_info = mycs_video->video_binary.internal_frame_info; 
- /*The ref frame expects - * 1. Y plane - * 2. UV plane with line interleaving, like below - * UUUUUU(width/2 times) VVVVVVVV..(width/2 times) - * - * This format is not YUV420(which has Y, U and V planes). - * Its closer to NV12, except that the UV plane has UV - * interleaving, like UVUVUVUVUVUVUVUVU... - * - * TODO: make this ref_frame format as a separate frame format - */ - ref_info.format = IA_CSS_FRAME_FORMAT_NV12; - delay_frames = mycs_video->delay_frames; - } - break; - case IA_CSS_PIPE_ID_PREVIEW: - { - struct ia_css_preview_settings *mycs_preview = &pipe->pipe_settings.preview; - ref_info = mycs_preview->preview_binary.internal_frame_info; - /*The ref frame expects - * 1. Y plane - * 2. UV plane with line interleaving, like below - * UUUUUU(width/2 times) VVVVVVVV..(width/2 times) - * - * This format is not YUV420(which has Y, U and V planes). - * Its closer to NV12, except that the UV plane has UV - * interleaving, like UVUVUVUVUVUVUVUVU... - * - * TODO: make this ref_frame format as a separate frame format - */ - ref_info.format = IA_CSS_FRAME_FORMAT_NV12; - delay_frames = mycs_preview->delay_frames; - } - break; - default: - return IA_CSS_ERR_INVALID_ARGUMENTS; - - } - - ref_info.raw_bit_depth = SH_CSS_REF_BIT_DEPTH; - - assert(num_delay_frames <= MAX_NUM_VIDEO_DELAY_FRAMES); - for (i = 0; i < num_delay_frames; i++) { - err = ia_css_frame_allocate_from_info(&delay_frames[i], &ref_info); - if (err != IA_CSS_SUCCESS) - return err; - } - IA_CSS_LEAVE_PRIVATE(""); - return IA_CSS_SUCCESS; -} - -static enum ia_css_err load_advanced_binaries( - struct ia_css_pipe *pipe) -{ - struct ia_css_frame_info pre_in_info, gdc_in_info, - post_in_info, post_out_info, - vf_info, *vf_pp_in_info, *pipe_out_info, - *pipe_vf_out_info; - bool need_pp; - bool need_isp_copy = true; - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE(""); - - assert(pipe != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY); - if 
(pipe->pipe_settings.capture.pre_isp_binary.info) - return IA_CSS_SUCCESS; - pipe_out_info = &pipe->output_info[0]; - pipe_vf_out_info = &pipe->vf_output_info[0]; - - vf_info = *pipe_vf_out_info; - err = ia_css_util_check_vf_out_info(pipe_out_info, &vf_info); - if (err != IA_CSS_SUCCESS) - return err; - need_pp = need_capture_pp(pipe); - - ia_css_frame_info_set_format(&vf_info, - IA_CSS_FRAME_FORMAT_YUV_LINE); - - /* we build up the pipeline starting at the end */ - /* Capture post-processing */ - if (need_pp) { - struct ia_css_binary_descr capture_pp_descr; - - ia_css_pipe_get_capturepp_binarydesc(pipe, - &capture_pp_descr, &post_out_info, pipe_out_info, &vf_info); - err = ia_css_binary_find(&capture_pp_descr, - &pipe->pipe_settings.capture.capture_pp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } else { - post_out_info = *pipe_out_info; - } - - /* Post-gdc */ - { - struct ia_css_binary_descr post_gdc_descr; - - ia_css_pipe_get_post_gdc_binarydesc(pipe, - &post_gdc_descr, &post_in_info, &post_out_info, &vf_info); - err = ia_css_binary_find(&post_gdc_descr, - &pipe->pipe_settings.capture.post_isp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - - /* Gdc */ - { - struct ia_css_binary_descr gdc_descr; - - ia_css_pipe_get_gdc_binarydesc(pipe, &gdc_descr, &gdc_in_info, - &pipe->pipe_settings.capture.post_isp_binary.in_frame_info); - err = ia_css_binary_find(&gdc_descr, - &pipe->pipe_settings.capture.anr_gdc_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - pipe->pipe_settings.capture.anr_gdc_binary.left_padding = - pipe->pipe_settings.capture.post_isp_binary.left_padding; - - /* Pre-gdc */ - { - struct ia_css_binary_descr pre_gdc_descr; - - ia_css_pipe_get_pre_gdc_binarydesc(pipe, &pre_gdc_descr, &pre_in_info, - &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info); - err = ia_css_binary_find(&pre_gdc_descr, - &pipe->pipe_settings.capture.pre_isp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - 
pipe->pipe_settings.capture.pre_isp_binary.left_padding = - pipe->pipe_settings.capture.anr_gdc_binary.left_padding; - - /* Viewfinder post-processing */ - if (need_pp) { - vf_pp_in_info = - &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info; - } else { - vf_pp_in_info = - &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info; - } - - { - struct ia_css_binary_descr vf_pp_descr; - - ia_css_pipe_get_vfpp_binarydesc(pipe, - &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info); - err = ia_css_binary_find(&vf_pp_descr, - &pipe->pipe_settings.capture.vf_pp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - - /* Copy */ -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* For CSI2+, only the direct sensor mode/online requires ISP copy */ - need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR; -#endif - if (need_isp_copy) - load_copy_binary(pipe, - &pipe->pipe_settings.capture.copy_binary, - &pipe->pipe_settings.capture.pre_isp_binary); - - return err; -} - -static enum ia_css_err load_bayer_isp_binaries( - struct ia_css_pipe *pipe) -{ - struct ia_css_frame_info pre_isp_in_info, *pipe_out_info; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_binary_descr pre_de_descr; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY); - pipe_out_info = &pipe->output_info[0]; - - if (pipe->pipe_settings.capture.pre_isp_binary.info) - return IA_CSS_SUCCESS; - - err = ia_css_frame_check_info(pipe_out_info); - if (err != IA_CSS_SUCCESS) - return err; - - ia_css_pipe_get_pre_de_binarydesc(pipe, &pre_de_descr, - &pre_isp_in_info, - pipe_out_info); - - err = ia_css_binary_find(&pre_de_descr, - &pipe->pipe_settings.capture.pre_isp_binary); - - return err; -} - -static enum ia_css_err load_low_light_binaries( - struct ia_css_pipe *pipe) -{ - struct ia_css_frame_info pre_in_info, anr_in_info, - post_in_info, post_out_info, - vf_info, *pipe_vf_out_info, *pipe_out_info, - 
*vf_pp_in_info; - bool need_pp; - bool need_isp_copy = true; - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY); - - if (pipe->pipe_settings.capture.pre_isp_binary.info) - return IA_CSS_SUCCESS; - pipe_vf_out_info = &pipe->vf_output_info[0]; - pipe_out_info = &pipe->output_info[0]; - - vf_info = *pipe_vf_out_info; - err = ia_css_util_check_vf_out_info(pipe_out_info, - &vf_info); - if (err != IA_CSS_SUCCESS) - return err; - need_pp = need_capture_pp(pipe); - - ia_css_frame_info_set_format(&vf_info, - IA_CSS_FRAME_FORMAT_YUV_LINE); - - /* we build up the pipeline starting at the end */ - /* Capture post-processing */ - if (need_pp) { - struct ia_css_binary_descr capture_pp_descr; - - ia_css_pipe_get_capturepp_binarydesc(pipe, - &capture_pp_descr, &post_out_info, pipe_out_info, &vf_info); - err = ia_css_binary_find(&capture_pp_descr, - &pipe->pipe_settings.capture.capture_pp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } else { - post_out_info = *pipe_out_info; - } - - /* Post-anr */ - { - struct ia_css_binary_descr post_anr_descr; - - ia_css_pipe_get_post_anr_binarydesc(pipe, - &post_anr_descr, &post_in_info, &post_out_info, &vf_info); - err = ia_css_binary_find(&post_anr_descr, - &pipe->pipe_settings.capture.post_isp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - - /* Anr */ - { - struct ia_css_binary_descr anr_descr; - - ia_css_pipe_get_anr_binarydesc(pipe, &anr_descr, &anr_in_info, - &pipe->pipe_settings.capture.post_isp_binary.in_frame_info); - err = ia_css_binary_find(&anr_descr, - &pipe->pipe_settings.capture.anr_gdc_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - pipe->pipe_settings.capture.anr_gdc_binary.left_padding = - pipe->pipe_settings.capture.post_isp_binary.left_padding; - - /* Pre-anr */ - { - struct ia_css_binary_descr pre_anr_descr; - - ia_css_pipe_get_pre_anr_binarydesc(pipe, &pre_anr_descr, 
&pre_in_info, - &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info); - err = ia_css_binary_find(&pre_anr_descr, - &pipe->pipe_settings.capture.pre_isp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - pipe->pipe_settings.capture.pre_isp_binary.left_padding = - pipe->pipe_settings.capture.anr_gdc_binary.left_padding; - - /* Viewfinder post-processing */ - if (need_pp) { - vf_pp_in_info = - &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info; - } else { - vf_pp_in_info = - &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info; - } - - { - struct ia_css_binary_descr vf_pp_descr; - - ia_css_pipe_get_vfpp_binarydesc(pipe, - &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info); - err = ia_css_binary_find(&vf_pp_descr, - &pipe->pipe_settings.capture.vf_pp_binary); - if (err != IA_CSS_SUCCESS) - return err; - } - - /* Copy */ -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* For CSI2+, only the direct sensor mode/online requires ISP copy */ - need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR; -#endif - if (need_isp_copy) - err = load_copy_binary(pipe, - &pipe->pipe_settings.capture.copy_binary, - &pipe->pipe_settings.capture.pre_isp_binary); - - return err; -} - -static bool copy_on_sp(struct ia_css_pipe *pipe) -{ - bool rval; - - assert(pipe != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "copy_on_sp() enter:\n"); - - rval = true; - - rval &= (pipe->mode == IA_CSS_PIPE_ID_CAPTURE); - - rval &= (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW); - - rval &= ((pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) || - (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)); - - return rval; -} - -static enum ia_css_err load_capture_binaries( - struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - bool must_be_raw; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY); - - if 
(pipe->pipe_settings.capture.primary_binary[0].info) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; - } - - /* in primary, advanced,low light or bayer, - the input format must be raw */ - must_be_raw = - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED || - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER || - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT; - err = ia_css_util_check_input(&pipe->stream->config, must_be_raw, false); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - if (copy_on_sp(pipe) && - pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) { - ia_css_frame_info_init( - &pipe->output_info[0], - JPEG_BYTES, - 1, - IA_CSS_FRAME_FORMAT_BINARY_8, - 0); - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; - } - - switch (pipe->config.default_capture_config.mode) { - case IA_CSS_CAPTURE_MODE_RAW: - err = load_copy_binaries(pipe); -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401) - if (err == IA_CSS_SUCCESS) - pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online; -#endif - break; - case IA_CSS_CAPTURE_MODE_BAYER: - err = load_bayer_isp_binaries(pipe); - break; - case IA_CSS_CAPTURE_MODE_PRIMARY: - err = load_primary_binaries(pipe); - break; - case IA_CSS_CAPTURE_MODE_ADVANCED: - err = load_advanced_binaries(pipe); - break; - case IA_CSS_CAPTURE_MODE_LOW_LIGHT: - err = load_low_light_binaries(pipe); - break; - } - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static enum ia_css_err -unload_capture_binaries(struct ia_css_pipe *pipe) -{ - unsigned int i; - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - - if ((pipe == NULL) || ((pipe->mode != IA_CSS_PIPE_ID_CAPTURE) && (pipe->mode != IA_CSS_PIPE_ID_COPY))) { - 
IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ia_css_binary_unload(&pipe->pipe_settings.capture.copy_binary); - for (i = 0; i < MAX_NUM_PRIMARY_STAGES; i++) - ia_css_binary_unload(&pipe->pipe_settings.capture.primary_binary[i]); - ia_css_binary_unload(&pipe->pipe_settings.capture.pre_isp_binary); - ia_css_binary_unload(&pipe->pipe_settings.capture.anr_gdc_binary); - ia_css_binary_unload(&pipe->pipe_settings.capture.post_isp_binary); - ia_css_binary_unload(&pipe->pipe_settings.capture.capture_pp_binary); - ia_css_binary_unload(&pipe->pipe_settings.capture.capture_ldc_binary); - ia_css_binary_unload(&pipe->pipe_settings.capture.vf_pp_binary); - - for (i = 0; i < pipe->pipe_settings.capture.num_yuv_scaler; i++) - ia_css_binary_unload(&pipe->pipe_settings.capture.yuv_scaler_binary[i]); - - kfree(pipe->pipe_settings.capture.is_output_stage); - pipe->pipe_settings.capture.is_output_stage = NULL; - kfree(pipe->pipe_settings.capture.yuv_scaler_binary); - pipe->pipe_settings.capture.yuv_scaler_binary = NULL; - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -static bool -need_downscaling(const struct ia_css_resolution in_res, - const struct ia_css_resolution out_res) -{ - - if (in_res.width > out_res.width || in_res.height > out_res.height) - return true; - - return false; -} - -static bool -need_yuv_scaler_stage(const struct ia_css_pipe *pipe) -{ - unsigned int i; - struct ia_css_resolution in_res, out_res; - - bool need_format_conversion = false; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP); - - /* TODO: make generic function */ - need_format_conversion = - ((pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) && - (pipe->output_info[0].format != IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8)); - - in_res = pipe->config.input_effective_res; - - if (pipe->config.enable_dz) - return true; - - if 
((pipe->output_info[0].res.width != 0) && need_format_conversion) - return true; - - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - out_res = pipe->output_info[i].res; - - /* A non-zero width means it is a valid output port */ - if ((out_res.width != 0) && need_downscaling(in_res, out_res)) - return true; - } - - return false; -} - -/* TODO: it is temporarily created from ia_css_pipe_create_cas_scaler_desc */ -/* which has some hard-coded knowledge which prevents reuse of the function. */ -/* Later, merge this with ia_css_pipe_create_cas_scaler_desc */ -static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output( - struct ia_css_frame_info *cas_scaler_in_info, - struct ia_css_frame_info *cas_scaler_out_info, - struct ia_css_frame_info *cas_scaler_vf_info, - struct ia_css_cas_binary_descr *descr) -{ - unsigned int i; - unsigned int hor_ds_factor = 0, ver_ds_factor = 0; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_frame_info tmp_in_info; - - unsigned max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP; - - assert(cas_scaler_in_info != NULL); - assert(cas_scaler_out_info != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() enter:\n"); - - /* We assume that this function is used only for single output port case. 
*/ - descr->num_output_stage = 1; - - hor_ds_factor = CEIL_DIV(cas_scaler_in_info->res.width , cas_scaler_out_info->res.width); - ver_ds_factor = CEIL_DIV(cas_scaler_in_info->res.height, cas_scaler_out_info->res.height); - /* use the same horizontal and vertical downscaling factor for simplicity */ - assert(hor_ds_factor == ver_ds_factor); - - i = 1; - while (i < hor_ds_factor) { - descr->num_stage++; - i *= max_scale_factor_per_stage; - } - - descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL); - if (!descr->in_info) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL); - if (!descr->internal_out_info) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL); - if (!descr->out_info) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL); - if (!descr->vf_info) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL); - if (!descr->is_output_stage) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - - tmp_in_info = *cas_scaler_in_info; - for (i = 0; i < descr->num_stage; i++) { - - descr->in_info[i] = tmp_in_info; - if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= cas_scaler_out_info->res.width) { - descr->is_output_stage[i] = true; - if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) { - descr->internal_out_info[i].res.width = cas_scaler_out_info->res.width; - descr->internal_out_info[i].res.height = cas_scaler_out_info->res.height; - descr->internal_out_info[i].padded_width = cas_scaler_out_info->padded_width; - descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420; - } else { - assert(i == 
(descr->num_stage - 1)); - descr->internal_out_info[i].res.width = 0; - descr->internal_out_info[i].res.height = 0; - } - descr->out_info[i].res.width = cas_scaler_out_info->res.width; - descr->out_info[i].res.height = cas_scaler_out_info->res.height; - descr->out_info[i].padded_width = cas_scaler_out_info->padded_width; - descr->out_info[i].format = cas_scaler_out_info->format; - if (cas_scaler_vf_info != NULL) { - descr->vf_info[i].res.width = cas_scaler_vf_info->res.width; - descr->vf_info[i].res.height = cas_scaler_vf_info->res.height; - descr->vf_info[i].padded_width = cas_scaler_vf_info->padded_width; - ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE); - } else { - descr->vf_info[i].res.width = 0; - descr->vf_info[i].res.height = 0; - descr->vf_info[i].padded_width = 0; - } - } else { - descr->is_output_stage[i] = false; - descr->internal_out_info[i].res.width = tmp_in_info.res.width / max_scale_factor_per_stage; - descr->internal_out_info[i].res.height = tmp_in_info.res.height / max_scale_factor_per_stage; - descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420; - ia_css_frame_info_init(&descr->internal_out_info[i], - tmp_in_info.res.width / max_scale_factor_per_stage, - tmp_in_info.res.height / max_scale_factor_per_stage, - IA_CSS_FRAME_FORMAT_YUV420, 0); - descr->out_info[i].res.width = 0; - descr->out_info[i].res.height = 0; - descr->vf_info[i].res.width = 0; - descr->vf_info[i].res.height = 0; - } - tmp_in_info = descr->internal_out_info[i]; - } -ERR: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n", - err); - return err; -} - -/* FIXME: merge most of this and single output version */ -static enum ia_css_err ia_css_pipe_create_cas_scaler_desc(struct ia_css_pipe *pipe, - struct ia_css_cas_binary_descr *descr) -{ - struct ia_css_frame_info in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO; - struct ia_css_frame_info *out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct 
ia_css_frame_info *vf_out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct ia_css_frame_info tmp_in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO; - unsigned int i, j; - unsigned int hor_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE], - ver_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE], - scale_factor = 0; - unsigned int num_stages = 0; - enum ia_css_err err = IA_CSS_SUCCESS; - - unsigned max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() enter:\n"); - - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - out_info[i] = NULL; - vf_out_info[i] = NULL; - hor_scale_factor[i] = 0; - ver_scale_factor[i] = 0; - } - - in_info.res = pipe->config.input_effective_res; - in_info.padded_width = in_info.res.width; - descr->num_output_stage = 0; - /* Find out how much scaling we need for each output */ - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - if (pipe->output_info[i].res.width != 0) { - out_info[i] = &pipe->output_info[i]; - if (pipe->vf_output_info[i].res.width != 0) - vf_out_info[i] = &pipe->vf_output_info[i]; - descr->num_output_stage += 1; - } - - if (out_info[i] != NULL) { - hor_scale_factor[i] = CEIL_DIV(in_info.res.width, out_info[i]->res.width); - ver_scale_factor[i] = CEIL_DIV(in_info.res.height, out_info[i]->res.height); - /* use the same horizontal and vertical scaling factor for simplicity */ - assert(hor_scale_factor[i] == ver_scale_factor[i]); - scale_factor = 1; - do { - num_stages++; - scale_factor *= max_scale_factor_per_stage; - } while (scale_factor < hor_scale_factor[i]); - - in_info.res = out_info[i]->res; - } - } - - if (need_yuv_scaler_stage(pipe) && (num_stages == 0)) - num_stages = 1; - - descr->num_stage = num_stages; - - descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL); - if (!descr->in_info) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - descr->internal_out_info = kmalloc(descr->num_stage * 
sizeof(struct ia_css_frame_info), GFP_KERNEL); - if (!descr->internal_out_info) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL); - if (!descr->out_info) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL); - if (!descr->vf_info) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL); - if (descr->is_output_stage == NULL) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - if (out_info[i]) { - if (i > 0) { - assert((out_info[i-1]->res.width >= out_info[i]->res.width) && - (out_info[i-1]->res.height >= out_info[i]->res.height)); - } - } - } - - tmp_in_info.res = pipe->config.input_effective_res; - tmp_in_info.format = IA_CSS_FRAME_FORMAT_YUV420; - for (i = 0, j = 0; i < descr->num_stage; i++) { - assert(j < 2); - assert(out_info[j] != NULL); - - descr->in_info[i] = tmp_in_info; - if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= out_info[j]->res.width) { - descr->is_output_stage[i] = true; - if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) { - descr->internal_out_info[i].res.width = out_info[j]->res.width; - descr->internal_out_info[i].res.height = out_info[j]->res.height; - descr->internal_out_info[i].padded_width = out_info[j]->padded_width; - descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420; - } else { - assert(i == (descr->num_stage - 1)); - descr->internal_out_info[i].res.width = 0; - descr->internal_out_info[i].res.height = 0; - } - descr->out_info[i].res.width = out_info[j]->res.width; - descr->out_info[i].res.height = out_info[j]->res.height; - descr->out_info[i].padded_width = out_info[j]->padded_width; - descr->out_info[i].format = out_info[j]->format; - if 
(vf_out_info[j] != NULL) { - descr->vf_info[i].res.width = vf_out_info[j]->res.width; - descr->vf_info[i].res.height = vf_out_info[j]->res.height; - descr->vf_info[i].padded_width = vf_out_info[j]->padded_width; - ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE); - } else { - descr->vf_info[i].res.width = 0; - descr->vf_info[i].res.height = 0; - descr->vf_info[i].padded_width = 0; - } - j++; - } else { - descr->is_output_stage[i] = false; - descr->internal_out_info[i].res.width = tmp_in_info.res.width / max_scale_factor_per_stage; - descr->internal_out_info[i].res.height = tmp_in_info.res.height / max_scale_factor_per_stage; - descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420; - ia_css_frame_info_init(&descr->internal_out_info[i], - tmp_in_info.res.width / max_scale_factor_per_stage, - tmp_in_info.res.height / max_scale_factor_per_stage, - IA_CSS_FRAME_FORMAT_YUV420, 0); - descr->out_info[i].res.width = 0; - descr->out_info[i].res.height = 0; - descr->vf_info[i].res.width = 0; - descr->vf_info[i].res.height = 0; - } - tmp_in_info = descr->internal_out_info[i]; - } -ERR: - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n", - err); - return err; -} - -static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr *descr) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_destroy_cas_scaler_desc() enter:\n"); - kfree(descr->in_info); - descr->in_info = NULL; - kfree(descr->internal_out_info); - descr->internal_out_info = NULL; - kfree(descr->out_info); - descr->out_info = NULL; - kfree(descr->vf_info); - descr->vf_info = NULL; - kfree(descr->is_output_stage); - descr->is_output_stage = NULL; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_destroy_cas_scaler_desc() leave\n"); -} - -static enum ia_css_err -load_yuvpp_binaries(struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - bool need_scaler = false; - struct 
ia_css_frame_info *vf_pp_in_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct ia_css_yuvpp_settings *mycs; - struct ia_css_binary *next_binary; - struct ia_css_cas_binary_descr cas_scaler_descr = { }; - unsigned int i, j; - bool need_isp_copy_binary = false; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->stream != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP); - - if (pipe->pipe_settings.yuvpp.copy_binary.info) - goto ERR; - - /* Set both must_be_raw and must_be_yuv to false then yuvpp can take rgb inputs */ - err = ia_css_util_check_input(&pipe->stream->config, false, false); - if (err != IA_CSS_SUCCESS) - goto ERR; - - mycs = &pipe->pipe_settings.yuvpp; - - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - if (pipe->vf_output_info[i].res.width != 0) { - err = ia_css_util_check_vf_out_info(&pipe->output_info[i], - &pipe->vf_output_info[i]); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - vf_pp_in_info[i] = NULL; - } - - need_scaler = need_yuv_scaler_stage(pipe); - - /* we build up the pipeline starting at the end */ - /* Capture post-processing */ - if (need_scaler) { - struct ia_css_binary_descr yuv_scaler_descr; - - err = ia_css_pipe_create_cas_scaler_desc(pipe, - &cas_scaler_descr); - if (err != IA_CSS_SUCCESS) - goto ERR; - mycs->num_output = cas_scaler_descr.num_output_stage; - mycs->num_yuv_scaler = cas_scaler_descr.num_stage; - mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage * - sizeof(struct ia_css_binary), GFP_KERNEL); - if (!mycs->yuv_scaler_binary) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage * - sizeof(bool), GFP_KERNEL); - if (!mycs->is_output_stage) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - for (i = 0; i < cas_scaler_descr.num_stage; i++) { - mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i]; - ia_css_pipe_get_yuvscaler_binarydesc(pipe, - &yuv_scaler_descr, &cas_scaler_descr.in_info[i], - 
&cas_scaler_descr.out_info[i], - &cas_scaler_descr.internal_out_info[i], - &cas_scaler_descr.vf_info[i]); - err = ia_css_binary_find(&yuv_scaler_descr, - &mycs->yuv_scaler_binary[i]); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr); - } else { - mycs->num_output = 1; - } - - if (need_scaler) { - next_binary = &mycs->yuv_scaler_binary[0]; - } else { - next_binary = NULL; - } - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - /* - * NOTES - * - Why does the "yuvpp" pipe needs "isp_copy_binary" (i.e. ISP Copy) when - * its input is "ATOMISP_INPUT_FORMAT_YUV422_8"? - * - * In most use cases, the first stage in the "yuvpp" pipe is the "yuv_scale_ - * binary". However, the "yuv_scale_binary" does NOT support the input-frame - * format as "IA_CSS_STREAM _FORMAT_YUV422_8". - * - * Hence, the "isp_copy_binary" is required to be present in front of the "yuv - * _scale_binary". It would translate the input-frame to the frame formats that - * are supported by the "yuv_scale_binary". - * - * Please refer to "FrameWork/css/isp/pipes/capture_pp/capture_pp_1.0/capture_ - * pp_defs.h" for the list of input-frame formats that are supported by the - * "yuv_scale_binary". - */ - need_isp_copy_binary = - (pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8); -#else /* !USE_INPUT_SYSTEM_VERSION_2401 */ - need_isp_copy_binary = true; -#endif /* USE_INPUT_SYSTEM_VERSION_2401 */ - - if (need_isp_copy_binary) { - err = load_copy_binary(pipe, - &mycs->copy_binary, - next_binary); - - if (err != IA_CSS_SUCCESS) - goto ERR; - - /* - * NOTES - * - Why is "pipe->pipe_settings.capture.copy_binary.online" specified? - * - * In some use cases, the first stage in the "yuvpp" pipe is the - * "isp_copy_binary". The "isp_copy_binary" is designed to process - * the input from either the system DDR or from the IPU internal VMEM. 
- * So it provides the flag "online" to specify where its input is from, - * i.e.: - * - * (1) "online <= true", the input is from the IPU internal VMEM. - * (2) "online <= false", the input is from the system DDR. - * - * In other use cases, the first stage in the "yuvpp" pipe is the - * "yuv_scale_binary". "The "yuv_scale_binary" is designed to process the - * input ONLY from the system DDR. So it does not provide the flag "online" - * to specify where its input is from. - */ - pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online; - } - - /* Viewfinder post-processing */ - if (need_scaler) { - for (i = 0, j = 0; i < mycs->num_yuv_scaler; i++) { - if (mycs->is_output_stage[i]) { - assert(j < 2); - vf_pp_in_info[j] = - &mycs->yuv_scaler_binary[i].vf_frame_info; - j++; - } - } - mycs->num_vf_pp = j; - } else { - vf_pp_in_info[0] = - &mycs->copy_binary.vf_frame_info; - for (i = 1; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - vf_pp_in_info[i] = NULL; - } - mycs->num_vf_pp = 1; - } - mycs->vf_pp_binary = kzalloc(mycs->num_vf_pp * sizeof(struct ia_css_binary), - GFP_KERNEL); - if (!mycs->vf_pp_binary) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto ERR; - } - - { - struct ia_css_binary_descr vf_pp_descr; - - for (i = 0; i < mycs->num_vf_pp; i++) { - if (pipe->vf_output_info[i].res.width != 0) { - ia_css_pipe_get_vfpp_binarydesc(pipe, - &vf_pp_descr, vf_pp_in_info[i], &pipe->vf_output_info[i]); - err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary[i]); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - } - } - - if (err != IA_CSS_SUCCESS) - goto ERR; - -ERR: - if (need_scaler) { - ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr); - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "load_yuvpp_binaries() leave, err=%d\n", - err); - return err; -} - -static enum ia_css_err -unload_yuvpp_binaries(struct ia_css_pipe *pipe) -{ - unsigned int i; - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - - if ((pipe == NULL) || (pipe->mode != 
IA_CSS_PIPE_ID_YUVPP)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - ia_css_binary_unload(&pipe->pipe_settings.yuvpp.copy_binary); - for (i = 0; i < pipe->pipe_settings.yuvpp.num_yuv_scaler; i++) { - ia_css_binary_unload(&pipe->pipe_settings.yuvpp.yuv_scaler_binary[i]); - } - for (i = 0; i < pipe->pipe_settings.yuvpp.num_vf_pp; i++) { - ia_css_binary_unload(&pipe->pipe_settings.yuvpp.vf_pp_binary[i]); - } - kfree(pipe->pipe_settings.yuvpp.is_output_stage); - pipe->pipe_settings.yuvpp.is_output_stage = NULL; - kfree(pipe->pipe_settings.yuvpp.yuv_scaler_binary); - pipe->pipe_settings.yuvpp.yuv_scaler_binary = NULL; - kfree(pipe->pipe_settings.yuvpp.vf_pp_binary); - pipe->pipe_settings.yuvpp.vf_pp_binary = NULL; - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -static enum ia_css_err yuvpp_start(struct ia_css_pipe *pipe) -{ - struct ia_css_binary *copy_binary; - enum ia_css_err err = IA_CSS_SUCCESS; - enum sh_css_pipe_config_override copy_ovrd; - enum ia_css_input_mode yuvpp_pipe_input_mode; - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - yuvpp_pipe_input_mode = pipe->stream->config.mode; - - copy_binary = &pipe->pipe_settings.yuvpp.copy_binary; - - sh_css_metrics_start_frame(); - - /* multi stream video needs mipi buffers */ - -#if !defined(HAS_NO_INPUT_SYSTEM) && ( defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) ) - err = send_mipi_frames(pipe); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } -#endif - - { - unsigned int thread_id; - - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - copy_ovrd = 1 << thread_id; - } - - start_pipe(pipe, copy_ovrd, yuvpp_pipe_input_mode); - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - 
-static enum ia_css_err -sh_css_pipe_unload_binaries(struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - - if (pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - /* PIPE_MODE_COPY has no binaries, but has output frames to outside*/ - if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; - } - - switch (pipe->mode) { - case IA_CSS_PIPE_ID_PREVIEW: - err = unload_preview_binaries(pipe); - break; - case IA_CSS_PIPE_ID_VIDEO: - err = unload_video_binaries(pipe); - break; - case IA_CSS_PIPE_ID_CAPTURE: - err = unload_capture_binaries(pipe); - break; - case IA_CSS_PIPE_ID_YUVPP: - err = unload_yuvpp_binaries(pipe); - break; - default: - break; - } - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static enum ia_css_err -sh_css_pipe_load_binaries(struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - assert(pipe != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_pipe_load_binaries() enter:\n"); - - /* PIPE_MODE_COPY has no binaries, but has output frames to outside*/ - if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY) - return err; - - switch (pipe->mode) { - case IA_CSS_PIPE_ID_PREVIEW: - err = load_preview_binaries(pipe); - break; - case IA_CSS_PIPE_ID_VIDEO: - err = load_video_binaries(pipe); - break; - case IA_CSS_PIPE_ID_CAPTURE: - err = load_capture_binaries(pipe); - break; - case IA_CSS_PIPE_ID_YUVPP: - err = load_yuvpp_binaries(pipe); - break; - case IA_CSS_PIPE_ID_ACC: - break; - default: - err = IA_CSS_ERR_INTERNAL_ERROR; - break; - } - if (err != IA_CSS_SUCCESS) { - if (sh_css_pipe_unload_binaries(pipe) != IA_CSS_SUCCESS) { - /* currently css does not support multiple error returns in a single function, - * using IA_CSS_ERR_INTERNAL_ERROR in this case */ - err = IA_CSS_ERR_INTERNAL_ERROR; - } - } - return err; -} - -static enum 
ia_css_err -create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) -{ - struct ia_css_pipeline *me; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipeline_stage *vf_pp_stage = NULL, - *copy_stage = NULL, - *yuv_scaler_stage = NULL; - struct ia_css_binary *copy_binary, - *vf_pp_binary, - *yuv_scaler_binary; - bool need_scaler = false; - unsigned int num_stage, num_vf_pp_stage, num_output_stage; - unsigned int i, j; - - struct ia_css_frame *in_frame = NULL; - struct ia_css_frame *out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct ia_css_frame *bin_out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_frame *vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; - struct ia_css_pipeline_stage_desc stage_desc; - bool need_in_frameinfo_memory = false; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - bool sensor = false; - bool buffered_sensor = false; - bool online = false; - bool continuous = false; -#endif - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - if ((pipe == NULL) || (pipe->stream == NULL) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - me = &pipe->pipeline; - ia_css_pipeline_clean(me); - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - out_frame[i] = NULL; - vf_frame[i] = NULL; - } - ia_css_pipe_util_create_output_frames(bin_out_frame); - num_stage = pipe->pipe_settings.yuvpp.num_yuv_scaler; - num_vf_pp_stage = pipe->pipe_settings.yuvpp.num_vf_pp; - num_output_stage = pipe->pipe_settings.yuvpp.num_output; - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* When the input system is 2401, always enable 'in_frameinfo_memory' - * except for the following: - * - Direct Sensor Mode Online Capture - * - Direct Sensor Mode Continuous Capture - * - Buffered Sensor Mode Continuous Capture - */ - sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR; - buffered_sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR; - online = pipe->stream->config.online; - 
continuous = pipe->stream->config.continuous; - need_in_frameinfo_memory = - !((sensor && (online || continuous)) || (buffered_sensor && continuous)); -#else - /* Construct in_frame info (only in case we have dynamic input */ - need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY; -#endif - /* the input frame can come from: - * a) memory: connect yuvscaler to me->in_frame - * b) sensor, via copy binary: connect yuvscaler to copy binary later on */ - if (need_in_frameinfo_memory) { - /* TODO: improve for different input formats. */ - - /* - * "pipe->stream->config.input_config.format" represents the sensor output - * frame format, e.g. YUV422 8-bit. - * - * "in_frame_format" represents the imaging pipe's input frame format, e.g. - * Bayer-Quad RAW. - */ - int in_frame_format; - if (pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) { - in_frame_format = IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8; - } else if (pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8) { - /* - * When the sensor output frame format is "ATOMISP_INPUT_FORMAT_YUV422_8", - * the "isp_copy_var" binary is selected as the first stage in the yuvpp - * pipe. - * - * For the "isp_copy_var" binary, it reads the YUV422-8 pixels from - * the frame buffer (at DDR) to the frame-line buffer (at VMEM). - * - * By now, the "isp_copy_var" binary does NOT provide a separated - * frame-line buffer to store the YUV422-8 pixels. Instead, it stores - * the YUV422-8 pixels in the frame-line buffer which is designed to - * store the Bayer-Quad RAW pixels. - * - * To direct the "isp_copy_var" binary reading from the RAW frame-line - * buffer, its input frame format must be specified as "IA_CSS_FRAME_ - * FORMAT_RAW". 
- */ - in_frame_format = IA_CSS_FRAME_FORMAT_RAW; - } else { - in_frame_format = IA_CSS_FRAME_FORMAT_NV12; - } - - err = init_in_frameinfo_memory_defaults(pipe, - &me->in_frame, - in_frame_format); - - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - in_frame = &me->in_frame; - } else { - in_frame = NULL; - } - - for (i = 0; i < num_output_stage; i++) { - assert(i < IA_CSS_PIPE_MAX_OUTPUT_STAGE); - if (pipe->output_info[i].res.width != 0) { - err = init_out_frameinfo_defaults(pipe, &me->out_frame[i], i); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - out_frame[i] = &me->out_frame[i]; - } - - /* Construct vf_frame info (only in case we have VF) */ - if (pipe->vf_output_info[i].res.width != 0) { - err = init_vf_frameinfo_defaults(pipe, &me->vf_frame[i], i); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - vf_frame[i] = &me->vf_frame[i]; - } - } - - copy_binary = &pipe->pipe_settings.yuvpp.copy_binary; - vf_pp_binary = pipe->pipe_settings.yuvpp.vf_pp_binary; - yuv_scaler_binary = pipe->pipe_settings.yuvpp.yuv_scaler_binary; - need_scaler = need_yuv_scaler_stage(pipe); - - if (pipe->pipe_settings.yuvpp.copy_binary.info) { - - struct ia_css_frame *in_frame_local = NULL; - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* After isp copy is enabled in_frame needs to be passed. 
*/ - if (!online) - in_frame_local = in_frame; -#endif - - if (need_scaler) { - ia_css_pipe_util_set_output_frames(bin_out_frame, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary, - bin_out_frame, in_frame_local, NULL); - } else { - ia_css_pipe_util_set_output_frames(bin_out_frame, 0, out_frame[0]); - ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary, - bin_out_frame, in_frame_local, NULL); - } - - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - ©_stage); - - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - if (copy_stage) { - /* if we use yuv scaler binary, vf output should be from there */ - copy_stage->args.copy_vf = !need_scaler; - /* for yuvpp pipe, it should always be enabled */ - copy_stage->args.copy_output = true; - /* connect output of copy binary to input of yuv scaler */ - in_frame = copy_stage->args.out_frame[0]; - } - } - - if (need_scaler) { - struct ia_css_frame *tmp_out_frame = NULL; - struct ia_css_frame *tmp_vf_frame = NULL; - struct ia_css_frame *tmp_in_frame = in_frame; - - for (i = 0, j = 0; i < num_stage; i++) { - assert(j < num_output_stage); - if (pipe->pipe_settings.yuvpp.is_output_stage[i]) { - tmp_out_frame = out_frame[j]; - tmp_vf_frame = vf_frame[j]; - } else { - tmp_out_frame = NULL; - tmp_vf_frame = NULL; - } - - err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame, - NULL, - &yuv_scaler_binary[i], - &yuv_scaler_stage); - - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - /* we use output port 1 as internal output port */ - tmp_in_frame = yuv_scaler_stage->args.out_frame[1]; - if (pipe->pipe_settings.yuvpp.is_output_stage[i]) { - if (tmp_vf_frame && (tmp_vf_frame->info.res.width != 0)) { - in_frame = yuv_scaler_stage->args.out_vf_frame; - err = add_vf_pp_stage(pipe, in_frame, tmp_vf_frame, &vf_pp_binary[j], - &vf_pp_stage); - - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } 
- } - j++; - } - } - } else if (copy_stage != NULL) { - if (vf_frame[0] != NULL && vf_frame[0]->info.res.width != 0) { - in_frame = copy_stage->args.out_vf_frame; - err = add_vf_pp_stage(pipe, in_frame, vf_frame[0], &vf_pp_binary[0], - &vf_pp_stage); - } - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - - ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous); - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -create_host_copy_pipeline(struct ia_css_pipe *pipe, - unsigned max_input_width, - struct ia_css_frame *out_frame) -{ - struct ia_css_pipeline *me; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipeline_stage_desc stage_desc; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "create_host_copy_pipeline() enter:\n"); - - /* pipeline already created as part of create_host_pipeline_structure */ - me = &pipe->pipeline; - ia_css_pipeline_clean(me); - - /* Construct out_frame info */ - out_frame->contiguous = false; - out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; - - if (copy_on_sp(pipe) && - pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) { - ia_css_frame_info_init( - &out_frame->info, - JPEG_BYTES, - 1, - IA_CSS_FRAME_FORMAT_BINARY_8, - 0); - } else if (out_frame->info.format == IA_CSS_FRAME_FORMAT_RAW) { - out_frame->info.raw_bit_depth = - ia_css_pipe_util_pipe_input_format_bpp(pipe); - } - - me->num_stages = 1; - me->pipe_id = IA_CSS_PIPE_ID_COPY; - pipe->mode = IA_CSS_PIPE_ID_COPY; - - ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame, - IA_CSS_PIPELINE_RAW_COPY, max_input_width); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - NULL); - - ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "create_host_copy_pipeline() leave:\n"); - - return err; -} - -static enum ia_css_err 
-create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe) -{ - struct ia_css_pipeline *me = &pipe->pipeline; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_pipeline_stage_desc stage_desc; - struct ia_css_frame *out_frame = &me->out_frame[0]; - struct ia_css_pipeline_stage *out_stage = NULL; - unsigned int thread_id; - enum sh_css_queue_id queue_id; - unsigned int max_input_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "create_host_isyscopy_capture_pipeline() enter:\n"); - ia_css_pipeline_clean(me); - - /* Construct out_frame info */ - err = sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, 0); - if (err != IA_CSS_SUCCESS) - return err; - out_frame->contiguous = false; - out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE; - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id, &queue_id); - out_frame->dynamic_queue_id = queue_id; - out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME; - - me->num_stages = 1; - me->pipe_id = IA_CSS_PIPE_ID_CAPTURE; - pipe->mode = IA_CSS_PIPE_ID_CAPTURE; - ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame, - IA_CSS_PIPELINE_ISYS_COPY, max_input_width); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, &out_stage); - if(err != IA_CSS_SUCCESS) - return err; - - ia_css_pipeline_finalize_stages(me, pipe->stream->config.continuous); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "create_host_isyscopy_capture_pipeline() leave:\n"); - - return err; -} - -static enum ia_css_err -create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) -{ - struct ia_css_pipeline *me; - enum ia_css_err err = IA_CSS_SUCCESS; - enum ia_css_capture_mode mode; - struct ia_css_pipeline_stage *current_stage = NULL; - struct ia_css_pipeline_stage *yuv_scaler_stage = NULL; - struct ia_css_binary *copy_binary, - 
*primary_binary[MAX_NUM_PRIMARY_STAGES], - *vf_pp_binary, - *pre_isp_binary, - *anr_gdc_binary, - *post_isp_binary, - *yuv_scaler_binary, - *capture_pp_binary, - *capture_ldc_binary; - bool need_pp = false; - bool raw; - - struct ia_css_frame *in_frame; - struct ia_css_frame *out_frame; - struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_frame *vf_frame; - struct ia_css_pipeline_stage_desc stage_desc; - bool need_in_frameinfo_memory = false; -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - bool sensor = false; - bool buffered_sensor = false; - bool online = false; - bool continuous = false; -#endif - unsigned int i, num_yuv_scaler, num_primary_stage; - bool need_yuv_pp = false; - bool *is_output_stage = NULL; - bool need_ldc = false; - - IA_CSS_ENTER_PRIVATE(""); - assert(pipe != NULL); - assert(pipe->stream != NULL); - assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY); - - me = &pipe->pipeline; - mode = pipe->config.default_capture_config.mode; - raw = (mode == IA_CSS_CAPTURE_MODE_RAW); - ia_css_pipeline_clean(me); - ia_css_pipe_util_create_output_frames(out_frames); - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - /* When the input system is 2401, always enable 'in_frameinfo_memory' - * except for the following: - * - Direct Sensor Mode Online Capture - * - Direct Sensor Mode Online Capture - * - Direct Sensor Mode Continuous Capture - * - Buffered Sensor Mode Continuous Capture - */ - sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR); - buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR); - online = pipe->stream->config.online; - continuous = pipe->stream->config.continuous; - need_in_frameinfo_memory = - !((sensor && (online || continuous)) || (buffered_sensor && (online || continuous))); -#else - /* Construct in_frame info (only in case we have dynamic input */ - need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY; -#endif - if 
(need_in_frameinfo_memory) { - err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame, IA_CSS_FRAME_FORMAT_RAW); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - in_frame = &me->in_frame; - } else { - in_frame = NULL; - } - - err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - out_frame = &me->out_frame[0]; - - /* Construct vf_frame info (only in case we have VF) */ - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) { - if (mode == IA_CSS_CAPTURE_MODE_RAW || mode == IA_CSS_CAPTURE_MODE_BAYER) { - /* These modes don't support viewfinder output */ - vf_frame = NULL; - } else { - init_vf_frameinfo_defaults(pipe, &me->vf_frame[0], 0); - vf_frame = &me->vf_frame[0]; - } - } else { - vf_frame = NULL; - } - - copy_binary = &pipe->pipe_settings.capture.copy_binary; - num_primary_stage = pipe->pipe_settings.capture.num_primary_stage; - if ((num_primary_stage == 0) && (mode == IA_CSS_CAPTURE_MODE_PRIMARY)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - for (i = 0; i < num_primary_stage; i++) { - primary_binary[i] = &pipe->pipe_settings.capture.primary_binary[i]; - } - vf_pp_binary = &pipe->pipe_settings.capture.vf_pp_binary; - pre_isp_binary = &pipe->pipe_settings.capture.pre_isp_binary; - anr_gdc_binary = &pipe->pipe_settings.capture.anr_gdc_binary; - post_isp_binary = &pipe->pipe_settings.capture.post_isp_binary; - capture_pp_binary = &pipe->pipe_settings.capture.capture_pp_binary; - yuv_scaler_binary = pipe->pipe_settings.capture.yuv_scaler_binary; - num_yuv_scaler = pipe->pipe_settings.capture.num_yuv_scaler; - is_output_stage = pipe->pipe_settings.capture.is_output_stage; - capture_ldc_binary = &pipe->pipe_settings.capture.capture_ldc_binary; - - need_pp = (need_capture_pp(pipe) || pipe->output_stage) && - mode != IA_CSS_CAPTURE_MODE_RAW && - mode != IA_CSS_CAPTURE_MODE_BAYER; 
- need_yuv_pp = (yuv_scaler_binary != NULL && yuv_scaler_binary->info != NULL); - need_ldc = (capture_ldc_binary != NULL && capture_ldc_binary->info != NULL); - - if (pipe->pipe_settings.capture.copy_binary.info) { - if (raw) { - ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame); -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401) - if (!continuous) { - ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary, - out_frames, in_frame, NULL); - } else { - in_frame = pipe->stream->last_pipe->continuous_frames[0]; - ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary, - out_frames, in_frame, NULL); - } -#else - ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary, - out_frames, NULL, NULL); -#endif - } else { - ia_css_pipe_util_set_output_frames(out_frames, 0, in_frame); - ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary, - out_frames, NULL, NULL); - } - - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - ¤t_stage); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } else if (pipe->stream->config.continuous) { - in_frame = pipe->stream->last_pipe->continuous_frames[0]; - } - - if (mode == IA_CSS_CAPTURE_MODE_PRIMARY) { - struct ia_css_frame *local_in_frame = NULL; - struct ia_css_frame *local_out_frame = NULL; - - for (i = 0; i < num_primary_stage; i++) { - if (i == 0) - local_in_frame = in_frame; - else - local_in_frame = NULL; -#ifndef ISP2401 - if (!need_pp && (i == num_primary_stage - 1)) -#else - if (!need_pp && (i == num_primary_stage - 1) && !need_ldc) -#endif - local_out_frame = out_frame; - else - local_out_frame = NULL; - ia_css_pipe_util_set_output_frames(out_frames, 0, local_out_frame); -/* - * WARNING: The #if def flag has been added below as a - * temporary solution to solve the problem of enabling the - * view finder in a single binary in a capture flow. The - * vf-pp stage has been removed from Skycam in the solution - * provided. 
The vf-pp stage should be re-introduced when - * required. This * should not be considered as a clean solution. - * Proper investigation should be done to come up with the clean - * solution. - * */ - ia_css_pipe_get_generic_stage_desc(&stage_desc, primary_binary[i], - out_frames, local_in_frame, NULL); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - ¤t_stage); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - /* If we use copy iso primary, - the input must be yuv iso raw */ - current_stage->args.copy_vf = - primary_binary[0]->info->sp.pipeline.mode == - IA_CSS_BINARY_MODE_COPY; - current_stage->args.copy_output = current_stage->args.copy_vf; - } else if (mode == IA_CSS_CAPTURE_MODE_ADVANCED || - mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary, - out_frames, in_frame, NULL); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, NULL); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, anr_gdc_binary, - out_frames, NULL, NULL); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, NULL); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - if(need_pp) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, post_isp_binary, - out_frames, NULL, NULL); - } else { - ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame); - ia_css_pipe_get_generic_stage_desc(&stage_desc, post_isp_binary, - out_frames, NULL, NULL); - } - - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, ¤t_stage); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } else if (mode == IA_CSS_CAPTURE_MODE_BAYER) { - 
ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame); - ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary, - out_frames, in_frame, NULL); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - NULL); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - -#ifndef ISP2401 - if (need_pp && current_stage) { - struct ia_css_frame *local_in_frame = NULL; - local_in_frame = current_stage->args.out_frame[0]; - - if(need_ldc) { - ia_css_pipe_util_set_output_frames(out_frames, 0, NULL); - ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary, - out_frames, local_in_frame, NULL); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - ¤t_stage); - local_in_frame = current_stage->args.out_frame[0]; - } - err = add_capture_pp_stage(pipe, me, local_in_frame, need_yuv_pp ? NULL : out_frame, -#else - /* ldc and capture_pp not supported in same pipeline */ - if (need_ldc && current_stage) { - in_frame = current_stage->args.out_frame[0]; - ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame); - ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary, - out_frames, in_frame, NULL); - err = ia_css_pipeline_create_and_add_stage(me, - &stage_desc, - NULL); - } else if (need_pp && current_stage) { - in_frame = current_stage->args.out_frame[0]; - err = add_capture_pp_stage(pipe, me, in_frame, need_yuv_pp ? 
NULL : out_frame, -#endif - capture_pp_binary, - ¤t_stage); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - - if (need_yuv_pp && current_stage) { - struct ia_css_frame *tmp_in_frame = current_stage->args.out_frame[0]; - struct ia_css_frame *tmp_out_frame = NULL; - - for (i = 0; i < num_yuv_scaler; i++) { - if (is_output_stage[i] == true) - tmp_out_frame = out_frame; - else - tmp_out_frame = NULL; - - err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame, - NULL, - &yuv_scaler_binary[i], - &yuv_scaler_stage); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - /* we use output port 1 as internal output port */ - tmp_in_frame = yuv_scaler_stage->args.out_frame[1]; - } - } - -/* - * WARNING: The #if def flag has been added below as a - * temporary solution to solve the problem of enabling the - * view finder in a single binary in a capture flow. The vf-pp - * stage has been removed from Skycam in the solution provided. - * The vf-pp stage should be re-introduced when required. This - * should not be considered as a clean solution. Proper - * investigation should be done to come up with the clean solution. 
- * */ - if (mode != IA_CSS_CAPTURE_MODE_RAW && mode != IA_CSS_CAPTURE_MODE_BAYER && current_stage && vf_frame) { - in_frame = current_stage->args.out_vf_frame; - err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary, - ¤t_stage); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "create_host_regular_capture_pipeline() leave:\n"); - - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -create_host_capture_pipeline(struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - - if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY) - err = create_host_isyscopy_capture_pipeline(pipe); - else - err = create_host_regular_capture_pipeline(pipe); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - IA_CSS_LEAVE_ERR_PRIVATE(err); - - return err; -} - -static enum ia_css_err capture_start( - struct ia_css_pipe *pipe) -{ - struct ia_css_pipeline *me; - - enum ia_css_err err = IA_CSS_SUCCESS; - enum sh_css_pipe_config_override copy_ovrd; - - IA_CSS_ENTER_PRIVATE("pipe = %p", pipe); - if (pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - me = &pipe->pipeline; - - if ((pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW || - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER ) && - (pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) { - if (copy_on_sp(pipe)) { - err = start_copy_on_sp(pipe, &me->out_frame[0]); - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - -#if defined(USE_INPUT_SYSTEM_VERSION_2) - /* old isys: need to send_mipi_frames() in all pipe modes */ - err = send_mipi_frames(pipe); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } -#elif 
defined(USE_INPUT_SYSTEM_VERSION_2401) - if (pipe->config.mode != IA_CSS_PIPE_MODE_COPY) { - err = send_mipi_frames(pipe); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - -#endif - - { - unsigned int thread_id; - - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - copy_ovrd = 1 << thread_id; - - } - start_pipe(pipe, copy_ovrd, pipe->stream->config.mode); - -#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401) - /* - * old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured, - * which is currently done in start_binary(); but COPY pipe contains no binary, - * and does not call start_binary(); so we need to configure the rx here. - */ - if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY && pipe->stream->reconfigure_css_rx) { - ia_css_isys_rx_configure(&pipe->stream->csi_rx_config, pipe->stream->config.mode); - pipe->stream->reconfigure_css_rx = false; - } -#endif - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - -} - -static enum ia_css_err -sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe, - struct ia_css_frame_info *info, - unsigned int idx) -{ - assert(pipe != NULL); - assert(info != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_pipe_get_output_frame_info() enter:\n"); - - *info = pipe->output_info[idx]; - if (copy_on_sp(pipe) && - pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) { - ia_css_frame_info_init( - info, - JPEG_BYTES, - 1, - IA_CSS_FRAME_FORMAT_BINARY_8, - 0); - } else if (info->format == IA_CSS_FRAME_FORMAT_RAW || - info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) { - info->raw_bit_depth = - ia_css_pipe_util_pipe_input_format_bpp(pipe); - - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_pipe_get_output_frame_info() leave:\n"); - return IA_CSS_SUCCESS; -} - -#if !defined(HAS_NO_INPUT_SYSTEM) -void -ia_css_stream_send_input_frame(const struct ia_css_stream *stream, - const 
unsigned short *data, - unsigned int width, - unsigned int height) -{ - assert(stream != NULL); - - ia_css_inputfifo_send_input_frame( - data, width, height, - stream->config.channel_id, - stream->config.input_config.format, - stream->config.pixels_per_clock == 2); -} - -void -ia_css_stream_start_input_frame(const struct ia_css_stream *stream) -{ - assert(stream != NULL); - - ia_css_inputfifo_start_frame( - stream->config.channel_id, - stream->config.input_config.format, - stream->config.pixels_per_clock == 2); -} - -void -ia_css_stream_send_input_line(const struct ia_css_stream *stream, - const unsigned short *data, - unsigned int width, - const unsigned short *data2, - unsigned int width2) -{ - assert(stream != NULL); - - ia_css_inputfifo_send_line(stream->config.channel_id, - data, width, data2, width2); -} - -void -ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream, - enum atomisp_input_format format, - const unsigned short *data, - unsigned int width) -{ - assert(stream != NULL); - if (data == NULL || width == 0) - return; - ia_css_inputfifo_send_embedded_line(stream->config.channel_id, - format, data, width); -} - -void -ia_css_stream_end_input_frame(const struct ia_css_stream *stream) -{ - assert(stream != NULL); - - ia_css_inputfifo_end_frame(stream->config.channel_id); -} -#endif - -static void -append_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware) -{ - IA_CSS_ENTER_PRIVATE("l = %p, firmware = %p", l , firmware); - if (l == NULL) { - IA_CSS_ERROR("NULL fw_info"); - IA_CSS_LEAVE_PRIVATE(""); - return; - } - while (*l) - l = &(*l)->next; - *l = firmware; - /*firmware->next = NULL;*/ /* when multiple acc extensions are loaded, 'next' can be not NULL */ - IA_CSS_LEAVE_PRIVATE(""); -} - -static void -remove_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware) -{ - assert(*l); - assert(firmware); - (void)l; - (void)firmware; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "remove_firmware() 
enter:\n"); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "remove_firmware() leave:\n"); - return; /* removing single and multiple firmware is handled in acc_unload_extension() */ -} - -static enum ia_css_err upload_isp_code(struct ia_css_fw_info *firmware) -{ - hrt_vaddress binary; - - if (firmware == NULL) { - IA_CSS_ERROR("NULL input parameter"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - binary = firmware->info.isp.xmem_addr; - - if (!binary) { - unsigned size = firmware->blob.size; - const unsigned char *blob; - const unsigned char *binary_name; - binary_name = - (const unsigned char *)(IA_CSS_EXT_ISP_PROG_NAME( - firmware)); - blob = binary_name + - strlen((const char *)binary_name) + - 1; - binary = sh_css_load_blob(blob, size); - firmware->info.isp.xmem_addr = binary; - } - - if (!binary) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -acc_load_extension(struct ia_css_fw_info *firmware) -{ - enum ia_css_err err; - struct ia_css_fw_info *hd = firmware; - while (hd){ - err = upload_isp_code(hd); - if (err != IA_CSS_SUCCESS) - return err; - hd = hd->next; - } - - if (firmware == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - firmware->loaded = true; - return IA_CSS_SUCCESS; -} - -static void -acc_unload_extension(struct ia_css_fw_info *firmware) -{ - struct ia_css_fw_info *hd = firmware; - struct ia_css_fw_info *hdn = NULL; - - if (firmware == NULL) /* should not happen */ - return; - /* unload and remove multiple firmwares */ - while (hd){ - hdn = (hd->next) ? 
&(*hd->next) : NULL; - if (hd->info.isp.xmem_addr) { - hmm_free(hd->info.isp.xmem_addr); - hd->info.isp.xmem_addr = mmgr_NULL; - } - hd->isp_code = NULL; - hd->next = NULL; - hd = hdn; - } - - firmware->loaded = false; -} -/* Load firmware for extension */ -static enum ia_css_err -ia_css_pipe_load_extension(struct ia_css_pipe *pipe, - struct ia_css_fw_info *firmware) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE("fw = %p pipe = %p", firmware, pipe); - - if ((firmware == NULL) || (pipe == NULL)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if (firmware->info.isp.type == IA_CSS_ACC_OUTPUT) { - if (&pipe->output_stage != NULL) - append_firmware(&pipe->output_stage, firmware); - else { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - } - else if (firmware->info.isp.type == IA_CSS_ACC_VIEWFINDER) { - if (&pipe->vf_stage != NULL) - append_firmware(&pipe->vf_stage, firmware); - else { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - return IA_CSS_ERR_INTERNAL_ERROR; - } - } - err = acc_load_extension(firmware); - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -/* Unload firmware for extension */ -static void -ia_css_pipe_unload_extension(struct ia_css_pipe *pipe, - struct ia_css_fw_info *firmware) -{ - IA_CSS_ENTER_PRIVATE("fw = %p pipe = %p", firmware, pipe); - - if ((firmware == NULL) || (pipe == NULL)) { - IA_CSS_ERROR("NULL input parameters"); - IA_CSS_LEAVE_PRIVATE(""); - return; - } - - if (firmware->info.isp.type == IA_CSS_ACC_OUTPUT) - remove_firmware(&pipe->output_stage, firmware); - else if (firmware->info.isp.type == IA_CSS_ACC_VIEWFINDER) - remove_firmware(&pipe->vf_stage, firmware); - acc_unload_extension(firmware); - - IA_CSS_LEAVE_PRIVATE(""); -} - -bool -ia_css_pipeline_uses_params(struct ia_css_pipeline *me) -{ - struct ia_css_pipeline_stage *stage; - - assert(me != NULL); - - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_uses_params() enter: me=%p\n", me); - - for (stage = me->stages; stage; stage = stage->next) - if (stage->binary_info && stage->binary_info->enable.params) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_uses_params() leave: " - "return_bool=true\n"); - return true; - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipeline_uses_params() leave: return_bool=false\n"); - return false; -} - -static enum ia_css_err -sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline, - const void *acc_fw) -{ - struct ia_css_fw_info *fw = (struct ia_css_fw_info *)acc_fw; - /* In QoS case, load_extension already called, so skipping */ - enum ia_css_err err = IA_CSS_SUCCESS; - if (fw->loaded == false) - err = acc_load_extension(fw); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "sh_css_pipeline_add_acc_stage() enter: pipeline=%p," - " acc_fw=%p\n", pipeline, acc_fw); - - if (err == IA_CSS_SUCCESS) { - struct ia_css_pipeline_stage_desc stage_desc; - ia_css_pipe_get_acc_stage_desc(&stage_desc, NULL, fw); - err = ia_css_pipeline_create_and_add_stage(pipeline, - &stage_desc, - NULL); - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "sh_css_pipeline_add_acc_stage() leave: return_err=%d\n",err); - return err; -} - -/* - * @brief Tag a specific frame in continuous capture. - * Refer to "sh_css_internal.h" for details. - */ -enum ia_css_err ia_css_stream_capture_frame(struct ia_css_stream *stream, - unsigned int exp_id) -{ - struct sh_css_tag_descr tag_descr; - uint32_t encoded_tag_descr; - enum ia_css_err err; - - assert(stream != NULL); - IA_CSS_ENTER("exp_id=%d", exp_id); - - /* Only continuous streams have a tagger */ - if (exp_id == 0 || !stream->config.continuous) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if (!sh_css_sp_is_running()) { - /* SP is not running. 
The queues are not valid */ - IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - /* Create the tag descriptor from the parameters */ - sh_css_create_tag_descr(0, 0, 0, exp_id, &tag_descr); - /* Encode the tag descriptor into a 32-bit value */ - encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr); - /* Enqueue the encoded tag to the host2sp queue. - * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0 - * on both host and the SP side. - * It is mainly because it is enough to have only one tag_cmd queue */ - err= ia_css_bufq_enqueue_tag_cmd(encoded_tag_descr); - - IA_CSS_LEAVE_ERR(err); - return err; -} - -/* - * @brief Configure the continuous capture. - * Refer to "sh_css_internal.h" for details. - */ -enum ia_css_err ia_css_stream_capture( - struct ia_css_stream *stream, - int num_captures, - unsigned int skip, - int offset) -{ - struct sh_css_tag_descr tag_descr; - unsigned int encoded_tag_descr; - enum ia_css_err return_err; - - if (stream == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_stream_capture() enter: num_captures=%d," - " skip=%d, offset=%d\n", num_captures, skip,offset); - - /* Check if the tag descriptor is valid */ - if (num_captures < SH_CSS_MINIMUM_TAG_ID) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_stream_capture() leave: return_err=%d\n", - IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - /* Create the tag descriptor from the parameters */ - sh_css_create_tag_descr(num_captures, skip, offset, 0, &tag_descr); - - - /* Encode the tag descriptor into a 32-bit value */ - encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr); - - if (!sh_css_sp_is_running()) { - /* SP is not running. 
The queues are not valid */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_stream_capture() leaving:" - "queues unavailable\n"); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - - /* Enqueue the encoded tag to the host2sp queue. - * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0 - * on both host and the SP side. - * It is mainly because it is enough to have only one tag_cmd queue */ - return_err = ia_css_bufq_enqueue_tag_cmd((uint32_t)encoded_tag_descr); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_stream_capture() leave: return_err=%d\n", - return_err); - - return return_err; -} - -void ia_css_stream_request_flash(struct ia_css_stream *stream) -{ - (void)stream; - - assert(stream != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_request_flash() enter: void\n"); - -#ifndef ISP2401 - sh_css_write_host2sp_command(host2sp_cmd_start_flash); -#else - if (sh_css_sp_is_running()) { - if (!sh_css_write_host2sp_command(host2sp_cmd_start_flash)) { - IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed"); - ia_css_debug_dump_sp_sw_debug_info(); - ia_css_debug_dump_debug_info(NULL); - } - } else - IA_CSS_LOG("SP is not running!"); - -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_stream_request_flash() leave: return_void\n"); -} - -static void -sh_css_init_host_sp_control_vars(void) -{ - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started; - - unsigned int HIVE_ADDR_host_sp_queues_initialized; - unsigned int HIVE_ADDR_sp_sleep_mode; - unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb; -#ifndef ISP2401 - unsigned int HIVE_ADDR_sp_stop_copy_preview; -#endif - unsigned int HIVE_ADDR_host_sp_com; - unsigned int o = offsetof(struct host_sp_communication, host2sp_command) - / sizeof(int); - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - unsigned int i; -#endif - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - 
"sh_css_init_host_sp_control_vars() enter: void\n"); - - fw = &sh_css_sp_fw; - HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started; - - HIVE_ADDR_host_sp_queues_initialized = - fw->info.sp.host_sp_queues_initialized; - HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode; - HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb; -#ifndef ISP2401 - HIVE_ADDR_sp_stop_copy_preview = fw->info.sp.stop_copy_preview; -#endif - HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com; - - (void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppres warnings in CRUN */ - - (void)HIVE_ADDR_sp_sleep_mode; - (void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb; -#ifndef ISP2401 - (void)HIVE_ADDR_sp_stop_copy_preview; -#endif - (void)HIVE_ADDR_host_sp_com; - - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(ia_css_ispctrl_sp_isp_started), - (uint32_t)(0)); - - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(host_sp_queues_initialized), - (uint32_t)(0)); - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(sp_sleep_mode), - (uint32_t)(0)); - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb), - (uint32_t)(false)); -#ifndef ISP2401 - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(sp_stop_copy_preview), - my_css.stop_copy_preview?(uint32_t)(1):(uint32_t)(0)); -#endif - store_sp_array_uint(host_sp_com, o, host2sp_cmd_ready); - -#if !defined(HAS_NO_INPUT_SYSTEM) - for (i = 0; i < N_CSI_PORTS; i++) { - sh_css_update_host2sp_num_mipi_frames - (my_css.num_mipi_frames[i]); - } -#endif - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_init_host_sp_control_vars() leave: return_void\n"); -} - -/* - * create the internal structures and fill in the configuration data - */ -void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_config_defaults()\n"); - *pipe_config = DEFAULT_PIPE_CONFIG; -} - -void 
-ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config *extra_config) -{ - if (extra_config == NULL) { - IA_CSS_ERROR("NULL input parameter"); - return; - } - - extra_config->enable_raw_binning = false; - extra_config->enable_yuv_ds = false; - extra_config->enable_high_speed = false; - extra_config->enable_dvs_6axis = false; - extra_config->enable_reduced_pipe = false; - extra_config->disable_vf_pp = false; - extra_config->enable_fractional_ds = false; -} - -void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_config_defaults()\n"); - assert(stream_config != NULL); - memset(stream_config, 0, sizeof(*stream_config)); - stream_config->online = true; - stream_config->left_padding = -1; - stream_config->pixels_per_clock = 1; - /* temporary default value for backwards compatibility. - * This field used to be hardcoded within CSS but this has now - * been moved to the stream_config struct. */ - stream_config->source.port.rxcount = 0x04040404; -} - -static enum ia_css_err -ia_css_acc_pipe_create(struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - if (pipe == NULL) { - IA_CSS_ERROR("NULL input parameter"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - /* There is not meaning for num_execs = 0 semantically. Run atleast once. 
*/ - if (pipe->config.acc_num_execs == 0) - pipe->config.acc_num_execs = 1; - - if (pipe->config.acc_extension) { - err = ia_css_pipe_load_extension(pipe, pipe->config.acc_extension); - } - - return err; -} - -enum ia_css_err -ia_css_pipe_create(const struct ia_css_pipe_config *config, - struct ia_css_pipe **pipe) -{ -#ifndef ISP2401 - if (config == NULL) -#else - enum ia_css_err err = IA_CSS_SUCCESS; - IA_CSS_ENTER_PRIVATE("config = %p, pipe = %p", config, pipe); - - if (config == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); -#endif - return IA_CSS_ERR_INVALID_ARGUMENTS; -#ifndef ISP2401 - if (pipe == NULL) -#else - } - if (pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); -#endif - return IA_CSS_ERR_INVALID_ARGUMENTS; -#ifndef ISP2401 - return ia_css_pipe_create_extra(config, NULL, pipe); -#else - } - - err = ia_css_pipe_create_extra(config, NULL, pipe); - - if(err == IA_CSS_SUCCESS) { - IA_CSS_LOG("pipe created successfully = %p", *pipe); - } - - IA_CSS_LEAVE_ERR_PRIVATE(err); - - return err; -#endif -} - -enum ia_css_err -ia_css_pipe_create_extra(const struct ia_css_pipe_config *config, - const struct ia_css_pipe_extra_config *extra_config, - struct ia_css_pipe **pipe) -{ - enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR; - struct ia_css_pipe *internal_pipe = NULL; - unsigned int i; - - IA_CSS_ENTER_PRIVATE("config = %p, extra_config = %p and pipe = %p", config, extra_config, pipe); - - /* do not allow to create more than the maximum limit */ - if (my_css.pipe_counter >= IA_CSS_PIPELINE_NUM_MAX) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_EXHAUSTED); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if ((pipe == NULL) || (config == NULL)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - ia_css_debug_dump_pipe_config(config); - ia_css_debug_dump_pipe_extra_config(extra_config); - - err = create_pipe(config->mode, &internal_pipe, false); - if (err != 
IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - /* now we have a pipe structure to fill */ - internal_pipe->config = *config; - if (extra_config) - internal_pipe->extra_config = *extra_config; - else - ia_css_pipe_extra_config_defaults(&internal_pipe->extra_config); - - if (config->mode == IA_CSS_PIPE_MODE_ACC) { - /* Temporary hack to migrate acceleration to CSS 2.0. - * In the future the code for all pipe types should be - * unified. */ - *pipe = internal_pipe; - if (!internal_pipe->config.acc_extension && - internal_pipe->config.num_acc_stages == 0){ /* if no acc binary and no standalone stage */ - *pipe = NULL; - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; - } - return ia_css_acc_pipe_create(internal_pipe); - } - - /* Use config value when dvs_frame_delay setting equal to 2, otherwise always 1 by default */ - if (internal_pipe->config.dvs_frame_delay == IA_CSS_FRAME_DELAY_2) - internal_pipe->dvs_frame_delay = 2; - else - internal_pipe->dvs_frame_delay = 1; - - - /* we still keep enable_raw_binning for backward compatibility, for any new - fractional bayer downscaling, we should use bayer_ds_out_res. if both are - specified, bayer_ds_out_res will take precedence.if none is specified, we - set bayer_ds_out_res equal to IF output resolution(IF may do cropping on - sensor output) or use default decimation factor 1. 
*/ - if (internal_pipe->extra_config.enable_raw_binning && - internal_pipe->config.bayer_ds_out_res.width) { - /* fill some code here, if no code is needed, please remove it during integration */ - } - - /* YUV downscaling */ - if ((internal_pipe->config.vf_pp_in_res.width || - internal_pipe->config.capt_pp_in_res.width)) { - enum ia_css_frame_format format; - if (internal_pipe->config.vf_pp_in_res.width) { - format = IA_CSS_FRAME_FORMAT_YUV_LINE; - ia_css_frame_info_init( - &internal_pipe->vf_yuv_ds_input_info, - internal_pipe->config.vf_pp_in_res.width, - internal_pipe->config.vf_pp_in_res.height, - format, 0); - } - if (internal_pipe->config.capt_pp_in_res.width) { - format = IA_CSS_FRAME_FORMAT_YUV420; - ia_css_frame_info_init( - &internal_pipe->out_yuv_ds_input_info, - internal_pipe->config.capt_pp_in_res.width, - internal_pipe->config.capt_pp_in_res.height, - format, 0); - } - } - if (internal_pipe->config.vf_pp_in_res.width && - internal_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) { - ia_css_frame_info_init( - &internal_pipe->vf_yuv_ds_input_info, - internal_pipe->config.vf_pp_in_res.width, - internal_pipe->config.vf_pp_in_res.height, - IA_CSS_FRAME_FORMAT_YUV_LINE, 0); - } - /* handle bayer downscaling output info */ - if (internal_pipe->config.bayer_ds_out_res.width) { - ia_css_frame_info_init( - &internal_pipe->bds_output_info, - internal_pipe->config.bayer_ds_out_res.width, - internal_pipe->config.bayer_ds_out_res.height, - IA_CSS_FRAME_FORMAT_RAW, 0); - } - - /* handle output info, assume always needed */ - for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) { - if (internal_pipe->config.output_info[i].res.width) { - err = sh_css_pipe_configure_output( - internal_pipe, - internal_pipe->config.output_info[i].res.width, - internal_pipe->config.output_info[i].res.height, - internal_pipe->config.output_info[i].padded_width, - internal_pipe->config.output_info[i].format, - i); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - 
sh_css_free(internal_pipe); - internal_pipe = NULL; - return err; - } - } - - /* handle vf output info, when configured */ - internal_pipe->enable_viewfinder[i] = (internal_pipe->config.vf_output_info[i].res.width != 0); - if (internal_pipe->config.vf_output_info[i].res.width) { - err = sh_css_pipe_configure_viewfinder( - internal_pipe, - internal_pipe->config.vf_output_info[i].res.width, - internal_pipe->config.vf_output_info[i].res.height, - internal_pipe->config.vf_output_info[i].padded_width, - internal_pipe->config.vf_output_info[i].format, - i); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - sh_css_free(internal_pipe); - internal_pipe = NULL; - return err; - } - } - } - if (internal_pipe->config.acc_extension) { - err = ia_css_pipe_load_extension(internal_pipe, - internal_pipe->config.acc_extension); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - sh_css_free(internal_pipe); - return err; - } - } - /* set all info to zeroes first */ - memset(&internal_pipe->info, 0, sizeof(internal_pipe->info)); - - /* all went well, return the pipe */ - *pipe = internal_pipe; - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - - -enum ia_css_err -ia_css_pipe_get_info(const struct ia_css_pipe *pipe, - struct ia_css_pipe_info *pipe_info) -{ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_pipe_get_info()\n"); - assert(pipe_info != NULL); - if (pipe_info == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "ia_css_pipe_get_info: pipe_info cannot be NULL\n"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - if (pipe == NULL || pipe->stream == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "ia_css_pipe_get_info: ia_css_stream_create needs to" - " be called before ia_css_[stream/pipe]_get_info\n"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - /* we succeeded return the info */ - *pipe_info = pipe->info; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_get_info() leave\n"); - return IA_CSS_SUCCESS; -} - -bool 
ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info) -{ - unsigned int i; - - if (pipe_info != NULL) { - for (i = 0; i < IA_CSS_DVS_STAT_NUM_OF_LEVELS; i++) { - if (pipe_info->grid_info.dvs_grid.dvs_stat_grid_info.grd_cfg[i].grd_start.enable) - return true; - } - } - - return false; -} - -#ifdef ISP2401 -enum ia_css_err -ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe, - int pin_index, - enum ia_css_frame_format new_format) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE("pipe = %p, pin_index = %d, new_formats = %d", pipe, pin_index, new_format); - - if (NULL == pipe) { - IA_CSS_ERROR("pipe is not set"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - if (0 != pin_index && 1 != pin_index) { - IA_CSS_ERROR("pin index is not valid"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - if (IA_CSS_FRAME_FORMAT_NV12_TILEY != new_format) { - IA_CSS_ERROR("new format is not valid"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } else { - err = ia_css_pipe_check_format(pipe, new_format); - if (IA_CSS_SUCCESS == err) { - if (pin_index == 0) { - pipe->output_info[0].format = new_format; - } else { - pipe->vf_output_info[0].format = new_format; - } - } - } - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -#endif -#if defined(USE_INPUT_SYSTEM_VERSION_2) -/* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */ -static enum ia_css_err -ia_css_stream_configure_rx(struct ia_css_stream *stream) -{ - struct ia_css_input_port *config; - assert(stream != NULL); - - config = &stream->config.source.port; -/* AM: this code is not reliable, especially for 2400 */ - if (config->num_lanes == 1) - stream->csi_rx_config.mode = MONO_1L_1L_0L; - else if (config->num_lanes == 2) - stream->csi_rx_config.mode = MONO_2L_1L_0L; - else if (config->num_lanes == 3) - stream->csi_rx_config.mode = MONO_3L_1L_0L; - else if 
(config->num_lanes == 4) - stream->csi_rx_config.mode = MONO_4L_1L_0L; - else if (config->num_lanes != 0) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - if (config->port > MIPI_PORT2_ID) - return IA_CSS_ERR_INVALID_ARGUMENTS; - stream->csi_rx_config.port = - ia_css_isys_port_to_mipi_port(config->port); - stream->csi_rx_config.timeout = config->timeout; - stream->csi_rx_config.initcount = 0; - stream->csi_rx_config.synccount = 0x28282828; - stream->csi_rx_config.rxcount = config->rxcount; - if (config->compression.type == IA_CSS_CSI2_COMPRESSION_TYPE_NONE) - stream->csi_rx_config.comp = MIPI_PREDICTOR_NONE; - else { - /* not implemented yet, requires extension of the rx_cfg_t - * struct */ - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - stream->csi_rx_config.is_two_ppc = (stream->config.pixels_per_clock == 2); - stream->reconfigure_css_rx = true; - return IA_CSS_SUCCESS; -} -#endif - -static struct ia_css_pipe * -find_pipe(struct ia_css_pipe *pipes[], - unsigned int num_pipes, - enum ia_css_pipe_mode mode, - bool copy_pipe) -{ - unsigned i; - assert(pipes != NULL); - for (i = 0; i < num_pipes; i++) { - assert(pipes[i] != NULL); - if (pipes[i]->config.mode != mode) - continue; - if (copy_pipe && pipes[i]->mode != IA_CSS_PIPE_ID_COPY) - continue; - return pipes[i]; - } - return NULL; -} - -static enum ia_css_err -ia_css_acc_stream_create(struct ia_css_stream *stream) -{ - int i; - enum ia_css_err err = IA_CSS_SUCCESS; - - assert(stream != NULL); - IA_CSS_ENTER_PRIVATE("stream = %p", stream); - - if (stream == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - for (i = 0; i < stream->num_pipes; i++) { - struct ia_css_pipe *pipe = stream->pipes[i]; - assert(pipe != NULL); - if (pipe == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - pipe->stream = stream; - } - - /* Map SP threads before doing anything. 
*/ - err = map_sp_threads(stream, true); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - for (i = 0; i < stream->num_pipes; i++) { - struct ia_css_pipe *pipe = stream->pipes[i]; - assert(pipe != NULL); - ia_css_pipe_map_queue(pipe, true); - } - - err = create_host_pipeline_structure(stream); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - stream->started = false; - - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -metadata_info_init(const struct ia_css_metadata_config *mdc, - struct ia_css_metadata_info *md) -{ - /* Either both width and height should be set or neither */ - if ((mdc->resolution.height > 0) ^ (mdc->resolution.width > 0)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - md->resolution = mdc->resolution; - /* We round up the stride to a multiple of the width - * of the port going to DDR, this is a HW requirements (DMA). */ - md->stride = CEIL_MUL(mdc->resolution.width, HIVE_ISP_DDR_WORD_BYTES); - md->size = mdc->resolution.height * md->stride; - return IA_CSS_SUCCESS; -} - -#ifdef ISP2401 -static enum ia_css_err check_pipe_resolutions(const struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE(""); - - if (!pipe || !pipe->stream) { - IA_CSS_ERROR("null arguments"); - err = IA_CSS_ERR_INTERNAL_ERROR; - goto EXIT; - } - - if (ia_css_util_check_res(pipe->config.input_effective_res.width, - pipe->config.input_effective_res.height) != IA_CSS_SUCCESS) { - IA_CSS_ERROR("effective resolution not supported"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - goto EXIT; - } - if (!ia_css_util_resolution_is_zero(pipe->stream->config.input_config.input_res)) { - if (!ia_css_util_res_leq(pipe->config.input_effective_res, - pipe->stream->config.input_config.input_res)) { - IA_CSS_ERROR("effective resolution is larger than input resolution"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - goto EXIT; - } - } - if 
(!ia_css_util_resolution_is_even(pipe->config.output_info[0].res)) { - IA_CSS_ERROR("output resolution must be even"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - goto EXIT; - } - if (!ia_css_util_resolution_is_even(pipe->config.vf_output_info[0].res)) { - IA_CSS_ERROR("VF resolution must be even"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - goto EXIT; - } -EXIT: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -#endif - -enum ia_css_err -ia_css_stream_create(const struct ia_css_stream_config *stream_config, - int num_pipes, - struct ia_css_pipe *pipes[], - struct ia_css_stream **stream) -{ - struct ia_css_pipe *curr_pipe; - struct ia_css_stream *curr_stream = NULL; - bool spcopyonly; - bool sensor_binning_changed; - int i, j; - enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR; - struct ia_css_metadata_info md_info; -#ifndef ISP2401 - struct ia_css_resolution effective_res; -#else -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - bool aspect_ratio_crop_enabled = false; -#endif -#endif - - IA_CSS_ENTER("num_pipes=%d", num_pipes); - ia_css_debug_dump_stream_config(stream_config, num_pipes); - - /* some checks */ - if (num_pipes == 0 || - stream == NULL || - pipes == NULL) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR(err); - return err; - } - -#if defined(USE_INPUT_SYSTEM_VERSION_2) - /* We don't support metadata for JPEG stream, since they both use str2mem */ - if (stream_config->input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8 && - stream_config->metadata_config.resolution.height > 0) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR(err); - return err; - } -#endif - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - if (stream_config->online && stream_config->pack_raw_pixels) { - IA_CSS_LOG("online and pack raw is invalid on input system 2401"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR(err); - return err; - } -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) - ia_css_debug_pipe_graph_dump_stream_config(stream_config); - - /* check if mipi size specified 
*/ - if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - if (!stream_config->online) -#endif - { - unsigned int port = (unsigned int) stream_config->source.port.port; - if (port >= N_MIPI_PORT_ID) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR(err); - return err; - } - - if (my_css.size_mem_words != 0){ - my_css.mipi_frame_size[port] = my_css.size_mem_words; - } else if (stream_config->mipi_buffer_config.size_mem_words != 0) { - my_css.mipi_frame_size[port] = stream_config->mipi_buffer_config.size_mem_words; - } else { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_stream_create() exit: error, need to set mipi frame size.\n"); - assert(stream_config->mipi_buffer_config.size_mem_words != 0); - err = IA_CSS_ERR_INTERNAL_ERROR; - IA_CSS_LEAVE_ERR(err); - return err; - } - - if (my_css.size_mem_words != 0) { - my_css.num_mipi_frames[port] = 2; /* Temp change: Default for backwards compatibility. */ - } else if (stream_config->mipi_buffer_config.nof_mipi_buffers != 0) { - my_css.num_mipi_frames[port] = stream_config->mipi_buffer_config.nof_mipi_buffers; - } else { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_stream_create() exit: error, need to set number of mipi frames.\n"); - assert(stream_config->mipi_buffer_config.nof_mipi_buffers != 0); - err = IA_CSS_ERR_INTERNAL_ERROR; - IA_CSS_LEAVE_ERR(err); - return err; - } - - } -#endif - - /* Currently we only supported metadata up to a certain size. 
*/ - err = metadata_info_init(&stream_config->metadata_config, &md_info); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } - - /* allocate the stream instance */ - curr_stream = kmalloc(sizeof(struct ia_css_stream), GFP_KERNEL); - if (!curr_stream) { - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - IA_CSS_LEAVE_ERR(err); - return err; - } - /* default all to 0 */ - memset(curr_stream, 0, sizeof(struct ia_css_stream)); - curr_stream->info.metadata_info = md_info; - - /* allocate pipes */ - curr_stream->num_pipes = num_pipes; - curr_stream->pipes = kzalloc(num_pipes * sizeof(struct ia_css_pipe *), GFP_KERNEL); - if (!curr_stream->pipes) { - curr_stream->num_pipes = 0; - kfree(curr_stream); - curr_stream = NULL; - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - IA_CSS_LEAVE_ERR(err); - return err; - } - /* store pipes */ - spcopyonly = (num_pipes == 1) && (pipes[0]->config.mode == IA_CSS_PIPE_MODE_COPY); - for (i = 0; i < num_pipes; i++) - curr_stream->pipes [i] = pipes[i]; - curr_stream->last_pipe = curr_stream->pipes[0]; - /* take over stream config */ - curr_stream->config = *stream_config; - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) && defined(CSI2P_DISABLE_ISYS2401_ONLINE_MODE) - if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR && - stream_config->online) - curr_stream->config.online = false; -#endif - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - if (curr_stream->config.online) { - curr_stream->config.source.port.num_lanes = stream_config->source.port.num_lanes; - curr_stream->config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR; - } -#endif - /* in case driver doesn't configure init number of raw buffers, configure it here */ - if (curr_stream->config.target_num_cont_raw_buf == 0) - curr_stream->config.target_num_cont_raw_buf = NUM_CONTINUOUS_FRAMES; - if (curr_stream->config.init_num_cont_raw_buf == 0) - curr_stream->config.init_num_cont_raw_buf = curr_stream->config.target_num_cont_raw_buf; - - /* Enable locking & unlocking of buffers in 
RAW buffer pool */ - if (curr_stream->config.ia_css_enable_raw_buffer_locking) - sh_css_sp_configure_enable_raw_pool_locking( - curr_stream->config.lock_all); - - /* copy mode specific stuff */ - switch (curr_stream->config.mode) { - case IA_CSS_INPUT_MODE_SENSOR: - case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: -#if defined(USE_INPUT_SYSTEM_VERSION_2) - ia_css_stream_configure_rx(curr_stream); -#endif - break; - case IA_CSS_INPUT_MODE_TPG: -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) - IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d", - curr_stream->config.source.tpg.x_mask, - curr_stream->config.source.tpg.y_mask, - curr_stream->config.source.tpg.x_delta, - curr_stream->config.source.tpg.y_delta, - curr_stream->config.source.tpg.xy_mask); - - sh_css_sp_configure_tpg( - curr_stream->config.source.tpg.x_mask, - curr_stream->config.source.tpg.y_mask, - curr_stream->config.source.tpg.x_delta, - curr_stream->config.source.tpg.y_delta, - curr_stream->config.source.tpg.xy_mask); -#endif - break; - case IA_CSS_INPUT_MODE_PRBS: -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) - IA_CSS_LOG("mode prbs"); - sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed); -#endif - break; - case IA_CSS_INPUT_MODE_MEMORY: - IA_CSS_LOG("mode memory"); - curr_stream->reconfigure_css_rx = false; - break; - default: - IA_CSS_LOG("mode sensor/default"); - } - -#ifdef ISP2401 -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - err = aspect_ratio_crop_init(curr_stream, - pipes, - &aspect_ratio_crop_enabled); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } -#endif - -#endif - for (i = 0; i < num_pipes; i++) { -#ifdef ISP2401 - struct ia_css_resolution effective_res; -#endif - curr_pipe = pipes[i]; - /* set current stream */ - curr_pipe->stream = curr_stream; - /* take over effective info */ - - effective_res = curr_pipe->config.input_effective_res; - if (effective_res.height == 0 || 
effective_res.width == 0) { - effective_res = curr_pipe->stream->config.input_config.effective_res; -#ifdef ISP2401 - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - /* The aspect ratio cropping is currently only - * supported on the new input system. */ - if (aspect_ratio_crop_check(aspect_ratio_crop_enabled, curr_pipe)) { - - struct ia_css_resolution crop_res; - - err = aspect_ratio_crop(curr_pipe, &crop_res); - if (err == IA_CSS_SUCCESS) { - effective_res = crop_res; - } else { - /* in case of error fallback to default - * effective resolution from driver. */ - IA_CSS_LOG("aspect_ratio_crop() failed with err(%d)", err); - } - } -#endif -#endif - curr_pipe->config.input_effective_res = effective_res; - } - IA_CSS_LOG("effective_res=%dx%d", - effective_res.width, - effective_res.height); - } - -#ifdef ISP2401 - for (i = 0; i < num_pipes; i++) { - if (pipes[i]->config.mode != IA_CSS_PIPE_MODE_ACC && - pipes[i]->config.mode != IA_CSS_PIPE_MODE_COPY) { - err = check_pipe_resolutions(pipes[i]); - if (err != IA_CSS_SUCCESS) { - goto ERR; - } - } - } - -#endif - err = ia_css_stream_isp_parameters_init(curr_stream); - if (err != IA_CSS_SUCCESS) - goto ERR; - IA_CSS_LOG("isp_params_configs: %p", curr_stream->isp_params_configs); - - if (num_pipes == 1 && pipes[0]->config.mode == IA_CSS_PIPE_MODE_ACC) { - *stream = curr_stream; - err = ia_css_acc_stream_create(curr_stream); - goto ERR; - } - /* sensor binning */ - if (!spcopyonly){ - sensor_binning_changed = - sh_css_params_set_binning_factor(curr_stream, curr_stream->config.sensor_binning_factor); - } else { - sensor_binning_changed = false; - } - - IA_CSS_LOG("sensor_binning=%d, changed=%d", - curr_stream->config.sensor_binning_factor, sensor_binning_changed); - /* loop over pipes */ - IA_CSS_LOG("num_pipes=%d", num_pipes); - curr_stream->cont_capt = false; - /* Temporary hack: we give the preview pipe a reference to the capture - * pipe in continuous capture mode. 
*/ - if (curr_stream->config.continuous) { - /* Search for the preview pipe and create the copy pipe */ - struct ia_css_pipe *preview_pipe; - struct ia_css_pipe *video_pipe; - struct ia_css_pipe *acc_pipe; - struct ia_css_pipe *capture_pipe = NULL; - struct ia_css_pipe *copy_pipe = NULL; - - if (num_pipes >= 2) { - curr_stream->cont_capt = true; - curr_stream->disable_cont_vf = curr_stream->config.disable_cont_viewfinder; -#ifndef ISP2401 - curr_stream->stop_copy_preview = my_css.stop_copy_preview; -#endif - } - - /* Create copy pipe here, since it may not be exposed to the driver */ - preview_pipe = find_pipe(pipes, num_pipes, - IA_CSS_PIPE_MODE_PREVIEW, false); - video_pipe = find_pipe(pipes, num_pipes, - IA_CSS_PIPE_MODE_VIDEO, false); - acc_pipe = find_pipe(pipes, num_pipes, - IA_CSS_PIPE_MODE_ACC, false); - if (acc_pipe && num_pipes == 2 && curr_stream->cont_capt == true) - curr_stream->cont_capt = false; /* preview + QoS case will not need cont_capt switch */ - if (curr_stream->cont_capt == true) { - capture_pipe = find_pipe(pipes, num_pipes, - IA_CSS_PIPE_MODE_CAPTURE, false); - if (capture_pipe == NULL) { - err = IA_CSS_ERR_INTERNAL_ERROR; - goto ERR; - } - } - /* We do not support preview and video pipe at the same time */ - if (preview_pipe && video_pipe) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - goto ERR; - } - - if (preview_pipe && !preview_pipe->pipe_settings.preview.copy_pipe) { - err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, ©_pipe, true); - if (err != IA_CSS_SUCCESS) - goto ERR; - ia_css_pipe_config_defaults(©_pipe->config); - preview_pipe->pipe_settings.preview.copy_pipe = copy_pipe; - copy_pipe->stream = curr_stream; - } - if (preview_pipe && (curr_stream->cont_capt == true)) { - preview_pipe->pipe_settings.preview.capture_pipe = capture_pipe; - } - if (video_pipe && !video_pipe->pipe_settings.video.copy_pipe) { - err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, ©_pipe, true); - if (err != IA_CSS_SUCCESS) - goto ERR; - 
ia_css_pipe_config_defaults(©_pipe->config); - video_pipe->pipe_settings.video.copy_pipe = copy_pipe; - copy_pipe->stream = curr_stream; - } - if (video_pipe && (curr_stream->cont_capt == true)) { - video_pipe->pipe_settings.video.capture_pipe = capture_pipe; - } - if (preview_pipe && acc_pipe) { - preview_pipe->pipe_settings.preview.acc_pipe = acc_pipe; - } - } - for (i = 0; i < num_pipes; i++) { - curr_pipe = pipes[i]; - /* set current stream */ - curr_pipe->stream = curr_stream; -#ifndef ISP2401 - /* take over effective info */ - - effective_res = curr_pipe->config.input_effective_res; - err = ia_css_util_check_res( - effective_res.width, - effective_res.height); - if (err != IA_CSS_SUCCESS) - goto ERR; -#endif - /* sensor binning per pipe */ - if (sensor_binning_changed) - sh_css_pipe_free_shading_table(curr_pipe); - } - - /* now pipes have been configured, info should be available */ - for (i = 0; i < num_pipes; i++) { - struct ia_css_pipe_info *pipe_info = NULL; - curr_pipe = pipes[i]; - - err = sh_css_pipe_load_binaries(curr_pipe); - if (err != IA_CSS_SUCCESS) - goto ERR; - - /* handle each pipe */ - pipe_info = &curr_pipe->info; - for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) { - err = sh_css_pipe_get_output_frame_info(curr_pipe, - &pipe_info->output_info[j], j); - if (err != IA_CSS_SUCCESS) - goto ERR; - } -#ifdef ISP2401 - pipe_info->output_system_in_res_info = curr_pipe->config.output_system_in_res; -#endif - if (!spcopyonly){ - err = sh_css_pipe_get_shading_info(curr_pipe, -#ifndef ISP2401 - &pipe_info->shading_info); -#else - &pipe_info->shading_info, &curr_pipe->config); -#endif - if (err != IA_CSS_SUCCESS) - goto ERR; - err = sh_css_pipe_get_grid_info(curr_pipe, - &pipe_info->grid_info); - if (err != IA_CSS_SUCCESS) - goto ERR; - for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) { - sh_css_pipe_get_viewfinder_frame_info(curr_pipe, - &pipe_info->vf_output_info[j], j); - if (err != IA_CSS_SUCCESS) - goto ERR; - } - } - - 
my_css.active_pipes[ia_css_pipe_get_pipe_num(curr_pipe)] = curr_pipe; - } - - curr_stream->started = false; - - /* Map SP threads before doing anything. */ - err = map_sp_threads(curr_stream, true); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LOG("map_sp_threads: return_err=%d", err); - goto ERR; - } - - for (i = 0; i < num_pipes; i++) { - curr_pipe = pipes[i]; - ia_css_pipe_map_queue(curr_pipe, true); - } - - /* Create host side pipeline objects without stages */ - err = create_host_pipeline_structure(curr_stream); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LOG("create_host_pipeline_structure: return_err=%d", err); - goto ERR; - } - - /* assign curr_stream */ - *stream = curr_stream; - -ERR: -#ifndef ISP2401 - if (err == IA_CSS_SUCCESS) - { - /* working mode: enter into the seed list */ - if (my_css_save.mode == sh_css_mode_working) { - for (i = 0; i < MAX_ACTIVE_STREAMS; i++) - if (!my_css_save.stream_seeds[i].stream) { - IA_CSS_LOG("entered stream into loc=%d", i); - my_css_save.stream_seeds[i].orig_stream = stream; - my_css_save.stream_seeds[i].stream = curr_stream; - my_css_save.stream_seeds[i].num_pipes = num_pipes; - my_css_save.stream_seeds[i].stream_config = *stream_config; - for (j = 0; j < num_pipes; j++) { - my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config; - my_css_save.stream_seeds[i].pipes[j] = pipes[j]; - my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j]; - } - break; - } - } -#else - if (err == IA_CSS_SUCCESS) { - err = ia_css_save_stream(curr_stream); -#endif - } else { - ia_css_stream_destroy(curr_stream); - } -#ifndef ISP2401 - IA_CSS_LEAVE("return_err=%d mode=%d", err, my_css_save.mode); -#else - IA_CSS_LEAVE("return_err=%d", err); -#endif - return err; -} - -enum ia_css_err -ia_css_stream_destroy(struct ia_css_stream *stream) -{ - int i; - enum ia_css_err err = IA_CSS_SUCCESS; -#ifdef ISP2401 - enum ia_css_err err1 = IA_CSS_SUCCESS; - enum ia_css_err err2 = IA_CSS_SUCCESS; -#endif - - IA_CSS_ENTER_PRIVATE("stream = %p", stream); - if 
(stream == NULL) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - ia_css_stream_isp_parameters_uninit(stream); - - if ((stream->last_pipe != NULL) && - ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num)) { -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - for (i = 0; i < stream->num_pipes; i++) { - struct ia_css_pipe *entry = stream->pipes[i]; - unsigned int sp_thread_id; - struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal; - - assert(entry != NULL); - if (entry != NULL) { - /* get the SP thread id */ - if (ia_css_pipeline_get_sp_thread_id( - ia_css_pipe_get_pipe_num(entry), &sp_thread_id) != true) - return IA_CSS_ERR_INTERNAL_ERROR; - /* get the target input terminal */ - sp_pipeline_input_terminal = - &(sh_css_sp_group.pipe_io[sp_thread_id].input); - - for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) { - ia_css_isys_stream_h isys_stream = - &(sp_pipeline_input_terminal->context.virtual_input_system_stream[i]); - if (stream->config.isys_config[i].valid && isys_stream->valid) - ia_css_isys_stream_destroy(isys_stream); - } - } - } -#ifndef ISP2401 - if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { -#else - if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR || - stream->config.mode == IA_CSS_INPUT_MODE_TPG || - stream->config.mode == IA_CSS_INPUT_MODE_PRBS) { -#endif - for (i = 0; i < stream->num_pipes; i++) { - struct ia_css_pipe *entry = stream->pipes[i]; - /* free any mipi frames that are remaining: - * some test stream create-destroy cycles do not generate output frames - * and the mipi buffer is not freed in the deque function - */ - if (entry != NULL) - free_mipi_frames(entry); - } - } - stream_unregister_with_csi_rx(stream); -#endif - - for (i = 0; i < stream->num_pipes; i++) { - struct ia_css_pipe *curr_pipe = stream->pipes[i]; - assert(curr_pipe != NULL); - ia_css_pipe_map_queue(curr_pipe, false); - } - - err = map_sp_threads(stream, false); - if (err != 
IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - - /* remove references from pipes to stream */ - for (i = 0; i < stream->num_pipes; i++) { - struct ia_css_pipe *entry = stream->pipes[i]; - assert(entry != NULL); - if (entry != NULL) { - /* clear reference to stream */ - entry->stream = NULL; - /* check internal copy pipe */ - if (entry->mode == IA_CSS_PIPE_ID_PREVIEW && - entry->pipe_settings.preview.copy_pipe) { - IA_CSS_LOG("clearing stream on internal preview copy pipe"); - entry->pipe_settings.preview.copy_pipe->stream = NULL; - } - if (entry->mode == IA_CSS_PIPE_ID_VIDEO && - entry->pipe_settings.video.copy_pipe) { - IA_CSS_LOG("clearing stream on internal video copy pipe"); - entry->pipe_settings.video.copy_pipe->stream = NULL; - } - err = sh_css_pipe_unload_binaries(entry); - } - } - /* free associated memory of stream struct */ - kfree(stream->pipes); - stream->pipes = NULL; - stream->num_pipes = 0; -#ifndef ISP2401 - /* working mode: take out of the seed list */ - if (my_css_save.mode == sh_css_mode_working) - for(i=0;iinfo; - return IA_CSS_SUCCESS; -} - -/* - * Rebuild a stream, including allocating structs, setting configuration and - * building the required pipes. - * The data is taken from the css_save struct updated upon stream creation. 
- * The stream handle is used to identify the correct entry in the css_save struct - */ -enum ia_css_err -ia_css_stream_load(struct ia_css_stream *stream) -{ -#ifndef ISP2401 - int i; - enum ia_css_err err; - assert(stream != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter, \n"); - for (i = 0; i < MAX_ACTIVE_STREAMS; i++) { - if (my_css_save.stream_seeds[i].stream == stream) { - int j; - for ( j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++) { - if ((err = ia_css_pipe_create(&(my_css_save.stream_seeds[i].pipe_config[j]), &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS) { - if (j) { - int k; - for(k=0;klast_pipe == NULL)) { - IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - IA_CSS_LOG("starting %d", stream->last_pipe->mode); - - sh_css_sp_set_disable_continuous_viewfinder(stream->disable_cont_vf); - - /* Create host side pipeline. */ - err = create_host_pipeline(stream); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR(err); - return err; - } - -#if !defined(HAS_NO_INPUT_SYSTEM) -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - if((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) || - (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)) - stream_register_with_csi_rx(stream); -#endif -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) - /* Initialize mipi size checks */ - if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) - { - unsigned int idx; - unsigned int port = (unsigned int) (stream->config.source.port.port) ; - - for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) { - sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = sh_css_get_mipi_sizes_for_check(port, idx); - } - } -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) - if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) { - err = sh_css_config_input_network(stream); - if (err != IA_CSS_SUCCESS) - return err; - } -#endif /* !HAS_NO_INPUT_SYSTEM */ - 
- err = sh_css_pipe_start(stream); - IA_CSS_LEAVE_ERR(err); - return err; -} - -enum ia_css_err -ia_css_stream_stop(struct ia_css_stream *stream) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop() enter/exit\n"); - assert(stream != NULL); - assert(stream->last_pipe != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop: stopping %d\n", - stream->last_pipe->mode); - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) - /* De-initialize mipi size checks */ - if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) - { - unsigned int idx; - unsigned int port = (unsigned int) (stream->config.source.port.port) ; - - for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) { - sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = 0; - } - } -#endif -#ifndef ISP2401 - err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline); -#else - - err = sh_css_pipes_stop(stream); -#endif - if (err != IA_CSS_SUCCESS) - return err; - - /* Ideally, unmapping should happen after pipeline_stop, but current - * semantics do not allow that. */ - /* err = map_sp_threads(stream, false); */ - - return err; -} - -bool -ia_css_stream_has_stopped(struct ia_css_stream *stream) -{ - bool stopped; - assert(stream != NULL); - -#ifndef ISP2401 - stopped = ia_css_pipeline_has_stopped(&stream->last_pipe->pipeline); -#else - stopped = sh_css_pipes_have_stopped(stream); -#endif - - return stopped; -} - -#ifndef ISP2401 -/* - * Destroy the stream and all the pipes related to it. 
- * The stream handle is used to identify the correct entry in the css_save struct - */ -enum ia_css_err -ia_css_stream_unload(struct ia_css_stream *stream) -{ - int i; - assert(stream != NULL); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() enter, \n"); - /* some checks */ - assert (stream != NULL); - for(i=0;imode; - else - *pipe_id = IA_CSS_PIPE_ID_COPY; - - return IA_CSS_SUCCESS; -} - -enum atomisp_input_format -ia_css_stream_get_format(const struct ia_css_stream *stream) -{ - return stream->config.input_config.format; -} - -bool -ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream) -{ - return (stream->config.pixels_per_clock == 2); -} - -struct ia_css_binary * -ia_css_stream_get_shading_correction_binary(const struct ia_css_stream *stream) -{ - struct ia_css_pipe *pipe; - - assert(stream != NULL); - - pipe = stream->pipes[0]; - - if (stream->num_pipes == 2) { - assert(stream->pipes[1] != NULL); - if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO || - stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW) - pipe = stream->pipes[1]; - } - - return ia_css_pipe_get_shading_correction_binary(pipe); -} - -struct ia_css_binary * -ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream) -{ - int i; - struct ia_css_pipe *video_pipe = NULL; - - /* First we find the video pipe */ - for (i=0; inum_pipes; i++) { - struct ia_css_pipe *pipe = stream->pipes[i]; - if (pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) { - video_pipe = pipe; - break; - } - } - if (video_pipe) - return &video_pipe->pipe_settings.video.video_binary; - return NULL; -} - -struct ia_css_binary * -ia_css_stream_get_3a_binary(const struct ia_css_stream *stream) -{ - struct ia_css_pipe *pipe; - struct ia_css_binary *s3a_binary = NULL; - - assert(stream != NULL); - - pipe = stream->pipes[0]; - - if (stream->num_pipes == 2) { - assert(stream->pipes[1] != NULL); - if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO || - 
stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW) - pipe = stream->pipes[1]; - } - - s3a_binary = ia_css_pipe_get_s3a_binary(pipe); - - return s3a_binary; -} - - -enum ia_css_err -ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width) -{ - struct ia_css_pipe *pipe; - - assert(stream != NULL); - - pipe = stream->last_pipe; - - assert(pipe != NULL); - - /* set the config also just in case (redundant info? why do we save config in pipe?) */ - pipe->config.output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width; - pipe->output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width; - - return IA_CSS_SUCCESS; -} - -static struct ia_css_binary * -ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe) -{ - struct ia_css_binary *binary = NULL; - - assert(pipe != NULL); - - switch (pipe->config.mode) { - case IA_CSS_PIPE_MODE_PREVIEW: - binary = (struct ia_css_binary *)&pipe->pipe_settings.preview.preview_binary; - break; - case IA_CSS_PIPE_MODE_VIDEO: - binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary; - break; - case IA_CSS_PIPE_MODE_CAPTURE: - if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) { - unsigned int i; - - for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) { - if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.sc) { - binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i]; - break; - } - } - } - else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) - binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary; - else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED || - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) { - if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1) - binary = (struct ia_css_binary 
*)&pipe->pipe_settings.capture.pre_isp_binary; - else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2) - binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary; - } - break; - default: - break; - } - - if (binary && binary->info->sp.enable.sc) - return binary; - - return NULL; -} - -static struct ia_css_binary * -ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe) -{ - struct ia_css_binary *binary = NULL; - - assert(pipe != NULL); - - switch (pipe->config.mode) { - case IA_CSS_PIPE_MODE_PREVIEW: - binary = (struct ia_css_binary*)&pipe->pipe_settings.preview.preview_binary; - break; - case IA_CSS_PIPE_MODE_VIDEO: - binary = (struct ia_css_binary*)&pipe->pipe_settings.video.video_binary; - break; - case IA_CSS_PIPE_MODE_CAPTURE: - if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) { - unsigned int i; - for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) { - if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) { - binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i]; - break; - } - } - } - else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) - binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary; - else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED || - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) { - if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1) - binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary; - else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2) - binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary; - else - assert(0); - } - break; - default: - break; - } - - if (binary && !binary->info->sp.enable.s3a) - binary = NULL; - - return binary; -} - -static struct ia_css_binary * -ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe) -{ - struct 
ia_css_binary *binary = NULL; - - assert(pipe != NULL); - - switch (pipe->config.mode) { - case IA_CSS_PIPE_MODE_VIDEO: - binary = (struct ia_css_binary*)&pipe->pipe_settings.video.video_binary; - break; - default: - break; - } - - if (binary && !binary->info->sp.enable.dis) - binary = NULL; - - return binary; -} - -struct ia_css_pipeline * -ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe) -{ - assert(pipe != NULL); - - return (struct ia_css_pipeline*)&pipe->pipeline; -} - -unsigned int -ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe) -{ - assert(pipe != NULL); - - /* KW was not sure this function was not returning a value - that was out of range; so added an assert, and, for the - case when asserts are not enabled, clip to the largest - value; pipe_num is unsigned so the value cannot be too small - */ - assert(pipe->pipe_num < IA_CSS_PIPELINE_NUM_MAX); - - if (pipe->pipe_num >= IA_CSS_PIPELINE_NUM_MAX) - return (IA_CSS_PIPELINE_NUM_MAX - 1); - - return pipe->pipe_num; -} - - -unsigned int -ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe) -{ - assert(pipe != NULL); - - return (unsigned int)pipe->config.isp_pipe_version; -} - -#define SP_START_TIMEOUT_US 30000000 - -enum ia_css_err -ia_css_start_sp(void) -{ - unsigned long timeout; - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER(""); - sh_css_sp_start_isp(); - - /* waiting for the SP is completely started */ - timeout = SP_START_TIMEOUT_US; - while((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_INITIALIZED) && timeout) { - timeout--; - hrt_sleep(); - } - if (timeout == 0) { - IA_CSS_ERROR("timeout during SP initialization"); - return IA_CSS_ERR_INTERNAL_ERROR; - } - - /* Workaround, in order to run two streams in parallel. See TASK 4271*/ - /* TODO: Fix this. */ - - sh_css_init_host_sp_control_vars(); - - /* buffers should be initialized only when sp is started */ - /* AM: At the moment it will be done only when there is no stream active. 
*/ - - sh_css_setup_queues(); - ia_css_bufq_dump_queue_info(); - -#ifdef ISP2401 - if (ia_css_is_system_mode_suspend_or_resume() == false) { /* skip in suspend/resume flow */ - ia_css_set_system_mode(IA_CSS_SYS_MODE_WORKING); - } -#endif - IA_CSS_LEAVE_ERR(err); - return err; -} - -/* - * Time to wait SP for termincate. Only condition when this can happen - * is a fatal hw failure, but we must be able to detect this and emit - * a proper error trace. - */ -#define SP_SHUTDOWN_TIMEOUT_US 200000 - -enum ia_css_err -ia_css_stop_sp(void) -{ - unsigned long timeout; - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER("void"); - - if (!sh_css_sp_is_running()) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE("SP already stopped : return_err=%d", err); - - /* Return an error - stop SP should not have been called by driver */ - return err; - } - - /* For now, stop whole SP */ -#ifndef ISP2401 - sh_css_write_host2sp_command(host2sp_cmd_terminate); -#else - if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) { - IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed"); - ia_css_debug_dump_sp_sw_debug_info(); - ia_css_debug_dump_debug_info(NULL); - } -#endif - sh_css_sp_set_sp_running(false); - - timeout = SP_SHUTDOWN_TIMEOUT_US; - while (!ia_css_spctrl_is_idle(SP0_ID) && timeout) { - timeout--; - hrt_sleep(); - } - if ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_TERMINATED)) - IA_CSS_WARNING("SP has not terminated (SW)"); - - if (timeout == 0) { - IA_CSS_WARNING("SP is not idle"); - ia_css_debug_dump_sp_sw_debug_info(); - } - timeout = SP_SHUTDOWN_TIMEOUT_US; - while (!isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT) && timeout) { - timeout--; - hrt_sleep(); - } - if (timeout == 0) { - IA_CSS_WARNING("ISP is not idle"); - ia_css_debug_dump_sp_sw_debug_info(); - } - - sh_css_hmm_buffer_record_uninit(); - -#ifndef ISP2401 - /* clear pending param sets from refcount */ - sh_css_param_clear_param_sets(); -#else - if 
(ia_css_is_system_mode_suspend_or_resume() == false) { /* skip in suspend/resume flow */ - /* clear pending param sets from refcount */ - sh_css_param_clear_param_sets(); - ia_css_set_system_mode(IA_CSS_SYS_MODE_INIT); /* System is initialized but not 'running' */ - } -#endif - - IA_CSS_LEAVE_ERR(err); - return err; -} - -enum ia_css_err -ia_css_update_continuous_frames(struct ia_css_stream *stream) -{ - struct ia_css_pipe *pipe; - unsigned int i; - - ia_css_debug_dtrace( - IA_CSS_DEBUG_TRACE, - "sh_css_update_continuous_frames() enter:\n"); - - if (stream == NULL) { - ia_css_debug_dtrace( - IA_CSS_DEBUG_TRACE, - "sh_css_update_continuous_frames() leave: invalid stream, return_void\n"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - pipe = stream->continuous_pipe; - - for (i = stream->config.init_num_cont_raw_buf; - i < stream->config.target_num_cont_raw_buf; i++) { - sh_css_update_host2sp_offline_frame(i, - pipe->continuous_frames[i], pipe->cont_md_buffers[i]); - } - sh_css_update_host2sp_cont_num_raw_frames - (stream->config.target_num_cont_raw_buf, true); - ia_css_debug_dtrace( - IA_CSS_DEBUG_TRACE, - "sh_css_update_continuous_frames() leave: return_void\n"); - - return IA_CSS_SUCCESS; -} - -void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map) -{ - unsigned int thread_id; - enum ia_css_pipe_id pipe_id; - unsigned int pipe_num; - bool need_input_queue; - - IA_CSS_ENTER(""); - assert(pipe != NULL); - - pipe_id = pipe->mode; - pipe_num = pipe->pipe_num; - - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - -#if defined(HAS_NO_INPUT_SYSTEM) || defined(USE_INPUT_SYSTEM_VERSION_2401) - need_input_queue = true; -#else - need_input_queue = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY; -#endif - - /* map required buffer queues to resources */ - /* TODO: to be improved */ - if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) { - if (need_input_queue) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map); - ia_css_queue_map(thread_id, 
IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map); -#if defined SH_CSS_ENABLE_METADATA - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map); -#endif - if (pipe->pipe_settings.preview.preview_binary.info && - pipe->pipe_settings.preview.preview_binary.info->sp.enable.s3a) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map); - } else if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE) { - unsigned int i; - - if (need_input_queue) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map); -#if defined SH_CSS_ENABLE_METADATA - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map); -#endif - if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) { - for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) { - if (pipe->pipe_settings.capture.primary_binary[i].info && - pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) { - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map); - break; - } - } - } else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED || - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT || - pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) { - if (pipe->pipe_settings.capture.pre_isp_binary.info && - pipe->pipe_settings.capture.pre_isp_binary.info->sp.enable.s3a) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map); - } - } else if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) { - if (need_input_queue) - ia_css_queue_map(thread_id, 
IA_CSS_BUFFER_TYPE_INPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map); - if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map); -#if defined SH_CSS_ENABLE_METADATA - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map); -#endif - if (pipe->pipe_settings.video.video_binary.info && - pipe->pipe_settings.video.video_binary.info->sp.enable.s3a) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map); - if (pipe->pipe_settings.video.video_binary.info && - (pipe->pipe_settings.video.video_binary.info->sp.enable.dis - )) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_DIS_STATISTICS, map); - } else if (pipe->mode == IA_CSS_PIPE_ID_COPY) { - if (need_input_queue) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map); - if (!pipe->stream->config.continuous) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map); -#if defined SH_CSS_ENABLE_METADATA - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map); -#endif - } else if (pipe->mode == IA_CSS_PIPE_ID_ACC) { - if (need_input_queue) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map); -#if defined SH_CSS_ENABLE_METADATA - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map); -#endif - } else if (pipe->mode == IA_CSS_PIPE_ID_YUVPP) { - unsigned int idx; - for (idx = 0; idx < IA_CSS_PIPE_MAX_OUTPUT_STAGE; idx++) { - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, map); - if (pipe->enable_viewfinder[idx]) - ia_css_queue_map(thread_id, 
IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, map); - } - if (need_input_queue) - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map); - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map); -#if defined SH_CSS_ENABLE_METADATA - ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map); -#endif - } - IA_CSS_LEAVE(""); -} - -#if CONFIG_ON_FRAME_ENQUEUE() -static enum ia_css_err set_config_on_frame_enqueue(struct ia_css_frame_info *info, struct frame_data_wrapper *frame) -{ - frame->config_on_frame_enqueue.padded_width = 0; - - /* currently we support configuration on frame enqueue only on YUV formats */ - /* on other formats the padded_width is zeroed for no configuration override */ - switch (info->format) { - case IA_CSS_FRAME_FORMAT_YUV420: - case IA_CSS_FRAME_FORMAT_NV12: - if (info->padded_width > info->res.width) - { - frame->config_on_frame_enqueue.padded_width = info->padded_width; - } - else if ((info->padded_width < info->res.width) && (info->padded_width > 0)) - { - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - /* nothing to do if width == padded width or padded width is zeroed (the same) */ - break; - default: - break; - } - - return IA_CSS_SUCCESS; -} -#endif - -enum ia_css_err -ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id) -{ - enum ia_css_err ret; - - IA_CSS_ENTER(""); - - /* Only continuous streams have a tagger to which we can send the - * unlock message. */ - if (stream == NULL || !stream->config.continuous) { - IA_CSS_ERROR("invalid stream pointer"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if (exp_id > IA_CSS_ISYS_MAX_EXPOSURE_ID || - exp_id < IA_CSS_ISYS_MIN_EXPOSURE_ID) { - IA_CSS_ERROR("invalid expsure ID: %d\n", exp_id); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - /* Send the event. Since we verified that the exp_id is valid, - * we can safely assign it to an 8-bit argument here. 
*/ - ret = ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER, exp_id, 0, 0); - - IA_CSS_LEAVE_ERR(ret); - return ret; -} - -/* @brief Set the state (Enable or Disable) of the Extension stage in the - * given pipe. - */ -enum ia_css_err -ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle, bool enable) -{ - unsigned int thread_id; - struct ia_css_pipeline_stage *stage; - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER(""); - - /* Parameter Check */ - if (pipe == NULL || pipe->stream == NULL) { - IA_CSS_ERROR("Invalid Pipe."); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } else if (!(pipe->config.acc_extension)) { - IA_CSS_ERROR("Invalid Pipe(No Extension Firmware)"); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } else if (!sh_css_sp_is_running()) { - IA_CSS_ERROR("Leaving: queue unavailable."); - err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } else { - /* Query the threadid and stage_num for the Extension firmware*/ - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - err = ia_css_pipeline_get_stage_from_fw(&(pipe->pipeline), fw_handle, &stage); - if (err == IA_CSS_SUCCESS) { - /* Set the Extension State;. TODO: Add check for stage firmware.type (QOS)*/ - err = ia_css_bufq_enqueue_psys_event( - (uint8_t) IA_CSS_PSYS_SW_EVENT_STAGE_ENABLE_DISABLE, - (uint8_t) thread_id, - (uint8_t) stage->stage_num, - enable ? 1 : 0); - if (err == IA_CSS_SUCCESS) { - if(enable) - SH_CSS_QOS_STAGE_ENABLE(&(sh_css_sp_group.pipe[thread_id]),stage->stage_num); - else - SH_CSS_QOS_STAGE_DISABLE(&(sh_css_sp_group.pipe[thread_id]),stage->stage_num); - } - } - } - IA_CSS_LEAVE("err:%d handle:%u enable:%d", err, fw_handle, enable); - return err; -} - -/* @brief Get the state (Enable or Disable) of the Extension stage in the - * given pipe. 
- */ -enum ia_css_err -ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle, bool *enable) -{ - struct ia_css_pipeline_stage *stage; - unsigned int thread_id; - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER(""); - - /* Parameter Check */ - if (pipe == NULL || pipe->stream == NULL) { - IA_CSS_ERROR("Invalid Pipe."); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } else if (!(pipe->config.acc_extension)) { - IA_CSS_ERROR("Invalid Pipe (No Extension Firmware)."); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } else if (!sh_css_sp_is_running()) { - IA_CSS_ERROR("Leaving: queue unavailable."); - err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } else { - /* Query the threadid and stage_num corresponding to the Extension firmware*/ - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage); - - if (err == IA_CSS_SUCCESS) { - /* Get the Extension State */ - *enable = (SH_CSS_QOS_STAGE_IS_ENABLED(&(sh_css_sp_group.pipe[thread_id]),stage->stage_num)) ? 
true : false; - } - } - IA_CSS_LEAVE("err:%d handle:%u enable:%d", err, fw_handle, *enable); - return err; -} - -#ifdef ISP2401 -enum ia_css_err -ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe, uint32_t fw_handle, - struct ia_css_isp_param_css_segments *css_seg, struct ia_css_isp_param_isp_segments *isp_seg) -{ - unsigned int HIVE_ADDR_sp_group; - static struct sh_css_sp_group sp_group; - static struct sh_css_sp_stage sp_stage; - static struct sh_css_isp_stage isp_stage; - const struct ia_css_fw_info *fw; - unsigned int thread_id; - struct ia_css_pipeline_stage *stage; - enum ia_css_err err = IA_CSS_SUCCESS; - int stage_num = 0; - enum ia_css_isp_memories mem; - bool enabled; - - IA_CSS_ENTER(""); - - fw = &sh_css_sp_fw; - - /* Parameter Check */ - if (pipe == NULL || pipe->stream == NULL) { - IA_CSS_ERROR("Invalid Pipe."); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } else if (!(pipe->config.acc_extension)) { - IA_CSS_ERROR("Invalid Pipe (No Extension Firmware)."); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } else if (!sh_css_sp_is_running()) { - IA_CSS_ERROR("Leaving: queue unavailable."); - err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } else { - /* Query the thread_id and stage_num corresponding to the Extension firmware */ - ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id); - err = ia_css_pipeline_get_stage_from_fw(&(pipe->pipeline), fw_handle, &stage); - if (err == IA_CSS_SUCCESS) { - /* Get the Extension State */ - enabled = (SH_CSS_QOS_STAGE_IS_ENABLED(&(sh_css_sp_group.pipe[thread_id]), stage->stage_num)) ? 
true : false; - /* Update mapped arg only when extension stage is not enabled */ - if (enabled) { - IA_CSS_ERROR("Leaving: cannot update when stage is enabled."); - err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } else { - stage_num = stage->stage_num; - - HIVE_ADDR_sp_group = fw->info.sp.group; - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(sp_group), - &sp_group, sizeof(struct sh_css_sp_group)); - mmgr_load(sp_group.pipe[thread_id].sp_stage_addr[stage_num], - &sp_stage, sizeof(struct sh_css_sp_stage)); - - mmgr_load(sp_stage.isp_stage_addr, - &isp_stage, sizeof(struct sh_css_isp_stage)); - - for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) { - isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address = - css_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].address; - isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].size = - css_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].size; - isp_stage.binary_info.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address = - isp_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].address; - isp_stage.binary_info.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].size = - isp_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].size; - } - - mmgr_store(sp_stage.isp_stage_addr, - &isp_stage, sizeof(struct sh_css_isp_stage)); - } - } - } - IA_CSS_LEAVE("err:%d handle:%u", err, fw_handle); - return err; -} - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -static enum ia_css_err -aspect_ratio_crop_init(struct ia_css_stream *curr_stream, - struct ia_css_pipe *pipes[], - bool *do_crop_status) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - int i; - struct ia_css_pipe *curr_pipe; - uint32_t pipe_mask = 0; - - if ((curr_stream == NULL) || - (curr_stream->num_pipes == 0) || - (pipes == NULL) || - (do_crop_status == NULL)) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR(err); - return err; - } - - for (i = 0; i < curr_stream->num_pipes; i++) { - curr_pipe = pipes[i]; - pipe_mask |= (1 << curr_pipe->config.mode); - } - - 
*do_crop_status = - (((pipe_mask & (1 << IA_CSS_PIPE_MODE_PREVIEW)) || - (pipe_mask & (1 << IA_CSS_PIPE_MODE_VIDEO))) && - (pipe_mask & (1 << IA_CSS_PIPE_MODE_CAPTURE)) && - curr_stream->config.continuous); - return IA_CSS_SUCCESS; -} - -static bool -aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe) -{ - bool status = false; - - if ((curr_pipe != NULL) && enabled) { - if ((curr_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) || - (curr_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) || - (curr_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE)) - status = true; - } - - return status; -} - -static enum ia_css_err -aspect_ratio_crop(struct ia_css_pipe *curr_pipe, - struct ia_css_resolution *effective_res) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_resolution crop_res; - struct ia_css_resolution *in_res = NULL; - struct ia_css_resolution *out_res = NULL; - bool use_bds_output_info = false; - bool use_vf_pp_in_res = false; - bool use_capt_pp_in_res = false; - - if ((curr_pipe == NULL) || - (effective_res == NULL)) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR(err); - return err; - } - - if ((curr_pipe->config.mode != IA_CSS_PIPE_MODE_PREVIEW) && - (curr_pipe->config.mode != IA_CSS_PIPE_MODE_VIDEO) && - (curr_pipe->config.mode != IA_CSS_PIPE_MODE_CAPTURE)) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR(err); - return err; - } - - use_bds_output_info = - ((curr_pipe->bds_output_info.res.width != 0) && - (curr_pipe->bds_output_info.res.height != 0)); - - use_vf_pp_in_res = - ((curr_pipe->config.vf_pp_in_res.width != 0) && - (curr_pipe->config.vf_pp_in_res.height != 0)); - - use_capt_pp_in_res = - ((curr_pipe->config.capt_pp_in_res.width != 0) && - (curr_pipe->config.capt_pp_in_res.height != 0)); - - in_res = &curr_pipe->stream->config.input_config.effective_res; - out_res = &curr_pipe->output_info[0].res; - - switch (curr_pipe->config.mode) { - case IA_CSS_PIPE_MODE_PREVIEW: - if (use_bds_output_info) - out_res = 
&curr_pipe->bds_output_info.res; - else if (use_vf_pp_in_res) - out_res = &curr_pipe->config.vf_pp_in_res; - break; - case IA_CSS_PIPE_MODE_VIDEO: - if (use_bds_output_info) - out_res = &curr_pipe->bds_output_info.res; - break; - case IA_CSS_PIPE_MODE_CAPTURE: - if (use_capt_pp_in_res) - out_res = &curr_pipe->config.capt_pp_in_res; - break; - case IA_CSS_PIPE_MODE_ACC: - case IA_CSS_PIPE_MODE_COPY: - case IA_CSS_PIPE_MODE_YUVPP: - default: - IA_CSS_ERROR("aspect ratio cropping invalid args: mode[%d]\n", - curr_pipe->config.mode); - assert(0); - break; - } - - err = ia_css_frame_find_crop_resolution(in_res, out_res, &crop_res); - if (err == IA_CSS_SUCCESS) { - *effective_res = crop_res; - } else { - /* in case of error fallback to default - * effective resolution from driver. */ - IA_CSS_LOG("ia_css_frame_find_crop_resolution() failed with err(%d)", err); - } - return err; -} -#endif - -#endif -static void -sh_css_hmm_buffer_record_init(void) -{ - int i; - -#ifndef ISP2401 - for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) { - sh_css_hmm_buffer_record_reset(&hmm_buffer_record[i]); -#else - if (ia_css_is_system_mode_suspend_or_resume() == false) { /* skip in suspend/resume flow */ - for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) { - sh_css_hmm_buffer_record_reset(&hmm_buffer_record[i]); - } -#endif - } -} - -static void -sh_css_hmm_buffer_record_uninit(void) -{ - int i; - struct sh_css_hmm_buffer_record *buffer_record = NULL; - -#ifndef ISP2401 - buffer_record = &hmm_buffer_record[0]; - for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) { - if (buffer_record->in_use) { - if (buffer_record->h_vbuf != NULL) - ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &buffer_record->h_vbuf); - sh_css_hmm_buffer_record_reset(buffer_record); -#else - if (ia_css_is_system_mode_suspend_or_resume() == false) { /* skip in suspend/resume flow */ - buffer_record = &hmm_buffer_record[0]; - for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) { - if (buffer_record->in_use) { - if (buffer_record->h_vbuf != NULL) - 
ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &buffer_record->h_vbuf); - sh_css_hmm_buffer_record_reset(buffer_record); - } - buffer_record++; -#endif - } -#ifndef ISP2401 - buffer_record++; -#endif - } -} - -static void -sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record) -{ - assert(buffer_record != NULL); - buffer_record->in_use = false; - buffer_record->type = IA_CSS_BUFFER_TYPE_INVALID; - buffer_record->h_vbuf = NULL; - buffer_record->kernel_ptr = 0; -} - -static struct sh_css_hmm_buffer_record -*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf, - enum ia_css_buffer_type type, - hrt_address kernel_ptr) -{ - int i; - struct sh_css_hmm_buffer_record *buffer_record = NULL; - struct sh_css_hmm_buffer_record *out_buffer_record = NULL; - - assert(h_vbuf != NULL); - assert((type > IA_CSS_BUFFER_TYPE_INVALID) && (type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE)); - assert(kernel_ptr != 0); - - buffer_record = &hmm_buffer_record[0]; - for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) { - if (!buffer_record->in_use) { - buffer_record->in_use = true; - buffer_record->type = type; - buffer_record->h_vbuf = h_vbuf; - buffer_record->kernel_ptr = kernel_ptr; - out_buffer_record = buffer_record; - break; - } - buffer_record++; - } - - return out_buffer_record; -} - -static struct sh_css_hmm_buffer_record -*sh_css_hmm_buffer_record_validate(hrt_vaddress ddr_buffer_addr, - enum ia_css_buffer_type type) -{ - int i; - struct sh_css_hmm_buffer_record *buffer_record = NULL; - bool found_record = false; - - buffer_record = &hmm_buffer_record[0]; - for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) { - if ((buffer_record->in_use) && - (buffer_record->type == type) && - (buffer_record->h_vbuf != NULL) && - (buffer_record->h_vbuf->vptr == ddr_buffer_addr)) { - found_record = true; - break; - } - buffer_record++; - } - - if (found_record) - return buffer_record; - else - return NULL; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_defs.h 
b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_defs.h deleted file mode 100644 index 4072c564f911..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_defs.h +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _SH_CSS_DEFS_H_ -#define _SH_CSS_DEFS_H_ - -#include "isp.h" - -/*#include "vamem.h"*/ /* Cannot include for VAMEM properties this file is visible on ISP -> pipeline generator */ - -#include "math_support.h" /* max(), min, etc etc */ - -/* ID's for refcount */ -#define IA_CSS_REFCOUNT_PARAM_SET_POOL 0xCAFE0001 -#define IA_CSS_REFCOUNT_PARAM_BUFFER 0xCAFE0002 - -/* Digital Image Stabilization */ -#define SH_CSS_DIS_DECI_FACTOR_LOG2 6 - -/* UV offset: 1:uv=-128...127, 0:uv=0...255 */ -#define SH_CSS_UV_OFFSET_IS_0 0 - -/* Bits of bayer is adjusted as 13 in ISP */ -#define SH_CSS_BAYER_BITS 13 - -/* Max value of bayer data (unsigned 13bit in ISP) */ -#define SH_CSS_BAYER_MAXVAL ((1U << SH_CSS_BAYER_BITS) - 1) - -/* Bits of yuv in ISP */ -#define SH_CSS_ISP_YUV_BITS 8 - -#define SH_CSS_DP_GAIN_SHIFT 5 -#define SH_CSS_BNR_GAIN_SHIFT 13 -#define SH_CSS_YNR_GAIN_SHIFT 13 -#define SH_CSS_AE_YCOEF_SHIFT 13 -#define SH_CSS_AF_FIR_SHIFT 13 -#define SH_CSS_YEE_DETAIL_GAIN_SHIFT 8 /* [u5.8] */ -#define SH_CSS_YEE_SCALE_SHIFT 8 -#define SH_CSS_TNR_COEF_SHIFT 13 -#define SH_CSS_MACC_COEF_SHIFT 11 /* [s2.11] for ISP1 */ -#define SH_CSS_MACC2_COEF_SHIFT 13 /* [s[exp].[13-exp]] for ISP2 */ 
-#define SH_CSS_DIS_COEF_SHIFT 13 - -/* enumeration of the bayer downscale factors. When a binary supports multiple - * factors, the OR of these defines is used to build the mask of supported - * factors. The BDS factor is used in pre-processor expressions so we cannot - * use an enum here. */ -#define SH_CSS_BDS_FACTOR_1_00 (0) -#define SH_CSS_BDS_FACTOR_1_25 (1) -#define SH_CSS_BDS_FACTOR_1_50 (2) -#define SH_CSS_BDS_FACTOR_2_00 (3) -#define SH_CSS_BDS_FACTOR_2_25 (4) -#define SH_CSS_BDS_FACTOR_2_50 (5) -#define SH_CSS_BDS_FACTOR_3_00 (6) -#define SH_CSS_BDS_FACTOR_4_00 (7) -#define SH_CSS_BDS_FACTOR_4_50 (8) -#define SH_CSS_BDS_FACTOR_5_00 (9) -#define SH_CSS_BDS_FACTOR_6_00 (10) -#define SH_CSS_BDS_FACTOR_8_00 (11) -#define NUM_BDS_FACTORS (12) - -#define PACK_BDS_FACTOR(factor) (1<<(factor)) - -/* Following macros should match with the type enum ia_css_pipe_version in - * ia_css_pipe_public.h. The reason to add these macros is that enum type - * will be evaluted to 0 in preprocessing time. 
*/ -#define SH_CSS_ISP_PIPE_VERSION_1 1 -#define SH_CSS_ISP_PIPE_VERSION_2_2 2 -#define SH_CSS_ISP_PIPE_VERSION_2_6_1 3 -#define SH_CSS_ISP_PIPE_VERSION_2_7 4 - -/*--------------- sRGB Gamma ----------------- -CCM : YCgCo[0,8191] -> RGB[0,4095] -sRGB Gamma : RGB [0,4095] -> RGB[0,8191] -CSC : RGB [0,8191] -> YUV[0,8191] - -CCM: -Y[0,8191],CgCo[-4096,4095],coef[-8192,8191] -> RGB[0,4095] - -sRGB Gamma: -RGB[0,4095] -(interpolation step16)-> RGB[0,255] -(LUT 12bit)-> RGB[0,4095] -> RGB[0,8191] - -CSC: -RGB[0,8191],coef[-8192,8191] -> RGB[0,8191] ---------------------------------------------*/ -/* Bits of input/output of sRGB Gamma */ -#define SH_CSS_RGB_GAMMA_INPUT_BITS 12 /* [0,4095] */ -#define SH_CSS_RGB_GAMMA_OUTPUT_BITS 13 /* [0,8191] */ - -/* Bits of fractional part of interpolation in vamem, [0,4095]->[0,255] */ -#define SH_CSS_RGB_GAMMA_FRAC_BITS \ - (SH_CSS_RGB_GAMMA_INPUT_BITS - SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE_LOG2) -#define SH_CSS_RGB_GAMMA_ONE (1 << SH_CSS_RGB_GAMMA_FRAC_BITS) - -/* Bits of input of CCM, = 13, Y[0,8191],CgCo[-4096,4095] */ -#define SH_CSS_YUV2RGB_CCM_INPUT_BITS SH_CSS_BAYER_BITS - -/* Bits of output of CCM, = 12, RGB[0,4095] */ -#define SH_CSS_YUV2RGB_CCM_OUTPUT_BITS SH_CSS_RGB_GAMMA_INPUT_BITS - -/* Maximum value of output of CCM */ -#define SH_CSS_YUV2RGB_CCM_MAX_OUTPUT \ - ((1 << SH_CSS_YUV2RGB_CCM_OUTPUT_BITS) - 1) - -#define SH_CSS_NUM_INPUT_BUF_LINES 4 - -/* Left cropping only applicable for sufficiently large nway */ -#if ISP_VEC_NELEMS == 16 -#define SH_CSS_MAX_LEFT_CROPPING 0 -#define SH_CSS_MAX_TOP_CROPPING 0 -#else -#define SH_CSS_MAX_LEFT_CROPPING 12 -#define SH_CSS_MAX_TOP_CROPPING 12 -#endif - -#define SH_CSS_SP_MAX_WIDTH 1280 - -/* This is the maximum grid we can handle in the ISP binaries. - * The host code makes sure no bigger grid is ever selected. 
*/ -#define SH_CSS_MAX_BQ_GRID_WIDTH 80 -#define SH_CSS_MAX_BQ_GRID_HEIGHT 60 - -/* The minimum dvs envelope is 12x12(for IPU2) to make sure the - * invalid rows/columns that result from filter initialization are skipped. */ -#define SH_CSS_MIN_DVS_ENVELOPE 12U - -/* The FPGA system (vec_nelems == 16) only supports upto 5MP */ -#if ISP_VEC_NELEMS == 16 -#define SH_CSS_MAX_SENSOR_WIDTH 2560 -#define SH_CSS_MAX_SENSOR_HEIGHT 1920 -#else -#define SH_CSS_MAX_SENSOR_WIDTH 4608 -#define SH_CSS_MAX_SENSOR_HEIGHT 3450 -#endif - -/* Limited to reduce vmem pressure */ -#if ISP_VMEM_DEPTH >= 3072 -#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH SH_CSS_MAX_SENSOR_WIDTH -#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT SH_CSS_MAX_SENSOR_HEIGHT -#else -#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH 3264 -#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT 2448 -#endif -/* When using bayer decimation */ -/* -#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC 4224 -#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC 3168 -*/ -#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC SH_CSS_MAX_SENSOR_WIDTH -#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC SH_CSS_MAX_SENSOR_HEIGHT - -#define SH_CSS_MIN_SENSOR_WIDTH 2 -#define SH_CSS_MIN_SENSOR_HEIGHT 2 - -#if defined(IS_ISP_2400_SYSTEM) -/* MAX width and height set to the same to allow for rotated - * resolutions. 
*/ -#define SH_CSS_MAX_VF_WIDTH 1920 -#define SH_CSS_MAX_VF_HEIGHT 1920 -#else -#define SH_CSS_MAX_VF_WIDTH 1280 -#define SH_CSS_MAX_VF_HEIGHT 960 -#endif -/* -#define SH_CSS_MAX_VF_WIDTH_DEC 1920 -#define SH_CSS_MAX_VF_HEIGHT_DEC 1080 -*/ -#define SH_CSS_MAX_VF_WIDTH_DEC SH_CSS_MAX_VF_WIDTH -#define SH_CSS_MAX_VF_HEIGHT_DEC SH_CSS_MAX_VF_HEIGHT - -/* We use 16 bits per coordinate component, including integer - and fractional bits */ -#define SH_CSS_MORPH_TABLE_GRID ISP_VEC_NELEMS -#define SH_CSS_MORPH_TABLE_ELEM_BYTES 2 -#define SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD \ - (HIVE_ISP_DDR_WORD_BYTES/SH_CSS_MORPH_TABLE_ELEM_BYTES) - -#ifndef ISP2401 -#define SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 1) -#define SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 1) -#else -/* TODO: I will move macros of "*_SCTBL_*" to SC kernel. - "+ 2" should be "+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT". (michie, Sep/23/2014) */ -#define SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 2) -#define SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 2) -#endif -#define SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR \ - CEIL_MUL(SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS) - -/* Each line of this table is aligned to the maximum line width. 
*/ -#define SH_CSS_MAX_S3ATBL_WIDTH SH_CSS_MAX_BQ_GRID_WIDTH - -#ifndef ISP2401 -/* The video binary supports a delay of 1 or 2 */ -#define MAX_DVS_FRAME_DELAY 2 -/* We always need one additional frame because the video binary - * reads the previous and writes the current frame concurrently */ -#define MAX_NUM_VIDEO_DELAY_FRAMES (MAX_DVS_FRAME_DELAY + 1) -#define NUM_VIDEO_TNR_FRAMES 2 - -#define NUM_TNR_FRAMES 2 /* FIXME */ - - -#define MAX_NUM_DELAY_FRAMES MAX_NUM_VIDEO_DELAY_FRAMES - -#else -/* Video mode specific DVS define */ -/* The video binary supports a delay of 1 or 2 frames */ -#define VIDEO_FRAME_DELAY 2 -/* +1 because DVS reads the previous and writes the current frame concurrently */ -#define MAX_NUM_VIDEO_DELAY_FRAMES (VIDEO_FRAME_DELAY + 1) - -/* Preview mode specific DVS define. */ -/* In preview we only need GDC functionality (and not the DVS functionality) */ -/* The minimum number of DVS frames you need is 2, one were GDC reads from and another where GDC writes into */ -#define NUM_PREVIEW_DVS_FRAMES (2) - -/* TNR is no longer exclusive to video, SkyCam preview has TNR too (same kernel as video). - * All uses the generic define NUM_TNR_FRAMES. The define NUM_VIDEO_TNR_FRAMES has been deprecated. - * - * Notes - * 1) The value depends on the used TNR kernel and is not something that depends on the mode - * and it is not something you just could choice. - * 2) For the luma only pipeline a version that supports two different sets of TNR reference frames - * is being used. - *. 
- */ -#define NUM_VALID_TNR_REF_FRAMES (1) /* At least one valid TNR reference frame is required */ -#define NUM_TNR_FRAMES_PER_REF_BUF_SET (2) - -/* In luma-only mode alternate illuminated frames are supported, that requires two double buffers */ -#ifdef ENABLE_LUMA_ONLY -#define NUM_TNR_REF_BUF_SETS (2) -#else -#define NUM_TNR_REF_BUF_SETS (1) -#endif - -#define NUM_TNR_FRAMES (NUM_TNR_FRAMES_PER_REF_BUF_SET * NUM_TNR_REF_BUF_SETS) - -#define MAX_NUM_DELAY_FRAMES MAX(MAX_NUM_VIDEO_DELAY_FRAMES, NUM_PREVIEW_DVS_FRAMES) - -#endif - -/* Note that this is the define used to configure all data structures common for all modes */ -/* It should be equal or bigger to the max number of DVS frames for all possible modes */ -/* Rules: these implement logic shared between the host code and ISP firmware. - The ISP firmware needs these rules to be applied at pre-processor time, - that's why these are macros, not functions. */ -#define _ISP_BQS(num) ((num)/2) -#define _ISP_VECS(width) CEIL_DIV(width, ISP_VEC_NELEMS) - -#define ISP_BQ_GRID_WIDTH(elements_per_line, deci_factor_log2) \ - CEIL_SHIFT(elements_per_line/2, deci_factor_log2) -#define ISP_BQ_GRID_HEIGHT(lines_per_frame, deci_factor_log2) \ - CEIL_SHIFT(lines_per_frame/2, deci_factor_log2) -#define ISP_C_VECTORS_PER_LINE(elements_per_line) \ - _ISP_VECS(elements_per_line/2) - -/* The morphing table is similar to the shading table in the sense that we - have 1 more value than we have cells in the grid. 
*/ -#define _ISP_MORPH_TABLE_WIDTH(int_width) \ - (CEIL_DIV(int_width, SH_CSS_MORPH_TABLE_GRID) + 1) -#define _ISP_MORPH_TABLE_HEIGHT(int_height) \ - (CEIL_DIV(int_height, SH_CSS_MORPH_TABLE_GRID) + 1) -#define _ISP_MORPH_TABLE_ALIGNED_WIDTH(width) \ - CEIL_MUL(_ISP_MORPH_TABLE_WIDTH(width), \ - SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD) - -#ifndef ISP2401 -#define _ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \ - (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + 1) -#define _ISP_SCTBL_HEIGHT(input_height, deci_factor_log2) \ - (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + 1) -#define _ISP_SCTBL_ALIGNED_WIDTH_PER_COLOR(input_width, deci_factor_log2) \ - CEIL_MUL(_ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2), \ - ISP_VEC_NELEMS) - -#endif -/* ***************************************************************** - * Statistics for 3A (Auto Focus, Auto White Balance, Auto Exposure) - * *****************************************************************/ -/* if left cropping is used, 3A statistics are also cropped by 2 vectors. */ -#define _ISP_S3ATBL_WIDTH(in_width, deci_factor_log2) \ - (_ISP_BQS(in_width) >> deci_factor_log2) -#define _ISP_S3ATBL_HEIGHT(in_height, deci_factor_log2) \ - (_ISP_BQS(in_height) >> deci_factor_log2) -#define _ISP_S3A_ELEMS_ISP_WIDTH(width, left_crop) \ - (width - ((left_crop) ? 
2 * ISP_VEC_NELEMS : 0)) - -#define _ISP_S3ATBL_ISP_WIDTH(in_width, deci_factor_log2) \ - CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2) -#define _ISP_S3ATBL_ISP_HEIGHT(in_height, deci_factor_log2) \ - CEIL_SHIFT(_ISP_BQS(in_height), deci_factor_log2) -#define ISP_S3ATBL_VECTORS \ - _ISP_VECS(SH_CSS_MAX_S3ATBL_WIDTH * \ - (sizeof(struct ia_css_3a_output)/sizeof(int32_t))) -#define ISP_S3ATBL_HI_LO_STRIDE \ - (ISP_S3ATBL_VECTORS * ISP_VEC_NELEMS) -#define ISP_S3ATBL_HI_LO_STRIDE_BYTES \ - (sizeof(unsigned short) * ISP_S3ATBL_HI_LO_STRIDE) - -/* Viewfinder support */ -#define __ISP_MAX_VF_OUTPUT_WIDTH(width, left_crop) \ - (width - 2*ISP_VEC_NELEMS + ((left_crop) ? 2 * ISP_VEC_NELEMS : 0)) - -#define __ISP_VF_OUTPUT_WIDTH_VECS(out_width, vf_log_downscale) \ - (_ISP_VECS((out_width) >> (vf_log_downscale))) - -#define _ISP_VF_OUTPUT_WIDTH(vf_out_vecs) ((vf_out_vecs) * ISP_VEC_NELEMS) -#define _ISP_VF_OUTPUT_HEIGHT(out_height, vf_log_ds) \ - ((out_height) >> (vf_log_ds)) - -#define _ISP_LOG_VECTOR_STEP(mode) \ - ((mode) == IA_CSS_BINARY_MODE_CAPTURE_PP ? 2 : 1) - -/* It is preferred to have not more than 2x scaling at one step - * in GDC (assumption is for capture_pp and yuv_scale stages) */ -#define MAX_PREFERRED_YUV_DS_PER_STEP 2 - -/* Rules for computing the internal width. This is extremely complicated - * and definitely needs to be commented and explained. */ -#define _ISP_LEFT_CROP_EXTRA(left_crop) ((left_crop) > 0 ? 
2*ISP_VEC_NELEMS : 0) - -#define __ISP_MIN_INTERNAL_WIDTH(num_chunks, pipelining, mode) \ - ((num_chunks) * (pipelining) * (1<<_ISP_LOG_VECTOR_STEP(mode)) * \ - ISP_VEC_NELEMS) - -#define __ISP_PADDED_OUTPUT_WIDTH(out_width, dvs_env_width, left_crop) \ - ((out_width) + MAX(dvs_env_width, _ISP_LEFT_CROP_EXTRA(left_crop))) - -#define __ISP_CHUNK_STRIDE_ISP(mode) \ - ((1<<_ISP_LOG_VECTOR_STEP(mode)) * ISP_VEC_NELEMS) - -#define __ISP_CHUNK_STRIDE_DDR(c_subsampling, num_chunks) \ - ((c_subsampling) * (num_chunks) * HIVE_ISP_DDR_WORD_BYTES) -#define __ISP_INTERNAL_WIDTH(out_width, \ - dvs_env_width, \ - left_crop, \ - mode, \ - c_subsampling, \ - num_chunks, \ - pipelining) \ - CEIL_MUL2(CEIL_MUL2(MAX(__ISP_PADDED_OUTPUT_WIDTH(out_width, \ - dvs_env_width, \ - left_crop), \ - __ISP_MIN_INTERNAL_WIDTH(num_chunks, \ - pipelining, \ - mode) \ - ), \ - __ISP_CHUNK_STRIDE_ISP(mode) \ - ), \ - __ISP_CHUNK_STRIDE_DDR(c_subsampling, num_chunks) \ - ) - -#define __ISP_INTERNAL_HEIGHT(out_height, dvs_env_height, top_crop) \ - ((out_height) + (dvs_env_height) + top_crop) - -/* @GC: Input can be up to sensor resolution when either bayer downscaling - * or raw binning is enabled. - * Also, during continuous mode, we need to align to 4*NWAY since input - * should support binning */ -#define _ISP_MAX_INPUT_WIDTH(max_internal_width, enable_ds, enable_fixed_bayer_ds, enable_raw_bin, \ - enable_continuous) \ - ((enable_ds) ? \ - SH_CSS_MAX_SENSOR_WIDTH :\ - (enable_fixed_bayer_ds) ? \ - CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC, 4*ISP_VEC_NELEMS) : \ - (enable_raw_bin) ? \ - CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH, 4*ISP_VEC_NELEMS) : \ - (enable_continuous) ? \ - SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH \ - : max_internal_width) - -#define _ISP_INPUT_WIDTH(internal_width, ds_input_width, enable_ds) \ - ((enable_ds) ? 
(ds_input_width) : (internal_width)) - -#define _ISP_MAX_INPUT_HEIGHT(max_internal_height, enable_ds, enable_fixed_bayer_ds, enable_raw_bin, \ - enable_continuous) \ - ((enable_ds) ? \ - SH_CSS_MAX_SENSOR_HEIGHT :\ - (enable_fixed_bayer_ds) ? \ - SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC : \ - (enable_raw_bin || enable_continuous) ? \ - SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT \ - : max_internal_height) - -#define _ISP_INPUT_HEIGHT(internal_height, ds_input_height, enable_ds) \ - ((enable_ds) ? (ds_input_height) : (internal_height)) - -#define SH_CSS_MAX_STAGES 8 /* primary_stage[1-6], capture_pp, vf_pp */ - -/* For CSI2+ input system, it requires extra paddinga from vmem */ -#ifdef CONFIG_CSI2_PLUS -#define _ISP_EXTRA_PADDING_VECS 2 -#else -#define _ISP_EXTRA_PADDING_VECS 0 -#endif /* CONFIG_CSI2_PLUS */ - -#endif /* _SH_CSS_DEFS_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h deleted file mode 100644 index 23044aad654f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h +++ /dev/null @@ -1,36 +0,0 @@ -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ - -#ifndef __SH_CSS_DVS_INFO_H__ -#define __SH_CSS_DVS_INFO_H__ - -#include - -/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */ -#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2)) - -/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */ -#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA)) - -/* Bilinear interpolation (HRT_GDC_BLI_MODE) is the supported method currently. - * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet */ -#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE - -#define DVS_INPUT_BYTES_PER_PIXEL (1) - -#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X)) - -#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA)) - -#endif /* __SH_CSS_DVS_INFO_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c deleted file mode 100644 index 8158ea40d069..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include - -#include -#include "platform_support.h" -#include "sh_css_firmware.h" - -#include "sh_css_defs.h" -#include "ia_css_debug.h" -#include "sh_css_internal.h" -#include "ia_css_isp_param.h" - -#include "memory_access.h" -#include "assert_support.h" -#include "string_support.h" - -#include "isp.h" /* PMEM_WIDTH_LOG2 */ - -#include "ia_css_isp_params.h" -#include "ia_css_isp_configs.h" -#include "ia_css_isp_states.h" - -#define _STR(x) #x -#define STR(x) _STR(x) - -struct firmware_header { - struct sh_css_fw_bi_file_h file_header; - struct ia_css_fw_info binary_header; -}; - -struct fw_param { - const char *name; - const void *buffer; -}; - -/* Warning: same order as SH_CSS_BINARY_ID_* */ -static struct firmware_header *firmware_header; - -/* The string STR is a place holder - * which will be replaced with the actual RELEASE_VERSION - * during package generation. Please do not modify */ -#ifndef ISP2401 -static const char *release_version = STR(irci_stable_candrpv_0415_20150521_0458); -#else -static const char *release_version = STR(irci_ecr-master_20150911_0724); -#endif - -#define MAX_FW_REL_VER_NAME 300 -static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---"; - -struct ia_css_fw_info sh_css_sp_fw; -struct ia_css_blob_descr *sh_css_blob_info; /* Only ISP blob info (no SP) */ -unsigned sh_css_num_binaries; /* This includes 1 SP binary */ - -static struct fw_param *fw_minibuffer; - - -char *sh_css_get_fw_version(void) -{ - return FW_rel_ver_name; -} - - -/* - * Split the loaded firmware into blobs - */ - -/* Setup sp/sp1 binary */ -static enum ia_css_err -setup_binary(struct ia_css_fw_info *fw, const char *fw_data, struct ia_css_fw_info *sh_css_fw, unsigned binary_id) -{ - const char *blob_data; - - if ((fw == NULL) || (fw_data == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - blob_data = fw_data + fw->blob.offset; - - *sh_css_fw = *fw; - - sh_css_fw->blob.code = vmalloc(fw->blob.size); - if (sh_css_fw->blob.code == NULL) - return 
IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - memcpy((void *)sh_css_fw->blob.code, blob_data, fw->blob.size); - sh_css_fw->blob.data = (char *)sh_css_fw->blob.code + fw->blob.data_source; - fw_minibuffer[binary_id].buffer = sh_css_fw->blob.code; - - return IA_CSS_SUCCESS; -} -enum ia_css_err -sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia_css_blob_descr *bd, unsigned index) -{ - const char *name; - const unsigned char *blob; - - if ((fw == NULL) || (bd == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - /* Special case: only one binary in fw */ - if (bi == NULL) bi = (const struct ia_css_fw_info *)fw; - - name = fw + bi->blob.prog_name_offset; - blob = (const unsigned char *)fw + bi->blob.offset; - - /* sanity check */ - if (bi->blob.size != bi->blob.text_size + bi->blob.icache_size + bi->blob.data_size + bi->blob.padding_size) { - /* sanity check, note the padding bytes added for section to DDR alignment */ - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - if ((bi->blob.offset % (1UL<<(ISP_PMEM_WIDTH_LOG2-3))) != 0) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - bd->blob = blob; - bd->header = *bi; - - if (bi->type == ia_css_isp_firmware || bi->type == ia_css_sp_firmware) { - char *namebuffer; - - namebuffer = kstrdup(name, GFP_KERNEL); - if (!namebuffer) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - bd->name = fw_minibuffer[index].name = namebuffer; - } else { - bd->name = name; - } - - if (bi->type == ia_css_isp_firmware) { - size_t paramstruct_size = sizeof(struct ia_css_memory_offsets); - size_t configstruct_size = sizeof(struct ia_css_config_memory_offsets); - size_t statestruct_size = sizeof(struct ia_css_state_memory_offsets); - - char *parambuf = kmalloc(paramstruct_size + configstruct_size + statestruct_size, - GFP_KERNEL); - if (!parambuf) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = NULL; - bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = NULL; - 
bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = NULL; - - fw_minibuffer[index].buffer = parambuf; - - /* copy ia_css_memory_offsets */ - memcpy(parambuf, (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_PARAM]), - paramstruct_size); - bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = parambuf; - - /* copy ia_css_config_memory_offsets */ - memcpy(parambuf + paramstruct_size, - (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_CONFIG]), - configstruct_size); - bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = parambuf + paramstruct_size; - - /* copy ia_css_state_memory_offsets */ - memcpy(parambuf + paramstruct_size + configstruct_size, - (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_STATE]), - statestruct_size); - bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = parambuf + paramstruct_size + configstruct_size; - } - return IA_CSS_SUCCESS; -} - -bool -sh_css_check_firmware_version(const char *fw_data) -{ - struct sh_css_fw_bi_file_h *file_header; - - firmware_header = (struct firmware_header *)fw_data; - file_header = &firmware_header->file_header; - - if (strcmp(file_header->version, release_version) != 0) { - return false; - } else { - /* firmware version matches */ - return true; - } -} - -enum ia_css_err -sh_css_load_firmware(const char *fw_data, - unsigned int fw_size) -{ - unsigned i; - struct ia_css_fw_info *binaries; - struct sh_css_fw_bi_file_h *file_header; - bool valid_firmware = false; - - firmware_header = (struct firmware_header *)fw_data; - file_header = &firmware_header->file_header; - binaries = &firmware_header->binary_header; - strncpy(FW_rel_ver_name, file_header->version, min(sizeof(FW_rel_ver_name), sizeof(file_header->version)) - 1); - valid_firmware = sh_css_check_firmware_version(fw_data); - if (!valid_firmware) { -#if !defined(HRT_RTL) - IA_CSS_ERROR("CSS code version (%s) and firmware version (%s) mismatch!", - file_header->version, release_version); - return 
IA_CSS_ERR_VERSION_MISMATCH; -#endif - } else { - IA_CSS_LOG("successfully load firmware version %s", release_version); - } - - /* some sanity checks */ - if (!fw_data || fw_size < sizeof(struct sh_css_fw_bi_file_h)) - return IA_CSS_ERR_INTERNAL_ERROR; - - if (file_header->h_size != sizeof(struct sh_css_fw_bi_file_h)) - return IA_CSS_ERR_INTERNAL_ERROR; - - sh_css_num_binaries = file_header->binary_nr; - /* Only allocate memory for ISP blob info */ - if (sh_css_num_binaries > NUM_OF_SPS) { - sh_css_blob_info = kmalloc( - (sh_css_num_binaries - NUM_OF_SPS) * - sizeof(*sh_css_blob_info), GFP_KERNEL); - if (!sh_css_blob_info) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } else { - sh_css_blob_info = NULL; - } - - fw_minibuffer = kcalloc(sh_css_num_binaries, sizeof(struct fw_param), - GFP_KERNEL); - if (!fw_minibuffer) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - for (i = 0; i < sh_css_num_binaries; i++) { - struct ia_css_fw_info *bi = &binaries[i]; - /* note: the var below is made static as it is quite large; - if it is not static it ends up on the stack which could - cause issues for drivers - */ - static struct ia_css_blob_descr bd; - enum ia_css_err err; - - err = sh_css_load_blob_info(fw_data, bi, &bd, i); - - if (err != IA_CSS_SUCCESS) - return IA_CSS_ERR_INTERNAL_ERROR; - - if (bi->blob.offset + bi->blob.size > fw_size) - return IA_CSS_ERR_INTERNAL_ERROR; - - if (bi->type == ia_css_sp_firmware) { - if (i != SP_FIRMWARE) - return IA_CSS_ERR_INTERNAL_ERROR; - err = setup_binary(bi, fw_data, &sh_css_sp_fw, i); - if (err != IA_CSS_SUCCESS) - return err; - } else { - /* All subsequent binaries (including bootloaders) (i>NUM_OF_SPS) are ISP firmware */ - if (i < NUM_OF_SPS) - return IA_CSS_ERR_INTERNAL_ERROR; - - if (bi->type != ia_css_isp_firmware) - return IA_CSS_ERR_INTERNAL_ERROR; - if (sh_css_blob_info == NULL) /* cannot happen but KW does not see this */ - return IA_CSS_ERR_INTERNAL_ERROR; - sh_css_blob_info[i - NUM_OF_SPS] = bd; - } - } - - return 
IA_CSS_SUCCESS; -} - -void sh_css_unload_firmware(void) -{ - - /* release firmware minibuffer */ - if (fw_minibuffer) { - unsigned int i = 0; - for (i = 0; i < sh_css_num_binaries; i++) { - if (fw_minibuffer[i].name) - kfree((void *)fw_minibuffer[i].name); - if (fw_minibuffer[i].buffer) - vfree((void *)fw_minibuffer[i].buffer); - } - kfree(fw_minibuffer); - fw_minibuffer = NULL; - } - - memset(&sh_css_sp_fw, 0, sizeof(sh_css_sp_fw)); - kfree(sh_css_blob_info); - sh_css_blob_info = NULL; - sh_css_num_binaries = 0; -} - -hrt_vaddress -sh_css_load_blob(const unsigned char *blob, unsigned size) -{ - hrt_vaddress target_addr = mmgr_malloc(size); - /* this will allocate memory aligned to a DDR word boundary which - is required for the CSS DMA to read the instructions. */ - - assert(blob != NULL); - if (target_addr) - mmgr_store(target_addr, blob, size); - return target_addr; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.h deleted file mode 100644 index 588aabde8a86..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _SH_CSS_FIRMWARE_H_ -#define _SH_CSS_FIRMWARE_H_ - -#include - -#include -#include - -/* This is for the firmware loaded from user space */ -struct sh_css_fw_bi_file_h { - char version[64]; /* branch tag + week day + time */ - int binary_nr; /* Number of binaries */ - unsigned int h_size; /* sizeof(struct sh_css_fw_bi_file_h) */ -}; - -extern struct ia_css_fw_info sh_css_sp_fw; -#if defined(HAS_BL) -extern struct ia_css_fw_info sh_css_bl_fw; -#endif /* HAS_BL */ -extern struct ia_css_blob_descr *sh_css_blob_info; -extern unsigned sh_css_num_binaries; - -char -*sh_css_get_fw_version(void); - -bool -sh_css_check_firmware_version(const char *fw_data); - -enum ia_css_err -sh_css_load_firmware(const char *fw_data, - unsigned int fw_size); - -void sh_css_unload_firmware(void); - -hrt_vaddress sh_css_load_blob(const unsigned char *blob, unsigned size); - -enum ia_css_err -sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia_css_blob_descr *bd, unsigned int i); - -#endif /* _SH_CSS_FIRMWARE_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_frac.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_frac.h deleted file mode 100644 index 90a63b3921e6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_frac.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __SH_CSS_FRAC_H -#define __SH_CSS_FRAC_H - -#include - -#define sISP_REG_BIT ISP_VEC_ELEMBITS -#define uISP_REG_BIT ((unsigned)(sISP_REG_BIT-1)) -#define sSHIFT (16-sISP_REG_BIT) -#define uSHIFT ((unsigned)(16-uISP_REG_BIT)) -#define sFRACTION_BITS_FITTING(a) (a-sSHIFT) -#define uFRACTION_BITS_FITTING(a) ((unsigned)(a-uSHIFT)) -#define sISP_VAL_MIN (-(1<>sSHIFT) >> max(sFRACTION_BITS_FITTING(a)-(b), 0)), \ - sISP_VAL_MIN), sISP_VAL_MAX) -#define uDIGIT_FITTING(v, a, b) \ - min((unsigned)max((unsigned)(((v)>>uSHIFT) \ - >> max((int)(uFRACTION_BITS_FITTING(a)-(b)), 0)), \ - uISP_VAL_MIN), uISP_VAL_MAX) - -#endif /* __SH_CSS_FRAC_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_host_data.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_host_data.c deleted file mode 100644 index 348183a221a8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_host_data.c +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include -#include -#include - -struct ia_css_host_data *ia_css_host_data_allocate(size_t size) -{ - struct ia_css_host_data *me; - - me = kmalloc(sizeof(struct ia_css_host_data), GFP_KERNEL); - if (!me) - return NULL; - me->size = (uint32_t)size; - me->address = sh_css_malloc(size); - if (!me->address) { - kfree(me); - return NULL; - } - return me; -} - -void ia_css_host_data_free(struct ia_css_host_data *me) -{ - if (me) { - sh_css_free(me->address); - me->address = NULL; - kfree(me); - } -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c deleted file mode 100644 index 716d808d56db..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "platform_support.h" - -#include "sh_css_hrt.h" -#include "ia_css_debug.h" - -#include "device_access.h" - -#define __INLINE_EVENT__ -#include "event_fifo.h" -#define __INLINE_SP__ -#include "sp.h" -#define __INLINE_ISP__ -#include "isp.h" -#define __INLINE_IRQ__ -#include "irq.h" -#define __INLINE_FIFO_MONITOR__ -#include "fifo_monitor.h" - -/* System independent */ -#include "sh_css_internal.h" - -bool sh_css_hrt_system_is_idle(void) -{ - bool not_idle = false, idle; - fifo_channel_t ch; - - idle = sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT); - not_idle |= !idle; - if (!idle) - IA_CSS_WARNING("SP not idle"); - - idle = isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT); - not_idle |= !idle; - if (!idle) - IA_CSS_WARNING("ISP not idle"); - - for (ch=0; ch -#include - -#include - -/* SP access */ -void sh_css_hrt_sp_start_si(void); - -void sh_css_hrt_sp_start_copy_frame(void); - -void sh_css_hrt_sp_start_isp(void); - -enum ia_css_err sh_css_hrt_sp_wait(void); - -bool sh_css_hrt_system_is_idle(void); - -#endif /* _SH_CSS_HRT_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h deleted file mode 100644 index 161122e1bcbc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h +++ /dev/null @@ -1,1089 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _SH_CSS_INTERNAL_H_ -#define _SH_CSS_INTERNAL_H_ - -#include -#include -#include -#include -#include - -#if !defined(HAS_NO_INPUT_FORMATTER) -#include "input_formatter.h" -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) -#include "input_system.h" -#endif - -#include "ia_css_types.h" -#include "ia_css_acc_types.h" -#include "ia_css_buffer.h" - -#include "ia_css_binary.h" -#if !defined(__ISP) -#include "sh_css_firmware.h" /* not needed/desired on SP/ISP */ -#endif -#include "sh_css_legacy.h" -#include "sh_css_defs.h" -#include "sh_css_uds.h" -#include "dma.h" /* N_DMA_CHANNEL_ID */ -#include "ia_css_circbuf_comm.h" /* Circular buffer */ -#include "ia_css_frame_comm.h" -#include "ia_css_3a.h" -#include "ia_css_dvs.h" -#include "ia_css_metadata.h" -#include "runtime/bufq/interface/ia_css_bufq.h" -#include "ia_css_timer.h" - -/* TODO: Move to a more suitable place when sp pipeline design is done. */ -#define IA_CSS_NUM_CB_SEM_READ_RESOURCE 2 -#define IA_CSS_NUM_CB_SEM_WRITE_RESOURCE 1 -#define IA_CSS_NUM_CBS 2 -#define IA_CSS_CB_MAX_ELEMS 2 - -/* Use case specific. index limited to IA_CSS_NUM_CB_SEM_READ_RESOURCE or - * IA_CSS_NUM_CB_SEM_WRITE_RESOURCE for read and write respectively. - * TODO: Enforce the limitation above. -*/ -#define IA_CSS_COPYSINK_SEM_INDEX 0 -#define IA_CSS_TAGGER_SEM_INDEX 1 - -/* Force generation of output event. Used by acceleration pipe. 
*/ -#define IA_CSS_POST_OUT_EVENT_FORCE 2 - -#define SH_CSS_MAX_BINARY_NAME 64 - -#define SP_DEBUG_NONE (0) -#define SP_DEBUG_DUMP (1) -#define SP_DEBUG_COPY (2) -#define SP_DEBUG_TRACE (3) -#define SP_DEBUG_MINIMAL (4) - -#define SP_DEBUG SP_DEBUG_NONE -#define SP_DEBUG_MINIMAL_OVERWRITE 1 - -#define SH_CSS_TNR_BIT_DEPTH 8 -#define SH_CSS_REF_BIT_DEPTH 8 - -/* keep next up to date with the definition for MAX_CB_ELEMS_FOR_TAGGER in tagger.sp.c */ -#if defined(HAS_SP_2400) -#define NUM_CONTINUOUS_FRAMES 15 -#else -#define NUM_CONTINUOUS_FRAMES 10 -#endif -#define NUM_MIPI_FRAMES_PER_STREAM 2 - -#define NUM_ONLINE_INIT_CONTINUOUS_FRAMES 2 - -#define NR_OF_PIPELINES IA_CSS_PIPE_ID_NUM /* Must match with IA_CSS_PIPE_ID_NUM */ - -#define SH_CSS_MAX_IF_CONFIGS 3 /* Must match with IA_CSS_NR_OF_CONFIGS (not defined yet).*/ -#define SH_CSS_IF_CONFIG_NOT_NEEDED 0xFF - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -#define SH_CSS_ENABLE_METADATA -#endif - -#if defined(SH_CSS_ENABLE_METADATA) && !defined(USE_INPUT_SYSTEM_VERSION_2401) -#define SH_CSS_ENABLE_METADATA_THREAD -#endif - - - /* - * SH_CSS_MAX_SP_THREADS: - * sp threads visible to host with connected communication queues - * these threads are capable of running an image pipe - * SH_CSS_MAX_SP_INTERNAL_THREADS: - * internal sp service threads, no communication queues to host - * these threads can't be used as image pipe - */ - -#if defined(SH_CSS_ENABLE_METADATA_THREAD) -#define SH_CSS_SP_INTERNAL_METADATA_THREAD 1 -#else -#define SH_CSS_SP_INTERNAL_METADATA_THREAD 0 -#endif - -#define SH_CSS_SP_INTERNAL_SERVICE_THREAD 1 - -#ifdef __DISABLE_UNUSED_THREAD__ - #define SH_CSS_MAX_SP_THREADS 0 -#else - #define SH_CSS_MAX_SP_THREADS 5 -#endif - -#define SH_CSS_MAX_SP_INTERNAL_THREADS (\ - SH_CSS_SP_INTERNAL_SERVICE_THREAD +\ - SH_CSS_SP_INTERNAL_METADATA_THREAD) - -#define SH_CSS_MAX_PIPELINES SH_CSS_MAX_SP_THREADS - -/** - * The C99 standard does not specify the exact object 
representation of structs; - * the representation is compiler dependent. - * - * The structs that are communicated between host and SP/ISP should have the - * exact same object representation. The compiler that is used to compile the - * firmware is hivecc. - * - * To check if a different compiler, used to compile a host application, uses - * another object representation, macros are defined specifying the size of - * the structs as expected by the firmware. - * - * A host application shall verify that a sizeof( ) of the struct is equal to - * the SIZE_OF_XXX macro of the corresponding struct. If they are not - * equal, functionality will break. - */ -#define CALC_ALIGNMENT_MEMBER(x, y) (CEIL_MUL(x, y) - x) -#define SIZE_OF_HRT_VADDRESS sizeof(hive_uint32) -#define SIZE_OF_IA_CSS_PTR sizeof(uint32_t) - -/* Number of SP's */ -#define NUM_OF_SPS 1 - -#define NUM_OF_BLS 0 - -/* Enum for order of Binaries */ -enum sh_css_order_binaries { - SP_FIRMWARE = 0, - ISP_FIRMWARE -}; - - /* - * JB: keep next enum in sync with thread id's - * and pipe id's - */ -enum sh_css_pipe_config_override { - SH_CSS_PIPE_CONFIG_OVRD_NONE = 0, - SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD = 0xffff -}; - -enum host2sp_commands { - host2sp_cmd_error = 0, - /* - * The host2sp_cmd_ready command is the only command written by the SP - * It acknowledges that is previous command has been received. - * (this does not mean that the command has been executed) - * It also indicates that a new command can be send (it is a queue - * with depth 1). - */ - host2sp_cmd_ready = 1, - /* Command written by the Host */ - host2sp_cmd_dummy, /* No action, can be used as watchdog */ - host2sp_cmd_start_flash, /* Request SP to start the flash */ - host2sp_cmd_terminate, /* SP should terminate itself */ - N_host2sp_cmd -}; - -/* Enumeration used to indicate the events that are produced by - * the SP and consumed by the Host. - * - * !!!IMPORTANT!!! 
KEEP THE FOLLOWING IN SYNC: - * 1) "enum ia_css_event_type" (ia_css_event_public.h) - * 2) "enum sh_css_sp_event_type" (sh_css_internal.h) - * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c) - * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c) - */ -enum sh_css_sp_event_type { - SH_CSS_SP_EVENT_OUTPUT_FRAME_DONE, - SH_CSS_SP_EVENT_SECOND_OUTPUT_FRAME_DONE, - SH_CSS_SP_EVENT_VF_OUTPUT_FRAME_DONE, - SH_CSS_SP_EVENT_SECOND_VF_OUTPUT_FRAME_DONE, - SH_CSS_SP_EVENT_3A_STATISTICS_DONE, - SH_CSS_SP_EVENT_DIS_STATISTICS_DONE, - SH_CSS_SP_EVENT_PIPELINE_DONE, - SH_CSS_SP_EVENT_FRAME_TAGGED, - SH_CSS_SP_EVENT_INPUT_FRAME_DONE, - SH_CSS_SP_EVENT_METADATA_DONE, - SH_CSS_SP_EVENT_LACE_STATISTICS_DONE, - SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE, - SH_CSS_SP_EVENT_TIMER, - SH_CSS_SP_EVENT_PORT_EOF, - SH_CSS_SP_EVENT_FW_WARNING, - SH_CSS_SP_EVENT_FW_ASSERT, - SH_CSS_SP_EVENT_NR_OF_TYPES /* must be last */ -}; - -/* xmem address map allocation per pipeline, css pointers */ -/* Note that the struct below should only consist of hrt_vaddress-es - Otherwise this will cause a fail in the function ref_sh_css_ddr_address_map - */ -struct sh_css_ddr_address_map { - hrt_vaddress isp_param; - hrt_vaddress isp_mem_param[SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES]; - hrt_vaddress macc_tbl; - hrt_vaddress fpn_tbl; - hrt_vaddress sc_tbl; - hrt_vaddress tetra_r_x; - hrt_vaddress tetra_r_y; - hrt_vaddress tetra_gr_x; - hrt_vaddress tetra_gr_y; - hrt_vaddress tetra_gb_x; - hrt_vaddress tetra_gb_y; - hrt_vaddress tetra_b_x; - hrt_vaddress tetra_b_y; - hrt_vaddress tetra_ratb_x; - hrt_vaddress tetra_ratb_y; - hrt_vaddress tetra_batr_x; - hrt_vaddress tetra_batr_y; - hrt_vaddress dvs_6axis_params_y; -}; -#define SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT \ - (SIZE_OF_HRT_VADDRESS + \ - (SH_CSS_MAX_STAGES * IA_CSS_NUM_MEMORIES * SIZE_OF_HRT_VADDRESS) + \ - (16 * SIZE_OF_HRT_VADDRESS)) - -/* xmem address map allocation per pipeline */ -struct sh_css_ddr_address_map_size { - 
size_t isp_param; - size_t isp_mem_param[SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES]; - size_t macc_tbl; - size_t fpn_tbl; - size_t sc_tbl; - size_t tetra_r_x; - size_t tetra_r_y; - size_t tetra_gr_x; - size_t tetra_gr_y; - size_t tetra_gb_x; - size_t tetra_gb_y; - size_t tetra_b_x; - size_t tetra_b_y; - size_t tetra_ratb_x; - size_t tetra_ratb_y; - size_t tetra_batr_x; - size_t tetra_batr_y; - size_t dvs_6axis_params_y; -}; - -struct sh_css_ddr_address_map_compound { - struct sh_css_ddr_address_map map; - struct sh_css_ddr_address_map_size size; -}; - -struct ia_css_isp_parameter_set_info { - struct sh_css_ddr_address_map mem_map;/** pointers to Parameters in ISP format IMPT: - This should be first member of this struct */ - uint32_t isp_parameters_id;/** Unique ID to track which config was actually applied to a particular frame */ - ia_css_ptr output_frame_ptr;/** Output frame to which this config has to be applied (optional) */ -}; - -/* this struct contains all arguments that can be passed to - a binary. It depends on the binary which ones are used. 
*/ -struct sh_css_binary_args { - struct ia_css_frame *in_frame; /* input frame */ - struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; /* reference input frame */ -#ifndef ISP2401 - struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES]; /* tnr frames */ -#else - struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES]; /* tnr frames */ -#endif - struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; /* output frame */ - struct ia_css_frame *out_vf_frame; /* viewfinder output frame */ - bool copy_vf; - bool copy_output; - unsigned vf_downscale_log2; -}; - -#if SP_DEBUG == SP_DEBUG_DUMP - -#define SH_CSS_NUM_SP_DEBUG 48 - -struct sh_css_sp_debug_state { - unsigned int error; - unsigned int debug[SH_CSS_NUM_SP_DEBUG]; -}; - -#elif SP_DEBUG == SP_DEBUG_COPY - -#define SH_CSS_SP_DBG_TRACE_DEPTH (40) - -struct sh_css_sp_debug_trace { - uint16_t frame; - uint16_t line; - uint16_t pixel_distance; - uint16_t mipi_used_dword; - uint16_t sp_index; -}; - -struct sh_css_sp_debug_state { - uint16_t if_start_line; - uint16_t if_start_column; - uint16_t if_cropped_height; - uint16_t if_cropped_width; - unsigned int index; - struct sh_css_sp_debug_trace - trace[SH_CSS_SP_DBG_TRACE_DEPTH]; -}; - -#elif SP_DEBUG == SP_DEBUG_TRACE - -#if 1 -/* Example of just one global trace */ -#define SH_CSS_SP_DBG_NR_OF_TRACES (1) -#define SH_CSS_SP_DBG_TRACE_DEPTH (40) -#else -/* E.g. if you like seperate traces for 4 threads */ -#define SH_CSS_SP_DBG_NR_OF_TRACES (4) -#define SH_CSS_SP_DBG_TRACE_DEPTH (10) -#endif - -#define SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS (13) - -struct sh_css_sp_debug_trace { - uint16_t time_stamp; - uint16_t location; /* bit 15..13 = file_id, 12..0 = line nr. 
*/ - uint32_t data; -}; - -struct sh_css_sp_debug_state { - struct sh_css_sp_debug_trace - trace[SH_CSS_SP_DBG_NR_OF_TRACES][SH_CSS_SP_DBG_TRACE_DEPTH]; - uint16_t index_last[SH_CSS_SP_DBG_NR_OF_TRACES]; - uint8_t index[SH_CSS_SP_DBG_NR_OF_TRACES]; -}; - -#elif SP_DEBUG == SP_DEBUG_MINIMAL - -#define SH_CSS_NUM_SP_DEBUG 128 - -struct sh_css_sp_debug_state { - unsigned int error; - unsigned int debug[SH_CSS_NUM_SP_DEBUG]; -}; - -#endif - - -struct sh_css_sp_debug_command { - /* - * The DMA software-mask, - * Bit 31...24: unused. - * Bit 23...16: unused. - * Bit 15...08: reading-request enabling bits for DMA channel 7..0 - * Bit 07...00: writing-reqeust enabling bits for DMA channel 7..0 - * - * For example, "0...0 0...0 11111011 11111101" indicates that the - * writing request through DMA Channel 1 and the reading request - * through DMA channel 2 are both disabled. The others are enabled. - */ - uint32_t dma_sw_reg; -}; - -#if !defined(HAS_NO_INPUT_FORMATTER) -/* SP input formatter configuration.*/ -struct sh_css_sp_input_formatter_set { - uint32_t stream_format; - input_formatter_cfg_t config_a; - input_formatter_cfg_t config_b; -}; -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) -#define IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT (3) -#endif - -/* SP configuration information */ -struct sh_css_sp_config { - uint8_t no_isp_sync; /* Signal host immediately after start */ - uint8_t enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */ - uint8_t lock_all; - /** If raw buffer locking is enabled, this flag indicates whether raw - frames are locked when their EOF event is successfully sent to the - host (true) or when they are passed to the preview/video pipe - (false). */ -#if !defined(HAS_NO_INPUT_FORMATTER) - struct { - uint8_t a_changed; - uint8_t b_changed; - uint8_t isp_2ppc; - struct sh_css_sp_input_formatter_set set[SH_CSS_MAX_IF_CONFIGS]; /* CSI-2 port is used as index. 
*/ - } input_formatter; -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) - sync_generator_cfg_t sync_gen; - tpg_cfg_t tpg; - prbs_cfg_t prbs; - input_system_cfg_t input_circuit; - uint8_t input_circuit_cfg_changed; - uint32_t mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT]; -#endif -#if !defined(HAS_NO_INPUT_SYSTEM) - uint8_t enable_isys_event_queue; -#endif - uint8_t disable_cont_vf; -}; - -enum sh_css_stage_type { - SH_CSS_SP_STAGE_TYPE = 0, - SH_CSS_ISP_STAGE_TYPE = 1 -}; -#define SH_CSS_NUM_STAGE_TYPES 2 - -#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS (1 << 0) -#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS_MASK \ - ((SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << SH_CSS_MAX_SP_THREADS)-1) - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401) -struct sh_css_sp_pipeline_terminal { - union { - /* Input System 2401 */ - virtual_input_system_stream_t virtual_input_system_stream[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH]; - } context; - /* - * TODO - * - Remove "virtual_input_system_cfg" when the ISYS2401 DLI is ready. - */ - union { - /* Input System 2401 */ - virtual_input_system_stream_cfg_t virtual_input_system_stream_cfg[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH]; - } ctrl; -}; - -struct sh_css_sp_pipeline_io { - struct sh_css_sp_pipeline_terminal input; - /* pqiao: comment out temporarily to save dmem */ - /*struct sh_css_sp_pipeline_terminal output;*/ -}; - -/* This struct tracks how many streams are registered per CSI port. - * This is used to track which streams have already been configured. - * Only when all streams are configured, the CSI RX is started for that port. 
- */ -struct sh_css_sp_pipeline_io_status { - uint32_t active[N_INPUT_SYSTEM_CSI_PORT]; /** registered streams */ - uint32_t running[N_INPUT_SYSTEM_CSI_PORT]; /** configured streams */ -}; - -#endif -enum sh_css_port_dir { - SH_CSS_PORT_INPUT = 0, - SH_CSS_PORT_OUTPUT = 1 -}; - -enum sh_css_port_type { - SH_CSS_HOST_TYPE = 0, - SH_CSS_COPYSINK_TYPE = 1, - SH_CSS_TAGGERSINK_TYPE = 2 -}; - -/* Pipe inout settings: output port on 7-4bits, input port on 3-0bits */ -#define SH_CSS_PORT_FLD_WIDTH_IN_BITS (4) -#define SH_CSS_PORT_TYPE_BIT_FLD(pt) (0x1 << (pt)) -#define SH_CSS_PORT_FLD(pd) ((pd) ? SH_CSS_PORT_FLD_WIDTH_IN_BITS : 0) -#define SH_CSS_PIPE_PORT_CONFIG_ON(p, pd, pt) ((p) |= (SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd))) -#define SH_CSS_PIPE_PORT_CONFIG_OFF(p, pd, pt) ((p) &= ~(SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd))) -#define SH_CSS_PIPE_PORT_CONFIG_SET(p, pd, pt, val) ((val) ? \ - SH_CSS_PIPE_PORT_CONFIG_ON(p, pd, pt) : SH_CSS_PIPE_PORT_CONFIG_OFF(p, pd, pt)) -#define SH_CSS_PIPE_PORT_CONFIG_GET(p, pd, pt) ((p) & (SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd))) -#define SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(p) \ - (!(SH_CSS_PIPE_PORT_CONFIG_GET(p, SH_CSS_PORT_INPUT, SH_CSS_HOST_TYPE) && \ - SH_CSS_PIPE_PORT_CONFIG_GET(p, SH_CSS_PORT_OUTPUT, SH_CSS_HOST_TYPE))) - -#define IA_CSS_ACQUIRE_ISP_POS 31 - -/* Flags for metadata processing */ -#define SH_CSS_METADATA_ENABLED 0x01 -#define SH_CSS_METADATA_PROCESSED 0x02 -#define SH_CSS_METADATA_OFFLINE_MODE 0x04 -#define SH_CSS_METADATA_WAIT_INPUT 0x08 - -/* @brief Free an array of metadata buffers. - * - * @param[in] num_bufs Number of metadata buffers to be freed. - * @param[in] bufs Pointer of array of metadata buffers. - * - * This function frees an array of metadata buffers. 
- */ -void -ia_css_metadata_free_multiple(unsigned int num_bufs, struct ia_css_metadata **bufs); - -/* Macro for handling pipe_qos_config */ -#define QOS_INVALID (~0U) -#define QOS_ALL_STAGES_DISABLED (0U) -#define QOS_STAGE_MASK(num) (0x00000001 << num) -#define SH_CSS_IS_QOS_PIPE(pipe) ((pipe)->pipe_qos_config != QOS_INVALID) -#define SH_CSS_QOS_STAGE_ENABLE(pipe, num) ((pipe)->pipe_qos_config |= QOS_STAGE_MASK(num)) -#define SH_CSS_QOS_STAGE_DISABLE(pipe, num) ((pipe)->pipe_qos_config &= ~QOS_STAGE_MASK(num)) -#define SH_CSS_QOS_STAGE_IS_ENABLED(pipe, num) ((pipe)->pipe_qos_config & QOS_STAGE_MASK(num)) -#define SH_CSS_QOS_STAGE_IS_ALL_DISABLED(pipe) ((pipe)->pipe_qos_config == QOS_ALL_STAGES_DISABLED) -#define SH_CSS_QOS_MODE_PIPE_ADD(mode, pipe) ((mode) |= (0x1 << (pipe)->pipe_id)) -#define SH_CSS_QOS_MODE_PIPE_REMOVE(mode, pipe) ((mode) &= ~(0x1 << (pipe)->pipe_id)) -#define SH_CSS_IS_QOS_ONLY_MODE(mode) ((mode) == (0x1 << IA_CSS_PIPE_ID_ACC)) - -/* Information for a pipeline */ -struct sh_css_sp_pipeline { - uint32_t pipe_id; /* the pipe ID */ - uint32_t pipe_num; /* the dynamic pipe number */ - uint32_t thread_id; /* the sp thread ID */ - uint32_t pipe_config; /* the pipe config */ - uint32_t pipe_qos_config; /* Bitmap of multiple QOS extension fw state. - (0xFFFFFFFF) indicates non QOS pipe.*/ - uint32_t inout_port_config; - uint32_t required_bds_factor; - uint32_t dvs_frame_delay; -#if !defined(HAS_NO_INPUT_SYSTEM) - uint32_t input_system_mode; /* enum ia_css_input_mode */ - uint32_t port_id; /* port_id for input system */ -#endif - uint32_t num_stages; /* the pipe config */ - uint32_t running; /* needed for pipe termination */ - hrt_vaddress sp_stage_addr[SH_CSS_MAX_STAGES]; - hrt_vaddress scaler_pp_lut; /* Early bound LUT */ - uint32_t dummy; /* stage ptr is only used on sp but lives in - this struct; needs cleanup */ - int32_t num_execs; /* number of times to run if this is - an acceleration pipe. 
*/ -#if defined(SH_CSS_ENABLE_METADATA) - struct { - uint32_t format; /* Metadata format in hrt format */ - uint32_t width; /* Width of a line */ - uint32_t height; /* Number of lines */ - uint32_t stride; /* Stride (in bytes) per line */ - uint32_t size; /* Total size (in bytes) */ - hrt_vaddress cont_buf; /* Address of continuous buffer */ - } metadata; -#endif -#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - uint32_t output_frame_queue_id; -#endif - union { - struct { - uint32_t bytes_available; - } bin; - struct { - uint32_t height; - uint32_t width; - uint32_t padded_width; - uint32_t max_input_width; - uint32_t raw_bit_depth; - } raw; - } copy; -#ifdef ISP2401 - - /* Parameters passed to Shading Correction kernel. */ - struct { - uint32_t internal_frame_origin_x_bqs_on_sctbl; /* Origin X (bqs) of internal frame on shading table */ - uint32_t internal_frame_origin_y_bqs_on_sctbl; /* Origin Y (bqs) of internal frame on shading table */ - } shading; -#endif -}; - -/* - * The first frames (with comment Dynamic) can be dynamic or static - * The other frames (ref_in and below) can only be static - * Static means that the data addres will not change during the life time - * of the associated pipe. 
Dynamic means that the data address can - * change with every (frame) iteration of the associated pipe - * - * s3a and dis are now also dynamic but (stil) handled seperately - */ -#define SH_CSS_NUM_DYNAMIC_FRAME_IDS (3) - -struct ia_css_frames_sp { - struct ia_css_frame_sp in; - struct ia_css_frame_sp out[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; - struct ia_css_resolution effective_in_res; - struct ia_css_frame_sp out_vf; - struct ia_css_frame_sp_info internal_frame_info; - struct ia_css_buffer_sp s3a_buf; - struct ia_css_buffer_sp dvs_buf; -#if defined SH_CSS_ENABLE_METADATA - struct ia_css_buffer_sp metadata_buf; -#endif -}; - -/* Information for a single pipeline stage for an ISP */ -struct sh_css_isp_stage { - /* - * For compatability and portabilty, only types - * from "stdint.h" are allowed - * - * Use of "enum" and "bool" is prohibited - * Multiple boolean flags can be stored in an - * integer - */ - struct ia_css_blob_info blob_info; - struct ia_css_binary_info binary_info; - char binary_name[SH_CSS_MAX_BINARY_NAME]; - struct ia_css_isp_param_css_segments mem_initializers; -}; - -/* Information for a single pipeline stage */ -struct sh_css_sp_stage { - /* - * For compatability and portabilty, only types - * from "stdint.h" are allowed - * - * Use of "enum" and "bool" is prohibited - * Multiple boolean flags can be stored in an - * integer - */ - uint8_t num; /* Stage number */ - uint8_t isp_online; - uint8_t isp_copy_vf; - uint8_t isp_copy_output; - uint8_t sp_enable_xnr; - uint8_t isp_deci_log_factor; - uint8_t isp_vf_downscale_bits; - uint8_t deinterleaved; -/* - * NOTE: Programming the input circuit can only be done at the - * start of a session. 
It is illegal to program it during execution - * The input circuit defines the connectivity - */ - uint8_t program_input_circuit; -/* enum ia_css_pipeline_stage_sp_func func; */ - uint8_t func; - /* The type of the pipe-stage */ - /* enum sh_css_stage_type stage_type; */ - uint8_t stage_type; - uint8_t num_stripes; - uint8_t isp_pipe_version; - struct { - uint8_t vf_output; - uint8_t s3a; - uint8_t sdis; - uint8_t dvs_stats; - uint8_t lace_stats; - } enable; - /* Add padding to come to a word boundary */ - /* unsigned char padding[0]; */ - - struct sh_css_crop_pos sp_out_crop_pos; - struct ia_css_frames_sp frames; - struct ia_css_resolution dvs_envelope; - struct sh_css_uds_info uds; - hrt_vaddress isp_stage_addr; - hrt_vaddress xmem_bin_addr; - hrt_vaddress xmem_map_addr; - - uint16_t top_cropping; - uint16_t row_stripes_height; - uint16_t row_stripes_overlap_lines; - uint8_t if_config_index; /* Which should be applied by this stage. */ -}; - -/* - * Time: 2012-07-19, 17:40. - * Note: Add a new data memeber "debug" in "sh_css_sp_group". This - * data member is used to pass the debugging command from the - * Host to the SP. - * - * Time: Before 2012-07-19. - * Note: - * Group all host initialized SP variables into this struct. - * This is initialized every stage through dma. - * The stage part itself is transfered through sh_css_sp_stage. -*/ -struct sh_css_sp_group { - struct sh_css_sp_config config; - struct sh_css_sp_pipeline pipe[SH_CSS_MAX_SP_THREADS]; -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401) - struct sh_css_sp_pipeline_io pipe_io[SH_CSS_MAX_SP_THREADS]; - struct sh_css_sp_pipeline_io_status pipe_io_status; -#endif - struct sh_css_sp_debug_command debug; -}; - -/* Data in SP dmem that is set from the host every stage. 
*/ -struct sh_css_sp_per_frame_data { - /* ddr address of sp_group and sp_stage */ - hrt_vaddress sp_group_addr; -}; - -#define SH_CSS_NUM_SDW_IRQS 3 - -/* Output data from SP to css */ -struct sh_css_sp_output { - unsigned int bin_copy_bytes_copied; -#if SP_DEBUG != SP_DEBUG_NONE - struct sh_css_sp_debug_state debug; -#endif - unsigned int sw_interrupt_value[SH_CSS_NUM_SDW_IRQS]; -}; - -#define CONFIG_ON_FRAME_ENQUEUE() 0 - -/** - * @brief Data structure for the circular buffer. - * The circular buffer is empty if "start == end". The - * circular buffer is full if "(end + 1) % size == start". - */ -/* Variable Sized Buffer Queue Elements */ - -#define IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE 6 -#define IA_CSS_NUM_ELEMS_HOST2SP_PARAM_QUEUE 3 -#define IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE 6 - -#if !defined(HAS_NO_INPUT_SYSTEM) -/* sp-to-host queue is expected to be emptied in ISR since - * it is used instead of HW interrupts (due to HW design issue). - * We need one queue element per CSI port. */ -#define IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE (2 * N_CSI_PORTS) -/* The host-to-sp queue needs to allow for some delay - * in the emptying of this queue in the SP since there is no - * separate SP thread for this. 
*/ -#define IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE (2 * N_CSI_PORTS) -#else -#define IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE 0 -#define IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE 0 -#define IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE 0 -#endif - -#if defined(HAS_SP_2400) -#define IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE 13 -#define IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE 19 -#define IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE 26 /* holds events for all type of buffers, hence deeper */ -#else -#define IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE 6 -#define IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE 6 -#define IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE 6 -#endif - -struct sh_css_hmm_buffer { - union { - struct ia_css_isp_3a_statistics s3a; - struct ia_css_isp_dvs_statistics dis; - hrt_vaddress skc_dvs_statistics; - hrt_vaddress lace_stat; - struct ia_css_metadata metadata; - struct frame_data_wrapper { - hrt_vaddress frame_data; - uint32_t flashed; - uint32_t exp_id; - uint32_t isp_parameters_id; /** Unique ID to track which config was - actually applied to a particular frame */ -#if CONFIG_ON_FRAME_ENQUEUE() - struct sh_css_config_on_frame_enqueue config_on_frame_enqueue; -#endif - } frame; - hrt_vaddress ddr_ptrs; - } payload; - /* - * kernel_ptr is present for host administration purposes only. - * type is uint64_t in order to be 64-bit host compatible. - * uint64_t does not exist on SP/ISP. - * Size of the struct is checked by sp.hive.c. 
- */ -#if !defined(__ISP) - CSS_ALIGN(uint64_t cookie_ptr, 8); /* TODO: check if this alignment is needed */ - uint64_t kernel_ptr; -#else - CSS_ALIGN(struct { uint32_t a[2]; } cookie_ptr, 8); /* TODO: check if this alignment is needed */ - struct { uint32_t a[2]; } kernel_ptr; -#endif - struct ia_css_time_meas timing_data; - clock_value_t isys_eof_clock_tick; -}; -#if CONFIG_ON_FRAME_ENQUEUE() -#define SIZE_OF_FRAME_STRUCT \ - (SIZE_OF_HRT_VADDRESS + \ - (3 * sizeof(uint32_t)) + \ - sizeof(uint32_t)) -#else -#define SIZE_OF_FRAME_STRUCT \ - (SIZE_OF_HRT_VADDRESS + \ - (3 * sizeof(uint32_t))) -#endif - -#define SIZE_OF_PAYLOAD_UNION \ - (MAX(MAX(MAX(MAX( \ - SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT, \ - SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT), \ - SIZE_OF_IA_CSS_METADATA_STRUCT), \ - SIZE_OF_FRAME_STRUCT), \ - SIZE_OF_HRT_VADDRESS)) - -/* Do not use sizeof(uint64_t) since that does not exist of SP */ -#define SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT \ - (SIZE_OF_PAYLOAD_UNION + \ - CALC_ALIGNMENT_MEMBER(SIZE_OF_PAYLOAD_UNION, 8) + \ - 8 + \ - 8 + \ - SIZE_OF_IA_CSS_TIME_MEAS_STRUCT + \ - SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT + \ - CALC_ALIGNMENT_MEMBER(SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT, 8)) - -enum sh_css_queue_type { - sh_css_invalid_queue_type = -1, - sh_css_host2sp_buffer_queue, - sh_css_sp2host_buffer_queue, - sh_css_host2sp_psys_event_queue, - sh_css_sp2host_psys_event_queue, -#if !defined(HAS_NO_INPUT_SYSTEM) - sh_css_sp2host_isys_event_queue, - sh_css_host2sp_isys_event_queue, - sh_css_host2sp_tag_cmd_queue, -#endif -}; - -struct sh_css_event_irq_mask { - uint16_t or_mask; - uint16_t and_mask; -}; -#define SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT \ - (2 * sizeof(uint16_t)) - -struct host_sp_communication { - /* - * Don't use enum host2sp_commands, because the sizeof an enum is - * compiler dependant and thus non-portable - */ - uint32_t host2sp_command; - - /* - * The frame buffers that are reused by the - * copy pipe in the offline preview mode. 
- * - * host2sp_offline_frames[0]: the input frame of the preview pipe. - * host2sp_offline_frames[1]: the output frame of the copy pipe. - * - * TODO: - * Remove it when the Host and the SP is decoupled. - */ - hrt_vaddress host2sp_offline_frames[NUM_CONTINUOUS_FRAMES]; - hrt_vaddress host2sp_offline_metadata[NUM_CONTINUOUS_FRAMES]; - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - hrt_vaddress host2sp_mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM]; - hrt_vaddress host2sp_mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM]; - uint32_t host2sp_num_mipi_frames[N_CSI_PORTS]; -#endif - uint32_t host2sp_cont_avail_num_raw_frames; - uint32_t host2sp_cont_extra_num_raw_frames; - uint32_t host2sp_cont_target_num_raw_frames; - struct sh_css_event_irq_mask host2sp_event_irq_mask[NR_OF_PIPELINES]; - -}; - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \ - (sizeof(uint32_t) + \ - (NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \ - (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM * SIZE_OF_HRT_VADDRESS * 2) + \ - ((3 + N_CSI_PORTS) * sizeof(uint32_t)) + \ - (NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT)) -#else -#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \ - (sizeof(uint32_t) + \ - (NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \ - (3 * sizeof(uint32_t)) + \ - (NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT)) -#endif - -struct host_sp_queues { - /* - * Queues for the dynamic frame information, - * i.e. the "in_frame" buffer, the "out_frame" - * buffer and the "vf_out_frame" buffer. 
- */ - ia_css_circbuf_desc_t host2sp_buffer_queues_desc - [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES]; - ia_css_circbuf_elem_t host2sp_buffer_queues_elems - [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES] - [IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE]; - ia_css_circbuf_desc_t sp2host_buffer_queues_desc - [SH_CSS_MAX_NUM_QUEUES]; - ia_css_circbuf_elem_t sp2host_buffer_queues_elems - [SH_CSS_MAX_NUM_QUEUES][IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE]; - - /* - * The queues for the events. - */ - ia_css_circbuf_desc_t host2sp_psys_event_queue_desc; - ia_css_circbuf_elem_t host2sp_psys_event_queue_elems - [IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE]; - ia_css_circbuf_desc_t sp2host_psys_event_queue_desc; - ia_css_circbuf_elem_t sp2host_psys_event_queue_elems - [IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE]; - -#if !defined(HAS_NO_INPUT_SYSTEM) - /* - * The queues for the ISYS events. - */ - ia_css_circbuf_desc_t host2sp_isys_event_queue_desc; - ia_css_circbuf_elem_t host2sp_isys_event_queue_elems - [IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE]; - ia_css_circbuf_desc_t sp2host_isys_event_queue_desc; - ia_css_circbuf_elem_t sp2host_isys_event_queue_elems - [IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE]; - /* - * The queue for the tagger commands. - * CHECK: are these last two present on the 2401 ? 
- */ - ia_css_circbuf_desc_t host2sp_tag_cmd_queue_desc; - ia_css_circbuf_elem_t host2sp_tag_cmd_queue_elems - [IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE]; -#endif -}; - -#define SIZE_OF_QUEUES_ELEMS \ - (SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT * \ - ((SH_CSS_MAX_SP_THREADS * SH_CSS_MAX_NUM_QUEUES * IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE) + \ - (SH_CSS_MAX_NUM_QUEUES * IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE) + \ - (IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE) + \ - (IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE) + \ - (IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE) + \ - (IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE) + \ - (IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE))) - -#if !defined(HAS_NO_INPUT_SYSTEM) -#define IA_CSS_NUM_CIRCBUF_DESCS 5 -#else -#ifndef ISP2401 -#define IA_CSS_NUM_CIRCBUF_DESCS 3 -#else -#define IA_CSS_NUM_CIRCBUF_DESCS 2 -#endif -#endif - -#define SIZE_OF_QUEUES_DESC \ - ((SH_CSS_MAX_SP_THREADS * SH_CSS_MAX_NUM_QUEUES * \ - SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT) + \ - (SH_CSS_MAX_NUM_QUEUES * SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT) + \ - (IA_CSS_NUM_CIRCBUF_DESCS * SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT)) - -#define SIZE_OF_HOST_SP_QUEUES_STRUCT \ - (SIZE_OF_QUEUES_ELEMS + SIZE_OF_QUEUES_DESC) - -extern int (*sh_css_printf)(const char *fmt, va_list args); - -static inline void -sh_css_print(const char *fmt, ...) -{ - va_list ap; - - if (sh_css_printf) { - va_start(ap, fmt); - sh_css_printf(fmt, ap); - va_end(ap); - } -} - -static inline void -sh_css_vprint(const char *fmt, va_list args) -{ - if (sh_css_printf) - sh_css_printf(fmt, args); -} - -/* The following #if is there because this header file is also included - by SP and ISP code but they do not need this data and HIVECC has alignment - issue with the firmware struct/union's. - More permanent solution will be to refactor this include. 
-*/ -#if !defined(__ISP) -hrt_vaddress -sh_css_params_ddr_address_map(void); - -enum ia_css_err -sh_css_params_init(void); - -void -sh_css_params_uninit(void); - -void *sh_css_malloc(size_t size); - -void *sh_css_calloc(size_t N, size_t size); - -void sh_css_free(void *ptr); - -/* For Acceleration API: Flush FW (shared buffer pointer) arguments */ -void sh_css_flush(struct ia_css_acc_fw *fw); - - -void -sh_css_binary_args_reset(struct sh_css_binary_args *args); - -/* Check two frames for equality (format, resolution, bits per element) */ -bool -sh_css_frame_equal_types(const struct ia_css_frame *frame_a, - const struct ia_css_frame *frame_b); - -bool -sh_css_frame_info_equal_resolution(const struct ia_css_frame_info *info_a, - const struct ia_css_frame_info *info_b); - -void -sh_css_capture_enable_bayer_downscaling(bool enable); - -void -sh_css_binary_print(const struct ia_css_binary *binary); - -/* aligned argument of sh_css_frame_info_set_width can be used for an extra alignment requirement. - When 0, no extra alignment is done. 
*/ -void -sh_css_frame_info_set_width(struct ia_css_frame_info *info, - unsigned int width, - unsigned int aligned); - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) - -unsigned int -sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx); - -#endif - -hrt_vaddress -sh_css_store_sp_group_to_ddr(void); - -hrt_vaddress -sh_css_store_sp_stage_to_ddr(unsigned pipe, unsigned stage); - -hrt_vaddress -sh_css_store_isp_stage_to_ddr(unsigned pipe, unsigned stage); - - -void -sh_css_update_uds_and_crop_info( - const struct ia_css_binary_info *info, - const struct ia_css_frame_info *in_frame_info, - const struct ia_css_frame_info *out_frame_info, - const struct ia_css_resolution *dvs_env, - const struct ia_css_dz_config *zoom, - const struct ia_css_vector *motion_vector, - struct sh_css_uds_info *uds, /* out */ - struct sh_css_crop_pos *sp_out_crop_pos, /* out */ - bool enable_zoom - ); - -void -sh_css_invalidate_shading_tables(struct ia_css_stream *stream); - -struct ia_css_pipeline * -ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe); - -unsigned int -ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe); - -unsigned int -ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe); - -bool -sh_css_continuous_is_enabled(uint8_t pipe_num); - -struct ia_css_pipe * -find_pipe_by_num(uint32_t pipe_num); - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -void -ia_css_get_crop_offsets( - struct ia_css_pipe *pipe, - struct ia_css_frame_info *in_frame); -#endif -#endif /* !defined(__ISP) */ - -#endif /* _SH_CSS_INTERNAL_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h deleted file mode 100644 index 4fd25ba2cd0d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _SH_CSS_LEGACY_H_ -#define _SH_CSS_LEGACY_H_ - -#include -#include -#include -#include -#include -#include - -/* The pipe id type, distinguishes the kind of pipes that - * can be run in parallel. - */ -enum ia_css_pipe_id { - IA_CSS_PIPE_ID_PREVIEW, - IA_CSS_PIPE_ID_COPY, - IA_CSS_PIPE_ID_VIDEO, - IA_CSS_PIPE_ID_CAPTURE, - IA_CSS_PIPE_ID_YUVPP, -#ifndef ISP2401 - IA_CSS_PIPE_ID_ACC, - IA_CSS_PIPE_ID_NUM -#else - IA_CSS_PIPE_ID_ACC -#endif -}; -#ifdef ISP2401 -#define IA_CSS_PIPE_ID_NUM (IA_CSS_PIPE_ID_ACC+1) -#endif - -struct ia_css_pipe_extra_config { - bool enable_raw_binning; - bool enable_yuv_ds; - bool enable_high_speed; - bool enable_dvs_6axis; - bool enable_reduced_pipe; - bool enable_fractional_ds; - bool disable_vf_pp; -}; - -enum ia_css_err -ia_css_pipe_create_extra(const struct ia_css_pipe_config *config, - const struct ia_css_pipe_extra_config *extra_config, - struct ia_css_pipe **pipe); - -void -ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config *extra_config); - -enum ia_css_err -ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe, - enum ia_css_pipe_id *pipe_id); - -/* DEPRECATED. FPN is not supported. 
*/ -enum ia_css_err -sh_css_set_black_frame(struct ia_css_stream *stream, - const struct ia_css_frame *raw_black_frame); - -#ifndef ISP2401 -void -sh_css_enable_cont_capt(bool enable, bool stop_copy_preview); - -#endif -#endif /* _SH_CSS_LEGACY_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metadata.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metadata.c deleted file mode 100644 index ebdf84d4a138..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metadata.c +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* This file will contain the code to implement the functions declared in ia_css_metadata.h - and associated helper functions */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.c deleted file mode 100644 index 48e5542b3a43..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.c +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "assert_support.h" -#include "sh_css_metrics.h" - -#include "sp.h" -#include "isp.h" - -#include "sh_css_internal.h" - -#define MULTIPLE_PCS 0 -#define SUSPEND 0 -#define NOF_PCS 1 -#define RESUME_MASK 0x8 -#define STOP_MASK 0x0 - -static bool pc_histogram_enabled; -static struct sh_css_pc_histogram *isp_histogram; -static struct sh_css_pc_histogram *sp_histogram; - -struct sh_css_metrics sh_css_metrics; - -void -sh_css_metrics_start_frame(void) -{ - sh_css_metrics.frame_metrics.num_frames++; -} - -static void -clear_histogram(struct sh_css_pc_histogram *histogram) -{ - unsigned i; - - assert(histogram != NULL); - - for (i = 0; i < histogram->length; i++) { - histogram->run[i] = 0; - histogram->stall[i] = 0; - histogram->msink[i] = 0xFFFF; - } -} - -void -sh_css_metrics_enable_pc_histogram(bool enable) -{ - pc_histogram_enabled = enable; -} - -static void -make_histogram(struct sh_css_pc_histogram *histogram, unsigned length) -{ - assert(histogram != NULL); - - if (histogram->length) - return; - if (histogram->run) - return; - histogram->run = sh_css_malloc(length * sizeof(*histogram->run)); - if (!histogram->run) - return; - histogram->stall = sh_css_malloc(length * sizeof(*histogram->stall)); - if (!histogram->stall) - return; - histogram->msink = sh_css_malloc(length * sizeof(*histogram->msink)); - if (!histogram->msink) - return; - - histogram->length = length; - clear_histogram(histogram); -} - -static void -insert_binary_metrics(struct sh_css_binary_metrics **l, - struct sh_css_binary_metrics *metrics) -{ - assert(l != NULL); - assert(*l != NULL); - assert(metrics != NULL); - - for (; *l; l = &(*l)->next) - if (*l == metrics) - return; - - *l = metrics; - metrics->next = NULL; -} - -void 
-sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics) -{ - assert(metrics != NULL); - - if (!pc_histogram_enabled) - return; - - isp_histogram = &metrics->isp_histogram; - sp_histogram = &metrics->sp_histogram; - make_histogram(isp_histogram, ISP_PMEM_DEPTH); - make_histogram(sp_histogram, SP_PMEM_DEPTH); - insert_binary_metrics(&sh_css_metrics.binary_metrics, metrics); -} - -void -sh_css_metrics_sample_pcs(void) -{ - bool stall; - unsigned int pc; - unsigned int msink; - -#if SUSPEND - unsigned int sc = 0; - unsigned int stopped_sc = 0; - unsigned int resume_sc = 0; -#endif - - -#if MULTIPLE_PCS - int i; - unsigned int pc_tab[NOF_PCS]; - - for (i = 0; i < NOF_PCS; i++) - pc_tab[i] = 0; -#endif - - if (!pc_histogram_enabled) - return; - - if (isp_histogram) { -#if SUSPEND - /* STOP the ISP */ - isp_ctrl_store(ISP0_ID, ISP_SC_REG, STOP_MASK); -#endif - msink = isp_ctrl_load(ISP0_ID, ISP_CTRL_SINK_REG); -#if MULTIPLE_PCS - for (i = 0; i < NOF_PCS; i++) - pc_tab[i] = isp_ctrl_load(ISP0_ID, ISP_PC_REG); -#else - pc = isp_ctrl_load(ISP0_ID, ISP_PC_REG); -#endif - -#if SUSPEND - /* RESUME the ISP */ - isp_ctrl_store(ISP0_ID, ISP_SC_REG, RESUME_MASK); -#endif - isp_histogram->msink[pc] &= msink; - stall = (msink != 0x7FF); - - if (stall) - isp_histogram->stall[pc]++; - else - isp_histogram->run[pc]++; - } - - if (sp_histogram && 0) { - msink = sp_ctrl_load(SP0_ID, SP_CTRL_SINK_REG); - pc = sp_ctrl_load(SP0_ID, SP_PC_REG); - sp_histogram->msink[pc] &= msink; - stall = (msink != 0x7FF); - if (stall) - sp_histogram->stall[pc]++; - else - sp_histogram->run[pc]++; - } -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.h deleted file mode 100644 index 2ef9238d95ad..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. 
- * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _SH_CSS_METRICS_H_ -#define _SH_CSS_METRICS_H_ - -#include - -struct sh_css_pc_histogram { - unsigned length; - unsigned *run; - unsigned *stall; - unsigned *msink; -}; - -struct sh_css_binary_metrics { - unsigned mode; - unsigned id; - struct sh_css_pc_histogram isp_histogram; - struct sh_css_pc_histogram sp_histogram; - struct sh_css_binary_metrics *next; -}; - -struct ia_css_frame_metrics { - unsigned num_frames; -}; - -struct sh_css_metrics { - struct sh_css_binary_metrics *binary_metrics; - struct ia_css_frame_metrics frame_metrics; -}; - -extern struct sh_css_metrics sh_css_metrics; - -/* includes ia_css_binary.h, which depends on sh_css_metrics.h */ -#include "ia_css_types.h" - -/* Sample ISP and SP pc and add to histogram */ -void sh_css_metrics_enable_pc_histogram(bool enable); -void sh_css_metrics_start_frame(void); -void sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics); -void sh_css_metrics_sample_pcs(void); - -#endif /* _SH_CSS_METRICS_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c deleted file mode 100644 index a6a00024bae8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c +++ /dev/null @@ -1,749 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "ia_css_mipi.h" -#include "sh_css_mipi.h" -#include -#include "system_global.h" -#include "ia_css_err.h" -#include "ia_css_pipe.h" -#include "ia_css_stream_format.h" -#include "sh_css_stream_format.h" -#include "ia_css_stream_public.h" -#include "ia_css_frame_public.h" -#include "ia_css_input_port.h" -#include "ia_css_debug.h" -#include "sh_css_struct.h" -#include "sh_css_defs.h" -#include "sh_css_sp.h" /* sh_css_update_host2sp_mipi_frame sh_css_update_host2sp_num_mipi_frames ... */ -#include "sw_event_global.h" /* IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY */ - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -static uint32_t ref_count_mipi_allocation[N_CSI_PORTS]; /* Initialized in mipi_init */ -#endif - -enum ia_css_err -ia_css_mipi_frame_specify(const unsigned int size_mem_words, - const bool contiguous) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - my_css.size_mem_words = size_mem_words; - (void)contiguous; - - return err; -} - -#ifdef ISP2401 -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -/* - * Check if a source port or TPG/PRBS ID is valid - */ -static bool ia_css_mipi_is_source_port_valid(struct ia_css_pipe *pipe, - unsigned int *pport) -{ - bool ret = true; - unsigned int port = 0; - unsigned int max_ports = 0; - - switch (pipe->stream->config.mode) { - case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: - port = (unsigned int) pipe->stream->config.source.port.port; - max_ports = N_CSI_PORTS; - break; - case IA_CSS_INPUT_MODE_TPG: - 
port = (unsigned int) pipe->stream->config.source.tpg.id; - max_ports = N_CSS_TPG_IDS; - break; - case IA_CSS_INPUT_MODE_PRBS: - port = (unsigned int) pipe->stream->config.source.prbs.id; - max_ports = N_CSS_PRBS_IDS; - break; - default: - assert(false); - ret = false; - break; - } - - if (ret) { - assert(port < max_ports); - - if (port >= max_ports) - ret = false; - } - - *pport = port; - - return ret; -} -#endif - -#endif -/* Assumptions: - * - A line is multiple of 4 bytes = 1 word. - * - Each frame has SOF and EOF (each 1 word). - * - Each line has format header and optionally SOL and EOL (each 1 word). - * - Odd and even lines of YUV420 format are different in bites per pixel size. - * - Custom size of embedded data. - * -- Interleaved frames are not taken into account. - * -- Lines are multiples of 8B, and not necessary of (custom 3B, or 7B - * etc.). - * Result is given in DDR mem words, 32B or 256 bits - */ -enum ia_css_err -ia_css_mipi_frame_calculate_size(const unsigned int width, - const unsigned int height, - const enum atomisp_input_format format, - const bool hasSOLandEOL, - const unsigned int embedded_data_size_words, - unsigned int *size_mem_words) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - unsigned int bits_per_pixel = 0; - unsigned int even_line_bytes = 0; - unsigned int odd_line_bytes = 0; - unsigned int words_per_odd_line = 0; - unsigned int words_for_first_line = 0; - unsigned int words_per_even_line = 0; - unsigned int mem_words_per_even_line = 0; - unsigned int mem_words_per_odd_line = 0; - unsigned int mem_words_for_first_line = 0; - unsigned int mem_words_for_EOF = 0; - unsigned int mem_words = 0; - unsigned int width_padded = width; - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - /* The changes will be reverted as soon as RAW - * Buffers are deployed by the 2401 Input System - * in the non-continuous use scenario. 
- */ - width_padded += (2 * ISP_VEC_NELEMS); -#endif - - IA_CSS_ENTER("padded_width=%d, height=%d, format=%d, hasSOLandEOL=%d, embedded_data_size_words=%d\n", - width_padded, height, format, hasSOLandEOL, embedded_data_size_words); - - switch (format) { - case ATOMISP_INPUT_FORMAT_RAW_6: /* 4p, 3B, 24bits */ - bits_per_pixel = 6; break; - case ATOMISP_INPUT_FORMAT_RAW_7: /* 8p, 7B, 56bits */ - bits_per_pixel = 7; break; - case ATOMISP_INPUT_FORMAT_RAW_8: /* 1p, 1B, 8bits */ - case ATOMISP_INPUT_FORMAT_BINARY_8: /* 8bits, TODO: check. */ - case ATOMISP_INPUT_FORMAT_YUV420_8: /* odd 2p, 2B, 16bits, even 2p, 4B, 32bits */ - bits_per_pixel = 8; break; - case ATOMISP_INPUT_FORMAT_YUV420_10: /* odd 4p, 5B, 40bits, even 4p, 10B, 80bits */ - case ATOMISP_INPUT_FORMAT_RAW_10: /* 4p, 5B, 40bits */ -#if !defined(HAS_NO_PACKED_RAW_PIXELS) - /* The changes will be reverted as soon as RAW - * Buffers are deployed by the 2401 Input System - * in the non-continuous use scenario. - */ - bits_per_pixel = 10; -#else - bits_per_pixel = 16; -#endif - break; - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: /* 2p, 3B, 24bits */ - case ATOMISP_INPUT_FORMAT_RAW_12: /* 2p, 3B, 24bits */ - bits_per_pixel = 12; break; - case ATOMISP_INPUT_FORMAT_RAW_14: /* 4p, 7B, 56bits */ - bits_per_pixel = 14; break; - case ATOMISP_INPUT_FORMAT_RGB_444: /* 1p, 2B, 16bits */ - case ATOMISP_INPUT_FORMAT_RGB_555: /* 1p, 2B, 16bits */ - case ATOMISP_INPUT_FORMAT_RGB_565: /* 1p, 2B, 16bits */ - case ATOMISP_INPUT_FORMAT_YUV422_8: /* 2p, 4B, 32bits */ - bits_per_pixel = 16; break; - case ATOMISP_INPUT_FORMAT_RGB_666: /* 4p, 9B, 72bits */ - bits_per_pixel = 18; break; - case ATOMISP_INPUT_FORMAT_YUV422_10: /* 2p, 5B, 40bits */ - bits_per_pixel = 20; break; - case ATOMISP_INPUT_FORMAT_RGB_888: /* 1p, 3B, 24bits */ - bits_per_pixel = 24; break; - - case ATOMISP_INPUT_FORMAT_YUV420_16: /* Not supported */ - case ATOMISP_INPUT_FORMAT_YUV422_16: /* Not supported */ - case ATOMISP_INPUT_FORMAT_RAW_16: /* TODO: not 
specified in MIPI SPEC, check */ - default: - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */ - - /* Even lines for YUV420 formats are double in bits_per_pixel. */ - if (format == ATOMISP_INPUT_FORMAT_YUV420_8 - || format == ATOMISP_INPUT_FORMAT_YUV420_10 - || format == ATOMISP_INPUT_FORMAT_YUV420_16) { - even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */ - } else { - even_line_bytes = odd_line_bytes; - } - - /* a frame represented in memory: ()- optional; data - payload words. - * addr 0 1 2 3 4 5 6 7: - * first SOF (SOL) PACK_H data data data data data - * data data data data data data data data - * ... - * data data 0 0 0 0 0 0 - * second (EOL) (SOL) PACK_H data data data data data - * data data data data data data data data - * ... - * data data 0 0 0 0 0 0 - * ... - * last (EOL) EOF 0 0 0 0 0 0 - * - * Embedded lines are regular lines stored before the first and after - * payload lines. - */ - - words_per_odd_line = (odd_line_bytes + 3) >> 2; - /* ceil(odd_line_bytes/4); word = 4 bytes */ - words_per_even_line = (even_line_bytes + 3) >> 2; - words_for_first_line = words_per_odd_line + 2 + (hasSOLandEOL ? 1 : 0); - /* + SOF +packet header + optionally (SOL), but (EOL) is not in the first line */ - words_per_odd_line += (1 + (hasSOLandEOL ? 2 : 0)); - /* each non-first line has format header, and optionally (SOL) and (EOL). */ - words_per_even_line += (1 + (hasSOLandEOL ? 
2 : 0)); - - mem_words_per_odd_line = (words_per_odd_line + 7) >> 3; - /* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */ - mem_words_for_first_line = (words_for_first_line + 7) >> 3; - mem_words_per_even_line = (words_per_even_line + 7) >> 3; - mem_words_for_EOF = 1; /* last line consisit of the optional (EOL) and EOF */ - - mem_words = ((embedded_data_size_words + 7) >> 3) + - mem_words_for_first_line + - (((height + 1) >> 1) - 1) * mem_words_per_odd_line + - /* ceil (height/2) - 1 (first line is calculated separatelly) */ - (height >> 1) * mem_words_per_even_line + /* floor(height/2) */ - mem_words_for_EOF; - - *size_mem_words = mem_words; /* ceil(words/8); mem word is 32B = 8words. */ - /* Check if the above is still needed. */ - - IA_CSS_LEAVE_ERR(err); - return err; -} - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) -enum ia_css_err -ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port, - const unsigned int size_mem_words) -{ - uint32_t idx; - - enum ia_css_err err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - - OP___assert(port < N_CSI_PORTS); - OP___assert(size_mem_words != 0); - - for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT && - my_css.mipi_sizes_for_check[port][idx] != 0; - idx++) { /* do nothing */ - } - if (idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT) { - my_css.mipi_sizes_for_check[port][idx] = size_mem_words; - err = IA_CSS_SUCCESS; - } - - return err; -} -#endif - -void -mipi_init(void) -{ -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - unsigned int i; - - for (i = 0; i < N_CSI_PORTS; i++) - ref_count_mipi_allocation[i] = 0; -#endif -} - -enum ia_css_err -calculate_mipi_buff_size( - struct ia_css_stream_config *stream_cfg, - unsigned int *size_mem_words) -{ -#if !defined(USE_INPUT_SYSTEM_VERSION_2401) - enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR; - (void)stream_cfg; - (void)size_mem_words; -#else - unsigned int width; - unsigned 
int height; - enum atomisp_input_format format; - bool pack_raw_pixels; - - unsigned int width_padded; - unsigned int bits_per_pixel = 0; - - unsigned int even_line_bytes = 0; - unsigned int odd_line_bytes = 0; - - unsigned int words_per_odd_line = 0; - unsigned int words_per_even_line = 0; - - unsigned int mem_words_per_even_line = 0; - unsigned int mem_words_per_odd_line = 0; - - unsigned int mem_words_per_buff_line = 0; - unsigned int mem_words_per_buff = 0; - enum ia_css_err err = IA_CSS_SUCCESS; - - /** -#ifndef ISP2401 - * zhengjie.lu@intel.com - * -#endif - * NOTE - * - In the struct "ia_css_stream_config", there - * are two members: "input_config" and "isys_config". - * Both of them provide the same information, e.g. - * input_res and format. - * - * Question here is that: which one shall be used? - */ - width = stream_cfg->input_config.input_res.width; - height = stream_cfg->input_config.input_res.height; - format = stream_cfg->input_config.format; - pack_raw_pixels = stream_cfg->pack_raw_pixels; - /* end of NOTE */ - - /** -#ifndef ISP2401 - * zhengjie.lu@intel.com - * -#endif - * NOTE - * - The following code is derived from the - * existing code "ia_css_mipi_frame_calculate_size()". - * - * Question here is: why adding "2 * ISP_VEC_NELEMS" - * to "width_padded", but not making "width_padded" - * aligned with "2 * ISP_VEC_NELEMS"? - */ - /* The changes will be reverted as soon as RAW - * Buffers are deployed by the 2401 Input System - * in the non-continuous use scenario. - */ - width_padded = width + (2 * ISP_VEC_NELEMS); - /* end of NOTE */ - - IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n", - width_padded, height, format); - - bits_per_pixel = sh_css_stream_format_2_bits_per_subpixel(format); - bits_per_pixel = - (format == ATOMISP_INPUT_FORMAT_RAW_10 && pack_raw_pixels) ? 
bits_per_pixel : 16; - if (bits_per_pixel == 0) - return IA_CSS_ERR_INTERNAL_ERROR; - - odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */ - - /* Even lines for YUV420 formats are double in bits_per_pixel. */ - if (format == ATOMISP_INPUT_FORMAT_YUV420_8 - || format == ATOMISP_INPUT_FORMAT_YUV420_10) { - even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */ - } else { - even_line_bytes = odd_line_bytes; - } - - words_per_odd_line = (odd_line_bytes + 3) >> 2; - /* ceil(odd_line_bytes/4); word = 4 bytes */ - words_per_even_line = (even_line_bytes + 3) >> 2; - - mem_words_per_odd_line = (words_per_odd_line + 7) >> 3; - /* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */ - mem_words_per_even_line = (words_per_even_line + 7) >> 3; - - mem_words_per_buff_line = - (mem_words_per_odd_line > mem_words_per_even_line) ? mem_words_per_odd_line : mem_words_per_even_line; - mem_words_per_buff = mem_words_per_buff_line * height; - - *size_mem_words = mem_words_per_buff; - - IA_CSS_LEAVE_ERR(err); -#endif - return err; -} - -enum ia_css_err -allocate_mipi_frames(struct ia_css_pipe *pipe, struct ia_css_stream_info *info) -{ -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR; -#ifndef ISP2401 - unsigned int port; -#else - unsigned int port = 0; -#endif - struct ia_css_frame_info mipi_intermediate_info; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_frames(%p) enter:\n", pipe); - - assert(pipe != NULL); - assert(pipe->stream != NULL); - if ((pipe == NULL) || (pipe->stream == NULL)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_frames(%p) exit: pipe or stream is null.\n", - pipe); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - if (pipe->stream->config.online) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - 
"allocate_mipi_frames(%p) exit: no buffers needed for 2401 pipe mode.\n", - pipe); - return IA_CSS_SUCCESS; - } - -#endif -#ifndef ISP2401 - if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { -#else - if (!(pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR || - pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG || - pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS)) { -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_frames(%p) exit: no buffers needed for pipe mode.\n", - pipe); - return IA_CSS_SUCCESS; /* AM TODO: Check */ - } - -#ifndef ISP2401 - port = (unsigned int) pipe->stream->config.source.port.port; - assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS) { -#else - if (!ia_css_mipi_is_source_port_valid(pipe, &port)) { -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_frames(%p) exit: error: port is not correct (port=%d).\n", - pipe, port); - return IA_CSS_ERR_INTERNAL_ERROR; - } - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 - err = calculate_mipi_buff_size( - &(pipe->stream->config), - &(my_css.mipi_frame_size[port])); -#endif - -#if defined(USE_INPUT_SYSTEM_VERSION_2) - if (ref_count_mipi_allocation[port] != 0) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_frames(%p) exit: already allocated for this port (port=%d).\n", - pipe, port); - return IA_CSS_SUCCESS; - } -#else - /* 2401 system allows multiple streams to use same physical port. This is not - * true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution. - * TODO AM: Once that is changed (removed) this code should be removed as well. - * In that case only 2400 related code should remain. 
- */ - if (ref_count_mipi_allocation[port] != 0) { - ref_count_mipi_allocation[port]++; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_frames(%p) leave: nothing to do, already allocated for this port (port=%d).\n", - pipe, port); - return IA_CSS_SUCCESS; - } -#endif - - ref_count_mipi_allocation[port]++; - - /* TODO: Cleaning needed. */ - /* This code needs to modified to allocate the MIPI frames in the correct normal way - with an allocate from info, by justin */ - mipi_intermediate_info = pipe->pipe_settings.video.video_binary.internal_frame_info; - mipi_intermediate_info.res.width = 0; - mipi_intermediate_info.res.height = 0; - /* To indicate it is not (yet) valid format. */ - mipi_intermediate_info.format = IA_CSS_FRAME_FORMAT_NUM; - mipi_intermediate_info.padded_width = 0; - mipi_intermediate_info.raw_bit_depth = 0; - - /* AM TODO: mipi frames number should come from stream struct. */ - my_css.num_mipi_frames[port] = NUM_MIPI_FRAMES_PER_STREAM; - - /* Incremental allocation (per stream), not for all streams at once. 
*/ - { /* limit the scope of i,j */ - unsigned i, j; - for (i = 0; i < my_css.num_mipi_frames[port]; i++) { - /* free previous frame */ - if (my_css.mipi_frames[port][i]) { - ia_css_frame_free(my_css.mipi_frames[port][i]); - my_css.mipi_frames[port][i] = NULL; - } - /* check if new frame is needed */ - if (i < my_css.num_mipi_frames[port]) { - /* allocate new frame */ - err = ia_css_frame_allocate_with_buffer_size( - &my_css.mipi_frames[port][i], - my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES, - false); - if (err != IA_CSS_SUCCESS) { - for (j = 0; j < i; j++) { - if (my_css.mipi_frames[port][j]) { - ia_css_frame_free(my_css.mipi_frames[port][j]); - my_css.mipi_frames[port][j] = NULL; - } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_frames(%p, %d) exit: error: allocation failed.\n", - pipe, port); - return err; - } - } - if (info->metadata_info.size > 0) { - /* free previous metadata buffer */ - if (my_css.mipi_metadata[port][i] != NULL) { - ia_css_metadata_free(my_css.mipi_metadata[port][i]); - my_css.mipi_metadata[port][i] = NULL; - } - /* check if need to allocate a new metadata buffer */ - if (i < my_css.num_mipi_frames[port]) { - /* allocate new metadata buffer */ - my_css.mipi_metadata[port][i] = ia_css_metadata_allocate(&info->metadata_info); - if (my_css.mipi_metadata[port][i] == NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_metadata(%p, %d) failed.\n", - pipe, port); - return err; - } - } - } - } - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "allocate_mipi_frames(%p) exit:\n", pipe); - - return err; -#else - (void)pipe; - (void)info; - return IA_CSS_SUCCESS; -#endif -} - -enum ia_css_err -free_mipi_frames(struct ia_css_pipe *pipe) -{ -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR; -#ifndef ISP2401 - unsigned int port; -#else - unsigned int port = 0; -#endif - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "free_mipi_frames(%p) enter:\n", pipe); - - /* assert(pipe != NULL); TEMP: TODO: Should be assert only. */ - if (pipe != NULL) { - assert(pipe->stream != NULL); - if ((pipe == NULL) || (pipe->stream == NULL)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "free_mipi_frames(%p) exit: error: pipe or stream is null.\n", - pipe); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - -#ifndef ISP2401 - if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { -#else - if (!(pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR || - pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG || - pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS)) { -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "free_mipi_frames(%p) exit: error: wrong mode.\n", - pipe); - return err; - } - -#ifndef ISP2401 - port = (unsigned int) pipe->stream->config.source.port.port; - assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS) { -#else - if (!ia_css_mipi_is_source_port_valid(pipe, &port)) { -#endif - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, -#ifndef ISP2401 - "free_mipi_frames(%p, %d) exit: error: pipe port is not correct.\n", -#else - "free_mipi_frames(%p) exit: error: pipe port is not correct (port=%d).\n", -#endif - pipe, port); - return err; - } -#ifdef ISP2401 - -#endif - if (ref_count_mipi_allocation[port] > 0) { -#if defined(USE_INPUT_SYSTEM_VERSION_2) - assert(ref_count_mipi_allocation[port] == 1); - if (ref_count_mipi_allocation[port] != 1) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "free_mipi_frames(%p) exit: error: wrong ref_count (ref_count=%d).\n", - pipe, ref_count_mipi_allocation[port]); - return err; - } -#endif - - ref_count_mipi_allocation[port]--; - - if (ref_count_mipi_allocation[port] == 0) { - /* no streams are using this buffer, so free it */ - unsigned int i; - for (i = 0; i < my_css.num_mipi_frames[port]; i++) { - if (my_css.mipi_frames[port][i] != NULL) { - 
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "free_mipi_frames(port=%d, num=%d).\n", port, i); - ia_css_frame_free(my_css.mipi_frames[port][i]); - my_css.mipi_frames[port][i] = NULL; - } - if (my_css.mipi_metadata[port][i] != NULL) { - ia_css_metadata_free(my_css.mipi_metadata[port][i]); - my_css.mipi_metadata[port][i] = NULL; - } - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "free_mipi_frames(%p) exit (deallocated).\n", pipe); - } -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - else { - /* 2401 system allows multiple streams to use same physical port. This is not - * true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution. - * TODO AM: Once that is changed (removed) this code should be removed as well. - * In that case only 2400 related code should remain. - */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "free_mipi_frames(%p) leave: nothing to do, other streams still use this port (port=%d).\n", - pipe, port); - } -#endif - } - } else { /* pipe ==NULL */ - /* AM TEMP: free-ing all mipi buffers just like a legacy code. 
*/ - for (port = CSI_PORT0_ID; port < N_CSI_PORTS; port++) { - unsigned int i; - for (i = 0; i < my_css.num_mipi_frames[port]; i++) { - if (my_css.mipi_frames[port][i] != NULL) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "free_mipi_frames(port=%d, num=%d).\n", port, i); - ia_css_frame_free(my_css.mipi_frames[port][i]); - my_css.mipi_frames[port][i] = NULL; - } - if (my_css.mipi_metadata[port][i] != NULL) { - ia_css_metadata_free(my_css.mipi_metadata[port][i]); - my_css.mipi_metadata[port][i] = NULL; - } - } - ref_count_mipi_allocation[port] = 0; - } - } -#else - (void)pipe; -#endif - return IA_CSS_SUCCESS; -} - -enum ia_css_err -send_mipi_frames(struct ia_css_pipe *pipe) -{ -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR; - unsigned int i; -#ifndef ISP2401 - unsigned int port; -#else - unsigned int port = 0; -#endif - - IA_CSS_ENTER_PRIVATE("pipe=%p", pipe); - - assert(pipe != NULL); - assert(pipe->stream != NULL); - if (pipe == NULL || pipe->stream == NULL) { - IA_CSS_ERROR("pipe or stream is null"); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - /* multi stream video needs mipi buffers */ - /* nothing to be done in other cases. */ -#ifndef ISP2401 - if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { -#else - if (!(pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR || - pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG || - pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS)) { -#endif - IA_CSS_LOG("nothing to be done for this mode"); - return IA_CSS_SUCCESS; - /* TODO: AM: maybe this should be returning an error. 
*/ - } - -#ifndef ISP2401 - port = (unsigned int) pipe->stream->config.source.port.port; - assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS) { - IA_CSS_ERROR("invalid port specified (%d)", port); -#else - if (!ia_css_mipi_is_source_port_valid(pipe, &port)) { - IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).\n", pipe, port); -#endif - return err; - } - - /* Hand-over the SP-internal mipi buffers */ - for (i = 0; i < my_css.num_mipi_frames[port]; i++) { - /* Need to include the ofset for port. */ - sh_css_update_host2sp_mipi_frame(port * NUM_MIPI_FRAMES_PER_STREAM + i, - my_css.mipi_frames[port][i]); - sh_css_update_host2sp_mipi_metadata(port * NUM_MIPI_FRAMES_PER_STREAM + i, - my_css.mipi_metadata[port][i]); - } - sh_css_update_host2sp_num_mipi_frames(my_css.num_mipi_frames[port]); - - /********************************** - * Send an event to inform the SP - * that all MIPI frames are passed. - **********************************/ - if (!sh_css_sp_is_running()) { - /* SP is not running. The queues are not valid */ - IA_CSS_ERROR("sp is not running"); - return err; - } - - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY, - (uint8_t)port, - (uint8_t)my_css.num_mipi_frames[port], - 0 /* not used */); - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); -#else - (void)pipe; -#endif - return IA_CSS_SUCCESS; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.h deleted file mode 100644 index 990f678422fd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SH_CSS_MIPI_H -#define __SH_CSS_MIPI_H - -#include /* ia_css_err */ -#include /* ia_css_pipe */ -#include /* ia_css_stream_config */ - -void -mipi_init(void); - -enum ia_css_err -allocate_mipi_frames(struct ia_css_pipe *pipe, struct ia_css_stream_info *info); - -enum ia_css_err -free_mipi_frames(struct ia_css_pipe *pipe); - -enum ia_css_err -send_mipi_frames(struct ia_css_pipe *pipe); - -/** - * @brief Calculate the required MIPI buffer sizes. - * Based on the stream configuration, calculate the - * required MIPI buffer sizes (in DDR words). - * - * @param[in] stream_cfg Point to the target stream configuration - * @param[out] size_mem_words MIPI buffer size in DDR words. - * - * @return - */ -enum ia_css_err -calculate_mipi_buff_size( - struct ia_css_stream_config *stream_cfg, - unsigned int *size_mem_words); - -#endif /* __SH_CSS_MIPI_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c deleted file mode 100644 index 237e38b2f0c1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_mmu.h" -#include "ia_css_mmu_private.h" -#include -#include "sh_css_sp.h" -#include "sh_css_firmware.h" -#include "sp.h" -#include "mmu_device.h" - -void -ia_css_mmu_invalidate_cache(void) -{ - const struct ia_css_fw_info *fw = &sh_css_sp_fw; - unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_mmu_invalidate_cache() enter\n"); - - /* if the SP is not running we should not access its dmem */ - if (sh_css_sp_is_running()) { - HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb; - - (void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb; /* Suppres warnings in CRUN */ - - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb), - true); - } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_mmu_invalidate_cache() leave\n"); -} - -/* Deprecated, this is an HRT backend function (memory_access.h) */ -void -sh_css_mmu_set_page_table_base_index(hrt_data base_index) -{ - int i; - IA_CSS_ENTER_PRIVATE("base_index=0x%08x\n", base_index); - for (i = 0; i < N_MMU_ID; i++) { - mmu_ID_t mmu_id = i; - mmu_set_page_table_base_index(mmu_id, base_index); - mmu_invalidate_cache(mmu_id); - } - IA_CSS_LEAVE_PRIVATE(""); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_morph.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_morph.c deleted file mode 100644 index 1f4fa25b1e79..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_morph.c +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* This file will contain the code to implement the functions declared in ia_css_morph.h - and associated helper functions */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.c deleted file mode 100644 index 57dd5e7988c9..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.c +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "sh_css_param_dvs.h" -#include -#include -#include -#include -#include "ia_css_debug.h" -#include "memory_access.h" - -static struct ia_css_dvs_6axis_config * -alloc_dvs_6axis_table(const struct ia_css_resolution *frame_res, struct ia_css_dvs_6axis_config *dvs_config_src) -{ - unsigned int width_y = 0; - unsigned int height_y = 0; - unsigned int width_uv = 0; - unsigned int height_uv = 0; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_dvs_6axis_config *dvs_config = NULL; - - dvs_config = (struct ia_css_dvs_6axis_config *)sh_css_malloc(sizeof(struct ia_css_dvs_6axis_config)); - if (dvs_config == NULL) { - IA_CSS_ERROR("out of memory"); - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - else - { /*Initialize new struct with latest config settings*/ - if (NULL != dvs_config_src) { - dvs_config->width_y = width_y = dvs_config_src->width_y; - dvs_config->height_y = height_y = dvs_config_src->height_y; - dvs_config->width_uv = width_uv = dvs_config_src->width_uv; - dvs_config->height_uv = height_uv = dvs_config_src->height_uv; - IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y); - } - else if (NULL != frame_res) { - dvs_config->width_y = width_y = DVS_TABLE_IN_BLOCKDIM_X_LUMA(frame_res->width); - dvs_config->height_y = height_y = DVS_TABLE_IN_BLOCKDIM_Y_LUMA(frame_res->height); - dvs_config->width_uv = width_uv = DVS_TABLE_IN_BLOCKDIM_X_CHROMA(frame_res->width / 2); /* UV = Y/2, depens on colour format YUV 4.2.0*/ - dvs_config->height_uv = height_uv = DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(frame_res->height / 2);/* UV = Y/2, depens on colour format YUV 4.2.0*/ - IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y); - } - - /* Generate Y buffers */ - dvs_config->xcoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(uint32_t)); - if (dvs_config->xcoords_y == NULL) { - IA_CSS_ERROR("out of memory"); - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto exit; - } - - dvs_config->ycoords_y = (uint32_t 
*)sh_css_malloc(width_y * height_y * sizeof(uint32_t)); - if (dvs_config->ycoords_y == NULL) { - IA_CSS_ERROR("out of memory"); - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto exit; - } - - /* Generate UV buffers */ - IA_CSS_LOG("UV W %d H %d", width_uv, height_uv); - - dvs_config->xcoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv * sizeof(uint32_t)); - if (dvs_config->xcoords_uv == NULL) { - IA_CSS_ERROR("out of memory"); - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - goto exit; - } - - dvs_config->ycoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv * sizeof(uint32_t)); - if (dvs_config->ycoords_uv == NULL) { - IA_CSS_ERROR("out of memory"); - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } -exit: - if (err != IA_CSS_SUCCESS) { - free_dvs_6axis_table(&dvs_config); /* we might have allocated some memory, release this */ - dvs_config = NULL; - } - } - - IA_CSS_LEAVE("dvs_config=%p", dvs_config); - return dvs_config; -} - -static void -init_dvs_6axis_table_from_default(struct ia_css_dvs_6axis_config *dvs_config, const struct ia_css_resolution *dvs_offset) -{ - unsigned int x, y; - unsigned int width_y = dvs_config->width_y; - unsigned int height_y = dvs_config->height_y; - unsigned int width_uv = dvs_config->width_uv; - unsigned int height_uv = dvs_config->height_uv; - - IA_CSS_LOG("Env_X=%d, Env_Y=%d, width_y=%d, height_y=%d", - dvs_offset->width, dvs_offset->height, width_y, height_y); - for (y = 0; y < height_y; y++) { - for (x = 0; x < width_y; x++) { - dvs_config->xcoords_y[y*width_y + x] = (dvs_offset->width + x*DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS; - } - } - - for (y = 0; y < height_y; y++) { - for (x = 0; x < width_y; x++) { - dvs_config->ycoords_y[y*width_y + x] = (dvs_offset->height + y*DVS_BLOCKDIM_Y_LUMA) << DVS_COORD_FRAC_BITS; - } - } - - for (y = 0; y < height_uv; y++) { - for (x = 0; x < width_uv; x++) { /* Envelope dimensions set in Ypixels hence offset UV = offset Y/2 */ - dvs_config->xcoords_uv[y*width_uv + x] = ((dvs_offset->width 
/ 2) + x*DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS; - } - } - - for (y = 0; y < height_uv; y++) { - for (x = 0; x < width_uv; x++) { /* Envelope dimensions set in Ypixels hence offset UV = offset Y/2 */ - dvs_config->ycoords_uv[y*width_uv + x] = ((dvs_offset->height / 2) + y*DVS_BLOCKDIM_Y_CHROMA) << DVS_COORD_FRAC_BITS; - } - } - -} - -static void -init_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config, struct ia_css_dvs_6axis_config *dvs_config_src) -{ - unsigned int width_y = dvs_config->width_y; - unsigned int height_y = dvs_config->height_y; - unsigned int width_uv = dvs_config->width_uv; - unsigned int height_uv = dvs_config->height_uv; - - memcpy(dvs_config->xcoords_y, dvs_config_src->xcoords_y, (width_y * height_y * sizeof(uint32_t))); - memcpy(dvs_config->ycoords_y, dvs_config_src->ycoords_y, (width_y * height_y * sizeof(uint32_t))); - memcpy(dvs_config->xcoords_uv, dvs_config_src->xcoords_uv, (width_uv * height_uv * sizeof(uint32_t))); - memcpy(dvs_config->ycoords_uv, dvs_config_src->ycoords_uv, (width_uv * height_uv * sizeof(uint32_t))); -} - -struct ia_css_dvs_6axis_config * -generate_dvs_6axis_table(const struct ia_css_resolution *frame_res, const struct ia_css_resolution *dvs_offset) -{ - struct ia_css_dvs_6axis_config *dvs_6axis_table; - - assert(frame_res != NULL); - assert(dvs_offset != NULL); - - dvs_6axis_table = alloc_dvs_6axis_table(frame_res, NULL); - if (dvs_6axis_table) { - init_dvs_6axis_table_from_default(dvs_6axis_table, dvs_offset); - return dvs_6axis_table; - } - return NULL; -} - -struct ia_css_dvs_6axis_config * -generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config_src) -{ - struct ia_css_dvs_6axis_config *dvs_6axis_table; - - assert(NULL != dvs_config_src); - - dvs_6axis_table = alloc_dvs_6axis_table(NULL, dvs_config_src); - if (dvs_6axis_table) { - init_dvs_6axis_table_from_config(dvs_6axis_table, dvs_config_src); - return dvs_6axis_table; - } - return NULL; -} - -void 
-free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config) -{ - assert(dvs_6axis_config != NULL); - assert(*dvs_6axis_config != NULL); - - if ((dvs_6axis_config != NULL) && (*dvs_6axis_config != NULL)) - { - IA_CSS_ENTER_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config)); - if ((*dvs_6axis_config)->xcoords_y != NULL) - { - sh_css_free((*dvs_6axis_config)->xcoords_y); - (*dvs_6axis_config)->xcoords_y = NULL; - } - - if ((*dvs_6axis_config)->ycoords_y != NULL) - { - sh_css_free((*dvs_6axis_config)->ycoords_y); - (*dvs_6axis_config)->ycoords_y = NULL; - } - - /* Free up UV buffers */ - if ((*dvs_6axis_config)->xcoords_uv != NULL) - { - sh_css_free((*dvs_6axis_config)->xcoords_uv); - (*dvs_6axis_config)->xcoords_uv = NULL; - } - - if ((*dvs_6axis_config)->ycoords_uv != NULL) - { - sh_css_free((*dvs_6axis_config)->ycoords_uv); - (*dvs_6axis_config)->ycoords_uv = NULL; - } - - IA_CSS_LEAVE_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config)); - sh_css_free(*dvs_6axis_config); - *dvs_6axis_config = NULL; - } -} - -void copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst, - const struct ia_css_dvs_6axis_config *dvs_config_src) -{ - unsigned int width_y; - unsigned int height_y; - unsigned int width_uv; - unsigned int height_uv; - - assert(dvs_config_src != NULL); - assert(dvs_config_dst != NULL); - assert(dvs_config_src->xcoords_y != NULL); - assert(dvs_config_src->xcoords_uv != NULL); - assert(dvs_config_src->ycoords_y != NULL); - assert(dvs_config_src->ycoords_uv != NULL); - assert(dvs_config_src->width_y == dvs_config_dst->width_y); - assert(dvs_config_src->width_uv == dvs_config_dst->width_uv); - assert(dvs_config_src->height_y == dvs_config_dst->height_y); - assert(dvs_config_src->height_uv == dvs_config_dst->height_uv); - - width_y = dvs_config_src->width_y; - height_y = dvs_config_src->height_y; - width_uv = dvs_config_src->width_uv; /* = Y/2, depens on colour format YUV 4.2.0*/ - height_uv = dvs_config_src->height_uv; - - 
memcpy(dvs_config_dst->xcoords_y, dvs_config_src->xcoords_y, (width_y * height_y * sizeof(uint32_t))); - memcpy(dvs_config_dst->ycoords_y, dvs_config_src->ycoords_y, (width_y * height_y * sizeof(uint32_t))); - - memcpy(dvs_config_dst->xcoords_uv, dvs_config_src->xcoords_uv, (width_uv * height_uv * sizeof(uint32_t))); - memcpy(dvs_config_dst->ycoords_uv, dvs_config_src->ycoords_uv, (width_uv * height_uv * sizeof(uint32_t))); - -} - -void -ia_css_dvs_statistics_get(enum dvs_statistics_type type, - union ia_css_dvs_statistics_host *host_stats, - const union ia_css_dvs_statistics_isp *isp_stats) -{ - - if (DVS_STATISTICS == type) - { - ia_css_get_dvs_statistics(host_stats->p_dvs_statistics_host, - isp_stats->p_dvs_statistics_isp); - } else if (DVS2_STATISTICS == type) - { - ia_css_get_dvs2_statistics(host_stats->p_dvs2_statistics_host, - isp_stats->p_dvs_statistics_isp); - } - return; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.h deleted file mode 100644 index 79b563dc78ee..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _SH_CSS_PARAMS_DVS_H_ -#define _SH_CSS_PARAMS_DVS_H_ - -#include -#include -#ifdef ISP2401 -#include -#endif -#include "gdc_global.h" /* gdc_warp_param_mem_t */ - -#define DVS_ENV_MIN_X (12) -#define DVS_ENV_MIN_Y (12) - -#define DVS_BLOCKDIM_X (64) /* X block height*/ -#define DVS_BLOCKDIM_Y_LUMA (64) /* Y block height*/ -#define DVS_BLOCKDIM_Y_CHROMA (32) /* UV height block size is half the Y block height*/ - -#ifndef ISP2401 -/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */ -#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2)) - -/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */ -#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA)) -#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X)) -#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA)) - - -#endif -#define DVS_TABLE_IN_BLOCKDIM_X_LUMA(X) (DVS_NUM_BLOCKS_X(X) + 1) /* N blocks have N + 1 set of coords */ -#define DVS_TABLE_IN_BLOCKDIM_X_CHROMA(X) (DVS_NUM_BLOCKS_X_CHROMA(X) + 1) -#define DVS_TABLE_IN_BLOCKDIM_Y_LUMA(X) (DVS_NUM_BLOCKS_Y(X) + 1) -#define DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(X) (DVS_NUM_BLOCKS_Y_CHROMA(X) + 1) - -#define DVS_ENVELOPE_X(X) (((X) == 0) ? (DVS_ENV_MIN_X) : (X)) -#define DVS_ENVELOPE_Y(X) (((X) == 0) ? (DVS_ENV_MIN_Y) : (X)) - -#define DVS_COORD_FRAC_BITS (10) -#ifndef ISP2401 -#define DVS_INPUT_BYTES_PER_PIXEL (1) -#endif -#define XMEM_ALIGN_LOG2 (5) - -#define DVS_6AXIS_COORDS_ELEMS CEIL_MUL(sizeof(gdc_warp_param_mem_t) \ - , HIVE_ISP_DDR_WORD_BYTES) - -/* currently we only support two output with the same resolution, output 0 is th default one. */ -#define DVS_6AXIS_BYTES(binary) \ - (DVS_6AXIS_COORDS_ELEMS \ - * DVS_NUM_BLOCKS_X((binary)->out_frame_info[0].res.width) \ - * DVS_NUM_BLOCKS_Y((binary)->out_frame_info[0].res.height)) - -#ifndef ISP2401 -/* Bilinear interpolation (HRT_GDC_BLI_MODE) is the supported method currently. 
- * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet */ -#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE - -#endif -struct ia_css_dvs_6axis_config * -generate_dvs_6axis_table(const struct ia_css_resolution *frame_res, const struct ia_css_resolution *dvs_offset); - -struct ia_css_dvs_6axis_config * -generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config_src); - -void -free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config); - -void -copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst, - const struct ia_css_dvs_6axis_config *dvs_config_src); - - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c deleted file mode 100644 index e6ebd1b08f0d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include - -#include -#include "sh_css_param_shading.h" -#include "ia_css_shading.h" -#include "assert_support.h" -#include "sh_css_defs.h" -#include "sh_css_internal.h" -#include "ia_css_debug.h" -#include "ia_css_pipe_binarydesc.h" - -#include "sh_css_hrt.h" - -#include "platform_support.h" - -/* Bilinear interpolation on shading tables: - * For each target point T, we calculate the 4 surrounding source points: - * ul (upper left), ur (upper right), ll (lower left) and lr (lower right). - * We then calculate the distances from the T to the source points: x0, x1, - * y0 and y1. - * We then calculate the value of T: - * dx0*dy0*Slr + dx0*dy1*Sur + dx1*dy0*Sll + dx1*dy1*Sul. - * We choose a grid size of 1x1 which means: - * dx1 = 1-dx0 - * dy1 = 1-dy0 - * - * Sul dx0 dx1 Sur - * .<----->|<------------->. - * ^ - * dy0| - * v T - * - . - * ^ - * | - * dy1| - * v - * . . - * Sll Slr - * - * Padding: - * The area that the ISP operates on can include padding both on the left - * and the right. We need to padd the shading table such that the shading - * values end up on the correct pixel values. This means we must padd the - * shading table to match the ISP padding. - * We can have 5 cases: - * 1. All 4 points fall in the left padding. - * 2. The left 2 points fall in the left padding. - * 3. All 4 points fall in the cropped (target) region. - * 4. The right 2 points fall in the right padding. - * 5. All 4 points fall in the right padding. - * Cases 1 and 5 are easy to handle: we simply use the - * value 1 in the shading table. - * Cases 2 and 4 require interpolation that takes into - * account how far into the padding area the pixels - * fall. We extrapolate the shading table into the - * padded area and then interpolate. 
- */ -static void -crop_and_interpolate(unsigned int cropped_width, - unsigned int cropped_height, - unsigned int left_padding, - int right_padding, - int top_padding, - const struct ia_css_shading_table *in_table, - struct ia_css_shading_table *out_table, - enum ia_css_sc_color color) -{ - unsigned int i, j, - sensor_width, - sensor_height, - table_width, - table_height, - table_cell_h, - out_cell_size, - in_cell_size, - out_start_row, - padded_width; - int out_start_col, /* can be negative to indicate padded space */ - table_cell_w; - unsigned short *in_ptr, - *out_ptr; - - assert(in_table != NULL); - assert(out_table != NULL); - - sensor_width = in_table->sensor_width; - sensor_height = in_table->sensor_height; - table_width = in_table->width; - table_height = in_table->height; - in_ptr = in_table->data[color]; - out_ptr = out_table->data[color]; - - padded_width = cropped_width + left_padding + right_padding; - out_cell_size = CEIL_DIV(padded_width, out_table->width - 1); - in_cell_size = CEIL_DIV(sensor_width, table_width - 1); - - out_start_col = ((int)sensor_width - (int)cropped_width)/2 - left_padding; - out_start_row = ((int)sensor_height - (int)cropped_height)/2 - top_padding; - table_cell_w = (int)((table_width-1) * in_cell_size); - table_cell_h = (table_height-1) * in_cell_size; - - for (i = 0; i < out_table->height; i++) { - int ty, src_y0, src_y1; - unsigned int sy0, sy1, dy0, dy1, divy; - - /* calculate target point and make sure it falls within - the table */ - ty = out_start_row + i * out_cell_size; - - /* calculate closest source points in shading table and - make sure they fall within the table */ - src_y0 = ty / (int)in_cell_size; - if (in_cell_size < out_cell_size) - src_y1 = (ty + out_cell_size) / in_cell_size; - else - src_y1 = src_y0 + 1; - src_y0 = clamp(src_y0, 0, (int)table_height-1); - src_y1 = clamp(src_y1, 0, (int)table_height-1); - ty = min(clamp(ty, 0, (int)sensor_height-1), - (int)table_cell_h); - - /* calculate closest source 
points for distance computation */ - sy0 = min(src_y0 * in_cell_size, sensor_height-1); - sy1 = min(src_y1 * in_cell_size, sensor_height-1); - /* calculate distance between source and target pixels */ - dy0 = ty - sy0; - dy1 = sy1 - ty; - divy = sy1 - sy0; - if (divy == 0) { - dy0 = 1; - divy = 1; - } - - for (j = 0; j < out_table->width; j++, out_ptr++) { - int tx, src_x0, src_x1; - unsigned int sx0, sx1, dx0, dx1, divx; - unsigned short s_ul, s_ur, s_ll, s_lr; - - /* calculate target point */ - tx = out_start_col + j * out_cell_size; - /* calculate closest source points. */ - src_x0 = tx / (int)in_cell_size; - if (in_cell_size < out_cell_size) { - src_x1 = (tx + out_cell_size) / - (int)in_cell_size; - } else { - src_x1 = src_x0 + 1; - } - /* if src points fall in padding, select closest ones.*/ - src_x0 = clamp(src_x0, 0, (int)table_width-1); - src_x1 = clamp(src_x1, 0, (int)table_width-1); - tx = min(clamp(tx, 0, (int)sensor_width-1), - (int)table_cell_w); - /* calculate closest source points for distance - computation */ - sx0 = min(src_x0 * in_cell_size, sensor_width-1); - sx1 = min(src_x1 * in_cell_size, sensor_width-1); - /* calculate distances between source and target - pixels */ - dx0 = tx - sx0; - dx1 = sx1 - tx; - divx = sx1 - sx0; - /* if we're at the edge, we just use the closest - point still in the grid. We make up for the divider - in this case by setting the distance to - out_cell_size, since it's actually 0. 
*/ - if (divx == 0) { - dx0 = 1; - divx = 1; - } - - /* get source pixel values */ - s_ul = in_ptr[(table_width*src_y0)+src_x0]; - s_ur = in_ptr[(table_width*src_y0)+src_x1]; - s_ll = in_ptr[(table_width*src_y1)+src_x0]; - s_lr = in_ptr[(table_width*src_y1)+src_x1]; - - *out_ptr = (unsigned short) ((dx0*dy0*s_lr + dx0*dy1*s_ur + dx1*dy0*s_ll + dx1*dy1*s_ul) / - (divx*divy)); - } - } -} - -void -sh_css_params_shading_id_table_generate( - struct ia_css_shading_table **target_table, -#ifndef ISP2401 - const struct ia_css_binary *binary) -#else - unsigned int table_width, - unsigned int table_height) -#endif -{ - /* initialize table with ones, shift becomes zero */ -#ifndef ISP2401 - unsigned int i, j, table_width, table_height; -#else - unsigned int i, j; -#endif - struct ia_css_shading_table *result; - - assert(target_table != NULL); -#ifndef ISP2401 - assert(binary != NULL); -#endif - -#ifndef ISP2401 - table_width = binary->sctbl_width_per_color; - table_height = binary->sctbl_height; -#endif - result = ia_css_shading_table_alloc(table_width, table_height); - if (result == NULL) { - *target_table = NULL; - return; - } - - for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) { - for (j = 0; j < table_height * table_width; j++) - result->data[i][j] = 1; - } - result->fraction_bits = 0; - *target_table = result; -} - -void -prepare_shading_table(const struct ia_css_shading_table *in_table, - unsigned int sensor_binning, - struct ia_css_shading_table **target_table, - const struct ia_css_binary *binary, - unsigned int bds_factor) -{ - unsigned int input_width, - input_height, - table_width, - table_height, - left_padding, - top_padding, - padded_width, - left_cropping, - i; - unsigned int bds_numerator, bds_denominator; - int right_padding; - - struct ia_css_shading_table *result; - - assert(target_table != NULL); - assert(binary != NULL); - - if (!in_table) { -#ifndef ISP2401 - sh_css_params_shading_id_table_generate(target_table, binary); -#else - 
sh_css_params_shading_id_table_generate(target_table, - binary->sctbl_legacy_width_per_color, binary->sctbl_legacy_height); -#endif - return; - } - - padded_width = binary->in_frame_info.padded_width; - /* We use the ISP input resolution for the shading table because - shading correction is performed in the bayer domain (before bayer - down scaling). */ -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - padded_width = CEIL_MUL(binary->effective_in_frame_res.width + 2*ISP_VEC_NELEMS, - 2*ISP_VEC_NELEMS); -#endif - input_height = binary->in_frame_info.res.height; - input_width = binary->in_frame_info.res.width; - left_padding = binary->left_padding; - left_cropping = (binary->info->sp.pipeline.left_cropping == 0) ? - binary->dvs_envelope.width : 2*ISP_VEC_NELEMS; - - sh_css_bds_factor_get_numerator_denominator - (bds_factor, &bds_numerator, &bds_denominator); - - left_padding = (left_padding + binary->info->sp.pipeline.left_cropping) * bds_numerator / bds_denominator - binary->info->sp.pipeline.left_cropping; - right_padding = (binary->internal_frame_info.res.width - binary->effective_in_frame_res.width * bds_denominator / bds_numerator - left_cropping) * bds_numerator / bds_denominator; - top_padding = binary->info->sp.pipeline.top_cropping * bds_numerator / bds_denominator - binary->info->sp.pipeline.top_cropping; - -#if !defined(USE_WINDOWS_BINNING_FACTOR) - /* @deprecated{This part of the code will be replaced by the code - * in the #else section below to make the calculation same across - * all platforms. - * Android and Windows platforms interpret the binning_factor parameter - * differently. In Android, the binning factor is expressed in the form - * 2^N * 2^N, whereas in Windows platform, the binning factor is N*N} - */ - - /* We take into account the binning done by the sensor. We do this - by cropping the non-binned part of the shading table and then - increasing the size of a grid cell with this same binning factor. 
*/ - input_width <<= sensor_binning; - input_height <<= sensor_binning; - /* We also scale the padding by the same binning factor. This will - make it much easier later on to calculate the padding of the - shading table. */ - left_padding <<= sensor_binning; - right_padding <<= sensor_binning; - top_padding <<= sensor_binning; -#else - input_width *= sensor_binning; - input_height *= sensor_binning; - left_padding *= sensor_binning; - right_padding *= sensor_binning; - top_padding *= sensor_binning; -#endif /*USE_WINDOWS_BINNING_FACTOR*/ - - /* during simulation, the used resolution can exceed the sensor - resolution, so we clip it. */ - input_width = min(input_width, in_table->sensor_width); - input_height = min(input_height, in_table->sensor_height); - -#ifndef ISP2401 - table_width = binary->sctbl_width_per_color; - table_height = binary->sctbl_height; -#else - /* This prepare_shading_table() function is called only in legacy API (not in new API). - Then, the legacy shading table width and height should be used. */ - table_width = binary->sctbl_legacy_width_per_color; - table_height = binary->sctbl_legacy_height; -#endif - - result = ia_css_shading_table_alloc(table_width, table_height); - if (result == NULL) { - *target_table = NULL; - return; - } - result->sensor_width = in_table->sensor_width; - result->sensor_height = in_table->sensor_height; - result->fraction_bits = in_table->fraction_bits; - - /* now we crop the original shading table and then interpolate to the - requested resolution and decimation factor. 
*/ - for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) { - crop_and_interpolate(input_width, input_height, - left_padding, right_padding, top_padding, - in_table, - result, i); - } - *target_table = result; -} - -struct ia_css_shading_table * -ia_css_shading_table_alloc( - unsigned int width, - unsigned int height) -{ - unsigned int i; - struct ia_css_shading_table *me; - - IA_CSS_ENTER(""); - - me = kmalloc(sizeof(*me), GFP_KERNEL); - if (!me) - return me; - - me->width = width; - me->height = height; - me->sensor_width = 0; - me->sensor_height = 0; - me->fraction_bits = 0; - for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) { - me->data[i] = - sh_css_malloc(width * height * sizeof(*me->data[0])); - if (me->data[i] == NULL) { - unsigned int j; - for (j = 0; j < i; j++) { - sh_css_free(me->data[j]); - me->data[j] = NULL; - } - kfree(me); - return NULL; - } - } - - IA_CSS_LEAVE(""); - return me; -} - -void -ia_css_shading_table_free(struct ia_css_shading_table *table) -{ - unsigned int i; - - if (table == NULL) - return; - - /* We only output logging when the table is not NULL, otherwise - * logs will give the impression that a table was freed. - * */ - IA_CSS_ENTER(""); - - for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) { - if (table->data[i]) { - sh_css_free(table->data[i]); - table->data[i] = NULL; - } - } - kfree(table); - - IA_CSS_LEAVE(""); -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.h deleted file mode 100644 index e87863b7c8cc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SH_CSS_PARAMS_SHADING_H -#define __SH_CSS_PARAMS_SHADING_H - -#include -#include - -void -sh_css_params_shading_id_table_generate( - struct ia_css_shading_table **target_table, -#ifndef ISP2401 - const struct ia_css_binary *binary); -#else - unsigned int table_width, - unsigned int table_height); -#endif - -void -prepare_shading_table(const struct ia_css_shading_table *in_table, - unsigned int sensor_binning, - struct ia_css_shading_table **target_table, - const struct ia_css_binary *binary, - unsigned int bds_factor); - -#endif /* __SH_CSS_PARAMS_SHADING_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c deleted file mode 100644 index 43529b1605c3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c +++ /dev/null @@ -1,5253 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "gdc_device.h" /* gdc_lut_store(), ... 
*/ -#include "isp.h" /* ISP_VEC_ELEMBITS */ -#include "vamem.h" -#if !defined(HAS_NO_HMEM) -#ifndef __INLINE_HMEM__ -#define __INLINE_HMEM__ -#endif -#include "hmem.h" -#endif /* !defined(HAS_NO_HMEM) */ -#define IA_CSS_INCLUDE_PARAMETERS -#define IA_CSS_INCLUDE_ACC_PARAMETERS - -#include "sh_css_params.h" -#include "ia_css_queue.h" -#include "sw_event_global.h" /* Event IDs */ - -#include "platform_support.h" -#include "assert_support.h" -#include "misc_support.h" /* NOT_USED */ -#include "math_support.h" /* max(), min() EVEN_FLOOR()*/ - -#include "ia_css_stream.h" -#include "sh_css_params_internal.h" -#include "sh_css_param_shading.h" -#include "sh_css_param_dvs.h" -#include "ia_css_refcount.h" -#include "sh_css_internal.h" -#include "ia_css_control.h" -#include "ia_css_shading.h" -#include "sh_css_defs.h" -#include "sh_css_sp.h" -#include "ia_css_pipeline.h" -#include "ia_css_debug.h" -#include "memory_access.h" -#if 0 /* FIXME */ -#include "memory_realloc.h" -#endif -#include "ia_css_isp_param.h" -#include "ia_css_isp_params.h" -#include "ia_css_mipi.h" -#include "ia_css_morph.h" -#include "ia_css_host_data.h" -#include "ia_css_pipe.h" -#include "ia_css_pipe_binarydesc.h" -#if 0 -#include "ia_css_system_ctrl.h" -#endif - -/* Include all kernel host interfaces for ISP1 */ - -#include "anr/anr_1.0/ia_css_anr.host.h" -#include "cnr/cnr_1.0/ia_css_cnr.host.h" -#include "csc/csc_1.0/ia_css_csc.host.h" -#include "de/de_1.0/ia_css_de.host.h" -#include "dp/dp_1.0/ia_css_dp.host.h" -#include "bnr/bnr_1.0/ia_css_bnr.host.h" -#include "dvs/dvs_1.0/ia_css_dvs.host.h" -#include "fpn/fpn_1.0/ia_css_fpn.host.h" -#include "gc/gc_1.0/ia_css_gc.host.h" -#include "macc/macc_1.0/ia_css_macc.host.h" -#include "ctc/ctc_1.0/ia_css_ctc.host.h" -#include "ob/ob_1.0/ia_css_ob.host.h" -#include "raw/raw_1.0/ia_css_raw.host.h" -#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" -#include "s3a/s3a_1.0/ia_css_s3a.host.h" -#include "sc/sc_1.0/ia_css_sc.host.h" -#include 
"sdis/sdis_1.0/ia_css_sdis.host.h" -#include "tnr/tnr_1.0/ia_css_tnr.host.h" -#include "uds/uds_1.0/ia_css_uds_param.h" -#include "wb/wb_1.0/ia_css_wb.host.h" -#include "ynr/ynr_1.0/ia_css_ynr.host.h" -#include "xnr/xnr_1.0/ia_css_xnr.host.h" - -/* Include additional kernel host interfaces for ISP2 */ - -#include "aa/aa_2/ia_css_aa2.host.h" -#include "anr/anr_2/ia_css_anr2.host.h" -#include "bh/bh_2/ia_css_bh.host.h" -#include "cnr/cnr_2/ia_css_cnr2.host.h" -#include "ctc/ctc1_5/ia_css_ctc1_5.host.h" -#include "de/de_2/ia_css_de2.host.h" -#include "gc/gc_2/ia_css_gc2.host.h" -#include "sdis/sdis_2/ia_css_sdis2.host.h" -#include "ynr/ynr_2/ia_css_ynr2.host.h" -#include "fc/fc_1.0/ia_css_formats.host.h" - -#include "xnr/xnr_3.0/ia_css_xnr3.host.h" - -#if defined(HAS_OUTPUT_SYSTEM) -#include -#endif - -#include "sh_css_frac.h" -#include "ia_css_bufq.h" - -#define FPNTBL_BYTES(binary) \ - (sizeof(char) * (binary)->in_frame_info.res.height * \ - (binary)->in_frame_info.padded_width) - -#ifndef ISP2401 - -#define SCTBL_BYTES(binary) \ - (sizeof(unsigned short) * (binary)->sctbl_height * \ - (binary)->sctbl_aligned_width_per_color * IA_CSS_SC_NUM_COLORS) - -#else - -#define SCTBL_BYTES(binary) \ - (sizeof(unsigned short) * max((binary)->sctbl_height, (binary)->sctbl_legacy_height) * \ - /* height should be the larger height between new api and legacy api */ \ - (binary)->sctbl_aligned_width_per_color * IA_CSS_SC_NUM_COLORS) - -#endif - -#define MORPH_PLANE_BYTES(binary) \ - (SH_CSS_MORPH_TABLE_ELEM_BYTES * (binary)->morph_tbl_aligned_width * \ - (binary)->morph_tbl_height) - -/* We keep a second copy of the ptr struct for the SP to access. - Again, this would not be necessary on the chip. 
*/ -static hrt_vaddress sp_ddr_ptrs; - -/* sp group address on DDR */ -static hrt_vaddress xmem_sp_group_ptrs; - -static hrt_vaddress xmem_sp_stage_ptrs[IA_CSS_PIPE_ID_NUM] - [SH_CSS_MAX_STAGES]; -static hrt_vaddress xmem_isp_stage_ptrs[IA_CSS_PIPE_ID_NUM] - [SH_CSS_MAX_STAGES]; - -static hrt_vaddress default_gdc_lut; -static int interleaved_lut_temp[4][HRT_GDC_N]; - -/* END DO NOT MOVE INTO VIMALS_WORLD */ - -/* Digital Zoom lookup table. See documentation for more details about the - * contents of this table. - */ -#if defined(HAS_GDC_VERSION_2) -#if defined(CONFIG_CSI2_PLUS) -/* - * Coefficients from - * Css_Mizuchi/regressions/20140424_0930/all/applications/common/gdc_v2_common/lut.h - */ - -static const int zoom_table[4][HRT_GDC_N] = { - { 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, -1, - -1, -1, -1, -1, -1, -1, -1, -1, - -1, -2, -2, -2, -2, -2, -2, -2, - -3, -3, -3, -3, -3, -3, -3, -4, - -4, -4, -4, -4, -5, -5, -5, -5, - -5, -5, -6, -6, -6, -6, -7, -7, - -7, -7, -7, -8, -8, -8, -8, -9, - -9, -9, -9, -10, -10, -10, -10, -11, - -11, -11, -12, -12, -12, -12, -13, -13, - -13, -14, -14, -14, -15, -15, -15, -15, - -16, -16, -16, -17, -17, -17, -18, -18, - -18, -19, -19, -20, -20, -20, -21, -21, - -21, -22, -22, -22, -23, -23, -24, -24, - -24, -25, -25, -25, -26, -26, -27, -27, - -28, -28, -28, -29, -29, -30, -30, -30, - -31, -31, -32, -32, -33, -33, -33, -34, - -34, -35, -35, -36, -36, -37, -37, -37, - -38, -38, -39, -39, -40, -40, -41, -41, - -42, -42, -43, -43, -44, -44, -45, -45, - -46, -46, -47, -47, -48, -48, -49, -49, - -50, -50, -51, -51, -52, -52, -53, -53, - -54, -54, -55, -55, -56, -56, -57, -57, - -58, -59, -59, -60, -60, -61, -61, -62, - -62, -63, -63, -64, -65, -65, -66, -66, - -67, -67, -68, -69, -69, -70, -70, -71, - -71, -72, -73, -73, -74, -74, -75, -75, - -76, -77, -77, -78, -78, -79, -80, -80, - -81, -81, -82, -83, -83, -84, -84, -85, - -86, -86, -87, -87, -88, -89, -89, -90, - -91, -91, -92, -92, -93, -94, -94, -95, 
- -96, -96, -97, -97, -98, -99, -99, -100, - -101, -101, -102, -102, -103, -104, -104, -105, - -106, -106, -107, -108, -108, -109, -109, -110, - -111, -111, -112, -113, -113, -114, -115, -115, - -116, -117, -117, -118, -119, -119, -120, -121, - -121, -122, -122, -123, -124, -124, -125, -126, - -126, -127, -128, -128, -129, -130, -130, -131, - -132, -132, -133, -134, -134, -135, -136, -136, - -137, -138, -138, -139, -140, -140, -141, -142, - -142, -143, -144, -144, -145, -146, -146, -147, - -148, -148, -149, -150, -150, -151, -152, -152, - -153, -154, -154, -155, -156, -156, -157, -158, - -158, -159, -160, -160, -161, -162, -162, -163, - -164, -164, -165, -166, -166, -167, -168, -168, - -169, -170, -170, -171, -172, -172, -173, -174, - -174, -175, -176, -176, -177, -178, -178, -179, - -180, -180, -181, -181, -182, -183, -183, -184, - -185, -185, -186, -187, -187, -188, -189, -189, - -190, -191, -191, -192, -193, -193, -194, -194, - -195, -196, -196, -197, -198, -198, -199, -200, - -200, -201, -201, -202, -203, -203, -204, -205, - -205, -206, -206, -207, -208, -208, -209, -210, - -210, -211, -211, -212, -213, -213, -214, -215, - -215, -216, -216, -217, -218, -218, -219, -219, - -220, -221, -221, -222, -222, -223, -224, -224, - -225, -225, -226, -227, -227, -228, -228, -229, - -229, -230, -231, -231, -232, -232, -233, -233, - -234, -235, -235, -236, -236, -237, -237, -238, - -239, -239, -240, -240, -241, -241, -242, -242, - -243, -244, -244, -245, -245, -246, -246, -247, - -247, -248, -248, -249, -249, -250, -250, -251, - -251, -252, -252, -253, -253, -254, -254, -255, - -256, -256, -256, -257, -257, -258, -258, -259, - -259, -260, -260, -261, -261, -262, -262, -263, - -263, -264, -264, -265, -265, -266, -266, -266, - -267, -267, -268, -268, -269, -269, -270, -270, - -270, -271, -271, -272, -272, -273, -273, -273, - -274, -274, -275, -275, -275, -276, -276, -277, - -277, -277, -278, -278, -279, -279, -279, -280, - -280, -280, -281, -281, -282, -282, -282, -283, - 
-283, -283, -284, -284, -284, -285, -285, -285, - -286, -286, -286, -287, -287, -287, -288, -288, - -288, -289, -289, -289, -289, -290, -290, -290, - -291, -291, -291, -291, -292, -292, -292, -293, - -293, -293, -293, -294, -294, -294, -294, -295, - -295, -295, -295, -295, -296, -296, -296, -296, - -297, -297, -297, -297, -297, -298, -298, -298, - -298, -298, -299, -299, -299, -299, -299, -299, - -300, -300, -300, -300, -300, -300, -300, -301, - -301, -301, -301, -301, -301, -301, -301, -301, - -302, -302, -302, -302, -302, -302, -302, -302, - -302, -302, -302, -302, -302, -303, -303, -303, - -303, -303, -303, -303, -303, -303, -303, -303, - -303, -303, -303, -303, -303, -303, -303, -303, - -303, -303, -303, -303, -303, -303, -303, -303, - -303, -303, -302, -302, -302, -302, -302, -302, - -302, -302, -302, -302, -302, -302, -301, -301, - -301, -301, -301, -301, -301, -301, -300, -300, - -300, -300, -300, -300, -299, -299, -299, -299, - -299, -299, -298, -298, -298, -298, -298, -297, - -297, -297, -297, -296, -296, -296, -296, -295, - -295, -295, -295, -294, -294, -294, -293, -293, - -293, -293, -292, -292, -292, -291, -291, -291, - -290, -290, -290, -289, -289, -289, -288, -288, - -288, -287, -287, -286, -286, -286, -285, -285, - -284, -284, -284, -283, -283, -282, -282, -281, - -281, -280, -280, -279, -279, -279, -278, -278, - -277, -277, -276, -276, -275, -275, -274, -273, - -273, -272, -272, -271, -271, -270, -270, -269, - -268, -268, -267, -267, -266, -266, -265, -264, - -264, -263, -262, -262, -261, -260, -260, -259, - -259, -258, -257, -256, -256, -255, -254, -254, - -253, -252, -252, -251, -250, -249, -249, -248, - -247, -246, -246, -245, -244, -243, -242, -242, - -241, -240, -239, -238, -238, -237, -236, -235, - -234, -233, -233, -232, -231, -230, -229, -228, - -227, -226, -226, -225, -224, -223, -222, -221, - -220, -219, -218, -217, -216, -215, -214, -213, - -212, -211, -210, -209, -208, -207, -206, -205, - -204, -203, -202, -201, -200, -199, -198, -197, - 
-196, -194, -193, -192, -191, -190, -189, -188, - -187, -185, -184, -183, -182, -181, -180, -178, - -177, -176, -175, -174, -172, -171, -170, -169, - -167, -166, -165, -164, -162, -161, -160, -158, - -157, -156, -155, -153, -152, -151, -149, -148, - -147, -145, -144, -142, -141, -140, -138, -137, - -135, -134, -133, -131, -130, -128, -127, -125, - -124, -122, -121, -120, -118, -117, -115, -114, - -112, -110, -109, -107, -106, -104, -103, -101, - -100, -98, -96, -95, -93, -92, -90, -88, - -87, -85, -83, -82, -80, -78, -77, -75, - -73, -72, -70, -68, -67, -65, -63, -61, - -60, -58, -56, -54, -52, -51, -49, -47, - -45, -43, -42, -40, -38, -36, -34, -32, - -31, -29, -27, -25, -23, -21, -19, -17, - -15, -13, -11, -9, -7, -5, -3, -1 - }, - { 0, 2, 4, 6, 8, 10, 12, 14, - 16, 18, 20, 22, 25, 27, 29, 31, - 33, 36, 38, 40, 43, 45, 47, 50, - 52, 54, 57, 59, 61, 64, 66, 69, - 71, 74, 76, 79, 81, 84, 86, 89, - 92, 94, 97, 99, 102, 105, 107, 110, - 113, 116, 118, 121, 124, 127, 129, 132, - 135, 138, 141, 144, 146, 149, 152, 155, - 158, 161, 164, 167, 170, 173, 176, 179, - 182, 185, 188, 191, 194, 197, 200, 203, - 207, 210, 213, 216, 219, 222, 226, 229, - 232, 235, 239, 242, 245, 248, 252, 255, - 258, 262, 265, 269, 272, 275, 279, 282, - 286, 289, 292, 296, 299, 303, 306, 310, - 313, 317, 321, 324, 328, 331, 335, 338, - 342, 346, 349, 353, 357, 360, 364, 368, - 372, 375, 379, 383, 386, 390, 394, 398, - 402, 405, 409, 413, 417, 421, 425, 429, - 432, 436, 440, 444, 448, 452, 456, 460, - 464, 468, 472, 476, 480, 484, 488, 492, - 496, 500, 504, 508, 512, 516, 521, 525, - 529, 533, 537, 541, 546, 550, 554, 558, - 562, 567, 571, 575, 579, 584, 588, 592, - 596, 601, 605, 609, 614, 618, 622, 627, - 631, 635, 640, 644, 649, 653, 657, 662, - 666, 671, 675, 680, 684, 689, 693, 698, - 702, 707, 711, 716, 720, 725, 729, 734, - 738, 743, 747, 752, 757, 761, 766, 771, - 775, 780, 784, 789, 794, 798, 803, 808, - 813, 817, 822, 827, 831, 836, 841, 846, - 850, 855, 860, 865, 870, 874, 879, 884, - 
889, 894, 898, 903, 908, 913, 918, 923, - 928, 932, 937, 942, 947, 952, 957, 962, - 967, 972, 977, 982, 986, 991, 996, 1001, - 1006, 1011, 1016, 1021, 1026, 1031, 1036, 1041, - 1046, 1051, 1056, 1062, 1067, 1072, 1077, 1082, - 1087, 1092, 1097, 1102, 1107, 1112, 1117, 1122, - 1128, 1133, 1138, 1143, 1148, 1153, 1158, 1164, - 1169, 1174, 1179, 1184, 1189, 1195, 1200, 1205, - 1210, 1215, 1221, 1226, 1231, 1236, 1242, 1247, - 1252, 1257, 1262, 1268, 1273, 1278, 1284, 1289, - 1294, 1299, 1305, 1310, 1315, 1321, 1326, 1331, - 1336, 1342, 1347, 1352, 1358, 1363, 1368, 1374, - 1379, 1384, 1390, 1395, 1400, 1406, 1411, 1417, - 1422, 1427, 1433, 1438, 1443, 1449, 1454, 1460, - 1465, 1470, 1476, 1481, 1487, 1492, 1497, 1503, - 1508, 1514, 1519, 1525, 1530, 1535, 1541, 1546, - 1552, 1557, 1563, 1568, 1574, 1579, 1585, 1590, - 1596, 1601, 1606, 1612, 1617, 1623, 1628, 1634, - 1639, 1645, 1650, 1656, 1661, 1667, 1672, 1678, - 1683, 1689, 1694, 1700, 1705, 1711, 1716, 1722, - 1727, 1733, 1738, 1744, 1749, 1755, 1761, 1766, - 1772, 1777, 1783, 1788, 1794, 1799, 1805, 1810, - 1816, 1821, 1827, 1832, 1838, 1844, 1849, 1855, - 1860, 1866, 1871, 1877, 1882, 1888, 1893, 1899, - 1905, 1910, 1916, 1921, 1927, 1932, 1938, 1943, - 1949, 1955, 1960, 1966, 1971, 1977, 1982, 1988, - 1993, 1999, 2005, 2010, 2016, 2021, 2027, 2032, - 2038, 2043, 2049, 2055, 2060, 2066, 2071, 2077, - 2082, 2088, 2093, 2099, 2105, 2110, 2116, 2121, - 2127, 2132, 2138, 2143, 2149, 2154, 2160, 2165, - 2171, 2177, 2182, 2188, 2193, 2199, 2204, 2210, - 2215, 2221, 2226, 2232, 2237, 2243, 2248, 2254, - 2259, 2265, 2270, 2276, 2281, 2287, 2292, 2298, - 2304, 2309, 2314, 2320, 2325, 2331, 2336, 2342, - 2347, 2353, 2358, 2364, 2369, 2375, 2380, 2386, - 2391, 2397, 2402, 2408, 2413, 2419, 2424, 2429, - 2435, 2440, 2446, 2451, 2457, 2462, 2467, 2473, - 2478, 2484, 2489, 2495, 2500, 2505, 2511, 2516, - 2522, 2527, 2532, 2538, 2543, 2549, 2554, 2559, - 2565, 2570, 2575, 2581, 2586, 2591, 2597, 2602, - 2607, 2613, 2618, 
2623, 2629, 2634, 2639, 2645, - 2650, 2655, 2661, 2666, 2671, 2676, 2682, 2687, - 2692, 2698, 2703, 2708, 2713, 2719, 2724, 2729, - 2734, 2740, 2745, 2750, 2755, 2760, 2766, 2771, - 2776, 2781, 2786, 2792, 2797, 2802, 2807, 2812, - 2817, 2823, 2828, 2833, 2838, 2843, 2848, 2853, - 2859, 2864, 2869, 2874, 2879, 2884, 2889, 2894, - 2899, 2904, 2909, 2914, 2919, 2924, 2930, 2935, - 2940, 2945, 2950, 2955, 2960, 2965, 2970, 2975, - 2980, 2984, 2989, 2994, 2999, 3004, 3009, 3014, - 3019, 3024, 3029, 3034, 3039, 3044, 3048, 3053, - 3058, 3063, 3068, 3073, 3078, 3082, 3087, 3092, - 3097, 3102, 3106, 3111, 3116, 3121, 3126, 3130, - 3135, 3140, 3145, 3149, 3154, 3159, 3163, 3168, - 3173, 3177, 3182, 3187, 3191, 3196, 3201, 3205, - 3210, 3215, 3219, 3224, 3228, 3233, 3238, 3242, - 3247, 3251, 3256, 3260, 3265, 3269, 3274, 3279, - 3283, 3287, 3292, 3296, 3301, 3305, 3310, 3314, - 3319, 3323, 3327, 3332, 3336, 3341, 3345, 3349, - 3354, 3358, 3362, 3367, 3371, 3375, 3380, 3384, - 3388, 3393, 3397, 3401, 3405, 3410, 3414, 3418, - 3422, 3426, 3431, 3435, 3439, 3443, 3447, 3451, - 3455, 3460, 3464, 3468, 3472, 3476, 3480, 3484, - 3488, 3492, 3496, 3500, 3504, 3508, 3512, 3516, - 3520, 3524, 3528, 3532, 3536, 3540, 3544, 3548, - 3552, 3555, 3559, 3563, 3567, 3571, 3575, 3578, - 3582, 3586, 3590, 3593, 3597, 3601, 3605, 3608, - 3612, 3616, 3619, 3623, 3627, 3630, 3634, 3638, - 3641, 3645, 3649, 3652, 3656, 3659, 3663, 3666, - 3670, 3673, 3677, 3680, 3684, 3687, 3691, 3694, - 3698, 3701, 3704, 3708, 3711, 3714, 3718, 3721, - 3724, 3728, 3731, 3734, 3738, 3741, 3744, 3747, - 3751, 3754, 3757, 3760, 3763, 3767, 3770, 3773, - 3776, 3779, 3782, 3785, 3788, 3791, 3794, 3798, - 3801, 3804, 3807, 3809, 3812, 3815, 3818, 3821, - 3824, 3827, 3830, 3833, 3836, 3839, 3841, 3844, - 3847, 3850, 3853, 3855, 3858, 3861, 3864, 3866, - 3869, 3872, 3874, 3877, 3880, 3882, 3885, 3887, - 3890, 3893, 3895, 3898, 3900, 3903, 3905, 3908, - 3910, 3913, 3915, 3917, 3920, 3922, 3925, 3927, - 3929, 3932, 3934, 
3936, 3939, 3941, 3943, 3945, - 3948, 3950, 3952, 3954, 3956, 3958, 3961, 3963, - 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979, - 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3994, - 3996, 3998, 4000, 4002, 4004, 4005, 4007, 4009, - 4011, 4012, 4014, 4016, 4017, 4019, 4021, 4022, - 4024, 4025, 4027, 4028, 4030, 4031, 4033, 4034, - 4036, 4037, 4039, 4040, 4042, 4043, 4044, 4046, - 4047, 4048, 4050, 4051, 4052, 4053, 4055, 4056, - 4057, 4058, 4059, 4060, 4062, 4063, 4064, 4065, - 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, - 4074, 4075, 4075, 4076, 4077, 4078, 4079, 4079, - 4080, 4081, 4082, 4082, 4083, 4084, 4084, 4085, - 4086, 4086, 4087, 4087, 4088, 4088, 4089, 4089, - 4090, 4090, 4091, 4091, 4092, 4092, 4092, 4093, - 4093, 4093, 4094, 4094, 4094, 4094, 4095, 4095, - 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095 - }, - { 4096, 4095, 4095, 4095, 4095, 4095, 4095, 4095, - 4095, 4095, 4095, 4094, 4094, 4094, 4094, 4093, - 4093, 4093, 4092, 4092, 4092, 4091, 4091, 4090, - 4090, 4089, 4089, 4088, 4088, 4087, 4087, 4086, - 4086, 4085, 4084, 4084, 4083, 4082, 4082, 4081, - 4080, 4079, 4079, 4078, 4077, 4076, 4075, 4075, - 4074, 4073, 4072, 4071, 4070, 4069, 4068, 4067, - 4066, 4065, 4064, 4063, 4062, 4060, 4059, 4058, - 4057, 4056, 4055, 4053, 4052, 4051, 4050, 4048, - 4047, 4046, 4044, 4043, 4042, 4040, 4039, 4037, - 4036, 4034, 4033, 4031, 4030, 4028, 4027, 4025, - 4024, 4022, 4021, 4019, 4017, 4016, 4014, 4012, - 4011, 4009, 4007, 4005, 4004, 4002, 4000, 3998, - 3996, 3994, 3993, 3991, 3989, 3987, 3985, 3983, - 3981, 3979, 3977, 3975, 3973, 3971, 3969, 3967, - 3965, 3963, 3961, 3958, 3956, 3954, 3952, 3950, - 3948, 3945, 3943, 3941, 3939, 3936, 3934, 3932, - 3929, 3927, 3925, 3922, 3920, 3917, 3915, 3913, - 3910, 3908, 3905, 3903, 3900, 3898, 3895, 3893, - 3890, 3887, 3885, 3882, 3880, 3877, 3874, 3872, - 3869, 3866, 3864, 3861, 3858, 3855, 3853, 3850, - 3847, 3844, 3841, 3839, 3836, 3833, 3830, 3827, - 3824, 3821, 3818, 3815, 3812, 3809, 3807, 3804, - 3801, 3798, 
3794, 3791, 3788, 3785, 3782, 3779, - 3776, 3773, 3770, 3767, 3763, 3760, 3757, 3754, - 3751, 3747, 3744, 3741, 3738, 3734, 3731, 3728, - 3724, 3721, 3718, 3714, 3711, 3708, 3704, 3701, - 3698, 3694, 3691, 3687, 3684, 3680, 3677, 3673, - 3670, 3666, 3663, 3659, 3656, 3652, 3649, 3645, - 3641, 3638, 3634, 3630, 3627, 3623, 3619, 3616, - 3612, 3608, 3605, 3601, 3597, 3593, 3590, 3586, - 3582, 3578, 3575, 3571, 3567, 3563, 3559, 3555, - 3552, 3548, 3544, 3540, 3536, 3532, 3528, 3524, - 3520, 3516, 3512, 3508, 3504, 3500, 3496, 3492, - 3488, 3484, 3480, 3476, 3472, 3468, 3464, 3460, - 3455, 3451, 3447, 3443, 3439, 3435, 3431, 3426, - 3422, 3418, 3414, 3410, 3405, 3401, 3397, 3393, - 3388, 3384, 3380, 3375, 3371, 3367, 3362, 3358, - 3354, 3349, 3345, 3341, 3336, 3332, 3327, 3323, - 3319, 3314, 3310, 3305, 3301, 3296, 3292, 3287, - 3283, 3279, 3274, 3269, 3265, 3260, 3256, 3251, - 3247, 3242, 3238, 3233, 3228, 3224, 3219, 3215, - 3210, 3205, 3201, 3196, 3191, 3187, 3182, 3177, - 3173, 3168, 3163, 3159, 3154, 3149, 3145, 3140, - 3135, 3130, 3126, 3121, 3116, 3111, 3106, 3102, - 3097, 3092, 3087, 3082, 3078, 3073, 3068, 3063, - 3058, 3053, 3048, 3044, 3039, 3034, 3029, 3024, - 3019, 3014, 3009, 3004, 2999, 2994, 2989, 2984, - 2980, 2975, 2970, 2965, 2960, 2955, 2950, 2945, - 2940, 2935, 2930, 2924, 2919, 2914, 2909, 2904, - 2899, 2894, 2889, 2884, 2879, 2874, 2869, 2864, - 2859, 2853, 2848, 2843, 2838, 2833, 2828, 2823, - 2817, 2812, 2807, 2802, 2797, 2792, 2786, 2781, - 2776, 2771, 2766, 2760, 2755, 2750, 2745, 2740, - 2734, 2729, 2724, 2719, 2713, 2708, 2703, 2698, - 2692, 2687, 2682, 2676, 2671, 2666, 2661, 2655, - 2650, 2645, 2639, 2634, 2629, 2623, 2618, 2613, - 2607, 2602, 2597, 2591, 2586, 2581, 2575, 2570, - 2565, 2559, 2554, 2549, 2543, 2538, 2532, 2527, - 2522, 2516, 2511, 2505, 2500, 2495, 2489, 2484, - 2478, 2473, 2467, 2462, 2457, 2451, 2446, 2440, - 2435, 2429, 2424, 2419, 2413, 2408, 2402, 2397, - 2391, 2386, 2380, 2375, 2369, 2364, 2358, 2353, - 2347, 2342, 
2336, 2331, 2325, 2320, 2314, 2309, - 2304, 2298, 2292, 2287, 2281, 2276, 2270, 2265, - 2259, 2254, 2248, 2243, 2237, 2232, 2226, 2221, - 2215, 2210, 2204, 2199, 2193, 2188, 2182, 2177, - 2171, 2165, 2160, 2154, 2149, 2143, 2138, 2132, - 2127, 2121, 2116, 2110, 2105, 2099, 2093, 2088, - 2082, 2077, 2071, 2066, 2060, 2055, 2049, 2043, - 2038, 2032, 2027, 2021, 2016, 2010, 2005, 1999, - 1993, 1988, 1982, 1977, 1971, 1966, 1960, 1955, - 1949, 1943, 1938, 1932, 1927, 1921, 1916, 1910, - 1905, 1899, 1893, 1888, 1882, 1877, 1871, 1866, - 1860, 1855, 1849, 1844, 1838, 1832, 1827, 1821, - 1816, 1810, 1805, 1799, 1794, 1788, 1783, 1777, - 1772, 1766, 1761, 1755, 1749, 1744, 1738, 1733, - 1727, 1722, 1716, 1711, 1705, 1700, 1694, 1689, - 1683, 1678, 1672, 1667, 1661, 1656, 1650, 1645, - 1639, 1634, 1628, 1623, 1617, 1612, 1606, 1601, - 1596, 1590, 1585, 1579, 1574, 1568, 1563, 1557, - 1552, 1546, 1541, 1535, 1530, 1525, 1519, 1514, - 1508, 1503, 1497, 1492, 1487, 1481, 1476, 1470, - 1465, 1460, 1454, 1449, 1443, 1438, 1433, 1427, - 1422, 1417, 1411, 1406, 1400, 1395, 1390, 1384, - 1379, 1374, 1368, 1363, 1358, 1352, 1347, 1342, - 1336, 1331, 1326, 1321, 1315, 1310, 1305, 1299, - 1294, 1289, 1284, 1278, 1273, 1268, 1262, 1257, - 1252, 1247, 1242, 1236, 1231, 1226, 1221, 1215, - 1210, 1205, 1200, 1195, 1189, 1184, 1179, 1174, - 1169, 1164, 1158, 1153, 1148, 1143, 1138, 1133, - 1128, 1122, 1117, 1112, 1107, 1102, 1097, 1092, - 1087, 1082, 1077, 1072, 1067, 1062, 1056, 1051, - 1046, 1041, 1036, 1031, 1026, 1021, 1016, 1011, - 1006, 1001, 996, 991, 986, 982, 977, 972, - 967, 962, 957, 952, 947, 942, 937, 932, - 928, 923, 918, 913, 908, 903, 898, 894, - 889, 884, 879, 874, 870, 865, 860, 855, - 850, 846, 841, 836, 831, 827, 822, 817, - 813, 808, 803, 798, 794, 789, 784, 780, - 775, 771, 766, 761, 757, 752, 747, 743, - 738, 734, 729, 725, 720, 716, 711, 707, - 702, 698, 693, 689, 684, 680, 675, 671, - 666, 662, 657, 653, 649, 644, 640, 635, - 631, 627, 622, 618, 614, 609, 605, 601, 
- 596, 592, 588, 584, 579, 575, 571, 567, - 562, 558, 554, 550, 546, 541, 537, 533, - 529, 525, 521, 516, 512, 508, 504, 500, - 496, 492, 488, 484, 480, 476, 472, 468, - 464, 460, 456, 452, 448, 444, 440, 436, - 432, 429, 425, 421, 417, 413, 409, 405, - 402, 398, 394, 390, 386, 383, 379, 375, - 372, 368, 364, 360, 357, 353, 349, 346, - 342, 338, 335, 331, 328, 324, 321, 317, - 313, 310, 306, 303, 299, 296, 292, 289, - 286, 282, 279, 275, 272, 269, 265, 262, - 258, 255, 252, 248, 245, 242, 239, 235, - 232, 229, 226, 222, 219, 216, 213, 210, - 207, 203, 200, 197, 194, 191, 188, 185, - 182, 179, 176, 173, 170, 167, 164, 161, - 158, 155, 152, 149, 146, 144, 141, 138, - 135, 132, 129, 127, 124, 121, 118, 116, - 113, 110, 107, 105, 102, 99, 97, 94, - 92, 89, 86, 84, 81, 79, 76, 74, - 71, 69, 66, 64, 61, 59, 57, 54, - 52, 50, 47, 45, 43, 40, 38, 36, - 33, 31, 29, 27, 25, 22, 20, 18, - 16, 14, 12, 10, 8, 6, 4, 2 - }, - { 0, -1, -3, -5, -7, -9, -11, -13, - -15, -17, -19, -20, -23, -25, -27, -28, - -30, -33, -34, -36, -39, -40, -42, -43, - -45, -46, -49, -50, -52, -54, -56, -58, - -60, -61, -62, -65, -66, -68, -70, -72, - -73, -74, -77, -78, -80, -82, -83, -85, - -87, -89, -90, -92, -93, -95, -96, -98, - -100, -102, -103, -105, -106, -107, -108, -110, - -112, -114, -116, -116, -118, -120, -122, -122, - -124, -126, -127, -128, -130, -131, -133, -133, - -136, -137, -138, -139, -141, -142, -144, -145, - -147, -147, -150, -151, -151, -153, -155, -156, - -157, -159, -160, -161, -163, -164, -165, -166, - -168, -168, -170, -171, -172, -174, -174, -176, - -177, -178, -180, -181, -182, -183, -184, -185, - -187, -188, -189, -190, -191, -192, -193, -195, - -196, -196, -198, -199, -200, -200, -202, -204, - -204, -205, -206, -207, -208, -209, -211, -212, - -212, -213, -214, -215, -216, -217, -218, -220, - -220, -221, -222, -223, -224, -225, -225, -227, - -227, -228, -229, -230, -230, -231, -233, -234, - -234, -235, -235, -237, -238, -239, -239, -240, - -240, -242, -242, -243, -243, -245, 
-246, -247, - -247, -249, -248, -249, -250, -251, -251, -253, - -253, -253, -255, -255, -256, -256, -257, -258, - -259, -259, -260, -261, -261, -262, -262, -264, - -263, -265, -265, -265, -266, -267, -267, -268, - -269, -269, -269, -270, -271, -271, -272, -273, - -273, -273, -274, -274, -276, -275, -276, -277, - -277, -278, -278, -278, -279, -279, -280, -281, - -280, -281, -282, -283, -283, -282, -284, -284, - -284, -285, -285, -286, -286, -286, -287, -287, - -288, -288, -288, -289, -289, -289, -290, -290, - -290, -291, -291, -292, -291, -291, -292, -292, - -292, -293, -293, -293, -294, -294, -295, -295, - -294, -295, -295, -296, -297, -297, -297, -297, - -297, -297, -298, -298, -297, -298, -298, -298, - -299, -299, -300, -299, -299, -300, -299, -300, - -301, -300, -300, -301, -300, -301, -301, -301, - -301, -301, -302, -301, -302, -301, -302, -302, - -302, -302, -302, -302, -302, -302, -303, -302, - -303, -302, -303, -303, -302, -303, -303, -303, - -302, -303, -303, -302, -303, -303, -302, -303, - -303, -302, -303, -303, -302, -303, -303, -303, - -303, -302, -303, -303, -302, -302, -302, -303, - -302, -302, -302, -301, -303, -302, -301, -302, - -301, -301, -301, -302, -301, -301, -301, -300, - -301, -300, -300, -300, -300, -299, -300, -299, - -300, -300, -299, -300, -299, -299, -299, -299, - -298, -299, -298, -297, -297, -297, -296, -297, - -296, -296, -296, -296, -295, -296, -295, -296, - -295, -294, -294, -294, -293, -294, -294, -293, - -293, -292, -293, -292, -292, -292, -291, -290, - -291, -290, -291, -289, -289, -290, -289, -289, - -288, -288, -288, -288, -286, -287, -286, -286, - -286, -285, -286, -284, -284, -284, -284, -283, - -283, -283, -282, -282, -282, -281, -280, -281, - -279, -280, -280, -278, -279, -278, -278, -277, - -278, -276, -276, -277, -275, -276, -274, -275, - -274, -273, -273, -272, -273, -272, -272, -271, - -270, -270, -269, -269, -269, -268, -268, -267, - -267, -266, -266, -266, -265, -265, -264, -264, - -263, -263, -262, -262, -261, -261, 
-260, -260, - -259, -259, -258, -258, -257, -257, -256, -256, - -256, -255, -254, -254, -253, -253, -252, -252, - -251, -251, -250, -250, -249, -249, -248, -248, - -247, -247, -246, -246, -245, -245, -244, -244, - -243, -242, -242, -241, -241, -240, -239, -239, - -239, -238, -238, -237, -237, -235, -235, -235, - -234, -234, -232, -233, -232, -232, -231, -229, - -230, -229, -228, -228, -227, -226, -227, -225, - -224, -225, -223, -223, -222, -222, -221, -221, - -220, -219, -219, -218, -218, -216, -217, -216, - -215, -215, -214, -213, -212, -213, -211, -211, - -210, -210, -209, -209, -208, -206, -207, -206, - -205, -204, -204, -204, -203, -202, -202, -200, - -200, -200, -200, -198, -197, -197, -196, -195, - -195, -195, -194, -194, -192, -192, -191, -191, - -189, -189, -188, -188, -187, -186, -186, -186, - -185, -185, -183, -183, -182, -182, -181, -181, - -180, -178, -178, -177, -177, -176, -176, -174, - -174, -173, -173, -172, -172, -172, -170, -170, - -168, -168, -167, -167, -167, -165, -165, -164, - -164, -164, -162, -162, -161, -160, -160, -158, - -158, -158, -157, -156, -155, -155, -154, -153, - -153, -152, -151, -151, -150, -149, -149, -148, - -147, -147, -146, -146, -144, -144, -144, -142, - -142, -141, -142, -140, -140, -139, -138, -138, - -137, -136, -136, -134, -134, -133, -134, -132, - -132, -131, -130, -130, -128, -128, -128, -127, - -127, -126, -124, -124, -124, -123, -123, -122, - -121, -120, -120, -119, -118, -118, -117, -117, - -116, -115, -115, -115, -114, -113, -111, -111, - -110, -110, -109, -109, -108, -107, -107, -106, - -105, -104, -104, -103, -102, -103, -102, -101, - -101, -100, -99, -99, -98, -97, -97, -96, - -96, -95, -94, -94, -93, -92, -92, -91, - -91, -90, -89, -88, -88, -88, -87, -86, - -85, -86, -84, -84, -83, -82, -82, -81, - -81, -80, -80, -78, -79, -77, -77, -77, - -76, -76, -75, -74, -74, -73, -72, -72, - -72, -71, -70, -70, -69, -68, -68, -68, - -66, -67, -66, -65, -65, -65, -63, -63, - -62, -62, -61, -61, -60, -60, -60, -58, - -58, 
-58, -56, -56, -56, -55, -54, -55, - -54, -54, -53, -52, -51, -51, -51, -50, - -49, -49, -49, -49, -48, -47, -46, -46, - -46, -46, -45, -43, -43, -43, -43, -42, - -42, -42, -40, -40, -40, -39, -39, -38, - -38, -38, -37, -37, -36, -36, -35, -35, - -34, -35, -34, -33, -33, -32, -32, -31, - -31, -31, -30, -29, -29, -29, -28, -27, - -28, -28, -27, -26, -26, -25, -25, -25, - -24, -24, -24, -23, -23, -22, -22, -22, - -21, -21, -20, -20, -20, -20, -19, -18, - -19, -18, -18, -17, -18, -17, -16, -17, - -16, -15, -15, -15, -14, -14, -15, -13, - -13, -13, -13, -12, -12, -11, -12, -11, - -12, -10, -10, -10, -10, -10, -9, -10, - -9, -9, -9, -8, -8, -7, -8, -7, - -7, -7, -6, -6, -6, -7, -6, -6, - -5, -5, -5, -5, -5, -4, -4, -5, - -4, -4, -3, -3, -3, -3, -3, -2, - -3, -2, -2, -2, -1, -2, -1, -2, - -1, -1, -1, -1, -1, 0, -1, 0, - -1, -1, 0, 0, -1, 0, 0, -1, - 1, 1, 0, 0, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 - } -}; -#else /* defined(CONFIG_CSI2_PLUS) */ -static const int zoom_table[4][HRT_GDC_N] = { - { 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, 
-2<<4, - -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, - -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, - -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, - -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, - -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, - -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, - -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, - -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, - -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, - -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, - -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, - -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, - -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, - -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -12<<4, -12<<4, -12<<4, 
-12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, 
-18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, 
-12<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, - -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, - -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, - -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, - -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, - -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4 - }, - { 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, - 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, - 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, - 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, - 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, - 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, - 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, - 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, - 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, - 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, - 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, - 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, - 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, - 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, - 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, - 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, - 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, - 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, - 31<<4, 
31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, - 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, - 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, - 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, - 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, - 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, - 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, - 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, - 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, - 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, - 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, - 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, - 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, - 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, - 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, - 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, - 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, - 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, - 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, - 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, - 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, - 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, - 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, - 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, - 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, - 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, - 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, - 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, - 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, - 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, - 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, - 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, - 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, - 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, - 
116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, - 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, - 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, - 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, - 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, - 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, - 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, - 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, - 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, - 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, - 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, - 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, - 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, - 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, - 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, - 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, - 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, - 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, - 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, - 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, - 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, - 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, - 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, - 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, - 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, - 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, - 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, - 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, - 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, - 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, - 195<<4, 195<<4, 
195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, - 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, - 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, - 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, - 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, - 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, - 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, - 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, - 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, - 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, - 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, - 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, - 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, - 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, - 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, - 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, - 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, - 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, - 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, - 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, - 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, - 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, - 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, - 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, - 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, - 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, - 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, - 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, - 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, - 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, - 248<<4, 248<<4, 248<<4, 248<<4, 
248<<4, 248<<4, 248<<4, 248<<4, - 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, - 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, - 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, - 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, - 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, - 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, - 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, - 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, - 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, - 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, - 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, - 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, - 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4 - }, - { 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, - 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, - 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, - 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, - 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, - 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, - 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, - 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, - 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, - 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, - 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, - 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, - 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, - 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, - 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, - 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, - 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 
246<<4, 246<<4, 246<<4, - 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, - 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, - 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, - 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, - 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, - 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, - 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, - 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, - 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, - 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, - 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, - 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, - 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, - 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, - 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, - 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, - 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, - 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, - 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, - 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, - 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, - 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, - 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, - 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, - 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, - 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, - 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, - 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, - 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, - 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 
191<<4, - 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, - 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, - 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, - 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, - 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, - 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, - 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, - 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, - 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, - 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, - 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, - 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, - 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, - 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, - 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, - 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, - 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, - 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, - 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, - 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, - 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, - 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, - 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, - 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, - 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, - 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, - 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, - 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, - 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, - 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, - 110<<4, 
110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, - 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, - 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, - 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, - 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, - 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, - 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, - 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, - 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, - 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, - 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, - 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, - 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, - 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, - 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, - 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, - 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, - 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, - 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, - 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, - 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, - 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, - 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, - 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, - 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, - 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, - 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, - 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, - 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, - 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, - 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, - 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, - 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, - 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, - 27<<4, 
27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, - 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, - 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, - 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, - 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, - 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, - 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, - 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, - 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, - 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, - 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, - 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, - 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, - 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, - 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, - 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, - 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4 - }, - { 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, - -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, - -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, - -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -13<<4, -13<<4, 
-13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, - -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, - -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, - -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, - -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, - -18<<4, -18<<4, -18<<4, -18<<4, 
-18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, - -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, 
-12<<4, -12<<4, - -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, - -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, - -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, - -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, - -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, - -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, - -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, - -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, - -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, - -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, - -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, - -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, - -1<<4, -1<<4, -1<<4, -1<<4, 
-1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, - 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, - 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4 - } -}; -#endif -#else -#error "sh_css_params.c: GDC version must be \ - one of {GDC_VERSION_2}" -#endif - -static const struct ia_css_dz_config default_dz_config = { - HRT_GDC_N, - HRT_GDC_N, - { \ - {0, 0}, \ - {0, 0}, \ - } -}; - -static const struct ia_css_vector default_motion_config = { - 0, - 0 -}; - -/* ------ deprecated(bz675) : from ------ */ -static const struct ia_css_shading_settings default_shading_settings = { - 1 /* enable shading table conversion in the css - (This matches the legacy way.) 
*/ -}; -/* ------ deprecated(bz675) : to ------ */ - -struct ia_css_isp_skc_dvs_statistics { - ia_css_ptr p_data; -}; - -static enum ia_css_err -ref_sh_css_ddr_address_map( - struct sh_css_ddr_address_map *map, - struct sh_css_ddr_address_map *out); - -static enum ia_css_err -write_ia_css_isp_parameter_set_info_to_ddr( - struct ia_css_isp_parameter_set_info *me, - hrt_vaddress *out); - -static enum ia_css_err -free_ia_css_isp_parameter_set_info(hrt_vaddress ptr); - -static enum ia_css_err -sh_css_params_write_to_ddr_internal( - struct ia_css_pipe *pipe, - unsigned pipe_id, - struct ia_css_isp_parameters *params, - const struct ia_css_pipeline_stage *stage, - struct sh_css_ddr_address_map *ddr_map, - struct sh_css_ddr_address_map_size *ddr_map_size); - -static enum ia_css_err -sh_css_create_isp_params(struct ia_css_stream *stream, - struct ia_css_isp_parameters **isp_params_out); - -static bool -sh_css_init_isp_params_from_global(struct ia_css_stream *stream, - struct ia_css_isp_parameters *params, - bool use_default_config, - struct ia_css_pipe *pipe_in); - -static enum ia_css_err -sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe, - struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config, - struct ia_css_pipe *pipe_in); - -static enum ia_css_err -sh_css_set_global_isp_config_on_pipe( - struct ia_css_pipe *curr_pipe, - const struct ia_css_isp_config *config, - struct ia_css_pipe *pipe); - -#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) -static enum ia_css_err -sh_css_set_per_frame_isp_config_on_pipe( - struct ia_css_stream *stream, - const struct ia_css_isp_config *config, - struct ia_css_pipe *pipe); -#endif - -static enum ia_css_err -sh_css_update_uds_and_crop_info_based_on_zoom_region( - const struct ia_css_binary_info *info, - const struct ia_css_frame_info *in_frame_info, - const struct ia_css_frame_info *out_frame_info, - const struct ia_css_resolution *dvs_env, - const struct ia_css_dz_config *zoom, - const struct ia_css_vector 
*motion_vector, - struct sh_css_uds_info *uds, /* out */ - struct sh_css_crop_pos *sp_out_crop_pos, /* out */ - struct ia_css_resolution pipe_in_res, - bool enable_zoom); - -hrt_vaddress -sh_css_params_ddr_address_map(void) -{ - return sp_ddr_ptrs; -} - -/* **************************************************** - * Each coefficient is stored as 7bits to fit 2 of them into one - * ISP vector element, so we will store 4 coefficents on every - * memory word (32bits) - * - * 0: Coefficient 0 used bits - * 1: Coefficient 1 used bits - * 2: Coefficient 2 used bits - * 3: Coefficient 3 used bits - * x: not used - * - * xx33333332222222 | xx11111110000000 - * - * *************************************************** - */ -static struct ia_css_host_data * -convert_allocate_fpntbl(struct ia_css_isp_parameters *params) -{ - unsigned int i, j; - short *data_ptr; - struct ia_css_host_data *me; - unsigned int isp_format_data_size; - uint32_t *isp_format_data_ptr; - - assert(params != NULL); - - data_ptr = params->fpn_config.data; - isp_format_data_size = params->fpn_config.height * params->fpn_config.width * sizeof(uint32_t); - - me = ia_css_host_data_allocate(isp_format_data_size); - - if (!me) - return NULL; - - isp_format_data_ptr = (uint32_t *)me->address; - - for (i = 0; i < params->fpn_config.height; i++) { - for (j = 0; - j < params->fpn_config.width; - j += 4, data_ptr += 4, isp_format_data_ptr++) { - int data = data_ptr[0] << 0 | - data_ptr[1] << 7 | - data_ptr[2] << 16 | - data_ptr[3] << 23; - *isp_format_data_ptr = data; - } - } - return me; -} - -static enum ia_css_err -store_fpntbl(struct ia_css_isp_parameters *params, hrt_vaddress ptr) -{ - struct ia_css_host_data *isp_data; - - assert(params != NULL); - assert(ptr != mmgr_NULL); - - isp_data = convert_allocate_fpntbl(params); - if (!isp_data) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - ia_css_params_store_ia_css_host_data(ptr, isp_data); - - 
ia_css_host_data_free(isp_data); - return IA_CSS_SUCCESS; -} - -static void -convert_raw_to_fpn(struct ia_css_isp_parameters *params) -{ - int maxval = 0; - unsigned int i; - - assert(params != NULL); - - /* Find the maximum value in the table */ - for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++) { - int val = params->fpn_config.data[i]; - /* Make sure FPN value can be represented in 13-bit unsigned - * number (ISP precision - 1), but note that actual input range - * depends on precision of input frame data. - */ - if (val < 0) { -/* Checkpatch patch */ - val = 0; - } else if (val >= (1 << 13)) { -/* Checkpatch patch */ -/* MW: BUG, is "13" a system or application property */ - val = (1 << 13) - 1; - } - maxval = max(maxval, val); - } - /* Find the lowest shift value to remap the values in the range - * 0..maxval to 0..2^shiftval*63. - */ - params->fpn_config.shift = 0; - while (maxval > 63) { -/* MW: BUG, is "63" a system or application property */ - maxval >>= 1; - params->fpn_config.shift++; - } - /* Adjust the values in the table for the shift value */ - for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++) - ((unsigned short *) params->fpn_config.data)[i] >>= params->fpn_config.shift; -} - -static void -ia_css_process_kernel(struct ia_css_stream *stream, - struct ia_css_isp_parameters *params, - void (*process)(unsigned pipe_id, - const struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params)) -{ - int i; - for (i = 0; i < stream->num_pipes; i++) { - struct ia_css_pipe *pipe = stream->pipes[i]; - struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe); - struct ia_css_pipeline_stage *stage; - - /* update the other buffers to the pipe specific copies */ - for (stage = pipeline->stages; stage; stage = stage->next) { - if (!stage || !stage->binary) continue; - process(pipeline->pipe_id, stage, params); - } - } -} - -static enum ia_css_err -sh_css_select_dp_10bpp_config(const struct 
ia_css_pipe *pipe, bool *is_dp_10bpp) { - - enum ia_css_err err = IA_CSS_SUCCESS; - /* Currently we check if 10bpp DPC configuration is required based - * on the use case,i.e. if BDS and DPC is both enabled. The more cleaner - * design choice would be to expose the type of DPC (either 10bpp or 13bpp) - * using the binary info, but the current control flow does not allow this - * implementation. (This is because the configuration is set before a - * binary is selected, and the binary info is not available) - */ - if((pipe == NULL) || (is_dp_10bpp == NULL)) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR); - err = IA_CSS_ERR_INTERNAL_ERROR; - } else { - *is_dp_10bpp = false; - - /* check if DPC is enabled from the host */ - if (pipe->config.enable_dpc) { - /*check if BDS is enabled*/ - unsigned int required_bds_factor = SH_CSS_BDS_FACTOR_1_00; - if ((pipe->config.bayer_ds_out_res.width != 0) && - (pipe->config.bayer_ds_out_res.height != 0)) { - if (IA_CSS_SUCCESS == binarydesc_calculate_bds_factor( - pipe->config.input_effective_res, - pipe->config.bayer_ds_out_res, - &required_bds_factor)) { - if (SH_CSS_BDS_FACTOR_1_00 != required_bds_factor) { - /*we use 10bpp BDS configuration*/ - *is_dp_10bpp = true; - } - } - } - } - } - - return err; -} - -enum ia_css_err -sh_css_set_black_frame(struct ia_css_stream *stream, - const struct ia_css_frame *raw_black_frame) -{ - struct ia_css_isp_parameters *params; - /* this function desperately needs to be moved to the ISP or SP such - * that it can use the DMA. 
- */ - unsigned int height, width, y, x, k, data; - hrt_vaddress ptr; - - assert(stream != NULL); - assert(raw_black_frame != NULL); - - params = stream->isp_params_configs; - height = raw_black_frame->info.res.height; - width = raw_black_frame->info.padded_width, - - ptr = raw_black_frame->data - + raw_black_frame->planes.raw.offset; - - IA_CSS_ENTER_PRIVATE("black_frame=%p", raw_black_frame); - - if (params->fpn_config.data && - (params->fpn_config.width != width || params->fpn_config.height != height)) { - sh_css_free(params->fpn_config.data); - params->fpn_config.data = NULL; - } - if (params->fpn_config.data == NULL) { - params->fpn_config.data = sh_css_malloc(height * width * sizeof(short)); - if (!params->fpn_config.data) { - IA_CSS_ERROR("out of memory"); - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - params->fpn_config.width = width; - params->fpn_config.height = height; - params->fpn_config.shift = 0; - } - - /* store raw to fpntbl */ - for (y = 0; y < height; y++) { - for (x = 0; x < width; x += (ISP_VEC_NELEMS * 2)) { - int ofs = y * width + x; - for (k = 0; k < ISP_VEC_NELEMS; k += 2) { - mmgr_load(ptr, (void *)(&data), sizeof(int)); - params->fpn_config.data[ofs + 2 * k] = - (short) (data & 0xFFFF); - params->fpn_config.data[ofs + 2 * k + 2] = - (short) ((data >> 16) & 0xFFFF); - ptr += sizeof(int); /* byte system address */ - } - for (k = 0; k < ISP_VEC_NELEMS; k += 2) { - mmgr_load(ptr, (void *)(&data), sizeof(int)); - params->fpn_config.data[ofs + 2 * k + 1] = - (short) (data & 0xFFFF); - params->fpn_config.data[ofs + 2 * k + 3] = - (short) ((data >> 16) & 0xFFFF); - ptr += sizeof(int); /* byte system address */ - } - } - } - - /* raw -> fpn */ - convert_raw_to_fpn(params); - - /* overwrite isp parameter */ - ia_css_process_kernel(stream, params, ia_css_kernel_process_param[IA_CSS_FPN_ID]); - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - - return IA_CSS_SUCCESS; -} - -bool 
-sh_css_params_set_binning_factor(struct ia_css_stream *stream, unsigned int binning_fact) -{ - struct ia_css_isp_parameters *params; - - IA_CSS_ENTER_PRIVATE("void"); - assert(stream != NULL); - - params = stream->isp_params_configs; - - if (params->sensor_binning != binning_fact) { - params->sensor_binning = binning_fact; - params->sc_table_changed = true; - } - - IA_CSS_LEAVE_PRIVATE("void"); - - return params->sc_table_changed; -} - -static void -sh_css_update_shading_table_status(struct ia_css_pipe *pipe, - struct ia_css_isp_parameters *params) -{ - if (params && pipe && (pipe->pipe_num != params->sc_table_last_pipe_num)) { - params->sc_table_dirty = true; - params->sc_table_last_pipe_num = pipe->pipe_num; - } -} - -static void -sh_css_set_shading_table(struct ia_css_stream *stream, - struct ia_css_isp_parameters *params, - const struct ia_css_shading_table *table) -{ - IA_CSS_ENTER_PRIVATE(""); - if (table == NULL) - return; - assert(stream != NULL); - - if (!table->enable) - table = NULL; - - if ((table != params->sc_table) || params->sc_table_dirty) { - params->sc_table = table; - params->sc_table_changed = true; - params->sc_table_dirty = false; - /* Not very clean, this goes to sh_css.c to invalidate the - * shading table for all pipes. Should replaced by a loop - * and a pipe-specific call. 
- */ - if (!params->output_frame) - sh_css_invalidate_shading_tables(stream); - } - - IA_CSS_LEAVE_PRIVATE("void"); -} - -void -ia_css_params_store_ia_css_host_data( - hrt_vaddress ddr_addr, - struct ia_css_host_data *data) -{ - assert(data != NULL); - assert(data->address != NULL); - assert(ddr_addr != mmgr_NULL); - - IA_CSS_ENTER_PRIVATE(""); - - mmgr_store(ddr_addr, - (void *)(data->address), - (size_t)data->size); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -struct ia_css_host_data * -ia_css_params_alloc_convert_sctbl( - const struct ia_css_pipeline_stage *stage, - const struct ia_css_shading_table *shading_table) -{ - const struct ia_css_binary *binary = stage->binary; - struct ia_css_host_data *sctbl; - unsigned int i, j, aligned_width, row_padding; - unsigned int sctbl_size; - short int *ptr; - - assert(binary != NULL); - assert(shading_table != NULL); - - IA_CSS_ENTER_PRIVATE(""); - - if (shading_table == NULL) { - IA_CSS_LEAVE_PRIVATE("void"); - return NULL; - } - - aligned_width = binary->sctbl_aligned_width_per_color; - row_padding = aligned_width - shading_table->width; - sctbl_size = shading_table->height * IA_CSS_SC_NUM_COLORS * aligned_width * sizeof(short); - - sctbl = ia_css_host_data_allocate((size_t)sctbl_size); - - if (!sctbl) - return NULL; - ptr = (short int*)sctbl->address; - memset(ptr, - 0, - sctbl_size); - - for (i = 0; i < shading_table->height; i++) { - for (j = 0; j < IA_CSS_SC_NUM_COLORS; j++) { - memcpy(ptr, - &shading_table->data[j] - [i*shading_table->width], - shading_table->width * sizeof(short)); - ptr += aligned_width; - } - } - - IA_CSS_LEAVE_PRIVATE("void"); - return sctbl; -} - -enum ia_css_err ia_css_params_store_sctbl( - const struct ia_css_pipeline_stage *stage, - hrt_vaddress sc_tbl, - const struct ia_css_shading_table *sc_config) -{ - struct ia_css_host_data *isp_sc_tbl; - - IA_CSS_ENTER_PRIVATE(""); - - if (sc_config == NULL) { - IA_CSS_LEAVE_PRIVATE("void"); - return IA_CSS_SUCCESS; - } - - isp_sc_tbl = 
ia_css_params_alloc_convert_sctbl(stage, sc_config); - if (!isp_sc_tbl) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - /* store the shading table to ddr */ - ia_css_params_store_ia_css_host_data(sc_tbl, isp_sc_tbl); - ia_css_host_data_free(isp_sc_tbl); - - IA_CSS_LEAVE_PRIVATE("void"); - - return IA_CSS_SUCCESS; -} - -static void -sh_css_enable_pipeline(const struct ia_css_binary *binary) -{ - if (!binary) - return; - - IA_CSS_ENTER_PRIVATE(""); - - ia_css_isp_param_enable_pipeline(&binary->mem_params); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static enum ia_css_err -ia_css_process_zoom_and_motion( - struct ia_css_isp_parameters *params, - const struct ia_css_pipeline_stage *first_stage) -{ - /* first_stage can be NULL */ - const struct ia_css_pipeline_stage *stage; - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_resolution pipe_in_res; - pipe_in_res.width = 0; - pipe_in_res.height = 0; - - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE(""); - - /* Go through all stages to udate uds and cropping */ - for (stage = first_stage; stage; stage = stage->next) { - - struct ia_css_binary *binary; - /* note: the var below is made static as it is quite large; - if it is not static it ends up on the stack which could - cause issues for drivers - */ - static struct ia_css_binary tmp_binary; - - const struct ia_css_binary_xinfo *info = NULL; - - binary = stage->binary; - if (binary) { - info = binary->info; - } else { - const struct sh_css_binary_args *args = &stage->args; - const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL}; - if (args->out_frame[0]) - out_infos[0] = &args->out_frame[0]->info; - info = &stage->firmware->info.isp; - ia_css_binary_fill_info(info, false, false, - ATOMISP_INPUT_FORMAT_RAW_10, - args->in_frame ? &args->in_frame->info : NULL, - NULL, - out_infos, - args->out_vf_frame ? 
&args->out_vf_frame->info - : NULL, - &tmp_binary, - NULL, - -1, true); - binary = &tmp_binary; - binary->info = info; - } - - if (stage == first_stage) { - /* we will use pipe_in_res to scale the zoom crop region if needed */ - pipe_in_res = binary->effective_in_frame_res; - } - - assert(stage->stage_num < SH_CSS_MAX_STAGES); - if (params->dz_config.zoom_region.resolution.width == 0 && - params->dz_config.zoom_region.resolution.height == 0) { - sh_css_update_uds_and_crop_info( - &info->sp, - &binary->in_frame_info, - &binary->out_frame_info[0], - &binary->dvs_envelope, - ¶ms->dz_config, - ¶ms->motion_config, - ¶ms->uds[stage->stage_num].uds, - ¶ms->uds[stage->stage_num].crop_pos, - stage->enable_zoom); - } else { - err = sh_css_update_uds_and_crop_info_based_on_zoom_region( - &info->sp, - &binary->in_frame_info, - &binary->out_frame_info[0], - &binary->dvs_envelope, - ¶ms->dz_config, - ¶ms->motion_config, - ¶ms->uds[stage->stage_num].uds, - ¶ms->uds[stage->stage_num].crop_pos, - pipe_in_res, - stage->enable_zoom); - if (err != IA_CSS_SUCCESS) - return err; - } - } - params->isp_params_changed = true; - - IA_CSS_LEAVE_PRIVATE("void"); - return err; -} - -static void -sh_css_set_gamma_table(struct ia_css_isp_parameters *params, - const struct ia_css_gamma_table *table) -{ - if (table == NULL) - return; - IA_CSS_ENTER_PRIVATE("table=%p", table); - - assert(params != NULL); - params->gc_table = *table; - params->config_changed[IA_CSS_GC_ID] = true; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_get_gamma_table(const struct ia_css_isp_parameters *params, - struct ia_css_gamma_table *table) -{ - if (table == NULL) - return; - IA_CSS_ENTER_PRIVATE("table=%p", table); - - assert(params != NULL); - *table = params->gc_table; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_set_ctc_table(struct ia_css_isp_parameters *params, - const struct ia_css_ctc_table *table) -{ - if (table == NULL) - return; - - IA_CSS_ENTER_PRIVATE("table=%p", table); - - 
assert(params != NULL); - params->ctc_table = *table; - params->config_changed[IA_CSS_CTC_ID] = true; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_get_ctc_table(const struct ia_css_isp_parameters *params, - struct ia_css_ctc_table *table) -{ - if (table == NULL) - return; - - IA_CSS_ENTER_PRIVATE("table=%p", table); - - assert(params != NULL); - *table = params->ctc_table; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_set_macc_table(struct ia_css_isp_parameters *params, - const struct ia_css_macc_table *table) -{ - if (table == NULL) - return; - - IA_CSS_ENTER_PRIVATE("table=%p", table); - - assert(params != NULL); - params->macc_table = *table; - params->config_changed[IA_CSS_MACC_ID] = true; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_get_macc_table(const struct ia_css_isp_parameters *params, - struct ia_css_macc_table *table) -{ - if (table == NULL) - return; - - IA_CSS_ENTER_PRIVATE("table=%p", table); - - assert(params != NULL); - *table = params->macc_table; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -void ia_css_morph_table_free( - struct ia_css_morph_table *me) -{ - - unsigned int i; - - if (me == NULL) - return; - - IA_CSS_ENTER(""); - - - - for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) { - if (me->coordinates_x[i]) { - sh_css_free(me->coordinates_x[i]); - me->coordinates_x[i] = NULL; - } - if (me->coordinates_y[i]) { - sh_css_free(me->coordinates_y[i]); - me->coordinates_y[i] = NULL; - } - } - - sh_css_free(me); - IA_CSS_LEAVE("void"); - -} - - -struct ia_css_morph_table *ia_css_morph_table_allocate( - unsigned int width, - unsigned int height) -{ - - unsigned int i; - struct ia_css_morph_table *me; - - IA_CSS_ENTER(""); - - me = sh_css_malloc(sizeof(*me)); - if (me == NULL) { - IA_CSS_ERROR("out of memory"); - return me; - } - - for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) { - me->coordinates_x[i] = NULL; - me->coordinates_y[i] = NULL; - } - - for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) { - 
me->coordinates_x[i] = - sh_css_malloc(height * width * - sizeof(*me->coordinates_x[i])); - me->coordinates_y[i] = - sh_css_malloc(height * width * - sizeof(*me->coordinates_y[i])); - - if ((me->coordinates_x[i] == NULL) || - (me->coordinates_y[i] == NULL)) { - ia_css_morph_table_free(me); - me = NULL; - return me; - } - } - me->width = width; - me->height = height; - IA_CSS_LEAVE(""); - return me; - -} - - -static enum ia_css_err sh_css_params_default_morph_table( - struct ia_css_morph_table **table, - const struct ia_css_binary *binary) -{ - /* MW 2400 advanced requires different scaling */ - unsigned int i, j, k, step, width, height; - short start_x[IA_CSS_MORPH_TABLE_NUM_PLANES] = { -8, 0, -8, 0, 0, -8 }, - start_y[IA_CSS_MORPH_TABLE_NUM_PLANES] = { 0, 0, -8, -8, -8, 0 }; - struct ia_css_morph_table *tab; - - assert(table != NULL); - assert(binary != NULL); - - IA_CSS_ENTER_PRIVATE(""); - - step = (ISP_VEC_NELEMS / 16) * 128, - width = binary->morph_tbl_width, - height = binary->morph_tbl_height; - - tab = ia_css_morph_table_allocate(width, height); - if (tab == NULL) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) { - short val_y = start_y[i]; - for (j = 0; j < height; j++) { - short val_x = start_x[i]; - unsigned short *x_ptr, *y_ptr; - - x_ptr = &tab->coordinates_x[i][j * width]; - y_ptr = &tab->coordinates_y[i][j * width]; - for (k = 0; k < width; - k++, x_ptr++, y_ptr++, val_x += (short)step) { - if (k == 0) - *x_ptr = 0; - else if (k == width - 1) - *x_ptr = val_x + 2 * start_x[i]; - else - *x_ptr = val_x; - if (j == 0) - *y_ptr = 0; - else - *y_ptr = val_y; - } - val_y += (short)step; - } - } - *table = tab; - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - - return IA_CSS_SUCCESS; -} - -static void -sh_css_set_morph_table(struct ia_css_isp_parameters *params, - const struct ia_css_morph_table *table) -{ - if (table == NULL) - return; - - IA_CSS_ENTER_PRIVATE("table=%p", table); - - assert(params != 
NULL); - if (table->enable == false) - table = NULL; - params->morph_table = table; - params->morph_table_changed = true; - IA_CSS_LEAVE_PRIVATE("void"); -} - -void -ia_css_translate_3a_statistics( - struct ia_css_3a_statistics *host_stats, - const struct ia_css_isp_3a_statistics_map *isp_stats) -{ - IA_CSS_ENTER(""); - if (host_stats->grid.use_dmem) { - IA_CSS_LOG("3A: DMEM"); - ia_css_s3a_dmem_decode(host_stats, isp_stats->dmem_stats); - } else { - IA_CSS_LOG("3A: VMEM"); - ia_css_s3a_vmem_decode(host_stats, isp_stats->vmem_stats_hi, - isp_stats->vmem_stats_lo); - } -#if !defined(HAS_NO_HMEM) - IA_CSS_LOG("3A: HMEM"); - ia_css_s3a_hmem_decode(host_stats, isp_stats->hmem_stats); -#endif - - IA_CSS_LEAVE("void"); -} - -void -ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me) -{ - if (me) { - if (me->data_allocated) { - sh_css_free(me->data_ptr); - me->data_ptr = NULL; - me->data_allocated = false; - } - sh_css_free(me); - } -} - -struct ia_css_isp_3a_statistics_map * -ia_css_isp_3a_statistics_map_allocate( - const struct ia_css_isp_3a_statistics *isp_stats, - void *data_ptr) -{ - struct ia_css_isp_3a_statistics_map *me; - /* Windows compiler does not like adding sizes to a void * - * so we use a local char * instead. */ - char *base_ptr; - - me = sh_css_malloc(sizeof(*me)); - if (!me) { - IA_CSS_LEAVE("cannot allocate memory"); - goto err; - } - - me->data_ptr = data_ptr; - me->data_allocated = data_ptr == NULL; - if (!data_ptr) { - me->data_ptr = sh_css_malloc(isp_stats->size); - if (!me->data_ptr) { - IA_CSS_LEAVE("cannot allocate memory"); - goto err; - } - } - base_ptr = me->data_ptr; - - me->size = isp_stats->size; - /* GCC complains when we assign a char * to a void *, so these - * casts are necessary unfortunately. 
*/ - me->dmem_stats = (void *)base_ptr; - me->vmem_stats_hi = (void *)(base_ptr + isp_stats->dmem_size); - me->vmem_stats_lo = (void *)(base_ptr + isp_stats->dmem_size + - isp_stats->vmem_size); - me->hmem_stats = (void *)(base_ptr + isp_stats->dmem_size + - 2 * isp_stats->vmem_size); - - IA_CSS_LEAVE("map=%p", me); - return me; - -err: - if (me) - sh_css_free(me); - return NULL; - -} - -enum ia_css_err -ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats, - const struct ia_css_isp_3a_statistics *isp_stats) -{ - struct ia_css_isp_3a_statistics_map *map; - enum ia_css_err ret = IA_CSS_SUCCESS; - - IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats); - - assert(host_stats != NULL); - assert(isp_stats != NULL); - - map = ia_css_isp_3a_statistics_map_allocate(isp_stats, NULL); - if (map) { - mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size); - ia_css_translate_3a_statistics(host_stats, map); - ia_css_isp_3a_statistics_map_free(map); - } else { - IA_CSS_ERROR("out of memory"); - ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - - IA_CSS_LEAVE_ERR(ret); - return ret; -} - -/* Parameter encoding is not yet orthogonal. - This function hnadles some of the exceptions. -*/ -static void -ia_css_set_param_exceptions(const struct ia_css_pipe *pipe, - struct ia_css_isp_parameters *params) -{ - assert(params != NULL); - - /* Copy also to DP. Should be done by the driver. 
*/ - params->dp_config.gr = params->wb_config.gr; - params->dp_config.r = params->wb_config.r; - params->dp_config.b = params->wb_config.b; - params->dp_config.gb = params->wb_config.gb; -#ifdef ISP2401 - assert(pipe != NULL); - assert(pipe->mode < IA_CSS_PIPE_ID_NUM); - - if (pipe->mode < IA_CSS_PIPE_ID_NUM) { - params->pipe_dp_config[pipe->mode].gr = params->wb_config.gr; - params->pipe_dp_config[pipe->mode].r = params->wb_config.r; - params->pipe_dp_config[pipe->mode].b = params->wb_config.b; - params->pipe_dp_config[pipe->mode].gb = params->wb_config.gb; - } -#endif -} - -#ifdef ISP2401 -static void -sh_css_set_dp_config(const struct ia_css_pipe *pipe, - struct ia_css_isp_parameters *params, - const struct ia_css_dp_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - assert(pipe != NULL); - assert(pipe->mode < IA_CSS_PIPE_ID_NUM); - - IA_CSS_ENTER_PRIVATE("config=%p", config); - ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE); - if (pipe->mode < IA_CSS_PIPE_ID_NUM) { - params->pipe_dp_config[pipe->mode] = *config; - params->pipe_dpc_config_changed[pipe->mode] = true; - } - IA_CSS_LEAVE_PRIVATE("void"); -} -#endif - -static void -sh_css_get_dp_config(const struct ia_css_pipe *pipe, - const struct ia_css_isp_parameters *params, - struct ia_css_dp_config *config) -{ - if (config == NULL) - return; - - assert(params != NULL); - assert(pipe != NULL); - IA_CSS_ENTER_PRIVATE("config=%p", config); - - *config = params->pipe_dp_config[pipe->mode]; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_set_nr_config(struct ia_css_isp_parameters *params, - const struct ia_css_nr_config *config) -{ - if (config == NULL) - return; - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE("config=%p", config); - - ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE); - params->nr_config = *config; - params->yee_config.nr = *config; - params->config_changed[IA_CSS_NR_ID] = true; - params->config_changed[IA_CSS_YEE_ID] = true; - 
params->config_changed[IA_CSS_BNR_ID] = true; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_set_ee_config(struct ia_css_isp_parameters *params, - const struct ia_css_ee_config *config) -{ - if (config == NULL) - return; - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE("config=%p", config); - ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE); - - params->ee_config = *config; - params->yee_config.ee = *config; - params->config_changed[IA_CSS_YEE_ID] = true; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_get_ee_config(const struct ia_css_isp_parameters *params, - struct ia_css_ee_config *config) -{ - if (config == NULL) - return; - - IA_CSS_ENTER_PRIVATE("config=%p", config); - - assert(params != NULL); - *config = params->ee_config; - - ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE); - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_set_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe, - struct ia_css_isp_parameters *params, - const struct ia_css_dvs_6axis_config *dvs_config) -{ - if (dvs_config == NULL) - return; - assert(params != NULL); - assert(pipe != NULL); - assert(dvs_config->height_y == dvs_config->height_uv); - assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1)); - assert(pipe->mode < IA_CSS_PIPE_ID_NUM); - - IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config); - - copy_dvs_6axis_table(params->pipe_dvs_6axis_config[pipe->mode], dvs_config); - -#if !defined(HAS_NO_DVS_6AXIS_CONFIG_UPDATE) - params->pipe_dvs_6axis_config_changed[pipe->mode] = true; -#endif - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_get_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe, - const struct ia_css_isp_parameters *params, - struct ia_css_dvs_6axis_config *dvs_config) -{ - if (dvs_config == NULL) - return; - assert(params != NULL); - assert(pipe != NULL); - assert(dvs_config->height_y == dvs_config->height_uv); - assert((dvs_config->width_y - 1) == 2 * dvs_config->width_uv - 1); - - 
IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config); - - if ((pipe->mode < IA_CSS_PIPE_ID_NUM) && - (dvs_config->width_y == params->pipe_dvs_6axis_config[pipe->mode]->width_y) && - (dvs_config->height_y == params->pipe_dvs_6axis_config[pipe->mode]->height_y) && - (dvs_config->width_uv == params->pipe_dvs_6axis_config[pipe->mode]->width_uv) && - (dvs_config->height_uv == params->pipe_dvs_6axis_config[pipe->mode]->height_uv) && - dvs_config->xcoords_y && - dvs_config->ycoords_y && - dvs_config->xcoords_uv && - dvs_config->ycoords_uv) - { - copy_dvs_6axis_table(dvs_config, params->pipe_dvs_6axis_config[pipe->mode]); - } - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_set_baa_config(struct ia_css_isp_parameters *params, - const struct ia_css_aa_config *config) -{ - if (config == NULL) - return; - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE("config=%p", config); - - params->bds_config = *config; - params->config_changed[IA_CSS_BDS_ID] = true; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_get_baa_config(const struct ia_css_isp_parameters *params, - struct ia_css_aa_config *config) -{ - if (config == NULL) - return; - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE("config=%p", config); - - *config = params->bds_config; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_set_dz_config(struct ia_css_isp_parameters *params, - const struct ia_css_dz_config *config) -{ - if (config == NULL) - return; - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE("dx=%d, dy=%d", config->dx, config->dy); - - assert(config->dx <= HRT_GDC_N); - assert(config->dy <= HRT_GDC_N); - - params->dz_config = *config; - params->dz_config_changed = true; - /* JK: Why isp params changed?? 
*/ - params->isp_params_changed = true; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_get_dz_config(const struct ia_css_isp_parameters *params, - struct ia_css_dz_config *config) -{ - if (config == NULL) - return; - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE("config=%p", config); - - *config = params->dz_config; - - IA_CSS_LEAVE_PRIVATE("dx=%d, dy=%d", config->dx, config->dy); -} - -static void -sh_css_set_motion_vector(struct ia_css_isp_parameters *params, - const struct ia_css_vector *motion) -{ - if (motion == NULL) - return; - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE("x=%d, y=%d", motion->x, motion->y); - - params->motion_config = *motion; - /* JK: Why do isp params change? */ - params->motion_config_changed = true; - params->isp_params_changed = true; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -sh_css_get_motion_vector(const struct ia_css_isp_parameters *params, - struct ia_css_vector *motion) -{ - if (motion == NULL) - return; - assert(params != NULL); - - IA_CSS_ENTER_PRIVATE("motion=%p", motion); - - *motion = params->motion_config; - - IA_CSS_LEAVE_PRIVATE("x=%d, y=%d", motion->x, motion->y); -} - -struct ia_css_isp_config * -sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe) -{ - if (pipe == NULL) - { - IA_CSS_ERROR("pipe=%p", NULL); - return NULL; - } - return pipe->config.p_isp_config; -} - -enum ia_css_err -ia_css_stream_set_isp_config( - struct ia_css_stream *stream, - const struct ia_css_isp_config *config) -{ - return ia_css_stream_set_isp_config_on_pipe(stream, config, NULL); -} - -enum ia_css_err -ia_css_stream_set_isp_config_on_pipe( - struct ia_css_stream *stream, - const struct ia_css_isp_config *config, - struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - if ((stream == NULL) || (config == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - IA_CSS_ENTER("stream=%p, config=%p, pipe=%p", stream, config, pipe); - -#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - if (config->output_frame) - 
err = sh_css_set_per_frame_isp_config_on_pipe(stream, config, pipe); - else -#endif - err = sh_css_set_global_isp_config_on_pipe(stream->pipes[0], config, pipe); - - IA_CSS_LEAVE_ERR(err); - return err; -} - -enum ia_css_err -ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe, - struct ia_css_isp_config *config) -{ - struct ia_css_pipe *pipe_in = pipe; - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER("pipe=%p", pipe); - - if ((pipe == NULL) || (pipe->stream == NULL)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "config=%p\n", config); - -#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - if (config->output_frame) - err = sh_css_set_per_frame_isp_config_on_pipe(pipe->stream, config, pipe); - else -#endif - err = sh_css_set_global_isp_config_on_pipe(pipe, config, pipe_in); - IA_CSS_LEAVE_ERR(err); - return err; -} - -static enum ia_css_err -sh_css_set_global_isp_config_on_pipe( - struct ia_css_pipe *curr_pipe, - const struct ia_css_isp_config *config, - struct ia_css_pipe *pipe) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - enum ia_css_err err1 = IA_CSS_SUCCESS; - enum ia_css_err err2 = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", curr_pipe, config, pipe); - - err1 = sh_css_init_isp_params_from_config(curr_pipe, curr_pipe->stream->isp_params_configs, config, pipe); - - /* Now commit all changes to the SP */ - err2 = sh_css_param_update_isp_params(curr_pipe, curr_pipe->stream->isp_params_configs, sh_css_sp_is_running(), pipe); - - /* The following code is intentional. The sh_css_init_isp_params_from_config interface - * throws an error when both DPC and BDS is enabled. The CSS API must pass this error - * information to the caller, ie. the host. We do not return this error immediately, - * but instead continue with updating the ISP params to enable testing of features - * which are currently in TR phase. */ - - err = (err1 != IA_CSS_SUCCESS ) ? err1 : ((err2 != IA_CSS_SUCCESS) ? 
err2 : err); - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) -static enum ia_css_err -sh_css_set_per_frame_isp_config_on_pipe( - struct ia_css_stream *stream, - const struct ia_css_isp_config *config, - struct ia_css_pipe *pipe) -{ - unsigned i; - bool per_frame_config_created = false; - enum ia_css_err err = IA_CSS_SUCCESS; - enum ia_css_err err1 = IA_CSS_SUCCESS; - enum ia_css_err err2 = IA_CSS_SUCCESS; - enum ia_css_err err3 = IA_CSS_SUCCESS; - - struct sh_css_ddr_address_map *ddr_ptrs; - struct sh_css_ddr_address_map_size *ddr_ptrs_size; - struct ia_css_isp_parameters *params; - - IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", stream, config, pipe); - - if (!pipe) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - goto exit; - } - - /* create per-frame ISP params object with default values - * from stream->isp_params_configs if one doesn't already exist - */ - if (!stream->per_frame_isp_params_configs) - { - err = sh_css_create_isp_params(stream, - &stream->per_frame_isp_params_configs); - if(err != IA_CSS_SUCCESS) - goto exit; - per_frame_config_created = true; - } - - params = stream->per_frame_isp_params_configs; - - /* update new ISP params object with the new config */ - if (!sh_css_init_isp_params_from_global(stream, params, false, pipe)) { - err1 = IA_CSS_ERR_INVALID_ARGUMENTS; - } - - err2 = sh_css_init_isp_params_from_config(stream->pipes[0], params, config, pipe); - - if (per_frame_config_created) - { - ddr_ptrs = ¶ms->ddr_ptrs; - ddr_ptrs_size = ¶ms->ddr_ptrs_size; - /* create per pipe reference to general ddr_ptrs */ - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - ref_sh_css_ddr_address_map(ddr_ptrs, ¶ms->pipe_ddr_ptrs[i]); - params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size; - } - } - - /* now commit to ddr */ - err3 = sh_css_param_update_isp_params(stream->pipes[0], params, sh_css_sp_is_running(), pipe); - - /* The following code is intentional. 
The sh_css_init_sp_params_from_config and - * sh_css_init_isp_params_from_config throws an error when both DPC and BDS is enabled. - * The CSS API must pass this error information to the caller, ie. the host. - * We do not return this error immediately, but instead continue with updating the ISP params - * to enable testing of features which are currently in TR phase. */ - err = (err1 != IA_CSS_SUCCESS) ? err1 : - (err2 != IA_CSS_SUCCESS) ? err2 : - (err3 != IA_CSS_SUCCESS) ? err3 : err; -exit: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} -#endif - -static enum ia_css_err -sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe, - struct ia_css_isp_parameters *params, - const struct ia_css_isp_config *config, - struct ia_css_pipe *pipe_in) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - bool is_dp_10bpp = true; - assert(pipe != NULL); - - IA_CSS_ENTER_PRIVATE("pipe=%p, config=%p, params=%p", pipe, config, params); - - ia_css_set_configs(params, config); - - - sh_css_set_nr_config(params, config->nr_config); - sh_css_set_ee_config(params, config->ee_config); - sh_css_set_baa_config(params, config->baa_config); - if ((pipe->mode < IA_CSS_PIPE_ID_NUM) && - (params->pipe_dvs_6axis_config[pipe->mode])) - sh_css_set_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config); - sh_css_set_dz_config(params, config->dz_config); - sh_css_set_motion_vector(params, config->motion_vector); - sh_css_update_shading_table_status(pipe_in, params); - sh_css_set_shading_table(pipe->stream, params, config->shading_table); - sh_css_set_morph_table(params, config->morph_table); - sh_css_set_macc_table(params, config->macc_table); - sh_css_set_gamma_table(params, config->gamma_table); - sh_css_set_ctc_table(params, config->ctc_table); -/* ------ deprecated(bz675) : from ------ */ - sh_css_set_shading_settings(params, config->shading_settings); -/* ------ deprecated(bz675) : to ------ */ - - params->dis_coef_table_changed = (config->dvs_coefs != NULL); - 
params->dvs2_coef_table_changed = (config->dvs2_coefs != NULL); - - params->output_frame = config->output_frame; - params->isp_parameters_id = config->isp_config_id; -#ifdef ISP2401 - /* Currently we do not offer CSS interface to set different - * configurations for DPC, i.e. depending on DPC being enabled - * before (NORM+OBC) or after. The folllowing code to set the - * DPC configuration should be updated when this interface is made - * available */ - sh_css_set_dp_config(pipe, params, config->dp_config); - ia_css_set_param_exceptions(pipe, params); -#endif - - if (IA_CSS_SUCCESS == - sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp)) { - /* return an error when both DPC and BDS is enabled by the - * user. */ - /* we do not exit from this point immediately to allow internal - * firmware feature testing. */ - if(is_dp_10bpp) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - } else { - err = IA_CSS_ERR_INTERNAL_ERROR; - goto exit; - } - -#ifndef ISP2401 - ia_css_set_param_exceptions(pipe, params); -#endif -exit: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -void -ia_css_stream_get_isp_config( - const struct ia_css_stream *stream, - struct ia_css_isp_config *config) -{ - IA_CSS_ENTER("void"); - ia_css_pipe_get_isp_config(stream->pipes[0], config); - IA_CSS_LEAVE("void"); -} - -void -ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe, - struct ia_css_isp_config *config) -{ - struct ia_css_isp_parameters *params = NULL; - - assert(config != NULL); - - IA_CSS_ENTER("config=%p", config); - - params = pipe->stream->isp_params_configs; - assert(params != NULL); - - ia_css_get_configs(params, config); - - sh_css_get_ee_config(params, config->ee_config); - sh_css_get_baa_config(params, config->baa_config); - sh_css_get_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config); - sh_css_get_dp_config(pipe, params, config->dp_config); - sh_css_get_macc_table(params, config->macc_table); - sh_css_get_gamma_table(params, config->gamma_table); - 
sh_css_get_ctc_table(params, config->ctc_table); - sh_css_get_dz_config(params, config->dz_config); - sh_css_get_motion_vector(params, config->motion_vector); -/* ------ deprecated(bz675) : from ------ */ - sh_css_get_shading_settings(params, config->shading_settings); -/* ------ deprecated(bz675) : to ------ */ - - config->output_frame = params->output_frame; - config->isp_config_id = params->isp_parameters_id; - - IA_CSS_LEAVE("void"); -} - -#ifndef ISP2401 -/* - * coding style says the return of "mmgr_NULL" is the error signal - * - * Deprecated: Implement mmgr_realloc() - */ -static bool realloc_isp_css_mm_buf( - hrt_vaddress *curr_buf, - size_t *curr_size, - size_t needed_size, - bool force, - enum ia_css_err *err, - uint16_t mmgr_attribute) -{ - int32_t id; - - *err = IA_CSS_SUCCESS; - /* Possible optimization: add a function sh_css_isp_css_mm_realloc() - * and implement on top of hmm. */ - - IA_CSS_ENTER_PRIVATE("void"); - - if (!force && *curr_size >= needed_size) { - IA_CSS_LEAVE_PRIVATE("false"); - return false; - } - /* don't reallocate if single ref to buffer and same size */ - if (*curr_size == needed_size && ia_css_refcount_is_single(*curr_buf)) { - IA_CSS_LEAVE_PRIVATE("false"); - return false; - } - - id = IA_CSS_REFCOUNT_PARAM_BUFFER; - ia_css_refcount_decrement(id, *curr_buf); - *curr_buf = ia_css_refcount_increment(id, mmgr_alloc_attr(needed_size, - mmgr_attribute)); - - if (!*curr_buf) { - *err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - *curr_size = 0; - } else { - *curr_size = needed_size; - } - IA_CSS_LEAVE_PRIVATE("true"); - return true; -} - -static bool reallocate_buffer( - hrt_vaddress *curr_buf, - size_t *curr_size, - size_t needed_size, - bool force, - enum ia_css_err *err) -{ - bool ret; - uint16_t mmgr_attribute = MMGR_ATTRIBUTE_DEFAULT; - - IA_CSS_ENTER_PRIVATE("void"); - - ret = realloc_isp_css_mm_buf(curr_buf, - curr_size, needed_size, force, err, mmgr_attribute); - - IA_CSS_LEAVE_PRIVATE("ret=%d", ret); - return ret; -} - -#endif - 
-struct ia_css_isp_3a_statistics * -ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid) -{ - struct ia_css_isp_3a_statistics *me; - - IA_CSS_ENTER("grid=%p", grid); - - assert(grid != NULL); - - /* MW: Does "grid->enable" also control the histogram output ?? */ - if (!grid->enable) - return NULL; - - me = sh_css_calloc(1, sizeof(*me)); - if (!me) - goto err; - - if (grid->use_dmem) { - me->dmem_size = sizeof(struct ia_css_3a_output) * - grid->aligned_width * - grid->aligned_height; - } else { - me->vmem_size = ISP_S3ATBL_HI_LO_STRIDE_BYTES * - grid->aligned_height; - } -#if !defined(HAS_NO_HMEM) - me->hmem_size = sizeof_hmem(HMEM0_ID); -#endif - - /* All subsections need to be aligned to the system bus width */ - me->dmem_size = CEIL_MUL(me->dmem_size, HIVE_ISP_DDR_WORD_BYTES); - me->vmem_size = CEIL_MUL(me->vmem_size, HIVE_ISP_DDR_WORD_BYTES); - me->hmem_size = CEIL_MUL(me->hmem_size, HIVE_ISP_DDR_WORD_BYTES); - - me->size = me->dmem_size + me->vmem_size * 2 + me->hmem_size; - me->data_ptr = mmgr_malloc(me->size); - if (me->data_ptr == mmgr_NULL) { - sh_css_free(me); - me = NULL; - goto err; - } - if (me->dmem_size) - me->data.dmem.s3a_tbl = me->data_ptr; - if (me->vmem_size) { - me->data.vmem.s3a_tbl_hi = me->data_ptr + me->dmem_size; - me->data.vmem.s3a_tbl_lo = me->data_ptr + me->dmem_size + me->vmem_size; - } - if (me->hmem_size) - me->data_hmem.rgby_tbl = me->data_ptr + me->dmem_size + 2 * me->vmem_size; - - -err: - IA_CSS_LEAVE("return=%p", me); - return me; -} - -void -ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me) -{ - if (me != NULL) { - hmm_free(me->data_ptr); - sh_css_free(me); - } -} - -struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void) -{ - return NULL; -} - -struct ia_css_metadata * -ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info) -{ - struct ia_css_metadata *md = NULL; - - IA_CSS_ENTER(""); - - if (metadata_info->size == 0) - return NULL; - - md = 
sh_css_malloc(sizeof(*md)); - if (md == NULL) - goto error; - - md->info = *metadata_info; - md->exp_id = 0; - md->address = mmgr_malloc(metadata_info->size); - if (md->address == mmgr_NULL) - goto error; - - IA_CSS_LEAVE("return=%p", md); - return md; - -error: - ia_css_metadata_free(md); - IA_CSS_LEAVE("return=%p", NULL); - return NULL; -} - -void -ia_css_metadata_free(struct ia_css_metadata *me) -{ - if (me != NULL) { - /* The enter and leave macros are placed inside - * the condition to avoid false logging of metadata - * free events when metadata is disabled. - * We found this to be confusing during development - * and debugging. */ - IA_CSS_ENTER("me=%p", me); - hmm_free(me->address); - sh_css_free(me); - IA_CSS_LEAVE("void"); - } -} - -void -ia_css_metadata_free_multiple(unsigned int num_bufs, struct ia_css_metadata **bufs) -{ - unsigned int i; - - if (bufs != NULL) { - for (i = 0; i < num_bufs; i++) - ia_css_metadata_free(bufs[i]); - } -} - -static unsigned g_param_buffer_dequeue_count = 0; -static unsigned g_param_buffer_enqueue_count = 0; - -enum ia_css_err -ia_css_stream_isp_parameters_init(struct ia_css_stream *stream) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned i; - struct sh_css_ddr_address_map *ddr_ptrs; - struct sh_css_ddr_address_map_size *ddr_ptrs_size; - struct ia_css_isp_parameters *params; - - assert(stream != NULL); - IA_CSS_ENTER_PRIVATE("void"); - - if (stream == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS); - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - /* TMP: tracking of paramsets */ - g_param_buffer_dequeue_count = 0; - g_param_buffer_enqueue_count = 0; - - stream->per_frame_isp_params_configs = NULL; - err = sh_css_create_isp_params(stream, - &stream->isp_params_configs); - if(err != IA_CSS_SUCCESS) - goto ERR; - - params = stream->isp_params_configs; - if (!sh_css_init_isp_params_from_global(stream, params, true, NULL)) { - /* we do not return the error immediately to enable internal - * firmware feature 
testing */ - err = IA_CSS_ERR_INVALID_ARGUMENTS; - } - - ddr_ptrs = ¶ms->ddr_ptrs; - ddr_ptrs_size = ¶ms->ddr_ptrs_size; - - /* create per pipe reference to general ddr_ptrs */ - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - ref_sh_css_ddr_address_map(ddr_ptrs, ¶ms->pipe_ddr_ptrs[i]); - params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size; - } - -ERR: - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static void -ia_css_set_sdis_config( - struct ia_css_isp_parameters *params, - const struct ia_css_dvs_coefficients *dvs_coefs) -{ - ia_css_set_sdis_horicoef_config(params, dvs_coefs); - ia_css_set_sdis_vertcoef_config(params, dvs_coefs); - ia_css_set_sdis_horiproj_config(params, dvs_coefs); - ia_css_set_sdis_vertproj_config(params, dvs_coefs); -} - -static void -ia_css_set_sdis2_config( - struct ia_css_isp_parameters *params, - const struct ia_css_dvs2_coefficients *dvs2_coefs) -{ - ia_css_set_sdis2_horicoef_config(params, dvs2_coefs); - ia_css_set_sdis2_vertcoef_config(params, dvs2_coefs); - ia_css_set_sdis2_horiproj_config(params, dvs2_coefs); - ia_css_set_sdis2_vertproj_config(params, dvs2_coefs); -} - -static enum ia_css_err -sh_css_create_isp_params(struct ia_css_stream *stream, - struct ia_css_isp_parameters **isp_params_out) -{ - bool succ = true; - unsigned i; - struct sh_css_ddr_address_map *ddr_ptrs; - struct sh_css_ddr_address_map_size *ddr_ptrs_size; - enum ia_css_err err = IA_CSS_SUCCESS; - size_t params_size; - struct ia_css_isp_parameters *params = - sh_css_malloc(sizeof(struct ia_css_isp_parameters)); - - if (!params) - { - *isp_params_out = NULL; - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - IA_CSS_ERROR("%s:%d error: cannot allocate memory", __FILE__, __LINE__); - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } else { - memset(params, 0, sizeof(struct ia_css_isp_parameters)); - } - - ddr_ptrs = ¶ms->ddr_ptrs; - ddr_ptrs_size = ¶ms->ddr_ptrs_size; - - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - memset(¶ms->pipe_ddr_ptrs[i], 0, - 
sizeof(params->pipe_ddr_ptrs[i])); - memset(¶ms->pipe_ddr_ptrs_size[i], 0, - sizeof(params->pipe_ddr_ptrs_size[i])); - } - - memset(ddr_ptrs, 0, sizeof(*ddr_ptrs)); - memset(ddr_ptrs_size, 0, sizeof(*ddr_ptrs_size)); - - params_size = sizeof(params->uds); - ddr_ptrs_size->isp_param = params_size; - ddr_ptrs->isp_param = - ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER, - mmgr_malloc(params_size)); - succ &= (ddr_ptrs->isp_param != mmgr_NULL); - - ddr_ptrs_size->macc_tbl = sizeof(struct ia_css_macc_table); - ddr_ptrs->macc_tbl = - ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER, - mmgr_malloc(sizeof(struct ia_css_macc_table))); - succ &= (ddr_ptrs->macc_tbl != mmgr_NULL); - - *isp_params_out = params; - return err; -} - -static bool -sh_css_init_isp_params_from_global(struct ia_css_stream *stream, - struct ia_css_isp_parameters *params, - bool use_default_config, - struct ia_css_pipe *pipe_in) -{ - bool retval = true; - int i = 0; - bool is_dp_10bpp = true; - unsigned isp_pipe_version = ia_css_pipe_get_isp_pipe_version(stream->pipes[0]); - struct ia_css_isp_parameters *stream_params = stream->isp_params_configs; - - if (!use_default_config && !stream_params) { - retval = false; - goto exit; - } - - params->output_frame = NULL; - params->isp_parameters_id = 0; - - if (use_default_config) - { - ia_css_set_xnr3_config(params, &default_xnr3_config); - - sh_css_set_nr_config(params, &default_nr_config); - sh_css_set_ee_config(params, &default_ee_config); - if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1) - sh_css_set_macc_table(params, &default_macc_table); - else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2) - sh_css_set_macc_table(params, &default_macc2_table); - sh_css_set_gamma_table(params, &default_gamma_table); - sh_css_set_ctc_table(params, &default_ctc_table); - sh_css_set_baa_config(params, &default_baa_config); - sh_css_set_dz_config(params, &default_dz_config); -/* ------ deprecated(bz675) : from ------ */ - 
sh_css_set_shading_settings(params, &default_shading_settings); -/* ------ deprecated(bz675) : to ------ */ - - ia_css_set_s3a_config(params, &default_3a_config); - ia_css_set_wb_config(params, &default_wb_config); - ia_css_set_csc_config(params, &default_cc_config); - ia_css_set_tnr_config(params, &default_tnr_config); - ia_css_set_ob_config(params, &default_ob_config); - ia_css_set_dp_config(params, &default_dp_config); -#ifndef ISP2401 - ia_css_set_param_exceptions(pipe_in, params); -#else - - for (i = 0; i < stream->num_pipes; i++) { - if (IA_CSS_SUCCESS == sh_css_select_dp_10bpp_config(stream->pipes[i], &is_dp_10bpp)) { - /* set the return value as false if both DPC and - * BDS is enabled by the user. But we do not return - * the value immediately to enable internal firmware - * feature testing. */ - if(is_dp_10bpp) { - sh_css_set_dp_config(stream->pipes[i], params, &default_dp_10bpp_config); - } else { - sh_css_set_dp_config(stream->pipes[i], params, &default_dp_config); - } - } else { - retval = false; - goto exit; - } - - ia_css_set_param_exceptions(stream->pipes[i], params); - } - -#endif - ia_css_set_de_config(params, &default_de_config); - ia_css_set_gc_config(params, &default_gc_config); - ia_css_set_anr_config(params, &default_anr_config); - ia_css_set_anr2_config(params, &default_anr_thres); - ia_css_set_ce_config(params, &default_ce_config); - ia_css_set_xnr_table_config(params, &default_xnr_table); - ia_css_set_ecd_config(params, &default_ecd_config); - ia_css_set_ynr_config(params, &default_ynr_config); - ia_css_set_fc_config(params, &default_fc_config); - ia_css_set_cnr_config(params, &default_cnr_config); - ia_css_set_macc_config(params, &default_macc_config); - ia_css_set_ctc_config(params, &default_ctc_config); - ia_css_set_aa_config(params, &default_aa_config); - ia_css_set_r_gamma_config(params, &default_r_gamma_table); - ia_css_set_g_gamma_config(params, &default_g_gamma_table); - ia_css_set_b_gamma_config(params, &default_b_gamma_table); - 
ia_css_set_yuv2rgb_config(params, &default_yuv2rgb_cc_config); - ia_css_set_rgb2yuv_config(params, &default_rgb2yuv_cc_config); - ia_css_set_xnr_config(params, &default_xnr_config); - ia_css_set_sdis_config(params, &default_sdis_config); - ia_css_set_sdis2_config(params, &default_sdis2_config); - ia_css_set_formats_config(params, &default_formats_config); - - params->fpn_config.data = NULL; - params->config_changed[IA_CSS_FPN_ID] = true; - params->fpn_config.enabled = 0; - - params->motion_config = default_motion_config; - params->motion_config_changed = true; - - params->morph_table = NULL; - params->morph_table_changed = true; - - params->sc_table = NULL; - params->sc_table_changed = true; - params->sc_table_dirty = false; - params->sc_table_last_pipe_num = 0; - - ia_css_sdis2_clear_coefficients(¶ms->dvs2_coefs); - params->dvs2_coef_table_changed = true; - - ia_css_sdis_clear_coefficients(¶ms->dvs_coefs); - params->dis_coef_table_changed = true; -#ifdef ISP2401 - ia_css_tnr3_set_default_config(¶ms->tnr3_config); -#endif - } - else - { - ia_css_set_xnr3_config(params, &stream_params->xnr3_config); - - sh_css_set_nr_config(params, &stream_params->nr_config); - sh_css_set_ee_config(params, &stream_params->ee_config); - if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1) - sh_css_set_macc_table(params, &stream_params->macc_table); - else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2) - sh_css_set_macc_table(params, &stream_params->macc_table); - sh_css_set_gamma_table(params, &stream_params->gc_table); - sh_css_set_ctc_table(params, &stream_params->ctc_table); - sh_css_set_baa_config(params, &stream_params->bds_config); - sh_css_set_dz_config(params, &stream_params->dz_config); -/* ------ deprecated(bz675) : from ------ */ - sh_css_set_shading_settings(params, &stream_params->shading_settings); -/* ------ deprecated(bz675) : to ------ */ - - ia_css_set_s3a_config(params, &stream_params->s3a_config); - ia_css_set_wb_config(params, &stream_params->wb_config); - 
ia_css_set_csc_config(params, &stream_params->cc_config); - ia_css_set_tnr_config(params, &stream_params->tnr_config); - ia_css_set_ob_config(params, &stream_params->ob_config); - ia_css_set_dp_config(params, &stream_params->dp_config); - ia_css_set_de_config(params, &stream_params->de_config); - ia_css_set_gc_config(params, &stream_params->gc_config); - ia_css_set_anr_config(params, &stream_params->anr_config); - ia_css_set_anr2_config(params, &stream_params->anr_thres); - ia_css_set_ce_config(params, &stream_params->ce_config); - ia_css_set_xnr_table_config(params, &stream_params->xnr_table); - ia_css_set_ecd_config(params, &stream_params->ecd_config); - ia_css_set_ynr_config(params, &stream_params->ynr_config); - ia_css_set_fc_config(params, &stream_params->fc_config); - ia_css_set_cnr_config(params, &stream_params->cnr_config); - ia_css_set_macc_config(params, &stream_params->macc_config); - ia_css_set_ctc_config(params, &stream_params->ctc_config); - ia_css_set_aa_config(params, &stream_params->aa_config); - ia_css_set_r_gamma_config(params, &stream_params->r_gamma_table); - ia_css_set_g_gamma_config(params, &stream_params->g_gamma_table); - ia_css_set_b_gamma_config(params, &stream_params->b_gamma_table); - ia_css_set_yuv2rgb_config(params, &stream_params->yuv2rgb_cc_config); - ia_css_set_rgb2yuv_config(params, &stream_params->rgb2yuv_cc_config); - ia_css_set_xnr_config(params, &stream_params->xnr_config); - ia_css_set_formats_config(params, &stream_params->formats_config); - - for (i = 0; i < stream->num_pipes; i++) { - if (IA_CSS_SUCCESS == - sh_css_select_dp_10bpp_config(stream->pipes[i], &is_dp_10bpp)) { - /* set the return value as false if both DPC and - * BDS is enabled by the user. But we do not return - * the value immediately to enable internal firmware - * feature testing. 
*/ -#ifndef ISP2401 - retval = !is_dp_10bpp; -#else - if (is_dp_10bpp) { - retval = false; - } - } else { - retval = false; - goto exit; - } - if (stream->pipes[i]->mode < IA_CSS_PIPE_ID_NUM) { - sh_css_set_dp_config(stream->pipes[i], params, - &stream_params->pipe_dp_config[stream->pipes[i]->mode]); - ia_css_set_param_exceptions(stream->pipes[i], params); -#endif - } else { - retval = false; - goto exit; - } - } - -#ifndef ISP2401 - ia_css_set_param_exceptions(pipe_in, params); - -#endif - params->fpn_config.data = stream_params->fpn_config.data; - params->config_changed[IA_CSS_FPN_ID] = stream_params->config_changed[IA_CSS_FPN_ID]; - params->fpn_config.enabled = stream_params->fpn_config.enabled; - - sh_css_set_motion_vector(params, &stream_params->motion_config); - sh_css_set_morph_table(params, stream_params->morph_table); - - if (stream_params->sc_table) { - sh_css_update_shading_table_status(pipe_in, params); - sh_css_set_shading_table(stream, params, stream_params->sc_table); - } - else { - params->sc_table = NULL; - params->sc_table_changed = true; - params->sc_table_dirty = false; - params->sc_table_last_pipe_num = 0; - } - - /* Only IA_CSS_PIPE_ID_VIDEO & IA_CSS_PIPE_ID_CAPTURE will support dvs_6axis_config*/ - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - if (stream_params->pipe_dvs_6axis_config[i]) { - if (params->pipe_dvs_6axis_config[i]) { - copy_dvs_6axis_table(params->pipe_dvs_6axis_config[i], - stream_params->pipe_dvs_6axis_config[i]); - } else { - params->pipe_dvs_6axis_config[i] = - generate_dvs_6axis_table_from_config(stream_params->pipe_dvs_6axis_config[i]); - } - } - } - ia_css_set_sdis_config(params, &stream_params->dvs_coefs); - params->dis_coef_table_changed = stream_params->dis_coef_table_changed; - - ia_css_set_sdis2_config(params, &stream_params->dvs2_coefs); - params->dvs2_coef_table_changed = stream_params->dvs2_coef_table_changed; - params->sensor_binning = stream_params->sensor_binning; - } - -exit: - return retval; -} - -enum 
ia_css_err -sh_css_params_init(void) -{ - int i, p; - - IA_CSS_ENTER_PRIVATE("void"); - - /* TMP: tracking of paramsets */ - g_param_buffer_dequeue_count = 0; - g_param_buffer_enqueue_count = 0; - - for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++) { - for (i = 0; i < SH_CSS_MAX_STAGES; i++) { - xmem_sp_stage_ptrs[p][i] = - ia_css_refcount_increment(-1, - mmgr_calloc(1, - sizeof(struct sh_css_sp_stage))); - xmem_isp_stage_ptrs[p][i] = - ia_css_refcount_increment(-1, - mmgr_calloc(1, - sizeof(struct sh_css_isp_stage))); - - if ((xmem_sp_stage_ptrs[p][i] == mmgr_NULL) || - (xmem_isp_stage_ptrs[p][i] == mmgr_NULL)) { - sh_css_params_uninit(); - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - } - } - - ia_css_config_gamma_table(); - ia_css_config_ctc_table(); - ia_css_config_rgb_gamma_tables(); - ia_css_config_xnr_table(); - - sp_ddr_ptrs = ia_css_refcount_increment(-1, mmgr_calloc(1, - CEIL_MUL(sizeof(struct sh_css_ddr_address_map), - HIVE_ISP_DDR_WORD_BYTES))); - xmem_sp_group_ptrs = ia_css_refcount_increment(-1, mmgr_calloc(1, - sizeof(struct sh_css_sp_group))); - - if ((sp_ddr_ptrs == mmgr_NULL) || - (xmem_sp_group_ptrs == mmgr_NULL)) { - ia_css_uninit(); - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -static void host_lut_store(const void *lut) -{ - unsigned i; - - for (i = 0; i < N_GDC_ID; i++) - gdc_lut_store((gdc_ID_t)i, (const int (*)[HRT_GDC_N]) lut); -} - -/* Note that allocation is in ipu address space. 
*/ -inline hrt_vaddress sh_css_params_alloc_gdc_lut(void) -{ - return mmgr_malloc(sizeof(zoom_table)); -} - -inline void sh_css_params_free_gdc_lut(hrt_vaddress addr) -{ - if (addr != mmgr_NULL) - hmm_free(addr); -} - -enum ia_css_err ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe, - const void *lut) -{ - enum ia_css_err err = IA_CSS_SUCCESS; -#ifndef ISP2401 - bool store = true; -#else - bool stream_started = false; -#endif - IA_CSS_ENTER("pipe=%p lut=%p", pipe, lut); - - if (lut == NULL || pipe == NULL) { - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE("err=%d", err); - return err; - } - - /* If the pipe belongs to a stream and the stream has started, it is not - * safe to store lut to gdc HW. If pipe->stream is NULL, then no stream is - * created with this pipe, so it is safe to do this operation as long as - * ia_css_init() has been called. */ - if (pipe->stream && pipe->stream->started) { - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "unable to set scaler lut since stream has started\n"); -#ifndef ISP2401 - store = false; -#else - stream_started = true; -#endif - err = IA_CSS_ERR_NOT_SUPPORTED; - } - - /* Free any existing tables. 
*/ - sh_css_params_free_gdc_lut(pipe->scaler_pp_lut); - pipe->scaler_pp_lut = mmgr_NULL; - -#ifndef ISP2401 - if (store) { - pipe->scaler_pp_lut = mmgr_malloc(sizeof(zoom_table)); -#else - if (!stream_started) { - pipe->scaler_pp_lut = sh_css_params_alloc_gdc_lut(); -#endif - if (pipe->scaler_pp_lut == mmgr_NULL) { -#ifndef ISP2401 - IA_CSS_LEAVE("lut(%u) err=%d", pipe->scaler_pp_lut, err); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; -#else - ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "unable to allocate scaler_pp_lut\n"); - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } else { - gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])lut, - interleaved_lut_temp); - mmgr_store(pipe->scaler_pp_lut, - (int *)interleaved_lut_temp, - sizeof(zoom_table)); -#endif - } -#ifndef ISP2401 - - gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])lut, interleaved_lut_temp); - mmgr_store(pipe->scaler_pp_lut, (int *)interleaved_lut_temp, - sizeof(zoom_table)); -#endif - } - - IA_CSS_LEAVE("lut(%u) err=%d", pipe->scaler_pp_lut, err); - return err; -} - -/* if pipe is NULL, returns default lut addr. */ -hrt_vaddress sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe) -{ - assert(pipe != NULL); - - if (pipe->scaler_pp_lut != mmgr_NULL) - return pipe->scaler_pp_lut; - else - return sh_css_params_get_default_gdc_lut(); -} - -enum ia_css_err sh_css_params_map_and_store_default_gdc_lut(void) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - - IA_CSS_ENTER_PRIVATE("void"); - - /* Is table already mapped? Nothing to do if it is mapped. 
*/ - if (default_gdc_lut != mmgr_NULL) - return err; - - host_lut_store((void *)zoom_table); - -#ifndef ISP2401 - default_gdc_lut = mmgr_malloc(sizeof(zoom_table)); -#else - default_gdc_lut = sh_css_params_alloc_gdc_lut(); -#endif - if (default_gdc_lut == mmgr_NULL) - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])zoom_table, - interleaved_lut_temp); - mmgr_store(default_gdc_lut, (int *)interleaved_lut_temp, - sizeof(zoom_table)); - - IA_CSS_LEAVE_PRIVATE("lut(%u) err=%d", default_gdc_lut, err); - return err; -} - -void sh_css_params_free_default_gdc_lut(void) -{ - IA_CSS_ENTER_PRIVATE("void"); - - sh_css_params_free_gdc_lut(default_gdc_lut); - default_gdc_lut = mmgr_NULL; - - IA_CSS_LEAVE_PRIVATE("void"); - -} - -hrt_vaddress sh_css_params_get_default_gdc_lut(void) -{ - return default_gdc_lut; -} - -static void free_param_set_callback( - hrt_vaddress ptr) -{ - IA_CSS_ENTER_PRIVATE("void"); - - free_ia_css_isp_parameter_set_info(ptr); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void free_buffer_callback( - hrt_vaddress ptr) -{ - IA_CSS_ENTER_PRIVATE("void"); - - hmm_free(ptr); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -void -sh_css_param_clear_param_sets(void) -{ - IA_CSS_ENTER_PRIVATE("void"); - - ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -/* - * MW: we can define hmm_free() to return a NULL - * then you can write ptr = hmm_free(ptr); - */ -#define safe_free(id, x) \ - do { \ - ia_css_refcount_decrement(id, x); \ - (x) = mmgr_NULL; \ - } while(0) - -static void free_map(struct sh_css_ddr_address_map *map) -{ - unsigned int i; - - hrt_vaddress *addrs = (hrt_vaddress *)map; - - IA_CSS_ENTER_PRIVATE("void"); - - /* free buffers */ - for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size)/ - sizeof(size_t)); i++) { - if (addrs[i] == mmgr_NULL) - continue; - safe_free(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]); - } - - 
IA_CSS_LEAVE_PRIVATE("void"); -} - -void -ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream) -{ - int i; - struct ia_css_isp_parameters *params = stream->isp_params_configs; - struct ia_css_isp_parameters *per_frame_params = - stream->per_frame_isp_params_configs; - - IA_CSS_ENTER_PRIVATE("void"); - if (params == NULL) { - IA_CSS_LEAVE_PRIVATE("isp_param_configs is NULL"); - return; - } - - /* free existing ddr_ptr maps */ - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) - { - free_map(¶ms->pipe_ddr_ptrs[i]); - if (per_frame_params) - free_map(&per_frame_params->pipe_ddr_ptrs[i]); - /* Free up theDVS table memory blocks before recomputing new table */ - if (params->pipe_dvs_6axis_config[i]) - free_dvs_6axis_table(&(params->pipe_dvs_6axis_config[i])); - if (per_frame_params && per_frame_params->pipe_dvs_6axis_config[i]) - free_dvs_6axis_table(&(per_frame_params->pipe_dvs_6axis_config[i])); - } - free_map(¶ms->ddr_ptrs); - if (per_frame_params) - free_map(&per_frame_params->ddr_ptrs); - - if (params->fpn_config.data) { - sh_css_free(params->fpn_config.data); - params->fpn_config.data = NULL; - } - - /* Free up sc_config (temporal shading table) if it is allocated. 
*/ - if (params->sc_config) { - ia_css_shading_table_free(params->sc_config); - params->sc_config = NULL; - } - if (per_frame_params) { - if (per_frame_params->sc_config) { - ia_css_shading_table_free(per_frame_params->sc_config); - per_frame_params->sc_config = NULL; - } - } - - sh_css_free(params); - if (per_frame_params) - sh_css_free(per_frame_params); - stream->isp_params_configs = NULL; - stream->per_frame_isp_params_configs = NULL; - - IA_CSS_LEAVE_PRIVATE("void"); -} - -void -sh_css_params_uninit(void) -{ - unsigned p, i; - - IA_CSS_ENTER_PRIVATE("void"); - - ia_css_refcount_decrement(-1, sp_ddr_ptrs); - sp_ddr_ptrs = mmgr_NULL; - ia_css_refcount_decrement(-1, xmem_sp_group_ptrs); - xmem_sp_group_ptrs = mmgr_NULL; - - for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++) - for (i = 0; i < SH_CSS_MAX_STAGES; i++) { - ia_css_refcount_decrement(-1, xmem_sp_stage_ptrs[p][i]); - xmem_sp_stage_ptrs[p][i] = mmgr_NULL; - ia_css_refcount_decrement(-1, xmem_isp_stage_ptrs[p][i]); - xmem_isp_stage_ptrs[p][i] = mmgr_NULL; - } - - /* go through the pools to clear references */ - ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback); - ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_BUFFER, &free_buffer_callback); - ia_css_refcount_clear(-1, &free_buffer_callback); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static struct ia_css_host_data * -convert_allocate_morph_plane( - unsigned short *data, - unsigned int width, - unsigned int height, - unsigned int aligned_width) -{ - unsigned int i, j, padding, w; - struct ia_css_host_data *me; - unsigned int isp_data_size; - uint16_t *isp_data_ptr; - - IA_CSS_ENTER_PRIVATE("void"); - - /* currently we don't have morph table interpolation yet, - * so we allow a wider table to be used. This will be removed - * in the future. 
*/ - if (width > aligned_width) { - padding = 0; - w = aligned_width; - } else { - padding = aligned_width - width; - w = width; - } - isp_data_size = height * (w + padding) * sizeof(uint16_t); - - me = ia_css_host_data_allocate((size_t) isp_data_size); - - if (!me) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return NULL; - } - - isp_data_ptr = (uint16_t *)me->address; - - memset(isp_data_ptr, 0, (size_t)isp_data_size); - - for (i = 0; i < height; i++) { - for (j = 0; j < w; j++) - *isp_data_ptr++ = (uint16_t)data[j]; - isp_data_ptr += padding; - data += width; - } - - IA_CSS_LEAVE_PRIVATE("void"); - return me; -} - -static enum ia_css_err -store_morph_plane( - unsigned short *data, - unsigned int width, - unsigned int height, - hrt_vaddress dest, - unsigned int aligned_width) -{ - struct ia_css_host_data *isp_data; - - assert(dest != mmgr_NULL); - - isp_data = convert_allocate_morph_plane(data, width, height, aligned_width); - if (!isp_data) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - ia_css_params_store_ia_css_host_data(dest, isp_data); - - ia_css_host_data_free(isp_data); - return IA_CSS_SUCCESS; -} - -static void sh_css_update_isp_params_to_ddr( - struct ia_css_isp_parameters *params, - hrt_vaddress ddr_ptr) -{ - size_t size = sizeof(params->uds); - - IA_CSS_ENTER_PRIVATE("void"); - - assert(params != NULL); - - mmgr_store(ddr_ptr, &(params->uds), size); - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void sh_css_update_isp_mem_params_to_ddr( - const struct ia_css_binary *binary, - hrt_vaddress ddr_mem_ptr, - size_t size, - enum ia_css_isp_memories mem) -{ - const struct ia_css_host_data *params; - - IA_CSS_ENTER_PRIVATE("void"); - - params = ia_css_isp_param_get_mem_init(&binary->mem_params, IA_CSS_PARAM_CLASS_PARAM, mem); - mmgr_store(ddr_mem_ptr, params->address, size); - - IA_CSS_LEAVE_PRIVATE("void"); -} - -void ia_css_dequeue_param_buffers(/*unsigned int pipe_num*/ 
void) -{ - unsigned int i; - hrt_vaddress cpy; - enum sh_css_queue_id param_queue_ids[3] = { IA_CSS_PARAMETER_SET_QUEUE_ID, - IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID, - SH_CSS_INVALID_QUEUE_ID}; - - IA_CSS_ENTER_PRIVATE("void"); - - if (!sh_css_sp_is_running()) { - IA_CSS_LEAVE_PRIVATE("sp is not running"); - /* SP is not running. The queues are not valid */ - return; - } - - for (i = 0; SH_CSS_INVALID_QUEUE_ID != param_queue_ids[i]; i++) { - cpy = (hrt_vaddress)0; - /* clean-up old copy */ - while (IA_CSS_SUCCESS == ia_css_bufq_dequeue_buffer(param_queue_ids[i], (uint32_t *)&cpy)) { - /* TMP: keep track of dequeued param set count - */ - g_param_buffer_dequeue_count++; - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED, - 0, - param_queue_ids[i], - 0); - - IA_CSS_LOG("dequeued param set %x from %d, release ref", cpy, 0); - free_ia_css_isp_parameter_set_info(cpy); - cpy = (hrt_vaddress)0; - } - } - - IA_CSS_LEAVE_PRIVATE("void"); -} - -static void -process_kernel_parameters(unsigned int pipe_id, - struct ia_css_pipeline_stage *stage, - struct ia_css_isp_parameters *params, - unsigned int isp_pipe_version, - unsigned int raw_bit_depth) -{ - unsigned param_id; - - (void)isp_pipe_version; - (void)raw_bit_depth; - - sh_css_enable_pipeline(stage->binary); - - if (params->config_changed[IA_CSS_OB_ID]) { - ia_css_ob_configure(¶ms->stream_configs.ob, - isp_pipe_version, raw_bit_depth); - } - if (params->config_changed[IA_CSS_S3A_ID]) { - ia_css_s3a_configure(raw_bit_depth); - } - /* Copy stage uds parameters to config, since they can differ per stage. 
- */ - params->crop_config.crop_pos = params->uds[stage->stage_num].crop_pos; - params->uds_config.crop_pos = params->uds[stage->stage_num].crop_pos; - params->uds_config.uds = params->uds[stage->stage_num].uds; - /* Call parameter process functions for all kernels */ - /* Skip SC, since that is called on a temp sc table */ - for (param_id = 0; param_id < IA_CSS_NUM_PARAMETER_IDS; param_id++) { - if (param_id == IA_CSS_SC_ID) continue; - if (params->config_changed[param_id]) - ia_css_kernel_process_param[param_id](pipe_id, stage, params); - } -} - -enum ia_css_err -sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe, - struct ia_css_isp_parameters *params, - bool commit, - struct ia_css_pipe *pipe_in) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - hrt_vaddress cpy; - int i; - unsigned int raw_bit_depth = 10; - unsigned int isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_1; - bool acc_cluster_params_changed = false; - unsigned int thread_id, pipe_num; - - (void)acc_cluster_params_changed; - - assert(curr_pipe != NULL); - - IA_CSS_ENTER_PRIVATE("pipe=%p, isp_parameters_id=%d", pipe_in, params->isp_parameters_id); - raw_bit_depth = ia_css_stream_input_format_bits_per_pixel(curr_pipe->stream); - - /* now make the map available to the sp */ - if (!commit) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - /* enqueue a copies of the mem_map to - the designated pipelines */ - for (i = 0; i < curr_pipe->stream->num_pipes; i++) { - struct ia_css_pipe *pipe; - struct sh_css_ddr_address_map *cur_map; - struct sh_css_ddr_address_map_size *cur_map_size; - struct ia_css_isp_parameter_set_info isp_params_info; - struct ia_css_pipeline *pipeline; - struct ia_css_pipeline_stage *stage; - - enum sh_css_queue_id queue_id; - - pipe = curr_pipe->stream->pipes[i]; - pipeline = ia_css_pipe_get_pipeline(pipe); - pipe_num = ia_css_pipe_get_pipe_num(pipe); - isp_pipe_version = ia_css_pipe_get_isp_pipe_version(pipe); - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - -#if 
defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - ia_css_query_internal_queue_id(params->output_frame - ? IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET - : IA_CSS_BUFFER_TYPE_PARAMETER_SET, - thread_id, &queue_id); -#else - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_PARAMETER_SET, thread_id, &queue_id); -#endif - if (!sh_css_sp_is_running()) { - /* SP is not running. The queues are not valid */ - err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - break; - } - cur_map = ¶ms->pipe_ddr_ptrs[pipeline->pipe_id]; - cur_map_size = ¶ms->pipe_ddr_ptrs_size[pipeline->pipe_id]; - - /* TODO: Normally, zoom and motion parameters shouldn't - * be part of "isp_params" as it is resolution/pipe dependant - * Therefore, move the zoom config elsewhere (e.g. shading - * table can be taken as an example! @GC - * */ - { - /* we have to do this per pipeline because */ - /* the processing is a.o. resolution dependent */ - err = ia_css_process_zoom_and_motion(params, - pipeline->stages); - if (err != IA_CSS_SUCCESS) - return err; - } - /* check if to actually update the parameters for this pipe */ - /* When API change is implemented making good distinction between - * stream config and pipe config this skipping code can be moved out of the #ifdef */ - if (pipe_in && (pipe != pipe_in)) { - IA_CSS_LOG("skipping pipe %p", pipe); - continue; - } - - /* BZ 125915, should be moved till after "update other buff" */ - /* update the other buffers to the pipe specific copies */ - for (stage = pipeline->stages; stage; stage = stage->next) { - unsigned mem; - - if (!stage || !stage->binary) - continue; - - process_kernel_parameters(pipeline->pipe_id, - stage, params, - isp_pipe_version, raw_bit_depth); - - err = sh_css_params_write_to_ddr_internal( - pipe, - pipeline->pipe_id, - params, - stage, - cur_map, - cur_map_size); - - if (err != IA_CSS_SUCCESS) - break; - for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) { - params->isp_mem_params_changed - [pipeline->pipe_id][stage->stage_num][mem] = false; - } - } /* for 
*/ - if (err != IA_CSS_SUCCESS) - break; - /* update isp_params to pipe specific copies */ - if (params->isp_params_changed) { - reallocate_buffer(&cur_map->isp_param, - &cur_map_size->isp_param, - cur_map_size->isp_param, - true, - &err); - if (err != IA_CSS_SUCCESS) - break; - sh_css_update_isp_params_to_ddr(params, cur_map->isp_param); - } - - /* last make referenced copy */ - err = ref_sh_css_ddr_address_map( - cur_map, - &isp_params_info.mem_map); - if (err != IA_CSS_SUCCESS) - break; - - /* Update Parameters ID */ - isp_params_info.isp_parameters_id = params->isp_parameters_id; - - /* Update output frame pointer */ - isp_params_info.output_frame_ptr = - (params->output_frame) ? params->output_frame->data : mmgr_NULL; - - /* now write the copy to ddr */ - err = write_ia_css_isp_parameter_set_info_to_ddr(&isp_params_info, &cpy); - if (err != IA_CSS_SUCCESS) - break; - - /* enqueue the set to sp */ - IA_CSS_LOG("queue param set %x to %d", cpy, thread_id); - - err = ia_css_bufq_enqueue_buffer(thread_id, queue_id, (uint32_t)cpy); - if (IA_CSS_SUCCESS != err) { - free_ia_css_isp_parameter_set_info(cpy); -#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - IA_CSS_LOG("pfp: FAILED to add config id %d for OF %d to q %d on thread %d", - isp_params_info.isp_parameters_id, - isp_params_info.output_frame_ptr, - queue_id, thread_id); -#endif - break; - } - else { - /* TMP: check discrepancy between nr of enqueued - * parameter sets and dequeued sets - */ - g_param_buffer_enqueue_count++; - assert(g_param_buffer_enqueue_count < g_param_buffer_dequeue_count+50); -#ifdef ISP2401 - ia_css_save_latest_paramset_ptr(pipe, cpy); -#endif - /* - * Tell the SP which queues are not empty, - * by sending the software event. - */ - if (!sh_css_sp_is_running()) { - /* SP is not running. 
The queues are not valid */ - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE); - return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE; - } - ia_css_bufq_enqueue_psys_event( - IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED, - (uint8_t)thread_id, - (uint8_t)queue_id, - 0); -#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - IA_CSS_LOG("pfp: added config id %d for OF %d to q %d on thread %d", - isp_params_info.isp_parameters_id, - isp_params_info.output_frame_ptr, - queue_id, thread_id); -#endif - } - /* clean-up old copy */ - ia_css_dequeue_param_buffers(/*pipe_num*/); - params->pipe_dvs_6axis_config_changed[pipeline->pipe_id] = false; - } /* end for each 'active' pipeline */ - /* clear the changed flags after all params - for all pipelines have been updated */ - params->isp_params_changed = false; - params->sc_table_changed = false; - params->dis_coef_table_changed = false; - params->dvs2_coef_table_changed = false; - params->morph_table_changed = false; - params->dz_config_changed = false; - params->motion_config_changed = false; -/* ------ deprecated(bz675) : from ------ */ - params->shading_settings_changed = false; -/* ------ deprecated(bz675) : to ------ */ - - memset(¶ms->config_changed[0], 0, sizeof(params->config_changed)); - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static enum ia_css_err -sh_css_params_write_to_ddr_internal( - struct ia_css_pipe *pipe, - unsigned pipe_id, - struct ia_css_isp_parameters *params, - const struct ia_css_pipeline_stage *stage, - struct sh_css_ddr_address_map *ddr_map, - struct sh_css_ddr_address_map_size *ddr_map_size) -{ - enum ia_css_err err; - const struct ia_css_binary *binary; - - unsigned stage_num; - unsigned mem; - bool buff_realloced; - - /* struct is > 128 bytes so it should not be on stack (see checkpatch) */ - static struct ia_css_macc_table converted_macc_table; - - IA_CSS_ENTER_PRIVATE("void"); - assert(params != NULL); - assert(ddr_map != NULL); - assert(ddr_map_size != NULL); - assert(stage != NULL); - - binary = 
stage->binary; - assert(binary != NULL); - - - stage_num = stage->stage_num; - - if (binary->info->sp.enable.fpnr) { - buff_realloced = reallocate_buffer(&ddr_map->fpn_tbl, - &ddr_map_size->fpn_tbl, - (size_t)(FPNTBL_BYTES(binary)), - params->config_changed[IA_CSS_FPN_ID], - &err); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - if (params->config_changed[IA_CSS_FPN_ID] || buff_realloced) { - if (params->fpn_config.enabled) { - err = store_fpntbl(params, ddr_map->fpn_tbl); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - } - } - - if (binary->info->sp.enable.sc) { - uint32_t enable_conv = params-> - shading_settings.enable_shading_table_conversion; - - buff_realloced = reallocate_buffer(&ddr_map->sc_tbl, - &ddr_map_size->sc_tbl, - (size_t)(SCTBL_BYTES(binary)), - params->sc_table_changed, - &err); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - if (params->shading_settings_changed || - params->sc_table_changed || buff_realloced) { - if (enable_conv == 0) { - if (params->sc_table) { - /* store the shading table to ddr */ - err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_table); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - /* set sc_config to isp */ - params->sc_config = (struct ia_css_shading_table *)params->sc_table; - ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params); - params->sc_config = NULL; - } else { - /* generate the identical shading table */ - if (params->sc_config) { - ia_css_shading_table_free(params->sc_config); - params->sc_config = NULL; - } -#ifndef ISP2401 - sh_css_params_shading_id_table_generate(¶ms->sc_config, binary); -#else - sh_css_params_shading_id_table_generate(¶ms->sc_config, - binary->sctbl_width_per_color, binary->sctbl_height); -#endif - if (params->sc_config == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return 
IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - - /* store the shading table to ddr */ - err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - /* set sc_config to isp */ - ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params); - - /* free the shading table */ - ia_css_shading_table_free(params->sc_config); - params->sc_config = NULL; - } - } else { /* legacy */ -/* ------ deprecated(bz675) : from ------ */ - /* shading table is full resolution, reduce */ - if (params->sc_config) { - ia_css_shading_table_free(params->sc_config); - params->sc_config = NULL; - } - prepare_shading_table( - (const struct ia_css_shading_table *)params->sc_table, - params->sensor_binning, - ¶ms->sc_config, - binary, pipe->required_bds_factor); - if (params->sc_config == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - - /* store the shading table to ddr */ - err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - /* set sc_config to isp */ - ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params); - - /* free the shading table */ - ia_css_shading_table_free(params->sc_config); - params->sc_config = NULL; -/* ------ deprecated(bz675) : to ------ */ - } - } - } -#ifdef ISP2401 - /* DPC configuration is made pipe specific to allow flexibility in positioning of the - * DPC kernel. The code below sets the pipe specific configuration to - * individual binaries. 
*/ - if (params->pipe_dpc_config_changed[pipe_id] && binary->info->sp.enable.dpc) { - unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.dp.size; - - unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset; - if (size) { - ia_css_dp_encode((struct sh_css_isp_dp_params *) - &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset], - ¶ms->pipe_dp_config[pipe_id], size); -#endif - -#ifdef ISP2401 - params->isp_params_changed = true; - params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true; - } - } -#endif - if (params->config_changed[IA_CSS_MACC_ID] && binary->info->sp.enable.macc) { - unsigned int i, j, idx; - unsigned int idx_map[] = { - 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8}; - - for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) { - idx = 4*idx_map[i]; - j = 4*i; - - if (binary->info->sp.pipeline.isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1) { - converted_macc_table.data[idx] = - (int16_t)sDIGIT_FITTING(params->macc_table.data[j], - 13, SH_CSS_MACC_COEF_SHIFT); - converted_macc_table.data[idx+1] = - (int16_t)sDIGIT_FITTING(params->macc_table.data[j+1], - 13, SH_CSS_MACC_COEF_SHIFT); - converted_macc_table.data[idx+2] = - (int16_t)sDIGIT_FITTING(params->macc_table.data[j+2], - 13, SH_CSS_MACC_COEF_SHIFT); - converted_macc_table.data[idx+3] = - (int16_t)sDIGIT_FITTING(params->macc_table.data[j+3], - 13, SH_CSS_MACC_COEF_SHIFT); - } else if (binary->info->sp.pipeline.isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2) { - converted_macc_table.data[idx] = - params->macc_table.data[j]; - converted_macc_table.data[idx+1] = - params->macc_table.data[j+1]; - converted_macc_table.data[idx+2] = - params->macc_table.data[j+2]; - converted_macc_table.data[idx+3] = - params->macc_table.data[j+3]; - } - } - reallocate_buffer(&ddr_map->macc_tbl, - &ddr_map_size->macc_tbl, - ddr_map_size->macc_tbl, - true, - &err); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); 
- return err; - } - mmgr_store(ddr_map->macc_tbl, - converted_macc_table.data, - sizeof(converted_macc_table.data)); - } - - if (binary->info->sp.enable.dvs_6axis) { - /* because UV is packed into the Y plane, calc total - * YYU size = /2 gives size of UV-only, - * total YYU size = UV-only * 3. - */ - buff_realloced = reallocate_buffer( - &ddr_map->dvs_6axis_params_y, - &ddr_map_size->dvs_6axis_params_y, - (size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3), - params->pipe_dvs_6axis_config_changed[pipe_id], - &err); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - if (params->pipe_dvs_6axis_config_changed[pipe_id] || buff_realloced) { - const struct ia_css_frame_info *dvs_in_frame_info; - - if ( stage->args.delay_frames[0] ) { - /*When delay frames are present(as in case of video), - they are used for dvs. Configure DVS using those params*/ - dvs_in_frame_info = &stage->args.delay_frames[0]->info; - } else { - /*Otherwise, use input frame to configure DVS*/ - dvs_in_frame_info = &stage->args.in_frame->info; - } - - /* Generate default DVS unity table on start up*/ - if (params->pipe_dvs_6axis_config[pipe_id] == NULL) { - -#ifndef ISP2401 - struct ia_css_resolution dvs_offset; - dvs_offset.width = -#else - struct ia_css_resolution dvs_offset = {0, 0}; - if (binary->dvs_envelope.width || binary->dvs_envelope.height) { - dvs_offset.width = -#endif - (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2; -#ifndef ISP2401 - dvs_offset.height = -#else - dvs_offset.height = -#endif - (PIX_SHIFT_FILTER_RUN_IN_Y + binary->dvs_envelope.height) / 2; -#ifdef ISP2401 - } -#endif - - params->pipe_dvs_6axis_config[pipe_id] = - generate_dvs_6axis_table(&binary->out_frame_info[0].res, &dvs_offset); - if (params->pipe_dvs_6axis_config[pipe_id] == NULL) { - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY); - return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - } - params->pipe_dvs_6axis_config_changed[pipe_id] = true; - } - - 
store_dvs_6axis_config(params->pipe_dvs_6axis_config[pipe_id], - binary, - dvs_in_frame_info, - ddr_map->dvs_6axis_params_y); - params->isp_params_changed = true; - } - } - - if (binary->info->sp.enable.ca_gdc) { - unsigned int i; - hrt_vaddress *virt_addr_tetra_x[ - IA_CSS_MORPH_TABLE_NUM_PLANES]; - size_t *virt_size_tetra_x[ - IA_CSS_MORPH_TABLE_NUM_PLANES]; - hrt_vaddress *virt_addr_tetra_y[ - IA_CSS_MORPH_TABLE_NUM_PLANES]; - size_t *virt_size_tetra_y[ - IA_CSS_MORPH_TABLE_NUM_PLANES]; - - virt_addr_tetra_x[0] = &ddr_map->tetra_r_x; - virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x; - virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x; - virt_addr_tetra_x[3] = &ddr_map->tetra_b_x; - virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x; - virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x; - - virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x; - virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x; - virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x; - virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x; - virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x; - virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x; - - virt_addr_tetra_y[0] = &ddr_map->tetra_r_y; - virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y; - virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y; - virt_addr_tetra_y[3] = &ddr_map->tetra_b_y; - virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y; - virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y; - - virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y; - virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y; - virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y; - virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y; - virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y; - virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y; - - buff_realloced = false; - for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) { - buff_realloced |= - reallocate_buffer(virt_addr_tetra_x[i], - virt_size_tetra_x[i], - (size_t) - (MORPH_PLANE_BYTES(binary)), - params->morph_table_changed, - &err); - if (err != IA_CSS_SUCCESS) { - 
IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - buff_realloced |= - reallocate_buffer(virt_addr_tetra_y[i], - virt_size_tetra_y[i], - (size_t) - (MORPH_PLANE_BYTES(binary)), - params->morph_table_changed, - &err); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - } - if (params->morph_table_changed || buff_realloced) { - const struct ia_css_morph_table *table = params->morph_table; - struct ia_css_morph_table *id_table = NULL; - - if ((table != NULL) && - (table->width < binary->morph_tbl_width || - table->height < binary->morph_tbl_height)) { - table = NULL; - } - if (table == NULL) { - err = sh_css_params_default_morph_table(&id_table, - binary); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - table = id_table; - } - - for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) { - store_morph_plane(table->coordinates_x[i], - table->width, - table->height, - *virt_addr_tetra_x[i], - binary->morph_tbl_aligned_width); - store_morph_plane(table->coordinates_y[i], - table->width, - table->height, - *virt_addr_tetra_y[i], - binary->morph_tbl_aligned_width); - } - if (id_table != NULL) - ia_css_morph_table_free(id_table); - } - } - - /* After special cases like SC, FPN since they may change parameters */ - for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) { - const struct ia_css_isp_data *isp_data = - ia_css_isp_param_get_isp_mem_init(&binary->info->sp.mem_initializers, IA_CSS_PARAM_CLASS_PARAM, mem); - size_t size = isp_data->size; - if (!size) continue; - buff_realloced = reallocate_buffer(&ddr_map->isp_mem_param[stage_num][mem], - &ddr_map_size->isp_mem_param[stage_num][mem], - size, - params->isp_mem_params_changed[pipe_id][stage_num][mem], - &err); - if (err != IA_CSS_SUCCESS) { - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - if (params->isp_mem_params_changed[pipe_id][stage_num][mem] || buff_realloced) { - sh_css_update_isp_mem_params_to_ddr(binary, - ddr_map->isp_mem_param[stage_num][mem], - 
ddr_map_size->isp_mem_param[stage_num][mem], mem); - } - } - - IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS); - return IA_CSS_SUCCESS; -} - -const struct ia_css_fpn_table *ia_css_get_fpn_table(struct ia_css_stream *stream) -{ - struct ia_css_isp_parameters *params; - - IA_CSS_ENTER_LEAVE("void"); - assert(stream != NULL); - - params = stream->isp_params_configs; - - return &(params->fpn_config); -} - -struct ia_css_shading_table *ia_css_get_shading_table(struct ia_css_stream *stream) -{ - struct ia_css_shading_table *table = NULL; - struct ia_css_isp_parameters *params; - - IA_CSS_ENTER("void"); - - assert(stream != NULL); - - params = stream->isp_params_configs; - if (!params) - return NULL; - - if (params->shading_settings.enable_shading_table_conversion == 0) { - if (params->sc_table) { - table = (struct ia_css_shading_table *)params->sc_table; - } else { - const struct ia_css_binary *binary - = ia_css_stream_get_shading_correction_binary(stream); - if (binary) { - /* generate the identical shading table */ - if (params->sc_config) { - ia_css_shading_table_free(params->sc_config); - params->sc_config = NULL; - } -#ifndef ISP2401 - sh_css_params_shading_id_table_generate(¶ms->sc_config, binary); - -#else - sh_css_params_shading_id_table_generate(¶ms->sc_config, - binary->sctbl_width_per_color, binary->sctbl_height); -#endif - table = params->sc_config; - /* The sc_config will be freed in the - * ia_css_stream_isp_parameters_uninit function. 
*/ - } - } - } else { -/* ------ deprecated(bz675) : from ------ */ - const struct ia_css_binary *binary - = ia_css_stream_get_shading_correction_binary(stream); - struct ia_css_pipe *pipe; - - /**********************************************************************/ - /* following code is copied from function ia_css_stream_get_shading_correction_binary() - * to match with the binary */ - pipe = stream->pipes[0]; - - if (stream->num_pipes == 2) { - assert(stream->pipes[1] != NULL); - if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO || - stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW) - pipe = stream->pipes[1]; - } - /**********************************************************************/ - if (binary) { - if (params->sc_config) { - ia_css_shading_table_free(params->sc_config); - params->sc_config = NULL; - } - prepare_shading_table( - (const struct ia_css_shading_table *)params->sc_table, - params->sensor_binning, - ¶ms->sc_config, - binary, pipe->required_bds_factor); - - table = params->sc_config; - /* The sc_config will be freed in the - * ia_css_stream_isp_parameters_uninit function. 
*/ - } -/* ------ deprecated(bz675) : to ------ */ - } - - IA_CSS_LEAVE("table=%p", table); - - return table; -} - - -hrt_vaddress sh_css_store_sp_group_to_ddr(void) -{ - IA_CSS_ENTER_LEAVE_PRIVATE("void"); - mmgr_store(xmem_sp_group_ptrs, - &sh_css_sp_group, - sizeof(struct sh_css_sp_group)); - return xmem_sp_group_ptrs; -} - -hrt_vaddress sh_css_store_sp_stage_to_ddr( - unsigned pipe, - unsigned stage) -{ - IA_CSS_ENTER_LEAVE_PRIVATE("void"); - mmgr_store(xmem_sp_stage_ptrs[pipe][stage], - &sh_css_sp_stage, - sizeof(struct sh_css_sp_stage)); - return xmem_sp_stage_ptrs[pipe][stage]; -} - -hrt_vaddress sh_css_store_isp_stage_to_ddr( - unsigned pipe, - unsigned stage) -{ - IA_CSS_ENTER_LEAVE_PRIVATE("void"); - mmgr_store(xmem_isp_stage_ptrs[pipe][stage], - &sh_css_isp_stage, - sizeof(struct sh_css_isp_stage)); - return xmem_isp_stage_ptrs[pipe][stage]; -} - -static enum ia_css_err ref_sh_css_ddr_address_map( - struct sh_css_ddr_address_map *map, - struct sh_css_ddr_address_map *out) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - unsigned int i; - - /* we will use a union to copy things; overlaying an array - with the struct; that way adding fields in the struct - will keep things working, and we will not get type errors. 
- */ - union { - struct sh_css_ddr_address_map *map; - hrt_vaddress *addrs; - } in_addrs, to_addrs; - - IA_CSS_ENTER_PRIVATE("void"); - assert(map != NULL); - assert(out != NULL); - - in_addrs.map = map; - to_addrs.map = out; - - assert(sizeof(struct sh_css_ddr_address_map_size)/sizeof(size_t) == - sizeof(struct sh_css_ddr_address_map)/sizeof(hrt_vaddress)); - - /* copy map using size info */ - for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size)/ - sizeof(size_t)); i++) { - if (in_addrs.addrs[i] == mmgr_NULL) - to_addrs.addrs[i] = mmgr_NULL; - else - to_addrs.addrs[i] = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER, in_addrs.addrs[i]); - } - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static enum ia_css_err write_ia_css_isp_parameter_set_info_to_ddr( - struct ia_css_isp_parameter_set_info *me, - hrt_vaddress *out) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - bool succ; - - IA_CSS_ENTER_PRIVATE("void"); - - assert(me != NULL); - assert(out != NULL); - - *out = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_SET_POOL, mmgr_malloc( - sizeof(struct ia_css_isp_parameter_set_info))); - succ = (*out != mmgr_NULL); - if (succ) - mmgr_store(*out, - me, sizeof(struct ia_css_isp_parameter_set_info)); - else - err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY; - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -static enum ia_css_err -free_ia_css_isp_parameter_set_info( - hrt_vaddress ptr) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - struct ia_css_isp_parameter_set_info isp_params_info; - unsigned int i; - hrt_vaddress *addrs = (hrt_vaddress *)&isp_params_info.mem_map; - - IA_CSS_ENTER_PRIVATE("ptr = %u", ptr); - - /* sanity check - ptr must be valid */ - if (!ia_css_refcount_is_valid(ptr)) { - IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_SET_POOL(0x%x) invalid arg", __func__, ptr); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; - } - - mmgr_load(ptr, &isp_params_info.mem_map, sizeof(struct sh_css_ddr_address_map)); - /* copy 
map using size info */ - for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size)/ - sizeof(size_t)); i++) { - if (addrs[i] == mmgr_NULL) - continue; - - /* sanity check - ptr must be valid */ -#ifndef ISP2401 - if (!ia_css_refcount_is_valid(addrs[i])) { -#else - if (ia_css_refcount_is_valid(addrs[i])) { - ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]); - } else { -#endif - IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_BUFFER(0x%x) invalid arg", __func__, ptr); - err = IA_CSS_ERR_INVALID_ARGUMENTS; - continue; - } -#ifndef ISP2401 - - ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]); -#endif - } - ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_SET_POOL, ptr); - - IA_CSS_LEAVE_ERR_PRIVATE(err); - return err; -} - -/* Mark all parameters as changed to force recomputing the derived ISP parameters */ -void -sh_css_invalidate_params(struct ia_css_stream *stream) -{ - struct ia_css_isp_parameters *params; - unsigned i, j, mem; - - IA_CSS_ENTER_PRIVATE("void"); - assert(stream != NULL); - - params = stream->isp_params_configs; - params->isp_params_changed = true; - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - for (j = 0; j < SH_CSS_MAX_STAGES; j++) { - for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) { - params->isp_mem_params_changed[i][j][mem] = true; - } - } - } - - memset(¶ms->config_changed[0], 1, sizeof(params->config_changed)); - params->dis_coef_table_changed = true; - params->dvs2_coef_table_changed = true; - params->morph_table_changed = true; - params->sc_table_changed = true; - params->dz_config_changed = true; - params->motion_config_changed = true; - - /*Free up theDVS table memory blocks before recomputing new table */ - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - if (params->pipe_dvs_6axis_config[i]) { - free_dvs_6axis_table(&(params->pipe_dvs_6axis_config[i])); - params->pipe_dvs_6axis_config_changed[i] = true; - } - } - - IA_CSS_LEAVE_PRIVATE("void"); -} - -void -sh_css_update_uds_and_crop_info( - const struct 
ia_css_binary_info *info, - const struct ia_css_frame_info *in_frame_info, - const struct ia_css_frame_info *out_frame_info, - const struct ia_css_resolution *dvs_env, - const struct ia_css_dz_config *zoom, - const struct ia_css_vector *motion_vector, - struct sh_css_uds_info *uds, /* out */ - struct sh_css_crop_pos *sp_out_crop_pos, /* out */ - bool enable_zoom) -{ - IA_CSS_ENTER_PRIVATE("void"); - - assert(info != NULL); - assert(in_frame_info != NULL); - assert(out_frame_info != NULL); - assert(dvs_env != NULL); - assert(zoom != NULL); - assert(motion_vector != NULL); - assert(uds != NULL); - assert(sp_out_crop_pos != NULL); - - uds->curr_dx = enable_zoom ? (uint16_t)zoom->dx : HRT_GDC_N; - uds->curr_dy = enable_zoom ? (uint16_t)zoom->dy : HRT_GDC_N; - - if (info->enable.dvs_envelope) { - unsigned int crop_x = 0, - crop_y = 0, - uds_xc = 0, - uds_yc = 0, - env_width, env_height; - int half_env_x, half_env_y; - int motion_x = motion_vector->x; - int motion_y = motion_vector->y; - bool upscale_x = in_frame_info->res.width < out_frame_info->res.width; - bool upscale_y = in_frame_info->res.height < out_frame_info->res.height; - - if (info->enable.uds && !info->enable.ds) { - /** - * we calculate with the envelope that we can actually - * use, the min dvs envelope is for the filter - * initialization. - */ - env_width = dvs_env->width - - SH_CSS_MIN_DVS_ENVELOPE; - env_height = dvs_env->height - - SH_CSS_MIN_DVS_ENVELOPE; - half_env_x = env_width / 2; - half_env_y = env_height / 2; - /** - * for digital zoom, we use the dvs envelope and make - * sure that we don't include the 8 leftmost pixels or - * 8 topmost rows. 
- */ - if (upscale_x) { - uds_xc = (in_frame_info->res.width - + env_width - + SH_CSS_MIN_DVS_ENVELOPE) / 2; - } else { - uds_xc = (out_frame_info->res.width - + env_width) / 2 - + SH_CSS_MIN_DVS_ENVELOPE; - } - if (upscale_y) { - uds_yc = (in_frame_info->res.height - + env_height - + SH_CSS_MIN_DVS_ENVELOPE) / 2; - } else { - uds_yc = (out_frame_info->res.height - + env_height) / 2 - + SH_CSS_MIN_DVS_ENVELOPE; - } - /* clip the motion vector to +/- half the envelope */ - motion_x = clamp(motion_x, -half_env_x, half_env_x); - motion_y = clamp(motion_y, -half_env_y, half_env_y); - uds_xc += motion_x; - uds_yc += motion_y; - /* uds can be pipelined, remove top lines */ - crop_y = 2; - } else if (info->enable.ds) { - env_width = dvs_env->width; - env_height = dvs_env->height; - half_env_x = env_width / 2; - half_env_y = env_height / 2; - /* clip the motion vector to +/- half the envelope */ - motion_x = clamp(motion_x, -half_env_x, half_env_x); - motion_y = clamp(motion_y, -half_env_y, half_env_y); - /* for video with downscaling, the envelope is included - in the input resolution. */ - uds_xc = in_frame_info->res.width/2 + motion_x; - uds_yc = in_frame_info->res.height/2 + motion_y; - crop_x = info->pipeline.left_cropping; - /* ds == 2 (yuv_ds) can be pipelined, remove top - lines */ - if (info->enable.ds & 1) - crop_y = info->pipeline.top_cropping; - else - crop_y = 2; - } else { - /* video nodz: here we can only crop. We make sure we - crop at least the first 8x8 pixels away. 
*/ - env_width = dvs_env->width - - SH_CSS_MIN_DVS_ENVELOPE; - env_height = dvs_env->height - - SH_CSS_MIN_DVS_ENVELOPE; - half_env_x = env_width / 2; - half_env_y = env_height / 2; - motion_x = clamp(motion_x, -half_env_x, half_env_x); - motion_y = clamp(motion_y, -half_env_y, half_env_y); - crop_x = SH_CSS_MIN_DVS_ENVELOPE - + half_env_x + motion_x; - crop_y = SH_CSS_MIN_DVS_ENVELOPE - + half_env_y + motion_y; - } - - /* Must enforce that the crop position is even */ - crop_x = EVEN_FLOOR(crop_x); - crop_y = EVEN_FLOOR(crop_y); - uds_xc = EVEN_FLOOR(uds_xc); - uds_yc = EVEN_FLOOR(uds_yc); - - uds->xc = (uint16_t)uds_xc; - uds->yc = (uint16_t)uds_yc; - sp_out_crop_pos->x = (uint16_t)crop_x; - sp_out_crop_pos->y = (uint16_t)crop_y; - } - else { - /* for down scaling, we always use the center of the image */ - uds->xc = (uint16_t)in_frame_info->res.width / 2; - uds->yc = (uint16_t)in_frame_info->res.height / 2; - sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping; - sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping; - } - IA_CSS_LEAVE_PRIVATE("void"); -} - -static enum ia_css_err -sh_css_update_uds_and_crop_info_based_on_zoom_region( - const struct ia_css_binary_info *info, - const struct ia_css_frame_info *in_frame_info, - const struct ia_css_frame_info *out_frame_info, - const struct ia_css_resolution *dvs_env, - const struct ia_css_dz_config *zoom, - const struct ia_css_vector *motion_vector, - struct sh_css_uds_info *uds, /* out */ - struct sh_css_crop_pos *sp_out_crop_pos, /* out */ - struct ia_css_resolution pipe_in_res, - bool enable_zoom) -{ - unsigned int x0 = 0, y0 = 0, x1 = 0, y1 = 0; - enum ia_css_err err = IA_CSS_SUCCESS; - /* Note: - * Filter_Envelope = 0 for NND/LUT - * Filter_Envelope = 1 for BCI - * Filter_Envelope = 3 for BLI - * Currently, not considering this filter envelope because, In uds.sp.c is recalculating - * the dx/dy based on filter envelope and other information (ia_css_uds_sp_scale_params) - * Ideally, That should be 
done on host side not on sp side. - */ - unsigned int filter_envelope = 0; - IA_CSS_ENTER_PRIVATE("void"); - - assert(info != NULL); - assert(in_frame_info != NULL); - assert(out_frame_info != NULL); - assert(dvs_env != NULL); - assert(zoom != NULL); - assert(motion_vector != NULL); - assert(uds != NULL); - assert(sp_out_crop_pos != NULL); - x0 = zoom->zoom_region.origin.x; - y0 = zoom->zoom_region.origin.y; - x1 = zoom->zoom_region.resolution.width + x0; - y1 = zoom->zoom_region.resolution.height + y0; - - if ((x0 > x1) || (y0 > y1) || (x1 > pipe_in_res.width) || (y1 > pipe_in_res.height)) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - if (!enable_zoom) { - uds->curr_dx = HRT_GDC_N; - uds->curr_dy = HRT_GDC_N; - } - - if (info->enable.dvs_envelope) { - /* Zoom region is only supported by the UDS module on ISP - * 2 and higher. It is not supported in video mode on ISP 1 */ - return IA_CSS_ERR_INVALID_ARGUMENTS; - } else { - if (enable_zoom) { - /* A. Calculate dx/dy based on crop region using in_frame_info - * Scale the crop region if in_frame_info to the stage is not same as - * actual effective input of the pipeline - */ - if (in_frame_info->res.width != pipe_in_res.width || - in_frame_info->res.height != pipe_in_res.height) { - x0 = (x0 * in_frame_info->res.width) / (pipe_in_res.width); - y0 = (y0 * in_frame_info->res.height) / (pipe_in_res.height); - x1 = (x1 * in_frame_info->res.width) / (pipe_in_res.width); - y1 = (y1 * in_frame_info->res.height) / (pipe_in_res.height); - } - uds->curr_dx = - ((x1 - x0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.width; - uds->curr_dy = - ((y1 - y0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.height; - - /* B. 
Calculate xc/yc based on crop region */ - uds->xc = (uint16_t) x0 + (((x1)-(x0)) / 2); - uds->yc = (uint16_t) y0 + (((y1)-(y0)) / 2); - } else { - uds->xc = (uint16_t)in_frame_info->res.width / 2; - uds->yc = (uint16_t)in_frame_info->res.height / 2; - } - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "uds->curr_dx=%d, uds->xc=%d, uds->yc=%d\n", - uds->curr_dx, uds->xc, uds->yc); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x0=%d, y0=%d, x1=%d, y1=%d\n", - x0, y0, x1, y1); - sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping; - sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping; - } - IA_CSS_LEAVE_PRIVATE("void"); - return err; -} - -struct ia_css_3a_statistics * -ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid) -{ - struct ia_css_3a_statistics *me; - int grid_size; - - IA_CSS_ENTER("grid=%p", grid); - - assert(grid != NULL); - - me = sh_css_calloc(1, sizeof(*me)); - if (!me) - goto err; - - me->grid = *grid; - grid_size = grid->width * grid->height; - me->data = sh_css_malloc(grid_size * sizeof(*me->data)); - if (!me->data) - goto err; -#if !defined(HAS_NO_HMEM) - /* No weighted histogram, no structure, treat the histogram data as a byte dump in a byte array */ - me->rgby_data = (struct ia_css_3a_rgby_output *)sh_css_malloc(sizeof_hmem(HMEM0_ID)); -#else - me->rgby_data = NULL; -#endif - - IA_CSS_LEAVE("return=%p", me); - return me; -err: - ia_css_3a_statistics_free(me); - - IA_CSS_LEAVE("return=%p", NULL); - return NULL; -} - -void -ia_css_3a_statistics_free(struct ia_css_3a_statistics *me) -{ - if (me) { - sh_css_free(me->rgby_data); - sh_css_free(me->data); - memset(me, 0, sizeof(struct ia_css_3a_statistics)); - sh_css_free(me); - } -} - -struct ia_css_dvs_statistics * -ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid) -{ - struct ia_css_dvs_statistics *me; - - assert(grid != NULL); - - me = sh_css_calloc(1, sizeof(*me)); - if (!me) - goto err; - - me->grid = *grid; - me->hor_proj = sh_css_malloc(grid->height 
* IA_CSS_DVS_NUM_COEF_TYPES * - sizeof(*me->hor_proj)); - if (!me->hor_proj) - goto err; - - me->ver_proj = sh_css_malloc(grid->width * IA_CSS_DVS_NUM_COEF_TYPES * - sizeof(*me->ver_proj)); - if (!me->ver_proj) - goto err; - - return me; -err: - ia_css_dvs_statistics_free(me); - return NULL; - -} - -void -ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me) -{ - if (me) { - sh_css_free(me->hor_proj); - sh_css_free(me->ver_proj); - memset(me, 0, sizeof(struct ia_css_dvs_statistics)); - sh_css_free(me); - } -} - -struct ia_css_dvs_coefficients * -ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid) -{ - struct ia_css_dvs_coefficients *me; - - assert(grid != NULL); - - me = sh_css_calloc(1, sizeof(*me)); - if (!me) - goto err; - - me->grid = *grid; - - me->hor_coefs = sh_css_malloc(grid->num_hor_coefs * - IA_CSS_DVS_NUM_COEF_TYPES * - sizeof(*me->hor_coefs)); - if (!me->hor_coefs) - goto err; - - me->ver_coefs = sh_css_malloc(grid->num_ver_coefs * - IA_CSS_DVS_NUM_COEF_TYPES * - sizeof(*me->ver_coefs)); - if (!me->ver_coefs) - goto err; - - return me; -err: - ia_css_dvs_coefficients_free(me); - return NULL; -} - -void -ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me) -{ - if (me) { - sh_css_free(me->hor_coefs); - sh_css_free(me->ver_coefs); - memset(me, 0, sizeof(struct ia_css_dvs_coefficients)); - sh_css_free(me); - } -} - -struct ia_css_dvs2_statistics * -ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid) -{ - struct ia_css_dvs2_statistics *me; - - assert(grid != NULL); - - me = sh_css_calloc(1, sizeof(*me)); - if (!me) - goto err; - - me->grid = *grid; - - me->hor_prod.odd_real = sh_css_malloc(grid->aligned_width * - grid->aligned_height * sizeof(*me->hor_prod.odd_real)); - if (!me->hor_prod.odd_real) - goto err; - - me->hor_prod.odd_imag = sh_css_malloc(grid->aligned_width * - grid->aligned_height * sizeof(*me->hor_prod.odd_imag)); - if (!me->hor_prod.odd_imag) - goto err; - - 
me->hor_prod.even_real = sh_css_malloc(grid->aligned_width * - grid->aligned_height * sizeof(*me->hor_prod.even_real)); - if (!me->hor_prod.even_real) - goto err; - - me->hor_prod.even_imag = sh_css_malloc(grid->aligned_width * - grid->aligned_height * sizeof(*me->hor_prod.even_imag)); - if (!me->hor_prod.even_imag) - goto err; - - me->ver_prod.odd_real = sh_css_malloc(grid->aligned_width * - grid->aligned_height * sizeof(*me->ver_prod.odd_real)); - if (!me->ver_prod.odd_real) - goto err; - - me->ver_prod.odd_imag = sh_css_malloc(grid->aligned_width * - grid->aligned_height * sizeof(*me->ver_prod.odd_imag)); - if (!me->ver_prod.odd_imag) - goto err; - - me->ver_prod.even_real = sh_css_malloc(grid->aligned_width * - grid->aligned_height * sizeof(*me->ver_prod.even_real)); - if (!me->ver_prod.even_real) - goto err; - - me->ver_prod.even_imag = sh_css_malloc(grid->aligned_width * - grid->aligned_height * sizeof(*me->ver_prod.even_imag)); - if (!me->ver_prod.even_imag) - goto err; - - return me; -err: - ia_css_dvs2_statistics_free(me); - return NULL; - -} - -void -ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me) -{ - if (me) { - sh_css_free(me->hor_prod.odd_real); - sh_css_free(me->hor_prod.odd_imag); - sh_css_free(me->hor_prod.even_real); - sh_css_free(me->hor_prod.even_imag); - sh_css_free(me->ver_prod.odd_real); - sh_css_free(me->ver_prod.odd_imag); - sh_css_free(me->ver_prod.even_real); - sh_css_free(me->ver_prod.even_imag); - memset(me, 0, sizeof(struct ia_css_dvs2_statistics)); - sh_css_free(me); - } -} - - -struct ia_css_dvs2_coefficients * -ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid) -{ - struct ia_css_dvs2_coefficients *me; - - assert(grid != NULL); - - me = sh_css_calloc(1, sizeof(*me)); - if (!me) - goto err; - - me->grid = *grid; - - me->hor_coefs.odd_real = sh_css_malloc(grid->num_hor_coefs * - sizeof(*me->hor_coefs.odd_real)); - if (!me->hor_coefs.odd_real) - goto err; - - me->hor_coefs.odd_imag = 
sh_css_malloc(grid->num_hor_coefs * - sizeof(*me->hor_coefs.odd_imag)); - if (!me->hor_coefs.odd_imag) - goto err; - - me->hor_coefs.even_real = sh_css_malloc(grid->num_hor_coefs * - sizeof(*me->hor_coefs.even_real)); - if (!me->hor_coefs.even_real) - goto err; - - me->hor_coefs.even_imag = sh_css_malloc(grid->num_hor_coefs * - sizeof(*me->hor_coefs.even_imag)); - if (!me->hor_coefs.even_imag) - goto err; - - me->ver_coefs.odd_real = sh_css_malloc(grid->num_ver_coefs * - sizeof(*me->ver_coefs.odd_real)); - if (!me->ver_coefs.odd_real) - goto err; - - me->ver_coefs.odd_imag = sh_css_malloc(grid->num_ver_coefs * - sizeof(*me->ver_coefs.odd_imag)); - if (!me->ver_coefs.odd_imag) - goto err; - - me->ver_coefs.even_real = sh_css_malloc(grid->num_ver_coefs * - sizeof(*me->ver_coefs.even_real)); - if (!me->ver_coefs.even_real) - goto err; - - me->ver_coefs.even_imag = sh_css_malloc(grid->num_ver_coefs * - sizeof(*me->ver_coefs.even_imag)); - if (!me->ver_coefs.even_imag) - goto err; - - return me; -err: - ia_css_dvs2_coefficients_free(me); - return NULL; -} - -void -ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me) -{ - if (me) { - sh_css_free(me->hor_coefs.odd_real); - sh_css_free(me->hor_coefs.odd_imag); - sh_css_free(me->hor_coefs.even_real); - sh_css_free(me->hor_coefs.even_imag); - sh_css_free(me->ver_coefs.odd_real); - sh_css_free(me->ver_coefs.odd_imag); - sh_css_free(me->ver_coefs.even_real); - sh_css_free(me->ver_coefs.even_imag); - memset(me, 0, sizeof(struct ia_css_dvs2_coefficients)); - sh_css_free(me); - } -} - -struct ia_css_dvs_6axis_config * -ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream) -{ - struct ia_css_dvs_6axis_config *dvs_config = NULL; - struct ia_css_isp_parameters *params = NULL; - unsigned int width_y; - unsigned int height_y; - unsigned int width_uv; - unsigned int height_uv; - - assert(stream != NULL); - params = stream->isp_params_configs; - - /* Backward compatibility by default consider pipe as 
Video*/ - if (!params || (params && !params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO])) { - goto err; - } - - dvs_config = (struct ia_css_dvs_6axis_config *)sh_css_calloc(1, sizeof(struct ia_css_dvs_6axis_config)); - if (!dvs_config) - goto err; - - dvs_config->width_y = width_y = params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_y; - dvs_config->height_y = height_y = params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_y; - dvs_config->width_uv = width_uv = params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_uv; - dvs_config->height_uv = height_uv = params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_uv; - IA_CSS_LOG("table Y: W %d H %d", width_y, height_y); - IA_CSS_LOG("table UV: W %d H %d", width_uv, height_uv); - dvs_config->xcoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(uint32_t)); - if (!dvs_config->xcoords_y) - goto err; - - dvs_config->ycoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(uint32_t)); - if (!dvs_config->ycoords_y) - goto err; - - dvs_config->xcoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv * sizeof(uint32_t)); - if (!dvs_config->xcoords_uv) - goto err; - - dvs_config->ycoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv * sizeof(uint32_t)); - if (!dvs_config->ycoords_uv) - goto err; - - return dvs_config; -err: - ia_css_dvs2_6axis_config_free(dvs_config); - return NULL; -} - -void -ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config) -{ - if (dvs_6axis_config) { - sh_css_free(dvs_6axis_config->xcoords_y); - sh_css_free(dvs_6axis_config->ycoords_y); - sh_css_free(dvs_6axis_config->xcoords_uv); - sh_css_free(dvs_6axis_config->ycoords_uv); - memset(dvs_6axis_config, 0, sizeof(struct ia_css_dvs_6axis_config)); - sh_css_free(dvs_6axis_config); - } -} - -void -ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable) -{ - struct ia_css_pipe *pipe; - struct ia_css_pipeline *pipeline; - struct ia_css_pipeline_stage *stage; - enum 
ia_css_pipe_id pipe_id; - enum ia_css_err err; - int i; - - if (stream == NULL) - return; - - for (i = 0; i < stream->num_pipes; i++) { - pipe = stream->pipes[i]; - pipeline = ia_css_pipe_get_pipeline(pipe); - pipe_id = pipeline->pipe_id; - - if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) { - err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP, &stage); - if (err == IA_CSS_SUCCESS) - stage->enable_zoom = enable; - break; - } - } -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h deleted file mode 100644 index 270ec2b60a3e..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _SH_CSS_PARAMS_H_ -#define _SH_CSS_PARAMS_H_ - -/*! 
\file */ - -/* Forward declaration to break mutual dependency */ -struct ia_css_isp_parameters; - -#include -#include "ia_css_types.h" -#include "ia_css_binary.h" -#include "sh_css_legacy.h" - -#include "sh_css_defs.h" /* SH_CSS_MAX_STAGES */ -#include "ia_css_pipeline.h" -#include "ia_css_isp_params.h" -#include "uds/uds_1.0/ia_css_uds_param.h" -#include "crop/crop_1.0/ia_css_crop_types.h" - - -#define PIX_SHIFT_FILTER_RUN_IN_X 12 -#define PIX_SHIFT_FILTER_RUN_IN_Y 12 - -#include "ob/ob_1.0/ia_css_ob_param.h" -/* Isp configurations per stream */ -struct sh_css_isp_param_configs { - /* OB (Optical Black) */ - struct sh_css_isp_ob_stream_config ob; -}; - - -/* Isp parameters per stream */ -struct ia_css_isp_parameters { - /* UDS */ - struct sh_css_sp_uds_params uds[SH_CSS_MAX_STAGES]; - struct sh_css_isp_param_configs stream_configs; - struct ia_css_fpn_table fpn_config; - struct ia_css_vector motion_config; - const struct ia_css_morph_table *morph_table; - const struct ia_css_shading_table *sc_table; - struct ia_css_shading_table *sc_config; - struct ia_css_macc_table macc_table; - struct ia_css_gamma_table gc_table; - struct ia_css_ctc_table ctc_table; - struct ia_css_xnr_table xnr_table; - - struct ia_css_dz_config dz_config; - struct ia_css_3a_config s3a_config; - struct ia_css_wb_config wb_config; - struct ia_css_cc_config cc_config; - struct ia_css_cc_config yuv2rgb_cc_config; - struct ia_css_cc_config rgb2yuv_cc_config; - struct ia_css_tnr_config tnr_config; - struct ia_css_ob_config ob_config; - /*----- DPC configuration -----*/ - /* The default DPC configuration is retained and currently set - * using the stream configuration. 
The code generated from genparams - * uses this configuration to set the DPC parameters per stage but this - * will be overwritten by the per pipe configuration */ - struct ia_css_dp_config dp_config; - /* ------ pipe specific DPC configuration ------ */ - /* Please note that this implementation is a temporary solution and - * should be replaced by CSS per pipe configuration when the support - * is ready (HSD 1303967698)*/ - struct ia_css_dp_config pipe_dp_config[IA_CSS_PIPE_ID_NUM]; - struct ia_css_nr_config nr_config; - struct ia_css_ee_config ee_config; - struct ia_css_de_config de_config; - struct ia_css_gc_config gc_config; - struct ia_css_anr_config anr_config; - struct ia_css_ce_config ce_config; - struct ia_css_formats_config formats_config; -/* ---- deprecated: replaced with pipe_dvs_6axis_config---- */ - struct ia_css_dvs_6axis_config *dvs_6axis_config; - struct ia_css_ecd_config ecd_config; - struct ia_css_ynr_config ynr_config; - struct ia_css_yee_config yee_config; - struct ia_css_fc_config fc_config; - struct ia_css_cnr_config cnr_config; - struct ia_css_macc_config macc_config; - struct ia_css_ctc_config ctc_config; - struct ia_css_aa_config aa_config; - struct ia_css_aa_config bds_config; - struct ia_css_aa_config raa_config; - struct ia_css_rgb_gamma_table r_gamma_table; - struct ia_css_rgb_gamma_table g_gamma_table; - struct ia_css_rgb_gamma_table b_gamma_table; - struct ia_css_anr_thres anr_thres; - struct ia_css_xnr_config xnr_config; - struct ia_css_xnr3_config xnr3_config; - struct ia_css_uds_config uds_config; - struct ia_css_crop_config crop_config; - struct ia_css_output_config output_config; - struct ia_css_dvs_6axis_config *pipe_dvs_6axis_config[IA_CSS_PIPE_ID_NUM]; -/* ------ deprecated(bz675) : from ------ */ - struct ia_css_shading_settings shading_settings; -/* ------ deprecated(bz675) : to ------ */ - struct ia_css_dvs_coefficients dvs_coefs; - struct ia_css_dvs2_coefficients dvs2_coefs; - - bool isp_params_changed; - bool 
isp_mem_params_changed - [IA_CSS_PIPE_ID_NUM][SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES]; - bool dz_config_changed; - bool motion_config_changed; - bool dis_coef_table_changed; - bool dvs2_coef_table_changed; - bool morph_table_changed; - bool sc_table_changed; - bool sc_table_dirty; - unsigned int sc_table_last_pipe_num; - bool anr_thres_changed; -/* ---- deprecated: replaced with pipe_dvs_6axis_config_changed ---- */ - bool dvs_6axis_config_changed; - /* ------ pipe specific DPC configuration ------ */ - /* Please note that this implementation is a temporary solution and - * should be replaced by CSS per pipe configuration when the support - * is ready (HSD 1303967698) */ - bool pipe_dpc_config_changed[IA_CSS_PIPE_ID_NUM]; -/* ------ deprecated(bz675) : from ------ */ - bool shading_settings_changed; -/* ------ deprecated(bz675) : to ------ */ - bool pipe_dvs_6axis_config_changed[IA_CSS_PIPE_ID_NUM]; - - bool config_changed[IA_CSS_NUM_PARAMETER_IDS]; - - unsigned int sensor_binning; - /* local buffers, used to re-order the 3a statistics in vmem-format */ - struct sh_css_ddr_address_map pipe_ddr_ptrs[IA_CSS_PIPE_ID_NUM]; - struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM]; - struct sh_css_ddr_address_map ddr_ptrs; - struct sh_css_ddr_address_map_size ddr_ptrs_size; - struct ia_css_frame *output_frame; /** Output frame the config is to be applied to (optional) */ - uint32_t isp_parameters_id; /** Unique ID to track which config was actually applied to a particular frame */ -}; - -void -ia_css_params_store_ia_css_host_data( - hrt_vaddress ddr_addr, - struct ia_css_host_data *data); - -enum ia_css_err -ia_css_params_store_sctbl( - const struct ia_css_pipeline_stage *stage, - hrt_vaddress ddr_addr, - const struct ia_css_shading_table *shading_table); - -struct ia_css_host_data * -ia_css_params_alloc_convert_sctbl( - const struct ia_css_pipeline_stage *stage, - const struct ia_css_shading_table *shading_table); - -struct ia_css_isp_config * 
-sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe); - -/* ipu address allocation/free for gdc lut */ -hrt_vaddress -sh_css_params_alloc_gdc_lut(void); -void -sh_css_params_free_gdc_lut(hrt_vaddress addr); - -enum ia_css_err -sh_css_params_map_and_store_default_gdc_lut(void); - -void -sh_css_params_free_default_gdc_lut(void); - -hrt_vaddress -sh_css_params_get_default_gdc_lut(void); - -hrt_vaddress -sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe); - -#endif /* _SH_CSS_PARAMS_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params_internal.h deleted file mode 100644 index baca24532f9f..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params_internal.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _SH_CSS_PARAMS_INTERNAL_H_ -#define _SH_CSS_PARAMS_INTERNAL_H_ - -void -sh_css_param_clear_param_sets(void); - -#endif /* _SH_CSS_PARAMS_INTERNAL_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_pipe.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_pipe.c deleted file mode 100644 index 1f57ffad8921..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_pipe.c +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -/* This file will contain the code to implement the functions declared in ia_css_pipe.h and ia_css_pipe_public.h - and associated helper functions */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_properties.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_properties.c deleted file mode 100644 index ad46996cfbd3..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_properties.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_properties.h" -#include -#include "ia_css_types.h" -#include "gdc_device.h" - -void -ia_css_get_properties(struct ia_css_properties *properties) -{ - assert(properties != NULL); -#if defined(HAS_GDC_VERSION_2) || defined(HAS_GDC_VERSION_3) -/* - * MW: We don't want to store the coordinates - * full range in memory: Truncate - */ - properties->gdc_coord_one = gdc_get_unity(GDC0_ID)/HRT_GDC_COORD_SCALE; -#else -#error "Unknown GDC version" -#endif - - properties->l1_base_is_index = true; - -#if defined(HAS_VAMEM_VERSION_1) - properties->vamem_type = IA_CSS_VAMEM_TYPE_1; -#elif defined(HAS_VAMEM_VERSION_2) - properties->vamem_type = IA_CSS_VAMEM_TYPE_2; -#else -#error "Unknown VAMEM version" -#endif -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_shading.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_shading.c deleted file mode 100644 index 2a2d0f4db44b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_shading.c +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -/* This file will contain the code to implement the functions declared in ia_css_shading.h - and associated helper functions */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c deleted file mode 100644 index cdbe914787c8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c +++ /dev/null @@ -1,1799 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include "sh_css_sp.h" - -#if !defined(HAS_NO_INPUT_FORMATTER) -#include "input_formatter.h" -#endif - -#include "dma.h" /* N_DMA_CHANNEL_ID */ - -#include "ia_css_buffer.h" -#include "ia_css_binary.h" -#include "sh_css_hrt.h" -#include "sh_css_defs.h" -#include "sh_css_internal.h" -#include "ia_css_control.h" -#include "ia_css_debug.h" -#include "ia_css_debug_pipe.h" -#include "ia_css_event_public.h" -#include "ia_css_mmu.h" -#include "ia_css_stream.h" -#include "ia_css_isp_param.h" -#include "sh_css_params.h" -#include "sh_css_legacy.h" -#include "ia_css_frame_comm.h" -#if !defined(HAS_NO_INPUT_SYSTEM) -#include "ia_css_isys.h" -#endif - -#include "gdc_device.h" /* HRT_GDC_N */ - -/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */ - -#include "memory_access.h" - -#include "assert_support.h" -#include "platform_support.h" /* hrt_sleep() */ - -#include "sw_event_global.h" /* Event IDs.*/ -#include "ia_css_event.h" -#include "mmu_device.h" -#include "ia_css_spctrl.h" - -#ifndef 
offsetof -#define offsetof(T, x) ((unsigned)&(((T *)0)->x)) -#endif - -#define IA_CSS_INCLUDE_CONFIGURATIONS -#include "ia_css_isp_configs.h" -#define IA_CSS_INCLUDE_STATES -#include "ia_css_isp_states.h" - -#ifndef ISP2401 -#include "isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h" -#else -#include "isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h" -#endif - -struct sh_css_sp_group sh_css_sp_group; -struct sh_css_sp_stage sh_css_sp_stage; -struct sh_css_isp_stage sh_css_isp_stage; -static struct sh_css_sp_output sh_css_sp_output; -static struct sh_css_sp_per_frame_data per_frame_data; - -/* true if SP supports frame loop and host2sp_commands */ -/* For the moment there is only code that sets this bool to true */ -/* TODO: add code that sets this bool to false */ -static bool sp_running; - -static enum ia_css_err -set_output_frame_buffer(const struct ia_css_frame *frame, - unsigned idx); - -static void -sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf, - const enum sh_css_queue_id queue_id, - const hrt_vaddress xmem_addr, - const enum ia_css_buffer_type buf_type); - -static void -initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr); - -static void -initialize_stage_frames(struct ia_css_frames_sp *frames); - -/* This data is stored every frame */ -void -store_sp_group_data(void) -{ - per_frame_data.sp_group_addr = sh_css_store_sp_group_to_ddr(); -} - -static void -copy_isp_stage_to_sp_stage(void) -{ - /* [WW07.5]type casting will cause potential issues */ - sh_css_sp_stage.num_stripes = (uint8_t) sh_css_isp_stage.binary_info.iterator.num_stripes; - sh_css_sp_stage.row_stripes_height = (uint16_t) sh_css_isp_stage.binary_info.iterator.row_stripes_height; - sh_css_sp_stage.row_stripes_overlap_lines = (uint16_t) sh_css_isp_stage.binary_info.iterator.row_stripes_overlap_lines; - sh_css_sp_stage.top_cropping = (uint16_t) sh_css_isp_stage.binary_info.pipeline.top_cropping; - /* moved to sh_css_sp_init_stage - 
sh_css_sp_stage.enable.vf_output = - sh_css_isp_stage.binary_info.enable.vf_veceven || - sh_css_isp_stage.binary_info.num_output_pins > 1; - */ - sh_css_sp_stage.enable.sdis = sh_css_isp_stage.binary_info.enable.dis; - sh_css_sp_stage.enable.s3a = sh_css_isp_stage.binary_info.enable.s3a; -#ifdef ISP2401 - sh_css_sp_stage.enable.lace_stats = sh_css_isp_stage.binary_info.enable.lace_stats; -#endif -} - -void -store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num, unsigned stage) -{ - unsigned int thread_id; - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - copy_isp_stage_to_sp_stage(); - if (id != IA_CSS_PIPE_ID_COPY) - sh_css_sp_stage.isp_stage_addr = - sh_css_store_isp_stage_to_ddr(pipe_num, stage); - sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] = - sh_css_store_sp_stage_to_ddr(pipe_num, stage); - - /* Clear for next frame */ - sh_css_sp_stage.program_input_circuit = false; -} - -static void -store_sp_per_frame_data(const struct ia_css_fw_info *fw) -{ - unsigned int HIVE_ADDR_sp_per_frame_data = 0; - - assert(fw != NULL); - - switch (fw->type) { - case ia_css_sp_firmware: - HIVE_ADDR_sp_per_frame_data = fw->info.sp.per_frame_data; - break; - case ia_css_acc_firmware: - HIVE_ADDR_sp_per_frame_data = fw->info.acc.per_frame_data; - break; - case ia_css_isp_firmware: - return; - } - - sp_dmem_store(SP0_ID, - (unsigned int)sp_address_of(sp_per_frame_data), - &per_frame_data, - sizeof(per_frame_data)); -} - -static void -sh_css_store_sp_per_frame_data(enum ia_css_pipe_id pipe_id, - unsigned int pipe_num, - const struct ia_css_fw_info *sp_fw) -{ - if (!sp_fw) - sp_fw = &sh_css_sp_fw; - - store_sp_stage_data(pipe_id, pipe_num, 0); - store_sp_group_data(); - store_sp_per_frame_data(sp_fw); -} - -#if SP_DEBUG != SP_DEBUG_NONE - -void -sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state) -{ - const struct ia_css_fw_info *fw = &sh_css_sp_fw; - unsigned int HIVE_ADDR_sp_output = fw->info.sp.output; - unsigned i; - unsigned offset = 
(unsigned int)offsetof(struct sh_css_sp_output, debug)/sizeof(int); - - assert(state != NULL); - - (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */ - for (i = 0; i < sizeof(*state)/sizeof(int); i++) - ((unsigned *)state)[i] = load_sp_array_uint(sp_output, i+offset); -} - -#endif - -void -sh_css_sp_start_binary_copy(unsigned int pipe_num, struct ia_css_frame *out_frame, - unsigned two_ppc) -{ - enum ia_css_pipe_id pipe_id; - unsigned int thread_id; - struct sh_css_sp_pipeline *pipe; - uint8_t stage_num = 0; - - assert(out_frame != NULL); - pipe_id = IA_CSS_PIPE_ID_CAPTURE; - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - pipe = &sh_css_sp_group.pipe[thread_id]; - - pipe->copy.bin.bytes_available = out_frame->data_bytes; - pipe->num_stages = 1; - pipe->pipe_id = pipe_id; - pipe->pipe_num = pipe_num; - pipe->thread_id = thread_id; - pipe->pipe_config = 0x0; /* No parameters */ - pipe->pipe_qos_config = QOS_INVALID; - - if (pipe->inout_port_config == 0) { - SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config, - (uint8_t)SH_CSS_PORT_INPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - } - IA_CSS_LOG("pipe_id %d port_config %08x", - pipe->pipe_id, pipe->inout_port_config); - -#if !defined(HAS_NO_INPUT_FORMATTER) - sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc; -#else - (void)two_ppc; -#endif - - sh_css_sp_stage.num = stage_num; - sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE; - sh_css_sp_stage.func = - (unsigned int)IA_CSS_PIPELINE_BIN_COPY; - - set_output_frame_buffer(out_frame, 0); - - /* sp_bin_copy_init on the SP does not deal with dynamica/static yet */ - /* For now always update the dynamic data from out frames. 
*/ - sh_css_store_sp_per_frame_data(pipe_id, pipe_num, &sh_css_sp_fw); -} - -static void -sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame, - unsigned pipe_num, - unsigned two_ppc, - unsigned max_input_width, - enum sh_css_pipe_config_override pipe_conf_override, - unsigned int if_config_index) -{ - enum ia_css_pipe_id pipe_id; - unsigned int thread_id; - uint8_t stage_num = 0; - struct sh_css_sp_pipeline *pipe; - - assert(out_frame != NULL); - - { - /* - * Clear sh_css_sp_stage for easy debugging. - * program_input_circuit must be saved as it is set outside - * this function. - */ - uint8_t program_input_circuit; - program_input_circuit = sh_css_sp_stage.program_input_circuit; - memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage)); - sh_css_sp_stage.program_input_circuit = program_input_circuit; - } - - pipe_id = IA_CSS_PIPE_ID_COPY; - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - pipe = &sh_css_sp_group.pipe[thread_id]; - - pipe->copy.raw.height = out_frame->info.res.height; - pipe->copy.raw.width = out_frame->info.res.width; - pipe->copy.raw.padded_width = out_frame->info.padded_width; - pipe->copy.raw.raw_bit_depth = out_frame->info.raw_bit_depth; - pipe->copy.raw.max_input_width = max_input_width; - pipe->num_stages = 1; - pipe->pipe_id = pipe_id; - /* TODO: next indicates from which queues parameters need to be - sampled, needs checking/improvement */ - if (pipe_conf_override == SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD) - pipe->pipe_config = - (SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id); - else - pipe->pipe_config = pipe_conf_override; - - pipe->pipe_qos_config = QOS_INVALID; - - if (pipe->inout_port_config == 0) { - SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config, - (uint8_t)SH_CSS_PORT_INPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config, - (uint8_t)SH_CSS_PORT_OUTPUT, - (uint8_t)SH_CSS_HOST_TYPE, 1); - } - IA_CSS_LOG("pipe_id %d port_config %08x", - pipe->pipe_id, pipe->inout_port_config); - -#if 
!defined(HAS_NO_INPUT_FORMATTER) - sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc; -#else - (void)two_ppc; -#endif - - sh_css_sp_stage.num = stage_num; - sh_css_sp_stage.xmem_bin_addr = 0x0; - sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE; - sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_RAW_COPY; - sh_css_sp_stage.if_config_index = (uint8_t) if_config_index; - set_output_frame_buffer(out_frame, 0); - - ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame); -} - -static void -sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame, - unsigned pipe_num, unsigned max_input_width, unsigned int if_config_index) -{ - enum ia_css_pipe_id pipe_id; - unsigned int thread_id; - uint8_t stage_num = 0; - struct sh_css_sp_pipeline *pipe; -#if defined SH_CSS_ENABLE_METADATA - enum sh_css_queue_id queue_id; -#endif - - assert(out_frame != NULL); - - { - /* - * Clear sh_css_sp_stage for easy debugging. - * program_input_circuit must be saved as it is set outside - * this function. 
- */ - uint8_t program_input_circuit; - program_input_circuit = sh_css_sp_stage.program_input_circuit; - memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage)); - sh_css_sp_stage.program_input_circuit = program_input_circuit; - } - - pipe_id = IA_CSS_PIPE_ID_COPY; - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - pipe = &sh_css_sp_group.pipe[thread_id]; - - pipe->copy.raw.height = out_frame->info.res.height; - pipe->copy.raw.width = out_frame->info.res.width; - pipe->copy.raw.padded_width = out_frame->info.padded_width; - pipe->copy.raw.raw_bit_depth = out_frame->info.raw_bit_depth; - pipe->copy.raw.max_input_width = max_input_width; - pipe->num_stages = 1; - pipe->pipe_id = pipe_id; - pipe->pipe_config = 0x0; /* No parameters */ - pipe->pipe_qos_config = QOS_INVALID; - - initialize_stage_frames(&sh_css_sp_stage.frames); - sh_css_sp_stage.num = stage_num; - sh_css_sp_stage.xmem_bin_addr = 0x0; - sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE; - sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_ISYS_COPY; - sh_css_sp_stage.if_config_index = (uint8_t) if_config_index; - - set_output_frame_buffer(out_frame, 0); - -#if defined SH_CSS_ENABLE_METADATA - if (pipe->metadata.height > 0) { - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id, &queue_id); - sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_METADATA); - } -#endif - - ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame); -} - -unsigned int -sh_css_sp_get_binary_copy_size(void) -{ - const struct ia_css_fw_info *fw = &sh_css_sp_fw; - unsigned int HIVE_ADDR_sp_output = fw->info.sp.output; - unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output, - bin_copy_bytes_copied) / sizeof(int); - (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */ - return load_sp_array_uint(sp_output, offset); -} - -unsigned int -sh_css_sp_get_sw_interrupt_value(unsigned int irq) -{ - const struct ia_css_fw_info *fw 
= &sh_css_sp_fw; - unsigned int HIVE_ADDR_sp_output = fw->info.sp.output; - unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output, sw_interrupt_value) - / sizeof(int); - (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */ - return load_sp_array_uint(sp_output, offset+irq); -} - -static void -sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf, - const enum sh_css_queue_id queue_id, - const hrt_vaddress xmem_addr, - const enum ia_css_buffer_type buf_type) -{ - assert(buf_type < IA_CSS_NUM_BUFFER_TYPE); - if (queue_id > SH_CSS_INVALID_QUEUE_ID) { - /* - * value >=0 indicates that function init_frame_pointers() - * should use the dynamic data address - */ - assert(queue_id < SH_CSS_MAX_NUM_QUEUES); - - /* Klocwork assumes assert can be disabled; - Since we can get there with any type, and it does not - know that frame_in->dynamic_data_index can only be set - for one of the types in the assert) it has to assume we - can get here for any type. however this could lead to an - out of bounds reference when indexing buf_type about 10 - lines below. In order to satisfy KW an additional if - has been added. This one will always yield true. 
- */ - if ((queue_id < SH_CSS_MAX_NUM_QUEUES)) - { - dest_buf->buf_src.queue_id = queue_id; - } - } else { - assert(xmem_addr != mmgr_EXCEPTION); - dest_buf->buf_src.xmem_addr = xmem_addr; - } - dest_buf->buf_type = buf_type; -} - -static void -sh_css_copy_frame_to_spframe(struct ia_css_frame_sp *sp_frame_out, - const struct ia_css_frame *frame_in) -{ - assert(frame_in != NULL); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_copy_frame_to_spframe():\n"); - - - sh_css_copy_buffer_attr_to_spbuffer(&sp_frame_out->buf_attr, - frame_in->dynamic_queue_id, - frame_in->data, - frame_in->buf_type); - - ia_css_frame_info_to_frame_sp_info(&sp_frame_out->info, &frame_in->info); - - switch (frame_in->info.format) { - case IA_CSS_FRAME_FORMAT_RAW_PACKED: - case IA_CSS_FRAME_FORMAT_RAW: - sp_frame_out->planes.raw.offset = frame_in->planes.raw.offset; - break; - case IA_CSS_FRAME_FORMAT_RGB565: - case IA_CSS_FRAME_FORMAT_RGBA888: - sp_frame_out->planes.rgb.offset = frame_in->planes.rgb.offset; - break; - case IA_CSS_FRAME_FORMAT_PLANAR_RGB888: - sp_frame_out->planes.planar_rgb.r.offset = - frame_in->planes.planar_rgb.r.offset; - sp_frame_out->planes.planar_rgb.g.offset = - frame_in->planes.planar_rgb.g.offset; - sp_frame_out->planes.planar_rgb.b.offset = - frame_in->planes.planar_rgb.b.offset; - break; - case IA_CSS_FRAME_FORMAT_YUYV: - case IA_CSS_FRAME_FORMAT_UYVY: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8: - case IA_CSS_FRAME_FORMAT_YUV_LINE: - sp_frame_out->planes.yuyv.offset = frame_in->planes.yuyv.offset; - break; - case IA_CSS_FRAME_FORMAT_NV11: - case IA_CSS_FRAME_FORMAT_NV12: - case IA_CSS_FRAME_FORMAT_NV12_16: - case IA_CSS_FRAME_FORMAT_NV12_TILEY: - case IA_CSS_FRAME_FORMAT_NV21: - case IA_CSS_FRAME_FORMAT_NV16: - case IA_CSS_FRAME_FORMAT_NV61: - sp_frame_out->planes.nv.y.offset = - frame_in->planes.nv.y.offset; - sp_frame_out->planes.nv.uv.offset = - frame_in->planes.nv.uv.offset; - break; - case 
IA_CSS_FRAME_FORMAT_YUV420: - case IA_CSS_FRAME_FORMAT_YUV422: - case IA_CSS_FRAME_FORMAT_YUV444: - case IA_CSS_FRAME_FORMAT_YUV420_16: - case IA_CSS_FRAME_FORMAT_YUV422_16: - case IA_CSS_FRAME_FORMAT_YV12: - case IA_CSS_FRAME_FORMAT_YV16: - sp_frame_out->planes.yuv.y.offset = - frame_in->planes.yuv.y.offset; - sp_frame_out->planes.yuv.u.offset = - frame_in->planes.yuv.u.offset; - sp_frame_out->planes.yuv.v.offset = - frame_in->planes.yuv.v.offset; - break; - case IA_CSS_FRAME_FORMAT_QPLANE6: - sp_frame_out->planes.plane6.r.offset = - frame_in->planes.plane6.r.offset; - sp_frame_out->planes.plane6.r_at_b.offset = - frame_in->planes.plane6.r_at_b.offset; - sp_frame_out->planes.plane6.gr.offset = - frame_in->planes.plane6.gr.offset; - sp_frame_out->planes.plane6.gb.offset = - frame_in->planes.plane6.gb.offset; - sp_frame_out->planes.plane6.b.offset = - frame_in->planes.plane6.b.offset; - sp_frame_out->planes.plane6.b_at_r.offset = - frame_in->planes.plane6.b_at_r.offset; - break; - case IA_CSS_FRAME_FORMAT_BINARY_8: - sp_frame_out->planes.binary.data.offset = - frame_in->planes.binary.data.offset; - break; - default: - /* This should not happen, but in case it does, - * nullify the planes - */ - memset(&sp_frame_out->planes, 0, sizeof(sp_frame_out->planes)); - break; - } - -} - -static enum ia_css_err -set_input_frame_buffer(const struct ia_css_frame *frame) -{ - if (frame == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - switch (frame->info.format) { - case IA_CSS_FRAME_FORMAT_QPLANE6: - case IA_CSS_FRAME_FORMAT_YUV420_16: - case IA_CSS_FRAME_FORMAT_RAW_PACKED: - case IA_CSS_FRAME_FORMAT_RAW: - case IA_CSS_FRAME_FORMAT_YUV420: - case IA_CSS_FRAME_FORMAT_YUYV: - case IA_CSS_FRAME_FORMAT_YUV_LINE: - case IA_CSS_FRAME_FORMAT_NV12: - case IA_CSS_FRAME_FORMAT_NV12_16: - case IA_CSS_FRAME_FORMAT_NV12_TILEY: - case IA_CSS_FRAME_FORMAT_NV21: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8: - case 
IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10: - break; - default: - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.in, frame); - - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -set_output_frame_buffer(const struct ia_css_frame *frame, - unsigned idx) -{ - if (frame == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - switch (frame->info.format) { - case IA_CSS_FRAME_FORMAT_YUV420: - case IA_CSS_FRAME_FORMAT_YUV422: - case IA_CSS_FRAME_FORMAT_YUV444: - case IA_CSS_FRAME_FORMAT_YV12: - case IA_CSS_FRAME_FORMAT_YV16: - case IA_CSS_FRAME_FORMAT_YUV420_16: - case IA_CSS_FRAME_FORMAT_YUV422_16: - case IA_CSS_FRAME_FORMAT_NV11: - case IA_CSS_FRAME_FORMAT_NV12: - case IA_CSS_FRAME_FORMAT_NV12_16: - case IA_CSS_FRAME_FORMAT_NV12_TILEY: - case IA_CSS_FRAME_FORMAT_NV16: - case IA_CSS_FRAME_FORMAT_NV21: - case IA_CSS_FRAME_FORMAT_NV61: - case IA_CSS_FRAME_FORMAT_YUYV: - case IA_CSS_FRAME_FORMAT_UYVY: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8: - case IA_CSS_FRAME_FORMAT_YUV_LINE: - case IA_CSS_FRAME_FORMAT_RGB565: - case IA_CSS_FRAME_FORMAT_RGBA888: - case IA_CSS_FRAME_FORMAT_PLANAR_RGB888: - case IA_CSS_FRAME_FORMAT_RAW: - case IA_CSS_FRAME_FORMAT_RAW_PACKED: - case IA_CSS_FRAME_FORMAT_QPLANE6: - case IA_CSS_FRAME_FORMAT_BINARY_8: - break; - default: - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out[idx], frame); - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -set_view_finder_buffer(const struct ia_css_frame *frame) -{ - if (frame == NULL) - return IA_CSS_ERR_INVALID_ARGUMENTS; - - switch (frame->info.format) { - /* the dual output pin */ - case IA_CSS_FRAME_FORMAT_NV12: - case IA_CSS_FRAME_FORMAT_NV12_16: - case IA_CSS_FRAME_FORMAT_NV21: - case IA_CSS_FRAME_FORMAT_YUYV: - case IA_CSS_FRAME_FORMAT_UYVY: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8: - case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8: - case 
IA_CSS_FRAME_FORMAT_YUV420: - case IA_CSS_FRAME_FORMAT_YV12: - case IA_CSS_FRAME_FORMAT_NV12_TILEY: - - /* for vf_veceven */ - case IA_CSS_FRAME_FORMAT_YUV_LINE: - break; - default: - return IA_CSS_ERR_INVALID_ARGUMENTS; - } - - sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out_vf, frame); - return IA_CSS_SUCCESS; -} - -#if !defined(HAS_NO_INPUT_FORMATTER) -void sh_css_sp_set_if_configs( - const input_formatter_cfg_t *config_a, - const input_formatter_cfg_t *config_b, - const uint8_t if_config_index - ) -{ - assert(if_config_index < SH_CSS_MAX_IF_CONFIGS); - assert(config_a != NULL); - - sh_css_sp_group.config.input_formatter.set[if_config_index].config_a = *config_a; - sh_css_sp_group.config.input_formatter.a_changed = true; - - if (config_b != NULL) { - sh_css_sp_group.config.input_formatter.set[if_config_index].config_b = *config_b; - sh_css_sp_group.config.input_formatter.b_changed = true; - } - - return; -} -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) -void -sh_css_sp_program_input_circuit(int fmt_type, - int ch_id, - enum ia_css_input_mode input_mode) -{ - sh_css_sp_group.config.input_circuit.no_side_band = false; - sh_css_sp_group.config.input_circuit.fmt_type = fmt_type; - sh_css_sp_group.config.input_circuit.ch_id = ch_id; - sh_css_sp_group.config.input_circuit.input_mode = input_mode; -/* - * The SP group is only loaded at SP boot time and is read once - * change flags as "input_circuit_cfg_changed" must be reset on the SP - */ - sh_css_sp_group.config.input_circuit_cfg_changed = true; - sh_css_sp_stage.program_input_circuit = true; -} -#endif - -#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2) -void -sh_css_sp_configure_sync_gen(int width, int height, - int hblank_cycles, - int vblank_cycles) -{ - sh_css_sp_group.config.sync_gen.width = width; - sh_css_sp_group.config.sync_gen.height = height; - sh_css_sp_group.config.sync_gen.hblank_cycles = hblank_cycles; - 
sh_css_sp_group.config.sync_gen.vblank_cycles = vblank_cycles; -} - -void -sh_css_sp_configure_tpg(int x_mask, - int y_mask, - int x_delta, - int y_delta, - int xy_mask) -{ - sh_css_sp_group.config.tpg.x_mask = x_mask; - sh_css_sp_group.config.tpg.y_mask = y_mask; - sh_css_sp_group.config.tpg.x_delta = x_delta; - sh_css_sp_group.config.tpg.y_delta = y_delta; - sh_css_sp_group.config.tpg.xy_mask = xy_mask; -} - -void -sh_css_sp_configure_prbs(int seed) -{ - sh_css_sp_group.config.prbs.seed = seed; -} -#endif - -void -sh_css_sp_configure_enable_raw_pool_locking(bool lock_all) -{ - sh_css_sp_group.config.enable_raw_pool_locking = true; - sh_css_sp_group.config.lock_all = lock_all; -} - -void -sh_css_sp_enable_isys_event_queue(bool enable) -{ -#if !defined(HAS_NO_INPUT_SYSTEM) - sh_css_sp_group.config.enable_isys_event_queue = enable; -#else - (void)enable; -#endif -} - -void -sh_css_sp_set_disable_continuous_viewfinder(bool flag) -{ - sh_css_sp_group.config.disable_cont_vf = flag; -} - -static enum ia_css_err -sh_css_sp_write_frame_pointers(const struct sh_css_binary_args *args) -{ - enum ia_css_err err = IA_CSS_SUCCESS; - int i; - - assert(args != NULL); - - if (args->in_frame) - err = set_input_frame_buffer(args->in_frame); - if (err == IA_CSS_SUCCESS && args->out_vf_frame) - err = set_view_finder_buffer(args->out_vf_frame); - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - if (err == IA_CSS_SUCCESS && args->out_frame[i]) - err = set_output_frame_buffer(args->out_frame[i], i); - } - - /* we don't pass this error back to the upper layer, so we add a assert here - because we actually hit the error here but it still works by accident... 
*/ - if (err != IA_CSS_SUCCESS) assert(false); - return err; -} - -static void -sh_css_sp_init_group(bool two_ppc, - enum atomisp_input_format input_format, - bool no_isp_sync, - uint8_t if_config_index) -{ -#if !defined(HAS_NO_INPUT_FORMATTER) - sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc; -#else - (void)two_ppc; -#endif - - sh_css_sp_group.config.no_isp_sync = (uint8_t)no_isp_sync; - /* decide whether the frame is processed online or offline */ - if (if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED) return; -#if !defined(HAS_NO_INPUT_FORMATTER) - assert(if_config_index < SH_CSS_MAX_IF_CONFIGS); - sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format = input_format; -#else - (void)input_format; -#endif -} - -void -sh_css_stage_write_binary_info(struct ia_css_binary_info *info) -{ - assert(info != NULL); - sh_css_isp_stage.binary_info = *info; -} - -static enum ia_css_err -copy_isp_mem_if_to_ddr(struct ia_css_binary *binary) -{ - enum ia_css_err err; - - err = ia_css_isp_param_copy_isp_mem_if_to_ddr( - &binary->css_params, - &binary->mem_params, - IA_CSS_PARAM_CLASS_CONFIG); - if (err != IA_CSS_SUCCESS) - return err; - err = ia_css_isp_param_copy_isp_mem_if_to_ddr( - &binary->css_params, - &binary->mem_params, - IA_CSS_PARAM_CLASS_STATE); - if (err != IA_CSS_SUCCESS) - return err; - return IA_CSS_SUCCESS; -} - -static bool -is_sp_stage(struct ia_css_pipeline_stage *stage) -{ - assert(stage != NULL); - return stage->sp_func != IA_CSS_PIPELINE_NO_FUNC; -} - -static enum ia_css_err -configure_isp_from_args( - const struct sh_css_sp_pipeline *pipeline, - const struct ia_css_binary *binary, - const struct sh_css_binary_args *args, - bool two_ppc, - bool deinterleaved) -{ -#ifdef ISP2401 - struct ia_css_pipe *pipe = find_pipe_by_num(pipeline->pipe_num); - const struct ia_css_resolution *res; - -#endif - ia_css_fpn_configure(binary, &binary->in_frame_info); - ia_css_crop_configure(binary, &args->delay_frames[0]->info); - 
ia_css_qplane_configure(pipeline, binary, &binary->in_frame_info); - ia_css_output0_configure(binary, &args->out_frame[0]->info); - ia_css_output1_configure(binary, &args->out_vf_frame->info); - ia_css_copy_output_configure(binary, args->copy_output); - ia_css_output0_configure(binary, &args->out_frame[0]->info); -#ifdef ISP2401 - ia_css_sc_configure(binary, pipeline->shading.internal_frame_origin_x_bqs_on_sctbl, - pipeline->shading.internal_frame_origin_y_bqs_on_sctbl); -#endif - ia_css_iterator_configure(binary, &args->in_frame->info); - ia_css_dvs_configure(binary, &args->out_frame[0]->info); - ia_css_output_configure(binary, &args->out_frame[0]->info); - ia_css_raw_configure(pipeline, binary, &args->in_frame->info, &binary->in_frame_info, two_ppc, deinterleaved); - ia_css_ref_configure(binary, (const struct ia_css_frame **)args->delay_frames, pipeline->dvs_frame_delay); - ia_css_tnr_configure(binary, (const struct ia_css_frame **)args->tnr_frames); - ia_css_bayer_io_config(binary, args); - return IA_CSS_SUCCESS; -} - -static void -initialize_isp_states(const struct ia_css_binary *binary) -{ - unsigned int i; - - if (!binary->info->mem_offsets.offsets.state) - return; - for (i = 0; i < IA_CSS_NUM_STATE_IDS; i++) { - ia_css_kernel_init_state[i](binary); - } -} - -static void -initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr) -{ - buf_attr->buf_src.queue_id = SH_CSS_INVALID_QUEUE_ID; - buf_attr->buf_type = IA_CSS_BUFFER_TYPE_INVALID; -} - -static void -initialize_stage_frames(struct ia_css_frames_sp *frames) -{ - unsigned int i; - - initialize_frame_buffer_attribute(&frames->in.buf_attr); - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - initialize_frame_buffer_attribute(&frames->out[i].buf_attr); - } - initialize_frame_buffer_attribute(&frames->out_vf.buf_attr); - initialize_frame_buffer_attribute(&frames->s3a_buf); - initialize_frame_buffer_attribute(&frames->dvs_buf); -#if defined SH_CSS_ENABLE_METADATA - 
initialize_frame_buffer_attribute(&frames->metadata_buf); -#endif -} - -static enum ia_css_err -sh_css_sp_init_stage(struct ia_css_binary *binary, - const char *binary_name, - const struct ia_css_blob_info *blob_info, - const struct sh_css_binary_args *args, - unsigned int pipe_num, - unsigned stage, - bool xnr, - const struct ia_css_isp_param_css_segments *isp_mem_if, - unsigned int if_config_index, - bool two_ppc) -{ - const struct ia_css_binary_xinfo *xinfo; - const struct ia_css_binary_info *info; - enum ia_css_err err = IA_CSS_SUCCESS; - int i; - struct ia_css_pipe *pipe = NULL; - unsigned int thread_id; - enum sh_css_queue_id queue_id; - bool continuous = sh_css_continuous_is_enabled((uint8_t)pipe_num); - - assert(binary != NULL); - assert(blob_info != NULL); - assert(args != NULL); - assert(isp_mem_if != NULL); - - xinfo = binary->info; - info = &xinfo->sp; - { - /* - * Clear sh_css_sp_stage for easy debugging. - * program_input_circuit must be saved as it is set outside - * this function. - */ - uint8_t program_input_circuit; - program_input_circuit = sh_css_sp_stage.program_input_circuit; - memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage)); - sh_css_sp_stage.program_input_circuit = (uint8_t)program_input_circuit; - } - - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - - if (info == NULL) { - sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] = mmgr_NULL; - return IA_CSS_SUCCESS; - } - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) - (void)continuous; - sh_css_sp_stage.deinterleaved = 0; -#else - sh_css_sp_stage.deinterleaved = ((stage == 0) && continuous); -#endif - - initialize_stage_frames(&sh_css_sp_stage.frames); - /* - * TODO: Make the Host dynamically determine - * the stage type. 
- */ - sh_css_sp_stage.stage_type = SH_CSS_ISP_STAGE_TYPE; - sh_css_sp_stage.num = (uint8_t)stage; - sh_css_sp_stage.isp_online = (uint8_t)binary->online; - sh_css_sp_stage.isp_copy_vf = (uint8_t)args->copy_vf; - sh_css_sp_stage.isp_copy_output = (uint8_t)args->copy_output; - sh_css_sp_stage.enable.vf_output = (args->out_vf_frame != NULL); - - /* Copy the frame infos first, to be overwritten by the frames, - if these are present. - */ - sh_css_sp_stage.frames.effective_in_res.width = binary->effective_in_frame_res.width; - sh_css_sp_stage.frames.effective_in_res.height = binary->effective_in_frame_res.height; - - ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.in.info, - &binary->in_frame_info); - for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) { - ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.out[i].info, - &binary->out_frame_info[i]); - } - ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.internal_frame_info, - &binary->internal_frame_info); - sh_css_sp_stage.dvs_envelope.width = binary->dvs_envelope.width; - sh_css_sp_stage.dvs_envelope.height = binary->dvs_envelope.height; - sh_css_sp_stage.isp_pipe_version = (uint8_t)info->pipeline.isp_pipe_version; - sh_css_sp_stage.isp_deci_log_factor = (uint8_t)binary->deci_factor_log2; - sh_css_sp_stage.isp_vf_downscale_bits = (uint8_t)binary->vf_downscale_log2; - - sh_css_sp_stage.if_config_index = (uint8_t) if_config_index; - - sh_css_sp_stage.sp_enable_xnr = (uint8_t)xnr; - sh_css_sp_stage.xmem_bin_addr = xinfo->xmem_addr; - sh_css_sp_stage.xmem_map_addr = sh_css_params_ddr_address_map(); - sh_css_isp_stage.blob_info = *blob_info; - sh_css_stage_write_binary_info((struct ia_css_binary_info *)info); - - /* Make sure binary name is smaller than allowed string size */ - assert(strlen(binary_name) < SH_CSS_MAX_BINARY_NAME-1); - strncpy(sh_css_isp_stage.binary_name, binary_name, SH_CSS_MAX_BINARY_NAME-1); - sh_css_isp_stage.binary_name[SH_CSS_MAX_BINARY_NAME - 1] = 0; - 
sh_css_isp_stage.mem_initializers = *isp_mem_if; - - /* - * Even when a stage does not need uds and does not params, - * ia_css_uds_sp_scale_params() seems to be called (needs - * further investigation). This function can not deal with - * dx, dy = {0, 0} - */ - - err = sh_css_sp_write_frame_pointers(args); - /* TODO: move it to a better place */ - if (binary->info->sp.enable.s3a) { - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_3A_STATISTICS, thread_id, &queue_id); - sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.s3a_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_3A_STATISTICS); - } - if (binary->info->sp.enable.dis) { - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_DIS_STATISTICS, thread_id, &queue_id); - sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.dvs_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_DIS_STATISTICS); - } -#if defined SH_CSS_ENABLE_METADATA - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id, &queue_id); - sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_METADATA); -#endif - if (err != IA_CSS_SUCCESS) - return err; - -#ifdef USE_INPUT_SYSTEM_VERSION_2401 -#ifndef ISP2401 - if (args->in_frame) { - pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num); - if (pipe == NULL) - return IA_CSS_ERR_INTERNAL_ERROR; - ia_css_get_crop_offsets(pipe, &args->in_frame->info); - } else if (&binary->in_frame_info) { - pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num); - if (pipe == NULL) - return IA_CSS_ERR_INTERNAL_ERROR; - ia_css_get_crop_offsets(pipe, &binary->in_frame_info); -#else - if (stage == 0) { - if (args->in_frame) { - pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num); - if (pipe == NULL) - return IA_CSS_ERR_INTERNAL_ERROR; - ia_css_get_crop_offsets(pipe, &args->in_frame->info); - } else if (&binary->in_frame_info) { - pipe = 
find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num); - if (pipe == NULL) - return IA_CSS_ERR_INTERNAL_ERROR; - ia_css_get_crop_offsets(pipe, &binary->in_frame_info); - } -#endif - } -#else - (void)pipe; /*avoid build warning*/ -#endif - - err = configure_isp_from_args(&sh_css_sp_group.pipe[thread_id], - binary, args, two_ppc, sh_css_sp_stage.deinterleaved); - if (err != IA_CSS_SUCCESS) - return err; - - initialize_isp_states(binary); - - /* we do this only for preview pipe because in fill_binary_info function - * we assign vf_out res to out res, but for ISP internal processing, we need - * the original out res. for video pipe, it has two output pins --- out and - * vf_out, so it can keep these two resolutions already. */ - if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW && - (binary->vf_downscale_log2 > 0)) { - /* TODO: Remove this after preview output decimation is fixed - * by configuring out&vf info fiels properly */ - sh_css_sp_stage.frames.out[0].info.padded_width - <<= binary->vf_downscale_log2; - sh_css_sp_stage.frames.out[0].info.res.width - <<= binary->vf_downscale_log2; - sh_css_sp_stage.frames.out[0].info.res.height - <<= binary->vf_downscale_log2; - } - err = copy_isp_mem_if_to_ddr(binary); - if (err != IA_CSS_SUCCESS) - return err; - - return IA_CSS_SUCCESS; -} - -static enum ia_css_err -sp_init_stage(struct ia_css_pipeline_stage *stage, - unsigned int pipe_num, - bool xnr, - unsigned int if_config_index, - bool two_ppc) -{ - struct ia_css_binary *binary; - const struct ia_css_fw_info *firmware; - const struct sh_css_binary_args *args; - unsigned stage_num; -/* - * Initialiser required because of the "else" path below. - * Is this a valid path ? 
- */ - const char *binary_name = ""; - const struct ia_css_binary_xinfo *info = NULL; - /* note: the var below is made static as it is quite large; - if it is not static it ends up on the stack which could - cause issues for drivers - */ - static struct ia_css_binary tmp_binary; - const struct ia_css_blob_info *blob_info = NULL; - struct ia_css_isp_param_css_segments isp_mem_if; - /* LA: should be ia_css_data, should not contain host pointer. - However, CSS/DDR pointer is not available yet. - Hack is to store it in params->ddr_ptrs and then copy it late in the SP just before vmem init. - TODO: Call this after CSS/DDR allocation and store that pointer. - Best is to allocate it at stage creation time together with host pointer. - Remove vmem from params. - */ - struct ia_css_isp_param_css_segments *mem_if = &isp_mem_if; - - enum ia_css_err err = IA_CSS_SUCCESS; - - assert(stage != NULL); - - binary = stage->binary; - firmware = stage->firmware; - args = &stage->args; - stage_num = stage->stage_num; - - - if (binary) { - info = binary->info; - binary_name = (const char *)(info->blob->name); - blob_info = &info->blob->header.blob; - ia_css_init_memory_interface(mem_if, &binary->mem_params, &binary->css_params); - } else if (firmware) { - const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL}; - if (args->out_frame[0]) - out_infos[0] = &args->out_frame[0]->info; - info = &firmware->info.isp; - ia_css_binary_fill_info(info, false, false, - ATOMISP_INPUT_FORMAT_RAW_10, - args->in_frame ? &args->in_frame->info : NULL, - NULL, - out_infos, - args->out_vf_frame ? 
&args->out_vf_frame->info - : NULL, - &tmp_binary, - NULL, - -1, true); - binary = &tmp_binary; - binary->info = info; - binary_name = IA_CSS_EXT_ISP_PROG_NAME(firmware); - blob_info = &firmware->blob; - mem_if = (struct ia_css_isp_param_css_segments *)&firmware->mem_initializers; - } else { - /* SP stage */ - assert(stage->sp_func != IA_CSS_PIPELINE_NO_FUNC); - /* binary and blob_info are now NULL. - These will be passed to sh_css_sp_init_stage - and dereferenced there, so passing a NULL - pointer is no good. return an error */ - return IA_CSS_ERR_INTERNAL_ERROR; - } - - err = sh_css_sp_init_stage(binary, - (const char *)binary_name, - blob_info, - args, - pipe_num, - stage_num, - xnr, - mem_if, - if_config_index, - two_ppc); - return err; -} - -static void -sp_init_sp_stage(struct ia_css_pipeline_stage *stage, - unsigned pipe_num, - bool two_ppc, - enum sh_css_pipe_config_override copy_ovrd, - unsigned int if_config_index) -{ - const struct sh_css_binary_args *args = &stage->args; - - assert(stage != NULL); - switch (stage->sp_func) { - case IA_CSS_PIPELINE_RAW_COPY: - sh_css_sp_start_raw_copy(args->out_frame[0], - pipe_num, two_ppc, - stage->max_input_width, - copy_ovrd, if_config_index); - break; - case IA_CSS_PIPELINE_BIN_COPY: - assert(false); /* TBI */ - case IA_CSS_PIPELINE_ISYS_COPY: - sh_css_sp_start_isys_copy(args->out_frame[0], - pipe_num, stage->max_input_width, if_config_index); - break; - case IA_CSS_PIPELINE_NO_FUNC: - assert(false); - } -} - -void -sh_css_sp_init_pipeline(struct ia_css_pipeline *me, - enum ia_css_pipe_id id, - uint8_t pipe_num, - bool xnr, - bool two_ppc, - bool continuous, - bool offline, - unsigned int required_bds_factor, - enum sh_css_pipe_config_override copy_ovrd, - enum ia_css_input_mode input_mode, - const struct ia_css_metadata_config *md_config, - const struct ia_css_metadata_info *md_info, -#if !defined(HAS_NO_INPUT_SYSTEM) - const enum mipi_port_id port_id -#endif -#ifdef ISP2401 - , - const struct ia_css_coordinate 
*internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame - positioned on shading table at shading correction in ISP. */ - const struct ia_css_isp_parameters *params -#endif - ) -{ - /* Get first stage */ - struct ia_css_pipeline_stage *stage = NULL; - struct ia_css_binary *first_binary = NULL; - struct ia_css_pipe *pipe = NULL; - unsigned num; - - enum ia_css_pipe_id pipe_id = id; - unsigned int thread_id; - uint8_t if_config_index, tmp_if_config_index; - - assert(me != NULL); - -#if !defined(HAS_NO_INPUT_SYSTEM) - assert(me->stages != NULL); - - first_binary = me->stages->binary; - - if (input_mode == IA_CSS_INPUT_MODE_SENSOR || - input_mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { - assert(port_id < N_MIPI_PORT_ID); - if (port_id >= N_MIPI_PORT_ID) /* should not happen but KW does not know */ - return; /* we should be able to return an error */ - if_config_index = (uint8_t) (port_id - MIPI_PORT0_ID); - } else if (input_mode == IA_CSS_INPUT_MODE_MEMORY) { - if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED; - } else { - if_config_index = 0x0; - } -#else - (void)input_mode; - if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED; -#endif - - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline)); - - /* Count stages */ - for (stage = me->stages, num = 0; stage; stage = stage->next, num++) { - stage->stage_num = num; - ia_css_debug_pipe_graph_dump_stage(stage, id); - } - me->num_stages = num; - - if (first_binary != NULL) { - /* Init pipeline data */ - sh_css_sp_init_group(two_ppc, first_binary->input_format, - offline, if_config_index); - } /* if (first_binary != NULL) */ - -#if defined(USE_INPUT_SYSTEM_VERSION_2401) || defined(USE_INPUT_SYSTEM_VERSION_2) - /* Signal the host immediately after start for SP_ISYS_COPY only */ - if ((me->num_stages == 1) && me->stages && - (me->stages->sp_func == IA_CSS_PIPELINE_ISYS_COPY)) - sh_css_sp_group.config.no_isp_sync = true; -#endif - - /* Init 
stage data */ - sh_css_init_host2sp_frame_data(); - - sh_css_sp_group.pipe[thread_id].num_stages = 0; - sh_css_sp_group.pipe[thread_id].pipe_id = pipe_id; - sh_css_sp_group.pipe[thread_id].thread_id = thread_id; - sh_css_sp_group.pipe[thread_id].pipe_num = pipe_num; - sh_css_sp_group.pipe[thread_id].num_execs = me->num_execs; - sh_css_sp_group.pipe[thread_id].pipe_qos_config = me->pipe_qos_config; - sh_css_sp_group.pipe[thread_id].required_bds_factor = required_bds_factor; -#if !defined(HAS_NO_INPUT_SYSTEM) - sh_css_sp_group.pipe[thread_id].input_system_mode - = (uint32_t)input_mode; - sh_css_sp_group.pipe[thread_id].port_id = port_id; -#endif - sh_css_sp_group.pipe[thread_id].dvs_frame_delay = (uint32_t)me->dvs_frame_delay; - - /* TODO: next indicates from which queues parameters need to be - sampled, needs checking/improvement */ - if (ia_css_pipeline_uses_params(me)) { - sh_css_sp_group.pipe[thread_id].pipe_config = - SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id; - } - - /* For continuous use-cases, SP copy is responsible for sampling the - * parameters */ - if (continuous) - sh_css_sp_group.pipe[thread_id].pipe_config = 0; - - sh_css_sp_group.pipe[thread_id].inout_port_config = me->inout_port_config; - - pipe = find_pipe_by_num(pipe_num); - assert(pipe != NULL); - if (pipe == NULL) { - return; - } - sh_css_sp_group.pipe[thread_id].scaler_pp_lut = sh_css_pipe_get_pp_gdc_lut(pipe); - -#if defined(SH_CSS_ENABLE_METADATA) - if (md_info != NULL && md_info->size > 0) { - sh_css_sp_group.pipe[thread_id].metadata.width = md_info->resolution.width; - sh_css_sp_group.pipe[thread_id].metadata.height = md_info->resolution.height; - sh_css_sp_group.pipe[thread_id].metadata.stride = md_info->stride; - sh_css_sp_group.pipe[thread_id].metadata.size = md_info->size; - ia_css_isys_convert_stream_format_to_mipi_format( - md_config->data_type, MIPI_PREDICTOR_NONE, - &sh_css_sp_group.pipe[thread_id].metadata.format); - } -#else - (void)md_config; - (void)md_info; -#endif - -#if 
defined(SH_CSS_ENABLE_PER_FRAME_PARAMS) - sh_css_sp_group.pipe[thread_id].output_frame_queue_id = (uint32_t)SH_CSS_INVALID_QUEUE_ID; - if (IA_CSS_PIPE_ID_COPY != pipe_id) { - ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id, (enum sh_css_queue_id *)(&sh_css_sp_group.pipe[thread_id].output_frame_queue_id)); - } -#endif - -#ifdef ISP2401 - /* For the shading correction type 1 (the legacy shading table conversion in css is not used), - * the parameters are passed to the isp for the shading table centering. - */ - if (internal_frame_origin_bqs_on_sctbl != NULL && - params != NULL && params->shading_settings.enable_shading_table_conversion == 0) { - sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl - = (uint32_t)internal_frame_origin_bqs_on_sctbl->x; - sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl - = (uint32_t)internal_frame_origin_bqs_on_sctbl->y; - } else { - sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl = 0; - sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl = 0; - } - -#endif - IA_CSS_LOG("pipe_id %d port_config %08x", - pipe_id, sh_css_sp_group.pipe[thread_id].inout_port_config); - - for (stage = me->stages, num = 0; stage; stage = stage->next, num++) { - sh_css_sp_group.pipe[thread_id].num_stages++; - if (is_sp_stage(stage)) { - sp_init_sp_stage(stage, pipe_num, two_ppc, - copy_ovrd, if_config_index); - } else { - if ((stage->stage_num != 0) || SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(me->inout_port_config)) - tmp_if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED; - else - tmp_if_config_index = if_config_index; - sp_init_stage(stage, pipe_num, - xnr, tmp_if_config_index, two_ppc); - } - - store_sp_stage_data(pipe_id, pipe_num, num); - } - sh_css_sp_group.pipe[thread_id].pipe_config |= (uint32_t) - (me->acquire_isp_each_stage << IA_CSS_ACQUIRE_ISP_POS); - store_sp_group_data(); - -} - -void -sh_css_sp_uninit_pipeline(unsigned int 
pipe_num) -{ - unsigned int thread_id; - ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id); - /*memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));*/ - sh_css_sp_group.pipe[thread_id].num_stages = 0; -} - -bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command) -{ - unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - unsigned int offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_command) - / sizeof(int); - enum host2sp_commands last_cmd = host2sp_cmd_error; - (void)HIVE_ADDR_host_sp_com; /* Suppres warnings in CRUN */ - - /* Previous command must be handled by SP (by design) */ - last_cmd = load_sp_array_uint(host_sp_com, offset); - if (last_cmd != host2sp_cmd_ready) - IA_CSS_ERROR("last host command not handled by SP(%d)", last_cmd); - - store_sp_array_uint(host_sp_com, offset, host2sp_command); - - return (last_cmd == host2sp_cmd_ready); -} - -enum host2sp_commands -sh_css_read_host2sp_command(void) -{ - unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - unsigned int offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_command) - / sizeof(int); - (void)HIVE_ADDR_host_sp_com; /* Suppres warnings in CRUN */ - return (enum host2sp_commands)load_sp_array_uint(host_sp_com, offset); -} - - -/* - * Frame data is no longer part of the sp_stage structure but part of a - * seperate structure. The aim is to make the sp_data struct static - * (it defines a pipeline) and that the dynamic (per frame) data is stored - * separetly. - * - * This function must be called first every where were you start constructing - * a new pipeline by defining one or more stages with use of variable - * sh_css_sp_stage. Even the special cases like accelerator and copy_frame - * These have a pipeline of just 1 stage. 
- */ -void -sh_css_init_host2sp_frame_data(void) -{ - /* Clean table */ - unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - - (void)HIVE_ADDR_host_sp_com; /* Suppres warnings in CRUN */ - /* - * rvanimme: don't clean it to save static frame info line ref_in - * ref_out, and tnr_frames. Once this static data is in a - * seperate data struct, this may be enable (but still, there is - * no need for it) - */ -} - - -/* - * @brief Update the offline frame information in host_sp_communication. - * Refer to "sh_css_sp.h" for more details. - */ -void -sh_css_update_host2sp_offline_frame( - unsigned frame_num, - struct ia_css_frame *frame, - struct ia_css_metadata *metadata) -{ - unsigned int HIVE_ADDR_host_sp_com; - unsigned int offset; - - assert(frame_num < NUM_CONTINUOUS_FRAMES); - - /* Write new frame data into SP DMEM */ - HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_offline_frames) - / sizeof(int); - offset += frame_num; - store_sp_array_uint(host_sp_com, offset, frame ? frame->data : 0); - - /* Write metadata buffer into SP DMEM */ - offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_offline_metadata) - / sizeof(int); - offset += frame_num; - store_sp_array_uint(host_sp_com, offset, metadata ? metadata->address : 0); -} - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -/* - * @brief Update the mipi frame information in host_sp_communication. - * Refer to "sh_css_sp.h" for more details. - */ -void -sh_css_update_host2sp_mipi_frame( - unsigned frame_num, - struct ia_css_frame *frame) -{ - unsigned int HIVE_ADDR_host_sp_com; - unsigned int offset; - - /* MIPI buffers are dedicated to port, so now there are more of them. 
*/ - assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM)); - - /* Write new frame data into SP DMEM */ - HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_mipi_frames) - / sizeof(int); - offset += frame_num; - - store_sp_array_uint(host_sp_com, offset, - frame ? frame->data : 0); -} - -/* - * @brief Update the mipi metadata information in host_sp_communication. - * Refer to "sh_css_sp.h" for more details. - */ -void -sh_css_update_host2sp_mipi_metadata( - unsigned frame_num, - struct ia_css_metadata *metadata) -{ - unsigned int HIVE_ADDR_host_sp_com; - unsigned int o; - - /* MIPI buffers are dedicated to port, so now there are more of them. */ - assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM)); - - /* Write new frame data into SP DMEM */ - HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - o = offsetof(struct host_sp_communication, host2sp_mipi_metadata) - / sizeof(int); - o += frame_num; - store_sp_array_uint(host_sp_com, o, - metadata ? 
metadata->address : 0); -} - -void -sh_css_update_host2sp_num_mipi_frames(unsigned num_frames) -{ - unsigned int HIVE_ADDR_host_sp_com; - unsigned int offset; - - /* Write new frame data into SP DMEM */ - HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_num_mipi_frames) - / sizeof(int); - - store_sp_array_uint(host_sp_com, offset, num_frames); -} -#endif - -void -sh_css_update_host2sp_cont_num_raw_frames(unsigned num_frames, bool set_avail) -{ - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_host_sp_com; - unsigned int extra_num_frames, avail_num_frames; - unsigned int offset, offset_extra; - - /* Write new frame data into SP DMEM */ - fw = &sh_css_sp_fw; - HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com; - if (set_avail) { - offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_cont_avail_num_raw_frames) - / sizeof(int); - avail_num_frames = load_sp_array_uint(host_sp_com, offset); - extra_num_frames = num_frames - avail_num_frames; - offset_extra = (unsigned int)offsetof(struct host_sp_communication, host2sp_cont_extra_num_raw_frames) - / sizeof(int); - store_sp_array_uint(host_sp_com, offset_extra, extra_num_frames); - } else - offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_cont_target_num_raw_frames) - / sizeof(int); - - store_sp_array_uint(host_sp_com, offset, num_frames); -} - -void -sh_css_event_init_irq_mask(void) -{ - int i; - unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - unsigned int offset; - struct sh_css_event_irq_mask event_irq_mask_init; - - event_irq_mask_init.or_mask = IA_CSS_EVENT_TYPE_ALL; - event_irq_mask_init.and_mask = IA_CSS_EVENT_TYPE_NONE; - (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */ - - assert(sizeof(event_irq_mask_init) % HRT_BUS_BYTES == 0); - for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) { - offset = (unsigned int)offsetof(struct host_sp_communication, - 
host2sp_event_irq_mask[i]); - assert(offset % HRT_BUS_BYTES == 0); - sp_dmem_store(SP0_ID, - (unsigned int)sp_address_of(host_sp_com) + offset, - &event_irq_mask_init, sizeof(event_irq_mask_init)); - } - -} - -enum ia_css_err -ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe, - unsigned int or_mask, - unsigned int and_mask) -{ - unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - unsigned int offset; - struct sh_css_event_irq_mask event_irq_mask; - unsigned int pipe_num; - - assert(pipe != NULL); - - assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES); - /* Linux kernel does not have UINT16_MAX - * Therefore decided to comment out these 2 asserts for Linux - * Alternatives that were not chosen: - * - add a conditional #define for UINT16_MAX - * - compare with (uint16_t)~0 or 0xffff - * - different assert for Linux and Windows - */ - - (void)HIVE_ADDR_host_sp_com; /* Suppres warnings in CRUN */ - - IA_CSS_LOG("or_mask=%x, and_mask=%x", or_mask, and_mask); - event_irq_mask.or_mask = (uint16_t)or_mask; - event_irq_mask.and_mask = (uint16_t)and_mask; - - pipe_num = ia_css_pipe_get_pipe_num(pipe); - if (pipe_num >= IA_CSS_PIPE_ID_NUM) - return IA_CSS_ERR_INTERNAL_ERROR; - offset = (unsigned int)offsetof(struct host_sp_communication, - host2sp_event_irq_mask[pipe_num]); - assert(offset % HRT_BUS_BYTES == 0); - sp_dmem_store(SP0_ID, - (unsigned int)sp_address_of(host_sp_com) + offset, - &event_irq_mask, sizeof(event_irq_mask)); - - return IA_CSS_SUCCESS; -} - -enum ia_css_err -ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe, - unsigned int *or_mask, - unsigned int *and_mask) -{ - unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com; - unsigned int offset; - struct sh_css_event_irq_mask event_irq_mask; - unsigned int pipe_num; - - (void)HIVE_ADDR_host_sp_com; /* Suppres warnings in CRUN */ - - IA_CSS_ENTER_LEAVE(""); - - assert(pipe != NULL); - assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES); - - pipe_num = 
ia_css_pipe_get_pipe_num(pipe); - if (pipe_num >= IA_CSS_PIPE_ID_NUM) - return IA_CSS_ERR_INTERNAL_ERROR; - offset = (unsigned int)offsetof(struct host_sp_communication, - host2sp_event_irq_mask[pipe_num]); - assert(offset % HRT_BUS_BYTES == 0); - sp_dmem_load(SP0_ID, - (unsigned int)sp_address_of(host_sp_com) + offset, - &event_irq_mask, sizeof(event_irq_mask)); - - if (or_mask) - *or_mask = event_irq_mask.or_mask; - - if (and_mask) - *and_mask = event_irq_mask.and_mask; - - return IA_CSS_SUCCESS; -} - -void -sh_css_sp_set_sp_running(bool flag) -{ - sp_running = flag; -} - -bool -sh_css_sp_is_running(void) -{ - return sp_running; -} - -void -sh_css_sp_start_isp(void) -{ - const struct ia_css_fw_info *fw; - unsigned int HIVE_ADDR_sp_sw_state; - - fw = &sh_css_sp_fw; - HIVE_ADDR_sp_sw_state = fw->info.sp.sw_state; - - - if (sp_running) - return; - - (void)HIVE_ADDR_sp_sw_state; /* Suppres warnings in CRUN */ - - /* no longer here, sp started immediately */ - /*ia_css_debug_pipe_graph_dump_epilogue();*/ - - store_sp_group_data(); - store_sp_per_frame_data(fw); - - sp_dmem_store_uint32(SP0_ID, - (unsigned int)sp_address_of(sp_sw_state), - (uint32_t)(IA_CSS_SP_SW_TERMINATED)); - - - /* Note 1: The sp_start_isp function contains a wait till - * the input network is configured by the SP. - * Note 2: Not all SP binaries supports host2sp_commands. - * In case a binary does support it, the host2sp_command - * will have status cmd_ready after return of the function - * sh_css_hrt_sp_start_isp. There is no race-condition here - * because only after the process_frame command has been - * received, the SP starts configuring the input network. 
- */ - - /* we need to set sp_running before we call ia_css_mmu_invalidate_cache - * as ia_css_mmu_invalidate_cache checks on sp_running to - * avoid that it accesses dmem while the SP is not powered - */ - sp_running = true; - ia_css_mmu_invalidate_cache(); - /* Invalidate all MMU caches */ - mmu_invalidate_cache_all(); - - ia_css_spctrl_start(SP0_ID); - -} - -bool -ia_css_isp_has_started(void) -{ - const struct ia_css_fw_info *fw = &sh_css_sp_fw; - unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started; - (void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppres warnings in CRUN */ - - return (bool)load_sp_uint(ia_css_ispctrl_sp_isp_started); -} - - -/* - * @brief Initialize the DMA software-mask in the debug mode. - * Refer to "sh_css_sp.h" for more details. - */ -bool -sh_css_sp_init_dma_sw_reg(int dma_id) -{ - int i; - - /* enable all the DMA channels */ - for (i = 0; i < N_DMA_CHANNEL_ID; i++) { - /* enable the writing request */ - sh_css_sp_set_dma_sw_reg(dma_id, - i, - 0, - true); - /* enable the reading request */ - sh_css_sp_set_dma_sw_reg(dma_id, - i, - 1, - true); - } - - return true; -} - -/* - * @brief Set the DMA software-mask in the debug mode. - * Refer to "sh_css_sp.h" for more details. - */ -bool -sh_css_sp_set_dma_sw_reg(int dma_id, - int channel_id, - int request_type, - bool enable) -{ - uint32_t sw_reg; - uint32_t bit_val; - uint32_t bit_offset; - uint32_t bit_mask; - - (void)dma_id; - - assert(channel_id >= 0 && channel_id < N_DMA_CHANNEL_ID); - assert(request_type >= 0); - - /* get the software-mask */ - sw_reg = - sh_css_sp_group.debug.dma_sw_reg; - - /* get the offest of the target bit */ - bit_offset = (8 * request_type) + channel_id; - - /* clear the value of the target bit */ - bit_mask = ~(1 << bit_offset); - sw_reg &= bit_mask; - - /* set the value of the bit for the DMA channel */ - bit_val = enable ? 
1 : 0; - bit_val <<= bit_offset; - sw_reg |= bit_val; - - /* update the software status of DMA channels */ - sh_css_sp_group.debug.dma_sw_reg = sw_reg; - - return true; -} - -void -sh_css_sp_reset_global_vars(void) -{ - memset(&sh_css_sp_group, 0, sizeof(struct sh_css_sp_group)); - memset(&sh_css_sp_stage, 0, sizeof(struct sh_css_sp_stage)); - memset(&sh_css_isp_stage, 0, sizeof(struct sh_css_isp_stage)); - memset(&sh_css_sp_output, 0, sizeof(struct sh_css_sp_output)); - memset(&per_frame_data, 0, sizeof(struct sh_css_sp_per_frame_data)); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.h deleted file mode 100644 index 3c41e997de79..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef _SH_CSS_SP_H_ -#define _SH_CSS_SP_H_ - -#include -#include -#if !defined(HAS_NO_INPUT_FORMATTER) -#include "input_formatter.h" -#endif - -#include "ia_css_binary.h" -#include "ia_css_types.h" -#include "ia_css_pipeline.h" - -/* Function to initialize the data and bss section descr of the binary */ -void -sh_css_sp_store_init_dmem(const struct ia_css_fw_info *fw); - -void -store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num, unsigned stage); - -void -sh_css_stage_write_binary_info(struct ia_css_binary_info *info); - -void -store_sp_group_data(void); - -/* Start binary (jpeg) copy on the SP */ -void -sh_css_sp_start_binary_copy(unsigned int pipe_num, struct ia_css_frame *out_frame, - unsigned two_ppc); - -unsigned int -sh_css_sp_get_binary_copy_size(void); - -/* Return the value of a SW interrupt */ -unsigned int -sh_css_sp_get_sw_interrupt_value(unsigned int irq); - -void -sh_css_sp_init_pipeline(struct ia_css_pipeline *me, - enum ia_css_pipe_id id, - uint8_t pipe_num, - bool xnr, - bool two_ppc, - bool continuous, - bool offline, - unsigned int required_bds_factor, - enum sh_css_pipe_config_override copy_ovrd, - enum ia_css_input_mode input_mode, - const struct ia_css_metadata_config *md_config, - const struct ia_css_metadata_info *md_info, -#if !defined(HAS_NO_INPUT_SYSTEM) - const enum mipi_port_id port_id -#endif -#ifdef ISP2401 - , - const struct ia_css_coordinate *internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame - positioned on shading table at shading correction in ISP. */ - const struct ia_css_isp_parameters *params -#endif - ); - -void -sh_css_sp_uninit_pipeline(unsigned int pipe_num); - -bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command); - -enum host2sp_commands -sh_css_read_host2sp_command(void); - -void -sh_css_init_host2sp_frame_data(void); - -/** - * @brief Update the offline frame information in host_sp_communication. - * - * @param[in] frame_num The offline frame number. 
- * @param[in] frame The pointer to the offline frame. - */ -void -sh_css_update_host2sp_offline_frame( - unsigned frame_num, - struct ia_css_frame *frame, - struct ia_css_metadata *metadata); - -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) -/** - * @brief Update the mipi frame information in host_sp_communication. - * - * @param[in] frame_num The mipi frame number. - * @param[in] frame The pointer to the mipi frame. - */ -void -sh_css_update_host2sp_mipi_frame( - unsigned frame_num, - struct ia_css_frame *frame); - -/** - * @brief Update the mipi metadata information in host_sp_communication. - * - * @param[in] frame_num The mipi frame number. - * @param[in] metadata The pointer to the mipi metadata. - */ -void -sh_css_update_host2sp_mipi_metadata( - unsigned frame_num, - struct ia_css_metadata *metadata); - -/** - * @brief Update the nr of mipi frames to use in host_sp_communication. - * - * @param[in] num_frames The number of mipi frames to use. - */ -void -sh_css_update_host2sp_num_mipi_frames(unsigned num_frames); -#endif - -/** - * @brief Update the nr of offline frames to use in host_sp_communication. - * - * @param[in] num_frames The number of raw frames to use. 
- */ -void -sh_css_update_host2sp_cont_num_raw_frames(unsigned num_frames, bool set_avail); - -void -sh_css_event_init_irq_mask(void); - -void -sh_css_sp_start_isp(void); - -void -sh_css_sp_set_sp_running(bool flag); - -bool -sh_css_sp_is_running(void); - -#if SP_DEBUG != SP_DEBUG_NONE - -void -sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state); - -#endif - -#if !defined(HAS_NO_INPUT_FORMATTER) -void -sh_css_sp_set_if_configs( - const input_formatter_cfg_t *config_a, - const input_formatter_cfg_t *config_b, - const uint8_t if_config_index); -#endif - -void -sh_css_sp_program_input_circuit(int fmt_type, - int ch_id, - enum ia_css_input_mode input_mode); - -void -sh_css_sp_configure_sync_gen(int width, - int height, - int hblank_cycles, - int vblank_cycles); - -void -sh_css_sp_configure_tpg(int x_mask, - int y_mask, - int x_delta, - int y_delta, - int xy_mask); - -void -sh_css_sp_configure_prbs(int seed); - -void -sh_css_sp_configure_enable_raw_pool_locking(bool lock_all); - -void -sh_css_sp_enable_isys_event_queue(bool enable); - -void -sh_css_sp_set_disable_continuous_viewfinder(bool flag); - -void -sh_css_sp_reset_global_vars(void); - -/** - * @brief Initialize the DMA software-mask in the debug mode. - * This API should be ONLY called in the debugging mode. - * And it should be always called before the first call of - * "sh_css_set_dma_sw_reg(...)". - * - * @param[in] dma_id The ID of the target DMA. - * - * @return - * - true, if it is successful. - * - false, otherwise. - */ -bool -sh_css_sp_init_dma_sw_reg(int dma_id); - -/** - * @brief Set the DMA software-mask in the debug mode. - * This API should be ONLYL called in the debugging mode. Must - * call "sh_css_set_dma_sw_reg(...)" before this - * API is called for the first time. - * - * @param[in] dma_id The ID of the target DMA. - * @param[in] channel_id The ID of the target DMA channel. - * @param[in] request_type The type of the DMA request. 
- * For example: - * - "0" indicates the writing request. - * - "1" indicates the reading request. - * - * @param[in] enable If it is "true", the target DMA - * channel is enabled in the software. - * Otherwise, the target DMA channel - * is disabled in the software. - * - * @return - * - true, if it is successful. - * - false, otherwise. - */ -bool -sh_css_sp_set_dma_sw_reg(int dma_id, - int channel_id, - int request_type, - bool enable); - - -extern struct sh_css_sp_group sh_css_sp_group; -extern struct sh_css_sp_stage sh_css_sp_stage; -extern struct sh_css_isp_stage sh_css_isp_stage; - -#endif /* _SH_CSS_SP_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream.c deleted file mode 100644 index 60bddbb3d4c6..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream.c +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -/* This file will contain the code to implement the functions declared in ia_css_stream.h - and associated helper functions */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.c deleted file mode 100644 index 77f135e7dc3c..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "sh_css_stream_format.h" -#include - -unsigned int sh_css_stream_format_2_bits_per_subpixel( - enum atomisp_input_format format) -{ - unsigned int rval; - - switch (format) { - case ATOMISP_INPUT_FORMAT_RGB_444: - rval = 4; - break; - case ATOMISP_INPUT_FORMAT_RGB_555: - rval = 5; - break; - case ATOMISP_INPUT_FORMAT_RGB_565: - case ATOMISP_INPUT_FORMAT_RGB_666: - case ATOMISP_INPUT_FORMAT_RAW_6: - rval = 6; - break; - case ATOMISP_INPUT_FORMAT_RAW_7: - rval = 7; - break; - case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: - case ATOMISP_INPUT_FORMAT_YUV420_8: - case ATOMISP_INPUT_FORMAT_YUV422_8: - case ATOMISP_INPUT_FORMAT_RGB_888: - case ATOMISP_INPUT_FORMAT_RAW_8: - case ATOMISP_INPUT_FORMAT_BINARY_8: - case ATOMISP_INPUT_FORMAT_USER_DEF1: - case ATOMISP_INPUT_FORMAT_USER_DEF2: - case ATOMISP_INPUT_FORMAT_USER_DEF3: - case ATOMISP_INPUT_FORMAT_USER_DEF4: - case ATOMISP_INPUT_FORMAT_USER_DEF5: - case ATOMISP_INPUT_FORMAT_USER_DEF6: - case ATOMISP_INPUT_FORMAT_USER_DEF7: - case ATOMISP_INPUT_FORMAT_USER_DEF8: - rval = 8; - break; - case ATOMISP_INPUT_FORMAT_YUV420_10: - case ATOMISP_INPUT_FORMAT_YUV422_10: - case ATOMISP_INPUT_FORMAT_RAW_10: - rval = 10; - break; - case ATOMISP_INPUT_FORMAT_RAW_12: - rval = 12; - break; - case ATOMISP_INPUT_FORMAT_RAW_14: - rval = 14; - break; - case ATOMISP_INPUT_FORMAT_RAW_16: - case ATOMISP_INPUT_FORMAT_YUV420_16: - case ATOMISP_INPUT_FORMAT_YUV422_16: - rval = 16; - break; - default: - rval = 0; - break; - } - - return rval; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.h deleted file mode 100644 index b699f538e0dd..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SH_CSS_STREAM_FORMAT_H -#define __SH_CSS_STREAM_FORMAT_H - -#include - -unsigned int sh_css_stream_format_2_bits_per_subpixel( - enum atomisp_input_format format); - -#endif /* __SH_CSS_STREAM_FORMAT_H */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h deleted file mode 100644 index 0b8e3d872069..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#ifndef __SH_CSS_STRUCT_H -#define __SH_CSS_STRUCT_H - -/* This header files contains the definition of the - sh_css struct and friends; locigally the file would - probably be called sh_css.h after the pattern - .h but sh_css.h is the predecesssor of ia_css.h - so this could cause confusion; hence the _struct - in the filename -*/ - -#include -#include -#include "ia_css_pipeline.h" -#include "ia_css_pipe_public.h" -#include "ia_css_frame_public.h" -#include "ia_css_queue.h" -#include "ia_css_irq.h" - -struct sh_css { - struct ia_css_pipe *active_pipes[IA_CSS_PIPELINE_NUM_MAX]; - /* All of the pipes created at any point of time. At this moment there can - * be no more than MAX_SP_THREADS of them because pipe_num is reused as SP - * thread_id to which a pipe's pipeline is associated. At a later point, if - * we support more pipe objects, we should add test code to test that - * possibility. Also, active_pipes[] should be able to hold only - * SH_CSS_MAX_SP_THREADS objects. Anything else is misleading. 
*/ - struct ia_css_pipe *all_pipes[IA_CSS_PIPELINE_NUM_MAX]; - void * (*malloc)(size_t bytes, bool zero_mem); - void (*free)(void *ptr); -#ifdef ISP2401 - void * (*malloc_ex)(size_t bytes, bool zero_mem, const char *caller_func, int caller_line); - void (*free_ex)(void *ptr, const char *caller_func, int caller_line); -#endif - void (*flush)(struct ia_css_acc_fw *fw); - bool check_system_idle; -#ifndef ISP2401 - bool stop_copy_preview; -#endif - unsigned int num_cont_raw_frames; -#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) - unsigned int num_mipi_frames[N_CSI_PORTS]; - struct ia_css_frame *mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM]; - struct ia_css_metadata *mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM]; - unsigned int mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT]; - unsigned int mipi_frame_size[N_CSI_PORTS]; -#endif - hrt_vaddress sp_bin_addr; - hrt_data page_table_base_index; - unsigned int size_mem_words; /* \deprecated{Use ia_css_mipi_buffer_config instead.}*/ - enum ia_css_irq_type irq_type; - unsigned int pipe_counter; - - unsigned int type; /* 2400 or 2401 for now */ -}; - -#define IPU_2400 1 -#define IPU_2401 2 - -#define IS_2400() (my_css.type == IPU_2400) -#define IS_2401() (my_css.type == IPU_2401) - -extern struct sh_css my_css; - -#endif /* __SH_CSS_STRUCT_H */ - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_uds.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_uds.h deleted file mode 100644 index 5ded3a1437bf..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_uds.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _SH_CSS_UDS_H_ -#define _SH_CSS_UDS_H_ - -#include - -#define SIZE_OF_SH_CSS_UDS_INFO_IN_BITS (4 * 16) -#define SIZE_OF_SH_CSS_CROP_POS_IN_BITS (2 * 16) - -/* Uds types, used in pipeline_global.h and sh_css_internal.h */ - -struct sh_css_uds_info { - uint16_t curr_dx; - uint16_t curr_dy; - uint16_t xc; - uint16_t yc; -}; - -struct sh_css_crop_pos { - uint16_t x; - uint16_t y; -}; - -#endif /* _SH_CSS_UDS_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_version.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_version.c deleted file mode 100644 index 6e0c5e7f8620..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_version.c +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include "ia_css_version.h" -#include "ia_css_version_data.h" -#include "ia_css_err.h" -#include "sh_css_firmware.h" - -enum ia_css_err -ia_css_get_version(char *version, int max_size) -{ - if (max_size <= (int)strlen(CSS_VERSION_STRING) + (int)strlen(sh_css_get_fw_version()) + 5) - return IA_CSS_ERR_INVALID_ARGUMENTS; - strcpy(version, CSS_VERSION_STRING); - strcat(version, "FW:"); - strcat(version, sh_css_get_fw_version()); - strcat(version, "; "); - return IA_CSS_SUCCESS; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c deleted file mode 100644 index 15bc10b5e9b1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c +++ /dev/null @@ -1,727 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -/* - * This file contains entry functions for memory management of ISP driver - */ -#include -#include -#include -#include /* for kmap */ -#include /* for page_to_phys */ -#include - -#include "hmm/hmm.h" -#include "hmm/hmm_pool.h" -#include "hmm/hmm_bo.h" - -#include "atomisp_internal.h" -#include "asm/cacheflush.h" -#include "mmu/isp_mmu.h" -#include "mmu/sh_mmu_mrfld.h" - -struct hmm_bo_device bo_device; -struct hmm_pool dynamic_pool; -struct hmm_pool reserved_pool; -static ia_css_ptr dummy_ptr; -static bool hmm_initialized; -struct _hmm_mem_stat hmm_mem_stat; - -/* - * p: private - * s: shared - * u: user - * i: ion - */ -static const char hmm_bo_type_string[] = "psui"; - -static ssize_t bo_show(struct device *dev, struct device_attribute *attr, - char *buf, struct list_head *bo_list, bool active) -{ - ssize_t ret = 0; - struct hmm_buffer_object *bo; - unsigned long flags; - int i; - long total[HMM_BO_LAST] = { 0 }; - long count[HMM_BO_LAST] = { 0 }; - int index1 = 0; - int index2 = 0; - - ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n"); - if (ret <= 0) - return 0; - - index1 += ret; - - spin_lock_irqsave(&bo_device.list_lock, flags); - list_for_each_entry(bo, bo_list, list) { - if ((active && (bo->status & HMM_BO_ALLOCED)) || - (!active && !(bo->status & HMM_BO_ALLOCED))) { - ret = scnprintf(buf + index1, PAGE_SIZE - index1, - "%c %d\n", - hmm_bo_type_string[bo->type], bo->pgnr); - - total[bo->type] += bo->pgnr; - count[bo->type]++; - if (ret > 0) - index1 += ret; - } - } - spin_unlock_irqrestore(&bo_device.list_lock, flags); - - for (i = 0; i < HMM_BO_LAST; i++) { - if (count[i]) { - ret = scnprintf(buf + index1 + index2, - PAGE_SIZE - index1 - index2, - "%ld %c buffer objects: %ld KB\n", - count[i], hmm_bo_type_string[i], - total[i] * 4); - if (ret > 0) - index2 += ret; - } - } - - /* Add trailing zero, not included by scnprintf */ - return index1 + index2 + 1; -} - -static ssize_t active_bo_show(struct device *dev, struct device_attribute 
*attr, - char *buf) -{ - return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true); -} - -static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false); -} - -static ssize_t reserved_pool_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t ret = 0; - - struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info; - unsigned long flags; - - if (!pinfo || !pinfo->initialized) - return 0; - - spin_lock_irqsave(&pinfo->list_lock, flags); - ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n", - pinfo->index, pinfo->pgnr); - spin_unlock_irqrestore(&pinfo->list_lock, flags); - - if (ret > 0) - ret++; /* Add trailing zero, not included by scnprintf */ - - return ret; -}; - -static ssize_t dynamic_pool_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t ret = 0; - - struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info; - unsigned long flags; - - if (!pinfo || !pinfo->initialized) - return 0; - - spin_lock_irqsave(&pinfo->list_lock, flags); - ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n", - pinfo->pgnr, pinfo->pool_size); - spin_unlock_irqrestore(&pinfo->list_lock, flags); - - if (ret > 0) - ret++; /* Add trailing zero, not included by scnprintf */ - - return ret; -}; - -static DEVICE_ATTR_RO(active_bo); -static DEVICE_ATTR_RO(free_bo); -static DEVICE_ATTR_RO(reserved_pool); -static DEVICE_ATTR_RO(dynamic_pool); - -static struct attribute *sysfs_attrs_ctrl[] = { - &dev_attr_active_bo.attr, - &dev_attr_free_bo.attr, - &dev_attr_reserved_pool.attr, - &dev_attr_dynamic_pool.attr, - NULL -}; - -static struct attribute_group atomisp_attribute_group[] = { - {.attrs = sysfs_attrs_ctrl }, -}; - -int hmm_init(void) -{ - int ret; - - ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld, - ISP_VM_START, ISP_VM_SIZE); - if (ret) - dev_err(atomisp_dev, "hmm_bo_device_init 
failed.\n"); - - hmm_initialized = true; - - /* - * As hmm use NULL to indicate invalid ISP virtual address, - * and ISP_VM_START is defined to 0 too, so we allocate - * one piece of dummy memory, which should return value 0, - * at the beginning, to avoid hmm_alloc return 0 in the - * further allocation. - */ - dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED); - - if (!ret) { - ret = sysfs_create_group(&atomisp_dev->kobj, - atomisp_attribute_group); - if (ret) - dev_err(atomisp_dev, - "%s Failed to create sysfs\n", __func__); - } - - return ret; -} - -void hmm_cleanup(void) -{ - sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group); - - /* free dummy memory first */ - hmm_free(dummy_ptr); - dummy_ptr = 0; - - hmm_bo_device_exit(&bo_device); - hmm_initialized = false; -} - -ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type, - int from_highmem, const void __user *userptr, bool cached) -{ - unsigned int pgnr; - struct hmm_buffer_object *bo; - int ret; - - /* - * Check if we are initialized. 
In the ideal world we wouldn't need - * this but we can tackle it once the driver is a lot cleaner - */ - - if (!hmm_initialized) - hmm_init(); - /* Get page number from size */ - pgnr = size_to_pgnr_ceil(bytes); - - /* Buffer object structure init */ - bo = hmm_bo_alloc(&bo_device, pgnr); - if (!bo) { - dev_err(atomisp_dev, "hmm_bo_create failed.\n"); - goto create_bo_err; - } - - /* Allocate pages for memory */ - ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached); - if (ret) { - dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n"); - goto alloc_page_err; - } - - /* Combind the virtual address and pages togather */ - ret = hmm_bo_bind(bo); - if (ret) { - dev_err(atomisp_dev, "hmm_bo_bind failed.\n"); - goto bind_err; - } - - hmm_mem_stat.tol_cnt += pgnr; - - return bo->start; - -bind_err: - hmm_bo_free_pages(bo); -alloc_page_err: - hmm_bo_unref(bo); -create_bo_err: - return 0; -} - -void hmm_free(ia_css_ptr virt) -{ - struct hmm_buffer_object *bo; - - WARN_ON(!virt); - - bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt); - - if (!bo) { - dev_err(atomisp_dev, - "can not find buffer object start with address 0x%x\n", - (unsigned int)virt); - return; - } - - hmm_mem_stat.tol_cnt -= bo->pgnr; - - hmm_bo_unbind(bo); - hmm_bo_free_pages(bo); - hmm_bo_unref(bo); -} - -static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr) -{ - if (!bo) { - dev_err(atomisp_dev, - "can not find buffer object contains address 0x%x\n", - ptr); - return -EINVAL; - } - - if (!hmm_bo_page_allocated(bo)) { - dev_err(atomisp_dev, - "buffer object has no page allocated.\n"); - return -EINVAL; - } - - if (!hmm_bo_allocated(bo)) { - dev_err(atomisp_dev, - "buffer object has no virtual address space allocated.\n"); - return -EINVAL; - } - - return 0; -} - -/* Read function in ISP memory management */ -static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, - unsigned int bytes) -{ - struct hmm_buffer_object *bo; - unsigned int idx, offset, 
len; - char *src, *des; - int ret; - - bo = hmm_bo_device_search_in_range(&bo_device, virt); - ret = hmm_check_bo(bo, virt); - if (ret) - return ret; - - des = (char *)data; - while (bytes) { - idx = (virt - bo->start) >> PAGE_SHIFT; - offset = (virt - bo->start) - (idx << PAGE_SHIFT); - - src = (char *)kmap(bo->page_obj[idx].page) + offset; - - if ((bytes + offset) >= PAGE_SIZE) { - len = PAGE_SIZE - offset; - bytes -= len; - } else { - len = bytes; - bytes = 0; - } - - virt += len; /* update virt for next loop */ - - if (des) { - memcpy(des, src, len); - des += len; - } - - clflush_cache_range(src, len); - - kunmap(bo->page_obj[idx].page); - } - - return 0; -} - -/* Read function in ISP memory management */ -static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes) -{ - struct hmm_buffer_object *bo; - int ret; - - bo = hmm_bo_device_search_in_range(&bo_device, virt); - ret = hmm_check_bo(bo, virt); - if (ret) - return ret; - - if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { - void *src = bo->vmap_addr; - - src += (virt - bo->start); - memcpy(data, src, bytes); - if (bo->status & HMM_BO_VMAPED_CACHED) - clflush_cache_range(src, bytes); - } else { - void *vptr; - - vptr = hmm_bo_vmap(bo, true); - if (!vptr) - return load_and_flush_by_kmap(virt, data, bytes); - else - vptr = vptr + (virt - bo->start); - - memcpy(data, vptr, bytes); - clflush_cache_range(vptr, bytes); - hmm_bo_vunmap(bo); - } - - return 0; -} - -/* Read function in ISP memory management */ -int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes) -{ - if (!data) { - dev_err(atomisp_dev, - "hmm_load NULL argument\n"); - return -EINVAL; - } - return load_and_flush(virt, data, bytes); -} - -/* Flush hmm data from the data cache */ -int hmm_flush(ia_css_ptr virt, unsigned int bytes) -{ - return load_and_flush(virt, NULL, bytes); -} - -/* Write function in ISP memory management */ -int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes) -{ - struct 
hmm_buffer_object *bo; - unsigned int idx, offset, len; - char *src, *des; - int ret; - - bo = hmm_bo_device_search_in_range(&bo_device, virt); - ret = hmm_check_bo(bo, virt); - if (ret) - return ret; - - if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { - void *dst = bo->vmap_addr; - - dst += (virt - bo->start); - memcpy(dst, data, bytes); - if (bo->status & HMM_BO_VMAPED_CACHED) - clflush_cache_range(dst, bytes); - } else { - void *vptr; - - vptr = hmm_bo_vmap(bo, true); - if (vptr) { - vptr = vptr + (virt - bo->start); - - memcpy(vptr, data, bytes); - clflush_cache_range(vptr, bytes); - hmm_bo_vunmap(bo); - return 0; - } - } - - src = (char *)data; - while (bytes) { - idx = (virt - bo->start) >> PAGE_SHIFT; - offset = (virt - bo->start) - (idx << PAGE_SHIFT); - - if (in_atomic()) - des = (char *)kmap_atomic(bo->page_obj[idx].page); - else - des = (char *)kmap(bo->page_obj[idx].page); - - if (!des) { - dev_err(atomisp_dev, - "kmap buffer object page failed: pg_idx = %d\n", - idx); - return -EINVAL; - } - - des += offset; - - if ((bytes + offset) >= PAGE_SIZE) { - len = PAGE_SIZE - offset; - bytes -= len; - } else { - len = bytes; - bytes = 0; - } - - virt += len; - - memcpy(des, src, len); - - src += len; - - clflush_cache_range(des, len); - - if (in_atomic()) - /* - * Note: kunmap_atomic requires return addr from - * kmap_atomic, not the page. 
See linux/highmem.h - */ - kunmap_atomic(des - offset); - else - kunmap(bo->page_obj[idx].page); - } - - return 0; -} - -/* memset function in ISP memory management */ -int hmm_set(ia_css_ptr virt, int c, unsigned int bytes) -{ - struct hmm_buffer_object *bo; - unsigned int idx, offset, len; - char *des; - int ret; - - bo = hmm_bo_device_search_in_range(&bo_device, virt); - ret = hmm_check_bo(bo, virt); - if (ret) - return ret; - - if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { - void *dst = bo->vmap_addr; - - dst += (virt - bo->start); - memset(dst, c, bytes); - - if (bo->status & HMM_BO_VMAPED_CACHED) - clflush_cache_range(dst, bytes); - } else { - void *vptr; - - vptr = hmm_bo_vmap(bo, true); - if (vptr) { - vptr = vptr + (virt - bo->start); - memset(vptr, c, bytes); - clflush_cache_range(vptr, bytes); - hmm_bo_vunmap(bo); - return 0; - } - } - - while (bytes) { - idx = (virt - bo->start) >> PAGE_SHIFT; - offset = (virt - bo->start) - (idx << PAGE_SHIFT); - - des = (char *)kmap(bo->page_obj[idx].page) + offset; - - if ((bytes + offset) >= PAGE_SIZE) { - len = PAGE_SIZE - offset; - bytes -= len; - } else { - len = bytes; - bytes = 0; - } - - virt += len; - - memset(des, c, len); - - clflush_cache_range(des, len); - - kunmap(bo->page_obj[idx].page); - } - - return 0; -} - -/* Virtual address to physical address convert */ -phys_addr_t hmm_virt_to_phys(ia_css_ptr virt) -{ - unsigned int idx, offset; - struct hmm_buffer_object *bo; - - bo = hmm_bo_device_search_in_range(&bo_device, virt); - if (!bo) { - dev_err(atomisp_dev, - "can not find buffer object contains address 0x%x\n", - virt); - return -1; - } - - idx = (virt - bo->start) >> PAGE_SHIFT; - offset = (virt - bo->start) - (idx << PAGE_SHIFT); - - return page_to_phys(bo->page_obj[idx].page) + offset; -} - -int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt) -{ - struct hmm_buffer_object *bo; - - bo = hmm_bo_device_search_start(&bo_device, virt); - if (!bo) { - dev_err(atomisp_dev, 
- "can not find buffer object start with address 0x%x\n", - virt); - return -EINVAL; - } - - return hmm_bo_mmap(vma, bo); -} - -/* Map ISP virtual address into IA virtual address */ -void *hmm_vmap(ia_css_ptr virt, bool cached) -{ - struct hmm_buffer_object *bo; - void *ptr; - - bo = hmm_bo_device_search_in_range(&bo_device, virt); - if (!bo) { - dev_err(atomisp_dev, - "can not find buffer object contains address 0x%x\n", - virt); - return NULL; - } - - ptr = hmm_bo_vmap(bo, cached); - if (ptr) - return ptr + (virt - bo->start); - else - return NULL; -} - -/* Flush the memory which is mapped as cached memory through hmm_vmap */ -void hmm_flush_vmap(ia_css_ptr virt) -{ - struct hmm_buffer_object *bo; - - bo = hmm_bo_device_search_in_range(&bo_device, virt); - if (!bo) { - dev_warn(atomisp_dev, - "can not find buffer object contains address 0x%x\n", - virt); - return; - } - - hmm_bo_flush_vmap(bo); -} - -void hmm_vunmap(ia_css_ptr virt) -{ - struct hmm_buffer_object *bo; - - bo = hmm_bo_device_search_in_range(&bo_device, virt); - if (!bo) { - dev_warn(atomisp_dev, - "can not find buffer object contains address 0x%x\n", - virt); - return; - } - - hmm_bo_vunmap(bo); -} - -int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type) -{ - switch (pool_type) { - case HMM_POOL_TYPE_RESERVED: - reserved_pool.pops = &reserved_pops; - return reserved_pool.pops->pool_init(&reserved_pool.pool_info, - pool_size); - case HMM_POOL_TYPE_DYNAMIC: - dynamic_pool.pops = &dynamic_pops; - return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info, - pool_size); - default: - dev_err(atomisp_dev, "invalid pool type.\n"); - return -EINVAL; - } -} - -void hmm_pool_unregister(enum hmm_pool_type pool_type) -{ - switch (pool_type) { - case HMM_POOL_TYPE_RESERVED: - if (reserved_pool.pops && reserved_pool.pops->pool_exit) - reserved_pool.pops->pool_exit(&reserved_pool.pool_info); - break; - case HMM_POOL_TYPE_DYNAMIC: - if (dynamic_pool.pops && dynamic_pool.pops->pool_exit) - 
dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info); - break; - default: - dev_err(atomisp_dev, "invalid pool type.\n"); - break; - } - - return; -} - -void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached) -{ - return hmm_vmap(ptr, cached); - /* vmunmap will be done in hmm_bo_release() */ -} - -ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr) -{ - struct hmm_buffer_object *bo; - - bo = hmm_bo_device_search_vmap_start(&bo_device, ptr); - if (bo) - return bo->start; - - dev_err(atomisp_dev, - "can not find buffer object whose kernel virtual address is %p\n", - ptr); - return 0; -} - -void hmm_show_mem_stat(const char *func, const int line) -{ - trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n", - hmm_mem_stat.tol_cnt, - hmm_mem_stat.usr_size, hmm_mem_stat.res_size, - hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size, - hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size); -} - -void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr) -{ - hmm_mem_stat.res_size = res_pgnr; - /* If reserved mem pool is not enabled, set its "mem stat" values as -1. */ - if (0 == hmm_mem_stat.res_size) { - hmm_mem_stat.res_size = -1; - hmm_mem_stat.res_cnt = -1; - } - - /* If dynamic memory pool is not enabled, set its "mem stat" values as -1. */ - if (!dyc_en) { - hmm_mem_stat.dyc_size = -1; - hmm_mem_stat.dyc_thr = -1; - } else { - hmm_mem_stat.dyc_size = 0; - hmm_mem_stat.dyc_thr = dyc_pgnr; - } - hmm_mem_stat.usr_size = 0; - hmm_mem_stat.sys_size = 0; - hmm_mem_stat.tol_cnt = 0; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c deleted file mode 100644 index a6620d2c9f50..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c +++ /dev/null @@ -1,1528 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. 
- * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -/* - * This file contains functions for buffer object structure management - */ -#include -#include -#include /* for GFP_ATOMIC */ -#include -#include -#include -#include -#include /* for kmalloc */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "atomisp_internal.h" -#include "hmm/hmm_common.h" -#include "hmm/hmm_pool.h" -#include "hmm/hmm_bo.h" - -static unsigned int order_to_nr(unsigned int order) -{ - return 1U << order; -} - -static unsigned int nr_to_order_bottom(unsigned int nr) -{ - return fls(nr) - 1; -} - -static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache) -{ - struct hmm_buffer_object *bo; - - bo = kmem_cache_alloc(bo_cache, GFP_KERNEL); - if (!bo) - dev_err(atomisp_dev, "%s: failed!\n", __func__); - - return bo; -} - -static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo, - unsigned int pgnr) -{ - check_bodev_null_return(bdev, -EINVAL); - var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL, - "hmm_bo_device not inited yet.\n"); - /* prevent zero size buffer object */ - if (pgnr == 0) { - dev_err(atomisp_dev, "0 size buffer is not allowed.\n"); - return -EINVAL; - } - - memset(bo, 0, sizeof(*bo)); - mutex_init(&bo->mutex); - - /* init the bo->list HEAD as an element of entire_bo_list */ - INIT_LIST_HEAD(&bo->list); - - bo->bdev = bdev; - bo->vmap_addr = NULL; - bo->status = HMM_BO_FREE; - bo->start = bdev->start; - bo->pgnr = 
pgnr; - bo->end = bo->start + pgnr_to_size(pgnr); - bo->prev = NULL; - bo->next = NULL; - - return 0; -} - -static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree( - struct rb_node *node, unsigned int pgnr) -{ - struct hmm_buffer_object *this, *ret_bo, *temp_bo; - - this = rb_entry(node, struct hmm_buffer_object, node); - if (this->pgnr == pgnr || - (this->pgnr > pgnr && this->node.rb_left == NULL)) { - goto remove_bo_and_return; - } else { - if (this->pgnr < pgnr) { - if (!this->node.rb_right) - return NULL; - ret_bo = __bo_search_and_remove_from_free_rbtree( - this->node.rb_right, pgnr); - } else { - ret_bo = __bo_search_and_remove_from_free_rbtree( - this->node.rb_left, pgnr); - } - if (!ret_bo) { - if (this->pgnr > pgnr) - goto remove_bo_and_return; - else - return NULL; - } - return ret_bo; - } - -remove_bo_and_return: - /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL. - * 1. check if 'this->next' is NULL: - * yes: erase 'this' node and rebalance rbtree, return 'this'. - */ - if (this->next == NULL) { - rb_erase(&this->node, &this->bdev->free_rbtree); - return this; - } - /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo. - * 2. check if 'this->next->next' is NULL: - * yes: change the related 'next/prev' pointer, - * return 'this->next' but the rbtree stays unchanged. 
- */ - temp_bo = this->next; - this->next = temp_bo->next; - if (temp_bo->next) - temp_bo->next->prev = this; - temp_bo->next = NULL; - temp_bo->prev = NULL; - return temp_bo; -} - -static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root, - ia_css_ptr start) -{ - struct rb_node *n = root->rb_node; - struct hmm_buffer_object *bo; - - do { - bo = rb_entry(n, struct hmm_buffer_object, node); - - if (bo->start > start) { - if (n->rb_left == NULL) - return NULL; - n = n->rb_left; - } else if (bo->start < start) { - if (n->rb_right == NULL) - return NULL; - n = n->rb_right; - } else { - return bo; - } - } while (n); - - return NULL; -} - -static struct hmm_buffer_object *__bo_search_by_addr_in_range( - struct rb_root *root, unsigned int start) -{ - struct rb_node *n = root->rb_node; - struct hmm_buffer_object *bo; - - do { - bo = rb_entry(n, struct hmm_buffer_object, node); - - if (bo->start > start) { - if (n->rb_left == NULL) - return NULL; - n = n->rb_left; - } else { - if (bo->end > start) - return bo; - if (n->rb_right == NULL) - return NULL; - n = n->rb_right; - } - } while (n); - - return NULL; -} - -static void __bo_insert_to_free_rbtree(struct rb_root *root, - struct hmm_buffer_object *bo) -{ - struct rb_node **new = &(root->rb_node); - struct rb_node *parent = NULL; - struct hmm_buffer_object *this; - unsigned int pgnr = bo->pgnr; - - while (*new) { - parent = *new; - this = container_of(*new, struct hmm_buffer_object, node); - - if (pgnr < this->pgnr) { - new = &((*new)->rb_left); - } else if (pgnr > this->pgnr) { - new = &((*new)->rb_right); - } else { - bo->prev = this; - bo->next = this->next; - if (this->next) - this->next->prev = bo; - this->next = bo; - bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE; - return; - } - } - - bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE; - - rb_link_node(&bo->node, parent, new); - rb_insert_color(&bo->node, root); -} - -static void __bo_insert_to_alloc_rbtree(struct rb_root *root, - struct 
hmm_buffer_object *bo) -{ - struct rb_node **new = &(root->rb_node); - struct rb_node *parent = NULL; - struct hmm_buffer_object *this; - unsigned int start = bo->start; - - while (*new) { - parent = *new; - this = container_of(*new, struct hmm_buffer_object, node); - - if (start < this->start) - new = &((*new)->rb_left); - else - new = &((*new)->rb_right); - } - - kref_init(&bo->kref); - bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED; - - rb_link_node(&bo->node, parent, new); - rb_insert_color(&bo->node, root); -} - -static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev, - struct hmm_buffer_object *bo, - unsigned int pgnr) -{ - struct hmm_buffer_object *new_bo; - unsigned long flags; - int ret; - - new_bo = __bo_alloc(bdev->bo_cache); - if (!new_bo) { - dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__); - return NULL; - } - ret = __bo_init(bdev, new_bo, pgnr); - if (ret) { - dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__); - kmem_cache_free(bdev->bo_cache, new_bo); - return NULL; - } - - new_bo->start = bo->start; - new_bo->end = new_bo->start + pgnr_to_size(pgnr); - bo->start = new_bo->end; - bo->pgnr = bo->pgnr - pgnr; - - spin_lock_irqsave(&bdev->list_lock, flags); - list_add_tail(&new_bo->list, &bo->list); - spin_unlock_irqrestore(&bdev->list_lock, flags); - - return new_bo; -} - -static void __bo_take_off_handling(struct hmm_buffer_object *bo) -{ - struct hmm_bo_device *bdev = bo->bdev; - /* There are 4 situations when we take off a known bo from free rbtree: - * 1. if bo->next && bo->prev == NULL, bo is a rbtree node - * and does not have a linked list after bo, to take off this bo, - * we just need erase bo directly and rebalance the free rbtree - */ - if (bo->prev == NULL && bo->next == NULL) { - rb_erase(&bo->node, &bdev->free_rbtree); - /* 2. 
when bo->next != NULL && bo->prev == NULL, bo is a rbtree node, - * and has a linked list,to take off this bo we need erase bo - * first, then, insert bo->next into free rbtree and rebalance - * the free rbtree - */ - } else if (bo->prev == NULL && bo->next != NULL) { - bo->next->prev = NULL; - rb_erase(&bo->node, &bdev->free_rbtree); - __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next); - bo->next = NULL; - /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree - * node, bo is the last element of the linked list after rbtree - * node, to take off this bo, we just need set the "prev/next" - * pointers to NULL, the free rbtree stays unchaged - */ - } else if (bo->prev != NULL && bo->next == NULL) { - bo->prev->next = NULL; - bo->prev = NULL; - /* 4. when bo->prev != NULL && bo->next != NULL ,bo is not a rbtree - * node, bo is in the middle of the linked list after rbtree node, - * to take off this bo, we just set take the "prev/next" pointers - * to NULL, the free rbtree stays unchaged - */ - } else if (bo->prev != NULL && bo->next != NULL) { - bo->next->prev = bo->prev; - bo->prev->next = bo->next; - bo->next = NULL; - bo->prev = NULL; - } -} - -static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo, - struct hmm_buffer_object *next_bo) -{ - struct hmm_bo_device *bdev; - unsigned long flags; - - bdev = bo->bdev; - next_bo->start = bo->start; - next_bo->pgnr = next_bo->pgnr + bo->pgnr; - - spin_lock_irqsave(&bdev->list_lock, flags); - list_del(&bo->list); - spin_unlock_irqrestore(&bdev->list_lock, flags); - - kmem_cache_free(bo->bdev->bo_cache, bo); - - return next_bo; -} - -/* - * hmm_bo_device functions. 
- */ -int hmm_bo_device_init(struct hmm_bo_device *bdev, - struct isp_mmu_client *mmu_driver, - unsigned int vaddr_start, - unsigned int size) -{ - struct hmm_buffer_object *bo; - unsigned long flags; - int ret; - - check_bodev_null_return(bdev, -EINVAL); - - ret = isp_mmu_init(&bdev->mmu, mmu_driver); - if (ret) { - dev_err(atomisp_dev, "isp_mmu_init failed.\n"); - return ret; - } - - bdev->start = vaddr_start; - bdev->pgnr = size_to_pgnr_ceil(size); - bdev->size = pgnr_to_size(bdev->pgnr); - - spin_lock_init(&bdev->list_lock); - mutex_init(&bdev->rbtree_mutex); - - bdev->flag = HMM_BO_DEVICE_INITED; - - INIT_LIST_HEAD(&bdev->entire_bo_list); - bdev->allocated_rbtree = RB_ROOT; - bdev->free_rbtree = RB_ROOT; - - bdev->bo_cache = kmem_cache_create("bo_cache", - sizeof(struct hmm_buffer_object), 0, 0, NULL); - if (!bdev->bo_cache) { - dev_err(atomisp_dev, "%s: create cache failed!\n", __func__); - isp_mmu_exit(&bdev->mmu); - return -ENOMEM; - } - - bo = __bo_alloc(bdev->bo_cache); - if (!bo) { - dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__); - isp_mmu_exit(&bdev->mmu); - return -ENOMEM; - } - - ret = __bo_init(bdev, bo, bdev->pgnr); - if (ret) { - dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__); - kmem_cache_free(bdev->bo_cache, bo); - isp_mmu_exit(&bdev->mmu); - return -EINVAL; - } - - spin_lock_irqsave(&bdev->list_lock, flags); - list_add_tail(&bo->list, &bdev->entire_bo_list); - spin_unlock_irqrestore(&bdev->list_lock, flags); - - __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); - - return 0; -} - -struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev, - unsigned int pgnr) -{ - struct hmm_buffer_object *bo, *new_bo; - struct rb_root *root = &bdev->free_rbtree; - - check_bodev_null_return(bdev, NULL); - var_equal_return(hmm_bo_device_inited(bdev), 0, NULL, - "hmm_bo_device not inited yet.\n"); - - if (pgnr == 0) { - dev_err(atomisp_dev, "0 size buffer is not allowed.\n"); - return NULL; - } - - mutex_lock(&bdev->rbtree_mutex); 
- bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr); - if (!bo) { - mutex_unlock(&bdev->rbtree_mutex); - dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed", - __func__); - return NULL; - } - - if (bo->pgnr > pgnr) { - new_bo = __bo_break_up(bdev, bo, pgnr); - if (!new_bo) { - mutex_unlock(&bdev->rbtree_mutex); - dev_err(atomisp_dev, "%s: __bo_break_up failed!\n", - __func__); - return NULL; - } - - __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo); - __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); - - mutex_unlock(&bdev->rbtree_mutex); - return new_bo; - } - - __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo); - - mutex_unlock(&bdev->rbtree_mutex); - return bo; -} - -void hmm_bo_release(struct hmm_buffer_object *bo) -{ - struct hmm_bo_device *bdev = bo->bdev; - struct hmm_buffer_object *next_bo, *prev_bo; - - mutex_lock(&bdev->rbtree_mutex); - - /* - * FIX ME: - * - * how to destroy the bo when it is stilled MMAPED? - * - * ideally, this will not happened as hmm_bo_release - * will only be called when kref reaches 0, and in mmap - * operation the hmm_bo_ref will eventually be called. - * so, if this happened, something goes wrong. 
- */ - if (bo->status & HMM_BO_MMAPED) { - mutex_unlock(&bdev->rbtree_mutex); - dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n"); - return; - } - - if (bo->status & HMM_BO_BINDED) { - dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n"); - hmm_bo_unbind(bo); - } - - if (bo->status & HMM_BO_PAGE_ALLOCED) { - dev_warn(atomisp_dev, "the pages is not freed, free pages first\n"); - hmm_bo_free_pages(bo); - } - if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { - dev_warn(atomisp_dev, "the vunmap is not done, do it...\n"); - hmm_bo_vunmap(bo); - } - - rb_erase(&bo->node, &bdev->allocated_rbtree); - - prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list); - next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list); - - if (bo->list.prev != &bdev->entire_bo_list && - prev_bo->end == bo->start && - (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) { - __bo_take_off_handling(prev_bo); - bo = __bo_merge(prev_bo, bo); - } - - if (bo->list.next != &bdev->entire_bo_list && - next_bo->start == bo->end && - (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) { - __bo_take_off_handling(next_bo); - bo = __bo_merge(bo, next_bo); - } - - __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); - - mutex_unlock(&bdev->rbtree_mutex); - return; -} - -void hmm_bo_device_exit(struct hmm_bo_device *bdev) -{ - struct hmm_buffer_object *bo; - unsigned long flags; - - dev_dbg(atomisp_dev, "%s: entering!\n", __func__); - - check_bodev_null_return_void(bdev); - - /* - * release all allocated bos even they a in use - * and all bos will be merged into a big bo - */ - while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree)) - hmm_bo_release( - rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node)); - - dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n", - __func__); - - /* free all bos to release all ISP virtual memory */ - while (!list_empty(&bdev->entire_bo_list)) { - bo = list_to_hmm_bo(bdev->entire_bo_list.next); - - 
spin_lock_irqsave(&bdev->list_lock, flags); - list_del(&bo->list); - spin_unlock_irqrestore(&bdev->list_lock, flags); - - kmem_cache_free(bdev->bo_cache, bo); - } - - dev_dbg(atomisp_dev, "%s: finished to free all bos!\n", __func__); - - kmem_cache_destroy(bdev->bo_cache); - - isp_mmu_exit(&bdev->mmu); -} - -int hmm_bo_device_inited(struct hmm_bo_device *bdev) -{ - check_bodev_null_return(bdev, -EINVAL); - - return bdev->flag == HMM_BO_DEVICE_INITED; -} - -int hmm_bo_allocated(struct hmm_buffer_object *bo) -{ - check_bo_null_return(bo, 0); - - return bo->status & HMM_BO_ALLOCED; -} - -struct hmm_buffer_object *hmm_bo_device_search_start( - struct hmm_bo_device *bdev, ia_css_ptr vaddr) -{ - struct hmm_buffer_object *bo; - - check_bodev_null_return(bdev, NULL); - - mutex_lock(&bdev->rbtree_mutex); - bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr); - if (!bo) { - mutex_unlock(&bdev->rbtree_mutex); - dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n", - __func__, vaddr); - return NULL; - } - mutex_unlock(&bdev->rbtree_mutex); - - return bo; -} - -struct hmm_buffer_object *hmm_bo_device_search_in_range( - struct hmm_bo_device *bdev, unsigned int vaddr) -{ - struct hmm_buffer_object *bo; - - check_bodev_null_return(bdev, NULL); - - mutex_lock(&bdev->rbtree_mutex); - bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr); - if (!bo) { - mutex_unlock(&bdev->rbtree_mutex); - dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n", - __func__, vaddr); - return NULL; - } - mutex_unlock(&bdev->rbtree_mutex); - - return bo; -} - -struct hmm_buffer_object *hmm_bo_device_search_vmap_start( - struct hmm_bo_device *bdev, const void *vaddr) -{ - struct list_head *pos; - struct hmm_buffer_object *bo; - unsigned long flags; - - check_bodev_null_return(bdev, NULL); - - spin_lock_irqsave(&bdev->list_lock, flags); - list_for_each(pos, &bdev->entire_bo_list) { - bo = list_to_hmm_bo(pos); - /* pass bo which has no vm_node allocated */ - if ((bo->status & 
HMM_BO_MASK) == HMM_BO_FREE) - continue; - if (bo->vmap_addr == vaddr) - goto found; - } - spin_unlock_irqrestore(&bdev->list_lock, flags); - return NULL; -found: - spin_unlock_irqrestore(&bdev->list_lock, flags); - return bo; - -} - - -static void free_private_bo_pages(struct hmm_buffer_object *bo, - struct hmm_pool *dypool, - struct hmm_pool *repool, - int free_pgnr) -{ - int i, ret; - - for (i = 0; i < free_pgnr; i++) { - switch (bo->page_obj[i].type) { - case HMM_PAGE_TYPE_RESERVED: - if (repool->pops - && repool->pops->pool_free_pages) { - repool->pops->pool_free_pages(repool->pool_info, - &bo->page_obj[i]); - hmm_mem_stat.res_cnt--; - } - break; - /* - * HMM_PAGE_TYPE_GENERAL indicates that pages are from system - * memory, so when free them, they should be put into dynamic - * pool. - */ - case HMM_PAGE_TYPE_DYNAMIC: - case HMM_PAGE_TYPE_GENERAL: - if (dypool->pops - && dypool->pops->pool_inited - && dypool->pops->pool_inited(dypool->pool_info)) { - if (dypool->pops->pool_free_pages) - dypool->pops->pool_free_pages( - dypool->pool_info, - &bo->page_obj[i]); - break; - } - - /* - * if dynamic memory pool doesn't exist, need to free - * pages to system directly. 
- */ - default: - ret = set_pages_wb(bo->page_obj[i].page, 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err ...ret = %d\n", - ret); - /* - W/A: set_pages_wb seldom return value = -EFAULT - indicate that address of page is not in valid - range(0xffff880000000000~0xffffc7ffffffffff) - then, _free_pages would panic; Do not know why page - address be valid,it maybe memory corruption by lowmemory - */ - if (!ret) { - __free_pages(bo->page_obj[i].page, 0); - hmm_mem_stat.sys_size--; - } - break; - } - } - - return; -} - -/*Allocate pages which will be used only by ISP*/ -static int alloc_private_pages(struct hmm_buffer_object *bo, - int from_highmem, - bool cached, - struct hmm_pool *dypool, - struct hmm_pool *repool) -{ - int ret; - unsigned int pgnr, order, blk_pgnr, alloc_pgnr; - struct page *pages; - gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */ - int i, j; - int failure_number = 0; - bool reduce_order = false; - bool lack_mem = true; - - if (from_highmem) - gfp |= __GFP_HIGHMEM; - - pgnr = bo->pgnr; - - bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object), - GFP_KERNEL); - if (unlikely(!bo->page_obj)) - return -ENOMEM; - - i = 0; - alloc_pgnr = 0; - - /* - * get physical pages from dynamic pages pool. - */ - if (dypool->pops && dypool->pops->pool_alloc_pages) { - alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info, - bo->page_obj, pgnr, - cached); - hmm_mem_stat.dyc_size -= alloc_pgnr; - - if (alloc_pgnr == pgnr) - return 0; - } - - pgnr -= alloc_pgnr; - i += alloc_pgnr; - - /* - * get physical pages from reserved pages pool for atomisp. 
- */ - if (repool->pops && repool->pops->pool_alloc_pages) { - alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info, - &bo->page_obj[i], pgnr, - cached); - hmm_mem_stat.res_cnt += alloc_pgnr; - if (alloc_pgnr == pgnr) - return 0; - } - - pgnr -= alloc_pgnr; - i += alloc_pgnr; - - while (pgnr) { - order = nr_to_order_bottom(pgnr); - /* - * if be short of memory, we will set order to 0 - * everytime. - */ - if (lack_mem) - order = HMM_MIN_ORDER; - else if (order > HMM_MAX_ORDER) - order = HMM_MAX_ORDER; -retry: - /* - * When order > HMM_MIN_ORDER, for performance reasons we don't - * want alloc_pages() to sleep. In case it fails and fallbacks - * to HMM_MIN_ORDER or in case the requested order is originally - * the minimum value, we can allow alloc_pages() to sleep for - * robustness purpose. - * - * REVISIT: why __GFP_FS is necessary? - */ - if (order == HMM_MIN_ORDER) { - gfp &= ~GFP_NOWAIT; - gfp |= __GFP_RECLAIM | __GFP_FS; - } - - pages = alloc_pages(gfp, order); - if (unlikely(!pages)) { - /* - * in low memory case, if allocation page fails, - * we turn to try if order=0 allocation could - * succeed. if order=0 fails too, that means there is - * no memory left. - */ - if (order == HMM_MIN_ORDER) { - dev_err(atomisp_dev, - "%s: cannot allocate pages\n", - __func__); - goto cleanup; - } - order = HMM_MIN_ORDER; - failure_number++; - reduce_order = true; - /* - * if fail two times continuously, we think be short - * of memory now. 
- */ - if (failure_number == 2) { - lack_mem = true; - failure_number = 0; - } - goto retry; - } else { - blk_pgnr = order_to_nr(order); - - if (!cached) { - /* - * set memory to uncacheable -- UC_MINUS - */ - ret = set_pages_uc(pages, blk_pgnr); - if (ret) { - dev_err(atomisp_dev, - "set page uncacheable" - "failed.\n"); - - __free_pages(pages, order); - - goto cleanup; - } - } - - for (j = 0; j < blk_pgnr; j++) { - bo->page_obj[i].page = pages + j; - bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL; - } - - pgnr -= blk_pgnr; - hmm_mem_stat.sys_size += blk_pgnr; - - /* - * if order is not reduced this time, clear - * failure_number. - */ - if (reduce_order) - reduce_order = false; - else - failure_number = 0; - } - } - - return 0; -cleanup: - alloc_pgnr = i; - free_private_bo_pages(bo, dypool, repool, alloc_pgnr); - - kfree(bo->page_obj); - - return -ENOMEM; -} - -static void free_private_pages(struct hmm_buffer_object *bo, - struct hmm_pool *dypool, - struct hmm_pool *repool) -{ - free_private_bo_pages(bo, dypool, repool, bo->pgnr); - - kfree(bo->page_obj); -} - -/* - * Hacked from kernel function __get_user_pages in mm/memory.c - * - * Handle buffers allocated by other kernel space driver and mmaped into user - * space, function Ignore the VM_PFNMAP and VM_IO flag in VMA structure - * - * Get physical pages from user space virtual address and update into page list - */ -static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, int nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas) -{ - int i, ret; - unsigned long vm_flags; - - if (nr_pages <= 0) - return 0; - - VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); - - /* - * Require read or write permissions. - * If FOLL_FORCE is set, we only require the "MAY" flags. - */ - vm_flags = (gup_flags & FOLL_WRITE) ? - (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); - vm_flags &= (gup_flags & FOLL_FORCE) ? 
- (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); - i = 0; - - do { - struct vm_area_struct *vma; - - vma = find_vma(mm, start); - if (!vma) { - dev_err(atomisp_dev, "find_vma failed\n"); - return i ? : -EFAULT; - } - - if (is_vm_hugetlb_page(vma)) { - /* - i = follow_hugetlb_page(mm, vma, pages, vmas, - &start, &nr_pages, i, gup_flags); - */ - continue; - } - - do { - struct page *page; - unsigned long pfn; - - /* - * If we have a pending SIGKILL, don't keep faulting - * pages and potentially allocating memory. - */ - if (unlikely(fatal_signal_pending(current))) { - dev_err(atomisp_dev, - "fatal_signal_pending in %s\n", - __func__); - return i ? i : -ERESTARTSYS; - } - - ret = follow_pfn(vma, start, &pfn); - if (ret) { - dev_err(atomisp_dev, "follow_pfn() failed\n"); - return i ? : -EFAULT; - } - - page = pfn_to_page(pfn); - if (IS_ERR(page)) - return i ? i : PTR_ERR(page); - if (pages) { - pages[i] = page; - get_page(page); - flush_anon_page(vma, page, start); - flush_dcache_page(page); - } - if (vmas) - vmas[i] = vma; - i++; - start += PAGE_SIZE; - nr_pages--; - } while (nr_pages && start < vma->vm_end); - } while (nr_pages); - - return i; -} - -static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, int nr_pages, int write, int force, - struct page **pages, struct vm_area_struct **vmas) -{ - int flags = FOLL_TOUCH; - - if (pages) - flags |= FOLL_GET; - if (write) - flags |= FOLL_WRITE; - if (force) - flags |= FOLL_FORCE; - - return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas); -} - -/* - * Convert user space virtual address into pages list - */ -static int alloc_user_pages(struct hmm_buffer_object *bo, - const void __user *userptr, bool cached) -{ - int page_nr; - int i; - struct vm_area_struct *vma; - struct page **pages; - - pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL); - if (unlikely(!pages)) - return -ENOMEM; - - bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct 
hmm_page_object), - GFP_KERNEL); - if (unlikely(!bo->page_obj)) { - kfree(pages); - return -ENOMEM; - } - - mutex_unlock(&bo->mutex); - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm, (unsigned long)userptr); - up_read(¤t->mm->mmap_sem); - if (vma == NULL) { - dev_err(atomisp_dev, "find_vma failed\n"); - kfree(bo->page_obj); - kfree(pages); - mutex_lock(&bo->mutex); - return -EFAULT; - } - mutex_lock(&bo->mutex); - /* - * Handle frame buffer allocated in other kerenl space driver - * and map to user space - */ - if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { - page_nr = get_pfnmap_pages(current, current->mm, - (unsigned long)userptr, - (int)(bo->pgnr), 1, 0, - pages, NULL); - bo->mem_type = HMM_BO_MEM_TYPE_PFN; - } else { - /*Handle frame buffer allocated in user space*/ - mutex_unlock(&bo->mutex); - page_nr = get_user_pages_fast((unsigned long)userptr, - (int)(bo->pgnr), 1, pages); - mutex_lock(&bo->mutex); - bo->mem_type = HMM_BO_MEM_TYPE_USER; - } - - /* can be written by caller, not forced */ - if (page_nr != bo->pgnr) { - dev_err(atomisp_dev, - "get_user_pages err: bo->pgnr = %d, " - "pgnr actually pinned = %d.\n", - bo->pgnr, page_nr); - goto out_of_mem; - } - - for (i = 0; i < bo->pgnr; i++) { - bo->page_obj[i].page = pages[i]; - bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL; - } - hmm_mem_stat.usr_size += bo->pgnr; - kfree(pages); - - return 0; - -out_of_mem: - for (i = 0; i < page_nr; i++) - put_page(pages[i]); - kfree(pages); - kfree(bo->page_obj); - - return -ENOMEM; -} - -static void free_user_pages(struct hmm_buffer_object *bo) -{ - int i; - - for (i = 0; i < bo->pgnr; i++) - put_page(bo->page_obj[i].page); - hmm_mem_stat.usr_size -= bo->pgnr; - - kfree(bo->page_obj); -} - -/* - * allocate/free physical pages for the bo. - * - * type indicate where are the pages from. currently we have 3 types - * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE. 
- * - * from_highmem is only valid when type is HMM_BO_PRIVATE, it will - * try to alloc memory from highmem if from_highmem is set. - * - * userptr is only valid when type is HMM_BO_USER, it indicates - * the start address from user space task. - * - * from_highmem and userptr will both be ignored when type is - * HMM_BO_SHARE. - */ -int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, - enum hmm_bo_type type, int from_highmem, - const void __user *userptr, bool cached) -{ - int ret = -EINVAL; - - check_bo_null_return(bo, -EINVAL); - - mutex_lock(&bo->mutex); - check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); - - /* - * TO DO: - * add HMM_BO_USER type - */ - if (type == HMM_BO_PRIVATE) { - ret = alloc_private_pages(bo, from_highmem, - cached, &dynamic_pool, &reserved_pool); - } else if (type == HMM_BO_USER) { - ret = alloc_user_pages(bo, userptr, cached); - } else { - dev_err(atomisp_dev, "invalid buffer type.\n"); - ret = -EINVAL; - } - if (ret) - goto alloc_err; - - bo->type = type; - - bo->status |= HMM_BO_PAGE_ALLOCED; - - mutex_unlock(&bo->mutex); - - return 0; - -alloc_err: - mutex_unlock(&bo->mutex); - dev_err(atomisp_dev, "alloc pages err...\n"); - return ret; -status_err: - mutex_unlock(&bo->mutex); - dev_err(atomisp_dev, - "buffer object has already page allocated.\n"); - return -EINVAL; -} - -/* - * free physical pages of the bo. - */ -void hmm_bo_free_pages(struct hmm_buffer_object *bo) -{ - check_bo_null_return_void(bo); - - mutex_lock(&bo->mutex); - - check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2); - - /* clear the flag anyway. 
*/ - bo->status &= (~HMM_BO_PAGE_ALLOCED); - - if (bo->type == HMM_BO_PRIVATE) - free_private_pages(bo, &dynamic_pool, &reserved_pool); - else if (bo->type == HMM_BO_USER) - free_user_pages(bo); - else - dev_err(atomisp_dev, "invalid buffer type.\n"); - mutex_unlock(&bo->mutex); - - return; - -status_err2: - mutex_unlock(&bo->mutex); - dev_err(atomisp_dev, - "buffer object not page allocated yet.\n"); -} - -int hmm_bo_page_allocated(struct hmm_buffer_object *bo) -{ - check_bo_null_return(bo, 0); - - return bo->status & HMM_BO_PAGE_ALLOCED; -} - -/* - * get physical page info of the bo. - */ -int hmm_bo_get_page_info(struct hmm_buffer_object *bo, - struct hmm_page_object **page_obj, int *pgnr) -{ - check_bo_null_return(bo, -EINVAL); - - mutex_lock(&bo->mutex); - - check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); - - *page_obj = bo->page_obj; - *pgnr = bo->pgnr; - - mutex_unlock(&bo->mutex); - - return 0; - -status_err: - dev_err(atomisp_dev, - "buffer object not page allocated yet.\n"); - mutex_unlock(&bo->mutex); - return -EINVAL; -} - -/* - * bind the physical pages to a virtual address space. - */ -int hmm_bo_bind(struct hmm_buffer_object *bo) -{ - int ret; - unsigned int virt; - struct hmm_bo_device *bdev; - unsigned int i; - - check_bo_null_return(bo, -EINVAL); - - mutex_lock(&bo->mutex); - - check_bo_status_yes_goto(bo, - HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED, - status_err1); - - check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2); - - bdev = bo->bdev; - - virt = bo->start; - - for (i = 0; i < bo->pgnr; i++) { - ret = - isp_mmu_map(&bdev->mmu, virt, - page_to_phys(bo->page_obj[i].page), 1); - if (ret) - goto map_err; - virt += (1 << PAGE_SHIFT); - } - - /* - * flush TBL here. - * - * theoretically, we donot need to flush TLB as we didnot change - * any existed address mappings, but for Silicon Hive's MMU, its - * really a bug here. 
I guess when fetching PTEs (page table entity) - * to TLB, its MMU will fetch additional INVALID PTEs automatically - * for performance issue. EX, we only set up 1 page address mapping, - * meaning updating 1 PTE, but the MMU fetches 4 PTE at one time, - * so the additional 3 PTEs are invalid. - */ - if (bo->start != 0x0) - isp_mmu_flush_tlb_range(&bdev->mmu, bo->start, - (bo->pgnr << PAGE_SHIFT)); - - bo->status |= HMM_BO_BINDED; - - mutex_unlock(&bo->mutex); - - return 0; - -map_err: - /* unbind the physical pages with related virtual address space */ - virt = bo->start; - for ( ; i > 0; i--) { - isp_mmu_unmap(&bdev->mmu, virt, 1); - virt += pgnr_to_size(1); - } - - mutex_unlock(&bo->mutex); - dev_err(atomisp_dev, - "setup MMU address mapping failed.\n"); - return ret; - -status_err2: - mutex_unlock(&bo->mutex); - dev_err(atomisp_dev, "buffer object already binded.\n"); - return -EINVAL; -status_err1: - mutex_unlock(&bo->mutex); - dev_err(atomisp_dev, - "buffer object vm_node or page not allocated.\n"); - return -EINVAL; -} - -/* - * unbind the physical pages with related virtual address space. - */ -void hmm_bo_unbind(struct hmm_buffer_object *bo) -{ - unsigned int virt; - struct hmm_bo_device *bdev; - unsigned int i; - - check_bo_null_return_void(bo); - - mutex_lock(&bo->mutex); - - check_bo_status_yes_goto(bo, - HMM_BO_PAGE_ALLOCED | - HMM_BO_ALLOCED | - HMM_BO_BINDED, status_err); - - bdev = bo->bdev; - - virt = bo->start; - - for (i = 0; i < bo->pgnr; i++) { - isp_mmu_unmap(&bdev->mmu, virt, 1); - virt += pgnr_to_size(1); - } - - /* - * flush TLB as the address mapping has been removed and - * related TLBs should be invalidated. 
- */ - isp_mmu_flush_tlb_range(&bdev->mmu, bo->start, - (bo->pgnr << PAGE_SHIFT)); - - bo->status &= (~HMM_BO_BINDED); - - mutex_unlock(&bo->mutex); - - return; - -status_err: - mutex_unlock(&bo->mutex); - dev_err(atomisp_dev, - "buffer vm or page not allocated or not binded yet.\n"); -} - -int hmm_bo_binded(struct hmm_buffer_object *bo) -{ - int ret; - - check_bo_null_return(bo, 0); - - mutex_lock(&bo->mutex); - - ret = bo->status & HMM_BO_BINDED; - - mutex_unlock(&bo->mutex); - - return ret; -} - -void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached) -{ - struct page **pages; - int i; - - check_bo_null_return(bo, NULL); - - mutex_lock(&bo->mutex); - if (((bo->status & HMM_BO_VMAPED) && !cached) || - ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) { - mutex_unlock(&bo->mutex); - return bo->vmap_addr; - } - - /* cached status need to be changed, so vunmap first */ - if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { - vunmap(bo->vmap_addr); - bo->vmap_addr = NULL; - bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED); - } - - pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL); - if (unlikely(!pages)) { - mutex_unlock(&bo->mutex); - return NULL; - } - - for (i = 0; i < bo->pgnr; i++) - pages[i] = bo->page_obj[i].page; - - bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP, - cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE); - if (unlikely(!bo->vmap_addr)) { - kfree(pages); - mutex_unlock(&bo->mutex); - dev_err(atomisp_dev, "vmap failed...\n"); - return NULL; - } - bo->status |= (cached ? 
HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED); - - kfree(pages); - - mutex_unlock(&bo->mutex); - return bo->vmap_addr; -} - -void hmm_bo_flush_vmap(struct hmm_buffer_object *bo) -{ - check_bo_null_return_void(bo); - - mutex_lock(&bo->mutex); - if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) { - mutex_unlock(&bo->mutex); - return; - } - - clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE); - mutex_unlock(&bo->mutex); -} - -void hmm_bo_vunmap(struct hmm_buffer_object *bo) -{ - check_bo_null_return_void(bo); - - mutex_lock(&bo->mutex); - if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { - vunmap(bo->vmap_addr); - bo->vmap_addr = NULL; - bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED); - } - - mutex_unlock(&bo->mutex); - return; -} - -void hmm_bo_ref(struct hmm_buffer_object *bo) -{ - check_bo_null_return_void(bo); - - kref_get(&bo->kref); -} - -static void kref_hmm_bo_release(struct kref *kref) -{ - if (!kref) - return; - - hmm_bo_release(kref_to_hmm_bo(kref)); -} - -void hmm_bo_unref(struct hmm_buffer_object *bo) -{ - check_bo_null_return_void(bo); - - kref_put(&bo->kref, kref_hmm_bo_release); -} - -static void hmm_bo_vm_open(struct vm_area_struct *vma) -{ - struct hmm_buffer_object *bo = - (struct hmm_buffer_object *)vma->vm_private_data; - - check_bo_null_return_void(bo); - - hmm_bo_ref(bo); - - mutex_lock(&bo->mutex); - - bo->status |= HMM_BO_MMAPED; - - bo->mmap_count++; - - mutex_unlock(&bo->mutex); -} - -static void hmm_bo_vm_close(struct vm_area_struct *vma) -{ - struct hmm_buffer_object *bo = - (struct hmm_buffer_object *)vma->vm_private_data; - - check_bo_null_return_void(bo); - - hmm_bo_unref(bo); - - mutex_lock(&bo->mutex); - - bo->mmap_count--; - - if (!bo->mmap_count) { - bo->status &= (~HMM_BO_MMAPED); - vma->vm_private_data = NULL; - } - - mutex_unlock(&bo->mutex); -} - -static const struct vm_operations_struct hmm_bo_vm_ops = { - .open = hmm_bo_vm_open, - .close = hmm_bo_vm_close, -}; - -/* - * mmap the bo to user 
space. - */ -int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo) -{ - unsigned int start, end; - unsigned int virt; - unsigned int pgnr, i; - unsigned int pfn; - - check_bo_null_return(bo, -EINVAL); - - check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); - - pgnr = bo->pgnr; - start = vma->vm_start; - end = vma->vm_end; - - /* - * check vma's virtual address space size and buffer object's size. - * must be the same. - */ - if ((start + pgnr_to_size(pgnr)) != end) { - dev_warn(atomisp_dev, - "vma's address space size not equal" - " to buffer object's size"); - return -EINVAL; - } - - virt = vma->vm_start; - for (i = 0; i < pgnr; i++) { - pfn = page_to_pfn(bo->page_obj[i].page); - if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) { - dev_warn(atomisp_dev, - "remap_pfn_range failed:" - " virt = 0x%x, pfn = 0x%x," - " mapped_pgnr = %d\n", virt, pfn, 1); - return -EINVAL; - } - virt += PAGE_SIZE; - } - - vma->vm_private_data = bo; - - vma->vm_ops = &hmm_bo_vm_ops; - vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP; - - /* - * call hmm_bo_vm_open explictly. - */ - hmm_bo_vm_open(vma); - - return 0; - -status_err: - dev_err(atomisp_dev, "buffer page not allocated yet.\n"); - return -EINVAL; -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c deleted file mode 100644 index f59fd9908257..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -/* - * This file contains functions for dynamic memory pool management - */ -#include -#include -#include - -#include - -#include "atomisp_internal.h" - -#include "hmm/hmm_pool.h" - -/* - * dynamic memory pool ops. - */ -static unsigned int get_pages_from_dynamic_pool(void *pool, - struct hmm_page_object *page_obj, - unsigned int size, bool cached) -{ - struct hmm_page *hmm_page; - unsigned long flags; - unsigned int i = 0; - struct hmm_dynamic_pool_info *dypool_info = pool; - - if (!dypool_info) - return 0; - - spin_lock_irqsave(&dypool_info->list_lock, flags); - if (dypool_info->initialized) { - while (!list_empty(&dypool_info->pages_list)) { - hmm_page = list_entry(dypool_info->pages_list.next, - struct hmm_page, list); - - list_del(&hmm_page->list); - dypool_info->pgnr--; - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - page_obj[i].page = hmm_page->page; - page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC; - kmem_cache_free(dypool_info->pgptr_cache, hmm_page); - - if (i == size) - return i; - - spin_lock_irqsave(&dypool_info->list_lock, flags); - } - } - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - return i; -} - -static void free_pages_to_dynamic_pool(void *pool, - struct hmm_page_object *page_obj) -{ - struct hmm_page *hmm_page; - unsigned long flags; - int ret; - struct hmm_dynamic_pool_info *dypool_info = pool; - - if (!dypool_info) - return; - - spin_lock_irqsave(&dypool_info->list_lock, flags); - if (!dypool_info->initialized) { - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - return; - } - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - if (page_obj->type == HMM_PAGE_TYPE_RESERVED) - return; - - if (dypool_info->pgnr >= dypool_info->pool_size) { - 
/* free page directly back to system */ - ret = set_pages_wb(page_obj->page, 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err ...ret=%d\n", ret); - /* - W/A: set_pages_wb seldom return value = -EFAULT - indicate that address of page is not in valid - range(0xffff880000000000~0xffffc7ffffffffff) - then, _free_pages would panic; Do not know why page - address be valid, it maybe memory corruption by lowmemory - */ - if (!ret) { - __free_pages(page_obj->page, 0); - hmm_mem_stat.sys_size--; - } - return; - } - hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache, - GFP_KERNEL); - if (!hmm_page) { - /* free page directly */ - ret = set_pages_wb(page_obj->page, 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err ...ret=%d\n", ret); - if (!ret) { - __free_pages(page_obj->page, 0); - hmm_mem_stat.sys_size--; - } - return; - } - - hmm_page->page = page_obj->page; - - /* - * add to pages_list of pages_pool - */ - spin_lock_irqsave(&dypool_info->list_lock, flags); - list_add_tail(&hmm_page->list, &dypool_info->pages_list); - dypool_info->pgnr++; - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - hmm_mem_stat.dyc_size++; -} - -static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size) -{ - struct hmm_dynamic_pool_info *dypool_info; - - if (pool_size == 0) - return 0; - - dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info), - GFP_KERNEL); - if (unlikely(!dypool_info)) - return -ENOMEM; - - dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache", - sizeof(struct hmm_page), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!dypool_info->pgptr_cache) { - kfree(dypool_info); - return -ENOMEM; - } - - INIT_LIST_HEAD(&dypool_info->pages_list); - spin_lock_init(&dypool_info->list_lock); - dypool_info->initialized = true; - dypool_info->pool_size = pool_size; - dypool_info->pgnr = 0; - - *pool = dypool_info; - - return 0; -} - -static void hmm_dynamic_pool_exit(void **pool) -{ - struct hmm_dynamic_pool_info *dypool_info = *pool; - struct hmm_page 
*hmm_page; - unsigned long flags; - int ret; - - if (!dypool_info) - return; - - spin_lock_irqsave(&dypool_info->list_lock, flags); - if (!dypool_info->initialized) { - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - return; - } - dypool_info->initialized = false; - - while (!list_empty(&dypool_info->pages_list)) { - hmm_page = list_entry(dypool_info->pages_list.next, - struct hmm_page, list); - - list_del(&hmm_page->list); - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - /* can cause thread sleep, so cannot be put into spin_lock */ - ret = set_pages_wb(hmm_page->page, 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err...ret=%d\n", ret); - if (!ret) { - __free_pages(hmm_page->page, 0); - hmm_mem_stat.dyc_size--; - hmm_mem_stat.sys_size--; - } - kmem_cache_free(dypool_info->pgptr_cache, hmm_page); - spin_lock_irqsave(&dypool_info->list_lock, flags); - } - - spin_unlock_irqrestore(&dypool_info->list_lock, flags); - - kmem_cache_destroy(dypool_info->pgptr_cache); - - kfree(dypool_info); - - *pool = NULL; -} - -static int hmm_dynamic_pool_inited(void *pool) -{ - struct hmm_dynamic_pool_info *dypool_info = pool; - - if (!dypool_info) - return 0; - - return dypool_info->initialized; -} - -struct hmm_pool_ops dynamic_pops = { - .pool_init = hmm_dynamic_pool_init, - .pool_exit = hmm_dynamic_pool_exit, - .pool_alloc_pages = get_pages_from_dynamic_pool, - .pool_free_pages = free_pages_to_dynamic_pool, - .pool_inited = hmm_dynamic_pool_inited, -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c deleted file mode 100644 index f300e7547997..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -/* - * This file contains functions for reserved memory pool management - */ -#include -#include -#include - -#include - -#include "atomisp_internal.h" -#include "hmm/hmm_pool.h" - -/* - * reserved memory pool ops. - */ -static unsigned int get_pages_from_reserved_pool(void *pool, - struct hmm_page_object *page_obj, - unsigned int size, bool cached) -{ - unsigned long flags; - unsigned int i = 0; - unsigned int repool_pgnr; - int j; - struct hmm_reserved_pool_info *repool_info = pool; - - if (!repool_info) - return 0; - - spin_lock_irqsave(&repool_info->list_lock, flags); - if (repool_info->initialized) { - repool_pgnr = repool_info->index; - - for (j = repool_pgnr-1; j >= 0; j--) { - page_obj[i].page = repool_info->pages[j]; - page_obj[i].type = HMM_PAGE_TYPE_RESERVED; - i++; - repool_info->index--; - if (i == size) - break; - } - } - spin_unlock_irqrestore(&repool_info->list_lock, flags); - return i; -} - -static void free_pages_to_reserved_pool(void *pool, - struct hmm_page_object *page_obj) -{ - unsigned long flags; - struct hmm_reserved_pool_info *repool_info = pool; - - if (!repool_info) - return; - - spin_lock_irqsave(&repool_info->list_lock, flags); - - if (repool_info->initialized && - repool_info->index < repool_info->pgnr && - page_obj->type == HMM_PAGE_TYPE_RESERVED) { - repool_info->pages[repool_info->index++] = page_obj->page; - } - - spin_unlock_irqrestore(&repool_info->list_lock, flags); -} - -static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info, - unsigned int pool_size) 
-{ - struct hmm_reserved_pool_info *pool_info; - - pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info), - GFP_KERNEL); - if (unlikely(!pool_info)) - return -ENOMEM; - - pool_info->pages = kmalloc(sizeof(struct page *) * pool_size, - GFP_KERNEL); - if (unlikely(!pool_info->pages)) { - kfree(pool_info); - return -ENOMEM; - } - - pool_info->index = 0; - pool_info->pgnr = 0; - spin_lock_init(&pool_info->list_lock); - pool_info->initialized = true; - - *repool_info = pool_info; - - return 0; -} - -static int hmm_reserved_pool_init(void **pool, unsigned int pool_size) -{ - int ret; - unsigned int blk_pgnr; - unsigned int pgnr = pool_size; - unsigned int order = 0; - unsigned int i = 0; - int fail_number = 0; - struct page *pages; - int j; - struct hmm_reserved_pool_info *repool_info; - if (pool_size == 0) - return 0; - - ret = hmm_reserved_pool_setup(&repool_info, pool_size); - if (ret) { - dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n"); - return ret; - } - - pgnr = pool_size; - - i = 0; - order = MAX_ORDER; - - while (pgnr) { - blk_pgnr = 1U << order; - while (blk_pgnr > pgnr) { - order--; - blk_pgnr >>= 1U; - } - BUG_ON(order > MAX_ORDER); - - pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order); - if (unlikely(!pages)) { - if (order == 0) { - fail_number++; - dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n", - __func__, fail_number); - /* if fail five times, will goto end */ - - /* FIXME: whether is the mechanism is ok? 
*/ - if (fail_number == ALLOC_PAGE_FAIL_NUM) - goto end; - } else { - order--; - } - } else { - blk_pgnr = 1U << order; - - ret = set_pages_uc(pages, blk_pgnr); - if (ret) { - dev_err(atomisp_dev, - "set pages uncached failed\n"); - __free_pages(pages, order); - goto end; - } - - for (j = 0; j < blk_pgnr; j++) - repool_info->pages[i++] = pages + j; - - repool_info->index += blk_pgnr; - repool_info->pgnr += blk_pgnr; - - pgnr -= blk_pgnr; - - fail_number = 0; - } - } - -end: - repool_info->initialized = true; - - *pool = repool_info; - - dev_info(atomisp_dev, - "hmm_reserved_pool init successfully," - "hmm_reserved_pool is with %d pages.\n", - repool_info->pgnr); - return 0; -} - -static void hmm_reserved_pool_exit(void **pool) -{ - unsigned long flags; - int i, ret; - unsigned int pgnr; - struct hmm_reserved_pool_info *repool_info = *pool; - - if (!repool_info) - return; - - spin_lock_irqsave(&repool_info->list_lock, flags); - if (!repool_info->initialized) { - spin_unlock_irqrestore(&repool_info->list_lock, flags); - return; - } - pgnr = repool_info->pgnr; - repool_info->index = 0; - repool_info->pgnr = 0; - repool_info->initialized = false; - spin_unlock_irqrestore(&repool_info->list_lock, flags); - - for (i = 0; i < pgnr; i++) { - ret = set_pages_wb(repool_info->pages[i], 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err...ret=%d\n", ret); - /* - W/A: set_pages_wb seldom return value = -EFAULT - indicate that address of page is not in valid - range(0xffff880000000000~0xffffc7ffffffffff) - then, _free_pages would panic; Do not know why - page address be valid, it maybe memory corruption by lowmemory - */ - if (!ret) - __free_pages(repool_info->pages[i], 0); - } - - kfree(repool_info->pages); - kfree(repool_info); - - *pool = NULL; -} - -static int hmm_reserved_pool_inited(void *pool) -{ - struct hmm_reserved_pool_info *repool_info = pool; - - if (!repool_info) - return 0; - - return repool_info->initialized; -} - -struct hmm_pool_ops reserved_pops = { - 
.pool_init = hmm_reserved_pool_init, - .pool_exit = hmm_reserved_pool_exit, - .pool_alloc_pages = get_pages_from_reserved_pool, - .pool_free_pages = free_pages_to_reserved_pool, - .pool_inited = hmm_reserved_pool_inited, -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c deleted file mode 100644 index 0df96e661983..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -/* - * This file contains function for ISP virtual address management in ISP driver - */ -#include -#include -#include -#include -#include - -#include "atomisp_internal.h" -#include "mmu/isp_mmu.h" -#include "hmm/hmm_vm.h" -#include "hmm/hmm_common.h" - -static unsigned int vm_node_end(unsigned int start, unsigned int pgnr) -{ - return start + pgnr_to_size(pgnr); -} - -static int addr_in_vm_node(unsigned int addr, - struct hmm_vm_node *node) -{ - return (addr >= node->start) && (addr < (node->start + node->size)); -} - -int hmm_vm_init(struct hmm_vm *vm, unsigned int start, - unsigned int size) -{ - if (!vm) - return -1; - - vm->start = start; - vm->pgnr = size_to_pgnr_ceil(size); - vm->size = pgnr_to_size(vm->pgnr); - - INIT_LIST_HEAD(&vm->vm_node_list); - spin_lock_init(&vm->lock); - vm->cache = kmem_cache_create("atomisp_vm", sizeof(struct hmm_vm_node), - 0, 0, NULL); - - return vm->cache != NULL ? 0 : -ENOMEM; -} - -void hmm_vm_clean(struct hmm_vm *vm) -{ - struct hmm_vm_node *node, *tmp; - struct list_head new_head; - - if (!vm) - return; - - spin_lock(&vm->lock); - list_replace_init(&vm->vm_node_list, &new_head); - spin_unlock(&vm->lock); - - list_for_each_entry_safe(node, tmp, &new_head, list) { - list_del(&node->list); - kmem_cache_free(vm->cache, node); - } - - kmem_cache_destroy(vm->cache); -} - -static struct hmm_vm_node *alloc_hmm_vm_node(unsigned int pgnr, - struct hmm_vm *vm) -{ - struct hmm_vm_node *node; - - node = kmem_cache_alloc(vm->cache, GFP_KERNEL); - if (!node) - return NULL; - - INIT_LIST_HEAD(&node->list); - node->pgnr = pgnr; - node->size = pgnr_to_size(pgnr); - node->vm = vm; - - return node; -} - -struct hmm_vm_node *hmm_vm_alloc_node(struct hmm_vm *vm, unsigned int pgnr) -{ - struct list_head *head; - struct hmm_vm_node *node, *cur, *next; - unsigned int vm_start, vm_end; - unsigned int addr; - unsigned int size; - - if (!vm) - return NULL; - - vm_start = vm->start; - vm_end = vm_node_end(vm->start, vm->pgnr); - size = 
pgnr_to_size(pgnr); - - addr = vm_start; - head = &vm->vm_node_list; - - node = alloc_hmm_vm_node(pgnr, vm); - if (!node) { - dev_err(atomisp_dev, "no memory to allocate hmm vm node.\n"); - return NULL; - } - - spin_lock(&vm->lock); - /* - * if list is empty, the loop code will not be executed. - */ - list_for_each_entry(cur, head, list) { - /* Add gap between vm areas as helper to not hide overflow */ - addr = PAGE_ALIGN(vm_node_end(cur->start, cur->pgnr) + 1); - - if (list_is_last(&cur->list, head)) { - if (addr + size > vm_end) { - /* vm area does not have space anymore */ - spin_unlock(&vm->lock); - kmem_cache_free(vm->cache, node); - dev_err(atomisp_dev, - "no enough virtual address space.\n"); - return NULL; - } - - /* We still have vm space to add new node to tail */ - break; - } - - next = list_entry(cur->list.next, struct hmm_vm_node, list); - if ((next->start - addr) > size) - break; - } - node->start = addr; - node->vm = vm; - list_add(&node->list, &cur->list); - spin_unlock(&vm->lock); - - return node; -} - -void hmm_vm_free_node(struct hmm_vm_node *node) -{ - struct hmm_vm *vm; - - if (!node) - return; - - vm = node->vm; - - spin_lock(&vm->lock); - list_del(&node->list); - spin_unlock(&vm->lock); - - kmem_cache_free(vm->cache, node); -} - -struct hmm_vm_node *hmm_vm_find_node_start(struct hmm_vm *vm, unsigned int addr) -{ - struct hmm_vm_node *node; - - if (!vm) - return NULL; - - spin_lock(&vm->lock); - - list_for_each_entry(node, &vm->vm_node_list, list) { - if (node->start == addr) { - spin_unlock(&vm->lock); - return node; - } - } - - spin_unlock(&vm->lock); - return NULL; -} - -struct hmm_vm_node *hmm_vm_find_node_in_range(struct hmm_vm *vm, - unsigned int addr) -{ - struct hmm_vm_node *node; - - if (!vm) - return NULL; - - spin_lock(&vm->lock); - - list_for_each_entry(node, &vm->vm_node_list, list) { - if (addr_in_vm_node(addr, node)) { - spin_unlock(&vm->lock); - return node; - } - } - - spin_unlock(&vm->lock); - return NULL; -} diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h deleted file mode 100644 index fb38fc540b81..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#ifndef _hive_isp_css_custom_host_hrt_h_ -#define _hive_isp_css_custom_host_hrt_h_ - -#include -#include "atomisp_helper.h" - -/* - * _hrt_master_port_store/load/uload -macros using __force attributed - * cast to intentional dereferencing __iomem attributed (noderef) - * pointer from atomisp_get_io_virt_addr - */ -#define _hrt_master_port_store_8(a, d) \ - (*((s8 __force *)atomisp_get_io_virt_addr(a)) = (d)) - -#define _hrt_master_port_store_16(a, d) \ - (*((s16 __force *)atomisp_get_io_virt_addr(a)) = (d)) - -#define _hrt_master_port_store_32(a, d) \ - (*((s32 __force *)atomisp_get_io_virt_addr(a)) = (d)) - -#define _hrt_master_port_load_8(a) \ - (*(s8 __force *)atomisp_get_io_virt_addr(a)) - -#define _hrt_master_port_load_16(a) \ - (*(s16 __force *)atomisp_get_io_virt_addr(a)) - -#define _hrt_master_port_load_32(a) \ - (*(s32 __force *)atomisp_get_io_virt_addr(a)) - -#define _hrt_master_port_uload_8(a) \ - (*(u8 __force *)atomisp_get_io_virt_addr(a)) - -#define _hrt_master_port_uload_16(a) \ - 
(*(u16 __force *)atomisp_get_io_virt_addr(a)) - -#define _hrt_master_port_uload_32(a) \ - (*(u32 __force *)atomisp_get_io_virt_addr(a)) - -#define _hrt_master_port_store_8_volatile(a, d) _hrt_master_port_store_8(a, d) -#define _hrt_master_port_store_16_volatile(a, d) _hrt_master_port_store_16(a, d) -#define _hrt_master_port_store_32_volatile(a, d) _hrt_master_port_store_32(a, d) - -#define _hrt_master_port_load_8_volatile(a) _hrt_master_port_load_8(a) -#define _hrt_master_port_load_16_volatile(a) _hrt_master_port_load_16(a) -#define _hrt_master_port_load_32_volatile(a) _hrt_master_port_load_32(a) - -#define _hrt_master_port_uload_8_volatile(a) _hrt_master_port_uload_8(a) -#define _hrt_master_port_uload_16_volatile(a) _hrt_master_port_uload_16(a) -#define _hrt_master_port_uload_32_volatile(a) _hrt_master_port_uload_32(a) - -static inline void hrt_sleep(void) -{ - udelay(1); -} - -static inline uint32_t _hrt_mem_store(uint32_t to, const void *from, size_t n) -{ - unsigned i; - uint32_t _to = to; - const char *_from = (const char *)from; - for (i = 0; i < n; i++, _to++, _from++) - _hrt_master_port_store_8(_to, *_from); - return _to; -} - -static inline void *_hrt_mem_load(uint32_t from, void *to, size_t n) -{ - unsigned i; - char *_to = (char *)to; - uint32_t _from = from; - for (i = 0; i < n; i++, _to++, _from++) - *_to = _hrt_master_port_load_8(_from); - return _to; -} - -static inline uint32_t _hrt_mem_set(uint32_t to, int c, size_t n) -{ - unsigned i; - uint32_t _to = to; - for (i = 0; i < n; i++, _to++) - _hrt_master_port_store_8(_to, c); - return _to; -} - -#endif /* _hive_isp_css_custom_host_hrt_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c deleted file mode 100644 index 9b186517f20a..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging 
ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#include "atomisp_internal.h" - -#include "hive_isp_css_mm_hrt.h" -#include "hmm/hmm.h" - -#define __page_align(size) (((size) + (PAGE_SIZE-1)) & (~(PAGE_SIZE-1))) - -static void __user *my_userptr; -static unsigned my_num_pages; -static enum hrt_userptr_type my_usr_type; - -void hrt_isp_css_mm_set_user_ptr(void __user *userptr, - unsigned int num_pages, - enum hrt_userptr_type type) -{ - my_userptr = userptr; - my_num_pages = num_pages; - my_usr_type = type; -} - -static ia_css_ptr __hrt_isp_css_mm_alloc(size_t bytes, - const void __user *userptr, - unsigned int num_pages, - enum hrt_userptr_type type, - bool cached) -{ -#ifdef CONFIG_ION - if (type == HRT_USR_ION) - return hmm_alloc(bytes, HMM_BO_ION, 0, - userptr, cached); - -#endif - if (type == HRT_USR_PTR) { - if (userptr == NULL) - return hmm_alloc(bytes, HMM_BO_PRIVATE, 0, - NULL, cached); - else { - if (num_pages < ((__page_align(bytes)) >> PAGE_SHIFT)) - dev_err(atomisp_dev, - "user space memory size is less" - " than the expected size..\n"); - else if (num_pages > ((__page_align(bytes)) - >> PAGE_SHIFT)) - dev_err(atomisp_dev, - "user space memory size is" - " large than the expected size..\n"); - - return hmm_alloc(bytes, HMM_BO_USER, 0, - userptr, cached); - } - } else { - dev_err(atomisp_dev, "user ptr type is incorrect.\n"); - return 0; - } -} - -ia_css_ptr hrt_isp_css_mm_alloc(size_t bytes) -{ - return 
__hrt_isp_css_mm_alloc(bytes, my_userptr, - my_num_pages, my_usr_type, false); -} - -ia_css_ptr hrt_isp_css_mm_alloc_user_ptr(size_t bytes, - const void __user *userptr, - unsigned int num_pages, - enum hrt_userptr_type type, - bool cached) -{ - return __hrt_isp_css_mm_alloc(bytes, userptr, num_pages, - type, cached); -} - -ia_css_ptr hrt_isp_css_mm_alloc_cached(size_t bytes) -{ - if (my_userptr == NULL) - return hmm_alloc(bytes, HMM_BO_PRIVATE, 0, NULL, - HMM_CACHED); - else { - if (my_num_pages < ((__page_align(bytes)) >> PAGE_SHIFT)) - dev_err(atomisp_dev, - "user space memory size is less" - " than the expected size..\n"); - else if (my_num_pages > ((__page_align(bytes)) >> PAGE_SHIFT)) - dev_err(atomisp_dev, - "user space memory size is" - " large than the expected size..\n"); - - return hmm_alloc(bytes, HMM_BO_USER, 0, - my_userptr, HMM_CACHED); - } -} - -ia_css_ptr hrt_isp_css_mm_calloc(size_t bytes) -{ - ia_css_ptr ptr = hrt_isp_css_mm_alloc(bytes); - if (ptr) - hmm_set(ptr, 0, bytes); - return ptr; -} - -ia_css_ptr hrt_isp_css_mm_calloc_cached(size_t bytes) -{ - ia_css_ptr ptr = hrt_isp_css_mm_alloc_cached(bytes); - if (ptr) - hmm_set(ptr, 0, bytes); - return ptr; -} - diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h deleted file mode 100644 index 93762e71b4ca..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Support for Medfield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef _hive_isp_css_mm_hrt_h_ -#define _hive_isp_css_mm_hrt_h_ - -#include -#include - -#define HRT_BUF_FLAG_CACHED (1 << 0) - -enum hrt_userptr_type { - HRT_USR_PTR = 0, -#ifdef CONFIG_ION - HRT_USR_ION, -#endif -}; - -struct hrt_userbuffer_attr { - enum hrt_userptr_type type; - unsigned int pgnr; -}; - -void hrt_isp_css_mm_set_user_ptr(void __user *userptr, - unsigned int num_pages, enum hrt_userptr_type); - -/* Allocate memory, returns a virtual address */ -ia_css_ptr hrt_isp_css_mm_alloc(size_t bytes); -ia_css_ptr hrt_isp_css_mm_alloc_user_ptr(size_t bytes, - const void __user *userptr, - unsigned int num_pages, - enum hrt_userptr_type, - bool cached); -ia_css_ptr hrt_isp_css_mm_alloc_cached(size_t bytes); - -/* allocate memory and initialize with zeros, - returns a virtual address */ -ia_css_ptr hrt_isp_css_mm_calloc(size_t bytes); -ia_css_ptr hrt_isp_css_mm_calloc_cached(size_t bytes); - -#endif /* _hive_isp_css_mm_hrt_h_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h deleted file mode 100644 index 7dcc73c9f49d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __HMM_H__ -#define __HMM_H__ - -#include -#include -#include -#include - -#include "hmm/hmm_pool.h" -#include "ia_css_types.h" - -#define HMM_CACHED true -#define HMM_UNCACHED false - -int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type); -void hmm_pool_unregister(enum hmm_pool_type pool_type); - -int hmm_init(void); -void hmm_cleanup(void); - -ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type, - int from_highmem, const void __user *userptr, bool cached); -void hmm_free(ia_css_ptr ptr); -int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes); -int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes); -int hmm_set(ia_css_ptr virt, int c, unsigned int bytes); -int hmm_flush(ia_css_ptr virt, unsigned int bytes); - -/* - * get kernel memory physical address from ISP virtual address. - */ -phys_addr_t hmm_virt_to_phys(ia_css_ptr virt); - -/* - * map ISP memory starts with virt to kernel virtual address - * by using vmap. return NULL if failed. - * - * virt must be the start address of ISP memory (return by hmm_alloc), - * do not pass any other address. - */ -void *hmm_vmap(ia_css_ptr virt, bool cached); -void hmm_vunmap(ia_css_ptr virt); - -/* - * flush the cache for the vmapped buffer. - * if the buffer has not been vmapped, return directly. - */ -void hmm_flush_vmap(ia_css_ptr virt); - -/* - * Address translation from ISP shared memory address to kernel virtual address - * if the memory is not vmmaped, then do it. 
- */ -void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached); - -/* - * Address translation from kernel virtual address to ISP shared memory address - */ -ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr); - -/* - * map ISP memory starts with virt to specific vma. - * - * used for mmap operation. - * - * virt must be the start address of ISP memory (return by hmm_alloc), - * do not pass any other address. - */ -int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt); - -/* show memory statistic - */ -void hmm_show_mem_stat(const char *func, const int line); - -/* init memory statistic - */ -void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr); - -extern bool dypool_enable; -extern unsigned int dypool_pgnr; -extern struct hmm_bo_device bo_device; - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h deleted file mode 100644 index 508d6fd68f93..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __HMM_BO_H__ -#define __HMM_BO_H__ - -#include -#include -#include -#include -#include -#include "mmu/isp_mmu.h" -#include "hmm/hmm_common.h" -#include "ia_css_types.h" - -#define check_bodev_null_return(bdev, exp) \ - check_null_return(bdev, exp, \ - "NULL hmm_bo_device.\n") - -#define check_bodev_null_return_void(bdev) \ - check_null_return_void(bdev, \ - "NULL hmm_bo_device.\n") - -#define check_bo_status_yes_goto(bo, _status, label) \ - var_not_equal_goto((bo->status & (_status)), (_status), \ - label, \ - "HMM buffer status not contain %s.\n", \ - #_status) - -#define check_bo_status_no_goto(bo, _status, label) \ - var_equal_goto((bo->status & (_status)), (_status), \ - label, \ - "HMM buffer status contains %s.\n", \ - #_status) - -#define rbtree_node_to_hmm_bo(root_node) \ - container_of((root_node), struct hmm_buffer_object, node) - -#define list_to_hmm_bo(list_ptr) \ - list_entry((list_ptr), struct hmm_buffer_object, list) - -#define kref_to_hmm_bo(kref_ptr) \ - list_entry((kref_ptr), struct hmm_buffer_object, kref) - -#define check_bo_null_return(bo, exp) \ - check_null_return(bo, exp, "NULL hmm buffer object.\n") - -#define check_bo_null_return_void(bo) \ - check_null_return_void(bo, "NULL hmm buffer object.\n") - -#define HMM_MAX_ORDER 3 -#define HMM_MIN_ORDER 0 - -#define ISP_VM_START 0x0 -#define ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */ -#define ISP_PTR_NULL NULL - -#define HMM_BO_DEVICE_INITED 0x1 - -enum hmm_bo_type { - HMM_BO_PRIVATE, - HMM_BO_SHARE, - HMM_BO_USER, -#ifdef CONFIG_ION - HMM_BO_ION, -#endif - HMM_BO_LAST, -}; - -enum hmm_page_type { - HMM_PAGE_TYPE_RESERVED, - HMM_PAGE_TYPE_DYNAMIC, - HMM_PAGE_TYPE_GENERAL, -}; - -#define HMM_BO_MASK 0x1 -#define HMM_BO_FREE 0x0 -#define HMM_BO_ALLOCED 0x1 -#define HMM_BO_PAGE_ALLOCED 0x2 -#define HMM_BO_BINDED 0x4 -#define HMM_BO_MMAPED 0x8 -#define HMM_BO_VMAPED 0x10 -#define HMM_BO_VMAPED_CACHED 0x20 -#define HMM_BO_ACTIVE 0x1000 -#define HMM_BO_MEM_TYPE_USER 0x1 
-#define HMM_BO_MEM_TYPE_PFN 0x2 - -struct hmm_bo_device { - struct isp_mmu mmu; - - /* start/pgnr/size is used to record the virtual memory of this bo */ - unsigned int start; - unsigned int pgnr; - unsigned int size; - - /* list lock is used to protect the entire_bo_list */ - spinlock_t list_lock; -#ifdef CONFIG_ION - struct ion_client *iclient; -#endif - int flag; - - /* linked list for entire buffer object */ - struct list_head entire_bo_list; - /* rbtree for maintain entire allocated vm */ - struct rb_root allocated_rbtree; - /* rbtree for maintain entire free vm */ - struct rb_root free_rbtree; - struct mutex rbtree_mutex; - struct kmem_cache *bo_cache; -}; - -struct hmm_page_object { - struct page *page; - enum hmm_page_type type; -}; - -struct hmm_buffer_object { - struct hmm_bo_device *bdev; - struct list_head list; - struct kref kref; - - /* mutex protecting this BO */ - struct mutex mutex; - enum hmm_bo_type type; - struct hmm_page_object *page_obj; /* physical pages */ - int from_highmem; - int mmap_count; -#ifdef CONFIG_ION - struct ion_handle *ihandle; -#endif - int status; - int mem_type; - void *vmap_addr; /* kernel virtual address by vmap */ - - struct rb_node node; - unsigned int start; - unsigned int end; - unsigned int pgnr; - /* - * When insert a bo which has the same pgnr with an existed - * bo node in the free_rbtree, using "prev & next" pointer - * to maintain a bo linked list instead of insert this bo - * into free_rbtree directly, it will make sure each node - * in free_rbtree has different pgnr. - * "prev & next" default is NULL. - */ - struct hmm_buffer_object *prev; - struct hmm_buffer_object *next; -}; - -struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev, - unsigned int pgnr); - -void hmm_bo_release(struct hmm_buffer_object *bo); - -int hmm_bo_device_init(struct hmm_bo_device *bdev, - struct isp_mmu_client *mmu_driver, - unsigned int vaddr_start, unsigned int size); - -/* - * clean up all hmm_bo_device related things. 
- */ -void hmm_bo_device_exit(struct hmm_bo_device *bdev); - -/* - * whether the bo device is inited or not. - */ -int hmm_bo_device_inited(struct hmm_bo_device *bdev); - -/* - * increse buffer object reference. - */ -void hmm_bo_ref(struct hmm_buffer_object *bo); - -/* - * decrese buffer object reference. if reference reaches 0, - * release function of the buffer object will be called. - * - * this call is also used to release hmm_buffer_object or its - * upper level object with it embedded in. you need to call - * this function when it is no longer used. - * - * Note: - * - * user dont need to care about internal resource release of - * the buffer object in the release callback, it will be - * handled internally. - * - * this call will only release internal resource of the buffer - * object but will not free the buffer object itself, as the - * buffer object can be both pre-allocated statically or - * dynamically allocated. so user need to deal with the release - * of the buffer object itself manually. below example shows - * the normal case of using the buffer object. - * - * struct hmm_buffer_object *bo = hmm_bo_create(bdev, pgnr); - * ...... - * hmm_bo_unref(bo); - * - * or: - * - * struct hmm_buffer_object bo; - * - * hmm_bo_init(bdev, &bo, pgnr, NULL); - * ... - * hmm_bo_unref(&bo); - */ -void hmm_bo_unref(struct hmm_buffer_object *bo); - - -/* - * allocate/free physical pages for the bo. will try to alloc mem - * from highmem if from_highmem is set, and type indicate that the - * pages will be allocated by using video driver (for share buffer) - * or by ISP driver itself. - */ - - -int hmm_bo_allocated(struct hmm_buffer_object *bo); - - -/* - * allocate/free physical pages for the bo. will try to alloc mem - * from highmem if from_highmem is set, and type indicate that the - * pages will be allocated by using video driver (for share buffer) - * or by ISP driver itself. 
- */ -int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, - enum hmm_bo_type type, int from_highmem, - const void __user *userptr, bool cached); -void hmm_bo_free_pages(struct hmm_buffer_object *bo); -int hmm_bo_page_allocated(struct hmm_buffer_object *bo); - -/* - * get physical page info of the bo. - */ -int hmm_bo_get_page_info(struct hmm_buffer_object *bo, - struct hmm_page_object **page_obj, int *pgnr); - -/* - * bind/unbind the physical pages to a virtual address space. - */ -int hmm_bo_bind(struct hmm_buffer_object *bo); -void hmm_bo_unbind(struct hmm_buffer_object *bo); -int hmm_bo_binded(struct hmm_buffer_object *bo); - -/* - * vmap buffer object's pages to contiguous kernel virtual address. - * if the buffer has been vmaped, return the virtual address directly. - */ -void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached); - -/* - * flush the cache for the vmapped buffer object's pages, - * if the buffer has not been vmapped, return directly. - */ -void hmm_bo_flush_vmap(struct hmm_buffer_object *bo); - -/* - * vunmap buffer object's kernel virtual address. - */ -void hmm_bo_vunmap(struct hmm_buffer_object *bo); - -/* - * mmap the bo's physical pages to specific vma. - * - * vma's address space size must be the same as bo's size, - * otherwise it will return -EINVAL. - * - * vma->vm_flags will be set to (VM_RESERVED | VM_IO). - */ -int hmm_bo_mmap(struct vm_area_struct *vma, - struct hmm_buffer_object *bo); - -extern struct hmm_pool dynamic_pool; -extern struct hmm_pool reserved_pool; - -/* - * find the buffer object by its virtual address vaddr. - * return NULL if no such buffer object found. - */ -struct hmm_buffer_object *hmm_bo_device_search_start( - struct hmm_bo_device *bdev, ia_css_ptr vaddr); - -/* - * find the buffer object by its virtual address. - * it does not need to be the start address of one bo, - * it can be an address within the range of one bo. - * return NULL if no such buffer object found. 
- */ -struct hmm_buffer_object *hmm_bo_device_search_in_range( - struct hmm_bo_device *bdev, ia_css_ptr vaddr); - -/* - * find the buffer object with kernel virtual address vaddr. - * return NULL if no such buffer object found. - */ -struct hmm_buffer_object *hmm_bo_device_search_vmap_start( - struct hmm_bo_device *bdev, const void *vaddr); - - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h deleted file mode 100644 index 00885203fb14..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#ifndef __HMM_BO_COMMON_H__ -#define __HMM_BO_COMMON_H__ - -#define HMM_BO_NAME "HMM" - -/* - * some common use micros - */ -#define var_equal_return(var1, var2, exp, fmt, arg ...) \ - do { \ - if ((var1) == (var2)) { \ - dev_err(atomisp_dev, \ - fmt, ## arg); \ - return exp;\ - } \ - } while (0) - -#define var_equal_return_void(var1, var2, fmt, arg ...) \ - do { \ - if ((var1) == (var2)) { \ - dev_err(atomisp_dev, \ - fmt, ## arg); \ - return;\ - } \ - } while (0) - -#define var_equal_goto(var1, var2, label, fmt, arg ...) 
\ - do { \ - if ((var1) == (var2)) { \ - dev_err(atomisp_dev, \ - fmt, ## arg); \ - goto label;\ - } \ - } while (0) - -#define var_not_equal_goto(var1, var2, label, fmt, arg ...) \ - do { \ - if ((var1) != (var2)) { \ - dev_err(atomisp_dev, \ - fmt, ## arg); \ - goto label;\ - } \ - } while (0) - -#define check_null_return(ptr, exp, fmt, arg ...) \ - var_equal_return(ptr, NULL, exp, fmt, ## arg) - -#define check_null_return_void(ptr, fmt, arg ...) \ - var_equal_return_void(ptr, NULL, fmt, ## arg) - -/* hmm_mem_stat is used to trace the hmm mem used by ISP pipe. The unit is page - * number. - * - * res_size: reserved mem pool size, being allocated from system at system boot time. - * res_size >= res_cnt. - * sys_size: system mem pool size, being allocated from system at camera running time. - * dyc_size: dynamic mem pool size. - * dyc_thr: dynamic mem pool high watermark. - * dyc_size <= dyc_thr. - * usr_size: user ptr mem size. - * - * res_cnt: track the mem allocated from reserved pool at camera running time. - * tol_cnt: track the total mem used by ISP pipe at camera running time. - */ -struct _hmm_mem_stat { - int res_size; - int sys_size; - int dyc_size; - int dyc_thr; - int usr_size; - int res_cnt; - int tol_cnt; -}; - -extern struct _hmm_mem_stat hmm_mem_stat; - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h deleted file mode 100644 index bf24e44462bc..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -#ifndef __HMM_POOL_H__ -#define __HMM_POOL_H__ - -#include -#include -#include -#include -#include -#include -#include "hmm_common.h" -#include "hmm/hmm_bo.h" - -#define ALLOC_PAGE_FAIL_NUM 5 - -enum hmm_pool_type { - HMM_POOL_TYPE_RESERVED, - HMM_POOL_TYPE_DYNAMIC, -}; - -/** - * struct hmm_pool_ops - memory pool callbacks. - * - * @pool_init: initialize the memory pool. - * @pool_exit: uninitialize the memory pool. - * @pool_alloc_pages: allocate pages from memory pool. - * @pool_free_pages: free pages to memory pool. - * @pool_inited: check whether memory pool is initialized. - */ -struct hmm_pool_ops { - int (*pool_init)(void **pool, unsigned int pool_size); - void (*pool_exit)(void **pool); - unsigned int (*pool_alloc_pages)(void *pool, - struct hmm_page_object *page_obj, - unsigned int size, bool cached); - void (*pool_free_pages)(void *pool, - struct hmm_page_object *page_obj); - int (*pool_inited)(void *pool); -}; - -struct hmm_pool { - struct hmm_pool_ops *pops; - - void *pool_info; -}; - -/** - * struct hmm_reserved_pool_info - represents reserved pool private data. - * @pages: a array that store physical pages. - * The array is as reserved memory pool. - * @index: to indicate the first blank page number - * in reserved memory pool(pages array). - * @pgnr: the valid page amount in reserved memory - * pool. - * @list_lock: list lock is used to protect the operation - * to reserved memory pool. - * @flag: reserved memory pool state flag. - */ -struct hmm_reserved_pool_info { - struct page **pages; - - unsigned int index; - unsigned int pgnr; - spinlock_t list_lock; - bool initialized; -}; - -/** - * struct hmm_dynamic_pool_info - represents dynamic pool private data. 
- * @pages_list: a list that store physical pages. - * The pages list is as dynamic memory pool. - * @list_lock: list lock is used to protect the operation - * to dynamic memory pool. - * @flag: dynamic memory pool state flag. - * @pgptr_cache: struct kmem_cache, manages a cache. - */ -struct hmm_dynamic_pool_info { - struct list_head pages_list; - - /* list lock is used to protect the free pages block lists */ - spinlock_t list_lock; - - struct kmem_cache *pgptr_cache; - bool initialized; - - unsigned int pool_size; - unsigned int pgnr; -}; - -struct hmm_page { - struct page *page; - struct list_head list; -}; - -extern struct hmm_pool_ops reserved_pops; -extern struct hmm_pool_ops dynamic_pops; - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h deleted file mode 100644 index 52098161082d..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __HMM_VM_H__ -#define __HMM_VM_H__ - -#include -#include -#include -#include - -struct hmm_vm { - unsigned int start; - unsigned int pgnr; - unsigned int size; - struct list_head vm_node_list; - spinlock_t lock; - struct kmem_cache *cache; -}; - -struct hmm_vm_node { - struct list_head list; - unsigned int start; - unsigned int pgnr; - unsigned int size; - struct hmm_vm *vm; -}; -#define ISP_VM_START 0x0 -#define ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */ -#define ISP_PTR_NULL NULL - -int hmm_vm_init(struct hmm_vm *vm, unsigned int start, - unsigned int size); - -void hmm_vm_clean(struct hmm_vm *vm); - -struct hmm_vm_node *hmm_vm_alloc_node(struct hmm_vm *vm, - unsigned int pgnr); - -void hmm_vm_free_node(struct hmm_vm_node *node); - -struct hmm_vm_node *hmm_vm_find_node_start(struct hmm_vm *vm, - unsigned int addr); - -struct hmm_vm_node *hmm_vm_find_node_in_range(struct hmm_vm *vm, - unsigned int addr); - -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h deleted file mode 100644 index 4b2d94a37ea1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ -/* - * ISP MMU driver for classic two-level page tables - */ -#ifndef __ISP_MMU_H__ -#define __ISP_MMU_H__ - -#include -#include -#include - -/* - * do not change these values, the page size for ISP must be the - * same as kernel's page size. - */ -#define ISP_PAGE_OFFSET 12 -#define ISP_PAGE_SIZE (1U << ISP_PAGE_OFFSET) -#define ISP_PAGE_MASK (~(phys_addr_t)(ISP_PAGE_SIZE - 1)) - -#define ISP_L1PT_OFFSET 22 -#define ISP_L1PT_MASK (~((1U << ISP_L1PT_OFFSET) - 1)) - -#define ISP_L2PT_OFFSET 12 -#define ISP_L2PT_MASK (~(ISP_L1PT_MASK|(~(ISP_PAGE_MASK)))) - -#define ISP_L1PT_PTES 1024 -#define ISP_L2PT_PTES 1024 - -#define ISP_PTR_TO_L1_IDX(x) ((((x) & ISP_L1PT_MASK)) \ - >> ISP_L1PT_OFFSET) - -#define ISP_PTR_TO_L2_IDX(x) ((((x) & ISP_L2PT_MASK)) \ - >> ISP_L2PT_OFFSET) - -#define ISP_PAGE_ALIGN(x) (((x) + (ISP_PAGE_SIZE-1)) \ - & ISP_PAGE_MASK) - -#define ISP_PT_TO_VIRT(l1_idx, l2_idx, offset) do {\ - ((l1_idx) << ISP_L1PT_OFFSET) | \ - ((l2_idx) << ISP_L2PT_OFFSET) | \ - (offset)\ -} while (0) - -#define pgnr_to_size(pgnr) ((pgnr) << ISP_PAGE_OFFSET) -#define size_to_pgnr_ceil(size) (((size) + (1 << ISP_PAGE_OFFSET) - 1)\ - >> ISP_PAGE_OFFSET) -#define size_to_pgnr_bottom(size) ((size) >> ISP_PAGE_OFFSET) - -struct isp_mmu; - -struct isp_mmu_client { - /* - * const value - * - * @name: - * driver name - * @pte_valid_mask: - * should be 1 bit valid data, meaning the value should - * be power of 2. - */ - char *name; - unsigned int pte_valid_mask; - unsigned int null_pte; - - /* - * get page directory base address (physical address). - * - * must be provided. - */ - unsigned int (*get_pd_base) (struct isp_mmu *mmu, phys_addr_t pd_base); - /* - * callback to flush tlb. - * - * tlb_flush_range will at least flush TLBs containing - * address mapping from addr to addr + size. - * - * tlb_flush_all will flush all TLBs. - * - * tlb_flush_all is must be provided. if tlb_flush_range is - * not valid, it will set to tlb_flush_all by default. 
- */ - void (*tlb_flush_range) (struct isp_mmu *mmu, - unsigned int addr, unsigned int size); - void (*tlb_flush_all) (struct isp_mmu *mmu); - unsigned int (*phys_to_pte) (struct isp_mmu *mmu, - phys_addr_t phys); - phys_addr_t (*pte_to_phys) (struct isp_mmu *mmu, - unsigned int pte); - -}; - -struct isp_mmu { - struct isp_mmu_client *driver; - unsigned int l1_pte; - int l2_pgt_refcount[ISP_L1PT_PTES]; - phys_addr_t base_address; - - struct mutex pt_mutex; - struct kmem_cache *tbl_cache; -}; - -/* flags for PDE and PTE */ -#define ISP_PTE_VALID_MASK(mmu) \ - ((mmu)->driver->pte_valid_mask) - -#define ISP_PTE_VALID(mmu, pte) \ - ((pte) & ISP_PTE_VALID_MASK(mmu)) - -#define NULL_PAGE ((phys_addr_t)(-1) & ISP_PAGE_MASK) -#define PAGE_VALID(page) ((page) != NULL_PAGE) - -/* - * init mmu with specific mmu driver. - */ -int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver); -/* - * cleanup all mmu related things. - */ -void isp_mmu_exit(struct isp_mmu *mmu); - -/* - * setup/remove address mapping for pgnr continous physical pages - * and isp_virt. - * - * map/unmap is mutex lock protected, and caller does not have - * to do lock/unlock operation. - * - * map/unmap will not flush tlb, and caller needs to deal with - * this itself. 
- */ -int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt, - phys_addr_t phys, unsigned int pgnr); - -void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt, - unsigned int pgnr); - -static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu) -{ - if (mmu->driver && mmu->driver->tlb_flush_all) - mmu->driver->tlb_flush_all(mmu); -} - -#define isp_mmu_flush_tlb isp_mmu_flush_tlb_all - -static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu, - unsigned int start, unsigned int size) -{ - if (mmu->driver && mmu->driver->tlb_flush_range) - mmu->driver->tlb_flush_range(mmu, start, size); -} - -#endif /* ISP_MMU_H_ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h deleted file mode 100644 index 662e98f41da2..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Support for Merrifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __SH_MMU_MRFLD_H__ -#define __SH_MMU_MRFLD_H__ - -extern struct isp_mmu_client sh_mmu_mrfld; -#endif diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c deleted file mode 100644 index 198f29f4a324..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c +++ /dev/null @@ -1,584 +0,0 @@ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ -/* - * ISP MMU management wrap code - */ -#include -#include -#include -#include /* for GFP_ATOMIC */ -#include /* for kmalloc */ -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_X86 -#include -#endif - -#include "atomisp_internal.h" -#include "mmu/isp_mmu.h" - -/* - * 64-bit x86 processor physical address layout: - * 0 - 0x7fffffff DDR RAM (2GB) - * 0x80000000 - 0xffffffff MMIO (2GB) - * 0x100000000 - 0x3fffffffffff DDR RAM (64TB) - * So if the system has more than 2GB DDR memory, the lower 2GB occupies the - * physical address 0 - 0x7fffffff and the rest will start from 0x100000000. - * We have to make sure memory is allocated from the lower 2GB for devices - * that are only 32-bit capable(e.g. the ISP MMU). - * - * For any confusion, contact bin.gao@intel.com. 
- */ -#define NR_PAGES_2GB (SZ_2G / PAGE_SIZE) - -static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt, - unsigned int end_isp_virt); - -static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx) -{ - unsigned int *pt_virt = phys_to_virt(pt); - return *(pt_virt + idx); -} - -static void atomisp_set_pte(phys_addr_t pt, - unsigned int idx, unsigned int pte) -{ - unsigned int *pt_virt = phys_to_virt(pt); - *(pt_virt + idx) = pte; -} - -static void *isp_pt_phys_to_virt(phys_addr_t phys) -{ - return phys_to_virt(phys); -} - -static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu, - unsigned int pte) -{ - return mmu->driver->pte_to_phys(mmu, pte); -} - -static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu, - phys_addr_t phys) -{ - unsigned int pte = mmu->driver->phys_to_pte(mmu, phys); - return (unsigned int) (pte | ISP_PTE_VALID_MASK(mmu)); -} - -/* - * allocate a uncacheable page table. - * return physical address. - */ -static phys_addr_t alloc_page_table(struct isp_mmu *mmu) -{ - int i; - phys_addr_t page; - void *virt; - - /*page table lock may needed here*/ - /* - * The slab allocator(kmem_cache and kmalloc family) doesn't handle - * GFP_DMA32 flag, so we have to use buddy allocator. - */ - if (totalram_pages > (unsigned long)NR_PAGES_2GB) - virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); - else - virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL); - if (!virt) - return (phys_addr_t)NULL_PAGE; - - /* - * we need a uncacheable page table. 
- */ -#ifdef CONFIG_X86 - set_memory_uc((unsigned long)virt, 1); -#endif - - page = virt_to_phys(virt); - - for (i = 0; i < 1024; i++) { - /* NEED CHECK */ - atomisp_set_pte(page, i, mmu->driver->null_pte); - } - - return page; -} - -static void free_page_table(struct isp_mmu *mmu, phys_addr_t page) -{ - void *virt; - page &= ISP_PAGE_MASK; - /* - * reset the page to write back before free - */ - virt = phys_to_virt(page); - -#ifdef CONFIG_X86 - set_memory_wb((unsigned long)virt, 1); -#endif - - kmem_cache_free(mmu->tbl_cache, virt); -} - -static void mmu_remap_error(struct isp_mmu *mmu, - phys_addr_t l1_pt, unsigned int l1_idx, - phys_addr_t l2_pt, unsigned int l2_idx, - unsigned int isp_virt, phys_addr_t old_phys, - phys_addr_t new_phys) -{ - dev_err(atomisp_dev, "address remap:\n\n" - "\tL1 PT: virt = %p, phys = 0x%llx, " - "idx = %d\n" - "\tL2 PT: virt = %p, phys = 0x%llx, " - "idx = %d\n" - "\told: isp_virt = 0x%x, phys = 0x%llx\n" - "\tnew: isp_virt = 0x%x, phys = 0x%llx\n", - isp_pt_phys_to_virt(l1_pt), - (u64)l1_pt, l1_idx, - isp_pt_phys_to_virt(l2_pt), - (u64)l2_pt, l2_idx, isp_virt, - (u64)old_phys, isp_virt, - (u64)new_phys); -} - -static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu, - phys_addr_t l1_pt, unsigned int l1_idx, - phys_addr_t l2_pt, unsigned int l2_idx, - unsigned int isp_virt, unsigned int pte) -{ - dev_err(atomisp_dev, "unmap unvalid L2 pte:\n\n" - "\tL1 PT: virt = %p, phys = 0x%llx, " - "idx = %d\n" - "\tL2 PT: virt = %p, phys = 0x%llx, " - "idx = %d\n" - "\tisp_virt = 0x%x, pte(page phys) = 0x%x\n", - isp_pt_phys_to_virt(l1_pt), - (u64)l1_pt, l1_idx, - isp_pt_phys_to_virt(l2_pt), - (u64)l2_pt, l2_idx, isp_virt, - pte); -} - -static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu, - phys_addr_t l1_pt, unsigned int l1_idx, - unsigned int isp_virt, unsigned int pte) -{ - dev_err(atomisp_dev, "unmap unvalid L1 pte (L2 PT):\n\n" - "\tL1 PT: virt = %p, phys = 0x%llx, " - "idx = %d\n" - "\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n", - 
isp_pt_phys_to_virt(l1_pt), - (u64)l1_pt, l1_idx, (unsigned int)isp_virt, - pte); -} - -static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte) -{ - dev_err(atomisp_dev, "unmap unvalid L1PT:\n\n" - "L1PT = 0x%x\n", (unsigned int)pte); -} - -/* - * Update L2 page table according to isp virtual address and page physical - * address - */ -static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt, - unsigned int l1_idx, phys_addr_t l2_pt, - unsigned int start, unsigned int end, phys_addr_t phys) -{ - unsigned int ptr; - unsigned int idx; - unsigned int pte; - - l2_pt &= ISP_PAGE_MASK; - - start = start & ISP_PAGE_MASK; - end = ISP_PAGE_ALIGN(end); - phys &= ISP_PAGE_MASK; - - ptr = start; - do { - idx = ISP_PTR_TO_L2_IDX(ptr); - - pte = atomisp_get_pte(l2_pt, idx); - - if (ISP_PTE_VALID(mmu, pte)) { - mmu_remap_error(mmu, l1_pt, l1_idx, - l2_pt, idx, ptr, pte, phys); - - /* free all mapped pages */ - free_mmu_map(mmu, start, ptr); - - return -EINVAL; - } - - pte = isp_pgaddr_to_pte_valid(mmu, phys); - - atomisp_set_pte(l2_pt, idx, pte); - mmu->l2_pgt_refcount[l1_idx]++; - ptr += (1U << ISP_L2PT_OFFSET); - phys += (1U << ISP_L2PT_OFFSET); - } while (ptr < end && idx < ISP_L2PT_PTES - 1); - - return 0; -} - -/* - * Update L1 page table according to isp virtual address and page physical - * address - */ -static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt, - unsigned int start, unsigned int end, - phys_addr_t phys) -{ - phys_addr_t l2_pt; - unsigned int ptr, l1_aligned; - unsigned int idx; - unsigned int l2_pte; - int ret; - - l1_pt &= ISP_PAGE_MASK; - - start = start & ISP_PAGE_MASK; - end = ISP_PAGE_ALIGN(end); - phys &= ISP_PAGE_MASK; - - ptr = start; - do { - idx = ISP_PTR_TO_L1_IDX(ptr); - - l2_pte = atomisp_get_pte(l1_pt, idx); - - if (!ISP_PTE_VALID(mmu, l2_pte)) { - l2_pt = alloc_page_table(mmu); - if (l2_pt == NULL_PAGE) { - dev_err(atomisp_dev, - "alloc page table fail.\n"); - - /* free all mapped pages */ - free_mmu_map(mmu, start, 
ptr); - - return -ENOMEM; - } - - l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt); - - atomisp_set_pte(l1_pt, idx, l2_pte); - mmu->l2_pgt_refcount[idx] = 0; - } - - l2_pt = isp_pte_to_pgaddr(mmu, l2_pte); - - l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET); - - if (l1_aligned < end) { - ret = mmu_l2_map(mmu, l1_pt, idx, - l2_pt, ptr, l1_aligned, phys); - phys += (l1_aligned - ptr); - ptr = l1_aligned; - } else { - ret = mmu_l2_map(mmu, l1_pt, idx, - l2_pt, ptr, end, phys); - phys += (end - ptr); - ptr = end; - } - - if (ret) { - dev_err(atomisp_dev, "setup mapping in L2PT fail.\n"); - - /* free all mapped pages */ - free_mmu_map(mmu, start, ptr); - - return -EINVAL; - } - } while (ptr < end && idx < ISP_L1PT_PTES); - - return 0; -} - -/* - * Update page table according to isp virtual address and page physical - * address - */ -static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt, - phys_addr_t phys, unsigned int pgnr) -{ - unsigned int start, end; - phys_addr_t l1_pt; - int ret; - - mutex_lock(&mmu->pt_mutex); - if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { - /* - * allocate 1 new page for L1 page table - */ - l1_pt = alloc_page_table(mmu); - if (l1_pt == NULL_PAGE) { - dev_err(atomisp_dev, "alloc page table fail.\n"); - mutex_unlock(&mmu->pt_mutex); - return -ENOMEM; - } - - /* - * setup L1 page table physical addr to MMU - */ - mmu->base_address = l1_pt; - mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt); - memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES); - } - - l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); - - start = (isp_virt) & ISP_PAGE_MASK; - end = start + (pgnr << ISP_PAGE_OFFSET); - phys &= ISP_PAGE_MASK; - - ret = mmu_l1_map(mmu, l1_pt, start, end, phys); - - if (ret) - dev_err(atomisp_dev, "setup mapping in L1PT fail.\n"); - - mutex_unlock(&mmu->pt_mutex); - return ret; -} - -/* - * Free L2 page table according to isp virtual address and page physical - * address - */ -static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t 
l1_pt, - unsigned int l1_idx, phys_addr_t l2_pt, - unsigned int start, unsigned int end) -{ - - unsigned int ptr; - unsigned int idx; - unsigned int pte; - - l2_pt &= ISP_PAGE_MASK; - - start = start & ISP_PAGE_MASK; - end = ISP_PAGE_ALIGN(end); - - ptr = start; - do { - idx = ISP_PTR_TO_L2_IDX(ptr); - - pte = atomisp_get_pte(l2_pt, idx); - - if (!ISP_PTE_VALID(mmu, pte)) - mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx, - l2_pt, idx, ptr, pte); - - atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte); - mmu->l2_pgt_refcount[l1_idx]--; - ptr += (1U << ISP_L2PT_OFFSET); - } while (ptr < end && idx < ISP_L2PT_PTES - 1); - - if (mmu->l2_pgt_refcount[l1_idx] == 0) { - free_page_table(mmu, l2_pt); - atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte); - } -} - -/* - * Free L1 page table according to isp virtual address and page physical - * address - */ -static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt, - unsigned int start, unsigned int end) -{ - phys_addr_t l2_pt; - unsigned int ptr, l1_aligned; - unsigned int idx; - unsigned int l2_pte; - - l1_pt &= ISP_PAGE_MASK; - - start = start & ISP_PAGE_MASK; - end = ISP_PAGE_ALIGN(end); - - ptr = start; - do { - idx = ISP_PTR_TO_L1_IDX(ptr); - - l2_pte = atomisp_get_pte(l1_pt, idx); - - if (!ISP_PTE_VALID(mmu, l2_pte)) { - mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte); - continue; - } - - l2_pt = isp_pte_to_pgaddr(mmu, l2_pte); - - l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET); - - if (l1_aligned < end) { - mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned); - ptr = l1_aligned; - } else { - mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end); - ptr = end; - } - /* - * use the same L2 page next time, so we don't - * need to invalidate and free this PT. 
- */ - /* atomisp_set_pte(l1_pt, idx, NULL_PTE); */ - } while (ptr < end && idx < ISP_L1PT_PTES); -} - -/* - * Free page table according to isp virtual address and page physical - * address - */ -static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt, - unsigned int pgnr) -{ - unsigned int start, end; - phys_addr_t l1_pt; - - mutex_lock(&mmu->pt_mutex); - if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { - mmu_unmap_l1_pt_error(mmu, mmu->l1_pte); - mutex_unlock(&mmu->pt_mutex); - return; - } - - l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); - - start = (isp_virt) & ISP_PAGE_MASK; - end = start + (pgnr << ISP_PAGE_OFFSET); - - mmu_l1_unmap(mmu, l1_pt, start, end); - mutex_unlock(&mmu->pt_mutex); -} - -/* - * Free page tables according to isp start virtual address and end virtual - * address. - */ -static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt, - unsigned int end_isp_virt) -{ - unsigned int pgnr; - unsigned int start, end; - - start = (start_isp_virt) & ISP_PAGE_MASK; - end = (end_isp_virt) & ISP_PAGE_MASK; - pgnr = (end - start) >> ISP_PAGE_OFFSET; - mmu_unmap(mmu, start, pgnr); -} - -int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt, - phys_addr_t phys, unsigned int pgnr) -{ - return mmu_map(mmu, isp_virt, phys, pgnr); -} - -void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt, - unsigned int pgnr) -{ - mmu_unmap(mmu, isp_virt, pgnr); -} - -static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu, - unsigned int start, - unsigned int size) -{ - isp_mmu_flush_tlb(mmu); -} - -/*MMU init for internal structure*/ -int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver) -{ - if (!mmu) /* error */ - return -EINVAL; - if (!driver) /* error */ - return -EINVAL; - - if (!driver->name) - dev_warn(atomisp_dev, "NULL name for MMU driver...\n"); - - mmu->driver = driver; - - if (!driver->tlb_flush_all) { - dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n"); - return -EINVAL; - } - - if 
(!driver->tlb_flush_range) - driver->tlb_flush_range = isp_mmu_flush_tlb_range_default; - - if (!driver->pte_valid_mask) { - dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n"); - return -EINVAL; - } - - mmu->l1_pte = driver->null_pte; - - mutex_init(&mmu->pt_mutex); - - mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE, - ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN, - NULL); - if (!mmu->tbl_cache) - return -ENOMEM; - - return 0; -} - -/*Free L1 and L2 page table*/ -void isp_mmu_exit(struct isp_mmu *mmu) -{ - unsigned int idx; - unsigned int pte; - phys_addr_t l1_pt, l2_pt; - - if (!mmu) - return; - - if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { - dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n", - (unsigned int)mmu->l1_pte); - return; - } - - l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); - - for (idx = 0; idx < ISP_L1PT_PTES; idx++) { - pte = atomisp_get_pte(l1_pt, idx); - - if (ISP_PTE_VALID(mmu, pte)) { - l2_pt = isp_pte_to_pgaddr(mmu, pte); - - free_page_table(mmu, l2_pt); - } - } - - free_page_table(mmu, l1_pt); - - kmem_cache_destroy(mmu->tbl_cache); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c deleted file mode 100644 index c0212564b7c8..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Support for Merrifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2012 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2012 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * - */ -#include "type_support.h" -#include "mmu/isp_mmu.h" -#include "mmu/sh_mmu_mrfld.h" -#include "memory_access/memory_access.h" -#include "atomisp_compat.h" - -#define MERR_VALID_PTE_MASK 0x80000000 - -/* - * include SH header file here - */ - -static unsigned int sh_phys_to_pte(struct isp_mmu *mmu, - phys_addr_t phys) -{ - return phys >> ISP_PAGE_OFFSET; -} - -static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu, - unsigned int pte) -{ - unsigned int mask = mmu->driver->pte_valid_mask; - return (phys_addr_t)((pte & ~mask) << ISP_PAGE_OFFSET); -} - -static unsigned int sh_get_pd_base(struct isp_mmu *mmu, - phys_addr_t phys) -{ - unsigned int pte = sh_phys_to_pte(mmu, phys); - return HOST_ADDRESS(pte); -} - -/* - * callback to flush tlb. - * - * tlb_flush_range will at least flush TLBs containing - * address mapping from addr to addr + size. - * - * tlb_flush_all will flush all TLBs. - * - * tlb_flush_all is must be provided. if tlb_flush_range is - * not valid, it will set to tlb_flush_all by default. - */ -static void sh_tlb_flush(struct isp_mmu *mmu) -{ - atomisp_css_mmu_invalidate_cache(); -} - -struct isp_mmu_client sh_mmu_mrfld = { - .name = "Silicon Hive ISP3000 MMU", - .pte_valid_mask = MERR_VALID_PTE_MASK, - .null_pte = ~MERR_VALID_PTE_MASK, - .get_pd_base = sh_get_pd_base, - .tlb_flush_all = sh_tlb_flush, - .phys_to_pte = sh_phys_to_pte, - .pte_to_phys = sh_pte_to_phys, -}; diff --git a/drivers/staging/media/atomisp/platform/Makefile b/drivers/staging/media/atomisp/platform/Makefile deleted file mode 100644 index 0e3b7e1c81c6..000000000000 --- a/drivers/staging/media/atomisp/platform/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for camera drivers. 
-# - -obj-$(CONFIG_INTEL_ATOMISP) += intel-mid/ diff --git a/drivers/staging/media/atomisp/platform/intel-mid/Makefile b/drivers/staging/media/atomisp/platform/intel-mid/Makefile deleted file mode 100644 index c53db1364e21..000000000000 --- a/drivers/staging/media/atomisp/platform/intel-mid/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# -# Makefile for intel-mid devices. -# -obj-$(CONFIG_INTEL_ATOMISP) += atomisp_gmin_platform.o diff --git a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c deleted file mode 100644 index 70c34de98707..000000000000 --- a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c +++ /dev/null @@ -1,779 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../../include/linux/atomisp_platform.h" -#include "../../include/linux/atomisp_gmin_platform.h" - -#define MAX_SUBDEVS 8 - -#define VLV2_CLK_PLL_19P2MHZ 1 /* XTAL on CHT */ -#define ELDO1_SEL_REG 0x19 -#define ELDO1_1P8V 0x16 -#define ELDO1_CTRL_SHIFT 0x00 -#define ELDO2_SEL_REG 0x1a -#define ELDO2_1P8V 0x16 -#define ELDO2_CTRL_SHIFT 0x01 - -struct gmin_subdev { - struct v4l2_subdev *subdev; - int clock_num; - int clock_src; - bool clock_on; - struct clk *pmc_clk; - struct gpio_desc *gpio0; - struct gpio_desc *gpio1; - struct regulator *v1p8_reg; - struct regulator *v2p8_reg; - struct regulator *v1p2_reg; - struct regulator *v2p8_vcm_reg; - enum atomisp_camera_port csi_port; - unsigned int csi_lanes; - enum atomisp_input_format csi_fmt; - enum atomisp_bayer_order csi_bayer; - bool v1p8_on; - bool v2p8_on; - bool v1p2_on; - bool v2p8_vcm_on; -}; - -static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS]; - -static enum { PMIC_UNSET = 0, PMIC_REGULATOR, PMIC_AXP, PMIC_TI, - PMIC_CRYSTALCOVE } pmic_id; - -/* The atomisp uses type==0 for the end-of-list marker, so leave space. 
*/ -static struct intel_v4l2_subdev_table pdata_subdevs[MAX_SUBDEVS + 1]; - -static const struct atomisp_platform_data pdata = { - .subdevs = pdata_subdevs, -}; - -/* - * Something of a hack. The ECS E7 board drives camera 2.8v from an - * external regulator instead of the PMIC. There's a gmin_CamV2P8 - * config variable that specifies the GPIO to handle this particular - * case, but this needs a broader architecture for handling camera - * power. - */ -enum { V2P8_GPIO_UNSET = -2, V2P8_GPIO_NONE = -1 }; -static int v2p8_gpio = V2P8_GPIO_UNSET; - -/* - * Something of a hack. The CHT RVP board drives camera 1.8v from an - * external regulator instead of the PMIC just like ECS E7 board, see the - * comments above. - */ -enum { V1P8_GPIO_UNSET = -2, V1P8_GPIO_NONE = -1 }; -static int v1p8_gpio = V1P8_GPIO_UNSET; - -static LIST_HEAD(vcm_devices); -static DEFINE_MUTEX(vcm_lock); - -static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev); - -/* - * Legacy/stub behavior copied from upstream platform_camera.c. The - * atomisp driver relies on these values being non-NULL in a few - * places, even though they are hard-coded in all current - * implementations. 
- */ -const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void) -{ - static const struct atomisp_camera_caps caps = { - .sensor_num = 1, - .sensor = { - { .stream_num = 1, }, - }, - }; - return ∩︀ -} -EXPORT_SYMBOL_GPL(atomisp_get_default_camera_caps); - -const struct atomisp_platform_data *atomisp_get_platform_data(void) -{ - return &pdata; -} -EXPORT_SYMBOL_GPL(atomisp_get_platform_data); - -int atomisp_register_i2c_module(struct v4l2_subdev *subdev, - struct camera_sensor_platform_data *plat_data, - enum intel_v4l2_subdev_type type) -{ - int i; - struct i2c_board_info *bi; - struct gmin_subdev *gs; - struct i2c_client *client = v4l2_get_subdevdata(subdev); - struct acpi_device *adev = ACPI_COMPANION(&client->dev); - - dev_info(&client->dev, "register atomisp i2c module type %d\n", type); - - /* The windows driver model (and thus most BIOSes by default) - * uses ACPI runtime power management for camera devices, but - * we don't. Disable it, or else the rails will be needlessly - * tickled during suspend/resume. This has caused power and - * performance issues on multiple devices. - */ - adev->power.flags.power_resources = 0; - - for (i = 0; i < MAX_SUBDEVS; i++) - if (!pdata.subdevs[i].type) - break; - - if (pdata.subdevs[i].type) - return -ENOMEM; - - /* Note subtlety of initialization order: at the point where - * this registration API gets called, the platform data - * callbacks have probably already been invoked, so the - * gmin_subdev struct is already initialized for us. 
- */ - gs = find_gmin_subdev(subdev); - - pdata.subdevs[i].type = type; - pdata.subdevs[i].port = gs->csi_port; - pdata.subdevs[i].subdev = subdev; - pdata.subdevs[i].v4l2_subdev.i2c_adapter_id = client->adapter->nr; - - /* Convert i2c_client to i2c_board_info */ - bi = &pdata.subdevs[i].v4l2_subdev.board_info; - memcpy(bi->type, client->name, I2C_NAME_SIZE); - bi->flags = client->flags; - bi->addr = client->addr; - bi->irq = client->irq; - bi->platform_data = plat_data; - - return 0; -} -EXPORT_SYMBOL_GPL(atomisp_register_i2c_module); - -struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter, - struct i2c_board_info *board_info) -{ - int i; - - for (i = 0; i < MAX_SUBDEVS && pdata.subdevs[i].type; i++) { - struct intel_v4l2_subdev_table *sd = &pdata.subdevs[i]; - - if (sd->v4l2_subdev.i2c_adapter_id == adapter->nr && - sd->v4l2_subdev.board_info.addr == board_info->addr) - return sd->subdev; - } - return NULL; -} -EXPORT_SYMBOL_GPL(atomisp_gmin_find_subdev); - -int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd) -{ - int i, j; - - if (!sd) - return 0; - - for (i = 0; i < MAX_SUBDEVS; i++) { - if (pdata.subdevs[i].subdev == sd) { - for (j = i + 1; j <= MAX_SUBDEVS; j++) - pdata.subdevs[j - 1] = pdata.subdevs[j]; - } - if (gmin_subdevs[i].subdev == sd) { - if (gmin_subdevs[i].gpio0) - gpiod_put(gmin_subdevs[i].gpio0); - gmin_subdevs[i].gpio0 = NULL; - if (gmin_subdevs[i].gpio1) - gpiod_put(gmin_subdevs[i].gpio1); - gmin_subdevs[i].gpio1 = NULL; - if (pmic_id == PMIC_REGULATOR) { - regulator_put(gmin_subdevs[i].v1p8_reg); - regulator_put(gmin_subdevs[i].v2p8_reg); - regulator_put(gmin_subdevs[i].v1p2_reg); - regulator_put(gmin_subdevs[i].v2p8_vcm_reg); - } - gmin_subdevs[i].subdev = NULL; - } - } - return 0; -} -EXPORT_SYMBOL_GPL(atomisp_gmin_remove_subdev); - -struct gmin_cfg_var { - const char *name, *val; -}; - -static struct gmin_cfg_var ffrd8_vars[] = { - { "INTCF1B:00_ImxId", "0x134" }, - { "INTCF1B:00_CsiPort", "1" }, - { 
"INTCF1B:00_CsiLanes", "4" }, - { "INTCF1B:00_CamClk", "0" }, - {}, -}; - -/* Cribbed from MCG defaults in the mt9m114 driver, not actually verified - * vs. T100 hardware - */ -static struct gmin_cfg_var t100_vars[] = { - { "INT33F0:00_CsiPort", "0" }, - { "INT33F0:00_CsiLanes", "1" }, - { "INT33F0:00_CamClk", "1" }, - {}, -}; - -static struct gmin_cfg_var mrd7_vars[] = { - {"INT33F8:00_CamType", "1"}, - {"INT33F8:00_CsiPort", "1"}, - {"INT33F8:00_CsiLanes", "2"}, - {"INT33F8:00_CsiFmt", "13"}, - {"INT33F8:00_CsiBayer", "0"}, - {"INT33F8:00_CamClk", "0"}, - {"INT33F9:00_CamType", "1"}, - {"INT33F9:00_CsiPort", "0"}, - {"INT33F9:00_CsiLanes", "1"}, - {"INT33F9:00_CsiFmt", "13"}, - {"INT33F9:00_CsiBayer", "0"}, - {"INT33F9:00_CamClk", "1"}, - {}, -}; - -static struct gmin_cfg_var ecs7_vars[] = { - {"INT33BE:00_CsiPort", "1"}, - {"INT33BE:00_CsiLanes", "2"}, - {"INT33BE:00_CsiFmt", "13"}, - {"INT33BE:00_CsiBayer", "2"}, - {"INT33BE:00_CamClk", "0"}, - {"INT33F0:00_CsiPort", "0"}, - {"INT33F0:00_CsiLanes", "1"}, - {"INT33F0:00_CsiFmt", "13"}, - {"INT33F0:00_CsiBayer", "0"}, - {"INT33F0:00_CamClk", "1"}, - {"gmin_V2P8GPIO", "402"}, - {}, -}; - -static struct gmin_cfg_var i8880_vars[] = { - {"XXOV2680:00_CsiPort", "1"}, - {"XXOV2680:00_CsiLanes", "1"}, - {"XXOV2680:00_CamClk", "0"}, - {"XXGC0310:00_CsiPort", "0"}, - {"XXGC0310:00_CsiLanes", "1"}, - {"XXGC0310:00_CamClk", "1"}, - {}, -}; - -static const struct dmi_system_id gmin_vars[] = { - { - .ident = "BYT-T FFD8", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), - }, - .driver_data = ffrd8_vars, - }, - { - .ident = "T100TA", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "T100TA"), - }, - .driver_data = t100_vars, - }, - { - .ident = "MRD7", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "TABLET"), - DMI_MATCH(DMI_BOARD_VERSION, "MRD 7"), - }, - .driver_data = mrd7_vars, - }, - { - .ident = "ST70408", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "ST70408"), - }, - .driver_data = ecs7_vars, - }, - { - .ident = 
"VTA0803", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "VTA0803"), - }, - .driver_data = i8880_vars, - }, - {} -}; - -#define GMIN_CFG_VAR_EFI_GUID EFI_GUID(0xecb54cd9, 0xe5ae, 0x4fdc, \ - 0xa9, 0x71, 0xe8, 0x77, \ - 0x75, 0x60, 0x68, 0xf7) - -#define CFG_VAR_NAME_MAX 64 - -#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */ -static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME]; - -static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev) -{ - int i, ret; - struct device *dev; - struct i2c_client *client = v4l2_get_subdevdata(subdev); - - if (!pmic_id) - pmic_id = PMIC_REGULATOR; - - if (!client) - return NULL; - - dev = &client->dev; - - for (i = 0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++) - ; - if (i >= MAX_SUBDEVS) - return NULL; - - dev_info(dev, - "gmin: initializing atomisp module subdev data.PMIC ID %d\n", - pmic_id); - - gmin_subdevs[i].subdev = subdev; - gmin_subdevs[i].clock_num = gmin_get_var_int(dev, "CamClk", 0); - /*WA:CHT requires XTAL clock as PLL is not stable.*/ - gmin_subdevs[i].clock_src = gmin_get_var_int(dev, "ClkSrc", - VLV2_CLK_PLL_19P2MHZ); - gmin_subdevs[i].csi_port = gmin_get_var_int(dev, "CsiPort", 0); - gmin_subdevs[i].csi_lanes = gmin_get_var_int(dev, "CsiLanes", 1); - - /* get PMC clock with clock framework */ - snprintf(gmin_pmc_clk_name, - sizeof(gmin_pmc_clk_name), - "%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num); - - gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name); - if (IS_ERR(gmin_subdevs[i].pmc_clk)) { - ret = PTR_ERR(gmin_subdevs[i].pmc_clk); - - dev_err(dev, - "Failed to get clk from %s : %d\n", - gmin_pmc_clk_name, - ret); - - return NULL; - } - - /* - * The firmware might enable the clock at - * boot (this information may or may not - * be reflected in the enable clock register). - * To change the rate we must disable the clock - * first to cover these cases. 
Due to common - * clock framework restrictions that do not allow - * to disable a clock that has not been enabled, - * we need to enable the clock first. - */ - ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk); - if (!ret) - clk_disable_unprepare(gmin_subdevs[i].pmc_clk); - - gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); - if (IS_ERR(gmin_subdevs[i].gpio0)) - gmin_subdevs[i].gpio0 = NULL; - - gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW); - if (IS_ERR(gmin_subdevs[i].gpio1)) - gmin_subdevs[i].gpio1 = NULL; - - if (pmic_id == PMIC_REGULATOR) { - gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX"); - gmin_subdevs[i].v2p8_reg = regulator_get(dev, "V2P8SX"); - gmin_subdevs[i].v1p2_reg = regulator_get(dev, "V1P2A"); - gmin_subdevs[i].v2p8_vcm_reg = regulator_get(dev, "VPROG4B"); - - /* Note: ideally we would initialize v[12]p8_on to the - * output of regulator_is_enabled(), but sadly that - * API is broken with the current drivers, returning - * "1" for a regulator that will then emit a - * "unbalanced disable" WARNing if we try to disable - * it. 
- */ - } - - return &gmin_subdevs[i]; -} - -static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev) -{ - int i; - - for (i = 0; i < MAX_SUBDEVS; i++) - if (gmin_subdevs[i].subdev == subdev) - return &gmin_subdevs[i]; - return gmin_subdev_add(subdev); -} - -static int gmin_gpio0_ctrl(struct v4l2_subdev *subdev, int on) -{ - struct gmin_subdev *gs = find_gmin_subdev(subdev); - - if (gs) { - gpiod_set_value(gs->gpio0, on); - return 0; - } - return -EINVAL; -} - -static int gmin_gpio1_ctrl(struct v4l2_subdev *subdev, int on) -{ - struct gmin_subdev *gs = find_gmin_subdev(subdev); - - if (gs) { - gpiod_set_value(gs->gpio1, on); - return 0; - } - return -EINVAL; -} - -static int gmin_v1p2_ctrl(struct v4l2_subdev *subdev, int on) -{ - struct gmin_subdev *gs = find_gmin_subdev(subdev); - - if (!gs || gs->v1p2_on == on) - return 0; - gs->v1p2_on = on; - - if (gs->v1p2_reg) { - if (on) - return regulator_enable(gs->v1p2_reg); - else - return regulator_disable(gs->v1p2_reg); - } - - /*TODO:v1p2 needs to extend to other PMICs*/ - - return -EINVAL; -} - -static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on) -{ - struct gmin_subdev *gs = find_gmin_subdev(subdev); - int ret; - - if (v1p8_gpio == V1P8_GPIO_UNSET) { - v1p8_gpio = gmin_get_var_int(NULL, "V1P8GPIO", V1P8_GPIO_NONE); - if (v1p8_gpio != V1P8_GPIO_NONE) { - pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n", - v1p8_gpio); - ret = gpio_request(v1p8_gpio, "camera_v1p8_en"); - if (!ret) - ret = gpio_direction_output(v1p8_gpio, 0); - if (ret) - pr_err("V1P8 GPIO initialization failed\n"); - } - } - - if (!gs || gs->v1p8_on == on) - return 0; - gs->v1p8_on = on; - - if (v1p8_gpio >= 0) - gpio_set_value(v1p8_gpio, on); - - if (gs->v1p8_reg) { - regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000); - if (on) - return regulator_enable(gs->v1p8_reg); - else - return regulator_disable(gs->v1p8_reg); - } - - return -EINVAL; -} - -static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on) -{ - 
struct gmin_subdev *gs = find_gmin_subdev(subdev); - int ret; - - if (v2p8_gpio == V2P8_GPIO_UNSET) { - v2p8_gpio = gmin_get_var_int(NULL, "V2P8GPIO", V2P8_GPIO_NONE); - if (v2p8_gpio != V2P8_GPIO_NONE) { - pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n", - v2p8_gpio); - ret = gpio_request(v2p8_gpio, "camera_v2p8"); - if (!ret) - ret = gpio_direction_output(v2p8_gpio, 0); - if (ret) - pr_err("V2P8 GPIO initialization failed\n"); - } - } - - if (!gs || gs->v2p8_on == on) - return 0; - gs->v2p8_on = on; - - if (v2p8_gpio >= 0) - gpio_set_value(v2p8_gpio, on); - - if (gs->v2p8_reg) { - regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000); - if (on) - return regulator_enable(gs->v2p8_reg); - else - return regulator_disable(gs->v2p8_reg); - } - - return -EINVAL; -} - -static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on) -{ - int ret = 0; - struct gmin_subdev *gs = find_gmin_subdev(subdev); - struct i2c_client *client = v4l2_get_subdevdata(subdev); - - if (gs->clock_on == !!on) - return 0; - - if (on) { - ret = clk_set_rate(gs->pmc_clk, gs->clock_src); - - if (ret) - dev_err(&client->dev, "unable to set PMC rate %d\n", - gs->clock_src); - - ret = clk_prepare_enable(gs->pmc_clk); - if (ret == 0) - gs->clock_on = true; - } else { - clk_disable_unprepare(gs->pmc_clk); - gs->clock_on = false; - } - - return ret; -} - -static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct gmin_subdev *gs = find_gmin_subdev(sd); - - if (!client || !gs) - return -ENODEV; - - return camera_sensor_csi(sd, gs->csi_port, gs->csi_lanes, - gs->csi_fmt, gs->csi_bayer, flag); -} - -static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev, - char *camera_module) -{ - struct i2c_client *client = v4l2_get_subdevdata(subdev); - struct gmin_subdev *gs = find_gmin_subdev(subdev); - struct camera_vcm_control *vcm; - - if (client == NULL || gs == NULL) - return NULL; - - if (!camera_module) - 
return NULL; - - mutex_lock(&vcm_lock); - list_for_each_entry(vcm, &vcm_devices, list) { - if (!strcmp(camera_module, vcm->camera_module)) { - mutex_unlock(&vcm_lock); - return vcm; - } - } - - mutex_unlock(&vcm_lock); - return NULL; -} - -static struct camera_sensor_platform_data gmin_plat = { - .gpio0_ctrl = gmin_gpio0_ctrl, - .gpio1_ctrl = gmin_gpio1_ctrl, - .v1p8_ctrl = gmin_v1p8_ctrl, - .v2p8_ctrl = gmin_v2p8_ctrl, - .v1p2_ctrl = gmin_v1p2_ctrl, - .flisclk_ctrl = gmin_flisclk_ctrl, - .csi_cfg = gmin_csi_cfg, - .get_vcm_ctrl = gmin_get_vcm_ctrl, -}; - -struct camera_sensor_platform_data *gmin_camera_platform_data( - struct v4l2_subdev *subdev, - enum atomisp_input_format csi_format, - enum atomisp_bayer_order csi_bayer) -{ - struct gmin_subdev *gs = find_gmin_subdev(subdev); - - gs->csi_fmt = csi_format; - gs->csi_bayer = csi_bayer; - - return &gmin_plat; -} -EXPORT_SYMBOL_GPL(gmin_camera_platform_data); - -int atomisp_gmin_register_vcm_control(struct camera_vcm_control *vcmCtrl) -{ - if (!vcmCtrl) - return -EINVAL; - - mutex_lock(&vcm_lock); - list_add_tail(&vcmCtrl->list, &vcm_devices); - mutex_unlock(&vcm_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(atomisp_gmin_register_vcm_control); - -static int gmin_get_hardcoded_var(struct gmin_cfg_var *varlist, - const char *var8, char *out, size_t *out_len) -{ - struct gmin_cfg_var *gv; - - for (gv = varlist; gv->name; gv++) { - size_t vl; - - if (strcmp(var8, gv->name)) - continue; - - vl = strlen(gv->val); - if (vl > *out_len - 1) - return -ENOSPC; - - strcpy(out, gv->val); - *out_len = vl; - return 0; - } - - return -EINVAL; -} - -/* Retrieves a device-specific configuration variable. The dev - * argument should be a device with an ACPI companion, as all - * configuration is based on firmware ID. 
- */ -static int gmin_get_config_var(struct device *dev, const char *var, - char *out, size_t *out_len) -{ - char var8[CFG_VAR_NAME_MAX]; - efi_char16_t var16[CFG_VAR_NAME_MAX]; - struct efivar_entry *ev; - const struct dmi_system_id *id; - int i, ret; - - if (dev && ACPI_COMPANION(dev)) - dev = &ACPI_COMPANION(dev)->dev; - - if (dev) - ret = snprintf(var8, sizeof(var8), "%s_%s", dev_name(dev), var); - else - ret = snprintf(var8, sizeof(var8), "gmin_%s", var); - - if (ret < 0 || ret >= sizeof(var8) - 1) - return -EINVAL; - - /* First check a hard-coded list of board-specific variables. - * Some device firmwares lack the ability to set EFI variables at - * runtime. - */ - id = dmi_first_match(gmin_vars); - if (id) - return gmin_get_hardcoded_var(id->driver_data, var8, out, out_len); - - /* Our variable names are ASCII by construction, but EFI names - * are wide chars. Convert and zero-pad. - */ - memset(var16, 0, sizeof(var16)); - for (i = 0; i < sizeof(var8) && var8[i]; i++) - var16[i] = var8[i]; - - /* Not sure this API usage is kosher; efivar_entry_get()'s - * implementation simply uses VariableName and VendorGuid from - * the struct and ignores the rest, but it seems like there - * ought to be an "official" efivar_entry registered - * somewhere? 
- */ - ev = kzalloc(sizeof(*ev), GFP_KERNEL); - if (!ev) - return -ENOMEM; - memcpy(&ev->var.VariableName, var16, sizeof(var16)); - ev->var.VendorGuid = GMIN_CFG_VAR_EFI_GUID; - ev->var.DataSize = *out_len; - - ret = efivar_entry_get(ev, &ev->var.Attributes, - &ev->var.DataSize, ev->var.Data); - if (ret == 0) { - memcpy(out, ev->var.Data, ev->var.DataSize); - *out_len = ev->var.DataSize; - } else if (dev) { - dev_warn(dev, "Failed to find gmin variable %s\n", var8); - } - - kfree(ev); - - return ret; -} - -int gmin_get_var_int(struct device *dev, const char *var, int def) -{ - char val[CFG_VAR_NAME_MAX]; - size_t len = sizeof(val); - long result; - int ret; - - ret = gmin_get_config_var(dev, var, val, &len); - if (!ret) { - val[len] = 0; - ret = kstrtol(val, 0, &result); - } - - return ret ? def : result; -} -EXPORT_SYMBOL_GPL(gmin_get_var_int); - -int camera_sensor_csi(struct v4l2_subdev *sd, u32 port, - u32 lanes, u32 format, u32 bayer_order, int flag) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct camera_mipi_info *csi = NULL; - - if (flag) { - csi = kzalloc(sizeof(*csi), GFP_KERNEL); - if (!csi) - return -ENOMEM; - csi->port = port; - csi->num_lanes = lanes; - csi->input_format = format; - csi->raw_bayer_order = bayer_order; - v4l2_set_subdev_hostdata(sd, (void *)csi); - csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED; - csi->metadata_effective_width = NULL; - dev_info(&client->dev, - "camera pdata: port: %d lanes: %d order: %8.8x\n", - port, lanes, bayer_order); - } else { - csi = v4l2_get_subdev_hostdata(sd); - kfree(csi); - } - - return 0; -} -EXPORT_SYMBOL_GPL(camera_sensor_csi); - -/* PCI quirk: The BYT ISP advertises PCI runtime PM but it doesn't - * work. Disable so the kernel framework doesn't hang the device - * trying. The driver itself does direct calls to the PUNIT to manage - * ISP power. 
- */ -static void isp_pm_cap_fixup(struct pci_dev *dev) -{ - dev_info(&dev->dev, "Disabling PCI power management on camera ISP\n"); - dev->pm_cap = 0; -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup); -- cgit v1.2.3 From 1fc3b37f34f69ee3fd61ca624fc005fb0bfe3984 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 4 May 2018 10:08:08 -0400 Subject: media: v4l: cadence: Add Cadence MIPI-CSI2 RX driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Cadence CSI-2 RX Controller is an hardware block meant to be used as a bridge between a CSI-2 bus and pixel grabbers. It supports operating with internal or external D-PHY, with up to 4 lanes, or without any D-PHY. The current code only supports the latter case. It also support dynamic mapping of the CSI-2 virtual channels to the associated pixel grabbers, but that isn't allowed at the moment either. Acked-by: Benoit Parrot Reviewed-by: Niklas Söderlund Signed-off-by: Maxime Ripard Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 7 + drivers/media/platform/Kconfig | 1 + drivers/media/platform/Makefile | 1 + drivers/media/platform/cadence/Kconfig | 23 ++ drivers/media/platform/cadence/Makefile | 3 + drivers/media/platform/cadence/cdns-csi2rx.c | 498 +++++++++++++++++++++++++++ 6 files changed, 533 insertions(+) create mode 100644 drivers/media/platform/cadence/Kconfig create mode 100644 drivers/media/platform/cadence/Makefile create mode 100644 drivers/media/platform/cadence/cdns-csi2rx.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index bef6cbe7adc3..519f172c9267 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3136,6 +3136,13 @@ S: Supported F: Documentation/filesystems/caching/cachefiles.txt F: fs/cachefiles/ +CADENCE MIPI-CSI2 BRIDGES +M: Maxime Ripard +L: linux-media@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/media/cdns,*.txt +F: 
drivers/media/platform/cadence/cdns-csi2* + CADET FM/AM RADIO RECEIVER DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 5c3ae58cc40c..46a5431cfac7 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -26,6 +26,7 @@ config VIDEO_VIA_CAMERA # # Platform multimedia device configuration # +source "drivers/media/platform/cadence/Kconfig" source "drivers/media/platform/davinci/Kconfig" diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 932515df4477..04bc1502a30e 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -3,6 +3,7 @@ # Makefile for the video capture/playback device drivers. # +obj-$(CONFIG_VIDEO_CADENCE) += cadence/ obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/ obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/ diff --git a/drivers/media/platform/cadence/Kconfig b/drivers/media/platform/cadence/Kconfig new file mode 100644 index 000000000000..70c95d79c8f7 --- /dev/null +++ b/drivers/media/platform/cadence/Kconfig @@ -0,0 +1,23 @@ +config VIDEO_CADENCE + bool "Cadence Video Devices" + help + If you have a media device designed by Cadence, say Y. + + Note that this option doesn't include new drivers in the kernel: + saying N will just cause Kconfig to skip all the questions about + Cadence media devices. + +if VIDEO_CADENCE + +config VIDEO_CADENCE_CSI2RX + tristate "Cadence MIPI-CSI2 RX Controller" + depends on MEDIA_CONTROLLER + depends on VIDEO_V4L2_SUBDEV_API + select V4L2_FWNODE + help + Support for the Cadence MIPI CSI2 Receiver controller. + + To compile this driver as a module, choose M here: the module will be + called cdns-csi2rx. 
+ +endif diff --git a/drivers/media/platform/cadence/Makefile b/drivers/media/platform/cadence/Makefile new file mode 100644 index 000000000000..388e4f8c3b90 --- /dev/null +++ b/drivers/media/platform/cadence/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_VIDEO_CADENCE_CSI2RX) += cdns-csi2rx.o diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c new file mode 100644 index 000000000000..fe612ec1f99f --- /dev/null +++ b/drivers/media/platform/cadence/cdns-csi2rx.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Cadence MIPI-CSI2 RX Controller v1.3 + * + * Copyright (C) 2017 Cadence Design Systems Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define CSI2RX_DEVICE_CFG_REG 0x000 + +#define CSI2RX_SOFT_RESET_REG 0x004 +#define CSI2RX_SOFT_RESET_PROTOCOL BIT(1) +#define CSI2RX_SOFT_RESET_FRONT BIT(0) + +#define CSI2RX_STATIC_CFG_REG 0x008 +#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane) ((plane) << (16 + (llane) * 4)) +#define CSI2RX_STATIC_CFG_LANES_MASK GENMASK(11, 8) + +#define CSI2RX_STREAM_BASE(n) (((n) + 1) * 0x100) + +#define CSI2RX_STREAM_CTRL_REG(n) (CSI2RX_STREAM_BASE(n) + 0x000) +#define CSI2RX_STREAM_CTRL_START BIT(0) + +#define CSI2RX_STREAM_DATA_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x008) +#define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT BIT(31) +#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16) + +#define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c) +#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF (1 << 8) + +#define CSI2RX_LANES_MAX 4 +#define CSI2RX_STREAMS_MAX 4 + +enum csi2rx_pads { + CSI2RX_PAD_SINK, + CSI2RX_PAD_SOURCE_STREAM0, + CSI2RX_PAD_SOURCE_STREAM1, + CSI2RX_PAD_SOURCE_STREAM2, + CSI2RX_PAD_SOURCE_STREAM3, + CSI2RX_PAD_MAX, +}; + +struct csi2rx_priv { + struct device *dev; + unsigned int count; + + /* + * Used to prevent race 
conditions between multiple, + * concurrent calls to start and stop. + */ + struct mutex lock; + + void __iomem *base; + struct clk *sys_clk; + struct clk *p_clk; + struct clk *pixel_clk[CSI2RX_STREAMS_MAX]; + struct phy *dphy; + + u8 lanes[CSI2RX_LANES_MAX]; + u8 num_lanes; + u8 max_lanes; + u8 max_streams; + bool has_internal_dphy; + + struct v4l2_subdev subdev; + struct v4l2_async_notifier notifier; + struct media_pad pads[CSI2RX_PAD_MAX]; + + /* Remote source */ + struct v4l2_async_subdev asd; + struct v4l2_subdev *source_subdev; + int source_pad; +}; + +static inline +struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev) +{ + return container_of(subdev, struct csi2rx_priv, subdev); +} + +static void csi2rx_reset(struct csi2rx_priv *csi2rx) +{ + writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT, + csi2rx->base + CSI2RX_SOFT_RESET_REG); + + udelay(10); + + writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG); +} + +static int csi2rx_start(struct csi2rx_priv *csi2rx) +{ + unsigned int i; + unsigned long lanes_used = 0; + u32 reg; + int ret; + + ret = clk_prepare_enable(csi2rx->p_clk); + if (ret) + return ret; + + csi2rx_reset(csi2rx); + + reg = csi2rx->num_lanes << 8; + for (i = 0; i < csi2rx->num_lanes; i++) { + reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]); + set_bit(csi2rx->lanes[i], &lanes_used); + } + + /* + * Even the unused lanes need to be mapped. In order to avoid + * to map twice to the same physical lane, keep the lanes used + * in the previous loop, and only map unused physical lanes to + * the rest of our logical lanes. 
+ */ + for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) { + unsigned int idx = find_first_zero_bit(&lanes_used, + sizeof(lanes_used)); + set_bit(idx, &lanes_used); + reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1); + } + + writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG); + + ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true); + if (ret) + goto err_disable_pclk; + + /* + * Create a static mapping between the CSI virtual channels + * and the output stream. + * + * This should be enhanced, but v4l2 lacks the support for + * changing that mapping dynamically. + * + * We also cannot enable and disable independent streams here, + * hence the reference counting. + */ + for (i = 0; i < csi2rx->max_streams; i++) { + ret = clk_prepare_enable(csi2rx->pixel_clk[i]); + if (ret) + goto err_disable_pixclk; + + writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF, + csi2rx->base + CSI2RX_STREAM_CFG_REG(i)); + + writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT | + CSI2RX_STREAM_DATA_CFG_VC_SELECT(i), + csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i)); + + writel(CSI2RX_STREAM_CTRL_START, + csi2rx->base + CSI2RX_STREAM_CTRL_REG(i)); + } + + ret = clk_prepare_enable(csi2rx->sys_clk); + if (ret) + goto err_disable_pixclk; + + clk_disable_unprepare(csi2rx->p_clk); + + return 0; + +err_disable_pixclk: + for (; i >= 0; i--) + clk_disable_unprepare(csi2rx->pixel_clk[i]); + +err_disable_pclk: + clk_disable_unprepare(csi2rx->p_clk); + + return ret; +} + +static void csi2rx_stop(struct csi2rx_priv *csi2rx) +{ + unsigned int i; + + clk_prepare_enable(csi2rx->p_clk); + clk_disable_unprepare(csi2rx->sys_clk); + + for (i = 0; i < csi2rx->max_streams; i++) { + writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i)); + + clk_disable_unprepare(csi2rx->pixel_clk[i]); + } + + clk_disable_unprepare(csi2rx->p_clk); + + if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false)) + dev_warn(csi2rx->dev, "Couldn't disable our subdev\n"); +} + +static int csi2rx_s_stream(struct v4l2_subdev 
*subdev, int enable) +{ + struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev); + int ret = 0; + + mutex_lock(&csi2rx->lock); + + if (enable) { + /* + * If we're not the first users, there's no need to + * enable the whole controller. + */ + if (!csi2rx->count) { + ret = csi2rx_start(csi2rx); + if (ret) + goto out; + } + + csi2rx->count++; + } else { + csi2rx->count--; + + /* + * Let the last user turn off the lights. + */ + if (!csi2rx->count) + csi2rx_stop(csi2rx); + } + +out: + mutex_unlock(&csi2rx->lock); + return ret; +} + +static const struct v4l2_subdev_video_ops csi2rx_video_ops = { + .s_stream = csi2rx_s_stream, +}; + +static const struct v4l2_subdev_ops csi2rx_subdev_ops = { + .video = &csi2rx_video_ops, +}; + +static int csi2rx_async_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *s_subdev, + struct v4l2_async_subdev *asd) +{ + struct v4l2_subdev *subdev = notifier->sd; + struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev); + + csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity, + s_subdev->fwnode, + MEDIA_PAD_FL_SOURCE); + if (csi2rx->source_pad < 0) { + dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n", + s_subdev->name); + return csi2rx->source_pad; + } + + csi2rx->source_subdev = s_subdev; + + dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name, + csi2rx->source_pad); + + return media_create_pad_link(&csi2rx->source_subdev->entity, + csi2rx->source_pad, + &csi2rx->subdev.entity, 0, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); +} + +static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = { + .bound = csi2rx_async_bound, +}; + +static int csi2rx_get_resources(struct csi2rx_priv *csi2rx, + struct platform_device *pdev) +{ + struct resource *res; + unsigned char i; + u32 dev_cfg; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + csi2rx->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(csi2rx->base)) + return PTR_ERR(csi2rx->base); + + 
csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk"); + if (IS_ERR(csi2rx->sys_clk)) { + dev_err(&pdev->dev, "Couldn't get sys clock\n"); + return PTR_ERR(csi2rx->sys_clk); + } + + csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk"); + if (IS_ERR(csi2rx->p_clk)) { + dev_err(&pdev->dev, "Couldn't get P clock\n"); + return PTR_ERR(csi2rx->p_clk); + } + + csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy"); + if (IS_ERR(csi2rx->dphy)) { + dev_err(&pdev->dev, "Couldn't get external D-PHY\n"); + return PTR_ERR(csi2rx->dphy); + } + + /* + * FIXME: Once we'll have external D-PHY support, the check + * will need to be removed. + */ + if (csi2rx->dphy) { + dev_err(&pdev->dev, "External D-PHY not supported yet\n"); + return -EINVAL; + } + + clk_prepare_enable(csi2rx->p_clk); + dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG); + clk_disable_unprepare(csi2rx->p_clk); + + csi2rx->max_lanes = dev_cfg & 7; + if (csi2rx->max_lanes > CSI2RX_LANES_MAX) { + dev_err(&pdev->dev, "Invalid number of lanes: %u\n", + csi2rx->max_lanes); + return -EINVAL; + } + + csi2rx->max_streams = (dev_cfg >> 4) & 7; + if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) { + dev_err(&pdev->dev, "Invalid number of streams: %u\n", + csi2rx->max_streams); + return -EINVAL; + } + + csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false; + + /* + * FIXME: Once we'll have internal D-PHY support, the check + * will need to be removed. 
+ */ + if (csi2rx->has_internal_dphy) { + dev_err(&pdev->dev, "Internal D-PHY not supported yet\n"); + return -EINVAL; + } + + for (i = 0; i < csi2rx->max_streams; i++) { + char clk_name[16]; + + snprintf(clk_name, sizeof(clk_name), "pixel_if%u_clk", i); + csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, clk_name); + if (IS_ERR(csi2rx->pixel_clk[i])) { + dev_err(&pdev->dev, "Couldn't get clock %s\n", clk_name); + return PTR_ERR(csi2rx->pixel_clk[i]); + } + } + + return 0; +} + +static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx) +{ + struct v4l2_fwnode_endpoint v4l2_ep; + struct fwnode_handle *fwh; + struct device_node *ep; + int ret; + + ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0); + if (!ep) + return -EINVAL; + + fwh = of_fwnode_handle(ep); + ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep); + if (ret) { + dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n"); + of_node_put(ep); + return ret; + } + + if (v4l2_ep.bus_type != V4L2_MBUS_CSI2) { + dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n", + v4l2_ep.bus_type); + of_node_put(ep); + return -EINVAL; + } + + memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes, + sizeof(csi2rx->lanes)); + csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes; + if (csi2rx->num_lanes > csi2rx->max_lanes) { + dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n", + csi2rx->num_lanes); + of_node_put(ep); + return -EINVAL; + } + + csi2rx->asd.match.fwnode = fwnode_graph_get_remote_port_parent(fwh); + csi2rx->asd.match_type = V4L2_ASYNC_MATCH_FWNODE; + of_node_put(ep); + + csi2rx->notifier.subdevs = devm_kzalloc(csi2rx->dev, + sizeof(*csi2rx->notifier.subdevs), + GFP_KERNEL); + if (!csi2rx->notifier.subdevs) + return -ENOMEM; + + csi2rx->notifier.subdevs[0] = &csi2rx->asd; + csi2rx->notifier.num_subdevs = 1; + csi2rx->notifier.ops = &csi2rx_notifier_ops; + + return v4l2_async_subdev_notifier_register(&csi2rx->subdev, + &csi2rx->notifier); +} + +static int csi2rx_probe(struct platform_device 
*pdev) +{ + struct csi2rx_priv *csi2rx; + unsigned int i; + int ret; + + csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL); + if (!csi2rx) + return -ENOMEM; + platform_set_drvdata(pdev, csi2rx); + csi2rx->dev = &pdev->dev; + mutex_init(&csi2rx->lock); + + ret = csi2rx_get_resources(csi2rx, pdev); + if (ret) + goto err_free_priv; + + ret = csi2rx_parse_dt(csi2rx); + if (ret) + goto err_free_priv; + + csi2rx->subdev.owner = THIS_MODULE; + csi2rx->subdev.dev = &pdev->dev; + v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops); + v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev); + snprintf(csi2rx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s", + KBUILD_MODNAME, dev_name(&pdev->dev)); + + /* Create our media pads */ + csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) + csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX, + csi2rx->pads); + if (ret) + goto err_free_priv; + + ret = v4l2_async_register_subdev(&csi2rx->subdev); + if (ret < 0) + goto err_free_priv; + + dev_info(&pdev->dev, + "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n", + csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams, + csi2rx->has_internal_dphy ? 
"internal" : "no"); + + return 0; + +err_free_priv: + kfree(csi2rx); + return ret; +} + +static int csi2rx_remove(struct platform_device *pdev) +{ + struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev); + + v4l2_async_unregister_subdev(&csi2rx->subdev); + kfree(csi2rx); + + return 0; +} + +static const struct of_device_id csi2rx_of_table[] = { + { .compatible = "cdns,csi2rx" }, + { }, +}; +MODULE_DEVICE_TABLE(of, csi2rx_of_table); + +static struct platform_driver csi2rx_driver = { + .probe = csi2rx_probe, + .remove = csi2rx_remove, + + .driver = { + .name = "cdns-csi2rx", + .of_match_table = csi2rx_of_table, + }, +}; +module_platform_driver(csi2rx_driver); +MODULE_AUTHOR("Maxime Ripard "); +MODULE_DESCRIPTION("Cadence CSI2-RX controller"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 3de9beeeb4ffc82c9745f095a6a26e0aef492379 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 16 May 2018 14:36:01 -0400 Subject: block: fix MAINTAINERS email for nbd I've been missing stuff because it's been going into my work email which is a black hole. Update to the email I actually use so I stop missing patches and bug reports. Signed-off-by: Josef Bacik Signed-off-by: Jens Axboe --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index df6e9bb2559a..10eca48b11b4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9700,7 +9700,7 @@ S: Maintained F: drivers/net/ethernet/netronome/ NETWORK BLOCK DEVICE (NBD) -M: Josef Bacik +M: Josef Bacik S: Maintained L: linux-block@vger.kernel.org L: nbd@other.debian.org -- cgit v1.2.3 From c90ddb69d4b24bc32edf9c7bcfec85e52427d1c1 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Wed, 16 May 2018 14:48:49 -0500 Subject: MAINTAINERS: update sound/soc/intel maintainers The information for Intel SoC drivers was not updated for several years. Add myself, Liam and Keyon (Jie) as maintainers to get notified of contributions and bug reports. 
As discussed with Mark and Takashi, I'll also monitor alsa-devel and ack Intel patches as necessary. Signed-off-by: Pierre-Louis Bossart Signed-off-by: Mark Brown --- MAINTAINERS | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 2b62e6cf6b1c..fc29793c2f67 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7004,14 +7004,13 @@ L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/i810/ -INTEL ASoC BDW/HSW DRIVERS +INTEL ASoC DRIVERS +M: Pierre-Louis Bossart +M: Liam Girdwood M: Jie Yang L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported -F: sound/soc/intel/common/sst-dsp* -F: sound/soc/intel/common/sst-firmware.c -F: sound/soc/intel/boards/broadwell.c -F: sound/soc/intel/haswell/ +F: sound/soc/intel/ INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support -- cgit v1.2.3 From 1e9d42194e4c8f0ba3f9d4f72b5f54050ddf7a39 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 19 Apr 2018 22:00:07 +0200 Subject: i2c: gpio: move header to platform_data This header only contains platform_data. Move it to the proper directory. 
Signed-off-by: Wolfram Sang Acked-by: Tony Lindgren Acked-by: Lee Jones Acked-by: Robert Jarzmik Acked-by: Mauro Carvalho Chehab Acked-by: James Hogan Acked-by: Greg Ungerer --- MAINTAINERS | 2 +- arch/arm/mach-ks8695/board-acs5k.c | 2 +- arch/arm/mach-omap1/board-htcherald.c | 2 +- arch/arm/mach-pxa/palmz72.c | 2 +- arch/arm/mach-pxa/viper.c | 2 +- arch/arm/mach-sa1100/simpad.c | 2 +- arch/mips/alchemy/board-gpr.c | 2 +- drivers/i2c/busses/i2c-gpio.c | 2 +- drivers/media/platform/marvell-ccic/mmp-driver.c | 2 +- drivers/mfd/sm501.c | 2 +- include/linux/i2c-gpio.h | 34 ------------------------ include/linux/platform_data/i2c-gpio.h | 34 ++++++++++++++++++++++++ 12 files changed, 44 insertions(+), 44 deletions(-) delete mode 100644 include/linux/i2c-gpio.h create mode 100644 include/linux/platform_data/i2c-gpio.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 58b9861ccf99..38760fcce99a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5878,7 +5878,7 @@ GENERIC GPIO I2C DRIVER M: Haavard Skinnemoen S: Supported F: drivers/i2c/busses/i2c-gpio.c -F: include/linux/i2c-gpio.h +F: include/linux/platform_data/i2c-gpio.h GENERIC GPIO I2C MULTIPLEXER DRIVER M: Peter Korsgaard diff --git a/arch/arm/mach-ks8695/board-acs5k.c b/arch/arm/mach-ks8695/board-acs5k.c index 937eb1d47e7b..ef835d82cdb9 100644 --- a/arch/arm/mach-ks8695/board-acs5k.c +++ b/arch/arm/mach-ks8695/board-acs5k.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c index 67d46690a56e..da8f3fc3180f 100644 --- a/arch/arm/mach-omap1/board-htcherald.c +++ b/arch/arm/mach-omap1/board-htcherald.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 5877e547cecd..c053c8ce1586 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c @@ -30,7 
+30,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 90d0f277de55..39e05b7008d8 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c index ace010479eb6..49a61e6f3c5f 100644 --- a/arch/arm/mach-sa1100/simpad.c +++ b/arch/arm/mach-sa1100/simpad.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include "generic.h" diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c index 4e79dbd54a33..fa75d75b5ba9 100644 --- a/arch/mips/alchemy/board-gpr.c +++ b/arch/mips/alchemy/board-gpr.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 58abb3eced58..005e6e0330c2 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index 816f4b6a7b8e..d9f0dd0d3525 100644 --- a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index ad774161a22d..66af659b01b2 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/linux/i2c-gpio.h b/include/linux/i2c-gpio.h deleted file mode 100644 index 352c1426fd4d..000000000000 --- a/include/linux/i2c-gpio.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * i2c-gpio interface to platform code - * - * Copyright 
(C) 2007 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef _LINUX_I2C_GPIO_H -#define _LINUX_I2C_GPIO_H - -/** - * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio - * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz - * @timeout: clock stretching timeout in jiffies. If the slave keeps - * SCL low for longer than this, the transfer will time out. - * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin - * isn't actively driven high when setting the output value high. - * gpio_get_value() must return the actual pin state even if the - * pin is configured as an output. - * @scl_is_open_drain: SCL is set up as open drain. Same requirements - * as for sda_is_open_drain apply. - * @scl_is_output_only: SCL output drivers cannot be turned off. - */ -struct i2c_gpio_platform_data { - int udelay; - int timeout; - unsigned int sda_is_open_drain:1; - unsigned int scl_is_open_drain:1; - unsigned int scl_is_output_only:1; -}; - -#endif /* _LINUX_I2C_GPIO_H */ diff --git a/include/linux/platform_data/i2c-gpio.h b/include/linux/platform_data/i2c-gpio.h new file mode 100644 index 000000000000..352c1426fd4d --- /dev/null +++ b/include/linux/platform_data/i2c-gpio.h @@ -0,0 +1,34 @@ +/* + * i2c-gpio interface to platform code + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_I2C_GPIO_H +#define _LINUX_I2C_GPIO_H + +/** + * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio + * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz + * @timeout: clock stretching timeout in jiffies. 
If the slave keeps + * SCL low for longer than this, the transfer will time out. + * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin + * isn't actively driven high when setting the output value high. + * gpio_get_value() must return the actual pin state even if the + * pin is configured as an output. + * @scl_is_open_drain: SCL is set up as open drain. Same requirements + * as for sda_is_open_drain apply. + * @scl_is_output_only: SCL output drivers cannot be turned off. + */ +struct i2c_gpio_platform_data { + int udelay; + int timeout; + unsigned int sda_is_open_drain:1; + unsigned int scl_is_open_drain:1; + unsigned int scl_is_output_only:1; +}; + +#endif /* _LINUX_I2C_GPIO_H */ -- cgit v1.2.3 From 62ea22c4954f5b147488eefa644d668e843be6f7 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 19 Apr 2018 22:00:08 +0200 Subject: i2c: mux: gpio: move header to platform_data This header only contains platform_data. Move it to the proper directory. Signed-off-by: Wolfram Sang Acked-by: Peter Korsgaard --- Documentation/i2c/muxes/i2c-mux-gpio | 4 +-- MAINTAINERS | 2 +- drivers/i2c/busses/i2c-i801.c | 2 +- drivers/i2c/muxes/i2c-mux-gpio.c | 2 +- include/linux/i2c-mux-gpio.h | 43 ------------------------------ include/linux/platform_data/i2c-mux-gpio.h | 43 ++++++++++++++++++++++++++++++ 6 files changed, 48 insertions(+), 48 deletions(-) delete mode 100644 include/linux/i2c-mux-gpio.h create mode 100644 include/linux/platform_data/i2c-mux-gpio.h (limited to 'MAINTAINERS') diff --git a/Documentation/i2c/muxes/i2c-mux-gpio b/Documentation/i2c/muxes/i2c-mux-gpio index 7a8d7d261632..893ecdfe6e43 100644 --- a/Documentation/i2c/muxes/i2c-mux-gpio +++ b/Documentation/i2c/muxes/i2c-mux-gpio @@ -30,12 +30,12 @@ i2c-mux-gpio uses the platform bus, so you need to provide a struct platform_device with the platform_data pointing to a struct i2c_mux_gpio_platform_data with the I2C adapter number of the master bus, the number of bus segments to create and the GPIO 
pins used -to control it. See include/linux/i2c-mux-gpio.h for details. +to control it. See include/linux/platform_data/i2c-mux-gpio.h for details. E.G. something like this for a MUX providing 4 bus segments controlled through 3 GPIO pins: -#include +#include #include static const unsigned myboard_gpiomux_gpios[] = { diff --git a/MAINTAINERS b/MAINTAINERS index 38760fcce99a..894f2bf9c9ba 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5885,7 +5885,7 @@ M: Peter Korsgaard L: linux-i2c@vger.kernel.org S: Supported F: drivers/i2c/muxes/i2c-mux-gpio.c -F: include/linux/i2c-mux-gpio.h +F: include/linux/platform_data/i2c-mux-gpio.h F: Documentation/i2c/muxes/i2c-mux-gpio GENERIC HDLC (WAN) DRIVERS diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index e0d59e9ff3c6..bff160d1ce3f 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -106,7 +106,7 @@ #if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI #include -#include +#include #endif /* I801 SMBus address offsets */ diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c index 1a9973ede443..15a7cc0459fb 100644 --- a/drivers/i2c/muxes/i2c-mux-gpio.c +++ b/drivers/i2c/muxes/i2c-mux-gpio.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include #include diff --git a/include/linux/i2c-mux-gpio.h b/include/linux/i2c-mux-gpio.h deleted file mode 100644 index 4406108201fe..000000000000 --- a/include/linux/i2c-mux-gpio.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * i2c-mux-gpio interface to platform code - * - * Peter Korsgaard - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _LINUX_I2C_MUX_GPIO_H -#define _LINUX_I2C_MUX_GPIO_H - -/* MUX has no specific idle mode */ -#define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1) - -/** - * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio - * @parent: Parent I2C bus adapter number - * @base_nr: Base I2C bus number to number adapters from or zero for dynamic - * @values: Array of bitmasks of GPIO settings (low/high) for each - * position - * @n_values: Number of multiplexer positions (busses to instantiate) - * @classes: Optional I2C auto-detection classes - * @gpio_chip: Optional GPIO chip name; if set, GPIO pin numbers are given - * relative to the base GPIO number of that chip - * @gpios: Array of GPIO numbers used to control MUX - * @n_gpios: Number of GPIOs used to control MUX - * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used - */ -struct i2c_mux_gpio_platform_data { - int parent; - int base_nr; - const unsigned *values; - int n_values; - const unsigned *classes; - char *gpio_chip; - const unsigned *gpios; - int n_gpios; - unsigned idle; -}; - -#endif /* _LINUX_I2C_MUX_GPIO_H */ diff --git a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h new file mode 100644 index 000000000000..4406108201fe --- /dev/null +++ b/include/linux/platform_data/i2c-mux-gpio.h @@ -0,0 +1,43 @@ +/* + * i2c-mux-gpio interface to platform code + * + * Peter Korsgaard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_I2C_MUX_GPIO_H +#define _LINUX_I2C_MUX_GPIO_H + +/* MUX has no specific idle mode */ +#define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1) + +/** + * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio + * @parent: Parent I2C bus adapter number + * @base_nr: Base I2C bus number to number adapters from or zero for dynamic + * @values: Array of bitmasks of GPIO settings (low/high) for each + * position + * @n_values: Number of multiplexer positions (busses to instantiate) + * @classes: Optional I2C auto-detection classes + * @gpio_chip: Optional GPIO chip name; if set, GPIO pin numbers are given + * relative to the base GPIO number of that chip + * @gpios: Array of GPIO numbers used to control MUX + * @n_gpios: Number of GPIOs used to control MUX + * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used + */ +struct i2c_mux_gpio_platform_data { + int parent; + int base_nr; + const unsigned *values; + int n_values; + const unsigned *classes; + char *gpio_chip; + const unsigned *gpios; + int n_gpios; + unsigned idle; +}; + +#endif /* _LINUX_I2C_MUX_GPIO_H */ -- cgit v1.2.3 From 79fc540fd543f47e77e1c7d407f2c082872a4625 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 19 Apr 2018 22:00:10 +0200 Subject: i2c: omap: move header to platform_data This header only contains platform_data. Move it to the proper directory. 
Signed-off-by: Wolfram Sang Acked-by: Tony Lindgren --- MAINTAINERS | 4 +-- arch/arm/mach-omap1/common.h | 2 +- arch/arm/mach-omap1/i2c.c | 2 +- arch/arm/mach-omap2/common.h | 2 +- arch/arm/mach-omap2/omap_hwmod_2420_data.c | 2 +- arch/arm/mach-omap2/omap_hwmod_2430_data.c | 2 +- arch/arm/mach-omap2/omap_hwmod_33xx_data.c | 2 +- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 2 +- arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 2 +- arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 2 +- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 2 +- drivers/i2c/busses/i2c-omap.c | 2 +- include/linux/i2c-omap.h | 39 ------------------------------ include/linux/platform_data/i2c-omap.h | 39 ++++++++++++++++++++++++++++++ 14 files changed, 52 insertions(+), 52 deletions(-) delete mode 100644 include/linux/i2c-omap.h create mode 100644 include/linux/platform_data/i2c-omap.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 894f2bf9c9ba..e4a6c963bdcf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10245,7 +10245,7 @@ F: arch/arm/mach-omap1/ F: arch/arm/plat-omap/ F: arch/arm/configs/omap1_defconfig F: drivers/i2c/busses/i2c-omap.c -F: include/linux/i2c-omap.h +F: include/linux/platform_data/i2c-omap.h OMAP2+ SUPPORT M: Tony Lindgren @@ -10277,7 +10277,7 @@ F: drivers/regulator/tps65218-regulator.c F: drivers/regulator/tps65910-regulator.c F: drivers/regulator/twl-regulator.c F: drivers/regulator/twl6030-regulator.c -F: include/linux/i2c-omap.h +F: include/linux/platform_data/i2c-omap.h ONION OMEGA2+ BOARD M: Harvey Hunt diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h index d83ff257eaa8..c6537d2c2859 100644 --- a/arch/arm/mach-omap1/common.h +++ b/arch/arm/mach-omap1/common.h @@ -27,7 +27,7 @@ #define __ARCH_ARM_MACH_OMAP1_COMMON_H #include -#include +#include #include #include diff --git a/arch/arm/mach-omap1/i2c.c b/arch/arm/mach-omap1/i2c.c index 5bdf3c4190f9..9250f263ac51 100644 --- a/arch/arm/mach-omap1/i2c.c +++ b/arch/arm/mach-omap1/i2c.c @@ 
-20,7 +20,7 @@ */ #include -#include +#include #include #include "soc.h" diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index fbe0b78bf489..ed1a7e2f176a 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c index fe66cf247874..d684fac8f592 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c @@ -13,7 +13,7 @@ * XXX these should be marked initdata for multi-OMAP kernels */ -#include +#include #include #include "omap_hwmod.h" diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c index 74eefd30518c..abef9f6f9bf5 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c @@ -13,7 +13,7 @@ * XXX these should be marked initdata for multi-OMAP kernels */ -#include +#include #include #include diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 53e1ac3724f2..c9483bc06228 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -14,7 +14,7 @@ * GNU General Public License for more details. 
*/ -#include +#include #include "omap_hwmod.h" #include "omap_hwmod_common_data.h" diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 23336b6c7125..9c0953de24da 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -15,7 +15,7 @@ * XXX these should be marked initdata for multi-OMAP kernels */ -#include +#include #include #include diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index e4f8ae9cd637..9e4b4243fec7 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c index c72cd84b07ec..890c789485d3 100644 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 62352d1e6361..56b141fce973 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b9172f08fd05..65d06a819307 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h deleted file mode 100644 index 3444265ee8ee..000000000000 --- a/include/linux/i2c-omap.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __I2C_OMAP_H__ -#define __I2C_OMAP_H__ - -#include - -/* - * Version 2 of the I2C peripheral unit has a different 
register - * layout and extra registers. The ID register in the V2 peripheral - * unit on the OMAP4430 reports the same ID as the V1 peripheral - * unit on the OMAP3530, so we must inform the driver which IP - * version we know it is running on from platform / cpu-specific - * code using these constants in the hwmod class definition. - */ - -#define OMAP_I2C_IP_VERSION_1 1 -#define OMAP_I2C_IP_VERSION_2 2 - -/* struct omap_i2c_bus_platform_data .flags meanings */ - -#define OMAP_I2C_FLAG_NO_FIFO BIT(0) -#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) -#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) -#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) -#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) -/* how the CPU address bus must be translated for I2C unit access */ -#define OMAP_I2C_FLAG_BUS_SHIFT_NONE 0 -#define OMAP_I2C_FLAG_BUS_SHIFT_1 BIT(7) -#define OMAP_I2C_FLAG_BUS_SHIFT_2 BIT(8) -#define OMAP_I2C_FLAG_BUS_SHIFT__SHIFT 7 - -struct omap_i2c_bus_platform_data { - u32 clkrate; - u32 rev; - u32 flags; - void (*set_mpu_wkup_lat)(struct device *dev, long set); -}; - -#endif diff --git a/include/linux/platform_data/i2c-omap.h b/include/linux/platform_data/i2c-omap.h new file mode 100644 index 000000000000..3444265ee8ee --- /dev/null +++ b/include/linux/platform_data/i2c-omap.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __I2C_OMAP_H__ +#define __I2C_OMAP_H__ + +#include + +/* + * Version 2 of the I2C peripheral unit has a different register + * layout and extra registers. The ID register in the V2 peripheral + * unit on the OMAP4430 reports the same ID as the V1 peripheral + * unit on the OMAP3530, so we must inform the driver which IP + * version we know it is running on from platform / cpu-specific + * code using these constants in the hwmod class definition. 
+ */ + +#define OMAP_I2C_IP_VERSION_1 1 +#define OMAP_I2C_IP_VERSION_2 2 + +/* struct omap_i2c_bus_platform_data .flags meanings */ + +#define OMAP_I2C_FLAG_NO_FIFO BIT(0) +#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) +#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) +#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) +#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) +/* how the CPU address bus must be translated for I2C unit access */ +#define OMAP_I2C_FLAG_BUS_SHIFT_NONE 0 +#define OMAP_I2C_FLAG_BUS_SHIFT_1 BIT(7) +#define OMAP_I2C_FLAG_BUS_SHIFT_2 BIT(8) +#define OMAP_I2C_FLAG_BUS_SHIFT__SHIFT 7 + +struct omap_i2c_bus_platform_data { + u32 clkrate; + u32 rev; + u32 flags; + void (*set_mpu_wkup_lat)(struct device *dev, long set); +}; + +#endif -- cgit v1.2.3 From 782e6769c0df744e773dc2acff71c974b3bba4e9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Apr 2018 15:24:51 +0200 Subject: dma-mapping: provide a generic dma-noncoherent implementation Add a new dma_map_ops implementation that uses dma-direct for the address mapping of streaming mappings, and which requires arch-specific implemenations of coherent allocate/free. Architectures have to provide flushing helpers to ownership trasnfers to the device and/or CPU, and can provide optional implementations of the coherent mmap functionality, and the cache_flush routines for non-coherent long term allocations. 
Signed-off-by: Christoph Hellwig Tested-by: Alexey Brodkin Acked-by: Vineet Gupta --- MAINTAINERS | 2 + include/asm-generic/dma-mapping.h | 9 ++++ include/linux/dma-direct.h | 7 ++- include/linux/dma-mapping.h | 1 + include/linux/dma-noncoherent.h | 47 ++++++++++++++++++ lib/Kconfig | 20 ++++++++ lib/Makefile | 1 + lib/dma-direct.c | 8 +-- lib/dma-noncoherent.c | 102 ++++++++++++++++++++++++++++++++++++++ 9 files changed, 192 insertions(+), 5 deletions(-) create mode 100644 include/linux/dma-noncoherent.h create mode 100644 lib/dma-noncoherent.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 79bb02ff812f..08d0d15d4958 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4334,12 +4334,14 @@ W: http://git.infradead.org/users/hch/dma-mapping.git S: Supported F: lib/dma-debug.c F: lib/dma-direct.c +F: lib/dma-noncoherent.c F: lib/dma-virt.c F: drivers/base/dma-mapping.c F: drivers/base/dma-coherent.c F: include/asm-generic/dma-mapping.h F: include/linux/dma-direct.h F: include/linux/dma-mapping.h +F: include/linux/dma-noncoherent.h DME1737 HARDWARE MONITOR DRIVER M: Juerg Haefliger diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index 880a292d792f..ad2868263867 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h @@ -4,7 +4,16 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { + /* + * Use the non-coherent ops if available. If an architecture wants a + * more fine-grained selection of operations it will have to implement + * get_arch_dma_ops itself or use the per-device dma_ops. 
+ */ +#ifdef CONFIG_DMA_NONCOHERENT_OPS + return &dma_noncoherent_ops; +#else return &dma_direct_ops; +#endif } #endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 53ad6a47f513..8d9f33febde5 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -59,6 +59,11 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); +dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs); int dma_direct_supported(struct device *dev, u64 mask); - +int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr); #endif /* _LINUX_DMA_DIRECT_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 25a9a2b04f78..4be070df5fc5 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -136,6 +136,7 @@ struct dma_map_ops { }; extern const struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_noncoherent_ops; extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h new file mode 100644 index 000000000000..10b2654d549b --- /dev/null +++ b/include/linux/dma-noncoherent.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DMA_NONCOHERENT_H +#define _LINUX_DMA_NONCOHERENT_H 1 + +#include + +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs); +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs); + +#ifdef CONFIG_DMA_NONCOHERENT_MMAP +int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); +#else +#define arch_dma_mmap NULL +#endif /* CONFIG_DMA_NONCOHERENT_MMAP */ + +#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC +void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); +#else +#define arch_dma_cache_sync NULL +#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */ + +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir); +#else +static inline void arch_sync_dma_for_device(struct device *dev, + phys_addr_t paddr, size_t size, enum dma_data_direction dir) +{ +} +#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */ + +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir); +#else +static inline void arch_sync_dma_for_cpu(struct device *dev, + phys_addr_t paddr, size_t size, enum dma_data_direction dir) +{ +} +#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */ + +#endif /* _LINUX_DMA_NONCOHERENT_H */ diff --git a/lib/Kconfig b/lib/Kconfig index 6c4e9d0ce5d1..7a913937888b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -441,10 +441,30 @@ config ARCH_DMA_ADDR_T_64BIT config IOMMU_HELPER bool +config 
ARCH_HAS_SYNC_DMA_FOR_DEVICE + bool + +config ARCH_HAS_SYNC_DMA_FOR_CPU + bool + select NEED_DMA_MAP_STATE + config DMA_DIRECT_OPS bool depends on HAS_DMA +config DMA_NONCOHERENT_OPS + bool + depends on HAS_DMA + select DMA_DIRECT_OPS + +config DMA_NONCOHERENT_MMAP + bool + depends on DMA_NONCOHERENT_OPS + +config DMA_NONCOHERENT_CACHE_SYNC + bool + depends on DMA_NONCOHERENT_OPS + config DMA_VIRT_OPS bool depends on HAS_DMA diff --git a/lib/Makefile b/lib/Makefile index 94203b5eecd4..9f18c8152281 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -30,6 +30,7 @@ lib-$(CONFIG_PRINTK) += dump_stack.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o +lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o lib-y += kobject.o klist.o diff --git a/lib/dma-direct.c b/lib/dma-direct.c index df9e726e0712..b824eb218782 100644 --- a/lib/dma-direct.c +++ b/lib/dma-direct.c @@ -128,7 +128,7 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, free_pages((unsigned long)cpu_addr, page_order); } -static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, +dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { @@ -139,8 +139,8 @@ static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, return dma_addr; } -static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) { int i; struct scatterlist *sg; @@ -175,7 +175,7 @@ int dma_direct_supported(struct device *dev, u64 mask) return 1; } -static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) +int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) 
{ return dma_addr == DIRECT_MAPPING_ERROR; } diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c new file mode 100644 index 000000000000..79e9a757387f --- /dev/null +++ b/lib/dma-noncoherent.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Christoph Hellwig. + * + * DMA operations that map physical memory directly without providing cache + * coherence. + */ +#include +#include +#include +#include +#include + +static void dma_noncoherent_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); +} + +static void dma_noncoherent_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); +} + +static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + dma_addr_t addr; + + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); + if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(dev, page_to_phys(page) + offset, + size, dir); + return addr; +} + +static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs); + if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir); + return nents; +} + +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU +static void dma_noncoherent_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); +} + +static void 
dma_noncoherent_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); +} + +static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir); +} + +static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir); +} +#endif + +const struct dma_map_ops dma_noncoherent_ops = { + .alloc = arch_dma_alloc, + .free = arch_dma_free, + .mmap = arch_dma_mmap, + .sync_single_for_device = dma_noncoherent_sync_single_for_device, + .sync_sg_for_device = dma_noncoherent_sync_sg_for_device, + .map_page = dma_noncoherent_map_page, + .map_sg = dma_noncoherent_map_sg, +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU + .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu, + .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu, + .unmap_page = dma_noncoherent_unmap_page, + .unmap_sg = dma_noncoherent_unmap_sg, +#endif + .dma_supported = dma_direct_supported, + .mapping_error = dma_direct_mapping_error, + .cache_sync = arch_dma_cache_sync, +}; +EXPORT_SYMBOL(dma_noncoherent_ops); -- cgit v1.2.3 From 63e482f172f4680a4ff00305dd140589ae270306 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 20 May 2018 12:03:54 -0300 Subject: MAINTAINERS: drm: fsl-dcu: Update to Alison's NXP email address The freescale.com email domain is not valid anymore, so use the nxp.com domain instead. 
Signed-off-by: Fabio Estevam Signed-off-by: Stefan Agner --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..268bcfd04b14 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4665,7 +4665,7 @@ F: Documentation/devicetree/bindings/display/exynos/ DRM DRIVERS FOR FREESCALE DCU M: Stefan Agner -M: Alison Wang +M: Alison Wang L: dri-devel@lists.freedesktop.org S: Supported F: drivers/gpu/drm/fsl-dcu/ -- cgit v1.2.3 From 877b7cb0b6f283593a663134ee52703f12c895cc Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 19 May 2018 22:31:34 +0200 Subject: net: dsa: mv88e6xxx: Add minimal platform_data support Not all the world uses device tree. Some parts of the world still use platform devices and platform data. Add basic support for probing a Marvell switch via platform data. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- MAINTAINERS | 1 + drivers/net/dsa/mv88e6xxx/chip.c | 56 ++++++++++++++++++++++++++++----- include/linux/platform_data/mv88e6xxx.h | 17 ++++++++++ 3 files changed, 67 insertions(+), 7 deletions(-) create mode 100644 include/linux/platform_data/mv88e6xxx.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 658880464b9d..9f2045a5adac 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8466,6 +8466,7 @@ M: Vivien Didelot L: netdev@vger.kernel.org S: Maintained F: drivers/net/dsa/mv88e6xxx/ +F: linux/platform_data/mv88e6xxx.h F: Documentation/devicetree/bindings/net/dsa/marvell.txt MARVELL ARMADA DRM SUPPORT diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 2bb3f03ee1cb..5b40382036ea 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -4350,6 +4351,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) return -ENOMEM; ds->priv = chip; + 
ds->dev = dev; ds->ops = &mv88e6xxx_switch_ops; ds->ageing_time_min = chip->info->age_time_coeff; ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX; @@ -4364,36 +4366,73 @@ static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) dsa_unregister_switch(chip->ds); } +static const void *pdata_device_get_match_data(struct device *dev) +{ + const struct of_device_id *matches = dev->driver->of_match_table; + const struct dsa_mv88e6xxx_pdata *pdata = dev->platform_data; + + for (; matches->name[0] || matches->type[0] || matches->compatible[0]; + matches++) { + if (!strcmp(pdata->compatible, matches->compatible)) + return matches->data; + } + return NULL; +} + static int mv88e6xxx_probe(struct mdio_device *mdiodev) { + struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data; struct device *dev = &mdiodev->dev; struct device_node *np = dev->of_node; const struct mv88e6xxx_info *compat_info; struct mv88e6xxx_chip *chip; u32 eeprom_len; + int port; int err; - compat_info = of_device_get_match_data(dev); + if (np) + compat_info = of_device_get_match_data(dev); + + if (pdata) { + compat_info = pdata_device_get_match_data(dev); + + if (!pdata->netdev) + return -EINVAL; + + for (port = 0; port < DSA_MAX_PORTS; port++) { + if (!(pdata->enabled_ports & (1 << port))) + continue; + if (strcmp(pdata->cd.port_names[port], "cpu")) + continue; + pdata->cd.netdev[port] = &pdata->netdev->dev; + break; + } + } + if (!compat_info) return -EINVAL; chip = mv88e6xxx_alloc_chip(dev); - if (!chip) - return -ENOMEM; + if (!chip) { + err = -ENOMEM; + goto out; + } chip->info = compat_info; err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); if (err) - return err; + goto out; chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(chip->reset)) - return PTR_ERR(chip->reset); + if (IS_ERR(chip->reset)) { + err = PTR_ERR(chip->reset); + goto out; + } err = mv88e6xxx_detect(chip); if (err) - return err; + goto out; mv88e6xxx_phy_init(chip); 
@@ -4468,6 +4507,9 @@ out_g1_irq: mv88e6xxx_irq_poll_free(chip); mutex_unlock(&chip->reg_lock); out: + if (pdata) + dev_put(pdata->netdev); + return err; } diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h new file mode 100644 index 000000000000..88e91e05f48f --- /dev/null +++ b/include/linux/platform_data/mv88e6xxx.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DSA_MV88E6XXX_H +#define __DSA_MV88E6XXX_H + +#include + +struct dsa_mv88e6xxx_pdata { + /* Must be first, such that dsa_register_switch() can access this + * without gory pointer manipulations + */ + struct dsa_chip_data cd; + const char *compatible; + unsigned int enabled_ports; + struct net_device *netdev; +}; + +#endif -- cgit v1.2.3 From 09f5c8ecc19a15eedf41afe43c5b8e2383229226 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Sun, 20 May 2018 10:47:36 +0530 Subject: MAINTAINERS: Add Actions Semi S900 pinctrl entries Add S900 pinctrl entries under ARCH_ACTIONS Signed-off-by: Manivannan Sadhasivam Signed-off-by: Linus Walleij --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..974c8e86aed4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1130,10 +1130,12 @@ F: arch/arm/mach-actions/ F: arch/arm/boot/dts/owl-* F: arch/arm64/boot/dts/actions/ F: drivers/clocksource/owl-* +F: drivers/pinctrl/actions/* F: drivers/soc/actions/ F: include/dt-bindings/power/owl-* F: include/linux/soc/actions/ F: Documentation/devicetree/bindings/arm/actions.txt +F: Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt F: Documentation/devicetree/bindings/power/actions,owl-sps.txt F: Documentation/devicetree/bindings/timer/actions,owl-timer.txt -- cgit v1.2.3 From 21432a8dd57db16a945969d27150da72f735ac7e Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Mon, 21 May 2018 01:01:52 +0800 Subject: MAINTAINERS: update entry for Mediatek pin controller Add new 
files for the entry Signed-off-by: Sean Wang Signed-off-by: Linus Walleij --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 974c8e86aed4..7b135d48dd1d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11106,6 +11106,7 @@ L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt +F: drivers/pinctrl/mediatek/mtk-eint.* F: drivers/pinctrl/mediatek/pinctrl-mtk-common.* F: drivers/pinctrl/mediatek/pinctrl-mt2701.c F: drivers/pinctrl/mediatek/pinctrl-mt7622.c -- cgit v1.2.3 From d6edc07cca93b8669512501f25d03187ca531f63 Mon Sep 17 00:00:00 2001 From: Alan Tull Date: Wed, 16 May 2018 18:50:07 -0500 Subject: MAINTAINERS: Add driver-api/fpga path Add Documentation/driver-api/fpga path to MAINTAINERS file for fpga. Signed-off-by: Alan Tull Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 4f5d8932a6c8..225289367751 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5577,6 +5577,7 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git Q: http://patchwork.kernel.org/project/linux-fpga/list/ F: Documentation/fpga/ +F: Documentation/driver-api/fpga/ F: Documentation/devicetree/bindings/fpga/ F: drivers/fpga/ F: include/linux/fpga/ -- cgit v1.2.3 From 68afa17322f2c9a0fffca62e7afe9d60b0dff87e Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 11 May 2018 12:43:57 -0400 Subject: media: zoran: move to staging in preparation for removal This driver hasn't been tested in a long, long time. The hardware is ancient and pretty much obsolete. This driver also needs to be converted to newer media frameworks (vb2!) but due to the lack of time and interest that is unlikely to happen. 
So this driver is a prime candidate for removal. If someone is interested in working on this driver to prevent its removal, then please contact the linux-media mailinglist. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 2 +- drivers/media/pci/Kconfig | 1 - drivers/media/pci/Makefile | 1 - drivers/media/pci/zoran/Kconfig | 75 - drivers/media/pci/zoran/Makefile | 7 - drivers/media/pci/zoran/videocodec.c | 403 ---- drivers/media/pci/zoran/videocodec.h | 349 ---- drivers/media/pci/zoran/zoran.h | 402 ---- drivers/media/pci/zoran/zoran_card.c | 1524 --------------- drivers/media/pci/zoran/zoran_card.h | 50 - drivers/media/pci/zoran/zoran_device.c | 1619 ---------------- drivers/media/pci/zoran/zoran_device.h | 91 - drivers/media/pci/zoran/zoran_driver.c | 2849 ---------------------------- drivers/media/pci/zoran/zoran_procfs.c | 221 --- drivers/media/pci/zoran/zoran_procfs.h | 32 - drivers/media/pci/zoran/zr36016.c | 516 ----- drivers/media/pci/zoran/zr36016.h | 107 -- drivers/media/pci/zoran/zr36050.c | 896 --------- drivers/media/pci/zoran/zr36050.h | 179 -- drivers/media/pci/zoran/zr36057.h | 164 -- drivers/media/pci/zoran/zr36060.c | 1006 ---------- drivers/media/pci/zoran/zr36060.h | 216 --- drivers/staging/media/Kconfig | 2 + drivers/staging/media/Makefile | 1 + drivers/staging/media/zoran/Kconfig | 75 + drivers/staging/media/zoran/Makefile | 7 + drivers/staging/media/zoran/TODO | 4 + drivers/staging/media/zoran/videocodec.c | 403 ++++ drivers/staging/media/zoran/videocodec.h | 349 ++++ drivers/staging/media/zoran/zoran.h | 402 ++++ drivers/staging/media/zoran/zoran_card.c | 1524 +++++++++++++++ drivers/staging/media/zoran/zoran_card.h | 50 + drivers/staging/media/zoran/zoran_device.c | 1619 ++++++++++++++++ drivers/staging/media/zoran/zoran_device.h | 91 + drivers/staging/media/zoran/zoran_driver.c | 2849 ++++++++++++++++++++++++++++ drivers/staging/media/zoran/zoran_procfs.c | 221 +++ drivers/staging/media/zoran/zoran_procfs.h 
| 32 + drivers/staging/media/zoran/zr36016.c | 516 +++++ drivers/staging/media/zoran/zr36016.h | 107 ++ drivers/staging/media/zoran/zr36050.c | 896 +++++++++ drivers/staging/media/zoran/zr36050.h | 179 ++ drivers/staging/media/zoran/zr36057.h | 164 ++ drivers/staging/media/zoran/zr36060.c | 1006 ++++++++++ drivers/staging/media/zoran/zr36060.h | 216 +++ 44 files changed, 10714 insertions(+), 10709 deletions(-) delete mode 100644 drivers/media/pci/zoran/Kconfig delete mode 100644 drivers/media/pci/zoran/Makefile delete mode 100644 drivers/media/pci/zoran/videocodec.c delete mode 100644 drivers/media/pci/zoran/videocodec.h delete mode 100644 drivers/media/pci/zoran/zoran.h delete mode 100644 drivers/media/pci/zoran/zoran_card.c delete mode 100644 drivers/media/pci/zoran/zoran_card.h delete mode 100644 drivers/media/pci/zoran/zoran_device.c delete mode 100644 drivers/media/pci/zoran/zoran_device.h delete mode 100644 drivers/media/pci/zoran/zoran_driver.c delete mode 100644 drivers/media/pci/zoran/zoran_procfs.c delete mode 100644 drivers/media/pci/zoran/zoran_procfs.h delete mode 100644 drivers/media/pci/zoran/zr36016.c delete mode 100644 drivers/media/pci/zoran/zr36016.h delete mode 100644 drivers/media/pci/zoran/zr36050.c delete mode 100644 drivers/media/pci/zoran/zr36050.h delete mode 100644 drivers/media/pci/zoran/zr36057.h delete mode 100644 drivers/media/pci/zoran/zr36060.c delete mode 100644 drivers/media/pci/zoran/zr36060.h create mode 100644 drivers/staging/media/zoran/Kconfig create mode 100644 drivers/staging/media/zoran/Makefile create mode 100644 drivers/staging/media/zoran/TODO create mode 100644 drivers/staging/media/zoran/videocodec.c create mode 100644 drivers/staging/media/zoran/videocodec.h create mode 100644 drivers/staging/media/zoran/zoran.h create mode 100644 drivers/staging/media/zoran/zoran_card.c create mode 100644 drivers/staging/media/zoran/zoran_card.h create mode 100644 drivers/staging/media/zoran/zoran_device.c create mode 100644 
drivers/staging/media/zoran/zoran_device.h create mode 100644 drivers/staging/media/zoran/zoran_driver.c create mode 100644 drivers/staging/media/zoran/zoran_procfs.c create mode 100644 drivers/staging/media/zoran/zoran_procfs.h create mode 100644 drivers/staging/media/zoran/zr36016.c create mode 100644 drivers/staging/media/zoran/zr36016.h create mode 100644 drivers/staging/media/zoran/zr36050.c create mode 100644 drivers/staging/media/zoran/zr36050.h create mode 100644 drivers/staging/media/zoran/zr36057.h create mode 100644 drivers/staging/media/zoran/zr36060.c create mode 100644 drivers/staging/media/zoran/zr36060.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 519f172c9267..cbcd5ab4c2f4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15609,7 +15609,7 @@ L: linux-media@vger.kernel.org W: http://mjpeg.sourceforge.net/driver-zoran/ T: hg https://linuxtv.org/hg/v4l-dvb S: Odd Fixes -F: drivers/media/pci/zoran/ +F: drivers/staging/media/zoran/ ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER M: Minchan Kim diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig index 5932e225f9c0..1f09123e2bf9 100644 --- a/drivers/media/pci/Kconfig +++ b/drivers/media/pci/Kconfig @@ -16,7 +16,6 @@ source "drivers/media/pci/sta2x11/Kconfig" source "drivers/media/pci/tw5864/Kconfig" source "drivers/media/pci/tw68/Kconfig" source "drivers/media/pci/tw686x/Kconfig" -source "drivers/media/pci/zoran/Kconfig" endif if MEDIA_ANALOG_TV_SUPPORT diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile index 1c5ab07a8cff..984fa247096d 100644 --- a/drivers/media/pci/Makefile +++ b/drivers/media/pci/Makefile @@ -18,7 +18,6 @@ obj-y += ttpci/ \ intel/ obj-$(CONFIG_VIDEO_IVTV) += ivtv/ -obj-$(CONFIG_VIDEO_ZORAN) += zoran/ obj-$(CONFIG_VIDEO_CX18) += cx18/ obj-$(CONFIG_VIDEO_CX23885) += cx23885/ obj-$(CONFIG_VIDEO_CX25821) += cx25821/ diff --git a/drivers/media/pci/zoran/Kconfig b/drivers/media/pci/zoran/Kconfig deleted file mode 100644 index 
39ec35bd21a5..000000000000 --- a/drivers/media/pci/zoran/Kconfig +++ /dev/null @@ -1,75 +0,0 @@ -config VIDEO_ZORAN - tristate "Zoran ZR36057/36067 Video For Linux" - depends on PCI && I2C_ALGOBIT && VIDEO_V4L2 && VIRT_TO_BUS - depends on !ALPHA - help - Say Y for support for MJPEG capture cards based on the Zoran - 36057/36067 PCI controller chipset. This includes the Iomega - Buz, Pinnacle DC10+ and the Linux Media Labs LML33. There is - a driver homepage at . For - more information, check . - - To compile this driver as a module, choose M here: the - module will be called zr36067. - -config VIDEO_ZORAN_DC30 - tristate "Pinnacle/Miro DC30(+) support" - depends on VIDEO_ZORAN - select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT - select VIDEO_VPX3220 if MEDIA_SUBDRV_AUTOSELECT - help - Support for the Pinnacle/Miro DC30(+) MJPEG capture/playback - card. This also supports really old DC10 cards based on the - zr36050 MJPEG codec and zr36016 VFE. - -config VIDEO_ZORAN_ZR36060 - tristate "Zoran ZR36060" - depends on VIDEO_ZORAN - help - Say Y to support Zoran boards based on 36060 chips. - This includes Iomega Buz, Pinnacle DC10, Linux media Labs 33 - and 33 R10 and AverMedia 6 boards. - -config VIDEO_ZORAN_BUZ - tristate "Iomega Buz support" - depends on VIDEO_ZORAN_ZR36060 - select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT - select VIDEO_SAA7185 if MEDIA_SUBDRV_AUTOSELECT - help - Support for the Iomega Buz MJPEG capture/playback card. - -config VIDEO_ZORAN_DC10 - tristate "Pinnacle/Miro DC10(+) support" - depends on VIDEO_ZORAN_ZR36060 - select VIDEO_SAA7110 if MEDIA_SUBDRV_AUTOSELECT - select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT - help - Support for the Pinnacle/Miro DC10(+) MJPEG capture/playback - card. 
- -config VIDEO_ZORAN_LML33 - tristate "Linux Media Labs LML33 support" - depends on VIDEO_ZORAN_ZR36060 - select VIDEO_BT819 if MEDIA_SUBDRV_AUTOSELECT - select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT - help - Support for the Linux Media Labs LML33 MJPEG capture/playback - card. - -config VIDEO_ZORAN_LML33R10 - tristate "Linux Media Labs LML33R10 support" - depends on VIDEO_ZORAN_ZR36060 - select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT - select VIDEO_ADV7170 if MEDIA_SUBDRV_AUTOSELECT - help - support for the Linux Media Labs LML33R10 MJPEG capture/playback - card. - -config VIDEO_ZORAN_AVS6EYES - tristate "AverMedia 6 Eyes support" - depends on VIDEO_ZORAN_ZR36060 - select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT - select VIDEO_BT866 if MEDIA_SUBDRV_AUTOSELECT - select VIDEO_KS0127 if MEDIA_SUBDRV_AUTOSELECT - help - Support for the AverMedia 6 Eyes video surveillance card. diff --git a/drivers/media/pci/zoran/Makefile b/drivers/media/pci/zoran/Makefile deleted file mode 100644 index 21ac29a71458..000000000000 --- a/drivers/media/pci/zoran/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -zr36067-objs := zoran_procfs.o zoran_device.o \ - zoran_driver.o zoran_card.o - -obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o -obj-$(CONFIG_VIDEO_ZORAN_DC30) += zr36050.o zr36016.o -obj-$(CONFIG_VIDEO_ZORAN_ZR36060) += zr36060.o diff --git a/drivers/media/pci/zoran/videocodec.c b/drivers/media/pci/zoran/videocodec.c deleted file mode 100644 index 5ff23ef89215..000000000000 --- a/drivers/media/pci/zoran/videocodec.c +++ /dev/null @@ -1,403 +0,0 @@ -/* - * VIDEO MOTION CODECs internal API for video devices - * - * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's - * bound to a master device. 
- * - * (c) 2002 Wolfgang Scherr - * - * $Id: videocodec.c,v 1.1.2.8 2003/03/29 07:16:04 rbultje Exp $ - * - * ------------------------------------------------------------------------ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * ------------------------------------------------------------------------ - */ - -#define VIDEOCODEC_VERSION "v0.2" - -#include -#include -#include -#include -#include - -// kernel config is here (procfs flag) - -#ifdef CONFIG_PROC_FS -#include -#include -#include -#endif - -#include "videocodec.h" - -static int debug; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0-4)"); - -#define dprintk(num, format, args...) 
\ - do { \ - if (debug >= num) \ - printk(format, ##args); \ - } while (0) - -struct attached_list { - struct videocodec *codec; - struct attached_list *next; -}; - -struct codec_list { - const struct videocodec *codec; - int attached; - struct attached_list *list; - struct codec_list *next; -}; - -static struct codec_list *codeclist_top = NULL; - -/* ================================================= */ -/* function prototypes of the master/slave interface */ -/* ================================================= */ - -struct videocodec * -videocodec_attach (struct videocodec_master *master) -{ - struct codec_list *h = codeclist_top; - struct attached_list *a, *ptr; - struct videocodec *codec; - int res; - - if (!master) { - dprintk(1, KERN_ERR "videocodec_attach: no data\n"); - return NULL; - } - - dprintk(2, - "videocodec_attach: '%s', flags %lx, magic %lx\n", - master->name, master->flags, master->magic); - - if (!h) { - dprintk(1, - KERN_ERR - "videocodec_attach: no device available\n"); - return NULL; - } - - while (h) { - // attach only if the slave has at least the flags - // expected by the master - if ((master->flags & h->codec->flags) == master->flags) { - dprintk(4, "videocodec_attach: try '%s'\n", - h->codec->name); - - if (!try_module_get(h->codec->owner)) - return NULL; - - codec = kmemdup(h->codec, sizeof(struct videocodec), - GFP_KERNEL); - if (!codec) { - dprintk(1, - KERN_ERR - "videocodec_attach: no mem\n"); - goto out_module_put; - } - - res = strlen(codec->name); - snprintf(codec->name + res, sizeof(codec->name) - res, - "[%d]", h->attached); - codec->master_data = master; - res = codec->setup(codec); - if (res == 0) { - dprintk(3, "videocodec_attach '%s'\n", - codec->name); - ptr = kzalloc(sizeof(struct attached_list), GFP_KERNEL); - if (!ptr) { - dprintk(1, - KERN_ERR - "videocodec_attach: no memory\n"); - goto out_kfree; - } - ptr->codec = codec; - - a = h->list; - if (!a) { - h->list = ptr; - dprintk(4, - "videocodec: first element\n"); - } 
else { - while (a->next) - a = a->next; // find end - a->next = ptr; - dprintk(4, - "videocodec: in after '%s'\n", - h->codec->name); - } - - h->attached += 1; - return codec; - } else { - kfree(codec); - } - } - h = h->next; - } - - dprintk(1, KERN_ERR "videocodec_attach: no codec found!\n"); - return NULL; - - out_module_put: - module_put(h->codec->owner); - out_kfree: - kfree(codec); - return NULL; -} - -int -videocodec_detach (struct videocodec *codec) -{ - struct codec_list *h = codeclist_top; - struct attached_list *a, *prev; - int res; - - if (!codec) { - dprintk(1, KERN_ERR "videocodec_detach: no data\n"); - return -EINVAL; - } - - dprintk(2, - "videocodec_detach: '%s', type: %x, flags %lx, magic %lx\n", - codec->name, codec->type, codec->flags, codec->magic); - - if (!h) { - dprintk(1, - KERN_ERR "videocodec_detach: no device left...\n"); - return -ENXIO; - } - - while (h) { - a = h->list; - prev = NULL; - while (a) { - if (codec == a->codec) { - res = a->codec->unset(a->codec); - if (res >= 0) { - dprintk(3, - "videocodec_detach: '%s'\n", - a->codec->name); - a->codec->master_data = NULL; - } else { - dprintk(1, - KERN_ERR - "videocodec_detach: '%s'\n", - a->codec->name); - a->codec->master_data = NULL; - } - if (prev == NULL) { - h->list = a->next; - dprintk(4, - "videocodec: delete first\n"); - } else { - prev->next = a->next; - dprintk(4, - "videocodec: delete middle\n"); - } - module_put(a->codec->owner); - kfree(a->codec); - kfree(a); - h->attached -= 1; - return 0; - } - prev = a; - a = a->next; - } - h = h->next; - } - - dprintk(1, KERN_ERR "videocodec_detach: given codec not found!\n"); - return -EINVAL; -} - -int -videocodec_register (const struct videocodec *codec) -{ - struct codec_list *ptr, *h = codeclist_top; - - if (!codec) { - dprintk(1, KERN_ERR "videocodec_register: no data!\n"); - return -EINVAL; - } - - dprintk(2, - "videocodec: register '%s', type: %x, flags %lx, magic %lx\n", - codec->name, codec->type, codec->flags, codec->magic); - 
- ptr = kzalloc(sizeof(struct codec_list), GFP_KERNEL); - if (!ptr) { - dprintk(1, KERN_ERR "videocodec_register: no memory\n"); - return -ENOMEM; - } - ptr->codec = codec; - - if (!h) { - codeclist_top = ptr; - dprintk(4, "videocodec: hooked in as first element\n"); - } else { - while (h->next) - h = h->next; // find the end - h->next = ptr; - dprintk(4, "videocodec: hooked in after '%s'\n", - h->codec->name); - } - - return 0; -} - -int -videocodec_unregister (const struct videocodec *codec) -{ - struct codec_list *prev = NULL, *h = codeclist_top; - - if (!codec) { - dprintk(1, KERN_ERR "videocodec_unregister: no data!\n"); - return -EINVAL; - } - - dprintk(2, - "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n", - codec->name, codec->type, codec->flags, codec->magic); - - if (!h) { - dprintk(1, - KERN_ERR - "videocodec_unregister: no device left...\n"); - return -ENXIO; - } - - while (h) { - if (codec == h->codec) { - if (h->attached) { - dprintk(1, - KERN_ERR - "videocodec: '%s' is used\n", - h->codec->name); - return -EBUSY; - } - dprintk(3, "videocodec: unregister '%s' is ok.\n", - h->codec->name); - if (prev == NULL) { - codeclist_top = h->next; - dprintk(4, - "videocodec: delete first element\n"); - } else { - prev->next = h->next; - dprintk(4, - "videocodec: delete middle element\n"); - } - kfree(h); - return 0; - } - prev = h; - h = h->next; - } - - dprintk(1, - KERN_ERR - "videocodec_unregister: given codec not found!\n"); - return -EINVAL; -} - -#ifdef CONFIG_PROC_FS -static int proc_videocodecs_show(struct seq_file *m, void *v) -{ - struct codec_list *h = codeclist_top; - struct attached_list *a; - - seq_printf(m, "lave or attached aster name type flags magic "); - seq_printf(m, "(connected as)\n"); - - while (h) { - seq_printf(m, "S %32s %04x %08lx %08lx (TEMPLATE)\n", - h->codec->name, h->codec->type, - h->codec->flags, h->codec->magic); - a = h->list; - while (a) { - seq_printf(m, "M %32s %04x %08lx %08lx (%s)\n", - 
a->codec->master_data->name, - a->codec->master_data->type, - a->codec->master_data->flags, - a->codec->master_data->magic, - a->codec->name); - a = a->next; - } - h = h->next; - } - - return 0; -} - -static int proc_videocodecs_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_videocodecs_show, NULL); -} - -static const struct file_operations videocodecs_proc_fops = { - .owner = THIS_MODULE, - .open = proc_videocodecs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; -#endif - -/* ===================== */ -/* hook in driver module */ -/* ===================== */ -static int __init -videocodec_init (void) -{ -#ifdef CONFIG_PROC_FS - static struct proc_dir_entry *videocodec_proc_entry; -#endif - - printk(KERN_INFO "Linux video codec intermediate layer: %s\n", - VIDEOCODEC_VERSION); - -#ifdef CONFIG_PROC_FS - videocodec_proc_entry = proc_create("videocodecs", 0, NULL, &videocodecs_proc_fops); - if (!videocodec_proc_entry) { - dprintk(1, KERN_ERR "videocodec: can't init procfs.\n"); - } -#endif - return 0; -} - -static void __exit -videocodec_exit (void) -{ -#ifdef CONFIG_PROC_FS - remove_proc_entry("videocodecs", NULL); -#endif -} - -EXPORT_SYMBOL(videocodec_attach); -EXPORT_SYMBOL(videocodec_detach); -EXPORT_SYMBOL(videocodec_register); -EXPORT_SYMBOL(videocodec_unregister); - -module_init(videocodec_init); -module_exit(videocodec_exit); - -MODULE_AUTHOR("Wolfgang Scherr "); -MODULE_DESCRIPTION("Intermediate API module for video codecs " - VIDEOCODEC_VERSION); -MODULE_LICENSE("GPL"); diff --git a/drivers/media/pci/zoran/videocodec.h b/drivers/media/pci/zoran/videocodec.h deleted file mode 100644 index 8ed5a0f7ac01..000000000000 --- a/drivers/media/pci/zoran/videocodec.h +++ /dev/null @@ -1,349 +0,0 @@ -/* - * VIDEO MOTION CODECs internal API for video devices - * - * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's - * bound to a master device. 
- * - * (c) 2002 Wolfgang Scherr - * - * $Id: videocodec.h,v 1.1.2.4 2003/01/14 21:15:03 rbultje Exp $ - * - * ------------------------------------------------------------------------ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * ------------------------------------------------------------------------ - */ - -/* =================== */ -/* general description */ -/* =================== */ - -/* Should ease the (re-)usage of drivers supporting cards with (different) - video codecs. The codecs register to this module their functionality, - and the processors (masters) can attach to them if they fit. - - The codecs are typically have a "strong" binding to their master - so I - don't think it makes sense to have a full blown interfacing as with e.g. - i2c. If you have an other opinion, let's discuss & implement it :-))) - - Usage: - - The slave has just to setup the videocodec structure and use two functions: - videocodec_register(codecdata); - videocodec_unregister(codecdata); - The best is just calling them at module (de-)initialisation. - - The master sets up the structure videocodec_master and calls: - codecdata=videocodec_attach(master_codecdata); - videocodec_detach(codecdata); - - The slave is called during attach/detach via functions setup previously - during register. At that time, the master_data pointer is set up - and the slave can access any io registers of the master device (in the case - the slave is bound to it). 
Otherwise it doesn't need this functions and - therfor they may not be initialized. - - The other functions are just for convenience, as they are for sure used by - most/all of the codecs. The last ones may be omitted, too. - - See the structure declaration below for more information and which data has - to be set up for the master and the slave. - - ---------------------------------------------------------------------------- - The master should have "knowledge" of the slave and vice versa. So the data - structures sent to/from slave via set_data/get_data set_image/get_image are - device dependent and vary between MJPEG/MPEG/WAVELET/... devices. (!!!!) - ---------------------------------------------------------------------------- -*/ - - -/* ========================================== */ -/* description of the videocodec_io structure */ -/* ========================================== */ - -/* - ==== master setup ==== - name -> name of the device structure for reference and debugging - master_data -> data ref. for the master (e.g. the zr36055,57,67) - readreg -> ref. to read-fn from register (setup by master, used by slave) - writereg -> ref. to write-fn to register (setup by master, used by slave) - this two functions do the lowlevel I/O job - - ==== slave functionality setup ==== - slave_data -> data ref. for the slave (e.g. the zr36050,60) - check -> fn-ref. checks availability of an device, returns -EIO on failure or - the type on success - this makes espcecially sense if a driver module supports more than - one codec which may be quite similar to access, nevertheless it - is good for a first functionality check - - -- main functions you always need for compression/decompression -- - - set_mode -> this fn-ref. resets the entire codec, and sets up the mode - with the last defined norm/size (or device default if not - available) - it returns 0 if the mode is possible - set_size -> this fn-ref. 
sets the norm and image size for - compression/decompression (returns 0 on success) - the norm param is defined in videodev2.h (V4L2_STD_*) - - additional setup may be available, too - but the codec should work with - some default values even without this - - set_data -> sets device-specific data (tables, quality etc.) - get_data -> query device-specific data (tables, quality etc.) - - if the device delivers interrupts, they may be setup/handled here - setup_interrupt -> codec irq setup (not needed for 36050/60) - handle_interrupt -> codec irq handling (not needed for 36050/60) - - if the device delivers pictures, they may be handled here - put_image -> puts image data to the codec (not needed for 36050/60) - get_image -> gets image data from the codec (not needed for 36050/60) - the calls include frame numbers and flags (even/odd/...) - if needed and a flag which allows blocking until its ready -*/ - -/* ============== */ -/* user interface */ -/* ============== */ - -/* - Currently there is only a information display planned, as the layer - is not visible for the user space at all. - - Information is available via procfs. The current entry is "/proc/videocodecs" - but it makes sense to "hide" it in the /proc/video tree of v4l(2) --TODO--. 
- -A example for such an output is: - -lave or attached aster name type flags magic (connected as) -S zr36050 0002 0000d001 00000000 (TEMPLATE) -M zr36055[0] 0001 0000c001 00000000 (zr36050[0]) -M zr36055[1] 0001 0000c001 00000000 (zr36050[1]) - -*/ - - -/* =============================================== */ -/* special defines for the videocodec_io structure */ -/* =============================================== */ - -#ifndef __LINUX_VIDEOCODEC_H -#define __LINUX_VIDEOCODEC_H - -#include - -#define CODEC_DO_COMPRESSION 0 -#define CODEC_DO_EXPANSION 1 - -/* this are the current codec flags I think they are needed */ -/* -> type value in structure */ -#define CODEC_FLAG_JPEG 0x00000001L // JPEG codec -#define CODEC_FLAG_MPEG 0x00000002L // MPEG1/2/4 codec -#define CODEC_FLAG_DIVX 0x00000004L // DIVX codec -#define CODEC_FLAG_WAVELET 0x00000008L // WAVELET codec - // room for other types - -#define CODEC_FLAG_MAGIC 0x00000800L // magic key must match -#define CODEC_FLAG_HARDWARE 0x00001000L // is a hardware codec -#define CODEC_FLAG_VFE 0x00002000L // has direct video frontend -#define CODEC_FLAG_ENCODER 0x00004000L // compression capability -#define CODEC_FLAG_DECODER 0x00008000L // decompression capability -#define CODEC_FLAG_NEEDIRQ 0x00010000L // needs irq handling -#define CODEC_FLAG_RDWRPIC 0x00020000L // handles picture I/O - -/* a list of modes, some are just examples (is there any HW?) 
*/ -#define CODEC_MODE_BJPG 0x0001 // Baseline JPEG -#define CODEC_MODE_LJPG 0x0002 // Lossless JPEG -#define CODEC_MODE_MPEG1 0x0003 // MPEG 1 -#define CODEC_MODE_MPEG2 0x0004 // MPEG 2 -#define CODEC_MODE_MPEG4 0x0005 // MPEG 4 -#define CODEC_MODE_MSDIVX 0x0006 // MS DivX -#define CODEC_MODE_ODIVX 0x0007 // Open DivX -#define CODEC_MODE_WAVELET 0x0008 // Wavelet - -/* this are the current codec types I want to implement */ -/* -> type value in structure */ -#define CODEC_TYPE_NONE 0 -#define CODEC_TYPE_L64702 1 -#define CODEC_TYPE_ZR36050 2 -#define CODEC_TYPE_ZR36016 3 -#define CODEC_TYPE_ZR36060 4 - -/* the type of data may be enhanced by future implementations (data-fn.'s) */ -/* -> used in command */ -#define CODEC_G_STATUS 0x0000 /* codec status (query only) */ -#define CODEC_S_CODEC_MODE 0x0001 /* codec mode (baseline JPEG, MPEG1,... */ -#define CODEC_G_CODEC_MODE 0x8001 -#define CODEC_S_VFE 0x0002 /* additional video frontend setup */ -#define CODEC_G_VFE 0x8002 -#define CODEC_S_MMAP 0x0003 /* MMAP setup (if available) */ - -#define CODEC_S_JPEG_TDS_BYTE 0x0010 /* target data size in bytes */ -#define CODEC_G_JPEG_TDS_BYTE 0x8010 -#define CODEC_S_JPEG_SCALE 0x0011 /* scaling factor for quant. 
tables */ -#define CODEC_G_JPEG_SCALE 0x8011 -#define CODEC_S_JPEG_HDT_DATA 0x0018 /* huffman-tables */ -#define CODEC_G_JPEG_HDT_DATA 0x8018 -#define CODEC_S_JPEG_QDT_DATA 0x0019 /* quantizing-tables */ -#define CODEC_G_JPEG_QDT_DATA 0x8019 -#define CODEC_S_JPEG_APP_DATA 0x001A /* APP marker */ -#define CODEC_G_JPEG_APP_DATA 0x801A -#define CODEC_S_JPEG_COM_DATA 0x001B /* COM marker */ -#define CODEC_G_JPEG_COM_DATA 0x801B - -#define CODEC_S_PRIVATE 0x1000 /* "private" commands start here */ -#define CODEC_G_PRIVATE 0x9000 - -#define CODEC_G_FLAG 0x8000 /* this is how 'get' is detected */ - -/* types of transfer, directly user space or a kernel buffer (image-fn.'s) */ -/* -> used in get_image, put_image */ -#define CODEC_TRANSFER_KERNEL 0 /* use "memcopy" */ -#define CODEC_TRANSFER_USER 1 /* use "to/from_user" */ - - -/* ========================= */ -/* the structures itself ... */ -/* ========================= */ - -struct vfe_polarity { - unsigned int vsync_pol:1; - unsigned int hsync_pol:1; - unsigned int field_pol:1; - unsigned int blank_pol:1; - unsigned int subimg_pol:1; - unsigned int poe_pol:1; - unsigned int pvalid_pol:1; - unsigned int vclk_pol:1; -}; - -struct vfe_settings { - __u32 x, y; /* Offsets into image */ - __u32 width, height; /* Area to capture */ - __u16 decimation; /* Decimation divider */ - __u16 flags; /* Flags for capture */ - __u16 quality; /* quality of the video */ -}; - -struct tvnorm { - u16 Wt, Wa, HStart, HSyncStart, Ht, Ha, VStart; -}; - -struct jpeg_com_marker { - int len; /* number of usable bytes in data */ - char data[60]; -}; - -struct jpeg_app_marker { - int appn; /* number app segment */ - int len; /* number of usable bytes in data */ - char data[60]; -}; - -struct videocodec { - struct module *owner; - /* -- filled in by slave device during register -- */ - char name[32]; - unsigned long magic; /* may be used for client<->master attaching */ - unsigned long flags; /* functionality flags */ - unsigned int type; /* codec 
type */ - - /* -- these is filled in later during master device attach -- */ - - struct videocodec_master *master_data; - - /* -- these are filled in by the slave device during register -- */ - - void *data; /* private slave data */ - - /* attach/detach client functions (indirect call) */ - int (*setup) (struct videocodec * codec); - int (*unset) (struct videocodec * codec); - - /* main functions, every client needs them for sure! */ - // set compression or decompression (or freeze, stop, standby, etc) - int (*set_mode) (struct videocodec * codec, - int mode); - // setup picture size and norm (for the codec's video frontend) - int (*set_video) (struct videocodec * codec, - struct tvnorm * norm, - struct vfe_settings * cap, - struct vfe_polarity * pol); - // other control commands, also mmap setup etc. - int (*control) (struct videocodec * codec, - int type, - int size, - void *data); - - /* additional setup/query/processing (may be NULL pointer) */ - // interrupt setup / handling (for irq's delivered by master) - int (*setup_interrupt) (struct videocodec * codec, - long mode); - int (*handle_interrupt) (struct videocodec * codec, - int source, - long flag); - // picture interface (if any) - long (*put_image) (struct videocodec * codec, - int tr_type, - int block, - long *fr_num, - long *flag, - long size, - void *buf); - long (*get_image) (struct videocodec * codec, - int tr_type, - int block, - long *fr_num, - long *flag, - long size, - void *buf); -}; - -struct videocodec_master { - /* -- filled in by master device for registration -- */ - char name[32]; - unsigned long magic; /* may be used for client<->master attaching */ - unsigned long flags; /* functionality flags */ - unsigned int type; /* master type */ - - void *data; /* private master data */ - - __u32(*readreg) (struct videocodec * codec, - __u16 reg); - void (*writereg) (struct videocodec * codec, - __u16 reg, - __u32 value); -}; - - -/* ================================================= */ -/* function 
prototypes of the master/slave interface */ -/* ================================================= */ - -/* attach and detach commands for the master */ -// * master structure needs to be kmalloc'ed before calling attach -// and free'd after calling detach -// * returns pointer on success, NULL on failure -extern struct videocodec *videocodec_attach(struct videocodec_master *); -// * 0 on success, <0 (errno) on failure -extern int videocodec_detach(struct videocodec *); - -/* register and unregister commands for the slaves */ -// * 0 on success, <0 (errno) on failure -extern int videocodec_register(const struct videocodec *); -// * 0 on success, <0 (errno) on failure -extern int videocodec_unregister(const struct videocodec *); - -/* the other calls are directly done via the videocodec structure! */ - -#endif /*ifndef __LINUX_VIDEOCODEC_H */ diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h deleted file mode 100644 index 9bb3c21aa275..000000000000 --- a/drivers/media/pci/zoran/zoran.h +++ /dev/null @@ -1,402 +0,0 @@ -/* - * zoran - Iomega Buz driver - * - * Copyright (C) 1999 Rainer Johanni - * - * based on - * - * zoran.0.0.3 Copyright (C) 1998 Dave Perks - * - * and - * - * bttv - Bt848 frame grabber driver - * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) - * & Marcus Metzler (mocm@thp.uni-koeln.de) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _BUZ_H_ -#define _BUZ_H_ - -#include -#include -#include - -struct zoran_sync { - unsigned long frame; /* number of buffer that has been free'd */ - unsigned long length; /* number of code bytes in buffer (capture only) */ - unsigned long seq; /* frame sequence number */ - struct timeval timestamp; /* timestamp */ -}; - - -#define ZORAN_NAME "ZORAN" /* name of the device */ - -#define ZR_DEVNAME(zr) ((zr)->name) - -#define BUZ_MAX_WIDTH (zr->timing->Wa) -#define BUZ_MAX_HEIGHT (zr->timing->Ha) -#define BUZ_MIN_WIDTH 32 /* never display less than 32 pixels */ -#define BUZ_MIN_HEIGHT 24 /* never display less than 24 rows */ - -#define BUZ_NUM_STAT_COM 4 -#define BUZ_MASK_STAT_COM 3 - -#define BUZ_MAX_FRAME 256 /* Must be a power of 2 */ -#define BUZ_MASK_FRAME 255 /* Must be BUZ_MAX_FRAME-1 */ - -#define BUZ_MAX_INPUT 16 - -#if VIDEO_MAX_FRAME <= 32 -# define V4L_MAX_FRAME 32 -#elif VIDEO_MAX_FRAME <= 64 -# define V4L_MAX_FRAME 64 -#else -# error "Too many video frame buffers to handle" -#endif -#define V4L_MASK_FRAME (V4L_MAX_FRAME - 1) - -#define MAX_FRAME (BUZ_MAX_FRAME > VIDEO_MAX_FRAME ? 
BUZ_MAX_FRAME : VIDEO_MAX_FRAME) - -#include "zr36057.h" - -enum card_type { - UNKNOWN = -1, - - /* Pinnacle/Miro */ - DC10_old, /* DC30 like */ - DC10_new, /* DC10plus like */ - DC10plus, - DC30, - DC30plus, - - /* Linux Media Labs */ - LML33, - LML33R10, - - /* Iomega */ - BUZ, - - /* AverMedia */ - AVS6EYES, - - /* total number of cards */ - NUM_CARDS -}; - -enum zoran_codec_mode { - BUZ_MODE_IDLE, /* nothing going on */ - BUZ_MODE_MOTION_COMPRESS, /* grabbing frames */ - BUZ_MODE_MOTION_DECOMPRESS, /* playing frames */ - BUZ_MODE_STILL_COMPRESS, /* still frame conversion */ - BUZ_MODE_STILL_DECOMPRESS /* still frame conversion */ -}; - -enum zoran_buffer_state { - BUZ_STATE_USER, /* buffer is owned by application */ - BUZ_STATE_PEND, /* buffer is queued in pend[] ready to feed to I/O */ - BUZ_STATE_DMA, /* buffer is queued in dma[] for I/O */ - BUZ_STATE_DONE /* buffer is ready to return to application */ -}; - -enum zoran_map_mode { - ZORAN_MAP_MODE_RAW, - ZORAN_MAP_MODE_JPG_REC, -#define ZORAN_MAP_MODE_JPG ZORAN_MAP_MODE_JPG_REC - ZORAN_MAP_MODE_JPG_PLAY, -}; - -enum gpio_type { - ZR_GPIO_JPEG_SLEEP = 0, - ZR_GPIO_JPEG_RESET, - ZR_GPIO_JPEG_FRAME, - ZR_GPIO_VID_DIR, - ZR_GPIO_VID_EN, - ZR_GPIO_VID_RESET, - ZR_GPIO_CLK_SEL1, - ZR_GPIO_CLK_SEL2, - ZR_GPIO_MAX, -}; - -enum gpcs_type { - GPCS_JPEG_RESET = 0, - GPCS_JPEG_START, - GPCS_MAX, -}; - -struct zoran_format { - char *name; - __u32 fourcc; - int colorspace; - int depth; - __u32 flags; - __u32 vfespfr; -}; -/* flags */ -#define ZORAN_FORMAT_COMPRESSED 1<<0 -#define ZORAN_FORMAT_OVERLAY 1<<1 -#define ZORAN_FORMAT_CAPTURE 1<<2 -#define ZORAN_FORMAT_PLAYBACK 1<<3 - -/* overlay-settings */ -struct zoran_overlay_settings { - int is_set; - int x, y, width, height; /* position */ - int clipcount; /* position and number of clips */ - const struct zoran_format *format; /* overlay format */ -}; - -/* v4l-capture settings */ -struct zoran_v4l_settings { - int width, height, bytesperline; /* capture size */ - const 
struct zoran_format *format; /* capture format */ -}; - -/* jpg-capture/-playback settings */ -struct zoran_jpg_settings { - int decimation; /* this bit is used to set everything to default */ - int HorDcm, VerDcm, TmpDcm; /* capture decimation settings (TmpDcm=1 means both fields) */ - int field_per_buff, odd_even; /* field-settings (odd_even=1 (+TmpDcm=1) means top-field-first) */ - int img_x, img_y, img_width, img_height; /* crop settings (subframe capture) */ - struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */ -}; - -struct zoran_fh; - -struct zoran_mapping { - struct zoran_fh *fh; - atomic_t count; -}; - -struct zoran_buffer { - struct zoran_mapping *map; - enum zoran_buffer_state state; /* state: unused/pending/dma/done */ - struct zoran_sync bs; /* DONE: info to return to application */ - union { - struct { - __le32 *frag_tab; /* addresses of frag table */ - u32 frag_tab_bus; /* same value cached to save time in ISR */ - } jpg; - struct { - char *fbuffer; /* virtual address of frame buffer */ - unsigned long fbuffer_phys;/* physical address of frame buffer */ - unsigned long fbuffer_bus;/* bus address of frame buffer */ - } v4l; - }; -}; - -enum zoran_lock_activity { - ZORAN_FREE, /* free for use */ - ZORAN_ACTIVE, /* active but unlocked */ - ZORAN_LOCKED, /* locked */ -}; - -/* buffer collections */ -struct zoran_buffer_col { - enum zoran_lock_activity active; /* feature currently in use? 
*/ - unsigned int num_buffers, buffer_size; - struct zoran_buffer buffer[MAX_FRAME]; /* buffers */ - u8 allocated; /* Flag if buffers are allocated */ - u8 need_contiguous; /* Flag if contiguous buffers are needed */ - /* only applies to jpg buffers, raw buffers are always contiguous */ -}; - -struct zoran; - -/* zoran_fh contains per-open() settings */ -struct zoran_fh { - struct v4l2_fh fh; - struct zoran *zr; - - enum zoran_map_mode map_mode; /* Flag which bufferset will map by next mmap() */ - - struct zoran_overlay_settings overlay_settings; - u32 *overlay_mask; /* overlay mask */ - enum zoran_lock_activity overlay_active;/* feature currently in use? */ - - struct zoran_buffer_col buffers; /* buffers' info */ - - struct zoran_v4l_settings v4l_settings; /* structure with a lot of things to play with */ - struct zoran_jpg_settings jpg_settings; /* structure with a lot of things to play with */ -}; - -struct card_info { - enum card_type type; - char name[32]; - const char *i2c_decoder; /* i2c decoder device */ - const unsigned short *addrs_decoder; - const char *i2c_encoder; /* i2c encoder device */ - const unsigned short *addrs_encoder; - u16 video_vfe, video_codec; /* videocodec types */ - u16 audio_chip; /* audio type */ - - int inputs; /* number of video inputs */ - struct input { - int muxsel; - char name[32]; - } input[BUZ_MAX_INPUT]; - - v4l2_std_id norms; - struct tvnorm *tvn[3]; /* supported TV norms */ - - u32 jpeg_int; /* JPEG interrupt */ - u32 vsync_int; /* VSYNC interrupt */ - s8 gpio[ZR_GPIO_MAX]; - u8 gpcs[GPCS_MAX]; - - struct vfe_polarity vfe_pol; - u8 gpio_pol[ZR_GPIO_MAX]; - - /* is the /GWS line connected? 
*/ - u8 gws_not_connected; - - /* avs6eyes mux setting */ - u8 input_mux; - - void (*init) (struct zoran * zr); -}; - -struct zoran { - struct v4l2_device v4l2_dev; - struct v4l2_ctrl_handler hdl; - struct video_device *video_dev; - - struct i2c_adapter i2c_adapter; /* */ - struct i2c_algo_bit_data i2c_algo; /* */ - u32 i2cbr; - - struct v4l2_subdev *decoder; /* video decoder sub-device */ - struct v4l2_subdev *encoder; /* video encoder sub-device */ - - struct videocodec *codec; /* video codec */ - struct videocodec *vfe; /* video front end */ - - struct mutex lock; /* file ops serialize lock */ - - u8 initialized; /* flag if zoran has been correctly initialized */ - int user; /* number of current users */ - struct card_info card; - struct tvnorm *timing; - - unsigned short id; /* number of this device */ - char name[32]; /* name of this device */ - struct pci_dev *pci_dev; /* PCI device */ - unsigned char revision; /* revision of zr36057 */ - unsigned char __iomem *zr36057_mem;/* pointer to mapped IO memory */ - - spinlock_t spinlock; /* Spinlock */ - - /* Video for Linux parameters */ - int input; /* card's norm and input */ - v4l2_std_id norm; - - /* Current buffer params */ - void *vbuf_base; - int vbuf_height, vbuf_width; - int vbuf_depth; - int vbuf_bytesperline; - - struct zoran_overlay_settings overlay_settings; - u32 *overlay_mask; /* overlay mask */ - enum zoran_lock_activity overlay_active; /* feature currently in use? 
*/ - - wait_queue_head_t v4l_capq; - - int v4l_overlay_active; /* Overlay grab is activated */ - int v4l_memgrab_active; /* Memory grab is activated */ - - int v4l_grab_frame; /* Frame number being currently grabbed */ -#define NO_GRAB_ACTIVE (-1) - unsigned long v4l_grab_seq; /* Number of frames grabbed */ - struct zoran_v4l_settings v4l_settings; /* structure with a lot of things to play with */ - - /* V4L grab queue of frames pending */ - unsigned long v4l_pend_head; - unsigned long v4l_pend_tail; - unsigned long v4l_sync_tail; - int v4l_pend[V4L_MAX_FRAME]; - struct zoran_buffer_col v4l_buffers; /* V4L buffers' info */ - - /* Buz MJPEG parameters */ - enum zoran_codec_mode codec_mode; /* status of codec */ - struct zoran_jpg_settings jpg_settings; /* structure with a lot of things to play with */ - - wait_queue_head_t jpg_capq; /* wait here for grab to finish */ - - /* grab queue counts/indices, mask with BUZ_MASK_STAT_COM before using as index */ - /* (dma_head - dma_tail) is number active in DMA, must be <= BUZ_NUM_STAT_COM */ - /* (value & BUZ_MASK_STAT_COM) corresponds to index in stat_com table */ - unsigned long jpg_que_head; /* Index where to put next buffer which is queued */ - unsigned long jpg_dma_head; /* Index of next buffer which goes into stat_com */ - unsigned long jpg_dma_tail; /* Index of last buffer in stat_com */ - unsigned long jpg_que_tail; /* Index of last buffer in queue */ - unsigned long jpg_seq_num; /* count of frames since grab/play started */ - unsigned long jpg_err_seq; /* last seq_num before error */ - unsigned long jpg_err_shift; - unsigned long jpg_queued_num; /* count of frames queued since grab/play started */ - - /* zr36057's code buffer table */ - __le32 *stat_com; /* stat_com[i] is indexed by dma_head/tail & BUZ_MASK_STAT_COM */ - - /* (value & BUZ_MASK_FRAME) corresponds to index in pend[] queue */ - int jpg_pend[BUZ_MAX_FRAME]; - - /* array indexed by frame number */ - struct zoran_buffer_col jpg_buffers; /* MJPEG buffers' 
info */ - - /* Additional stuff for testing */ -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *zoran_proc; -#else - void *zoran_proc; -#endif - int testing; - int jpeg_error; - int intr_counter_GIRQ1; - int intr_counter_GIRQ0; - int intr_counter_CodRepIRQ; - int intr_counter_JPEGRepIRQ; - int field_counter; - int IRQ1_in; - int IRQ1_out; - int JPEG_in; - int JPEG_out; - int JPEG_0; - int JPEG_1; - int END_event_missed; - int JPEG_missed; - int JPEG_error; - int num_errors; - int JPEG_max_missed; - int JPEG_min_missed; - - u32 last_isr; - unsigned long frame_num; - - wait_queue_head_t test_q; -}; - -static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev) -{ - return container_of(v4l2_dev, struct zoran, v4l2_dev); -} - -/* There was something called _ALPHA_BUZ that used the PCI address instead of - * the kernel iomapped address for btread/btwrite. */ -#define btwrite(dat,adr) writel((dat), zr->zr36057_mem+(adr)) -#define btread(adr) readl(zr->zr36057_mem+(adr)) - -#define btand(dat,adr) btwrite((dat) & btread(adr), adr) -#define btor(dat,adr) btwrite((dat) | btread(adr), adr) -#define btaor(dat,mask,adr) btwrite((dat) | ((mask) & btread(adr)), adr) - -#endif diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c deleted file mode 100644 index a6b9ebd20263..000000000000 --- a/drivers/media/pci/zoran/zoran_card.c +++ /dev/null @@ -1,1524 +0,0 @@ -/* - * Zoran zr36057/zr36067 PCI controller driver, for the - * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux - * Media Labs LML33/LML33R10. 
- * - * This part handles card-specific data and detection - * - * Copyright (C) 2000 Serguei Miridonov - * - * Currently maintained by: - * Ronald Bultje - * Laurent Pinchart - * Mailinglist - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "videocodec.h" -#include "zoran.h" -#include "zoran_card.h" -#include "zoran_device.h" -#include "zoran_procfs.h" - -extern const struct zoran_format zoran_formats[]; - -static int card[BUZ_MAX] = { [0 ... (BUZ_MAX-1)] = -1 }; -module_param_array(card, int, NULL, 0444); -MODULE_PARM_DESC(card, "Card type"); - -/* - The video mem address of the video card. - The driver has a little database for some videocards - to determine it from there. If your video card is not in there - you have either to give it to the driver as a parameter - or set in in a VIDIOCSFBUF ioctl - */ - -static unsigned long vidmem; /* default = 0 - Video memory base address */ -module_param_hw(vidmem, ulong, iomem, 0444); -MODULE_PARM_DESC(vidmem, "Default video memory base address"); - -/* - Default input and video norm at startup of the driver. 
-*/ - -static unsigned int default_input; /* default 0 = Composite, 1 = S-Video */ -module_param(default_input, uint, 0444); -MODULE_PARM_DESC(default_input, - "Default input (0=Composite, 1=S-Video, 2=Internal)"); - -static int default_mux = 1; /* 6 Eyes input selection */ -module_param(default_mux, int, 0644); -MODULE_PARM_DESC(default_mux, - "Default 6 Eyes mux setting (Input selection)"); - -static int default_norm; /* default 0 = PAL, 1 = NTSC 2 = SECAM */ -module_param(default_norm, int, 0444); -MODULE_PARM_DESC(default_norm, "Default norm (0=PAL, 1=NTSC, 2=SECAM)"); - -/* /dev/videoN, -1 for autodetect */ -static int video_nr[BUZ_MAX] = { [0 ... (BUZ_MAX-1)] = -1 }; -module_param_array(video_nr, int, NULL, 0444); -MODULE_PARM_DESC(video_nr, "Video device number (-1=Auto)"); - -int v4l_nbufs = 4; -int v4l_bufsize = 864; /* Everybody should be able to work with this setting */ -module_param(v4l_nbufs, int, 0644); -MODULE_PARM_DESC(v4l_nbufs, "Maximum number of V4L buffers to use"); -module_param(v4l_bufsize, int, 0644); -MODULE_PARM_DESC(v4l_bufsize, "Maximum size per V4L buffer (in kB)"); - -int jpg_nbufs = 32; -int jpg_bufsize = 512; /* max size for 100% quality full-PAL frame */ -module_param(jpg_nbufs, int, 0644); -MODULE_PARM_DESC(jpg_nbufs, "Maximum number of JPG buffers to use"); -module_param(jpg_bufsize, int, 0644); -MODULE_PARM_DESC(jpg_bufsize, "Maximum size per JPG buffer (in kB)"); - -int pass_through = 0; /* 1=Pass through TV signal when device is not used */ - /* 0=Show color bar when device is not used (LML33: only if lml33dpath=1) */ -module_param(pass_through, int, 0644); -MODULE_PARM_DESC(pass_through, - "Pass TV signal through to TV-out when idling"); - -int zr36067_debug = 1; -module_param_named(debug, zr36067_debug, int, 0644); -MODULE_PARM_DESC(debug, "Debug level (0-5)"); - -#define ZORAN_VERSION "0.10.1" - -MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver"); -MODULE_AUTHOR("Serguei Miridonov"); -MODULE_LICENSE("GPL"); 
-MODULE_VERSION(ZORAN_VERSION); - -#define ZR_DEVICE(subven, subdev, data) { \ - .vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \ - .subvendor = (subven), .subdevice = (subdev), .driver_data = (data) } - -static const struct pci_device_id zr36067_pci_tbl[] = { - ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC10PLUS, DC10plus), - ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC30PLUS, DC30plus), - ZR_DEVICE(PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, PCI_DEVICE_ID_LML_33R10, LML33R10), - ZR_DEVICE(PCI_VENDOR_ID_IOMEGA, PCI_DEVICE_ID_IOMEGA_BUZ, BUZ), - ZR_DEVICE(PCI_ANY_ID, PCI_ANY_ID, NUM_CARDS), - {0} -}; -MODULE_DEVICE_TABLE(pci, zr36067_pci_tbl); - -static unsigned int zoran_num; /* number of cards found */ - -/* videocodec bus functions ZR36060 */ -static u32 -zr36060_read (struct videocodec *codec, - u16 reg) -{ - struct zoran *zr = (struct zoran *) codec->master_data->data; - __u32 data; - - if (post_office_wait(zr) - || post_office_write(zr, 0, 1, reg >> 8) - || post_office_write(zr, 0, 2, reg & 0xff)) { - return -1; - } - - data = post_office_read(zr, 0, 3) & 0xff; - return data; -} - -static void -zr36060_write (struct videocodec *codec, - u16 reg, - u32 val) -{ - struct zoran *zr = (struct zoran *) codec->master_data->data; - - if (post_office_wait(zr) - || post_office_write(zr, 0, 1, reg >> 8) - || post_office_write(zr, 0, 2, reg & 0xff)) { - return; - } - - post_office_write(zr, 0, 3, val & 0xff); -} - -/* videocodec bus functions ZR36050 */ -static u32 -zr36050_read (struct videocodec *codec, - u16 reg) -{ - struct zoran *zr = (struct zoran *) codec->master_data->data; - __u32 data; - - if (post_office_wait(zr) - || post_office_write(zr, 1, 0, reg >> 2)) { // reg. HIGHBYTES - return -1; - } - - data = post_office_read(zr, 0, reg & 0x03) & 0xff; // reg. 
LOWBYTES + read - return data; -} - -static void -zr36050_write (struct videocodec *codec, - u16 reg, - u32 val) -{ - struct zoran *zr = (struct zoran *) codec->master_data->data; - - if (post_office_wait(zr) - || post_office_write(zr, 1, 0, reg >> 2)) { // reg. HIGHBYTES - return; - } - - post_office_write(zr, 0, reg & 0x03, val & 0xff); // reg. LOWBYTES + wr. data -} - -/* videocodec bus functions ZR36016 */ -static u32 -zr36016_read (struct videocodec *codec, - u16 reg) -{ - struct zoran *zr = (struct zoran *) codec->master_data->data; - __u32 data; - - if (post_office_wait(zr)) { - return -1; - } - - data = post_office_read(zr, 2, reg & 0x03) & 0xff; // read - return data; -} - -/* hack for in zoran_device.c */ -void -zr36016_write (struct videocodec *codec, - u16 reg, - u32 val) -{ - struct zoran *zr = (struct zoran *) codec->master_data->data; - - if (post_office_wait(zr)) { - return; - } - - post_office_write(zr, 2, reg & 0x03, val & 0x0ff); // wr. data -} - -/* - * Board specific information - */ - -static void -dc10_init (struct zoran *zr) -{ - dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); - - /* Pixel clock selection */ - GPIO(zr, 4, 0); - GPIO(zr, 5, 1); - /* Enable the video bus sync signals */ - GPIO(zr, 7, 0); -} - -static void -dc10plus_init (struct zoran *zr) -{ - dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); -} - -static void -buz_init (struct zoran *zr) -{ - dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); - - /* some stuff from Iomega */ - pci_write_config_dword(zr->pci_dev, 0xfc, 0x90680f15); - pci_write_config_dword(zr->pci_dev, 0x0c, 0x00012020); - pci_write_config_dword(zr->pci_dev, 0xe8, 0xc0200000); -} - -static void -lml33_init (struct zoran *zr) -{ - dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); - - GPIO(zr, 2, 1); // Set Composite input/output -} - -static void -avs6eyes_init (struct zoran *zr) -{ - // AverMedia 6-Eyes original driver by Christer Weinigel - - // Lifted straight 
from Christer's old driver and - // modified slightly by Martin Samuelsson. - - int mux = default_mux; /* 1 = BT866, 7 = VID1 */ - - GPIO(zr, 4, 1); /* Bt866 SLEEP on */ - udelay(2); - - GPIO(zr, 0, 1); /* ZR36060 /RESET on */ - GPIO(zr, 1, 0); /* ZR36060 /SLEEP on */ - GPIO(zr, 2, mux & 1); /* MUX S0 */ - GPIO(zr, 3, 0); /* /FRAME on */ - GPIO(zr, 4, 0); /* Bt866 SLEEP off */ - GPIO(zr, 5, mux & 2); /* MUX S1 */ - GPIO(zr, 6, 0); /* ? */ - GPIO(zr, 7, mux & 4); /* MUX S2 */ - -} - -static char * -codecid_to_modulename (u16 codecid) -{ - char *name = NULL; - - switch (codecid) { - case CODEC_TYPE_ZR36060: - name = "zr36060"; - break; - case CODEC_TYPE_ZR36050: - name = "zr36050"; - break; - case CODEC_TYPE_ZR36016: - name = "zr36016"; - break; - } - - return name; -} - -// struct tvnorm { -// u16 Wt, Wa, HStart, HSyncStart, Ht, Ha, VStart; -// }; - -static struct tvnorm f50sqpixel = { 944, 768, 83, 880, 625, 576, 16 }; -static struct tvnorm f60sqpixel = { 780, 640, 51, 716, 525, 480, 12 }; -static struct tvnorm f50ccir601 = { 864, 720, 75, 804, 625, 576, 18 }; -static struct tvnorm f60ccir601 = { 858, 720, 57, 788, 525, 480, 16 }; - -static struct tvnorm f50ccir601_lml33 = { 864, 720, 75+34, 804, 625, 576, 18 }; -static struct tvnorm f60ccir601_lml33 = { 858, 720, 57+34, 788, 525, 480, 16 }; - -/* The DC10 (57/16/50) uses VActive as HSync, so HStart must be 0 */ -static struct tvnorm f50sqpixel_dc10 = { 944, 768, 0, 880, 625, 576, 0 }; -static struct tvnorm f60sqpixel_dc10 = { 780, 640, 0, 716, 525, 480, 12 }; - -/* FIXME: I cannot swap U and V in saa7114, so i do one - * pixel left shift in zoran (75 -> 74) - * (Maxim Yevtyushkin ) */ -static struct tvnorm f50ccir601_lm33r10 = { 864, 720, 74+54, 804, 625, 576, 18 }; -static struct tvnorm f60ccir601_lm33r10 = { 858, 720, 56+54, 788, 525, 480, 16 }; - -/* FIXME: The ks0127 seem incapable of swapping U and V, too, which is why I - * copy Maxim's left shift hack for the 6 Eyes. 
- * - * Christer's driver used the unshifted norms, though... - * /Sam */ -static struct tvnorm f50ccir601_avs6eyes = { 864, 720, 74, 804, 625, 576, 18 }; -static struct tvnorm f60ccir601_avs6eyes = { 858, 720, 56, 788, 525, 480, 16 }; - -static const unsigned short vpx3220_addrs[] = { 0x43, 0x47, I2C_CLIENT_END }; -static const unsigned short saa7110_addrs[] = { 0x4e, 0x4f, I2C_CLIENT_END }; -static const unsigned short saa7111_addrs[] = { 0x25, 0x24, I2C_CLIENT_END }; -static const unsigned short saa7114_addrs[] = { 0x21, 0x20, I2C_CLIENT_END }; -static const unsigned short adv717x_addrs[] = { 0x6a, 0x6b, 0x2a, 0x2b, I2C_CLIENT_END }; -static const unsigned short ks0127_addrs[] = { 0x6c, 0x6d, I2C_CLIENT_END }; -static const unsigned short saa7185_addrs[] = { 0x44, I2C_CLIENT_END }; -static const unsigned short bt819_addrs[] = { 0x45, I2C_CLIENT_END }; -static const unsigned short bt856_addrs[] = { 0x44, I2C_CLIENT_END }; -static const unsigned short bt866_addrs[] = { 0x44, I2C_CLIENT_END }; - -static struct card_info zoran_cards[NUM_CARDS] = { - { - .type = DC10_old, - .name = "DC10(old)", - .i2c_decoder = "vpx3220a", - .addrs_decoder = vpx3220_addrs, - .video_codec = CODEC_TYPE_ZR36050, - .video_vfe = CODEC_TYPE_ZR36016, - - .inputs = 3, - .input = { - { 1, "Composite" }, - { 2, "S-Video" }, - { 0, "Internal/comp" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, - .tvn = { - &f50sqpixel_dc10, - &f60sqpixel_dc10, - &f50sqpixel_dc10 - }, - .jpeg_int = 0, - .vsync_int = ZR36057_ISR_GIRQ1, - .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 }, - .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 }, - .gpcs = { -1, 0 }, - .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, - .gws_not_connected = 0, - .input_mux = 0, - .init = &dc10_init, - }, { - .type = DC10_new, - .name = "DC10(new)", - .i2c_decoder = "saa7110", - .addrs_decoder = saa7110_addrs, - .i2c_encoder = "adv7175", - .addrs_encoder = adv717x_addrs, - .video_codec = CODEC_TYPE_ZR36060, - - .inputs = 3, - .input = { - { 0, "Composite" }, 
- { 7, "S-Video" }, - { 5, "Internal/comp" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, - .tvn = { - &f50sqpixel, - &f60sqpixel, - &f50sqpixel}, - .jpeg_int = ZR36057_ISR_GIRQ0, - .vsync_int = ZR36057_ISR_GIRQ1, - .gpio = { 3, 0, 6, 1, 2, -1, 4, 5 }, - .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, - .gpcs = { -1, 1}, - .vfe_pol = { 1, 1, 1, 1, 0, 0, 0, 0 }, - .gws_not_connected = 0, - .input_mux = 0, - .init = &dc10plus_init, - }, { - .type = DC10plus, - .name = "DC10plus", - .i2c_decoder = "saa7110", - .addrs_decoder = saa7110_addrs, - .i2c_encoder = "adv7175", - .addrs_encoder = adv717x_addrs, - .video_codec = CODEC_TYPE_ZR36060, - - .inputs = 3, - .input = { - { 0, "Composite" }, - { 7, "S-Video" }, - { 5, "Internal/comp" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, - .tvn = { - &f50sqpixel, - &f60sqpixel, - &f50sqpixel - }, - .jpeg_int = ZR36057_ISR_GIRQ0, - .vsync_int = ZR36057_ISR_GIRQ1, - .gpio = { 3, 0, 6, 1, 2, -1, 4, 5 }, - .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, - .gpcs = { -1, 1 }, - .vfe_pol = { 1, 1, 1, 1, 0, 0, 0, 0 }, - .gws_not_connected = 0, - .input_mux = 0, - .init = &dc10plus_init, - }, { - .type = DC30, - .name = "DC30", - .i2c_decoder = "vpx3220a", - .addrs_decoder = vpx3220_addrs, - .i2c_encoder = "adv7175", - .addrs_encoder = adv717x_addrs, - .video_codec = CODEC_TYPE_ZR36050, - .video_vfe = CODEC_TYPE_ZR36016, - - .inputs = 3, - .input = { - { 1, "Composite" }, - { 2, "S-Video" }, - { 0, "Internal/comp" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, - .tvn = { - &f50sqpixel_dc10, - &f60sqpixel_dc10, - &f50sqpixel_dc10 - }, - .jpeg_int = 0, - .vsync_int = ZR36057_ISR_GIRQ1, - .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 }, - .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 }, - .gpcs = { -1, 0 }, - .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, - .gws_not_connected = 0, - .input_mux = 0, - .init = &dc10_init, - }, { - .type = DC30plus, - .name = "DC30plus", - .i2c_decoder = "vpx3220a", - .addrs_decoder = vpx3220_addrs, - 
.i2c_encoder = "adv7175", - .addrs_encoder = adv717x_addrs, - .video_codec = CODEC_TYPE_ZR36050, - .video_vfe = CODEC_TYPE_ZR36016, - - .inputs = 3, - .input = { - { 1, "Composite" }, - { 2, "S-Video" }, - { 0, "Internal/comp" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, - .tvn = { - &f50sqpixel_dc10, - &f60sqpixel_dc10, - &f50sqpixel_dc10 - }, - .jpeg_int = 0, - .vsync_int = ZR36057_ISR_GIRQ1, - .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 }, - .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 }, - .gpcs = { -1, 0 }, - .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, - .gws_not_connected = 0, - .input_mux = 0, - .init = &dc10_init, - }, { - .type = LML33, - .name = "LML33", - .i2c_decoder = "bt819a", - .addrs_decoder = bt819_addrs, - .i2c_encoder = "bt856", - .addrs_encoder = bt856_addrs, - .video_codec = CODEC_TYPE_ZR36060, - - .inputs = 2, - .input = { - { 0, "Composite" }, - { 7, "S-Video" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL, - .tvn = { - &f50ccir601_lml33, - &f60ccir601_lml33, - NULL - }, - .jpeg_int = ZR36057_ISR_GIRQ1, - .vsync_int = ZR36057_ISR_GIRQ0, - .gpio = { 1, -1, 3, 5, 7, -1, -1, -1 }, - .gpio_pol = { 0, 0, 0, 0, 1, 0, 0, 0 }, - .gpcs = { 3, 1 }, - .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 }, - .gws_not_connected = 1, - .input_mux = 0, - .init = &lml33_init, - }, { - .type = LML33R10, - .name = "LML33R10", - .i2c_decoder = "saa7114", - .addrs_decoder = saa7114_addrs, - .i2c_encoder = "adv7170", - .addrs_encoder = adv717x_addrs, - .video_codec = CODEC_TYPE_ZR36060, - - .inputs = 2, - .input = { - { 0, "Composite" }, - { 7, "S-Video" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL, - .tvn = { - &f50ccir601_lm33r10, - &f60ccir601_lm33r10, - NULL - }, - .jpeg_int = ZR36057_ISR_GIRQ1, - .vsync_int = ZR36057_ISR_GIRQ0, - .gpio = { 1, -1, 3, 5, 7, -1, -1, -1 }, - .gpio_pol = { 0, 0, 0, 0, 1, 0, 0, 0 }, - .gpcs = { 3, 1 }, - .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 }, - .gws_not_connected = 1, - .input_mux = 0, - .init = &lml33_init, - }, { - .type = BUZ, - .name = "Buz", - 
.i2c_decoder = "saa7111", - .addrs_decoder = saa7111_addrs, - .i2c_encoder = "saa7185", - .addrs_encoder = saa7185_addrs, - .video_codec = CODEC_TYPE_ZR36060, - - .inputs = 2, - .input = { - { 3, "Composite" }, - { 7, "S-Video" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, - .tvn = { - &f50ccir601, - &f60ccir601, - &f50ccir601 - }, - .jpeg_int = ZR36057_ISR_GIRQ1, - .vsync_int = ZR36057_ISR_GIRQ0, - .gpio = { 1, -1, 3, -1, -1, -1, -1, -1 }, - .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, - .gpcs = { 3, 1 }, - .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 }, - .gws_not_connected = 1, - .input_mux = 0, - .init = &buz_init, - }, { - .type = AVS6EYES, - .name = "6-Eyes", - /* AverMedia chose not to brand the 6-Eyes. Thus it - can't be autodetected, and requires card=x. */ - .i2c_decoder = "ks0127", - .addrs_decoder = ks0127_addrs, - .i2c_encoder = "bt866", - .addrs_encoder = bt866_addrs, - .video_codec = CODEC_TYPE_ZR36060, - - .inputs = 10, - .input = { - { 0, "Composite 1" }, - { 1, "Composite 2" }, - { 2, "Composite 3" }, - { 4, "Composite 4" }, - { 5, "Composite 5" }, - { 6, "Composite 6" }, - { 8, "S-Video 1" }, - { 9, "S-Video 2" }, - {10, "S-Video 3" }, - {15, "YCbCr" } - }, - .norms = V4L2_STD_NTSC|V4L2_STD_PAL, - .tvn = { - &f50ccir601_avs6eyes, - &f60ccir601_avs6eyes, - NULL - }, - .jpeg_int = ZR36057_ISR_GIRQ1, - .vsync_int = ZR36057_ISR_GIRQ0, - .gpio = { 1, 0, 3, -1, -1, -1, -1, -1 },// Validity unknown /Sam - .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, // Validity unknown /Sam - .gpcs = { 3, 1 }, // Validity unknown /Sam - .vfe_pol = { 1, 0, 0, 0, 0, 1, 0, 0 }, // Validity unknown /Sam - .gws_not_connected = 1, - .input_mux = 1, - .init = &avs6eyes_init, - } - -}; - -/* - * I2C functions - */ -/* software I2C functions */ -static int -zoran_i2c_getsda (void *data) -{ - struct zoran *zr = (struct zoran *) data; - - return (btread(ZR36057_I2CBR) >> 1) & 1; -} - -static int -zoran_i2c_getscl (void *data) -{ - struct zoran *zr = (struct zoran *) data; - - return 
btread(ZR36057_I2CBR) & 1; -} - -static void -zoran_i2c_setsda (void *data, - int state) -{ - struct zoran *zr = (struct zoran *) data; - - if (state) - zr->i2cbr |= 2; - else - zr->i2cbr &= ~2; - btwrite(zr->i2cbr, ZR36057_I2CBR); -} - -static void -zoran_i2c_setscl (void *data, - int state) -{ - struct zoran *zr = (struct zoran *) data; - - if (state) - zr->i2cbr |= 1; - else - zr->i2cbr &= ~1; - btwrite(zr->i2cbr, ZR36057_I2CBR); -} - -static const struct i2c_algo_bit_data zoran_i2c_bit_data_template = { - .setsda = zoran_i2c_setsda, - .setscl = zoran_i2c_setscl, - .getsda = zoran_i2c_getsda, - .getscl = zoran_i2c_getscl, - .udelay = 10, - .timeout = 100, -}; - -static int -zoran_register_i2c (struct zoran *zr) -{ - zr->i2c_algo = zoran_i2c_bit_data_template; - zr->i2c_algo.data = zr; - strlcpy(zr->i2c_adapter.name, ZR_DEVNAME(zr), - sizeof(zr->i2c_adapter.name)); - i2c_set_adapdata(&zr->i2c_adapter, &zr->v4l2_dev); - zr->i2c_adapter.algo_data = &zr->i2c_algo; - zr->i2c_adapter.dev.parent = &zr->pci_dev->dev; - return i2c_bit_add_bus(&zr->i2c_adapter); -} - -static void -zoran_unregister_i2c (struct zoran *zr) -{ - i2c_del_adapter(&zr->i2c_adapter); -} - -/* Check a zoran_params struct for correctness, insert default params */ - -int -zoran_check_jpg_settings (struct zoran *zr, - struct zoran_jpg_settings *settings, - int try) -{ - int err = 0, err0 = 0; - - dprintk(4, - KERN_DEBUG - "%s: %s - dec: %d, Hdcm: %d, Vdcm: %d, Tdcm: %d\n", - ZR_DEVNAME(zr), __func__, settings->decimation, settings->HorDcm, - settings->VerDcm, settings->TmpDcm); - dprintk(4, - KERN_DEBUG - "%s: %s - x: %d, y: %d, w: %d, y: %d\n", - ZR_DEVNAME(zr), __func__, settings->img_x, settings->img_y, - settings->img_width, settings->img_height); - /* Check decimation, set default values for decimation = 1, 2, 4 */ - switch (settings->decimation) { - case 1: - - settings->HorDcm = 1; - settings->VerDcm = 1; - settings->TmpDcm = 1; - settings->field_per_buff = 2; - settings->img_x = 0; - 
settings->img_y = 0; - settings->img_width = BUZ_MAX_WIDTH; - settings->img_height = BUZ_MAX_HEIGHT / 2; - break; - case 2: - - settings->HorDcm = 2; - settings->VerDcm = 1; - settings->TmpDcm = 2; - settings->field_per_buff = 1; - settings->img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; - settings->img_y = 0; - settings->img_width = - (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; - settings->img_height = BUZ_MAX_HEIGHT / 2; - break; - case 4: - - if (zr->card.type == DC10_new) { - dprintk(1, - KERN_DEBUG - "%s: %s - HDec by 4 is not supported on the DC10\n", - ZR_DEVNAME(zr), __func__); - err0++; - break; - } - - settings->HorDcm = 4; - settings->VerDcm = 2; - settings->TmpDcm = 2; - settings->field_per_buff = 1; - settings->img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; - settings->img_y = 0; - settings->img_width = - (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; - settings->img_height = BUZ_MAX_HEIGHT / 2; - break; - case 0: - - /* We have to check the data the user has set */ - - if (settings->HorDcm != 1 && settings->HorDcm != 2 && - (zr->card.type == DC10_new || settings->HorDcm != 4)) { - settings->HorDcm = clamp(settings->HorDcm, 1, 2); - err0++; - } - if (settings->VerDcm != 1 && settings->VerDcm != 2) { - settings->VerDcm = clamp(settings->VerDcm, 1, 2); - err0++; - } - if (settings->TmpDcm != 1 && settings->TmpDcm != 2) { - settings->TmpDcm = clamp(settings->TmpDcm, 1, 2); - err0++; - } - if (settings->field_per_buff != 1 && - settings->field_per_buff != 2) { - settings->field_per_buff = clamp(settings->field_per_buff, 1, 2); - err0++; - } - if (settings->img_x < 0) { - settings->img_x = 0; - err0++; - } - if (settings->img_y < 0) { - settings->img_y = 0; - err0++; - } - if (settings->img_width < 0 || settings->img_width > BUZ_MAX_WIDTH) { - settings->img_width = clamp(settings->img_width, 0, (int)BUZ_MAX_WIDTH); - err0++; - } - if (settings->img_height < 0 || settings->img_height > BUZ_MAX_HEIGHT / 2) { - settings->img_height = clamp(settings->img_height, 0, 
BUZ_MAX_HEIGHT / 2); - err0++; - } - if (settings->img_x + settings->img_width > BUZ_MAX_WIDTH) { - settings->img_x = BUZ_MAX_WIDTH - settings->img_width; - err0++; - } - if (settings->img_y + settings->img_height > BUZ_MAX_HEIGHT / 2) { - settings->img_y = BUZ_MAX_HEIGHT / 2 - settings->img_height; - err0++; - } - if (settings->img_width % (16 * settings->HorDcm) != 0) { - settings->img_width -= settings->img_width % (16 * settings->HorDcm); - if (settings->img_width == 0) - settings->img_width = 16 * settings->HorDcm; - err0++; - } - if (settings->img_height % (8 * settings->VerDcm) != 0) { - settings->img_height -= settings->img_height % (8 * settings->VerDcm); - if (settings->img_height == 0) - settings->img_height = 8 * settings->VerDcm; - err0++; - } - - if (!try && err0) { - dprintk(1, - KERN_ERR - "%s: %s - error in params for decimation = 0\n", - ZR_DEVNAME(zr), __func__); - err++; - } - break; - default: - dprintk(1, - KERN_ERR - "%s: %s - decimation = %d, must be 0, 1, 2 or 4\n", - ZR_DEVNAME(zr), __func__, settings->decimation); - err++; - break; - } - - if (settings->jpg_comp.quality > 100) - settings->jpg_comp.quality = 100; - if (settings->jpg_comp.quality < 5) - settings->jpg_comp.quality = 5; - if (settings->jpg_comp.APPn < 0) - settings->jpg_comp.APPn = 0; - if (settings->jpg_comp.APPn > 15) - settings->jpg_comp.APPn = 15; - if (settings->jpg_comp.APP_len < 0) - settings->jpg_comp.APP_len = 0; - if (settings->jpg_comp.APP_len > 60) - settings->jpg_comp.APP_len = 60; - if (settings->jpg_comp.COM_len < 0) - settings->jpg_comp.COM_len = 0; - if (settings->jpg_comp.COM_len > 60) - settings->jpg_comp.COM_len = 60; - if (err) - return -EINVAL; - return 0; -} - -void -zoran_open_init_params (struct zoran *zr) -{ - int i; - - /* User must explicitly set a window */ - zr->overlay_settings.is_set = 0; - zr->overlay_mask = NULL; - zr->overlay_active = ZORAN_FREE; - - zr->v4l_memgrab_active = 0; - zr->v4l_overlay_active = 0; - zr->v4l_grab_frame = 
NO_GRAB_ACTIVE; - zr->v4l_grab_seq = 0; - zr->v4l_settings.width = 192; - zr->v4l_settings.height = 144; - zr->v4l_settings.format = &zoran_formats[7]; /* YUY2 - YUV-4:2:2 packed */ - zr->v4l_settings.bytesperline = - zr->v4l_settings.width * - ((zr->v4l_settings.format->depth + 7) / 8); - - /* DMA ring stuff for V4L */ - zr->v4l_pend_tail = 0; - zr->v4l_pend_head = 0; - zr->v4l_sync_tail = 0; - zr->v4l_buffers.active = ZORAN_FREE; - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - zr->v4l_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ - } - zr->v4l_buffers.allocated = 0; - - for (i = 0; i < BUZ_MAX_FRAME; i++) { - zr->jpg_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ - } - zr->jpg_buffers.active = ZORAN_FREE; - zr->jpg_buffers.allocated = 0; - /* Set necessary params and call zoran_check_jpg_settings to set the defaults */ - zr->jpg_settings.decimation = 1; - zr->jpg_settings.jpg_comp.quality = 50; /* default compression factor 8 */ - if (zr->card.type != BUZ) - zr->jpg_settings.odd_even = 1; - else - zr->jpg_settings.odd_even = 0; - zr->jpg_settings.jpg_comp.APPn = 0; - zr->jpg_settings.jpg_comp.APP_len = 0; /* No APPn marker */ - memset(zr->jpg_settings.jpg_comp.APP_data, 0, - sizeof(zr->jpg_settings.jpg_comp.APP_data)); - zr->jpg_settings.jpg_comp.COM_len = 0; /* No COM marker */ - memset(zr->jpg_settings.jpg_comp.COM_data, 0, - sizeof(zr->jpg_settings.jpg_comp.COM_data)); - zr->jpg_settings.jpg_comp.jpeg_markers = - V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; - i = zoran_check_jpg_settings(zr, &zr->jpg_settings, 0); - if (i) - dprintk(1, KERN_ERR "%s: %s internal error\n", - ZR_DEVNAME(zr), __func__); - - clear_interrupt_counters(zr); - zr->testing = 0; -} - -static void test_interrupts (struct zoran *zr) -{ - DEFINE_WAIT(wait); - int timeout, icr; - - clear_interrupt_counters(zr); - - zr->testing = 1; - icr = btread(ZR36057_ICR); - btwrite(0x78000000 | ZR36057_ICR_IntPinEn, ZR36057_ICR); - prepare_to_wait(&zr->test_q, &wait, 
TASK_INTERRUPTIBLE); - timeout = schedule_timeout(HZ); - finish_wait(&zr->test_q, &wait); - btwrite(0, ZR36057_ICR); - btwrite(0x78000000, ZR36057_ISR); - zr->testing = 0; - dprintk(5, KERN_INFO "%s: Testing interrupts...\n", ZR_DEVNAME(zr)); - if (timeout) { - dprintk(1, ": time spent: %d\n", 1 * HZ - timeout); - } - if (zr36067_debug > 1) - print_interrupts(zr); - btwrite(icr, ZR36057_ICR); -} - -static int zr36057_init (struct zoran *zr) -{ - int j, err; - - dprintk(1, - KERN_INFO - "%s: %s - initializing card[%d], zr=%p\n", - ZR_DEVNAME(zr), __func__, zr->id, zr); - - /* default setup of all parameters which will persist between opens */ - zr->user = 0; - - init_waitqueue_head(&zr->v4l_capq); - init_waitqueue_head(&zr->jpg_capq); - init_waitqueue_head(&zr->test_q); - zr->jpg_buffers.allocated = 0; - zr->v4l_buffers.allocated = 0; - - zr->vbuf_base = (void *) vidmem; - zr->vbuf_width = 0; - zr->vbuf_height = 0; - zr->vbuf_depth = 0; - zr->vbuf_bytesperline = 0; - - /* Avoid nonsense settings from user for default input/norm */ - if (default_norm < 0 || default_norm > 2) - default_norm = 0; - if (default_norm == 0) { - zr->norm = V4L2_STD_PAL; - zr->timing = zr->card.tvn[0]; - } else if (default_norm == 1) { - zr->norm = V4L2_STD_NTSC; - zr->timing = zr->card.tvn[1]; - } else { - zr->norm = V4L2_STD_SECAM; - zr->timing = zr->card.tvn[2]; - } - if (zr->timing == NULL) { - dprintk(1, - KERN_WARNING - "%s: %s - default TV standard not supported by hardware. 
PAL will be used.\n", - ZR_DEVNAME(zr), __func__); - zr->norm = V4L2_STD_PAL; - zr->timing = zr->card.tvn[0]; - } - - if (default_input > zr->card.inputs-1) { - dprintk(1, - KERN_WARNING - "%s: default_input value %d out of range (0-%d)\n", - ZR_DEVNAME(zr), default_input, zr->card.inputs-1); - default_input = 0; - } - zr->input = default_input; - - /* default setup (will be repeated at every open) */ - zoran_open_init_params(zr); - - /* allocate memory *before* doing anything to the hardware - * in case allocation fails */ - zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL); - zr->video_dev = video_device_alloc(); - if (!zr->stat_com || !zr->video_dev) { - dprintk(1, - KERN_ERR - "%s: %s - kmalloc (STAT_COM) failed\n", - ZR_DEVNAME(zr), __func__); - err = -ENOMEM; - goto exit_free; - } - for (j = 0; j < BUZ_NUM_STAT_COM; j++) { - zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */ - } - - /* - * Now add the template and register the device unit. - */ - *zr->video_dev = zoran_template; - zr->video_dev->v4l2_dev = &zr->v4l2_dev; - zr->video_dev->lock = &zr->lock; - strcpy(zr->video_dev->name, ZR_DEVNAME(zr)); - /* It's not a mem2mem device, but you can both capture and output from - one and the same device. This should really be split up into two - device nodes, but that's a job for another day. 
*/ - zr->video_dev->vfl_dir = VFL_DIR_M2M; - err = video_register_device(zr->video_dev, VFL_TYPE_GRABBER, video_nr[zr->id]); - if (err < 0) - goto exit_free; - video_set_drvdata(zr->video_dev, zr); - - zoran_init_hardware(zr); - if (zr36067_debug > 2) - detect_guest_activity(zr); - test_interrupts(zr); - if (!pass_through) { - decoder_call(zr, video, s_stream, 0); - encoder_call(zr, video, s_routing, 2, 0, 0); - } - - zr->zoran_proc = NULL; - zr->initialized = 1; - return 0; - -exit_free: - kfree(zr->stat_com); - kfree(zr->video_dev); - return err; -} - -static void zoran_remove(struct pci_dev *pdev) -{ - struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev); - struct zoran *zr = to_zoran(v4l2_dev); - - if (!zr->initialized) - goto exit_free; - - /* unregister videocodec bus */ - if (zr->codec) { - struct videocodec_master *master = zr->codec->master_data; - - videocodec_detach(zr->codec); - kfree(master); - } - if (zr->vfe) { - struct videocodec_master *master = zr->vfe->master_data; - - videocodec_detach(zr->vfe); - kfree(master); - } - - /* unregister i2c bus */ - zoran_unregister_i2c(zr); - /* disable PCI bus-mastering */ - zoran_set_pci_master(zr, 0); - /* put chip into reset */ - btwrite(0, ZR36057_SPGPPCR); - free_irq(zr->pci_dev->irq, zr); - /* unmap and free memory */ - kfree(zr->stat_com); - zoran_proc_cleanup(zr); - iounmap(zr->zr36057_mem); - pci_disable_device(zr->pci_dev); - video_unregister_device(zr->video_dev); -exit_free: - v4l2_ctrl_handler_free(&zr->hdl); - v4l2_device_unregister(&zr->v4l2_dev); - kfree(zr); -} - -void -zoran_vdev_release (struct video_device *vdev) -{ - kfree(vdev); -} - -static struct videocodec_master *zoran_setup_videocodec(struct zoran *zr, - int type) -{ - struct videocodec_master *m = NULL; - - m = kmalloc(sizeof(struct videocodec_master), GFP_KERNEL); - if (!m) { - dprintk(1, KERN_ERR "%s: %s - no memory\n", - ZR_DEVNAME(zr), __func__); - return m; - } - - /* magic and type are unused for master struct. 
Makes sense only at - codec structs. - In the past, .type were initialized to the old V4L1 .hardware - value, as VID_HARDWARE_ZR36067 - */ - m->magic = 0L; - m->type = 0; - - m->flags = CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER; - strlcpy(m->name, ZR_DEVNAME(zr), sizeof(m->name)); - m->data = zr; - - switch (type) - { - case CODEC_TYPE_ZR36060: - m->readreg = zr36060_read; - m->writereg = zr36060_write; - m->flags |= CODEC_FLAG_JPEG | CODEC_FLAG_VFE; - break; - case CODEC_TYPE_ZR36050: - m->readreg = zr36050_read; - m->writereg = zr36050_write; - m->flags |= CODEC_FLAG_JPEG; - break; - case CODEC_TYPE_ZR36016: - m->readreg = zr36016_read; - m->writereg = zr36016_write; - m->flags |= CODEC_FLAG_VFE; - break; - } - - return m; -} - -static void zoran_subdev_notify(struct v4l2_subdev *sd, unsigned int cmd, void *arg) -{ - struct zoran *zr = to_zoran(sd->v4l2_dev); - - /* Bt819 needs to reset its FIFO buffer using #FRST pin and - LML33 card uses GPIO(7) for that. */ - if (cmd == BT819_FIFO_RESET_LOW) - GPIO(zr, 7, 0); - else if (cmd == BT819_FIFO_RESET_HIGH) - GPIO(zr, 7, 1); -} - -/* - * Scan for a Buz card (actually for the PCI controller ZR36057), - * request the irq and map the io memory - */ -static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - unsigned char latency, need_latency; - struct zoran *zr; - int result; - struct videocodec_master *master_vfe = NULL; - struct videocodec_master *master_codec = NULL; - int card_num; - char *codec_name, *vfe_name; - unsigned int nr; - - - nr = zoran_num++; - if (nr >= BUZ_MAX) { - dprintk(1, KERN_ERR "%s: driver limited to %d card(s) maximum\n", - ZORAN_NAME, BUZ_MAX); - return -ENOENT; - } - - zr = kzalloc(sizeof(struct zoran), GFP_KERNEL); - if (!zr) { - dprintk(1, KERN_ERR "%s: %s - kzalloc failed\n", - ZORAN_NAME, __func__); - return -ENOMEM; - } - zr->v4l2_dev.notify = zoran_subdev_notify; - if (v4l2_device_register(&pdev->dev, &zr->v4l2_dev)) - goto zr_free_mem; - zr->pci_dev = pdev; - 
zr->id = nr; - snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id); - if (v4l2_ctrl_handler_init(&zr->hdl, 10)) - goto zr_unreg; - zr->v4l2_dev.ctrl_handler = &zr->hdl; - spin_lock_init(&zr->spinlock); - mutex_init(&zr->lock); - if (pci_enable_device(pdev)) - goto zr_unreg; - zr->revision = zr->pci_dev->revision; - - dprintk(1, - KERN_INFO - "%s: Zoran ZR360%c7 (rev %d), irq: %d, memory: 0x%08llx\n", - ZR_DEVNAME(zr), zr->revision < 2 ? '5' : '6', zr->revision, - zr->pci_dev->irq, (uint64_t)pci_resource_start(zr->pci_dev, 0)); - if (zr->revision >= 2) { - dprintk(1, - KERN_INFO - "%s: Subsystem vendor=0x%04x id=0x%04x\n", - ZR_DEVNAME(zr), zr->pci_dev->subsystem_vendor, - zr->pci_dev->subsystem_device); - } - - /* Use auto-detected card type? */ - if (card[nr] == -1) { - if (zr->revision < 2) { - dprintk(1, - KERN_ERR - "%s: No card type specified, please use the card=X module parameter\n", - ZR_DEVNAME(zr)); - dprintk(1, - KERN_ERR - "%s: It is not possible to auto-detect ZR36057 based cards\n", - ZR_DEVNAME(zr)); - goto zr_unreg; - } - - card_num = ent->driver_data; - if (card_num >= NUM_CARDS) { - dprintk(1, - KERN_ERR - "%s: Unknown card, try specifying card=X module parameter\n", - ZR_DEVNAME(zr)); - goto zr_unreg; - } - dprintk(3, - KERN_DEBUG - "%s: %s() - card %s detected\n", - ZR_DEVNAME(zr), __func__, zoran_cards[card_num].name); - } else { - card_num = card[nr]; - if (card_num >= NUM_CARDS || card_num < 0) { - dprintk(1, - KERN_ERR - "%s: User specified card type %d out of range (0 .. %d)\n", - ZR_DEVNAME(zr), card_num, NUM_CARDS - 1); - goto zr_unreg; - } - } - - /* even though we make this a non pointer and thus - * theoretically allow for making changes to this struct - * on a per-individual card basis at runtime, this is - * strongly discouraged. 
This structure is intended to - * keep general card information, no settings or anything */ - zr->card = zoran_cards[card_num]; - snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), - "%s[%u]", zr->card.name, zr->id); - - zr->zr36057_mem = pci_ioremap_bar(zr->pci_dev, 0); - if (!zr->zr36057_mem) { - dprintk(1, KERN_ERR "%s: %s() - ioremap failed\n", - ZR_DEVNAME(zr), __func__); - goto zr_unreg; - } - - result = request_irq(zr->pci_dev->irq, zoran_irq, - IRQF_SHARED, ZR_DEVNAME(zr), zr); - if (result < 0) { - if (result == -EINVAL) { - dprintk(1, - KERN_ERR - "%s: %s - bad irq number or handler\n", - ZR_DEVNAME(zr), __func__); - } else if (result == -EBUSY) { - dprintk(1, - KERN_ERR - "%s: %s - IRQ %d busy, change your PnP config in BIOS\n", - ZR_DEVNAME(zr), __func__, zr->pci_dev->irq); - } else { - dprintk(1, - KERN_ERR - "%s: %s - can't assign irq, error code %d\n", - ZR_DEVNAME(zr), __func__, result); - } - goto zr_unmap; - } - - /* set PCI latency timer */ - pci_read_config_byte(zr->pci_dev, PCI_LATENCY_TIMER, - &latency); - need_latency = zr->revision > 1 ? 
32 : 48; - if (latency != need_latency) { - dprintk(2, KERN_INFO "%s: Changing PCI latency from %d to %d\n", - ZR_DEVNAME(zr), latency, need_latency); - pci_write_config_byte(zr->pci_dev, PCI_LATENCY_TIMER, - need_latency); - } - - zr36057_restart(zr); - /* i2c */ - dprintk(2, KERN_INFO "%s: Initializing i2c bus...\n", - ZR_DEVNAME(zr)); - - if (zoran_register_i2c(zr) < 0) { - dprintk(1, KERN_ERR "%s: %s - can't initialize i2c bus\n", - ZR_DEVNAME(zr), __func__); - goto zr_free_irq; - } - - zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, - &zr->i2c_adapter, zr->card.i2c_decoder, - 0, zr->card.addrs_decoder); - - if (zr->card.i2c_encoder) - zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, - &zr->i2c_adapter, zr->card.i2c_encoder, - 0, zr->card.addrs_encoder); - - dprintk(2, - KERN_INFO "%s: Initializing videocodec bus...\n", - ZR_DEVNAME(zr)); - - if (zr->card.video_codec) { - codec_name = codecid_to_modulename(zr->card.video_codec); - if (codec_name) { - result = request_module(codec_name); - if (result) { - dprintk(1, - KERN_ERR - "%s: failed to load modules %s: %d\n", - ZR_DEVNAME(zr), codec_name, result); - } - } - } - if (zr->card.video_vfe) { - vfe_name = codecid_to_modulename(zr->card.video_vfe); - if (vfe_name) { - result = request_module(vfe_name); - if (result < 0) { - dprintk(1, - KERN_ERR - "%s: failed to load modules %s: %d\n", - ZR_DEVNAME(zr), vfe_name, result); - } - } - } - - /* reset JPEG codec */ - jpeg_codec_sleep(zr, 1); - jpeg_codec_reset(zr); - /* video bus enabled */ - /* display codec revision */ - if (zr->card.video_codec != 0) { - master_codec = zoran_setup_videocodec(zr, zr->card.video_codec); - if (!master_codec) - goto zr_unreg_i2c; - zr->codec = videocodec_attach(master_codec); - if (!zr->codec) { - dprintk(1, KERN_ERR "%s: %s - no codec found\n", - ZR_DEVNAME(zr), __func__); - goto zr_free_codec; - } - if (zr->codec->type != zr->card.video_codec) { - dprintk(1, KERN_ERR "%s: %s - wrong codec\n", - ZR_DEVNAME(zr), __func__); - goto 
zr_detach_codec; - } - } - if (zr->card.video_vfe != 0) { - master_vfe = zoran_setup_videocodec(zr, zr->card.video_vfe); - if (!master_vfe) - goto zr_detach_codec; - zr->vfe = videocodec_attach(master_vfe); - if (!zr->vfe) { - dprintk(1, KERN_ERR "%s: %s - no VFE found\n", - ZR_DEVNAME(zr), __func__); - goto zr_free_vfe; - } - if (zr->vfe->type != zr->card.video_vfe) { - dprintk(1, KERN_ERR "%s: %s = wrong VFE\n", - ZR_DEVNAME(zr), __func__); - goto zr_detach_vfe; - } - } - - /* take care of Natoma chipset and a revision 1 zr36057 */ - if ((pci_pci_problems & PCIPCI_NATOMA) && zr->revision <= 1) { - zr->jpg_buffers.need_contiguous = 1; - dprintk(1, KERN_INFO - "%s: ZR36057/Natoma bug, max. buffer size is 128K\n", - ZR_DEVNAME(zr)); - } - - if (zr36057_init(zr) < 0) - goto zr_detach_vfe; - - zoran_proc_init(zr); - - return 0; - -zr_detach_vfe: - videocodec_detach(zr->vfe); -zr_free_vfe: - kfree(master_vfe); -zr_detach_codec: - videocodec_detach(zr->codec); -zr_free_codec: - kfree(master_codec); -zr_unreg_i2c: - zoran_unregister_i2c(zr); -zr_free_irq: - btwrite(0, ZR36057_SPGPPCR); - free_irq(zr->pci_dev->irq, zr); -zr_unmap: - iounmap(zr->zr36057_mem); -zr_unreg: - v4l2_ctrl_handler_free(&zr->hdl); - v4l2_device_unregister(&zr->v4l2_dev); -zr_free_mem: - kfree(zr); - - return -ENODEV; -} - -static struct pci_driver zoran_driver = { - .name = "zr36067", - .id_table = zr36067_pci_tbl, - .probe = zoran_probe, - .remove = zoran_remove, -}; - -static int __init zoran_init(void) -{ - int res; - - printk(KERN_INFO "Zoran MJPEG board driver version %s\n", - ZORAN_VERSION); - - /* check the parameters we have been given, adjust if necessary */ - if (v4l_nbufs < 2) - v4l_nbufs = 2; - if (v4l_nbufs > VIDEO_MAX_FRAME) - v4l_nbufs = VIDEO_MAX_FRAME; - /* The user specfies the in KB, we want them in byte - * (and page aligned) */ - v4l_bufsize = PAGE_ALIGN(v4l_bufsize * 1024); - if (v4l_bufsize < 32768) - v4l_bufsize = 32768; - /* 2 MB is arbitrary but sufficient for the maximum 
possible images */ - if (v4l_bufsize > 2048 * 1024) - v4l_bufsize = 2048 * 1024; - if (jpg_nbufs < 4) - jpg_nbufs = 4; - if (jpg_nbufs > BUZ_MAX_FRAME) - jpg_nbufs = BUZ_MAX_FRAME; - jpg_bufsize = PAGE_ALIGN(jpg_bufsize * 1024); - if (jpg_bufsize < 8192) - jpg_bufsize = 8192; - if (jpg_bufsize > (512 * 1024)) - jpg_bufsize = 512 * 1024; - /* Use parameter for vidmem or try to find a video card */ - if (vidmem) { - dprintk(1, - KERN_INFO - "%s: Using supplied video memory base address @ 0x%lx\n", - ZORAN_NAME, vidmem); - } - - /* some mainboards might not do PCI-PCI data transfer well */ - if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL|PCIPCI_ALIMAGIK)) { - dprintk(1, - KERN_WARNING - "%s: chipset does not support reliable PCI-PCI DMA\n", - ZORAN_NAME); - } - - res = pci_register_driver(&zoran_driver); - if (res) { - dprintk(1, - KERN_ERR - "%s: Unable to register ZR36057 driver\n", - ZORAN_NAME); - return res; - } - - return 0; -} - -static void __exit zoran_exit(void) -{ - pci_unregister_driver(&zoran_driver); -} - -module_init(zoran_init); -module_exit(zoran_exit); diff --git a/drivers/media/pci/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h deleted file mode 100644 index 0cdb7d34926d..000000000000 --- a/drivers/media/pci/zoran/zoran_card.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Zoran zr36057/zr36067 PCI controller driver, for the - * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux - * Media Labs LML33/LML33R10. - * - * This part handles card-specific data and detection - * - * Copyright (C) 2000 Serguei Miridonov - * - * Currently maintained by: - * Ronald Bultje - * Laurent Pinchart - * Mailinglist - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ZORAN_CARD_H__ -#define __ZORAN_CARD_H__ - -extern int zr36067_debug; - -#define dprintk(num, format, args...) \ - do { \ - if (zr36067_debug >= num) \ - printk(format, ##args); \ - } while (0) - -/* Anybody who uses more than four? */ -#define BUZ_MAX 4 - -extern const struct video_device zoran_template; - -extern int zoran_check_jpg_settings(struct zoran *zr, - struct zoran_jpg_settings *settings, - int try); -extern void zoran_open_init_params(struct zoran *zr); -extern void zoran_vdev_release(struct video_device *vdev); - -void zr36016_write(struct videocodec *codec, u16 reg, u32 val); - -#endif /* __ZORAN_CARD_H__ */ diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c deleted file mode 100644 index 40adceebca7e..000000000000 --- a/drivers/media/pci/zoran/zoran_device.c +++ /dev/null @@ -1,1619 +0,0 @@ -/* - * Zoran zr36057/zr36067 PCI controller driver, for the - * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux - * Media Labs LML33/LML33R10. - * - * This part handles device access (PCI/I2C/codec/...) - * - * Copyright (C) 2000 Serguei Miridonov - * - * Currently maintained by: - * Ronald Bultje - * Laurent Pinchart - * Mailinglist - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include - -#include "videocodec.h" -#include "zoran.h" -#include "zoran_device.h" -#include "zoran_card.h" - -#define IRQ_MASK ( ZR36057_ISR_GIRQ0 | \ - ZR36057_ISR_GIRQ1 | \ - ZR36057_ISR_JPEGRepIRQ ) - -static bool lml33dpath; /* default = 0 - * 1 will use digital path in capture - * mode instead of analog. It can be - * used for picture adjustments using - * tool like xawtv while watching image - * on TV monitor connected to the output. - * However, due to absence of 75 Ohm - * load on Bt819 input, there will be - * some image imperfections */ - -module_param(lml33dpath, bool, 0644); -MODULE_PARM_DESC(lml33dpath, - "Use digital path capture mode (on LML33 cards)"); - -static void -zr36057_init_vfe (struct zoran *zr); - -/* - * General Purpose I/O and Guest bus access - */ - -/* - * This is a bit tricky. When a board lacks a GPIO function, the corresponding - * GPIO bit number in the card_info structure is set to 0. 
- */ - -void -GPIO (struct zoran *zr, - int bit, - unsigned int value) -{ - u32 reg; - u32 mask; - - /* Make sure the bit number is legal - * A bit number of -1 (lacking) gives a mask of 0, - * making it harmless */ - mask = (1 << (24 + bit)) & 0xff000000; - reg = btread(ZR36057_GPPGCR1) & ~mask; - if (value) { - reg |= mask; - } - btwrite(reg, ZR36057_GPPGCR1); - udelay(1); -} - -/* - * Wait til post office is no longer busy - */ - -int -post_office_wait (struct zoran *zr) -{ - u32 por; - -// while (((por = btread(ZR36057_POR)) & (ZR36057_POR_POPen | ZR36057_POR_POTime)) == ZR36057_POR_POPen) { - while ((por = btread(ZR36057_POR)) & ZR36057_POR_POPen) { - /* wait for something to happen */ - } - if ((por & ZR36057_POR_POTime) && !zr->card.gws_not_connected) { - /* In LML33/BUZ \GWS line is not connected, so it has always timeout set */ - dprintk(1, KERN_INFO "%s: pop timeout %08x\n", ZR_DEVNAME(zr), - por); - return -1; - } - - return 0; -} - -int -post_office_write (struct zoran *zr, - unsigned int guest, - unsigned int reg, - unsigned int value) -{ - u32 por; - - por = - ZR36057_POR_PODir | ZR36057_POR_POTime | ((guest & 7) << 20) | - ((reg & 7) << 16) | (value & 0xFF); - btwrite(por, ZR36057_POR); - - return post_office_wait(zr); -} - -int -post_office_read (struct zoran *zr, - unsigned int guest, - unsigned int reg) -{ - u32 por; - - por = ZR36057_POR_POTime | ((guest & 7) << 20) | ((reg & 7) << 16); - btwrite(por, ZR36057_POR); - if (post_office_wait(zr) < 0) { - return -1; - } - - return btread(ZR36057_POR) & 0xFF; -} - -/* - * detect guests - */ - -static void -dump_guests (struct zoran *zr) -{ - if (zr36067_debug > 2) { - int i, guest[8]; - - for (i = 1; i < 8; i++) { // Don't read jpeg codec here - guest[i] = post_office_read(zr, i, 0); - } - - printk(KERN_INFO "%s: Guests: %*ph\n", - ZR_DEVNAME(zr), 8, guest); - } -} - -void -detect_guest_activity (struct zoran *zr) -{ - int timeout, i, j, res, guest[8], guest0[8], change[8][3]; - ktime_t t0, t1; - - 
dump_guests(zr); - printk(KERN_INFO "%s: Detecting guests activity, please wait...\n", - ZR_DEVNAME(zr)); - for (i = 1; i < 8; i++) { // Don't read jpeg codec here - guest0[i] = guest[i] = post_office_read(zr, i, 0); - } - - timeout = 0; - j = 0; - t0 = ktime_get(); - while (timeout < 10000) { - udelay(10); - timeout++; - for (i = 1; (i < 8) && (j < 8); i++) { - res = post_office_read(zr, i, 0); - if (res != guest[i]) { - t1 = ktime_get(); - change[j][0] = ktime_to_us(ktime_sub(t1, t0)); - t0 = t1; - change[j][1] = i; - change[j][2] = res; - j++; - guest[i] = res; - } - } - if (j >= 8) - break; - } - - printk(KERN_INFO "%s: Guests: %*ph\n", ZR_DEVNAME(zr), 8, guest0); - - if (j == 0) { - printk(KERN_INFO "%s: No activity detected.\n", ZR_DEVNAME(zr)); - return; - } - for (i = 0; i < j; i++) { - printk(KERN_INFO "%s: %6d: %d => 0x%02x\n", ZR_DEVNAME(zr), - change[i][0], change[i][1], change[i][2]); - } -} - -/* - * JPEG Codec access - */ - -void -jpeg_codec_sleep (struct zoran *zr, - int sleep) -{ - GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_SLEEP], !sleep); - if (!sleep) { - dprintk(3, - KERN_DEBUG - "%s: jpeg_codec_sleep() - wake GPIO=0x%08x\n", - ZR_DEVNAME(zr), btread(ZR36057_GPPGCR1)); - udelay(500); - } else { - dprintk(3, - KERN_DEBUG - "%s: jpeg_codec_sleep() - sleep GPIO=0x%08x\n", - ZR_DEVNAME(zr), btread(ZR36057_GPPGCR1)); - udelay(2); - } -} - -int -jpeg_codec_reset (struct zoran *zr) -{ - /* Take the codec out of sleep */ - jpeg_codec_sleep(zr, 0); - - if (zr->card.gpcs[GPCS_JPEG_RESET] != 0xff) { - post_office_write(zr, zr->card.gpcs[GPCS_JPEG_RESET], 0, - 0); - udelay(2); - } else { - GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 0); - udelay(2); - GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 1); - udelay(2); - } - - return 0; -} - -/* - * Set the registers for the size we have specified. Don't bother - * trying to understand this without the ZR36057 manual in front of - * you [AC]. 
- * - * PS: The manual is free for download in .pdf format from - * www.zoran.com - nicely done those folks. - */ - -static void -zr36057_adjust_vfe (struct zoran *zr, - enum zoran_codec_mode mode) -{ - u32 reg; - - switch (mode) { - case BUZ_MODE_MOTION_DECOMPRESS: - btand(~ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR); - reg = btread(ZR36057_VFEHCR); - if ((reg & (1 << 10)) && zr->card.type != LML33R10) { - reg += ((1 << 10) | 1); - } - btwrite(reg, ZR36057_VFEHCR); - break; - case BUZ_MODE_MOTION_COMPRESS: - case BUZ_MODE_IDLE: - default: - if ((zr->norm & V4L2_STD_NTSC) || - (zr->card.type == LML33R10 && - (zr->norm & V4L2_STD_PAL))) - btand(~ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR); - else - btor(ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR); - reg = btread(ZR36057_VFEHCR); - if (!(reg & (1 << 10)) && zr->card.type != LML33R10) { - reg -= ((1 << 10) | 1); - } - btwrite(reg, ZR36057_VFEHCR); - break; - } -} - -/* - * set geometry - */ - -static void -zr36057_set_vfe (struct zoran *zr, - int video_width, - int video_height, - const struct zoran_format *format) -{ - struct tvnorm *tvn; - unsigned HStart, HEnd, VStart, VEnd; - unsigned DispMode; - unsigned VidWinWid, VidWinHt; - unsigned hcrop1, hcrop2, vcrop1, vcrop2; - unsigned Wa, We, Ha, He; - unsigned X, Y, HorDcm, VerDcm; - u32 reg; - unsigned mask_line_size; - - tvn = zr->timing; - - Wa = tvn->Wa; - Ha = tvn->Ha; - - dprintk(2, KERN_INFO "%s: set_vfe() - width = %d, height = %d\n", - ZR_DEVNAME(zr), video_width, video_height); - - if (video_width < BUZ_MIN_WIDTH || - video_height < BUZ_MIN_HEIGHT || - video_width > Wa || video_height > Ha) { - dprintk(1, KERN_ERR "%s: set_vfe: w=%d h=%d not valid\n", - ZR_DEVNAME(zr), video_width, video_height); - return; - } - - /**** zr36057 ****/ - - /* horizontal */ - VidWinWid = video_width; - X = DIV_ROUND_UP(VidWinWid * 64, tvn->Wa); - We = (VidWinWid * 64) / X; - HorDcm = 64 - X; - hcrop1 = 2 * ((tvn->Wa - We) / 4); - hcrop2 = tvn->Wa - We - hcrop1; - HStart = tvn->HStart ? 
tvn->HStart : 1; - /* (Ronald) Original comment: - * "| 1 Doesn't have any effect, tested on both a DC10 and a DC10+" - * this is false. It inverses chroma values on the LML33R10 (so Cr - * suddenly is shown as Cb and reverse, really cool effect if you - * want to see blue faces, not useful otherwise). So don't use |1. - * However, the DC10 has '0' as HStart, but does need |1, so we - * use a dirty check... - */ - HEnd = HStart + tvn->Wa - 1; - HStart += hcrop1; - HEnd -= hcrop2; - reg = ((HStart & ZR36057_VFEHCR_Hmask) << ZR36057_VFEHCR_HStart) - | ((HEnd & ZR36057_VFEHCR_Hmask) << ZR36057_VFEHCR_HEnd); - if (zr->card.vfe_pol.hsync_pol) - reg |= ZR36057_VFEHCR_HSPol; - btwrite(reg, ZR36057_VFEHCR); - - /* Vertical */ - DispMode = !(video_height > BUZ_MAX_HEIGHT / 2); - VidWinHt = DispMode ? video_height : video_height / 2; - Y = DIV_ROUND_UP(VidWinHt * 64 * 2, tvn->Ha); - He = (VidWinHt * 64) / Y; - VerDcm = 64 - Y; - vcrop1 = (tvn->Ha / 2 - He) / 2; - vcrop2 = tvn->Ha / 2 - He - vcrop1; - VStart = tvn->VStart; - VEnd = VStart + tvn->Ha / 2; // - 1; FIXME SnapShot times out with -1 in 768*576 on the DC10 - LP - VStart += vcrop1; - VEnd -= vcrop2; - reg = ((VStart & ZR36057_VFEVCR_Vmask) << ZR36057_VFEVCR_VStart) - | ((VEnd & ZR36057_VFEVCR_Vmask) << ZR36057_VFEVCR_VEnd); - if (zr->card.vfe_pol.vsync_pol) - reg |= ZR36057_VFEVCR_VSPol; - btwrite(reg, ZR36057_VFEVCR); - - /* scaler and pixel format */ - reg = 0; - reg |= (HorDcm << ZR36057_VFESPFR_HorDcm); - reg |= (VerDcm << ZR36057_VFESPFR_VerDcm); - reg |= (DispMode << ZR36057_VFESPFR_DispMode); - /* RJ: I don't know, why the following has to be the opposite - * of the corresponding ZR36060 setting, but only this way - * we get the correct colors when uncompressing to the screen */ - //reg |= ZR36057_VFESPFR_VCLKPol; /**/ - /* RJ: Don't know if that is needed for NTSC also */ - if (!(zr->norm & V4L2_STD_NTSC)) - reg |= ZR36057_VFESPFR_ExtFl; // NEEDED!!!!!!! 
Wolfgang - reg |= ZR36057_VFESPFR_TopField; - if (HorDcm >= 48) { - reg |= 3 << ZR36057_VFESPFR_HFilter; /* 5 tap filter */ - } else if (HorDcm >= 32) { - reg |= 2 << ZR36057_VFESPFR_HFilter; /* 4 tap filter */ - } else if (HorDcm >= 16) { - reg |= 1 << ZR36057_VFESPFR_HFilter; /* 3 tap filter */ - } - reg |= format->vfespfr; - btwrite(reg, ZR36057_VFESPFR); - - /* display configuration */ - reg = (16 << ZR36057_VDCR_MinPix) - | (VidWinHt << ZR36057_VDCR_VidWinHt) - | (VidWinWid << ZR36057_VDCR_VidWinWid); - if (pci_pci_problems & PCIPCI_TRITON) - // || zr->revision < 1) // Revision 1 has also Triton support - reg &= ~ZR36057_VDCR_Triton; - else - reg |= ZR36057_VDCR_Triton; - btwrite(reg, ZR36057_VDCR); - - /* (Ronald) don't write this if overlay_mask = NULL */ - if (zr->overlay_mask) { - /* Write overlay clipping mask data, but don't enable overlay clipping */ - /* RJ: since this makes only sense on the screen, we use - * zr->overlay_settings.width instead of video_width */ - - mask_line_size = (BUZ_MAX_WIDTH + 31) / 32; - reg = virt_to_bus(zr->overlay_mask); - btwrite(reg, ZR36057_MMTR); - reg = virt_to_bus(zr->overlay_mask + mask_line_size); - btwrite(reg, ZR36057_MMBR); - reg = - mask_line_size - (zr->overlay_settings.width + - 31) / 32; - if (DispMode == 0) - reg += mask_line_size; - reg <<= ZR36057_OCR_MaskStride; - btwrite(reg, ZR36057_OCR); - } - - zr36057_adjust_vfe(zr, zr->codec_mode); -} - -/* - * Switch overlay on or off - */ - -void -zr36057_overlay (struct zoran *zr, - int on) -{ - u32 reg; - - if (on) { - /* do the necessary settings ... */ - btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR); /* switch it off first */ - - zr36057_set_vfe(zr, - zr->overlay_settings.width, - zr->overlay_settings.height, - zr->overlay_settings.format); - - /* Start and length of each line MUST be 4-byte aligned. - * This should be already checked before the call to this routine. - * All error messages are internal driver checking only! 
*/ - - /* video display top and bottom registers */ - reg = (long) zr->vbuf_base + - zr->overlay_settings.x * - ((zr->overlay_settings.format->depth + 7) / 8) + - zr->overlay_settings.y * - zr->vbuf_bytesperline; - btwrite(reg, ZR36057_VDTR); - if (reg & 3) - dprintk(1, - KERN_ERR - "%s: zr36057_overlay() - video_address not aligned\n", - ZR_DEVNAME(zr)); - if (zr->overlay_settings.height > BUZ_MAX_HEIGHT / 2) - reg += zr->vbuf_bytesperline; - btwrite(reg, ZR36057_VDBR); - - /* video stride, status, and frame grab register */ - reg = zr->vbuf_bytesperline - - zr->overlay_settings.width * - ((zr->overlay_settings.format->depth + 7) / 8); - if (zr->overlay_settings.height > BUZ_MAX_HEIGHT / 2) - reg += zr->vbuf_bytesperline; - if (reg & 3) - dprintk(1, - KERN_ERR - "%s: zr36057_overlay() - video_stride not aligned\n", - ZR_DEVNAME(zr)); - reg = (reg << ZR36057_VSSFGR_DispStride); - reg |= ZR36057_VSSFGR_VidOvf; /* clear overflow status */ - btwrite(reg, ZR36057_VSSFGR); - - /* Set overlay clipping */ - if (zr->overlay_settings.clipcount > 0) - btor(ZR36057_OCR_OvlEnable, ZR36057_OCR); - - /* ... and switch it on */ - btor(ZR36057_VDCR_VidEn, ZR36057_VDCR); - } else { - /* Switch it off */ - btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR); - } -} - -/* - * The overlay mask has one bit for each pixel on a scan line, - * and the maximum window size is BUZ_MAX_WIDTH * BUZ_MAX_HEIGHT pixels. 
- */ - -void write_overlay_mask(struct zoran_fh *fh, struct v4l2_clip *vp, int count) -{ - struct zoran *zr = fh->zr; - unsigned mask_line_size = (BUZ_MAX_WIDTH + 31) / 32; - u32 *mask; - int x, y, width, height; - unsigned i, j, k; - - /* fill mask with one bits */ - memset(fh->overlay_mask, ~0, mask_line_size * 4 * BUZ_MAX_HEIGHT); - - for (i = 0; i < count; ++i) { - /* pick up local copy of clip */ - x = vp[i].c.left; - y = vp[i].c.top; - width = vp[i].c.width; - height = vp[i].c.height; - - /* trim clips that extend beyond the window */ - if (x < 0) { - width += x; - x = 0; - } - if (y < 0) { - height += y; - y = 0; - } - if (x + width > fh->overlay_settings.width) { - width = fh->overlay_settings.width - x; - } - if (y + height > fh->overlay_settings.height) { - height = fh->overlay_settings.height - y; - } - - /* ignore degenerate clips */ - if (height <= 0) { - continue; - } - if (width <= 0) { - continue; - } - - /* apply clip for each scan line */ - for (j = 0; j < height; ++j) { - /* reset bit for each pixel */ - /* this can be optimized later if need be */ - mask = fh->overlay_mask + (y + j) * mask_line_size; - for (k = 0; k < width; ++k) { - mask[(x + k) / 32] &= - ~((u32) 1 << (x + k) % 32); - } - } - } -} - -/* Enable/Disable uncompressed memory grabbing of the 36057 */ - -void -zr36057_set_memgrab (struct zoran *zr, - int mode) -{ - if (mode) { - /* We only check SnapShot and not FrameGrab here. SnapShot==1 - * means a capture is already in progress, but FrameGrab==1 - * doesn't necessary mean that. It's more correct to say a 1 - * to 0 transition indicates a capture completed. If a - * capture is pending when capturing is tuned off, FrameGrab - * will be stuck at 1 until capturing is turned back on. 
- */ - if (btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SnapShot) - dprintk(1, - KERN_WARNING - "%s: zr36057_set_memgrab(1) with SnapShot on!?\n", - ZR_DEVNAME(zr)); - - /* switch on VSync interrupts */ - btwrite(IRQ_MASK, ZR36057_ISR); // Clear Interrupts - btor(zr->card.vsync_int, ZR36057_ICR); // SW - - /* enable SnapShot */ - btor(ZR36057_VSSFGR_SnapShot, ZR36057_VSSFGR); - - /* Set zr36057 video front end and enable video */ - zr36057_set_vfe(zr, zr->v4l_settings.width, - zr->v4l_settings.height, - zr->v4l_settings.format); - - zr->v4l_memgrab_active = 1; - } else { - /* switch off VSync interrupts */ - btand(~zr->card.vsync_int, ZR36057_ICR); // SW - - zr->v4l_memgrab_active = 0; - zr->v4l_grab_frame = NO_GRAB_ACTIVE; - - /* reenable grabbing to screen if it was running */ - if (zr->v4l_overlay_active) { - zr36057_overlay(zr, 1); - } else { - btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR); - btand(~ZR36057_VSSFGR_SnapShot, ZR36057_VSSFGR); - } - } -} - -int -wait_grab_pending (struct zoran *zr) -{ - unsigned long flags; - - /* wait until all pending grabs are finished */ - - if (!zr->v4l_memgrab_active) - return 0; - - wait_event_interruptible(zr->v4l_capq, - (zr->v4l_pend_tail == zr->v4l_pend_head)); - if (signal_pending(current)) - return -ERESTARTSYS; - - spin_lock_irqsave(&zr->spinlock, flags); - zr36057_set_memgrab(zr, 0); - spin_unlock_irqrestore(&zr->spinlock, flags); - - return 0; -} - -/***************************************************************************** - * * - * Set up the Buz-specific MJPEG part * - * * - *****************************************************************************/ - -static inline void -set_frame (struct zoran *zr, - int val) -{ - GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_FRAME], val); -} - -static void -set_videobus_dir (struct zoran *zr, - int val) -{ - switch (zr->card.type) { - case LML33: - case LML33R10: - if (!lml33dpath) - GPIO(zr, 5, val); - else - GPIO(zr, 5, 1); - break; - default: - GPIO(zr, zr->card.gpio[ZR_GPIO_VID_DIR], - 
zr->card.gpio_pol[ZR_GPIO_VID_DIR] ? !val : val); - break; - } -} - -static void -init_jpeg_queue (struct zoran *zr) -{ - int i; - - /* re-initialize DMA ring stuff */ - zr->jpg_que_head = 0; - zr->jpg_dma_head = 0; - zr->jpg_dma_tail = 0; - zr->jpg_que_tail = 0; - zr->jpg_seq_num = 0; - zr->JPEG_error = 0; - zr->num_errors = 0; - zr->jpg_err_seq = 0; - zr->jpg_err_shift = 0; - zr->jpg_queued_num = 0; - for (i = 0; i < zr->jpg_buffers.num_buffers; i++) { - zr->jpg_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ - } - for (i = 0; i < BUZ_NUM_STAT_COM; i++) { - zr->stat_com[i] = cpu_to_le32(1); /* mark as unavailable to zr36057 */ - } -} - -static void -zr36057_set_jpg (struct zoran *zr, - enum zoran_codec_mode mode) -{ - struct tvnorm *tvn; - u32 reg; - - tvn = zr->timing; - - /* assert P_Reset, disable code transfer, deassert Active */ - btwrite(0, ZR36057_JPC); - - /* MJPEG compression mode */ - switch (mode) { - - case BUZ_MODE_MOTION_COMPRESS: - default: - reg = ZR36057_JMC_MJPGCmpMode; - break; - - case BUZ_MODE_MOTION_DECOMPRESS: - reg = ZR36057_JMC_MJPGExpMode; - reg |= ZR36057_JMC_SyncMstr; - /* RJ: The following is experimental - improves the output to screen */ - //if(zr->jpg_settings.VFIFO_FB) reg |= ZR36057_JMC_VFIFO_FB; // No, it doesn't. 
SM - break; - - case BUZ_MODE_STILL_COMPRESS: - reg = ZR36057_JMC_JPGCmpMode; - break; - - case BUZ_MODE_STILL_DECOMPRESS: - reg = ZR36057_JMC_JPGExpMode; - break; - - } - reg |= ZR36057_JMC_JPG; - if (zr->jpg_settings.field_per_buff == 1) - reg |= ZR36057_JMC_Fld_per_buff; - btwrite(reg, ZR36057_JMC); - - /* vertical */ - btor(ZR36057_VFEVCR_VSPol, ZR36057_VFEVCR); - reg = (6 << ZR36057_VSP_VsyncSize) | - (tvn->Ht << ZR36057_VSP_FrmTot); - btwrite(reg, ZR36057_VSP); - reg = ((zr->jpg_settings.img_y + tvn->VStart) << ZR36057_FVAP_NAY) | - (zr->jpg_settings.img_height << ZR36057_FVAP_PAY); - btwrite(reg, ZR36057_FVAP); - - /* horizontal */ - if (zr->card.vfe_pol.hsync_pol) - btor(ZR36057_VFEHCR_HSPol, ZR36057_VFEHCR); - else - btand(~ZR36057_VFEHCR_HSPol, ZR36057_VFEHCR); - reg = ((tvn->HSyncStart) << ZR36057_HSP_HsyncStart) | - (tvn->Wt << ZR36057_HSP_LineTot); - btwrite(reg, ZR36057_HSP); - reg = ((zr->jpg_settings.img_x + - tvn->HStart + 4) << ZR36057_FHAP_NAX) | - (zr->jpg_settings.img_width << ZR36057_FHAP_PAX); - btwrite(reg, ZR36057_FHAP); - - /* field process parameters */ - if (zr->jpg_settings.odd_even) - reg = ZR36057_FPP_Odd_Even; - else - reg = 0; - - btwrite(reg, ZR36057_FPP); - - /* Set proper VCLK Polarity, else colors will be wrong during playback */ - //btor(ZR36057_VFESPFR_VCLKPol, ZR36057_VFESPFR); - - /* code base address */ - reg = virt_to_bus(zr->stat_com); - btwrite(reg, ZR36057_JCBA); - - /* FIFO threshold (FIFO is 160. 
double words) */ - /* NOTE: decimal values here */ - switch (mode) { - - case BUZ_MODE_STILL_COMPRESS: - case BUZ_MODE_MOTION_COMPRESS: - if (zr->card.type != BUZ) - reg = 140; - else - reg = 60; - break; - - case BUZ_MODE_STILL_DECOMPRESS: - case BUZ_MODE_MOTION_DECOMPRESS: - reg = 20; - break; - - default: - reg = 80; - break; - - } - btwrite(reg, ZR36057_JCFT); - zr36057_adjust_vfe(zr, mode); - -} - -void -print_interrupts (struct zoran *zr) -{ - int res, noerr = 0; - - printk(KERN_INFO "%s: interrupts received:", ZR_DEVNAME(zr)); - if ((res = zr->field_counter) < -1 || res > 1) { - printk(KERN_CONT " FD:%d", res); - } - if ((res = zr->intr_counter_GIRQ1) != 0) { - printk(KERN_CONT " GIRQ1:%d", res); - noerr++; - } - if ((res = zr->intr_counter_GIRQ0) != 0) { - printk(KERN_CONT " GIRQ0:%d", res); - noerr++; - } - if ((res = zr->intr_counter_CodRepIRQ) != 0) { - printk(KERN_CONT " CodRepIRQ:%d", res); - noerr++; - } - if ((res = zr->intr_counter_JPEGRepIRQ) != 0) { - printk(KERN_CONT " JPEGRepIRQ:%d", res); - noerr++; - } - if (zr->JPEG_max_missed) { - printk(KERN_CONT " JPEG delays: max=%d min=%d", zr->JPEG_max_missed, - zr->JPEG_min_missed); - } - if (zr->END_event_missed) { - printk(KERN_CONT " ENDs missed: %d", zr->END_event_missed); - } - //if (zr->jpg_queued_num) { - printk(KERN_CONT " queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail, - zr->jpg_dma_tail, zr->jpg_dma_head, zr->jpg_que_head); - //} - if (!noerr) { - printk(KERN_CONT ": no interrupts detected."); - } - printk(KERN_CONT "\n"); -} - -void -clear_interrupt_counters (struct zoran *zr) -{ - zr->intr_counter_GIRQ1 = 0; - zr->intr_counter_GIRQ0 = 0; - zr->intr_counter_CodRepIRQ = 0; - zr->intr_counter_JPEGRepIRQ = 0; - zr->field_counter = 0; - zr->IRQ1_in = 0; - zr->IRQ1_out = 0; - zr->JPEG_in = 0; - zr->JPEG_out = 0; - zr->JPEG_0 = 0; - zr->JPEG_1 = 0; - zr->END_event_missed = 0; - zr->JPEG_missed = 0; - zr->JPEG_max_missed = 0; - zr->JPEG_min_missed = 0x7fffffff; -} - -static u32 
-count_reset_interrupt (struct zoran *zr) -{ - u32 isr; - - if ((isr = btread(ZR36057_ISR) & 0x78000000)) { - if (isr & ZR36057_ISR_GIRQ1) { - btwrite(ZR36057_ISR_GIRQ1, ZR36057_ISR); - zr->intr_counter_GIRQ1++; - } - if (isr & ZR36057_ISR_GIRQ0) { - btwrite(ZR36057_ISR_GIRQ0, ZR36057_ISR); - zr->intr_counter_GIRQ0++; - } - if (isr & ZR36057_ISR_CodRepIRQ) { - btwrite(ZR36057_ISR_CodRepIRQ, ZR36057_ISR); - zr->intr_counter_CodRepIRQ++; - } - if (isr & ZR36057_ISR_JPEGRepIRQ) { - btwrite(ZR36057_ISR_JPEGRepIRQ, ZR36057_ISR); - zr->intr_counter_JPEGRepIRQ++; - } - } - return isr; -} - -void -jpeg_start (struct zoran *zr) -{ - int reg; - - zr->frame_num = 0; - - /* deassert P_reset, disable code transfer, deassert Active */ - btwrite(ZR36057_JPC_P_Reset, ZR36057_JPC); - /* stop flushing the internal code buffer */ - btand(~ZR36057_MCTCR_CFlush, ZR36057_MCTCR); - /* enable code transfer */ - btor(ZR36057_JPC_CodTrnsEn, ZR36057_JPC); - - /* clear IRQs */ - btwrite(IRQ_MASK, ZR36057_ISR); - /* enable the JPEG IRQs */ - btwrite(zr->card.jpeg_int | - ZR36057_ICR_JPEGRepIRQ | - ZR36057_ICR_IntPinEn, - ZR36057_ICR); - - set_frame(zr, 0); // \FRAME - - /* set the JPEG codec guest ID */ - reg = (zr->card.gpcs[1] << ZR36057_JCGI_JPEGuestID) | - (0 << ZR36057_JCGI_JPEGuestReg); - btwrite(reg, ZR36057_JCGI); - - if (zr->card.video_vfe == CODEC_TYPE_ZR36016 && - zr->card.video_codec == CODEC_TYPE_ZR36050) { - /* Enable processing on the ZR36016 */ - if (zr->vfe) - zr36016_write(zr->vfe, 0, 1); - - /* load the address of the GO register in the ZR36050 latch */ - post_office_write(zr, 0, 0, 0); - } - - /* assert Active */ - btor(ZR36057_JPC_Active, ZR36057_JPC); - - /* enable the Go generation */ - btor(ZR36057_JMC_Go_en, ZR36057_JMC); - udelay(30); - - set_frame(zr, 1); // /FRAME - - dprintk(3, KERN_DEBUG "%s: jpeg_start\n", ZR_DEVNAME(zr)); -} - -void -zr36057_enable_jpg (struct zoran *zr, - enum zoran_codec_mode mode) -{ - struct vfe_settings cap; - int field_size = - 
zr->jpg_buffers.buffer_size / zr->jpg_settings.field_per_buff; - - zr->codec_mode = mode; - - cap.x = zr->jpg_settings.img_x; - cap.y = zr->jpg_settings.img_y; - cap.width = zr->jpg_settings.img_width; - cap.height = zr->jpg_settings.img_height; - cap.decimation = - zr->jpg_settings.HorDcm | (zr->jpg_settings.VerDcm << 8); - cap.quality = zr->jpg_settings.jpg_comp.quality; - - switch (mode) { - - case BUZ_MODE_MOTION_COMPRESS: { - struct jpeg_app_marker app; - struct jpeg_com_marker com; - - /* In motion compress mode, the decoder output must be enabled, and - * the video bus direction set to input. - */ - set_videobus_dir(zr, 0); - decoder_call(zr, video, s_stream, 1); - encoder_call(zr, video, s_routing, 0, 0, 0); - - /* Take the JPEG codec and the VFE out of sleep */ - jpeg_codec_sleep(zr, 0); - - /* set JPEG app/com marker */ - app.appn = zr->jpg_settings.jpg_comp.APPn; - app.len = zr->jpg_settings.jpg_comp.APP_len; - memcpy(app.data, zr->jpg_settings.jpg_comp.APP_data, 60); - zr->codec->control(zr->codec, CODEC_S_JPEG_APP_DATA, - sizeof(struct jpeg_app_marker), &app); - - com.len = zr->jpg_settings.jpg_comp.COM_len; - memcpy(com.data, zr->jpg_settings.jpg_comp.COM_data, 60); - zr->codec->control(zr->codec, CODEC_S_JPEG_COM_DATA, - sizeof(struct jpeg_com_marker), &com); - - /* Setup the JPEG codec */ - zr->codec->control(zr->codec, CODEC_S_JPEG_TDS_BYTE, - sizeof(int), &field_size); - zr->codec->set_video(zr->codec, zr->timing, &cap, - &zr->card.vfe_pol); - zr->codec->set_mode(zr->codec, CODEC_DO_COMPRESSION); - - /* Setup the VFE */ - if (zr->vfe) { - zr->vfe->control(zr->vfe, CODEC_S_JPEG_TDS_BYTE, - sizeof(int), &field_size); - zr->vfe->set_video(zr->vfe, zr->timing, &cap, - &zr->card.vfe_pol); - zr->vfe->set_mode(zr->vfe, CODEC_DO_COMPRESSION); - } - - init_jpeg_queue(zr); - zr36057_set_jpg(zr, mode); // \P_Reset, ... 
Video param, FIFO - - clear_interrupt_counters(zr); - dprintk(2, KERN_INFO "%s: enable_jpg(MOTION_COMPRESS)\n", - ZR_DEVNAME(zr)); - break; - } - - case BUZ_MODE_MOTION_DECOMPRESS: - /* In motion decompression mode, the decoder output must be disabled, and - * the video bus direction set to output. - */ - decoder_call(zr, video, s_stream, 0); - set_videobus_dir(zr, 1); - encoder_call(zr, video, s_routing, 1, 0, 0); - - /* Take the JPEG codec and the VFE out of sleep */ - jpeg_codec_sleep(zr, 0); - /* Setup the VFE */ - if (zr->vfe) { - zr->vfe->set_video(zr->vfe, zr->timing, &cap, - &zr->card.vfe_pol); - zr->vfe->set_mode(zr->vfe, CODEC_DO_EXPANSION); - } - /* Setup the JPEG codec */ - zr->codec->set_video(zr->codec, zr->timing, &cap, - &zr->card.vfe_pol); - zr->codec->set_mode(zr->codec, CODEC_DO_EXPANSION); - - init_jpeg_queue(zr); - zr36057_set_jpg(zr, mode); // \P_Reset, ... Video param, FIFO - - clear_interrupt_counters(zr); - dprintk(2, KERN_INFO "%s: enable_jpg(MOTION_DECOMPRESS)\n", - ZR_DEVNAME(zr)); - break; - - case BUZ_MODE_IDLE: - default: - /* shut down processing */ - btand(~(zr->card.jpeg_int | ZR36057_ICR_JPEGRepIRQ), - ZR36057_ICR); - btwrite(zr->card.jpeg_int | ZR36057_ICR_JPEGRepIRQ, - ZR36057_ISR); - btand(~ZR36057_JMC_Go_en, ZR36057_JMC); // \Go_en - - msleep(50); - - set_videobus_dir(zr, 0); - set_frame(zr, 1); // /FRAME - btor(ZR36057_MCTCR_CFlush, ZR36057_MCTCR); // /CFlush - btwrite(0, ZR36057_JPC); // \P_Reset,\CodTrnsEn,\Active - btand(~ZR36057_JMC_VFIFO_FB, ZR36057_JMC); - btand(~ZR36057_JMC_SyncMstr, ZR36057_JMC); - jpeg_codec_reset(zr); - jpeg_codec_sleep(zr, 1); - zr36057_adjust_vfe(zr, mode); - - decoder_call(zr, video, s_stream, 1); - encoder_call(zr, video, s_routing, 0, 0, 0); - - dprintk(2, KERN_INFO "%s: enable_jpg(IDLE)\n", ZR_DEVNAME(zr)); - break; - - } -} - -/* when this is called the spinlock must be held */ -void -zoran_feed_stat_com (struct zoran *zr) -{ - /* move frames from pending queue to DMA */ - - int frame, i, 
max_stat_com; - - max_stat_com = - (zr->jpg_settings.TmpDcm == - 1) ? BUZ_NUM_STAT_COM : (BUZ_NUM_STAT_COM >> 1); - - while ((zr->jpg_dma_head - zr->jpg_dma_tail) < max_stat_com && - zr->jpg_dma_head < zr->jpg_que_head) { - - frame = zr->jpg_pend[zr->jpg_dma_head & BUZ_MASK_FRAME]; - if (zr->jpg_settings.TmpDcm == 1) { - /* fill 1 stat_com entry */ - i = (zr->jpg_dma_head - - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; - if (!(zr->stat_com[i] & cpu_to_le32(1))) - break; - zr->stat_com[i] = - cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus); - } else { - /* fill 2 stat_com entries */ - i = ((zr->jpg_dma_head - - zr->jpg_err_shift) & 1) * 2; - if (!(zr->stat_com[i] & cpu_to_le32(1))) - break; - zr->stat_com[i] = - cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus); - zr->stat_com[i + 1] = - cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus); - } - zr->jpg_buffers.buffer[frame].state = BUZ_STATE_DMA; - zr->jpg_dma_head++; - - } - if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) - zr->jpg_queued_num++; -} - -/* when this is called the spinlock must be held */ -static void -zoran_reap_stat_com (struct zoran *zr) -{ - /* move frames from DMA queue to done queue */ - - int i; - u32 stat_com; - unsigned int seq; - unsigned int dif; - struct zoran_buffer *buffer; - int frame; - - /* In motion decompress we don't have a hardware frame counter, - * we just count the interrupts here */ - - if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) { - zr->jpg_seq_num++; - } - while (zr->jpg_dma_tail < zr->jpg_dma_head) { - if (zr->jpg_settings.TmpDcm == 1) - i = (zr->jpg_dma_tail - - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; - else - i = ((zr->jpg_dma_tail - - zr->jpg_err_shift) & 1) * 2 + 1; - - stat_com = le32_to_cpu(zr->stat_com[i]); - - if ((stat_com & 1) == 0) { - return; - } - frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME]; - buffer = &zr->jpg_buffers.buffer[frame]; - v4l2_get_timestamp(&buffer->bs.timestamp); - - if (zr->codec_mode == 
BUZ_MODE_MOTION_COMPRESS) { - buffer->bs.length = (stat_com & 0x7fffff) >> 1; - - /* update sequence number with the help of the counter in stat_com */ - - seq = ((stat_com >> 24) + zr->jpg_err_seq) & 0xff; - dif = (seq - zr->jpg_seq_num) & 0xff; - zr->jpg_seq_num += dif; - } else { - buffer->bs.length = 0; - } - buffer->bs.seq = - zr->jpg_settings.TmpDcm == - 2 ? (zr->jpg_seq_num >> 1) : zr->jpg_seq_num; - buffer->state = BUZ_STATE_DONE; - - zr->jpg_dma_tail++; - } -} - -static void zoran_restart(struct zoran *zr) -{ - /* Now the stat_comm buffer is ready for restart */ - unsigned int status = 0; - int mode; - - if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) { - decoder_call(zr, video, g_input_status, &status); - mode = CODEC_DO_COMPRESSION; - } else { - status = V4L2_IN_ST_NO_SIGNAL; - mode = CODEC_DO_EXPANSION; - } - if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS || - !(status & V4L2_IN_ST_NO_SIGNAL)) { - /********** RESTART code *************/ - jpeg_codec_reset(zr); - zr->codec->set_mode(zr->codec, mode); - zr36057_set_jpg(zr, zr->codec_mode); - jpeg_start(zr); - - if (zr->num_errors <= 8) - dprintk(2, KERN_INFO "%s: Restart\n", - ZR_DEVNAME(zr)); - - zr->JPEG_missed = 0; - zr->JPEG_error = 2; - /********** End RESTART code ***********/ - } -} - -static void -error_handler (struct zoran *zr, - u32 astat, - u32 stat) -{ - int i; - - /* This is JPEG error handling part */ - if (zr->codec_mode != BUZ_MODE_MOTION_COMPRESS && - zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS) { - return; - } - - if ((stat & 1) == 0 && - zr->codec_mode == BUZ_MODE_MOTION_COMPRESS && - zr->jpg_dma_tail - zr->jpg_que_tail >= zr->jpg_buffers.num_buffers) { - /* No free buffers... 
*/ - zoran_reap_stat_com(zr); - zoran_feed_stat_com(zr); - wake_up_interruptible(&zr->jpg_capq); - zr->JPEG_missed = 0; - return; - } - - if (zr->JPEG_error == 1) { - zoran_restart(zr); - return; - } - - /* - * First entry: error just happened during normal operation - * - * In BUZ_MODE_MOTION_COMPRESS: - * - * Possible glitch in TV signal. In this case we should - * stop the codec and wait for good quality signal before - * restarting it to avoid further problems - * - * In BUZ_MODE_MOTION_DECOMPRESS: - * - * Bad JPEG frame: we have to mark it as processed (codec crashed - * and was not able to do it itself), and to remove it from queue. - */ - btand(~ZR36057_JMC_Go_en, ZR36057_JMC); - udelay(1); - stat = stat | (post_office_read(zr, 7, 0) & 3) << 8; - btwrite(0, ZR36057_JPC); - btor(ZR36057_MCTCR_CFlush, ZR36057_MCTCR); - jpeg_codec_reset(zr); - jpeg_codec_sleep(zr, 1); - zr->JPEG_error = 1; - zr->num_errors++; - - /* Report error */ - if (zr36067_debug > 1 && zr->num_errors <= 8) { - long frame; - int j; - - frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME]; - printk(KERN_ERR - "%s: JPEG error stat=0x%08x(0x%08x) queue_state=%ld/%ld/%ld/%ld seq=%ld frame=%ld. Codec stopped. 
", - ZR_DEVNAME(zr), stat, zr->last_isr, - zr->jpg_que_tail, zr->jpg_dma_tail, - zr->jpg_dma_head, zr->jpg_que_head, - zr->jpg_seq_num, frame); - printk(KERN_INFO "stat_com frames:"); - for (j = 0; j < BUZ_NUM_STAT_COM; j++) { - for (i = 0; i < zr->jpg_buffers.num_buffers; i++) { - if (le32_to_cpu(zr->stat_com[j]) == zr->jpg_buffers.buffer[i].jpg.frag_tab_bus) - printk(KERN_CONT "% d->%d", j, i); - } - } - printk(KERN_CONT "\n"); - } - /* Find an entry in stat_com and rotate contents */ - if (zr->jpg_settings.TmpDcm == 1) - i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; - else - i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2; - if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) { - /* Mimic zr36067 operation */ - zr->stat_com[i] |= cpu_to_le32(1); - if (zr->jpg_settings.TmpDcm != 1) - zr->stat_com[i + 1] |= cpu_to_le32(1); - /* Refill */ - zoran_reap_stat_com(zr); - zoran_feed_stat_com(zr); - wake_up_interruptible(&zr->jpg_capq); - /* Find an entry in stat_com again after refill */ - if (zr->jpg_settings.TmpDcm == 1) - i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; - else - i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2; - } - if (i) { - /* Rotate stat_comm entries to make current entry first */ - int j; - __le32 bus_addr[BUZ_NUM_STAT_COM]; - - /* Here we are copying the stat_com array, which - * is already in little endian format, so - * no endian conversions here - */ - memcpy(bus_addr, zr->stat_com, sizeof(bus_addr)); - - for (j = 0; j < BUZ_NUM_STAT_COM; j++) - zr->stat_com[j] = bus_addr[(i + j) & BUZ_MASK_STAT_COM]; - - zr->jpg_err_shift += i; - zr->jpg_err_shift &= BUZ_MASK_STAT_COM; - } - if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) - zr->jpg_err_seq = zr->jpg_seq_num; /* + 1; */ - zoran_restart(zr); -} - -irqreturn_t -zoran_irq (int irq, - void *dev_id) -{ - u32 stat, astat; - int count; - struct zoran *zr; - unsigned long flags; - - zr = dev_id; - count = 0; - - if (zr->testing) { - /* Testing interrupts */ - 
spin_lock_irqsave(&zr->spinlock, flags); - while ((stat = count_reset_interrupt(zr))) { - if (count++ > 100) { - btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR); - dprintk(1, - KERN_ERR - "%s: IRQ lockup while testing, isr=0x%08x, cleared int mask\n", - ZR_DEVNAME(zr), stat); - wake_up_interruptible(&zr->test_q); - } - } - zr->last_isr = stat; - spin_unlock_irqrestore(&zr->spinlock, flags); - return IRQ_HANDLED; - } - - spin_lock_irqsave(&zr->spinlock, flags); - while (1) { - /* get/clear interrupt status bits */ - stat = count_reset_interrupt(zr); - astat = stat & IRQ_MASK; - if (!astat) { - break; - } - dprintk(4, - KERN_DEBUG - "zoran_irq: astat: 0x%08x, mask: 0x%08x\n", - astat, btread(ZR36057_ICR)); - if (astat & zr->card.vsync_int) { // SW - - if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS || - zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) { - /* count missed interrupts */ - zr->JPEG_missed++; - } - //post_office_read(zr,1,0); - /* Interrupts may still happen when - * zr->v4l_memgrab_active is switched off. - * We simply ignore them */ - - if (zr->v4l_memgrab_active) { - /* A lot more checks should be here ... 
*/ - if ((btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SnapShot) == 0) - dprintk(1, - KERN_WARNING - "%s: BuzIRQ with SnapShot off ???\n", - ZR_DEVNAME(zr)); - - if (zr->v4l_grab_frame != NO_GRAB_ACTIVE) { - /* There is a grab on a frame going on, check if it has finished */ - if ((btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_FrameGrab) == 0) { - /* it is finished, notify the user */ - - zr->v4l_buffers.buffer[zr->v4l_grab_frame].state = BUZ_STATE_DONE; - zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.seq = zr->v4l_grab_seq; - v4l2_get_timestamp(&zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.timestamp); - zr->v4l_grab_frame = NO_GRAB_ACTIVE; - zr->v4l_pend_tail++; - } - } - - if (zr->v4l_grab_frame == NO_GRAB_ACTIVE) - wake_up_interruptible(&zr->v4l_capq); - - /* Check if there is another grab queued */ - - if (zr->v4l_grab_frame == NO_GRAB_ACTIVE && - zr->v4l_pend_tail != zr->v4l_pend_head) { - int frame = zr->v4l_pend[zr->v4l_pend_tail & V4L_MASK_FRAME]; - u32 reg; - - zr->v4l_grab_frame = frame; - - /* Set zr36057 video front end and enable video */ - - /* Buffer address */ - - reg = zr->v4l_buffers.buffer[frame].v4l.fbuffer_bus; - btwrite(reg, ZR36057_VDTR); - if (zr->v4l_settings.height > BUZ_MAX_HEIGHT / 2) - reg += zr->v4l_settings.bytesperline; - btwrite(reg, ZR36057_VDBR); - - /* video stride, status, and frame grab register */ - reg = 0; - if (zr->v4l_settings.height > BUZ_MAX_HEIGHT / 2) - reg += zr->v4l_settings.bytesperline; - reg = (reg << ZR36057_VSSFGR_DispStride); - reg |= ZR36057_VSSFGR_VidOvf; - reg |= ZR36057_VSSFGR_SnapShot; - reg |= ZR36057_VSSFGR_FrameGrab; - btwrite(reg, ZR36057_VSSFGR); - - btor(ZR36057_VDCR_VidEn, - ZR36057_VDCR); - } - } - - /* even if we don't grab, we do want to increment - * the sequence counter to see lost frames */ - zr->v4l_grab_seq++; - } -#if (IRQ_MASK & ZR36057_ISR_CodRepIRQ) - if (astat & ZR36057_ISR_CodRepIRQ) { - zr->intr_counter_CodRepIRQ++; - IDEBUG(printk(KERN_DEBUG "%s: ZR36057_ISR_CodRepIRQ\n", - ZR_DEVNAME(zr))); - 
btand(~ZR36057_ICR_CodRepIRQ, ZR36057_ICR); - } -#endif /* (IRQ_MASK & ZR36057_ISR_CodRepIRQ) */ - -#if (IRQ_MASK & ZR36057_ISR_JPEGRepIRQ) - if ((astat & ZR36057_ISR_JPEGRepIRQ) && - (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS || - zr->codec_mode == BUZ_MODE_MOTION_COMPRESS)) { - if (zr36067_debug > 1 && (!zr->frame_num || zr->JPEG_error)) { - char sv[BUZ_NUM_STAT_COM + 1]; - int i; - - printk(KERN_INFO - "%s: first frame ready: state=0x%08x odd_even=%d field_per_buff=%d delay=%d\n", - ZR_DEVNAME(zr), stat, - zr->jpg_settings.odd_even, - zr->jpg_settings.field_per_buff, - zr->JPEG_missed); - - for (i = 0; i < BUZ_NUM_STAT_COM; i++) - sv[i] = le32_to_cpu(zr->stat_com[i]) & 1 ? '1' : '0'; - sv[BUZ_NUM_STAT_COM] = 0; - printk(KERN_INFO - "%s: stat_com=%s queue_state=%ld/%ld/%ld/%ld\n", - ZR_DEVNAME(zr), sv, - zr->jpg_que_tail, - zr->jpg_dma_tail, - zr->jpg_dma_head, - zr->jpg_que_head); - } else { - /* Get statistics */ - if (zr->JPEG_missed > zr->JPEG_max_missed) - zr->JPEG_max_missed = zr->JPEG_missed; - if (zr->JPEG_missed < zr->JPEG_min_missed) - zr->JPEG_min_missed = zr->JPEG_missed; - } - - if (zr36067_debug > 2 && zr->frame_num < 6) { - int i; - - printk(KERN_INFO "%s: seq=%ld stat_com:", - ZR_DEVNAME(zr), zr->jpg_seq_num); - for (i = 0; i < 4; i++) { - printk(KERN_CONT " %08x", - le32_to_cpu(zr->stat_com[i])); - } - printk(KERN_CONT "\n"); - } - zr->frame_num++; - zr->JPEG_missed = 0; - zr->JPEG_error = 0; - zoran_reap_stat_com(zr); - zoran_feed_stat_com(zr); - wake_up_interruptible(&zr->jpg_capq); - } -#endif /* (IRQ_MASK & ZR36057_ISR_JPEGRepIRQ) */ - - /* DATERR, too many fields missed, error processing */ - if ((astat & zr->card.jpeg_int) || - zr->JPEG_missed > 25 || - zr->JPEG_error == 1 || - ((zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) && - (zr->frame_num && (zr->JPEG_missed > zr->jpg_settings.field_per_buff)))) { - error_handler(zr, astat, stat); - } - - count++; - if (count > 10) { - dprintk(2, KERN_WARNING "%s: irq loop %d\n", - ZR_DEVNAME(zr), 
count); - if (count > 20) { - btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR); - dprintk(2, - KERN_ERR - "%s: IRQ lockup, cleared int mask\n", - ZR_DEVNAME(zr)); - break; - } - } - zr->last_isr = stat; - } - spin_unlock_irqrestore(&zr->spinlock, flags); - - return IRQ_HANDLED; -} - -void -zoran_set_pci_master (struct zoran *zr, - int set_master) -{ - if (set_master) { - pci_set_master(zr->pci_dev); - } else { - u16 command; - - pci_read_config_word(zr->pci_dev, PCI_COMMAND, &command); - command &= ~PCI_COMMAND_MASTER; - pci_write_config_word(zr->pci_dev, PCI_COMMAND, command); - } -} - -void -zoran_init_hardware (struct zoran *zr) -{ - /* Enable bus-mastering */ - zoran_set_pci_master(zr, 1); - - /* Initialize the board */ - if (zr->card.init) { - zr->card.init(zr); - } - - decoder_call(zr, core, init, 0); - decoder_call(zr, video, s_std, zr->norm); - decoder_call(zr, video, s_routing, - zr->card.input[zr->input].muxsel, 0, 0); - - encoder_call(zr, core, init, 0); - encoder_call(zr, video, s_std_output, zr->norm); - encoder_call(zr, video, s_routing, 0, 0, 0); - - /* toggle JPEG codec sleep to sync PLL */ - jpeg_codec_sleep(zr, 1); - jpeg_codec_sleep(zr, 0); - - /* - * set individual interrupt enables (without GIRQ1) - * but don't global enable until zoran_open() - */ - zr36057_init_vfe(zr); - - zr36057_enable_jpg(zr, BUZ_MODE_IDLE); - - btwrite(IRQ_MASK, ZR36057_ISR); // Clears interrupts -} - -void -zr36057_restart (struct zoran *zr) -{ - btwrite(0, ZR36057_SPGPPCR); - mdelay(1); - btor(ZR36057_SPGPPCR_SoftReset, ZR36057_SPGPPCR); - mdelay(1); - - /* assert P_Reset */ - btwrite(0, ZR36057_JPC); - /* set up GPIO direction - all output */ - btwrite(ZR36057_SPGPPCR_SoftReset | 0, ZR36057_SPGPPCR); - - /* set up GPIO pins and guest bus timing */ - btwrite((0x81 << 24) | 0x8888, ZR36057_GPPGCR1); -} - -/* - * initialize video front end - */ - -static void -zr36057_init_vfe (struct zoran *zr) -{ - u32 reg; - - reg = btread(ZR36057_VFESPFR); - reg |= 
ZR36057_VFESPFR_LittleEndian; - reg &= ~ZR36057_VFESPFR_VCLKPol; - reg |= ZR36057_VFESPFR_ExtFl; - reg |= ZR36057_VFESPFR_TopField; - btwrite(reg, ZR36057_VFESPFR); - reg = btread(ZR36057_VDCR); - if (pci_pci_problems & PCIPCI_TRITON) - // || zr->revision < 1) // Revision 1 has also Triton support - reg &= ~ZR36057_VDCR_Triton; - else - reg |= ZR36057_VDCR_Triton; - btwrite(reg, ZR36057_VDCR); -} diff --git a/drivers/media/pci/zoran/zoran_device.h b/drivers/media/pci/zoran/zoran_device.h deleted file mode 100644 index a507aaad4ebb..000000000000 --- a/drivers/media/pci/zoran/zoran_device.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Zoran zr36057/zr36067 PCI controller driver, for the - * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux - * Media Labs LML33/LML33R10. - * - * This part handles card-specific data and detection - * - * Copyright (C) 2000 Serguei Miridonov - * - * Currently maintained by: - * Ronald Bultje - * Laurent Pinchart - * Mailinglist - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __ZORAN_DEVICE_H__ -#define __ZORAN_DEVICE_H__ - -/* general purpose I/O */ -extern void GPIO(struct zoran *zr, - int bit, - unsigned int value); - -/* codec (or actually: guest bus) access */ -extern int post_office_wait(struct zoran *zr); -extern int post_office_write(struct zoran *zr, - unsigned guest, - unsigned reg, - unsigned value); -extern int post_office_read(struct zoran *zr, - unsigned guest, - unsigned reg); - -extern void detect_guest_activity(struct zoran *zr); - -extern void jpeg_codec_sleep(struct zoran *zr, - int sleep); -extern int jpeg_codec_reset(struct zoran *zr); - -/* zr360x7 access to raw capture */ -extern void zr36057_overlay(struct zoran *zr, - int on); -extern void write_overlay_mask(struct zoran_fh *fh, - struct v4l2_clip *vp, - int count); -extern void zr36057_set_memgrab(struct zoran *zr, - int mode); -extern int wait_grab_pending(struct zoran *zr); - -/* interrupts */ -extern void print_interrupts(struct zoran *zr); -extern void clear_interrupt_counters(struct zoran *zr); -extern irqreturn_t zoran_irq(int irq, void *dev_id); - -/* JPEG codec access */ -extern void jpeg_start(struct zoran *zr); -extern void zr36057_enable_jpg(struct zoran *zr, - enum zoran_codec_mode mode); -extern void zoran_feed_stat_com(struct zoran *zr); - -/* general */ -extern void zoran_set_pci_master(struct zoran *zr, - int set_master); -extern void zoran_init_hardware(struct zoran *zr); -extern void zr36057_restart(struct zoran *zr); - -extern const struct zoran_format zoran_formats[]; - -extern int v4l_nbufs; -extern int v4l_bufsize; -extern int jpg_nbufs; -extern int jpg_bufsize; -extern int pass_through; - -/* i2c */ -#define decoder_call(zr, o, f, args...) \ - v4l2_subdev_call(zr->decoder, o, f, ##args) -#define encoder_call(zr, o, f, args...) 
\ - v4l2_subdev_call(zr->encoder, o, f, ##args) - -#endif /* __ZORAN_DEVICE_H__ */ diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c deleted file mode 100644 index 14f9c0e26a1c..000000000000 --- a/drivers/media/pci/zoran/zoran_driver.c +++ /dev/null @@ -1,2849 +0,0 @@ -/* - * Zoran zr36057/zr36067 PCI controller driver, for the - * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux - * Media Labs LML33/LML33R10. - * - * Copyright (C) 2000 Serguei Miridonov - * - * Changes for BUZ by Wolfgang Scherr - * - * Changes for DC10/DC30 by Laurent Pinchart - * - * Changes for LML33R10 by Maxim Yevtyushkin - * - * Changes for videodev2/v4l2 by Ronald Bultje - * - * Based on - * - * Miro DC10 driver - * Copyright (C) 1999 Wolfgang Scherr - * - * Iomega Buz driver version 1.0 - * Copyright (C) 1999 Rainer Johanni - * - * buz.0.0.3 - * Copyright (C) 1998 Dave Perks - * - * bttv - Bt848 frame grabber driver - * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) - * & Marcus Metzler (mocm@thp.uni-koeln.de) - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include -#include -#include -#include -#include "videocodec.h" - -#include -#include -#include -#include - -#include -#include "zoran.h" -#include "zoran_device.h" -#include "zoran_card.h" - - -const struct zoran_format zoran_formats[] = { - { - .name = "15-bit RGB LE", - .fourcc = V4L2_PIX_FMT_RGB555, - .colorspace = V4L2_COLORSPACE_SRGB, - .depth = 15, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif| - ZR36057_VFESPFR_LittleEndian, - }, { - .name = "15-bit RGB BE", - .fourcc = V4L2_PIX_FMT_RGB555X, - .colorspace = V4L2_COLORSPACE_SRGB, - .depth = 15, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif, - }, { - .name = "16-bit RGB LE", - .fourcc = V4L2_PIX_FMT_RGB565, - .colorspace = V4L2_COLORSPACE_SRGB, - .depth = 16, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif| - ZR36057_VFESPFR_LittleEndian, - }, { - .name = "16-bit RGB BE", - .fourcc = V4L2_PIX_FMT_RGB565X, - .colorspace = V4L2_COLORSPACE_SRGB, - .depth = 16, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif, - }, { - .name = "24-bit RGB", - .fourcc = V4L2_PIX_FMT_BGR24, - .colorspace = V4L2_COLORSPACE_SRGB, - .depth = 24, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_Pack24, - }, { - .name = "32-bit RGB LE", - .fourcc = V4L2_PIX_FMT_BGR32, - .colorspace = V4L2_COLORSPACE_SRGB, - .depth = 32, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_LittleEndian, - }, { - .name = "32-bit RGB BE", - .fourcc = V4L2_PIX_FMT_RGB32, - .colorspace = V4L2_COLORSPACE_SRGB, - .depth = 32, - .flags = 
ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_RGB888, - }, { - .name = "4:2:2, packed, YUYV", - .fourcc = V4L2_PIX_FMT_YUYV, - .colorspace = V4L2_COLORSPACE_SMPTE170M, - .depth = 16, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_YUV422, - }, { - .name = "4:2:2, packed, UYVY", - .fourcc = V4L2_PIX_FMT_UYVY, - .colorspace = V4L2_COLORSPACE_SMPTE170M, - .depth = 16, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_OVERLAY, - .vfespfr = ZR36057_VFESPFR_YUV422|ZR36057_VFESPFR_LittleEndian, - }, { - .name = "Hardware-encoded Motion-JPEG", - .fourcc = V4L2_PIX_FMT_MJPEG, - .colorspace = V4L2_COLORSPACE_SMPTE170M, - .depth = 0, - .flags = ZORAN_FORMAT_CAPTURE | - ZORAN_FORMAT_PLAYBACK | - ZORAN_FORMAT_COMPRESSED, - } -}; -#define NUM_FORMATS ARRAY_SIZE(zoran_formats) - - /* small helper function for calculating buffersizes for v4l2 - * we calculate the nearest higher power-of-two, which - * will be the recommended buffersize */ -static __u32 -zoran_v4l2_calc_bufsize (struct zoran_jpg_settings *settings) -{ - __u8 div = settings->VerDcm * settings->HorDcm * settings->TmpDcm; - __u32 num = (1024 * 512) / (div); - __u32 result = 2; - - num--; - while (num) { - num >>= 1; - result <<= 1; - } - - if (result > jpg_bufsize) - return jpg_bufsize; - if (result < 8192) - return 8192; - return result; -} - -/* forward references */ -static void v4l_fbuffer_free(struct zoran_fh *fh); -static void jpg_fbuffer_free(struct zoran_fh *fh); - -/* Set mapping mode */ -static void map_mode_raw(struct zoran_fh *fh) -{ - fh->map_mode = ZORAN_MAP_MODE_RAW; - fh->buffers.buffer_size = v4l_bufsize; - fh->buffers.num_buffers = v4l_nbufs; -} -static void map_mode_jpg(struct zoran_fh *fh, int play) -{ - fh->map_mode = play ? 
ZORAN_MAP_MODE_JPG_PLAY : ZORAN_MAP_MODE_JPG_REC; - fh->buffers.buffer_size = jpg_bufsize; - fh->buffers.num_buffers = jpg_nbufs; -} -static inline const char *mode_name(enum zoran_map_mode mode) -{ - return mode == ZORAN_MAP_MODE_RAW ? "V4L" : "JPG"; -} - -/* - * Allocate the V4L grab buffers - * - * These have to be pysically contiguous. - */ - -static int v4l_fbuffer_alloc(struct zoran_fh *fh) -{ - struct zoran *zr = fh->zr; - int i, off; - unsigned char *mem; - - for (i = 0; i < fh->buffers.num_buffers; i++) { - if (fh->buffers.buffer[i].v4l.fbuffer) - dprintk(2, - KERN_WARNING - "%s: %s - buffer %d already allocated!?\n", - ZR_DEVNAME(zr), __func__, i); - - //udelay(20); - mem = kmalloc(fh->buffers.buffer_size, - GFP_KERNEL | __GFP_NOWARN); - if (!mem) { - dprintk(1, - KERN_ERR - "%s: %s - kmalloc for V4L buf %d failed\n", - ZR_DEVNAME(zr), __func__, i); - v4l_fbuffer_free(fh); - return -ENOBUFS; - } - fh->buffers.buffer[i].v4l.fbuffer = mem; - fh->buffers.buffer[i].v4l.fbuffer_phys = virt_to_phys(mem); - fh->buffers.buffer[i].v4l.fbuffer_bus = virt_to_bus(mem); - for (off = 0; off < fh->buffers.buffer_size; - off += PAGE_SIZE) - SetPageReserved(virt_to_page(mem + off)); - dprintk(4, - KERN_INFO - "%s: %s - V4L frame %d mem %p (bus: 0x%llx)\n", - ZR_DEVNAME(zr), __func__, i, mem, - (unsigned long long)virt_to_bus(mem)); - } - - fh->buffers.allocated = 1; - - return 0; -} - -/* free the V4L grab buffers */ -static void v4l_fbuffer_free(struct zoran_fh *fh) -{ - struct zoran *zr = fh->zr; - int i, off; - unsigned char *mem; - - dprintk(4, KERN_INFO "%s: %s\n", ZR_DEVNAME(zr), __func__); - - for (i = 0; i < fh->buffers.num_buffers; i++) { - if (!fh->buffers.buffer[i].v4l.fbuffer) - continue; - - mem = fh->buffers.buffer[i].v4l.fbuffer; - for (off = 0; off < fh->buffers.buffer_size; - off += PAGE_SIZE) - ClearPageReserved(virt_to_page(mem + off)); - kfree(fh->buffers.buffer[i].v4l.fbuffer); - fh->buffers.buffer[i].v4l.fbuffer = NULL; - } - - fh->buffers.allocated 
= 0; -} - -/* - * Allocate the MJPEG grab buffers. - * - * If a Natoma chipset is present and this is a revision 1 zr36057, - * each MJPEG buffer needs to be physically contiguous. - * (RJ: This statement is from Dave Perks' original driver, - * I could never check it because I have a zr36067) - * - * RJ: The contents grab buffers needs never be accessed in the driver. - * Therefore there is no need to allocate them with vmalloc in order - * to get a contiguous virtual memory space. - * I don't understand why many other drivers first allocate them with - * vmalloc (which uses internally also get_zeroed_page, but delivers you - * virtual addresses) and then again have to make a lot of efforts - * to get the physical address. - * - * Ben Capper: - * On big-endian architectures (such as ppc) some extra steps - * are needed. When reading and writing to the stat_com array - * and fragment buffers, the device expects to see little- - * endian values. The use of cpu_to_le32() and le32_to_cpu() - * in this function (and one or two others in zoran_device.c) - * ensure that these values are always stored in little-endian - * form, regardless of architecture. The zr36057 does Very Bad - * Things on big endian architectures if the stat_com array - * and fragment buffers are not little-endian. 
- */ - -static int jpg_fbuffer_alloc(struct zoran_fh *fh) -{ - struct zoran *zr = fh->zr; - int i, j, off; - u8 *mem; - - for (i = 0; i < fh->buffers.num_buffers; i++) { - if (fh->buffers.buffer[i].jpg.frag_tab) - dprintk(2, - KERN_WARNING - "%s: %s - buffer %d already allocated!?\n", - ZR_DEVNAME(zr), __func__, i); - - /* Allocate fragment table for this buffer */ - - mem = (void *)get_zeroed_page(GFP_KERNEL); - if (!mem) { - dprintk(1, - KERN_ERR - "%s: %s - get_zeroed_page (frag_tab) failed for buffer %d\n", - ZR_DEVNAME(zr), __func__, i); - jpg_fbuffer_free(fh); - return -ENOBUFS; - } - fh->buffers.buffer[i].jpg.frag_tab = (__le32 *)mem; - fh->buffers.buffer[i].jpg.frag_tab_bus = virt_to_bus(mem); - - if (fh->buffers.need_contiguous) { - mem = kmalloc(fh->buffers.buffer_size, GFP_KERNEL); - if (mem == NULL) { - dprintk(1, - KERN_ERR - "%s: %s - kmalloc failed for buffer %d\n", - ZR_DEVNAME(zr), __func__, i); - jpg_fbuffer_free(fh); - return -ENOBUFS; - } - fh->buffers.buffer[i].jpg.frag_tab[0] = - cpu_to_le32(virt_to_bus(mem)); - fh->buffers.buffer[i].jpg.frag_tab[1] = - cpu_to_le32((fh->buffers.buffer_size >> 1) | 1); - for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE) - SetPageReserved(virt_to_page(mem + off)); - } else { - /* jpg_bufsize is already page aligned */ - for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) { - mem = (void *)get_zeroed_page(GFP_KERNEL); - if (mem == NULL) { - dprintk(1, - KERN_ERR - "%s: %s - get_zeroed_page failed for buffer %d\n", - ZR_DEVNAME(zr), __func__, i); - jpg_fbuffer_free(fh); - return -ENOBUFS; - } - - fh->buffers.buffer[i].jpg.frag_tab[2 * j] = - cpu_to_le32(virt_to_bus(mem)); - fh->buffers.buffer[i].jpg.frag_tab[2 * j + 1] = - cpu_to_le32((PAGE_SIZE >> 2) << 1); - SetPageReserved(virt_to_page(mem)); - } - - fh->buffers.buffer[i].jpg.frag_tab[2 * j - 1] |= cpu_to_le32(1); - } - } - - dprintk(4, - KERN_DEBUG "%s: %s - %d KB allocated\n", - ZR_DEVNAME(zr), __func__, - (fh->buffers.num_buffers * 
fh->buffers.buffer_size) >> 10); - - fh->buffers.allocated = 1; - - return 0; -} - -/* free the MJPEG grab buffers */ -static void jpg_fbuffer_free(struct zoran_fh *fh) -{ - struct zoran *zr = fh->zr; - int i, j, off; - unsigned char *mem; - __le32 frag_tab; - struct zoran_buffer *buffer; - - dprintk(4, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); - - for (i = 0, buffer = &fh->buffers.buffer[0]; - i < fh->buffers.num_buffers; i++, buffer++) { - if (!buffer->jpg.frag_tab) - continue; - - if (fh->buffers.need_contiguous) { - frag_tab = buffer->jpg.frag_tab[0]; - - if (frag_tab) { - mem = bus_to_virt(le32_to_cpu(frag_tab)); - for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE) - ClearPageReserved(virt_to_page(mem + off)); - kfree(mem); - buffer->jpg.frag_tab[0] = 0; - buffer->jpg.frag_tab[1] = 0; - } - } else { - for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) { - frag_tab = buffer->jpg.frag_tab[2 * j]; - - if (!frag_tab) - break; - ClearPageReserved(virt_to_page(bus_to_virt(le32_to_cpu(frag_tab)))); - free_page((unsigned long)bus_to_virt(le32_to_cpu(frag_tab))); - buffer->jpg.frag_tab[2 * j] = 0; - buffer->jpg.frag_tab[2 * j + 1] = 0; - } - } - - free_page((unsigned long)buffer->jpg.frag_tab); - buffer->jpg.frag_tab = NULL; - } - - fh->buffers.allocated = 0; -} - -/* - * V4L Buffer grabbing - */ - -static int -zoran_v4l_set_format (struct zoran_fh *fh, - int width, - int height, - const struct zoran_format *format) -{ - struct zoran *zr = fh->zr; - int bpp; - - /* Check size and format of the grab wanted */ - - if (height < BUZ_MIN_HEIGHT || width < BUZ_MIN_WIDTH || - height > BUZ_MAX_HEIGHT || width > BUZ_MAX_WIDTH) { - dprintk(1, - KERN_ERR - "%s: %s - wrong frame size (%dx%d)\n", - ZR_DEVNAME(zr), __func__, width, height); - return -EINVAL; - } - - bpp = (format->depth + 7) / 8; - - /* Check against available buffer size */ - if (height * width * bpp > fh->buffers.buffer_size) { - dprintk(1, - KERN_ERR - "%s: %s - video buffer size (%d 
kB) is too small\n", - ZR_DEVNAME(zr), __func__, fh->buffers.buffer_size >> 10); - return -EINVAL; - } - - /* The video front end needs 4-byte alinged line sizes */ - - if ((bpp == 2 && (width & 1)) || (bpp == 3 && (width & 3))) { - dprintk(1, - KERN_ERR - "%s: %s - wrong frame alignment\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - fh->v4l_settings.width = width; - fh->v4l_settings.height = height; - fh->v4l_settings.format = format; - fh->v4l_settings.bytesperline = bpp * fh->v4l_settings.width; - - return 0; -} - -static int zoran_v4l_queue_frame(struct zoran_fh *fh, int num) -{ - struct zoran *zr = fh->zr; - unsigned long flags; - int res = 0; - - if (!fh->buffers.allocated) { - dprintk(1, - KERN_ERR - "%s: %s - buffers not yet allocated\n", - ZR_DEVNAME(zr), __func__); - res = -ENOMEM; - } - - /* No grabbing outside the buffer range! */ - if (num >= fh->buffers.num_buffers || num < 0) { - dprintk(1, - KERN_ERR - "%s: %s - buffer %d is out of range\n", - ZR_DEVNAME(zr), __func__, num); - res = -EINVAL; - } - - spin_lock_irqsave(&zr->spinlock, flags); - - if (fh->buffers.active == ZORAN_FREE) { - if (zr->v4l_buffers.active == ZORAN_FREE) { - zr->v4l_buffers = fh->buffers; - fh->buffers.active = ZORAN_ACTIVE; - } else { - dprintk(1, - KERN_ERR - "%s: %s - another session is already capturing\n", - ZR_DEVNAME(zr), __func__); - res = -EBUSY; - } - } - - /* make sure a grab isn't going on currently with this buffer */ - if (!res) { - switch (zr->v4l_buffers.buffer[num].state) { - default: - case BUZ_STATE_PEND: - if (zr->v4l_buffers.active == ZORAN_FREE) { - fh->buffers.active = ZORAN_FREE; - zr->v4l_buffers.allocated = 0; - } - res = -EBUSY; /* what are you doing? 
*/ - break; - case BUZ_STATE_DONE: - dprintk(2, - KERN_WARNING - "%s: %s - queueing buffer %d in state DONE!?\n", - ZR_DEVNAME(zr), __func__, num); - /* fall through */ - case BUZ_STATE_USER: - /* since there is at least one unused buffer there's room for at least - * one more pend[] entry */ - zr->v4l_pend[zr->v4l_pend_head++ & V4L_MASK_FRAME] = num; - zr->v4l_buffers.buffer[num].state = BUZ_STATE_PEND; - zr->v4l_buffers.buffer[num].bs.length = - fh->v4l_settings.bytesperline * - zr->v4l_settings.height; - fh->buffers.buffer[num] = zr->v4l_buffers.buffer[num]; - break; - } - } - - spin_unlock_irqrestore(&zr->spinlock, flags); - - if (!res && zr->v4l_buffers.active == ZORAN_FREE) - zr->v4l_buffers.active = fh->buffers.active; - - return res; -} - -/* - * Sync on a V4L buffer - */ - -static int v4l_sync(struct zoran_fh *fh, int frame) -{ - struct zoran *zr = fh->zr; - unsigned long flags; - - if (fh->buffers.active == ZORAN_FREE) { - dprintk(1, - KERN_ERR - "%s: %s - no grab active for this session\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - /* check passed-in frame number */ - if (frame >= fh->buffers.num_buffers || frame < 0) { - dprintk(1, - KERN_ERR "%s: %s - frame %d is invalid\n", - ZR_DEVNAME(zr), __func__, frame); - return -EINVAL; - } - - /* Check if is buffer was queued at all */ - if (zr->v4l_buffers.buffer[frame].state == BUZ_STATE_USER) { - dprintk(1, - KERN_ERR - "%s: %s - attempt to sync on a buffer which was not queued?\n", - ZR_DEVNAME(zr), __func__); - return -EPROTO; - } - - mutex_unlock(&zr->lock); - /* wait on this buffer to get ready */ - if (!wait_event_interruptible_timeout(zr->v4l_capq, - (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ)) { - mutex_lock(&zr->lock); - return -ETIME; - } - mutex_lock(&zr->lock); - if (signal_pending(current)) - return -ERESTARTSYS; - - /* buffer should now be in BUZ_STATE_DONE */ - if (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_DONE) - dprintk(2, - KERN_ERR "%s: %s - internal 
state error\n", - ZR_DEVNAME(zr), __func__); - - zr->v4l_buffers.buffer[frame].state = BUZ_STATE_USER; - fh->buffers.buffer[frame] = zr->v4l_buffers.buffer[frame]; - - spin_lock_irqsave(&zr->spinlock, flags); - - /* Check if streaming capture has finished */ - if (zr->v4l_pend_tail == zr->v4l_pend_head) { - zr36057_set_memgrab(zr, 0); - if (zr->v4l_buffers.active == ZORAN_ACTIVE) { - fh->buffers.active = zr->v4l_buffers.active = ZORAN_FREE; - zr->v4l_buffers.allocated = 0; - } - } - - spin_unlock_irqrestore(&zr->spinlock, flags); - - return 0; -} - -/* - * Queue a MJPEG buffer for capture/playback - */ - -static int zoran_jpg_queue_frame(struct zoran_fh *fh, int num, - enum zoran_codec_mode mode) -{ - struct zoran *zr = fh->zr; - unsigned long flags; - int res = 0; - - /* Check if buffers are allocated */ - if (!fh->buffers.allocated) { - dprintk(1, - KERN_ERR - "%s: %s - buffers not yet allocated\n", - ZR_DEVNAME(zr), __func__); - return -ENOMEM; - } - - /* No grabbing outside the buffer range! */ - if (num >= fh->buffers.num_buffers || num < 0) { - dprintk(1, - KERN_ERR - "%s: %s - buffer %d out of range\n", - ZR_DEVNAME(zr), __func__, num); - return -EINVAL; - } - - /* what is the codec mode right now? 
*/ - if (zr->codec_mode == BUZ_MODE_IDLE) { - zr->jpg_settings = fh->jpg_settings; - } else if (zr->codec_mode != mode) { - /* wrong codec mode active - invalid */ - dprintk(1, - KERN_ERR - "%s: %s - codec in wrong mode\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - if (fh->buffers.active == ZORAN_FREE) { - if (zr->jpg_buffers.active == ZORAN_FREE) { - zr->jpg_buffers = fh->buffers; - fh->buffers.active = ZORAN_ACTIVE; - } else { - dprintk(1, - KERN_ERR - "%s: %s - another session is already capturing\n", - ZR_DEVNAME(zr), __func__); - res = -EBUSY; - } - } - - if (!res && zr->codec_mode == BUZ_MODE_IDLE) { - /* Ok load up the jpeg codec */ - zr36057_enable_jpg(zr, mode); - } - - spin_lock_irqsave(&zr->spinlock, flags); - - if (!res) { - switch (zr->jpg_buffers.buffer[num].state) { - case BUZ_STATE_DONE: - dprintk(2, - KERN_WARNING - "%s: %s - queing frame in BUZ_STATE_DONE state!?\n", - ZR_DEVNAME(zr), __func__); - /* fall through */ - case BUZ_STATE_USER: - /* since there is at least one unused buffer there's room for at - *least one more pend[] entry */ - zr->jpg_pend[zr->jpg_que_head++ & BUZ_MASK_FRAME] = num; - zr->jpg_buffers.buffer[num].state = BUZ_STATE_PEND; - fh->buffers.buffer[num] = zr->jpg_buffers.buffer[num]; - zoran_feed_stat_com(zr); - break; - default: - case BUZ_STATE_DMA: - case BUZ_STATE_PEND: - if (zr->jpg_buffers.active == ZORAN_FREE) { - fh->buffers.active = ZORAN_FREE; - zr->jpg_buffers.allocated = 0; - } - res = -EBUSY; /* what are you doing? */ - break; - } - } - - spin_unlock_irqrestore(&zr->spinlock, flags); - - if (!res && zr->jpg_buffers.active == ZORAN_FREE) - zr->jpg_buffers.active = fh->buffers.active; - - return res; -} - -static int jpg_qbuf(struct zoran_fh *fh, int frame, enum zoran_codec_mode mode) -{ - struct zoran *zr = fh->zr; - int res = 0; - - /* Does the user want to stop streaming? 
*/ - if (frame < 0) { - if (zr->codec_mode == mode) { - if (fh->buffers.active == ZORAN_FREE) { - dprintk(1, - KERN_ERR - "%s: %s(-1) - session not active\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - fh->buffers.active = zr->jpg_buffers.active = ZORAN_FREE; - zr->jpg_buffers.allocated = 0; - zr36057_enable_jpg(zr, BUZ_MODE_IDLE); - return 0; - } else { - dprintk(1, - KERN_ERR - "%s: %s - stop streaming but not in streaming mode\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - } - - if ((res = zoran_jpg_queue_frame(fh, frame, mode))) - return res; - - /* Start the jpeg codec when the first frame is queued */ - if (!res && zr->jpg_que_head == 1) - jpeg_start(zr); - - return res; -} - -/* - * Sync on a MJPEG buffer - */ - -static int jpg_sync(struct zoran_fh *fh, struct zoran_sync *bs) -{ - struct zoran *zr = fh->zr; - unsigned long flags; - int frame; - - if (fh->buffers.active == ZORAN_FREE) { - dprintk(1, - KERN_ERR - "%s: %s - capture is not currently active\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - if (zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS && - zr->codec_mode != BUZ_MODE_MOTION_COMPRESS) { - dprintk(1, - KERN_ERR - "%s: %s - codec not in streaming mode\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - mutex_unlock(&zr->lock); - if (!wait_event_interruptible_timeout(zr->jpg_capq, - (zr->jpg_que_tail != zr->jpg_dma_tail || - zr->jpg_dma_tail == zr->jpg_dma_head), - 10*HZ)) { - int isr; - - btand(~ZR36057_JMC_Go_en, ZR36057_JMC); - udelay(1); - zr->codec->control(zr->codec, CODEC_G_STATUS, - sizeof(isr), &isr); - mutex_lock(&zr->lock); - dprintk(1, - KERN_ERR - "%s: %s - timeout: codec isr=0x%02x\n", - ZR_DEVNAME(zr), __func__, isr); - - return -ETIME; - - } - mutex_lock(&zr->lock); - if (signal_pending(current)) - return -ERESTARTSYS; - - spin_lock_irqsave(&zr->spinlock, flags); - - if (zr->jpg_dma_tail != zr->jpg_dma_head) - frame = zr->jpg_pend[zr->jpg_que_tail++ & BUZ_MASK_FRAME]; - else - frame = 
zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; - - /* buffer should now be in BUZ_STATE_DONE */ - if (zr->jpg_buffers.buffer[frame].state != BUZ_STATE_DONE) - dprintk(2, - KERN_ERR "%s: %s - internal state error\n", - ZR_DEVNAME(zr), __func__); - - *bs = zr->jpg_buffers.buffer[frame].bs; - bs->frame = frame; - zr->jpg_buffers.buffer[frame].state = BUZ_STATE_USER; - fh->buffers.buffer[frame] = zr->jpg_buffers.buffer[frame]; - - spin_unlock_irqrestore(&zr->spinlock, flags); - - return 0; -} - -static void zoran_open_init_session(struct zoran_fh *fh) -{ - int i; - struct zoran *zr = fh->zr; - - /* Per default, map the V4L Buffers */ - map_mode_raw(fh); - - /* take over the card's current settings */ - fh->overlay_settings = zr->overlay_settings; - fh->overlay_settings.is_set = 0; - fh->overlay_settings.format = zr->overlay_settings.format; - fh->overlay_active = ZORAN_FREE; - - /* v4l settings */ - fh->v4l_settings = zr->v4l_settings; - /* jpg settings */ - fh->jpg_settings = zr->jpg_settings; - - /* buffers */ - memset(&fh->buffers, 0, sizeof(fh->buffers)); - for (i = 0; i < MAX_FRAME; i++) { - fh->buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ - fh->buffers.buffer[i].bs.frame = i; - } - fh->buffers.allocated = 0; - fh->buffers.active = ZORAN_FREE; -} - -static void zoran_close_end_session(struct zoran_fh *fh) -{ - struct zoran *zr = fh->zr; - - /* overlay */ - if (fh->overlay_active != ZORAN_FREE) { - fh->overlay_active = zr->overlay_active = ZORAN_FREE; - zr->v4l_overlay_active = 0; - if (!zr->v4l_memgrab_active) - zr36057_overlay(zr, 0); - zr->overlay_mask = NULL; - } - - if (fh->map_mode == ZORAN_MAP_MODE_RAW) { - /* v4l capture */ - if (fh->buffers.active != ZORAN_FREE) { - unsigned long flags; - - spin_lock_irqsave(&zr->spinlock, flags); - zr36057_set_memgrab(zr, 0); - zr->v4l_buffers.allocated = 0; - zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; - spin_unlock_irqrestore(&zr->spinlock, flags); - } - - /* v4l buffers */ - if 
(fh->buffers.allocated) - v4l_fbuffer_free(fh); - } else { - /* jpg capture */ - if (fh->buffers.active != ZORAN_FREE) { - zr36057_enable_jpg(zr, BUZ_MODE_IDLE); - zr->jpg_buffers.allocated = 0; - zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE; - } - - /* jpg buffers */ - if (fh->buffers.allocated) - jpg_fbuffer_free(fh); - } -} - -/* - * Open a zoran card. Right now the flags stuff is just playing - */ - -static int zoran_open(struct file *file) -{ - struct zoran *zr = video_drvdata(file); - struct zoran_fh *fh; - int res, first_open = 0; - - dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n", - ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1); - - mutex_lock(&zr->lock); - - if (zr->user >= 2048) { - dprintk(1, KERN_ERR "%s: too many users (%d) on device\n", - ZR_DEVNAME(zr), zr->user); - res = -EBUSY; - goto fail_unlock; - } - - /* now, create the open()-specific file_ops struct */ - fh = kzalloc(sizeof(struct zoran_fh), GFP_KERNEL); - if (!fh) { - dprintk(1, - KERN_ERR - "%s: %s - allocation of zoran_fh failed\n", - ZR_DEVNAME(zr), __func__); - res = -ENOMEM; - goto fail_unlock; - } - v4l2_fh_init(&fh->fh, video_devdata(file)); - - /* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows - * on norm-change! 
*/ - fh->overlay_mask = - kmalloc(((768 + 31) / 32) * 576 * 4, GFP_KERNEL); - if (!fh->overlay_mask) { - dprintk(1, - KERN_ERR - "%s: %s - allocation of overlay_mask failed\n", - ZR_DEVNAME(zr), __func__); - res = -ENOMEM; - goto fail_fh; - } - - if (zr->user++ == 0) - first_open = 1; - - /* default setup - TODO: look at flags */ - if (first_open) { /* First device open */ - zr36057_restart(zr); - zoran_open_init_params(zr); - zoran_init_hardware(zr); - - btor(ZR36057_ICR_IntPinEn, ZR36057_ICR); - } - - /* set file_ops stuff */ - file->private_data = fh; - fh->zr = zr; - zoran_open_init_session(fh); - v4l2_fh_add(&fh->fh); - mutex_unlock(&zr->lock); - - return 0; - -fail_fh: - v4l2_fh_exit(&fh->fh); - kfree(fh); -fail_unlock: - mutex_unlock(&zr->lock); - - dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n", - ZR_DEVNAME(zr), res, zr->user); - - return res; -} - -static int -zoran_close(struct file *file) -{ - struct zoran_fh *fh = file->private_data; - struct zoran *zr = fh->zr; - - dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(+)=%d\n", - ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user - 1); - - /* kernel locks (fs/device.c), so don't do that ourselves - * (prevents deadlocks) */ - mutex_lock(&zr->lock); - - zoran_close_end_session(fh); - - if (zr->user-- == 1) { /* Last process */ - /* Clean up JPEG process */ - wake_up_interruptible(&zr->jpg_capq); - zr36057_enable_jpg(zr, BUZ_MODE_IDLE); - zr->jpg_buffers.allocated = 0; - zr->jpg_buffers.active = ZORAN_FREE; - - /* disable interrupts */ - btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR); - - if (zr36067_debug > 1) - print_interrupts(zr); - - /* Overlay off */ - zr->v4l_overlay_active = 0; - zr36057_overlay(zr, 0); - zr->overlay_mask = NULL; - - /* capture off */ - wake_up_interruptible(&zr->v4l_capq); - zr36057_set_memgrab(zr, 0); - zr->v4l_buffers.allocated = 0; - zr->v4l_buffers.active = ZORAN_FREE; - zoran_set_pci_master(zr, 0); - - if (!pass_through) { /* Switch to color bar */ 
- decoder_call(zr, video, s_stream, 0); - encoder_call(zr, video, s_routing, 2, 0, 0); - } - } - mutex_unlock(&zr->lock); - - v4l2_fh_del(&fh->fh); - v4l2_fh_exit(&fh->fh); - kfree(fh->overlay_mask); - kfree(fh); - - dprintk(4, KERN_INFO "%s: %s done\n", ZR_DEVNAME(zr), __func__); - - return 0; -} - -static int setup_fbuffer(struct zoran_fh *fh, - void *base, - const struct zoran_format *fmt, - int width, - int height, - int bytesperline) -{ - struct zoran *zr = fh->zr; - - /* (Ronald) v4l/v4l2 guidelines */ - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) - return -EPERM; - - /* Don't allow frame buffer overlay if PCI or AGP is buggy, or on - ALi Magik (that needs very low latency while the card needs a - higher value always) */ - - if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL | PCIPCI_ALIMAGIK)) - return -ENXIO; - - /* we need a bytesperline value, even if not given */ - if (!bytesperline) - bytesperline = width * ((fmt->depth + 7) & ~7) / 8; - -#if 0 - if (zr->overlay_active) { - /* dzjee... stupid users... don't even bother to turn off - * overlay before changing the memory location... - * normally, we would return errors here. However, one of - * the tools that does this is... xawtv! 
and since xawtv - * is used by +/- 99% of the users, we'd rather be user- - * friendly and silently do as if nothing went wrong */ - dprintk(3, - KERN_ERR - "%s: %s - forced overlay turnoff because framebuffer changed\n", - ZR_DEVNAME(zr), __func__); - zr36057_overlay(zr, 0); - } -#endif - - if (!(fmt->flags & ZORAN_FORMAT_OVERLAY)) { - dprintk(1, - KERN_ERR - "%s: %s - no valid overlay format given\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - if (height <= 0 || width <= 0 || bytesperline <= 0) { - dprintk(1, - KERN_ERR - "%s: %s - invalid height/width/bpl value (%d|%d|%d)\n", - ZR_DEVNAME(zr), __func__, width, height, bytesperline); - return -EINVAL; - } - if (bytesperline & 3) { - dprintk(1, - KERN_ERR - "%s: %s - bytesperline (%d) must be 4-byte aligned\n", - ZR_DEVNAME(zr), __func__, bytesperline); - return -EINVAL; - } - - zr->vbuf_base = (void *) ((unsigned long) base & ~3); - zr->vbuf_height = height; - zr->vbuf_width = width; - zr->vbuf_depth = fmt->depth; - zr->overlay_settings.format = fmt; - zr->vbuf_bytesperline = bytesperline; - - /* The user should set new window parameters */ - zr->overlay_settings.is_set = 0; - - return 0; -} - - -static int setup_window(struct zoran_fh *fh, - int x, - int y, - int width, - int height, - struct v4l2_clip __user *clips, - unsigned int clipcount, - void __user *bitmap) -{ - struct zoran *zr = fh->zr; - struct v4l2_clip *vcp = NULL; - int on, end; - - - if (!zr->vbuf_base) { - dprintk(1, - KERN_ERR - "%s: %s - frame buffer has to be set first\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - if (!fh->overlay_settings.format) { - dprintk(1, - KERN_ERR - "%s: %s - no overlay format set\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - if (clipcount > 2048) { - dprintk(1, - KERN_ERR - "%s: %s - invalid clipcount\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - /* - * The video front end needs 4-byte alinged line sizes, we correct that - * silently here if necessary - */ - if 
(zr->vbuf_depth == 15 || zr->vbuf_depth == 16) { - end = (x + width) & ~1; /* round down */ - x = (x + 1) & ~1; /* round up */ - width = end - x; - } - - if (zr->vbuf_depth == 24) { - end = (x + width) & ~3; /* round down */ - x = (x + 3) & ~3; /* round up */ - width = end - x; - } - - if (width > BUZ_MAX_WIDTH) - width = BUZ_MAX_WIDTH; - if (height > BUZ_MAX_HEIGHT) - height = BUZ_MAX_HEIGHT; - - /* Check for invalid parameters */ - if (width < BUZ_MIN_WIDTH || height < BUZ_MIN_HEIGHT || - width > BUZ_MAX_WIDTH || height > BUZ_MAX_HEIGHT) { - dprintk(1, - KERN_ERR - "%s: %s - width = %d or height = %d invalid\n", - ZR_DEVNAME(zr), __func__, width, height); - return -EINVAL; - } - - fh->overlay_settings.x = x; - fh->overlay_settings.y = y; - fh->overlay_settings.width = width; - fh->overlay_settings.height = height; - fh->overlay_settings.clipcount = clipcount; - - /* - * If an overlay is running, we have to switch it off - * and switch it on again in order to get the new settings in effect. - * - * We also want to avoid that the overlay mask is written - * when an overlay is running. - */ - - on = zr->v4l_overlay_active && !zr->v4l_memgrab_active && - zr->overlay_active != ZORAN_FREE && - fh->overlay_active != ZORAN_FREE; - if (on) - zr36057_overlay(zr, 0); - - /* - * Write the overlay mask if clips are wanted. - * We prefer a bitmap. 
- */ - if (bitmap) { - /* fake value - it just means we want clips */ - fh->overlay_settings.clipcount = 1; - - if (copy_from_user(fh->overlay_mask, bitmap, - (width * height + 7) / 8)) { - return -EFAULT; - } - } else if (clipcount) { - /* write our own bitmap from the clips */ - vcp = vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4)); - if (vcp == NULL) { - dprintk(1, - KERN_ERR - "%s: %s - Alloc of clip mask failed\n", - ZR_DEVNAME(zr), __func__); - return -ENOMEM; - } - if (copy_from_user - (vcp, clips, sizeof(struct v4l2_clip) * clipcount)) { - vfree(vcp); - return -EFAULT; - } - write_overlay_mask(fh, vcp, clipcount); - vfree(vcp); - } - - fh->overlay_settings.is_set = 1; - if (fh->overlay_active != ZORAN_FREE && - zr->overlay_active != ZORAN_FREE) - zr->overlay_settings = fh->overlay_settings; - - if (on) - zr36057_overlay(zr, 1); - - /* Make sure the changes come into effect */ - return wait_grab_pending(zr); -} - -static int setup_overlay(struct zoran_fh *fh, int on) -{ - struct zoran *zr = fh->zr; - - /* If there is nothing to do, return immediately */ - if ((on && fh->overlay_active != ZORAN_FREE) || - (!on && fh->overlay_active == ZORAN_FREE)) - return 0; - - /* check whether we're touching someone else's overlay */ - if (on && zr->overlay_active != ZORAN_FREE && - fh->overlay_active == ZORAN_FREE) { - dprintk(1, - KERN_ERR - "%s: %s - overlay is already active for another session\n", - ZR_DEVNAME(zr), __func__); - return -EBUSY; - } - if (!on && zr->overlay_active != ZORAN_FREE && - fh->overlay_active == ZORAN_FREE) { - dprintk(1, - KERN_ERR - "%s: %s - you cannot cancel someone else's session\n", - ZR_DEVNAME(zr), __func__); - return -EPERM; - } - - if (on == 0) { - zr->overlay_active = fh->overlay_active = ZORAN_FREE; - zr->v4l_overlay_active = 0; - /* When a grab is running, the video simply - * won't be switched on any more */ - if (!zr->v4l_memgrab_active) - zr36057_overlay(zr, 0); - zr->overlay_mask = NULL; - } else { - if (!zr->vbuf_base || 
!fh->overlay_settings.is_set) { - dprintk(1, - KERN_ERR - "%s: %s - buffer or window not set\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - if (!fh->overlay_settings.format) { - dprintk(1, - KERN_ERR - "%s: %s - no overlay format set\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - zr->overlay_active = fh->overlay_active = ZORAN_LOCKED; - zr->v4l_overlay_active = 1; - zr->overlay_mask = fh->overlay_mask; - zr->overlay_settings = fh->overlay_settings; - if (!zr->v4l_memgrab_active) - zr36057_overlay(zr, 1); - /* When a grab is running, the video will be - * switched on when grab is finished */ - } - - /* Make sure the changes come into effect */ - return wait_grab_pending(zr); -} - -/* get the status of a buffer in the clients buffer queue */ -static int zoran_v4l2_buffer_status(struct zoran_fh *fh, - struct v4l2_buffer *buf, int num) -{ - struct zoran *zr = fh->zr; - unsigned long flags; - - buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - - switch (fh->map_mode) { - case ZORAN_MAP_MODE_RAW: - /* check range */ - if (num < 0 || num >= fh->buffers.num_buffers || - !fh->buffers.allocated) { - dprintk(1, - KERN_ERR - "%s: %s - wrong number or buffers not allocated\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - spin_lock_irqsave(&zr->spinlock, flags); - dprintk(3, - KERN_DEBUG - "%s: %s() - raw active=%c, buffer %d: state=%c, map=%c\n", - ZR_DEVNAME(zr), __func__, - "FAL"[fh->buffers.active], num, - "UPMD"[zr->v4l_buffers.buffer[num].state], - fh->buffers.buffer[num].map ? 
'Y' : 'N'); - spin_unlock_irqrestore(&zr->spinlock, flags); - - buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - buf->length = fh->buffers.buffer_size; - - /* get buffer */ - buf->bytesused = fh->buffers.buffer[num].bs.length; - if (fh->buffers.buffer[num].state == BUZ_STATE_DONE || - fh->buffers.buffer[num].state == BUZ_STATE_USER) { - buf->sequence = fh->buffers.buffer[num].bs.seq; - buf->flags |= V4L2_BUF_FLAG_DONE; - buf->timestamp = fh->buffers.buffer[num].bs.timestamp; - } else { - buf->flags |= V4L2_BUF_FLAG_QUEUED; - } - - if (fh->v4l_settings.height <= BUZ_MAX_HEIGHT / 2) - buf->field = V4L2_FIELD_TOP; - else - buf->field = V4L2_FIELD_INTERLACED; - - break; - - case ZORAN_MAP_MODE_JPG_REC: - case ZORAN_MAP_MODE_JPG_PLAY: - - /* check range */ - if (num < 0 || num >= fh->buffers.num_buffers || - !fh->buffers.allocated) { - dprintk(1, - KERN_ERR - "%s: %s - wrong number or buffers not allocated\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - buf->type = (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ? - V4L2_BUF_TYPE_VIDEO_CAPTURE : - V4L2_BUF_TYPE_VIDEO_OUTPUT; - buf->length = fh->buffers.buffer_size; - - /* these variables are only written after frame has been captured */ - if (fh->buffers.buffer[num].state == BUZ_STATE_DONE || - fh->buffers.buffer[num].state == BUZ_STATE_USER) { - buf->sequence = fh->buffers.buffer[num].bs.seq; - buf->timestamp = fh->buffers.buffer[num].bs.timestamp; - buf->bytesused = fh->buffers.buffer[num].bs.length; - buf->flags |= V4L2_BUF_FLAG_DONE; - } else { - buf->flags |= V4L2_BUF_FLAG_QUEUED; - } - - /* which fields are these? */ - if (fh->jpg_settings.TmpDcm != 1) - buf->field = fh->jpg_settings.odd_even ? - V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; - else - buf->field = fh->jpg_settings.odd_even ? 
- V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT; - - break; - - default: - - dprintk(5, - KERN_ERR - "%s: %s - invalid buffer type|map_mode (%d|%d)\n", - ZR_DEVNAME(zr), __func__, buf->type, fh->map_mode); - return -EINVAL; - } - - buf->memory = V4L2_MEMORY_MMAP; - buf->index = num; - buf->m.offset = buf->length * num; - - return 0; -} - -static int -zoran_set_norm (struct zoran *zr, - v4l2_std_id norm) -{ - int on; - - if (zr->v4l_buffers.active != ZORAN_FREE || - zr->jpg_buffers.active != ZORAN_FREE) { - dprintk(1, - KERN_WARNING - "%s: %s called while in playback/capture mode\n", - ZR_DEVNAME(zr), __func__); - return -EBUSY; - } - - if (!(norm & zr->card.norms)) { - dprintk(1, - KERN_ERR "%s: %s - unsupported norm %llx\n", - ZR_DEVNAME(zr), __func__, norm); - return -EINVAL; - } - - if (norm & V4L2_STD_SECAM) - zr->timing = zr->card.tvn[2]; - else if (norm & V4L2_STD_NTSC) - zr->timing = zr->card.tvn[1]; - else - zr->timing = zr->card.tvn[0]; - - /* We switch overlay off and on since a change in the - * norm needs different VFE settings */ - on = zr->overlay_active && !zr->v4l_memgrab_active; - if (on) - zr36057_overlay(zr, 0); - - decoder_call(zr, video, s_std, norm); - encoder_call(zr, video, s_std_output, norm); - - if (on) - zr36057_overlay(zr, 1); - - /* Make sure the changes come into effect */ - zr->norm = norm; - - return 0; -} - -static int -zoran_set_input (struct zoran *zr, - int input) -{ - if (input == zr->input) { - return 0; - } - - if (zr->v4l_buffers.active != ZORAN_FREE || - zr->jpg_buffers.active != ZORAN_FREE) { - dprintk(1, - KERN_WARNING - "%s: %s called while in playback/capture mode\n", - ZR_DEVNAME(zr), __func__); - return -EBUSY; - } - - if (input < 0 || input >= zr->card.inputs) { - dprintk(1, - KERN_ERR - "%s: %s - unsupported input %d\n", - ZR_DEVNAME(zr), __func__, input); - return -EINVAL; - } - - zr->input = input; - - decoder_call(zr, video, s_routing, - zr->card.input[input].muxsel, 0, 0); - - return 0; -} - -/* - * ioctl routine - */ - 
-static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - strncpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)-1); - strncpy(cap->driver, "zoran", sizeof(cap->driver)-1); - snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", - pci_name(zr->pci_dev)); - cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY; - cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; - return 0; -} - -static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag) -{ - unsigned int num, i; - - for (num = i = 0; i < NUM_FORMATS; i++) { - if (zoran_formats[i].flags & flag && num++ == fmt->index) { - strncpy(fmt->description, zoran_formats[i].name, - sizeof(fmt->description) - 1); - /* fmt struct pre-zeroed, so adding '\0' not needed */ - fmt->pixelformat = zoran_formats[i].fourcc; - if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED) - fmt->flags |= V4L2_FMT_FLAG_COMPRESSED; - return 0; - } - } - return -EINVAL; -} - -static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh, - struct v4l2_fmtdesc *f) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE); -} - -static int zoran_enum_fmt_vid_out(struct file *file, void *__fh, - struct v4l2_fmtdesc *f) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - return zoran_enum_fmt(zr, f, ZORAN_FORMAT_PLAYBACK); -} - -static int zoran_enum_fmt_vid_overlay(struct file *file, void *__fh, - struct v4l2_fmtdesc *f) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - return zoran_enum_fmt(zr, f, ZORAN_FORMAT_OVERLAY); -} - -static int zoran_g_fmt_vid_out(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - - fmt->fmt.pix.width = fh->jpg_settings.img_width / fh->jpg_settings.HorDcm; - fmt->fmt.pix.height = fh->jpg_settings.img_height * 2 
/ - (fh->jpg_settings.VerDcm * fh->jpg_settings.TmpDcm); - fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&fh->jpg_settings); - fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG; - if (fh->jpg_settings.TmpDcm == 1) - fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? - V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); - else - fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? - V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); - fmt->fmt.pix.bytesperline = 0; - fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; - - return 0; -} - -static int zoran_g_fmt_vid_cap(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - if (fh->map_mode != ZORAN_MAP_MODE_RAW) - return zoran_g_fmt_vid_out(file, fh, fmt); - - fmt->fmt.pix.width = fh->v4l_settings.width; - fmt->fmt.pix.height = fh->v4l_settings.height; - fmt->fmt.pix.sizeimage = fh->v4l_settings.bytesperline * - fh->v4l_settings.height; - fmt->fmt.pix.pixelformat = fh->v4l_settings.format->fourcc; - fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace; - fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline; - if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2)) - fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; - else - fmt->fmt.pix.field = V4L2_FIELD_TOP; - return 0; -} - -static int zoran_g_fmt_vid_overlay(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - fmt->fmt.win.w.left = fh->overlay_settings.x; - fmt->fmt.win.w.top = fh->overlay_settings.y; - fmt->fmt.win.w.width = fh->overlay_settings.width; - fmt->fmt.win.w.height = fh->overlay_settings.height; - if (fh->overlay_settings.width * 2 > BUZ_MAX_HEIGHT) - fmt->fmt.win.field = V4L2_FIELD_INTERLACED; - else - fmt->fmt.win.field = V4L2_FIELD_TOP; - - return 0; -} - -static int zoran_try_fmt_vid_overlay(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - if 
(fmt->fmt.win.w.width > BUZ_MAX_WIDTH) - fmt->fmt.win.w.width = BUZ_MAX_WIDTH; - if (fmt->fmt.win.w.width < BUZ_MIN_WIDTH) - fmt->fmt.win.w.width = BUZ_MIN_WIDTH; - if (fmt->fmt.win.w.height > BUZ_MAX_HEIGHT) - fmt->fmt.win.w.height = BUZ_MAX_HEIGHT; - if (fmt->fmt.win.w.height < BUZ_MIN_HEIGHT) - fmt->fmt.win.w.height = BUZ_MIN_HEIGHT; - - return 0; -} - -static int zoran_try_fmt_vid_out(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - struct zoran_jpg_settings settings; - int res = 0; - - if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG) - return -EINVAL; - - settings = fh->jpg_settings; - - /* we actually need to set 'real' parameters now */ - if ((fmt->fmt.pix.height * 2) > BUZ_MAX_HEIGHT) - settings.TmpDcm = 1; - else - settings.TmpDcm = 2; - settings.decimation = 0; - if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2) - settings.VerDcm = 2; - else - settings.VerDcm = 1; - if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4) - settings.HorDcm = 4; - else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2) - settings.HorDcm = 2; - else - settings.HorDcm = 1; - if (settings.TmpDcm == 1) - settings.field_per_buff = 2; - else - settings.field_per_buff = 1; - - if (settings.HorDcm > 1) { - settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; - settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; - } else { - settings.img_x = 0; - settings.img_width = BUZ_MAX_WIDTH; - } - - /* check */ - res = zoran_check_jpg_settings(zr, &settings, 1); - if (res) - return res; - - /* tell the user what we actually did */ - fmt->fmt.pix.width = settings.img_width / settings.HorDcm; - fmt->fmt.pix.height = settings.img_height * 2 / - (settings.TmpDcm * settings.VerDcm); - if (settings.TmpDcm == 1) - fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? - V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); - else - fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? 
- V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); - - fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings); - fmt->fmt.pix.bytesperline = 0; - fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; - return res; -} - -static int zoran_try_fmt_vid_cap(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int bpp; - int i; - - if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) - return zoran_try_fmt_vid_out(file, fh, fmt); - - for (i = 0; i < NUM_FORMATS; i++) - if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat) - break; - - if (i == NUM_FORMATS) - return -EINVAL; - - bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8); - v4l_bound_align_image( - &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2, - &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0); - return 0; -} - -static int zoran_s_fmt_vid_overlay(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - int res; - - dprintk(3, "x=%d, y=%d, w=%d, h=%d, cnt=%d, map=0x%p\n", - fmt->fmt.win.w.left, fmt->fmt.win.w.top, - fmt->fmt.win.w.width, - fmt->fmt.win.w.height, - fmt->fmt.win.clipcount, - fmt->fmt.win.bitmap); - res = setup_window(fh, fmt->fmt.win.w.left, fmt->fmt.win.w.top, - fmt->fmt.win.w.width, fmt->fmt.win.w.height, - (struct v4l2_clip __user *)fmt->fmt.win.clips, - fmt->fmt.win.clipcount, fmt->fmt.win.bitmap); - return res; -} - -static int zoran_s_fmt_vid_out(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - __le32 printformat = __cpu_to_le32(fmt->fmt.pix.pixelformat); - struct zoran_jpg_settings settings; - int res = 0; - - dprintk(3, "size=%dx%d, fmt=0x%x (%4.4s)\n", - fmt->fmt.pix.width, fmt->fmt.pix.height, - fmt->fmt.pix.pixelformat, - (char *) &printformat); - if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG) - return -EINVAL; - - if (fh->buffers.allocated) { - dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - 
cannot change capture mode\n", - ZR_DEVNAME(zr)); - res = -EBUSY; - return res; - } - - settings = fh->jpg_settings; - - /* we actually need to set 'real' parameters now */ - if (fmt->fmt.pix.height * 2 > BUZ_MAX_HEIGHT) - settings.TmpDcm = 1; - else - settings.TmpDcm = 2; - settings.decimation = 0; - if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2) - settings.VerDcm = 2; - else - settings.VerDcm = 1; - if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4) - settings.HorDcm = 4; - else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2) - settings.HorDcm = 2; - else - settings.HorDcm = 1; - if (settings.TmpDcm == 1) - settings.field_per_buff = 2; - else - settings.field_per_buff = 1; - - if (settings.HorDcm > 1) { - settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; - settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; - } else { - settings.img_x = 0; - settings.img_width = BUZ_MAX_WIDTH; - } - - /* check */ - res = zoran_check_jpg_settings(zr, &settings, 0); - if (res) - return res; - - /* it's ok, so set them */ - fh->jpg_settings = settings; - - map_mode_jpg(fh, fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT); - fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings); - - /* tell the user what we actually did */ - fmt->fmt.pix.width = settings.img_width / settings.HorDcm; - fmt->fmt.pix.height = settings.img_height * 2 / - (settings.TmpDcm * settings.VerDcm); - if (settings.TmpDcm == 1) - fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? - V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); - else - fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? 
- V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); - fmt->fmt.pix.bytesperline = 0; - fmt->fmt.pix.sizeimage = fh->buffers.buffer_size; - fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; - return res; -} - -static int zoran_s_fmt_vid_cap(struct file *file, void *__fh, - struct v4l2_format *fmt) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int i; - int res = 0; - - if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) - return zoran_s_fmt_vid_out(file, fh, fmt); - - for (i = 0; i < NUM_FORMATS; i++) - if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc) - break; - if (i == NUM_FORMATS) { - dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - unknown/unsupported format 0x%x\n", - ZR_DEVNAME(zr), fmt->fmt.pix.pixelformat); - return -EINVAL; - } - - if ((fh->map_mode != ZORAN_MAP_MODE_RAW && fh->buffers.allocated) || - fh->buffers.active != ZORAN_FREE) { - dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n", - ZR_DEVNAME(zr)); - res = -EBUSY; - return res; - } - if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT) - fmt->fmt.pix.height = BUZ_MAX_HEIGHT; - if (fmt->fmt.pix.width > BUZ_MAX_WIDTH) - fmt->fmt.pix.width = BUZ_MAX_WIDTH; - - map_mode_raw(fh); - - res = zoran_v4l_set_format(fh, fmt->fmt.pix.width, fmt->fmt.pix.height, - &zoran_formats[i]); - if (res) - return res; - - /* tell the user the results/missing stuff */ - fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline; - fmt->fmt.pix.sizeimage = fh->v4l_settings.height * fh->v4l_settings.bytesperline; - fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace; - if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2)) - fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; - else - fmt->fmt.pix.field = V4L2_FIELD_TOP; - return res; -} - -static int zoran_g_fbuf(struct file *file, void *__fh, - struct v4l2_framebuffer *fb) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - memset(fb, 0, sizeof(*fb)); - fb->base = zr->vbuf_base; - fb->fmt.width = zr->vbuf_width; - fb->fmt.height = 
zr->vbuf_height; - if (zr->overlay_settings.format) - fb->fmt.pixelformat = fh->overlay_settings.format->fourcc; - fb->fmt.bytesperline = zr->vbuf_bytesperline; - fb->fmt.colorspace = V4L2_COLORSPACE_SRGB; - fb->fmt.field = V4L2_FIELD_INTERLACED; - fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING; - - return 0; -} - -static int zoran_s_fbuf(struct file *file, void *__fh, - const struct v4l2_framebuffer *fb) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int i, res = 0; - __le32 printformat = __cpu_to_le32(fb->fmt.pixelformat); - - for (i = 0; i < NUM_FORMATS; i++) - if (zoran_formats[i].fourcc == fb->fmt.pixelformat) - break; - if (i == NUM_FORMATS) { - dprintk(1, KERN_ERR "%s: VIDIOC_S_FBUF - format=0x%x (%4.4s) not allowed\n", - ZR_DEVNAME(zr), fb->fmt.pixelformat, - (char *)&printformat); - return -EINVAL; - } - - res = setup_fbuffer(fh, fb->base, &zoran_formats[i], fb->fmt.width, - fb->fmt.height, fb->fmt.bytesperline); - - return res; -} - -static int zoran_overlay(struct file *file, void *__fh, unsigned int on) -{ - struct zoran_fh *fh = __fh; - int res; - - res = setup_overlay(fh, on); - - return res; -} - -static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type); - -static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffers *req) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int res = 0; - - if (req->memory != V4L2_MEMORY_MMAP) { - dprintk(2, - KERN_ERR - "%s: only MEMORY_MMAP capture is supported, not %d\n", - ZR_DEVNAME(zr), req->memory); - return -EINVAL; - } - - if (req->count == 0) - return zoran_streamoff(file, fh, req->type); - - if (fh->buffers.allocated) { - dprintk(2, - KERN_ERR - "%s: VIDIOC_REQBUFS - buffers already allocated\n", - ZR_DEVNAME(zr)); - res = -EBUSY; - return res; - } - - if (fh->map_mode == ZORAN_MAP_MODE_RAW && - req->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { - /* control user input */ - if (req->count < 2) - req->count = 2; - if (req->count > 
v4l_nbufs) - req->count = v4l_nbufs; - - /* The next mmap will map the V4L buffers */ - map_mode_raw(fh); - fh->buffers.num_buffers = req->count; - - if (v4l_fbuffer_alloc(fh)) { - res = -ENOMEM; - return res; - } - } else if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC || - fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) { - /* we need to calculate size ourselves now */ - if (req->count < 4) - req->count = 4; - if (req->count > jpg_nbufs) - req->count = jpg_nbufs; - - /* The next mmap will map the MJPEG buffers */ - map_mode_jpg(fh, req->type == V4L2_BUF_TYPE_VIDEO_OUTPUT); - fh->buffers.num_buffers = req->count; - fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings); - - if (jpg_fbuffer_alloc(fh)) { - res = -ENOMEM; - return res; - } - } else { - dprintk(1, - KERN_ERR - "%s: VIDIOC_REQBUFS - unknown type %d\n", - ZR_DEVNAME(zr), req->type); - res = -EINVAL; - return res; - } - return res; -} - -static int zoran_querybuf(struct file *file, void *__fh, struct v4l2_buffer *buf) -{ - struct zoran_fh *fh = __fh; - int res; - - res = zoran_v4l2_buffer_status(fh, buf, buf->index); - - return res; -} - -static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int res = 0, codec_mode, buf_type; - - switch (fh->map_mode) { - case ZORAN_MAP_MODE_RAW: - if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { - dprintk(1, KERN_ERR - "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", - ZR_DEVNAME(zr), buf->type, fh->map_mode); - res = -EINVAL; - return res; - } - - res = zoran_v4l_queue_frame(fh, buf->index); - if (res) - return res; - if (!zr->v4l_memgrab_active && fh->buffers.active == ZORAN_LOCKED) - zr36057_set_memgrab(zr, 1); - break; - - case ZORAN_MAP_MODE_JPG_REC: - case ZORAN_MAP_MODE_JPG_PLAY: - if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) { - buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; - codec_mode = BUZ_MODE_MOTION_DECOMPRESS; - } else { - buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - 
codec_mode = BUZ_MODE_MOTION_COMPRESS; - } - - if (buf->type != buf_type) { - dprintk(1, KERN_ERR - "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", - ZR_DEVNAME(zr), buf->type, fh->map_mode); - res = -EINVAL; - return res; - } - - res = zoran_jpg_queue_frame(fh, buf->index, codec_mode); - if (res != 0) - return res; - if (zr->codec_mode == BUZ_MODE_IDLE && - fh->buffers.active == ZORAN_LOCKED) - zr36057_enable_jpg(zr, codec_mode); - - break; - - default: - dprintk(1, KERN_ERR - "%s: VIDIOC_QBUF - unsupported type %d\n", - ZR_DEVNAME(zr), buf->type); - res = -EINVAL; - break; - } - return res; -} - -static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int res = 0, buf_type, num = -1; /* compiler borks here (?) */ - - switch (fh->map_mode) { - case ZORAN_MAP_MODE_RAW: - if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { - dprintk(1, KERN_ERR - "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", - ZR_DEVNAME(zr), buf->type, fh->map_mode); - res = -EINVAL; - return res; - } - - num = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME]; - if (file->f_flags & O_NONBLOCK && - zr->v4l_buffers.buffer[num].state != BUZ_STATE_DONE) { - res = -EAGAIN; - return res; - } - res = v4l_sync(fh, num); - if (res) - return res; - zr->v4l_sync_tail++; - res = zoran_v4l2_buffer_status(fh, buf, num); - break; - - case ZORAN_MAP_MODE_JPG_REC: - case ZORAN_MAP_MODE_JPG_PLAY: - { - struct zoran_sync bs; - - if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) - buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; - else - buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - - if (buf->type != buf_type) { - dprintk(1, KERN_ERR - "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", - ZR_DEVNAME(zr), buf->type, fh->map_mode); - res = -EINVAL; - return res; - } - - num = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; - - if (file->f_flags & O_NONBLOCK && - zr->jpg_buffers.buffer[num].state != BUZ_STATE_DONE) { - res 
= -EAGAIN; - return res; - } - bs.frame = 0; /* suppress compiler warning */ - res = jpg_sync(fh, &bs); - if (res) - return res; - res = zoran_v4l2_buffer_status(fh, buf, bs.frame); - break; - } - - default: - dprintk(1, KERN_ERR - "%s: VIDIOC_DQBUF - unsupported type %d\n", - ZR_DEVNAME(zr), buf->type); - res = -EINVAL; - break; - } - return res; -} - -static int zoran_streamon(struct file *file, void *__fh, enum v4l2_buf_type type) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int res = 0; - - switch (fh->map_mode) { - case ZORAN_MAP_MODE_RAW: /* raw capture */ - if (zr->v4l_buffers.active != ZORAN_ACTIVE || - fh->buffers.active != ZORAN_ACTIVE) { - res = -EBUSY; - return res; - } - - zr->v4l_buffers.active = fh->buffers.active = ZORAN_LOCKED; - zr->v4l_settings = fh->v4l_settings; - - zr->v4l_sync_tail = zr->v4l_pend_tail; - if (!zr->v4l_memgrab_active && - zr->v4l_pend_head != zr->v4l_pend_tail) { - zr36057_set_memgrab(zr, 1); - } - break; - - case ZORAN_MAP_MODE_JPG_REC: - case ZORAN_MAP_MODE_JPG_PLAY: - /* what is the codec mode right now? */ - if (zr->jpg_buffers.active != ZORAN_ACTIVE || - fh->buffers.active != ZORAN_ACTIVE) { - res = -EBUSY; - return res; - } - - zr->jpg_buffers.active = fh->buffers.active = ZORAN_LOCKED; - - if (zr->jpg_que_head != zr->jpg_que_tail) { - /* Start the jpeg codec when the first frame is queued */ - jpeg_start(zr); - } - break; - - default: - dprintk(1, - KERN_ERR - "%s: VIDIOC_STREAMON - invalid map mode %d\n", - ZR_DEVNAME(zr), fh->map_mode); - res = -EINVAL; - break; - } - return res; -} - -static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int i, res = 0; - unsigned long flags; - - switch (fh->map_mode) { - case ZORAN_MAP_MODE_RAW: /* raw capture */ - if (fh->buffers.active == ZORAN_FREE && - zr->v4l_buffers.active != ZORAN_FREE) { - res = -EPERM; /* stay off other's settings! 
*/ - return res; - } - if (zr->v4l_buffers.active == ZORAN_FREE) - return res; - - spin_lock_irqsave(&zr->spinlock, flags); - /* unload capture */ - if (zr->v4l_memgrab_active) { - - zr36057_set_memgrab(zr, 0); - } - - for (i = 0; i < fh->buffers.num_buffers; i++) - zr->v4l_buffers.buffer[i].state = BUZ_STATE_USER; - fh->buffers = zr->v4l_buffers; - - zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; - - zr->v4l_grab_seq = 0; - zr->v4l_pend_head = zr->v4l_pend_tail = 0; - zr->v4l_sync_tail = 0; - - spin_unlock_irqrestore(&zr->spinlock, flags); - - break; - - case ZORAN_MAP_MODE_JPG_REC: - case ZORAN_MAP_MODE_JPG_PLAY: - if (fh->buffers.active == ZORAN_FREE && - zr->jpg_buffers.active != ZORAN_FREE) { - res = -EPERM; /* stay off other's settings! */ - return res; - } - if (zr->jpg_buffers.active == ZORAN_FREE) - return res; - - res = jpg_qbuf(fh, -1, - (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ? - BUZ_MODE_MOTION_COMPRESS : - BUZ_MODE_MOTION_DECOMPRESS); - if (res) - return res; - break; - default: - dprintk(1, KERN_ERR - "%s: VIDIOC_STREAMOFF - invalid map mode %d\n", - ZR_DEVNAME(zr), fh->map_mode); - res = -EINVAL; - break; - } - return res; -} -static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - *std = zr->norm; - return 0; -} - -static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int res = 0; - - res = zoran_set_norm(zr, std); - if (res) - return res; - - res = wait_grab_pending(zr); - return res; -} - -static int zoran_enum_input(struct file *file, void *__fh, - struct v4l2_input *inp) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - if (inp->index >= zr->card.inputs) - return -EINVAL; - - strncpy(inp->name, zr->card.input[inp->index].name, - sizeof(inp->name) - 1); - inp->type = V4L2_INPUT_TYPE_CAMERA; - inp->std = V4L2_STD_ALL; - - /* Get status of video decoder */ - 
decoder_call(zr, video, g_input_status, &inp->status); - return 0; -} - -static int zoran_g_input(struct file *file, void *__fh, unsigned int *input) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - *input = zr->input; - - return 0; -} - -static int zoran_s_input(struct file *file, void *__fh, unsigned int input) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int res; - - res = zoran_set_input(zr, input); - if (res) - return res; - - /* Make sure the changes come into effect */ - res = wait_grab_pending(zr); - return res; -} - -static int zoran_enum_output(struct file *file, void *__fh, - struct v4l2_output *outp) -{ - if (outp->index != 0) - return -EINVAL; - - outp->index = 0; - outp->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY; - strncpy(outp->name, "Autodetect", sizeof(outp->name)-1); - - return 0; -} - -static int zoran_g_output(struct file *file, void *__fh, unsigned int *output) -{ - *output = 0; - - return 0; -} - -static int zoran_s_output(struct file *file, void *__fh, unsigned int output) -{ - if (output != 0) - return -EINVAL; - - return 0; -} - -/* cropping (sub-frame capture) */ -static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - - if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && - sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - if (fh->map_mode == ZORAN_MAP_MODE_RAW) { - dprintk(1, KERN_ERR - "%s: VIDIOC_G_SELECTION - subcapture only supported for compressed capture\n", - ZR_DEVNAME(zr)); - return -EINVAL; - } - - switch (sel->target) { - case V4L2_SEL_TGT_CROP: - sel->r.top = fh->jpg_settings.img_y; - sel->r.left = fh->jpg_settings.img_x; - sel->r.width = fh->jpg_settings.img_width; - sel->r.height = fh->jpg_settings.img_height; - break; - case V4L2_SEL_TGT_CROP_DEFAULT: - sel->r.top = sel->r.left = 0; - sel->r.width = BUZ_MIN_WIDTH; - sel->r.height = BUZ_MIN_HEIGHT; - break; - case 
V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.top = sel->r.left = 0; - sel->r.width = BUZ_MAX_WIDTH; - sel->r.height = BUZ_MAX_HEIGHT; - break; - default: - return -EINVAL; - } - return 0; -} - -static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - struct zoran_jpg_settings settings; - int res; - - if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && - sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - if (sel->target != V4L2_SEL_TGT_CROP) - return -EINVAL; - - if (fh->map_mode == ZORAN_MAP_MODE_RAW) { - dprintk(1, KERN_ERR - "%s: VIDIOC_S_SELECTION - subcapture only supported for compressed capture\n", - ZR_DEVNAME(zr)); - return -EINVAL; - } - - settings = fh->jpg_settings; - - if (fh->buffers.allocated) { - dprintk(1, KERN_ERR - "%s: VIDIOC_S_SELECTION - cannot change settings while active\n", - ZR_DEVNAME(zr)); - return -EBUSY; - } - - /* move into a form that we understand */ - settings.img_x = sel->r.left; - settings.img_y = sel->r.top; - settings.img_width = sel->r.width; - settings.img_height = sel->r.height; - - /* check validity */ - res = zoran_check_jpg_settings(zr, &settings, 0); - if (res) - return res; - - /* accept */ - fh->jpg_settings = settings; - return res; -} - -static int zoran_g_jpegcomp(struct file *file, void *__fh, - struct v4l2_jpegcompression *params) -{ - struct zoran_fh *fh = __fh; - memset(params, 0, sizeof(*params)); - - params->quality = fh->jpg_settings.jpg_comp.quality; - params->APPn = fh->jpg_settings.jpg_comp.APPn; - memcpy(params->APP_data, - fh->jpg_settings.jpg_comp.APP_data, - fh->jpg_settings.jpg_comp.APP_len); - params->APP_len = fh->jpg_settings.jpg_comp.APP_len; - memcpy(params->COM_data, - fh->jpg_settings.jpg_comp.COM_data, - fh->jpg_settings.jpg_comp.COM_len); - params->COM_len = fh->jpg_settings.jpg_comp.COM_len; - params->jpeg_markers = - fh->jpg_settings.jpg_comp.jpeg_markers; - - return 0; -} - -static int 
zoran_s_jpegcomp(struct file *file, void *__fh, - const struct v4l2_jpegcompression *params) -{ - struct zoran_fh *fh = __fh; - struct zoran *zr = fh->zr; - int res = 0; - struct zoran_jpg_settings settings; - - settings = fh->jpg_settings; - - settings.jpg_comp = *params; - - if (fh->buffers.active != ZORAN_FREE) { - dprintk(1, KERN_WARNING - "%s: VIDIOC_S_JPEGCOMP called while in playback/capture mode\n", - ZR_DEVNAME(zr)); - res = -EBUSY; - return res; - } - - res = zoran_check_jpg_settings(zr, &settings, 0); - if (res) - return res; - if (!fh->buffers.allocated) - fh->buffers.buffer_size = - zoran_v4l2_calc_bufsize(&fh->jpg_settings); - fh->jpg_settings.jpg_comp = settings.jpg_comp; - return res; -} - -static __poll_t -zoran_poll (struct file *file, - poll_table *wait) -{ - struct zoran_fh *fh = file->private_data; - struct zoran *zr = fh->zr; - __poll_t res = v4l2_ctrl_poll(file, wait); - int frame; - unsigned long flags; - - /* we should check whether buffers are ready to be synced on - * (w/o waits - O_NONBLOCK) here - * if ready for read (sync), return EPOLLIN|EPOLLRDNORM, - * if ready for write (sync), return EPOLLOUT|EPOLLWRNORM, - * if error, return EPOLLERR, - * if no buffers queued or so, return EPOLLNVAL - */ - - switch (fh->map_mode) { - case ZORAN_MAP_MODE_RAW: - poll_wait(file, &zr->v4l_capq, wait); - frame = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME]; - - spin_lock_irqsave(&zr->spinlock, flags); - dprintk(3, - KERN_DEBUG - "%s: %s() raw - active=%c, sync_tail=%lu/%c, pend_tail=%lu, pend_head=%lu\n", - ZR_DEVNAME(zr), __func__, - "FAL"[fh->buffers.active], zr->v4l_sync_tail, - "UPMD"[zr->v4l_buffers.buffer[frame].state], - zr->v4l_pend_tail, zr->v4l_pend_head); - /* Process is the one capturing? */ - if (fh->buffers.active != ZORAN_FREE && - /* Buffer ready to DQBUF? 
*/ - zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE) - res |= EPOLLIN | EPOLLRDNORM; - spin_unlock_irqrestore(&zr->spinlock, flags); - - break; - - case ZORAN_MAP_MODE_JPG_REC: - case ZORAN_MAP_MODE_JPG_PLAY: - poll_wait(file, &zr->jpg_capq, wait); - frame = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; - - spin_lock_irqsave(&zr->spinlock, flags); - dprintk(3, - KERN_DEBUG - "%s: %s() jpg - active=%c, que_tail=%lu/%c, que_head=%lu, dma=%lu/%lu\n", - ZR_DEVNAME(zr), __func__, - "FAL"[fh->buffers.active], zr->jpg_que_tail, - "UPMD"[zr->jpg_buffers.buffer[frame].state], - zr->jpg_que_head, zr->jpg_dma_tail, zr->jpg_dma_head); - if (fh->buffers.active != ZORAN_FREE && - zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) { - if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) - res |= EPOLLIN | EPOLLRDNORM; - else - res |= EPOLLOUT | EPOLLWRNORM; - } - spin_unlock_irqrestore(&zr->spinlock, flags); - - break; - - default: - dprintk(1, - KERN_ERR - "%s: %s - internal error, unknown map_mode=%d\n", - ZR_DEVNAME(zr), __func__, fh->map_mode); - res |= EPOLLERR; - } - - return res; -} - - -/* - * This maps the buffers to user space. 
- * - * Depending on the state of fh->map_mode - * the V4L or the MJPEG buffers are mapped - * per buffer or all together - * - * Note that we need to connect to some - * unmap signal event to unmap the de-allocate - * the buffer accordingly (zoran_vm_close()) - */ - -static void -zoran_vm_open (struct vm_area_struct *vma) -{ - struct zoran_mapping *map = vma->vm_private_data; - atomic_inc(&map->count); -} - -static void -zoran_vm_close (struct vm_area_struct *vma) -{ - struct zoran_mapping *map = vma->vm_private_data; - struct zoran_fh *fh = map->fh; - struct zoran *zr = fh->zr; - int i; - - dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr), - __func__, mode_name(fh->map_mode)); - - for (i = 0; i < fh->buffers.num_buffers; i++) { - if (fh->buffers.buffer[i].map == map) - fh->buffers.buffer[i].map = NULL; - } - kfree(map); - - /* Any buffers still mapped? */ - for (i = 0; i < fh->buffers.num_buffers; i++) { - if (fh->buffers.buffer[i].map) { - return; - } - } - - dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr), - __func__, mode_name(fh->map_mode)); - - if (fh->map_mode == ZORAN_MAP_MODE_RAW) { - if (fh->buffers.active != ZORAN_FREE) { - unsigned long flags; - - spin_lock_irqsave(&zr->spinlock, flags); - zr36057_set_memgrab(zr, 0); - zr->v4l_buffers.allocated = 0; - zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; - spin_unlock_irqrestore(&zr->spinlock, flags); - } - v4l_fbuffer_free(fh); - } else { - if (fh->buffers.active != ZORAN_FREE) { - jpg_qbuf(fh, -1, zr->codec_mode); - zr->jpg_buffers.allocated = 0; - zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE; - } - jpg_fbuffer_free(fh); - } -} - -static const struct vm_operations_struct zoran_vm_ops = { - .open = zoran_vm_open, - .close = zoran_vm_close, -}; - -static int -zoran_mmap (struct file *file, - struct vm_area_struct *vma) -{ - struct zoran_fh *fh = file->private_data; - struct zoran *zr = fh->zr; - unsigned long size = (vma->vm_end - vma->vm_start); - unsigned 
long offset = vma->vm_pgoff << PAGE_SHIFT; - int i, j; - unsigned long page, start = vma->vm_start, todo, pos, fraglen; - int first, last; - struct zoran_mapping *map; - int res = 0; - - dprintk(3, - KERN_INFO "%s: %s(%s) of 0x%08lx-0x%08lx (size=%lu)\n", - ZR_DEVNAME(zr), __func__, - mode_name(fh->map_mode), vma->vm_start, vma->vm_end, size); - - if (!(vma->vm_flags & VM_SHARED) || !(vma->vm_flags & VM_READ) || - !(vma->vm_flags & VM_WRITE)) { - dprintk(1, - KERN_ERR - "%s: %s - no MAP_SHARED/PROT_{READ,WRITE} given\n", - ZR_DEVNAME(zr), __func__); - return -EINVAL; - } - - if (!fh->buffers.allocated) { - dprintk(1, - KERN_ERR - "%s: %s(%s) - buffers not yet allocated\n", - ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode)); - res = -ENOMEM; - return res; - } - - first = offset / fh->buffers.buffer_size; - last = first - 1 + size / fh->buffers.buffer_size; - if (offset % fh->buffers.buffer_size != 0 || - size % fh->buffers.buffer_size != 0 || first < 0 || - last < 0 || first >= fh->buffers.num_buffers || - last >= fh->buffers.buffer_size) { - dprintk(1, - KERN_ERR - "%s: %s(%s) - offset=%lu or size=%lu invalid for bufsize=%d and numbufs=%d\n", - ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), offset, size, - fh->buffers.buffer_size, - fh->buffers.num_buffers); - res = -EINVAL; - return res; - } - - /* Check if any buffers are already mapped */ - for (i = first; i <= last; i++) { - if (fh->buffers.buffer[i].map) { - dprintk(1, - KERN_ERR - "%s: %s(%s) - buffer %d already mapped\n", - ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), i); - res = -EBUSY; - return res; - } - } - - /* map these buffers */ - map = kmalloc(sizeof(struct zoran_mapping), GFP_KERNEL); - if (!map) { - res = -ENOMEM; - return res; - } - map->fh = fh; - atomic_set(&map->count, 1); - - vma->vm_ops = &zoran_vm_ops; - vma->vm_flags |= VM_DONTEXPAND; - vma->vm_private_data = map; - - if (fh->map_mode == ZORAN_MAP_MODE_RAW) { - for (i = first; i <= last; i++) { - todo = size; - if (todo > 
fh->buffers.buffer_size) - todo = fh->buffers.buffer_size; - page = fh->buffers.buffer[i].v4l.fbuffer_phys; - if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, - todo, PAGE_SHARED)) { - dprintk(1, - KERN_ERR - "%s: %s(V4L) - remap_pfn_range failed\n", - ZR_DEVNAME(zr), __func__); - res = -EAGAIN; - return res; - } - size -= todo; - start += todo; - fh->buffers.buffer[i].map = map; - if (size == 0) - break; - } - } else { - for (i = first; i <= last; i++) { - for (j = 0; - j < fh->buffers.buffer_size / PAGE_SIZE; - j++) { - fraglen = - (le32_to_cpu(fh->buffers.buffer[i].jpg. - frag_tab[2 * j + 1]) & ~1) << 1; - todo = size; - if (todo > fraglen) - todo = fraglen; - pos = - le32_to_cpu(fh->buffers. - buffer[i].jpg.frag_tab[2 * j]); - /* should just be pos on i386 */ - page = virt_to_phys(bus_to_virt(pos)) - >> PAGE_SHIFT; - if (remap_pfn_range(vma, start, page, - todo, PAGE_SHARED)) { - dprintk(1, - KERN_ERR - "%s: %s(V4L) - remap_pfn_range failed\n", - ZR_DEVNAME(zr), __func__); - res = -EAGAIN; - return res; - } - size -= todo; - start += todo; - if (size == 0) - break; - if (le32_to_cpu(fh->buffers.buffer[i].jpg. 
- frag_tab[2 * j + 1]) & 1) - break; /* was last fragment */ - } - fh->buffers.buffer[i].map = map; - if (size == 0) - break; - - } - } - return res; -} - -static const struct v4l2_ioctl_ops zoran_ioctl_ops = { - .vidioc_querycap = zoran_querycap, - .vidioc_s_selection = zoran_s_selection, - .vidioc_g_selection = zoran_g_selection, - .vidioc_enum_input = zoran_enum_input, - .vidioc_g_input = zoran_g_input, - .vidioc_s_input = zoran_s_input, - .vidioc_enum_output = zoran_enum_output, - .vidioc_g_output = zoran_g_output, - .vidioc_s_output = zoran_s_output, - .vidioc_g_fbuf = zoran_g_fbuf, - .vidioc_s_fbuf = zoran_s_fbuf, - .vidioc_g_std = zoran_g_std, - .vidioc_s_std = zoran_s_std, - .vidioc_g_jpegcomp = zoran_g_jpegcomp, - .vidioc_s_jpegcomp = zoran_s_jpegcomp, - .vidioc_overlay = zoran_overlay, - .vidioc_reqbufs = zoran_reqbufs, - .vidioc_querybuf = zoran_querybuf, - .vidioc_qbuf = zoran_qbuf, - .vidioc_dqbuf = zoran_dqbuf, - .vidioc_streamon = zoran_streamon, - .vidioc_streamoff = zoran_streamoff, - .vidioc_enum_fmt_vid_cap = zoran_enum_fmt_vid_cap, - .vidioc_enum_fmt_vid_out = zoran_enum_fmt_vid_out, - .vidioc_enum_fmt_vid_overlay = zoran_enum_fmt_vid_overlay, - .vidioc_g_fmt_vid_cap = zoran_g_fmt_vid_cap, - .vidioc_g_fmt_vid_out = zoran_g_fmt_vid_out, - .vidioc_g_fmt_vid_overlay = zoran_g_fmt_vid_overlay, - .vidioc_s_fmt_vid_cap = zoran_s_fmt_vid_cap, - .vidioc_s_fmt_vid_out = zoran_s_fmt_vid_out, - .vidioc_s_fmt_vid_overlay = zoran_s_fmt_vid_overlay, - .vidioc_try_fmt_vid_cap = zoran_try_fmt_vid_cap, - .vidioc_try_fmt_vid_out = zoran_try_fmt_vid_out, - .vidioc_try_fmt_vid_overlay = zoran_try_fmt_vid_overlay, - .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, - .vidioc_unsubscribe_event = v4l2_event_unsubscribe, -}; - -static const struct v4l2_file_operations zoran_fops = { - .owner = THIS_MODULE, - .open = zoran_open, - .release = zoran_close, - .unlocked_ioctl = video_ioctl2, - .mmap = zoran_mmap, - .poll = zoran_poll, -}; - -const struct video_device 
zoran_template = { - .name = ZORAN_NAME, - .fops = &zoran_fops, - .ioctl_ops = &zoran_ioctl_ops, - .release = &zoran_vdev_release, - .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM, -}; - diff --git a/drivers/media/pci/zoran/zoran_procfs.c b/drivers/media/pci/zoran/zoran_procfs.c deleted file mode 100644 index 78ac8f853748..000000000000 --- a/drivers/media/pci/zoran/zoran_procfs.c +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Zoran zr36057/zr36067 PCI controller driver, for the - * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux - * Media Labs LML33/LML33R10. - * - * This part handles the procFS entries (/proc/ZORAN[%d]) - * - * Copyright (C) 2000 Serguei Miridonov - * - * Currently maintained by: - * Ronald Bultje - * Laurent Pinchart - * Mailinglist - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "videocodec.h" -#include "zoran.h" -#include "zoran_procfs.h" -#include "zoran_card.h" - -#ifdef CONFIG_PROC_FS -struct procfs_params_zr36067 { - char *name; - short reg; - u32 mask; - short bit; -}; - -static const struct procfs_params_zr36067 zr67[] = { - {"HSPol", 0x000, 1, 30}, - {"HStart", 0x000, 0x3ff, 10}, - {"HEnd", 0x000, 0x3ff, 0}, - - {"VSPol", 0x004, 1, 30}, - {"VStart", 0x004, 0x3ff, 10}, - {"VEnd", 0x004, 0x3ff, 0}, - - {"ExtFl", 0x008, 1, 26}, - {"TopField", 0x008, 1, 25}, - {"VCLKPol", 0x008, 1, 24}, - {"DupFld", 0x008, 1, 20}, - {"LittleEndian", 0x008, 1, 0}, - - {"HsyncStart", 0x10c, 0xffff, 16}, - {"LineTot", 0x10c, 0xffff, 0}, - - {"NAX", 0x110, 0xffff, 16}, - {"PAX", 0x110, 0xffff, 0}, - - {"NAY", 0x114, 0xffff, 16}, - {"PAY", 0x114, 0xffff, 0}, - - /* {"",,,}, */ - - {NULL, 0, 0, 0}, -}; - -static void -setparam (struct zoran *zr, - char *name, - char *sval) -{ - int i = 0, reg0, reg, val; - - while (zr67[i].name != NULL) { - if (!strncmp(name, zr67[i].name, strlen(zr67[i].name))) { - reg = reg0 = btread(zr67[i].reg); - reg &= ~(zr67[i].mask << zr67[i].bit); - if (!isdigit(sval[0])) - break; - val = simple_strtoul(sval, NULL, 0); - if ((val & ~zr67[i].mask)) - break; - reg |= (val & zr67[i].mask) << zr67[i].bit; - dprintk(4, - KERN_INFO - "%s: setparam: setting ZR36067 register 0x%03x: 0x%08x=>0x%08x %s=%d\n", - ZR_DEVNAME(zr), zr67[i].reg, reg0, reg, - zr67[i].name, val); - btwrite(reg, zr67[i].reg); - break; - } - i++; - } -} - -static int zoran_show(struct seq_file *p, void *v) -{ - struct zoran *zr = p->private; - int i; - - seq_printf(p, "ZR36067 registers:\n"); - for (i = 0; i < 0x130; i += 16) - seq_printf(p, "%03X %08X %08X %08X %08X \n", i, - btread(i), btread(i+4), btread(i+8), btread(i+12)); - return 0; -} - -static int zoran_open(struct inode *inode, struct file 
*file) -{ - struct zoran *data = PDE_DATA(inode); - return single_open(file, zoran_show, data); -} - -static ssize_t zoran_write(struct file *file, const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct zoran *zr = PDE_DATA(file_inode(file)); - char *string, *sp; - char *line, *ldelim, *varname, *svar, *tdelim; - - if (count > 32768) /* Stupidity filter */ - return -EINVAL; - - string = sp = vmalloc(count + 1); - if (!string) { - dprintk(1, - KERN_ERR - "%s: write_proc: can not allocate memory\n", - ZR_DEVNAME(zr)); - return -ENOMEM; - } - if (copy_from_user(string, buffer, count)) { - vfree (string); - return -EFAULT; - } - string[count] = 0; - dprintk(4, KERN_INFO "%s: write_proc: name=%pD count=%zu zr=%p\n", - ZR_DEVNAME(zr), file, count, zr); - ldelim = " \t\n"; - tdelim = "="; - line = strpbrk(sp, ldelim); - while (line) { - *line = 0; - svar = strpbrk(sp, tdelim); - if (svar) { - *svar = 0; - varname = sp; - svar++; - setparam(zr, varname, svar); - } - sp = line + 1; - line = strpbrk(sp, ldelim); - } - vfree(string); - - return count; -} - -static const struct file_operations zoran_operations = { - .owner = THIS_MODULE, - .open = zoran_open, - .read = seq_read, - .write = zoran_write, - .llseek = seq_lseek, - .release = single_release, -}; -#endif - -int -zoran_proc_init (struct zoran *zr) -{ -#ifdef CONFIG_PROC_FS - char name[8]; - - snprintf(name, 7, "zoran%d", zr->id); - zr->zoran_proc = proc_create_data(name, 0, NULL, &zoran_operations, zr); - if (zr->zoran_proc != NULL) { - dprintk(2, - KERN_INFO - "%s: procfs entry /proc/%s allocated. 
data=%p\n", - ZR_DEVNAME(zr), name, zr); - } else { - dprintk(1, KERN_ERR "%s: Unable to initialise /proc/%s\n", - ZR_DEVNAME(zr), name); - return 1; - } -#endif - return 0; -} - -void -zoran_proc_cleanup (struct zoran *zr) -{ -#ifdef CONFIG_PROC_FS - char name[8]; - - snprintf(name, 7, "zoran%d", zr->id); - if (zr->zoran_proc) - remove_proc_entry(name, NULL); - zr->zoran_proc = NULL; -#endif -} diff --git a/drivers/media/pci/zoran/zoran_procfs.h b/drivers/media/pci/zoran/zoran_procfs.h deleted file mode 100644 index 0ac7cb0011f2..000000000000 --- a/drivers/media/pci/zoran/zoran_procfs.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Zoran zr36057/zr36067 PCI controller driver, for the - * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux - * Media Labs LML33/LML33R10. - * - * This part handles card-specific data and detection - * - * Copyright (C) 2000 Serguei Miridonov - * - * Currently maintained by: - * Ronald Bultje - * Laurent Pinchart - * Mailinglist - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __ZORAN_PROCFS_H__ -#define __ZORAN_PROCFS_H__ - -extern int zoran_proc_init(struct zoran *zr); -extern void zoran_proc_cleanup(struct zoran *zr); - -#endif /* __ZORAN_PROCFS_H__ */ diff --git a/drivers/media/pci/zoran/zr36016.c b/drivers/media/pci/zoran/zr36016.c deleted file mode 100644 index 8736b9d8d97e..000000000000 --- a/drivers/media/pci/zoran/zr36016.c +++ /dev/null @@ -1,516 +0,0 @@ -/* - * Zoran ZR36016 basic configuration functions - * - * Copyright (C) 2001 Wolfgang Scherr - * - * $Id: zr36016.c,v 1.1.2.14 2003/08/20 19:46:55 rbultje Exp $ - * - * ------------------------------------------------------------------------ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * ------------------------------------------------------------------------ - */ - -#define ZR016_VERSION "v0.7" - -#include -#include -#include -#include - -#include -#include - -/* I/O commands, error codes */ -#include - -/* v4l API */ - -/* headerfile of this module */ -#include "zr36016.h" - -/* codec io API */ -#include "videocodec.h" - -/* it doesn't make sense to have more than 20 or so, - just to prevent some unwanted loops */ -#define MAX_CODECS 20 - -/* amount of chips attached via this driver */ -static int zr36016_codecs; - -/* debugging is available via module parameter */ -static int debug; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0-4)"); - -#define dprintk(num, format, args...) 
\ - do { \ - if (debug >= num) \ - printk(format, ##args); \ - } while (0) - -/* ========================================================================= - Local hardware I/O functions: - - read/write via codec layer (registers are located in the master device) - ========================================================================= */ - -/* read and write functions */ -static u8 -zr36016_read (struct zr36016 *ptr, - u16 reg) -{ - u8 value = 0; - - // just in case something is wrong... - if (ptr->codec->master_data->readreg) - value = - (ptr->codec->master_data-> - readreg(ptr->codec, reg)) & 0xFF; - else - dprintk(1, - KERN_ERR "%s: invalid I/O setup, nothing read!\n", - ptr->name); - - dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, - value); - - return value; -} - -static void -zr36016_write (struct zr36016 *ptr, - u16 reg, - u8 value) -{ - dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, - reg); - - // just in case something is wrong... - if (ptr->codec->master_data->writereg) { - ptr->codec->master_data->writereg(ptr->codec, reg, value); - } else - dprintk(1, - KERN_ERR - "%s: invalid I/O setup, nothing written!\n", - ptr->name); -} - -/* indirect read and write functions */ -/* the 016 supports auto-addr-increment, but - * writing it all time cost not much and is safer... */ -static u8 -zr36016_readi (struct zr36016 *ptr, - u16 reg) -{ - u8 value = 0; - - // just in case something is wrong... 
- if ((ptr->codec->master_data->writereg) && - (ptr->codec->master_data->readreg)) { - ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR - value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA - } else - dprintk(1, - KERN_ERR - "%s: invalid I/O setup, nothing read (i)!\n", - ptr->name); - - dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name, - reg, value); - return value; -} - -static void -zr36016_writei (struct zr36016 *ptr, - u16 reg, - u8 value) -{ - dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name, - value, reg); - - // just in case something is wrong... - if (ptr->codec->master_data->writereg) { - ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR - ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA - } else - dprintk(1, - KERN_ERR - "%s: invalid I/O setup, nothing written (i)!\n", - ptr->name); -} - -/* ========================================================================= - Local helper function: - - version read - ========================================================================= */ - -/* version kept in datastructure */ -static u8 -zr36016_read_version (struct zr36016 *ptr) -{ - ptr->version = zr36016_read(ptr, 0) >> 4; - return ptr->version; -} - -/* ========================================================================= - Local helper function: - - basic test of "connectivity", writes/reads to/from PAX-Lo register - ========================================================================= */ - -static int -zr36016_basic_test (struct zr36016 *ptr) -{ - if (debug) { - int i; - zr36016_writei(ptr, ZR016I_PAX_LO, 0x55); - dprintk(1, KERN_INFO "%s: registers: ", ptr->name); - for (i = 0; i <= 0x0b; i++) - dprintk(1, "%02x ", zr36016_readi(ptr, i)); - dprintk(1, "\n"); - } - // for testing just write 0, then the default value to a register and read - // it back in both cases - zr36016_writei(ptr, 
ZR016I_PAX_LO, 0x00); - if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) { - dprintk(1, - KERN_ERR - "%s: attach failed, can't connect to vfe processor!\n", - ptr->name); - return -ENXIO; - } - zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0); - if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) { - dprintk(1, - KERN_ERR - "%s: attach failed, can't connect to vfe processor!\n", - ptr->name); - return -ENXIO; - } - // we allow version numbers from 0-3, should be enough, though - zr36016_read_version(ptr); - if (ptr->version & 0x0c) { - dprintk(1, - KERN_ERR - "%s: attach failed, suspicious version %d found...\n", - ptr->name, ptr->version); - return -ENXIO; - } - - return 0; /* looks good! */ -} - -/* ========================================================================= - Local helper function: - - simple loop for pushing the init datasets - NO USE -- - ========================================================================= */ - -#if 0 -static int zr36016_pushit (struct zr36016 *ptr, - u16 startreg, - u16 len, - const char *data) -{ - int i=0; - - dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", - ptr->name, startreg,len); - while (imode == CODEC_DO_COMPRESSION ? - ZR016_COMPRESSION : ZR016_EXPANSION)); - - // misc setup - zr36016_writei(ptr, ZR016I_SETUP1, - (ptr->xdec ? (ZR016_HRFL | ZR016_HORZ) : 0) | - (ptr->ydec ? ZR016_VERT : 0) | ZR016_CNTI); - zr36016_writei(ptr, ZR016I_SETUP2, ZR016_CCIR); - - // Window setup - // (no extra offset for now, norm defines offset, default width height) - zr36016_writei(ptr, ZR016I_PAX_HI, ptr->width >> 8); - zr36016_writei(ptr, ZR016I_PAX_LO, ptr->width & 0xFF); - zr36016_writei(ptr, ZR016I_PAY_HI, ptr->height >> 8); - zr36016_writei(ptr, ZR016I_PAY_LO, ptr->height & 0xFF); - zr36016_writei(ptr, ZR016I_NAX_HI, ptr->xoff >> 8); - zr36016_writei(ptr, ZR016I_NAX_LO, ptr->xoff & 0xFF); - zr36016_writei(ptr, ZR016I_NAY_HI, ptr->yoff >> 8); - zr36016_writei(ptr, ZR016I_NAY_LO, ptr->yoff & 0xFF); - - /* shall we continue now, please? 
*/ - zr36016_write(ptr, ZR016_GOSTOP, 1); -} - -/* ========================================================================= - CODEC API FUNCTIONS - - this functions are accessed by the master via the API structure - ========================================================================= */ - -/* set compression/expansion mode and launches codec - - this should be the last call from the master before starting processing */ -static int -zr36016_set_mode (struct videocodec *codec, - int mode) -{ - struct zr36016 *ptr = (struct zr36016 *) codec->data; - - dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); - - if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) - return -EINVAL; - - ptr->mode = mode; - zr36016_init(ptr); - - return 0; -} - -/* set picture size */ -static int -zr36016_set_video (struct videocodec *codec, - struct tvnorm *norm, - struct vfe_settings *cap, - struct vfe_polarity *pol) -{ - struct zr36016 *ptr = (struct zr36016 *) codec->data; - - dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n", - ptr->name, norm->HStart, norm->VStart, - cap->x, cap->y, cap->width, cap->height, - cap->decimation); - - /* if () return -EINVAL; - * trust the master driver that it knows what it does - so - * we allow invalid startx/y for now ... */ - ptr->width = cap->width; - ptr->height = cap->height; - /* (Ronald) This is ugly. zoran_device.c, line 387 - * already mentions what happens if HStart is even - * (blue faces, etc., cr/cb inversed). There's probably - * some good reason why HStart is 0 instead of 1, so I'm - * leaving it to this for now, but really... This can be - * done a lot simpler */ - ptr->xoff = (norm->HStart ? norm->HStart : 1) + cap->x; - /* Something to note here (I don't understand it), setting - * VStart too high will cause the codec to 'not work'. I - * really don't get it. values of 16 (VStart) already break - * it here. Just '0' seems to work. More testing needed! 
*/ - ptr->yoff = norm->VStart + cap->y; - /* (Ronald) dzjeeh, can't this thing do hor_decimation = 4? */ - ptr->xdec = ((cap->decimation & 0xff) == 1) ? 0 : 1; - ptr->ydec = (((cap->decimation >> 8) & 0xff) == 1) ? 0 : 1; - - return 0; -} - -/* additional control functions */ -static int -zr36016_control (struct videocodec *codec, - int type, - int size, - void *data) -{ - struct zr36016 *ptr = (struct zr36016 *) codec->data; - int *ival = (int *) data; - - dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, - size); - - switch (type) { - case CODEC_G_STATUS: /* get last status - we don't know it ... */ - if (size != sizeof(int)) - return -EFAULT; - *ival = 0; - break; - - case CODEC_G_CODEC_MODE: - if (size != sizeof(int)) - return -EFAULT; - *ival = 0; - break; - - case CODEC_S_CODEC_MODE: - if (size != sizeof(int)) - return -EFAULT; - if (*ival != 0) - return -EINVAL; - /* not needed, do nothing */ - return 0; - - case CODEC_G_VFE: - case CODEC_S_VFE: - return 0; - - case CODEC_S_MMAP: - /* not available, give an error */ - return -ENXIO; - - default: - return -EINVAL; - } - - return size; -} - -/* ========================================================================= - Exit and unregister function: - - Deinitializes Zoran's JPEG processor - ========================================================================= */ - -static int -zr36016_unset (struct videocodec *codec) -{ - struct zr36016 *ptr = codec->data; - - if (ptr) { - /* do wee need some codec deinit here, too ???? */ - - dprintk(1, "%s: finished codec #%d\n", ptr->name, - ptr->num); - kfree(ptr); - codec->data = NULL; - - zr36016_codecs--; - return 0; - } - - return -EFAULT; -} - -/* ========================================================================= - Setup and registry function: - - Initializes Zoran's JPEG processor - - Also sets pixel size, average code size, mode (compr./decompr.) 
- (the given size is determined by the processor with the video interface) - ========================================================================= */ - -static int -zr36016_setup (struct videocodec *codec) -{ - struct zr36016 *ptr; - int res; - - dprintk(2, "zr36016: initializing VFE subsystem #%d.\n", - zr36016_codecs); - - if (zr36016_codecs == MAX_CODECS) { - dprintk(1, - KERN_ERR "zr36016: Can't attach more codecs!\n"); - return -ENOSPC; - } - //mem structure init - codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL); - if (NULL == ptr) { - dprintk(1, KERN_ERR "zr36016: Can't get enough memory!\n"); - return -ENOMEM; - } - - snprintf(ptr->name, sizeof(ptr->name), "zr36016[%d]", - zr36016_codecs); - ptr->num = zr36016_codecs++; - ptr->codec = codec; - - //testing - res = zr36016_basic_test(ptr); - if (res < 0) { - zr36016_unset(codec); - return res; - } - //final setup - ptr->mode = CODEC_DO_COMPRESSION; - ptr->width = 768; - ptr->height = 288; - ptr->xdec = 1; - ptr->ydec = 0; - zr36016_init(ptr); - - dprintk(1, KERN_INFO "%s: codec v%d attached and running\n", - ptr->name, ptr->version); - - return 0; -} - -static const struct videocodec zr36016_codec = { - .owner = THIS_MODULE, - .name = "zr36016", - .magic = 0L, // magic not used - .flags = - CODEC_FLAG_HARDWARE | CODEC_FLAG_VFE | CODEC_FLAG_ENCODER | - CODEC_FLAG_DECODER, - .type = CODEC_TYPE_ZR36016, - .setup = zr36016_setup, // functionality - .unset = zr36016_unset, - .set_mode = zr36016_set_mode, - .set_video = zr36016_set_video, - .control = zr36016_control, - // others are not used -}; - -/* ========================================================================= - HOOK IN DRIVER AS KERNEL MODULE - ========================================================================= */ - -static int __init -zr36016_init_module (void) -{ - //dprintk(1, "ZR36016 driver %s\n",ZR016_VERSION); - zr36016_codecs = 0; - return videocodec_register(&zr36016_codec); -} - -static void __exit 
-zr36016_cleanup_module (void) -{ - if (zr36016_codecs) { - dprintk(1, - "zr36016: something's wrong - %d codecs left somehow.\n", - zr36016_codecs); - } - videocodec_unregister(&zr36016_codec); -} - -module_init(zr36016_init_module); -module_exit(zr36016_cleanup_module); - -MODULE_AUTHOR("Wolfgang Scherr "); -MODULE_DESCRIPTION("Driver module for ZR36016 video frontends " - ZR016_VERSION); -MODULE_LICENSE("GPL"); diff --git a/drivers/media/pci/zoran/zr36016.h b/drivers/media/pci/zoran/zr36016.h deleted file mode 100644 index 784bcf5727b8..000000000000 --- a/drivers/media/pci/zoran/zr36016.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Zoran ZR36016 basic configuration functions - header file - * - * Copyright (C) 2001 Wolfgang Scherr - * - * $Id: zr36016.h,v 1.1.2.3 2003/01/14 21:18:07 rbultje Exp $ - * - * ------------------------------------------------------------------------ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * ------------------------------------------------------------------------ - */ - -#ifndef ZR36016_H -#define ZR36016_H - -/* data stored for each zoran jpeg codec chip */ -struct zr36016 { - char name[32]; - int num; - /* io datastructure */ - struct videocodec *codec; - // coder status - __u8 version; - // actual coder setup - int mode; - - __u16 xoff; - __u16 yoff; - __u16 width; - __u16 height; - __u16 xdec; - __u16 ydec; -}; - -/* direct register addresses */ -#define ZR016_GOSTOP 0x00 -#define ZR016_MODE 0x01 -#define ZR016_IADDR 0x02 -#define ZR016_IDATA 0x03 - -/* indirect register addresses */ -#define ZR016I_SETUP1 0x00 -#define ZR016I_SETUP2 0x01 -#define ZR016I_NAX_LO 0x02 -#define ZR016I_NAX_HI 0x03 -#define ZR016I_PAX_LO 0x04 -#define ZR016I_PAX_HI 0x05 -#define ZR016I_NAY_LO 0x06 -#define ZR016I_NAY_HI 0x07 -#define ZR016I_PAY_LO 0x08 -#define ZR016I_PAY_HI 0x09 -#define ZR016I_NOL_LO 0x0a -#define ZR016I_NOL_HI 0x0b - -/* possible values for mode register */ -#define ZR016_RGB444_YUV444 0x00 -#define ZR016_RGB444_YUV422 0x01 -#define ZR016_RGB444_YUV411 0x02 -#define ZR016_RGB444_Y400 0x03 -#define ZR016_RGB444_RGB444 0x04 -#define ZR016_YUV444_YUV444 0x08 -#define ZR016_YUV444_YUV422 0x09 -#define ZR016_YUV444_YUV411 0x0a -#define ZR016_YUV444_Y400 0x0b -#define ZR016_YUV444_RGB444 0x0c -#define ZR016_YUV422_YUV422 0x11 -#define ZR016_YUV422_YUV411 0x12 -#define ZR016_YUV422_Y400 0x13 -#define ZR016_YUV411_YUV411 0x16 -#define ZR016_YUV411_Y400 0x17 -#define ZR016_4444_4444 0x19 -#define ZR016_100_100 0x1b - -#define ZR016_RGB444 0x00 -#define ZR016_YUV444 0x20 -#define ZR016_YUV422 0x40 - -#define ZR016_COMPRESSION 0x80 -#define ZR016_EXPANSION 0x80 - -/* possible values for setup 1 register */ -#define ZR016_CKRT 0x80 -#define ZR016_VERT 0x40 -#define ZR016_HORZ 0x20 -#define ZR016_HRFL 0x10 -#define ZR016_DSFL 0x08 -#define ZR016_SBFL 0x04 -#define ZR016_RSTR 0x02 -#define ZR016_CNTI 0x01 - -/* possible values for setup 2 register */ 
-#define ZR016_SYEN 0x40 -#define ZR016_CCIR 0x04 -#define ZR016_SIGN 0x02 -#define ZR016_YMCS 0x01 - -#endif /*fndef ZR36016_H */ diff --git a/drivers/media/pci/zoran/zr36050.c b/drivers/media/pci/zoran/zr36050.c deleted file mode 100644 index 5ebfc16672f3..000000000000 --- a/drivers/media/pci/zoran/zr36050.c +++ /dev/null @@ -1,896 +0,0 @@ -/* - * Zoran ZR36050 basic configuration functions - * - * Copyright (C) 2001 Wolfgang Scherr - * - * $Id: zr36050.c,v 1.1.2.11 2003/08/03 14:54:53 rbultje Exp $ - * - * ------------------------------------------------------------------------ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * ------------------------------------------------------------------------ - */ - -#define ZR050_VERSION "v0.7.1" - -#include -#include -#include -#include - -#include -#include - -/* I/O commands, error codes */ -#include - -/* headerfile of this module */ -#include "zr36050.h" - -/* codec io API */ -#include "videocodec.h" - -/* it doesn't make sense to have more than 20 or so, - just to prevent some unwanted loops */ -#define MAX_CODECS 20 - -/* amount of chips attached via this driver */ -static int zr36050_codecs; - -/* debugging is available via module parameter */ -static int debug; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0-4)"); - -#define dprintk(num, format, args...) 
\ - do { \ - if (debug >= num) \ - printk(format, ##args); \ - } while (0) - -/* ========================================================================= - Local hardware I/O functions: - - read/write via codec layer (registers are located in the master device) - ========================================================================= */ - -/* read and write functions */ -static u8 -zr36050_read (struct zr36050 *ptr, - u16 reg) -{ - u8 value = 0; - - // just in case something is wrong... - if (ptr->codec->master_data->readreg) - value = (ptr->codec->master_data->readreg(ptr->codec, - reg)) & 0xFF; - else - dprintk(1, - KERN_ERR "%s: invalid I/O setup, nothing read!\n", - ptr->name); - - dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, - value); - - return value; -} - -static void -zr36050_write (struct zr36050 *ptr, - u16 reg, - u8 value) -{ - dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, - reg); - - // just in case something is wrong... - if (ptr->codec->master_data->writereg) - ptr->codec->master_data->writereg(ptr->codec, reg, value); - else - dprintk(1, - KERN_ERR - "%s: invalid I/O setup, nothing written!\n", - ptr->name); -} - -/* ========================================================================= - Local helper function: - - status read - ========================================================================= */ - -/* status is kept in datastructure */ -static u8 -zr36050_read_status1 (struct zr36050 *ptr) -{ - ptr->status1 = zr36050_read(ptr, ZR050_STATUS_1); - - zr36050_read(ptr, 0); - return ptr->status1; -} - -/* ========================================================================= - Local helper function: - - scale factor read - ========================================================================= */ - -/* scale factor is kept in datastructure */ -static u16 -zr36050_read_scalefactor (struct zr36050 *ptr) -{ - ptr->scalefact = (zr36050_read(ptr, ZR050_SF_HI) << 8) | - (zr36050_read(ptr, ZR050_SF_LO) & 
0xFF); - - /* leave 0 selected for an eventually GO from master */ - zr36050_read(ptr, 0); - return ptr->scalefact; -} - -/* ========================================================================= - Local helper function: - - wait if codec is ready to proceed (end of processing) or time is over - ========================================================================= */ - -static void -zr36050_wait_end (struct zr36050 *ptr) -{ - int i = 0; - - while (!(zr36050_read_status1(ptr) & 0x4)) { - udelay(1); - if (i++ > 200000) { // 200ms, there is for sure something wrong!!! - dprintk(1, - "%s: timeout at wait_end (last status: 0x%02x)\n", - ptr->name, ptr->status1); - break; - } - } -} - -/* ========================================================================= - Local helper function: - - basic test of "connectivity", writes/reads to/from memory the SOF marker - ========================================================================= */ - -static int -zr36050_basic_test (struct zr36050 *ptr) -{ - zr36050_write(ptr, ZR050_SOF_IDX, 0x00); - zr36050_write(ptr, ZR050_SOF_IDX + 1, 0x00); - if ((zr36050_read(ptr, ZR050_SOF_IDX) | - zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0x0000) { - dprintk(1, - KERN_ERR - "%s: attach failed, can't connect to jpeg processor!\n", - ptr->name); - return -ENXIO; - } - zr36050_write(ptr, ZR050_SOF_IDX, 0xff); - zr36050_write(ptr, ZR050_SOF_IDX + 1, 0xc0); - if (((zr36050_read(ptr, ZR050_SOF_IDX) << 8) | - zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0xffc0) { - dprintk(1, - KERN_ERR - "%s: attach failed, can't connect to jpeg processor!\n", - ptr->name); - return -ENXIO; - } - - zr36050_wait_end(ptr); - if ((ptr->status1 & 0x4) == 0) { - dprintk(1, - KERN_ERR - "%s: attach failed, jpeg processor failed (end flag)!\n", - ptr->name); - return -EBUSY; - } - - return 0; /* looks good! 
*/ -} - -/* ========================================================================= - Local helper function: - - simple loop for pushing the init datasets - ========================================================================= */ - -static int -zr36050_pushit (struct zr36050 *ptr, - u16 startreg, - u16 len, - const char *data) -{ - int i = 0; - - dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, - startreg, len); - while (i < len) { - zr36050_write(ptr, startreg++, data[i++]); - } - - return i; -} - -/* ========================================================================= - Basic datasets: - - jpeg baseline setup data (you find it on lots places in internet, or just - extract it from any regular .jpg image...) - - Could be variable, but until it's not needed it they are just fixed to save - memory. Otherwise expand zr36050 structure with arrays, push the values to - it and initialize from there, as e.g. the linux zr36057/60 driver does it. - ========================================================================= */ - -static const char zr36050_dqt[0x86] = { - 0xff, 0xdb, //Marker: DQT - 0x00, 0x84, //Length: 2*65+2 - 0x00, //Pq,Tq first table - 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, - 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, - 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, - 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, - 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, - 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, - 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, - 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, - 0x01, //Pq,Tq second table - 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, - 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 
0x63, 0x63, 0x63, 0x63 -}; - -static const char zr36050_dht[0x1a4] = { - 0xff, 0xc4, //Marker: DHT - 0x01, 0xa2, //Length: 2*AC, 2*DC - 0x00, //DC first table - 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, - 0x01, //DC second table - 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, - 0x10, //AC first table - 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, - 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, - 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, - 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, - 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, - 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, - 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, - 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, - 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, - 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, - 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, - 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, - 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, - 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, - 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, - 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, - 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, - 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, - 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, - 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, - 0xF8, 0xF9, 0xFA, - 0x11, //AC second table - 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, - 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, - 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, - 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, - 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, - 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 
0x62, - 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, - 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, - 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, - 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, - 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, - 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, - 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, - 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, - 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, - 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, - 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, - 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, - 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, - 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, - 0xF9, 0xFA -}; - -/* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ -#define NO_OF_COMPONENTS 0x3 //Y,U,V -#define BASELINE_PRECISION 0x8 //MCU size (?) -static const char zr36050_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT -static const char zr36050_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC -static const char zr36050_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC - -/* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ -static const char zr36050_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; -static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; - -/* ========================================================================= - Local helper functions: - - calculation and setup of parameter-dependent JPEG baseline segments - (needed for compression only) - ========================================================================= */ - -/* ------------------------------------------------------------------------- */ - -/* SOF (start of frame) segment depends on width, height and sampling ratio - of each color component */ - -static int -zr36050_set_sof (struct zr36050 *ptr) -{ - char sof_data[34]; // 
max. size of register set - int i; - - dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, - ptr->width, ptr->height, NO_OF_COMPONENTS); - sof_data[0] = 0xff; - sof_data[1] = 0xc0; - sof_data[2] = 0x00; - sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; - sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36050 - sof_data[5] = (ptr->height) >> 8; - sof_data[6] = (ptr->height) & 0xff; - sof_data[7] = (ptr->width) >> 8; - sof_data[8] = (ptr->width) & 0xff; - sof_data[9] = NO_OF_COMPONENTS; - for (i = 0; i < NO_OF_COMPONENTS; i++) { - sof_data[10 + (i * 3)] = i; // index identifier - sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios - sof_data[12 + (i * 3)] = zr36050_tq[i]; // Q table selection - } - return zr36050_pushit(ptr, ZR050_SOF_IDX, - (3 * NO_OF_COMPONENTS) + 10, sof_data); -} - -/* ------------------------------------------------------------------------- */ - -/* SOS (start of scan) segment depends on the used scan components - of each color component */ - -static int -zr36050_set_sos (struct zr36050 *ptr) -{ - char sos_data[16]; // max. size of register set - int i; - - dprintk(3, "%s: write SOS\n", ptr->name); - sos_data[0] = 0xff; - sos_data[1] = 0xda; - sos_data[2] = 0x00; - sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; - sos_data[4] = NO_OF_COMPONENTS; - for (i = 0; i < NO_OF_COMPONENTS; i++) { - sos_data[5 + (i * 2)] = i; // index - sos_data[6 + (i * 2)] = (zr36050_td[i] << 4) | zr36050_ta[i]; // AC/DC tbl.sel. - } - sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start - sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3F; - sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; - return zr36050_pushit(ptr, ZR050_SOS1_IDX, - 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, - sos_data); -} - -/* ------------------------------------------------------------------------- */ - -/* DRI (define restart interval) */ - -static int -zr36050_set_dri (struct zr36050 *ptr) -{ - char dri_data[6]; // max. 
size of register set - - dprintk(3, "%s: write DRI\n", ptr->name); - dri_data[0] = 0xff; - dri_data[1] = 0xdd; - dri_data[2] = 0x00; - dri_data[3] = 0x04; - dri_data[4] = ptr->dri >> 8; - dri_data[5] = ptr->dri & 0xff; - return zr36050_pushit(ptr, ZR050_DRI_IDX, 6, dri_data); -} - -/* ========================================================================= - Setup function: - - Setup compression/decompression of Zoran's JPEG processor - ( see also zoran 36050 manual ) - - ... sorry for the spaghetti code ... - ========================================================================= */ -static void -zr36050_init (struct zr36050 *ptr) -{ - int sum = 0; - long bitcnt, tmp; - - if (ptr->mode == CODEC_DO_COMPRESSION) { - dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); - - /* 050 communicates with 057 in master mode */ - zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR); - - /* encoding table preload for compression */ - zr36050_write(ptr, ZR050_MODE, - ZR050_MO_COMP | ZR050_MO_TLM); - zr36050_write(ptr, ZR050_OPTIONS, 0); - - /* disable all IRQs */ - zr36050_write(ptr, ZR050_INT_REQ_0, 0); - zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 - - /* volume control settings */ - /*zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);*/ - zr36050_write(ptr, ZR050_SF_HI, ptr->scalefact >> 8); - zr36050_write(ptr, ZR050_SF_LO, ptr->scalefact & 0xff); - - zr36050_write(ptr, ZR050_AF_HI, 0xff); - zr36050_write(ptr, ZR050_AF_M, 0xff); - zr36050_write(ptr, ZR050_AF_LO, 0xff); - - /* setup the variable jpeg tables */ - sum += zr36050_set_sof(ptr); - sum += zr36050_set_sos(ptr); - sum += zr36050_set_dri(ptr); - - /* setup the fixed jpeg tables - maybe variable, though - - * (see table init section above) */ - dprintk(3, "%s: write DQT, DHT, APP\n", ptr->name); - sum += zr36050_pushit(ptr, ZR050_DQT_IDX, - sizeof(zr36050_dqt), zr36050_dqt); - sum += zr36050_pushit(ptr, ZR050_DHT_IDX, - sizeof(zr36050_dht), zr36050_dht); - zr36050_write(ptr, ZR050_APP_IDX, 0xff); - 
zr36050_write(ptr, ZR050_APP_IDX + 1, 0xe0 + ptr->app.appn); - zr36050_write(ptr, ZR050_APP_IDX + 2, 0x00); - zr36050_write(ptr, ZR050_APP_IDX + 3, ptr->app.len + 2); - sum += zr36050_pushit(ptr, ZR050_APP_IDX + 4, 60, - ptr->app.data) + 4; - zr36050_write(ptr, ZR050_COM_IDX, 0xff); - zr36050_write(ptr, ZR050_COM_IDX + 1, 0xfe); - zr36050_write(ptr, ZR050_COM_IDX + 2, 0x00); - zr36050_write(ptr, ZR050_COM_IDX + 3, ptr->com.len + 2); - sum += zr36050_pushit(ptr, ZR050_COM_IDX + 4, 60, - ptr->com.data) + 4; - - /* do the internal huffman table preload */ - zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI); - - zr36050_write(ptr, ZR050_GO, 1); // launch codec - zr36050_wait_end(ptr); - dprintk(2, "%s: Status after table preload: 0x%02x\n", - ptr->name, ptr->status1); - - if ((ptr->status1 & 0x4) == 0) { - dprintk(1, KERN_ERR "%s: init aborted!\n", - ptr->name); - return; // something is wrong, its timed out!!!! - } - - /* setup misc. data for compression (target code sizes) */ - - /* size of compressed code to reach without header data */ - sum = ptr->real_code_vol - sum; - bitcnt = sum << 3; /* need the size in bits */ - - tmp = bitcnt >> 16; - dprintk(3, - "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", - ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); - zr36050_write(ptr, ZR050_TCV_NET_HI, tmp >> 8); - zr36050_write(ptr, ZR050_TCV_NET_MH, tmp & 0xff); - tmp = bitcnt & 0xffff; - zr36050_write(ptr, ZR050_TCV_NET_ML, tmp >> 8); - zr36050_write(ptr, ZR050_TCV_NET_LO, tmp & 0xff); - - bitcnt -= bitcnt >> 7; // bits without stuffing - bitcnt -= ((bitcnt * 5) >> 6); // bits without eob - - tmp = bitcnt >> 16; - dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", - ptr->name, bitcnt, tmp); - zr36050_write(ptr, ZR050_TCV_DATA_HI, tmp >> 8); - zr36050_write(ptr, ZR050_TCV_DATA_MH, tmp & 0xff); - tmp = bitcnt & 0xffff; - zr36050_write(ptr, ZR050_TCV_DATA_ML, tmp >> 8); - zr36050_write(ptr, ZR050_TCV_DATA_LO, tmp & 0xff); - - /* compression setup with or 
without bitrate control */ - zr36050_write(ptr, ZR050_MODE, - ZR050_MO_COMP | ZR050_MO_PASS2 | - (ptr->bitrate_ctrl ? ZR050_MO_BRC : 0)); - - /* this headers seem to deliver "valid AVI" jpeg frames */ - zr36050_write(ptr, ZR050_MARKERS_EN, - ZR050_ME_DQT | ZR050_ME_DHT | - ((ptr->app.len > 0) ? ZR050_ME_APP : 0) | - ((ptr->com.len > 0) ? ZR050_ME_COM : 0)); - } else { - dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); - - /* 050 communicates with 055 in master mode */ - zr36050_write(ptr, ZR050_HARDWARE, - ZR050_HW_MSTR | ZR050_HW_CFIS_2_CLK); - - /* encoding table preload */ - zr36050_write(ptr, ZR050_MODE, ZR050_MO_TLM); - - /* disable all IRQs */ - zr36050_write(ptr, ZR050_INT_REQ_0, 0); - zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 - - dprintk(3, "%s: write DHT\n", ptr->name); - zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht), - zr36050_dht); - - /* do the internal huffman table preload */ - zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI); - - zr36050_write(ptr, ZR050_GO, 1); // launch codec - zr36050_wait_end(ptr); - dprintk(2, "%s: Status after table preload: 0x%02x\n", - ptr->name, ptr->status1); - - if ((ptr->status1 & 0x4) == 0) { - dprintk(1, KERN_ERR "%s: init aborted!\n", - ptr->name); - return; // something is wrong, its timed out!!!! - } - - /* setup misc. 
data for expansion */ - zr36050_write(ptr, ZR050_MODE, 0); - zr36050_write(ptr, ZR050_MARKERS_EN, 0); - } - - /* adr on selected, to allow GO from master */ - zr36050_read(ptr, 0); -} - -/* ========================================================================= - CODEC API FUNCTIONS - - this functions are accessed by the master via the API structure - ========================================================================= */ - -/* set compression/expansion mode and launches codec - - this should be the last call from the master before starting processing */ -static int -zr36050_set_mode (struct videocodec *codec, - int mode) -{ - struct zr36050 *ptr = (struct zr36050 *) codec->data; - - dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); - - if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) - return -EINVAL; - - ptr->mode = mode; - zr36050_init(ptr); - - return 0; -} - -/* set picture size (norm is ignored as the codec doesn't know about it) */ -static int -zr36050_set_video (struct videocodec *codec, - struct tvnorm *norm, - struct vfe_settings *cap, - struct vfe_polarity *pol) -{ - struct zr36050 *ptr = (struct zr36050 *) codec->data; - int size; - - dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n", - ptr->name, norm->HStart, norm->VStart, - cap->x, cap->y, cap->width, cap->height, - cap->decimation, cap->quality); - /* if () return -EINVAL; - * trust the master driver that it knows what it does - so - * we allow invalid startx/y and norm for now ... 
*/ - ptr->width = cap->width / (cap->decimation & 0xff); - ptr->height = cap->height / ((cap->decimation >> 8) & 0xff); - - /* (KM) JPEG quality */ - size = ptr->width * ptr->height; - size *= 16; /* size in bits */ - /* apply quality setting */ - size = size * cap->quality / 200; - - /* Minimum: 1kb */ - if (size < 8192) - size = 8192; - /* Maximum: 7/8 of code buffer */ - if (size > ptr->total_code_vol * 7) - size = ptr->total_code_vol * 7; - - ptr->real_code_vol = size >> 3; /* in bytes */ - - /* Set max_block_vol here (previously in zr36050_init, moved - * here for consistency with zr36060 code */ - zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol); - - return 0; -} - -/* additional control functions */ -static int -zr36050_control (struct videocodec *codec, - int type, - int size, - void *data) -{ - struct zr36050 *ptr = (struct zr36050 *) codec->data; - int *ival = (int *) data; - - dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, - size); - - switch (type) { - case CODEC_G_STATUS: /* get last status */ - if (size != sizeof(int)) - return -EFAULT; - zr36050_read_status1(ptr); - *ival = ptr->status1; - break; - - case CODEC_G_CODEC_MODE: - if (size != sizeof(int)) - return -EFAULT; - *ival = CODEC_MODE_BJPG; - break; - - case CODEC_S_CODEC_MODE: - if (size != sizeof(int)) - return -EFAULT; - if (*ival != CODEC_MODE_BJPG) - return -EINVAL; - /* not needed, do nothing */ - return 0; - - case CODEC_G_VFE: - case CODEC_S_VFE: - /* not needed, do nothing */ - return 0; - - case CODEC_S_MMAP: - /* not available, give an error */ - return -ENXIO; - - case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */ - if (size != sizeof(int)) - return -EFAULT; - *ival = ptr->total_code_vol; - break; - - case CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */ - if (size != sizeof(int)) - return -EFAULT; - ptr->total_code_vol = *ival; - /* (Kieran Morrissey) - * code copied from zr36060.c to ensure proper bitrate */ - ptr->real_code_vol = 
(ptr->total_code_vol * 6) >> 3; - break; - - case CODEC_G_JPEG_SCALE: /* get scaling factor */ - if (size != sizeof(int)) - return -EFAULT; - *ival = zr36050_read_scalefactor(ptr); - break; - - case CODEC_S_JPEG_SCALE: /* set scaling factor */ - if (size != sizeof(int)) - return -EFAULT; - ptr->scalefact = *ival; - break; - - case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ - struct jpeg_app_marker *app = data; - - if (size != sizeof(struct jpeg_app_marker)) - return -EFAULT; - - *app = ptr->app; - break; - } - - case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ - struct jpeg_app_marker *app = data; - - if (size != sizeof(struct jpeg_app_marker)) - return -EFAULT; - - ptr->app = *app; - break; - } - - case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ - struct jpeg_com_marker *com = data; - - if (size != sizeof(struct jpeg_com_marker)) - return -EFAULT; - - *com = ptr->com; - break; - } - - case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ - struct jpeg_com_marker *com = data; - - if (size != sizeof(struct jpeg_com_marker)) - return -EFAULT; - - ptr->com = *com; - break; - } - - default: - return -EINVAL; - } - - return size; -} - -/* ========================================================================= - Exit and unregister function: - - Deinitializes Zoran's JPEG processor - ========================================================================= */ - -static int -zr36050_unset (struct videocodec *codec) -{ - struct zr36050 *ptr = codec->data; - - if (ptr) { - /* do wee need some codec deinit here, too ???? */ - - dprintk(1, "%s: finished codec #%d\n", ptr->name, - ptr->num); - kfree(ptr); - codec->data = NULL; - - zr36050_codecs--; - return 0; - } - - return -EFAULT; -} - -/* ========================================================================= - Setup and registry function: - - Initializes Zoran's JPEG processor - - Also sets pixel size, average code size, mode (compr./decompr.) 
- (the given size is determined by the processor with the video interface) - ========================================================================= */ - -static int -zr36050_setup (struct videocodec *codec) -{ - struct zr36050 *ptr; - int res; - - dprintk(2, "zr36050: initializing MJPEG subsystem #%d.\n", - zr36050_codecs); - - if (zr36050_codecs == MAX_CODECS) { - dprintk(1, - KERN_ERR "zr36050: Can't attach more codecs!\n"); - return -ENOSPC; - } - //mem structure init - codec->data = ptr = kzalloc(sizeof(struct zr36050), GFP_KERNEL); - if (NULL == ptr) { - dprintk(1, KERN_ERR "zr36050: Can't get enough memory!\n"); - return -ENOMEM; - } - - snprintf(ptr->name, sizeof(ptr->name), "zr36050[%d]", - zr36050_codecs); - ptr->num = zr36050_codecs++; - ptr->codec = codec; - - //testing - res = zr36050_basic_test(ptr); - if (res < 0) { - zr36050_unset(codec); - return res; - } - //final setup - memcpy(ptr->h_samp_ratio, zr36050_decimation_h, 8); - memcpy(ptr->v_samp_ratio, zr36050_decimation_v, 8); - - ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag - * (what is the difference?) 
*/ - ptr->mode = CODEC_DO_COMPRESSION; - ptr->width = 384; - ptr->height = 288; - ptr->total_code_vol = 16000; - ptr->max_block_vol = 240; - ptr->scalefact = 0x100; - ptr->dri = 1; - - /* no app/com marker by default */ - ptr->app.appn = 0; - ptr->app.len = 0; - ptr->com.len = 0; - - zr36050_init(ptr); - - dprintk(1, KERN_INFO "%s: codec attached and running\n", - ptr->name); - - return 0; -} - -static const struct videocodec zr36050_codec = { - .owner = THIS_MODULE, - .name = "zr36050", - .magic = 0L, // magic not used - .flags = - CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | - CODEC_FLAG_DECODER, - .type = CODEC_TYPE_ZR36050, - .setup = zr36050_setup, // functionality - .unset = zr36050_unset, - .set_mode = zr36050_set_mode, - .set_video = zr36050_set_video, - .control = zr36050_control, - // others are not used -}; - -/* ========================================================================= - HOOK IN DRIVER AS KERNEL MODULE - ========================================================================= */ - -static int __init -zr36050_init_module (void) -{ - //dprintk(1, "ZR36050 driver %s\n",ZR050_VERSION); - zr36050_codecs = 0; - return videocodec_register(&zr36050_codec); -} - -static void __exit -zr36050_cleanup_module (void) -{ - if (zr36050_codecs) { - dprintk(1, - "zr36050: something's wrong - %d codecs left somehow.\n", - zr36050_codecs); - } - videocodec_unregister(&zr36050_codec); -} - -module_init(zr36050_init_module); -module_exit(zr36050_cleanup_module); - -MODULE_AUTHOR("Wolfgang Scherr "); -MODULE_DESCRIPTION("Driver module for ZR36050 jpeg processors " - ZR050_VERSION); -MODULE_LICENSE("GPL"); diff --git a/drivers/media/pci/zoran/zr36050.h b/drivers/media/pci/zoran/zr36050.h deleted file mode 100644 index 9236486d3c2b..000000000000 --- a/drivers/media/pci/zoran/zr36050.h +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Zoran ZR36050 basic configuration functions - header file - * - * Copyright (C) 2001 Wolfgang Scherr - * - * $Id: 
zr36050.h,v 1.1.2.2 2003/01/14 21:18:22 rbultje Exp $ - * - * ------------------------------------------------------------------------ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * ------------------------------------------------------------------------ - */ - -#ifndef ZR36050_H -#define ZR36050_H - -#include "videocodec.h" - -/* data stored for each zoran jpeg codec chip */ -struct zr36050 { - char name[32]; - int num; - /* io datastructure */ - struct videocodec *codec; - // last coder status - __u8 status1; - // actual coder setup - int mode; - - __u16 width; - __u16 height; - - __u16 bitrate_ctrl; - - __u32 total_code_vol; - __u32 real_code_vol; - __u16 max_block_vol; - - __u8 h_samp_ratio[8]; - __u8 v_samp_ratio[8]; - __u16 scalefact; - __u16 dri; - - /* com/app marker */ - struct jpeg_com_marker com; - struct jpeg_app_marker app; -}; - -/* zr36050 register addresses */ -#define ZR050_GO 0x000 -#define ZR050_HARDWARE 0x002 -#define ZR050_MODE 0x003 -#define ZR050_OPTIONS 0x004 -#define ZR050_MBCV 0x005 -#define ZR050_MARKERS_EN 0x006 -#define ZR050_INT_REQ_0 0x007 -#define ZR050_INT_REQ_1 0x008 -#define ZR050_TCV_NET_HI 0x009 -#define ZR050_TCV_NET_MH 0x00a -#define ZR050_TCV_NET_ML 0x00b -#define ZR050_TCV_NET_LO 0x00c -#define ZR050_TCV_DATA_HI 0x00d -#define ZR050_TCV_DATA_MH 0x00e -#define ZR050_TCV_DATA_ML 0x00f -#define ZR050_TCV_DATA_LO 0x010 -#define ZR050_SF_HI 0x011 -#define ZR050_SF_LO 0x012 -#define ZR050_AF_HI 0x013 -#define ZR050_AF_M 0x014 -#define ZR050_AF_LO 0x015 -#define 
ZR050_ACV_HI 0x016 -#define ZR050_ACV_MH 0x017 -#define ZR050_ACV_ML 0x018 -#define ZR050_ACV_LO 0x019 -#define ZR050_ACT_HI 0x01a -#define ZR050_ACT_MH 0x01b -#define ZR050_ACT_ML 0x01c -#define ZR050_ACT_LO 0x01d -#define ZR050_ACV_TRUN_HI 0x01e -#define ZR050_ACV_TRUN_MH 0x01f -#define ZR050_ACV_TRUN_ML 0x020 -#define ZR050_ACV_TRUN_LO 0x021 -#define ZR050_STATUS_0 0x02e -#define ZR050_STATUS_1 0x02f - -#define ZR050_SOF_IDX 0x040 -#define ZR050_SOS1_IDX 0x07a -#define ZR050_SOS2_IDX 0x08a -#define ZR050_SOS3_IDX 0x09a -#define ZR050_SOS4_IDX 0x0aa -#define ZR050_DRI_IDX 0x0c0 -#define ZR050_DNL_IDX 0x0c6 -#define ZR050_DQT_IDX 0x0cc -#define ZR050_DHT_IDX 0x1d4 -#define ZR050_APP_IDX 0x380 -#define ZR050_COM_IDX 0x3c0 - -/* zr36050 hardware register bits */ - -#define ZR050_HW_BSWD 0x80 -#define ZR050_HW_MSTR 0x40 -#define ZR050_HW_DMA 0x20 -#define ZR050_HW_CFIS_1_CLK 0x00 -#define ZR050_HW_CFIS_2_CLK 0x04 -#define ZR050_HW_CFIS_3_CLK 0x08 -#define ZR050_HW_CFIS_4_CLK 0x0C -#define ZR050_HW_CFIS_5_CLK 0x10 -#define ZR050_HW_CFIS_6_CLK 0x14 -#define ZR050_HW_CFIS_7_CLK 0x18 -#define ZR050_HW_CFIS_8_CLK 0x1C -#define ZR050_HW_BELE 0x01 - -/* zr36050 mode register bits */ - -#define ZR050_MO_COMP 0x80 -#define ZR050_MO_ATP 0x40 -#define ZR050_MO_PASS2 0x20 -#define ZR050_MO_TLM 0x10 -#define ZR050_MO_DCONLY 0x08 -#define ZR050_MO_BRC 0x04 - -#define ZR050_MO_ATP 0x40 -#define ZR050_MO_PASS2 0x20 -#define ZR050_MO_TLM 0x10 -#define ZR050_MO_DCONLY 0x08 - -/* zr36050 option register bits */ - -#define ZR050_OP_NSCN_1 0x00 -#define ZR050_OP_NSCN_2 0x20 -#define ZR050_OP_NSCN_3 0x40 -#define ZR050_OP_NSCN_4 0x60 -#define ZR050_OP_NSCN_5 0x80 -#define ZR050_OP_NSCN_6 0xA0 -#define ZR050_OP_NSCN_7 0xC0 -#define ZR050_OP_NSCN_8 0xE0 -#define ZR050_OP_OVF 0x10 - - -/* zr36050 markers-enable register bits */ - -#define ZR050_ME_APP 0x80 -#define ZR050_ME_COM 0x40 -#define ZR050_ME_DRI 0x20 -#define ZR050_ME_DQT 0x10 -#define ZR050_ME_DHT 0x08 -#define ZR050_ME_DNL 0x04 
-#define ZR050_ME_DQTI 0x02 -#define ZR050_ME_DHTI 0x01 - -/* zr36050 status0/1 register bit masks */ - -#define ZR050_ST_RST_MASK 0x20 -#define ZR050_ST_SOF_MASK 0x02 -#define ZR050_ST_SOS_MASK 0x02 -#define ZR050_ST_DATRDY_MASK 0x80 -#define ZR050_ST_MRKDET_MASK 0x40 -#define ZR050_ST_RFM_MASK 0x10 -#define ZR050_ST_RFD_MASK 0x08 -#define ZR050_ST_END_MASK 0x04 -#define ZR050_ST_TCVOVF_MASK 0x02 -#define ZR050_ST_DATOVF_MASK 0x01 - -/* pixel component idx */ - -#define ZR050_Y_COMPONENT 0 -#define ZR050_U_COMPONENT 1 -#define ZR050_V_COMPONENT 2 - -#endif /*fndef ZR36050_H */ diff --git a/drivers/media/pci/zoran/zr36057.h b/drivers/media/pci/zoran/zr36057.h deleted file mode 100644 index c8acb21dcb5c..000000000000 --- a/drivers/media/pci/zoran/zr36057.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * zr36057.h - zr36057 register offsets - * - * Copyright (C) 1998 Dave Perks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _ZR36057_H_ -#define _ZR36057_H_ - - -/* Zoran ZR36057 registers */ - -#define ZR36057_VFEHCR 0x000 /* Video Front End, Horizontal Configuration Register */ -#define ZR36057_VFEHCR_HSPol (1<<30) -#define ZR36057_VFEHCR_HStart 10 -#define ZR36057_VFEHCR_HEnd 0 -#define ZR36057_VFEHCR_Hmask 0x3ff - -#define ZR36057_VFEVCR 0x004 /* Video Front End, Vertical Configuration Register */ -#define ZR36057_VFEVCR_VSPol (1<<30) -#define ZR36057_VFEVCR_VStart 10 -#define ZR36057_VFEVCR_VEnd 0 -#define ZR36057_VFEVCR_Vmask 0x3ff - -#define ZR36057_VFESPFR 0x008 /* Video Front End, Scaler and Pixel Format Register */ -#define ZR36057_VFESPFR_ExtFl (1<<26) -#define ZR36057_VFESPFR_TopField (1<<25) -#define ZR36057_VFESPFR_VCLKPol (1<<24) -#define ZR36057_VFESPFR_HFilter 21 -#define ZR36057_VFESPFR_HorDcm 14 -#define ZR36057_VFESPFR_VerDcm 8 -#define ZR36057_VFESPFR_DispMode 6 -#define ZR36057_VFESPFR_YUV422 (0<<3) -#define ZR36057_VFESPFR_RGB888 (1<<3) -#define ZR36057_VFESPFR_RGB565 (2<<3) -#define ZR36057_VFESPFR_RGB555 (3<<3) -#define ZR36057_VFESPFR_ErrDif (1<<2) -#define ZR36057_VFESPFR_Pack24 (1<<1) -#define ZR36057_VFESPFR_LittleEndian (1<<0) - -#define ZR36057_VDTR 0x00c /* Video Display "Top" Register */ - -#define ZR36057_VDBR 0x010 /* Video Display "Bottom" Register */ - -#define ZR36057_VSSFGR 0x014 /* Video Stride, Status, and Frame Grab Register */ -#define ZR36057_VSSFGR_DispStride 16 -#define ZR36057_VSSFGR_VidOvf (1<<8) -#define ZR36057_VSSFGR_SnapShot (1<<1) -#define ZR36057_VSSFGR_FrameGrab (1<<0) - -#define ZR36057_VDCR 0x018 /* Video Display Configuration Register */ -#define ZR36057_VDCR_VidEn (1<<31) -#define ZR36057_VDCR_MinPix 24 -#define ZR36057_VDCR_Triton (1<<24) -#define ZR36057_VDCR_VidWinHt 12 -#define ZR36057_VDCR_VidWinWid 0 - -#define ZR36057_MMTR 0x01c /* Masking Map "Top" Register */ - -#define ZR36057_MMBR 0x020 /* Masking Map "Bottom" Register */ - -#define ZR36057_OCR 0x024 /* Overlay Control Register */ -#define 
ZR36057_OCR_OvlEnable (1 << 15) -#define ZR36057_OCR_MaskStride 0 - -#define ZR36057_SPGPPCR 0x028 /* System, PCI, and General Purpose Pins Control Register */ -#define ZR36057_SPGPPCR_SoftReset (1<<24) - -#define ZR36057_GPPGCR1 0x02c /* General Purpose Pins and GuestBus Control Register (1) */ - -#define ZR36057_MCSAR 0x030 /* MPEG Code Source Address Register */ - -#define ZR36057_MCTCR 0x034 /* MPEG Code Transfer Control Register */ -#define ZR36057_MCTCR_CodTime (1 << 30) -#define ZR36057_MCTCR_CEmpty (1 << 29) -#define ZR36057_MCTCR_CFlush (1 << 28) -#define ZR36057_MCTCR_CodGuestID 20 -#define ZR36057_MCTCR_CodGuestReg 16 - -#define ZR36057_MCMPR 0x038 /* MPEG Code Memory Pointer Register */ - -#define ZR36057_ISR 0x03c /* Interrupt Status Register */ -#define ZR36057_ISR_GIRQ1 (1<<30) -#define ZR36057_ISR_GIRQ0 (1<<29) -#define ZR36057_ISR_CodRepIRQ (1<<28) -#define ZR36057_ISR_JPEGRepIRQ (1<<27) - -#define ZR36057_ICR 0x040 /* Interrupt Control Register */ -#define ZR36057_ICR_GIRQ1 (1<<30) -#define ZR36057_ICR_GIRQ0 (1<<29) -#define ZR36057_ICR_CodRepIRQ (1<<28) -#define ZR36057_ICR_JPEGRepIRQ (1<<27) -#define ZR36057_ICR_IntPinEn (1<<24) - -#define ZR36057_I2CBR 0x044 /* I2C Bus Register */ -#define ZR36057_I2CBR_SDA (1<<1) -#define ZR36057_I2CBR_SCL (1<<0) - -#define ZR36057_JMC 0x100 /* JPEG Mode and Control */ -#define ZR36057_JMC_JPG (1 << 31) -#define ZR36057_JMC_JPGExpMode (0 << 29) -#define ZR36057_JMC_JPGCmpMode (1 << 29) -#define ZR36057_JMC_MJPGExpMode (2 << 29) -#define ZR36057_JMC_MJPGCmpMode (3 << 29) -#define ZR36057_JMC_RTBUSY_FB (1 << 6) -#define ZR36057_JMC_Go_en (1 << 5) -#define ZR36057_JMC_SyncMstr (1 << 4) -#define ZR36057_JMC_Fld_per_buff (1 << 3) -#define ZR36057_JMC_VFIFO_FB (1 << 2) -#define ZR36057_JMC_CFIFO_FB (1 << 1) -#define ZR36057_JMC_Stll_LitEndian (1 << 0) - -#define ZR36057_JPC 0x104 /* JPEG Process Control */ -#define ZR36057_JPC_P_Reset (1 << 7) -#define ZR36057_JPC_CodTrnsEn (1 << 5) -#define ZR36057_JPC_Active (1 << 
0) - -#define ZR36057_VSP 0x108 /* Vertical Sync Parameters */ -#define ZR36057_VSP_VsyncSize 16 -#define ZR36057_VSP_FrmTot 0 - -#define ZR36057_HSP 0x10c /* Horizontal Sync Parameters */ -#define ZR36057_HSP_HsyncStart 16 -#define ZR36057_HSP_LineTot 0 - -#define ZR36057_FHAP 0x110 /* Field Horizontal Active Portion */ -#define ZR36057_FHAP_NAX 16 -#define ZR36057_FHAP_PAX 0 - -#define ZR36057_FVAP 0x114 /* Field Vertical Active Portion */ -#define ZR36057_FVAP_NAY 16 -#define ZR36057_FVAP_PAY 0 - -#define ZR36057_FPP 0x118 /* Field Process Parameters */ -#define ZR36057_FPP_Odd_Even (1 << 0) - -#define ZR36057_JCBA 0x11c /* JPEG Code Base Address */ - -#define ZR36057_JCFT 0x120 /* JPEG Code FIFO Threshold */ - -#define ZR36057_JCGI 0x124 /* JPEG Codec Guest ID */ -#define ZR36057_JCGI_JPEGuestID 4 -#define ZR36057_JCGI_JPEGuestReg 0 - -#define ZR36057_GCR2 0x12c /* GuestBus Control Register (2) */ - -#define ZR36057_POR 0x200 /* Post Office Register */ -#define ZR36057_POR_POPen (1<<25) -#define ZR36057_POR_POTime (1<<24) -#define ZR36057_POR_PODir (1<<23) - -#define ZR36057_STR 0x300 /* "Still" Transfer Register */ - -#endif diff --git a/drivers/media/pci/zoran/zr36060.c b/drivers/media/pci/zoran/zr36060.c deleted file mode 100644 index 2c2e8130fc96..000000000000 --- a/drivers/media/pci/zoran/zr36060.c +++ /dev/null @@ -1,1006 +0,0 @@ -/* - * Zoran ZR36060 basic configuration functions - * - * Copyright (C) 2002 Laurent Pinchart - * - * $Id: zr36060.c,v 1.1.2.22 2003/05/06 09:35:36 rbultje Exp $ - * - * ------------------------------------------------------------------------ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * ------------------------------------------------------------------------ - */ - -#define ZR060_VERSION "v0.7" - -#include -#include -#include -#include - -#include -#include - -/* I/O commands, error codes */ -#include - -/* headerfile of this module */ -#include "zr36060.h" - -/* codec io API */ -#include "videocodec.h" - -/* it doesn't make sense to have more than 20 or so, - just to prevent some unwanted loops */ -#define MAX_CODECS 20 - -/* amount of chips attached via this driver */ -static int zr36060_codecs; - -static bool low_bitrate; -module_param(low_bitrate, bool, 0); -MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate"); - -/* debugging is available via module parameter */ -static int debug; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0-4)"); - -#define dprintk(num, format, args...) \ - do { \ - if (debug >= num) \ - printk(format, ##args); \ - } while (0) - -/* ========================================================================= - Local hardware I/O functions: - - read/write via codec layer (registers are located in the master device) - ========================================================================= */ - -/* read and write functions */ -static u8 -zr36060_read (struct zr36060 *ptr, - u16 reg) -{ - u8 value = 0; - - // just in case something is wrong... 
- if (ptr->codec->master_data->readreg) - value = (ptr->codec->master_data->readreg(ptr->codec, - reg)) & 0xff; - else - dprintk(1, - KERN_ERR "%s: invalid I/O setup, nothing read!\n", - ptr->name); - - //dprintk(4, "%s: reading from 0x%04x: %02x\n",ptr->name,reg,value); - - return value; -} - -static void -zr36060_write(struct zr36060 *ptr, - u16 reg, - u8 value) -{ - //dprintk(4, "%s: writing 0x%02x to 0x%04x\n",ptr->name,value,reg); - dprintk(4, "0x%02x @0x%04x\n", value, reg); - - // just in case something is wrong... - if (ptr->codec->master_data->writereg) - ptr->codec->master_data->writereg(ptr->codec, reg, value); - else - dprintk(1, - KERN_ERR - "%s: invalid I/O setup, nothing written!\n", - ptr->name); -} - -/* ========================================================================= - Local helper function: - - status read - ========================================================================= */ - -/* status is kept in datastructure */ -static u8 -zr36060_read_status (struct zr36060 *ptr) -{ - ptr->status = zr36060_read(ptr, ZR060_CFSR); - - zr36060_read(ptr, 0); - return ptr->status; -} - -/* ========================================================================= - Local helper function: - - scale factor read - ========================================================================= */ - -/* scale factor is kept in datastructure */ -static u16 -zr36060_read_scalefactor (struct zr36060 *ptr) -{ - ptr->scalefact = (zr36060_read(ptr, ZR060_SF_HI) << 8) | - (zr36060_read(ptr, ZR060_SF_LO) & 0xFF); - - /* leave 0 selected for an eventually GO from master */ - zr36060_read(ptr, 0); - return ptr->scalefact; -} - -/* ========================================================================= - Local helper function: - - wait if codec is ready to proceed (end of processing) or time is over - ========================================================================= */ - -static void -zr36060_wait_end (struct zr36060 *ptr) -{ - int i = 0; - - while 
(zr36060_read_status(ptr) & ZR060_CFSR_Busy) { - udelay(1); - if (i++ > 200000) { // 200ms, there is for sure something wrong!!! - dprintk(1, - "%s: timeout at wait_end (last status: 0x%02x)\n", - ptr->name, ptr->status); - break; - } - } -} - -/* ========================================================================= - Local helper function: - - basic test of "connectivity", writes/reads to/from memory the SOF marker - ========================================================================= */ - -static int -zr36060_basic_test (struct zr36060 *ptr) -{ - if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) && - (zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) { - dprintk(1, - KERN_ERR - "%s: attach failed, can't connect to jpeg processor!\n", - ptr->name); - return -ENXIO; - } - - zr36060_wait_end(ptr); - if (ptr->status & ZR060_CFSR_Busy) { - dprintk(1, - KERN_ERR - "%s: attach failed, jpeg processor failed (end flag)!\n", - ptr->name); - return -EBUSY; - } - - return 0; /* looks good! */ -} - -/* ========================================================================= - Local helper function: - - simple loop for pushing the init datasets - ========================================================================= */ - -static int -zr36060_pushit (struct zr36060 *ptr, - u16 startreg, - u16 len, - const char *data) -{ - int i = 0; - - dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, - startreg, len); - while (i < len) { - zr36060_write(ptr, startreg++, data[i++]); - } - - return i; -} - -/* ========================================================================= - Basic datasets: - - jpeg baseline setup data (you find it on lots places in internet, or just - extract it from any regular .jpg image...) - - Could be variable, but until it's not needed it they are just fixed to save - memory. Otherwise expand zr36060 structure with arrays, push the values to - it and initialize from there, as e.g. the linux zr36057/60 driver does it. 
- ========================================================================= */ - -static const char zr36060_dqt[0x86] = { - 0xff, 0xdb, //Marker: DQT - 0x00, 0x84, //Length: 2*65+2 - 0x00, //Pq,Tq first table - 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, - 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, - 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, - 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, - 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, - 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, - 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, - 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, - 0x01, //Pq,Tq second table - 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, - 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63 -}; - -static const char zr36060_dht[0x1a4] = { - 0xff, 0xc4, //Marker: DHT - 0x01, 0xa2, //Length: 2*AC, 2*DC - 0x00, //DC first table - 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, - 0x01, //DC second table - 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, - 0x10, //AC first table - 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, - 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, - 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, - 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, - 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, - 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, - 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, - 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, - 0x35, 
0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, - 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, - 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, - 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, - 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, - 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, - 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, - 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, - 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, - 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, - 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, - 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, - 0xF8, 0xF9, 0xFA, - 0x11, //AC second table - 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, - 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, - 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, - 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, - 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, - 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, - 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, - 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, - 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, - 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, - 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, - 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, - 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, - 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, - 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, - 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, - 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, - 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, - 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, - 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, - 0xF9, 0xFA -}; - -/* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ -#define NO_OF_COMPONENTS 0x3 //Y,U,V -#define BASELINE_PRECISION 0x8 //MCU size (?) 
-static const char zr36060_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT -static const char zr36060_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC -static const char zr36060_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC - -/* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ -static const char zr36060_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; -static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; - -/* ========================================================================= - Local helper functions: - - calculation and setup of parameter-dependent JPEG baseline segments - (needed for compression only) - ========================================================================= */ - -/* ------------------------------------------------------------------------- */ - -/* SOF (start of frame) segment depends on width, height and sampling ratio - of each color component */ - -static int -zr36060_set_sof (struct zr36060 *ptr) -{ - char sof_data[34]; // max. 
size of register set - int i; - - dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, - ptr->width, ptr->height, NO_OF_COMPONENTS); - sof_data[0] = 0xff; - sof_data[1] = 0xc0; - sof_data[2] = 0x00; - sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; - sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36060 - sof_data[5] = (ptr->height) >> 8; - sof_data[6] = (ptr->height) & 0xff; - sof_data[7] = (ptr->width) >> 8; - sof_data[8] = (ptr->width) & 0xff; - sof_data[9] = NO_OF_COMPONENTS; - for (i = 0; i < NO_OF_COMPONENTS; i++) { - sof_data[10 + (i * 3)] = i; // index identifier - sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | - (ptr->v_samp_ratio[i]); // sampling ratios - sof_data[12 + (i * 3)] = zr36060_tq[i]; // Q table selection - } - return zr36060_pushit(ptr, ZR060_SOF_IDX, - (3 * NO_OF_COMPONENTS) + 10, sof_data); -} - -/* ------------------------------------------------------------------------- */ - -/* SOS (start of scan) segment depends on the used scan components - of each color component */ - -static int -zr36060_set_sos (struct zr36060 *ptr) -{ - char sos_data[16]; // max. size of register set - int i; - - dprintk(3, "%s: write SOS\n", ptr->name); - sos_data[0] = 0xff; - sos_data[1] = 0xda; - sos_data[2] = 0x00; - sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; - sos_data[4] = NO_OF_COMPONENTS; - for (i = 0; i < NO_OF_COMPONENTS; i++) { - sos_data[5 + (i * 2)] = i; // index - sos_data[6 + (i * 2)] = (zr36060_td[i] << 4) | - zr36060_ta[i]; // AC/DC tbl.sel. - } - sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start - sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3f; - sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; - return zr36060_pushit(ptr, ZR060_SOS_IDX, - 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, - sos_data); -} - -/* ------------------------------------------------------------------------- */ - -/* DRI (define restart interval) */ - -static int -zr36060_set_dri (struct zr36060 *ptr) -{ - char dri_data[6]; // max. 
size of register set - - dprintk(3, "%s: write DRI\n", ptr->name); - dri_data[0] = 0xff; - dri_data[1] = 0xdd; - dri_data[2] = 0x00; - dri_data[3] = 0x04; - dri_data[4] = (ptr->dri) >> 8; - dri_data[5] = (ptr->dri) & 0xff; - return zr36060_pushit(ptr, ZR060_DRI_IDX, 6, dri_data); -} - -/* ========================================================================= - Setup function: - - Setup compression/decompression of Zoran's JPEG processor - ( see also zoran 36060 manual ) - - ... sorry for the spaghetti code ... - ========================================================================= */ -static void -zr36060_init (struct zr36060 *ptr) -{ - int sum = 0; - long bitcnt, tmp; - - if (ptr->mode == CODEC_DO_COMPRESSION) { - dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); - - zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); - - /* 060 communicates with 067 in master mode */ - zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); - - /* Compression with or without variable scale factor */ - /*FIXME: What about ptr->bitrate_ctrl? 
*/ - zr36060_write(ptr, ZR060_CMR, - ZR060_CMR_Comp | ZR060_CMR_Pass2 | - ZR060_CMR_BRB); - - /* Must be zero */ - zr36060_write(ptr, ZR060_MBZ, 0x00); - zr36060_write(ptr, ZR060_TCR_HI, 0x00); - zr36060_write(ptr, ZR060_TCR_LO, 0x00); - - /* Disable all IRQs - no DataErr means autoreset */ - zr36060_write(ptr, ZR060_IMR, 0); - - /* volume control settings */ - zr36060_write(ptr, ZR060_SF_HI, ptr->scalefact >> 8); - zr36060_write(ptr, ZR060_SF_LO, ptr->scalefact & 0xff); - - zr36060_write(ptr, ZR060_AF_HI, 0xff); - zr36060_write(ptr, ZR060_AF_M, 0xff); - zr36060_write(ptr, ZR060_AF_LO, 0xff); - - /* setup the variable jpeg tables */ - sum += zr36060_set_sof(ptr); - sum += zr36060_set_sos(ptr); - sum += zr36060_set_dri(ptr); - - /* setup the fixed jpeg tables - maybe variable, though - - * (see table init section above) */ - sum += - zr36060_pushit(ptr, ZR060_DQT_IDX, sizeof(zr36060_dqt), - zr36060_dqt); - sum += - zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), - zr36060_dht); - zr36060_write(ptr, ZR060_APP_IDX, 0xff); - zr36060_write(ptr, ZR060_APP_IDX + 1, 0xe0 + ptr->app.appn); - zr36060_write(ptr, ZR060_APP_IDX + 2, 0x00); - zr36060_write(ptr, ZR060_APP_IDX + 3, ptr->app.len + 2); - sum += zr36060_pushit(ptr, ZR060_APP_IDX + 4, 60, - ptr->app.data) + 4; - zr36060_write(ptr, ZR060_COM_IDX, 0xff); - zr36060_write(ptr, ZR060_COM_IDX + 1, 0xfe); - zr36060_write(ptr, ZR060_COM_IDX + 2, 0x00); - zr36060_write(ptr, ZR060_COM_IDX + 3, ptr->com.len + 2); - sum += zr36060_pushit(ptr, ZR060_COM_IDX + 4, 60, - ptr->com.data) + 4; - - /* setup misc. 
data for compression (target code sizes) */ - - /* size of compressed code to reach without header data */ - sum = ptr->real_code_vol - sum; - bitcnt = sum << 3; /* need the size in bits */ - - tmp = bitcnt >> 16; - dprintk(3, - "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", - ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); - zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8); - zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff); - tmp = bitcnt & 0xffff; - zr36060_write(ptr, ZR060_TCV_NET_ML, tmp >> 8); - zr36060_write(ptr, ZR060_TCV_NET_LO, tmp & 0xff); - - bitcnt -= bitcnt >> 7; // bits without stuffing - bitcnt -= ((bitcnt * 5) >> 6); // bits without eob - - tmp = bitcnt >> 16; - dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", - ptr->name, bitcnt, tmp); - zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8); - zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff); - tmp = bitcnt & 0xffff; - zr36060_write(ptr, ZR060_TCV_DATA_ML, tmp >> 8); - zr36060_write(ptr, ZR060_TCV_DATA_LO, tmp & 0xff); - - /* JPEG markers to be included in the compressed stream */ - zr36060_write(ptr, ZR060_MER, - ZR060_MER_DQT | ZR060_MER_DHT | - ((ptr->com.len > 0) ? ZR060_MER_Com : 0) | - ((ptr->app.len > 0) ? ZR060_MER_App : 0)); - - /* Setup the Video Frontend */ - /* Limit pixel range to 16..235 as per CCIR-601 */ - zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); - - } else { - dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); - - zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); - - /* 060 communicates with 067 in master mode */ - zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); - - /* Decompression */ - zr36060_write(ptr, ZR060_CMR, 0); - - /* Must be zero */ - zr36060_write(ptr, ZR060_MBZ, 0x00); - zr36060_write(ptr, ZR060_TCR_HI, 0x00); - zr36060_write(ptr, ZR060_TCR_LO, 0x00); - - /* Disable all IRQs - no DataErr means autoreset */ - zr36060_write(ptr, ZR060_IMR, 0); - - /* setup misc. 
data for expansion */ - zr36060_write(ptr, ZR060_MER, 0); - - /* setup the fixed jpeg tables - maybe variable, though - - * (see table init section above) */ - zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), - zr36060_dht); - - /* Setup the Video Frontend */ - //zr36060_write(ptr, ZR060_VCR, ZR060_VCR_FIExt); - //this doesn't seem right and doesn't work... - zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); - } - - /* Load the tables */ - zr36060_write(ptr, ZR060_LOAD, - ZR060_LOAD_SyncRst | ZR060_LOAD_Load); - zr36060_wait_end(ptr); - dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, - ptr->status); - - if (ptr->status & ZR060_CFSR_Busy) { - dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); - return; // something is wrong, its timed out!!!! - } -} - -/* ========================================================================= - CODEC API FUNCTIONS - - this functions are accessed by the master via the API structure - ========================================================================= */ - -/* set compression/expansion mode and launches codec - - this should be the last call from the master before starting processing */ -static int -zr36060_set_mode (struct videocodec *codec, - int mode) -{ - struct zr36060 *ptr = (struct zr36060 *) codec->data; - - dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); - - if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) - return -EINVAL; - - ptr->mode = mode; - zr36060_init(ptr); - - return 0; -} - -/* set picture size (norm is ignored as the codec doesn't know about it) */ -static int -zr36060_set_video (struct videocodec *codec, - struct tvnorm *norm, - struct vfe_settings *cap, - struct vfe_polarity *pol) -{ - struct zr36060 *ptr = (struct zr36060 *) codec->data; - u32 reg; - int size; - - dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name, - cap->x, cap->y, cap->width, cap->height, cap->decimation); - - /* if () return -EINVAL; - * trust the master driver that it 
knows what it does - so - * we allow invalid startx/y and norm for now ... */ - ptr->width = cap->width / (cap->decimation & 0xff); - ptr->height = cap->height / (cap->decimation >> 8); - - zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); - - /* Note that VSPol/HSPol bits in zr36060 have the opposite - * meaning of their zr360x7 counterparts with the same names - * N.b. for VSPol this is only true if FIVEdge = 0 (default, - * left unchanged here - in accordance with datasheet). - */ - reg = (!pol->vsync_pol ? ZR060_VPR_VSPol : 0) - | (!pol->hsync_pol ? ZR060_VPR_HSPol : 0) - | (pol->field_pol ? ZR060_VPR_FIPol : 0) - | (pol->blank_pol ? ZR060_VPR_BLPol : 0) - | (pol->subimg_pol ? ZR060_VPR_SImgPol : 0) - | (pol->poe_pol ? ZR060_VPR_PoePol : 0) - | (pol->pvalid_pol ? ZR060_VPR_PValPol : 0) - | (pol->vclk_pol ? ZR060_VPR_VCLKPol : 0); - zr36060_write(ptr, ZR060_VPR, reg); - - reg = 0; - switch (cap->decimation & 0xff) { - default: - case 1: - break; - - case 2: - reg |= ZR060_SR_HScale2; - break; - - case 4: - reg |= ZR060_SR_HScale4; - break; - } - - switch (cap->decimation >> 8) { - default: - case 1: - break; - - case 2: - reg |= ZR060_SR_VScale; - break; - } - zr36060_write(ptr, ZR060_SR, reg); - - zr36060_write(ptr, ZR060_BCR_Y, 0x00); - zr36060_write(ptr, ZR060_BCR_U, 0x80); - zr36060_write(ptr, ZR060_BCR_V, 0x80); - - /* sync generator */ - - reg = norm->Ht - 1; /* Vtotal */ - zr36060_write(ptr, ZR060_SGR_VTOTAL_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_SGR_VTOTAL_LO, (reg >> 0) & 0xff); - - reg = norm->Wt - 1; /* Htotal */ - zr36060_write(ptr, ZR060_SGR_HTOTAL_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_SGR_HTOTAL_LO, (reg >> 0) & 0xff); - - reg = 6 - 1; /* VsyncSize */ - zr36060_write(ptr, ZR060_SGR_VSYNC, reg); - - //reg = 30 - 1; /* HsyncSize */ -///*CP*/ reg = (zr->params.norm == 1 ? 
57 : 68); - reg = 68; - zr36060_write(ptr, ZR060_SGR_HSYNC, reg); - - reg = norm->VStart - 1; /* BVstart */ - zr36060_write(ptr, ZR060_SGR_BVSTART, reg); - - reg += norm->Ha / 2; /* BVend */ - zr36060_write(ptr, ZR060_SGR_BVEND_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_SGR_BVEND_LO, (reg >> 0) & 0xff); - - reg = norm->HStart - 1; /* BHstart */ - zr36060_write(ptr, ZR060_SGR_BHSTART, reg); - - reg += norm->Wa; /* BHend */ - zr36060_write(ptr, ZR060_SGR_BHEND_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_SGR_BHEND_LO, (reg >> 0) & 0xff); - - /* active area */ - reg = cap->y + norm->VStart; /* Vstart */ - zr36060_write(ptr, ZR060_AAR_VSTART_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_AAR_VSTART_LO, (reg >> 0) & 0xff); - - reg += cap->height; /* Vend */ - zr36060_write(ptr, ZR060_AAR_VEND_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_AAR_VEND_LO, (reg >> 0) & 0xff); - - reg = cap->x + norm->HStart; /* Hstart */ - zr36060_write(ptr, ZR060_AAR_HSTART_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_AAR_HSTART_LO, (reg >> 0) & 0xff); - - reg += cap->width; /* Hend */ - zr36060_write(ptr, ZR060_AAR_HEND_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_AAR_HEND_LO, (reg >> 0) & 0xff); - - /* subimage area */ - reg = norm->VStart - 4; /* SVstart */ - zr36060_write(ptr, ZR060_SWR_VSTART_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_SWR_VSTART_LO, (reg >> 0) & 0xff); - - reg += norm->Ha / 2 + 8; /* SVend */ - zr36060_write(ptr, ZR060_SWR_VEND_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_SWR_VEND_LO, (reg >> 0) & 0xff); - - reg = norm->HStart /*+ 64 */ - 4; /* SHstart */ - zr36060_write(ptr, ZR060_SWR_HSTART_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_SWR_HSTART_LO, (reg >> 0) & 0xff); - - reg += norm->Wa + 8; /* SHend */ - zr36060_write(ptr, ZR060_SWR_HEND_HI, (reg >> 8) & 0xff); - zr36060_write(ptr, ZR060_SWR_HEND_LO, (reg >> 0) & 0xff); - - size = ptr->width * ptr->height; - /* Target compressed field size in bits: */ - size = 
size * 16; /* uncompressed size in bits */ - /* (Ronald) by default, quality = 100 is a compression - * ratio 1:2. Setting low_bitrate (insmod option) sets - * it to 1:4 (instead of 1:2, zr36060 max) as limit because the - * buz can't handle more at decimation=1... Use low_bitrate if - * you have a Buz, unless you know what you're doing */ - size = size * cap->quality / (low_bitrate ? 400 : 200); - /* Lower limit (arbitrary, 1 KB) */ - if (size < 8192) - size = 8192; - /* Upper limit: 7/8 of the code buffers */ - if (size > ptr->total_code_vol * 7) - size = ptr->total_code_vol * 7; - - ptr->real_code_vol = size >> 3; /* in bytes */ - - /* the MBCVR is the *maximum* block volume, according to the - * JPEG ISO specs, this shouldn't be used, since that allows - * for the best encoding quality. So set it to it's max value */ - reg = ptr->max_block_vol; - zr36060_write(ptr, ZR060_MBCVR, reg); - - return 0; -} - -/* additional control functions */ -static int -zr36060_control (struct videocodec *codec, - int type, - int size, - void *data) -{ - struct zr36060 *ptr = (struct zr36060 *) codec->data; - int *ival = (int *) data; - - dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, - size); - - switch (type) { - case CODEC_G_STATUS: /* get last status */ - if (size != sizeof(int)) - return -EFAULT; - zr36060_read_status(ptr); - *ival = ptr->status; - break; - - case CODEC_G_CODEC_MODE: - if (size != sizeof(int)) - return -EFAULT; - *ival = CODEC_MODE_BJPG; - break; - - case CODEC_S_CODEC_MODE: - if (size != sizeof(int)) - return -EFAULT; - if (*ival != CODEC_MODE_BJPG) - return -EINVAL; - /* not needed, do nothing */ - return 0; - - case CODEC_G_VFE: - case CODEC_S_VFE: - /* not needed, do nothing */ - return 0; - - case CODEC_S_MMAP: - /* not available, give an error */ - return -ENXIO; - - case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */ - if (size != sizeof(int)) - return -EFAULT; - *ival = ptr->total_code_vol; - break; - - case 
CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */ - if (size != sizeof(int)) - return -EFAULT; - ptr->total_code_vol = *ival; - ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; - break; - - case CODEC_G_JPEG_SCALE: /* get scaling factor */ - if (size != sizeof(int)) - return -EFAULT; - *ival = zr36060_read_scalefactor(ptr); - break; - - case CODEC_S_JPEG_SCALE: /* set scaling factor */ - if (size != sizeof(int)) - return -EFAULT; - ptr->scalefact = *ival; - break; - - case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ - struct jpeg_app_marker *app = data; - - if (size != sizeof(struct jpeg_app_marker)) - return -EFAULT; - - *app = ptr->app; - break; - } - - case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ - struct jpeg_app_marker *app = data; - - if (size != sizeof(struct jpeg_app_marker)) - return -EFAULT; - - ptr->app = *app; - break; - } - - case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ - struct jpeg_com_marker *com = data; - - if (size != sizeof(struct jpeg_com_marker)) - return -EFAULT; - - *com = ptr->com; - break; - } - - case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ - struct jpeg_com_marker *com = data; - - if (size != sizeof(struct jpeg_com_marker)) - return -EFAULT; - - ptr->com = *com; - break; - } - - default: - return -EINVAL; - } - - return size; -} - -/* ========================================================================= - Exit and unregister function: - - Deinitializes Zoran's JPEG processor - ========================================================================= */ - -static int -zr36060_unset (struct videocodec *codec) -{ - struct zr36060 *ptr = codec->data; - - if (ptr) { - /* do wee need some codec deinit here, too ???? 
*/ - - dprintk(1, "%s: finished codec #%d\n", ptr->name, - ptr->num); - kfree(ptr); - codec->data = NULL; - - zr36060_codecs--; - return 0; - } - - return -EFAULT; -} - -/* ========================================================================= - Setup and registry function: - - Initializes Zoran's JPEG processor - - Also sets pixel size, average code size, mode (compr./decompr.) - (the given size is determined by the processor with the video interface) - ========================================================================= */ - -static int -zr36060_setup (struct videocodec *codec) -{ - struct zr36060 *ptr; - int res; - - dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n", - zr36060_codecs); - - if (zr36060_codecs == MAX_CODECS) { - dprintk(1, - KERN_ERR "zr36060: Can't attach more codecs!\n"); - return -ENOSPC; - } - //mem structure init - codec->data = ptr = kzalloc(sizeof(struct zr36060), GFP_KERNEL); - if (NULL == ptr) { - dprintk(1, KERN_ERR "zr36060: Can't get enough memory!\n"); - return -ENOMEM; - } - - snprintf(ptr->name, sizeof(ptr->name), "zr36060[%d]", - zr36060_codecs); - ptr->num = zr36060_codecs++; - ptr->codec = codec; - - //testing - res = zr36060_basic_test(ptr); - if (res < 0) { - zr36060_unset(codec); - return res; - } - //final setup - memcpy(ptr->h_samp_ratio, zr36060_decimation_h, 8); - memcpy(ptr->v_samp_ratio, zr36060_decimation_v, 8); - - ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag - * (what is the difference?) 
*/ - ptr->mode = CODEC_DO_COMPRESSION; - ptr->width = 384; - ptr->height = 288; - ptr->total_code_vol = 16000; /* CHECKME */ - ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; - ptr->max_block_vol = 240; /* CHECKME, was 120 is 240 */ - ptr->scalefact = 0x100; - ptr->dri = 1; /* CHECKME, was 8 is 1 */ - - /* by default, no COM or APP markers - app should set those */ - ptr->com.len = 0; - ptr->app.appn = 0; - ptr->app.len = 0; - - zr36060_init(ptr); - - dprintk(1, KERN_INFO "%s: codec attached and running\n", - ptr->name); - - return 0; -} - -static const struct videocodec zr36060_codec = { - .owner = THIS_MODULE, - .name = "zr36060", - .magic = 0L, // magic not used - .flags = - CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | - CODEC_FLAG_DECODER | CODEC_FLAG_VFE, - .type = CODEC_TYPE_ZR36060, - .setup = zr36060_setup, // functionality - .unset = zr36060_unset, - .set_mode = zr36060_set_mode, - .set_video = zr36060_set_video, - .control = zr36060_control, - // others are not used -}; - -/* ========================================================================= - HOOK IN DRIVER AS KERNEL MODULE - ========================================================================= */ - -static int __init -zr36060_init_module (void) -{ - //dprintk(1, "zr36060 driver %s\n",ZR060_VERSION); - zr36060_codecs = 0; - return videocodec_register(&zr36060_codec); -} - -static void __exit -zr36060_cleanup_module (void) -{ - if (zr36060_codecs) { - dprintk(1, - "zr36060: something's wrong - %d codecs left somehow.\n", - zr36060_codecs); - } - - /* however, we can't just stay alive */ - videocodec_unregister(&zr36060_codec); -} - -module_init(zr36060_init_module); -module_exit(zr36060_cleanup_module); - -MODULE_AUTHOR("Laurent Pinchart "); -MODULE_DESCRIPTION("Driver module for ZR36060 jpeg processors " - ZR060_VERSION); -MODULE_LICENSE("GPL"); diff --git a/drivers/media/pci/zoran/zr36060.h b/drivers/media/pci/zoran/zr36060.h deleted file mode 100644 index 
82911757ba78..000000000000 --- a/drivers/media/pci/zoran/zr36060.h +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Zoran ZR36060 basic configuration functions - header file - * - * Copyright (C) 2002 Laurent Pinchart - * - * $Id: zr36060.h,v 1.1.1.1.2.3 2003/01/14 21:18:47 rbultje Exp $ - * - * ------------------------------------------------------------------------ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * ------------------------------------------------------------------------ - */ - -#ifndef ZR36060_H -#define ZR36060_H - -#include "videocodec.h" - -/* data stored for each zoran jpeg codec chip */ -struct zr36060 { - char name[32]; - int num; - /* io datastructure */ - struct videocodec *codec; - // last coder status - __u8 status; - // actual coder setup - int mode; - - __u16 width; - __u16 height; - - __u16 bitrate_ctrl; - - __u32 total_code_vol; - __u32 real_code_vol; - __u16 max_block_vol; - - __u8 h_samp_ratio[8]; - __u8 v_samp_ratio[8]; - __u16 scalefact; - __u16 dri; - - /* app/com marker data */ - struct jpeg_app_marker app; - struct jpeg_com_marker com; -}; - -/* ZR36060 register addresses */ -#define ZR060_LOAD 0x000 -#define ZR060_CFSR 0x001 -#define ZR060_CIR 0x002 -#define ZR060_CMR 0x003 -#define ZR060_MBZ 0x004 -#define ZR060_MBCVR 0x005 -#define ZR060_MER 0x006 -#define ZR060_IMR 0x007 -#define ZR060_ISR 0x008 -#define ZR060_TCV_NET_HI 0x009 -#define ZR060_TCV_NET_MH 0x00a -#define ZR060_TCV_NET_ML 0x00b -#define ZR060_TCV_NET_LO 0x00c -#define ZR060_TCV_DATA_HI 0x00d -#define 
ZR060_TCV_DATA_MH 0x00e -#define ZR060_TCV_DATA_ML 0x00f -#define ZR060_TCV_DATA_LO 0x010 -#define ZR060_SF_HI 0x011 -#define ZR060_SF_LO 0x012 -#define ZR060_AF_HI 0x013 -#define ZR060_AF_M 0x014 -#define ZR060_AF_LO 0x015 -#define ZR060_ACV_HI 0x016 -#define ZR060_ACV_MH 0x017 -#define ZR060_ACV_ML 0x018 -#define ZR060_ACV_LO 0x019 -#define ZR060_ACT_HI 0x01a -#define ZR060_ACT_MH 0x01b -#define ZR060_ACT_ML 0x01c -#define ZR060_ACT_LO 0x01d -#define ZR060_ACV_TRUN_HI 0x01e -#define ZR060_ACV_TRUN_MH 0x01f -#define ZR060_ACV_TRUN_ML 0x020 -#define ZR060_ACV_TRUN_LO 0x021 -#define ZR060_IDR_DEV 0x022 -#define ZR060_IDR_REV 0x023 -#define ZR060_TCR_HI 0x024 -#define ZR060_TCR_LO 0x025 -#define ZR060_VCR 0x030 -#define ZR060_VPR 0x031 -#define ZR060_SR 0x032 -#define ZR060_BCR_Y 0x033 -#define ZR060_BCR_U 0x034 -#define ZR060_BCR_V 0x035 -#define ZR060_SGR_VTOTAL_HI 0x036 -#define ZR060_SGR_VTOTAL_LO 0x037 -#define ZR060_SGR_HTOTAL_HI 0x038 -#define ZR060_SGR_HTOTAL_LO 0x039 -#define ZR060_SGR_VSYNC 0x03a -#define ZR060_SGR_HSYNC 0x03b -#define ZR060_SGR_BVSTART 0x03c -#define ZR060_SGR_BHSTART 0x03d -#define ZR060_SGR_BVEND_HI 0x03e -#define ZR060_SGR_BVEND_LO 0x03f -#define ZR060_SGR_BHEND_HI 0x040 -#define ZR060_SGR_BHEND_LO 0x041 -#define ZR060_AAR_VSTART_HI 0x042 -#define ZR060_AAR_VSTART_LO 0x043 -#define ZR060_AAR_VEND_HI 0x044 -#define ZR060_AAR_VEND_LO 0x045 -#define ZR060_AAR_HSTART_HI 0x046 -#define ZR060_AAR_HSTART_LO 0x047 -#define ZR060_AAR_HEND_HI 0x048 -#define ZR060_AAR_HEND_LO 0x049 -#define ZR060_SWR_VSTART_HI 0x04a -#define ZR060_SWR_VSTART_LO 0x04b -#define ZR060_SWR_VEND_HI 0x04c -#define ZR060_SWR_VEND_LO 0x04d -#define ZR060_SWR_HSTART_HI 0x04e -#define ZR060_SWR_HSTART_LO 0x04f -#define ZR060_SWR_HEND_HI 0x050 -#define ZR060_SWR_HEND_LO 0x051 - -#define ZR060_SOF_IDX 0x060 -#define ZR060_SOS_IDX 0x07a -#define ZR060_DRI_IDX 0x0c0 -#define ZR060_DQT_IDX 0x0cc -#define ZR060_DHT_IDX 0x1d4 -#define ZR060_APP_IDX 0x380 -#define ZR060_COM_IDX 
0x3c0 - -/* ZR36060 LOAD register bits */ - -#define ZR060_LOAD_Load (1 << 7) -#define ZR060_LOAD_SyncRst (1 << 0) - -/* ZR36060 Code FIFO Status register bits */ - -#define ZR060_CFSR_Busy (1 << 7) -#define ZR060_CFSR_CBusy (1 << 2) -#define ZR060_CFSR_CFIFO (3 << 0) - -/* ZR36060 Code Interface register */ - -#define ZR060_CIR_Code16 (1 << 7) -#define ZR060_CIR_Endian (1 << 6) -#define ZR060_CIR_CFIS (1 << 2) -#define ZR060_CIR_CodeMstr (1 << 0) - -/* ZR36060 Codec Mode register */ - -#define ZR060_CMR_Comp (1 << 7) -#define ZR060_CMR_ATP (1 << 6) -#define ZR060_CMR_Pass2 (1 << 5) -#define ZR060_CMR_TLM (1 << 4) -#define ZR060_CMR_BRB (1 << 2) -#define ZR060_CMR_FSF (1 << 1) - -/* ZR36060 Markers Enable register */ - -#define ZR060_MER_App (1 << 7) -#define ZR060_MER_Com (1 << 6) -#define ZR060_MER_DRI (1 << 5) -#define ZR060_MER_DQT (1 << 4) -#define ZR060_MER_DHT (1 << 3) - -/* ZR36060 Interrupt Mask register */ - -#define ZR060_IMR_EOAV (1 << 3) -#define ZR060_IMR_EOI (1 << 2) -#define ZR060_IMR_End (1 << 1) -#define ZR060_IMR_DataErr (1 << 0) - -/* ZR36060 Interrupt Status register */ - -#define ZR060_ISR_ProCnt (3 << 6) -#define ZR060_ISR_EOAV (1 << 3) -#define ZR060_ISR_EOI (1 << 2) -#define ZR060_ISR_End (1 << 1) -#define ZR060_ISR_DataErr (1 << 0) - -/* ZR36060 Video Control register */ - -#define ZR060_VCR_Video8 (1 << 7) -#define ZR060_VCR_Range (1 << 6) -#define ZR060_VCR_FIDet (1 << 3) -#define ZR060_VCR_FIVedge (1 << 2) -#define ZR060_VCR_FIExt (1 << 1) -#define ZR060_VCR_SyncMstr (1 << 0) - -/* ZR36060 Video Polarity register */ - -#define ZR060_VPR_VCLKPol (1 << 7) -#define ZR060_VPR_PValPol (1 << 6) -#define ZR060_VPR_PoePol (1 << 5) -#define ZR060_VPR_SImgPol (1 << 4) -#define ZR060_VPR_BLPol (1 << 3) -#define ZR060_VPR_FIPol (1 << 2) -#define ZR060_VPR_HSPol (1 << 1) -#define ZR060_VPR_VSPol (1 << 0) - -/* ZR36060 Scaling register */ - -#define ZR060_SR_VScale (1 << 2) -#define ZR060_SR_HScale2 (1 << 0) -#define ZR060_SR_HScale4 (2 << 0) - 
-#endif /*fndef ZR36060_H */ diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig index 494f369695d7..db5cf67047ad 100644 --- a/drivers/staging/media/Kconfig +++ b/drivers/staging/media/Kconfig @@ -33,4 +33,6 @@ source "drivers/staging/media/omap4iss/Kconfig" source "drivers/staging/media/tegra-vde/Kconfig" +source "drivers/staging/media/zoran/Kconfig" + endif diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile index 59c184c7cfa8..503fbe47fa58 100644 --- a/drivers/staging/media/Makefile +++ b/drivers/staging/media/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031/ obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/ obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/ obj-$(CONFIG_TEGRA_VDE) += tegra-vde/ +obj-$(CONFIG_VIDEO_ZORAN) += zoran/ diff --git a/drivers/staging/media/zoran/Kconfig b/drivers/staging/media/zoran/Kconfig new file mode 100644 index 000000000000..63df5de5068d --- /dev/null +++ b/drivers/staging/media/zoran/Kconfig @@ -0,0 +1,75 @@ +config VIDEO_ZORAN + tristate "Zoran ZR36057/36067 Video For Linux (Deprecated)" + depends on PCI && I2C_ALGOBIT && VIDEO_V4L2 && VIRT_TO_BUS + depends on !ALPHA + help + Say Y for support for MJPEG capture cards based on the Zoran + 36057/36067 PCI controller chipset. This includes the Iomega + Buz, Pinnacle DC10+ and the Linux Media Labs LML33. There is + a driver homepage at . For + more information, check . + + To compile this driver as a module, choose M here: the + module will be called zr36067. + +config VIDEO_ZORAN_DC30 + tristate "Pinnacle/Miro DC30(+) support" + depends on VIDEO_ZORAN + select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT + select VIDEO_VPX3220 if MEDIA_SUBDRV_AUTOSELECT + help + Support for the Pinnacle/Miro DC30(+) MJPEG capture/playback + card. This also supports really old DC10 cards based on the + zr36050 MJPEG codec and zr36016 VFE. 
+ +config VIDEO_ZORAN_ZR36060 + tristate "Zoran ZR36060" + depends on VIDEO_ZORAN + help + Say Y to support Zoran boards based on 36060 chips. + This includes Iomega Buz, Pinnacle DC10, Linux media Labs 33 + and 33 R10 and AverMedia 6 boards. + +config VIDEO_ZORAN_BUZ + tristate "Iomega Buz support" + depends on VIDEO_ZORAN_ZR36060 + select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT + select VIDEO_SAA7185 if MEDIA_SUBDRV_AUTOSELECT + help + Support for the Iomega Buz MJPEG capture/playback card. + +config VIDEO_ZORAN_DC10 + tristate "Pinnacle/Miro DC10(+) support" + depends on VIDEO_ZORAN_ZR36060 + select VIDEO_SAA7110 if MEDIA_SUBDRV_AUTOSELECT + select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT + help + Support for the Pinnacle/Miro DC10(+) MJPEG capture/playback + card. + +config VIDEO_ZORAN_LML33 + tristate "Linux Media Labs LML33 support" + depends on VIDEO_ZORAN_ZR36060 + select VIDEO_BT819 if MEDIA_SUBDRV_AUTOSELECT + select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT + help + Support for the Linux Media Labs LML33 MJPEG capture/playback + card. + +config VIDEO_ZORAN_LML33R10 + tristate "Linux Media Labs LML33R10 support" + depends on VIDEO_ZORAN_ZR36060 + select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT + select VIDEO_ADV7170 if MEDIA_SUBDRV_AUTOSELECT + help + support for the Linux Media Labs LML33R10 MJPEG capture/playback + card. + +config VIDEO_ZORAN_AVS6EYES + tristate "AverMedia 6 Eyes support" + depends on VIDEO_ZORAN_ZR36060 + select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT + select VIDEO_BT866 if MEDIA_SUBDRV_AUTOSELECT + select VIDEO_KS0127 if MEDIA_SUBDRV_AUTOSELECT + help + Support for the AverMedia 6 Eyes video surveillance card. 
diff --git a/drivers/staging/media/zoran/Makefile b/drivers/staging/media/zoran/Makefile new file mode 100644 index 000000000000..21ac29a71458 --- /dev/null +++ b/drivers/staging/media/zoran/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +zr36067-objs := zoran_procfs.o zoran_device.o \ + zoran_driver.o zoran_card.o + +obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o +obj-$(CONFIG_VIDEO_ZORAN_DC30) += zr36050.o zr36016.o +obj-$(CONFIG_VIDEO_ZORAN_ZR36060) += zr36060.o diff --git a/drivers/staging/media/zoran/TODO b/drivers/staging/media/zoran/TODO new file mode 100644 index 000000000000..54464095d0d7 --- /dev/null +++ b/drivers/staging/media/zoran/TODO @@ -0,0 +1,4 @@ +The zoran driver is marked deprecated. It will be removed +around May 2019 unless someone is willing to update this +driver to the latest V4L2 frameworks (especially the vb2 +framework). diff --git a/drivers/staging/media/zoran/videocodec.c b/drivers/staging/media/zoran/videocodec.c new file mode 100644 index 000000000000..5ff23ef89215 --- /dev/null +++ b/drivers/staging/media/zoran/videocodec.c @@ -0,0 +1,403 @@ +/* + * VIDEO MOTION CODECs internal API for video devices + * + * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's + * bound to a master device. + * + * (c) 2002 Wolfgang Scherr + * + * $Id: videocodec.c,v 1.1.2.8 2003/03/29 07:16:04 rbultje Exp $ + * + * ------------------------------------------------------------------------ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * ------------------------------------------------------------------------ + */ + +#define VIDEOCODEC_VERSION "v0.2" + +#include +#include +#include +#include +#include + +// kernel config is here (procfs flag) + +#ifdef CONFIG_PROC_FS +#include +#include +#include +#endif + +#include "videocodec.h" + +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0-4)"); + +#define dprintk(num, format, args...) \ + do { \ + if (debug >= num) \ + printk(format, ##args); \ + } while (0) + +struct attached_list { + struct videocodec *codec; + struct attached_list *next; +}; + +struct codec_list { + const struct videocodec *codec; + int attached; + struct attached_list *list; + struct codec_list *next; +}; + +static struct codec_list *codeclist_top = NULL; + +/* ================================================= */ +/* function prototypes of the master/slave interface */ +/* ================================================= */ + +struct videocodec * +videocodec_attach (struct videocodec_master *master) +{ + struct codec_list *h = codeclist_top; + struct attached_list *a, *ptr; + struct videocodec *codec; + int res; + + if (!master) { + dprintk(1, KERN_ERR "videocodec_attach: no data\n"); + return NULL; + } + + dprintk(2, + "videocodec_attach: '%s', flags %lx, magic %lx\n", + master->name, master->flags, master->magic); + + if (!h) { + dprintk(1, + KERN_ERR + "videocodec_attach: no device available\n"); + return NULL; + } + + while (h) { + // attach only if the slave has at least the flags + // expected by the master + if ((master->flags & h->codec->flags) == master->flags) { + dprintk(4, "videocodec_attach: try '%s'\n", + h->codec->name); + + if (!try_module_get(h->codec->owner)) + return NULL; + + codec = kmemdup(h->codec, sizeof(struct videocodec), + GFP_KERNEL); + if (!codec) { + dprintk(1, + KERN_ERR + "videocodec_attach: no mem\n"); + goto out_module_put; + } + + res = strlen(codec->name); + snprintf(codec->name + res, 
sizeof(codec->name) - res, + "[%d]", h->attached); + codec->master_data = master; + res = codec->setup(codec); + if (res == 0) { + dprintk(3, "videocodec_attach '%s'\n", + codec->name); + ptr = kzalloc(sizeof(struct attached_list), GFP_KERNEL); + if (!ptr) { + dprintk(1, + KERN_ERR + "videocodec_attach: no memory\n"); + goto out_kfree; + } + ptr->codec = codec; + + a = h->list; + if (!a) { + h->list = ptr; + dprintk(4, + "videocodec: first element\n"); + } else { + while (a->next) + a = a->next; // find end + a->next = ptr; + dprintk(4, + "videocodec: in after '%s'\n", + h->codec->name); + } + + h->attached += 1; + return codec; + } else { + kfree(codec); + } + } + h = h->next; + } + + dprintk(1, KERN_ERR "videocodec_attach: no codec found!\n"); + return NULL; + + out_module_put: + module_put(h->codec->owner); + out_kfree: + kfree(codec); + return NULL; +} + +int +videocodec_detach (struct videocodec *codec) +{ + struct codec_list *h = codeclist_top; + struct attached_list *a, *prev; + int res; + + if (!codec) { + dprintk(1, KERN_ERR "videocodec_detach: no data\n"); + return -EINVAL; + } + + dprintk(2, + "videocodec_detach: '%s', type: %x, flags %lx, magic %lx\n", + codec->name, codec->type, codec->flags, codec->magic); + + if (!h) { + dprintk(1, + KERN_ERR "videocodec_detach: no device left...\n"); + return -ENXIO; + } + + while (h) { + a = h->list; + prev = NULL; + while (a) { + if (codec == a->codec) { + res = a->codec->unset(a->codec); + if (res >= 0) { + dprintk(3, + "videocodec_detach: '%s'\n", + a->codec->name); + a->codec->master_data = NULL; + } else { + dprintk(1, + KERN_ERR + "videocodec_detach: '%s'\n", + a->codec->name); + a->codec->master_data = NULL; + } + if (prev == NULL) { + h->list = a->next; + dprintk(4, + "videocodec: delete first\n"); + } else { + prev->next = a->next; + dprintk(4, + "videocodec: delete middle\n"); + } + module_put(a->codec->owner); + kfree(a->codec); + kfree(a); + h->attached -= 1; + return 0; + } + prev = a; + a = a->next; + 
} + h = h->next; + } + + dprintk(1, KERN_ERR "videocodec_detach: given codec not found!\n"); + return -EINVAL; +} + +int +videocodec_register (const struct videocodec *codec) +{ + struct codec_list *ptr, *h = codeclist_top; + + if (!codec) { + dprintk(1, KERN_ERR "videocodec_register: no data!\n"); + return -EINVAL; + } + + dprintk(2, + "videocodec: register '%s', type: %x, flags %lx, magic %lx\n", + codec->name, codec->type, codec->flags, codec->magic); + + ptr = kzalloc(sizeof(struct codec_list), GFP_KERNEL); + if (!ptr) { + dprintk(1, KERN_ERR "videocodec_register: no memory\n"); + return -ENOMEM; + } + ptr->codec = codec; + + if (!h) { + codeclist_top = ptr; + dprintk(4, "videocodec: hooked in as first element\n"); + } else { + while (h->next) + h = h->next; // find the end + h->next = ptr; + dprintk(4, "videocodec: hooked in after '%s'\n", + h->codec->name); + } + + return 0; +} + +int +videocodec_unregister (const struct videocodec *codec) +{ + struct codec_list *prev = NULL, *h = codeclist_top; + + if (!codec) { + dprintk(1, KERN_ERR "videocodec_unregister: no data!\n"); + return -EINVAL; + } + + dprintk(2, + "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n", + codec->name, codec->type, codec->flags, codec->magic); + + if (!h) { + dprintk(1, + KERN_ERR + "videocodec_unregister: no device left...\n"); + return -ENXIO; + } + + while (h) { + if (codec == h->codec) { + if (h->attached) { + dprintk(1, + KERN_ERR + "videocodec: '%s' is used\n", + h->codec->name); + return -EBUSY; + } + dprintk(3, "videocodec: unregister '%s' is ok.\n", + h->codec->name); + if (prev == NULL) { + codeclist_top = h->next; + dprintk(4, + "videocodec: delete first element\n"); + } else { + prev->next = h->next; + dprintk(4, + "videocodec: delete middle element\n"); + } + kfree(h); + return 0; + } + prev = h; + h = h->next; + } + + dprintk(1, + KERN_ERR + "videocodec_unregister: given codec not found!\n"); + return -EINVAL; +} + +#ifdef CONFIG_PROC_FS +static int 
proc_videocodecs_show(struct seq_file *m, void *v) +{ + struct codec_list *h = codeclist_top; + struct attached_list *a; + + seq_printf(m, "lave or attached aster name type flags magic "); + seq_printf(m, "(connected as)\n"); + + while (h) { + seq_printf(m, "S %32s %04x %08lx %08lx (TEMPLATE)\n", + h->codec->name, h->codec->type, + h->codec->flags, h->codec->magic); + a = h->list; + while (a) { + seq_printf(m, "M %32s %04x %08lx %08lx (%s)\n", + a->codec->master_data->name, + a->codec->master_data->type, + a->codec->master_data->flags, + a->codec->master_data->magic, + a->codec->name); + a = a->next; + } + h = h->next; + } + + return 0; +} + +static int proc_videocodecs_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_videocodecs_show, NULL); +} + +static const struct file_operations videocodecs_proc_fops = { + .owner = THIS_MODULE, + .open = proc_videocodecs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +/* ===================== */ +/* hook in driver module */ +/* ===================== */ +static int __init +videocodec_init (void) +{ +#ifdef CONFIG_PROC_FS + static struct proc_dir_entry *videocodec_proc_entry; +#endif + + printk(KERN_INFO "Linux video codec intermediate layer: %s\n", + VIDEOCODEC_VERSION); + +#ifdef CONFIG_PROC_FS + videocodec_proc_entry = proc_create("videocodecs", 0, NULL, &videocodecs_proc_fops); + if (!videocodec_proc_entry) { + dprintk(1, KERN_ERR "videocodec: can't init procfs.\n"); + } +#endif + return 0; +} + +static void __exit +videocodec_exit (void) +{ +#ifdef CONFIG_PROC_FS + remove_proc_entry("videocodecs", NULL); +#endif +} + +EXPORT_SYMBOL(videocodec_attach); +EXPORT_SYMBOL(videocodec_detach); +EXPORT_SYMBOL(videocodec_register); +EXPORT_SYMBOL(videocodec_unregister); + +module_init(videocodec_init); +module_exit(videocodec_exit); + +MODULE_AUTHOR("Wolfgang Scherr "); +MODULE_DESCRIPTION("Intermediate API module for video codecs " + VIDEOCODEC_VERSION); 
+MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/zoran/videocodec.h b/drivers/staging/media/zoran/videocodec.h new file mode 100644 index 000000000000..8ed5a0f7ac01 --- /dev/null +++ b/drivers/staging/media/zoran/videocodec.h @@ -0,0 +1,349 @@ +/* + * VIDEO MOTION CODECs internal API for video devices + * + * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's + * bound to a master device. + * + * (c) 2002 Wolfgang Scherr + * + * $Id: videocodec.h,v 1.1.2.4 2003/01/14 21:15:03 rbultje Exp $ + * + * ------------------------------------------------------------------------ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ------------------------------------------------------------------------ + */ + +/* =================== */ +/* general description */ +/* =================== */ + +/* Should ease the (re-)usage of drivers supporting cards with (different) + video codecs. The codecs register to this module their functionality, + and the processors (masters) can attach to them if they fit. + + The codecs are typically have a "strong" binding to their master - so I + don't think it makes sense to have a full blown interfacing as with e.g. + i2c. If you have an other opinion, let's discuss & implement it :-))) + + Usage: + + The slave has just to setup the videocodec structure and use two functions: + videocodec_register(codecdata); + videocodec_unregister(codecdata); + The best is just calling them at module (de-)initialisation. 
+ + The master sets up the structure videocodec_master and calls: + codecdata=videocodec_attach(master_codecdata); + videocodec_detach(codecdata); + + The slave is called during attach/detach via functions setup previously + during register. At that time, the master_data pointer is set up + and the slave can access any io registers of the master device (in the case + the slave is bound to it). Otherwise it doesn't need this functions and + therfor they may not be initialized. + + The other functions are just for convenience, as they are for sure used by + most/all of the codecs. The last ones may be omitted, too. + + See the structure declaration below for more information and which data has + to be set up for the master and the slave. + + ---------------------------------------------------------------------------- + The master should have "knowledge" of the slave and vice versa. So the data + structures sent to/from slave via set_data/get_data set_image/get_image are + device dependent and vary between MJPEG/MPEG/WAVELET/... devices. (!!!!) + ---------------------------------------------------------------------------- +*/ + + +/* ========================================== */ +/* description of the videocodec_io structure */ +/* ========================================== */ + +/* + ==== master setup ==== + name -> name of the device structure for reference and debugging + master_data -> data ref. for the master (e.g. the zr36055,57,67) + readreg -> ref. to read-fn from register (setup by master, used by slave) + writereg -> ref. to write-fn to register (setup by master, used by slave) + this two functions do the lowlevel I/O job + + ==== slave functionality setup ==== + slave_data -> data ref. for the slave (e.g. the zr36050,60) + check -> fn-ref. 
checks availability of an device, returns -EIO on failure or + the type on success + this makes espcecially sense if a driver module supports more than + one codec which may be quite similar to access, nevertheless it + is good for a first functionality check + + -- main functions you always need for compression/decompression -- + + set_mode -> this fn-ref. resets the entire codec, and sets up the mode + with the last defined norm/size (or device default if not + available) - it returns 0 if the mode is possible + set_size -> this fn-ref. sets the norm and image size for + compression/decompression (returns 0 on success) + the norm param is defined in videodev2.h (V4L2_STD_*) + + additional setup may be available, too - but the codec should work with + some default values even without this + + set_data -> sets device-specific data (tables, quality etc.) + get_data -> query device-specific data (tables, quality etc.) + + if the device delivers interrupts, they may be setup/handled here + setup_interrupt -> codec irq setup (not needed for 36050/60) + handle_interrupt -> codec irq handling (not needed for 36050/60) + + if the device delivers pictures, they may be handled here + put_image -> puts image data to the codec (not needed for 36050/60) + get_image -> gets image data from the codec (not needed for 36050/60) + the calls include frame numbers and flags (even/odd/...) + if needed and a flag which allows blocking until its ready +*/ + +/* ============== */ +/* user interface */ +/* ============== */ + +/* + Currently there is only a information display planned, as the layer + is not visible for the user space at all. + + Information is available via procfs. The current entry is "/proc/videocodecs" + but it makes sense to "hide" it in the /proc/video tree of v4l(2) --TODO--. 
+ +A example for such an output is: + +lave or attached aster name type flags magic (connected as) +S zr36050 0002 0000d001 00000000 (TEMPLATE) +M zr36055[0] 0001 0000c001 00000000 (zr36050[0]) +M zr36055[1] 0001 0000c001 00000000 (zr36050[1]) + +*/ + + +/* =============================================== */ +/* special defines for the videocodec_io structure */ +/* =============================================== */ + +#ifndef __LINUX_VIDEOCODEC_H +#define __LINUX_VIDEOCODEC_H + +#include + +#define CODEC_DO_COMPRESSION 0 +#define CODEC_DO_EXPANSION 1 + +/* this are the current codec flags I think they are needed */ +/* -> type value in structure */ +#define CODEC_FLAG_JPEG 0x00000001L // JPEG codec +#define CODEC_FLAG_MPEG 0x00000002L // MPEG1/2/4 codec +#define CODEC_FLAG_DIVX 0x00000004L // DIVX codec +#define CODEC_FLAG_WAVELET 0x00000008L // WAVELET codec + // room for other types + +#define CODEC_FLAG_MAGIC 0x00000800L // magic key must match +#define CODEC_FLAG_HARDWARE 0x00001000L // is a hardware codec +#define CODEC_FLAG_VFE 0x00002000L // has direct video frontend +#define CODEC_FLAG_ENCODER 0x00004000L // compression capability +#define CODEC_FLAG_DECODER 0x00008000L // decompression capability +#define CODEC_FLAG_NEEDIRQ 0x00010000L // needs irq handling +#define CODEC_FLAG_RDWRPIC 0x00020000L // handles picture I/O + +/* a list of modes, some are just examples (is there any HW?) 
*/ +#define CODEC_MODE_BJPG 0x0001 // Baseline JPEG +#define CODEC_MODE_LJPG 0x0002 // Lossless JPEG +#define CODEC_MODE_MPEG1 0x0003 // MPEG 1 +#define CODEC_MODE_MPEG2 0x0004 // MPEG 2 +#define CODEC_MODE_MPEG4 0x0005 // MPEG 4 +#define CODEC_MODE_MSDIVX 0x0006 // MS DivX +#define CODEC_MODE_ODIVX 0x0007 // Open DivX +#define CODEC_MODE_WAVELET 0x0008 // Wavelet + +/* this are the current codec types I want to implement */ +/* -> type value in structure */ +#define CODEC_TYPE_NONE 0 +#define CODEC_TYPE_L64702 1 +#define CODEC_TYPE_ZR36050 2 +#define CODEC_TYPE_ZR36016 3 +#define CODEC_TYPE_ZR36060 4 + +/* the type of data may be enhanced by future implementations (data-fn.'s) */ +/* -> used in command */ +#define CODEC_G_STATUS 0x0000 /* codec status (query only) */ +#define CODEC_S_CODEC_MODE 0x0001 /* codec mode (baseline JPEG, MPEG1,... */ +#define CODEC_G_CODEC_MODE 0x8001 +#define CODEC_S_VFE 0x0002 /* additional video frontend setup */ +#define CODEC_G_VFE 0x8002 +#define CODEC_S_MMAP 0x0003 /* MMAP setup (if available) */ + +#define CODEC_S_JPEG_TDS_BYTE 0x0010 /* target data size in bytes */ +#define CODEC_G_JPEG_TDS_BYTE 0x8010 +#define CODEC_S_JPEG_SCALE 0x0011 /* scaling factor for quant. 
tables */ +#define CODEC_G_JPEG_SCALE 0x8011 +#define CODEC_S_JPEG_HDT_DATA 0x0018 /* huffman-tables */ +#define CODEC_G_JPEG_HDT_DATA 0x8018 +#define CODEC_S_JPEG_QDT_DATA 0x0019 /* quantizing-tables */ +#define CODEC_G_JPEG_QDT_DATA 0x8019 +#define CODEC_S_JPEG_APP_DATA 0x001A /* APP marker */ +#define CODEC_G_JPEG_APP_DATA 0x801A +#define CODEC_S_JPEG_COM_DATA 0x001B /* COM marker */ +#define CODEC_G_JPEG_COM_DATA 0x801B + +#define CODEC_S_PRIVATE 0x1000 /* "private" commands start here */ +#define CODEC_G_PRIVATE 0x9000 + +#define CODEC_G_FLAG 0x8000 /* this is how 'get' is detected */ + +/* types of transfer, directly user space or a kernel buffer (image-fn.'s) */ +/* -> used in get_image, put_image */ +#define CODEC_TRANSFER_KERNEL 0 /* use "memcopy" */ +#define CODEC_TRANSFER_USER 1 /* use "to/from_user" */ + + +/* ========================= */ +/* the structures itself ... */ +/* ========================= */ + +struct vfe_polarity { + unsigned int vsync_pol:1; + unsigned int hsync_pol:1; + unsigned int field_pol:1; + unsigned int blank_pol:1; + unsigned int subimg_pol:1; + unsigned int poe_pol:1; + unsigned int pvalid_pol:1; + unsigned int vclk_pol:1; +}; + +struct vfe_settings { + __u32 x, y; /* Offsets into image */ + __u32 width, height; /* Area to capture */ + __u16 decimation; /* Decimation divider */ + __u16 flags; /* Flags for capture */ + __u16 quality; /* quality of the video */ +}; + +struct tvnorm { + u16 Wt, Wa, HStart, HSyncStart, Ht, Ha, VStart; +}; + +struct jpeg_com_marker { + int len; /* number of usable bytes in data */ + char data[60]; +}; + +struct jpeg_app_marker { + int appn; /* number app segment */ + int len; /* number of usable bytes in data */ + char data[60]; +}; + +struct videocodec { + struct module *owner; + /* -- filled in by slave device during register -- */ + char name[32]; + unsigned long magic; /* may be used for client<->master attaching */ + unsigned long flags; /* functionality flags */ + unsigned int type; /* codec 
type */ + + /* -- these is filled in later during master device attach -- */ + + struct videocodec_master *master_data; + + /* -- these are filled in by the slave device during register -- */ + + void *data; /* private slave data */ + + /* attach/detach client functions (indirect call) */ + int (*setup) (struct videocodec * codec); + int (*unset) (struct videocodec * codec); + + /* main functions, every client needs them for sure! */ + // set compression or decompression (or freeze, stop, standby, etc) + int (*set_mode) (struct videocodec * codec, + int mode); + // setup picture size and norm (for the codec's video frontend) + int (*set_video) (struct videocodec * codec, + struct tvnorm * norm, + struct vfe_settings * cap, + struct vfe_polarity * pol); + // other control commands, also mmap setup etc. + int (*control) (struct videocodec * codec, + int type, + int size, + void *data); + + /* additional setup/query/processing (may be NULL pointer) */ + // interrupt setup / handling (for irq's delivered by master) + int (*setup_interrupt) (struct videocodec * codec, + long mode); + int (*handle_interrupt) (struct videocodec * codec, + int source, + long flag); + // picture interface (if any) + long (*put_image) (struct videocodec * codec, + int tr_type, + int block, + long *fr_num, + long *flag, + long size, + void *buf); + long (*get_image) (struct videocodec * codec, + int tr_type, + int block, + long *fr_num, + long *flag, + long size, + void *buf); +}; + +struct videocodec_master { + /* -- filled in by master device for registration -- */ + char name[32]; + unsigned long magic; /* may be used for client<->master attaching */ + unsigned long flags; /* functionality flags */ + unsigned int type; /* master type */ + + void *data; /* private master data */ + + __u32(*readreg) (struct videocodec * codec, + __u16 reg); + void (*writereg) (struct videocodec * codec, + __u16 reg, + __u32 value); +}; + + +/* ================================================= */ +/* function 
prototypes of the master/slave interface */ +/* ================================================= */ + +/* attach and detach commands for the master */ +// * master structure needs to be kmalloc'ed before calling attach +// and free'd after calling detach +// * returns pointer on success, NULL on failure +extern struct videocodec *videocodec_attach(struct videocodec_master *); +// * 0 on success, <0 (errno) on failure +extern int videocodec_detach(struct videocodec *); + +/* register and unregister commands for the slaves */ +// * 0 on success, <0 (errno) on failure +extern int videocodec_register(const struct videocodec *); +// * 0 on success, <0 (errno) on failure +extern int videocodec_unregister(const struct videocodec *); + +/* the other calls are directly done via the videocodec structure! */ + +#endif /*ifndef __LINUX_VIDEOCODEC_H */ diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h new file mode 100644 index 000000000000..9bb3c21aa275 --- /dev/null +++ b/drivers/staging/media/zoran/zoran.h @@ -0,0 +1,402 @@ +/* + * zoran - Iomega Buz driver + * + * Copyright (C) 1999 Rainer Johanni + * + * based on + * + * zoran.0.0.3 Copyright (C) 1998 Dave Perks + * + * and + * + * bttv - Bt848 frame grabber driver + * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) + * & Marcus Metzler (mocm@thp.uni-koeln.de) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _BUZ_H_ +#define _BUZ_H_ + +#include +#include +#include + +struct zoran_sync { + unsigned long frame; /* number of buffer that has been free'd */ + unsigned long length; /* number of code bytes in buffer (capture only) */ + unsigned long seq; /* frame sequence number */ + struct timeval timestamp; /* timestamp */ +}; + + +#define ZORAN_NAME "ZORAN" /* name of the device */ + +#define ZR_DEVNAME(zr) ((zr)->name) + +#define BUZ_MAX_WIDTH (zr->timing->Wa) +#define BUZ_MAX_HEIGHT (zr->timing->Ha) +#define BUZ_MIN_WIDTH 32 /* never display less than 32 pixels */ +#define BUZ_MIN_HEIGHT 24 /* never display less than 24 rows */ + +#define BUZ_NUM_STAT_COM 4 +#define BUZ_MASK_STAT_COM 3 + +#define BUZ_MAX_FRAME 256 /* Must be a power of 2 */ +#define BUZ_MASK_FRAME 255 /* Must be BUZ_MAX_FRAME-1 */ + +#define BUZ_MAX_INPUT 16 + +#if VIDEO_MAX_FRAME <= 32 +# define V4L_MAX_FRAME 32 +#elif VIDEO_MAX_FRAME <= 64 +# define V4L_MAX_FRAME 64 +#else +# error "Too many video frame buffers to handle" +#endif +#define V4L_MASK_FRAME (V4L_MAX_FRAME - 1) + +#define MAX_FRAME (BUZ_MAX_FRAME > VIDEO_MAX_FRAME ? 
BUZ_MAX_FRAME : VIDEO_MAX_FRAME) + +#include "zr36057.h" + +enum card_type { + UNKNOWN = -1, + + /* Pinnacle/Miro */ + DC10_old, /* DC30 like */ + DC10_new, /* DC10plus like */ + DC10plus, + DC30, + DC30plus, + + /* Linux Media Labs */ + LML33, + LML33R10, + + /* Iomega */ + BUZ, + + /* AverMedia */ + AVS6EYES, + + /* total number of cards */ + NUM_CARDS +}; + +enum zoran_codec_mode { + BUZ_MODE_IDLE, /* nothing going on */ + BUZ_MODE_MOTION_COMPRESS, /* grabbing frames */ + BUZ_MODE_MOTION_DECOMPRESS, /* playing frames */ + BUZ_MODE_STILL_COMPRESS, /* still frame conversion */ + BUZ_MODE_STILL_DECOMPRESS /* still frame conversion */ +}; + +enum zoran_buffer_state { + BUZ_STATE_USER, /* buffer is owned by application */ + BUZ_STATE_PEND, /* buffer is queued in pend[] ready to feed to I/O */ + BUZ_STATE_DMA, /* buffer is queued in dma[] for I/O */ + BUZ_STATE_DONE /* buffer is ready to return to application */ +}; + +enum zoran_map_mode { + ZORAN_MAP_MODE_RAW, + ZORAN_MAP_MODE_JPG_REC, +#define ZORAN_MAP_MODE_JPG ZORAN_MAP_MODE_JPG_REC + ZORAN_MAP_MODE_JPG_PLAY, +}; + +enum gpio_type { + ZR_GPIO_JPEG_SLEEP = 0, + ZR_GPIO_JPEG_RESET, + ZR_GPIO_JPEG_FRAME, + ZR_GPIO_VID_DIR, + ZR_GPIO_VID_EN, + ZR_GPIO_VID_RESET, + ZR_GPIO_CLK_SEL1, + ZR_GPIO_CLK_SEL2, + ZR_GPIO_MAX, +}; + +enum gpcs_type { + GPCS_JPEG_RESET = 0, + GPCS_JPEG_START, + GPCS_MAX, +}; + +struct zoran_format { + char *name; + __u32 fourcc; + int colorspace; + int depth; + __u32 flags; + __u32 vfespfr; +}; +/* flags */ +#define ZORAN_FORMAT_COMPRESSED 1<<0 +#define ZORAN_FORMAT_OVERLAY 1<<1 +#define ZORAN_FORMAT_CAPTURE 1<<2 +#define ZORAN_FORMAT_PLAYBACK 1<<3 + +/* overlay-settings */ +struct zoran_overlay_settings { + int is_set; + int x, y, width, height; /* position */ + int clipcount; /* position and number of clips */ + const struct zoran_format *format; /* overlay format */ +}; + +/* v4l-capture settings */ +struct zoran_v4l_settings { + int width, height, bytesperline; /* capture size */ + const 
struct zoran_format *format; /* capture format */ +}; + +/* jpg-capture/-playback settings */ +struct zoran_jpg_settings { + int decimation; /* this bit is used to set everything to default */ + int HorDcm, VerDcm, TmpDcm; /* capture decimation settings (TmpDcm=1 means both fields) */ + int field_per_buff, odd_even; /* field-settings (odd_even=1 (+TmpDcm=1) means top-field-first) */ + int img_x, img_y, img_width, img_height; /* crop settings (subframe capture) */ + struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */ +}; + +struct zoran_fh; + +struct zoran_mapping { + struct zoran_fh *fh; + atomic_t count; +}; + +struct zoran_buffer { + struct zoran_mapping *map; + enum zoran_buffer_state state; /* state: unused/pending/dma/done */ + struct zoran_sync bs; /* DONE: info to return to application */ + union { + struct { + __le32 *frag_tab; /* addresses of frag table */ + u32 frag_tab_bus; /* same value cached to save time in ISR */ + } jpg; + struct { + char *fbuffer; /* virtual address of frame buffer */ + unsigned long fbuffer_phys;/* physical address of frame buffer */ + unsigned long fbuffer_bus;/* bus address of frame buffer */ + } v4l; + }; +}; + +enum zoran_lock_activity { + ZORAN_FREE, /* free for use */ + ZORAN_ACTIVE, /* active but unlocked */ + ZORAN_LOCKED, /* locked */ +}; + +/* buffer collections */ +struct zoran_buffer_col { + enum zoran_lock_activity active; /* feature currently in use? 
*/ + unsigned int num_buffers, buffer_size; + struct zoran_buffer buffer[MAX_FRAME]; /* buffers */ + u8 allocated; /* Flag if buffers are allocated */ + u8 need_contiguous; /* Flag if contiguous buffers are needed */ + /* only applies to jpg buffers, raw buffers are always contiguous */ +}; + +struct zoran; + +/* zoran_fh contains per-open() settings */ +struct zoran_fh { + struct v4l2_fh fh; + struct zoran *zr; + + enum zoran_map_mode map_mode; /* Flag which bufferset will map by next mmap() */ + + struct zoran_overlay_settings overlay_settings; + u32 *overlay_mask; /* overlay mask */ + enum zoran_lock_activity overlay_active;/* feature currently in use? */ + + struct zoran_buffer_col buffers; /* buffers' info */ + + struct zoran_v4l_settings v4l_settings; /* structure with a lot of things to play with */ + struct zoran_jpg_settings jpg_settings; /* structure with a lot of things to play with */ +}; + +struct card_info { + enum card_type type; + char name[32]; + const char *i2c_decoder; /* i2c decoder device */ + const unsigned short *addrs_decoder; + const char *i2c_encoder; /* i2c encoder device */ + const unsigned short *addrs_encoder; + u16 video_vfe, video_codec; /* videocodec types */ + u16 audio_chip; /* audio type */ + + int inputs; /* number of video inputs */ + struct input { + int muxsel; + char name[32]; + } input[BUZ_MAX_INPUT]; + + v4l2_std_id norms; + struct tvnorm *tvn[3]; /* supported TV norms */ + + u32 jpeg_int; /* JPEG interrupt */ + u32 vsync_int; /* VSYNC interrupt */ + s8 gpio[ZR_GPIO_MAX]; + u8 gpcs[GPCS_MAX]; + + struct vfe_polarity vfe_pol; + u8 gpio_pol[ZR_GPIO_MAX]; + + /* is the /GWS line connected? 
*/ + u8 gws_not_connected; + + /* avs6eyes mux setting */ + u8 input_mux; + + void (*init) (struct zoran * zr); +}; + +struct zoran { + struct v4l2_device v4l2_dev; + struct v4l2_ctrl_handler hdl; + struct video_device *video_dev; + + struct i2c_adapter i2c_adapter; /* */ + struct i2c_algo_bit_data i2c_algo; /* */ + u32 i2cbr; + + struct v4l2_subdev *decoder; /* video decoder sub-device */ + struct v4l2_subdev *encoder; /* video encoder sub-device */ + + struct videocodec *codec; /* video codec */ + struct videocodec *vfe; /* video front end */ + + struct mutex lock; /* file ops serialize lock */ + + u8 initialized; /* flag if zoran has been correctly initialized */ + int user; /* number of current users */ + struct card_info card; + struct tvnorm *timing; + + unsigned short id; /* number of this device */ + char name[32]; /* name of this device */ + struct pci_dev *pci_dev; /* PCI device */ + unsigned char revision; /* revision of zr36057 */ + unsigned char __iomem *zr36057_mem;/* pointer to mapped IO memory */ + + spinlock_t spinlock; /* Spinlock */ + + /* Video for Linux parameters */ + int input; /* card's norm and input */ + v4l2_std_id norm; + + /* Current buffer params */ + void *vbuf_base; + int vbuf_height, vbuf_width; + int vbuf_depth; + int vbuf_bytesperline; + + struct zoran_overlay_settings overlay_settings; + u32 *overlay_mask; /* overlay mask */ + enum zoran_lock_activity overlay_active; /* feature currently in use? 
*/ + + wait_queue_head_t v4l_capq; + + int v4l_overlay_active; /* Overlay grab is activated */ + int v4l_memgrab_active; /* Memory grab is activated */ + + int v4l_grab_frame; /* Frame number being currently grabbed */ +#define NO_GRAB_ACTIVE (-1) + unsigned long v4l_grab_seq; /* Number of frames grabbed */ + struct zoran_v4l_settings v4l_settings; /* structure with a lot of things to play with */ + + /* V4L grab queue of frames pending */ + unsigned long v4l_pend_head; + unsigned long v4l_pend_tail; + unsigned long v4l_sync_tail; + int v4l_pend[V4L_MAX_FRAME]; + struct zoran_buffer_col v4l_buffers; /* V4L buffers' info */ + + /* Buz MJPEG parameters */ + enum zoran_codec_mode codec_mode; /* status of codec */ + struct zoran_jpg_settings jpg_settings; /* structure with a lot of things to play with */ + + wait_queue_head_t jpg_capq; /* wait here for grab to finish */ + + /* grab queue counts/indices, mask with BUZ_MASK_STAT_COM before using as index */ + /* (dma_head - dma_tail) is number active in DMA, must be <= BUZ_NUM_STAT_COM */ + /* (value & BUZ_MASK_STAT_COM) corresponds to index in stat_com table */ + unsigned long jpg_que_head; /* Index where to put next buffer which is queued */ + unsigned long jpg_dma_head; /* Index of next buffer which goes into stat_com */ + unsigned long jpg_dma_tail; /* Index of last buffer in stat_com */ + unsigned long jpg_que_tail; /* Index of last buffer in queue */ + unsigned long jpg_seq_num; /* count of frames since grab/play started */ + unsigned long jpg_err_seq; /* last seq_num before error */ + unsigned long jpg_err_shift; + unsigned long jpg_queued_num; /* count of frames queued since grab/play started */ + + /* zr36057's code buffer table */ + __le32 *stat_com; /* stat_com[i] is indexed by dma_head/tail & BUZ_MASK_STAT_COM */ + + /* (value & BUZ_MASK_FRAME) corresponds to index in pend[] queue */ + int jpg_pend[BUZ_MAX_FRAME]; + + /* array indexed by frame number */ + struct zoran_buffer_col jpg_buffers; /* MJPEG buffers' 
info */ + + /* Additional stuff for testing */ +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *zoran_proc; +#else + void *zoran_proc; +#endif + int testing; + int jpeg_error; + int intr_counter_GIRQ1; + int intr_counter_GIRQ0; + int intr_counter_CodRepIRQ; + int intr_counter_JPEGRepIRQ; + int field_counter; + int IRQ1_in; + int IRQ1_out; + int JPEG_in; + int JPEG_out; + int JPEG_0; + int JPEG_1; + int END_event_missed; + int JPEG_missed; + int JPEG_error; + int num_errors; + int JPEG_max_missed; + int JPEG_min_missed; + + u32 last_isr; + unsigned long frame_num; + + wait_queue_head_t test_q; +}; + +static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev) +{ + return container_of(v4l2_dev, struct zoran, v4l2_dev); +} + +/* There was something called _ALPHA_BUZ that used the PCI address instead of + * the kernel iomapped address for btread/btwrite. */ +#define btwrite(dat,adr) writel((dat), zr->zr36057_mem+(adr)) +#define btread(adr) readl(zr->zr36057_mem+(adr)) + +#define btand(dat,adr) btwrite((dat) & btread(adr), adr) +#define btor(dat,adr) btwrite((dat) | btread(adr), adr) +#define btaor(dat,mask,adr) btwrite((dat) | ((mask) & btread(adr)), adr) + +#endif diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c new file mode 100644 index 000000000000..a6b9ebd20263 --- /dev/null +++ b/drivers/staging/media/zoran/zoran_card.c @@ -0,0 +1,1524 @@ +/* + * Zoran zr36057/zr36067 PCI controller driver, for the + * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux + * Media Labs LML33/LML33R10. 
+ * + * This part handles card-specific data and detection + * + * Copyright (C) 2000 Serguei Miridonov + * + * Currently maintained by: + * Ronald Bultje + * Laurent Pinchart + * Mailinglist + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "videocodec.h" +#include "zoran.h" +#include "zoran_card.h" +#include "zoran_device.h" +#include "zoran_procfs.h" + +extern const struct zoran_format zoran_formats[]; + +static int card[BUZ_MAX] = { [0 ... (BUZ_MAX-1)] = -1 }; +module_param_array(card, int, NULL, 0444); +MODULE_PARM_DESC(card, "Card type"); + +/* + The video mem address of the video card. + The driver has a little database for some videocards + to determine it from there. If your video card is not in there + you have either to give it to the driver as a parameter + or set in in a VIDIOCSFBUF ioctl + */ + +static unsigned long vidmem; /* default = 0 - Video memory base address */ +module_param_hw(vidmem, ulong, iomem, 0444); +MODULE_PARM_DESC(vidmem, "Default video memory base address"); + +/* + Default input and video norm at startup of the driver. 
+*/ + +static unsigned int default_input; /* default 0 = Composite, 1 = S-Video */ +module_param(default_input, uint, 0444); +MODULE_PARM_DESC(default_input, + "Default input (0=Composite, 1=S-Video, 2=Internal)"); + +static int default_mux = 1; /* 6 Eyes input selection */ +module_param(default_mux, int, 0644); +MODULE_PARM_DESC(default_mux, + "Default 6 Eyes mux setting (Input selection)"); + +static int default_norm; /* default 0 = PAL, 1 = NTSC 2 = SECAM */ +module_param(default_norm, int, 0444); +MODULE_PARM_DESC(default_norm, "Default norm (0=PAL, 1=NTSC, 2=SECAM)"); + +/* /dev/videoN, -1 for autodetect */ +static int video_nr[BUZ_MAX] = { [0 ... (BUZ_MAX-1)] = -1 }; +module_param_array(video_nr, int, NULL, 0444); +MODULE_PARM_DESC(video_nr, "Video device number (-1=Auto)"); + +int v4l_nbufs = 4; +int v4l_bufsize = 864; /* Everybody should be able to work with this setting */ +module_param(v4l_nbufs, int, 0644); +MODULE_PARM_DESC(v4l_nbufs, "Maximum number of V4L buffers to use"); +module_param(v4l_bufsize, int, 0644); +MODULE_PARM_DESC(v4l_bufsize, "Maximum size per V4L buffer (in kB)"); + +int jpg_nbufs = 32; +int jpg_bufsize = 512; /* max size for 100% quality full-PAL frame */ +module_param(jpg_nbufs, int, 0644); +MODULE_PARM_DESC(jpg_nbufs, "Maximum number of JPG buffers to use"); +module_param(jpg_bufsize, int, 0644); +MODULE_PARM_DESC(jpg_bufsize, "Maximum size per JPG buffer (in kB)"); + +int pass_through = 0; /* 1=Pass through TV signal when device is not used */ + /* 0=Show color bar when device is not used (LML33: only if lml33dpath=1) */ +module_param(pass_through, int, 0644); +MODULE_PARM_DESC(pass_through, + "Pass TV signal through to TV-out when idling"); + +int zr36067_debug = 1; +module_param_named(debug, zr36067_debug, int, 0644); +MODULE_PARM_DESC(debug, "Debug level (0-5)"); + +#define ZORAN_VERSION "0.10.1" + +MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver"); +MODULE_AUTHOR("Serguei Miridonov"); +MODULE_LICENSE("GPL"); 
+MODULE_VERSION(ZORAN_VERSION); + +#define ZR_DEVICE(subven, subdev, data) { \ + .vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \ + .subvendor = (subven), .subdevice = (subdev), .driver_data = (data) } + +static const struct pci_device_id zr36067_pci_tbl[] = { + ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC10PLUS, DC10plus), + ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC30PLUS, DC30plus), + ZR_DEVICE(PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, PCI_DEVICE_ID_LML_33R10, LML33R10), + ZR_DEVICE(PCI_VENDOR_ID_IOMEGA, PCI_DEVICE_ID_IOMEGA_BUZ, BUZ), + ZR_DEVICE(PCI_ANY_ID, PCI_ANY_ID, NUM_CARDS), + {0} +}; +MODULE_DEVICE_TABLE(pci, zr36067_pci_tbl); + +static unsigned int zoran_num; /* number of cards found */ + +/* videocodec bus functions ZR36060 */ +static u32 +zr36060_read (struct videocodec *codec, + u16 reg) +{ + struct zoran *zr = (struct zoran *) codec->master_data->data; + __u32 data; + + if (post_office_wait(zr) + || post_office_write(zr, 0, 1, reg >> 8) + || post_office_write(zr, 0, 2, reg & 0xff)) { + return -1; + } + + data = post_office_read(zr, 0, 3) & 0xff; + return data; +} + +static void +zr36060_write (struct videocodec *codec, + u16 reg, + u32 val) +{ + struct zoran *zr = (struct zoran *) codec->master_data->data; + + if (post_office_wait(zr) + || post_office_write(zr, 0, 1, reg >> 8) + || post_office_write(zr, 0, 2, reg & 0xff)) { + return; + } + + post_office_write(zr, 0, 3, val & 0xff); +} + +/* videocodec bus functions ZR36050 */ +static u32 +zr36050_read (struct videocodec *codec, + u16 reg) +{ + struct zoran *zr = (struct zoran *) codec->master_data->data; + __u32 data; + + if (post_office_wait(zr) + || post_office_write(zr, 1, 0, reg >> 2)) { // reg. HIGHBYTES + return -1; + } + + data = post_office_read(zr, 0, reg & 0x03) & 0xff; // reg. 
LOWBYTES + read + return data; +} + +static void +zr36050_write (struct videocodec *codec, + u16 reg, + u32 val) +{ + struct zoran *zr = (struct zoran *) codec->master_data->data; + + if (post_office_wait(zr) + || post_office_write(zr, 1, 0, reg >> 2)) { // reg. HIGHBYTES + return; + } + + post_office_write(zr, 0, reg & 0x03, val & 0xff); // reg. LOWBYTES + wr. data +} + +/* videocodec bus functions ZR36016 */ +static u32 +zr36016_read (struct videocodec *codec, + u16 reg) +{ + struct zoran *zr = (struct zoran *) codec->master_data->data; + __u32 data; + + if (post_office_wait(zr)) { + return -1; + } + + data = post_office_read(zr, 2, reg & 0x03) & 0xff; // read + return data; +} + +/* hack for in zoran_device.c */ +void +zr36016_write (struct videocodec *codec, + u16 reg, + u32 val) +{ + struct zoran *zr = (struct zoran *) codec->master_data->data; + + if (post_office_wait(zr)) { + return; + } + + post_office_write(zr, 2, reg & 0x03, val & 0x0ff); // wr. data +} + +/* + * Board specific information + */ + +static void +dc10_init (struct zoran *zr) +{ + dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); + + /* Pixel clock selection */ + GPIO(zr, 4, 0); + GPIO(zr, 5, 1); + /* Enable the video bus sync signals */ + GPIO(zr, 7, 0); +} + +static void +dc10plus_init (struct zoran *zr) +{ + dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); +} + +static void +buz_init (struct zoran *zr) +{ + dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); + + /* some stuff from Iomega */ + pci_write_config_dword(zr->pci_dev, 0xfc, 0x90680f15); + pci_write_config_dword(zr->pci_dev, 0x0c, 0x00012020); + pci_write_config_dword(zr->pci_dev, 0xe8, 0xc0200000); +} + +static void +lml33_init (struct zoran *zr) +{ + dprintk(3, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); + + GPIO(zr, 2, 1); // Set Composite input/output +} + +static void +avs6eyes_init (struct zoran *zr) +{ + // AverMedia 6-Eyes original driver by Christer Weinigel + + // Lifted straight 
from Christer's old driver and + // modified slightly by Martin Samuelsson. + + int mux = default_mux; /* 1 = BT866, 7 = VID1 */ + + GPIO(zr, 4, 1); /* Bt866 SLEEP on */ + udelay(2); + + GPIO(zr, 0, 1); /* ZR36060 /RESET on */ + GPIO(zr, 1, 0); /* ZR36060 /SLEEP on */ + GPIO(zr, 2, mux & 1); /* MUX S0 */ + GPIO(zr, 3, 0); /* /FRAME on */ + GPIO(zr, 4, 0); /* Bt866 SLEEP off */ + GPIO(zr, 5, mux & 2); /* MUX S1 */ + GPIO(zr, 6, 0); /* ? */ + GPIO(zr, 7, mux & 4); /* MUX S2 */ + +} + +static char * +codecid_to_modulename (u16 codecid) +{ + char *name = NULL; + + switch (codecid) { + case CODEC_TYPE_ZR36060: + name = "zr36060"; + break; + case CODEC_TYPE_ZR36050: + name = "zr36050"; + break; + case CODEC_TYPE_ZR36016: + name = "zr36016"; + break; + } + + return name; +} + +// struct tvnorm { +// u16 Wt, Wa, HStart, HSyncStart, Ht, Ha, VStart; +// }; + +static struct tvnorm f50sqpixel = { 944, 768, 83, 880, 625, 576, 16 }; +static struct tvnorm f60sqpixel = { 780, 640, 51, 716, 525, 480, 12 }; +static struct tvnorm f50ccir601 = { 864, 720, 75, 804, 625, 576, 18 }; +static struct tvnorm f60ccir601 = { 858, 720, 57, 788, 525, 480, 16 }; + +static struct tvnorm f50ccir601_lml33 = { 864, 720, 75+34, 804, 625, 576, 18 }; +static struct tvnorm f60ccir601_lml33 = { 858, 720, 57+34, 788, 525, 480, 16 }; + +/* The DC10 (57/16/50) uses VActive as HSync, so HStart must be 0 */ +static struct tvnorm f50sqpixel_dc10 = { 944, 768, 0, 880, 625, 576, 0 }; +static struct tvnorm f60sqpixel_dc10 = { 780, 640, 0, 716, 525, 480, 12 }; + +/* FIXME: I cannot swap U and V in saa7114, so i do one + * pixel left shift in zoran (75 -> 74) + * (Maxim Yevtyushkin ) */ +static struct tvnorm f50ccir601_lm33r10 = { 864, 720, 74+54, 804, 625, 576, 18 }; +static struct tvnorm f60ccir601_lm33r10 = { 858, 720, 56+54, 788, 525, 480, 16 }; + +/* FIXME: The ks0127 seem incapable of swapping U and V, too, which is why I + * copy Maxim's left shift hack for the 6 Eyes. 
+ * + * Christer's driver used the unshifted norms, though... + * /Sam */ +static struct tvnorm f50ccir601_avs6eyes = { 864, 720, 74, 804, 625, 576, 18 }; +static struct tvnorm f60ccir601_avs6eyes = { 858, 720, 56, 788, 525, 480, 16 }; + +static const unsigned short vpx3220_addrs[] = { 0x43, 0x47, I2C_CLIENT_END }; +static const unsigned short saa7110_addrs[] = { 0x4e, 0x4f, I2C_CLIENT_END }; +static const unsigned short saa7111_addrs[] = { 0x25, 0x24, I2C_CLIENT_END }; +static const unsigned short saa7114_addrs[] = { 0x21, 0x20, I2C_CLIENT_END }; +static const unsigned short adv717x_addrs[] = { 0x6a, 0x6b, 0x2a, 0x2b, I2C_CLIENT_END }; +static const unsigned short ks0127_addrs[] = { 0x6c, 0x6d, I2C_CLIENT_END }; +static const unsigned short saa7185_addrs[] = { 0x44, I2C_CLIENT_END }; +static const unsigned short bt819_addrs[] = { 0x45, I2C_CLIENT_END }; +static const unsigned short bt856_addrs[] = { 0x44, I2C_CLIENT_END }; +static const unsigned short bt866_addrs[] = { 0x44, I2C_CLIENT_END }; + +static struct card_info zoran_cards[NUM_CARDS] = { + { + .type = DC10_old, + .name = "DC10(old)", + .i2c_decoder = "vpx3220a", + .addrs_decoder = vpx3220_addrs, + .video_codec = CODEC_TYPE_ZR36050, + .video_vfe = CODEC_TYPE_ZR36016, + + .inputs = 3, + .input = { + { 1, "Composite" }, + { 2, "S-Video" }, + { 0, "Internal/comp" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, + .tvn = { + &f50sqpixel_dc10, + &f60sqpixel_dc10, + &f50sqpixel_dc10 + }, + .jpeg_int = 0, + .vsync_int = ZR36057_ISR_GIRQ1, + .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 }, + .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 }, + .gpcs = { -1, 0 }, + .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, + .gws_not_connected = 0, + .input_mux = 0, + .init = &dc10_init, + }, { + .type = DC10_new, + .name = "DC10(new)", + .i2c_decoder = "saa7110", + .addrs_decoder = saa7110_addrs, + .i2c_encoder = "adv7175", + .addrs_encoder = adv717x_addrs, + .video_codec = CODEC_TYPE_ZR36060, + + .inputs = 3, + .input = { + { 0, "Composite" }, 
+ { 7, "S-Video" }, + { 5, "Internal/comp" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, + .tvn = { + &f50sqpixel, + &f60sqpixel, + &f50sqpixel}, + .jpeg_int = ZR36057_ISR_GIRQ0, + .vsync_int = ZR36057_ISR_GIRQ1, + .gpio = { 3, 0, 6, 1, 2, -1, 4, 5 }, + .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, + .gpcs = { -1, 1}, + .vfe_pol = { 1, 1, 1, 1, 0, 0, 0, 0 }, + .gws_not_connected = 0, + .input_mux = 0, + .init = &dc10plus_init, + }, { + .type = DC10plus, + .name = "DC10plus", + .i2c_decoder = "saa7110", + .addrs_decoder = saa7110_addrs, + .i2c_encoder = "adv7175", + .addrs_encoder = adv717x_addrs, + .video_codec = CODEC_TYPE_ZR36060, + + .inputs = 3, + .input = { + { 0, "Composite" }, + { 7, "S-Video" }, + { 5, "Internal/comp" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, + .tvn = { + &f50sqpixel, + &f60sqpixel, + &f50sqpixel + }, + .jpeg_int = ZR36057_ISR_GIRQ0, + .vsync_int = ZR36057_ISR_GIRQ1, + .gpio = { 3, 0, 6, 1, 2, -1, 4, 5 }, + .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, + .gpcs = { -1, 1 }, + .vfe_pol = { 1, 1, 1, 1, 0, 0, 0, 0 }, + .gws_not_connected = 0, + .input_mux = 0, + .init = &dc10plus_init, + }, { + .type = DC30, + .name = "DC30", + .i2c_decoder = "vpx3220a", + .addrs_decoder = vpx3220_addrs, + .i2c_encoder = "adv7175", + .addrs_encoder = adv717x_addrs, + .video_codec = CODEC_TYPE_ZR36050, + .video_vfe = CODEC_TYPE_ZR36016, + + .inputs = 3, + .input = { + { 1, "Composite" }, + { 2, "S-Video" }, + { 0, "Internal/comp" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, + .tvn = { + &f50sqpixel_dc10, + &f60sqpixel_dc10, + &f50sqpixel_dc10 + }, + .jpeg_int = 0, + .vsync_int = ZR36057_ISR_GIRQ1, + .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 }, + .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 }, + .gpcs = { -1, 0 }, + .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, + .gws_not_connected = 0, + .input_mux = 0, + .init = &dc10_init, + }, { + .type = DC30plus, + .name = "DC30plus", + .i2c_decoder = "vpx3220a", + .addrs_decoder = vpx3220_addrs, + 
.i2c_encoder = "adv7175", + .addrs_encoder = adv717x_addrs, + .video_codec = CODEC_TYPE_ZR36050, + .video_vfe = CODEC_TYPE_ZR36016, + + .inputs = 3, + .input = { + { 1, "Composite" }, + { 2, "S-Video" }, + { 0, "Internal/comp" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, + .tvn = { + &f50sqpixel_dc10, + &f60sqpixel_dc10, + &f50sqpixel_dc10 + }, + .jpeg_int = 0, + .vsync_int = ZR36057_ISR_GIRQ1, + .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 }, + .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 }, + .gpcs = { -1, 0 }, + .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, + .gws_not_connected = 0, + .input_mux = 0, + .init = &dc10_init, + }, { + .type = LML33, + .name = "LML33", + .i2c_decoder = "bt819a", + .addrs_decoder = bt819_addrs, + .i2c_encoder = "bt856", + .addrs_encoder = bt856_addrs, + .video_codec = CODEC_TYPE_ZR36060, + + .inputs = 2, + .input = { + { 0, "Composite" }, + { 7, "S-Video" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL, + .tvn = { + &f50ccir601_lml33, + &f60ccir601_lml33, + NULL + }, + .jpeg_int = ZR36057_ISR_GIRQ1, + .vsync_int = ZR36057_ISR_GIRQ0, + .gpio = { 1, -1, 3, 5, 7, -1, -1, -1 }, + .gpio_pol = { 0, 0, 0, 0, 1, 0, 0, 0 }, + .gpcs = { 3, 1 }, + .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 }, + .gws_not_connected = 1, + .input_mux = 0, + .init = &lml33_init, + }, { + .type = LML33R10, + .name = "LML33R10", + .i2c_decoder = "saa7114", + .addrs_decoder = saa7114_addrs, + .i2c_encoder = "adv7170", + .addrs_encoder = adv717x_addrs, + .video_codec = CODEC_TYPE_ZR36060, + + .inputs = 2, + .input = { + { 0, "Composite" }, + { 7, "S-Video" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL, + .tvn = { + &f50ccir601_lm33r10, + &f60ccir601_lm33r10, + NULL + }, + .jpeg_int = ZR36057_ISR_GIRQ1, + .vsync_int = ZR36057_ISR_GIRQ0, + .gpio = { 1, -1, 3, 5, 7, -1, -1, -1 }, + .gpio_pol = { 0, 0, 0, 0, 1, 0, 0, 0 }, + .gpcs = { 3, 1 }, + .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 }, + .gws_not_connected = 1, + .input_mux = 0, + .init = &lml33_init, + }, { + .type = BUZ, + .name = "Buz", + 
.i2c_decoder = "saa7111", + .addrs_decoder = saa7111_addrs, + .i2c_encoder = "saa7185", + .addrs_encoder = saa7185_addrs, + .video_codec = CODEC_TYPE_ZR36060, + + .inputs = 2, + .input = { + { 3, "Composite" }, + { 7, "S-Video" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL|V4L2_STD_SECAM, + .tvn = { + &f50ccir601, + &f60ccir601, + &f50ccir601 + }, + .jpeg_int = ZR36057_ISR_GIRQ1, + .vsync_int = ZR36057_ISR_GIRQ0, + .gpio = { 1, -1, 3, -1, -1, -1, -1, -1 }, + .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, + .gpcs = { 3, 1 }, + .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 }, + .gws_not_connected = 1, + .input_mux = 0, + .init = &buz_init, + }, { + .type = AVS6EYES, + .name = "6-Eyes", + /* AverMedia chose not to brand the 6-Eyes. Thus it + can't be autodetected, and requires card=x. */ + .i2c_decoder = "ks0127", + .addrs_decoder = ks0127_addrs, + .i2c_encoder = "bt866", + .addrs_encoder = bt866_addrs, + .video_codec = CODEC_TYPE_ZR36060, + + .inputs = 10, + .input = { + { 0, "Composite 1" }, + { 1, "Composite 2" }, + { 2, "Composite 3" }, + { 4, "Composite 4" }, + { 5, "Composite 5" }, + { 6, "Composite 6" }, + { 8, "S-Video 1" }, + { 9, "S-Video 2" }, + {10, "S-Video 3" }, + {15, "YCbCr" } + }, + .norms = V4L2_STD_NTSC|V4L2_STD_PAL, + .tvn = { + &f50ccir601_avs6eyes, + &f60ccir601_avs6eyes, + NULL + }, + .jpeg_int = ZR36057_ISR_GIRQ1, + .vsync_int = ZR36057_ISR_GIRQ0, + .gpio = { 1, 0, 3, -1, -1, -1, -1, -1 },// Validity unknown /Sam + .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, // Validity unknown /Sam + .gpcs = { 3, 1 }, // Validity unknown /Sam + .vfe_pol = { 1, 0, 0, 0, 0, 1, 0, 0 }, // Validity unknown /Sam + .gws_not_connected = 1, + .input_mux = 1, + .init = &avs6eyes_init, + } + +}; + +/* + * I2C functions + */ +/* software I2C functions */ +static int +zoran_i2c_getsda (void *data) +{ + struct zoran *zr = (struct zoran *) data; + + return (btread(ZR36057_I2CBR) >> 1) & 1; +} + +static int +zoran_i2c_getscl (void *data) +{ + struct zoran *zr = (struct zoran *) data; + + return 
btread(ZR36057_I2CBR) & 1; +} + +static void +zoran_i2c_setsda (void *data, + int state) +{ + struct zoran *zr = (struct zoran *) data; + + if (state) + zr->i2cbr |= 2; + else + zr->i2cbr &= ~2; + btwrite(zr->i2cbr, ZR36057_I2CBR); +} + +static void +zoran_i2c_setscl (void *data, + int state) +{ + struct zoran *zr = (struct zoran *) data; + + if (state) + zr->i2cbr |= 1; + else + zr->i2cbr &= ~1; + btwrite(zr->i2cbr, ZR36057_I2CBR); +} + +static const struct i2c_algo_bit_data zoran_i2c_bit_data_template = { + .setsda = zoran_i2c_setsda, + .setscl = zoran_i2c_setscl, + .getsda = zoran_i2c_getsda, + .getscl = zoran_i2c_getscl, + .udelay = 10, + .timeout = 100, +}; + +static int +zoran_register_i2c (struct zoran *zr) +{ + zr->i2c_algo = zoran_i2c_bit_data_template; + zr->i2c_algo.data = zr; + strlcpy(zr->i2c_adapter.name, ZR_DEVNAME(zr), + sizeof(zr->i2c_adapter.name)); + i2c_set_adapdata(&zr->i2c_adapter, &zr->v4l2_dev); + zr->i2c_adapter.algo_data = &zr->i2c_algo; + zr->i2c_adapter.dev.parent = &zr->pci_dev->dev; + return i2c_bit_add_bus(&zr->i2c_adapter); +} + +static void +zoran_unregister_i2c (struct zoran *zr) +{ + i2c_del_adapter(&zr->i2c_adapter); +} + +/* Check a zoran_params struct for correctness, insert default params */ + +int +zoran_check_jpg_settings (struct zoran *zr, + struct zoran_jpg_settings *settings, + int try) +{ + int err = 0, err0 = 0; + + dprintk(4, + KERN_DEBUG + "%s: %s - dec: %d, Hdcm: %d, Vdcm: %d, Tdcm: %d\n", + ZR_DEVNAME(zr), __func__, settings->decimation, settings->HorDcm, + settings->VerDcm, settings->TmpDcm); + dprintk(4, + KERN_DEBUG + "%s: %s - x: %d, y: %d, w: %d, y: %d\n", + ZR_DEVNAME(zr), __func__, settings->img_x, settings->img_y, + settings->img_width, settings->img_height); + /* Check decimation, set default values for decimation = 1, 2, 4 */ + switch (settings->decimation) { + case 1: + + settings->HorDcm = 1; + settings->VerDcm = 1; + settings->TmpDcm = 1; + settings->field_per_buff = 2; + settings->img_x = 0; + 
settings->img_y = 0; + settings->img_width = BUZ_MAX_WIDTH; + settings->img_height = BUZ_MAX_HEIGHT / 2; + break; + case 2: + + settings->HorDcm = 2; + settings->VerDcm = 1; + settings->TmpDcm = 2; + settings->field_per_buff = 1; + settings->img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; + settings->img_y = 0; + settings->img_width = + (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; + settings->img_height = BUZ_MAX_HEIGHT / 2; + break; + case 4: + + if (zr->card.type == DC10_new) { + dprintk(1, + KERN_DEBUG + "%s: %s - HDec by 4 is not supported on the DC10\n", + ZR_DEVNAME(zr), __func__); + err0++; + break; + } + + settings->HorDcm = 4; + settings->VerDcm = 2; + settings->TmpDcm = 2; + settings->field_per_buff = 1; + settings->img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; + settings->img_y = 0; + settings->img_width = + (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; + settings->img_height = BUZ_MAX_HEIGHT / 2; + break; + case 0: + + /* We have to check the data the user has set */ + + if (settings->HorDcm != 1 && settings->HorDcm != 2 && + (zr->card.type == DC10_new || settings->HorDcm != 4)) { + settings->HorDcm = clamp(settings->HorDcm, 1, 2); + err0++; + } + if (settings->VerDcm != 1 && settings->VerDcm != 2) { + settings->VerDcm = clamp(settings->VerDcm, 1, 2); + err0++; + } + if (settings->TmpDcm != 1 && settings->TmpDcm != 2) { + settings->TmpDcm = clamp(settings->TmpDcm, 1, 2); + err0++; + } + if (settings->field_per_buff != 1 && + settings->field_per_buff != 2) { + settings->field_per_buff = clamp(settings->field_per_buff, 1, 2); + err0++; + } + if (settings->img_x < 0) { + settings->img_x = 0; + err0++; + } + if (settings->img_y < 0) { + settings->img_y = 0; + err0++; + } + if (settings->img_width < 0 || settings->img_width > BUZ_MAX_WIDTH) { + settings->img_width = clamp(settings->img_width, 0, (int)BUZ_MAX_WIDTH); + err0++; + } + if (settings->img_height < 0 || settings->img_height > BUZ_MAX_HEIGHT / 2) { + settings->img_height = clamp(settings->img_height, 0, 
BUZ_MAX_HEIGHT / 2); + err0++; + } + if (settings->img_x + settings->img_width > BUZ_MAX_WIDTH) { + settings->img_x = BUZ_MAX_WIDTH - settings->img_width; + err0++; + } + if (settings->img_y + settings->img_height > BUZ_MAX_HEIGHT / 2) { + settings->img_y = BUZ_MAX_HEIGHT / 2 - settings->img_height; + err0++; + } + if (settings->img_width % (16 * settings->HorDcm) != 0) { + settings->img_width -= settings->img_width % (16 * settings->HorDcm); + if (settings->img_width == 0) + settings->img_width = 16 * settings->HorDcm; + err0++; + } + if (settings->img_height % (8 * settings->VerDcm) != 0) { + settings->img_height -= settings->img_height % (8 * settings->VerDcm); + if (settings->img_height == 0) + settings->img_height = 8 * settings->VerDcm; + err0++; + } + + if (!try && err0) { + dprintk(1, + KERN_ERR + "%s: %s - error in params for decimation = 0\n", + ZR_DEVNAME(zr), __func__); + err++; + } + break; + default: + dprintk(1, + KERN_ERR + "%s: %s - decimation = %d, must be 0, 1, 2 or 4\n", + ZR_DEVNAME(zr), __func__, settings->decimation); + err++; + break; + } + + if (settings->jpg_comp.quality > 100) + settings->jpg_comp.quality = 100; + if (settings->jpg_comp.quality < 5) + settings->jpg_comp.quality = 5; + if (settings->jpg_comp.APPn < 0) + settings->jpg_comp.APPn = 0; + if (settings->jpg_comp.APPn > 15) + settings->jpg_comp.APPn = 15; + if (settings->jpg_comp.APP_len < 0) + settings->jpg_comp.APP_len = 0; + if (settings->jpg_comp.APP_len > 60) + settings->jpg_comp.APP_len = 60; + if (settings->jpg_comp.COM_len < 0) + settings->jpg_comp.COM_len = 0; + if (settings->jpg_comp.COM_len > 60) + settings->jpg_comp.COM_len = 60; + if (err) + return -EINVAL; + return 0; +} + +void +zoran_open_init_params (struct zoran *zr) +{ + int i; + + /* User must explicitly set a window */ + zr->overlay_settings.is_set = 0; + zr->overlay_mask = NULL; + zr->overlay_active = ZORAN_FREE; + + zr->v4l_memgrab_active = 0; + zr->v4l_overlay_active = 0; + zr->v4l_grab_frame = 
NO_GRAB_ACTIVE; + zr->v4l_grab_seq = 0; + zr->v4l_settings.width = 192; + zr->v4l_settings.height = 144; + zr->v4l_settings.format = &zoran_formats[7]; /* YUY2 - YUV-4:2:2 packed */ + zr->v4l_settings.bytesperline = + zr->v4l_settings.width * + ((zr->v4l_settings.format->depth + 7) / 8); + + /* DMA ring stuff for V4L */ + zr->v4l_pend_tail = 0; + zr->v4l_pend_head = 0; + zr->v4l_sync_tail = 0; + zr->v4l_buffers.active = ZORAN_FREE; + for (i = 0; i < VIDEO_MAX_FRAME; i++) { + zr->v4l_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ + } + zr->v4l_buffers.allocated = 0; + + for (i = 0; i < BUZ_MAX_FRAME; i++) { + zr->jpg_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ + } + zr->jpg_buffers.active = ZORAN_FREE; + zr->jpg_buffers.allocated = 0; + /* Set necessary params and call zoran_check_jpg_settings to set the defaults */ + zr->jpg_settings.decimation = 1; + zr->jpg_settings.jpg_comp.quality = 50; /* default compression factor 8 */ + if (zr->card.type != BUZ) + zr->jpg_settings.odd_even = 1; + else + zr->jpg_settings.odd_even = 0; + zr->jpg_settings.jpg_comp.APPn = 0; + zr->jpg_settings.jpg_comp.APP_len = 0; /* No APPn marker */ + memset(zr->jpg_settings.jpg_comp.APP_data, 0, + sizeof(zr->jpg_settings.jpg_comp.APP_data)); + zr->jpg_settings.jpg_comp.COM_len = 0; /* No COM marker */ + memset(zr->jpg_settings.jpg_comp.COM_data, 0, + sizeof(zr->jpg_settings.jpg_comp.COM_data)); + zr->jpg_settings.jpg_comp.jpeg_markers = + V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; + i = zoran_check_jpg_settings(zr, &zr->jpg_settings, 0); + if (i) + dprintk(1, KERN_ERR "%s: %s internal error\n", + ZR_DEVNAME(zr), __func__); + + clear_interrupt_counters(zr); + zr->testing = 0; +} + +static void test_interrupts (struct zoran *zr) +{ + DEFINE_WAIT(wait); + int timeout, icr; + + clear_interrupt_counters(zr); + + zr->testing = 1; + icr = btread(ZR36057_ICR); + btwrite(0x78000000 | ZR36057_ICR_IntPinEn, ZR36057_ICR); + prepare_to_wait(&zr->test_q, &wait, 
TASK_INTERRUPTIBLE); + timeout = schedule_timeout(HZ); + finish_wait(&zr->test_q, &wait); + btwrite(0, ZR36057_ICR); + btwrite(0x78000000, ZR36057_ISR); + zr->testing = 0; + dprintk(5, KERN_INFO "%s: Testing interrupts...\n", ZR_DEVNAME(zr)); + if (timeout) { + dprintk(1, ": time spent: %d\n", 1 * HZ - timeout); + } + if (zr36067_debug > 1) + print_interrupts(zr); + btwrite(icr, ZR36057_ICR); +} + +static int zr36057_init (struct zoran *zr) +{ + int j, err; + + dprintk(1, + KERN_INFO + "%s: %s - initializing card[%d], zr=%p\n", + ZR_DEVNAME(zr), __func__, zr->id, zr); + + /* default setup of all parameters which will persist between opens */ + zr->user = 0; + + init_waitqueue_head(&zr->v4l_capq); + init_waitqueue_head(&zr->jpg_capq); + init_waitqueue_head(&zr->test_q); + zr->jpg_buffers.allocated = 0; + zr->v4l_buffers.allocated = 0; + + zr->vbuf_base = (void *) vidmem; + zr->vbuf_width = 0; + zr->vbuf_height = 0; + zr->vbuf_depth = 0; + zr->vbuf_bytesperline = 0; + + /* Avoid nonsense settings from user for default input/norm */ + if (default_norm < 0 || default_norm > 2) + default_norm = 0; + if (default_norm == 0) { + zr->norm = V4L2_STD_PAL; + zr->timing = zr->card.tvn[0]; + } else if (default_norm == 1) { + zr->norm = V4L2_STD_NTSC; + zr->timing = zr->card.tvn[1]; + } else { + zr->norm = V4L2_STD_SECAM; + zr->timing = zr->card.tvn[2]; + } + if (zr->timing == NULL) { + dprintk(1, + KERN_WARNING + "%s: %s - default TV standard not supported by hardware. 
PAL will be used.\n", + ZR_DEVNAME(zr), __func__); + zr->norm = V4L2_STD_PAL; + zr->timing = zr->card.tvn[0]; + } + + if (default_input > zr->card.inputs-1) { + dprintk(1, + KERN_WARNING + "%s: default_input value %d out of range (0-%d)\n", + ZR_DEVNAME(zr), default_input, zr->card.inputs-1); + default_input = 0; + } + zr->input = default_input; + + /* default setup (will be repeated at every open) */ + zoran_open_init_params(zr); + + /* allocate memory *before* doing anything to the hardware + * in case allocation fails */ + zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL); + zr->video_dev = video_device_alloc(); + if (!zr->stat_com || !zr->video_dev) { + dprintk(1, + KERN_ERR + "%s: %s - kmalloc (STAT_COM) failed\n", + ZR_DEVNAME(zr), __func__); + err = -ENOMEM; + goto exit_free; + } + for (j = 0; j < BUZ_NUM_STAT_COM; j++) { + zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */ + } + + /* + * Now add the template and register the device unit. + */ + *zr->video_dev = zoran_template; + zr->video_dev->v4l2_dev = &zr->v4l2_dev; + zr->video_dev->lock = &zr->lock; + strcpy(zr->video_dev->name, ZR_DEVNAME(zr)); + /* It's not a mem2mem device, but you can both capture and output from + one and the same device. This should really be split up into two + device nodes, but that's a job for another day. 
*/ + zr->video_dev->vfl_dir = VFL_DIR_M2M; + err = video_register_device(zr->video_dev, VFL_TYPE_GRABBER, video_nr[zr->id]); + if (err < 0) + goto exit_free; + video_set_drvdata(zr->video_dev, zr); + + zoran_init_hardware(zr); + if (zr36067_debug > 2) + detect_guest_activity(zr); + test_interrupts(zr); + if (!pass_through) { + decoder_call(zr, video, s_stream, 0); + encoder_call(zr, video, s_routing, 2, 0, 0); + } + + zr->zoran_proc = NULL; + zr->initialized = 1; + return 0; + +exit_free: + kfree(zr->stat_com); + kfree(zr->video_dev); + return err; +} + +static void zoran_remove(struct pci_dev *pdev) +{ + struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev); + struct zoran *zr = to_zoran(v4l2_dev); + + if (!zr->initialized) + goto exit_free; + + /* unregister videocodec bus */ + if (zr->codec) { + struct videocodec_master *master = zr->codec->master_data; + + videocodec_detach(zr->codec); + kfree(master); + } + if (zr->vfe) { + struct videocodec_master *master = zr->vfe->master_data; + + videocodec_detach(zr->vfe); + kfree(master); + } + + /* unregister i2c bus */ + zoran_unregister_i2c(zr); + /* disable PCI bus-mastering */ + zoran_set_pci_master(zr, 0); + /* put chip into reset */ + btwrite(0, ZR36057_SPGPPCR); + free_irq(zr->pci_dev->irq, zr); + /* unmap and free memory */ + kfree(zr->stat_com); + zoran_proc_cleanup(zr); + iounmap(zr->zr36057_mem); + pci_disable_device(zr->pci_dev); + video_unregister_device(zr->video_dev); +exit_free: + v4l2_ctrl_handler_free(&zr->hdl); + v4l2_device_unregister(&zr->v4l2_dev); + kfree(zr); +} + +void +zoran_vdev_release (struct video_device *vdev) +{ + kfree(vdev); +} + +static struct videocodec_master *zoran_setup_videocodec(struct zoran *zr, + int type) +{ + struct videocodec_master *m = NULL; + + m = kmalloc(sizeof(struct videocodec_master), GFP_KERNEL); + if (!m) { + dprintk(1, KERN_ERR "%s: %s - no memory\n", + ZR_DEVNAME(zr), __func__); + return m; + } + + /* magic and type are unused for master struct. 
Makes sense only at + codec structs. + In the past, .type were initialized to the old V4L1 .hardware + value, as VID_HARDWARE_ZR36067 + */ + m->magic = 0L; + m->type = 0; + + m->flags = CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER; + strlcpy(m->name, ZR_DEVNAME(zr), sizeof(m->name)); + m->data = zr; + + switch (type) + { + case CODEC_TYPE_ZR36060: + m->readreg = zr36060_read; + m->writereg = zr36060_write; + m->flags |= CODEC_FLAG_JPEG | CODEC_FLAG_VFE; + break; + case CODEC_TYPE_ZR36050: + m->readreg = zr36050_read; + m->writereg = zr36050_write; + m->flags |= CODEC_FLAG_JPEG; + break; + case CODEC_TYPE_ZR36016: + m->readreg = zr36016_read; + m->writereg = zr36016_write; + m->flags |= CODEC_FLAG_VFE; + break; + } + + return m; +} + +static void zoran_subdev_notify(struct v4l2_subdev *sd, unsigned int cmd, void *arg) +{ + struct zoran *zr = to_zoran(sd->v4l2_dev); + + /* Bt819 needs to reset its FIFO buffer using #FRST pin and + LML33 card uses GPIO(7) for that. */ + if (cmd == BT819_FIFO_RESET_LOW) + GPIO(zr, 7, 0); + else if (cmd == BT819_FIFO_RESET_HIGH) + GPIO(zr, 7, 1); +} + +/* + * Scan for a Buz card (actually for the PCI controller ZR36057), + * request the irq and map the io memory + */ +static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned char latency, need_latency; + struct zoran *zr; + int result; + struct videocodec_master *master_vfe = NULL; + struct videocodec_master *master_codec = NULL; + int card_num; + char *codec_name, *vfe_name; + unsigned int nr; + + + nr = zoran_num++; + if (nr >= BUZ_MAX) { + dprintk(1, KERN_ERR "%s: driver limited to %d card(s) maximum\n", + ZORAN_NAME, BUZ_MAX); + return -ENOENT; + } + + zr = kzalloc(sizeof(struct zoran), GFP_KERNEL); + if (!zr) { + dprintk(1, KERN_ERR "%s: %s - kzalloc failed\n", + ZORAN_NAME, __func__); + return -ENOMEM; + } + zr->v4l2_dev.notify = zoran_subdev_notify; + if (v4l2_device_register(&pdev->dev, &zr->v4l2_dev)) + goto zr_free_mem; + zr->pci_dev = pdev; + 
zr->id = nr; + snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id); + if (v4l2_ctrl_handler_init(&zr->hdl, 10)) + goto zr_unreg; + zr->v4l2_dev.ctrl_handler = &zr->hdl; + spin_lock_init(&zr->spinlock); + mutex_init(&zr->lock); + if (pci_enable_device(pdev)) + goto zr_unreg; + zr->revision = zr->pci_dev->revision; + + dprintk(1, + KERN_INFO + "%s: Zoran ZR360%c7 (rev %d), irq: %d, memory: 0x%08llx\n", + ZR_DEVNAME(zr), zr->revision < 2 ? '5' : '6', zr->revision, + zr->pci_dev->irq, (uint64_t)pci_resource_start(zr->pci_dev, 0)); + if (zr->revision >= 2) { + dprintk(1, + KERN_INFO + "%s: Subsystem vendor=0x%04x id=0x%04x\n", + ZR_DEVNAME(zr), zr->pci_dev->subsystem_vendor, + zr->pci_dev->subsystem_device); + } + + /* Use auto-detected card type? */ + if (card[nr] == -1) { + if (zr->revision < 2) { + dprintk(1, + KERN_ERR + "%s: No card type specified, please use the card=X module parameter\n", + ZR_DEVNAME(zr)); + dprintk(1, + KERN_ERR + "%s: It is not possible to auto-detect ZR36057 based cards\n", + ZR_DEVNAME(zr)); + goto zr_unreg; + } + + card_num = ent->driver_data; + if (card_num >= NUM_CARDS) { + dprintk(1, + KERN_ERR + "%s: Unknown card, try specifying card=X module parameter\n", + ZR_DEVNAME(zr)); + goto zr_unreg; + } + dprintk(3, + KERN_DEBUG + "%s: %s() - card %s detected\n", + ZR_DEVNAME(zr), __func__, zoran_cards[card_num].name); + } else { + card_num = card[nr]; + if (card_num >= NUM_CARDS || card_num < 0) { + dprintk(1, + KERN_ERR + "%s: User specified card type %d out of range (0 .. %d)\n", + ZR_DEVNAME(zr), card_num, NUM_CARDS - 1); + goto zr_unreg; + } + } + + /* even though we make this a non pointer and thus + * theoretically allow for making changes to this struct + * on a per-individual card basis at runtime, this is + * strongly discouraged. 
This structure is intended to + * keep general card information, no settings or anything */ + zr->card = zoran_cards[card_num]; + snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), + "%s[%u]", zr->card.name, zr->id); + + zr->zr36057_mem = pci_ioremap_bar(zr->pci_dev, 0); + if (!zr->zr36057_mem) { + dprintk(1, KERN_ERR "%s: %s() - ioremap failed\n", + ZR_DEVNAME(zr), __func__); + goto zr_unreg; + } + + result = request_irq(zr->pci_dev->irq, zoran_irq, + IRQF_SHARED, ZR_DEVNAME(zr), zr); + if (result < 0) { + if (result == -EINVAL) { + dprintk(1, + KERN_ERR + "%s: %s - bad irq number or handler\n", + ZR_DEVNAME(zr), __func__); + } else if (result == -EBUSY) { + dprintk(1, + KERN_ERR + "%s: %s - IRQ %d busy, change your PnP config in BIOS\n", + ZR_DEVNAME(zr), __func__, zr->pci_dev->irq); + } else { + dprintk(1, + KERN_ERR + "%s: %s - can't assign irq, error code %d\n", + ZR_DEVNAME(zr), __func__, result); + } + goto zr_unmap; + } + + /* set PCI latency timer */ + pci_read_config_byte(zr->pci_dev, PCI_LATENCY_TIMER, + &latency); + need_latency = zr->revision > 1 ? 
32 : 48; + if (latency != need_latency) { + dprintk(2, KERN_INFO "%s: Changing PCI latency from %d to %d\n", + ZR_DEVNAME(zr), latency, need_latency); + pci_write_config_byte(zr->pci_dev, PCI_LATENCY_TIMER, + need_latency); + } + + zr36057_restart(zr); + /* i2c */ + dprintk(2, KERN_INFO "%s: Initializing i2c bus...\n", + ZR_DEVNAME(zr)); + + if (zoran_register_i2c(zr) < 0) { + dprintk(1, KERN_ERR "%s: %s - can't initialize i2c bus\n", + ZR_DEVNAME(zr), __func__); + goto zr_free_irq; + } + + zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, + &zr->i2c_adapter, zr->card.i2c_decoder, + 0, zr->card.addrs_decoder); + + if (zr->card.i2c_encoder) + zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, + &zr->i2c_adapter, zr->card.i2c_encoder, + 0, zr->card.addrs_encoder); + + dprintk(2, + KERN_INFO "%s: Initializing videocodec bus...\n", + ZR_DEVNAME(zr)); + + if (zr->card.video_codec) { + codec_name = codecid_to_modulename(zr->card.video_codec); + if (codec_name) { + result = request_module(codec_name); + if (result) { + dprintk(1, + KERN_ERR + "%s: failed to load modules %s: %d\n", + ZR_DEVNAME(zr), codec_name, result); + } + } + } + if (zr->card.video_vfe) { + vfe_name = codecid_to_modulename(zr->card.video_vfe); + if (vfe_name) { + result = request_module(vfe_name); + if (result < 0) { + dprintk(1, + KERN_ERR + "%s: failed to load modules %s: %d\n", + ZR_DEVNAME(zr), vfe_name, result); + } + } + } + + /* reset JPEG codec */ + jpeg_codec_sleep(zr, 1); + jpeg_codec_reset(zr); + /* video bus enabled */ + /* display codec revision */ + if (zr->card.video_codec != 0) { + master_codec = zoran_setup_videocodec(zr, zr->card.video_codec); + if (!master_codec) + goto zr_unreg_i2c; + zr->codec = videocodec_attach(master_codec); + if (!zr->codec) { + dprintk(1, KERN_ERR "%s: %s - no codec found\n", + ZR_DEVNAME(zr), __func__); + goto zr_free_codec; + } + if (zr->codec->type != zr->card.video_codec) { + dprintk(1, KERN_ERR "%s: %s - wrong codec\n", + ZR_DEVNAME(zr), __func__); + goto 
zr_detach_codec; + } + } + if (zr->card.video_vfe != 0) { + master_vfe = zoran_setup_videocodec(zr, zr->card.video_vfe); + if (!master_vfe) + goto zr_detach_codec; + zr->vfe = videocodec_attach(master_vfe); + if (!zr->vfe) { + dprintk(1, KERN_ERR "%s: %s - no VFE found\n", + ZR_DEVNAME(zr), __func__); + goto zr_free_vfe; + } + if (zr->vfe->type != zr->card.video_vfe) { + dprintk(1, KERN_ERR "%s: %s = wrong VFE\n", + ZR_DEVNAME(zr), __func__); + goto zr_detach_vfe; + } + } + + /* take care of Natoma chipset and a revision 1 zr36057 */ + if ((pci_pci_problems & PCIPCI_NATOMA) && zr->revision <= 1) { + zr->jpg_buffers.need_contiguous = 1; + dprintk(1, KERN_INFO + "%s: ZR36057/Natoma bug, max. buffer size is 128K\n", + ZR_DEVNAME(zr)); + } + + if (zr36057_init(zr) < 0) + goto zr_detach_vfe; + + zoran_proc_init(zr); + + return 0; + +zr_detach_vfe: + videocodec_detach(zr->vfe); +zr_free_vfe: + kfree(master_vfe); +zr_detach_codec: + videocodec_detach(zr->codec); +zr_free_codec: + kfree(master_codec); +zr_unreg_i2c: + zoran_unregister_i2c(zr); +zr_free_irq: + btwrite(0, ZR36057_SPGPPCR); + free_irq(zr->pci_dev->irq, zr); +zr_unmap: + iounmap(zr->zr36057_mem); +zr_unreg: + v4l2_ctrl_handler_free(&zr->hdl); + v4l2_device_unregister(&zr->v4l2_dev); +zr_free_mem: + kfree(zr); + + return -ENODEV; +} + +static struct pci_driver zoran_driver = { + .name = "zr36067", + .id_table = zr36067_pci_tbl, + .probe = zoran_probe, + .remove = zoran_remove, +}; + +static int __init zoran_init(void) +{ + int res; + + printk(KERN_INFO "Zoran MJPEG board driver version %s\n", + ZORAN_VERSION); + + /* check the parameters we have been given, adjust if necessary */ + if (v4l_nbufs < 2) + v4l_nbufs = 2; + if (v4l_nbufs > VIDEO_MAX_FRAME) + v4l_nbufs = VIDEO_MAX_FRAME; + /* The user specfies the in KB, we want them in byte + * (and page aligned) */ + v4l_bufsize = PAGE_ALIGN(v4l_bufsize * 1024); + if (v4l_bufsize < 32768) + v4l_bufsize = 32768; + /* 2 MB is arbitrary but sufficient for the maximum 
possible images */ + if (v4l_bufsize > 2048 * 1024) + v4l_bufsize = 2048 * 1024; + if (jpg_nbufs < 4) + jpg_nbufs = 4; + if (jpg_nbufs > BUZ_MAX_FRAME) + jpg_nbufs = BUZ_MAX_FRAME; + jpg_bufsize = PAGE_ALIGN(jpg_bufsize * 1024); + if (jpg_bufsize < 8192) + jpg_bufsize = 8192; + if (jpg_bufsize > (512 * 1024)) + jpg_bufsize = 512 * 1024; + /* Use parameter for vidmem or try to find a video card */ + if (vidmem) { + dprintk(1, + KERN_INFO + "%s: Using supplied video memory base address @ 0x%lx\n", + ZORAN_NAME, vidmem); + } + + /* some mainboards might not do PCI-PCI data transfer well */ + if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL|PCIPCI_ALIMAGIK)) { + dprintk(1, + KERN_WARNING + "%s: chipset does not support reliable PCI-PCI DMA\n", + ZORAN_NAME); + } + + res = pci_register_driver(&zoran_driver); + if (res) { + dprintk(1, + KERN_ERR + "%s: Unable to register ZR36057 driver\n", + ZORAN_NAME); + return res; + } + + return 0; +} + +static void __exit zoran_exit(void) +{ + pci_unregister_driver(&zoran_driver); +} + +module_init(zoran_init); +module_exit(zoran_exit); diff --git a/drivers/staging/media/zoran/zoran_card.h b/drivers/staging/media/zoran/zoran_card.h new file mode 100644 index 000000000000..0cdb7d34926d --- /dev/null +++ b/drivers/staging/media/zoran/zoran_card.h @@ -0,0 +1,50 @@ +/* + * Zoran zr36057/zr36067 PCI controller driver, for the + * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux + * Media Labs LML33/LML33R10. + * + * This part handles card-specific data and detection + * + * Copyright (C) 2000 Serguei Miridonov + * + * Currently maintained by: + * Ronald Bultje + * Laurent Pinchart + * Mailinglist + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ZORAN_CARD_H__ +#define __ZORAN_CARD_H__ + +extern int zr36067_debug; + +#define dprintk(num, format, args...) \ + do { \ + if (zr36067_debug >= num) \ + printk(format, ##args); \ + } while (0) + +/* Anybody who uses more than four? */ +#define BUZ_MAX 4 + +extern const struct video_device zoran_template; + +extern int zoran_check_jpg_settings(struct zoran *zr, + struct zoran_jpg_settings *settings, + int try); +extern void zoran_open_init_params(struct zoran *zr); +extern void zoran_vdev_release(struct video_device *vdev); + +void zr36016_write(struct videocodec *codec, u16 reg, u32 val); + +#endif /* __ZORAN_CARD_H__ */ diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c new file mode 100644 index 000000000000..40adceebca7e --- /dev/null +++ b/drivers/staging/media/zoran/zoran_device.c @@ -0,0 +1,1619 @@ +/* + * Zoran zr36057/zr36067 PCI controller driver, for the + * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux + * Media Labs LML33/LML33R10. + * + * This part handles device access (PCI/I2C/codec/...) + * + * Copyright (C) 2000 Serguei Miridonov + * + * Currently maintained by: + * Ronald Bultje + * Laurent Pinchart + * Mailinglist + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "videocodec.h" +#include "zoran.h" +#include "zoran_device.h" +#include "zoran_card.h" + +#define IRQ_MASK ( ZR36057_ISR_GIRQ0 | \ + ZR36057_ISR_GIRQ1 | \ + ZR36057_ISR_JPEGRepIRQ ) + +static bool lml33dpath; /* default = 0 + * 1 will use digital path in capture + * mode instead of analog. It can be + * used for picture adjustments using + * tool like xawtv while watching image + * on TV monitor connected to the output. + * However, due to absence of 75 Ohm + * load on Bt819 input, there will be + * some image imperfections */ + +module_param(lml33dpath, bool, 0644); +MODULE_PARM_DESC(lml33dpath, + "Use digital path capture mode (on LML33 cards)"); + +static void +zr36057_init_vfe (struct zoran *zr); + +/* + * General Purpose I/O and Guest bus access + */ + +/* + * This is a bit tricky. When a board lacks a GPIO function, the corresponding + * GPIO bit number in the card_info structure is set to 0. 
+ */ + +void +GPIO (struct zoran *zr, + int bit, + unsigned int value) +{ + u32 reg; + u32 mask; + + /* Make sure the bit number is legal + * A bit number of -1 (lacking) gives a mask of 0, + * making it harmless */ + mask = (1 << (24 + bit)) & 0xff000000; + reg = btread(ZR36057_GPPGCR1) & ~mask; + if (value) { + reg |= mask; + } + btwrite(reg, ZR36057_GPPGCR1); + udelay(1); +} + +/* + * Wait til post office is no longer busy + */ + +int +post_office_wait (struct zoran *zr) +{ + u32 por; + +// while (((por = btread(ZR36057_POR)) & (ZR36057_POR_POPen | ZR36057_POR_POTime)) == ZR36057_POR_POPen) { + while ((por = btread(ZR36057_POR)) & ZR36057_POR_POPen) { + /* wait for something to happen */ + } + if ((por & ZR36057_POR_POTime) && !zr->card.gws_not_connected) { + /* In LML33/BUZ \GWS line is not connected, so it has always timeout set */ + dprintk(1, KERN_INFO "%s: pop timeout %08x\n", ZR_DEVNAME(zr), + por); + return -1; + } + + return 0; +} + +int +post_office_write (struct zoran *zr, + unsigned int guest, + unsigned int reg, + unsigned int value) +{ + u32 por; + + por = + ZR36057_POR_PODir | ZR36057_POR_POTime | ((guest & 7) << 20) | + ((reg & 7) << 16) | (value & 0xFF); + btwrite(por, ZR36057_POR); + + return post_office_wait(zr); +} + +int +post_office_read (struct zoran *zr, + unsigned int guest, + unsigned int reg) +{ + u32 por; + + por = ZR36057_POR_POTime | ((guest & 7) << 20) | ((reg & 7) << 16); + btwrite(por, ZR36057_POR); + if (post_office_wait(zr) < 0) { + return -1; + } + + return btread(ZR36057_POR) & 0xFF; +} + +/* + * detect guests + */ + +static void +dump_guests (struct zoran *zr) +{ + if (zr36067_debug > 2) { + int i, guest[8]; + + for (i = 1; i < 8; i++) { // Don't read jpeg codec here + guest[i] = post_office_read(zr, i, 0); + } + + printk(KERN_INFO "%s: Guests: %*ph\n", + ZR_DEVNAME(zr), 8, guest); + } +} + +void +detect_guest_activity (struct zoran *zr) +{ + int timeout, i, j, res, guest[8], guest0[8], change[8][3]; + ktime_t t0, t1; + + 
dump_guests(zr); + printk(KERN_INFO "%s: Detecting guests activity, please wait...\n", + ZR_DEVNAME(zr)); + for (i = 1; i < 8; i++) { // Don't read jpeg codec here + guest0[i] = guest[i] = post_office_read(zr, i, 0); + } + + timeout = 0; + j = 0; + t0 = ktime_get(); + while (timeout < 10000) { + udelay(10); + timeout++; + for (i = 1; (i < 8) && (j < 8); i++) { + res = post_office_read(zr, i, 0); + if (res != guest[i]) { + t1 = ktime_get(); + change[j][0] = ktime_to_us(ktime_sub(t1, t0)); + t0 = t1; + change[j][1] = i; + change[j][2] = res; + j++; + guest[i] = res; + } + } + if (j >= 8) + break; + } + + printk(KERN_INFO "%s: Guests: %*ph\n", ZR_DEVNAME(zr), 8, guest0); + + if (j == 0) { + printk(KERN_INFO "%s: No activity detected.\n", ZR_DEVNAME(zr)); + return; + } + for (i = 0; i < j; i++) { + printk(KERN_INFO "%s: %6d: %d => 0x%02x\n", ZR_DEVNAME(zr), + change[i][0], change[i][1], change[i][2]); + } +} + +/* + * JPEG Codec access + */ + +void +jpeg_codec_sleep (struct zoran *zr, + int sleep) +{ + GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_SLEEP], !sleep); + if (!sleep) { + dprintk(3, + KERN_DEBUG + "%s: jpeg_codec_sleep() - wake GPIO=0x%08x\n", + ZR_DEVNAME(zr), btread(ZR36057_GPPGCR1)); + udelay(500); + } else { + dprintk(3, + KERN_DEBUG + "%s: jpeg_codec_sleep() - sleep GPIO=0x%08x\n", + ZR_DEVNAME(zr), btread(ZR36057_GPPGCR1)); + udelay(2); + } +} + +int +jpeg_codec_reset (struct zoran *zr) +{ + /* Take the codec out of sleep */ + jpeg_codec_sleep(zr, 0); + + if (zr->card.gpcs[GPCS_JPEG_RESET] != 0xff) { + post_office_write(zr, zr->card.gpcs[GPCS_JPEG_RESET], 0, + 0); + udelay(2); + } else { + GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 0); + udelay(2); + GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 1); + udelay(2); + } + + return 0; +} + +/* + * Set the registers for the size we have specified. Don't bother + * trying to understand this without the ZR36057 manual in front of + * you [AC]. 
+ * + * PS: The manual is free for download in .pdf format from + * www.zoran.com - nicely done those folks. + */ + +static void +zr36057_adjust_vfe (struct zoran *zr, + enum zoran_codec_mode mode) +{ + u32 reg; + + switch (mode) { + case BUZ_MODE_MOTION_DECOMPRESS: + btand(~ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR); + reg = btread(ZR36057_VFEHCR); + if ((reg & (1 << 10)) && zr->card.type != LML33R10) { + reg += ((1 << 10) | 1); + } + btwrite(reg, ZR36057_VFEHCR); + break; + case BUZ_MODE_MOTION_COMPRESS: + case BUZ_MODE_IDLE: + default: + if ((zr->norm & V4L2_STD_NTSC) || + (zr->card.type == LML33R10 && + (zr->norm & V4L2_STD_PAL))) + btand(~ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR); + else + btor(ZR36057_VFESPFR_ExtFl, ZR36057_VFESPFR); + reg = btread(ZR36057_VFEHCR); + if (!(reg & (1 << 10)) && zr->card.type != LML33R10) { + reg -= ((1 << 10) | 1); + } + btwrite(reg, ZR36057_VFEHCR); + break; + } +} + +/* + * set geometry + */ + +static void +zr36057_set_vfe (struct zoran *zr, + int video_width, + int video_height, + const struct zoran_format *format) +{ + struct tvnorm *tvn; + unsigned HStart, HEnd, VStart, VEnd; + unsigned DispMode; + unsigned VidWinWid, VidWinHt; + unsigned hcrop1, hcrop2, vcrop1, vcrop2; + unsigned Wa, We, Ha, He; + unsigned X, Y, HorDcm, VerDcm; + u32 reg; + unsigned mask_line_size; + + tvn = zr->timing; + + Wa = tvn->Wa; + Ha = tvn->Ha; + + dprintk(2, KERN_INFO "%s: set_vfe() - width = %d, height = %d\n", + ZR_DEVNAME(zr), video_width, video_height); + + if (video_width < BUZ_MIN_WIDTH || + video_height < BUZ_MIN_HEIGHT || + video_width > Wa || video_height > Ha) { + dprintk(1, KERN_ERR "%s: set_vfe: w=%d h=%d not valid\n", + ZR_DEVNAME(zr), video_width, video_height); + return; + } + + /**** zr36057 ****/ + + /* horizontal */ + VidWinWid = video_width; + X = DIV_ROUND_UP(VidWinWid * 64, tvn->Wa); + We = (VidWinWid * 64) / X; + HorDcm = 64 - X; + hcrop1 = 2 * ((tvn->Wa - We) / 4); + hcrop2 = tvn->Wa - We - hcrop1; + HStart = tvn->HStart ? 
tvn->HStart : 1; + /* (Ronald) Original comment: + * "| 1 Doesn't have any effect, tested on both a DC10 and a DC10+" + * this is false. It inverses chroma values on the LML33R10 (so Cr + * suddenly is shown as Cb and reverse, really cool effect if you + * want to see blue faces, not useful otherwise). So don't use |1. + * However, the DC10 has '0' as HStart, but does need |1, so we + * use a dirty check... + */ + HEnd = HStart + tvn->Wa - 1; + HStart += hcrop1; + HEnd -= hcrop2; + reg = ((HStart & ZR36057_VFEHCR_Hmask) << ZR36057_VFEHCR_HStart) + | ((HEnd & ZR36057_VFEHCR_Hmask) << ZR36057_VFEHCR_HEnd); + if (zr->card.vfe_pol.hsync_pol) + reg |= ZR36057_VFEHCR_HSPol; + btwrite(reg, ZR36057_VFEHCR); + + /* Vertical */ + DispMode = !(video_height > BUZ_MAX_HEIGHT / 2); + VidWinHt = DispMode ? video_height : video_height / 2; + Y = DIV_ROUND_UP(VidWinHt * 64 * 2, tvn->Ha); + He = (VidWinHt * 64) / Y; + VerDcm = 64 - Y; + vcrop1 = (tvn->Ha / 2 - He) / 2; + vcrop2 = tvn->Ha / 2 - He - vcrop1; + VStart = tvn->VStart; + VEnd = VStart + tvn->Ha / 2; // - 1; FIXME SnapShot times out with -1 in 768*576 on the DC10 - LP + VStart += vcrop1; + VEnd -= vcrop2; + reg = ((VStart & ZR36057_VFEVCR_Vmask) << ZR36057_VFEVCR_VStart) + | ((VEnd & ZR36057_VFEVCR_Vmask) << ZR36057_VFEVCR_VEnd); + if (zr->card.vfe_pol.vsync_pol) + reg |= ZR36057_VFEVCR_VSPol; + btwrite(reg, ZR36057_VFEVCR); + + /* scaler and pixel format */ + reg = 0; + reg |= (HorDcm << ZR36057_VFESPFR_HorDcm); + reg |= (VerDcm << ZR36057_VFESPFR_VerDcm); + reg |= (DispMode << ZR36057_VFESPFR_DispMode); + /* RJ: I don't know, why the following has to be the opposite + * of the corresponding ZR36060 setting, but only this way + * we get the correct colors when uncompressing to the screen */ + //reg |= ZR36057_VFESPFR_VCLKPol; /**/ + /* RJ: Don't know if that is needed for NTSC also */ + if (!(zr->norm & V4L2_STD_NTSC)) + reg |= ZR36057_VFESPFR_ExtFl; // NEEDED!!!!!!! 
Wolfgang + reg |= ZR36057_VFESPFR_TopField; + if (HorDcm >= 48) { + reg |= 3 << ZR36057_VFESPFR_HFilter; /* 5 tap filter */ + } else if (HorDcm >= 32) { + reg |= 2 << ZR36057_VFESPFR_HFilter; /* 4 tap filter */ + } else if (HorDcm >= 16) { + reg |= 1 << ZR36057_VFESPFR_HFilter; /* 3 tap filter */ + } + reg |= format->vfespfr; + btwrite(reg, ZR36057_VFESPFR); + + /* display configuration */ + reg = (16 << ZR36057_VDCR_MinPix) + | (VidWinHt << ZR36057_VDCR_VidWinHt) + | (VidWinWid << ZR36057_VDCR_VidWinWid); + if (pci_pci_problems & PCIPCI_TRITON) + // || zr->revision < 1) // Revision 1 has also Triton support + reg &= ~ZR36057_VDCR_Triton; + else + reg |= ZR36057_VDCR_Triton; + btwrite(reg, ZR36057_VDCR); + + /* (Ronald) don't write this if overlay_mask = NULL */ + if (zr->overlay_mask) { + /* Write overlay clipping mask data, but don't enable overlay clipping */ + /* RJ: since this makes only sense on the screen, we use + * zr->overlay_settings.width instead of video_width */ + + mask_line_size = (BUZ_MAX_WIDTH + 31) / 32; + reg = virt_to_bus(zr->overlay_mask); + btwrite(reg, ZR36057_MMTR); + reg = virt_to_bus(zr->overlay_mask + mask_line_size); + btwrite(reg, ZR36057_MMBR); + reg = + mask_line_size - (zr->overlay_settings.width + + 31) / 32; + if (DispMode == 0) + reg += mask_line_size; + reg <<= ZR36057_OCR_MaskStride; + btwrite(reg, ZR36057_OCR); + } + + zr36057_adjust_vfe(zr, zr->codec_mode); +} + +/* + * Switch overlay on or off + */ + +void +zr36057_overlay (struct zoran *zr, + int on) +{ + u32 reg; + + if (on) { + /* do the necessary settings ... */ + btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR); /* switch it off first */ + + zr36057_set_vfe(zr, + zr->overlay_settings.width, + zr->overlay_settings.height, + zr->overlay_settings.format); + + /* Start and length of each line MUST be 4-byte aligned. + * This should be already checked before the call to this routine. + * All error messages are internal driver checking only! 
*/ + + /* video display top and bottom registers */ + reg = (long) zr->vbuf_base + + zr->overlay_settings.x * + ((zr->overlay_settings.format->depth + 7) / 8) + + zr->overlay_settings.y * + zr->vbuf_bytesperline; + btwrite(reg, ZR36057_VDTR); + if (reg & 3) + dprintk(1, + KERN_ERR + "%s: zr36057_overlay() - video_address not aligned\n", + ZR_DEVNAME(zr)); + if (zr->overlay_settings.height > BUZ_MAX_HEIGHT / 2) + reg += zr->vbuf_bytesperline; + btwrite(reg, ZR36057_VDBR); + + /* video stride, status, and frame grab register */ + reg = zr->vbuf_bytesperline - + zr->overlay_settings.width * + ((zr->overlay_settings.format->depth + 7) / 8); + if (zr->overlay_settings.height > BUZ_MAX_HEIGHT / 2) + reg += zr->vbuf_bytesperline; + if (reg & 3) + dprintk(1, + KERN_ERR + "%s: zr36057_overlay() - video_stride not aligned\n", + ZR_DEVNAME(zr)); + reg = (reg << ZR36057_VSSFGR_DispStride); + reg |= ZR36057_VSSFGR_VidOvf; /* clear overflow status */ + btwrite(reg, ZR36057_VSSFGR); + + /* Set overlay clipping */ + if (zr->overlay_settings.clipcount > 0) + btor(ZR36057_OCR_OvlEnable, ZR36057_OCR); + + /* ... and switch it on */ + btor(ZR36057_VDCR_VidEn, ZR36057_VDCR); + } else { + /* Switch it off */ + btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR); + } +} + +/* + * The overlay mask has one bit for each pixel on a scan line, + * and the maximum window size is BUZ_MAX_WIDTH * BUZ_MAX_HEIGHT pixels. 
+ */ + +void write_overlay_mask(struct zoran_fh *fh, struct v4l2_clip *vp, int count) +{ + struct zoran *zr = fh->zr; + unsigned mask_line_size = (BUZ_MAX_WIDTH + 31) / 32; + u32 *mask; + int x, y, width, height; + unsigned i, j, k; + + /* fill mask with one bits */ + memset(fh->overlay_mask, ~0, mask_line_size * 4 * BUZ_MAX_HEIGHT); + + for (i = 0; i < count; ++i) { + /* pick up local copy of clip */ + x = vp[i].c.left; + y = vp[i].c.top; + width = vp[i].c.width; + height = vp[i].c.height; + + /* trim clips that extend beyond the window */ + if (x < 0) { + width += x; + x = 0; + } + if (y < 0) { + height += y; + y = 0; + } + if (x + width > fh->overlay_settings.width) { + width = fh->overlay_settings.width - x; + } + if (y + height > fh->overlay_settings.height) { + height = fh->overlay_settings.height - y; + } + + /* ignore degenerate clips */ + if (height <= 0) { + continue; + } + if (width <= 0) { + continue; + } + + /* apply clip for each scan line */ + for (j = 0; j < height; ++j) { + /* reset bit for each pixel */ + /* this can be optimized later if need be */ + mask = fh->overlay_mask + (y + j) * mask_line_size; + for (k = 0; k < width; ++k) { + mask[(x + k) / 32] &= + ~((u32) 1 << (x + k) % 32); + } + } + } +} + +/* Enable/Disable uncompressed memory grabbing of the 36057 */ + +void +zr36057_set_memgrab (struct zoran *zr, + int mode) +{ + if (mode) { + /* We only check SnapShot and not FrameGrab here. SnapShot==1 + * means a capture is already in progress, but FrameGrab==1 + * doesn't necessary mean that. It's more correct to say a 1 + * to 0 transition indicates a capture completed. If a + * capture is pending when capturing is tuned off, FrameGrab + * will be stuck at 1 until capturing is turned back on. 
+ */ + if (btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SnapShot) + dprintk(1, + KERN_WARNING + "%s: zr36057_set_memgrab(1) with SnapShot on!?\n", + ZR_DEVNAME(zr)); + + /* switch on VSync interrupts */ + btwrite(IRQ_MASK, ZR36057_ISR); // Clear Interrupts + btor(zr->card.vsync_int, ZR36057_ICR); // SW + + /* enable SnapShot */ + btor(ZR36057_VSSFGR_SnapShot, ZR36057_VSSFGR); + + /* Set zr36057 video front end and enable video */ + zr36057_set_vfe(zr, zr->v4l_settings.width, + zr->v4l_settings.height, + zr->v4l_settings.format); + + zr->v4l_memgrab_active = 1; + } else { + /* switch off VSync interrupts */ + btand(~zr->card.vsync_int, ZR36057_ICR); // SW + + zr->v4l_memgrab_active = 0; + zr->v4l_grab_frame = NO_GRAB_ACTIVE; + + /* reenable grabbing to screen if it was running */ + if (zr->v4l_overlay_active) { + zr36057_overlay(zr, 1); + } else { + btand(~ZR36057_VDCR_VidEn, ZR36057_VDCR); + btand(~ZR36057_VSSFGR_SnapShot, ZR36057_VSSFGR); + } + } +} + +int +wait_grab_pending (struct zoran *zr) +{ + unsigned long flags; + + /* wait until all pending grabs are finished */ + + if (!zr->v4l_memgrab_active) + return 0; + + wait_event_interruptible(zr->v4l_capq, + (zr->v4l_pend_tail == zr->v4l_pend_head)); + if (signal_pending(current)) + return -ERESTARTSYS; + + spin_lock_irqsave(&zr->spinlock, flags); + zr36057_set_memgrab(zr, 0); + spin_unlock_irqrestore(&zr->spinlock, flags); + + return 0; +} + +/***************************************************************************** + * * + * Set up the Buz-specific MJPEG part * + * * + *****************************************************************************/ + +static inline void +set_frame (struct zoran *zr, + int val) +{ + GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_FRAME], val); +} + +static void +set_videobus_dir (struct zoran *zr, + int val) +{ + switch (zr->card.type) { + case LML33: + case LML33R10: + if (!lml33dpath) + GPIO(zr, 5, val); + else + GPIO(zr, 5, 1); + break; + default: + GPIO(zr, zr->card.gpio[ZR_GPIO_VID_DIR], + 
zr->card.gpio_pol[ZR_GPIO_VID_DIR] ? !val : val); + break; + } +} + +static void +init_jpeg_queue (struct zoran *zr) +{ + int i; + + /* re-initialize DMA ring stuff */ + zr->jpg_que_head = 0; + zr->jpg_dma_head = 0; + zr->jpg_dma_tail = 0; + zr->jpg_que_tail = 0; + zr->jpg_seq_num = 0; + zr->JPEG_error = 0; + zr->num_errors = 0; + zr->jpg_err_seq = 0; + zr->jpg_err_shift = 0; + zr->jpg_queued_num = 0; + for (i = 0; i < zr->jpg_buffers.num_buffers; i++) { + zr->jpg_buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ + } + for (i = 0; i < BUZ_NUM_STAT_COM; i++) { + zr->stat_com[i] = cpu_to_le32(1); /* mark as unavailable to zr36057 */ + } +} + +static void +zr36057_set_jpg (struct zoran *zr, + enum zoran_codec_mode mode) +{ + struct tvnorm *tvn; + u32 reg; + + tvn = zr->timing; + + /* assert P_Reset, disable code transfer, deassert Active */ + btwrite(0, ZR36057_JPC); + + /* MJPEG compression mode */ + switch (mode) { + + case BUZ_MODE_MOTION_COMPRESS: + default: + reg = ZR36057_JMC_MJPGCmpMode; + break; + + case BUZ_MODE_MOTION_DECOMPRESS: + reg = ZR36057_JMC_MJPGExpMode; + reg |= ZR36057_JMC_SyncMstr; + /* RJ: The following is experimental - improves the output to screen */ + //if(zr->jpg_settings.VFIFO_FB) reg |= ZR36057_JMC_VFIFO_FB; // No, it doesn't. 
SM + break; + + case BUZ_MODE_STILL_COMPRESS: + reg = ZR36057_JMC_JPGCmpMode; + break; + + case BUZ_MODE_STILL_DECOMPRESS: + reg = ZR36057_JMC_JPGExpMode; + break; + + } + reg |= ZR36057_JMC_JPG; + if (zr->jpg_settings.field_per_buff == 1) + reg |= ZR36057_JMC_Fld_per_buff; + btwrite(reg, ZR36057_JMC); + + /* vertical */ + btor(ZR36057_VFEVCR_VSPol, ZR36057_VFEVCR); + reg = (6 << ZR36057_VSP_VsyncSize) | + (tvn->Ht << ZR36057_VSP_FrmTot); + btwrite(reg, ZR36057_VSP); + reg = ((zr->jpg_settings.img_y + tvn->VStart) << ZR36057_FVAP_NAY) | + (zr->jpg_settings.img_height << ZR36057_FVAP_PAY); + btwrite(reg, ZR36057_FVAP); + + /* horizontal */ + if (zr->card.vfe_pol.hsync_pol) + btor(ZR36057_VFEHCR_HSPol, ZR36057_VFEHCR); + else + btand(~ZR36057_VFEHCR_HSPol, ZR36057_VFEHCR); + reg = ((tvn->HSyncStart) << ZR36057_HSP_HsyncStart) | + (tvn->Wt << ZR36057_HSP_LineTot); + btwrite(reg, ZR36057_HSP); + reg = ((zr->jpg_settings.img_x + + tvn->HStart + 4) << ZR36057_FHAP_NAX) | + (zr->jpg_settings.img_width << ZR36057_FHAP_PAX); + btwrite(reg, ZR36057_FHAP); + + /* field process parameters */ + if (zr->jpg_settings.odd_even) + reg = ZR36057_FPP_Odd_Even; + else + reg = 0; + + btwrite(reg, ZR36057_FPP); + + /* Set proper VCLK Polarity, else colors will be wrong during playback */ + //btor(ZR36057_VFESPFR_VCLKPol, ZR36057_VFESPFR); + + /* code base address */ + reg = virt_to_bus(zr->stat_com); + btwrite(reg, ZR36057_JCBA); + + /* FIFO threshold (FIFO is 160. 
double words) */ + /* NOTE: decimal values here */ + switch (mode) { + + case BUZ_MODE_STILL_COMPRESS: + case BUZ_MODE_MOTION_COMPRESS: + if (zr->card.type != BUZ) + reg = 140; + else + reg = 60; + break; + + case BUZ_MODE_STILL_DECOMPRESS: + case BUZ_MODE_MOTION_DECOMPRESS: + reg = 20; + break; + + default: + reg = 80; + break; + + } + btwrite(reg, ZR36057_JCFT); + zr36057_adjust_vfe(zr, mode); + +} + +void +print_interrupts (struct zoran *zr) +{ + int res, noerr = 0; + + printk(KERN_INFO "%s: interrupts received:", ZR_DEVNAME(zr)); + if ((res = zr->field_counter) < -1 || res > 1) { + printk(KERN_CONT " FD:%d", res); + } + if ((res = zr->intr_counter_GIRQ1) != 0) { + printk(KERN_CONT " GIRQ1:%d", res); + noerr++; + } + if ((res = zr->intr_counter_GIRQ0) != 0) { + printk(KERN_CONT " GIRQ0:%d", res); + noerr++; + } + if ((res = zr->intr_counter_CodRepIRQ) != 0) { + printk(KERN_CONT " CodRepIRQ:%d", res); + noerr++; + } + if ((res = zr->intr_counter_JPEGRepIRQ) != 0) { + printk(KERN_CONT " JPEGRepIRQ:%d", res); + noerr++; + } + if (zr->JPEG_max_missed) { + printk(KERN_CONT " JPEG delays: max=%d min=%d", zr->JPEG_max_missed, + zr->JPEG_min_missed); + } + if (zr->END_event_missed) { + printk(KERN_CONT " ENDs missed: %d", zr->END_event_missed); + } + //if (zr->jpg_queued_num) { + printk(KERN_CONT " queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail, + zr->jpg_dma_tail, zr->jpg_dma_head, zr->jpg_que_head); + //} + if (!noerr) { + printk(KERN_CONT ": no interrupts detected."); + } + printk(KERN_CONT "\n"); +} + +void +clear_interrupt_counters (struct zoran *zr) +{ + zr->intr_counter_GIRQ1 = 0; + zr->intr_counter_GIRQ0 = 0; + zr->intr_counter_CodRepIRQ = 0; + zr->intr_counter_JPEGRepIRQ = 0; + zr->field_counter = 0; + zr->IRQ1_in = 0; + zr->IRQ1_out = 0; + zr->JPEG_in = 0; + zr->JPEG_out = 0; + zr->JPEG_0 = 0; + zr->JPEG_1 = 0; + zr->END_event_missed = 0; + zr->JPEG_missed = 0; + zr->JPEG_max_missed = 0; + zr->JPEG_min_missed = 0x7fffffff; +} + +static u32 
+count_reset_interrupt (struct zoran *zr) +{ + u32 isr; + + if ((isr = btread(ZR36057_ISR) & 0x78000000)) { + if (isr & ZR36057_ISR_GIRQ1) { + btwrite(ZR36057_ISR_GIRQ1, ZR36057_ISR); + zr->intr_counter_GIRQ1++; + } + if (isr & ZR36057_ISR_GIRQ0) { + btwrite(ZR36057_ISR_GIRQ0, ZR36057_ISR); + zr->intr_counter_GIRQ0++; + } + if (isr & ZR36057_ISR_CodRepIRQ) { + btwrite(ZR36057_ISR_CodRepIRQ, ZR36057_ISR); + zr->intr_counter_CodRepIRQ++; + } + if (isr & ZR36057_ISR_JPEGRepIRQ) { + btwrite(ZR36057_ISR_JPEGRepIRQ, ZR36057_ISR); + zr->intr_counter_JPEGRepIRQ++; + } + } + return isr; +} + +void +jpeg_start (struct zoran *zr) +{ + int reg; + + zr->frame_num = 0; + + /* deassert P_reset, disable code transfer, deassert Active */ + btwrite(ZR36057_JPC_P_Reset, ZR36057_JPC); + /* stop flushing the internal code buffer */ + btand(~ZR36057_MCTCR_CFlush, ZR36057_MCTCR); + /* enable code transfer */ + btor(ZR36057_JPC_CodTrnsEn, ZR36057_JPC); + + /* clear IRQs */ + btwrite(IRQ_MASK, ZR36057_ISR); + /* enable the JPEG IRQs */ + btwrite(zr->card.jpeg_int | + ZR36057_ICR_JPEGRepIRQ | + ZR36057_ICR_IntPinEn, + ZR36057_ICR); + + set_frame(zr, 0); // \FRAME + + /* set the JPEG codec guest ID */ + reg = (zr->card.gpcs[1] << ZR36057_JCGI_JPEGuestID) | + (0 << ZR36057_JCGI_JPEGuestReg); + btwrite(reg, ZR36057_JCGI); + + if (zr->card.video_vfe == CODEC_TYPE_ZR36016 && + zr->card.video_codec == CODEC_TYPE_ZR36050) { + /* Enable processing on the ZR36016 */ + if (zr->vfe) + zr36016_write(zr->vfe, 0, 1); + + /* load the address of the GO register in the ZR36050 latch */ + post_office_write(zr, 0, 0, 0); + } + + /* assert Active */ + btor(ZR36057_JPC_Active, ZR36057_JPC); + + /* enable the Go generation */ + btor(ZR36057_JMC_Go_en, ZR36057_JMC); + udelay(30); + + set_frame(zr, 1); // /FRAME + + dprintk(3, KERN_DEBUG "%s: jpeg_start\n", ZR_DEVNAME(zr)); +} + +void +zr36057_enable_jpg (struct zoran *zr, + enum zoran_codec_mode mode) +{ + struct vfe_settings cap; + int field_size = + 
zr->jpg_buffers.buffer_size / zr->jpg_settings.field_per_buff; + + zr->codec_mode = mode; + + cap.x = zr->jpg_settings.img_x; + cap.y = zr->jpg_settings.img_y; + cap.width = zr->jpg_settings.img_width; + cap.height = zr->jpg_settings.img_height; + cap.decimation = + zr->jpg_settings.HorDcm | (zr->jpg_settings.VerDcm << 8); + cap.quality = zr->jpg_settings.jpg_comp.quality; + + switch (mode) { + + case BUZ_MODE_MOTION_COMPRESS: { + struct jpeg_app_marker app; + struct jpeg_com_marker com; + + /* In motion compress mode, the decoder output must be enabled, and + * the video bus direction set to input. + */ + set_videobus_dir(zr, 0); + decoder_call(zr, video, s_stream, 1); + encoder_call(zr, video, s_routing, 0, 0, 0); + + /* Take the JPEG codec and the VFE out of sleep */ + jpeg_codec_sleep(zr, 0); + + /* set JPEG app/com marker */ + app.appn = zr->jpg_settings.jpg_comp.APPn; + app.len = zr->jpg_settings.jpg_comp.APP_len; + memcpy(app.data, zr->jpg_settings.jpg_comp.APP_data, 60); + zr->codec->control(zr->codec, CODEC_S_JPEG_APP_DATA, + sizeof(struct jpeg_app_marker), &app); + + com.len = zr->jpg_settings.jpg_comp.COM_len; + memcpy(com.data, zr->jpg_settings.jpg_comp.COM_data, 60); + zr->codec->control(zr->codec, CODEC_S_JPEG_COM_DATA, + sizeof(struct jpeg_com_marker), &com); + + /* Setup the JPEG codec */ + zr->codec->control(zr->codec, CODEC_S_JPEG_TDS_BYTE, + sizeof(int), &field_size); + zr->codec->set_video(zr->codec, zr->timing, &cap, + &zr->card.vfe_pol); + zr->codec->set_mode(zr->codec, CODEC_DO_COMPRESSION); + + /* Setup the VFE */ + if (zr->vfe) { + zr->vfe->control(zr->vfe, CODEC_S_JPEG_TDS_BYTE, + sizeof(int), &field_size); + zr->vfe->set_video(zr->vfe, zr->timing, &cap, + &zr->card.vfe_pol); + zr->vfe->set_mode(zr->vfe, CODEC_DO_COMPRESSION); + } + + init_jpeg_queue(zr); + zr36057_set_jpg(zr, mode); // \P_Reset, ... 
Video param, FIFO + + clear_interrupt_counters(zr); + dprintk(2, KERN_INFO "%s: enable_jpg(MOTION_COMPRESS)\n", + ZR_DEVNAME(zr)); + break; + } + + case BUZ_MODE_MOTION_DECOMPRESS: + /* In motion decompression mode, the decoder output must be disabled, and + * the video bus direction set to output. + */ + decoder_call(zr, video, s_stream, 0); + set_videobus_dir(zr, 1); + encoder_call(zr, video, s_routing, 1, 0, 0); + + /* Take the JPEG codec and the VFE out of sleep */ + jpeg_codec_sleep(zr, 0); + /* Setup the VFE */ + if (zr->vfe) { + zr->vfe->set_video(zr->vfe, zr->timing, &cap, + &zr->card.vfe_pol); + zr->vfe->set_mode(zr->vfe, CODEC_DO_EXPANSION); + } + /* Setup the JPEG codec */ + zr->codec->set_video(zr->codec, zr->timing, &cap, + &zr->card.vfe_pol); + zr->codec->set_mode(zr->codec, CODEC_DO_EXPANSION); + + init_jpeg_queue(zr); + zr36057_set_jpg(zr, mode); // \P_Reset, ... Video param, FIFO + + clear_interrupt_counters(zr); + dprintk(2, KERN_INFO "%s: enable_jpg(MOTION_DECOMPRESS)\n", + ZR_DEVNAME(zr)); + break; + + case BUZ_MODE_IDLE: + default: + /* shut down processing */ + btand(~(zr->card.jpeg_int | ZR36057_ICR_JPEGRepIRQ), + ZR36057_ICR); + btwrite(zr->card.jpeg_int | ZR36057_ICR_JPEGRepIRQ, + ZR36057_ISR); + btand(~ZR36057_JMC_Go_en, ZR36057_JMC); // \Go_en + + msleep(50); + + set_videobus_dir(zr, 0); + set_frame(zr, 1); // /FRAME + btor(ZR36057_MCTCR_CFlush, ZR36057_MCTCR); // /CFlush + btwrite(0, ZR36057_JPC); // \P_Reset,\CodTrnsEn,\Active + btand(~ZR36057_JMC_VFIFO_FB, ZR36057_JMC); + btand(~ZR36057_JMC_SyncMstr, ZR36057_JMC); + jpeg_codec_reset(zr); + jpeg_codec_sleep(zr, 1); + zr36057_adjust_vfe(zr, mode); + + decoder_call(zr, video, s_stream, 1); + encoder_call(zr, video, s_routing, 0, 0, 0); + + dprintk(2, KERN_INFO "%s: enable_jpg(IDLE)\n", ZR_DEVNAME(zr)); + break; + + } +} + +/* when this is called the spinlock must be held */ +void +zoran_feed_stat_com (struct zoran *zr) +{ + /* move frames from pending queue to DMA */ + + int frame, i, 
max_stat_com; + + max_stat_com = + (zr->jpg_settings.TmpDcm == + 1) ? BUZ_NUM_STAT_COM : (BUZ_NUM_STAT_COM >> 1); + + while ((zr->jpg_dma_head - zr->jpg_dma_tail) < max_stat_com && + zr->jpg_dma_head < zr->jpg_que_head) { + + frame = zr->jpg_pend[zr->jpg_dma_head & BUZ_MASK_FRAME]; + if (zr->jpg_settings.TmpDcm == 1) { + /* fill 1 stat_com entry */ + i = (zr->jpg_dma_head - + zr->jpg_err_shift) & BUZ_MASK_STAT_COM; + if (!(zr->stat_com[i] & cpu_to_le32(1))) + break; + zr->stat_com[i] = + cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus); + } else { + /* fill 2 stat_com entries */ + i = ((zr->jpg_dma_head - + zr->jpg_err_shift) & 1) * 2; + if (!(zr->stat_com[i] & cpu_to_le32(1))) + break; + zr->stat_com[i] = + cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus); + zr->stat_com[i + 1] = + cpu_to_le32(zr->jpg_buffers.buffer[frame].jpg.frag_tab_bus); + } + zr->jpg_buffers.buffer[frame].state = BUZ_STATE_DMA; + zr->jpg_dma_head++; + + } + if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) + zr->jpg_queued_num++; +} + +/* when this is called the spinlock must be held */ +static void +zoran_reap_stat_com (struct zoran *zr) +{ + /* move frames from DMA queue to done queue */ + + int i; + u32 stat_com; + unsigned int seq; + unsigned int dif; + struct zoran_buffer *buffer; + int frame; + + /* In motion decompress we don't have a hardware frame counter, + * we just count the interrupts here */ + + if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) { + zr->jpg_seq_num++; + } + while (zr->jpg_dma_tail < zr->jpg_dma_head) { + if (zr->jpg_settings.TmpDcm == 1) + i = (zr->jpg_dma_tail - + zr->jpg_err_shift) & BUZ_MASK_STAT_COM; + else + i = ((zr->jpg_dma_tail - + zr->jpg_err_shift) & 1) * 2 + 1; + + stat_com = le32_to_cpu(zr->stat_com[i]); + + if ((stat_com & 1) == 0) { + return; + } + frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME]; + buffer = &zr->jpg_buffers.buffer[frame]; + v4l2_get_timestamp(&buffer->bs.timestamp); + + if (zr->codec_mode == 
BUZ_MODE_MOTION_COMPRESS) { + buffer->bs.length = (stat_com & 0x7fffff) >> 1; + + /* update sequence number with the help of the counter in stat_com */ + + seq = ((stat_com >> 24) + zr->jpg_err_seq) & 0xff; + dif = (seq - zr->jpg_seq_num) & 0xff; + zr->jpg_seq_num += dif; + } else { + buffer->bs.length = 0; + } + buffer->bs.seq = + zr->jpg_settings.TmpDcm == + 2 ? (zr->jpg_seq_num >> 1) : zr->jpg_seq_num; + buffer->state = BUZ_STATE_DONE; + + zr->jpg_dma_tail++; + } +} + +static void zoran_restart(struct zoran *zr) +{ + /* Now the stat_comm buffer is ready for restart */ + unsigned int status = 0; + int mode; + + if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) { + decoder_call(zr, video, g_input_status, &status); + mode = CODEC_DO_COMPRESSION; + } else { + status = V4L2_IN_ST_NO_SIGNAL; + mode = CODEC_DO_EXPANSION; + } + if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS || + !(status & V4L2_IN_ST_NO_SIGNAL)) { + /********** RESTART code *************/ + jpeg_codec_reset(zr); + zr->codec->set_mode(zr->codec, mode); + zr36057_set_jpg(zr, zr->codec_mode); + jpeg_start(zr); + + if (zr->num_errors <= 8) + dprintk(2, KERN_INFO "%s: Restart\n", + ZR_DEVNAME(zr)); + + zr->JPEG_missed = 0; + zr->JPEG_error = 2; + /********** End RESTART code ***********/ + } +} + +static void +error_handler (struct zoran *zr, + u32 astat, + u32 stat) +{ + int i; + + /* This is JPEG error handling part */ + if (zr->codec_mode != BUZ_MODE_MOTION_COMPRESS && + zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS) { + return; + } + + if ((stat & 1) == 0 && + zr->codec_mode == BUZ_MODE_MOTION_COMPRESS && + zr->jpg_dma_tail - zr->jpg_que_tail >= zr->jpg_buffers.num_buffers) { + /* No free buffers... 
*/ + zoran_reap_stat_com(zr); + zoran_feed_stat_com(zr); + wake_up_interruptible(&zr->jpg_capq); + zr->JPEG_missed = 0; + return; + } + + if (zr->JPEG_error == 1) { + zoran_restart(zr); + return; + } + + /* + * First entry: error just happened during normal operation + * + * In BUZ_MODE_MOTION_COMPRESS: + * + * Possible glitch in TV signal. In this case we should + * stop the codec and wait for good quality signal before + * restarting it to avoid further problems + * + * In BUZ_MODE_MOTION_DECOMPRESS: + * + * Bad JPEG frame: we have to mark it as processed (codec crashed + * and was not able to do it itself), and to remove it from queue. + */ + btand(~ZR36057_JMC_Go_en, ZR36057_JMC); + udelay(1); + stat = stat | (post_office_read(zr, 7, 0) & 3) << 8; + btwrite(0, ZR36057_JPC); + btor(ZR36057_MCTCR_CFlush, ZR36057_MCTCR); + jpeg_codec_reset(zr); + jpeg_codec_sleep(zr, 1); + zr->JPEG_error = 1; + zr->num_errors++; + + /* Report error */ + if (zr36067_debug > 1 && zr->num_errors <= 8) { + long frame; + int j; + + frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME]; + printk(KERN_ERR + "%s: JPEG error stat=0x%08x(0x%08x) queue_state=%ld/%ld/%ld/%ld seq=%ld frame=%ld. Codec stopped. 
", + ZR_DEVNAME(zr), stat, zr->last_isr, + zr->jpg_que_tail, zr->jpg_dma_tail, + zr->jpg_dma_head, zr->jpg_que_head, + zr->jpg_seq_num, frame); + printk(KERN_INFO "stat_com frames:"); + for (j = 0; j < BUZ_NUM_STAT_COM; j++) { + for (i = 0; i < zr->jpg_buffers.num_buffers; i++) { + if (le32_to_cpu(zr->stat_com[j]) == zr->jpg_buffers.buffer[i].jpg.frag_tab_bus) + printk(KERN_CONT "% d->%d", j, i); + } + } + printk(KERN_CONT "\n"); + } + /* Find an entry in stat_com and rotate contents */ + if (zr->jpg_settings.TmpDcm == 1) + i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; + else + i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2; + if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) { + /* Mimic zr36067 operation */ + zr->stat_com[i] |= cpu_to_le32(1); + if (zr->jpg_settings.TmpDcm != 1) + zr->stat_com[i + 1] |= cpu_to_le32(1); + /* Refill */ + zoran_reap_stat_com(zr); + zoran_feed_stat_com(zr); + wake_up_interruptible(&zr->jpg_capq); + /* Find an entry in stat_com again after refill */ + if (zr->jpg_settings.TmpDcm == 1) + i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; + else + i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2; + } + if (i) { + /* Rotate stat_comm entries to make current entry first */ + int j; + __le32 bus_addr[BUZ_NUM_STAT_COM]; + + /* Here we are copying the stat_com array, which + * is already in little endian format, so + * no endian conversions here + */ + memcpy(bus_addr, zr->stat_com, sizeof(bus_addr)); + + for (j = 0; j < BUZ_NUM_STAT_COM; j++) + zr->stat_com[j] = bus_addr[(i + j) & BUZ_MASK_STAT_COM]; + + zr->jpg_err_shift += i; + zr->jpg_err_shift &= BUZ_MASK_STAT_COM; + } + if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) + zr->jpg_err_seq = zr->jpg_seq_num; /* + 1; */ + zoran_restart(zr); +} + +irqreturn_t +zoran_irq (int irq, + void *dev_id) +{ + u32 stat, astat; + int count; + struct zoran *zr; + unsigned long flags; + + zr = dev_id; + count = 0; + + if (zr->testing) { + /* Testing interrupts */ + 
spin_lock_irqsave(&zr->spinlock, flags); + while ((stat = count_reset_interrupt(zr))) { + if (count++ > 100) { + btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR); + dprintk(1, + KERN_ERR + "%s: IRQ lockup while testing, isr=0x%08x, cleared int mask\n", + ZR_DEVNAME(zr), stat); + wake_up_interruptible(&zr->test_q); + } + } + zr->last_isr = stat; + spin_unlock_irqrestore(&zr->spinlock, flags); + return IRQ_HANDLED; + } + + spin_lock_irqsave(&zr->spinlock, flags); + while (1) { + /* get/clear interrupt status bits */ + stat = count_reset_interrupt(zr); + astat = stat & IRQ_MASK; + if (!astat) { + break; + } + dprintk(4, + KERN_DEBUG + "zoran_irq: astat: 0x%08x, mask: 0x%08x\n", + astat, btread(ZR36057_ICR)); + if (astat & zr->card.vsync_int) { // SW + + if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS || + zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) { + /* count missed interrupts */ + zr->JPEG_missed++; + } + //post_office_read(zr,1,0); + /* Interrupts may still happen when + * zr->v4l_memgrab_active is switched off. + * We simply ignore them */ + + if (zr->v4l_memgrab_active) { + /* A lot more checks should be here ... 
*/ + if ((btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SnapShot) == 0) + dprintk(1, + KERN_WARNING + "%s: BuzIRQ with SnapShot off ???\n", + ZR_DEVNAME(zr)); + + if (zr->v4l_grab_frame != NO_GRAB_ACTIVE) { + /* There is a grab on a frame going on, check if it has finished */ + if ((btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_FrameGrab) == 0) { + /* it is finished, notify the user */ + + zr->v4l_buffers.buffer[zr->v4l_grab_frame].state = BUZ_STATE_DONE; + zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.seq = zr->v4l_grab_seq; + v4l2_get_timestamp(&zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.timestamp); + zr->v4l_grab_frame = NO_GRAB_ACTIVE; + zr->v4l_pend_tail++; + } + } + + if (zr->v4l_grab_frame == NO_GRAB_ACTIVE) + wake_up_interruptible(&zr->v4l_capq); + + /* Check if there is another grab queued */ + + if (zr->v4l_grab_frame == NO_GRAB_ACTIVE && + zr->v4l_pend_tail != zr->v4l_pend_head) { + int frame = zr->v4l_pend[zr->v4l_pend_tail & V4L_MASK_FRAME]; + u32 reg; + + zr->v4l_grab_frame = frame; + + /* Set zr36057 video front end and enable video */ + + /* Buffer address */ + + reg = zr->v4l_buffers.buffer[frame].v4l.fbuffer_bus; + btwrite(reg, ZR36057_VDTR); + if (zr->v4l_settings.height > BUZ_MAX_HEIGHT / 2) + reg += zr->v4l_settings.bytesperline; + btwrite(reg, ZR36057_VDBR); + + /* video stride, status, and frame grab register */ + reg = 0; + if (zr->v4l_settings.height > BUZ_MAX_HEIGHT / 2) + reg += zr->v4l_settings.bytesperline; + reg = (reg << ZR36057_VSSFGR_DispStride); + reg |= ZR36057_VSSFGR_VidOvf; + reg |= ZR36057_VSSFGR_SnapShot; + reg |= ZR36057_VSSFGR_FrameGrab; + btwrite(reg, ZR36057_VSSFGR); + + btor(ZR36057_VDCR_VidEn, + ZR36057_VDCR); + } + } + + /* even if we don't grab, we do want to increment + * the sequence counter to see lost frames */ + zr->v4l_grab_seq++; + } +#if (IRQ_MASK & ZR36057_ISR_CodRepIRQ) + if (astat & ZR36057_ISR_CodRepIRQ) { + zr->intr_counter_CodRepIRQ++; + IDEBUG(printk(KERN_DEBUG "%s: ZR36057_ISR_CodRepIRQ\n", + ZR_DEVNAME(zr))); + 
btand(~ZR36057_ICR_CodRepIRQ, ZR36057_ICR); + } +#endif /* (IRQ_MASK & ZR36057_ISR_CodRepIRQ) */ + +#if (IRQ_MASK & ZR36057_ISR_JPEGRepIRQ) + if ((astat & ZR36057_ISR_JPEGRepIRQ) && + (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS || + zr->codec_mode == BUZ_MODE_MOTION_COMPRESS)) { + if (zr36067_debug > 1 && (!zr->frame_num || zr->JPEG_error)) { + char sv[BUZ_NUM_STAT_COM + 1]; + int i; + + printk(KERN_INFO + "%s: first frame ready: state=0x%08x odd_even=%d field_per_buff=%d delay=%d\n", + ZR_DEVNAME(zr), stat, + zr->jpg_settings.odd_even, + zr->jpg_settings.field_per_buff, + zr->JPEG_missed); + + for (i = 0; i < BUZ_NUM_STAT_COM; i++) + sv[i] = le32_to_cpu(zr->stat_com[i]) & 1 ? '1' : '0'; + sv[BUZ_NUM_STAT_COM] = 0; + printk(KERN_INFO + "%s: stat_com=%s queue_state=%ld/%ld/%ld/%ld\n", + ZR_DEVNAME(zr), sv, + zr->jpg_que_tail, + zr->jpg_dma_tail, + zr->jpg_dma_head, + zr->jpg_que_head); + } else { + /* Get statistics */ + if (zr->JPEG_missed > zr->JPEG_max_missed) + zr->JPEG_max_missed = zr->JPEG_missed; + if (zr->JPEG_missed < zr->JPEG_min_missed) + zr->JPEG_min_missed = zr->JPEG_missed; + } + + if (zr36067_debug > 2 && zr->frame_num < 6) { + int i; + + printk(KERN_INFO "%s: seq=%ld stat_com:", + ZR_DEVNAME(zr), zr->jpg_seq_num); + for (i = 0; i < 4; i++) { + printk(KERN_CONT " %08x", + le32_to_cpu(zr->stat_com[i])); + } + printk(KERN_CONT "\n"); + } + zr->frame_num++; + zr->JPEG_missed = 0; + zr->JPEG_error = 0; + zoran_reap_stat_com(zr); + zoran_feed_stat_com(zr); + wake_up_interruptible(&zr->jpg_capq); + } +#endif /* (IRQ_MASK & ZR36057_ISR_JPEGRepIRQ) */ + + /* DATERR, too many fields missed, error processing */ + if ((astat & zr->card.jpeg_int) || + zr->JPEG_missed > 25 || + zr->JPEG_error == 1 || + ((zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS) && + (zr->frame_num && (zr->JPEG_missed > zr->jpg_settings.field_per_buff)))) { + error_handler(zr, astat, stat); + } + + count++; + if (count > 10) { + dprintk(2, KERN_WARNING "%s: irq loop %d\n", + ZR_DEVNAME(zr), 
count); + if (count > 20) { + btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR); + dprintk(2, + KERN_ERR + "%s: IRQ lockup, cleared int mask\n", + ZR_DEVNAME(zr)); + break; + } + } + zr->last_isr = stat; + } + spin_unlock_irqrestore(&zr->spinlock, flags); + + return IRQ_HANDLED; +} + +void +zoran_set_pci_master (struct zoran *zr, + int set_master) +{ + if (set_master) { + pci_set_master(zr->pci_dev); + } else { + u16 command; + + pci_read_config_word(zr->pci_dev, PCI_COMMAND, &command); + command &= ~PCI_COMMAND_MASTER; + pci_write_config_word(zr->pci_dev, PCI_COMMAND, command); + } +} + +void +zoran_init_hardware (struct zoran *zr) +{ + /* Enable bus-mastering */ + zoran_set_pci_master(zr, 1); + + /* Initialize the board */ + if (zr->card.init) { + zr->card.init(zr); + } + + decoder_call(zr, core, init, 0); + decoder_call(zr, video, s_std, zr->norm); + decoder_call(zr, video, s_routing, + zr->card.input[zr->input].muxsel, 0, 0); + + encoder_call(zr, core, init, 0); + encoder_call(zr, video, s_std_output, zr->norm); + encoder_call(zr, video, s_routing, 0, 0, 0); + + /* toggle JPEG codec sleep to sync PLL */ + jpeg_codec_sleep(zr, 1); + jpeg_codec_sleep(zr, 0); + + /* + * set individual interrupt enables (without GIRQ1) + * but don't global enable until zoran_open() + */ + zr36057_init_vfe(zr); + + zr36057_enable_jpg(zr, BUZ_MODE_IDLE); + + btwrite(IRQ_MASK, ZR36057_ISR); // Clears interrupts +} + +void +zr36057_restart (struct zoran *zr) +{ + btwrite(0, ZR36057_SPGPPCR); + mdelay(1); + btor(ZR36057_SPGPPCR_SoftReset, ZR36057_SPGPPCR); + mdelay(1); + + /* assert P_Reset */ + btwrite(0, ZR36057_JPC); + /* set up GPIO direction - all output */ + btwrite(ZR36057_SPGPPCR_SoftReset | 0, ZR36057_SPGPPCR); + + /* set up GPIO pins and guest bus timing */ + btwrite((0x81 << 24) | 0x8888, ZR36057_GPPGCR1); +} + +/* + * initialize video front end + */ + +static void +zr36057_init_vfe (struct zoran *zr) +{ + u32 reg; + + reg = btread(ZR36057_VFESPFR); + reg |= 
ZR36057_VFESPFR_LittleEndian; + reg &= ~ZR36057_VFESPFR_VCLKPol; + reg |= ZR36057_VFESPFR_ExtFl; + reg |= ZR36057_VFESPFR_TopField; + btwrite(reg, ZR36057_VFESPFR); + reg = btread(ZR36057_VDCR); + if (pci_pci_problems & PCIPCI_TRITON) + // || zr->revision < 1) // Revision 1 has also Triton support + reg &= ~ZR36057_VDCR_Triton; + else + reg |= ZR36057_VDCR_Triton; + btwrite(reg, ZR36057_VDCR); +} diff --git a/drivers/staging/media/zoran/zoran_device.h b/drivers/staging/media/zoran/zoran_device.h new file mode 100644 index 000000000000..a507aaad4ebb --- /dev/null +++ b/drivers/staging/media/zoran/zoran_device.h @@ -0,0 +1,91 @@ +/* + * Zoran zr36057/zr36067 PCI controller driver, for the + * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux + * Media Labs LML33/LML33R10. + * + * This part handles card-specific data and detection + * + * Copyright (C) 2000 Serguei Miridonov + * + * Currently maintained by: + * Ronald Bultje + * Laurent Pinchart + * Mailinglist + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ZORAN_DEVICE_H__ +#define __ZORAN_DEVICE_H__ + +/* general purpose I/O */ +extern void GPIO(struct zoran *zr, + int bit, + unsigned int value); + +/* codec (or actually: guest bus) access */ +extern int post_office_wait(struct zoran *zr); +extern int post_office_write(struct zoran *zr, + unsigned guest, + unsigned reg, + unsigned value); +extern int post_office_read(struct zoran *zr, + unsigned guest, + unsigned reg); + +extern void detect_guest_activity(struct zoran *zr); + +extern void jpeg_codec_sleep(struct zoran *zr, + int sleep); +extern int jpeg_codec_reset(struct zoran *zr); + +/* zr360x7 access to raw capture */ +extern void zr36057_overlay(struct zoran *zr, + int on); +extern void write_overlay_mask(struct zoran_fh *fh, + struct v4l2_clip *vp, + int count); +extern void zr36057_set_memgrab(struct zoran *zr, + int mode); +extern int wait_grab_pending(struct zoran *zr); + +/* interrupts */ +extern void print_interrupts(struct zoran *zr); +extern void clear_interrupt_counters(struct zoran *zr); +extern irqreturn_t zoran_irq(int irq, void *dev_id); + +/* JPEG codec access */ +extern void jpeg_start(struct zoran *zr); +extern void zr36057_enable_jpg(struct zoran *zr, + enum zoran_codec_mode mode); +extern void zoran_feed_stat_com(struct zoran *zr); + +/* general */ +extern void zoran_set_pci_master(struct zoran *zr, + int set_master); +extern void zoran_init_hardware(struct zoran *zr); +extern void zr36057_restart(struct zoran *zr); + +extern const struct zoran_format zoran_formats[]; + +extern int v4l_nbufs; +extern int v4l_bufsize; +extern int jpg_nbufs; +extern int jpg_bufsize; +extern int pass_through; + +/* i2c */ +#define decoder_call(zr, o, f, args...) \ + v4l2_subdev_call(zr->decoder, o, f, ##args) +#define encoder_call(zr, o, f, args...) 
\ + v4l2_subdev_call(zr->encoder, o, f, ##args) + +#endif /* __ZORAN_DEVICE_H__ */ diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c new file mode 100644 index 000000000000..14f9c0e26a1c --- /dev/null +++ b/drivers/staging/media/zoran/zoran_driver.c @@ -0,0 +1,2849 @@ +/* + * Zoran zr36057/zr36067 PCI controller driver, for the + * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux + * Media Labs LML33/LML33R10. + * + * Copyright (C) 2000 Serguei Miridonov + * + * Changes for BUZ by Wolfgang Scherr + * + * Changes for DC10/DC30 by Laurent Pinchart + * + * Changes for LML33R10 by Maxim Yevtyushkin + * + * Changes for videodev2/v4l2 by Ronald Bultje + * + * Based on + * + * Miro DC10 driver + * Copyright (C) 1999 Wolfgang Scherr + * + * Iomega Buz driver version 1.0 + * Copyright (C) 1999 Rainer Johanni + * + * buz.0.0.3 + * Copyright (C) 1998 Dave Perks + * + * bttv - Bt848 frame grabber driver + * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) + * & Marcus Metzler (mocm@thp.uni-koeln.de) + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include "videocodec.h" + +#include +#include +#include +#include + +#include +#include "zoran.h" +#include "zoran_device.h" +#include "zoran_card.h" + + +const struct zoran_format zoran_formats[] = { + { + .name = "15-bit RGB LE", + .fourcc = V4L2_PIX_FMT_RGB555, + .colorspace = V4L2_COLORSPACE_SRGB, + .depth = 15, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif| + ZR36057_VFESPFR_LittleEndian, + }, { + .name = "15-bit RGB BE", + .fourcc = V4L2_PIX_FMT_RGB555X, + .colorspace = V4L2_COLORSPACE_SRGB, + .depth = 15, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif, + }, { + .name = "16-bit RGB LE", + .fourcc = V4L2_PIX_FMT_RGB565, + .colorspace = V4L2_COLORSPACE_SRGB, + .depth = 16, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif| + ZR36057_VFESPFR_LittleEndian, + }, { + .name = "16-bit RGB BE", + .fourcc = V4L2_PIX_FMT_RGB565X, + .colorspace = V4L2_COLORSPACE_SRGB, + .depth = 16, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif, + }, { + .name = "24-bit RGB", + .fourcc = V4L2_PIX_FMT_BGR24, + .colorspace = V4L2_COLORSPACE_SRGB, + .depth = 24, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_Pack24, + }, { + .name = "32-bit RGB LE", + .fourcc = V4L2_PIX_FMT_BGR32, + .colorspace = V4L2_COLORSPACE_SRGB, + .depth = 32, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_LittleEndian, + }, { + .name = "32-bit RGB BE", + .fourcc = V4L2_PIX_FMT_RGB32, + .colorspace = V4L2_COLORSPACE_SRGB, + .depth = 32, + .flags = 
ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_RGB888, + }, { + .name = "4:2:2, packed, YUYV", + .fourcc = V4L2_PIX_FMT_YUYV, + .colorspace = V4L2_COLORSPACE_SMPTE170M, + .depth = 16, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_YUV422, + }, { + .name = "4:2:2, packed, UYVY", + .fourcc = V4L2_PIX_FMT_UYVY, + .colorspace = V4L2_COLORSPACE_SMPTE170M, + .depth = 16, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_OVERLAY, + .vfespfr = ZR36057_VFESPFR_YUV422|ZR36057_VFESPFR_LittleEndian, + }, { + .name = "Hardware-encoded Motion-JPEG", + .fourcc = V4L2_PIX_FMT_MJPEG, + .colorspace = V4L2_COLORSPACE_SMPTE170M, + .depth = 0, + .flags = ZORAN_FORMAT_CAPTURE | + ZORAN_FORMAT_PLAYBACK | + ZORAN_FORMAT_COMPRESSED, + } +}; +#define NUM_FORMATS ARRAY_SIZE(zoran_formats) + + /* small helper function for calculating buffersizes for v4l2 + * we calculate the nearest higher power-of-two, which + * will be the recommended buffersize */ +static __u32 +zoran_v4l2_calc_bufsize (struct zoran_jpg_settings *settings) +{ + __u8 div = settings->VerDcm * settings->HorDcm * settings->TmpDcm; + __u32 num = (1024 * 512) / (div); + __u32 result = 2; + + num--; + while (num) { + num >>= 1; + result <<= 1; + } + + if (result > jpg_bufsize) + return jpg_bufsize; + if (result < 8192) + return 8192; + return result; +} + +/* forward references */ +static void v4l_fbuffer_free(struct zoran_fh *fh); +static void jpg_fbuffer_free(struct zoran_fh *fh); + +/* Set mapping mode */ +static void map_mode_raw(struct zoran_fh *fh) +{ + fh->map_mode = ZORAN_MAP_MODE_RAW; + fh->buffers.buffer_size = v4l_bufsize; + fh->buffers.num_buffers = v4l_nbufs; +} +static void map_mode_jpg(struct zoran_fh *fh, int play) +{ + fh->map_mode = play ? 
ZORAN_MAP_MODE_JPG_PLAY : ZORAN_MAP_MODE_JPG_REC; + fh->buffers.buffer_size = jpg_bufsize; + fh->buffers.num_buffers = jpg_nbufs; +} +static inline const char *mode_name(enum zoran_map_mode mode) +{ + return mode == ZORAN_MAP_MODE_RAW ? "V4L" : "JPG"; +} + +/* + * Allocate the V4L grab buffers + * + * These have to be pysically contiguous. + */ + +static int v4l_fbuffer_alloc(struct zoran_fh *fh) +{ + struct zoran *zr = fh->zr; + int i, off; + unsigned char *mem; + + for (i = 0; i < fh->buffers.num_buffers; i++) { + if (fh->buffers.buffer[i].v4l.fbuffer) + dprintk(2, + KERN_WARNING + "%s: %s - buffer %d already allocated!?\n", + ZR_DEVNAME(zr), __func__, i); + + //udelay(20); + mem = kmalloc(fh->buffers.buffer_size, + GFP_KERNEL | __GFP_NOWARN); + if (!mem) { + dprintk(1, + KERN_ERR + "%s: %s - kmalloc for V4L buf %d failed\n", + ZR_DEVNAME(zr), __func__, i); + v4l_fbuffer_free(fh); + return -ENOBUFS; + } + fh->buffers.buffer[i].v4l.fbuffer = mem; + fh->buffers.buffer[i].v4l.fbuffer_phys = virt_to_phys(mem); + fh->buffers.buffer[i].v4l.fbuffer_bus = virt_to_bus(mem); + for (off = 0; off < fh->buffers.buffer_size; + off += PAGE_SIZE) + SetPageReserved(virt_to_page(mem + off)); + dprintk(4, + KERN_INFO + "%s: %s - V4L frame %d mem %p (bus: 0x%llx)\n", + ZR_DEVNAME(zr), __func__, i, mem, + (unsigned long long)virt_to_bus(mem)); + } + + fh->buffers.allocated = 1; + + return 0; +} + +/* free the V4L grab buffers */ +static void v4l_fbuffer_free(struct zoran_fh *fh) +{ + struct zoran *zr = fh->zr; + int i, off; + unsigned char *mem; + + dprintk(4, KERN_INFO "%s: %s\n", ZR_DEVNAME(zr), __func__); + + for (i = 0; i < fh->buffers.num_buffers; i++) { + if (!fh->buffers.buffer[i].v4l.fbuffer) + continue; + + mem = fh->buffers.buffer[i].v4l.fbuffer; + for (off = 0; off < fh->buffers.buffer_size; + off += PAGE_SIZE) + ClearPageReserved(virt_to_page(mem + off)); + kfree(fh->buffers.buffer[i].v4l.fbuffer); + fh->buffers.buffer[i].v4l.fbuffer = NULL; + } + + fh->buffers.allocated 
= 0; +} + +/* + * Allocate the MJPEG grab buffers. + * + * If a Natoma chipset is present and this is a revision 1 zr36057, + * each MJPEG buffer needs to be physically contiguous. + * (RJ: This statement is from Dave Perks' original driver, + * I could never check it because I have a zr36067) + * + * RJ: The contents grab buffers needs never be accessed in the driver. + * Therefore there is no need to allocate them with vmalloc in order + * to get a contiguous virtual memory space. + * I don't understand why many other drivers first allocate them with + * vmalloc (which uses internally also get_zeroed_page, but delivers you + * virtual addresses) and then again have to make a lot of efforts + * to get the physical address. + * + * Ben Capper: + * On big-endian architectures (such as ppc) some extra steps + * are needed. When reading and writing to the stat_com array + * and fragment buffers, the device expects to see little- + * endian values. The use of cpu_to_le32() and le32_to_cpu() + * in this function (and one or two others in zoran_device.c) + * ensure that these values are always stored in little-endian + * form, regardless of architecture. The zr36057 does Very Bad + * Things on big endian architectures if the stat_com array + * and fragment buffers are not little-endian. 
+ */ + +static int jpg_fbuffer_alloc(struct zoran_fh *fh) +{ + struct zoran *zr = fh->zr; + int i, j, off; + u8 *mem; + + for (i = 0; i < fh->buffers.num_buffers; i++) { + if (fh->buffers.buffer[i].jpg.frag_tab) + dprintk(2, + KERN_WARNING + "%s: %s - buffer %d already allocated!?\n", + ZR_DEVNAME(zr), __func__, i); + + /* Allocate fragment table for this buffer */ + + mem = (void *)get_zeroed_page(GFP_KERNEL); + if (!mem) { + dprintk(1, + KERN_ERR + "%s: %s - get_zeroed_page (frag_tab) failed for buffer %d\n", + ZR_DEVNAME(zr), __func__, i); + jpg_fbuffer_free(fh); + return -ENOBUFS; + } + fh->buffers.buffer[i].jpg.frag_tab = (__le32 *)mem; + fh->buffers.buffer[i].jpg.frag_tab_bus = virt_to_bus(mem); + + if (fh->buffers.need_contiguous) { + mem = kmalloc(fh->buffers.buffer_size, GFP_KERNEL); + if (mem == NULL) { + dprintk(1, + KERN_ERR + "%s: %s - kmalloc failed for buffer %d\n", + ZR_DEVNAME(zr), __func__, i); + jpg_fbuffer_free(fh); + return -ENOBUFS; + } + fh->buffers.buffer[i].jpg.frag_tab[0] = + cpu_to_le32(virt_to_bus(mem)); + fh->buffers.buffer[i].jpg.frag_tab[1] = + cpu_to_le32((fh->buffers.buffer_size >> 1) | 1); + for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE) + SetPageReserved(virt_to_page(mem + off)); + } else { + /* jpg_bufsize is already page aligned */ + for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) { + mem = (void *)get_zeroed_page(GFP_KERNEL); + if (mem == NULL) { + dprintk(1, + KERN_ERR + "%s: %s - get_zeroed_page failed for buffer %d\n", + ZR_DEVNAME(zr), __func__, i); + jpg_fbuffer_free(fh); + return -ENOBUFS; + } + + fh->buffers.buffer[i].jpg.frag_tab[2 * j] = + cpu_to_le32(virt_to_bus(mem)); + fh->buffers.buffer[i].jpg.frag_tab[2 * j + 1] = + cpu_to_le32((PAGE_SIZE >> 2) << 1); + SetPageReserved(virt_to_page(mem)); + } + + fh->buffers.buffer[i].jpg.frag_tab[2 * j - 1] |= cpu_to_le32(1); + } + } + + dprintk(4, + KERN_DEBUG "%s: %s - %d KB allocated\n", + ZR_DEVNAME(zr), __func__, + (fh->buffers.num_buffers * 
fh->buffers.buffer_size) >> 10); + + fh->buffers.allocated = 1; + + return 0; +} + +/* free the MJPEG grab buffers */ +static void jpg_fbuffer_free(struct zoran_fh *fh) +{ + struct zoran *zr = fh->zr; + int i, j, off; + unsigned char *mem; + __le32 frag_tab; + struct zoran_buffer *buffer; + + dprintk(4, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); + + for (i = 0, buffer = &fh->buffers.buffer[0]; + i < fh->buffers.num_buffers; i++, buffer++) { + if (!buffer->jpg.frag_tab) + continue; + + if (fh->buffers.need_contiguous) { + frag_tab = buffer->jpg.frag_tab[0]; + + if (frag_tab) { + mem = bus_to_virt(le32_to_cpu(frag_tab)); + for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE) + ClearPageReserved(virt_to_page(mem + off)); + kfree(mem); + buffer->jpg.frag_tab[0] = 0; + buffer->jpg.frag_tab[1] = 0; + } + } else { + for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) { + frag_tab = buffer->jpg.frag_tab[2 * j]; + + if (!frag_tab) + break; + ClearPageReserved(virt_to_page(bus_to_virt(le32_to_cpu(frag_tab)))); + free_page((unsigned long)bus_to_virt(le32_to_cpu(frag_tab))); + buffer->jpg.frag_tab[2 * j] = 0; + buffer->jpg.frag_tab[2 * j + 1] = 0; + } + } + + free_page((unsigned long)buffer->jpg.frag_tab); + buffer->jpg.frag_tab = NULL; + } + + fh->buffers.allocated = 0; +} + +/* + * V4L Buffer grabbing + */ + +static int +zoran_v4l_set_format (struct zoran_fh *fh, + int width, + int height, + const struct zoran_format *format) +{ + struct zoran *zr = fh->zr; + int bpp; + + /* Check size and format of the grab wanted */ + + if (height < BUZ_MIN_HEIGHT || width < BUZ_MIN_WIDTH || + height > BUZ_MAX_HEIGHT || width > BUZ_MAX_WIDTH) { + dprintk(1, + KERN_ERR + "%s: %s - wrong frame size (%dx%d)\n", + ZR_DEVNAME(zr), __func__, width, height); + return -EINVAL; + } + + bpp = (format->depth + 7) / 8; + + /* Check against available buffer size */ + if (height * width * bpp > fh->buffers.buffer_size) { + dprintk(1, + KERN_ERR + "%s: %s - video buffer size (%d 
kB) is too small\n", + ZR_DEVNAME(zr), __func__, fh->buffers.buffer_size >> 10); + return -EINVAL; + } + + /* The video front end needs 4-byte alinged line sizes */ + + if ((bpp == 2 && (width & 1)) || (bpp == 3 && (width & 3))) { + dprintk(1, + KERN_ERR + "%s: %s - wrong frame alignment\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + fh->v4l_settings.width = width; + fh->v4l_settings.height = height; + fh->v4l_settings.format = format; + fh->v4l_settings.bytesperline = bpp * fh->v4l_settings.width; + + return 0; +} + +static int zoran_v4l_queue_frame(struct zoran_fh *fh, int num) +{ + struct zoran *zr = fh->zr; + unsigned long flags; + int res = 0; + + if (!fh->buffers.allocated) { + dprintk(1, + KERN_ERR + "%s: %s - buffers not yet allocated\n", + ZR_DEVNAME(zr), __func__); + res = -ENOMEM; + } + + /* No grabbing outside the buffer range! */ + if (num >= fh->buffers.num_buffers || num < 0) { + dprintk(1, + KERN_ERR + "%s: %s - buffer %d is out of range\n", + ZR_DEVNAME(zr), __func__, num); + res = -EINVAL; + } + + spin_lock_irqsave(&zr->spinlock, flags); + + if (fh->buffers.active == ZORAN_FREE) { + if (zr->v4l_buffers.active == ZORAN_FREE) { + zr->v4l_buffers = fh->buffers; + fh->buffers.active = ZORAN_ACTIVE; + } else { + dprintk(1, + KERN_ERR + "%s: %s - another session is already capturing\n", + ZR_DEVNAME(zr), __func__); + res = -EBUSY; + } + } + + /* make sure a grab isn't going on currently with this buffer */ + if (!res) { + switch (zr->v4l_buffers.buffer[num].state) { + default: + case BUZ_STATE_PEND: + if (zr->v4l_buffers.active == ZORAN_FREE) { + fh->buffers.active = ZORAN_FREE; + zr->v4l_buffers.allocated = 0; + } + res = -EBUSY; /* what are you doing? 
*/ + break; + case BUZ_STATE_DONE: + dprintk(2, + KERN_WARNING + "%s: %s - queueing buffer %d in state DONE!?\n", + ZR_DEVNAME(zr), __func__, num); + /* fall through */ + case BUZ_STATE_USER: + /* since there is at least one unused buffer there's room for at least + * one more pend[] entry */ + zr->v4l_pend[zr->v4l_pend_head++ & V4L_MASK_FRAME] = num; + zr->v4l_buffers.buffer[num].state = BUZ_STATE_PEND; + zr->v4l_buffers.buffer[num].bs.length = + fh->v4l_settings.bytesperline * + zr->v4l_settings.height; + fh->buffers.buffer[num] = zr->v4l_buffers.buffer[num]; + break; + } + } + + spin_unlock_irqrestore(&zr->spinlock, flags); + + if (!res && zr->v4l_buffers.active == ZORAN_FREE) + zr->v4l_buffers.active = fh->buffers.active; + + return res; +} + +/* + * Sync on a V4L buffer + */ + +static int v4l_sync(struct zoran_fh *fh, int frame) +{ + struct zoran *zr = fh->zr; + unsigned long flags; + + if (fh->buffers.active == ZORAN_FREE) { + dprintk(1, + KERN_ERR + "%s: %s - no grab active for this session\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + /* check passed-in frame number */ + if (frame >= fh->buffers.num_buffers || frame < 0) { + dprintk(1, + KERN_ERR "%s: %s - frame %d is invalid\n", + ZR_DEVNAME(zr), __func__, frame); + return -EINVAL; + } + + /* Check if is buffer was queued at all */ + if (zr->v4l_buffers.buffer[frame].state == BUZ_STATE_USER) { + dprintk(1, + KERN_ERR + "%s: %s - attempt to sync on a buffer which was not queued?\n", + ZR_DEVNAME(zr), __func__); + return -EPROTO; + } + + mutex_unlock(&zr->lock); + /* wait on this buffer to get ready */ + if (!wait_event_interruptible_timeout(zr->v4l_capq, + (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ)) { + mutex_lock(&zr->lock); + return -ETIME; + } + mutex_lock(&zr->lock); + if (signal_pending(current)) + return -ERESTARTSYS; + + /* buffer should now be in BUZ_STATE_DONE */ + if (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_DONE) + dprintk(2, + KERN_ERR "%s: %s - internal 
state error\n", + ZR_DEVNAME(zr), __func__); + + zr->v4l_buffers.buffer[frame].state = BUZ_STATE_USER; + fh->buffers.buffer[frame] = zr->v4l_buffers.buffer[frame]; + + spin_lock_irqsave(&zr->spinlock, flags); + + /* Check if streaming capture has finished */ + if (zr->v4l_pend_tail == zr->v4l_pend_head) { + zr36057_set_memgrab(zr, 0); + if (zr->v4l_buffers.active == ZORAN_ACTIVE) { + fh->buffers.active = zr->v4l_buffers.active = ZORAN_FREE; + zr->v4l_buffers.allocated = 0; + } + } + + spin_unlock_irqrestore(&zr->spinlock, flags); + + return 0; +} + +/* + * Queue a MJPEG buffer for capture/playback + */ + +static int zoran_jpg_queue_frame(struct zoran_fh *fh, int num, + enum zoran_codec_mode mode) +{ + struct zoran *zr = fh->zr; + unsigned long flags; + int res = 0; + + /* Check if buffers are allocated */ + if (!fh->buffers.allocated) { + dprintk(1, + KERN_ERR + "%s: %s - buffers not yet allocated\n", + ZR_DEVNAME(zr), __func__); + return -ENOMEM; + } + + /* No grabbing outside the buffer range! */ + if (num >= fh->buffers.num_buffers || num < 0) { + dprintk(1, + KERN_ERR + "%s: %s - buffer %d out of range\n", + ZR_DEVNAME(zr), __func__, num); + return -EINVAL; + } + + /* what is the codec mode right now? 
*/ + if (zr->codec_mode == BUZ_MODE_IDLE) { + zr->jpg_settings = fh->jpg_settings; + } else if (zr->codec_mode != mode) { + /* wrong codec mode active - invalid */ + dprintk(1, + KERN_ERR + "%s: %s - codec in wrong mode\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + if (fh->buffers.active == ZORAN_FREE) { + if (zr->jpg_buffers.active == ZORAN_FREE) { + zr->jpg_buffers = fh->buffers; + fh->buffers.active = ZORAN_ACTIVE; + } else { + dprintk(1, + KERN_ERR + "%s: %s - another session is already capturing\n", + ZR_DEVNAME(zr), __func__); + res = -EBUSY; + } + } + + if (!res && zr->codec_mode == BUZ_MODE_IDLE) { + /* Ok load up the jpeg codec */ + zr36057_enable_jpg(zr, mode); + } + + spin_lock_irqsave(&zr->spinlock, flags); + + if (!res) { + switch (zr->jpg_buffers.buffer[num].state) { + case BUZ_STATE_DONE: + dprintk(2, + KERN_WARNING + "%s: %s - queing frame in BUZ_STATE_DONE state!?\n", + ZR_DEVNAME(zr), __func__); + /* fall through */ + case BUZ_STATE_USER: + /* since there is at least one unused buffer there's room for at + *least one more pend[] entry */ + zr->jpg_pend[zr->jpg_que_head++ & BUZ_MASK_FRAME] = num; + zr->jpg_buffers.buffer[num].state = BUZ_STATE_PEND; + fh->buffers.buffer[num] = zr->jpg_buffers.buffer[num]; + zoran_feed_stat_com(zr); + break; + default: + case BUZ_STATE_DMA: + case BUZ_STATE_PEND: + if (zr->jpg_buffers.active == ZORAN_FREE) { + fh->buffers.active = ZORAN_FREE; + zr->jpg_buffers.allocated = 0; + } + res = -EBUSY; /* what are you doing? */ + break; + } + } + + spin_unlock_irqrestore(&zr->spinlock, flags); + + if (!res && zr->jpg_buffers.active == ZORAN_FREE) + zr->jpg_buffers.active = fh->buffers.active; + + return res; +} + +static int jpg_qbuf(struct zoran_fh *fh, int frame, enum zoran_codec_mode mode) +{ + struct zoran *zr = fh->zr; + int res = 0; + + /* Does the user want to stop streaming? 
*/ + if (frame < 0) { + if (zr->codec_mode == mode) { + if (fh->buffers.active == ZORAN_FREE) { + dprintk(1, + KERN_ERR + "%s: %s(-1) - session not active\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + fh->buffers.active = zr->jpg_buffers.active = ZORAN_FREE; + zr->jpg_buffers.allocated = 0; + zr36057_enable_jpg(zr, BUZ_MODE_IDLE); + return 0; + } else { + dprintk(1, + KERN_ERR + "%s: %s - stop streaming but not in streaming mode\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + } + + if ((res = zoran_jpg_queue_frame(fh, frame, mode))) + return res; + + /* Start the jpeg codec when the first frame is queued */ + if (!res && zr->jpg_que_head == 1) + jpeg_start(zr); + + return res; +} + +/* + * Sync on a MJPEG buffer + */ + +static int jpg_sync(struct zoran_fh *fh, struct zoran_sync *bs) +{ + struct zoran *zr = fh->zr; + unsigned long flags; + int frame; + + if (fh->buffers.active == ZORAN_FREE) { + dprintk(1, + KERN_ERR + "%s: %s - capture is not currently active\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + if (zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS && + zr->codec_mode != BUZ_MODE_MOTION_COMPRESS) { + dprintk(1, + KERN_ERR + "%s: %s - codec not in streaming mode\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + mutex_unlock(&zr->lock); + if (!wait_event_interruptible_timeout(zr->jpg_capq, + (zr->jpg_que_tail != zr->jpg_dma_tail || + zr->jpg_dma_tail == zr->jpg_dma_head), + 10*HZ)) { + int isr; + + btand(~ZR36057_JMC_Go_en, ZR36057_JMC); + udelay(1); + zr->codec->control(zr->codec, CODEC_G_STATUS, + sizeof(isr), &isr); + mutex_lock(&zr->lock); + dprintk(1, + KERN_ERR + "%s: %s - timeout: codec isr=0x%02x\n", + ZR_DEVNAME(zr), __func__, isr); + + return -ETIME; + + } + mutex_lock(&zr->lock); + if (signal_pending(current)) + return -ERESTARTSYS; + + spin_lock_irqsave(&zr->spinlock, flags); + + if (zr->jpg_dma_tail != zr->jpg_dma_head) + frame = zr->jpg_pend[zr->jpg_que_tail++ & BUZ_MASK_FRAME]; + else + frame = 
zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; + + /* buffer should now be in BUZ_STATE_DONE */ + if (zr->jpg_buffers.buffer[frame].state != BUZ_STATE_DONE) + dprintk(2, + KERN_ERR "%s: %s - internal state error\n", + ZR_DEVNAME(zr), __func__); + + *bs = zr->jpg_buffers.buffer[frame].bs; + bs->frame = frame; + zr->jpg_buffers.buffer[frame].state = BUZ_STATE_USER; + fh->buffers.buffer[frame] = zr->jpg_buffers.buffer[frame]; + + spin_unlock_irqrestore(&zr->spinlock, flags); + + return 0; +} + +static void zoran_open_init_session(struct zoran_fh *fh) +{ + int i; + struct zoran *zr = fh->zr; + + /* Per default, map the V4L Buffers */ + map_mode_raw(fh); + + /* take over the card's current settings */ + fh->overlay_settings = zr->overlay_settings; + fh->overlay_settings.is_set = 0; + fh->overlay_settings.format = zr->overlay_settings.format; + fh->overlay_active = ZORAN_FREE; + + /* v4l settings */ + fh->v4l_settings = zr->v4l_settings; + /* jpg settings */ + fh->jpg_settings = zr->jpg_settings; + + /* buffers */ + memset(&fh->buffers, 0, sizeof(fh->buffers)); + for (i = 0; i < MAX_FRAME; i++) { + fh->buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ + fh->buffers.buffer[i].bs.frame = i; + } + fh->buffers.allocated = 0; + fh->buffers.active = ZORAN_FREE; +} + +static void zoran_close_end_session(struct zoran_fh *fh) +{ + struct zoran *zr = fh->zr; + + /* overlay */ + if (fh->overlay_active != ZORAN_FREE) { + fh->overlay_active = zr->overlay_active = ZORAN_FREE; + zr->v4l_overlay_active = 0; + if (!zr->v4l_memgrab_active) + zr36057_overlay(zr, 0); + zr->overlay_mask = NULL; + } + + if (fh->map_mode == ZORAN_MAP_MODE_RAW) { + /* v4l capture */ + if (fh->buffers.active != ZORAN_FREE) { + unsigned long flags; + + spin_lock_irqsave(&zr->spinlock, flags); + zr36057_set_memgrab(zr, 0); + zr->v4l_buffers.allocated = 0; + zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; + spin_unlock_irqrestore(&zr->spinlock, flags); + } + + /* v4l buffers */ + if 
(fh->buffers.allocated) + v4l_fbuffer_free(fh); + } else { + /* jpg capture */ + if (fh->buffers.active != ZORAN_FREE) { + zr36057_enable_jpg(zr, BUZ_MODE_IDLE); + zr->jpg_buffers.allocated = 0; + zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE; + } + + /* jpg buffers */ + if (fh->buffers.allocated) + jpg_fbuffer_free(fh); + } +} + +/* + * Open a zoran card. Right now the flags stuff is just playing + */ + +static int zoran_open(struct file *file) +{ + struct zoran *zr = video_drvdata(file); + struct zoran_fh *fh; + int res, first_open = 0; + + dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n", + ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1); + + mutex_lock(&zr->lock); + + if (zr->user >= 2048) { + dprintk(1, KERN_ERR "%s: too many users (%d) on device\n", + ZR_DEVNAME(zr), zr->user); + res = -EBUSY; + goto fail_unlock; + } + + /* now, create the open()-specific file_ops struct */ + fh = kzalloc(sizeof(struct zoran_fh), GFP_KERNEL); + if (!fh) { + dprintk(1, + KERN_ERR + "%s: %s - allocation of zoran_fh failed\n", + ZR_DEVNAME(zr), __func__); + res = -ENOMEM; + goto fail_unlock; + } + v4l2_fh_init(&fh->fh, video_devdata(file)); + + /* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows + * on norm-change! 
*/ + fh->overlay_mask = + kmalloc(((768 + 31) / 32) * 576 * 4, GFP_KERNEL); + if (!fh->overlay_mask) { + dprintk(1, + KERN_ERR + "%s: %s - allocation of overlay_mask failed\n", + ZR_DEVNAME(zr), __func__); + res = -ENOMEM; + goto fail_fh; + } + + if (zr->user++ == 0) + first_open = 1; + + /* default setup - TODO: look at flags */ + if (first_open) { /* First device open */ + zr36057_restart(zr); + zoran_open_init_params(zr); + zoran_init_hardware(zr); + + btor(ZR36057_ICR_IntPinEn, ZR36057_ICR); + } + + /* set file_ops stuff */ + file->private_data = fh; + fh->zr = zr; + zoran_open_init_session(fh); + v4l2_fh_add(&fh->fh); + mutex_unlock(&zr->lock); + + return 0; + +fail_fh: + v4l2_fh_exit(&fh->fh); + kfree(fh); +fail_unlock: + mutex_unlock(&zr->lock); + + dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n", + ZR_DEVNAME(zr), res, zr->user); + + return res; +} + +static int +zoran_close(struct file *file) +{ + struct zoran_fh *fh = file->private_data; + struct zoran *zr = fh->zr; + + dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(+)=%d\n", + ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user - 1); + + /* kernel locks (fs/device.c), so don't do that ourselves + * (prevents deadlocks) */ + mutex_lock(&zr->lock); + + zoran_close_end_session(fh); + + if (zr->user-- == 1) { /* Last process */ + /* Clean up JPEG process */ + wake_up_interruptible(&zr->jpg_capq); + zr36057_enable_jpg(zr, BUZ_MODE_IDLE); + zr->jpg_buffers.allocated = 0; + zr->jpg_buffers.active = ZORAN_FREE; + + /* disable interrupts */ + btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR); + + if (zr36067_debug > 1) + print_interrupts(zr); + + /* Overlay off */ + zr->v4l_overlay_active = 0; + zr36057_overlay(zr, 0); + zr->overlay_mask = NULL; + + /* capture off */ + wake_up_interruptible(&zr->v4l_capq); + zr36057_set_memgrab(zr, 0); + zr->v4l_buffers.allocated = 0; + zr->v4l_buffers.active = ZORAN_FREE; + zoran_set_pci_master(zr, 0); + + if (!pass_through) { /* Switch to color bar */ 
+ decoder_call(zr, video, s_stream, 0); + encoder_call(zr, video, s_routing, 2, 0, 0); + } + } + mutex_unlock(&zr->lock); + + v4l2_fh_del(&fh->fh); + v4l2_fh_exit(&fh->fh); + kfree(fh->overlay_mask); + kfree(fh); + + dprintk(4, KERN_INFO "%s: %s done\n", ZR_DEVNAME(zr), __func__); + + return 0; +} + +static int setup_fbuffer(struct zoran_fh *fh, + void *base, + const struct zoran_format *fmt, + int width, + int height, + int bytesperline) +{ + struct zoran *zr = fh->zr; + + /* (Ronald) v4l/v4l2 guidelines */ + if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* Don't allow frame buffer overlay if PCI or AGP is buggy, or on + ALi Magik (that needs very low latency while the card needs a + higher value always) */ + + if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL | PCIPCI_ALIMAGIK)) + return -ENXIO; + + /* we need a bytesperline value, even if not given */ + if (!bytesperline) + bytesperline = width * ((fmt->depth + 7) & ~7) / 8; + +#if 0 + if (zr->overlay_active) { + /* dzjee... stupid users... don't even bother to turn off + * overlay before changing the memory location... + * normally, we would return errors here. However, one of + * the tools that does this is... xawtv! 
and since xawtv + * is used by +/- 99% of the users, we'd rather be user- + * friendly and silently do as if nothing went wrong */ + dprintk(3, + KERN_ERR + "%s: %s - forced overlay turnoff because framebuffer changed\n", + ZR_DEVNAME(zr), __func__); + zr36057_overlay(zr, 0); + } +#endif + + if (!(fmt->flags & ZORAN_FORMAT_OVERLAY)) { + dprintk(1, + KERN_ERR + "%s: %s - no valid overlay format given\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + if (height <= 0 || width <= 0 || bytesperline <= 0) { + dprintk(1, + KERN_ERR + "%s: %s - invalid height/width/bpl value (%d|%d|%d)\n", + ZR_DEVNAME(zr), __func__, width, height, bytesperline); + return -EINVAL; + } + if (bytesperline & 3) { + dprintk(1, + KERN_ERR + "%s: %s - bytesperline (%d) must be 4-byte aligned\n", + ZR_DEVNAME(zr), __func__, bytesperline); + return -EINVAL; + } + + zr->vbuf_base = (void *) ((unsigned long) base & ~3); + zr->vbuf_height = height; + zr->vbuf_width = width; + zr->vbuf_depth = fmt->depth; + zr->overlay_settings.format = fmt; + zr->vbuf_bytesperline = bytesperline; + + /* The user should set new window parameters */ + zr->overlay_settings.is_set = 0; + + return 0; +} + + +static int setup_window(struct zoran_fh *fh, + int x, + int y, + int width, + int height, + struct v4l2_clip __user *clips, + unsigned int clipcount, + void __user *bitmap) +{ + struct zoran *zr = fh->zr; + struct v4l2_clip *vcp = NULL; + int on, end; + + + if (!zr->vbuf_base) { + dprintk(1, + KERN_ERR + "%s: %s - frame buffer has to be set first\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + if (!fh->overlay_settings.format) { + dprintk(1, + KERN_ERR + "%s: %s - no overlay format set\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + if (clipcount > 2048) { + dprintk(1, + KERN_ERR + "%s: %s - invalid clipcount\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + /* + * The video front end needs 4-byte alinged line sizes, we correct that + * silently here if necessary + */ + if 
(zr->vbuf_depth == 15 || zr->vbuf_depth == 16) { + end = (x + width) & ~1; /* round down */ + x = (x + 1) & ~1; /* round up */ + width = end - x; + } + + if (zr->vbuf_depth == 24) { + end = (x + width) & ~3; /* round down */ + x = (x + 3) & ~3; /* round up */ + width = end - x; + } + + if (width > BUZ_MAX_WIDTH) + width = BUZ_MAX_WIDTH; + if (height > BUZ_MAX_HEIGHT) + height = BUZ_MAX_HEIGHT; + + /* Check for invalid parameters */ + if (width < BUZ_MIN_WIDTH || height < BUZ_MIN_HEIGHT || + width > BUZ_MAX_WIDTH || height > BUZ_MAX_HEIGHT) { + dprintk(1, + KERN_ERR + "%s: %s - width = %d or height = %d invalid\n", + ZR_DEVNAME(zr), __func__, width, height); + return -EINVAL; + } + + fh->overlay_settings.x = x; + fh->overlay_settings.y = y; + fh->overlay_settings.width = width; + fh->overlay_settings.height = height; + fh->overlay_settings.clipcount = clipcount; + + /* + * If an overlay is running, we have to switch it off + * and switch it on again in order to get the new settings in effect. + * + * We also want to avoid that the overlay mask is written + * when an overlay is running. + */ + + on = zr->v4l_overlay_active && !zr->v4l_memgrab_active && + zr->overlay_active != ZORAN_FREE && + fh->overlay_active != ZORAN_FREE; + if (on) + zr36057_overlay(zr, 0); + + /* + * Write the overlay mask if clips are wanted. + * We prefer a bitmap. 
+ */ + if (bitmap) { + /* fake value - it just means we want clips */ + fh->overlay_settings.clipcount = 1; + + if (copy_from_user(fh->overlay_mask, bitmap, + (width * height + 7) / 8)) { + return -EFAULT; + } + } else if (clipcount) { + /* write our own bitmap from the clips */ + vcp = vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4)); + if (vcp == NULL) { + dprintk(1, + KERN_ERR + "%s: %s - Alloc of clip mask failed\n", + ZR_DEVNAME(zr), __func__); + return -ENOMEM; + } + if (copy_from_user + (vcp, clips, sizeof(struct v4l2_clip) * clipcount)) { + vfree(vcp); + return -EFAULT; + } + write_overlay_mask(fh, vcp, clipcount); + vfree(vcp); + } + + fh->overlay_settings.is_set = 1; + if (fh->overlay_active != ZORAN_FREE && + zr->overlay_active != ZORAN_FREE) + zr->overlay_settings = fh->overlay_settings; + + if (on) + zr36057_overlay(zr, 1); + + /* Make sure the changes come into effect */ + return wait_grab_pending(zr); +} + +static int setup_overlay(struct zoran_fh *fh, int on) +{ + struct zoran *zr = fh->zr; + + /* If there is nothing to do, return immediately */ + if ((on && fh->overlay_active != ZORAN_FREE) || + (!on && fh->overlay_active == ZORAN_FREE)) + return 0; + + /* check whether we're touching someone else's overlay */ + if (on && zr->overlay_active != ZORAN_FREE && + fh->overlay_active == ZORAN_FREE) { + dprintk(1, + KERN_ERR + "%s: %s - overlay is already active for another session\n", + ZR_DEVNAME(zr), __func__); + return -EBUSY; + } + if (!on && zr->overlay_active != ZORAN_FREE && + fh->overlay_active == ZORAN_FREE) { + dprintk(1, + KERN_ERR + "%s: %s - you cannot cancel someone else's session\n", + ZR_DEVNAME(zr), __func__); + return -EPERM; + } + + if (on == 0) { + zr->overlay_active = fh->overlay_active = ZORAN_FREE; + zr->v4l_overlay_active = 0; + /* When a grab is running, the video simply + * won't be switched on any more */ + if (!zr->v4l_memgrab_active) + zr36057_overlay(zr, 0); + zr->overlay_mask = NULL; + } else { + if (!zr->vbuf_base || 
!fh->overlay_settings.is_set) { + dprintk(1, + KERN_ERR + "%s: %s - buffer or window not set\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + if (!fh->overlay_settings.format) { + dprintk(1, + KERN_ERR + "%s: %s - no overlay format set\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + zr->overlay_active = fh->overlay_active = ZORAN_LOCKED; + zr->v4l_overlay_active = 1; + zr->overlay_mask = fh->overlay_mask; + zr->overlay_settings = fh->overlay_settings; + if (!zr->v4l_memgrab_active) + zr36057_overlay(zr, 1); + /* When a grab is running, the video will be + * switched on when grab is finished */ + } + + /* Make sure the changes come into effect */ + return wait_grab_pending(zr); +} + +/* get the status of a buffer in the clients buffer queue */ +static int zoran_v4l2_buffer_status(struct zoran_fh *fh, + struct v4l2_buffer *buf, int num) +{ + struct zoran *zr = fh->zr; + unsigned long flags; + + buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + + switch (fh->map_mode) { + case ZORAN_MAP_MODE_RAW: + /* check range */ + if (num < 0 || num >= fh->buffers.num_buffers || + !fh->buffers.allocated) { + dprintk(1, + KERN_ERR + "%s: %s - wrong number or buffers not allocated\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + spin_lock_irqsave(&zr->spinlock, flags); + dprintk(3, + KERN_DEBUG + "%s: %s() - raw active=%c, buffer %d: state=%c, map=%c\n", + ZR_DEVNAME(zr), __func__, + "FAL"[fh->buffers.active], num, + "UPMD"[zr->v4l_buffers.buffer[num].state], + fh->buffers.buffer[num].map ? 
'Y' : 'N'); + spin_unlock_irqrestore(&zr->spinlock, flags); + + buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + buf->length = fh->buffers.buffer_size; + + /* get buffer */ + buf->bytesused = fh->buffers.buffer[num].bs.length; + if (fh->buffers.buffer[num].state == BUZ_STATE_DONE || + fh->buffers.buffer[num].state == BUZ_STATE_USER) { + buf->sequence = fh->buffers.buffer[num].bs.seq; + buf->flags |= V4L2_BUF_FLAG_DONE; + buf->timestamp = fh->buffers.buffer[num].bs.timestamp; + } else { + buf->flags |= V4L2_BUF_FLAG_QUEUED; + } + + if (fh->v4l_settings.height <= BUZ_MAX_HEIGHT / 2) + buf->field = V4L2_FIELD_TOP; + else + buf->field = V4L2_FIELD_INTERLACED; + + break; + + case ZORAN_MAP_MODE_JPG_REC: + case ZORAN_MAP_MODE_JPG_PLAY: + + /* check range */ + if (num < 0 || num >= fh->buffers.num_buffers || + !fh->buffers.allocated) { + dprintk(1, + KERN_ERR + "%s: %s - wrong number or buffers not allocated\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + buf->type = (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ? + V4L2_BUF_TYPE_VIDEO_CAPTURE : + V4L2_BUF_TYPE_VIDEO_OUTPUT; + buf->length = fh->buffers.buffer_size; + + /* these variables are only written after frame has been captured */ + if (fh->buffers.buffer[num].state == BUZ_STATE_DONE || + fh->buffers.buffer[num].state == BUZ_STATE_USER) { + buf->sequence = fh->buffers.buffer[num].bs.seq; + buf->timestamp = fh->buffers.buffer[num].bs.timestamp; + buf->bytesused = fh->buffers.buffer[num].bs.length; + buf->flags |= V4L2_BUF_FLAG_DONE; + } else { + buf->flags |= V4L2_BUF_FLAG_QUEUED; + } + + /* which fields are these? */ + if (fh->jpg_settings.TmpDcm != 1) + buf->field = fh->jpg_settings.odd_even ? + V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; + else + buf->field = fh->jpg_settings.odd_even ? 
+ V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT; + + break; + + default: + + dprintk(5, + KERN_ERR + "%s: %s - invalid buffer type|map_mode (%d|%d)\n", + ZR_DEVNAME(zr), __func__, buf->type, fh->map_mode); + return -EINVAL; + } + + buf->memory = V4L2_MEMORY_MMAP; + buf->index = num; + buf->m.offset = buf->length * num; + + return 0; +} + +static int +zoran_set_norm (struct zoran *zr, + v4l2_std_id norm) +{ + int on; + + if (zr->v4l_buffers.active != ZORAN_FREE || + zr->jpg_buffers.active != ZORAN_FREE) { + dprintk(1, + KERN_WARNING + "%s: %s called while in playback/capture mode\n", + ZR_DEVNAME(zr), __func__); + return -EBUSY; + } + + if (!(norm & zr->card.norms)) { + dprintk(1, + KERN_ERR "%s: %s - unsupported norm %llx\n", + ZR_DEVNAME(zr), __func__, norm); + return -EINVAL; + } + + if (norm & V4L2_STD_SECAM) + zr->timing = zr->card.tvn[2]; + else if (norm & V4L2_STD_NTSC) + zr->timing = zr->card.tvn[1]; + else + zr->timing = zr->card.tvn[0]; + + /* We switch overlay off and on since a change in the + * norm needs different VFE settings */ + on = zr->overlay_active && !zr->v4l_memgrab_active; + if (on) + zr36057_overlay(zr, 0); + + decoder_call(zr, video, s_std, norm); + encoder_call(zr, video, s_std_output, norm); + + if (on) + zr36057_overlay(zr, 1); + + /* Make sure the changes come into effect */ + zr->norm = norm; + + return 0; +} + +static int +zoran_set_input (struct zoran *zr, + int input) +{ + if (input == zr->input) { + return 0; + } + + if (zr->v4l_buffers.active != ZORAN_FREE || + zr->jpg_buffers.active != ZORAN_FREE) { + dprintk(1, + KERN_WARNING + "%s: %s called while in playback/capture mode\n", + ZR_DEVNAME(zr), __func__); + return -EBUSY; + } + + if (input < 0 || input >= zr->card.inputs) { + dprintk(1, + KERN_ERR + "%s: %s - unsupported input %d\n", + ZR_DEVNAME(zr), __func__, input); + return -EINVAL; + } + + zr->input = input; + + decoder_call(zr, video, s_routing, + zr->card.input[input].muxsel, 0, 0); + + return 0; +} + +/* + * ioctl routine + */ + 
+static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + strncpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)-1); + strncpy(cap->driver, "zoran", sizeof(cap->driver)-1); + snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", + pci_name(zr->pci_dev)); + cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; +} + +static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag) +{ + unsigned int num, i; + + for (num = i = 0; i < NUM_FORMATS; i++) { + if (zoran_formats[i].flags & flag && num++ == fmt->index) { + strncpy(fmt->description, zoran_formats[i].name, + sizeof(fmt->description) - 1); + /* fmt struct pre-zeroed, so adding '\0' not needed */ + fmt->pixelformat = zoran_formats[i].fourcc; + if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED) + fmt->flags |= V4L2_FMT_FLAG_COMPRESSED; + return 0; + } + } + return -EINVAL; +} + +static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh, + struct v4l2_fmtdesc *f) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE); +} + +static int zoran_enum_fmt_vid_out(struct file *file, void *__fh, + struct v4l2_fmtdesc *f) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + return zoran_enum_fmt(zr, f, ZORAN_FORMAT_PLAYBACK); +} + +static int zoran_enum_fmt_vid_overlay(struct file *file, void *__fh, + struct v4l2_fmtdesc *f) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + return zoran_enum_fmt(zr, f, ZORAN_FORMAT_OVERLAY); +} + +static int zoran_g_fmt_vid_out(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + + fmt->fmt.pix.width = fh->jpg_settings.img_width / fh->jpg_settings.HorDcm; + fmt->fmt.pix.height = fh->jpg_settings.img_height * 2 
/ + (fh->jpg_settings.VerDcm * fh->jpg_settings.TmpDcm); + fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&fh->jpg_settings); + fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG; + if (fh->jpg_settings.TmpDcm == 1) + fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? + V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); + else + fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? + V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); + fmt->fmt.pix.bytesperline = 0; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + + return 0; +} + +static int zoran_g_fmt_vid_cap(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + if (fh->map_mode != ZORAN_MAP_MODE_RAW) + return zoran_g_fmt_vid_out(file, fh, fmt); + + fmt->fmt.pix.width = fh->v4l_settings.width; + fmt->fmt.pix.height = fh->v4l_settings.height; + fmt->fmt.pix.sizeimage = fh->v4l_settings.bytesperline * + fh->v4l_settings.height; + fmt->fmt.pix.pixelformat = fh->v4l_settings.format->fourcc; + fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace; + fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline; + if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2)) + fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; + else + fmt->fmt.pix.field = V4L2_FIELD_TOP; + return 0; +} + +static int zoran_g_fmt_vid_overlay(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + fmt->fmt.win.w.left = fh->overlay_settings.x; + fmt->fmt.win.w.top = fh->overlay_settings.y; + fmt->fmt.win.w.width = fh->overlay_settings.width; + fmt->fmt.win.w.height = fh->overlay_settings.height; + if (fh->overlay_settings.width * 2 > BUZ_MAX_HEIGHT) + fmt->fmt.win.field = V4L2_FIELD_INTERLACED; + else + fmt->fmt.win.field = V4L2_FIELD_TOP; + + return 0; +} + +static int zoran_try_fmt_vid_overlay(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + if 
(fmt->fmt.win.w.width > BUZ_MAX_WIDTH) + fmt->fmt.win.w.width = BUZ_MAX_WIDTH; + if (fmt->fmt.win.w.width < BUZ_MIN_WIDTH) + fmt->fmt.win.w.width = BUZ_MIN_WIDTH; + if (fmt->fmt.win.w.height > BUZ_MAX_HEIGHT) + fmt->fmt.win.w.height = BUZ_MAX_HEIGHT; + if (fmt->fmt.win.w.height < BUZ_MIN_HEIGHT) + fmt->fmt.win.w.height = BUZ_MIN_HEIGHT; + + return 0; +} + +static int zoran_try_fmt_vid_out(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + struct zoran_jpg_settings settings; + int res = 0; + + if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG) + return -EINVAL; + + settings = fh->jpg_settings; + + /* we actually need to set 'real' parameters now */ + if ((fmt->fmt.pix.height * 2) > BUZ_MAX_HEIGHT) + settings.TmpDcm = 1; + else + settings.TmpDcm = 2; + settings.decimation = 0; + if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2) + settings.VerDcm = 2; + else + settings.VerDcm = 1; + if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4) + settings.HorDcm = 4; + else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2) + settings.HorDcm = 2; + else + settings.HorDcm = 1; + if (settings.TmpDcm == 1) + settings.field_per_buff = 2; + else + settings.field_per_buff = 1; + + if (settings.HorDcm > 1) { + settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; + settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; + } else { + settings.img_x = 0; + settings.img_width = BUZ_MAX_WIDTH; + } + + /* check */ + res = zoran_check_jpg_settings(zr, &settings, 1); + if (res) + return res; + + /* tell the user what we actually did */ + fmt->fmt.pix.width = settings.img_width / settings.HorDcm; + fmt->fmt.pix.height = settings.img_height * 2 / + (settings.TmpDcm * settings.VerDcm); + if (settings.TmpDcm == 1) + fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? + V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); + else + fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? 
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); + + fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings); + fmt->fmt.pix.bytesperline = 0; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + return res; +} + +static int zoran_try_fmt_vid_cap(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int bpp; + int i; + + if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) + return zoran_try_fmt_vid_out(file, fh, fmt); + + for (i = 0; i < NUM_FORMATS; i++) + if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat) + break; + + if (i == NUM_FORMATS) + return -EINVAL; + + bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8); + v4l_bound_align_image( + &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2, + &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0); + return 0; +} + +static int zoran_s_fmt_vid_overlay(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + int res; + + dprintk(3, "x=%d, y=%d, w=%d, h=%d, cnt=%d, map=0x%p\n", + fmt->fmt.win.w.left, fmt->fmt.win.w.top, + fmt->fmt.win.w.width, + fmt->fmt.win.w.height, + fmt->fmt.win.clipcount, + fmt->fmt.win.bitmap); + res = setup_window(fh, fmt->fmt.win.w.left, fmt->fmt.win.w.top, + fmt->fmt.win.w.width, fmt->fmt.win.w.height, + (struct v4l2_clip __user *)fmt->fmt.win.clips, + fmt->fmt.win.clipcount, fmt->fmt.win.bitmap); + return res; +} + +static int zoran_s_fmt_vid_out(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + __le32 printformat = __cpu_to_le32(fmt->fmt.pix.pixelformat); + struct zoran_jpg_settings settings; + int res = 0; + + dprintk(3, "size=%dx%d, fmt=0x%x (%4.4s)\n", + fmt->fmt.pix.width, fmt->fmt.pix.height, + fmt->fmt.pix.pixelformat, + (char *) &printformat); + if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG) + return -EINVAL; + + if (fh->buffers.allocated) { + dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - 
cannot change capture mode\n", + ZR_DEVNAME(zr)); + res = -EBUSY; + return res; + } + + settings = fh->jpg_settings; + + /* we actually need to set 'real' parameters now */ + if (fmt->fmt.pix.height * 2 > BUZ_MAX_HEIGHT) + settings.TmpDcm = 1; + else + settings.TmpDcm = 2; + settings.decimation = 0; + if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2) + settings.VerDcm = 2; + else + settings.VerDcm = 1; + if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4) + settings.HorDcm = 4; + else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2) + settings.HorDcm = 2; + else + settings.HorDcm = 1; + if (settings.TmpDcm == 1) + settings.field_per_buff = 2; + else + settings.field_per_buff = 1; + + if (settings.HorDcm > 1) { + settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; + settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; + } else { + settings.img_x = 0; + settings.img_width = BUZ_MAX_WIDTH; + } + + /* check */ + res = zoran_check_jpg_settings(zr, &settings, 0); + if (res) + return res; + + /* it's ok, so set them */ + fh->jpg_settings = settings; + + map_mode_jpg(fh, fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT); + fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings); + + /* tell the user what we actually did */ + fmt->fmt.pix.width = settings.img_width / settings.HorDcm; + fmt->fmt.pix.height = settings.img_height * 2 / + (settings.TmpDcm * settings.VerDcm); + if (settings.TmpDcm == 1) + fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? + V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); + else + fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? 
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); + fmt->fmt.pix.bytesperline = 0; + fmt->fmt.pix.sizeimage = fh->buffers.buffer_size; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + return res; +} + +static int zoran_s_fmt_vid_cap(struct file *file, void *__fh, + struct v4l2_format *fmt) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int i; + int res = 0; + + if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) + return zoran_s_fmt_vid_out(file, fh, fmt); + + for (i = 0; i < NUM_FORMATS; i++) + if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc) + break; + if (i == NUM_FORMATS) { + dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - unknown/unsupported format 0x%x\n", + ZR_DEVNAME(zr), fmt->fmt.pix.pixelformat); + return -EINVAL; + } + + if ((fh->map_mode != ZORAN_MAP_MODE_RAW && fh->buffers.allocated) || + fh->buffers.active != ZORAN_FREE) { + dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n", + ZR_DEVNAME(zr)); + res = -EBUSY; + return res; + } + if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT) + fmt->fmt.pix.height = BUZ_MAX_HEIGHT; + if (fmt->fmt.pix.width > BUZ_MAX_WIDTH) + fmt->fmt.pix.width = BUZ_MAX_WIDTH; + + map_mode_raw(fh); + + res = zoran_v4l_set_format(fh, fmt->fmt.pix.width, fmt->fmt.pix.height, + &zoran_formats[i]); + if (res) + return res; + + /* tell the user the results/missing stuff */ + fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline; + fmt->fmt.pix.sizeimage = fh->v4l_settings.height * fh->v4l_settings.bytesperline; + fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace; + if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2)) + fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; + else + fmt->fmt.pix.field = V4L2_FIELD_TOP; + return res; +} + +static int zoran_g_fbuf(struct file *file, void *__fh, + struct v4l2_framebuffer *fb) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + memset(fb, 0, sizeof(*fb)); + fb->base = zr->vbuf_base; + fb->fmt.width = zr->vbuf_width; + fb->fmt.height = 
zr->vbuf_height; + if (zr->overlay_settings.format) + fb->fmt.pixelformat = fh->overlay_settings.format->fourcc; + fb->fmt.bytesperline = zr->vbuf_bytesperline; + fb->fmt.colorspace = V4L2_COLORSPACE_SRGB; + fb->fmt.field = V4L2_FIELD_INTERLACED; + fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING; + + return 0; +} + +static int zoran_s_fbuf(struct file *file, void *__fh, + const struct v4l2_framebuffer *fb) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int i, res = 0; + __le32 printformat = __cpu_to_le32(fb->fmt.pixelformat); + + for (i = 0; i < NUM_FORMATS; i++) + if (zoran_formats[i].fourcc == fb->fmt.pixelformat) + break; + if (i == NUM_FORMATS) { + dprintk(1, KERN_ERR "%s: VIDIOC_S_FBUF - format=0x%x (%4.4s) not allowed\n", + ZR_DEVNAME(zr), fb->fmt.pixelformat, + (char *)&printformat); + return -EINVAL; + } + + res = setup_fbuffer(fh, fb->base, &zoran_formats[i], fb->fmt.width, + fb->fmt.height, fb->fmt.bytesperline); + + return res; +} + +static int zoran_overlay(struct file *file, void *__fh, unsigned int on) +{ + struct zoran_fh *fh = __fh; + int res; + + res = setup_overlay(fh, on); + + return res; +} + +static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type); + +static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffers *req) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int res = 0; + + if (req->memory != V4L2_MEMORY_MMAP) { + dprintk(2, + KERN_ERR + "%s: only MEMORY_MMAP capture is supported, not %d\n", + ZR_DEVNAME(zr), req->memory); + return -EINVAL; + } + + if (req->count == 0) + return zoran_streamoff(file, fh, req->type); + + if (fh->buffers.allocated) { + dprintk(2, + KERN_ERR + "%s: VIDIOC_REQBUFS - buffers already allocated\n", + ZR_DEVNAME(zr)); + res = -EBUSY; + return res; + } + + if (fh->map_mode == ZORAN_MAP_MODE_RAW && + req->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { + /* control user input */ + if (req->count < 2) + req->count = 2; + if (req->count > 
v4l_nbufs) + req->count = v4l_nbufs; + + /* The next mmap will map the V4L buffers */ + map_mode_raw(fh); + fh->buffers.num_buffers = req->count; + + if (v4l_fbuffer_alloc(fh)) { + res = -ENOMEM; + return res; + } + } else if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC || + fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) { + /* we need to calculate size ourselves now */ + if (req->count < 4) + req->count = 4; + if (req->count > jpg_nbufs) + req->count = jpg_nbufs; + + /* The next mmap will map the MJPEG buffers */ + map_mode_jpg(fh, req->type == V4L2_BUF_TYPE_VIDEO_OUTPUT); + fh->buffers.num_buffers = req->count; + fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings); + + if (jpg_fbuffer_alloc(fh)) { + res = -ENOMEM; + return res; + } + } else { + dprintk(1, + KERN_ERR + "%s: VIDIOC_REQBUFS - unknown type %d\n", + ZR_DEVNAME(zr), req->type); + res = -EINVAL; + return res; + } + return res; +} + +static int zoran_querybuf(struct file *file, void *__fh, struct v4l2_buffer *buf) +{ + struct zoran_fh *fh = __fh; + int res; + + res = zoran_v4l2_buffer_status(fh, buf, buf->index); + + return res; +} + +static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int res = 0, codec_mode, buf_type; + + switch (fh->map_mode) { + case ZORAN_MAP_MODE_RAW: + if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { + dprintk(1, KERN_ERR + "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", + ZR_DEVNAME(zr), buf->type, fh->map_mode); + res = -EINVAL; + return res; + } + + res = zoran_v4l_queue_frame(fh, buf->index); + if (res) + return res; + if (!zr->v4l_memgrab_active && fh->buffers.active == ZORAN_LOCKED) + zr36057_set_memgrab(zr, 1); + break; + + case ZORAN_MAP_MODE_JPG_REC: + case ZORAN_MAP_MODE_JPG_PLAY: + if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) { + buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + codec_mode = BUZ_MODE_MOTION_DECOMPRESS; + } else { + buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + 
codec_mode = BUZ_MODE_MOTION_COMPRESS; + } + + if (buf->type != buf_type) { + dprintk(1, KERN_ERR + "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", + ZR_DEVNAME(zr), buf->type, fh->map_mode); + res = -EINVAL; + return res; + } + + res = zoran_jpg_queue_frame(fh, buf->index, codec_mode); + if (res != 0) + return res; + if (zr->codec_mode == BUZ_MODE_IDLE && + fh->buffers.active == ZORAN_LOCKED) + zr36057_enable_jpg(zr, codec_mode); + + break; + + default: + dprintk(1, KERN_ERR + "%s: VIDIOC_QBUF - unsupported type %d\n", + ZR_DEVNAME(zr), buf->type); + res = -EINVAL; + break; + } + return res; +} + +static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int res = 0, buf_type, num = -1; /* compiler borks here (?) */ + + switch (fh->map_mode) { + case ZORAN_MAP_MODE_RAW: + if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { + dprintk(1, KERN_ERR + "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", + ZR_DEVNAME(zr), buf->type, fh->map_mode); + res = -EINVAL; + return res; + } + + num = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME]; + if (file->f_flags & O_NONBLOCK && + zr->v4l_buffers.buffer[num].state != BUZ_STATE_DONE) { + res = -EAGAIN; + return res; + } + res = v4l_sync(fh, num); + if (res) + return res; + zr->v4l_sync_tail++; + res = zoran_v4l2_buffer_status(fh, buf, num); + break; + + case ZORAN_MAP_MODE_JPG_REC: + case ZORAN_MAP_MODE_JPG_PLAY: + { + struct zoran_sync bs; + + if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) + buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + else + buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + if (buf->type != buf_type) { + dprintk(1, KERN_ERR + "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", + ZR_DEVNAME(zr), buf->type, fh->map_mode); + res = -EINVAL; + return res; + } + + num = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; + + if (file->f_flags & O_NONBLOCK && + zr->jpg_buffers.buffer[num].state != BUZ_STATE_DONE) { + res 
= -EAGAIN; + return res; + } + bs.frame = 0; /* suppress compiler warning */ + res = jpg_sync(fh, &bs); + if (res) + return res; + res = zoran_v4l2_buffer_status(fh, buf, bs.frame); + break; + } + + default: + dprintk(1, KERN_ERR + "%s: VIDIOC_DQBUF - unsupported type %d\n", + ZR_DEVNAME(zr), buf->type); + res = -EINVAL; + break; + } + return res; +} + +static int zoran_streamon(struct file *file, void *__fh, enum v4l2_buf_type type) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int res = 0; + + switch (fh->map_mode) { + case ZORAN_MAP_MODE_RAW: /* raw capture */ + if (zr->v4l_buffers.active != ZORAN_ACTIVE || + fh->buffers.active != ZORAN_ACTIVE) { + res = -EBUSY; + return res; + } + + zr->v4l_buffers.active = fh->buffers.active = ZORAN_LOCKED; + zr->v4l_settings = fh->v4l_settings; + + zr->v4l_sync_tail = zr->v4l_pend_tail; + if (!zr->v4l_memgrab_active && + zr->v4l_pend_head != zr->v4l_pend_tail) { + zr36057_set_memgrab(zr, 1); + } + break; + + case ZORAN_MAP_MODE_JPG_REC: + case ZORAN_MAP_MODE_JPG_PLAY: + /* what is the codec mode right now? */ + if (zr->jpg_buffers.active != ZORAN_ACTIVE || + fh->buffers.active != ZORAN_ACTIVE) { + res = -EBUSY; + return res; + } + + zr->jpg_buffers.active = fh->buffers.active = ZORAN_LOCKED; + + if (zr->jpg_que_head != zr->jpg_que_tail) { + /* Start the jpeg codec when the first frame is queued */ + jpeg_start(zr); + } + break; + + default: + dprintk(1, + KERN_ERR + "%s: VIDIOC_STREAMON - invalid map mode %d\n", + ZR_DEVNAME(zr), fh->map_mode); + res = -EINVAL; + break; + } + return res; +} + +static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int i, res = 0; + unsigned long flags; + + switch (fh->map_mode) { + case ZORAN_MAP_MODE_RAW: /* raw capture */ + if (fh->buffers.active == ZORAN_FREE && + zr->v4l_buffers.active != ZORAN_FREE) { + res = -EPERM; /* stay off other's settings! 
*/ + return res; + } + if (zr->v4l_buffers.active == ZORAN_FREE) + return res; + + spin_lock_irqsave(&zr->spinlock, flags); + /* unload capture */ + if (zr->v4l_memgrab_active) { + + zr36057_set_memgrab(zr, 0); + } + + for (i = 0; i < fh->buffers.num_buffers; i++) + zr->v4l_buffers.buffer[i].state = BUZ_STATE_USER; + fh->buffers = zr->v4l_buffers; + + zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; + + zr->v4l_grab_seq = 0; + zr->v4l_pend_head = zr->v4l_pend_tail = 0; + zr->v4l_sync_tail = 0; + + spin_unlock_irqrestore(&zr->spinlock, flags); + + break; + + case ZORAN_MAP_MODE_JPG_REC: + case ZORAN_MAP_MODE_JPG_PLAY: + if (fh->buffers.active == ZORAN_FREE && + zr->jpg_buffers.active != ZORAN_FREE) { + res = -EPERM; /* stay off other's settings! */ + return res; + } + if (zr->jpg_buffers.active == ZORAN_FREE) + return res; + + res = jpg_qbuf(fh, -1, + (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ? + BUZ_MODE_MOTION_COMPRESS : + BUZ_MODE_MOTION_DECOMPRESS); + if (res) + return res; + break; + default: + dprintk(1, KERN_ERR + "%s: VIDIOC_STREAMOFF - invalid map mode %d\n", + ZR_DEVNAME(zr), fh->map_mode); + res = -EINVAL; + break; + } + return res; +} +static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + *std = zr->norm; + return 0; +} + +static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int res = 0; + + res = zoran_set_norm(zr, std); + if (res) + return res; + + res = wait_grab_pending(zr); + return res; +} + +static int zoran_enum_input(struct file *file, void *__fh, + struct v4l2_input *inp) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + if (inp->index >= zr->card.inputs) + return -EINVAL; + + strncpy(inp->name, zr->card.input[inp->index].name, + sizeof(inp->name) - 1); + inp->type = V4L2_INPUT_TYPE_CAMERA; + inp->std = V4L2_STD_ALL; + + /* Get status of video decoder */ + 
decoder_call(zr, video, g_input_status, &inp->status); + return 0; +} + +static int zoran_g_input(struct file *file, void *__fh, unsigned int *input) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + *input = zr->input; + + return 0; +} + +static int zoran_s_input(struct file *file, void *__fh, unsigned int input) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int res; + + res = zoran_set_input(zr, input); + if (res) + return res; + + /* Make sure the changes come into effect */ + res = wait_grab_pending(zr); + return res; +} + +static int zoran_enum_output(struct file *file, void *__fh, + struct v4l2_output *outp) +{ + if (outp->index != 0) + return -EINVAL; + + outp->index = 0; + outp->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY; + strncpy(outp->name, "Autodetect", sizeof(outp->name)-1); + + return 0; +} + +static int zoran_g_output(struct file *file, void *__fh, unsigned int *output) +{ + *output = 0; + + return 0; +} + +static int zoran_s_output(struct file *file, void *__fh, unsigned int output) +{ + if (output != 0) + return -EINVAL; + + return 0; +} + +/* cropping (sub-frame capture) */ +static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + + if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && + sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + if (fh->map_mode == ZORAN_MAP_MODE_RAW) { + dprintk(1, KERN_ERR + "%s: VIDIOC_G_SELECTION - subcapture only supported for compressed capture\n", + ZR_DEVNAME(zr)); + return -EINVAL; + } + + switch (sel->target) { + case V4L2_SEL_TGT_CROP: + sel->r.top = fh->jpg_settings.img_y; + sel->r.left = fh->jpg_settings.img_x; + sel->r.width = fh->jpg_settings.img_width; + sel->r.height = fh->jpg_settings.img_height; + break; + case V4L2_SEL_TGT_CROP_DEFAULT: + sel->r.top = sel->r.left = 0; + sel->r.width = BUZ_MIN_WIDTH; + sel->r.height = BUZ_MIN_HEIGHT; + break; + case 
V4L2_SEL_TGT_CROP_BOUNDS: + sel->r.top = sel->r.left = 0; + sel->r.width = BUZ_MAX_WIDTH; + sel->r.height = BUZ_MAX_HEIGHT; + break; + default: + return -EINVAL; + } + return 0; +} + +static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + struct zoran_jpg_settings settings; + int res; + + if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && + sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + if (sel->target != V4L2_SEL_TGT_CROP) + return -EINVAL; + + if (fh->map_mode == ZORAN_MAP_MODE_RAW) { + dprintk(1, KERN_ERR + "%s: VIDIOC_S_SELECTION - subcapture only supported for compressed capture\n", + ZR_DEVNAME(zr)); + return -EINVAL; + } + + settings = fh->jpg_settings; + + if (fh->buffers.allocated) { + dprintk(1, KERN_ERR + "%s: VIDIOC_S_SELECTION - cannot change settings while active\n", + ZR_DEVNAME(zr)); + return -EBUSY; + } + + /* move into a form that we understand */ + settings.img_x = sel->r.left; + settings.img_y = sel->r.top; + settings.img_width = sel->r.width; + settings.img_height = sel->r.height; + + /* check validity */ + res = zoran_check_jpg_settings(zr, &settings, 0); + if (res) + return res; + + /* accept */ + fh->jpg_settings = settings; + return res; +} + +static int zoran_g_jpegcomp(struct file *file, void *__fh, + struct v4l2_jpegcompression *params) +{ + struct zoran_fh *fh = __fh; + memset(params, 0, sizeof(*params)); + + params->quality = fh->jpg_settings.jpg_comp.quality; + params->APPn = fh->jpg_settings.jpg_comp.APPn; + memcpy(params->APP_data, + fh->jpg_settings.jpg_comp.APP_data, + fh->jpg_settings.jpg_comp.APP_len); + params->APP_len = fh->jpg_settings.jpg_comp.APP_len; + memcpy(params->COM_data, + fh->jpg_settings.jpg_comp.COM_data, + fh->jpg_settings.jpg_comp.COM_len); + params->COM_len = fh->jpg_settings.jpg_comp.COM_len; + params->jpeg_markers = + fh->jpg_settings.jpg_comp.jpeg_markers; + + return 0; +} + +static int 
zoran_s_jpegcomp(struct file *file, void *__fh, + const struct v4l2_jpegcompression *params) +{ + struct zoran_fh *fh = __fh; + struct zoran *zr = fh->zr; + int res = 0; + struct zoran_jpg_settings settings; + + settings = fh->jpg_settings; + + settings.jpg_comp = *params; + + if (fh->buffers.active != ZORAN_FREE) { + dprintk(1, KERN_WARNING + "%s: VIDIOC_S_JPEGCOMP called while in playback/capture mode\n", + ZR_DEVNAME(zr)); + res = -EBUSY; + return res; + } + + res = zoran_check_jpg_settings(zr, &settings, 0); + if (res) + return res; + if (!fh->buffers.allocated) + fh->buffers.buffer_size = + zoran_v4l2_calc_bufsize(&fh->jpg_settings); + fh->jpg_settings.jpg_comp = settings.jpg_comp; + return res; +} + +static __poll_t +zoran_poll (struct file *file, + poll_table *wait) +{ + struct zoran_fh *fh = file->private_data; + struct zoran *zr = fh->zr; + __poll_t res = v4l2_ctrl_poll(file, wait); + int frame; + unsigned long flags; + + /* we should check whether buffers are ready to be synced on + * (w/o waits - O_NONBLOCK) here + * if ready for read (sync), return EPOLLIN|EPOLLRDNORM, + * if ready for write (sync), return EPOLLOUT|EPOLLWRNORM, + * if error, return EPOLLERR, + * if no buffers queued or so, return EPOLLNVAL + */ + + switch (fh->map_mode) { + case ZORAN_MAP_MODE_RAW: + poll_wait(file, &zr->v4l_capq, wait); + frame = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME]; + + spin_lock_irqsave(&zr->spinlock, flags); + dprintk(3, + KERN_DEBUG + "%s: %s() raw - active=%c, sync_tail=%lu/%c, pend_tail=%lu, pend_head=%lu\n", + ZR_DEVNAME(zr), __func__, + "FAL"[fh->buffers.active], zr->v4l_sync_tail, + "UPMD"[zr->v4l_buffers.buffer[frame].state], + zr->v4l_pend_tail, zr->v4l_pend_head); + /* Process is the one capturing? */ + if (fh->buffers.active != ZORAN_FREE && + /* Buffer ready to DQBUF? 
*/ + zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE) + res |= EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&zr->spinlock, flags); + + break; + + case ZORAN_MAP_MODE_JPG_REC: + case ZORAN_MAP_MODE_JPG_PLAY: + poll_wait(file, &zr->jpg_capq, wait); + frame = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; + + spin_lock_irqsave(&zr->spinlock, flags); + dprintk(3, + KERN_DEBUG + "%s: %s() jpg - active=%c, que_tail=%lu/%c, que_head=%lu, dma=%lu/%lu\n", + ZR_DEVNAME(zr), __func__, + "FAL"[fh->buffers.active], zr->jpg_que_tail, + "UPMD"[zr->jpg_buffers.buffer[frame].state], + zr->jpg_que_head, zr->jpg_dma_tail, zr->jpg_dma_head); + if (fh->buffers.active != ZORAN_FREE && + zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) { + if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) + res |= EPOLLIN | EPOLLRDNORM; + else + res |= EPOLLOUT | EPOLLWRNORM; + } + spin_unlock_irqrestore(&zr->spinlock, flags); + + break; + + default: + dprintk(1, + KERN_ERR + "%s: %s - internal error, unknown map_mode=%d\n", + ZR_DEVNAME(zr), __func__, fh->map_mode); + res |= EPOLLERR; + } + + return res; +} + + +/* + * This maps the buffers to user space. 
+ * + * Depending on the state of fh->map_mode + * the V4L or the MJPEG buffers are mapped + * per buffer or all together + * + * Note that we need to connect to some + * unmap signal event to unmap the de-allocate + * the buffer accordingly (zoran_vm_close()) + */ + +static void +zoran_vm_open (struct vm_area_struct *vma) +{ + struct zoran_mapping *map = vma->vm_private_data; + atomic_inc(&map->count); +} + +static void +zoran_vm_close (struct vm_area_struct *vma) +{ + struct zoran_mapping *map = vma->vm_private_data; + struct zoran_fh *fh = map->fh; + struct zoran *zr = fh->zr; + int i; + + dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr), + __func__, mode_name(fh->map_mode)); + + for (i = 0; i < fh->buffers.num_buffers; i++) { + if (fh->buffers.buffer[i].map == map) + fh->buffers.buffer[i].map = NULL; + } + kfree(map); + + /* Any buffers still mapped? */ + for (i = 0; i < fh->buffers.num_buffers; i++) { + if (fh->buffers.buffer[i].map) { + return; + } + } + + dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr), + __func__, mode_name(fh->map_mode)); + + if (fh->map_mode == ZORAN_MAP_MODE_RAW) { + if (fh->buffers.active != ZORAN_FREE) { + unsigned long flags; + + spin_lock_irqsave(&zr->spinlock, flags); + zr36057_set_memgrab(zr, 0); + zr->v4l_buffers.allocated = 0; + zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; + spin_unlock_irqrestore(&zr->spinlock, flags); + } + v4l_fbuffer_free(fh); + } else { + if (fh->buffers.active != ZORAN_FREE) { + jpg_qbuf(fh, -1, zr->codec_mode); + zr->jpg_buffers.allocated = 0; + zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE; + } + jpg_fbuffer_free(fh); + } +} + +static const struct vm_operations_struct zoran_vm_ops = { + .open = zoran_vm_open, + .close = zoran_vm_close, +}; + +static int +zoran_mmap (struct file *file, + struct vm_area_struct *vma) +{ + struct zoran_fh *fh = file->private_data; + struct zoran *zr = fh->zr; + unsigned long size = (vma->vm_end - vma->vm_start); + unsigned 
long offset = vma->vm_pgoff << PAGE_SHIFT; + int i, j; + unsigned long page, start = vma->vm_start, todo, pos, fraglen; + int first, last; + struct zoran_mapping *map; + int res = 0; + + dprintk(3, + KERN_INFO "%s: %s(%s) of 0x%08lx-0x%08lx (size=%lu)\n", + ZR_DEVNAME(zr), __func__, + mode_name(fh->map_mode), vma->vm_start, vma->vm_end, size); + + if (!(vma->vm_flags & VM_SHARED) || !(vma->vm_flags & VM_READ) || + !(vma->vm_flags & VM_WRITE)) { + dprintk(1, + KERN_ERR + "%s: %s - no MAP_SHARED/PROT_{READ,WRITE} given\n", + ZR_DEVNAME(zr), __func__); + return -EINVAL; + } + + if (!fh->buffers.allocated) { + dprintk(1, + KERN_ERR + "%s: %s(%s) - buffers not yet allocated\n", + ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode)); + res = -ENOMEM; + return res; + } + + first = offset / fh->buffers.buffer_size; + last = first - 1 + size / fh->buffers.buffer_size; + if (offset % fh->buffers.buffer_size != 0 || + size % fh->buffers.buffer_size != 0 || first < 0 || + last < 0 || first >= fh->buffers.num_buffers || + last >= fh->buffers.buffer_size) { + dprintk(1, + KERN_ERR + "%s: %s(%s) - offset=%lu or size=%lu invalid for bufsize=%d and numbufs=%d\n", + ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), offset, size, + fh->buffers.buffer_size, + fh->buffers.num_buffers); + res = -EINVAL; + return res; + } + + /* Check if any buffers are already mapped */ + for (i = first; i <= last; i++) { + if (fh->buffers.buffer[i].map) { + dprintk(1, + KERN_ERR + "%s: %s(%s) - buffer %d already mapped\n", + ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), i); + res = -EBUSY; + return res; + } + } + + /* map these buffers */ + map = kmalloc(sizeof(struct zoran_mapping), GFP_KERNEL); + if (!map) { + res = -ENOMEM; + return res; + } + map->fh = fh; + atomic_set(&map->count, 1); + + vma->vm_ops = &zoran_vm_ops; + vma->vm_flags |= VM_DONTEXPAND; + vma->vm_private_data = map; + + if (fh->map_mode == ZORAN_MAP_MODE_RAW) { + for (i = first; i <= last; i++) { + todo = size; + if (todo > 
fh->buffers.buffer_size) + todo = fh->buffers.buffer_size; + page = fh->buffers.buffer[i].v4l.fbuffer_phys; + if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, + todo, PAGE_SHARED)) { + dprintk(1, + KERN_ERR + "%s: %s(V4L) - remap_pfn_range failed\n", + ZR_DEVNAME(zr), __func__); + res = -EAGAIN; + return res; + } + size -= todo; + start += todo; + fh->buffers.buffer[i].map = map; + if (size == 0) + break; + } + } else { + for (i = first; i <= last; i++) { + for (j = 0; + j < fh->buffers.buffer_size / PAGE_SIZE; + j++) { + fraglen = + (le32_to_cpu(fh->buffers.buffer[i].jpg. + frag_tab[2 * j + 1]) & ~1) << 1; + todo = size; + if (todo > fraglen) + todo = fraglen; + pos = + le32_to_cpu(fh->buffers. + buffer[i].jpg.frag_tab[2 * j]); + /* should just be pos on i386 */ + page = virt_to_phys(bus_to_virt(pos)) + >> PAGE_SHIFT; + if (remap_pfn_range(vma, start, page, + todo, PAGE_SHARED)) { + dprintk(1, + KERN_ERR + "%s: %s(V4L) - remap_pfn_range failed\n", + ZR_DEVNAME(zr), __func__); + res = -EAGAIN; + return res; + } + size -= todo; + start += todo; + if (size == 0) + break; + if (le32_to_cpu(fh->buffers.buffer[i].jpg. 
+ frag_tab[2 * j + 1]) & 1) + break; /* was last fragment */ + } + fh->buffers.buffer[i].map = map; + if (size == 0) + break; + + } + } + return res; +} + +static const struct v4l2_ioctl_ops zoran_ioctl_ops = { + .vidioc_querycap = zoran_querycap, + .vidioc_s_selection = zoran_s_selection, + .vidioc_g_selection = zoran_g_selection, + .vidioc_enum_input = zoran_enum_input, + .vidioc_g_input = zoran_g_input, + .vidioc_s_input = zoran_s_input, + .vidioc_enum_output = zoran_enum_output, + .vidioc_g_output = zoran_g_output, + .vidioc_s_output = zoran_s_output, + .vidioc_g_fbuf = zoran_g_fbuf, + .vidioc_s_fbuf = zoran_s_fbuf, + .vidioc_g_std = zoran_g_std, + .vidioc_s_std = zoran_s_std, + .vidioc_g_jpegcomp = zoran_g_jpegcomp, + .vidioc_s_jpegcomp = zoran_s_jpegcomp, + .vidioc_overlay = zoran_overlay, + .vidioc_reqbufs = zoran_reqbufs, + .vidioc_querybuf = zoran_querybuf, + .vidioc_qbuf = zoran_qbuf, + .vidioc_dqbuf = zoran_dqbuf, + .vidioc_streamon = zoran_streamon, + .vidioc_streamoff = zoran_streamoff, + .vidioc_enum_fmt_vid_cap = zoran_enum_fmt_vid_cap, + .vidioc_enum_fmt_vid_out = zoran_enum_fmt_vid_out, + .vidioc_enum_fmt_vid_overlay = zoran_enum_fmt_vid_overlay, + .vidioc_g_fmt_vid_cap = zoran_g_fmt_vid_cap, + .vidioc_g_fmt_vid_out = zoran_g_fmt_vid_out, + .vidioc_g_fmt_vid_overlay = zoran_g_fmt_vid_overlay, + .vidioc_s_fmt_vid_cap = zoran_s_fmt_vid_cap, + .vidioc_s_fmt_vid_out = zoran_s_fmt_vid_out, + .vidioc_s_fmt_vid_overlay = zoran_s_fmt_vid_overlay, + .vidioc_try_fmt_vid_cap = zoran_try_fmt_vid_cap, + .vidioc_try_fmt_vid_out = zoran_try_fmt_vid_out, + .vidioc_try_fmt_vid_overlay = zoran_try_fmt_vid_overlay, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static const struct v4l2_file_operations zoran_fops = { + .owner = THIS_MODULE, + .open = zoran_open, + .release = zoran_close, + .unlocked_ioctl = video_ioctl2, + .mmap = zoran_mmap, + .poll = zoran_poll, +}; + +const struct video_device 
zoran_template = { + .name = ZORAN_NAME, + .fops = &zoran_fops, + .ioctl_ops = &zoran_ioctl_ops, + .release = &zoran_vdev_release, + .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM, +}; + diff --git a/drivers/staging/media/zoran/zoran_procfs.c b/drivers/staging/media/zoran/zoran_procfs.c new file mode 100644 index 000000000000..78ac8f853748 --- /dev/null +++ b/drivers/staging/media/zoran/zoran_procfs.c @@ -0,0 +1,221 @@ +/* + * Zoran zr36057/zr36067 PCI controller driver, for the + * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux + * Media Labs LML33/LML33R10. + * + * This part handles the procFS entries (/proc/ZORAN[%d]) + * + * Copyright (C) 2000 Serguei Miridonov + * + * Currently maintained by: + * Ronald Bultje + * Laurent Pinchart + * Mailinglist + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "videocodec.h" +#include "zoran.h" +#include "zoran_procfs.h" +#include "zoran_card.h" + +#ifdef CONFIG_PROC_FS +struct procfs_params_zr36067 { + char *name; + short reg; + u32 mask; + short bit; +}; + +static const struct procfs_params_zr36067 zr67[] = { + {"HSPol", 0x000, 1, 30}, + {"HStart", 0x000, 0x3ff, 10}, + {"HEnd", 0x000, 0x3ff, 0}, + + {"VSPol", 0x004, 1, 30}, + {"VStart", 0x004, 0x3ff, 10}, + {"VEnd", 0x004, 0x3ff, 0}, + + {"ExtFl", 0x008, 1, 26}, + {"TopField", 0x008, 1, 25}, + {"VCLKPol", 0x008, 1, 24}, + {"DupFld", 0x008, 1, 20}, + {"LittleEndian", 0x008, 1, 0}, + + {"HsyncStart", 0x10c, 0xffff, 16}, + {"LineTot", 0x10c, 0xffff, 0}, + + {"NAX", 0x110, 0xffff, 16}, + {"PAX", 0x110, 0xffff, 0}, + + {"NAY", 0x114, 0xffff, 16}, + {"PAY", 0x114, 0xffff, 0}, + + /* {"",,,}, */ + + {NULL, 0, 0, 0}, +}; + +static void +setparam (struct zoran *zr, + char *name, + char *sval) +{ + int i = 0, reg0, reg, val; + + while (zr67[i].name != NULL) { + if (!strncmp(name, zr67[i].name, strlen(zr67[i].name))) { + reg = reg0 = btread(zr67[i].reg); + reg &= ~(zr67[i].mask << zr67[i].bit); + if (!isdigit(sval[0])) + break; + val = simple_strtoul(sval, NULL, 0); + if ((val & ~zr67[i].mask)) + break; + reg |= (val & zr67[i].mask) << zr67[i].bit; + dprintk(4, + KERN_INFO + "%s: setparam: setting ZR36067 register 0x%03x: 0x%08x=>0x%08x %s=%d\n", + ZR_DEVNAME(zr), zr67[i].reg, reg0, reg, + zr67[i].name, val); + btwrite(reg, zr67[i].reg); + break; + } + i++; + } +} + +static int zoran_show(struct seq_file *p, void *v) +{ + struct zoran *zr = p->private; + int i; + + seq_printf(p, "ZR36067 registers:\n"); + for (i = 0; i < 0x130; i += 16) + seq_printf(p, "%03X %08X %08X %08X %08X \n", i, + btread(i), btread(i+4), btread(i+8), btread(i+12)); + return 0; +} + +static int zoran_open(struct inode *inode, struct file 
*file) +{ + struct zoran *data = PDE_DATA(inode); + return single_open(file, zoran_show, data); +} + +static ssize_t zoran_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct zoran *zr = PDE_DATA(file_inode(file)); + char *string, *sp; + char *line, *ldelim, *varname, *svar, *tdelim; + + if (count > 32768) /* Stupidity filter */ + return -EINVAL; + + string = sp = vmalloc(count + 1); + if (!string) { + dprintk(1, + KERN_ERR + "%s: write_proc: can not allocate memory\n", + ZR_DEVNAME(zr)); + return -ENOMEM; + } + if (copy_from_user(string, buffer, count)) { + vfree (string); + return -EFAULT; + } + string[count] = 0; + dprintk(4, KERN_INFO "%s: write_proc: name=%pD count=%zu zr=%p\n", + ZR_DEVNAME(zr), file, count, zr); + ldelim = " \t\n"; + tdelim = "="; + line = strpbrk(sp, ldelim); + while (line) { + *line = 0; + svar = strpbrk(sp, tdelim); + if (svar) { + *svar = 0; + varname = sp; + svar++; + setparam(zr, varname, svar); + } + sp = line + 1; + line = strpbrk(sp, ldelim); + } + vfree(string); + + return count; +} + +static const struct file_operations zoran_operations = { + .owner = THIS_MODULE, + .open = zoran_open, + .read = seq_read, + .write = zoran_write, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +int +zoran_proc_init (struct zoran *zr) +{ +#ifdef CONFIG_PROC_FS + char name[8]; + + snprintf(name, 7, "zoran%d", zr->id); + zr->zoran_proc = proc_create_data(name, 0, NULL, &zoran_operations, zr); + if (zr->zoran_proc != NULL) { + dprintk(2, + KERN_INFO + "%s: procfs entry /proc/%s allocated. 
data=%p\n", + ZR_DEVNAME(zr), name, zr); + } else { + dprintk(1, KERN_ERR "%s: Unable to initialise /proc/%s\n", + ZR_DEVNAME(zr), name); + return 1; + } +#endif + return 0; +} + +void +zoran_proc_cleanup (struct zoran *zr) +{ +#ifdef CONFIG_PROC_FS + char name[8]; + + snprintf(name, 7, "zoran%d", zr->id); + if (zr->zoran_proc) + remove_proc_entry(name, NULL); + zr->zoran_proc = NULL; +#endif +} diff --git a/drivers/staging/media/zoran/zoran_procfs.h b/drivers/staging/media/zoran/zoran_procfs.h new file mode 100644 index 000000000000..0ac7cb0011f2 --- /dev/null +++ b/drivers/staging/media/zoran/zoran_procfs.h @@ -0,0 +1,32 @@ +/* + * Zoran zr36057/zr36067 PCI controller driver, for the + * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux + * Media Labs LML33/LML33R10. + * + * This part handles card-specific data and detection + * + * Copyright (C) 2000 Serguei Miridonov + * + * Currently maintained by: + * Ronald Bultje + * Laurent Pinchart + * Mailinglist + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ZORAN_PROCFS_H__ +#define __ZORAN_PROCFS_H__ + +extern int zoran_proc_init(struct zoran *zr); +extern void zoran_proc_cleanup(struct zoran *zr); + +#endif /* __ZORAN_PROCFS_H__ */ diff --git a/drivers/staging/media/zoran/zr36016.c b/drivers/staging/media/zoran/zr36016.c new file mode 100644 index 000000000000..8736b9d8d97e --- /dev/null +++ b/drivers/staging/media/zoran/zr36016.c @@ -0,0 +1,516 @@ +/* + * Zoran ZR36016 basic configuration functions + * + * Copyright (C) 2001 Wolfgang Scherr + * + * $Id: zr36016.c,v 1.1.2.14 2003/08/20 19:46:55 rbultje Exp $ + * + * ------------------------------------------------------------------------ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ------------------------------------------------------------------------ + */ + +#define ZR016_VERSION "v0.7" + +#include +#include +#include +#include + +#include +#include + +/* I/O commands, error codes */ +#include + +/* v4l API */ + +/* headerfile of this module */ +#include "zr36016.h" + +/* codec io API */ +#include "videocodec.h" + +/* it doesn't make sense to have more than 20 or so, + just to prevent some unwanted loops */ +#define MAX_CODECS 20 + +/* amount of chips attached via this driver */ +static int zr36016_codecs; + +/* debugging is available via module parameter */ +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0-4)"); + +#define dprintk(num, format, args...) 
\ + do { \ + if (debug >= num) \ + printk(format, ##args); \ + } while (0) + +/* ========================================================================= + Local hardware I/O functions: + + read/write via codec layer (registers are located in the master device) + ========================================================================= */ + +/* read and write functions */ +static u8 +zr36016_read (struct zr36016 *ptr, + u16 reg) +{ + u8 value = 0; + + // just in case something is wrong... + if (ptr->codec->master_data->readreg) + value = + (ptr->codec->master_data-> + readreg(ptr->codec, reg)) & 0xFF; + else + dprintk(1, + KERN_ERR "%s: invalid I/O setup, nothing read!\n", + ptr->name); + + dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, + value); + + return value; +} + +static void +zr36016_write (struct zr36016 *ptr, + u16 reg, + u8 value) +{ + dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, + reg); + + // just in case something is wrong... + if (ptr->codec->master_data->writereg) { + ptr->codec->master_data->writereg(ptr->codec, reg, value); + } else + dprintk(1, + KERN_ERR + "%s: invalid I/O setup, nothing written!\n", + ptr->name); +} + +/* indirect read and write functions */ +/* the 016 supports auto-addr-increment, but + * writing it all time cost not much and is safer... */ +static u8 +zr36016_readi (struct zr36016 *ptr, + u16 reg) +{ + u8 value = 0; + + // just in case something is wrong... 
+ if ((ptr->codec->master_data->writereg) && + (ptr->codec->master_data->readreg)) { + ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR + value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA + } else + dprintk(1, + KERN_ERR + "%s: invalid I/O setup, nothing read (i)!\n", + ptr->name); + + dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name, + reg, value); + return value; +} + +static void +zr36016_writei (struct zr36016 *ptr, + u16 reg, + u8 value) +{ + dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name, + value, reg); + + // just in case something is wrong... + if (ptr->codec->master_data->writereg) { + ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR + ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA + } else + dprintk(1, + KERN_ERR + "%s: invalid I/O setup, nothing written (i)!\n", + ptr->name); +} + +/* ========================================================================= + Local helper function: + + version read + ========================================================================= */ + +/* version kept in datastructure */ +static u8 +zr36016_read_version (struct zr36016 *ptr) +{ + ptr->version = zr36016_read(ptr, 0) >> 4; + return ptr->version; +} + +/* ========================================================================= + Local helper function: + + basic test of "connectivity", writes/reads to/from PAX-Lo register + ========================================================================= */ + +static int +zr36016_basic_test (struct zr36016 *ptr) +{ + if (debug) { + int i; + zr36016_writei(ptr, ZR016I_PAX_LO, 0x55); + dprintk(1, KERN_INFO "%s: registers: ", ptr->name); + for (i = 0; i <= 0x0b; i++) + dprintk(1, "%02x ", zr36016_readi(ptr, i)); + dprintk(1, "\n"); + } + // for testing just write 0, then the default value to a register and read + // it back in both cases + zr36016_writei(ptr, 
ZR016I_PAX_LO, 0x00); + if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) { + dprintk(1, + KERN_ERR + "%s: attach failed, can't connect to vfe processor!\n", + ptr->name); + return -ENXIO; + } + zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0); + if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) { + dprintk(1, + KERN_ERR + "%s: attach failed, can't connect to vfe processor!\n", + ptr->name); + return -ENXIO; + } + // we allow version numbers from 0-3, should be enough, though + zr36016_read_version(ptr); + if (ptr->version & 0x0c) { + dprintk(1, + KERN_ERR + "%s: attach failed, suspicious version %d found...\n", + ptr->name, ptr->version); + return -ENXIO; + } + + return 0; /* looks good! */ +} + +/* ========================================================================= + Local helper function: + + simple loop for pushing the init datasets - NO USE -- + ========================================================================= */ + +#if 0 +static int zr36016_pushit (struct zr36016 *ptr, + u16 startreg, + u16 len, + const char *data) +{ + int i=0; + + dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", + ptr->name, startreg,len); + while (imode == CODEC_DO_COMPRESSION ? + ZR016_COMPRESSION : ZR016_EXPANSION)); + + // misc setup + zr36016_writei(ptr, ZR016I_SETUP1, + (ptr->xdec ? (ZR016_HRFL | ZR016_HORZ) : 0) | + (ptr->ydec ? ZR016_VERT : 0) | ZR016_CNTI); + zr36016_writei(ptr, ZR016I_SETUP2, ZR016_CCIR); + + // Window setup + // (no extra offset for now, norm defines offset, default width height) + zr36016_writei(ptr, ZR016I_PAX_HI, ptr->width >> 8); + zr36016_writei(ptr, ZR016I_PAX_LO, ptr->width & 0xFF); + zr36016_writei(ptr, ZR016I_PAY_HI, ptr->height >> 8); + zr36016_writei(ptr, ZR016I_PAY_LO, ptr->height & 0xFF); + zr36016_writei(ptr, ZR016I_NAX_HI, ptr->xoff >> 8); + zr36016_writei(ptr, ZR016I_NAX_LO, ptr->xoff & 0xFF); + zr36016_writei(ptr, ZR016I_NAY_HI, ptr->yoff >> 8); + zr36016_writei(ptr, ZR016I_NAY_LO, ptr->yoff & 0xFF); + + /* shall we continue now, please? 
*/ + zr36016_write(ptr, ZR016_GOSTOP, 1); +} + +/* ========================================================================= + CODEC API FUNCTIONS + + this functions are accessed by the master via the API structure + ========================================================================= */ + +/* set compression/expansion mode and launches codec - + this should be the last call from the master before starting processing */ +static int +zr36016_set_mode (struct videocodec *codec, + int mode) +{ + struct zr36016 *ptr = (struct zr36016 *) codec->data; + + dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); + + if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) + return -EINVAL; + + ptr->mode = mode; + zr36016_init(ptr); + + return 0; +} + +/* set picture size */ +static int +zr36016_set_video (struct videocodec *codec, + struct tvnorm *norm, + struct vfe_settings *cap, + struct vfe_polarity *pol) +{ + struct zr36016 *ptr = (struct zr36016 *) codec->data; + + dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n", + ptr->name, norm->HStart, norm->VStart, + cap->x, cap->y, cap->width, cap->height, + cap->decimation); + + /* if () return -EINVAL; + * trust the master driver that it knows what it does - so + * we allow invalid startx/y for now ... */ + ptr->width = cap->width; + ptr->height = cap->height; + /* (Ronald) This is ugly. zoran_device.c, line 387 + * already mentions what happens if HStart is even + * (blue faces, etc., cr/cb inversed). There's probably + * some good reason why HStart is 0 instead of 1, so I'm + * leaving it to this for now, but really... This can be + * done a lot simpler */ + ptr->xoff = (norm->HStart ? norm->HStart : 1) + cap->x; + /* Something to note here (I don't understand it), setting + * VStart too high will cause the codec to 'not work'. I + * really don't get it. values of 16 (VStart) already break + * it here. Just '0' seems to work. More testing needed! 
*/ + ptr->yoff = norm->VStart + cap->y; + /* (Ronald) dzjeeh, can't this thing do hor_decimation = 4? */ + ptr->xdec = ((cap->decimation & 0xff) == 1) ? 0 : 1; + ptr->ydec = (((cap->decimation >> 8) & 0xff) == 1) ? 0 : 1; + + return 0; +} + +/* additional control functions */ +static int +zr36016_control (struct videocodec *codec, + int type, + int size, + void *data) +{ + struct zr36016 *ptr = (struct zr36016 *) codec->data; + int *ival = (int *) data; + + dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, + size); + + switch (type) { + case CODEC_G_STATUS: /* get last status - we don't know it ... */ + if (size != sizeof(int)) + return -EFAULT; + *ival = 0; + break; + + case CODEC_G_CODEC_MODE: + if (size != sizeof(int)) + return -EFAULT; + *ival = 0; + break; + + case CODEC_S_CODEC_MODE: + if (size != sizeof(int)) + return -EFAULT; + if (*ival != 0) + return -EINVAL; + /* not needed, do nothing */ + return 0; + + case CODEC_G_VFE: + case CODEC_S_VFE: + return 0; + + case CODEC_S_MMAP: + /* not available, give an error */ + return -ENXIO; + + default: + return -EINVAL; + } + + return size; +} + +/* ========================================================================= + Exit and unregister function: + + Deinitializes Zoran's JPEG processor + ========================================================================= */ + +static int +zr36016_unset (struct videocodec *codec) +{ + struct zr36016 *ptr = codec->data; + + if (ptr) { + /* do wee need some codec deinit here, too ???? */ + + dprintk(1, "%s: finished codec #%d\n", ptr->name, + ptr->num); + kfree(ptr); + codec->data = NULL; + + zr36016_codecs--; + return 0; + } + + return -EFAULT; +} + +/* ========================================================================= + Setup and registry function: + + Initializes Zoran's JPEG processor + + Also sets pixel size, average code size, mode (compr./decompr.) 
+ (the given size is determined by the processor with the video interface) + ========================================================================= */ + +static int +zr36016_setup (struct videocodec *codec) +{ + struct zr36016 *ptr; + int res; + + dprintk(2, "zr36016: initializing VFE subsystem #%d.\n", + zr36016_codecs); + + if (zr36016_codecs == MAX_CODECS) { + dprintk(1, + KERN_ERR "zr36016: Can't attach more codecs!\n"); + return -ENOSPC; + } + //mem structure init + codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL); + if (NULL == ptr) { + dprintk(1, KERN_ERR "zr36016: Can't get enough memory!\n"); + return -ENOMEM; + } + + snprintf(ptr->name, sizeof(ptr->name), "zr36016[%d]", + zr36016_codecs); + ptr->num = zr36016_codecs++; + ptr->codec = codec; + + //testing + res = zr36016_basic_test(ptr); + if (res < 0) { + zr36016_unset(codec); + return res; + } + //final setup + ptr->mode = CODEC_DO_COMPRESSION; + ptr->width = 768; + ptr->height = 288; + ptr->xdec = 1; + ptr->ydec = 0; + zr36016_init(ptr); + + dprintk(1, KERN_INFO "%s: codec v%d attached and running\n", + ptr->name, ptr->version); + + return 0; +} + +static const struct videocodec zr36016_codec = { + .owner = THIS_MODULE, + .name = "zr36016", + .magic = 0L, // magic not used + .flags = + CODEC_FLAG_HARDWARE | CODEC_FLAG_VFE | CODEC_FLAG_ENCODER | + CODEC_FLAG_DECODER, + .type = CODEC_TYPE_ZR36016, + .setup = zr36016_setup, // functionality + .unset = zr36016_unset, + .set_mode = zr36016_set_mode, + .set_video = zr36016_set_video, + .control = zr36016_control, + // others are not used +}; + +/* ========================================================================= + HOOK IN DRIVER AS KERNEL MODULE + ========================================================================= */ + +static int __init +zr36016_init_module (void) +{ + //dprintk(1, "ZR36016 driver %s\n",ZR016_VERSION); + zr36016_codecs = 0; + return videocodec_register(&zr36016_codec); +} + +static void __exit 
+zr36016_cleanup_module (void) +{ + if (zr36016_codecs) { + dprintk(1, + "zr36016: something's wrong - %d codecs left somehow.\n", + zr36016_codecs); + } + videocodec_unregister(&zr36016_codec); +} + +module_init(zr36016_init_module); +module_exit(zr36016_cleanup_module); + +MODULE_AUTHOR("Wolfgang Scherr "); +MODULE_DESCRIPTION("Driver module for ZR36016 video frontends " + ZR016_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/zoran/zr36016.h b/drivers/staging/media/zoran/zr36016.h new file mode 100644 index 000000000000..784bcf5727b8 --- /dev/null +++ b/drivers/staging/media/zoran/zr36016.h @@ -0,0 +1,107 @@ +/* + * Zoran ZR36016 basic configuration functions - header file + * + * Copyright (C) 2001 Wolfgang Scherr + * + * $Id: zr36016.h,v 1.1.2.3 2003/01/14 21:18:07 rbultje Exp $ + * + * ------------------------------------------------------------------------ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * ------------------------------------------------------------------------ + */ + +#ifndef ZR36016_H +#define ZR36016_H + +/* data stored for each zoran jpeg codec chip */ +struct zr36016 { + char name[32]; + int num; + /* io datastructure */ + struct videocodec *codec; + // coder status + __u8 version; + // actual coder setup + int mode; + + __u16 xoff; + __u16 yoff; + __u16 width; + __u16 height; + __u16 xdec; + __u16 ydec; +}; + +/* direct register addresses */ +#define ZR016_GOSTOP 0x00 +#define ZR016_MODE 0x01 +#define ZR016_IADDR 0x02 +#define ZR016_IDATA 0x03 + +/* indirect register addresses */ +#define ZR016I_SETUP1 0x00 +#define ZR016I_SETUP2 0x01 +#define ZR016I_NAX_LO 0x02 +#define ZR016I_NAX_HI 0x03 +#define ZR016I_PAX_LO 0x04 +#define ZR016I_PAX_HI 0x05 +#define ZR016I_NAY_LO 0x06 +#define ZR016I_NAY_HI 0x07 +#define ZR016I_PAY_LO 0x08 +#define ZR016I_PAY_HI 0x09 +#define ZR016I_NOL_LO 0x0a +#define ZR016I_NOL_HI 0x0b + +/* possible values for mode register */ +#define ZR016_RGB444_YUV444 0x00 +#define ZR016_RGB444_YUV422 0x01 +#define ZR016_RGB444_YUV411 0x02 +#define ZR016_RGB444_Y400 0x03 +#define ZR016_RGB444_RGB444 0x04 +#define ZR016_YUV444_YUV444 0x08 +#define ZR016_YUV444_YUV422 0x09 +#define ZR016_YUV444_YUV411 0x0a +#define ZR016_YUV444_Y400 0x0b +#define ZR016_YUV444_RGB444 0x0c +#define ZR016_YUV422_YUV422 0x11 +#define ZR016_YUV422_YUV411 0x12 +#define ZR016_YUV422_Y400 0x13 +#define ZR016_YUV411_YUV411 0x16 +#define ZR016_YUV411_Y400 0x17 +#define ZR016_4444_4444 0x19 +#define ZR016_100_100 0x1b + +#define ZR016_RGB444 0x00 +#define ZR016_YUV444 0x20 +#define ZR016_YUV422 0x40 + +#define ZR016_COMPRESSION 0x80 +#define ZR016_EXPANSION 0x80 + +/* possible values for setup 1 register */ +#define ZR016_CKRT 0x80 +#define ZR016_VERT 0x40 +#define ZR016_HORZ 0x20 +#define ZR016_HRFL 0x10 +#define ZR016_DSFL 0x08 +#define ZR016_SBFL 0x04 +#define ZR016_RSTR 0x02 +#define ZR016_CNTI 0x01 + +/* possible values for setup 2 register */ 
+#define ZR016_SYEN 0x40 +#define ZR016_CCIR 0x04 +#define ZR016_SIGN 0x02 +#define ZR016_YMCS 0x01 + +#endif /*fndef ZR36016_H */ diff --git a/drivers/staging/media/zoran/zr36050.c b/drivers/staging/media/zoran/zr36050.c new file mode 100644 index 000000000000..5ebfc16672f3 --- /dev/null +++ b/drivers/staging/media/zoran/zr36050.c @@ -0,0 +1,896 @@ +/* + * Zoran ZR36050 basic configuration functions + * + * Copyright (C) 2001 Wolfgang Scherr + * + * $Id: zr36050.c,v 1.1.2.11 2003/08/03 14:54:53 rbultje Exp $ + * + * ------------------------------------------------------------------------ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ------------------------------------------------------------------------ + */ + +#define ZR050_VERSION "v0.7.1" + +#include +#include +#include +#include + +#include +#include + +/* I/O commands, error codes */ +#include + +/* headerfile of this module */ +#include "zr36050.h" + +/* codec io API */ +#include "videocodec.h" + +/* it doesn't make sense to have more than 20 or so, + just to prevent some unwanted loops */ +#define MAX_CODECS 20 + +/* amount of chips attached via this driver */ +static int zr36050_codecs; + +/* debugging is available via module parameter */ +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0-4)"); + +#define dprintk(num, format, args...) 
\ + do { \ + if (debug >= num) \ + printk(format, ##args); \ + } while (0) + +/* ========================================================================= + Local hardware I/O functions: + + read/write via codec layer (registers are located in the master device) + ========================================================================= */ + +/* read and write functions */ +static u8 +zr36050_read (struct zr36050 *ptr, + u16 reg) +{ + u8 value = 0; + + // just in case something is wrong... + if (ptr->codec->master_data->readreg) + value = (ptr->codec->master_data->readreg(ptr->codec, + reg)) & 0xFF; + else + dprintk(1, + KERN_ERR "%s: invalid I/O setup, nothing read!\n", + ptr->name); + + dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, + value); + + return value; +} + +static void +zr36050_write (struct zr36050 *ptr, + u16 reg, + u8 value) +{ + dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, + reg); + + // just in case something is wrong... + if (ptr->codec->master_data->writereg) + ptr->codec->master_data->writereg(ptr->codec, reg, value); + else + dprintk(1, + KERN_ERR + "%s: invalid I/O setup, nothing written!\n", + ptr->name); +} + +/* ========================================================================= + Local helper function: + + status read + ========================================================================= */ + +/* status is kept in datastructure */ +static u8 +zr36050_read_status1 (struct zr36050 *ptr) +{ + ptr->status1 = zr36050_read(ptr, ZR050_STATUS_1); + + zr36050_read(ptr, 0); + return ptr->status1; +} + +/* ========================================================================= + Local helper function: + + scale factor read + ========================================================================= */ + +/* scale factor is kept in datastructure */ +static u16 +zr36050_read_scalefactor (struct zr36050 *ptr) +{ + ptr->scalefact = (zr36050_read(ptr, ZR050_SF_HI) << 8) | + (zr36050_read(ptr, ZR050_SF_LO) & 
0xFF); + + /* leave 0 selected for an eventually GO from master */ + zr36050_read(ptr, 0); + return ptr->scalefact; +} + +/* ========================================================================= + Local helper function: + + wait if codec is ready to proceed (end of processing) or time is over + ========================================================================= */ + +static void +zr36050_wait_end (struct zr36050 *ptr) +{ + int i = 0; + + while (!(zr36050_read_status1(ptr) & 0x4)) { + udelay(1); + if (i++ > 200000) { // 200ms, there is for sure something wrong!!! + dprintk(1, + "%s: timeout at wait_end (last status: 0x%02x)\n", + ptr->name, ptr->status1); + break; + } + } +} + +/* ========================================================================= + Local helper function: + + basic test of "connectivity", writes/reads to/from memory the SOF marker + ========================================================================= */ + +static int +zr36050_basic_test (struct zr36050 *ptr) +{ + zr36050_write(ptr, ZR050_SOF_IDX, 0x00); + zr36050_write(ptr, ZR050_SOF_IDX + 1, 0x00); + if ((zr36050_read(ptr, ZR050_SOF_IDX) | + zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0x0000) { + dprintk(1, + KERN_ERR + "%s: attach failed, can't connect to jpeg processor!\n", + ptr->name); + return -ENXIO; + } + zr36050_write(ptr, ZR050_SOF_IDX, 0xff); + zr36050_write(ptr, ZR050_SOF_IDX + 1, 0xc0); + if (((zr36050_read(ptr, ZR050_SOF_IDX) << 8) | + zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0xffc0) { + dprintk(1, + KERN_ERR + "%s: attach failed, can't connect to jpeg processor!\n", + ptr->name); + return -ENXIO; + } + + zr36050_wait_end(ptr); + if ((ptr->status1 & 0x4) == 0) { + dprintk(1, + KERN_ERR + "%s: attach failed, jpeg processor failed (end flag)!\n", + ptr->name); + return -EBUSY; + } + + return 0; /* looks good! 
*/ +} + +/* ========================================================================= + Local helper function: + + simple loop for pushing the init datasets + ========================================================================= */ + +static int +zr36050_pushit (struct zr36050 *ptr, + u16 startreg, + u16 len, + const char *data) +{ + int i = 0; + + dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, + startreg, len); + while (i < len) { + zr36050_write(ptr, startreg++, data[i++]); + } + + return i; +} + +/* ========================================================================= + Basic datasets: + + jpeg baseline setup data (you find it on lots places in internet, or just + extract it from any regular .jpg image...) + + Could be variable, but until it's not needed it they are just fixed to save + memory. Otherwise expand zr36050 structure with arrays, push the values to + it and initialize from there, as e.g. the linux zr36057/60 driver does it. + ========================================================================= */ + +static const char zr36050_dqt[0x86] = { + 0xff, 0xdb, //Marker: DQT + 0x00, 0x84, //Length: 2*65+2 + 0x00, //Pq,Tq first table + 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, + 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, + 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, + 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, + 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, + 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, + 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, + 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, + 0x01, //Pq,Tq second table + 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, + 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 
0x63, 0x63, 0x63, 0x63 +}; + +static const char zr36050_dht[0x1a4] = { + 0xff, 0xc4, //Marker: DHT + 0x01, 0xa2, //Length: 2*AC, 2*DC + 0x00, //DC first table + 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, + 0x01, //DC second table + 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, + 0x10, //AC first table + 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, + 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, + 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, + 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, + 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, + 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, + 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, + 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, + 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, + 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, + 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, + 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, + 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, + 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, + 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, + 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, + 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, + 0xF8, 0xF9, 0xFA, + 0x11, //AC second table + 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, + 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, + 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, + 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, + 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, + 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 
0x62, + 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, + 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, + 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, + 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, + 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, + 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, + 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, + 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, + 0xF9, 0xFA +}; + +/* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ +#define NO_OF_COMPONENTS 0x3 //Y,U,V +#define BASELINE_PRECISION 0x8 //MCU size (?) +static const char zr36050_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT +static const char zr36050_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC +static const char zr36050_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC + +/* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ +static const char zr36050_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; +static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; + +/* ========================================================================= + Local helper functions: + + calculation and setup of parameter-dependent JPEG baseline segments + (needed for compression only) + ========================================================================= */ + +/* ------------------------------------------------------------------------- */ + +/* SOF (start of frame) segment depends on width, height and sampling ratio + of each color component */ + +static int +zr36050_set_sof (struct zr36050 *ptr) +{ + char sof_data[34]; // 
max. size of register set + int i; + + dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, + ptr->width, ptr->height, NO_OF_COMPONENTS); + sof_data[0] = 0xff; + sof_data[1] = 0xc0; + sof_data[2] = 0x00; + sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; + sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36050 + sof_data[5] = (ptr->height) >> 8; + sof_data[6] = (ptr->height) & 0xff; + sof_data[7] = (ptr->width) >> 8; + sof_data[8] = (ptr->width) & 0xff; + sof_data[9] = NO_OF_COMPONENTS; + for (i = 0; i < NO_OF_COMPONENTS; i++) { + sof_data[10 + (i * 3)] = i; // index identifier + sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios + sof_data[12 + (i * 3)] = zr36050_tq[i]; // Q table selection + } + return zr36050_pushit(ptr, ZR050_SOF_IDX, + (3 * NO_OF_COMPONENTS) + 10, sof_data); +} + +/* ------------------------------------------------------------------------- */ + +/* SOS (start of scan) segment depends on the used scan components + of each color component */ + +static int +zr36050_set_sos (struct zr36050 *ptr) +{ + char sos_data[16]; // max. size of register set + int i; + + dprintk(3, "%s: write SOS\n", ptr->name); + sos_data[0] = 0xff; + sos_data[1] = 0xda; + sos_data[2] = 0x00; + sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; + sos_data[4] = NO_OF_COMPONENTS; + for (i = 0; i < NO_OF_COMPONENTS; i++) { + sos_data[5 + (i * 2)] = i; // index + sos_data[6 + (i * 2)] = (zr36050_td[i] << 4) | zr36050_ta[i]; // AC/DC tbl.sel. + } + sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start + sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3F; + sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; + return zr36050_pushit(ptr, ZR050_SOS1_IDX, + 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, + sos_data); +} + +/* ------------------------------------------------------------------------- */ + +/* DRI (define restart interval) */ + +static int +zr36050_set_dri (struct zr36050 *ptr) +{ + char dri_data[6]; // max. 
size of register set + + dprintk(3, "%s: write DRI\n", ptr->name); + dri_data[0] = 0xff; + dri_data[1] = 0xdd; + dri_data[2] = 0x00; + dri_data[3] = 0x04; + dri_data[4] = ptr->dri >> 8; + dri_data[5] = ptr->dri & 0xff; + return zr36050_pushit(ptr, ZR050_DRI_IDX, 6, dri_data); +} + +/* ========================================================================= + Setup function: + + Setup compression/decompression of Zoran's JPEG processor + ( see also zoran 36050 manual ) + + ... sorry for the spaghetti code ... + ========================================================================= */ +static void +zr36050_init (struct zr36050 *ptr) +{ + int sum = 0; + long bitcnt, tmp; + + if (ptr->mode == CODEC_DO_COMPRESSION) { + dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); + + /* 050 communicates with 057 in master mode */ + zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR); + + /* encoding table preload for compression */ + zr36050_write(ptr, ZR050_MODE, + ZR050_MO_COMP | ZR050_MO_TLM); + zr36050_write(ptr, ZR050_OPTIONS, 0); + + /* disable all IRQs */ + zr36050_write(ptr, ZR050_INT_REQ_0, 0); + zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 + + /* volume control settings */ + /*zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);*/ + zr36050_write(ptr, ZR050_SF_HI, ptr->scalefact >> 8); + zr36050_write(ptr, ZR050_SF_LO, ptr->scalefact & 0xff); + + zr36050_write(ptr, ZR050_AF_HI, 0xff); + zr36050_write(ptr, ZR050_AF_M, 0xff); + zr36050_write(ptr, ZR050_AF_LO, 0xff); + + /* setup the variable jpeg tables */ + sum += zr36050_set_sof(ptr); + sum += zr36050_set_sos(ptr); + sum += zr36050_set_dri(ptr); + + /* setup the fixed jpeg tables - maybe variable, though - + * (see table init section above) */ + dprintk(3, "%s: write DQT, DHT, APP\n", ptr->name); + sum += zr36050_pushit(ptr, ZR050_DQT_IDX, + sizeof(zr36050_dqt), zr36050_dqt); + sum += zr36050_pushit(ptr, ZR050_DHT_IDX, + sizeof(zr36050_dht), zr36050_dht); + zr36050_write(ptr, ZR050_APP_IDX, 0xff); + 
zr36050_write(ptr, ZR050_APP_IDX + 1, 0xe0 + ptr->app.appn); + zr36050_write(ptr, ZR050_APP_IDX + 2, 0x00); + zr36050_write(ptr, ZR050_APP_IDX + 3, ptr->app.len + 2); + sum += zr36050_pushit(ptr, ZR050_APP_IDX + 4, 60, + ptr->app.data) + 4; + zr36050_write(ptr, ZR050_COM_IDX, 0xff); + zr36050_write(ptr, ZR050_COM_IDX + 1, 0xfe); + zr36050_write(ptr, ZR050_COM_IDX + 2, 0x00); + zr36050_write(ptr, ZR050_COM_IDX + 3, ptr->com.len + 2); + sum += zr36050_pushit(ptr, ZR050_COM_IDX + 4, 60, + ptr->com.data) + 4; + + /* do the internal huffman table preload */ + zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI); + + zr36050_write(ptr, ZR050_GO, 1); // launch codec + zr36050_wait_end(ptr); + dprintk(2, "%s: Status after table preload: 0x%02x\n", + ptr->name, ptr->status1); + + if ((ptr->status1 & 0x4) == 0) { + dprintk(1, KERN_ERR "%s: init aborted!\n", + ptr->name); + return; // something is wrong, its timed out!!!! + } + + /* setup misc. data for compression (target code sizes) */ + + /* size of compressed code to reach without header data */ + sum = ptr->real_code_vol - sum; + bitcnt = sum << 3; /* need the size in bits */ + + tmp = bitcnt >> 16; + dprintk(3, + "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", + ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); + zr36050_write(ptr, ZR050_TCV_NET_HI, tmp >> 8); + zr36050_write(ptr, ZR050_TCV_NET_MH, tmp & 0xff); + tmp = bitcnt & 0xffff; + zr36050_write(ptr, ZR050_TCV_NET_ML, tmp >> 8); + zr36050_write(ptr, ZR050_TCV_NET_LO, tmp & 0xff); + + bitcnt -= bitcnt >> 7; // bits without stuffing + bitcnt -= ((bitcnt * 5) >> 6); // bits without eob + + tmp = bitcnt >> 16; + dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", + ptr->name, bitcnt, tmp); + zr36050_write(ptr, ZR050_TCV_DATA_HI, tmp >> 8); + zr36050_write(ptr, ZR050_TCV_DATA_MH, tmp & 0xff); + tmp = bitcnt & 0xffff; + zr36050_write(ptr, ZR050_TCV_DATA_ML, tmp >> 8); + zr36050_write(ptr, ZR050_TCV_DATA_LO, tmp & 0xff); + + /* compression setup with or 
without bitrate control */ + zr36050_write(ptr, ZR050_MODE, + ZR050_MO_COMP | ZR050_MO_PASS2 | + (ptr->bitrate_ctrl ? ZR050_MO_BRC : 0)); + + /* this headers seem to deliver "valid AVI" jpeg frames */ + zr36050_write(ptr, ZR050_MARKERS_EN, + ZR050_ME_DQT | ZR050_ME_DHT | + ((ptr->app.len > 0) ? ZR050_ME_APP : 0) | + ((ptr->com.len > 0) ? ZR050_ME_COM : 0)); + } else { + dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); + + /* 050 communicates with 055 in master mode */ + zr36050_write(ptr, ZR050_HARDWARE, + ZR050_HW_MSTR | ZR050_HW_CFIS_2_CLK); + + /* encoding table preload */ + zr36050_write(ptr, ZR050_MODE, ZR050_MO_TLM); + + /* disable all IRQs */ + zr36050_write(ptr, ZR050_INT_REQ_0, 0); + zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 + + dprintk(3, "%s: write DHT\n", ptr->name); + zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht), + zr36050_dht); + + /* do the internal huffman table preload */ + zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI); + + zr36050_write(ptr, ZR050_GO, 1); // launch codec + zr36050_wait_end(ptr); + dprintk(2, "%s: Status after table preload: 0x%02x\n", + ptr->name, ptr->status1); + + if ((ptr->status1 & 0x4) == 0) { + dprintk(1, KERN_ERR "%s: init aborted!\n", + ptr->name); + return; // something is wrong, its timed out!!!! + } + + /* setup misc. 
data for expansion */ + zr36050_write(ptr, ZR050_MODE, 0); + zr36050_write(ptr, ZR050_MARKERS_EN, 0); + } + + /* adr on selected, to allow GO from master */ + zr36050_read(ptr, 0); +} + +/* ========================================================================= + CODEC API FUNCTIONS + + this functions are accessed by the master via the API structure + ========================================================================= */ + +/* set compression/expansion mode and launches codec - + this should be the last call from the master before starting processing */ +static int +zr36050_set_mode (struct videocodec *codec, + int mode) +{ + struct zr36050 *ptr = (struct zr36050 *) codec->data; + + dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); + + if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) + return -EINVAL; + + ptr->mode = mode; + zr36050_init(ptr); + + return 0; +} + +/* set picture size (norm is ignored as the codec doesn't know about it) */ +static int +zr36050_set_video (struct videocodec *codec, + struct tvnorm *norm, + struct vfe_settings *cap, + struct vfe_polarity *pol) +{ + struct zr36050 *ptr = (struct zr36050 *) codec->data; + int size; + + dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n", + ptr->name, norm->HStart, norm->VStart, + cap->x, cap->y, cap->width, cap->height, + cap->decimation, cap->quality); + /* if () return -EINVAL; + * trust the master driver that it knows what it does - so + * we allow invalid startx/y and norm for now ... 
*/ + ptr->width = cap->width / (cap->decimation & 0xff); + ptr->height = cap->height / ((cap->decimation >> 8) & 0xff); + + /* (KM) JPEG quality */ + size = ptr->width * ptr->height; + size *= 16; /* size in bits */ + /* apply quality setting */ + size = size * cap->quality / 200; + + /* Minimum: 1kb */ + if (size < 8192) + size = 8192; + /* Maximum: 7/8 of code buffer */ + if (size > ptr->total_code_vol * 7) + size = ptr->total_code_vol * 7; + + ptr->real_code_vol = size >> 3; /* in bytes */ + + /* Set max_block_vol here (previously in zr36050_init, moved + * here for consistency with zr36060 code */ + zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol); + + return 0; +} + +/* additional control functions */ +static int +zr36050_control (struct videocodec *codec, + int type, + int size, + void *data) +{ + struct zr36050 *ptr = (struct zr36050 *) codec->data; + int *ival = (int *) data; + + dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, + size); + + switch (type) { + case CODEC_G_STATUS: /* get last status */ + if (size != sizeof(int)) + return -EFAULT; + zr36050_read_status1(ptr); + *ival = ptr->status1; + break; + + case CODEC_G_CODEC_MODE: + if (size != sizeof(int)) + return -EFAULT; + *ival = CODEC_MODE_BJPG; + break; + + case CODEC_S_CODEC_MODE: + if (size != sizeof(int)) + return -EFAULT; + if (*ival != CODEC_MODE_BJPG) + return -EINVAL; + /* not needed, do nothing */ + return 0; + + case CODEC_G_VFE: + case CODEC_S_VFE: + /* not needed, do nothing */ + return 0; + + case CODEC_S_MMAP: + /* not available, give an error */ + return -ENXIO; + + case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */ + if (size != sizeof(int)) + return -EFAULT; + *ival = ptr->total_code_vol; + break; + + case CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */ + if (size != sizeof(int)) + return -EFAULT; + ptr->total_code_vol = *ival; + /* (Kieran Morrissey) + * code copied from zr36060.c to ensure proper bitrate */ + ptr->real_code_vol = 
(ptr->total_code_vol * 6) >> 3; + break; + + case CODEC_G_JPEG_SCALE: /* get scaling factor */ + if (size != sizeof(int)) + return -EFAULT; + *ival = zr36050_read_scalefactor(ptr); + break; + + case CODEC_S_JPEG_SCALE: /* set scaling factor */ + if (size != sizeof(int)) + return -EFAULT; + ptr->scalefact = *ival; + break; + + case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ + struct jpeg_app_marker *app = data; + + if (size != sizeof(struct jpeg_app_marker)) + return -EFAULT; + + *app = ptr->app; + break; + } + + case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ + struct jpeg_app_marker *app = data; + + if (size != sizeof(struct jpeg_app_marker)) + return -EFAULT; + + ptr->app = *app; + break; + } + + case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ + struct jpeg_com_marker *com = data; + + if (size != sizeof(struct jpeg_com_marker)) + return -EFAULT; + + *com = ptr->com; + break; + } + + case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ + struct jpeg_com_marker *com = data; + + if (size != sizeof(struct jpeg_com_marker)) + return -EFAULT; + + ptr->com = *com; + break; + } + + default: + return -EINVAL; + } + + return size; +} + +/* ========================================================================= + Exit and unregister function: + + Deinitializes Zoran's JPEG processor + ========================================================================= */ + +static int +zr36050_unset (struct videocodec *codec) +{ + struct zr36050 *ptr = codec->data; + + if (ptr) { + /* do wee need some codec deinit here, too ???? */ + + dprintk(1, "%s: finished codec #%d\n", ptr->name, + ptr->num); + kfree(ptr); + codec->data = NULL; + + zr36050_codecs--; + return 0; + } + + return -EFAULT; +} + +/* ========================================================================= + Setup and registry function: + + Initializes Zoran's JPEG processor + + Also sets pixel size, average code size, mode (compr./decompr.) 
+ (the given size is determined by the processor with the video interface) + ========================================================================= */ + +static int +zr36050_setup (struct videocodec *codec) +{ + struct zr36050 *ptr; + int res; + + dprintk(2, "zr36050: initializing MJPEG subsystem #%d.\n", + zr36050_codecs); + + if (zr36050_codecs == MAX_CODECS) { + dprintk(1, + KERN_ERR "zr36050: Can't attach more codecs!\n"); + return -ENOSPC; + } + //mem structure init + codec->data = ptr = kzalloc(sizeof(struct zr36050), GFP_KERNEL); + if (NULL == ptr) { + dprintk(1, KERN_ERR "zr36050: Can't get enough memory!\n"); + return -ENOMEM; + } + + snprintf(ptr->name, sizeof(ptr->name), "zr36050[%d]", + zr36050_codecs); + ptr->num = zr36050_codecs++; + ptr->codec = codec; + + //testing + res = zr36050_basic_test(ptr); + if (res < 0) { + zr36050_unset(codec); + return res; + } + //final setup + memcpy(ptr->h_samp_ratio, zr36050_decimation_h, 8); + memcpy(ptr->v_samp_ratio, zr36050_decimation_v, 8); + + ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag + * (what is the difference?) 
*/ + ptr->mode = CODEC_DO_COMPRESSION; + ptr->width = 384; + ptr->height = 288; + ptr->total_code_vol = 16000; + ptr->max_block_vol = 240; + ptr->scalefact = 0x100; + ptr->dri = 1; + + /* no app/com marker by default */ + ptr->app.appn = 0; + ptr->app.len = 0; + ptr->com.len = 0; + + zr36050_init(ptr); + + dprintk(1, KERN_INFO "%s: codec attached and running\n", + ptr->name); + + return 0; +} + +static const struct videocodec zr36050_codec = { + .owner = THIS_MODULE, + .name = "zr36050", + .magic = 0L, // magic not used + .flags = + CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | + CODEC_FLAG_DECODER, + .type = CODEC_TYPE_ZR36050, + .setup = zr36050_setup, // functionality + .unset = zr36050_unset, + .set_mode = zr36050_set_mode, + .set_video = zr36050_set_video, + .control = zr36050_control, + // others are not used +}; + +/* ========================================================================= + HOOK IN DRIVER AS KERNEL MODULE + ========================================================================= */ + +static int __init +zr36050_init_module (void) +{ + //dprintk(1, "ZR36050 driver %s\n",ZR050_VERSION); + zr36050_codecs = 0; + return videocodec_register(&zr36050_codec); +} + +static void __exit +zr36050_cleanup_module (void) +{ + if (zr36050_codecs) { + dprintk(1, + "zr36050: something's wrong - %d codecs left somehow.\n", + zr36050_codecs); + } + videocodec_unregister(&zr36050_codec); +} + +module_init(zr36050_init_module); +module_exit(zr36050_cleanup_module); + +MODULE_AUTHOR("Wolfgang Scherr "); +MODULE_DESCRIPTION("Driver module for ZR36050 jpeg processors " + ZR050_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/zoran/zr36050.h b/drivers/staging/media/zoran/zr36050.h new file mode 100644 index 000000000000..9236486d3c2b --- /dev/null +++ b/drivers/staging/media/zoran/zr36050.h @@ -0,0 +1,179 @@ +/* + * Zoran ZR36050 basic configuration functions - header file + * + * Copyright (C) 2001 Wolfgang Scherr + * + * $Id: 
zr36050.h,v 1.1.2.2 2003/01/14 21:18:22 rbultje Exp $ + * + * ------------------------------------------------------------------------ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ------------------------------------------------------------------------ + */ + +#ifndef ZR36050_H +#define ZR36050_H + +#include "videocodec.h" + +/* data stored for each zoran jpeg codec chip */ +struct zr36050 { + char name[32]; + int num; + /* io datastructure */ + struct videocodec *codec; + // last coder status + __u8 status1; + // actual coder setup + int mode; + + __u16 width; + __u16 height; + + __u16 bitrate_ctrl; + + __u32 total_code_vol; + __u32 real_code_vol; + __u16 max_block_vol; + + __u8 h_samp_ratio[8]; + __u8 v_samp_ratio[8]; + __u16 scalefact; + __u16 dri; + + /* com/app marker */ + struct jpeg_com_marker com; + struct jpeg_app_marker app; +}; + +/* zr36050 register addresses */ +#define ZR050_GO 0x000 +#define ZR050_HARDWARE 0x002 +#define ZR050_MODE 0x003 +#define ZR050_OPTIONS 0x004 +#define ZR050_MBCV 0x005 +#define ZR050_MARKERS_EN 0x006 +#define ZR050_INT_REQ_0 0x007 +#define ZR050_INT_REQ_1 0x008 +#define ZR050_TCV_NET_HI 0x009 +#define ZR050_TCV_NET_MH 0x00a +#define ZR050_TCV_NET_ML 0x00b +#define ZR050_TCV_NET_LO 0x00c +#define ZR050_TCV_DATA_HI 0x00d +#define ZR050_TCV_DATA_MH 0x00e +#define ZR050_TCV_DATA_ML 0x00f +#define ZR050_TCV_DATA_LO 0x010 +#define ZR050_SF_HI 0x011 +#define ZR050_SF_LO 0x012 +#define ZR050_AF_HI 0x013 +#define ZR050_AF_M 0x014 +#define ZR050_AF_LO 0x015 +#define 
ZR050_ACV_HI 0x016 +#define ZR050_ACV_MH 0x017 +#define ZR050_ACV_ML 0x018 +#define ZR050_ACV_LO 0x019 +#define ZR050_ACT_HI 0x01a +#define ZR050_ACT_MH 0x01b +#define ZR050_ACT_ML 0x01c +#define ZR050_ACT_LO 0x01d +#define ZR050_ACV_TRUN_HI 0x01e +#define ZR050_ACV_TRUN_MH 0x01f +#define ZR050_ACV_TRUN_ML 0x020 +#define ZR050_ACV_TRUN_LO 0x021 +#define ZR050_STATUS_0 0x02e +#define ZR050_STATUS_1 0x02f + +#define ZR050_SOF_IDX 0x040 +#define ZR050_SOS1_IDX 0x07a +#define ZR050_SOS2_IDX 0x08a +#define ZR050_SOS3_IDX 0x09a +#define ZR050_SOS4_IDX 0x0aa +#define ZR050_DRI_IDX 0x0c0 +#define ZR050_DNL_IDX 0x0c6 +#define ZR050_DQT_IDX 0x0cc +#define ZR050_DHT_IDX 0x1d4 +#define ZR050_APP_IDX 0x380 +#define ZR050_COM_IDX 0x3c0 + +/* zr36050 hardware register bits */ + +#define ZR050_HW_BSWD 0x80 +#define ZR050_HW_MSTR 0x40 +#define ZR050_HW_DMA 0x20 +#define ZR050_HW_CFIS_1_CLK 0x00 +#define ZR050_HW_CFIS_2_CLK 0x04 +#define ZR050_HW_CFIS_3_CLK 0x08 +#define ZR050_HW_CFIS_4_CLK 0x0C +#define ZR050_HW_CFIS_5_CLK 0x10 +#define ZR050_HW_CFIS_6_CLK 0x14 +#define ZR050_HW_CFIS_7_CLK 0x18 +#define ZR050_HW_CFIS_8_CLK 0x1C +#define ZR050_HW_BELE 0x01 + +/* zr36050 mode register bits */ + +#define ZR050_MO_COMP 0x80 +#define ZR050_MO_ATP 0x40 +#define ZR050_MO_PASS2 0x20 +#define ZR050_MO_TLM 0x10 +#define ZR050_MO_DCONLY 0x08 +#define ZR050_MO_BRC 0x04 + +#define ZR050_MO_ATP 0x40 +#define ZR050_MO_PASS2 0x20 +#define ZR050_MO_TLM 0x10 +#define ZR050_MO_DCONLY 0x08 + +/* zr36050 option register bits */ + +#define ZR050_OP_NSCN_1 0x00 +#define ZR050_OP_NSCN_2 0x20 +#define ZR050_OP_NSCN_3 0x40 +#define ZR050_OP_NSCN_4 0x60 +#define ZR050_OP_NSCN_5 0x80 +#define ZR050_OP_NSCN_6 0xA0 +#define ZR050_OP_NSCN_7 0xC0 +#define ZR050_OP_NSCN_8 0xE0 +#define ZR050_OP_OVF 0x10 + + +/* zr36050 markers-enable register bits */ + +#define ZR050_ME_APP 0x80 +#define ZR050_ME_COM 0x40 +#define ZR050_ME_DRI 0x20 +#define ZR050_ME_DQT 0x10 +#define ZR050_ME_DHT 0x08 +#define ZR050_ME_DNL 0x04 
+#define ZR050_ME_DQTI 0x02 +#define ZR050_ME_DHTI 0x01 + +/* zr36050 status0/1 register bit masks */ + +#define ZR050_ST_RST_MASK 0x20 +#define ZR050_ST_SOF_MASK 0x02 +#define ZR050_ST_SOS_MASK 0x02 +#define ZR050_ST_DATRDY_MASK 0x80 +#define ZR050_ST_MRKDET_MASK 0x40 +#define ZR050_ST_RFM_MASK 0x10 +#define ZR050_ST_RFD_MASK 0x08 +#define ZR050_ST_END_MASK 0x04 +#define ZR050_ST_TCVOVF_MASK 0x02 +#define ZR050_ST_DATOVF_MASK 0x01 + +/* pixel component idx */ + +#define ZR050_Y_COMPONENT 0 +#define ZR050_U_COMPONENT 1 +#define ZR050_V_COMPONENT 2 + +#endif /*fndef ZR36050_H */ diff --git a/drivers/staging/media/zoran/zr36057.h b/drivers/staging/media/zoran/zr36057.h new file mode 100644 index 000000000000..c8acb21dcb5c --- /dev/null +++ b/drivers/staging/media/zoran/zr36057.h @@ -0,0 +1,164 @@ +/* + * zr36057.h - zr36057 register offsets + * + * Copyright (C) 1998 Dave Perks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ZR36057_H_ +#define _ZR36057_H_ + + +/* Zoran ZR36057 registers */ + +#define ZR36057_VFEHCR 0x000 /* Video Front End, Horizontal Configuration Register */ +#define ZR36057_VFEHCR_HSPol (1<<30) +#define ZR36057_VFEHCR_HStart 10 +#define ZR36057_VFEHCR_HEnd 0 +#define ZR36057_VFEHCR_Hmask 0x3ff + +#define ZR36057_VFEVCR 0x004 /* Video Front End, Vertical Configuration Register */ +#define ZR36057_VFEVCR_VSPol (1<<30) +#define ZR36057_VFEVCR_VStart 10 +#define ZR36057_VFEVCR_VEnd 0 +#define ZR36057_VFEVCR_Vmask 0x3ff + +#define ZR36057_VFESPFR 0x008 /* Video Front End, Scaler and Pixel Format Register */ +#define ZR36057_VFESPFR_ExtFl (1<<26) +#define ZR36057_VFESPFR_TopField (1<<25) +#define ZR36057_VFESPFR_VCLKPol (1<<24) +#define ZR36057_VFESPFR_HFilter 21 +#define ZR36057_VFESPFR_HorDcm 14 +#define ZR36057_VFESPFR_VerDcm 8 +#define ZR36057_VFESPFR_DispMode 6 +#define ZR36057_VFESPFR_YUV422 (0<<3) +#define ZR36057_VFESPFR_RGB888 (1<<3) +#define ZR36057_VFESPFR_RGB565 (2<<3) +#define ZR36057_VFESPFR_RGB555 (3<<3) +#define ZR36057_VFESPFR_ErrDif (1<<2) +#define ZR36057_VFESPFR_Pack24 (1<<1) +#define ZR36057_VFESPFR_LittleEndian (1<<0) + +#define ZR36057_VDTR 0x00c /* Video Display "Top" Register */ + +#define ZR36057_VDBR 0x010 /* Video Display "Bottom" Register */ + +#define ZR36057_VSSFGR 0x014 /* Video Stride, Status, and Frame Grab Register */ +#define ZR36057_VSSFGR_DispStride 16 +#define ZR36057_VSSFGR_VidOvf (1<<8) +#define ZR36057_VSSFGR_SnapShot (1<<1) +#define ZR36057_VSSFGR_FrameGrab (1<<0) + +#define ZR36057_VDCR 0x018 /* Video Display Configuration Register */ +#define ZR36057_VDCR_VidEn (1<<31) +#define ZR36057_VDCR_MinPix 24 +#define ZR36057_VDCR_Triton (1<<24) +#define ZR36057_VDCR_VidWinHt 12 +#define ZR36057_VDCR_VidWinWid 0 + +#define ZR36057_MMTR 0x01c /* Masking Map "Top" Register */ + +#define ZR36057_MMBR 0x020 /* Masking Map "Bottom" Register */ + +#define ZR36057_OCR 0x024 /* Overlay Control Register */ +#define 
ZR36057_OCR_OvlEnable (1 << 15) +#define ZR36057_OCR_MaskStride 0 + +#define ZR36057_SPGPPCR 0x028 /* System, PCI, and General Purpose Pins Control Register */ +#define ZR36057_SPGPPCR_SoftReset (1<<24) + +#define ZR36057_GPPGCR1 0x02c /* General Purpose Pins and GuestBus Control Register (1) */ + +#define ZR36057_MCSAR 0x030 /* MPEG Code Source Address Register */ + +#define ZR36057_MCTCR 0x034 /* MPEG Code Transfer Control Register */ +#define ZR36057_MCTCR_CodTime (1 << 30) +#define ZR36057_MCTCR_CEmpty (1 << 29) +#define ZR36057_MCTCR_CFlush (1 << 28) +#define ZR36057_MCTCR_CodGuestID 20 +#define ZR36057_MCTCR_CodGuestReg 16 + +#define ZR36057_MCMPR 0x038 /* MPEG Code Memory Pointer Register */ + +#define ZR36057_ISR 0x03c /* Interrupt Status Register */ +#define ZR36057_ISR_GIRQ1 (1<<30) +#define ZR36057_ISR_GIRQ0 (1<<29) +#define ZR36057_ISR_CodRepIRQ (1<<28) +#define ZR36057_ISR_JPEGRepIRQ (1<<27) + +#define ZR36057_ICR 0x040 /* Interrupt Control Register */ +#define ZR36057_ICR_GIRQ1 (1<<30) +#define ZR36057_ICR_GIRQ0 (1<<29) +#define ZR36057_ICR_CodRepIRQ (1<<28) +#define ZR36057_ICR_JPEGRepIRQ (1<<27) +#define ZR36057_ICR_IntPinEn (1<<24) + +#define ZR36057_I2CBR 0x044 /* I2C Bus Register */ +#define ZR36057_I2CBR_SDA (1<<1) +#define ZR36057_I2CBR_SCL (1<<0) + +#define ZR36057_JMC 0x100 /* JPEG Mode and Control */ +#define ZR36057_JMC_JPG (1 << 31) +#define ZR36057_JMC_JPGExpMode (0 << 29) +#define ZR36057_JMC_JPGCmpMode (1 << 29) +#define ZR36057_JMC_MJPGExpMode (2 << 29) +#define ZR36057_JMC_MJPGCmpMode (3 << 29) +#define ZR36057_JMC_RTBUSY_FB (1 << 6) +#define ZR36057_JMC_Go_en (1 << 5) +#define ZR36057_JMC_SyncMstr (1 << 4) +#define ZR36057_JMC_Fld_per_buff (1 << 3) +#define ZR36057_JMC_VFIFO_FB (1 << 2) +#define ZR36057_JMC_CFIFO_FB (1 << 1) +#define ZR36057_JMC_Stll_LitEndian (1 << 0) + +#define ZR36057_JPC 0x104 /* JPEG Process Control */ +#define ZR36057_JPC_P_Reset (1 << 7) +#define ZR36057_JPC_CodTrnsEn (1 << 5) +#define ZR36057_JPC_Active (1 << 
0) + +#define ZR36057_VSP 0x108 /* Vertical Sync Parameters */ +#define ZR36057_VSP_VsyncSize 16 +#define ZR36057_VSP_FrmTot 0 + +#define ZR36057_HSP 0x10c /* Horizontal Sync Parameters */ +#define ZR36057_HSP_HsyncStart 16 +#define ZR36057_HSP_LineTot 0 + +#define ZR36057_FHAP 0x110 /* Field Horizontal Active Portion */ +#define ZR36057_FHAP_NAX 16 +#define ZR36057_FHAP_PAX 0 + +#define ZR36057_FVAP 0x114 /* Field Vertical Active Portion */ +#define ZR36057_FVAP_NAY 16 +#define ZR36057_FVAP_PAY 0 + +#define ZR36057_FPP 0x118 /* Field Process Parameters */ +#define ZR36057_FPP_Odd_Even (1 << 0) + +#define ZR36057_JCBA 0x11c /* JPEG Code Base Address */ + +#define ZR36057_JCFT 0x120 /* JPEG Code FIFO Threshold */ + +#define ZR36057_JCGI 0x124 /* JPEG Codec Guest ID */ +#define ZR36057_JCGI_JPEGuestID 4 +#define ZR36057_JCGI_JPEGuestReg 0 + +#define ZR36057_GCR2 0x12c /* GuestBus Control Register (2) */ + +#define ZR36057_POR 0x200 /* Post Office Register */ +#define ZR36057_POR_POPen (1<<25) +#define ZR36057_POR_POTime (1<<24) +#define ZR36057_POR_PODir (1<<23) + +#define ZR36057_STR 0x300 /* "Still" Transfer Register */ + +#endif diff --git a/drivers/staging/media/zoran/zr36060.c b/drivers/staging/media/zoran/zr36060.c new file mode 100644 index 000000000000..2c2e8130fc96 --- /dev/null +++ b/drivers/staging/media/zoran/zr36060.c @@ -0,0 +1,1006 @@ +/* + * Zoran ZR36060 basic configuration functions + * + * Copyright (C) 2002 Laurent Pinchart + * + * $Id: zr36060.c,v 1.1.2.22 2003/05/06 09:35:36 rbultje Exp $ + * + * ------------------------------------------------------------------------ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ------------------------------------------------------------------------ + */ + +#define ZR060_VERSION "v0.7" + +#include +#include +#include +#include + +#include +#include + +/* I/O commands, error codes */ +#include + +/* headerfile of this module */ +#include "zr36060.h" + +/* codec io API */ +#include "videocodec.h" + +/* it doesn't make sense to have more than 20 or so, + just to prevent some unwanted loops */ +#define MAX_CODECS 20 + +/* amount of chips attached via this driver */ +static int zr36060_codecs; + +static bool low_bitrate; +module_param(low_bitrate, bool, 0); +MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate"); + +/* debugging is available via module parameter */ +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0-4)"); + +#define dprintk(num, format, args...) \ + do { \ + if (debug >= num) \ + printk(format, ##args); \ + } while (0) + +/* ========================================================================= + Local hardware I/O functions: + + read/write via codec layer (registers are located in the master device) + ========================================================================= */ + +/* read and write functions */ +static u8 +zr36060_read (struct zr36060 *ptr, + u16 reg) +{ + u8 value = 0; + + // just in case something is wrong... 
+ if (ptr->codec->master_data->readreg) + value = (ptr->codec->master_data->readreg(ptr->codec, + reg)) & 0xff; + else + dprintk(1, + KERN_ERR "%s: invalid I/O setup, nothing read!\n", + ptr->name); + + //dprintk(4, "%s: reading from 0x%04x: %02x\n",ptr->name,reg,value); + + return value; +} + +static void +zr36060_write(struct zr36060 *ptr, + u16 reg, + u8 value) +{ + //dprintk(4, "%s: writing 0x%02x to 0x%04x\n",ptr->name,value,reg); + dprintk(4, "0x%02x @0x%04x\n", value, reg); + + // just in case something is wrong... + if (ptr->codec->master_data->writereg) + ptr->codec->master_data->writereg(ptr->codec, reg, value); + else + dprintk(1, + KERN_ERR + "%s: invalid I/O setup, nothing written!\n", + ptr->name); +} + +/* ========================================================================= + Local helper function: + + status read + ========================================================================= */ + +/* status is kept in datastructure */ +static u8 +zr36060_read_status (struct zr36060 *ptr) +{ + ptr->status = zr36060_read(ptr, ZR060_CFSR); + + zr36060_read(ptr, 0); + return ptr->status; +} + +/* ========================================================================= + Local helper function: + + scale factor read + ========================================================================= */ + +/* scale factor is kept in datastructure */ +static u16 +zr36060_read_scalefactor (struct zr36060 *ptr) +{ + ptr->scalefact = (zr36060_read(ptr, ZR060_SF_HI) << 8) | + (zr36060_read(ptr, ZR060_SF_LO) & 0xFF); + + /* leave 0 selected for an eventually GO from master */ + zr36060_read(ptr, 0); + return ptr->scalefact; +} + +/* ========================================================================= + Local helper function: + + wait if codec is ready to proceed (end of processing) or time is over + ========================================================================= */ + +static void +zr36060_wait_end (struct zr36060 *ptr) +{ + int i = 0; + + while 
(zr36060_read_status(ptr) & ZR060_CFSR_Busy) { + udelay(1); + if (i++ > 200000) { // 200ms, there is for sure something wrong!!! + dprintk(1, + "%s: timeout at wait_end (last status: 0x%02x)\n", + ptr->name, ptr->status); + break; + } + } +} + +/* ========================================================================= + Local helper function: + + basic test of "connectivity", writes/reads to/from memory the SOF marker + ========================================================================= */ + +static int +zr36060_basic_test (struct zr36060 *ptr) +{ + if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) && + (zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) { + dprintk(1, + KERN_ERR + "%s: attach failed, can't connect to jpeg processor!\n", + ptr->name); + return -ENXIO; + } + + zr36060_wait_end(ptr); + if (ptr->status & ZR060_CFSR_Busy) { + dprintk(1, + KERN_ERR + "%s: attach failed, jpeg processor failed (end flag)!\n", + ptr->name); + return -EBUSY; + } + + return 0; /* looks good! */ +} + +/* ========================================================================= + Local helper function: + + simple loop for pushing the init datasets + ========================================================================= */ + +static int +zr36060_pushit (struct zr36060 *ptr, + u16 startreg, + u16 len, + const char *data) +{ + int i = 0; + + dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, + startreg, len); + while (i < len) { + zr36060_write(ptr, startreg++, data[i++]); + } + + return i; +} + +/* ========================================================================= + Basic datasets: + + jpeg baseline setup data (you find it on lots places in internet, or just + extract it from any regular .jpg image...) + + Could be variable, but until it's not needed it they are just fixed to save + memory. Otherwise expand zr36060 structure with arrays, push the values to + it and initialize from there, as e.g. the linux zr36057/60 driver does it. 
+ ========================================================================= */ + +static const char zr36060_dqt[0x86] = { + 0xff, 0xdb, //Marker: DQT + 0x00, 0x84, //Length: 2*65+2 + 0x00, //Pq,Tq first table + 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, + 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, + 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, + 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, + 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, + 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, + 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, + 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, + 0x01, //Pq,Tq second table + 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, + 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63 +}; + +static const char zr36060_dht[0x1a4] = { + 0xff, 0xc4, //Marker: DHT + 0x01, 0xa2, //Length: 2*AC, 2*DC + 0x00, //DC first table + 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, + 0x01, //DC second table + 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, + 0x10, //AC first table + 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, + 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, + 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, + 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, + 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, + 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, + 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, + 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, + 0x35, 
0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, + 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, + 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, + 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, + 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, + 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, + 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, + 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, + 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, + 0xF8, 0xF9, 0xFA, + 0x11, //AC second table + 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, + 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, + 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, + 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, + 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, + 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, + 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, + 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, + 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, + 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, + 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, + 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, + 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, + 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, + 0xF9, 0xFA +}; + +/* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ +#define NO_OF_COMPONENTS 0x3 //Y,U,V +#define BASELINE_PRECISION 0x8 //MCU size (?) 
+static const char zr36060_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT +static const char zr36060_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC +static const char zr36060_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC + +/* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ +static const char zr36060_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; +static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; + +/* ========================================================================= + Local helper functions: + + calculation and setup of parameter-dependent JPEG baseline segments + (needed for compression only) + ========================================================================= */ + +/* ------------------------------------------------------------------------- */ + +/* SOF (start of frame) segment depends on width, height and sampling ratio + of each color component */ + +static int +zr36060_set_sof (struct zr36060 *ptr) +{ + char sof_data[34]; // max. 
size of register set + int i; + + dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, + ptr->width, ptr->height, NO_OF_COMPONENTS); + sof_data[0] = 0xff; + sof_data[1] = 0xc0; + sof_data[2] = 0x00; + sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; + sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36060 + sof_data[5] = (ptr->height) >> 8; + sof_data[6] = (ptr->height) & 0xff; + sof_data[7] = (ptr->width) >> 8; + sof_data[8] = (ptr->width) & 0xff; + sof_data[9] = NO_OF_COMPONENTS; + for (i = 0; i < NO_OF_COMPONENTS; i++) { + sof_data[10 + (i * 3)] = i; // index identifier + sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | + (ptr->v_samp_ratio[i]); // sampling ratios + sof_data[12 + (i * 3)] = zr36060_tq[i]; // Q table selection + } + return zr36060_pushit(ptr, ZR060_SOF_IDX, + (3 * NO_OF_COMPONENTS) + 10, sof_data); +} + +/* ------------------------------------------------------------------------- */ + +/* SOS (start of scan) segment depends on the used scan components + of each color component */ + +static int +zr36060_set_sos (struct zr36060 *ptr) +{ + char sos_data[16]; // max. size of register set + int i; + + dprintk(3, "%s: write SOS\n", ptr->name); + sos_data[0] = 0xff; + sos_data[1] = 0xda; + sos_data[2] = 0x00; + sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; + sos_data[4] = NO_OF_COMPONENTS; + for (i = 0; i < NO_OF_COMPONENTS; i++) { + sos_data[5 + (i * 2)] = i; // index + sos_data[6 + (i * 2)] = (zr36060_td[i] << 4) | + zr36060_ta[i]; // AC/DC tbl.sel. + } + sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start + sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3f; + sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; + return zr36060_pushit(ptr, ZR060_SOS_IDX, + 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, + sos_data); +} + +/* ------------------------------------------------------------------------- */ + +/* DRI (define restart interval) */ + +static int +zr36060_set_dri (struct zr36060 *ptr) +{ + char dri_data[6]; // max. 
size of register set + + dprintk(3, "%s: write DRI\n", ptr->name); + dri_data[0] = 0xff; + dri_data[1] = 0xdd; + dri_data[2] = 0x00; + dri_data[3] = 0x04; + dri_data[4] = (ptr->dri) >> 8; + dri_data[5] = (ptr->dri) & 0xff; + return zr36060_pushit(ptr, ZR060_DRI_IDX, 6, dri_data); +} + +/* ========================================================================= + Setup function: + + Setup compression/decompression of Zoran's JPEG processor + ( see also zoran 36060 manual ) + + ... sorry for the spaghetti code ... + ========================================================================= */ +static void +zr36060_init (struct zr36060 *ptr) +{ + int sum = 0; + long bitcnt, tmp; + + if (ptr->mode == CODEC_DO_COMPRESSION) { + dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); + + zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); + + /* 060 communicates with 067 in master mode */ + zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); + + /* Compression with or without variable scale factor */ + /*FIXME: What about ptr->bitrate_ctrl? 
*/ + zr36060_write(ptr, ZR060_CMR, + ZR060_CMR_Comp | ZR060_CMR_Pass2 | + ZR060_CMR_BRB); + + /* Must be zero */ + zr36060_write(ptr, ZR060_MBZ, 0x00); + zr36060_write(ptr, ZR060_TCR_HI, 0x00); + zr36060_write(ptr, ZR060_TCR_LO, 0x00); + + /* Disable all IRQs - no DataErr means autoreset */ + zr36060_write(ptr, ZR060_IMR, 0); + + /* volume control settings */ + zr36060_write(ptr, ZR060_SF_HI, ptr->scalefact >> 8); + zr36060_write(ptr, ZR060_SF_LO, ptr->scalefact & 0xff); + + zr36060_write(ptr, ZR060_AF_HI, 0xff); + zr36060_write(ptr, ZR060_AF_M, 0xff); + zr36060_write(ptr, ZR060_AF_LO, 0xff); + + /* setup the variable jpeg tables */ + sum += zr36060_set_sof(ptr); + sum += zr36060_set_sos(ptr); + sum += zr36060_set_dri(ptr); + + /* setup the fixed jpeg tables - maybe variable, though - + * (see table init section above) */ + sum += + zr36060_pushit(ptr, ZR060_DQT_IDX, sizeof(zr36060_dqt), + zr36060_dqt); + sum += + zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), + zr36060_dht); + zr36060_write(ptr, ZR060_APP_IDX, 0xff); + zr36060_write(ptr, ZR060_APP_IDX + 1, 0xe0 + ptr->app.appn); + zr36060_write(ptr, ZR060_APP_IDX + 2, 0x00); + zr36060_write(ptr, ZR060_APP_IDX + 3, ptr->app.len + 2); + sum += zr36060_pushit(ptr, ZR060_APP_IDX + 4, 60, + ptr->app.data) + 4; + zr36060_write(ptr, ZR060_COM_IDX, 0xff); + zr36060_write(ptr, ZR060_COM_IDX + 1, 0xfe); + zr36060_write(ptr, ZR060_COM_IDX + 2, 0x00); + zr36060_write(ptr, ZR060_COM_IDX + 3, ptr->com.len + 2); + sum += zr36060_pushit(ptr, ZR060_COM_IDX + 4, 60, + ptr->com.data) + 4; + + /* setup misc. 
data for compression (target code sizes) */ + + /* size of compressed code to reach without header data */ + sum = ptr->real_code_vol - sum; + bitcnt = sum << 3; /* need the size in bits */ + + tmp = bitcnt >> 16; + dprintk(3, + "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", + ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); + zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8); + zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff); + tmp = bitcnt & 0xffff; + zr36060_write(ptr, ZR060_TCV_NET_ML, tmp >> 8); + zr36060_write(ptr, ZR060_TCV_NET_LO, tmp & 0xff); + + bitcnt -= bitcnt >> 7; // bits without stuffing + bitcnt -= ((bitcnt * 5) >> 6); // bits without eob + + tmp = bitcnt >> 16; + dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", + ptr->name, bitcnt, tmp); + zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8); + zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff); + tmp = bitcnt & 0xffff; + zr36060_write(ptr, ZR060_TCV_DATA_ML, tmp >> 8); + zr36060_write(ptr, ZR060_TCV_DATA_LO, tmp & 0xff); + + /* JPEG markers to be included in the compressed stream */ + zr36060_write(ptr, ZR060_MER, + ZR060_MER_DQT | ZR060_MER_DHT | + ((ptr->com.len > 0) ? ZR060_MER_Com : 0) | + ((ptr->app.len > 0) ? ZR060_MER_App : 0)); + + /* Setup the Video Frontend */ + /* Limit pixel range to 16..235 as per CCIR-601 */ + zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); + + } else { + dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); + + zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); + + /* 060 communicates with 067 in master mode */ + zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); + + /* Decompression */ + zr36060_write(ptr, ZR060_CMR, 0); + + /* Must be zero */ + zr36060_write(ptr, ZR060_MBZ, 0x00); + zr36060_write(ptr, ZR060_TCR_HI, 0x00); + zr36060_write(ptr, ZR060_TCR_LO, 0x00); + + /* Disable all IRQs - no DataErr means autoreset */ + zr36060_write(ptr, ZR060_IMR, 0); + + /* setup misc. 
data for expansion */ + zr36060_write(ptr, ZR060_MER, 0); + + /* setup the fixed jpeg tables - maybe variable, though - + * (see table init section above) */ + zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), + zr36060_dht); + + /* Setup the Video Frontend */ + //zr36060_write(ptr, ZR060_VCR, ZR060_VCR_FIExt); + //this doesn't seem right and doesn't work... + zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); + } + + /* Load the tables */ + zr36060_write(ptr, ZR060_LOAD, + ZR060_LOAD_SyncRst | ZR060_LOAD_Load); + zr36060_wait_end(ptr); + dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, + ptr->status); + + if (ptr->status & ZR060_CFSR_Busy) { + dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); + return; // something is wrong, its timed out!!!! + } +} + +/* ========================================================================= + CODEC API FUNCTIONS + + this functions are accessed by the master via the API structure + ========================================================================= */ + +/* set compression/expansion mode and launches codec - + this should be the last call from the master before starting processing */ +static int +zr36060_set_mode (struct videocodec *codec, + int mode) +{ + struct zr36060 *ptr = (struct zr36060 *) codec->data; + + dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); + + if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) + return -EINVAL; + + ptr->mode = mode; + zr36060_init(ptr); + + return 0; +} + +/* set picture size (norm is ignored as the codec doesn't know about it) */ +static int +zr36060_set_video (struct videocodec *codec, + struct tvnorm *norm, + struct vfe_settings *cap, + struct vfe_polarity *pol) +{ + struct zr36060 *ptr = (struct zr36060 *) codec->data; + u32 reg; + int size; + + dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name, + cap->x, cap->y, cap->width, cap->height, cap->decimation); + + /* if () return -EINVAL; + * trust the master driver that it 
knows what it does - so + * we allow invalid startx/y and norm for now ... */ + ptr->width = cap->width / (cap->decimation & 0xff); + ptr->height = cap->height / (cap->decimation >> 8); + + zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); + + /* Note that VSPol/HSPol bits in zr36060 have the opposite + * meaning of their zr360x7 counterparts with the same names + * N.b. for VSPol this is only true if FIVEdge = 0 (default, + * left unchanged here - in accordance with datasheet). + */ + reg = (!pol->vsync_pol ? ZR060_VPR_VSPol : 0) + | (!pol->hsync_pol ? ZR060_VPR_HSPol : 0) + | (pol->field_pol ? ZR060_VPR_FIPol : 0) + | (pol->blank_pol ? ZR060_VPR_BLPol : 0) + | (pol->subimg_pol ? ZR060_VPR_SImgPol : 0) + | (pol->poe_pol ? ZR060_VPR_PoePol : 0) + | (pol->pvalid_pol ? ZR060_VPR_PValPol : 0) + | (pol->vclk_pol ? ZR060_VPR_VCLKPol : 0); + zr36060_write(ptr, ZR060_VPR, reg); + + reg = 0; + switch (cap->decimation & 0xff) { + default: + case 1: + break; + + case 2: + reg |= ZR060_SR_HScale2; + break; + + case 4: + reg |= ZR060_SR_HScale4; + break; + } + + switch (cap->decimation >> 8) { + default: + case 1: + break; + + case 2: + reg |= ZR060_SR_VScale; + break; + } + zr36060_write(ptr, ZR060_SR, reg); + + zr36060_write(ptr, ZR060_BCR_Y, 0x00); + zr36060_write(ptr, ZR060_BCR_U, 0x80); + zr36060_write(ptr, ZR060_BCR_V, 0x80); + + /* sync generator */ + + reg = norm->Ht - 1; /* Vtotal */ + zr36060_write(ptr, ZR060_SGR_VTOTAL_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_SGR_VTOTAL_LO, (reg >> 0) & 0xff); + + reg = norm->Wt - 1; /* Htotal */ + zr36060_write(ptr, ZR060_SGR_HTOTAL_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_SGR_HTOTAL_LO, (reg >> 0) & 0xff); + + reg = 6 - 1; /* VsyncSize */ + zr36060_write(ptr, ZR060_SGR_VSYNC, reg); + + //reg = 30 - 1; /* HsyncSize */ +///*CP*/ reg = (zr->params.norm == 1 ? 
57 : 68); + reg = 68; + zr36060_write(ptr, ZR060_SGR_HSYNC, reg); + + reg = norm->VStart - 1; /* BVstart */ + zr36060_write(ptr, ZR060_SGR_BVSTART, reg); + + reg += norm->Ha / 2; /* BVend */ + zr36060_write(ptr, ZR060_SGR_BVEND_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_SGR_BVEND_LO, (reg >> 0) & 0xff); + + reg = norm->HStart - 1; /* BHstart */ + zr36060_write(ptr, ZR060_SGR_BHSTART, reg); + + reg += norm->Wa; /* BHend */ + zr36060_write(ptr, ZR060_SGR_BHEND_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_SGR_BHEND_LO, (reg >> 0) & 0xff); + + /* active area */ + reg = cap->y + norm->VStart; /* Vstart */ + zr36060_write(ptr, ZR060_AAR_VSTART_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_AAR_VSTART_LO, (reg >> 0) & 0xff); + + reg += cap->height; /* Vend */ + zr36060_write(ptr, ZR060_AAR_VEND_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_AAR_VEND_LO, (reg >> 0) & 0xff); + + reg = cap->x + norm->HStart; /* Hstart */ + zr36060_write(ptr, ZR060_AAR_HSTART_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_AAR_HSTART_LO, (reg >> 0) & 0xff); + + reg += cap->width; /* Hend */ + zr36060_write(ptr, ZR060_AAR_HEND_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_AAR_HEND_LO, (reg >> 0) & 0xff); + + /* subimage area */ + reg = norm->VStart - 4; /* SVstart */ + zr36060_write(ptr, ZR060_SWR_VSTART_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_SWR_VSTART_LO, (reg >> 0) & 0xff); + + reg += norm->Ha / 2 + 8; /* SVend */ + zr36060_write(ptr, ZR060_SWR_VEND_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_SWR_VEND_LO, (reg >> 0) & 0xff); + + reg = norm->HStart /*+ 64 */ - 4; /* SHstart */ + zr36060_write(ptr, ZR060_SWR_HSTART_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_SWR_HSTART_LO, (reg >> 0) & 0xff); + + reg += norm->Wa + 8; /* SHend */ + zr36060_write(ptr, ZR060_SWR_HEND_HI, (reg >> 8) & 0xff); + zr36060_write(ptr, ZR060_SWR_HEND_LO, (reg >> 0) & 0xff); + + size = ptr->width * ptr->height; + /* Target compressed field size in bits: */ + size = 
size * 16; /* uncompressed size in bits */ + /* (Ronald) by default, quality = 100 is a compression + * ratio 1:2. Setting low_bitrate (insmod option) sets + * it to 1:4 (instead of 1:2, zr36060 max) as limit because the + * buz can't handle more at decimation=1... Use low_bitrate if + * you have a Buz, unless you know what you're doing */ + size = size * cap->quality / (low_bitrate ? 400 : 200); + /* Lower limit (arbitrary, 1 KB) */ + if (size < 8192) + size = 8192; + /* Upper limit: 7/8 of the code buffers */ + if (size > ptr->total_code_vol * 7) + size = ptr->total_code_vol * 7; + + ptr->real_code_vol = size >> 3; /* in bytes */ + + /* the MBCVR is the *maximum* block volume, according to the + * JPEG ISO specs, this shouldn't be used, since that allows + * for the best encoding quality. So set it to it's max value */ + reg = ptr->max_block_vol; + zr36060_write(ptr, ZR060_MBCVR, reg); + + return 0; +} + +/* additional control functions */ +static int +zr36060_control (struct videocodec *codec, + int type, + int size, + void *data) +{ + struct zr36060 *ptr = (struct zr36060 *) codec->data; + int *ival = (int *) data; + + dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, + size); + + switch (type) { + case CODEC_G_STATUS: /* get last status */ + if (size != sizeof(int)) + return -EFAULT; + zr36060_read_status(ptr); + *ival = ptr->status; + break; + + case CODEC_G_CODEC_MODE: + if (size != sizeof(int)) + return -EFAULT; + *ival = CODEC_MODE_BJPG; + break; + + case CODEC_S_CODEC_MODE: + if (size != sizeof(int)) + return -EFAULT; + if (*ival != CODEC_MODE_BJPG) + return -EINVAL; + /* not needed, do nothing */ + return 0; + + case CODEC_G_VFE: + case CODEC_S_VFE: + /* not needed, do nothing */ + return 0; + + case CODEC_S_MMAP: + /* not available, give an error */ + return -ENXIO; + + case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */ + if (size != sizeof(int)) + return -EFAULT; + *ival = ptr->total_code_vol; + break; + + case 
CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */ + if (size != sizeof(int)) + return -EFAULT; + ptr->total_code_vol = *ival; + ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; + break; + + case CODEC_G_JPEG_SCALE: /* get scaling factor */ + if (size != sizeof(int)) + return -EFAULT; + *ival = zr36060_read_scalefactor(ptr); + break; + + case CODEC_S_JPEG_SCALE: /* set scaling factor */ + if (size != sizeof(int)) + return -EFAULT; + ptr->scalefact = *ival; + break; + + case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ + struct jpeg_app_marker *app = data; + + if (size != sizeof(struct jpeg_app_marker)) + return -EFAULT; + + *app = ptr->app; + break; + } + + case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ + struct jpeg_app_marker *app = data; + + if (size != sizeof(struct jpeg_app_marker)) + return -EFAULT; + + ptr->app = *app; + break; + } + + case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ + struct jpeg_com_marker *com = data; + + if (size != sizeof(struct jpeg_com_marker)) + return -EFAULT; + + *com = ptr->com; + break; + } + + case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ + struct jpeg_com_marker *com = data; + + if (size != sizeof(struct jpeg_com_marker)) + return -EFAULT; + + ptr->com = *com; + break; + } + + default: + return -EINVAL; + } + + return size; +} + +/* ========================================================================= + Exit and unregister function: + + Deinitializes Zoran's JPEG processor + ========================================================================= */ + +static int +zr36060_unset (struct videocodec *codec) +{ + struct zr36060 *ptr = codec->data; + + if (ptr) { + /* do wee need some codec deinit here, too ???? 
*/ + + dprintk(1, "%s: finished codec #%d\n", ptr->name, + ptr->num); + kfree(ptr); + codec->data = NULL; + + zr36060_codecs--; + return 0; + } + + return -EFAULT; +} + +/* ========================================================================= + Setup and registry function: + + Initializes Zoran's JPEG processor + + Also sets pixel size, average code size, mode (compr./decompr.) + (the given size is determined by the processor with the video interface) + ========================================================================= */ + +static int +zr36060_setup (struct videocodec *codec) +{ + struct zr36060 *ptr; + int res; + + dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n", + zr36060_codecs); + + if (zr36060_codecs == MAX_CODECS) { + dprintk(1, + KERN_ERR "zr36060: Can't attach more codecs!\n"); + return -ENOSPC; + } + //mem structure init + codec->data = ptr = kzalloc(sizeof(struct zr36060), GFP_KERNEL); + if (NULL == ptr) { + dprintk(1, KERN_ERR "zr36060: Can't get enough memory!\n"); + return -ENOMEM; + } + + snprintf(ptr->name, sizeof(ptr->name), "zr36060[%d]", + zr36060_codecs); + ptr->num = zr36060_codecs++; + ptr->codec = codec; + + //testing + res = zr36060_basic_test(ptr); + if (res < 0) { + zr36060_unset(codec); + return res; + } + //final setup + memcpy(ptr->h_samp_ratio, zr36060_decimation_h, 8); + memcpy(ptr->v_samp_ratio, zr36060_decimation_v, 8); + + ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag + * (what is the difference?) 
*/ + ptr->mode = CODEC_DO_COMPRESSION; + ptr->width = 384; + ptr->height = 288; + ptr->total_code_vol = 16000; /* CHECKME */ + ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; + ptr->max_block_vol = 240; /* CHECKME, was 120 is 240 */ + ptr->scalefact = 0x100; + ptr->dri = 1; /* CHECKME, was 8 is 1 */ + + /* by default, no COM or APP markers - app should set those */ + ptr->com.len = 0; + ptr->app.appn = 0; + ptr->app.len = 0; + + zr36060_init(ptr); + + dprintk(1, KERN_INFO "%s: codec attached and running\n", + ptr->name); + + return 0; +} + +static const struct videocodec zr36060_codec = { + .owner = THIS_MODULE, + .name = "zr36060", + .magic = 0L, // magic not used + .flags = + CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | + CODEC_FLAG_DECODER | CODEC_FLAG_VFE, + .type = CODEC_TYPE_ZR36060, + .setup = zr36060_setup, // functionality + .unset = zr36060_unset, + .set_mode = zr36060_set_mode, + .set_video = zr36060_set_video, + .control = zr36060_control, + // others are not used +}; + +/* ========================================================================= + HOOK IN DRIVER AS KERNEL MODULE + ========================================================================= */ + +static int __init +zr36060_init_module (void) +{ + //dprintk(1, "zr36060 driver %s\n",ZR060_VERSION); + zr36060_codecs = 0; + return videocodec_register(&zr36060_codec); +} + +static void __exit +zr36060_cleanup_module (void) +{ + if (zr36060_codecs) { + dprintk(1, + "zr36060: something's wrong - %d codecs left somehow.\n", + zr36060_codecs); + } + + /* however, we can't just stay alive */ + videocodec_unregister(&zr36060_codec); +} + +module_init(zr36060_init_module); +module_exit(zr36060_cleanup_module); + +MODULE_AUTHOR("Laurent Pinchart "); +MODULE_DESCRIPTION("Driver module for ZR36060 jpeg processors " + ZR060_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/zoran/zr36060.h b/drivers/staging/media/zoran/zr36060.h new file mode 100644 index 
000000000000..82911757ba78 --- /dev/null +++ b/drivers/staging/media/zoran/zr36060.h @@ -0,0 +1,216 @@ +/* + * Zoran ZR36060 basic configuration functions - header file + * + * Copyright (C) 2002 Laurent Pinchart + * + * $Id: zr36060.h,v 1.1.1.1.2.3 2003/01/14 21:18:47 rbultje Exp $ + * + * ------------------------------------------------------------------------ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ------------------------------------------------------------------------ + */ + +#ifndef ZR36060_H +#define ZR36060_H + +#include "videocodec.h" + +/* data stored for each zoran jpeg codec chip */ +struct zr36060 { + char name[32]; + int num; + /* io datastructure */ + struct videocodec *codec; + // last coder status + __u8 status; + // actual coder setup + int mode; + + __u16 width; + __u16 height; + + __u16 bitrate_ctrl; + + __u32 total_code_vol; + __u32 real_code_vol; + __u16 max_block_vol; + + __u8 h_samp_ratio[8]; + __u8 v_samp_ratio[8]; + __u16 scalefact; + __u16 dri; + + /* app/com marker data */ + struct jpeg_app_marker app; + struct jpeg_com_marker com; +}; + +/* ZR36060 register addresses */ +#define ZR060_LOAD 0x000 +#define ZR060_CFSR 0x001 +#define ZR060_CIR 0x002 +#define ZR060_CMR 0x003 +#define ZR060_MBZ 0x004 +#define ZR060_MBCVR 0x005 +#define ZR060_MER 0x006 +#define ZR060_IMR 0x007 +#define ZR060_ISR 0x008 +#define ZR060_TCV_NET_HI 0x009 +#define ZR060_TCV_NET_MH 0x00a +#define ZR060_TCV_NET_ML 0x00b +#define ZR060_TCV_NET_LO 0x00c +#define ZR060_TCV_DATA_HI 0x00d +#define 
ZR060_TCV_DATA_MH 0x00e +#define ZR060_TCV_DATA_ML 0x00f +#define ZR060_TCV_DATA_LO 0x010 +#define ZR060_SF_HI 0x011 +#define ZR060_SF_LO 0x012 +#define ZR060_AF_HI 0x013 +#define ZR060_AF_M 0x014 +#define ZR060_AF_LO 0x015 +#define ZR060_ACV_HI 0x016 +#define ZR060_ACV_MH 0x017 +#define ZR060_ACV_ML 0x018 +#define ZR060_ACV_LO 0x019 +#define ZR060_ACT_HI 0x01a +#define ZR060_ACT_MH 0x01b +#define ZR060_ACT_ML 0x01c +#define ZR060_ACT_LO 0x01d +#define ZR060_ACV_TRUN_HI 0x01e +#define ZR060_ACV_TRUN_MH 0x01f +#define ZR060_ACV_TRUN_ML 0x020 +#define ZR060_ACV_TRUN_LO 0x021 +#define ZR060_IDR_DEV 0x022 +#define ZR060_IDR_REV 0x023 +#define ZR060_TCR_HI 0x024 +#define ZR060_TCR_LO 0x025 +#define ZR060_VCR 0x030 +#define ZR060_VPR 0x031 +#define ZR060_SR 0x032 +#define ZR060_BCR_Y 0x033 +#define ZR060_BCR_U 0x034 +#define ZR060_BCR_V 0x035 +#define ZR060_SGR_VTOTAL_HI 0x036 +#define ZR060_SGR_VTOTAL_LO 0x037 +#define ZR060_SGR_HTOTAL_HI 0x038 +#define ZR060_SGR_HTOTAL_LO 0x039 +#define ZR060_SGR_VSYNC 0x03a +#define ZR060_SGR_HSYNC 0x03b +#define ZR060_SGR_BVSTART 0x03c +#define ZR060_SGR_BHSTART 0x03d +#define ZR060_SGR_BVEND_HI 0x03e +#define ZR060_SGR_BVEND_LO 0x03f +#define ZR060_SGR_BHEND_HI 0x040 +#define ZR060_SGR_BHEND_LO 0x041 +#define ZR060_AAR_VSTART_HI 0x042 +#define ZR060_AAR_VSTART_LO 0x043 +#define ZR060_AAR_VEND_HI 0x044 +#define ZR060_AAR_VEND_LO 0x045 +#define ZR060_AAR_HSTART_HI 0x046 +#define ZR060_AAR_HSTART_LO 0x047 +#define ZR060_AAR_HEND_HI 0x048 +#define ZR060_AAR_HEND_LO 0x049 +#define ZR060_SWR_VSTART_HI 0x04a +#define ZR060_SWR_VSTART_LO 0x04b +#define ZR060_SWR_VEND_HI 0x04c +#define ZR060_SWR_VEND_LO 0x04d +#define ZR060_SWR_HSTART_HI 0x04e +#define ZR060_SWR_HSTART_LO 0x04f +#define ZR060_SWR_HEND_HI 0x050 +#define ZR060_SWR_HEND_LO 0x051 + +#define ZR060_SOF_IDX 0x060 +#define ZR060_SOS_IDX 0x07a +#define ZR060_DRI_IDX 0x0c0 +#define ZR060_DQT_IDX 0x0cc +#define ZR060_DHT_IDX 0x1d4 +#define ZR060_APP_IDX 0x380 +#define ZR060_COM_IDX 
0x3c0 + +/* ZR36060 LOAD register bits */ + +#define ZR060_LOAD_Load (1 << 7) +#define ZR060_LOAD_SyncRst (1 << 0) + +/* ZR36060 Code FIFO Status register bits */ + +#define ZR060_CFSR_Busy (1 << 7) +#define ZR060_CFSR_CBusy (1 << 2) +#define ZR060_CFSR_CFIFO (3 << 0) + +/* ZR36060 Code Interface register */ + +#define ZR060_CIR_Code16 (1 << 7) +#define ZR060_CIR_Endian (1 << 6) +#define ZR060_CIR_CFIS (1 << 2) +#define ZR060_CIR_CodeMstr (1 << 0) + +/* ZR36060 Codec Mode register */ + +#define ZR060_CMR_Comp (1 << 7) +#define ZR060_CMR_ATP (1 << 6) +#define ZR060_CMR_Pass2 (1 << 5) +#define ZR060_CMR_TLM (1 << 4) +#define ZR060_CMR_BRB (1 << 2) +#define ZR060_CMR_FSF (1 << 1) + +/* ZR36060 Markers Enable register */ + +#define ZR060_MER_App (1 << 7) +#define ZR060_MER_Com (1 << 6) +#define ZR060_MER_DRI (1 << 5) +#define ZR060_MER_DQT (1 << 4) +#define ZR060_MER_DHT (1 << 3) + +/* ZR36060 Interrupt Mask register */ + +#define ZR060_IMR_EOAV (1 << 3) +#define ZR060_IMR_EOI (1 << 2) +#define ZR060_IMR_End (1 << 1) +#define ZR060_IMR_DataErr (1 << 0) + +/* ZR36060 Interrupt Status register */ + +#define ZR060_ISR_ProCnt (3 << 6) +#define ZR060_ISR_EOAV (1 << 3) +#define ZR060_ISR_EOI (1 << 2) +#define ZR060_ISR_End (1 << 1) +#define ZR060_ISR_DataErr (1 << 0) + +/* ZR36060 Video Control register */ + +#define ZR060_VCR_Video8 (1 << 7) +#define ZR060_VCR_Range (1 << 6) +#define ZR060_VCR_FIDet (1 << 3) +#define ZR060_VCR_FIVedge (1 << 2) +#define ZR060_VCR_FIExt (1 << 1) +#define ZR060_VCR_SyncMstr (1 << 0) + +/* ZR36060 Video Polarity register */ + +#define ZR060_VPR_VCLKPol (1 << 7) +#define ZR060_VPR_PValPol (1 << 6) +#define ZR060_VPR_PoePol (1 << 5) +#define ZR060_VPR_SImgPol (1 << 4) +#define ZR060_VPR_BLPol (1 << 3) +#define ZR060_VPR_FIPol (1 << 2) +#define ZR060_VPR_HSPol (1 << 1) +#define ZR060_VPR_VSPol (1 << 0) + +/* ZR36060 Scaling register */ + +#define ZR060_SR_VScale (1 << 2) +#define ZR060_SR_HScale2 (1 << 0) +#define ZR060_SR_HScale4 (2 << 0) + 
+#endif /*fndef ZR36060_H */ -- cgit v1.2.3 From 5c103791e4bef2830ab4cfa321fe2cf3c93730a7 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Fri, 25 May 2018 13:14:07 -0700 Subject: MAINTAINERS: hwmon: Add Documentation/devicetree/bindings/hwmon The hardware monitoring mailing list should be copied for changes in hwmon devicetree bindings. Signed-off-by: Guenter Roeck --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 078fd80f664f..1de11c4c3b56 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6218,6 +6218,7 @@ L: linux-hwmon@vger.kernel.org W: http://hwmon.wiki.kernel.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git S: Maintained +F: Documentation/devicetree/bindings/hwmon/ F: Documentation/hwmon/ F: drivers/hwmon/ F: include/linux/hwmon*.h -- cgit v1.2.3 From 488de0317bc50b0271de7baa03ecc91c3901d8ed Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Fri, 27 Apr 2018 15:55:41 +0200 Subject: MAINTAINERS: Update pattern for qcom_scm Update pattern for qcom_scm, so that get_maintainer.pl will show the correct maintainers + lists, not only for qcom_scm.c, but also for the files: qcom_scm-32.c, qcom_scm-64.c, qcom_scm.h. 
Signed-off-by: Niklas Cassel Reviewed-by: Bjorn Andersson Signed-off-by: Andy Gross --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..2761027ab419 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1812,7 +1812,7 @@ F: drivers/spi/spi-qup.c F: drivers/tty/serial/msm_serial.c F: drivers/*/pm8???-* F: drivers/mfd/ssbi.c -F: drivers/firmware/qcom_scm.c +F: drivers/firmware/qcom_scm* T: git git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux.git ARM/RADISYS ENP2611 MACHINE SUPPORT -- cgit v1.2.3 From c309f0cdc30667efd233f1220587fb0b18c8423b Mon Sep 17 00:00:00 2001 From: Dong Aisheng Date: Sat, 28 Apr 2018 03:06:17 +0800 Subject: MAINTAINERS: add NXP linux team maillist as i.MX reviewer Add NXP linux team upstream maillist as reviewer Cc: Sascha Hauer Cc: Fabio Estevam Signed-off-by: Dong Aisheng Signed-off-by: Shawn Guo --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..1defbc831a8f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1403,6 +1403,7 @@ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE M: Shawn Guo M: Sascha Hauer R: Fabio Estevam +R: NXP Linux Team L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git -- cgit v1.2.3 From 385c83ebc36baa117702a15ba584c65d0e15b4bf Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Mon, 14 May 2018 20:56:34 -0400 Subject: media: rcar-csi2: add Renesas R-Car MIPI CSI-2 receiver documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Documentation for Renesas R-Car MIPI CSI-2 receiver. The CSI-2 receivers are located between the video sources (CSI-2 transmitters) and the video grabbers (VIN) on Gen3 of Renesas R-Car SoC. 
Each CSI-2 device is connected to more than one VIN device which simultaneously can receive video from the same CSI-2 device. Each VIN device can also be connected to more than one CSI-2 device. The routing of which links are used is controlled by the VIN devices. There are only a few possible routes which are set by hardware limitations, which are different for each SoC in the Gen3 family. Signed-off-by: Niklas Söderlund Acked-by: Rob Herring Reviewed-by: Laurent Pinchart Reviewed-by: Jacopo Mondi Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- .../bindings/media/renesas,rcar-csi2.txt | 101 +++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 102 insertions(+) create mode 100644 Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt new file mode 100644 index 000000000000..2d385b65b275 --- /dev/null +++ b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt @@ -0,0 +1,101 @@ +Renesas R-Car MIPI CSI-2 +------------------------ + +The R-Car CSI-2 receiver device provides MIPI CSI-2 capabilities for the +Renesas R-Car family of devices. It is used in conjunction with the +R-Car VIN module, which provides the video capture capabilities. + +Mandatory properties +-------------------- + - compatible: Must be one or more of the following + - "renesas,r8a7795-csi2" for the R8A7795 device. + - "renesas,r8a7796-csi2" for the R8A7796 device. + - "renesas,r8a77965-csi2" for the R8A77965 device. + - "renesas,r8a77970-csi2" for the R8A77970 device. + + - reg: the register base and size for the device registers + - interrupts: the interrupt for the device + - clocks: reference to the parent clock + +The device node shall contain two 'port' child nodes according to the +bindings defined in Documentation/devicetree/bindings/media/ +video-interfaces.txt. 
port@0 shall connect to the CSI-2 source. port@1 +shall connect to all the R-Car VIN modules that have a hardware +connection to the CSI-2 receiver. + +- port@0- Video source (mandatory) + - endpoint@0 - sub-node describing the endpoint that is the video source + +- port@1 - VIN instances (optional) + - One endpoint sub-node for every R-Car VIN instance which is connected + to the R-Car CSI-2 receiver. + +Example: + + csi20: csi2@fea80000 { + compatible = "renesas,r8a7796-csi2"; + reg = <0 0xfea80000 0 0x10000>; + interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 714>; + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; + resets = <&cpg 714>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <0>; + + csi20_in: endpoint@0 { + reg = <0>; + clock-lanes = <0>; + data-lanes = <1>; + remote-endpoint = <&adv7482_txb>; + }; + }; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + csi20vin0: endpoint@0 { + reg = <0>; + remote-endpoint = <&vin0csi20>; + }; + csi20vin1: endpoint@1 { + reg = <1>; + remote-endpoint = <&vin1csi20>; + }; + csi20vin2: endpoint@2 { + reg = <2>; + remote-endpoint = <&vin2csi20>; + }; + csi20vin3: endpoint@3 { + reg = <3>; + remote-endpoint = <&vin3csi20>; + }; + csi20vin4: endpoint@4 { + reg = <4>; + remote-endpoint = <&vin4csi20>; + }; + csi20vin5: endpoint@5 { + reg = <5>; + remote-endpoint = <&vin5csi20>; + }; + csi20vin6: endpoint@6 { + reg = <6>; + remote-endpoint = <&vin6csi20>; + }; + csi20vin7: endpoint@7 { + reg = <7>; + remote-endpoint = <&vin7csi20>; + }; + }; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index cbcd5ab4c2f4..8347eb6adc4e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8832,6 +8832,7 @@ L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported +F: Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt F: 
Documentation/devicetree/bindings/media/rcar_vin.txt F: drivers/media/platform/rcar-vin/ -- cgit v1.2.3 From e4802cb00bfe3d2ae4fbdec48fe4eda10f3b5486 Mon Sep 17 00:00:00 2001 From: Jason Chen Date: Wed, 2 May 2018 11:46:08 -0400 Subject: media: imx258: Add imx258 camera sensor driver Add a V4L2 sub-device driver for the Sony IMX258 image sensor. This is a camera sensor using the I2C bus for control and the CSI-2 bus for data. Signed-off-by: Jason Chen Signed-off-by: Andy Yeh Signed-off-by: Alan Chiang Reviewed-by: Tomasz Figa Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 7 + drivers/media/i2c/Kconfig | 11 + drivers/media/i2c/Makefile | 1 + drivers/media/i2c/imx258.c | 1320 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1339 insertions(+) create mode 100644 drivers/media/i2c/imx258.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 8347eb6adc4e..d9db405297cd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13070,6 +13070,13 @@ S: Maintained F: drivers/ssb/ F: include/linux/ssb/ +SONY IMX258 SENSOR DRIVER +M: Sakari Ailus +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/i2c/imx258.c + SONY IMX274 SENSOR DRIVER M: Leon Luo L: linux-media@vger.kernel.org diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index b95b44702ccb..341452fe98df 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -575,6 +575,17 @@ config VIDEO_APTINA_PLL config VIDEO_SMIAPP_PLL tristate +config VIDEO_IMX258 + tristate "Sony IMX258 sensor support" + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on MEDIA_CAMERA_SUPPORT + ---help--- + This is a Video4Linux2 sensor-level driver for the Sony + IMX258 camera. + + To compile this driver as a module, choose M here: the + module will be called imx258. 
+ config VIDEO_IMX274 tristate "Sony IMX274 sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index ff6e2914abda..d679d57cd3b3 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -101,6 +101,7 @@ obj-$(CONFIG_VIDEO_I2C) += video-i2c.o obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o obj-$(CONFIG_VIDEO_OV2659) += ov2659.o obj-$(CONFIG_VIDEO_TC358743) += tc358743.o +obj-$(CONFIG_VIDEO_IMX258) += imx258.o obj-$(CONFIG_VIDEO_IMX274) += imx274.o obj-$(CONFIG_SDR_MAX2175) += max2175.o diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c new file mode 100644 index 000000000000..fad3012f4fe5 --- /dev/null +++ b/drivers/media/i2c/imx258.c @@ -0,0 +1,1320 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include + +#define IMX258_REG_VALUE_08BIT 1 +#define IMX258_REG_VALUE_16BIT 2 + +#define IMX258_REG_MODE_SELECT 0x0100 +#define IMX258_MODE_STANDBY 0x00 +#define IMX258_MODE_STREAMING 0x01 + +/* Chip ID */ +#define IMX258_REG_CHIP_ID 0x0016 +#define IMX258_CHIP_ID 0x0258 + +/* V_TIMING internal */ +#define IMX258_VTS_30FPS 0x0c98 +#define IMX258_VTS_30FPS_2K 0x0638 +#define IMX258_VTS_30FPS_VGA 0x034c +#define IMX258_VTS_MAX 0xffff + +/*Frame Length Line*/ +#define IMX258_FLL_MIN 0x08a6 +#define IMX258_FLL_MAX 0xffff +#define IMX258_FLL_STEP 1 +#define IMX258_FLL_DEFAULT 0x0c98 + +/* HBLANK control - read only */ +#define IMX258_PPL_DEFAULT 5352 + +/* Exposure control */ +#define IMX258_REG_EXPOSURE 0x0202 +#define IMX258_EXPOSURE_MIN 4 +#define IMX258_EXPOSURE_STEP 1 +#define IMX258_EXPOSURE_DEFAULT 0x640 +#define IMX258_EXPOSURE_MAX 65535 + +/* Analog gain control */ +#define IMX258_REG_ANALOG_GAIN 0x0204 +#define IMX258_ANA_GAIN_MIN 0 +#define IMX258_ANA_GAIN_MAX 0x1fff +#define IMX258_ANA_GAIN_STEP 1 +#define IMX258_ANA_GAIN_DEFAULT 0x0 + +/* 
Digital gain control */ +#define IMX258_REG_GR_DIGITAL_GAIN 0x020e +#define IMX258_REG_R_DIGITAL_GAIN 0x0210 +#define IMX258_REG_B_DIGITAL_GAIN 0x0212 +#define IMX258_REG_GB_DIGITAL_GAIN 0x0214 +#define IMX258_DGTL_GAIN_MIN 0 +#define IMX258_DGTL_GAIN_MAX 4096 /* Max = 0xFFF */ +#define IMX258_DGTL_GAIN_DEFAULT 1024 +#define IMX258_DGTL_GAIN_STEP 1 + +/* Test Pattern Control */ +#define IMX258_REG_TEST_PATTERN 0x0600 +#define IMX258_TEST_PATTERN_DISABLE 0 +#define IMX258_TEST_PATTERN_SOLID_COLOR 1 +#define IMX258_TEST_PATTERN_COLOR_BARS 2 +#define IMX258_TEST_PATTERN_GREY_COLOR 3 +#define IMX258_TEST_PATTERN_PN9 4 + +/* Orientation */ +#define REG_MIRROR_FLIP_CONTROL 0x0101 +#define REG_CONFIG_MIRROR_FLIP 0x03 +#define REG_CONFIG_FLIP_TEST_PATTERN 0x02 + +struct imx258_reg { + u16 address; + u8 val; +}; + +struct imx258_reg_list { + u32 num_of_regs; + const struct imx258_reg *regs; +}; + +/* Link frequency config */ +struct imx258_link_freq_config { + u32 pixels_per_line; + + /* PLL registers for this link frequency */ + struct imx258_reg_list reg_list; +}; + +/* Mode : resolution and related config&values */ +struct imx258_mode { + /* Frame width */ + u32 width; + /* Frame height */ + u32 height; + + /* V-timing */ + u32 vts_def; + u32 vts_min; + + /* Index of Link frequency config to be used */ + u32 link_freq_index; + /* Default register values */ + struct imx258_reg_list reg_list; +}; + +/* 4208x3118 needs 1267Mbps/lane, 4 lanes */ +static const struct imx258_reg mipi_data_rate_1267mbps[] = { + { 0x0301, 0x05 }, + { 0x0303, 0x02 }, + { 0x0305, 0x03 }, + { 0x0306, 0x00 }, + { 0x0307, 0xC6 }, + { 0x0309, 0x0A }, + { 0x030B, 0x01 }, + { 0x030D, 0x02 }, + { 0x030E, 0x00 }, + { 0x030F, 0xD8 }, + { 0x0310, 0x00 }, + { 0x0820, 0x13 }, + { 0x0821, 0x4C }, + { 0x0822, 0xCC }, + { 0x0823, 0xCC }, +}; + +static const struct imx258_reg mipi_data_rate_640mbps[] = { + { 0x0301, 0x05 }, + { 0x0303, 0x02 }, + { 0x0305, 0x03 }, + { 0x0306, 0x00 }, + { 0x0307, 0x64 }, + { 
0x0309, 0x0A }, + { 0x030B, 0x01 }, + { 0x030D, 0x02 }, + { 0x030E, 0x00 }, + { 0x030F, 0xD8 }, + { 0x0310, 0x00 }, + { 0x0820, 0x0A }, + { 0x0821, 0x00 }, + { 0x0822, 0x00 }, + { 0x0823, 0x00 }, +}; + +static const struct imx258_reg mode_4208x3118_regs[] = { + { 0x0136, 0x13 }, + { 0x0137, 0x33 }, + { 0x3051, 0x00 }, + { 0x3052, 0x00 }, + { 0x4E21, 0x14 }, + { 0x6B11, 0xCF }, + { 0x7FF0, 0x08 }, + { 0x7FF1, 0x0F }, + { 0x7FF2, 0x08 }, + { 0x7FF3, 0x1B }, + { 0x7FF4, 0x23 }, + { 0x7FF5, 0x60 }, + { 0x7FF6, 0x00 }, + { 0x7FF7, 0x01 }, + { 0x7FF8, 0x00 }, + { 0x7FF9, 0x78 }, + { 0x7FFA, 0x00 }, + { 0x7FFB, 0x00 }, + { 0x7FFC, 0x00 }, + { 0x7FFD, 0x00 }, + { 0x7FFE, 0x00 }, + { 0x7FFF, 0x03 }, + { 0x7F76, 0x03 }, + { 0x7F77, 0xFE }, + { 0x7FA8, 0x03 }, + { 0x7FA9, 0xFE }, + { 0x7B24, 0x81 }, + { 0x7B25, 0x00 }, + { 0x6564, 0x07 }, + { 0x6B0D, 0x41 }, + { 0x653D, 0x04 }, + { 0x6B05, 0x8C }, + { 0x6B06, 0xF9 }, + { 0x6B08, 0x65 }, + { 0x6B09, 0xFC }, + { 0x6B0A, 0xCF }, + { 0x6B0B, 0xD2 }, + { 0x6700, 0x0E }, + { 0x6707, 0x0E }, + { 0x9104, 0x00 }, + { 0x4648, 0x7F }, + { 0x7420, 0x00 }, + { 0x7421, 0x1C }, + { 0x7422, 0x00 }, + { 0x7423, 0xD7 }, + { 0x5F04, 0x00 }, + { 0x5F05, 0xED }, + { 0x0112, 0x0A }, + { 0x0113, 0x0A }, + { 0x0114, 0x03 }, + { 0x0342, 0x14 }, + { 0x0343, 0xE8 }, + { 0x0340, 0x0C }, + { 0x0341, 0x50 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x10 }, + { 0x0349, 0x6F }, + { 0x034A, 0x0C }, + { 0x034B, 0x2E }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x00 }, + { 0x0409, 0x00 }, + { 0x040A, 0x00 }, + { 0x040B, 0x00 }, + { 0x040C, 0x10 }, + { 0x040D, 0x70 }, + { 0x040E, 0x0C }, + { 0x040F, 0x30 }, + { 0x3038, 0x00 }, + { 0x303A, 0x00 }, + { 0x303B, 0x10 }, + { 0x300D, 0x00 }, + { 0x034C, 0x10 }, + { 0x034D, 0x70 }, + { 0x034E, 0x0C }, + { 0x034F, 0x30 }, 
+ { 0x0350, 0x01 }, + { 0x0202, 0x0C }, + { 0x0203, 0x46 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x020E, 0x01 }, + { 0x020F, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x7BCD, 0x00 }, + { 0x94DC, 0x20 }, + { 0x94DD, 0x20 }, + { 0x94DE, 0x20 }, + { 0x95DC, 0x20 }, + { 0x95DD, 0x20 }, + { 0x95DE, 0x20 }, + { 0x7FB0, 0x00 }, + { 0x9010, 0x3E }, + { 0x9419, 0x50 }, + { 0x941B, 0x50 }, + { 0x9519, 0x50 }, + { 0x951B, 0x50 }, + { 0x3030, 0x00 }, + { 0x3032, 0x00 }, + { 0x0220, 0x00 }, +}; + +static const struct imx258_reg mode_2104_1560_regs[] = { + { 0x0136, 0x13 }, + { 0x0137, 0x33 }, + { 0x3051, 0x00 }, + { 0x3052, 0x00 }, + { 0x4E21, 0x14 }, + { 0x6B11, 0xCF }, + { 0x7FF0, 0x08 }, + { 0x7FF1, 0x0F }, + { 0x7FF2, 0x08 }, + { 0x7FF3, 0x1B }, + { 0x7FF4, 0x23 }, + { 0x7FF5, 0x60 }, + { 0x7FF6, 0x00 }, + { 0x7FF7, 0x01 }, + { 0x7FF8, 0x00 }, + { 0x7FF9, 0x78 }, + { 0x7FFA, 0x00 }, + { 0x7FFB, 0x00 }, + { 0x7FFC, 0x00 }, + { 0x7FFD, 0x00 }, + { 0x7FFE, 0x00 }, + { 0x7FFF, 0x03 }, + { 0x7F76, 0x03 }, + { 0x7F77, 0xFE }, + { 0x7FA8, 0x03 }, + { 0x7FA9, 0xFE }, + { 0x7B24, 0x81 }, + { 0x7B25, 0x00 }, + { 0x6564, 0x07 }, + { 0x6B0D, 0x41 }, + { 0x653D, 0x04 }, + { 0x6B05, 0x8C }, + { 0x6B06, 0xF9 }, + { 0x6B08, 0x65 }, + { 0x6B09, 0xFC }, + { 0x6B0A, 0xCF }, + { 0x6B0B, 0xD2 }, + { 0x6700, 0x0E }, + { 0x6707, 0x0E }, + { 0x9104, 0x00 }, + { 0x4648, 0x7F }, + { 0x7420, 0x00 }, + { 0x7421, 0x1C }, + { 0x7422, 0x00 }, + { 0x7423, 0xD7 }, + { 0x5F04, 0x00 }, + { 0x5F05, 0xED }, + { 0x0112, 0x0A }, + { 0x0113, 0x0A }, + { 0x0114, 0x03 }, + { 0x0342, 0x14 }, + { 0x0343, 0xE8 }, + { 0x0340, 0x06 }, + { 0x0341, 0x38 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x10 }, + { 0x0349, 0x6F }, + { 0x034A, 0x0C }, + { 0x034B, 0x2E }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 
0x12 }, + { 0x0401, 0x01 }, + { 0x0404, 0x00 }, + { 0x0405, 0x20 }, + { 0x0408, 0x00 }, + { 0x0409, 0x02 }, + { 0x040A, 0x00 }, + { 0x040B, 0x00 }, + { 0x040C, 0x10 }, + { 0x040D, 0x6A }, + { 0x040E, 0x06 }, + { 0x040F, 0x18 }, + { 0x3038, 0x00 }, + { 0x303A, 0x00 }, + { 0x303B, 0x10 }, + { 0x300D, 0x00 }, + { 0x034C, 0x08 }, + { 0x034D, 0x38 }, + { 0x034E, 0x06 }, + { 0x034F, 0x18 }, + { 0x0350, 0x01 }, + { 0x0202, 0x06 }, + { 0x0203, 0x2E }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x020E, 0x01 }, + { 0x020F, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x7BCD, 0x01 }, + { 0x94DC, 0x20 }, + { 0x94DD, 0x20 }, + { 0x94DE, 0x20 }, + { 0x95DC, 0x20 }, + { 0x95DD, 0x20 }, + { 0x95DE, 0x20 }, + { 0x7FB0, 0x00 }, + { 0x9010, 0x3E }, + { 0x9419, 0x50 }, + { 0x941B, 0x50 }, + { 0x9519, 0x50 }, + { 0x951B, 0x50 }, + { 0x3030, 0x00 }, + { 0x3032, 0x00 }, + { 0x0220, 0x00 }, +}; + +static const struct imx258_reg mode_1048_780_regs[] = { + { 0x0136, 0x13 }, + { 0x0137, 0x33 }, + { 0x3051, 0x00 }, + { 0x3052, 0x00 }, + { 0x4E21, 0x14 }, + { 0x6B11, 0xCF }, + { 0x7FF0, 0x08 }, + { 0x7FF1, 0x0F }, + { 0x7FF2, 0x08 }, + { 0x7FF3, 0x1B }, + { 0x7FF4, 0x23 }, + { 0x7FF5, 0x60 }, + { 0x7FF6, 0x00 }, + { 0x7FF7, 0x01 }, + { 0x7FF8, 0x00 }, + { 0x7FF9, 0x78 }, + { 0x7FFA, 0x00 }, + { 0x7FFB, 0x00 }, + { 0x7FFC, 0x00 }, + { 0x7FFD, 0x00 }, + { 0x7FFE, 0x00 }, + { 0x7FFF, 0x03 }, + { 0x7F76, 0x03 }, + { 0x7F77, 0xFE }, + { 0x7FA8, 0x03 }, + { 0x7FA9, 0xFE }, + { 0x7B24, 0x81 }, + { 0x7B25, 0x00 }, + { 0x6564, 0x07 }, + { 0x6B0D, 0x41 }, + { 0x653D, 0x04 }, + { 0x6B05, 0x8C }, + { 0x6B06, 0xF9 }, + { 0x6B08, 0x65 }, + { 0x6B09, 0xFC }, + { 0x6B0A, 0xCF }, + { 0x6B0B, 0xD2 }, + { 0x6700, 0x0E }, + { 0x6707, 0x0E }, + { 0x9104, 0x00 }, + { 0x4648, 0x7F }, + { 0x7420, 0x00 }, + { 0x7421, 0x1C }, + { 0x7422, 0x00 }, + { 0x7423, 0xD7 }, + { 0x5F04, 0x00 }, + { 0x5F05, 0xED }, + { 0x0112, 0x0A }, + { 
0x0113, 0x0A }, + { 0x0114, 0x03 }, + { 0x0342, 0x14 }, + { 0x0343, 0xE8 }, + { 0x0340, 0x03 }, + { 0x0341, 0x4C }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x10 }, + { 0x0349, 0x6F }, + { 0x034A, 0x0C }, + { 0x034B, 0x2E }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x14 }, + { 0x0401, 0x01 }, + { 0x0404, 0x00 }, + { 0x0405, 0x40 }, + { 0x0408, 0x00 }, + { 0x0409, 0x06 }, + { 0x040A, 0x00 }, + { 0x040B, 0x00 }, + { 0x040C, 0x10 }, + { 0x040D, 0x64 }, + { 0x040E, 0x03 }, + { 0x040F, 0x0C }, + { 0x3038, 0x00 }, + { 0x303A, 0x00 }, + { 0x303B, 0x10 }, + { 0x300D, 0x00 }, + { 0x034C, 0x04 }, + { 0x034D, 0x18 }, + { 0x034E, 0x03 }, + { 0x034F, 0x0C }, + { 0x0350, 0x01 }, + { 0x0202, 0x03 }, + { 0x0203, 0x42 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x020E, 0x01 }, + { 0x020F, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x7BCD, 0x00 }, + { 0x94DC, 0x20 }, + { 0x94DD, 0x20 }, + { 0x94DE, 0x20 }, + { 0x95DC, 0x20 }, + { 0x95DD, 0x20 }, + { 0x95DE, 0x20 }, + { 0x7FB0, 0x00 }, + { 0x9010, 0x3E }, + { 0x9419, 0x50 }, + { 0x941B, 0x50 }, + { 0x9519, 0x50 }, + { 0x951B, 0x50 }, + { 0x3030, 0x00 }, + { 0x3032, 0x00 }, + { 0x0220, 0x00 }, +}; + +static const char * const imx258_test_pattern_menu[] = { + "Disabled", + "Color Bars", + "Solid Color", + "Grey Color Bars", + "PN9" +}; + +static const int imx258_test_pattern_val[] = { + IMX258_TEST_PATTERN_DISABLE, + IMX258_TEST_PATTERN_COLOR_BARS, + IMX258_TEST_PATTERN_SOLID_COLOR, + IMX258_TEST_PATTERN_GREY_COLOR, + IMX258_TEST_PATTERN_PN9, +}; + +/* Configurations for supported link frequencies */ +#define IMX258_LINK_FREQ_634MHZ 633600000ULL +#define IMX258_LINK_FREQ_320MHZ 320000000ULL + +enum { + IMX258_LINK_FREQ_1267MBPS, + IMX258_LINK_FREQ_640MBPS, +}; + +/* + * pixel_rate = link_freq * data-rate * nr_of_lanes / 
bits_per_sample + * data rate => double data rate; number of lanes => 4; bits per pixel => 10 + */ +static u64 link_freq_to_pixel_rate(u64 f) +{ + f *= 2 * 4; + do_div(f, 10); + + return f; +} + +/* Menu items for LINK_FREQ V4L2 control */ +static const s64 link_freq_menu_items[] = { + IMX258_LINK_FREQ_634MHZ, + IMX258_LINK_FREQ_320MHZ, +}; + +/* Link frequency configs */ +static const struct imx258_link_freq_config link_freq_configs[] = { + [IMX258_LINK_FREQ_1267MBPS] = { + .pixels_per_line = IMX258_PPL_DEFAULT, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_1267mbps), + .regs = mipi_data_rate_1267mbps, + } + }, + [IMX258_LINK_FREQ_640MBPS] = { + .pixels_per_line = IMX258_PPL_DEFAULT, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_640mbps), + .regs = mipi_data_rate_640mbps, + } + }, +}; + +/* Mode configs */ +static const struct imx258_mode supported_modes[] = { + { + .width = 4208, + .height = 3118, + .vts_def = IMX258_VTS_30FPS, + .vts_min = IMX258_VTS_30FPS, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_4208x3118_regs), + .regs = mode_4208x3118_regs, + }, + .link_freq_index = IMX258_LINK_FREQ_1267MBPS, + }, + { + .width = 2104, + .height = 1560, + .vts_def = IMX258_VTS_30FPS_2K, + .vts_min = IMX258_VTS_30FPS_2K, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_2104_1560_regs), + .regs = mode_2104_1560_regs, + }, + .link_freq_index = IMX258_LINK_FREQ_640MBPS, + }, + { + .width = 1048, + .height = 780, + .vts_def = IMX258_VTS_30FPS_VGA, + .vts_min = IMX258_VTS_30FPS_VGA, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1048_780_regs), + .regs = mode_1048_780_regs, + }, + .link_freq_index = IMX258_LINK_FREQ_640MBPS, + }, +}; + +struct imx258 { + struct v4l2_subdev sd; + struct media_pad pad; + + struct v4l2_ctrl_handler ctrl_handler; + /* V4L2 Controls */ + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *exposure; + + /* Current mode */ + const 
struct imx258_mode *cur_mode; + + /* + * Mutex for serialized access: + * Protect sensor module set pad format and start/stop streaming safely. + */ + struct mutex mutex; + + /* Streaming on/off */ + bool streaming; +}; + +static inline struct imx258 *to_imx258(struct v4l2_subdev *_sd) +{ + return container_of(_sd, struct imx258, sd); +} + +/* Read registers up to 2 at a time */ +static int imx258_read_reg(struct imx258 *imx258, u16 reg, u32 len, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd); + struct i2c_msg msgs[2]; + u8 addr_buf[2] = { reg >> 8, reg & 0xff }; + u8 data_buf[4] = { 0, }; + int ret; + + if (len > 4) + return -EINVAL; + + /* Write register address */ + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = ARRAY_SIZE(addr_buf); + msgs[0].buf = addr_buf; + + /* Read data from register */ + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = &data_buf[4 - len]; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return -EIO; + + *val = get_unaligned_be32(data_buf); + + return 0; +} + +/* Write registers up to 2 at a time */ +static int imx258_write_reg(struct imx258 *imx258, u16 reg, u32 len, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd); + u8 buf[6]; + + if (len > 4) + return -EINVAL; + + put_unaligned_be16(reg, buf); + put_unaligned_be32(val << (8 * (4 - len)), buf + 2); + if (i2c_master_send(client, buf, len + 2) != len + 2) + return -EIO; + + return 0; +} + +/* Write a list of registers */ +static int imx258_write_regs(struct imx258 *imx258, + const struct imx258_reg *regs, u32 len) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd); + unsigned int i; + int ret; + + for (i = 0; i < len; i++) { + ret = imx258_write_reg(imx258, regs[i].address, 1, + regs[i].val); + if (ret) { + dev_err_ratelimited( + &client->dev, + "Failed to write reg 0x%4.4x. 
error = %d\n", + regs[i].address, ret); + + return ret; + } + } + + return 0; +} + +/* Open sub-device */ +static int imx258_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, fh->pad, 0); + + /* Initialize try_fmt */ + try_fmt->width = supported_modes[0].width; + try_fmt->height = supported_modes[0].height; + try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; + try_fmt->field = V4L2_FIELD_NONE; + + return 0; +} + +static int imx258_update_digital_gain(struct imx258 *imx258, u32 len, u32 val) +{ + int ret; + + ret = imx258_write_reg(imx258, IMX258_REG_GR_DIGITAL_GAIN, + IMX258_REG_VALUE_16BIT, + val); + if (ret) + return ret; + ret = imx258_write_reg(imx258, IMX258_REG_GB_DIGITAL_GAIN, + IMX258_REG_VALUE_16BIT, + val); + if (ret) + return ret; + ret = imx258_write_reg(imx258, IMX258_REG_R_DIGITAL_GAIN, + IMX258_REG_VALUE_16BIT, + val); + if (ret) + return ret; + ret = imx258_write_reg(imx258, IMX258_REG_B_DIGITAL_GAIN, + IMX258_REG_VALUE_16BIT, + val); + if (ret) + return ret; + return 0; +} + +static int imx258_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct imx258 *imx258 = + container_of(ctrl->handler, struct imx258, ctrl_handler); + struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd); + int ret = 0; + + /* + * Applying V4L2 control value only happens + * when power is up for streaming + */ + if (pm_runtime_get_if_in_use(&client->dev) == 0) + return 0; + + switch (ctrl->id) { + case V4L2_CID_ANALOGUE_GAIN: + ret = imx258_write_reg(imx258, IMX258_REG_ANALOG_GAIN, + IMX258_REG_VALUE_16BIT, + ctrl->val); + break; + case V4L2_CID_EXPOSURE: + ret = imx258_write_reg(imx258, IMX258_REG_EXPOSURE, + IMX258_REG_VALUE_16BIT, + ctrl->val); + break; + case V4L2_CID_DIGITAL_GAIN: + ret = imx258_update_digital_gain(imx258, IMX258_REG_VALUE_16BIT, + ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + ret = imx258_write_reg(imx258, IMX258_REG_TEST_PATTERN, + IMX258_REG_VALUE_16BIT, + 
imx258_test_pattern_val[ctrl->val]); + + ret = imx258_write_reg(imx258, REG_MIRROR_FLIP_CONTROL, + IMX258_REG_VALUE_08BIT, + ctrl->val == imx258_test_pattern_val + [IMX258_TEST_PATTERN_DISABLE] ? + REG_CONFIG_MIRROR_FLIP : + REG_CONFIG_FLIP_TEST_PATTERN); + break; + default: + dev_info(&client->dev, + "ctrl(id:0x%x,val:0x%x) is not handled\n", + ctrl->id, ctrl->val); + ret = -EINVAL; + break; + } + + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_ctrl_ops imx258_ctrl_ops = { + .s_ctrl = imx258_set_ctrl, +}; + +static int imx258_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + /* Only one bayer order(GRBG) is supported */ + if (code->index > 0) + return -EINVAL; + + code->code = MEDIA_BUS_FMT_SGRBG10_1X10; + + return 0; +} + +static int imx258_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + + if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; + fse->max_width = fse->min_width; + fse->min_height = supported_modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +static void imx258_update_pad_format(const struct imx258_mode *mode, + struct v4l2_subdev_format *fmt) +{ + fmt->format.width = mode->width; + fmt->format.height = mode->height; + fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; + fmt->format.field = V4L2_FIELD_NONE; +} + +static int __imx258_get_pad_format(struct imx258 *imx258, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) + fmt->format = *v4l2_subdev_get_try_format(&imx258->sd, cfg, + fmt->pad); + else + imx258_update_pad_format(imx258->cur_mode, fmt); + + return 0; +} + +static int imx258_get_pad_format(struct v4l2_subdev *sd, + struct 
v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct imx258 *imx258 = to_imx258(sd); + int ret; + + mutex_lock(&imx258->mutex); + ret = __imx258_get_pad_format(imx258, cfg, fmt); + mutex_unlock(&imx258->mutex); + + return ret; +} + +static int imx258_set_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct imx258 *imx258 = to_imx258(sd); + const struct imx258_mode *mode; + struct v4l2_mbus_framefmt *framefmt; + s32 vblank_def; + s32 vblank_min; + s64 h_blank; + s64 pixel_rate; + s64 link_freq; + + mutex_lock(&imx258->mutex); + + /* Only one raw bayer(GBRG) order is supported */ + fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; + + mode = v4l2_find_nearest_size(supported_modes, + ARRAY_SIZE(supported_modes), width, height, + fmt->format.width, fmt->format.height); + imx258_update_pad_format(mode, fmt); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + *framefmt = fmt->format; + } else { + imx258->cur_mode = mode; + __v4l2_ctrl_s_ctrl(imx258->link_freq, mode->link_freq_index); + + link_freq = link_freq_menu_items[mode->link_freq_index]; + pixel_rate = link_freq_to_pixel_rate(link_freq); + __v4l2_ctrl_s_ctrl_int64(imx258->pixel_rate, pixel_rate); + /* Update limits and set FPS to default */ + vblank_def = imx258->cur_mode->vts_def - + imx258->cur_mode->height; + vblank_min = imx258->cur_mode->vts_min - + imx258->cur_mode->height; + __v4l2_ctrl_modify_range( + imx258->vblank, vblank_min, + IMX258_VTS_MAX - imx258->cur_mode->height, 1, + vblank_def); + __v4l2_ctrl_s_ctrl(imx258->vblank, vblank_def); + h_blank = + link_freq_configs[mode->link_freq_index].pixels_per_line + - imx258->cur_mode->width; + __v4l2_ctrl_modify_range(imx258->hblank, h_blank, + h_blank, 1, h_blank); + } + + mutex_unlock(&imx258->mutex); + + return 0; +} + +/* Start streaming */ +static int imx258_start_streaming(struct imx258 *imx258) +{ + struct 
i2c_client *client = v4l2_get_subdevdata(&imx258->sd); + const struct imx258_reg_list *reg_list; + int ret, link_freq_index; + + /* Setup PLL */ + link_freq_index = imx258->cur_mode->link_freq_index; + reg_list = &link_freq_configs[link_freq_index].reg_list; + ret = imx258_write_regs(imx258, reg_list->regs, reg_list->num_of_regs); + if (ret) { + dev_err(&client->dev, "%s failed to set plls\n", __func__); + return ret; + } + + /* Apply default values of current mode */ + reg_list = &imx258->cur_mode->reg_list; + ret = imx258_write_regs(imx258, reg_list->regs, reg_list->num_of_regs); + if (ret) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return ret; + } + + /* Set Orientation be 180 degree */ + ret = imx258_write_reg(imx258, REG_MIRROR_FLIP_CONTROL, + IMX258_REG_VALUE_08BIT, REG_CONFIG_MIRROR_FLIP); + if (ret) { + dev_err(&client->dev, "%s failed to set orientation\n", + __func__); + return ret; + } + + /* Apply customized values from user */ + ret = __v4l2_ctrl_handler_setup(imx258->sd.ctrl_handler); + if (ret) + return ret; + + /* set stream on register */ + return imx258_write_reg(imx258, IMX258_REG_MODE_SELECT, + IMX258_REG_VALUE_08BIT, + IMX258_MODE_STREAMING); +} + +/* Stop streaming */ +static int imx258_stop_streaming(struct imx258 *imx258) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd); + int ret; + + /* set stream off register */ + ret = imx258_write_reg(imx258, IMX258_REG_MODE_SELECT, + IMX258_REG_VALUE_08BIT, IMX258_MODE_STANDBY); + if (ret) + dev_err(&client->dev, "%s failed to set stream\n", __func__); + + /* + * Return success even if it was an error, as there is nothing the + * caller can do about it. 
+ */ + return 0; +} + +static int imx258_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct imx258 *imx258 = to_imx258(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret = 0; + + mutex_lock(&imx258->mutex); + if (imx258->streaming == enable) { + mutex_unlock(&imx258->mutex); + return 0; + } + + if (enable) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&client->dev); + goto err_unlock; + } + + /* + * Apply default & customized values + * and then start streaming. + */ + ret = imx258_start_streaming(imx258); + if (ret) + goto err_rpm_put; + } else { + imx258_stop_streaming(imx258); + pm_runtime_put(&client->dev); + } + + imx258->streaming = enable; + mutex_unlock(&imx258->mutex); + + return ret; + +err_rpm_put: + pm_runtime_put(&client->dev); +err_unlock: + mutex_unlock(&imx258->mutex); + + return ret; +} + +static int __maybe_unused imx258_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx258 *imx258 = to_imx258(sd); + + if (imx258->streaming) + imx258_stop_streaming(imx258); + + return 0; +} + +static int __maybe_unused imx258_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx258 *imx258 = to_imx258(sd); + int ret; + + if (imx258->streaming) { + ret = imx258_start_streaming(imx258); + if (ret) + goto error; + } + + return 0; + +error: + imx258_stop_streaming(imx258); + imx258->streaming = 0; + return ret; +} + +/* Verify chip ID */ +static int imx258_identify_module(struct imx258 *imx258) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd); + int ret; + u32 val; + + ret = imx258_read_reg(imx258, IMX258_REG_CHIP_ID, + IMX258_REG_VALUE_16BIT, &val); + if (ret) { + dev_err(&client->dev, "failed to read chip id %x\n", + IMX258_CHIP_ID); + return ret; + } + + if (val != IMX258_CHIP_ID) { + 
dev_err(&client->dev, "chip id mismatch: %x!=%x\n", + IMX258_CHIP_ID, val); + return -EIO; + } + + return 0; +} + +static const struct v4l2_subdev_video_ops imx258_video_ops = { + .s_stream = imx258_set_stream, +}; + +static const struct v4l2_subdev_pad_ops imx258_pad_ops = { + .enum_mbus_code = imx258_enum_mbus_code, + .get_fmt = imx258_get_pad_format, + .set_fmt = imx258_set_pad_format, + .enum_frame_size = imx258_enum_frame_size, +}; + +static const struct v4l2_subdev_ops imx258_subdev_ops = { + .video = &imx258_video_ops, + .pad = &imx258_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops imx258_internal_ops = { + .open = imx258_open, +}; + +/* Initialize control handlers */ +static int imx258_init_controls(struct imx258 *imx258) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd); + struct v4l2_ctrl_handler *ctrl_hdlr; + s64 exposure_max; + s64 vblank_def; + s64 vblank_min; + s64 pixel_rate_min; + s64 pixel_rate_max; + int ret; + + ctrl_hdlr = &imx258->ctrl_handler; + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); + if (ret) + return ret; + + mutex_init(&imx258->mutex); + ctrl_hdlr->lock = &imx258->mutex; + imx258->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, + &imx258_ctrl_ops, + V4L2_CID_LINK_FREQ, + ARRAY_SIZE(link_freq_menu_items) - 1, + 0, + link_freq_menu_items); + + if (imx258->link_freq) + imx258->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]); + pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]); + /* By default, PIXEL_RATE is read only */ + imx258->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx258_ctrl_ops, + V4L2_CID_PIXEL_RATE, + pixel_rate_min, pixel_rate_max, + 1, pixel_rate_max); + + + vblank_def = imx258->cur_mode->vts_def - imx258->cur_mode->height; + vblank_min = imx258->cur_mode->vts_min - imx258->cur_mode->height; + imx258->vblank = v4l2_ctrl_new_std( + ctrl_hdlr, &imx258_ctrl_ops, V4L2_CID_VBLANK, + vblank_min, + IMX258_VTS_MAX - 
imx258->cur_mode->height, 1, + vblank_def); + + if (imx258->vblank) + imx258->vblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + imx258->hblank = v4l2_ctrl_new_std( + ctrl_hdlr, &imx258_ctrl_ops, V4L2_CID_HBLANK, + IMX258_PPL_DEFAULT - imx258->cur_mode->width, + IMX258_PPL_DEFAULT - imx258->cur_mode->width, + 1, + IMX258_PPL_DEFAULT - imx258->cur_mode->width); + + if (imx258->hblank) + imx258->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + exposure_max = imx258->cur_mode->vts_def - 8; + imx258->exposure = v4l2_ctrl_new_std( + ctrl_hdlr, &imx258_ctrl_ops, + V4L2_CID_EXPOSURE, IMX258_EXPOSURE_MIN, + IMX258_EXPOSURE_MAX, IMX258_EXPOSURE_STEP, + IMX258_EXPOSURE_DEFAULT); + + v4l2_ctrl_new_std(ctrl_hdlr, &imx258_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, + IMX258_ANA_GAIN_MIN, IMX258_ANA_GAIN_MAX, + IMX258_ANA_GAIN_STEP, IMX258_ANA_GAIN_DEFAULT); + + v4l2_ctrl_new_std(ctrl_hdlr, &imx258_ctrl_ops, V4L2_CID_DIGITAL_GAIN, + IMX258_DGTL_GAIN_MIN, IMX258_DGTL_GAIN_MAX, + IMX258_DGTL_GAIN_STEP, + IMX258_DGTL_GAIN_DEFAULT); + + v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx258_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(imx258_test_pattern_menu) - 1, + 0, 0, imx258_test_pattern_menu); + + if (ctrl_hdlr->error) { + ret = ctrl_hdlr->error; + dev_err(&client->dev, "%s control init failed (%d)\n", + __func__, ret); + goto error; + } + + imx258->sd.ctrl_handler = ctrl_hdlr; + + return 0; + +error: + v4l2_ctrl_handler_free(ctrl_hdlr); + mutex_destroy(&imx258->mutex); + + return ret; +} + +static void imx258_free_controls(struct imx258 *imx258) +{ + v4l2_ctrl_handler_free(imx258->sd.ctrl_handler); + mutex_destroy(&imx258->mutex); +} + +static int imx258_probe(struct i2c_client *client) +{ + struct imx258 *imx258; + int ret; + u32 val = 0; + + device_property_read_u32(&client->dev, "clock-frequency", &val); + if (val != 19200000) + return -EINVAL; + + imx258 = devm_kzalloc(&client->dev, sizeof(*imx258), GFP_KERNEL); + if (!imx258) + return -ENOMEM; + + /* Initialize subdev */ + 
v4l2_i2c_subdev_init(&imx258->sd, client, &imx258_subdev_ops); + + /* Check module identity */ + ret = imx258_identify_module(imx258); + if (ret) + return ret; + + /* Set default mode to max resolution */ + imx258->cur_mode = &supported_modes[0]; + + ret = imx258_init_controls(imx258); + if (ret) + return ret; + + /* Initialize subdev */ + imx258->sd.internal_ops = &imx258_internal_ops; + imx258->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + imx258->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; + + /* Initialize source pad */ + imx258->pad.flags = MEDIA_PAD_FL_SOURCE; + + ret = media_entity_pads_init(&imx258->sd.entity, 1, &imx258->pad); + if (ret) + goto error_handler_free; + + ret = v4l2_async_register_subdev_sensor_common(&imx258->sd); + if (ret < 0) + goto error_media_entity; + + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + pm_runtime_idle(&client->dev); + + return 0; + +error_media_entity: + media_entity_cleanup(&imx258->sd.entity); + +error_handler_free: + imx258_free_controls(imx258); + + return ret; +} + +static int imx258_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx258 *imx258 = to_imx258(sd); + + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + imx258_free_controls(imx258); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + + return 0; +} + +static const struct dev_pm_ops imx258_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(imx258_suspend, imx258_resume) +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id imx258_acpi_ids[] = { + { "SONY258A" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(acpi, imx258_acpi_ids); +#endif + +static struct i2c_driver imx258_i2c_driver = { + .driver = { + .name = "imx258", + .pm = &imx258_pm_ops, + .acpi_match_table = ACPI_PTR(imx258_acpi_ids), + }, + .probe_new = imx258_probe, + .remove = imx258_remove, +}; + +module_i2c_driver(imx258_i2c_driver); + +MODULE_AUTHOR("Yeh, Andy "); 
+MODULE_AUTHOR("Chiang, Alan "); +MODULE_AUTHOR("Chen, Jason "); +MODULE_DESCRIPTION("Sony IMX258 sensor driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From f12de5e911ef29935abc580618bd9b720832612d Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 6 May 2018 10:19:16 -0400 Subject: media: dt-bindings: ov772x: add device tree binding This adds a device tree binding documentation for OV7720/OV7725 sensor. Cc: Jacopo Mondi Cc: Hans Verkuil Cc: Mauro Carvalho Chehab Cc: Rob Herring Reviewed-by: Rob Herring Reviewed-by: Jacopo Mondi Reviewed-by: Laurent Pinchart Signed-off-by: Akinobu Mita Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- .../devicetree/bindings/media/i2c/ov772x.txt | 40 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 41 insertions(+) create mode 100644 Documentation/devicetree/bindings/media/i2c/ov772x.txt (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/media/i2c/ov772x.txt b/Documentation/devicetree/bindings/media/i2c/ov772x.txt new file mode 100644 index 000000000000..0b3ede5b8e6a --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/ov772x.txt @@ -0,0 +1,40 @@ +* Omnivision OV7720/OV7725 CMOS sensor + +The Omnivision OV7720/OV7725 sensor supports multiple resolutions output, +such as VGA, QVGA, and any size scaling down from CIF to 40x30. It also can +support the YUV422, RGB565/555/444, GRB422 or raw RGB output formats. + +Required Properties: +- compatible: shall be one of + "ovti,ov7720" + "ovti,ov7725" +- clocks: reference to the xclk input clock. + +Optional Properties: +- reset-gpios: reference to the GPIO connected to the RSTB pin which is + active low, if any. +- powerdown-gpios: reference to the GPIO connected to the PWDN pin which is + active high, if any. 
+ +The device node shall contain one 'port' child node with one child 'endpoint' +subnode for its digital output video port, in accordance with the video +interface bindings defined in Documentation/devicetree/bindings/media/ +video-interfaces.txt. + +Example: + +&i2c0 { + ov772x: camera@21 { + compatible = "ovti,ov7725"; + reg = <0x21>; + reset-gpios = <&axi_gpio_0 0 GPIO_ACTIVE_LOW>; + powerdown-gpios = <&axi_gpio_0 1 GPIO_ACTIVE_LOW>; + clocks = <&xclk>; + + port { + ov772x_0: endpoint { + remote-endpoint = <&vcap1_in0>; + }; + }; + }; +}; diff --git a/MAINTAINERS b/MAINTAINERS index d9db405297cd..a38e24a3702e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10364,6 +10364,7 @@ T: git git://linuxtv.org/media_tree.git S: Odd fixes F: drivers/media/i2c/ov772x.c F: include/media/i2c/ov772x.h +F: Documentation/devicetree/bindings/media/i2c/ov772x.txt OMNIVISION OV7740 SENSOR DRIVER M: Wenyou Yang -- cgit v1.2.3 From 316d55d55f49eca442e4fd948f5fa92bab0c8312 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 28 May 2018 18:21:56 +0900 Subject: Documentation: kconfig: document a new Kconfig macro language Add a document for the macro language introduced to Kconfig. The motivation of this work is to move the compiler option tests to Kconfig from Makefile. A number of kernel features require the compiler support. Enabling such features blindly in Kconfig ends up with a lot of nasty build-time testing in Makefiles. If a chosen feature turns out unsupported by the compiler, what the build system can do is either to disable it (silently!) or to forcibly break the build, despite Kconfig has let the user to enable it. By moving the compiler capability tests to Kconfig, features unsupported by the compiler will be hidden automatically. This change was strongly prompted by Linus Torvalds. You can find his suggestions [1] [2] in ML. 
The original idea was to add a new attribute with 'option shell=...', but I found more generalized text expansion would make Kconfig more powerful and lovely. The basic ideas are from Make, but there are some differences. [1]: https://lkml.org/lkml/2016/12/9/577 [2]: https://lkml.org/lkml/2018/2/7/527 Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook Reviewed-by: Randy Dunlap --- Documentation/kbuild/kconfig-macro-language.txt | 242 ++++++++++++++++++++++++ MAINTAINERS | 2 +- 2 files changed, 243 insertions(+), 1 deletion(-) create mode 100644 Documentation/kbuild/kconfig-macro-language.txt (limited to 'MAINTAINERS') diff --git a/Documentation/kbuild/kconfig-macro-language.txt b/Documentation/kbuild/kconfig-macro-language.txt new file mode 100644 index 000000000000..07da2ea68dce --- /dev/null +++ b/Documentation/kbuild/kconfig-macro-language.txt @@ -0,0 +1,242 @@ +Concept +------- + +The basic idea was inspired by Make. When we look at Make, we notice sort of +two languages in one. One language describes dependency graphs consisting of +targets and prerequisites. The other is a macro language for performing textual +substitution. + +There is clear distinction between the two language stages. For example, you +can write a makefile like follows: + + APP := foo + SRC := foo.c + CC := gcc + + $(APP): $(SRC) + $(CC) -o $(APP) $(SRC) + +The macro language replaces the variable references with their expanded form, +and handles as if the source file were input like follows: + + foo: foo.c + gcc -o foo foo.c + +Then, Make analyzes the dependency graph and determines the targets to be +updated. 
+ +The idea is quite similar in Kconfig - it is possible to describe a Kconfig +file like this: + + CC := gcc + + config CC_HAS_FOO + def_bool $(shell, $(srctree)/scripts/gcc-check-foo.sh $(CC)) + +The macro language in Kconfig processes the source file into the following +intermediate: + + config CC_HAS_FOO + def_bool y + +Then, Kconfig moves onto the evaluation stage to resolve inter-symbol +dependency as explained in kconfig-language.txt. + + +Variables +--------- + +Like in Make, a variable in Kconfig works as a macro variable. A macro +variable is expanded "in place" to yield a text string that may then be +expanded further. To get the value of a variable, enclose the variable name in +$( ). The parentheses are required even for single-letter variable names; $X is +a syntax error. The curly brace form as in ${CC} is not supported either. + +There are two types of variables: simply expanded variables and recursively +expanded variables. + +A simply expanded variable is defined using the := assignment operator. Its +righthand side is expanded immediately upon reading the line from the Kconfig +file. + +A recursively expanded variable is defined using the = assignment operator. +Its righthand side is simply stored as the value of the variable without +expanding it in any way. Instead, the expansion is performed when the variable +is used. + +There is another type of assignment operator; += is used to append text to a +variable. The righthand side of += is expanded immediately if the lefthand +side was originally defined as a simple variable. Otherwise, its evaluation is +deferred. + +The variable reference can take parameters, in the following form: + + $(name,arg1,arg2,arg3) + +You can consider the parameterized reference as a function. (more precisely, +"user-defined function" in contrast to "built-in function" listed below). 
+ +Useful functions must be expanded when they are used since the same function is +expanded differently if different parameters are passed. Hence, a user-defined +function is defined using the = assignment operator. The parameters are +referenced within the body definition with $(1), $(2), etc. + +In fact, recursively expanded variables and user-defined functions are the same +internally. (In other words, "variable" is "function with zero argument".) +When we say "variable" in a broad sense, it includes "user-defined function". + + +Built-in functions +------------------ + +Like Make, Kconfig provides several built-in functions. Every function takes a +particular number of arguments. + +In Make, every built-in function takes at least one argument. Kconfig allows +zero argument for built-in functions, such as $(fileno), $(lineno). You could +consider those as "built-in variable", but it is just a matter of how we call +it after all. Let's say "built-in function" here to refer to natively supported +functionality. + +Kconfig currently supports the following built-in functions. + + - $(shell,command) + + The "shell" function accepts a single argument that is expanded and passed + to a subshell for execution. The standard output of the command is then read + and returned as the value of the function. Every newline in the output is + replaced with a space. Any trailing newlines are deleted. The standard error + is not returned, nor is any program exit status. + + - $(info,text) + + The "info" function takes a single argument and prints it to stdout. + It evaluates to an empty string. + + - $(warning-if,condition,text) + + The "warning-if" function takes two arguments. If the condition part is "y", + the text part is sent to stderr. The text is prefixed with the name of the + current Kconfig file and the current line number. 
+ + - $(error-if,condition,text) + + The "error-if" function is similar to "warning-if", but it terminates the + parsing immediately if the condition part is "y". + + - $(filename) + + The 'filename' takes no argument, and $(filename) is expanded to the file + name being parsed. + + - $(lineno) + + The 'lineno' takes no argument, and $(lineno) is expanded to the line number + being parsed. + + +Make vs Kconfig +--------------- + +Kconfig adopts Make-like macro language, but the function call syntax is +slightly different. + +A function call in Make looks like this: + + $(func-name arg1,arg2,arg3) + +The function name and the first argument are separated by at least one +whitespace. Then, leading whitespaces are trimmed from the first argument, +while whitespaces in the other arguments are kept. You need to use a kind of +trick to start the first parameter with spaces. For example, if you want +to make "info" function print " hello", you can write like follows: + + empty := + space := $(empty) $(empty) + $(info $(space)$(space)hello) + +Kconfig uses only commas for delimiters, and keeps all whitespaces in the +function call. Some people prefer putting a space after each comma delimiter: + + $(func-name, arg1, arg2, arg3) + +In this case, "func-name" will receive " arg1", " arg2", " arg3". The presence +of leading spaces may matter depending on the function. The same applies to +Make - for example, $(subst .c, .o, $(sources)) is a typical mistake; it +replaces ".c" with " .o". + +In Make, a user-defined function is referenced by using a built-in function, +'call', like this: + + $(call my-func,arg1,arg2,arg3) + +Kconfig invokes user-defined functions and built-in functions in the same way. +The omission of 'call' makes the syntax shorter. + +In Make, some functions treat commas verbatim instead of argument separators. +For example, $(shell echo hello, world) runs the command "echo hello, world". +Likewise, $(info hello, world) prints "hello, world" to stdout. 
You could say +this is _useful_ inconsistency. + +In Kconfig, for simpler implementation and grammatical consistency, commas that +appear in the $( ) context are always delimiters. It means + + $(shell, echo hello, world) + +is an error because it is passing two parameters where the 'shell' function +accepts only one. To pass commas in arguments, you can use the following trick: + + comma := , + $(shell, echo hello$(comma) world) + + +Caveats +------- + +A variable (or function) cannot be expanded across tokens. So, you cannot use +a variable as a shorthand for an expression that consists of multiple tokens. +The following works: + + RANGE_MIN := 1 + RANGE_MAX := 3 + + config FOO + int "foo" + range $(RANGE_MIN) $(RANGE_MAX) + +But, the following does not work: + + RANGES := 1 3 + + config FOO + int "foo" + range $(RANGES) + +A variable cannot be expanded to any keyword in Kconfig. The following does +not work: + + MY_TYPE := tristate + + config FOO + $(MY_TYPE) "foo" + default y + +Obviously from the design, $(shell command) is expanded in the textual +substitution phase. You cannot pass symbols to the 'shell' function. +The following does not work as expected. + + config ENDIAN_FLAG + string + default "-mbig-endian" if CPU_BIG_ENDIAN + default "-mlittle-endian" if CPU_LITTLE_ENDIAN + + config CC_HAS_ENDIAN_FLAG + def_bool $(shell $(srctree)/scripts/gcc-check-flag ENDIAN_FLAG) + +Instead, you can do like follows so that any function call is statically +expanded. 
+ + config CC_HAS_ENDIAN_FLAG + bool + default $(shell $(srctree)/scripts/gcc-check-flag -mbig-endian) if CPU_BIG_ENDIAN + default $(shell $(srctree)/scripts/gcc-check-flag -mlittle-endian) if CPU_LITTLE_ENDIAN diff --git a/MAINTAINERS b/MAINTAINERS index ca4afd68530c..b87723a6ef32 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7637,7 +7637,7 @@ M: Masahiro Yamada T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig L: linux-kbuild@vger.kernel.org S: Maintained -F: Documentation/kbuild/kconfig-language.txt +F: Documentation/kbuild/kconfig* F: scripts/kconfig/ KDUMP -- cgit v1.2.3 From e1cfdc0e72fc9ad7c04ad6329acb92876e062849 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 28 May 2018 18:21:59 +0900 Subject: kconfig: add basic helper macros to scripts/Kconfig.include Kconfig got text processing tools like we see in Make. Add Kconfig helper macros to scripts/Kconfig.include like we collect Makefile macros in scripts/Kbuild.include. Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook Reviewed-by: Ulf Magnusson --- Kconfig | 2 ++ MAINTAINERS | 1 + arch/x86/um/Kconfig | 2 ++ scripts/Kconfig.include | 27 +++++++++++++++++++++++++++ 4 files changed, 32 insertions(+) create mode 100644 scripts/Kconfig.include (limited to 'MAINTAINERS') diff --git a/Kconfig b/Kconfig index 5b55d876af03..a90d9f9e268b 100644 --- a/Kconfig +++ b/Kconfig @@ -7,4 +7,6 @@ mainmenu "Linux/$(ARCH) $(KERNELVERSION) Kernel Configuration" comment "Compiler: $(CC_VERSION_TEXT)" +source "scripts/Kconfig.include" + source "arch/$(SRCARCH)/Kconfig" diff --git a/MAINTAINERS b/MAINTAINERS index b87723a6ef32..79decb12cfbf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7639,6 +7639,7 @@ L: linux-kbuild@vger.kernel.org S: Maintained F: Documentation/kbuild/kconfig* F: scripts/kconfig/ +F: scripts/Kconfig.include KDUMP M: Dave Young diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index a992f8e94887..9d529f22fd9d 100644 --- a/arch/x86/um/Kconfig +++ 
b/arch/x86/um/Kconfig @@ -3,6 +3,8 @@ mainmenu "User Mode Linux/$(SUBARCH) $(KERNELVERSION) Kernel Configuration" comment "Compiler: $(CC_VERSION_TEXT)" +source "scripts/Kconfig.include" + source "arch/um/Kconfig.common" menu "UML-specific options" diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include new file mode 100644 index 000000000000..bf7c0c9fa3a4 --- /dev/null +++ b/scripts/Kconfig.include @@ -0,0 +1,27 @@ +# Kconfig helper macros + +# Convenient variables +comma := , +quote := " +squote := ' +empty := +space := $(empty) $(empty) +dollar := $ +right_paren := ) +left_paren := ( + +# $(if-success,<command>,<then>,<else>) +# Return <then> if <command> exits with 0, <else> otherwise. +if-success = $(shell,{ $(1); } >/dev/null 2>&1 && echo "$(2)" || echo "$(3)") + +# $(success,<command>) +# Return y if <command> exits with 0, n otherwise +success = $(if-success,$(1),y,n) + +# $(cc-option,<flag>) +# Return y if the compiler supports <flag>, n otherwise +cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null) + +# $(ld-option,<flag>) +# Return y if the linker supports <flag>, n otherwise +ld-option = $(success,$(LD) -v $(1)) -- cgit v1.2.3 From 30c8bd5aa8b2c78546c3e52337101b9c85879320 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 24 May 2018 09:55:13 -0700 Subject: net: Introduce generic failover module The failover module provides a generic interface for paravirtual drivers to register a netdev and a set of ops with a failover instance. The ops are used as event handlers that get called to handle netdev register/ unregister/link change/name change events on slave pci ethernet devices with the same mac address as the failover netdev. This enables paravirtual drivers to use a VF as an accelerated low latency datapath. It also allows migration of VMs with direct attached VFs by failing over to the paravirtual datapath when the VF is unplugged. Signed-off-by: Sridhar Samudrala Signed-off-by: David S.
Miller --- Documentation/networking/failover.rst | 18 ++ MAINTAINERS | 8 + include/linux/netdevice.h | 16 ++ include/net/failover.h | 36 ++++ net/Kconfig | 13 ++ net/core/Makefile | 1 + net/core/failover.c | 315 ++++++++++++++++++++++++++++++++++ 7 files changed, 407 insertions(+) create mode 100644 Documentation/networking/failover.rst create mode 100644 include/net/failover.h create mode 100644 net/core/failover.c (limited to 'MAINTAINERS') diff --git a/Documentation/networking/failover.rst b/Documentation/networking/failover.rst new file mode 100644 index 000000000000..f0c8483cdbf5 --- /dev/null +++ b/Documentation/networking/failover.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======== +FAILOVER +======== + +Overview +======== + +The failover module provides a generic interface for paravirtual drivers +to register a netdev and a set of ops with a failover instance. The ops +are used as event handlers that get called to handle netdev register/ +unregister/link change/name change events on slave pci ethernet devices +with the same mac address as the failover netdev. + +This enables paravirtual drivers to use a VF as an accelerated low latency +datapath. It also allows live migration of VMs with direct attached VFs by +failing over to the paravirtual datapath when the VF is unplugged. diff --git a/MAINTAINERS b/MAINTAINERS index f492431b239b..6c59bdf49a8a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5411,6 +5411,14 @@ S: Maintained F: Documentation/hwmon/f71805f F: drivers/hwmon/f71805f.c +FAILOVER MODULE +M: Sridhar Samudrala +L: netdev@vger.kernel.org +S: Supported +F: net/core/failover.c +F: include/net/failover.h +F: Documentation/networking/failover.rst + FANOTIFY M: Jan Kara R: Amir Goldstein diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8452f72087ef..f45b1a4e37ab 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1425,6 +1425,8 @@ struct net_device_ops { * entity (i.e. 
the master device for bridged veth) * @IFF_MACSEC: device is a MACsec device * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook + * @IFF_FAILOVER: device is a failover master device + * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1454,6 +1456,8 @@ enum netdev_priv_flags { IFF_PHONY_HEADROOM = 1<<24, IFF_MACSEC = 1<<25, IFF_NO_RX_HANDLER = 1<<26, + IFF_FAILOVER = 1<<27, + IFF_FAILOVER_SLAVE = 1<<28, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1482,6 +1486,8 @@ enum netdev_priv_flags { #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED #define IFF_MACSEC IFF_MACSEC #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER +#define IFF_FAILOVER IFF_FAILOVER +#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE /** * struct net_device - The DEVICE structure. @@ -4336,6 +4342,16 @@ static inline bool netif_is_rxfh_configured(const struct net_device *dev) return dev->priv_flags & IFF_RXFH_CONFIGURED; } +static inline bool netif_is_failover(const struct net_device *dev) +{ + return dev->priv_flags & IFF_FAILOVER; +} + +static inline bool netif_is_failover_slave(const struct net_device *dev) +{ + return dev->priv_flags & IFF_FAILOVER_SLAVE; +} + /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { diff --git a/include/net/failover.h b/include/net/failover.h new file mode 100644 index 000000000000..bb15438f39c7 --- /dev/null +++ b/include/net/failover.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
*/ + +#ifndef _FAILOVER_H +#define _FAILOVER_H + +#include + +struct failover_ops { + int (*slave_pre_register)(struct net_device *slave_dev, + struct net_device *failover_dev); + int (*slave_register)(struct net_device *slave_dev, + struct net_device *failover_dev); + int (*slave_pre_unregister)(struct net_device *slave_dev, + struct net_device *failover_dev); + int (*slave_unregister)(struct net_device *slave_dev, + struct net_device *failover_dev); + int (*slave_link_change)(struct net_device *slave_dev, + struct net_device *failover_dev); + int (*slave_name_change)(struct net_device *slave_dev, + struct net_device *failover_dev); + rx_handler_result_t (*slave_handle_frame)(struct sk_buff **pskb); +}; + +struct failover { + struct list_head list; + struct net_device __rcu *failover_dev; + struct failover_ops __rcu *ops; +}; + +struct failover *failover_register(struct net_device *dev, + struct failover_ops *ops); +void failover_unregister(struct failover *failover); +int failover_slave_unregister(struct net_device *slave_dev); + +#endif /* _FAILOVER_H */ diff --git a/net/Kconfig b/net/Kconfig index ba554cedb615..f738a6f27665 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -432,6 +432,19 @@ config MAY_USE_DEVLINK config PAGE_POOL bool +config FAILOVER + tristate "Generic failover module" + help + The failover module provides a generic interface for paravirtual + drivers to register a netdev and a set of ops with a failover + instance. The ops are used as event handlers that get called to + handle netdev register/unregister/link change/name change events + on slave pci ethernet devices with the same mac address as the + failover netdev. This enables paravirtual drivers to use a + VF as an accelerated low latency datapath. It also allows live + migration of VMs with direct attached VFs by failing over to the + paravirtual datapath when the VF is unplugged. + endif # if NET # Used by archs to tell that they support BPF JIT compiler plus which flavour. 
diff --git a/net/core/Makefile b/net/core/Makefile index 7080417f8bc8..80175e6a2eb8 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -31,3 +31,4 @@ obj-$(CONFIG_DST_CACHE) += dst_cache.o obj-$(CONFIG_HWBM) += hwbm.o obj-$(CONFIG_NET_DEVLINK) += devlink.o obj-$(CONFIG_GRO_CELLS) += gro_cells.o +obj-$(CONFIG_FAILOVER) += failover.o diff --git a/net/core/failover.c b/net/core/failover.c new file mode 100644 index 000000000000..4a92a98ccce9 --- /dev/null +++ b/net/core/failover.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +/* A common module to handle registrations and notifications for paravirtual + * drivers to enable accelerated datapath and support VF live migration. + * + * The notifier and event handling code is based on netvsc driver. + */ + +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(failover_list); +static DEFINE_SPINLOCK(failover_lock); + +static struct net_device *failover_get_bymac(u8 *mac, struct failover_ops **ops) +{ + struct net_device *failover_dev; + struct failover *failover; + + spin_lock(&failover_lock); + list_for_each_entry(failover, &failover_list, list) { + failover_dev = rtnl_dereference(failover->failover_dev); + if (ether_addr_equal(failover_dev->perm_addr, mac)) { + *ops = rtnl_dereference(failover->ops); + spin_unlock(&failover_lock); + return failover_dev; + } + } + spin_unlock(&failover_lock); + return NULL; +} + +/** + * failover_slave_register - Register a slave netdev + * + * @slave_dev: slave netdev that is being registered + * + * Registers a slave device to a failover instance. Only ethernet devices + * are supported. 
+ */ +static int failover_slave_register(struct net_device *slave_dev) +{ + struct netdev_lag_upper_info lag_upper_info; + struct net_device *failover_dev; + struct failover_ops *fops; + int err; + + if (slave_dev->type != ARPHRD_ETHER) + goto done; + + ASSERT_RTNL(); + + failover_dev = failover_get_bymac(slave_dev->perm_addr, &fops); + if (!failover_dev) + goto done; + + if (fops && fops->slave_pre_register && + fops->slave_pre_register(slave_dev, failover_dev)) + goto done; + + err = netdev_rx_handler_register(slave_dev, fops->slave_handle_frame, + failover_dev); + if (err) { + netdev_err(slave_dev, "can not register failover rx handler (err = %d)\n", + err); + goto done; + } + + lag_upper_info.tx_type = NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; + err = netdev_master_upper_dev_link(slave_dev, failover_dev, NULL, + &lag_upper_info, NULL); + if (err) { + netdev_err(slave_dev, "can not set failover device %s (err = %d)\n", + failover_dev->name, err); + goto err_upper_link; + } + + slave_dev->priv_flags |= IFF_FAILOVER_SLAVE; + + if (fops && fops->slave_register && + !fops->slave_register(slave_dev, failover_dev)) + return NOTIFY_OK; + + netdev_upper_dev_unlink(slave_dev, failover_dev); + slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE; +err_upper_link: + netdev_rx_handler_unregister(slave_dev); +done: + return NOTIFY_DONE; +} + +/** + * failover_slave_unregister - Unregister a slave netdev + * + * @slave_dev: slave netdev that is being unregistered + * + * Unregisters a slave device from a failover instance. 
+ */ +int failover_slave_unregister(struct net_device *slave_dev) +{ + struct net_device *failover_dev; + struct failover_ops *fops; + + if (!netif_is_failover_slave(slave_dev)) + goto done; + + ASSERT_RTNL(); + + failover_dev = failover_get_bymac(slave_dev->perm_addr, &fops); + if (!failover_dev) + goto done; + + if (fops && fops->slave_pre_unregister && + fops->slave_pre_unregister(slave_dev, failover_dev)) + goto done; + + netdev_rx_handler_unregister(slave_dev); + netdev_upper_dev_unlink(slave_dev, failover_dev); + slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE; + + if (fops && fops->slave_unregister && + !fops->slave_unregister(slave_dev, failover_dev)) + return NOTIFY_OK; + +done: + return NOTIFY_DONE; +} +EXPORT_SYMBOL_GPL(failover_slave_unregister); + +static int failover_slave_link_change(struct net_device *slave_dev) +{ + struct net_device *failover_dev; + struct failover_ops *fops; + + if (!netif_is_failover_slave(slave_dev)) + goto done; + + ASSERT_RTNL(); + + failover_dev = failover_get_bymac(slave_dev->perm_addr, &fops); + if (!failover_dev) + goto done; + + if (!netif_running(failover_dev)) + goto done; + + if (fops && fops->slave_link_change && + !fops->slave_link_change(slave_dev, failover_dev)) + return NOTIFY_OK; + +done: + return NOTIFY_DONE; +} + +static int failover_slave_name_change(struct net_device *slave_dev) +{ + struct net_device *failover_dev; + struct failover_ops *fops; + + if (!netif_is_failover_slave(slave_dev)) + goto done; + + ASSERT_RTNL(); + + failover_dev = failover_get_bymac(slave_dev->perm_addr, &fops); + if (!failover_dev) + goto done; + + if (!netif_running(failover_dev)) + goto done; + + if (fops && fops->slave_name_change && + !fops->slave_name_change(slave_dev, failover_dev)) + return NOTIFY_OK; + +done: + return NOTIFY_DONE; +} + +static int +failover_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + + /* Skip parent events */ + if 
(netif_is_failover(event_dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_REGISTER: + return failover_slave_register(event_dev); + case NETDEV_UNREGISTER: + return failover_slave_unregister(event_dev); + case NETDEV_UP: + case NETDEV_DOWN: + case NETDEV_CHANGE: + return failover_slave_link_change(event_dev); + case NETDEV_CHANGENAME: + return failover_slave_name_change(event_dev); + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block failover_notifier = { + .notifier_call = failover_event, +}; + +static void +failover_existing_slave_register(struct net_device *failover_dev) +{ + struct net *net = dev_net(failover_dev); + struct net_device *dev; + + rtnl_lock(); + for_each_netdev(net, dev) { + if (netif_is_failover(dev)) + continue; + if (ether_addr_equal(failover_dev->perm_addr, dev->perm_addr)) + failover_slave_register(dev); + } + rtnl_unlock(); +} + +/** + * failover_register - Register a failover instance + * + * @dev: failover netdev + * @ops: failover ops + * + * Allocate and register a failover instance for a failover netdev. ops + * provides handlers for slave device register/unregister/link change/ + * name change events. 
+ * + * Return: pointer to failover instance + */ +struct failover *failover_register(struct net_device *dev, + struct failover_ops *ops) +{ + struct failover *failover; + + if (dev->type != ARPHRD_ETHER) + return ERR_PTR(-EINVAL); + + failover = kzalloc(sizeof(*failover), GFP_KERNEL); + if (!failover) + return ERR_PTR(-ENOMEM); + + rcu_assign_pointer(failover->ops, ops); + dev_hold(dev); + dev->priv_flags |= IFF_FAILOVER; + rcu_assign_pointer(failover->failover_dev, dev); + + spin_lock(&failover_lock); + list_add_tail(&failover->list, &failover_list); + spin_unlock(&failover_lock); + + netdev_info(dev, "failover master:%s registered\n", dev->name); + + failover_existing_slave_register(dev); + + return failover; +} +EXPORT_SYMBOL_GPL(failover_register); + +/** + * failover_unregister - Unregister a failover instance + * + * @failover: pointer to failover instance + * + * Unregisters and frees a failover instance. + */ +void failover_unregister(struct failover *failover) +{ + struct net_device *failover_dev; + + failover_dev = rcu_dereference(failover->failover_dev); + + netdev_info(failover_dev, "failover master:%s unregistered\n", + failover_dev->name); + + failover_dev->priv_flags &= ~IFF_FAILOVER; + dev_put(failover_dev); + + spin_lock(&failover_lock); + list_del(&failover->list); + spin_unlock(&failover_lock); + + kfree(failover); +} +EXPORT_SYMBOL_GPL(failover_unregister); + +static __init int +failover_init(void) +{ + register_netdevice_notifier(&failover_notifier); + + return 0; +} +module_init(failover_init); + +static __exit +void failover_exit(void) +{ + unregister_netdevice_notifier(&failover_notifier); +} +module_exit(failover_exit); + +MODULE_DESCRIPTION("Generic failover infrastructure/interface"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From cfc80d9a11635404a40199a1c9471c96890f3f74 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 24 May 2018 09:55:15 -0700 Subject: net: Introduce net_failover driver The net_failover driver provides an 
automated failover mechanism via APIs to create and destroy a failover master netdev and manages a primary and standby slave netdevs that get registered via the generic failover infrastructure. The failover netdev acts as a master device and controls 2 slave devices. The original paravirtual interface gets registered as 'standby' slave netdev and a passthru/vf device with the same MAC gets registered as 'primary' slave netdev. Both 'standby' and 'failover' netdevs are associated with the same 'pci' device. The user accesses the network interface via 'failover' netdev. The 'failover' netdev chooses 'primary' netdev as default for transmits when it is available with link up and running. This can be used by paravirtual drivers to enable an alternate low latency datapath. It also enables hypervisor controlled live migration of a VM with direct attached VF by failing over to the paravirtual datapath when the VF is unplugged. Signed-off-by: Sridhar Samudrala Signed-off-by: David S. Miller --- Documentation/networking/net_failover.rst | 26 + MAINTAINERS | 8 + drivers/net/Kconfig | 12 + drivers/net/Makefile | 1 + drivers/net/net_failover.c | 836 ++++++++++++++++++++++++++++++ include/net/net_failover.h | 40 ++ 6 files changed, 923 insertions(+) create mode 100644 Documentation/networking/net_failover.rst create mode 100644 drivers/net/net_failover.c create mode 100644 include/net/net_failover.h (limited to 'MAINTAINERS') diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst new file mode 100644 index 000000000000..d4513ad31809 --- /dev/null +++ b/Documentation/networking/net_failover.rst @@ -0,0 +1,26 @@ +..
SPDX-License-Identifier: GPL-2.0 + +============ +NET_FAILOVER +============ + +Overview +======== + +The net_failover driver provides an automated failover mechanism via APIs +to create and destroy a failover master netdev and manages a primary and +standby slave netdevs that get registered via the generic failover +infrastructure. + +The failover netdev acts as a master device and controls 2 slave devices. The +original paravirtual interface is registered as 'standby' slave netdev and +a passthru/vf device with the same MAC gets registered as 'primary' slave +netdev. Both 'standby' and 'failover' netdevs are associated with the same +'pci' device. The user accesses the network interface via 'failover' netdev. +The 'failover' netdev chooses 'primary' netdev as default for transmits when +it is available with link up and running. + +This can be used by paravirtual drivers to enable an alternate low latency +datapath. It also enables hypervisor controlled live migration of a VM with +direct attached VF by failing over to the paravirtual datapath when the VF +is unplugged. diff --git a/MAINTAINERS b/MAINTAINERS index 6c59bdf49a8a..1831ff5863a1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9654,6 +9654,14 @@ S: Maintained F: Documentation/hwmon/nct6775 F: drivers/hwmon/nct6775.c +NET_FAILOVER MODULE +M: Sridhar Samudrala +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/net_failover.c +F: include/net/net_failover.h +F: Documentation/networking/net_failover.rst + NETEFFECT IWARP RNIC DRIVER (IW_NES) M: Faisal Latif L: linux-rdma@vger.kernel.org diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index a029b27fd002..2cdaff90a9ec 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -510,4 +510,16 @@ config NETDEVSIM To compile this driver as a module, choose M here: the module will be called netdevsim. 
+config NET_FAILOVER + tristate "Failover driver" + select FAILOVER + help + This provides an automated failover mechanism via APIs to create + and destroy a failover master netdev and manages a primary and + standby slave netdevs that get registered via the generic failover + infrastructure. This can be used by paravirtual drivers to enable + an alternate low latency datapath. It also enables live migration of + a VM with direct attached VF by failing over to the paravirtual + datapath when the VF is unplugged. + endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 91e67e375dd4..21cde7e78621 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -78,3 +78,4 @@ obj-$(CONFIG_FUJITSU_ES) += fjes/ thunderbolt-net-y += thunderbolt.o obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o obj-$(CONFIG_NETDEVSIM) += netdevsim/ +obj-$(CONFIG_NET_FAILOVER) += net_failover.o diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c new file mode 100644 index 000000000000..8b508e2cf29b --- /dev/null +++ b/drivers/net/net_failover.c @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +/* This provides a net_failover interface for paravirtual drivers to + * provide an alternate datapath by exporting APIs to create and + * destroy an upper 'net_failover' netdev. The upper dev manages the + * original paravirtual interface as a 'standby' netdev and uses the + * generic failover infrastructure to register and manage a direct + * attached VF as a 'primary' netdev. This enables live migration of + * a VM with direct attached VF by failing over to the paravirtual + * datapath when the VF is unplugged. + * + * Some of the netdev management routines are based on bond/team driver as + * this driver provides active-backup functionality similar to those drivers. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static bool net_failover_xmit_ready(struct net_device *dev) +{ + return netif_running(dev) && netif_carrier_ok(dev); +} + +static int net_failover_open(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int err; + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + if (primary_dev) { + err = dev_open(primary_dev); + if (err) + goto err_primary_open; + } + + standby_dev = rtnl_dereference(nfo_info->standby_dev); + if (standby_dev) { + err = dev_open(standby_dev); + if (err) + goto err_standby_open; + } + + if ((primary_dev && net_failover_xmit_ready(primary_dev)) || + (standby_dev && net_failover_xmit_ready(standby_dev))) { + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } + + return 0; + +err_standby_open: + dev_close(primary_dev); +err_primary_open: + netif_tx_disable(dev); + return err; +} + +static int net_failover_close(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + netif_tx_disable(dev); + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (slave_dev) + dev_close(slave_dev); + + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (slave_dev) + dev_close(slave_dev); + + return 0; +} + +static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + atomic_long_inc(&dev->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *xmit_dev; + + /* Try xmit via primary netdev followed by standby netdev */ + xmit_dev = rcu_dereference_bh(nfo_info->primary_dev); + if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) { + xmit_dev = 
rcu_dereference_bh(nfo_info->standby_dev); + if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) + return net_failover_drop_xmit(skb, dev); + } + + skb->dev = xmit_dev; + skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + + return dev_queue_xmit(skb); +} + +static u16 net_failover_select_queue(struct net_device *dev, + struct sk_buff *skb, void *accel_priv, + select_queue_fallback_t fallback) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev; + u16 txq; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + const struct net_device_ops *ops = primary_dev->netdev_ops; + + if (ops->ndo_select_queue) + txq = ops->ndo_select_queue(primary_dev, skb, + accel_priv, fallback); + else + txq = fallback(primary_dev, skb); + + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + + return txq; + } + + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; + + /* Save the original txq to restore before passing to the driver */ + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + + if (unlikely(txq >= dev->real_num_tx_queues)) { + do { + txq -= dev->real_num_tx_queues; + } while (txq >= dev->real_num_tx_queues); + } + + return txq; +} + +/* fold stats, assuming all rtnl_link_stats64 fields are u64, but + * that some drivers can provide 32bit values only. + */ +static void net_failover_fold_stats(struct rtnl_link_stats64 *_res, + const struct rtnl_link_stats64 *_new, + const struct rtnl_link_stats64 *_old) +{ + const u64 *new = (const u64 *)_new; + const u64 *old = (const u64 *)_old; + u64 *res = (u64 *)_res; + int i; + + for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { + u64 nv = new[i]; + u64 ov = old[i]; + s64 delta = nv - ov; + + /* detects if this particular field is 32bit only */ + if (((nv | ov) >> 32) == 0) + delta = (s64)(s32)((u32)nv - (u32)ov); + + /* filter anomalies, some drivers reset their stats + * at down/up events. 
+ */ + if (delta > 0) + res[i] += delta; + } +} + +static void net_failover_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + const struct rtnl_link_stats64 *new; + struct rtnl_link_stats64 temp; + struct net_device *slave_dev; + + spin_lock(&nfo_info->stats_lock); + memcpy(stats, &nfo_info->failover_stats, sizeof(*stats)); + + rcu_read_lock(); + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) { + new = dev_get_stats(slave_dev, &temp); + net_failover_fold_stats(stats, new, &nfo_info->primary_stats); + memcpy(&nfo_info->primary_stats, new, sizeof(*new)); + } + + slave_dev = rcu_dereference(nfo_info->standby_dev); + if (slave_dev) { + new = dev_get_stats(slave_dev, &temp); + net_failover_fold_stats(stats, new, &nfo_info->standby_stats); + memcpy(&nfo_info->standby_stats, new, sizeof(*new)); + } + + rcu_read_unlock(); + + memcpy(&nfo_info->failover_stats, stats, sizeof(*stats)); + spin_unlock(&nfo_info->stats_lock); +} + +static int net_failover_change_mtu(struct net_device *dev, int new_mtu) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int ret = 0; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + ret = dev_set_mtu(primary_dev, new_mtu); + if (ret) + return ret; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + ret = dev_set_mtu(standby_dev, new_mtu); + if (ret) { + if (primary_dev) + dev_set_mtu(primary_dev, dev->mtu); + return ret; + } + } + + dev->mtu = new_mtu; + + return 0; +} + +static void net_failover_set_rx_mode(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + rcu_read_lock(); + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) { + dev_uc_sync_multiple(slave_dev, dev); + dev_mc_sync_multiple(slave_dev, dev); + } + + slave_dev = 
rcu_dereference(nfo_info->standby_dev); + if (slave_dev) { + dev_uc_sync_multiple(slave_dev, dev); + dev_mc_sync_multiple(slave_dev, dev); + } + + rcu_read_unlock(); +} + +static int net_failover_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int ret = 0; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + ret = vlan_vid_add(primary_dev, proto, vid); + if (ret) + return ret; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + ret = vlan_vid_add(standby_dev, proto, vid); + if (ret) + if (primary_dev) + vlan_vid_del(primary_dev, proto, vid); + } + + return ret; +} + +static int net_failover_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) + vlan_vid_del(slave_dev, proto, vid); + + slave_dev = rcu_dereference(nfo_info->standby_dev); + if (slave_dev) + vlan_vid_del(slave_dev, proto, vid); + + return 0; +} + +static const struct net_device_ops failover_dev_ops = { + .ndo_open = net_failover_open, + .ndo_stop = net_failover_close, + .ndo_start_xmit = net_failover_start_xmit, + .ndo_select_queue = net_failover_select_queue, + .ndo_get_stats64 = net_failover_get_stats, + .ndo_change_mtu = net_failover_change_mtu, + .ndo_set_rx_mode = net_failover_set_rx_mode, + .ndo_vlan_rx_add_vid = net_failover_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = net_failover_vlan_rx_kill_vid, + .ndo_validate_addr = eth_validate_addr, + .ndo_features_check = passthru_features_check, +}; + +#define FAILOVER_NAME "net_failover" +#define FAILOVER_VERSION "0.1" + +static void nfo_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver)); + 
strlcpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version)); +} + +static int nfo_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (!slave_dev || !net_failover_xmit_ready(slave_dev)) { + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (!slave_dev || !net_failover_xmit_ready(slave_dev)) { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.port = PORT_OTHER; + cmd->base.speed = SPEED_UNKNOWN; + + return 0; + } + } + + return __ethtool_get_link_ksettings(slave_dev, cmd); +} + +static const struct ethtool_ops failover_ethtool_ops = { + .get_drvinfo = nfo_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = nfo_ethtool_get_link_ksettings, +}; + +/* Called when slave dev is injecting data into network stack. + * Change the associated network device from lower dev to failover dev. 
+ * note: already called with rcu_read_lock + */ +static rx_handler_result_t net_failover_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct net_device *dev = rcu_dereference(skb->dev->rx_handler_data); + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + standby_dev = rcu_dereference(nfo_info->standby_dev); + + if (primary_dev && skb->dev == standby_dev) + return RX_HANDLER_EXACT; + + skb->dev = dev; + + return RX_HANDLER_ANOTHER; +} + +static void net_failover_compute_features(struct net_device *dev) +{ + u32 vlan_features = FAILOVER_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; + netdev_features_t enc_features = FAILOVER_ENC_FEATURES; + unsigned short max_hard_header_len = ETH_HLEN; + unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM; + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + vlan_features = + netdev_increment_features(vlan_features, + primary_dev->vlan_features, + FAILOVER_VLAN_FEATURES); + enc_features = + netdev_increment_features(enc_features, + primary_dev->hw_enc_features, + FAILOVER_ENC_FEATURES); + + dst_release_flag &= primary_dev->priv_flags; + if (primary_dev->hard_header_len > max_hard_header_len) + max_hard_header_len = primary_dev->hard_header_len; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + vlan_features = + netdev_increment_features(vlan_features, + standby_dev->vlan_features, + FAILOVER_VLAN_FEATURES); + enc_features = + netdev_increment_features(enc_features, + standby_dev->hw_enc_features, + FAILOVER_ENC_FEATURES); + + dst_release_flag &= standby_dev->priv_flags; + if (standby_dev->hard_header_len > max_hard_header_len) + max_hard_header_len = standby_dev->hard_header_len; + } + + dev->vlan_features = 
vlan_features; + dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; + dev->hard_header_len = max_hard_header_len; + + dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + if (dst_release_flag == (IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM)) + dev->priv_flags |= IFF_XMIT_DST_RELEASE; + + netdev_change_features(dev); +} + +static void net_failover_lower_state_changed(struct net_device *slave_dev, + struct net_device *primary_dev, + struct net_device *standby_dev) +{ + struct netdev_lag_lower_state_info info; + + if (netif_carrier_ok(slave_dev)) + info.link_up = true; + else + info.link_up = false; + + if (slave_dev == primary_dev) { + if (netif_running(primary_dev)) + info.tx_enabled = true; + else + info.tx_enabled = false; + } else { + if ((primary_dev && netif_running(primary_dev)) || + (!netif_running(standby_dev))) + info.tx_enabled = false; + else + info.tx_enabled = true; + } + + netdev_lower_state_changed(slave_dev, &info); +} + +static int net_failover_slave_pre_register(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + + nfo_info = netdev_priv(failover_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + if (slave_is_standby ? standby_dev : primary_dev) { + netdev_err(failover_dev, "%s attempting to register as slave dev when %s already present\n", + slave_dev->name, + slave_is_standby ? "standby" : "primary"); + return -EINVAL; + } + + /* We want to allow only a direct attached VF device as a primary + * netdev. As there is no easy way to check for a VF device, restrict + * this to a pci device. 
+ */ + if (!slave_is_standby && (!slave_dev->dev.parent || + !dev_is_pci(slave_dev->dev.parent))) + return -EINVAL; + + if (failover_dev->features & NETIF_F_VLAN_CHALLENGED && + vlan_uses_dev(failover_dev)) { + netdev_err(failover_dev, "Device %s is VLAN challenged and failover device has VLAN set up\n", + failover_dev->name); + return -EINVAL; + } + + return 0; +} + +static int net_failover_slave_register(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + u32 orig_mtu; + int err; + + /* Align MTU of slave with failover dev */ + orig_mtu = slave_dev->mtu; + err = dev_set_mtu(slave_dev, failover_dev->mtu); + if (err) { + netdev_err(failover_dev, "unable to change mtu of %s to %u register failed\n", + slave_dev->name, failover_dev->mtu); + goto done; + } + + dev_hold(slave_dev); + + if (netif_running(failover_dev)) { + err = dev_open(slave_dev); + if (err && (err != -EBUSY)) { + netdev_err(failover_dev, "Opening slave %s failed err:%d\n", + slave_dev->name, err); + goto err_dev_open; + } + } + + netif_addr_lock_bh(failover_dev); + dev_uc_sync_multiple(slave_dev, failover_dev); + dev_mc_sync_multiple(slave_dev, failover_dev); + netif_addr_unlock_bh(failover_dev); + + err = vlan_vids_add_by_dev(slave_dev, failover_dev); + if (err) { + netdev_err(failover_dev, "Failed to add vlan ids to device %s err:%d\n", + slave_dev->name, err); + goto err_vlan_add; + } + + nfo_info = netdev_priv(failover_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + + if (slave_is_standby) { + rcu_assign_pointer(nfo_info->standby_dev, slave_dev); + standby_dev = slave_dev; + dev_get_stats(standby_dev, &nfo_info->standby_stats); + } else { + rcu_assign_pointer(nfo_info->primary_dev, slave_dev); + primary_dev = slave_dev; + 
dev_get_stats(primary_dev, &nfo_info->primary_stats); + failover_dev->min_mtu = slave_dev->min_mtu; + failover_dev->max_mtu = slave_dev->max_mtu; + } + + net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev); + net_failover_compute_features(failover_dev); + + call_netdevice_notifiers(NETDEV_JOIN, slave_dev); + + netdev_info(failover_dev, "failover %s slave:%s registered\n", + slave_is_standby ? "standby" : "primary", slave_dev->name); + + return 0; + +err_vlan_add: + dev_uc_unsync(slave_dev, failover_dev); + dev_mc_unsync(slave_dev, failover_dev); + dev_close(slave_dev); +err_dev_open: + dev_put(slave_dev); + dev_set_mtu(slave_dev, orig_mtu); +done: + return err; +} + +static int net_failover_slave_pre_unregister(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + return 0; +} + +static int net_failover_slave_unregister(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + + nfo_info = netdev_priv(failover_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + vlan_vids_del_by_dev(slave_dev, failover_dev); + dev_uc_unsync(slave_dev, failover_dev); + dev_mc_unsync(slave_dev, failover_dev); + dev_close(slave_dev); + + nfo_info = netdev_priv(failover_dev); + dev_get_stats(failover_dev, &nfo_info->failover_stats); + + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + if (slave_is_standby) { + RCU_INIT_POINTER(nfo_info->standby_dev, NULL); + } else { + RCU_INIT_POINTER(nfo_info->primary_dev, NULL); + if 
(standby_dev) { + failover_dev->min_mtu = standby_dev->min_mtu; + failover_dev->max_mtu = standby_dev->max_mtu; + } + } + + dev_put(slave_dev); + + net_failover_compute_features(failover_dev); + + netdev_info(failover_dev, "failover %s slave:%s unregistered\n", + slave_is_standby ? "standby" : "primary", slave_dev->name); + + return 0; +} + +static int net_failover_slave_link_change(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *primary_dev, *standby_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + if ((primary_dev && net_failover_xmit_ready(primary_dev)) || + (standby_dev && net_failover_xmit_ready(standby_dev))) { + netif_carrier_on(failover_dev); + netif_tx_wake_all_queues(failover_dev); + } else { + dev_get_stats(failover_dev, &nfo_info->failover_stats); + netif_carrier_off(failover_dev); + netif_tx_stop_all_queues(failover_dev); + } + + net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev); + + return 0; +} + +static int net_failover_slave_name_change(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *primary_dev, *standby_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + /* We need to bring up the slave after the rename by udev in case + * open failed with EBUSY when it was registered. 
+ */ + dev_open(slave_dev); + + return 0; +} + +static struct failover_ops net_failover_ops = { + .slave_pre_register = net_failover_slave_pre_register, + .slave_register = net_failover_slave_register, + .slave_pre_unregister = net_failover_slave_pre_unregister, + .slave_unregister = net_failover_slave_unregister, + .slave_link_change = net_failover_slave_link_change, + .slave_name_change = net_failover_slave_name_change, + .slave_handle_frame = net_failover_handle_frame, +}; + +/** + * net_failover_create - Create and register a failover instance + * + * @dev: standby netdev + * + * Creates a failover netdev and registers a failover instance for a standby + * netdev. Used by paravirtual drivers that use 3-netdev model. + * The failover netdev acts as a master device and controls 2 slave devices - + * the original standby netdev and a VF netdev with the same MAC gets + * registered as primary netdev. + * + * Return: pointer to failover instance + */ +struct failover *net_failover_create(struct net_device *standby_dev) +{ + struct device *dev = standby_dev->dev.parent; + struct net_device *failover_dev; + struct failover *failover; + int err; + + /* Alloc at least 2 queues, for now we are going with 16 assuming + * that VF devices being enslaved won't have too many queues. 
+ */ + failover_dev = alloc_etherdev_mq(sizeof(struct net_failover_info), 16); + if (!failover_dev) { + dev_err(dev, "Unable to allocate failover_netdev!\n"); + return ERR_PTR(-ENOMEM); + } + + dev_net_set(failover_dev, dev_net(standby_dev)); + SET_NETDEV_DEV(failover_dev, dev); + + failover_dev->netdev_ops = &failover_dev_ops; + failover_dev->ethtool_ops = &failover_ethtool_ops; + + /* Initialize the device options */ + failover_dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; + failover_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | + IFF_TX_SKB_SHARING); + + /* don't acquire failover netdev's netif_tx_lock when transmitting */ + failover_dev->features |= NETIF_F_LLTX; + + /* Don't allow failover devices to change network namespaces. */ + failover_dev->features |= NETIF_F_NETNS_LOCAL; + + failover_dev->hw_features = FAILOVER_VLAN_FEATURES | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; + failover_dev->features |= failover_dev->hw_features; + + memcpy(failover_dev->dev_addr, standby_dev->dev_addr, + failover_dev->addr_len); + + failover_dev->min_mtu = standby_dev->min_mtu; + failover_dev->max_mtu = standby_dev->max_mtu; + + err = register_netdev(failover_dev); + if (err) { + dev_err(dev, "Unable to register failover_dev!\n"); + goto err_register_netdev; + } + + netif_carrier_off(failover_dev); + + failover = failover_register(failover_dev, &net_failover_ops); + if (IS_ERR(failover)) + goto err_failover_register; + + return failover; + +err_failover_register: + unregister_netdev(failover_dev); +err_register_netdev: + free_netdev(failover_dev); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(net_failover_create); + +/** + * net_failover_destroy - Destroy a failover instance + * + * @failover: pointer to failover instance + * + * Unregisters any slave netdevs associated with the failover instance by + * calling failover_slave_unregister(). 
+ * unregisters the failover instance itself and finally frees the failover + * netdev. Used by paravirtual drivers that use 3-netdev model. + * + */ +void net_failover_destroy(struct failover *failover) +{ + struct net_failover_info *nfo_info; + struct net_device *failover_dev; + struct net_device *slave_dev; + + if (!failover) + return; + + failover_dev = rcu_dereference(failover->failover_dev); + nfo_info = netdev_priv(failover_dev); + + netif_device_detach(failover_dev); + + rtnl_lock(); + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (slave_dev) + failover_slave_unregister(slave_dev); + + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (slave_dev) + failover_slave_unregister(slave_dev); + + failover_unregister(failover); + + unregister_netdevice(failover_dev); + + rtnl_unlock(); + + free_netdev(failover_dev); +} +EXPORT_SYMBOL_GPL(net_failover_destroy); + +static __init int +net_failover_init(void) +{ + return 0; +} +module_init(net_failover_init); + +static __exit +void net_failover_exit(void) +{ +} +module_exit(net_failover_exit); + +MODULE_DESCRIPTION("Failover driver for Paravirtual drivers"); +MODULE_LICENSE("GPL v2"); diff --git a/include/net/net_failover.h b/include/net/net_failover.h new file mode 100644 index 000000000000..b12a1c469d1c --- /dev/null +++ b/include/net/net_failover.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
*/ + +#ifndef _NET_FAILOVER_H +#define _NET_FAILOVER_H + +#include + +/* failover state */ +struct net_failover_info { + /* primary netdev with same MAC */ + struct net_device __rcu *primary_dev; + + /* standby netdev */ + struct net_device __rcu *standby_dev; + + /* primary netdev stats */ + struct rtnl_link_stats64 primary_stats; + + /* standby netdev stats */ + struct rtnl_link_stats64 standby_stats; + + /* aggregated stats */ + struct rtnl_link_stats64 failover_stats; + + /* spinlock while updating stats */ + spinlock_t stats_lock; +}; + +struct failover *net_failover_create(struct net_device *standby_dev); +void net_failover_destroy(struct failover *failover); + +#define FAILOVER_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ + NETIF_F_HIGHDMA | NETIF_F_LRO) + +#define FAILOVER_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_RXCSUM | NETIF_F_ALL_TSO) + +#endif /* _NET_FAILOVER_H */ -- cgit v1.2.3 From 69b7516c5632121510da46ad9a02b6c57b4ce271 Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Tue, 15 May 2018 17:33:19 +0200 Subject: MAINTAINERS: Add entry for STM32 timer and lptimer drivers Add an entry to make myself a maintainer of STM32 timer and lptimer drivers. 
Signed-off-by: Fabrice Gasnier Signed-off-by: Lee Jones --- MAINTAINERS | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..360bbe8f46d7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13382,6 +13382,16 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/usb/stk1160/ +STM32 TIMER/LPTIMER DRIVERS +M: Fabrice Gasnier +S: Maintained +F: drivers/*/stm32-*timer* +F: drivers/pwm/pwm-stm32* +F: include/linux/*/stm32-*tim* +F: Documentation/ABI/testing/*timer-stm32 +F: Documentation/devicetree/bindings/*/stm32-*timer +F: Documentation/devicetree/bindings/pwm/pwm-stm32* + STMMAC ETHERNET DRIVER M: Giuseppe Cavallaro M: Alexandre Torgue -- cgit v1.2.3 From 46e2856b8e188949757c9123fd7f9ce36edd1a52 Mon Sep 17 00:00:00 2001 From: Ilia Lin Date: Wed, 30 May 2018 05:39:28 +0300 Subject: cpufreq: Add Kryo CPU scaling driver In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors, the CPU frequency subset and voltage value of each OPP varies based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables defines the voltage and frequency value based on the msm-id in SMEM and speedbin blown in the efuse combination. The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC to provide the OPP framework with required information. This is used to determine the voltage and frequency value for each OPP of operating-points-v2 table when it is parsed by the OPP framework. Signed-off-by: Ilia Lin Acked-by: Viresh Kumar Reviewed-by: Amit Kucheria Tested-by: Amit Kucheria Signed-off-by: Rafael J. 
Wysocki --- MAINTAINERS | 7 ++ drivers/cpufreq/Kconfig.arm | 11 ++ drivers/cpufreq/Makefile | 1 + drivers/cpufreq/cpufreq-dt-platdev.c | 3 + drivers/cpufreq/qcom-cpufreq-kryo.c | 212 +++++++++++++++++++++++++++++++++++ 5 files changed, 234 insertions(+) create mode 100644 drivers/cpufreq/qcom-cpufreq-kryo.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 58b9861ccf99..02f6a49f3272 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11653,6 +11653,13 @@ F: Documentation/devicetree/bindings/media/qcom,camss.txt F: Documentation/media/v4l-drivers/qcom_camss.rst F: drivers/media/platform/qcom/camss-8x16/ +QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096 +M: Ilia Lin +L: linux-pm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt +F: drivers/cpufreq/qcom-cpufreq-kryo.c + QUALCOMM EMAC GIGABIT ETHERNET DRIVER M: Timur Tabi L: netdev@vger.kernel.org diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index a8a2e210c624..c7ce928fbf1f 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -124,6 +124,17 @@ config ARM_OMAP2PLUS_CPUFREQ depends on ARCH_OMAP2PLUS default ARCH_OMAP2PLUS +config ARM_QCOM_CPUFREQ_KRYO + bool "Qualcomm Kryo based CPUFreq" + depends on ARM64 + depends on QCOM_QFPROM + depends on QCOM_SMEM + select PM_OPP + help + This adds the CPUFreq driver for Qualcomm Kryo SoC based boards. + + If in doubt, say N. 
+ config ARM_S3C_CPUFREQ bool help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 8d24ade3bd02..fb4a2ecac43b 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o +obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO) += qcom-cpufreq-kryo.o obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index c2165edadd41..fe14c57de6ca 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -116,6 +116,9 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "nvidia,tegra124", }, + { .compatible = "qcom,apq8096", }, + { .compatible = "qcom,msm8996", }, + { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c new file mode 100644 index 000000000000..d049fe4b80c4 --- /dev/null +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +/* + * In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors, + * the CPU frequency subset and voltage value of each OPP varies + * based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables + * defines the voltage and frequency value based on the msm-id in SMEM + * and speedbin blown in the efuse combination. + * The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC + * to provide the OPP framework with required information. 
+ * This is used to determine the voltage and frequency value for each OPP of + * operating-points-v2 table when it is parsed by the OPP framework. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MSM_ID_SMEM 137 + +enum _msm_id { + MSM8996V3 = 0xF6ul, + APQ8096V3 = 0x123ul, + MSM8996SG = 0x131ul, + APQ8096SG = 0x138ul, +}; + +enum _msm8996_version { + MSM8996_V3, + MSM8996_SG, + NUM_OF_MSM8996_VERSIONS, +}; + +static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) +{ + size_t len; + u32 *msm_id; + enum _msm8996_version version; + + msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len); + if (IS_ERR(msm_id)) + return NUM_OF_MSM8996_VERSIONS; + + /* The first 4 bytes are format, next to them is the actual msm-id */ + msm_id++; + + switch ((enum _msm_id)*msm_id) { + case MSM8996V3: + case APQ8096V3: + version = MSM8996_V3; + break; + case MSM8996SG: + case APQ8096SG: + version = MSM8996_SG; + break; + default: + version = NUM_OF_MSM8996_VERSIONS; + } + + return version; +} + +static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) +{ + struct opp_table *opp_tables[NR_CPUS] = {0}; + struct platform_device *cpufreq_dt_pdev; + enum _msm8996_version msm8996_version; + struct nvmem_cell *speedbin_nvmem; + struct device_node *np; + struct device *cpu_dev; + unsigned cpu; + u8 *speedbin; + u32 versions; + size_t len; + int ret; + + cpu_dev = get_cpu_device(0); + if (NULL == cpu_dev) + ret = -ENODEV; + + msm8996_version = qcom_cpufreq_kryo_get_msm_id(); + if (NUM_OF_MSM8996_VERSIONS == msm8996_version) { + dev_err(cpu_dev, "Not Snapdragon 820/821!"); + return -ENODEV; + } + + np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (IS_ERR(np)) + return PTR_ERR(np); + + ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); + if (!ret) { + of_node_put(np); + return -ENOENT; + } + + speedbin_nvmem = of_nvmem_cell_get(np, NULL); + of_node_put(np); + if 
(IS_ERR(speedbin_nvmem)) { + dev_err(cpu_dev, "Could not get nvmem cell: %ld\n", + PTR_ERR(speedbin_nvmem)); + return PTR_ERR(speedbin_nvmem); + } + + speedbin = nvmem_cell_read(speedbin_nvmem, &len); + nvmem_cell_put(speedbin_nvmem); + + switch (msm8996_version) { + case MSM8996_V3: + versions = 1 << (unsigned int)(*speedbin); + break; + case MSM8996_SG: + versions = 1 << ((unsigned int)(*speedbin) + 4); + break; + default: + BUG(); + break; + } + + for_each_possible_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (NULL == cpu_dev) { + ret = -ENODEV; + goto free_opp; + } + + opp_tables[cpu] = dev_pm_opp_set_supported_hw(cpu_dev, + &versions, 1); + if (IS_ERR(opp_tables[cpu])) { + ret = PTR_ERR(opp_tables[cpu]); + dev_err(cpu_dev, "Failed to set supported hardware\n"); + goto free_opp; + } + } + + cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, + NULL, 0); + if (!IS_ERR(cpufreq_dt_pdev)) + return 0; + + ret = PTR_ERR(cpufreq_dt_pdev); + dev_err(cpu_dev, "Failed to register platform device\n"); + +free_opp: + for_each_possible_cpu(cpu) { + if (IS_ERR_OR_NULL(opp_tables[cpu])) + break; + dev_pm_opp_put_supported_hw(opp_tables[cpu]); + } + + return ret; +} + +static struct platform_driver qcom_cpufreq_kryo_driver = { + .probe = qcom_cpufreq_kryo_probe, + .driver = { + .name = "qcom-cpufreq-kryo", + }, +}; + +static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = { + { .compatible = "qcom,apq8096", }, + { .compatible = "qcom,msm8996", }, +}; + +/* + * Since the driver depends on smem and nvmem drivers, which may + * return EPROBE_DEFER, all the real activity is done in the probe, + * which may be defered as well. The init here is only registering + * the driver and the platform device. 
+ */ +static int __init qcom_cpufreq_kryo_init(void) +{ + struct device_node *np = of_find_node_by_path("/"); + const struct of_device_id *match; + int ret; + + if (!np) + return -ENODEV; + + match = of_match_node(qcom_cpufreq_kryo_match_list, np); + of_node_put(np); + if (!match) + return -ENODEV; + + ret = platform_driver_register(&qcom_cpufreq_kryo_driver); + if (unlikely(ret < 0)) + return ret; + + ret = PTR_ERR_OR_ZERO(platform_device_register_simple( + "qcom-cpufreq-kryo", -1, NULL, 0)); + if (0 == ret) + return 0; + + platform_driver_unregister(&qcom_cpufreq_kryo_driver); + return ret; +} +module_init(qcom_cpufreq_kryo_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 92f9ccca4c08802504be8bd7528641eaacf47474 Mon Sep 17 00:00:00 2001 From: Subrahmanya Lingappa Date: Thu, 10 May 2018 00:34:53 -0400 Subject: PCI: mobiveil: Add Mobiveil PCIe Host Bridge IP driver DT bindings Add DT bindings for the Mobiveil PCIe Host Bridge IP driver and update the vendor prefixes file. Signed-off-by: Subrahmanya Lingappa Signed-off-by: Lorenzo Pieralisi Signed-off-by: Bjorn Helgaas Acked-by: Rob Herring --- .../devicetree/bindings/pci/mobiveil-pcie.txt | 73 ++++++++++++++++++++++ .../devicetree/bindings/vendor-prefixes.txt | 1 + MAINTAINERS | 7 +++ 3 files changed, 81 insertions(+) create mode 100644 Documentation/devicetree/bindings/pci/mobiveil-pcie.txt (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt new file mode 100644 index 000000000000..65038aa642e5 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt @@ -0,0 +1,73 @@ +* Mobiveil AXI PCIe Root Port Bridge DT description + +Mobiveil's GPEX 4.0 is a PCIe Gen4 root port bridge IP. This configurable IP +has up to 8 outbound and inbound windows for the address translation. 
+ +Required properties: +- #address-cells: Address representation for root ports, set to <3> +- #size-cells: Size representation for root ports, set to <2> +- #interrupt-cells: specifies the number of cells needed to encode an + interrupt source. The value must be 1. +- compatible: Should contain "mbvl,gpex40-pcie" +- reg: Should contain PCIe registers location and length + "config_axi_slave": PCIe controller registers + "csr_axi_slave" : Bridge config registers + "gpio_slave" : GPIO registers to control slot power + "apb_csr" : MSI registers + +- device_type: must be "pci" +- apio-wins : number of requested apio outbound windows + default 2 outbound windows are configured - + 1. Config window + 2. Memory window +- ppio-wins : number of requested ppio inbound windows + default 1 inbound memory window is configured. +- bus-range: PCI bus numbers covered +- interrupt-controller: identifies the node as an interrupt controller +- #interrupt-cells: specifies the number of cells needed to encode an + interrupt source. The value must be 1. +- interrupt-parent : phandle to the interrupt controller that + it is attached to, it should be set to gic to point to + ARM's Generic Interrupt Controller node in system DT. +- interrupts: The interrupt line of the PCIe controller + last cell of this field is set to 4 to + denote it as IRQ_TYPE_LEVEL_HIGH type interrupt. +- interrupt-map-mask, + interrupt-map: standard PCI properties to define the mapping of the + PCI interface to interrupt numbers. 
+- ranges: ranges for the PCI memory regions (I/O space region is not + supported by hardware) + Please refer to the standard PCI bus binding document for a more + detailed explanation + + +Example: +++++++++ + pcie0: pcie@a0000000 { + #address-cells = <3>; + #size-cells = <2>; + compatible = "mbvl,gpex40-pcie"; + reg = <0xa0000000 0x00001000>, + <0xb0000000 0x00010000>, + <0xff000000 0x00200000>, + <0xb0010000 0x00001000>; + reg-names = "config_axi_slave", + "csr_axi_slave", + "gpio_slave", + "apb_csr"; + device_type = "pci"; + apio-wins = <2>; + ppio-wins = <1>; + bus-range = <0x00000000 0x000000ff>; + interrupt-controller; + interrupt-parent = <&gic>; + #interrupt-cells = <1>; + interrupts = < 0 89 4 >; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 0 &pci_express 0>, + <0 0 0 1 &pci_express 1>, + <0 0 0 2 &pci_express 2>, + <0 0 0 3 &pci_express 3>; + ranges = < 0x83000000 0 0x00000000 0xa8000000 0 0x8000000>; + + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index b5f978a4cac6..1b7e7c36fda1 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -203,6 +203,7 @@ lwn Liebherr-Werk Nenzing GmbH macnica Macnica Americas marvell Marvell Technology Group Ltd. maxim Maxim Integrated Products +mbvl Mobiveil Inc. mcube mCube meas Measurement Specialties mediatek MediaTek Inc. 
diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..7fe4a2cadd3d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9393,6 +9393,13 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/mn88473* +PCI DRIVER FOR MOBIVEIL PCIE IP +M: Subrahmanya Lingappa +L: linux-pci@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt +F: drivers/pci/host/pcie-mobiveil.c + MODULE SUPPORT M: Jessica Yu T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next -- cgit v1.2.3 From 0c3a4cf84fb9ab17a4c9d3b34cfd6098887dd998 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Tue, 29 May 2018 11:47:44 +0800 Subject: MAINTAINERS: add myself as maintainer for QorIQ PTP clock driver Added myself as maintainer for QorIQ PTP clock driver. Since gianfar_ptp.c was renamed to ptp_qoriq.c, let's maintain it under QorIQ PTP clock driver. Signed-off-by: Yangbo Lu Signed-off-by: David S. Miller --- MAINTAINERS | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1831ff5863a1..e7396119ce58 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5628,7 +5628,6 @@ M: Claudiu Manoil L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/gianfar* -X: drivers/net/ethernet/freescale/gianfar_ptp.c F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt FREESCALE GPMI NAND DRIVER @@ -5675,6 +5674,14 @@ S: Maintained F: drivers/net/ethernet/freescale/fman F: Documentation/devicetree/bindings/powerpc/fsl/fman.txt +FREESCALE QORIQ PTP CLOCK DRIVER +M: Yangbo Lu +L: netdev@vger.kernel.org +S: Maintained +F: drivers/ptp/ptp_qoriq.c +F: include/linux/fsl/ptp_qoriq.h +F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt + FREESCALE QUAD SPI DRIVER M: Han Xu L: linux-mtd@lists.infradead.org @@ -11421,7 +11428,6 @@ S: Maintained W: http://linuxptp.sourceforge.net/ F: Documentation/ABI/testing/sysfs-ptp F: 
Documentation/ptp/* -F: drivers/net/ethernet/freescale/gianfar_ptp.c F: drivers/net/phy/dp83640* F: drivers/ptp/* F: include/linux/ptp_cl* -- cgit v1.2.3 From a12ab9e125f184454fc484483fe2029962fe6a34 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 19 Apr 2018 14:50:29 +0200 Subject: selftests: move RTC tests to rtc subfolder Move the RTC tests out of the timers folder as they are mostly unrelated. Keep rtcpie in timers as it only test hrtimers. Signed-off-by: Alexandre Belloni Signed-off-by: Shuah Khan (Samsung OSG) --- MAINTAINERS | 2 +- tools/testing/selftests/Makefile | 1 + tools/testing/selftests/rtc/.gitignore | 2 + tools/testing/selftests/rtc/Makefile | 9 + tools/testing/selftests/rtc/rtctest.c | 328 +++++++++++++++++++++++ tools/testing/selftests/rtc/setdate.c | 86 ++++++ tools/testing/selftests/timers/.gitignore | 2 - tools/testing/selftests/timers/Makefile | 4 +- tools/testing/selftests/timers/rtctest.c | 328 ----------------------- tools/testing/selftests/timers/rtctest_setdate.c | 86 ------ 10 files changed, 429 insertions(+), 419 deletions(-) create mode 100644 tools/testing/selftests/rtc/.gitignore create mode 100644 tools/testing/selftests/rtc/Makefile create mode 100644 tools/testing/selftests/rtc/rtctest.c create mode 100644 tools/testing/selftests/rtc/setdate.c delete mode 100644 tools/testing/selftests/timers/rtctest.c delete mode 100644 tools/testing/selftests/timers/rtctest_setdate.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ca4afd68530c..a376a4adc675 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11878,7 +11878,7 @@ F: include/linux/rtc.h F: include/uapi/linux/rtc.h F: include/linux/rtc/ F: include/linux/platform_data/rtc-* -F: tools/testing/selftests/timers/rtctest.c +F: tools/testing/selftests/rtc/ REALTEK AUDIO CODECS M: Bard Liao diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 32aafa92074c..a368279301b7 100644 --- a/tools/testing/selftests/Makefile +++ 
b/tools/testing/selftests/Makefile @@ -28,6 +28,7 @@ TARGETS += powerpc TARGETS += proc TARGETS += pstore TARGETS += ptrace +TARGETS += rtc TARGETS += seccomp TARGETS += sigaltstack TARGETS += size diff --git a/tools/testing/selftests/rtc/.gitignore b/tools/testing/selftests/rtc/.gitignore new file mode 100644 index 000000000000..d0ad44f6294a --- /dev/null +++ b/tools/testing/selftests/rtc/.gitignore @@ -0,0 +1,2 @@ +rtctest +setdate diff --git a/tools/testing/selftests/rtc/Makefile b/tools/testing/selftests/rtc/Makefile new file mode 100644 index 000000000000..de9c8566672a --- /dev/null +++ b/tools/testing/selftests/rtc/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +CFLAGS += -O3 -Wl,-no-as-needed -Wall +LDFLAGS += -lrt -lpthread -lm + +TEST_GEN_PROGS = rtctest + +TEST_GEN_PROGS_EXTENDED = setdate + +include ../lib.mk diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c new file mode 100644 index 000000000000..6e17b96551ec --- /dev/null +++ b/tools/testing/selftests/rtc/rtctest.c @@ -0,0 +1,328 @@ +/* + * Real Time Clock Driver Test/Example Program + * + * Compile with: + * gcc -s -Wall -Wstrict-prototypes rtctest.c -o rtctest + * + * Copyright (C) 1996, Paul Gortmaker. + * + * Released under the GNU General Public License, version 2, + * included herein by reference. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef ARRAY_SIZE +# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +/* + * This expects the new RTC class driver framework, working with + * clocks that will often not be clones of what the PC-AT had. + * Use the command line to specify another RTC if you need one. 
+ */ +static const char default_rtc[] = "/dev/rtc0"; + +static struct rtc_time cutoff_dates[] = { + { + .tm_year = 70, /* 1970 -1900 */ + .tm_mday = 1, + }, + /* signed time_t 19/01/2038 3:14:08 */ + { + .tm_year = 138, + .tm_mday = 19, + }, + { + .tm_year = 138, + .tm_mday = 20, + }, + { + .tm_year = 199, /* 2099 -1900 */ + .tm_mday = 1, + }, + { + .tm_year = 200, /* 2100 -1900 */ + .tm_mday = 1, + }, + /* unsigned time_t 07/02/2106 7:28:15*/ + { + .tm_year = 205, + .tm_mon = 1, + .tm_mday = 7, + }, + { + .tm_year = 206, + .tm_mon = 1, + .tm_mday = 8, + }, + /* signed time on 64bit in nanoseconds 12/04/2262 01:47:16*/ + { + .tm_year = 362, + .tm_mon = 3, + .tm_mday = 12, + }, + { + .tm_year = 362, /* 2262 -1900 */ + .tm_mon = 3, + .tm_mday = 13, + }, +}; + +static int compare_dates(struct rtc_time *a, struct rtc_time *b) +{ + if (a->tm_year != b->tm_year || + a->tm_mon != b->tm_mon || + a->tm_mday != b->tm_mday || + a->tm_hour != b->tm_hour || + a->tm_min != b->tm_min || + ((b->tm_sec - a->tm_sec) > 1)) + return 1; + + return 0; +} + +int main(int argc, char **argv) +{ + int i, fd, retval, irqcount = 0, dangerous = 0; + unsigned long data; + struct rtc_time rtc_tm; + const char *rtc = default_rtc; + + switch (argc) { + case 3: + if (*argv[2] == 'd') + dangerous = 1; + case 2: + rtc = argv[1]; + /* FALLTHROUGH */ + case 1: + break; + default: + fprintf(stderr, "usage: rtctest [rtcdev] [d]\n"); + return 1; + } + + fd = open(rtc, O_RDONLY); + + if (fd == -1) { + perror(rtc); + exit(errno); + } + + fprintf(stderr, "\n\t\t\tRTC Driver Test Example.\n\n"); + + /* Turn on update interrupts (one per second) */ + retval = ioctl(fd, RTC_UIE_ON, 0); + if (retval == -1) { + if (errno == EINVAL) { + fprintf(stderr, + "\n...Update IRQs not supported.\n"); + goto test_READ; + } + perror("RTC_UIE_ON ioctl"); + exit(errno); + } + + fprintf(stderr, "Counting 5 update (1/sec) interrupts from reading %s:", + rtc); + fflush(stderr); + for (i=1; i<6; i++) { + /* This read will block */ 
+ retval = read(fd, &data, sizeof(unsigned long)); + if (retval == -1) { + perror("read"); + exit(errno); + } + fprintf(stderr, " %d",i); + fflush(stderr); + irqcount++; + } + + fprintf(stderr, "\nAgain, from using select(2) on /dev/rtc:"); + fflush(stderr); + for (i=1; i<6; i++) { + struct timeval tv = {5, 0}; /* 5 second timeout on select */ + fd_set readfds; + + FD_ZERO(&readfds); + FD_SET(fd, &readfds); + /* The select will wait until an RTC interrupt happens. */ + retval = select(fd+1, &readfds, NULL, NULL, &tv); + if (retval == -1) { + perror("select"); + exit(errno); + } + /* This read won't block unlike the select-less case above. */ + retval = read(fd, &data, sizeof(unsigned long)); + if (retval == -1) { + perror("read"); + exit(errno); + } + fprintf(stderr, " %d",i); + fflush(stderr); + irqcount++; + } + + /* Turn off update interrupts */ + retval = ioctl(fd, RTC_UIE_OFF, 0); + if (retval == -1) { + perror("RTC_UIE_OFF ioctl"); + exit(errno); + } + +test_READ: + /* Read the RTC time/date */ + retval = ioctl(fd, RTC_RD_TIME, &rtc_tm); + if (retval == -1) { + perror("RTC_RD_TIME ioctl"); + exit(errno); + } + + fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n", + rtc_tm.tm_mday, rtc_tm.tm_mon + 1, rtc_tm.tm_year + 1900, + rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec); + + /* Set the alarm to 5 sec in the future, and check for rollover */ + rtc_tm.tm_sec += 5; + if (rtc_tm.tm_sec >= 60) { + rtc_tm.tm_sec %= 60; + rtc_tm.tm_min++; + } + if (rtc_tm.tm_min == 60) { + rtc_tm.tm_min = 0; + rtc_tm.tm_hour++; + } + if (rtc_tm.tm_hour == 24) + rtc_tm.tm_hour = 0; + + retval = ioctl(fd, RTC_ALM_SET, &rtc_tm); + if (retval == -1) { + if (errno == EINVAL) { + fprintf(stderr, + "\n...Alarm IRQs not supported.\n"); + goto test_DATE; + } + + perror("RTC_ALM_SET ioctl"); + exit(errno); + } + + /* Read the current alarm settings */ + retval = ioctl(fd, RTC_ALM_READ, &rtc_tm); + if (retval == -1) { + if (errno == EINVAL) { + fprintf(stderr, + 
"\n...EINVAL reading current alarm setting.\n"); + goto test_DATE; + } + perror("RTC_ALM_READ ioctl"); + exit(errno); + } + + fprintf(stderr, "Alarm time now set to %02d:%02d:%02d.\n", + rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec); + + /* Enable alarm interrupts */ + retval = ioctl(fd, RTC_AIE_ON, 0); + if (retval == -1) { + if (errno == EINVAL || errno == EIO) { + fprintf(stderr, + "\n...Alarm IRQs not supported.\n"); + goto test_DATE; + } + + perror("RTC_AIE_ON ioctl"); + exit(errno); + } + + fprintf(stderr, "Waiting 5 seconds for alarm..."); + fflush(stderr); + /* This blocks until the alarm ring causes an interrupt */ + retval = read(fd, &data, sizeof(unsigned long)); + if (retval == -1) { + perror("read"); + exit(errno); + } + irqcount++; + fprintf(stderr, " okay. Alarm rang.\n"); + + /* Disable alarm interrupts */ + retval = ioctl(fd, RTC_AIE_OFF, 0); + if (retval == -1) { + perror("RTC_AIE_OFF ioctl"); + exit(errno); + } + +test_DATE: + if (!dangerous) + goto done; + + fprintf(stderr, "\nTesting problematic dates\n"); + + for (i = 0; i < ARRAY_SIZE(cutoff_dates); i++) { + struct rtc_time current; + + /* Write the new date in RTC */ + retval = ioctl(fd, RTC_SET_TIME, &cutoff_dates[i]); + if (retval == -1) { + perror("RTC_SET_TIME ioctl"); + close(fd); + exit(errno); + } + + /* Read back */ + retval = ioctl(fd, RTC_RD_TIME, ¤t); + if (retval == -1) { + perror("RTC_RD_TIME ioctl"); + exit(errno); + } + + if(compare_dates(&cutoff_dates[i], ¤t)) { + fprintf(stderr,"Setting date %d failed\n", + cutoff_dates[i].tm_year + 1900); + goto done; + } + + cutoff_dates[i].tm_sec += 5; + + /* Write the new alarm in RTC */ + retval = ioctl(fd, RTC_ALM_SET, &cutoff_dates[i]); + if (retval == -1) { + perror("RTC_ALM_SET ioctl"); + close(fd); + exit(errno); + } + + /* Read back */ + retval = ioctl(fd, RTC_ALM_READ, ¤t); + if (retval == -1) { + perror("RTC_ALM_READ ioctl"); + exit(errno); + } + + if(compare_dates(&cutoff_dates[i], ¤t)) { + fprintf(stderr,"Setting alarm %d 
failed\n", + cutoff_dates[i].tm_year + 1900); + goto done; + } + + fprintf(stderr, "Setting year %d is OK \n", + cutoff_dates[i].tm_year + 1900); + } +done: + fprintf(stderr, "\n\n\t\t\t *** Test complete ***\n"); + + close(fd); + + return 0; +} diff --git a/tools/testing/selftests/rtc/setdate.c b/tools/testing/selftests/rtc/setdate.c new file mode 100644 index 000000000000..2cb78489eca4 --- /dev/null +++ b/tools/testing/selftests/rtc/setdate.c @@ -0,0 +1,86 @@ +/* Real Time Clock Driver Test + * by: Benjamin Gaignard (benjamin.gaignard@linaro.org) + * + * To build + * gcc rtctest_setdate.c -o rtctest_setdate + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const char default_time[] = "00:00:00"; + +int main(int argc, char **argv) +{ + int fd, retval; + struct rtc_time new, current; + const char *rtc, *date; + const char *time = default_time; + + switch (argc) { + case 4: + time = argv[3]; + /* FALLTHROUGH */ + case 3: + date = argv[2]; + rtc = argv[1]; + break; + default: + fprintf(stderr, "usage: rtctest_setdate [HH:MM:SS]\n"); + return 1; + } + + fd = open(rtc, O_RDONLY); + if (fd == -1) { + perror(rtc); + exit(errno); + } + + sscanf(date, "%d-%d-%d", &new.tm_mday, &new.tm_mon, &new.tm_year); + new.tm_mon -= 1; + new.tm_year -= 1900; + sscanf(time, "%d:%d:%d", &new.tm_hour, &new.tm_min, &new.tm_sec); + + fprintf(stderr, "Test will set RTC date/time to %d-%d-%d, %02d:%02d:%02d.\n", + new.tm_mday, new.tm_mon + 1, new.tm_year + 1900, + new.tm_hour, new.tm_min, new.tm_sec); + + /* Write the new date in RTC */ + retval = ioctl(fd, RTC_SET_TIME, &new); + if (retval == -1) { + perror("RTC_SET_TIME ioctl"); + close(fd); + exit(errno); + } + + /* Read back */ + retval = ioctl(fd, RTC_RD_TIME, ¤t); + if (retval == -1) { + perror("RTC_RD_TIME ioctl"); + exit(errno); + } + + fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n", + current.tm_mday, current.tm_mon + 1, current.tm_year + 1900, + current.tm_hour, current.tm_min, current.tm_sec); + + close(fd); + return 0; +} diff --git a/tools/testing/selftests/timers/.gitignore b/tools/testing/selftests/timers/.gitignore index 353ae15daa1e..32a9eadb2d4e 100644 --- a/tools/testing/selftests/timers/.gitignore +++ b/tools/testing/selftests/timers/.gitignore @@ -10,7 +10,6 @@ nsleep-lat posix_timers raw_skew rtcpie -rtctest set-2038 set-tai set-timer-lat @@ -20,4 +19,3 @@ valid-adjtimex adjtick set-tz freq-step -rtctest_setdate diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile index 8be7895ff918..c02683cfb6c9 
100644 --- a/tools/testing/selftests/timers/Makefile +++ b/tools/testing/selftests/timers/Makefile @@ -5,13 +5,13 @@ LDFLAGS += -lrt -lpthread -lm # these are all "safe" tests that don't modify # system time or require escalated privileges TEST_GEN_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ - inconsistency-check raw_skew threadtest rtctest rtcpie + inconsistency-check raw_skew threadtest rtcpie DESTRUCTIVE_TESTS = alarmtimer-suspend valid-adjtimex adjtick change_skew \ skew_consistency clocksource-switch freq-step leap-a-day \ leapcrash set-tai set-2038 set-tz -TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS) rtctest_setdate +TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS) include ../lib.mk diff --git a/tools/testing/selftests/timers/rtctest.c b/tools/testing/selftests/timers/rtctest.c deleted file mode 100644 index 6e17b96551ec..000000000000 --- a/tools/testing/selftests/timers/rtctest.c +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Real Time Clock Driver Test/Example Program - * - * Compile with: - * gcc -s -Wall -Wstrict-prototypes rtctest.c -o rtctest - * - * Copyright (C) 1996, Paul Gortmaker. - * - * Released under the GNU General Public License, version 2, - * included herein by reference. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - -/* - * This expects the new RTC class driver framework, working with - * clocks that will often not be clones of what the PC-AT had. - * Use the command line to specify another RTC if you need one. 
- */ -static const char default_rtc[] = "/dev/rtc0"; - -static struct rtc_time cutoff_dates[] = { - { - .tm_year = 70, /* 1970 -1900 */ - .tm_mday = 1, - }, - /* signed time_t 19/01/2038 3:14:08 */ - { - .tm_year = 138, - .tm_mday = 19, - }, - { - .tm_year = 138, - .tm_mday = 20, - }, - { - .tm_year = 199, /* 2099 -1900 */ - .tm_mday = 1, - }, - { - .tm_year = 200, /* 2100 -1900 */ - .tm_mday = 1, - }, - /* unsigned time_t 07/02/2106 7:28:15*/ - { - .tm_year = 205, - .tm_mon = 1, - .tm_mday = 7, - }, - { - .tm_year = 206, - .tm_mon = 1, - .tm_mday = 8, - }, - /* signed time on 64bit in nanoseconds 12/04/2262 01:47:16*/ - { - .tm_year = 362, - .tm_mon = 3, - .tm_mday = 12, - }, - { - .tm_year = 362, /* 2262 -1900 */ - .tm_mon = 3, - .tm_mday = 13, - }, -}; - -static int compare_dates(struct rtc_time *a, struct rtc_time *b) -{ - if (a->tm_year != b->tm_year || - a->tm_mon != b->tm_mon || - a->tm_mday != b->tm_mday || - a->tm_hour != b->tm_hour || - a->tm_min != b->tm_min || - ((b->tm_sec - a->tm_sec) > 1)) - return 1; - - return 0; -} - -int main(int argc, char **argv) -{ - int i, fd, retval, irqcount = 0, dangerous = 0; - unsigned long data; - struct rtc_time rtc_tm; - const char *rtc = default_rtc; - - switch (argc) { - case 3: - if (*argv[2] == 'd') - dangerous = 1; - case 2: - rtc = argv[1]; - /* FALLTHROUGH */ - case 1: - break; - default: - fprintf(stderr, "usage: rtctest [rtcdev] [d]\n"); - return 1; - } - - fd = open(rtc, O_RDONLY); - - if (fd == -1) { - perror(rtc); - exit(errno); - } - - fprintf(stderr, "\n\t\t\tRTC Driver Test Example.\n\n"); - - /* Turn on update interrupts (one per second) */ - retval = ioctl(fd, RTC_UIE_ON, 0); - if (retval == -1) { - if (errno == EINVAL) { - fprintf(stderr, - "\n...Update IRQs not supported.\n"); - goto test_READ; - } - perror("RTC_UIE_ON ioctl"); - exit(errno); - } - - fprintf(stderr, "Counting 5 update (1/sec) interrupts from reading %s:", - rtc); - fflush(stderr); - for (i=1; i<6; i++) { - /* This read will block */ 
- retval = read(fd, &data, sizeof(unsigned long)); - if (retval == -1) { - perror("read"); - exit(errno); - } - fprintf(stderr, " %d",i); - fflush(stderr); - irqcount++; - } - - fprintf(stderr, "\nAgain, from using select(2) on /dev/rtc:"); - fflush(stderr); - for (i=1; i<6; i++) { - struct timeval tv = {5, 0}; /* 5 second timeout on select */ - fd_set readfds; - - FD_ZERO(&readfds); - FD_SET(fd, &readfds); - /* The select will wait until an RTC interrupt happens. */ - retval = select(fd+1, &readfds, NULL, NULL, &tv); - if (retval == -1) { - perror("select"); - exit(errno); - } - /* This read won't block unlike the select-less case above. */ - retval = read(fd, &data, sizeof(unsigned long)); - if (retval == -1) { - perror("read"); - exit(errno); - } - fprintf(stderr, " %d",i); - fflush(stderr); - irqcount++; - } - - /* Turn off update interrupts */ - retval = ioctl(fd, RTC_UIE_OFF, 0); - if (retval == -1) { - perror("RTC_UIE_OFF ioctl"); - exit(errno); - } - -test_READ: - /* Read the RTC time/date */ - retval = ioctl(fd, RTC_RD_TIME, &rtc_tm); - if (retval == -1) { - perror("RTC_RD_TIME ioctl"); - exit(errno); - } - - fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n", - rtc_tm.tm_mday, rtc_tm.tm_mon + 1, rtc_tm.tm_year + 1900, - rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec); - - /* Set the alarm to 5 sec in the future, and check for rollover */ - rtc_tm.tm_sec += 5; - if (rtc_tm.tm_sec >= 60) { - rtc_tm.tm_sec %= 60; - rtc_tm.tm_min++; - } - if (rtc_tm.tm_min == 60) { - rtc_tm.tm_min = 0; - rtc_tm.tm_hour++; - } - if (rtc_tm.tm_hour == 24) - rtc_tm.tm_hour = 0; - - retval = ioctl(fd, RTC_ALM_SET, &rtc_tm); - if (retval == -1) { - if (errno == EINVAL) { - fprintf(stderr, - "\n...Alarm IRQs not supported.\n"); - goto test_DATE; - } - - perror("RTC_ALM_SET ioctl"); - exit(errno); - } - - /* Read the current alarm settings */ - retval = ioctl(fd, RTC_ALM_READ, &rtc_tm); - if (retval == -1) { - if (errno == EINVAL) { - fprintf(stderr, - 
"\n...EINVAL reading current alarm setting.\n"); - goto test_DATE; - } - perror("RTC_ALM_READ ioctl"); - exit(errno); - } - - fprintf(stderr, "Alarm time now set to %02d:%02d:%02d.\n", - rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec); - - /* Enable alarm interrupts */ - retval = ioctl(fd, RTC_AIE_ON, 0); - if (retval == -1) { - if (errno == EINVAL || errno == EIO) { - fprintf(stderr, - "\n...Alarm IRQs not supported.\n"); - goto test_DATE; - } - - perror("RTC_AIE_ON ioctl"); - exit(errno); - } - - fprintf(stderr, "Waiting 5 seconds for alarm..."); - fflush(stderr); - /* This blocks until the alarm ring causes an interrupt */ - retval = read(fd, &data, sizeof(unsigned long)); - if (retval == -1) { - perror("read"); - exit(errno); - } - irqcount++; - fprintf(stderr, " okay. Alarm rang.\n"); - - /* Disable alarm interrupts */ - retval = ioctl(fd, RTC_AIE_OFF, 0); - if (retval == -1) { - perror("RTC_AIE_OFF ioctl"); - exit(errno); - } - -test_DATE: - if (!dangerous) - goto done; - - fprintf(stderr, "\nTesting problematic dates\n"); - - for (i = 0; i < ARRAY_SIZE(cutoff_dates); i++) { - struct rtc_time current; - - /* Write the new date in RTC */ - retval = ioctl(fd, RTC_SET_TIME, &cutoff_dates[i]); - if (retval == -1) { - perror("RTC_SET_TIME ioctl"); - close(fd); - exit(errno); - } - - /* Read back */ - retval = ioctl(fd, RTC_RD_TIME, ¤t); - if (retval == -1) { - perror("RTC_RD_TIME ioctl"); - exit(errno); - } - - if(compare_dates(&cutoff_dates[i], ¤t)) { - fprintf(stderr,"Setting date %d failed\n", - cutoff_dates[i].tm_year + 1900); - goto done; - } - - cutoff_dates[i].tm_sec += 5; - - /* Write the new alarm in RTC */ - retval = ioctl(fd, RTC_ALM_SET, &cutoff_dates[i]); - if (retval == -1) { - perror("RTC_ALM_SET ioctl"); - close(fd); - exit(errno); - } - - /* Read back */ - retval = ioctl(fd, RTC_ALM_READ, ¤t); - if (retval == -1) { - perror("RTC_ALM_READ ioctl"); - exit(errno); - } - - if(compare_dates(&cutoff_dates[i], ¤t)) { - fprintf(stderr,"Setting alarm %d 
failed\n", - cutoff_dates[i].tm_year + 1900); - goto done; - } - - fprintf(stderr, "Setting year %d is OK \n", - cutoff_dates[i].tm_year + 1900); - } -done: - fprintf(stderr, "\n\n\t\t\t *** Test complete ***\n"); - - close(fd); - - return 0; -} diff --git a/tools/testing/selftests/timers/rtctest_setdate.c b/tools/testing/selftests/timers/rtctest_setdate.c deleted file mode 100644 index 2cb78489eca4..000000000000 --- a/tools/testing/selftests/timers/rtctest_setdate.c +++ /dev/null @@ -1,86 +0,0 @@ -/* Real Time Clock Driver Test - * by: Benjamin Gaignard (benjamin.gaignard@linaro.org) - * - * To build - * gcc rtctest_setdate.c -o rtctest_setdate - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static const char default_time[] = "00:00:00"; - -int main(int argc, char **argv) -{ - int fd, retval; - struct rtc_time new, current; - const char *rtc, *date; - const char *time = default_time; - - switch (argc) { - case 4: - time = argv[3]; - /* FALLTHROUGH */ - case 3: - date = argv[2]; - rtc = argv[1]; - break; - default: - fprintf(stderr, "usage: rtctest_setdate [HH:MM:SS]\n"); - return 1; - } - - fd = open(rtc, O_RDONLY); - if (fd == -1) { - perror(rtc); - exit(errno); - } - - sscanf(date, "%d-%d-%d", &new.tm_mday, &new.tm_mon, &new.tm_year); - new.tm_mon -= 1; - new.tm_year -= 1900; - sscanf(time, "%d:%d:%d", &new.tm_hour, &new.tm_min, &new.tm_sec); - - fprintf(stderr, "Test will set RTC date/time to %d-%d-%d, %02d:%02d:%02d.\n", - new.tm_mday, new.tm_mon + 1, new.tm_year + 1900, - new.tm_hour, new.tm_min, new.tm_sec); - - /* Write the new date in RTC */ - retval = ioctl(fd, RTC_SET_TIME, &new); - if (retval == -1) { - perror("RTC_SET_TIME ioctl"); - close(fd); - exit(errno); - } - - /* Read back */ - retval = ioctl(fd, RTC_RD_TIME, ¤t); - if (retval == -1) { - perror("RTC_RD_TIME ioctl"); - exit(errno); - } - - fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n", - current.tm_mday, current.tm_mon + 1, current.tm_year + 1900, - current.tm_hour, current.tm_min, current.tm_sec); - - close(fd); - return 0; -} -- cgit v1.2.3 From 5ef12cb4a3a78ffb331c03a795a15eea4ae35155 Mon Sep 17 00:00:00 2001 From: "Shuah Khan (Samsung OSG)" Date: Wed, 30 May 2018 21:00:57 -0600 Subject: selftests: add test for USB over IP driver Add test for USB over IP driver. This test runs several tests on a device specified in the -b argument and path to the usbip tools. 
usbip_test.sh -b -p e.g: cd tools/testing selftests/drivers/usb/usbip sudo ./usbip_test.sh -b 3-10.2 -p /tools/usb/usbip This test should be run as root and user should build usbip tools before running the test. The usbip test isn't included in the Kselftest run as it requires user to specify a device to run tests on. Signed-off-by: Shuah Khan (Samsung OSG) Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 1 + .../selftests/drivers/usb/usbip/usbip_test.sh | 198 +++++++++++++++++++++ 2 files changed, 199 insertions(+) create mode 100755 tools/testing/selftests/drivers/usb/usbip/usbip_test.sh (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 4b7d977572ac..34d02d8a7728 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14670,6 +14670,7 @@ S: Maintained F: Documentation/usb/usbip_protocol.txt F: drivers/usb/usbip/ F: tools/usb/usbip/ +F: tools/testing/selftests/drivers/usb/usbip/ USB PEGASUS DRIVER M: Petko Manolov diff --git a/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh b/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh new file mode 100755 index 000000000000..1893d0f59ad7 --- /dev/null +++ b/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh @@ -0,0 +1,198 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +usage() { echo "usbip_test.sh -b -p "; exit 1; } + +while getopts "h:b:p:" arg; do + case "${arg}" in + h) + usage + ;; + b) + busid=${OPTARG} + ;; + p) + tools_path=${OPTARG} + ;; + *) + usage + ;; + esac +done +shift $((OPTIND-1)) + +if [ -z "${busid}" ]; then + usage +fi + +echo "Running USB over IP Testing on $busid"; + +test_end_msg="End of USB over IP Testing on $busid" + +if [ $UID != 0 ]; then + echo "Please run usbip_test as root [SKIP]" + echo $test_end_msg + exit $ksft_skip +fi + +echo "Load usbip_host module" +if ! 
/sbin/modprobe -q -n usbip_host; then + echo "usbip_test: module usbip_host is not found [SKIP]" + echo $test_end_msg + exit $ksft_skip +fi + +if /sbin/modprobe -q usbip_host; then + /sbin/modprobe -q -r test_bitmap + echo "usbip_test: module usbip_host is loaded [OK]" +else + echo "usbip_test: module usbip_host failed to load [FAIL]" + echo $test_end_msg + exit 1 +fi + +echo "Load vhci_hcd module" +if /sbin/modprobe -q vhci_hcd; then + /sbin/modprobe -q -r test_bitmap + echo "usbip_test: module vhci_hcd is loaded [OK]" +else + echo "usbip_test: module vhci_hcd failed to load [FAIL]" + echo $test_end_msg + exit 1 +fi +echo "==============================================================" + +cd $tools_path; + +if [ ! -f src/usbip ]; then + echo "Please build usbip tools" + echo $test_end_msg + exit $ksft_skip +fi + +echo "Expect to see export-able devices"; +src/usbip list -l; +echo "==============================================================" + +echo "Run lsusb to see all usb devices" +lsusb -t; +echo "==============================================================" + +src/usbipd -D; + +echo "Get exported devices from localhost - expect to see none"; +src/usbip list -r localhost; +echo "==============================================================" + +echo "bind devices"; +src/usbip bind -b $busid; +echo "==============================================================" + +echo "Run lsusb - bound devices should be under usbip_host control" +lsusb -t; +echo "==============================================================" + +echo "bind devices - expect already bound messages" +src/usbip bind -b $busid; +echo "==============================================================" + +echo "Get exported devices from localhost - expect to see exported devices"; +src/usbip list -r localhost; +echo "==============================================================" + +echo "unbind devices"; +src/usbip unbind -b $busid; +echo 
"==============================================================" + +echo "Run lsusb - bound devices should be rebound to original drivers" +lsusb -t; +echo "==============================================================" + +echo "unbind devices - expect no devices bound message"; +src/usbip unbind -b $busid; +echo "==============================================================" + +echo "Get exported devices from localhost - expect to see none"; +src/usbip list -r localhost; +echo "==============================================================" + +echo "List imported devices - expect to see none"; +src/usbip port; +echo "==============================================================" + +echo "Import devices from localhost - should fail with no devices" +src/usbip attach -r localhost -b $busid; +echo "==============================================================" + +echo "bind devices"; +src/usbip bind -b $busid; +echo "==============================================================" + +echo "List imported devices - expect to see exported devices"; +src/usbip list -r localhost; +echo "==============================================================" + +echo "List imported devices - expect to see none"; +src/usbip port; +echo "==============================================================" + +echo "Import devices from localhost - should work" +src/usbip attach -r localhost -b $busid; +echo "==============================================================" + +echo "List imported devices - expect to see imported devices"; +src/usbip port; +echo "==============================================================" + +echo "Import devices from localhost - expect already imported messages" +src/usbip attach -r localhost -b $busid; +echo "==============================================================" + +echo "Un-import devices"; +src/usbip detach -p 00; +src/usbip detach -p 01; +echo "==============================================================" + +echo "List imported devices - 
expect to see none"; +src/usbip port; +echo "==============================================================" + +echo "Un-import devices - expect no devices to detach messages"; +src/usbip detach -p 00; +src/usbip detach -p 01; +echo "==============================================================" + +echo "Detach invalid port tests - expect invalid port error message"; +src/usbip detach -p 100; +echo "==============================================================" + +echo "Expect to see export-able devices"; +src/usbip list -l; +echo "==============================================================" + +echo "Remove usbip_host module"; +rmmod usbip_host; + +echo "Run lsusb - bound devices should be rebound to original drivers" +lsusb -t; +echo "==============================================================" + +echo "Run bind without usbip_host - expect fail" +src/usbip bind -b $busid; +echo "==============================================================" + +echo "Run lsusb - devices that failed to bind aren't bound to any driver" +lsusb -t; +echo "==============================================================" + +echo "modprobe usbip_host - does it work?" +/sbin/modprobe usbip_host +echo "Should see -busid- is not in match_busid table... skip! dmesg" +echo "==============================================================" +dmesg | grep "is not in match_busid table" +echo "==============================================================" + +echo $test_end_msg -- cgit v1.2.3 From c42812da1395bf57a6f7b5934b5339a3a42c2c7d Mon Sep 17 00:00:00 2001 From: David Lechner Date: Sat, 12 May 2018 17:05:44 -0500 Subject: MAINTAINERS: add entry for LEGO MINDSTORMS EV3 This adds an entry to MAINTAINERS for LEGO MINDSTORMS EV3 (an ARM-based robotics platform). The files listed are exclusive to this device. Add me as reviewer so that I will be cc'ed for any changes to these files. 
Signed-off-by: David Lechner Acked-by: Sekhar Nori Signed-off-by: Sebastian Reichel --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a93b8cfca489..683b32898f0c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7976,6 +7976,13 @@ S: Maintained F: Documentation/misc-devices/eeprom F: drivers/misc/eeprom/eeprom.c +LEGO MINDSTORMS EV3 +R: David Lechner +S: Maintained +F: arch/arm/boot/dts/da850-lego-ev3.dts +F: Documentation/devicetree/bindings/power/supply/lego_ev3_battery.txt +F: drivers/power/supply/lego_ev3_battery.c + LEGO USB Tower driver M: Juergen Stuber L: legousb-devel@lists.sourceforge.net -- cgit v1.2.3 From 41a233dcbe7dfac00da74d703d4e5f6fe17c5f63 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Sat, 27 Jan 2018 20:09:16 -0500 Subject: MAINTAINERS: add turbostat utility Signed-off-by: Len Brown --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ca4afd68530c..0d00b6c5370b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14371,6 +14371,15 @@ S: Maintained F: drivers/tc/ F: include/linux/tc.h +TURBOSTAT UTILITY +M: "Len Brown" +L: linux-pm@vger.kernel.org +B: https://bugzilla.kernel.org +Q: https://patchwork.kernel.org/project/linux-pm/list/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git turbostat +S: Supported +F: tools/power/x86/turbostat/ + TW5864 VIDEO4LINUX DRIVER M: Bluecherry Maintainers M: Anton Sviridenko -- cgit v1.2.3 From 10a76564ae865cbf30ed30e8cbdc1a047e0559ae Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Jun 2018 23:06:41 +0200 Subject: bpf, doc: add missing patchwork url and libbpf to maintainers Add missing bits under tools/lib/bpf/ and also Q: entry in order to make it easier for people to retrieve current patch queue. 
Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f492431b239b..2fd51db09fa4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2722,6 +2722,7 @@ L: netdev@vger.kernel.org L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git +Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 S: Supported F: arch/x86/net/bpf_jit* F: Documentation/networking/filter.txt @@ -2740,6 +2741,7 @@ F: net/sched/act_bpf.c F: net/sched/cls_bpf.c F: samples/bpf/ F: tools/bpf/ +F: tools/lib/bpf/ F: tools/testing/selftests/bpf/ BROADCOM B44 10/100 ETHERNET DRIVER -- cgit v1.2.3 From d3adce9d0334fcd5de1e9aa8749c8953ffc9b5a7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 4 Jun 2018 06:50:29 -0700 Subject: MAINTAINERS: TCP gets its first maintainer Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- MAINTAINERS | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0ae0dbf0e15e..70d61c2b1be4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9862,6 +9862,19 @@ F: net/ipv6/calipso.c F: net/netfilter/xt_CONNSECMARK.c F: net/netfilter/xt_SECMARK.c +NETWORKING [TCP] +M: Eric Dumazet +L: netdev@vger.kernel.org +S: Maintained +F: net/ipv4/tcp*.c +F: net/ipv4/syncookies.c +F: net/ipv6/tcp*.c +F: net/ipv6/syncookies.c +F: include/uapi/linux/tcp.h +F: include/net/tcp.h +F: include/linux/tcp.h +F: include/trace/events/tcp.h + NETWORKING [TLS] M: Boris Pismenny M: Aviad Yehezkel -- cgit v1.2.3 From 6d1c2cf2247599c5b50df7d04e254ef0edec812c Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Mon, 4 Jun 2018 09:05:50 -0500 Subject: MAINTAINERS: Add Andreas Gruenbacher as a maintainer for gfs2 Add Andreas Gruenbacher as a maintainer for the gfs2 file system and remove Steve Whitehouse. Signed-off-by: Bob Peterson --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b60179d948bb..73745884a595 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5939,8 +5939,8 @@ S: Maintained F: scripts/get_maintainer.pl GFS2 FILE SYSTEM -M: Steven Whitehouse M: Bob Peterson +M: Andreas Gruenbacher L: cluster-devel@redhat.com W: http://sources.redhat.com/cluster/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git -- cgit v1.2.3 From 85d63445f41125dafeddda74e5b13b7eefac9407 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 10 May 2018 12:20:13 -0700 Subject: Documentation: e100: Update the Intel 10/100 driver doc Over the years, several of the links have changed or are no longer valid so update them. In addition, the default values were incorrect for a couple of parameters. Converted the text file to the reStructuredText (RST) format, since the Linux kernel documentation now uses this format for documentation. 
Signed-off-by: Jeff Kirsher Tested-by: Aaron Brown --- Documentation/networking/e100.rst | 177 +++++++++++++++++++++++++++++++++++ Documentation/networking/e100.txt | 183 ------------------------------------- Documentation/networking/index.rst | 1 + MAINTAINERS | 2 +- 4 files changed, 179 insertions(+), 184 deletions(-) create mode 100644 Documentation/networking/e100.rst delete mode 100644 Documentation/networking/e100.txt (limited to 'MAINTAINERS') diff --git a/Documentation/networking/e100.rst b/Documentation/networking/e100.rst new file mode 100644 index 000000000000..d4d837027925 --- /dev/null +++ b/Documentation/networking/e100.rst @@ -0,0 +1,177 @@ +Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters +============================================================== + +June 1, 2018 + +Contents +======== + +- In This Release +- Identifying Your Adapter +- Building and Installation +- Driver Configuration Parameters +- Additional Configurations +- Known Issues +- Support + + +In This Release +=============== + +This file describes the Linux* Base Driver for the Intel(R) PRO/100 Family of +Adapters. This driver includes support for Itanium(R)2-based systems. + +For questions related to hardware requirements, refer to the documentation +supplied with your Intel PRO/100 adapter. + +The following features are now available in supported kernels: + - Native VLANs + - Channel Bonding (teaming) + - SNMP + +Channel Bonding documentation can be found in the Linux kernel source: +/Documentation/networking/bonding.txt + + +Identifying Your Adapter +======================== + +For information on how to identify your adapter, and for the latest Intel +network drivers, refer to the Intel Support website: +http://www.intel.com/support + +Driver Configuration Parameters +=============================== + +The default value for each parameter is generally the recommended setting, +unless otherwise noted. + +Rx Descriptors: Number of receive descriptors. 
A receive descriptor is a data + structure that describes a receive buffer and its attributes to the network + controller. The data in the descriptor is used by the controller to write + data from the controller to host memory. In the 3.x.x driver the valid range + for this parameter is 64-256. The default value is 256. This parameter can be + changed using the command:: + + ethtool -G eth? rx n + + Where n is the number of desired Rx descriptors. + +Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data + structure that describes a transmit buffer and its attributes to the network + controller. The data in the descriptor is used by the controller to read + data from the host memory to the controller. In the 3.x.x driver the valid + range for this parameter is 64-256. The default value is 128. This parameter + can be changed using the command:: + + ethtool -G eth? tx n + + Where n is the number of desired Tx descriptors. + +Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by + default. The ethtool utility can be used as follows to force speed/duplex.:: + + ethtool -s eth? autoneg off speed {10|100} duplex {full|half} + + NOTE: setting the speed/duplex to incorrect values will cause the link to + fail. + +Event Log Message Level: The driver uses the message level flag to log events + to syslog. The message level can be set at driver load time. It can also be + set using the command:: + + ethtool -s eth? msglvl n + + +Additional Configurations +========================= + + Configuring the Driver on Different Distributions + ------------------------------------------------- + + Configuring a network driver to load properly when the system is started is + distribution dependent. Typically, the configuration process involves adding + an alias line to /etc/modprobe.d/*.conf as well as editing other system + startup scripts and/or configuration files. 
Many popular Linux + distributions ship with tools to make these changes for you. To learn the + proper way to configure a network device for your system, refer to your + distribution documentation. If during this process you are asked for the + driver or module name, the name for the Linux Base Driver for the Intel + PRO/100 Family of Adapters is e100. + + As an example, if you install the e100 driver for two PRO/100 adapters + (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/ + + alias eth0 e100 + alias eth1 e100 + + Viewing Link Messages + --------------------- + In order to see link messages and other Intel driver information on your + console, you must set the dmesg level up to six. This can be done by + entering the following on the command line before loading the e100 driver:: + + dmesg -n 6 + + If you wish to see all messages issued by the driver, including debug + messages, set the dmesg level to eight. + + NOTE: This setting is not saved across reboots. + + + ethtool + ------- + + The driver utilizes the ethtool interface for driver configuration and + diagnostics, as well as displaying statistical information. The ethtool + version 1.6 or later is required for this functionality. + + The latest release of ethtool can be found from + https://www.kernel.org/pub/software/network/ethtool/ + + Enabling Wake on LAN* (WoL) + --------------------------- + WoL is provided through the ethtool* utility. For instructions on enabling + WoL with ethtool, refer to the ethtool man page. + + WoL will be enabled on the system during the next shut down or reboot. For + this driver version, in order to enable WoL, the e100 driver must be + loaded when shutting down or rebooting the system. + + NAPI + ---- + + NAPI (Rx polling mode) is supported in the e100 driver. + + See https://wiki.linuxfoundation.org/networking/napi for more information + on NAPI. 
+ + Multiple Interfaces on Same Ethernet Broadcast Network + ------------------------------------------------------ + + Due to the default ARP behavior on Linux, it is not possible to have + one system on two IP networks in the same Ethernet broadcast domain + (non-partitioned switch) behave as expected. All Ethernet interfaces + will respond to IP traffic for any IP address assigned to the system. + This results in unbalanced receive traffic. + + If you have multiple interfaces in a server, either turn on ARP + filtering by + + (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter + (this only works if your kernel's version is higher than 2.4.5), or + + (2) installing the interfaces in separate broadcast domains (either + in different switches or in a switch partitioned to VLANs). + + +Support +======= +For general information, go to the Intel support website at: +http://www.intel.com/support/ + +or the Intel Wired Networking project hosted by Sourceforge at: +http://sourceforge.net/projects/e1000 +If an issue is identified with the released source code on a supported kernel +with a supported adapter, email the specific information related to the issue +to e1000-devel@lists.sf.net. diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt deleted file mode 100644 index 54810b82c01a..000000000000 --- a/Documentation/networking/e100.txt +++ /dev/null @@ -1,183 +0,0 @@ -Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters -============================================================== - -March 15, 2011 - -Contents -======== - -- In This Release -- Identifying Your Adapter -- Building and Installation -- Driver Configuration Parameters -- Additional Configurations -- Known Issues -- Support - - -In This Release -=============== - -This file describes the Linux* Base Driver for the Intel(R) PRO/100 Family of -Adapters. This driver includes support for Itanium(R)2-based systems. 
- -For questions related to hardware requirements, refer to the documentation -supplied with your Intel PRO/100 adapter. - -The following features are now available in supported kernels: - - Native VLANs - - Channel Bonding (teaming) - - SNMP - -Channel Bonding documentation can be found in the Linux kernel source: -/Documentation/networking/bonding.txt - - -Identifying Your Adapter -======================== - -For more information on how to identify your adapter, go to the Adapter & -Driver ID Guide at: - - http://support.intel.com/support/network/adapter/pro100/21397.htm - -For the latest Intel network drivers for Linux, refer to the following -website. In the search field, enter your adapter name or type, or use the -networking link on the left to search for your adapter: - - http://downloadfinder.intel.com/scripts-df/support_intel.asp - -Driver Configuration Parameters -=============================== - -The default value for each parameter is generally the recommended setting, -unless otherwise noted. - -Rx Descriptors: Number of receive descriptors. A receive descriptor is a data - structure that describes a receive buffer and its attributes to the network - controller. The data in the descriptor is used by the controller to write - data from the controller to host memory. In the 3.x.x driver the valid range - for this parameter is 64-256. The default value is 64. This parameter can be - changed using the command: - - ethtool -G eth? rx n, where n is the number of desired rx descriptors. - -Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data - structure that describes a transmit buffer and its attributes to the network - controller. The data in the descriptor is used by the controller to read - data from the host memory to the controller. In the 3.x.x driver the valid - range for this parameter is 64-256. The default value is 64. This parameter - can be changed using the command: - - ethtool -G eth? 
tx n, where n is the number of desired tx descriptors. - -Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by - default. The ethtool utility can be used as follows to force speed/duplex. - - ethtool -s eth? autoneg off speed {10|100} duplex {full|half} - - NOTE: setting the speed/duplex to incorrect values will cause the link to - fail. - -Event Log Message Level: The driver uses the message level flag to log events - to syslog. The message level can be set at driver load time. It can also be - set using the command: - - ethtool -s eth? msglvl n - - -Additional Configurations -========================= - - Configuring the Driver on Different Distributions - ------------------------------------------------- - - Configuring a network driver to load properly when the system is started is - distribution dependent. Typically, the configuration process involves adding - an alias line to /etc/modprobe.d/*.conf as well as editing other system - startup scripts and/or configuration files. Many popular Linux - distributions ship with tools to make these changes for you. To learn the - proper way to configure a network device for your system, refer to your - distribution documentation. If during this process you are asked for the - driver or module name, the name for the Linux Base Driver for the Intel - PRO/100 Family of Adapters is e100. - - As an example, if you install the e100 driver for two PRO/100 adapters - (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/ - - alias eth0 e100 - alias eth1 e100 - - Viewing Link Messages - --------------------- - In order to see link messages and other Intel driver information on your - console, you must set the dmesg level up to six. This can be done by - entering the following on the command line before loading the e100 driver: - - dmesg -n 8 - - If you wish to see all messages issued by the driver, including debug - messages, set the dmesg level to eight. 
- - NOTE: This setting is not saved across reboots. - - - ethtool - ------- - - The driver utilizes the ethtool interface for driver configuration and - diagnostics, as well as displaying statistical information. The ethtool - version 1.6 or later is required for this functionality. - - The latest release of ethtool can be found from - https://www.kernel.org/pub/software/network/ethtool/ - - Enabling Wake on LAN* (WoL) - --------------------------- - WoL is provided through the ethtool* utility. For instructions on enabling - WoL with ethtool, refer to the ethtool man page. - - WoL will be enabled on the system during the next shut down or reboot. For - this driver version, in order to enable WoL, the e100 driver must be - loaded when shutting down or rebooting the system. - - NAPI - ---- - - NAPI (Rx polling mode) is supported in the e100 driver. - - See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI. - - Multiple Interfaces on Same Ethernet Broadcast Network - ------------------------------------------------------ - - Due to the default ARP behavior on Linux, it is not possible to have - one system on two IP networks in the same Ethernet broadcast domain - (non-partitioned switch) behave as expected. All Ethernet interfaces - will respond to IP traffic for any IP address assigned to the system. - This results in unbalanced receive traffic. - - If you have multiple interfaces in a server, either turn on ARP - filtering by - - (1) entering: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter - (this only works if your kernel's version is higher than 2.4.5), or - - (2) installing the interfaces in separate broadcast domains (either - in different switches or in a switch partitioned to VLANs). 
- - -Support -======= - -For general information, go to the Intel support website at: - - http://support.intel.com - - or the Intel Wired Networking project hosted by Sourceforge at: - - http://sourceforge.net/projects/e1000 - -If an issue is identified with the released source code on the supported -kernel with a supported adapter, email the specific information related to the -issue to e1000-devel@lists.sourceforge.net. diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index cbd9bdd4a79e..d11a62977edd 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -10,6 +10,7 @@ Contents: batman-adv can dpaa2/index + e100 kapi z8530book msg_zerocopy diff --git a/MAINTAINERS b/MAINTAINERS index 0ae0dbf0e15e..d68981ca9896 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7089,7 +7089,7 @@ Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git S: Supported -F: Documentation/networking/e100.txt +F: Documentation/networking/e100.rst F: Documentation/networking/e1000.txt F: Documentation/networking/e1000e.txt F: Documentation/networking/igb.txt -- cgit v1.2.3 From 228046e76189ce542f15321b760e380551468017 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 10 May 2018 12:55:38 -0700 Subject: Documentation: e1000: Update kernel documentation Updated the e1000.txt kernel documentation with the latest information. Also convert the text file to reStructuredText (RST) format, since the Linux kernel documentation now uses this format for documentation. 
Signed-off-by: Jeff Kirsher Tested-by: Aaron Brown --- Documentation/networking/e1000.rst | 422 +++++++++++++++++++++++++++++++++ Documentation/networking/e1000.txt | 461 ------------------------------------- Documentation/networking/index.rst | 1 + MAINTAINERS | 2 +- 4 files changed, 424 insertions(+), 462 deletions(-) create mode 100644 Documentation/networking/e1000.rst delete mode 100644 Documentation/networking/e1000.txt (limited to 'MAINTAINERS') diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/e1000.rst new file mode 100644 index 000000000000..616848940e63 --- /dev/null +++ b/Documentation/networking/e1000.rst @@ -0,0 +1,422 @@ +Linux* Base Driver for Intel(R) Ethernet Network Connection +=========================================================== + +Intel Gigabit Linux driver. +Copyright(c) 1999 - 2013 Intel Corporation. + +Contents +======== + +- Identifying Your Adapter +- Command Line Parameters +- Speed and Duplex Configuration +- Additional Configurations +- Support + +Identifying Your Adapter +======================== + +For more information on how to identify your adapter, go to the Adapter & +Driver ID Guide at: + + http://support.intel.com/support/go/network/adapter/idguide.htm + +For the latest Intel network drivers for Linux, refer to the following +website. In the search field, enter your adapter name or type, or use the +networking link on the left to search for your adapter: + + http://support.intel.com/support/go/network/adapter/home.htm + +Command Line Parameters +======================= + +The default value for each parameter is generally the recommended setting, +unless otherwise noted. + +NOTES: For more information about the AutoNeg, Duplex, and Speed + parameters, see the "Speed and Duplex Configuration" section in + this document. 
+ + For more information about the InterruptThrottleRate, + RxIntDelay, TxIntDelay, RxAbsIntDelay, and TxAbsIntDelay + parameters, see the application note at: + http://www.intel.com/design/network/applnots/ap450.htm + +AutoNeg +------- +(Supported only on adapters with copper connections) +Valid Range: 0x01-0x0F, 0x20-0x2F +Default Value: 0x2F + +This parameter is a bit-mask that specifies the speed and duplex settings +advertised by the adapter. When this parameter is used, the Speed and +Duplex parameters must not be specified. + +NOTE: Refer to the Speed and Duplex section of this readme for more + information on the AutoNeg parameter. + +Duplex +------ +(Supported only on adapters with copper connections) +Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) +Default Value: 0 + +This defines the direction in which data is allowed to flow. Can be +either one or two-directional. If both Duplex and the link partner are +set to auto-negotiate, the board auto-detects the correct duplex. If the +link partner is forced (either full or half), Duplex defaults to half- +duplex. + +FlowControl +----------- +Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) +Default Value: Reads flow control settings from the EEPROM + +This parameter controls the automatic generation(Tx) and response(Rx) +to Ethernet PAUSE frames. + +InterruptThrottleRate +--------------------- +(not supported on Intel(R) 82542, 82543 or 82544-based adapters) +Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, + 4=simplified balancing) +Default Value: 3 + +The driver can limit the amount of interrupts per second that the adapter +will generate for incoming packets. It does this by writing a value to the +adapter that is based on the maximum amount of interrupts that the adapter +will generate per second. 
+ +Setting InterruptThrottleRate to a value greater or equal to 100 +will program the adapter to send out a maximum of that many interrupts +per second, even if more packets have come in. This reduces interrupt +load on the system and can lower CPU utilization under heavy load, +but will increase latency as packets are not processed as quickly. + +The default behaviour of the driver previously assumed a static +InterruptThrottleRate value of 8000, providing a good fallback value for +all traffic types,but lacking in small packet performance and latency. +The hardware can handle many more small packets per second however, and +for this reason an adaptive interrupt moderation algorithm was implemented. + +Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which +it dynamically adjusts the InterruptThrottleRate value based on the traffic +that it receives. After determining the type of incoming traffic in the last +timeframe, it will adjust the InterruptThrottleRate to an appropriate value +for that traffic. + +The algorithm classifies the incoming traffic every interval into +classes. Once the class is determined, the InterruptThrottleRate value is +adjusted to suit that traffic type the best. There are three classes defined: +"Bulk traffic", for large amounts of packets of normal size; "Low latency", +for small amounts of traffic and/or a significant percentage of small +packets; and "Lowest latency", for almost completely small packets or +minimal traffic. + +In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 +for traffic that falls in class "Bulk traffic". If traffic falls in the "Low +latency" or "Lowest latency" class, the InterruptThrottleRate is increased +stepwise to 20000. This default mode is suitable for most applications. + +For situations where low latency is vital such as cluster or +grid computing, the algorithm can reduce latency even more when +InterruptThrottleRate is set to mode 1. 
In this mode, which operates +the same as mode 3, the InterruptThrottleRate will be increased stepwise to +70000 for traffic in class "Lowest latency". + +In simplified mode the interrupt rate is based on the ratio of TX and +RX traffic. If the bytes per second rate is approximately equal, the +interrupt rate will drop as low as 2000 interrupts per second. If the +traffic is mostly transmit or mostly receive, the interrupt rate could +be as high as 8000. + +Setting InterruptThrottleRate to 0 turns off any interrupt moderation +and may improve small packet latency, but is generally not suitable +for bulk throughput traffic. + +NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and + RxAbsIntDelay parameters. In other words, minimizing the receive + and/or transmit absolute delays does not force the controller to + generate more interrupts than what the Interrupt Throttle Rate + allows. + +CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection + (controller 82547), setting InterruptThrottleRate to a value + greater than 75,000, may hang (stop transmitting) adapters + under certain network conditions. If this occurs a NETDEV + WATCHDOG message is logged in the system event log. In + addition, the controller is automatically reset, restoring + the network connection. To eliminate the potential for the + hang, ensure that InterruptThrottleRate is set no greater + than 75,000 and is not set to 0. + +NOTE: When e1000 is loaded with default settings and multiple adapters + are in use simultaneously, the CPU utilization may increase non- + linearly. In order to limit the CPU utilization without impacting + the overall throughput, we recommend that you load the driver as + follows:: + + modprobe e1000 InterruptThrottleRate=3000,3000,3000 + + This sets the InterruptThrottleRate to 3000 interrupts/sec for + the first, second, and third instances of the driver. 
The range + of 2000 to 3000 interrupts per second works on a majority of + systems and is a good starting point, but the optimal value will + be platform-specific. If CPU utilization is not a concern, use + RX_POLLING (NAPI) and default driver settings. + +RxDescriptors +------------- +Valid Range: 48-256 for 82542 and 82543-based adapters + 48-4096 for all other supported adapters +Default Value: 256 + +This value specifies the number of receive buffer descriptors allocated +by the driver. Increasing this value allows the driver to buffer more +incoming packets, at the expense of increased system memory utilization. + +Each descriptor is 16 bytes. A receive buffer is also allocated for each +descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending +on the MTU setting. The maximum MTU size is 16110. + +NOTE: MTU designates the frame size. It only needs to be set for Jumbo + Frames. Depending on the available system resources, the request + for a higher number of receive descriptors may be denied. In this + case, use a lower number. + +RxIntDelay +---------- +Valid Range: 0-65535 (0=off) +Default Value: 0 + +This value delays the generation of receive interrupts in units of 1.024 +microseconds. Receive interrupt reduction can improve CPU efficiency if +properly tuned for specific network traffic. Increasing this value adds +extra latency to frame reception and can end up decreasing the throughput +of TCP traffic. If the system is reporting dropped receives, this value +may be set too high, causing the driver to run out of available receive +descriptors. + +CAUTION: When setting RxIntDelay to a value other than 0, adapters may + hang (stop transmitting) under certain network conditions. If + this occurs a NETDEV WATCHDOG message is logged in the system + event log. In addition, the controller is automatically reset, + restoring the network connection. To eliminate the potential + for the hang ensure that RxIntDelay is set to 0. 
+ +RxAbsIntDelay +------------- +(This parameter is supported only on 82540, 82545 and later adapters.) +Valid Range: 0-65535 (0=off) +Default Value: 128 + +This value, in units of 1.024 microseconds, limits the delay in which a +receive interrupt is generated. Useful only if RxIntDelay is non-zero, +this value ensures that an interrupt is generated after the initial +packet is received within the set amount of time. Proper tuning, +along with RxIntDelay, may improve traffic throughput in specific network +conditions. + +Speed +----- +(This parameter is supported only on adapters with copper connections.) +Valid Settings: 0, 10, 100, 1000 +Default Value: 0 (auto-negotiate at all supported speeds) + +Speed forces the line speed to the specified value in megabits per second +(Mbps). If this parameter is not specified or is set to 0 and the link +partner is set to auto-negotiate, the board will auto-detect the correct +speed. Duplex should also be set when Speed is set to either 10 or 100. + +TxDescriptors +------------- +Valid Range: 48-256 for 82542 and 82543-based adapters + 48-4096 for all other supported adapters +Default Value: 256 + +This value is the number of transmit descriptors allocated by the driver. +Increasing this value allows the driver to queue more transmits. Each +descriptor is 16 bytes. + +NOTE: Depending on the available system resources, the request for a + higher number of transmit descriptors may be denied. In this case, + use a lower number. + +TxIntDelay +---------- +Valid Range: 0-65535 (0=off) +Default Value: 8 + +This value delays the generation of transmit interrupts in units of +1.024 microseconds. Transmit interrupt reduction can improve CPU +efficiency if properly tuned for specific network traffic. If the +system is reporting dropped transmits, this value may be set too high +causing the driver to run out of available transmit descriptors. 
+ +TxAbsIntDelay +------------- +(This parameter is supported only on 82540, 82545 and later adapters.) +Valid Range: 0-65535 (0=off) +Default Value: 32 + +This value, in units of 1.024 microseconds, limits the delay in which a +transmit interrupt is generated. Useful only if TxIntDelay is non-zero, +this value ensures that an interrupt is generated after the initial +packet is sent on the wire within the set amount of time. Proper tuning, +along with TxIntDelay, may improve traffic throughput in specific +network conditions. + +XsumRX +------ +(This parameter is NOT supported on the 82542-based adapter.) +Valid Range: 0-1 +Default Value: 1 + +A value of '1' indicates that the driver should enable IP checksum +offload for received packets (both UDP and TCP) to the adapter hardware. + +Copybreak +--------- +Valid Range: 0-xxxxxxx (0=off) +Default Value: 256 +Usage: modprobe e1000.ko copybreak=128 + +Driver copies all packets below or equaling this size to a fresh RX +buffer before handing it up the stack. + +This parameter is different than other parameters, in that it is a +single (not 1,1,1 etc.) parameter applied to all driver instances and +it is also available during runtime at +/sys/module/e1000/parameters/copybreak + +SmartPowerDownEnable +-------------------- +Valid Range: 0-1 +Default Value: 0 (disabled) + +Allows PHY to turn off in lower power states. The user can turn off +this parameter in supported chipsets. + +Speed and Duplex Configuration +============================== + +Three keywords are used to control the speed and duplex configuration. +These keywords are Speed, Duplex, and AutoNeg. + +If the board uses a fiber interface, these keywords are ignored, and the +fiber interface board only links at 1000 Mbps full-duplex. + +For copper-based boards, the keywords interact as follows: + + The default operation is auto-negotiate. 
The board advertises all + supported speed and duplex combinations, and it links at the highest + common speed and duplex mode IF the link partner is set to auto-negotiate. + + If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps + is advertised (The 1000BaseT spec requires auto-negotiation.) + + If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- + negotiation is disabled, and the AutoNeg parameter is ignored. Partner + SHOULD also be forced. + +The AutoNeg parameter is used when more control is required over the +auto-negotiation process. It should be used when you wish to control which +speed and duplex combinations are advertised during the auto-negotiation +process. + +The parameter may be specified as either a decimal or hexadecimal value as +determined by the bitmap below. + +Bit position 7 6 5 4 3 2 1 0 +Decimal Value 128 64 32 16 8 4 2 1 +Hex value 80 40 20 10 8 4 2 1 +Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 +Duplex Full Full Half Full Half + +Some examples of using AutoNeg: + + modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half) + modprobe e1000 AutoNeg=1 (Same as above) + modprobe e1000 AutoNeg=0x02 (Restricts autonegotiation to 10 Full) + modprobe e1000 AutoNeg=0x03 (Restricts autonegotiation to 10 Half or 10 Full) + modprobe e1000 AutoNeg=0x04 (Restricts autonegotiation to 100 Half) + modprobe e1000 AutoNeg=0x05 (Restricts autonegotiation to 10 Half or 100 + Half) + modprobe e1000 AutoNeg=0x020 (Restricts autonegotiation to 1000 Full) + modprobe e1000 AutoNeg=32 (Same as above) + +Note that when this parameter is used, Speed and Duplex must not be specified. + +If the link partner is forced to a specific speed and duplex, then this +parameter should not be used. Instead, use the Speed and Duplex parameters +previously mentioned to force the adapter to the same speed and duplex. 
+ +Additional Configurations +========================= + + Jumbo Frames + ------------ + Jumbo Frames support is enabled by changing the MTU to a value larger than + the default of 1500. Use the ifconfig command to increase the MTU size. + For example:: + + ifconfig eth mtu 9000 up + + This setting is not saved across reboots. It can be made permanent if + you add:: + + MTU=9000 + + to the file /etc/sysconfig/network-scripts/ifcfg-eth. This example + applies to the Red Hat distributions; other distributions may store this + setting in a different location. + + Notes: + Degradation in throughput performance may be observed in some Jumbo frames + environments. If this is observed, increasing the application's socket buffer + size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help. + See the specific application manual and /usr/src/linux*/Documentation/ + networking/ip-sysctl.txt for more details. + + - The maximum MTU setting for Jumbo Frames is 16110. This value coincides + with the maximum Jumbo Frames size of 16128. + + - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in + poor performance or loss of link. + + - Adapters based on the Intel(R) 82542 and 82573V/E controller do not + support Jumbo Frames. These correspond to the following product names: + Intel(R) PRO/1000 Gigabit Server Adapter + Intel(R) PRO/1000 PM Network Connection + + ethtool + ------- + The driver utilizes the ethtool interface for driver configuration and + diagnostics, as well as displaying statistical information. The ethtool + version 1.6 or later is required for this functionality. + + The latest release of ethtool can be found from + https://www.kernel.org/pub/software/network/ethtool/ + + Enabling Wake on LAN* (WoL) + --------------------------- + WoL is configured through the ethtool* utility. + + WoL will be enabled on the system during the next shut down or reboot. 
+ For this driver version, in order to enable WoL, the e1000 driver must be + loaded when shutting down or rebooting the system. + +Support +======= + +For general information, go to the Intel support website at: + + http://support.intel.com + +or the Intel Wired Networking project hosted by Sourceforge at: + + http://sourceforge.net/projects/e1000 + +If an issue is identified with the released source code on the supported +kernel with a supported adapter, email the specific information related +to the issue to e1000-devel@lists.sf.net diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt deleted file mode 100644 index 1f6ed848363d..000000000000 --- a/Documentation/networking/e1000.txt +++ /dev/null @@ -1,461 +0,0 @@ -Linux* Base Driver for Intel(R) Ethernet Network Connection -=========================================================== - -Intel Gigabit Linux driver. -Copyright(c) 1999 - 2013 Intel Corporation. - -Contents -======== - -- Identifying Your Adapter -- Command Line Parameters -- Speed and Duplex Configuration -- Additional Configurations -- Support - -Identifying Your Adapter -======================== - -For more information on how to identify your adapter, go to the Adapter & -Driver ID Guide at: - - http://support.intel.com/support/go/network/adapter/idguide.htm - -For the latest Intel network drivers for Linux, refer to the following -website. In the search field, enter your adapter name or type, or use the -networking link on the left to search for your adapter: - - http://support.intel.com/support/go/network/adapter/home.htm - -Command Line Parameters -======================= - -The default value for each parameter is generally the recommended setting, -unless otherwise noted. - -NOTES: For more information about the AutoNeg, Duplex, and Speed - parameters, see the "Speed and Duplex Configuration" section in - this document. 
- - For more information about the InterruptThrottleRate, - RxIntDelay, TxIntDelay, RxAbsIntDelay, and TxAbsIntDelay - parameters, see the application note at: - http://www.intel.com/design/network/applnots/ap450.htm - -AutoNeg -------- -(Supported only on adapters with copper connections) -Valid Range: 0x01-0x0F, 0x20-0x2F -Default Value: 0x2F - -This parameter is a bit-mask that specifies the speed and duplex settings -advertised by the adapter. When this parameter is used, the Speed and -Duplex parameters must not be specified. - -NOTE: Refer to the Speed and Duplex section of this readme for more - information on the AutoNeg parameter. - -Duplex ------- -(Supported only on adapters with copper connections) -Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) -Default Value: 0 - -This defines the direction in which data is allowed to flow. Can be -either one or two-directional. If both Duplex and the link partner are -set to auto-negotiate, the board auto-detects the correct duplex. If the -link partner is forced (either full or half), Duplex defaults to half- -duplex. - -FlowControl ------------ -Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) -Default Value: Reads flow control settings from the EEPROM - -This parameter controls the automatic generation(Tx) and response(Rx) -to Ethernet PAUSE frames. - -InterruptThrottleRate ---------------------- -(not supported on Intel(R) 82542, 82543 or 82544-based adapters) -Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, - 4=simplified balancing) -Default Value: 3 - -The driver can limit the amount of interrupts per second that the adapter -will generate for incoming packets. It does this by writing a value to the -adapter that is based on the maximum amount of interrupts that the adapter -will generate per second. 
- -Setting InterruptThrottleRate to a value greater or equal to 100 -will program the adapter to send out a maximum of that many interrupts -per second, even if more packets have come in. This reduces interrupt -load on the system and can lower CPU utilization under heavy load, -but will increase latency as packets are not processed as quickly. - -The default behaviour of the driver previously assumed a static -InterruptThrottleRate value of 8000, providing a good fallback value for -all traffic types,but lacking in small packet performance and latency. -The hardware can handle many more small packets per second however, and -for this reason an adaptive interrupt moderation algorithm was implemented. - -Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which -it dynamically adjusts the InterruptThrottleRate value based on the traffic -that it receives. After determining the type of incoming traffic in the last -timeframe, it will adjust the InterruptThrottleRate to an appropriate value -for that traffic. - -The algorithm classifies the incoming traffic every interval into -classes. Once the class is determined, the InterruptThrottleRate value is -adjusted to suit that traffic type the best. There are three classes defined: -"Bulk traffic", for large amounts of packets of normal size; "Low latency", -for small amounts of traffic and/or a significant percentage of small -packets; and "Lowest latency", for almost completely small packets or -minimal traffic. - -In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 -for traffic that falls in class "Bulk traffic". If traffic falls in the "Low -latency" or "Lowest latency" class, the InterruptThrottleRate is increased -stepwise to 20000. This default mode is suitable for most applications. - -For situations where low latency is vital such as cluster or -grid computing, the algorithm can reduce latency even more when -InterruptThrottleRate is set to mode 1. 
In this mode, which operates -the same as mode 3, the InterruptThrottleRate will be increased stepwise to -70000 for traffic in class "Lowest latency". - -In simplified mode the interrupt rate is based on the ratio of TX and -RX traffic. If the bytes per second rate is approximately equal, the -interrupt rate will drop as low as 2000 interrupts per second. If the -traffic is mostly transmit or mostly receive, the interrupt rate could -be as high as 8000. - -Setting InterruptThrottleRate to 0 turns off any interrupt moderation -and may improve small packet latency, but is generally not suitable -for bulk throughput traffic. - -NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and - RxAbsIntDelay parameters. In other words, minimizing the receive - and/or transmit absolute delays does not force the controller to - generate more interrupts than what the Interrupt Throttle Rate - allows. - -CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection - (controller 82547), setting InterruptThrottleRate to a value - greater than 75,000, may hang (stop transmitting) adapters - under certain network conditions. If this occurs a NETDEV - WATCHDOG message is logged in the system event log. In - addition, the controller is automatically reset, restoring - the network connection. To eliminate the potential for the - hang, ensure that InterruptThrottleRate is set no greater - than 75,000 and is not set to 0. - -NOTE: When e1000 is loaded with default settings and multiple adapters - are in use simultaneously, the CPU utilization may increase non- - linearly. In order to limit the CPU utilization without impacting - the overall throughput, we recommend that you load the driver as - follows: - - modprobe e1000 InterruptThrottleRate=3000,3000,3000 - - This sets the InterruptThrottleRate to 3000 interrupts/sec for - the first, second, and third instances of the driver. 
The range - of 2000 to 3000 interrupts per second works on a majority of - systems and is a good starting point, but the optimal value will - be platform-specific. If CPU utilization is not a concern, use - RX_POLLING (NAPI) and default driver settings. - -RxDescriptors -------------- -Valid Range: 80-256 for 82542 and 82543-based adapters - 80-4096 for all other supported adapters -Default Value: 256 - -This value specifies the number of receive buffer descriptors allocated -by the driver. Increasing this value allows the driver to buffer more -incoming packets, at the expense of increased system memory utilization. - -Each descriptor is 16 bytes. A receive buffer is also allocated for each -descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending -on the MTU setting. The maximum MTU size is 16110. - -NOTE: MTU designates the frame size. It only needs to be set for Jumbo - Frames. Depending on the available system resources, the request - for a higher number of receive descriptors may be denied. In this - case, use a lower number. - -RxIntDelay ----------- -Valid Range: 0-65535 (0=off) -Default Value: 0 - -This value delays the generation of receive interrupts in units of 1.024 -microseconds. Receive interrupt reduction can improve CPU efficiency if -properly tuned for specific network traffic. Increasing this value adds -extra latency to frame reception and can end up decreasing the throughput -of TCP traffic. If the system is reporting dropped receives, this value -may be set too high, causing the driver to run out of available receive -descriptors. - -CAUTION: When setting RxIntDelay to a value other than 0, adapters may - hang (stop transmitting) under certain network conditions. If - this occurs a NETDEV WATCHDOG message is logged in the system - event log. In addition, the controller is automatically reset, - restoring the network connection. To eliminate the potential - for the hang ensure that RxIntDelay is set to 0. 
- -RxAbsIntDelay -------------- -(This parameter is supported only on 82540, 82545 and later adapters.) -Valid Range: 0-65535 (0=off) -Default Value: 128 - -This value, in units of 1.024 microseconds, limits the delay in which a -receive interrupt is generated. Useful only if RxIntDelay is non-zero, -this value ensures that an interrupt is generated after the initial -packet is received within the set amount of time. Proper tuning, -along with RxIntDelay, may improve traffic throughput in specific network -conditions. - -Speed ------ -(This parameter is supported only on adapters with copper connections.) -Valid Settings: 0, 10, 100, 1000 -Default Value: 0 (auto-negotiate at all supported speeds) - -Speed forces the line speed to the specified value in megabits per second -(Mbps). If this parameter is not specified or is set to 0 and the link -partner is set to auto-negotiate, the board will auto-detect the correct -speed. Duplex should also be set when Speed is set to either 10 or 100. - -TxDescriptors -------------- -Valid Range: 80-256 for 82542 and 82543-based adapters - 80-4096 for all other supported adapters -Default Value: 256 - -This value is the number of transmit descriptors allocated by the driver. -Increasing this value allows the driver to queue more transmits. Each -descriptor is 16 bytes. - -NOTE: Depending on the available system resources, the request for a - higher number of transmit descriptors may be denied. In this case, - use a lower number. - -TxDescriptorStep ----------------- -Valid Range: 1 (use every Tx Descriptor) - 4 (use every 4th Tx Descriptor) - -Default Value: 1 (use every Tx Descriptor) - -On certain non-Intel architectures, it has been observed that intense TX -traffic bursts of short packets may result in an improper descriptor -writeback. 
If this occurs, the driver will report a "TX Timeout" and reset -the adapter, after which the transmit flow will restart, though data may -have stalled for as much as 10 seconds before it resumes. - -The improper writeback does not occur on the first descriptor in a system -memory cache-line, which is typically 32 bytes, or 4 descriptors long. - -Setting TxDescriptorStep to a value of 4 will ensure that all TX descriptors -are aligned to the start of a system memory cache line, and so this problem -will not occur. - -NOTES: Setting TxDescriptorStep to 4 effectively reduces the number of - TxDescriptors available for transmits to 1/4 of the normal allocation. - This has a possible negative performance impact, which may be - compensated for by allocating more descriptors using the TxDescriptors - module parameter. - - There are other conditions which may result in "TX Timeout", which will - not be resolved by the use of the TxDescriptorStep parameter. As the - issue addressed by this parameter has never been observed on Intel - Architecture platforms, it should not be used on Intel platforms. - -TxIntDelay ----------- -Valid Range: 0-65535 (0=off) -Default Value: 64 - -This value delays the generation of transmit interrupts in units of -1.024 microseconds. Transmit interrupt reduction can improve CPU -efficiency if properly tuned for specific network traffic. If the -system is reporting dropped transmits, this value may be set too high -causing the driver to run out of available transmit descriptors. - -TxAbsIntDelay -------------- -(This parameter is supported only on 82540, 82545 and later adapters.) -Valid Range: 0-65535 (0=off) -Default Value: 64 - -This value, in units of 1.024 microseconds, limits the delay in which a -transmit interrupt is generated. Useful only if TxIntDelay is non-zero, -this value ensures that an interrupt is generated after the initial -packet is sent on the wire within the set amount of time. 
Proper tuning, -along with TxIntDelay, may improve traffic throughput in specific -network conditions. - -XsumRX ------- -(This parameter is NOT supported on the 82542-based adapter.) -Valid Range: 0-1 -Default Value: 1 - -A value of '1' indicates that the driver should enable IP checksum -offload for received packets (both UDP and TCP) to the adapter hardware. - -Copybreak ---------- -Valid Range: 0-xxxxxxx (0=off) -Default Value: 256 -Usage: insmod e1000.ko copybreak=128 - -Driver copies all packets below or equaling this size to a fresh RX -buffer before handing it up the stack. - -This parameter is different than other parameters, in that it is a -single (not 1,1,1 etc.) parameter applied to all driver instances and -it is also available during runtime at -/sys/module/e1000/parameters/copybreak - -SmartPowerDownEnable --------------------- -Valid Range: 0-1 -Default Value: 0 (disabled) - -Allows PHY to turn off in lower power states. The user can turn off -this parameter in supported chipsets. - -KumeranLockLoss ---------------- -Valid Range: 0-1 -Default Value: 1 (enabled) - -This workaround skips resetting the PHY at shutdown for the initial -silicon releases of ICH8 systems. - -Speed and Duplex Configuration -============================== - -Three keywords are used to control the speed and duplex configuration. -These keywords are Speed, Duplex, and AutoNeg. - -If the board uses a fiber interface, these keywords are ignored, and the -fiber interface board only links at 1000 Mbps full-duplex. - -For copper-based boards, the keywords interact as follows: - - The default operation is auto-negotiate. The board advertises all - supported speed and duplex combinations, and it links at the highest - common speed and duplex mode IF the link partner is set to auto-negotiate. - - If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps - is advertised (The 1000BaseT spec requires auto-negotiation.) 
- - If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- - negotiation is disabled, and the AutoNeg parameter is ignored. Partner - SHOULD also be forced. - -The AutoNeg parameter is used when more control is required over the -auto-negotiation process. It should be used when you wish to control which -speed and duplex combinations are advertised during the auto-negotiation -process. - -The parameter may be specified as either a decimal or hexadecimal value as -determined by the bitmap below. - -Bit position 7 6 5 4 3 2 1 0 -Decimal Value 128 64 32 16 8 4 2 1 -Hex value 80 40 20 10 8 4 2 1 -Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 -Duplex Full Full Half Full Half - -Some examples of using AutoNeg: - - modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half) - modprobe e1000 AutoNeg=1 (Same as above) - modprobe e1000 AutoNeg=0x02 (Restricts autonegotiation to 10 Full) - modprobe e1000 AutoNeg=0x03 (Restricts autonegotiation to 10 Half or 10 Full) - modprobe e1000 AutoNeg=0x04 (Restricts autonegotiation to 100 Half) - modprobe e1000 AutoNeg=0x05 (Restricts autonegotiation to 10 Half or 100 - Half) - modprobe e1000 AutoNeg=0x020 (Restricts autonegotiation to 1000 Full) - modprobe e1000 AutoNeg=32 (Same as above) - -Note that when this parameter is used, Speed and Duplex must not be specified. - -If the link partner is forced to a specific speed and duplex, then this -parameter should not be used. Instead, use the Speed and Duplex parameters -previously mentioned to force the adapter to the same speed and duplex. - -Additional Configurations -========================= - - Jumbo Frames - ------------ - Jumbo Frames support is enabled by changing the MTU to a value larger than - the default of 1500. Use the ifconfig command to increase the MTU size. - For example: - - ifconfig eth mtu 9000 up - - This setting is not saved across reboots. 
It can be made permanent if - you add: - - MTU=9000 - - to the file /etc/sysconfig/network-scripts/ifcfg-eth. This example - applies to the Red Hat distributions; other distributions may store this - setting in a different location. - - Notes: - Degradation in throughput performance may be observed in some Jumbo frames - environments. If this is observed, increasing the application's socket buffer - size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help. - See the specific application manual and /usr/src/linux*/Documentation/ - networking/ip-sysctl.txt for more details. - - - The maximum MTU setting for Jumbo Frames is 16110. This value coincides - with the maximum Jumbo Frames size of 16128. - - - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in - poor performance or loss of link. - - - Adapters based on the Intel(R) 82542 and 82573V/E controller do not - support Jumbo Frames. These correspond to the following product names: - Intel(R) PRO/1000 Gigabit Server Adapter - Intel(R) PRO/1000 PM Network Connection - - ethtool - ------- - The driver utilizes the ethtool interface for driver configuration and - diagnostics, as well as displaying statistical information. The ethtool - version 1.6 or later is required for this functionality. - - The latest release of ethtool can be found from - https://www.kernel.org/pub/software/network/ethtool/ - - Enabling Wake on LAN* (WoL) - --------------------------- - WoL is configured through the ethtool* utility. - - WoL will be enabled on the system during the next shut down or reboot. - For this driver version, in order to enable WoL, the e1000 driver must be - loaded when shutting down or rebooting the system. 
- -Support -======= - -For general information, go to the Intel support website at: - - http://support.intel.com - -or the Intel Wired Networking project hosted by Sourceforge at: - - http://sourceforge.net/projects/e1000 - -If an issue is identified with the released source code on the supported -kernel with a supported adapter, email the specific information related -to the issue to e1000-devel@lists.sf.net diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index d11a62977edd..fec8588a588e 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -11,6 +11,7 @@ Contents: can dpaa2/index e100 + e1000 kapi z8530book msg_zerocopy diff --git a/MAINTAINERS b/MAINTAINERS index d68981ca9896..32472fbf4d6e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7090,7 +7090,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git S: Supported F: Documentation/networking/e100.rst -F: Documentation/networking/e1000.txt +F: Documentation/networking/e1000.rst F: Documentation/networking/e1000e.txt F: Documentation/networking/igb.txt F: Documentation/networking/igbvf.txt -- cgit v1.2.3 From 3ed45d7ffa565876e627c4716a8b8b4986a471b1 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Fri, 20 Apr 2018 12:37:14 -0700 Subject: MAINTAINERS: Add myself as a maintainer for SiFive's drivers There aren't actually any files in the tree that match these patterns right now, but we've just started submitting our drivers so I thought it would be good to make sure there's at least someone at SiFive who's listed as maintaining them. I'm leaving the RISC-V lists on here because: * As of today, all the RISC-V ASICs that people can actually buy are from SiFive -- though hopefully there'll be more soon! * The RTL for many of our devices is open source, so I anticipate these devices might make they way chips from other vendors. 
* We may standardize some of these devices as part of a RISC-V specification at some point in the future. I'm a bit swamped right now so I might not be the most active maintainer of these drivers, but I think it'd be good to make sure someone who has hardware access gets CC'd on updates to our drivers just as a sanity check. Hopefully that's an OK way to handle this. Signed-off-by: Palmer Dabbelt --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9c125f705f78..a8f906d1d1e2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12764,6 +12764,14 @@ F: drivers/media/usb/siano/ F: drivers/media/usb/siano/ F: drivers/media/mmc/siano/ +SIFIVE DRIVERS +M: Palmer Dabbelt +L: linux-riscv@lists.infradead.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git +S: Supported +K: sifive +N: sifive + SILEAD TOUCHSCREEN DRIVER M: Hans de Goede L: linux-input@vger.kernel.org -- cgit v1.2.3 From 9c5217640e381294c76f5256933a113386971b7c Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Fri, 27 Apr 2018 18:15:07 -0700 Subject: MAINTAINERS: Update Albert's email, he's back at Berkeley When I was adding a MAINTAINERS entry for SiFive's drivers I realized that Albert's email is out of date -- he's gone back to Berkeley, so his SiFive email is technically defunct. This patch updates his entry to a current email address, hosted at Berkeley. 
Signed-off-by: Palmer Dabbelt --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a8f906d1d1e2..c2d32726264f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12004,7 +12004,7 @@ F: drivers/mtd/nand/raw/r852.h RISC-V ARCHITECTURE M: Palmer Dabbelt -M: Albert Ou +M: Albert Ou L: linux-riscv@lists.infradead.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git S: Supported -- cgit v1.2.3 From 2562c011f89785dea5b7e12449c587527dba3d2f Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 4 Jun 2018 13:48:32 -0500 Subject: MAINTAINERS: add Josh Poimboeuf as faddr2line maintainer ... so I finally get credit for my greatest accomplishment. And, less importantly, so get_maintainer.pl will actually CC me on future patches. Signed-off-by: Josh Poimboeuf Signed-off-by: Linus Torvalds --- MAINTAINERS | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9c125f705f78..e97062c62d70 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5413,6 +5413,11 @@ S: Maintained F: Documentation/hwmon/f71805f F: drivers/hwmon/f71805f.c +FADDR2LINE +M: Josh Poimboeuf +S: Maintained +F: scripts/faddr2line + FANOTIFY M: Jan Kara R: Amir Goldstein -- cgit v1.2.3 From be65f9ed267fd7d8b3146b7c4be9ecdd3e0aa3ed Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 1 Jun 2018 10:59:48 +0200 Subject: staging: lustre: delete the filesystem from the tree. The Lustre filesystem has been in the kernel tree for over 5 years now. 
While it has been an endless source of enjoyment for new kernel developers learning how to do basic codingstyle cleanups, as well as a semi-entertaining source of bewilderment from the vfs developers any time they have looked into the codebase to try to figure out how to port their latest api changes to this filesystem, it has not really moved forward into the "this is in shape to get out of staging" despite many half-completed attempts. And getting code out of staging is the main goal of that portion of the kernel tree. Code should not stagnate and it feels like having this code in staging is only causing the development cycle of the filesystem to take longer than it should. There is a whole separate out-of-tree copy of this codebase where the developers work on it, and then random changes are thrown over the wall at staging at some later point in time. This dual-tree development model has never worked, and the state of this codebase is proof of that. So, let's just delete the whole mess. Now the lustre developers can go off and work in their out-of-tree codebase and not have to worry about providing valid changelog entries and breaking their patches up into logical pieces. They can take the time they have spent doing those types of housekeeping chores and get the codebase into a much better shape, and it can be submitted for inclusion into the real part of the kernel tree when ready. 
Cc: Oleg Drokin Cc: Andreas Dilger Cc: James Simmons Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 9 - drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - drivers/staging/lustre/Kconfig | 3 - drivers/staging/lustre/Makefile | 2 - drivers/staging/lustre/README.txt | 83 - drivers/staging/lustre/TODO | 302 -- .../staging/lustre/include/linux/libcfs/libcfs.h | 76 - .../lustre/include/linux/libcfs/libcfs_cpu.h | 434 -- .../lustre/include/linux/libcfs/libcfs_crypto.h | 208 - .../lustre/include/linux/libcfs/libcfs_debug.h | 207 - .../lustre/include/linux/libcfs/libcfs_fail.h | 194 - .../lustre/include/linux/libcfs/libcfs_hash.h | 869 ---- .../lustre/include/linux/libcfs/libcfs_private.h | 200 - .../lustre/include/linux/libcfs/libcfs_string.h | 102 - drivers/staging/lustre/include/linux/lnet/api.h | 212 - .../staging/lustre/include/linux/lnet/lib-lnet.h | 652 --- .../staging/lustre/include/linux/lnet/lib-types.h | 666 ---- .../staging/lustre/include/linux/lnet/socklnd.h | 87 - .../lustre/include/uapi/linux/lnet/libcfs_debug.h | 149 - .../lustre/include/uapi/linux/lnet/libcfs_ioctl.h | 141 - .../lustre/include/uapi/linux/lnet/lnet-dlc.h | 150 - .../lustre/include/uapi/linux/lnet/lnet-types.h | 669 ---- .../lustre/include/uapi/linux/lnet/lnetctl.h | 123 - .../lustre/include/uapi/linux/lnet/lnetst.h | 556 --- .../lustre/include/uapi/linux/lnet/nidstr.h | 119 - .../lustre/include/uapi/linux/lnet/socklnd.h | 44 - .../lustre/include/uapi/linux/lustre/lustre_cfg.h | 261 -- .../lustre/include/uapi/linux/lustre/lustre_fid.h | 293 -- .../include/uapi/linux/lustre/lustre_fiemap.h | 72 - .../lustre/include/uapi/linux/lustre/lustre_idl.h | 2690 ------------- .../include/uapi/linux/lustre/lustre_ioctl.h | 229 -- .../include/uapi/linux/lustre/lustre_kernelcomm.h | 94 - .../include/uapi/linux/lustre/lustre_ostid.h | 236 -- .../include/uapi/linux/lustre/lustre_param.h | 94 - .../lustre/include/uapi/linux/lustre/lustre_user.h | 1327 ------ 
.../lustre/include/uapi/linux/lustre/lustre_ver.h | 27 - drivers/staging/lustre/lnet/Kconfig | 46 - drivers/staging/lustre/lnet/Makefile | 1 - drivers/staging/lustre/lnet/klnds/Makefile | 1 - drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile | 5 - .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 2958 -------------- .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 1048 ----- .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 3763 ----------------- .../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c | 296 -- drivers/staging/lustre/lnet/klnds/socklnd/Makefile | 6 - .../staging/lustre/lnet/klnds/socklnd/socklnd.c | 2921 -------------- .../staging/lustre/lnet/klnds/socklnd/socklnd.h | 704 ---- .../staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 2586 ------------ .../lustre/lnet/klnds/socklnd/socklnd_lib.c | 534 --- .../lustre/lnet/klnds/socklnd/socklnd_modparams.c | 184 - .../lustre/lnet/klnds/socklnd/socklnd_proto.c | 810 ---- drivers/staging/lustre/lnet/libcfs/Makefile | 16 - drivers/staging/lustre/lnet/libcfs/debug.c | 461 --- drivers/staging/lustre/lnet/libcfs/fail.c | 146 - drivers/staging/lustre/lnet/libcfs/hash.c | 2065 ---------- drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c | 1086 ----- drivers/staging/lustre/lnet/libcfs/libcfs_lock.c | 155 - drivers/staging/lustre/lnet/libcfs/libcfs_mem.c | 171 - drivers/staging/lustre/lnet/libcfs/libcfs_string.c | 562 --- .../lustre/lnet/libcfs/linux-crypto-adler.c | 139 - drivers/staging/lustre/lnet/libcfs/linux-crypto.c | 447 --- drivers/staging/lustre/lnet/libcfs/linux-crypto.h | 30 - drivers/staging/lustre/lnet/libcfs/linux-debug.c | 142 - .../staging/lustre/lnet/libcfs/linux-tracefile.c | 258 -- drivers/staging/lustre/lnet/libcfs/module.c | 758 ---- drivers/staging/lustre/lnet/libcfs/tracefile.c | 1198 ------ drivers/staging/lustre/lnet/libcfs/tracefile.h | 274 -- drivers/staging/lustre/lnet/lnet/Makefile | 10 - drivers/staging/lustre/lnet/lnet/acceptor.c | 501 --- drivers/staging/lustre/lnet/lnet/api-ni.c | 2307 
----------- drivers/staging/lustre/lnet/lnet/config.c | 1235 ------ drivers/staging/lustre/lnet/lnet/lib-eq.c | 426 -- drivers/staging/lustre/lnet/lnet/lib-md.c | 463 --- drivers/staging/lustre/lnet/lnet/lib-me.c | 274 -- drivers/staging/lustre/lnet/lnet/lib-move.c | 2386 ----------- drivers/staging/lustre/lnet/lnet/lib-msg.c | 625 --- drivers/staging/lustre/lnet/lnet/lib-ptl.c | 987 ----- drivers/staging/lustre/lnet/lnet/lib-socket.c | 585 --- drivers/staging/lustre/lnet/lnet/lo.c | 105 - drivers/staging/lustre/lnet/lnet/module.c | 239 -- drivers/staging/lustre/lnet/lnet/net_fault.c | 1023 ----- drivers/staging/lustre/lnet/lnet/nidstrings.c | 1261 ------ drivers/staging/lustre/lnet/lnet/peer.c | 456 --- drivers/staging/lustre/lnet/lnet/router.c | 1799 --------- drivers/staging/lustre/lnet/lnet/router_proc.c | 907 ----- drivers/staging/lustre/lnet/selftest/Makefile | 7 - drivers/staging/lustre/lnet/selftest/brw_test.c | 526 --- drivers/staging/lustre/lnet/selftest/conctl.c | 801 ---- drivers/staging/lustre/lnet/selftest/conrpc.c | 1396 ------- drivers/staging/lustre/lnet/selftest/conrpc.h | 142 - drivers/staging/lustre/lnet/selftest/console.c | 2104 ---------- drivers/staging/lustre/lnet/selftest/console.h | 244 -- drivers/staging/lustre/lnet/selftest/framework.c | 1786 --------- drivers/staging/lustre/lnet/selftest/module.c | 169 - drivers/staging/lustre/lnet/selftest/ping_test.c | 228 -- drivers/staging/lustre/lnet/selftest/rpc.c | 1682 -------- drivers/staging/lustre/lnet/selftest/rpc.h | 295 -- drivers/staging/lustre/lnet/selftest/selftest.h | 622 --- drivers/staging/lustre/lnet/selftest/timer.c | 244 -- drivers/staging/lustre/lnet/selftest/timer.h | 50 - drivers/staging/lustre/lustre/Kconfig | 45 - drivers/staging/lustre/lustre/Makefile | 2 - drivers/staging/lustre/lustre/fid/Makefile | 5 - drivers/staging/lustre/lustre/fid/fid_internal.h | 46 - drivers/staging/lustre/lustre/fid/fid_lib.c | 87 - drivers/staging/lustre/lustre/fid/fid_request.c | 410 -- 
drivers/staging/lustre/lustre/fid/lproc_fid.c | 225 -- drivers/staging/lustre/lustre/fld/Makefile | 5 - drivers/staging/lustre/lustre/fld/fld_cache.c | 516 --- drivers/staging/lustre/lustre/fld/fld_internal.h | 170 - drivers/staging/lustre/lustre/fld/fld_request.c | 446 --- drivers/staging/lustre/lustre/fld/lproc_fld.c | 154 - drivers/staging/lustre/lustre/include/cl_object.h | 2463 ------------ .../staging/lustre/lustre/include/interval_tree.h | 119 - drivers/staging/lustre/lustre/include/llog_swab.h | 67 - .../staging/lustre/lustre/include/lprocfs_status.h | 646 --- drivers/staging/lustre/lustre/include/lu_object.h | 1305 ------ drivers/staging/lustre/lustre/include/lu_ref.h | 178 - drivers/staging/lustre/lustre/include/lustre_acl.h | 51 - .../staging/lustre/lustre/include/lustre_compat.h | 82 - .../staging/lustre/lustre/include/lustre_debug.h | 52 - .../staging/lustre/lustre/include/lustre_disk.h | 152 - drivers/staging/lustre/lustre/include/lustre_dlm.h | 1346 ------- .../lustre/lustre/include/lustre_dlm_flags.h | 402 -- .../staging/lustre/lustre/include/lustre_errno.h | 198 - .../staging/lustre/lustre/include/lustre_export.h | 250 -- drivers/staging/lustre/lustre/include/lustre_fid.h | 676 ---- drivers/staging/lustre/lustre/include/lustre_fld.h | 137 - drivers/staging/lustre/lustre/include/lustre_ha.h | 61 - .../staging/lustre/lustre/include/lustre_handles.h | 91 - .../staging/lustre/lustre/include/lustre_import.h | 369 -- .../staging/lustre/lustre/include/lustre_intent.h | 71 - .../lustre/lustre/include/lustre_kernelcomm.h | 56 - drivers/staging/lustre/lustre/include/lustre_lib.h | 126 - .../staging/lustre/lustre/include/lustre_linkea.h | 93 - drivers/staging/lustre/lustre/include/lustre_lmv.h | 174 - drivers/staging/lustre/lustre/include/lustre_log.h | 382 -- drivers/staging/lustre/lustre/include/lustre_mdc.h | 229 -- drivers/staging/lustre/lustre/include/lustre_mds.h | 62 - drivers/staging/lustre/lustre/include/lustre_net.h | 2360 ----------- 
drivers/staging/lustre/lustre/include/lustre_nrs.h | 718 ---- .../lustre/lustre/include/lustre_nrs_fifo.h | 71 - .../staging/lustre/lustre/include/lustre_obdo.h | 55 - .../lustre/include/lustre_patchless_compat.h | 68 - .../lustre/lustre/include/lustre_req_layout.h | 307 -- drivers/staging/lustre/lustre/include/lustre_sec.h | 1072 ----- .../staging/lustre/lustre/include/lustre_swab.h | 109 - drivers/staging/lustre/lustre/include/obd.h | 1114 ------ drivers/staging/lustre/lustre/include/obd_cksum.h | 153 - drivers/staging/lustre/lustre/include/obd_class.h | 1603 -------- .../staging/lustre/lustre/include/obd_support.h | 517 --- drivers/staging/lustre/lustre/include/seq_range.h | 200 - drivers/staging/lustre/lustre/ldlm/interval_tree.c | 599 --- drivers/staging/lustre/lustre/ldlm/l_lock.c | 73 - drivers/staging/lustre/lustre/ldlm/ldlm_extent.c | 258 -- drivers/staging/lustre/lustre/ldlm/ldlm_flock.c | 486 --- .../staging/lustre/lustre/ldlm/ldlm_inodebits.c | 69 - drivers/staging/lustre/lustre/ldlm/ldlm_internal.h | 342 -- drivers/staging/lustre/lustre/ldlm/ldlm_lib.c | 842 ---- drivers/staging/lustre/lustre/ldlm/ldlm_lock.c | 2135 ---------- drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c | 1163 ------ drivers/staging/lustre/lustre/ldlm/ldlm_plain.c | 68 - drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 1013 ----- drivers/staging/lustre/lustre/ldlm/ldlm_request.c | 2033 ---------- drivers/staging/lustre/lustre/ldlm/ldlm_resource.c | 1318 ------ drivers/staging/lustre/lustre/llite/Makefile | 13 - drivers/staging/lustre/lustre/llite/acl.c | 108 - drivers/staging/lustre/lustre/llite/dcache.c | 300 -- drivers/staging/lustre/lustre/llite/dir.c | 1708 -------- drivers/staging/lustre/lustre/llite/file.c | 3580 ----------------- drivers/staging/lustre/lustre/llite/glimpse.c | 205 - drivers/staging/lustre/lustre/llite/lcommon_cl.c | 292 -- drivers/staging/lustre/lustre/llite/lcommon_misc.c | 186 - .../staging/lustre/lustre/llite/llite_internal.h | 1344 ------- 
drivers/staging/lustre/lustre/llite/llite_lib.c | 2668 ------------- drivers/staging/lustre/lustre/llite/llite_mmap.c | 480 --- drivers/staging/lustre/lustre/llite/llite_nfs.c | 375 -- drivers/staging/lustre/lustre/llite/lproc_llite.c | 1659 -------- drivers/staging/lustre/lustre/llite/namei.c | 1207 ------ drivers/staging/lustre/lustre/llite/range_lock.c | 241 -- drivers/staging/lustre/lustre/llite/range_lock.h | 83 - drivers/staging/lustre/lustre/llite/rw.c | 1214 ------ drivers/staging/lustre/lustre/llite/rw26.c | 641 --- drivers/staging/lustre/lustre/llite/statahead.c | 1577 -------- drivers/staging/lustre/lustre/llite/super25.c | 189 - drivers/staging/lustre/lustre/llite/symlink.c | 159 - drivers/staging/lustre/lustre/llite/vvp_dev.c | 640 --- drivers/staging/lustre/lustre/llite/vvp_internal.h | 321 -- drivers/staging/lustre/lustre/llite/vvp_io.c | 1374 ------- drivers/staging/lustre/lustre/llite/vvp_lock.c | 87 - drivers/staging/lustre/lustre/llite/vvp_object.c | 303 -- drivers/staging/lustre/lustre/llite/vvp_page.c | 523 --- drivers/staging/lustre/lustre/llite/xattr.c | 665 ---- drivers/staging/lustre/lustre/llite/xattr_cache.c | 504 --- .../staging/lustre/lustre/llite/xattr_security.c | 96 - drivers/staging/lustre/lustre/lmv/Makefile | 5 - drivers/staging/lustre/lustre/lmv/lmv_fld.c | 82 - drivers/staging/lustre/lustre/lmv/lmv_intent.c | 521 --- drivers/staging/lustre/lustre/lmv/lmv_internal.h | 164 - drivers/staging/lustre/lustre/lmv/lmv_obd.c | 3131 --------------- drivers/staging/lustre/lustre/lmv/lproc_lmv.c | 173 - drivers/staging/lustre/lustre/lov/Makefile | 9 - .../staging/lustre/lustre/lov/lov_cl_internal.h | 639 --- drivers/staging/lustre/lustre/lov/lov_dev.c | 384 -- drivers/staging/lustre/lustre/lov/lov_ea.c | 331 -- drivers/staging/lustre/lustre/lov/lov_internal.h | 286 -- drivers/staging/lustre/lustre/lov/lov_io.c | 1023 ----- drivers/staging/lustre/lustre/lov/lov_lock.c | 348 -- drivers/staging/lustre/lustre/lov/lov_merge.c | 105 - 
drivers/staging/lustre/lustre/lov/lov_obd.c | 1444 ------- drivers/staging/lustre/lustre/lov/lov_object.c | 1625 -------- drivers/staging/lustre/lustre/lov/lov_offset.c | 269 -- drivers/staging/lustre/lustre/lov/lov_pack.c | 400 -- drivers/staging/lustre/lustre/lov/lov_page.c | 136 - drivers/staging/lustre/lustre/lov/lov_pool.c | 546 --- drivers/staging/lustre/lustre/lov/lov_request.c | 354 -- drivers/staging/lustre/lustre/lov/lovsub_dev.c | 147 - drivers/staging/lustre/lustre/lov/lovsub_lock.c | 81 - drivers/staging/lustre/lustre/lov/lovsub_object.c | 180 - drivers/staging/lustre/lustre/lov/lovsub_page.c | 68 - drivers/staging/lustre/lustre/lov/lproc_lov.c | 299 -- drivers/staging/lustre/lustre/mdc/Makefile | 5 - drivers/staging/lustre/lustre/mdc/lproc_mdc.c | 231 -- drivers/staging/lustre/lustre/mdc/mdc_internal.h | 144 - drivers/staging/lustre/lustre/mdc/mdc_lib.c | 498 --- drivers/staging/lustre/lustre/mdc/mdc_locks.c | 1239 ------ drivers/staging/lustre/lustre/mdc/mdc_reint.c | 421 -- drivers/staging/lustre/lustre/mdc/mdc_request.c | 2770 ------------- drivers/staging/lustre/lustre/mgc/Makefile | 5 - drivers/staging/lustre/lustre/mgc/lproc_mgc.c | 69 - drivers/staging/lustre/lustre/mgc/mgc_internal.h | 57 - drivers/staging/lustre/lustre/mgc/mgc_request.c | 1851 --------- drivers/staging/lustre/lustre/obdclass/Makefile | 12 - .../staging/lustre/lustre/obdclass/cl_internal.h | 95 - drivers/staging/lustre/lustre/obdclass/cl_io.c | 1151 ------ drivers/staging/lustre/lustre/obdclass/cl_lock.c | 275 -- drivers/staging/lustre/lustre/obdclass/cl_object.c | 1059 ----- drivers/staging/lustre/lustre/obdclass/cl_page.c | 1045 ----- drivers/staging/lustre/lustre/obdclass/class_obd.c | 544 --- drivers/staging/lustre/lustre/obdclass/debug.c | 96 - drivers/staging/lustre/lustre/obdclass/genops.c | 1480 ------- .../staging/lustre/lustre/obdclass/kernelcomm.c | 240 -- drivers/staging/lustre/lustre/obdclass/linkea.c | 249 -- .../lustre/lustre/obdclass/linux/linux-module.c | 514 
--- .../lustre/lustre/obdclass/linux/linux-sysctl.c | 162 - drivers/staging/lustre/lustre/obdclass/llog.c | 524 --- drivers/staging/lustre/lustre/obdclass/llog_cat.c | 236 -- .../staging/lustre/lustre/obdclass/llog_internal.h | 79 - drivers/staging/lustre/lustre/obdclass/llog_obd.c | 225 -- drivers/staging/lustre/lustre/obdclass/llog_swab.c | 412 -- .../lustre/lustre/obdclass/lprocfs_counters.c | 134 - .../lustre/lustre/obdclass/lprocfs_status.c | 1698 -------- drivers/staging/lustre/lustre/obdclass/lu_object.c | 2056 ---------- drivers/staging/lustre/lustre/obdclass/lu_ref.c | 45 - .../lustre/lustre/obdclass/lustre_handles.c | 241 -- .../staging/lustre/lustre/obdclass/lustre_peer.c | 214 - .../staging/lustre/lustre/obdclass/obd_config.c | 1538 ------- drivers/staging/lustre/lustre/obdclass/obd_mount.c | 1245 ------ drivers/staging/lustre/lustre/obdclass/obdo.c | 181 - .../staging/lustre/lustre/obdclass/statfs_pack.c | 58 - drivers/staging/lustre/lustre/obdclass/uuid.c | 45 - drivers/staging/lustre/lustre/obdecho/Makefile | 5 - .../staging/lustre/lustre/obdecho/echo_client.c | 1729 -------- .../staging/lustre/lustre/obdecho/echo_internal.h | 42 - drivers/staging/lustre/lustre/osc/Makefile | 6 - drivers/staging/lustre/lustre/osc/lproc_osc.c | 838 ---- drivers/staging/lustre/lustre/osc/osc_cache.c | 3306 --------------- .../staging/lustre/lustre/osc/osc_cl_internal.h | 681 ---- drivers/staging/lustre/lustre/osc/osc_dev.c | 246 -- drivers/staging/lustre/lustre/osc/osc_internal.h | 237 -- drivers/staging/lustre/lustre/osc/osc_io.c | 918 ----- drivers/staging/lustre/lustre/osc/osc_lock.c | 1230 ------ drivers/staging/lustre/lustre/osc/osc_object.c | 473 --- drivers/staging/lustre/lustre/osc/osc_page.c | 1094 ----- drivers/staging/lustre/lustre/osc/osc_quota.c | 236 -- drivers/staging/lustre/lustre/osc/osc_request.c | 2907 -------------- drivers/staging/lustre/lustre/ptlrpc/Makefile | 23 - drivers/staging/lustre/lustre/ptlrpc/client.c | 3271 --------------- 
drivers/staging/lustre/lustre/ptlrpc/connection.c | 192 - drivers/staging/lustre/lustre/ptlrpc/errno.c | 383 -- drivers/staging/lustre/lustre/ptlrpc/events.c | 585 --- drivers/staging/lustre/lustre/ptlrpc/import.c | 1677 -------- drivers/staging/lustre/lustre/ptlrpc/layout.c | 2232 ----------- drivers/staging/lustre/lustre/ptlrpc/llog_client.c | 338 -- drivers/staging/lustre/lustre/ptlrpc/llog_net.c | 67 - .../staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c | 1316 ------ drivers/staging/lustre/lustre/ptlrpc/niobuf.c | 771 ---- drivers/staging/lustre/lustre/ptlrpc/nrs.c | 1613 -------- drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c | 270 -- .../staging/lustre/lustre/ptlrpc/pack_generic.c | 2311 ----------- drivers/staging/lustre/lustre/ptlrpc/pers.c | 72 - drivers/staging/lustre/lustre/ptlrpc/pinger.c | 474 --- .../staging/lustre/lustre/ptlrpc/ptlrpc_internal.h | 371 -- .../staging/lustre/lustre/ptlrpc/ptlrpc_module.c | 186 - drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c | 914 ----- drivers/staging/lustre/lustre/ptlrpc/recover.c | 374 -- drivers/staging/lustre/lustre/ptlrpc/sec.c | 2379 ----------- drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 572 --- drivers/staging/lustre/lustre/ptlrpc/sec_config.c | 850 ---- drivers/staging/lustre/lustre/ptlrpc/sec_gc.c | 190 - drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c | 170 - drivers/staging/lustre/lustre/ptlrpc/sec_null.c | 459 --- drivers/staging/lustre/lustre/ptlrpc/sec_plain.c | 1023 ----- drivers/staging/lustre/lustre/ptlrpc/service.c | 2807 ------------- drivers/staging/lustre/lustre/ptlrpc/wiretest.c | 4210 -------------------- drivers/staging/lustre/sysfs-fs-lustre | 654 --- scripts/selinux/mdp/mdp.c | 1 - 308 files changed, 195272 deletions(-) delete mode 100644 drivers/staging/lustre/Kconfig delete mode 100644 drivers/staging/lustre/Makefile delete mode 100644 drivers/staging/lustre/README.txt delete mode 100644 drivers/staging/lustre/TODO delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs.h 
delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs_private.h delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs_string.h delete mode 100644 drivers/staging/lustre/include/linux/lnet/api.h delete mode 100644 drivers/staging/lustre/include/linux/lnet/lib-lnet.h delete mode 100644 drivers/staging/lustre/include/linux/lnet/lib-types.h delete mode 100644 drivers/staging/lustre/include/linux/lnet/socklnd.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h delete mode 100644 
drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h delete mode 100644 drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h delete mode 100644 drivers/staging/lustre/lnet/Kconfig delete mode 100644 drivers/staging/lustre/lnet/Makefile delete mode 100644 drivers/staging/lustre/lnet/klnds/Makefile delete mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile delete mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c delete mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h delete mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c delete mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/Makefile delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/Makefile delete mode 100644 drivers/staging/lustre/lnet/libcfs/debug.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/fail.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/hash.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/libcfs_lock.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/libcfs_mem.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/libcfs_string.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/linux-crypto-adler.c delete mode 100644 
drivers/staging/lustre/lnet/libcfs/linux-crypto.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/linux-crypto.h delete mode 100644 drivers/staging/lustre/lnet/libcfs/linux-debug.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/linux-tracefile.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/module.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/tracefile.c delete mode 100644 drivers/staging/lustre/lnet/libcfs/tracefile.h delete mode 100644 drivers/staging/lustre/lnet/lnet/Makefile delete mode 100644 drivers/staging/lustre/lnet/lnet/acceptor.c delete mode 100644 drivers/staging/lustre/lnet/lnet/api-ni.c delete mode 100644 drivers/staging/lustre/lnet/lnet/config.c delete mode 100644 drivers/staging/lustre/lnet/lnet/lib-eq.c delete mode 100644 drivers/staging/lustre/lnet/lnet/lib-md.c delete mode 100644 drivers/staging/lustre/lnet/lnet/lib-me.c delete mode 100644 drivers/staging/lustre/lnet/lnet/lib-move.c delete mode 100644 drivers/staging/lustre/lnet/lnet/lib-msg.c delete mode 100644 drivers/staging/lustre/lnet/lnet/lib-ptl.c delete mode 100644 drivers/staging/lustre/lnet/lnet/lib-socket.c delete mode 100644 drivers/staging/lustre/lnet/lnet/lo.c delete mode 100644 drivers/staging/lustre/lnet/lnet/module.c delete mode 100644 drivers/staging/lustre/lnet/lnet/net_fault.c delete mode 100644 drivers/staging/lustre/lnet/lnet/nidstrings.c delete mode 100644 drivers/staging/lustre/lnet/lnet/peer.c delete mode 100644 drivers/staging/lustre/lnet/lnet/router.c delete mode 100644 drivers/staging/lustre/lnet/lnet/router_proc.c delete mode 100644 drivers/staging/lustre/lnet/selftest/Makefile delete mode 100644 drivers/staging/lustre/lnet/selftest/brw_test.c delete mode 100644 drivers/staging/lustre/lnet/selftest/conctl.c delete mode 100644 drivers/staging/lustre/lnet/selftest/conrpc.c delete mode 100644 drivers/staging/lustre/lnet/selftest/conrpc.h delete mode 100644 drivers/staging/lustre/lnet/selftest/console.c delete mode 100644 
drivers/staging/lustre/lnet/selftest/console.h delete mode 100644 drivers/staging/lustre/lnet/selftest/framework.c delete mode 100644 drivers/staging/lustre/lnet/selftest/module.c delete mode 100644 drivers/staging/lustre/lnet/selftest/ping_test.c delete mode 100644 drivers/staging/lustre/lnet/selftest/rpc.c delete mode 100644 drivers/staging/lustre/lnet/selftest/rpc.h delete mode 100644 drivers/staging/lustre/lnet/selftest/selftest.h delete mode 100644 drivers/staging/lustre/lnet/selftest/timer.c delete mode 100644 drivers/staging/lustre/lnet/selftest/timer.h delete mode 100644 drivers/staging/lustre/lustre/Kconfig delete mode 100644 drivers/staging/lustre/lustre/Makefile delete mode 100644 drivers/staging/lustre/lustre/fid/Makefile delete mode 100644 drivers/staging/lustre/lustre/fid/fid_internal.h delete mode 100644 drivers/staging/lustre/lustre/fid/fid_lib.c delete mode 100644 drivers/staging/lustre/lustre/fid/fid_request.c delete mode 100644 drivers/staging/lustre/lustre/fid/lproc_fid.c delete mode 100644 drivers/staging/lustre/lustre/fld/Makefile delete mode 100644 drivers/staging/lustre/lustre/fld/fld_cache.c delete mode 100644 drivers/staging/lustre/lustre/fld/fld_internal.h delete mode 100644 drivers/staging/lustre/lustre/fld/fld_request.c delete mode 100644 drivers/staging/lustre/lustre/fld/lproc_fld.c delete mode 100644 drivers/staging/lustre/lustre/include/cl_object.h delete mode 100644 drivers/staging/lustre/lustre/include/interval_tree.h delete mode 100644 drivers/staging/lustre/lustre/include/llog_swab.h delete mode 100644 drivers/staging/lustre/lustre/include/lprocfs_status.h delete mode 100644 drivers/staging/lustre/lustre/include/lu_object.h delete mode 100644 drivers/staging/lustre/lustre/include/lu_ref.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_acl.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_compat.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_debug.h delete mode 100644 
drivers/staging/lustre/lustre/include/lustre_disk.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_dlm.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_dlm_flags.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_errno.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_export.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_fid.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_fld.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_ha.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_handles.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_import.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_intent.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_kernelcomm.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_lib.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_linkea.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_lmv.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_log.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_mdc.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_mds.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_net.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_nrs.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_obdo.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_patchless_compat.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_req_layout.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_sec.h delete mode 100644 drivers/staging/lustre/lustre/include/lustre_swab.h delete mode 100644 drivers/staging/lustre/lustre/include/obd.h delete mode 100644 drivers/staging/lustre/lustre/include/obd_cksum.h delete 
mode 100644 drivers/staging/lustre/lustre/include/obd_class.h delete mode 100644 drivers/staging/lustre/lustre/include/obd_support.h delete mode 100644 drivers/staging/lustre/lustre/include/seq_range.h delete mode 100644 drivers/staging/lustre/lustre/ldlm/interval_tree.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/l_lock.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_extent.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_flock.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_internal.h delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_lib.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_lock.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_plain.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_pool.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_request.c delete mode 100644 drivers/staging/lustre/lustre/ldlm/ldlm_resource.c delete mode 100644 drivers/staging/lustre/lustre/llite/Makefile delete mode 100644 drivers/staging/lustre/lustre/llite/acl.c delete mode 100644 drivers/staging/lustre/lustre/llite/dcache.c delete mode 100644 drivers/staging/lustre/lustre/llite/dir.c delete mode 100644 drivers/staging/lustre/lustre/llite/file.c delete mode 100644 drivers/staging/lustre/lustre/llite/glimpse.c delete mode 100644 drivers/staging/lustre/lustre/llite/lcommon_cl.c delete mode 100644 drivers/staging/lustre/lustre/llite/lcommon_misc.c delete mode 100644 drivers/staging/lustre/lustre/llite/llite_internal.h delete mode 100644 drivers/staging/lustre/lustre/llite/llite_lib.c delete mode 100644 drivers/staging/lustre/lustre/llite/llite_mmap.c delete mode 100644 drivers/staging/lustre/lustre/llite/llite_nfs.c delete mode 100644 drivers/staging/lustre/lustre/llite/lproc_llite.c delete mode 100644 
drivers/staging/lustre/lustre/llite/namei.c delete mode 100644 drivers/staging/lustre/lustre/llite/range_lock.c delete mode 100644 drivers/staging/lustre/lustre/llite/range_lock.h delete mode 100644 drivers/staging/lustre/lustre/llite/rw.c delete mode 100644 drivers/staging/lustre/lustre/llite/rw26.c delete mode 100644 drivers/staging/lustre/lustre/llite/statahead.c delete mode 100644 drivers/staging/lustre/lustre/llite/super25.c delete mode 100644 drivers/staging/lustre/lustre/llite/symlink.c delete mode 100644 drivers/staging/lustre/lustre/llite/vvp_dev.c delete mode 100644 drivers/staging/lustre/lustre/llite/vvp_internal.h delete mode 100644 drivers/staging/lustre/lustre/llite/vvp_io.c delete mode 100644 drivers/staging/lustre/lustre/llite/vvp_lock.c delete mode 100644 drivers/staging/lustre/lustre/llite/vvp_object.c delete mode 100644 drivers/staging/lustre/lustre/llite/vvp_page.c delete mode 100644 drivers/staging/lustre/lustre/llite/xattr.c delete mode 100644 drivers/staging/lustre/lustre/llite/xattr_cache.c delete mode 100644 drivers/staging/lustre/lustre/llite/xattr_security.c delete mode 100644 drivers/staging/lustre/lustre/lmv/Makefile delete mode 100644 drivers/staging/lustre/lustre/lmv/lmv_fld.c delete mode 100644 drivers/staging/lustre/lustre/lmv/lmv_intent.c delete mode 100644 drivers/staging/lustre/lustre/lmv/lmv_internal.h delete mode 100644 drivers/staging/lustre/lustre/lmv/lmv_obd.c delete mode 100644 drivers/staging/lustre/lustre/lmv/lproc_lmv.c delete mode 100644 drivers/staging/lustre/lustre/lov/Makefile delete mode 100644 drivers/staging/lustre/lustre/lov/lov_cl_internal.h delete mode 100644 drivers/staging/lustre/lustre/lov/lov_dev.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_ea.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_internal.h delete mode 100644 drivers/staging/lustre/lustre/lov/lov_io.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_lock.c delete mode 100644 
drivers/staging/lustre/lustre/lov/lov_merge.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_obd.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_object.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_offset.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_pack.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_page.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_pool.c delete mode 100644 drivers/staging/lustre/lustre/lov/lov_request.c delete mode 100644 drivers/staging/lustre/lustre/lov/lovsub_dev.c delete mode 100644 drivers/staging/lustre/lustre/lov/lovsub_lock.c delete mode 100644 drivers/staging/lustre/lustre/lov/lovsub_object.c delete mode 100644 drivers/staging/lustre/lustre/lov/lovsub_page.c delete mode 100644 drivers/staging/lustre/lustre/lov/lproc_lov.c delete mode 100644 drivers/staging/lustre/lustre/mdc/Makefile delete mode 100644 drivers/staging/lustre/lustre/mdc/lproc_mdc.c delete mode 100644 drivers/staging/lustre/lustre/mdc/mdc_internal.h delete mode 100644 drivers/staging/lustre/lustre/mdc/mdc_lib.c delete mode 100644 drivers/staging/lustre/lustre/mdc/mdc_locks.c delete mode 100644 drivers/staging/lustre/lustre/mdc/mdc_reint.c delete mode 100644 drivers/staging/lustre/lustre/mdc/mdc_request.c delete mode 100644 drivers/staging/lustre/lustre/mgc/Makefile delete mode 100644 drivers/staging/lustre/lustre/mgc/lproc_mgc.c delete mode 100644 drivers/staging/lustre/lustre/mgc/mgc_internal.h delete mode 100644 drivers/staging/lustre/lustre/mgc/mgc_request.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/Makefile delete mode 100644 drivers/staging/lustre/lustre/obdclass/cl_internal.h delete mode 100644 drivers/staging/lustre/lustre/obdclass/cl_io.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/cl_lock.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/cl_object.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/cl_page.c delete mode 100644 
drivers/staging/lustre/lustre/obdclass/class_obd.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/debug.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/genops.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/kernelcomm.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/linkea.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/linux/linux-module.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/llog.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/llog_cat.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/llog_internal.h delete mode 100644 drivers/staging/lustre/lustre/obdclass/llog_obd.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/llog_swab.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/lprocfs_status.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/lu_object.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/lu_ref.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/lustre_handles.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/lustre_peer.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/obd_config.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/obd_mount.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/obdo.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/statfs_pack.c delete mode 100644 drivers/staging/lustre/lustre/obdclass/uuid.c delete mode 100644 drivers/staging/lustre/lustre/obdecho/Makefile delete mode 100644 drivers/staging/lustre/lustre/obdecho/echo_client.c delete mode 100644 drivers/staging/lustre/lustre/obdecho/echo_internal.h delete mode 100644 drivers/staging/lustre/lustre/osc/Makefile delete mode 100644 drivers/staging/lustre/lustre/osc/lproc_osc.c delete mode 100644 
drivers/staging/lustre/lustre/osc/osc_cache.c delete mode 100644 drivers/staging/lustre/lustre/osc/osc_cl_internal.h delete mode 100644 drivers/staging/lustre/lustre/osc/osc_dev.c delete mode 100644 drivers/staging/lustre/lustre/osc/osc_internal.h delete mode 100644 drivers/staging/lustre/lustre/osc/osc_io.c delete mode 100644 drivers/staging/lustre/lustre/osc/osc_lock.c delete mode 100644 drivers/staging/lustre/lustre/osc/osc_object.c delete mode 100644 drivers/staging/lustre/lustre/osc/osc_page.c delete mode 100644 drivers/staging/lustre/lustre/osc/osc_quota.c delete mode 100644 drivers/staging/lustre/lustre/osc/osc_request.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/Makefile delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/client.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/connection.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/errno.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/events.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/import.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/layout.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/llog_client.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/llog_net.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/niobuf.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/nrs.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/pack_generic.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/pers.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/pinger.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/recover.c delete mode 100644 
drivers/staging/lustre/lustre/ptlrpc/sec.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/sec_config.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/sec_gc.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/sec_null.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/sec_plain.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/service.c delete mode 100644 drivers/staging/lustre/lustre/ptlrpc/wiretest.c delete mode 100644 drivers/staging/lustre/sysfs-fs-lustre (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 4b65225d443a..db158767de20 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13329,15 +13329,6 @@ S: Odd Fixes F: Documentation/devicetree/bindings/staging/iio/ F: drivers/staging/iio/ -STAGING - LUSTRE PARALLEL FILESYSTEM -M: Oleg Drokin -M: Andreas Dilger -M: James Simmons -L: lustre-devel@lists.lustre.org (moderated for non-subscribers) -W: http://wiki.lustre.org/ -S: Maintained -F: drivers/staging/lustre - STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) M: Marc Dietrich L: ac100@lists.launchpad.net (moderated for non-subscribers) diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index d5926f0d3f6c..1c357ef669ae 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -84,8 +84,6 @@ source "drivers/staging/netlogic/Kconfig" source "drivers/staging/mt29f_spinand/Kconfig" -source "drivers/staging/lustre/Kconfig" - source "drivers/staging/dgnc/Kconfig" source "drivers/staging/gs_fpgaboot/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 919753c3d3f6..2edb9860931e 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -32,7 +32,6 @@ obj-$(CONFIG_STAGING_BOARD) += board/ obj-$(CONFIG_LTE_GDM724X) += gdm724x/ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/ obj-$(CONFIG_GOLDFISH) += goldfish/ 
-obj-$(CONFIG_LNET) += lustre/ obj-$(CONFIG_DGNC) += dgnc/ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/ obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ diff --git a/drivers/staging/lustre/Kconfig b/drivers/staging/lustre/Kconfig deleted file mode 100644 index b7d81096eee9..000000000000 --- a/drivers/staging/lustre/Kconfig +++ /dev/null @@ -1,3 +0,0 @@ -source "drivers/staging/lustre/lnet/Kconfig" - -source "drivers/staging/lustre/lustre/Kconfig" diff --git a/drivers/staging/lustre/Makefile b/drivers/staging/lustre/Makefile deleted file mode 100644 index 95ffe337a80a..000000000000 --- a/drivers/staging/lustre/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -obj-$(CONFIG_LNET) += lnet/ -obj-$(CONFIG_LUSTRE_FS) += lustre/ diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt deleted file mode 100644 index 783959240490..000000000000 --- a/drivers/staging/lustre/README.txt +++ /dev/null @@ -1,83 +0,0 @@ -Lustre Parallel Filesystem Client -================================= - -The Lustre file system is an open-source, parallel file system -that supports many requirements of leadership class HPC simulation -environments. -Born from a research project at Carnegie Mellon University, -the Lustre file system is a widely-used option in HPC. -The Lustre file system provides a POSIX compliant file system interface, -can scale to thousands of clients, petabytes of storage and -hundreds of gigabytes per second of I/O bandwidth. - -Unlike shared disk storage cluster filesystems (e.g. OCFS2, GFS, GPFS), -Lustre has independent Metadata and Data servers that clients can access -in parallel to maximize performance. - -In order to use Lustre client you will need to download the "lustre-client" -package that contains the userspace tools from http://lustre.org/download/ - -You will need to install and configure your Lustre servers separately. 
- -Mount Syntax -============ -After you installed the lustre-client tools including mount.lustre binary -you can mount your Lustre filesystem with: - -mount -t lustre mgs:/fsname mnt - -where mgs is the host name or ip address of your Lustre MGS(management service) -fsname is the name of the filesystem you would like to mount. - - -Mount Options -============= - - noflock - Disable posix file locking (Applications trying to use - the functionality will get ENOSYS) - - localflock - Enable local flock support, using only client-local flock - (faster, for applications that require flock but do not run - on multiple nodes). - - flock - Enable cluster-global posix file locking coherent across all - client nodes. - - user_xattr, nouser_xattr - Support "user." extended attributes (or not) - - user_fid2path, nouser_fid2path - Enable FID to path translation by regular users (or not) - - checksum, nochecksum - Verify data consistency on the wire and in memory as it passes - between the layers (or not). - - lruresize, nolruresize - Allow lock LRU to be controlled by memory pressure on the server - (or only 100 (default, controlled by lru_size proc parameter) locks - per CPU per server on this client). - - lazystatfs, nolazystatfs - Do not block in statfs() if some of the servers are down. - - 32bitapi - Shrink inode numbers to fit into 32 bits. This is necessary - if you plan to reexport Lustre filesystem from this client via - NFSv4. 
- - verbose, noverbose - Enable mount/umount console messages (or not) - -More Information -================ -You can get more information at the Lustre website: http://wiki.lustre.org/ - -Source for the userspace tools and out-of-tree client and server code -is available at: http://git.hpdd.intel.com/fs/lustre-release.git - -Latest binary packages: -http://lustre.org/download/ diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO deleted file mode 100644 index 5332cdb19bfa..000000000000 --- a/drivers/staging/lustre/TODO +++ /dev/null @@ -1,302 +0,0 @@ -Currently all the work directed toward the lustre upstream client is tracked -at the following link: - -https://jira.hpdd.intel.com/browse/LU-9679 - -Under this ticket you will see the following work items that need to be -addressed: - -****************************************************************************** -* libcfs cleanup -* -* https://jira.hpdd.intel.com/browse/LU-9859 -* -* Track all the cleanups and simplification of the libcfs module. Remove -* functions the kernel provides. Possibly integrate some of the functionality -* into the kernel proper. -* -****************************************************************************** - -https://jira.hpdd.intel.com/browse/LU-100086 - -LNET_MINOR conflicts with USERIO_MINOR - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8130 - -Fix and simplify libcfs hash handling - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8703 - -The current way we handle SMP is wrong. Platforms like ARM and KNL can have -core and NUMA setups with things like NUMA nodes with no cores. We need to -handle such cases. This work also greatly simplified the lustre SMP code. 
- ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9019 - -Replace libcfs time API with standard kernel APIs. Also migrate away from -jiffies. We found jiffies can vary on nodes which can lead to corner cases -that can break the file system due to nodes having inconsistent behavior. -So move to time64_t and ktime_t as much as possible. - -****************************************************************************** -* Proper IB support for ko2iblnd -****************************************************************************** -https://jira.hpdd.intel.com/browse/LU-9179 - -Poor performance for the ko2iblnd driver. This is related to many of the -patches below that are missing from the linux client. ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9886 - -Crash in upstream kiblnd_handle_early_rxs() ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-10394 / LU-10526 / LU-10089 - -Default to default to using MEM_REG ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-10459 - -throttle tx based on queue depth ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9943 - -correct WR fast reg accounting ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-10291 - -remove concurrent_sends tunable ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-10213 - -calculate qp max_send_wrs properly ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9810 - -use less CQ entries for each connection 
------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-10129 / LU-9180 - -rework map_on_demand behavior ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-10129 - -query device capabilities ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-10015 - -fix race at kiblnd_connect_peer ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9983 - -allow for discontiguous fragments ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9500 - -Don't Page Align remote_addr with FastReg ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9448 - -handle empty CPTs ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9507 - -Don't Assert On Reconnect with MultiQP ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9472 - -Fix FastReg map/unmap for MLX5 ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9425 - -Turn on 2 sges by default ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8943 - -Enable Multiple OPA Endpoints between Nodes ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-5718 - -multiple sges for work request ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9094 - -kill timedout txs from ibp_tx_queue 
------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9094 - -reconnect peer for REJ_INVALID_SERVICE_ID ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8752 - -Stop MLX5 triggering a dump_cqe ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8874 - -Move ko2iblnd to latest RDMA changes ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8875 / LU-8874 - -Change to new RDMA done callback mechanism - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9164 / LU-8874 - -Incorporate RDMA map/unamp API's into ko2iblnd - -****************************************************************************** -* sysfs/debugfs fixes -* -* https://jira.hpdd.intel.com/browse/LU-8066 -* -* The original migration to sysfs was done in haste without properly working -* utilities to test the changes. This covers the work to restore the proper -* behavior. Huge project to make this right. -* -****************************************************************************** - -https://jira.hpdd.intel.com/browse/LU-9431 - -The function class_process_proc_param was used for our mass updates of proc -tunables. It didn't work with sysfs and it was just ugly so it was removed. -In the process the ability to mass update thousands of clients was lost. This -work restores this in a sane way. - ------------------------------------------------------------------------------- -https://jira.hpdd.intel.com/browse/LU-9091 - -One the major request of users is the ability to pass in parameters into a -sysfs file in various different units. For example we can set max_pages_per_rpc -but this can vary on platforms due to different platform sizes. 
So you can -set this like max_pages_per_rpc=16MiB. The original code to handle this written -before the string helpers were created so the code doesn't follow that format -but it would be easy to move to. Currently the string helpers does the reverse -of what we need, changing bytes to string. We need to change a string to bytes. - -****************************************************************************** -* Proper user land to kernel space interface for Lustre -* -* https://jira.hpdd.intel.com/browse/LU-9680 -* -****************************************************************************** - -https://jira.hpdd.intel.com/browse/LU-8915 - -Don't use linux list structure as user land arguments for lnet selftest. -This code is pretty poor quality and really needs to be reworked. - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8834 - -The lustre ioctl LL_IOC_FUTIMES_3 is very generic. Need to either work with -other file systems with similar functionality and make a common syscall -interface or rework our server code to automagically do it for us. - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-6202 - -Cleanup up ioctl handling. We have many obsolete ioctls. Also the way we do -ioctls can be changed over to netlink. This also has the benefit of working -better with HPC systems that do IO forwarding. Such systems don't like ioctls -very well. - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9667 - -More cleanups by making our utilities use sysfs instead of ioctls for LNet. -Also it has been requested to move the remaining ioctls to the netlink API. 
- -****************************************************************************** -* Misc -****************************************************************************** - ------------------------------------------------------------------------------- -https://jira.hpdd.intel.com/browse/LU-9855 - -Clean up obdclass preprocessor code. One of the major eye sores is the various -pointer redirections and macros used by the obdclass. This makes the code very -difficult to understand. It was requested by the Al Viro to clean this up before -we leave staging. - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9633 - -Migrate to sphinx kernel-doc style comments. Add documents in Documentation. - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-6142 - -Possible remaining coding style fix. Remove deadcode. Enforce kernel code -style. Other minor misc cleanups... - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8837 - -Separate client/server functionality. Functions only used by server can be -removed from client. Most of this has been done but we need a inspect of the -code to make sure. - ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-8964 - -Lustre client readahead/writeback control needs to better suit kernel providings. -Currently its being explored. We could end up replacing the CLIO read ahead -abstract with the kernel proper version. 
- ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9862 - -Patch that landed for LU-7890 leads to static checker errors ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-9868 - -dcache/namei fixes for lustre ------------------------------------------------------------------------------- - -https://jira.hpdd.intel.com/browse/LU-10467 - -use standard linux wait_events macros work by Neil Brown - ------------------------------------------------------------------------------- - -Please send any patches to Greg Kroah-Hartman , Andreas Dilger -, James Simmons and -Oleg Drokin . diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h deleted file mode 100644 index edc7ed0dcb94..000000000000 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h +++ /dev/null @@ -1,76 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef __LIBCFS_LIBCFS_H__ -#define __LIBCFS_LIBCFS_H__ - -#include -#include -#include - -#include -#include -#include - -#define LIBCFS_VERSION "0.7.0" - -extern struct blocking_notifier_head libcfs_ioctl_list; -static inline int notifier_from_ioctl_errno(int err) -{ - if (err == -EINVAL) - return NOTIFY_OK; - return notifier_from_errno(err) | NOTIFY_STOP_MASK; -} - -int libcfs_setup(void); - -extern struct workqueue_struct *cfs_rehash_wq; - -void lustre_insert_debugfs(struct ctl_table *table); -int lprocfs_call_handler(void *data, int write, loff_t *ppos, - void __user *buffer, size_t *lenp, - int (*handler)(void *data, int write, loff_t pos, - void __user *buffer, int len)); - -/* - * Memory - */ -#if BITS_PER_LONG == 32 -/* limit to lowmem on 32-bit systems */ -#define NUM_CACHEPAGES \ - min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4) -#else -#define NUM_CACHEPAGES totalram_pages -#endif - -#endif /* __LIBCFS_LIBCFS_H__ */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h deleted file mode 100644 index 61641c41c492..000000000000 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h +++ /dev/null @@ -1,434 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/include/libcfs/libcfs_cpu.h - * - * CPU partition - * . CPU partition is virtual processing unit - * - * . CPU partition can present 1-N cores, or 1-N NUMA nodes, - * in other words, CPU partition is a processors pool. - * - * CPU Partition Table (CPT) - * . a set of CPU partitions - * - * . There are two modes for CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP - * - * . User can specify total number of CPU partitions while creating a - * CPT, ID of CPU partition is always start from 0. - * - * Example: if there are 8 cores on the system, while creating a CPT - * with cpu_npartitions=4: - * core[0, 1] = partition[0], core[2, 3] = partition[1] - * core[4, 5] = partition[2], core[6, 7] = partition[3] - * - * cpu_npartitions=1: - * core[0, 1, ... 7] = partition[0] - * - * . User can also specify CPU partitions by string pattern - * - * Examples: cpu_partitions="0[0,1], 1[2,3]" - * cpu_partitions="N 0[0-3], 1[4-8]" - * - * The first character "N" means following numbers are numa ID - * - * . NUMA allocators, CPU affinity threads are built over CPU partitions, - * instead of HW CPUs or HW nodes. - * - * . By default, Lustre modules should refer to the global cfs_cpt_tab, - * instead of accessing HW CPUs directly, so concurrency of Lustre can be - * configured by cpu_npartitions of the global cfs_cpt_tab - * - * . 
If cpu_npartitions=1(all CPUs in one pool), lustre should work the - * same way as 2.2 or earlier versions - * - * Author: liang@whamcloud.com - */ - -#ifndef __LIBCFS_CPU_H__ -#define __LIBCFS_CPU_H__ - -#include -#include -#include - -/* any CPU partition */ -#define CFS_CPT_ANY (-1) - -#ifdef CONFIG_SMP -/** virtual processing unit */ -struct cfs_cpu_partition { - /* CPUs mask for this partition */ - cpumask_var_t cpt_cpumask; - /* nodes mask for this partition */ - nodemask_t *cpt_nodemask; - /* spread rotor for NUMA allocator */ - unsigned int cpt_spread_rotor; -}; - - -/** descriptor for CPU partitions */ -struct cfs_cpt_table { - /* version, reserved for hotplug */ - unsigned int ctb_version; - /* spread rotor for NUMA allocator */ - unsigned int ctb_spread_rotor; - /* # of CPU partitions */ - unsigned int ctb_nparts; - /* partitions tables */ - struct cfs_cpu_partition *ctb_parts; - /* shadow HW CPU to CPU partition ID */ - int *ctb_cpu2cpt; - /* all cpus in this partition table */ - cpumask_var_t ctb_cpumask; - /* all nodes in this partition table */ - nodemask_t *ctb_nodemask; -}; - -extern struct cfs_cpt_table *cfs_cpt_tab; - -/** - * return cpumask of CPU partition \a cpt - */ -cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt); -/** - * print string information of cpt-table - */ -int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len); -/** - * return total number of CPU partitions in \a cptab - */ -int -cfs_cpt_number(struct cfs_cpt_table *cptab); -/** - * return number of HW cores or hyper-threadings in a CPU partition \a cpt - */ -int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt); -/** - * is there any online CPU in CPU partition \a cpt - */ -int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt); -/** - * return nodemask of CPU partition \a cpt - */ -nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt); -/** - * shadow current HW processor ID to CPU-partition ID of \a cptab - */ -int 
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap); -/** - * shadow HW processor ID \a CPU to CPU-partition ID by \a cptab - */ -int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu); -/** - * bind current thread on a CPU-partition \a cpt of \a cptab - */ -int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt); -/** - * add \a cpu to CPU partition @cpt of \a cptab, return 1 for success, - * otherwise 0 is returned - */ -int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu); -/** - * remove \a cpu from CPU partition \a cpt of \a cptab - */ -void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu); -/** - * add all cpus in \a mask to CPU partition \a cpt - * return 1 if successfully set all CPUs, otherwise return 0 - */ -int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, - int cpt, cpumask_t *mask); -/** - * remove all cpus in \a mask from CPU partition \a cpt - */ -void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, - int cpt, cpumask_t *mask); -/** - * add all cpus in NUMA node \a node to CPU partition \a cpt - * return 1 if successfully set all CPUs, otherwise return 0 - */ -int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node); -/** - * remove all cpus in NUMA node \a node from CPU partition \a cpt - */ -void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node); - -/** - * add all cpus in node mask \a mask to CPU partition \a cpt - * return 1 if successfully set all CPUs, otherwise return 0 - */ -int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, - int cpt, nodemask_t *mask); -/** - * remove all cpus in node mask \a mask from CPU partition \a cpt - */ -void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, - int cpt, nodemask_t *mask); -/** - * unset all cpus for CPU partition \a cpt - */ -void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt); -/** - * convert partition id \a cpt to numa node id, if there are more than one - * nodes in this partition, it might return a different node id 
each time. - */ -int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt); - -/** - * return number of HTs in the same core of \a cpu - */ -int cfs_cpu_ht_nsiblings(int cpu); - -int cfs_cpu_init(void); -void cfs_cpu_fini(void); - -#else /* !CONFIG_SMP */ -struct cfs_cpt_table; -#define cfs_cpt_tab ((struct cfs_cpt_table *)NULL) - -static inline cpumask_var_t * -cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt) -{ - return NULL; -} - -static inline int -cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) -{ - return 0; -} -static inline int -cfs_cpt_number(struct cfs_cpt_table *cptab) -{ - return 1; -} - -static inline int -cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) -{ - return 1; -} - -static inline int -cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) -{ - return 1; -} - -static inline nodemask_t * -cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt) -{ - return NULL; -} - -static inline int -cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - return 1; -} - -static inline void -cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ -} - -static inline int -cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - return 1; -} - -static inline void -cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ -} - -static inline int -cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - return 1; -} - -static inline void -cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ -} - -static inline int -cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ - return 1; -} - -static inline void -cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ -} - -static inline void -cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) -{ -} - -static inline int -cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) -{ - return 0; -} - -static inline int -cfs_cpu_ht_nsiblings(int 
cpu) -{ - return 1; -} - -static inline int -cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) -{ - return 0; -} - -static inline int -cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) -{ - return 0; -} - -static inline int -cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) -{ - return 0; -} - -static inline int -cfs_cpu_init(void) -{ - return 0; -} - -static inline void cfs_cpu_fini(void) -{ -} - -#endif /* CONFIG_SMP */ - -/** - * destroy a CPU partition table - */ -void cfs_cpt_table_free(struct cfs_cpt_table *cptab); -/** - * create a cfs_cpt_table with \a ncpt number of partitions - */ -struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt); - -/* - * allocate per-cpu-partition data, returned value is an array of pointers, - * variable can be indexed by CPU ID. - * cptab != NULL: size of array is number of CPU partitions - * cptab == NULL: size of array is number of HW cores - */ -void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size); -/* - * destroy per-cpu-partition variable - */ -void cfs_percpt_free(void *vars); -int cfs_percpt_number(void *vars); - -#define cfs_percpt_for_each(var, i, vars) \ - for (i = 0; i < cfs_percpt_number(vars) && \ - ((var) = (vars)[i]) != NULL; i++) - -/* - * percpu partition lock - * - * There are some use-cases like this in Lustre: - * . each CPU partition has it's own private data which is frequently changed, - * and mostly by the local CPU partition. - * . all CPU partitions share some global data, these data are rarely changed. - * - * LNet is typical example. - * CPU partition lock is designed for this kind of use-cases: - * . each CPU partition has it's own private lock - * . change on private data just needs to take the private lock - * . read on shared data just needs to take _any_ of private locks - * . change on shared data needs to take _all_ private locks, - * which is slow and should be really rare. 
- */ -enum { - CFS_PERCPT_LOCK_EX = -1, /* negative */ -}; - -struct cfs_percpt_lock { - /* cpu-partition-table for this lock */ - struct cfs_cpt_table *pcl_cptab; - /* exclusively locked */ - unsigned int pcl_locked; - /* private lock table */ - spinlock_t **pcl_locks; -}; - -/* return number of private locks */ -#define cfs_percpt_lock_num(pcl) cfs_cpt_number(pcl->pcl_cptab) - -/* - * create a cpu-partition lock based on CPU partition table \a cptab, - * each private lock has extra \a psize bytes padding data - */ -struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab, - struct lock_class_key *keys); -/* destroy a cpu-partition lock */ -void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl); - -/* lock private lock \a index of \a pcl */ -void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index); - -/* unlock private lock \a index of \a pcl */ -void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index); - -#define CFS_PERCPT_LOCK_KEYS 256 - -/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */ -#define cfs_percpt_lock_alloc(cptab) \ -({ \ - static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS]; \ - struct cfs_percpt_lock *___lk; \ - \ - if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS) \ - ___lk = cfs_percpt_lock_create(cptab, NULL); \ - else \ - ___lk = cfs_percpt_lock_create(cptab, ___keys); \ - ___lk; \ -}) - -/** - * iterate over all CPU partitions in \a cptab - */ -#define cfs_cpt_for_each(i, cptab) \ - for (i = 0; i < cfs_cpt_number(cptab); i++) - -#endif /* __LIBCFS_CPU_H__ */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h deleted file mode 100644 index 176fae7319e3..000000000000 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h +++ /dev/null @@ -1,208 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - */ - -#ifndef _LIBCFS_CRYPTO_H -#define _LIBCFS_CRYPTO_H - -#include -struct page; - -struct cfs_crypto_hash_type { - char *cht_name; /*< hash algorithm name, equal to - * format name for crypto api - */ - unsigned int cht_key; /*< init key by default (valid for - * 4 bytes context like crc32, adler - */ - unsigned int cht_size; /**< hash digest size */ -}; - -enum cfs_crypto_hash_alg { - CFS_HASH_ALG_NULL = 0, - CFS_HASH_ALG_ADLER32, - CFS_HASH_ALG_CRC32, - CFS_HASH_ALG_MD5, - CFS_HASH_ALG_SHA1, - CFS_HASH_ALG_SHA256, - CFS_HASH_ALG_SHA384, - CFS_HASH_ALG_SHA512, - CFS_HASH_ALG_CRC32C, - CFS_HASH_ALG_MAX, - CFS_HASH_ALG_UNKNOWN = 0xff -}; - -static struct cfs_crypto_hash_type hash_types[] = { - [CFS_HASH_ALG_NULL] = { - .cht_name = "null", - .cht_key = 0, - .cht_size = 0 - }, - [CFS_HASH_ALG_ADLER32] = { - .cht_name = "adler32", - .cht_key = 1, - .cht_size = 4 - }, - [CFS_HASH_ALG_CRC32] = { - .cht_name = "crc32", - .cht_key = ~0, - .cht_size = 4 - }, - [CFS_HASH_ALG_CRC32C] = { - .cht_name = "crc32c", - .cht_key = ~0, - .cht_size = 4 - }, - [CFS_HASH_ALG_MD5] = { - .cht_name = "md5", - .cht_key = 
0, - .cht_size = 16 - }, - [CFS_HASH_ALG_SHA1] = { - .cht_name = "sha1", - .cht_key = 0, - .cht_size = 20 - }, - [CFS_HASH_ALG_SHA256] = { - .cht_name = "sha256", - .cht_key = 0, - .cht_size = 32 - }, - [CFS_HASH_ALG_SHA384] = { - .cht_name = "sha384", - .cht_key = 0, - .cht_size = 48 - }, - [CFS_HASH_ALG_SHA512] = { - .cht_name = "sha512", - .cht_key = 0, - .cht_size = 64 - }, - [CFS_HASH_ALG_MAX] = { - .cht_name = NULL, - .cht_key = 0, - .cht_size = 64 - }, -}; - -/* Maximum size of hash_types[].cht_size */ -#define CFS_CRYPTO_HASH_DIGESTSIZE_MAX 64 - -/** - * Return hash algorithm information for the specified algorithm identifier - * - * Hash information includes algorithm name, initial seed, hash size. - * - * \retval cfs_crypto_hash_type for valid ID (CFS_HASH_ALG_*) - * \retval NULL for unknown algorithm identifier - */ -static inline const struct cfs_crypto_hash_type * -cfs_crypto_hash_type(enum cfs_crypto_hash_alg hash_alg) -{ - struct cfs_crypto_hash_type *ht; - - if (hash_alg < CFS_HASH_ALG_MAX) { - ht = &hash_types[hash_alg]; - if (ht->cht_name) - return ht; - } - return NULL; -} - -/** - * Return hash name for hash algorithm identifier - * - * \param[in] hash_alg hash alrgorithm id (CFS_HASH_ALG_*) - * - * \retval string name of known hash algorithm - * \retval "unknown" if hash algorithm is unknown - */ -static inline const char * -cfs_crypto_hash_name(enum cfs_crypto_hash_alg hash_alg) -{ - const struct cfs_crypto_hash_type *ht; - - ht = cfs_crypto_hash_type(hash_alg); - if (ht) - return ht->cht_name; - return "unknown"; -} - -/** - * Return digest size for hash algorithm type - * - * \param[in] hash_alg hash alrgorithm id (CFS_HASH_ALG_*) - * - * \retval hash algorithm digest size in bytes - * \retval 0 if hash algorithm type is unknown - */ -static inline int cfs_crypto_hash_digestsize(enum cfs_crypto_hash_alg hash_alg) -{ - const struct cfs_crypto_hash_type *ht; - - ht = cfs_crypto_hash_type(hash_alg); - if (ht) - return ht->cht_size; - return 0; 
-} - -/** - * Find hash algorithm ID for the specified algorithm name - * - * \retval hash algorithm ID for valid ID (CFS_HASH_ALG_*) - * \retval CFS_HASH_ALG_UNKNOWN for unknown algorithm name - */ -static inline unsigned char cfs_crypto_hash_alg(const char *algname) -{ - enum cfs_crypto_hash_alg hash_alg; - - for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++) - if (!strcmp(hash_types[hash_alg].cht_name, algname)) - return hash_alg; - - return CFS_HASH_ALG_UNKNOWN; -} - -int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg, - const void *buf, unsigned int buf_len, - unsigned char *key, unsigned int key_len, - unsigned char *hash, unsigned int *hash_len); - -struct ahash_request * -cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg, - unsigned char *key, unsigned int key_len); -int cfs_crypto_hash_update_page(struct ahash_request *desc, - struct page *page, unsigned int offset, - unsigned int len); -int cfs_crypto_hash_update(struct ahash_request *desc, const void *buf, - unsigned int buf_len); -int cfs_crypto_hash_final(struct ahash_request *desc, - unsigned char *hash, unsigned int *hash_len); -int cfs_crypto_register(void); -void cfs_crypto_unregister(void); -int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg); -#endif diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h deleted file mode 100644 index 17534a76362a..000000000000 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h +++ /dev/null @@ -1,207 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/include/libcfs/libcfs_debug.h - * - * Debug messages and assertions - * - */ - -#ifndef __LIBCFS_DEBUG_H__ -#define __LIBCFS_DEBUG_H__ - -#include -#include - -/* - * Debugging - */ -extern unsigned int libcfs_subsystem_debug; -extern unsigned int libcfs_stack; -extern unsigned int libcfs_debug; -extern unsigned int libcfs_printk; -extern unsigned int libcfs_console_ratelimit; -extern unsigned int libcfs_console_max_delay; -extern unsigned int libcfs_console_min_delay; -extern unsigned int libcfs_console_backoff; -extern unsigned int libcfs_debug_binary; -extern char libcfs_debug_file_path_arr[PATH_MAX]; - -int libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys); -int libcfs_debug_str2mask(int *mask, const char *str, int is_subsys); - -/* Has there been an LBUG? 
*/ -extern unsigned int libcfs_catastrophe; -extern unsigned int libcfs_panic_on_lbug; - -/* Enable debug-checks on stack size - except on x86_64 */ -#if !defined(__x86_64__) -# ifdef __ia64__ -# define CDEBUG_STACK() (THREAD_SIZE - \ - ((unsigned long)__builtin_dwarf_cfa() & \ - (THREAD_SIZE - 1))) -# else -# define CDEBUG_STACK() (THREAD_SIZE - \ - ((unsigned long)__builtin_frame_address(0) & \ - (THREAD_SIZE - 1))) -# endif /* __ia64__ */ - -#define __CHECK_STACK(msgdata, mask, cdls) \ -do { \ - if (unlikely(CDEBUG_STACK() > libcfs_stack)) { \ - LIBCFS_DEBUG_MSG_DATA_INIT(msgdata, D_WARNING, NULL); \ - libcfs_stack = CDEBUG_STACK(); \ - libcfs_debug_msg(msgdata, \ - "maximum lustre stack %lu\n", \ - CDEBUG_STACK()); \ - (msgdata)->msg_mask = mask; \ - (msgdata)->msg_cdls = cdls; \ - dump_stack(); \ - /*panic("LBUG");*/ \ - } \ -} while (0) -#define CFS_CHECK_STACK(msgdata, mask, cdls) __CHECK_STACK(msgdata, mask, cdls) -#else /* __x86_64__ */ -#define CFS_CHECK_STACK(msgdata, mask, cdls) do {} while (0) -#define CDEBUG_STACK() (0L) -#endif /* __x86_64__ */ - -#ifndef DEBUG_SUBSYSTEM -# define DEBUG_SUBSYSTEM S_UNDEFINED -#endif - -#define CDEBUG_DEFAULT_MAX_DELAY (600 * HZ) /* jiffies */ -#define CDEBUG_DEFAULT_MIN_DELAY ((HZ + 1) / 2) /* jiffies */ -#define CDEBUG_DEFAULT_BACKOFF 2 -struct cfs_debug_limit_state { - unsigned long cdls_next; - unsigned int cdls_delay; - int cdls_count; -}; - -struct libcfs_debug_msg_data { - const char *msg_file; - const char *msg_fn; - int msg_subsys; - int msg_line; - int msg_mask; - struct cfs_debug_limit_state *msg_cdls; -}; - -#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \ -do { \ - (data)->msg_subsys = DEBUG_SUBSYSTEM; \ - (data)->msg_file = __FILE__; \ - (data)->msg_fn = __func__; \ - (data)->msg_line = __LINE__; \ - (data)->msg_cdls = (cdls); \ - (data)->msg_mask = (mask); \ -} while (0) - -#define LIBCFS_DEBUG_MSG_DATA_DECL(dataname, mask, cdls) \ - static struct libcfs_debug_msg_data dataname = { \ - .msg_subsys 
= DEBUG_SUBSYSTEM, \ - .msg_file = __FILE__, \ - .msg_fn = __func__, \ - .msg_line = __LINE__, \ - .msg_cdls = (cdls) }; \ - dataname.msg_mask = (mask) - -/** - * Filters out logging messages based on mask and subsystem. - */ -static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem) -{ - return mask & D_CANTMASK || - ((libcfs_debug & mask) && (libcfs_subsystem_debug & subsystem)); -} - -#define __CDEBUG(cdls, mask, format, ...) \ -do { \ - static struct libcfs_debug_msg_data msgdata; \ - \ - CFS_CHECK_STACK(&msgdata, mask, cdls); \ - \ - if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - LIBCFS_DEBUG_MSG_DATA_INIT(&msgdata, mask, cdls); \ - libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__); \ - } \ -} while (0) - -#define CDEBUG(mask, format, ...) __CDEBUG(NULL, mask, format, ## __VA_ARGS__) - -#define CDEBUG_LIMIT(mask, format, ...) \ -do { \ - static struct cfs_debug_limit_state cdls; \ - \ - __CDEBUG(&cdls, mask, format, ## __VA_ARGS__); \ -} while (0) - -/* - * Lustre Error Checksum: calculates checksum - * of Hex number by XORing the nybbles. - */ -#define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \ - ((hexnum) >> 8 & 0xf)) - -#define CWARN(format, ...) CDEBUG_LIMIT(D_WARNING, format, ## __VA_ARGS__) -#define CERROR(format, ...) CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__) -#define CNETERR(format, a...) CDEBUG_LIMIT(D_NETERROR, format, ## a) -#define CEMERG(format, ...) CDEBUG_LIMIT(D_EMERG, format, ## __VA_ARGS__) - -#define LCONSOLE(mask, format, ...) CDEBUG(D_CONSOLE | (mask), format, ## __VA_ARGS__) -#define LCONSOLE_INFO(format, ...) CDEBUG_LIMIT(D_CONSOLE, format, ## __VA_ARGS__) -#define LCONSOLE_WARN(format, ...) CDEBUG_LIMIT(D_CONSOLE | D_WARNING, format, ## __VA_ARGS__) -#define LCONSOLE_ERROR_MSG(errnum, format, ...) CDEBUG_LIMIT(D_CONSOLE | D_ERROR, \ - "%x-%x: " format, errnum, LERRCHKSUM(errnum), ## __VA_ARGS__) -#define LCONSOLE_ERROR(format, ...) 
LCONSOLE_ERROR_MSG(0x00, format, ## __VA_ARGS__) - -#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__) - -int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, - const char *format1, ...) - __printf(2, 3); - -int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, - const char *format1, - va_list args, const char *format2, ...) - __printf(4, 5); - -/* other external symbols that tracefile provides: */ -int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, - const char __user *usr_buffer, int usr_buffer_nob); -int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, - const char *knl_buffer, char *append); - -#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log" - -#endif /* __LIBCFS_DEBUG_H__ */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h deleted file mode 100644 index 8074e390b4d1..000000000000 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h +++ /dev/null @@ -1,194 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. 
All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Oracle Corporation, Inc. - */ - -#ifndef _LIBCFS_FAIL_H -#define _LIBCFS_FAIL_H - -#include -#include - -extern unsigned long cfs_fail_loc; -extern unsigned int cfs_fail_val; -extern int cfs_fail_err; - -extern wait_queue_head_t cfs_race_waitq; -extern int cfs_race_state; - -int __cfs_fail_check_set(u32 id, u32 value, int set); -int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set); - -enum { - CFS_FAIL_LOC_NOSET = 0, - CFS_FAIL_LOC_ORSET = 1, - CFS_FAIL_LOC_RESET = 2, - CFS_FAIL_LOC_VALUE = 3 -}; - -/* Failure injection control */ -#define CFS_FAIL_MASK_SYS 0x0000FF00 -#define CFS_FAIL_MASK_LOC (0x000000FF | CFS_FAIL_MASK_SYS) - -#define CFS_FAILED_BIT 30 -/* CFS_FAILED is 0x40000000 */ -#define CFS_FAILED BIT(CFS_FAILED_BIT) - -#define CFS_FAIL_ONCE_BIT 31 -/* CFS_FAIL_ONCE is 0x80000000 */ -#define CFS_FAIL_ONCE BIT(CFS_FAIL_ONCE_BIT) - -/* The following flags aren't made to be combined */ -#define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */ -#define CFS_FAIL_SOME 0x10000000 /* only fail N times */ -#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */ -#define CFS_FAIL_USR1 0x04000000 /* user flag */ - -#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */ - -static inline bool CFS_FAIL_PRECHECK(u32 id) -{ - return cfs_fail_loc && - ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) || - (cfs_fail_loc & id & CFS_FAULT)); -} - -static inline int cfs_fail_check_set(u32 id, u32 value, - int set, int quiet) -{ - int ret = 0; - - if (unlikely(CFS_FAIL_PRECHECK(id))) { - ret = __cfs_fail_check_set(id, value, set); - if (ret) { - if (quiet) { - CDEBUG(D_INFO, "*** cfs_fail_loc=%x, val=%u***\n", - id, value); - } else { - LCONSOLE_INFO("*** cfs_fail_loc=%x, val=%u***\n", - id, value); - } - } - } - - return ret; -} - -/* 
If id hit cfs_fail_loc, return 1, otherwise return 0 */ -#define CFS_FAIL_CHECK(id) \ - cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 0) -#define CFS_FAIL_CHECK_QUIET(id) \ - cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1) - -/* - * If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1, - * otherwise return 0 - */ -#define CFS_FAIL_CHECK_VALUE(id, value) \ - cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0) -#define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \ - cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1) - -/* - * If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1, - * otherwise return 0 - */ -#define CFS_FAIL_CHECK_ORSET(id, value) \ - cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0) -#define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \ - cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1) - -/* - * If id hit cfs_fail_loc, cfs_fail_loc = value and return 1, - * otherwise return 0 - */ -#define CFS_FAIL_CHECK_RESET(id, value) \ - cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0) -#define CFS_FAIL_CHECK_RESET_QUIET(id, value) \ - cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1) - -static inline int cfs_fail_timeout_set(u32 id, u32 value, int ms, int set) -{ - if (unlikely(CFS_FAIL_PRECHECK(id))) - return __cfs_fail_timeout_set(id, value, ms, set); - return 0; -} - -/* If id hit cfs_fail_loc, sleep for seconds or milliseconds */ -#define CFS_FAIL_TIMEOUT(id, secs) \ - cfs_fail_timeout_set(id, 0, (secs) * 1000, CFS_FAIL_LOC_NOSET) - -#define CFS_FAIL_TIMEOUT_MS(id, ms) \ - cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET) - -/* - * If id hit cfs_fail_loc, cfs_fail_loc |= value and - * sleep seconds or milliseconds - */ -#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \ - cfs_fail_timeout_set(id, value, (secs) * 1000, CFS_FAIL_LOC_ORSET) - -#define CFS_FAIL_TIMEOUT_RESET(id, value, secs) \ - cfs_fail_timeout_set(id, value, (secs) * 1000, CFS_FAIL_LOC_RESET) - -#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \ - 
cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET) - -#define CFS_FAULT_CHECK(id) \ - CFS_FAIL_CHECK(CFS_FAULT | (id)) - -/* - * The idea here is to synchronise two threads to force a race. The - * first thread that calls this with a matching fail_loc is put to - * sleep. The next thread that calls with the same fail_loc wakes up - * the first and continues. - */ -static inline void cfs_race(u32 id) -{ - if (CFS_FAIL_PRECHECK(id)) { - if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) { - int rc; - - cfs_race_state = 0; - CERROR("cfs_race id %x sleeping\n", id); - rc = wait_event_interruptible(cfs_race_waitq, - !!cfs_race_state); - CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc); - } else { - CERROR("cfs_fail_race id %x waking\n", id); - cfs_race_state = 1; - wake_up(&cfs_race_waitq); - } - } -} - -#define CFS_RACE(id) cfs_race(id) - -#endif /* _LIBCFS_FAIL_H */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h deleted file mode 100644 index be315958a4b3..000000000000 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h +++ /dev/null @@ -1,869 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/include/libcfs/libcfs_hash.h - * - * Hashing routines - * - */ - -#ifndef __LIBCFS_HASH_H__ -#define __LIBCFS_HASH_H__ - -#include -#include -#include -#include - -/* - * Knuth recommends primes in approximately golden ratio to the maximum - * integer representable by a machine word for multiplicative hashing. - * Chuck Lever verified the effectiveness of this technique: - * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf - * - * These primes are chosen to be bit-sparse, that is operations on - * them can use shifts and additions instead of multiplications for - * machines where multiplications are slow. - */ -/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ -#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL -/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ -#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL - -/** disable debug */ -#define CFS_HASH_DEBUG_NONE 0 -/* - * record hash depth and output to console when it's too deep, - * computing overhead is low but consume more memory - */ -#define CFS_HASH_DEBUG_1 1 -/** expensive, check key validation */ -#define CFS_HASH_DEBUG_2 2 - -#define CFS_HASH_DEBUG_LEVEL CFS_HASH_DEBUG_NONE - -struct cfs_hash_ops; -struct cfs_hash_lock_ops; -struct cfs_hash_hlist_ops; - -union cfs_hash_lock { - rwlock_t rw; /**< rwlock */ - spinlock_t spin; /**< spinlock */ -}; - -/** - * cfs_hash_bucket is a container of: - * - lock, counter ... - * - array of hash-head starting from hsb_head[0], hash-head can be one of - * . 
struct cfs_hash_head - * . struct cfs_hash_head_dep - * . struct cfs_hash_dhead - * . struct cfs_hash_dhead_dep - * which depends on requirement of user - * - some extra bytes (caller can require it while creating hash) - */ -struct cfs_hash_bucket { - union cfs_hash_lock hsb_lock; /**< bucket lock */ - u32 hsb_count; /**< current entries */ - u32 hsb_version; /**< change version */ - unsigned int hsb_index; /**< index of bucket */ - int hsb_depmax; /**< max depth on bucket */ - long hsb_head[0]; /**< hash-head array */ -}; - -/** - * cfs_hash bucket descriptor, it's normally in stack of caller - */ -struct cfs_hash_bd { - /* address of bucket */ - struct cfs_hash_bucket *bd_bucket; - /* offset in bucket */ - unsigned int bd_offset; -}; - -#define CFS_HASH_NAME_LEN 16 /**< default name length */ -#define CFS_HASH_BIGNAME_LEN 64 /**< bigname for param tree */ - -#define CFS_HASH_BKT_BITS 3 /**< default bits of bucket */ -#define CFS_HASH_BITS_MAX 30 /**< max bits of bucket */ -#define CFS_HASH_BITS_MIN CFS_HASH_BKT_BITS - -/** - * common hash attributes. - */ -enum cfs_hash_tag { - /** - * don't need any lock, caller will protect operations with it's - * own lock. With this flag: - * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK - * will be ignored. - * . 
Some functions will be disabled with this flag, i.e: - * cfs_hash_for_each_empty, cfs_hash_rehash - */ - CFS_HASH_NO_LOCK = BIT(0), - /** no bucket lock, use one spinlock to protect the whole hash */ - CFS_HASH_NO_BKTLOCK = BIT(1), - /** rwlock to protect bucket */ - CFS_HASH_RW_BKTLOCK = BIT(2), - /** spinlock to protect bucket */ - CFS_HASH_SPIN_BKTLOCK = BIT(3), - /** always add new item to tail */ - CFS_HASH_ADD_TAIL = BIT(4), - /** hash-table doesn't have refcount on item */ - CFS_HASH_NO_ITEMREF = BIT(5), - /** big name for param-tree */ - CFS_HASH_BIGNAME = BIT(6), - /** track global count */ - CFS_HASH_COUNTER = BIT(7), - /** rehash item by new key */ - CFS_HASH_REHASH_KEY = BIT(8), - /** Enable dynamic hash resizing */ - CFS_HASH_REHASH = BIT(9), - /** can shrink hash-size */ - CFS_HASH_SHRINK = BIT(10), - /** assert hash is empty on exit */ - CFS_HASH_ASSERT_EMPTY = BIT(11), - /** record hlist depth */ - CFS_HASH_DEPTH = BIT(12), - /** - * rehash is always scheduled in a different thread, so current - * change on hash table is non-blocking - */ - CFS_HASH_NBLK_CHANGE = BIT(13), - /** - * NB, we typed hs_flags as u16, please change it - * if you need to extend >=16 flags - */ -}; - -/** most used attributes */ -#define CFS_HASH_DEFAULT (CFS_HASH_RW_BKTLOCK | \ - CFS_HASH_COUNTER | CFS_HASH_REHASH) - -/** - * cfs_hash is a hash-table implementation for general purpose, it can support: - * . two refcount modes - * hash-table with & without refcount - * . four lock modes - * nolock, one-spinlock, rw-bucket-lock, spin-bucket-lock - * . general operations - * lookup, add(add_tail or add_head), delete - * . rehash - * grows or shrink - * . iteration - * locked iteration and unlocked iteration - * . bigname - * support long name hash - * . 
debug - * trace max searching depth - * - * Rehash: - * When the htable grows or shrinks, a separate task (cfs_hash_rehash_worker) - * is spawned to handle the rehash in the background, it's possible that other - * processes can concurrently perform additions, deletions, and lookups - * without being blocked on rehash completion, because rehash will release - * the global wrlock for each bucket. - * - * rehash and iteration can't run at the same time because it's too tricky - * to keep both of them safe and correct. - * As they are relatively rare operations, so: - * . if iteration is in progress while we try to launch rehash, then - * it just giveup, iterator will launch rehash at the end. - * . if rehash is in progress while we try to iterate the hash table, - * then we just wait (shouldn't be very long time), anyway, nobody - * should expect iteration of whole hash-table to be non-blocking. - * - * During rehashing, a (key,object) pair may be in one of two buckets, - * depending on whether the worker task has yet to transfer the object - * to its new location in the table. Lookups and deletions need to search both - * locations; additions must take care to only insert into the new bucket. 
- */ - -struct cfs_hash { - /** - * serialize with rehash, or serialize all operations if - * the hash-table has CFS_HASH_NO_BKTLOCK - */ - union cfs_hash_lock hs_lock; - /** hash operations */ - struct cfs_hash_ops *hs_ops; - /** hash lock operations */ - struct cfs_hash_lock_ops *hs_lops; - /** hash list operations */ - struct cfs_hash_hlist_ops *hs_hops; - /** hash buckets-table */ - struct cfs_hash_bucket **hs_buckets; - /** total number of items on this hash-table */ - atomic_t hs_count; - /** hash flags, see cfs_hash_tag for detail */ - u16 hs_flags; - /** # of extra-bytes for bucket, for user saving extended attributes */ - u16 hs_extra_bytes; - /** wants to iterate */ - u8 hs_iterating; - /** hash-table is dying */ - u8 hs_exiting; - /** current hash bits */ - u8 hs_cur_bits; - /** min hash bits */ - u8 hs_min_bits; - /** max hash bits */ - u8 hs_max_bits; - /** bits for rehash */ - u8 hs_rehash_bits; - /** bits for each bucket */ - u8 hs_bkt_bits; - /** resize min threshold */ - u16 hs_min_theta; - /** resize max threshold */ - u16 hs_max_theta; - /** resize count */ - u32 hs_rehash_count; - /** # of iterators (caller of cfs_hash_for_each_*) */ - u32 hs_iterators; - /** rehash workitem */ - struct work_struct hs_rehash_work; - /** refcount on this hash table */ - atomic_t hs_refcount; - /** rehash buckets-table */ - struct cfs_hash_bucket **hs_rehash_buckets; -#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 - /** serialize debug members */ - spinlock_t hs_dep_lock; - /** max depth */ - unsigned int hs_dep_max; - /** id of the deepest bucket */ - unsigned int hs_dep_bkt; - /** offset in the deepest bucket */ - unsigned int hs_dep_off; - /** bits when we found the max depth */ - unsigned int hs_dep_bits; - /** workitem to output max depth */ - struct work_struct hs_dep_work; -#endif - /** name of htable */ - char hs_name[0]; -}; - -struct cfs_hash_lock_ops { - /** lock the hash table */ - void (*hs_lock)(union cfs_hash_lock *lock, int exclusive); - /** unlock 
the hash table */ - void (*hs_unlock)(union cfs_hash_lock *lock, int exclusive); - /** lock the hash bucket */ - void (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive); - /** unlock the hash bucket */ - void (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive); -}; - -struct cfs_hash_hlist_ops { - /** return hlist_head of hash-head of @bd */ - struct hlist_head *(*hop_hhead)(struct cfs_hash *hs, - struct cfs_hash_bd *bd); - /** return hash-head size */ - int (*hop_hhead_size)(struct cfs_hash *hs); - /** add @hnode to hash-head of @bd */ - int (*hop_hnode_add)(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode); - /** remove @hnode from hash-head of @bd */ - int (*hop_hnode_del)(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode); -}; - -struct cfs_hash_ops { - /** return hashed value from @key */ - unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key, - unsigned int mask); - /** return key address of @hnode */ - void * (*hs_key)(struct hlist_node *hnode); - /** copy key from @hnode to @key */ - void (*hs_keycpy)(struct hlist_node *hnode, void *key); - /** - * compare @key with key of @hnode - * returns 1 on a match - */ - int (*hs_keycmp)(const void *key, struct hlist_node *hnode); - /** return object address of @hnode, i.e: container_of(...hnode) */ - void * (*hs_object)(struct hlist_node *hnode); - /** get refcount of item, always called with holding bucket-lock */ - void (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode); - /** release refcount of item */ - void (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode); - /** release refcount of item, always called with holding bucket-lock */ - void (*hs_put_locked)(struct cfs_hash *hs, - struct hlist_node *hnode); - /** it's called before removing of @hnode */ - void (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode); -}; - -/** total number of buckets in @hs */ -#define CFS_HASH_NBKT(hs) \ - BIT((hs)->hs_cur_bits - (hs)->hs_bkt_bits) - 
-/** total number of buckets in @hs while rehashing */ -#define CFS_HASH_RH_NBKT(hs) \ - BIT((hs)->hs_rehash_bits - (hs)->hs_bkt_bits) - -/** number of hlist for in bucket */ -#define CFS_HASH_BKT_NHLIST(hs) BIT((hs)->hs_bkt_bits) - -/** total number of hlist in @hs */ -#define CFS_HASH_NHLIST(hs) BIT((hs)->hs_cur_bits) - -/** total number of hlist in @hs while rehashing */ -#define CFS_HASH_RH_NHLIST(hs) BIT((hs)->hs_rehash_bits) - -static inline int -cfs_hash_with_no_lock(struct cfs_hash *hs) -{ - /* caller will serialize all operations for this hash-table */ - return hs->hs_flags & CFS_HASH_NO_LOCK; -} - -static inline int -cfs_hash_with_no_bktlock(struct cfs_hash *hs) -{ - /* no bucket lock, one single lock to protect the hash-table */ - return hs->hs_flags & CFS_HASH_NO_BKTLOCK; -} - -static inline int -cfs_hash_with_rw_bktlock(struct cfs_hash *hs) -{ - /* rwlock to protect hash bucket */ - return hs->hs_flags & CFS_HASH_RW_BKTLOCK; -} - -static inline int -cfs_hash_with_spin_bktlock(struct cfs_hash *hs) -{ - /* spinlock to protect hash bucket */ - return hs->hs_flags & CFS_HASH_SPIN_BKTLOCK; -} - -static inline int -cfs_hash_with_add_tail(struct cfs_hash *hs) -{ - return hs->hs_flags & CFS_HASH_ADD_TAIL; -} - -static inline int -cfs_hash_with_no_itemref(struct cfs_hash *hs) -{ - /* - * hash-table doesn't keep refcount on item, - * item can't be removed from hash unless it's - * ZERO refcount - */ - return hs->hs_flags & CFS_HASH_NO_ITEMREF; -} - -static inline int -cfs_hash_with_bigname(struct cfs_hash *hs) -{ - return hs->hs_flags & CFS_HASH_BIGNAME; -} - -static inline int -cfs_hash_with_counter(struct cfs_hash *hs) -{ - return hs->hs_flags & CFS_HASH_COUNTER; -} - -static inline int -cfs_hash_with_rehash(struct cfs_hash *hs) -{ - return hs->hs_flags & CFS_HASH_REHASH; -} - -static inline int -cfs_hash_with_rehash_key(struct cfs_hash *hs) -{ - return hs->hs_flags & CFS_HASH_REHASH_KEY; -} - -static inline int -cfs_hash_with_shrink(struct cfs_hash *hs) -{ - 
return hs->hs_flags & CFS_HASH_SHRINK; -} - -static inline int -cfs_hash_with_assert_empty(struct cfs_hash *hs) -{ - return hs->hs_flags & CFS_HASH_ASSERT_EMPTY; -} - -static inline int -cfs_hash_with_depth(struct cfs_hash *hs) -{ - return hs->hs_flags & CFS_HASH_DEPTH; -} - -static inline int -cfs_hash_with_nblk_change(struct cfs_hash *hs) -{ - return hs->hs_flags & CFS_HASH_NBLK_CHANGE; -} - -static inline int -cfs_hash_is_exiting(struct cfs_hash *hs) -{ - /* cfs_hash_destroy is called */ - return hs->hs_exiting; -} - -static inline int -cfs_hash_is_rehashing(struct cfs_hash *hs) -{ - /* rehash is launched */ - return !!hs->hs_rehash_bits; -} - -static inline int -cfs_hash_is_iterating(struct cfs_hash *hs) -{ - /* someone is calling cfs_hash_for_each_* */ - return hs->hs_iterating || hs->hs_iterators; -} - -static inline int -cfs_hash_bkt_size(struct cfs_hash *hs) -{ - return offsetof(struct cfs_hash_bucket, hsb_head[0]) + - hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) + - hs->hs_extra_bytes; -} - -static inline unsigned -cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned int mask) -{ - return hs->hs_ops->hs_hash(hs, key, mask); -} - -static inline void * -cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode) -{ - return hs->hs_ops->hs_key(hnode); -} - -static inline void -cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key) -{ - if (hs->hs_ops->hs_keycpy) - hs->hs_ops->hs_keycpy(hnode, key); -} - -/** - * Returns 1 on a match, - */ -static inline int -cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) -{ - return hs->hs_ops->hs_keycmp(key, hnode); -} - -static inline void * -cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode) -{ - return hs->hs_ops->hs_object(hnode); -} - -static inline void -cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode) -{ - return hs->hs_ops->hs_get(hs, hnode); -} - -static inline void -cfs_hash_put_locked(struct cfs_hash *hs, struct 
hlist_node *hnode) -{ - return hs->hs_ops->hs_put_locked(hs, hnode); -} - -static inline void -cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode) -{ - return hs->hs_ops->hs_put(hs, hnode); -} - -static inline void -cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode) -{ - if (hs->hs_ops->hs_exit) - hs->hs_ops->hs_exit(hs, hnode); -} - -static inline void cfs_hash_lock(struct cfs_hash *hs, int excl) -{ - hs->hs_lops->hs_lock(&hs->hs_lock, excl); -} - -static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl) -{ - hs->hs_lops->hs_unlock(&hs->hs_lock, excl); -} - -static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs, - atomic_t *condition) -{ - LASSERT(cfs_hash_with_no_bktlock(hs)); - return atomic_dec_and_lock(condition, &hs->hs_lock.spin); -} - -static inline void cfs_hash_bd_lock(struct cfs_hash *hs, - struct cfs_hash_bd *bd, int excl) -{ - hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl); -} - -static inline void cfs_hash_bd_unlock(struct cfs_hash *hs, - struct cfs_hash_bd *bd, int excl) -{ - hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl); -} - -/** - * operations on cfs_hash bucket (bd: bucket descriptor), - * they are normally for hash-table without rehash - */ -void cfs_hash_bd_get(struct cfs_hash *hs, const void *key, - struct cfs_hash_bd *bd); - -static inline void -cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key, - struct cfs_hash_bd *bd, int excl) -{ - cfs_hash_bd_get(hs, key, bd); - cfs_hash_bd_lock(hs, bd, excl); -} - -static inline unsigned -cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits); -} - -static inline void -cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index, - struct cfs_hash_bd *bd) -{ - bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits]; - bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U); -} - -static inline void * -cfs_hash_bd_extra_get(struct cfs_hash *hs, struct 
cfs_hash_bd *bd) -{ - return (void *)bd->bd_bucket + - cfs_hash_bkt_size(hs) - hs->hs_extra_bytes; -} - -static inline u32 -cfs_hash_bd_version_get(struct cfs_hash_bd *bd) -{ - /* need hold cfs_hash_bd_lock */ - return bd->bd_bucket->hsb_version; -} - -static inline u32 -cfs_hash_bd_count_get(struct cfs_hash_bd *bd) -{ - /* need hold cfs_hash_bd_lock */ - return bd->bd_bucket->hsb_count; -} - -static inline int -cfs_hash_bd_depmax_get(struct cfs_hash_bd *bd) -{ - return bd->bd_bucket->hsb_depmax; -} - -static inline int -cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2) -{ - if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index) - return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index; - - if (bd1->bd_offset != bd2->bd_offset) - return bd1->bd_offset - bd2->bd_offset; - - return 0; -} - -void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode); -void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode); -void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, - struct cfs_hash_bd *bd_new, - struct hlist_node *hnode); - -static inline int -cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd, - atomic_t *condition) -{ - LASSERT(cfs_hash_with_spin_bktlock(hs)); - return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin); -} - -static inline struct hlist_head * -cfs_hash_bd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - return hs->hs_hops->hop_hhead(hs, bd); -} - -struct hlist_node * -cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key); -struct hlist_node * -cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key); - -/** - * operations on cfs_hash bucket (bd: bucket descriptor), - * they are safe for hash-table with rehash - */ -void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, - struct cfs_hash_bd *bds); -void 
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - int excl); -void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - int excl); - -static inline void -cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key, - struct cfs_hash_bd *bds, int excl) -{ - cfs_hash_dual_bd_get(hs, key, bds); - cfs_hash_dual_bd_lock(hs, bds, excl); -} - -struct hlist_node * -cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key); -struct hlist_node * -cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key, struct hlist_node *hnode, - int insist_add); -struct hlist_node * -cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key, struct hlist_node *hnode); - -/* Hash init/cleanup functions */ -struct cfs_hash * -cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits, - unsigned int bkt_bits, unsigned int extra_bytes, - unsigned int min_theta, unsigned int max_theta, - struct cfs_hash_ops *ops, unsigned int flags); - -struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs); -void cfs_hash_putref(struct cfs_hash *hs); - -/* Hash addition functions */ -void cfs_hash_add(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode); -int cfs_hash_add_unique(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode); -void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode); - -/* Hash deletion functions */ -void *cfs_hash_del(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode); -void *cfs_hash_del_key(struct cfs_hash *hs, const void *key); - -/* Hash lookup/for_each functions */ -#define CFS_HASH_LOOP_HOG 1024 - -typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs, - struct cfs_hash_bd *bd, - struct hlist_node *node, - void *data); -void * -cfs_hash_lookup(struct cfs_hash *hs, const void *key); -void -cfs_hash_for_each(struct cfs_hash *hs, 
cfs_hash_for_each_cb_t cb, void *data); -void -cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, - void *data); -int -cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, - void *data, int start); -int -cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, - void *data); -void -cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, - cfs_hash_for_each_cb_t cb, void *data); -typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data); -void -cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t cb, void *data); - -void -cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex, - cfs_hash_for_each_cb_t cb, void *data); -int cfs_hash_is_empty(struct cfs_hash *hs); -u64 cfs_hash_size_get(struct cfs_hash *hs); - -/* - * Rehash - Theta is calculated to be the average chained - * hash depth assuming a perfectly uniform hash function. - */ -void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs); -void cfs_hash_rehash_cancel(struct cfs_hash *hs); -void cfs_hash_rehash(struct cfs_hash *hs, int do_rehash); -void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key, - void *new_key, struct hlist_node *hnode); - -#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 -/* Validate hnode references the correct key */ -static inline void -cfs_hash_key_validate(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode) -{ - LASSERT(cfs_hash_keycmp(hs, key, hnode)); -} - -/* Validate hnode is in the correct bucket */ -static inline void -cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_bd bds[2]; - - cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds); - LASSERT(bds[0].bd_bucket == bd->bd_bucket || - bds[1].bd_bucket == bd->bd_bucket); -} - -#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */ - -static inline void -cfs_hash_key_validate(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode) {} - -static inline void 
-cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) {} - -#endif /* CFS_HASH_DEBUG_LEVEL */ - -#define CFS_HASH_THETA_BITS 10 -#define CFS_HASH_MIN_THETA BIT(CFS_HASH_THETA_BITS - 1) -#define CFS_HASH_MAX_THETA BIT(CFS_HASH_THETA_BITS + 1) - -/* Return integer component of theta */ -static inline int __cfs_hash_theta_int(int theta) -{ - return (theta >> CFS_HASH_THETA_BITS); -} - -/* Return a fractional value between 0 and 999 */ -static inline int __cfs_hash_theta_frac(int theta) -{ - return ((theta * 1000) >> CFS_HASH_THETA_BITS) - - (__cfs_hash_theta_int(theta) * 1000); -} - -static inline int __cfs_hash_theta(struct cfs_hash *hs) -{ - return (atomic_read(&hs->hs_count) << - CFS_HASH_THETA_BITS) >> hs->hs_cur_bits; -} - -static inline void -__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max) -{ - LASSERT(min < max); - hs->hs_min_theta = (u16)min; - hs->hs_max_theta = (u16)max; -} - -/* Generic debug formatting routines mainly for proc handler */ -struct seq_file; -void cfs_hash_debug_header(struct seq_file *m); -void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m); - -/* - * Generic djb2 hash algorithm for character arrays. - */ -static inline unsigned -cfs_hash_djb2_hash(const void *key, size_t size, unsigned int mask) -{ - unsigned int i, hash = 5381; - - LASSERT(key); - - for (i = 0; i < size; i++) - hash = hash * 33 + ((char *)key)[i]; - - return (hash & mask); -} - -/* - * Generic u32 hash algorithm. - */ -static inline unsigned -cfs_hash_u32_hash(const u32 key, unsigned int mask) -{ - return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask); -} - -/* - * Generic u64 hash algorithm. 
- */ -static inline unsigned -cfs_hash_u64_hash(const u64 key, unsigned int mask) -{ - return ((unsigned int)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask); -} - -/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */ -#define cfs_hash_for_each_bd(bds, n, i) \ - for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++) - -/** iterate over all buckets of @hs */ -#define cfs_hash_for_each_bucket(hs, bd, pos) \ - for (pos = 0; \ - pos < CFS_HASH_NBKT(hs) && \ - ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++) - -/** iterate over all hlist of bucket @bd */ -#define cfs_hash_bd_for_each_hlist(hs, bd, hlist) \ - for ((bd)->bd_offset = 0; \ - (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) && \ - (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL; \ - (bd)->bd_offset++) - -/* !__LIBCFS__HASH_H__ */ -#endif diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h deleted file mode 100644 index 491d5971d199..000000000000 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h +++ /dev/null @@ -1,200 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/include/libcfs/libcfs_private.h - * - * Various defines for libcfs. - * - */ - -#ifndef __LIBCFS_PRIVATE_H__ -#define __LIBCFS_PRIVATE_H__ - -#ifndef DEBUG_SUBSYSTEM -# define DEBUG_SUBSYSTEM S_UNDEFINED -#endif - -#define LASSERTF(cond, fmt, ...) \ -do { \ - if (unlikely(!(cond))) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \ - libcfs_debug_msg(&__msg_data, \ - "ASSERTION( %s ) failed: " fmt, #cond, \ - ## __VA_ARGS__); \ - lbug_with_loc(&__msg_data); \ - } \ -} while (0) - -#define LASSERT(cond) LASSERTF(cond, "\n") - -#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK -/** - * This is for more expensive checks that one doesn't want to be enabled all - * the time. LINVRNT() has to be explicitly enabled by - * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option. 
- */ -# define LINVRNT(exp) LASSERT(exp) -#else -# define LINVRNT(exp) ((void)sizeof !!(exp)) -#endif - -void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg); - -#define LBUG() \ -do { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \ - lbug_with_loc(&msgdata); \ -} while (0) - -/* - * Use #define rather than inline, as lnet_cpt_table() might - * not be defined yet - */ -#define kmalloc_cpt(size, flags, cpt) \ - kmalloc_node(size, flags, cfs_cpt_spread_node(lnet_cpt_table(), cpt)) - -#define kzalloc_cpt(size, flags, cpt) \ - kmalloc_node(size, flags | __GFP_ZERO, \ - cfs_cpt_spread_node(lnet_cpt_table(), cpt)) - -#define kvmalloc_cpt(size, flags, cpt) \ - kvmalloc_node(size, flags, \ - cfs_cpt_spread_node(lnet_cpt_table(), cpt)) - -#define kvzalloc_cpt(size, flags, cpt) \ - kvmalloc_node(size, flags | __GFP_ZERO, \ - cfs_cpt_spread_node(lnet_cpt_table(), cpt)) - -/******************************************************************************/ - -void libcfs_debug_dumplog(void); -int libcfs_debug_init(unsigned long bufsize); -int libcfs_debug_cleanup(void); -int libcfs_debug_clear_buffer(void); -int libcfs_debug_mark_buffer(const char *text); - -/* - * allocate a variable array, returned value is an array of pointers. - * Caller can specify length of array by count. 
- */ -void *cfs_array_alloc(int count, unsigned int size); -void cfs_array_free(void *vars); - -#define LASSERT_ATOMIC_ENABLED (1) - -#if LASSERT_ATOMIC_ENABLED - -/** assert value of @a is equal to @v */ -#define LASSERT_ATOMIC_EQ(a, v) \ - LASSERTF(atomic_read(a) == v, "value: %d\n", atomic_read((a))) - -/** assert value of @a is unequal to @v */ -#define LASSERT_ATOMIC_NE(a, v) \ - LASSERTF(atomic_read(a) != v, "value: %d\n", atomic_read((a))) - -/** assert value of @a is little than @v */ -#define LASSERT_ATOMIC_LT(a, v) \ - LASSERTF(atomic_read(a) < v, "value: %d\n", atomic_read((a))) - -/** assert value of @a is little/equal to @v */ -#define LASSERT_ATOMIC_LE(a, v) \ - LASSERTF(atomic_read(a) <= v, "value: %d\n", atomic_read((a))) - -/** assert value of @a is great than @v */ -#define LASSERT_ATOMIC_GT(a, v) \ - LASSERTF(atomic_read(a) > v, "value: %d\n", atomic_read((a))) - -/** assert value of @a is great/equal to @v */ -#define LASSERT_ATOMIC_GE(a, v) \ - LASSERTF(atomic_read(a) >= v, "value: %d\n", atomic_read((a))) - -/** assert value of @a is great than @v1 and little than @v2 */ -#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \ -do { \ - int __v = atomic_read(a); \ - LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \ -} while (0) - -/** assert value of @a is great than @v1 and little/equal to @v2 */ -#define LASSERT_ATOMIC_GT_LE(a, v1, v2) \ -do { \ - int __v = atomic_read(a); \ - LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \ -} while (0) - -/** assert value of @a is great/equal to @v1 and little than @v2 */ -#define LASSERT_ATOMIC_GE_LT(a, v1, v2) \ -do { \ - int __v = atomic_read(a); \ - LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \ -} while (0) - -/** assert value of @a is great/equal to @v1 and little/equal to @v2 */ -#define LASSERT_ATOMIC_GE_LE(a, v1, v2) \ -do { \ - int __v = atomic_read(a); \ - LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \ -} while (0) - -#else /* !LASSERT_ATOMIC_ENABLED */ - -#define 
LASSERT_ATOMIC_EQ(a, v) do {} while (0) -#define LASSERT_ATOMIC_NE(a, v) do {} while (0) -#define LASSERT_ATOMIC_LT(a, v) do {} while (0) -#define LASSERT_ATOMIC_LE(a, v) do {} while (0) -#define LASSERT_ATOMIC_GT(a, v) do {} while (0) -#define LASSERT_ATOMIC_GE(a, v) do {} while (0) -#define LASSERT_ATOMIC_GT_LT(a, v1, v2) do {} while (0) -#define LASSERT_ATOMIC_GT_LE(a, v1, v2) do {} while (0) -#define LASSERT_ATOMIC_GE_LT(a, v1, v2) do {} while (0) -#define LASSERT_ATOMIC_GE_LE(a, v1, v2) do {} while (0) - -#endif /* LASSERT_ATOMIC_ENABLED */ - -#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0) -#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0) - -/* implication */ -#define ergo(a, b) (!(a) || (b)) -/* logical equivalence */ -#define equi(a, b) (!!(a) == !!(b)) - -#ifndef HAVE_CFS_SIZE_ROUND -static inline size_t cfs_size_round(int val) -{ - return round_up(val, 8); -} - -#define HAVE_CFS_SIZE_ROUND -#endif - -#endif diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h deleted file mode 100644 index cd7c3ccb2dc0..000000000000 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/include/libcfs/libcfs_string.h - * - * Generic string manipulation functions. - * - * Author: Nathan Rutman - */ - -#ifndef __LIBCFS_STRING_H__ -#define __LIBCFS_STRING_H__ - -#include - -/* libcfs_string.c */ -/* Convert a text string to a bitmask */ -int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), - int *oldmask, int minmask, int allmask); -/* trim leading and trailing space characters */ -char *cfs_firststr(char *str, size_t size); - -/** - * Structure to represent NULL-less strings. - */ -struct cfs_lstr { - char *ls_str; - int ls_len; -}; - -/* - * Structure to represent \ token of the syntax. - */ -struct cfs_range_expr { - /* - * Link to cfs_expr_list::el_exprs. 
- */ - struct list_head re_link; - u32 re_lo; - u32 re_hi; - u32 re_stride; -}; - -struct cfs_expr_list { - struct list_head el_link; - struct list_head el_exprs; -}; - -int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res); -int cfs_str2num_check(char *str, int nob, unsigned int *num, - unsigned int min, unsigned int max); -int cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list); -int cfs_expr_list_print(char *buffer, int count, - struct cfs_expr_list *expr_list); -int cfs_expr_list_values(struct cfs_expr_list *expr_list, - int max, u32 **values); -static inline void -cfs_expr_list_values_free(u32 *values, int num) -{ - /* - * This array is allocated by kvalloc(), so it shouldn't be freed - * by OBD_FREE() if it's called by module other than libcfs & LNet, - * otherwise we will see fake memory leak - */ - kvfree(values); -} - -void cfs_expr_list_free(struct cfs_expr_list *expr_list); -int cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max, - struct cfs_expr_list **elpp); -void cfs_expr_list_free_list(struct list_head *list); - -#endif diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h deleted file mode 100644 index dae2e4f0056c..000000000000 --- a/drivers/staging/lustre/include/linux/lnet/api.h +++ /dev/null @@ -1,212 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011 - 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate, Inc. - */ - -#ifndef __LNET_API_H__ -#define __LNET_API_H__ - -/** \defgroup lnet LNet - * - * The Lustre Networking subsystem. - * - * LNet is an asynchronous message-passing API, which provides an unreliable - * connectionless service that can't guarantee any order. It supports OFA IB, - * TCP/IP, and Cray Interconnects, and routes between heterogeneous networks. - * - * @{ - */ - -#include - -/** \defgroup lnet_init_fini Initialization and cleanup - * The LNet must be properly initialized before any LNet calls can be made. - * @{ - */ -int LNetNIInit(lnet_pid_t requested_pid); -int LNetNIFini(void); -/** @} lnet_init_fini */ - -/** \defgroup lnet_addr LNet addressing and basic types - * - * Addressing scheme and basic data types of LNet. - * - * The LNet API is memory-oriented, so LNet must be able to address not only - * end-points but also memory region within a process address space. - * An ::lnet_nid_t addresses an end-point. An ::lnet_pid_t identifies a process - * in a node. A portal represents an opening in the address space of a - * process. Match bits is criteria to identify a region of memory inside a - * portal, and offset specifies an offset within the memory region. - * - * LNet creates a table of portals for each process during initialization. - * This table has MAX_PORTALS entries and its size can't be dynamically - * changed. 
A portal stays empty until the owning process starts to add - * memory regions to it. A portal is sometimes called an index because - * it's an entry in the portals table of a process. - * - * \see LNetMEAttach - * @{ - */ -int LNetGetId(unsigned int index, struct lnet_process_id *id); -int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order); - -/** @} lnet_addr */ - -/** \defgroup lnet_me Match entries - * - * A match entry (abbreviated as ME) describes a set of criteria to accept - * incoming requests. - * - * A portal is essentially a match list plus a set of attributes. A match - * list is a chain of MEs. Each ME includes a pointer to a memory descriptor - * and a set of match criteria. The match criteria can be used to reject - * incoming requests based on process ID or the match bits provided in the - * request. MEs can be dynamically inserted into a match list by LNetMEAttach() - * and LNetMEInsert(), and removed from its list by LNetMEUnlink(). - * @{ - */ -int LNetMEAttach(unsigned int portal, - struct lnet_process_id match_id_in, - __u64 match_bits_in, - __u64 ignore_bits_in, - enum lnet_unlink unlink_in, - enum lnet_ins_pos pos_in, - struct lnet_handle_me *handle_out); - -int LNetMEInsert(struct lnet_handle_me current_in, - struct lnet_process_id match_id_in, - __u64 match_bits_in, - __u64 ignore_bits_in, - enum lnet_unlink unlink_in, - enum lnet_ins_pos position_in, - struct lnet_handle_me *handle_out); - -int LNetMEUnlink(struct lnet_handle_me current_in); -/** @} lnet_me */ - -/** \defgroup lnet_md Memory descriptors - * - * A memory descriptor contains information about a region of a user's - * memory (either in kernel or user space) and optionally points to an - * event queue where information about the operations performed on the - * memory descriptor are recorded. Memory descriptor is abbreviated as - * MD and can be used interchangeably with the memory region it describes. 
- * - * The LNet API provides two operations to create MDs: LNetMDAttach() - * and LNetMDBind(); one operation to unlink and release the resources - * associated with a MD: LNetMDUnlink(). - * @{ - */ -int LNetMDAttach(struct lnet_handle_me current_in, - struct lnet_md md_in, - enum lnet_unlink unlink_in, - struct lnet_handle_md *md_handle_out); - -int LNetMDBind(struct lnet_md md_in, - enum lnet_unlink unlink_in, - struct lnet_handle_md *md_handle_out); - -int LNetMDUnlink(struct lnet_handle_md md_in); -/** @} lnet_md */ - -/** \defgroup lnet_eq Events and event queues - * - * Event queues (abbreviated as EQ) are used to log operations performed on - * local MDs. In particular, they signal the completion of a data transmission - * into or out of a MD. They can also be used to hold acknowledgments for - * completed PUT operations and indicate when a MD has been unlinked. Multiple - * MDs can share a single EQ. An EQ may have an optional event handler - * associated with it. If an event handler exists, it will be run for each - * event that is deposited into the EQ. - * - * In addition to the lnet_handle_eq, the LNet API defines two types - * associated with events: The ::lnet_event_kind defines the kinds of events - * that can be stored in an EQ. The lnet_event defines a structure that - * holds the information about with an event. - * - * There are five functions for dealing with EQs: LNetEQAlloc() is used to - * create an EQ and allocate the resources needed, while LNetEQFree() - * releases these resources and free the EQ. LNetEQGet() retrieves the next - * event from an EQ, and LNetEQWait() can be used to block a process until - * an EQ has at least one event. LNetEQPoll() can be used to test or wait - * on multiple EQs. 
- * @{ - */ -int LNetEQAlloc(unsigned int count_in, - lnet_eq_handler_t handler, - struct lnet_handle_eq *handle_out); - -int LNetEQFree(struct lnet_handle_eq eventq_in); - -int LNetEQPoll(struct lnet_handle_eq *eventqs_in, - int neq_in, - int timeout_ms, - int interruptible, - struct lnet_event *event_out, - int *which_eq_out); -/** @} lnet_eq */ - -/** \defgroup lnet_data Data movement operations - * - * The LNet API provides two data movement operations: LNetPut() - * and LNetGet(). - * @{ - */ -int LNetPut(lnet_nid_t self, - struct lnet_handle_md md_in, - enum lnet_ack_req ack_req_in, - struct lnet_process_id target_in, - unsigned int portal_in, - __u64 match_bits_in, - unsigned int offset_in, - __u64 hdr_data_in); - -int LNetGet(lnet_nid_t self, - struct lnet_handle_md md_in, - struct lnet_process_id target_in, - unsigned int portal_in, - __u64 match_bits_in, - unsigned int offset_in); -/** @} lnet_data */ - -/** \defgroup lnet_misc Miscellaneous operations. - * Miscellaneous operations. - * @{ - */ -int LNetSetLazyPortal(int portal); -int LNetClearLazyPortal(int portal); -int LNetCtl(unsigned int cmd, void *arg); -void LNetDebugPeer(struct lnet_process_id id); - -/** @} lnet_misc */ - -/** @} lnet */ -#endif diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h deleted file mode 100644 index 973c17a1c4a1..000000000000 --- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h +++ /dev/null @@ -1,652 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate, Inc. - * - * lnet/include/lnet/lib-lnet.h - */ - -#ifndef __LNET_LIB_LNET_H__ -#define __LNET_LIB_LNET_H__ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -extern struct lnet the_lnet; /* THE network */ - -#if (BITS_PER_LONG == 32) -/* 2 CPTs, allowing more CPTs might make us under memory pressure */ -#define LNET_CPT_MAX_BITS 1 - -#else /* 64-bit system */ -/* - * 256 CPTs for thousands of CPUs, allowing more CPTs might make us - * under risk of consuming all lh_cookie. 
- */ -#define LNET_CPT_MAX_BITS 8 -#endif /* BITS_PER_LONG == 32 */ - -/* max allowed CPT number */ -#define LNET_CPT_MAX (1 << LNET_CPT_MAX_BITS) - -#define LNET_CPT_NUMBER (the_lnet.ln_cpt_number) -#define LNET_CPT_BITS (the_lnet.ln_cpt_bits) -#define LNET_CPT_MASK ((1ULL << LNET_CPT_BITS) - 1) - -/** exclusive lock */ -#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX - -/* need both kernel and user-land acceptor */ -#define LNET_ACCEPTOR_MIN_RESERVED_PORT 512 -#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023 - -static inline int lnet_is_route_alive(struct lnet_route *route) -{ - /* gateway is down */ - if (!route->lr_gateway->lp_alive) - return 0; - /* no NI status, assume it's alive */ - if ((route->lr_gateway->lp_ping_feats & - LNET_PING_FEAT_NI_STATUS) == 0) - return 1; - /* has NI status, check # down NIs */ - return route->lr_downis == 0; -} - -static inline int lnet_is_wire_handle_none(struct lnet_handle_wire *wh) -{ - return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE && - wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE); -} - -static inline int lnet_md_exhausted(struct lnet_libmd *md) -{ - return (!md->md_threshold || - ((md->md_options & LNET_MD_MAX_SIZE) && - md->md_offset + md->md_max_size > md->md_length)); -} - -static inline int lnet_md_unlinkable(struct lnet_libmd *md) -{ - /* - * Should unlink md when its refcount is 0 and either: - * - md has been flagged for deletion (by auto unlink or - * LNetM[DE]Unlink, in the latter case md may not be exhausted). - * - auto unlink is on and md is exhausted. 
- */ - if (md->md_refcount) - return 0; - - if (md->md_flags & LNET_MD_FLAG_ZOMBIE) - return 1; - - return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) && - lnet_md_exhausted(md)); -} - -#define lnet_cpt_table() (the_lnet.ln_cpt_table) -#define lnet_cpt_current() cfs_cpt_current(the_lnet.ln_cpt_table, 1) - -static inline int -lnet_cpt_of_cookie(__u64 cookie) -{ - unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK; - - /* - * LNET_CPT_NUMBER doesn't have to be power2, which means we can - * get illegal cpt from it's invalid cookie - */ - return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER; -} - -static inline void -lnet_res_lock(int cpt) -{ - cfs_percpt_lock(the_lnet.ln_res_lock, cpt); -} - -static inline void -lnet_res_unlock(int cpt) -{ - cfs_percpt_unlock(the_lnet.ln_res_lock, cpt); -} - -static inline int -lnet_res_lock_current(void) -{ - int cpt = lnet_cpt_current(); - - lnet_res_lock(cpt); - return cpt; -} - -static inline void -lnet_net_lock(int cpt) -{ - cfs_percpt_lock(the_lnet.ln_net_lock, cpt); -} - -static inline void -lnet_net_unlock(int cpt) -{ - cfs_percpt_unlock(the_lnet.ln_net_lock, cpt); -} - -static inline int -lnet_net_lock_current(void) -{ - int cpt = lnet_cpt_current(); - - lnet_net_lock(cpt); - return cpt; -} - -#define LNET_LOCK() lnet_net_lock(LNET_LOCK_EX) -#define LNET_UNLOCK() lnet_net_unlock(LNET_LOCK_EX) - -#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock) -#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock) -#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock) -#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock) -#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock) -#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock) - -#define MAX_PORTALS 64 - -static inline struct lnet_libmd * -lnet_md_alloc(struct lnet_md *umd) -{ - struct lnet_libmd *md; - unsigned int size; - unsigned int niov; - - if (umd->options & LNET_MD_KIOV) { - niov = umd->length; - size = offsetof(struct 
lnet_libmd, md_iov.kiov[niov]); - } else { - niov = umd->options & LNET_MD_IOVEC ? umd->length : 1; - size = offsetof(struct lnet_libmd, md_iov.iov[niov]); - } - - md = kzalloc(size, GFP_NOFS); - - if (md) { - /* Set here in case of early free */ - md->md_options = umd->options; - md->md_niov = niov; - INIT_LIST_HEAD(&md->md_list); - } - - return md; -} - -struct lnet_libhandle *lnet_res_lh_lookup(struct lnet_res_container *rec, - __u64 cookie); -void lnet_res_lh_initialize(struct lnet_res_container *rec, - struct lnet_libhandle *lh); -static inline void -lnet_res_lh_invalidate(struct lnet_libhandle *lh) -{ - /* NB: cookie is still useful, don't reset it */ - list_del(&lh->lh_hash_chain); -} - -static inline void -lnet_eq2handle(struct lnet_handle_eq *handle, struct lnet_eq *eq) -{ - if (!eq) { - LNetInvalidateEQHandle(handle); - return; - } - - handle->cookie = eq->eq_lh.lh_cookie; -} - -static inline struct lnet_eq * -lnet_handle2eq(struct lnet_handle_eq *handle) -{ - struct lnet_libhandle *lh; - - lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie); - if (!lh) - return NULL; - - return lh_entry(lh, struct lnet_eq, eq_lh); -} - -static inline void -lnet_md2handle(struct lnet_handle_md *handle, struct lnet_libmd *md) -{ - handle->cookie = md->md_lh.lh_cookie; -} - -static inline struct lnet_libmd * -lnet_handle2md(struct lnet_handle_md *handle) -{ - /* ALWAYS called with resource lock held */ - struct lnet_libhandle *lh; - int cpt; - - cpt = lnet_cpt_of_cookie(handle->cookie); - lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt], - handle->cookie); - if (!lh) - return NULL; - - return lh_entry(lh, struct lnet_libmd, md_lh); -} - -static inline struct lnet_libmd * -lnet_wire_handle2md(struct lnet_handle_wire *wh) -{ - /* ALWAYS called with resource lock held */ - struct lnet_libhandle *lh; - int cpt; - - if (wh->wh_interface_cookie != the_lnet.ln_interface_cookie) - return NULL; - - cpt = lnet_cpt_of_cookie(wh->wh_object_cookie); - lh = 
lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt], - wh->wh_object_cookie); - if (!lh) - return NULL; - - return lh_entry(lh, struct lnet_libmd, md_lh); -} - -static inline void -lnet_me2handle(struct lnet_handle_me *handle, struct lnet_me *me) -{ - handle->cookie = me->me_lh.lh_cookie; -} - -static inline struct lnet_me * -lnet_handle2me(struct lnet_handle_me *handle) -{ - /* ALWAYS called with resource lock held */ - struct lnet_libhandle *lh; - int cpt; - - cpt = lnet_cpt_of_cookie(handle->cookie); - lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt], - handle->cookie); - if (!lh) - return NULL; - - return lh_entry(lh, struct lnet_me, me_lh); -} - -static inline void -lnet_peer_addref_locked(struct lnet_peer *lp) -{ - LASSERT(lp->lp_refcount > 0); - lp->lp_refcount++; -} - -void lnet_destroy_peer_locked(struct lnet_peer *lp); - -static inline void -lnet_peer_decref_locked(struct lnet_peer *lp) -{ - LASSERT(lp->lp_refcount > 0); - lp->lp_refcount--; - if (!lp->lp_refcount) - lnet_destroy_peer_locked(lp); -} - -static inline int -lnet_isrouter(struct lnet_peer *lp) -{ - return lp->lp_rtr_refcount ? 
1 : 0; -} - -static inline void -lnet_ni_addref_locked(struct lnet_ni *ni, int cpt) -{ - LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER); - LASSERT(*ni->ni_refs[cpt] >= 0); - - (*ni->ni_refs[cpt])++; -} - -static inline void -lnet_ni_addref(struct lnet_ni *ni) -{ - lnet_net_lock(0); - lnet_ni_addref_locked(ni, 0); - lnet_net_unlock(0); -} - -static inline void -lnet_ni_decref_locked(struct lnet_ni *ni, int cpt) -{ - LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER); - LASSERT(*ni->ni_refs[cpt] > 0); - - (*ni->ni_refs[cpt])--; -} - -static inline void -lnet_ni_decref(struct lnet_ni *ni) -{ - lnet_net_lock(0); - lnet_ni_decref_locked(ni, 0); - lnet_net_unlock(0); -} - -void lnet_ni_free(struct lnet_ni *ni); -struct lnet_ni * -lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist); - -static inline int -lnet_nid2peerhash(lnet_nid_t nid) -{ - return hash_long(nid, LNET_PEER_HASH_BITS); -} - -static inline struct list_head * -lnet_net2rnethash(__u32 net) -{ - return &the_lnet.ln_remote_nets_hash[(LNET_NETNUM(net) + - LNET_NETTYP(net)) & - ((1U << the_lnet.ln_remote_nets_hbits) - 1)]; -} - -extern struct lnet_lnd the_lolnd; -extern int avoid_asym_router_failure; - -int lnet_cpt_of_nid_locked(lnet_nid_t nid); -int lnet_cpt_of_nid(lnet_nid_t nid); -struct lnet_ni *lnet_nid2ni_locked(lnet_nid_t nid, int cpt); -struct lnet_ni *lnet_net2ni_locked(__u32 net, int cpt); -struct lnet_ni *lnet_net2ni(__u32 net); - -extern int portal_rotor; - -int lnet_lib_init(void); -void lnet_lib_exit(void); - -int lnet_notify(struct lnet_ni *ni, lnet_nid_t peer, int alive, - unsigned long when); -void lnet_notify_locked(struct lnet_peer *lp, int notifylnd, int alive, - unsigned long when); -int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid, - unsigned int priority); -int lnet_check_routes(void); -int lnet_del_route(__u32 net, lnet_nid_t gw_nid); -void lnet_destroy_routes(void); -int lnet_get_route(int idx, __u32 *net, __u32 *hops, - lnet_nid_t *gateway, __u32 *alive, 
__u32 *priority); -int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg); - -void lnet_router_debugfs_init(void); -void lnet_router_debugfs_fini(void); -int lnet_rtrpools_alloc(int im_a_router); -void lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages); -int lnet_rtrpools_adjust(int tiny, int small, int large); -int lnet_rtrpools_enable(void); -void lnet_rtrpools_disable(void); -void lnet_rtrpools_free(int keep_pools); -struct lnet_remotenet *lnet_find_net_locked(__u32 net); -int lnet_dyn_add_ni(lnet_pid_t requested_pid, - struct lnet_ioctl_config_data *conf); -int lnet_dyn_del_ni(__u32 net); -int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason); - -int lnet_islocalnid(lnet_nid_t nid); -int lnet_islocalnet(__u32 net); - -void lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md, - unsigned int offset, unsigned int mlen); -void lnet_msg_detach_md(struct lnet_msg *msg, int status); -void lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev); -void lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type); -void lnet_msg_commit(struct lnet_msg *msg, int cpt); -void lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status); - -void lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev); -void lnet_prep_send(struct lnet_msg *msg, int type, - struct lnet_process_id target, unsigned int offset, - unsigned int len); -int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid); -void lnet_return_tx_credits_locked(struct lnet_msg *msg); -void lnet_return_rx_credits_locked(struct lnet_msg *msg); -void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp); -void lnet_drop_routed_msgs_locked(struct list_head *list, int cpt); - -/* portals functions */ -/* portals attributes */ -static inline int -lnet_ptl_is_lazy(struct lnet_portal *ptl) -{ - return !!(ptl->ptl_options & LNET_PTL_LAZY); -} - -static inline int -lnet_ptl_is_unique(struct lnet_portal *ptl) -{ - return 
!!(ptl->ptl_options & LNET_PTL_MATCH_UNIQUE); -} - -static inline int -lnet_ptl_is_wildcard(struct lnet_portal *ptl) -{ - return !!(ptl->ptl_options & LNET_PTL_MATCH_WILDCARD); -} - -static inline void -lnet_ptl_setopt(struct lnet_portal *ptl, int opt) -{ - ptl->ptl_options |= opt; -} - -static inline void -lnet_ptl_unsetopt(struct lnet_portal *ptl, int opt) -{ - ptl->ptl_options &= ~opt; -} - -/* match-table functions */ -struct list_head *lnet_mt_match_head(struct lnet_match_table *mtable, - struct lnet_process_id id, __u64 mbits); -struct lnet_match_table *lnet_mt_of_attach(unsigned int index, - struct lnet_process_id id, - __u64 mbits, __u64 ignore_bits, - enum lnet_ins_pos pos); -int lnet_mt_match_md(struct lnet_match_table *mtable, - struct lnet_match_info *info, struct lnet_msg *msg); - -/* portals match/attach functions */ -void lnet_ptl_attach_md(struct lnet_me *me, struct lnet_libmd *md, - struct list_head *matches, struct list_head *drops); -void lnet_ptl_detach_md(struct lnet_me *me, struct lnet_libmd *md); -int lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg); - -/* initialized and finalize portals */ -int lnet_portals_create(void); -void lnet_portals_destroy(void); - -/* message functions */ -int lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, - lnet_nid_t fromnid, void *private, int rdma_req); -int lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg); -int lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg); - -void lnet_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, - int delayed, unsigned int offset, unsigned int mlen, - unsigned int rlen); -void lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, - int delayed, unsigned int offset, - unsigned int mlen, unsigned int rlen); - -struct lnet_msg *lnet_create_reply_msg(struct lnet_ni *ni, - struct lnet_msg *get_msg); -void lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *msg, - unsigned int len); - -void 
lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int rc); - -void lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, - unsigned int nob); -void lnet_drop_delayed_msg_list(struct list_head *head, char *reason); -void lnet_recv_delayed_msg_list(struct list_head *head); - -int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt); -void lnet_msg_container_cleanup(struct lnet_msg_container *container); -void lnet_msg_containers_destroy(void); -int lnet_msg_containers_create(void); - -char *lnet_msgtyp2str(int type); -void lnet_print_hdr(struct lnet_hdr *hdr); -int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold); - -/** \addtogroup lnet_fault_simulation @{ */ - -int lnet_fault_ctl(int cmd, struct libcfs_ioctl_data *data); -int lnet_fault_init(void); -void lnet_fault_fini(void); - -bool lnet_drop_rule_match(struct lnet_hdr *hdr); - -int lnet_delay_rule_add(struct lnet_fault_attr *attr); -int lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown); -int lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr, - struct lnet_fault_stat *stat); -void lnet_delay_rule_reset(void); -void lnet_delay_rule_check(void); -bool lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg); - -/** @} lnet_fault_simulation */ - -void lnet_counters_get(struct lnet_counters *counters); -void lnet_counters_reset(void); - -unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov); -int lnet_extract_iov(int dst_niov, struct kvec *dst, - int src_niov, const struct kvec *src, - unsigned int offset, unsigned int len); - -unsigned int lnet_kiov_nob(unsigned int niov, struct bio_vec *iov); -int lnet_extract_kiov(int dst_niov, struct bio_vec *dst, - int src_niov, const struct bio_vec *src, - unsigned int offset, unsigned int len); - -void lnet_copy_iov2iter(struct iov_iter *to, - unsigned int nsiov, const struct kvec *siov, - unsigned int soffset, unsigned int nob); -void lnet_copy_kiov2iter(struct iov_iter *to, - unsigned 
int nkiov, const struct bio_vec *kiov, - unsigned int kiovoffset, unsigned int nob); - -void lnet_me_unlink(struct lnet_me *me); - -void lnet_md_unlink(struct lnet_libmd *md); -void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd); - -void lnet_register_lnd(struct lnet_lnd *lnd); -void lnet_unregister_lnd(struct lnet_lnd *lnd); - -int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, - __u32 local_ip, __u32 peer_ip, int peer_port); -void lnet_connect_console_error(int rc, lnet_nid_t peer_nid, - __u32 peer_ip, int port); -int lnet_count_acceptor_nis(void); -int lnet_acceptor_timeout(void); -int lnet_acceptor_port(void); - -int lnet_count_acceptor_nis(void); -int lnet_acceptor_port(void); - -int lnet_acceptor_start(void); -void lnet_acceptor_stop(void); - -int lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask); -int lnet_ipif_enumerate(char ***names); -void lnet_ipif_free_enumeration(char **names, int n); -int lnet_sock_setbuf(struct socket *socket, int txbufsize, int rxbufsize); -int lnet_sock_getbuf(struct socket *socket, int *txbufsize, int *rxbufsize); -int lnet_sock_getaddr(struct socket *socket, bool remote, __u32 *ip, int *port); -int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout); -int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout); - -int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog); -int lnet_sock_accept(struct socket **newsockp, struct socket *sock); -int lnet_sock_connect(struct socket **sockp, int *fatal, - __u32 local_ip, int local_port, - __u32 peer_ip, int peer_port); -void libcfs_sock_release(struct socket *sock); - -int lnet_peers_start_down(void); -int lnet_peer_buffer_credits(struct lnet_ni *ni); - -int lnet_router_checker_start(void); -void lnet_router_checker_stop(void); -void lnet_router_ni_update_locked(struct lnet_peer *gw, __u32 net); -void lnet_swap_pinginfo(struct lnet_ping_info *info); - -int lnet_parse_ip2nets(char **networksp, 
char *ip2nets); -int lnet_parse_routes(char *route_str, int *im_a_router); -int lnet_parse_networks(struct list_head *nilist, char *networks); -int lnet_net_unique(__u32 net, struct list_head *nilist); - -int lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt); -struct lnet_peer *lnet_find_peer_locked(struct lnet_peer_table *ptable, - lnet_nid_t nid); -void lnet_peer_tables_cleanup(struct lnet_ni *ni); -void lnet_peer_tables_destroy(void); -int lnet_peer_tables_create(void); -void lnet_debug_peer(lnet_nid_t nid); -int lnet_get_peer_info(__u32 peer_index, __u64 *nid, - char alivness[LNET_MAX_STR_LEN], - __u32 *cpt_iter, __u32 *refcount, - __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits, - __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credtis, - __u32 *peer_tx_qnob); - -static inline void -lnet_peer_set_alive(struct lnet_peer *lp) -{ - lp->lp_last_query = jiffies; - lp->lp_last_alive = jiffies; - if (!lp->lp_alive) - lnet_notify_locked(lp, 0, 1, lp->lp_last_alive); -} - -#endif diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h deleted file mode 100644 index cfe8ee424e94..000000000000 --- a/drivers/staging/lustre/include/linux/lnet/lib-types.h +++ /dev/null @@ -1,666 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate, Inc. - * - * lnet/include/lnet/lib-types.h - */ - -#ifndef __LNET_LIB_TYPES_H__ -#define __LNET_LIB_TYPES_H__ - -#include -#include -#include -#include - -#include -#include - -/* Max payload size */ -#define LNET_MAX_PAYLOAD CONFIG_LNET_MAX_PAYLOAD -#if (LNET_MAX_PAYLOAD < LNET_MTU) -# error "LNET_MAX_PAYLOAD too small - error in configure --with-max-payload-mb" -#elif (LNET_MAX_PAYLOAD > (PAGE_SIZE * LNET_MAX_IOV)) -# error "LNET_MAX_PAYLOAD too large - error in configure --with-max-payload-mb" -#endif - -/* forward refs */ -struct lnet_libmd; - -struct lnet_msg { - struct list_head msg_activelist; - struct list_head msg_list; /* Q for credits/MD */ - - struct lnet_process_id msg_target; - /* where is it from, it's only for building event */ - lnet_nid_t msg_from; - __u32 msg_type; - - /* committed for sending */ - unsigned int msg_tx_committed:1; - /* CPT # this message committed for sending */ - unsigned int msg_tx_cpt:15; - /* committed for receiving */ - unsigned int msg_rx_committed:1; - /* CPT # this message committed for receiving */ - unsigned int msg_rx_cpt:15; - /* queued for tx credit */ - unsigned int msg_tx_delayed:1; - /* queued for RX buffer */ - unsigned int msg_rx_delayed:1; - /* ready for pending on RX delay list */ - unsigned int msg_rx_ready_delay:1; - - unsigned int msg_vmflush:1; /* VM trying to free memory */ - unsigned int msg_target_is_router:1; /* sending to a router */ - unsigned int msg_routing:1; /* being forwarded */ - unsigned int msg_ack:1; /* ack on finalize (PUT) 
*/ - unsigned int msg_sending:1; /* outgoing message */ - unsigned int msg_receiving:1; /* being received */ - unsigned int msg_txcredit:1; /* taken an NI send credit */ - unsigned int msg_peertxcredit:1; /* taken a peer send credit */ - unsigned int msg_rtrcredit:1; /* taken a global router credit */ - unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */ - unsigned int msg_onactivelist:1; /* on the activelist */ - unsigned int msg_rdma_get:1; - - struct lnet_peer *msg_txpeer; /* peer I'm sending to */ - struct lnet_peer *msg_rxpeer; /* peer I received from */ - - void *msg_private; - struct lnet_libmd *msg_md; - - unsigned int msg_len; - unsigned int msg_wanted; - unsigned int msg_offset; - unsigned int msg_niov; - struct kvec *msg_iov; - struct bio_vec *msg_kiov; - - struct lnet_event msg_ev; - struct lnet_hdr msg_hdr; -}; - -struct lnet_libhandle { - struct list_head lh_hash_chain; - __u64 lh_cookie; -}; - -#define lh_entry(ptr, type, member) \ - ((type *)((char *)(ptr) - (char *)(&((type *)0)->member))) - -struct lnet_eq { - struct list_head eq_list; - struct lnet_libhandle eq_lh; - unsigned long eq_enq_seq; - unsigned long eq_deq_seq; - unsigned int eq_size; - lnet_eq_handler_t eq_callback; - struct lnet_event *eq_events; - int **eq_refs; /* percpt refcount for EQ */ -}; - -struct lnet_me { - struct list_head me_list; - struct lnet_libhandle me_lh; - struct lnet_process_id me_match_id; - unsigned int me_portal; - unsigned int me_pos; /* hash offset in mt_hash */ - __u64 me_match_bits; - __u64 me_ignore_bits; - enum lnet_unlink me_unlink; - struct lnet_libmd *me_md; -}; - -struct lnet_libmd { - struct list_head md_list; - struct lnet_libhandle md_lh; - struct lnet_me *md_me; - char *md_start; - unsigned int md_offset; - unsigned int md_length; - unsigned int md_max_size; - int md_threshold; - int md_refcount; - unsigned int md_options; - unsigned int md_flags; - void *md_user_ptr; - struct lnet_eq *md_eq; - unsigned int md_niov; /* # frags */ - 
union { - struct kvec iov[LNET_MAX_IOV]; - struct bio_vec kiov[LNET_MAX_IOV]; - } md_iov; -}; - -#define LNET_MD_FLAG_ZOMBIE BIT(0) -#define LNET_MD_FLAG_AUTO_UNLINK BIT(1) -#define LNET_MD_FLAG_ABORTED BIT(2) - -struct lnet_test_peer { - /* info about peers we are trying to fail */ - struct list_head tp_list; /* ln_test_peers */ - lnet_nid_t tp_nid; /* matching nid */ - unsigned int tp_threshold; /* # failures to simulate */ -}; - -#define LNET_COOKIE_TYPE_MD 1 -#define LNET_COOKIE_TYPE_ME 2 -#define LNET_COOKIE_TYPE_EQ 3 -#define LNET_COOKIE_TYPE_BITS 2 -#define LNET_COOKIE_MASK ((1ULL << LNET_COOKIE_TYPE_BITS) - 1ULL) - -struct lnet_ni; /* forward ref */ - -struct lnet_lnd { - /* fields managed by portals */ - struct list_head lnd_list; /* stash in the LND table */ - int lnd_refcount; /* # active instances */ - - /* fields initialised by the LND */ - __u32 lnd_type; - - int (*lnd_startup)(struct lnet_ni *ni); - void (*lnd_shutdown)(struct lnet_ni *ni); - int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg); - - /* - * In data movement APIs below, payload buffers are described as a set - * of 'niov' fragments which are... - * EITHER - * in virtual memory (struct iovec *iov != NULL) - * OR - * in pages (kernel only: plt_kiov_t *kiov != NULL). - * The LND may NOT overwrite these fragment descriptors. - * An 'offset' and may specify a byte offset within the set of - * fragments to start from - */ - - /* - * Start sending a preformatted message. 'private' is NULL for PUT and - * GET messages; otherwise this is a response to an incoming message - * and 'private' is the 'private' passed to lnet_parse(). Return - * non-zero for immediate failure, otherwise complete later with - * lnet_finalize() - */ - int (*lnd_send)(struct lnet_ni *ni, void *private, - struct lnet_msg *msg); - - /* - * Start receiving 'mlen' bytes of payload data, skipping the following - * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to - * lnet_parse(). 
Return non-zero for immediate failure, otherwise - * complete later with lnet_finalize(). This also gives back a receive - * credit if the LND does flow control. - */ - int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg, - int delayed, struct iov_iter *to, unsigned int rlen); - - /* - * lnet_parse() has had to delay processing of this message - * (e.g. waiting for a forwarding buffer or send credits). Give the - * LND a chance to free urgently needed resources. If called, return 0 - * for success and do NOT give back a receive credit; that has to wait - * until lnd_recv() gets called. On failure return < 0 and - * release resources; lnd_recv() will not be called. - */ - int (*lnd_eager_recv)(struct lnet_ni *ni, void *private, - struct lnet_msg *msg, void **new_privatep); - - /* notification of peer health */ - void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive); - - /* query of peer aliveness */ - void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, - unsigned long *when); - - /* accept a new connection */ - int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock); -}; - -struct lnet_tx_queue { - int tq_credits; /* # tx credits free */ - int tq_credits_min; /* lowest it's been */ - int tq_credits_max; /* total # tx credits */ - struct list_head tq_delayed; /* delayed TXs */ -}; - -struct lnet_ni { - spinlock_t ni_lock; - struct list_head ni_list; /* chain on ln_nis */ - struct list_head ni_cptlist; /* chain on ln_nis_cpt */ - int ni_maxtxcredits; /* # tx credits */ - /* # per-peer send credits */ - int ni_peertxcredits; - /* # per-peer router buffer credits */ - int ni_peerrtrcredits; - /* seconds to consider peer dead */ - int ni_peertimeout; - int ni_ncpts; /* number of CPTs */ - __u32 *ni_cpts; /* bond NI on some CPTs */ - lnet_nid_t ni_nid; /* interface's NID */ - void *ni_data; /* instance-specific data */ - struct lnet_lnd *ni_lnd; /* procedural interface */ - struct lnet_tx_queue **ni_tx_queues; /* percpt TX queues */ 
- int **ni_refs; /* percpt reference count */ - time64_t ni_last_alive;/* when I was last alive */ - struct lnet_ni_status *ni_status; /* my health status */ - /* per NI LND tunables */ - struct lnet_ioctl_config_lnd_tunables *ni_lnd_tunables; - /* equivalent interfaces to use */ - char *ni_interfaces[LNET_MAX_INTERFACES]; - /* original net namespace */ - struct net *ni_net_ns; -}; - -#define LNET_PROTO_PING_MATCHBITS 0x8000000000000000LL - -/* - * NB: value of these features equal to LNET_PROTO_PING_VERSION_x - * of old LNet, so there shouldn't be any compatibility issue - */ -#define LNET_PING_FEAT_INVAL (0) /* no feature */ -#define LNET_PING_FEAT_BASE BIT(0) /* just a ping */ -#define LNET_PING_FEAT_NI_STATUS BIT(1) /* return NI status */ -#define LNET_PING_FEAT_RTE_DISABLED BIT(2) /* Routing enabled */ - -#define LNET_PING_FEAT_MASK (LNET_PING_FEAT_BASE | \ - LNET_PING_FEAT_NI_STATUS) - -/* router checker data, per router */ -#define LNET_MAX_RTR_NIS 16 -#define LNET_PINGINFO_SIZE offsetof(struct lnet_ping_info, pi_ni[LNET_MAX_RTR_NIS]) -struct lnet_rc_data { - /* chain on the_lnet.ln_zombie_rcd or ln_deathrow_rcd */ - struct list_head rcd_list; - struct lnet_handle_md rcd_mdh; /* ping buffer MD */ - struct lnet_peer *rcd_gateway; /* reference to gateway */ - struct lnet_ping_info *rcd_pinginfo; /* ping buffer */ -}; - -struct lnet_peer { - struct list_head lp_hashlist; /* chain on peer hash */ - struct list_head lp_txq; /* messages blocking for - * tx credits - */ - struct list_head lp_rtrq; /* messages blocking for - * router credits - */ - struct list_head lp_rtr_list; /* chain on router list */ - int lp_txcredits; /* # tx credits available */ - int lp_mintxcredits; /* low water mark */ - int lp_rtrcredits; /* # router credits */ - int lp_minrtrcredits; /* low water mark */ - unsigned int lp_alive:1; /* alive/dead? */ - unsigned int lp_notify:1; /* notification outstanding? */ - unsigned int lp_notifylnd:1;/* outstanding notification - * for LND? 
- */ - unsigned int lp_notifying:1; /* some thread is handling - * notification - */ - unsigned int lp_ping_notsent;/* SEND event outstanding - * from ping - */ - int lp_alive_count; /* # times router went - * dead<->alive - */ - long lp_txqnob; /* ytes queued for sending */ - unsigned long lp_timestamp; /* time of last aliveness - * news - */ - unsigned long lp_ping_timestamp;/* time of last ping - * attempt - */ - unsigned long lp_ping_deadline; /* != 0 if ping reply - * expected - */ - unsigned long lp_last_alive; /* when I was last alive */ - unsigned long lp_last_query; /* when lp_ni was queried - * last time - */ - struct lnet_ni *lp_ni; /* interface peer is on */ - lnet_nid_t lp_nid; /* peer's NID */ - int lp_refcount; /* # refs */ - int lp_cpt; /* CPT this peer attached on */ - /* # refs from lnet_route::lr_gateway */ - int lp_rtr_refcount; - /* returned RC ping features */ - unsigned int lp_ping_feats; - struct list_head lp_routes; /* routers on this peer */ - struct lnet_rc_data *lp_rcd; /* router checker state */ -}; - -/* peer hash size */ -#define LNET_PEER_HASH_BITS 9 -#define LNET_PEER_HASH_SIZE (1 << LNET_PEER_HASH_BITS) - -/* peer hash table */ -struct lnet_peer_table { - int pt_version; /* /proc validity stamp */ - int pt_number; /* # peers extant */ - /* # zombies to go to deathrow (and not there yet) */ - int pt_zombies; - struct list_head pt_deathrow; /* zombie peers */ - struct list_head *pt_hash; /* NID->peer hash */ -}; - -/* - * peer aliveness is enabled only on routers for peers in a network where the - * lnet_ni::ni_peertimeout has been set to a positive value - */ -#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \ - (lp)->lp_ni->ni_peertimeout > 0) - -struct lnet_route { - struct list_head lr_list; /* chain on net */ - struct list_head lr_gwlist; /* chain on gateway */ - struct lnet_peer *lr_gateway; /* router node */ - __u32 lr_net; /* remote network number */ - int lr_seq; /* sequence for round-robin */ - unsigned int 
lr_downis; /* number of down NIs */ - __u32 lr_hops; /* how far I am */ - unsigned int lr_priority; /* route priority */ -}; - -#define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7) -#define LNET_REMOTE_NETS_HASH_MAX (1U << 16) -#define LNET_REMOTE_NETS_HASH_SIZE (1 << the_lnet.ln_remote_nets_hbits) - -struct lnet_remotenet { - struct list_head lrn_list; /* chain on - * ln_remote_nets_hash - */ - struct list_head lrn_routes; /* routes to me */ - __u32 lrn_net; /* my net number */ -}; - -/** lnet message has credit and can be submitted to lnd for send/receive */ -#define LNET_CREDIT_OK 0 -/** lnet message is waiting for credit */ -#define LNET_CREDIT_WAIT 1 - -struct lnet_rtrbufpool { - struct list_head rbp_bufs; /* my free buffer pool */ - struct list_head rbp_msgs; /* messages blocking - * for a buffer - */ - int rbp_npages; /* # pages in each buffer */ - /* requested number of buffers */ - int rbp_req_nbuffers; - /* # buffers actually allocated */ - int rbp_nbuffers; - int rbp_credits; /* # free buffers - * blocked messages - */ - int rbp_mincredits; /* low water mark */ -}; - -struct lnet_rtrbuf { - struct list_head rb_list; /* chain on rbp_bufs */ - struct lnet_rtrbufpool *rb_pool; /* owning pool */ - struct bio_vec rb_kiov[0]; /* the buffer space */ -}; - -#define LNET_PEER_HASHSIZE 503 /* prime! 
*/ - -#define LNET_TINY_BUF_IDX 0 -#define LNET_SMALL_BUF_IDX 1 -#define LNET_LARGE_BUF_IDX 2 - -/* # different router buffer pools */ -#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1) - -enum lnet_match_flags { - /* Didn't match anything */ - LNET_MATCHMD_NONE = BIT(0), - /* Matched OK */ - LNET_MATCHMD_OK = BIT(1), - /* Must be discarded */ - LNET_MATCHMD_DROP = BIT(2), - /* match and buffer is exhausted */ - LNET_MATCHMD_EXHAUSTED = BIT(3), - /* match or drop */ - LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP), -}; - -/* Options for lnet_portal::ptl_options */ -#define LNET_PTL_LAZY BIT(0) -#define LNET_PTL_MATCH_UNIQUE BIT(1) /* unique match, for RDMA */ -#define LNET_PTL_MATCH_WILDCARD BIT(2) /* wildcard match, request portal */ - -/* parameter for matching operations (GET, PUT) */ -struct lnet_match_info { - __u64 mi_mbits; - struct lnet_process_id mi_id; - unsigned int mi_opc; - unsigned int mi_portal; - unsigned int mi_rlength; - unsigned int mi_roffset; -}; - -/* ME hash of RDMA portal */ -#define LNET_MT_HASH_BITS 8 -#define LNET_MT_HASH_SIZE (1 << LNET_MT_HASH_BITS) -#define LNET_MT_HASH_MASK (LNET_MT_HASH_SIZE - 1) -/* - * we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash, - * the last entry is reserved for MEs with ignore-bits - */ -#define LNET_MT_HASH_IGNORE LNET_MT_HASH_SIZE -/* - * __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which - * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the - * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] - */ -#define LNET_MT_BITS_U64 6 /* 2^6 bits */ -#define LNET_MT_EXHAUSTED_BITS (LNET_MT_HASH_BITS - LNET_MT_BITS_U64) -#define LNET_MT_EXHAUSTED_BMAP ((1 << LNET_MT_EXHAUSTED_BITS) + 1) - -/* portal match table */ -struct lnet_match_table { - /* reserved for upcoming patches, CPU partition ID */ - unsigned int mt_cpt; - unsigned int mt_portal; /* portal index */ - /* - * match table is set as "enabled" if 
there's non-exhausted MD - * attached on mt_mhash, it's only valid for wildcard portal - */ - unsigned int mt_enabled; - /* bitmap to flag whether MEs on mt_hash are exhausted or not */ - __u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP]; - struct list_head *mt_mhash; /* matching hash */ -}; - -/* these are only useful for wildcard portal */ -/* Turn off message rotor for wildcard portals */ -#define LNET_PTL_ROTOR_OFF 0 -/* round-robin dispatch all PUT messages for wildcard portals */ -#define LNET_PTL_ROTOR_ON 1 -/* round-robin dispatch routed PUT message for wildcard portals */ -#define LNET_PTL_ROTOR_RR_RT 2 -/* dispatch routed PUT message by hashing source NID for wildcard portals */ -#define LNET_PTL_ROTOR_HASH_RT 3 - -struct lnet_portal { - spinlock_t ptl_lock; - unsigned int ptl_index; /* portal ID, reserved */ - /* flags on this portal: lazy, unique... */ - unsigned int ptl_options; - /* list of messages which are stealing buffer */ - struct list_head ptl_msg_stealing; - /* messages blocking for MD */ - struct list_head ptl_msg_delayed; - /* Match table for each CPT */ - struct lnet_match_table **ptl_mtables; - /* spread rotor of incoming "PUT" */ - unsigned int ptl_rotor; - /* # active entries for this portal */ - int ptl_mt_nmaps; - /* array of active entries' cpu-partition-id */ - int ptl_mt_maps[0]; -}; - -#define LNET_LH_HASH_BITS 12 -#define LNET_LH_HASH_SIZE (1ULL << LNET_LH_HASH_BITS) -#define LNET_LH_HASH_MASK (LNET_LH_HASH_SIZE - 1) - -/* resource container (ME, MD, EQ) */ -struct lnet_res_container { - unsigned int rec_type; /* container type */ - __u64 rec_lh_cookie; /* cookie generator */ - struct list_head rec_active; /* active resource list */ - struct list_head *rec_lh_hash; /* handle hash */ -}; - -/* message container */ -struct lnet_msg_container { - int msc_init; /* initialized or not */ - /* max # threads finalizing */ - int msc_nfinalizers; - /* msgs waiting to complete finalizing */ - struct list_head msc_finalizing; - struct list_head 
msc_active; /* active message list */ - /* threads doing finalization */ - void **msc_finalizers; -}; - -/* Router Checker states */ -#define LNET_RC_STATE_SHUTDOWN 0 /* not started */ -#define LNET_RC_STATE_RUNNING 1 /* started up OK */ -#define LNET_RC_STATE_STOPPING 2 /* telling thread to stop */ - -struct lnet { - /* CPU partition table of LNet */ - struct cfs_cpt_table *ln_cpt_table; - /* number of CPTs in ln_cpt_table */ - unsigned int ln_cpt_number; - unsigned int ln_cpt_bits; - - /* protect LNet resources (ME/MD/EQ) */ - struct cfs_percpt_lock *ln_res_lock; - /* # portals */ - int ln_nportals; - /* the vector of portals */ - struct lnet_portal **ln_portals; - /* percpt ME containers */ - struct lnet_res_container **ln_me_containers; - /* percpt MD container */ - struct lnet_res_container **ln_md_containers; - - /* Event Queue container */ - struct lnet_res_container ln_eq_container; - wait_queue_head_t ln_eq_waitq; - spinlock_t ln_eq_wait_lock; - unsigned int ln_remote_nets_hbits; - - /* protect NI, peer table, credits, routers, rtrbuf... 
*/ - struct cfs_percpt_lock *ln_net_lock; - /* percpt message containers for active/finalizing/freed message */ - struct lnet_msg_container **ln_msg_containers; - struct lnet_counters **ln_counters; - struct lnet_peer_table **ln_peer_tables; - /* failure simulation */ - struct list_head ln_test_peers; - struct list_head ln_drop_rules; - struct list_head ln_delay_rules; - - struct list_head ln_nis; /* LND instances */ - /* NIs bond on specific CPT(s) */ - struct list_head ln_nis_cpt; - /* dying LND instances */ - struct list_head ln_nis_zombie; - struct lnet_ni *ln_loni; /* the loopback NI */ - - /* remote networks with routes to them */ - struct list_head *ln_remote_nets_hash; - /* validity stamp */ - __u64 ln_remote_nets_version; - /* list of all known routers */ - struct list_head ln_routers; - /* validity stamp */ - __u64 ln_routers_version; - /* percpt router buffer pools */ - struct lnet_rtrbufpool **ln_rtrpools; - - struct lnet_handle_md ln_ping_target_md; - struct lnet_handle_eq ln_ping_target_eq; - struct lnet_ping_info *ln_ping_info; - - /* router checker startup/shutdown state */ - int ln_rc_state; - /* router checker's event queue */ - struct lnet_handle_eq ln_rc_eqh; - /* rcd still pending on net */ - struct list_head ln_rcd_deathrow; - /* rcd ready for free */ - struct list_head ln_rcd_zombie; - /* serialise startup/shutdown */ - struct completion ln_rc_signal; - - struct mutex ln_api_mutex; - struct mutex ln_lnd_mutex; - struct mutex ln_delay_mutex; - /* Have I called LNetNIInit myself? */ - int ln_niinit_self; - /* LNetNIInit/LNetNIFini counter */ - int ln_refcount; - /* shutdown in progress */ - int ln_shutdown; - - int ln_routing; /* am I a router? 
*/ - lnet_pid_t ln_pid; /* requested pid */ - /* uniquely identifies this ni in this epoch */ - __u64 ln_interface_cookie; - /* registered LNDs */ - struct list_head ln_lnds; - - /* test protocol compatibility flags */ - int ln_testprotocompat; - - /* - * 0 - load the NIs from the mod params - * 1 - do not load the NIs from the mod params - * Reverse logic to ensure that other calls to LNetNIInit - * need no change - */ - bool ln_nis_from_mod_params; - - /* - * waitq for router checker. As long as there are no routes in - * the list, the router checker will sleep on this queue. when - * routes are added the thread will wake up - */ - wait_queue_head_t ln_rc_waitq; - -}; - -#endif diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h deleted file mode 100644 index 6bd1bca190a3..000000000000 --- a/drivers/staging/lustre/include/linux/lnet/socklnd.h +++ /dev/null @@ -1,87 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012 - 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate, Inc. - * - * lnet/include/lnet/socklnd.h - */ -#ifndef __LNET_LNET_SOCKLND_H__ -#define __LNET_LNET_SOCKLND_H__ - -#include -#include - -struct ksock_hello_msg { - __u32 kshm_magic; /* magic number of socklnd message */ - __u32 kshm_version; /* version of socklnd message */ - lnet_nid_t kshm_src_nid; /* sender's nid */ - lnet_nid_t kshm_dst_nid; /* destination nid */ - lnet_pid_t kshm_src_pid; /* sender's pid */ - lnet_pid_t kshm_dst_pid; /* destination pid */ - __u64 kshm_src_incarnation; /* sender's incarnation */ - __u64 kshm_dst_incarnation; /* destination's incarnation */ - __u32 kshm_ctype; /* connection type */ - __u32 kshm_nips; /* # IP addrs */ - __u32 kshm_ips[0]; /* IP addrs */ -} WIRE_ATTR; - -struct ksock_lnet_msg { - struct lnet_hdr ksnm_hdr; /* lnet hdr */ - - /* - * ksnm_payload is removed because of winnt compiler's limitation: - * zero-sized array can only be placed at the tail of [nested] - * structure definitions. 
lnet payload will be stored just after - * the body of structure ksock_lnet_msg_t - */ -} WIRE_ATTR; - -struct ksock_msg { - __u32 ksm_type; /* type of socklnd message */ - __u32 ksm_csum; /* checksum if != 0 */ - __u64 ksm_zc_cookies[2]; /* Zero-Copy request/ACK cookie */ - union { - struct ksock_lnet_msg lnetmsg; /* lnet message, it's empty if - * it's NOOP - */ - } WIRE_ATTR ksm_u; -} WIRE_ATTR; - -#define KSOCK_MSG_NOOP 0xC0 /* ksm_u empty */ -#define KSOCK_MSG_LNET 0xC1 /* lnet msg */ - -/* - * We need to know this number to parse hello msg from ksocklnd in - * other LND (usocklnd, for example) - */ -#define KSOCK_PROTO_V2 2 -#define KSOCK_PROTO_V3 3 - -#endif diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h b/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h deleted file mode 100644 index c4d9472b374f..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2014, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/include/libcfs/libcfs_debug.h - * - * Debug messages and assertions - * - */ - -#ifndef __UAPI_LIBCFS_DEBUG_H__ -#define __UAPI_LIBCFS_DEBUG_H__ - -/** - * Format for debug message headers - */ -struct ptldebug_header { - __u32 ph_len; - __u32 ph_flags; - __u32 ph_subsys; - __u32 ph_mask; - __u16 ph_cpu_id; - __u16 ph_type; - /* time_t overflow in 2106 */ - __u32 ph_sec; - __u64 ph_usec; - __u32 ph_stack; - __u32 ph_pid; - __u32 ph_extern_pid; - __u32 ph_line_num; -} __attribute__((packed)); - -#define PH_FLAG_FIRST_RECORD 1 - -/* Debugging subsystems (32 bits, non-overlapping) */ -#define S_UNDEFINED 0x00000001 -#define S_MDC 0x00000002 -#define S_MDS 0x00000004 -#define S_OSC 0x00000008 -#define S_OST 0x00000010 -#define S_CLASS 0x00000020 -#define S_LOG 0x00000040 -#define S_LLITE 0x00000080 -#define S_RPC 0x00000100 -#define S_MGMT 0x00000200 -#define S_LNET 0x00000400 -#define S_LND 0x00000800 /* ALL LNDs */ -#define S_PINGER 0x00001000 -#define S_FILTER 0x00002000 -#define S_LIBCFS 0x00004000 -#define S_ECHO 0x00008000 -#define S_LDLM 0x00010000 -#define S_LOV 0x00020000 -#define S_LQUOTA 0x00040000 -#define S_OSD 0x00080000 -#define S_LFSCK 0x00100000 -#define S_SNAPSHOT 0x00200000 -/* unused */ -#define S_LMV 0x00800000 /* b_new_cmd */ -/* unused */ -#define S_SEC 0x02000000 /* upcall cache */ -#define S_GSS 0x04000000 /* b_new_cmd */ -/* unused */ -#define S_MGC 0x10000000 -#define S_MGS 0x20000000 -#define S_FID 0x40000000 /* b_new_cmd */ -#define S_FLD 0x80000000 /* b_new_cmd */ - -#define LIBCFS_DEBUG_SUBSYS_NAMES { \ - "undefined", "mdc", "mds", "osc", "ost", "class", "log", \ - "llite", "rpc", "mgmt", "lnet", "lnd", "pinger", "filter", \ - "libcfs", "echo", "ldlm", "lov", "lquota", "osd", "lfsck", \ - "snapshot", "", "lmv", "", "sec", "gss", "", "mgc", "mgs", \ - "fid", "fld", NULL } - -/* Debugging masks (32 bits, 
non-overlapping) */ -#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */ -#define D_INODE 0x00000002 -#define D_SUPER 0x00000004 -#define D_EXT2 0x00000008 /* anything from ext2_debug */ -#define D_MALLOC 0x00000010 /* print malloc, free information */ -#define D_CACHE 0x00000020 /* cache-related items */ -#define D_INFO 0x00000040 /* general information */ -#define D_IOCTL 0x00000080 /* ioctl related information */ -#define D_NETERROR 0x00000100 /* network errors */ -#define D_NET 0x00000200 /* network communications */ -#define D_WARNING 0x00000400 /* CWARN(...) == CDEBUG (D_WARNING, ...) */ -#define D_BUFFS 0x00000800 -#define D_OTHER 0x00001000 -#define D_DENTRY 0x00002000 -#define D_NETTRACE 0x00004000 -#define D_PAGE 0x00008000 /* bulk page handling */ -#define D_DLMTRACE 0x00010000 -#define D_ERROR 0x00020000 /* CERROR(...) == CDEBUG (D_ERROR, ...) */ -#define D_EMERG 0x00040000 /* CEMERG(...) == CDEBUG (D_EMERG, ...) */ -#define D_HA 0x00080000 /* recovery and failover */ -#define D_RPCTRACE 0x00100000 /* for distributed debugging */ -#define D_VFSTRACE 0x00200000 -#define D_READA 0x00400000 /* read-ahead */ -#define D_MMAP 0x00800000 -#define D_CONFIG 0x01000000 -#define D_CONSOLE 0x02000000 -#define D_QUOTA 0x04000000 -#define D_SEC 0x08000000 -#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */ -#define D_HSM 0x20000000 -#define D_SNAPSHOT 0x40000000 /* snapshot */ -#define D_LAYOUT 0x80000000 - -#define LIBCFS_DEBUG_MASKS_NAMES { \ - "trace", "inode", "super", "ext2", "malloc", "cache", "info", \ - "ioctl", "neterror", "net", "warning", "buffs", "other", \ - "dentry", "nettrace", "page", "dlmtrace", "error", "emerg", \ - "ha", "rpctrace", "vfstrace", "reada", "mmap", "config", \ - "console", "quota", "sec", "lfsck", "hsm", "snapshot", "layout",\ - NULL } - -#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE) - -#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log" - -#endif /* __UAPI_LIBCFS_DEBUG_H__ */ diff --git 
a/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h b/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h deleted file mode 100644 index cce6b58e3682..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/include/libcfs/libcfs_ioctl.h - * - * Low-level ioctl data structures. Kernel ioctl functions declared here, - * and user space functions are in libcfs/util/ioctl.h. 
- * - */ - -#ifndef __LIBCFS_IOCTL_H__ -#define __LIBCFS_IOCTL_H__ - -#include -#include - -#define LIBCFS_IOCTL_VERSION 0x0001000a -#define LIBCFS_IOCTL_VERSION2 0x0001000b - -struct libcfs_ioctl_hdr { - __u32 ioc_len; - __u32 ioc_version; -}; - -/** max size to copy from userspace */ -#define LIBCFS_IOC_DATA_MAX (128 * 1024) - -struct libcfs_ioctl_data { - struct libcfs_ioctl_hdr ioc_hdr; - - __u64 ioc_nid; - __u64 ioc_u64[1]; - - __u32 ioc_flags; - __u32 ioc_count; - __u32 ioc_net; - __u32 ioc_u32[7]; - - __u32 ioc_inllen1; - char *ioc_inlbuf1; - __u32 ioc_inllen2; - char *ioc_inlbuf2; - - __u32 ioc_plen1; /* buffers in userspace */ - void __user *ioc_pbuf1; - __u32 ioc_plen2; /* buffers in userspace */ - void __user *ioc_pbuf2; - - char ioc_bulk[0]; -}; - -struct libcfs_debug_ioctl_data { - struct libcfs_ioctl_hdr hdr; - unsigned int subs; - unsigned int debug; -}; - -/* 'f' ioctls are defined in lustre_ioctl.h and lustre_user.h except for: */ -#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long) -#define IOCTL_LIBCFS_TYPE long - -#define IOC_LIBCFS_TYPE ('e') -#define IOC_LIBCFS_MIN_NR 30 -/* libcfs ioctls */ -/* IOC_LIBCFS_PANIC obsolete in 2.8.0, was _IOWR('e', 30, IOCTL_LIBCFS_TYPE) */ -#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE) -/* IOC_LIBCFS_MEMHOG obsolete in 2.8.0, was _IOWR('e', 36, IOCTL_LIBCFS_TYPE) */ -/* lnet ioctls */ -#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE) -/* IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE) */ -#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_PING 
_IOWR('e', 61, IOCTL_LIBCFS_TYPE) -/* IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE) */ -#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, IOCTL_LIBCFS_TYPE) -/* lnd ioctls */ -#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE) -/* ioctl 77 is free for use */ -#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE) -#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE) - -/* - * DLC Specific IOCTL numbers. - * In order to maintain backward compatibility with any possible external - * tools which might be accessing the IOCTL numbers, a new group of IOCTL - * number have been allocated. 
- */ -#define IOCTL_CONFIG_SIZE struct lnet_ioctl_config_data -#define IOC_LIBCFS_ADD_ROUTE _IOWR(IOC_LIBCFS_TYPE, 81, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_DEL_ROUTE _IOWR(IOC_LIBCFS_TYPE, 82, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_GET_ROUTE _IOWR(IOC_LIBCFS_TYPE, 83, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_ADD_NET _IOWR(IOC_LIBCFS_TYPE, 84, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_DEL_NET _IOWR(IOC_LIBCFS_TYPE, 85, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_GET_NET _IOWR(IOC_LIBCFS_TYPE, 86, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_CONFIG_RTR _IOWR(IOC_LIBCFS_TYPE, 87, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_ADD_BUF _IOWR(IOC_LIBCFS_TYPE, 88, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_GET_BUF _IOWR(IOC_LIBCFS_TYPE, 89, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_GET_PEER_INFO _IOWR(IOC_LIBCFS_TYPE, 90, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE) -#define IOC_LIBCFS_MAX_NR 91 - -#endif /* __LIBCFS_IOCTL_H__ */ diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h b/drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h deleted file mode 100644 index c1619f411d81..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h +++ /dev/null @@ -1,150 +0,0 @@ -/* - * LGPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library. 
- * - * LGPL HEADER END - * - */ -/* - * Copyright (c) 2014, Intel Corporation. - */ -/* - * Author: Amir Shehata - */ - -#ifndef LNET_DLC_H -#define LNET_DLC_H - -#include -#include - -#define MAX_NUM_SHOW_ENTRIES 32 -#define LNET_MAX_STR_LEN 128 -#define LNET_MAX_SHOW_NUM_CPT 128 -#define LNET_UNDEFINED_HOPS ((__u32)(-1)) - -struct lnet_ioctl_config_lnd_cmn_tunables { - __u32 lct_version; - __u32 lct_peer_timeout; - __u32 lct_peer_tx_credits; - __u32 lct_peer_rtr_credits; - __u32 lct_max_tx_credits; -}; - -struct lnet_ioctl_config_o2iblnd_tunables { - __u32 lnd_version; - __u32 lnd_peercredits_hiw; - __u32 lnd_map_on_demand; - __u32 lnd_concurrent_sends; - __u32 lnd_fmr_pool_size; - __u32 lnd_fmr_flush_trigger; - __u32 lnd_fmr_cache; - __u16 lnd_conns_per_peer; - __u16 pad; -}; - -struct lnet_ioctl_config_lnd_tunables { - struct lnet_ioctl_config_lnd_cmn_tunables lt_cmn; - union { - struct lnet_ioctl_config_o2iblnd_tunables lt_o2ib; - } lt_tun_u; -}; - -struct lnet_ioctl_net_config { - char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN]; - __u32 ni_status; - __u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT]; - char cfg_bulk[0]; -}; - -#define LNET_TINY_BUF_IDX 0 -#define LNET_SMALL_BUF_IDX 1 -#define LNET_LARGE_BUF_IDX 2 - -/* # different router buffer pools */ -#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1) - -struct lnet_ioctl_pool_cfg { - struct { - __u32 pl_npages; - __u32 pl_nbuffers; - __u32 pl_credits; - __u32 pl_mincredits; - } pl_pools[LNET_NRBPOOLS]; - __u32 pl_routing; -}; - -struct lnet_ioctl_config_data { - struct libcfs_ioctl_hdr cfg_hdr; - - __u32 cfg_net; - __u32 cfg_count; - __u64 cfg_nid; - __u32 cfg_ncpts; - - union { - struct { - __u32 rtr_hop; - __u32 rtr_priority; - __u32 rtr_flags; - } cfg_route; - struct { - char net_intf[LNET_MAX_STR_LEN]; - __s32 net_peer_timeout; - __s32 net_peer_tx_credits; - __s32 net_peer_rtr_credits; - __s32 net_max_tx_credits; - __u32 net_cksum_algo; - __u32 net_interface_count; - } cfg_net; - struct { - __u32 buf_enable; 
- __s32 buf_tiny; - __s32 buf_small; - __s32 buf_large; - } cfg_buffers; - } cfg_config_u; - - char cfg_bulk[0]; -}; - -struct lnet_ioctl_peer { - struct libcfs_ioctl_hdr pr_hdr; - __u32 pr_count; - __u32 pr_pad; - __u64 pr_nid; - - union { - struct { - char cr_aliveness[LNET_MAX_STR_LEN]; - __u32 cr_refcount; - __u32 cr_ni_peer_tx_credits; - __u32 cr_peer_tx_credits; - __u32 cr_peer_rtr_credits; - __u32 cr_peer_min_rtr_credits; - __u32 cr_peer_tx_qnob; - __u32 cr_ncpt; - } pr_peer_credits; - } pr_lnd_u; -}; - -struct lnet_ioctl_lnet_stats { - struct libcfs_ioctl_hdr st_hdr; - struct lnet_counters st_cntrs; -}; - -#endif /* LNET_DLC_H */ diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h b/drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h deleted file mode 100644 index 1be9b7aa7326..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h +++ /dev/null @@ -1,669 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012 - 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate, Inc. - */ - -#ifndef __LNET_TYPES_H__ -#define __LNET_TYPES_H__ - -#include -#include - -/** \addtogroup lnet - * @{ - */ - -#define LNET_VERSION "0.6.0" - -/** \addtogroup lnet_addr - * @{ - */ - -/** Portal reserved for LNet's own use. - * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments. - */ -#define LNET_RESERVED_PORTAL 0 - -/** - * Address of an end-point in an LNet network. - * - * A node can have multiple end-points and hence multiple addresses. - * An LNet network can be a simple network (e.g. tcp0) or a network of - * LNet networks connected by LNet routers. Therefore an end-point address - * has two parts: network ID, and address within a network. - * - * \see LNET_NIDNET, LNET_NIDADDR, and LNET_MKNID. - */ -typedef __u64 lnet_nid_t; -/** - * ID of a process in a node. Shortened as PID to distinguish from - * lnet_process_id, the global process ID. - */ -typedef __u32 lnet_pid_t; - -/** wildcard NID that matches any end-point address */ -#define LNET_NID_ANY ((lnet_nid_t)(-1)) -/** wildcard PID that matches any lnet_pid_t */ -#define LNET_PID_ANY ((lnet_pid_t)(-1)) - -#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */ -#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */ -#define LNET_PID_LUSTRE 12345 - -#define LNET_TIME_FOREVER (-1) - -/* how an LNET NID encodes net:address */ -/** extract the address part of an lnet_nid_t */ - -static inline __u32 LNET_NIDADDR(lnet_nid_t nid) -{ - return nid & 0xffffffff; -} - -static inline __u32 LNET_NIDNET(lnet_nid_t nid) -{ - return (nid >> 32) & 0xffffffff; -} - -static inline lnet_nid_t LNET_MKNID(__u32 net, __u32 addr) -{ - return (((__u64)net) << 32) | addr; -} - -static inline __u32 LNET_NETNUM(__u32 net) -{ - return net & 0xffff; -} - -static inline __u32 LNET_NETTYP(__u32 net) -{ - return (net >> 16) & 0xffff; -} - -static inline __u32 LNET_MKNET(__u32 type, 
__u32 num) -{ - return (type << 16) | num; -} - -#define WIRE_ATTR __packed - -/* Packed version of lnet_process_id to transfer via network */ -struct lnet_process_id_packed { - /* node id / process id */ - lnet_nid_t nid; - lnet_pid_t pid; -} WIRE_ATTR; - -/* - * The wire handle's interface cookie only matches one network interface in - * one epoch (i.e. new cookie when the interface restarts or the node - * reboots). The object cookie only matches one object on that interface - * during that object's lifetime (i.e. no cookie re-use). - */ -struct lnet_handle_wire { - __u64 wh_interface_cookie; - __u64 wh_object_cookie; -} WIRE_ATTR; - -enum lnet_msg_type { - LNET_MSG_ACK = 0, - LNET_MSG_PUT, - LNET_MSG_GET, - LNET_MSG_REPLY, - LNET_MSG_HELLO, -}; - -/* - * The variant fields of the portals message header are aligned on an 8 - * byte boundary in the message header. Note that all types used in these - * wire structs MUST be fixed size and the smaller types are placed at the - * end. - */ -struct lnet_ack { - struct lnet_handle_wire dst_wmd; - __u64 match_bits; - __u32 mlength; -} WIRE_ATTR; - -struct lnet_put { - struct lnet_handle_wire ack_wmd; - __u64 match_bits; - __u64 hdr_data; - __u32 ptl_index; - __u32 offset; -} WIRE_ATTR; - -struct lnet_get { - struct lnet_handle_wire return_wmd; - __u64 match_bits; - __u32 ptl_index; - __u32 src_offset; - __u32 sink_length; -} WIRE_ATTR; - -struct lnet_reply { - struct lnet_handle_wire dst_wmd; -} WIRE_ATTR; - -struct lnet_hello { - __u64 incarnation; - __u32 type; -} WIRE_ATTR; - -struct lnet_hdr { - lnet_nid_t dest_nid; - lnet_nid_t src_nid; - lnet_pid_t dest_pid; - lnet_pid_t src_pid; - __u32 type; /* enum lnet_msg_type */ - __u32 payload_length; /* payload data to follow */ - /*<------__u64 aligned------->*/ - union { - struct lnet_ack ack; - struct lnet_put put; - struct lnet_get get; - struct lnet_reply reply; - struct lnet_hello hello; - } msg; -} WIRE_ATTR; - -/* - * A HELLO message contains a magic number and 
protocol version - * code in the header's dest_nid, the peer's NID in the src_nid, and - * LNET_MSG_HELLO in the type field. All other common fields are zero - * (including payload_size; i.e. no payload). - * This is for use by byte-stream LNDs (e.g. TCP/IP) to check the peer is - * running the same protocol and to find out its NID. These LNDs should - * exchange HELLO messages when a connection is first established. Individual - * LNDs can put whatever else they fancy in struct lnet_hdr::msg. - */ -struct lnet_magicversion { - __u32 magic; /* LNET_PROTO_TCP_MAGIC */ - __u16 version_major; /* increment on incompatible change */ - __u16 version_minor; /* increment on compatible change */ -} WIRE_ATTR; - -/* PROTO MAGIC for LNDs */ -#define LNET_PROTO_IB_MAGIC 0x0be91b91 -#define LNET_PROTO_GNI_MAGIC 0xb00fbabe /* ask Kim */ -#define LNET_PROTO_TCP_MAGIC 0xeebc0ded -#define LNET_PROTO_ACCEPTOR_MAGIC 0xacce7100 -#define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */ - -/* Placeholder for a future "unified" protocol across all LNDs */ -/* - * Current LNDs that receive a request with this magic will respond with a - * "stub" reply using their current protocol - */ -#define LNET_PROTO_MAGIC 0x45726963 /* ! 
*/ - -#define LNET_PROTO_TCP_VERSION_MAJOR 1 -#define LNET_PROTO_TCP_VERSION_MINOR 0 - -/* Acceptor connection request */ -struct lnet_acceptor_connreq { - __u32 acr_magic; /* PTL_ACCEPTOR_PROTO_MAGIC */ - __u32 acr_version; /* protocol version */ - __u64 acr_nid; /* target NID */ -} WIRE_ATTR; - -#define LNET_PROTO_ACCEPTOR_VERSION 1 - -struct lnet_ni_status { - lnet_nid_t ns_nid; - __u32 ns_status; - __u32 ns_unused; -} WIRE_ATTR; - -struct lnet_ping_info { - __u32 pi_magic; - __u32 pi_features; - lnet_pid_t pi_pid; - __u32 pi_nnis; - struct lnet_ni_status pi_ni[0]; -} WIRE_ATTR; - -struct lnet_counters { - __u32 msgs_alloc; - __u32 msgs_max; - __u32 errors; - __u32 send_count; - __u32 recv_count; - __u32 route_count; - __u32 drop_count; - __u64 send_length; - __u64 recv_length; - __u64 route_length; - __u64 drop_length; -} WIRE_ATTR; - -#define LNET_NI_STATUS_UP 0x15aac0de -#define LNET_NI_STATUS_DOWN 0xdeadface -#define LNET_NI_STATUS_INVALID 0x00000000 - -#define LNET_MAX_INTERFACES 16 - -/** - * Objects maintained by the LNet are accessed through handles. Handle types - * have names of the form lnet_handle_xx, where xx is one of the two letter - * object type codes ('eq' for event queue, 'md' for memory descriptor, and - * 'me' for match entry). Each type of object is given a unique handle type - * to enhance type checking. - */ -#define LNET_WIRE_HANDLE_COOKIE_NONE (-1) - -struct lnet_handle_eq { - u64 cookie; -}; - -/** - * Invalidate eq handle @h. - */ -static inline void LNetInvalidateEQHandle(struct lnet_handle_eq *h) -{ - h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE; -} - -/** - * Check whether eq handle @h is invalid. - * - * @return 1 if handle is invalid, 0 if valid. - */ -static inline int LNetEQHandleIsInvalid(struct lnet_handle_eq h) -{ - return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie); -} - -struct lnet_handle_md { - u64 cookie; -}; - -/** - * Invalidate md handle @h. 
- */ -static inline void LNetInvalidateMDHandle(struct lnet_handle_md *h) -{ - h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE; -} - -/** - * Check whether eq handle @h is invalid. - * - * @return 1 if handle is invalid, 0 if valid. - */ -static inline int LNetMDHandleIsInvalid(struct lnet_handle_md h) -{ - return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie); -} - -struct lnet_handle_me { - u64 cookie; -}; - -/** - * Global process ID. - */ -struct lnet_process_id { - /** node id */ - lnet_nid_t nid; - /** process id */ - lnet_pid_t pid; -}; -/** @} lnet_addr */ - -/** \addtogroup lnet_me - * @{ - */ - -/** - * Specifies whether the match entry or memory descriptor should be unlinked - * automatically (LNET_UNLINK) or not (LNET_RETAIN). - */ -enum lnet_unlink { - LNET_RETAIN = 0, - LNET_UNLINK -}; - -/** - * Values of the type lnet_ins_pos are used to control where a new match - * entry is inserted. The value LNET_INS_BEFORE is used to insert the new - * entry before the current entry or before the head of the list. The value - * LNET_INS_AFTER is used to insert the new entry after the current entry - * or after the last item in the list. - */ -enum lnet_ins_pos { - /** insert ME before current position or head of the list */ - LNET_INS_BEFORE, - /** insert ME after current position or tail of the list */ - LNET_INS_AFTER, - /** attach ME at tail of local CPU partition ME list */ - LNET_INS_LOCAL -}; - -/** @} lnet_me */ - -/** \addtogroup lnet_md - * @{ - */ - -/** - * Defines the visible parts of a memory descriptor. Values of this type - * are used to initialize memory descriptors. - */ -struct lnet_md { - /** - * Specify the memory region associated with the memory descriptor. - * If the options field has: - * - LNET_MD_KIOV bit set: The start field points to the starting - * address of an array of struct bio_vec and the length field specifies - * the number of entries in the array. The length can't be bigger - * than LNET_MAX_IOV. 
The struct bio_vec is used to describe page-based - * fragments that are not necessarily mapped in virtual memory. - * - LNET_MD_IOVEC bit set: The start field points to the starting - * address of an array of struct iovec and the length field specifies - * the number of entries in the array. The length can't be bigger - * than LNET_MAX_IOV. The struct iovec is used to describe fragments - * that have virtual addresses. - * - Otherwise: The memory region is contiguous. The start field - * specifies the starting address for the memory region and the - * length field specifies its length. - * - * When the memory region is fragmented, all fragments but the first - * one must start on page boundary, and all but the last must end on - * page boundary. - */ - void *start; - unsigned int length; - /** - * Specifies the maximum number of operations that can be performed - * on the memory descriptor. An operation is any action that could - * possibly generate an event. In the usual case, the threshold value - * is decremented for each operation on the MD. When the threshold - * drops to zero, the MD becomes inactive and does not respond to - * operations. A threshold value of LNET_MD_THRESH_INF indicates that - * there is no bound on the number of operations that may be applied - * to a MD. - */ - int threshold; - /** - * Specifies the largest incoming request that the memory descriptor - * should respond to. When the unused portion of a MD (length - - * local offset) falls below this value, the MD becomes inactive and - * does not respond to further operations. This value is only used - * if the LNET_MD_MAX_SIZE option is set. - */ - int max_size; - /** - * Specifies the behavior of the memory descriptor. A bitwise OR - * of the following values can be used: - * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD. - * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD. 
- * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory - * region is provided by the incoming request. By default, the - * offset is maintained locally. When maintained locally, the - * offset is incremented by the length of the request so that - * the next operation (PUT or GET) will access the next part of - * the memory region. Note that only one offset variable exists - * per memory descriptor. If both PUT and GET operations are - * performed on a memory descriptor, the offset is updated each time. - * - LNET_MD_TRUNCATE: The length provided in the incoming request can - * be reduced to match the memory available in the region (determined - * by subtracting the offset from the length of the memory region). - * By default, if the length in the incoming operation is greater - * than the amount of memory available, the operation is rejected. - * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for - * incoming PUT operations, even if requested. By default, - * acknowledgments are sent for PUT operations that request an - * acknowledgment. Acknowledgments are never sent for GET operations. - * The data sent in the REPLY serves as an implicit acknowledgment. - * - LNET_MD_KIOV: The start and length fields specify an array of - * struct bio_vec. - * - LNET_MD_IOVEC: The start and length fields specify an array of - * struct iovec. - * - LNET_MD_MAX_SIZE: The max_size field is valid. - * - * Note: - * - LNET_MD_KIOV or LNET_MD_IOVEC allows for a scatter/gather - * capability for memory descriptors. They can't be both set. - * - When LNET_MD_MAX_SIZE is set, the total length of the memory - * region (i.e. sum of all fragment lengths) must not be less than - * \a max_size. - */ - unsigned int options; - /** - * A user-specified value that is associated with the memory - * descriptor. The value does not need to be a pointer, but must fit - * in the space used by a pointer. 
This value is recorded in events - * associated with operations on this MD. - */ - void *user_ptr; - /** - * A handle for the event queue used to log the operations performed on - * the memory region. If this argument is a NULL handle (i.e. nullified - * by LNetInvalidateHandle()), operations performed on this memory - * descriptor are not logged. - */ - struct lnet_handle_eq eq_handle; -}; - -/* - * Max Transfer Unit (minimum supported everywhere). - * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks) - * these limits are system wide and not interface-local. - */ -#define LNET_MTU_BITS 20 -#define LNET_MTU (1 << LNET_MTU_BITS) - -/** limit on the number of fragments in discontiguous MDs */ -#define LNET_MAX_IOV 256 - -/** - * Options for the MD structure. See lnet_md::options. - */ -#define LNET_MD_OP_PUT (1 << 0) -/** See lnet_md::options. */ -#define LNET_MD_OP_GET (1 << 1) -/** See lnet_md::options. */ -#define LNET_MD_MANAGE_REMOTE (1 << 2) -/* unused (1 << 3) */ -/** See lnet_md::options. */ -#define LNET_MD_TRUNCATE (1 << 4) -/** See lnet_md::options. */ -#define LNET_MD_ACK_DISABLE (1 << 5) -/** See lnet_md::options. */ -#define LNET_MD_IOVEC (1 << 6) -/** See lnet_md::options. */ -#define LNET_MD_MAX_SIZE (1 << 7) -/** See lnet_md::options. */ -#define LNET_MD_KIOV (1 << 8) - -/* For compatibility with Cray Portals */ -#define LNET_MD_PHYS 0 - -/** Infinite threshold on MD operations. See lnet_md::threshold */ -#define LNET_MD_THRESH_INF (-1) - -/** @} lnet_md */ - -/** \addtogroup lnet_eq - * @{ - */ - -/** - * Six types of events can be logged in an event queue. - */ -enum lnet_event_kind { - /** An incoming GET operation has completed on the MD. */ - LNET_EVENT_GET = 1, - /** - * An incoming PUT operation has completed on the MD. The - * underlying layers will not alter the memory (on behalf of this - * operation) once this event has been logged. - */ - LNET_EVENT_PUT, - /** - * A REPLY operation has completed. 
This event is logged after the - * data (if any) from the REPLY has been written into the MD. - */ - LNET_EVENT_REPLY, - /** An acknowledgment has been received. */ - LNET_EVENT_ACK, - /** - * An outgoing send (PUT or GET) operation has completed. This event - * is logged after the entire buffer has been sent and it is safe for - * the caller to reuse the buffer. - * - * Note: - * - The LNET_EVENT_SEND doesn't guarantee message delivery. It can - * happen even when the message has not yet been put out on wire. - * - It's unsafe to assume that in an outgoing GET operation - * the LNET_EVENT_SEND event would happen before the - * LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and - * LNET_EVENT_ACK events in an outgoing PUT operation. - */ - LNET_EVENT_SEND, - /** - * A MD has been unlinked. Note that LNetMDUnlink() does not - * necessarily trigger an LNET_EVENT_UNLINK event. - * \see LNetMDUnlink - */ - LNET_EVENT_UNLINK, -}; - -#define LNET_SEQ_GT(a, b) (((signed long)((a) - (b))) > 0) - -/** - * Information about an event on a MD. - */ -struct lnet_event { - /** The identifier (nid, pid) of the target. */ - struct lnet_process_id target; - /** The identifier (nid, pid) of the initiator. */ - struct lnet_process_id initiator; - /** - * The NID of the immediate sender. If the request has been forwarded - * by routers, this is the NID of the last hop; otherwise it's the - * same as the initiator. - */ - lnet_nid_t sender; - /** Indicates the type of the event. */ - enum lnet_event_kind type; - /** The portal table index specified in the request */ - unsigned int pt_index; - /** A copy of the match bits specified in the request. */ - __u64 match_bits; - /** The length (in bytes) specified in the request. */ - unsigned int rlength; - /** - * The length (in bytes) of the data that was manipulated by the - * operation. 
For truncated operations, the manipulated length will be - * the number of bytes specified by the MD (possibly with an offset, - * see lnet_md). For all other operations, the manipulated length - * will be the length of the requested operation, i.e. rlength. - */ - unsigned int mlength; - /** - * The handle to the MD associated with the event. The handle may be - * invalid if the MD has been unlinked. - */ - struct lnet_handle_md md_handle; - /** - * A snapshot of the state of the MD immediately after the event has - * been processed. In particular, the threshold field in md will - * reflect the value of the threshold after the operation occurred. - */ - struct lnet_md md; - /** - * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT. - * \see LNetPut - */ - __u64 hdr_data; - /** - * Indicates the completion status of the operation. It's 0 for - * successful operations, otherwise it's an error code. - */ - int status; - /** - * Indicates whether the MD has been unlinked. Note that: - * - An event with unlinked set is the last event on the MD. - * - This field is also set for an explicit LNET_EVENT_UNLINK event. - * \see LNetMDUnlink - */ - int unlinked; - /** - * The displacement (in bytes) into the memory region that the - * operation used. The offset can be determined by the operation for - * a remote managed MD or by the local MD. - * \see lnet_md::options - */ - unsigned int offset; - /** - * The sequence number for this event. Sequence numbers are unique - * to each event. - */ - volatile unsigned long sequence; -}; - -/** - * Event queue handler function type. - * - * The EQ handler runs for each event that is deposited into the EQ. The - * handler is supplied with a pointer to the event that triggered the - * handler invocation. - * - * The handler must not block, must be reentrant, and must not call any LNet - * API functions. It should return as quickly as possible. 
- */ -typedef void (*lnet_eq_handler_t)(struct lnet_event *event); -#define LNET_EQ_HANDLER_NONE NULL -/** @} lnet_eq */ - -/** \addtogroup lnet_data - * @{ - */ - -/** - * Specify whether an acknowledgment should be sent by target when the PUT - * operation completes (i.e., when the data has been written to a MD of the - * target process). - * - * \see lnet_md::options for the discussion on LNET_MD_ACK_DISABLE by which - * acknowledgments can be disabled for a MD. - */ -enum lnet_ack_req { - /** Request an acknowledgment */ - LNET_ACK_REQ, - /** Request that no acknowledgment should be generated. */ - LNET_NOACK_REQ -}; -/** @} lnet_data */ - -/** @} lnet */ -#endif diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h b/drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h deleted file mode 100644 index cccb32dd28f2..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * This file is part of Portals, http://www.sf.net/projects/lustre/ - * - * Portals is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * Portals is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * header for lnet ioctl - */ -#ifndef _LNETCTL_H_ -#define _LNETCTL_H_ - -#include - -/** \addtogroup lnet_fault_simulation - * @{ - */ - -enum { - LNET_CTL_DROP_ADD, - LNET_CTL_DROP_DEL, - LNET_CTL_DROP_RESET, - LNET_CTL_DROP_LIST, - LNET_CTL_DELAY_ADD, - LNET_CTL_DELAY_DEL, - LNET_CTL_DELAY_RESET, - LNET_CTL_DELAY_LIST, -}; - -#define LNET_ACK_BIT (1 << 0) -#define LNET_PUT_BIT (1 << 1) -#define LNET_GET_BIT (1 << 2) -#define LNET_REPLY_BIT (1 << 3) - -/** ioctl parameter for LNet fault simulation */ -struct lnet_fault_attr { - /** - * source NID of drop rule - * LNET_NID_ANY is wildcard for all sources - * 255.255.255.255@net is wildcard for all addresses from @net - */ - lnet_nid_t fa_src; - /** destination NID of drop rule, see \a dr_src for details */ - lnet_nid_t fa_dst; - /** - * Portal mask to drop, -1 means all portals, for example: - * fa_ptl_mask = (1 << _LDLM_CB_REQUEST_PORTAL ) | - * (1 << LDLM_CANCEL_REQUEST_PORTAL) - * - * If it is non-zero then only PUT and GET will be filtered, otherwise - * there is no portal filter, all matched messages will be checked. - */ - __u64 fa_ptl_mask; - /** - * message types to drop, for example: - * dra_type = LNET_DROP_ACK_BIT | LNET_DROP_PUT_BIT - * - * If it is non-zero then only specified message types are filtered, - * otherwise all message types will be checked. 
- */ - __u32 fa_msg_mask; - union { - /** message drop simulation */ - struct { - /** drop rate of this rule */ - __u32 da_rate; - /** - * time interval of message drop, it is exclusive - * with da_rate - */ - __u32 da_interval; - } drop; - /** message latency simulation */ - struct { - __u32 la_rate; - /** - * time interval of message delay, it is exclusive - * with la_rate - */ - __u32 la_interval; - /** latency to delay */ - __u32 la_latency; - } delay; - __u64 space[8]; - } u; -}; - -/** fault simluation stats */ -struct lnet_fault_stat { - /** total # matched messages */ - __u64 fs_count; - /** # dropped LNET_MSG_PUT by this rule */ - __u64 fs_put; - /** # dropped LNET_MSG_ACK by this rule */ - __u64 fs_ack; - /** # dropped LNET_MSG_GET by this rule */ - __u64 fs_get; - /** # dropped LNET_MSG_REPLY by this rule */ - __u64 fs_reply; - union { - struct { - /** total # dropped messages */ - __u64 ds_dropped; - } drop; - struct { - /** total # delayed messages */ - __u64 ls_delayed; - } delay; - __u64 space[8]; - } u; -}; - -/** @} lnet_fault_simulation */ - -#define LNET_DEV_ID 0 -#define LNET_DEV_PATH "/dev/lnet" - -#endif diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h b/drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h deleted file mode 100644 index a4f9ff01d458..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h +++ /dev/null @@ -1,556 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011 - 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate, Inc. - * - * lnet/include/lnet/lnetst.h - * - * Author: Liang Zhen - */ - -#ifndef __LNET_ST_H__ -#define __LNET_ST_H__ - -#include - -#define LST_FEAT_NONE (0) -#define LST_FEAT_BULK_LEN (1 << 0) /* enable variable page size */ - -#define LST_FEATS_EMPTY (LST_FEAT_NONE) -#define LST_FEATS_MASK (LST_FEAT_NONE | LST_FEAT_BULK_LEN) - -#define LST_NAME_SIZE 32 /* max name buffer length */ - -#define LSTIO_DEBUG 0xC00 /* debug */ -#define LSTIO_SESSION_NEW 0xC01 /* create session */ -#define LSTIO_SESSION_END 0xC02 /* end session */ -#define LSTIO_SESSION_INFO 0xC03 /* query session */ -#define LSTIO_GROUP_ADD 0xC10 /* add group */ -#define LSTIO_GROUP_LIST 0xC11 /* list all groups in session */ -#define LSTIO_GROUP_INFO 0xC12 /* query default information of - * specified group - */ -#define LSTIO_GROUP_DEL 0xC13 /* delete group */ -#define LSTIO_NODES_ADD 0xC14 /* add nodes to specified group */ -#define LSTIO_GROUP_UPDATE 0xC15 /* update group */ -#define LSTIO_BATCH_ADD 0xC20 /* add batch */ -#define LSTIO_BATCH_START 0xC21 /* start batch */ -#define LSTIO_BATCH_STOP 0xC22 /* stop batch */ -#define LSTIO_BATCH_DEL 0xC23 /* delete batch */ -#define LSTIO_BATCH_LIST 0xC24 /* show all batches in the session */ -#define LSTIO_BATCH_INFO 0xC25 /* show defail of specified batch */ -#define LSTIO_TEST_ADD 0xC26 /* add test (to batch) */ -#define LSTIO_BATCH_QUERY 0xC27 
/* query batch status */ -#define LSTIO_STAT_QUERY 0xC30 /* get stats */ - -struct lst_sid { - lnet_nid_t ses_nid; /* nid of console node */ - __u64 ses_stamp; /* time stamp */ -}; /*** session id */ - -extern struct lst_sid LST_INVALID_SID; - -struct lst_bid { - __u64 bat_id; /* unique id in session */ -}; /*** batch id (group of tests) */ - -/* Status of test node */ -#define LST_NODE_ACTIVE 0x1 /* node in this session */ -#define LST_NODE_BUSY 0x2 /* node is taken by other session */ -#define LST_NODE_DOWN 0x4 /* node is down */ -#define LST_NODE_UNKNOWN 0x8 /* node not in session */ - -struct lstcon_node_ent { - struct lnet_process_id nde_id; /* id of node */ - int nde_state; /* state of node */ -}; /*** node entry, for list_group command */ - -struct lstcon_ndlist_ent { - int nle_nnode; /* # of nodes */ - int nle_nactive; /* # of active nodes */ - int nle_nbusy; /* # of busy nodes */ - int nle_ndown; /* # of down nodes */ - int nle_nunknown; /* # of unknown nodes */ -}; /*** node_list entry, for list_batch command */ - -struct lstcon_test_ent { - int tse_type; /* test type */ - int tse_loop; /* loop count */ - int tse_concur; /* concurrency of test */ -}; /* test summary entry, for - * list_batch command - */ - -struct lstcon_batch_ent { - int bae_state; /* batch status */ - int bae_timeout; /* batch timeout */ - int bae_ntest; /* # of tests in the batch */ -}; /* batch summary entry, for - * list_batch command - */ - -struct lstcon_test_batch_ent { - struct lstcon_ndlist_ent tbe_cli_nle; /* client (group) node_list - * entry - */ - struct lstcon_ndlist_ent tbe_srv_nle; /* server (group) node_list - * entry - */ - union { - struct lstcon_test_ent tbe_test; /* test entry */ - struct lstcon_batch_ent tbe_batch;/* batch entry */ - } u; -}; /* test/batch verbose information entry, - * for list_batch command - */ - -struct lstcon_rpc_ent { - struct list_head rpe_link; /* link chain */ - struct lnet_process_id rpe_peer; /* peer's id */ - struct timeval rpe_stamp; /* 
time stamp of RPC */ - int rpe_state; /* peer's state */ - int rpe_rpc_errno; /* RPC errno */ - - struct lst_sid rpe_sid; /* peer's session id */ - int rpe_fwk_errno; /* framework errno */ - int rpe_priv[4]; /* private data */ - char rpe_payload[0]; /* private reply payload */ -}; - -struct lstcon_trans_stat { - int trs_rpc_stat[4]; /* RPCs stat (0: total 1: failed - * 2: finished - * 4: reserved - */ - int trs_rpc_errno; /* RPC errno */ - int trs_fwk_stat[8]; /* framework stat */ - int trs_fwk_errno; /* errno of the first remote error */ - void *trs_fwk_private; /* private framework stat */ -}; - -static inline int -lstcon_rpc_stat_total(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_rpc_stat[0] : stat->trs_rpc_stat[0]; -} - -static inline int -lstcon_rpc_stat_success(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_rpc_stat[1] : stat->trs_rpc_stat[1]; -} - -static inline int -lstcon_rpc_stat_failure(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_rpc_stat[2] : stat->trs_rpc_stat[2]; -} - -static inline int -lstcon_sesop_stat_success(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0]; -} - -static inline int -lstcon_sesop_stat_failure(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1]; -} - -static inline int -lstcon_sesqry_stat_active(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0]; -} - -static inline int -lstcon_sesqry_stat_busy(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1]; -} - -static inline int -lstcon_sesqry_stat_unknown(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[2] : stat->trs_fwk_stat[2]; -} - -static inline int -lstcon_tsbop_stat_success(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? 
++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0]; -} - -static inline int -lstcon_tsbop_stat_failure(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1]; -} - -static inline int -lstcon_tsbqry_stat_idle(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0]; -} - -static inline int -lstcon_tsbqry_stat_run(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1]; -} - -static inline int -lstcon_tsbqry_stat_failure(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[2] : stat->trs_fwk_stat[2]; -} - -static inline int -lstcon_statqry_stat_success(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0]; -} - -static inline int -lstcon_statqry_stat_failure(struct lstcon_trans_stat *stat, int inc) -{ - return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1]; -} - -/* create a session */ -struct lstio_session_new_args { - int lstio_ses_key; /* IN: local key */ - int lstio_ses_timeout; /* IN: session timeout */ - int lstio_ses_force; /* IN: force create ? 
*/ - /** IN: session features */ - unsigned int lstio_ses_feats; - struct lst_sid __user *lstio_ses_idp; /* OUT: session id */ - int lstio_ses_nmlen; /* IN: name length */ - char __user *lstio_ses_namep; /* IN: session name */ -}; - -/* query current session */ -struct lstio_session_info_args { - struct lst_sid __user *lstio_ses_idp; /* OUT: session id */ - int __user *lstio_ses_keyp; /* OUT: local key */ - /** OUT: session features */ - unsigned int __user *lstio_ses_featp; - struct lstcon_ndlist_ent __user *lstio_ses_ndinfo;/* OUT: */ - int lstio_ses_nmlen; /* IN: name length */ - char __user *lstio_ses_namep; /* OUT: session name */ -}; - -/* delete a session */ -struct lstio_session_end_args { - int lstio_ses_key; /* IN: session key */ -}; - -#define LST_OPC_SESSION 1 -#define LST_OPC_GROUP 2 -#define LST_OPC_NODES 3 -#define LST_OPC_BATCHCLI 4 -#define LST_OPC_BATCHSRV 5 - -struct lstio_debug_args { - int lstio_dbg_key; /* IN: session key */ - int lstio_dbg_type; /* IN: debug - * session|batch| - * group|nodes list - */ - int lstio_dbg_flags; /* IN: reserved debug - * flags - */ - int lstio_dbg_timeout; /* IN: timeout of - * debug - */ - int lstio_dbg_nmlen; /* IN: len of name */ - char __user *lstio_dbg_namep; /* IN: name of - * group|batch - */ - int lstio_dbg_count; /* IN: # of test nodes - * to debug - */ - struct lnet_process_id __user *lstio_dbg_idsp; /* IN: id of test - * nodes - */ - struct list_head __user *lstio_dbg_resultp; /* OUT: list head of - * result buffer - */ -}; - -struct lstio_group_add_args { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_nmlen; /* IN: name length */ - char __user *lstio_grp_namep; /* IN: group name */ -}; - -struct lstio_group_del_args { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_nmlen; /* IN: name length */ - char __user *lstio_grp_namep; /* IN: group name */ -}; - -#define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */ -#define LST_GROUP_REFRESH 2 /* refresh inactive nodes - * in 
the group - */ -#define LST_GROUP_RMND 3 /* delete nodes from the group */ - -struct lstio_group_update_args { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_opc; /* IN: OPC */ - int lstio_grp_args; /* IN: arguments */ - int lstio_grp_nmlen; /* IN: name length */ - char __user *lstio_grp_namep; /* IN: group name */ - int lstio_grp_count; /* IN: # of nodes id */ - struct lnet_process_id __user *lstio_grp_idsp; /* IN: array of nodes */ - struct list_head __user *lstio_grp_resultp; /* OUT: list head of - * result buffer - */ -}; - -struct lstio_group_nodes_args { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_nmlen; /* IN: name length */ - char __user *lstio_grp_namep; /* IN: group name */ - int lstio_grp_count; /* IN: # of nodes */ - /** OUT: session features */ - unsigned int __user *lstio_grp_featp; - struct lnet_process_id __user *lstio_grp_idsp; /* IN: nodes */ - struct list_head __user *lstio_grp_resultp; /* OUT: list head of - * result buffer - */ -}; - -struct lstio_group_list_args { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_idx; /* IN: group idx */ - int lstio_grp_nmlen; /* IN: name len */ - char __user *lstio_grp_namep; /* OUT: name */ -}; - -struct lstio_group_info_args { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_nmlen; /* IN: name len */ - char __user *lstio_grp_namep; /* IN: name */ - struct lstcon_ndlist_ent __user *lstio_grp_entp;/* OUT: description - * of group - */ - int __user *lstio_grp_idxp; /* IN/OUT: node index */ - int __user *lstio_grp_ndentp; /* IN/OUT: # of nodent */ - struct lstcon_node_ent __user *lstio_grp_dentsp;/* OUT: nodent array */ -}; - -#define LST_DEFAULT_BATCH "batch" /* default batch name */ - -struct lstio_batch_add_args { - int lstio_bat_key; /* IN: session key */ - int lstio_bat_nmlen; /* IN: name length */ - char __user *lstio_bat_namep; /* IN: batch name */ -}; - -struct lstio_batch_del_args { - int lstio_bat_key; /* IN: session key */ - int lstio_bat_nmlen; /* IN: 
name length */ - char __user *lstio_bat_namep; /* IN: batch name */ -}; - -struct lstio_batch_run_args { - int lstio_bat_key; /* IN: session key */ - int lstio_bat_timeout; /* IN: timeout for - * the batch - */ - int lstio_bat_nmlen; /* IN: name length */ - char __user *lstio_bat_namep; /* IN: batch name */ - struct list_head __user *lstio_bat_resultp; /* OUT: list head of - * result buffer - */ -}; - -struct lstio_batch_stop_args { - int lstio_bat_key; /* IN: session key */ - int lstio_bat_force; /* IN: abort unfinished - * test RPC - */ - int lstio_bat_nmlen; /* IN: name length */ - char __user *lstio_bat_namep; /* IN: batch name */ - struct list_head __user *lstio_bat_resultp; /* OUT: list head of - * result buffer - */ -}; - -struct lstio_batch_query_args { - int lstio_bat_key; /* IN: session key */ - int lstio_bat_testidx; /* IN: test index */ - int lstio_bat_client; /* IN: we testing - * client? - */ - int lstio_bat_timeout; /* IN: timeout for - * waiting - */ - int lstio_bat_nmlen; /* IN: name length */ - char __user *lstio_bat_namep; /* IN: batch name */ - struct list_head __user *lstio_bat_resultp; /* OUT: list head of - * result buffer - */ -}; - -struct lstio_batch_list_args { - int lstio_bat_key; /* IN: session key */ - int lstio_bat_idx; /* IN: index */ - int lstio_bat_nmlen; /* IN: name length */ - char __user *lstio_bat_namep; /* IN: batch name */ -}; - -struct lstio_batch_info_args { - int lstio_bat_key; /* IN: session key */ - int lstio_bat_nmlen; /* IN: name length */ - char __user *lstio_bat_namep; /* IN: name */ - int lstio_bat_server; /* IN: query server - * or not - */ - int lstio_bat_testidx; /* IN: test index */ - struct lstcon_test_batch_ent __user *lstio_bat_entp;/* OUT: batch ent */ - - int __user *lstio_bat_idxp; /* IN/OUT: index of node */ - int __user *lstio_bat_ndentp; /* IN/OUT: # of nodent */ - struct lstcon_node_ent __user *lstio_bat_dentsp;/* array of nodent */ -}; - -/* add stat in session */ -struct lstio_stat_args { - int 
lstio_sta_key; /* IN: session key */ - int lstio_sta_timeout; /* IN: timeout for - * stat request - */ - int lstio_sta_nmlen; /* IN: group name - * length - */ - char __user *lstio_sta_namep; /* IN: group name */ - int lstio_sta_count; /* IN: # of pid */ - struct lnet_process_id __user *lstio_sta_idsp; /* IN: pid */ - struct list_head __user *lstio_sta_resultp; /* OUT: list head of - * result buffer - */ -}; - -enum lst_test_type { - LST_TEST_BULK = 1, - LST_TEST_PING = 2 -}; - -/* create a test in a batch */ -#define LST_MAX_CONCUR 1024 /* Max concurrency of test */ - -struct lstio_test_args { - int lstio_tes_key; /* IN: session key */ - int lstio_tes_bat_nmlen; /* IN: batch name len */ - char __user *lstio_tes_bat_name; /* IN: batch name */ - int lstio_tes_type; /* IN: test type */ - int lstio_tes_oneside; /* IN: one sided test */ - int lstio_tes_loop; /* IN: loop count */ - int lstio_tes_concur; /* IN: concurrency */ - - int lstio_tes_dist; /* IN: node distribution in - * destination groups - */ - int lstio_tes_span; /* IN: node span in - * destination groups - */ - int lstio_tes_sgrp_nmlen; /* IN: source group - * name length - */ - char __user *lstio_tes_sgrp_name; /* IN: group name */ - int lstio_tes_dgrp_nmlen; /* IN: destination group - * name length - */ - char __user *lstio_tes_dgrp_name; /* IN: group name */ - - int lstio_tes_param_len; /* IN: param buffer len */ - void __user *lstio_tes_param; /* IN: parameter for specified - * test: lstio_bulk_param_t, - * lstio_ping_param_t, - * ... 
more - */ - int __user *lstio_tes_retp; /* OUT: private returned - * value - */ - struct list_head __user *lstio_tes_resultp;/* OUT: list head of - * result buffer - */ -}; - -enum lst_brw_type { - LST_BRW_READ = 1, - LST_BRW_WRITE = 2 -}; - -enum lst_brw_flags { - LST_BRW_CHECK_NONE = 1, - LST_BRW_CHECK_SIMPLE = 2, - LST_BRW_CHECK_FULL = 3 -}; - -struct lst_test_bulk_param { - int blk_opc; /* bulk operation code */ - int blk_size; /* size (bytes) */ - int blk_time; /* time of running the test*/ - int blk_flags; /* reserved flags */ - int blk_cli_off; /* bulk offset on client */ - int blk_srv_off; /* reserved: bulk offset on server */ -}; - -struct lst_test_ping_param { - int png_size; /* size of ping message */ - int png_time; /* time */ - int png_loop; /* loop */ - int png_flags; /* reserved flags */ -}; - -struct srpc_counters { - __u32 errors; - __u32 rpcs_sent; - __u32 rpcs_rcvd; - __u32 rpcs_dropped; - __u32 rpcs_expired; - __u64 bulk_get; - __u64 bulk_put; -} WIRE_ATTR; - -struct sfw_counters { - /** milliseconds since current session started */ - __u32 running_ms; - __u32 active_batches; - __u32 zombie_sessions; - __u32 brw_errors; - __u32 ping_errors; -} WIRE_ATTR; - -#endif diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h b/drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h deleted file mode 100644 index 882074ed6021..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -#ifndef _LNET_NIDSTRINGS_H -#define _LNET_NIDSTRINGS_H - -#include - -/** - * Lustre Network Driver types. - */ -enum { - /* - * Only add to these values (i.e. don't ever change or redefine them): - * network addresses depend on them... - */ - QSWLND = 1, - SOCKLND = 2, - GMLND = 3, - PTLLND = 4, - O2IBLND = 5, - CIBLND = 6, - OPENIBLND = 7, - IIBLND = 8, - LOLND = 9, - RALND = 10, - VIBLND = 11, - MXLND = 12, - GNILND = 13, - GNIIPLND = 14, -}; - -struct list_head; - -#define LNET_NIDSTR_COUNT 1024 /* # of nidstrings */ -#define LNET_NIDSTR_SIZE 32 /* size of each one (see below for usage) */ - -/* support decl needed by both kernel and user space */ -char *libcfs_next_nidstring(void); -int libcfs_isknown_lnd(__u32 lnd); -char *libcfs_lnd2modname(__u32 lnd); -char *libcfs_lnd2str_r(__u32 lnd, char *buf, size_t buf_size); -static inline char *libcfs_lnd2str(__u32 lnd) -{ - return libcfs_lnd2str_r(lnd, libcfs_next_nidstring(), - LNET_NIDSTR_SIZE); -} - -int libcfs_str2lnd(const char *str); -char *libcfs_net2str_r(__u32 net, char *buf, size_t buf_size); -static inline char *libcfs_net2str(__u32 net) -{ - return libcfs_net2str_r(net, libcfs_next_nidstring(), - LNET_NIDSTR_SIZE); -} - -char *libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size); -static inline char *libcfs_nid2str(lnet_nid_t nid) -{ - return libcfs_nid2str_r(nid, libcfs_next_nidstring(), - LNET_NIDSTR_SIZE); -} - -__u32 libcfs_str2net(const char *str); -lnet_nid_t libcfs_str2nid(const 
char *str); -int libcfs_str2anynid(lnet_nid_t *nid, const char *str); -char *libcfs_id2str(struct lnet_process_id id); -void cfs_free_nidlist(struct list_head *list); -int cfs_parse_nidlist(char *str, int len, struct list_head *list); -int cfs_print_nidlist(char *buffer, int count, struct list_head *list); -int cfs_match_nid(lnet_nid_t nid, struct list_head *list); - -int cfs_ip_addr_parse(char *str, int len, struct list_head *list); -int cfs_ip_addr_match(__u32 addr, struct list_head *list); -bool cfs_nidrange_is_contiguous(struct list_head *nidlist); -void cfs_nidrange_find_min_max(struct list_head *nidlist, char *min_nid, - char *max_nid, size_t nidstr_length); - -struct netstrfns { - __u32 nf_type; - char *nf_name; - char *nf_modname; - void (*nf_addr2str)(__u32 addr, char *str, size_t size); - int (*nf_str2addr)(const char *str, int nob, __u32 *addr); - int (*nf_parse_addrlist)(char *str, int len, - struct list_head *list); - int (*nf_print_addrlist)(char *buffer, int count, - struct list_head *list); - int (*nf_match_addr)(__u32 addr, struct list_head *list); - bool (*nf_is_contiguous)(struct list_head *nidlist); - void (*nf_min_max)(struct list_head *nidlist, __u32 *min_nid, - __u32 *max_nid); -}; - -#endif /* _LNET_NIDSTRINGS_H */ diff --git a/drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h b/drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h deleted file mode 100644 index 6453e053fa99..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * #defines shared between socknal implementation and utilities - */ -#ifndef __UAPI_LNET_SOCKLND_H__ -#define __UAPI_LNET_SOCKLND_H__ - -#define SOCKLND_CONN_NONE (-1) -#define SOCKLND_CONN_ANY 0 -#define SOCKLND_CONN_CONTROL 1 -#define SOCKLND_CONN_BULK_IN 2 -#define SOCKLND_CONN_BULK_OUT 3 -#define SOCKLND_CONN_NTYPES 4 - -#define SOCKLND_CONN_ACK SOCKLND_CONN_BULK_IN - -#endif diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h deleted file mode 100644 index 11b51d93f64c..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h +++ /dev/null @@ -1,261 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef _UAPI_LUSTRE_CFG_H_ -#define _UAPI_LUSTRE_CFG_H_ - -#include -#include -#include - -/** \defgroup cfg cfg - * - * @{ - */ - -/* - * 1cf6 - * lcfG - */ -#define LUSTRE_CFG_VERSION 0x1cf60001 -#define LUSTRE_CFG_MAX_BUFCOUNT 8 - -#define LCFG_HDR_SIZE(count) \ - __ALIGN_KERNEL(offsetof(struct lustre_cfg, lcfg_buflens[(count)]), 8) - -/** If the LCFG_REQUIRED bit is set in a configuration command, - * then the client is required to understand this parameter - * in order to mount the filesystem. If it does not understand - * a REQUIRED command the client mount will fail. 
- */ -#define LCFG_REQUIRED 0x0001000 - -enum lcfg_command_type { - LCFG_ATTACH = 0x00cf001, /**< create a new obd instance */ - LCFG_DETACH = 0x00cf002, /**< destroy obd instance */ - LCFG_SETUP = 0x00cf003, /**< call type-specific setup */ - LCFG_CLEANUP = 0x00cf004, /**< call type-specific cleanup - */ - LCFG_ADD_UUID = 0x00cf005, /**< add a nid to a niduuid */ - LCFG_DEL_UUID = 0x00cf006, /**< remove a nid from - * a niduuid - */ - LCFG_MOUNTOPT = 0x00cf007, /**< create a profile - * (mdc, osc) - */ - LCFG_DEL_MOUNTOPT = 0x00cf008, /**< destroy a profile */ - LCFG_SET_TIMEOUT = 0x00cf009, /**< set obd_timeout */ - LCFG_SET_UPCALL = 0x00cf00a, /**< deprecated */ - LCFG_ADD_CONN = 0x00cf00b, /**< add a failover niduuid to - * an obd - */ - LCFG_DEL_CONN = 0x00cf00c, /**< remove a failover niduuid */ - LCFG_LOV_ADD_OBD = 0x00cf00d, /**< add an osc to a lov */ - LCFG_LOV_DEL_OBD = 0x00cf00e, /**< remove an osc from a lov */ - LCFG_PARAM = 0x00cf00f, /**< set a proc parameter */ - LCFG_MARKER = 0x00cf010, /**< metadata about next - * cfg rec - */ - LCFG_LOG_START = 0x00ce011, /**< mgc only, process a - * cfg log - */ - LCFG_LOG_END = 0x00ce012, /**< stop processing updates */ - LCFG_LOV_ADD_INA = 0x00ce013, /**< like LOV_ADD_OBD, - * inactive - */ - LCFG_ADD_MDC = 0x00cf014, /**< add an mdc to a lmv */ - LCFG_DEL_MDC = 0x00cf015, /**< remove an mdc from a lmv */ - LCFG_SPTLRPC_CONF = 0x00ce016, /**< security */ - LCFG_POOL_NEW = 0x00ce020, /**< create an ost pool name */ - LCFG_POOL_ADD = 0x00ce021, /**< add an ost to a pool */ - LCFG_POOL_REM = 0x00ce022, /**< remove an ost from a pool */ - LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */ - LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */ - LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre - * cleanup cleanup - */ - LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set - * a proc parameters - */ -}; - -struct lustre_cfg_bufs { - void *lcfg_buf[LUSTRE_CFG_MAX_BUFCOUNT]; - __u32 
lcfg_buflen[LUSTRE_CFG_MAX_BUFCOUNT]; - __u32 lcfg_bufcount; -}; - -struct lustre_cfg { - __u32 lcfg_version; - __u32 lcfg_command; - - __u32 lcfg_num; - __u32 lcfg_flags; - __u64 lcfg_nid; - __u32 lcfg_nal; /* not used any more */ - - __u32 lcfg_bufcount; - __u32 lcfg_buflens[0]; -}; - -enum cfg_record_type { - PORTALS_CFG_TYPE = 1, - LUSTRE_CFG_TYPE = 123, -}; - -#define LUSTRE_CFG_BUFLEN(lcfg, idx) \ - ((lcfg)->lcfg_bufcount <= (idx) ? 0 : (lcfg)->lcfg_buflens[(idx)]) - -static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs, - __u32 index, void *buf, __u32 buflen) -{ - if (index >= LUSTRE_CFG_MAX_BUFCOUNT) - return; - - if (!bufs) - return; - - if (bufs->lcfg_bufcount <= index) - bufs->lcfg_bufcount = index + 1; - - bufs->lcfg_buf[index] = buf; - bufs->lcfg_buflen[index] = buflen; -} - -static inline void lustre_cfg_bufs_set_string(struct lustre_cfg_bufs *bufs, - __u32 index, char *str) -{ - lustre_cfg_bufs_set(bufs, index, str, str ? strlen(str) + 1 : 0); -} - -static inline void lustre_cfg_bufs_reset(struct lustre_cfg_bufs *bufs, - char *name) -{ - memset((bufs), 0, sizeof(*bufs)); - if (name) - lustre_cfg_bufs_set_string(bufs, 0, name); -} - -static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, __u32 index) -{ - __u32 i; - size_t offset; - __u32 bufcount; - - if (!lcfg) - return NULL; - - bufcount = lcfg->lcfg_bufcount; - if (index >= bufcount) - return NULL; - - offset = LCFG_HDR_SIZE(lcfg->lcfg_bufcount); - for (i = 0; i < index; i++) - offset += __ALIGN_KERNEL(lcfg->lcfg_buflens[i], 8); - return (char *)lcfg + offset; -} - -static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs, - struct lustre_cfg *lcfg) -{ - __u32 i; - - bufs->lcfg_bufcount = lcfg->lcfg_bufcount; - for (i = 0; i < bufs->lcfg_bufcount; i++) { - bufs->lcfg_buflen[i] = lcfg->lcfg_buflens[i]; - bufs->lcfg_buf[i] = lustre_cfg_buf(lcfg, i); - } -} - -static inline __u32 lustre_cfg_len(__u32 bufcount, __u32 *buflens) -{ - __u32 i; - __u32 len; - - len = 
LCFG_HDR_SIZE(bufcount); - for (i = 0; i < bufcount; i++) - len += __ALIGN_KERNEL(buflens[i], 8); - - return __ALIGN_KERNEL(len, 8); -} - -static inline void lustre_cfg_init(struct lustre_cfg *lcfg, int cmd, - struct lustre_cfg_bufs *bufs) -{ - char *ptr; - __u32 i; - - lcfg->lcfg_version = LUSTRE_CFG_VERSION; - lcfg->lcfg_command = cmd; - lcfg->lcfg_bufcount = bufs->lcfg_bufcount; - - ptr = (char *)lcfg + LCFG_HDR_SIZE(lcfg->lcfg_bufcount); - for (i = 0; i < lcfg->lcfg_bufcount; i++) { - lcfg->lcfg_buflens[i] = bufs->lcfg_buflen[i]; - if (bufs->lcfg_buf[i]) { - memcpy(ptr, bufs->lcfg_buf[i], bufs->lcfg_buflen[i]); - ptr += __ALIGN_KERNEL(bufs->lcfg_buflen[i], 8); - } - } -} - -static inline int lustre_cfg_sanity_check(void *buf, size_t len) -{ - struct lustre_cfg *lcfg = (struct lustre_cfg *)buf; - - if (!lcfg) - return -EINVAL; - - /* check that the first bits of the struct are valid */ - if (len < LCFG_HDR_SIZE(0)) - return -EINVAL; - - if (lcfg->lcfg_version != LUSTRE_CFG_VERSION) - return -EINVAL; - - if (lcfg->lcfg_bufcount >= LUSTRE_CFG_MAX_BUFCOUNT) - return -EINVAL; - - /* check that the buflens are valid */ - if (len < LCFG_HDR_SIZE(lcfg->lcfg_bufcount)) - return -EINVAL; - - /* make sure all the pointers point inside the data */ - if (len < lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens)) - return -EINVAL; - - return 0; -} - -/** @} cfg */ - -#endif /* _UAPI_LUSTRE_CFG_H_ */ diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h deleted file mode 100644 index 2e7a8d103777..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h +++ /dev/null @@ -1,293 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2014, Intel Corporation. - * - * Copyright 2016 Cray Inc, all rights reserved. - * Author: Ben Evans. - * - * all fid manipulation functions go here - * - * FIDS are globally unique within a Lustre filessytem, and are made up - * of three parts: sequence, Object ID, and version. 
- * - */ -#ifndef _UAPI_LUSTRE_FID_H_ -#define _UAPI_LUSTRE_FID_H_ - -#include - -/** returns fid object sequence */ -static inline __u64 fid_seq(const struct lu_fid *fid) -{ - return fid->f_seq; -} - -/** returns fid object id */ -static inline __u32 fid_oid(const struct lu_fid *fid) -{ - return fid->f_oid; -} - -/** returns fid object version */ -static inline __u32 fid_ver(const struct lu_fid *fid) -{ - return fid->f_ver; -} - -static inline void fid_zero(struct lu_fid *fid) -{ - memset(fid, 0, sizeof(*fid)); -} - -static inline __u64 fid_ver_oid(const struct lu_fid *fid) -{ - return (__u64)fid_ver(fid) << 32 | fid_oid(fid); -} - -static inline bool fid_seq_is_mdt0(__u64 seq) -{ - return seq == FID_SEQ_OST_MDT0; -} - -static inline bool fid_seq_is_mdt(__u64 seq) -{ - return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL; -}; - -static inline bool fid_seq_is_echo(__u64 seq) -{ - return seq == FID_SEQ_ECHO; -} - -static inline bool fid_is_echo(const struct lu_fid *fid) -{ - return fid_seq_is_echo(fid_seq(fid)); -} - -static inline bool fid_seq_is_llog(__u64 seq) -{ - return seq == FID_SEQ_LLOG; -} - -static inline bool fid_is_llog(const struct lu_fid *fid) -{ - /* file with OID == 0 is not llog but contains last oid */ - return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0; -} - -static inline bool fid_seq_is_rsvd(__u64 seq) -{ - return seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD; -}; - -static inline bool fid_seq_is_special(__u64 seq) -{ - return seq == FID_SEQ_SPECIAL; -}; - -static inline bool fid_seq_is_local_file(__u64 seq) -{ - return seq == FID_SEQ_LOCAL_FILE || - seq == FID_SEQ_LOCAL_NAME; -}; - -static inline bool fid_seq_is_root(__u64 seq) -{ - return seq == FID_SEQ_ROOT; -} - -static inline bool fid_seq_is_dot(__u64 seq) -{ - return seq == FID_SEQ_DOT_LUSTRE; -} - -static inline bool fid_seq_is_default(__u64 seq) -{ - return seq == FID_SEQ_LOV_DEFAULT; -} - -static inline bool fid_is_mdt0(const struct lu_fid *fid) -{ - return 
fid_seq_is_mdt0(fid_seq(fid)); -} - -/** - * Check if a fid is igif or not. - * \param fid the fid to be tested. - * \return true if the fid is an igif; otherwise false. - */ -static inline bool fid_seq_is_igif(__u64 seq) -{ - return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX; -} - -static inline bool fid_is_igif(const struct lu_fid *fid) -{ - return fid_seq_is_igif(fid_seq(fid)); -} - -/** - * Check if a fid is idif or not. - * \param fid the fid to be tested. - * \return true if the fid is an idif; otherwise false. - */ -static inline bool fid_seq_is_idif(__u64 seq) -{ - return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX; -} - -static inline bool fid_is_idif(const struct lu_fid *fid) -{ - return fid_seq_is_idif(fid_seq(fid)); -} - -static inline bool fid_is_local_file(const struct lu_fid *fid) -{ - return fid_seq_is_local_file(fid_seq(fid)); -} - -static inline bool fid_seq_is_norm(__u64 seq) -{ - return (seq >= FID_SEQ_NORMAL); -} - -static inline bool fid_is_norm(const struct lu_fid *fid) -{ - return fid_seq_is_norm(fid_seq(fid)); -} - -/* convert an OST objid into an IDIF FID SEQ number */ -static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx) -{ - return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff); -} - -/* convert a packed IDIF FID into an OST objid */ -static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver) -{ - return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; -} - -static inline __u32 idif_ost_idx(__u64 seq) -{ - return (seq >> 16) & 0xffff; -} - -/* extract ost index from IDIF FID */ -static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid) -{ - return idif_ost_idx(fid_seq(fid)); -} - -/** - * Get inode number from an igif. - * \param fid an igif to get inode number from. - * \return inode number for the igif. - */ -static inline ino_t lu_igif_ino(const struct lu_fid *fid) -{ - return fid_seq(fid); -} - -/** - * Get inode generation from an igif. - * \param fid an igif to get inode generation from. 
- * \return inode generation for the igif. - */ -static inline __u32 lu_igif_gen(const struct lu_fid *fid) -{ - return fid_oid(fid); -} - -/** - * Build igif from the inode number/generation. - */ -static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen) -{ - fid->f_seq = ino; - fid->f_oid = gen; - fid->f_ver = 0; -} - -/* - * Fids are transmitted across network (in the sender byte-ordering), - * and stored on disk in big-endian order. - */ -static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src) -{ - dst->f_seq = __cpu_to_le64(fid_seq(src)); - dst->f_oid = __cpu_to_le32(fid_oid(src)); - dst->f_ver = __cpu_to_le32(fid_ver(src)); -} - -static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src) -{ - dst->f_seq = __le64_to_cpu(fid_seq(src)); - dst->f_oid = __le32_to_cpu(fid_oid(src)); - dst->f_ver = __le32_to_cpu(fid_ver(src)); -} - -static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src) -{ - dst->f_seq = __cpu_to_be64(fid_seq(src)); - dst->f_oid = __cpu_to_be32(fid_oid(src)); - dst->f_ver = __cpu_to_be32(fid_ver(src)); -} - -static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) -{ - dst->f_seq = __be64_to_cpu(fid_seq(src)); - dst->f_oid = __be32_to_cpu(fid_oid(src)); - dst->f_ver = __be32_to_cpu(fid_ver(src)); -} - -static inline bool fid_is_sane(const struct lu_fid *fid) -{ - return fid && ((fid_seq(fid) >= FID_SEQ_START && !fid_ver(fid)) || - fid_is_igif(fid) || fid_is_idif(fid) || - fid_seq_is_rsvd(fid_seq(fid))); -} - -static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) -{ - return !memcmp(f0, f1, sizeof(*f0)); -} - -static inline int lu_fid_cmp(const struct lu_fid *f0, - const struct lu_fid *f1) -{ - if (fid_seq(f0) != fid_seq(f1)) - return fid_seq(f0) > fid_seq(f1) ? 1 : -1; - - if (fid_oid(f0) != fid_oid(f1)) - return fid_oid(f0) > fid_oid(f1) ? 1 : -1; - - if (fid_ver(f0) != fid_ver(f1)) - return fid_ver(f0) > fid_ver(f1) ? 
1 : -1; - - return 0; -} -#endif diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h deleted file mode 100644 index d375a476f5ea..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2014, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * FIEMAP data structures and flags. This header file will be used until - * fiemap.h is available in the upstream kernel. 
- * - * Author: Kalpak Shah - * Author: Andreas Dilger - */ - -#ifndef _LUSTRE_FIEMAP_H -#define _LUSTRE_FIEMAP_H - -#include -#include - -/* XXX: We use fiemap_extent::fe_reserved[0] */ -#define fe_device fe_reserved[0] - -static inline size_t fiemap_count_to_size(size_t extent_count) -{ - return sizeof(struct fiemap) + extent_count * - sizeof(struct fiemap_extent); -} - -static inline unsigned int fiemap_size_to_count(size_t array_size) -{ - return (array_size - sizeof(struct fiemap)) / - sizeof(struct fiemap_extent); -} - -#define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */ - -#ifdef FIEMAP_FLAGS_COMPAT -#undef FIEMAP_FLAGS_COMPAT -#endif - -/* Lustre specific flags - use a high bit, don't conflict with upstream flag */ -#define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */ -#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. - * Sets NO_DIRECT flag - */ - -#endif /* _LUSTRE_FIEMAP_H */ diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h deleted file mode 100644 index 6c7e3992d646..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h +++ /dev/null @@ -1,2690 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Lustre wire protocol definitions. - */ - -/** \defgroup lustreidl lustreidl - * - * Lustre wire protocol definitions. - * - * ALL structs passing over the wire should be declared here. Structs - * that are used in interfaces with userspace should go in lustre_user.h. - * - * All structs being declared here should be built from simple fixed-size - * types (__u8, __u16, __u32, __u64) or be built from other types or - * structs also declared in this file. Similarly, all flags and magic - * values in those structs should also be declared here. This ensures - * that the Lustre wire protocol is not influenced by external dependencies. - * - * The only other acceptable items in this file are VERY SIMPLE accessor - * functions to avoid callers grubbing inside the structures. Nothing that - * depends on external functions or definitions should be in here. - * - * Structs must be properly aligned to put 64-bit values on an 8-byte - * boundary. Any structs being added here must also be added to - * utils/wirecheck.c and "make newwiretest" run to regenerate the - * utils/wiretest.c sources. This allows us to verify that wire structs - * have the proper alignment/size on all architectures. - * - * DO NOT CHANGE any of the structs, flags, values declared here and used - * in released Lustre versions. Some structs may have padding fields that - * can be used. 
Some structs might allow addition at the end (verify this - * in the code to ensure that new/old clients that see this larger struct - * do not fail, otherwise you need to implement protocol compatibility). - * - * @{ - */ - -#ifndef _LUSTRE_IDL_H_ -#define _LUSTRE_IDL_H_ - -#include -#include - -#include -/* Defn's shared with user-space. */ -#include -#include - -/* - * GENERAL STUFF - */ -/* FOO_REQUEST_PORTAL is for incoming requests on the FOO - * FOO_REPLY_PORTAL is for incoming replies on the FOO - * FOO_BULK_PORTAL is for incoming bulk on the FOO - */ - -/* Lustre service names are following the format - * service name + MDT + seq name - */ -#define LUSTRE_MDT_MAXNAMELEN 80 - -#define CONNMGR_REQUEST_PORTAL 1 -#define CONNMGR_REPLY_PORTAL 2 -/*#define OSC_REQUEST_PORTAL 3 */ -#define OSC_REPLY_PORTAL 4 -/*#define OSC_BULK_PORTAL 5 */ -#define OST_IO_PORTAL 6 -#define OST_CREATE_PORTAL 7 -#define OST_BULK_PORTAL 8 -/*#define MDC_REQUEST_PORTAL 9 */ -#define MDC_REPLY_PORTAL 10 -/*#define MDC_BULK_PORTAL 11 */ -#define MDS_REQUEST_PORTAL 12 -/*#define MDS_REPLY_PORTAL 13 */ -#define MDS_BULK_PORTAL 14 -#define LDLM_CB_REQUEST_PORTAL 15 -#define LDLM_CB_REPLY_PORTAL 16 -#define LDLM_CANCEL_REQUEST_PORTAL 17 -#define LDLM_CANCEL_REPLY_PORTAL 18 -/*#define PTLBD_REQUEST_PORTAL 19 */ -/*#define PTLBD_REPLY_PORTAL 20 */ -/*#define PTLBD_BULK_PORTAL 21 */ -#define MDS_SETATTR_PORTAL 22 -#define MDS_READPAGE_PORTAL 23 -#define OUT_PORTAL 24 - -#define MGC_REPLY_PORTAL 25 -#define MGS_REQUEST_PORTAL 26 -#define MGS_REPLY_PORTAL 27 -#define OST_REQUEST_PORTAL 28 -#define FLD_REQUEST_PORTAL 29 -#define SEQ_METADATA_PORTAL 30 -#define SEQ_DATA_PORTAL 31 -#define SEQ_CONTROLLER_PORTAL 32 -#define MGS_BULK_PORTAL 33 - -/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, - * n8851@cray.com - */ - -/* packet types */ -#define PTL_RPC_MSG_REQUEST 4711 -#define PTL_RPC_MSG_ERR 4712 -#define PTL_RPC_MSG_REPLY 4713 - -/* DON'T use swabbed values of MAGIC 
as magic! */ -#define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3 -#define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B - -#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2 - -#define PTLRPC_MSG_VERSION 0x00000003 -#define LUSTRE_VERSION_MASK 0xffff0000 -#define LUSTRE_OBD_VERSION 0x00010000 -#define LUSTRE_MDS_VERSION 0x00020000 -#define LUSTRE_OST_VERSION 0x00030000 -#define LUSTRE_DLM_VERSION 0x00040000 -#define LUSTRE_LOG_VERSION 0x00050000 -#define LUSTRE_MGS_VERSION 0x00060000 - -/** - * Describes a range of sequence, lsr_start is included but lsr_end is - * not in the range. - * Same structure is used in fld module where lsr_index field holds mdt id - * of the home mdt. - */ -struct lu_seq_range { - __u64 lsr_start; - __u64 lsr_end; - __u32 lsr_index; - __u32 lsr_flags; -}; - -struct lu_seq_range_array { - __u32 lsra_count; - __u32 lsra_padding; - struct lu_seq_range lsra_lsr[0]; -}; - -#define LU_SEQ_RANGE_MDT 0x0 -#define LU_SEQ_RANGE_OST 0x1 -#define LU_SEQ_RANGE_ANY 0x3 - -#define LU_SEQ_RANGE_MASK 0x3 - -/** \defgroup lu_fid lu_fid - * @{ - */ - -/** - * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat. - * Deprecated since HSM and SOM attributes are now stored in separate on-disk - * xattr. - */ -enum lma_compat { - LMAC_HSM = 0x00000001, -/* LMAC_SOM = 0x00000002, obsolete since 2.8.0 */ - LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ - LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is - * under /O//d. - */ -}; - -/** - * Masks for all features that should be supported by a Lustre version to - * access a specific file. - * This information is stored in lustre_mdt_attrs::lma_incompat. 
- */ -enum lma_incompat { - LMAI_RELEASED = 0x00000001, /* file is released */ - LMAI_AGENT = 0x00000002, /* agent inode */ - LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object - * is on the remote MDT - */ -}; - -#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) - -/** - * fid constants - */ -enum { - /** LASTID file has zero OID */ - LUSTRE_FID_LASTID_OID = 0UL, - /** initial fid id value */ - LUSTRE_FID_INIT_OID = 1UL -}; - -/* copytool uses a 32b bitmask field to encode archive-Ids during register - * with MDT thru kuc. - * archive num = 0 => all - * archive num from 1 to 32 - */ -#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8) - -/** - * Note that reserved SEQ numbers below 12 will conflict with ldiskfs - * inodes in the IGIF namespace, so these reserved SEQ numbers can be - * used for other purposes and not risk collisions with existing inodes. - * - * Different FID Format - * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs - */ -enum fid_seq { - FID_SEQ_OST_MDT0 = 0, - FID_SEQ_LLOG = 1, /* unnamed llogs */ - FID_SEQ_ECHO = 2, - FID_SEQ_OST_MDT1 = 3, - FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */ - FID_SEQ_LLOG_NAME = 10, /* named llogs */ - FID_SEQ_RSVD = 11, - FID_SEQ_IGIF = 12, - FID_SEQ_IGIF_MAX = 0x0ffffffffULL, - FID_SEQ_IDIF = 0x100000000ULL, - FID_SEQ_IDIF_MAX = 0x1ffffffffULL, - /* Normal FID sequence starts from this value, i.e. 1<<33 */ - FID_SEQ_START = 0x200000000ULL, - /* sequence for local pre-defined FIDs listed in local_oid */ - FID_SEQ_LOCAL_FILE = 0x200000001ULL, - FID_SEQ_DOT_LUSTRE = 0x200000002ULL, - /* sequence is used for local named objects FIDs generated - * by local_object_storage library - */ - FID_SEQ_LOCAL_NAME = 0x200000003ULL, - /* Because current FLD will only cache the fid sequence, instead - * of oid on the client side, if the FID needs to be exposed to - * clients sides, it needs to make sure all of fids under one - * sequence will be located in one MDT. 
- */ - FID_SEQ_SPECIAL = 0x200000004ULL, - FID_SEQ_QUOTA = 0x200000005ULL, - FID_SEQ_QUOTA_GLB = 0x200000006ULL, - FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */ - FID_SEQ_NORMAL = 0x200000400ULL, - FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL -}; - -#define OBIF_OID_MAX_BITS 32 -#define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS) -#define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1) -#define IDIF_OID_MAX_BITS 48 -#define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS) -#define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1) - -/** OID for FID_SEQ_SPECIAL */ -enum special_oid { - /* Big Filesystem Lock to serialize rename operations */ - FID_OID_SPECIAL_BFL = 1UL, -}; - -/** OID for FID_SEQ_DOT_LUSTRE */ -enum dot_lustre_oid { - FID_OID_DOT_LUSTRE = 1UL, - FID_OID_DOT_LUSTRE_OBF = 2UL, -}; - -/** OID for FID_SEQ_ROOT */ -enum root_oid { - FID_OID_ROOT = 1UL, - FID_OID_ECHO_ROOT = 2UL, -}; - -/** @} lu_fid */ - -/** \defgroup lu_dir lu_dir - * @{ - */ - -/** - * Enumeration of possible directory entry attributes. - * - * Attributes follow directory entry header in the order they appear in this - * enumeration. - */ -enum lu_dirent_attrs { - LUDA_FID = 0x0001, - LUDA_TYPE = 0x0002, - LUDA_64BITHASH = 0x0004, -}; - -/** - * Layout of readdir pages, as transmitted on wire. - */ -struct lu_dirent { - /** valid if LUDA_FID is set. */ - struct lu_fid lde_fid; - /** a unique entry identifier: a hash or an offset. */ - __u64 lde_hash; - /** total record length, including all attributes. */ - __u16 lde_reclen; - /** name length */ - __u16 lde_namelen; - /** optional variable size attributes following this entry. - * taken from enum lu_dirent_attrs. - */ - __u32 lde_attrs; - /** name is followed by the attributes indicated in ->ldp_attrs, in - * their natural order. After the last attribute, padding bytes are - * added to make ->lde_reclen a multiple of 8. - */ - char lde_name[0]; -}; - -/* - * Definitions of optional directory entry attributes formats. 
- * - * Individual attributes do not have their length encoded in a generic way. It - * is assumed that consumer of an attribute knows its format. This means that - * it is impossible to skip over an unknown attribute, except by skipping over all - * remaining attributes (by using ->lde_reclen), which is not too - * constraining, because new server versions will append new attributes at - * the end of an entry. - */ - -/** - * Fid directory attribute: a fid of an object referenced by the entry. This - * will be almost always requested by the client and supplied by the server. - * - * Aligned to 8 bytes. - */ -/* To have compatibility with 1.8, lets have fid in lu_dirent struct. */ - -/** - * File type. - * - * Aligned to 2 bytes. - */ -struct luda_type { - __u16 lt_type; -}; - -#ifndef IFSHIFT -#define IFSHIFT 12 -#endif - -#ifndef IFTODT -#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT) -#endif -#ifndef DTTOIF -#define DTTOIF(dirtype) ((dirtype) << IFSHIFT) -#endif - -struct lu_dirpage { - __le64 ldp_hash_start; - __le64 ldp_hash_end; - __le32 ldp_flags; - __le32 ldp_pad0; - struct lu_dirent ldp_entries[0]; -}; - -enum lu_dirpage_flags { - /** - * dirpage contains no entry. - */ - LDF_EMPTY = 1 << 0, - /** - * last entry's lde_hash equals ldp_hash_end. 
- */ - LDF_COLLIDE = 1 << 1 -}; - -static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp) -{ - if (__le32_to_cpu(dp->ldp_flags) & LDF_EMPTY) - return NULL; - else - return dp->ldp_entries; -} - -static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent) -{ - struct lu_dirent *next; - - if (__le16_to_cpu(ent->lde_reclen) != 0) - next = ((void *)ent) + __le16_to_cpu(ent->lde_reclen); - else - next = NULL; - - return next; -} - -static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr) -{ - size_t size; - - if (attr & LUDA_TYPE) { - const size_t align = sizeof(struct luda_type) - 1; - - size = (sizeof(struct lu_dirent) + namelen + align) & ~align; - size += sizeof(struct luda_type); - } else { - size = sizeof(struct lu_dirent) + namelen; - } - - return (size + 7) & ~7; -} - -#define MDS_DIR_END_OFF 0xfffffffffffffffeULL - -/** - * MDS_READPAGE page size - * - * This is the directory page size packed in MDS_READPAGE RPC. - * It's different than PAGE_SIZE because the client needs to - * access the struct lu_dirpage header packed at the beginning of - * the "page" and without this there isn't any way to know find the - * lu_dirpage header is if client and server PAGE_SIZE differ. 
- */ -#define LU_PAGE_SHIFT 12 -#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) -#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) - -#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT)) - -/** @} lu_dir */ - -struct lustre_handle { - __u64 cookie; -}; - -#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL - -static inline bool lustre_handle_is_used(const struct lustre_handle *lh) -{ - return lh->cookie != 0ull; -} - -static inline bool lustre_handle_equal(const struct lustre_handle *lh1, - const struct lustre_handle *lh2) -{ - return lh1->cookie == lh2->cookie; -} - -static inline void lustre_handle_copy(struct lustre_handle *tgt, - const struct lustre_handle *src) -{ - tgt->cookie = src->cookie; -} - -/* flags for lm_flags */ -#define MSGHDR_AT_SUPPORT 0x1 -#define MSGHDR_CKSUM_INCOMPAT18 0x2 - -#define lustre_msg lustre_msg_v2 -/* we depend on this structure to be 8-byte aligned */ -/* this type is only endian-adjusted in lustre_unpack_msg() */ -struct lustre_msg_v2 { - __u32 lm_bufcount; - __u32 lm_secflvr; - __u32 lm_magic; - __u32 lm_repsize; - __u32 lm_cksum; - __u32 lm_flags; - __u32 lm_padding_2; - __u32 lm_padding_3; - __u32 lm_buflens[0]; -}; - -/* without gss, ptlrpc_body is put at the first buffer. 
*/ -#define PTLRPC_NUM_VERSIONS 4 - -struct ptlrpc_body_v3 { - struct lustre_handle pb_handle; - __u32 pb_type; - __u32 pb_version; - __u32 pb_opc; - __u32 pb_status; - __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */ - __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */ - __u16 pb_padding0; - __u32 pb_padding1; - __u64 pb_last_committed; - __u64 pb_transno; - __u32 pb_flags; - __u32 pb_op_flags; - __u32 pb_conn_cnt; - __u32 pb_timeout; /* for req, the deadline, for rep, the service est */ - __u32 pb_service_time; /* for rep, actual service time */ - __u32 pb_limit; - __u64 pb_slv; - /* VBR: pre-versions */ - __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; - __u64 pb_mbits; /**< match bits for bulk request */ - /* padding for future needs */ - __u64 pb_padding64_0; - __u64 pb_padding64_1; - __u64 pb_padding64_2; - char pb_jobid[LUSTRE_JOBID_SIZE]; -}; - -#define ptlrpc_body ptlrpc_body_v3 - -struct ptlrpc_body_v2 { - struct lustre_handle pb_handle; - __u32 pb_type; - __u32 pb_version; - __u32 pb_opc; - __u32 pb_status; - __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */ - __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */ - __u16 pb_padding0; - __u32 pb_padding1; - __u64 pb_last_committed; - __u64 pb_transno; - __u32 pb_flags; - __u32 pb_op_flags; - __u32 pb_conn_cnt; - __u32 pb_timeout; /* for req, the deadline, for rep, the service est */ - __u32 pb_service_time; /* for rep, actual service time, also used for - * net_latency of req - */ - __u32 pb_limit; - __u64 pb_slv; - /* VBR: pre-versions */ - __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; - __u64 pb_mbits; /**< unused in V2 */ - /* padding for future needs */ - __u64 pb_padding64_0; - __u64 pb_padding64_1; - __u64 pb_padding64_2; -}; - -/* message body offset for lustre_msg_v2 */ -/* ptlrpc body offset in all request/reply messages */ -#define MSG_PTLRPC_BODY_OFF 0 - -/* normal request/reply message record offset */ -#define REQ_REC_OFF 1 
-#define REPLY_REC_OFF 1 - -/* ldlm request message body offset */ -#define DLM_LOCKREQ_OFF 1 /* lockreq offset */ -#define DLM_REQ_REC_OFF 2 /* normal dlm request record offset */ - -/* ldlm intent lock message body offset */ -#define DLM_INTENT_IT_OFF 2 /* intent lock it offset */ -#define DLM_INTENT_REC_OFF 3 /* intent lock record offset */ - -/* ldlm reply message body offset */ -#define DLM_LOCKREPLY_OFF 1 /* lockrep offset */ -#define DLM_REPLY_REC_OFF 2 /* reply record offset */ - -/** only use in req->rq_{req,rep}_swab_mask */ -#define MSG_PTLRPC_HEADER_OFF 31 - -/* Flags that are operation-specific go in the top 16 bits. */ -#define MSG_OP_FLAG_MASK 0xffff0000 -#define MSG_OP_FLAG_SHIFT 16 - -/* Flags that apply to all requests are in the bottom 16 bits */ -#define MSG_GEN_FLAG_MASK 0x0000ffff -#define MSG_LAST_REPLAY 0x0001 -#define MSG_RESENT 0x0002 -#define MSG_REPLAY 0x0004 -/* #define MSG_AT_SUPPORT 0x0008 - * This was used in early prototypes of adaptive timeouts, and while there - * shouldn't be any users of that code there also isn't a need for using this - * bits. Defer usage until at least 1.10 to avoid potential conflict. 
- */ -#define MSG_DELAY_REPLAY 0x0010 -#define MSG_VERSION_REPLAY 0x0020 -#define MSG_REQ_REPLAY_DONE 0x0040 -#define MSG_LOCK_REPLAY_DONE 0x0080 - -/* - * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT) - */ - -#define MSG_CONNECT_RECOVERING 0x00000001 -#define MSG_CONNECT_RECONNECT 0x00000002 -#define MSG_CONNECT_REPLAYABLE 0x00000004 -/*#define MSG_CONNECT_PEER 0x8 */ -#define MSG_CONNECT_LIBCLIENT 0x00000010 -#define MSG_CONNECT_INITIAL 0x00000020 -#define MSG_CONNECT_ASYNC 0x00000040 -#define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */ -#define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */ - -/* Connect flags */ -#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ -#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ -#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ -#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ -#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ -#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ -#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ -#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ -#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ -#define OBD_CONNECT_LARGE_ACL 0x200ULL /* more than 32 ACL entries */ -#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ -#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ -#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ -#define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated. - *We do not support JOIN FILE - *anymore, reserve this flags - *just for preventing such bit - *to be reused. - */ -#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ -#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ -#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /* Remote client, never used - * in production. Removed in - * 2.9. Keep this flag to - * avoid reuse. 
- */ -#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /* Remote client by force, - * never used in production. - * Removed in 2.9. Keep this - * flag to avoid reuse - */ -#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ -#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ -#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ -#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */ -#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ -#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ -#define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */ -#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ -#define OBD_CONNECT_REAL 0x8000000ULL /* obsolete since 2.8 */ -#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ -#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ -#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ -#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ -#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */ -#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */ -#define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */ -#define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */ -#define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */ -#define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits - * directory hash - */ -#define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */ -#define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */ -#define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */ -#define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */ -#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles 
-EINPROGRESS - * RPC error properly - */ -#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for - * finer space reservation - */ -#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8 - * policy and 2.x server - */ -#define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */ -#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */ -#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */ -#define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */ -#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */ -#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */ -#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/ -#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack - * name in request - */ -#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */ -#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */ -#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify - * RPCs in parallel - */ -#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */ -#define OBD_CONNECT_SUBTREE 0x800000000000000ULL /* fileset mount */ -#define OBD_CONNECT_LOCK_AHEAD 0x1000000000000000ULL /* lock ahead */ -/** bulk matchbits is sent within ptlrpc_body */ -#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL -#define OBD_CONNECT_OBDOPACK 0x4000000000000000ULL /* compact OUT obdo */ -#define OBD_CONNECT_FLAGS2 0x8000000000000000ULL /* second flags word */ - -/* XXX README XXX: - * Please DO NOT add flag values here before first ensuring that this same - * flag value is not in use on some other branch. Please clear any such - * changes with senior engineers before starting to use a new flag. 
Then, - * submit a small patch against EVERY branch that ONLY adds the new flag, - * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the - * flag to check_obd_connect_data(), and updates wiretests accordingly, so it - * can be approved and landed easily to reserve the flag for future use. - */ - -/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS - * connection. It is a temporary bug fix for Imperative Recovery interop - * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for - * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. - */ -#define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS - -#define OCD_HAS_FLAG(ocd, flg) \ - (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg)) - -/* Features required for this version of the client to work with server */ -#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ - OBD_CONNECT_FULL20) - -/* This structure is used for both request and reply. - * - * If we eventually have separate connect data for different types, which we - * almost certainly will, then perhaps we stick a union in here. 
- */ -struct obd_connect_data { - __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ - __u32 ocd_version; /* lustre release version number */ - __u32 ocd_grant; /* initial cache grant amount (bytes) */ - __u32 ocd_index; /* LOV index to connect to */ - __u32 ocd_brw_size; /* Maximum BRW size in bytes */ - __u64 ocd_ibits_known; /* inode bits this client understands */ - __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ - __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ - __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */ - __u32 ocd_unused; /* also fix lustre_swab_connect */ - __u64 ocd_transno; /* first transno from client to be replayed */ - __u32 ocd_group; /* MDS group on OST */ - __u32 ocd_cksum_types; /* supported checksum algorithms */ - __u32 ocd_max_easize; /* How big LOV EA can be on MDS */ - __u32 ocd_instance; /* instance # of this target */ - __u64 ocd_maxbytes; /* Maximum stripe size in bytes */ - /* Fields after ocd_maxbytes are only accessible by the receiver - * if the corresponding flag in ocd_connect_flags is set. Accessing - * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. - */ - __u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */ - __u16 padding0; /* added 2.1.0. also fix lustre_swab_connect */ - __u32 padding1; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 ocd_connect_flags2; - __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 padding6; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 padding7; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 padding8; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 padding9; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 paddingA; /* added 2.1.0. 
also fix lustre_swab_connect */ - __u64 paddingB; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 paddingC; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 paddingD; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 paddingE; /* added 2.1.0. also fix lustre_swab_connect */ - __u64 paddingF; /* added 2.1.0. also fix lustre_swab_connect */ -}; - -/* XXX README XXX: - * Please DO NOT use any fields here before first ensuring that this same - * field is not in use on some other branch. Please clear any such changes - * with senior engineers before starting to use a new field. Then, submit - * a small patch against EVERY branch that ONLY adds the new field along with - * the matching OBD_CONNECT flag, so that can be approved and landed easily to - * reserve the flag for future use. - */ - -/* - * Supported checksum algorithms. Up to 32 checksum types are supported. - * (32-bit mask stored in obd_connect_data::ocd_cksum_types) - * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new - * algorithm and also the OBD_FL_CKSUM* flags. - */ -enum cksum_type { - OBD_CKSUM_CRC32 = 0x00000001, - OBD_CKSUM_ADLER = 0x00000002, - OBD_CKSUM_CRC32C = 0x00000004, -}; - -/* - * OST requests: OBDO & OBD request records - */ - -/* opcodes */ -enum ost_cmd { - OST_REPLY = 0, /* reply ? 
*/ - OST_GETATTR = 1, - OST_SETATTR = 2, - OST_READ = 3, - OST_WRITE = 4, - OST_CREATE = 5, - OST_DESTROY = 6, - OST_GET_INFO = 7, - OST_CONNECT = 8, - OST_DISCONNECT = 9, - OST_PUNCH = 10, - OST_OPEN = 11, - OST_CLOSE = 12, - OST_STATFS = 13, - OST_SYNC = 16, - OST_SET_INFO = 17, - OST_QUOTACHECK = 18, /* not used since 2.4 */ - OST_QUOTACTL = 19, - OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ - OST_LAST_OPC -}; -#define OST_FIRST_OPC OST_REPLY - -enum obdo_flags { - OBD_FL_INLINEDATA = 0x00000001, - OBD_FL_OBDMDEXISTS = 0x00000002, - OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */ - OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ - OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/ - OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */ - OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */ - OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */ - OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */ - OBD_FL_CREATE_CROW = 0x00000400, /* object should be create on write */ - OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */ - OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */ - OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */ - OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */ - OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */ - OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */ - OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */ - OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. 
- * XXX: obsoleted - reserved for old - * clients prior than 2.2 - */ - OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ - OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ - OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */ - OBD_FL_SHORT_IO = 0x00400000, /* short io request */ - - /* Note that while these checksum values are currently separate bits, - * in 2.x we can actually allow all values from 1-31 if we wanted. - */ - OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER | - OBD_FL_CKSUM_CRC32C, - - /* mask for local-only flag, which won't be sent over network */ - OBD_FL_LOCAL_MASK = 0xF0000000, -}; - -/* - * All LOV EA magics should have the same postfix, if some new version - * Lustre instroduces new LOV EA magic, then when down-grade to an old - * Lustre, even though the old version system does not recognizes such - * new magic, it still can distinguish the corrupted cases by checking - * the magic's postfix. - */ -#define LOV_MAGIC_MAGIC 0x0BD0 -#define LOV_MAGIC_MASK 0xFFFF - -#define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC) -#define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC) -#define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC) -#define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC) -/* reserved for specifying OSTs */ -#define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC) -#define LOV_MAGIC LOV_MAGIC_V1 - -/* - * magic for fully defined striping - * the idea is that we should have different magics for striping "hints" - * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct - * lov_mds_md_v[13]). at the moment the magics are used in wire protocol, - * we can't just change it w/o long way preparation, but we still need a - * mechanism to allow LOD to differentiate hint versus ready striping. 
- * so, at the moment we do a trick: MDT knows what to expect from request - * depending on the case (replay uses ready striping, non-replay req uses - * hints), so MDT replaces magic with appropriate one and now LOD can - * easily understand what's inside -bzzz - */ -#define LOV_MAGIC_V1_DEF 0x0CD10BD0 -#define LOV_MAGIC_V3_DEF 0x0CD30BD0 - -#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK) -#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK) - -#define lov_ost_data lov_ost_data_v1 -struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/ - struct ost_id l_ost_oi; /* OST object ID */ - __u32 l_ost_gen; /* generation of this l_ost_idx */ - __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */ -}; - -#define lov_mds_md lov_mds_md_v1 -struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */ - __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */ - __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ - struct ost_id lmm_oi; /* LOV object ID */ - __u32 lmm_stripe_size; /* size of stripe in bytes */ - /* lmm_stripe_count used to be __u32 */ - __u16 lmm_stripe_count; /* num stripes in use for this object */ - __u16 lmm_layout_gen; /* layout generation number */ - struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ -}; - -#define MAX_MD_SIZE \ - (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) -#define MIN_MD_SIZE \ - (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data)) - -#define XATTR_NAME_ACL_ACCESS "system.posix_acl_access" -#define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default" -#define XATTR_USER_PREFIX "user." -#define XATTR_TRUSTED_PREFIX "trusted." -#define XATTR_SECURITY_PREFIX "security." -#define XATTR_LUSTRE_PREFIX "lustre." 
- -#define XATTR_NAME_LOV "trusted.lov" -#define XATTR_NAME_LMA "trusted.lma" -#define XATTR_NAME_LMV "trusted.lmv" -#define XATTR_NAME_DEFAULT_LMV "trusted.dmv" -#define XATTR_NAME_LINK "trusted.link" -#define XATTR_NAME_FID "trusted.fid" -#define XATTR_NAME_VERSION "trusted.version" -#define XATTR_NAME_SOM "trusted.som" -#define XATTR_NAME_HSM "trusted.hsm" -#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace" - -struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ - __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ - __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ - struct ost_id lmm_oi; /* LOV object ID */ - __u32 lmm_stripe_size; /* size of stripe in bytes */ - /* lmm_stripe_count used to be __u32 */ - __u16 lmm_stripe_count; /* num stripes in use for this object */ - __u16 lmm_layout_gen; /* layout generation number */ - char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */ - struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ -}; - -static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) -{ - if (lmm_magic == LOV_MAGIC_V3) - return sizeof(struct lov_mds_md_v3) + - stripes * sizeof(struct lov_ost_data_v1); - else - return sizeof(struct lov_mds_md_v1) + - stripes * sizeof(struct lov_ost_data_v1); -} - -static inline __u32 -lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) -{ - switch (lmm_magic) { - case LOV_MAGIC_V1: { - struct lov_mds_md_v1 lmm; - - if (buf_size < sizeof(lmm)) - return 0; - - return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]); - } - case LOV_MAGIC_V3: { - struct lov_mds_md_v3 lmm; - - if (buf_size < sizeof(lmm)) - return 0; - - return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]); - } - default: - return 0; - } -} - -#define OBD_MD_FLID (0x00000001ULL) /* object ID */ -#define OBD_MD_FLATIME (0x00000002ULL) /* access time */ -#define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */ -#define OBD_MD_FLCTIME (0x00000008ULL) /* change 
time */ -#define OBD_MD_FLSIZE (0x00000010ULL) /* size */ -#define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */ -#define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */ -#define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */ -#define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */ -#define OBD_MD_FLUID (0x00000200ULL) /* user ID */ -#define OBD_MD_FLGID (0x00000400ULL) /* group ID */ -#define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */ -#define OBD_MD_FLNLINK (0x00002000ULL) /* link count */ -#define OBD_MD_FLGENER (0x00004000ULL) /* generation number */ -/*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */ -#define OBD_MD_FLRDEV (0x00010000ULL) /* device number */ -#define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */ -#define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */ -#define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */ -#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */ -#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */ -/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */ -/* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */ -#define OBD_MD_FLGROUP (0x01000000ULL) /* group */ -#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ -#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ - /* ->mds if epoch opens or closes - */ -#define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */ -#define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */ -#define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */ -#define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */ -#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */ - -#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ -#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */ -#define OBD_MD_MEA 
(0x0000000400000000ULL) /* CMD split EA */ -#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */ - -#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */ -#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */ -#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */ -#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ -/* OBD_MD_FLRMTPERM (0x0000010000000000ULL) remote perm, obsolete */ -#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */ -#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */ -#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */ -#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */ -#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes - * under lock; for xattr - * requests means the - * client holds the lock - */ -#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */ - -/* OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */ -/* OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */ -/* OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */ -/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */ - -#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */ -#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent - * executed - */ - -#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */ - -#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \ - OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \ - OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \ - OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \ - OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP) - -#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS) - -/* don't forget obdo_fid which is way down at the bottom so it can - * come after the definition of llog_cookie - */ - -enum 
hss_valid { - HSS_SETMASK = 0x01, - HSS_CLEARMASK = 0x02, - HSS_ARCHIVE_ID = 0x04, -}; - -struct hsm_state_set { - __u32 hss_valid; - __u32 hss_archive_id; - __u64 hss_setmask; - __u64 hss_clearmask; -}; - -/* ost_body.data values for OST_BRW */ - -#define OBD_BRW_READ 0x01 -#define OBD_BRW_WRITE 0x02 -#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) -#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous - * transfer and is not accounted in - * the grant. - */ -#define OBD_BRW_CHECK 0x10 -#define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */ -#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ -#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ -#define OBD_BRW_NOQUOTA 0x100 -#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ -#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ -#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ -#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */ -#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */ -#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server - * that the client is running low on - * space for unstable pages; asking - * it to sync quickly - */ - -#define OBD_OBJECT_EOF LUSTRE_EOF - -#define OST_MIN_PRECREATE 32 -#define OST_MAX_PRECREATE 20000 - -struct obd_ioobj { - struct ost_id ioo_oid; /* object ID, if multi-obj BRW */ - __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4, - * now (PTLRPC_BULK_OPS_COUNT - 1) in - * high 16 bits in 2.4 and later - */ - __u32 ioo_bufcnt; /* number of niobufs for this object */ -}; - -/* - * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in - * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS. - * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits. 
- */ -#define IOOBJ_MAX_BRW_BITS 16 -#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1) -#define ioobj_max_brw_set(ioo, num) \ -do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0) - -/* multiple of 8 bytes => can array */ -struct niobuf_remote { - __u64 rnb_offset; - __u32 rnb_len; - __u32 rnb_flags; -}; - -/* lock value block communicated between the filter and llite */ - -/* OST_LVB_ERR_INIT is needed because the return code in rc is - * negative, i.e. because ((MASK + rc) & MASK) != MASK. - */ -#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL -#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL -#define OST_LVB_IS_ERR(blocks) \ - ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK) -#define OST_LVB_SET_ERR(blocks, rc) \ - do { blocks = OST_LVB_ERR_INIT + rc; } while (0) -#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT) - -struct ost_lvb_v1 { - __u64 lvb_size; - __s64 lvb_mtime; - __s64 lvb_atime; - __s64 lvb_ctime; - __u64 lvb_blocks; -}; - -struct ost_lvb { - __u64 lvb_size; - __s64 lvb_mtime; - __s64 lvb_atime; - __s64 lvb_ctime; - __u64 lvb_blocks; - __u32 lvb_mtime_ns; - __u32 lvb_atime_ns; - __u32 lvb_ctime_ns; - __u32 lvb_padding; -}; - -/* - * lquota data structures - */ - -/* The lquota_id structure is a union of all the possible identifier types that - * can be used with quota, this includes: - * - 64-bit user ID - * - 64-bit group ID - * - a FID which can be used for per-directory quota in the future - */ -union lquota_id { - struct lu_fid qid_fid; /* FID for per-directory quota */ - __u64 qid_uid; /* user identifier */ - __u64 qid_gid; /* group identifier */ -}; - -/* quotactl management */ -struct obd_quotactl { - __u32 qc_cmd; - __u32 qc_type; /* see Q_* flag below */ - __u32 qc_id; - __u32 qc_stat; - struct obd_dqinfo qc_dqinfo; - struct obd_dqblk qc_dqblk; -}; - -#define Q_COPY(out, in, member) (out)->member = (in)->member - -#define QCTL_COPY(out, in) \ -do { \ - Q_COPY(out, in, qc_cmd); \ - 
Q_COPY(out, in, qc_type); \ - Q_COPY(out, in, qc_id); \ - Q_COPY(out, in, qc_stat); \ - Q_COPY(out, in, qc_dqinfo); \ - Q_COPY(out, in, qc_dqblk); \ -} while (0) - -/* Data structures associated with the quota locks */ - -/* Glimpse descriptor used for the index & per-ID quota locks */ -struct ldlm_gl_lquota_desc { - union lquota_id gl_id; /* quota ID subject to the glimpse */ - __u64 gl_flags; /* see LQUOTA_FL* below */ - __u64 gl_ver; /* new index version */ - __u64 gl_hardlimit; /* new hardlimit or qunit value */ - __u64 gl_softlimit; /* new softlimit */ - __u64 gl_time; - __u64 gl_pad2; -}; - -/* quota glimpse flags */ -#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */ - -/* LVB used with quota (global and per-ID) locks */ -struct lquota_lvb { - __u64 lvb_flags; /* see LQUOTA_FL* above */ - __u64 lvb_id_may_rel; /* space that might be released later */ - __u64 lvb_id_rel; /* space released by the slave for this ID */ - __u64 lvb_id_qunit; /* current qunit value */ - __u64 lvb_pad1; -}; - -/* op codes */ -enum quota_cmd { - QUOTA_DQACQ = 601, - QUOTA_DQREL = 602, - QUOTA_LAST_OPC -}; -#define QUOTA_FIRST_OPC QUOTA_DQACQ - -/* - * MDS REQ RECORDS - */ - -/* opcodes */ -enum mds_cmd { - MDS_GETATTR = 33, - MDS_GETATTR_NAME = 34, - MDS_CLOSE = 35, - MDS_REINT = 36, - MDS_READPAGE = 37, - MDS_CONNECT = 38, - MDS_DISCONNECT = 39, - MDS_GETSTATUS = 40, - MDS_STATFS = 41, - MDS_PIN = 42, /* obsolete, never used in a release */ - MDS_UNPIN = 43, /* obsolete, never used in a release */ - MDS_SYNC = 44, - MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */ - MDS_SET_INFO = 46, - MDS_QUOTACHECK = 47, /* not used since 2.4 */ - MDS_QUOTACTL = 48, - MDS_GETXATTR = 49, - MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ - MDS_WRITEPAGE = 51, - MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */ - MDS_GET_INFO = 53, - MDS_HSM_STATE_GET = 54, - MDS_HSM_STATE_SET = 55, - MDS_HSM_ACTION = 56, - MDS_HSM_PROGRESS = 57, - MDS_HSM_REQUEST = 58, - 
MDS_HSM_CT_REGISTER = 59, - MDS_HSM_CT_UNREGISTER = 60, - MDS_SWAP_LAYOUTS = 61, - MDS_LAST_OPC -}; - -#define MDS_FIRST_OPC MDS_GETATTR - -/* - * Do not exceed 63 - */ - -enum mdt_reint_cmd { - REINT_SETATTR = 1, - REINT_CREATE = 2, - REINT_LINK = 3, - REINT_UNLINK = 4, - REINT_RENAME = 5, - REINT_OPEN = 6, - REINT_SETXATTR = 7, - REINT_RMENTRY = 8, - REINT_MIGRATE = 9, - REINT_MAX -}; - -/* the disposition of the intent outlines what was executed */ -#define DISP_IT_EXECD 0x00000001 -#define DISP_LOOKUP_EXECD 0x00000002 -#define DISP_LOOKUP_NEG 0x00000004 -#define DISP_LOOKUP_POS 0x00000008 -#define DISP_OPEN_CREATE 0x00000010 -#define DISP_OPEN_OPEN 0x00000020 -#define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */ -#define DISP_ENQ_OPEN_REF 0x00800000 -#define DISP_ENQ_CREATE_REF 0x01000000 -#define DISP_OPEN_LOCK 0x02000000 -#define DISP_OPEN_LEASE 0x04000000 -#define DISP_OPEN_STRIPE 0x08000000 -#define DISP_OPEN_DENY 0x10000000 - -/* INODE LOCK PARTS */ -#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also - * was used to protect permission (mode, - * owner, group etc) before 2.4. - */ -#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ -#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ -#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ - -/* The PERM bit is added int 2.4, and it is used to protect permission(mode, - * owner, group, acl etc), so to separate the permission from LOOKUP lock. - * Because for remote directories(in DNE), these locks will be granted by - * different MDTs(different ldlm namespace). - * - * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together. - * For Remote directory, the master MDT, where the remote directory is, will - * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is, - * will grant LOOKUP_LOCK. 
- */ -#define MDS_INODELOCK_PERM 0x000010 -#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ - -#define MDS_INODELOCK_MAXSHIFT 5 -/* This FULL lock is useful to take on unlink sort of operations */ -#define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1) - -/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2], - * but was moved into name[1] along with the OID to avoid consuming the - * name[2,3] fields that need to be used for the quota id (also a FID). - */ -enum { - LUSTRE_RES_ID_SEQ_OFF = 0, - LUSTRE_RES_ID_VER_OID_OFF = 1, - LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */ - LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2, - LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3, - LUSTRE_RES_ID_HSH_OFF = 3 -}; - -#define MDS_STATUS_CONN 1 -#define MDS_STATUS_LOV 2 - -/* these should be identical to their EXT4_*_FL counterparts, they are - * redefined here only to avoid dragging in fs/ext4/ext4.h - */ -#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */ -#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */ -#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */ -#define LUSTRE_NODUMP_FL 0x00000040 /* do not dump file */ -#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */ -#define LUSTRE_INDEX_FL 0x00001000 /* hash-indexed directory */ -#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */ -#define LUSTRE_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ -#define LUSTRE_DIRECTIO_FL 0x00100000 /* Use direct i/o */ -#define LUSTRE_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */ - -/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values - * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire - * protocol equivalents of LDISKFS_*_FL values stored on disk, while - * the S_* flags are kernel-internal values that change between kernel - * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS. - * See b=16526 for a full history. 
- */ -static inline int ll_ext_to_inode_flags(int flags) -{ - return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) | - ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) | - ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) | - ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) | - ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0)); -} - -static inline int ll_inode_to_ext_flags(int iflags) -{ - return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) | - ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) | - ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) | - ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) | - ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0)); -} - -/* 64 possible states */ -enum md_transient_state { - MS_RESTORE = (1 << 0), /* restore is running */ -}; - -struct mdt_body { - struct lu_fid mbo_fid1; - struct lu_fid mbo_fid2; - struct lustre_handle mbo_handle; - __u64 mbo_valid; - __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */ - __s64 mbo_mtime; - __s64 mbo_atime; - __s64 mbo_ctime; - __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */ - __u64 mbo_ioepoch; - __u64 mbo_t_state; /* transient file state defined in - * enum md_transient_state - * was "ino" until 2.4.0 - */ - __u32 mbo_fsuid; - __u32 mbo_fsgid; - __u32 mbo_capability; - __u32 mbo_mode; - __u32 mbo_uid; - __u32 mbo_gid; - __u32 mbo_flags; /* LUSTRE_*_FL file attributes */ - __u32 mbo_rdev; - __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 mbo_unused2; /* was "generation" until 2.4.0 */ - __u32 mbo_suppgid; - __u32 mbo_eadatasize; - __u32 mbo_aclsize; - __u32 mbo_max_mdsize; - __u32 mbo_unused3; /* was max_cookiesize until 2.8 */ - __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */ - __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */ - __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */ - __u64 mbo_padding_6; - __u64 mbo_padding_7; - __u64 mbo_padding_8; - __u64 mbo_padding_9; - __u64 mbo_padding_10; -}; /* 216 */ - -struct mdt_ioepoch { - struct lustre_handle mio_handle; - 
__u64 mio_unused1; /* was ioepoch */ - __u32 mio_unused2; /* was flags */ - __u32 mio_padding; -}; - -/* permissions for md_perm.mp_perm */ -enum { - CFS_SETUID_PERM = 0x01, - CFS_SETGID_PERM = 0x02, - CFS_SETGRP_PERM = 0x04, -}; - -struct mdt_rec_setattr { - __u32 sa_opcode; - __u32 sa_cap; - __u32 sa_fsuid; - __u32 sa_fsuid_h; - __u32 sa_fsgid; - __u32 sa_fsgid_h; - __u32 sa_suppgid; - __u32 sa_suppgid_h; - __u32 sa_padding_1; - __u32 sa_padding_1_h; - struct lu_fid sa_fid; - __u64 sa_valid; - __u32 sa_uid; - __u32 sa_gid; - __u64 sa_size; - __u64 sa_blocks; - __s64 sa_mtime; - __s64 sa_atime; - __s64 sa_ctime; - __u32 sa_attr_flags; - __u32 sa_mode; - __u32 sa_bias; /* some operation flags */ - __u32 sa_padding_3; - __u32 sa_padding_4; - __u32 sa_padding_5; -}; - -/* - * Attribute flags used in mdt_rec_setattr::sa_valid. - * The kernel's #defines for ATTR_* should not be used over the network - * since the client and MDS may run different kernels (see bug 13828) - * Therefore, we should only use MDS_ATTR_* attributes for sa_valid. 
- */ -#define MDS_ATTR_MODE 0x1ULL /* = 1 */ -#define MDS_ATTR_UID 0x2ULL /* = 2 */ -#define MDS_ATTR_GID 0x4ULL /* = 4 */ -#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ -#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ -#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ -#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ -#define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */ -#define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */ -#define MDS_ATTR_FORCE 0x200ULL /* = 512, Not a change, but a change it */ -#define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */ -#define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */ -#define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */ -#define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */ -#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, - * ie O_TRUNC - */ -#define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */ - -#define MDS_FMODE_CLOSED 00000000 -#define MDS_FMODE_EXEC 00000004 -/* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */ -/* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */ -/* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */ - -#define MDS_OPEN_CREATED 00000010 -#define MDS_OPEN_CROSS 00000020 - -#define MDS_OPEN_CREAT 00000100 -#define MDS_OPEN_EXCL 00000200 -#define MDS_OPEN_TRUNC 00001000 -#define MDS_OPEN_APPEND 00002000 -#define MDS_OPEN_SYNC 00010000 -#define MDS_OPEN_DIRECTORY 00200000 - -#define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */ -#define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */ -#define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */ -#define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file. - * We do not support JOIN FILE - * anymore, reserve this flags - * just for preventing such bit - * to be reused. 
- */ - -#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ -#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */ -#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */ -#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */ -#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or - * hsm restore) - */ -#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created - * unlinked - */ -#define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease - * delegation, succeed if it's not - * being opened with conflict mode. - */ -#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */ - -#define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \ - MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \ - MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \ - MDS_OPEN_RELEASE) - -enum mds_op_bias { - MDS_CHECK_SPLIT = 1 << 0, - MDS_CROSS_REF = 1 << 1, - MDS_VTX_BYPASS = 1 << 2, - MDS_PERM_BYPASS = 1 << 3, -/* MDS_SOM = 1 << 4, obsolete since 2.8.0 */ - MDS_QUOTA_IGNORE = 1 << 5, - MDS_CLOSE_CLEANUP = 1 << 6, - MDS_KEEP_ORPHAN = 1 << 7, - MDS_RECOV_OPEN = 1 << 8, - MDS_DATA_MODIFIED = 1 << 9, - MDS_CREATE_VOLATILE = 1 << 10, - MDS_OWNEROVERRIDE = 1 << 11, - MDS_HSM_RELEASE = 1 << 12, - MDS_RENAME_MIGRATE = 1 << 13, - MDS_CLOSE_LAYOUT_SWAP = 1 << 14, -}; - -/* instance of mdt_reint_rec */ -struct mdt_rec_create { - __u32 cr_opcode; - __u32 cr_cap; - __u32 cr_fsuid; - __u32 cr_fsuid_h; - __u32 cr_fsgid; - __u32 cr_fsgid_h; - __u32 cr_suppgid1; - __u32 cr_suppgid1_h; - __u32 cr_suppgid2; - __u32 cr_suppgid2_h; - struct lu_fid cr_fid1; - struct lu_fid cr_fid2; - struct lustre_handle cr_old_handle; /* handle in case of open replay */ - __s64 cr_time; - __u64 cr_rdev; - __u64 cr_ioepoch; - __u64 cr_padding_1; /* rr_blocks */ - __u32 cr_mode; - __u32 cr_bias; - /* use of helpers set/get_mrc_cr_flags() is needed to access - * 64 bits cr_flags [cr_flags_l, 
cr_flags_h], this is done to - * extend cr_flags size without breaking 1.8 compat - */ - __u32 cr_flags_l; /* for use with open, low 32 bits */ - __u32 cr_flags_h; /* for use with open, high 32 bits */ - __u32 cr_umask; /* umask for create */ - __u32 cr_padding_4; /* rr_padding_4 */ -}; - -/* instance of mdt_reint_rec */ -struct mdt_rec_link { - __u32 lk_opcode; - __u32 lk_cap; - __u32 lk_fsuid; - __u32 lk_fsuid_h; - __u32 lk_fsgid; - __u32 lk_fsgid_h; - __u32 lk_suppgid1; - __u32 lk_suppgid1_h; - __u32 lk_suppgid2; - __u32 lk_suppgid2_h; - struct lu_fid lk_fid1; - struct lu_fid lk_fid2; - __s64 lk_time; - __u64 lk_padding_1; /* rr_atime */ - __u64 lk_padding_2; /* rr_ctime */ - __u64 lk_padding_3; /* rr_size */ - __u64 lk_padding_4; /* rr_blocks */ - __u32 lk_bias; - __u32 lk_padding_5; /* rr_mode */ - __u32 lk_padding_6; /* rr_flags */ - __u32 lk_padding_7; /* rr_padding_2 */ - __u32 lk_padding_8; /* rr_padding_3 */ - __u32 lk_padding_9; /* rr_padding_4 */ -}; - -/* instance of mdt_reint_rec */ -struct mdt_rec_unlink { - __u32 ul_opcode; - __u32 ul_cap; - __u32 ul_fsuid; - __u32 ul_fsuid_h; - __u32 ul_fsgid; - __u32 ul_fsgid_h; - __u32 ul_suppgid1; - __u32 ul_suppgid1_h; - __u32 ul_suppgid2; - __u32 ul_suppgid2_h; - struct lu_fid ul_fid1; - struct lu_fid ul_fid2; - __s64 ul_time; - __u64 ul_padding_2; /* rr_atime */ - __u64 ul_padding_3; /* rr_ctime */ - __u64 ul_padding_4; /* rr_size */ - __u64 ul_padding_5; /* rr_blocks */ - __u32 ul_bias; - __u32 ul_mode; - __u32 ul_padding_6; /* rr_flags */ - __u32 ul_padding_7; /* rr_padding_2 */ - __u32 ul_padding_8; /* rr_padding_3 */ - __u32 ul_padding_9; /* rr_padding_4 */ -}; - -/* instance of mdt_reint_rec */ -struct mdt_rec_rename { - __u32 rn_opcode; - __u32 rn_cap; - __u32 rn_fsuid; - __u32 rn_fsuid_h; - __u32 rn_fsgid; - __u32 rn_fsgid_h; - __u32 rn_suppgid1; - __u32 rn_suppgid1_h; - __u32 rn_suppgid2; - __u32 rn_suppgid2_h; - struct lu_fid rn_fid1; - struct lu_fid rn_fid2; - __s64 rn_time; - __u64 rn_padding_1; /* 
rr_atime */ - __u64 rn_padding_2; /* rr_ctime */ - __u64 rn_padding_3; /* rr_size */ - __u64 rn_padding_4; /* rr_blocks */ - __u32 rn_bias; /* some operation flags */ - __u32 rn_mode; /* cross-ref rename has mode */ - __u32 rn_padding_5; /* rr_flags */ - __u32 rn_padding_6; /* rr_padding_2 */ - __u32 rn_padding_7; /* rr_padding_3 */ - __u32 rn_padding_8; /* rr_padding_4 */ -}; - -/* instance of mdt_reint_rec */ -struct mdt_rec_setxattr { - __u32 sx_opcode; - __u32 sx_cap; - __u32 sx_fsuid; - __u32 sx_fsuid_h; - __u32 sx_fsgid; - __u32 sx_fsgid_h; - __u32 sx_suppgid1; - __u32 sx_suppgid1_h; - __u32 sx_suppgid2; - __u32 sx_suppgid2_h; - struct lu_fid sx_fid; - __u64 sx_padding_1; /* These three are rr_fid2 */ - __u32 sx_padding_2; - __u32 sx_padding_3; - __u64 sx_valid; - __s64 sx_time; - __u64 sx_padding_5; /* rr_ctime */ - __u64 sx_padding_6; /* rr_size */ - __u64 sx_padding_7; /* rr_blocks */ - __u32 sx_size; - __u32 sx_flags; - __u32 sx_padding_8; /* rr_flags */ - __u32 sx_padding_9; /* rr_padding_2 */ - __u32 sx_padding_10; /* rr_padding_3 */ - __u32 sx_padding_11; /* rr_padding_4 */ -}; - -/* - * mdt_rec_reint is the template for all mdt_reint_xxx structures. - * Do NOT change the size of various members, otherwise the value - * will be broken in lustre_swab_mdt_rec_reint(). - * - * If you add new members in other mdt_reint_xxx structures and need to use the - * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also. 
- */ -struct mdt_rec_reint { - __u32 rr_opcode; - __u32 rr_cap; - __u32 rr_fsuid; - __u32 rr_fsuid_h; - __u32 rr_fsgid; - __u32 rr_fsgid_h; - __u32 rr_suppgid1; - __u32 rr_suppgid1_h; - __u32 rr_suppgid2; - __u32 rr_suppgid2_h; - struct lu_fid rr_fid1; - struct lu_fid rr_fid2; - __s64 rr_mtime; - __s64 rr_atime; - __s64 rr_ctime; - __u64 rr_size; - __u64 rr_blocks; - __u32 rr_bias; - __u32 rr_mode; - __u32 rr_flags; - __u32 rr_flags_h; - __u32 rr_umask; - __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */ -}; - -/* lmv structures */ -struct lmv_desc { - __u32 ld_tgt_count; /* how many MDS's */ - __u32 ld_active_tgt_count; /* how many active */ - __u32 ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default hash pattern */ - __u64 ld_default_hash_size; - __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */ - __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */ - __u32 ld_qos_maxage; /* in second */ - __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */ - __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */ - struct obd_uuid ld_uuid; -}; - -/* LMV layout EA, and it will be stored both in master and slave object */ -struct lmv_mds_md_v1 { - __u32 lmv_magic; - __u32 lmv_stripe_count; - __u32 lmv_master_mdt_index; /* On master object, it is master - * MDT index, on slave object, it - * is stripe index of the slave obj - */ - __u32 lmv_hash_type; /* dir stripe policy, i.e. indicate - * which hash function to be used, - * Note: only lower 16 bits is being - * used for now. Higher 16 bits will - * be used to mark the object status, - * for example migrating or dead. 
- */ - __u32 lmv_layout_version; /* Used for directory restriping */ - __u32 lmv_padding1; - __u64 lmv_padding2; - __u64 lmv_padding3; - char lmv_pool_name[LOV_MAXPOOLNAME + 1];/* pool name */ - struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */ -}; - -#define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */ -#define LMV_MAGIC LMV_MAGIC_V1 - -/* #define LMV_USER_MAGIC 0x0CD30CD0 */ -#define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */ - -/* - *Right now only the lower part(0-16bits) of lmv_hash_type is being used, - * and the higher part will be the flag to indicate the status of object, - * for example the object is being migrated. And the hash function - * might be interpreted differently with different flags. - */ -#define LMV_HASH_TYPE_MASK 0x0000ffff - -#define LMV_HASH_FLAG_MIGRATION 0x80000000 -#define LMV_HASH_FLAG_DEAD 0x40000000 - -/** - * The FNV-1a hash algorithm is as follows: - * hash = FNV_offset_basis - * for each octet_of_data to be hashed - * hash = hash XOR octet_of_data - * hash = hash × FNV_prime - * return hash - * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash - * - * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source - * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL - **/ -#define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL -#define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL -static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size) -{ - __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS; - const unsigned char *p = buf; - size_t i; - - for (i = 0; i < size; i++) { - hash ^= p[i]; - hash *= LUSTRE_FNV_1A_64_PRIME; - } - - return hash; -} - -union lmv_mds_md { - __u32 lmv_magic; - struct lmv_mds_md_v1 lmv_md_v1; - struct lmv_user_md lmv_user_md; -}; - -static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic) -{ - ssize_t len = -EINVAL; - - switch (lmm_magic) { - case LMV_MAGIC_V1: { - struct lmv_mds_md_v1 *lmm1; - - len = 
sizeof(*lmm1); - len += stripe_count * sizeof(lmm1->lmv_stripe_fids[0]); - break; } - default: - break; - } - return len; -} - -static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm) -{ - switch (__le32_to_cpu(lmm->lmv_magic)) { - case LMV_MAGIC_V1: - return __le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count); - case LMV_USER_MAGIC: - return __le32_to_cpu(lmm->lmv_user_md.lum_stripe_count); - default: - return -EINVAL; - } -} - -enum fld_rpc_opc { - FLD_QUERY = 900, - FLD_READ = 901, - FLD_LAST_OPC, - FLD_FIRST_OPC = FLD_QUERY -}; - -enum seq_rpc_opc { - SEQ_QUERY = 700, - SEQ_LAST_OPC, - SEQ_FIRST_OPC = SEQ_QUERY -}; - -enum seq_op { - SEQ_ALLOC_SUPER = 0, - SEQ_ALLOC_META = 1 -}; - -enum fld_op { - FLD_CREATE = 0, - FLD_DELETE = 1, - FLD_LOOKUP = 2, -}; - -/* - * LOV data structures - */ - -#define LOV_MAX_UUID_BUFFER_SIZE 8192 -/* The size of the buffer the lov/mdc reserves for the - * array of UUIDs returned by the MDS. With the current - * protocol, this will limit the max number of OSTs per LOV - */ - -#define LOV_DESC_MAGIC 0xB0CCDE5C -#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */ -#define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS) - -/* LOV settings descriptor (should only contain static info) */ -struct lov_desc { - __u32 ld_tgt_count; /* how many OBD's */ - __u32 ld_active_tgt_count; /* how many active */ - __u32 ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default PATTERN_RAID0 */ - __u64 ld_default_stripe_size; /* in bytes */ - __u64 ld_default_stripe_offset; /* in bytes */ - __u32 ld_padding_0; /* unused */ - __u32 ld_qos_maxage; /* in second */ - __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */ - __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */ - struct obd_uuid ld_uuid; -}; - -#define ld_magic ld_active_tgt_count /* for swabbing from llogs */ - -/* - * LDLM requests: - */ -/* opcodes -- MUST be distinct from OST/MDS opcodes */ -enum ldlm_cmd { - LDLM_ENQUEUE = 101, - 
LDLM_CONVERT = 102, - LDLM_CANCEL = 103, - LDLM_BL_CALLBACK = 104, - LDLM_CP_CALLBACK = 105, - LDLM_GL_CALLBACK = 106, - LDLM_SET_INFO = 107, - LDLM_LAST_OPC -}; -#define LDLM_FIRST_OPC LDLM_ENQUEUE - -#define RES_NAME_SIZE 4 -struct ldlm_res_id { - __u64 name[RES_NAME_SIZE]; -}; - -#define DLDLMRES "[%#llx:%#llx:%#llx].%llx" -#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \ - (res)->lr_name.name[2], (res)->lr_name.name[3] - -/* lock types */ -enum ldlm_mode { - LCK_MINMODE = 0, - LCK_EX = 1, - LCK_PW = 2, - LCK_PR = 4, - LCK_CW = 8, - LCK_CR = 16, - LCK_NL = 32, - LCK_GROUP = 64, - LCK_COS = 128, - LCK_MAXMODE -}; - -#define LCK_MODE_NUM 8 - -enum ldlm_type { - LDLM_PLAIN = 10, - LDLM_EXTENT = 11, - LDLM_FLOCK = 12, - LDLM_IBITS = 13, - LDLM_MAX_TYPE -}; - -#define LDLM_MIN_TYPE LDLM_PLAIN - -struct ldlm_extent { - __u64 start; - __u64 end; - __u64 gid; -}; - -struct ldlm_inodebits { - __u64 bits; -}; - -struct ldlm_flock_wire { - __u64 lfw_start; - __u64 lfw_end; - __u64 lfw_owner; - __u32 lfw_padding; - __u32 lfw_pid; -}; - -/* it's important that the fields of the ldlm_extent structure match - * the first fields of the ldlm_flock structure because there is only - * one ldlm_swab routine to process the ldlm_policy_data_t union. if - * this ever changes we will need to swab the union differently based - * on the resource type. 
- */ - -union ldlm_wire_policy_data { - struct ldlm_extent l_extent; - struct ldlm_flock_wire l_flock; - struct ldlm_inodebits l_inodebits; -}; - -union ldlm_gl_desc { - struct ldlm_gl_lquota_desc lquota_desc; -}; - -enum ldlm_intent_flags { - IT_OPEN = 0x00000001, - IT_CREAT = 0x00000002, - IT_OPEN_CREAT = 0x00000003, - IT_READDIR = 0x00000004, - IT_GETATTR = 0x00000008, - IT_LOOKUP = 0x00000010, - IT_UNLINK = 0x00000020, - IT_TRUNC = 0x00000040, - IT_GETXATTR = 0x00000080, - IT_EXEC = 0x00000100, - IT_PIN = 0x00000200, - IT_LAYOUT = 0x00000400, - IT_QUOTA_DQACQ = 0x00000800, - IT_QUOTA_CONN = 0x00001000, - IT_SETXATTR = 0x00002000, -}; - -struct ldlm_intent { - __u64 opc; -}; - -struct ldlm_resource_desc { - enum ldlm_type lr_type; - __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */ - struct ldlm_res_id lr_name; -}; - -struct ldlm_lock_desc { - struct ldlm_resource_desc l_resource; - enum ldlm_mode l_req_mode; - enum ldlm_mode l_granted_mode; - union ldlm_wire_policy_data l_policy_data; -}; - -#define LDLM_LOCKREQ_HANDLES 2 -#define LDLM_ENQUEUE_CANCEL_OFF 1 - -struct ldlm_request { - __u32 lock_flags; - __u32 lock_count; - struct ldlm_lock_desc lock_desc; - struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES]; -}; - -struct ldlm_reply { - __u32 lock_flags; - __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */ - struct ldlm_lock_desc lock_desc; - struct lustre_handle lock_handle; - __u64 lock_policy_res1; - __u64 lock_policy_res2; -}; - -#define ldlm_flags_to_wire(flags) ((__u32)(flags)) -#define ldlm_flags_from_wire(flags) ((__u64)(flags)) - -/* - * Opcodes for mountconf (mgs and mgc) - */ -enum mgs_cmd { - MGS_CONNECT = 250, - MGS_DISCONNECT, - MGS_EXCEPTION, /* node died, etc. 
*/ - MGS_TARGET_REG, /* whenever target starts up */ - MGS_TARGET_DEL, - MGS_SET_INFO, - MGS_CONFIG_READ, - MGS_LAST_OPC -}; -#define MGS_FIRST_OPC MGS_CONNECT - -#define MGS_PARAM_MAXLEN 1024 -#define KEY_SET_INFO "set_info" - -struct mgs_send_param { - char mgs_param[MGS_PARAM_MAXLEN]; -}; - -/* We pass this info to the MGS so it can write config logs */ -#define MTI_NAME_MAXLEN 64 -#define MTI_PARAM_MAXLEN 4096 -#define MTI_NIDS_MAX 32 -struct mgs_target_info { - __u32 mti_lustre_ver; - __u32 mti_stripe_index; - __u32 mti_config_ver; - __u32 mti_flags; - __u32 mti_nid_count; - __u32 mti_instance; /* Running instance of target */ - char mti_fsname[MTI_NAME_MAXLEN]; - char mti_svname[MTI_NAME_MAXLEN]; - char mti_uuid[sizeof(struct obd_uuid)]; - __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/ - char mti_params[MTI_PARAM_MAXLEN]; -}; - -struct mgs_nidtbl_entry { - __u64 mne_version; /* table version of this entry */ - __u32 mne_instance; /* target instance # */ - __u32 mne_index; /* target index */ - __u32 mne_length; /* length of this entry - by bytes */ - __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */ - __u8 mne_nid_type; /* type of nid(mbz). for ipv6. */ - __u8 mne_nid_size; /* size of each NID, by bytes */ - __u8 mne_nid_count; /* # of NIDs in buffer */ - union { - lnet_nid_t nids[0]; /* variable size buffer for NIDs. 
*/ - } u; -}; - -struct mgs_config_body { - char mcb_name[MTI_NAME_MAXLEN]; /* logname */ - __u64 mcb_offset; /* next index of config log to request */ - __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */ - __u8 mcb_reserved; - __u8 mcb_bits; /* bits unit size of config log */ - __u32 mcb_units; /* # of units for bulk transfer */ -}; - -struct mgs_config_res { - __u64 mcr_offset; /* index of last config log */ - __u64 mcr_size; /* size of the log */ -}; - -/* Config marker flags (in config log) */ -#define CM_START 0x01 -#define CM_END 0x02 -#define CM_SKIP 0x04 -#define CM_UPGRADE146 0x08 -#define CM_EXCLUDE 0x10 -#define CM_START_SKIP (CM_START | CM_SKIP) - -struct cfg_marker { - __u32 cm_step; /* aka config version */ - __u32 cm_flags; - __u32 cm_vers; /* lustre release version number */ - __u32 cm_padding; /* 64 bit align */ - __s64 cm_createtime; /*when this record was first created */ - __s64 cm_canceltime; /*when this record is no longer valid*/ - char cm_tgtname[MTI_NAME_MAXLEN]; - char cm_comment[MTI_NAME_MAXLEN]; -}; - -/* - * Opcodes for multiple servers. - */ - -enum obd_cmd { - OBD_PING = 400, - OBD_LOG_CANCEL, - OBD_QC_CALLBACK, /* not used since 2.4 */ - OBD_IDX_READ, - OBD_LAST_OPC -}; -#define OBD_FIRST_OPC OBD_PING - -/** - * llog contexts indices. - * - * There is compatibility problem with indexes below, they are not - * continuous and must keep their numbers for compatibility needs. - * See LU-5218 for details. 
- */ -enum llog_ctxt_id { - LLOG_CONFIG_ORIG_CTXT = 0, - LLOG_CONFIG_REPL_CTXT = 1, - LLOG_MDS_OST_ORIG_CTXT = 2, - LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */ - LLOG_SIZE_ORIG_CTXT = 4, - LLOG_SIZE_REPL_CTXT = 5, - LLOG_TEST_ORIG_CTXT = 8, - LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */ - LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */ - LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */ - /* for multiple changelog consumers */ - LLOG_CHANGELOG_USER_ORIG_CTXT = 14, - LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */ - LLOG_MAX_CTXTS -}; - -/** Identifier for a single log object */ -struct llog_logid { - struct ost_id lgl_oi; - __u32 lgl_ogen; -} __packed; - -/** Records written to the CATALOGS list */ -#define CATLIST "CATALOGS" -struct llog_catid { - struct llog_logid lci_logid; - __u32 lci_padding1; - __u32 lci_padding2; - __u32 lci_padding3; -} __packed; - -/* Log data record types - there is no specific reason that these need to - * be related to the RPC opcodes, but no reason not to (may be handy later?) 
- */ -#define LLOG_OP_MAGIC 0x10600000 -#define LLOG_OP_MASK 0xfff00000 - -enum llog_op_type { - LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000, - OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00, - /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */ - MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) | - REINT_UNLINK, /* obsolete after 2.5.0 */ - MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) | - REINT_UNLINK, - /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */ - MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) | - REINT_SETATTR, - OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000, - /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */ - LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000, - /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */ - CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000, - CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000, - HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000, - LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539, - LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b, -}; - -#define LLOG_REC_HDR_NEEDS_SWABBING(r) \ - (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC)) - -/** Log record header - stored in little endian order. - * Each record must start with this struct, end with a llog_rec_tail, - * and be a multiple of 256 bits in size. 
- */ -struct llog_rec_hdr { - __u32 lrh_len; - __u32 lrh_index; - __u32 lrh_type; - __u32 lrh_id; -}; - -struct llog_rec_tail { - __u32 lrt_len; - __u32 lrt_index; -}; - -/* Where data follow just after header */ -#define REC_DATA(ptr) \ - ((void *)((char *)ptr + sizeof(struct llog_rec_hdr))) - -#define REC_DATA_LEN(rec) \ - (rec->lrh_len - sizeof(struct llog_rec_hdr) - \ - sizeof(struct llog_rec_tail)) - -struct llog_logid_rec { - struct llog_rec_hdr lid_hdr; - struct llog_logid lid_id; - __u32 lid_padding1; - __u64 lid_padding2; - __u64 lid_padding3; - struct llog_rec_tail lid_tail; -} __packed; - -struct llog_unlink_rec { - struct llog_rec_hdr lur_hdr; - __u64 lur_oid; - __u32 lur_oseq; - __u32 lur_count; - struct llog_rec_tail lur_tail; -} __packed; - -struct llog_unlink64_rec { - struct llog_rec_hdr lur_hdr; - struct lu_fid lur_fid; - __u32 lur_count; /* to destroy the lost precreated */ - __u32 lur_padding1; - __u64 lur_padding2; - __u64 lur_padding3; - struct llog_rec_tail lur_tail; -} __packed; - -struct llog_setattr64_rec { - struct llog_rec_hdr lsr_hdr; - struct ost_id lsr_oi; - __u32 lsr_uid; - __u32 lsr_uid_h; - __u32 lsr_gid; - __u32 lsr_gid_h; - __u64 lsr_valid; - struct llog_rec_tail lsr_tail; -} __packed; - -struct llog_size_change_rec { - struct llog_rec_hdr lsc_hdr; - struct ll_fid lsc_fid; - __u32 lsc_ioepoch; - __u32 lsc_padding1; - __u64 lsc_padding2; - __u64 lsc_padding3; - struct llog_rec_tail lsc_tail; -} __packed; - -/* changelog llog name, needed by client replicators */ -#define CHANGELOG_CATALOG "changelog_catalog" - -struct changelog_setinfo { - __u64 cs_recno; - __u32 cs_id; -} __packed; - -/** changelog record */ -struct llog_changelog_rec { - struct llog_rec_hdr cr_hdr; - struct changelog_rec cr; /**< Variable length field */ - struct llog_rec_tail cr_do_not_use; /**< for_sizezof_only */ -} __packed; - -struct llog_changelog_user_rec { - struct llog_rec_hdr cur_hdr; - __u32 cur_id; - __u32 cur_padding; - __u64 cur_endrec; - struct 
llog_rec_tail cur_tail; -} __packed; - -enum agent_req_status { - ARS_WAITING, - ARS_STARTED, - ARS_FAILED, - ARS_CANCELED, - ARS_SUCCEED, -}; - -static inline const char *agent_req_status2name(const enum agent_req_status ars) -{ - switch (ars) { - case ARS_WAITING: - return "WAITING"; - case ARS_STARTED: - return "STARTED"; - case ARS_FAILED: - return "FAILED"; - case ARS_CANCELED: - return "CANCELED"; - case ARS_SUCCEED: - return "SUCCEED"; - default: - return "UNKNOWN"; - } -} - -struct llog_agent_req_rec { - struct llog_rec_hdr arr_hdr; /**< record header */ - __u32 arr_status; /**< status of the request */ - /* must match enum - * agent_req_status - */ - __u32 arr_archive_id; /**< backend archive number */ - __u64 arr_flags; /**< req flags */ - __u64 arr_compound_id;/**< compound cookie */ - __u64 arr_req_create; /**< req. creation time */ - __u64 arr_req_change; /**< req. status change time */ - struct hsm_action_item arr_hai; /**< req. to the agent */ - struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ -} __packed; - -/* Old llog gen for compatibility */ -struct llog_gen { - __u64 mnt_cnt; - __u64 conn_cnt; -} __packed; - -struct llog_gen_rec { - struct llog_rec_hdr lgr_hdr; - struct llog_gen lgr_gen; - __u64 padding1; - __u64 padding2; - __u64 padding3; - struct llog_rec_tail lgr_tail; -}; - -/* flags for the logs */ -enum llog_flag { - LLOG_F_ZAP_WHEN_EMPTY = 0x1, - LLOG_F_IS_CAT = 0x2, - LLOG_F_IS_PLAIN = 0x4, - LLOG_F_EXT_JOBID = 0x8, - LLOG_F_IS_FIXSIZE = 0x10, - - /* - * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from - * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here, - * because the catlog record is usually fixed size, but its plain - * log record can be variable - */ - LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID, -}; - -/* On-disk header structure of each log object, stored in little endian order */ -#define LLOG_MIN_CHUNK_SIZE 8192 -#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) + - * sizeof(llh_tail) - 
sizeof(llh_bitmap) - */ -#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE) -#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */ - -/* flags for the logs */ -struct llog_log_hdr { - struct llog_rec_hdr llh_hdr; - __s64 llh_timestamp; - __u32 llh_count; - __u32 llh_bitmap_offset; - __u32 llh_size; - __u32 llh_flags; - __u32 llh_cat_idx; - /* for a catalog the first plain slot is next to it */ - struct obd_uuid llh_tgtuuid; - __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23]; - /* These fields must always be at the end of the llog_log_hdr. - * Note: llh_bitmap size is variable because llog chunk size could be - * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192 - * bytes, and the real size is stored in llh_hdr.lrh_len, which means - * llh_tail should only be referred by LLOG_HDR_TAIL(). - * But this structure is also used by client/server llog interface - * (see llog_client.c), it will be kept in its original way to avoid - * compatibility issue. 
- */ - __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)]; - struct llog_rec_tail llh_tail; -} __packed; - -#undef LLOG_HEADER_SIZE -#undef LLOG_BITMAP_BYTES - -#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ - llh->llh_bitmap_offset - \ - sizeof(llh->llh_tail)) * 8) -#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \ - (llh)->llh_bitmap_offset) -#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \ - llh->llh_hdr.lrh_len - \ - sizeof(llh->llh_tail))) - -/** log cookies are used to reference a specific log file and a record - * therein - */ -struct llog_cookie { - struct llog_logid lgc_lgl; - __u32 lgc_subsys; - __u32 lgc_index; - __u32 lgc_padding; -} __packed; - -/** llog protocol */ -enum llogd_rpc_ops { - LLOG_ORIGIN_HANDLE_CREATE = 501, - LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502, - LLOG_ORIGIN_HANDLE_READ_HEADER = 503, - LLOG_ORIGIN_HANDLE_WRITE_REC = 504, - LLOG_ORIGIN_HANDLE_CLOSE = 505, - LLOG_ORIGIN_CONNECT = 506, - LLOG_CATINFO = 507, /* deprecated */ - LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508, - LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/ - LLOG_LAST_OPC, - LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE -}; - -struct llogd_body { - struct llog_logid lgd_logid; - __u32 lgd_ctxt_idx; - __u32 lgd_llh_flags; - __u32 lgd_index; - __u32 lgd_saved_index; - __u32 lgd_len; - __u64 lgd_cur_offset; -} __packed; - -struct llogd_conn_body { - struct llog_gen lgdc_gen; - struct llog_logid lgdc_logid; - __u32 lgdc_ctxt_idx; -} __packed; - -/* Note: 64-bit types are 64-bit aligned in structure */ -struct obdo { - __u64 o_valid; /* hot fields in this obdo */ - struct ost_id o_oi; - __u64 o_parent_seq; - __u64 o_size; /* o_size-o_blocks == ost_lvb */ - __s64 o_mtime; - __s64 o_atime; - __s64 o_ctime; - __u64 o_blocks; /* brw: cli sent cached bytes */ - __u64 o_grant; - - /* 32-bit fields start here: keep an even number of them via padding */ - __u32 o_blksize; /* optimal IO blocksize */ - __u32 o_mode; /* brw: cli sent cache 
remain */ - __u32 o_uid; - __u32 o_gid; - __u32 o_flags; - __u32 o_nlink; /* brw: checksum */ - __u32 o_parent_oid; - __u32 o_misc; /* brw: o_dropped */ - - __u64 o_ioepoch; /* epoch in ost writes */ - __u32 o_stripe_idx; /* holds stripe idx */ - __u32 o_parent_ver; - struct lustre_handle o_handle; /* brw: lock handle to prolong locks - */ - struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS, - * obsolete in 2.8, reused in OSP - */ - __u32 o_uid_h; - __u32 o_gid_h; - - __u64 o_data_version; /* getattr: sum of iversion for - * each stripe. - * brw: grant space consumed on - * the client for the write - */ - __u64 o_padding_4; - __u64 o_padding_5; - __u64 o_padding_6; -}; - -#define o_dirty o_blocks -#define o_undirty o_mode -#define o_dropped o_misc -#define o_cksum o_nlink -#define o_grant_used o_data_version - -/* request structure for OST's */ -struct ost_body { - struct obdo oa; -}; - -/* Key for FIEMAP to be used in get_info calls */ -struct ll_fiemap_info_key { - char lfik_name[8]; - struct obdo lfik_oa; - struct fiemap lfik_fiemap; -}; - -/* security opcodes */ -enum sec_cmd { - SEC_CTX_INIT = 801, - SEC_CTX_INIT_CONT = 802, - SEC_CTX_FINI = 803, - SEC_LAST_OPC, - SEC_FIRST_OPC = SEC_CTX_INIT -}; - -/* - * capa related definitions - */ -#define CAPA_HMAC_MAX_LEN 64 -#define CAPA_HMAC_KEY_MAX_LEN 56 - -/* NB take care when changing the sequence of elements this struct, - * because the offset info is used in find_capa() - */ -struct lustre_capa { - struct lu_fid lc_fid; /** fid */ - __u64 lc_opc; /** operations allowed */ - __u64 lc_uid; /** file owner */ - __u64 lc_gid; /** file group */ - __u32 lc_flags; /** HMAC algorithm & flags */ - __u32 lc_keyid; /** key# used for the capability */ - __u32 lc_timeout; /** capa timeout value (sec) */ -/* FIXME: y2038 time_t overflow: */ - __u32 lc_expiry; /** expiry time (sec) */ - __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ -} __packed; - -/** lustre_capa::lc_opc */ -enum { - CAPA_OPC_BODY_WRITE = 1 << 0, 
/**< write object data */ - CAPA_OPC_BODY_READ = 1 << 1, /**< read object data */ - CAPA_OPC_INDEX_LOOKUP = 1 << 2, /**< lookup object fid */ - CAPA_OPC_INDEX_INSERT = 1 << 3, /**< insert object fid */ - CAPA_OPC_INDEX_DELETE = 1 << 4, /**< delete object fid */ - CAPA_OPC_OSS_WRITE = 1 << 5, /**< write oss object data */ - CAPA_OPC_OSS_READ = 1 << 6, /**< read oss object data */ - CAPA_OPC_OSS_TRUNC = 1 << 7, /**< truncate oss object */ - CAPA_OPC_OSS_DESTROY = 1 << 8, /**< destroy oss object */ - CAPA_OPC_META_WRITE = 1 << 9, /**< write object meta data */ - CAPA_OPC_META_READ = 1 << 10, /**< read object meta data */ -}; - -#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE) -#define CAPA_OPC_MDS_ONLY \ - (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \ - CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE) -#define CAPA_OPC_OSS_ONLY \ - (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \ - CAPA_OPC_OSS_DESTROY) -#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY -#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY) - -struct lustre_capa_key { - __u64 lk_seq; /**< mds# */ - __u32 lk_keyid; /**< key# */ - __u32 lk_padding; - __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */ -} __packed; - -/** The link ea holds 1 \a link_ea_entry for each hardlink */ -#define LINK_EA_MAGIC 0x11EAF1DFUL -struct link_ea_header { - __u32 leh_magic; - __u32 leh_reccount; - __u64 leh_len; /* total size */ - __u32 leh_overflow_time; - __u32 leh_padding; -}; - -/** Hardlink data is name and parent fid. 
- * Stored in this crazy struct for maximum packing and endian-neutrality - */ -struct link_ea_entry { - /** __u16 stored big-endian, unaligned */ - unsigned char lee_reclen[2]; - unsigned char lee_parent_fid[sizeof(struct lu_fid)]; - char lee_name[0]; -} __packed; - -/** fid2path request/reply structure */ -struct getinfo_fid2path { - struct lu_fid gf_fid; - __u64 gf_recno; - __u32 gf_linkno; - __u32 gf_pathlen; - char gf_path[0]; -} __packed; - -/** path2parent request/reply structures */ -struct getparent { - struct lu_fid gp_fid; /**< parent FID */ - __u32 gp_linkno; /**< hardlink number */ - __u32 gp_name_size; /**< size of the name field */ - char gp_name[0]; /**< zero-terminated link name */ -} __packed; - -enum { - LAYOUT_INTENT_ACCESS = 0, - LAYOUT_INTENT_READ = 1, - LAYOUT_INTENT_WRITE = 2, - LAYOUT_INTENT_GLIMPSE = 3, - LAYOUT_INTENT_TRUNC = 4, - LAYOUT_INTENT_RELEASE = 5, - LAYOUT_INTENT_RESTORE = 6 -}; - -/* enqueue layout lock with intent */ -struct layout_intent { - __u32 li_opc; /* intent operation for enqueue, read, write etc */ - __u32 li_flags; - __u64 li_start; - __u64 li_end; -}; - -/** - * On the wire version of hsm_progress structure. - * - * Contains the userspace hsm_progress and some internal fields. 
- */ -struct hsm_progress_kernel { - /* Field taken from struct hsm_progress */ - struct lu_fid hpk_fid; - __u64 hpk_cookie; - struct hsm_extent hpk_extent; - __u16 hpk_flags; - __u16 hpk_errval; /* positive val */ - __u32 hpk_padding1; - /* Additional fields */ - __u64 hpk_data_version; - __u64 hpk_padding2; -} __packed; - -/** layout swap request structure - * fid1 and fid2 are in mdt_body - */ -struct mdc_swap_layouts { - __u64 msl_flags; -} __packed; - -struct close_data { - struct lustre_handle cd_handle; - struct lu_fid cd_fid; - __u64 cd_data_version; - __u64 cd_reserved[8]; -}; - -#endif -/** @} lustreidl */ diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h deleted file mode 100644 index 6e4e109fb874..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h +++ /dev/null @@ -1,229 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -#ifndef _UAPI_LUSTRE_IOCTL_H_ -#define _UAPI_LUSTRE_IOCTL_H_ - -#include -#include -#include -#include - -#if !defined(__KERNEL__) && !defined(LUSTRE_UTILS) -# error This file is for Lustre internal use only. -#endif - -enum md_echo_cmd { - ECHO_MD_CREATE = 1, /* Open/Create file on MDT */ - ECHO_MD_MKDIR = 2, /* Mkdir on MDT */ - ECHO_MD_DESTROY = 3, /* Unlink file on MDT */ - ECHO_MD_RMDIR = 4, /* Rmdir on MDT */ - ECHO_MD_LOOKUP = 5, /* Lookup on MDT */ - ECHO_MD_GETATTR = 6, /* Getattr on MDT */ - ECHO_MD_SETATTR = 7, /* Setattr on MDT */ - ECHO_MD_ALLOC_FID = 8, /* Get FIDs from MDT */ -}; - -#define OBD_DEV_ID 1 -#define OBD_DEV_NAME "obd" -#define OBD_DEV_PATH "/dev/" OBD_DEV_NAME - -#define OBD_IOCTL_VERSION 0x00010004 -#define OBD_DEV_BY_DEVNAME 0xffffd0de - -struct obd_ioctl_data { - __u32 ioc_len; - __u32 ioc_version; - - union { - __u64 ioc_cookie; - __u64 ioc_u64_1; - }; - union { - __u32 ioc_conn1; - __u32 ioc_u32_1; - }; - union { - __u32 ioc_conn2; - __u32 ioc_u32_2; - }; - - struct obdo ioc_obdo1; - struct obdo ioc_obdo2; - - __u64 ioc_count; - __u64 ioc_offset; - __u32 ioc_dev; - __u32 ioc_command; - - __u64 ioc_nid; - __u32 ioc_nal; - __u32 ioc_type; - - /* buffers the kernel will treat as user pointers */ - __u32 ioc_plen1; - char __user *ioc_pbuf1; - __u32 ioc_plen2; - char __user *ioc_pbuf2; - - /* inline buffers for various arguments */ - __u32 ioc_inllen1; - char *ioc_inlbuf1; - __u32 ioc_inllen2; - char *ioc_inlbuf2; - __u32 ioc_inllen3; - char *ioc_inlbuf3; - __u32 ioc_inllen4; - char *ioc_inlbuf4; - - char ioc_bulk[0]; -}; - -struct obd_ioctl_hdr { - __u32 ioc_len; - __u32 ioc_version; -}; - -static inline __u32 obd_ioctl_packlen(struct obd_ioctl_data *data) -{ - __u32 len = __ALIGN_KERNEL(sizeof(*data), 8); - - len += __ALIGN_KERNEL(data->ioc_inllen1, 8); - len += __ALIGN_KERNEL(data->ioc_inllen2, 8); - len += __ALIGN_KERNEL(data->ioc_inllen3, 8); - len += __ALIGN_KERNEL(data->ioc_inllen4, 8); - - return len; -} - -/* - * 
OBD_IOC_DATA_TYPE is only for compatibility reasons with older - * Linux Lustre user tools. New ioctls should NOT use this macro as - * the ioctl "size". Instead the ioctl should get a "size" argument - * which is the actual data type used by the ioctl, to ensure the - * ioctl interface is versioned correctly. - */ -#define OBD_IOC_DATA_TYPE long - -/* IOC_LDLM_TEST _IOWR('f', 40, long) */ -/* IOC_LDLM_DUMP _IOWR('f', 41, long) */ -/* IOC_LDLM_REGRESS_START _IOWR('f', 42, long) */ -/* IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long) */ - -#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE) -#define OBD_IOC_DESTROY _IOW('f', 104, OBD_IOC_DATA_TYPE) -/* OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE) */ - -#define OBD_IOC_SETATTR _IOW('f', 107, OBD_IOC_DATA_TYPE) -#define OBD_IOC_GETATTR _IOWR('f', 108, OBD_IOC_DATA_TYPE) -#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE) -#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE) - -#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE) -#define OBD_IOC_SYNC _IOW('f', 114, OBD_IOC_DATA_TYPE) -/* OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_COPY _IOWR('f', 120, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_MIGR _IOWR('f', 121, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_PUNCH _IOWR('f', 122, OBD_IOC_DATA_TYPE) */ - -/* OBD_IOC_MODULE_DEBUG _IOWR('f', 124, OBD_IOC_DATA_TYPE) */ -#define OBD_IOC_BRW_READ _IOWR('f', 125, OBD_IOC_DATA_TYPE) -#define OBD_IOC_BRW_WRITE _IOWR('f', 126, OBD_IOC_DATA_TYPE) -#define OBD_IOC_NAME2DEV _IOWR('f', 127, OBD_IOC_DATA_TYPE) -#define OBD_IOC_UUID2DEV _IOWR('f', 130, OBD_IOC_DATA_TYPE) -#define OBD_IOC_GETNAME _IOWR('f', 131, OBD_IOC_DATA_TYPE) -#define OBD_IOC_GETMDNAME _IOR('f', 131, char[MAX_OBD_NAME]) -#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME -#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE) -#define OBD_IOC_CLIENT_RECOVER _IOW('f', 133, 
OBD_IOC_DATA_TYPE) -#define OBD_IOC_PING_TARGET _IOW('f', 136, OBD_IOC_DATA_TYPE) - -/* OBD_IOC_DEC_FS_USE_COUNT _IO('f', 139) */ -#define OBD_IOC_NO_TRANSNO _IOW('f', 140, OBD_IOC_DATA_TYPE) -#define OBD_IOC_SET_READONLY _IOW('f', 141, OBD_IOC_DATA_TYPE) -#define OBD_IOC_ABORT_RECOVERY _IOR('f', 142, OBD_IOC_DATA_TYPE) -/* OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE) */ -#define OBD_GET_VERSION _IOWR('f', 144, OBD_IOC_DATA_TYPE) -/* OBD_IOC_GSS_SUPPORT _IOWR('f', 145, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_CLOSE_UUID _IOWR('f', 147, OBD_IOC_DATA_TYPE) */ -#define OBD_IOC_CHANGELOG_SEND _IOW('f', 148, OBD_IOC_DATA_TYPE) -#define OBD_IOC_GETDEVICE _IOWR('f', 149, OBD_IOC_DATA_TYPE) -#define OBD_IOC_FID2PATH _IOWR('f', 150, OBD_IOC_DATA_TYPE) -/* lustre/lustre_user.h 151-153 */ -/* OBD_IOC_LOV_SETSTRIPE 154 LL_IOC_LOV_SETSTRIPE */ -/* OBD_IOC_LOV_GETSTRIPE 155 LL_IOC_LOV_GETSTRIPE */ -/* OBD_IOC_LOV_SETEA 156 LL_IOC_LOV_SETEA */ -/* lustre/lustre_user.h 157-159 */ -/* OBD_IOC_QUOTACHECK _IOW('f', 160, int) */ -/* OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *) */ -#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl) -/* lustre/lustre_user.h 163-176 */ -#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data) -#define OBD_IOC_CHANGELOG_DEREG _IOW('f', 178, struct obd_ioctl_data) -#define OBD_IOC_CHANGELOG_CLEAR _IOW('f', 179, struct obd_ioctl_data) -/* OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_DORECORD _IOWR('f', 183, OBD_IOC_DATA_TYPE) */ -#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE) -/* OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE) */ -/* OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE) */ -#define OBD_IOC_PARAM _IOW('f', 187, OBD_IOC_DATA_TYPE) -#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE) -#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, 
OBD_IOC_DATA_TYPE) - -#define OBD_IOC_CATLOGLIST _IOWR('f', 190, OBD_IOC_DATA_TYPE) -#define OBD_IOC_LLOG_INFO _IOWR('f', 191, OBD_IOC_DATA_TYPE) -#define OBD_IOC_LLOG_PRINT _IOWR('f', 192, OBD_IOC_DATA_TYPE) -#define OBD_IOC_LLOG_CANCEL _IOWR('f', 193, OBD_IOC_DATA_TYPE) -#define OBD_IOC_LLOG_REMOVE _IOWR('f', 194, OBD_IOC_DATA_TYPE) -#define OBD_IOC_LLOG_CHECK _IOWR('f', 195, OBD_IOC_DATA_TYPE) -/* OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE) */ -#define OBD_IOC_NODEMAP _IOWR('f', 197, OBD_IOC_DATA_TYPE) - -/* ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */ -/* ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */ -/* ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */ -/* ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */ - -#define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE) - -/* lustre/lustre_user.h 212-217 */ -#define OBD_IOC_GET_MNTOPT _IOW('f', 220, mntopt_t) -#define OBD_IOC_ECHO_MD _IOR('f', 221, struct obd_ioctl_data) -#define OBD_IOC_ECHO_ALLOC_SEQ _IOWR('f', 222, struct obd_ioctl_data) -#define OBD_IOC_START_LFSCK _IOWR('f', 230, OBD_IOC_DATA_TYPE) -#define OBD_IOC_STOP_LFSCK _IOW('f', 231, OBD_IOC_DATA_TYPE) -#define OBD_IOC_QUERY_LFSCK _IOR('f', 232, struct obd_ioctl_data) -/* lustre/lustre_user.h 240-249 */ -/* LIBCFS_IOC_DEBUG_MASK 250 */ - -#define IOC_OSC_SET_ACTIVE _IOWR('h', 21, void *) - -#endif /* _UAPI_LUSTRE_IOCTL_H_ */ diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h deleted file mode 100644 index 94dadbe8e069..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2013, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * - * Author: Nathan Rutman - * - * Kernel <-> userspace communication routines. - * The definitions below are used in the kernel and userspace. - */ - -#ifndef __UAPI_LUSTRE_KERNELCOMM_H__ -#define __UAPI_LUSTRE_KERNELCOMM_H__ - -#include - -/* KUC message header. - * All current and future KUC messages should use this header. - * To avoid having to include Lustre headers from libcfs, define this here. 
- */ -struct kuc_hdr { - __u16 kuc_magic; - /* Each new Lustre feature should use a different transport */ - __u8 kuc_transport; - __u8 kuc_flags; - /* Message type or opcode, transport-specific */ - __u16 kuc_msgtype; - /* Including header */ - __u16 kuc_msglen; -} __aligned(sizeof(__u64)); - -#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr) + CR_MAXSIZE) - -#define KUC_MAGIC 0x191C /*Lustre9etLinC */ - -/* kuc_msgtype values are defined in each transport */ -enum kuc_transport_type { - KUC_TRANSPORT_GENERIC = 1, - KUC_TRANSPORT_HSM = 2, - KUC_TRANSPORT_CHANGELOG = 3, -}; - -enum kuc_generic_message_type { - KUC_MSG_SHUTDOWN = 1, -}; - -/* KUC Broadcast Groups. This determines which userspace process hears which - * messages. Mutliple transports may be used within a group, or multiple - * groups may use the same transport. Broadcast - * groups need not be used if e.g. a UID is specified instead; - * use group 0 to signify unicast. - */ -#define KUC_GRP_HSM 0x02 -#define KUC_GRP_MAX KUC_GRP_HSM - -#define LK_FLG_STOP 0x01 -#define LK_NOFD -1U - -/* kernelcomm control structure, passed from userspace to kernel */ -struct lustre_kernelcomm { - __u32 lk_wfd; - __u32 lk_rfd; - __u32 lk_uid; - __u32 lk_group; - __u32 lk_data; - __u32 lk_flags; -} __packed; - -#endif /* __UAPI_LUSTRE_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h deleted file mode 100644 index 3343b602219b..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h +++ /dev/null @@ -1,236 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2014, Intel Corporation. - * - * Copyright 2015 Cray Inc, all rights reserved. - * Author: Ben Evans. - * - * Define ost_id associated functions - */ - -#ifndef _UAPI_LUSTRE_OSTID_H_ -#define _UAPI_LUSTRE_OSTID_H_ - -#include -#include - -static inline __u64 lmm_oi_id(const struct ost_id *oi) -{ - return oi->oi.oi_id; -} - -static inline __u64 lmm_oi_seq(const struct ost_id *oi) -{ - return oi->oi.oi_seq; -} - -static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq) -{ - oi->oi.oi_seq = seq; -} - -static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid) -{ - oi->oi.oi_id = oid; -} - -static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi, - const struct ost_id *src_oi) -{ - dst_oi->oi.oi_id = __le64_to_cpu(src_oi->oi.oi_id); - dst_oi->oi.oi_seq = __le64_to_cpu(src_oi->oi.oi_seq); -} - -static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, - const struct ost_id *src_oi) -{ - dst_oi->oi.oi_id = __cpu_to_le64(src_oi->oi.oi_id); - dst_oi->oi.oi_seq = __cpu_to_le64(src_oi->oi.oi_seq); -} - -/* extract OST sequence (group) from a wire ost_id (id/seq) pair */ -static inline __u64 ostid_seq(const struct ost_id *ostid) -{ - if (fid_seq_is_mdt0(ostid->oi.oi_seq)) - return FID_SEQ_OST_MDT0; - - if (fid_seq_is_default(ostid->oi.oi_seq)) - return FID_SEQ_LOV_DEFAULT; - - if 
(fid_is_idif(&ostid->oi_fid)) - return FID_SEQ_OST_MDT0; - - return fid_seq(&ostid->oi_fid); -} - -/* extract OST objid from a wire ost_id (id/seq) pair */ -static inline __u64 ostid_id(const struct ost_id *ostid) -{ - if (fid_seq_is_mdt0(ostid->oi.oi_seq)) - return ostid->oi.oi_id & IDIF_OID_MASK; - - if (fid_seq_is_default(ostid->oi.oi_seq)) - return ostid->oi.oi_id; - - if (fid_is_idif(&ostid->oi_fid)) - return fid_idif_id(fid_seq(&ostid->oi_fid), - fid_oid(&ostid->oi_fid), 0); - - return fid_oid(&ostid->oi_fid); -} - -static inline void ostid_set_seq(struct ost_id *oi, __u64 seq) -{ - if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) { - oi->oi.oi_seq = seq; - } else { - oi->oi_fid.f_seq = seq; - /* - * Note: if f_oid + f_ver is zero, we need init it - * to be 1, otherwise, ostid_seq will treat this - * as old ostid (oi_seq == 0) - */ - if (!oi->oi_fid.f_oid && !oi->oi_fid.f_ver) - oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID; - } -} - -static inline void ostid_set_seq_mdt0(struct ost_id *oi) -{ - ostid_set_seq(oi, FID_SEQ_OST_MDT0); -} - -static inline void ostid_set_seq_echo(struct ost_id *oi) -{ - ostid_set_seq(oi, FID_SEQ_ECHO); -} - -static inline void ostid_set_seq_llog(struct ost_id *oi) -{ - ostid_set_seq(oi, FID_SEQ_LLOG); -} - -static inline void ostid_cpu_to_le(const struct ost_id *src_oi, - struct ost_id *dst_oi) -{ - if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) { - dst_oi->oi.oi_id = __cpu_to_le64(src_oi->oi.oi_id); - dst_oi->oi.oi_seq = __cpu_to_le64(src_oi->oi.oi_seq); - } else { - fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid); - } -} - -static inline void ostid_le_to_cpu(const struct ost_id *src_oi, - struct ost_id *dst_oi) -{ - if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) { - dst_oi->oi.oi_id = __le64_to_cpu(src_oi->oi.oi_id); - dst_oi->oi.oi_seq = __le64_to_cpu(src_oi->oi.oi_seq); - } else { - fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid); - } -} - -/** - * Sigh, because pre-2.4 uses - * struct lov_mds_md_v1 { - * ........ 
- * __u64 lmm_object_id; - * __u64 lmm_object_seq; - * ...... - * } - * to identify the LOV(MDT) object, and lmm_object_seq will - * be normal_fid, which make it hard to combine these conversion - * to ostid_to FID. so we will do lmm_oi/fid conversion separately - * - * We can tell the lmm_oi by this way, - * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0 - * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL - * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k}, - * lmm_oi.f_ver = 0 - * - * But currently lmm_oi/lsm_oi does not have any "real" usages, - * except for printing some information, and the user can always - * get the real FID from LMA, besides this multiple case check might - * make swab more complicate. So we will keep using id/seq for lmm_oi. - */ - -static inline void fid_to_lmm_oi(const struct lu_fid *fid, - struct ost_id *oi) -{ - oi->oi.oi_id = fid_oid(fid); - oi->oi.oi_seq = fid_seq(fid); -} - -/** - * Unpack an OST object id/seq (group) into a FID. This is needed for - * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper - * FIDs. Note that if an id/seq is already in FID/IDIF format it will - * be passed through unchanged. Only legacy OST objects in "group 0" - * will be mapped into the IDIF namespace so that they can fit into the - * struct lu_fid fields without loss. - */ -static inline int ostid_to_fid(struct lu_fid *fid, const struct ost_id *ostid, - __u32 ost_idx) -{ - __u64 seq = ostid_seq(ostid); - - if (ost_idx > 0xffff) - return -EBADF; - - if (fid_seq_is_mdt0(seq)) { - __u64 oid = ostid_id(ostid); - - /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" - * that we map into the IDIF namespace. It allows up to 2^48 - * objects per OST, as this is the object namespace that has - * been in production for years. This can handle create rates - * of 1M objects/s/OST for 9 years, or combinations thereof. 
- */ - if (oid >= IDIF_MAX_OID) - return -EBADF; - - fid->f_seq = fid_idif_seq(oid, ost_idx); - /* truncate to 32 bits by assignment */ - fid->f_oid = oid; - /* in theory, not currently used */ - fid->f_ver = oid >> 48; - } else if (!fid_seq_is_default(seq)) { - /* This is either an IDIF object, which identifies objects - * across all OSTs, or a regular FID. The IDIF namespace - * maps legacy OST objects into the FID namespace. In both - * cases, we just pass the FID through, no conversion needed. - */ - if (ostid->oi_fid.f_ver) - return -EBADF; - - *fid = ostid->oi_fid; - } - - return 0; -} -#endif /* _UAPI_LUSTRE_OSTID_H_ */ diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h deleted file mode 100644 index 1eab2ceca338..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * User-settable parameter keys - * - * Author: Nathan Rutman - */ - -#ifndef _UAPI_LUSTRE_PARAM_H_ -#define _UAPI_LUSTRE_PARAM_H_ - -/** \defgroup param param - * - * @{ - */ - -/****************** User-settable parameter keys *********************/ -/* e.g. - * tunefs.lustre --param="failover.node=192.168.0.13@tcp0" /dev/sda - * lctl conf_param testfs-OST0000 failover.node=3@elan,192.168.0.3@tcp0 - * ... testfs-MDT0000.lov.stripesize=4M - * ... testfs-OST0000.ost.client_cache_seconds=15 - * ... testfs.sys.timeout= - * ... testfs.llite.max_read_ahead_mb=16 - */ - -/* System global or special params not handled in obd's proc - * See mgs_write_log_sys() - */ -#define PARAM_TIMEOUT "timeout=" /* global */ -#define PARAM_LDLM_TIMEOUT "ldlm_timeout=" /* global */ -#define PARAM_AT_MIN "at_min=" /* global */ -#define PARAM_AT_MAX "at_max=" /* global */ -#define PARAM_AT_EXTRA "at_extra=" /* global */ -#define PARAM_AT_EARLY_MARGIN "at_early_margin=" /* global */ -#define PARAM_AT_HISTORY "at_history=" /* global */ -#define PARAM_JOBID_VAR "jobid_var=" /* global */ -#define PARAM_MGSNODE "mgsnode=" /* only at mounttime */ -#define PARAM_FAILNODE "failover.node=" /* add failover nid */ -#define PARAM_FAILMODE "failover.mode=" /* initial mount only */ -#define PARAM_ACTIVE "active=" /* activate/deactivate */ -#define PARAM_NETWORK "network=" /* bind on nid */ -#define PARAM_ID_UPCALL "identity_upcall=" /* identity upcall */ - -/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */ -#define PARAM_OST "ost." -#define PARAM_OSD "osd." -#define PARAM_OSC "osc." -#define PARAM_MDT "mdt." -#define PARAM_HSM "mdt.hsm." -#define PARAM_MDD "mdd." -#define PARAM_MDC "mdc." -#define PARAM_LLITE "llite." -#define PARAM_LOV "lov." -#define PARAM_LOD "lod." -#define PARAM_OSP "osp." -#define PARAM_SYS "sys." 
/* global */ -#define PARAM_SRPC "srpc." -#define PARAM_SRPC_FLVR "srpc.flavor." -#define PARAM_SRPC_UDESC "srpc.udesc.cli2mdt" -#define PARAM_SEC "security." -#define PARAM_QUOTA "quota." /* global */ - -/** @} param */ - -#endif /* _UAPI_LUSTRE_PARAM_H_ */ diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h deleted file mode 100644 index 69387f36d1f1..000000000000 --- a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h +++ /dev/null @@ -1,1327 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre/lustre_user.h - * - * Lustre public user-space interface definitions. 
- */ - -#ifndef _LUSTRE_USER_H -#define _LUSTRE_USER_H - -/** \defgroup lustreuser lustreuser - * - * @{ - */ - -#ifdef __KERNEL__ -# include -# include -# include -# include /* snprintf() */ -# include -#else /* !__KERNEL__ */ -# define NEED_QUOTA_DEFS -# include /* snprintf() */ -# include -# include -# include -#endif /* __KERNEL__ */ -#include - -/* - * We need to always use 64bit version because the structure - * is shared across entire cluster where 32bit and 64bit machines - * are co-existing. - */ -#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64) -typedef struct stat64 lstat_t; -#define lstat_f lstat64 -#define fstat_f fstat64 -#define fstatat_f fstatat64 -#else -typedef struct stat lstat_t; -#define lstat_f lstat -#define fstat_f fstat -#define fstatat_f fstatat -#endif - -#define HAVE_LOV_USER_MDS_DATA - -#define LUSTRE_EOF 0xffffffffffffffffULL - -/* for statfs() */ -#define LL_SUPER_MAGIC 0x0BD00BD0 - -#ifndef FSFILT_IOC_GETFLAGS -#define FSFILT_IOC_GETFLAGS _IOR('f', 1, long) -#define FSFILT_IOC_SETFLAGS _IOW('f', 2, long) -#define FSFILT_IOC_GETVERSION _IOR('f', 3, long) -#define FSFILT_IOC_SETVERSION _IOW('f', 4, long) -#define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long) -#define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long) -#endif - -/* FIEMAP flags supported by Lustre */ -#define LUSTRE_FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_DEVICE_ORDER) - -enum obd_statfs_state { - OS_STATE_DEGRADED = 0x00000001, /**< RAID degraded/rebuilding */ - OS_STATE_READONLY = 0x00000002, /**< filesystem is read-only */ - OS_STATE_RDONLY_1 = 0x00000004, /**< obsolete 1.6, was EROFS=30 */ - OS_STATE_RDONLY_2 = 0x00000008, /**< obsolete 1.6, was EROFS=30 */ - OS_STATE_RDONLY_3 = 0x00000010, /**< obsolete 1.6, was EROFS=30 */ -}; - -struct obd_statfs { - __u64 os_type; - __u64 os_blocks; - __u64 os_bfree; - __u64 os_bavail; - __u64 os_files; - __u64 os_ffree; - __u8 os_fsid[40]; - __u32 os_bsize; - __u32 os_namelen; - __u64 os_maxbytes; - __u32 os_state; 
/**< obd_statfs_state OS_STATE_* flag */ - __u32 os_fprecreated; /* objs available now to the caller */ - /* used in QoS code to find preferred OSTs */ - __u32 os_spare2; - __u32 os_spare3; - __u32 os_spare4; - __u32 os_spare5; - __u32 os_spare6; - __u32 os_spare7; - __u32 os_spare8; - __u32 os_spare9; -}; - -/** - * File IDentifier. - * - * FID is a cluster-wide unique identifier of a file or an object (stripe). - * FIDs are never reused. - **/ -struct lu_fid { - /** - * FID sequence. Sequence is a unit of migration: all files (objects) - * with FIDs from a given sequence are stored on the same server. - * Lustre should support 2^64 objects, so even if each sequence - * has only a single object we can still enumerate 2^64 objects. - **/ - __u64 f_seq; - /* FID number within sequence. */ - __u32 f_oid; - /** - * FID version, used to distinguish different versions (in the sense - * of snapshots, etc.) of the same file system object. Not currently - * used. - **/ - __u32 f_ver; -}; - -static inline bool fid_is_zero(const struct lu_fid *fid) -{ - return !fid->f_seq && !fid->f_oid; -} - -struct filter_fid { - struct lu_fid ff_parent; /* ff_parent.f_ver == file stripe number */ -}; - -/* keep this one for compatibility */ -struct filter_fid_old { - struct lu_fid ff_parent; - __u64 ff_objid; - __u64 ff_seq; -}; - -/* Userspace should treat lu_fid as opaque, and only use the following methods - * to print or parse them. Other functions (e.g. compare, swab) could be moved - * here from lustre_idl.h if needed. - */ -struct lu_fid; - -/** - * Following struct for object attributes, that will be kept inode's EA. - * Introduced in 2.0 release (please see b15993, for details) - * Added to all objects since Lustre 2.4 as contains self FID - */ -struct lustre_mdt_attrs { - /** - * Bitfield for supported data in this structure. From enum lma_compat. - * lma_self_fid and lma_flags are always available. - */ - __u32 lma_compat; - /** - * Per-file incompat feature list. 
Lustre version should support all - * flags set in this field. The supported feature mask is available in - * LMA_INCOMPAT_SUPP. - */ - __u32 lma_incompat; - /** FID of this inode */ - struct lu_fid lma_self_fid; -}; - -/** - * Prior to 2.4, the LMA structure also included SOM attributes which has since - * been moved to a dedicated xattr - * lma_flags was also removed because of lma_compat/incompat fields. - */ -#define LMA_OLD_SIZE (sizeof(struct lustre_mdt_attrs) + 5 * sizeof(__u64)) - -/** - * OST object IDentifier. - */ -struct ost_id { - union { - struct { - __u64 oi_id; - __u64 oi_seq; - } oi; - struct lu_fid oi_fid; - }; -}; - -#define DOSTID "%#llx:%llu" -#define POSTID(oi) ostid_seq(oi), ostid_id(oi) - -/* - * The ioctl naming rules: - * LL_* - works on the currently opened filehandle instead of parent dir - * *_OBD_* - gets data for both OSC or MDC (LOV, LMV indirectly) - * *_MDC_* - gets/sets data related to MDC - * *_LOV_* - gets/sets data related to OSC/LOV - * *FILE* - called on parent dir and passes in a filename - * *STRIPE* - set/get lov_user_md - * *INFO - set/get lov_user_mds_data - */ -/* lustre_ioctl.h 101-150 */ -#define LL_IOC_GETFLAGS _IOR('f', 151, long) -#define LL_IOC_SETFLAGS _IOW('f', 152, long) -#define LL_IOC_CLRFLAGS _IOW('f', 153, long) -#define LL_IOC_LOV_SETSTRIPE _IOW('f', 154, long) -#define LL_IOC_LOV_GETSTRIPE _IOW('f', 155, long) -#define LL_IOC_LOV_SETEA _IOW('f', 156, long) -/* LL_IOC_RECREATE_OBJ 157 obsolete */ -/* LL_IOC_RECREATE_FID 158 obsolete */ -#define LL_IOC_GROUP_LOCK _IOW('f', 158, long) -#define LL_IOC_GROUP_UNLOCK _IOW('f', 159, long) -/* #define LL_IOC_QUOTACHECK 160 OBD_IOC_QUOTACHECK */ -/* #define LL_IOC_POLL_QUOTACHECK 161 OBD_IOC_POLL_QUOTACHECK */ -/* #define LL_IOC_QUOTACTL 162 OBD_IOC_QUOTACTL */ -#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *) -/* IOC_LOV_GETINFO 165 obsolete */ -#define LL_IOC_FLUSHCTX _IOW('f', 166, long) -/* LL_IOC_RMTACL 167 obsolete */ -#define LL_IOC_GETOBDCOUNT 
_IOR('f', 168, long) -#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long) -#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long) -#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid) -#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long) -#define LL_IOC_PATH2FID _IOR('f', 173, long) -#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *) -#define LL_IOC_GET_MDTIDX _IOR('f', 175, int) - -/* lustre_ioctl.h 177-210 */ -#define LL_IOC_HSM_STATE_GET _IOR('f', 211, struct hsm_user_state) -#define LL_IOC_HSM_STATE_SET _IOW('f', 212, struct hsm_state_set) -#define LL_IOC_HSM_CT_START _IOW('f', 213, struct lustre_kernelcomm) -#define LL_IOC_HSM_COPY_START _IOW('f', 214, struct hsm_copy *) -#define LL_IOC_HSM_COPY_END _IOW('f', 215, struct hsm_copy *) -#define LL_IOC_HSM_PROGRESS _IOW('f', 216, struct hsm_user_request) -#define LL_IOC_HSM_REQUEST _IOW('f', 217, struct hsm_user_request) -#define LL_IOC_DATA_VERSION _IOR('f', 218, struct ioc_data_version) -#define LL_IOC_LOV_SWAP_LAYOUTS _IOW('f', 219, \ - struct lustre_swap_layouts) -#define LL_IOC_HSM_ACTION _IOR('f', 220, \ - struct hsm_current_action) -/* see for ioctl numbers 221-232 */ - -#define LL_IOC_LMV_SETSTRIPE _IOWR('f', 240, struct lmv_user_md) -#define LL_IOC_LMV_GETSTRIPE _IOWR('f', 241, struct lmv_user_md) -#define LL_IOC_SET_LEASE _IOWR('f', 243, long) -#define LL_IOC_GET_LEASE _IO('f', 244) -#define LL_IOC_HSM_IMPORT _IOWR('f', 245, struct hsm_user_import) -#define LL_IOC_LMV_SET_DEFAULT_STRIPE _IOWR('f', 246, struct lmv_user_md) -#define LL_IOC_MIGRATE _IOR('f', 247, int) -#define LL_IOC_FID2MDTIDX _IOWR('f', 248, struct lu_fid) -#define LL_IOC_GETPARENT _IOWR('f', 249, struct getparent) - -/* Lease types for use as arg and return of LL_IOC_{GET,SET}_LEASE ioctl. 
*/ -enum ll_lease_type { - LL_LEASE_RDLCK = 0x1, - LL_LEASE_WRLCK = 0x2, - LL_LEASE_UNLCK = 0x4, -}; - -#define LL_STATFS_LMV 1 -#define LL_STATFS_LOV 2 -#define LL_STATFS_NODELAY 4 - -#define IOC_MDC_TYPE 'i' -#define IOC_MDC_LOOKUP _IOWR(IOC_MDC_TYPE, 20, struct obd_device *) -#define IOC_MDC_GETFILESTRIPE _IOWR(IOC_MDC_TYPE, 21, struct lov_user_md *) -#define IOC_MDC_GETFILEINFO _IOWR(IOC_MDC_TYPE, 22, struct lov_user_mds_data *) -#define LL_IOC_MDC_GETINFO _IOWR(IOC_MDC_TYPE, 23, struct lov_user_mds_data *) - -#define MAX_OBD_NAME 128 /* If this changes, a NEW ioctl must be added */ - -/* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular - * files, but are unlikely to be used in practice and are not harmful if - * used incorrectly. O_NOCTTY and FASYNC are only meaningful for character - * devices and are safe for use on new files (See LU-812, LU-4209). - */ -#define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC) - -#define LL_FILE_IGNORE_LOCK 0x00000001 -#define LL_FILE_GROUP_LOCKED 0x00000002 -#define LL_FILE_READAHEA 0x00000004 -#define LL_FILE_LOCKED_DIRECTIO 0x00000008 /* client-side locks with dio */ -#define LL_FILE_LOCKLESS_IO 0x00000010 /* server-side locks with cio */ -#define LL_FILE_RMTACL 0x00000020 - -#define LOV_USER_MAGIC_V1 0x0BD10BD0 -#define LOV_USER_MAGIC LOV_USER_MAGIC_V1 -#define LOV_USER_MAGIC_JOIN_V1 0x0BD20BD0 -#define LOV_USER_MAGIC_V3 0x0BD30BD0 -/* 0x0BD40BD0 is occupied by LOV_MAGIC_MIGRATE */ -#define LOV_USER_MAGIC_SPECIFIC 0x0BD50BD0 /* for specific OSTs */ - -#define LMV_USER_MAGIC 0x0CD30CD0 /*default lmv magic*/ - -#define LOV_PATTERN_RAID0 0x001 -#define LOV_PATTERN_RAID1 0x002 -#define LOV_PATTERN_FIRST 0x100 -#define LOV_PATTERN_CMOBD 0x200 - -#define LOV_PATTERN_F_MASK 0xffff0000 -#define LOV_PATTERN_F_HOLE 0x40000000 /* there is hole in LOV EA */ -#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */ - -#define LOV_MAXPOOLNAME 15 -#define LOV_POOLNAMEF "%.15s" - -#define LOV_MIN_STRIPE_BITS 16 /* 
maximum PAGE_SIZE (ia64), power of 2 */ -#define LOV_MIN_STRIPE_SIZE (1 << LOV_MIN_STRIPE_BITS) -#define LOV_MAX_STRIPE_COUNT_OLD 160 -/* This calculation is crafted so that input of 4096 will result in 160 - * which in turn is equal to old maximal stripe count. - * XXX: In fact this is too simplified for now, what it also need is to get - * ea_type argument to clearly know how much space each stripe consumes. - * - * The limit of 12 pages is somewhat arbitrary, but is a reasonably large - * allocation that is sufficient for the current generation of systems. - * - * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) - */ -#define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */ -#define LOV_ALL_STRIPES 0xffff /* only valid for directories */ -#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */ - -#define XATTR_LUSTRE_PREFIX "lustre." -#define XATTR_LUSTRE_LOV "lustre.lov" - -#define lov_user_ost_data lov_user_ost_data_v1 -struct lov_user_ost_data_v1 { /* per-stripe data structure */ - struct ost_id l_ost_oi; /* OST object ID */ - __u32 l_ost_gen; /* generation of this OST index */ - __u32 l_ost_idx; /* OST index in LOV */ -} __packed; - -#define lov_user_md lov_user_md_v1 -struct lov_user_md_v1 { /* LOV EA user data (host-endian) */ - __u32 lmm_magic; /* magic number = LOV_USER_MAGIC_V1 */ - __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ - struct ost_id lmm_oi; /* LOV object ID */ - __u32 lmm_stripe_size; /* size of stripe in bytes */ - __u16 lmm_stripe_count; /* num stripes in use for this object */ - union { - __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing - */ - __u16 lmm_layout_gen; /* layout generation number - * used when reading - */ - }; - struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ -} __attribute__((packed, __may_alias__)); - -struct lov_user_md_v3 { /* LOV EA user data (host-endian) */ - __u32 lmm_magic; /* magic number = 
LOV_USER_MAGIC_V3 */ - __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ - struct ost_id lmm_oi; /* LOV object ID */ - __u32 lmm_stripe_size; /* size of stripe in bytes */ - __u16 lmm_stripe_count; /* num stripes in use for this object */ - union { - __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing - */ - __u16 lmm_layout_gen; /* layout generation number - * used when reading - */ - }; - char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */ - struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ -} __packed; - -static inline __u32 lov_user_md_size(__u16 stripes, __u32 lmm_magic) -{ - if (lmm_magic == LOV_USER_MAGIC_V1) - return sizeof(struct lov_user_md_v1) + - stripes * sizeof(struct lov_user_ost_data_v1); - return sizeof(struct lov_user_md_v3) + - stripes * sizeof(struct lov_user_ost_data_v1); -} - -/* Compile with -D_LARGEFILE64_SOURCE or -D_GNU_SOURCE (or #define) to - * use this. It is unsafe to #define those values in this header as it - * is possible the application has already #included . 
- */ -#ifdef HAVE_LOV_USER_MDS_DATA -#define lov_user_mds_data lov_user_mds_data_v1 -struct lov_user_mds_data_v1 { - lstat_t lmd_st; /* MDS stat struct */ - struct lov_user_md_v1 lmd_lmm; /* LOV EA V1 user data */ -} __packed; - -struct lov_user_mds_data_v3 { - lstat_t lmd_st; /* MDS stat struct */ - struct lov_user_md_v3 lmd_lmm; /* LOV EA V3 user data */ -} __packed; -#endif - -struct lmv_user_mds_data { - struct lu_fid lum_fid; - __u32 lum_padding; - __u32 lum_mds; -}; - -enum lmv_hash_type { - LMV_HASH_TYPE_UNKNOWN = 0, /* 0 is reserved for testing purpose */ - LMV_HASH_TYPE_ALL_CHARS = 1, - LMV_HASH_TYPE_FNV_1A_64 = 2, -}; - -#define LMV_HASH_NAME_ALL_CHARS "all_char" -#define LMV_HASH_NAME_FNV_1A_64 "fnv_1a_64" - -/* - * Got this according to how get LOV_MAX_STRIPE_COUNT, see above, - * (max buffer size - lmv+rpc header) / sizeof(struct lmv_user_mds_data) - */ -#define LMV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */ -#define lmv_user_md lmv_user_md_v1 -struct lmv_user_md_v1 { - __u32 lum_magic; /* must be the first field */ - __u32 lum_stripe_count; /* dirstripe count */ - __u32 lum_stripe_offset; /* MDT idx for default dirstripe */ - __u32 lum_hash_type; /* Dir stripe policy */ - __u32 lum_type; /* LMV type: default or normal */ - __u32 lum_padding1; - __u32 lum_padding2; - __u32 lum_padding3; - char lum_pool_name[LOV_MAXPOOLNAME + 1]; - struct lmv_user_mds_data lum_objects[0]; -} __packed; - -static inline int lmv_user_md_size(int stripes, int lmm_magic) -{ - return sizeof(struct lmv_user_md) + - stripes * sizeof(struct lmv_user_mds_data); -} - -struct ll_recreate_obj { - __u64 lrc_id; - __u32 lrc_ost_idx; -}; - -struct ll_fid { - __u64 id; /* holds object id */ - __u32 generation; /* holds object generation */ - __u32 f_type; /* holds object type or stripe idx when passing it to - * OST for saving into EA. 
- */ -}; - -#define UUID_MAX 40 -struct obd_uuid { - char uuid[UUID_MAX]; -}; - -static inline bool obd_uuid_equals(const struct obd_uuid *u1, - const struct obd_uuid *u2) -{ - return strcmp((char *)u1->uuid, (char *)u2->uuid) == 0; -} - -static inline int obd_uuid_empty(struct obd_uuid *uuid) -{ - return uuid->uuid[0] == '\0'; -} - -static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp) -{ - strncpy((char *)uuid->uuid, tmp, sizeof(*uuid)); - uuid->uuid[sizeof(*uuid) - 1] = '\0'; -} - -/* For printf's only, make sure uuid is terminated */ -static inline char *obd_uuid2str(const struct obd_uuid *uuid) -{ - if (!uuid) - return NULL; - - if (uuid->uuid[sizeof(*uuid) - 1] != '\0') { - /* Obviously not safe, but for printfs, no real harm done... - * we're always null-terminated, even in a race. - */ - static char temp[sizeof(*uuid)]; - - memcpy(temp, uuid->uuid, sizeof(*uuid) - 1); - temp[sizeof(*uuid) - 1] = '\0'; - return temp; - } - return (char *)(uuid->uuid); -} - -/* Extract fsname from uuid (or target name) of a target - * e.g. (myfs-OST0007_UUID -> myfs) - * see also deuuidify. - */ -static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) -{ - char *p; - - strncpy(buf, uuid, buflen - 1); - buf[buflen - 1] = '\0'; - p = strrchr(buf, '-'); - if (p) - *p = '\0'; -} - -/* printf display format - * * usage: printf("file FID is "DFID"\n", PFID(fid)); - */ -#define FID_NOBRACE_LEN 40 -#define FID_LEN (FID_NOBRACE_LEN + 2) -#define DFID_NOBRACE "%#llx:0x%x:0x%x" -#define DFID "[" DFID_NOBRACE "]" -#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver - -/* scanf input parse format for fids in DFID_NOBRACE format - * Need to strip '[' from DFID format first or use "["SFID"]" at caller. 
- * usage: sscanf(fidstr, SFID, RFID(&fid)); - */ -#define SFID "0x%llx:0x%x:0x%x" -#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver) - -/********* Quotas **********/ - -#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */ -#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */ -#define Q_GETOINFO 0x800102 /* get obd quota info */ -#define Q_GETOQUOTA 0x800103 /* get obd quotas */ -#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */ - -/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */ -#define LUSTRE_Q_QUOTAON 0x800002 /* deprecated as of 2.4 */ -#define LUSTRE_Q_QUOTAOFF 0x800003 /* deprecated as of 2.4 */ -#define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */ -#define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */ -#define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */ -#define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */ -/* lustre-specific control commands */ -#define LUSTRE_Q_INVALIDATE 0x80000b /* deprecated as of 2.4 */ -#define LUSTRE_Q_FINVALIDATE 0x80000c /* deprecated as of 2.4 */ - -#define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */ - -#define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629 - -/* permission */ -#define N_PERMS_MAX 64 - -struct perm_downcall_data { - __u64 pdd_nid; - __u32 pdd_perm; - __u32 pdd_padding; -}; - -struct identity_downcall_data { - __u32 idd_magic; - __u32 idd_err; - __u32 idd_uid; - __u32 idd_gid; - __u32 idd_nperms; - __u32 idd_ngroups; - struct perm_downcall_data idd_perms[N_PERMS_MAX]; - __u32 idd_groups[0]; -}; - -/* lustre volatile file support - * file name header: .^L^S^T^R:volatile" - */ -#define LUSTRE_VOLATILE_HDR ".\x0c\x13\x14\x12:VOLATILE" -#define LUSTRE_VOLATILE_HDR_LEN 14 -/* hdr + MDT index */ -#define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:" - -enum lustre_quota_version { - LUSTRE_QUOTA_V2 = 1 -}; - -/* XXX: same as if_dqinfo struct in kernel */ -struct obd_dqinfo { - __u64 dqi_bgrace; - __u64 
dqi_igrace; - __u32 dqi_flags; - __u32 dqi_valid; -}; - -/* XXX: same as if_dqblk struct in kernel, plus one padding */ -struct obd_dqblk { - __u64 dqb_bhardlimit; - __u64 dqb_bsoftlimit; - __u64 dqb_curspace; - __u64 dqb_ihardlimit; - __u64 dqb_isoftlimit; - __u64 dqb_curinodes; - __u64 dqb_btime; - __u64 dqb_itime; - __u32 dqb_valid; - __u32 dqb_padding; -}; - -enum { - QC_GENERAL = 0, - QC_MDTIDX = 1, - QC_OSTIDX = 2, - QC_UUID = 3 -}; - -struct if_quotactl { - __u32 qc_cmd; - __u32 qc_type; - __u32 qc_id; - __u32 qc_stat; - __u32 qc_valid; - __u32 qc_idx; - struct obd_dqinfo qc_dqinfo; - struct obd_dqblk qc_dqblk; - char obd_type[16]; - struct obd_uuid obd_uuid; -}; - -/* swap layout flags */ -#define SWAP_LAYOUTS_CHECK_DV1 (1 << 0) -#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1) -#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2) -#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3) -#define SWAP_LAYOUTS_CLOSE (1 << 4) - -/* Swap XATTR_NAME_HSM as well, only on the MDT so far */ -#define SWAP_LAYOUTS_MDS_HSM (1 << 31) -struct lustre_swap_layouts { - __u64 sl_flags; - __u32 sl_fd; - __u32 sl_gid; - __u64 sl_dv1; - __u64 sl_dv2; -}; - -/********* Changelogs **********/ -/** Changelog record types */ -enum changelog_rec_type { - CL_MARK = 0, - CL_CREATE = 1, /* namespace */ - CL_MKDIR = 2, /* namespace */ - CL_HARDLINK = 3, /* namespace */ - CL_SOFTLINK = 4, /* namespace */ - CL_MKNOD = 5, /* namespace */ - CL_UNLINK = 6, /* namespace */ - CL_RMDIR = 7, /* namespace */ - CL_RENAME = 8, /* namespace */ - CL_EXT = 9, /* namespace extended record (2nd half of rename) */ - CL_OPEN = 10, /* not currently used */ - CL_CLOSE = 11, /* may be written to log only with mtime change */ - CL_LAYOUT = 12, /* file layout/striping modified */ - CL_TRUNC = 13, - CL_SETATTR = 14, - CL_XATTR = 15, - CL_HSM = 16, /* HSM specific events, see flags */ - CL_MTIME = 17, /* Precedence: setattr > mtime > ctime > atime */ - CL_CTIME = 18, - CL_ATIME = 19, - CL_LAST -}; - -static inline const char 
*changelog_type2str(int type) -{ - static const char *changelog_str[] = { - "MARK", "CREAT", "MKDIR", "HLINK", "SLINK", "MKNOD", "UNLNK", - "RMDIR", "RENME", "RNMTO", "OPEN", "CLOSE", "LYOUT", "TRUNC", - "SATTR", "XATTR", "HSM", "MTIME", "CTIME", "ATIME", - }; - - if (type >= 0 && type < CL_LAST) - return changelog_str[type]; - return NULL; -} - -/* per-record flags */ -#define CLF_FLAGSHIFT 12 -#define CLF_FLAGMASK ((1U << CLF_FLAGSHIFT) - 1) -#define CLF_VERMASK (~CLF_FLAGMASK) -enum changelog_rec_flags { - CLF_VERSION = 0x1000, - CLF_RENAME = 0x2000, - CLF_JOBID = 0x4000, - CLF_SUPPORTED = CLF_VERSION | CLF_RENAME | CLF_JOBID -}; - -/* Anything under the flagmask may be per-type (if desired) */ -/* Flags for unlink */ -#define CLF_UNLINK_LAST 0x0001 /* Unlink of last hardlink */ -#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */ - /* HSM cleaning needed */ -/* Flags for rename */ -#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of - * target - */ -#define CLF_RENAME_LAST_EXISTS 0x0002 /* rename unlink last hardlink of target - * has an archive in backend - */ - -/* Flags for HSM */ -/* 12b used (from high weight to low weight): - * 2b for flags - * 3b for event - * 7b for error code - */ -#define CLF_HSM_ERR_L 0 /* HSM return code, 7 bits */ -#define CLF_HSM_ERR_H 6 -#define CLF_HSM_EVENT_L 7 /* HSM event, 3 bits, see enum hsm_event */ -#define CLF_HSM_EVENT_H 9 -#define CLF_HSM_FLAG_L 10 /* HSM flags, 2 bits, 1 used, 1 spare */ -#define CLF_HSM_FLAG_H 11 -#define CLF_HSM_SPARE_L 12 /* 4 spare bits */ -#define CLF_HSM_SPARE_H 15 -#define CLF_HSM_LAST 15 - -/* Remove bits higher than _h, then extract the value - * between _h and _l by shifting lower weigth to bit 0. 
- */ -#define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \ - >> (CLF_HSM_LAST - _h + _l)) - -#define CLF_HSM_SUCCESS 0x00 -#define CLF_HSM_MAXERROR 0x7E -#define CLF_HSM_ERROVERFLOW 0x7F - -#define CLF_HSM_DIRTY 1 /* file is dirty after HSM request end */ - -/* 3 bits field => 8 values allowed */ -enum hsm_event { - HE_ARCHIVE = 0, - HE_RESTORE = 1, - HE_CANCEL = 2, - HE_RELEASE = 3, - HE_REMOVE = 4, - HE_STATE = 5, - HE_SPARE1 = 6, - HE_SPARE2 = 7, -}; - -static inline enum hsm_event hsm_get_cl_event(__u16 flags) -{ - return CLF_GET_BITS(flags, CLF_HSM_EVENT_H, CLF_HSM_EVENT_L); -} - -static inline void hsm_set_cl_event(int *flags, enum hsm_event he) -{ - *flags |= (he << CLF_HSM_EVENT_L); -} - -static inline __u16 hsm_get_cl_flags(int flags) -{ - return CLF_GET_BITS(flags, CLF_HSM_FLAG_H, CLF_HSM_FLAG_L); -} - -static inline void hsm_set_cl_flags(int *flags, int bits) -{ - *flags |= (bits << CLF_HSM_FLAG_L); -} - -static inline int hsm_get_cl_error(int flags) -{ - return CLF_GET_BITS(flags, CLF_HSM_ERR_H, CLF_HSM_ERR_L); -} - -static inline void hsm_set_cl_error(int *flags, int error) -{ - *flags |= (error << CLF_HSM_ERR_L); -} - -enum changelog_send_flag { - /* Not yet implemented */ - CHANGELOG_FLAG_FOLLOW = 0x01, - /* - * Blocking IO makes sense in case of slow user parsing of the records, - * but it also prevents us from cleaning up if the records are not - * consumed. - */ - CHANGELOG_FLAG_BLOCK = 0x02, - /* Pack jobid into the changelog records if available. */ - CHANGELOG_FLAG_JOBID = 0x04, -}; - -#define CR_MAXSIZE cfs_size_round(2 * NAME_MAX + 2 + \ - changelog_rec_offset(CLF_SUPPORTED)) - -/* 31 usable bytes string + null terminator. */ -#define LUSTRE_JOBID_SIZE 32 - -/* - * This is the minimal changelog record. It can contain extensions - * such as rename fields or process jobid. Its exact content is described - * by the cr_flags. - * - * Extensions are packed in the same order as their corresponding flags. 
- */ -struct changelog_rec { - __u16 cr_namelen; - __u16 cr_flags; /**< \a changelog_rec_flags */ - __u32 cr_type; /**< \a changelog_rec_type */ - __u64 cr_index; /**< changelog record number */ - __u64 cr_prev; /**< last index for this target fid */ - __u64 cr_time; - union { - struct lu_fid cr_tfid; /**< target fid */ - __u32 cr_markerflags; /**< CL_MARK flags */ - }; - struct lu_fid cr_pfid; /**< parent fid */ -} __packed; - -/* Changelog extension for RENAME. */ -struct changelog_ext_rename { - struct lu_fid cr_sfid; /**< source fid, or zero */ - struct lu_fid cr_spfid; /**< source parent fid, or zero */ -}; - -/* Changelog extension to include JOBID. */ -struct changelog_ext_jobid { - char cr_jobid[LUSTRE_JOBID_SIZE]; /**< zero-terminated string. */ -}; - -static inline size_t changelog_rec_offset(enum changelog_rec_flags crf) -{ - size_t size = sizeof(struct changelog_rec); - - if (crf & CLF_RENAME) - size += sizeof(struct changelog_ext_rename); - - if (crf & CLF_JOBID) - size += sizeof(struct changelog_ext_jobid); - - return size; -} - -static inline size_t changelog_rec_size(struct changelog_rec *rec) -{ - return changelog_rec_offset(rec->cr_flags); -} - -static inline size_t changelog_rec_varsize(struct changelog_rec *rec) -{ - return changelog_rec_size(rec) - sizeof(*rec) + rec->cr_namelen; -} - -static inline -struct changelog_ext_rename *changelog_rec_rename(struct changelog_rec *rec) -{ - enum changelog_rec_flags crf = rec->cr_flags & CLF_VERSION; - - return (struct changelog_ext_rename *)((char *)rec + - changelog_rec_offset(crf)); -} - -/* The jobid follows the rename extension, if present */ -static inline -struct changelog_ext_jobid *changelog_rec_jobid(struct changelog_rec *rec) -{ - enum changelog_rec_flags crf = rec->cr_flags & - (CLF_VERSION | CLF_RENAME); - - return (struct changelog_ext_jobid *)((char *)rec + - changelog_rec_offset(crf)); -} - -/* The name follows the rename and jobid extensions, if present */ -static inline char 
*changelog_rec_name(struct changelog_rec *rec) -{ - return (char *)rec + changelog_rec_offset(rec->cr_flags & - CLF_SUPPORTED); -} - -static inline size_t changelog_rec_snamelen(struct changelog_rec *rec) -{ - return rec->cr_namelen - strlen(changelog_rec_name(rec)) - 1; -} - -static inline char *changelog_rec_sname(struct changelog_rec *rec) -{ - char *cr_name = changelog_rec_name(rec); - - return cr_name + strlen(cr_name) + 1; -} - -/** - * Remap a record to the desired format as specified by the crf flags. - * The record must be big enough to contain the final remapped version. - * Superfluous extension fields are removed and missing ones are added - * and zeroed. The flags of the record are updated accordingly. - * - * The jobid and rename extensions can be added to a record, to match the - * format an application expects, typically. In this case, the newly added - * fields will be zeroed. - * The Jobid field can be removed, to guarantee compatibility with older - * clients that don't expect this field in the records they process. - * - * The following assumptions are being made: - * - CLF_RENAME will not be removed - * - CLF_JOBID will not be added without CLF_RENAME being added too - * - * @param[in,out] rec The record to remap. - * @param[in] crf_wanted Flags describing the desired extensions. 
- */ -static inline void changelog_remap_rec(struct changelog_rec *rec, - enum changelog_rec_flags crf_wanted) -{ - char *jid_mov, *rnm_mov; - - crf_wanted &= CLF_SUPPORTED; - - if ((rec->cr_flags & CLF_SUPPORTED) == crf_wanted) - return; - - /* First move the variable-length name field */ - memmove((char *)rec + changelog_rec_offset(crf_wanted), - changelog_rec_name(rec), rec->cr_namelen); - - /* Locations of jobid and rename extensions in the remapped record */ - jid_mov = (char *)rec + - changelog_rec_offset(crf_wanted & ~CLF_JOBID); - rnm_mov = (char *)rec + - changelog_rec_offset(crf_wanted & ~(CLF_JOBID | CLF_RENAME)); - - /* Move the extension fields to the desired positions */ - if ((crf_wanted & CLF_JOBID) && (rec->cr_flags & CLF_JOBID)) - memmove(jid_mov, changelog_rec_jobid(rec), - sizeof(struct changelog_ext_jobid)); - - if ((crf_wanted & CLF_RENAME) && (rec->cr_flags & CLF_RENAME)) - memmove(rnm_mov, changelog_rec_rename(rec), - sizeof(struct changelog_ext_rename)); - - /* Clear newly added fields */ - if ((crf_wanted & CLF_JOBID) && !(rec->cr_flags & CLF_JOBID)) - memset(jid_mov, 0, sizeof(struct changelog_ext_jobid)); - - if ((crf_wanted & CLF_RENAME) && !(rec->cr_flags & CLF_RENAME)) - memset(rnm_mov, 0, sizeof(struct changelog_ext_rename)); - - /* Update the record's flags accordingly */ - rec->cr_flags = (rec->cr_flags & CLF_FLAGMASK) | crf_wanted; -} - -struct ioc_changelog { - __u64 icc_recno; - __u32 icc_mdtindex; - __u32 icc_id; - __u32 icc_flags; -}; - -enum changelog_message_type { - CL_RECORD = 10, /* message is a changelog_rec */ - CL_EOF = 11, /* at end of current changelog */ -}; - -/********* Misc **********/ - -struct ioc_data_version { - __u64 idv_version; - __u64 idv_flags; /* See LL_DV_xxx */ -}; - -#define LL_DV_RD_FLUSH (1 << 0) /* Flush dirty pages from clients */ -#define LL_DV_WR_FLUSH (1 << 1) /* Flush all caching pages from clients */ - -#ifndef offsetof -# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ 
*)0)->memb))) -#endif - -#define dot_lustre_name ".lustre" - -/********* HSM **********/ - -/** HSM per-file state - * See HSM_FLAGS below. - */ -enum hsm_states { - HS_NONE = 0x00000000, - HS_EXISTS = 0x00000001, - HS_DIRTY = 0x00000002, - HS_RELEASED = 0x00000004, - HS_ARCHIVED = 0x00000008, - HS_NORELEASE = 0x00000010, - HS_NOARCHIVE = 0x00000020, - HS_LOST = 0x00000040, -}; - -/* HSM user-setable flags. */ -#define HSM_USER_MASK (HS_NORELEASE | HS_NOARCHIVE | HS_DIRTY) - -/* Other HSM flags. */ -#define HSM_STATUS_MASK (HS_EXISTS | HS_LOST | HS_RELEASED | HS_ARCHIVED) - -/* - * All HSM-related possible flags that could be applied to a file. - * This should be kept in sync with hsm_states. - */ -#define HSM_FLAGS_MASK (HSM_USER_MASK | HSM_STATUS_MASK) - -/** - * HSM request progress state - */ -enum hsm_progress_states { - HPS_WAITING = 1, - HPS_RUNNING = 2, - HPS_DONE = 3, -}; - -#define HPS_NONE 0 - -static inline char *hsm_progress_state2name(enum hsm_progress_states s) -{ - switch (s) { - case HPS_WAITING: return "waiting"; - case HPS_RUNNING: return "running"; - case HPS_DONE: return "done"; - default: return "unknown"; - } -} - -struct hsm_extent { - __u64 offset; - __u64 length; -} __packed; - -/** - * Current HSM states of a Lustre file. - * - * This structure purpose is to be sent to user-space mainly. It describes the - * current HSM flags and in-progress action. - */ -struct hsm_user_state { - /** Current HSM states, from enum hsm_states. */ - __u32 hus_states; - __u32 hus_archive_id; - /** The current undergoing action, if there is one */ - __u32 hus_in_progress_state; - __u32 hus_in_progress_action; - struct hsm_extent hus_in_progress_location; - char hus_extended_info[]; -}; - -struct hsm_state_set_ioc { - struct lu_fid hssi_fid; - __u64 hssi_setmask; - __u64 hssi_clearmask; -}; - -/* - * This structure describes the current in-progress action for a file. 
- * it is returned to user space and send over the wire - */ -struct hsm_current_action { - /** The current undergoing action, if there is one */ - /* state is one of hsm_progress_states */ - __u32 hca_state; - /* action is one of hsm_user_action */ - __u32 hca_action; - struct hsm_extent hca_location; -}; - -/***** HSM user requests ******/ -/* User-generated (lfs/ioctl) request types */ -enum hsm_user_action { - HUA_NONE = 1, /* no action (noop) */ - HUA_ARCHIVE = 10, /* copy to hsm */ - HUA_RESTORE = 11, /* prestage */ - HUA_RELEASE = 12, /* drop ost objects */ - HUA_REMOVE = 13, /* remove from archive */ - HUA_CANCEL = 14 /* cancel a request */ -}; - -static inline char *hsm_user_action2name(enum hsm_user_action a) -{ - switch (a) { - case HUA_NONE: return "NOOP"; - case HUA_ARCHIVE: return "ARCHIVE"; - case HUA_RESTORE: return "RESTORE"; - case HUA_RELEASE: return "RELEASE"; - case HUA_REMOVE: return "REMOVE"; - case HUA_CANCEL: return "CANCEL"; - default: return "UNKNOWN"; - } -} - -/* - * List of hr_flags (bit field) - */ -#define HSM_FORCE_ACTION 0x0001 -/* used by CT, connot be set by user */ -#define HSM_GHOST_COPY 0x0002 - -/** - * Contains all the fixed part of struct hsm_user_request. 
- * - */ -struct hsm_request { - __u32 hr_action; /* enum hsm_user_action */ - __u32 hr_archive_id; /* archive id, used only with HUA_ARCHIVE */ - __u64 hr_flags; /* request flags */ - __u32 hr_itemcount; /* item count in hur_user_item vector */ - __u32 hr_data_len; -}; - -struct hsm_user_item { - struct lu_fid hui_fid; - struct hsm_extent hui_extent; -} __packed; - -struct hsm_user_request { - struct hsm_request hur_request; - struct hsm_user_item hur_user_item[0]; - /* extra data blob at end of struct (after all - * hur_user_items), only use helpers to access it - */ -} __packed; - -/** Return pointer to data field in a hsm user request */ -static inline void *hur_data(struct hsm_user_request *hur) -{ - return &hur->hur_user_item[hur->hur_request.hr_itemcount]; -} - -/** - * Compute the current length of the provided hsm_user_request. This returns -1 - * instead of an errno because ssize_t is defined to be only [ -1, SSIZE_MAX ] - * - * return -1 on bounds check error. - */ -static inline ssize_t hur_len(struct hsm_user_request *hur) -{ - __u64 size; - - /* can't overflow a __u64 since hr_itemcount is only __u32 */ - size = offsetof(struct hsm_user_request, hur_user_item[0]) + - (__u64)hur->hur_request.hr_itemcount * - sizeof(hur->hur_user_item[0]) + hur->hur_request.hr_data_len; - - if (size != (ssize_t)size) - return -1; - - return size; -} - -/****** HSM RPCs to copytool *****/ -/* Message types the copytool may receive */ -enum hsm_message_type { - HMT_ACTION_LIST = 100, /* message is a hsm_action_list */ -}; - -/* Actions the copytool may be instructed to take for a given action_item */ -enum hsm_copytool_action { - HSMA_NONE = 10, /* no action */ - HSMA_ARCHIVE = 20, /* arbitrary offset */ - HSMA_RESTORE = 21, - HSMA_REMOVE = 22, - HSMA_CANCEL = 23 -}; - -static inline char *hsm_copytool_action2name(enum hsm_copytool_action a) -{ - switch (a) { - case HSMA_NONE: return "NOOP"; - case HSMA_ARCHIVE: return "ARCHIVE"; - case HSMA_RESTORE: return "RESTORE"; - 
case HSMA_REMOVE: return "REMOVE"; - case HSMA_CANCEL: return "CANCEL"; - default: return "UNKNOWN"; - } -} - -/* Copytool item action description */ -struct hsm_action_item { - __u32 hai_len; /* valid size of this struct */ - __u32 hai_action; /* hsm_copytool_action, but use known size */ - struct lu_fid hai_fid; /* Lustre FID to operated on */ - struct lu_fid hai_dfid; /* fid used for data access */ - struct hsm_extent hai_extent; /* byte range to operate on */ - __u64 hai_cookie; /* action cookie from coordinator */ - __u64 hai_gid; /* grouplock id */ - char hai_data[0]; /* variable length */ -} __packed; - -/* - * helper function which print in hexa the first bytes of - * hai opaque field - * \param hai [IN] record to print - * \param buffer [OUT] output buffer - * \param len [IN] max buffer len - * \retval buffer - */ -static inline char *hai_dump_data_field(struct hsm_action_item *hai, - char *buffer, size_t len) -{ - int i, data_len; - char *ptr; - - ptr = buffer; - data_len = hai->hai_len - sizeof(*hai); - for (i = 0; (i < data_len) && (len > 2); i++) { - snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]); - ptr += 2; - len -= 2; - } - - *ptr = '\0'; - - return buffer; -} - -/* Copytool action list */ -#define HAL_VERSION 1 -#define HAL_MAXSIZE LNET_MTU /* bytes, used in userspace only */ -struct hsm_action_list { - __u32 hal_version; - __u32 hal_count; /* number of hai's to follow */ - __u64 hal_compound_id; /* returned by coordinator */ - __u64 hal_flags; - __u32 hal_archive_id; /* which archive backend */ - __u32 padding1; - char hal_fsname[0]; /* null-terminated */ - /* struct hsm_action_item[hal_count] follows, aligned on 8-byte - * boundaries. 
See hai_first - */ -} __packed; - -#ifndef HAVE_CFS_SIZE_ROUND -static inline int cfs_size_round(int val) -{ - return (val + 7) & (~0x7); -} - -#define HAVE_CFS_SIZE_ROUND -#endif - -/* Return pointer to first hai in action list */ -static inline struct hsm_action_item *hai_first(struct hsm_action_list *hal) -{ - return (struct hsm_action_item *)(hal->hal_fsname + - cfs_size_round(strlen(hal-> \ - hal_fsname) - + 1)); -} - -/* Return pointer to next hai */ -static inline struct hsm_action_item *hai_next(struct hsm_action_item *hai) -{ - return (struct hsm_action_item *)((char *)hai + - cfs_size_round(hai->hai_len)); -} - -/* Return size of an hsm_action_list */ -static inline int hal_size(struct hsm_action_list *hal) -{ - int i, sz; - struct hsm_action_item *hai; - - sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1); - hai = hai_first(hal); - for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai)) - sz += cfs_size_round(hai->hai_len); - - return sz; -} - -/* HSM file import - * describe the attributes to be set on imported file - */ -struct hsm_user_import { - __u64 hui_size; - __u64 hui_atime; - __u64 hui_mtime; - __u32 hui_atime_ns; - __u32 hui_mtime_ns; - __u32 hui_uid; - __u32 hui_gid; - __u32 hui_mode; - __u32 hui_archive_id; -}; - -/* Copytool progress reporting */ -#define HP_FLAG_COMPLETED 0x01 -#define HP_FLAG_RETRY 0x02 - -struct hsm_progress { - struct lu_fid hp_fid; - __u64 hp_cookie; - struct hsm_extent hp_extent; - __u16 hp_flags; - __u16 hp_errval; /* positive val */ - __u32 padding; -}; - -struct hsm_copy { - __u64 hc_data_version; - __u16 hc_flags; - __u16 hc_errval; /* positive val */ - __u32 padding; - struct hsm_action_item hc_hai; -}; - -/** @} lustreuser */ - -#endif /* _LUSTRE_USER_H */ diff --git a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h b/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h deleted file mode 100644 index 19c9135e2273..000000000000 --- 
a/drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef _LUSTRE_VER_H_ -#define _LUSTRE_VER_H_ - -#define LUSTRE_MAJOR 2 -#define LUSTRE_MINOR 6 -#define LUSTRE_PATCH 99 -#define LUSTRE_FIX 0 -#define LUSTRE_VERSION_STRING "2.6.99" - -#define OBD_OCD_VERSION(major, minor, patch, fix) \ - (((major) << 24) + ((minor) << 16) + ((patch) << 8) + (fix)) - -#define OBD_OCD_VERSION_MAJOR(version) ((int)((version) >> 24) & 255) -#define OBD_OCD_VERSION_MINOR(version) ((int)((version) >> 16) & 255) -#define OBD_OCD_VERSION_PATCH(version) ((int)((version) >> 8) & 255) -#define OBD_OCD_VERSION_FIX(version) ((int)((version) >> 0) & 255) - -#define LUSTRE_VERSION_CODE \ - OBD_OCD_VERSION(LUSTRE_MAJOR, LUSTRE_MINOR, LUSTRE_PATCH, LUSTRE_FIX) - -/* - * If lustre version of client and servers it connects to differs by more - * than this amount, client would issue a warning. - */ -#define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0) - -#endif diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig deleted file mode 100644 index ad049e6f24e4..000000000000 --- a/drivers/staging/lustre/lnet/Kconfig +++ /dev/null @@ -1,46 +0,0 @@ -config LNET - tristate "Lustre networking subsystem (LNet)" - depends on INET - help - The Lustre network layer, also known as LNet, is a networking abstaction - level API that was initially created to allow Lustre Filesystem to utilize - very different networks like tcp and ib verbs in a uniform way. In the - case of Lustre routers only the LNet layer is required. Lately other - projects are also looking into using LNet as their networking API as well. - -config LNET_MAX_PAYLOAD - int "Lustre lnet max transfer payload (default 1MB)" - depends on LNET - default "1048576" - help - This option defines the maximum size of payload in bytes that lnet - can put into its transport. - - If unsure, use default. 
- -config LNET_SELFTEST - tristate "Lustre networking self testing" - depends on LNET - help - Choose Y here if you want to do lnet self testing. To compile this - as a module, choose M here: the module will be called lnet_selftest. - - To compile this as a kernel modules, choose M here and it will be - called lnet_selftest. - - If unsure, say N. - - See also http://wiki.lustre.org/ - -config LNET_XPRT_IB - tristate "LNET infiniband support" - depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS - default LNET && INFINIBAND - help - This option allows the LNET users to use infiniband as an - RDMA-enabled transport. - - To compile this as a kernel module, choose M here and it will be - called ko2iblnd. - - If unsure, say N. diff --git a/drivers/staging/lustre/lnet/Makefile b/drivers/staging/lustre/lnet/Makefile deleted file mode 100644 index 0a380fe88ce8..000000000000 --- a/drivers/staging/lustre/lnet/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_LNET) += libcfs/ lnet/ klnds/ selftest/ diff --git a/drivers/staging/lustre/lnet/klnds/Makefile b/drivers/staging/lustre/lnet/klnds/Makefile deleted file mode 100644 index c23e4f67f837..000000000000 --- a/drivers/staging/lustre/lnet/klnds/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_LNET) += o2iblnd/ socklnd/ diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile b/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile deleted file mode 100644 index 4affe1d79948..000000000000 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LNET_XPRT_IB) += ko2iblnd.o -ko2iblnd-y := o2iblnd.o o2iblnd_cb.o o2iblnd_modparams.o diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c deleted file mode 100644 index f0b4eb42bc1d..000000000000 --- 
a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ /dev/null @@ -1,2958 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/klnds/o2iblnd/o2iblnd.c - * - * Author: Eric Barton - */ - -#include -#include -#include "o2iblnd.h" - -static struct lnet_lnd the_o2iblnd; - -struct kib_data kiblnd_data; - -static __u32 kiblnd_cksum(void *ptr, int nob) -{ - char *c = ptr; - __u32 sum = 0; - - while (nob-- > 0) - sum = ((sum << 1) | (sum >> 31)) + *c++; - - /* ensure I don't return 0 (== no checksum) */ - return !sum ? 
1 : sum; -} - -static char *kiblnd_msgtype2str(int type) -{ - switch (type) { - case IBLND_MSG_CONNREQ: - return "CONNREQ"; - - case IBLND_MSG_CONNACK: - return "CONNACK"; - - case IBLND_MSG_NOOP: - return "NOOP"; - - case IBLND_MSG_IMMEDIATE: - return "IMMEDIATE"; - - case IBLND_MSG_PUT_REQ: - return "PUT_REQ"; - - case IBLND_MSG_PUT_NAK: - return "PUT_NAK"; - - case IBLND_MSG_PUT_ACK: - return "PUT_ACK"; - - case IBLND_MSG_PUT_DONE: - return "PUT_DONE"; - - case IBLND_MSG_GET_REQ: - return "GET_REQ"; - - case IBLND_MSG_GET_DONE: - return "GET_DONE"; - - default: - return "???"; - } -} - -static int kiblnd_msgtype2size(int type) -{ - const int hdr_size = offsetof(struct kib_msg, ibm_u); - - switch (type) { - case IBLND_MSG_CONNREQ: - case IBLND_MSG_CONNACK: - return hdr_size + sizeof(struct kib_connparams); - - case IBLND_MSG_NOOP: - return hdr_size; - - case IBLND_MSG_IMMEDIATE: - return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]); - - case IBLND_MSG_PUT_REQ: - return hdr_size + sizeof(struct kib_putreq_msg); - - case IBLND_MSG_PUT_ACK: - return hdr_size + sizeof(struct kib_putack_msg); - - case IBLND_MSG_GET_REQ: - return hdr_size + sizeof(struct kib_get_msg); - - case IBLND_MSG_PUT_NAK: - case IBLND_MSG_PUT_DONE: - case IBLND_MSG_GET_DONE: - return hdr_size + sizeof(struct kib_completion_msg); - default: - return -1; - } -} - -static int kiblnd_unpack_rd(struct kib_msg *msg, int flip) -{ - struct kib_rdma_desc *rd; - int msg_size; - int nob; - int n; - int i; - - LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || - msg->ibm_type == IBLND_MSG_PUT_ACK); - - rd = msg->ibm_type == IBLND_MSG_GET_REQ ? 
- &msg->ibm_u.get.ibgm_rd : - &msg->ibm_u.putack.ibpam_rd; - - if (flip) { - __swab32s(&rd->rd_key); - __swab32s(&rd->rd_nfrags); - } - - n = rd->rd_nfrags; - - nob = offsetof(struct kib_msg, ibm_u) + - kiblnd_rd_msg_size(rd, msg->ibm_type, n); - - if (msg->ibm_nob < nob) { - CERROR("Short %s: %d(%d)\n", - kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob); - return 1; - } - - msg_size = kiblnd_rd_size(rd); - if (msg_size <= 0 || msg_size > LNET_MAX_PAYLOAD) { - CERROR("Bad msg_size: %d, should be 0 < n <= %d\n", - msg_size, LNET_MAX_PAYLOAD); - return 1; - } - - if (!flip) - return 0; - - for (i = 0; i < n; i++) { - __swab32s(&rd->rd_frags[i].rf_nob); - __swab64s(&rd->rd_frags[i].rf_addr); - } - - return 0; -} - -void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version, - int credits, lnet_nid_t dstnid, __u64 dststamp) -{ - struct kib_net *net = ni->ni_data; - - /* - * CAVEAT EMPTOR! all message fields not set here should have been - * initialised previously. - */ - msg->ibm_magic = IBLND_MSG_MAGIC; - msg->ibm_version = version; - /* ibm_type */ - msg->ibm_credits = credits; - /* ibm_nob */ - msg->ibm_cksum = 0; - msg->ibm_srcnid = ni->ni_nid; - msg->ibm_srcstamp = net->ibn_incarnation; - msg->ibm_dstnid = dstnid; - msg->ibm_dststamp = dststamp; - - if (*kiblnd_tunables.kib_cksum) { - /* NB ibm_cksum zero while computing cksum */ - msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob); - } -} - -int kiblnd_unpack_msg(struct kib_msg *msg, int nob) -{ - const int hdr_size = offsetof(struct kib_msg, ibm_u); - __u32 msg_cksum; - __u16 version; - int msg_nob; - int flip; - - /* 6 bytes are enough to have received magic + version */ - if (nob < 6) { - CERROR("Short message: %d\n", nob); - return -EPROTO; - } - - if (msg->ibm_magic == IBLND_MSG_MAGIC) { - flip = 0; - } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) { - flip = 1; - } else { - CERROR("Bad magic: %08x\n", msg->ibm_magic); - return -EPROTO; - } - - version = flip ? 
__swab16(msg->ibm_version) : msg->ibm_version; - if (version != IBLND_MSG_VERSION && - version != IBLND_MSG_VERSION_1) { - CERROR("Bad version: %x\n", version); - return -EPROTO; - } - - if (nob < hdr_size) { - CERROR("Short message: %d\n", nob); - return -EPROTO; - } - - msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob; - if (msg_nob > nob) { - CERROR("Short message: got %d, wanted %d\n", nob, msg_nob); - return -EPROTO; - } - - /* - * checksum must be computed with ibm_cksum zero and BEFORE anything - * gets flipped - */ - msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum; - msg->ibm_cksum = 0; - if (msg_cksum && - msg_cksum != kiblnd_cksum(msg, msg_nob)) { - CERROR("Bad checksum\n"); - return -EPROTO; - } - - msg->ibm_cksum = msg_cksum; - - if (flip) { - /* leave magic unflipped as a clue to peer endianness */ - msg->ibm_version = version; - BUILD_BUG_ON(sizeof(msg->ibm_type) != 1); - BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1); - msg->ibm_nob = msg_nob; - __swab64s(&msg->ibm_srcnid); - __swab64s(&msg->ibm_srcstamp); - __swab64s(&msg->ibm_dstnid); - __swab64s(&msg->ibm_dststamp); - } - - if (msg->ibm_srcnid == LNET_NID_ANY) { - CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid)); - return -EPROTO; - } - - if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) { - CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type), - msg_nob, kiblnd_msgtype2size(msg->ibm_type)); - return -EPROTO; - } - - switch (msg->ibm_type) { - default: - CERROR("Unknown message type %x\n", msg->ibm_type); - return -EPROTO; - - case IBLND_MSG_NOOP: - case IBLND_MSG_IMMEDIATE: - case IBLND_MSG_PUT_REQ: - break; - - case IBLND_MSG_PUT_ACK: - case IBLND_MSG_GET_REQ: - if (kiblnd_unpack_rd(msg, flip)) - return -EPROTO; - break; - - case IBLND_MSG_PUT_NAK: - case IBLND_MSG_PUT_DONE: - case IBLND_MSG_GET_DONE: - if (flip) - __swab32s(&msg->ibm_u.completion.ibcm_status); - break; - - case IBLND_MSG_CONNREQ: - case IBLND_MSG_CONNACK: - if (flip) { - 
__swab16s(&msg->ibm_u.connparams.ibcp_queue_depth); - __swab16s(&msg->ibm_u.connparams.ibcp_max_frags); - __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size); - } - break; - } - return 0; -} - -int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp, - lnet_nid_t nid) -{ - struct kib_peer *peer; - struct kib_net *net = ni->ni_data; - int cpt = lnet_cpt_of_nid(nid); - unsigned long flags; - - LASSERT(net); - LASSERT(nid != LNET_NID_ANY); - - peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt); - if (!peer) { - CERROR("Cannot allocate peer\n"); - return -ENOMEM; - } - - peer->ibp_ni = ni; - peer->ibp_nid = nid; - peer->ibp_error = 0; - peer->ibp_last_alive = 0; - peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni); - peer->ibp_queue_depth = ni->ni_peertxcredits; - atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ - - INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ - INIT_LIST_HEAD(&peer->ibp_conns); - INIT_LIST_HEAD(&peer->ibp_tx_queue); - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT(!net->ibn_shutdown); - - /* npeers only grows with the global lock held */ - atomic_inc(&net->ibn_npeers); - - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - *peerp = peer; - return 0; -} - -void kiblnd_destroy_peer(struct kib_peer *peer) -{ - struct kib_net *net = peer->ibp_ni->ni_data; - - LASSERT(net); - LASSERT(!atomic_read(&peer->ibp_refcount)); - LASSERT(!kiblnd_peer_active(peer)); - LASSERT(kiblnd_peer_idle(peer)); - LASSERT(list_empty(&peer->ibp_tx_queue)); - - kfree(peer); - - /* - * NB a peer's connections keep a reference on their peer until - * they are destroyed, so we can be assured that _all_ state to do - * with this peer has been cleaned up when its refcount drops to - * zero. 
- */ - atomic_dec(&net->ibn_npeers); -} - -struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid) -{ - /* - * the caller is responsible for accounting the additional reference - * that this creates - */ - struct list_head *peer_list = kiblnd_nid2peerlist(nid); - struct list_head *tmp; - struct kib_peer *peer; - - list_for_each(tmp, peer_list) { - peer = list_entry(tmp, struct kib_peer, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); - - if (peer->ibp_nid != nid) - continue; - - CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n", - peer, libcfs_nid2str(nid), - atomic_read(&peer->ibp_refcount), - peer->ibp_version); - return peer; - } - return NULL; -} - -void kiblnd_unlink_peer_locked(struct kib_peer *peer) -{ - LASSERT(list_empty(&peer->ibp_conns)); - - LASSERT(kiblnd_peer_active(peer)); - list_del_init(&peer->ibp_list); - /* lose peerlist's ref */ - kiblnd_peer_decref(peer); -} - -static int kiblnd_get_peer_info(struct lnet_ni *ni, int index, - lnet_nid_t *nidp, int *count) -{ - struct kib_peer *peer; - struct list_head *ptmp; - int i; - unsigned long flags; - - read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, struct kib_peer, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); - - if (peer->ibp_ni != ni) - continue; - - if (index-- > 0) - continue; - - *nidp = peer->ibp_nid; - *count = atomic_read(&peer->ibp_refcount); - - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); - return 0; - } - } - - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - return -ENOENT; -} - -static void kiblnd_del_peer_locked(struct kib_peer *peer) -{ - struct list_head *ctmp; - struct list_head *cnxt; - struct kib_conn *conn; - - if (list_empty(&peer->ibp_conns)) { - kiblnd_unlink_peer_locked(peer); - } else { - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, struct kib_conn, ibc_list); - - 
kiblnd_close_conn_locked(conn, 0); - } - /* NB closing peer's last conn unlinked it. */ - } - /* - * NB peer now unlinked; might even be freed if the peer table had the - * last ref on it. - */ -} - -static int kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid) -{ - LIST_HEAD(zombies); - struct list_head *ptmp; - struct list_head *pnxt; - struct kib_peer *peer; - int lo; - int hi; - int i; - unsigned long flags; - int rc = -ENOENT; - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - if (nid != LNET_NID_ANY) { - lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - } else { - lo = 0; - hi = kiblnd_data.kib_peer_hash_size - 1; - } - - for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, struct kib_peer, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); - - if (peer->ibp_ni != ni) - continue; - - if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid)) - continue; - - if (!list_empty(&peer->ibp_tx_queue)) { - LASSERT(list_empty(&peer->ibp_conns)); - - list_splice_init(&peer->ibp_tx_queue, - &zombies); - } - - kiblnd_del_peer_locked(peer); - rc = 0; /* matched something */ - } - } - - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - kiblnd_txlist_done(ni, &zombies, -EIO); - - return rc; -} - -static struct kib_conn *kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index) -{ - struct kib_peer *peer; - struct list_head *ptmp; - struct kib_conn *conn; - struct list_head *ctmp; - int i; - unsigned long flags; - - read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, struct kib_peer, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); - - if (peer->ibp_ni != ni) - continue; - - list_for_each(ctmp, &peer->ibp_conns) { - if (index-- > 0) - continue; - - conn = list_entry(ctmp, struct kib_conn, - 
ibc_list); - kiblnd_conn_addref(conn); - read_unlock_irqrestore( - &kiblnd_data.kib_global_lock, - flags); - return conn; - } - } - } - - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - return NULL; -} - -int kiblnd_translate_mtu(int value) -{ - switch (value) { - default: - return -1; - case 0: - return 0; - case 256: - return IB_MTU_256; - case 512: - return IB_MTU_512; - case 1024: - return IB_MTU_1024; - case 2048: - return IB_MTU_2048; - case 4096: - return IB_MTU_4096; - } -} - -static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) -{ - int mtu; - - /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ - if (!cmid->route.path_rec) - return; - - mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); - LASSERT(mtu >= 0); - if (mtu) - cmid->route.path_rec->mtu = mtu; -} - -static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt) -{ - cpumask_var_t *mask; - int vectors; - int off; - int i; - lnet_nid_t nid = conn->ibc_peer->ibp_nid; - - vectors = conn->ibc_cmid->device->num_comp_vectors; - if (vectors <= 1) - return 0; - - mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt); - if (!mask) - return 0; - - /* hash NID to CPU id in this partition... */ - off = do_div(nid, cpumask_weight(*mask)); - for_each_cpu(i, *mask) { - if (!off--) - return i % vectors; - } - - LBUG(); - return 1; -} - -struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid, - int state, int version) -{ - /* - * CAVEAT EMPTOR: - * If the new conn is created successfully it takes over the caller's - * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself - * is destroyed. On failure, the caller's ref on 'peer' remains and - * she must dispose of 'cmid'. (Actually I'd block forever if I tried - * to destroy 'cmid' here since I'm called from the CM which still has - * its ref on 'cmid'). 
- */ - rwlock_t *glock = &kiblnd_data.kib_global_lock; - struct kib_net *net = peer->ibp_ni->ni_data; - struct kib_dev *dev; - struct ib_qp_init_attr *init_qp_attr; - struct kib_sched_info *sched; - struct ib_cq_init_attr cq_attr = {}; - struct kib_conn *conn; - struct ib_cq *cq; - unsigned long flags; - int cpt; - int rc; - int i; - - LASSERT(net); - LASSERT(!in_interrupt()); - - dev = net->ibn_dev; - - cpt = lnet_cpt_of_nid(peer->ibp_nid); - sched = kiblnd_data.kib_scheds[cpt]; - - LASSERT(sched->ibs_nthreads > 0); - - init_qp_attr = kzalloc_cpt(sizeof(*init_qp_attr), GFP_NOFS, cpt); - if (!init_qp_attr) { - CERROR("Can't allocate qp_attr for %s\n", - libcfs_nid2str(peer->ibp_nid)); - goto failed_0; - } - - conn = kzalloc_cpt(sizeof(*conn), GFP_NOFS, cpt); - if (!conn) { - CERROR("Can't allocate connection for %s\n", - libcfs_nid2str(peer->ibp_nid)); - goto failed_1; - } - - conn->ibc_state = IBLND_CONN_INIT; - conn->ibc_version = version; - conn->ibc_peer = peer; /* I take the caller's ref */ - cmid->context = conn; /* for future CM callbacks */ - conn->ibc_cmid = cmid; - conn->ibc_max_frags = peer->ibp_max_frags; - conn->ibc_queue_depth = peer->ibp_queue_depth; - - INIT_LIST_HEAD(&conn->ibc_early_rxs); - INIT_LIST_HEAD(&conn->ibc_tx_noops); - INIT_LIST_HEAD(&conn->ibc_tx_queue); - INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd); - INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred); - INIT_LIST_HEAD(&conn->ibc_active_txs); - spin_lock_init(&conn->ibc_lock); - - conn->ibc_connvars = kzalloc_cpt(sizeof(*conn->ibc_connvars), GFP_NOFS, cpt); - if (!conn->ibc_connvars) { - CERROR("Can't allocate in-progress connection state\n"); - goto failed_2; - } - - write_lock_irqsave(glock, flags); - if (dev->ibd_failover) { - write_unlock_irqrestore(glock, flags); - CERROR("%s: failover in progress\n", dev->ibd_ifname); - goto failed_2; - } - - if (dev->ibd_hdev->ibh_ibdev != cmid->device) { - /* wakeup failover thread and teardown connection */ - if (kiblnd_dev_can_failover(dev)) { - 
list_add_tail(&dev->ibd_fail_list, - &kiblnd_data.kib_failed_devs); - wake_up(&kiblnd_data.kib_failover_waitq); - } - - write_unlock_irqrestore(glock, flags); - CERROR("cmid HCA(%s), kib_dev(%s) need failover\n", - cmid->device->name, dev->ibd_ifname); - goto failed_2; - } - - kiblnd_hdev_addref_locked(dev->ibd_hdev); - conn->ibc_hdev = dev->ibd_hdev; - - kiblnd_setup_mtu_locked(cmid); - - write_unlock_irqrestore(glock, flags); - - conn->ibc_rxs = kzalloc_cpt(IBLND_RX_MSGS(conn) * sizeof(struct kib_rx), - GFP_NOFS, cpt); - if (!conn->ibc_rxs) { - CERROR("Cannot allocate RX buffers\n"); - goto failed_2; - } - - rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt, - IBLND_RX_MSG_PAGES(conn)); - if (rc) - goto failed_2; - - kiblnd_map_rx_descs(conn); - - cq_attr.cqe = IBLND_CQ_ENTRIES(conn); - cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); - cq = ib_create_cq(cmid->device, - kiblnd_cq_completion, kiblnd_cq_event, conn, - &cq_attr); - if (IS_ERR(cq)) { - CERROR("Failed to create CQ with %d CQEs: %ld\n", - IBLND_CQ_ENTRIES(conn), PTR_ERR(cq)); - goto failed_2; - } - - conn->ibc_cq = cq; - - rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); - if (rc) { - CERROR("Can't request completion notification: %d\n", rc); - goto failed_2; - } - - init_qp_attr->event_handler = kiblnd_qp_event; - init_qp_attr->qp_context = conn; - init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn); - init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn); - init_qp_attr->cap.max_send_sge = 1; - init_qp_attr->cap.max_recv_sge = 1; - init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR; - init_qp_attr->qp_type = IB_QPT_RC; - init_qp_attr->send_cq = cq; - init_qp_attr->recv_cq = cq; - - conn->ibc_sched = sched; - - rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr); - if (rc) { - CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n", - rc, init_qp_attr->cap.max_send_wr, - init_qp_attr->cap.max_recv_wr); - goto failed_2; - } - - kfree(init_qp_attr); - - /* 1 ref for caller and each rxmsg 
*/ - atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn)); - conn->ibc_nrx = IBLND_RX_MSGS(conn); - - /* post receives */ - for (i = 0; i < IBLND_RX_MSGS(conn); i++) { - rc = kiblnd_post_rx(&conn->ibc_rxs[i], - IBLND_POSTRX_NO_CREDIT); - if (rc) { - CERROR("Can't post rxmsg: %d\n", rc); - - /* Make posted receives complete */ - kiblnd_abort_receives(conn); - - /* - * correct # of posted buffers - * NB locking needed now I'm racing with completion - */ - spin_lock_irqsave(&sched->ibs_lock, flags); - conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i; - spin_unlock_irqrestore(&sched->ibs_lock, flags); - - /* - * cmid will be destroyed by CM(ofed) after cm_callback - * returned, so we can't refer it anymore - * (by kiblnd_connd()->kiblnd_destroy_conn) - */ - rdma_destroy_qp(conn->ibc_cmid); - conn->ibc_cmid = NULL; - - /* Drop my own and unused rxbuffer refcounts */ - while (i++ <= IBLND_RX_MSGS(conn)) - kiblnd_conn_decref(conn); - - return NULL; - } - } - - /* Init successful! */ - LASSERT(state == IBLND_CONN_ACTIVE_CONNECT || - state == IBLND_CONN_PASSIVE_WAIT); - conn->ibc_state = state; - - /* 1 more conn */ - atomic_inc(&net->ibn_nconns); - return conn; - - failed_2: - kiblnd_destroy_conn(conn); - kfree(conn); - failed_1: - kfree(init_qp_attr); - failed_0: - return NULL; -} - -void kiblnd_destroy_conn(struct kib_conn *conn) -{ - struct rdma_cm_id *cmid = conn->ibc_cmid; - struct kib_peer *peer = conn->ibc_peer; - int rc; - - LASSERT(!in_interrupt()); - LASSERT(!atomic_read(&conn->ibc_refcount)); - LASSERT(list_empty(&conn->ibc_early_rxs)); - LASSERT(list_empty(&conn->ibc_tx_noops)); - LASSERT(list_empty(&conn->ibc_tx_queue)); - LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd)); - LASSERT(list_empty(&conn->ibc_tx_queue_nocred)); - LASSERT(list_empty(&conn->ibc_active_txs)); - LASSERT(!conn->ibc_noops_posted); - LASSERT(!conn->ibc_nsends_posted); - - switch (conn->ibc_state) { - default: - /* conn must be completely disengaged from the network */ - LBUG(); - - case 
IBLND_CONN_DISCONNECTED: - /* connvars should have been freed already */ - LASSERT(!conn->ibc_connvars); - break; - - case IBLND_CONN_INIT: - break; - } - - /* conn->ibc_cmid might be destroyed by CM already */ - if (cmid && cmid->qp) - rdma_destroy_qp(cmid); - - if (conn->ibc_cq) { - rc = ib_destroy_cq(conn->ibc_cq); - if (rc) - CWARN("Error destroying CQ: %d\n", rc); - } - - if (conn->ibc_rx_pages) - kiblnd_unmap_rx_descs(conn); - - kfree(conn->ibc_rxs); - kfree(conn->ibc_connvars); - - if (conn->ibc_hdev) - kiblnd_hdev_decref(conn->ibc_hdev); - - /* See CAVEAT EMPTOR above in kiblnd_create_conn */ - if (conn->ibc_state != IBLND_CONN_INIT) { - struct kib_net *net = peer->ibp_ni->ni_data; - - kiblnd_peer_decref(peer); - rdma_destroy_id(cmid); - atomic_dec(&net->ibn_nconns); - } -} - -int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why) -{ - struct kib_conn *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; - - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, struct kib_conn, ibc_list); - - CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n", - libcfs_nid2str(peer->ibp_nid), - conn->ibc_version, why); - - kiblnd_close_conn_locked(conn, why); - count++; - } - - return count; -} - -int kiblnd_close_stale_conns_locked(struct kib_peer *peer, - int version, __u64 incarnation) -{ - struct kib_conn *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; - - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, struct kib_conn, ibc_list); - - if (conn->ibc_version == version && - conn->ibc_incarnation == incarnation) - continue; - - CDEBUG(D_NET, - "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n", - libcfs_nid2str(peer->ibp_nid), - conn->ibc_version, conn->ibc_incarnation, - version, incarnation); - - kiblnd_close_conn_locked(conn, -ESTALE); - count++; - } - - return count; -} - -static int kiblnd_close_matching_conns(struct lnet_ni 
*ni, lnet_nid_t nid) -{ - struct kib_peer *peer; - struct list_head *ptmp; - struct list_head *pnxt; - int lo; - int hi; - int i; - unsigned long flags; - int count = 0; - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - if (nid != LNET_NID_ANY) { - lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - } else { - lo = 0; - hi = kiblnd_data.kib_peer_hash_size - 1; - } - - for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, struct kib_peer, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); - - if (peer->ibp_ni != ni) - continue; - - if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid)) - continue; - - count += kiblnd_close_peer_conns_locked(peer, 0); - } - } - - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - /* wildcards always succeed */ - if (nid == LNET_NID_ANY) - return 0; - - return !count ? -ENOENT : 0; -} - -static int kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg) -{ - struct libcfs_ioctl_data *data = arg; - int rc = -EINVAL; - - switch (cmd) { - case IOC_LIBCFS_GET_PEER: { - lnet_nid_t nid = 0; - int count = 0; - - rc = kiblnd_get_peer_info(ni, data->ioc_count, - &nid, &count); - data->ioc_nid = nid; - data->ioc_count = count; - break; - } - - case IOC_LIBCFS_DEL_PEER: { - rc = kiblnd_del_peer(ni, data->ioc_nid); - break; - } - case IOC_LIBCFS_GET_CONN: { - struct kib_conn *conn; - - rc = 0; - conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); - if (!conn) { - rc = -ENOENT; - break; - } - - LASSERT(conn->ibc_cmid); - data->ioc_nid = conn->ibc_peer->ibp_nid; - if (!conn->ibc_cmid->route.path_rec) - data->ioc_u32[0] = 0; /* iWarp has no path MTU */ - else - data->ioc_u32[0] = - ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu); - kiblnd_conn_decref(conn); - break; - } - case IOC_LIBCFS_CLOSE_CONNECTION: { - rc = kiblnd_close_matching_conns(ni, data->ioc_nid); - break; - } - - default: - 
break; - } - - return rc; -} - -static void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, - unsigned long *when) -{ - unsigned long last_alive = 0; - unsigned long now = jiffies; - rwlock_t *glock = &kiblnd_data.kib_global_lock; - struct kib_peer *peer; - unsigned long flags; - - read_lock_irqsave(glock, flags); - - peer = kiblnd_find_peer_locked(nid); - if (peer) - last_alive = peer->ibp_last_alive; - - read_unlock_irqrestore(glock, flags); - - if (last_alive) - *when = last_alive; - - /* - * peer is not persistent in hash, trigger peer creation - * and connection establishment with a NULL tx - */ - if (!peer) - kiblnd_launch_tx(ni, NULL, nid); - - CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n", - libcfs_nid2str(nid), peer, - last_alive ? (now - last_alive) / HZ : -1); -} - -static void kiblnd_free_pages(struct kib_pages *p) -{ - int npages = p->ibp_npages; - int i; - - for (i = 0; i < npages; i++) { - if (p->ibp_pages[i]) - __free_page(p->ibp_pages[i]); - } - - kfree(p); -} - -int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages) -{ - struct kib_pages *p; - int i; - - p = kzalloc_cpt(offsetof(struct kib_pages, ibp_pages[npages]), - GFP_NOFS, cpt); - if (!p) { - CERROR("Can't allocate descriptor for %d pages\n", npages); - return -ENOMEM; - } - - p->ibp_npages = npages; - - for (i = 0; i < npages; i++) { - p->ibp_pages[i] = alloc_pages_node( - cfs_cpt_spread_node(lnet_cpt_table(), cpt), - GFP_NOFS, 0); - if (!p->ibp_pages[i]) { - CERROR("Can't allocate page %d of %d\n", i, npages); - kiblnd_free_pages(p); - return -ENOMEM; - } - } - - *pp = p; - return 0; -} - -void kiblnd_unmap_rx_descs(struct kib_conn *conn) -{ - struct kib_rx *rx; - int i; - - LASSERT(conn->ibc_rxs); - LASSERT(conn->ibc_hdev); - - for (i = 0; i < IBLND_RX_MSGS(conn); i++) { - rx = &conn->ibc_rxs[i]; - - LASSERT(rx->rx_nob >= 0); /* not posted */ - - kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev, - KIBLND_UNMAP_ADDR(rx, rx_msgunmap, - rx->rx_msgaddr), - IBLND_MSG_SIZE, 
DMA_FROM_DEVICE); - } - - kiblnd_free_pages(conn->ibc_rx_pages); - - conn->ibc_rx_pages = NULL; -} - -void kiblnd_map_rx_descs(struct kib_conn *conn) -{ - struct kib_rx *rx; - struct page *pg; - int pg_off; - int ipg; - int i; - - for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) { - pg = conn->ibc_rx_pages->ibp_pages[ipg]; - rx = &conn->ibc_rxs[i]; - - rx->rx_conn = conn; - rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off); - - rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev, - rx->rx_msg, - IBLND_MSG_SIZE, - DMA_FROM_DEVICE); - LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev, - rx->rx_msgaddr)); - KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr); - - CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n", - i, rx->rx_msg, rx->rx_msgaddr, - (__u64)(page_to_phys(pg) + pg_off)); - - pg_off += IBLND_MSG_SIZE; - LASSERT(pg_off <= PAGE_SIZE); - - if (pg_off == PAGE_SIZE) { - pg_off = 0; - ipg++; - LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn)); - } - } -} - -static void kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo) -{ - struct kib_hca_dev *hdev = tpo->tpo_hdev; - struct kib_tx *tx; - int i; - - LASSERT(!tpo->tpo_pool.po_allocated); - - if (!hdev) - return; - - for (i = 0; i < tpo->tpo_pool.po_size; i++) { - tx = &tpo->tpo_tx_descs[i]; - kiblnd_dma_unmap_single(hdev->ibh_ibdev, - KIBLND_UNMAP_ADDR(tx, tx_msgunmap, - tx->tx_msgaddr), - IBLND_MSG_SIZE, DMA_TO_DEVICE); - } - - kiblnd_hdev_decref(hdev); - tpo->tpo_hdev = NULL; -} - -static struct kib_hca_dev *kiblnd_current_hdev(struct kib_dev *dev) -{ - struct kib_hca_dev *hdev; - unsigned long flags; - int i = 0; - - read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - while (dev->ibd_failover) { - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (!(i++ % 50)) - CDEBUG(D_NET, "%s: Wait for failover\n", - dev->ibd_ifname); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ / 100); - - read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - } - - 
kiblnd_hdev_addref_locked(dev->ibd_hdev); - hdev = dev->ibd_hdev; - - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - return hdev; -} - -static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo) -{ - struct kib_pages *txpgs = tpo->tpo_tx_pages; - struct kib_pool *pool = &tpo->tpo_pool; - struct kib_net *net = pool->po_owner->ps_net; - struct kib_dev *dev; - struct page *page; - struct kib_tx *tx; - int page_offset; - int ipage; - int i; - - LASSERT(net); - - dev = net->ibn_dev; - - /* pre-mapped messages are not bigger than 1 page */ - BUILD_BUG_ON(IBLND_MSG_SIZE > PAGE_SIZE); - - /* No fancy arithmetic when we do the buffer calculations */ - BUILD_BUG_ON(PAGE_SIZE % IBLND_MSG_SIZE); - - tpo->tpo_hdev = kiblnd_current_hdev(dev); - - for (ipage = page_offset = i = 0; i < pool->po_size; i++) { - page = txpgs->ibp_pages[ipage]; - tx = &tpo->tpo_tx_descs[i]; - - tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) + - page_offset); - - tx->tx_msgaddr = kiblnd_dma_map_single( - tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, - IBLND_MSG_SIZE, DMA_TO_DEVICE); - LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev, - tx->tx_msgaddr)); - KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr); - - list_add(&tx->tx_list, &pool->po_free_list); - - page_offset += IBLND_MSG_SIZE; - LASSERT(page_offset <= PAGE_SIZE); - - if (page_offset == PAGE_SIZE) { - page_offset = 0; - ipage++; - LASSERT(ipage <= txpgs->ibp_npages); - } - } -} - -static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo) -{ - LASSERT(!fpo->fpo_map_count); - - if (fpo->fpo_is_fmr) { - if (fpo->fmr.fpo_fmr_pool) - ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool); - } else { - struct kib_fast_reg_descriptor *frd, *tmp; - int i = 0; - - list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list, - frd_list) { - list_del(&frd->frd_list); - ib_dereg_mr(frd->frd_mr); - kfree(frd); - i++; - } - if (i < fpo->fast_reg.fpo_pool_size) - CERROR("FastReg pool still has %d regions registered\n", - 
fpo->fast_reg.fpo_pool_size - i); - } - - if (fpo->fpo_hdev) - kiblnd_hdev_decref(fpo->fpo_hdev); - - kfree(fpo); -} - -static void kiblnd_destroy_fmr_pool_list(struct list_head *head) -{ - struct kib_fmr_pool *fpo, *tmp; - - list_for_each_entry_safe(fpo, tmp, head, fpo_list) { - list_del(&fpo->fpo_list); - kiblnd_destroy_fmr_pool(fpo); - } -} - -static int -kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables, - int ncpts) -{ - int size = tunables->lnd_fmr_pool_size / ncpts; - - return max(IBLND_FMR_POOL, size); -} - -static int -kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables, - int ncpts) -{ - int size = tunables->lnd_fmr_flush_trigger / ncpts; - - return max(IBLND_FMR_POOL_FLUSH, size); -} - -static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo) -{ - struct ib_fmr_pool_param param = { - .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE, - .page_shift = PAGE_SHIFT, - .access = (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE), - .pool_size = fps->fps_pool_size, - .dirty_watermark = fps->fps_flush_trigger, - .flush_function = NULL, - .flush_arg = NULL, - .cache = !!fps->fps_cache }; - int rc = 0; - - fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, - ¶m); - if (IS_ERR(fpo->fmr.fpo_fmr_pool)) { - rc = PTR_ERR(fpo->fmr.fpo_fmr_pool); - if (rc != -ENOSYS) - CERROR("Failed to create FMR pool: %d\n", rc); - else - CERROR("FMRs are not supported\n"); - } - - return rc; -} - -static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo) -{ - struct kib_fast_reg_descriptor *frd, *tmp; - int i, rc; - - INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list); - fpo->fast_reg.fpo_pool_size = 0; - for (i = 0; i < fps->fps_pool_size; i++) { - frd = kzalloc_cpt(sizeof(*frd), GFP_NOFS, fps->fps_cpt); - if (!frd) { - CERROR("Failed to allocate a new fast_reg descriptor\n"); - rc = -ENOMEM; - goto out; - } - - frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd, - 
IB_MR_TYPE_MEM_REG, - LNET_MAX_PAYLOAD / PAGE_SIZE); - if (IS_ERR(frd->frd_mr)) { - rc = PTR_ERR(frd->frd_mr); - CERROR("Failed to allocate ib_alloc_mr: %d\n", rc); - frd->frd_mr = NULL; - goto out_middle; - } - - frd->frd_valid = true; - - list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list); - fpo->fast_reg.fpo_pool_size++; - } - - return 0; - -out_middle: - if (frd->frd_mr) - ib_dereg_mr(frd->frd_mr); - kfree(frd); - -out: - list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list, - frd_list) { - list_del(&frd->frd_list); - ib_dereg_mr(frd->frd_mr); - kfree(frd); - } - - return rc; -} - -static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps, - struct kib_fmr_pool **pp_fpo) -{ - struct kib_dev *dev = fps->fps_net->ibn_dev; - struct ib_device_attr *dev_attr; - struct kib_fmr_pool *fpo; - int rc; - - fpo = kzalloc_cpt(sizeof(*fpo), GFP_NOFS, fps->fps_cpt); - if (!fpo) - return -ENOMEM; - - fpo->fpo_hdev = kiblnd_current_hdev(dev); - dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs; - - /* Check for FMR or FastReg support */ - fpo->fpo_is_fmr = 0; - if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr && - fpo->fpo_hdev->ibh_ibdev->dealloc_fmr && - fpo->fpo_hdev->ibh_ibdev->map_phys_fmr && - fpo->fpo_hdev->ibh_ibdev->unmap_fmr) { - LCONSOLE_INFO("Using FMR for registration\n"); - fpo->fpo_is_fmr = 1; - } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { - LCONSOLE_INFO("Using FastReg for registration\n"); - } else { - rc = -ENOSYS; - LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs nor FastRegs, can't register memory\n"); - goto out_fpo; - } - - if (fpo->fpo_is_fmr) - rc = kiblnd_alloc_fmr_pool(fps, fpo); - else - rc = kiblnd_alloc_freg_pool(fps, fpo); - if (rc) - goto out_fpo; - - fpo->fpo_deadline = jiffies + IBLND_POOL_DEADLINE * HZ; - fpo->fpo_owner = fps; - *pp_fpo = fpo; - - return 0; - -out_fpo: - kiblnd_hdev_decref(fpo->fpo_hdev); - kfree(fpo); - return rc; -} - -static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, 
- struct list_head *zombies) -{ - if (!fps->fps_net) /* initialized? */ - return; - - spin_lock(&fps->fps_lock); - - while (!list_empty(&fps->fps_pool_list)) { - struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next, - struct kib_fmr_pool, fpo_list); - fpo->fpo_failed = 1; - list_del(&fpo->fpo_list); - if (!fpo->fpo_map_count) - list_add(&fpo->fpo_list, zombies); - else - list_add(&fpo->fpo_list, &fps->fps_failed_pool_list); - } - - spin_unlock(&fps->fps_lock); -} - -static void kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps) -{ - if (fps->fps_net) { /* initialized? */ - kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list); - kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list); - } -} - -static int -kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts, - struct kib_net *net, - struct lnet_ioctl_config_o2iblnd_tunables *tunables) -{ - struct kib_fmr_pool *fpo; - int rc; - - memset(fps, 0, sizeof(*fps)); - - fps->fps_net = net; - fps->fps_cpt = cpt; - - fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts); - fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts); - fps->fps_cache = tunables->lnd_fmr_cache; - - spin_lock_init(&fps->fps_lock); - INIT_LIST_HEAD(&fps->fps_pool_list); - INIT_LIST_HEAD(&fps->fps_failed_pool_list); - - rc = kiblnd_create_fmr_pool(fps, &fpo); - if (!rc) - list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); - - return rc; -} - -static int kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, unsigned long now) -{ - if (fpo->fpo_map_count) /* still in use */ - return 0; - if (fpo->fpo_failed) - return 1; - return time_after_eq(now, fpo->fpo_deadline); -} - -static int -kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd) -{ - __u64 *pages = tx->tx_pages; - struct kib_hca_dev *hdev; - int npages; - int size; - int i; - - hdev = tx->tx_pool->tpo_hdev; - - for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { - for (size = 0; size < rd->rd_frags[i].rf_nob; - size += hdev->ibh_page_size) { - 
pages[npages++] = (rd->rd_frags[i].rf_addr & - hdev->ibh_page_mask) + size; - } - } - - return npages; -} - -void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status) -{ - LIST_HEAD(zombies); - struct kib_fmr_pool *fpo = fmr->fmr_pool; - struct kib_fmr_poolset *fps; - unsigned long now = jiffies; - struct kib_fmr_pool *tmp; - int rc; - - if (!fpo) - return; - - fps = fpo->fpo_owner; - if (fpo->fpo_is_fmr) { - if (fmr->fmr_pfmr) { - rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); - LASSERT(!rc); - fmr->fmr_pfmr = NULL; - } - - if (status) { - rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool); - LASSERT(!rc); - } - } else { - struct kib_fast_reg_descriptor *frd = fmr->fmr_frd; - - if (frd) { - frd->frd_valid = false; - spin_lock(&fps->fps_lock); - list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list); - spin_unlock(&fps->fps_lock); - fmr->fmr_frd = NULL; - } - } - fmr->fmr_pool = NULL; - - spin_lock(&fps->fps_lock); - fpo->fpo_map_count--; /* decref the pool */ - - list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) { - /* the first pool is persistent */ - if (fps->fps_pool_list.next == &fpo->fpo_list) - continue; - - if (kiblnd_fmr_pool_is_idle(fpo, now)) { - list_move(&fpo->fpo_list, &zombies); - fps->fps_version++; - } - } - spin_unlock(&fps->fps_lock); - - if (!list_empty(&zombies)) - kiblnd_destroy_fmr_pool_list(&zombies); -} - -int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, - struct kib_rdma_desc *rd, __u32 nob, __u64 iov, - struct kib_fmr *fmr) -{ - __u64 *pages = tx->tx_pages; - bool is_rx = (rd != tx->tx_rd); - bool tx_pages_mapped = false; - struct kib_fmr_pool *fpo; - int npages = 0; - __u64 version; - int rc; - - again: - spin_lock(&fps->fps_lock); - version = fps->fps_version; - list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) { - fpo->fpo_deadline = jiffies + IBLND_POOL_DEADLINE * HZ; - fpo->fpo_map_count++; - - if (fpo->fpo_is_fmr) { - struct ib_pool_fmr *pfmr; - - spin_unlock(&fps->fps_lock); - - if 
(!tx_pages_mapped) { - npages = kiblnd_map_tx_pages(tx, rd); - tx_pages_mapped = 1; - } - - pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool, - pages, npages, iov); - if (likely(!IS_ERR(pfmr))) { - fmr->fmr_key = is_rx ? pfmr->fmr->rkey : - pfmr->fmr->lkey; - fmr->fmr_frd = NULL; - fmr->fmr_pfmr = pfmr; - fmr->fmr_pool = fpo; - return 0; - } - rc = PTR_ERR(pfmr); - } else { - if (!list_empty(&fpo->fast_reg.fpo_pool_list)) { - struct kib_fast_reg_descriptor *frd; - struct ib_reg_wr *wr; - struct ib_mr *mr; - int n; - - frd = list_first_entry(&fpo->fast_reg.fpo_pool_list, - struct kib_fast_reg_descriptor, - frd_list); - list_del(&frd->frd_list); - spin_unlock(&fps->fps_lock); - - mr = frd->frd_mr; - - if (!frd->frd_valid) { - __u32 key = is_rx ? mr->rkey : mr->lkey; - struct ib_send_wr *inv_wr; - - inv_wr = &frd->frd_inv_wr; - memset(inv_wr, 0, sizeof(*inv_wr)); - inv_wr->opcode = IB_WR_LOCAL_INV; - inv_wr->wr_id = IBLND_WID_MR; - inv_wr->ex.invalidate_rkey = key; - - /* Bump the key */ - key = ib_inc_rkey(key); - ib_update_fast_reg_key(mr, key); - } - - n = ib_map_mr_sg(mr, tx->tx_frags, - tx->tx_nfrags, NULL, PAGE_SIZE); - if (unlikely(n != tx->tx_nfrags)) { - CERROR("Failed to map mr %d/%d elements\n", - n, tx->tx_nfrags); - return n < 0 ? n : -EINVAL; - } - - mr->iova = iov; - - /* Prepare FastReg WR */ - wr = &frd->frd_fastreg_wr; - memset(wr, 0, sizeof(*wr)); - wr->wr.opcode = IB_WR_REG_MR; - wr->wr.wr_id = IBLND_WID_MR; - wr->wr.num_sge = 0; - wr->wr.send_flags = 0; - wr->mr = mr; - wr->key = is_rx ? mr->rkey : mr->lkey; - wr->access = (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE); - - fmr->fmr_key = is_rx ? mr->rkey : mr->lkey; - fmr->fmr_frd = frd; - fmr->fmr_pfmr = NULL; - fmr->fmr_pool = fpo; - return 0; - } - spin_unlock(&fps->fps_lock); - rc = -EAGAIN; - } - - spin_lock(&fps->fps_lock); - fpo->fpo_map_count--; - if (rc != -EAGAIN) { - spin_unlock(&fps->fps_lock); - return rc; - } - - /* EAGAIN and ... 
*/ - if (version != fps->fps_version) { - spin_unlock(&fps->fps_lock); - goto again; - } - } - - if (fps->fps_increasing) { - spin_unlock(&fps->fps_lock); - CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n"); - schedule(); - goto again; - } - - if (time_before(jiffies, fps->fps_next_retry)) { - /* someone failed recently */ - spin_unlock(&fps->fps_lock); - return -EAGAIN; - } - - fps->fps_increasing = 1; - spin_unlock(&fps->fps_lock); - - CDEBUG(D_NET, "Allocate new FMR pool\n"); - rc = kiblnd_create_fmr_pool(fps, &fpo); - spin_lock(&fps->fps_lock); - fps->fps_increasing = 0; - if (!rc) { - fps->fps_version++; - list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); - } else { - fps->fps_next_retry = jiffies + IBLND_POOL_RETRY * HZ; - } - spin_unlock(&fps->fps_lock); - - goto again; -} - -static void kiblnd_fini_pool(struct kib_pool *pool) -{ - LASSERT(list_empty(&pool->po_free_list)); - LASSERT(!pool->po_allocated); - - CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name); -} - -static void kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size) -{ - CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name); - - memset(pool, 0, sizeof(*pool)); - INIT_LIST_HEAD(&pool->po_free_list); - pool->po_deadline = jiffies + IBLND_POOL_DEADLINE * HZ; - pool->po_owner = ps; - pool->po_size = size; -} - -static void kiblnd_destroy_pool_list(struct list_head *head) -{ - struct kib_pool *pool; - - while (!list_empty(head)) { - pool = list_entry(head->next, struct kib_pool, po_list); - list_del(&pool->po_list); - - LASSERT(pool->po_owner); - pool->po_owner->ps_pool_destroy(pool); - } -} - -static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies) -{ - if (!ps->ps_net) /* initialized? 
*/ - return; - - spin_lock(&ps->ps_lock); - while (!list_empty(&ps->ps_pool_list)) { - struct kib_pool *po = list_entry(ps->ps_pool_list.next, - struct kib_pool, po_list); - po->po_failed = 1; - list_del(&po->po_list); - if (!po->po_allocated) - list_add(&po->po_list, zombies); - else - list_add(&po->po_list, &ps->ps_failed_pool_list); - } - spin_unlock(&ps->ps_lock); -} - -static void kiblnd_fini_poolset(struct kib_poolset *ps) -{ - if (ps->ps_net) { /* initialized? */ - kiblnd_destroy_pool_list(&ps->ps_failed_pool_list); - kiblnd_destroy_pool_list(&ps->ps_pool_list); - } -} - -static int kiblnd_init_poolset(struct kib_poolset *ps, int cpt, - struct kib_net *net, char *name, int size, - kib_ps_pool_create_t po_create, - kib_ps_pool_destroy_t po_destroy, - kib_ps_node_init_t nd_init, - kib_ps_node_fini_t nd_fini) -{ - struct kib_pool *pool; - int rc; - - memset(ps, 0, sizeof(*ps)); - - ps->ps_cpt = cpt; - ps->ps_net = net; - ps->ps_pool_create = po_create; - ps->ps_pool_destroy = po_destroy; - ps->ps_node_init = nd_init; - ps->ps_node_fini = nd_fini; - ps->ps_pool_size = size; - if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name)) - >= sizeof(ps->ps_name)) - return -E2BIG; - spin_lock_init(&ps->ps_lock); - INIT_LIST_HEAD(&ps->ps_pool_list); - INIT_LIST_HEAD(&ps->ps_failed_pool_list); - - rc = ps->ps_pool_create(ps, size, &pool); - if (!rc) - list_add(&pool->po_list, &ps->ps_pool_list); - else - CERROR("Failed to create the first pool for %s\n", ps->ps_name); - - return rc; -} - -static int kiblnd_pool_is_idle(struct kib_pool *pool, unsigned long now) -{ - if (pool->po_allocated) /* still in use */ - return 0; - if (pool->po_failed) - return 1; - return time_after_eq(now, pool->po_deadline); -} - -void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node) -{ - LIST_HEAD(zombies); - struct kib_poolset *ps = pool->po_owner; - struct kib_pool *tmp; - unsigned long now = jiffies; - - spin_lock(&ps->ps_lock); - - if (ps->ps_node_fini) - 
ps->ps_node_fini(pool, node); - - LASSERT(pool->po_allocated > 0); - list_add(node, &pool->po_free_list); - pool->po_allocated--; - - list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) { - /* the first pool is persistent */ - if (ps->ps_pool_list.next == &pool->po_list) - continue; - - if (kiblnd_pool_is_idle(pool, now)) - list_move(&pool->po_list, &zombies); - } - spin_unlock(&ps->ps_lock); - - if (!list_empty(&zombies)) - kiblnd_destroy_pool_list(&zombies); -} - -struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps) -{ - struct list_head *node; - struct kib_pool *pool; - unsigned int interval = 1; - unsigned long time_before; - unsigned int trips = 0; - int rc; - - again: - spin_lock(&ps->ps_lock); - list_for_each_entry(pool, &ps->ps_pool_list, po_list) { - if (list_empty(&pool->po_free_list)) - continue; - - pool->po_allocated++; - pool->po_deadline = jiffies + IBLND_POOL_DEADLINE * HZ; - node = pool->po_free_list.next; - list_del(node); - - if (ps->ps_node_init) { - /* still hold the lock */ - ps->ps_node_init(pool, node); - } - spin_unlock(&ps->ps_lock); - return node; - } - - /* no available tx pool and ... */ - if (ps->ps_increasing) { - /* another thread is allocating a new pool */ - spin_unlock(&ps->ps_lock); - trips++; - CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting %d HZs for her to complete. 
trips = %d\n", - ps->ps_name, interval, trips); - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(interval); - if (interval < HZ) - interval *= 2; - - goto again; - } - - if (time_before(jiffies, ps->ps_next_retry)) { - /* someone failed recently */ - spin_unlock(&ps->ps_lock); - return NULL; - } - - ps->ps_increasing = 1; - spin_unlock(&ps->ps_lock); - - CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); - time_before = jiffies; - rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); - CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete", - jiffies - time_before); - - spin_lock(&ps->ps_lock); - ps->ps_increasing = 0; - if (!rc) { - list_add_tail(&pool->po_list, &ps->ps_pool_list); - } else { - ps->ps_next_retry = jiffies + IBLND_POOL_RETRY * HZ; - CERROR("Can't allocate new %s pool because out of memory\n", - ps->ps_name); - } - spin_unlock(&ps->ps_lock); - - goto again; -} - -static void kiblnd_destroy_tx_pool(struct kib_pool *pool) -{ - struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool, tpo_pool); - int i; - - LASSERT(!pool->po_allocated); - - if (tpo->tpo_tx_pages) { - kiblnd_unmap_tx_pool(tpo); - kiblnd_free_pages(tpo->tpo_tx_pages); - } - - if (!tpo->tpo_tx_descs) - goto out; - - for (i = 0; i < pool->po_size; i++) { - struct kib_tx *tx = &tpo->tpo_tx_descs[i]; - - list_del(&tx->tx_list); - kfree(tx->tx_pages); - kfree(tx->tx_frags); - kfree(tx->tx_wrq); - kfree(tx->tx_sge); - kfree(tx->tx_rd); - } - - kfree(tpo->tpo_tx_descs); -out: - kiblnd_fini_pool(pool); - kfree(tpo); -} - -static int kiblnd_tx_pool_size(int ncpts) -{ - int ntx = *kiblnd_tunables.kib_ntx / ncpts; - - return max(IBLND_TX_POOL, ntx); -} - -static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size, - struct kib_pool **pp_po) -{ - int i; - int npg; - struct kib_pool *pool; - struct kib_tx_pool *tpo; - - tpo = kzalloc_cpt(sizeof(*tpo), GFP_NOFS, ps->ps_cpt); - if (!tpo) { - CERROR("Failed to allocate TX pool\n"); - return -ENOMEM; - } - - 
pool = &tpo->tpo_pool; - kiblnd_init_pool(ps, pool, size); - tpo->tpo_tx_descs = NULL; - tpo->tpo_tx_pages = NULL; - - npg = DIV_ROUND_UP(size * IBLND_MSG_SIZE, PAGE_SIZE); - if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) { - CERROR("Can't allocate tx pages: %d\n", npg); - kfree(tpo); - return -ENOMEM; - } - - tpo->tpo_tx_descs = kzalloc_cpt(size * sizeof(struct kib_tx), - GFP_NOFS, ps->ps_cpt); - if (!tpo->tpo_tx_descs) { - CERROR("Can't allocate %d tx descriptors\n", size); - ps->ps_pool_destroy(pool); - return -ENOMEM; - } - - memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx)); - - for (i = 0; i < size; i++) { - struct kib_tx *tx = &tpo->tpo_tx_descs[i]; - - tx->tx_pool = tpo; - if (ps->ps_net->ibn_fmr_ps) { - tx->tx_pages = kzalloc_cpt(LNET_MAX_IOV * sizeof(*tx->tx_pages), - GFP_NOFS, ps->ps_cpt); - if (!tx->tx_pages) - break; - } - - tx->tx_frags = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_frags), - GFP_NOFS, ps->ps_cpt); - if (!tx->tx_frags) - break; - - sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1); - - tx->tx_wrq = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_wrq), - GFP_NOFS, ps->ps_cpt); - if (!tx->tx_wrq) - break; - - tx->tx_sge = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_sge), - GFP_NOFS, ps->ps_cpt); - if (!tx->tx_sge) - break; - - tx->tx_rd = kzalloc_cpt(offsetof(struct kib_rdma_desc, - rd_frags[IBLND_MAX_RDMA_FRAGS]), - GFP_NOFS, ps->ps_cpt); - if (!tx->tx_rd) - break; - } - - if (i == size) { - kiblnd_map_tx_pool(tpo); - *pp_po = pool; - return 0; - } - - ps->ps_pool_destroy(pool); - return -ENOMEM; -} - -static void kiblnd_tx_init(struct kib_pool *pool, struct list_head *node) -{ - struct kib_tx_poolset *tps = container_of(pool->po_owner, - struct kib_tx_poolset, - tps_poolset); - struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list); - - tx->tx_cookie = tps->tps_next_tx_cookie++; -} - -static void kiblnd_net_fini_pools(struct kib_net *net) -{ - int i; - - 
cfs_cpt_for_each(i, lnet_cpt_table()) { - struct kib_tx_poolset *tps; - struct kib_fmr_poolset *fps; - - if (net->ibn_tx_ps) { - tps = net->ibn_tx_ps[i]; - kiblnd_fini_poolset(&tps->tps_poolset); - } - - if (net->ibn_fmr_ps) { - fps = net->ibn_fmr_ps[i]; - kiblnd_fini_fmr_poolset(fps); - } - } - - if (net->ibn_tx_ps) { - cfs_percpt_free(net->ibn_tx_ps); - net->ibn_tx_ps = NULL; - } - - if (net->ibn_fmr_ps) { - cfs_percpt_free(net->ibn_fmr_ps); - net->ibn_fmr_ps = NULL; - } -} - -static int kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, - __u32 *cpts, int ncpts) -{ - struct lnet_ioctl_config_o2iblnd_tunables *tunables; - int cpt; - int rc; - int i; - - tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; - - if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) { - CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n", - tunables->lnd_fmr_pool_size, - *kiblnd_tunables.kib_ntx / 4); - rc = -EINVAL; - goto failed; - } - - /* - * TX pool must be created later than FMR, see LU-2268 - * for details - */ - LASSERT(!net->ibn_tx_ps); - - /* - * premapping can fail if ibd_nmr > 1, so we always create - * FMR pool and map-on-demand if premapping failed - * - * cfs_precpt_alloc is creating an array of struct kib_fmr_poolset - * The number of struct kib_fmr_poolsets create is equal to the - * number of CPTs that exist, i.e net->ibn_fmr_ps[cpt]. - */ - net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(struct kib_fmr_poolset)); - if (!net->ibn_fmr_ps) { - CERROR("Failed to allocate FMR pool array\n"); - rc = -ENOMEM; - goto failed; - } - - for (i = 0; i < ncpts; i++) { - cpt = !cpts ? 
i : cpts[i]; - rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts, - net, tunables); - if (rc) { - CERROR("Can't initialize FMR pool for CPT %d: %d\n", - cpt, rc); - goto failed; - } - } - - if (i > 0) - LASSERT(i == ncpts); - - /* - * cfs_precpt_alloc is creating an array of struct kib_tx_poolset - * The number of struct kib_tx_poolsets create is equal to the - * number of CPTs that exist, i.e net->ibn_tx_ps[cpt]. - */ - net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(struct kib_tx_poolset)); - if (!net->ibn_tx_ps) { - CERROR("Failed to allocate tx pool array\n"); - rc = -ENOMEM; - goto failed; - } - - for (i = 0; i < ncpts; i++) { - cpt = !cpts ? i : cpts[i]; - rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset, - cpt, net, "TX", - kiblnd_tx_pool_size(ncpts), - kiblnd_create_tx_pool, - kiblnd_destroy_tx_pool, - kiblnd_tx_init, NULL); - if (rc) { - CERROR("Can't initialize TX pool for CPT %d: %d\n", - cpt, rc); - goto failed; - } - } - - return 0; - failed: - kiblnd_net_fini_pools(net); - LASSERT(rc); - return rc; -} - -static int kiblnd_hdev_get_attr(struct kib_hca_dev *hdev) -{ - /* - * It's safe to assume a HCA can handle a page size - * matching that of the native system - */ - hdev->ibh_page_shift = PAGE_SHIFT; - hdev->ibh_page_size = 1 << PAGE_SHIFT; - hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1); - - hdev->ibh_mr_size = hdev->ibh_ibdev->attrs.max_mr_size; - if (hdev->ibh_mr_size == ~0ULL) { - hdev->ibh_mr_shift = 64; - return 0; - } - - CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size); - return -EINVAL; -} - -void kiblnd_hdev_destroy(struct kib_hca_dev *hdev) -{ - if (hdev->ibh_pd) - ib_dealloc_pd(hdev->ibh_pd); - - if (hdev->ibh_cmid) - rdma_destroy_id(hdev->ibh_cmid); - - kfree(hdev); -} - -/* DUMMY */ -static int kiblnd_dummy_callback(struct rdma_cm_id *cmid, - struct rdma_cm_event *event) -{ - return 0; -} - -static int kiblnd_dev_need_failover(struct kib_dev *dev) -{ - struct rdma_cm_id *cmid; - struct 
sockaddr_in srcaddr; - struct sockaddr_in dstaddr; - int rc; - - if (!dev->ibd_hdev || /* initializing */ - !dev->ibd_hdev->ibh_cmid || /* listener is dead */ - *kiblnd_tunables.kib_dev_failover > 1) /* debugging */ - return 1; - - /* - * XXX: it's UGLY, but I don't have better way to find - * ib-bonding HCA failover because: - * - * a. no reliable CM event for HCA failover... - * b. no OFED API to get ib_device for current net_device... - * - * We have only two choices at this point: - * - * a. rdma_bind_addr(), it will conflict with listener cmid - * b. rdma_resolve_addr() to zero addr - */ - cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP, - IB_QPT_RC); - if (IS_ERR(cmid)) { - rc = PTR_ERR(cmid); - CERROR("Failed to create cmid for failover: %d\n", rc); - return rc; - } - - memset(&srcaddr, 0, sizeof(srcaddr)); - srcaddr.sin_family = AF_INET; - srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip); - - memset(&dstaddr, 0, sizeof(dstaddr)); - dstaddr.sin_family = AF_INET; - rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr, - (struct sockaddr *)&dstaddr, 1); - if (rc || !cmid->device) { - CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", - dev->ibd_ifname, &dev->ibd_ifip, - cmid->device, rc); - rdma_destroy_id(cmid); - return rc; - } - - rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */ - rdma_destroy_id(cmid); - - return rc; -} - -int kiblnd_dev_failover(struct kib_dev *dev) -{ - LIST_HEAD(zombie_tpo); - LIST_HEAD(zombie_ppo); - LIST_HEAD(zombie_fpo); - struct rdma_cm_id *cmid = NULL; - struct kib_hca_dev *hdev = NULL; - struct ib_pd *pd; - struct kib_net *net; - struct sockaddr_in addr; - unsigned long flags; - int rc = 0; - int i; - - LASSERT(*kiblnd_tunables.kib_dev_failover > 1 || - dev->ibd_can_failover || !dev->ibd_hdev); - - rc = kiblnd_dev_need_failover(dev); - if (rc <= 0) - goto out; - - if (dev->ibd_hdev && - dev->ibd_hdev->ibh_cmid) { - /* - * XXX it's not good to close old listener at here, - * because 
we can fail to create new listener. - * But we have to close it now, otherwise rdma_bind_addr - * will return EADDRINUSE... How crap! - */ - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - cmid = dev->ibd_hdev->ibh_cmid; - /* - * make next schedule of kiblnd_dev_need_failover() - * return 1 for me - */ - dev->ibd_hdev->ibh_cmid = NULL; - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - rdma_destroy_id(cmid); - } - - cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP, - IB_QPT_RC); - if (IS_ERR(cmid)) { - rc = PTR_ERR(cmid); - CERROR("Failed to create cmid for failover: %d\n", rc); - goto out; - } - - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(dev->ibd_ifip); - addr.sin_port = htons(*kiblnd_tunables.kib_service); - - /* Bind to failover device or port */ - rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr); - if (rc || !cmid->device) { - CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", - dev->ibd_ifname, &dev->ibd_ifip, - cmid->device, rc); - rdma_destroy_id(cmid); - goto out; - } - - hdev = kzalloc(sizeof(*hdev), GFP_NOFS); - if (!hdev) { - CERROR("Failed to allocate kib_hca_dev\n"); - rdma_destroy_id(cmid); - rc = -ENOMEM; - goto out; - } - - atomic_set(&hdev->ibh_ref, 1); - hdev->ibh_dev = dev; - hdev->ibh_cmid = cmid; - hdev->ibh_ibdev = cmid->device; - - pd = ib_alloc_pd(cmid->device, 0); - if (IS_ERR(pd)) { - rc = PTR_ERR(pd); - CERROR("Can't allocate PD: %d\n", rc); - goto out; - } - - hdev->ibh_pd = pd; - - rc = rdma_listen(cmid, 0); - if (rc) { - CERROR("Can't start new listener: %d\n", rc); - goto out; - } - - rc = kiblnd_hdev_get_attr(hdev); - if (rc) { - CERROR("Can't get device attributes: %d\n", rc); - goto out; - } - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - swap(dev->ibd_hdev, hdev); /* take over the refcount */ - - list_for_each_entry(net, &dev->ibd_nets, ibn_list) { - cfs_cpt_for_each(i, lnet_cpt_table()) { - 
kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset, - &zombie_tpo); - - if (net->ibn_fmr_ps) - kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i], - &zombie_fpo); - } - } - - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - out: - if (!list_empty(&zombie_tpo)) - kiblnd_destroy_pool_list(&zombie_tpo); - if (!list_empty(&zombie_ppo)) - kiblnd_destroy_pool_list(&zombie_ppo); - if (!list_empty(&zombie_fpo)) - kiblnd_destroy_fmr_pool_list(&zombie_fpo); - if (hdev) - kiblnd_hdev_decref(hdev); - - if (rc) - dev->ibd_failed_failover++; - else - dev->ibd_failed_failover = 0; - - return rc; -} - -void kiblnd_destroy_dev(struct kib_dev *dev) -{ - LASSERT(!dev->ibd_nnets); - LASSERT(list_empty(&dev->ibd_nets)); - - list_del(&dev->ibd_fail_list); - list_del(&dev->ibd_list); - - if (dev->ibd_hdev) - kiblnd_hdev_decref(dev->ibd_hdev); - - kfree(dev); -} - -static struct kib_dev *kiblnd_create_dev(char *ifname) -{ - struct net_device *netdev; - struct kib_dev *dev; - __u32 netmask; - __u32 ip; - int up; - int rc; - - rc = lnet_ipif_query(ifname, &up, &ip, &netmask); - if (rc) { - CERROR("Can't query IPoIB interface %s: %d\n", - ifname, rc); - return NULL; - } - - if (!up) { - CERROR("Can't query IPoIB interface %s: it's down\n", ifname); - return NULL; - } - - dev = kzalloc(sizeof(*dev), GFP_NOFS); - if (!dev) - return NULL; - - netdev = dev_get_by_name(&init_net, ifname); - if (!netdev) { - dev->ibd_can_failover = 0; - } else { - dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER); - dev_put(netdev); - } - - INIT_LIST_HEAD(&dev->ibd_nets); - INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */ - INIT_LIST_HEAD(&dev->ibd_fail_list); - dev->ibd_ifip = ip; - strcpy(&dev->ibd_ifname[0], ifname); - - /* initialize the device */ - rc = kiblnd_dev_failover(dev); - if (rc) { - CERROR("Can't initialize device: %d\n", rc); - kfree(dev); - return NULL; - } - - list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs); - return dev; -} - -static void kiblnd_base_shutdown(void) -{ 
- struct kib_sched_info *sched; - int i; - - LASSERT(list_empty(&kiblnd_data.kib_devs)); - - switch (kiblnd_data.kib_init) { - default: - LBUG(); - - case IBLND_INIT_ALL: - case IBLND_INIT_DATA: - LASSERT(kiblnd_data.kib_peers); - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) - LASSERT(list_empty(&kiblnd_data.kib_peers[i])); - LASSERT(list_empty(&kiblnd_data.kib_connd_zombies)); - LASSERT(list_empty(&kiblnd_data.kib_connd_conns)); - LASSERT(list_empty(&kiblnd_data.kib_reconn_list)); - LASSERT(list_empty(&kiblnd_data.kib_reconn_wait)); - - /* flag threads to terminate; wake and wait for them to die */ - kiblnd_data.kib_shutdown = 1; - - /* - * NB: we really want to stop scheduler threads net by net - * instead of the whole module, this should be improved - * with dynamic configuration LNet - */ - cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) - wake_up_all(&sched->ibs_waitq); - - wake_up_all(&kiblnd_data.kib_connd_waitq); - wake_up_all(&kiblnd_data.kib_failover_waitq); - - i = 2; - while (atomic_read(&kiblnd_data.kib_nthreads)) { - i++; - /* power of 2 ? */ - CDEBUG(((i & (-i)) == i) ? 
D_WARNING : D_NET, - "Waiting for %d threads to terminate\n", - atomic_read(&kiblnd_data.kib_nthreads)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - } - - /* fall through */ - - case IBLND_INIT_NOTHING: - break; - } - - kvfree(kiblnd_data.kib_peers); - - if (kiblnd_data.kib_scheds) - cfs_percpt_free(kiblnd_data.kib_scheds); - - kiblnd_data.kib_init = IBLND_INIT_NOTHING; - module_put(THIS_MODULE); -} - -static void kiblnd_shutdown(struct lnet_ni *ni) -{ - struct kib_net *net = ni->ni_data; - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - int i; - unsigned long flags; - - LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); - - if (!net) - goto out; - - write_lock_irqsave(g_lock, flags); - net->ibn_shutdown = 1; - write_unlock_irqrestore(g_lock, flags); - - switch (net->ibn_init) { - default: - LBUG(); - - case IBLND_INIT_ALL: - /* nuke all existing peers within this net */ - kiblnd_del_peer(ni, LNET_NID_ANY); - - /* Wait for all peer state to clean up */ - i = 2; - while (atomic_read(&net->ibn_npeers)) { - i++; - CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? 
*/ - "%s: waiting for %d peers to disconnect\n", - libcfs_nid2str(ni->ni_nid), - atomic_read(&net->ibn_npeers)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - } - - kiblnd_net_fini_pools(net); - - write_lock_irqsave(g_lock, flags); - LASSERT(net->ibn_dev->ibd_nnets > 0); - net->ibn_dev->ibd_nnets--; - list_del(&net->ibn_list); - write_unlock_irqrestore(g_lock, flags); - - /* fall through */ - - case IBLND_INIT_NOTHING: - LASSERT(!atomic_read(&net->ibn_nconns)); - - if (net->ibn_dev && !net->ibn_dev->ibd_nnets) - kiblnd_destroy_dev(net->ibn_dev); - - break; - } - - net->ibn_init = IBLND_INIT_NOTHING; - ni->ni_data = NULL; - - kfree(net); - -out: - if (list_empty(&kiblnd_data.kib_devs)) - kiblnd_base_shutdown(); -} - -static int kiblnd_base_startup(void) -{ - struct kib_sched_info *sched; - int rc; - int i; - - LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING); - - try_module_get(THIS_MODULE); - /* zero pointers, flags etc */ - memset(&kiblnd_data, 0, sizeof(kiblnd_data)); - - rwlock_init(&kiblnd_data.kib_global_lock); - - INIT_LIST_HEAD(&kiblnd_data.kib_devs); - INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs); - - kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; - kiblnd_data.kib_peers = kvmalloc_array(kiblnd_data.kib_peer_hash_size, - sizeof(struct list_head), - GFP_KERNEL); - if (!kiblnd_data.kib_peers) - goto failed; - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) - INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]); - - spin_lock_init(&kiblnd_data.kib_connd_lock); - INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns); - INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies); - INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list); - INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait); - - init_waitqueue_head(&kiblnd_data.kib_connd_waitq); - init_waitqueue_head(&kiblnd_data.kib_failover_waitq); - - kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*sched)); - if (!kiblnd_data.kib_scheds) - goto failed; - - cfs_percpt_for_each(sched, i, 
kiblnd_data.kib_scheds) { - int nthrs; - - spin_lock_init(&sched->ibs_lock); - INIT_LIST_HEAD(&sched->ibs_conns); - init_waitqueue_head(&sched->ibs_waitq); - - nthrs = cfs_cpt_weight(lnet_cpt_table(), i); - if (*kiblnd_tunables.kib_nscheds > 0) { - nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds); - } else { - /* - * max to half of CPUs, another half is reserved for - * upper layer modules - */ - nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs); - } - - sched->ibs_nthreads_max = nthrs; - sched->ibs_cpt = i; - } - - kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR; - - /* lists/ptrs/locks initialised */ - kiblnd_data.kib_init = IBLND_INIT_DATA; - /*****************************************************/ - - rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd"); - if (rc) { - CERROR("Can't spawn o2iblnd connd: %d\n", rc); - goto failed; - } - - if (*kiblnd_tunables.kib_dev_failover) - rc = kiblnd_thread_start(kiblnd_failover_thread, NULL, - "kiblnd_failover"); - - if (rc) { - CERROR("Can't spawn o2iblnd failover thread: %d\n", rc); - goto failed; - } - - /* flag everything initialised */ - kiblnd_data.kib_init = IBLND_INIT_ALL; - /*****************************************************/ - - return 0; - - failed: - kiblnd_base_shutdown(); - return -ENETDOWN; -} - -static int kiblnd_start_schedulers(struct kib_sched_info *sched) -{ - int rc = 0; - int nthrs; - int i; - - if (!sched->ibs_nthreads) { - if (*kiblnd_tunables.kib_nscheds > 0) { - nthrs = sched->ibs_nthreads_max; - } else { - nthrs = cfs_cpt_weight(lnet_cpt_table(), - sched->ibs_cpt); - nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs); - nthrs = min(IBLND_N_SCHED_HIGH, nthrs); - } - } else { - LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max); - /* increase one thread if there is new interface */ - nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max; - } - - for (i = 0; i < nthrs; i++) { - long id; - char name[20]; - - id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i); - 
snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", - KIB_THREAD_CPT(id), KIB_THREAD_TID(id)); - rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name); - if (!rc) - continue; - - CERROR("Can't spawn thread %d for scheduler[%d]: %d\n", - sched->ibs_cpt, sched->ibs_nthreads + i, rc); - break; - } - - sched->ibs_nthreads += i; - return rc; -} - -static int kiblnd_dev_start_threads(struct kib_dev *dev, int newdev, __u32 *cpts, - int ncpts) -{ - int cpt; - int rc; - int i; - - for (i = 0; i < ncpts; i++) { - struct kib_sched_info *sched; - - cpt = !cpts ? i : cpts[i]; - sched = kiblnd_data.kib_scheds[cpt]; - - if (!newdev && sched->ibs_nthreads > 0) - continue; - - rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]); - if (rc) { - CERROR("Failed to start scheduler threads for %s\n", - dev->ibd_ifname); - return rc; - } - } - return 0; -} - -static struct kib_dev *kiblnd_dev_search(char *ifname) -{ - struct kib_dev *alias = NULL; - struct kib_dev *dev; - char *colon; - char *colon2; - - colon = strchr(ifname, ':'); - list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { - if (!strcmp(&dev->ibd_ifname[0], ifname)) - return dev; - - if (alias) - continue; - - colon2 = strchr(dev->ibd_ifname, ':'); - if (colon) - *colon = 0; - if (colon2) - *colon2 = 0; - - if (!strcmp(&dev->ibd_ifname[0], ifname)) - alias = dev; - - if (colon) - *colon = ':'; - if (colon2) - *colon2 = ':'; - } - return alias; -} - -static int kiblnd_startup(struct lnet_ni *ni) -{ - char *ifname; - struct kib_dev *ibdev = NULL; - struct kib_net *net; - struct timespec64 tv; - unsigned long flags; - int rc; - int newdev; - - LASSERT(ni->ni_lnd == &the_o2iblnd); - - if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { - rc = kiblnd_base_startup(); - if (rc) - return rc; - } - - net = kzalloc(sizeof(*net), GFP_NOFS); - ni->ni_data = net; - if (!net) - goto net_failed; - - ktime_get_real_ts64(&tv); - net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC + - tv.tv_nsec / NSEC_PER_USEC; - - rc = 
kiblnd_tunables_setup(ni); - if (rc) - goto net_failed; - - if (ni->ni_interfaces[0]) { - /* Use the IPoIB interface specified in 'networks=' */ - - BUILD_BUG_ON(LNET_MAX_INTERFACES <= 1); - if (ni->ni_interfaces[1]) { - CERROR("Multiple interfaces not supported\n"); - goto failed; - } - - ifname = ni->ni_interfaces[0]; - } else { - ifname = *kiblnd_tunables.kib_default_ipif; - } - - if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) { - CERROR("IPoIB interface name too long: %s\n", ifname); - goto failed; - } - - ibdev = kiblnd_dev_search(ifname); - - newdev = !ibdev; - /* hmm...create kib_dev even for alias */ - if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname)) - ibdev = kiblnd_create_dev(ifname); - - if (!ibdev) - goto failed; - - net->ibn_dev = ibdev; - ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip); - - rc = kiblnd_dev_start_threads(ibdev, newdev, - ni->ni_cpts, ni->ni_ncpts); - if (rc) - goto failed; - - rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts); - if (rc) { - CERROR("Failed to initialize NI pools: %d\n", rc); - goto failed; - } - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - ibdev->ibd_nnets++; - list_add_tail(&net->ibn_list, &ibdev->ibd_nets); - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - net->ibn_init = IBLND_INIT_ALL; - - return 0; - -failed: - if (!net->ibn_dev && ibdev) - kiblnd_destroy_dev(ibdev); - -net_failed: - kiblnd_shutdown(ni); - - CDEBUG(D_NET, "%s failed\n", __func__); - return -ENETDOWN; -} - -static struct lnet_lnd the_o2iblnd = { - .lnd_type = O2IBLND, - .lnd_startup = kiblnd_startup, - .lnd_shutdown = kiblnd_shutdown, - .lnd_ctl = kiblnd_ctl, - .lnd_query = kiblnd_query, - .lnd_send = kiblnd_send, - .lnd_recv = kiblnd_recv, -}; - -static void __exit ko2iblnd_exit(void) -{ - lnet_unregister_lnd(&the_o2iblnd); -} - -static int __init ko2iblnd_init(void) -{ - int rc; - - BUILD_BUG_ON(sizeof(struct kib_msg) > IBLND_MSG_SIZE); - BUILD_BUG_ON(offsetof(struct kib_msg, - 
ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - > IBLND_MSG_SIZE); - BUILD_BUG_ON(offsetof(struct kib_msg, - ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - > IBLND_MSG_SIZE); - - kiblnd_tunables_init(); - - rc = libcfs_setup(); - if (rc) - return rc; - - lnet_register_lnd(&the_o2iblnd); - - return 0; -} - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver"); -MODULE_VERSION("2.7.0"); -MODULE_LICENSE("GPL"); - -module_init(ko2iblnd_init); -module_exit(ko2iblnd_exit); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h deleted file mode 100644 index 217503f125bc..000000000000 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ /dev/null @@ -1,1048 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lnet/klnds/o2iblnd/o2iblnd.h - * - * Author: Eric Barton - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - -#define DEBUG_SUBSYSTEM S_LND - -#include - -#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */ -/* # scheduler loops before reschedule */ -#define IBLND_RESCHED 100 - -#define IBLND_N_SCHED 2 -#define IBLND_N_SCHED_HIGH 4 - -struct kib_tunables { - int *kib_dev_failover; /* HCA failover */ - unsigned int *kib_service; /* IB service number */ - int *kib_min_reconnect_interval; /* first failed connection retry... */ - int *kib_max_reconnect_interval; /* exponentially increasing to this */ - int *kib_cksum; /* checksum struct kib_msg? */ - int *kib_timeout; /* comms timeout (seconds) */ - int *kib_keepalive; /* keepalive timeout (seconds) */ - int *kib_ntx; /* # tx descs */ - char **kib_default_ipif; /* default IPoIB interface */ - int *kib_retry_count; - int *kib_rnr_retry_count; - int *kib_ib_mtu; /* IB MTU */ - int *kib_require_priv_port; /* accept only privileged ports */ - int *kib_use_priv_port; /* use privileged port for active connect */ - int *kib_nscheds; /* # threads on each CPT */ -}; - -extern struct kib_tunables kiblnd_tunables; - -#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */ -#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */ - -#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ -#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *)0)->ibm_credits)) - 1) /* Max # of peer credits */ - -/* when eagerly to return credits */ -#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? 
\ - IBLND_CREDIT_HIGHWATER_V1 : \ - t->lnd_peercredits_hiw) - -#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \ - cb, dev, \ - ps, qpt) - -/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */ -#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1) -#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0) - -#define IBLND_FRAG_SHIFT (PAGE_SHIFT - 12) /* frag size on wire is in 4K units */ -#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */ -#define IBLND_MAX_RDMA_FRAGS (LNET_MAX_PAYLOAD >> 12)/* max # of fragments supported in 4K size */ - -/************************/ -/* derived constants... */ -/* Pools (shared by connections on each CPT) */ -/* These pools can grow at runtime, so don't need give a very large value */ -#define IBLND_TX_POOL 256 -#define IBLND_FMR_POOL 256 -#define IBLND_FMR_POOL_FLUSH 192 - -#define IBLND_RX_MSGS(c) \ - ((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version)) -#define IBLND_RX_MSG_BYTES(c) (IBLND_RX_MSGS(c) * IBLND_MSG_SIZE) -#define IBLND_RX_MSG_PAGES(c) \ - ((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE) - -/* WRs and CQEs (per connection) */ -#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c) -#define IBLND_SEND_WRS(c) \ - (((c->ibc_max_frags + 1) << IBLND_FRAG_SHIFT) * \ - kiblnd_concurrent_sends(c->ibc_version, c->ibc_peer->ibp_ni)) -#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c)) - -struct kib_hca_dev; - -/* o2iblnd can run over aliased interface */ -#ifdef IFALIASZ -#define KIB_IFNAME_SIZE IFALIASZ -#else -#define KIB_IFNAME_SIZE 256 -#endif - -struct kib_dev { - struct list_head ibd_list; /* chain on kib_devs */ - struct list_head ibd_fail_list; /* chain on kib_failed_devs */ - __u32 ibd_ifip; /* IPoIB interface IP */ - - /* IPoIB interface name */ - char ibd_ifname[KIB_IFNAME_SIZE]; - int ibd_nnets; /* # nets extant */ - - unsigned long ibd_next_failover; - int ibd_failed_failover; /* # failover failures */ - unsigned 
int ibd_failover; /* failover in progress */ - unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */ - struct list_head ibd_nets; - struct kib_hca_dev *ibd_hdev; -}; - -struct kib_hca_dev { - struct rdma_cm_id *ibh_cmid; /* listener cmid */ - struct ib_device *ibh_ibdev; /* IB device */ - int ibh_page_shift; /* page shift of current HCA */ - int ibh_page_size; /* page size of current HCA */ - __u64 ibh_page_mask; /* page mask of current HCA */ - int ibh_mr_shift; /* bits shift of max MR size */ - __u64 ibh_mr_size; /* size of MR */ - struct ib_pd *ibh_pd; /* PD */ - struct kib_dev *ibh_dev; /* owner */ - atomic_t ibh_ref; /* refcount */ -}; - -/** # of seconds to keep pool alive */ -#define IBLND_POOL_DEADLINE 300 -/** # of seconds to retry if allocation failed */ -#define IBLND_POOL_RETRY 1 - -struct kib_pages { - int ibp_npages; /* # pages */ - struct page *ibp_pages[0]; /* page array */ -}; - -struct kib_pool; -struct kib_poolset; - -typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps, - int inc, struct kib_pool **pp_po); -typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po); -typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node); -typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node); - -struct kib_net; - -#define IBLND_POOL_NAME_LEN 32 - -struct kib_poolset { - spinlock_t ps_lock; /* serialize */ - struct kib_net *ps_net; /* network it belongs to */ - char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */ - struct list_head ps_pool_list; /* list of pools */ - struct list_head ps_failed_pool_list;/* failed pool list */ - unsigned long ps_next_retry; /* time stamp for retry if */ - /* failed to allocate */ - int ps_increasing; /* is allocating new pool */ - int ps_pool_size; /* new pool size */ - int ps_cpt; /* CPT id */ - - kib_ps_pool_create_t ps_pool_create; /* create a new pool */ - kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */ - kib_ps_node_init_t ps_node_init; 
/* initialize new allocated node */ - kib_ps_node_fini_t ps_node_fini; /* finalize node */ -}; - -struct kib_pool { - struct list_head po_list; /* chain on pool list */ - struct list_head po_free_list; /* pre-allocated node */ - struct kib_poolset *po_owner; /* pool_set of this pool */ - unsigned long po_deadline; /* deadline of this pool */ - int po_allocated; /* # of elements in use */ - int po_failed; /* pool is created on failed HCA */ - int po_size; /* # of pre-allocated elements */ -}; - -struct kib_tx_poolset { - struct kib_poolset tps_poolset; /* pool-set */ - __u64 tps_next_tx_cookie; /* cookie of TX */ -}; - -struct kib_tx_pool { - struct kib_pool tpo_pool; /* pool */ - struct kib_hca_dev *tpo_hdev; /* device for this pool */ - struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ - struct kib_pages *tpo_tx_pages; /* premapped tx msg pages */ -}; - -struct kib_fmr_poolset { - spinlock_t fps_lock; /* serialize */ - struct kib_net *fps_net; /* IB network */ - struct list_head fps_pool_list; /* FMR pool list */ - struct list_head fps_failed_pool_list;/* FMR pool list */ - __u64 fps_version; /* validity stamp */ - int fps_cpt; /* CPT id */ - int fps_pool_size; - int fps_flush_trigger; - int fps_cache; - int fps_increasing; /* is allocating new pool */ - unsigned long fps_next_retry; /* time stamp for retry if*/ - /* failed to allocate */ -}; - -struct kib_fast_reg_descriptor { /* For fast registration */ - struct list_head frd_list; - struct ib_send_wr frd_inv_wr; - struct ib_reg_wr frd_fastreg_wr; - struct ib_mr *frd_mr; - bool frd_valid; -}; - -struct kib_fmr_pool { - struct list_head fpo_list; /* chain on pool list */ - struct kib_hca_dev *fpo_hdev; /* device for this pool */ - struct kib_fmr_poolset *fpo_owner; /* owner of this pool */ - union { - struct { - struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ - } fmr; - struct { /* For fast registration */ - struct list_head fpo_pool_list; - int fpo_pool_size; - } fast_reg; - }; - unsigned long 
fpo_deadline; /* deadline of this pool */ - int fpo_failed; /* fmr pool is failed */ - int fpo_map_count; /* # of mapped FMR */ - int fpo_is_fmr; -}; - -struct kib_fmr { - struct kib_fmr_pool *fmr_pool; /* pool of FMR */ - struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ - struct kib_fast_reg_descriptor *fmr_frd; - u32 fmr_key; -}; - -struct kib_net { - struct list_head ibn_list; /* chain on struct kib_dev::ibd_nets */ - __u64 ibn_incarnation;/* my epoch */ - int ibn_init; /* initialisation state */ - int ibn_shutdown; /* shutting down? */ - - atomic_t ibn_npeers; /* # peers extant */ - atomic_t ibn_nconns; /* # connections extant */ - - struct kib_tx_poolset **ibn_tx_ps; /* tx pool-set */ - struct kib_fmr_poolset **ibn_fmr_ps; /* fmr pool-set */ - - struct kib_dev *ibn_dev; /* underlying IB device */ -}; - -#define KIB_THREAD_SHIFT 16 -#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid)) -#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT) -#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1)) - -struct kib_sched_info { - spinlock_t ibs_lock; /* serialise */ - wait_queue_head_t ibs_waitq; /* schedulers sleep here */ - struct list_head ibs_conns; /* conns to check for rx completions */ - int ibs_nthreads; /* number of scheduler threads */ - int ibs_nthreads_max; /* max allowed scheduler threads */ - int ibs_cpt; /* CPT id */ -}; - -struct kib_data { - int kib_init; /* initialisation state */ - int kib_shutdown; /* shut down? 
*/ - struct list_head kib_devs; /* IB devices extant */ - struct list_head kib_failed_devs; /* list head of failed devices */ - wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */ - atomic_t kib_nthreads; /* # live threads */ - rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */ - struct list_head *kib_peers; /* hash table of all my known peers */ - int kib_peer_hash_size; /* size of kib_peers */ - void *kib_connd; /* the connd task (serialisation assertions) */ - struct list_head kib_connd_conns; /* connections to setup/teardown */ - struct list_head kib_connd_zombies; /* connections with zero refcount */ - /* connections to reconnect */ - struct list_head kib_reconn_list; - /* peers wait for reconnection */ - struct list_head kib_reconn_wait; - /** - * The second that peers are pulled out from \a kib_reconn_wait - * for reconnection. - */ - time64_t kib_reconn_sec; - - wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */ - spinlock_t kib_connd_lock; /* serialise */ - struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ - struct kib_sched_info **kib_scheds; /* percpt data for schedulers */ -}; - -#define IBLND_INIT_NOTHING 0 -#define IBLND_INIT_DATA 1 -#define IBLND_INIT_ALL 2 - -/************************************************************************ - * IB Wire message format. - * These are sent in sender's byte order (i.e. receiver flips). - */ - -struct kib_connparams { - __u16 ibcp_queue_depth; - __u16 ibcp_max_frags; - __u32 ibcp_max_msg_size; -} WIRE_ATTR; - -struct kib_immediate_msg { - struct lnet_hdr ibim_hdr; /* portals header */ - char ibim_payload[0]; /* piggy-backed payload */ -} WIRE_ATTR; - -struct kib_rdma_frag { - __u32 rf_nob; /* # bytes this frag */ - __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! 
*/ -} WIRE_ATTR; - -struct kib_rdma_desc { - __u32 rd_key; /* local/remote key */ - __u32 rd_nfrags; /* # fragments */ - struct kib_rdma_frag rd_frags[0]; /* buffer frags */ -} WIRE_ATTR; - -struct kib_putreq_msg { - struct lnet_hdr ibprm_hdr; /* portals header */ - __u64 ibprm_cookie; /* opaque completion cookie */ -} WIRE_ATTR; - -struct kib_putack_msg { - __u64 ibpam_src_cookie; /* reflected completion cookie */ - __u64 ibpam_dst_cookie; /* opaque completion cookie */ - struct kib_rdma_desc ibpam_rd; /* sender's sink buffer */ -} WIRE_ATTR; - -struct kib_get_msg { - struct lnet_hdr ibgm_hdr; /* portals header */ - __u64 ibgm_cookie; /* opaque completion cookie */ - struct kib_rdma_desc ibgm_rd; /* rdma descriptor */ -} WIRE_ATTR; - -struct kib_completion_msg { - __u64 ibcm_cookie; /* opaque completion cookie */ - __s32 ibcm_status; /* < 0 failure: >= 0 length */ -} WIRE_ATTR; - -struct kib_msg { - /* First 2 fields fixed FOR ALL TIME */ - __u32 ibm_magic; /* I'm an ibnal message */ - __u16 ibm_version; /* this is my version number */ - - __u8 ibm_type; /* msg type */ - __u8 ibm_credits; /* returned credits */ - __u32 ibm_nob; /* # bytes in whole message */ - __u32 ibm_cksum; /* checksum (0 == no checksum) */ - __u64 ibm_srcnid; /* sender's NID */ - __u64 ibm_srcstamp; /* sender's incarnation */ - __u64 ibm_dstnid; /* destination's NID */ - __u64 ibm_dststamp; /* destination's incarnation */ - - union { - struct kib_connparams connparams; - struct kib_immediate_msg immediate; - struct kib_putreq_msg putreq; - struct kib_putack_msg putack; - struct kib_get_msg get; - struct kib_completion_msg completion; - } WIRE_ATTR ibm_u; -} WIRE_ATTR; - -#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */ - -#define IBLND_MSG_VERSION_1 0x11 -#define IBLND_MSG_VERSION_2 0x12 -#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2 - -#define IBLND_MSG_CONNREQ 0xc0 /* connection request */ -#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */ -#define IBLND_MSG_NOOP 0xd0 
/* nothing (just credits) */ -#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */ -#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */ -#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */ -#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */ -#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */ -#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */ -#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */ - -struct kib_rej { - __u32 ibr_magic; /* sender's magic */ - __u16 ibr_version; /* sender's version */ - __u8 ibr_why; /* reject reason */ - __u8 ibr_padding; /* padding */ - __u64 ibr_incarnation; /* incarnation of peer */ - struct kib_connparams ibr_cp; /* connection parameters */ -} WIRE_ATTR; - -/* connection rejection reasons */ -#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */ -#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */ -#define IBLND_REJECT_FATAL 3 /* Anything else */ -#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ -#define IBLND_REJECT_CONN_STALE 5 /* stale peer */ -/* peer's rdma frags doesn't match mine */ -#define IBLND_REJECT_RDMA_FRAGS 6 -/* peer's msg queue size doesn't match mine */ -#define IBLND_REJECT_MSG_QUEUE_SIZE 7 - -/***********************************************************************/ - -struct kib_rx { /* receive message */ - struct list_head rx_list; /* queue for attention */ - struct kib_conn *rx_conn; /* owning conn */ - int rx_nob; /* # bytes received (-1 while posted) */ - enum ib_wc_status rx_status; /* completion status */ - struct kib_msg *rx_msg; /* message buffer (host vaddr) */ - __u64 rx_msgaddr; /* message buffer (I/O addr) */ - DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */ - struct ib_recv_wr rx_wrq; /* receive work item... 
*/ - struct ib_sge rx_sge; /* ...and its memory */ -}; - -#define IBLND_POSTRX_DONT_POST 0 /* don't post */ -#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */ -#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */ -#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */ - -struct kib_tx { /* transmit message */ - struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */ - struct kib_tx_pool *tx_pool; /* pool I'm from */ - struct kib_conn *tx_conn; /* owning conn */ - short tx_sending; /* # tx callbacks outstanding */ - short tx_queued; /* queued for sending */ - short tx_waiting; /* waiting for peer */ - int tx_status; /* LNET completion status */ - unsigned long tx_deadline; /* completion deadline */ - __u64 tx_cookie; /* completion cookie */ - struct lnet_msg *tx_lntmsg[2]; /* lnet msgs to finalize on completion */ - struct kib_msg *tx_msg; /* message buffer (host vaddr) */ - __u64 tx_msgaddr; /* message buffer (I/O addr) */ - DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */ - int tx_nwrq; /* # send work items */ - struct ib_rdma_wr *tx_wrq; /* send work items... */ - struct ib_sge *tx_sge; /* ...and their memory */ - struct kib_rdma_desc *tx_rd; /* rdma descriptor */ - int tx_nfrags; /* # entries in... 
*/ - struct scatterlist *tx_frags; /* dma_map_sg descriptor */ - __u64 *tx_pages; /* rdma phys page addrs */ - struct kib_fmr fmr; /* FMR */ - int tx_dmadir; /* dma direction */ -}; - -struct kib_connvars { - struct kib_msg cv_msg; /* connection-in-progress variables */ -}; - -struct kib_conn { - struct kib_sched_info *ibc_sched; /* scheduler information */ - struct kib_peer *ibc_peer; /* owning peer */ - struct kib_hca_dev *ibc_hdev; /* HCA bound on */ - struct list_head ibc_list; /* stash on peer's conn list */ - struct list_head ibc_sched_list; /* schedule for attention */ - __u16 ibc_version; /* version of connection */ - /* reconnect later */ - __u16 ibc_reconnect:1; - __u64 ibc_incarnation; /* which instance of the peer */ - atomic_t ibc_refcount; /* # users */ - int ibc_state; /* what's happening */ - int ibc_nsends_posted; /* # uncompleted sends */ - int ibc_noops_posted; /* # uncompleted NOOPs */ - int ibc_credits; /* # credits I have */ - int ibc_outstanding_credits; /* # credits to return */ - int ibc_reserved_credits; /* # ACK/DONE msg credits */ - int ibc_comms_error; /* set on comms error */ - /* connections queue depth */ - __u16 ibc_queue_depth; - /* connections max frags */ - __u16 ibc_max_frags; - unsigned int ibc_nrx:16; /* receive buffers owned */ - unsigned int ibc_scheduled:1; /* scheduled for attention */ - unsigned int ibc_ready:1; /* CQ callback fired */ - unsigned long ibc_last_send; /* time of last send */ - struct list_head ibc_connd_list; /* link chain for */ - /* kiblnd_check_conns only */ - struct list_head ibc_early_rxs; /* rxs completed before ESTABLISHED */ - struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs for */ - /* IBLND_MSG_VERSION_1 */ - struct list_head ibc_tx_queue; /* sends that need a credit */ - struct list_head ibc_tx_queue_nocred; /* sends that don't need a */ - /* credit */ - struct list_head ibc_tx_queue_rsrvd; /* sends that need to */ - /* reserve an ACK/DONE msg */ - struct list_head ibc_active_txs; /* active tx 
awaiting completion */ - spinlock_t ibc_lock; /* serialise */ - struct kib_rx *ibc_rxs; /* the rx descs */ - struct kib_pages *ibc_rx_pages; /* premapped rx msg pages */ - - struct rdma_cm_id *ibc_cmid; /* CM id */ - struct ib_cq *ibc_cq; /* completion queue */ - - struct kib_connvars *ibc_connvars; /* in-progress connection state */ -}; - -#define IBLND_CONN_INIT 0 /* being initialised */ -#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */ -#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */ -#define IBLND_CONN_ESTABLISHED 3 /* connection established */ -#define IBLND_CONN_CLOSING 4 /* being closed */ -#define IBLND_CONN_DISCONNECTED 5 /* disconnected */ - -struct kib_peer { - struct list_head ibp_list; /* stash on global peer list */ - lnet_nid_t ibp_nid; /* who's on the other end(s) */ - struct lnet_ni *ibp_ni; /* LNet interface */ - struct list_head ibp_conns; /* all active connections */ - struct kib_conn *ibp_next_conn; /* next connection to send on for - * round robin */ - struct list_head ibp_tx_queue; /* msgs waiting for a conn */ - __u64 ibp_incarnation; /* incarnation of peer */ - /* when (in jiffies) I was last alive */ - unsigned long ibp_last_alive; - /* # users */ - atomic_t ibp_refcount; - /* version of peer */ - __u16 ibp_version; - /* current passive connection attempts */ - unsigned short ibp_accepting; - /* current active connection attempts */ - unsigned short ibp_connecting; - /* reconnect this peer later */ - unsigned char ibp_reconnecting; - /* counter of how many times we triggered a conn race */ - unsigned char ibp_races; - /* # consecutive reconnection attempts to this peer */ - unsigned int ibp_reconnected; - /* errno on closing this peer */ - int ibp_error; - /* max map_on_demand */ - __u16 ibp_max_frags; - /* max_peer_credits */ - __u16 ibp_queue_depth; -}; - -extern struct kib_data kiblnd_data; - -void kiblnd_hdev_destroy(struct kib_hca_dev *hdev); - -int kiblnd_msg_queue_size(int version, struct lnet_ni *ni); - 
-/* max # of fragments configured by user */ -static inline int -kiblnd_cfg_rdma_frags(struct lnet_ni *ni) -{ - struct lnet_ioctl_config_o2iblnd_tunables *tunables; - int mod; - - tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; - mod = tunables->lnd_map_on_demand; - return mod ? mod : IBLND_MAX_RDMA_FRAGS >> IBLND_FRAG_SHIFT; -} - -static inline int -kiblnd_rdma_frags(int version, struct lnet_ni *ni) -{ - return version == IBLND_MSG_VERSION_1 ? - (IBLND_MAX_RDMA_FRAGS >> IBLND_FRAG_SHIFT) : - kiblnd_cfg_rdma_frags(ni); -} - -static inline int -kiblnd_concurrent_sends(int version, struct lnet_ni *ni) -{ - struct lnet_ioctl_config_o2iblnd_tunables *tunables; - int concurrent_sends; - - tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; - concurrent_sends = tunables->lnd_concurrent_sends; - - if (version == IBLND_MSG_VERSION_1) { - if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2) - return IBLND_MSG_QUEUE_SIZE_V1 * 2; - - if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2) - return IBLND_MSG_QUEUE_SIZE_V1 / 2; - } - - return concurrent_sends; -} - -static inline void -kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev) -{ - LASSERT(atomic_read(&hdev->ibh_ref) > 0); - atomic_inc(&hdev->ibh_ref); -} - -static inline void -kiblnd_hdev_decref(struct kib_hca_dev *hdev) -{ - LASSERT(atomic_read(&hdev->ibh_ref) > 0); - if (atomic_dec_and_test(&hdev->ibh_ref)) - kiblnd_hdev_destroy(hdev); -} - -static inline int -kiblnd_dev_can_failover(struct kib_dev *dev) -{ - if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */ - return 0; - - if (!*kiblnd_tunables.kib_dev_failover) /* disabled */ - return 0; - - if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */ - return 1; - - return dev->ibd_can_failover; -} - -#define kiblnd_conn_addref(conn) \ -do { \ - CDEBUG(D_NET, "conn[%p] (%d)++\n", \ - (conn), atomic_read(&(conn)->ibc_refcount)); \ - atomic_inc(&(conn)->ibc_refcount); \ -} while (0) - -#define kiblnd_conn_decref(conn) \ -do { \ - unsigned long flags; \ 
- \ - CDEBUG(D_NET, "conn[%p] (%d)--\n", \ - (conn), atomic_read(&(conn)->ibc_refcount)); \ - LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \ - if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \ - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \ - list_add_tail(&(conn)->ibc_list, \ - &kiblnd_data.kib_connd_zombies); \ - wake_up(&kiblnd_data.kib_connd_waitq); \ - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\ - } \ -} while (0) - -#define kiblnd_peer_addref(peer) \ -do { \ - CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \ - (peer), libcfs_nid2str((peer)->ibp_nid), \ - atomic_read(&(peer)->ibp_refcount)); \ - atomic_inc(&(peer)->ibp_refcount); \ -} while (0) - -#define kiblnd_peer_decref(peer) \ -do { \ - CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \ - (peer), libcfs_nid2str((peer)->ibp_nid), \ - atomic_read(&(peer)->ibp_refcount)); \ - LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \ - if (atomic_dec_and_test(&(peer)->ibp_refcount)) \ - kiblnd_destroy_peer(peer); \ -} while (0) - -static inline bool -kiblnd_peer_connecting(struct kib_peer *peer) -{ - return peer->ibp_connecting || - peer->ibp_reconnecting || - peer->ibp_accepting; -} - -static inline bool -kiblnd_peer_idle(struct kib_peer *peer) -{ - return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns); -} - -static inline struct list_head * -kiblnd_nid2peerlist(lnet_nid_t nid) -{ - unsigned int hash = - ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size; - - return &kiblnd_data.kib_peers[hash]; -} - -static inline int -kiblnd_peer_active(struct kib_peer *peer) -{ - /* Am I in the peer hash table? 
*/ - return !list_empty(&peer->ibp_list); -} - -static inline struct kib_conn * -kiblnd_get_conn_locked(struct kib_peer *peer) -{ - struct list_head *next; - - LASSERT(!list_empty(&peer->ibp_conns)); - - /* Advance to next connection, be sure to skip the head node */ - if (!peer->ibp_next_conn || - peer->ibp_next_conn->ibc_list.next == &peer->ibp_conns) - next = peer->ibp_conns.next; - else - next = peer->ibp_next_conn->ibc_list.next; - peer->ibp_next_conn = list_entry(next, struct kib_conn, ibc_list); - - return peer->ibp_next_conn; -} - -static inline int -kiblnd_send_keepalive(struct kib_conn *conn) -{ - return (*kiblnd_tunables.kib_keepalive > 0) && - time_after(jiffies, conn->ibc_last_send + - msecs_to_jiffies(*kiblnd_tunables.kib_keepalive * - MSEC_PER_SEC)); -} - -static inline int -kiblnd_need_noop(struct kib_conn *conn) -{ - struct lnet_ioctl_config_o2iblnd_tunables *tunables; - struct lnet_ni *ni = conn->ibc_peer->ibp_ni; - - LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); - tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; - - if (conn->ibc_outstanding_credits < - IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) && - !kiblnd_send_keepalive(conn)) - return 0; /* No need to send NOOP */ - - if (IBLND_OOB_CAPABLE(conn->ibc_version)) { - if (!list_empty(&conn->ibc_tx_queue_nocred)) - return 0; /* NOOP can be piggybacked */ - - /* No tx to piggyback NOOP onto or no credit to send a tx */ - return (list_empty(&conn->ibc_tx_queue) || - !conn->ibc_credits); - } - - if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */ - !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */ - !conn->ibc_credits) /* no credit */ - return 0; - - if (conn->ibc_credits == 1 && /* last credit reserved for */ - !conn->ibc_outstanding_credits) /* giving back credits */ - return 0; - - /* No tx to piggyback NOOP onto or no credit to send a tx */ - return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1); -} - -static inline void 
-kiblnd_abort_receives(struct kib_conn *conn) -{ - ib_modify_qp(conn->ibc_cmid->qp, - &kiblnd_data.kib_error_qpa, IB_QP_STATE); -} - -static inline const char * -kiblnd_queue2str(struct kib_conn *conn, struct list_head *q) -{ - if (q == &conn->ibc_tx_queue) - return "tx_queue"; - - if (q == &conn->ibc_tx_queue_rsrvd) - return "tx_queue_rsrvd"; - - if (q == &conn->ibc_tx_queue_nocred) - return "tx_queue_nocred"; - - if (q == &conn->ibc_active_txs) - return "active_txs"; - - LBUG(); - return NULL; -} - -/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */ -/* lowest bits of the work request id to stash the work item type. */ - -#define IBLND_WID_INVAL 0 -#define IBLND_WID_TX 1 -#define IBLND_WID_RX 2 -#define IBLND_WID_RDMA 3 -#define IBLND_WID_MR 4 -#define IBLND_WID_MASK 7UL - -static inline __u64 -kiblnd_ptr2wreqid(void *ptr, int type) -{ - unsigned long lptr = (unsigned long)ptr; - - LASSERT(!(lptr & IBLND_WID_MASK)); - LASSERT(!(type & ~IBLND_WID_MASK)); - return (__u64)(lptr | type); -} - -static inline void * -kiblnd_wreqid2ptr(__u64 wreqid) -{ - return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK); -} - -static inline int -kiblnd_wreqid2type(__u64 wreqid) -{ - return wreqid & IBLND_WID_MASK; -} - -static inline void -kiblnd_set_conn_state(struct kib_conn *conn, int state) -{ - conn->ibc_state = state; - mb(); -} - -static inline void -kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob) -{ - msg->ibm_type = type; - msg->ibm_nob = offsetof(struct kib_msg, ibm_u) + body_nob; -} - -static inline int -kiblnd_rd_size(struct kib_rdma_desc *rd) -{ - int i; - int size; - - for (i = size = 0; i < rd->rd_nfrags; i++) - size += rd->rd_frags[i].rf_nob; - - return size; -} - -static inline __u64 -kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index) -{ - return rd->rd_frags[index].rf_addr; -} - -static inline __u32 -kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index) -{ - return rd->rd_frags[index].rf_nob; -} - -static inline 
__u32 -kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index) -{ - return rd->rd_key; -} - -static inline int -kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob) -{ - if (nob < rd->rd_frags[index].rf_nob) { - rd->rd_frags[index].rf_addr += nob; - rd->rd_frags[index].rf_nob -= nob; - } else { - index++; - } - - return index; -} - -static inline int -kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n) -{ - LASSERT(msgtype == IBLND_MSG_GET_REQ || - msgtype == IBLND_MSG_PUT_ACK); - - return msgtype == IBLND_MSG_GET_REQ ? - offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) : - offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]); -} - -static inline __u64 -kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return ib_dma_mapping_error(dev, dma_addr); -} - -static inline __u64 kiblnd_dma_map_single(struct ib_device *dev, - void *msg, size_t size, - enum dma_data_direction direction) -{ - return ib_dma_map_single(dev, msg, size, direction); -} - -static inline void kiblnd_dma_unmap_single(struct ib_device *dev, - __u64 addr, size_t size, - enum dma_data_direction direction) -{ - ib_dma_unmap_single(dev, addr, size, direction); -} - -#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0) -#define KIBLND_UNMAP_ADDR(p, m, a) (a) - -static inline int kiblnd_dma_map_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - return ib_dma_map_sg(dev, sg, nents, direction); -} - -static inline void kiblnd_dma_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - ib_dma_unmap_sg(dev, sg, nents, direction); -} - -static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev, - struct scatterlist *sg) -{ - return ib_sg_dma_address(dev, sg); -} - -static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev, - struct scatterlist *sg) -{ - return ib_sg_dma_len(dev, sg); -} - -/* XXX We use KIBLND_CONN_PARAM(e) as writable 
buffer, it's not strictly */ -/* right because OFED1.2 defines it as const, to use it we have to add */ -/* (void *) cast to overcome "const" */ - -#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data) -#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len) - -void kiblnd_map_rx_descs(struct kib_conn *conn); -void kiblnd_unmap_rx_descs(struct kib_conn *conn); -void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node); -struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps); - -int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, - struct kib_rdma_desc *rd, __u32 nob, __u64 iov, - struct kib_fmr *fmr); -void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status); - -int kiblnd_tunables_setup(struct lnet_ni *ni); -void kiblnd_tunables_init(void); - -int kiblnd_connd(void *arg); -int kiblnd_scheduler(void *arg); -int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name); -int kiblnd_failover_thread(void *arg); - -int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages); - -int kiblnd_cm_callback(struct rdma_cm_id *cmid, - struct rdma_cm_event *event); -int kiblnd_translate_mtu(int value); - -int kiblnd_dev_failover(struct kib_dev *dev); -int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp, - lnet_nid_t nid); -void kiblnd_destroy_peer(struct kib_peer *peer); -bool kiblnd_reconnect_peer(struct kib_peer *peer); -void kiblnd_destroy_dev(struct kib_dev *dev); -void kiblnd_unlink_peer_locked(struct kib_peer *peer); -struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid); -int kiblnd_close_stale_conns_locked(struct kib_peer *peer, - int version, __u64 incarnation); -int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why); - -struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, - struct rdma_cm_id *cmid, - int state, int version); -void kiblnd_destroy_conn(struct kib_conn *conn); -void kiblnd_close_conn(struct kib_conn *conn, int error); -void 
kiblnd_close_conn_locked(struct kib_conn *conn, int error); - -void kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid); -void kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist, - int status); - -void kiblnd_qp_event(struct ib_event *event, void *arg); -void kiblnd_cq_event(struct ib_event *event, void *arg); -void kiblnd_cq_completion(struct ib_cq *cq, void *arg); - -void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version, - int credits, lnet_nid_t dstnid, __u64 dststamp); -int kiblnd_unpack_msg(struct kib_msg *msg, int nob); -int kiblnd_post_rx(struct kib_rx *rx, int credit); - -int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg); -int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, - int delayed, struct iov_iter *to, unsigned int rlen); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c deleted file mode 100644 index 65b7a62943ad..000000000000 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ /dev/null @@ -1,3763 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/klnds/o2iblnd/o2iblnd_cb.c - * - * Author: Eric Barton - */ - -#include -#include "o2iblnd.h" - -#define MAX_CONN_RACES_BEFORE_ABORT 20 - -static void kiblnd_peer_alive(struct kib_peer *peer); -static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error); -static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, - int type, int body_nob); -static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, - int resid, struct kib_rdma_desc *dstrd, - __u64 dstcookie); -static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn); -static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn); -static void kiblnd_unmap_tx(struct kib_tx *tx); -static void kiblnd_check_sends_locked(struct kib_conn *conn); - -static void -kiblnd_tx_done(struct lnet_ni *ni, struct kib_tx *tx) -{ - struct lnet_msg *lntmsg[2]; - struct kib_net *net = ni->ni_data; - int rc; - int i; - - LASSERT(net); - LASSERT(!in_interrupt()); - LASSERT(!tx->tx_queued); /* mustn't be queued for sending */ - LASSERT(!tx->tx_sending); /* mustn't be awaiting sent callback */ - LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ - LASSERT(tx->tx_pool); - - kiblnd_unmap_tx(tx); - - /* tx may have up to 2 lnet msgs to finalise */ - lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; - lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL; - rc = tx->tx_status; - - if (tx->tx_conn) { - LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni); 
- - kiblnd_conn_decref(tx->tx_conn); - tx->tx_conn = NULL; - } - - tx->tx_nwrq = 0; - tx->tx_status = 0; - - kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); - - /* delay finalize until my descs have been freed */ - for (i = 0; i < 2; i++) { - if (!lntmsg[i]) - continue; - - lnet_finalize(ni, lntmsg[i], rc); - } -} - -void -kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int status) -{ - struct kib_tx *tx; - - while (!list_empty(txlist)) { - tx = list_entry(txlist->next, struct kib_tx, tx_list); - - list_del(&tx->tx_list); - /* complete now */ - tx->tx_waiting = 0; - tx->tx_status = status; - kiblnd_tx_done(ni, tx); - } -} - -static struct kib_tx * -kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target) -{ - struct kib_net *net = (struct kib_net *)ni->ni_data; - struct list_head *node; - struct kib_tx *tx; - struct kib_tx_poolset *tps; - - tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)]; - node = kiblnd_pool_alloc_node(&tps->tps_poolset); - if (!node) - return NULL; - tx = list_entry(node, struct kib_tx, tx_list); - - LASSERT(!tx->tx_nwrq); - LASSERT(!tx->tx_queued); - LASSERT(!tx->tx_sending); - LASSERT(!tx->tx_waiting); - LASSERT(!tx->tx_status); - LASSERT(!tx->tx_conn); - LASSERT(!tx->tx_lntmsg[0]); - LASSERT(!tx->tx_lntmsg[1]); - LASSERT(!tx->tx_nfrags); - - return tx; -} - -static void -kiblnd_drop_rx(struct kib_rx *rx) -{ - struct kib_conn *conn = rx->rx_conn; - struct kib_sched_info *sched = conn->ibc_sched; - unsigned long flags; - - spin_lock_irqsave(&sched->ibs_lock, flags); - LASSERT(conn->ibc_nrx > 0); - conn->ibc_nrx--; - spin_unlock_irqrestore(&sched->ibs_lock, flags); - - kiblnd_conn_decref(conn); -} - -int -kiblnd_post_rx(struct kib_rx *rx, int credit) -{ - struct kib_conn *conn = rx->rx_conn; - struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data; - struct ib_recv_wr *bad_wrq = NULL; - int rc; - - LASSERT(net); - LASSERT(!in_interrupt()); - LASSERT(credit == IBLND_POSTRX_NO_CREDIT || - credit == 
IBLND_POSTRX_PEER_CREDIT || - credit == IBLND_POSTRX_RSRVD_CREDIT); - - rx->rx_sge.lkey = conn->ibc_hdev->ibh_pd->local_dma_lkey; - rx->rx_sge.addr = rx->rx_msgaddr; - rx->rx_sge.length = IBLND_MSG_SIZE; - - rx->rx_wrq.next = NULL; - rx->rx_wrq.sg_list = &rx->rx_sge; - rx->rx_wrq.num_sge = 1; - rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX); - - LASSERT(conn->ibc_state >= IBLND_CONN_INIT); - LASSERT(rx->rx_nob >= 0); /* not posted */ - - if (conn->ibc_state > IBLND_CONN_ESTABLISHED) { - kiblnd_drop_rx(rx); /* No more posts for this rx */ - return 0; - } - - rx->rx_nob = -1; /* flag posted */ - - /* NB: need an extra reference after ib_post_recv because we don't - * own this rx (and rx::rx_conn) anymore, LU-5678. - */ - kiblnd_conn_addref(conn); - rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); - if (unlikely(rc)) { - CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); - rx->rx_nob = 0; - } - - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ - goto out; - - if (unlikely(rc)) { - kiblnd_close_conn(conn, rc); - kiblnd_drop_rx(rx); /* No more posts for this rx */ - goto out; - } - - if (credit == IBLND_POSTRX_NO_CREDIT) - goto out; - - spin_lock(&conn->ibc_lock); - if (credit == IBLND_POSTRX_PEER_CREDIT) - conn->ibc_outstanding_credits++; - else - conn->ibc_reserved_credits++; - kiblnd_check_sends_locked(conn); - spin_unlock(&conn->ibc_lock); - -out: - kiblnd_conn_decref(conn); - return rc; -} - -static struct kib_tx * -kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, __u64 cookie) -{ - struct list_head *tmp; - - list_for_each(tmp, &conn->ibc_active_txs) { - struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list); - - LASSERT(!tx->tx_queued); - LASSERT(tx->tx_sending || tx->tx_waiting); - - if (tx->tx_cookie != cookie) - continue; - - if (tx->tx_waiting && - tx->tx_msg->ibm_type == txtype) - return tx; - - CWARN("Bad completion: %swaiting, type %x (wanted 
%x)\n", - tx->tx_waiting ? "" : "NOT ", - tx->tx_msg->ibm_type, txtype); - } - return NULL; -} - -static void -kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, __u64 cookie) -{ - struct kib_tx *tx; - struct lnet_ni *ni = conn->ibc_peer->ibp_ni; - int idle; - - spin_lock(&conn->ibc_lock); - - tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); - if (!tx) { - spin_unlock(&conn->ibc_lock); - - CWARN("Unmatched completion type %x cookie %#llx from %s\n", - txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid)); - kiblnd_close_conn(conn, -EPROTO); - return; - } - - if (!tx->tx_status) { /* success so far */ - if (status < 0) /* failed? */ - tx->tx_status = status; - else if (txtype == IBLND_MSG_GET_REQ) - lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status); - } - - tx->tx_waiting = 0; - - idle = !tx->tx_queued && !tx->tx_sending; - if (idle) - list_del(&tx->tx_list); - - spin_unlock(&conn->ibc_lock); - - if (idle) - kiblnd_tx_done(ni, tx); -} - -static void -kiblnd_send_completion(struct kib_conn *conn, int type, int status, __u64 cookie) -{ - struct lnet_ni *ni = conn->ibc_peer->ibp_ni; - struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - - if (!tx) { - CERROR("Can't get tx for completion %x for %s\n", - type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); - return; - } - - tx->tx_msg->ibm_u.completion.ibcm_status = status; - tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie; - kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg)); - - kiblnd_queue_tx(tx, conn); -} - -static void -kiblnd_handle_rx(struct kib_rx *rx) -{ - struct kib_msg *msg = rx->rx_msg; - struct kib_conn *conn = rx->rx_conn; - struct lnet_ni *ni = conn->ibc_peer->ibp_ni; - int credits = msg->ibm_credits; - struct kib_tx *tx; - int rc = 0; - int rc2; - int post_credit; - - LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); - - CDEBUG(D_NET, "Received %x[%d] from %s\n", - msg->ibm_type, credits, - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - - 
if (credits) { - /* Have I received credits that will let me send? */ - spin_lock(&conn->ibc_lock); - - if (conn->ibc_credits + credits > - conn->ibc_queue_depth) { - rc2 = conn->ibc_credits; - spin_unlock(&conn->ibc_lock); - - CERROR("Bad credits from %s: %d + %d > %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc2, credits, conn->ibc_queue_depth); - - kiblnd_close_conn(conn, -EPROTO); - kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT); - return; - } - - conn->ibc_credits += credits; - - /* This ensures the credit taken by NOOP can be returned */ - if (msg->ibm_type == IBLND_MSG_NOOP && - !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */ - conn->ibc_outstanding_credits++; - - kiblnd_check_sends_locked(conn); - spin_unlock(&conn->ibc_lock); - } - - switch (msg->ibm_type) { - default: - CERROR("Bad IBLND message type %x from %s\n", - msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); - post_credit = IBLND_POSTRX_NO_CREDIT; - rc = -EPROTO; - break; - - case IBLND_MSG_NOOP: - if (IBLND_OOB_CAPABLE(conn->ibc_version)) { - post_credit = IBLND_POSTRX_NO_CREDIT; - break; - } - - if (credits) /* credit already posted */ - post_credit = IBLND_POSTRX_NO_CREDIT; - else /* a keepalive NOOP */ - post_credit = IBLND_POSTRX_PEER_CREDIT; - break; - - case IBLND_MSG_IMMEDIATE: - post_credit = IBLND_POSTRX_DONT_POST; - rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr, - msg->ibm_srcnid, rx, 0); - if (rc < 0) /* repost on error */ - post_credit = IBLND_POSTRX_PEER_CREDIT; - break; - - case IBLND_MSG_PUT_REQ: - post_credit = IBLND_POSTRX_DONT_POST; - rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr, - msg->ibm_srcnid, rx, 1); - if (rc < 0) /* repost on error */ - post_credit = IBLND_POSTRX_PEER_CREDIT; - break; - - case IBLND_MSG_PUT_NAK: - CWARN("PUT_NACK from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - post_credit = IBLND_POSTRX_RSRVD_CREDIT; - kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ, - msg->ibm_u.completion.ibcm_status, - 
msg->ibm_u.completion.ibcm_cookie); - break; - - case IBLND_MSG_PUT_ACK: - post_credit = IBLND_POSTRX_RSRVD_CREDIT; - - spin_lock(&conn->ibc_lock); - tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, - msg->ibm_u.putack.ibpam_src_cookie); - if (tx) - list_del(&tx->tx_list); - spin_unlock(&conn->ibc_lock); - - if (!tx) { - CERROR("Unmatched PUT_ACK from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - rc = -EPROTO; - break; - } - - LASSERT(tx->tx_waiting); - /* - * CAVEAT EMPTOR: I could be racing with tx_complete, but... - * (a) I can overwrite tx_msg since my peer has received it! - * (b) tx_waiting set tells tx_complete() it's not done. - */ - tx->tx_nwrq = 0; /* overwrite PUT_REQ */ - - rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE, - kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd), - &msg->ibm_u.putack.ibpam_rd, - msg->ibm_u.putack.ibpam_dst_cookie); - if (rc2 < 0) - CERROR("Can't setup rdma for PUT to %s: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2); - - spin_lock(&conn->ibc_lock); - tx->tx_waiting = 0; /* clear waiting and queue atomically */ - kiblnd_queue_tx_locked(tx, conn); - spin_unlock(&conn->ibc_lock); - break; - - case IBLND_MSG_PUT_DONE: - post_credit = IBLND_POSTRX_PEER_CREDIT; - kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK, - msg->ibm_u.completion.ibcm_status, - msg->ibm_u.completion.ibcm_cookie); - break; - - case IBLND_MSG_GET_REQ: - post_credit = IBLND_POSTRX_DONT_POST; - rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr, - msg->ibm_srcnid, rx, 1); - if (rc < 0) /* repost on error */ - post_credit = IBLND_POSTRX_PEER_CREDIT; - break; - - case IBLND_MSG_GET_DONE: - post_credit = IBLND_POSTRX_RSRVD_CREDIT; - kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ, - msg->ibm_u.completion.ibcm_status, - msg->ibm_u.completion.ibcm_cookie); - break; - } - - if (rc < 0) /* protocol error */ - kiblnd_close_conn(conn, rc); - - if (post_credit != IBLND_POSTRX_DONT_POST) - kiblnd_post_rx(rx, post_credit); -} - -static void 
-kiblnd_rx_complete(struct kib_rx *rx, int status, int nob) -{ - struct kib_msg *msg = rx->rx_msg; - struct kib_conn *conn = rx->rx_conn; - struct lnet_ni *ni = conn->ibc_peer->ibp_ni; - struct kib_net *net = ni->ni_data; - int rc; - int err = -EIO; - - LASSERT(net); - LASSERT(rx->rx_nob < 0); /* was posted */ - rx->rx_nob = 0; /* isn't now */ - - if (conn->ibc_state > IBLND_CONN_ESTABLISHED) - goto ignore; - - if (status != IB_WC_SUCCESS) { - CNETERR("Rx from %s failed: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), status); - goto failed; - } - - LASSERT(nob >= 0); - rx->rx_nob = nob; - - rc = kiblnd_unpack_msg(msg, rx->rx_nob); - if (rc) { - CERROR("Error %d unpacking rx from %s\n", - rc, libcfs_nid2str(conn->ibc_peer->ibp_nid)); - goto failed; - } - - if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid || - msg->ibm_dstnid != ni->ni_nid || - msg->ibm_srcstamp != conn->ibc_incarnation || - msg->ibm_dststamp != net->ibn_incarnation) { - CERROR("Stale rx from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - err = -ESTALE; - goto failed; - } - - /* set time last known alive */ - kiblnd_peer_alive(conn->ibc_peer); - - /* racing with connection establishment/teardown! */ - - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - unsigned long flags; - - write_lock_irqsave(g_lock, flags); - /* must check holding global lock to eliminate race */ - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - list_add_tail(&rx->rx_list, &conn->ibc_early_rxs); - write_unlock_irqrestore(g_lock, flags); - return; - } - write_unlock_irqrestore(g_lock, flags); - } - kiblnd_handle_rx(rx); - return; - - failed: - CDEBUG(D_NET, "rx %p conn %p\n", rx, conn); - kiblnd_close_conn(conn, err); - ignore: - kiblnd_drop_rx(rx); /* Don't re-post rx. 
*/ -} - -static struct page * -kiblnd_kvaddr_to_page(unsigned long vaddr) -{ - struct page *page; - - if (is_vmalloc_addr((void *)vaddr)) { - page = vmalloc_to_page((void *)vaddr); - LASSERT(page); - return page; - } -#ifdef CONFIG_HIGHMEM - if (vaddr >= PKMAP_BASE && - vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) { - /* No highmem pages only used for bulk (kiov) I/O */ - CERROR("find page for address in highmem\n"); - LBUG(); - } -#endif - page = virt_to_page(vaddr); - LASSERT(page); - return page; -} - -static int -kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc *rd, __u32 nob) -{ - struct kib_hca_dev *hdev; - struct kib_fmr_poolset *fps; - int cpt; - int rc; - - LASSERT(tx->tx_pool); - LASSERT(tx->tx_pool->tpo_pool.po_owner); - - hdev = tx->tx_pool->tpo_hdev; - cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; - - fps = net->ibn_fmr_ps[cpt]; - rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr); - if (rc) { - CERROR("Can't map %u bytes: %d\n", nob, rc); - return rc; - } - - /* - * If rd is not tx_rd, it's going to get sent to a peer, who will need - * the rkey - */ - rd->rd_key = tx->fmr.fmr_key; - rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; - rd->rd_frags[0].rf_nob = nob; - rd->rd_nfrags = 1; - - return 0; -} - -static void kiblnd_unmap_tx(struct kib_tx *tx) -{ - if (tx->fmr.fmr_pfmr || tx->fmr.fmr_frd) - kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); - - if (tx->tx_nfrags) { - kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, - tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); - tx->tx_nfrags = 0; - } -} - -static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx, - struct kib_rdma_desc *rd, int nfrags) -{ - struct kib_net *net = ni->ni_data; - struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev; - __u32 nob; - int i; - - /* - * If rd is not tx_rd, it's going to get sent to a peer and I'm the - * RDMA sink - */ - tx->tx_dmadir = (rd != tx->tx_rd) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; - tx->tx_nfrags = nfrags; - - rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags, - tx->tx_nfrags, tx->tx_dmadir); - - for (i = 0, nob = 0; i < rd->rd_nfrags; i++) { - rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len( - hdev->ibh_ibdev, &tx->tx_frags[i]); - rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address( - hdev->ibh_ibdev, &tx->tx_frags[i]); - nob += rd->rd_frags[i].rf_nob; - } - - if (net->ibn_fmr_ps) - return kiblnd_fmr_map_tx(net, tx, rd, nob); - - return -EINVAL; -} - -static int -kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx, - struct kib_rdma_desc *rd, unsigned int niov, - const struct kvec *iov, int offset, int nob) -{ - struct kib_net *net = ni->ni_data; - struct page *page; - struct scatterlist *sg; - unsigned long vaddr; - int fragnob; - int page_offset; - - LASSERT(nob > 0); - LASSERT(niov > 0); - LASSERT(net); - - while (offset >= iov->iov_len) { - offset -= iov->iov_len; - niov--; - iov++; - LASSERT(niov > 0); - } - - sg = tx->tx_frags; - do { - LASSERT(niov > 0); - - vaddr = ((unsigned long)iov->iov_base) + offset; - page_offset = vaddr & (PAGE_SIZE - 1); - page = kiblnd_kvaddr_to_page(vaddr); - if (!page) { - CERROR("Can't find page\n"); - return -EFAULT; - } - - fragnob = min((int)(iov->iov_len - offset), nob); - fragnob = min(fragnob, (int)PAGE_SIZE - page_offset); - - sg_set_page(sg, page, fragnob, page_offset); - sg = sg_next(sg); - if (!sg) { - CERROR("lacking enough sg entries to map tx\n"); - return -EFAULT; - } - - if (offset + fragnob < iov->iov_len) { - offset += fragnob; - } else { - offset = 0; - iov++; - niov--; - } - nob -= fragnob; - } while (nob > 0); - - return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); -} - -static int -kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, - struct kib_rdma_desc *rd, int nkiov, - const struct bio_vec *kiov, int offset, int nob) -{ - struct kib_net *net = ni->ni_data; - struct scatterlist *sg; - int fragnob; - - CDEBUG(D_NET, "niov %d 
offset %d nob %d\n", nkiov, offset, nob); - - LASSERT(nob > 0); - LASSERT(nkiov > 0); - LASSERT(net); - - while (offset >= kiov->bv_len) { - offset -= kiov->bv_len; - nkiov--; - kiov++; - LASSERT(nkiov > 0); - } - - sg = tx->tx_frags; - do { - LASSERT(nkiov > 0); - - fragnob = min((int)(kiov->bv_len - offset), nob); - - sg_set_page(sg, kiov->bv_page, fragnob, - kiov->bv_offset + offset); - sg = sg_next(sg); - if (!sg) { - CERROR("lacking enough sg entries to map tx\n"); - return -EFAULT; - } - - offset = 0; - kiov++; - nkiov--; - nob -= fragnob; - } while (nob > 0); - - return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); -} - -static int -kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit) - __must_hold(&conn->ibc_lock) -{ - struct kib_msg *msg = tx->tx_msg; - struct kib_peer *peer = conn->ibc_peer; - struct lnet_ni *ni = peer->ibp_ni; - int ver = conn->ibc_version; - int rc; - int done; - - LASSERT(tx->tx_queued); - /* We rely on this for QP sizing */ - LASSERT(tx->tx_nwrq > 0); - - LASSERT(!credit || credit == 1); - LASSERT(conn->ibc_outstanding_credits >= 0); - LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth); - LASSERT(conn->ibc_credits >= 0); - LASSERT(conn->ibc_credits <= conn->ibc_queue_depth); - - if (conn->ibc_nsends_posted == kiblnd_concurrent_sends(ver, ni)) { - /* tx completions outstanding... 
*/ - CDEBUG(D_NET, "%s: posted enough\n", - libcfs_nid2str(peer->ibp_nid)); - return -EAGAIN; - } - - if (credit && !conn->ibc_credits) { /* no credits */ - CDEBUG(D_NET, "%s: no credits\n", - libcfs_nid2str(peer->ibp_nid)); - return -EAGAIN; - } - - if (credit && !IBLND_OOB_CAPABLE(ver) && - conn->ibc_credits == 1 && /* last credit reserved */ - msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */ - CDEBUG(D_NET, "%s: not using last credit\n", - libcfs_nid2str(peer->ibp_nid)); - return -EAGAIN; - } - - /* NB don't drop ibc_lock before bumping tx_sending */ - list_del(&tx->tx_list); - tx->tx_queued = 0; - - if (msg->ibm_type == IBLND_MSG_NOOP && - (!kiblnd_need_noop(conn) || /* redundant NOOP */ - (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */ - conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) { - /* - * OK to drop when posted enough NOOPs, since - * kiblnd_check_sends_locked will queue NOOP again when - * posted NOOPs complete - */ - spin_unlock(&conn->ibc_lock); - kiblnd_tx_done(peer->ibp_ni, tx); - spin_lock(&conn->ibc_lock); - CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n", - libcfs_nid2str(peer->ibp_nid), - conn->ibc_noops_posted); - return 0; - } - - kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits, - peer->ibp_nid, conn->ibc_incarnation); - - conn->ibc_credits -= credit; - conn->ibc_outstanding_credits = 0; - conn->ibc_nsends_posted++; - if (msg->ibm_type == IBLND_MSG_NOOP) - conn->ibc_noops_posted++; - - /* - * CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA - * PUT. If so, it was first queued here as a PUT_REQ, sent and - * stashed on ibc_active_txs, matched by an incoming PUT_ACK, - * and then re-queued here. It's (just) possible that - * tx_sending is non-zero if we've not done the tx_complete() - * from the first send; hence the ++ rather than = below. - */ - tx->tx_sending++; - list_add(&tx->tx_list, &conn->ibc_active_txs); - - /* I'm still holding ibc_lock! 
*/ - if (conn->ibc_state != IBLND_CONN_ESTABLISHED) { - rc = -ECONNABORTED; - } else if (tx->tx_pool->tpo_pool.po_failed || - conn->ibc_hdev != tx->tx_pool->tpo_hdev) { - /* close_conn will launch failover */ - rc = -ENETDOWN; - } else { - struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd; - struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr; - struct ib_send_wr *wrq = &tx->tx_wrq[0].wr; - - if (frd) { - if (!frd->frd_valid) { - wrq = &frd->frd_inv_wr; - wrq->next = &frd->frd_fastreg_wr.wr; - } else { - wrq = &frd->frd_fastreg_wr.wr; - } - frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr; - } - - LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), - "bad wr_id %llx, opc %d, flags %d, peer: %s\n", - bad->wr_id, bad->opcode, bad->send_flags, - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - bad = NULL; - rc = ib_post_send(conn->ibc_cmid->qp, wrq, &bad); - } - - conn->ibc_last_send = jiffies; - - if (!rc) - return 0; - - /* - * NB credits are transferred in the actual - * message, which can only be the last work item - */ - conn->ibc_credits += credit; - conn->ibc_outstanding_credits += msg->ibm_credits; - conn->ibc_nsends_posted--; - if (msg->ibm_type == IBLND_MSG_NOOP) - conn->ibc_noops_posted--; - - tx->tx_status = rc; - tx->tx_waiting = 0; - tx->tx_sending--; - - done = !tx->tx_sending; - if (done) - list_del(&tx->tx_list); - - spin_unlock(&conn->ibc_lock); - - if (conn->ibc_state == IBLND_CONN_ESTABLISHED) - CERROR("Error %d posting transmit to %s\n", - rc, libcfs_nid2str(peer->ibp_nid)); - else - CDEBUG(D_NET, "Error %d posting transmit to %s\n", - rc, libcfs_nid2str(peer->ibp_nid)); - - kiblnd_close_conn(conn, rc); - - if (done) - kiblnd_tx_done(peer->ibp_ni, tx); - - spin_lock(&conn->ibc_lock); - - return -EIO; -} - -static void -kiblnd_check_sends_locked(struct kib_conn *conn) -{ - int ver = conn->ibc_version; - struct lnet_ni *ni = conn->ibc_peer->ibp_ni; - struct kib_tx *tx; - - /* Don't send anything until after the connection is established 
*/ - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - CDEBUG(D_NET, "%s too soon\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - return; - } - - LASSERT(conn->ibc_nsends_posted <= kiblnd_concurrent_sends(ver, ni)); - LASSERT(!IBLND_OOB_CAPABLE(ver) || - conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); - LASSERT(conn->ibc_reserved_credits >= 0); - - while (conn->ibc_reserved_credits > 0 && - !list_empty(&conn->ibc_tx_queue_rsrvd)) { - tx = list_entry(conn->ibc_tx_queue_rsrvd.next, - struct kib_tx, tx_list); - list_del(&tx->tx_list); - list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); - conn->ibc_reserved_credits--; - } - - if (kiblnd_need_noop(conn)) { - spin_unlock(&conn->ibc_lock); - - tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (tx) - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); - - spin_lock(&conn->ibc_lock); - if (tx) - kiblnd_queue_tx_locked(tx, conn); - } - - for (;;) { - int credit; - - if (!list_empty(&conn->ibc_tx_queue_nocred)) { - credit = 0; - tx = list_entry(conn->ibc_tx_queue_nocred.next, - struct kib_tx, tx_list); - } else if (!list_empty(&conn->ibc_tx_noops)) { - LASSERT(!IBLND_OOB_CAPABLE(ver)); - credit = 1; - tx = list_entry(conn->ibc_tx_noops.next, - struct kib_tx, tx_list); - } else if (!list_empty(&conn->ibc_tx_queue)) { - credit = 1; - tx = list_entry(conn->ibc_tx_queue.next, - struct kib_tx, tx_list); - } else { - break; - } - - if (kiblnd_post_tx_locked(conn, tx, credit)) - break; - } -} - -static void -kiblnd_tx_complete(struct kib_tx *tx, int status) -{ - int failed = (status != IB_WC_SUCCESS); - struct kib_conn *conn = tx->tx_conn; - int idle; - - LASSERT(tx->tx_sending > 0); - - if (failed) { - if (conn->ibc_state == IBLND_CONN_ESTABLISHED) - CNETERR("Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - tx->tx_cookie, tx->tx_sending, tx->tx_waiting, - status); - - kiblnd_close_conn(conn, -EIO); - } else { - kiblnd_peer_alive(conn->ibc_peer); - } - - 
spin_lock(&conn->ibc_lock); - - /* - * I could be racing with rdma completion. Whoever makes 'tx' idle - * gets to free it, which also drops its ref on 'conn'. - */ - tx->tx_sending--; - conn->ibc_nsends_posted--; - if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP) - conn->ibc_noops_posted--; - - if (failed) { - tx->tx_waiting = 0; /* don't wait for peer */ - tx->tx_status = -EIO; - } - - idle = !tx->tx_sending && /* This is the final callback */ - !tx->tx_waiting && /* Not waiting for peer */ - !tx->tx_queued; /* Not re-queued (PUT_DONE) */ - if (idle) - list_del(&tx->tx_list); - - kiblnd_check_sends_locked(conn); - spin_unlock(&conn->ibc_lock); - - if (idle) - kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx); -} - -static void -kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type, - int body_nob) -{ - struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev; - struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; - struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; - int nob = offsetof(struct kib_msg, ibm_u) + body_nob; - - LASSERT(tx->tx_nwrq >= 0); - LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); - LASSERT(nob <= IBLND_MSG_SIZE); - - kiblnd_init_msg(tx->tx_msg, type, body_nob); - - sge->lkey = hdev->ibh_pd->local_dma_lkey; - sge->addr = tx->tx_msgaddr; - sge->length = nob; - - memset(wrq, 0, sizeof(*wrq)); - - wrq->wr.next = NULL; - wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); - wrq->wr.sg_list = sge; - wrq->wr.num_sge = 1; - wrq->wr.opcode = IB_WR_SEND; - wrq->wr.send_flags = IB_SEND_SIGNALED; - - tx->tx_nwrq++; -} - -static int -kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, - int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie) -{ - struct kib_msg *ibmsg = tx->tx_msg; - struct kib_rdma_desc *srcrd = tx->tx_rd; - struct ib_sge *sge = &tx->tx_sge[0]; - struct ib_rdma_wr *wrq, *next; - int rc = resid; - int srcidx = 0; - int dstidx = 0; - int wrknob; - - LASSERT(!in_interrupt()); - LASSERT(!tx->tx_nwrq); - LASSERT(type == 
IBLND_MSG_GET_DONE || - type == IBLND_MSG_PUT_DONE); - - if (kiblnd_rd_size(srcrd) > conn->ibc_max_frags << PAGE_SHIFT) { - CERROR("RDMA is too large for peer %s (%d), src size: %d dst size: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - conn->ibc_max_frags << PAGE_SHIFT, - kiblnd_rd_size(srcrd), kiblnd_rd_size(dstrd)); - rc = -EMSGSIZE; - goto too_big; - } - - while (resid > 0) { - if (srcidx >= srcrd->rd_nfrags) { - CERROR("Src buffer exhausted: %d frags\n", srcidx); - rc = -EPROTO; - break; - } - - if (dstidx == dstrd->rd_nfrags) { - CERROR("Dst buffer exhausted: %d frags\n", dstidx); - rc = -EPROTO; - break; - } - - if (tx->tx_nwrq >= IBLND_MAX_RDMA_FRAGS) { - CERROR("RDMA has too many fragments for peer %s (%d), src idx/frags: %d/%d dst idx/frags: %d/%d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - IBLND_MAX_RDMA_FRAGS, - srcidx, srcrd->rd_nfrags, - dstidx, dstrd->rd_nfrags); - rc = -EMSGSIZE; - break; - } - - wrknob = min3(kiblnd_rd_frag_size(srcrd, srcidx), - kiblnd_rd_frag_size(dstrd, dstidx), - (__u32)resid); - - sge = &tx->tx_sge[tx->tx_nwrq]; - sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx); - sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx); - sge->length = wrknob; - - wrq = &tx->tx_wrq[tx->tx_nwrq]; - next = wrq + 1; - - wrq->wr.next = &next->wr; - wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); - wrq->wr.sg_list = sge; - wrq->wr.num_sge = 1; - wrq->wr.opcode = IB_WR_RDMA_WRITE; - wrq->wr.send_flags = 0; - - wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); - wrq->rkey = kiblnd_rd_frag_key(dstrd, dstidx); - - srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob); - dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob); - - resid -= wrknob; - - tx->tx_nwrq++; - wrq++; - sge++; - } -too_big: - if (rc < 0) /* no RDMA if completing with failure */ - tx->tx_nwrq = 0; - - ibmsg->ibm_u.completion.ibcm_status = rc; - ibmsg->ibm_u.completion.ibcm_cookie = dstcookie; - kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx, - type, sizeof(struct 
kib_completion_msg)); - - return rc; -} - -static void -kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn) -{ - struct list_head *q; - - LASSERT(tx->tx_nwrq > 0); /* work items set up */ - LASSERT(!tx->tx_queued); /* not queued for sending already */ - LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); - - tx->tx_queued = 1; - tx->tx_deadline = jiffies + - msecs_to_jiffies(*kiblnd_tunables.kib_timeout * - MSEC_PER_SEC); - - if (!tx->tx_conn) { - kiblnd_conn_addref(conn); - tx->tx_conn = conn; - LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE); - } else { - /* PUT_DONE first attached to conn as a PUT_REQ */ - LASSERT(tx->tx_conn == conn); - LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE); - } - - switch (tx->tx_msg->ibm_type) { - default: - LBUG(); - - case IBLND_MSG_PUT_REQ: - case IBLND_MSG_GET_REQ: - q = &conn->ibc_tx_queue_rsrvd; - break; - - case IBLND_MSG_PUT_NAK: - case IBLND_MSG_PUT_ACK: - case IBLND_MSG_PUT_DONE: - case IBLND_MSG_GET_DONE: - q = &conn->ibc_tx_queue_nocred; - break; - - case IBLND_MSG_NOOP: - if (IBLND_OOB_CAPABLE(conn->ibc_version)) - q = &conn->ibc_tx_queue_nocred; - else - q = &conn->ibc_tx_noops; - break; - - case IBLND_MSG_IMMEDIATE: - q = &conn->ibc_tx_queue; - break; - } - - list_add_tail(&tx->tx_list, q); -} - -static void -kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn) -{ - spin_lock(&conn->ibc_lock); - kiblnd_queue_tx_locked(tx, conn); - kiblnd_check_sends_locked(conn); - spin_unlock(&conn->ibc_lock); -} - -static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, - struct sockaddr_in *srcaddr, - struct sockaddr_in *dstaddr, - int timeout_ms) -{ - unsigned short port; - int rc; - - /* allow the port to be reused */ - rc = rdma_set_reuseaddr(cmid, 1); - if (rc) { - CERROR("Unable to set reuse on cmid: %d\n", rc); - return rc; - } - - /* look for a free privileged port */ - for (port = PROT_SOCK - 1; port > 0; port--) { - srcaddr->sin_port = htons(port); - rc = rdma_resolve_addr(cmid, - (struct sockaddr 
*)srcaddr, - (struct sockaddr *)dstaddr, - timeout_ms); - if (!rc) { - CDEBUG(D_NET, "bound to port %hu\n", port); - return 0; - } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) { - CDEBUG(D_NET, "bind to port %hu failed: %d\n", - port, rc); - } else { - return rc; - } - } - - CERROR("Failed to bind to a free privileged port\n"); - return rc; -} - -static void -kiblnd_connect_peer(struct kib_peer *peer) -{ - struct rdma_cm_id *cmid; - struct kib_dev *dev; - struct kib_net *net = peer->ibp_ni->ni_data; - struct sockaddr_in srcaddr; - struct sockaddr_in dstaddr; - int rc; - - LASSERT(net); - LASSERT(peer->ibp_connecting > 0); - - cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP, - IB_QPT_RC); - - if (IS_ERR(cmid)) { - CERROR("Can't create CMID for %s: %ld\n", - libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid)); - rc = PTR_ERR(cmid); - goto failed; - } - - dev = net->ibn_dev; - memset(&srcaddr, 0, sizeof(srcaddr)); - srcaddr.sin_family = AF_INET; - srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip); - - memset(&dstaddr, 0, sizeof(dstaddr)); - dstaddr.sin_family = AF_INET; - dstaddr.sin_port = htons(*kiblnd_tunables.kib_service); - dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid)); - - kiblnd_peer_addref(peer); /* cmid's ref */ - - if (*kiblnd_tunables.kib_use_priv_port) { - rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr, - *kiblnd_tunables.kib_timeout * 1000); - } else { - rc = rdma_resolve_addr(cmid, - (struct sockaddr *)&srcaddr, - (struct sockaddr *)&dstaddr, - *kiblnd_tunables.kib_timeout * 1000); - } - if (rc) { - /* Can't initiate address resolution: */ - CERROR("Can't resolve addr for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); - goto failed2; - } - - return; - - failed2: - kiblnd_peer_connect_failed(peer, 1, rc); - kiblnd_peer_decref(peer); /* cmid's ref */ - rdma_destroy_id(cmid); - return; - failed: - kiblnd_peer_connect_failed(peer, 1, rc); -} - -bool -kiblnd_reconnect_peer(struct kib_peer *peer) -{ - rwlock_t *glock = 
&kiblnd_data.kib_global_lock; - char *reason = NULL; - struct list_head txs; - unsigned long flags; - - INIT_LIST_HEAD(&txs); - - write_lock_irqsave(glock, flags); - if (!peer->ibp_reconnecting) { - if (peer->ibp_accepting) - reason = "accepting"; - else if (peer->ibp_connecting) - reason = "connecting"; - else if (!list_empty(&peer->ibp_conns)) - reason = "connected"; - else /* connected then closed */ - reason = "closed"; - - goto no_reconnect; - } - - LASSERT(!peer->ibp_accepting && !peer->ibp_connecting && - list_empty(&peer->ibp_conns)); - peer->ibp_reconnecting--; - - if (!kiblnd_peer_active(peer)) { - list_splice_init(&peer->ibp_tx_queue, &txs); - reason = "unlinked"; - goto no_reconnect; - } - - peer->ibp_connecting++; - peer->ibp_reconnected++; - write_unlock_irqrestore(glock, flags); - - kiblnd_connect_peer(peer); - return true; - -no_reconnect: - write_unlock_irqrestore(glock, flags); - - CWARN("Abort reconnection of %s: %s\n", - libcfs_nid2str(peer->ibp_nid), reason); - kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED); - return false; -} - -void -kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid) -{ - struct kib_peer *peer; - struct kib_peer *peer2; - struct kib_conn *conn; - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - unsigned long flags; - int rc; - int i; - struct lnet_ioctl_config_o2iblnd_tunables *tunables; - - /* - * If I get here, I've committed to send, so I complete the tx with - * failure on any problems - */ - LASSERT(!tx || !tx->tx_conn); /* only set when assigned a conn */ - LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */ - - /* - * First time, just use a read lock since I expect to find my peer - * connected - */ - read_lock_irqsave(g_lock, flags); - - peer = kiblnd_find_peer_locked(nid); - if (peer && !list_empty(&peer->ibp_conns)) { - /* Found a peer with an established connection */ - conn = kiblnd_get_conn_locked(peer); - kiblnd_conn_addref(conn); /* 1 ref for me... 
*/ - - read_unlock_irqrestore(g_lock, flags); - - if (tx) - kiblnd_queue_tx(tx, conn); - kiblnd_conn_decref(conn); /* ...to here */ - return; - } - - read_unlock(g_lock); - /* Re-try with a write lock */ - write_lock(g_lock); - - peer = kiblnd_find_peer_locked(nid); - if (peer) { - if (list_empty(&peer->ibp_conns)) { - /* found a peer, but it's still connecting... */ - LASSERT(kiblnd_peer_connecting(peer)); - if (tx) - list_add_tail(&tx->tx_list, - &peer->ibp_tx_queue); - write_unlock_irqrestore(g_lock, flags); - } else { - conn = kiblnd_get_conn_locked(peer); - kiblnd_conn_addref(conn); /* 1 ref for me... */ - - write_unlock_irqrestore(g_lock, flags); - - if (tx) - kiblnd_queue_tx(tx, conn); - kiblnd_conn_decref(conn); /* ...to here */ - } - return; - } - - write_unlock_irqrestore(g_lock, flags); - - /* Allocate a peer ready to add to the peer table and retry */ - rc = kiblnd_create_peer(ni, &peer, nid); - if (rc) { - CERROR("Can't create peer %s\n", libcfs_nid2str(nid)); - if (tx) { - tx->tx_status = -EHOSTUNREACH; - tx->tx_waiting = 0; - kiblnd_tx_done(ni, tx); - } - return; - } - - write_lock_irqsave(g_lock, flags); - - peer2 = kiblnd_find_peer_locked(nid); - if (peer2) { - if (list_empty(&peer2->ibp_conns)) { - /* found a peer, but it's still connecting... */ - LASSERT(kiblnd_peer_connecting(peer2)); - if (tx) - list_add_tail(&tx->tx_list, - &peer2->ibp_tx_queue); - write_unlock_irqrestore(g_lock, flags); - } else { - conn = kiblnd_get_conn_locked(peer2); - kiblnd_conn_addref(conn); /* 1 ref for me... 
*/ - - write_unlock_irqrestore(g_lock, flags); - - if (tx) - kiblnd_queue_tx(tx, conn); - kiblnd_conn_decref(conn); /* ...to here */ - } - - kiblnd_peer_decref(peer); - return; - } - - /* Brand new peer */ - LASSERT(!peer->ibp_connecting); - tunables = &peer->ibp_ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; - peer->ibp_connecting = tunables->lnd_conns_per_peer; - - /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT(!((struct kib_net *)ni->ni_data)->ibn_shutdown); - - if (tx) - list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); - - kiblnd_peer_addref(peer); - list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); - - write_unlock_irqrestore(g_lock, flags); - - for (i = 0; i < tunables->lnd_conns_per_peer; i++) - kiblnd_connect_peer(peer); - kiblnd_peer_decref(peer); -} - -int -kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) -{ - struct lnet_hdr *hdr = &lntmsg->msg_hdr; - int type = lntmsg->msg_type; - struct lnet_process_id target = lntmsg->msg_target; - int target_is_router = lntmsg->msg_target_is_router; - int routing = lntmsg->msg_routing; - unsigned int payload_niov = lntmsg->msg_niov; - struct kvec *payload_iov = lntmsg->msg_iov; - struct bio_vec *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; - struct iov_iter from; - struct kib_msg *ibmsg; - struct kib_rdma_desc *rd; - struct kib_tx *tx; - int nob; - int rc; - - /* NB 'private' is different depending on what we're sending.... 
*/ - - CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n", - payload_nob, payload_niov, libcfs_id2str(target)); - - LASSERT(!payload_nob || payload_niov > 0); - LASSERT(payload_niov <= LNET_MAX_IOV); - - /* Thread context */ - LASSERT(!in_interrupt()); - /* payload is either all vaddrs or all pages */ - LASSERT(!(payload_kiov && payload_iov)); - - if (payload_kiov) - iov_iter_bvec(&from, ITER_BVEC | WRITE, - payload_kiov, payload_niov, - payload_nob + payload_offset); - else - iov_iter_kvec(&from, ITER_KVEC | WRITE, - payload_iov, payload_niov, - payload_nob + payload_offset); - - iov_iter_advance(&from, payload_offset); - - switch (type) { - default: - LBUG(); - return -EIO; - - case LNET_MSG_ACK: - LASSERT(!payload_nob); - break; - - case LNET_MSG_GET: - if (routing || target_is_router) - break; /* send IMMEDIATE */ - - /* is the REPLY message too small for RDMA? */ - nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]); - if (nob <= IBLND_MSG_SIZE) - break; /* send IMMEDIATE */ - - tx = kiblnd_get_idle_tx(ni, target.nid); - if (!tx) { - CERROR("Can't allocate txd for GET to %s\n", - libcfs_nid2str(target.nid)); - return -ENOMEM; - } - - ibmsg = tx->tx_msg; - rd = &ibmsg->ibm_u.get.ibgm_rd; - if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV)) - rc = kiblnd_setup_rd_iov(ni, tx, rd, - lntmsg->msg_md->md_niov, - lntmsg->msg_md->md_iov.iov, - 0, lntmsg->msg_md->md_length); - else - rc = kiblnd_setup_rd_kiov(ni, tx, rd, - lntmsg->msg_md->md_niov, - lntmsg->msg_md->md_iov.kiov, - 0, lntmsg->msg_md->md_length); - if (rc) { - CERROR("Can't setup GET sink for %s: %d\n", - libcfs_nid2str(target.nid), rc); - kiblnd_tx_done(ni, tx); - return -EIO; - } - - nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]); - ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie; - ibmsg->ibm_u.get.ibgm_hdr = *hdr; - - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob); - - tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg); - if 
(!tx->tx_lntmsg[1]) { - CERROR("Can't create reply for GET -> %s\n", - libcfs_nid2str(target.nid)); - kiblnd_tx_done(ni, tx); - return -EIO; - } - - tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */ - tx->tx_waiting = 1; /* waiting for GET_DONE */ - kiblnd_launch_tx(ni, tx, target.nid); - return 0; - - case LNET_MSG_REPLY: - case LNET_MSG_PUT: - /* Is the payload small enough not to need RDMA? */ - nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]); - if (nob <= IBLND_MSG_SIZE) - break; /* send IMMEDIATE */ - - tx = kiblnd_get_idle_tx(ni, target.nid); - if (!tx) { - CERROR("Can't allocate %s txd for %s\n", - type == LNET_MSG_PUT ? "PUT" : "REPLY", - libcfs_nid2str(target.nid)); - return -ENOMEM; - } - - if (!payload_kiov) - rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, - payload_niov, payload_iov, - payload_offset, payload_nob); - else - rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, - payload_niov, payload_kiov, - payload_offset, payload_nob); - if (rc) { - CERROR("Can't setup PUT src for %s: %d\n", - libcfs_nid2str(target.nid), rc); - kiblnd_tx_done(ni, tx); - return -EIO; - } - - ibmsg = tx->tx_msg; - ibmsg->ibm_u.putreq.ibprm_hdr = *hdr; - ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie; - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(struct kib_putreq_msg)); - - tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ - tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */ - kiblnd_launch_tx(ni, tx, target.nid); - return 0; - } - - /* send IMMEDIATE */ - - LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]) - <= IBLND_MSG_SIZE); - - tx = kiblnd_get_idle_tx(ni, target.nid); - if (!tx) { - CERROR("Can't send %d to %s: tx descs exhausted\n", - type, libcfs_nid2str(target.nid)); - return -ENOMEM; - } - - ibmsg = tx->tx_msg; - ibmsg->ibm_u.immediate.ibim_hdr = *hdr; - - rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob, - &from); - if (rc != payload_nob) { - 
kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); - return -EFAULT; - } - - nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]); - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); - - tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ - kiblnd_launch_tx(ni, tx, target.nid); - return 0; -} - -static void -kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg) -{ - struct lnet_process_id target = lntmsg->msg_target; - unsigned int niov = lntmsg->msg_niov; - struct kvec *iov = lntmsg->msg_iov; - struct bio_vec *kiov = lntmsg->msg_kiov; - unsigned int offset = lntmsg->msg_offset; - unsigned int nob = lntmsg->msg_len; - struct kib_tx *tx; - int rc; - - tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); - if (!tx) { - CERROR("Can't get tx for REPLY to %s\n", - libcfs_nid2str(target.nid)); - goto failed_0; - } - - if (!nob) - rc = 0; - else if (!kiov) - rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, - niov, iov, offset, nob); - else - rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, - niov, kiov, offset, nob); - - if (rc) { - CERROR("Can't setup GET src for %s: %d\n", - libcfs_nid2str(target.nid), rc); - goto failed_1; - } - - rc = kiblnd_init_rdma(rx->rx_conn, tx, - IBLND_MSG_GET_DONE, nob, - &rx->rx_msg->ibm_u.get.ibgm_rd, - rx->rx_msg->ibm_u.get.ibgm_cookie); - if (rc < 0) { - CERROR("Can't setup rdma for GET from %s: %d\n", - libcfs_nid2str(target.nid), rc); - goto failed_1; - } - - if (!nob) { - /* No RDMA: local completion may happen now! 
*/ - lnet_finalize(ni, lntmsg, 0); - } else { - /* RDMA: lnet_finalize(lntmsg) when it completes */ - tx->tx_lntmsg[0] = lntmsg; - } - - kiblnd_queue_tx(tx, rx->rx_conn); - return; - - failed_1: - kiblnd_tx_done(ni, tx); - failed_0: - lnet_finalize(ni, lntmsg, -EIO); -} - -int -kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, - int delayed, struct iov_iter *to, unsigned int rlen) -{ - struct kib_rx *rx = private; - struct kib_msg *rxmsg = rx->rx_msg; - struct kib_conn *conn = rx->rx_conn; - struct kib_tx *tx; - int nob; - int post_credit = IBLND_POSTRX_PEER_CREDIT; - int rc = 0; - - LASSERT(iov_iter_count(to) <= rlen); - LASSERT(!in_interrupt()); - /* Either all pages or all vaddrs */ - - switch (rxmsg->ibm_type) { - default: - LBUG(); - - case IBLND_MSG_IMMEDIATE: - nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]); - if (nob > rx->rx_nob) { - CERROR("Immediate message from %s too big: %d(%d)\n", - libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), - nob, rx->rx_nob); - rc = -EPROTO; - break; - } - - rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen, - to); - if (rc != rlen) { - rc = -EFAULT; - break; - } - - rc = 0; - lnet_finalize(ni, lntmsg, 0); - break; - - case IBLND_MSG_PUT_REQ: { - struct kib_msg *txmsg; - struct kib_rdma_desc *rd; - - if (!iov_iter_count(to)) { - lnet_finalize(ni, lntmsg, 0); - kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0, - rxmsg->ibm_u.putreq.ibprm_cookie); - break; - } - - tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (!tx) { - CERROR("Can't allocate tx for %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - /* Not replying will break the connection */ - rc = -ENOMEM; - break; - } - - txmsg = tx->tx_msg; - rd = &txmsg->ibm_u.putack.ibpam_rd; - if (!(to->type & ITER_BVEC)) - rc = kiblnd_setup_rd_iov(ni, tx, rd, - to->nr_segs, to->kvec, - to->iov_offset, - iov_iter_count(to)); - else - rc = kiblnd_setup_rd_kiov(ni, tx, rd, - to->nr_segs, to->bvec, - 
to->iov_offset, - iov_iter_count(to)); - if (rc) { - CERROR("Can't setup PUT sink for %s: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); - kiblnd_tx_done(ni, tx); - /* tell peer it's over */ - kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc, - rxmsg->ibm_u.putreq.ibprm_cookie); - break; - } - - nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]); - txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; - txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie; - - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob); - - tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ - tx->tx_waiting = 1; /* waiting for PUT_DONE */ - kiblnd_queue_tx(tx, conn); - - /* reposted buffer reserved for PUT_DONE */ - post_credit = IBLND_POSTRX_NO_CREDIT; - break; - } - - case IBLND_MSG_GET_REQ: - if (lntmsg) { - /* Optimized GET; RDMA lntmsg's payload */ - kiblnd_reply(ni, rx, lntmsg); - } else { - /* GET didn't match anything */ - kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE, - -ENODATA, - rxmsg->ibm_u.get.ibgm_cookie); - } - break; - } - - kiblnd_post_rx(rx, post_credit); - return rc; -} - -int -kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) -{ - struct task_struct *task = kthread_run(fn, arg, "%s", name); - - if (IS_ERR(task)) - return PTR_ERR(task); - - atomic_inc(&kiblnd_data.kib_nthreads); - return 0; -} - -static void -kiblnd_thread_fini(void) -{ - atomic_dec(&kiblnd_data.kib_nthreads); -} - -static void -kiblnd_peer_alive(struct kib_peer *peer) -{ - /* This is racy, but everyone's only writing jiffies */ - peer->ibp_last_alive = jiffies; - mb(); -} - -static void -kiblnd_peer_notify(struct kib_peer *peer) -{ - int error = 0; - unsigned long last_alive = 0; - unsigned long flags; - - read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - if (kiblnd_peer_idle(peer) && peer->ibp_error) { - error = peer->ibp_error; - peer->ibp_error = 0; - - last_alive = peer->ibp_last_alive; - } - - 
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - if (error) - lnet_notify(peer->ibp_ni, - peer->ibp_nid, 0, last_alive); -} - -void -kiblnd_close_conn_locked(struct kib_conn *conn, int error) -{ - /* - * This just does the immediate housekeeping. 'error' is zero for a - * normal shutdown which can happen only after the connection has been - * established. If the connection is established, schedule the - * connection to be finished off by the connd. Otherwise the connd is - * already dealing with it (either to set it up or tear it down). - * Caller holds kib_global_lock exclusively in irq context - */ - struct kib_peer *peer = conn->ibc_peer; - struct kib_dev *dev; - unsigned long flags; - - LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED); - - if (error && !conn->ibc_comms_error) - conn->ibc_comms_error = error; - - if (conn->ibc_state != IBLND_CONN_ESTABLISHED) - return; /* already being handled */ - - if (!error && - list_empty(&conn->ibc_tx_noops) && - list_empty(&conn->ibc_tx_queue) && - list_empty(&conn->ibc_tx_queue_rsrvd) && - list_empty(&conn->ibc_tx_queue_nocred) && - list_empty(&conn->ibc_active_txs)) { - CDEBUG(D_NET, "closing conn to %s\n", - libcfs_nid2str(peer->ibp_nid)); - } else { - CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", - libcfs_nid2str(peer->ibp_nid), error, - list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", - list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", - list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", - list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", - list_empty(&conn->ibc_active_txs) ? 
"" : "(waiting)"); - } - - dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev; - if (peer->ibp_next_conn == conn) - /* clear next_conn so it won't be used */ - peer->ibp_next_conn = NULL; - list_del(&conn->ibc_list); - /* connd (see below) takes over ibc_list's ref */ - - if (list_empty(&peer->ibp_conns) && /* no more conns */ - kiblnd_peer_active(peer)) { /* still in peer table */ - kiblnd_unlink_peer_locked(peer); - - /* set/clear error on last conn */ - peer->ibp_error = conn->ibc_comms_error; - } - - kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING); - - if (error && - kiblnd_dev_can_failover(dev)) { - list_add_tail(&dev->ibd_fail_list, - &kiblnd_data.kib_failed_devs); - wake_up(&kiblnd_data.kib_failover_waitq); - } - - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); - - list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns); - wake_up(&kiblnd_data.kib_connd_waitq); - - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); -} - -void -kiblnd_close_conn(struct kib_conn *conn, int error) -{ - unsigned long flags; - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - kiblnd_close_conn_locked(conn, error); - - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); -} - -static void -kiblnd_handle_early_rxs(struct kib_conn *conn) -{ - unsigned long flags; - struct kib_rx *rx; - - LASSERT(!in_interrupt()); - LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - while (!list_empty(&conn->ibc_early_rxs)) { - rx = list_entry(conn->ibc_early_rxs.next, - struct kib_rx, rx_list); - list_del(&rx->rx_list); - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - kiblnd_handle_rx(rx); - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - } - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); -} - -static void -kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs) -{ - LIST_HEAD(zombies); - struct list_head *tmp; - struct list_head 
*nxt; - struct kib_tx *tx; - - spin_lock(&conn->ibc_lock); - - list_for_each_safe(tmp, nxt, txs) { - tx = list_entry(tmp, struct kib_tx, tx_list); - - if (txs == &conn->ibc_active_txs) { - LASSERT(!tx->tx_queued); - LASSERT(tx->tx_waiting || tx->tx_sending); - } else { - LASSERT(tx->tx_queued); - } - - tx->tx_status = -ECONNABORTED; - tx->tx_waiting = 0; - - if (!tx->tx_sending) { - tx->tx_queued = 0; - list_del(&tx->tx_list); - list_add(&tx->tx_list, &zombies); - } - } - - spin_unlock(&conn->ibc_lock); - - kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED); -} - -static void -kiblnd_finalise_conn(struct kib_conn *conn) -{ - LASSERT(!in_interrupt()); - LASSERT(conn->ibc_state > IBLND_CONN_INIT); - - kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED); - - /* - * abort_receives moves QP state to IB_QPS_ERR. This is only required - * for connections that didn't get as far as being connected, because - * rdma_disconnect() does this for free. - */ - kiblnd_abort_receives(conn); - - /* - * Complete all tx descs not waiting for sends to complete. - * NB we should be safe from RDMA now that the QP has changed state - */ - kiblnd_abort_txs(conn, &conn->ibc_tx_noops); - kiblnd_abort_txs(conn, &conn->ibc_tx_queue); - kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd); - kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred); - kiblnd_abort_txs(conn, &conn->ibc_active_txs); - - kiblnd_handle_early_rxs(conn); -} - -static void -kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error) -{ - LIST_HEAD(zombies); - unsigned long flags; - - LASSERT(error); - LASSERT(!in_interrupt()); - - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - if (active) { - LASSERT(peer->ibp_connecting > 0); - peer->ibp_connecting--; - } else { - LASSERT(peer->ibp_accepting > 0); - peer->ibp_accepting--; - } - - if (kiblnd_peer_connecting(peer)) { - /* another connection attempt under way... 
*/ - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); - return; - } - - peer->ibp_reconnected = 0; - if (list_empty(&peer->ibp_conns)) { - /* Take peer's blocked transmits to complete with error */ - list_add(&zombies, &peer->ibp_tx_queue); - list_del_init(&peer->ibp_tx_queue); - - if (kiblnd_peer_active(peer)) - kiblnd_unlink_peer_locked(peer); - - peer->ibp_error = error; - } else { - /* Can't have blocked transmits if there are connections */ - LASSERT(list_empty(&peer->ibp_tx_queue)); - } - - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - kiblnd_peer_notify(peer); - - if (list_empty(&zombies)) - return; - - CNETERR("Deleting messages for %s: connection failed\n", - libcfs_nid2str(peer->ibp_nid)); - - kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH); -} - -static void -kiblnd_connreq_done(struct kib_conn *conn, int status) -{ - struct kib_peer *peer = conn->ibc_peer; - struct kib_tx *tx; - struct kib_tx *tmp; - struct list_head txs; - unsigned long flags; - int active; - - active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); - - CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n", - libcfs_nid2str(peer->ibp_nid), active, - conn->ibc_version, status); - - LASSERT(!in_interrupt()); - LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT && - peer->ibp_connecting > 0) || - (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT && - peer->ibp_accepting > 0)); - - kfree(conn->ibc_connvars); - conn->ibc_connvars = NULL; - - if (status) { - /* failed to establish connection */ - kiblnd_peer_connect_failed(peer, active, status); - kiblnd_finalise_conn(conn); - return; - } - - /* connection established */ - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - conn->ibc_last_send = jiffies; - kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED); - kiblnd_peer_alive(peer); - - /* - * Add conn to peer's list and nuke any dangling conns from a different - * peer instance... 
- */ - kiblnd_conn_addref(conn); /* +1 ref for ibc_list */ - list_add(&conn->ibc_list, &peer->ibp_conns); - peer->ibp_reconnected = 0; - if (active) - peer->ibp_connecting--; - else - peer->ibp_accepting--; - - if (!peer->ibp_version) { - peer->ibp_version = conn->ibc_version; - peer->ibp_incarnation = conn->ibc_incarnation; - } - - if (peer->ibp_version != conn->ibc_version || - peer->ibp_incarnation != conn->ibc_incarnation) { - kiblnd_close_stale_conns_locked(peer, conn->ibc_version, - conn->ibc_incarnation); - peer->ibp_version = conn->ibc_version; - peer->ibp_incarnation = conn->ibc_incarnation; - } - - /* grab pending txs while I have the lock */ - list_add(&txs, &peer->ibp_tx_queue); - list_del_init(&peer->ibp_tx_queue); - - if (!kiblnd_peer_active(peer) || /* peer has been deleted */ - conn->ibc_comms_error) { /* error has happened already */ - struct lnet_ni *ni = peer->ibp_ni; - - /* start to shut down connection */ - kiblnd_close_conn_locked(conn, -ECONNABORTED); - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - kiblnd_txlist_done(ni, &txs, -ECONNABORTED); - - return; - } - - /* - * +1 ref for myself, this connection is visible to other threads - * now, refcount of peer:ibp_conns can be released by connection - * close from either a different thread, or the calling of - * kiblnd_check_sends_locked() below. See bz21911 for details. - */ - kiblnd_conn_addref(conn); - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - /* Schedule blocked txs - * Note: if we are running with conns_per_peer > 1, these blocked - * txs will all get scheduled to the first connection which gets - * scheduled. We won't be using round robin on this first batch. 
- */ - spin_lock(&conn->ibc_lock); - list_for_each_entry_safe(tx, tmp, &txs, tx_list) { - list_del(&tx->tx_list); - - kiblnd_queue_tx_locked(tx, conn); - } - kiblnd_check_sends_locked(conn); - spin_unlock(&conn->ibc_lock); - - /* schedule blocked rxs */ - kiblnd_handle_early_rxs(conn); - - kiblnd_conn_decref(conn); -} - -static void -kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej) -{ - int rc; - - rc = rdma_reject(cmid, rej, sizeof(*rej)); - - if (rc) - CWARN("Error %d sending reject\n", rc); -} - -static int -kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) -{ - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - struct kib_msg *reqmsg = priv; - struct kib_msg *ackmsg; - struct kib_dev *ibdev; - struct kib_peer *peer; - struct kib_peer *peer2; - struct kib_conn *conn; - struct lnet_ni *ni = NULL; - struct kib_net *net = NULL; - lnet_nid_t nid; - struct rdma_conn_param cp; - struct kib_rej rej; - int version = IBLND_MSG_VERSION; - unsigned long flags; - int max_frags; - int rc; - struct sockaddr_in *peer_addr; - - LASSERT(!in_interrupt()); - - /* cmid inherits 'context' from the corresponding listener id */ - ibdev = (struct kib_dev *)cmid->context; - LASSERT(ibdev); - - memset(&rej, 0, sizeof(rej)); - rej.ibr_magic = IBLND_MSG_MAGIC; - rej.ibr_why = IBLND_REJECT_FATAL; - rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE; - - peer_addr = (struct sockaddr_in *)&cmid->route.addr.dst_addr; - if (*kiblnd_tunables.kib_require_priv_port && - ntohs(peer_addr->sin_port) >= PROT_SOCK) { - __u32 ip = ntohl(peer_addr->sin_addr.s_addr); - - CERROR("Peer's port (%pI4h:%hu) is not privileged\n", - &ip, ntohs(peer_addr->sin_port)); - goto failed; - } - - if (priv_nob < offsetof(struct kib_msg, ibm_type)) { - CERROR("Short connection request\n"); - goto failed; - } - - /* - * Future protocol version compatibility support! 
If the - * o2iblnd-specific protocol changes, or when LNET unifies - * protocols over all LNDs, the initial connection will - * negotiate a protocol version. I trap this here to avoid - * console errors; the reject tells the peer which protocol I - * speak. - */ - if (reqmsg->ibm_magic == LNET_PROTO_MAGIC || - reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC)) - goto failed; - if (reqmsg->ibm_magic == IBLND_MSG_MAGIC && - reqmsg->ibm_version != IBLND_MSG_VERSION && - reqmsg->ibm_version != IBLND_MSG_VERSION_1) - goto failed; - if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) && - reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) && - reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1)) - goto failed; - - rc = kiblnd_unpack_msg(reqmsg, priv_nob); - if (rc) { - CERROR("Can't parse connection request: %d\n", rc); - goto failed; - } - - nid = reqmsg->ibm_srcnid; - ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); - - if (ni) { - net = (struct kib_net *)ni->ni_data; - rej.ibr_incarnation = net->ibn_incarnation; - } - - if (!ni || /* no matching net */ - ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */ - net->ibn_dev != ibdev) { /* wrong device */ - CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n", - libcfs_nid2str(nid), - !ni ? 
"NA" : libcfs_nid2str(ni->ni_nid), - ibdev->ibd_ifname, ibdev->ibd_nnets, - &ibdev->ibd_ifip, - libcfs_nid2str(reqmsg->ibm_dstnid)); - - goto failed; - } - - /* check time stamp as soon as possible */ - if (reqmsg->ibm_dststamp && - reqmsg->ibm_dststamp != net->ibn_incarnation) { - CWARN("Stale connection request\n"); - rej.ibr_why = IBLND_REJECT_CONN_STALE; - goto failed; - } - - /* I can accept peer's version */ - version = reqmsg->ibm_version; - - if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) { - CERROR("Unexpected connreq msg type: %x from %s\n", - reqmsg->ibm_type, libcfs_nid2str(nid)); - goto failed; - } - - if (reqmsg->ibm_u.connparams.ibcp_queue_depth > - kiblnd_msg_queue_size(version, ni)) { - CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n", - libcfs_nid2str(nid), - reqmsg->ibm_u.connparams.ibcp_queue_depth, - kiblnd_msg_queue_size(version, ni)); - - if (version == IBLND_MSG_VERSION) - rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE; - - goto failed; - } - - max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT; - if (max_frags > kiblnd_rdma_frags(version, ni)) { - CWARN("Can't accept conn from %s (version %x): max message size %d is too large (%d wanted)\n", - libcfs_nid2str(nid), version, max_frags, - kiblnd_rdma_frags(version, ni)); - - if (version >= IBLND_MSG_VERSION) - rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; - - goto failed; - } else if (max_frags < kiblnd_rdma_frags(version, ni) && - !net->ibn_fmr_ps) { - CWARN("Can't accept conn from %s (version %x): max message size %d incompatible without FMR pool (%d wanted)\n", - libcfs_nid2str(nid), version, max_frags, - kiblnd_rdma_frags(version, ni)); - - if (version == IBLND_MSG_VERSION) - rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; - - goto failed; - } - - if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { - CERROR("Can't accept %s: message size %d too big (%d max)\n", - libcfs_nid2str(nid), - reqmsg->ibm_u.connparams.ibcp_max_msg_size, - IBLND_MSG_SIZE); - 
goto failed; - } - - /* assume 'nid' is a new peer; create */ - rc = kiblnd_create_peer(ni, &peer, nid); - if (rc) { - CERROR("Can't create peer for %s\n", libcfs_nid2str(nid)); - rej.ibr_why = IBLND_REJECT_NO_RESOURCES; - goto failed; - } - - /* We have validated the peer's parameters so use those */ - peer->ibp_max_frags = max_frags; - peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth; - - write_lock_irqsave(g_lock, flags); - - peer2 = kiblnd_find_peer_locked(nid); - if (peer2) { - if (!peer2->ibp_version) { - peer2->ibp_version = version; - peer2->ibp_incarnation = reqmsg->ibm_srcstamp; - } - - /* not the guy I've talked with */ - if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp || - peer2->ibp_version != version) { - kiblnd_close_peer_conns_locked(peer2, -ESTALE); - - if (kiblnd_peer_active(peer2)) { - peer2->ibp_incarnation = reqmsg->ibm_srcstamp; - peer2->ibp_version = version; - } - write_unlock_irqrestore(g_lock, flags); - - CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n", - libcfs_nid2str(nid), peer2->ibp_version, version, - peer2->ibp_incarnation, reqmsg->ibm_srcstamp); - - kiblnd_peer_decref(peer); - rej.ibr_why = IBLND_REJECT_CONN_STALE; - goto failed; - } - - /* - * Tie-break connection race in favour of the higher NID. - * If we keep running into a race condition multiple times, - * we have to assume that the connection attempt with the - * higher NID is stuck in a connecting state and will never - * recover. As such, we pass through this if-block and let - * the lower NID connection win so we can move forward. 
- */ - if (peer2->ibp_connecting && - nid < ni->ni_nid && peer2->ibp_races < - MAX_CONN_RACES_BEFORE_ABORT) { - peer2->ibp_races++; - write_unlock_irqrestore(g_lock, flags); - - CDEBUG(D_NET, "Conn race %s\n", - libcfs_nid2str(peer2->ibp_nid)); - - kiblnd_peer_decref(peer); - rej.ibr_why = IBLND_REJECT_CONN_RACE; - goto failed; - } - if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT) - CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n", - libcfs_nid2str(peer2->ibp_nid), - MAX_CONN_RACES_BEFORE_ABORT); - /** - * passive connection is allowed even this peer is waiting for - * reconnection. - */ - peer2->ibp_reconnecting = 0; - peer2->ibp_races = 0; - peer2->ibp_accepting++; - kiblnd_peer_addref(peer2); - - /** - * Race with kiblnd_launch_tx (active connect) to create peer - * so copy validated parameters since we now know what the - * peer's limits are - */ - peer2->ibp_max_frags = peer->ibp_max_frags; - peer2->ibp_queue_depth = peer->ibp_queue_depth; - - write_unlock_irqrestore(g_lock, flags); - kiblnd_peer_decref(peer); - peer = peer2; - } else { - /* Brand new peer */ - LASSERT(!peer->ibp_accepting); - LASSERT(!peer->ibp_version && - !peer->ibp_incarnation); - - peer->ibp_accepting = 1; - peer->ibp_version = version; - peer->ibp_incarnation = reqmsg->ibm_srcstamp; - - /* I have a ref on ni that prevents it being shutdown */ - LASSERT(!net->ibn_shutdown); - - kiblnd_peer_addref(peer); - list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); - - write_unlock_irqrestore(g_lock, flags); - } - - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, - version); - if (!conn) { - kiblnd_peer_connect_failed(peer, 0, -ENOMEM); - kiblnd_peer_decref(peer); - rej.ibr_why = IBLND_REJECT_NO_RESOURCES; - goto failed; - } - - /* - * conn now "owns" cmid, so I return success from here on to ensure the - * CM callback doesn't destroy cmid. 
- */ - conn->ibc_incarnation = reqmsg->ibm_srcstamp; - conn->ibc_credits = conn->ibc_queue_depth; - conn->ibc_reserved_credits = conn->ibc_queue_depth; - LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + - IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn)); - - ackmsg = &conn->ibc_connvars->cv_msg; - memset(ackmsg, 0, sizeof(*ackmsg)); - - kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK, - sizeof(ackmsg->ibm_u.connparams)); - ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; - ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags << IBLND_FRAG_SHIFT; - ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; - - kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp); - - memset(&cp, 0, sizeof(cp)); - cp.private_data = ackmsg; - cp.private_data_len = ackmsg->ibm_nob; - cp.responder_resources = 0; /* No atomic ops or RDMA reads */ - cp.initiator_depth = 0; - cp.flow_control = 1; - cp.retry_count = *kiblnd_tunables.kib_retry_count; - cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; - - CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid)); - - rc = rdma_accept(cmid, &cp); - if (rc) { - CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc); - rej.ibr_version = version; - rej.ibr_why = IBLND_REJECT_FATAL; - - kiblnd_reject(cmid, &rej); - kiblnd_connreq_done(conn, rc); - kiblnd_conn_decref(conn); - } - - lnet_ni_decref(ni); - return 0; - - failed: - if (ni) { - rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni); - rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni); - lnet_ni_decref(ni); - } - - rej.ibr_version = version; - kiblnd_reject(cmid, &rej); - - return -ECONNREFUSED; -} - -static void -kiblnd_check_reconnect(struct kib_conn *conn, int version, - __u64 incarnation, int why, struct kib_connparams *cp) -{ - rwlock_t *glock = &kiblnd_data.kib_global_lock; - struct kib_peer *peer = conn->ibc_peer; - char *reason; - int msg_size = IBLND_MSG_SIZE; - int frag_num = -1; - int queue_dep = -1; - bool 
reconnect; - unsigned long flags; - - LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); - LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */ - - if (cp) { - msg_size = cp->ibcp_max_msg_size; - frag_num = cp->ibcp_max_frags << IBLND_FRAG_SHIFT; - queue_dep = cp->ibcp_queue_depth; - } - - write_lock_irqsave(glock, flags); - /** - * retry connection if it's still needed and no other connection - * attempts (active or passive) are in progress - * NB: reconnect is still needed even when ibp_tx_queue is - * empty if ibp_version != version because reconnect may be - * initiated by kiblnd_query() - */ - reconnect = (!list_empty(&peer->ibp_tx_queue) || - peer->ibp_version != version) && - peer->ibp_connecting && - !peer->ibp_accepting; - if (!reconnect) { - reason = "no need"; - goto out; - } - - switch (why) { - default: - reason = "Unknown"; - break; - - case IBLND_REJECT_RDMA_FRAGS: { - struct lnet_ioctl_config_lnd_tunables *tunables; - - if (!cp) { - reason = "can't negotiate max frags"; - goto out; - } - tunables = peer->ibp_ni->ni_lnd_tunables; - if (!tunables->lt_tun_u.lt_o2ib.lnd_map_on_demand) { - reason = "map_on_demand must be enabled"; - goto out; - } - if (conn->ibc_max_frags <= frag_num) { - reason = "unsupported max frags"; - goto out; - } - - peer->ibp_max_frags = frag_num; - reason = "rdma fragments"; - break; - } - case IBLND_REJECT_MSG_QUEUE_SIZE: - if (!cp) { - reason = "can't negotiate queue depth"; - goto out; - } - if (conn->ibc_queue_depth <= queue_dep) { - reason = "unsupported queue depth"; - goto out; - } - - peer->ibp_queue_depth = queue_dep; - reason = "queue depth"; - break; - - case IBLND_REJECT_CONN_STALE: - reason = "stale"; - break; - - case IBLND_REJECT_CONN_RACE: - reason = "conn race"; - break; - - case IBLND_REJECT_CONN_UNCOMPAT: - reason = "version negotiation"; - break; - } - - conn->ibc_reconnect = 1; - peer->ibp_reconnecting++; - peer->ibp_version = version; - if (incarnation) - peer->ibp_incarnation = incarnation; -out: 
- write_unlock_irqrestore(glock, flags); - - CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n", - libcfs_nid2str(peer->ibp_nid), - reconnect ? "reconnect" : "don't reconnect", - reason, IBLND_MSG_VERSION, version, msg_size, - conn->ibc_queue_depth, queue_dep, - conn->ibc_max_frags, frag_num); - /** - * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer - * while destroying the zombie - */ -} - -static void -kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob) -{ - struct kib_peer *peer = conn->ibc_peer; - - LASSERT(!in_interrupt()); - LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); - - switch (reason) { - case IB_CM_REJ_STALE_CONN: - kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0, - IBLND_REJECT_CONN_STALE, NULL); - break; - - case IB_CM_REJ_INVALID_SERVICE_ID: - CNETERR("%s rejected: no listener at %d\n", - libcfs_nid2str(peer->ibp_nid), - *kiblnd_tunables.kib_service); - break; - - case IB_CM_REJ_CONSUMER_DEFINED: - if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) { - struct kib_rej *rej = priv; - struct kib_connparams *cp = NULL; - int flip = 0; - __u64 incarnation = -1; - - /* NB. default incarnation is -1 because: - * a) V1 will ignore dst incarnation in connreq. - * b) V2 will provide incarnation while rejecting me, - * -1 will be overwrote. - * - * if I try to connect to a V1 peer with V2 protocol, - * it rejected me then upgrade to V2, I have no idea - * about the upgrading and try to reconnect with V1, - * in this case upgraded V2 can find out I'm trying to - * talk to the old guy and reject me(incarnation is -1). 
- */ - - if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) || - rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) { - __swab32s(&rej->ibr_magic); - __swab16s(&rej->ibr_version); - flip = 1; - } - - if (priv_nob >= sizeof(struct kib_rej) && - rej->ibr_version > IBLND_MSG_VERSION_1) { - /* - * priv_nob is always 148 in current version - * of OFED, so we still need to check version. - * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) - */ - cp = &rej->ibr_cp; - - if (flip) { - __swab64s(&rej->ibr_incarnation); - __swab16s(&cp->ibcp_queue_depth); - __swab16s(&cp->ibcp_max_frags); - __swab32s(&cp->ibcp_max_msg_size); - } - - incarnation = rej->ibr_incarnation; - } - - if (rej->ibr_magic != IBLND_MSG_MAGIC && - rej->ibr_magic != LNET_PROTO_MAGIC) { - CERROR("%s rejected: consumer defined fatal error\n", - libcfs_nid2str(peer->ibp_nid)); - break; - } - - if (rej->ibr_version != IBLND_MSG_VERSION && - rej->ibr_version != IBLND_MSG_VERSION_1) { - CERROR("%s rejected: o2iblnd version %x error\n", - libcfs_nid2str(peer->ibp_nid), - rej->ibr_version); - break; - } - - if (rej->ibr_why == IBLND_REJECT_FATAL && - rej->ibr_version == IBLND_MSG_VERSION_1) { - CDEBUG(D_NET, "rejected by old version peer %s: %x\n", - libcfs_nid2str(peer->ibp_nid), rej->ibr_version); - - if (conn->ibc_version != IBLND_MSG_VERSION_1) - rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT; - } - - switch (rej->ibr_why) { - case IBLND_REJECT_CONN_RACE: - case IBLND_REJECT_CONN_STALE: - case IBLND_REJECT_CONN_UNCOMPAT: - case IBLND_REJECT_MSG_QUEUE_SIZE: - case IBLND_REJECT_RDMA_FRAGS: - kiblnd_check_reconnect(conn, rej->ibr_version, - incarnation, - rej->ibr_why, cp); - break; - - case IBLND_REJECT_NO_RESOURCES: - CERROR("%s rejected: o2iblnd no resources\n", - libcfs_nid2str(peer->ibp_nid)); - break; - - case IBLND_REJECT_FATAL: - CERROR("%s rejected: o2iblnd fatal error\n", - libcfs_nid2str(peer->ibp_nid)); - break; - - default: - CERROR("%s rejected: o2iblnd reason %d\n", - libcfs_nid2str(peer->ibp_nid), - rej->ibr_why); - 
break; - } - break; - } - /* fall through */ - default: - CNETERR("%s rejected: reason %d, size %d\n", - libcfs_nid2str(peer->ibp_nid), reason, priv_nob); - break; - } - - kiblnd_connreq_done(conn, -ECONNREFUSED); -} - -static void -kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) -{ - struct kib_peer *peer = conn->ibc_peer; - struct lnet_ni *ni = peer->ibp_ni; - struct kib_net *net = ni->ni_data; - struct kib_msg *msg = priv; - int ver = conn->ibc_version; - int rc = kiblnd_unpack_msg(msg, priv_nob); - unsigned long flags; - - LASSERT(net); - - if (rc) { - CERROR("Can't unpack connack from %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); - goto failed; - } - - if (msg->ibm_type != IBLND_MSG_CONNACK) { - CERROR("Unexpected message %d from %s\n", - msg->ibm_type, libcfs_nid2str(peer->ibp_nid)); - rc = -EPROTO; - goto failed; - } - - if (ver != msg->ibm_version) { - CERROR("%s replied version %x is different with requested version %x\n", - libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver); - rc = -EPROTO; - goto failed; - } - - if (msg->ibm_u.connparams.ibcp_queue_depth > - conn->ibc_queue_depth) { - CERROR("%s has incompatible queue depth %d (<=%d wanted)\n", - libcfs_nid2str(peer->ibp_nid), - msg->ibm_u.connparams.ibcp_queue_depth, - conn->ibc_queue_depth); - rc = -EPROTO; - goto failed; - } - - if ((msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT) > - conn->ibc_max_frags) { - CERROR("%s has incompatible max_frags %d (<=%d wanted)\n", - libcfs_nid2str(peer->ibp_nid), - msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT, - conn->ibc_max_frags); - rc = -EPROTO; - goto failed; - } - - if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { - CERROR("%s max message size %d too big (%d max)\n", - libcfs_nid2str(peer->ibp_nid), - msg->ibm_u.connparams.ibcp_max_msg_size, - IBLND_MSG_SIZE); - rc = -EPROTO; - goto failed; - } - - read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (msg->ibm_dstnid == ni->ni_nid && - 
msg->ibm_dststamp == net->ibn_incarnation) - rc = 0; - else - rc = -ESTALE; - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - if (rc) { - CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n", - libcfs_nid2str(peer->ibp_nid), rc, - msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags); - goto failed; - } - - conn->ibc_incarnation = msg->ibm_srcstamp; - conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth; - conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth; - conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth; - conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT; - LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + - IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn)); - - kiblnd_connreq_done(conn, 0); - return; - - failed: - /* - * NB My QP has already established itself, so I handle anything going - * wrong here by setting ibc_comms_error. - * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then - * immediately tears it down. - */ - LASSERT(rc); - conn->ibc_comms_error = rc; - kiblnd_connreq_done(conn, 0); -} - -static int -kiblnd_active_connect(struct rdma_cm_id *cmid) -{ - struct kib_peer *peer = (struct kib_peer *)cmid->context; - struct kib_conn *conn; - struct kib_msg *msg; - struct rdma_conn_param cp; - int version; - __u64 incarnation; - unsigned long flags; - int rc; - - read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - incarnation = peer->ibp_incarnation; - version = !peer->ibp_version ? IBLND_MSG_VERSION : - peer->ibp_version; - - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, - version); - if (!conn) { - kiblnd_peer_connect_failed(peer, 1, -ENOMEM); - kiblnd_peer_decref(peer); /* lose cmid's ref */ - return -ENOMEM; - } - - /* - * conn "owns" cmid now, so I return success from here on to ensure the - * CM callback doesn't destroy cmid. 
conn also takes over cmid's ref - * on peer - */ - msg = &conn->ibc_connvars->cv_msg; - - memset(msg, 0, sizeof(*msg)); - kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams)); - msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; - msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags << IBLND_FRAG_SHIFT; - msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; - - kiblnd_pack_msg(peer->ibp_ni, msg, version, - 0, peer->ibp_nid, incarnation); - - memset(&cp, 0, sizeof(cp)); - cp.private_data = msg; - cp.private_data_len = msg->ibm_nob; - cp.responder_resources = 0; /* No atomic ops or RDMA reads */ - cp.initiator_depth = 0; - cp.flow_control = 1; - cp.retry_count = *kiblnd_tunables.kib_retry_count; - cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; - - LASSERT(cmid->context == (void *)conn); - LASSERT(conn->ibc_cmid == cmid); - - rc = rdma_connect(cmid, &cp); - if (rc) { - CERROR("Can't connect to %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); - kiblnd_connreq_done(conn, rc); - kiblnd_conn_decref(conn); - } - - return 0; -} - -int -kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) -{ - struct kib_peer *peer; - struct kib_conn *conn; - int rc; - - switch (event->event) { - default: - CERROR("Unexpected event: %d, status: %d\n", - event->event, event->status); - LBUG(); - - case RDMA_CM_EVENT_CONNECT_REQUEST: - /* destroy cmid on failure */ - rc = kiblnd_passive_connect(cmid, - (void *)KIBLND_CONN_PARAM(event), - KIBLND_CONN_PARAM_LEN(event)); - CDEBUG(D_NET, "connreq: %d\n", rc); - return rc; - - case RDMA_CM_EVENT_ADDR_ERROR: - peer = (struct kib_peer *)cmid->context; - CNETERR("%s: ADDR ERROR %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); - kiblnd_peer_decref(peer); - return -EHOSTUNREACH; /* rc destroys cmid */ - - case RDMA_CM_EVENT_ADDR_RESOLVED: - peer = (struct kib_peer *)cmid->context; - - CDEBUG(D_NET, "%s Addr resolved: 
%d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - - if (event->status) { - CNETERR("Can't resolve address for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - rc = event->status; - } else { - rc = rdma_resolve_route( - cmid, *kiblnd_tunables.kib_timeout * 1000); - if (!rc) { - struct kib_net *net = peer->ibp_ni->ni_data; - struct kib_dev *dev = net->ibn_dev; - - CDEBUG(D_NET, "%s: connection bound to "\ - "%s:%pI4h:%s\n", - libcfs_nid2str(peer->ibp_nid), - dev->ibd_ifname, - &dev->ibd_ifip, cmid->device->name); - - return 0; - } - - /* Can't initiate route resolution */ - CERROR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); - } - kiblnd_peer_connect_failed(peer, 1, rc); - kiblnd_peer_decref(peer); - return rc; /* rc destroys cmid */ - - case RDMA_CM_EVENT_ROUTE_ERROR: - peer = (struct kib_peer *)cmid->context; - CNETERR("%s: ROUTE ERROR %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); - kiblnd_peer_decref(peer); - return -EHOSTUNREACH; /* rc destroys cmid */ - - case RDMA_CM_EVENT_ROUTE_RESOLVED: - peer = (struct kib_peer *)cmid->context; - CDEBUG(D_NET, "%s Route resolved: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - - if (!event->status) - return kiblnd_active_connect(cmid); - - CNETERR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - kiblnd_peer_connect_failed(peer, 1, event->status); - kiblnd_peer_decref(peer); - return event->status; /* rc destroys cmid */ - - case RDMA_CM_EVENT_UNREACHABLE: - conn = (struct kib_conn *)cmid->context; - LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || - conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); - CNETERR("%s: UNREACHABLE %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); - kiblnd_connreq_done(conn, -ENETDOWN); - kiblnd_conn_decref(conn); - return 0; - - case RDMA_CM_EVENT_CONNECT_ERROR: - conn = (struct kib_conn *)cmid->context; - 
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || - conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); - CNETERR("%s: CONNECT ERROR %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); - kiblnd_connreq_done(conn, -ENOTCONN); - kiblnd_conn_decref(conn); - return 0; - - case RDMA_CM_EVENT_REJECTED: - conn = (struct kib_conn *)cmid->context; - switch (conn->ibc_state) { - default: - LBUG(); - - case IBLND_CONN_PASSIVE_WAIT: - CERROR("%s: REJECTED %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - event->status); - kiblnd_connreq_done(conn, -ECONNRESET); - break; - - case IBLND_CONN_ACTIVE_CONNECT: - kiblnd_rejected(conn, event->status, - (void *)KIBLND_CONN_PARAM(event), - KIBLND_CONN_PARAM_LEN(event)); - break; - } - kiblnd_conn_decref(conn); - return 0; - - case RDMA_CM_EVENT_ESTABLISHED: - conn = (struct kib_conn *)cmid->context; - switch (conn->ibc_state) { - default: - LBUG(); - - case IBLND_CONN_PASSIVE_WAIT: - CDEBUG(D_NET, "ESTABLISHED (passive): %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - kiblnd_connreq_done(conn, 0); - break; - - case IBLND_CONN_ACTIVE_CONNECT: - CDEBUG(D_NET, "ESTABLISHED(active): %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - kiblnd_check_connreply(conn, - (void *)KIBLND_CONN_PARAM(event), - KIBLND_CONN_PARAM_LEN(event)); - break; - } - /* net keeps its ref on conn! 
*/ - return 0; - - case RDMA_CM_EVENT_TIMEWAIT_EXIT: - CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n"); - return 0; - case RDMA_CM_EVENT_DISCONNECTED: - conn = (struct kib_conn *)cmid->context; - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - CERROR("%s DISCONNECTED\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - kiblnd_connreq_done(conn, -ECONNRESET); - } else { - kiblnd_close_conn(conn, 0); - } - kiblnd_conn_decref(conn); - cmid->context = NULL; - return 0; - - case RDMA_CM_EVENT_DEVICE_REMOVAL: - LCONSOLE_ERROR_MSG(0x131, - "Received notification of device removal\n" - "Please shutdown LNET to allow this to proceed\n"); - /* - * Can't remove network from underneath LNET for now, so I have - * to ignore this - */ - return 0; - - case RDMA_CM_EVENT_ADDR_CHANGE: - LCONSOLE_INFO("Physical link changed (eg hca/port)\n"); - return 0; - } -} - -static int -kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs) -{ - struct kib_tx *tx; - struct list_head *ttmp; - - list_for_each(ttmp, txs) { - tx = list_entry(ttmp, struct kib_tx, tx_list); - - if (txs != &conn->ibc_active_txs) { - LASSERT(tx->tx_queued); - } else { - LASSERT(!tx->tx_queued); - LASSERT(tx->tx_waiting || tx->tx_sending); - } - - if (time_after_eq(jiffies, tx->tx_deadline)) { - CERROR("Timed out tx: %s, %lu seconds\n", - kiblnd_queue2str(conn, txs), - (jiffies - tx->tx_deadline) / HZ); - return 1; - } - } - - return 0; -} - -static int -kiblnd_conn_timed_out_locked(struct kib_conn *conn) -{ - return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) || - kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) || - kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) || - kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) || - kiblnd_check_txs_locked(conn, &conn->ibc_active_txs); -} - -static void -kiblnd_check_conns(int idx) -{ - LIST_HEAD(closes); - LIST_HEAD(checksends); - struct list_head *peers = &kiblnd_data.kib_peers[idx]; - struct list_head *ptmp; - struct kib_peer *peer; - 
struct kib_conn *conn; - struct kib_conn *temp; - struct kib_conn *tmp; - struct list_head *ctmp; - unsigned long flags; - - /* - * NB. We expect to have a look at all the peers and not find any - * RDMAs to time out, so we just use a shared lock while we - * take a look... - */ - read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - - list_for_each(ptmp, peers) { - peer = list_entry(ptmp, struct kib_peer, ibp_list); - - list_for_each(ctmp, &peer->ibp_conns) { - int timedout; - int sendnoop; - - conn = list_entry(ctmp, struct kib_conn, ibc_list); - - LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED); - - spin_lock(&conn->ibc_lock); - - sendnoop = kiblnd_need_noop(conn); - timedout = kiblnd_conn_timed_out_locked(conn); - if (!sendnoop && !timedout) { - spin_unlock(&conn->ibc_lock); - continue; - } - - if (timedout) { - CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n", - libcfs_nid2str(peer->ibp_nid), - (jiffies - peer->ibp_last_alive) / HZ, - conn->ibc_credits, - conn->ibc_outstanding_credits, - conn->ibc_reserved_credits); - list_add(&conn->ibc_connd_list, &closes); - } else { - list_add(&conn->ibc_connd_list, &checksends); - } - /* +ref for 'closes' or 'checksends' */ - kiblnd_conn_addref(conn); - - spin_unlock(&conn->ibc_lock); - } - } - - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - /* - * Handle timeout by closing the whole - * connection. We can only be sure RDMA activity - * has ceased once the QP has been modified. - */ - list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) { - list_del(&conn->ibc_connd_list); - kiblnd_close_conn(conn, -ETIMEDOUT); - kiblnd_conn_decref(conn); - } - - /* - * In case we have enough credits to return via a - * NOOP, but there were no non-blocking tx descs - * free to do it last time... 
- */ - list_for_each_entry_safe(conn, temp, &checksends, ibc_connd_list) { - list_del(&conn->ibc_connd_list); - - spin_lock(&conn->ibc_lock); - kiblnd_check_sends_locked(conn); - spin_unlock(&conn->ibc_lock); - - kiblnd_conn_decref(conn); - } -} - -static void -kiblnd_disconnect_conn(struct kib_conn *conn) -{ - LASSERT(!in_interrupt()); - LASSERT(current == kiblnd_data.kib_connd); - LASSERT(conn->ibc_state == IBLND_CONN_CLOSING); - - rdma_disconnect(conn->ibc_cmid); - kiblnd_finalise_conn(conn); - - kiblnd_peer_notify(conn->ibc_peer); -} - -/** - * High-water for reconnection to the same peer, reconnection attempt should - * be delayed after trying more than KIB_RECONN_HIGH_RACE. - */ -#define KIB_RECONN_HIGH_RACE 10 -/** - * Allow connd to take a break and handle other things after consecutive - * reconnection attempts. - */ -#define KIB_RECONN_BREAK 100 - -int -kiblnd_connd(void *arg) -{ - spinlock_t *lock = &kiblnd_data.kib_connd_lock; - wait_queue_entry_t wait; - unsigned long flags; - struct kib_conn *conn; - int timeout; - int i; - int dropped_lock; - int peer_index = 0; - unsigned long deadline = jiffies; - - init_waitqueue_entry(&wait, current); - kiblnd_data.kib_connd = current; - - spin_lock_irqsave(lock, flags); - - while (!kiblnd_data.kib_shutdown) { - int reconn = 0; - - dropped_lock = 0; - - if (!list_empty(&kiblnd_data.kib_connd_zombies)) { - struct kib_peer *peer = NULL; - - conn = list_entry(kiblnd_data.kib_connd_zombies.next, - struct kib_conn, ibc_list); - list_del(&conn->ibc_list); - if (conn->ibc_reconnect) { - peer = conn->ibc_peer; - kiblnd_peer_addref(peer); - } - - spin_unlock_irqrestore(lock, flags); - dropped_lock = 1; - - kiblnd_destroy_conn(conn); - - spin_lock_irqsave(lock, flags); - if (!peer) { - kfree(conn); - continue; - } - - conn->ibc_peer = peer; - if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE) - list_add_tail(&conn->ibc_list, - &kiblnd_data.kib_reconn_list); - else - list_add_tail(&conn->ibc_list, - 
&kiblnd_data.kib_reconn_wait); - } - - if (!list_empty(&kiblnd_data.kib_connd_conns)) { - conn = list_entry(kiblnd_data.kib_connd_conns.next, - struct kib_conn, ibc_list); - list_del(&conn->ibc_list); - - spin_unlock_irqrestore(lock, flags); - dropped_lock = 1; - - kiblnd_disconnect_conn(conn); - kiblnd_conn_decref(conn); - - spin_lock_irqsave(lock, flags); - } - - while (reconn < KIB_RECONN_BREAK) { - if (kiblnd_data.kib_reconn_sec != - ktime_get_real_seconds()) { - kiblnd_data.kib_reconn_sec = ktime_get_real_seconds(); - list_splice_init(&kiblnd_data.kib_reconn_wait, - &kiblnd_data.kib_reconn_list); - } - - if (list_empty(&kiblnd_data.kib_reconn_list)) - break; - - conn = list_entry(kiblnd_data.kib_reconn_list.next, - struct kib_conn, ibc_list); - list_del(&conn->ibc_list); - - spin_unlock_irqrestore(lock, flags); - dropped_lock = 1; - - reconn += kiblnd_reconnect_peer(conn->ibc_peer); - kiblnd_peer_decref(conn->ibc_peer); - kfree(conn); - - spin_lock_irqsave(lock, flags); - } - - /* careful with the jiffy wrap... */ - timeout = (int)(deadline - jiffies); - if (timeout <= 0) { - const int n = 4; - const int p = 1; - int chunk = kiblnd_data.kib_peer_hash_size; - - spin_unlock_irqrestore(lock, flags); - dropped_lock = 1; - - /* - * Time to check for RDMA timeouts on a few more - * peers: I do checks every 'p' seconds on a - * proportion of the peer table and I need to check - * every connection 'n' times within a timeout - * interval, to ensure I detect a timeout on any - * connection within (n+1)/n times the timeout - * interval. 
- */ - if (*kiblnd_tunables.kib_timeout > n * p) - chunk = (chunk * n * p) / - *kiblnd_tunables.kib_timeout; - if (!chunk) - chunk = 1; - - for (i = 0; i < chunk; i++) { - kiblnd_check_conns(peer_index); - peer_index = (peer_index + 1) % - kiblnd_data.kib_peer_hash_size; - } - - deadline += msecs_to_jiffies(p * MSEC_PER_SEC); - spin_lock_irqsave(lock, flags); - } - - if (dropped_lock) - continue; - - /* Nothing to do for 'timeout' */ - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); - spin_unlock_irqrestore(lock, flags); - - schedule_timeout(timeout); - - remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); - spin_lock_irqsave(lock, flags); - } - - spin_unlock_irqrestore(lock, flags); - - kiblnd_thread_fini(); - return 0; -} - -void -kiblnd_qp_event(struct ib_event *event, void *arg) -{ - struct kib_conn *conn = arg; - - switch (event->event) { - case IB_EVENT_COMM_EST: - CDEBUG(D_NET, "%s established\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - /* - * We received a packet but connection isn't established - * probably handshake packet was lost, so free to - * force make connection established - */ - rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST); - return; - - default: - CERROR("%s: Async QP event type %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event); - return; - } -} - -static void -kiblnd_complete(struct ib_wc *wc) -{ - switch (kiblnd_wreqid2type(wc->wr_id)) { - default: - LBUG(); - - case IBLND_WID_MR: - if (wc->status != IB_WC_SUCCESS && - wc->status != IB_WC_WR_FLUSH_ERR) - CNETERR("FastReg failed: %d\n", wc->status); - break; - - case IBLND_WID_RDMA: - /* - * We only get RDMA completion notification if it fails. All - * subsequent work items, including the final SEND will fail - * too. 
However we can't print out any more info about the - * failing RDMA because 'tx' might be back on the idle list or - * even reused already if we didn't manage to post all our work - * items - */ - CNETERR("RDMA (tx: %p) failed: %d\n", - kiblnd_wreqid2ptr(wc->wr_id), wc->status); - return; - - case IBLND_WID_TX: - kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status); - return; - - case IBLND_WID_RX: - kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status, - wc->byte_len); - return; - } -} - -void -kiblnd_cq_completion(struct ib_cq *cq, void *arg) -{ - /* - * NB I'm not allowed to schedule this conn once its refcount has - * reached 0. Since fundamentally I'm racing with scheduler threads - * consuming my CQ I could be called after all completions have - * occurred. But in this case, !ibc_nrx && !ibc_nsends_posted - * and this CQ is about to be destroyed so I NOOP. - */ - struct kib_conn *conn = arg; - struct kib_sched_info *sched = conn->ibc_sched; - unsigned long flags; - - LASSERT(cq == conn->ibc_cq); - - spin_lock_irqsave(&sched->ibs_lock, flags); - - conn->ibc_ready = 1; - - if (!conn->ibc_scheduled && - (conn->ibc_nrx > 0 || - conn->ibc_nsends_posted > 0)) { - kiblnd_conn_addref(conn); /* +1 ref for sched_conns */ - conn->ibc_scheduled = 1; - list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); - - if (waitqueue_active(&sched->ibs_waitq)) - wake_up(&sched->ibs_waitq); - } - - spin_unlock_irqrestore(&sched->ibs_lock, flags); -} - -void -kiblnd_cq_event(struct ib_event *event, void *arg) -{ - struct kib_conn *conn = arg; - - CERROR("%s: async CQ event type %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event); -} - -int -kiblnd_scheduler(void *arg) -{ - long id = (long)arg; - struct kib_sched_info *sched; - struct kib_conn *conn; - wait_queue_entry_t wait; - unsigned long flags; - struct ib_wc wc; - int did_something; - int busy_loops = 0; - int rc; - - init_waitqueue_entry(&wait, current); - - sched = 
kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)]; - - rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt); - if (rc) { - CWARN("Unable to bind on CPU partition %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n", - sched->ibs_cpt); - } - - spin_lock_irqsave(&sched->ibs_lock, flags); - - while (!kiblnd_data.kib_shutdown) { - if (busy_loops++ >= IBLND_RESCHED) { - spin_unlock_irqrestore(&sched->ibs_lock, flags); - - cond_resched(); - busy_loops = 0; - - spin_lock_irqsave(&sched->ibs_lock, flags); - } - - did_something = 0; - - if (!list_empty(&sched->ibs_conns)) { - conn = list_entry(sched->ibs_conns.next, struct kib_conn, - ibc_sched_list); - /* take over kib_sched_conns' ref on conn... */ - LASSERT(conn->ibc_scheduled); - list_del(&conn->ibc_sched_list); - conn->ibc_ready = 0; - - spin_unlock_irqrestore(&sched->ibs_lock, flags); - - wc.wr_id = IBLND_WID_INVAL; - - rc = ib_poll_cq(conn->ibc_cq, 1, &wc); - if (!rc) { - rc = ib_req_notify_cq(conn->ibc_cq, - IB_CQ_NEXT_COMP); - if (rc < 0) { - CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); - kiblnd_close_conn(conn, -EIO); - kiblnd_conn_decref(conn); - spin_lock_irqsave(&sched->ibs_lock, - flags); - continue; - } - - rc = ib_poll_cq(conn->ibc_cq, 1, &wc); - } - - if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) { - LCONSOLE_ERROR("ib_poll_cq (rc: %d) returned invalid wr_id, opcode %d, status: %d, vendor_err: %d, conn: %s status: %d\nplease upgrade firmware and OFED or contact vendor.\n", - rc, wc.opcode, wc.status, - wc.vendor_err, - libcfs_nid2str(conn->ibc_peer->ibp_nid), - conn->ibc_state); - rc = -EINVAL; - } - - if (rc < 0) { - CWARN("%s: ib_poll_cq failed: %d, closing connection\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc); - kiblnd_close_conn(conn, -EIO); - kiblnd_conn_decref(conn); - spin_lock_irqsave(&sched->ibs_lock, flags); - continue; - } - - 
spin_lock_irqsave(&sched->ibs_lock, flags); - - if (rc || conn->ibc_ready) { - /* - * There may be another completion waiting; get - * another scheduler to check while I handle - * this one... - */ - /* +1 ref for sched_conns */ - kiblnd_conn_addref(conn); - list_add_tail(&conn->ibc_sched_list, - &sched->ibs_conns); - if (waitqueue_active(&sched->ibs_waitq)) - wake_up(&sched->ibs_waitq); - } else { - conn->ibc_scheduled = 0; - } - - if (rc) { - spin_unlock_irqrestore(&sched->ibs_lock, flags); - kiblnd_complete(&wc); - - spin_lock_irqsave(&sched->ibs_lock, flags); - } - - kiblnd_conn_decref(conn); /* ...drop my ref from above */ - did_something = 1; - } - - if (did_something) - continue; - - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue_exclusive(&sched->ibs_waitq, &wait); - spin_unlock_irqrestore(&sched->ibs_lock, flags); - - schedule(); - busy_loops = 0; - - remove_wait_queue(&sched->ibs_waitq, &wait); - spin_lock_irqsave(&sched->ibs_lock, flags); - } - - spin_unlock_irqrestore(&sched->ibs_lock, flags); - - kiblnd_thread_fini(); - return 0; -} - -int -kiblnd_failover_thread(void *arg) -{ - rwlock_t *glock = &kiblnd_data.kib_global_lock; - struct kib_dev *dev; - wait_queue_entry_t wait; - unsigned long flags; - int rc; - - LASSERT(*kiblnd_tunables.kib_dev_failover); - - init_waitqueue_entry(&wait, current); - write_lock_irqsave(glock, flags); - - while (!kiblnd_data.kib_shutdown) { - int do_failover = 0; - int long_sleep; - - list_for_each_entry(dev, &kiblnd_data.kib_failed_devs, - ibd_fail_list) { - if (time_before(jiffies, - dev->ibd_next_failover)) - continue; - do_failover = 1; - break; - } - - if (do_failover) { - list_del_init(&dev->ibd_fail_list); - dev->ibd_failover = 1; - write_unlock_irqrestore(glock, flags); - - rc = kiblnd_dev_failover(dev); - - write_lock_irqsave(glock, flags); - - LASSERT(dev->ibd_failover); - dev->ibd_failover = 0; - if (rc >= 0) { /* Device is OK or failover succeed */ - dev->ibd_next_failover = jiffies + 3 * HZ; - 
continue; - } - - /* failed to failover, retry later */ - dev->ibd_next_failover = - jiffies + min(dev->ibd_failed_failover, 10) * HZ; - if (kiblnd_dev_can_failover(dev)) { - list_add_tail(&dev->ibd_fail_list, - &kiblnd_data.kib_failed_devs); - } - - continue; - } - - /* long sleep if no more pending failover */ - long_sleep = list_empty(&kiblnd_data.kib_failed_devs); - - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); - write_unlock_irqrestore(glock, flags); - - rc = schedule_timeout(long_sleep ? 10 * HZ : - HZ); - remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); - write_lock_irqsave(glock, flags); - - if (!long_sleep || rc) - continue; - - /* - * have a long sleep, routine check all active devices, - * we need checking like this because if there is not active - * connection on the dev and no SEND from local, we may listen - * on wrong HCA for ever while there is a bonding failover - */ - list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { - if (kiblnd_dev_can_failover(dev)) { - list_add_tail(&dev->ibd_fail_list, - &kiblnd_data.kib_failed_devs); - } - } - } - - write_unlock_irqrestore(glock, flags); - - kiblnd_thread_fini(); - return 0; -} diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c deleted file mode 100644 index 39d07926d603..000000000000 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c +++ /dev/null @@ -1,296 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/klnds/o2iblnd/o2iblnd_modparams.c - * - * Author: Eric Barton - */ - -#include "o2iblnd.h" - -static int service = 987; -module_param(service, int, 0444); -MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)"); - -static int cksum; -module_param(cksum, int, 0644); -MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums"); - -static int timeout = 50; -module_param(timeout, int, 0644); -MODULE_PARM_DESC(timeout, "timeout (seconds)"); - -/* - * Number of threads in each scheduler pool which is percpt, - * we will estimate reasonable value based on CPUs if it's set to zero. 
- */ -static int nscheds; -module_param(nscheds, int, 0444); -MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool"); - -static unsigned int conns_per_peer = 1; -module_param(conns_per_peer, uint, 0444); -MODULE_PARM_DESC(conns_per_peer, "number of connections per peer"); - -/* NB: this value is shared by all CPTs, it can grow at runtime */ -static int ntx = 512; -module_param(ntx, int, 0444); -MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool"); - -/* NB: this value is shared by all CPTs */ -static int credits = 256; -module_param(credits, int, 0444); -MODULE_PARM_DESC(credits, "# concurrent sends"); - -static int peer_credits = 8; -module_param(peer_credits, int, 0444); -MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer"); - -static int peer_credits_hiw; -module_param(peer_credits_hiw, int, 0444); -MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits"); - -static int peer_buffer_credits; -module_param(peer_buffer_credits, int, 0444); -MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits"); - -static int peer_timeout = 180; -module_param(peer_timeout, int, 0444); -MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)"); - -static char *ipif_name = "ib0"; -module_param(ipif_name, charp, 0444); -MODULE_PARM_DESC(ipif_name, "IPoIB interface name"); - -static int retry_count = 5; -module_param(retry_count, int, 0644); -MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received"); - -static int rnr_retry_count = 6; -module_param(rnr_retry_count, int, 0644); -MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions"); - -static int keepalive = 100; -module_param(keepalive, int, 0644); -MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive"); - -static int ib_mtu; -module_param(ib_mtu, int, 0444); -MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096"); - -static int concurrent_sends; 
-module_param(concurrent_sends, int, 0444); -MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing"); - -#define IBLND_DEFAULT_MAP_ON_DEMAND IBLND_MAX_RDMA_FRAGS -static int map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND; -module_param(map_on_demand, int, 0444); -MODULE_PARM_DESC(map_on_demand, "map on demand"); - -/* NB: this value is shared by all CPTs, it can grow at runtime */ -static int fmr_pool_size = 512; -module_param(fmr_pool_size, int, 0444); -MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)"); - -/* NB: this value is shared by all CPTs, it can grow at runtime */ -static int fmr_flush_trigger = 384; -module_param(fmr_flush_trigger, int, 0444); -MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush"); - -static int fmr_cache = 1; -module_param(fmr_cache, int, 0444); -MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching"); - -/* - * 0: disable failover - * 1: enable failover if necessary - * 2: force to failover (for debug) - */ -static int dev_failover; -module_param(dev_failover, int, 0444); -MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)"); - -static int require_privileged_port; -module_param(require_privileged_port, int, 0644); -MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection"); - -static int use_privileged_port = 1; -module_param(use_privileged_port, int, 0644); -MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection"); - -struct kib_tunables kiblnd_tunables = { - .kib_dev_failover = &dev_failover, - .kib_service = &service, - .kib_cksum = &cksum, - .kib_timeout = &timeout, - .kib_keepalive = &keepalive, - .kib_ntx = &ntx, - .kib_default_ipif = &ipif_name, - .kib_retry_count = &retry_count, - .kib_rnr_retry_count = &rnr_retry_count, - .kib_ib_mtu = &ib_mtu, - .kib_require_priv_port = &require_privileged_port, - .kib_use_priv_port = &use_privileged_port, - .kib_nscheds = 
&nscheds -}; - -static struct lnet_ioctl_config_o2iblnd_tunables default_tunables; - -/* # messages/RDMAs in-flight */ -int kiblnd_msg_queue_size(int version, struct lnet_ni *ni) -{ - if (version == IBLND_MSG_VERSION_1) - return IBLND_MSG_QUEUE_SIZE_V1; - else if (ni) - return ni->ni_peertxcredits; - else - return peer_credits; -} - -int kiblnd_tunables_setup(struct lnet_ni *ni) -{ - struct lnet_ioctl_config_o2iblnd_tunables *tunables; - - /* - * if there was no tunables specified, setup the tunables to be - * defaulted - */ - if (!ni->ni_lnd_tunables) { - ni->ni_lnd_tunables = kzalloc(sizeof(*ni->ni_lnd_tunables), - GFP_NOFS); - if (!ni->ni_lnd_tunables) - return -ENOMEM; - - memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib, - &default_tunables, sizeof(*tunables)); - } - tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; - - /* Current API version */ - tunables->lnd_version = 0; - - if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) { - CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n", - *kiblnd_tunables.kib_ib_mtu); - return -EINVAL; - } - - if (!ni->ni_peertimeout) - ni->ni_peertimeout = peer_timeout; - - if (!ni->ni_maxtxcredits) - ni->ni_maxtxcredits = credits; - - if (!ni->ni_peertxcredits) - ni->ni_peertxcredits = peer_credits; - - if (!ni->ni_peerrtrcredits) - ni->ni_peerrtrcredits = peer_buffer_credits; - - if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT) - ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT; - - if (ni->ni_peertxcredits > IBLND_CREDITS_MAX) - ni->ni_peertxcredits = IBLND_CREDITS_MAX; - - if (ni->ni_peertxcredits > credits) - ni->ni_peertxcredits = credits; - - if (!tunables->lnd_peercredits_hiw) - tunables->lnd_peercredits_hiw = peer_credits_hiw; - - if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2) - tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2; - - if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits) - tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1; - - if (tunables->lnd_map_on_demand 
<= 0 || - tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) { - /* Use the default */ - CWARN("Invalid map_on_demand (%d), expects 1 - %d. Using default of %d\n", - tunables->lnd_map_on_demand, - IBLND_MAX_RDMA_FRAGS, IBLND_DEFAULT_MAP_ON_DEMAND); - tunables->lnd_map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND; - } - - if (tunables->lnd_map_on_demand == 1) { - /* don't make sense to create map if only one fragment */ - tunables->lnd_map_on_demand = 2; - } - - if (!tunables->lnd_concurrent_sends) { - if (tunables->lnd_map_on_demand > 0 && - tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) { - tunables->lnd_concurrent_sends = - ni->ni_peertxcredits * 2; - } else { - tunables->lnd_concurrent_sends = ni->ni_peertxcredits; - } - } - - if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2) - tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2; - - if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits / 2) - tunables->lnd_concurrent_sends = ni->ni_peertxcredits / 2; - - if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) { - CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n", - tunables->lnd_concurrent_sends, ni->ni_peertxcredits); - } - - if (!tunables->lnd_fmr_pool_size) - tunables->lnd_fmr_pool_size = fmr_pool_size; - if (!tunables->lnd_fmr_flush_trigger) - tunables->lnd_fmr_flush_trigger = fmr_flush_trigger; - if (!tunables->lnd_fmr_cache) - tunables->lnd_fmr_cache = fmr_cache; - if (!tunables->lnd_conns_per_peer) { - tunables->lnd_conns_per_peer = (conns_per_peer) ? 
- conns_per_peer : 1; - } - - return 0; -} - -void kiblnd_tunables_init(void) -{ - default_tunables.lnd_version = 0; - default_tunables.lnd_peercredits_hiw = peer_credits_hiw, - default_tunables.lnd_map_on_demand = map_on_demand; - default_tunables.lnd_concurrent_sends = concurrent_sends; - default_tunables.lnd_fmr_pool_size = fmr_pool_size; - default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger; - default_tunables.lnd_fmr_cache = fmr_cache; - default_tunables.lnd_conns_per_peer = conns_per_peer; -} diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile deleted file mode 100644 index a7da1abfc804..000000000000 --- a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LNET) += ksocklnd.o - -ksocklnd-y := socklnd.o socklnd_cb.o socklnd_proto.o socklnd_modparams.o socklnd_lib.o diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c deleted file mode 100644 index f01b34ac1a53..000000000000 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ /dev/null @@ -1,2921 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/klnds/socklnd/socklnd.c - * - * Author: Zach Brown - * Author: Peter J. Braam - * Author: Phil Schwan - * Author: Eric Barton - */ - -#include "socklnd.h" - -static struct lnet_lnd the_ksocklnd; -struct ksock_nal_data ksocknal_data; - -static struct ksock_interface * -ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip) -{ - struct ksock_net *net = ni->ni_data; - int i; - struct ksock_interface *iface; - - for (i = 0; i < net->ksnn_ninterfaces; i++) { - LASSERT(i < LNET_MAX_INTERFACES); - iface = &net->ksnn_interfaces[i]; - - if (iface->ksni_ipaddr == ip) - return iface; - } - - return NULL; -} - -static struct ksock_route * -ksocknal_create_route(__u32 ipaddr, int port) -{ - struct ksock_route *route; - - route = kzalloc(sizeof(*route), GFP_NOFS); - if (!route) - return NULL; - - atomic_set(&route->ksnr_refcount, 1); - route->ksnr_peer = NULL; - route->ksnr_retry_interval = 0; /* OK to connect at any time */ - route->ksnr_ipaddr = ipaddr; - route->ksnr_port = port; - route->ksnr_scheduled = 0; - route->ksnr_connecting = 0; - route->ksnr_connected = 0; - route->ksnr_deleted = 0; - route->ksnr_conn_count = 0; - route->ksnr_share_count = 0; - - return route; -} - -void -ksocknal_destroy_route(struct ksock_route *route) -{ - LASSERT(!atomic_read(&route->ksnr_refcount)); - - if (route->ksnr_peer) - ksocknal_peer_decref(route->ksnr_peer); - - kfree(route); -} - -static int -ksocknal_create_peer(struct ksock_peer **peerp, struct lnet_ni *ni, - struct lnet_process_id id) -{ - int cpt 
= lnet_cpt_of_nid(id.nid); - struct ksock_net *net = ni->ni_data; - struct ksock_peer *peer; - - LASSERT(id.nid != LNET_NID_ANY); - LASSERT(id.pid != LNET_PID_ANY); - LASSERT(!in_interrupt()); - - peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt); - if (!peer) - return -ENOMEM; - - peer->ksnp_ni = ni; - peer->ksnp_id = id; - atomic_set(&peer->ksnp_refcount, 1); /* 1 ref for caller */ - peer->ksnp_closing = 0; - peer->ksnp_accepting = 0; - peer->ksnp_proto = NULL; - peer->ksnp_last_alive = 0; - peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; - - INIT_LIST_HEAD(&peer->ksnp_conns); - INIT_LIST_HEAD(&peer->ksnp_routes); - INIT_LIST_HEAD(&peer->ksnp_tx_queue); - INIT_LIST_HEAD(&peer->ksnp_zc_req_list); - spin_lock_init(&peer->ksnp_lock); - - spin_lock_bh(&net->ksnn_lock); - - if (net->ksnn_shutdown) { - spin_unlock_bh(&net->ksnn_lock); - - kfree(peer); - CERROR("Can't create peer: network shutdown\n"); - return -ESHUTDOWN; - } - - net->ksnn_npeers++; - - spin_unlock_bh(&net->ksnn_lock); - - *peerp = peer; - return 0; -} - -void -ksocknal_destroy_peer(struct ksock_peer *peer) -{ - struct ksock_net *net = peer->ksnp_ni->ni_data; - - CDEBUG(D_NET, "peer %s %p deleted\n", - libcfs_id2str(peer->ksnp_id), peer); - - LASSERT(!atomic_read(&peer->ksnp_refcount)); - LASSERT(!peer->ksnp_accepting); - LASSERT(list_empty(&peer->ksnp_conns)); - LASSERT(list_empty(&peer->ksnp_routes)); - LASSERT(list_empty(&peer->ksnp_tx_queue)); - LASSERT(list_empty(&peer->ksnp_zc_req_list)); - - kfree(peer); - - /* - * NB a peer's connections and routes keep a reference on their peer - * until they are destroyed, so we can be assured that _all_ state to - * do with this peer has been cleaned up when its refcount drops to - * zero. 
- */ - spin_lock_bh(&net->ksnn_lock); - net->ksnn_npeers--; - spin_unlock_bh(&net->ksnn_lock); -} - -struct ksock_peer * -ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id) -{ - struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); - struct ksock_peer *peer; - - list_for_each_entry(peer, peer_list, ksnp_list) { - LASSERT(!peer->ksnp_closing); - - if (peer->ksnp_ni != ni) - continue; - - if (peer->ksnp_id.nid != id.nid || - peer->ksnp_id.pid != id.pid) - continue; - - CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n", - peer, libcfs_id2str(id), - atomic_read(&peer->ksnp_refcount)); - return peer; - } - return NULL; -} - -struct ksock_peer * -ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id) -{ - struct ksock_peer *peer; - - read_lock(&ksocknal_data.ksnd_global_lock); - peer = ksocknal_find_peer_locked(ni, id); - if (peer) /* +1 ref for caller? */ - ksocknal_peer_addref(peer); - read_unlock(&ksocknal_data.ksnd_global_lock); - - return peer; -} - -static void -ksocknal_unlink_peer_locked(struct ksock_peer *peer) -{ - int i; - __u32 ip; - struct ksock_interface *iface; - - for (i = 0; i < peer->ksnp_n_passive_ips; i++) { - LASSERT(i < LNET_MAX_INTERFACES); - ip = peer->ksnp_passive_ips[i]; - - iface = ksocknal_ip2iface(peer->ksnp_ni, ip); - /* - * All IPs in peer->ksnp_passive_ips[] come from the - * interface list, therefore the call must succeed. 
- */ - LASSERT(iface); - - CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n", - peer, iface, iface->ksni_nroutes); - iface->ksni_npeers--; - } - - LASSERT(list_empty(&peer->ksnp_conns)); - LASSERT(list_empty(&peer->ksnp_routes)); - LASSERT(!peer->ksnp_closing); - peer->ksnp_closing = 1; - list_del(&peer->ksnp_list); - /* lose peerlist's ref */ - ksocknal_peer_decref(peer); -} - -static int -ksocknal_get_peer_info(struct lnet_ni *ni, int index, - struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip, - int *port, int *conn_count, int *share_count) -{ - struct ksock_peer *peer; - struct list_head *ptmp; - struct ksock_route *route; - struct list_head *rtmp; - int i; - int j; - int rc = -ENOENT; - - read_lock(&ksocknal_data.ksnd_global_lock); - - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, struct ksock_peer, ksnp_list); - - if (peer->ksnp_ni != ni) - continue; - - if (!peer->ksnp_n_passive_ips && - list_empty(&peer->ksnp_routes)) { - if (index-- > 0) - continue; - - *id = peer->ksnp_id; - *myip = 0; - *peer_ip = 0; - *port = 0; - *conn_count = 0; - *share_count = 0; - rc = 0; - goto out; - } - - for (j = 0; j < peer->ksnp_n_passive_ips; j++) { - if (index-- > 0) - continue; - - *id = peer->ksnp_id; - *myip = peer->ksnp_passive_ips[j]; - *peer_ip = 0; - *port = 0; - *conn_count = 0; - *share_count = 0; - rc = 0; - goto out; - } - - list_for_each(rtmp, &peer->ksnp_routes) { - if (index-- > 0) - continue; - - route = list_entry(rtmp, struct ksock_route, - ksnr_list); - - *id = peer->ksnp_id; - *myip = route->ksnr_myipaddr; - *peer_ip = route->ksnr_ipaddr; - *port = route->ksnr_port; - *conn_count = route->ksnr_conn_count; - *share_count = route->ksnr_share_count; - rc = 0; - goto out; - } - } - } - out: - read_unlock(&ksocknal_data.ksnd_global_lock); - return rc; -} - -static void -ksocknal_associate_route_conn_locked(struct ksock_route *route, - struct ksock_conn *conn) -{ - 
struct ksock_peer *peer = route->ksnr_peer; - int type = conn->ksnc_type; - struct ksock_interface *iface; - - conn->ksnc_route = route; - ksocknal_route_addref(route); - - if (route->ksnr_myipaddr != conn->ksnc_myipaddr) { - if (!route->ksnr_myipaddr) { - /* route wasn't bound locally yet (the initial route) */ - CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n", - libcfs_id2str(peer->ksnp_id), - &route->ksnr_ipaddr, - &conn->ksnc_myipaddr); - } else { - CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n", - libcfs_id2str(peer->ksnp_id), - &route->ksnr_ipaddr, - &route->ksnr_myipaddr, - &conn->ksnc_myipaddr); - - iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, - route->ksnr_myipaddr); - if (iface) - iface->ksni_nroutes--; - } - route->ksnr_myipaddr = conn->ksnc_myipaddr; - iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, - route->ksnr_myipaddr); - if (iface) - iface->ksni_nroutes++; - } - - route->ksnr_connected |= (1 << type); - route->ksnr_conn_count++; - - /* - * Successful connection => further attempts can - * proceed immediately - */ - route->ksnr_retry_interval = 0; -} - -static void -ksocknal_add_route_locked(struct ksock_peer *peer, struct ksock_route *route) -{ - struct list_head *tmp; - struct ksock_conn *conn; - struct ksock_route *route2; - - LASSERT(!peer->ksnp_closing); - LASSERT(!route->ksnr_peer); - LASSERT(!route->ksnr_scheduled); - LASSERT(!route->ksnr_connecting); - LASSERT(!route->ksnr_connected); - - /* LASSERT(unique) */ - list_for_each(tmp, &peer->ksnp_routes) { - route2 = list_entry(tmp, struct ksock_route, ksnr_list); - - if (route2->ksnr_ipaddr == route->ksnr_ipaddr) { - CERROR("Duplicate route %s %pI4h\n", - libcfs_id2str(peer->ksnp_id), - &route->ksnr_ipaddr); - LBUG(); - } - } - - route->ksnr_peer = peer; - ksocknal_peer_addref(peer); - /* peer's routelist takes over my ref on 'route' */ - list_add_tail(&route->ksnr_list, &peer->ksnp_routes); - - list_for_each(tmp, &peer->ksnp_conns) { - conn = list_entry(tmp, struct 
ksock_conn, ksnc_list); - - if (conn->ksnc_ipaddr != route->ksnr_ipaddr) - continue; - - ksocknal_associate_route_conn_locked(route, conn); - /* keep going (typed routes) */ - } -} - -static void -ksocknal_del_route_locked(struct ksock_route *route) -{ - struct ksock_peer *peer = route->ksnr_peer; - struct ksock_interface *iface; - struct ksock_conn *conn; - struct list_head *ctmp; - struct list_head *cnxt; - - LASSERT(!route->ksnr_deleted); - - /* Close associated conns */ - list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { - conn = list_entry(ctmp, struct ksock_conn, ksnc_list); - - if (conn->ksnc_route != route) - continue; - - ksocknal_close_conn_locked(conn, 0); - } - - if (route->ksnr_myipaddr) { - iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, - route->ksnr_myipaddr); - if (iface) - iface->ksni_nroutes--; - } - - route->ksnr_deleted = 1; - list_del(&route->ksnr_list); - ksocknal_route_decref(route); /* drop peer's ref */ - - if (list_empty(&peer->ksnp_routes) && - list_empty(&peer->ksnp_conns)) { - /* - * I've just removed the last route to a peer with no active - * connections - */ - ksocknal_unlink_peer_locked(peer); - } -} - -int -ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr, - int port) -{ - struct ksock_peer *peer; - struct ksock_peer *peer2; - struct ksock_route *route; - struct ksock_route *route2; - int rc; - - if (id.nid == LNET_NID_ANY || - id.pid == LNET_PID_ANY) - return -EINVAL; - - /* Have a brand new peer ready... 
*/ - rc = ksocknal_create_peer(&peer, ni, id); - if (rc) - return rc; - - route = ksocknal_create_route(ipaddr, port); - if (!route) { - ksocknal_peer_decref(peer); - return -ENOMEM; - } - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - /* always called with a ref on ni, so shutdown can't have started */ - LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown); - - peer2 = ksocknal_find_peer_locked(ni, id); - if (peer2) { - ksocknal_peer_decref(peer); - peer = peer2; - } else { - /* peer table takes my ref on peer */ - list_add_tail(&peer->ksnp_list, - ksocknal_nid2peerlist(id.nid)); - } - - list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) { - if (route2->ksnr_ipaddr == ipaddr) { - /* Route already exists, use the old one */ - ksocknal_route_decref(route); - route2->ksnr_share_count++; - goto out; - } - } - /* Route doesn't already exist, add the new one */ - ksocknal_add_route_locked(peer, route); - route->ksnr_share_count++; -out: - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - return 0; -} - -static void -ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip) -{ - struct ksock_conn *conn; - struct ksock_route *route; - struct list_head *tmp; - struct list_head *nxt; - int nshared; - - LASSERT(!peer->ksnp_closing); - - /* Extra ref prevents peer disappearing until I'm done with it */ - ksocknal_peer_addref(peer); - - list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); - - /* no match */ - if (!(!ip || route->ksnr_ipaddr == ip)) - continue; - - route->ksnr_share_count = 0; - /* This deletes associated conns too */ - ksocknal_del_route_locked(route); - } - - nshared = 0; - list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); - nshared += route->ksnr_share_count; - } - - if (!nshared) { - /* - * remove everything else if there are no explicit entries - * left - */ - list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - 
route = list_entry(tmp, struct ksock_route, ksnr_list); - - /* we should only be removing auto-entries */ - LASSERT(!route->ksnr_share_count); - ksocknal_del_route_locked(route); - } - - list_for_each_safe(tmp, nxt, &peer->ksnp_conns) { - conn = list_entry(tmp, struct ksock_conn, ksnc_list); - - ksocknal_close_conn_locked(conn, 0); - } - } - - ksocknal_peer_decref(peer); - /* NB peer unlinks itself when last conn/route is removed */ -} - -static int -ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip) -{ - LIST_HEAD(zombies); - struct list_head *ptmp; - struct list_head *pnxt; - struct ksock_peer *peer; - int lo; - int hi; - int i; - int rc = -ENOENT; - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - if (id.nid != LNET_NID_ANY) { - lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - } else { - lo = 0; - hi = ksocknal_data.ksnd_peer_hash_size - 1; - } - - for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, struct ksock_peer, ksnp_list); - - if (peer->ksnp_ni != ni) - continue; - - if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) && - (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid))) - continue; - - ksocknal_peer_addref(peer); /* a ref for me... */ - - ksocknal_del_peer_locked(peer, ip); - - if (peer->ksnp_closing && - !list_empty(&peer->ksnp_tx_queue)) { - LASSERT(list_empty(&peer->ksnp_conns)); - LASSERT(list_empty(&peer->ksnp_routes)); - - list_splice_init(&peer->ksnp_tx_queue, - &zombies); - } - - ksocknal_peer_decref(peer); /* ...till here */ - - rc = 0; /* matched! 
*/ - } - } - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - ksocknal_txlist_done(ni, &zombies, 1); - - return rc; -} - -static struct ksock_conn * -ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index) -{ - struct ksock_peer *peer; - struct list_head *ptmp; - struct ksock_conn *conn; - struct list_head *ctmp; - int i; - - read_lock(&ksocknal_data.ksnd_global_lock); - - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, struct ksock_peer, ksnp_list); - - LASSERT(!peer->ksnp_closing); - - if (peer->ksnp_ni != ni) - continue; - - list_for_each(ctmp, &peer->ksnp_conns) { - if (index-- > 0) - continue; - - conn = list_entry(ctmp, struct ksock_conn, - ksnc_list); - ksocknal_conn_addref(conn); - read_unlock(&ksocknal_data.ksnd_global_lock); - return conn; - } - } - } - - read_unlock(&ksocknal_data.ksnd_global_lock); - return NULL; -} - -static struct ksock_sched * -ksocknal_choose_scheduler_locked(unsigned int cpt) -{ - struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt]; - struct ksock_sched *sched; - int i; - - LASSERT(info->ksi_nthreads > 0); - - sched = &info->ksi_scheds[0]; - /* - * NB: it's safe so far, but info->ksi_nthreads could be changed - * at runtime when we have dynamic LNet configuration, then we - * need to take care of this. - */ - for (i = 1; i < info->ksi_nthreads; i++) { - if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns) - sched = &info->ksi_scheds[i]; - } - - return sched; -} - -static int -ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs) -{ - struct ksock_net *net = ni->ni_data; - int i; - int nip; - - read_lock(&ksocknal_data.ksnd_global_lock); - - nip = net->ksnn_ninterfaces; - LASSERT(nip <= LNET_MAX_INTERFACES); - - /* - * Only offer interfaces for additional connections if I have - * more than one. 
- */ - if (nip < 2) { - read_unlock(&ksocknal_data.ksnd_global_lock); - return 0; - } - - for (i = 0; i < nip; i++) { - ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr; - LASSERT(ipaddrs[i]); - } - - read_unlock(&ksocknal_data.ksnd_global_lock); - return nip; -} - -static int -ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips) -{ - int best_netmatch = 0; - int best_xor = 0; - int best = -1; - int this_xor; - int this_netmatch; - int i; - - for (i = 0; i < nips; i++) { - if (!ips[i]) - continue; - - this_xor = ips[i] ^ iface->ksni_ipaddr; - this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0; - - if (!(best < 0 || - best_netmatch < this_netmatch || - (best_netmatch == this_netmatch && - best_xor > this_xor))) - continue; - - best = i; - best_netmatch = this_netmatch; - best_xor = this_xor; - } - - LASSERT(best >= 0); - return best; -} - -static int -ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips) -{ - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - struct ksock_net *net = peer->ksnp_ni->ni_data; - struct ksock_interface *iface; - struct ksock_interface *best_iface; - int n_ips; - int i; - int j; - int k; - __u32 ip; - __u32 xor; - int this_netmatch; - int best_netmatch; - int best_npeers; - - /* - * CAVEAT EMPTOR: We do all our interface matching with an - * exclusive hold of global lock at IRQ priority. We're only - * expecting to be dealing with small numbers of interfaces, so the - * O(n**3)-ness shouldn't matter - */ - /* - * Also note that I'm not going to return more than n_peerips - * interfaces, even if I have more myself - */ - write_lock_bh(global_lock); - - LASSERT(n_peerips <= LNET_MAX_INTERFACES); - LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES); - - /* - * Only match interfaces for additional connections - * if I have > 1 interface - */ - n_ips = (net->ksnn_ninterfaces < 2) ? 
0 : - min(n_peerips, net->ksnn_ninterfaces); - - for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) { - /* ^ yes really... */ - - /* - * If we have any new interfaces, first tick off all the - * peer IPs that match old interfaces, then choose new - * interfaces to match the remaining peer IPS. - * We don't forget interfaces we've stopped using; we might - * start using them again... - */ - if (i < peer->ksnp_n_passive_ips) { - /* Old interface. */ - ip = peer->ksnp_passive_ips[i]; - best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip); - - /* peer passive ips are kept up to date */ - LASSERT(best_iface); - } else { - /* choose a new interface */ - LASSERT(i == peer->ksnp_n_passive_ips); - - best_iface = NULL; - best_netmatch = 0; - best_npeers = 0; - - for (j = 0; j < net->ksnn_ninterfaces; j++) { - iface = &net->ksnn_interfaces[j]; - ip = iface->ksni_ipaddr; - - for (k = 0; k < peer->ksnp_n_passive_ips; k++) - if (peer->ksnp_passive_ips[k] == ip) - break; - - if (k < peer->ksnp_n_passive_ips) /* using it already */ - continue; - - k = ksocknal_match_peerip(iface, peerips, - n_peerips); - xor = ip ^ peerips[k]; - this_netmatch = !(xor & iface->ksni_netmask) ? 
1 : 0; - - if (!(!best_iface || - best_netmatch < this_netmatch || - (best_netmatch == this_netmatch && - best_npeers > iface->ksni_npeers))) - continue; - - best_iface = iface; - best_netmatch = this_netmatch; - best_npeers = iface->ksni_npeers; - } - - LASSERT(best_iface); - - best_iface->ksni_npeers++; - ip = best_iface->ksni_ipaddr; - peer->ksnp_passive_ips[i] = ip; - peer->ksnp_n_passive_ips = i + 1; - } - - /* mark the best matching peer IP used */ - j = ksocknal_match_peerip(best_iface, peerips, n_peerips); - peerips[j] = 0; - } - - /* Overwrite input peer IP addresses */ - memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips)); - - write_unlock_bh(global_lock); - - return n_ips; -} - -static void -ksocknal_create_routes(struct ksock_peer *peer, int port, - __u32 *peer_ipaddrs, int npeer_ipaddrs) -{ - struct ksock_route *newroute = NULL; - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - struct lnet_ni *ni = peer->ksnp_ni; - struct ksock_net *net = ni->ni_data; - struct list_head *rtmp; - struct ksock_route *route; - struct ksock_interface *iface; - struct ksock_interface *best_iface; - int best_netmatch; - int this_netmatch; - int best_nroutes; - int i; - int j; - - /* - * CAVEAT EMPTOR: We do all our interface matching with an - * exclusive hold of global lock at IRQ priority. 
We're only - * expecting to be dealing with small numbers of interfaces, so the - * O(n**3)-ness here shouldn't matter - */ - write_lock_bh(global_lock); - - if (net->ksnn_ninterfaces < 2) { - /* - * Only create additional connections - * if I have > 1 interface - */ - write_unlock_bh(global_lock); - return; - } - - LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES); - - for (i = 0; i < npeer_ipaddrs; i++) { - if (newroute) { - newroute->ksnr_ipaddr = peer_ipaddrs[i]; - } else { - write_unlock_bh(global_lock); - - newroute = ksocknal_create_route(peer_ipaddrs[i], port); - if (!newroute) - return; - - write_lock_bh(global_lock); - } - - if (peer->ksnp_closing) { - /* peer got closed under me */ - break; - } - - /* Already got a route? */ - route = NULL; - list_for_each(rtmp, &peer->ksnp_routes) { - route = list_entry(rtmp, struct ksock_route, ksnr_list); - - if (route->ksnr_ipaddr == newroute->ksnr_ipaddr) - break; - - route = NULL; - } - if (route) - continue; - - best_iface = NULL; - best_nroutes = 0; - best_netmatch = 0; - - LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES); - - /* Select interface to connect from */ - for (j = 0; j < net->ksnn_ninterfaces; j++) { - iface = &net->ksnn_interfaces[j]; - - /* Using this interface already? */ - list_for_each(rtmp, &peer->ksnp_routes) { - route = list_entry(rtmp, struct ksock_route, - ksnr_list); - - if (route->ksnr_myipaddr == iface->ksni_ipaddr) - break; - - route = NULL; - } - if (route) - continue; - - this_netmatch = (!((iface->ksni_ipaddr ^ - newroute->ksnr_ipaddr) & - iface->ksni_netmask)) ? 
1 : 0; - - if (!(!best_iface || - best_netmatch < this_netmatch || - (best_netmatch == this_netmatch && - best_nroutes > iface->ksni_nroutes))) - continue; - - best_iface = iface; - best_netmatch = this_netmatch; - best_nroutes = iface->ksni_nroutes; - } - - if (!best_iface) - continue; - - newroute->ksnr_myipaddr = best_iface->ksni_ipaddr; - best_iface->ksni_nroutes++; - - ksocknal_add_route_locked(peer, newroute); - newroute = NULL; - } - - write_unlock_bh(global_lock); - if (newroute) - ksocknal_route_decref(newroute); -} - -int -ksocknal_accept(struct lnet_ni *ni, struct socket *sock) -{ - struct ksock_connreq *cr; - int rc; - __u32 peer_ip; - int peer_port; - - rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); - LASSERT(!rc); /* we succeeded before */ - - cr = kzalloc(sizeof(*cr), GFP_NOFS); - if (!cr) { - LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n", - &peer_ip); - return -ENOMEM; - } - - lnet_ni_addref(ni); - cr->ksncr_ni = ni; - cr->ksncr_sock = sock; - - spin_lock_bh(&ksocknal_data.ksnd_connd_lock); - - list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs); - wake_up(&ksocknal_data.ksnd_connd_waitq); - - spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); - return 0; -} - -static int -ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr) -{ - struct ksock_route *route; - - list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) { - if (route->ksnr_ipaddr == ipaddr) - return route->ksnr_connecting; - } - return 0; -} - -int -ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route, - struct socket *sock, int type) -{ - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - LIST_HEAD(zombies); - struct lnet_process_id peerid; - struct list_head *tmp; - __u64 incarnation; - struct ksock_conn *conn; - struct ksock_conn *conn2; - struct ksock_peer *peer = NULL; - struct ksock_peer *peer2; - struct ksock_sched *sched; - struct ksock_hello_msg *hello; - int cpt; - struct ksock_tx *tx; - 
struct ksock_tx *txtmp; - int rc; - int active; - char *warn = NULL; - - active = !!route; - - LASSERT(active == (type != SOCKLND_CONN_NONE)); - - conn = kzalloc(sizeof(*conn), GFP_NOFS); - if (!conn) { - rc = -ENOMEM; - goto failed_0; - } - - conn->ksnc_peer = NULL; - conn->ksnc_route = NULL; - conn->ksnc_sock = sock; - /* - * 2 ref, 1 for conn, another extra ref prevents socket - * being closed before establishment of connection - */ - atomic_set(&conn->ksnc_sock_refcount, 2); - conn->ksnc_type = type; - ksocknal_lib_save_callback(sock, conn); - atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */ - - conn->ksnc_rx_ready = 0; - conn->ksnc_rx_scheduled = 0; - - INIT_LIST_HEAD(&conn->ksnc_tx_queue); - conn->ksnc_tx_ready = 0; - conn->ksnc_tx_scheduled = 0; - conn->ksnc_tx_carrier = NULL; - atomic_set(&conn->ksnc_tx_nob, 0); - - hello = kvzalloc(offsetof(struct ksock_hello_msg, - kshm_ips[LNET_MAX_INTERFACES]), - GFP_KERNEL); - if (!hello) { - rc = -ENOMEM; - goto failed_1; - } - - /* stash conn's local and remote addrs */ - rc = ksocknal_lib_get_conn_addrs(conn); - if (rc) - goto failed_1; - - /* - * Find out/confirm peer's NID and connection type and get the - * vector of interfaces she's willing to let me connect to. 
- * Passive connections use the listener timeout since the peer sends - * eagerly - */ - if (active) { - peer = route->ksnr_peer; - LASSERT(ni == peer->ksnp_ni); - - /* Active connection sends HELLO eagerly */ - hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips); - peerid = peer->ksnp_id; - - write_lock_bh(global_lock); - conn->ksnc_proto = peer->ksnp_proto; - write_unlock_bh(global_lock); - - if (!conn->ksnc_proto) { - conn->ksnc_proto = &ksocknal_protocol_v3x; -#if SOCKNAL_VERSION_DEBUG - if (*ksocknal_tunables.ksnd_protocol == 2) - conn->ksnc_proto = &ksocknal_protocol_v2x; - else if (*ksocknal_tunables.ksnd_protocol == 1) - conn->ksnc_proto = &ksocknal_protocol_v1x; -#endif - } - - rc = ksocknal_send_hello(ni, conn, peerid.nid, hello); - if (rc) - goto failed_1; - } else { - peerid.nid = LNET_NID_ANY; - peerid.pid = LNET_PID_ANY; - - /* Passive, get protocol from peer */ - conn->ksnc_proto = NULL; - } - - rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation); - if (rc < 0) - goto failed_1; - - LASSERT(!rc || active); - LASSERT(conn->ksnc_proto); - LASSERT(peerid.nid != LNET_NID_ANY); - - cpt = lnet_cpt_of_nid(peerid.nid); - - if (active) { - ksocknal_peer_addref(peer); - write_lock_bh(global_lock); - } else { - rc = ksocknal_create_peer(&peer, ni, peerid); - if (rc) - goto failed_1; - - write_lock_bh(global_lock); - - /* called with a ref on ni, so shutdown can't have started */ - LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown); - - peer2 = ksocknal_find_peer_locked(ni, peerid); - if (!peer2) { - /* - * NB this puts an "empty" peer in the peer - * table (which takes my ref) - */ - list_add_tail(&peer->ksnp_list, - ksocknal_nid2peerlist(peerid.nid)); - } else { - ksocknal_peer_decref(peer); - peer = peer2; - } - - /* +1 ref for me */ - ksocknal_peer_addref(peer); - peer->ksnp_accepting++; - - /* - * Am I already connecting to this guy? Resolve in - * favour of higher NID... 
- */ - if (peerid.nid < ni->ni_nid && - ksocknal_connecting(peer, conn->ksnc_ipaddr)) { - rc = EALREADY; - warn = "connection race resolution"; - goto failed_2; - } - } - - if (peer->ksnp_closing || - (active && route->ksnr_deleted)) { - /* peer/route got closed under me */ - rc = -ESTALE; - warn = "peer/route removed"; - goto failed_2; - } - - if (!peer->ksnp_proto) { - /* - * Never connected before. - * NB recv_hello may have returned EPROTO to signal my peer - * wants a different protocol than the one I asked for. - */ - LASSERT(list_empty(&peer->ksnp_conns)); - - peer->ksnp_proto = conn->ksnc_proto; - peer->ksnp_incarnation = incarnation; - } - - if (peer->ksnp_proto != conn->ksnc_proto || - peer->ksnp_incarnation != incarnation) { - /* Peer rebooted or I've got the wrong protocol version */ - ksocknal_close_peer_conns_locked(peer, 0, 0); - - peer->ksnp_proto = NULL; - rc = ESTALE; - warn = peer->ksnp_incarnation != incarnation ? - "peer rebooted" : - "wrong proto version"; - goto failed_2; - } - - switch (rc) { - default: - LBUG(); - case 0: - break; - case EALREADY: - warn = "lost conn race"; - goto failed_2; - case EPROTO: - warn = "retry with different protocol version"; - goto failed_2; - } - - /* - * Refuse to duplicate an existing connection, unless this is a - * loopback connection - */ - if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) { - list_for_each(tmp, &peer->ksnp_conns) { - conn2 = list_entry(tmp, struct ksock_conn, ksnc_list); - - if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr || - conn2->ksnc_myipaddr != conn->ksnc_myipaddr || - conn2->ksnc_type != conn->ksnc_type) - continue; - - /* - * Reply on a passive connection attempt so the peer - * realises we're connected. - */ - LASSERT(!rc); - if (!active) - rc = EALREADY; - - warn = "duplicate"; - goto failed_2; - } - } - - /* - * If the connection created by this route didn't bind to the IP - * address the route connected to, the connection/route matching - * code below probably isn't going to work. 
- */ - if (active && - route->ksnr_ipaddr != conn->ksnc_ipaddr) { - CERROR("Route %s %pI4h connected to %pI4h\n", - libcfs_id2str(peer->ksnp_id), - &route->ksnr_ipaddr, - &conn->ksnc_ipaddr); - } - - /* - * Search for a route corresponding to the new connection and - * create an association. This allows incoming connections created - * by routes in my peer to match my own route entries so I don't - * continually create duplicate routes. - */ - list_for_each(tmp, &peer->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); - - if (route->ksnr_ipaddr != conn->ksnc_ipaddr) - continue; - - ksocknal_associate_route_conn_locked(route, conn); - break; - } - - conn->ksnc_peer = peer; /* conn takes my ref on peer */ - peer->ksnp_last_alive = jiffies; - peer->ksnp_send_keepalive = 0; - peer->ksnp_error = 0; - - sched = ksocknal_choose_scheduler_locked(cpt); - sched->kss_nconns++; - conn->ksnc_scheduler = sched; - - conn->ksnc_tx_last_post = jiffies; - /* Set the deadline for the outgoing HELLO to drain */ - conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued; - conn->ksnc_tx_deadline = jiffies + *ksocknal_tunables.ksnd_timeout * HZ; - mb(); /* order with adding to peer's conn list */ - - list_add(&conn->ksnc_list, &peer->ksnp_conns); - ksocknal_conn_addref(conn); - - ksocknal_new_packet(conn, 0); - - conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn); - - /* Take packets blocking for this connection. */ - list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) { - int match = conn->ksnc_proto->pro_match_tx(conn, tx, - tx->tx_nonblk); - - if (match == SOCKNAL_MATCH_NO) - continue; - - list_del(&tx->tx_list); - ksocknal_queue_tx_locked(tx, conn); - } - - write_unlock_bh(global_lock); - - /* - * We've now got a new connection. Any errors from here on are just - * like "normal" comms errors and we close the connection normally. 
- * NB (a) we still have to send the reply HELLO for passive - * connections, - * (b) normal I/O on the conn is blocked until I setup and call the - * socket callbacks. - */ - CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n", - libcfs_id2str(peerid), conn->ksnc_proto->pro_version, - &conn->ksnc_myipaddr, &conn->ksnc_ipaddr, - conn->ksnc_port, incarnation, cpt, - (int)(sched - &sched->kss_info->ksi_scheds[0])); - - if (active) { - /* additional routes after interface exchange? */ - ksocknal_create_routes(peer, conn->ksnc_port, - hello->kshm_ips, hello->kshm_nips); - } else { - hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips, - hello->kshm_nips); - rc = ksocknal_send_hello(ni, conn, peerid.nid, hello); - } - - kvfree(hello); - - /* - * setup the socket AFTER I've received hello (it disables - * SO_LINGER). I might call back to the acceptor who may want - * to send a protocol version response and then close the - * socket; this ensures the socket only tears down after the - * response has been sent. - */ - if (!rc) - rc = ksocknal_lib_setup_sock(sock); - - write_lock_bh(global_lock); - - /* NB my callbacks block while I hold ksnd_global_lock */ - ksocknal_lib_set_callback(sock, conn); - - if (!active) - peer->ksnp_accepting--; - - write_unlock_bh(global_lock); - - if (rc) { - write_lock_bh(global_lock); - if (!conn->ksnc_closing) { - /* could be closed by another thread */ - ksocknal_close_conn_locked(conn, rc); - } - write_unlock_bh(global_lock); - } else if (!ksocknal_connsock_addref(conn)) { - /* Allow I/O to proceed. 
*/ - ksocknal_read_callback(conn); - ksocknal_write_callback(conn); - ksocknal_connsock_decref(conn); - } - - ksocknal_connsock_decref(conn); - ksocknal_conn_decref(conn); - return rc; - - failed_2: - if (!peer->ksnp_closing && - list_empty(&peer->ksnp_conns) && - list_empty(&peer->ksnp_routes)) { - list_add(&zombies, &peer->ksnp_tx_queue); - list_del_init(&peer->ksnp_tx_queue); - ksocknal_unlink_peer_locked(peer); - } - - write_unlock_bh(global_lock); - - if (warn) { - if (rc < 0) - CERROR("Not creating conn %s type %d: %s\n", - libcfs_id2str(peerid), conn->ksnc_type, warn); - else - CDEBUG(D_NET, "Not creating conn %s type %d: %s\n", - libcfs_id2str(peerid), conn->ksnc_type, warn); - } - - if (!active) { - if (rc > 0) { - /* - * Request retry by replying with CONN_NONE - * ksnc_proto has been set already - */ - conn->ksnc_type = SOCKLND_CONN_NONE; - hello->kshm_nips = 0; - ksocknal_send_hello(ni, conn, peerid.nid, hello); - } - - write_lock_bh(global_lock); - peer->ksnp_accepting--; - write_unlock_bh(global_lock); - } - - ksocknal_txlist_done(ni, &zombies, 1); - ksocknal_peer_decref(peer); - -failed_1: - kvfree(hello); - - kfree(conn); - -failed_0: - sock_release(sock); - return rc; -} - -void -ksocknal_close_conn_locked(struct ksock_conn *conn, int error) -{ - /* - * This just does the immmediate housekeeping, and queues the - * connection for the reaper to terminate. - * Caller holds ksnd_global_lock exclusively in irq context - */ - struct ksock_peer *peer = conn->ksnc_peer; - struct ksock_route *route; - struct ksock_conn *conn2; - struct list_head *tmp; - - LASSERT(!peer->ksnp_error); - LASSERT(!conn->ksnc_closing); - conn->ksnc_closing = 1; - - /* ksnd_deathrow_conns takes over peer's ref */ - list_del(&conn->ksnc_list); - - route = conn->ksnc_route; - if (route) { - /* dissociate conn from route... 
*/ - LASSERT(!route->ksnr_deleted); - LASSERT(route->ksnr_connected & (1 << conn->ksnc_type)); - - conn2 = NULL; - list_for_each(tmp, &peer->ksnp_conns) { - conn2 = list_entry(tmp, struct ksock_conn, ksnc_list); - - if (conn2->ksnc_route == route && - conn2->ksnc_type == conn->ksnc_type) - break; - - conn2 = NULL; - } - if (!conn2) - route->ksnr_connected &= ~(1 << conn->ksnc_type); - - conn->ksnc_route = NULL; - - ksocknal_route_decref(route); /* drop conn's ref on route */ - } - - if (list_empty(&peer->ksnp_conns)) { - /* No more connections to this peer */ - - if (!list_empty(&peer->ksnp_tx_queue)) { - struct ksock_tx *tx; - - LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); - - /* - * throw them to the last connection..., - * these TXs will be send to /dev/null by scheduler - */ - list_for_each_entry(tx, &peer->ksnp_tx_queue, - tx_list) - ksocknal_tx_prep(conn, tx); - - spin_lock_bh(&conn->ksnc_scheduler->kss_lock); - list_splice_init(&peer->ksnp_tx_queue, - &conn->ksnc_tx_queue); - spin_unlock_bh(&conn->ksnc_scheduler->kss_lock); - } - - peer->ksnp_proto = NULL; /* renegotiate protocol version */ - peer->ksnp_error = error; /* stash last conn close reason */ - - if (list_empty(&peer->ksnp_routes)) { - /* - * I've just closed last conn belonging to a - * peer with no routes to it - */ - ksocknal_unlink_peer_locked(peer); - } - } - - spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - - list_add_tail(&conn->ksnc_list, - &ksocknal_data.ksnd_deathrow_conns); - wake_up(&ksocknal_data.ksnd_reaper_waitq); - - spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); -} - -void -ksocknal_peer_failed(struct ksock_peer *peer) -{ - int notify = 0; - unsigned long last_alive = 0; - - /* - * There has been a connection failure or comms error; but I'll only - * tell LNET I think the peer is dead if it's to another kernel and - * there are no connections or connection attempts in existence. 
- */ - read_lock(&ksocknal_data.ksnd_global_lock); - - if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) && - list_empty(&peer->ksnp_conns) && - !peer->ksnp_accepting && - !ksocknal_find_connecting_route_locked(peer)) { - notify = 1; - last_alive = peer->ksnp_last_alive; - } - - read_unlock(&ksocknal_data.ksnd_global_lock); - - if (notify) - lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0, - last_alive); -} - -void -ksocknal_finalize_zcreq(struct ksock_conn *conn) -{ - struct ksock_peer *peer = conn->ksnc_peer; - struct ksock_tx *tx; - struct ksock_tx *temp; - struct ksock_tx *tmp; - LIST_HEAD(zlist); - - /* - * NB safe to finalize TXs because closing of socket will - * abort all buffered data - */ - LASSERT(!conn->ksnc_sock); - - spin_lock(&peer->ksnp_lock); - - list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) { - if (tx->tx_conn != conn) - continue; - - LASSERT(tx->tx_msg.ksm_zc_cookies[0]); - - tx->tx_msg.ksm_zc_cookies[0] = 0; - tx->tx_zc_aborted = 1; /* mark it as not-acked */ - list_del(&tx->tx_zc_list); - list_add(&tx->tx_zc_list, &zlist); - } - - spin_unlock(&peer->ksnp_lock); - - list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) { - list_del(&tx->tx_zc_list); - ksocknal_tx_decref(tx); - } -} - -void -ksocknal_terminate_conn(struct ksock_conn *conn) -{ - /* - * This gets called by the reaper (guaranteed thread context) to - * disengage the socket from its callbacks and close it. - * ksnc_refcount will eventually hit zero, and then the reaper will - * destroy it. 
- */ - struct ksock_peer *peer = conn->ksnc_peer; - struct ksock_sched *sched = conn->ksnc_scheduler; - int failed = 0; - - LASSERT(conn->ksnc_closing); - - /* wake up the scheduler to "send" all remaining packets to /dev/null */ - spin_lock_bh(&sched->kss_lock); - - /* a closing conn is always ready to tx */ - conn->ksnc_tx_ready = 1; - - if (!conn->ksnc_tx_scheduled && - !list_empty(&conn->ksnc_tx_queue)) { - list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); - conn->ksnc_tx_scheduled = 1; - /* extra ref for scheduler */ - ksocknal_conn_addref(conn); - - wake_up(&sched->kss_waitq); - } - - spin_unlock_bh(&sched->kss_lock); - - /* serialise with callbacks */ - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - ksocknal_lib_reset_callback(conn->ksnc_sock, conn); - - /* - * OK, so this conn may not be completely disengaged from its - * scheduler yet, but it _has_ committed to terminate... - */ - conn->ksnc_scheduler->kss_nconns--; - - if (peer->ksnp_error) { - /* peer's last conn closed in error */ - LASSERT(list_empty(&peer->ksnp_conns)); - failed = 1; - peer->ksnp_error = 0; /* avoid multiple notifications */ - } - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - if (failed) - ksocknal_peer_failed(peer); - - /* - * The socket is closed on the final put; either here, or in - * ksocknal_{send,recv}msg(). Since we set up the linger2 option - * when the connection was established, this will close the socket - * immediately, aborting anything buffered in it. Any hung - * zero-copy transmits will therefore complete in finite time. 
- */ - ksocknal_connsock_decref(conn); -} - -void -ksocknal_queue_zombie_conn(struct ksock_conn *conn) -{ - /* Queue the conn for the reaper to destroy */ - - LASSERT(!atomic_read(&conn->ksnc_conn_refcount)); - spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - - list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns); - wake_up(&ksocknal_data.ksnd_reaper_waitq); - - spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); -} - -void -ksocknal_destroy_conn(struct ksock_conn *conn) -{ - unsigned long last_rcv; - - /* Final coup-de-grace of the reaper */ - CDEBUG(D_NET, "connection %p\n", conn); - - LASSERT(!atomic_read(&conn->ksnc_conn_refcount)); - LASSERT(!atomic_read(&conn->ksnc_sock_refcount)); - LASSERT(!conn->ksnc_sock); - LASSERT(!conn->ksnc_route); - LASSERT(!conn->ksnc_tx_scheduled); - LASSERT(!conn->ksnc_rx_scheduled); - LASSERT(list_empty(&conn->ksnc_tx_queue)); - - /* complete current receive if any */ - switch (conn->ksnc_rx_state) { - case SOCKNAL_RX_LNET_PAYLOAD: - last_rcv = conn->ksnc_rx_deadline - - *ksocknal_tunables.ksnd_timeout * HZ; - CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %zd, left: %d, last alive is %ld secs ago\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type, - &conn->ksnc_ipaddr, conn->ksnc_port, - iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left, - (jiffies - last_rcv) / HZ); - lnet_finalize(conn->ksnc_peer->ksnp_ni, - conn->ksnc_cookie, -EIO); - break; - case SOCKNAL_RX_LNET_HEADER: - if (conn->ksnc_rx_started) - CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, conn->ksnc_port, - conn->ksnc_proto->pro_version); - break; - case SOCKNAL_RX_KSM_HEADER: - if (conn->ksnc_rx_started) - CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, conn->ksnc_port, - 
conn->ksnc_proto->pro_version); - break; - case SOCKNAL_RX_SLOP: - if (conn->ksnc_rx_started) - CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, conn->ksnc_port); - break; - default: - LBUG(); - break; - } - - ksocknal_peer_decref(conn->ksnc_peer); - - kfree(conn); -} - -int -ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why) -{ - struct ksock_conn *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; - - list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { - conn = list_entry(ctmp, struct ksock_conn, ksnc_list); - - if (!ipaddr || conn->ksnc_ipaddr == ipaddr) { - count++; - ksocknal_close_conn_locked(conn, why); - } - } - - return count; -} - -int -ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why) -{ - struct ksock_peer *peer = conn->ksnc_peer; - __u32 ipaddr = conn->ksnc_ipaddr; - int count; - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - count = ksocknal_close_peer_conns_locked(peer, ipaddr, why); - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - return count; -} - -int -ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr) -{ - struct ksock_peer *peer; - struct list_head *ptmp; - struct list_head *pnxt; - int lo; - int hi; - int i; - int count = 0; - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - if (id.nid != LNET_NID_ANY) { - lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - } else { - lo = 0; - hi = ksocknal_data.ksnd_peer_hash_size - 1; - } - - for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, - &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, struct ksock_peer, ksnp_list); - - if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) && - (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid))) - continue; - - count += 
ksocknal_close_peer_conns_locked(peer, ipaddr, - 0); - } - } - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - /* wildcards always succeed */ - if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr) - return 0; - - if (!count) - return -ENOENT; - else - return 0; -} - -void -ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive) -{ - /* - * The router is telling me she's been notified of a change in - * gateway state.... - */ - struct lnet_process_id id = {0}; - - id.nid = gw_nid; - id.pid = LNET_PID_ANY; - - CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid), - alive ? "up" : "down"); - - if (!alive) { - /* If the gateway crashed, close all open connections... */ - ksocknal_close_matching_conns(id, 0); - return; - } - - /* - * ...otherwise do nothing. We can only establish new connections - * if we have autroutes, and these connect on demand. - */ -} - -void -ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when) -{ - int connect = 1; - unsigned long last_alive = 0; - unsigned long now = jiffies; - struct ksock_peer *peer = NULL; - rwlock_t *glock = &ksocknal_data.ksnd_global_lock; - struct lnet_process_id id = { - .nid = nid, - .pid = LNET_PID_LUSTRE, - }; - - read_lock(glock); - - peer = ksocknal_find_peer_locked(ni, id); - if (peer) { - struct ksock_conn *conn; - int bufnob; - - list_for_each_entry(conn, &peer->ksnp_conns, ksnc_list) { - bufnob = conn->ksnc_sock->sk->sk_wmem_queued; - - if (bufnob < conn->ksnc_tx_bufnob) { - /* something got ACKed */ - conn->ksnc_tx_deadline = - jiffies + *ksocknal_tunables.ksnd_timeout * HZ; - peer->ksnp_last_alive = now; - conn->ksnc_tx_bufnob = bufnob; - } - } - - last_alive = peer->ksnp_last_alive; - if (!ksocknal_find_connectable_route_locked(peer)) - connect = 0; - } - - read_unlock(glock); - - if (last_alive) - *when = last_alive; - - CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n", - libcfs_nid2str(nid), peer, - last_alive ? 
(now - last_alive) / HZ : -1, - connect); - - if (!connect) - return; - - ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port()); - - write_lock_bh(glock); - - peer = ksocknal_find_peer_locked(ni, id); - if (peer) - ksocknal_launch_all_connections_locked(peer); - - write_unlock_bh(glock); -} - -static void -ksocknal_push_peer(struct ksock_peer *peer) -{ - int index; - int i; - struct list_head *tmp; - struct ksock_conn *conn; - - for (index = 0; ; index++) { - read_lock(&ksocknal_data.ksnd_global_lock); - - i = 0; - conn = NULL; - - list_for_each(tmp, &peer->ksnp_conns) { - if (i++ == index) { - conn = list_entry(tmp, struct ksock_conn, - ksnc_list); - ksocknal_conn_addref(conn); - break; - } - } - - read_unlock(&ksocknal_data.ksnd_global_lock); - - if (!conn) - break; - - ksocknal_lib_push_conn(conn); - ksocknal_conn_decref(conn); - } -} - -static int ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id) -{ - struct list_head *start; - struct list_head *end; - struct list_head *tmp; - int rc = -ENOENT; - unsigned int hsize = ksocknal_data.ksnd_peer_hash_size; - - if (id.nid == LNET_NID_ANY) { - start = &ksocknal_data.ksnd_peers[0]; - end = &ksocknal_data.ksnd_peers[hsize - 1]; - } else { - start = ksocknal_nid2peerlist(id.nid); - end = ksocknal_nid2peerlist(id.nid); - } - - for (tmp = start; tmp <= end; tmp++) { - int peer_off; /* searching offset in peer hash table */ - - for (peer_off = 0; ; peer_off++) { - struct ksock_peer *peer; - int i = 0; - - read_lock(&ksocknal_data.ksnd_global_lock); - list_for_each_entry(peer, tmp, ksnp_list) { - if (!((id.nid == LNET_NID_ANY || - id.nid == peer->ksnp_id.nid) && - (id.pid == LNET_PID_ANY || - id.pid == peer->ksnp_id.pid))) - continue; - - if (i++ == peer_off) { - ksocknal_peer_addref(peer); - break; - } - } - read_unlock(&ksocknal_data.ksnd_global_lock); - - if (!i) /* no match */ - break; - - rc = 0; - ksocknal_push_peer(peer); - ksocknal_peer_decref(peer); - } - } - return rc; -} - -static int 
-ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask) -{ - struct ksock_net *net = ni->ni_data; - struct ksock_interface *iface; - int rc; - int i; - int j; - struct list_head *ptmp; - struct ksock_peer *peer; - struct list_head *rtmp; - struct ksock_route *route; - - if (!ipaddress || !netmask) - return -EINVAL; - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - iface = ksocknal_ip2iface(ni, ipaddress); - if (iface) { - /* silently ignore dups */ - rc = 0; - } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) { - rc = -ENOSPC; - } else { - iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++]; - - iface->ksni_ipaddr = ipaddress; - iface->ksni_netmask = netmask; - iface->ksni_nroutes = 0; - iface->ksni_npeers = 0; - - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, struct ksock_peer, - ksnp_list); - - for (j = 0; j < peer->ksnp_n_passive_ips; j++) - if (peer->ksnp_passive_ips[j] == ipaddress) - iface->ksni_npeers++; - - list_for_each(rtmp, &peer->ksnp_routes) { - route = list_entry(rtmp, struct ksock_route, - ksnr_list); - - if (route->ksnr_myipaddr == ipaddress) - iface->ksni_nroutes++; - } - } - } - - rc = 0; - /* - * NB only new connections will pay attention to the - * new interface! 
- */ - } - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - return rc; -} - -static void -ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr) -{ - struct list_head *tmp; - struct list_head *nxt; - struct ksock_route *route; - struct ksock_conn *conn; - int i; - int j; - - for (i = 0; i < peer->ksnp_n_passive_ips; i++) - if (peer->ksnp_passive_ips[i] == ipaddr) { - for (j = i + 1; j < peer->ksnp_n_passive_ips; j++) - peer->ksnp_passive_ips[j - 1] = - peer->ksnp_passive_ips[j]; - peer->ksnp_n_passive_ips--; - break; - } - - list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); - - if (route->ksnr_myipaddr != ipaddr) - continue; - - if (route->ksnr_share_count) { - /* Manually created; keep, but unbind */ - route->ksnr_myipaddr = 0; - } else { - ksocknal_del_route_locked(route); - } - } - - list_for_each_safe(tmp, nxt, &peer->ksnp_conns) { - conn = list_entry(tmp, struct ksock_conn, ksnc_list); - - if (conn->ksnc_myipaddr == ipaddr) - ksocknal_close_conn_locked(conn, 0); - } -} - -static int -ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress) -{ - struct ksock_net *net = ni->ni_data; - int rc = -ENOENT; - struct list_head *tmp; - struct list_head *nxt; - struct ksock_peer *peer; - __u32 this_ip; - int i; - int j; - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - for (i = 0; i < net->ksnn_ninterfaces; i++) { - this_ip = net->ksnn_interfaces[i].ksni_ipaddr; - - if (!(!ipaddress || ipaddress == this_ip)) - continue; - - rc = 0; - - for (j = i + 1; j < net->ksnn_ninterfaces; j++) - net->ksnn_interfaces[j - 1] = - net->ksnn_interfaces[j]; - - net->ksnn_ninterfaces--; - - for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) { - list_for_each_safe(tmp, nxt, - &ksocknal_data.ksnd_peers[j]) { - peer = list_entry(tmp, struct ksock_peer, ksnp_list); - - if (peer->ksnp_ni != ni) - continue; - - ksocknal_peer_del_interface_locked(peer, this_ip); - } - } - } - - 
write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - return rc; -} - -int -ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg) -{ - struct lnet_process_id id = {0}; - struct libcfs_ioctl_data *data = arg; - int rc; - - switch (cmd) { - case IOC_LIBCFS_GET_INTERFACE: { - struct ksock_net *net = ni->ni_data; - struct ksock_interface *iface; - - read_lock(&ksocknal_data.ksnd_global_lock); - - if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) { - rc = -ENOENT; - } else { - rc = 0; - iface = &net->ksnn_interfaces[data->ioc_count]; - - data->ioc_u32[0] = iface->ksni_ipaddr; - data->ioc_u32[1] = iface->ksni_netmask; - data->ioc_u32[2] = iface->ksni_npeers; - data->ioc_u32[3] = iface->ksni_nroutes; - } - - read_unlock(&ksocknal_data.ksnd_global_lock); - return rc; - } - - case IOC_LIBCFS_ADD_INTERFACE: - return ksocknal_add_interface(ni, - data->ioc_u32[0], /* IP address */ - data->ioc_u32[1]); /* net mask */ - - case IOC_LIBCFS_DEL_INTERFACE: - return ksocknal_del_interface(ni, - data->ioc_u32[0]); /* IP address */ - - case IOC_LIBCFS_GET_PEER: { - __u32 myip = 0; - __u32 ip = 0; - int port = 0; - int conn_count = 0; - int share_count = 0; - - rc = ksocknal_get_peer_info(ni, data->ioc_count, - &id, &myip, &ip, &port, - &conn_count, &share_count); - if (rc) - return rc; - - data->ioc_nid = id.nid; - data->ioc_count = share_count; - data->ioc_u32[0] = ip; - data->ioc_u32[1] = port; - data->ioc_u32[2] = myip; - data->ioc_u32[3] = conn_count; - data->ioc_u32[4] = id.pid; - return 0; - } - - case IOC_LIBCFS_ADD_PEER: - id.nid = data->ioc_nid; - id.pid = LNET_PID_LUSTRE; - return ksocknal_add_peer(ni, id, - data->ioc_u32[0], /* IP */ - data->ioc_u32[1]); /* port */ - - case IOC_LIBCFS_DEL_PEER: - id.nid = data->ioc_nid; - id.pid = LNET_PID_ANY; - return ksocknal_del_peer(ni, id, - data->ioc_u32[0]); /* IP */ - - case IOC_LIBCFS_GET_CONN: { - int txmem; - int rxmem; - int nagle; - struct ksock_conn *conn; - - conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); - 
if (!conn) - return -ENOENT; - - ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle); - - data->ioc_count = txmem; - data->ioc_nid = conn->ksnc_peer->ksnp_id.nid; - data->ioc_flags = nagle; - data->ioc_u32[0] = conn->ksnc_ipaddr; - data->ioc_u32[1] = conn->ksnc_port; - data->ioc_u32[2] = conn->ksnc_myipaddr; - data->ioc_u32[3] = conn->ksnc_type; - data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt; - data->ioc_u32[5] = rxmem; - data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid; - ksocknal_conn_decref(conn); - return 0; - } - - case IOC_LIBCFS_CLOSE_CONNECTION: - id.nid = data->ioc_nid; - id.pid = LNET_PID_ANY; - return ksocknal_close_matching_conns(id, - data->ioc_u32[0]); - - case IOC_LIBCFS_REGISTER_MYNID: - /* Ignore if this is a noop */ - if (data->ioc_nid == ni->ni_nid) - return 0; - - CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n", - libcfs_nid2str(data->ioc_nid), - libcfs_nid2str(ni->ni_nid)); - return -EINVAL; - - case IOC_LIBCFS_PUSH_CONNECTION: - id.nid = data->ioc_nid; - id.pid = LNET_PID_ANY; - return ksocknal_push(ni, id); - - default: - return -EINVAL; - } - /* not reached */ -} - -static void -ksocknal_free_buffers(void) -{ - LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs)); - - if (ksocknal_data.ksnd_sched_info) { - struct ksock_sched_info *info; - int i; - - cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) - kfree(info->ksi_scheds); - cfs_percpt_free(ksocknal_data.ksnd_sched_info); - } - - kvfree(ksocknal_data.ksnd_peers); - - spin_lock(&ksocknal_data.ksnd_tx_lock); - - if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { - struct list_head zlist; - struct ksock_tx *tx; - struct ksock_tx *temp; - - list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs); - list_del_init(&ksocknal_data.ksnd_idle_noop_txs); - spin_unlock(&ksocknal_data.ksnd_tx_lock); - - list_for_each_entry_safe(tx, temp, &zlist, tx_list) { - list_del(&tx->tx_list); - kfree(tx); - } - } else { - spin_unlock(&ksocknal_data.ksnd_tx_lock); - } -} - 
-static void -ksocknal_base_shutdown(void) -{ - struct ksock_sched_info *info; - struct ksock_sched *sched; - int i; - int j; - - LASSERT(!ksocknal_data.ksnd_nnets); - - switch (ksocknal_data.ksnd_init) { - default: - LASSERT(0); - /* fall through */ - case SOCKNAL_INIT_ALL: - case SOCKNAL_INIT_DATA: - LASSERT(ksocknal_data.ksnd_peers); - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) - LASSERT(list_empty(&ksocknal_data.ksnd_peers[i])); - - LASSERT(list_empty(&ksocknal_data.ksnd_nets)); - LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns)); - LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns)); - LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs)); - LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes)); - - if (ksocknal_data.ksnd_sched_info) { - cfs_percpt_for_each(info, i, - ksocknal_data.ksnd_sched_info) { - if (!info->ksi_scheds) - continue; - - for (j = 0; j < info->ksi_nthreads_max; j++) { - sched = &info->ksi_scheds[j]; - LASSERT(list_empty( - &sched->kss_tx_conns)); - LASSERT(list_empty( - &sched->kss_rx_conns)); - LASSERT(list_empty( - &sched->kss_zombie_noop_txs)); - LASSERT(!sched->kss_nconns); - } - } - } - - /* flag threads to terminate; wake and wait for them to die */ - ksocknal_data.ksnd_shuttingdown = 1; - wake_up_all(&ksocknal_data.ksnd_connd_waitq); - wake_up_all(&ksocknal_data.ksnd_reaper_waitq); - - if (ksocknal_data.ksnd_sched_info) { - cfs_percpt_for_each(info, i, - ksocknal_data.ksnd_sched_info) { - if (!info->ksi_scheds) - continue; - - for (j = 0; j < info->ksi_nthreads_max; j++) { - sched = &info->ksi_scheds[j]; - wake_up_all(&sched->kss_waitq); - } - } - } - - i = 4; - read_lock(&ksocknal_data.ksnd_global_lock); - while (ksocknal_data.ksnd_nthreads) { - i++; - CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? 
*/ - "waiting for %d threads to terminate\n", - ksocknal_data.ksnd_nthreads); - read_unlock(&ksocknal_data.ksnd_global_lock); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - read_lock(&ksocknal_data.ksnd_global_lock); - } - read_unlock(&ksocknal_data.ksnd_global_lock); - - ksocknal_free_buffers(); - - ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING; - break; - } - - module_put(THIS_MODULE); -} - -static __u64 -ksocknal_new_incarnation(void) -{ - /* The incarnation number is the time this module loaded and it - * identifies this particular instance of the socknal. - */ - return ktime_get_ns(); -} - -static int -ksocknal_base_startup(void) -{ - struct ksock_sched_info *info; - int rc; - int i; - - LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); - LASSERT(!ksocknal_data.ksnd_nnets); - - memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */ - - ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE; - ksocknal_data.ksnd_peers = kvmalloc_array(ksocknal_data.ksnd_peer_hash_size, - sizeof(struct list_head), - GFP_KERNEL); - if (!ksocknal_data.ksnd_peers) - return -ENOMEM; - - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) - INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]); - - rwlock_init(&ksocknal_data.ksnd_global_lock); - INIT_LIST_HEAD(&ksocknal_data.ksnd_nets); - - spin_lock_init(&ksocknal_data.ksnd_reaper_lock); - INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns); - INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns); - INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns); - init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq); - - spin_lock_init(&ksocknal_data.ksnd_connd_lock); - INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs); - INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes); - init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq); - - spin_lock_init(&ksocknal_data.ksnd_tx_lock); - INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs); - - /* NB memset above zeros whole of ksocknal_data */ - - /* flag 
lists/ptrs/locks initialised */ - ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA; - try_module_get(THIS_MODULE); - - ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*info)); - if (!ksocknal_data.ksnd_sched_info) - goto failed; - - cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - struct ksock_sched *sched; - int nthrs; - - nthrs = cfs_cpt_weight(lnet_cpt_table(), i); - if (*ksocknal_tunables.ksnd_nscheds > 0) { - nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds); - } else { - /* - * max to half of CPUs, assume another half should be - * reserved for upper layer modules - */ - nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs); - } - - info->ksi_nthreads_max = nthrs; - info->ksi_cpt = i; - - info->ksi_scheds = kzalloc_cpt(info->ksi_nthreads_max * sizeof(*sched), - GFP_NOFS, i); - if (!info->ksi_scheds) - goto failed; - - for (; nthrs > 0; nthrs--) { - sched = &info->ksi_scheds[nthrs - 1]; - - sched->kss_info = info; - spin_lock_init(&sched->kss_lock); - INIT_LIST_HEAD(&sched->kss_rx_conns); - INIT_LIST_HEAD(&sched->kss_tx_conns); - INIT_LIST_HEAD(&sched->kss_zombie_noop_txs); - init_waitqueue_head(&sched->kss_waitq); - } - } - - ksocknal_data.ksnd_connd_starting = 0; - ksocknal_data.ksnd_connd_failed_stamp = 0; - ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds(); - /* - * must have at least 2 connds to remain responsive to accepts while - * connecting - */ - if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1) - *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1; - - if (*ksocknal_tunables.ksnd_nconnds_max < - *ksocknal_tunables.ksnd_nconnds) { - ksocknal_tunables.ksnd_nconnds_max = - ksocknal_tunables.ksnd_nconnds; - } - - for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) { - char name[16]; - - spin_lock_bh(&ksocknal_data.ksnd_connd_lock); - ksocknal_data.ksnd_connd_starting++; - spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); - - snprintf(name, sizeof(name), "socknal_cd%02d", i); - rc = 
ksocknal_thread_start(ksocknal_connd, - (void *)((uintptr_t)i), name); - if (rc) { - spin_lock_bh(&ksocknal_data.ksnd_connd_lock); - ksocknal_data.ksnd_connd_starting--; - spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); - CERROR("Can't spawn socknal connd: %d\n", rc); - goto failed; - } - } - - rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper"); - if (rc) { - CERROR("Can't spawn socknal reaper: %d\n", rc); - goto failed; - } - - /* flag everything initialised */ - ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL; - - return 0; - - failed: - ksocknal_base_shutdown(); - return -ENETDOWN; -} - -static void -ksocknal_debug_peerhash(struct lnet_ni *ni) -{ - struct ksock_peer *peer = NULL; - struct list_head *tmp; - int i; - - read_lock(&ksocknal_data.ksnd_global_lock); - - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(tmp, struct ksock_peer, ksnp_list); - - if (peer->ksnp_ni == ni) - break; - - peer = NULL; - } - } - - if (peer) { - struct ksock_route *route; - struct ksock_conn *conn; - - CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n", - libcfs_id2str(peer->ksnp_id), - atomic_read(&peer->ksnp_refcount), - peer->ksnp_sharecount, peer->ksnp_closing, - peer->ksnp_accepting, peer->ksnp_error, - peer->ksnp_zc_next_cookie, - !list_empty(&peer->ksnp_tx_queue), - !list_empty(&peer->ksnp_zc_req_list)); - - list_for_each(tmp, &peer->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); - CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n", - atomic_read(&route->ksnr_refcount), - route->ksnr_scheduled, route->ksnr_connecting, - route->ksnr_connected, route->ksnr_deleted); - } - - list_for_each(tmp, &peer->ksnp_conns) { - conn = list_entry(tmp, struct ksock_conn, ksnc_list); - CWARN("Conn: ref %d, sref %d, t %d, c %d\n", - atomic_read(&conn->ksnc_conn_refcount), - 
atomic_read(&conn->ksnc_sock_refcount), - conn->ksnc_type, conn->ksnc_closing); - } - } - - read_unlock(&ksocknal_data.ksnd_global_lock); -} - -void -ksocknal_shutdown(struct lnet_ni *ni) -{ - struct ksock_net *net = ni->ni_data; - int i; - struct lnet_process_id anyid = {0}; - - anyid.nid = LNET_NID_ANY; - anyid.pid = LNET_PID_ANY; - - LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL); - LASSERT(ksocknal_data.ksnd_nnets > 0); - - spin_lock_bh(&net->ksnn_lock); - net->ksnn_shutdown = 1; /* prevent new peers */ - spin_unlock_bh(&net->ksnn_lock); - - /* Delete all peers */ - ksocknal_del_peer(ni, anyid, 0); - - /* Wait for all peer state to clean up */ - i = 2; - spin_lock_bh(&net->ksnn_lock); - while (net->ksnn_npeers) { - spin_unlock_bh(&net->ksnn_lock); - - i++; - CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */ - "waiting for %d peers to disconnect\n", - net->ksnn_npeers); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - - ksocknal_debug_peerhash(ni); - - spin_lock_bh(&net->ksnn_lock); - } - spin_unlock_bh(&net->ksnn_lock); - - for (i = 0; i < net->ksnn_ninterfaces; i++) { - LASSERT(!net->ksnn_interfaces[i].ksni_npeers); - LASSERT(!net->ksnn_interfaces[i].ksni_nroutes); - } - - list_del(&net->ksnn_list); - kfree(net); - - ksocknal_data.ksnd_nnets--; - if (!ksocknal_data.ksnd_nnets) - ksocknal_base_shutdown(); -} - -static int -ksocknal_enumerate_interfaces(struct ksock_net *net) -{ - char **names; - int i; - int j; - int rc; - int n; - - n = lnet_ipif_enumerate(&names); - if (n <= 0) { - CERROR("Can't enumerate interfaces: %d\n", n); - return n; - } - - for (i = j = 0; i < n; i++) { - int up; - __u32 ip; - __u32 mask; - - if (!strcmp(names[i], "lo")) /* skip the loopback IF */ - continue; - - rc = lnet_ipif_query(names[i], &up, &ip, &mask); - if (rc) { - CWARN("Can't get interface %s info: %d\n", - names[i], rc); - continue; - } - - if (!up) { - CWARN("Ignoring interface %s (down)\n", - names[i]); - continue; - } - - if (j == 
LNET_MAX_INTERFACES) { - CWARN("Ignoring interface %s (too many interfaces)\n", - names[i]); - continue; - } - - net->ksnn_interfaces[j].ksni_ipaddr = ip; - net->ksnn_interfaces[j].ksni_netmask = mask; - strlcpy(net->ksnn_interfaces[j].ksni_name, - names[i], sizeof(net->ksnn_interfaces[j].ksni_name)); - j++; - } - - lnet_ipif_free_enumeration(names, n); - - if (!j) - CERROR("Can't find any usable interfaces\n"); - - return j; -} - -static int -ksocknal_search_new_ipif(struct ksock_net *net) -{ - int new_ipif = 0; - int i; - - for (i = 0; i < net->ksnn_ninterfaces; i++) { - char *ifnam = &net->ksnn_interfaces[i].ksni_name[0]; - char *colon = strchr(ifnam, ':'); - int found = 0; - struct ksock_net *tmp; - int j; - - if (colon) /* ignore alias device */ - *colon = 0; - - list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) { - for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) { - char *ifnam2 = - &tmp->ksnn_interfaces[j].ksni_name[0]; - char *colon2 = strchr(ifnam2, ':'); - - if (colon2) - *colon2 = 0; - - found = !strcmp(ifnam, ifnam2); - if (colon2) - *colon2 = ':'; - } - if (found) - break; - } - - new_ipif += !found; - if (colon) - *colon = ':'; - } - - return new_ipif; -} - -static int -ksocknal_start_schedulers(struct ksock_sched_info *info) -{ - int nthrs; - int rc = 0; - int i; - - if (!info->ksi_nthreads) { - if (*ksocknal_tunables.ksnd_nscheds > 0) { - nthrs = info->ksi_nthreads_max; - } else { - nthrs = cfs_cpt_weight(lnet_cpt_table(), - info->ksi_cpt); - nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs); - nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs); - } - nthrs = min(nthrs, info->ksi_nthreads_max); - } else { - LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max); - /* increase two threads if there is new interface */ - nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads); - } - - for (i = 0; i < nthrs; i++) { - long id; - char name[20]; - struct ksock_sched *sched; - - id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i); - 
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)]; - snprintf(name, sizeof(name), "socknal_sd%02d_%02d", - info->ksi_cpt, (int)(sched - &info->ksi_scheds[0])); - - rc = ksocknal_thread_start(ksocknal_scheduler, - (void *)id, name); - if (!rc) - continue; - - CERROR("Can't spawn thread %d for scheduler[%d]: %d\n", - info->ksi_cpt, info->ksi_nthreads + i, rc); - break; - } - - info->ksi_nthreads += i; - return rc; -} - -static int -ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts) -{ - int newif = ksocknal_search_new_ipif(net); - int rc; - int i; - - LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table())); - - for (i = 0; i < ncpts; i++) { - struct ksock_sched_info *info; - int cpt = !cpts ? i : cpts[i]; - - LASSERT(cpt < cfs_cpt_number(lnet_cpt_table())); - info = ksocknal_data.ksnd_sched_info[cpt]; - - if (!newif && info->ksi_nthreads > 0) - continue; - - rc = ksocknal_start_schedulers(info); - if (rc) - return rc; - } - return 0; -} - -int -ksocknal_startup(struct lnet_ni *ni) -{ - struct ksock_net *net; - int rc; - int i; - - LASSERT(ni->ni_lnd == &the_ksocklnd); - - if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) { - rc = ksocknal_base_startup(); - if (rc) - return rc; - } - - net = kzalloc(sizeof(*net), GFP_NOFS); - if (!net) - goto fail_0; - - spin_lock_init(&net->ksnn_lock); - net->ksnn_incarnation = ksocknal_new_incarnation(); - ni->ni_data = net; - ni->ni_peertimeout = *ksocknal_tunables.ksnd_peertimeout; - ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits; - ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits; - ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits; - - if (!ni->ni_interfaces[0]) { - rc = ksocknal_enumerate_interfaces(net); - if (rc <= 0) - goto fail_1; - - net->ksnn_ninterfaces = 1; - } else { - for (i = 0; i < LNET_MAX_INTERFACES; i++) { - int up; - - if (!ni->ni_interfaces[i]) - break; - - rc = lnet_ipif_query(ni->ni_interfaces[i], &up, - &net->ksnn_interfaces[i].ksni_ipaddr, - 
&net->ksnn_interfaces[i].ksni_netmask); - - if (rc) { - CERROR("Can't get interface %s info: %d\n", - ni->ni_interfaces[i], rc); - goto fail_1; - } - - if (!up) { - CERROR("Interface %s is down\n", - ni->ni_interfaces[i]); - goto fail_1; - } - - strlcpy(net->ksnn_interfaces[i].ksni_name, - ni->ni_interfaces[i], - sizeof(net->ksnn_interfaces[i].ksni_name)); - } - net->ksnn_ninterfaces = i; - } - - /* call it before add it to ksocknal_data.ksnd_nets */ - rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts); - if (rc) - goto fail_1; - - ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), - net->ksnn_interfaces[0].ksni_ipaddr); - list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets); - - ksocknal_data.ksnd_nnets++; - - return 0; - - fail_1: - kfree(net); - fail_0: - if (!ksocknal_data.ksnd_nnets) - ksocknal_base_shutdown(); - - return -ENETDOWN; -} - -static void __exit ksocklnd_exit(void) -{ - lnet_unregister_lnd(&the_ksocklnd); -} - -static int __init ksocklnd_init(void) -{ - int rc; - - /* check ksnr_connected/connecting field large enough */ - BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4); - BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN); - - /* initialize the_ksocklnd */ - the_ksocklnd.lnd_type = SOCKLND; - the_ksocklnd.lnd_startup = ksocknal_startup; - the_ksocklnd.lnd_shutdown = ksocknal_shutdown; - the_ksocklnd.lnd_ctl = ksocknal_ctl; - the_ksocklnd.lnd_send = ksocknal_send; - the_ksocklnd.lnd_recv = ksocknal_recv; - the_ksocklnd.lnd_notify = ksocknal_notify; - the_ksocklnd.lnd_query = ksocknal_query; - the_ksocklnd.lnd_accept = ksocknal_accept; - - rc = ksocknal_tunables_init(); - if (rc) - return rc; - - rc = libcfs_setup(); - if (rc) - return rc; - - lnet_register_lnd(&the_ksocklnd); - - return 0; -} - -MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("TCP Socket LNet Network Driver"); -MODULE_VERSION("2.7.0"); -MODULE_LICENSE("GPL"); - -module_init(ksocklnd_init); -module_exit(ksocklnd_exit); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h deleted file mode 100644 index 4e5c89a692a3..000000000000 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ /dev/null @@ -1,704 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2011, 2012, Intel Corporation. - * - * Author: Zach Brown - * Author: Peter J. Braam - * Author: Phil Schwan - * Author: Eric Barton - * - * This file is part of Lustre, http://www.lustre.org - * - * Portals is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * Portals is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#ifndef _SOCKLND_SOCKLND_H_ -#define _SOCKLND_SOCKLND_H_ - -#define DEBUG_PORTAL_ALLOC -#define DEBUG_SUBSYSTEM S_LND - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -/* assume one thread for each connection type */ -#define SOCKNAL_NSCHEDS 3 -#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1) - -#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */ -#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ -#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ -#define SOCKNAL_ENOMEM_RETRY 1 /* jiffies between retries */ - -#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */ -#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */ - -#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ - -/* - * risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled). - * no risk if we're not running on a CONFIG_HIGHMEM platform. 
- */ -#ifdef CONFIG_HIGHMEM -# define SOCKNAL_RISK_KMAP_DEADLOCK 0 -#else -# define SOCKNAL_RISK_KMAP_DEADLOCK 1 -#endif - -struct ksock_sched_info; - -struct ksock_sched { /* per scheduler state */ - spinlock_t kss_lock; /* serialise */ - struct list_head kss_rx_conns; /* conn waiting to be read */ - struct list_head kss_tx_conns; /* conn waiting to be written */ - struct list_head kss_zombie_noop_txs; /* zombie noop tx list */ - wait_queue_head_t kss_waitq; /* where scheduler sleeps */ - int kss_nconns; /* # connections assigned to - * this scheduler - */ - struct ksock_sched_info *kss_info; /* owner of it */ -}; - -struct ksock_sched_info { - int ksi_nthreads_max; /* max allowed threads */ - int ksi_nthreads; /* number of threads */ - int ksi_cpt; /* CPT id */ - struct ksock_sched *ksi_scheds; /* array of schedulers */ -}; - -#define KSOCK_CPT_SHIFT 16 -#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid)) -#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT) -#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1)) - -struct ksock_interface { /* in-use interface */ - __u32 ksni_ipaddr; /* interface's IP address */ - __u32 ksni_netmask; /* interface's network mask */ - int ksni_nroutes; /* # routes using (active) */ - int ksni_npeers; /* # peers using (passive) */ - char ksni_name[IFNAMSIZ]; /* interface name */ -}; - -struct ksock_tunables { - int *ksnd_timeout; /* "stuck" socket timeout - * (seconds) - */ - int *ksnd_nscheds; /* # scheduler threads in each - * pool while starting - */ - int *ksnd_nconnds; /* # connection daemons */ - int *ksnd_nconnds_max; /* max # connection daemons */ - int *ksnd_min_reconnectms; /* first connection retry after - * (ms)... - */ - int *ksnd_max_reconnectms; /* ...exponentially increasing to - * this - */ - int *ksnd_eager_ack; /* make TCP ack eagerly? */ - int *ksnd_typed_conns; /* drive sockets by type? 
*/ - int *ksnd_min_bulk; /* smallest "large" message */ - int *ksnd_tx_buffer_size; /* socket tx buffer size */ - int *ksnd_rx_buffer_size; /* socket rx buffer size */ - int *ksnd_nagle; /* enable NAGLE? */ - int *ksnd_round_robin; /* round robin for multiple - * interfaces - */ - int *ksnd_keepalive; /* # secs for sending keepalive - * NOOP - */ - int *ksnd_keepalive_idle; /* # idle secs before 1st probe - */ - int *ksnd_keepalive_count; /* # probes */ - int *ksnd_keepalive_intvl; /* time between probes */ - int *ksnd_credits; /* # concurrent sends */ - int *ksnd_peertxcredits; /* # concurrent sends to 1 peer - */ - int *ksnd_peerrtrcredits; /* # per-peer router buffer - * credits - */ - int *ksnd_peertimeout; /* seconds to consider peer dead - */ - int *ksnd_enable_csum; /* enable check sum */ - int *ksnd_inject_csum_error; /* set non-zero to inject - * checksum error - */ - int *ksnd_nonblk_zcack; /* always send zc-ack on - * non-blocking connection - */ - unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload - * size - */ - int *ksnd_zc_recv; /* enable ZC receive (for - * Chelsio TOE) - */ - int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to - * enable ZC receive - */ -}; - -struct ksock_net { - __u64 ksnn_incarnation; /* my epoch */ - spinlock_t ksnn_lock; /* serialise */ - struct list_head ksnn_list; /* chain on global list */ - int ksnn_npeers; /* # peers */ - int ksnn_shutdown; /* shutting down? 
*/ - int ksnn_ninterfaces; /* IP interfaces */ - struct ksock_interface ksnn_interfaces[LNET_MAX_INTERFACES]; -}; - -/** connd timeout */ -#define SOCKNAL_CONND_TIMEOUT 120 -/** reserved thread for accepting & creating new connd */ -#define SOCKNAL_CONND_RESV 1 - -struct ksock_nal_data { - int ksnd_init; /* initialisation state - */ - int ksnd_nnets; /* # networks set up */ - struct list_head ksnd_nets; /* list of nets */ - rwlock_t ksnd_global_lock; /* stabilize peer/conn - * ops - */ - struct list_head *ksnd_peers; /* hash table of all my - * known peers - */ - int ksnd_peer_hash_size; /* size of ksnd_peers */ - - int ksnd_nthreads; /* # live threads */ - int ksnd_shuttingdown; /* tell threads to exit - */ - struct ksock_sched_info **ksnd_sched_info; /* schedulers info */ - - atomic_t ksnd_nactive_txs; /* #active txs */ - - struct list_head ksnd_deathrow_conns; /* conns to close: - * reaper_lock - */ - struct list_head ksnd_zombie_conns; /* conns to free: - * reaper_lock - */ - struct list_head ksnd_enomem_conns; /* conns to retry: - * reaper_lock - */ - wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */ - unsigned long ksnd_reaper_waketime; /* when reaper will wake - */ - spinlock_t ksnd_reaper_lock; /* serialise */ - - int ksnd_enomem_tx; /* test ENOMEM sender */ - int ksnd_stall_tx; /* test sluggish sender - */ - int ksnd_stall_rx; /* test sluggish - * receiver - */ - struct list_head ksnd_connd_connreqs; /* incoming connection - * requests - */ - struct list_head ksnd_connd_routes; /* routes waiting to be - * connected - */ - wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */ - int ksnd_connd_connecting; /* # connds connecting - */ - time64_t ksnd_connd_failed_stamp;/* time stamp of the - * last failed - * connecting attempt - */ - time64_t ksnd_connd_starting_stamp;/* time stamp of the - * last starting connd - */ - unsigned int ksnd_connd_starting; /* # starting connd */ - unsigned int ksnd_connd_running; /* # running connd */ - 
spinlock_t ksnd_connd_lock; /* serialise */ - - struct list_head ksnd_idle_noop_txs; /* list head for freed - * noop tx - */ - spinlock_t ksnd_tx_lock; /* serialise, g_lock - * unsafe - */ -}; - -#define SOCKNAL_INIT_NOTHING 0 -#define SOCKNAL_INIT_DATA 1 -#define SOCKNAL_INIT_ALL 2 - -/* - * A packet just assembled for transmission is represented by 1 or more - * struct iovec fragments (the first frag contains the portals header), - * followed by 0 or more struct bio_vec fragments. - * - * On the receive side, initially 1 struct iovec fragment is posted for - * receive (the header). Once the header has been received, the payload is - * received into either struct iovec or struct bio_vec fragments, depending on - * what the header matched or whether the message needs forwarding. - */ -struct ksock_conn; /* forward ref */ -struct ksock_peer; /* forward ref */ -struct ksock_route; /* forward ref */ -struct ksock_proto; /* forward ref */ - -struct ksock_tx { /* transmit packet */ - struct list_head tx_list; /* queue on conn for transmission etc - */ - struct list_head tx_zc_list; /* queue on peer for ZC request */ - atomic_t tx_refcount; /* tx reference count */ - int tx_nob; /* # packet bytes */ - int tx_resid; /* residual bytes */ - int tx_niov; /* # packet iovec frags */ - struct kvec *tx_iov; /* packet iovec frags */ - int tx_nkiov; /* # packet page frags */ - unsigned short tx_zc_aborted; /* aborted ZC request */ - unsigned short tx_zc_capable:1; /* payload is large enough for ZC */ - unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? 
*/ - unsigned short tx_nonblk:1; /* it's a non-blocking ACK */ - struct bio_vec *tx_kiov; /* packet page frags */ - struct ksock_conn *tx_conn; /* owning conn */ - struct lnet_msg *tx_lnetmsg; /* lnet message for lnet_finalize() - */ - unsigned long tx_deadline; /* when (in jiffies) tx times out */ - struct ksock_msg tx_msg; /* socklnd message buffer */ - int tx_desc_size; /* size of this descriptor */ - union { - struct { - struct kvec iov; /* virt hdr */ - struct bio_vec kiov[0]; /* paged payload */ - } paged; - struct { - struct kvec iov[1]; /* virt hdr + payload */ - } virt; - } tx_frags; -}; - -#define KSOCK_NOOP_TX_SIZE (offsetof(struct ksock_tx, tx_frags.paged.kiov[0])) - -/* network zero copy callback descriptor embedded in struct ksock_tx */ - -#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */ -#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */ -#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */ -#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */ -#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */ -#define SOCKNAL_RX_SLOP 6 /* skipping body */ - -struct ksock_conn { - struct ksock_peer *ksnc_peer; /* owning peer */ - struct ksock_route *ksnc_route; /* owning route */ - struct list_head ksnc_list; /* stash on peer's conn list */ - struct socket *ksnc_sock; /* actual socket */ - void *ksnc_saved_data_ready; /* socket's original - * data_ready() callback - */ - void *ksnc_saved_write_space; /* socket's original - * write_space() callback - */ - atomic_t ksnc_conn_refcount;/* conn refcount */ - atomic_t ksnc_sock_refcount;/* sock refcount */ - struct ksock_sched *ksnc_scheduler; /* who schedules this connection - */ - __u32 ksnc_myipaddr; /* my IP */ - __u32 ksnc_ipaddr; /* peer's IP */ - int ksnc_port; /* peer's port */ - signed int ksnc_type:3; /* type of connection, should be - * signed value - */ - unsigned int ksnc_closing:1; /* being shut down */ - unsigned int 
ksnc_flip:1; /* flip or not, only for V2.x */ - unsigned int ksnc_zc_capable:1; /* enable to ZC */ - struct ksock_proto *ksnc_proto; /* protocol for the connection */ - - /* reader */ - struct list_head ksnc_rx_list; /* where I enq waiting input or a - * forwarding descriptor - */ - unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times - * out - */ - __u8 ksnc_rx_started; /* started receiving a message */ - __u8 ksnc_rx_ready; /* data ready to read */ - __u8 ksnc_rx_scheduled; /* being progressed */ - __u8 ksnc_rx_state; /* what is being read */ - int ksnc_rx_nob_left; /* # bytes to next hdr/body */ - struct iov_iter ksnc_rx_to; /* copy destination */ - struct kvec ksnc_rx_iov_space[LNET_MAX_IOV]; /* space for frag descriptors */ - __u32 ksnc_rx_csum; /* partial checksum for incoming - * data - */ - void *ksnc_cookie; /* rx lnet_finalize passthru arg - */ - struct ksock_msg ksnc_msg; /* incoming message buffer: - * V2.x message takes the - * whole struct - * V1.x message is a bare - * struct lnet_hdr, it's stored in - * ksnc_msg.ksm_u.lnetmsg - */ - /* WRITER */ - struct list_head ksnc_tx_list; /* where I enq waiting for output - * space - */ - struct list_head ksnc_tx_queue; /* packets waiting to be sent */ - struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet - * message or ZC-ACK - */ - unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out - */ - int ksnc_tx_bufnob; /* send buffer marker */ - atomic_t ksnc_tx_nob; /* # bytes queued */ - int ksnc_tx_ready; /* write space */ - int ksnc_tx_scheduled; /* being progressed */ - unsigned long ksnc_tx_last_post; /* time stamp of the last posted - * TX - */ -}; - -struct ksock_route { - struct list_head ksnr_list; /* chain on peer route list */ - struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */ - struct ksock_peer *ksnr_peer; /* owning peer */ - atomic_t ksnr_refcount; /* # users */ - unsigned long ksnr_timeout; /* when (in jiffies) reconnection - * can happen 
next - */ - long ksnr_retry_interval; /* how long between retries */ - __u32 ksnr_myipaddr; /* my IP */ - __u32 ksnr_ipaddr; /* IP address to connect to */ - int ksnr_port; /* port to connect to */ - unsigned int ksnr_scheduled:1; /* scheduled for attention */ - unsigned int ksnr_connecting:1; /* connection establishment in - * progress - */ - unsigned int ksnr_connected:4; /* connections established by - * type - */ - unsigned int ksnr_deleted:1; /* been removed from peer? */ - unsigned int ksnr_share_count; /* created explicitly? */ - int ksnr_conn_count; /* # conns established by this - * route - */ -}; - -#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ - -struct ksock_peer { - struct list_head ksnp_list; /* stash on global peer list */ - unsigned long ksnp_last_alive; /* when (in jiffies) I was last - * alive - */ - struct lnet_process_id ksnp_id; /* who's on the other end(s) */ - atomic_t ksnp_refcount; /* # users */ - int ksnp_sharecount; /* lconf usage counter */ - int ksnp_closing; /* being closed */ - int ksnp_accepting; /* # passive connections pending - */ - int ksnp_error; /* errno on closing last conn */ - __u64 ksnp_zc_next_cookie; /* ZC completion cookie */ - __u64 ksnp_incarnation; /* latest known peer incarnation - */ - struct ksock_proto *ksnp_proto; /* latest known peer protocol */ - struct list_head ksnp_conns; /* all active connections */ - struct list_head ksnp_routes; /* routes */ - struct list_head ksnp_tx_queue; /* waiting packets */ - spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ - struct list_head ksnp_zc_req_list; /* zero copy requests wait for - * ACK - */ - unsigned long ksnp_send_keepalive; /* time to send keepalive */ - struct lnet_ni *ksnp_ni; /* which network */ - int ksnp_n_passive_ips; /* # of... 
*/ - - /* preferred local interfaces */ - __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; -}; - -struct ksock_connreq { - struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */ - struct lnet_ni *ksncr_ni; /* chosen NI */ - struct socket *ksncr_sock; /* accepted socket */ -}; - -extern struct ksock_nal_data ksocknal_data; -extern struct ksock_tunables ksocknal_tunables; - -#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */ -#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */ -#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not - * preferred - */ - -struct ksock_proto { - /* version number of protocol */ - int pro_version; - - /* handshake function */ - int (*pro_send_hello)(struct ksock_conn *, struct ksock_hello_msg *); - - /* handshake function */ - int (*pro_recv_hello)(struct ksock_conn *, struct ksock_hello_msg *, int); - - /* message pack */ - void (*pro_pack)(struct ksock_tx *); - - /* message unpack */ - void (*pro_unpack)(struct ksock_msg *); - - /* queue tx on the connection */ - struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *); - - /* queue ZC ack on the connection */ - int (*pro_queue_tx_zcack)(struct ksock_conn *, struct ksock_tx *, __u64); - - /* handle ZC request */ - int (*pro_handle_zcreq)(struct ksock_conn *, __u64, int); - - /* handle ZC ACK */ - int (*pro_handle_zcack)(struct ksock_conn *, __u64, __u64); - - /* - * msg type matches the connection type: - * return value: - * return MATCH_NO : no - * return MATCH_YES : matching type - * return MATCH_MAY : can be backup - */ - int (*pro_match_tx)(struct ksock_conn *, struct ksock_tx *, int); -}; - -extern struct ksock_proto ksocknal_protocol_v1x; -extern struct ksock_proto ksocknal_protocol_v2x; -extern struct ksock_proto ksocknal_protocol_v3x; - -#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR -#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR -#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR - -#ifndef 
CPU_MASK_NONE -#define CPU_MASK_NONE 0UL -#endif - -static inline int -ksocknal_route_mask(void) -{ - if (!*ksocknal_tunables.ksnd_typed_conns) - return (1 << SOCKLND_CONN_ANY); - - return ((1 << SOCKLND_CONN_CONTROL) | - (1 << SOCKLND_CONN_BULK_IN) | - (1 << SOCKLND_CONN_BULK_OUT)); -} - -static inline struct list_head * -ksocknal_nid2peerlist(lnet_nid_t nid) -{ - unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size; - - return &ksocknal_data.ksnd_peers[hash]; -} - -static inline void -ksocknal_conn_addref(struct ksock_conn *conn) -{ - LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); - atomic_inc(&conn->ksnc_conn_refcount); -} - -void ksocknal_queue_zombie_conn(struct ksock_conn *conn); -void ksocknal_finalize_zcreq(struct ksock_conn *conn); - -static inline void -ksocknal_conn_decref(struct ksock_conn *conn) -{ - LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); - if (atomic_dec_and_test(&conn->ksnc_conn_refcount)) - ksocknal_queue_zombie_conn(conn); -} - -static inline int -ksocknal_connsock_addref(struct ksock_conn *conn) -{ - int rc = -ESHUTDOWN; - - read_lock(&ksocknal_data.ksnd_global_lock); - if (!conn->ksnc_closing) { - LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0); - atomic_inc(&conn->ksnc_sock_refcount); - rc = 0; - } - read_unlock(&ksocknal_data.ksnd_global_lock); - - return rc; -} - -static inline void -ksocknal_connsock_decref(struct ksock_conn *conn) -{ - LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0); - if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) { - LASSERT(conn->ksnc_closing); - sock_release(conn->ksnc_sock); - conn->ksnc_sock = NULL; - ksocknal_finalize_zcreq(conn); - } -} - -static inline void -ksocknal_tx_addref(struct ksock_tx *tx) -{ - LASSERT(atomic_read(&tx->tx_refcount) > 0); - atomic_inc(&tx->tx_refcount); -} - -void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx); -void ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx); - -static inline void -ksocknal_tx_decref(struct 
ksock_tx *tx) -{ - LASSERT(atomic_read(&tx->tx_refcount) > 0); - if (atomic_dec_and_test(&tx->tx_refcount)) - ksocknal_tx_done(NULL, tx); -} - -static inline void -ksocknal_route_addref(struct ksock_route *route) -{ - LASSERT(atomic_read(&route->ksnr_refcount) > 0); - atomic_inc(&route->ksnr_refcount); -} - -void ksocknal_destroy_route(struct ksock_route *route); - -static inline void -ksocknal_route_decref(struct ksock_route *route) -{ - LASSERT(atomic_read(&route->ksnr_refcount) > 0); - if (atomic_dec_and_test(&route->ksnr_refcount)) - ksocknal_destroy_route(route); -} - -static inline void -ksocknal_peer_addref(struct ksock_peer *peer) -{ - LASSERT(atomic_read(&peer->ksnp_refcount) > 0); - atomic_inc(&peer->ksnp_refcount); -} - -void ksocknal_destroy_peer(struct ksock_peer *peer); - -static inline void -ksocknal_peer_decref(struct ksock_peer *peer) -{ - LASSERT(atomic_read(&peer->ksnp_refcount) > 0); - if (atomic_dec_and_test(&peer->ksnp_refcount)) - ksocknal_destroy_peer(peer); -} - -int ksocknal_startup(struct lnet_ni *ni); -void ksocknal_shutdown(struct lnet_ni *ni); -int ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg); -int ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg); -int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, - int delayed, struct iov_iter *to, unsigned int rlen); -int ksocknal_accept(struct lnet_ni *ni, struct socket *sock); - -int ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip, - int port); -struct ksock_peer *ksocknal_find_peer_locked(struct lnet_ni *ni, - struct lnet_process_id id); -struct ksock_peer *ksocknal_find_peer(struct lnet_ni *ni, - struct lnet_process_id id); -void ksocknal_peer_failed(struct ksock_peer *peer); -int ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route, - struct socket *sock, int type); -void ksocknal_close_conn_locked(struct ksock_conn *conn, int why); -void ksocknal_terminate_conn(struct ksock_conn *conn); 
-void ksocknal_destroy_conn(struct ksock_conn *conn); -int ksocknal_close_peer_conns_locked(struct ksock_peer *peer, - __u32 ipaddr, int why); -int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why); -int ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr); -struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer, - struct ksock_tx *tx, int nonblk); - -int ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx, - struct lnet_process_id id); -struct ksock_tx *ksocknal_alloc_tx(int type, int size); -void ksocknal_free_tx(struct ksock_tx *tx); -struct ksock_tx *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk); -void ksocknal_next_tx_carrier(struct ksock_conn *conn); -void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn); -void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error); -void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive); -void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when); -int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name); -void ksocknal_thread_fini(void); -void ksocknal_launch_all_connections_locked(struct ksock_peer *peer); -struct ksock_route *ksocknal_find_connectable_route_locked(struct ksock_peer *peer); -struct ksock_route *ksocknal_find_connecting_route_locked(struct ksock_peer *peer); -int ksocknal_new_packet(struct ksock_conn *conn, int skip); -int ksocknal_scheduler(void *arg); -int ksocknal_connd(void *arg); -int ksocknal_reaper(void *arg); -int ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn, - lnet_nid_t peer_nid, struct ksock_hello_msg *hello); -int ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn, - struct ksock_hello_msg *hello, - struct lnet_process_id *id, - __u64 *incarnation); -void ksocknal_read_callback(struct ksock_conn *conn); -void ksocknal_write_callback(struct ksock_conn *conn); - -int ksocknal_lib_zc_capable(struct ksock_conn 
*conn); -void ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn); -void ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn); -void ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn); -void ksocknal_lib_push_conn(struct ksock_conn *conn); -int ksocknal_lib_get_conn_addrs(struct ksock_conn *conn); -int ksocknal_lib_setup_sock(struct socket *so); -int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx); -int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx); -void ksocknal_lib_eager_ack(struct ksock_conn *conn); -int ksocknal_lib_recv(struct ksock_conn *conn); -int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, - int *rxmem, int *nagle); - -void ksocknal_read_callback(struct ksock_conn *conn); -void ksocknal_write_callback(struct ksock_conn *conn); - -int ksocknal_tunables_init(void); - -void ksocknal_lib_csum_tx(struct ksock_tx *tx); - -int ksocknal_lib_memory_pressure(struct ksock_conn *conn); -int ksocknal_lib_bind_thread_to_cpu(int id); - -#endif /* _SOCKLND_SOCKLND_H_ */ diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c deleted file mode 100644 index 01b31a6bb588..000000000000 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ /dev/null @@ -1,2586 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2011, 2012, Intel Corporation. - * - * Author: Zach Brown - * Author: Peter J. Braam - * Author: Phil Schwan - * Author: Eric Barton - * - * This file is part of Portals, http://www.sf.net/projects/sandiaportals/ - * - * Portals is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. 
- * - * Portals is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include "socklnd.h" - -struct ksock_tx * -ksocknal_alloc_tx(int type, int size) -{ - struct ksock_tx *tx = NULL; - - if (type == KSOCK_MSG_NOOP) { - LASSERT(size == KSOCK_NOOP_TX_SIZE); - - /* searching for a noop tx in free list */ - spin_lock(&ksocknal_data.ksnd_tx_lock); - - if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { - tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next, - struct ksock_tx, tx_list); - LASSERT(tx->tx_desc_size == size); - list_del(&tx->tx_list); - } - - spin_unlock(&ksocknal_data.ksnd_tx_lock); - } - - if (!tx) - tx = kzalloc(size, GFP_NOFS); - - if (!tx) - return NULL; - - atomic_set(&tx->tx_refcount, 1); - tx->tx_zc_aborted = 0; - tx->tx_zc_capable = 0; - tx->tx_zc_checked = 0; - tx->tx_desc_size = size; - - atomic_inc(&ksocknal_data.ksnd_nactive_txs); - - return tx; -} - -struct ksock_tx * -ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) -{ - struct ksock_tx *tx; - - tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE); - if (!tx) { - CERROR("Can't allocate noop tx desc\n"); - return NULL; - } - - tx->tx_conn = NULL; - tx->tx_lnetmsg = NULL; - tx->tx_kiov = NULL; - tx->tx_nkiov = 0; - tx->tx_iov = tx->tx_frags.virt.iov; - tx->tx_niov = 1; - tx->tx_nonblk = nonblk; - - tx->tx_msg.ksm_csum = 0; - tx->tx_msg.ksm_type = KSOCK_MSG_NOOP; - tx->tx_msg.ksm_zc_cookies[0] = 0; - tx->tx_msg.ksm_zc_cookies[1] = cookie; - - return tx; -} - -void -ksocknal_free_tx(struct ksock_tx *tx) -{ - atomic_dec(&ksocknal_data.ksnd_nactive_txs); - - if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { - /* it's a noop tx */ - spin_lock(&ksocknal_data.ksnd_tx_lock); - - list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs); - - spin_unlock(&ksocknal_data.ksnd_tx_lock); 
- } else { - kfree(tx); - } -} - -static int -ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx) -{ - struct kvec *iov = tx->tx_iov; - int nob; - int rc; - - LASSERT(tx->tx_niov > 0); - - /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */ - rc = ksocknal_lib_send_iov(conn, tx); - - if (rc <= 0) /* sent nothing? */ - return rc; - - nob = rc; - LASSERT(nob <= tx->tx_resid); - tx->tx_resid -= nob; - - /* "consume" iov */ - do { - LASSERT(tx->tx_niov > 0); - - if (nob < (int)iov->iov_len) { - iov->iov_base = (void *)((char *)iov->iov_base + nob); - iov->iov_len -= nob; - return rc; - } - - nob -= iov->iov_len; - tx->tx_iov = ++iov; - tx->tx_niov--; - } while (nob); - - return rc; -} - -static int -ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx) -{ - struct bio_vec *kiov = tx->tx_kiov; - int nob; - int rc; - - LASSERT(!tx->tx_niov); - LASSERT(tx->tx_nkiov > 0); - - /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */ - rc = ksocknal_lib_send_kiov(conn, tx); - - if (rc <= 0) /* sent nothing? */ - return rc; - - nob = rc; - LASSERT(nob <= tx->tx_resid); - tx->tx_resid -= nob; - - /* "consume" kiov */ - do { - LASSERT(tx->tx_nkiov > 0); - - if (nob < (int)kiov->bv_len) { - kiov->bv_offset += nob; - kiov->bv_len -= nob; - return rc; - } - - nob -= (int)kiov->bv_len; - tx->tx_kiov = ++kiov; - tx->tx_nkiov--; - } while (nob); - - return rc; -} - -static int -ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx) -{ - int rc; - int bufnob; - - if (ksocknal_data.ksnd_stall_tx) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(ksocknal_data.ksnd_stall_tx * HZ); - } - - LASSERT(tx->tx_resid); - - rc = ksocknal_connsock_addref(conn); - if (rc) { - LASSERT(conn->ksnc_closing); - return -ESHUTDOWN; - } - - do { - if (ksocknal_data.ksnd_enomem_tx > 0) { - /* testing... 
*/ - ksocknal_data.ksnd_enomem_tx--; - rc = -EAGAIN; - } else if (tx->tx_niov) { - rc = ksocknal_send_iov(conn, tx); - } else { - rc = ksocknal_send_kiov(conn, tx); - } - - bufnob = conn->ksnc_sock->sk->sk_wmem_queued; - if (rc > 0) /* sent something? */ - conn->ksnc_tx_bufnob += rc; /* account it */ - - if (bufnob < conn->ksnc_tx_bufnob) { - /* - * allocated send buffer bytes < computed; infer - * something got ACKed - */ - conn->ksnc_tx_deadline = - jiffies + *ksocknal_tunables.ksnd_timeout * HZ; - conn->ksnc_peer->ksnp_last_alive = jiffies; - conn->ksnc_tx_bufnob = bufnob; - mb(); - } - - if (rc <= 0) { /* Didn't write anything? */ - - if (!rc) /* some stacks return 0 instead of -EAGAIN */ - rc = -EAGAIN; - - /* Check if EAGAIN is due to memory pressure */ - if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn)) - rc = -ENOMEM; - - break; - } - - /* socket's wmem_queued now includes 'rc' bytes */ - atomic_sub(rc, &conn->ksnc_tx_nob); - rc = 0; - - } while (tx->tx_resid); - - ksocknal_connsock_decref(conn); - return rc; -} - -static int -ksocknal_recv_iter(struct ksock_conn *conn) -{ - int nob; - int rc; - - /* - * Never touch conn->ksnc_rx_to or change connection - * status inside ksocknal_lib_recv - */ - rc = ksocknal_lib_recv(conn); - - if (rc <= 0) - return rc; - - /* received something... */ - nob = rc; - - conn->ksnc_peer->ksnp_last_alive = jiffies; - conn->ksnc_rx_deadline = - jiffies + *ksocknal_tunables.ksnd_timeout * HZ; - mb(); /* order with setting rx_started */ - conn->ksnc_rx_started = 1; - - conn->ksnc_rx_nob_left -= nob; - - iov_iter_advance(&conn->ksnc_rx_to, nob); - if (iov_iter_count(&conn->ksnc_rx_to)) - return -EAGAIN; - - return 1; -} - -static int -ksocknal_receive(struct ksock_conn *conn) -{ - /* - * Return 1 on success, 0 on EOF, < 0 on error. - * Caller checks ksnc_rx_to to determine - * progress/completion. 
- */ - int rc; - - if (ksocknal_data.ksnd_stall_rx) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(ksocknal_data.ksnd_stall_rx * HZ); - } - - rc = ksocknal_connsock_addref(conn); - if (rc) { - LASSERT(conn->ksnc_closing); - return -ESHUTDOWN; - } - - for (;;) { - rc = ksocknal_recv_iter(conn); - if (rc <= 0) { - /* error/EOF or partial receive */ - if (rc == -EAGAIN) { - rc = 1; - } else if (!rc && conn->ksnc_rx_started) { - /* EOF in the middle of a message */ - rc = -EPROTO; - } - break; - } - - /* Completed a fragment */ - - if (!iov_iter_count(&conn->ksnc_rx_to)) { - rc = 1; - break; - } - } - - ksocknal_connsock_decref(conn); - return rc; -} - -void -ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx) -{ - struct lnet_msg *lnetmsg = tx->tx_lnetmsg; - int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO; - - LASSERT(ni || tx->tx_conn); - - if (tx->tx_conn) - ksocknal_conn_decref(tx->tx_conn); - - if (!ni && tx->tx_conn) - ni = tx->tx_conn->ksnc_peer->ksnp_ni; - - ksocknal_free_tx(tx); - if (lnetmsg) /* KSOCK_MSG_NOOP go without lnetmsg */ - lnet_finalize(ni, lnetmsg, rc); -} - -void -ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error) -{ - struct ksock_tx *tx; - - while (!list_empty(txlist)) { - tx = list_entry(txlist->next, struct ksock_tx, tx_list); - - if (error && tx->tx_lnetmsg) { - CNETERR("Deleting packet type %d len %d %s->%s\n", - le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type), - le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length), - libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)), - libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid))); - } else if (error) { - CNETERR("Deleting noop packet\n"); - } - - list_del(&tx->tx_list); - - LASSERT(atomic_read(&tx->tx_refcount) == 1); - ksocknal_tx_done(ni, tx); - } -} - -static void -ksocknal_check_zc_req(struct ksock_tx *tx) -{ - struct ksock_conn *conn = tx->tx_conn; - struct ksock_peer *peer = conn->ksnc_peer; - - /* - * Set 
tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx - * to ksnp_zc_req_list if some fragment of this message should be sent - * zero-copy. Our peer will send an ACK containing this cookie when - * she has received this message to tell us we can signal completion. - * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on - * ksnp_zc_req_list. - */ - LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); - LASSERT(tx->tx_zc_capable); - - tx->tx_zc_checked = 1; - - if (conn->ksnc_proto == &ksocknal_protocol_v1x || - !conn->ksnc_zc_capable) - return; - - /* - * assign cookie and queue tx to pending list, it will be released when - * a matching ack is received. See ksocknal_handle_zcack() - */ - ksocknal_tx_addref(tx); - - spin_lock(&peer->ksnp_lock); - - /* ZC_REQ is going to be pinned to the peer */ - tx->tx_deadline = - jiffies + *ksocknal_tunables.ksnd_timeout * HZ; - - LASSERT(!tx->tx_msg.ksm_zc_cookies[0]); - - tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; - - if (!peer->ksnp_zc_next_cookie) - peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; - - list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list); - - spin_unlock(&peer->ksnp_lock); -} - -static void -ksocknal_uncheck_zc_req(struct ksock_tx *tx) -{ - struct ksock_peer *peer = tx->tx_conn->ksnc_peer; - - LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); - LASSERT(tx->tx_zc_capable); - - tx->tx_zc_checked = 0; - - spin_lock(&peer->ksnp_lock); - - if (!tx->tx_msg.ksm_zc_cookies[0]) { - /* Not waiting for an ACK */ - spin_unlock(&peer->ksnp_lock); - return; - } - - tx->tx_msg.ksm_zc_cookies[0] = 0; - list_del(&tx->tx_zc_list); - - spin_unlock(&peer->ksnp_lock); - - ksocknal_tx_decref(tx); -} - -static int -ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx) -{ - int rc; - - if (tx->tx_zc_capable && !tx->tx_zc_checked) - ksocknal_check_zc_req(tx); - - rc = ksocknal_transmit(conn, tx); - - CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc); - - if (!tx->tx_resid) { - /* 
Sent everything OK */ - LASSERT(!rc); - - return 0; - } - - if (rc == -EAGAIN) - return rc; - - if (rc == -ENOMEM) { - static int counter; - - counter++; /* exponential backoff warnings */ - if ((counter & (-counter)) == counter) - CWARN("%u ENOMEM tx %p\n", counter, conn); - - /* Queue on ksnd_enomem_conns for retry after a timeout */ - spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - - /* enomem list takes over scheduler's ref... */ - LASSERT(conn->ksnc_tx_scheduled); - list_add_tail(&conn->ksnc_tx_list, - &ksocknal_data.ksnd_enomem_conns); - if (!time_after_eq(jiffies + SOCKNAL_ENOMEM_RETRY, - ksocknal_data.ksnd_reaper_waketime)) - wake_up(&ksocknal_data.ksnd_reaper_waitq); - - spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); - return rc; - } - - /* Actual error */ - LASSERT(rc < 0); - - if (!conn->ksnc_closing) { - switch (rc) { - case -ECONNRESET: - LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n", - &conn->ksnc_ipaddr); - break; - default: - LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n", - &conn->ksnc_ipaddr, rc); - break; - } - CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n", - conn, rc, - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - } - - if (tx->tx_zc_checked) - ksocknal_uncheck_zc_req(tx); - - /* it's not an error if conn is being closed */ - ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 
0 : rc); - - return rc; -} - -static void -ksocknal_launch_connection_locked(struct ksock_route *route) -{ - /* called holding write lock on ksnd_global_lock */ - - LASSERT(!route->ksnr_scheduled); - LASSERT(!route->ksnr_connecting); - LASSERT(ksocknal_route_mask() & ~route->ksnr_connected); - - route->ksnr_scheduled = 1; /* scheduling conn for connd */ - ksocknal_route_addref(route); /* extra ref for connd */ - - spin_lock_bh(&ksocknal_data.ksnd_connd_lock); - - list_add_tail(&route->ksnr_connd_list, - &ksocknal_data.ksnd_connd_routes); - wake_up(&ksocknal_data.ksnd_connd_waitq); - - spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); -} - -void -ksocknal_launch_all_connections_locked(struct ksock_peer *peer) -{ - struct ksock_route *route; - - /* called holding write lock on ksnd_global_lock */ - for (;;) { - /* launch any/all connections that need it */ - route = ksocknal_find_connectable_route_locked(peer); - if (!route) - return; - - ksocknal_launch_connection_locked(route); - } -} - -struct ksock_conn * -ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, - int nonblk) -{ - struct list_head *tmp; - struct ksock_conn *conn; - struct ksock_conn *typed = NULL; - struct ksock_conn *fallback = NULL; - int tnob = 0; - int fnob = 0; - - list_for_each(tmp, &peer->ksnp_conns) { - struct ksock_conn *c; - int nob, rc; - - c = list_entry(tmp, struct ksock_conn, ksnc_list); - nob = atomic_read(&c->ksnc_tx_nob) + - c->ksnc_sock->sk->sk_wmem_queued; - - LASSERT(!c->ksnc_closing); - LASSERT(c->ksnc_proto && - c->ksnc_proto->pro_match_tx); - - rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk); - - switch (rc) { - default: - LBUG(); - case SOCKNAL_MATCH_NO: /* protocol rejected the tx */ - continue; - - case SOCKNAL_MATCH_YES: /* typed connection */ - if (!typed || tnob > nob || - (tnob == nob && *ksocknal_tunables.ksnd_round_robin && - time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) { - typed = c; - tnob = nob; - } - break; - - case 
SOCKNAL_MATCH_MAY: /* fallback connection */ - if (!fallback || fnob > nob || - (fnob == nob && *ksocknal_tunables.ksnd_round_robin && - time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) { - fallback = c; - fnob = nob; - } - break; - } - } - - /* prefer the typed selection */ - conn = (typed) ? typed : fallback; - - if (conn) - conn->ksnc_tx_last_post = jiffies; - - return conn; -} - -void -ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx) -{ - conn->ksnc_proto->pro_pack(tx); - - atomic_add(tx->tx_nob, &conn->ksnc_tx_nob); - ksocknal_conn_addref(conn); /* +1 ref for tx */ - tx->tx_conn = conn; -} - -void -ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn) -{ - struct ksock_sched *sched = conn->ksnc_scheduler; - struct ksock_msg *msg = &tx->tx_msg; - struct ksock_tx *ztx = NULL; - int bufnob = 0; - - /* - * called holding global lock (read or irq-write) and caller may - * not have dropped this lock between finding conn and calling me, - * so we don't need the {get,put}connsock dance to deref - * ksnc_sock... - */ - LASSERT(!conn->ksnc_closing); - - CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, conn->ksnc_port); - - ksocknal_tx_prep(conn, tx); - - /* - * Ensure the frags we've been given EXACTLY match the number of - * bytes we want to send. Many TCP/IP stacks disregard any total - * size parameters passed to them and just look at the frags. - * - * We always expect at least 1 mapped fragment containing the - * complete ksocknal message header. - */ - LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) + - lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == - (unsigned int)tx->tx_nob); - LASSERT(tx->tx_niov >= 1); - LASSERT(tx->tx_resid == tx->tx_nob); - - CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", - tx, (tx->tx_lnetmsg) ? 
tx->tx_lnetmsg->msg_hdr.type : - KSOCK_MSG_NOOP, - tx->tx_nob, tx->tx_niov, tx->tx_nkiov); - - /* - * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__ - * but they're used inside spinlocks a lot. - */ - bufnob = conn->ksnc_sock->sk->sk_wmem_queued; - spin_lock_bh(&sched->kss_lock); - - if (list_empty(&conn->ksnc_tx_queue) && !bufnob) { - /* First packet starts the timeout */ - conn->ksnc_tx_deadline = - jiffies + *ksocknal_tunables.ksnd_timeout * HZ; - if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */ - conn->ksnc_peer->ksnp_last_alive = jiffies; - conn->ksnc_tx_bufnob = 0; - mb(); /* order with adding to tx_queue */ - } - - if (msg->ksm_type == KSOCK_MSG_NOOP) { - /* - * The packet is noop ZC ACK, try to piggyback the ack_cookie - * on a normal packet so I don't need to send it - */ - LASSERT(msg->ksm_zc_cookies[1]); - LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); - - /* ZC ACK piggybacked on ztx release tx later */ - if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) - ztx = tx; - } else { - /* - * It's a normal packet - can it piggback a noop zc-ack that - * has been queued already? 
- */ - LASSERT(!msg->ksm_zc_cookies[1]); - LASSERT(conn->ksnc_proto->pro_queue_tx_msg); - - ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx); - /* ztx will be released later */ - } - - if (ztx) { - atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob); - list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs); - } - - if (conn->ksnc_tx_ready && /* able to send */ - !conn->ksnc_tx_scheduled) { /* not scheduled to send */ - /* +1 ref for scheduler */ - ksocknal_conn_addref(conn); - list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); - conn->ksnc_tx_scheduled = 1; - wake_up(&sched->kss_waitq); - } - - spin_unlock_bh(&sched->kss_lock); -} - -struct ksock_route * -ksocknal_find_connectable_route_locked(struct ksock_peer *peer) -{ - unsigned long now = jiffies; - struct list_head *tmp; - struct ksock_route *route; - - list_for_each(tmp, &peer->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); - - LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); - - /* connections being established */ - if (route->ksnr_scheduled) - continue; - - /* all route types connected ? 
*/ - if (!(ksocknal_route_mask() & ~route->ksnr_connected)) - continue; - - if (!(!route->ksnr_retry_interval || /* first attempt */ - time_after_eq(now, route->ksnr_timeout))) { - CDEBUG(D_NET, - "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n", - &route->ksnr_ipaddr, - route->ksnr_connected, - route->ksnr_retry_interval, - (route->ksnr_timeout - now) / HZ); - continue; - } - - return route; - } - - return NULL; -} - -struct ksock_route * -ksocknal_find_connecting_route_locked(struct ksock_peer *peer) -{ - struct list_head *tmp; - struct ksock_route *route; - - list_for_each(tmp, &peer->ksnp_routes) { - route = list_entry(tmp, struct ksock_route, ksnr_list); - - LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); - - if (route->ksnr_scheduled) - return route; - } - - return NULL; -} - -int -ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx, - struct lnet_process_id id) -{ - struct ksock_peer *peer; - struct ksock_conn *conn; - rwlock_t *g_lock; - int retry; - int rc; - - LASSERT(!tx->tx_conn); - - g_lock = &ksocknal_data.ksnd_global_lock; - - for (retry = 0;; retry = 1) { - read_lock(g_lock); - peer = ksocknal_find_peer_locked(ni, id); - if (peer) { - if (!ksocknal_find_connectable_route_locked(peer)) { - conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); - if (conn) { - /* - * I've got no routes that need to be - * connecting and I do have an actual - * connection... - */ - ksocknal_queue_tx_locked(tx, conn); - read_unlock(g_lock); - return 0; - } - } - } - - /* I'll need a write lock... 
*/ - read_unlock(g_lock); - - write_lock_bh(g_lock); - - peer = ksocknal_find_peer_locked(ni, id); - if (peer) - break; - - write_unlock_bh(g_lock); - - if (id.pid & LNET_PID_USERFLAG) { - CERROR("Refusing to create a connection to userspace process %s\n", - libcfs_id2str(id)); - return -EHOSTUNREACH; - } - - if (retry) { - CERROR("Can't find peer %s\n", libcfs_id2str(id)); - return -EHOSTUNREACH; - } - - rc = ksocknal_add_peer(ni, id, - LNET_NIDADDR(id.nid), - lnet_acceptor_port()); - if (rc) { - CERROR("Can't add peer %s: %d\n", - libcfs_id2str(id), rc); - return rc; - } - } - - ksocknal_launch_all_connections_locked(peer); - - conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); - if (conn) { - /* Connection exists; queue message on it */ - ksocknal_queue_tx_locked(tx, conn); - write_unlock_bh(g_lock); - return 0; - } - - if (peer->ksnp_accepting > 0 || - ksocknal_find_connecting_route_locked(peer)) { - /* the message is going to be pinned to the peer */ - tx->tx_deadline = - jiffies + *ksocknal_tunables.ksnd_timeout * HZ; - - /* Queue the message until a connection is established */ - list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue); - write_unlock_bh(g_lock); - return 0; - } - - write_unlock_bh(g_lock); - - /* NB Routes may be ignored if connections to them failed recently */ - CNETERR("No usable routes to %s\n", libcfs_id2str(id)); - return -EHOSTUNREACH; -} - -int -ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) -{ - unsigned int mpflag = 0; - int type = lntmsg->msg_type; - struct lnet_process_id target = lntmsg->msg_target; - unsigned int payload_niov = lntmsg->msg_niov; - struct kvec *payload_iov = lntmsg->msg_iov; - struct bio_vec *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; - struct ksock_tx *tx; - int desc_size; - int rc; - - /* - * NB 'private' is different depending on what we're sending. - * Just ignore it... 
- */ - CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n", - payload_nob, payload_niov, libcfs_id2str(target)); - - LASSERT(!payload_nob || payload_niov > 0); - LASSERT(payload_niov <= LNET_MAX_IOV); - /* payload is either all vaddrs or all pages */ - LASSERT(!(payload_kiov && payload_iov)); - LASSERT(!in_interrupt()); - - if (payload_iov) - desc_size = offsetof(struct ksock_tx, - tx_frags.virt.iov[1 + payload_niov]); - else - desc_size = offsetof(struct ksock_tx, - tx_frags.paged.kiov[payload_niov]); - - if (lntmsg->msg_vmflush) - mpflag = memalloc_noreclaim_save(); - tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size); - if (!tx) { - CERROR("Can't allocate tx desc type %d size %d\n", - type, desc_size); - if (lntmsg->msg_vmflush) - memalloc_noreclaim_restore(mpflag); - return -ENOMEM; - } - - tx->tx_conn = NULL; /* set when assigned a conn */ - tx->tx_lnetmsg = lntmsg; - - if (payload_iov) { - tx->tx_kiov = NULL; - tx->tx_nkiov = 0; - tx->tx_iov = tx->tx_frags.virt.iov; - tx->tx_niov = 1 + - lnet_extract_iov(payload_niov, &tx->tx_iov[1], - payload_niov, payload_iov, - payload_offset, payload_nob); - } else { - tx->tx_niov = 1; - tx->tx_iov = &tx->tx_frags.paged.iov; - tx->tx_kiov = tx->tx_frags.paged.kiov; - tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov, - payload_niov, payload_kiov, - payload_offset, payload_nob); - - if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload) - tx->tx_zc_capable = 1; - } - - tx->tx_msg.ksm_csum = 0; - tx->tx_msg.ksm_type = KSOCK_MSG_LNET; - tx->tx_msg.ksm_zc_cookies[0] = 0; - tx->tx_msg.ksm_zc_cookies[1] = 0; - - /* The first fragment will be set later in pro_pack */ - rc = ksocknal_launch_packet(ni, tx, target); - if (mpflag) - memalloc_noreclaim_restore(mpflag); - - if (!rc) - return 0; - - ksocknal_free_tx(tx); - return -EIO; -} - -int -ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name) -{ - struct task_struct *task = kthread_run(fn, arg, "%s", name); - - if (IS_ERR(task)) - return PTR_ERR(task); 
- - write_lock_bh(&ksocknal_data.ksnd_global_lock); - ksocknal_data.ksnd_nthreads++; - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - return 0; -} - -void -ksocknal_thread_fini(void) -{ - write_lock_bh(&ksocknal_data.ksnd_global_lock); - ksocknal_data.ksnd_nthreads--; - write_unlock_bh(&ksocknal_data.ksnd_global_lock); -} - -int -ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip) -{ - static char ksocknal_slop_buffer[4096]; - struct kvec *kvec = conn->ksnc_rx_iov_space; - - int nob; - unsigned int niov; - int skipped; - - LASSERT(conn->ksnc_proto); - - if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) { - /* Remind the socket to ack eagerly... */ - ksocknal_lib_eager_ack(conn); - } - - if (!nob_to_skip) { /* right at next packet boundary now */ - conn->ksnc_rx_started = 0; - mb(); /* racing with timeout thread */ - - switch (conn->ksnc_proto->pro_version) { - case KSOCK_PROTO_V2: - case KSOCK_PROTO_V3: - conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER; - kvec->iov_base = &conn->ksnc_msg; - kvec->iov_len = offsetof(struct ksock_msg, ksm_u); - conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u); - iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, - 1, offsetof(struct ksock_msg, ksm_u)); - break; - - case KSOCK_PROTO_V1: - /* Receiving bare struct lnet_hdr */ - conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER; - kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg; - kvec->iov_len = sizeof(struct lnet_hdr); - conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr); - iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, - 1, sizeof(struct lnet_hdr)); - break; - - default: - LBUG(); - } - conn->ksnc_rx_csum = ~0; - return 1; - } - - /* - * Set up to skip as much as possible now. 
If there's more left - * (ran out of iov entries) we'll get called again - */ - conn->ksnc_rx_state = SOCKNAL_RX_SLOP; - conn->ksnc_rx_nob_left = nob_to_skip; - skipped = 0; - niov = 0; - - do { - nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer)); - - kvec[niov].iov_base = ksocknal_slop_buffer; - kvec[niov].iov_len = nob; - niov++; - skipped += nob; - nob_to_skip -= nob; - - } while (nob_to_skip && /* mustn't overflow conn's rx iov */ - niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec)); - - iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, niov, skipped); - return 0; -} - -static int -ksocknal_process_receive(struct ksock_conn *conn) -{ - struct kvec *kvec = conn->ksnc_rx_iov_space; - struct lnet_hdr *lhdr; - struct lnet_process_id *id; - int rc; - - LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); - - /* NB: sched lock NOT held */ - /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */ - LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER || - conn->ksnc_rx_state == SOCKNAL_RX_SLOP); - again: - if (iov_iter_count(&conn->ksnc_rx_to)) { - rc = ksocknal_receive(conn); - - if (rc <= 0) { - LASSERT(rc != -EAGAIN); - - if (!rc) - CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n", - conn, - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - else if (!conn->ksnc_closing) - CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n", - conn, rc, - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - - /* it's not an error if conn is being closed */ - ksocknal_close_conn_and_siblings(conn, - (conn->ksnc_closing) ? 0 : rc); - return (!rc ? 
-ESHUTDOWN : rc); - } - - if (iov_iter_count(&conn->ksnc_rx_to)) { - /* short read */ - return -EAGAIN; - } - } - switch (conn->ksnc_rx_state) { - case SOCKNAL_RX_KSM_HEADER: - if (conn->ksnc_flip) { - __swab32s(&conn->ksnc_msg.ksm_type); - __swab32s(&conn->ksnc_msg.ksm_csum); - __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]); - __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]); - } - - if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP && - conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) { - CERROR("%s: Unknown message type: %x\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - conn->ksnc_msg.ksm_type); - ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings(conn, -EPROTO); - return -EPROTO; - } - - if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP && - conn->ksnc_msg.ksm_csum && /* has checksum */ - conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) { - /* NOOP Checksum error */ - CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum); - ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings(conn, -EPROTO); - return -EIO; - } - - if (conn->ksnc_msg.ksm_zc_cookies[1]) { - __u64 cookie = 0; - - LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); - - if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) - cookie = conn->ksnc_msg.ksm_zc_cookies[0]; - - rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie, - conn->ksnc_msg.ksm_zc_cookies[1]); - - if (rc) { - CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - cookie, conn->ksnc_msg.ksm_zc_cookies[1]); - ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings(conn, -EPROTO); - return rc; - } - } - - if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) { - ksocknal_new_packet(conn, 0); - return 0; /* NOOP is done and just return */ - } - - conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER; - conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg); - - kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg; - 
kvec->iov_len = sizeof(struct ksock_lnet_msg); - - iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, - 1, sizeof(struct ksock_lnet_msg)); - - goto again; /* read lnet header now */ - - case SOCKNAL_RX_LNET_HEADER: - /* unpack message header */ - conn->ksnc_proto->pro_unpack(&conn->ksnc_msg); - - if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) { - /* Userspace peer */ - lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; - id = &conn->ksnc_peer->ksnp_id; - - /* Substitute process ID assigned at connection time */ - lhdr->src_pid = cpu_to_le32(id->pid); - lhdr->src_nid = cpu_to_le64(id->nid); - } - - conn->ksnc_rx_state = SOCKNAL_RX_PARSE; - ksocknal_conn_addref(conn); /* ++ref while parsing */ - - rc = lnet_parse(conn->ksnc_peer->ksnp_ni, - &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr, - conn->ksnc_peer->ksnp_id.nid, conn, 0); - if (rc < 0) { - /* I just received garbage: give up on this conn */ - ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings(conn, rc); - ksocknal_conn_decref(conn); - return -EPROTO; - } - - /* I'm racing with ksocknal_recv() */ - LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD); - - if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD) - return 0; - - /* ksocknal_recv() got called */ - goto again; - - case SOCKNAL_RX_LNET_PAYLOAD: - /* payload all received */ - rc = 0; - - if (!conn->ksnc_rx_nob_left && /* not truncating */ - conn->ksnc_msg.ksm_csum && /* has checksum */ - conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) { - CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum); - rc = -EIO; - } - - if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) { - LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); - - lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; - id = &conn->ksnc_peer->ksnp_id; - - rc = conn->ksnc_proto->pro_handle_zcreq(conn, - conn->ksnc_msg.ksm_zc_cookies[0], - 
*ksocknal_tunables.ksnd_nonblk_zcack || - le64_to_cpu(lhdr->src_nid) != id->nid); - } - - lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc); - - if (rc) { - ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings(conn, rc); - return -EPROTO; - } - /* Fall through */ - - case SOCKNAL_RX_SLOP: - /* starting new packet? */ - if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left)) - return 0; /* come back later */ - goto again; /* try to finish reading slop now */ - - default: - break; - } - - /* Not Reached */ - LBUG(); - return -EINVAL; /* keep gcc happy */ -} - -int -ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, - int delayed, struct iov_iter *to, unsigned int rlen) -{ - struct ksock_conn *conn = private; - struct ksock_sched *sched = conn->ksnc_scheduler; - - LASSERT(iov_iter_count(to) <= rlen); - LASSERT(to->nr_segs <= LNET_MAX_IOV); - - conn->ksnc_cookie = msg; - conn->ksnc_rx_nob_left = rlen; - - conn->ksnc_rx_to = *to; - - LASSERT(conn->ksnc_rx_scheduled); - - spin_lock_bh(&sched->kss_lock); - - switch (conn->ksnc_rx_state) { - case SOCKNAL_RX_PARSE_WAIT: - list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); - wake_up(&sched->kss_waitq); - LASSERT(conn->ksnc_rx_ready); - break; - - case SOCKNAL_RX_PARSE: - /* scheduler hasn't noticed I'm parsing yet */ - break; - } - - conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD; - - spin_unlock_bh(&sched->kss_lock); - ksocknal_conn_decref(conn); - return 0; -} - -static inline int -ksocknal_sched_cansleep(struct ksock_sched *sched) -{ - int rc; - - spin_lock_bh(&sched->kss_lock); - - rc = !ksocknal_data.ksnd_shuttingdown && - list_empty(&sched->kss_rx_conns) && - list_empty(&sched->kss_tx_conns); - - spin_unlock_bh(&sched->kss_lock); - return rc; -} - -int ksocknal_scheduler(void *arg) -{ - struct ksock_sched_info *info; - struct ksock_sched *sched; - struct ksock_conn *conn; - struct ksock_tx *tx; - int rc; - int nloops = 0; - long id = (long)arg; - - info = 
ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)]; - sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)]; - - rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt); - if (rc) { - CWARN("Can't set CPU partition affinity to %d: %d\n", - info->ksi_cpt, rc); - } - - spin_lock_bh(&sched->kss_lock); - - while (!ksocknal_data.ksnd_shuttingdown) { - int did_something = 0; - - /* Ensure I progress everything semi-fairly */ - - if (!list_empty(&sched->kss_rx_conns)) { - conn = list_entry(sched->kss_rx_conns.next, - struct ksock_conn, ksnc_rx_list); - list_del(&conn->ksnc_rx_list); - - LASSERT(conn->ksnc_rx_scheduled); - LASSERT(conn->ksnc_rx_ready); - - /* - * clear rx_ready in case receive isn't complete. - * Do it BEFORE we call process_recv, since - * data_ready can set it any time after we release - * kss_lock. - */ - conn->ksnc_rx_ready = 0; - spin_unlock_bh(&sched->kss_lock); - - rc = ksocknal_process_receive(conn); - - spin_lock_bh(&sched->kss_lock); - - /* I'm the only one that can clear this flag */ - LASSERT(conn->ksnc_rx_scheduled); - - /* Did process_receive get everything it wanted? 
*/ - if (!rc) - conn->ksnc_rx_ready = 1; - - if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) { - /* - * Conn blocked waiting for ksocknal_recv() - * I change its state (under lock) to signal - * it can be rescheduled - */ - conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT; - } else if (conn->ksnc_rx_ready) { - /* reschedule for rx */ - list_add_tail(&conn->ksnc_rx_list, - &sched->kss_rx_conns); - } else { - conn->ksnc_rx_scheduled = 0; - /* drop my ref */ - ksocknal_conn_decref(conn); - } - - did_something = 1; - } - - if (!list_empty(&sched->kss_tx_conns)) { - LIST_HEAD(zlist); - - if (!list_empty(&sched->kss_zombie_noop_txs)) { - list_add(&zlist, &sched->kss_zombie_noop_txs); - list_del_init(&sched->kss_zombie_noop_txs); - } - - conn = list_entry(sched->kss_tx_conns.next, - struct ksock_conn, ksnc_tx_list); - list_del(&conn->ksnc_tx_list); - - LASSERT(conn->ksnc_tx_scheduled); - LASSERT(conn->ksnc_tx_ready); - LASSERT(!list_empty(&conn->ksnc_tx_queue)); - - tx = list_entry(conn->ksnc_tx_queue.next, - struct ksock_tx, tx_list); - - if (conn->ksnc_tx_carrier == tx) - ksocknal_next_tx_carrier(conn); - - /* dequeue now so empty list => more to send */ - list_del(&tx->tx_list); - - /* - * Clear tx_ready in case send isn't complete. Do - * it BEFORE we call process_transmit, since - * write_space can set it any time after we release - * kss_lock. 
- */ - conn->ksnc_tx_ready = 0; - spin_unlock_bh(&sched->kss_lock); - - if (!list_empty(&zlist)) { - /* - * free zombie noop txs, it's fast because - * noop txs are just put in freelist - */ - ksocknal_txlist_done(NULL, &zlist, 0); - } - - rc = ksocknal_process_transmit(conn, tx); - - if (rc == -ENOMEM || rc == -EAGAIN) { - /* - * Incomplete send: replace tx on HEAD of - * tx_queue - */ - spin_lock_bh(&sched->kss_lock); - list_add(&tx->tx_list, &conn->ksnc_tx_queue); - } else { - /* Complete send; tx -ref */ - ksocknal_tx_decref(tx); - - spin_lock_bh(&sched->kss_lock); - /* assume space for more */ - conn->ksnc_tx_ready = 1; - } - - if (rc == -ENOMEM) { - /* - * Do nothing; after a short timeout, this - * conn will be reposted on kss_tx_conns. - */ - } else if (conn->ksnc_tx_ready && - !list_empty(&conn->ksnc_tx_queue)) { - /* reschedule for tx */ - list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); - } else { - conn->ksnc_tx_scheduled = 0; - /* drop my ref */ - ksocknal_conn_decref(conn); - } - - did_something = 1; - } - if (!did_something || /* nothing to do */ - ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */ - spin_unlock_bh(&sched->kss_lock); - - nloops = 0; - - if (!did_something) { /* wait for something to do */ - rc = wait_event_interruptible_exclusive( - sched->kss_waitq, - !ksocknal_sched_cansleep(sched)); - LASSERT(!rc); - } else { - cond_resched(); - } - - spin_lock_bh(&sched->kss_lock); - } - } - - spin_unlock_bh(&sched->kss_lock); - ksocknal_thread_fini(); - return 0; -} - -/* - * Add connection to kss_rx_conns of scheduler - * and wakeup the scheduler. 
- */ -void ksocknal_read_callback(struct ksock_conn *conn) -{ - struct ksock_sched *sched; - - sched = conn->ksnc_scheduler; - - spin_lock_bh(&sched->kss_lock); - - conn->ksnc_rx_ready = 1; - - if (!conn->ksnc_rx_scheduled) { /* not being progressed */ - list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); - conn->ksnc_rx_scheduled = 1; - /* extra ref for scheduler */ - ksocknal_conn_addref(conn); - - wake_up(&sched->kss_waitq); - } - spin_unlock_bh(&sched->kss_lock); -} - -/* - * Add connection to kss_tx_conns of scheduler - * and wakeup the scheduler. - */ -void ksocknal_write_callback(struct ksock_conn *conn) -{ - struct ksock_sched *sched; - - sched = conn->ksnc_scheduler; - - spin_lock_bh(&sched->kss_lock); - - conn->ksnc_tx_ready = 1; - - if (!conn->ksnc_tx_scheduled && /* not being progressed */ - !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */ - list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); - conn->ksnc_tx_scheduled = 1; - /* extra ref for scheduler */ - ksocknal_conn_addref(conn); - - wake_up(&sched->kss_waitq); - } - - spin_unlock_bh(&sched->kss_lock); -} - -static struct ksock_proto * -ksocknal_parse_proto_version(struct ksock_hello_msg *hello) -{ - __u32 version = 0; - - if (hello->kshm_magic == LNET_PROTO_MAGIC) - version = hello->kshm_version; - else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC)) - version = __swab32(hello->kshm_version); - - if (version) { -#if SOCKNAL_VERSION_DEBUG - if (*ksocknal_tunables.ksnd_protocol == 1) - return NULL; - - if (*ksocknal_tunables.ksnd_protocol == 2 && - version == KSOCK_PROTO_V3) - return NULL; -#endif - if (version == KSOCK_PROTO_V2) - return &ksocknal_protocol_v2x; - - if (version == KSOCK_PROTO_V3) - return &ksocknal_protocol_v3x; - - return NULL; - } - - if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { - struct lnet_magicversion *hmv = (struct lnet_magicversion *)hello; - - BUILD_BUG_ON(sizeof(struct lnet_magicversion) != - offsetof(struct ksock_hello_msg, 
kshm_src_nid)); - - if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) && - hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR)) - return &ksocknal_protocol_v1x; - } - - return NULL; -} - -int -ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn, - lnet_nid_t peer_nid, struct ksock_hello_msg *hello) -{ - /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */ - struct ksock_net *net = (struct ksock_net *)ni->ni_data; - - LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES); - - /* rely on caller to hold a ref on socket so it wouldn't disappear */ - LASSERT(conn->ksnc_proto); - - hello->kshm_src_nid = ni->ni_nid; - hello->kshm_dst_nid = peer_nid; - hello->kshm_src_pid = the_lnet.ln_pid; - - hello->kshm_src_incarnation = net->ksnn_incarnation; - hello->kshm_ctype = conn->ksnc_type; - - return conn->ksnc_proto->pro_send_hello(conn, hello); -} - -static int -ksocknal_invert_type(int type) -{ - switch (type) { - case SOCKLND_CONN_ANY: - case SOCKLND_CONN_CONTROL: - return type; - case SOCKLND_CONN_BULK_IN: - return SOCKLND_CONN_BULK_OUT; - case SOCKLND_CONN_BULK_OUT: - return SOCKLND_CONN_BULK_IN; - default: - return SOCKLND_CONN_NONE; - } -} - -int -ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn, - struct ksock_hello_msg *hello, - struct lnet_process_id *peerid, - __u64 *incarnation) -{ - /* Return < 0 fatal error - * 0 success - * EALREADY lost connection race - * EPROTO protocol version mismatch - */ - struct socket *sock = conn->ksnc_sock; - int active = !!conn->ksnc_proto; - int timeout; - int proto_match; - int rc; - struct ksock_proto *proto; - struct lnet_process_id recv_id; - - /* socket type set on active connections - not set on passive */ - LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE)); - - timeout = active ? 
*ksocknal_tunables.ksnd_timeout : - lnet_acceptor_timeout(); - - rc = lnet_sock_read(sock, &hello->kshm_magic, - sizeof(hello->kshm_magic), timeout); - if (rc) { - CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT(rc < 0); - return rc; - } - - if (hello->kshm_magic != LNET_PROTO_MAGIC && - hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) && - hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { - /* Unexpected magic! */ - CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n", - __cpu_to_le32(hello->kshm_magic), - LNET_PROTO_TCP_MAGIC, - &conn->ksnc_ipaddr); - return -EPROTO; - } - - rc = lnet_sock_read(sock, &hello->kshm_version, - sizeof(hello->kshm_version), timeout); - if (rc) { - CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT(rc < 0); - return rc; - } - - proto = ksocknal_parse_proto_version(hello); - if (!proto) { - if (!active) { - /* unknown protocol from peer, tell peer my protocol */ - conn->ksnc_proto = &ksocknal_protocol_v3x; -#if SOCKNAL_VERSION_DEBUG - if (*ksocknal_tunables.ksnd_protocol == 2) - conn->ksnc_proto = &ksocknal_protocol_v2x; - else if (*ksocknal_tunables.ksnd_protocol == 1) - conn->ksnc_proto = &ksocknal_protocol_v1x; -#endif - hello->kshm_nips = 0; - ksocknal_send_hello(ni, conn, ni->ni_nid, hello); - } - - CERROR("Unknown protocol version (%d.x expected) from %pI4h\n", - conn->ksnc_proto->pro_version, - &conn->ksnc_ipaddr); - - return -EPROTO; - } - - proto_match = (conn->ksnc_proto == proto); - conn->ksnc_proto = proto; - - /* receive the rest of hello message anyway */ - rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout); - if (rc) { - CERROR("Error %d reading or checking hello from from %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT(rc < 0); - return rc; - } - - *incarnation = hello->kshm_src_incarnation; - - if (hello->kshm_src_nid == LNET_NID_ANY) { - CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n", - 
&conn->ksnc_ipaddr); - return -EPROTO; - } - - if (!active && - conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) { - /* Userspace NAL assigns peer process ID from socket */ - recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG; - recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), - conn->ksnc_ipaddr); - } else { - recv_id.nid = hello->kshm_src_nid; - recv_id.pid = hello->kshm_src_pid; - } - - if (!active) { - *peerid = recv_id; - - /* peer determines type */ - conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype); - if (conn->ksnc_type == SOCKLND_CONN_NONE) { - CERROR("Unexpected type %d from %s ip %pI4h\n", - hello->kshm_ctype, libcfs_id2str(*peerid), - &conn->ksnc_ipaddr); - return -EPROTO; - } - - return 0; - } - - if (peerid->pid != recv_id.pid || - peerid->nid != recv_id.nid) { - LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n", - libcfs_id2str(*peerid), - &conn->ksnc_ipaddr, - libcfs_id2str(recv_id)); - return -EPROTO; - } - - if (hello->kshm_ctype == SOCKLND_CONN_NONE) { - /* Possible protocol mismatch or I lost the connection race */ - return proto_match ? 
EALREADY : EPROTO; - } - - if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) { - CERROR("Mismatched types: me %d, %s ip %pI4h %d\n", - conn->ksnc_type, libcfs_id2str(*peerid), - &conn->ksnc_ipaddr, hello->kshm_ctype); - return -EPROTO; - } - - return 0; -} - -static int -ksocknal_connect(struct ksock_route *route) -{ - LIST_HEAD(zombies); - struct ksock_peer *peer = route->ksnr_peer; - int type; - int wanted; - struct socket *sock; - unsigned long deadline; - int retry_later = 0; - int rc = 0; - - deadline = jiffies + *ksocknal_tunables.ksnd_timeout * HZ; - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - LASSERT(route->ksnr_scheduled); - LASSERT(!route->ksnr_connecting); - - route->ksnr_connecting = 1; - - for (;;) { - wanted = ksocknal_route_mask() & ~route->ksnr_connected; - - /* - * stop connecting if peer/route got closed under me, or - * route got connected while queued - */ - if (peer->ksnp_closing || route->ksnr_deleted || - !wanted) { - retry_later = 0; - break; - } - - /* reschedule if peer is connecting to me */ - if (peer->ksnp_accepting > 0) { - CDEBUG(D_NET, - "peer %s(%d) already connecting to me, retry later.\n", - libcfs_nid2str(peer->ksnp_id.nid), - peer->ksnp_accepting); - retry_later = 1; - } - - if (retry_later) /* needs reschedule */ - break; - - if (wanted & BIT(SOCKLND_CONN_ANY)) { - type = SOCKLND_CONN_ANY; - } else if (wanted & BIT(SOCKLND_CONN_CONTROL)) { - type = SOCKLND_CONN_CONTROL; - } else if (wanted & BIT(SOCKLND_CONN_BULK_IN)) { - type = SOCKLND_CONN_BULK_IN; - } else { - LASSERT(wanted & BIT(SOCKLND_CONN_BULK_OUT)); - type = SOCKLND_CONN_BULK_OUT; - } - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - if (time_after_eq(jiffies, deadline)) { - rc = -ETIMEDOUT; - lnet_connect_console_error(rc, peer->ksnp_id.nid, - route->ksnr_ipaddr, - route->ksnr_port); - goto failed; - } - - rc = lnet_connect(&sock, peer->ksnp_id.nid, - route->ksnr_myipaddr, - route->ksnr_ipaddr, route->ksnr_port); - if (rc) - goto 
failed; - - rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type); - if (rc < 0) { - lnet_connect_console_error(rc, peer->ksnp_id.nid, - route->ksnr_ipaddr, - route->ksnr_port); - goto failed; - } - - /* - * A +ve RC means I have to retry because I lost the connection - * race or I have to renegotiate protocol version - */ - retry_later = (rc); - if (retry_later) - CDEBUG(D_NET, "peer %s: conn race, retry later.\n", - libcfs_nid2str(peer->ksnp_id.nid)); - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - } - - route->ksnr_scheduled = 0; - route->ksnr_connecting = 0; - - if (retry_later) { - /* - * re-queue for attention; this frees me up to handle - * the peer's incoming connection request - */ - if (rc == EALREADY || - (!rc && peer->ksnp_accepting > 0)) { - /* - * We want to introduce a delay before next - * attempt to connect if we lost conn race, - * but the race is resolved quickly usually, - * so min_reconnectms should be good heuristic - */ - route->ksnr_retry_interval = - *ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000; - route->ksnr_timeout = jiffies + route->ksnr_retry_interval; - } - - ksocknal_launch_connection_locked(route); - } - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - return retry_later; - - failed: - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - route->ksnr_scheduled = 0; - route->ksnr_connecting = 0; - - /* This is a retry rather than a new connection */ - route->ksnr_retry_interval *= 2; - route->ksnr_retry_interval = - max(route->ksnr_retry_interval, - (long)*ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000); - route->ksnr_retry_interval = - min(route->ksnr_retry_interval, - (long)*ksocknal_tunables.ksnd_max_reconnectms * HZ / 1000); - - LASSERT(route->ksnr_retry_interval); - route->ksnr_timeout = jiffies + route->ksnr_retry_interval; - - if (!list_empty(&peer->ksnp_tx_queue) && - !peer->ksnp_accepting && - !ksocknal_find_connecting_route_locked(peer)) { - struct ksock_conn *conn; - - /* - * ksnp_tx_queue is 
queued on a conn on successful - * connection for V1.x and V2.x - */ - if (!list_empty(&peer->ksnp_conns)) { - conn = list_entry(peer->ksnp_conns.next, - struct ksock_conn, ksnc_list); - LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); - } - - /* - * take all the blocked packets while I've got the lock and - * complete below... - */ - list_splice_init(&peer->ksnp_tx_queue, &zombies); - } - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - ksocknal_peer_failed(peer); - ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1); - return 0; -} - -/* - * check whether we need to create more connds. - * It will try to create new thread if it's necessary, @timeout can - * be updated if failed to create, so caller wouldn't keep try while - * running out of resource. - */ -static int -ksocknal_connd_check_start(time64_t sec, long *timeout) -{ - char name[16]; - int rc; - int total = ksocknal_data.ksnd_connd_starting + - ksocknal_data.ksnd_connd_running; - - if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) { - /* still in initializing */ - return 0; - } - - if (total >= *ksocknal_tunables.ksnd_nconnds_max || - total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) { - /* - * can't create more connd, or still have enough - * threads to handle more connecting - */ - return 0; - } - - if (list_empty(&ksocknal_data.ksnd_connd_routes)) { - /* no pending connecting request */ - return 0; - } - - if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) { - /* may run out of resource, retry later */ - *timeout = HZ; - return 0; - } - - if (ksocknal_data.ksnd_connd_starting > 0) { - /* serialize starting to avoid flood */ - return 0; - } - - ksocknal_data.ksnd_connd_starting_stamp = sec; - ksocknal_data.ksnd_connd_starting++; - spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); - - /* NB: total is the next id */ - snprintf(name, sizeof(name), "socknal_cd%02d", total); - rc = ksocknal_thread_start(ksocknal_connd, NULL, name); - - 
spin_lock_bh(&ksocknal_data.ksnd_connd_lock); - if (!rc) - return 1; - - /* we tried ... */ - LASSERT(ksocknal_data.ksnd_connd_starting > 0); - ksocknal_data.ksnd_connd_starting--; - ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds(); - - return 1; -} - -/* - * check whether current thread can exit, it will return 1 if there are too - * many threads and no creating in past 120 seconds. - * Also, this function may update @timeout to make caller come back - * again to recheck these conditions. - */ -static int -ksocknal_connd_check_stop(time64_t sec, long *timeout) -{ - int val; - - if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) { - /* still in initializing */ - return 0; - } - - if (ksocknal_data.ksnd_connd_starting > 0) { - /* in progress of starting new thread */ - return 0; - } - - if (ksocknal_data.ksnd_connd_running <= - *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */ - return 0; - } - - /* created thread in past 120 seconds? */ - val = (int)(ksocknal_data.ksnd_connd_starting_stamp + - SOCKNAL_CONND_TIMEOUT - sec); - - *timeout = (val > 0) ? 
val * HZ : - SOCKNAL_CONND_TIMEOUT * HZ; - if (val > 0) - return 0; - - /* no creating in past 120 seconds */ - - return ksocknal_data.ksnd_connd_running > - ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV; -} - -/* - * Go through connd_routes queue looking for a route that we can process - * right now, @timeout_p can be updated if we need to come back later - */ -static struct ksock_route * -ksocknal_connd_get_route_locked(signed long *timeout_p) -{ - struct ksock_route *route; - unsigned long now; - - now = jiffies; - - /* connd_routes can contain both pending and ordinary routes */ - list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes, - ksnr_connd_list) { - if (!route->ksnr_retry_interval || - time_after_eq(now, route->ksnr_timeout)) - return route; - - if (*timeout_p == MAX_SCHEDULE_TIMEOUT || - (int)*timeout_p > (int)(route->ksnr_timeout - now)) - *timeout_p = (int)(route->ksnr_timeout - now); - } - - return NULL; -} - -int -ksocknal_connd(void *arg) -{ - spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; - struct ksock_connreq *cr; - wait_queue_entry_t wait; - int nloops = 0; - int cons_retry = 0; - - init_waitqueue_entry(&wait, current); - - spin_lock_bh(connd_lock); - - LASSERT(ksocknal_data.ksnd_connd_starting > 0); - ksocknal_data.ksnd_connd_starting--; - ksocknal_data.ksnd_connd_running++; - - while (!ksocknal_data.ksnd_shuttingdown) { - struct ksock_route *route = NULL; - time64_t sec = ktime_get_real_seconds(); - long timeout = MAX_SCHEDULE_TIMEOUT; - int dropped_lock = 0; - - if (ksocknal_connd_check_stop(sec, &timeout)) { - /* wakeup another one to check stop */ - wake_up(&ksocknal_data.ksnd_connd_waitq); - break; - } - - if (ksocknal_connd_check_start(sec, &timeout)) { - /* created new thread */ - dropped_lock = 1; - } - - if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) { - /* Connection accepted by the listener */ - cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next, - struct ksock_connreq, ksncr_list); - - 
list_del(&cr->ksncr_list); - spin_unlock_bh(connd_lock); - dropped_lock = 1; - - ksocknal_create_conn(cr->ksncr_ni, NULL, - cr->ksncr_sock, SOCKLND_CONN_NONE); - lnet_ni_decref(cr->ksncr_ni); - kfree(cr); - - spin_lock_bh(connd_lock); - } - - /* - * Only handle an outgoing connection request if there - * is a thread left to handle incoming connections and - * create new connd - */ - if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV < - ksocknal_data.ksnd_connd_running) { - route = ksocknal_connd_get_route_locked(&timeout); - } - if (route) { - list_del(&route->ksnr_connd_list); - ksocknal_data.ksnd_connd_connecting++; - spin_unlock_bh(connd_lock); - dropped_lock = 1; - - if (ksocknal_connect(route)) { - /* consecutive retry */ - if (cons_retry++ > SOCKNAL_INSANITY_RECONN) { - CWARN("massive consecutive re-connecting to %pI4h\n", - &route->ksnr_ipaddr); - cons_retry = 0; - } - } else { - cons_retry = 0; - } - - ksocknal_route_decref(route); - - spin_lock_bh(connd_lock); - ksocknal_data.ksnd_connd_connecting--; - } - - if (dropped_lock) { - if (++nloops < SOCKNAL_RESCHED) - continue; - spin_unlock_bh(connd_lock); - nloops = 0; - cond_resched(); - spin_lock_bh(connd_lock); - continue; - } - - /* Nothing to do for 'timeout' */ - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, - &wait); - spin_unlock_bh(connd_lock); - - nloops = 0; - schedule_timeout(timeout); - - remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait); - spin_lock_bh(connd_lock); - } - ksocknal_data.ksnd_connd_running--; - spin_unlock_bh(connd_lock); - - ksocknal_thread_fini(); - return 0; -} - -static struct ksock_conn * -ksocknal_find_timed_out_conn(struct ksock_peer *peer) -{ - /* We're called with a shared lock on ksnd_global_lock */ - struct ksock_conn *conn; - struct list_head *ctmp; - - list_for_each(ctmp, &peer->ksnp_conns) { - int error; - - conn = list_entry(ctmp, struct ksock_conn, ksnc_list); - - /* Don't need the 
{get,put}connsock dance to deref ksnc_sock */ - LASSERT(!conn->ksnc_closing); - - /* - * SOCK_ERROR will reset error code of socket in - * some platform (like Darwin8.x) - */ - error = conn->ksnc_sock->sk->sk_err; - if (error) { - ksocknal_conn_addref(conn); - - switch (error) { - case ECONNRESET: - CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n", - libcfs_id2str(peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - break; - case ETIMEDOUT: - CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n", - libcfs_id2str(peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - break; - default: - CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d\n", - error, - libcfs_id2str(peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - break; - } - - return conn; - } - - if (conn->ksnc_rx_started && - time_after_eq(jiffies, - conn->ksnc_rx_deadline)) { - /* Timed out incomplete incoming message */ - ksocknal_conn_addref(conn); - CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n", - libcfs_id2str(peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port, - conn->ksnc_rx_state, - iov_iter_count(&conn->ksnc_rx_to), - conn->ksnc_rx_nob_left); - return conn; - } - - if ((!list_empty(&conn->ksnc_tx_queue) || - conn->ksnc_sock->sk->sk_wmem_queued) && - time_after_eq(jiffies, - conn->ksnc_tx_deadline)) { - /* - * Timed out messages queued for sending or - * buffered in the socket's send buffer - */ - ksocknal_conn_addref(conn); - CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n", - libcfs_id2str(peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); - return conn; - } - } - - return NULL; -} - -static inline void -ksocknal_flush_stale_txs(struct ksock_peer *peer) -{ - struct ksock_tx *tx; - struct ksock_tx *tmp; - LIST_HEAD(stale_txs); - - write_lock_bh(&ksocknal_data.ksnd_global_lock); - - list_for_each_entry_safe(tx, tmp, 
&peer->ksnp_tx_queue, tx_list) { - if (!time_after_eq(jiffies, - tx->tx_deadline)) - break; - - list_del(&tx->tx_list); - list_add_tail(&tx->tx_list, &stale_txs); - } - - write_unlock_bh(&ksocknal_data.ksnd_global_lock); - - ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1); -} - -static int -ksocknal_send_keepalive_locked(struct ksock_peer *peer) - __must_hold(&ksocknal_data.ksnd_global_lock) -{ - struct ksock_sched *sched; - struct ksock_conn *conn; - struct ksock_tx *tx; - - /* last_alive will be updated by create_conn */ - if (list_empty(&peer->ksnp_conns)) - return 0; - - if (peer->ksnp_proto != &ksocknal_protocol_v3x) - return 0; - - if (*ksocknal_tunables.ksnd_keepalive <= 0 || - time_before(jiffies, - peer->ksnp_last_alive + *ksocknal_tunables.ksnd_keepalive * HZ)) - return 0; - - if (time_before(jiffies, peer->ksnp_send_keepalive)) - return 0; - - /* - * retry 10 secs later, so we wouldn't put pressure - * on this peer if we failed to send keepalive this time - */ - peer->ksnp_send_keepalive = jiffies + 10 * HZ; - - conn = ksocknal_find_conn_locked(peer, NULL, 1); - if (conn) { - sched = conn->ksnc_scheduler; - - spin_lock_bh(&sched->kss_lock); - if (!list_empty(&conn->ksnc_tx_queue)) { - spin_unlock_bh(&sched->kss_lock); - /* there is an queued ACK, don't need keepalive */ - return 0; - } - - spin_unlock_bh(&sched->kss_lock); - } - - read_unlock(&ksocknal_data.ksnd_global_lock); - - /* cookie = 1 is reserved for keepalive PING */ - tx = ksocknal_alloc_tx_noop(1, 1); - if (!tx) { - read_lock(&ksocknal_data.ksnd_global_lock); - return -ENOMEM; - } - - if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) { - read_lock(&ksocknal_data.ksnd_global_lock); - return 1; - } - - ksocknal_free_tx(tx); - read_lock(&ksocknal_data.ksnd_global_lock); - - return -EIO; -} - -static void -ksocknal_check_peer_timeouts(int idx) -{ - struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; - struct ksock_peer *peer; - struct ksock_conn *conn; - struct ksock_tx *tx; 
- - again: - /* - * NB. We expect to have a look at all the peers and not find any - * connections to time out, so we just use a shared lock while we - * take a look... - */ - read_lock(&ksocknal_data.ksnd_global_lock); - - list_for_each_entry(peer, peers, ksnp_list) { - unsigned long deadline = 0; - struct ksock_tx *tx_stale; - int resid = 0; - int n = 0; - - if (ksocknal_send_keepalive_locked(peer)) { - read_unlock(&ksocknal_data.ksnd_global_lock); - goto again; - } - - conn = ksocknal_find_timed_out_conn(peer); - - if (conn) { - read_unlock(&ksocknal_data.ksnd_global_lock); - - ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); - - /* - * NB we won't find this one again, but we can't - * just proceed with the next peer, since we dropped - * ksnd_global_lock and it might be dead already! - */ - ksocknal_conn_decref(conn); - goto again; - } - - /* - * we can't process stale txs right here because we're - * holding only shared lock - */ - if (!list_empty(&peer->ksnp_tx_queue)) { - tx = list_entry(peer->ksnp_tx_queue.next, - struct ksock_tx, tx_list); - - if (time_after_eq(jiffies, - tx->tx_deadline)) { - ksocknal_peer_addref(peer); - read_unlock(&ksocknal_data.ksnd_global_lock); - - ksocknal_flush_stale_txs(peer); - - ksocknal_peer_decref(peer); - goto again; - } - } - - if (list_empty(&peer->ksnp_zc_req_list)) - continue; - - tx_stale = NULL; - spin_lock(&peer->ksnp_lock); - list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) { - if (!time_after_eq(jiffies, - tx->tx_deadline)) - break; - /* ignore the TX if connection is being closed */ - if (tx->tx_conn->ksnc_closing) - continue; - if (!tx_stale) - tx_stale = tx; - n++; - } - - if (!tx_stale) { - spin_unlock(&peer->ksnp_lock); - continue; - } - - deadline = tx_stale->tx_deadline; - resid = tx_stale->tx_resid; - conn = tx_stale->tx_conn; - ksocknal_conn_addref(conn); - - spin_unlock(&peer->ksnp_lock); - read_unlock(&ksocknal_data.ksnd_global_lock); - - CERROR("Total %d stale ZC_REQs for peer %s detected; 
the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n", - n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale, - (jiffies - deadline) / HZ, - resid, conn->ksnc_sock->sk->sk_wmem_queued); - - ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); - ksocknal_conn_decref(conn); - goto again; - } - - read_unlock(&ksocknal_data.ksnd_global_lock); -} - -int -ksocknal_reaper(void *arg) -{ - wait_queue_entry_t wait; - struct ksock_conn *conn; - struct ksock_sched *sched; - struct list_head enomem_conns; - int nenomem_conns; - long timeout; - int i; - int peer_index = 0; - unsigned long deadline = jiffies; - - INIT_LIST_HEAD(&enomem_conns); - init_waitqueue_entry(&wait, current); - - spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - - while (!ksocknal_data.ksnd_shuttingdown) { - if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) { - conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next, - struct ksock_conn, ksnc_list); - list_del(&conn->ksnc_list); - - spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); - - ksocknal_terminate_conn(conn); - ksocknal_conn_decref(conn); - - spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - continue; - } - - if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) { - conn = list_entry(ksocknal_data.ksnd_zombie_conns.next, - struct ksock_conn, ksnc_list); - list_del(&conn->ksnc_list); - - spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); - - ksocknal_destroy_conn(conn); - - spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - continue; - } - - if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) { - list_add(&enomem_conns, - &ksocknal_data.ksnd_enomem_conns); - list_del_init(&ksocknal_data.ksnd_enomem_conns); - } - - spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); - - /* reschedule all the connections that stalled with ENOMEM... 
*/ - nenomem_conns = 0; - while (!list_empty(&enomem_conns)) { - conn = list_entry(enomem_conns.next, struct ksock_conn, - ksnc_tx_list); - list_del(&conn->ksnc_tx_list); - - sched = conn->ksnc_scheduler; - - spin_lock_bh(&sched->kss_lock); - - LASSERT(conn->ksnc_tx_scheduled); - conn->ksnc_tx_ready = 1; - list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); - wake_up(&sched->kss_waitq); - - spin_unlock_bh(&sched->kss_lock); - nenomem_conns++; - } - - /* careful with the jiffy wrap... */ - while ((timeout = deadline - jiffies) <= 0) { - const int n = 4; - const int p = 1; - int chunk = ksocknal_data.ksnd_peer_hash_size; - - /* - * Time to check for timeouts on a few more peers: I do - * checks every 'p' seconds on a proportion of the peer - * table and I need to check every connection 'n' times - * within a timeout interval, to ensure I detect a - * timeout on any connection within (n+1)/n times the - * timeout interval. - */ - if (*ksocknal_tunables.ksnd_timeout > n * p) - chunk = (chunk * n * p) / - *ksocknal_tunables.ksnd_timeout; - if (!chunk) - chunk = 1; - - for (i = 0; i < chunk; i++) { - ksocknal_check_peer_timeouts(peer_index); - peer_index = (peer_index + 1) % - ksocknal_data.ksnd_peer_hash_size; - } - - deadline = deadline + p * HZ; - } - - if (nenomem_conns) { - /* - * Reduce my timeout if I rescheduled ENOMEM conns. - * This also prevents me getting woken immediately - * if any go back on my enomem list. 
- */ - timeout = SOCKNAL_ENOMEM_RETRY; - } - ksocknal_data.ksnd_reaper_waketime = jiffies + timeout; - - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait); - - if (!ksocknal_data.ksnd_shuttingdown && - list_empty(&ksocknal_data.ksnd_deathrow_conns) && - list_empty(&ksocknal_data.ksnd_zombie_conns)) - schedule_timeout(timeout); - - set_current_state(TASK_RUNNING); - remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait); - - spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - } - - spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); - - ksocknal_thread_fini(); - return 0; -} diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c deleted file mode 100644 index 93a02cd6b6b5..000000000000 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c +++ /dev/null @@ -1,534 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#include -#include "socklnd.h" - -int -ksocknal_lib_get_conn_addrs(struct ksock_conn *conn) -{ - int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr, - &conn->ksnc_port); - - /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */ - LASSERT(!conn->ksnc_closing); - - if (rc) { - CERROR("Error %d getting sock peer IP\n", rc); - return rc; - } - - rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL); - if (rc) { - CERROR("Error %d getting sock local IP\n", rc); - return rc; - } - - return 0; -} - -int -ksocknal_lib_zc_capable(struct ksock_conn *conn) -{ - int caps = conn->ksnc_sock->sk->sk_route_caps; - - if (conn->ksnc_proto == &ksocknal_protocol_v1x) - return 0; - - /* - * ZC if the socket supports scatter/gather and doesn't need software - * checksums - */ - return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK)); -} - -int -ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx) -{ - struct msghdr msg = {.msg_flags = MSG_DONTWAIT}; - struct socket *sock = conn->ksnc_sock; - int nob, i; - - if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */ - conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */ - tx->tx_nob == tx->tx_resid && /* frist sending */ - !tx->tx_msg.ksm_csum) /* not checksummed */ - ksocknal_lib_csum_tx(tx); - - for (nob = i = 0; i < tx->tx_niov; i++) - nob += tx->tx_iov[i].iov_len; - - if (!list_empty(&conn->ksnc_tx_queue) || - nob < tx->tx_resid) - msg.msg_flags |= MSG_MORE; - - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, - tx->tx_iov, tx->tx_niov, nob); - return sock_sendmsg(sock, &msg); -} - -int -ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx) -{ - struct socket *sock = conn->ksnc_sock; - struct bio_vec *kiov = tx->tx_kiov; - int rc; - int nob; - - /* Not NOOP message */ - LASSERT(tx->tx_lnetmsg); - - if 
(tx->tx_msg.ksm_zc_cookies[0]) { - /* Zero copy is enabled */ - struct sock *sk = sock->sk; - struct page *page = kiov->bv_page; - int offset = kiov->bv_offset; - int fragsize = kiov->bv_len; - int msgflg = MSG_DONTWAIT; - - CDEBUG(D_NET, "page %p + offset %x for %d\n", - page, offset, kiov->bv_len); - - if (!list_empty(&conn->ksnc_tx_queue) || - fragsize < tx->tx_resid) - msgflg |= MSG_MORE; - - if (sk->sk_prot->sendpage) { - rc = sk->sk_prot->sendpage(sk, page, - offset, fragsize, msgflg); - } else { - rc = tcp_sendpage(sk, page, offset, fragsize, msgflg); - } - } else { - struct msghdr msg = {.msg_flags = MSG_DONTWAIT}; - int i; - - for (nob = i = 0; i < tx->tx_nkiov; i++) - nob += kiov[i].bv_len; - - if (!list_empty(&conn->ksnc_tx_queue) || - nob < tx->tx_resid) - msg.msg_flags |= MSG_MORE; - - iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, - kiov, tx->tx_nkiov, nob); - rc = sock_sendmsg(sock, &msg); - } - return rc; -} - -void -ksocknal_lib_eager_ack(struct ksock_conn *conn) -{ - int opt = 1; - struct socket *sock = conn->ksnc_sock; - - /* - * Remind the socket to ACK eagerly. If I don't, the socket might - * think I'm about to send something it could piggy-back the ACK - * on, introducing delay in completing zero-copy sends in my - * peer. 
- */ - kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt, - sizeof(opt)); -} - -static int lustre_csum(struct kvec *v, void *context) -{ - struct ksock_conn *conn = context; - conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum, - v->iov_base, v->iov_len); - return 0; -} - -int -ksocknal_lib_recv(struct ksock_conn *conn) -{ - struct msghdr msg = { .msg_iter = conn->ksnc_rx_to }; - __u32 saved_csum; - int rc; - - rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT); - if (rc <= 0) - return rc; - - saved_csum = conn->ksnc_msg.ksm_csum; - if (!saved_csum) - return rc; - - /* header is included only in V2 - V3 checksums only the bulk data */ - if (!(conn->ksnc_rx_to.type & ITER_BVEC) && - conn->ksnc_proto != &ksocknal_protocol_v2x) - return rc; - - /* accumulate checksum */ - conn->ksnc_msg.ksm_csum = 0; - iov_iter_for_each_range(&conn->ksnc_rx_to, rc, lustre_csum, conn); - conn->ksnc_msg.ksm_csum = saved_csum; - - return rc; -} - -void -ksocknal_lib_csum_tx(struct ksock_tx *tx) -{ - int i; - __u32 csum; - void *base; - - LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg); - LASSERT(tx->tx_conn); - LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x); - - tx->tx_msg.ksm_csum = 0; - - csum = crc32_le(~0, tx->tx_iov[0].iov_base, - tx->tx_iov[0].iov_len); - - if (tx->tx_kiov) { - for (i = 0; i < tx->tx_nkiov; i++) { - base = kmap(tx->tx_kiov[i].bv_page) + - tx->tx_kiov[i].bv_offset; - - csum = crc32_le(csum, base, tx->tx_kiov[i].bv_len); - - kunmap(tx->tx_kiov[i].bv_page); - } - } else { - for (i = 1; i < tx->tx_niov; i++) - csum = crc32_le(csum, tx->tx_iov[i].iov_base, - tx->tx_iov[i].iov_len); - } - - if (*ksocknal_tunables.ksnd_inject_csum_error) { - csum++; - *ksocknal_tunables.ksnd_inject_csum_error = 0; - } - - tx->tx_msg.ksm_csum = csum; -} - -int -ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, - int *rxmem, int *nagle) -{ - struct socket *sock = conn->ksnc_sock; - int len; - int rc; - - rc = ksocknal_connsock_addref(conn); - if (rc) 
{ - LASSERT(conn->ksnc_closing); - *txmem = *rxmem = *nagle = 0; - return -ESHUTDOWN; - } - - rc = lnet_sock_getbuf(sock, txmem, rxmem); - if (!rc) { - len = sizeof(*nagle); - rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)nagle, &len); - } - - ksocknal_connsock_decref(conn); - - if (!rc) - *nagle = !*nagle; - else - *txmem = *rxmem = *nagle = 0; - - return rc; -} - -int -ksocknal_lib_setup_sock(struct socket *sock) -{ - int rc; - int option; - int keep_idle; - int keep_intvl; - int keep_count; - int do_keepalive; - struct linger linger; - - sock->sk->sk_allocation = GFP_NOFS; - - /* - * Ensure this socket aborts active sends immediately when we close - * it. - */ - linger.l_onoff = 0; - linger.l_linger = 0; - - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger, - sizeof(linger)); - if (rc) { - CERROR("Can't set SO_LINGER: %d\n", rc); - return rc; - } - - option = -1; - rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option, - sizeof(option)); - if (rc) { - CERROR("Can't set SO_LINGER2: %d\n", rc); - return rc; - } - - if (!*ksocknal_tunables.ksnd_nagle) { - option = 1; - - rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)&option, sizeof(option)); - if (rc) { - CERROR("Can't disable nagle: %d\n", rc); - return rc; - } - } - - rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size, - *ksocknal_tunables.ksnd_rx_buffer_size); - if (rc) { - CERROR("Can't set buffer tx %d, rx %d buffers: %d\n", - *ksocknal_tunables.ksnd_tx_buffer_size, - *ksocknal_tunables.ksnd_rx_buffer_size, rc); - return rc; - } - -/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */ - - /* snapshot tunables */ - keep_idle = *ksocknal_tunables.ksnd_keepalive_idle; - keep_count = *ksocknal_tunables.ksnd_keepalive_count; - keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl; - - do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0); - - option = (do_keepalive ? 
1 : 0); - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option, - sizeof(option)); - if (rc) { - CERROR("Can't set SO_KEEPALIVE: %d\n", rc); - return rc; - } - - if (!do_keepalive) - return 0; - - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle, - sizeof(keep_idle)); - if (rc) { - CERROR("Can't set TCP_KEEPIDLE: %d\n", rc); - return rc; - } - - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, - (char *)&keep_intvl, sizeof(keep_intvl)); - if (rc) { - CERROR("Can't set TCP_KEEPINTVL: %d\n", rc); - return rc; - } - - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count, - sizeof(keep_count)); - if (rc) { - CERROR("Can't set TCP_KEEPCNT: %d\n", rc); - return rc; - } - - return 0; -} - -void -ksocknal_lib_push_conn(struct ksock_conn *conn) -{ - struct sock *sk; - struct tcp_sock *tp; - int nonagle; - int val = 1; - int rc; - - rc = ksocknal_connsock_addref(conn); - if (rc) /* being shut down */ - return; - - sk = conn->ksnc_sock->sk; - tp = tcp_sk(sk); - - lock_sock(sk); - nonagle = tp->nonagle; - tp->nonagle = 1; - release_sock(sk); - - rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - LASSERT(!rc); - - lock_sock(sk); - tp->nonagle = nonagle; - release_sock(sk); - - ksocknal_connsock_decref(conn); -} - -/* - * socket call back in Linux - */ -static void -ksocknal_data_ready(struct sock *sk) -{ - struct ksock_conn *conn; - - /* interleave correctly with closing sockets... */ - LASSERT(!in_irq()); - read_lock(&ksocknal_data.ksnd_global_lock); - - conn = sk->sk_user_data; - if (!conn) { /* raced with ksocknal_terminate_conn */ - LASSERT(sk->sk_data_ready != &ksocknal_data_ready); - sk->sk_data_ready(sk); - } else { - ksocknal_read_callback(conn); - } - - read_unlock(&ksocknal_data.ksnd_global_lock); -} - -static void -ksocknal_write_space(struct sock *sk) -{ - struct ksock_conn *conn; - int wspace; - int min_wpace; - - /* interleave correctly with closing sockets... 
*/ - LASSERT(!in_irq()); - read_lock(&ksocknal_data.ksnd_global_lock); - - conn = sk->sk_user_data; - wspace = sk_stream_wspace(sk); - min_wpace = sk_stream_min_wspace(sk); - - CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n", - sk, wspace, min_wpace, conn, - !conn ? "" : (conn->ksnc_tx_ready ? - " ready" : " blocked"), - !conn ? "" : (conn->ksnc_tx_scheduled ? - " scheduled" : " idle"), - !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ? - " empty" : " queued")); - - if (!conn) { /* raced with ksocknal_terminate_conn */ - LASSERT(sk->sk_write_space != &ksocknal_write_space); - sk->sk_write_space(sk); - - read_unlock(&ksocknal_data.ksnd_global_lock); - return; - } - - if (wspace >= min_wpace) { /* got enough space */ - ksocknal_write_callback(conn); - - /* - * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the - * ENOMEM check in ksocknal_transmit is race-free (think about - * it). - */ - clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - } - - read_unlock(&ksocknal_data.ksnd_global_lock); -} - -void -ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn) -{ - conn->ksnc_saved_data_ready = sock->sk->sk_data_ready; - conn->ksnc_saved_write_space = sock->sk->sk_write_space; -} - -void -ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn) -{ - sock->sk->sk_user_data = conn; - sock->sk->sk_data_ready = ksocknal_data_ready; - sock->sk->sk_write_space = ksocknal_write_space; -} - -void -ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn) -{ - /* - * Remove conn's network callbacks. - * NB I _have_ to restore the callback, rather than storing a noop, - * since the socket could survive past this module being unloaded!! - */ - sock->sk->sk_data_ready = conn->ksnc_saved_data_ready; - sock->sk->sk_write_space = conn->ksnc_saved_write_space; - - /* - * A callback could be in progress already; they hold a read lock - * on ksnd_global_lock (to serialise with me) and NOOP if - * sk_user_data is NULL. 
- */ - sock->sk->sk_user_data = NULL; -} - -int -ksocknal_lib_memory_pressure(struct ksock_conn *conn) -{ - int rc = 0; - struct ksock_sched *sched; - - sched = conn->ksnc_scheduler; - spin_lock_bh(&sched->kss_lock); - - if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) && - !conn->ksnc_tx_ready) { - /* - * SOCK_NOSPACE is set when the socket fills - * and cleared in the write_space callback - * (which also sets ksnc_tx_ready). If - * SOCK_NOSPACE and ksnc_tx_ready are BOTH - * zero, I didn't fill the socket and - * write_space won't reschedule me, so I - * return -ENOMEM to get my caller to retry - * after a timeout - */ - rc = -ENOMEM; - } - - spin_unlock_bh(&sched->kss_lock); - - return rc; -} diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c deleted file mode 100644 index 5663a4ca94d4..000000000000 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c +++ /dev/null @@ -1,184 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2011, 2012, Intel Corporation. - * - * Author: Eric Barton - * - * Portals is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * Portals is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include "socklnd.h" - -static int sock_timeout = 50; -module_param(sock_timeout, int, 0644); -MODULE_PARM_DESC(sock_timeout, "dead socket timeout (seconds)"); - -static int credits = 256; -module_param(credits, int, 0444); -MODULE_PARM_DESC(credits, "# concurrent sends"); - -static int peer_credits = 8; -module_param(peer_credits, int, 0444); -MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer"); - -static int peer_buffer_credits; -module_param(peer_buffer_credits, int, 0444); -MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits"); - -static int peer_timeout = 180; -module_param(peer_timeout, int, 0444); -MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)"); - -/* - * Number of daemons in each thread pool which is percpt, - * we will estimate reasonable value based on CPUs if it's not set. - */ -static unsigned int nscheds; -module_param(nscheds, int, 0444); -MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting"); - -static int nconnds = 4; -module_param(nconnds, int, 0444); -MODULE_PARM_DESC(nconnds, "# connection daemons while starting"); - -static int nconnds_max = 64; -module_param(nconnds_max, int, 0444); -MODULE_PARM_DESC(nconnds_max, "max # connection daemons"); - -static int min_reconnectms = 1000; -module_param(min_reconnectms, int, 0644); -MODULE_PARM_DESC(min_reconnectms, "min connection retry interval (mS)"); - -static int max_reconnectms = 60000; -module_param(max_reconnectms, int, 0644); -MODULE_PARM_DESC(max_reconnectms, "max connection retry interval (mS)"); - -# define DEFAULT_EAGER_ACK 0 -static int eager_ack = DEFAULT_EAGER_ACK; -module_param(eager_ack, int, 0644); -MODULE_PARM_DESC(eager_ack, "send tcp ack packets eagerly"); - -static int typed_conns = 1; -module_param(typed_conns, int, 0444); -MODULE_PARM_DESC(typed_conns, "use different sockets for bulk"); - -static int min_bulk = 1 << 10; -module_param(min_bulk, int, 0644); 
-MODULE_PARM_DESC(min_bulk, "smallest 'large' message"); - -# define DEFAULT_BUFFER_SIZE 0 -static int tx_buffer_size = DEFAULT_BUFFER_SIZE; -module_param(tx_buffer_size, int, 0644); -MODULE_PARM_DESC(tx_buffer_size, "socket tx buffer size (0 for system default)"); - -static int rx_buffer_size = DEFAULT_BUFFER_SIZE; -module_param(rx_buffer_size, int, 0644); -MODULE_PARM_DESC(rx_buffer_size, "socket rx buffer size (0 for system default)"); - -static int nagle; -module_param(nagle, int, 0644); -MODULE_PARM_DESC(nagle, "enable NAGLE?"); - -static int round_robin = 1; -module_param(round_robin, int, 0644); -MODULE_PARM_DESC(round_robin, "Round robin for multiple interfaces"); - -static int keepalive = 30; -module_param(keepalive, int, 0644); -MODULE_PARM_DESC(keepalive, "# seconds before send keepalive"); - -static int keepalive_idle = 30; -module_param(keepalive_idle, int, 0644); -MODULE_PARM_DESC(keepalive_idle, "# idle seconds before probe"); - -#define DEFAULT_KEEPALIVE_COUNT 5 -static int keepalive_count = DEFAULT_KEEPALIVE_COUNT; -module_param(keepalive_count, int, 0644); -MODULE_PARM_DESC(keepalive_count, "# missed probes == dead"); - -static int keepalive_intvl = 5; -module_param(keepalive_intvl, int, 0644); -MODULE_PARM_DESC(keepalive_intvl, "seconds between probes"); - -static int enable_csum; -module_param(enable_csum, int, 0644); -MODULE_PARM_DESC(enable_csum, "enable check sum"); - -static int inject_csum_error; -module_param(inject_csum_error, int, 0644); -MODULE_PARM_DESC(inject_csum_error, "set non-zero to inject a checksum error"); - -static int nonblk_zcack = 1; -module_param(nonblk_zcack, int, 0644); -MODULE_PARM_DESC(nonblk_zcack, "always send ZC-ACK on non-blocking connection"); - -static unsigned int zc_min_payload = 16 << 10; -module_param(zc_min_payload, int, 0644); -MODULE_PARM_DESC(zc_min_payload, "minimum payload size to zero copy"); - -static unsigned int zc_recv; -module_param(zc_recv, int, 0644); -MODULE_PARM_DESC(zc_recv, "enable ZC recv 
for Chelsio driver"); - -static unsigned int zc_recv_min_nfrags = 16; -module_param(zc_recv_min_nfrags, int, 0644); -MODULE_PARM_DESC(zc_recv_min_nfrags, "minimum # of fragments to enable ZC recv"); - -#if SOCKNAL_VERSION_DEBUG -static int protocol = 3; -module_param(protocol, int, 0644); -MODULE_PARM_DESC(protocol, "protocol version"); -#endif - -struct ksock_tunables ksocknal_tunables; - -int ksocknal_tunables_init(void) -{ - /* initialize ksocknal_tunables structure */ - ksocknal_tunables.ksnd_timeout = &sock_timeout; - ksocknal_tunables.ksnd_nscheds = &nscheds; - ksocknal_tunables.ksnd_nconnds = &nconnds; - ksocknal_tunables.ksnd_nconnds_max = &nconnds_max; - ksocknal_tunables.ksnd_min_reconnectms = &min_reconnectms; - ksocknal_tunables.ksnd_max_reconnectms = &max_reconnectms; - ksocknal_tunables.ksnd_eager_ack = &eager_ack; - ksocknal_tunables.ksnd_typed_conns = &typed_conns; - ksocknal_tunables.ksnd_min_bulk = &min_bulk; - ksocknal_tunables.ksnd_tx_buffer_size = &tx_buffer_size; - ksocknal_tunables.ksnd_rx_buffer_size = &rx_buffer_size; - ksocknal_tunables.ksnd_nagle = &nagle; - ksocknal_tunables.ksnd_round_robin = &round_robin; - ksocknal_tunables.ksnd_keepalive = &keepalive; - ksocknal_tunables.ksnd_keepalive_idle = &keepalive_idle; - ksocknal_tunables.ksnd_keepalive_count = &keepalive_count; - ksocknal_tunables.ksnd_keepalive_intvl = &keepalive_intvl; - ksocknal_tunables.ksnd_credits = &credits; - ksocknal_tunables.ksnd_peertxcredits = &peer_credits; - ksocknal_tunables.ksnd_peerrtrcredits = &peer_buffer_credits; - ksocknal_tunables.ksnd_peertimeout = &peer_timeout; - ksocknal_tunables.ksnd_enable_csum = &enable_csum; - ksocknal_tunables.ksnd_inject_csum_error = &inject_csum_error; - ksocknal_tunables.ksnd_nonblk_zcack = &nonblk_zcack; - ksocknal_tunables.ksnd_zc_min_payload = &zc_min_payload; - ksocknal_tunables.ksnd_zc_recv = &zc_recv; - ksocknal_tunables.ksnd_zc_recv_min_nfrags = &zc_recv_min_nfrags; - -#if SOCKNAL_VERSION_DEBUG - 
ksocknal_tunables.ksnd_protocol = &protocol; -#endif - - if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10)) - *ksocknal_tunables.ksnd_zc_min_payload = 2 << 10; - - return 0; -}; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c deleted file mode 100644 index 05982dac781c..000000000000 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c +++ /dev/null @@ -1,810 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2012, Intel Corporation. - * - * Author: Zach Brown - * Author: Peter J. Braam - * Author: Phil Schwan - * Author: Eric Barton - * - * This file is part of Portals, http://www.sf.net/projects/sandiaportals/ - * - * Portals is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * Portals is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include "socklnd.h" - -/* - * Protocol entries : - * pro_send_hello : send hello message - * pro_recv_hello : receive hello message - * pro_pack : pack message header - * pro_unpack : unpack message header - * pro_queue_tx_zcack() : Called holding BH lock: kss_lock - * return 1 if ACK is piggybacked, otherwise return 0 - * pro_queue_tx_msg() : Called holding BH lock: kss_lock - * return the ACK that piggybacked by my message, or NULL - * pro_handle_zcreq() : handler of incoming ZC-REQ - * pro_handle_zcack() : handler of incoming ZC-ACK - * pro_match_tx() : Called holding glock - */ - -static struct ksock_tx * -ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg) -{ - /* V1.x, just enqueue it */ - list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); - return NULL; -} - -void -ksocknal_next_tx_carrier(struct ksock_conn *conn) -{ - struct ksock_tx *tx = conn->ksnc_tx_carrier; - - /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ - LASSERT(!list_empty(&conn->ksnc_tx_queue)); - LASSERT(tx); - - /* Next TX that can carry ZC-ACK or LNet message */ - if (tx->tx_list.next == &conn->ksnc_tx_queue) { - /* no more packets queued */ - conn->ksnc_tx_carrier = NULL; - } else { - conn->ksnc_tx_carrier = list_next_entry(tx, tx_list); - LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type); - } -} - -static int -ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn, - struct ksock_tx *tx_ack, __u64 cookie) -{ - struct ksock_tx *tx = conn->ksnc_tx_carrier; - - LASSERT(!tx_ack || - tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); - - /* - * Enqueue or piggyback tx_ack / cookie - * . no tx can piggyback cookie of tx_ack (or cookie), just - * enqueue the tx_ack (if tx_ack != NUL) and return NULL. - * . There is tx can piggyback cookie of tx_ack (or cookie), - * piggyback the cookie and return the tx. 
- */ - if (!tx) { - if (tx_ack) { - list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); - conn->ksnc_tx_carrier = tx_ack; - } - return 0; - } - - if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) { - /* tx is noop zc-ack, can't piggyback zc-ack cookie */ - if (tx_ack) - list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); - return 0; - } - - LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET); - LASSERT(!tx->tx_msg.ksm_zc_cookies[1]); - - if (tx_ack) - cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; - - /* piggyback the zc-ack cookie */ - tx->tx_msg.ksm_zc_cookies[1] = cookie; - /* move on to the next TX which can carry cookie */ - ksocknal_next_tx_carrier(conn); - - return 1; -} - -static struct ksock_tx * -ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg) -{ - struct ksock_tx *tx = conn->ksnc_tx_carrier; - - /* - * Enqueue tx_msg: - * . If there is no NOOP on the connection, just enqueue - * tx_msg and return NULL - * . If there is NOOP on the connection, piggyback the cookie - * and replace the NOOP tx, and return the NOOP tx. 
- */ - if (!tx) { /* nothing on queue */ - list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); - conn->ksnc_tx_carrier = tx_msg; - return NULL; - } - - if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */ - list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); - return NULL; - } - - LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); - - /* There is a noop zc-ack can be piggybacked */ - tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1]; - ksocknal_next_tx_carrier(conn); - - /* use new_tx to replace the noop zc-ack packet */ - list_add(&tx_msg->tx_list, &tx->tx_list); - list_del(&tx->tx_list); - - return tx; -} - -static int -ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn, - struct ksock_tx *tx_ack, __u64 cookie) -{ - struct ksock_tx *tx; - - if (conn->ksnc_type != SOCKLND_CONN_ACK) - return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie); - - /* non-blocking ZC-ACK (to router) */ - LASSERT(!tx_ack || - tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); - - tx = conn->ksnc_tx_carrier; - if (!tx) { - if (tx_ack) { - list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); - conn->ksnc_tx_carrier = tx_ack; - } - return 0; - } - - /* conn->ksnc_tx_carrier */ - - if (tx_ack) - cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; - - if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */ - return 1; - - if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) { - /* replace the keepalive PING with a real ACK */ - LASSERT(!tx->tx_msg.ksm_zc_cookies[0]); - tx->tx_msg.ksm_zc_cookies[1] = cookie; - return 1; - } - - if (cookie == tx->tx_msg.ksm_zc_cookies[0] || - cookie == tx->tx_msg.ksm_zc_cookies[1]) { - CWARN("%s: duplicated ZC cookie: %llu\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie); - return 1; /* XXX return error in the future */ - } - - if (!tx->tx_msg.ksm_zc_cookies[0]) { - /* - * NOOP tx has only one ZC-ACK cookie, - * can carry at least one more - */ - if (tx->tx_msg.ksm_zc_cookies[1] > cookie) { - 
tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1]; - tx->tx_msg.ksm_zc_cookies[1] = cookie; - } else { - tx->tx_msg.ksm_zc_cookies[0] = cookie; - } - - if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) { - /* - * not likely to carry more ACKs, skip it - * to simplify logic - */ - ksocknal_next_tx_carrier(conn); - } - - return 1; - } - - /* takes two or more cookies already */ - - if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) { - __u64 tmp = 0; - - /* two separated cookies: (a+2, a) or (a+1, a) */ - LASSERT(tx->tx_msg.ksm_zc_cookies[0] - - tx->tx_msg.ksm_zc_cookies[1] <= 2); - - if (tx->tx_msg.ksm_zc_cookies[0] - - tx->tx_msg.ksm_zc_cookies[1] == 2) { - if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) - tmp = cookie; - } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) { - tmp = tx->tx_msg.ksm_zc_cookies[1]; - } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) { - tmp = tx->tx_msg.ksm_zc_cookies[0]; - } - - if (tmp) { - /* range of cookies */ - tx->tx_msg.ksm_zc_cookies[0] = tmp - 1; - tx->tx_msg.ksm_zc_cookies[1] = tmp + 1; - return 1; - } - - } else { - /* - * ksm_zc_cookies[0] < ksm_zc_cookies[1], - * it is range of cookies - */ - if (cookie >= tx->tx_msg.ksm_zc_cookies[0] && - cookie <= tx->tx_msg.ksm_zc_cookies[1]) { - CWARN("%s: duplicated ZC cookie: %llu\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie); - return 1; /* XXX: return error in the future */ - } - - if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) { - tx->tx_msg.ksm_zc_cookies[1] = cookie; - return 1; - } - - if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) { - tx->tx_msg.ksm_zc_cookies[0] = cookie; - return 1; - } - } - - /* failed to piggyback ZC-ACK */ - if (tx_ack) { - list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue); - /* the next tx can piggyback at least 1 ACK */ - ksocknal_next_tx_carrier(conn); - } - - return 0; -} - -static int -ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk) -{ - int nob; - -#if 
SOCKNAL_VERSION_DEBUG - if (!*ksocknal_tunables.ksnd_typed_conns) - return SOCKNAL_MATCH_YES; -#endif - - if (!tx || !tx->tx_lnetmsg) { - /* noop packet */ - nob = offsetof(struct ksock_msg, ksm_u); - } else { - nob = tx->tx_lnetmsg->msg_len + - ((conn->ksnc_proto == &ksocknal_protocol_v1x) ? - sizeof(struct lnet_hdr) : sizeof(struct ksock_msg)); - } - - /* default checking for typed connection */ - switch (conn->ksnc_type) { - default: - CERROR("ksnc_type bad: %u\n", conn->ksnc_type); - LBUG(); - case SOCKLND_CONN_ANY: - return SOCKNAL_MATCH_YES; - - case SOCKLND_CONN_BULK_IN: - return SOCKNAL_MATCH_MAY; - - case SOCKLND_CONN_BULK_OUT: - if (nob < *ksocknal_tunables.ksnd_min_bulk) - return SOCKNAL_MATCH_MAY; - else - return SOCKNAL_MATCH_YES; - - case SOCKLND_CONN_CONTROL: - if (nob >= *ksocknal_tunables.ksnd_min_bulk) - return SOCKNAL_MATCH_MAY; - else - return SOCKNAL_MATCH_YES; - } -} - -static int -ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk) -{ - int nob; - - if (!tx || !tx->tx_lnetmsg) - nob = offsetof(struct ksock_msg, ksm_u); - else - nob = tx->tx_lnetmsg->msg_len + sizeof(struct ksock_msg); - - switch (conn->ksnc_type) { - default: - CERROR("ksnc_type bad: %u\n", conn->ksnc_type); - LBUG(); - case SOCKLND_CONN_ANY: - return SOCKNAL_MATCH_NO; - - case SOCKLND_CONN_ACK: - if (nonblk) - return SOCKNAL_MATCH_YES; - else if (!tx || !tx->tx_lnetmsg) - return SOCKNAL_MATCH_MAY; - else - return SOCKNAL_MATCH_NO; - - case SOCKLND_CONN_BULK_OUT: - if (nonblk) - return SOCKNAL_MATCH_NO; - else if (nob < *ksocknal_tunables.ksnd_min_bulk) - return SOCKNAL_MATCH_MAY; - else - return SOCKNAL_MATCH_YES; - - case SOCKLND_CONN_CONTROL: - if (nonblk) - return SOCKNAL_MATCH_NO; - else if (nob >= *ksocknal_tunables.ksnd_min_bulk) - return SOCKNAL_MATCH_MAY; - else - return SOCKNAL_MATCH_YES; - } -} - -/* (Sink) handle incoming ZC request from sender */ -static int -ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote) -{ - 
struct ksock_peer *peer = c->ksnc_peer; - struct ksock_conn *conn; - struct ksock_tx *tx; - int rc; - - read_lock(&ksocknal_data.ksnd_global_lock); - - conn = ksocknal_find_conn_locked(peer, NULL, !!remote); - if (conn) { - struct ksock_sched *sched = conn->ksnc_scheduler; - - LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); - - spin_lock_bh(&sched->kss_lock); - - rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie); - - spin_unlock_bh(&sched->kss_lock); - - if (rc) { /* piggybacked */ - read_unlock(&ksocknal_data.ksnd_global_lock); - return 0; - } - } - - read_unlock(&ksocknal_data.ksnd_global_lock); - - /* ACK connection is not ready, or can't piggyback the ACK */ - tx = ksocknal_alloc_tx_noop(cookie, !!remote); - if (!tx) - return -ENOMEM; - - rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id); - if (!rc) - return 0; - - ksocknal_free_tx(tx); - return rc; -} - -/* (Sender) handle ZC_ACK from sink */ -static int -ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2) -{ - struct ksock_peer *peer = conn->ksnc_peer; - struct ksock_tx *tx; - struct ksock_tx *temp; - struct ksock_tx *tmp; - LIST_HEAD(zlist); - int count; - - if (!cookie1) - cookie1 = cookie2; - - count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1); - - if (cookie2 == SOCKNAL_KEEPALIVE_PING && - conn->ksnc_proto == &ksocknal_protocol_v3x) { - /* keepalive PING for V3.x, just ignore it */ - return count == 1 ? 0 : -EPROTO; - } - - spin_lock(&peer->ksnp_lock); - - list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, - tx_zc_list) { - __u64 c = tx->tx_msg.ksm_zc_cookies[0]; - - if (c == cookie1 || c == cookie2 || - (cookie1 < c && c < cookie2)) { - tx->tx_msg.ksm_zc_cookies[0] = 0; - list_del(&tx->tx_zc_list); - list_add(&tx->tx_zc_list, &zlist); - - if (!--count) - break; - } - } - - spin_unlock(&peer->ksnp_lock); - - list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) { - list_del(&tx->tx_zc_list); - ksocknal_tx_decref(tx); - } - - return !count ? 
0 : -EPROTO; -} - -static int -ksocknal_send_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello) -{ - struct socket *sock = conn->ksnc_sock; - struct lnet_hdr *hdr; - struct lnet_magicversion *hmv; - int rc; - int i; - - BUILD_BUG_ON(sizeof(struct lnet_magicversion) != offsetof(struct lnet_hdr, src_nid)); - - hdr = kzalloc(sizeof(*hdr), GFP_NOFS); - if (!hdr) { - CERROR("Can't allocate struct lnet_hdr\n"); - return -ENOMEM; - } - - hmv = (struct lnet_magicversion *)&hdr->dest_nid; - - /* - * Re-organize V2.x message header to V1.x (struct lnet_hdr) - * header and send out - */ - hmv->magic = cpu_to_le32(LNET_PROTO_TCP_MAGIC); - hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR); - hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR); - - if (the_lnet.ln_testprotocompat) { - /* single-shot proto check */ - LNET_LOCK(); - if (the_lnet.ln_testprotocompat & 1) { - hmv->version_major++; /* just different! */ - the_lnet.ln_testprotocompat &= ~1; - } - if (the_lnet.ln_testprotocompat & 2) { - hmv->magic = LNET_PROTO_MAGIC; - the_lnet.ln_testprotocompat &= ~2; - } - LNET_UNLOCK(); - } - - hdr->src_nid = cpu_to_le64(hello->kshm_src_nid); - hdr->src_pid = cpu_to_le32(hello->kshm_src_pid); - hdr->type = cpu_to_le32(LNET_MSG_HELLO); - hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32)); - hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype); - hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation); - - rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout()); - if (rc) { - CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", - rc, &conn->ksnc_ipaddr, conn->ksnc_port); - goto out; - } - - if (!hello->kshm_nips) - goto out; - - for (i = 0; i < (int)hello->kshm_nips; i++) - hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]); - - rc = lnet_sock_write(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), - lnet_acceptor_timeout()); - if (rc) { - CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", - rc, 
hello->kshm_nips, - &conn->ksnc_ipaddr, conn->ksnc_port); - } -out: - kfree(hdr); - - return rc; -} - -static int -ksocknal_send_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello) -{ - struct socket *sock = conn->ksnc_sock; - int rc; - - hello->kshm_magic = LNET_PROTO_MAGIC; - hello->kshm_version = conn->ksnc_proto->pro_version; - - if (the_lnet.ln_testprotocompat) { - /* single-shot proto check */ - LNET_LOCK(); - if (the_lnet.ln_testprotocompat & 1) { - hello->kshm_version++; /* just different! */ - the_lnet.ln_testprotocompat &= ~1; - } - LNET_UNLOCK(); - } - - rc = lnet_sock_write(sock, hello, offsetof(struct ksock_hello_msg, kshm_ips), - lnet_acceptor_timeout()); - if (rc) { - CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", - rc, &conn->ksnc_ipaddr, conn->ksnc_port); - return rc; - } - - if (!hello->kshm_nips) - return 0; - - rc = lnet_sock_write(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), - lnet_acceptor_timeout()); - if (rc) { - CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", - rc, hello->kshm_nips, - &conn->ksnc_ipaddr, conn->ksnc_port); - } - - return rc; -} - -static int -ksocknal_recv_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello, - int timeout) -{ - struct socket *sock = conn->ksnc_sock; - struct lnet_hdr *hdr; - int rc; - int i; - - hdr = kzalloc(sizeof(*hdr), GFP_NOFS); - if (!hdr) { - CERROR("Can't allocate struct lnet_hdr\n"); - return -ENOMEM; - } - - rc = lnet_sock_read(sock, &hdr->src_nid, - sizeof(*hdr) - offsetof(struct lnet_hdr, src_nid), - timeout); - if (rc) { - CERROR("Error %d reading rest of HELLO hdr from %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT(rc < 0 && rc != -EALREADY); - goto out; - } - - /* ...and check we got what we expected */ - if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) { - CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n", - le32_to_cpu(hdr->type), - &conn->ksnc_ipaddr); - rc = -EPROTO; - goto out; - } - - hello->kshm_src_nid = 
le64_to_cpu(hdr->src_nid); - hello->kshm_src_pid = le32_to_cpu(hdr->src_pid); - hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation); - hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type); - hello->kshm_nips = le32_to_cpu(hdr->payload_length) / - sizeof(__u32); - - if (hello->kshm_nips > LNET_MAX_INTERFACES) { - CERROR("Bad nips %d from ip %pI4h\n", - hello->kshm_nips, &conn->ksnc_ipaddr); - rc = -EPROTO; - goto out; - } - - if (!hello->kshm_nips) - goto out; - - rc = lnet_sock_read(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), timeout); - if (rc) { - CERROR("Error %d reading IPs from ip %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT(rc < 0 && rc != -EALREADY); - goto out; - } - - for (i = 0; i < (int)hello->kshm_nips; i++) { - hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]); - - if (!hello->kshm_ips[i]) { - CERROR("Zero IP[%d] from ip %pI4h\n", - i, &conn->ksnc_ipaddr); - rc = -EPROTO; - break; - } - } -out: - kfree(hdr); - - return rc; -} - -static int -ksocknal_recv_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello, - int timeout) -{ - struct socket *sock = conn->ksnc_sock; - int rc; - int i; - - if (hello->kshm_magic == LNET_PROTO_MAGIC) - conn->ksnc_flip = 0; - else - conn->ksnc_flip = 1; - - rc = lnet_sock_read(sock, &hello->kshm_src_nid, - offsetof(struct ksock_hello_msg, kshm_ips) - - offsetof(struct ksock_hello_msg, kshm_src_nid), - timeout); - if (rc) { - CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT(rc < 0 && rc != -EALREADY); - return rc; - } - - if (conn->ksnc_flip) { - __swab32s(&hello->kshm_src_pid); - __swab64s(&hello->kshm_src_nid); - __swab32s(&hello->kshm_dst_pid); - __swab64s(&hello->kshm_dst_nid); - __swab64s(&hello->kshm_src_incarnation); - __swab64s(&hello->kshm_dst_incarnation); - __swab32s(&hello->kshm_ctype); - __swab32s(&hello->kshm_nips); - } - - if (hello->kshm_nips > LNET_MAX_INTERFACES) { - CERROR("Bad nips %d from ip %pI4h\n", - hello->kshm_nips, 
&conn->ksnc_ipaddr); - return -EPROTO; - } - - if (!hello->kshm_nips) - return 0; - - rc = lnet_sock_read(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), timeout); - if (rc) { - CERROR("Error %d reading IPs from ip %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT(rc < 0 && rc != -EALREADY); - return rc; - } - - for (i = 0; i < (int)hello->kshm_nips; i++) { - if (conn->ksnc_flip) - __swab32s(&hello->kshm_ips[i]); - - if (!hello->kshm_ips[i]) { - CERROR("Zero IP[%d] from ip %pI4h\n", - i, &conn->ksnc_ipaddr); - return -EPROTO; - } - } - - return 0; -} - -static void -ksocknal_pack_msg_v1(struct ksock_tx *tx) -{ - /* V1.x has no KSOCK_MSG_NOOP */ - LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); - LASSERT(tx->tx_lnetmsg); - - tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr; - tx->tx_iov[0].iov_len = sizeof(struct lnet_hdr); - - tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(struct lnet_hdr); - tx->tx_resid = tx->tx_lnetmsg->msg_len + sizeof(struct lnet_hdr); -} - -static void -ksocknal_pack_msg_v2(struct ksock_tx *tx) -{ - tx->tx_iov[0].iov_base = &tx->tx_msg; - - if (tx->tx_lnetmsg) { - LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); - - tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr; - tx->tx_iov[0].iov_len = sizeof(struct ksock_msg); - tx->tx_nob = sizeof(struct ksock_msg) + tx->tx_lnetmsg->msg_len; - tx->tx_resid = sizeof(struct ksock_msg) + tx->tx_lnetmsg->msg_len; - } else { - LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); - - tx->tx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr); - tx->tx_nob = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr); - tx->tx_resid = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr); - } - /* - * Don't checksum before start sending, because packet can be - * piggybacked with ACK - */ -} - -static void -ksocknal_unpack_msg_v1(struct ksock_msg *msg) -{ - msg->ksm_csum = 0; - msg->ksm_type = KSOCK_MSG_LNET; - msg->ksm_zc_cookies[0] = 0; - msg->ksm_zc_cookies[1] = 0; -} - -static void 
-ksocknal_unpack_msg_v2(struct ksock_msg *msg) -{ - return; /* Do nothing */ -} - -struct ksock_proto ksocknal_protocol_v1x = { - .pro_version = KSOCK_PROTO_V1, - .pro_send_hello = ksocknal_send_hello_v1, - .pro_recv_hello = ksocknal_recv_hello_v1, - .pro_pack = ksocknal_pack_msg_v1, - .pro_unpack = ksocknal_unpack_msg_v1, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1, - .pro_handle_zcreq = NULL, - .pro_handle_zcack = NULL, - .pro_queue_tx_zcack = NULL, - .pro_match_tx = ksocknal_match_tx -}; - -struct ksock_proto ksocknal_protocol_v2x = { - .pro_version = KSOCK_PROTO_V2, - .pro_send_hello = ksocknal_send_hello_v2, - .pro_recv_hello = ksocknal_recv_hello_v2, - .pro_pack = ksocknal_pack_msg_v2, - .pro_unpack = ksocknal_unpack_msg_v2, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, - .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2, - .pro_handle_zcreq = ksocknal_handle_zcreq, - .pro_handle_zcack = ksocknal_handle_zcack, - .pro_match_tx = ksocknal_match_tx -}; - -struct ksock_proto ksocknal_protocol_v3x = { - .pro_version = KSOCK_PROTO_V3, - .pro_send_hello = ksocknal_send_hello_v2, - .pro_recv_hello = ksocknal_recv_hello_v2, - .pro_pack = ksocknal_pack_msg_v2, - .pro_unpack = ksocknal_unpack_msg_v2, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, - .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3, - .pro_handle_zcreq = ksocknal_handle_zcreq, - .pro_handle_zcack = ksocknal_handle_zcack, - .pro_match_tx = ksocknal_match_tx_v3 -}; diff --git a/drivers/staging/lustre/lnet/libcfs/Makefile b/drivers/staging/lustre/lnet/libcfs/Makefile deleted file mode 100644 index 6a1b232da495..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LNET) += libcfs.o - -libcfs-obj-y += linux-tracefile.o linux-debug.o -libcfs-obj-y += linux-crypto.o -libcfs-obj-y 
+= linux-crypto-adler.o - -libcfs-obj-y += debug.o fail.o module.o tracefile.o -libcfs-obj-y += libcfs_string.o hash.o -libcfs-obj-$(CONFIG_SMP) += libcfs_cpu.o -libcfs-obj-y += libcfs_mem.o libcfs_lock.o - -libcfs-objs := $(libcfs-obj-y) diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c deleted file mode 100644 index 06f694f6a28f..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/debug.c +++ /dev/null @@ -1,461 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * libcfs/libcfs/debug.c - * - * Author: Phil Schwan - * - */ - -# define DEBUG_SUBSYSTEM S_LNET - -#include -#include -#include -#include -#include "tracefile.h" - -static char debug_file_name[1024]; - -unsigned int libcfs_subsystem_debug = ~0; -EXPORT_SYMBOL(libcfs_subsystem_debug); -module_param(libcfs_subsystem_debug, int, 0644); -MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask"); - -unsigned int libcfs_debug = (D_CANTMASK | - D_NETERROR | D_HA | D_CONFIG | D_IOCTL); -EXPORT_SYMBOL(libcfs_debug); -module_param(libcfs_debug, int, 0644); -MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask"); - -static int libcfs_param_debug_mb_set(const char *val, - const struct kernel_param *kp) -{ - int rc; - unsigned int num; - - rc = kstrtouint(val, 0, &num); - if (rc < 0) - return rc; - - if (!*((unsigned int *)kp->arg)) { - *((unsigned int *)kp->arg) = num; - return 0; - } - - rc = cfs_trace_set_debug_mb(num); - - if (!rc) - *((unsigned int *)kp->arg) = cfs_trace_get_debug_mb(); - - return rc; -} - -/* While debug_mb setting look like unsigned int, in fact - * it needs quite a bunch of extra processing, so we define special - * debugmb parameter type with corresponding methods to handle this case - */ -static const struct kernel_param_ops param_ops_debugmb = { - .set = libcfs_param_debug_mb_set, - .get = param_get_uint, -}; - -#define param_check_debugmb(name, p) \ - __param_check(name, p, unsigned int) - -static unsigned int libcfs_debug_mb; -module_param(libcfs_debug_mb, debugmb, 0644); -MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size."); - -unsigned int libcfs_printk = D_CANTMASK; -module_param(libcfs_printk, uint, 0644); -MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask"); - -unsigned int libcfs_console_ratelimit = 1; -module_param(libcfs_console_ratelimit, uint, 0644); -MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)"); - -static int 
param_set_delay_minmax(const char *val, - const struct kernel_param *kp, - long min, long max) -{ - long d; - int sec; - int rc; - - rc = kstrtoint(val, 0, &sec); - if (rc) - return -EINVAL; - - d = sec * HZ / 100; - if (d < min || d > max) - return -EINVAL; - - *((unsigned int *)kp->arg) = d; - - return 0; -} - -static int param_get_delay(char *buffer, const struct kernel_param *kp) -{ - unsigned int d = *(unsigned int *)kp->arg; - - return sprintf(buffer, "%u", (unsigned int)(d * 100) / HZ); -} - -unsigned int libcfs_console_max_delay; -unsigned int libcfs_console_min_delay; - -static int param_set_console_max_delay(const char *val, - const struct kernel_param *kp) -{ - return param_set_delay_minmax(val, kp, - libcfs_console_min_delay, INT_MAX); -} - -static const struct kernel_param_ops param_ops_console_max_delay = { - .set = param_set_console_max_delay, - .get = param_get_delay, -}; - -#define param_check_console_max_delay(name, p) \ - __param_check(name, p, unsigned int) - -module_param(libcfs_console_max_delay, console_max_delay, 0644); -MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)"); - -static int param_set_console_min_delay(const char *val, - const struct kernel_param *kp) -{ - return param_set_delay_minmax(val, kp, - 1, libcfs_console_max_delay); -} - -static const struct kernel_param_ops param_ops_console_min_delay = { - .set = param_set_console_min_delay, - .get = param_get_delay, -}; - -#define param_check_console_min_delay(name, p) \ - __param_check(name, p, unsigned int) - -module_param(libcfs_console_min_delay, console_min_delay, 0644); -MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)"); - -static int param_set_uint_minmax(const char *val, - const struct kernel_param *kp, - unsigned int min, unsigned int max) -{ - unsigned int num; - int ret; - - if (!val) - return -EINVAL; - ret = kstrtouint(val, 0, &num); - if (ret < 0 || num < min || num > max) - return 
-EINVAL; - *((unsigned int *)kp->arg) = num; - return 0; -} - -static int param_set_uintpos(const char *val, const struct kernel_param *kp) -{ - return param_set_uint_minmax(val, kp, 1, -1); -} - -static const struct kernel_param_ops param_ops_uintpos = { - .set = param_set_uintpos, - .get = param_get_uint, -}; - -#define param_check_uintpos(name, p) \ - __param_check(name, p, unsigned int) - -unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF; -module_param(libcfs_console_backoff, uintpos, 0644); -MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor"); - -unsigned int libcfs_debug_binary = 1; - -unsigned int libcfs_stack = 3 * THREAD_SIZE / 4; -EXPORT_SYMBOL(libcfs_stack); - -unsigned int libcfs_catastrophe; -EXPORT_SYMBOL(libcfs_catastrophe); - -unsigned int libcfs_panic_on_lbug = 1; -module_param(libcfs_panic_on_lbug, uint, 0644); -MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG"); - -static wait_queue_head_t debug_ctlwq; - -char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT; - -/* We need to pass a pointer here, but elsewhere this must be a const */ -static char *libcfs_debug_file_path; -module_param(libcfs_debug_file_path, charp, 0644); -MODULE_PARM_DESC(libcfs_debug_file_path, - "Path for dumping debug logs, set 'NONE' to prevent log dumping"); - -int libcfs_panic_in_progress; - -/* libcfs_debug_token2mask() expects the returned string in lower-case */ -static const char * -libcfs_debug_subsys2str(int subsys) -{ - static const char * const libcfs_debug_subsystems[] = - LIBCFS_DEBUG_SUBSYS_NAMES; - - if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems)) - return NULL; - - return libcfs_debug_subsystems[subsys]; -} - -/* libcfs_debug_token2mask() expects the returned string in lower-case */ -static const char * -libcfs_debug_dbg2str(int debug) -{ - static const char * const libcfs_debug_masks[] = - LIBCFS_DEBUG_MASKS_NAMES; - - if (debug >= ARRAY_SIZE(libcfs_debug_masks)) - 
return NULL; - - return libcfs_debug_masks[debug]; -} - -int -libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys) -{ - const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : - libcfs_debug_dbg2str; - int len = 0; - const char *token; - int i; - - if (!mask) { /* "0" */ - if (size > 0) - str[0] = '0'; - len = 1; - } else { /* space-separated tokens */ - for (i = 0; i < 32; i++) { - if (!(mask & (1 << i))) - continue; - - token = fn(i); - if (!token) /* unused bit */ - continue; - - if (len > 0) { /* separator? */ - if (len < size) - str[len] = ' '; - len++; - } - - while (*token) { - if (len < size) - str[len] = *token; - token++; - len++; - } - } - } - - /* terminate 'str' */ - if (len < size) - str[len] = 0; - else - str[size - 1] = 0; - - return len; -} - -int -libcfs_debug_str2mask(int *mask, const char *str, int is_subsys) -{ - const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : - libcfs_debug_dbg2str; - int m = 0; - int matched; - int n; - int t; - - /* Allow a number for backwards compatibility */ - - for (n = strlen(str); n > 0; n--) - if (!isspace(str[n - 1])) - break; - matched = n; - t = sscanf(str, "%i%n", &m, &matched); - if (t >= 1 && matched == n) { - /* don't print warning for lctl set_param debug=0 or -1 */ - if (m && m != -1) - CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n"); - *mask = m; - return 0; - } - - return cfs_str2mask(str, fn, mask, is_subsys ? 
0 : D_CANTMASK, - 0xffffffff); -} - -/** - * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages() - */ -void libcfs_debug_dumplog_internal(void *arg) -{ - static time64_t last_dump_time; - time64_t current_time; - void *journal_info; - - journal_info = current->journal_info; - current->journal_info = NULL; - current_time = ktime_get_real_seconds(); - - if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) && - current_time > last_dump_time) { - last_dump_time = current_time; - snprintf(debug_file_name, sizeof(debug_file_name) - 1, - "%s.%lld.%ld", libcfs_debug_file_path_arr, - (s64)current_time, (long)arg); - pr_alert("LustreError: dumping log to %s\n", debug_file_name); - cfs_tracefile_dump_all_pages(debug_file_name); - libcfs_run_debug_log_upcall(debug_file_name); - } - - current->journal_info = journal_info; -} - -static int libcfs_debug_dumplog_thread(void *arg) -{ - libcfs_debug_dumplog_internal(arg); - wake_up(&debug_ctlwq); - return 0; -} - -void libcfs_debug_dumplog(void) -{ - wait_queue_entry_t wait; - struct task_struct *dumper; - - /* we're being careful to ensure that the kernel thread is - * able to set our state to running as it exits before we - * get to schedule() - */ - init_waitqueue_entry(&wait, current); - add_wait_queue(&debug_ctlwq, &wait); - - dumper = kthread_run(libcfs_debug_dumplog_thread, - (void *)(long)current->pid, - "libcfs_debug_dumper"); - set_current_state(TASK_INTERRUPTIBLE); - if (IS_ERR(dumper)) - pr_err("LustreError: cannot start log dump thread: %ld\n", - PTR_ERR(dumper)); - else - schedule(); - - /* be sure to teardown if cfs_create_thread() failed */ - remove_wait_queue(&debug_ctlwq, &wait); - set_current_state(TASK_RUNNING); -} -EXPORT_SYMBOL(libcfs_debug_dumplog); - -int libcfs_debug_init(unsigned long bufsize) -{ - unsigned int max = libcfs_debug_mb; - int rc = 0; - - init_waitqueue_head(&debug_ctlwq); - - if (libcfs_console_max_delay <= 0 || /* not set by user or */ - libcfs_console_min_delay <= 0 || 
/* set to invalid values */ - libcfs_console_min_delay >= libcfs_console_max_delay) { - libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY; - libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY; - } - - if (libcfs_debug_file_path) { - strlcpy(libcfs_debug_file_path_arr, - libcfs_debug_file_path, - sizeof(libcfs_debug_file_path_arr)); - } - - /* If libcfs_debug_mb is set to an invalid value or uninitialized - * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES - */ - if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) { - max = TCD_MAX_PAGES; - } else { - max = max / num_possible_cpus(); - max <<= (20 - PAGE_SHIFT); - } - - rc = cfs_tracefile_init(max); - if (!rc) { - libcfs_register_panic_notifier(); - libcfs_debug_mb = cfs_trace_get_debug_mb(); - } - - return rc; -} - -int libcfs_debug_cleanup(void) -{ - libcfs_unregister_panic_notifier(); - cfs_tracefile_exit(); - return 0; -} - -int libcfs_debug_clear_buffer(void) -{ - cfs_trace_flush_pages(); - return 0; -} - -/* Debug markers, although printed by S_LNET should not be marked as such. */ -#undef DEBUG_SUBSYSTEM -#define DEBUG_SUBSYSTEM S_UNDEFINED -int libcfs_debug_mark_buffer(const char *text) -{ - CDEBUG(D_TRACE, - "***************************************************\n"); - LCONSOLE(D_WARNING, "DEBUG MARKER: %s\n", text); - CDEBUG(D_TRACE, - "***************************************************\n"); - - return 0; -} - -#undef DEBUG_SUBSYSTEM -#define DEBUG_SUBSYSTEM S_LNET diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c deleted file mode 100644 index bd86b3b5bc34..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/fail.c +++ /dev/null @@ -1,146 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Oracle Corporation, Inc. 
- */ - -#include -#include -#include -#include -#include - -unsigned long cfs_fail_loc; -EXPORT_SYMBOL(cfs_fail_loc); - -unsigned int cfs_fail_val; -EXPORT_SYMBOL(cfs_fail_val); - -int cfs_fail_err; -EXPORT_SYMBOL(cfs_fail_err); - -DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq); -EXPORT_SYMBOL(cfs_race_waitq); - -int cfs_race_state; -EXPORT_SYMBOL(cfs_race_state); - -int __cfs_fail_check_set(u32 id, u32 value, int set) -{ - static atomic_t cfs_fail_count = ATOMIC_INIT(0); - - LASSERT(!(id & CFS_FAIL_ONCE)); - - if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) == - (CFS_FAILED | CFS_FAIL_ONCE)) { - atomic_set(&cfs_fail_count, 0); /* paranoia */ - return 0; - } - - /* Fail 1/cfs_fail_val times */ - if (cfs_fail_loc & CFS_FAIL_RAND) { - if (cfs_fail_val < 2 || prandom_u32_max(cfs_fail_val) > 0) - return 0; - } - - /* Skip the first cfs_fail_val, then fail */ - if (cfs_fail_loc & CFS_FAIL_SKIP) { - if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val) - return 0; - } - - /* check cfs_fail_val... */ - if (set == CFS_FAIL_LOC_VALUE) { - if (cfs_fail_val != -1 && cfs_fail_val != value) - return 0; - } - - /* Fail cfs_fail_val times, overridden by FAIL_ONCE */ - if (cfs_fail_loc & CFS_FAIL_SOME && - (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) { - int count = atomic_inc_return(&cfs_fail_count); - - if (count >= cfs_fail_val) { - set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc); - atomic_set(&cfs_fail_count, 0); - /* we are lost race to increase */ - if (count > cfs_fail_val) - return 0; - } - } - - /* Take into account the current call for FAIL_ONCE for ORSET only, - * as RESET is a new fail_loc, it does not change the current call - */ - if ((set == CFS_FAIL_LOC_ORSET) && (value & CFS_FAIL_ONCE)) - set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc); - /* Lost race to set CFS_FAILED_BIT. */ - if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) { - /* If CFS_FAIL_ONCE is valid, only one process can fail, - * otherwise multi-process can fail at the same time. 
- */ - if (cfs_fail_loc & CFS_FAIL_ONCE) - return 0; - } - - switch (set) { - case CFS_FAIL_LOC_NOSET: - case CFS_FAIL_LOC_VALUE: - break; - case CFS_FAIL_LOC_ORSET: - cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE); - break; - case CFS_FAIL_LOC_RESET: - cfs_fail_loc = value; - atomic_set(&cfs_fail_count, 0); - break; - default: - LASSERTF(0, "called with bad set %u\n", set); - break; - } - - return 1; -} -EXPORT_SYMBOL(__cfs_fail_check_set); - -int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set) -{ - int ret; - - ret = __cfs_fail_check_set(id, value, set); - if (ret && likely(ms > 0)) { - CERROR("cfs_fail_timeout id %x sleeping for %dms\n", - id, ms); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(ms * HZ / 1000); - CERROR("cfs_fail_timeout id %x awake\n", id); - } - return ret; -} -EXPORT_SYMBOL(__cfs_fail_timeout_set); diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c deleted file mode 100644 index 48be66f0d654..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/hash.c +++ /dev/null @@ -1,2065 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/hash.c - * - * Implement a hash class for hash process in lustre system. - * - * Author: YuZhangyong - * - * 2008-08-15: Brian Behlendorf - * - Simplified API and improved documentation - * - Added per-hash feature flags: - * * CFS_HASH_DEBUG additional validation - * * CFS_HASH_REHASH dynamic rehashing - * - Added per-hash statistics - * - General performance enhancements - * - * 2009-07-31: Liang Zhen - * - move all stuff to libcfs - * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH - * - ignore hs_rwlock if without CFS_HASH_REHASH setting - * - buckets are allocated one by one(instead of contiguous memory), - * to avoid unnecessary cacheline conflict - * - * 2010-03-01: Liang Zhen - * - "bucket" is a group of hlist_head now, user can specify bucket size - * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share - * one lock for reducing memory overhead. 
- * - * - support lockless hash, caller will take care of locks: - * avoid lock overhead for hash tables that are already protected - * by locking in the caller for another reason - * - * - support both spin_lock/rwlock for bucket: - * overhead of spinlock contention is lower than read/write - * contention of rwlock, so using spinlock to serialize operations on - * bucket is more reasonable for those frequently changed hash tables - * - * - support one-single lock mode: - * one lock to protect all hash operations to avoid overhead of - * multiple locks if hash table is always small - * - * - removed a lot of unnecessary addref & decref on hash element: - * addref & decref are atomic operations in many use-cases which - * are expensive. - * - * - support non-blocking cfs_hash_add() and cfs_hash_findadd(): - * some lustre use-cases require these functions to be strictly - * non-blocking, we need to schedule required rehash on a different - * thread on those cases. - * - * - safer rehash on large hash table - * In old implementation, rehash function will exclusively lock the - * hash table and finish rehash in one batch, it's dangerous on SMP - * system because rehash millions of elements could take long time. - * New implemented rehash can release lock and relax CPU in middle - * of rehash, it's safe for another thread to search/change on the - * hash table even it's in rehasing. - * - * - support two different refcount modes - * . hash table has refcount on element - * . hash table doesn't change refcount on adding/removing element - * - * - support long name hash table (for param-tree) - * - * - fix a bug for cfs_hash_rehash_key: - * in old implementation, cfs_hash_rehash_key could screw up the - * hash-table because @key is overwritten without any protection. - * Now we need user to define hs_keycpy for those rehash enabled - * hash tables, cfs_hash_rehash_key will overwrite hash-key - * inside lock by calling hs_keycpy. 
- * - * - better hash iteration: - * Now we support both locked iteration & lockless iteration of hash - * table. Also, user can break the iteration by return 1 in callback. - */ -#include -#include -#include -#include -#include - -#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 -static unsigned int warn_on_depth = 8; -module_param(warn_on_depth, uint, 0644); -MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high."); -#endif - -struct workqueue_struct *cfs_rehash_wq; - -static inline void -cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {} - -static inline void -cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {} - -static inline void -cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive) - __acquires(&lock->spin) -{ - spin_lock(&lock->spin); -} - -static inline void -cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive) - __releases(&lock->spin) -{ - spin_unlock(&lock->spin); -} - -static inline void -cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive) - __acquires(&lock->rw) -{ - if (!exclusive) - read_lock(&lock->rw); - else - write_lock(&lock->rw); -} - -static inline void -cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive) - __releases(&lock->rw) -{ - if (!exclusive) - read_unlock(&lock->rw); - else - write_unlock(&lock->rw); -} - -/** No lock hash */ -static struct cfs_hash_lock_ops cfs_hash_nl_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_nl_lock, - .hs_bkt_unlock = cfs_hash_nl_unlock, -}; - -/** no bucket lock, one spinlock to protect everything */ -static struct cfs_hash_lock_ops cfs_hash_nbl_lops = { - .hs_lock = cfs_hash_spin_lock, - .hs_unlock = cfs_hash_spin_unlock, - .hs_bkt_lock = cfs_hash_nl_lock, - .hs_bkt_unlock = cfs_hash_nl_unlock, -}; - -/** spin bucket lock, rehash is enabled */ -static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = { - .hs_lock = cfs_hash_rw_lock, - .hs_unlock = cfs_hash_rw_unlock, - .hs_bkt_lock = 
cfs_hash_spin_lock, - .hs_bkt_unlock = cfs_hash_spin_unlock, -}; - -/** rw bucket lock, rehash is enabled */ -static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = { - .hs_lock = cfs_hash_rw_lock, - .hs_unlock = cfs_hash_rw_unlock, - .hs_bkt_lock = cfs_hash_rw_lock, - .hs_bkt_unlock = cfs_hash_rw_unlock, -}; - -/** spin bucket lock, rehash is disabled */ -static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_spin_lock, - .hs_bkt_unlock = cfs_hash_spin_unlock, -}; - -/** rw bucket lock, rehash is disabled */ -static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_rw_lock, - .hs_bkt_unlock = cfs_hash_rw_unlock, -}; - -static void -cfs_hash_lock_setup(struct cfs_hash *hs) -{ - if (cfs_hash_with_no_lock(hs)) { - hs->hs_lops = &cfs_hash_nl_lops; - - } else if (cfs_hash_with_no_bktlock(hs)) { - hs->hs_lops = &cfs_hash_nbl_lops; - spin_lock_init(&hs->hs_lock.spin); - - } else if (cfs_hash_with_rehash(hs)) { - rwlock_init(&hs->hs_lock.rw); - - if (cfs_hash_with_rw_bktlock(hs)) - hs->hs_lops = &cfs_hash_bkt_rw_lops; - else if (cfs_hash_with_spin_bktlock(hs)) - hs->hs_lops = &cfs_hash_bkt_spin_lops; - else - LBUG(); - } else { - if (cfs_hash_with_rw_bktlock(hs)) - hs->hs_lops = &cfs_hash_nr_bkt_rw_lops; - else if (cfs_hash_with_spin_bktlock(hs)) - hs->hs_lops = &cfs_hash_nr_bkt_spin_lops; - else - LBUG(); - } -} - -/** - * Simple hash head without depth tracking - * new element is always added to head of hlist - */ -struct cfs_hash_head { - struct hlist_head hh_head; /**< entries list */ -}; - -static int -cfs_hash_hh_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_head); -} - -static struct hlist_head * -cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_head *head; - - head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0]; 
- return &head[bd->bd_offset].hh_head; -} - -static int -cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd)); - return -1; /* unknown depth */ -} - -static int -cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - hlist_del_init(hnode); - return -1; /* unknown depth */ -} - -/** - * Simple hash head with depth tracking - * new element is always added to head of hlist - */ -struct cfs_hash_head_dep { - struct hlist_head hd_head; /**< entries list */ - unsigned int hd_depth; /**< list length */ -}; - -static int -cfs_hash_hd_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_head_dep); -} - -static struct hlist_head * -cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_head_dep *head; - - head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].hd_head; -} - -static int -cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_head_dep *hh; - - hh = container_of(cfs_hash_hd_hhead(hs, bd), - struct cfs_hash_head_dep, hd_head); - hlist_add_head(hnode, &hh->hd_head); - return ++hh->hd_depth; -} - -static int -cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_head_dep *hh; - - hh = container_of(cfs_hash_hd_hhead(hs, bd), - struct cfs_hash_head_dep, hd_head); - hlist_del_init(hnode); - return --hh->hd_depth; -} - -/** - * double links hash head without depth tracking - * new element is always added to tail of hlist - */ -struct cfs_hash_dhead { - struct hlist_head dh_head; /**< entries list */ - struct hlist_node *dh_tail; /**< the last entry */ -}; - -static int -cfs_hash_dh_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_dhead); -} - -static struct hlist_head * -cfs_hash_dh_hhead(struct cfs_hash *hs, 
struct cfs_hash_bd *bd) -{ - struct cfs_hash_dhead *head; - - head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].dh_head; -} - -static int -cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_dhead *dh; - - dh = container_of(cfs_hash_dh_hhead(hs, bd), - struct cfs_hash_dhead, dh_head); - if (dh->dh_tail) /* not empty */ - hlist_add_behind(hnode, dh->dh_tail); - else /* empty list */ - hlist_add_head(hnode, &dh->dh_head); - dh->dh_tail = hnode; - return -1; /* unknown depth */ -} - -static int -cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnd) -{ - struct cfs_hash_dhead *dh; - - dh = container_of(cfs_hash_dh_hhead(hs, bd), - struct cfs_hash_dhead, dh_head); - if (!hnd->next) { /* it's the tail */ - dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL : - container_of(hnd->pprev, struct hlist_node, next); - } - hlist_del_init(hnd); - return -1; /* unknown depth */ -} - -/** - * double links hash head with depth tracking - * new element is always added to tail of hlist - */ -struct cfs_hash_dhead_dep { - struct hlist_head dd_head; /**< entries list */ - struct hlist_node *dd_tail; /**< the last entry */ - unsigned int dd_depth; /**< list length */ -}; - -static int -cfs_hash_dd_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_dhead_dep); -} - -static struct hlist_head * -cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_dhead_dep *head; - - head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].dd_head; -} - -static int -cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_dhead_dep *dh; - - dh = container_of(cfs_hash_dd_hhead(hs, bd), - struct cfs_hash_dhead_dep, dd_head); - if (dh->dd_tail) /* not empty */ - hlist_add_behind(hnode, dh->dd_tail); - else /* empty list 
*/ - hlist_add_head(hnode, &dh->dd_head); - dh->dd_tail = hnode; - return ++dh->dd_depth; -} - -static int -cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnd) -{ - struct cfs_hash_dhead_dep *dh; - - dh = container_of(cfs_hash_dd_hhead(hs, bd), - struct cfs_hash_dhead_dep, dd_head); - if (!hnd->next) { /* it's the tail */ - dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL : - container_of(hnd->pprev, struct hlist_node, next); - } - hlist_del_init(hnd); - return --dh->dd_depth; -} - -static struct cfs_hash_hlist_ops cfs_hash_hh_hops = { - .hop_hhead = cfs_hash_hh_hhead, - .hop_hhead_size = cfs_hash_hh_hhead_size, - .hop_hnode_add = cfs_hash_hh_hnode_add, - .hop_hnode_del = cfs_hash_hh_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_hd_hops = { - .hop_hhead = cfs_hash_hd_hhead, - .hop_hhead_size = cfs_hash_hd_hhead_size, - .hop_hnode_add = cfs_hash_hd_hnode_add, - .hop_hnode_del = cfs_hash_hd_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_dh_hops = { - .hop_hhead = cfs_hash_dh_hhead, - .hop_hhead_size = cfs_hash_dh_hhead_size, - .hop_hnode_add = cfs_hash_dh_hnode_add, - .hop_hnode_del = cfs_hash_dh_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_dd_hops = { - .hop_hhead = cfs_hash_dd_hhead, - .hop_hhead_size = cfs_hash_dd_hhead_size, - .hop_hnode_add = cfs_hash_dd_hnode_add, - .hop_hnode_del = cfs_hash_dd_hnode_del, -}; - -static void -cfs_hash_hlist_setup(struct cfs_hash *hs) -{ - if (cfs_hash_with_add_tail(hs)) { - hs->hs_hops = cfs_hash_with_depth(hs) ? - &cfs_hash_dd_hops : &cfs_hash_dh_hops; - } else { - hs->hs_hops = cfs_hash_with_depth(hs) ? 
- &cfs_hash_hd_hops : &cfs_hash_hh_hops; - } -} - -static void -cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts, - unsigned int bits, const void *key, struct cfs_hash_bd *bd) -{ - unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1); - - LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits); - - bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)]; - bd->bd_offset = index >> (bits - hs->hs_bkt_bits); -} - -void -cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (likely(!hs->hs_rehash_buckets)) { - cfs_hash_bd_from_key(hs, hs->hs_buckets, - hs->hs_cur_bits, key, bd); - } else { - LASSERT(hs->hs_rehash_bits); - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, bd); - } -} -EXPORT_SYMBOL(cfs_hash_bd_get); - -static inline void -cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur) -{ - if (likely(dep_cur <= bd->bd_bucket->hsb_depmax)) - return; - - bd->bd_bucket->hsb_depmax = dep_cur; -# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 - if (likely(!warn_on_depth || - max(warn_on_depth, hs->hs_dep_max) >= dep_cur)) - return; - - spin_lock(&hs->hs_dep_lock); - hs->hs_dep_max = dep_cur; - hs->hs_dep_bkt = bd->bd_bucket->hsb_index; - hs->hs_dep_off = bd->bd_offset; - hs->hs_dep_bits = hs->hs_cur_bits; - spin_unlock(&hs->hs_dep_lock); - - queue_work(cfs_rehash_wq, &hs->hs_dep_work); -# endif -} - -void -cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - int rc; - - rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode); - cfs_hash_bd_dep_record(hs, bd, rc); - bd->bd_bucket->hsb_version++; - if (unlikely(!bd->bd_bucket->hsb_version)) - bd->bd_bucket->hsb_version++; - bd->bd_bucket->hsb_count++; - - if (cfs_hash_with_counter(hs)) - atomic_inc(&hs->hs_count); - if (!cfs_hash_with_no_itemref(hs)) - cfs_hash_get(hs, hnode); -} 
-EXPORT_SYMBOL(cfs_hash_bd_add_locked); - -void -cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - hs->hs_hops->hop_hnode_del(hs, bd, hnode); - - LASSERT(bd->bd_bucket->hsb_count > 0); - bd->bd_bucket->hsb_count--; - bd->bd_bucket->hsb_version++; - if (unlikely(!bd->bd_bucket->hsb_version)) - bd->bd_bucket->hsb_version++; - - if (cfs_hash_with_counter(hs)) { - LASSERT(atomic_read(&hs->hs_count) > 0); - atomic_dec(&hs->hs_count); - } - if (!cfs_hash_with_no_itemref(hs)) - cfs_hash_put_locked(hs, hnode); -} -EXPORT_SYMBOL(cfs_hash_bd_del_locked); - -void -cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, - struct cfs_hash_bd *bd_new, struct hlist_node *hnode) -{ - struct cfs_hash_bucket *obkt = bd_old->bd_bucket; - struct cfs_hash_bucket *nbkt = bd_new->bd_bucket; - int rc; - - if (!cfs_hash_bd_compare(bd_old, bd_new)) - return; - - /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops - * in cfs_hash_bd_del/add_locked - */ - hs->hs_hops->hop_hnode_del(hs, bd_old, hnode); - rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode); - cfs_hash_bd_dep_record(hs, bd_new, rc); - - LASSERT(obkt->hsb_count > 0); - obkt->hsb_count--; - obkt->hsb_version++; - if (unlikely(!obkt->hsb_version)) - obkt->hsb_version++; - nbkt->hsb_count++; - nbkt->hsb_version++; - if (unlikely(!nbkt->hsb_version)) - nbkt->hsb_version++; -} - -enum { - /** always set, for sanity (avoid ZERO intent) */ - CFS_HS_LOOKUP_MASK_FIND = BIT(0), - /** return entry with a ref */ - CFS_HS_LOOKUP_MASK_REF = BIT(1), - /** add entry if not existing */ - CFS_HS_LOOKUP_MASK_ADD = BIT(2), - /** delete entry, ignore other masks */ - CFS_HS_LOOKUP_MASK_DEL = BIT(3), -}; - -enum cfs_hash_lookup_intent { - /** return item w/o refcount */ - CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND, - /** return item with refcount */ - CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND | - CFS_HS_LOOKUP_MASK_REF), - /** return item w/o refcount if 
existed, otherwise add */ - CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND | - CFS_HS_LOOKUP_MASK_ADD), - /** return item with refcount if existed, otherwise add */ - CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND | - CFS_HS_LOOKUP_MASK_ADD), - /** delete if existed */ - CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND | - CFS_HS_LOOKUP_MASK_DEL) -}; - -static struct hlist_node * -cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key, struct hlist_node *hnode, - enum cfs_hash_lookup_intent intent) - -{ - struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd); - struct hlist_node *ehnode; - struct hlist_node *match; - int intent_add = intent & CFS_HS_LOOKUP_MASK_ADD; - - /* with this function, we can avoid a lot of useless refcount ops, - * which are expensive atomic operations most time. - */ - match = intent_add ? NULL : hnode; - hlist_for_each(ehnode, hhead) { - if (!cfs_hash_keycmp(hs, key, ehnode)) - continue; - - if (match && match != ehnode) /* can't match */ - continue; - - /* match and ... */ - if (intent & CFS_HS_LOOKUP_MASK_DEL) { - cfs_hash_bd_del_locked(hs, bd, ehnode); - return ehnode; - } - - /* caller wants refcount? 
*/ - if (intent & CFS_HS_LOOKUP_MASK_REF) - cfs_hash_get(hs, ehnode); - return ehnode; - } - /* no match item */ - if (!intent_add) - return NULL; - - LASSERT(hnode); - cfs_hash_bd_add_locked(hs, bd, hnode); - return hnode; -} - -struct hlist_node * -cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key) -{ - return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, - CFS_HS_LOOKUP_IT_FIND); -} -EXPORT_SYMBOL(cfs_hash_bd_lookup_locked); - -struct hlist_node * -cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key) -{ - return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, - CFS_HS_LOOKUP_IT_PEEK); -} -EXPORT_SYMBOL(cfs_hash_bd_peek_locked); - -static void -cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned int n, int excl) -{ - struct cfs_hash_bucket *prev = NULL; - int i; - - /** - * bds must be ascendantly ordered by bd->bd_bucket->hsb_index. - * NB: it's possible that several bds point to the same bucket but - * have different bd::bd_offset, so need take care of deadlock. 
- */ - cfs_hash_for_each_bd(bds, n, i) { - if (prev == bds[i].bd_bucket) - continue; - - LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index); - cfs_hash_bd_lock(hs, &bds[i], excl); - prev = bds[i].bd_bucket; - } -} - -static void -cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned int n, int excl) -{ - struct cfs_hash_bucket *prev = NULL; - int i; - - cfs_hash_for_each_bd(bds, n, i) { - if (prev != bds[i].bd_bucket) { - cfs_hash_bd_unlock(hs, &bds[i], excl); - prev = bds[i].bd_bucket; - } - } -} - -static struct hlist_node * -cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned int n, const void *key) -{ - struct hlist_node *ehnode; - unsigned int i; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, - CFS_HS_LOOKUP_IT_FIND); - if (ehnode) - return ehnode; - } - return NULL; -} - -static struct hlist_node * -cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned int n, const void *key, - struct hlist_node *hnode, int noref) -{ - struct hlist_node *ehnode; - int intent; - unsigned int i; - - LASSERT(hnode); - intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, - NULL, intent); - if (ehnode) - return ehnode; - } - - if (i == 1) { /* only one bucket */ - cfs_hash_bd_add_locked(hs, &bds[0], hnode); - } else { - struct cfs_hash_bd mybd; - - cfs_hash_bd_get(hs, key, &mybd); - cfs_hash_bd_add_locked(hs, &mybd, hnode); - } - - return hnode; -} - -static struct hlist_node * -cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned int n, const void *key, - struct hlist_node *hnode) -{ - struct hlist_node *ehnode; - unsigned int i; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode, - CFS_HS_LOOKUP_IT_FINDDEL); - if (ehnode) - 
return ehnode; - } - return NULL; -} - -static void -cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2) -{ - int rc; - - if (!bd2->bd_bucket) - return; - - if (!bd1->bd_bucket) { - *bd1 = *bd2; - bd2->bd_bucket = NULL; - return; - } - - rc = cfs_hash_bd_compare(bd1, bd2); - if (!rc) - bd2->bd_bucket = NULL; - else if (rc > 0) - swap(*bd1, *bd2); /* swap bd1 and bd2 */ -} - -void -cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, - struct cfs_hash_bd *bds) -{ - /* NB: caller should hold hs_lock.rw if REHASH is set */ - cfs_hash_bd_from_key(hs, hs->hs_buckets, - hs->hs_cur_bits, key, &bds[0]); - if (likely(!hs->hs_rehash_buckets)) { - /* no rehash or not rehashing */ - bds[1].bd_bucket = NULL; - return; - } - - LASSERT(hs->hs_rehash_bits); - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, &bds[1]); - - cfs_hash_bd_order(&bds[0], &bds[1]); -} - -void -cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) -{ - cfs_hash_multi_bd_lock(hs, bds, 2, excl); -} - -void -cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) -{ - cfs_hash_multi_bd_unlock(hs, bds, 2, excl); -} - -struct hlist_node * -cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key) -{ - return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key); -} - -struct hlist_node * -cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key, struct hlist_node *hnode, - int noref) -{ - return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key, - hnode, noref); -} - -struct hlist_node * -cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key, struct hlist_node *hnode) -{ - return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode); -} - -static void -cfs_hash_buckets_free(struct cfs_hash_bucket **buckets, - int bkt_size, int prev_size, int size) -{ - int i; - - for (i = prev_size; i < size; i++) - 
kfree(buckets[i]); - - kvfree(buckets); -} - -/* - * Create or grow bucket memory. Return old_buckets if no allocation was - * needed, the newly allocated buckets if allocation was needed and - * successful, and NULL on error. - */ -static struct cfs_hash_bucket ** -cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, - unsigned int old_size, unsigned int new_size) -{ - struct cfs_hash_bucket **new_bkts; - int i; - - LASSERT(!old_size || old_bkts); - - if (old_bkts && old_size == new_size) - return old_bkts; - - new_bkts = kvmalloc_array(new_size, sizeof(new_bkts[0]), GFP_KERNEL); - if (!new_bkts) - return NULL; - - if (old_bkts) { - memcpy(new_bkts, old_bkts, - min(old_size, new_size) * sizeof(*old_bkts)); - } - - for (i = old_size; i < new_size; i++) { - struct hlist_head *hhead; - struct cfs_hash_bd bd; - - new_bkts[i] = kzalloc(cfs_hash_bkt_size(hs), GFP_KERNEL); - if (!new_bkts[i]) { - cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs), - old_size, new_size); - return NULL; - } - - new_bkts[i]->hsb_index = i; - new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ - new_bkts[i]->hsb_depmax = -1; /* unknown */ - bd.bd_bucket = new_bkts[i]; - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) - INIT_HLIST_HEAD(hhead); - - if (cfs_hash_with_no_lock(hs) || - cfs_hash_with_no_bktlock(hs)) - continue; - - if (cfs_hash_with_rw_bktlock(hs)) - rwlock_init(&new_bkts[i]->hsb_lock.rw); - else if (cfs_hash_with_spin_bktlock(hs)) - spin_lock_init(&new_bkts[i]->hsb_lock.spin); - else - LBUG(); /* invalid use-case */ - } - return new_bkts; -} - -/** - * Initialize new libcfs hash, where: - * @name - Descriptive hash name - * @cur_bits - Initial hash table size, in bits - * @max_bits - Maximum allowed hash table resize, in bits - * @ops - Registered hash table operations - * @flags - CFS_HASH_REHASH enable synamic hash resizing - * - CFS_HASH_SORT enable chained hash sort - */ -static void cfs_hash_rehash_worker(struct work_struct *work); - -#if 
CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 -static void cfs_hash_dep_print(struct work_struct *work) -{ - struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work); - int dep; - int bkt; - int off; - int bits; - - spin_lock(&hs->hs_dep_lock); - dep = hs->hs_dep_max; - bkt = hs->hs_dep_bkt; - off = hs->hs_dep_off; - bits = hs->hs_dep_bits; - spin_unlock(&hs->hs_dep_lock); - - LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n", - hs->hs_name, bits, dep, bkt, off); - spin_lock(&hs->hs_dep_lock); - hs->hs_dep_bits = 0; /* mark as workitem done */ - spin_unlock(&hs->hs_dep_lock); - return 0; -} - -static void cfs_hash_depth_wi_init(struct cfs_hash *hs) -{ - spin_lock_init(&hs->hs_dep_lock); - INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print); -} - -static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) -{ - cancel_work_sync(&hs->hs_dep_work); -} - -#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */ - -static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {} -static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {} - -#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */ - -struct cfs_hash * -cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits, - unsigned int bkt_bits, unsigned int extra_bytes, - unsigned int min_theta, unsigned int max_theta, - struct cfs_hash_ops *ops, unsigned int flags) -{ - struct cfs_hash *hs; - int len; - - BUILD_BUG_ON(CFS_HASH_THETA_BITS >= 15); - - LASSERT(name); - LASSERT(ops->hs_key); - LASSERT(ops->hs_hash); - LASSERT(ops->hs_object); - LASSERT(ops->hs_keycmp); - LASSERT(ops->hs_get); - LASSERT(ops->hs_put || ops->hs_put_locked); - - if (flags & CFS_HASH_REHASH) - flags |= CFS_HASH_COUNTER; /* must have counter */ - - LASSERT(cur_bits > 0); - LASSERT(cur_bits >= bkt_bits); - LASSERT(max_bits >= cur_bits && max_bits < 31); - LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits)); - LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK))); - 
LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy)); - - len = !(flags & CFS_HASH_BIGNAME) ? - CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN; - hs = kzalloc(offsetof(struct cfs_hash, hs_name[len]), GFP_KERNEL); - if (!hs) - return NULL; - - strlcpy(hs->hs_name, name, len); - hs->hs_flags = flags; - - atomic_set(&hs->hs_refcount, 1); - atomic_set(&hs->hs_count, 0); - - cfs_hash_lock_setup(hs); - cfs_hash_hlist_setup(hs); - - hs->hs_cur_bits = (u8)cur_bits; - hs->hs_min_bits = (u8)cur_bits; - hs->hs_max_bits = (u8)max_bits; - hs->hs_bkt_bits = (u8)bkt_bits; - - hs->hs_ops = ops; - hs->hs_extra_bytes = extra_bytes; - hs->hs_rehash_bits = 0; - INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker); - cfs_hash_depth_wi_init(hs); - - if (cfs_hash_with_rehash(hs)) - __cfs_hash_set_theta(hs, min_theta, max_theta); - - hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0, - CFS_HASH_NBKT(hs)); - if (hs->hs_buckets) - return hs; - - kfree(hs); - return NULL; -} -EXPORT_SYMBOL(cfs_hash_create); - -/** - * Cleanup libcfs hash @hs. - */ -static void -cfs_hash_destroy(struct cfs_hash *hs) -{ - struct hlist_node *hnode; - struct hlist_node *pos; - struct cfs_hash_bd bd; - int i; - - LASSERT(hs); - LASSERT(!cfs_hash_is_exiting(hs) && - !cfs_hash_is_iterating(hs)); - - /** - * prohibit further rehashes, don't need any lock because - * I'm the only (last) one can change it. 
- */ - hs->hs_exiting = 1; - if (cfs_hash_with_rehash(hs)) - cfs_hash_rehash_cancel(hs); - - cfs_hash_depth_wi_cancel(hs); - /* rehash should be done/canceled */ - LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - LASSERT(bd.bd_bucket); - /* no need to take this lock, just for consistent code */ - cfs_hash_bd_lock(hs, &bd, 1); - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - LASSERTF(!cfs_hash_with_assert_empty(hs), - "hash %s bucket %u(%u) is not empty: %u items left\n", - hs->hs_name, bd.bd_bucket->hsb_index, - bd.bd_offset, bd.bd_bucket->hsb_count); - /* can't assert key valicate, because we - * can interrupt rehash - */ - cfs_hash_bd_del_locked(hs, &bd, hnode); - cfs_hash_exit(hs, hnode); - } - } - LASSERT(!bd.bd_bucket->hsb_count); - cfs_hash_bd_unlock(hs, &bd, 1); - cond_resched(); - } - - LASSERT(!atomic_read(&hs->hs_count)); - - cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs), - 0, CFS_HASH_NBKT(hs)); - i = cfs_hash_with_bigname(hs) ? 
- CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN; - kfree(hs); -} - -struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs) -{ - if (atomic_inc_not_zero(&hs->hs_refcount)) - return hs; - return NULL; -} -EXPORT_SYMBOL(cfs_hash_getref); - -void cfs_hash_putref(struct cfs_hash *hs) -{ - if (atomic_dec_and_test(&hs->hs_refcount)) - cfs_hash_destroy(hs); -} -EXPORT_SYMBOL(cfs_hash_putref); - -static inline int -cfs_hash_rehash_bits(struct cfs_hash *hs) -{ - if (cfs_hash_with_no_lock(hs) || - !cfs_hash_with_rehash(hs)) - return -EOPNOTSUPP; - - if (unlikely(cfs_hash_is_exiting(hs))) - return -ESRCH; - - if (unlikely(cfs_hash_is_rehashing(hs))) - return -EALREADY; - - if (unlikely(cfs_hash_is_iterating(hs))) - return -EAGAIN; - - /* XXX: need to handle case with max_theta != 2.0 - * and the case with min_theta != 0.5 - */ - if ((hs->hs_cur_bits < hs->hs_max_bits) && - (__cfs_hash_theta(hs) > hs->hs_max_theta)) - return hs->hs_cur_bits + 1; - - if (!cfs_hash_with_shrink(hs)) - return 0; - - if ((hs->hs_cur_bits > hs->hs_min_bits) && - (__cfs_hash_theta(hs) < hs->hs_min_theta)) - return hs->hs_cur_bits - 1; - - return 0; -} - -/** - * don't allow inline rehash if: - * - user wants non-blocking change (add/del) on hash table - * - too many elements - */ -static inline int -cfs_hash_rehash_inline(struct cfs_hash *hs) -{ - return !cfs_hash_with_nblk_change(hs) && - atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG; -} - -/** - * Add item @hnode to libcfs hash @hs using @key. The registered - * ops->hs_get function will be called when the item is added. 
- */ -void -cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) -{ - struct cfs_hash_bd bd; - int bits; - - LASSERT(hlist_unhashed(hnode)); - - cfs_hash_lock(hs, 0); - cfs_hash_bd_get_and_lock(hs, key, &bd, 1); - - cfs_hash_key_validate(hs, key, hnode); - cfs_hash_bd_add_locked(hs, &bd, hnode); - - cfs_hash_bd_unlock(hs, &bd, 1); - - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); -} -EXPORT_SYMBOL(cfs_hash_add); - -static struct hlist_node * -cfs_hash_find_or_add(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode, int noref) -{ - struct hlist_node *ehnode; - struct cfs_hash_bd bds[2]; - int bits = 0; - - LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode); - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); - - cfs_hash_key_validate(hs, key, hnode); - ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key, - hnode, noref); - cfs_hash_dual_bd_unlock(hs, bds, 1); - - if (ehnode == hnode) /* new item added */ - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); - - return ehnode; -} - -/** - * Add item @hnode to libcfs hash @hs using @key. The registered - * ops->hs_get function will be called if the item was added. - * Returns 0 on success or -EALREADY on key collisions. - */ -int -cfs_hash_add_unique(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode) -{ - return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ? - -EALREADY : 0; -} -EXPORT_SYMBOL(cfs_hash_add_unique); - -/** - * Add item @hnode to libcfs hash @hs using @key. If this @key - * already exists in the hash then ops->hs_get will be called on the - * conflicting entry and that entry will be returned to the caller. - * Otherwise ops->hs_get is called on the item which was added. 
- */ -void * -cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode) -{ - hnode = cfs_hash_find_or_add(hs, key, hnode, 0); - - return cfs_hash_object(hs, hnode); -} -EXPORT_SYMBOL(cfs_hash_findadd_unique); - -/** - * Delete item @hnode from the libcfs hash @hs using @key. The @key - * is required to ensure the correct hash bucket is locked since there - * is no direct linkage from the item to the bucket. The object - * removed from the hash will be returned and obs->hs_put is called - * on the removed object. - */ -void * -cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) -{ - void *obj = NULL; - int bits = 0; - struct cfs_hash_bd bds[2]; - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); - - /* NB: do nothing if @hnode is not in hash table */ - if (!hnode || !hlist_unhashed(hnode)) { - if (!bds[1].bd_bucket && hnode) { - cfs_hash_bd_del_locked(hs, &bds[0], hnode); - } else { - hnode = cfs_hash_dual_bd_finddel_locked(hs, bds, - key, hnode); - } - } - - if (hnode) { - obj = cfs_hash_object(hs, hnode); - bits = cfs_hash_rehash_bits(hs); - } - - cfs_hash_dual_bd_unlock(hs, bds, 1); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); - - return obj; -} -EXPORT_SYMBOL(cfs_hash_del); - -/** - * Delete item given @key in libcfs hash @hs. The first @key found in - * the hash will be removed, if the key exists multiple times in the hash - * @hs this function must be called once per key. The removed object - * will be returned and ops->hs_put is called on the removed object. - */ -void * -cfs_hash_del_key(struct cfs_hash *hs, const void *key) -{ - return cfs_hash_del(hs, key, NULL); -} -EXPORT_SYMBOL(cfs_hash_del_key); - -/** - * Lookup an item using @key in the libcfs hash @hs and return it. - * If the @key is found in the hash hs->hs_get() is called and the - * matching objects is returned. 
It is the callers responsibility - * to call the counterpart ops->hs_put using the cfs_hash_put() macro - * when when finished with the object. If the @key was not found - * in the hash @hs NULL is returned. - */ -void * -cfs_hash_lookup(struct cfs_hash *hs, const void *key) -{ - void *obj = NULL; - struct hlist_node *hnode; - struct cfs_hash_bd bds[2]; - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); - - hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key); - if (hnode) - obj = cfs_hash_object(hs, hnode); - - cfs_hash_dual_bd_unlock(hs, bds, 0); - cfs_hash_unlock(hs, 0); - - return obj; -} -EXPORT_SYMBOL(cfs_hash_lookup); - -static void -cfs_hash_for_each_enter(struct cfs_hash *hs) -{ - LASSERT(!cfs_hash_is_exiting(hs)); - - if (!cfs_hash_with_rehash(hs)) - return; - /* - * NB: it's race on cfs_has_t::hs_iterating, but doesn't matter - * because it's just an unreliable signal to rehash-thread, - * rehash-thread will try to finish rehash ASAP when seeing this. - */ - hs->hs_iterating = 1; - - cfs_hash_lock(hs, 1); - hs->hs_iterators++; - cfs_hash_unlock(hs, 1); - - /* NB: iteration is mostly called by service thread, - * we tend to cancel pending rehash-request, instead of - * blocking service thread, we will relaunch rehash request - * after iteration - */ - if (cfs_hash_is_rehashing(hs)) - cfs_hash_rehash_cancel(hs); -} - -static void -cfs_hash_for_each_exit(struct cfs_hash *hs) -{ - int remained; - int bits; - - if (!cfs_hash_with_rehash(hs)) - return; - cfs_hash_lock(hs, 1); - remained = --hs->hs_iterators; - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 1); - /* NB: it's race on cfs_has_t::hs_iterating, see above */ - if (!remained) - hs->hs_iterating = 0; - if (bits > 0) { - cfs_hash_rehash(hs, atomic_read(&hs->hs_count) < - CFS_HASH_LOOP_HOG); - } -} - -/** - * For each item in the libcfs hash @hs call the passed callback @func - * and pass to it as an argument each hash item and the private @data. 
- * - * a) the function may sleep! - * b) during the callback: - * . the bucket lock is held so the callback must never sleep. - * . if @removal_safe is true, use can remove current item by - * cfs_hash_bd_del_locked - */ -static u64 -cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data, int remove_safe) -{ - struct hlist_node *hnode; - struct hlist_node *pos; - struct cfs_hash_bd bd; - u64 count = 0; - int excl = !!remove_safe; - int loop = 0; - int i; - - cfs_hash_for_each_enter(hs); - - cfs_hash_lock(hs, 0); - LASSERT(!cfs_hash_is_rehashing(hs)); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - cfs_hash_bd_lock(hs, &bd, excl); - if (!func) { /* only glimpse size */ - count += bd.bd_bucket->hsb_count; - cfs_hash_bd_unlock(hs, &bd, excl); - continue; - } - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - cfs_hash_bucket_validate(hs, &bd, hnode); - count++; - loop++; - if (func(hs, &bd, hnode, data)) { - cfs_hash_bd_unlock(hs, &bd, excl); - goto out; - } - } - } - cfs_hash_bd_unlock(hs, &bd, excl); - if (loop < CFS_HASH_LOOP_HOG) - continue; - loop = 0; - cfs_hash_unlock(hs, 0); - cond_resched(); - cfs_hash_lock(hs, 0); - } - out: - cfs_hash_unlock(hs, 0); - - cfs_hash_for_each_exit(hs); - return count; -} - -struct cfs_hash_cond_arg { - cfs_hash_cond_opt_cb_t func; - void *arg; -}; - -static int -cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *data) -{ - struct cfs_hash_cond_arg *cond = data; - - if (cond->func(cfs_hash_object(hs, hnode), cond->arg)) - cfs_hash_bd_del_locked(hs, bd, hnode); - return 0; -} - -/** - * Delete item from the libcfs hash @hs when @func return true. - * The write lock being hold during loop for each bucket to avoid - * any object be reference. 
- */ -void -cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data) -{ - struct cfs_hash_cond_arg arg = { - .func = func, - .arg = data, - }; - - cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1); -} -EXPORT_SYMBOL(cfs_hash_cond_del); - -void -cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - cfs_hash_for_each_tight(hs, func, data, 0); -} -EXPORT_SYMBOL(cfs_hash_for_each); - -void -cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - cfs_hash_for_each_tight(hs, func, data, 1); -} -EXPORT_SYMBOL(cfs_hash_for_each_safe); - -static int -cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *data) -{ - *(int *)data = 0; - return 1; /* return 1 to break the loop */ -} - -int -cfs_hash_is_empty(struct cfs_hash *hs) -{ - int empty = 1; - - cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0); - return empty; -} -EXPORT_SYMBOL(cfs_hash_is_empty); - -u64 -cfs_hash_size_get(struct cfs_hash *hs) -{ - return cfs_hash_with_counter(hs) ? - atomic_read(&hs->hs_count) : - cfs_hash_for_each_tight(hs, NULL, NULL, 0); -} -EXPORT_SYMBOL(cfs_hash_size_get); - -/* - * cfs_hash_for_each_relax: - * Iterate the hash table and call @func on each item without - * any lock. This function can't guarantee to finish iteration - * if these features are enabled: - * - * a. if rehash_key is enabled, an item can be moved from - * one bucket to another bucket - * b. user can remove non-zero-ref item from hash-table, - * so the item can be removed from hash-table, even worse, - * it's possible that user changed key and insert to another - * hash bucket. - * there's no way for us to finish iteration correctly on previous - * two cases, so iteration has to be stopped on change. 
- */ -static int -cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data, int start) -{ - struct hlist_node *next = NULL; - struct hlist_node *hnode; - struct cfs_hash_bd bd; - u32 version; - int count = 0; - int stop_on_change; - int has_put_locked; - int end = -1; - int rc = 0; - int i; - - stop_on_change = cfs_hash_with_rehash_key(hs) || - !cfs_hash_with_no_itemref(hs); - has_put_locked = hs->hs_ops->hs_put_locked != NULL; - cfs_hash_lock(hs, 0); -again: - LASSERT(!cfs_hash_is_rehashing(hs)); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - if (i < start) - continue; - else if (end > 0 && i >= end) - break; - - cfs_hash_bd_lock(hs, &bd, 0); - version = cfs_hash_bd_version_get(&bd); - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - hnode = hhead->first; - if (!hnode) - continue; - cfs_hash_get(hs, hnode); - - for (; hnode; hnode = next) { - cfs_hash_bucket_validate(hs, &bd, hnode); - next = hnode->next; - if (next) - cfs_hash_get(hs, next); - cfs_hash_bd_unlock(hs, &bd, 0); - cfs_hash_unlock(hs, 0); - - rc = func(hs, &bd, hnode, data); - if (stop_on_change || !has_put_locked) - cfs_hash_put(hs, hnode); - cond_resched(); - count++; - - cfs_hash_lock(hs, 0); - cfs_hash_bd_lock(hs, &bd, 0); - if (stop_on_change) { - if (version != - cfs_hash_bd_version_get(&bd)) - rc = -EINTR; - } else if (has_put_locked) { - cfs_hash_put_locked(hs, hnode); - } - if (rc) /* callback wants to break iteration */ - break; - } - if (next) { - if (has_put_locked) { - cfs_hash_put_locked(hs, next); - next = NULL; - } - break; - } else if (rc) { - break; - } - } - cfs_hash_bd_unlock(hs, &bd, 0); - if (next && !has_put_locked) { - cfs_hash_put(hs, next); - next = NULL; - } - if (rc) /* callback wants to break iteration */ - break; - } - if (start > 0 && !rc) { - end = start; - start = 0; - goto again; - } - - cfs_hash_unlock(hs, 0); - return count; -} - -int -cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, 
- void *data, int start) -{ - if (cfs_hash_with_no_lock(hs) || - cfs_hash_with_rehash_key(hs) || - !cfs_hash_with_no_itemref(hs)) - return -EOPNOTSUPP; - - if (!hs->hs_ops->hs_get || - (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked)) - return -EOPNOTSUPP; - - cfs_hash_for_each_enter(hs); - cfs_hash_for_each_relax(hs, func, data, start); - cfs_hash_for_each_exit(hs); - - return 0; -} -EXPORT_SYMBOL(cfs_hash_for_each_nolock); - -/** - * For each hash bucket in the libcfs hash @hs call the passed callback - * @func until all the hash buckets are empty. The passed callback @func - * or the previously registered callback hs->hs_put must remove the item - * from the hash. You may either use the cfs_hash_del() or hlist_del() - * functions. No rwlocks will be held during the callback @func it is - * safe to sleep if needed. This function will not terminate until the - * hash is empty. Note it is still possible to concurrently add new - * items in to the hash. It is the callers responsibility to ensure - * the required locking is in place to prevent concurrent insertions. 
- */ -int -cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - unsigned int i = 0; - - if (cfs_hash_with_no_lock(hs)) - return -EOPNOTSUPP; - - if (!hs->hs_ops->hs_get || - (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked)) - return -EOPNOTSUPP; - - cfs_hash_for_each_enter(hs); - while (cfs_hash_for_each_relax(hs, func, data, 0)) { - CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n", - hs->hs_name, i++); - } - cfs_hash_for_each_exit(hs); - return 0; -} -EXPORT_SYMBOL(cfs_hash_for_each_empty); - -void -cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex, - cfs_hash_for_each_cb_t func, void *data) -{ - struct hlist_head *hhead; - struct hlist_node *hnode; - struct cfs_hash_bd bd; - - cfs_hash_for_each_enter(hs); - cfs_hash_lock(hs, 0); - if (hindex >= CFS_HASH_NHLIST(hs)) - goto out; - - cfs_hash_bd_index_set(hs, hindex, &bd); - - cfs_hash_bd_lock(hs, &bd, 0); - hhead = cfs_hash_bd_hhead(hs, &bd); - hlist_for_each(hnode, hhead) { - if (func(hs, &bd, hnode, data)) - break; - } - cfs_hash_bd_unlock(hs, &bd, 0); -out: - cfs_hash_unlock(hs, 0); - cfs_hash_for_each_exit(hs); -} -EXPORT_SYMBOL(cfs_hash_hlist_for_each); - -/* - * For each item in the libcfs hash @hs which matches the @key call - * the passed callback @func and pass to it as an argument each hash - * item and the private @data. During the callback the bucket lock - * is held so the callback must never sleep. 
- */ -void -cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, - cfs_hash_for_each_cb_t func, void *data) -{ - struct hlist_node *hnode; - struct cfs_hash_bd bds[2]; - unsigned int i; - - cfs_hash_lock(hs, 0); - - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); - - cfs_hash_for_each_bd(bds, 2, i) { - struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]); - - hlist_for_each(hnode, hlist) { - cfs_hash_bucket_validate(hs, &bds[i], hnode); - - if (cfs_hash_keycmp(hs, key, hnode)) { - if (func(hs, &bds[i], hnode, data)) - break; - } - } - } - - cfs_hash_dual_bd_unlock(hs, bds, 0); - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_for_each_key); - -/** - * Rehash the libcfs hash @hs to the given @bits. This can be used - * to grow the hash size when excessive chaining is detected, or to - * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH - * flag is set in @hs the libcfs hash may be dynamically rehashed - * during addition or removal if the hash's theta value exceeds - * either the hs->hs_min_theta or hs->max_theta values. By default - * these values are tuned to keep the chained hash depth small, and - * this approach assumes a reasonably uniform hashing function. The - * theta thresholds for @hs are tunable via cfs_hash_set_theta(). 
- */ -void -cfs_hash_rehash_cancel(struct cfs_hash *hs) -{ - LASSERT(cfs_hash_with_rehash(hs)); - cancel_work_sync(&hs->hs_rehash_work); -} - -void -cfs_hash_rehash(struct cfs_hash *hs, int do_rehash) -{ - int rc; - - LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs)); - - cfs_hash_lock(hs, 1); - - rc = cfs_hash_rehash_bits(hs); - if (rc <= 0) { - cfs_hash_unlock(hs, 1); - return; - } - - hs->hs_rehash_bits = rc; - if (!do_rehash) { - /* launch and return */ - queue_work(cfs_rehash_wq, &hs->hs_rehash_work); - cfs_hash_unlock(hs, 1); - return; - } - - /* rehash right now */ - cfs_hash_unlock(hs, 1); - - cfs_hash_rehash_worker(&hs->hs_rehash_work); -} - -static int -cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old) -{ - struct cfs_hash_bd new; - struct hlist_head *hhead; - struct hlist_node *hnode; - struct hlist_node *pos; - void *key; - int c = 0; - - /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */ - cfs_hash_bd_for_each_hlist(hs, old, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - key = cfs_hash_key(hs, hnode); - LASSERT(key); - /* Validate hnode is in the correct bucket. */ - cfs_hash_bucket_validate(hs, old, hnode); - /* - * Delete from old hash bucket; move to new bucket. - * ops->hs_key must be defined. 
- */ - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, &new); - cfs_hash_bd_move_locked(hs, old, &new, hnode); - c++; - } - } - - return c; -} - -static void -cfs_hash_rehash_worker(struct work_struct *work) -{ - struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_rehash_work); - struct cfs_hash_bucket **bkts; - struct cfs_hash_bd bd; - unsigned int old_size; - unsigned int new_size; - int bsize; - int count = 0; - int rc = 0; - int i; - - LASSERT(hs && cfs_hash_with_rehash(hs)); - - cfs_hash_lock(hs, 0); - LASSERT(cfs_hash_is_rehashing(hs)); - - old_size = CFS_HASH_NBKT(hs); - new_size = CFS_HASH_RH_NBKT(hs); - - cfs_hash_unlock(hs, 0); - - /* - * don't need hs::hs_rwlock for hs::hs_buckets, - * because nobody can change bkt-table except me. - */ - bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets, - old_size, new_size); - cfs_hash_lock(hs, 1); - if (!bkts) { - rc = -ENOMEM; - goto out; - } - - if (bkts == hs->hs_buckets) { - bkts = NULL; /* do nothing */ - goto out; - } - - rc = __cfs_hash_theta(hs); - if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) { - /* free the new allocated bkt-table */ - old_size = new_size; - new_size = CFS_HASH_NBKT(hs); - rc = -EALREADY; - goto out; - } - - LASSERT(!hs->hs_rehash_buckets); - hs->hs_rehash_buckets = bkts; - - rc = 0; - cfs_hash_for_each_bucket(hs, &bd, i) { - if (cfs_hash_is_exiting(hs)) { - rc = -ESRCH; - /* someone wants to destroy the hash, abort now */ - if (old_size < new_size) /* OK to free old bkt-table */ - break; - /* it's shrinking, need free new bkt-table */ - hs->hs_rehash_buckets = NULL; - old_size = new_size; - new_size = CFS_HASH_NBKT(hs); - goto out; - } - - count += cfs_hash_rehash_bd(hs, &bd); - if (count < CFS_HASH_LOOP_HOG || - cfs_hash_is_iterating(hs)) { /* need to finish ASAP */ - continue; - } - - count = 0; - cfs_hash_unlock(hs, 1); - cond_resched(); - cfs_hash_lock(hs, 1); - } - - hs->hs_rehash_count++; - - bkts = hs->hs_buckets; - hs->hs_buckets = 
hs->hs_rehash_buckets; - hs->hs_rehash_buckets = NULL; - - hs->hs_cur_bits = hs->hs_rehash_bits; -out: - hs->hs_rehash_bits = 0; - bsize = cfs_hash_bkt_size(hs); - cfs_hash_unlock(hs, 1); - /* can't refer to @hs anymore because it could be destroyed */ - if (bkts) - cfs_hash_buckets_free(bkts, bsize, new_size, old_size); - if (rc) - CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc); -} - -/** - * Rehash the object referenced by @hnode in the libcfs hash @hs. The - * @old_key must be provided to locate the objects previous location - * in the hash, and the @new_key will be used to reinsert the object. - * Use this function instead of a cfs_hash_add() + cfs_hash_del() - * combo when it is critical that there is no window in time where the - * object is missing from the hash. When an object is being rehashed - * the registered cfs_hash_get() and cfs_hash_put() functions will - * not be called. - */ -void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key, - void *new_key, struct hlist_node *hnode) -{ - struct cfs_hash_bd bds[3]; - struct cfs_hash_bd old_bds[2]; - struct cfs_hash_bd new_bd; - - LASSERT(!hlist_unhashed(hnode)); - - cfs_hash_lock(hs, 0); - - cfs_hash_dual_bd_get(hs, old_key, old_bds); - cfs_hash_bd_get(hs, new_key, &new_bd); - - bds[0] = old_bds[0]; - bds[1] = old_bds[1]; - bds[2] = new_bd; - - /* NB: bds[0] and bds[1] are ordered already */ - cfs_hash_bd_order(&bds[1], &bds[2]); - cfs_hash_bd_order(&bds[0], &bds[1]); - - cfs_hash_multi_bd_lock(hs, bds, 3, 1); - if (likely(!old_bds[1].bd_bucket)) { - cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode); - } else { - cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode); - cfs_hash_bd_add_locked(hs, &new_bd, hnode); - } - /* overwrite key inside locks, otherwise may screw up with - * other operations, i.e: rehash - */ - cfs_hash_keycpy(hs, hnode, new_key); - - cfs_hash_multi_bd_unlock(hs, bds, 3, 1); - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_rehash_key); - -void 
cfs_hash_debug_header(struct seq_file *m) -{ - seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n", - CFS_HASH_BIGNAME_LEN, "name"); -} -EXPORT_SYMBOL(cfs_hash_debug_header); - -static struct cfs_hash_bucket ** -cfs_hash_full_bkts(struct cfs_hash *hs) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (!hs->hs_rehash_buckets) - return hs->hs_buckets; - - LASSERT(hs->hs_rehash_bits); - return hs->hs_rehash_bits > hs->hs_cur_bits ? - hs->hs_rehash_buckets : hs->hs_buckets; -} - -static unsigned int -cfs_hash_full_nbkt(struct cfs_hash *hs) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (!hs->hs_rehash_buckets) - return CFS_HASH_NBKT(hs); - - LASSERT(hs->hs_rehash_bits); - return hs->hs_rehash_bits > hs->hs_cur_bits ? - CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs); -} - -void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m) -{ - int dist[8] = { 0, }; - int maxdep = -1; - int maxdepb = -1; - int total = 0; - int theta; - int i; - - cfs_hash_lock(hs, 0); - theta = __cfs_hash_theta(hs); - - seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ", - CFS_HASH_BIGNAME_LEN, hs->hs_name, - 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits, - 1 << hs->hs_max_bits, - __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta), - __cfs_hash_theta_int(hs->hs_min_theta), - __cfs_hash_theta_frac(hs->hs_min_theta), - __cfs_hash_theta_int(hs->hs_max_theta), - __cfs_hash_theta_frac(hs->hs_max_theta), - hs->hs_flags, hs->hs_rehash_count); - - /* - * The distribution is a summary of the chained hash depth in - * each of the libcfs hash buckets. Each buckets hsb_count is - * divided by the hash theta value and used to generate a - * histogram of the hash distribution. A uniform hash will - * result in all hash buckets being close to the average thus - * only the first few entries in the histogram will be non-zero. 
- * If you hash function results in a non-uniform hash the will - * be observable by outlier bucks in the distribution histogram. - * - * Uniform hash distribution: 128/128/0/0/0/0/0/0 - * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1 - */ - for (i = 0; i < cfs_hash_full_nbkt(hs); i++) { - struct cfs_hash_bd bd; - - bd.bd_bucket = cfs_hash_full_bkts(hs)[i]; - cfs_hash_bd_lock(hs, &bd, 0); - if (maxdep < bd.bd_bucket->hsb_depmax) { - maxdep = bd.bd_bucket->hsb_depmax; - maxdepb = ffz(~maxdep); - } - total += bd.bd_bucket->hsb_count; - dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++; - cfs_hash_bd_unlock(hs, &bd, 0); - } - - seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb); - for (i = 0; i < 8; i++) - seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/'); - - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_debug_str); diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c deleted file mode 100644 index 3d1cf457b286..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c +++ /dev/null @@ -1,1086 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include -#include -#include -#include - -#include -#include -#include - -/** Global CPU partition table */ -struct cfs_cpt_table *cfs_cpt_tab __read_mostly; -EXPORT_SYMBOL(cfs_cpt_tab); - -/** - * modparam for setting number of partitions - * - * 0 : estimate best value based on cores or NUMA nodes - * 1 : disable multiple partitions - * >1 : specify number of partitions - */ -static int cpu_npartitions; -module_param(cpu_npartitions, int, 0444); -MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions"); - -/** - * modparam for setting CPU partitions patterns: - * - * i.e: "0[0,1,2,3] 1[4,5,6,7]", number before bracket is CPU partition ID, - * number in bracket is processor ID (core or HT) - * - * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket - * are NUMA node ID, number before bracket is CPU partition ID. 
- * - * i.e: "N", shortcut expression to create CPT from NUMA & CPU topology - * - * NB: If user specified cpu_pattern, cpu_npartitions will be ignored - */ -static char *cpu_pattern = "N"; -module_param(cpu_pattern, charp, 0444); -MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern"); - -static struct cfs_cpt_data { - /* serialize hotplug etc */ - spinlock_t cpt_lock; - /* reserved for hotplug */ - unsigned long cpt_version; - /* mutex to protect cpt_cpumask */ - struct mutex cpt_mutex; - /* scratch buffer for set/unset_node */ - cpumask_var_t cpt_cpumask; -} cpt_data; - -#define CFS_CPU_VERSION_MAGIC 0xbabecafe - -struct cfs_cpt_table * -cfs_cpt_table_alloc(unsigned int ncpt) -{ - struct cfs_cpt_table *cptab; - int i; - - cptab = kzalloc(sizeof(*cptab), GFP_NOFS); - if (!cptab) - return NULL; - - cptab->ctb_nparts = ncpt; - - cptab->ctb_nodemask = kzalloc(sizeof(*cptab->ctb_nodemask), - GFP_NOFS); - if (!zalloc_cpumask_var(&cptab->ctb_cpumask, GFP_NOFS) || - !cptab->ctb_nodemask) - goto failed; - - cptab->ctb_cpu2cpt = kvmalloc_array(num_possible_cpus(), - sizeof(cptab->ctb_cpu2cpt[0]), - GFP_KERNEL); - if (!cptab->ctb_cpu2cpt) - goto failed; - - memset(cptab->ctb_cpu2cpt, -1, - num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0])); - - cptab->ctb_parts = kvmalloc_array(ncpt, sizeof(cptab->ctb_parts[0]), - GFP_KERNEL); - if (!cptab->ctb_parts) - goto failed; - - for (i = 0; i < ncpt; i++) { - struct cfs_cpu_partition *part = &cptab->ctb_parts[i]; - - part->cpt_nodemask = kzalloc(sizeof(*part->cpt_nodemask), - GFP_NOFS); - if (!zalloc_cpumask_var(&part->cpt_cpumask, GFP_NOFS) || - !part->cpt_nodemask) - goto failed; - } - - spin_lock(&cpt_data.cpt_lock); - /* Reserved for hotplug */ - cptab->ctb_version = cpt_data.cpt_version; - spin_unlock(&cpt_data.cpt_lock); - - return cptab; - - failed: - cfs_cpt_table_free(cptab); - return NULL; -} -EXPORT_SYMBOL(cfs_cpt_table_alloc); - -void -cfs_cpt_table_free(struct cfs_cpt_table *cptab) -{ - int i; - - 
kvfree(cptab->ctb_cpu2cpt); - - for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) { - struct cfs_cpu_partition *part = &cptab->ctb_parts[i]; - - kfree(part->cpt_nodemask); - free_cpumask_var(part->cpt_cpumask); - } - - kvfree(cptab->ctb_parts); - - kfree(cptab->ctb_nodemask); - free_cpumask_var(cptab->ctb_cpumask); - - kfree(cptab); -} -EXPORT_SYMBOL(cfs_cpt_table_free); - -int -cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) -{ - char *tmp = buf; - int rc = 0; - int i; - int j; - - for (i = 0; i < cptab->ctb_nparts; i++) { - if (len > 0) { - rc = snprintf(tmp, len, "%d\t: ", i); - len -= rc; - } - - if (len <= 0) { - rc = -EFBIG; - goto out; - } - - tmp += rc; - for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) { - rc = snprintf(tmp, len, "%d ", j); - len -= rc; - if (len <= 0) { - rc = -EFBIG; - goto out; - } - tmp += rc; - } - - *tmp = '\n'; - tmp++; - len--; - } - - out: - if (rc < 0) - return rc; - - return tmp - buf; -} -EXPORT_SYMBOL(cfs_cpt_table_print); - -static void -cfs_node_to_cpumask(int node, cpumask_t *mask) -{ - const cpumask_t *tmp = cpumask_of_node(node); - - if (tmp) - cpumask_copy(mask, tmp); - else - cpumask_clear(mask); -} - -int -cfs_cpt_number(struct cfs_cpt_table *cptab) -{ - return cptab->ctb_nparts; -} -EXPORT_SYMBOL(cfs_cpt_number); - -int -cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - cpumask_weight(cptab->ctb_cpumask) : - cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask); -} -EXPORT_SYMBOL(cfs_cpt_weight); - -int -cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? 
- cpumask_any_and(cptab->ctb_cpumask, - cpu_online_mask) < nr_cpu_ids : - cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask, - cpu_online_mask) < nr_cpu_ids; -} -EXPORT_SYMBOL(cfs_cpt_online); - -cpumask_var_t * -cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - &cptab->ctb_cpumask : &cptab->ctb_parts[cpt].cpt_cpumask; -} -EXPORT_SYMBOL(cfs_cpt_cpumask); - -nodemask_t * -cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask; -} -EXPORT_SYMBOL(cfs_cpt_nodemask); - -int -cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - int node; - - LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); - - if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { - CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu); - return 0; - } - - if (cptab->ctb_cpu2cpt[cpu] != -1) { - CDEBUG(D_INFO, "CPU %d is already in partition %d\n", - cpu, cptab->ctb_cpu2cpt[cpu]); - return 0; - } - - cptab->ctb_cpu2cpt[cpu] = cpt; - - LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask)); - LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); - - cpumask_set_cpu(cpu, cptab->ctb_cpumask); - cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); - - node = cpu_to_node(cpu); - - /* first CPU of @node in this CPT table */ - if (!node_isset(node, *cptab->ctb_nodemask)) - node_set(node, *cptab->ctb_nodemask); - - /* first CPU of @node in this partition */ - if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)) - node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask); - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpu); - -void -cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - int node; - int i; - - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - if (cpu < 0 
|| cpu >= nr_cpu_ids) { - CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu); - return; - } - - if (cpt == CFS_CPT_ANY) { - /* caller doesn't know the partition ID */ - cpt = cptab->ctb_cpu2cpt[cpu]; - if (cpt < 0) { /* not set in this CPT-table */ - CDEBUG(D_INFO, "Try to unset cpu %d which is not in CPT-table %p\n", - cpt, cptab); - return; - } - - } else if (cpt != cptab->ctb_cpu2cpt[cpu]) { - CDEBUG(D_INFO, - "CPU %d is not in cpu-partition %d\n", cpu, cpt); - return; - } - - LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); - LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask)); - - cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); - cpumask_clear_cpu(cpu, cptab->ctb_cpumask); - cptab->ctb_cpu2cpt[cpu] = -1; - - node = cpu_to_node(cpu); - - LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)); - LASSERT(node_isset(node, *cptab->ctb_nodemask)); - - for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) { - /* this CPT has other CPU belonging to this node? */ - if (cpu_to_node(i) == node) - break; - } - - if (i >= nr_cpu_ids) - node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask); - - for_each_cpu(i, cptab->ctb_cpumask) { - /* this CPT-table has other CPU belonging to this node? 
*/ - if (cpu_to_node(i) == node) - break; - } - - if (i >= nr_cpu_ids) - node_clear(node, *cptab->ctb_nodemask); -} -EXPORT_SYMBOL(cfs_cpt_unset_cpu); - -int -cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - int i; - - if (!cpumask_weight(mask) || - cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { - CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", - cpt); - return 0; - } - - for_each_cpu(i, mask) { - if (!cfs_cpt_set_cpu(cptab, cpt, i)) - return 0; - } - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpumask); - -void -cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - int i; - - for_each_cpu(i, mask) - cfs_cpt_unset_cpu(cptab, cpt, i); -} -EXPORT_SYMBOL(cfs_cpt_unset_cpumask); - -int -cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - int rc; - - if (node < 0 || node >= MAX_NUMNODES) { - CDEBUG(D_INFO, - "Invalid NUMA id %d for CPU partition %d\n", node, cpt); - return 0; - } - - mutex_lock(&cpt_data.cpt_mutex); - - cfs_node_to_cpumask(node, cpt_data.cpt_cpumask); - - rc = cfs_cpt_set_cpumask(cptab, cpt, cpt_data.cpt_cpumask); - - mutex_unlock(&cpt_data.cpt_mutex); - - return rc; -} -EXPORT_SYMBOL(cfs_cpt_set_node); - -void -cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - if (node < 0 || node >= MAX_NUMNODES) { - CDEBUG(D_INFO, - "Invalid NUMA id %d for CPU partition %d\n", node, cpt); - return; - } - - mutex_lock(&cpt_data.cpt_mutex); - - cfs_node_to_cpumask(node, cpt_data.cpt_cpumask); - - cfs_cpt_unset_cpumask(cptab, cpt, cpt_data.cpt_cpumask); - - mutex_unlock(&cpt_data.cpt_mutex); -} -EXPORT_SYMBOL(cfs_cpt_unset_node); - -int -cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ - int i; - - for_each_node_mask(i, *mask) { - if (!cfs_cpt_set_node(cptab, cpt, i)) - return 0; - } - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_nodemask); - -void -cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int 
cpt, nodemask_t *mask) -{ - int i; - - for_each_node_mask(i, *mask) - cfs_cpt_unset_node(cptab, cpt, i); -} -EXPORT_SYMBOL(cfs_cpt_unset_nodemask); - -void -cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) -{ - int last; - int i; - - if (cpt == CFS_CPT_ANY) { - last = cptab->ctb_nparts - 1; - cpt = 0; - } else { - last = cpt; - } - - for (; cpt <= last; cpt++) { - for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) - cfs_cpt_unset_cpu(cptab, cpt, i); - } -} -EXPORT_SYMBOL(cfs_cpt_clear); - -int -cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) -{ - nodemask_t *mask; - int weight; - int rotor; - int node; - - /* convert CPU partition ID to HW node id */ - - if (cpt < 0 || cpt >= cptab->ctb_nparts) { - mask = cptab->ctb_nodemask; - rotor = cptab->ctb_spread_rotor++; - } else { - mask = cptab->ctb_parts[cpt].cpt_nodemask; - rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++; - } - - weight = nodes_weight(*mask); - LASSERT(weight > 0); - - rotor %= weight; - - for_each_node_mask(node, *mask) { - if (!rotor--) - return node; - } - - LBUG(); - return 0; -} -EXPORT_SYMBOL(cfs_cpt_spread_node); - -int -cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) -{ - int cpu; - int cpt; - - preempt_disable(); - cpu = smp_processor_id(); - cpt = cptab->ctb_cpu2cpt[cpu]; - - if (cpt < 0 && remap) { - /* don't return negative value for safety of upper layer, - * instead we shadow the unknown cpu to a valid partition ID - */ - cpt = cpu % cptab->ctb_nparts; - } - preempt_enable(); - return cpt; -} -EXPORT_SYMBOL(cfs_cpt_current); - -int -cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) -{ - LASSERT(cpu >= 0 && cpu < nr_cpu_ids); - - return cptab->ctb_cpu2cpt[cpu]; -} -EXPORT_SYMBOL(cfs_cpt_of_cpu); - -int -cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) -{ - cpumask_var_t *cpumask; - nodemask_t *nodemask; - int rc; - int i; - - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - if (cpt == CFS_CPT_ANY) { - cpumask = &cptab->ctb_cpumask; - 
nodemask = cptab->ctb_nodemask; - } else { - cpumask = &cptab->ctb_parts[cpt].cpt_cpumask; - nodemask = cptab->ctb_parts[cpt].cpt_nodemask; - } - - if (cpumask_any_and(*cpumask, cpu_online_mask) >= nr_cpu_ids) { - CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n", - cpt); - return -EINVAL; - } - - for_each_online_cpu(i) { - if (cpumask_test_cpu(i, *cpumask)) - continue; - - rc = set_cpus_allowed_ptr(current, *cpumask); - set_mems_allowed(*nodemask); - if (!rc) - schedule(); /* switch to allowed CPU */ - - return rc; - } - - /* don't need to set affinity because all online CPUs are covered */ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_bind); - -/** - * Choose max to \a number CPUs from \a node and set them in \a cpt. - * We always prefer to choose CPU in the same core/socket. - */ -static int -cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, - cpumask_t *node, int number) -{ - cpumask_var_t socket; - cpumask_var_t core; - int rc = 0; - int cpu; - - LASSERT(number > 0); - - if (number >= cpumask_weight(node)) { - while (!cpumask_empty(node)) { - cpu = cpumask_first(node); - - rc = cfs_cpt_set_cpu(cptab, cpt, cpu); - if (!rc) - return -EINVAL; - cpumask_clear_cpu(cpu, node); - } - return 0; - } - - /* - * Allocate scratch buffers - * As we cannot initialize a cpumask_var_t, we need - * to alloc both before we can risk trying to free either - */ - if (!zalloc_cpumask_var(&socket, GFP_NOFS)) - rc = -ENOMEM; - if (!zalloc_cpumask_var(&core, GFP_NOFS)) - rc = -ENOMEM; - if (rc) - goto out; - - while (!cpumask_empty(node)) { - cpu = cpumask_first(node); - - /* get cpumask for cores in the same socket */ - cpumask_copy(socket, topology_core_cpumask(cpu)); - cpumask_and(socket, socket, node); - - LASSERT(!cpumask_empty(socket)); - - while (!cpumask_empty(socket)) { - int i; - - /* get cpumask for hts in the same core */ - cpumask_copy(core, 
topology_sibling_cpumask(cpu)); - cpumask_and(core, core, node); - - LASSERT(!cpumask_empty(core)); - - for_each_cpu(i, core) { - cpumask_clear_cpu(i, socket); - cpumask_clear_cpu(i, node); - - rc = cfs_cpt_set_cpu(cptab, cpt, i); - if (!rc) { - rc = -EINVAL; - goto out; - } - - if (!--number) - goto out; - } - cpu = cpumask_first(socket); - } - } - -out: - free_cpumask_var(socket); - free_cpumask_var(core); - return rc; -} - -#define CPT_WEIGHT_MIN 4u - -static unsigned int -cfs_cpt_num_estimate(void) -{ - unsigned int nnode = num_online_nodes(); - unsigned int ncpu = num_online_cpus(); - unsigned int ncpt; - - if (ncpu <= CPT_WEIGHT_MIN) { - ncpt = 1; - goto out; - } - - /* generate reasonable number of CPU partitions based on total number - * of CPUs, Preferred N should be power2 and match this condition: - * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 - */ - for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1) - ; - - if (ncpt <= nnode) { /* fat numa system */ - while (nnode > ncpt) - nnode >>= 1; - - } else { /* ncpt > nnode */ - while ((nnode << 1) <= ncpt) - nnode <<= 1; - } - - ncpt = nnode; - -out: -#if (BITS_PER_LONG == 32) - /* config many CPU partitions on 32-bit system could consume - * too much memory - */ - ncpt = min(2U, ncpt); -#endif - while (ncpu % ncpt) - ncpt--; /* worst case is 1 */ - - return ncpt; -} - -static struct cfs_cpt_table * -cfs_cpt_table_create(int ncpt) -{ - struct cfs_cpt_table *cptab = NULL; - cpumask_var_t mask; - int cpt = 0; - int num; - int rc; - int i; - - rc = cfs_cpt_num_estimate(); - if (ncpt <= 0) - ncpt = rc; - - if (ncpt > num_online_cpus() || ncpt > 4 * rc) { - CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n", - ncpt, rc); - } - - if (num_online_cpus() % ncpt) { - CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n", - 
(int)num_online_cpus(), ncpt); - goto failed; - } - - cptab = cfs_cpt_table_alloc(ncpt); - if (!cptab) { - CERROR("Failed to allocate CPU map(%d)\n", ncpt); - goto failed; - } - - num = num_online_cpus() / ncpt; - if (!num) { - CERROR("CPU changed while setting CPU partition\n"); - goto failed; - } - - if (!zalloc_cpumask_var(&mask, GFP_NOFS)) { - CERROR("Failed to allocate scratch cpumask\n"); - goto failed; - } - - for_each_online_node(i) { - cfs_node_to_cpumask(i, mask); - - while (!cpumask_empty(mask)) { - struct cfs_cpu_partition *part; - int n; - - /* - * Each emulated NUMA node has all allowed CPUs in - * the mask. - * End loop when all partitions have assigned CPUs. - */ - if (cpt == ncpt) - break; - - part = &cptab->ctb_parts[cpt]; - - n = num - cpumask_weight(part->cpt_cpumask); - LASSERT(n > 0); - - rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n); - if (rc < 0) - goto failed_mask; - - LASSERT(num >= cpumask_weight(part->cpt_cpumask)); - if (num == cpumask_weight(part->cpt_cpumask)) - cpt++; - } - } - - if (cpt != ncpt || - num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) { - CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n", - cptab->ctb_nparts, num, cpt, - cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)); - goto failed_mask; - } - - free_cpumask_var(mask); - - return cptab; - - failed_mask: - free_cpumask_var(mask); - failed: - CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n", - ncpt, num_online_nodes(), num_online_cpus()); - - if (cptab) - cfs_cpt_table_free(cptab); - - return NULL; -} - -static struct cfs_cpt_table * -cfs_cpt_table_create_pattern(char *pattern) -{ - struct cfs_cpt_table *cptab; - char *str; - int node = 0; - int high; - int ncpt = 0; - int cpt; - int rc; - int c; - int i; - - str = strim(pattern); - if (*str == 'n' || *str == 'N') { - pattern = str + 1; - if (*pattern != '\0') { - node = 1; - } else { /* shortcut to 
create CPT from NUMA & CPU topology */ - node = -1; - ncpt = num_online_nodes(); - } - } - - if (!ncpt) { /* scanning bracket which is mark of partition */ - for (str = pattern;; str++, ncpt++) { - str = strchr(str, '['); - if (!str) - break; - } - } - - if (!ncpt || - (node && ncpt > num_online_nodes()) || - (!node && ncpt > num_online_cpus())) { - CERROR("Invalid pattern %s, or too many partitions %d\n", - pattern, ncpt); - return NULL; - } - - cptab = cfs_cpt_table_alloc(ncpt); - if (!cptab) { - CERROR("Failed to allocate cpu partition table\n"); - return NULL; - } - - if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */ - cpt = 0; - - for_each_online_node(i) { - if (cpt >= ncpt) { - CERROR("CPU changed while setting CPU partition table, %d/%d\n", - cpt, ncpt); - goto failed; - } - - rc = cfs_cpt_set_node(cptab, cpt++, i); - if (!rc) - goto failed; - } - return cptab; - } - - high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; - - for (str = strim(pattern), c = 0;; c++) { - struct cfs_range_expr *range; - struct cfs_expr_list *el; - char *bracket = strchr(str, '['); - int n; - - if (!bracket) { - if (*str) { - CERROR("Invalid pattern %s\n", str); - goto failed; - } - if (c != ncpt) { - CERROR("expect %d partitions but found %d\n", - ncpt, c); - goto failed; - } - break; - } - - if (sscanf(str, "%d%n", &cpt, &n) < 1) { - CERROR("Invalid cpu pattern %s\n", str); - goto failed; - } - - if (cpt < 0 || cpt >= ncpt) { - CERROR("Invalid partition id %d, total partitions %d\n", - cpt, ncpt); - goto failed; - } - - if (cfs_cpt_weight(cptab, cpt)) { - CERROR("Partition %d has already been set.\n", cpt); - goto failed; - } - - str = strim(str + n); - if (str != bracket) { - CERROR("Invalid pattern %s\n", str); - goto failed; - } - - bracket = strchr(str, ']'); - if (!bracket) { - CERROR("missing right bracket for cpt %d, %s\n", - cpt, str); - goto failed; - } - - if (cfs_expr_list_parse(str, (bracket - str) + 1, - 0, high, &el)) { - CERROR("Can't parse number 
range: %s\n", str); - goto failed; - } - - list_for_each_entry(range, &el->el_exprs, re_link) { - for (i = range->re_lo; i <= range->re_hi; i++) { - if ((i - range->re_lo) % range->re_stride) - continue; - - rc = node ? cfs_cpt_set_node(cptab, cpt, i) : - cfs_cpt_set_cpu(cptab, cpt, i); - if (!rc) { - cfs_expr_list_free(el); - goto failed; - } - } - } - - cfs_expr_list_free(el); - - if (!cfs_cpt_online(cptab, cpt)) { - CERROR("No online CPU is found on partition %d\n", cpt); - goto failed; - } - - str = strim(bracket + 1); - } - - return cptab; - - failed: - cfs_cpt_table_free(cptab); - return NULL; -} - -#ifdef CONFIG_HOTPLUG_CPU -static enum cpuhp_state lustre_cpu_online; - -static void cfs_cpu_incr_cpt_version(void) -{ - spin_lock(&cpt_data.cpt_lock); - cpt_data.cpt_version++; - spin_unlock(&cpt_data.cpt_lock); -} - -static int cfs_cpu_online(unsigned int cpu) -{ - cfs_cpu_incr_cpt_version(); - return 0; -} - -static int cfs_cpu_dead(unsigned int cpu) -{ - bool warn; - - cfs_cpu_incr_cpt_version(); - - mutex_lock(&cpt_data.cpt_mutex); - /* if all HTs in a core are offline, it may break affinity */ - cpumask_copy(cpt_data.cpt_cpumask, topology_sibling_cpumask(cpu)); - warn = cpumask_any_and(cpt_data.cpt_cpumask, - cpu_online_mask) >= nr_cpu_ids; - mutex_unlock(&cpt_data.cpt_mutex); - CDEBUG(warn ? 
D_WARNING : D_INFO, - "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n", - cpu); - return 0; -} -#endif - -void -cfs_cpu_fini(void) -{ - if (cfs_cpt_tab) - cfs_cpt_table_free(cfs_cpt_tab); - -#ifdef CONFIG_HOTPLUG_CPU - if (lustre_cpu_online > 0) - cpuhp_remove_state_nocalls(lustre_cpu_online); - cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD); -#endif - free_cpumask_var(cpt_data.cpt_cpumask); -} - -int -cfs_cpu_init(void) -{ - int ret = 0; - - LASSERT(!cfs_cpt_tab); - - memset(&cpt_data, 0, sizeof(cpt_data)); - - if (!zalloc_cpumask_var(&cpt_data.cpt_cpumask, GFP_NOFS)) { - CERROR("Failed to allocate scratch buffer\n"); - return -1; - } - - spin_lock_init(&cpt_data.cpt_lock); - mutex_init(&cpt_data.cpt_mutex); - -#ifdef CONFIG_HOTPLUG_CPU - ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD, - "staging/lustre/cfe:dead", NULL, - cfs_cpu_dead); - if (ret < 0) - goto failed; - ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, - "staging/lustre/cfe:online", - cfs_cpu_online, NULL); - if (ret < 0) - goto failed; - lustre_cpu_online = ret; -#endif - ret = -EINVAL; - - if (*cpu_pattern) { - char *cpu_pattern_dup = kstrdup(cpu_pattern, GFP_KERNEL); - - if (!cpu_pattern_dup) { - CERROR("Failed to duplicate cpu_pattern\n"); - goto failed; - } - - cfs_cpt_tab = cfs_cpt_table_create_pattern(cpu_pattern_dup); - kfree(cpu_pattern_dup); - if (!cfs_cpt_tab) { - CERROR("Failed to create cptab from pattern %s\n", - cpu_pattern); - goto failed; - } - - } else { - cfs_cpt_tab = cfs_cpt_table_create(cpu_npartitions); - if (!cfs_cpt_tab) { - CERROR("Failed to create ptable with npartitions %d\n", - cpu_npartitions); - goto failed; - } - } - - spin_lock(&cpt_data.cpt_lock); - if (cfs_cpt_tab->ctb_version != cpt_data.cpt_version) { - spin_unlock(&cpt_data.cpt_lock); - CERROR("CPU hotplug/unplug during setup\n"); - goto failed; - } - spin_unlock(&cpt_data.cpt_lock); - - LCONSOLE(0, "HW nodes: %d, HW CPU cores: %d, npartitions: 
%d\n", - num_online_nodes(), num_online_cpus(), - cfs_cpt_number(cfs_cpt_tab)); - return 0; - - failed: - cfs_cpu_fini(); - return ret; -} diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c deleted file mode 100644 index 223505c37545..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c +++ /dev/null @@ -1,155 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * GPL HEADER END - */ -/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include -#include -#include - -/** destroy cpu-partition lock, see libcfs_private.h for more detail */ -void -cfs_percpt_lock_free(struct cfs_percpt_lock *pcl) -{ - LASSERT(pcl->pcl_locks); - LASSERT(!pcl->pcl_locked); - - cfs_percpt_free(pcl->pcl_locks); - kfree(pcl); -} -EXPORT_SYMBOL(cfs_percpt_lock_free); - -/** - * create cpu-partition lock, see libcfs_private.h for more detail. 
- * - * cpu-partition lock is designed for large-scale SMP system, so we need to - * reduce cacheline conflict as possible as we can, that's the - * reason we always allocate cacheline-aligned memory block. - */ -struct cfs_percpt_lock * -cfs_percpt_lock_create(struct cfs_cpt_table *cptab, - struct lock_class_key *keys) -{ - struct cfs_percpt_lock *pcl; - spinlock_t *lock; - int i; - - /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */ - pcl = kzalloc(sizeof(*pcl), GFP_NOFS); - if (!pcl) - return NULL; - - pcl->pcl_cptab = cptab; - pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock)); - if (!pcl->pcl_locks) { - kfree(pcl); - return NULL; - } - - if (!keys) - CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n"); - - cfs_percpt_for_each(lock, i, pcl->pcl_locks) { - spin_lock_init(lock); - if (keys) - lockdep_set_class(lock, &keys[i]); - } - - return pcl; -} -EXPORT_SYMBOL(cfs_percpt_lock_create); - -/** - * lock a CPU partition - * - * \a index != CFS_PERCPT_LOCK_EX - * hold private lock indexed by \a index - * - * \a index == CFS_PERCPT_LOCK_EX - * exclusively lock @pcl and nobody can take private lock - */ -void -cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) - __acquires(pcl->pcl_locks) -{ - int ncpt = cfs_cpt_number(pcl->pcl_cptab); - int i; - - LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt); - - if (ncpt == 1) { - index = 0; - } else { /* serialize with exclusive lock */ - while (pcl->pcl_locked) - cpu_relax(); - } - - if (likely(index != CFS_PERCPT_LOCK_EX)) { - spin_lock(pcl->pcl_locks[index]); - return; - } - - /* exclusive lock request */ - for (i = 0; i < ncpt; i++) { - spin_lock(pcl->pcl_locks[i]); - if (!i) { - LASSERT(!pcl->pcl_locked); - /* nobody should take private lock after this - * so I wouldn't starve for too long time - */ - pcl->pcl_locked = 1; - } - } -} -EXPORT_SYMBOL(cfs_percpt_lock); - -/** unlock a CPU partition */ -void -cfs_percpt_unlock(struct 
cfs_percpt_lock *pcl, int index) - __releases(pcl->pcl_locks) -{ - int ncpt = cfs_cpt_number(pcl->pcl_cptab); - int i; - - index = ncpt == 1 ? 0 : index; - - if (likely(index != CFS_PERCPT_LOCK_EX)) { - spin_unlock(pcl->pcl_locks[index]); - return; - } - - for (i = ncpt - 1; i >= 0; i--) { - if (!i) { - LASSERT(pcl->pcl_locked); - pcl->pcl_locked = 0; - } - spin_unlock(pcl->pcl_locks[i]); - } -} -EXPORT_SYMBOL(cfs_percpt_unlock); diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c deleted file mode 100644 index 2d533be9bb30..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include -#include -#include -#include - -struct cfs_var_array { - unsigned int va_count; /* # of buffers */ - unsigned int va_size; /* size of each var */ - struct cfs_cpt_table *va_cptab; /* cpu partition table */ - void *va_ptrs[0]; /* buffer addresses */ -}; - -/* - * free per-cpu data, see more detail in cfs_percpt_free - */ -void -cfs_percpt_free(void *vars) -{ - struct cfs_var_array *arr; - int i; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - for (i = 0; i < arr->va_count; i++) - kfree(arr->va_ptrs[i]); - - kvfree(arr); -} -EXPORT_SYMBOL(cfs_percpt_free); - -/* - * allocate per cpu-partition variables, returned value is an array of pointers, - * variable can be indexed by CPU partition ID, i.e: - * - * arr = cfs_percpt_alloc(cfs_cpu_pt, size); - * then caller can access memory block for CPU 0 by arr[0], - * memory block for CPU 1 by arr[1]... - * memory block for CPU N by arr[N]... - * - * cacheline aligned. 
- */ -void * -cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size) -{ - struct cfs_var_array *arr; - int count; - int i; - - count = cfs_cpt_number(cptab); - - arr = kvzalloc(offsetof(struct cfs_var_array, va_ptrs[count]), - GFP_KERNEL); - if (!arr) - return NULL; - - size = L1_CACHE_ALIGN(size); - arr->va_size = size; - arr->va_count = count; - arr->va_cptab = cptab; - - for (i = 0; i < count; i++) { - arr->va_ptrs[i] = kzalloc_node(size, GFP_KERNEL, - cfs_cpt_spread_node(cptab, i)); - if (!arr->va_ptrs[i]) { - cfs_percpt_free((void *)&arr->va_ptrs[0]); - return NULL; - } - } - - return (void *)&arr->va_ptrs[0]; -} -EXPORT_SYMBOL(cfs_percpt_alloc); - -/* - * return number of CPUs (or number of elements in per-cpu data) - * according to cptab of @vars - */ -int -cfs_percpt_number(void *vars) -{ - struct cfs_var_array *arr; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - return arr->va_count; -} -EXPORT_SYMBOL(cfs_percpt_number); - -/* - * free variable array, see more detail in cfs_array_alloc - */ -void -cfs_array_free(void *vars) -{ - struct cfs_var_array *arr; - int i; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - for (i = 0; i < arr->va_count; i++) { - if (!arr->va_ptrs[i]) - continue; - - kvfree(arr->va_ptrs[i]); - } - kvfree(arr); -} -EXPORT_SYMBOL(cfs_array_free); - -/* - * allocate a variable array, returned value is an array of pointers. - * Caller can specify length of array by @count, @size is size of each - * memory block in array. 
- */ -void * -cfs_array_alloc(int count, unsigned int size) -{ - struct cfs_var_array *arr; - int i; - - arr = kvmalloc(offsetof(struct cfs_var_array, va_ptrs[count]), GFP_KERNEL); - if (!arr) - return NULL; - - arr->va_count = count; - arr->va_size = size; - - for (i = 0; i < count; i++) { - arr->va_ptrs[i] = kvzalloc(size, GFP_KERNEL); - - if (!arr->va_ptrs[i]) { - cfs_array_free((void *)&arr->va_ptrs[0]); - return NULL; - } - } - - return (void *)&arr->va_ptrs[0]; -} -EXPORT_SYMBOL(cfs_array_alloc); diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c deleted file mode 100644 index e1fb1263e3ae..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c +++ /dev/null @@ -1,562 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * String manipulation functions. 
- * - * libcfs/libcfs/libcfs_string.c - * - * Author: Nathan Rutman - */ - -#include -#include -#include -#include -#include -#include -#include - -/* Convert a text string to a bitmask */ -int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), - int *oldmask, int minmask, int allmask) -{ - const char *debugstr; - char op = '\0'; - int newmask = minmask, i, len, found = 0; - - /* must be a list of tokens separated by whitespace - * and optionally an operator ('+' or '-'). If an operator - * appears first in , '*oldmask' is used as the starting point - * (relative), otherwise minmask is used (absolute). An operator - * applies to all following tokens up to the next operator. - */ - while (*str != '\0') { - while (isspace(*str)) - str++; - if (*str == '\0') - break; - if (*str == '+' || *str == '-') { - op = *str++; - if (!found) - /* only if first token is relative */ - newmask = *oldmask; - while (isspace(*str)) - str++; - if (*str == '\0') /* trailing op */ - return -EINVAL; - } - - /* find token length */ - len = 0; - while (str[len] != '\0' && !isspace(str[len]) && - str[len] != '+' && str[len] != '-') - len++; - - /* match token */ - found = 0; - for (i = 0; i < 32; i++) { - debugstr = bit2str(i); - if (debugstr && strlen(debugstr) == len && - !strncasecmp(str, debugstr, len)) { - if (op == '-') - newmask &= ~(1 << i); - else - newmask |= (1 << i); - found = 1; - break; - } - } - if (!found && len == 3 && - !strncasecmp(str, "ALL", len)) { - if (op == '-') - newmask = minmask; - else - newmask = allmask; - found = 1; - } - if (!found) { - CWARN("unknown mask '%.*s'.\n" - "mask usage: [+|-] ...\n", len, str); - return -EINVAL; - } - str += len; - } - - *oldmask = newmask; - return 0; -} - -/* get the first string out of @str */ -char *cfs_firststr(char *str, size_t size) -{ - size_t i = 0; - char *end; - - /* trim leading spaces */ - while (i < size && *str && isspace(*str)) { - ++i; - ++str; - } - - /* string with all spaces */ - if (*str == '\0') - 
goto out; - - end = str; - while (i < size && *end != '\0' && !isspace(*end)) { - ++i; - ++end; - } - - *end = '\0'; -out: - return str; -} -EXPORT_SYMBOL(cfs_firststr); - -/** - * Extracts tokens from strings. - * - * Looks for \a delim in string \a next, sets \a res to point to - * substring before the delimiter, sets \a next right after the found - * delimiter. - * - * \retval 1 if \a res points to a string of non-whitespace characters - * \retval 0 otherwise - */ -int -cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res) -{ - char *end; - - if (!next->ls_str) - return 0; - - /* skip leading white spaces */ - while (next->ls_len) { - if (!isspace(*next->ls_str)) - break; - next->ls_str++; - next->ls_len--; - } - - if (!next->ls_len) /* whitespaces only */ - return 0; - - if (*next->ls_str == delim) { - /* first non-writespace is the delimiter */ - return 0; - } - - res->ls_str = next->ls_str; - end = memchr(next->ls_str, delim, next->ls_len); - if (!end) { - /* there is no the delimeter in the string */ - end = next->ls_str + next->ls_len; - next->ls_str = NULL; - } else { - next->ls_str = end + 1; - next->ls_len -= (end - res->ls_str + 1); - } - - /* skip ending whitespaces */ - while (--end != res->ls_str) { - if (!isspace(*end)) - break; - } - - res->ls_len = end - res->ls_str + 1; - return 1; -} -EXPORT_SYMBOL(cfs_gettok); - -/** - * Converts string to integer. - * - * Accepts decimal and hexadecimal number recordings. - * - * \retval 1 if first \a nob chars of \a str convert to decimal or - * hexadecimal integer in the range [\a min, \a max] - * \retval 0 otherwise - */ -int -cfs_str2num_check(char *str, int nob, unsigned int *num, - unsigned int min, unsigned int max) -{ - bool all_numbers = true; - char *endp, cache; - int rc; - - /** - * kstrouint can only handle strings composed - * of only numbers. We need to scan the string - * passed in for the first non-digit character - * and end the string at that location. 
If we - * don't find any non-digit character we still - * need to place a '\0' at position nob since - * we are not interested in the rest of the - * string which is longer than nob in size. - * After we are done the character at the - * position we placed '\0' must be restored. - */ - for (endp = str; endp < str + nob; endp++) { - if (!isdigit(*endp)) { - all_numbers = false; - break; - } - } - cache = *endp; - *endp = '\0'; - - rc = kstrtouint(str, 10, num); - *endp = cache; - if (rc || !all_numbers) - return 0; - - return (*num >= min && *num <= max); -} -EXPORT_SYMBOL(cfs_str2num_check); - -/** - * Parses \ token of the syntax. If \a bracketed is false, - * \a src should only have a single token which can be \ or \* - * - * \retval pointer to allocated range_expr and initialized - * range_expr::re_lo, range_expr::re_hi and range_expr:re_stride if \a - `* src parses to - * \ | - * \ '-' \ | - * \ '-' \ '/' \ - * \retval 0 will be returned if it can be parsed, otherwise -EINVAL or - * -ENOMEM will be returned. 
- */ -static int -cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max, - int bracketed, struct cfs_range_expr **expr) -{ - struct cfs_range_expr *re; - struct cfs_lstr tok; - - re = kzalloc(sizeof(*re), GFP_NOFS); - if (!re) - return -ENOMEM; - - if (src->ls_len == 1 && src->ls_str[0] == '*') { - re->re_lo = min; - re->re_hi = max; - re->re_stride = 1; - goto out; - } - - if (cfs_str2num_check(src->ls_str, src->ls_len, - &re->re_lo, min, max)) { - /* is parsed */ - re->re_hi = re->re_lo; - re->re_stride = 1; - goto out; - } - - if (!bracketed || !cfs_gettok(src, '-', &tok)) - goto failed; - - if (!cfs_str2num_check(tok.ls_str, tok.ls_len, - &re->re_lo, min, max)) - goto failed; - - /* - */ - if (cfs_str2num_check(src->ls_str, src->ls_len, - &re->re_hi, min, max)) { - /* - is parsed */ - re->re_stride = 1; - goto out; - } - - /* go to check '-' '/' */ - if (cfs_gettok(src, '/', &tok)) { - if (!cfs_str2num_check(tok.ls_str, tok.ls_len, - &re->re_hi, min, max)) - goto failed; - - /* - / ... */ - if (cfs_str2num_check(src->ls_str, src->ls_len, - &re->re_stride, min, max)) { - /* - / is parsed */ - goto out; - } - } - - out: - *expr = re; - return 0; - - failed: - kfree(re); - return -EINVAL; -} - -/** - * Print the range expression \a re into specified \a buffer. - * If \a bracketed is true, expression does not need additional - * brackets. 
- * - * \retval number of characters written - */ -static int -cfs_range_expr_print(char *buffer, int count, struct cfs_range_expr *expr, - bool bracketed) -{ - int i; - char s[] = "["; - char e[] = "]"; - - if (bracketed) { - s[0] = '\0'; - e[0] = '\0'; - } - - if (expr->re_lo == expr->re_hi) - i = scnprintf(buffer, count, "%u", expr->re_lo); - else if (expr->re_stride == 1) - i = scnprintf(buffer, count, "%s%u-%u%s", - s, expr->re_lo, expr->re_hi, e); - else - i = scnprintf(buffer, count, "%s%u-%u/%u%s", - s, expr->re_lo, expr->re_hi, expr->re_stride, e); - return i; -} - -/** - * Print a list of range expressions (\a expr_list) into specified \a buffer. - * If the list contains several expressions, separate them with comma - * and surround the list with brackets. - * - * \retval number of characters written - */ -int -cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list) -{ - struct cfs_range_expr *expr; - int i = 0, j = 0; - int numexprs = 0; - - if (count <= 0) - return 0; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) - numexprs++; - - if (numexprs > 1) - i += scnprintf(buffer + i, count - i, "["); - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - if (j++) - i += scnprintf(buffer + i, count - i, ","); - i += cfs_range_expr_print(buffer + i, count - i, expr, - numexprs > 1); - } - - if (numexprs > 1) - i += scnprintf(buffer + i, count - i, "]"); - - return i; -} -EXPORT_SYMBOL(cfs_expr_list_print); - -/** - * Matches value (\a value) against ranges expression list \a expr_list. 
- * - * \retval 1 if \a value matches - * \retval 0 otherwise - */ -int -cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list) -{ - struct cfs_range_expr *expr; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - if (value >= expr->re_lo && value <= expr->re_hi && - !((value - expr->re_lo) % expr->re_stride)) - return 1; - } - - return 0; -} -EXPORT_SYMBOL(cfs_expr_list_match); - -/** - * Convert express list (\a expr_list) to an array of all matched values - * - * \retval N N is total number of all matched values - * \retval 0 if expression list is empty - * \retval < 0 for failure - */ -int -cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, u32 **valpp) -{ - struct cfs_range_expr *expr; - u32 *val; - int count = 0; - int i; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - for (i = expr->re_lo; i <= expr->re_hi; i++) { - if (!((i - expr->re_lo) % expr->re_stride)) - count++; - } - } - - if (!count) /* empty expression list */ - return 0; - - if (count > max) { - CERROR("Number of values %d exceeds max allowed %d\n", - max, count); - return -EINVAL; - } - - val = kvmalloc_array(count, sizeof(val[0]), GFP_KERNEL | __GFP_ZERO); - if (!val) - return -ENOMEM; - - count = 0; - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - for (i = expr->re_lo; i <= expr->re_hi; i++) { - if (!((i - expr->re_lo) % expr->re_stride)) - val[count++] = i; - } - } - - *valpp = val; - return count; -} -EXPORT_SYMBOL(cfs_expr_list_values); - -/** - * Frees cfs_range_expr structures of \a expr_list. - * - * \retval none - */ -void -cfs_expr_list_free(struct cfs_expr_list *expr_list) -{ - while (!list_empty(&expr_list->el_exprs)) { - struct cfs_range_expr *expr; - - expr = list_entry(expr_list->el_exprs.next, - struct cfs_range_expr, re_link); - list_del(&expr->re_link); - kfree(expr); - } - - kfree(expr_list); -} -EXPORT_SYMBOL(cfs_expr_list_free); - -/** - * Parses \ token of the syntax. 
- * - * \retval 0 if \a str parses to \ | \ - * \retval -errno otherwise - */ -int -cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max, - struct cfs_expr_list **elpp) -{ - struct cfs_expr_list *expr_list; - struct cfs_range_expr *expr; - struct cfs_lstr src; - int rc; - - expr_list = kzalloc(sizeof(*expr_list), GFP_NOFS); - if (!expr_list) - return -ENOMEM; - - src.ls_str = str; - src.ls_len = len; - - INIT_LIST_HEAD(&expr_list->el_exprs); - - if (src.ls_str[0] == '[' && - src.ls_str[src.ls_len - 1] == ']') { - src.ls_str++; - src.ls_len -= 2; - - rc = -EINVAL; - while (src.ls_str) { - struct cfs_lstr tok; - - if (!cfs_gettok(&src, ',', &tok)) { - rc = -EINVAL; - break; - } - - rc = cfs_range_expr_parse(&tok, min, max, 1, &expr); - if (rc) - break; - - list_add_tail(&expr->re_link, &expr_list->el_exprs); - } - } else { - rc = cfs_range_expr_parse(&src, min, max, 0, &expr); - if (!rc) - list_add_tail(&expr->re_link, &expr_list->el_exprs); - } - - if (rc) - cfs_expr_list_free(expr_list); - else - *elpp = expr_list; - - return rc; -} -EXPORT_SYMBOL(cfs_expr_list_parse); - -/** - * Frees cfs_expr_list structures of \a list. - * - * For each struct cfs_expr_list structure found on \a list it frees - * range_expr list attached to it and frees the cfs_expr_list itself. 
- * - * \retval none - */ -void -cfs_expr_list_free_list(struct list_head *list) -{ - struct cfs_expr_list *el; - - while (!list_empty(list)) { - el = list_entry(list->next, struct cfs_expr_list, el_link); - list_del(&el->el_link); - cfs_expr_list_free(el); - } -} -EXPORT_SYMBOL(cfs_expr_list_free_list); diff --git a/drivers/staging/lustre/lnet/libcfs/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux-crypto-adler.c deleted file mode 100644 index db81ed527452..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/linux-crypto-adler.c +++ /dev/null @@ -1,139 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - */ - -/* - * This is crypto api shash wrappers to zlib_adler32. 
- */ - -#include -#include -#include -#include "linux-crypto.h" - -#define CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -static int adler32_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = 1; - - return 0; -} - -static int adler32_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - *mctx = *(u32 *)key; - return 0; -} - -static int adler32_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *cksump = shash_desc_ctx(desc); - - *cksump = *mctx; - - return 0; -} - -static int adler32_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - u32 *cksump = shash_desc_ctx(desc); - - *cksump = zlib_adler32(*cksump, data, len); - return 0; -} - -static int __adler32_finup(u32 *cksump, const u8 *data, unsigned int len, - u8 *out) -{ - *(u32 *)out = zlib_adler32(*cksump, data, len); - return 0; -} - -static int adler32_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __adler32_finup(shash_desc_ctx(desc), data, len, out); -} - -static int adler32_final(struct shash_desc *desc, u8 *out) -{ - u32 *cksump = shash_desc_ctx(desc); - - *(u32 *)out = *cksump; - return 0; -} - -static int adler32_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len, - out); -} - -static struct shash_alg alg = { - .setkey = adler32_setkey, - .init = adler32_init, - .update = adler32_update, - .final = adler32_final, - .finup = adler32_finup, - .digest = adler32_digest, - .descsize = sizeof(u32), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "adler32", - .cra_driver_name = "adler32-zlib", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize 
= sizeof(u32), - .cra_module = THIS_MODULE, - .cra_init = adler32_cra_init, - } -}; - -int cfs_crypto_adler32_register(void) -{ - return crypto_register_shash(&alg); -} - -void cfs_crypto_adler32_unregister(void) -{ - crypto_unregister_shash(&alg); -} diff --git a/drivers/staging/lustre/lnet/libcfs/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux-crypto.c deleted file mode 100644 index 21ff9bf6da47..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/linux-crypto.c +++ /dev/null @@ -1,447 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - * - * Copyright (c) 2012, Intel Corporation. - */ - -#include -#include -#include -#include -#include -#include -#include "linux-crypto.h" - -/** - * Array of hash algorithm speed in MByte per second - */ -static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX]; - -/** - * Initialize the state descriptor for the specified hash algorithm. 
- * - * An internal routine to allocate the hash-specific state in \a req for - * use with cfs_crypto_hash_digest() to compute the hash of a single message, - * though possibly in multiple chunks. The descriptor internal state should - * be freed with cfs_crypto_hash_final(). - * - * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*) - * \param[out] type pointer to the hash description in hash_types[] - * array - * \param[in,out] req hash state descriptor to be initialized - * \param[in] key initial hash value/state, NULL to use default - * value - * \param[in] key_len length of \a key - * - * \retval 0 on success - * \retval negative errno on failure - */ -static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg, - const struct cfs_crypto_hash_type **type, - struct ahash_request **req, - unsigned char *key, - unsigned int key_len) -{ - struct crypto_ahash *tfm; - int err = 0; - - *type = cfs_crypto_hash_type(hash_alg); - - if (!*type) { - CWARN("Unsupported hash algorithm id = %d, max id is %d\n", - hash_alg, CFS_HASH_ALG_MAX); - return -EINVAL; - } - tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC); - - if (IS_ERR(tfm)) { - CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n", - (*type)->cht_name); - return PTR_ERR(tfm); - } - - *req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!*req) { - CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n", - (*type)->cht_name); - crypto_free_ahash(tfm); - return -ENOMEM; - } - - ahash_request_set_callback(*req, 0, NULL, NULL); - - if (key) - err = crypto_ahash_setkey(tfm, key, key_len); - else if ((*type)->cht_key) - err = crypto_ahash_setkey(tfm, - (unsigned char *)&((*type)->cht_key), - (*type)->cht_size); - - if (err) { - ahash_request_free(*req); - crypto_free_ahash(tfm); - return err; - } - - CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n", - crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm), - cfs_crypto_hash_speeds[hash_alg]); - - err = crypto_ahash_init(*req); - if 
(err) { - ahash_request_free(*req); - crypto_free_ahash(tfm); - } - return err; -} - -/** - * Calculate hash digest for the passed buffer. - * - * This should be used when computing the hash on a single contiguous buffer. - * It combines the hash initialization, computation, and cleanup. - * - * \param[in] hash_alg id of hash algorithm (CFS_HASH_ALG_*) - * \param[in] buf data buffer on which to compute hash - * \param[in] buf_len length of \a buf in bytes - * \param[in] key initial value/state for algorithm, - * if \a key = NULL use default initial value - * \param[in] key_len length of \a key in bytes - * \param[out] hash pointer to computed hash value, - * if \a hash = NULL then \a hash_len is to digest - * size in bytes, retval -ENOSPC - * \param[in,out] hash_len size of \a hash buffer - * - * \retval -EINVAL \a buf, \a buf_len, \a hash_len, - * \a hash_alg invalid - * \retval -ENOENT \a hash_alg is unsupported - * \retval -ENOSPC \a hash is NULL, or \a hash_len less than - * digest size - * \retval 0 for success - * \retval negative errno for other errors from lower - * layers. 
- */ -int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg, - const void *buf, unsigned int buf_len, - unsigned char *key, unsigned int key_len, - unsigned char *hash, unsigned int *hash_len) -{ - struct scatterlist sl; - struct ahash_request *req; - int err; - const struct cfs_crypto_hash_type *type; - - if (!buf || !buf_len || !hash_len) - return -EINVAL; - - err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len); - if (err) - return err; - - if (!hash || *hash_len < type->cht_size) { - *hash_len = type->cht_size; - crypto_free_ahash(crypto_ahash_reqtfm(req)); - ahash_request_free(req); - return -ENOSPC; - } - sg_init_one(&sl, buf, buf_len); - - ahash_request_set_crypt(req, &sl, hash, sl.length); - err = crypto_ahash_digest(req); - crypto_free_ahash(crypto_ahash_reqtfm(req)); - ahash_request_free(req); - - return err; -} -EXPORT_SYMBOL(cfs_crypto_hash_digest); - -/** - * Allocate and initialize descriptor for hash algorithm. - * - * This should be used to initialize a hash descriptor for multiple calls - * to a single hash function when computing the hash across multiple - * separate buffers or pages using cfs_crypto_hash_update{,_page}(). - * - * The hash descriptor should be freed with cfs_crypto_hash_final(). 
- * - * \param[in] hash_alg algorithm id (CFS_HASH_ALG_*) - * \param[in] key initial value/state for algorithm, if \a key = NULL - * use default initial value - * \param[in] key_len length of \a key in bytes - * - * \retval pointer to descriptor of hash instance - * \retval ERR_PTR(errno) in case of error - */ -struct ahash_request * -cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg, - unsigned char *key, unsigned int key_len) -{ - struct ahash_request *req; - int err; - const struct cfs_crypto_hash_type *type; - - err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len); - - if (err) - return ERR_PTR(err); - return req; -} -EXPORT_SYMBOL(cfs_crypto_hash_init); - -/** - * Update hash digest computed on data within the given \a page - * - * \param[in] hreq hash state descriptor - * \param[in] page data page on which to compute the hash - * \param[in] offset offset within \a page at which to start hash - * \param[in] len length of data on which to compute hash - * - * \retval 0 for success - * \retval negative errno on failure - */ -int cfs_crypto_hash_update_page(struct ahash_request *req, - struct page *page, unsigned int offset, - unsigned int len) -{ - struct scatterlist sl; - - sg_init_table(&sl, 1); - sg_set_page(&sl, page, len, offset & ~PAGE_MASK); - - ahash_request_set_crypt(req, &sl, NULL, sl.length); - return crypto_ahash_update(req); -} -EXPORT_SYMBOL(cfs_crypto_hash_update_page); - -/** - * Update hash digest computed on the specified data - * - * \param[in] req hash state descriptor - * \param[in] buf data buffer on which to compute the hash - * \param[in] buf_len length of \buf on which to compute hash - * - * \retval 0 for success - * \retval negative errno on failure - */ -int cfs_crypto_hash_update(struct ahash_request *req, - const void *buf, unsigned int buf_len) -{ - struct scatterlist sl; - - sg_init_one(&sl, buf, buf_len); - - ahash_request_set_crypt(req, &sl, NULL, sl.length); - return crypto_ahash_update(req); -} 
-EXPORT_SYMBOL(cfs_crypto_hash_update); - -/** - * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor - * - * \param[in] req hash descriptor - * \param[out] hash pointer to hash buffer to store hash digest - * \param[in,out] hash_len pointer to hash buffer size, if \a req = NULL - * only free \a req instead of computing the hash - * - * \retval 0 for success - * \retval -EOVERFLOW if hash_len is too small for the hash digest - * \retval negative errno for other errors from lower layers - */ -int cfs_crypto_hash_final(struct ahash_request *req, - unsigned char *hash, unsigned int *hash_len) -{ - int err; - int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); - - if (!hash || !hash_len) { - err = 0; - goto free_ahash; - } - if (*hash_len < size) { - err = -EOVERFLOW; - goto free_ahash; - } - - ahash_request_set_crypt(req, NULL, hash, 0); - err = crypto_ahash_final(req); - if (!err) - *hash_len = size; -free_ahash: - crypto_free_ahash(crypto_ahash_reqtfm(req)); - ahash_request_free(req); - return err; -} -EXPORT_SYMBOL(cfs_crypto_hash_final); - -/** - * Compute the speed of specified hash function - * - * Run a speed test on the given hash algorithm on buffer of the given size. - * The speed is stored internally in the cfs_crypto_hash_speeds[] array, and - * is available through the cfs_crypto_hash_speed() function. 
- * - * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*) - * \param[in] buf data buffer on which to compute the hash - * \param[in] buf_len length of \buf on which to compute hash - */ -static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg) -{ - int buf_len = max(PAGE_SIZE, 1048576UL); - void *buf; - unsigned long start, end; - int bcount, err = 0; - struct page *page; - unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX]; - unsigned int hash_len = sizeof(hash); - - page = alloc_page(GFP_KERNEL); - if (!page) { - err = -ENOMEM; - goto out_err; - } - - buf = kmap(page); - memset(buf, 0xAD, PAGE_SIZE); - kunmap(page); - - for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC), - bcount = 0; time_before(jiffies, end); bcount++) { - struct ahash_request *hdesc; - int i; - - hdesc = cfs_crypto_hash_init(hash_alg, NULL, 0); - if (IS_ERR(hdesc)) { - err = PTR_ERR(hdesc); - break; - } - - for (i = 0; i < buf_len / PAGE_SIZE; i++) { - err = cfs_crypto_hash_update_page(hdesc, page, 0, - PAGE_SIZE); - if (err) - break; - } - - err = cfs_crypto_hash_final(hdesc, hash, &hash_len); - if (err) - break; - } - end = jiffies; - __free_page(page); -out_err: - if (err) { - cfs_crypto_hash_speeds[hash_alg] = err; - CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n", - cfs_crypto_hash_name(hash_alg), err); - } else { - unsigned long tmp; - - tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) * - 1000) / (1024 * 1024); - cfs_crypto_hash_speeds[hash_alg] = (int)tmp; - CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n", - cfs_crypto_hash_name(hash_alg), - cfs_crypto_hash_speeds[hash_alg]); - } -} - -/** - * hash speed in Mbytes per second for valid hash algorithm - * - * Return the performance of the specified \a hash_alg that was previously - * computed using cfs_crypto_performance_test(). 
- * - * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*) - * - * \retval positive speed of the hash function in MB/s - * \retval -ENOENT if \a hash_alg is unsupported - * \retval negative errno if \a hash_alg speed is unavailable - */ -int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg) -{ - if (hash_alg < CFS_HASH_ALG_MAX) - return cfs_crypto_hash_speeds[hash_alg]; - return -ENOENT; -} -EXPORT_SYMBOL(cfs_crypto_hash_speed); - -/** - * Run the performance test for all hash algorithms. - * - * Run the cfs_crypto_performance_test() benchmark for all of the available - * hash functions using a 1MB buffer size. This is a reasonable buffer size - * for Lustre RPCs, even if the actual RPC size is larger or smaller. - * - * Since the setup cost and computation speed of various hash algorithms is - * a function of the buffer size (and possibly internal contention of offload - * engines), this speed only represents an estimate of the actual speed under - * actual usage, but is reasonable for comparing available algorithms. - * - * The actual speeds are available via cfs_crypto_hash_speed() for later - * comparison. 
- * - * \retval 0 on success - * \retval -ENOMEM if no memory is available for test buffer - */ -static int cfs_crypto_test_hashes(void) -{ - enum cfs_crypto_hash_alg hash_alg; - - for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++) - cfs_crypto_performance_test(hash_alg); - - return 0; -} - -static int adler32; - -/** - * Register available hash functions - * - * \retval 0 - */ -int cfs_crypto_register(void) -{ - request_module("crc32c"); - - if (cfs_crypto_adler32_register() == 0) - adler32 = 1; - - /* check all algorithms and do performance test */ - cfs_crypto_test_hashes(); - return 0; -} - -/** - * Unregister previously registered hash functions - */ -void cfs_crypto_unregister(void) -{ - if (adler32) - cfs_crypto_adler32_unregister(); - adler32 = 0; -} diff --git a/drivers/staging/lustre/lnet/libcfs/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux-crypto.h deleted file mode 100644 index 5616e9ea1450..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/linux-crypto.h +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. 
- * - * GPL HEADER END - */ - -/** - * Functions for start/stop shash adler32 algorithm. - */ -int cfs_crypto_adler32_register(void); -void cfs_crypto_adler32_unregister(void); diff --git a/drivers/staging/lustre/lnet/libcfs/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux-debug.c deleted file mode 100644 index 15ab849374c2..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/linux-debug.c +++ /dev/null @@ -1,142 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * libcfs/libcfs/linux/linux-debug.c - * - * Author: Phil Schwan - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -# define DEBUG_SUBSYSTEM S_LNET - -#include "tracefile.h" - -#include - -char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall"; - -/** - * Upcall function once a Lustre log has been dumped. - * - * \param file path of the dumped log - */ -void libcfs_run_debug_log_upcall(char *file) -{ - char *argv[3]; - int rc; - static const char * const envp[] = { - "HOME=/", - "PATH=/sbin:/bin:/usr/sbin:/usr/bin", - NULL - }; - - argv[0] = lnet_debug_log_upcall; - - LASSERTF(file, "called on a null filename\n"); - argv[1] = file; /* only need to pass the path of the file */ - - argv[2] = NULL; - - rc = call_usermodehelper(argv[0], argv, (char **)envp, 1); - if (rc < 0 && rc != -ENOENT) { - CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n", - rc, argv[0], argv[1]); - } else { - CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n", - argv[0], argv[1]); - } -} - -/* coverity[+kill] */ -void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) -{ - libcfs_catastrophe = 1; - libcfs_debug_msg(msgdata, "LBUG\n"); - - if (in_interrupt()) { - panic("LBUG in interrupt.\n"); - /* not reached */ - } - - dump_stack(); - if (!libcfs_panic_on_lbug) - libcfs_debug_dumplog(); - if (libcfs_panic_on_lbug) - panic("LBUG"); - set_current_state(TASK_UNINTERRUPTIBLE); - while (1) - schedule(); -} -EXPORT_SYMBOL(lbug_with_loc); - -static int panic_notifier(struct notifier_block *self, unsigned long unused1, - void *unused2) -{ - if (libcfs_panic_in_progress) - return 0; - - libcfs_panic_in_progress = 1; - mb(); - - return 0; -} - -static struct notifier_block libcfs_panic_notifier = { - .notifier_call = panic_notifier, - .next = NULL, - .priority = 10000, -}; - -void libcfs_register_panic_notifier(void) -{ - 
atomic_notifier_chain_register(&panic_notifier_list, - &libcfs_panic_notifier); -} - -void libcfs_unregister_panic_notifier(void) -{ - atomic_notifier_chain_unregister(&panic_notifier_list, - &libcfs_panic_notifier); -} diff --git a/drivers/staging/lustre/lnet/libcfs/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux-tracefile.c deleted file mode 100644 index 347138409eba..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/linux-tracefile.c +++ /dev/null @@ -1,258 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_LNET -#define LUSTRE_TRACEFILE_PRIVATE - -#include -#include -#include "tracefile.h" - -/* percents to share the total debug memory for each type */ -static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = { - 80, /* 80% pages for CFS_TCD_TYPE_PROC */ - 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */ - 10 /* 10% pages for CFS_TCD_TYPE_IRQ */ -}; - -char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX]; - -static DECLARE_RWSEM(cfs_tracefile_sem); - -int cfs_tracefile_init_arch(void) -{ - int i; - int j; - struct cfs_trace_cpu_data *tcd; - - /* initialize trace_data */ - memset(cfs_trace_data, 0, sizeof(cfs_trace_data)); - for (i = 0; i < CFS_TCD_TYPE_MAX; i++) { - cfs_trace_data[i] = - kmalloc_array(num_possible_cpus(), - sizeof(union cfs_trace_data_union), - GFP_KERNEL); - if (!cfs_trace_data[i]) - goto out; - } - - /* arch related info initialized */ - cfs_tcd_for_each(tcd, i, j) { - spin_lock_init(&tcd->tcd_lock); - tcd->tcd_pages_factor = pages_factor[i]; - tcd->tcd_type = i; - tcd->tcd_cpu = j; - } - - for (i = 0; i < num_possible_cpus(); i++) - for (j = 0; j < 3; j++) { - cfs_trace_console_buffers[i][j] = - kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE, - GFP_KERNEL); - - if (!cfs_trace_console_buffers[i][j]) - goto out; - } - - return 0; - -out: - cfs_tracefile_fini_arch(); - pr_err("lnet: Not enough memory\n"); - return -ENOMEM; -} - -void cfs_tracefile_fini_arch(void) -{ - int i; - int j; - - for (i = 0; i < num_possible_cpus(); i++) - for (j = 0; j < 3; j++) { - kfree(cfs_trace_console_buffers[i][j]); - cfs_trace_console_buffers[i][j] = NULL; - } - - for (i = 0; cfs_trace_data[i]; i++) { - kfree(cfs_trace_data[i]); - cfs_trace_data[i] = NULL; - } -} - -void cfs_tracefile_read_lock(void) -{ - down_read(&cfs_tracefile_sem); -} - -void cfs_tracefile_read_unlock(void) -{ - up_read(&cfs_tracefile_sem); -} - -void cfs_tracefile_write_lock(void) -{ - down_write(&cfs_tracefile_sem); -} - -void cfs_tracefile_write_unlock(void) -{ - 
up_write(&cfs_tracefile_sem); -} - -enum cfs_trace_buf_type cfs_trace_buf_idx_get(void) -{ - if (in_irq()) - return CFS_TCD_TYPE_IRQ; - if (in_softirq()) - return CFS_TCD_TYPE_SOFTIRQ; - return CFS_TCD_TYPE_PROC; -} - -/* - * The walking argument indicates the locking comes from all tcd types - * iterator and we must lock it and dissable local irqs to avoid deadlocks - * with other interrupt locks that might be happening. See LU-1311 - * for details. - */ -int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking) - __acquires(&tcd->tc_lock) -{ - __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX); - if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) - spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags); - else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) - spin_lock_bh(&tcd->tcd_lock); - else if (unlikely(walking)) - spin_lock_irq(&tcd->tcd_lock); - else - spin_lock(&tcd->tcd_lock); - return 1; -} - -void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking) - __releases(&tcd->tcd_lock) -{ - __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX); - if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) - spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags); - else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) - spin_unlock_bh(&tcd->tcd_lock); - else if (unlikely(walking)) - spin_unlock_irq(&tcd->tcd_lock); - else - spin_unlock(&tcd->tcd_lock); -} - -void -cfs_set_ptldebug_header(struct ptldebug_header *header, - struct libcfs_debug_msg_data *msgdata, - unsigned long stack) -{ - struct timespec64 ts; - - ktime_get_real_ts64(&ts); - - header->ph_subsys = msgdata->msg_subsys; - header->ph_mask = msgdata->msg_mask; - header->ph_cpu_id = smp_processor_id(); - header->ph_type = cfs_trace_buf_idx_get(); - /* y2038 safe since all user space treats this as unsigned, but - * will overflow in 2106 - */ - header->ph_sec = (u32)ts.tv_sec; - header->ph_usec = ts.tv_nsec / NSEC_PER_USEC; - header->ph_stack = stack; - header->ph_pid = current->pid; - header->ph_line_num = msgdata->msg_line; - 
header->ph_extern_pid = 0; -} - -static char * -dbghdr_to_err_string(struct ptldebug_header *hdr) -{ - switch (hdr->ph_subsys) { - case S_LND: - case S_LNET: - return "LNetError"; - default: - return "LustreError"; - } -} - -static char * -dbghdr_to_info_string(struct ptldebug_header *hdr) -{ - switch (hdr->ph_subsys) { - case S_LND: - case S_LNET: - return "LNet"; - default: - return "Lustre"; - } -} - -void cfs_print_to_console(struct ptldebug_header *hdr, int mask, - const char *buf, int len, const char *file, - const char *fn) -{ - char *prefix = "Lustre", *ptype = NULL; - - if (mask & D_EMERG) { - prefix = dbghdr_to_err_string(hdr); - ptype = KERN_EMERG; - } else if (mask & D_ERROR) { - prefix = dbghdr_to_err_string(hdr); - ptype = KERN_ERR; - } else if (mask & D_WARNING) { - prefix = dbghdr_to_info_string(hdr); - ptype = KERN_WARNING; - } else if (mask & (D_CONSOLE | libcfs_printk)) { - prefix = dbghdr_to_info_string(hdr); - ptype = KERN_INFO; - } - - if (mask & D_CONSOLE) { - pr_info("%s%s: %.*s", ptype, prefix, len, buf); - } else { - pr_info("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, - hdr->ph_pid, hdr->ph_extern_pid, file, - hdr->ph_line_num, fn, len, buf); - } -} - -int cfs_trace_max_debug_mb(void) -{ - int total_mb = (totalram_pages >> (20 - PAGE_SHIFT)); - - return max(512, (total_mb * 80) / 100); -} diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c deleted file mode 100644 index 5dc7de9e6478..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/module.c +++ /dev/null @@ -1,758 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include -#include - -# define DEBUG_SUBSYSTEM S_LNET - -#include - -#include -#include -#include -#include "tracefile.h" - -struct lnet_debugfs_symlink_def { - char *name; - char *target; -}; - -static struct dentry *lnet_debugfs_root; - -BLOCKING_NOTIFIER_HEAD(libcfs_ioctl_list); -EXPORT_SYMBOL(libcfs_ioctl_list); - -static inline size_t libcfs_ioctl_packlen(struct libcfs_ioctl_data *data) -{ - size_t len = sizeof(*data); - - len += cfs_size_round(data->ioc_inllen1); - len += cfs_size_round(data->ioc_inllen2); - return len; -} - -static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data) -{ - if (data->ioc_hdr.ioc_len > BIT(30)) { - CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n"); - return true; - } - if (data->ioc_inllen1 > BIT(30)) { - CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n"); - return true; - } - if (data->ioc_inllen2 > BIT(30)) { - CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n"); - return true; - } - if (data->ioc_inlbuf1 && !data->ioc_inllen1) { - 
CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n"); - return true; - } - if (data->ioc_inlbuf2 && !data->ioc_inllen2) { - CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n"); - return true; - } - if (data->ioc_pbuf1 && !data->ioc_plen1) { - CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n"); - return true; - } - if (data->ioc_pbuf2 && !data->ioc_plen2) { - CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n"); - return true; - } - if (data->ioc_plen1 && !data->ioc_pbuf1) { - CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n"); - return true; - } - if (data->ioc_plen2 && !data->ioc_pbuf2) { - CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n"); - return true; - } - if ((u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) { - CERROR("LIBCFS ioctl: packlen != ioc_len\n"); - return true; - } - if (data->ioc_inllen1 && - data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') { - CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n"); - return true; - } - if (data->ioc_inllen2 && - data->ioc_bulk[cfs_size_round(data->ioc_inllen1) + - data->ioc_inllen2 - 1] != '\0') { - CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n"); - return true; - } - return false; -} - -static int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data) -{ - if (libcfs_ioctl_is_invalid(data)) { - CERROR("libcfs ioctl: parameter not correctly formatted\n"); - return -EINVAL; - } - - if (data->ioc_inllen1) - data->ioc_inlbuf1 = &data->ioc_bulk[0]; - - if (data->ioc_inllen2) - data->ioc_inlbuf2 = &data->ioc_bulk[0] + - cfs_size_round(data->ioc_inllen1); - - return 0; -} - -static int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp, - const struct libcfs_ioctl_hdr __user *uhdr) -{ - struct libcfs_ioctl_hdr hdr; - int err; - - if (copy_from_user(&hdr, uhdr, sizeof(hdr))) - return -EFAULT; - - if (hdr.ioc_version != LIBCFS_IOCTL_VERSION && - hdr.ioc_version != LIBCFS_IOCTL_VERSION2) { - CERROR("libcfs ioctl: version mismatch expected %#x, got %#x\n", - LIBCFS_IOCTL_VERSION, 
hdr.ioc_version); - return -EINVAL; - } - - if (hdr.ioc_len < sizeof(hdr)) { - CERROR("libcfs ioctl: user buffer too small for ioctl\n"); - return -EINVAL; - } - - if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) { - CERROR("libcfs ioctl: user buffer is too large %d/%d\n", - hdr.ioc_len, LIBCFS_IOC_DATA_MAX); - return -EINVAL; - } - - *hdr_pp = kvmalloc(hdr.ioc_len, GFP_KERNEL); - if (!*hdr_pp) - return -ENOMEM; - - if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len)) { - err = -EFAULT; - goto free; - } - - if ((*hdr_pp)->ioc_version != hdr.ioc_version || - (*hdr_pp)->ioc_len != hdr.ioc_len) { - err = -EINVAL; - goto free; - } - - return 0; - -free: - kvfree(*hdr_pp); - return err; -} - -static int libcfs_ioctl(unsigned long cmd, void __user *uparam) -{ - struct libcfs_ioctl_data *data = NULL; - struct libcfs_ioctl_hdr *hdr; - int err; - - /* 'cmd' and permissions get checked in our arch-specific caller */ - err = libcfs_ioctl_getdata(&hdr, uparam); - if (err) { - CDEBUG_LIMIT(D_ERROR, - "libcfs ioctl: data header error %d\n", err); - return err; - } - - if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) { - /* - * The libcfs_ioctl_data_adjust() function performs adjustment - * operations on the libcfs_ioctl_data structure to make - * it usable by the code. This doesn't need to be called - * for new data structures added. 
- */ - data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr); - err = libcfs_ioctl_data_adjust(data); - if (err) - goto out; - } - - CDEBUG(D_IOCTL, "libcfs ioctl cmd %lu\n", cmd); - switch (cmd) { - case IOC_LIBCFS_CLEAR_DEBUG: - libcfs_debug_clear_buffer(); - break; - - case IOC_LIBCFS_MARK_DEBUG: - if (!data || !data->ioc_inlbuf1 || - data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') { - err = -EINVAL; - goto out; - } - libcfs_debug_mark_buffer(data->ioc_inlbuf1); - break; - - default: - err = blocking_notifier_call_chain(&libcfs_ioctl_list, - cmd, hdr); - if (!(err & NOTIFY_STOP_MASK)) - /* No-one claimed the ioctl */ - err = -EINVAL; - else - err = notifier_to_errno(err); - if (!err) - if (copy_to_user(uparam, hdr, hdr->ioc_len)) - err = -EFAULT; - break; - } -out: - kvfree(hdr); - return err; -} - -static long -libcfs_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE || - _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR || - _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) { - CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n", - _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd)); - return -EINVAL; - } - - return libcfs_ioctl(cmd, (void __user *)arg); -} - -static const struct file_operations libcfs_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = libcfs_psdev_ioctl, -}; - -static struct miscdevice libcfs_dev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "lnet", - .fops = &libcfs_fops, -}; - -static int libcfs_dev_registered; - -int lprocfs_call_handler(void *data, int write, loff_t *ppos, - void __user *buffer, size_t *lenp, - int (*handler)(void *data, int write, loff_t pos, - void __user *buffer, int len)) -{ - int rc = handler(data, write, *ppos, buffer, *lenp); - - if (rc < 0) - return rc; - - if (write) { - *ppos += *lenp; - } else { - *lenp = rc; - *ppos += rc; - } - return 0; -} -EXPORT_SYMBOL(lprocfs_call_handler); - -static int __proc_dobitmasks(void 
*data, int write, - loff_t pos, void __user *buffer, int nob) -{ - const int tmpstrlen = 512; - char *tmpstr; - int rc; - unsigned int *mask = data; - int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0; - int is_printk = (mask == &libcfs_printk) ? 1 : 0; - - rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen); - if (rc < 0) - return rc; - - if (!write) { - libcfs_debug_mask2str(tmpstr, tmpstrlen, *mask, is_subsys); - rc = strlen(tmpstr); - - if (pos >= rc) { - rc = 0; - } else { - rc = cfs_trace_copyout_string(buffer, nob, - tmpstr + pos, "\n"); - } - } else { - rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob); - if (rc < 0) { - kfree(tmpstr); - return rc; - } - - rc = libcfs_debug_str2mask(mask, tmpstr, is_subsys); - /* Always print LBUG/LASSERT to console, so keep this mask */ - if (is_printk) - *mask |= D_EMERG; - } - - kfree(tmpstr); - return rc; -} - -static int proc_dobitmasks(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, - __proc_dobitmasks); -} - -static int __proc_dump_kernel(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - if (!write) - return 0; - - return cfs_trace_dump_debug_buffer_usrstr(buffer, nob); -} - -static int proc_dump_kernel(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, - __proc_dump_kernel); -} - -static int __proc_daemon_file(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - if (!write) { - int len = strlen(cfs_tracefile); - - if (pos >= len) - return 0; - - return cfs_trace_copyout_string(buffer, nob, - cfs_tracefile + pos, "\n"); - } - - return cfs_trace_daemon_command_usrstr(buffer, nob); -} - -static int proc_daemon_file(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return 
lprocfs_call_handler(table->data, write, ppos, buffer, lenp, - __proc_daemon_file); -} - -static int libcfs_force_lbug(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - if (write) - LBUG(); - return 0; -} - -static int proc_fail_loc(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int rc; - long old_fail_loc = cfs_fail_loc; - - rc = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); - if (old_fail_loc != cfs_fail_loc) - wake_up(&cfs_race_waitq); - return rc; -} - -static int __proc_cpt_table(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - char *buf = NULL; - int len = 4096; - int rc = 0; - - if (write) - return -EPERM; - - while (1) { - buf = kzalloc(len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - rc = cfs_cpt_table_print(cfs_cpt_tab, buf, len); - if (rc >= 0) - break; - - if (rc == -EFBIG) { - kfree(buf); - len <<= 1; - continue; - } - goto out; - } - - if (pos >= rc) { - rc = 0; - goto out; - } - - rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL); - out: - kfree(buf); - return rc; -} - -static int proc_cpt_table(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, - __proc_cpt_table); -} - -static struct ctl_table lnet_table[] = { - { - .procname = "debug", - .data = &libcfs_debug, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "subsystem_debug", - .data = &libcfs_subsystem_debug, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "printk", - .data = &libcfs_printk, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "cpu_partition_table", - .maxlen = 128, - .mode = 0444, - .proc_handler = &proc_cpt_table, - }, - { - .procname = "debug_log_upcall", - .data = 
lnet_debug_log_upcall, - .maxlen = sizeof(lnet_debug_log_upcall), - .mode = 0644, - .proc_handler = &proc_dostring, - }, - { - .procname = "catastrophe", - .data = &libcfs_catastrophe, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .procname = "dump_kernel", - .maxlen = 256, - .mode = 0200, - .proc_handler = &proc_dump_kernel, - }, - { - .procname = "daemon_file", - .mode = 0644, - .maxlen = 256, - .proc_handler = &proc_daemon_file, - }, - { - .procname = "force_lbug", - .data = NULL, - .maxlen = 0, - .mode = 0200, - .proc_handler = &libcfs_force_lbug - }, - { - .procname = "fail_loc", - .data = &cfs_fail_loc, - .maxlen = sizeof(cfs_fail_loc), - .mode = 0644, - .proc_handler = &proc_fail_loc - }, - { - .procname = "fail_val", - .data = &cfs_fail_val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - .procname = "fail_err", - .data = &cfs_fail_err, - .maxlen = sizeof(cfs_fail_err), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - } -}; - -static const struct lnet_debugfs_symlink_def lnet_debugfs_symlinks[] = { - { "console_ratelimit", - "/sys/module/libcfs/parameters/libcfs_console_ratelimit"}, - { "debug_path", - "/sys/module/libcfs/parameters/libcfs_debug_file_path"}, - { "panic_on_lbug", - "/sys/module/libcfs/parameters/libcfs_panic_on_lbug"}, - { "libcfs_console_backoff", - "/sys/module/libcfs/parameters/libcfs_console_backoff"}, - { "debug_mb", - "/sys/module/libcfs/parameters/libcfs_debug_mb"}, - { "console_min_delay_centisecs", - "/sys/module/libcfs/parameters/libcfs_console_min_delay"}, - { "console_max_delay_centisecs", - "/sys/module/libcfs/parameters/libcfs_console_max_delay"}, - {}, -}; - -static ssize_t lnet_debugfs_read(struct file *filp, char __user *buf, - size_t count, loff_t *ppos) -{ - struct ctl_table *table = filp->private_data; - int error; - - error = table->proc_handler(table, 0, (void __user *)buf, &count, ppos); - if (!error) - error = count; - - return 
error; -} - -static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct ctl_table *table = filp->private_data; - int error; - - error = table->proc_handler(table, 1, (void __user *)buf, &count, ppos); - if (!error) - error = count; - - return error; -} - -static const struct file_operations lnet_debugfs_file_operations_rw = { - .open = simple_open, - .read = lnet_debugfs_read, - .write = lnet_debugfs_write, - .llseek = default_llseek, -}; - -static const struct file_operations lnet_debugfs_file_operations_ro = { - .open = simple_open, - .read = lnet_debugfs_read, - .llseek = default_llseek, -}; - -static const struct file_operations lnet_debugfs_file_operations_wo = { - .open = simple_open, - .write = lnet_debugfs_write, - .llseek = default_llseek, -}; - -static const struct file_operations *lnet_debugfs_fops_select(umode_t mode) -{ - if (!(mode & 0222)) - return &lnet_debugfs_file_operations_ro; - - if (!(mode & 0444)) - return &lnet_debugfs_file_operations_wo; - - return &lnet_debugfs_file_operations_rw; -} - -void lustre_insert_debugfs(struct ctl_table *table) -{ - if (!lnet_debugfs_root) - lnet_debugfs_root = debugfs_create_dir("lnet", NULL); - - /* Even if we cannot create, just ignore it altogether) */ - if (IS_ERR_OR_NULL(lnet_debugfs_root)) - return; - - /* - * We don't save the dentry returned because we don't call - * debugfs_remove() but rather remove_recursive() - */ - for (; table->procname; table++) - debugfs_create_file(table->procname, table->mode, - lnet_debugfs_root, table, - lnet_debugfs_fops_select(table->mode)); -} -EXPORT_SYMBOL_GPL(lustre_insert_debugfs); - -static void lustre_insert_debugfs_links( - const struct lnet_debugfs_symlink_def *symlinks) -{ - for (; symlinks && symlinks->name; symlinks++) - debugfs_create_symlink(symlinks->name, lnet_debugfs_root, - symlinks->target); -} - -static void lustre_remove_debugfs(void) -{ - debugfs_remove_recursive(lnet_debugfs_root); - - 
lnet_debugfs_root = NULL; -} - -static DEFINE_MUTEX(libcfs_startup); -static int libcfs_active; - -int libcfs_setup(void) -{ - int rc = -EINVAL; - - mutex_lock(&libcfs_startup); - if (libcfs_active) - goto out; - - if (!libcfs_dev_registered) - goto err; - - rc = libcfs_debug_init(5 * 1024 * 1024); - if (rc < 0) { - pr_err("LustreError: libcfs_debug_init: %d\n", rc); - goto err; - } - - rc = cfs_cpu_init(); - if (rc) - goto err; - - cfs_rehash_wq = alloc_workqueue("cfs_rh", WQ_SYSFS, 4); - if (!cfs_rehash_wq) { - CERROR("Failed to start rehash workqueue.\n"); - rc = -ENOMEM; - goto err; - } - - rc = cfs_crypto_register(); - if (rc) { - CERROR("cfs_crypto_register: error %d\n", rc); - goto err; - } - - lustre_insert_debugfs(lnet_table); - if (!IS_ERR_OR_NULL(lnet_debugfs_root)) - lustre_insert_debugfs_links(lnet_debugfs_symlinks); - - CDEBUG(D_OTHER, "portals setup OK\n"); -out: - libcfs_active = 1; - mutex_unlock(&libcfs_startup); - return 0; -err: - cfs_crypto_unregister(); - if (cfs_rehash_wq) - destroy_workqueue(cfs_rehash_wq); - cfs_cpu_fini(); - libcfs_debug_cleanup(); - mutex_unlock(&libcfs_startup); - return rc; -} -EXPORT_SYMBOL(libcfs_setup); - -static int libcfs_init(void) -{ - int rc; - - rc = misc_register(&libcfs_dev); - if (rc) - CERROR("misc_register: error %d\n", rc); - else - libcfs_dev_registered = 1; - return rc; -} - -static void libcfs_exit(void) -{ - int rc; - - lustre_remove_debugfs(); - - if (cfs_rehash_wq) - destroy_workqueue(cfs_rehash_wq); - - cfs_crypto_unregister(); - - if (libcfs_dev_registered) - misc_deregister(&libcfs_dev); - - cfs_cpu_fini(); - - rc = libcfs_debug_cleanup(); - if (rc) - pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc); -} - -MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("Lustre helper library"); -MODULE_VERSION(LIBCFS_VERSION); -MODULE_LICENSE("GPL"); - -module_init(libcfs_init); -module_exit(libcfs_exit); diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c deleted file mode 100644 index 7ca562e156f0..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.c +++ /dev/null @@ -1,1198 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * libcfs/libcfs/tracefile.c - * - * Author: Zach Brown - * Author: Phil Schwan - */ - -#define DEBUG_SUBSYSTEM S_LNET -#define LUSTRE_TRACEFILE_PRIVATE -#define pr_fmt(fmt) "Lustre: " fmt - -#include -#include -#include -#include -#include -#include -#include -#include "tracefile.h" - -/* XXX move things up to the top, comment */ -union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned; - -char cfs_tracefile[TRACEFILE_NAME_SIZE]; -long long cfs_tracefile_size = CFS_TRACEFILE_SIZE; -static struct tracefiled_ctl trace_tctl; -static DEFINE_MUTEX(cfs_trace_thread_mutex); -static int thread_running; - -static atomic_t cfs_tage_allocated = ATOMIC_INIT(0); - -struct page_collection { - struct list_head pc_pages; - /* - * if this flag is set, collect_pages() will spill both - * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, - * only ->tcd_pages are spilled. - */ - int pc_want_daemon_pages; -}; - -struct tracefiled_ctl { - struct completion tctl_start; - struct completion tctl_stop; - wait_queue_head_t tctl_waitq; - pid_t tctl_pid; - atomic_t tctl_shutdown; -}; - -/* - * small data-structure for each page owned by tracefiled. 
- */ -struct cfs_trace_page { - /* - * page itself - */ - struct page *page; - /* - * linkage into one of the lists in trace_data_union or - * page_collection - */ - struct list_head linkage; - /* - * number of bytes used within this page - */ - unsigned int used; - /* - * cpu that owns this page - */ - unsigned short cpu; - /* - * type(context) of this page - */ - unsigned short type; -}; - -static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct cfs_trace_cpu_data *tcd); - -static inline struct cfs_trace_page * -cfs_tage_from_list(struct list_head *list) -{ - return list_entry(list, struct cfs_trace_page, linkage); -} - -static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) -{ - struct page *page; - struct cfs_trace_page *tage; - - /* My caller is trying to free memory */ - if (!in_interrupt() && (current->flags & PF_MEMALLOC)) - return NULL; - - /* - * Don't spam console with allocation failures: they will be reported - * by upper layer anyway. - */ - gfp |= __GFP_NOWARN; - page = alloc_page(gfp); - if (!page) - return NULL; - - tage = kmalloc(sizeof(*tage), gfp); - if (!tage) { - __free_page(page); - return NULL; - } - - tage->page = page; - atomic_inc(&cfs_tage_allocated); - return tage; -} - -static void cfs_tage_free(struct cfs_trace_page *tage) -{ - __free_page(tage->page); - kfree(tage); - atomic_dec(&cfs_tage_allocated); -} - -static void cfs_tage_to_tail(struct cfs_trace_page *tage, - struct list_head *queue) -{ - list_move_tail(&tage->linkage, queue); -} - -int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, - struct list_head *stock) -{ - int i; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. 
- */ - - for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++i) { - struct cfs_trace_page *tage; - - tage = cfs_tage_alloc(gfp); - if (!tage) - break; - list_add_tail(&tage->linkage, stock); - } - return i; -} - -/* return a page that has 'len' bytes left at the end */ -static struct cfs_trace_page * -cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) -{ - struct cfs_trace_page *tage; - - if (tcd->tcd_cur_pages > 0) { - __LASSERT(!list_empty(&tcd->tcd_pages)); - tage = cfs_tage_from_list(tcd->tcd_pages.prev); - if (tage->used + len <= PAGE_SIZE) - return tage; - } - - if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { - if (tcd->tcd_cur_stock_pages > 0) { - tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); - --tcd->tcd_cur_stock_pages; - list_del_init(&tage->linkage); - } else { - tage = cfs_tage_alloc(GFP_ATOMIC); - if (unlikely(!tage)) { - if (!(current->flags & PF_MEMALLOC) || - in_interrupt()) - pr_warn_ratelimited("cannot allocate a tage (%ld)\n", - tcd->tcd_cur_pages); - return NULL; - } - } - - tage->used = 0; - tage->cpu = smp_processor_id(); - tage->type = tcd->tcd_type; - list_add_tail(&tage->linkage, &tcd->tcd_pages); - tcd->tcd_cur_pages++; - - if (tcd->tcd_cur_pages > 8 && thread_running) { - struct tracefiled_ctl *tctl = &trace_tctl; - /* - * wake up tracefiled to process some pages. - */ - wake_up(&tctl->tctl_waitq); - } - return tage; - } - return NULL; -} - -static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd) -{ - int pgcount = tcd->tcd_cur_pages / 10; - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. 
- */ - - pr_warn_ratelimited("debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n", - pgcount + 1, tcd->tcd_cur_pages); - - INIT_LIST_HEAD(&pc.pc_pages); - - list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { - if (!pgcount--) - break; - - list_move_tail(&tage->linkage, &pc.pc_pages); - tcd->tcd_cur_pages--; - } - put_pages_on_tcd_daemon_list(&pc, tcd); -} - -/* return a page that has 'len' bytes left at the end */ -static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, - unsigned long len) -{ - struct cfs_trace_page *tage; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - - if (len > PAGE_SIZE) { - pr_err("cowardly refusing to write %lu bytes in a page\n", len); - return NULL; - } - - tage = cfs_trace_get_tage_try(tcd, len); - if (tage) - return tage; - if (thread_running) - cfs_tcd_shrink(tcd); - if (tcd->tcd_cur_pages > 0) { - tage = cfs_tage_from_list(tcd->tcd_pages.next); - tage->used = 0; - cfs_tage_to_tail(tage, &tcd->tcd_pages); - } - return tage; -} - -int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, - const char *format, ...) -{ - va_list args; - int rc; - - va_start(args, format); - rc = libcfs_debug_vmsg2(msgdata, format, args, NULL); - va_end(args); - - return rc; -} -EXPORT_SYMBOL(libcfs_debug_msg); - -int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, - const char *format1, va_list args, - const char *format2, ...) 
-{ - struct cfs_trace_cpu_data *tcd = NULL; - struct ptldebug_header header = { 0 }; - struct cfs_trace_page *tage; - /* string_buf is used only if tcd != NULL, and is always set then */ - char *string_buf = NULL; - char *debug_buf; - int known_size; - int needed = 85; /* average message length */ - int max_nob; - va_list ap; - int depth; - int i; - int remain; - int mask = msgdata->msg_mask; - const char *file = kbasename(msgdata->msg_file); - struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; - - tcd = cfs_trace_get_tcd(); - - /* cfs_trace_get_tcd() grabs a lock, which disables preemption and - * pins us to a particular CPU. This avoids an smp_processor_id() - * warning on Linux when debugging is enabled. - */ - cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK()); - - if (!tcd) /* arch may not log in IRQ context */ - goto console; - - if (!tcd->tcd_cur_pages) - header.ph_flags |= PH_FLAG_FIRST_RECORD; - - if (tcd->tcd_shutting_down) { - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - depth = 0; - known_size = strlen(file) + 1 + depth; - if (msgdata->msg_fn) - known_size += strlen(msgdata->msg_fn) + 1; - - if (libcfs_debug_binary) - known_size += sizeof(header); - - /* - * '2' used because vsnprintf return real size required for output - * _without_ terminating NULL. - * if needed is to small for this format. 
- */ - for (i = 0; i < 2; i++) { - tage = cfs_trace_get_tage(tcd, needed + known_size + 1); - if (!tage) { - if (needed + known_size > PAGE_SIZE) - mask |= D_ERROR; - - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - string_buf = (char *)page_address(tage->page) + - tage->used + known_size; - - max_nob = PAGE_SIZE - tage->used - known_size; - if (max_nob <= 0) { - pr_emerg("negative max_nob: %d\n", max_nob); - mask |= D_ERROR; - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - needed = 0; - if (format1) { - va_copy(ap, args); - needed = vsnprintf(string_buf, max_nob, format1, ap); - va_end(ap); - } - - if (format2) { - remain = max_nob - needed; - if (remain < 0) - remain = 0; - - va_start(ap, format2); - needed += vsnprintf(string_buf + needed, remain, - format2, ap); - va_end(ap); - } - - if (needed < max_nob) /* well. printing ok.. */ - break; - } - - if (*(string_buf + needed - 1) != '\n') - pr_info("format at %s:%d:%s doesn't end in newline\n", file, - msgdata->msg_line, msgdata->msg_fn); - - header.ph_len = known_size + needed; - debug_buf = (char *)page_address(tage->page) + tage->used; - - if (libcfs_debug_binary) { - memcpy(debug_buf, &header, sizeof(header)); - tage->used += sizeof(header); - debug_buf += sizeof(header); - } - - /* indent message according to the nesting level */ - while (depth-- > 0) { - *(debug_buf++) = '.'; - ++tage->used; - } - - strcpy(debug_buf, file); - tage->used += strlen(file) + 1; - debug_buf += strlen(file) + 1; - - if (msgdata->msg_fn) { - strcpy(debug_buf, msgdata->msg_fn); - tage->used += strlen(msgdata->msg_fn) + 1; - debug_buf += strlen(msgdata->msg_fn) + 1; - } - - __LASSERT(debug_buf == string_buf); - - tage->used += needed; - __LASSERT(tage->used <= PAGE_SIZE); - -console: - if (!(mask & libcfs_printk)) { - /* no console output requested */ - if (tcd) - cfs_trace_put_tcd(tcd); - return 1; - } - - if (cdls) { - if (libcfs_console_ratelimit && - cdls->cdls_next && /* not first time ever */ - 
!time_after(jiffies, cdls->cdls_next)) { - /* skipping a console message */ - cdls->cdls_count++; - if (tcd) - cfs_trace_put_tcd(tcd); - return 1; - } - - if (time_after(jiffies, - cdls->cdls_next + libcfs_console_max_delay + - 10 * HZ)) { - /* last timeout was a long time ago */ - cdls->cdls_delay /= libcfs_console_backoff * 4; - } else { - cdls->cdls_delay *= libcfs_console_backoff; - } - - if (cdls->cdls_delay < libcfs_console_min_delay) - cdls->cdls_delay = libcfs_console_min_delay; - else if (cdls->cdls_delay > libcfs_console_max_delay) - cdls->cdls_delay = libcfs_console_max_delay; - - /* ensure cdls_next is never zero after it's been seen */ - cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1; - } - - if (tcd) { - cfs_print_to_console(&header, mask, string_buf, needed, file, - msgdata->msg_fn); - cfs_trace_put_tcd(tcd); - } else { - string_buf = cfs_trace_get_console_buffer(); - - needed = 0; - if (format1) { - va_copy(ap, args); - needed = vsnprintf(string_buf, - CFS_TRACE_CONSOLE_BUFFER_SIZE, - format1, ap); - va_end(ap); - } - if (format2) { - remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed; - if (remain > 0) { - va_start(ap, format2); - needed += vsnprintf(string_buf + needed, remain, - format2, ap); - va_end(ap); - } - } - cfs_print_to_console(&header, mask, - string_buf, needed, file, msgdata->msg_fn); - - put_cpu(); - } - - if (cdls && cdls->cdls_count) { - string_buf = cfs_trace_get_console_buffer(); - - needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, - "Skipped %d previous similar message%s\n", - cdls->cdls_count, - (cdls->cdls_count > 1) ? 
"s" : ""); - - cfs_print_to_console(&header, mask, - string_buf, needed, file, msgdata->msg_fn); - - put_cpu(); - cdls->cdls_count = 0; - } - - return 0; -} -EXPORT_SYMBOL(libcfs_debug_vmsg2); - -void -cfs_trace_assertion_failed(const char *str, - struct libcfs_debug_msg_data *msgdata) -{ - struct ptldebug_header hdr; - - libcfs_panic_in_progress = 1; - libcfs_catastrophe = 1; - mb(); - - cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK()); - - cfs_print_to_console(&hdr, D_EMERG, str, strlen(str), - msgdata->msg_file, msgdata->msg_fn); - - panic("Lustre debug assertion failure\n"); - - /* not reached */ -} - -static void -panic_collect_pages(struct page_collection *pc) -{ - /* Do the collect_pages job on a single CPU: assumes that all other - * CPUs have been stopped during a panic. If this isn't true for some - * arch, this will have to be implemented separately in each arch. - */ - struct cfs_trace_cpu_data *tcd; - int i; - int j; - - INIT_LIST_HEAD(&pc->pc_pages); - - cfs_tcd_for_each(tcd, i, j) { - list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - - if (pc->pc_want_daemon_pages) { - list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } -} - -static void collect_pages_on_all_cpus(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - if (pc->pc_want_daemon_pages) { - list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } - } -} - -static void collect_pages(struct page_collection *pc) -{ - INIT_LIST_HEAD(&pc->pc_pages); - - if (libcfs_panic_in_progress) - panic_collect_pages(pc); - else - collect_pages_on_all_cpus(pc); -} - -static void put_pages_back_on_all_cpus(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - struct list_head *cur_head; - struct 
cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - cur_head = tcd->tcd_pages.next; - - list_for_each_entry_safe(tage, tmp, &pc->pc_pages, - linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - if (tage->cpu != cpu || tage->type != i) - continue; - - cfs_tage_to_tail(tage, cur_head); - tcd->tcd_cur_pages++; - } - } - } -} - -static void put_pages_back(struct page_collection *pc) -{ - if (!libcfs_panic_in_progress) - put_pages_back_on_all_cpus(pc); -} - -/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that - * we have a good amount of data at all times for dumping during an LBUG, even - * if we have been steadily writing (and otherwise discarding) pages via the - * debug daemon. - */ -static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct cfs_trace_cpu_data *tcd) -{ - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) - continue; - - cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); - tcd->tcd_cur_daemon_pages++; - - if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { - struct cfs_trace_page *victim; - - __LASSERT(!list_empty(&tcd->tcd_daemon_pages)); - victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); - - __LASSERT_TAGE_INVARIANT(victim); - - list_del(&victim->linkage); - cfs_tage_free(victim); - tcd->tcd_cur_daemon_pages--; - } - } -} - -static void put_pages_on_daemon_list(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) - put_pages_on_tcd_daemon_list(pc, tcd); - } -} - -void cfs_trace_debug_print(void) -{ - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - pc.pc_want_daemon_pages = 1; - 
collect_pages(&pc); - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - char *p, *file, *fn; - struct page *page; - - __LASSERT_TAGE_INVARIANT(tage); - - page = tage->page; - p = page_address(page); - while (p < ((char *)page_address(page) + tage->used)) { - struct ptldebug_header *hdr; - int len; - - hdr = (void *)p; - p += sizeof(*hdr); - file = p; - p += strlen(file) + 1; - fn = p; - p += strlen(fn) + 1; - len = hdr->ph_len - (int)(p - (char *)hdr); - - cfs_print_to_console(hdr, D_EMERG, p, len, file, fn); - - p += len; - } - - list_del(&tage->linkage); - cfs_tage_free(tage); - } -} - -int cfs_tracefile_dump_all_pages(char *filename) -{ - struct page_collection pc; - struct file *filp; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - char *buf; - mm_segment_t __oldfs; - int rc; - - cfs_tracefile_write_lock(); - - filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, - 0600); - if (IS_ERR(filp)) { - rc = PTR_ERR(filp); - filp = NULL; - pr_err("LustreError: can't open %s for dump: rc %d\n", - filename, rc); - goto out; - } - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - if (list_empty(&pc.pc_pages)) { - rc = 0; - goto close; - } - __oldfs = get_fs(); - set_fs(get_ds()); - - /* ok, for now, just write the pages. 
in the future we'll be building - * iobufs with the pages and calling generic_direct_IO - */ - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - buf = kmap(tage->page); - rc = kernel_write(filp, buf, tage->used, &filp->f_pos); - kunmap(tage->page); - - if (rc != (int)tage->used) { - pr_warn("wanted to write %u but wrote %d\n", tage->used, - rc); - put_pages_back(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - break; - } - list_del(&tage->linkage); - cfs_tage_free(tage); - } - set_fs(__oldfs); - rc = vfs_fsync(filp, 1); - if (rc) - pr_err("sync returns %d\n", rc); -close: - filp_close(filp, NULL); -out: - cfs_tracefile_write_unlock(); - return rc; -} - -void cfs_trace_flush_pages(void) -{ - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - list_del(&tage->linkage); - cfs_tage_free(tage); - } -} - -int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, - const char __user *usr_buffer, int usr_buffer_nob) -{ - int nob; - - if (usr_buffer_nob > knl_buffer_nob) - return -EOVERFLOW; - - if (copy_from_user((void *)knl_buffer, - usr_buffer, usr_buffer_nob)) - return -EFAULT; - - nob = strnlen(knl_buffer, usr_buffer_nob); - while (--nob >= 0) /* strip trailing whitespace */ - if (!isspace(knl_buffer[nob])) - break; - - if (nob < 0) /* empty string */ - return -EINVAL; - - if (nob == knl_buffer_nob) /* no space to terminate */ - return -EOVERFLOW; - - knl_buffer[nob + 1] = 0; /* terminate */ - return 0; -} -EXPORT_SYMBOL(cfs_trace_copyin_string); - -int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, - const char *knl_buffer, char *append) -{ - /* - * NB if 'append' != NULL, it's a single character to append to the - * copied out string - usually "\n" or "" (i.e. 
a terminating zero byte) - */ - int nob = strlen(knl_buffer); - - if (nob > usr_buffer_nob) - nob = usr_buffer_nob; - - if (copy_to_user(usr_buffer, knl_buffer, nob)) - return -EFAULT; - - if (append && nob < usr_buffer_nob) { - if (copy_to_user(usr_buffer + nob, append, 1)) - return -EFAULT; - - nob++; - } - - return nob; -} -EXPORT_SYMBOL(cfs_trace_copyout_string); - -int cfs_trace_allocate_string_buffer(char **str, int nob) -{ - if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */ - return -EINVAL; - - *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); - if (!*str) - return -ENOMEM; - - return 0; -} - -int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) -{ - char *str; - int rc; - - rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc) - return rc; - - rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc) - goto out; - - if (str[0] != '/') { - rc = -EINVAL; - goto out; - } - rc = cfs_tracefile_dump_all_pages(str); -out: - kfree(str); - return rc; -} - -int cfs_trace_daemon_command(char *str) -{ - int rc = 0; - - cfs_tracefile_write_lock(); - - if (!strcmp(str, "stop")) { - cfs_tracefile_write_unlock(); - cfs_trace_stop_thread(); - cfs_tracefile_write_lock(); - memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); - - } else if (!strncmp(str, "size=", 5)) { - unsigned long tmp; - - rc = kstrtoul(str + 5, 10, &tmp); - if (!rc) { - if (tmp < 10 || tmp > 20480) - cfs_tracefile_size = CFS_TRACEFILE_SIZE; - else - cfs_tracefile_size = tmp << 20; - } - } else if (strlen(str) >= sizeof(cfs_tracefile)) { - rc = -ENAMETOOLONG; - } else if (str[0] != '/') { - rc = -EINVAL; - } else { - strcpy(cfs_tracefile, str); - - pr_info("debug daemon will attempt to start writing to %s (%lukB max)\n", - cfs_tracefile, - (long)(cfs_tracefile_size >> 10)); - - cfs_trace_start_thread(); - } - - cfs_tracefile_write_unlock(); - return rc; -} - -int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) -{ 
- char *str; - int rc; - - rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc) - return rc; - - rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (!rc) - rc = cfs_trace_daemon_command(str); - - kfree(str); - return rc; -} - -int cfs_trace_set_debug_mb(int mb) -{ - int i; - int j; - int pages; - int limit = cfs_trace_max_debug_mb(); - struct cfs_trace_cpu_data *tcd; - - if (mb < num_possible_cpus()) { - pr_warn("%d MB is too small for debug buffer size, setting it to %d MB.\n", - mb, num_possible_cpus()); - mb = num_possible_cpus(); - } - - if (mb > limit) { - pr_warn("%d MB is too large for debug buffer size, setting it to %d MB.\n", - mb, limit); - mb = limit; - } - - mb /= num_possible_cpus(); - pages = mb << (20 - PAGE_SHIFT); - - cfs_tracefile_write_lock(); - - cfs_tcd_for_each(tcd, i, j) - tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; - - cfs_tracefile_write_unlock(); - - return 0; -} - -int cfs_trace_get_debug_mb(void) -{ - int i; - int j; - struct cfs_trace_cpu_data *tcd; - int total_pages = 0; - - cfs_tracefile_read_lock(); - - cfs_tcd_for_each(tcd, i, j) - total_pages += tcd->tcd_max_pages; - - cfs_tracefile_read_unlock(); - - return (total_pages >> (20 - PAGE_SHIFT)) + 1; -} - -static int tracefiled(void *arg) -{ - struct page_collection pc; - struct tracefiled_ctl *tctl = arg; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - struct file *filp; - char *buf; - int last_loop = 0; - int rc; - - /* we're started late enough that we pick up init's fs context */ - /* this is so broken in uml? what on earth is going on? 
*/ - - complete(&tctl->tctl_start); - - while (1) { - wait_queue_entry_t __wait; - - pc.pc_want_daemon_pages = 0; - collect_pages(&pc); - if (list_empty(&pc.pc_pages)) - goto end_loop; - - filp = NULL; - cfs_tracefile_read_lock(); - if (cfs_tracefile[0]) { - filp = filp_open(cfs_tracefile, - O_CREAT | O_RDWR | O_LARGEFILE, - 0600); - if (IS_ERR(filp)) { - rc = PTR_ERR(filp); - filp = NULL; - pr_warn("couldn't open %s: %d\n", cfs_tracefile, - rc); - } - } - cfs_tracefile_read_unlock(); - if (!filp) { - put_pages_on_daemon_list(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - goto end_loop; - } - - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - static loff_t f_pos; - - __LASSERT_TAGE_INVARIANT(tage); - - if (f_pos >= (off_t)cfs_tracefile_size) - f_pos = 0; - else if (f_pos > i_size_read(file_inode(filp))) - f_pos = i_size_read(file_inode(filp)); - - buf = kmap(tage->page); - rc = kernel_write(filp, buf, tage->used, &f_pos); - kunmap(tage->page); - - if (rc != (int)tage->used) { - pr_warn("wanted to write %u but wrote %d\n", - tage->used, rc); - put_pages_back(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - break; - } - } - - filp_close(filp, NULL); - put_pages_on_daemon_list(&pc); - if (!list_empty(&pc.pc_pages)) { - int i; - - pr_alert("trace pages aren't empty\n"); - pr_err("total cpus(%d): ", num_possible_cpus()); - for (i = 0; i < num_possible_cpus(); i++) - if (cpu_online(i)) - pr_cont("%d(on) ", i); - else - pr_cont("%d(off) ", i); - pr_cont("\n"); - - i = 0; - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) - pr_err("page %d belongs to cpu %d\n", - ++i, tage->cpu); - pr_err("There are %d pages unwritten\n", i); - } - __LASSERT(list_empty(&pc.pc_pages)); -end_loop: - if (atomic_read(&tctl->tctl_shutdown)) { - if (!last_loop) { - last_loop = 1; - continue; - } else { - break; - } - } - init_waitqueue_entry(&__wait, current); - add_wait_queue(&tctl->tctl_waitq, &__wait); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ); - 
remove_wait_queue(&tctl->tctl_waitq, &__wait); - } - complete(&tctl->tctl_stop); - return 0; -} - -int cfs_trace_start_thread(void) -{ - struct tracefiled_ctl *tctl = &trace_tctl; - struct task_struct *task; - int rc = 0; - - mutex_lock(&cfs_trace_thread_mutex); - if (thread_running) - goto out; - - init_completion(&tctl->tctl_start); - init_completion(&tctl->tctl_stop); - init_waitqueue_head(&tctl->tctl_waitq); - atomic_set(&tctl->tctl_shutdown, 0); - - task = kthread_run(tracefiled, tctl, "ktracefiled"); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - goto out; - } - - wait_for_completion(&tctl->tctl_start); - thread_running = 1; -out: - mutex_unlock(&cfs_trace_thread_mutex); - return rc; -} - -void cfs_trace_stop_thread(void) -{ - struct tracefiled_ctl *tctl = &trace_tctl; - - mutex_lock(&cfs_trace_thread_mutex); - if (thread_running) { - pr_info("shutting down debug daemon thread...\n"); - atomic_set(&tctl->tctl_shutdown, 1); - wait_for_completion(&tctl->tctl_stop); - thread_running = 0; - } - mutex_unlock(&cfs_trace_thread_mutex); -} - -int cfs_tracefile_init(int max_pages) -{ - struct cfs_trace_cpu_data *tcd; - int i; - int j; - int rc; - int factor; - - rc = cfs_tracefile_init_arch(); - if (rc) - return rc; - - cfs_tcd_for_each(tcd, i, j) { - /* tcd_pages_factor is initialized int tracefile_init_arch. 
*/ - factor = tcd->tcd_pages_factor; - INIT_LIST_HEAD(&tcd->tcd_pages); - INIT_LIST_HEAD(&tcd->tcd_stock_pages); - INIT_LIST_HEAD(&tcd->tcd_daemon_pages); - tcd->tcd_cur_pages = 0; - tcd->tcd_cur_stock_pages = 0; - tcd->tcd_cur_daemon_pages = 0; - tcd->tcd_max_pages = (max_pages * factor) / 100; - LASSERT(tcd->tcd_max_pages > 0); - tcd->tcd_shutting_down = 0; - } - - return 0; -} - -static void trace_cleanup_on_all_cpus(void) -{ - struct cfs_trace_cpu_data *tcd; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - tcd->tcd_shutting_down = 1; - - list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, - linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - list_del(&tage->linkage); - cfs_tage_free(tage); - } - - tcd->tcd_cur_pages = 0; - } - } -} - -static void cfs_trace_cleanup(void) -{ - struct page_collection pc; - - INIT_LIST_HEAD(&pc.pc_pages); - - trace_cleanup_on_all_cpus(); - - cfs_tracefile_fini_arch(); -} - -void cfs_tracefile_exit(void) -{ - cfs_trace_stop_thread(); - cfs_trace_cleanup(); -} diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h deleted file mode 100644 index 0608240d897f..000000000000 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.h +++ /dev/null @@ -1,274 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef __LIBCFS_TRACEFILE_H__ -#define __LIBCFS_TRACEFILE_H__ - -#include -#include -#include -#include -#include -#include -#include - -enum cfs_trace_buf_type { - CFS_TCD_TYPE_PROC = 0, - CFS_TCD_TYPE_SOFTIRQ, - CFS_TCD_TYPE_IRQ, - CFS_TCD_TYPE_MAX -}; - -/* trace file lock routines */ - -#define TRACEFILE_NAME_SIZE 1024 -extern char cfs_tracefile[TRACEFILE_NAME_SIZE]; -extern long long cfs_tracefile_size; - -/** - * The path of debug log dump upcall script. 
- */ -extern char lnet_debug_log_upcall[1024]; - -void libcfs_run_debug_log_upcall(char *file); - -int cfs_tracefile_init_arch(void); -void cfs_tracefile_fini_arch(void); - -void cfs_tracefile_read_lock(void); -void cfs_tracefile_read_unlock(void); -void cfs_tracefile_write_lock(void); -void cfs_tracefile_write_unlock(void); - -int cfs_tracefile_dump_all_pages(char *filename); -void cfs_trace_debug_print(void); -void cfs_trace_flush_pages(void); -int cfs_trace_start_thread(void); -void cfs_trace_stop_thread(void); -int cfs_tracefile_init(int max_pages); -void cfs_tracefile_exit(void); - -int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, - const char __user *usr_buffer, int usr_buffer_nob); -int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, - const char *knl_str, char *append); -int cfs_trace_allocate_string_buffer(char **str, int nob); -int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob); -int cfs_trace_daemon_command(char *str); -int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob); -int cfs_trace_set_debug_mb(int mb); -int cfs_trace_get_debug_mb(void); - -void libcfs_debug_dumplog_internal(void *arg); -void libcfs_register_panic_notifier(void); -void libcfs_unregister_panic_notifier(void); -extern int libcfs_panic_in_progress; -int cfs_trace_max_debug_mb(void); - -#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) -#define TCD_STOCK_PAGES (TCD_MAX_PAGES) -#define CFS_TRACEFILE_SIZE (500 << 20) - -#ifdef LUSTRE_TRACEFILE_PRIVATE - -/* - * Private declare for tracefile - */ -#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) -#define TCD_STOCK_PAGES (TCD_MAX_PAGES) - -#define CFS_TRACEFILE_SIZE (500 << 20) - -/* - * Size of a buffer for sprinting console messages if we can't get a page - * from system - */ -#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024 - -union cfs_trace_data_union { - struct cfs_trace_cpu_data { - /* - * Even though this structure is meant to be per-CPU, locking - * is 
needed because in some places the data may be accessed - * from other CPUs. This lock is directly used in trace_get_tcd - * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and - * tcd_for_each_type_lock - */ - spinlock_t tcd_lock; - unsigned long tcd_lock_flags; - - /* - * pages with trace records not yet processed by tracefiled. - */ - struct list_head tcd_pages; - /* number of pages on ->tcd_pages */ - unsigned long tcd_cur_pages; - - /* - * pages with trace records already processed by - * tracefiled. These pages are kept in memory, so that some - * portion of log can be written in the event of LBUG. This - * list is maintained in LRU order. - * - * Pages are moved to ->tcd_daemon_pages by tracefiled() - * (put_pages_on_daemon_list()). LRU pages from this list are - * discarded when list grows too large. - */ - struct list_head tcd_daemon_pages; - /* number of pages on ->tcd_daemon_pages */ - unsigned long tcd_cur_daemon_pages; - - /* - * Maximal number of pages allowed on ->tcd_pages and - * ->tcd_daemon_pages each. - * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current - * implementation. - */ - unsigned long tcd_max_pages; - - /* - * preallocated pages to write trace records into. Pages from - * ->tcd_stock_pages are moved to ->tcd_pages by - * portals_debug_msg(). - * - * This list is necessary, because on some platforms it's - * impossible to perform efficient atomic page allocation in a - * non-blockable context. - * - * Such platforms fill ->tcd_stock_pages "on occasion", when - * tracing code is entered in blockable context. - * - * trace_get_tage_try() tries to get a page from - * ->tcd_stock_pages first and resorts to atomic page - * allocation only if this queue is empty. ->tcd_stock_pages - * is replenished when tracing code is entered in blocking - * context (darwin-tracefile.c:trace_get_tcd()). We try to - * maintain TCD_STOCK_PAGES (40 by default) pages in this - * queue. 
Atomic allocation is only required if more than - * TCD_STOCK_PAGES pagesful are consumed by trace records all - * emitted in non-blocking contexts. Which is quite unlikely. - */ - struct list_head tcd_stock_pages; - /* number of pages on ->tcd_stock_pages */ - unsigned long tcd_cur_stock_pages; - - unsigned short tcd_shutting_down; - unsigned short tcd_cpu; - unsigned short tcd_type; - /* The factors to share debug memory. */ - unsigned short tcd_pages_factor; - } tcd; - char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))]; -}; - -#define TCD_MAX_TYPES 8 -extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS]; - -#define cfs_tcd_for_each(tcd, i, j) \ - for (i = 0; cfs_trace_data[i]; i++) \ - for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \ - j < num_possible_cpus(); \ - j++, (tcd) = &(*cfs_trace_data[i])[j].tcd) - -#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \ - for (i = 0; cfs_trace_data[i] && \ - (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \ - cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++) - -void cfs_set_ptldebug_header(struct ptldebug_header *header, - struct libcfs_debug_msg_data *m, - unsigned long stack); -void cfs_print_to_console(struct ptldebug_header *hdr, int mask, - const char *buf, int len, const char *file, - const char *fn); - -int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking); -void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking); - -extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX]; -enum cfs_trace_buf_type cfs_trace_buf_idx_get(void); - -static inline char * -cfs_trace_get_console_buffer(void) -{ - unsigned int i = get_cpu(); - unsigned int j = cfs_trace_buf_idx_get(); - - return cfs_trace_console_buffers[i][j]; -} - -static inline struct cfs_trace_cpu_data * -cfs_trace_get_tcd(void) -{ - struct cfs_trace_cpu_data *tcd = - &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd; - - cfs_trace_lock_tcd(tcd, 0); - - return tcd; -} - -static 
inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd) -{ - cfs_trace_unlock_tcd(tcd, 0); - - put_cpu(); -} - -int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, - struct list_head *stock); - -void cfs_trace_assertion_failed(const char *str, - struct libcfs_debug_msg_data *m); - -/* ASSERTION that is safe to use within the debug system */ -#define __LASSERT(cond) \ -do { \ - if (unlikely(!(cond))) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \ - cfs_trace_assertion_failed("ASSERTION("#cond") failed", \ - &msgdata); \ - } \ -} while (0) - -#define __LASSERT_TAGE_INVARIANT(tage) \ -do { \ - __LASSERT(tage); \ - __LASSERT(tage->page); \ - __LASSERT(tage->used <= PAGE_SIZE); \ - __LASSERT(page_count(tage->page) > 0); \ -} while (0) - -#endif /* LUSTRE_TRACEFILE_PRIVATE */ - -#endif /* __LIBCFS_TRACEFILE_H__ */ diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile deleted file mode 100644 index 0a9d70924fe0..000000000000 --- a/drivers/staging/lustre/lnet/lnet/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LNET) += lnet.o - -lnet-y := api-ni.o config.o nidstrings.o net_fault.o \ - lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o \ - lib-socket.o lib-move.o module.o lo.o \ - router.o router_proc.o acceptor.o peer.o diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c deleted file mode 100644 index 5648f17eddc0..000000000000 --- a/drivers/staging/lustre/lnet/lnet/acceptor.c +++ /dev/null @@ -1,501 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_LNET -#include -#include -#include - -static int accept_port = 988; -static int accept_backlog = 127; -static int accept_timeout = 5; - -static struct { - int pta_shutdown; - struct socket *pta_sock; - struct completion pta_signal; -} lnet_acceptor_state = { - .pta_shutdown = 1 -}; - -int -lnet_acceptor_port(void) -{ - return accept_port; -} -EXPORT_SYMBOL(lnet_acceptor_port); - -static inline int -lnet_accept_magic(__u32 magic, __u32 constant) -{ - return (magic == constant || - magic == __swab32(constant)); -} - -static char *accept = "secure"; - -module_param(accept, charp, 0444); -MODULE_PARM_DESC(accept, "Accept connections (secure|all|none)"); -module_param(accept_port, int, 0444); -MODULE_PARM_DESC(accept_port, "Acceptor's port (same on all nodes)"); -module_param(accept_backlog, int, 0444); -MODULE_PARM_DESC(accept_backlog, "Acceptor's listen backlog"); -module_param(accept_timeout, int, 0644); -MODULE_PARM_DESC(accept_timeout, "Acceptor's timeout (seconds)"); - -static char *accept_type; - -static int -lnet_acceptor_get_tunables(void) -{ - /* - * Userland acceptor uses 'accept_type' instead of 'accept', due to - * conflict with 'accept(2)', but kernel acceptor still uses 'accept' - * for compatibility. Hence the trick. 
- */ - accept_type = accept; - return 0; -} - -int -lnet_acceptor_timeout(void) -{ - return accept_timeout; -} -EXPORT_SYMBOL(lnet_acceptor_timeout); - -void -lnet_connect_console_error(int rc, lnet_nid_t peer_nid, - __u32 peer_ip, int peer_port) -{ - switch (rc) { - /* "normal" errors */ - case -ECONNREFUSED: - CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n", - libcfs_nid2str(peer_nid), - &peer_ip, peer_port); - break; - case -EHOSTUNREACH: - case -ENETUNREACH: - CNETERR("Connection to %s at host %pI4h was unreachable: the network or that node may be down, or Lustre may be misconfigured.\n", - libcfs_nid2str(peer_nid), &peer_ip); - break; - case -ETIMEDOUT: - CNETERR("Connection to %s at host %pI4h on port %d took too long: that node may be hung or experiencing high load.\n", - libcfs_nid2str(peer_nid), - &peer_ip, peer_port); - break; - case -ECONNRESET: - LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h on port %d was reset: is it running a compatible version of Lustre and is %s one of its NIDs?\n", - libcfs_nid2str(peer_nid), - &peer_ip, peer_port, - libcfs_nid2str(peer_nid)); - break; - case -EPROTO: - LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at host %pI4h on port %d: is it running a compatible version of Lustre?\n", - libcfs_nid2str(peer_nid), - &peer_ip, peer_port); - break; - case -EADDRINUSE: - LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to connect to %s at host %pI4h on port %d\n", - libcfs_nid2str(peer_nid), - &peer_ip, peer_port); - break; - default: - LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s at host %pI4h on port %d\n", - rc, libcfs_nid2str(peer_nid), - &peer_ip, peer_port); - break; - } -} -EXPORT_SYMBOL(lnet_connect_console_error); - -int -lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, - __u32 local_ip, __u32 peer_ip, int peer_port) -{ - struct lnet_acceptor_connreq cr; - struct socket *sock; - int rc; - int port; - 
int fatal; - - BUILD_BUG_ON(sizeof(cr) > 16); /* too big to be on the stack */ - - for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT; - port >= LNET_ACCEPTOR_MIN_RESERVED_PORT; - --port) { - /* Iterate through reserved ports. */ - - rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip, - peer_port); - if (rc) { - if (fatal) - goto failed; - continue; - } - - BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1); - - cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC; - cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; - cr.acr_nid = peer_nid; - - if (the_lnet.ln_testprotocompat) { - /* single-shot proto check */ - lnet_net_lock(LNET_LOCK_EX); - if (the_lnet.ln_testprotocompat & 4) { - cr.acr_version++; - the_lnet.ln_testprotocompat &= ~4; - } - if (the_lnet.ln_testprotocompat & 8) { - cr.acr_magic = LNET_PROTO_MAGIC; - the_lnet.ln_testprotocompat &= ~8; - } - lnet_net_unlock(LNET_LOCK_EX); - } - - rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); - if (rc) - goto failed_sock; - - *sockp = sock; - return 0; - } - - rc = -EADDRINUSE; - goto failed; - - failed_sock: - sock_release(sock); - failed: - lnet_connect_console_error(rc, peer_nid, peer_ip, peer_port); - return rc; -} -EXPORT_SYMBOL(lnet_connect); - -static int -lnet_accept(struct socket *sock, __u32 magic) -{ - struct lnet_acceptor_connreq cr; - __u32 peer_ip; - int peer_port; - int rc; - int flip; - struct lnet_ni *ni; - char *str; - - LASSERT(sizeof(cr) <= 16); /* not too big for the stack */ - - rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); - LASSERT(!rc); /* we succeeded before */ - - if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) { - if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) { - /* - * future version compatibility! - * When LNET unifies protocols over all LNDs, the first - * thing sent will be a version query. 
I send back - * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" - */ - memset(&cr, 0, sizeof(cr)); - cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC; - cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; - rc = lnet_sock_write(sock, &cr, sizeof(cr), - accept_timeout); - - if (rc) - CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n", - &peer_ip, rc); - return -EPROTO; - } - - if (lnet_accept_magic(magic, LNET_PROTO_TCP_MAGIC)) - str = "'old' socknal/tcpnal"; - else - str = "unrecognised"; - - LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n", - &peer_ip, magic, str); - return -EPROTO; - } - - flip = (magic != LNET_PROTO_ACCEPTOR_MAGIC); - - rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version), - accept_timeout); - if (rc) { - CERROR("Error %d reading connection request version from %pI4h\n", - rc, &peer_ip); - return -EIO; - } - - if (flip) - __swab32s(&cr.acr_version); - - if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) { - /* - * future version compatibility! - * An acceptor-specific protocol rev will first send a version - * query. I send back my current version to tell her I'm - * "old". - */ - int peer_version = cr.acr_version; - - memset(&cr, 0, sizeof(cr)); - cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC; - cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; - - rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); - if (rc) - CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n", - peer_version, &peer_ip, rc); - return -EPROTO; - } - - rc = lnet_sock_read(sock, &cr.acr_nid, - sizeof(cr) - - offsetof(struct lnet_acceptor_connreq, acr_nid), - accept_timeout); - if (rc) { - CERROR("Error %d reading connection request from %pI4h\n", - rc, &peer_ip); - return -EIO; - } - - if (flip) - __swab64s(&cr.acr_nid); - - ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid)); - if (!ni || /* no matching net */ - ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! 
*/ - if (ni) - lnet_ni_decref(ni); - LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n", - &peer_ip, libcfs_nid2str(cr.acr_nid)); - return -EPERM; - } - - if (!ni->ni_lnd->lnd_accept) { - /* This catches a request for the loopback LND */ - lnet_ni_decref(ni); - LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n", - &peer_ip, libcfs_nid2str(cr.acr_nid)); - return -EPERM; - } - - CDEBUG(D_NET, "Accept %s from %pI4h\n", - libcfs_nid2str(cr.acr_nid), &peer_ip); - - rc = ni->ni_lnd->lnd_accept(ni, sock); - - lnet_ni_decref(ni); - return rc; -} - -static int -lnet_acceptor(void *arg) -{ - struct socket *newsock; - int rc; - __u32 magic; - __u32 peer_ip; - int peer_port; - int secure = (int)((long)arg); - - LASSERT(!lnet_acceptor_state.pta_sock); - - rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port, - accept_backlog); - if (rc) { - if (rc == -EADDRINUSE) - LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n", - accept_port); - else - LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port %d: unexpected error %d\n", - accept_port, rc); - - lnet_acceptor_state.pta_sock = NULL; - } else { - LCONSOLE(0, "Accept %s, port %d\n", accept_type, accept_port); - } - - /* set init status and unblock parent */ - lnet_acceptor_state.pta_shutdown = rc; - complete(&lnet_acceptor_state.pta_signal); - - if (rc) - return rc; - - while (!lnet_acceptor_state.pta_shutdown) { - rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock); - if (rc) { - if (rc != -EAGAIN) { - CWARN("Accept error %d: pausing...\n", rc); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - } - continue; - } - - /* maybe the LNet acceptor thread has been waken */ - if (lnet_acceptor_state.pta_shutdown) { - sock_release(newsock); - break; - } - - rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port); - if (rc) { - CERROR("Can't determine new connection's 
address\n"); - goto failed; - } - - if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) { - CERROR("Refusing connection from %pI4h: insecure port %d\n", - &peer_ip, peer_port); - goto failed; - } - - rc = lnet_sock_read(newsock, &magic, sizeof(magic), - accept_timeout); - if (rc) { - CERROR("Error %d reading connection request from %pI4h\n", - rc, &peer_ip); - goto failed; - } - - rc = lnet_accept(newsock, magic); - if (rc) - goto failed; - - continue; - -failed: - sock_release(newsock); - } - - sock_release(lnet_acceptor_state.pta_sock); - lnet_acceptor_state.pta_sock = NULL; - - CDEBUG(D_NET, "Acceptor stopping\n"); - - /* unblock lnet_acceptor_stop() */ - complete(&lnet_acceptor_state.pta_signal); - return 0; -} - -static inline int -accept2secure(const char *acc, long *sec) -{ - if (!strcmp(acc, "secure")) { - *sec = 1; - return 1; - } else if (!strcmp(acc, "all")) { - *sec = 0; - return 1; - } else if (!strcmp(acc, "none")) { - return 0; - } - - LCONSOLE_ERROR_MSG(0x124, "Can't parse 'accept=\"%s\"'\n", - acc); - return -EINVAL; -} - -int -lnet_acceptor_start(void) -{ - struct task_struct *task; - int rc; - long rc2; - long secure; - - /* if acceptor is already running return immediately */ - if (!lnet_acceptor_state.pta_shutdown) - return 0; - - LASSERT(!lnet_acceptor_state.pta_sock); - - rc = lnet_acceptor_get_tunables(); - if (rc) - return rc; - - init_completion(&lnet_acceptor_state.pta_signal); - rc = accept2secure(accept_type, &secure); - if (rc <= 0) - return rc; - - if (!lnet_count_acceptor_nis()) /* not required */ - return 0; - - task = kthread_run(lnet_acceptor, (void *)(uintptr_t)secure, - "acceptor_%03ld", secure); - if (IS_ERR(task)) { - rc2 = PTR_ERR(task); - CERROR("Can't start acceptor thread: %ld\n", rc2); - - return -ESRCH; - } - - /* wait for acceptor to startup */ - wait_for_completion(&lnet_acceptor_state.pta_signal); - - if (!lnet_acceptor_state.pta_shutdown) { - /* started OK */ - LASSERT(lnet_acceptor_state.pta_sock); - return 
0; - } - - LASSERT(!lnet_acceptor_state.pta_sock); - - return -ENETDOWN; -} - -void -lnet_acceptor_stop(void) -{ - struct sock *sk; - - if (lnet_acceptor_state.pta_shutdown) /* not running */ - return; - - lnet_acceptor_state.pta_shutdown = 1; - - sk = lnet_acceptor_state.pta_sock->sk; - - /* awake any sleepers using safe method */ - sk->sk_state_change(sk); - - /* block until acceptor signals exit */ - wait_for_completion(&lnet_acceptor_state.pta_signal); -} diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c deleted file mode 100644 index f9ed6977056c..000000000000 --- a/drivers/staging/lustre/lnet/lnet/api-ni.c +++ /dev/null @@ -1,2307 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_LNET -#include -#include - -#include -#include - -#define D_LNI D_CONSOLE - -struct lnet the_lnet; /* THE state of the network */ -EXPORT_SYMBOL(the_lnet); - -static char *ip2nets = ""; -module_param(ip2nets, charp, 0444); -MODULE_PARM_DESC(ip2nets, "LNET network <- IP table"); - -static char *networks = ""; -module_param(networks, charp, 0444); -MODULE_PARM_DESC(networks, "local networks"); - -static char *routes = ""; -module_param(routes, charp, 0444); -MODULE_PARM_DESC(routes, "routes to non-local networks"); - -static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT; -module_param(rnet_htable_size, int, 0444); -MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table"); - -static int lnet_ping(struct lnet_process_id id, int timeout_ms, - struct lnet_process_id __user *ids, int n_ids); - -static char * -lnet_get_routes(void) -{ - return routes; -} - -static char * -lnet_get_networks(void) -{ - char *nets; - int rc; - - if (*networks && *ip2nets) { - LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n"); - return NULL; - } - - if (*ip2nets) { - rc = lnet_parse_ip2nets(&nets, ip2nets); - return !rc ? 
nets : NULL; - } - - if (*networks) - return networks; - - return "tcp"; -} - -static void -lnet_init_locks(void) -{ - spin_lock_init(&the_lnet.ln_eq_wait_lock); - init_waitqueue_head(&the_lnet.ln_eq_waitq); - init_waitqueue_head(&the_lnet.ln_rc_waitq); - mutex_init(&the_lnet.ln_lnd_mutex); - mutex_init(&the_lnet.ln_api_mutex); -} - -static int -lnet_create_remote_nets_table(void) -{ - int i; - struct list_head *hash; - - LASSERT(!the_lnet.ln_remote_nets_hash); - LASSERT(the_lnet.ln_remote_nets_hbits > 0); - hash = kvmalloc_array(LNET_REMOTE_NETS_HASH_SIZE, sizeof(*hash), - GFP_KERNEL); - if (!hash) { - CERROR("Failed to create remote nets hash table\n"); - return -ENOMEM; - } - - for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) - INIT_LIST_HEAD(&hash[i]); - the_lnet.ln_remote_nets_hash = hash; - return 0; -} - -static void -lnet_destroy_remote_nets_table(void) -{ - int i; - - if (!the_lnet.ln_remote_nets_hash) - return; - - for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) - LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i])); - - kvfree(the_lnet.ln_remote_nets_hash); - the_lnet.ln_remote_nets_hash = NULL; -} - -static void -lnet_destroy_locks(void) -{ - if (the_lnet.ln_res_lock) { - cfs_percpt_lock_free(the_lnet.ln_res_lock); - the_lnet.ln_res_lock = NULL; - } - - if (the_lnet.ln_net_lock) { - cfs_percpt_lock_free(the_lnet.ln_net_lock); - the_lnet.ln_net_lock = NULL; - } -} - -static int -lnet_create_locks(void) -{ - lnet_init_locks(); - - the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table()); - if (!the_lnet.ln_res_lock) - goto failed; - - the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table()); - if (!the_lnet.ln_net_lock) - goto failed; - - return 0; - - failed: - lnet_destroy_locks(); - return -ENOMEM; -} - -static void lnet_assert_wire_constants(void) -{ - /* - * Wire protocol assertions generated by 'wirecheck' - * running on Linux robert.bartonsoftware.com 2.6.8-1.521 - * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux - * with 
gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) - */ - - /* Constants... */ - BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded); - BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1); - BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0); - BUILD_BUG_ON(LNET_MSG_ACK != 0); - BUILD_BUG_ON(LNET_MSG_PUT != 1); - BUILD_BUG_ON(LNET_MSG_GET != 2); - BUILD_BUG_ON(LNET_MSG_REPLY != 3); - BUILD_BUG_ON(LNET_MSG_HELLO != 4); - - /* Checks for struct ptl_handle_wire_t */ - BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16); - BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) != 0); - BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire, wh_object_cookie) != 8); - BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8); - - /* Checks for struct struct lnet_magicversion */ - BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0); - BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4); - BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2); - BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_minor) != 6); - BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2); - - /* Checks for struct struct lnet_hdr */ - BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20); - 
BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40); - - /* Ack */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4); - - /* Put */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4); - - /* Get */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8); - 
BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4); - - /* Reply */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16); - - /* Hello */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4); -} - -static struct lnet_lnd * -lnet_find_lnd_by_type(__u32 type) -{ - struct lnet_lnd *lnd; - struct list_head *tmp; - - /* holding lnd mutex */ - list_for_each(tmp, &the_lnet.ln_lnds) { - lnd = list_entry(tmp, struct lnet_lnd, lnd_list); - - if (lnd->lnd_type == type) - return lnd; - } - - return NULL; -} - -void -lnet_register_lnd(struct lnet_lnd *lnd) -{ - mutex_lock(&the_lnet.ln_lnd_mutex); - - LASSERT(libcfs_isknown_lnd(lnd->lnd_type)); - LASSERT(!lnet_find_lnd_by_type(lnd->lnd_type)); - - list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds); - lnd->lnd_refcount = 0; - - CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type)); - - mutex_unlock(&the_lnet.ln_lnd_mutex); -} -EXPORT_SYMBOL(lnet_register_lnd); - -void -lnet_unregister_lnd(struct lnet_lnd *lnd) -{ - mutex_lock(&the_lnet.ln_lnd_mutex); - - LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd); - LASSERT(!lnd->lnd_refcount); - - list_del(&lnd->lnd_list); - CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type)); - - mutex_unlock(&the_lnet.ln_lnd_mutex); -} 
-EXPORT_SYMBOL(lnet_unregister_lnd); - -void -lnet_counters_get(struct lnet_counters *counters) -{ - struct lnet_counters *ctr; - int i; - - memset(counters, 0, sizeof(*counters)); - - lnet_net_lock(LNET_LOCK_EX); - - cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) { - counters->msgs_max += ctr->msgs_max; - counters->msgs_alloc += ctr->msgs_alloc; - counters->errors += ctr->errors; - counters->send_count += ctr->send_count; - counters->recv_count += ctr->recv_count; - counters->route_count += ctr->route_count; - counters->drop_count += ctr->drop_count; - counters->send_length += ctr->send_length; - counters->recv_length += ctr->recv_length; - counters->route_length += ctr->route_length; - counters->drop_length += ctr->drop_length; - } - lnet_net_unlock(LNET_LOCK_EX); -} -EXPORT_SYMBOL(lnet_counters_get); - -void -lnet_counters_reset(void) -{ - struct lnet_counters *counters; - int i; - - lnet_net_lock(LNET_LOCK_EX); - - cfs_percpt_for_each(counters, i, the_lnet.ln_counters) - memset(counters, 0, sizeof(struct lnet_counters)); - - lnet_net_unlock(LNET_LOCK_EX); -} - -static char * -lnet_res_type2str(int type) -{ - switch (type) { - default: - LBUG(); - case LNET_COOKIE_TYPE_MD: - return "MD"; - case LNET_COOKIE_TYPE_ME: - return "ME"; - case LNET_COOKIE_TYPE_EQ: - return "EQ"; - } -} - -static void -lnet_res_container_cleanup(struct lnet_res_container *rec) -{ - int count = 0; - - if (!rec->rec_type) /* not set yet, it's uninitialized */ - return; - - while (!list_empty(&rec->rec_active)) { - struct list_head *e = rec->rec_active.next; - - list_del_init(e); - if (rec->rec_type == LNET_COOKIE_TYPE_EQ) { - kfree(list_entry(e, struct lnet_eq, eq_list)); - - } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) { - kfree(list_entry(e, struct lnet_libmd, md_list)); - - } else { /* NB: Active MEs should be attached on portals */ - LBUG(); - } - count++; - } - - if (count > 0) { - /* - * Found alive MD/ME/EQ, user really should unlink/free - * all of them before finalize 
LNet, but if someone didn't, - * we have to recycle garbage for him - */ - CERROR("%d active elements on exit of %s container\n", - count, lnet_res_type2str(rec->rec_type)); - } - - kfree(rec->rec_lh_hash); - rec->rec_lh_hash = NULL; - - rec->rec_type = 0; /* mark it as finalized */ -} - -static int -lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type) -{ - int rc = 0; - int i; - - LASSERT(!rec->rec_type); - - rec->rec_type = type; - INIT_LIST_HEAD(&rec->rec_active); - rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type; - - /* Arbitrary choice of hash table size */ - rec->rec_lh_hash = kvmalloc_cpt(LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]), - GFP_KERNEL, cpt); - if (!rec->rec_lh_hash) { - rc = -ENOMEM; - goto out; - } - - for (i = 0; i < LNET_LH_HASH_SIZE; i++) - INIT_LIST_HEAD(&rec->rec_lh_hash[i]); - - return 0; - -out: - CERROR("Failed to setup %s resource container\n", - lnet_res_type2str(type)); - lnet_res_container_cleanup(rec); - return rc; -} - -static void -lnet_res_containers_destroy(struct lnet_res_container **recs) -{ - struct lnet_res_container *rec; - int i; - - cfs_percpt_for_each(rec, i, recs) - lnet_res_container_cleanup(rec); - - cfs_percpt_free(recs); -} - -static struct lnet_res_container ** -lnet_res_containers_create(int type) -{ - struct lnet_res_container **recs; - struct lnet_res_container *rec; - int rc; - int i; - - recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec)); - if (!recs) { - CERROR("Failed to allocate %s resource containers\n", - lnet_res_type2str(type)); - return NULL; - } - - cfs_percpt_for_each(rec, i, recs) { - rc = lnet_res_container_setup(rec, i, type); - if (rc) { - lnet_res_containers_destroy(recs); - return NULL; - } - } - - return recs; -} - -struct lnet_libhandle * -lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie) -{ - /* ALWAYS called with lnet_res_lock held */ - struct list_head *head; - struct lnet_libhandle *lh; - unsigned int hash; - - if ((cookie & 
LNET_COOKIE_MASK) != rec->rec_type) - return NULL; - - hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS); - head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK]; - - list_for_each_entry(lh, head, lh_hash_chain) { - if (lh->lh_cookie == cookie) - return lh; - } - - return NULL; -} - -void -lnet_res_lh_initialize(struct lnet_res_container *rec, - struct lnet_libhandle *lh) -{ - /* ALWAYS called with lnet_res_lock held */ - unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS; - unsigned int hash; - - lh->lh_cookie = rec->rec_lh_cookie; - rec->rec_lh_cookie += 1 << ibits; - - hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK; - - list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]); -} - -static int lnet_unprepare(void); - -static int -lnet_prepare(lnet_pid_t requested_pid) -{ - /* Prepare to bring up the network */ - struct lnet_res_container **recs; - int rc = 0; - - if (requested_pid == LNET_PID_ANY) { - /* Don't instantiate LNET just for me */ - return -ENETDOWN; - } - - LASSERT(!the_lnet.ln_refcount); - - the_lnet.ln_routing = 0; - - LASSERT(!(requested_pid & LNET_PID_USERFLAG)); - the_lnet.ln_pid = requested_pid; - - INIT_LIST_HEAD(&the_lnet.ln_test_peers); - INIT_LIST_HEAD(&the_lnet.ln_nis); - INIT_LIST_HEAD(&the_lnet.ln_nis_cpt); - INIT_LIST_HEAD(&the_lnet.ln_nis_zombie); - INIT_LIST_HEAD(&the_lnet.ln_routers); - INIT_LIST_HEAD(&the_lnet.ln_drop_rules); - INIT_LIST_HEAD(&the_lnet.ln_delay_rules); - - rc = lnet_create_remote_nets_table(); - if (rc) - goto failed; - /* - * NB the interface cookie in wire handles guards against delayed - * replies and ACKs appearing valid after reboot. 
- */ - the_lnet.ln_interface_cookie = ktime_get_ns(); - - the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(struct lnet_counters)); - if (!the_lnet.ln_counters) { - CERROR("Failed to allocate counters for LNet\n"); - rc = -ENOMEM; - goto failed; - } - - rc = lnet_peer_tables_create(); - if (rc) - goto failed; - - rc = lnet_msg_containers_create(); - if (rc) - goto failed; - - rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0, - LNET_COOKIE_TYPE_EQ); - if (rc) - goto failed; - - recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME); - if (!recs) { - rc = -ENOMEM; - goto failed; - } - - the_lnet.ln_me_containers = recs; - - recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD); - if (!recs) { - rc = -ENOMEM; - goto failed; - } - - the_lnet.ln_md_containers = recs; - - rc = lnet_portals_create(); - if (rc) { - CERROR("Failed to create portals for LNet: %d\n", rc); - goto failed; - } - - return 0; - - failed: - lnet_unprepare(); - return rc; -} - -static int -lnet_unprepare(void) -{ - /* - * NB no LNET_LOCK since this is the last reference. 
All LND instances - * have shut down already, so it is safe to unlink and free all - * descriptors, even those that appear committed to a network op (eg MD - * with non-zero pending count) - */ - lnet_fail_nid(LNET_NID_ANY, 0); - - LASSERT(!the_lnet.ln_refcount); - LASSERT(list_empty(&the_lnet.ln_test_peers)); - LASSERT(list_empty(&the_lnet.ln_nis)); - LASSERT(list_empty(&the_lnet.ln_nis_cpt)); - LASSERT(list_empty(&the_lnet.ln_nis_zombie)); - - lnet_portals_destroy(); - - if (the_lnet.ln_md_containers) { - lnet_res_containers_destroy(the_lnet.ln_md_containers); - the_lnet.ln_md_containers = NULL; - } - - if (the_lnet.ln_me_containers) { - lnet_res_containers_destroy(the_lnet.ln_me_containers); - the_lnet.ln_me_containers = NULL; - } - - lnet_res_container_cleanup(&the_lnet.ln_eq_container); - - lnet_msg_containers_destroy(); - lnet_peer_tables_destroy(); - lnet_rtrpools_free(0); - - if (the_lnet.ln_counters) { - cfs_percpt_free(the_lnet.ln_counters); - the_lnet.ln_counters = NULL; - } - lnet_destroy_remote_nets_table(); - - return 0; -} - -struct lnet_ni * -lnet_net2ni_locked(__u32 net, int cpt) -{ - struct list_head *tmp; - struct lnet_ni *ni; - - LASSERT(cpt != LNET_LOCK_EX); - - list_for_each(tmp, &the_lnet.ln_nis) { - ni = list_entry(tmp, struct lnet_ni, ni_list); - - if (LNET_NIDNET(ni->ni_nid) == net) { - lnet_ni_addref_locked(ni, cpt); - return ni; - } - } - - return NULL; -} - -struct lnet_ni * -lnet_net2ni(__u32 net) -{ - struct lnet_ni *ni; - - lnet_net_lock(0); - ni = lnet_net2ni_locked(net, 0); - lnet_net_unlock(0); - - return ni; -} -EXPORT_SYMBOL(lnet_net2ni); - -static unsigned int -lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number) -{ - __u64 key = nid; - unsigned int val; - - LASSERT(number >= 1 && number <= LNET_CPT_NUMBER); - - if (number == 1) - return 0; - - val = hash_long(key, LNET_CPT_BITS); - /* NB: LNET_CP_NUMBER doesn't have to be PO2 */ - if (val < number) - return val; - - return (unsigned int)(key + val + (val >> 1)) % number; -} 
- -int -lnet_cpt_of_nid_locked(lnet_nid_t nid) -{ - struct lnet_ni *ni; - - /* must called with hold of lnet_net_lock */ - if (LNET_CPT_NUMBER == 1) - return 0; /* the only one */ - - /* take lnet_net_lock(any) would be OK */ - if (!list_empty(&the_lnet.ln_nis_cpt)) { - list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) { - if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) - continue; - - LASSERT(ni->ni_cpts); - return ni->ni_cpts[lnet_nid_cpt_hash - (nid, ni->ni_ncpts)]; - } - } - - return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER); -} - -int -lnet_cpt_of_nid(lnet_nid_t nid) -{ - int cpt; - int cpt2; - - if (LNET_CPT_NUMBER == 1) - return 0; /* the only one */ - - if (list_empty(&the_lnet.ln_nis_cpt)) - return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER); - - cpt = lnet_net_lock_current(); - cpt2 = lnet_cpt_of_nid_locked(nid); - lnet_net_unlock(cpt); - - return cpt2; -} -EXPORT_SYMBOL(lnet_cpt_of_nid); - -int -lnet_islocalnet(__u32 net) -{ - struct lnet_ni *ni; - int cpt; - - cpt = lnet_net_lock_current(); - - ni = lnet_net2ni_locked(net, cpt); - if (ni) - lnet_ni_decref_locked(ni, cpt); - - lnet_net_unlock(cpt); - - return !!ni; -} - -struct lnet_ni * -lnet_nid2ni_locked(lnet_nid_t nid, int cpt) -{ - struct lnet_ni *ni; - struct list_head *tmp; - - LASSERT(cpt != LNET_LOCK_EX); - - list_for_each(tmp, &the_lnet.ln_nis) { - ni = list_entry(tmp, struct lnet_ni, ni_list); - - if (ni->ni_nid == nid) { - lnet_ni_addref_locked(ni, cpt); - return ni; - } - } - - return NULL; -} - -int -lnet_islocalnid(lnet_nid_t nid) -{ - struct lnet_ni *ni; - int cpt; - - cpt = lnet_net_lock_current(); - ni = lnet_nid2ni_locked(nid, cpt); - if (ni) - lnet_ni_decref_locked(ni, cpt); - lnet_net_unlock(cpt); - - return !!ni; -} - -int -lnet_count_acceptor_nis(void) -{ - /* Return the # of NIs that need the acceptor. 
*/ - int count = 0; - struct list_head *tmp; - struct lnet_ni *ni; - int cpt; - - cpt = lnet_net_lock_current(); - list_for_each(tmp, &the_lnet.ln_nis) { - ni = list_entry(tmp, struct lnet_ni, ni_list); - - if (ni->ni_lnd->lnd_accept) - count++; - } - - lnet_net_unlock(cpt); - - return count; -} - -static struct lnet_ping_info * -lnet_ping_info_create(int num_ni) -{ - struct lnet_ping_info *ping_info; - unsigned int infosz; - - infosz = offsetof(struct lnet_ping_info, pi_ni[num_ni]); - ping_info = kvzalloc(infosz, GFP_KERNEL); - if (!ping_info) { - CERROR("Can't allocate ping info[%d]\n", num_ni); - return NULL; - } - - ping_info->pi_nnis = num_ni; - ping_info->pi_pid = the_lnet.ln_pid; - ping_info->pi_magic = LNET_PROTO_PING_MAGIC; - ping_info->pi_features = LNET_PING_FEAT_NI_STATUS; - - return ping_info; -} - -static inline int -lnet_get_ni_count(void) -{ - struct lnet_ni *ni; - int count = 0; - - lnet_net_lock(0); - - list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) - count++; - - lnet_net_unlock(0); - - return count; -} - -static inline void -lnet_ping_info_free(struct lnet_ping_info *pinfo) -{ - kvfree(pinfo); -} - -static void -lnet_ping_info_destroy(void) -{ - struct lnet_ni *ni; - - lnet_net_lock(LNET_LOCK_EX); - - list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { - lnet_ni_lock(ni); - ni->ni_status = NULL; - lnet_ni_unlock(ni); - } - - lnet_ping_info_free(the_lnet.ln_ping_info); - the_lnet.ln_ping_info = NULL; - - lnet_net_unlock(LNET_LOCK_EX); -} - -static void -lnet_ping_event_handler(struct lnet_event *event) -{ - struct lnet_ping_info *pinfo = event->md.user_ptr; - - if (event->unlinked) - pinfo->pi_features = LNET_PING_FEAT_INVAL; -} - -static int -lnet_ping_info_setup(struct lnet_ping_info **ppinfo, - struct lnet_handle_md *md_handle, - int ni_count, bool set_eq) -{ - struct lnet_process_id id = {LNET_NID_ANY, LNET_PID_ANY}; - struct lnet_handle_me me_handle; - struct lnet_md md = { NULL }; - int rc, rc2; - - if (set_eq) { - rc = LNetEQAlloc(0, 
lnet_ping_event_handler, - &the_lnet.ln_ping_target_eq); - if (rc) { - CERROR("Can't allocate ping EQ: %d\n", rc); - return rc; - } - } - - *ppinfo = lnet_ping_info_create(ni_count); - if (!*ppinfo) { - rc = -ENOMEM; - goto failed_0; - } - - rc = LNetMEAttach(LNET_RESERVED_PORTAL, id, - LNET_PROTO_PING_MATCHBITS, 0, - LNET_UNLINK, LNET_INS_AFTER, - &me_handle); - if (rc) { - CERROR("Can't create ping ME: %d\n", rc); - goto failed_1; - } - - /* initialize md content */ - md.start = *ppinfo; - md.length = offsetof(struct lnet_ping_info, - pi_ni[(*ppinfo)->pi_nnis]); - md.threshold = LNET_MD_THRESH_INF; - md.max_size = 0; - md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE | - LNET_MD_MANAGE_REMOTE; - md.user_ptr = NULL; - md.eq_handle = the_lnet.ln_ping_target_eq; - md.user_ptr = *ppinfo; - - rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle); - if (rc) { - CERROR("Can't attach ping MD: %d\n", rc); - goto failed_2; - } - - return 0; - -failed_2: - rc2 = LNetMEUnlink(me_handle); - LASSERT(!rc2); -failed_1: - lnet_ping_info_free(*ppinfo); - *ppinfo = NULL; -failed_0: - if (set_eq) - LNetEQFree(the_lnet.ln_ping_target_eq); - return rc; -} - -static void -lnet_ping_md_unlink(struct lnet_ping_info *pinfo, - struct lnet_handle_md *md_handle) -{ - LNetMDUnlink(*md_handle); - LNetInvalidateMDHandle(md_handle); - - /* NB md could be busy; this just starts the unlink */ - while (pinfo->pi_features != LNET_PING_FEAT_INVAL) { - CDEBUG(D_NET, "Still waiting for ping MD to unlink\n"); - set_current_state(TASK_NOLOAD); - schedule_timeout(HZ); - } -} - -static void -lnet_ping_info_install_locked(struct lnet_ping_info *ping_info) -{ - struct lnet_ni_status *ns; - struct lnet_ni *ni; - int i = 0; - - list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { - LASSERT(i < ping_info->pi_nnis); - - ns = &ping_info->pi_ni[i]; - - ns->ns_nid = ni->ni_nid; - - lnet_ni_lock(ni); - ns->ns_status = (ni->ni_status) ? 
- ni->ni_status->ns_status : LNET_NI_STATUS_UP; - ni->ni_status = ns; - lnet_ni_unlock(ni); - - i++; - } -} - -static void -lnet_ping_target_update(struct lnet_ping_info *pinfo, - struct lnet_handle_md md_handle) -{ - struct lnet_ping_info *old_pinfo = NULL; - struct lnet_handle_md old_md; - - /* switch the NIs to point to the new ping info created */ - lnet_net_lock(LNET_LOCK_EX); - - if (!the_lnet.ln_routing) - pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED; - lnet_ping_info_install_locked(pinfo); - - if (the_lnet.ln_ping_info) { - old_pinfo = the_lnet.ln_ping_info; - old_md = the_lnet.ln_ping_target_md; - } - the_lnet.ln_ping_target_md = md_handle; - the_lnet.ln_ping_info = pinfo; - - lnet_net_unlock(LNET_LOCK_EX); - - if (old_pinfo) { - /* unlink the old ping info */ - lnet_ping_md_unlink(old_pinfo, &old_md); - lnet_ping_info_free(old_pinfo); - } -} - -static void -lnet_ping_target_fini(void) -{ - int rc; - - lnet_ping_md_unlink(the_lnet.ln_ping_info, - &the_lnet.ln_ping_target_md); - - rc = LNetEQFree(the_lnet.ln_ping_target_eq); - LASSERT(!rc); - - lnet_ping_info_destroy(); -} - -static int -lnet_ni_tq_credits(struct lnet_ni *ni) -{ - int credits; - - LASSERT(ni->ni_ncpts >= 1); - - if (ni->ni_ncpts == 1) - return ni->ni_maxtxcredits; - - credits = ni->ni_maxtxcredits / ni->ni_ncpts; - credits = max(credits, 8 * ni->ni_peertxcredits); - credits = min(credits, ni->ni_maxtxcredits); - - return credits; -} - -static void -lnet_ni_unlink_locked(struct lnet_ni *ni) -{ - if (!list_empty(&ni->ni_cptlist)) { - list_del_init(&ni->ni_cptlist); - lnet_ni_decref_locked(ni, 0); - } - - /* move it to zombie list and nobody can find it anymore */ - LASSERT(!list_empty(&ni->ni_list)); - list_move(&ni->ni_list, &the_lnet.ln_nis_zombie); - lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */ -} - -static void -lnet_clear_zombies_nis_locked(void) -{ - int i; - int islo; - struct lnet_ni *ni; - struct lnet_ni *temp; - - /* - * Now wait for the NI's I just nuked to show up on 
ln_zombie_nis - * and shut them down in guaranteed thread context - */ - i = 2; - list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis_zombie, ni_list) { - int *ref; - int j; - - list_del_init(&ni->ni_list); - cfs_percpt_for_each(ref, j, ni->ni_refs) { - if (!*ref) - continue; - /* still busy, add it back to zombie list */ - list_add(&ni->ni_list, &the_lnet.ln_nis_zombie); - break; - } - - if (!list_empty(&ni->ni_list)) { - lnet_net_unlock(LNET_LOCK_EX); - ++i; - if ((i & (-i)) == i) { - CDEBUG(D_WARNING, "Waiting for zombie LNI %s\n", - libcfs_nid2str(ni->ni_nid)); - } - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - lnet_net_lock(LNET_LOCK_EX); - continue; - } - - ni->ni_lnd->lnd_refcount--; - lnet_net_unlock(LNET_LOCK_EX); - - islo = ni->ni_lnd->lnd_type == LOLND; - - LASSERT(!in_interrupt()); - ni->ni_lnd->lnd_shutdown(ni); - - /* - * can't deref lnd anymore now; it might have unregistered - * itself... - */ - if (!islo) - CDEBUG(D_LNI, "Removed LNI %s\n", - libcfs_nid2str(ni->ni_nid)); - - lnet_ni_free(ni); - i = 2; - - lnet_net_lock(LNET_LOCK_EX); - } -} - -static void -lnet_shutdown_lndnis(void) -{ - struct lnet_ni *ni; - struct lnet_ni *temp; - int i; - - /* NB called holding the global mutex */ - - /* All quiet on the API front */ - LASSERT(!the_lnet.ln_shutdown); - LASSERT(!the_lnet.ln_refcount); - LASSERT(list_empty(&the_lnet.ln_nis_zombie)); - - lnet_net_lock(LNET_LOCK_EX); - the_lnet.ln_shutdown = 1; /* flag shutdown */ - - /* Unlink NIs from the global table */ - list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis, ni_list) { - lnet_ni_unlink_locked(ni); - } - - /* Drop the cached loopback NI. 
*/ - if (the_lnet.ln_loni) { - lnet_ni_decref_locked(the_lnet.ln_loni, 0); - the_lnet.ln_loni = NULL; - } - - lnet_net_unlock(LNET_LOCK_EX); - - /* - * Clear lazy portals and drop delayed messages which hold refs - * on their lnet_msg::msg_rxpeer - */ - for (i = 0; i < the_lnet.ln_nportals; i++) - LNetClearLazyPortal(i); - - /* - * Clear the peer table and wait for all peers to go (they hold refs on - * their NIs) - */ - lnet_peer_tables_cleanup(NULL); - - lnet_net_lock(LNET_LOCK_EX); - - lnet_clear_zombies_nis_locked(); - the_lnet.ln_shutdown = 0; - lnet_net_unlock(LNET_LOCK_EX); -} - -/* shutdown down the NI and release refcount */ -static void -lnet_shutdown_lndni(struct lnet_ni *ni) -{ - int i; - - lnet_net_lock(LNET_LOCK_EX); - lnet_ni_unlink_locked(ni); - lnet_net_unlock(LNET_LOCK_EX); - - /* clear messages for this NI on the lazy portal */ - for (i = 0; i < the_lnet.ln_nportals; i++) - lnet_clear_lazy_portal(ni, i, "Shutting down NI"); - - /* Do peer table cleanup for this ni */ - lnet_peer_tables_cleanup(ni); - - lnet_net_lock(LNET_LOCK_EX); - lnet_clear_zombies_nis_locked(); - lnet_net_unlock(LNET_LOCK_EX); -} - -static int -lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf) -{ - struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL; - int rc = -EINVAL; - int lnd_type; - struct lnet_lnd *lnd; - struct lnet_tx_queue *tq; - int i; - u32 seed; - - lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid)); - - LASSERT(libcfs_isknown_lnd(lnd_type)); - - if (lnd_type == CIBLND || lnd_type == OPENIBLND || - lnd_type == IIBLND || lnd_type == VIBLND) { - CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type)); - goto failed0; - } - - /* Make sure this new NI is unique. 
*/ - lnet_net_lock(LNET_LOCK_EX); - rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis); - lnet_net_unlock(LNET_LOCK_EX); - if (!rc) { - if (lnd_type == LOLND) { - lnet_ni_free(ni); - return 0; - } - - CERROR("Net %s is not unique\n", - libcfs_net2str(LNET_NIDNET(ni->ni_nid))); - rc = -EEXIST; - goto failed0; - } - - mutex_lock(&the_lnet.ln_lnd_mutex); - lnd = lnet_find_lnd_by_type(lnd_type); - - if (!lnd) { - mutex_unlock(&the_lnet.ln_lnd_mutex); - rc = request_module("%s", libcfs_lnd2modname(lnd_type)); - mutex_lock(&the_lnet.ln_lnd_mutex); - - lnd = lnet_find_lnd_by_type(lnd_type); - if (!lnd) { - mutex_unlock(&the_lnet.ln_lnd_mutex); - CERROR("Can't load LND %s, module %s, rc=%d\n", - libcfs_lnd2str(lnd_type), - libcfs_lnd2modname(lnd_type), rc); - rc = -EINVAL; - goto failed0; - } - } - - lnet_net_lock(LNET_LOCK_EX); - lnd->lnd_refcount++; - lnet_net_unlock(LNET_LOCK_EX); - - ni->ni_lnd = lnd; - - if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf)) - lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk; - - if (lnd_tunables) { - ni->ni_lnd_tunables = kzalloc(sizeof(*ni->ni_lnd_tunables), - GFP_NOFS); - if (!ni->ni_lnd_tunables) { - mutex_unlock(&the_lnet.ln_lnd_mutex); - rc = -ENOMEM; - goto failed0; - } - memcpy(ni->ni_lnd_tunables, lnd_tunables, - sizeof(*ni->ni_lnd_tunables)); - } - - /* - * If given some LND tunable parameters, parse those now to - * override the values in the NI structure. 
- */ - if (conf) { - if (conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0) - ni->ni_peerrtrcredits = - conf->cfg_config_u.cfg_net.net_peer_rtr_credits; - if (conf->cfg_config_u.cfg_net.net_peer_timeout >= 0) - ni->ni_peertimeout = - conf->cfg_config_u.cfg_net.net_peer_timeout; - if (conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1) - ni->ni_peertxcredits = - conf->cfg_config_u.cfg_net.net_peer_tx_credits; - if (conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0) - ni->ni_maxtxcredits = - conf->cfg_config_u.cfg_net.net_max_tx_credits; - } - - rc = lnd->lnd_startup(ni); - - mutex_unlock(&the_lnet.ln_lnd_mutex); - - if (rc) { - LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n", - rc, libcfs_lnd2str(lnd->lnd_type)); - lnet_net_lock(LNET_LOCK_EX); - lnd->lnd_refcount--; - lnet_net_unlock(LNET_LOCK_EX); - goto failed0; - } - - LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query); - - lnet_net_lock(LNET_LOCK_EX); - /* refcount for ln_nis */ - lnet_ni_addref_locked(ni, 0); - list_add_tail(&ni->ni_list, &the_lnet.ln_nis); - if (ni->ni_cpts) { - lnet_ni_addref_locked(ni, 0); - list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt); - } - - lnet_net_unlock(LNET_LOCK_EX); - - if (lnd->lnd_type == LOLND) { - lnet_ni_addref(ni); - LASSERT(!the_lnet.ln_loni); - the_lnet.ln_loni = ni; - return 0; - } - - if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) { - LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n", - libcfs_lnd2str(lnd->lnd_type), - !ni->ni_peertxcredits ? - "" : "per-peer "); - /* - * shutdown the NI since if we get here then it must've already - * been started - */ - lnet_shutdown_lndni(ni); - return -EINVAL; - } - - cfs_percpt_for_each(tq, i, ni->ni_tx_queues) { - tq->tq_credits_min = - tq->tq_credits_max = - tq->tq_credits = lnet_ni_tq_credits(ni); - } - - /* Nodes with small feet have little entropy. The NID for this - * node gives the most entropy in the low bits. 
- */ - seed = LNET_NIDADDR(ni->ni_nid); - add_device_randomness(&seed, sizeof(seed)); - - CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n", - libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits, - lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER, - ni->ni_peerrtrcredits, ni->ni_peertimeout); - - return 0; -failed0: - lnet_ni_free(ni); - return rc; -} - -static int -lnet_startup_lndnis(struct list_head *nilist) -{ - struct lnet_ni *ni; - int rc; - int ni_count = 0; - - while (!list_empty(nilist)) { - ni = list_entry(nilist->next, struct lnet_ni, ni_list); - list_del(&ni->ni_list); - rc = lnet_startup_lndni(ni, NULL); - - if (rc < 0) - goto failed; - - ni_count++; - } - - return ni_count; -failed: - lnet_shutdown_lndnis(); - - return rc; -} - -/** - * Initialize LNet library. - * - * Automatically called at module loading time. Caller has to call - * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the - * latter returned 0. It must be called exactly once. - * - * \retval 0 on success - * \retval -ve on failures. 
- */ -int lnet_lib_init(void) -{ - int rc; - - lnet_assert_wire_constants(); - - memset(&the_lnet, 0, sizeof(the_lnet)); - - /* refer to global cfs_cpt_tab for now */ - the_lnet.ln_cpt_table = cfs_cpt_tab; - the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab); - - LASSERT(the_lnet.ln_cpt_number > 0); - if (the_lnet.ln_cpt_number > LNET_CPT_MAX) { - /* we are under risk of consuming all lh_cookie */ - CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n", - the_lnet.ln_cpt_number, LNET_CPT_MAX); - return -E2BIG; - } - - while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number) - the_lnet.ln_cpt_bits++; - - rc = lnet_create_locks(); - if (rc) { - CERROR("Can't create LNet global locks: %d\n", rc); - return rc; - } - - the_lnet.ln_refcount = 0; - LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh); - INIT_LIST_HEAD(&the_lnet.ln_lnds); - INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie); - INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow); - - /* - * The hash table size is the number of bits it takes to express the set - * ln_num_routes, minus 1 (better to under estimate than over so we - * don't waste memory). - */ - if (rnet_htable_size <= 0) - rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT; - else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX) - rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX; - the_lnet.ln_remote_nets_hbits = max_t(int, 1, - order_base_2(rnet_htable_size) - 1); - - /* - * All LNDs apart from the LOLND are in separate modules. They - * register themselves when their module loads, and unregister - * themselves when their module is unloaded. - */ - lnet_register_lnd(&the_lolnd); - return 0; -} - -/** - * Finalize LNet library. - * - * \pre lnet_lib_init() called with success. - * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls. 
- */ -void lnet_lib_exit(void) -{ - LASSERT(!the_lnet.ln_refcount); - - while (!list_empty(&the_lnet.ln_lnds)) - lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next, - struct lnet_lnd, lnd_list)); - lnet_destroy_locks(); -} - -/** - * Set LNet PID and start LNet interfaces, routing, and forwarding. - * - * Users must call this function at least once before any other functions. - * For each successful call there must be a corresponding call to - * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is - * ignored. - * - * The PID used by LNet may be different from the one requested. - * See LNetGetId(). - * - * \param requested_pid PID requested by the caller. - * - * \return >= 0 on success, and < 0 error code on failures. - */ -int -LNetNIInit(lnet_pid_t requested_pid) -{ - int im_a_router = 0; - int rc; - int ni_count; - struct lnet_ping_info *pinfo; - struct lnet_handle_md md_handle; - struct list_head net_head; - - INIT_LIST_HEAD(&net_head); - - mutex_lock(&the_lnet.ln_api_mutex); - - CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount); - - if (the_lnet.ln_refcount > 0) { - rc = the_lnet.ln_refcount++; - mutex_unlock(&the_lnet.ln_api_mutex); - return rc; - } - - rc = lnet_prepare(requested_pid); - if (rc) { - mutex_unlock(&the_lnet.ln_api_mutex); - return rc; - } - - /* Add in the loopback network */ - if (!lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head)) { - rc = -ENOMEM; - goto err_empty_list; - } - - /* - * If LNet is being initialized via DLC it is possible - * that the user requests not to load module parameters (ones which - * are supported by DLC) on initialization. Therefore, make sure not - * to load networks, routes and forwarding from module parameters - * in this case. 
On cleanup in case of failure only clean up - * routes if it has been loaded - */ - if (!the_lnet.ln_nis_from_mod_params) { - rc = lnet_parse_networks(&net_head, lnet_get_networks()); - if (rc < 0) - goto err_empty_list; - } - - ni_count = lnet_startup_lndnis(&net_head); - if (ni_count < 0) { - rc = ni_count; - goto err_empty_list; - } - - if (!the_lnet.ln_nis_from_mod_params) { - rc = lnet_parse_routes(lnet_get_routes(), &im_a_router); - if (rc) - goto err_shutdown_lndnis; - - rc = lnet_check_routes(); - if (rc) - goto err_destroy_routes; - - rc = lnet_rtrpools_alloc(im_a_router); - if (rc) - goto err_destroy_routes; - } - - rc = lnet_acceptor_start(); - if (rc) - goto err_destroy_routes; - - the_lnet.ln_refcount = 1; - /* Now I may use my own API functions... */ - - rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true); - if (rc) - goto err_acceptor_stop; - - lnet_ping_target_update(pinfo, md_handle); - - rc = lnet_router_checker_start(); - if (rc) - goto err_stop_ping; - - lnet_fault_init(); - lnet_router_debugfs_init(); - - mutex_unlock(&the_lnet.ln_api_mutex); - - return 0; - -err_stop_ping: - lnet_ping_target_fini(); -err_acceptor_stop: - the_lnet.ln_refcount = 0; - lnet_acceptor_stop(); -err_destroy_routes: - if (!the_lnet.ln_nis_from_mod_params) - lnet_destroy_routes(); -err_shutdown_lndnis: - lnet_shutdown_lndnis(); -err_empty_list: - lnet_unprepare(); - LASSERT(rc < 0); - mutex_unlock(&the_lnet.ln_api_mutex); - while (!list_empty(&net_head)) { - struct lnet_ni *ni; - - ni = list_entry(net_head.next, struct lnet_ni, ni_list); - list_del_init(&ni->ni_list); - lnet_ni_free(ni); - } - return rc; -} -EXPORT_SYMBOL(LNetNIInit); - -/** - * Stop LNet interfaces, routing, and forwarding. - * - * Users must call this function once for each successful call to LNetNIInit(). - * Once the LNetNIFini() operation has been started, the results of pending - * API operations are undefined. - * - * \return always 0 for current implementation. 
- */ -int -LNetNIFini(void) -{ - mutex_lock(&the_lnet.ln_api_mutex); - - LASSERT(the_lnet.ln_refcount > 0); - - if (the_lnet.ln_refcount != 1) { - the_lnet.ln_refcount--; - } else { - LASSERT(!the_lnet.ln_niinit_self); - - lnet_fault_fini(); - lnet_router_debugfs_fini(); - lnet_router_checker_stop(); - lnet_ping_target_fini(); - - /* Teardown fns that use my own API functions BEFORE here */ - the_lnet.ln_refcount = 0; - - lnet_acceptor_stop(); - lnet_destroy_routes(); - lnet_shutdown_lndnis(); - lnet_unprepare(); - } - - mutex_unlock(&the_lnet.ln_api_mutex); - return 0; -} -EXPORT_SYMBOL(LNetNIFini); - -/** - * Grabs the ni data from the ni structure and fills the out - * parameters - * - * \param[in] ni network interface structure - * \param[out] config NI configuration - */ -static void -lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config) -{ - struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL; - struct lnet_ioctl_net_config *net_config; - size_t min_size, tunable_size = 0; - int i; - - if (!ni || !config) - return; - - net_config = (struct lnet_ioctl_net_config *)config->cfg_bulk; - if (!net_config) - return; - - BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) != - ARRAY_SIZE(net_config->ni_interfaces)); - - for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) { - if (!ni->ni_interfaces[i]) - break; - - strncpy(net_config->ni_interfaces[i], - ni->ni_interfaces[i], - sizeof(net_config->ni_interfaces[i])); - } - - config->cfg_nid = ni->ni_nid; - config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout; - config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits; - config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits; - config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits; - - net_config->ni_status = ni->ni_status->ns_status; - - if (ni->ni_cpts) { - int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT); - - for (i = 0; i < num_cpts; i++) - net_config->ni_cpts[i] = ni->ni_cpts[i]; 
- - config->cfg_ncpts = num_cpts; - } - - /* - * See if user land tools sent in a newer and larger version - * of struct lnet_tunables than what the kernel uses. - */ - min_size = sizeof(*config) + sizeof(*net_config); - - if (config->cfg_hdr.ioc_len > min_size) - tunable_size = config->cfg_hdr.ioc_len - min_size; - - /* Don't copy to much data to user space */ - min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables)); - lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk; - - if (ni->ni_lnd_tunables && lnd_cfg && min_size) { - memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size); - config->cfg_config_u.cfg_net.net_interface_count = 1; - - /* Tell user land that kernel side has less data */ - if (tunable_size > sizeof(*ni->ni_lnd_tunables)) { - min_size = tunable_size - sizeof(ni->ni_lnd_tunables); - config->cfg_hdr.ioc_len -= min_size; - } - } -} - -static int -lnet_get_net_config(struct lnet_ioctl_config_data *config) -{ - struct lnet_ni *ni; - struct list_head *tmp; - int idx = config->cfg_count; - int cpt, i = 0; - int rc = -ENOENT; - - cpt = lnet_net_lock_current(); - - list_for_each(tmp, &the_lnet.ln_nis) { - if (i++ != idx) - continue; - - ni = list_entry(tmp, struct lnet_ni, ni_list); - lnet_ni_lock(ni); - lnet_fill_ni_info(ni, config); - lnet_ni_unlock(ni); - rc = 0; - break; - } - - lnet_net_unlock(cpt); - return rc; -} - -int -lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf) -{ - char *nets = conf->cfg_config_u.cfg_net.net_intf; - struct lnet_ping_info *pinfo; - struct lnet_handle_md md_handle; - struct lnet_ni *ni; - struct list_head net_head; - struct lnet_remotenet *rnet; - int rc; - - INIT_LIST_HEAD(&net_head); - - /* Create a ni structure for the network string */ - rc = lnet_parse_networks(&net_head, nets); - if (rc <= 0) - return !rc ? 
-EINVAL : rc; - - mutex_lock(&the_lnet.ln_api_mutex); - - if (rc > 1) { - rc = -EINVAL; /* only add one interface per call */ - goto failed0; - } - - ni = list_entry(net_head.next, struct lnet_ni, ni_list); - - lnet_net_lock(LNET_LOCK_EX); - rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid)); - lnet_net_unlock(LNET_LOCK_EX); - /* - * make sure that the net added doesn't invalidate the current - * configuration LNet is keeping - */ - if (rnet) { - CERROR("Adding net %s will invalidate routing configuration\n", - nets); - rc = -EUSERS; - goto failed0; - } - - rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(), - false); - if (rc) - goto failed0; - - list_del_init(&ni->ni_list); - - rc = lnet_startup_lndni(ni, conf); - if (rc) - goto failed1; - - if (ni->ni_lnd->lnd_accept) { - rc = lnet_acceptor_start(); - if (rc < 0) { - /* shutdown the ni that we just started */ - CERROR("Failed to start up acceptor thread\n"); - lnet_shutdown_lndni(ni); - goto failed1; - } - } - - lnet_ping_target_update(pinfo, md_handle); - mutex_unlock(&the_lnet.ln_api_mutex); - - return 0; - -failed1: - lnet_ping_md_unlink(pinfo, &md_handle); - lnet_ping_info_free(pinfo); -failed0: - mutex_unlock(&the_lnet.ln_api_mutex); - while (!list_empty(&net_head)) { - ni = list_entry(net_head.next, struct lnet_ni, ni_list); - list_del_init(&ni->ni_list); - lnet_ni_free(ni); - } - return rc; -} - -int -lnet_dyn_del_ni(__u32 net) -{ - struct lnet_ni *ni; - struct lnet_ping_info *pinfo; - struct lnet_handle_md md_handle; - int rc; - - /* don't allow userspace to shutdown the LOLND */ - if (LNET_NETTYP(net) == LOLND) - return -EINVAL; - - mutex_lock(&the_lnet.ln_api_mutex); - /* create and link a new ping info, before removing the old one */ - rc = lnet_ping_info_setup(&pinfo, &md_handle, - lnet_get_ni_count() - 1, false); - if (rc) - goto out; - - ni = lnet_net2ni(net); - if (!ni) { - rc = -EINVAL; - goto failed; - } - - /* decrement the reference counter taken by lnet_net2ni() */ - 
lnet_ni_decref_locked(ni, 0); - - lnet_shutdown_lndni(ni); - - if (!lnet_count_acceptor_nis()) - lnet_acceptor_stop(); - - lnet_ping_target_update(pinfo, md_handle); - goto out; -failed: - lnet_ping_md_unlink(pinfo, &md_handle); - lnet_ping_info_free(pinfo); -out: - mutex_unlock(&the_lnet.ln_api_mutex); - - return rc; -} - -/** - * LNet ioctl handler. - * - */ -int -LNetCtl(unsigned int cmd, void *arg) -{ - struct libcfs_ioctl_data *data = arg; - struct lnet_ioctl_config_data *config; - struct lnet_process_id id = {0}; - struct lnet_ni *ni; - int rc; - unsigned long secs_passed; - - BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX < - sizeof(struct lnet_ioctl_net_config) + - sizeof(struct lnet_ioctl_config_data)); - - switch (cmd) { - case IOC_LIBCFS_GET_NI: - rc = LNetGetId(data->ioc_count, &id); - data->ioc_nid = id.nid; - return rc; - - case IOC_LIBCFS_FAIL_NID: - return lnet_fail_nid(data->ioc_nid, data->ioc_count); - - case IOC_LIBCFS_ADD_ROUTE: - config = arg; - - if (config->cfg_hdr.ioc_len < sizeof(*config)) - return -EINVAL; - - mutex_lock(&the_lnet.ln_api_mutex); - rc = lnet_add_route(config->cfg_net, - config->cfg_config_u.cfg_route.rtr_hop, - config->cfg_nid, - config->cfg_config_u.cfg_route.rtr_priority); - if (!rc) { - rc = lnet_check_routes(); - if (rc) - lnet_del_route(config->cfg_net, - config->cfg_nid); - } - mutex_unlock(&the_lnet.ln_api_mutex); - return rc; - - case IOC_LIBCFS_DEL_ROUTE: - config = arg; - - if (config->cfg_hdr.ioc_len < sizeof(*config)) - return -EINVAL; - - mutex_lock(&the_lnet.ln_api_mutex); - rc = lnet_del_route(config->cfg_net, config->cfg_nid); - mutex_unlock(&the_lnet.ln_api_mutex); - return rc; - - case IOC_LIBCFS_GET_ROUTE: - config = arg; - - if (config->cfg_hdr.ioc_len < sizeof(*config)) - return -EINVAL; - - return lnet_get_route(config->cfg_count, - &config->cfg_net, - &config->cfg_config_u.cfg_route.rtr_hop, - &config->cfg_nid, - &config->cfg_config_u.cfg_route.rtr_flags, - &config->cfg_config_u.cfg_route.rtr_priority); - - case 
IOC_LIBCFS_GET_NET: { - size_t total = sizeof(*config) + - sizeof(struct lnet_ioctl_net_config); - config = arg; - - if (config->cfg_hdr.ioc_len < total) - return -EINVAL; - - return lnet_get_net_config(config); - } - - case IOC_LIBCFS_GET_LNET_STATS: { - struct lnet_ioctl_lnet_stats *lnet_stats = arg; - - if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats)) - return -EINVAL; - - lnet_counters_get(&lnet_stats->st_cntrs); - return 0; - } - - case IOC_LIBCFS_CONFIG_RTR: - config = arg; - - if (config->cfg_hdr.ioc_len < sizeof(*config)) - return -EINVAL; - - mutex_lock(&the_lnet.ln_api_mutex); - if (config->cfg_config_u.cfg_buffers.buf_enable) { - rc = lnet_rtrpools_enable(); - mutex_unlock(&the_lnet.ln_api_mutex); - return rc; - } - lnet_rtrpools_disable(); - mutex_unlock(&the_lnet.ln_api_mutex); - return 0; - - case IOC_LIBCFS_ADD_BUF: - config = arg; - - if (config->cfg_hdr.ioc_len < sizeof(*config)) - return -EINVAL; - - mutex_lock(&the_lnet.ln_api_mutex); - rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.buf_tiny, - config->cfg_config_u.cfg_buffers.buf_small, - config->cfg_config_u.cfg_buffers.buf_large); - mutex_unlock(&the_lnet.ln_api_mutex); - return rc; - - case IOC_LIBCFS_GET_BUF: { - struct lnet_ioctl_pool_cfg *pool_cfg; - size_t total = sizeof(*config) + sizeof(*pool_cfg); - - config = arg; - - if (config->cfg_hdr.ioc_len < total) - return -EINVAL; - - pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk; - return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg); - } - - case IOC_LIBCFS_GET_PEER_INFO: { - struct lnet_ioctl_peer *peer_info = arg; - - if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info)) - return -EINVAL; - - return lnet_get_peer_info(peer_info->pr_count, - &peer_info->pr_nid, - peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness, - &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt, - &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount, - &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits, - 
&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits, - &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits, - &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits, - &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob); - } - - case IOC_LIBCFS_NOTIFY_ROUTER: - secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]); - secs_passed *= msecs_to_jiffies(MSEC_PER_SEC); - - return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, - jiffies - secs_passed); - - case IOC_LIBCFS_LNET_DIST: - rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]); - if (rc < 0 && rc != -EHOSTUNREACH) - return rc; - - data->ioc_u32[0] = rc; - return 0; - - case IOC_LIBCFS_TESTPROTOCOMPAT: - lnet_net_lock(LNET_LOCK_EX); - the_lnet.ln_testprotocompat = data->ioc_flags; - lnet_net_unlock(LNET_LOCK_EX); - return 0; - - case IOC_LIBCFS_LNET_FAULT: - return lnet_fault_ctl(data->ioc_flags, data); - - case IOC_LIBCFS_PING: - id.nid = data->ioc_nid; - id.pid = data->ioc_u32[0]; - rc = lnet_ping(id, data->ioc_u32[1], /* timeout */ - data->ioc_pbuf1, - data->ioc_plen1 / sizeof(struct lnet_process_id)); - if (rc < 0) - return rc; - data->ioc_count = rc; - return 0; - - default: - ni = lnet_net2ni(data->ioc_net); - if (!ni) - return -EINVAL; - - if (!ni->ni_lnd->lnd_ctl) - rc = -EINVAL; - else - rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg); - - lnet_ni_decref(ni); - return rc; - } - /* not reached */ -} -EXPORT_SYMBOL(LNetCtl); - -void LNetDebugPeer(struct lnet_process_id id) -{ - lnet_debug_peer(id.nid); -} -EXPORT_SYMBOL(LNetDebugPeer); - -/** - * Retrieve the lnet_process_id ID of LNet interface at \a index. Note that - * all interfaces share a same PID, as requested by LNetNIInit(). - * - * \param index Index of the interface to look up. - * \param id On successful return, this location will hold the - * lnet_process_id ID of the interface. - * - * \retval 0 If an interface exists at \a index. - * \retval -ENOENT If no interface has been found. 
- */ -int -LNetGetId(unsigned int index, struct lnet_process_id *id) -{ - struct lnet_ni *ni; - struct list_head *tmp; - int cpt; - int rc = -ENOENT; - - LASSERT(the_lnet.ln_refcount > 0); - - cpt = lnet_net_lock_current(); - - list_for_each(tmp, &the_lnet.ln_nis) { - if (index--) - continue; - - ni = list_entry(tmp, struct lnet_ni, ni_list); - - id->nid = ni->ni_nid; - id->pid = the_lnet.ln_pid; - rc = 0; - break; - } - - lnet_net_unlock(cpt); - return rc; -} -EXPORT_SYMBOL(LNetGetId); - -static int lnet_ping(struct lnet_process_id id, int timeout_ms, - struct lnet_process_id __user *ids, int n_ids) -{ - struct lnet_handle_eq eqh; - struct lnet_handle_md mdh; - struct lnet_event event; - struct lnet_md md = { NULL }; - int which; - int unlinked = 0; - int replied = 0; - const int a_long_time = 60000; /* mS */ - int infosz; - struct lnet_ping_info *info; - struct lnet_process_id tmpid; - int i; - int nob; - int rc; - int rc2; - - infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]); - - if (n_ids <= 0 || - id.nid == LNET_NID_ANY || - timeout_ms > 500000 || /* arbitrary limit! */ - n_ids > 20) /* arbitrary limit! */ - return -EINVAL; - - if (id.pid == LNET_PID_ANY) - id.pid = LNET_PID_LUSTRE; - - info = kzalloc(infosz, GFP_KERNEL); - if (!info) - return -ENOMEM; - - /* NB 2 events max (including any unlink event) */ - rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh); - if (rc) { - CERROR("Can't allocate EQ: %d\n", rc); - goto out_0; - } - - /* initialize md content */ - md.start = info; - md.length = infosz; - md.threshold = 2; /*GET/REPLY*/ - md.max_size = 0; - md.options = LNET_MD_TRUNCATE; - md.user_ptr = NULL; - md.eq_handle = eqh; - - rc = LNetMDBind(md, LNET_UNLINK, &mdh); - if (rc) { - CERROR("Can't bind MD: %d\n", rc); - goto out_1; - } - - rc = LNetGet(LNET_NID_ANY, mdh, id, - LNET_RESERVED_PORTAL, - LNET_PROTO_PING_MATCHBITS, 0); - - if (rc) { - /* Don't CERROR; this could be deliberate! 
*/ - - rc2 = LNetMDUnlink(mdh); - LASSERT(!rc2); - - /* NB must wait for the UNLINK event below... */ - unlinked = 1; - timeout_ms = a_long_time; - } - - do { - /* MUST block for unlink to complete */ - - rc2 = LNetEQPoll(&eqh, 1, timeout_ms, !unlinked, - &event, &which); - - CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2, - (rc2 <= 0) ? -1 : event.type, - (rc2 <= 0) ? -1 : event.status, - (rc2 > 0 && event.unlinked) ? " unlinked" : ""); - - LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */ - - if (rc2 <= 0 || event.status) { - /* timeout or error */ - if (!replied && !rc) - rc = (rc2 < 0) ? rc2 : - !rc2 ? -ETIMEDOUT : - event.status; - - if (!unlinked) { - /* Ensure completion in finite time... */ - LNetMDUnlink(mdh); - /* No assertion (racing with network) */ - unlinked = 1; - timeout_ms = a_long_time; - } else if (!rc2) { - /* timed out waiting for unlink */ - CWARN("ping %s: late network completion\n", - libcfs_id2str(id)); - } - } else if (event.type == LNET_EVENT_REPLY) { - replied = 1; - rc = event.mlength; - } - - } while (rc2 <= 0 || !event.unlinked); - - if (!replied) { - if (rc >= 0) - CWARN("%s: Unexpected rc >= 0 but no reply!\n", - libcfs_id2str(id)); - rc = -EIO; - goto out_1; - } - - nob = rc; - LASSERT(nob >= 0 && nob <= infosz); - - rc = -EPROTO; /* if I can't parse... 
*/ - - if (nob < 8) { - /* can't check magic/version */ - CERROR("%s: ping info too short %d\n", - libcfs_id2str(id), nob); - goto out_1; - } - - if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) { - lnet_swap_pinginfo(info); - } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) { - CERROR("%s: Unexpected magic %08x\n", - libcfs_id2str(id), info->pi_magic); - goto out_1; - } - - if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) { - CERROR("%s: ping w/o NI status: 0x%x\n", - libcfs_id2str(id), info->pi_features); - goto out_1; - } - - if (nob < offsetof(struct lnet_ping_info, pi_ni[0])) { - CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id), - nob, (int)offsetof(struct lnet_ping_info, pi_ni[0])); - goto out_1; - } - - if (info->pi_nnis < n_ids) - n_ids = info->pi_nnis; - - if (nob < offsetof(struct lnet_ping_info, pi_ni[n_ids])) { - CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id), - nob, (int)offsetof(struct lnet_ping_info, pi_ni[n_ids])); - goto out_1; - } - - rc = -EFAULT; /* If I SEGV... */ - - memset(&tmpid, 0, sizeof(tmpid)); - for (i = 0; i < n_ids; i++) { - tmpid.pid = info->pi_pid; - tmpid.nid = info->pi_ni[i].ns_nid; - if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid))) - goto out_1; - } - rc = info->pi_nnis; - - out_1: - rc2 = LNetEQFree(eqh); - if (rc2) - CERROR("rc2 %d\n", rc2); - LASSERT(!rc2); - - out_0: - kfree(info); - return rc; -} diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c deleted file mode 100644 index 55ecc1998b7e..000000000000 --- a/drivers/staging/lustre/lnet/lnet/config.c +++ /dev/null @@ -1,1235 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LNET -#include -#include -#include -#include - -struct lnet_text_buf { /* tmp struct for parsing routes */ - struct list_head ltb_list; /* stash on lists */ - int ltb_size; /* allocated size */ - char ltb_text[0]; /* text buffer */ -}; - -static int lnet_tbnob; /* track text buf allocation */ -#define LNET_MAX_TEXTBUF_NOB (64 << 10) /* bound allocation */ -#define LNET_SINGLE_TEXTBUF_NOB (4 << 10) - -static void -lnet_syntax(char *name, char *str, int offset, int width) -{ - static char dots[LNET_SINGLE_TEXTBUF_NOB]; - static char dashes[LNET_SINGLE_TEXTBUF_NOB]; - - memset(dots, '.', sizeof(dots)); - dots[sizeof(dots) - 1] = 0; - memset(dashes, '-', sizeof(dashes)); - dashes[sizeof(dashes) - 1] = 0; - - LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str); - LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n", - (int)strlen(name), dots, offset, dots, - (width < 1) ? 
0 : width - 1, dashes); -} - -static int -lnet_issep(char c) -{ - switch (c) { - case '\n': - case '\r': - case ';': - return 1; - default: - return 0; - } -} - -int -lnet_net_unique(__u32 net, struct list_head *nilist) -{ - struct list_head *tmp; - struct lnet_ni *ni; - - list_for_each(tmp, nilist) { - ni = list_entry(tmp, struct lnet_ni, ni_list); - - if (LNET_NIDNET(ni->ni_nid) == net) - return 0; - } - - return 1; -} - -void -lnet_ni_free(struct lnet_ni *ni) -{ - int i; - - if (ni->ni_refs) - cfs_percpt_free(ni->ni_refs); - - if (ni->ni_tx_queues) - cfs_percpt_free(ni->ni_tx_queues); - - if (ni->ni_cpts) - cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts); - - kfree(ni->ni_lnd_tunables); - - for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) - kfree(ni->ni_interfaces[i]); - - /* release reference to net namespace */ - if (ni->ni_net_ns) - put_net(ni->ni_net_ns); - - kfree(ni); -} - -struct lnet_ni * -lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist) -{ - struct lnet_tx_queue *tq; - struct lnet_ni *ni; - int rc; - int i; - - if (!lnet_net_unique(net, nilist)) { - LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n", - libcfs_net2str(net)); - return NULL; - } - - ni = kzalloc(sizeof(*ni), GFP_NOFS); - if (!ni) { - CERROR("Out of memory creating network %s\n", - libcfs_net2str(net)); - return NULL; - } - - spin_lock_init(&ni->ni_lock); - INIT_LIST_HEAD(&ni->ni_cptlist); - ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*ni->ni_refs[0])); - if (!ni->ni_refs) - goto failed; - - ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*ni->ni_tx_queues[0])); - if (!ni->ni_tx_queues) - goto failed; - - cfs_percpt_for_each(tq, i, ni->ni_tx_queues) - INIT_LIST_HEAD(&tq->tq_delayed); - - if (!el) { - ni->ni_cpts = NULL; - ni->ni_ncpts = LNET_CPT_NUMBER; - } else { - rc = cfs_expr_list_values(el, LNET_CPT_NUMBER, &ni->ni_cpts); - if (rc <= 0) { - CERROR("Failed to set CPTs for NI %s: %d\n", - 
libcfs_net2str(net), rc); - goto failed; - } - - LASSERT(rc <= LNET_CPT_NUMBER); - if (rc == LNET_CPT_NUMBER) { - cfs_expr_list_values_free(ni->ni_cpts, LNET_CPT_NUMBER); - ni->ni_cpts = NULL; - } - - ni->ni_ncpts = rc; - } - - /* LND will fill in the address part of the NID */ - ni->ni_nid = LNET_MKNID(net, 0); - - /* Store net namespace in which current ni is being created */ - if (current->nsproxy->net_ns) - ni->ni_net_ns = get_net(current->nsproxy->net_ns); - else - ni->ni_net_ns = NULL; - - ni->ni_last_alive = ktime_get_real_seconds(); - list_add_tail(&ni->ni_list, nilist); - return ni; - failed: - lnet_ni_free(ni); - return NULL; -} - -int -lnet_parse_networks(struct list_head *nilist, char *networks) -{ - struct cfs_expr_list *el = NULL; - char *tokens; - char *str; - char *tmp; - struct lnet_ni *ni; - __u32 net; - int nnets = 0; - struct list_head *temp_node; - - if (!networks) { - CERROR("networks string is undefined\n"); - return -EINVAL; - } - - if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) { - /* _WAY_ conservative */ - LCONSOLE_ERROR_MSG(0x112, - "Can't parse networks: string too long\n"); - return -EINVAL; - } - - tokens = kstrdup(networks, GFP_KERNEL); - if (!tokens) { - CERROR("Can't allocate net tokens\n"); - return -ENOMEM; - } - - tmp = tokens; - str = tokens; - - while (str && *str) { - char *comma = strchr(str, ','); - char *bracket = strchr(str, '('); - char *square = strchr(str, '['); - char *iface; - int niface; - int rc; - - /* - * NB we don't check interface conflicts here; it's the LNDs - * responsibility (if it cares at all) - */ - if (square && (!comma || square < comma)) { - /* - * i.e: o2ib0(ib0)[1,2], number between square - * brackets are CPTs this NI needs to be bond - */ - if (bracket && bracket > square) { - tmp = square; - goto failed_syntax; - } - - tmp = strchr(square, ']'); - if (!tmp) { - tmp = square; - goto failed_syntax; - } - - rc = cfs_expr_list_parse(square, tmp - square + 1, - 0, LNET_CPT_NUMBER - 1, &el); - if (rc) 
{ - tmp = square; - goto failed_syntax; - } - - while (square <= tmp) - *square++ = ' '; - } - - if (!bracket || (comma && comma < bracket)) { - /* no interface list specified */ - - if (comma) - *comma++ = 0; - net = libcfs_str2net(strim(str)); - - if (net == LNET_NIDNET(LNET_NID_ANY)) { - LCONSOLE_ERROR_MSG(0x113, - "Unrecognised network type\n"); - tmp = str; - goto failed_syntax; - } - - if (LNET_NETTYP(net) != LOLND && /* LO is implicit */ - !lnet_ni_alloc(net, el, nilist)) - goto failed; - - if (el) { - cfs_expr_list_free(el); - el = NULL; - } - - str = comma; - continue; - } - - *bracket = 0; - net = libcfs_str2net(strim(str)); - if (net == LNET_NIDNET(LNET_NID_ANY)) { - tmp = str; - goto failed_syntax; - } - - ni = lnet_ni_alloc(net, el, nilist); - if (!ni) - goto failed; - - if (el) { - cfs_expr_list_free(el); - el = NULL; - } - - niface = 0; - iface = bracket + 1; - - bracket = strchr(iface, ')'); - if (!bracket) { - tmp = iface; - goto failed_syntax; - } - - *bracket = 0; - do { - comma = strchr(iface, ','); - if (comma) - *comma++ = 0; - - iface = strim(iface); - if (!*iface) { - tmp = iface; - goto failed_syntax; - } - - if (niface == LNET_MAX_INTERFACES) { - LCONSOLE_ERROR_MSG(0x115, - "Too many interfaces for net %s\n", - libcfs_net2str(net)); - goto failed; - } - - /* - * Allocate a separate piece of memory and copy - * into it the string, so we don't have - * a depencency on the tokens string. This way we - * can free the tokens at the end of the function. 
- * The newly allocated ni_interfaces[] can be - * freed when freeing the NI - */ - ni->ni_interfaces[niface] = kstrdup(iface, GFP_KERNEL); - if (!ni->ni_interfaces[niface]) { - CERROR("Can't allocate net interface name\n"); - goto failed; - } - niface++; - iface = comma; - } while (iface); - - str = bracket + 1; - comma = strchr(bracket + 1, ','); - if (comma) { - *comma = 0; - str = strim(str); - if (*str) { - tmp = str; - goto failed_syntax; - } - str = comma + 1; - continue; - } - - str = strim(str); - if (*str) { - tmp = str; - goto failed_syntax; - } - } - - list_for_each(temp_node, nilist) - nnets++; - - kfree(tokens); - return nnets; - - failed_syntax: - lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp)); - failed: - while (!list_empty(nilist)) { - ni = list_entry(nilist->next, struct lnet_ni, ni_list); - - list_del(&ni->ni_list); - lnet_ni_free(ni); - } - - if (el) - cfs_expr_list_free(el); - - kfree(tokens); - - return -EINVAL; -} - -static struct lnet_text_buf * -lnet_new_text_buf(int str_len) -{ - struct lnet_text_buf *ltb; - int nob; - - /* NB allocate space for the terminating 0 */ - nob = offsetof(struct lnet_text_buf, ltb_text[str_len + 1]); - if (nob > LNET_SINGLE_TEXTBUF_NOB) { - /* _way_ conservative for "route net gateway..." 
*/ - CERROR("text buffer too big\n"); - return NULL; - } - - if (lnet_tbnob + nob > LNET_MAX_TEXTBUF_NOB) { - CERROR("Too many text buffers\n"); - return NULL; - } - - ltb = kzalloc(nob, GFP_KERNEL); - if (!ltb) - return NULL; - - ltb->ltb_size = nob; - ltb->ltb_text[0] = 0; - lnet_tbnob += nob; - return ltb; -} - -static void -lnet_free_text_buf(struct lnet_text_buf *ltb) -{ - lnet_tbnob -= ltb->ltb_size; - kfree(ltb); -} - -static void -lnet_free_text_bufs(struct list_head *tbs) -{ - struct lnet_text_buf *ltb; - - while (!list_empty(tbs)) { - ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list); - - list_del(<b->ltb_list); - lnet_free_text_buf(ltb); - } -} - -static int -lnet_str2tbs_sep(struct list_head *tbs, char *str) -{ - struct list_head pending; - char *sep; - int nob; - int i; - struct lnet_text_buf *ltb; - - INIT_LIST_HEAD(&pending); - - /* Split 'str' into separate commands */ - for (;;) { - /* skip leading whitespace */ - while (isspace(*str)) - str++; - - /* scan for separator or comment */ - for (sep = str; *sep; sep++) - if (lnet_issep(*sep) || *sep == '#') - break; - - nob = (int)(sep - str); - if (nob > 0) { - ltb = lnet_new_text_buf(nob); - if (!ltb) { - lnet_free_text_bufs(&pending); - return -ENOMEM; - } - - for (i = 0; i < nob; i++) - if (isspace(str[i])) - ltb->ltb_text[i] = ' '; - else - ltb->ltb_text[i] = str[i]; - - ltb->ltb_text[nob] = 0; - - list_add_tail(<b->ltb_list, &pending); - } - - if (*sep == '#') { - /* scan for separator */ - do { - sep++; - } while (*sep && !lnet_issep(*sep)); - } - - if (!*sep) - break; - - str = sep + 1; - } - - list_splice(&pending, tbs->prev); - return 0; -} - -static int -lnet_expand1tb(struct list_head *list, - char *str, char *sep1, char *sep2, - char *item, int itemlen) -{ - int len1 = (int)(sep1 - str); - int len2 = strlen(sep2 + 1); - struct lnet_text_buf *ltb; - - LASSERT(*sep1 == '['); - LASSERT(*sep2 == ']'); - - ltb = lnet_new_text_buf(len1 + itemlen + len2); - if (!ltb) - return -ENOMEM; - 
- memcpy(ltb->ltb_text, str, len1); - memcpy(<b->ltb_text[len1], item, itemlen); - memcpy(<b->ltb_text[len1 + itemlen], sep2 + 1, len2); - ltb->ltb_text[len1 + itemlen + len2] = 0; - - list_add_tail(<b->ltb_list, list); - return 0; -} - -static int -lnet_str2tbs_expand(struct list_head *tbs, char *str) -{ - char num[16]; - struct list_head pending; - char *sep; - char *sep2; - char *parsed; - char *enditem; - int lo; - int hi; - int stride; - int i; - int nob; - int scanned; - - INIT_LIST_HEAD(&pending); - - sep = strchr(str, '['); - if (!sep) /* nothing to expand */ - return 0; - - sep2 = strchr(sep, ']'); - if (!sep2) - goto failed; - - for (parsed = sep; parsed < sep2; parsed = enditem) { - enditem = ++parsed; - while (enditem < sep2 && *enditem != ',') - enditem++; - - if (enditem == parsed) /* no empty items */ - goto failed; - - if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi, - &stride, &scanned) < 3) { - if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) { - /* simple string enumeration */ - if (lnet_expand1tb(&pending, str, sep, sep2, - parsed, - (int)(enditem - parsed))) { - goto failed; - } - continue; - } - - stride = 1; - } - - /* range expansion */ - - if (enditem != parsed + scanned) /* no trailing junk */ - goto failed; - - if (hi < 0 || lo < 0 || stride < 0 || hi < lo || - (hi - lo) % stride) - goto failed; - - for (i = lo; i <= hi; i += stride) { - snprintf(num, sizeof(num), "%d", i); - nob = strlen(num); - if (nob + 1 == sizeof(num)) - goto failed; - - if (lnet_expand1tb(&pending, str, sep, sep2, - num, nob)) - goto failed; - } - } - - list_splice(&pending, tbs->prev); - return 1; - - failed: - lnet_free_text_bufs(&pending); - return -EINVAL; -} - -static int -lnet_parse_hops(char *str, unsigned int *hops) -{ - int len = strlen(str); - int nob = len; - - return (sscanf(str, "%u%n", hops, &nob) >= 1 && - nob == len && - *hops > 0 && *hops < 256); -} - -#define LNET_PRIORITY_SEPARATOR (':') - -static int -lnet_parse_priority(char *str, unsigned int 
*priority, char **token) -{ - int nob; - char *sep; - int len; - - sep = strchr(str, LNET_PRIORITY_SEPARATOR); - if (!sep) { - *priority = 0; - return 0; - } - len = strlen(sep + 1); - - if ((sscanf((sep + 1), "%u%n", priority, &nob) < 1) || (len != nob)) { - /* - * Update the caller's token pointer so it treats the found - * priority as the token to report in the error message. - */ - *token += sep - str + 1; - return -EINVAL; - } - - CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob); - - /* - * Change priority separator to \0 to be able to parse NID - */ - *sep = '\0'; - return 0; -} - -static int -lnet_parse_route(char *str, int *im_a_router) -{ - /* static scratch buffer OK (single threaded) */ - static char cmd[LNET_SINGLE_TEXTBUF_NOB]; - - struct list_head nets; - struct list_head gateways; - struct list_head *tmp1; - struct list_head *tmp2; - __u32 net; - lnet_nid_t nid; - struct lnet_text_buf *ltb; - int rc; - char *sep; - char *token = str; - int ntokens = 0; - int myrc = -1; - __u32 hops; - int got_hops = 0; - unsigned int priority = 0; - - INIT_LIST_HEAD(&gateways); - INIT_LIST_HEAD(&nets); - - /* save a copy of the string for error messages */ - strncpy(cmd, str, sizeof(cmd)); - cmd[sizeof(cmd) - 1] = '\0'; - - sep = str; - for (;;) { - /* scan for token start */ - while (isspace(*sep)) - sep++; - if (!*sep) { - if (ntokens < (got_hops ? 
3 : 2)) - goto token_error; - break; - } - - ntokens++; - token = sep++; - - /* scan for token end */ - while (*sep && !isspace(*sep)) - sep++; - if (*sep) - *sep++ = 0; - - if (ntokens == 1) { - tmp2 = &nets; /* expanding nets */ - } else if (ntokens == 2 && - lnet_parse_hops(token, &hops)) { - got_hops = 1; /* got a hop count */ - continue; - } else { - tmp2 = &gateways; /* expanding gateways */ - } - - ltb = lnet_new_text_buf(strlen(token)); - if (!ltb) - goto out; - - strcpy(ltb->ltb_text, token); - tmp1 = <b->ltb_list; - list_add_tail(tmp1, tmp2); - - while (tmp1 != tmp2) { - ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list); - - rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text); - if (rc < 0) - goto token_error; - - tmp1 = tmp1->next; - - if (rc > 0) { /* expanded! */ - list_del(<b->ltb_list); - lnet_free_text_buf(ltb); - continue; - } - - if (ntokens == 1) { - net = libcfs_str2net(ltb->ltb_text); - if (net == LNET_NIDNET(LNET_NID_ANY) || - LNET_NETTYP(net) == LOLND) - goto token_error; - } else { - rc = lnet_parse_priority(ltb->ltb_text, - &priority, &token); - if (rc < 0) - goto token_error; - - nid = libcfs_str2nid(ltb->ltb_text); - if (nid == LNET_NID_ANY || - LNET_NETTYP(LNET_NIDNET(nid)) == LOLND) - goto token_error; - } - } - } - - /** - * if there are no hops set then we want to flag this value as - * unset since hops is an optional parameter - */ - if (!got_hops) - hops = LNET_UNDEFINED_HOPS; - - LASSERT(!list_empty(&nets)); - LASSERT(!list_empty(&gateways)); - - list_for_each(tmp1, &nets) { - ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list); - net = libcfs_str2net(ltb->ltb_text); - LASSERT(net != LNET_NIDNET(LNET_NID_ANY)); - - list_for_each(tmp2, &gateways) { - ltb = list_entry(tmp2, struct lnet_text_buf, ltb_list); - nid = libcfs_str2nid(ltb->ltb_text); - LASSERT(nid != LNET_NID_ANY); - - if (lnet_islocalnid(nid)) { - *im_a_router = 1; - continue; - } - - rc = lnet_add_route(net, hops, nid, priority); - if (rc && rc != -EEXIST && rc 
!= -EHOSTUNREACH) { - CERROR("Can't create route to %s via %s\n", - libcfs_net2str(net), - libcfs_nid2str(nid)); - goto out; - } - } - } - - myrc = 0; - goto out; - - token_error: - lnet_syntax("routes", cmd, (int)(token - str), strlen(token)); - out: - lnet_free_text_bufs(&nets); - lnet_free_text_bufs(&gateways); - return myrc; -} - -static int -lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router) -{ - struct lnet_text_buf *ltb; - - while (!list_empty(tbs)) { - ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list); - - if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) { - lnet_free_text_bufs(tbs); - return -EINVAL; - } - - list_del(<b->ltb_list); - lnet_free_text_buf(ltb); - } - - return 0; -} - -int -lnet_parse_routes(char *routes, int *im_a_router) -{ - struct list_head tbs; - int rc = 0; - - *im_a_router = 0; - - INIT_LIST_HEAD(&tbs); - - if (lnet_str2tbs_sep(&tbs, routes) < 0) { - CERROR("Error parsing routes\n"); - rc = -EINVAL; - } else { - rc = lnet_parse_route_tbs(&tbs, im_a_router); - } - - LASSERT(!lnet_tbnob); - return rc; -} - -static int -lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip) -{ - LIST_HEAD(list); - int rc; - int i; - - rc = cfs_ip_addr_parse(token, len, &list); - if (rc) - return rc; - - for (rc = i = 0; !rc && i < nip; i++) - rc = cfs_ip_addr_match(ipaddrs[i], &list); - - cfs_expr_list_free_list(&list); - - return rc; -} - -static int -lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip) -{ - static char tokens[LNET_SINGLE_TEXTBUF_NOB]; - - int matched = 0; - int ntokens = 0; - int len; - char *net = NULL; - char *sep; - char *token; - int rc; - - LASSERT(strlen(net_entry) < sizeof(tokens)); - - /* work on a copy of the string */ - strcpy(tokens, net_entry); - sep = tokens; - for (;;) { - /* scan for token start */ - while (isspace(*sep)) - sep++; - if (!*sep) - break; - - token = sep++; - - /* scan for token end */ - while (*sep && !isspace(*sep)) - sep++; - if (*sep) - *sep++ = 0; 
- - if (!ntokens++) { - net = token; - continue; - } - - len = strlen(token); - - rc = lnet_match_network_token(token, len, ipaddrs, nip); - if (rc < 0) { - lnet_syntax("ip2nets", net_entry, - (int)(token - tokens), len); - return rc; - } - - if (rc) - matched |= 1; - } - - if (!matched) - return 0; - - strcpy(net_entry, net); /* replace with matched net */ - return 1; -} - -static __u32 -lnet_netspec2net(char *netspec) -{ - char *bracket = strchr(netspec, '('); - __u32 net; - - if (bracket) - *bracket = 0; - - net = libcfs_str2net(netspec); - - if (bracket) - *bracket = '('; - - return net; -} - -static int -lnet_splitnets(char *source, struct list_head *nets) -{ - int offset = 0; - int offset2; - int len; - struct lnet_text_buf *tb; - struct lnet_text_buf *tb2; - struct list_head *t; - char *sep; - char *bracket; - __u32 net; - - LASSERT(!list_empty(nets)); - LASSERT(nets->next == nets->prev); /* single entry */ - - tb = list_entry(nets->next, struct lnet_text_buf, ltb_list); - - for (;;) { - sep = strchr(tb->ltb_text, ','); - bracket = strchr(tb->ltb_text, '('); - - if (sep && bracket && bracket < sep) { - /* netspec lists interfaces... */ - - offset2 = offset + (int)(bracket - tb->ltb_text); - len = strlen(bracket); - - bracket = strchr(bracket + 1, ')'); - - if (!bracket || - !(bracket[1] == ',' || !bracket[1])) { - lnet_syntax("ip2nets", source, offset2, len); - return -EINVAL; - } - - sep = !bracket[1] ? 
NULL : bracket + 1; - } - - if (sep) - *sep++ = 0; - - net = lnet_netspec2net(tb->ltb_text); - if (net == LNET_NIDNET(LNET_NID_ANY)) { - lnet_syntax("ip2nets", source, offset, - strlen(tb->ltb_text)); - return -EINVAL; - } - - list_for_each(t, nets) { - tb2 = list_entry(t, struct lnet_text_buf, ltb_list); - - if (tb2 == tb) - continue; - - if (net == lnet_netspec2net(tb2->ltb_text)) { - /* duplicate network */ - lnet_syntax("ip2nets", source, offset, - strlen(tb->ltb_text)); - return -EINVAL; - } - } - - if (!sep) - return 0; - - offset += (int)(sep - tb->ltb_text); - len = strlen(sep); - tb2 = lnet_new_text_buf(len); - if (!tb2) - return -ENOMEM; - - strncpy(tb2->ltb_text, sep, len); - tb2->ltb_text[len] = '\0'; - list_add_tail(&tb2->ltb_list, nets); - - tb = tb2; - } -} - -static int -lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) -{ - static char networks[LNET_SINGLE_TEXTBUF_NOB]; - static char source[LNET_SINGLE_TEXTBUF_NOB]; - - struct list_head raw_entries; - struct list_head matched_nets; - struct list_head current_nets; - struct list_head *t; - struct list_head *t2; - struct lnet_text_buf *tb; - struct lnet_text_buf *temp; - struct lnet_text_buf *tb2; - __u32 net1; - __u32 net2; - int len; - int count; - int dup; - int rc; - - INIT_LIST_HEAD(&raw_entries); - if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) { - CERROR("Error parsing ip2nets\n"); - LASSERT(!lnet_tbnob); - return -EINVAL; - } - - INIT_LIST_HEAD(&matched_nets); - INIT_LIST_HEAD(¤t_nets); - networks[0] = 0; - count = 0; - len = 0; - rc = 0; - - list_for_each_entry_safe(tb, temp, &raw_entries, ltb_list) { - strncpy(source, tb->ltb_text, sizeof(source)); - source[sizeof(source) - 1] = '\0'; - - /* replace ltb_text with the network(s) add on match */ - rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip); - if (rc < 0) - break; - - list_del(&tb->ltb_list); - - if (!rc) { /* no match */ - lnet_free_text_buf(tb); - continue; - } - - /* split into separate networks 
*/ - INIT_LIST_HEAD(¤t_nets); - list_add(&tb->ltb_list, ¤t_nets); - rc = lnet_splitnets(source, ¤t_nets); - if (rc < 0) - break; - - dup = 0; - list_for_each(t, ¤t_nets) { - tb = list_entry(t, struct lnet_text_buf, ltb_list); - net1 = lnet_netspec2net(tb->ltb_text); - LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY)); - - list_for_each(t2, &matched_nets) { - tb2 = list_entry(t2, struct lnet_text_buf, - ltb_list); - net2 = lnet_netspec2net(tb2->ltb_text); - LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY)); - - if (net1 == net2) { - dup = 1; - break; - } - } - - if (dup) - break; - } - - if (dup) { - lnet_free_text_bufs(¤t_nets); - continue; - } - - list_for_each_safe(t, t2, ¤t_nets) { - tb = list_entry(t, struct lnet_text_buf, ltb_list); - - list_del(&tb->ltb_list); - list_add_tail(&tb->ltb_list, &matched_nets); - - len += snprintf(networks + len, sizeof(networks) - len, - "%s%s", !len ? "" : ",", - tb->ltb_text); - - if (len >= sizeof(networks)) { - CERROR("Too many matched networks\n"); - rc = -E2BIG; - goto out; - } - } - - count++; - } - - out: - lnet_free_text_bufs(&raw_entries); - lnet_free_text_bufs(&matched_nets); - lnet_free_text_bufs(¤t_nets); - LASSERT(!lnet_tbnob); - - if (rc < 0) - return rc; - - *networksp = networks; - return count; -} - -static int -lnet_ipaddr_enumerate(__u32 **ipaddrsp) -{ - int up; - __u32 netmask; - __u32 *ipaddrs; - __u32 *ipaddrs2; - int nip; - char **ifnames; - int nif = lnet_ipif_enumerate(&ifnames); - int i; - int rc; - - if (nif <= 0) - return nif; - - ipaddrs = kcalloc(nif, sizeof(*ipaddrs), GFP_KERNEL); - if (!ipaddrs) { - CERROR("Can't allocate ipaddrs[%d]\n", nif); - lnet_ipif_free_enumeration(ifnames, nif); - return -ENOMEM; - } - - for (i = nip = 0; i < nif; i++) { - if (!strcmp(ifnames[i], "lo")) - continue; - - rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask); - if (rc) { - CWARN("Can't query interface %s: %d\n", - ifnames[i], rc); - continue; - } - - if (!up) { - CWARN("Ignoring interface %s: it's down\n", - 
ifnames[i]); - continue; - } - - nip++; - } - - lnet_ipif_free_enumeration(ifnames, nif); - - if (nip == nif) { - *ipaddrsp = ipaddrs; - } else { - if (nip > 0) { - ipaddrs2 = kcalloc(nip, sizeof(*ipaddrs2), - GFP_KERNEL); - if (!ipaddrs2) { - CERROR("Can't allocate ipaddrs[%d]\n", nip); - nip = -ENOMEM; - } else { - memcpy(ipaddrs2, ipaddrs, - nip * sizeof(*ipaddrs)); - *ipaddrsp = ipaddrs2; - rc = nip; - } - } - kfree(ipaddrs); - } - return nip; -} - -int -lnet_parse_ip2nets(char **networksp, char *ip2nets) -{ - __u32 *ipaddrs = NULL; - int nip = lnet_ipaddr_enumerate(&ipaddrs); - int rc; - - if (nip < 0) { - LCONSOLE_ERROR_MSG(0x117, - "Error %d enumerating local IP interfaces for ip2nets to match\n", - nip); - return nip; - } - - if (!nip) { - LCONSOLE_ERROR_MSG(0x118, - "No local IP interfaces for ip2nets to match\n"); - return -ENOENT; - } - - rc = lnet_match_networks(networksp, ip2nets, ipaddrs, nip); - kfree(ipaddrs); - - if (rc < 0) { - LCONSOLE_ERROR_MSG(0x119, "Error %d parsing ip2nets\n", rc); - return rc; - } - - if (!rc) { - LCONSOLE_ERROR_MSG(0x11a, - "ip2nets does not match any local IP interfaces\n"); - return -ENOENT; - } - - return 0; -} diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c deleted file mode 100644 index c78e70373ab4..000000000000 --- a/drivers/staging/lustre/lnet/lnet/lib-eq.c +++ /dev/null @@ -1,426 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/lnet/lib-eq.c - * - * Library level Event queue management routines - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include - -/** - * Create an event queue that has room for \a count number of events. - * - * The event queue is circular and older events will be overwritten by new - * ones if they are not removed in time by the user using the functions - * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to - * determine the appropriate size of the event queue to prevent this loss - * of events. Note that when EQ handler is specified in \a callback, no - * event loss can happen, since the handler is run for each event deposited - * into the EQ. - * - * \param count The number of events to be stored in the event queue. It - * will be rounded up to the next power of two. - * \param callback A handler function that runs when an event is deposited - * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to - * indicate that no event handler is desired. - * \param handle On successful return, this location will hold a handle for - * the newly created EQ. - * - * \retval 0 On success. - * \retval -EINVAL If an parameter is not valid. - * \retval -ENOMEM If memory for the EQ can't be allocated. - * - * \see lnet_eq_handler_t for the discussion on EQ handler semantics. 
- */ -int -LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, - struct lnet_handle_eq *handle) -{ - struct lnet_eq *eq; - - LASSERT(the_lnet.ln_refcount > 0); - - /* - * We need count to be a power of 2 so that when eq_{enq,deq}_seq - * overflow, they don't skip entries, so the queue has the same - * apparent capacity at all times - */ - if (count) - count = roundup_pow_of_two(count); - - if (callback != LNET_EQ_HANDLER_NONE && count) - CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count); - - /* - * count can be 0 if only need callback, we can eliminate - * overhead of enqueue event - */ - if (!count && callback == LNET_EQ_HANDLER_NONE) - return -EINVAL; - - eq = kzalloc(sizeof(*eq), GFP_NOFS); - if (!eq) - return -ENOMEM; - - if (count) { - eq->eq_events = kvmalloc_array(count, sizeof(struct lnet_event), - GFP_KERNEL | __GFP_ZERO); - if (!eq->eq_events) - goto failed; - /* - * NB allocator has set all event sequence numbers to 0, - * so all them should be earlier than eq_deq_seq - */ - } - - eq->eq_deq_seq = 1; - eq->eq_enq_seq = 1; - eq->eq_size = count; - eq->eq_callback = callback; - - eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*eq->eq_refs[0])); - if (!eq->eq_refs) - goto failed; - - /* MUST hold both exclusive lnet_res_lock */ - lnet_res_lock(LNET_LOCK_EX); - /* - * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do - * both EQ lookup and poll event with only lnet_eq_wait_lock - */ - lnet_eq_wait_lock(); - - lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh); - list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active); - - lnet_eq_wait_unlock(); - lnet_res_unlock(LNET_LOCK_EX); - - lnet_eq2handle(handle, eq); - return 0; - -failed: - kvfree(eq->eq_events); - - if (eq->eq_refs) - cfs_percpt_free(eq->eq_refs); - - kfree(eq); - return -ENOMEM; -} -EXPORT_SYMBOL(LNetEQAlloc); - 
-/** - * Release the resources associated with an event queue if it's idle; - * otherwise do nothing and it's up to the user to try again. - * - * \param eqh A handle for the event queue to be released. - * - * \retval 0 If the EQ is not in use and freed. - * \retval -ENOENT If \a eqh does not point to a valid EQ. - * \retval -EBUSY If the EQ is still in use by some MDs. - */ -int -LNetEQFree(struct lnet_handle_eq eqh) -{ - struct lnet_eq *eq; - struct lnet_event *events = NULL; - int **refs = NULL; - int *ref; - int rc = 0; - int size = 0; - int i; - - LASSERT(the_lnet.ln_refcount > 0); - - lnet_res_lock(LNET_LOCK_EX); - /* - * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do - * both EQ lookup and poll event with only lnet_eq_wait_lock - */ - lnet_eq_wait_lock(); - - eq = lnet_handle2eq(&eqh); - if (!eq) { - rc = -ENOENT; - goto out; - } - - cfs_percpt_for_each(ref, i, eq->eq_refs) { - LASSERT(*ref >= 0); - if (!*ref) - continue; - - CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n", - i, *ref); - rc = -EBUSY; - goto out; - } - - /* stash for free after lock dropped */ - events = eq->eq_events; - size = eq->eq_size; - refs = eq->eq_refs; - - lnet_res_lh_invalidate(&eq->eq_lh); - list_del(&eq->eq_list); - kfree(eq); - out: - lnet_eq_wait_unlock(); - lnet_res_unlock(LNET_LOCK_EX); - - kvfree(events); - if (refs) - cfs_percpt_free(refs); - - return rc; -} -EXPORT_SYMBOL(LNetEQFree); - -void -lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev) -{ - /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */ - int index; - - if (!eq->eq_size) { - LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE); - eq->eq_callback(ev); - return; - } - - lnet_eq_wait_lock(); - ev->sequence = eq->eq_enq_seq++; - - LASSERT(is_power_of_2(eq->eq_size)); - index = ev->sequence & (eq->eq_size - 1); - - eq->eq_events[index] = *ev; - - if (eq->eq_callback != LNET_EQ_HANDLER_NONE) - eq->eq_callback(ev); - - /* Wake anyone waiting in LNetEQPoll() */ - if 
(waitqueue_active(&the_lnet.ln_eq_waitq)) - wake_up_all(&the_lnet.ln_eq_waitq); - lnet_eq_wait_unlock(); -} - -static int -lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev) -{ - int new_index = eq->eq_deq_seq & (eq->eq_size - 1); - struct lnet_event *new_event = &eq->eq_events[new_index]; - int rc; - - /* must called with lnet_eq_wait_lock hold */ - if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence)) - return 0; - - /* We've got a new event... */ - *ev = *new_event; - - CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n", - new_event, eq->eq_deq_seq, eq->eq_size); - - /* ...but did it overwrite an event we've not seen yet? */ - if (eq->eq_deq_seq == new_event->sequence) { - rc = 1; - } else { - /* - * don't complain with CERROR: some EQs are sized small - * anyway; if it's important, the caller should complain - */ - CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n", - eq->eq_deq_seq, new_event->sequence); - rc = -EOVERFLOW; - } - - eq->eq_deq_seq = new_event->sequence + 1; - return rc; -} - -/** - * A nonblocking function that can be used to get the next event in an EQ. - * If an event handler is associated with the EQ, the handler will run before - * this function returns successfully. The event is removed from the queue. - * - * \param eventq A handle for the event queue. - * \param event On successful return (1 or -EOVERFLOW), this location will - * hold the next event in the EQ. - * - * \retval 0 No pending event in the EQ. - * \retval 1 Indicates success. - * \retval -ENOENT If \a eventq does not point to a valid EQ. - * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that - * at least one event between this event and the last event obtained from the - * EQ has been dropped due to limited space in the EQ. - */ - -/** - * Block the calling process until there is an event in the EQ. - * If an event handler is associated with the EQ, the handler will run before - * this function returns successfully. 
This function returns the next event - * in the EQ and removes it from the EQ. - * - * \param eventq A handle for the event queue. - * \param event On successful return (1 or -EOVERFLOW), this location will - * hold the next event in the EQ. - * - * \retval 1 Indicates success. - * \retval -ENOENT If \a eventq does not point to a valid EQ. - * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that - * at least one event between this event and the last event obtained from the - * EQ has been dropped due to limited space in the EQ. - */ - -static int -lnet_eq_wait_locked(int *timeout_ms, long state) -__must_hold(&the_lnet.ln_eq_wait_lock) -{ - int tms = *timeout_ms; - int wait; - wait_queue_entry_t wl; - unsigned long now; - - if (!tms) - return -ENXIO; /* don't want to wait and no new event */ - - init_waitqueue_entry(&wl, current); - set_current_state(state); - add_wait_queue(&the_lnet.ln_eq_waitq, &wl); - - lnet_eq_wait_unlock(); - - if (tms < 0) { - schedule(); - } else { - now = jiffies; - schedule_timeout(msecs_to_jiffies(tms)); - tms -= jiffies_to_msecs(jiffies - now); - if (tms < 0) /* no more wait but may have new event */ - tms = 0; - } - - wait = tms; /* might need to call here again */ - *timeout_ms = tms; - - lnet_eq_wait_lock(); - remove_wait_queue(&the_lnet.ln_eq_waitq, &wl); - - return wait; -} - -/** - * Block the calling process until there's an event from a set of EQs or - * timeout happens. - * - * If an event handler is associated with the EQ, the handler will run before - * this function returns successfully, in which case the corresponding event - * is consumed. - * - * LNetEQPoll() provides a timeout to allow applications to poll, block for a - * fixed period, or block indefinitely. - * - * \param eventqs,neq An array of EQ handles, and size of the array. - * \param timeout_ms Time in milliseconds to wait for an event to occur on - * one of the EQs. 
The constant LNET_TIME_FOREVER can be used to indicate an - * infinite timeout. - * \param interruptible, if true, use TASK_INTERRUPTIBLE, else TASK_NOLOAD - * \param event,which On successful return (1 or -EOVERFLOW), \a event will - * hold the next event in the EQs, and \a which will contain the index of the - * EQ from which the event was taken. - * - * \retval 0 No pending event in the EQs after timeout. - * \retval 1 Indicates success. - * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that - * at least one event between this event and the last event obtained from the - * EQ indicated by \a which has been dropped due to limited space in the EQ. - * \retval -ENOENT If there's an invalid handle in \a eventqs. - */ -int -LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms, - int interruptible, - struct lnet_event *event, int *which) -{ - int wait = 1; - int rc; - int i; - - LASSERT(the_lnet.ln_refcount > 0); - - if (neq < 1) - return -ENOENT; - - lnet_eq_wait_lock(); - - for (;;) { - for (i = 0; i < neq; i++) { - struct lnet_eq *eq = lnet_handle2eq(&eventqs[i]); - - if (!eq) { - lnet_eq_wait_unlock(); - return -ENOENT; - } - - rc = lnet_eq_dequeue_event(eq, event); - if (rc) { - lnet_eq_wait_unlock(); - *which = i; - return rc; - } - } - - if (!wait) - break; - - /* - * return value of lnet_eq_wait_locked: - * -1 : did nothing and it's sure no new event - * 1 : sleep inside and wait until new event - * 0 : don't want to wait anymore, but might have new event - * so need to call dequeue again - */ - wait = lnet_eq_wait_locked(&timeout_ms, - interruptible ? 
TASK_INTERRUPTIBLE - : TASK_NOLOAD); - if (wait < 0) /* no new event */ - break; - } - - lnet_eq_wait_unlock(); - return 0; -} diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c deleted file mode 100644 index 8a22514aaf71..000000000000 --- a/drivers/staging/lustre/lnet/lnet/lib-md.c +++ /dev/null @@ -1,463 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/lnet/lib-md.c - * - * Memory Descriptor management routines - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include - -/* must be called with lnet_res_lock held */ -void -lnet_md_unlink(struct lnet_libmd *md) -{ - if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) { - /* first unlink attempt... 
*/ - struct lnet_me *me = md->md_me; - - md->md_flags |= LNET_MD_FLAG_ZOMBIE; - - /* - * Disassociate from ME (if any), - * and unlink it if it was created - * with LNET_UNLINK - */ - if (me) { - /* detach MD from portal */ - lnet_ptl_detach_md(me, md); - if (me->me_unlink == LNET_UNLINK) - lnet_me_unlink(me); - } - - /* ensure all future handle lookups fail */ - lnet_res_lh_invalidate(&md->md_lh); - } - - if (md->md_refcount) { - CDEBUG(D_NET, "Queueing unlink of md %p\n", md); - return; - } - - CDEBUG(D_NET, "Unlinking md %p\n", md); - - if (md->md_eq) { - int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie); - - LASSERT(*md->md_eq->eq_refs[cpt] > 0); - (*md->md_eq->eq_refs[cpt])--; - } - - LASSERT(!list_empty(&md->md_list)); - list_del_init(&md->md_list); - kfree(md); -} - -static int -lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink) -{ - int i; - unsigned int niov; - int total_length = 0; - - lmd->md_me = NULL; - lmd->md_start = umd->start; - lmd->md_offset = 0; - lmd->md_max_size = umd->max_size; - lmd->md_options = umd->options; - lmd->md_user_ptr = umd->user_ptr; - lmd->md_eq = NULL; - lmd->md_threshold = umd->threshold; - lmd->md_refcount = 0; - lmd->md_flags = (unlink == LNET_UNLINK) ? 
LNET_MD_FLAG_AUTO_UNLINK : 0; - - if (umd->options & LNET_MD_IOVEC) { - if (umd->options & LNET_MD_KIOV) /* Can't specify both */ - return -EINVAL; - - niov = umd->length; - lmd->md_niov = umd->length; - memcpy(lmd->md_iov.iov, umd->start, - niov * sizeof(lmd->md_iov.iov[0])); - - for (i = 0; i < (int)niov; i++) { - /* We take the base address on trust */ - /* invalid length */ - if (lmd->md_iov.iov[i].iov_len <= 0) - return -EINVAL; - - total_length += lmd->md_iov.iov[i].iov_len; - } - - lmd->md_length = total_length; - - if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */ - (umd->max_size < 0 || - umd->max_size > total_length)) /* illegal max_size */ - return -EINVAL; - - } else if (umd->options & LNET_MD_KIOV) { - niov = umd->length; - lmd->md_niov = umd->length; - memcpy(lmd->md_iov.kiov, umd->start, - niov * sizeof(lmd->md_iov.kiov[0])); - - for (i = 0; i < (int)niov; i++) { - /* We take the page pointer on trust */ - if (lmd->md_iov.kiov[i].bv_offset + - lmd->md_iov.kiov[i].bv_len > PAGE_SIZE) - return -EINVAL; /* invalid length */ - - total_length += lmd->md_iov.kiov[i].bv_len; - } - - lmd->md_length = total_length; - - if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */ - (umd->max_size < 0 || - umd->max_size > total_length)) /* illegal max_size */ - return -EINVAL; - } else { /* contiguous */ - lmd->md_length = umd->length; - niov = 1; - lmd->md_niov = 1; - lmd->md_iov.iov[0].iov_base = umd->start; - lmd->md_iov.iov[0].iov_len = umd->length; - - if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */ - (umd->max_size < 0 || - umd->max_size > (int)umd->length)) /* illegal max_size */ - return -EINVAL; - } - - return 0; -} - -/* must be called with resource lock held */ -static int -lnet_md_link(struct lnet_libmd *md, struct lnet_handle_eq eq_handle, int cpt) -{ - struct lnet_res_container *container = the_lnet.ln_md_containers[cpt]; - - /* - * NB we are passed an allocated, but inactive md. 
- * if we return success, caller may lnet_md_unlink() it. - * otherwise caller may only kfree() it. - */ - /* - * This implementation doesn't know how to create START events or - * disable END events. Best to LASSERT our caller is compliant so - * we find out quickly... - */ - /* - * TODO - reevaluate what should be here in light of - * the removal of the start and end events - * maybe there we shouldn't even allow LNET_EQ_NONE!) - * LASSERT(!eq); - */ - if (!LNetEQHandleIsInvalid(eq_handle)) { - md->md_eq = lnet_handle2eq(&eq_handle); - - if (!md->md_eq) - return -ENOENT; - - (*md->md_eq->eq_refs[cpt])++; - } - - lnet_res_lh_initialize(container, &md->md_lh); - - LASSERT(list_empty(&md->md_list)); - list_add(&md->md_list, &container->rec_active); - - return 0; -} - -/* must be called with lnet_res_lock held */ -void -lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd) -{ - /* NB this doesn't copy out all the iov entries so when a - * discontiguous MD is copied out, the target gets to know the - * original iov pointer (in start) and the number of entries it had - * and that's all. - */ - umd->start = lmd->md_start; - umd->length = !(lmd->md_options & - (LNET_MD_IOVEC | LNET_MD_KIOV)) ? - lmd->md_length : lmd->md_niov; - umd->threshold = lmd->md_threshold; - umd->max_size = lmd->md_max_size; - umd->options = lmd->md_options; - umd->user_ptr = lmd->md_user_ptr; - lnet_eq2handle(&umd->eq_handle, lmd->md_eq); -} - -static int -lnet_md_validate(struct lnet_md *umd) -{ - if (!umd->start && umd->length) { - CERROR("MD start pointer can not be NULL with length %u\n", - umd->length); - return -EINVAL; - } - - if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) && - umd->length > LNET_MAX_IOV) { - CERROR("Invalid option: too many fragments %u, %d max\n", - umd->length, LNET_MAX_IOV); - return -EINVAL; - } - - return 0; -} - -/** - * Create a memory descriptor and attach it to a ME - * - * \param meh A handle for a ME to associate the new MD with. 
- * \param umd Provides initial values for the user-visible parts of a MD. - * Other than its use for initialization, there is no linkage between this - * structure and the MD maintained by the LNet. - * \param unlink A flag to indicate whether the MD is automatically unlinked - * when it becomes inactive, either because the operation threshold drops to - * zero or because the available memory becomes less than \a umd.max_size. - * (Note that the check for unlinking a MD only occurs after the completion - * of a successful operation on the MD.) The value LNET_UNLINK enables auto - * unlinking; the value LNET_RETAIN disables it. - * \param handle On successful returns, a handle to the newly created MD is - * saved here. This handle can be used later in LNetMDUnlink(). - * - * \retval 0 On success. - * \retval -EINVAL If \a umd is not valid. - * \retval -ENOMEM If new MD cannot be allocated. - * \retval -ENOENT Either \a meh or \a umd.eq_handle does not point to a - * valid object. Note that it's OK to supply a NULL \a umd.eq_handle by - * calling LNetInvalidateHandle() on it. - * \retval -EBUSY If the ME pointed to by \a meh is already associated with - * a MD. 
- */ -int -LNetMDAttach(struct lnet_handle_me meh, struct lnet_md umd, - enum lnet_unlink unlink, struct lnet_handle_md *handle) -{ - LIST_HEAD(matches); - LIST_HEAD(drops); - struct lnet_me *me; - struct lnet_libmd *md; - int cpt; - int rc; - - LASSERT(the_lnet.ln_refcount > 0); - - if (lnet_md_validate(&umd)) - return -EINVAL; - - if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) { - CERROR("Invalid option: no MD_OP set\n"); - return -EINVAL; - } - - md = lnet_md_alloc(&umd); - if (!md) - return -ENOMEM; - - rc = lnet_md_build(md, &umd, unlink); - if (rc) - goto out_free; - - cpt = lnet_cpt_of_cookie(meh.cookie); - - lnet_res_lock(cpt); - - me = lnet_handle2me(&meh); - if (!me) - rc = -ENOENT; - else if (me->me_md) - rc = -EBUSY; - else - rc = lnet_md_link(md, umd.eq_handle, cpt); - - if (rc) - goto out_unlock; - - /* - * attach this MD to portal of ME and check if it matches any - * blocked msgs on this portal - */ - lnet_ptl_attach_md(me, md, &matches, &drops); - - lnet_md2handle(handle, md); - - lnet_res_unlock(cpt); - - lnet_drop_delayed_msg_list(&drops, "Bad match"); - lnet_recv_delayed_msg_list(&matches); - - return 0; - -out_unlock: - lnet_res_unlock(cpt); -out_free: - kfree(md); - return rc; -} -EXPORT_SYMBOL(LNetMDAttach); - -/** - * Create a "free floating" memory descriptor - a MD that is not associated - * with a ME. Such MDs are usually used in LNetPut() and LNetGet() operations. - * - * \param umd,unlink See the discussion for LNetMDAttach(). - * \param handle On successful returns, a handle to the newly created MD is - * saved here. This handle can be used later in LNetMDUnlink(), LNetPut(), - * and LNetGet() operations. - * - * \retval 0 On success. - * \retval -EINVAL If \a umd is not valid. - * \retval -ENOMEM If new MD cannot be allocated. - * \retval -ENOENT \a umd.eq_handle does not point to a valid EQ. Note that - * it's OK to supply a NULL \a umd.eq_handle by calling - * LNetInvalidateHandle() on it. 
- */ -int -LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink, - struct lnet_handle_md *handle) -{ - struct lnet_libmd *md; - int cpt; - int rc; - - LASSERT(the_lnet.ln_refcount > 0); - - if (lnet_md_validate(&umd)) - return -EINVAL; - - if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) { - CERROR("Invalid option: GET|PUT illegal on active MDs\n"); - return -EINVAL; - } - - md = lnet_md_alloc(&umd); - if (!md) - return -ENOMEM; - - rc = lnet_md_build(md, &umd, unlink); - if (rc) - goto out_free; - - cpt = lnet_res_lock_current(); - - rc = lnet_md_link(md, umd.eq_handle, cpt); - if (rc) - goto out_unlock; - - lnet_md2handle(handle, md); - - lnet_res_unlock(cpt); - return 0; - -out_unlock: - lnet_res_unlock(cpt); -out_free: - kfree(md); - - return rc; -} -EXPORT_SYMBOL(LNetMDBind); - -/** - * Unlink the memory descriptor from any ME it may be linked to and release - * the internal resources associated with it. As a result, active messages - * associated with the MD may get aborted. - * - * This function does not free the memory region associated with the MD; - * i.e., the memory the user allocated for this MD. If the ME associated with - * this MD is not NULL and was created with auto unlink enabled, the ME is - * unlinked as well (see LNetMEAttach()). - * - * Explicitly unlinking a MD via this function call has the same behavior as - * a MD that has been automatically unlinked, except that no LNET_EVENT_UNLINK - * is generated in the latter case. - * - * An unlinked event can be reported in two ways: - * - If there's no pending operations on the MD, it's unlinked immediately - * and an LNET_EVENT_UNLINK event is logged before this function returns. - * - Otherwise, the MD is only marked for deletion when this function - * returns, and the unlinked event will be piggybacked on the event of - * the completion of the last operation by setting the unlinked field of - * the event. No dedicated LNET_EVENT_UNLINK event is generated. 
- * - * Note that in both cases the unlinked field of the event is always set; no - * more event will happen on the MD after such an event is logged. - * - * \param mdh A handle for the MD to be unlinked. - * - * \retval 0 On success. - * \retval -ENOENT If \a mdh does not point to a valid MD object. - */ -int -LNetMDUnlink(struct lnet_handle_md mdh) -{ - struct lnet_event ev; - struct lnet_libmd *md; - int cpt; - - LASSERT(the_lnet.ln_refcount > 0); - - cpt = lnet_cpt_of_cookie(mdh.cookie); - lnet_res_lock(cpt); - - md = lnet_handle2md(&mdh); - if (!md) { - lnet_res_unlock(cpt); - return -ENOENT; - } - - md->md_flags |= LNET_MD_FLAG_ABORTED; - /* - * If the MD is busy, lnet_md_unlink just marks it for deletion, and - * when the LND is done, the completion event flags that the MD was - * unlinked. Otherwise, we enqueue an event now... - */ - if (md->md_eq && !md->md_refcount) { - lnet_build_unlink_event(md, &ev); - lnet_eq_enqueue_event(md->md_eq, &ev); - } - - lnet_md_unlink(md); - - lnet_res_unlock(cpt); - return 0; -} -EXPORT_SYMBOL(LNetMDUnlink); diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c deleted file mode 100644 index 672e37bdd045..000000000000 --- a/drivers/staging/lustre/lnet/lnet/lib-me.c +++ /dev/null @@ -1,274 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/lnet/lib-me.c - * - * Match Entry management routines - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include - -/** - * Create and attach a match entry to the match list of \a portal. The new - * ME is empty, i.e. not associated with a memory descriptor. LNetMDAttach() - * can be used to attach a MD to an empty ME. - * - * \param portal The portal table index where the ME should be attached. - * \param match_id Specifies the match criteria for the process ID of - * the requester. The constants LNET_PID_ANY and LNET_NID_ANY can be - * used to wildcard either of the identifiers in the lnet_process_id - * structure. - * \param match_bits,ignore_bits Specify the match criteria to apply - * to the match bits in the incoming request. The ignore bits are used - * to mask out insignificant bits in the incoming match bits. The resulting - * bits are then compared to the ME's match bits to determine if the - * incoming request meets the match criteria. - * \param unlink Indicates whether the ME should be unlinked when the memory - * descriptor associated with it is unlinked (Note that the check for - * unlinking a ME only occurs when the memory descriptor is unlinked.). - * Valid values are LNET_RETAIN and LNET_UNLINK. - * \param pos Indicates whether the new ME should be prepended or - * appended to the match list. Allowed constants: LNET_INS_BEFORE, - * LNET_INS_AFTER. - * \param handle On successful returns, a handle to the newly created ME - * object is saved here. 
This handle can be used later in LNetMEInsert(), - * LNetMEUnlink(), or LNetMDAttach() functions. - * - * \retval 0 On success. - * \retval -EINVAL If \a portal is invalid. - * \retval -ENOMEM If new ME object cannot be allocated. - */ -int -LNetMEAttach(unsigned int portal, - struct lnet_process_id match_id, - __u64 match_bits, __u64 ignore_bits, - enum lnet_unlink unlink, enum lnet_ins_pos pos, - struct lnet_handle_me *handle) -{ - struct lnet_match_table *mtable; - struct lnet_me *me; - struct list_head *head; - - LASSERT(the_lnet.ln_refcount > 0); - - if ((int)portal >= the_lnet.ln_nportals) - return -EINVAL; - - mtable = lnet_mt_of_attach(portal, match_id, - match_bits, ignore_bits, pos); - if (!mtable) /* can't match portal type */ - return -EPERM; - - me = kzalloc(sizeof(*me), GFP_NOFS); - if (!me) - return -ENOMEM; - - lnet_res_lock(mtable->mt_cpt); - - me->me_portal = portal; - me->me_match_id = match_id; - me->me_match_bits = match_bits; - me->me_ignore_bits = ignore_bits; - me->me_unlink = unlink; - me->me_md = NULL; - - lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt], - &me->me_lh); - if (ignore_bits) - head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE]; - else - head = lnet_mt_match_head(mtable, match_id, match_bits); - - me->me_pos = head - &mtable->mt_mhash[0]; - if (pos == LNET_INS_AFTER || pos == LNET_INS_LOCAL) - list_add_tail(&me->me_list, head); - else - list_add(&me->me_list, head); - - lnet_me2handle(handle, me); - - lnet_res_unlock(mtable->mt_cpt); - return 0; -} -EXPORT_SYMBOL(LNetMEAttach); - -/** - * Create and a match entry and insert it before or after the ME pointed to by - * \a current_meh. The new ME is empty, i.e. not associated with a memory - * descriptor. LNetMDAttach() can be used to attach a MD to an empty ME. - * - * This function is identical to LNetMEAttach() except for the position - * where the new ME is inserted. - * - * \param current_meh A handle for a ME. 
The new ME will be inserted - * immediately before or immediately after this ME. - * \param match_id,match_bits,ignore_bits,unlink,pos,handle See the discussion - * for LNetMEAttach(). - * - * \retval 0 On success. - * \retval -ENOMEM If new ME object cannot be allocated. - * \retval -ENOENT If \a current_meh does not point to a valid match entry. - */ -int -LNetMEInsert(struct lnet_handle_me current_meh, - struct lnet_process_id match_id, - __u64 match_bits, __u64 ignore_bits, - enum lnet_unlink unlink, enum lnet_ins_pos pos, - struct lnet_handle_me *handle) -{ - struct lnet_me *current_me; - struct lnet_me *new_me; - struct lnet_portal *ptl; - int cpt; - - LASSERT(the_lnet.ln_refcount > 0); - - if (pos == LNET_INS_LOCAL) - return -EPERM; - - new_me = kzalloc(sizeof(*new_me), GFP_NOFS); - if (!new_me) - return -ENOMEM; - - cpt = lnet_cpt_of_cookie(current_meh.cookie); - - lnet_res_lock(cpt); - - current_me = lnet_handle2me(¤t_meh); - if (!current_me) { - kfree(new_me); - - lnet_res_unlock(cpt); - return -ENOENT; - } - - LASSERT(current_me->me_portal < the_lnet.ln_nportals); - - ptl = the_lnet.ln_portals[current_me->me_portal]; - if (lnet_ptl_is_unique(ptl)) { - /* nosense to insertion on unique portal */ - kfree(new_me); - lnet_res_unlock(cpt); - return -EPERM; - } - - new_me->me_pos = current_me->me_pos; - new_me->me_portal = current_me->me_portal; - new_me->me_match_id = match_id; - new_me->me_match_bits = match_bits; - new_me->me_ignore_bits = ignore_bits; - new_me->me_unlink = unlink; - new_me->me_md = NULL; - - lnet_res_lh_initialize(the_lnet.ln_me_containers[cpt], &new_me->me_lh); - - if (pos == LNET_INS_AFTER) - list_add(&new_me->me_list, ¤t_me->me_list); - else - list_add_tail(&new_me->me_list, ¤t_me->me_list); - - lnet_me2handle(handle, new_me); - - lnet_res_unlock(cpt); - - return 0; -} -EXPORT_SYMBOL(LNetMEInsert); - -/** - * Unlink a match entry from its match list. - * - * This operation also releases any resources associated with the ME. 
If a - * memory descriptor is attached to the ME, then it will be unlinked as well - * and an unlink event will be generated. It is an error to use the ME handle - * after calling LNetMEUnlink(). - * - * \param meh A handle for the ME to be unlinked. - * - * \retval 0 On success. - * \retval -ENOENT If \a meh does not point to a valid ME. - * \see LNetMDUnlink() for the discussion on delivering unlink event. - */ -int -LNetMEUnlink(struct lnet_handle_me meh) -{ - struct lnet_me *me; - struct lnet_libmd *md; - struct lnet_event ev; - int cpt; - - LASSERT(the_lnet.ln_refcount > 0); - - cpt = lnet_cpt_of_cookie(meh.cookie); - lnet_res_lock(cpt); - - me = lnet_handle2me(&meh); - if (!me) { - lnet_res_unlock(cpt); - return -ENOENT; - } - - md = me->me_md; - if (md) { - md->md_flags |= LNET_MD_FLAG_ABORTED; - if (md->md_eq && !md->md_refcount) { - lnet_build_unlink_event(md, &ev); - lnet_eq_enqueue_event(md->md_eq, &ev); - } - } - - lnet_me_unlink(me); - - lnet_res_unlock(cpt); - return 0; -} -EXPORT_SYMBOL(LNetMEUnlink); - -/* call with lnet_res_lock please */ -void -lnet_me_unlink(struct lnet_me *me) -{ - list_del(&me->me_list); - - if (me->me_md) { - struct lnet_libmd *md = me->me_md; - - /* detach MD from portal of this ME */ - lnet_ptl_detach_md(me, md); - lnet_md_unlink(md); - } - - lnet_res_lh_invalidate(&me->me_lh); - kfree(me); -} diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c deleted file mode 100644 index f8eaf8ff8d8d..000000000000 --- a/drivers/staging/lustre/lnet/lnet/lib-move.c +++ /dev/null @@ -1,2386 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/lnet/lib-move.c - * - * Data movement routines - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include -#include - -static int local_nid_dist_zero = 1; -module_param(local_nid_dist_zero, int, 0444); -MODULE_PARM_DESC(local_nid_dist_zero, "Reserved"); - -int -lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) -{ - struct lnet_test_peer *tp; - struct lnet_test_peer *temp; - struct list_head *el; - struct list_head *next; - struct list_head cull; - - /* NB: use lnet_net_lock(0) to serialize operations on test peers */ - if (threshold) { - /* Adding a new entry */ - tp = kzalloc(sizeof(*tp), GFP_NOFS); - if (!tp) - return -ENOMEM; - - tp->tp_nid = nid; - tp->tp_threshold = threshold; - - lnet_net_lock(0); - list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers); - lnet_net_unlock(0); - return 0; - } - - /* removing entries */ - INIT_LIST_HEAD(&cull); - - lnet_net_lock(0); - - list_for_each_safe(el, next, &the_lnet.ln_test_peers) { - tp = list_entry(el, struct lnet_test_peer, tp_list); - - if (!tp->tp_threshold || /* needs culling anyway */ - nid == LNET_NID_ANY || /* removing all entries */ - tp->tp_nid == nid) { /* matched this one */ - 
list_del(&tp->tp_list); - list_add(&tp->tp_list, &cull); - } - } - - lnet_net_unlock(0); - - list_for_each_entry_safe(tp, temp, &cull, tp_list) { - list_del(&tp->tp_list); - kfree(tp); - } - return 0; -} - -static int -fail_peer(lnet_nid_t nid, int outgoing) -{ - struct lnet_test_peer *tp; - struct lnet_test_peer *temp; - struct list_head *el; - struct list_head *next; - struct list_head cull; - int fail = 0; - - INIT_LIST_HEAD(&cull); - - /* NB: use lnet_net_lock(0) to serialize operations on test peers */ - lnet_net_lock(0); - - list_for_each_safe(el, next, &the_lnet.ln_test_peers) { - tp = list_entry(el, struct lnet_test_peer, tp_list); - - if (!tp->tp_threshold) { - /* zombie entry */ - if (outgoing) { - /* - * only cull zombies on outgoing tests, - * since we may be at interrupt priority on - * incoming messages. - */ - list_del(&tp->tp_list); - list_add(&tp->tp_list, &cull); - } - continue; - } - - if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */ - nid == tp->tp_nid) { /* fail this peer */ - fail = 1; - - if (tp->tp_threshold != LNET_MD_THRESH_INF) { - tp->tp_threshold--; - if (outgoing && - !tp->tp_threshold) { - /* see above */ - list_del(&tp->tp_list); - list_add(&tp->tp_list, &cull); - } - } - break; - } - } - - lnet_net_unlock(0); - - list_for_each_entry_safe(tp, temp, &cull, tp_list) { - list_del(&tp->tp_list); - - kfree(tp); - } - - return fail; -} - -unsigned int -lnet_iov_nob(unsigned int niov, struct kvec *iov) -{ - unsigned int nob = 0; - - LASSERT(!niov || iov); - while (niov-- > 0) - nob += (iov++)->iov_len; - - return nob; -} -EXPORT_SYMBOL(lnet_iov_nob); - -void -lnet_copy_iov2iter(struct iov_iter *to, - unsigned int nsiov, const struct kvec *siov, - unsigned int soffset, unsigned int nob) -{ - /* NB diov, siov are READ-ONLY */ - const char *s; - size_t left; - - if (!nob) - return; - - /* skip complete frags before 'soffset' */ - LASSERT(nsiov > 0); - while (soffset >= siov->iov_len) { - soffset -= siov->iov_len; - siov++; - nsiov--; - 
LASSERT(nsiov > 0); - } - - s = (char *)siov->iov_base + soffset; - left = siov->iov_len - soffset; - do { - size_t n, copy = left; - - LASSERT(nsiov > 0); - - if (copy > nob) - copy = nob; - n = copy_to_iter(s, copy, to); - if (n != copy) - return; - nob -= n; - - siov++; - s = (char *)siov->iov_base; - left = siov->iov_len; - nsiov--; - } while (nob > 0); -} -EXPORT_SYMBOL(lnet_copy_iov2iter); - -void -lnet_copy_kiov2iter(struct iov_iter *to, - unsigned int nsiov, const struct bio_vec *siov, - unsigned int soffset, unsigned int nob) -{ - if (!nob) - return; - - LASSERT(!in_interrupt()); - - LASSERT(nsiov > 0); - while (soffset >= siov->bv_len) { - soffset -= siov->bv_len; - siov++; - nsiov--; - LASSERT(nsiov > 0); - } - - do { - size_t copy = siov->bv_len - soffset, n; - - LASSERT(nsiov > 0); - - if (copy > nob) - copy = nob; - n = copy_page_to_iter(siov->bv_page, - siov->bv_offset + soffset, - copy, to); - if (n != copy) - return; - nob -= n; - siov++; - nsiov--; - soffset = 0; - } while (nob > 0); -} -EXPORT_SYMBOL(lnet_copy_kiov2iter); - -int -lnet_extract_iov(int dst_niov, struct kvec *dst, - int src_niov, const struct kvec *src, - unsigned int offset, unsigned int len) -{ - /* - * Initialise 'dst' to the subset of 'src' starting at 'offset', - * for exactly 'len' bytes, and return the number of entries. 
- * NB not destructive to 'src' - */ - unsigned int frag_len; - unsigned int niov; - - if (!len) /* no data => */ - return 0; /* no frags */ - - LASSERT(src_niov > 0); - while (offset >= src->iov_len) { /* skip initial frags */ - offset -= src->iov_len; - src_niov--; - src++; - LASSERT(src_niov > 0); - } - - niov = 1; - for (;;) { - LASSERT(src_niov > 0); - LASSERT((int)niov <= dst_niov); - - frag_len = src->iov_len - offset; - dst->iov_base = ((char *)src->iov_base) + offset; - - if (len <= frag_len) { - dst->iov_len = len; - return niov; - } - - dst->iov_len = frag_len; - - len -= frag_len; - dst++; - src++; - niov++; - src_niov--; - offset = 0; - } -} -EXPORT_SYMBOL(lnet_extract_iov); - -unsigned int -lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov) -{ - unsigned int nob = 0; - - LASSERT(!niov || kiov); - while (niov-- > 0) - nob += (kiov++)->bv_len; - - return nob; -} -EXPORT_SYMBOL(lnet_kiov_nob); - -int -lnet_extract_kiov(int dst_niov, struct bio_vec *dst, - int src_niov, const struct bio_vec *src, - unsigned int offset, unsigned int len) -{ - /* - * Initialise 'dst' to the subset of 'src' starting at 'offset', - * for exactly 'len' bytes, and return the number of entries. 
- * NB not destructive to 'src' - */ - unsigned int frag_len; - unsigned int niov; - - if (!len) /* no data => */ - return 0; /* no frags */ - - LASSERT(src_niov > 0); - while (offset >= src->bv_len) { /* skip initial frags */ - offset -= src->bv_len; - src_niov--; - src++; - LASSERT(src_niov > 0); - } - - niov = 1; - for (;;) { - LASSERT(src_niov > 0); - LASSERT((int)niov <= dst_niov); - - frag_len = src->bv_len - offset; - dst->bv_page = src->bv_page; - dst->bv_offset = src->bv_offset + offset; - - if (len <= frag_len) { - dst->bv_len = len; - LASSERT(dst->bv_offset + dst->bv_len - <= PAGE_SIZE); - return niov; - } - - dst->bv_len = frag_len; - LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE); - - len -= frag_len; - dst++; - src++; - niov++; - src_niov--; - offset = 0; - } -} -EXPORT_SYMBOL(lnet_extract_kiov); - -void -lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, - int delayed, unsigned int offset, unsigned int mlen, - unsigned int rlen) -{ - unsigned int niov = 0; - struct kvec *iov = NULL; - struct bio_vec *kiov = NULL; - struct iov_iter to; - int rc; - - LASSERT(!in_interrupt()); - LASSERT(!mlen || msg); - - if (msg) { - LASSERT(msg->msg_receiving); - LASSERT(!msg->msg_sending); - LASSERT(rlen == msg->msg_len); - LASSERT(mlen <= msg->msg_len); - LASSERT(msg->msg_offset == offset); - LASSERT(msg->msg_wanted == mlen); - - msg->msg_receiving = 0; - - if (mlen) { - niov = msg->msg_niov; - iov = msg->msg_iov; - kiov = msg->msg_kiov; - - LASSERT(niov > 0); - LASSERT(!iov != !kiov); - } - } - - if (iov) { - iov_iter_kvec(&to, ITER_KVEC | READ, iov, niov, mlen + offset); - iov_iter_advance(&to, offset); - } else { - iov_iter_bvec(&to, ITER_BVEC | READ, kiov, niov, mlen + offset); - iov_iter_advance(&to, offset); - } - rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed, &to, rlen); - if (rc < 0) - lnet_finalize(ni, msg, rc); -} - -static void -lnet_setpayloadbuffer(struct lnet_msg *msg) -{ - struct lnet_libmd *md = msg->msg_md; - - 
LASSERT(msg->msg_len > 0); - LASSERT(!msg->msg_routing); - LASSERT(md); - LASSERT(!msg->msg_niov); - LASSERT(!msg->msg_iov); - LASSERT(!msg->msg_kiov); - - msg->msg_niov = md->md_niov; - if (md->md_options & LNET_MD_KIOV) - msg->msg_kiov = md->md_iov.kiov; - else - msg->msg_iov = md->md_iov.iov; -} - -void -lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target, - unsigned int offset, unsigned int len) -{ - msg->msg_type = type; - msg->msg_target = target; - msg->msg_len = len; - msg->msg_offset = offset; - - if (len) - lnet_setpayloadbuffer(msg); - - memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr)); - msg->msg_hdr.type = cpu_to_le32(type); - msg->msg_hdr.dest_nid = cpu_to_le64(target.nid); - msg->msg_hdr.dest_pid = cpu_to_le32(target.pid); - /* src_nid will be set later */ - msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid); - msg->msg_hdr.payload_length = cpu_to_le32(len); -} - -static void -lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg) -{ - void *priv = msg->msg_private; - int rc; - - LASSERT(!in_interrupt()); - LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND || - (msg->msg_txcredit && msg->msg_peertxcredit)); - - rc = ni->ni_lnd->lnd_send(ni, priv, msg); - if (rc < 0) - lnet_finalize(ni, msg, rc); -} - -static int -lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg) -{ - int rc; - - LASSERT(!msg->msg_sending); - LASSERT(msg->msg_receiving); - LASSERT(!msg->msg_rx_ready_delay); - LASSERT(ni->ni_lnd->lnd_eager_recv); - - msg->msg_rx_ready_delay = 1; - rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg, - &msg->msg_private); - if (rc) { - CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n", - libcfs_nid2str(msg->msg_rxpeer->lp_nid), - libcfs_id2str(msg->msg_target), rc); - LASSERT(rc < 0); /* required by my callers */ - } - - return rc; -} - -/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */ -static void -lnet_ni_query_locked(struct lnet_ni *ni, struct lnet_peer *lp) -{ - 
unsigned long last_alive = 0; - - LASSERT(lnet_peer_aliveness_enabled(lp)); - LASSERT(ni->ni_lnd->lnd_query); - - lnet_net_unlock(lp->lp_cpt); - ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive); - lnet_net_lock(lp->lp_cpt); - - lp->lp_last_query = jiffies; - - if (last_alive) /* NI has updated timestamp */ - lp->lp_last_alive = last_alive; -} - -/* NB: always called with lnet_net_lock held */ -static inline int -lnet_peer_is_alive(struct lnet_peer *lp, unsigned long now) -{ - int alive; - unsigned long deadline; - - LASSERT(lnet_peer_aliveness_enabled(lp)); - - /* Trust lnet_notify() if it has more recent aliveness news, but - * ignore the initial assumed death (see lnet_peers_start_down()). - */ - if (!lp->lp_alive && lp->lp_alive_count > 0 && - time_after_eq(lp->lp_timestamp, lp->lp_last_alive)) - return 0; - - deadline = lp->lp_last_alive + lp->lp_ni->ni_peertimeout * HZ; - alive = time_after(deadline, now); - - /* Update obsolete lp_alive except for routers assumed to be dead - * initially, because router checker would update aliveness in this - * case, and moreover lp_last_alive at peer creation is assumed. - */ - if (alive && !lp->lp_alive && - !(lnet_isrouter(lp) && !lp->lp_alive_count)) - lnet_notify_locked(lp, 0, 1, lp->lp_last_alive); - - return alive; -} - -/* - * NB: returns 1 when alive, 0 when dead, negative when error; - * may drop the lnet_net_lock - */ -static int -lnet_peer_alive_locked(struct lnet_peer *lp) -{ - unsigned long now = jiffies; - - if (!lnet_peer_aliveness_enabled(lp)) - return -ENODEV; - - if (lnet_peer_is_alive(lp, now)) - return 1; - - /* - * Peer appears dead, but we should avoid frequent NI queries (at - * most once per lnet_queryinterval seconds). 
- */ - if (lp->lp_last_query) { - static const int lnet_queryinterval = 1; - - unsigned long next_query = - lp->lp_last_query + lnet_queryinterval * HZ; - - if (time_before(now, next_query)) { - if (lp->lp_alive) - CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n", - libcfs_nid2str(lp->lp_nid), - (int)now, (int)next_query, - lnet_queryinterval, - lp->lp_ni->ni_peertimeout); - return 0; - } - } - - /* query NI for latest aliveness news */ - lnet_ni_query_locked(lp->lp_ni, lp); - - if (lnet_peer_is_alive(lp, now)) - return 1; - - lnet_notify_locked(lp, 0, 0, lp->lp_last_alive); - return 0; -} - -/** - * \param msg The message to be sent. - * \param do_send True if lnet_ni_send() should be called in this function. - * lnet_send() is going to lnet_net_unlock immediately after this, so - * it sets do_send FALSE and I don't do the unlock/send/lock bit. - * - * \retval LNET_CREDIT_OK If \a msg sent or OK to send. - * \retval LNET_CREDIT_WAIT If \a msg blocked for credit. - * \retval -EHOSTUNREACH If the next hop of the message appears dead. - * \retval -ECANCELED If the MD of the message has been unlinked. 
- */ -static int -lnet_post_send_locked(struct lnet_msg *msg, int do_send) -{ - struct lnet_peer *lp = msg->msg_txpeer; - struct lnet_ni *ni = lp->lp_ni; - int cpt = msg->msg_tx_cpt; - struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt]; - - /* non-lnet_send() callers have checked before */ - LASSERT(!do_send || msg->msg_tx_delayed); - LASSERT(!msg->msg_receiving); - LASSERT(msg->msg_tx_committed); - - /* NB 'lp' is always the next hop */ - if (!(msg->msg_target.pid & LNET_PID_USERFLAG) && - !lnet_peer_alive_locked(lp)) { - the_lnet.ln_counters[cpt]->drop_count++; - the_lnet.ln_counters[cpt]->drop_length += msg->msg_len; - lnet_net_unlock(cpt); - - CNETERR("Dropping message for %s: peer not alive\n", - libcfs_id2str(msg->msg_target)); - if (do_send) - lnet_finalize(ni, msg, -EHOSTUNREACH); - - lnet_net_lock(cpt); - return -EHOSTUNREACH; - } - - if (msg->msg_md && - (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) { - lnet_net_unlock(cpt); - - CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n", - libcfs_id2str(msg->msg_target)); - if (do_send) - lnet_finalize(ni, msg, -ECANCELED); - - lnet_net_lock(cpt); - return -ECANCELED; - } - - if (!msg->msg_peertxcredit) { - LASSERT((lp->lp_txcredits < 0) == - !list_empty(&lp->lp_txq)); - - msg->msg_peertxcredit = 1; - lp->lp_txqnob += msg->msg_len + sizeof(struct lnet_hdr); - lp->lp_txcredits--; - - if (lp->lp_txcredits < lp->lp_mintxcredits) - lp->lp_mintxcredits = lp->lp_txcredits; - - if (lp->lp_txcredits < 0) { - msg->msg_tx_delayed = 1; - list_add_tail(&msg->msg_list, &lp->lp_txq); - return LNET_CREDIT_WAIT; - } - } - - if (!msg->msg_txcredit) { - LASSERT((tq->tq_credits < 0) == - !list_empty(&tq->tq_delayed)); - - msg->msg_txcredit = 1; - tq->tq_credits--; - - if (tq->tq_credits < tq->tq_credits_min) - tq->tq_credits_min = tq->tq_credits; - - if (tq->tq_credits < 0) { - msg->msg_tx_delayed = 1; - list_add_tail(&msg->msg_list, &tq->tq_delayed); - return LNET_CREDIT_WAIT; - } - } - - if 
(do_send) { - lnet_net_unlock(cpt); - lnet_ni_send(ni, msg); - lnet_net_lock(cpt); - } - return LNET_CREDIT_OK; -} - -static struct lnet_rtrbufpool * -lnet_msg2bufpool(struct lnet_msg *msg) -{ - struct lnet_rtrbufpool *rbp; - int cpt; - - LASSERT(msg->msg_rx_committed); - - cpt = msg->msg_rx_cpt; - rbp = &the_lnet.ln_rtrpools[cpt][0]; - - LASSERT(msg->msg_len <= LNET_MTU); - while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) { - rbp++; - LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]); - } - - return rbp; -} - -static int -lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv) -{ - /* - * lnet_parse is going to lnet_net_unlock immediately after this, so it - * sets do_recv FALSE and I don't do the unlock/send/lock bit. - * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if - * received or OK to receive - */ - struct lnet_peer *lp = msg->msg_rxpeer; - struct lnet_rtrbufpool *rbp; - struct lnet_rtrbuf *rb; - - LASSERT(!msg->msg_iov); - LASSERT(!msg->msg_kiov); - LASSERT(!msg->msg_niov); - LASSERT(msg->msg_routing); - LASSERT(msg->msg_receiving); - LASSERT(!msg->msg_sending); - - /* non-lnet_parse callers only receive delayed messages */ - LASSERT(!do_recv || msg->msg_rx_delayed); - - if (!msg->msg_peerrtrcredit) { - LASSERT((lp->lp_rtrcredits < 0) == - !list_empty(&lp->lp_rtrq)); - - msg->msg_peerrtrcredit = 1; - lp->lp_rtrcredits--; - if (lp->lp_rtrcredits < lp->lp_minrtrcredits) - lp->lp_minrtrcredits = lp->lp_rtrcredits; - - if (lp->lp_rtrcredits < 0) { - /* must have checked eager_recv before here */ - LASSERT(msg->msg_rx_ready_delay); - msg->msg_rx_delayed = 1; - list_add_tail(&msg->msg_list, &lp->lp_rtrq); - return LNET_CREDIT_WAIT; - } - } - - rbp = lnet_msg2bufpool(msg); - - if (!msg->msg_rtrcredit) { - msg->msg_rtrcredit = 1; - rbp->rbp_credits--; - if (rbp->rbp_credits < rbp->rbp_mincredits) - rbp->rbp_mincredits = rbp->rbp_credits; - - if (rbp->rbp_credits < 0) { - /* must have checked eager_recv before here */ - 
LASSERT(msg->msg_rx_ready_delay); - msg->msg_rx_delayed = 1; - list_add_tail(&msg->msg_list, &rbp->rbp_msgs); - return LNET_CREDIT_WAIT; - } - } - - LASSERT(!list_empty(&rbp->rbp_bufs)); - rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list); - list_del(&rb->rb_list); - - msg->msg_niov = rbp->rbp_npages; - msg->msg_kiov = &rb->rb_kiov[0]; - - if (do_recv) { - int cpt = msg->msg_rx_cpt; - - lnet_net_unlock(cpt); - lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1, - 0, msg->msg_len, msg->msg_len); - lnet_net_lock(cpt); - } - return LNET_CREDIT_OK; -} - -void -lnet_return_tx_credits_locked(struct lnet_msg *msg) -{ - struct lnet_peer *txpeer = msg->msg_txpeer; - struct lnet_msg *msg2; - - if (msg->msg_txcredit) { - struct lnet_ni *ni = txpeer->lp_ni; - struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt]; - - /* give back NI txcredits */ - msg->msg_txcredit = 0; - - LASSERT((tq->tq_credits < 0) == - !list_empty(&tq->tq_delayed)); - - tq->tq_credits++; - if (tq->tq_credits <= 0) { - msg2 = list_entry(tq->tq_delayed.next, - struct lnet_msg, msg_list); - list_del(&msg2->msg_list); - - LASSERT(msg2->msg_txpeer->lp_ni == ni); - LASSERT(msg2->msg_tx_delayed); - - (void)lnet_post_send_locked(msg2, 1); - } - } - - if (msg->msg_peertxcredit) { - /* give back peer txcredits */ - msg->msg_peertxcredit = 0; - - LASSERT((txpeer->lp_txcredits < 0) == - !list_empty(&txpeer->lp_txq)); - - txpeer->lp_txqnob -= msg->msg_len + sizeof(struct lnet_hdr); - LASSERT(txpeer->lp_txqnob >= 0); - - txpeer->lp_txcredits++; - if (txpeer->lp_txcredits <= 0) { - msg2 = list_entry(txpeer->lp_txq.next, - struct lnet_msg, msg_list); - list_del(&msg2->msg_list); - - LASSERT(msg2->msg_txpeer == txpeer); - LASSERT(msg2->msg_tx_delayed); - - (void)lnet_post_send_locked(msg2, 1); - } - } - - if (txpeer) { - msg->msg_txpeer = NULL; - lnet_peer_decref_locked(txpeer); - } -} - -void -lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp) -{ - struct lnet_msg *msg; - - if 
(list_empty(&rbp->rbp_msgs)) - return; - msg = list_entry(rbp->rbp_msgs.next, - struct lnet_msg, msg_list); - list_del(&msg->msg_list); - - (void)lnet_post_routed_recv_locked(msg, 1); -} - -void -lnet_drop_routed_msgs_locked(struct list_head *list, int cpt) -{ - struct list_head drop; - struct lnet_msg *msg; - struct lnet_msg *tmp; - - INIT_LIST_HEAD(&drop); - - list_splice_init(list, &drop); - - lnet_net_unlock(cpt); - - list_for_each_entry_safe(msg, tmp, &drop, msg_list) { - lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL, - 0, 0, 0, msg->msg_hdr.payload_length); - list_del_init(&msg->msg_list); - lnet_finalize(NULL, msg, -ECANCELED); - } - - lnet_net_lock(cpt); -} - -void -lnet_return_rx_credits_locked(struct lnet_msg *msg) -{ - struct lnet_peer *rxpeer = msg->msg_rxpeer; - struct lnet_msg *msg2; - - if (msg->msg_rtrcredit) { - /* give back global router credits */ - struct lnet_rtrbuf *rb; - struct lnet_rtrbufpool *rbp; - - /* - * NB If a msg ever blocks for a buffer in rbp_msgs, it stays - * there until it gets one allocated, or aborts the wait - * itself - */ - LASSERT(msg->msg_kiov); - - rb = container_of(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]); - rbp = rb->rb_pool; - - msg->msg_kiov = NULL; - msg->msg_rtrcredit = 0; - - LASSERT(rbp == lnet_msg2bufpool(msg)); - - LASSERT((rbp->rbp_credits > 0) == - !list_empty(&rbp->rbp_bufs)); - - /* - * If routing is now turned off, we just drop this buffer and - * don't bother trying to return credits. - */ - if (!the_lnet.ln_routing) { - lnet_destroy_rtrbuf(rb, rbp->rbp_npages); - goto routing_off; - } - - /* - * It is possible that a user has lowered the desired number of - * buffers in this pool. Make sure we never put back - * more buffers than the stated number. - */ - if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) { - /* Discard this buffer so we don't have too many. 
*/ - lnet_destroy_rtrbuf(rb, rbp->rbp_npages); - rbp->rbp_nbuffers--; - } else { - list_add(&rb->rb_list, &rbp->rbp_bufs); - rbp->rbp_credits++; - if (rbp->rbp_credits <= 0) - lnet_schedule_blocked_locked(rbp); - } - } - -routing_off: - if (msg->msg_peerrtrcredit) { - /* give back peer router credits */ - msg->msg_peerrtrcredit = 0; - - LASSERT((rxpeer->lp_rtrcredits < 0) == - !list_empty(&rxpeer->lp_rtrq)); - - rxpeer->lp_rtrcredits++; - /* - * drop all messages which are queued to be routed on that - * peer. - */ - if (!the_lnet.ln_routing) { - lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq, - msg->msg_rx_cpt); - } else if (rxpeer->lp_rtrcredits <= 0) { - msg2 = list_entry(rxpeer->lp_rtrq.next, - struct lnet_msg, msg_list); - list_del(&msg2->msg_list); - - (void)lnet_post_routed_recv_locked(msg2, 1); - } - } - if (rxpeer) { - msg->msg_rxpeer = NULL; - lnet_peer_decref_locked(rxpeer); - } -} - -static int -lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2) -{ - struct lnet_peer *p1 = r1->lr_gateway; - struct lnet_peer *p2 = r2->lr_gateway; - int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops; - int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 
1 : r2->lr_hops; - - if (r1->lr_priority < r2->lr_priority) - return 1; - - if (r1->lr_priority > r2->lr_priority) - return -ERANGE; - - if (r1_hops < r2_hops) - return 1; - - if (r1_hops > r2_hops) - return -ERANGE; - - if (p1->lp_txqnob < p2->lp_txqnob) - return 1; - - if (p1->lp_txqnob > p2->lp_txqnob) - return -ERANGE; - - if (p1->lp_txcredits > p2->lp_txcredits) - return 1; - - if (p1->lp_txcredits < p2->lp_txcredits) - return -ERANGE; - - if (r1->lr_seq - r2->lr_seq <= 0) - return 1; - - return -ERANGE; -} - -static struct lnet_peer * -lnet_find_route_locked(struct lnet_ni *ni, lnet_nid_t target, - lnet_nid_t rtr_nid) -{ - struct lnet_remotenet *rnet; - struct lnet_route *route; - struct lnet_route *best_route; - struct lnet_route *last_route; - struct lnet_peer *lp_best; - struct lnet_peer *lp; - int rc; - - /* - * If @rtr_nid is not LNET_NID_ANY, return the gateway with - * rtr_nid nid, otherwise find the best gateway I can use - */ - rnet = lnet_find_net_locked(LNET_NIDNET(target)); - if (!rnet) - return NULL; - - lp_best = NULL; - best_route = NULL; - last_route = NULL; - list_for_each_entry(route, &rnet->lrn_routes, lr_list) { - lp = route->lr_gateway; - - if (!lnet_is_route_alive(route)) - continue; - - if (ni && lp->lp_ni != ni) - continue; - - if (lp->lp_nid == rtr_nid) /* it's pre-determined router */ - return lp; - - if (!lp_best) { - best_route = route; - last_route = route; - lp_best = lp; - continue; - } - - /* no protection on below fields, but it's harmless */ - if (last_route->lr_seq - route->lr_seq < 0) - last_route = route; - - rc = lnet_compare_routes(route, best_route); - if (rc < 0) - continue; - - best_route = route; - lp_best = lp; - } - - /* - * set sequence number on the best router to the latest sequence + 1 - * so we can round-robin all routers, it's race and inaccurate but - * harmless and functional - */ - if (best_route) - best_route->lr_seq = last_route->lr_seq + 1; - return lp_best; -} - -int -lnet_send(lnet_nid_t src_nid, 
struct lnet_msg *msg, lnet_nid_t rtr_nid) -{ - lnet_nid_t dst_nid = msg->msg_target.nid; - struct lnet_ni *src_ni; - struct lnet_ni *local_ni; - struct lnet_peer *lp; - int cpt; - int cpt2; - int rc; - - /* - * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases, - * but we might want to use pre-determined router for ACK/REPLY - * in the future - */ - /* NB: ni == interface pre-determined (ACK/REPLY) */ - LASSERT(!msg->msg_txpeer); - LASSERT(!msg->msg_sending); - LASSERT(!msg->msg_target_is_router); - LASSERT(!msg->msg_receiving); - - msg->msg_sending = 1; - - LASSERT(!msg->msg_tx_committed); - cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid); - again: - lnet_net_lock(cpt); - - if (the_lnet.ln_shutdown) { - lnet_net_unlock(cpt); - return -ESHUTDOWN; - } - - if (src_nid == LNET_NID_ANY) { - src_ni = NULL; - } else { - src_ni = lnet_nid2ni_locked(src_nid, cpt); - if (!src_ni) { - lnet_net_unlock(cpt); - LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n", - libcfs_nid2str(dst_nid), - libcfs_nid2str(src_nid)); - return -EINVAL; - } - LASSERT(!msg->msg_routing); - } - - /* Is this for someone on a local network? 
*/ - local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt); - - if (local_ni) { - if (!src_ni) { - src_ni = local_ni; - src_nid = src_ni->ni_nid; - } else if (src_ni == local_ni) { - lnet_ni_decref_locked(local_ni, cpt); - } else { - lnet_ni_decref_locked(local_ni, cpt); - lnet_ni_decref_locked(src_ni, cpt); - lnet_net_unlock(cpt); - LCONSOLE_WARN("No route to %s via from %s\n", - libcfs_nid2str(dst_nid), - libcfs_nid2str(src_nid)); - return -EINVAL; - } - - LASSERT(src_nid != LNET_NID_ANY); - lnet_msg_commit(msg, cpt); - - if (!msg->msg_routing) - msg->msg_hdr.src_nid = cpu_to_le64(src_nid); - - if (src_ni == the_lnet.ln_loni) { - /* No send credit hassles with LOLND */ - lnet_net_unlock(cpt); - lnet_ni_send(src_ni, msg); - - lnet_net_lock(cpt); - lnet_ni_decref_locked(src_ni, cpt); - lnet_net_unlock(cpt); - return 0; - } - - rc = lnet_nid2peer_locked(&lp, dst_nid, cpt); - /* lp has ref on src_ni; lose mine */ - lnet_ni_decref_locked(src_ni, cpt); - if (rc) { - lnet_net_unlock(cpt); - LCONSOLE_WARN("Error %d finding peer %s\n", rc, - libcfs_nid2str(dst_nid)); - /* ENOMEM or shutting down */ - return rc; - } - LASSERT(lp->lp_ni == src_ni); - } else { - /* sending to a remote network */ - lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid); - if (!lp) { - if (src_ni) - lnet_ni_decref_locked(src_ni, cpt); - lnet_net_unlock(cpt); - - LCONSOLE_WARN("No route to %s via %s (all routers down)\n", - libcfs_id2str(msg->msg_target), - libcfs_nid2str(src_nid)); - return -EHOSTUNREACH; - } - - /* - * rtr_nid is LNET_NID_ANY or NID of pre-determined router, - * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't - * pre-determined router, this can happen if router table - * was changed when we release the lock - */ - if (rtr_nid != lp->lp_nid) { - cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid); - if (cpt2 != cpt) { - if (src_ni) - lnet_ni_decref_locked(src_ni, cpt); - lnet_net_unlock(cpt); - - rtr_nid = lp->lp_nid; - cpt = cpt2; - goto again; - } - } - - CDEBUG(D_NET, 
"Best route to %s via %s for %s %d\n", - libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid), - lnet_msgtyp2str(msg->msg_type), msg->msg_len); - - if (!src_ni) { - src_ni = lp->lp_ni; - src_nid = src_ni->ni_nid; - } else { - LASSERT(src_ni == lp->lp_ni); - lnet_ni_decref_locked(src_ni, cpt); - } - - lnet_peer_addref_locked(lp); - - LASSERT(src_nid != LNET_NID_ANY); - lnet_msg_commit(msg, cpt); - - if (!msg->msg_routing) { - /* I'm the source and now I know which NI to send on */ - msg->msg_hdr.src_nid = cpu_to_le64(src_nid); - } - - msg->msg_target_is_router = 1; - msg->msg_target.nid = lp->lp_nid; - msg->msg_target.pid = LNET_PID_LUSTRE; - } - - /* 'lp' is our best choice of peer */ - - LASSERT(!msg->msg_peertxcredit); - LASSERT(!msg->msg_txcredit); - LASSERT(!msg->msg_txpeer); - - msg->msg_txpeer = lp; /* msg takes my ref on lp */ - - rc = lnet_post_send_locked(msg, 0); - lnet_net_unlock(cpt); - - if (rc < 0) - return rc; - - if (rc == LNET_CREDIT_OK) - lnet_ni_send(src_ni, msg); - - return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */ -} - -void -lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob) -{ - lnet_net_lock(cpt); - the_lnet.ln_counters[cpt]->drop_count++; - the_lnet.ln_counters[cpt]->drop_length += nob; - lnet_net_unlock(cpt); - - lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob); -} - -static void -lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg) -{ - struct lnet_hdr *hdr = &msg->msg_hdr; - - if (msg->msg_wanted) - lnet_setpayloadbuffer(msg); - - lnet_build_msg_event(msg, LNET_EVENT_PUT); - - /* - * Must I ACK? 
If so I'll grab the ack_wmd out of the header and put - * it back into the ACK during lnet_finalize() - */ - msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) && - !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE); - - lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed, - msg->msg_offset, msg->msg_wanted, hdr->payload_length); -} - -static int -lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg) -{ - struct lnet_hdr *hdr = &msg->msg_hdr; - struct lnet_match_info info; - bool ready_delay; - int rc; - - /* Convert put fields to host byte order */ - le64_to_cpus(&hdr->msg.put.match_bits); - le32_to_cpus(&hdr->msg.put.ptl_index); - le32_to_cpus(&hdr->msg.put.offset); - - info.mi_id.nid = hdr->src_nid; - info.mi_id.pid = hdr->src_pid; - info.mi_opc = LNET_MD_OP_PUT; - info.mi_portal = hdr->msg.put.ptl_index; - info.mi_rlength = hdr->payload_length; - info.mi_roffset = hdr->msg.put.offset; - info.mi_mbits = hdr->msg.put.match_bits; - - msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv; - ready_delay = msg->msg_rx_ready_delay; - - again: - rc = lnet_ptl_match_md(&info, msg); - switch (rc) { - default: - LBUG(); - - case LNET_MATCHMD_OK: - lnet_recv_put(ni, msg); - return 0; - - case LNET_MATCHMD_NONE: - /** - * no eager_recv or has already called it, should - * have been attached on delayed list - */ - if (ready_delay) - return 0; - - rc = lnet_ni_eager_recv(ni, msg); - if (!rc) { - ready_delay = true; - goto again; - } - /* fall through */ - - case LNET_MATCHMD_DROP: - CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n", - libcfs_id2str(info.mi_id), info.mi_portal, - info.mi_mbits, info.mi_roffset, info.mi_rlength, rc); - - return -ENOENT; /* -ve: OK but no match */ - } -} - -static int -lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get) -{ - struct lnet_match_info info; - struct lnet_hdr *hdr = &msg->msg_hdr; - struct lnet_handle_wire reply_wmd; - int rc; - - /* Convert get fields to host byte 
order */ - le64_to_cpus(&hdr->msg.get.match_bits); - le32_to_cpus(&hdr->msg.get.ptl_index); - le32_to_cpus(&hdr->msg.get.sink_length); - le32_to_cpus(&hdr->msg.get.src_offset); - - info.mi_id.nid = hdr->src_nid; - info.mi_id.pid = hdr->src_pid; - info.mi_opc = LNET_MD_OP_GET; - info.mi_portal = hdr->msg.get.ptl_index; - info.mi_rlength = hdr->msg.get.sink_length; - info.mi_roffset = hdr->msg.get.src_offset; - info.mi_mbits = hdr->msg.get.match_bits; - - rc = lnet_ptl_match_md(&info, msg); - if (rc == LNET_MATCHMD_DROP) { - CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n", - libcfs_id2str(info.mi_id), info.mi_portal, - info.mi_mbits, info.mi_roffset, info.mi_rlength); - return -ENOENT; /* -ve: OK but no match */ - } - - LASSERT(rc == LNET_MATCHMD_OK); - - lnet_build_msg_event(msg, LNET_EVENT_GET); - - reply_wmd = hdr->msg.get.return_wmd; - - lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id, - msg->msg_offset, msg->msg_wanted); - - msg->msg_hdr.msg.reply.dst_wmd = reply_wmd; - - if (rdma_get) { - /* The LND completes the REPLY from her recv procedure */ - lnet_ni_recv(ni, msg->msg_private, msg, 0, - msg->msg_offset, msg->msg_len, msg->msg_len); - return 0; - } - - lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0); - msg->msg_receiving = 0; - - rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY); - if (rc < 0) { - /* didn't get as far as lnet_ni_send() */ - CERROR("%s: Unable to send REPLY for GET from %s: %d\n", - libcfs_nid2str(ni->ni_nid), - libcfs_id2str(info.mi_id), rc); - - lnet_finalize(ni, msg, rc); - } - - return 0; -} - -static int -lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg) -{ - void *private = msg->msg_private; - struct lnet_hdr *hdr = &msg->msg_hdr; - struct lnet_process_id src = {0}; - struct lnet_libmd *md; - int rlength; - int mlength; - int cpt; - - cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie); - lnet_res_lock(cpt); - - src.nid = hdr->src_nid; - src.pid = hdr->src_pid; - - /* NB handles only 
looked up by creator (no flips) */ - md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd); - if (!md || !md->md_threshold || md->md_me) { - CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - !md ? "invalid" : "inactive", - hdr->msg.reply.dst_wmd.wh_interface_cookie, - hdr->msg.reply.dst_wmd.wh_object_cookie); - if (md && md->md_me) - CERROR("REPLY MD also attached to portal %d\n", - md->md_me->me_portal); - - lnet_res_unlock(cpt); - return -ENOENT; /* -ve: OK but no match */ - } - - LASSERT(!md->md_offset); - - rlength = hdr->payload_length; - mlength = min_t(uint, rlength, md->md_length); - - if (mlength < rlength && - !(md->md_options & LNET_MD_TRUNCATE)) { - CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - rlength, hdr->msg.reply.dst_wmd.wh_object_cookie, - mlength); - lnet_res_unlock(cpt); - return -ENOENT; /* -ve: OK but no match */ - } - - CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie); - - lnet_msg_attach_md(msg, md, 0, mlength); - - if (mlength) - lnet_setpayloadbuffer(msg); - - lnet_res_unlock(cpt); - - lnet_build_msg_event(msg, LNET_EVENT_REPLY); - - lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength); - return 0; -} - -static int -lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg) -{ - struct lnet_hdr *hdr = &msg->msg_hdr; - struct lnet_process_id src = {0}; - struct lnet_libmd *md; - int cpt; - - src.nid = hdr->src_nid; - src.pid = hdr->src_pid; - - /* Convert ack fields to host byte order */ - le64_to_cpus(&hdr->msg.ack.match_bits); - le32_to_cpus(&hdr->msg.ack.mlength); - - cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie); - lnet_res_lock(cpt); - - /* NB handles only looked up by creator (no flips) */ - md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd); - if 
(!md || !md->md_threshold || md->md_me) { - /* Don't moan; this is expected */ - CDEBUG(D_NET, - "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - !md ? "invalid" : "inactive", - hdr->msg.ack.dst_wmd.wh_interface_cookie, - hdr->msg.ack.dst_wmd.wh_object_cookie); - if (md && md->md_me) - CERROR("Source MD also attached to portal %d\n", - md->md_me->me_portal); - - lnet_res_unlock(cpt); - return -ENOENT; /* -ve! */ - } - - CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - hdr->msg.ack.dst_wmd.wh_object_cookie); - - lnet_msg_attach_md(msg, md, 0, 0); - - lnet_res_unlock(cpt); - - lnet_build_msg_event(msg, LNET_EVENT_ACK); - - lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len); - return 0; -} - -/** - * \retval LNET_CREDIT_OK If \a msg is forwarded - * \retval LNET_CREDIT_WAIT If \a msg is blocked because w/o buffer - * \retval -ve error code - */ -int -lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg) -{ - int rc = 0; - - if (!the_lnet.ln_routing) - return -ECANCELED; - - if (msg->msg_rxpeer->lp_rtrcredits <= 0 || - lnet_msg2bufpool(msg)->rbp_credits <= 0) { - if (!ni->ni_lnd->lnd_eager_recv) { - msg->msg_rx_ready_delay = 1; - } else { - lnet_net_unlock(msg->msg_rx_cpt); - rc = lnet_ni_eager_recv(ni, msg); - lnet_net_lock(msg->msg_rx_cpt); - } - } - - if (!rc) - rc = lnet_post_routed_recv_locked(msg, 0); - return rc; -} - -int -lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg) -{ - int rc; - - switch (msg->msg_type) { - case LNET_MSG_ACK: - rc = lnet_parse_ack(ni, msg); - break; - case LNET_MSG_PUT: - rc = lnet_parse_put(ni, msg); - break; - case LNET_MSG_GET: - rc = lnet_parse_get(ni, msg, msg->msg_rdma_get); - break; - case LNET_MSG_REPLY: - rc = lnet_parse_reply(ni, msg); - break; - default: /* prevent an unused label if !kernel */ - LASSERT(0); - return -EPROTO; - } - - LASSERT(!rc || rc == -ENOENT); - return rc; -} - -char 
* -lnet_msgtyp2str(int type) -{ - switch (type) { - case LNET_MSG_ACK: - return "ACK"; - case LNET_MSG_PUT: - return "PUT"; - case LNET_MSG_GET: - return "GET"; - case LNET_MSG_REPLY: - return "REPLY"; - case LNET_MSG_HELLO: - return "HELLO"; - default: - return ""; - } -} - -void -lnet_print_hdr(struct lnet_hdr *hdr) -{ - struct lnet_process_id src = {0}; - struct lnet_process_id dst = {0}; - char *type_str = lnet_msgtyp2str(hdr->type); - - src.nid = hdr->src_nid; - src.pid = hdr->src_pid; - - dst.nid = hdr->dest_nid; - dst.pid = hdr->dest_pid; - - CWARN("P3 Header at %p of type %s\n", hdr, type_str); - CWARN(" From %s\n", libcfs_id2str(src)); - CWARN(" To %s\n", libcfs_id2str(dst)); - - switch (hdr->type) { - default: - break; - - case LNET_MSG_PUT: - CWARN(" Ptl index %d, ack md %#llx.%#llx, match bits %llu\n", - hdr->msg.put.ptl_index, - hdr->msg.put.ack_wmd.wh_interface_cookie, - hdr->msg.put.ack_wmd.wh_object_cookie, - hdr->msg.put.match_bits); - CWARN(" Length %d, offset %d, hdr data %#llx\n", - hdr->payload_length, hdr->msg.put.offset, - hdr->msg.put.hdr_data); - break; - - case LNET_MSG_GET: - CWARN(" Ptl index %d, return md %#llx.%#llx, match bits %llu\n", - hdr->msg.get.ptl_index, - hdr->msg.get.return_wmd.wh_interface_cookie, - hdr->msg.get.return_wmd.wh_object_cookie, - hdr->msg.get.match_bits); - CWARN(" Length %d, src offset %d\n", - hdr->msg.get.sink_length, - hdr->msg.get.src_offset); - break; - - case LNET_MSG_ACK: - CWARN(" dst md %#llx.%#llx, manipulated length %d\n", - hdr->msg.ack.dst_wmd.wh_interface_cookie, - hdr->msg.ack.dst_wmd.wh_object_cookie, - hdr->msg.ack.mlength); - break; - - case LNET_MSG_REPLY: - CWARN(" dst md %#llx.%#llx, length %d\n", - hdr->msg.reply.dst_wmd.wh_interface_cookie, - hdr->msg.reply.dst_wmd.wh_object_cookie, - hdr->payload_length); - } -} - -int -lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, - void *private, int rdma_req) -{ - int rc = 0; - int cpt; - int for_me; - struct lnet_msg *msg; 
- lnet_pid_t dest_pid; - lnet_nid_t dest_nid; - lnet_nid_t src_nid; - __u32 payload_length; - __u32 type; - - LASSERT(!in_interrupt()); - - type = le32_to_cpu(hdr->type); - src_nid = le64_to_cpu(hdr->src_nid); - dest_nid = le64_to_cpu(hdr->dest_nid); - dest_pid = le32_to_cpu(hdr->dest_pid); - payload_length = le32_to_cpu(hdr->payload_length); - - for_me = (ni->ni_nid == dest_nid); - cpt = lnet_cpt_of_nid(from_nid); - - switch (type) { - case LNET_MSG_ACK: - case LNET_MSG_GET: - if (payload_length > 0) { - CERROR("%s, src %s: bad %s payload %d (0 expected)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - lnet_msgtyp2str(type), payload_length); - return -EPROTO; - } - break; - - case LNET_MSG_PUT: - case LNET_MSG_REPLY: - if (payload_length > - (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) { - CERROR("%s, src %s: bad %s payload %d (%d max expected)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - lnet_msgtyp2str(type), - payload_length, - for_me ? LNET_MAX_PAYLOAD : LNET_MTU); - return -EPROTO; - } - break; - - default: - CERROR("%s, src %s: Bad message type 0x%x\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), type); - return -EPROTO; - } - - if (the_lnet.ln_routing && - ni->ni_last_alive != ktime_get_real_seconds()) { - /* NB: so far here is the only place to set NI status to "up */ - lnet_ni_lock(ni); - ni->ni_last_alive = ktime_get_real_seconds(); - if (ni->ni_status && - ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) - ni->ni_status->ns_status = LNET_NI_STATUS_UP; - lnet_ni_unlock(ni); - } - - /* - * Regard a bad destination NID as a protocol error. 
Senders should - * know what they're doing; if they don't they're misconfigured, buggy - * or malicious so we chop them off at the knees :) - */ - if (!for_me) { - if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) { - /* should have gone direct */ - CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid)); - return -EPROTO; - } - - if (lnet_islocalnid(dest_nid)) { - /* - * dest is another local NI; sender should have used - * this node's NID on its own network - */ - CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid)); - return -EPROTO; - } - - if (rdma_req && type == LNET_MSG_GET) { - CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid)); - return -EPROTO; - } - - if (!the_lnet.ln_routing) { - CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid)); - goto drop; - } - } - - /* - * Message looks OK; we're not going to return an error, so we MUST - * call back lnd_recv() come what may... - */ - if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ - fail_peer(src_nid, 0)) { /* shall we now? 
*/ - CERROR("%s, src %s: Dropping %s to simulate failure\n", - libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), - lnet_msgtyp2str(type)); - goto drop; - } - - if (!list_empty(&the_lnet.ln_drop_rules) && - lnet_drop_rule_match(hdr)) { - CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n", - libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid), lnet_msgtyp2str(type)); - goto drop; - } - - msg = kzalloc(sizeof(*msg), GFP_NOFS); - if (!msg) { - CERROR("%s, src %s: Dropping %s (out of memory)\n", - libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), - lnet_msgtyp2str(type)); - goto drop; - } - - /* msg zeroed by kzalloc() - * i.e. flags all clear, pointers NULL etc - */ - msg->msg_type = type; - msg->msg_private = private; - msg->msg_receiving = 1; - msg->msg_rdma_get = rdma_req; - msg->msg_wanted = payload_length; - msg->msg_len = payload_length; - msg->msg_offset = 0; - msg->msg_hdr = *hdr; - /* for building message event */ - msg->msg_from = from_nid; - if (!for_me) { - msg->msg_target.pid = dest_pid; - msg->msg_target.nid = dest_nid; - msg->msg_routing = 1; - - } else { - /* convert common msg->hdr fields to host byteorder */ - msg->msg_hdr.type = type; - msg->msg_hdr.src_nid = src_nid; - le32_to_cpus(&msg->msg_hdr.src_pid); - msg->msg_hdr.dest_nid = dest_nid; - msg->msg_hdr.dest_pid = dest_pid; - msg->msg_hdr.payload_length = payload_length; - } - - lnet_net_lock(cpt); - rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt); - if (rc) { - lnet_net_unlock(cpt); - CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n", - libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), - lnet_msgtyp2str(type), rc); - kfree(msg); - if (rc == -ESHUTDOWN) - /* We are shutting down. 
Don't do anything more */ - return 0; - goto drop; - } - - if (lnet_isrouter(msg->msg_rxpeer)) { - lnet_peer_set_alive(msg->msg_rxpeer); - if (avoid_asym_router_failure && - LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) { - /* received a remote message from router, update - * remote NI status on this router. - * NB: multi-hop routed message will be ignored. - */ - lnet_router_ni_update_locked(msg->msg_rxpeer, - LNET_NIDNET(src_nid)); - } - } - - lnet_msg_commit(msg, cpt); - - /* message delay simulation */ - if (unlikely(!list_empty(&the_lnet.ln_delay_rules) && - lnet_delay_rule_match_locked(hdr, msg))) { - lnet_net_unlock(cpt); - return 0; - } - - if (!for_me) { - rc = lnet_parse_forward_locked(ni, msg); - lnet_net_unlock(cpt); - - if (rc < 0) - goto free_drop; - - if (rc == LNET_CREDIT_OK) { - lnet_ni_recv(ni, msg->msg_private, msg, 0, - 0, payload_length, payload_length); - } - return 0; - } - - lnet_net_unlock(cpt); - - rc = lnet_parse_local(ni, msg); - if (rc) - goto free_drop; - return 0; - - free_drop: - LASSERT(!msg->msg_md); - lnet_finalize(ni, msg, rc); - - drop: - lnet_drop_message(ni, cpt, private, payload_length); - return 0; -} -EXPORT_SYMBOL(lnet_parse); - -void -lnet_drop_delayed_msg_list(struct list_head *head, char *reason) -{ - while (!list_empty(head)) { - struct lnet_process_id id = {0}; - struct lnet_msg *msg; - - msg = list_entry(head->next, struct lnet_msg, msg_list); - list_del(&msg->msg_list); - - id.nid = msg->msg_hdr.src_nid; - id.pid = msg->msg_hdr.src_pid; - - LASSERT(!msg->msg_md); - LASSERT(msg->msg_rx_delayed); - LASSERT(msg->msg_rxpeer); - LASSERT(msg->msg_hdr.type == LNET_MSG_PUT); - - CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n", - libcfs_id2str(id), - msg->msg_hdr.msg.put.ptl_index, - msg->msg_hdr.msg.put.match_bits, - msg->msg_hdr.msg.put.offset, - msg->msg_hdr.payload_length, reason); - - /* - * NB I can't drop msg's ref on msg_rxpeer until after I've - * called lnet_drop_message(), so I 
just hang onto msg as well - * until that's done - */ - lnet_drop_message(msg->msg_rxpeer->lp_ni, - msg->msg_rxpeer->lp_cpt, - msg->msg_private, msg->msg_len); - /* - * NB: message will not generate event because w/o attached MD, - * but we still should give error code so lnet_msg_decommit() - * can skip counters operations and other checks. - */ - lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT); - } -} - -void -lnet_recv_delayed_msg_list(struct list_head *head) -{ - while (!list_empty(head)) { - struct lnet_msg *msg; - struct lnet_process_id id; - - msg = list_entry(head->next, struct lnet_msg, msg_list); - list_del(&msg->msg_list); - - /* - * md won't disappear under me, since each msg - * holds a ref on it - */ - id.nid = msg->msg_hdr.src_nid; - id.pid = msg->msg_hdr.src_pid; - - LASSERT(msg->msg_rx_delayed); - LASSERT(msg->msg_md); - LASSERT(msg->msg_rxpeer); - LASSERT(msg->msg_hdr.type == LNET_MSG_PUT); - - CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n", - libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index, - msg->msg_hdr.msg.put.match_bits, - msg->msg_hdr.msg.put.offset, - msg->msg_hdr.payload_length); - - lnet_recv_put(msg->msg_rxpeer->lp_ni, msg); - } -} - -/** - * Initiate an asynchronous PUT operation. - * - * There are several events associated with a PUT: completion of the send on - * the initiator node (LNET_EVENT_SEND), and when the send completes - * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating - * that the operation was accepted by the target. The event LNET_EVENT_PUT is - * used at the target node to indicate the completion of incoming data - * delivery. - * - * The local events will be logged in the EQ associated with the MD pointed to - * by \a mdh handle. Using a MD without an associated EQ results in these - * events being discarded. 
In this case, the caller must have another - * mechanism (e.g., a higher level protocol) for determining when it is safe - * to modify the memory region associated with the MD. - * - * Note that LNet does not guarantee the order of LNET_EVENT_SEND and - * LNET_EVENT_ACK, though intuitively ACK should happen after SEND. - * - * \param self Indicates the NID of a local interface through which to send - * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself. - * \param mdh A handle for the MD that describes the memory to be sent. The MD - * must be "free floating" (See LNetMDBind()). - * \param ack Controls whether an acknowledgment is requested. - * Acknowledgments are only sent when they are requested by the initiating - * process and the target MD enables them. - * \param target A process identifier for the target process. - * \param portal The index in the \a target's portal table. - * \param match_bits The match bits to use for MD selection at the target - * process. - * \param offset The offset into the target MD (only used when the target - * MD has the LNET_MD_MANAGE_REMOTE option set). - * \param hdr_data 64 bits of user data that can be included in the message - * header. This data is written to an event queue entry at the target if an - * EQ is present on the matching MD. - * - * \retval 0 Success, and only in this case events will be generated - * and logged to EQ (if it exists). - * \retval -EIO Simulated failure. - * \retval -ENOMEM Memory allocation failure. - * \retval -ENOENT Invalid MD object. - * - * \see lnet_event::hdr_data and lnet_event_kind. 
- */ -int -LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, - struct lnet_process_id target, unsigned int portal, - __u64 match_bits, unsigned int offset, - __u64 hdr_data) -{ - struct lnet_msg *msg; - struct lnet_libmd *md; - int cpt; - int rc; - - LASSERT(the_lnet.ln_refcount > 0); - - if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ - fail_peer(target.nid, 1)) { /* shall we now? */ - CERROR("Dropping PUT to %s: simulated failure\n", - libcfs_id2str(target)); - return -EIO; - } - - msg = kzalloc(sizeof(*msg), GFP_NOFS); - if (!msg) { - CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n", - libcfs_id2str(target)); - return -ENOMEM; - } - msg->msg_vmflush = !!(current->flags & PF_MEMALLOC); - - cpt = lnet_cpt_of_cookie(mdh.cookie); - lnet_res_lock(cpt); - - md = lnet_handle2md(&mdh); - if (!md || !md->md_threshold || md->md_me) { - CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n", - match_bits, portal, libcfs_id2str(target), - !md ? 
-1 : md->md_threshold); - if (md && md->md_me) - CERROR("Source MD also attached to portal %d\n", - md->md_me->me_portal); - lnet_res_unlock(cpt); - - kfree(msg); - return -ENOENT; - } - - CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_id2str(target)); - - lnet_msg_attach_md(msg, md, 0, 0); - - lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length); - - msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits); - msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal); - msg->msg_hdr.msg.put.offset = cpu_to_le32(offset); - msg->msg_hdr.msg.put.hdr_data = hdr_data; - - /* NB handles only looked up by creator (no flips) */ - if (ack == LNET_ACK_REQ) { - msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie = - the_lnet.ln_interface_cookie; - msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie = - md->md_lh.lh_cookie; - } else { - msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie = - LNET_WIRE_HANDLE_COOKIE_NONE; - msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie = - LNET_WIRE_HANDLE_COOKIE_NONE; - } - - lnet_res_unlock(cpt); - - lnet_build_msg_event(msg, LNET_EVENT_SEND); - - rc = lnet_send(self, msg, LNET_NID_ANY); - if (rc) { - CNETERR("Error sending PUT to %s: %d\n", - libcfs_id2str(target), rc); - lnet_finalize(NULL, msg, rc); - } - - /* completion will be signalled by an event */ - return 0; -} -EXPORT_SYMBOL(LNetPut); - -struct lnet_msg * -lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg) -{ - /* - * The LND can DMA direct to the GET md (i.e. no REPLY msg). This - * returns a msg for the LND to pass to lnet_finalize() when the sink - * data has been received. 
- * - * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when - * lnet_finalize() is called on it, so the LND must call this first - */ - struct lnet_msg *msg = kzalloc(sizeof(*msg), GFP_NOFS); - struct lnet_libmd *getmd = getmsg->msg_md; - struct lnet_process_id peer_id = getmsg->msg_target; - int cpt; - - LASSERT(!getmsg->msg_target_is_router); - LASSERT(!getmsg->msg_routing); - - if (!msg) { - CERROR("%s: Dropping REPLY from %s: can't allocate msg\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id)); - goto drop; - } - - cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie); - lnet_res_lock(cpt); - - LASSERT(getmd->md_refcount > 0); - - if (!getmd->md_threshold) { - CERROR("%s: Dropping REPLY from %s for inactive MD %p\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), - getmd); - lnet_res_unlock(cpt); - goto drop; - } - - LASSERT(!getmd->md_offset); - - CDEBUG(D_NET, "%s: Reply from %s md %p\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd); - - /* setup information for lnet_build_msg_event */ - msg->msg_from = peer_id.nid; - msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */ - msg->msg_hdr.src_nid = peer_id.nid; - msg->msg_hdr.payload_length = getmd->md_length; - msg->msg_receiving = 1; /* required by lnet_msg_attach_md */ - - lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length); - lnet_res_unlock(cpt); - - cpt = lnet_cpt_of_nid(peer_id.nid); - - lnet_net_lock(cpt); - lnet_msg_commit(msg, cpt); - lnet_net_unlock(cpt); - - lnet_build_msg_event(msg, LNET_EVENT_REPLY); - - return msg; - - drop: - cpt = lnet_cpt_of_nid(peer_id.nid); - - lnet_net_lock(cpt); - the_lnet.ln_counters[cpt]->drop_count++; - the_lnet.ln_counters[cpt]->drop_length += getmd->md_length; - lnet_net_unlock(cpt); - - kfree(msg); - - return NULL; -} -EXPORT_SYMBOL(lnet_create_reply_msg); - -void -lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply, - unsigned int len) -{ - /* - * Set the REPLY length, now the RDMA 
that elides the REPLY message has - * completed and I know it. - */ - LASSERT(reply); - LASSERT(reply->msg_type == LNET_MSG_GET); - LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY); - - /* - * NB I trusted my peer to RDMA. If she tells me she's written beyond - * the end of my buffer, I might as well be dead. - */ - LASSERT(len <= reply->msg_ev.mlength); - - reply->msg_ev.mlength = len; -} -EXPORT_SYMBOL(lnet_set_reply_msg_len); - -/** - * Initiate an asynchronous GET operation. - * - * On the initiator node, an LNET_EVENT_SEND is logged when the GET request - * is sent, and an LNET_EVENT_REPLY is logged when the data returned from - * the target node in the REPLY has been written to local MD. - * - * On the target node, an LNET_EVENT_GET is logged when the GET request - * arrives and is accepted into a MD. - * - * \param self,target,portal,match_bits,offset See the discussion in LNetPut(). - * \param mdh A handle for the MD that describes the memory into which the - * requested data will be received. The MD must be "free floating" - * (See LNetMDBind()). - * - * \retval 0 Success, and only in this case events will be generated - * and logged to EQ (if it exists) of the MD. - * \retval -EIO Simulated failure. - * \retval -ENOMEM Memory allocation failure. - * \retval -ENOENT Invalid MD object. - */ -int -LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, - struct lnet_process_id target, unsigned int portal, - __u64 match_bits, unsigned int offset) -{ - struct lnet_msg *msg; - struct lnet_libmd *md; - int cpt; - int rc; - - LASSERT(the_lnet.ln_refcount > 0); - - if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ - fail_peer(target.nid, 1)) { /* shall we now? 
*/ - CERROR("Dropping GET to %s: simulated failure\n", - libcfs_id2str(target)); - return -EIO; - } - - msg = kzalloc(sizeof(*msg), GFP_NOFS); - if (!msg) { - CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n", - libcfs_id2str(target)); - return -ENOMEM; - } - - cpt = lnet_cpt_of_cookie(mdh.cookie); - lnet_res_lock(cpt); - - md = lnet_handle2md(&mdh); - if (!md || !md->md_threshold || md->md_me) { - CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n", - match_bits, portal, libcfs_id2str(target), - !md ? -1 : md->md_threshold); - if (md && md->md_me) - CERROR("REPLY MD also attached to portal %d\n", - md->md_me->me_portal); - - lnet_res_unlock(cpt); - - kfree(msg); - return -ENOENT; - } - - CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_id2str(target)); - - lnet_msg_attach_md(msg, md, 0, 0); - - lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0); - - msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits); - msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal); - msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset); - msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length); - - /* NB handles only looked up by creator (no flips) */ - msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie = - the_lnet.ln_interface_cookie; - msg->msg_hdr.msg.get.return_wmd.wh_object_cookie = - md->md_lh.lh_cookie; - - lnet_res_unlock(cpt); - - lnet_build_msg_event(msg, LNET_EVENT_SEND); - - rc = lnet_send(self, msg, LNET_NID_ANY); - if (rc < 0) { - CNETERR("Error sending GET to %s: %d\n", - libcfs_id2str(target), rc); - lnet_finalize(NULL, msg, rc); - } - - /* completion will be signalled by an event */ - return 0; -} -EXPORT_SYMBOL(LNetGet); - -/** - * Calculate distance to node at \a dstnid. - * - * \param dstnid Target NID. - * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid - * is saved here. - * \param orderp If not NULL, order of the route to reach \a dstnid is saved - * here. 
- * - * \retval 0 If \a dstnid belongs to a local interface, and reserved option - * local_nid_dist_zero is set, which is the default. - * \retval positives Distance to target NID, i.e. number of hops plus one. - * \retval -EHOSTUNREACH If \a dstnid is not reachable. - */ -int -LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) -{ - struct list_head *e; - struct lnet_ni *ni; - struct lnet_remotenet *rnet; - __u32 dstnet = LNET_NIDNET(dstnid); - int hops; - int cpt; - __u32 order = 2; - struct list_head *rn_list; - - /* - * if !local_nid_dist_zero, I don't return a distance of 0 ever - * (when lustre sees a distance of 0, it substitutes 0@lo), so I - * keep order 0 free for 0@lo and order 1 free for a local NID - * match - */ - LASSERT(the_lnet.ln_refcount > 0); - - cpt = lnet_net_lock_current(); - - list_for_each(e, &the_lnet.ln_nis) { - ni = list_entry(e, struct lnet_ni, ni_list); - - if (ni->ni_nid == dstnid) { - if (srcnidp) - *srcnidp = dstnid; - if (orderp) { - if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND) - *orderp = 0; - else - *orderp = 1; - } - lnet_net_unlock(cpt); - - return local_nid_dist_zero ? 0 : 1; - } - - if (LNET_NIDNET(ni->ni_nid) == dstnet) { - /* - * Check if ni was originally created in - * current net namespace. - * If not, assign order above 0xffff0000, - * to make this ni not a priority. 
- */ - if (!net_eq(ni->ni_net_ns, current->nsproxy->net_ns)) - order += 0xffff0000; - - if (srcnidp) - *srcnidp = ni->ni_nid; - if (orderp) - *orderp = order; - lnet_net_unlock(cpt); - return 1; - } - - order++; - } - - rn_list = lnet_net2rnethash(dstnet); - list_for_each(e, rn_list) { - rnet = list_entry(e, struct lnet_remotenet, lrn_list); - - if (rnet->lrn_net == dstnet) { - struct lnet_route *route; - struct lnet_route *shortest = NULL; - __u32 shortest_hops = LNET_UNDEFINED_HOPS; - __u32 route_hops; - - LASSERT(!list_empty(&rnet->lrn_routes)); - - list_for_each_entry(route, &rnet->lrn_routes, - lr_list) { - route_hops = route->lr_hops; - if (route_hops == LNET_UNDEFINED_HOPS) - route_hops = 1; - if (!shortest || - route_hops < shortest_hops) { - shortest = route; - shortest_hops = route_hops; - } - } - - LASSERT(shortest); - hops = shortest_hops; - if (srcnidp) - *srcnidp = shortest->lr_gateway->lp_ni->ni_nid; - if (orderp) - *orderp = order; - lnet_net_unlock(cpt); - return hops + 1; - } - order++; - } - - lnet_net_unlock(cpt); - return -EHOSTUNREACH; -} -EXPORT_SYMBOL(LNetDist); diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c deleted file mode 100644 index 0091273c04b9..000000000000 --- a/drivers/staging/lustre/lnet/lnet/lib-msg.c +++ /dev/null @@ -1,625 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/lnet/lib-msg.c - * - * Message decoding, parsing and finalizing routines - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include - -void -lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev) -{ - memset(ev, 0, sizeof(*ev)); - - ev->status = 0; - ev->unlinked = 1; - ev->type = LNET_EVENT_UNLINK; - lnet_md_deconstruct(md, &ev->md); - lnet_md2handle(&ev->md_handle, md); -} - -/* - * Don't need any lock, must be called after lnet_commit_md - */ -void -lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type) -{ - struct lnet_hdr *hdr = &msg->msg_hdr; - struct lnet_event *ev = &msg->msg_ev; - - LASSERT(!msg->msg_routing); - - ev->type = ev_type; - - if (ev_type == LNET_EVENT_SEND) { - /* event for active message */ - ev->target.nid = le64_to_cpu(hdr->dest_nid); - ev->target.pid = le32_to_cpu(hdr->dest_pid); - ev->initiator.nid = LNET_NID_ANY; - ev->initiator.pid = the_lnet.ln_pid; - ev->sender = LNET_NID_ANY; - } else { - /* event for passive message */ - ev->target.pid = hdr->dest_pid; - ev->target.nid = hdr->dest_nid; - ev->initiator.pid = hdr->src_pid; - ev->initiator.nid = hdr->src_nid; - ev->rlength = hdr->payload_length; - ev->sender = msg->msg_from; - ev->mlength = msg->msg_wanted; - ev->offset = msg->msg_offset; - } - - switch (ev_type) { - default: - LBUG(); - - case LNET_EVENT_PUT: /* passive PUT */ - 
ev->pt_index = hdr->msg.put.ptl_index; - ev->match_bits = hdr->msg.put.match_bits; - ev->hdr_data = hdr->msg.put.hdr_data; - return; - - case LNET_EVENT_GET: /* passive GET */ - ev->pt_index = hdr->msg.get.ptl_index; - ev->match_bits = hdr->msg.get.match_bits; - ev->hdr_data = 0; - return; - - case LNET_EVENT_ACK: /* ACK */ - ev->match_bits = hdr->msg.ack.match_bits; - ev->mlength = hdr->msg.ack.mlength; - return; - - case LNET_EVENT_REPLY: /* REPLY */ - return; - - case LNET_EVENT_SEND: /* active message */ - if (msg->msg_type == LNET_MSG_PUT) { - ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index); - ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits); - ev->offset = le32_to_cpu(hdr->msg.put.offset); - ev->mlength = - ev->rlength = le32_to_cpu(hdr->payload_length); - ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data); - - } else { - LASSERT(msg->msg_type == LNET_MSG_GET); - ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index); - ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits); - ev->mlength = - ev->rlength = le32_to_cpu(hdr->msg.get.sink_length); - ev->offset = le32_to_cpu(hdr->msg.get.src_offset); - ev->hdr_data = 0; - } - return; - } -} - -void -lnet_msg_commit(struct lnet_msg *msg, int cpt) -{ - struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt]; - struct lnet_counters *counters = the_lnet.ln_counters[cpt]; - - /* routed message can be committed for both receiving and sending */ - LASSERT(!msg->msg_tx_committed); - - if (msg->msg_sending) { - LASSERT(!msg->msg_receiving); - - msg->msg_tx_cpt = cpt; - msg->msg_tx_committed = 1; - if (msg->msg_rx_committed) { /* routed message REPLY */ - LASSERT(msg->msg_onactivelist); - return; - } - } else { - LASSERT(!msg->msg_sending); - msg->msg_rx_cpt = cpt; - msg->msg_rx_committed = 1; - } - - LASSERT(!msg->msg_onactivelist); - msg->msg_onactivelist = 1; - list_add(&msg->msg_activelist, &container->msc_active); - - counters->msgs_alloc++; - if (counters->msgs_alloc > counters->msgs_max) - 
counters->msgs_max = counters->msgs_alloc; -} - -static void -lnet_msg_decommit_tx(struct lnet_msg *msg, int status) -{ - struct lnet_counters *counters; - struct lnet_event *ev = &msg->msg_ev; - - LASSERT(msg->msg_tx_committed); - if (status) - goto out; - - counters = the_lnet.ln_counters[msg->msg_tx_cpt]; - switch (ev->type) { - default: /* routed message */ - LASSERT(msg->msg_routing); - LASSERT(msg->msg_rx_committed); - LASSERT(!ev->type); - - counters->route_length += msg->msg_len; - counters->route_count++; - goto out; - - case LNET_EVENT_PUT: - /* should have been decommitted */ - LASSERT(!msg->msg_rx_committed); - /* overwritten while sending ACK */ - LASSERT(msg->msg_type == LNET_MSG_ACK); - msg->msg_type = LNET_MSG_PUT; /* fix type */ - break; - - case LNET_EVENT_SEND: - LASSERT(!msg->msg_rx_committed); - if (msg->msg_type == LNET_MSG_PUT) - counters->send_length += msg->msg_len; - break; - - case LNET_EVENT_GET: - LASSERT(msg->msg_rx_committed); - /* - * overwritten while sending reply, we should never be - * here for optimized GET - */ - LASSERT(msg->msg_type == LNET_MSG_REPLY); - msg->msg_type = LNET_MSG_GET; /* fix type */ - break; - } - - counters->send_count++; - out: - lnet_return_tx_credits_locked(msg); - msg->msg_tx_committed = 0; -} - -static void -lnet_msg_decommit_rx(struct lnet_msg *msg, int status) -{ - struct lnet_counters *counters; - struct lnet_event *ev = &msg->msg_ev; - - LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */ - LASSERT(msg->msg_rx_committed); - - if (status) - goto out; - - counters = the_lnet.ln_counters[msg->msg_rx_cpt]; - switch (ev->type) { - default: - LASSERT(!ev->type); - LASSERT(msg->msg_routing); - goto out; - - case LNET_EVENT_ACK: - LASSERT(msg->msg_type == LNET_MSG_ACK); - break; - - case LNET_EVENT_GET: - /* - * type is "REPLY" if it's an optimized GET on passive side, - * because optimized GET will never be committed for sending, - * so message type wouldn't be changed back to "GET" by - * 
lnet_msg_decommit_tx(), see details in lnet_parse_get() - */ - LASSERT(msg->msg_type == LNET_MSG_REPLY || - msg->msg_type == LNET_MSG_GET); - counters->send_length += msg->msg_wanted; - break; - - case LNET_EVENT_PUT: - LASSERT(msg->msg_type == LNET_MSG_PUT); - break; - - case LNET_EVENT_REPLY: - /* - * type is "GET" if it's an optimized GET on active side, - * see details in lnet_create_reply_msg() - */ - LASSERT(msg->msg_type == LNET_MSG_GET || - msg->msg_type == LNET_MSG_REPLY); - break; - } - - counters->recv_count++; - if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY) - counters->recv_length += msg->msg_wanted; - - out: - lnet_return_rx_credits_locked(msg); - msg->msg_rx_committed = 0; -} - -void -lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status) -{ - int cpt2 = cpt; - - LASSERT(msg->msg_tx_committed || msg->msg_rx_committed); - LASSERT(msg->msg_onactivelist); - - if (msg->msg_tx_committed) { /* always decommit for sending first */ - LASSERT(cpt == msg->msg_tx_cpt); - lnet_msg_decommit_tx(msg, status); - } - - if (msg->msg_rx_committed) { - /* forwarding msg committed for both receiving and sending */ - if (cpt != msg->msg_rx_cpt) { - lnet_net_unlock(cpt); - cpt2 = msg->msg_rx_cpt; - lnet_net_lock(cpt2); - } - lnet_msg_decommit_rx(msg, status); - } - - list_del(&msg->msg_activelist); - msg->msg_onactivelist = 0; - - the_lnet.ln_counters[cpt2]->msgs_alloc--; - - if (cpt2 != cpt) { - lnet_net_unlock(cpt2); - lnet_net_lock(cpt); - } -} - -void -lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md, - unsigned int offset, unsigned int mlen) -{ - /* NB: @offset and @len are only useful for receiving */ - /* - * Here, we attach the MD on lnet_msg and mark it busy and - * decrementing its threshold. Come what may, the lnet_msg "owns" - * the MD until a call to lnet_msg_detach_md or lnet_finalize() - * signals completion. 
- */ - LASSERT(!msg->msg_routing); - - msg->msg_md = md; - if (msg->msg_receiving) { /* committed for receiving */ - msg->msg_offset = offset; - msg->msg_wanted = mlen; - } - - md->md_refcount++; - if (md->md_threshold != LNET_MD_THRESH_INF) { - LASSERT(md->md_threshold > 0); - md->md_threshold--; - } - - /* build umd in event */ - lnet_md2handle(&msg->msg_ev.md_handle, md); - lnet_md_deconstruct(md, &msg->msg_ev.md); -} - -void -lnet_msg_detach_md(struct lnet_msg *msg, int status) -{ - struct lnet_libmd *md = msg->msg_md; - int unlink; - - /* Now it's safe to drop my caller's ref */ - md->md_refcount--; - LASSERT(md->md_refcount >= 0); - - unlink = lnet_md_unlinkable(md); - if (md->md_eq) { - msg->msg_ev.status = status; - msg->msg_ev.unlinked = unlink; - lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev); - } - - if (unlink) - lnet_md_unlink(md); - - msg->msg_md = NULL; -} - -static int -lnet_complete_msg_locked(struct lnet_msg *msg, int cpt) -{ - struct lnet_handle_wire ack_wmd; - int rc; - int status = msg->msg_ev.status; - - LASSERT(msg->msg_onactivelist); - - if (!status && msg->msg_ack) { - /* Only send an ACK if the PUT completed successfully */ - - lnet_msg_decommit(msg, cpt, 0); - - msg->msg_ack = 0; - lnet_net_unlock(cpt); - - LASSERT(msg->msg_ev.type == LNET_EVENT_PUT); - LASSERT(!msg->msg_routing); - - ack_wmd = msg->msg_hdr.msg.put.ack_wmd; - - lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0); - - msg->msg_hdr.msg.ack.dst_wmd = ack_wmd; - msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits; - msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength); - - /* - * NB: we probably want to use NID of msg::msg_from as 3rd - * parameter (router NID) if it's routed message - */ - rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY); - - lnet_net_lock(cpt); - /* - * NB: message is committed for sending, we should return - * on success because LND will finalize this message later. 
- * - * Also, there is possibility that message is committed for - * sending and also failed before delivering to LND, - * i.e: ENOMEM, in that case we can't fall through either - * because CPT for sending can be different with CPT for - * receiving, so we should return back to lnet_finalize() - * to make sure we are locking the correct partition. - */ - return rc; - - } else if (!status && /* OK so far */ - (msg->msg_routing && !msg->msg_sending)) { - /* not forwarded */ - LASSERT(!msg->msg_receiving); /* called back recv already */ - lnet_net_unlock(cpt); - - rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY); - - lnet_net_lock(cpt); - /* - * NB: message is committed for sending, we should return - * on success because LND will finalize this message later. - * - * Also, there is possibility that message is committed for - * sending and also failed before delivering to LND, - * i.e: ENOMEM, in that case we can't fall through either: - * - The rule is message must decommit for sending first if - * the it's committed for both sending and receiving - * - CPT for sending can be different with CPT for receiving, - * so we should return back to lnet_finalize() to make - * sure we are locking the correct partition. 
- */ - return rc; - } - - lnet_msg_decommit(msg, cpt, status); - kfree(msg); - return 0; -} - -void -lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int status) -{ - struct lnet_msg_container *container; - int my_slot; - int cpt; - int rc; - int i; - - LASSERT(!in_interrupt()); - - if (!msg) - return; - - msg->msg_ev.status = status; - - if (msg->msg_md) { - cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie); - - lnet_res_lock(cpt); - lnet_msg_detach_md(msg, status); - lnet_res_unlock(cpt); - } - - again: - rc = 0; - if (!msg->msg_tx_committed && !msg->msg_rx_committed) { - /* not committed to network yet */ - LASSERT(!msg->msg_onactivelist); - kfree(msg); - return; - } - - /* - * NB: routed message can be committed for both receiving and sending, - * we should finalize in LIFO order and keep counters correct. - * (finalize sending first then finalize receiving) - */ - cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt; - lnet_net_lock(cpt); - - container = the_lnet.ln_msg_containers[cpt]; - list_add_tail(&msg->msg_list, &container->msc_finalizing); - - /* - * Recursion breaker. 
Don't complete the message here if I am (or - * enough other threads are) already completing messages - */ - my_slot = -1; - for (i = 0; i < container->msc_nfinalizers; i++) { - if (container->msc_finalizers[i] == current) - break; - - if (my_slot < 0 && !container->msc_finalizers[i]) - my_slot = i; - } - - if (i < container->msc_nfinalizers || my_slot < 0) { - lnet_net_unlock(cpt); - return; - } - - container->msc_finalizers[my_slot] = current; - - while (!list_empty(&container->msc_finalizing)) { - msg = list_entry(container->msc_finalizing.next, - struct lnet_msg, msg_list); - - list_del(&msg->msg_list); - - /* - * NB drops and regains the lnet lock if it actually does - * anything, so my finalizing friends can chomp along too - */ - rc = lnet_complete_msg_locked(msg, cpt); - if (rc) - break; - } - - if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) { - lnet_net_unlock(cpt); - lnet_delay_rule_check(); - lnet_net_lock(cpt); - } - - container->msc_finalizers[my_slot] = NULL; - lnet_net_unlock(cpt); - - if (rc) - goto again; -} -EXPORT_SYMBOL(lnet_finalize); - -void -lnet_msg_container_cleanup(struct lnet_msg_container *container) -{ - int count = 0; - - if (!container->msc_init) - return; - - while (!list_empty(&container->msc_active)) { - struct lnet_msg *msg; - - msg = list_entry(container->msc_active.next, - struct lnet_msg, msg_activelist); - LASSERT(msg->msg_onactivelist); - msg->msg_onactivelist = 0; - list_del(&msg->msg_activelist); - kfree(msg); - count++; - } - - if (count > 0) - CERROR("%d active msg on exit\n", count); - - kvfree(container->msc_finalizers); - container->msc_finalizers = NULL; - container->msc_init = 0; -} - -int -lnet_msg_container_setup(struct lnet_msg_container *container, int cpt) -{ - container->msc_init = 1; - - INIT_LIST_HEAD(&container->msc_active); - INIT_LIST_HEAD(&container->msc_finalizing); - - /* number of CPUs */ - container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt); - - container->msc_finalizers = 
kvzalloc_cpt(container->msc_nfinalizers * - sizeof(*container->msc_finalizers), - GFP_KERNEL, cpt); - - if (!container->msc_finalizers) { - CERROR("Failed to allocate message finalizers\n"); - lnet_msg_container_cleanup(container); - return -ENOMEM; - } - - return 0; -} - -void -lnet_msg_containers_destroy(void) -{ - struct lnet_msg_container *container; - int i; - - if (!the_lnet.ln_msg_containers) - return; - - cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) - lnet_msg_container_cleanup(container); - - cfs_percpt_free(the_lnet.ln_msg_containers); - the_lnet.ln_msg_containers = NULL; -} - -int -lnet_msg_containers_create(void) -{ - struct lnet_msg_container *container; - int rc; - int i; - - the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*container)); - - if (!the_lnet.ln_msg_containers) { - CERROR("Failed to allocate cpu-partition data for network\n"); - return -ENOMEM; - } - - cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) { - rc = lnet_msg_container_setup(container, i); - if (rc) { - lnet_msg_containers_destroy(); - return rc; - } - } - - return 0; -} diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c deleted file mode 100644 index fc47379c5938..000000000000 --- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c +++ /dev/null @@ -1,987 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * GPL HEADER END - */ -/* - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/lnet/lib-ptl.c - * - * portal & match routines - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include - -/* NB: add /proc interfaces in upcoming patches */ -int portal_rotor = LNET_PTL_ROTOR_HASH_RT; -module_param(portal_rotor, int, 0644); -MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions"); - -static int -lnet_ptl_match_type(unsigned int index, struct lnet_process_id match_id, - __u64 mbits, __u64 ignore_bits) -{ - struct lnet_portal *ptl = the_lnet.ln_portals[index]; - int unique; - - unique = !ignore_bits && - match_id.nid != LNET_NID_ANY && - match_id.pid != LNET_PID_ANY; - - LASSERT(!lnet_ptl_is_unique(ptl) || !lnet_ptl_is_wildcard(ptl)); - - /* prefer to check w/o any lock */ - if (likely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) - goto match; - - /* unset, new portal */ - lnet_ptl_lock(ptl); - /* check again with lock */ - if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) { - lnet_ptl_unlock(ptl); - goto match; - } - - /* still not set */ - if (unique) - lnet_ptl_setopt(ptl, LNET_PTL_MATCH_UNIQUE); - else - lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD); - - lnet_ptl_unlock(ptl); - - return 1; - - match: - if ((lnet_ptl_is_unique(ptl) && !unique) || - (lnet_ptl_is_wildcard(ptl) && unique)) - return 0; - return 1; -} - -static void -lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt) -{ - struct lnet_match_table *mtable = ptl->ptl_mtables[cpt]; - int i; - - /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */ - LASSERT(lnet_ptl_is_wildcard(ptl)); - - mtable->mt_enabled = 1; - - ptl->ptl_mt_maps[ptl->ptl_mt_nmaps] = cpt; - 
for (i = ptl->ptl_mt_nmaps - 1; i >= 0; i--) { - LASSERT(ptl->ptl_mt_maps[i] != cpt); - if (ptl->ptl_mt_maps[i] < cpt) - break; - - /* swap to order */ - ptl->ptl_mt_maps[i + 1] = ptl->ptl_mt_maps[i]; - ptl->ptl_mt_maps[i] = cpt; - } - - ptl->ptl_mt_nmaps++; -} - -static void -lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt) -{ - struct lnet_match_table *mtable = ptl->ptl_mtables[cpt]; - int i; - - /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */ - LASSERT(lnet_ptl_is_wildcard(ptl)); - - if (LNET_CPT_NUMBER == 1) - return; /* never disable the only match-table */ - - mtable->mt_enabled = 0; - - LASSERT(ptl->ptl_mt_nmaps > 0 && - ptl->ptl_mt_nmaps <= LNET_CPT_NUMBER); - - /* remove it from mt_maps */ - ptl->ptl_mt_nmaps--; - for (i = 0; i < ptl->ptl_mt_nmaps; i++) { - if (ptl->ptl_mt_maps[i] >= cpt) /* overwrite it */ - ptl->ptl_mt_maps[i] = ptl->ptl_mt_maps[i + 1]; - } -} - -static int -lnet_try_match_md(struct lnet_libmd *md, - struct lnet_match_info *info, struct lnet_msg *msg) -{ - /* - * ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock; - * lnet_match_blocked_msg() relies on this to avoid races - */ - unsigned int offset; - unsigned int mlength; - struct lnet_me *me = md->md_me; - - /* MD exhausted */ - if (lnet_md_exhausted(md)) - return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED; - - /* mismatched MD op */ - if (!(md->md_options & info->mi_opc)) - return LNET_MATCHMD_NONE; - - /* mismatched ME nid/pid? */ - if (me->me_match_id.nid != LNET_NID_ANY && - me->me_match_id.nid != info->mi_id.nid) - return LNET_MATCHMD_NONE; - - if (me->me_match_id.pid != LNET_PID_ANY && - me->me_match_id.pid != info->mi_id.pid) - return LNET_MATCHMD_NONE; - - /* mismatched ME matchbits? */ - if ((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) - return LNET_MATCHMD_NONE; - - /* Hurrah! This _is_ a match; check it out... 
*/ - - if (!(md->md_options & LNET_MD_MANAGE_REMOTE)) - offset = md->md_offset; - else - offset = info->mi_roffset; - - if (md->md_options & LNET_MD_MAX_SIZE) { - mlength = md->md_max_size; - LASSERT(md->md_offset + mlength <= md->md_length); - } else { - mlength = md->md_length - offset; - } - - if (info->mi_rlength <= mlength) { /* fits in allowed space */ - mlength = info->mi_rlength; - } else if (!(md->md_options & LNET_MD_TRUNCATE)) { - /* this packet _really_ is too big */ - CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n", - libcfs_id2str(info->mi_id), info->mi_mbits, - info->mi_rlength, md->md_length - offset, mlength); - - return LNET_MATCHMD_DROP; - } - - /* Commit to this ME/MD */ - CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n", - (info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get", - info->mi_portal, libcfs_id2str(info->mi_id), mlength, - info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset); - - lnet_msg_attach_md(msg, md, offset, mlength); - md->md_offset = offset + mlength; - - if (!lnet_md_exhausted(md)) - return LNET_MATCHMD_OK; - - /* - * Auto-unlink NOW, so the ME gets unlinked if required. - * We bumped md->md_refcount above so the MD just gets flagged - * for unlink when it is finalized. - */ - if (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) - lnet_md_unlink(md); - - return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED; -} - -static struct lnet_match_table * -lnet_match2mt(struct lnet_portal *ptl, struct lnet_process_id id, __u64 mbits) -{ - if (LNET_CPT_NUMBER == 1) - return ptl->ptl_mtables[0]; /* the only one */ - - /* if it's a unique portal, return match-table hashed by NID */ - return lnet_ptl_is_unique(ptl) ? 
- ptl->ptl_mtables[lnet_cpt_of_nid(id.nid)] : NULL; -} - -struct lnet_match_table * -lnet_mt_of_attach(unsigned int index, struct lnet_process_id id, - __u64 mbits, __u64 ignore_bits, enum lnet_ins_pos pos) -{ - struct lnet_portal *ptl; - struct lnet_match_table *mtable; - - /* NB: called w/o lock */ - LASSERT(index < the_lnet.ln_nportals); - - if (!lnet_ptl_match_type(index, id, mbits, ignore_bits)) - return NULL; - - ptl = the_lnet.ln_portals[index]; - - mtable = lnet_match2mt(ptl, id, mbits); - if (mtable) /* unique portal or only one match-table */ - return mtable; - - /* it's a wildcard portal */ - switch (pos) { - default: - return NULL; - case LNET_INS_BEFORE: - case LNET_INS_AFTER: - /* - * posted by no affinity thread, always hash to specific - * match-table to avoid buffer stealing which is heavy - */ - return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER]; - case LNET_INS_LOCAL: - /* posted by cpu-affinity thread */ - return ptl->ptl_mtables[lnet_cpt_current()]; - } -} - -static struct lnet_match_table * -lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg) -{ - struct lnet_match_table *mtable; - struct lnet_portal *ptl; - unsigned int nmaps; - unsigned int rotor; - unsigned int cpt; - bool routed; - - /* NB: called w/o lock */ - LASSERT(info->mi_portal < the_lnet.ln_nportals); - ptl = the_lnet.ln_portals[info->mi_portal]; - - LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)); - - mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits); - if (mtable) - return mtable; - - /* it's a wildcard portal */ - routed = LNET_NIDNET(msg->msg_hdr.src_nid) != - LNET_NIDNET(msg->msg_hdr.dest_nid); - - if (portal_rotor == LNET_PTL_ROTOR_OFF || - (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) { - cpt = lnet_cpt_current(); - if (ptl->ptl_mtables[cpt]->mt_enabled) - return ptl->ptl_mtables[cpt]; - } - - rotor = ptl->ptl_rotor++; /* get round-robin factor */ - if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed) - cpt = 
lnet_cpt_of_nid(msg->msg_hdr.src_nid); - else - cpt = rotor % LNET_CPT_NUMBER; - - if (!ptl->ptl_mtables[cpt]->mt_enabled) { - /* is there any active entry for this portal? */ - nmaps = ptl->ptl_mt_nmaps; - /* map to an active mtable to avoid heavy "stealing" */ - if (nmaps) { - /* - * NB: there is possibility that ptl_mt_maps is being - * changed because we are not under protection of - * lnet_ptl_lock, but it shouldn't hurt anything - */ - cpt = ptl->ptl_mt_maps[rotor % nmaps]; - } - } - - return ptl->ptl_mtables[cpt]; -} - -static int -lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos) -{ - __u64 *bmap; - int i; - - if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal])) - return 0; - - if (pos < 0) { /* check all bits */ - for (i = 0; i < LNET_MT_EXHAUSTED_BMAP; i++) { - if (mtable->mt_exhausted[i] != (__u64)(-1)) - return 0; - } - return 1; - } - - LASSERT(pos <= LNET_MT_HASH_IGNORE); - /* mtable::mt_mhash[pos] is marked as exhausted or not */ - bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64]; - pos &= (1 << LNET_MT_BITS_U64) - 1; - - return (*bmap & BIT(pos)); -} - -static void -lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted) -{ - __u64 *bmap; - - LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal])); - LASSERT(pos <= LNET_MT_HASH_IGNORE); - - /* set mtable::mt_mhash[pos] as exhausted/non-exhausted */ - bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64]; - pos &= (1 << LNET_MT_BITS_U64) - 1; - - if (!exhausted) - *bmap &= ~(1ULL << pos); - else - *bmap |= 1ULL << pos; -} - -struct list_head * -lnet_mt_match_head(struct lnet_match_table *mtable, - struct lnet_process_id id, __u64 mbits) -{ - struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal]; - unsigned long hash = mbits; - - if (!lnet_ptl_is_wildcard(ptl)) { - hash += id.nid + id.pid; - - LASSERT(lnet_ptl_is_unique(ptl)); - hash = hash_long(hash, LNET_MT_HASH_BITS); - } - return &mtable->mt_mhash[hash & 
LNET_MT_HASH_MASK]; -} - -int -lnet_mt_match_md(struct lnet_match_table *mtable, - struct lnet_match_info *info, struct lnet_msg *msg) -{ - struct list_head *head; - struct lnet_me *me; - struct lnet_me *tmp; - int exhausted = 0; - int rc; - - /* any ME with ignore bits? */ - if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE])) - head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE]; - else - head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits); - again: - /* NB: only wildcard portal needs to return LNET_MATCHMD_EXHAUSTED */ - if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal])) - exhausted = LNET_MATCHMD_EXHAUSTED; - - list_for_each_entry_safe(me, tmp, head, me_list) { - /* ME attached but MD not attached yet */ - if (!me->me_md) - continue; - - LASSERT(me == me->me_md->md_me); - - rc = lnet_try_match_md(me->me_md, info, msg); - if (!(rc & LNET_MATCHMD_EXHAUSTED)) - exhausted = 0; /* mlist is not empty */ - - if (rc & LNET_MATCHMD_FINISH) { - /* - * don't return EXHAUSTED bit because we don't know - * whether the mlist is empty or not - */ - return rc & ~LNET_MATCHMD_EXHAUSTED; - } - } - - if (exhausted == LNET_MATCHMD_EXHAUSTED) { /* @head is exhausted */ - lnet_mt_set_exhausted(mtable, head - mtable->mt_mhash, 1); - if (!lnet_mt_test_exhausted(mtable, -1)) - exhausted = 0; - } - - if (!exhausted && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) { - head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits); - goto again; /* re-check MEs w/o ignore-bits */ - } - - if (info->mi_opc == LNET_MD_OP_GET || - !lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal])) - return exhausted | LNET_MATCHMD_DROP; - - return exhausted | LNET_MATCHMD_NONE; -} - -static int -lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg) -{ - int rc; - - /* - * message arrived before any buffer posting on this portal, - * simply delay or drop this message - */ - if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl))) - return 0; - - 
lnet_ptl_lock(ptl); - /* check it again with hold of lock */ - if (lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)) { - lnet_ptl_unlock(ptl); - return 0; - } - - if (lnet_ptl_is_lazy(ptl)) { - if (msg->msg_rx_ready_delay) { - msg->msg_rx_delayed = 1; - list_add_tail(&msg->msg_list, - &ptl->ptl_msg_delayed); - } - rc = LNET_MATCHMD_NONE; - } else { - rc = LNET_MATCHMD_DROP; - } - - lnet_ptl_unlock(ptl); - return rc; -} - -static int -lnet_ptl_match_delay(struct lnet_portal *ptl, - struct lnet_match_info *info, struct lnet_msg *msg) -{ - int first = ptl->ptl_mt_maps[0]; /* read w/o lock */ - int rc = 0; - int i; - - /** - * Steal buffer from other CPTs, and delay msg if nothing to - * steal. This function is more expensive than a regular - * match, but we don't expect it can happen a lot. The return - * code contains one of LNET_MATCHMD_OK, LNET_MATCHMD_DROP, or - * LNET_MATCHMD_NONE. - */ - LASSERT(lnet_ptl_is_wildcard(ptl)); - - for (i = 0; i < LNET_CPT_NUMBER; i++) { - struct lnet_match_table *mtable; - int cpt; - - cpt = (first + i) % LNET_CPT_NUMBER; - mtable = ptl->ptl_mtables[cpt]; - if (i && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled) - continue; - - lnet_res_lock(cpt); - lnet_ptl_lock(ptl); - - if (!i) { - /* The first try, add to stealing list. */ - list_add_tail(&msg->msg_list, - &ptl->ptl_msg_stealing); - } - - if (!list_empty(&msg->msg_list)) { - /* On stealing list. */ - rc = lnet_mt_match_md(mtable, info, msg); - - if ((rc & LNET_MATCHMD_EXHAUSTED) && - mtable->mt_enabled) - lnet_ptl_disable_mt(ptl, cpt); - - if (rc & LNET_MATCHMD_FINISH) { - /* Match found, remove from stealing list. */ - list_del_init(&msg->msg_list); - } else if (i == LNET_CPT_NUMBER - 1 || /* (1) */ - !ptl->ptl_mt_nmaps || /* (2) */ - (ptl->ptl_mt_nmaps == 1 && /* (3) */ - ptl->ptl_mt_maps[0] == cpt)) { - /** - * No match found, and this is either - * (1) the last cpt to check, or - * (2) there is no active cpt, or - * (3) this is the only active cpt. 
- * There is nothing to steal: delay or - * drop the message. - */ - list_del_init(&msg->msg_list); - - if (lnet_ptl_is_lazy(ptl)) { - msg->msg_rx_delayed = 1; - list_add_tail(&msg->msg_list, - &ptl->ptl_msg_delayed); - rc = LNET_MATCHMD_NONE; - } else { - rc = LNET_MATCHMD_DROP; - } - } else { - /* Do another iteration. */ - rc = 0; - } - } else { - /** - * No longer on stealing list: another thread - * matched the message in lnet_ptl_attach_md(). - * We are now expected to handle the message. - */ - rc = !msg->msg_md ? - LNET_MATCHMD_DROP : LNET_MATCHMD_OK; - } - - lnet_ptl_unlock(ptl); - lnet_res_unlock(cpt); - - /** - * Note that test (1) above ensures that we always - * exit the loop through this break statement. - * - * LNET_MATCHMD_NONE means msg was added to the - * delayed queue, and we may no longer reference it - * after lnet_ptl_unlock() and lnet_res_unlock(). - */ - if (rc & (LNET_MATCHMD_FINISH | LNET_MATCHMD_NONE)) - break; - } - - return rc; -} - -int -lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg) -{ - struct lnet_match_table *mtable; - struct lnet_portal *ptl; - int rc; - - CDEBUG(D_NET, "Request from %s of length %d into portal %d MB=%#llx\n", - libcfs_id2str(info->mi_id), info->mi_rlength, info->mi_portal, - info->mi_mbits); - - if (info->mi_portal >= the_lnet.ln_nportals) { - CERROR("Invalid portal %d not in [0-%d]\n", - info->mi_portal, the_lnet.ln_nportals); - return LNET_MATCHMD_DROP; - } - - ptl = the_lnet.ln_portals[info->mi_portal]; - rc = lnet_ptl_match_early(ptl, msg); - if (rc) /* matched or delayed early message */ - return rc; - - mtable = lnet_mt_of_match(info, msg); - lnet_res_lock(mtable->mt_cpt); - - if (the_lnet.ln_shutdown) { - rc = LNET_MATCHMD_DROP; - goto out1; - } - - rc = lnet_mt_match_md(mtable, info, msg); - if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) { - lnet_ptl_lock(ptl); - lnet_ptl_disable_mt(ptl, mtable->mt_cpt); - lnet_ptl_unlock(ptl); - } - - if (rc & LNET_MATCHMD_FINISH) /* 
matched or dropping */ - goto out1; - - if (!msg->msg_rx_ready_delay) - goto out1; - - LASSERT(lnet_ptl_is_lazy(ptl)); - LASSERT(!msg->msg_rx_delayed); - - /* NB: we don't expect "delay" can happen a lot */ - if (lnet_ptl_is_unique(ptl) || LNET_CPT_NUMBER == 1) { - lnet_ptl_lock(ptl); - - msg->msg_rx_delayed = 1; - list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed); - - lnet_ptl_unlock(ptl); - lnet_res_unlock(mtable->mt_cpt); - rc = LNET_MATCHMD_NONE; - } else { - lnet_res_unlock(mtable->mt_cpt); - rc = lnet_ptl_match_delay(ptl, info, msg); - } - - /* LNET_MATCHMD_NONE means msg was added to the delay queue */ - if (rc & LNET_MATCHMD_NONE) { - CDEBUG(D_NET, - "Delaying %s from %s ptl %d MB %#llx off %d len %d\n", - info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET", - libcfs_id2str(info->mi_id), info->mi_portal, - info->mi_mbits, info->mi_roffset, info->mi_rlength); - } - goto out0; - out1: - lnet_res_unlock(mtable->mt_cpt); - out0: - /* EXHAUSTED bit is only meaningful for internal functions */ - return rc & ~LNET_MATCHMD_EXHAUSTED; -} - -void -lnet_ptl_detach_md(struct lnet_me *me, struct lnet_libmd *md) -{ - LASSERT(me->me_md == md && md->md_me == me); - - me->me_md = NULL; - md->md_me = NULL; -} - -/* called with lnet_res_lock held */ -void -lnet_ptl_attach_md(struct lnet_me *me, struct lnet_libmd *md, - struct list_head *matches, struct list_head *drops) -{ - struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal]; - struct lnet_match_table *mtable; - struct list_head *head; - struct lnet_msg *tmp; - struct lnet_msg *msg; - int exhausted = 0; - int cpt; - - LASSERT(!md->md_refcount); /* a brand new MD */ - - me->me_md = md; - md->md_me = me; - - cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie); - mtable = ptl->ptl_mtables[cpt]; - - if (list_empty(&ptl->ptl_msg_stealing) && - list_empty(&ptl->ptl_msg_delayed) && - !lnet_mt_test_exhausted(mtable, me->me_pos)) - return; - - lnet_ptl_lock(ptl); - head = &ptl->ptl_msg_stealing; - again: - 
list_for_each_entry_safe(msg, tmp, head, msg_list) { - struct lnet_match_info info; - struct lnet_hdr *hdr; - int rc; - - LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing); - - hdr = &msg->msg_hdr; - info.mi_id.nid = hdr->src_nid; - info.mi_id.pid = hdr->src_pid; - info.mi_opc = LNET_MD_OP_PUT; - info.mi_portal = hdr->msg.put.ptl_index; - info.mi_rlength = hdr->payload_length; - info.mi_roffset = hdr->msg.put.offset; - info.mi_mbits = hdr->msg.put.match_bits; - - rc = lnet_try_match_md(md, &info, msg); - - exhausted = (rc & LNET_MATCHMD_EXHAUSTED); - if (rc & LNET_MATCHMD_NONE) { - if (exhausted) - break; - continue; - } - - /* Hurrah! This _is_ a match */ - LASSERT(rc & LNET_MATCHMD_FINISH); - list_del_init(&msg->msg_list); - - if (head == &ptl->ptl_msg_stealing) { - if (exhausted) - break; - /* stealing thread will handle the message */ - continue; - } - - if (rc & LNET_MATCHMD_OK) { - list_add_tail(&msg->msg_list, matches); - - CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n", - libcfs_id2str(info.mi_id), - info.mi_portal, info.mi_mbits, - info.mi_roffset, info.mi_rlength); - } else { - list_add_tail(&msg->msg_list, drops); - } - - if (exhausted) - break; - } - - if (!exhausted && head == &ptl->ptl_msg_stealing) { - head = &ptl->ptl_msg_delayed; - goto again; - } - - if (lnet_ptl_is_wildcard(ptl) && !exhausted) { - lnet_mt_set_exhausted(mtable, me->me_pos, 0); - if (!mtable->mt_enabled) - lnet_ptl_enable_mt(ptl, cpt); - } - - lnet_ptl_unlock(ptl); -} - -static void -lnet_ptl_cleanup(struct lnet_portal *ptl) -{ - struct lnet_match_table *mtable; - int i; - - if (!ptl->ptl_mtables) /* uninitialized portal */ - return; - - LASSERT(list_empty(&ptl->ptl_msg_delayed)); - LASSERT(list_empty(&ptl->ptl_msg_stealing)); - cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) { - struct list_head *mhash; - struct lnet_me *me; - int j; - - if (!mtable->mt_mhash) /* uninitialized match-table */ - continue; - - mhash = 
mtable->mt_mhash; - /* cleanup ME */ - for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) { - while (!list_empty(&mhash[j])) { - me = list_entry(mhash[j].next, - struct lnet_me, me_list); - CERROR("Active ME %p on exit\n", me); - list_del(&me->me_list); - kfree(me); - } - } - /* the extra entry is for MEs with ignore bits */ - kvfree(mhash); - } - - cfs_percpt_free(ptl->ptl_mtables); - ptl->ptl_mtables = NULL; -} - -static int -lnet_ptl_setup(struct lnet_portal *ptl, int index) -{ - struct lnet_match_table *mtable; - struct list_head *mhash; - int i; - int j; - - ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(struct lnet_match_table)); - if (!ptl->ptl_mtables) { - CERROR("Failed to create match table for portal %d\n", index); - return -ENOMEM; - } - - ptl->ptl_index = index; - INIT_LIST_HEAD(&ptl->ptl_msg_delayed); - INIT_LIST_HEAD(&ptl->ptl_msg_stealing); - spin_lock_init(&ptl->ptl_lock); - cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) { - /* the extra entry is for MEs with ignore bits */ - mhash = kvzalloc_cpt(sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1), - GFP_KERNEL, i); - if (!mhash) { - CERROR("Failed to create match hash for portal %d\n", - index); - goto failed; - } - - memset(&mtable->mt_exhausted[0], -1, - sizeof(mtable->mt_exhausted[0]) * - LNET_MT_EXHAUSTED_BMAP); - mtable->mt_mhash = mhash; - for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) - INIT_LIST_HEAD(&mhash[j]); - - mtable->mt_portal = index; - mtable->mt_cpt = i; - } - - return 0; - failed: - lnet_ptl_cleanup(ptl); - return -ENOMEM; -} - -void -lnet_portals_destroy(void) -{ - int i; - - if (!the_lnet.ln_portals) - return; - - for (i = 0; i < the_lnet.ln_nportals; i++) - lnet_ptl_cleanup(the_lnet.ln_portals[i]); - - cfs_array_free(the_lnet.ln_portals); - the_lnet.ln_portals = NULL; - the_lnet.ln_nportals = 0; -} - -int -lnet_portals_create(void) -{ - int size; - int i; - - size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]); - - the_lnet.ln_portals = cfs_array_alloc(MAX_PORTALS, 
size); - if (!the_lnet.ln_portals) { - CERROR("Failed to allocate portals table\n"); - return -ENOMEM; - } - the_lnet.ln_nportals = MAX_PORTALS; - - for (i = 0; i < the_lnet.ln_nportals; i++) { - if (lnet_ptl_setup(the_lnet.ln_portals[i], i)) { - lnet_portals_destroy(); - return -ENOMEM; - } - } - - return 0; -} - -/** - * Turn on the lazy portal attribute. Use with caution! - * - * This portal attribute only affects incoming PUT requests to the portal, - * and is off by default. By default, if there's no matching MD for an - * incoming PUT request, it is simply dropped. With the lazy attribute on, - * such requests are queued indefinitely until either a matching MD is - * posted to the portal or the lazy attribute is turned off. - * - * It would prevent dropped requests, however it should be regarded as the - * last line of defense - i.e. users must keep a close watch on active - * buffers on a lazy portal and once it becomes too low post more buffers as - * soon as possible. This is because delayed requests usually have detrimental - * effects on underlying network connections. A few delayed requests often - * suffice to bring an underlying connection to a complete halt, due to flow - * control mechanisms. - * - * There's also a DOS attack risk. If users don't post match-all MDs on a - * lazy portal, a malicious peer can easily stop a service by sending some - * PUT requests with match bits that won't match any MD. A routed server is - * especially vulnerable since the connections to its neighbor routers are - * shared among all clients. - * - * \param portal Index of the portal to enable the lazy attribute on. - * - * \retval 0 On success. - * \retval -EINVAL If \a portal is not a valid index. 
- */ -int -LNetSetLazyPortal(int portal) -{ - struct lnet_portal *ptl; - - if (portal < 0 || portal >= the_lnet.ln_nportals) - return -EINVAL; - - CDEBUG(D_NET, "Setting portal %d lazy\n", portal); - ptl = the_lnet.ln_portals[portal]; - - lnet_res_lock(LNET_LOCK_EX); - lnet_ptl_lock(ptl); - - lnet_ptl_setopt(ptl, LNET_PTL_LAZY); - - lnet_ptl_unlock(ptl); - lnet_res_unlock(LNET_LOCK_EX); - - return 0; -} -EXPORT_SYMBOL(LNetSetLazyPortal); - -int -lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason) -{ - struct lnet_portal *ptl; - LIST_HEAD(zombies); - - if (portal < 0 || portal >= the_lnet.ln_nportals) - return -EINVAL; - - ptl = the_lnet.ln_portals[portal]; - - lnet_res_lock(LNET_LOCK_EX); - lnet_ptl_lock(ptl); - - if (!lnet_ptl_is_lazy(ptl)) { - lnet_ptl_unlock(ptl); - lnet_res_unlock(LNET_LOCK_EX); - return 0; - } - - if (ni) { - struct lnet_msg *msg, *tmp; - - /* grab all messages which are on the NI passed in */ - list_for_each_entry_safe(msg, tmp, &ptl->ptl_msg_delayed, - msg_list) { - if (msg->msg_rxpeer->lp_ni == ni) - list_move(&msg->msg_list, &zombies); - } - } else { - if (the_lnet.ln_shutdown) - CWARN("Active lazy portal %d on exit\n", portal); - else - CDEBUG(D_NET, "clearing portal %d lazy\n", portal); - - /* grab all the blocked messages atomically */ - list_splice_init(&ptl->ptl_msg_delayed, &zombies); - - lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY); - } - - lnet_ptl_unlock(ptl); - lnet_res_unlock(LNET_LOCK_EX); - - lnet_drop_delayed_msg_list(&zombies, reason); - - return 0; -} - -/** - * Turn off the lazy portal attribute. Delayed requests on the portal, - * if any, will be all dropped when this function returns. - * - * \param portal Index of the portal to disable the lazy attribute on. - * - * \retval 0 On success. - * \retval -EINVAL If \a portal is not a valid index. 
- */ -int -LNetClearLazyPortal(int portal) -{ - return lnet_clear_lazy_portal(NULL, portal, - "Clearing lazy portal attr"); -} -EXPORT_SYMBOL(LNetClearLazyPortal); diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c deleted file mode 100644 index 9b61260155f2..000000000000 --- a/drivers/staging/lustre/lnet/lnet/lib-socket.c +++ /dev/null @@ -1,585 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate, Inc. 
- */ -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include -#include -#include -#include -/* For sys_open & sys_close */ -#include -#include - -#include - -static int -kernel_sock_unlocked_ioctl(struct file *filp, int cmd, unsigned long arg) -{ - mm_segment_t oldfs = get_fs(); - int err; - - set_fs(KERNEL_DS); - err = filp->f_op->unlocked_ioctl(filp, cmd, arg); - set_fs(oldfs); - - return err; -} - -static int -lnet_sock_ioctl(int cmd, unsigned long arg) -{ - struct file *sock_filp; - struct socket *sock; - int rc; - - rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock); - if (rc) { - CERROR("Can't create socket: %d\n", rc); - return rc; - } - - sock_filp = sock_alloc_file(sock, 0, NULL); - if (IS_ERR(sock_filp)) - return PTR_ERR(sock_filp); - - rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg); - - fput(sock_filp); - return rc; -} - -int -lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask) -{ - struct ifreq ifr; - int nob; - int rc; - __be32 val; - - nob = strnlen(name, IFNAMSIZ); - if (nob == IFNAMSIZ) { - CERROR("Interface name %s too long\n", name); - return -EINVAL; - } - - BUILD_BUG_ON(sizeof(ifr.ifr_name) < IFNAMSIZ); - - if (strlen(name) > sizeof(ifr.ifr_name) - 1) - return -E2BIG; - strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); - - rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr); - if (rc) { - CERROR("Can't get flags for interface %s\n", name); - return rc; - } - - if (!(ifr.ifr_flags & IFF_UP)) { - CDEBUG(D_NET, "Interface %s down\n", name); - *up = 0; - *ip = *mask = 0; - return 0; - } - *up = 1; - - if (strlen(name) > sizeof(ifr.ifr_name) - 1) - return -E2BIG; - strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); - - ifr.ifr_addr.sa_family = AF_INET; - rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr); - if (rc) { - CERROR("Can't get IP address for interface %s\n", name); - return rc; - } - - val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr; - *ip = ntohl(val); - - if (strlen(name) > sizeof(ifr.ifr_name) - 1) - 
return -E2BIG; - strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); - - ifr.ifr_addr.sa_family = AF_INET; - rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr); - if (rc) { - CERROR("Can't get netmask for interface %s\n", name); - return rc; - } - - val = ((struct sockaddr_in *)&ifr.ifr_netmask)->sin_addr.s_addr; - *mask = ntohl(val); - - return 0; -} -EXPORT_SYMBOL(lnet_ipif_query); - -int -lnet_ipif_enumerate(char ***namesp) -{ - /* Allocate and fill in 'names', returning # interfaces/error */ - char **names; - int toobig; - int nalloc; - int nfound; - struct ifreq *ifr; - struct ifconf ifc; - int rc; - int nob; - int i; - - nalloc = 16; /* first guess at max interfaces */ - toobig = 0; - for (;;) { - if (nalloc * sizeof(*ifr) > PAGE_SIZE) { - toobig = 1; - nalloc = PAGE_SIZE / sizeof(*ifr); - CWARN("Too many interfaces: only enumerating first %d\n", - nalloc); - } - - ifr = kzalloc(nalloc * sizeof(*ifr), GFP_KERNEL); - if (!ifr) { - CERROR("ENOMEM enumerating up to %d interfaces\n", - nalloc); - rc = -ENOMEM; - goto out0; - } - - ifc.ifc_buf = (char *)ifr; - ifc.ifc_len = nalloc * sizeof(*ifr); - - rc = lnet_sock_ioctl(SIOCGIFCONF, (unsigned long)&ifc); - if (rc < 0) { - CERROR("Error %d enumerating interfaces\n", rc); - goto out1; - } - - LASSERT(!rc); - - nfound = ifc.ifc_len / sizeof(*ifr); - LASSERT(nfound <= nalloc); - - if (nfound < nalloc || toobig) - break; - - kfree(ifr); - nalloc *= 2; - } - - if (!nfound) - goto out1; - - names = kzalloc(nfound * sizeof(*names), GFP_KERNEL); - if (!names) { - rc = -ENOMEM; - goto out1; - } - - for (i = 0; i < nfound; i++) { - nob = strnlen(ifr[i].ifr_name, IFNAMSIZ); - if (nob == IFNAMSIZ) { - /* no space for terminating NULL */ - CERROR("interface name %.*s too long (%d max)\n", - nob, ifr[i].ifr_name, IFNAMSIZ); - rc = -ENAMETOOLONG; - goto out2; - } - - names[i] = kmalloc(IFNAMSIZ, GFP_KERNEL); - if (!names[i]) { - rc = -ENOMEM; - goto out2; - } - - memcpy(names[i], ifr[i].ifr_name, nob); - names[i][nob] = 0; 
- } - - *namesp = names; - rc = nfound; - -out2: - if (rc < 0) - lnet_ipif_free_enumeration(names, nfound); -out1: - kfree(ifr); -out0: - return rc; -} -EXPORT_SYMBOL(lnet_ipif_enumerate); - -void -lnet_ipif_free_enumeration(char **names, int n) -{ - int i; - - LASSERT(n > 0); - - for (i = 0; i < n && names[i]; i++) - kfree(names[i]); - - kfree(names); -} -EXPORT_SYMBOL(lnet_ipif_free_enumeration); - -int -lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout) -{ - int rc; - long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC); - unsigned long then; - struct timeval tv; - struct kvec iov = { .iov_base = buffer, .iov_len = nob }; - struct msghdr msg = {NULL,}; - - LASSERT(nob > 0); - /* - * Caller may pass a zero timeout if she thinks the socket buffer is - * empty enough to take the whole message immediately - */ - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, nob); - for (;;) { - msg.msg_flags = !timeout ? MSG_DONTWAIT : 0; - if (timeout) { - /* Set send timeout to remaining time */ - jiffies_to_timeval(jiffies_left, &tv); - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, - (char *)&tv, sizeof(tv)); - if (rc) { - CERROR("Can't set socket send timeout %ld.%06d: %d\n", - (long)tv.tv_sec, (int)tv.tv_usec, rc); - return rc; - } - } - - then = jiffies; - rc = kernel_sendmsg(sock, &msg, &iov, 1, nob); - jiffies_left -= jiffies - then; - - if (rc < 0) - return rc; - - if (!rc) { - CERROR("Unexpected zero rc\n"); - return -ECONNABORTED; - } - - if (!msg_data_left(&msg)) - break; - - if (jiffies_left <= 0) - return -EAGAIN; - } - return 0; -} -EXPORT_SYMBOL(lnet_sock_write); - -int -lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout) -{ - int rc; - long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC); - unsigned long then; - struct timeval tv; - struct kvec iov = { - .iov_base = buffer, - .iov_len = nob - }; - struct msghdr msg = { - .msg_flags = 0 - }; - - LASSERT(nob > 0); - LASSERT(jiffies_left > 0); 
- - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob); - - for (;;) { - /* Set receive timeout to remaining time */ - jiffies_to_timeval(jiffies_left, &tv); - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, - (char *)&tv, sizeof(tv)); - if (rc) { - CERROR("Can't set socket recv timeout %ld.%06d: %d\n", - (long)tv.tv_sec, (int)tv.tv_usec, rc); - return rc; - } - - then = jiffies; - rc = sock_recvmsg(sock, &msg, 0); - jiffies_left -= jiffies - then; - - if (rc < 0) - return rc; - - if (!rc) - return -ECONNRESET; - - if (!msg_data_left(&msg)) - return 0; - - if (jiffies_left <= 0) - return -ETIMEDOUT; - } -} -EXPORT_SYMBOL(lnet_sock_read); - -static int -lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip, - int local_port) -{ - struct sockaddr_in locaddr; - struct socket *sock; - int rc; - int option; - - /* All errors are fatal except bind failure if the port is in use */ - *fatal = 1; - - rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock); - *sockp = sock; - if (rc) { - CERROR("Can't create socket: %d\n", rc); - return rc; - } - - option = 1; - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, - (char *)&option, sizeof(option)); - if (rc) { - CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc); - goto failed; - } - - if (local_ip || local_port) { - memset(&locaddr, 0, sizeof(locaddr)); - locaddr.sin_family = AF_INET; - locaddr.sin_port = htons(local_port); - if (!local_ip) - locaddr.sin_addr.s_addr = htonl(INADDR_ANY); - else - locaddr.sin_addr.s_addr = htonl(local_ip); - - rc = kernel_bind(sock, (struct sockaddr *)&locaddr, - sizeof(locaddr)); - if (rc == -EADDRINUSE) { - CDEBUG(D_NET, "Port %d already in use\n", local_port); - *fatal = 0; - goto failed; - } - if (rc) { - CERROR("Error trying to bind to port %d: %d\n", - local_port, rc); - goto failed; - } - } - return 0; - -failed: - sock_release(sock); - return rc; -} - -int -lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize) -{ - int option; - int rc; - - if 
(txbufsize) { - option = txbufsize; - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, - (char *)&option, sizeof(option)); - if (rc) { - CERROR("Can't set send buffer %d: %d\n", - option, rc); - return rc; - } - } - - if (rxbufsize) { - option = rxbufsize; - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, - (char *)&option, sizeof(option)); - if (rc) { - CERROR("Can't set receive buffer %d: %d\n", - option, rc); - return rc; - } - } - return 0; -} -EXPORT_SYMBOL(lnet_sock_setbuf); - -int -lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port) -{ - struct sockaddr_in sin; - int rc; - - if (remote) - rc = kernel_getpeername(sock, (struct sockaddr *)&sin); - else - rc = kernel_getsockname(sock, (struct sockaddr *)&sin); - if (rc < 0) { - CERROR("Error %d getting sock %s IP/port\n", - rc, remote ? "peer" : "local"); - return rc; - } - - if (ip) - *ip = ntohl(sin.sin_addr.s_addr); - - if (port) - *port = ntohs(sin.sin_port); - - return 0; -} -EXPORT_SYMBOL(lnet_sock_getaddr); - -int -lnet_sock_getbuf(struct socket *sock, int *txbufsize, int *rxbufsize) -{ - if (txbufsize) - *txbufsize = sock->sk->sk_sndbuf; - - if (rxbufsize) - *rxbufsize = sock->sk->sk_rcvbuf; - - return 0; -} -EXPORT_SYMBOL(lnet_sock_getbuf); - -int -lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port, - int backlog) -{ - int fatal; - int rc; - - rc = lnet_sock_create(sockp, &fatal, local_ip, local_port); - if (rc) { - if (!fatal) - CERROR("Can't create socket: port %d already in use\n", - local_port); - return rc; - } - - rc = kernel_listen(*sockp, backlog); - if (!rc) - return 0; - - CERROR("Can't set listen backlog %d: %d\n", backlog, rc); - sock_release(*sockp); - return rc; -} - -int -lnet_sock_accept(struct socket **newsockp, struct socket *sock) -{ - wait_queue_entry_t wait; - struct socket *newsock; - int rc; - - /* - * XXX this should add a ref to sock->ops->owner, if - * TCP could be a module - */ - rc = sock_create_lite(PF_PACKET, sock->type, 
IPPROTO_TCP, &newsock); - if (rc) { - CERROR("Can't allocate socket\n"); - return rc; - } - - newsock->ops = sock->ops; - - rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false); - if (rc == -EAGAIN) { - /* Nothing ready, so wait for activity */ - init_waitqueue_entry(&wait, current); - add_wait_queue(sk_sleep(sock->sk), &wait); - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - remove_wait_queue(sk_sleep(sock->sk), &wait); - rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false); - } - - if (rc) - goto failed; - - *newsockp = newsock; - return 0; - -failed: - sock_release(newsock); - return rc; -} - -int -lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, - int local_port, __u32 peer_ip, int peer_port) -{ - struct sockaddr_in srvaddr; - int rc; - - rc = lnet_sock_create(sockp, fatal, local_ip, local_port); - if (rc) - return rc; - - memset(&srvaddr, 0, sizeof(srvaddr)); - srvaddr.sin_family = AF_INET; - srvaddr.sin_port = htons(peer_port); - srvaddr.sin_addr.s_addr = htonl(peer_ip); - - rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr, - sizeof(srvaddr), 0); - if (!rc) - return 0; - - /* - * EADDRNOTAVAIL probably means we're already connected to the same - * peer/port on the same local port on a differently typed - * connection. Let our caller retry with a different local - * port... - */ - *fatal = !(rc == -EADDRNOTAVAIL); - - CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET, - "Error %d connecting %pI4h/%d -> %pI4h/%d\n", rc, - &local_ip, local_port, &peer_ip, peer_port); - - sock_release(*sockp); - return rc; -} diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c deleted file mode 100644 index 7456b989e451..000000000000 --- a/drivers/staging/lustre/lnet/lnet/lo.c +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include - -static int -lolnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) -{ - LASSERT(!lntmsg->msg_routing); - LASSERT(!lntmsg->msg_target_is_router); - - return lnet_parse(ni, &lntmsg->msg_hdr, ni->ni_nid, lntmsg, 0); -} - -static int -lolnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, - int delayed, struct iov_iter *to, unsigned int rlen) -{ - struct lnet_msg *sendmsg = private; - - if (lntmsg) { /* not discarding */ - if (sendmsg->msg_iov) - lnet_copy_iov2iter(to, - sendmsg->msg_niov, - sendmsg->msg_iov, - sendmsg->msg_offset, - iov_iter_count(to)); - else - lnet_copy_kiov2iter(to, - sendmsg->msg_niov, - sendmsg->msg_kiov, - sendmsg->msg_offset, - iov_iter_count(to)); - - lnet_finalize(ni, lntmsg, 0); - } - - lnet_finalize(ni, sendmsg, 0); - return 0; -} - -static int lolnd_instanced; - -static void -lolnd_shutdown(struct lnet_ni *ni) -{ - CDEBUG(D_NET, "shutdown\n"); - LASSERT(lolnd_instanced); - - lolnd_instanced = 0; -} - 
-static int -lolnd_startup(struct lnet_ni *ni) -{ - LASSERT(ni->ni_lnd == &the_lolnd); - LASSERT(!lolnd_instanced); - lolnd_instanced = 1; - - return 0; -} - -struct lnet_lnd the_lolnd = { - /* .lnd_list = */ {&the_lolnd.lnd_list, &the_lolnd.lnd_list}, - /* .lnd_refcount = */ 0, - /* .lnd_type = */ LOLND, - /* .lnd_startup = */ lolnd_startup, - /* .lnd_shutdown = */ lolnd_shutdown, - /* .lnt_ctl = */ NULL, - /* .lnd_send = */ lolnd_send, - /* .lnd_recv = */ lolnd_recv, - /* .lnd_eager_recv = */ NULL, - /* .lnd_notify = */ NULL, - /* .lnd_accept = */ NULL -}; diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c deleted file mode 100644 index 9d06664f0c17..000000000000 --- a/drivers/staging/lustre/lnet/lnet/module.c +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include - -static int config_on_load; -module_param(config_on_load, int, 0444); -MODULE_PARM_DESC(config_on_load, "configure network at module load"); - -static struct mutex lnet_config_mutex; - -static int -lnet_configure(void *arg) -{ - /* 'arg' only there so I can be passed to cfs_create_thread() */ - int rc = 0; - - mutex_lock(&lnet_config_mutex); - - if (!the_lnet.ln_niinit_self) { - rc = try_module_get(THIS_MODULE); - - if (rc != 1) - goto out; - - rc = LNetNIInit(LNET_PID_LUSTRE); - if (rc >= 0) { - the_lnet.ln_niinit_self = 1; - rc = 0; - } else { - module_put(THIS_MODULE); - } - } - -out: - mutex_unlock(&lnet_config_mutex); - return rc; -} - -static int -lnet_unconfigure(void) -{ - int refcount; - - mutex_lock(&lnet_config_mutex); - - if (the_lnet.ln_niinit_self) { - the_lnet.ln_niinit_self = 0; - LNetNIFini(); - module_put(THIS_MODULE); - } - - mutex_lock(&the_lnet.ln_api_mutex); - refcount = the_lnet.ln_refcount; - mutex_unlock(&the_lnet.ln_api_mutex); - - mutex_unlock(&lnet_config_mutex); - return !refcount ? 
0 : -EBUSY; -} - -static int -lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr) -{ - struct lnet_ioctl_config_data *conf = - (struct lnet_ioctl_config_data *)hdr; - int rc; - - if (conf->cfg_hdr.ioc_len < sizeof(*conf)) - return -EINVAL; - - mutex_lock(&lnet_config_mutex); - if (!the_lnet.ln_niinit_self) { - rc = -EINVAL; - goto out_unlock; - } - rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, conf); -out_unlock: - mutex_unlock(&lnet_config_mutex); - - return rc; -} - -static int -lnet_dyn_unconfigure(struct libcfs_ioctl_hdr *hdr) -{ - struct lnet_ioctl_config_data *conf = - (struct lnet_ioctl_config_data *)hdr; - int rc; - - if (conf->cfg_hdr.ioc_len < sizeof(*conf)) - return -EINVAL; - - mutex_lock(&lnet_config_mutex); - if (!the_lnet.ln_niinit_self) { - rc = -EINVAL; - goto out_unlock; - } - rc = lnet_dyn_del_ni(conf->cfg_net); -out_unlock: - mutex_unlock(&lnet_config_mutex); - - return rc; -} - -static int -lnet_ioctl(struct notifier_block *nb, - unsigned long cmd, void *vdata) -{ - int rc; - struct libcfs_ioctl_hdr *hdr = vdata; - - switch (cmd) { - case IOC_LIBCFS_CONFIGURE: { - struct libcfs_ioctl_data *data = - (struct libcfs_ioctl_data *)hdr; - - if (data->ioc_hdr.ioc_len < sizeof(*data)) { - rc = -EINVAL; - } else { - the_lnet.ln_nis_from_mod_params = data->ioc_flags; - rc = lnet_configure(NULL); - } - break; - } - - case IOC_LIBCFS_UNCONFIGURE: - rc = lnet_unconfigure(); - break; - - case IOC_LIBCFS_ADD_NET: - rc = lnet_dyn_configure(hdr); - break; - - case IOC_LIBCFS_DEL_NET: - rc = lnet_dyn_unconfigure(hdr); - break; - - default: - /* - * Passing LNET_PID_ANY only gives me a ref if the net is up - * already; I'll need it to ensure the net can't go down while - * I'm called into it - */ - rc = LNetNIInit(LNET_PID_ANY); - if (rc >= 0) { - rc = LNetCtl(cmd, hdr); - LNetNIFini(); - } - break; - } - return notifier_from_ioctl_errno(rc); -} - -static struct notifier_block lnet_ioctl_handler = { - .notifier_call = lnet_ioctl, -}; - -static int __init lnet_init(void) -{ 
- int rc; - - mutex_init(&lnet_config_mutex); - - rc = libcfs_setup(); - if (rc) - return rc; - - rc = lnet_lib_init(); - if (rc) { - CERROR("lnet_lib_init: error %d\n", rc); - return rc; - } - - rc = blocking_notifier_chain_register(&libcfs_ioctl_list, - &lnet_ioctl_handler); - LASSERT(!rc); - - if (config_on_load) { - /* - * Have to schedule a separate thread to avoid deadlocking - * in modload - */ - (void)kthread_run(lnet_configure, NULL, "lnet_initd"); - } - - return 0; -} - -static void __exit lnet_exit(void) -{ - int rc; - - rc = blocking_notifier_chain_unregister(&libcfs_ioctl_list, - &lnet_ioctl_handler); - LASSERT(!rc); - - lnet_lib_exit(); -} - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Lustre Networking layer"); -MODULE_VERSION(LNET_VERSION); -MODULE_LICENSE("GPL"); - -module_init(lnet_init); -module_exit(lnet_exit); diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c deleted file mode 100644 index 0066394b0bb0..000000000000 --- a/drivers/staging/lustre/lnet/lnet/net_fault.c +++ /dev/null @@ -1,1023 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2014, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate, Inc. - * - * lnet/lnet/net_fault.c - * - * Lustre network fault simulation - * - * Author: liang.zhen@intel.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include - -#define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \ - LNET_GET_BIT | LNET_REPLY_BIT) - -struct lnet_drop_rule { - /** link chain on the_lnet.ln_drop_rules */ - struct list_head dr_link; - /** attributes of this rule */ - struct lnet_fault_attr dr_attr; - /** lock to protect \a dr_drop_at and \a dr_stat */ - spinlock_t dr_lock; - /** - * the message sequence to drop, which means message is dropped when - * dr_stat.drs_count == dr_drop_at - */ - unsigned long dr_drop_at; - /** - * seconds to drop the next message, it's exclusive with dr_drop_at - */ - unsigned long dr_drop_time; - /** baseline to caculate dr_drop_time */ - unsigned long dr_time_base; - /** statistic of dropped messages */ - struct lnet_fault_stat dr_stat; -}; - -static bool -lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid) -{ - if (nid == msg_nid || nid == LNET_NID_ANY) - return true; - - if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid)) - return false; - - /* 255.255.255.255@net is wildcard for all addresses in a network */ - return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY); -} - -static bool -lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src, - lnet_nid_t dst, unsigned int type, unsigned int portal) -{ - if (!lnet_fault_nid_match(attr->fa_src, src) || - !lnet_fault_nid_match(attr->fa_dst, dst)) - return false; - - if (!(attr->fa_msg_mask & (1 << type))) - return false; - - /** - * NB: ACK and REPLY have no portal, but they should have been - * rejected by 
message mask - */ - if (attr->fa_ptl_mask && /* has portal filter */ - !(attr->fa_ptl_mask & (1ULL << portal))) - return false; - - return true; -} - -static int -lnet_fault_attr_validate(struct lnet_fault_attr *attr) -{ - if (!attr->fa_msg_mask) - attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */ - - if (!attr->fa_ptl_mask) /* no portal filter */ - return 0; - - /* NB: only PUT and GET can be filtered if portal filter has been set */ - attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT; - if (!attr->fa_msg_mask) { - CDEBUG(D_NET, "can't find valid message type bits %x\n", - attr->fa_msg_mask); - return -EINVAL; - } - return 0; -} - -static void -lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type) -{ - /* NB: fs_counter is NOT updated by this function */ - switch (type) { - case LNET_MSG_PUT: - stat->fs_put++; - return; - case LNET_MSG_ACK: - stat->fs_ack++; - return; - case LNET_MSG_GET: - stat->fs_get++; - return; - case LNET_MSG_REPLY: - stat->fs_reply++; - return; - } -} - -/** - * LNet message drop simulation - */ - -/** - * Add a new drop rule to LNet - * There is no check for duplicated drop rule, all rules will be checked for - * incoming message. 
- */ -static int -lnet_drop_rule_add(struct lnet_fault_attr *attr) -{ - struct lnet_drop_rule *rule; - - if (attr->u.drop.da_rate & attr->u.drop.da_interval) { - CDEBUG(D_NET, "please provide either drop rate or drop interval, but not both at the same time %d/%d\n", - attr->u.drop.da_rate, attr->u.drop.da_interval); - return -EINVAL; - } - - if (lnet_fault_attr_validate(attr)) - return -EINVAL; - - rule = kzalloc(sizeof(*rule), GFP_NOFS); - if (!rule) - return -ENOMEM; - - spin_lock_init(&rule->dr_lock); - - rule->dr_attr = *attr; - if (attr->u.drop.da_interval) { - rule->dr_time_base = jiffies + attr->u.drop.da_interval * HZ; - rule->dr_drop_time = jiffies + - prandom_u32_max(attr->u.drop.da_interval) * HZ; - } else { - rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate); - } - - lnet_net_lock(LNET_LOCK_EX); - list_add(&rule->dr_link, &the_lnet.ln_drop_rules); - lnet_net_unlock(LNET_LOCK_EX); - - CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n", - libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src), - attr->u.drop.da_rate, attr->u.drop.da_interval); - return 0; -} - -/** - * Remove matched drop rules from lnet, all rules that can match \a src and - * \a dst will be removed. 
- * If \a src is zero, then all rules have \a dst as destination will be remove - * If \a dst is zero, then all rules have \a src as source will be removed - * If both of them are zero, all rules will be removed - */ -static int -lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst) -{ - struct lnet_drop_rule *rule; - struct lnet_drop_rule *tmp; - struct list_head zombies; - int n = 0; - - INIT_LIST_HEAD(&zombies); - - lnet_net_lock(LNET_LOCK_EX); - list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) { - if (rule->dr_attr.fa_src != src && src) - continue; - - if (rule->dr_attr.fa_dst != dst && dst) - continue; - - list_move(&rule->dr_link, &zombies); - } - lnet_net_unlock(LNET_LOCK_EX); - - list_for_each_entry_safe(rule, tmp, &zombies, dr_link) { - CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n", - libcfs_nid2str(rule->dr_attr.fa_src), - libcfs_nid2str(rule->dr_attr.fa_dst), - rule->dr_attr.u.drop.da_rate, - rule->dr_attr.u.drop.da_interval); - - list_del(&rule->dr_link); - kfree(rule); - n++; - } - - return n; -} - -/** - * List drop rule at position of \a pos - */ -static int -lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr, - struct lnet_fault_stat *stat) -{ - struct lnet_drop_rule *rule; - int cpt; - int i = 0; - int rc = -ENOENT; - - cpt = lnet_net_lock_current(); - list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { - if (i++ < pos) - continue; - - spin_lock(&rule->dr_lock); - *attr = rule->dr_attr; - *stat = rule->dr_stat; - spin_unlock(&rule->dr_lock); - rc = 0; - break; - } - - lnet_net_unlock(cpt); - return rc; -} - -/** - * reset counters for all drop rules - */ -static void -lnet_drop_rule_reset(void) -{ - struct lnet_drop_rule *rule; - int cpt; - - cpt = lnet_net_lock_current(); - - list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { - struct lnet_fault_attr *attr = &rule->dr_attr; - - spin_lock(&rule->dr_lock); - - memset(&rule->dr_stat, 0, sizeof(rule->dr_stat)); - if (attr->u.drop.da_rate) 
{ - rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate); - } else { - rule->dr_drop_time = jiffies + - prandom_u32_max(attr->u.drop.da_interval) * HZ; - rule->dr_time_base = jiffies + attr->u.drop.da_interval * HZ; - } - spin_unlock(&rule->dr_lock); - } - - lnet_net_unlock(cpt); -} - -/** - * check source/destination NID, portal, message type and drop rate, - * decide whether should drop this message or not - */ -static bool -drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src, - lnet_nid_t dst, unsigned int type, unsigned int portal) -{ - struct lnet_fault_attr *attr = &rule->dr_attr; - bool drop; - - if (!lnet_fault_attr_match(attr, src, dst, type, portal)) - return false; - - /* match this rule, check drop rate now */ - spin_lock(&rule->dr_lock); - if (rule->dr_drop_time) { /* time based drop */ - unsigned long now = jiffies; - - rule->dr_stat.fs_count++; - drop = time_after_eq(now, rule->dr_drop_time); - if (drop) { - if (time_after(now, rule->dr_time_base)) - rule->dr_time_base = now; - - rule->dr_drop_time = rule->dr_time_base + - prandom_u32_max(attr->u.drop.da_interval) * HZ; - rule->dr_time_base += attr->u.drop.da_interval * HZ; - - CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lu\n", - libcfs_nid2str(attr->fa_src), - libcfs_nid2str(attr->fa_dst), - rule->dr_drop_time); - } - - } else { /* rate based drop */ - drop = rule->dr_stat.fs_count++ == rule->dr_drop_at; - - if (!do_div(rule->dr_stat.fs_count, attr->u.drop.da_rate)) { - rule->dr_drop_at = rule->dr_stat.fs_count + - prandom_u32_max(attr->u.drop.da_rate); - CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n", - libcfs_nid2str(attr->fa_src), - libcfs_nid2str(attr->fa_dst), rule->dr_drop_at); - } - } - - if (drop) { /* drop this message, update counters */ - lnet_fault_stat_inc(&rule->dr_stat, type); - rule->dr_stat.u.drop.ds_dropped++; - } - - spin_unlock(&rule->dr_lock); - return drop; -} - -/** - * Check if message from \a src to \a dst can match any existed drop rule - */ -bool 
-lnet_drop_rule_match(struct lnet_hdr *hdr) -{ - struct lnet_drop_rule *rule; - lnet_nid_t src = le64_to_cpu(hdr->src_nid); - lnet_nid_t dst = le64_to_cpu(hdr->dest_nid); - unsigned int typ = le32_to_cpu(hdr->type); - unsigned int ptl = -1; - bool drop = false; - int cpt; - - /** - * NB: if Portal is specified, then only PUT and GET will be - * filtered by drop rule - */ - if (typ == LNET_MSG_PUT) - ptl = le32_to_cpu(hdr->msg.put.ptl_index); - else if (typ == LNET_MSG_GET) - ptl = le32_to_cpu(hdr->msg.get.ptl_index); - - cpt = lnet_net_lock_current(); - list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { - drop = drop_rule_match(rule, src, dst, typ, ptl); - if (drop) - break; - } - - lnet_net_unlock(cpt); - return drop; -} - -/** - * LNet Delay Simulation - */ -/** timestamp (second) to send delayed message */ -#define msg_delay_send msg_ev.hdr_data - -struct lnet_delay_rule { - /** link chain on the_lnet.ln_delay_rules */ - struct list_head dl_link; - /** link chain on delay_dd.dd_sched_rules */ - struct list_head dl_sched_link; - /** attributes of this rule */ - struct lnet_fault_attr dl_attr; - /** lock to protect \a below members */ - spinlock_t dl_lock; - /** refcount of delay rule */ - atomic_t dl_refcount; - /** - * the message sequence to delay, which means message is delayed when - * dl_stat.fs_count == dl_delay_at - */ - unsigned long dl_delay_at; - /** - * seconds to delay the next message, it's exclusive with dl_delay_at - */ - unsigned long dl_delay_time; - /** baseline to caculate dl_delay_time */ - unsigned long dl_time_base; - /** jiffies to send the next delayed message */ - unsigned long dl_msg_send; - /** delayed message list */ - struct list_head dl_msg_list; - /** statistic of delayed messages */ - struct lnet_fault_stat dl_stat; - /** timer to wakeup delay_daemon */ - struct timer_list dl_timer; -}; - -struct delay_daemon_data { - /** serialise rule add/remove */ - struct mutex dd_mutex; - /** protect rules on \a dd_sched_rules */ - 
spinlock_t dd_lock; - /** scheduled delay rules (by timer) */ - struct list_head dd_sched_rules; - /** daemon thread sleeps at here */ - wait_queue_head_t dd_waitq; - /** controller (lctl command) wait at here */ - wait_queue_head_t dd_ctl_waitq; - /** daemon is running */ - unsigned int dd_running; - /** daemon stopped */ - unsigned int dd_stopped; -}; - -static struct delay_daemon_data delay_dd; - -static unsigned long -round_timeout(unsigned long timeout) -{ - return (unsigned int)rounddown(timeout, HZ) + HZ; -} - -static void -delay_rule_decref(struct lnet_delay_rule *rule) -{ - if (atomic_dec_and_test(&rule->dl_refcount)) { - LASSERT(list_empty(&rule->dl_sched_link)); - LASSERT(list_empty(&rule->dl_msg_list)); - LASSERT(list_empty(&rule->dl_link)); - - kfree(rule); - } -} - -/** - * check source/destination NID, portal, message type and delay rate, - * decide whether should delay this message or not - */ -static bool -delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src, - lnet_nid_t dst, unsigned int type, unsigned int portal, - struct lnet_msg *msg) -{ - struct lnet_fault_attr *attr = &rule->dl_attr; - bool delay; - - if (!lnet_fault_attr_match(attr, src, dst, type, portal)) - return false; - - /* match this rule, check delay rate now */ - spin_lock(&rule->dl_lock); - if (rule->dl_delay_time) { /* time based delay */ - unsigned long now = jiffies; - - rule->dl_stat.fs_count++; - delay = time_after_eq(now, rule->dl_delay_time); - if (delay) { - if (time_after(now, rule->dl_time_base)) - rule->dl_time_base = now; - - rule->dl_delay_time = rule->dl_time_base + - prandom_u32_max(attr->u.delay.la_interval) * HZ; - rule->dl_time_base += attr->u.delay.la_interval * HZ; - - CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n", - libcfs_nid2str(attr->fa_src), - libcfs_nid2str(attr->fa_dst), - rule->dl_delay_time); - } - - } else { /* rate based delay */ - delay = rule->dl_stat.fs_count++ == rule->dl_delay_at; - /* generate the next random rate sequence */ - 
if (!do_div(rule->dl_stat.fs_count, attr->u.delay.la_rate)) { - rule->dl_delay_at = rule->dl_stat.fs_count + - prandom_u32_max(attr->u.delay.la_rate); - CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n", - libcfs_nid2str(attr->fa_src), - libcfs_nid2str(attr->fa_dst), rule->dl_delay_at); - } - } - - if (!delay) { - spin_unlock(&rule->dl_lock); - return false; - } - - /* delay this message, update counters */ - lnet_fault_stat_inc(&rule->dl_stat, type); - rule->dl_stat.u.delay.ls_delayed++; - - list_add_tail(&msg->msg_list, &rule->dl_msg_list); - msg->msg_delay_send = round_timeout( - jiffies + attr->u.delay.la_latency * HZ); - if (rule->dl_msg_send == -1) { - rule->dl_msg_send = msg->msg_delay_send; - mod_timer(&rule->dl_timer, rule->dl_msg_send); - } - - spin_unlock(&rule->dl_lock); - return true; -} - -/** - * check if \a msg can match any Delay Rule, receiving of this message - * will be delayed if there is a match. - */ -bool -lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg) -{ - struct lnet_delay_rule *rule; - lnet_nid_t src = le64_to_cpu(hdr->src_nid); - lnet_nid_t dst = le64_to_cpu(hdr->dest_nid); - unsigned int typ = le32_to_cpu(hdr->type); - unsigned int ptl = -1; - - /* NB: called with hold of lnet_net_lock */ - - /** - * NB: if Portal is specified, then only PUT and GET will be - * filtered by delay rule - */ - if (typ == LNET_MSG_PUT) - ptl = le32_to_cpu(hdr->msg.put.ptl_index); - else if (typ == LNET_MSG_GET) - ptl = le32_to_cpu(hdr->msg.get.ptl_index); - - list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { - if (delay_rule_match(rule, src, dst, typ, ptl, msg)) - return true; - } - - return false; -} - -/** check out delayed messages for send */ -static void -delayed_msg_check(struct lnet_delay_rule *rule, bool all, - struct list_head *msg_list) -{ - struct lnet_msg *msg; - struct lnet_msg *tmp; - unsigned long now = jiffies; - - if (!all && rule->dl_msg_send > now) - return; - - spin_lock(&rule->dl_lock); - 
list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) { - if (!all && msg->msg_delay_send > now) - break; - - msg->msg_delay_send = 0; - list_move_tail(&msg->msg_list, msg_list); - } - - if (list_empty(&rule->dl_msg_list)) { - del_timer(&rule->dl_timer); - rule->dl_msg_send = -1; - - } else if (!list_empty(msg_list)) { - /* - * dequeued some timedout messages, update timer for the - * next delayed message on rule - */ - msg = list_entry(rule->dl_msg_list.next, - struct lnet_msg, msg_list); - rule->dl_msg_send = msg->msg_delay_send; - mod_timer(&rule->dl_timer, rule->dl_msg_send); - } - spin_unlock(&rule->dl_lock); -} - -static void -delayed_msg_process(struct list_head *msg_list, bool drop) -{ - struct lnet_msg *msg; - - while (!list_empty(msg_list)) { - struct lnet_ni *ni; - int cpt; - int rc; - - msg = list_entry(msg_list->next, struct lnet_msg, msg_list); - LASSERT(msg->msg_rxpeer); - - ni = msg->msg_rxpeer->lp_ni; - cpt = msg->msg_rx_cpt; - - list_del_init(&msg->msg_list); - if (drop) { - rc = -ECANCELED; - - } else if (!msg->msg_routing) { - rc = lnet_parse_local(ni, msg); - if (!rc) - continue; - - } else { - lnet_net_lock(cpt); - rc = lnet_parse_forward_locked(ni, msg); - lnet_net_unlock(cpt); - - switch (rc) { - case LNET_CREDIT_OK: - lnet_ni_recv(ni, msg->msg_private, msg, 0, - 0, msg->msg_len, msg->msg_len); - /* fall through */ - case LNET_CREDIT_WAIT: - continue; - default: /* failures */ - break; - } - } - - lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len); - lnet_finalize(ni, msg, rc); - } -} - -/** - * Process delayed messages for scheduled rules - * This function can either be called by delay_rule_daemon, or by lnet_finalise - */ -void -lnet_delay_rule_check(void) -{ - struct lnet_delay_rule *rule; - struct list_head msgs; - - INIT_LIST_HEAD(&msgs); - while (1) { - if (list_empty(&delay_dd.dd_sched_rules)) - break; - - spin_lock_bh(&delay_dd.dd_lock); - if (list_empty(&delay_dd.dd_sched_rules)) { - 
spin_unlock_bh(&delay_dd.dd_lock); - break; - } - - rule = list_entry(delay_dd.dd_sched_rules.next, - struct lnet_delay_rule, dl_sched_link); - list_del_init(&rule->dl_sched_link); - spin_unlock_bh(&delay_dd.dd_lock); - - delayed_msg_check(rule, false, &msgs); - delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */ - } - - if (!list_empty(&msgs)) - delayed_msg_process(&msgs, false); -} - -/** daemon thread to handle delayed messages */ -static int -lnet_delay_rule_daemon(void *arg) -{ - delay_dd.dd_running = 1; - wake_up(&delay_dd.dd_ctl_waitq); - - while (delay_dd.dd_running) { - wait_event_interruptible(delay_dd.dd_waitq, - !delay_dd.dd_running || - !list_empty(&delay_dd.dd_sched_rules)); - lnet_delay_rule_check(); - } - - /* in case more rules have been enqueued after my last check */ - lnet_delay_rule_check(); - delay_dd.dd_stopped = 1; - wake_up(&delay_dd.dd_ctl_waitq); - - return 0; -} - -static void -delay_timer_cb(struct timer_list *t) -{ - struct lnet_delay_rule *rule = from_timer(rule, t, dl_timer); - - spin_lock_bh(&delay_dd.dd_lock); - if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) { - atomic_inc(&rule->dl_refcount); - list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules); - wake_up(&delay_dd.dd_waitq); - } - spin_unlock_bh(&delay_dd.dd_lock); -} - -/** - * Add a new delay rule to LNet - * There is no check for duplicated delay rule, all rules will be checked for - * incoming message. 
- */ -int -lnet_delay_rule_add(struct lnet_fault_attr *attr) -{ - struct lnet_delay_rule *rule; - int rc = 0; - - if (attr->u.delay.la_rate & attr->u.delay.la_interval) { - CDEBUG(D_NET, "please provide either delay rate or delay interval, but not both at the same time %d/%d\n", - attr->u.delay.la_rate, attr->u.delay.la_interval); - return -EINVAL; - } - - if (!attr->u.delay.la_latency) { - CDEBUG(D_NET, "delay latency cannot be zero\n"); - return -EINVAL; - } - - if (lnet_fault_attr_validate(attr)) - return -EINVAL; - - rule = kzalloc(sizeof(*rule), GFP_NOFS); - if (!rule) - return -ENOMEM; - - mutex_lock(&delay_dd.dd_mutex); - if (!delay_dd.dd_running) { - struct task_struct *task; - - /** - * NB: although LND threads will process delayed message - * in lnet_finalize, but there is no guarantee that LND - * threads will be waken up if no other message needs to - * be handled. - * Only one daemon thread, performance is not the concern - * of this simualation module. - */ - task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd"); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - goto failed; - } - wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running); - } - - timer_setup(&rule->dl_timer, delay_timer_cb, 0); - - spin_lock_init(&rule->dl_lock); - INIT_LIST_HEAD(&rule->dl_msg_list); - INIT_LIST_HEAD(&rule->dl_sched_link); - - rule->dl_attr = *attr; - if (attr->u.delay.la_interval) { - rule->dl_time_base = jiffies + attr->u.delay.la_interval * HZ; - rule->dl_delay_time = jiffies + - prandom_u32_max(attr->u.delay.la_interval) * HZ; - } else { - rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate); - } - - rule->dl_msg_send = -1; - - lnet_net_lock(LNET_LOCK_EX); - atomic_set(&rule->dl_refcount, 1); - list_add(&rule->dl_link, &the_lnet.ln_delay_rules); - lnet_net_unlock(LNET_LOCK_EX); - - CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n", - libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src), - attr->u.delay.la_rate); - - 
mutex_unlock(&delay_dd.dd_mutex); - return 0; -failed: - mutex_unlock(&delay_dd.dd_mutex); - kfree(rule); - return rc; -} - -/** - * Remove matched Delay Rules from lnet, if \a shutdown is true or both \a src - * and \a dst are zero, all rules will be removed, otherwise only matched rules - * will be removed. - * If \a src is zero, then all rules have \a dst as destination will be remove - * If \a dst is zero, then all rules have \a src as source will be removed - * - * When a delay rule is removed, all delayed messages of this rule will be - * processed immediately. - */ -int -lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown) -{ - struct lnet_delay_rule *rule; - struct lnet_delay_rule *tmp; - struct list_head rule_list; - struct list_head msg_list; - int n = 0; - bool cleanup; - - INIT_LIST_HEAD(&rule_list); - INIT_LIST_HEAD(&msg_list); - - if (shutdown) { - src = 0; - dst = 0; - } - - mutex_lock(&delay_dd.dd_mutex); - lnet_net_lock(LNET_LOCK_EX); - - list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) { - if (rule->dl_attr.fa_src != src && src) - continue; - - if (rule->dl_attr.fa_dst != dst && dst) - continue; - - CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n", - libcfs_nid2str(rule->dl_attr.fa_src), - libcfs_nid2str(rule->dl_attr.fa_dst), - rule->dl_attr.u.delay.la_rate, - rule->dl_attr.u.delay.la_interval); - /* refcount is taken over by rule_list */ - list_move(&rule->dl_link, &rule_list); - } - - /* check if we need to shutdown delay_daemon */ - cleanup = list_empty(&the_lnet.ln_delay_rules) && - !list_empty(&rule_list); - lnet_net_unlock(LNET_LOCK_EX); - - list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) { - list_del_init(&rule->dl_link); - - del_timer_sync(&rule->dl_timer); - delayed_msg_check(rule, true, &msg_list); - delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */ - n++; - } - - if (cleanup) { /* no more delay rule, shutdown delay_daemon */ - LASSERT(delay_dd.dd_running); - 
delay_dd.dd_running = 0; - wake_up(&delay_dd.dd_waitq); - - while (!delay_dd.dd_stopped) - wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped); - } - mutex_unlock(&delay_dd.dd_mutex); - - if (!list_empty(&msg_list)) - delayed_msg_process(&msg_list, shutdown); - - return n; -} - -/** - * List Delay Rule at position of \a pos - */ -int -lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr, - struct lnet_fault_stat *stat) -{ - struct lnet_delay_rule *rule; - int cpt; - int i = 0; - int rc = -ENOENT; - - cpt = lnet_net_lock_current(); - list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { - if (i++ < pos) - continue; - - spin_lock(&rule->dl_lock); - *attr = rule->dl_attr; - *stat = rule->dl_stat; - spin_unlock(&rule->dl_lock); - rc = 0; - break; - } - - lnet_net_unlock(cpt); - return rc; -} - -/** - * reset counters for all Delay Rules - */ -void -lnet_delay_rule_reset(void) -{ - struct lnet_delay_rule *rule; - int cpt; - - cpt = lnet_net_lock_current(); - - list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { - struct lnet_fault_attr *attr = &rule->dl_attr; - - spin_lock(&rule->dl_lock); - - memset(&rule->dl_stat, 0, sizeof(rule->dl_stat)); - if (attr->u.delay.la_rate) { - rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate); - } else { - rule->dl_delay_time = - jiffies + prandom_u32_max( - attr->u.delay.la_interval) * HZ; - rule->dl_time_base = jiffies + attr->u.delay.la_interval * HZ; - } - spin_unlock(&rule->dl_lock); - } - - lnet_net_unlock(cpt); -} - -int -lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data) -{ - struct lnet_fault_attr *attr; - struct lnet_fault_stat *stat; - - attr = (struct lnet_fault_attr *)data->ioc_inlbuf1; - - switch (opc) { - default: - return -EINVAL; - - case LNET_CTL_DROP_ADD: - if (!attr) - return -EINVAL; - - return lnet_drop_rule_add(attr); - - case LNET_CTL_DROP_DEL: - if (!attr) - return -EINVAL; - - data->ioc_count = lnet_drop_rule_del(attr->fa_src, - attr->fa_dst); - return 0; - - case 
LNET_CTL_DROP_RESET: - lnet_drop_rule_reset(); - return 0; - - case LNET_CTL_DROP_LIST: - stat = (struct lnet_fault_stat *)data->ioc_inlbuf2; - if (!attr || !stat) - return -EINVAL; - - return lnet_drop_rule_list(data->ioc_count, attr, stat); - - case LNET_CTL_DELAY_ADD: - if (!attr) - return -EINVAL; - - return lnet_delay_rule_add(attr); - - case LNET_CTL_DELAY_DEL: - if (!attr) - return -EINVAL; - - data->ioc_count = lnet_delay_rule_del(attr->fa_src, - attr->fa_dst, false); - return 0; - - case LNET_CTL_DELAY_RESET: - lnet_delay_rule_reset(); - return 0; - - case LNET_CTL_DELAY_LIST: - stat = (struct lnet_fault_stat *)data->ioc_inlbuf2; - if (!attr || !stat) - return -EINVAL; - - return lnet_delay_rule_list(data->ioc_count, attr, stat); - } -} - -int -lnet_fault_init(void) -{ - BUILD_BUG_ON(LNET_PUT_BIT != 1 << LNET_MSG_PUT); - BUILD_BUG_ON(LNET_ACK_BIT != 1 << LNET_MSG_ACK); - BUILD_BUG_ON(LNET_GET_BIT != 1 << LNET_MSG_GET); - BUILD_BUG_ON(LNET_REPLY_BIT != 1 << LNET_MSG_REPLY); - - mutex_init(&delay_dd.dd_mutex); - spin_lock_init(&delay_dd.dd_lock); - init_waitqueue_head(&delay_dd.dd_waitq); - init_waitqueue_head(&delay_dd.dd_ctl_waitq); - INIT_LIST_HEAD(&delay_dd.dd_sched_rules); - - return 0; -} - -void -lnet_fault_fini(void) -{ - lnet_drop_rule_del(0, 0); - lnet_delay_rule_del(0, 0, true); - - LASSERT(list_empty(&the_lnet.ln_drop_rules)); - LASSERT(list_empty(&the_lnet.ln_delay_rules)); - LASSERT(list_empty(&delay_dd.dd_sched_rules)); -} diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c deleted file mode 100644 index 0f6c3fa16c65..000000000000 --- a/drivers/staging/lustre/lnet/lnet/nidstrings.c +++ /dev/null @@ -1,1261 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/lnet/nidstrings.c - * - * Author: Phil Schwan - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include -#include -#include -#include - -/* max value for numeric network address */ -#define MAX_NUMERIC_VALUE 0xffffffff - -#define IPSTRING_LENGTH 16 - -/* CAVEAT VENDITOR! Keep the canonical string representation of nets/nids - * consistent in all conversion functions. Some code fragments are copied - * around for the sake of clarity... - */ - -/* CAVEAT EMPTOR! Racey temporary buffer allocation! - * Choose the number of nidstrings to support the MAXIMUM expected number of - * concurrent users. If there are more, the returned string will be volatile. - * NB this number must allow for a process to be descheduled for a timeslice - * between getting its string and using it. 
- */ - -static char libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE]; -static int libcfs_nidstring_idx; - -static DEFINE_SPINLOCK(libcfs_nidstring_lock); - -static struct netstrfns *libcfs_namenum2netstrfns(const char *name); - -char * -libcfs_next_nidstring(void) -{ - char *str; - unsigned long flags; - - spin_lock_irqsave(&libcfs_nidstring_lock, flags); - - str = libcfs_nidstrings[libcfs_nidstring_idx++]; - if (libcfs_nidstring_idx == ARRAY_SIZE(libcfs_nidstrings)) - libcfs_nidstring_idx = 0; - - spin_unlock_irqrestore(&libcfs_nidstring_lock, flags); - return str; -} -EXPORT_SYMBOL(libcfs_next_nidstring); - -/** - * Nid range list syntax. - * \verbatim - * - * :== [ ' ' ] - * :== '@' - * :== '*' | - * | - * - * :== ... - * - * :== | - * - * :== '[' [ ',' ] ']' - * :== | - * '-' | - * '-' '/' - * :== | - * :== "lo" | "tcp" | "o2ib" | "cib" | "openib" | "iib" | - * "vib" | "ra" | "elan" | "mx" | "ptl" - * \endverbatim - */ - -/** - * Structure to represent \ token of the syntax. - * - * One of this is created for each \ parsed. - */ -struct nidrange { - /** - * Link to list of this structures which is built on nid range - * list parsing. - */ - struct list_head nr_link; - /** - * List head for addrrange::ar_link. - */ - struct list_head nr_addrranges; - /** - * Flag indicating that *@ is found. - */ - int nr_all; - /** - * Pointer to corresponding element of libcfs_netstrfns. - */ - struct netstrfns *nr_netstrfns; - /** - * Number of network. E.g. 5 if \ is "elan5". - */ - int nr_netnum; -}; - -/** - * Structure to represent \ token of the syntax. - */ -struct addrrange { - /** - * Link to nidrange::nr_addrranges. - */ - struct list_head ar_link; - /** - * List head for cfs_expr_list::el_list. - */ - struct list_head ar_numaddr_ranges; -}; - -/** - * Parses \ token on the syntax. 
- * - * Allocates struct addrrange and links to \a nidrange via - * (nidrange::nr_addrranges) - * - * \retval 0 if \a src parses to '*' | \ | \ - * \retval -errno otherwise - */ -static int -parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange) -{ - struct addrrange *addrrange; - - if (src->ls_len == 1 && src->ls_str[0] == '*') { - nidrange->nr_all = 1; - return 0; - } - - addrrange = kzalloc(sizeof(struct addrrange), GFP_NOFS); - if (!addrrange) - return -ENOMEM; - list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges); - INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges); - - return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str, - src->ls_len, - &addrrange->ar_numaddr_ranges); -} - -/** - * Finds or creates struct nidrange. - * - * Checks if \a src is a valid network name, looks for corresponding - * nidrange on the ist of nidranges (\a nidlist), creates new struct - * nidrange if it is not found. - * - * \retval pointer to struct nidrange matching network specified via \a src - * \retval NULL if \a src does not match any network - */ -static struct nidrange * -add_nidrange(const struct cfs_lstr *src, - struct list_head *nidlist) -{ - struct netstrfns *nf; - struct nidrange *nr; - int endlen; - unsigned int netnum; - - if (src->ls_len >= LNET_NIDSTR_SIZE) - return NULL; - - nf = libcfs_namenum2netstrfns(src->ls_str); - if (!nf) - return NULL; - endlen = src->ls_len - strlen(nf->nf_name); - if (!endlen) - /* network name only, e.g. "elan" or "tcp" */ - netnum = 0; - else { - /* - * e.g. 
"elan25" or "tcp23", refuse to parse if - * network name is not appended with decimal or - * hexadecimal number - */ - if (!cfs_str2num_check(src->ls_str + strlen(nf->nf_name), - endlen, &netnum, 0, MAX_NUMERIC_VALUE)) - return NULL; - } - - list_for_each_entry(nr, nidlist, nr_link) { - if (nr->nr_netstrfns != nf) - continue; - if (nr->nr_netnum != netnum) - continue; - return nr; - } - - nr = kzalloc(sizeof(struct nidrange), GFP_NOFS); - if (!nr) - return NULL; - list_add_tail(&nr->nr_link, nidlist); - INIT_LIST_HEAD(&nr->nr_addrranges); - nr->nr_netstrfns = nf; - nr->nr_all = 0; - nr->nr_netnum = netnum; - - return nr; -} - -/** - * Parses \ token of the syntax. - * - * \retval 1 if \a src parses to \ '@' \ - * \retval 0 otherwise - */ -static int -parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist) -{ - struct cfs_lstr addrrange; - struct cfs_lstr net; - struct nidrange *nr; - - if (!cfs_gettok(src, '@', &addrrange)) - goto failed; - - if (!cfs_gettok(src, '@', &net) || src->ls_str) - goto failed; - - nr = add_nidrange(&net, nidlist); - if (!nr) - goto failed; - - if (parse_addrange(&addrrange, nr)) - goto failed; - - return 1; -failed: - return 0; -} - -/** - * Frees addrrange structures of \a list. - * - * For each struct addrrange structure found on \a list it frees - * cfs_expr_list list attached to it and frees the addrrange itself. - * - * \retval none - */ -static void -free_addrranges(struct list_head *list) -{ - while (!list_empty(list)) { - struct addrrange *ar; - - ar = list_entry(list->next, struct addrrange, ar_link); - - cfs_expr_list_free_list(&ar->ar_numaddr_ranges); - list_del(&ar->ar_link); - kfree(ar); - } -} - -/** - * Frees nidrange strutures of \a list. - * - * For each struct nidrange structure found on \a list it frees - * addrrange list attached to it and frees the nidrange itself. 
- * - * \retval none - */ -void -cfs_free_nidlist(struct list_head *list) -{ - struct list_head *pos, *next; - struct nidrange *nr; - - list_for_each_safe(pos, next, list) { - nr = list_entry(pos, struct nidrange, nr_link); - free_addrranges(&nr->nr_addrranges); - list_del(pos); - kfree(nr); - } -} -EXPORT_SYMBOL(cfs_free_nidlist); - -/** - * Parses nid range list. - * - * Parses with rigorous syntax and overflow checking \a str into - * \ [ ' ' \ ], compiles \a str into set of - * structures and links that structure to \a nidlist. The resulting - * list can be used to match a NID againts set of NIDS defined by \a - * str. - * \see cfs_match_nid - * - * \retval 1 on success - * \retval 0 otherwise - */ -int -cfs_parse_nidlist(char *str, int len, struct list_head *nidlist) -{ - struct cfs_lstr src; - struct cfs_lstr res; - int rc; - - src.ls_str = str; - src.ls_len = len; - INIT_LIST_HEAD(nidlist); - while (src.ls_str) { - rc = cfs_gettok(&src, ' ', &res); - if (!rc) { - cfs_free_nidlist(nidlist); - return 0; - } - rc = parse_nidrange(&res, nidlist); - if (!rc) { - cfs_free_nidlist(nidlist); - return 0; - } - } - return 1; -} -EXPORT_SYMBOL(cfs_parse_nidlist); - -/** - * Matches a nid (\a nid) against the compiled list of nidranges (\a nidlist). - * - * \see cfs_parse_nidlist() - * - * \retval 1 on match - * \retval 0 otherwises - */ -int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist) -{ - struct nidrange *nr; - struct addrrange *ar; - - list_for_each_entry(nr, nidlist, nr_link) { - if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid))) - continue; - if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid))) - continue; - if (nr->nr_all) - return 1; - list_for_each_entry(ar, &nr->nr_addrranges, ar_link) - if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid), - &ar->ar_numaddr_ranges)) - return 1; - } - return 0; -} -EXPORT_SYMBOL(cfs_match_nid); - -/** - * Print the network part of the nidrange \a nr into the specified \a buffer. 
- * - * \retval number of characters written - */ -static int -cfs_print_network(char *buffer, int count, struct nidrange *nr) -{ - struct netstrfns *nf = nr->nr_netstrfns; - - if (!nr->nr_netnum) - return scnprintf(buffer, count, "@%s", nf->nf_name); - else - return scnprintf(buffer, count, "@%s%u", - nf->nf_name, nr->nr_netnum); -} - -/** - * Print a list of addrrange (\a addrranges) into the specified \a buffer. - * At max \a count characters can be printed into \a buffer. - * - * \retval number of characters written - */ -static int -cfs_print_addrranges(char *buffer, int count, struct list_head *addrranges, - struct nidrange *nr) -{ - int i = 0; - struct addrrange *ar; - struct netstrfns *nf = nr->nr_netstrfns; - - list_for_each_entry(ar, addrranges, ar_link) { - if (i) - i += scnprintf(buffer + i, count - i, " "); - i += nf->nf_print_addrlist(buffer + i, count - i, - &ar->ar_numaddr_ranges); - i += cfs_print_network(buffer + i, count - i, nr); - } - return i; -} - -/** - * Print a list of nidranges (\a nidlist) into the specified \a buffer. - * At max \a count characters can be printed into \a buffer. - * Nidranges are separated by a space character. 
- * - * \retval number of characters written - */ -int cfs_print_nidlist(char *buffer, int count, struct list_head *nidlist) -{ - int i = 0; - struct nidrange *nr; - - if (count <= 0) - return 0; - - list_for_each_entry(nr, nidlist, nr_link) { - if (i) - i += scnprintf(buffer + i, count - i, " "); - - if (nr->nr_all) { - LASSERT(list_empty(&nr->nr_addrranges)); - i += scnprintf(buffer + i, count - i, "*"); - i += cfs_print_network(buffer + i, count - i, nr); - } else { - i += cfs_print_addrranges(buffer + i, count - i, - &nr->nr_addrranges, nr); - } - } - return i; -} -EXPORT_SYMBOL(cfs_print_nidlist); - -/** - * Determines minimum and maximum addresses for a single - * numeric address range - * - * \param ar - * \param min_nid - * \param max_nid - */ -static void cfs_ip_ar_min_max(struct addrrange *ar, __u32 *min_nid, - __u32 *max_nid) -{ - struct cfs_expr_list *el; - struct cfs_range_expr *re; - __u32 tmp_ip_addr = 0; - unsigned int min_ip[4] = {0}; - unsigned int max_ip[4] = {0}; - int re_count = 0; - - list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) { - list_for_each_entry(re, &el->el_exprs, re_link) { - min_ip[re_count] = re->re_lo; - max_ip[re_count] = re->re_hi; - re_count++; - } - } - - tmp_ip_addr = ((min_ip[0] << 24) | (min_ip[1] << 16) | - (min_ip[2] << 8) | min_ip[3]); - - if (min_nid) - *min_nid = tmp_ip_addr; - - tmp_ip_addr = ((max_ip[0] << 24) | (max_ip[1] << 16) | - (max_ip[2] << 8) | max_ip[3]); - - if (max_nid) - *max_nid = tmp_ip_addr; -} - -/** - * Determines minimum and maximum addresses for a single - * numeric address range - * - * \param ar - * \param min_nid - * \param max_nid - */ -static void cfs_num_ar_min_max(struct addrrange *ar, __u32 *min_nid, - __u32 *max_nid) -{ - struct cfs_expr_list *el; - struct cfs_range_expr *re; - unsigned int min_addr = 0; - unsigned int max_addr = 0; - - list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) { - list_for_each_entry(re, &el->el_exprs, re_link) { - if (re->re_lo < min_addr || 
!min_addr) - min_addr = re->re_lo; - if (re->re_hi > max_addr) - max_addr = re->re_hi; - } - } - - if (min_nid) - *min_nid = min_addr; - if (max_nid) - *max_nid = max_addr; -} - -/** - * Determines whether an expression list in an nidrange contains exactly - * one contiguous address range. Calls the correct netstrfns for the LND - * - * \param *nidlist - * - * \retval true if contiguous - * \retval false if not contiguous - */ -bool cfs_nidrange_is_contiguous(struct list_head *nidlist) -{ - struct nidrange *nr; - struct netstrfns *nf = NULL; - char *lndname = NULL; - int netnum = -1; - - list_for_each_entry(nr, nidlist, nr_link) { - nf = nr->nr_netstrfns; - if (!lndname) - lndname = nf->nf_name; - if (netnum == -1) - netnum = nr->nr_netnum; - - if (strcmp(lndname, nf->nf_name) || - netnum != nr->nr_netnum) - return false; - } - - if (!nf) - return false; - - if (!nf->nf_is_contiguous(nidlist)) - return false; - - return true; -} -EXPORT_SYMBOL(cfs_nidrange_is_contiguous); - -/** - * Determines whether an expression list in an num nidrange contains exactly - * one contiguous address range. 
- * - * \param *nidlist - * - * \retval true if contiguous - * \retval false if not contiguous - */ -static bool cfs_num_is_contiguous(struct list_head *nidlist) -{ - struct nidrange *nr; - struct addrrange *ar; - struct cfs_expr_list *el; - struct cfs_range_expr *re; - int last_hi = 0; - __u32 last_end_nid = 0; - __u32 current_start_nid = 0; - __u32 current_end_nid = 0; - - list_for_each_entry(nr, nidlist, nr_link) { - list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { - cfs_num_ar_min_max(ar, ¤t_start_nid, - ¤t_end_nid); - if (last_end_nid && - (current_start_nid - last_end_nid != 1)) - return false; - last_end_nid = current_end_nid; - list_for_each_entry(el, &ar->ar_numaddr_ranges, - el_link) { - list_for_each_entry(re, &el->el_exprs, - re_link) { - if (re->re_stride > 1) - return false; - else if (last_hi && - re->re_hi - last_hi != 1) - return false; - last_hi = re->re_hi; - } - } - } - } - - return true; -} - -/** - * Determines whether an expression list in an ip nidrange contains exactly - * one contiguous address range. 
- * - * \param *nidlist - * - * \retval true if contiguous - * \retval false if not contiguous - */ -static bool cfs_ip_is_contiguous(struct list_head *nidlist) -{ - struct nidrange *nr; - struct addrrange *ar; - struct cfs_expr_list *el; - struct cfs_range_expr *re; - int expr_count; - int last_hi = 255; - int last_diff = 0; - __u32 last_end_nid = 0; - __u32 current_start_nid = 0; - __u32 current_end_nid = 0; - - list_for_each_entry(nr, nidlist, nr_link) { - list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { - last_hi = 255; - last_diff = 0; - cfs_ip_ar_min_max(ar, ¤t_start_nid, - ¤t_end_nid); - if (last_end_nid && - (current_start_nid - last_end_nid != 1)) - return false; - last_end_nid = current_end_nid; - list_for_each_entry(el, &ar->ar_numaddr_ranges, - el_link) { - expr_count = 0; - list_for_each_entry(re, &el->el_exprs, - re_link) { - expr_count++; - if (re->re_stride > 1 || - (last_diff > 0 && last_hi != 255) || - (last_diff > 0 && last_hi == 255 && - re->re_lo > 0)) - return false; - last_hi = re->re_hi; - last_diff = re->re_hi - re->re_lo; - } - } - } - } - - return true; -} - -/** - * Takes a linked list of nidrange expressions, determines the minimum - * and maximum nid and creates appropriate nid structures - * - * \param *nidlist - * \param *min_nid - * \param *max_nid - */ -void cfs_nidrange_find_min_max(struct list_head *nidlist, char *min_nid, - char *max_nid, size_t nidstr_length) -{ - struct nidrange *nr; - struct netstrfns *nf = NULL; - int netnum = -1; - __u32 min_addr; - __u32 max_addr; - char *lndname = NULL; - char min_addr_str[IPSTRING_LENGTH]; - char max_addr_str[IPSTRING_LENGTH]; - - list_for_each_entry(nr, nidlist, nr_link) { - nf = nr->nr_netstrfns; - lndname = nf->nf_name; - if (netnum == -1) - netnum = nr->nr_netnum; - - nf->nf_min_max(nidlist, &min_addr, &max_addr); - } - nf->nf_addr2str(min_addr, min_addr_str, sizeof(min_addr_str)); - nf->nf_addr2str(max_addr, max_addr_str, sizeof(max_addr_str)); - - snprintf(min_nid, 
nidstr_length, "%s@%s%d", min_addr_str, lndname, - netnum); - snprintf(max_nid, nidstr_length, "%s@%s%d", max_addr_str, lndname, - netnum); -} -EXPORT_SYMBOL(cfs_nidrange_find_min_max); - -/** - * Determines the min and max NID values for num LNDs - * - * \param *nidlist - * \param *min_nid - * \param *max_nid - */ -static void cfs_num_min_max(struct list_head *nidlist, __u32 *min_nid, - __u32 *max_nid) -{ - struct nidrange *nr; - struct addrrange *ar; - unsigned int tmp_min_addr = 0; - unsigned int tmp_max_addr = 0; - unsigned int min_addr = 0; - unsigned int max_addr = 0; - - list_for_each_entry(nr, nidlist, nr_link) { - list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { - cfs_num_ar_min_max(ar, &tmp_min_addr, - &tmp_max_addr); - if (tmp_min_addr < min_addr || !min_addr) - min_addr = tmp_min_addr; - if (tmp_max_addr > max_addr) - max_addr = tmp_min_addr; - } - } - *max_nid = max_addr; - *min_nid = min_addr; -} - -/** - * Takes an nidlist and determines the minimum and maximum - * ip addresses. 
- * - * \param *nidlist - * \param *min_nid - * \param *max_nid - */ -static void cfs_ip_min_max(struct list_head *nidlist, __u32 *min_nid, - __u32 *max_nid) -{ - struct nidrange *nr; - struct addrrange *ar; - __u32 tmp_min_ip_addr = 0; - __u32 tmp_max_ip_addr = 0; - __u32 min_ip_addr = 0; - __u32 max_ip_addr = 0; - - list_for_each_entry(nr, nidlist, nr_link) { - list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { - cfs_ip_ar_min_max(ar, &tmp_min_ip_addr, - &tmp_max_ip_addr); - if (tmp_min_ip_addr < min_ip_addr || !min_ip_addr) - min_ip_addr = tmp_min_ip_addr; - if (tmp_max_ip_addr > max_ip_addr) - max_ip_addr = tmp_max_ip_addr; - } - } - - if (min_nid) - *min_nid = min_ip_addr; - if (max_nid) - *max_nid = max_ip_addr; -} - -static int -libcfs_lo_str2addr(const char *str, int nob, __u32 *addr) -{ - *addr = 0; - return 1; -} - -static void -libcfs_ip_addr2str(__u32 addr, char *str, size_t size) -{ - snprintf(str, size, "%u.%u.%u.%u", - (addr >> 24) & 0xff, (addr >> 16) & 0xff, - (addr >> 8) & 0xff, addr & 0xff); -} - -/* - * CAVEAT EMPTOR XscanfX - * I use "%n" at the end of a sscanf format to detect trailing junk. However - * sscanf may return immediately if it sees the terminating '0' in a string, so - * I initialise the %n variable to the expected length. If sscanf sets it; - * fine, if it doesn't, then the scan ended at the end of the string, which is - * fine too :) - */ -static int -libcfs_ip_str2addr(const char *str, int nob, __u32 *addr) -{ - unsigned int a; - unsigned int b; - unsigned int c; - unsigned int d; - int n = nob; /* XscanfX */ - - /* numeric IP? 
*/ - if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 && - n == nob && - !(a & ~0xff) && !(b & ~0xff) && - !(c & ~0xff) && !(d & ~0xff)) { - *addr = ((a << 24) | (b << 16) | (c << 8) | d); - return 1; - } - - return 0; -} - -/* Used by lnet/config.c so it can't be static */ -int -cfs_ip_addr_parse(char *str, int len, struct list_head *list) -{ - struct cfs_expr_list *el; - struct cfs_lstr src; - int rc; - int i; - - src.ls_str = str; - src.ls_len = len; - i = 0; - - while (src.ls_str) { - struct cfs_lstr res; - - if (!cfs_gettok(&src, '.', &res)) { - rc = -EINVAL; - goto out; - } - - rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el); - if (rc) - goto out; - - list_add_tail(&el->el_link, list); - i++; - } - - if (i == 4) - return 0; - - rc = -EINVAL; -out: - cfs_expr_list_free_list(list); - - return rc; -} - -static int -libcfs_ip_addr_range_print(char *buffer, int count, struct list_head *list) -{ - int i = 0, j = 0; - struct cfs_expr_list *el; - - list_for_each_entry(el, list, el_link) { - LASSERT(j++ < 4); - if (i) - i += scnprintf(buffer + i, count - i, "."); - i += cfs_expr_list_print(buffer + i, count - i, el); - } - return i; -} - -/** - * Matches address (\a addr) against address set encoded in \a list. 
- * - * \retval 1 if \a addr matches - * \retval 0 otherwise - */ -int -cfs_ip_addr_match(__u32 addr, struct list_head *list) -{ - struct cfs_expr_list *el; - int i = 0; - - list_for_each_entry_reverse(el, list, el_link) { - if (!cfs_expr_list_match(addr & 0xff, el)) - return 0; - addr >>= 8; - i++; - } - - return i == 4; -} - -static void -libcfs_decnum_addr2str(__u32 addr, char *str, size_t size) -{ - snprintf(str, size, "%u", addr); -} - -static int -libcfs_num_str2addr(const char *str, int nob, __u32 *addr) -{ - int n; - - n = nob; - if (sscanf(str, "0x%x%n", addr, &n) >= 1 && n == nob) - return 1; - - n = nob; - if (sscanf(str, "0X%x%n", addr, &n) >= 1 && n == nob) - return 1; - - n = nob; - if (sscanf(str, "%u%n", addr, &n) >= 1 && n == nob) - return 1; - - return 0; -} - -/** - * Nf_parse_addrlist method for networks using numeric addresses. - * - * Examples of such networks are gm and elan. - * - * \retval 0 if \a str parsed to numeric address - * \retval errno otherwise - */ -static int -libcfs_num_parse(char *str, int len, struct list_head *list) -{ - struct cfs_expr_list *el; - int rc; - - rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el); - if (!rc) - list_add_tail(&el->el_link, list); - - return rc; -} - -static int -libcfs_num_addr_range_print(char *buffer, int count, struct list_head *list) -{ - int i = 0, j = 0; - struct cfs_expr_list *el; - - list_for_each_entry(el, list, el_link) { - LASSERT(j++ < 1); - i += cfs_expr_list_print(buffer + i, count - i, el); - } - return i; -} - -/* - * Nf_match_addr method for networks using numeric addresses - * - * \retval 1 on match - * \retval 0 otherwise - */ -static int -libcfs_num_match(__u32 addr, struct list_head *numaddr) -{ - struct cfs_expr_list *el; - - LASSERT(!list_empty(numaddr)); - el = list_entry(numaddr->next, struct cfs_expr_list, el_link); - - return cfs_expr_list_match(addr, el); -} - -static struct netstrfns libcfs_netstrfns[] = { - { .nf_type = LOLND, - .nf_name = "lo", - 
.nf_modname = "klolnd", - .nf_addr2str = libcfs_decnum_addr2str, - .nf_str2addr = libcfs_lo_str2addr, - .nf_parse_addrlist = libcfs_num_parse, - .nf_print_addrlist = libcfs_num_addr_range_print, - .nf_match_addr = libcfs_num_match, - .nf_is_contiguous = cfs_num_is_contiguous, - .nf_min_max = cfs_num_min_max }, - { .nf_type = SOCKLND, - .nf_name = "tcp", - .nf_modname = "ksocklnd", - .nf_addr2str = libcfs_ip_addr2str, - .nf_str2addr = libcfs_ip_str2addr, - .nf_parse_addrlist = cfs_ip_addr_parse, - .nf_print_addrlist = libcfs_ip_addr_range_print, - .nf_match_addr = cfs_ip_addr_match, - .nf_is_contiguous = cfs_ip_is_contiguous, - .nf_min_max = cfs_ip_min_max }, - { .nf_type = O2IBLND, - .nf_name = "o2ib", - .nf_modname = "ko2iblnd", - .nf_addr2str = libcfs_ip_addr2str, - .nf_str2addr = libcfs_ip_str2addr, - .nf_parse_addrlist = cfs_ip_addr_parse, - .nf_print_addrlist = libcfs_ip_addr_range_print, - .nf_match_addr = cfs_ip_addr_match, - .nf_is_contiguous = cfs_ip_is_contiguous, - .nf_min_max = cfs_ip_min_max }, - { .nf_type = GNILND, - .nf_name = "gni", - .nf_modname = "kgnilnd", - .nf_addr2str = libcfs_decnum_addr2str, - .nf_str2addr = libcfs_num_str2addr, - .nf_parse_addrlist = libcfs_num_parse, - .nf_print_addrlist = libcfs_num_addr_range_print, - .nf_match_addr = libcfs_num_match, - .nf_is_contiguous = cfs_num_is_contiguous, - .nf_min_max = cfs_num_min_max }, - { .nf_type = GNIIPLND, - .nf_name = "gip", - .nf_modname = "kgnilnd", - .nf_addr2str = libcfs_ip_addr2str, - .nf_str2addr = libcfs_ip_str2addr, - .nf_parse_addrlist = cfs_ip_addr_parse, - .nf_print_addrlist = libcfs_ip_addr_range_print, - .nf_match_addr = cfs_ip_addr_match, - .nf_is_contiguous = cfs_ip_is_contiguous, - .nf_min_max = cfs_ip_min_max }, -}; - -static const size_t libcfs_nnetstrfns = ARRAY_SIZE(libcfs_netstrfns); - -static struct netstrfns * -libcfs_lnd2netstrfns(__u32 lnd) -{ - int i; - - for (i = 0; i < libcfs_nnetstrfns; i++) - if (lnd == libcfs_netstrfns[i].nf_type) - return 
&libcfs_netstrfns[i]; - - return NULL; -} - -static struct netstrfns * -libcfs_namenum2netstrfns(const char *name) -{ - struct netstrfns *nf; - int i; - - for (i = 0; i < libcfs_nnetstrfns; i++) { - nf = &libcfs_netstrfns[i]; - if (!strncmp(name, nf->nf_name, strlen(nf->nf_name))) - return nf; - } - return NULL; -} - -static struct netstrfns * -libcfs_name2netstrfns(const char *name) -{ - int i; - - for (i = 0; i < libcfs_nnetstrfns; i++) - if (!strcmp(libcfs_netstrfns[i].nf_name, name)) - return &libcfs_netstrfns[i]; - - return NULL; -} - -int -libcfs_isknown_lnd(__u32 lnd) -{ - return !!libcfs_lnd2netstrfns(lnd); -} -EXPORT_SYMBOL(libcfs_isknown_lnd); - -char * -libcfs_lnd2modname(__u32 lnd) -{ - struct netstrfns *nf = libcfs_lnd2netstrfns(lnd); - - return nf ? nf->nf_modname : NULL; -} -EXPORT_SYMBOL(libcfs_lnd2modname); - -int -libcfs_str2lnd(const char *str) -{ - struct netstrfns *nf = libcfs_name2netstrfns(str); - - if (nf) - return nf->nf_type; - - return -ENXIO; -} -EXPORT_SYMBOL(libcfs_str2lnd); - -char * -libcfs_lnd2str_r(__u32 lnd, char *buf, size_t buf_size) -{ - struct netstrfns *nf; - - nf = libcfs_lnd2netstrfns(lnd); - if (!nf) - snprintf(buf, buf_size, "?%u?", lnd); - else - snprintf(buf, buf_size, "%s", nf->nf_name); - - return buf; -} -EXPORT_SYMBOL(libcfs_lnd2str_r); - -char * -libcfs_net2str_r(__u32 net, char *buf, size_t buf_size) -{ - __u32 nnum = LNET_NETNUM(net); - __u32 lnd = LNET_NETTYP(net); - struct netstrfns *nf; - - nf = libcfs_lnd2netstrfns(lnd); - if (!nf) - snprintf(buf, buf_size, "<%u:%u>", lnd, nnum); - else if (!nnum) - snprintf(buf, buf_size, "%s", nf->nf_name); - else - snprintf(buf, buf_size, "%s%u", nf->nf_name, nnum); - - return buf; -} -EXPORT_SYMBOL(libcfs_net2str_r); - -char * -libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size) -{ - __u32 addr = LNET_NIDADDR(nid); - __u32 net = LNET_NIDNET(nid); - __u32 nnum = LNET_NETNUM(net); - __u32 lnd = LNET_NETTYP(net); - struct netstrfns *nf; - - if (nid == LNET_NID_ANY) 
{ - strncpy(buf, "", buf_size); - buf[buf_size - 1] = '\0'; - return buf; - } - - nf = libcfs_lnd2netstrfns(lnd); - if (!nf) { - snprintf(buf, buf_size, "%x@<%u:%u>", addr, lnd, nnum); - } else { - size_t addr_len; - - nf->nf_addr2str(addr, buf, buf_size); - addr_len = strlen(buf); - if (!nnum) - snprintf(buf + addr_len, buf_size - addr_len, "@%s", - nf->nf_name); - else - snprintf(buf + addr_len, buf_size - addr_len, "@%s%u", - nf->nf_name, nnum); - } - - return buf; -} -EXPORT_SYMBOL(libcfs_nid2str_r); - -static struct netstrfns * -libcfs_str2net_internal(const char *str, __u32 *net) -{ - struct netstrfns *nf = NULL; - int nob; - unsigned int netnum; - int i; - - for (i = 0; i < libcfs_nnetstrfns; i++) { - nf = &libcfs_netstrfns[i]; - if (!strncmp(str, nf->nf_name, strlen(nf->nf_name))) - break; - } - - if (i == libcfs_nnetstrfns) - return NULL; - - nob = strlen(nf->nf_name); - - if (strlen(str) == (unsigned int)nob) { - netnum = 0; - } else { - if (nf->nf_type == LOLND) /* net number not allowed */ - return NULL; - - str += nob; - i = strlen(str); - if (sscanf(str, "%u%n", &netnum, &i) < 1 || - i != (int)strlen(str)) - return NULL; - } - - *net = LNET_MKNET(nf->nf_type, netnum); - return nf; -} - -__u32 -libcfs_str2net(const char *str) -{ - __u32 net; - - if (libcfs_str2net_internal(str, &net)) - return net; - - return LNET_NIDNET(LNET_NID_ANY); -} -EXPORT_SYMBOL(libcfs_str2net); - -lnet_nid_t -libcfs_str2nid(const char *str) -{ - const char *sep = strchr(str, '@'); - struct netstrfns *nf; - __u32 net; - __u32 addr; - - if (sep) { - nf = libcfs_str2net_internal(sep + 1, &net); - if (!nf) - return LNET_NID_ANY; - } else { - sep = str + strlen(str); - net = LNET_MKNET(SOCKLND, 0); - nf = libcfs_lnd2netstrfns(SOCKLND); - LASSERT(nf); - } - - if (!nf->nf_str2addr(str, (int)(sep - str), &addr)) - return LNET_NID_ANY; - - return LNET_MKNID(net, addr); -} -EXPORT_SYMBOL(libcfs_str2nid); - -char * -libcfs_id2str(struct lnet_process_id id) -{ - char *str = 
libcfs_next_nidstring(); - - if (id.pid == LNET_PID_ANY) { - snprintf(str, LNET_NIDSTR_SIZE, - "LNET_PID_ANY-%s", libcfs_nid2str(id.nid)); - return str; - } - - snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s", - id.pid & LNET_PID_USERFLAG ? "U" : "", - id.pid & ~LNET_PID_USERFLAG, libcfs_nid2str(id.nid)); - return str; -} -EXPORT_SYMBOL(libcfs_id2str); - -int -libcfs_str2anynid(lnet_nid_t *nidp, const char *str) -{ - if (!strcmp(str, "*")) { - *nidp = LNET_NID_ANY; - return 1; - } - - *nidp = libcfs_str2nid(str); - return *nidp != LNET_NID_ANY; -} -EXPORT_SYMBOL(libcfs_str2anynid); diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c deleted file mode 100644 index 58294149f7b2..000000000000 --- a/drivers/staging/lustre/lnet/lnet/peer.c +++ /dev/null @@ -1,456 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lnet/lnet/peer.c - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include - -int -lnet_peer_tables_create(void) -{ - struct lnet_peer_table *ptable; - struct list_head *hash; - int i; - int j; - - the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(*ptable)); - if (!the_lnet.ln_peer_tables) { - CERROR("Failed to allocate cpu-partition peer tables\n"); - return -ENOMEM; - } - - cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { - INIT_LIST_HEAD(&ptable->pt_deathrow); - - hash = kvmalloc_cpt(LNET_PEER_HASH_SIZE * sizeof(*hash), - GFP_KERNEL, i); - if (!hash) { - CERROR("Failed to create peer hash table\n"); - lnet_peer_tables_destroy(); - return -ENOMEM; - } - - for (j = 0; j < LNET_PEER_HASH_SIZE; j++) - INIT_LIST_HEAD(&hash[j]); - ptable->pt_hash = hash; /* sign of initialization */ - } - - return 0; -} - -void -lnet_peer_tables_destroy(void) -{ - struct lnet_peer_table *ptable; - struct list_head *hash; - int i; - int j; - - if (!the_lnet.ln_peer_tables) - return; - - cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { - hash = ptable->pt_hash; - if (!hash) /* not initialized */ - break; - - LASSERT(list_empty(&ptable->pt_deathrow)); - - ptable->pt_hash = NULL; - for (j = 0; j < LNET_PEER_HASH_SIZE; j++) - LASSERT(list_empty(&hash[j])); - - kvfree(hash); - } - - cfs_percpt_free(the_lnet.ln_peer_tables); - the_lnet.ln_peer_tables = NULL; -} - -static void -lnet_peer_table_cleanup_locked(struct lnet_ni *ni, - struct lnet_peer_table *ptable) -{ - int i; - struct lnet_peer *lp; - struct lnet_peer *tmp; - - for (i = 0; i < LNET_PEER_HASH_SIZE; i++) { - list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i], - lp_hashlist) { - if (ni && ni != lp->lp_ni) - continue; - list_del_init(&lp->lp_hashlist); - /* Lose hash table's ref */ - ptable->pt_zombies++; - lnet_peer_decref_locked(lp); - } - } -} - -static void -lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable, - int cpt_locked) -{ - int i; - - for (i = 3; 
ptable->pt_zombies; i++) { - lnet_net_unlock(cpt_locked); - - if (is_power_of_2(i)) { - CDEBUG(D_WARNING, - "Waiting for %d zombies on peer table\n", - ptable->pt_zombies); - } - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ >> 1); - lnet_net_lock(cpt_locked); - } -} - -static void -lnet_peer_table_del_rtrs_locked(struct lnet_ni *ni, - struct lnet_peer_table *ptable, - int cpt_locked) -{ - struct lnet_peer *lp; - struct lnet_peer *tmp; - lnet_nid_t lp_nid; - int i; - - for (i = 0; i < LNET_PEER_HASH_SIZE; i++) { - list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i], - lp_hashlist) { - if (ni != lp->lp_ni) - continue; - - if (!lp->lp_rtr_refcount) - continue; - - lp_nid = lp->lp_nid; - - lnet_net_unlock(cpt_locked); - lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid); - lnet_net_lock(cpt_locked); - } - } -} - -void -lnet_peer_tables_cleanup(struct lnet_ni *ni) -{ - struct lnet_peer_table *ptable; - struct list_head deathrow; - struct lnet_peer *lp; - struct lnet_peer *temp; - int i; - - INIT_LIST_HEAD(&deathrow); - - LASSERT(the_lnet.ln_shutdown || ni); - /* - * If just deleting the peers for a NI, get rid of any routes these - * peers are gateways for. - */ - cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { - lnet_net_lock(i); - lnet_peer_table_del_rtrs_locked(ni, ptable, i); - lnet_net_unlock(i); - } - - /* - * Start the process of moving the applicable peers to - * deathrow. - */ - cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { - lnet_net_lock(i); - lnet_peer_table_cleanup_locked(ni, ptable); - lnet_net_unlock(i); - } - - /* Cleanup all entries on deathrow. 
*/ - cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { - lnet_net_lock(i); - lnet_peer_table_deathrow_wait_locked(ptable, i); - list_splice_init(&ptable->pt_deathrow, &deathrow); - lnet_net_unlock(i); - } - - list_for_each_entry_safe(lp, temp, &deathrow, lp_hashlist) { - list_del(&lp->lp_hashlist); - kfree(lp); - } -} - -void -lnet_destroy_peer_locked(struct lnet_peer *lp) -{ - struct lnet_peer_table *ptable; - - LASSERT(!lp->lp_refcount); - LASSERT(!lp->lp_rtr_refcount); - LASSERT(list_empty(&lp->lp_txq)); - LASSERT(list_empty(&lp->lp_hashlist)); - LASSERT(!lp->lp_txqnob); - - ptable = the_lnet.ln_peer_tables[lp->lp_cpt]; - LASSERT(ptable->pt_number > 0); - ptable->pt_number--; - - lnet_ni_decref_locked(lp->lp_ni, lp->lp_cpt); - lp->lp_ni = NULL; - - list_add(&lp->lp_hashlist, &ptable->pt_deathrow); - LASSERT(ptable->pt_zombies > 0); - ptable->pt_zombies--; -} - -struct lnet_peer * -lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid) -{ - struct list_head *peers; - struct lnet_peer *lp; - - LASSERT(!the_lnet.ln_shutdown); - - peers = &ptable->pt_hash[lnet_nid2peerhash(nid)]; - list_for_each_entry(lp, peers, lp_hashlist) { - if (lp->lp_nid == nid) { - lnet_peer_addref_locked(lp); - return lp; - } - } - - return NULL; -} - -int -lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt) -{ - struct lnet_peer_table *ptable; - struct lnet_peer *lp = NULL; - struct lnet_peer *lp2; - int cpt2; - int rc = 0; - - *lpp = NULL; - if (the_lnet.ln_shutdown) /* it's shutting down */ - return -ESHUTDOWN; - - /* cpt can be LNET_LOCK_EX if it's called from router functions */ - cpt2 = cpt != LNET_LOCK_EX ? 
cpt : lnet_cpt_of_nid_locked(nid); - - ptable = the_lnet.ln_peer_tables[cpt2]; - lp = lnet_find_peer_locked(ptable, nid); - if (lp) { - *lpp = lp; - return 0; - } - - if (!list_empty(&ptable->pt_deathrow)) { - lp = list_entry(ptable->pt_deathrow.next, - struct lnet_peer, lp_hashlist); - list_del(&lp->lp_hashlist); - } - - /* - * take extra refcount in case another thread has shutdown LNet - * and destroyed locks and peer-table before I finish the allocation - */ - ptable->pt_number++; - lnet_net_unlock(cpt); - - if (lp) - memset(lp, 0, sizeof(*lp)); - else - lp = kzalloc_cpt(sizeof(*lp), GFP_NOFS, cpt2); - - if (!lp) { - rc = -ENOMEM; - lnet_net_lock(cpt); - goto out; - } - - INIT_LIST_HEAD(&lp->lp_txq); - INIT_LIST_HEAD(&lp->lp_rtrq); - INIT_LIST_HEAD(&lp->lp_routes); - - lp->lp_notify = 0; - lp->lp_notifylnd = 0; - lp->lp_notifying = 0; - lp->lp_alive_count = 0; - lp->lp_timestamp = 0; - lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */ - lp->lp_last_alive = jiffies; /* assumes alive */ - lp->lp_last_query = 0; /* haven't asked NI yet */ - lp->lp_ping_timestamp = 0; - lp->lp_ping_feats = LNET_PING_FEAT_INVAL; - lp->lp_nid = nid; - lp->lp_cpt = cpt2; - lp->lp_refcount = 2; /* 1 for caller; 1 for hash */ - lp->lp_rtr_refcount = 0; - - lnet_net_lock(cpt); - - if (the_lnet.ln_shutdown) { - rc = -ESHUTDOWN; - goto out; - } - - lp2 = lnet_find_peer_locked(ptable, nid); - if (lp2) { - *lpp = lp2; - goto out; - } - - lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2); - if (!lp->lp_ni) { - rc = -EHOSTUNREACH; - goto out; - } - - lp->lp_txcredits = lp->lp_ni->ni_peertxcredits; - lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits; - lp->lp_rtrcredits = lnet_peer_buffer_credits(lp->lp_ni); - lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni); - - list_add_tail(&lp->lp_hashlist, - &ptable->pt_hash[lnet_nid2peerhash(nid)]); - ptable->pt_version++; - *lpp = lp; - - return 0; -out: - if (lp) - list_add(&lp->lp_hashlist, &ptable->pt_deathrow); - 
ptable->pt_number--; - return rc; -} - -void -lnet_debug_peer(lnet_nid_t nid) -{ - char *aliveness = "NA"; - struct lnet_peer *lp; - int rc; - int cpt; - - cpt = lnet_cpt_of_nid(nid); - lnet_net_lock(cpt); - - rc = lnet_nid2peer_locked(&lp, nid, cpt); - if (rc) { - lnet_net_unlock(cpt); - CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid)); - return; - } - - if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp)) - aliveness = lp->lp_alive ? "up" : "down"; - - CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n", - libcfs_nid2str(lp->lp_nid), lp->lp_refcount, - aliveness, lp->lp_ni->ni_peertxcredits, - lp->lp_rtrcredits, lp->lp_minrtrcredits, - lp->lp_txcredits, lp->lp_mintxcredits, lp->lp_txqnob); - - lnet_peer_decref_locked(lp); - - lnet_net_unlock(cpt); -} - -int -lnet_get_peer_info(__u32 peer_index, __u64 *nid, - char aliveness[LNET_MAX_STR_LEN], - __u32 *cpt_iter, __u32 *refcount, - __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits, - __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits, - __u32 *peer_tx_qnob) -{ - struct lnet_peer_table *peer_table; - struct lnet_peer *lp; - bool found = false; - int lncpt, j; - - /* get the number of CPTs */ - lncpt = cfs_percpt_number(the_lnet.ln_peer_tables); - - /* - * if the cpt number to be examined is >= the number of cpts in - * the system then indicate that there are no more cpts to examin - */ - if (*cpt_iter >= lncpt) - return -ENOENT; - - /* get the current table */ - peer_table = the_lnet.ln_peer_tables[*cpt_iter]; - /* if the ptable is NULL then there are no more cpts to examine */ - if (!peer_table) - return -ENOENT; - - lnet_net_lock(*cpt_iter); - - for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) { - struct list_head *peers = &peer_table->pt_hash[j]; - - list_for_each_entry(lp, peers, lp_hashlist) { - if (peer_index-- > 0) - continue; - - snprintf(aliveness, LNET_MAX_STR_LEN, "NA"); - if (lnet_isrouter(lp) || - lnet_peer_aliveness_enabled(lp)) - snprintf(aliveness, LNET_MAX_STR_LEN, - 
lp->lp_alive ? "up" : "down"); - - *nid = lp->lp_nid; - *refcount = lp->lp_refcount; - *ni_peer_tx_credits = lp->lp_ni->ni_peertxcredits; - *peer_tx_credits = lp->lp_txcredits; - *peer_rtr_credits = lp->lp_rtrcredits; - *peer_min_rtr_credits = lp->lp_mintxcredits; - *peer_tx_qnob = lp->lp_txqnob; - - found = true; - } - } - lnet_net_unlock(*cpt_iter); - - *cpt_iter = lncpt; - - return found ? 0 : -ENOENT; -} diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c deleted file mode 100644 index 6267d5e4bbd6..000000000000 --- a/drivers/staging/lustre/lnet/lnet/router.c +++ /dev/null @@ -1,1799 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2011, 2015, Intel Corporation. - * - * This file is part of Portals - * http://sourceforge.net/projects/sandiaportals/ - * - * Portals is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * Portals is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include -#include - -#define LNET_NRB_TINY_MIN 512 /* min value for each CPT */ -#define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4) -#define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */ -#define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4) -#define LNET_NRB_SMALL_PAGES 1 -#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */ -#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4) -#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \ - PAGE_SHIFT) - -static char *forwarding = ""; -module_param(forwarding, charp, 0444); -MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks"); - -static int tiny_router_buffers; -module_param(tiny_router_buffers, int, 0444); -MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router"); -static int small_router_buffers; -module_param(small_router_buffers, int, 0444); -MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router"); -static int large_router_buffers; -module_param(large_router_buffers, int, 0444); -MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router"); -static int peer_buffer_credits; -module_param(peer_buffer_credits, int, 0444); -MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer"); - -static int auto_down = 1; -module_param(auto_down, int, 0444); -MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error"); - -int -lnet_peer_buffer_credits(struct lnet_ni *ni) -{ - /* NI option overrides LNet default */ - if (ni->ni_peerrtrcredits > 0) - return ni->ni_peerrtrcredits; - if (peer_buffer_credits > 0) - return peer_buffer_credits; - - /* - * As an approximation, allow this peer the same number of router - * buffers as it is allowed outstanding sends - */ - return ni->ni_peertxcredits; -} - -/* forward ref's */ -static int lnet_router_checker(void *); - -static int check_routers_before_use; 
-module_param(check_routers_before_use, int, 0444); -MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use"); - -int avoid_asym_router_failure = 1; -module_param(avoid_asym_router_failure, int, 0644); -MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)"); - -static int dead_router_check_interval = 60; -module_param(dead_router_check_interval, int, 0644); -MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)"); - -static int live_router_check_interval = 60; -module_param(live_router_check_interval, int, 0644); -MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)"); - -static int router_ping_timeout = 50; -module_param(router_ping_timeout, int, 0644); -MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query"); - -int -lnet_peers_start_down(void) -{ - return check_routers_before_use; -} - -void -lnet_notify_locked(struct lnet_peer *lp, int notifylnd, int alive, - unsigned long when) -{ - if (time_before(when, lp->lp_timestamp)) { /* out of date information */ - CDEBUG(D_NET, "Out of date\n"); - return; - } - - lp->lp_timestamp = when; /* update timestamp */ - lp->lp_ping_deadline = 0; /* disable ping timeout */ - - if (lp->lp_alive_count && /* got old news */ - (!lp->lp_alive) == (!alive)) { /* new date for old news */ - CDEBUG(D_NET, "Old news\n"); - return; - } - - /* Flag that notification is outstanding */ - - lp->lp_alive_count++; - lp->lp_alive = !(!alive); /* 1 bit! 
*/ - lp->lp_notify = 1; - lp->lp_notifylnd |= notifylnd; - if (lp->lp_alive) - lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */ - - CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive); -} - -static void -lnet_ni_notify_locked(struct lnet_ni *ni, struct lnet_peer *lp) -{ - int alive; - int notifylnd; - - /* - * Notify only in 1 thread at any time to ensure ordered notification. - * NB individual events can be missed; the only guarantee is that you - * always get the most recent news - */ - if (lp->lp_notifying || !ni) - return; - - lp->lp_notifying = 1; - - while (lp->lp_notify) { - alive = lp->lp_alive; - notifylnd = lp->lp_notifylnd; - - lp->lp_notifylnd = 0; - lp->lp_notify = 0; - - if (notifylnd && ni->ni_lnd->lnd_notify) { - lnet_net_unlock(lp->lp_cpt); - - /* - * A new notification could happen now; I'll handle it - * when control returns to me - */ - ni->ni_lnd->lnd_notify(ni, lp->lp_nid, alive); - - lnet_net_lock(lp->lp_cpt); - } - } - - lp->lp_notifying = 0; -} - -static void -lnet_rtr_addref_locked(struct lnet_peer *lp) -{ - LASSERT(lp->lp_refcount > 0); - LASSERT(lp->lp_rtr_refcount >= 0); - - /* lnet_net_lock must be exclusively locked */ - lp->lp_rtr_refcount++; - if (lp->lp_rtr_refcount == 1) { - struct list_head *pos; - - /* a simple insertion sort */ - list_for_each_prev(pos, &the_lnet.ln_routers) { - struct lnet_peer *rtr; - - rtr = list_entry(pos, struct lnet_peer, lp_rtr_list); - if (rtr->lp_nid < lp->lp_nid) - break; - } - - list_add(&lp->lp_rtr_list, pos); - /* addref for the_lnet.ln_routers */ - lnet_peer_addref_locked(lp); - the_lnet.ln_routers_version++; - } -} - -static void -lnet_rtr_decref_locked(struct lnet_peer *lp) -{ - LASSERT(lp->lp_refcount > 0); - LASSERT(lp->lp_rtr_refcount > 0); - - /* lnet_net_lock must be exclusively locked */ - lp->lp_rtr_refcount--; - if (!lp->lp_rtr_refcount) { - LASSERT(list_empty(&lp->lp_routes)); - - if (lp->lp_rcd) { - list_add(&lp->lp_rcd->rcd_list, - &the_lnet.ln_rcd_deathrow); - 
lp->lp_rcd = NULL; - } - - list_del(&lp->lp_rtr_list); - /* decref for the_lnet.ln_routers */ - lnet_peer_decref_locked(lp); - the_lnet.ln_routers_version++; - } -} - -struct lnet_remotenet * -lnet_find_net_locked(__u32 net) -{ - struct lnet_remotenet *rnet; - struct list_head *rn_list; - - LASSERT(!the_lnet.ln_shutdown); - - rn_list = lnet_net2rnethash(net); - list_for_each_entry(rnet, rn_list, lrn_list) { - if (rnet->lrn_net == net) - return rnet; - } - return NULL; -} - -static void lnet_shuffle_seed(void) -{ - static int seeded; - struct lnet_ni *ni; - - if (seeded) - return; - - /* - * Nodes with small feet have little entropy - * the NID for this node gives the most entropy in the low bits - */ - list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { - __u32 lnd_type, seed; - - lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid)); - if (lnd_type != LOLND) { - seed = (LNET_NIDADDR(ni->ni_nid) | lnd_type); - add_device_randomness(&seed, sizeof(seed)); - } - } - - seeded = 1; -} - -/* NB expects LNET_LOCK held */ -static void -lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route) -{ - unsigned int len = 0; - unsigned int offset = 0; - struct list_head *e; - - lnet_shuffle_seed(); - - list_for_each(e, &rnet->lrn_routes) { - len++; - } - - /* len+1 positions to add a new entry */ - offset = prandom_u32_max(len + 1); - list_for_each(e, &rnet->lrn_routes) { - if (!offset) - break; - offset--; - } - list_add(&route->lr_list, e); - list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes); - - the_lnet.ln_remote_nets_version++; - lnet_rtr_addref_locked(route->lr_gateway); -} - -int -lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway, - unsigned int priority) -{ - struct list_head *e; - struct lnet_remotenet *rnet; - struct lnet_remotenet *rnet2; - struct lnet_route *route; - struct lnet_ni *ni; - int add_route; - int rc; - - CDEBUG(D_NET, "Add route: net %s hops %d priority %u gw %s\n", - libcfs_net2str(net), hops, priority, 
libcfs_nid2str(gateway)); - - if (gateway == LNET_NID_ANY || - LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND || - net == LNET_NIDNET(LNET_NID_ANY) || - LNET_NETTYP(net) == LOLND || - LNET_NIDNET(gateway) == net || - (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255))) - return -EINVAL; - - if (lnet_islocalnet(net)) /* it's a local network */ - return -EEXIST; - - /* Assume net, route, all new */ - route = kzalloc(sizeof(*route), GFP_NOFS); - rnet = kzalloc(sizeof(*rnet), GFP_NOFS); - if (!route || !rnet) { - CERROR("Out of memory creating route %s %d %s\n", - libcfs_net2str(net), hops, libcfs_nid2str(gateway)); - kfree(route); - kfree(rnet); - return -ENOMEM; - } - - INIT_LIST_HEAD(&rnet->lrn_routes); - rnet->lrn_net = net; - route->lr_hops = hops; - route->lr_net = net; - route->lr_priority = priority; - - lnet_net_lock(LNET_LOCK_EX); - - rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX); - if (rc) { - lnet_net_unlock(LNET_LOCK_EX); - - kfree(route); - kfree(rnet); - - if (rc == -EHOSTUNREACH) /* gateway is not on a local net */ - return rc; /* ignore the route entry */ - CERROR("Error %d creating route %s %d %s\n", rc, - libcfs_net2str(net), hops, - libcfs_nid2str(gateway)); - return rc; - } - - LASSERT(!the_lnet.ln_shutdown); - - rnet2 = lnet_find_net_locked(net); - if (!rnet2) { - /* new network */ - list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net)); - rnet2 = rnet; - } - - /* Search for a duplicate route (it's a NOOP if it is) */ - add_route = 1; - list_for_each(e, &rnet2->lrn_routes) { - struct lnet_route *route2; - - route2 = list_entry(e, struct lnet_route, lr_list); - if (route2->lr_gateway == route->lr_gateway) { - add_route = 0; - break; - } - - /* our lookups must be true */ - LASSERT(route2->lr_gateway->lp_nid != gateway); - } - - if (add_route) { - lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */ - lnet_add_route_to_rnet(rnet2, route); - - ni = route->lr_gateway->lp_ni; - lnet_net_unlock(LNET_LOCK_EX); - - /* 
XXX Assume alive */ - if (ni->ni_lnd->lnd_notify) - ni->ni_lnd->lnd_notify(ni, gateway, 1); - - lnet_net_lock(LNET_LOCK_EX); - } - - /* -1 for notify or !add_route */ - lnet_peer_decref_locked(route->lr_gateway); - lnet_net_unlock(LNET_LOCK_EX); - rc = 0; - - if (!add_route) { - rc = -EEXIST; - kfree(route); - } - - if (rnet != rnet2) - kfree(rnet); - - /* indicate to startup the router checker if configured */ - wake_up(&the_lnet.ln_rc_waitq); - - return rc; -} - -int -lnet_check_routes(void) -{ - struct lnet_remotenet *rnet; - struct lnet_route *route; - struct lnet_route *route2; - struct list_head *e1; - struct list_head *e2; - int cpt; - struct list_head *rn_list; - int i; - - cpt = lnet_net_lock_current(); - - for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) { - rn_list = &the_lnet.ln_remote_nets_hash[i]; - list_for_each(e1, rn_list) { - rnet = list_entry(e1, struct lnet_remotenet, lrn_list); - - route2 = NULL; - list_for_each(e2, &rnet->lrn_routes) { - lnet_nid_t nid1; - lnet_nid_t nid2; - int net; - - route = list_entry(e2, struct lnet_route, lr_list); - - if (!route2) { - route2 = route; - continue; - } - - if (route->lr_gateway->lp_ni == - route2->lr_gateway->lp_ni) - continue; - - nid1 = route->lr_gateway->lp_nid; - nid2 = route2->lr_gateway->lp_nid; - net = rnet->lrn_net; - - lnet_net_unlock(cpt); - - CERROR("Routes to %s via %s and %s not supported\n", - libcfs_net2str(net), - libcfs_nid2str(nid1), - libcfs_nid2str(nid2)); - return -EINVAL; - } - } - } - - lnet_net_unlock(cpt); - return 0; -} - -int -lnet_del_route(__u32 net, lnet_nid_t gw_nid) -{ - struct lnet_peer *gateway; - struct lnet_remotenet *rnet; - struct lnet_route *route; - struct list_head *e1; - struct list_head *e2; - int rc = -ENOENT; - struct list_head *rn_list; - int idx = 0; - - CDEBUG(D_NET, "Del route: net %s : gw %s\n", - libcfs_net2str(net), libcfs_nid2str(gw_nid)); - - /* - * NB Caller may specify either all routes via the given gateway - * or a specific route entry actual NIDs) 
- */ - lnet_net_lock(LNET_LOCK_EX); - if (net == LNET_NIDNET(LNET_NID_ANY)) - rn_list = &the_lnet.ln_remote_nets_hash[0]; - else - rn_list = lnet_net2rnethash(net); - - again: - list_for_each(e1, rn_list) { - rnet = list_entry(e1, struct lnet_remotenet, lrn_list); - - if (!(net == LNET_NIDNET(LNET_NID_ANY) || - net == rnet->lrn_net)) - continue; - - list_for_each(e2, &rnet->lrn_routes) { - route = list_entry(e2, struct lnet_route, lr_list); - - gateway = route->lr_gateway; - if (!(gw_nid == LNET_NID_ANY || - gw_nid == gateway->lp_nid)) - continue; - - list_del(&route->lr_list); - list_del(&route->lr_gwlist); - the_lnet.ln_remote_nets_version++; - - if (list_empty(&rnet->lrn_routes)) - list_del(&rnet->lrn_list); - else - rnet = NULL; - - lnet_rtr_decref_locked(gateway); - lnet_peer_decref_locked(gateway); - - lnet_net_unlock(LNET_LOCK_EX); - - kfree(route); - kfree(rnet); - - rc = 0; - lnet_net_lock(LNET_LOCK_EX); - goto again; - } - } - - if (net == LNET_NIDNET(LNET_NID_ANY) && - ++idx < LNET_REMOTE_NETS_HASH_SIZE) { - rn_list = &the_lnet.ln_remote_nets_hash[idx]; - goto again; - } - lnet_net_unlock(LNET_LOCK_EX); - - return rc; -} - -void -lnet_destroy_routes(void) -{ - lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY); -} - -int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg) -{ - int i, rc = -ENOENT, j; - - if (!the_lnet.ln_rtrpools) - return rc; - - for (i = 0; i < LNET_NRBPOOLS; i++) { - struct lnet_rtrbufpool *rbp; - - lnet_net_lock(LNET_LOCK_EX); - cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) { - if (i++ != idx) - continue; - - pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages; - pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers; - pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits; - pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits; - rc = 0; - break; - } - lnet_net_unlock(LNET_LOCK_EX); - } - - lnet_net_lock(LNET_LOCK_EX); - pool_cfg->pl_routing = the_lnet.ln_routing; - lnet_net_unlock(LNET_LOCK_EX); - 
- return rc; -} - -int -lnet_get_route(int idx, __u32 *net, __u32 *hops, - lnet_nid_t *gateway, __u32 *alive, __u32 *priority) -{ - struct list_head *e1; - struct list_head *e2; - struct lnet_remotenet *rnet; - struct lnet_route *route; - int cpt; - int i; - struct list_head *rn_list; - - cpt = lnet_net_lock_current(); - - for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) { - rn_list = &the_lnet.ln_remote_nets_hash[i]; - list_for_each(e1, rn_list) { - rnet = list_entry(e1, struct lnet_remotenet, lrn_list); - - list_for_each(e2, &rnet->lrn_routes) { - route = list_entry(e2, struct lnet_route, - lr_list); - - if (!idx--) { - *net = rnet->lrn_net; - *hops = route->lr_hops; - *priority = route->lr_priority; - *gateway = route->lr_gateway->lp_nid; - *alive = lnet_is_route_alive(route); - lnet_net_unlock(cpt); - return 0; - } - } - } - } - - lnet_net_unlock(cpt); - return -ENOENT; -} - -void -lnet_swap_pinginfo(struct lnet_ping_info *info) -{ - int i; - struct lnet_ni_status *stat; - - __swab32s(&info->pi_magic); - __swab32s(&info->pi_features); - __swab32s(&info->pi_pid); - __swab32s(&info->pi_nnis); - for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) { - stat = &info->pi_ni[i]; - __swab64s(&stat->ns_nid); - __swab32s(&stat->ns_status); - } -} - -/** - * parse router-checker pinginfo, record number of down NIs for remote - * networks on that router. - */ -static void -lnet_parse_rc_info(struct lnet_rc_data *rcd) -{ - struct lnet_ping_info *info = rcd->rcd_pinginfo; - struct lnet_peer *gw = rcd->rcd_gateway; - struct lnet_route *rte; - - if (!gw->lp_alive) - return; - - if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) - lnet_swap_pinginfo(info); - - /* NB always racing with network! 
*/ - if (info->pi_magic != LNET_PROTO_PING_MAGIC) { - CDEBUG(D_NET, "%s: Unexpected magic %08x\n", - libcfs_nid2str(gw->lp_nid), info->pi_magic); - gw->lp_ping_feats = LNET_PING_FEAT_INVAL; - return; - } - - gw->lp_ping_feats = info->pi_features; - if (!(gw->lp_ping_feats & LNET_PING_FEAT_MASK)) { - CDEBUG(D_NET, "%s: Unexpected features 0x%x\n", - libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats); - return; /* nothing I can understand */ - } - - if (!(gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) - return; /* can't carry NI status info */ - - list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) { - int down = 0; - int up = 0; - int i; - - if (gw->lp_ping_feats & LNET_PING_FEAT_RTE_DISABLED) { - rte->lr_downis = 1; - continue; - } - - for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) { - struct lnet_ni_status *stat = &info->pi_ni[i]; - lnet_nid_t nid = stat->ns_nid; - - if (nid == LNET_NID_ANY) { - CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n", - libcfs_nid2str(gw->lp_nid)); - gw->lp_ping_feats = LNET_PING_FEAT_INVAL; - return; - } - - if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND) - continue; - - if (stat->ns_status == LNET_NI_STATUS_DOWN) { - down++; - continue; - } - - if (stat->ns_status == LNET_NI_STATUS_UP) { - if (LNET_NIDNET(nid) == rte->lr_net) { - up = 1; - break; - } - continue; - } - - CDEBUG(D_NET, "%s: Unexpected status 0x%x\n", - libcfs_nid2str(gw->lp_nid), stat->ns_status); - gw->lp_ping_feats = LNET_PING_FEAT_INVAL; - return; - } - - if (up) { /* ignore downed NIs if NI for dest network is up */ - rte->lr_downis = 0; - continue; - } - /** - * if @down is zero and this route is single-hop, it means - * we can't find NI for target network - */ - if (!down && rte->lr_hops == 1) - down = 1; - - rte->lr_downis = down; - } -} - -static void -lnet_router_checker_event(struct lnet_event *event) -{ - struct lnet_rc_data *rcd = event->md.user_ptr; - struct lnet_peer *lp; - - LASSERT(rcd); - - if (event->unlinked) { - 
LNetInvalidateMDHandle(&rcd->rcd_mdh); - return; - } - - LASSERT(event->type == LNET_EVENT_SEND || - event->type == LNET_EVENT_REPLY); - - lp = rcd->rcd_gateway; - LASSERT(lp); - - /* - * NB: it's called with holding lnet_res_lock, we have a few - * places need to hold both locks at the same time, please take - * care of lock ordering - */ - lnet_net_lock(lp->lp_cpt); - if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) { - /* ignore if no longer a router or rcd is replaced */ - goto out; - } - - if (event->type == LNET_EVENT_SEND) { - lp->lp_ping_notsent = 0; - if (!event->status) - goto out; - } - - /* LNET_EVENT_REPLY */ - /* - * A successful REPLY means the router is up. If _any_ comms - * to the router fail I assume it's down (this will happen if - * we ping alive routers to try to detect router death before - * apps get burned). - */ - lnet_notify_locked(lp, 1, !event->status, jiffies); - - /* - * The router checker will wake up very shortly and do the - * actual notification. - * XXX If 'lp' stops being a router before then, it will still - * have the notification pending!!! 
- */ - if (avoid_asym_router_failure && !event->status) - lnet_parse_rc_info(rcd); - - out: - lnet_net_unlock(lp->lp_cpt); -} - -static void -lnet_wait_known_routerstate(void) -{ - struct lnet_peer *rtr; - struct list_head *entry; - int all_known; - - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); - - for (;;) { - int cpt = lnet_net_lock_current(); - - all_known = 1; - list_for_each(entry, &the_lnet.ln_routers) { - rtr = list_entry(entry, struct lnet_peer, lp_rtr_list); - - if (!rtr->lp_alive_count) { - all_known = 0; - break; - } - } - - lnet_net_unlock(cpt); - - if (all_known) - return; - - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - } -} - -void -lnet_router_ni_update_locked(struct lnet_peer *gw, __u32 net) -{ - struct lnet_route *rte; - - if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) { - list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) { - if (rte->lr_net == net) { - rte->lr_downis = 0; - break; - } - } - } -} - -static void -lnet_update_ni_status_locked(void) -{ - struct lnet_ni *ni; - time64_t now; - int timeout; - - LASSERT(the_lnet.ln_routing); - - timeout = router_ping_timeout + - max(live_router_check_interval, dead_router_check_interval); - - now = ktime_get_real_seconds(); - list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { - if (ni->ni_lnd->lnd_type == LOLND) - continue; - - if (now < ni->ni_last_alive + timeout) - continue; - - lnet_ni_lock(ni); - /* re-check with lock */ - if (now < ni->ni_last_alive + timeout) { - lnet_ni_unlock(ni); - continue; - } - - LASSERT(ni->ni_status); - - if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) { - CDEBUG(D_NET, "NI(%s:%d) status changed to down\n", - libcfs_nid2str(ni->ni_nid), timeout); - /* - * NB: so far, this is the only place to set - * NI status to "down" - */ - ni->ni_status->ns_status = LNET_NI_STATUS_DOWN; - } - lnet_ni_unlock(ni); - } -} - -static void -lnet_destroy_rc_data(struct lnet_rc_data *rcd) -{ - LASSERT(list_empty(&rcd->rcd_list)); - /* detached 
from network */ - LASSERT(LNetMDHandleIsInvalid(rcd->rcd_mdh)); - - if (rcd->rcd_gateway) { - int cpt = rcd->rcd_gateway->lp_cpt; - - lnet_net_lock(cpt); - lnet_peer_decref_locked(rcd->rcd_gateway); - lnet_net_unlock(cpt); - } - - kfree(rcd->rcd_pinginfo); - - kfree(rcd); -} - -static struct lnet_rc_data * -lnet_create_rc_data_locked(struct lnet_peer *gateway) -{ - struct lnet_rc_data *rcd = NULL; - struct lnet_ping_info *pi; - struct lnet_md md; - int rc; - int i; - - lnet_net_unlock(gateway->lp_cpt); - - rcd = kzalloc(sizeof(*rcd), GFP_NOFS); - if (!rcd) - goto out; - - LNetInvalidateMDHandle(&rcd->rcd_mdh); - INIT_LIST_HEAD(&rcd->rcd_list); - - pi = kzalloc(LNET_PINGINFO_SIZE, GFP_NOFS); - if (!pi) - goto out; - - for (i = 0; i < LNET_MAX_RTR_NIS; i++) { - pi->pi_ni[i].ns_nid = LNET_NID_ANY; - pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID; - } - rcd->rcd_pinginfo = pi; - - md.start = pi; - md.user_ptr = rcd; - md.length = LNET_PINGINFO_SIZE; - md.threshold = LNET_MD_THRESH_INF; - md.options = LNET_MD_TRUNCATE; - md.eq_handle = the_lnet.ln_rc_eqh; - - LASSERT(!LNetEQHandleIsInvalid(the_lnet.ln_rc_eqh)); - rc = LNetMDBind(md, LNET_UNLINK, &rcd->rcd_mdh); - if (rc < 0) { - CERROR("Can't bind MD: %d\n", rc); - goto out; - } - LASSERT(!rc); - - lnet_net_lock(gateway->lp_cpt); - /* router table changed or someone has created rcd for this gateway */ - if (!lnet_isrouter(gateway) || gateway->lp_rcd) { - lnet_net_unlock(gateway->lp_cpt); - goto out; - } - - lnet_peer_addref_locked(gateway); - rcd->rcd_gateway = gateway; - gateway->lp_rcd = rcd; - gateway->lp_ping_notsent = 0; - - return rcd; - - out: - if (rcd) { - if (!LNetMDHandleIsInvalid(rcd->rcd_mdh)) { - rc = LNetMDUnlink(rcd->rcd_mdh); - LASSERT(!rc); - } - lnet_destroy_rc_data(rcd); - } - - lnet_net_lock(gateway->lp_cpt); - return gateway->lp_rcd; -} - -static int -lnet_router_check_interval(struct lnet_peer *rtr) -{ - int secs; - - secs = rtr->lp_alive ? 
live_router_check_interval : - dead_router_check_interval; - if (secs < 0) - secs = 0; - - return secs; -} - -static void -lnet_ping_router_locked(struct lnet_peer *rtr) -{ - struct lnet_rc_data *rcd = NULL; - unsigned long now = jiffies; - int secs; - - lnet_peer_addref_locked(rtr); - - if (rtr->lp_ping_deadline && /* ping timed out? */ - time_after(now, rtr->lp_ping_deadline)) - lnet_notify_locked(rtr, 1, 0, now); - - /* Run any outstanding notifications */ - lnet_ni_notify_locked(rtr->lp_ni, rtr); - - if (!lnet_isrouter(rtr) || - the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) { - /* router table changed or router checker is shutting down */ - lnet_peer_decref_locked(rtr); - return; - } - - rcd = rtr->lp_rcd ? - rtr->lp_rcd : lnet_create_rc_data_locked(rtr); - - if (!rcd) - return; - - secs = lnet_router_check_interval(rtr); - - CDEBUG(D_NET, - "rtr %s %d: deadline %lu ping_notsent %d alive %d alive_count %d lp_ping_timestamp %lu\n", - libcfs_nid2str(rtr->lp_nid), secs, - rtr->lp_ping_deadline, rtr->lp_ping_notsent, - rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp); - - if (secs && !rtr->lp_ping_notsent && - time_after(now, rtr->lp_ping_timestamp + secs * HZ)) { - int rc; - struct lnet_process_id id; - struct lnet_handle_md mdh; - - id.nid = rtr->lp_nid; - id.pid = LNET_PID_LUSTRE; - CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id)); - - rtr->lp_ping_notsent = 1; - rtr->lp_ping_timestamp = now; - - mdh = rcd->rcd_mdh; - - if (!rtr->lp_ping_deadline) { - rtr->lp_ping_deadline = - jiffies + router_ping_timeout * HZ; - } - - lnet_net_unlock(rtr->lp_cpt); - - rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL, - LNET_PROTO_PING_MATCHBITS, 0); - - lnet_net_lock(rtr->lp_cpt); - if (rc) - rtr->lp_ping_notsent = 0; /* no event pending */ - } - - lnet_peer_decref_locked(rtr); -} - -int -lnet_router_checker_start(void) -{ - struct task_struct *task; - int rc; - int eqsz = 0; - - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN); - - if 
(check_routers_before_use && - dead_router_check_interval <= 0) { - LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n"); - return -EINVAL; - } - - init_completion(&the_lnet.ln_rc_signal); - - rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh); - if (rc) { - CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc); - return -ENOMEM; - } - - the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING; - task = kthread_run(lnet_router_checker, NULL, "router_checker"); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - CERROR("Can't start router checker thread: %d\n", rc); - /* block until event callback signals exit */ - wait_for_completion(&the_lnet.ln_rc_signal); - rc = LNetEQFree(the_lnet.ln_rc_eqh); - LASSERT(!rc); - the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; - return -ENOMEM; - } - - if (check_routers_before_use) { - /* - * Note that a helpful side-effect of pinging all known routers - * at startup is that it makes them drop stale connections they - * may have to a previous instance of me. 
- */ - lnet_wait_known_routerstate(); - } - - return 0; -} - -void -lnet_router_checker_stop(void) -{ - int rc; - - if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN) - return; - - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); - the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING; - /* wakeup the RC thread if it's sleeping */ - wake_up(&the_lnet.ln_rc_waitq); - - /* block until event callback signals exit */ - wait_for_completion(&the_lnet.ln_rc_signal); - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN); - - rc = LNetEQFree(the_lnet.ln_rc_eqh); - LASSERT(!rc); -} - -static void -lnet_prune_rc_data(int wait_unlink) -{ - struct lnet_rc_data *rcd; - struct lnet_rc_data *tmp; - struct lnet_peer *lp; - struct list_head head; - int i = 2; - - if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING && - list_empty(&the_lnet.ln_rcd_deathrow) && - list_empty(&the_lnet.ln_rcd_zombie))) - return; - - INIT_LIST_HEAD(&head); - - lnet_net_lock(LNET_LOCK_EX); - - if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) { - /* router checker is stopping, prune all */ - list_for_each_entry(lp, &the_lnet.ln_routers, - lp_rtr_list) { - if (!lp->lp_rcd) - continue; - - LASSERT(list_empty(&lp->lp_rcd->rcd_list)); - list_add(&lp->lp_rcd->rcd_list, - &the_lnet.ln_rcd_deathrow); - lp->lp_rcd = NULL; - } - } - - /* unlink all RCDs on deathrow list */ - list_splice_init(&the_lnet.ln_rcd_deathrow, &head); - - if (!list_empty(&head)) { - lnet_net_unlock(LNET_LOCK_EX); - - list_for_each_entry(rcd, &head, rcd_list) - LNetMDUnlink(rcd->rcd_mdh); - - lnet_net_lock(LNET_LOCK_EX); - } - - list_splice_init(&head, &the_lnet.ln_rcd_zombie); - - /* release all zombie RCDs */ - while (!list_empty(&the_lnet.ln_rcd_zombie)) { - list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie, - rcd_list) { - if (LNetMDHandleIsInvalid(rcd->rcd_mdh)) - list_move(&rcd->rcd_list, &head); - } - - wait_unlink = wait_unlink && - !list_empty(&the_lnet.ln_rcd_zombie); - - lnet_net_unlock(LNET_LOCK_EX); - - while 
(!list_empty(&head)) { - rcd = list_entry(head.next, - struct lnet_rc_data, rcd_list); - list_del_init(&rcd->rcd_list); - lnet_destroy_rc_data(rcd); - } - - if (!wait_unlink) - return; - - i++; - CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, - "Waiting for rc buffers to unlink\n"); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ / 4); - - lnet_net_lock(LNET_LOCK_EX); - } - - lnet_net_unlock(LNET_LOCK_EX); -} - -/* - * This function is called to check if the RC should block indefinitely. - * It's called from lnet_router_checker() as well as being passed to - * wait_event_interruptible() to avoid the lost wake_up problem. - * - * When it's called from wait_event_interruptible() it is necessary to - * also not sleep if the rc state is not running to avoid a deadlock - * when the system is shutting down - */ -static inline bool -lnet_router_checker_active(void) -{ - if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) - return true; - - /* - * Router Checker thread needs to run when routing is enabled in - * order to call lnet_update_ni_status_locked() - */ - if (the_lnet.ln_routing) - return true; - - return !list_empty(&the_lnet.ln_routers) && - (live_router_check_interval > 0 || - dead_router_check_interval > 0); -} - -static int -lnet_router_checker(void *arg) -{ - struct lnet_peer *rtr; - struct list_head *entry; - - while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) { - __u64 version; - int cpt; - int cpt2; - - cpt = lnet_net_lock_current(); -rescan: - version = the_lnet.ln_routers_version; - - list_for_each(entry, &the_lnet.ln_routers) { - rtr = list_entry(entry, struct lnet_peer, lp_rtr_list); - - cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid); - if (cpt != cpt2) { - lnet_net_unlock(cpt); - cpt = cpt2; - lnet_net_lock(cpt); - /* the routers list has changed */ - if (version != the_lnet.ln_routers_version) - goto rescan; - } - - lnet_ping_router_locked(rtr); - - /* NB dropped lock */ - if (version != the_lnet.ln_routers_version) { - /* the routers 
list has changed */ - goto rescan; - } - } - - if (the_lnet.ln_routing) - lnet_update_ni_status_locked(); - - lnet_net_unlock(cpt); - - lnet_prune_rc_data(0); /* don't wait for UNLINK */ - - /* - * Call schedule_timeout() here always adds 1 to load average - * because kernel counts # active tasks as nr_running - * + nr_uninterruptible. - */ - /* - * if there are any routes then wakeup every second. If - * there are no routes then sleep indefinitely until woken - * up by a user adding a route - */ - if (!lnet_router_checker_active()) - wait_event_interruptible(the_lnet.ln_rc_waitq, - lnet_router_checker_active()); - else - wait_event_interruptible_timeout(the_lnet.ln_rc_waitq, - false, - HZ); - } - - lnet_prune_rc_data(1); /* wait for UNLINK */ - - the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; - complete(&the_lnet.ln_rc_signal); - /* The unlink event callback will signal final completion */ - return 0; -} - -void -lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages) -{ - while (--npages >= 0) - __free_page(rb->rb_kiov[npages].bv_page); - - kfree(rb); -} - -static struct lnet_rtrbuf * -lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt) -{ - int npages = rbp->rbp_npages; - int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]); - struct page *page; - struct lnet_rtrbuf *rb; - int i; - - rb = kzalloc_cpt(sz, GFP_NOFS, cpt); - if (!rb) - return NULL; - - rb->rb_pool = rbp; - - for (i = 0; i < npages; i++) { - page = alloc_pages_node( - cfs_cpt_spread_node(lnet_cpt_table(), cpt), - GFP_KERNEL | __GFP_ZERO, 0); - if (!page) { - while (--i >= 0) - __free_page(rb->rb_kiov[i].bv_page); - - kfree(rb); - return NULL; - } - - rb->rb_kiov[i].bv_len = PAGE_SIZE; - rb->rb_kiov[i].bv_offset = 0; - rb->rb_kiov[i].bv_page = page; - } - - return rb; -} - -static void -lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt) -{ - int npages = rbp->rbp_npages; - struct list_head tmp; - struct lnet_rtrbuf *rb; - struct lnet_rtrbuf *temp; - - if (!rbp->rbp_nbuffers) /* not 
initialized or already freed */ - return; - - INIT_LIST_HEAD(&tmp); - - lnet_net_lock(cpt); - lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt); - list_splice_init(&rbp->rbp_bufs, &tmp); - rbp->rbp_req_nbuffers = 0; - rbp->rbp_nbuffers = 0; - rbp->rbp_credits = 0; - rbp->rbp_mincredits = 0; - lnet_net_unlock(cpt); - - /* Free buffers on the free list. */ - list_for_each_entry_safe(rb, temp, &tmp, rb_list) { - list_del(&rb->rb_list); - lnet_destroy_rtrbuf(rb, npages); - } -} - -static int -lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt) -{ - struct list_head rb_list; - struct lnet_rtrbuf *rb; - int num_rb; - int num_buffers = 0; - int old_req_nbufs; - int npages = rbp->rbp_npages; - - lnet_net_lock(cpt); - /* - * If we are called for less buffers than already in the pool, we - * just lower the req_nbuffers number and excess buffers will be - * thrown away as they are returned to the free list. Credits - * then get adjusted as well. - * If we already have enough buffers allocated to serve the - * increase requested, then we can treat that the same way as we - * do the decrease. - */ - num_rb = nbufs - rbp->rbp_nbuffers; - if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) { - rbp->rbp_req_nbuffers = nbufs; - lnet_net_unlock(cpt); - return 0; - } - /* - * store the older value of rbp_req_nbuffers and then set it to - * the new request to prevent lnet_return_rx_credits_locked() from - * freeing buffers that we need to keep around - */ - old_req_nbufs = rbp->rbp_req_nbuffers; - rbp->rbp_req_nbuffers = nbufs; - lnet_net_unlock(cpt); - - INIT_LIST_HEAD(&rb_list); - - /* - * allocate the buffers on a local list first. If all buffers are - * allocated successfully then join this list to the rbp buffer - * list. If not then free all allocated buffers. 
- */ - while (num_rb-- > 0) { - rb = lnet_new_rtrbuf(rbp, cpt); - if (!rb) { - CERROR("Failed to allocate %d route bufs of %d pages\n", - nbufs, npages); - - lnet_net_lock(cpt); - rbp->rbp_req_nbuffers = old_req_nbufs; - lnet_net_unlock(cpt); - - goto failed; - } - - list_add(&rb->rb_list, &rb_list); - num_buffers++; - } - - lnet_net_lock(cpt); - - list_splice_tail(&rb_list, &rbp->rbp_bufs); - rbp->rbp_nbuffers += num_buffers; - rbp->rbp_credits += num_buffers; - rbp->rbp_mincredits = rbp->rbp_credits; - /* - * We need to schedule blocked msg using the newly - * added buffers. - */ - while (!list_empty(&rbp->rbp_bufs) && - !list_empty(&rbp->rbp_msgs)) - lnet_schedule_blocked_locked(rbp); - - lnet_net_unlock(cpt); - - return 0; - -failed: - while (!list_empty(&rb_list)) { - rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list); - list_del(&rb->rb_list); - lnet_destroy_rtrbuf(rb, npages); - } - - return -ENOMEM; -} - -static void -lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages) -{ - INIT_LIST_HEAD(&rbp->rbp_msgs); - INIT_LIST_HEAD(&rbp->rbp_bufs); - - rbp->rbp_npages = npages; - rbp->rbp_credits = 0; - rbp->rbp_mincredits = 0; -} - -void -lnet_rtrpools_free(int keep_pools) -{ - struct lnet_rtrbufpool *rtrp; - int i; - - if (!the_lnet.ln_rtrpools) /* uninitialized or freed */ - return; - - cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i); - lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i); - lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i); - } - - if (!keep_pools) { - cfs_percpt_free(the_lnet.ln_rtrpools); - the_lnet.ln_rtrpools = NULL; - } -} - -static int -lnet_nrb_tiny_calculate(void) -{ - int nrbs = LNET_NRB_TINY; - - if (tiny_router_buffers < 0) { - LCONSOLE_ERROR_MSG(0x10c, - "tiny_router_buffers=%d invalid when routing enabled\n", - tiny_router_buffers); - return -EINVAL; - } - - if (tiny_router_buffers > 0) - nrbs = tiny_router_buffers; - - nrbs /= LNET_CPT_NUMBER; - return 
max(nrbs, LNET_NRB_TINY_MIN); -} - -static int -lnet_nrb_small_calculate(void) -{ - int nrbs = LNET_NRB_SMALL; - - if (small_router_buffers < 0) { - LCONSOLE_ERROR_MSG(0x10c, - "small_router_buffers=%d invalid when routing enabled\n", - small_router_buffers); - return -EINVAL; - } - - if (small_router_buffers > 0) - nrbs = small_router_buffers; - - nrbs /= LNET_CPT_NUMBER; - return max(nrbs, LNET_NRB_SMALL_MIN); -} - -static int -lnet_nrb_large_calculate(void) -{ - int nrbs = LNET_NRB_LARGE; - - if (large_router_buffers < 0) { - LCONSOLE_ERROR_MSG(0x10c, - "large_router_buffers=%d invalid when routing enabled\n", - large_router_buffers); - return -EINVAL; - } - - if (large_router_buffers > 0) - nrbs = large_router_buffers; - - nrbs /= LNET_CPT_NUMBER; - return max(nrbs, LNET_NRB_LARGE_MIN); -} - -int -lnet_rtrpools_alloc(int im_a_router) -{ - struct lnet_rtrbufpool *rtrp; - int nrb_tiny; - int nrb_small; - int nrb_large; - int rc; - int i; - - if (!strcmp(forwarding, "")) { - /* not set either way */ - if (!im_a_router) - return 0; - } else if (!strcmp(forwarding, "disabled")) { - /* explicitly disabled */ - return 0; - } else if (!strcmp(forwarding, "enabled")) { - /* explicitly enabled */ - } else { - LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n"); - return -EINVAL; - } - - nrb_tiny = lnet_nrb_tiny_calculate(); - if (nrb_tiny < 0) - return -EINVAL; - - nrb_small = lnet_nrb_small_calculate(); - if (nrb_small < 0) - return -EINVAL; - - nrb_large = lnet_nrb_large_calculate(); - if (nrb_large < 0) - return -EINVAL; - - the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(), - LNET_NRBPOOLS * - sizeof(struct lnet_rtrbufpool)); - if (!the_lnet.ln_rtrpools) { - LCONSOLE_ERROR_MSG(0x10c, - "Failed to initialize router buffe pool\n"); - return -ENOMEM; - } - - cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0); - rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX], - nrb_tiny, 
i); - if (rc) - goto failed; - - lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX], - LNET_NRB_SMALL_PAGES); - rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX], - nrb_small, i); - if (rc) - goto failed; - - lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX], - LNET_NRB_LARGE_PAGES); - rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX], - nrb_large, i); - if (rc) - goto failed; - } - - lnet_net_lock(LNET_LOCK_EX); - the_lnet.ln_routing = 1; - lnet_net_unlock(LNET_LOCK_EX); - - return 0; - - failed: - lnet_rtrpools_free(0); - return rc; -} - -static int -lnet_rtrpools_adjust_helper(int tiny, int small, int large) -{ - int nrb = 0; - int rc = 0; - int i; - struct lnet_rtrbufpool *rtrp; - - /* - * If the provided values for each buffer pool are different than the - * configured values, we need to take action. - */ - if (tiny >= 0) { - tiny_router_buffers = tiny; - nrb = lnet_nrb_tiny_calculate(); - cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX], - nrb, i); - if (rc) - return rc; - } - } - if (small >= 0) { - small_router_buffers = small; - nrb = lnet_nrb_small_calculate(); - cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX], - nrb, i); - if (rc) - return rc; - } - } - if (large >= 0) { - large_router_buffers = large; - nrb = lnet_nrb_large_calculate(); - cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX], - nrb, i); - if (rc) - return rc; - } - } - - return 0; -} - -int -lnet_rtrpools_adjust(int tiny, int small, int large) -{ - /* - * this function doesn't revert the changes if adding new buffers - * failed. It's up to the user space caller to revert the - * changes. 
- */ - if (!the_lnet.ln_routing) - return 0; - - return lnet_rtrpools_adjust_helper(tiny, small, large); -} - -int -lnet_rtrpools_enable(void) -{ - int rc = 0; - - if (the_lnet.ln_routing) - return 0; - - if (!the_lnet.ln_rtrpools) - /* - * If routing is turned off, and we have never - * initialized the pools before, just call the - * standard buffer pool allocation routine as - * if we are just configuring this for the first - * time. - */ - rc = lnet_rtrpools_alloc(1); - else - rc = lnet_rtrpools_adjust_helper(0, 0, 0); - if (rc) - return rc; - - lnet_net_lock(LNET_LOCK_EX); - the_lnet.ln_routing = 1; - - the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED; - lnet_net_unlock(LNET_LOCK_EX); - - return rc; -} - -void -lnet_rtrpools_disable(void) -{ - if (!the_lnet.ln_routing) - return; - - lnet_net_lock(LNET_LOCK_EX); - the_lnet.ln_routing = 0; - the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED; - - tiny_router_buffers = 0; - small_router_buffers = 0; - large_router_buffers = 0; - lnet_net_unlock(LNET_LOCK_EX); - lnet_rtrpools_free(1); -} - -int -lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, unsigned long when) -{ - struct lnet_peer *lp = NULL; - unsigned long now = jiffies; - int cpt = lnet_cpt_of_nid(nid); - - LASSERT(!in_interrupt()); - - CDEBUG(D_NET, "%s notifying %s: %s\n", - !ni ? "userspace" : libcfs_nid2str(ni->ni_nid), - libcfs_nid2str(nid), - alive ? "up" : "down"); - - if (ni && - LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) { - CWARN("Ignoring notification of %s %s by %s (different net)\n", - libcfs_nid2str(nid), alive ? "birth" : "death", - libcfs_nid2str(ni->ni_nid)); - return -EINVAL; - } - - /* can't do predictions... */ - if (time_after(when, now)) { - CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n", - !ni ? "userspace" : libcfs_nid2str(ni->ni_nid), - libcfs_nid2str(nid), alive ? 
"up" : "down", - (when - now) / HZ); - return -EINVAL; - } - - if (ni && !alive && /* LND telling me she's down */ - !auto_down) { /* auto-down disabled */ - CDEBUG(D_NET, "Auto-down disabled\n"); - return 0; - } - - lnet_net_lock(cpt); - - if (the_lnet.ln_shutdown) { - lnet_net_unlock(cpt); - return -ESHUTDOWN; - } - - lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid); - if (!lp) { - /* nid not found */ - lnet_net_unlock(cpt); - CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid)); - return 0; - } - - /* - * We can't fully trust LND on reporting exact peer last_alive - * if he notifies us about dead peer. For example ksocklnd can - * call us with when == _time_when_the_node_was_booted_ if - * no connections were successfully established - */ - if (ni && !alive && when < lp->lp_last_alive) - when = lp->lp_last_alive; - - lnet_notify_locked(lp, !ni, alive, when); - - if (ni) - lnet_ni_notify_locked(ni, lp); - - lnet_peer_decref_locked(lp); - - lnet_net_unlock(cpt); - return 0; -} -EXPORT_SYMBOL(lnet_notify); diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c deleted file mode 100644 index ae4b7f5953a0..000000000000 --- a/drivers/staging/lustre/lnet/lnet/router_proc.c +++ /dev/null @@ -1,907 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2011, 2012, Intel Corporation. - * - * This file is part of Portals - * http://sourceforge.net/projects/sandiaportals/ - * - * Portals is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * Portals is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include - -/* - * This is really lnet_proc.c. You might need to update sanity test 215 - * if any file format is changed. - */ - -#define LNET_LOFFT_BITS (sizeof(loff_t) * 8) -/* - * NB: max allowed LNET_CPT_BITS is 8 on 64-bit system and 2 on 32-bit system - */ -#define LNET_PROC_CPT_BITS (LNET_CPT_BITS + 1) -/* change version, 16 bits or 8 bits */ -#define LNET_PROC_VER_BITS max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, 64) / 4, 8) - -#define LNET_PROC_HASH_BITS LNET_PEER_HASH_BITS -/* - * bits for peer hash offset - * NB: we don't use the highest bit of *ppos because it's signed - */ -#define LNET_PROC_HOFF_BITS (LNET_LOFFT_BITS - \ - LNET_PROC_CPT_BITS - \ - LNET_PROC_VER_BITS - \ - LNET_PROC_HASH_BITS - 1) -/* bits for hash index + position */ -#define LNET_PROC_HPOS_BITS (LNET_PROC_HASH_BITS + LNET_PROC_HOFF_BITS) -/* bits for peer hash table + hash version */ -#define LNET_PROC_VPOS_BITS (LNET_PROC_HPOS_BITS + LNET_PROC_VER_BITS) - -#define LNET_PROC_CPT_MASK ((1ULL << LNET_PROC_CPT_BITS) - 1) -#define LNET_PROC_VER_MASK ((1ULL << LNET_PROC_VER_BITS) - 1) -#define LNET_PROC_HASH_MASK ((1ULL << LNET_PROC_HASH_BITS) - 1) -#define LNET_PROC_HOFF_MASK ((1ULL << LNET_PROC_HOFF_BITS) - 1) - -#define LNET_PROC_CPT_GET(pos) \ - (int)(((pos) >> LNET_PROC_VPOS_BITS) & LNET_PROC_CPT_MASK) - -#define LNET_PROC_VER_GET(pos) \ - (int)(((pos) >> LNET_PROC_HPOS_BITS) & LNET_PROC_VER_MASK) - -#define LNET_PROC_HASH_GET(pos) \ - (int)(((pos) >> LNET_PROC_HOFF_BITS) & LNET_PROC_HASH_MASK) - -#define LNET_PROC_HOFF_GET(pos) \ - (int)((pos) & LNET_PROC_HOFF_MASK) - -#define LNET_PROC_POS_MAKE(cpt, ver, hash, off) \ - (((((loff_t)(cpt)) & LNET_PROC_CPT_MASK) << LNET_PROC_VPOS_BITS) | \ - ((((loff_t)(ver)) & LNET_PROC_VER_MASK) << LNET_PROC_HPOS_BITS) | \ - ((((loff_t)(hash)) & LNET_PROC_HASH_MASK) << LNET_PROC_HOFF_BITS) | \ - ((off) & LNET_PROC_HOFF_MASK)) - -#define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK)) - 
-static int __proc_lnet_stats(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - int rc; - struct lnet_counters *ctrs; - int len; - char *tmpstr; - const int tmpsiz = 256; /* 7 %u and 4 %llu */ - - if (write) { - lnet_counters_reset(); - return 0; - } - - /* read */ - - ctrs = kzalloc(sizeof(*ctrs), GFP_NOFS); - if (!ctrs) - return -ENOMEM; - - tmpstr = kmalloc(tmpsiz, GFP_KERNEL); - if (!tmpstr) { - kfree(ctrs); - return -ENOMEM; - } - - lnet_counters_get(ctrs); - - len = snprintf(tmpstr, tmpsiz, - "%u %u %u %u %u %u %u %llu %llu %llu %llu", - ctrs->msgs_alloc, ctrs->msgs_max, - ctrs->errors, - ctrs->send_count, ctrs->recv_count, - ctrs->route_count, ctrs->drop_count, - ctrs->send_length, ctrs->recv_length, - ctrs->route_length, ctrs->drop_length); - - if (pos >= min_t(int, len, strlen(tmpstr))) - rc = 0; - else - rc = cfs_trace_copyout_string(buffer, nob, - tmpstr + pos, "\n"); - - kfree(tmpstr); - kfree(ctrs); - return rc; -} - -static int proc_lnet_stats(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_stats); -} - -static int proc_lnet_routes(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - const int tmpsiz = 256; - char *tmpstr; - char *s; - int rc = 0; - int len; - int ver; - int off; - - BUILD_BUG_ON(sizeof(loff_t) < 4); - - off = LNET_PROC_HOFF_GET(*ppos); - ver = LNET_PROC_VER_GET(*ppos); - - LASSERT(!write); - - if (!*lenp) - return 0; - - tmpstr = kmalloc(tmpsiz, GFP_KERNEL); - if (!tmpstr) - return -ENOMEM; - - s = tmpstr; /* points to current position in tmpstr[] */ - - if (!*ppos) { - s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n", - the_lnet.ln_routing ? 
"enabled" : "disabled"); - LASSERT(tmpstr + tmpsiz - s > 0); - - s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n", - "net", "hops", "priority", "state", "router"); - LASSERT(tmpstr + tmpsiz - s > 0); - - lnet_net_lock(0); - ver = (unsigned int)the_lnet.ln_remote_nets_version; - lnet_net_unlock(0); - *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); - } else { - struct list_head *n; - struct list_head *r; - struct lnet_route *route = NULL; - struct lnet_remotenet *rnet = NULL; - int skip = off - 1; - struct list_head *rn_list; - int i; - - lnet_net_lock(0); - - if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) { - lnet_net_unlock(0); - kfree(tmpstr); - return -ESTALE; - } - - for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && !route; i++) { - rn_list = &the_lnet.ln_remote_nets_hash[i]; - - n = rn_list->next; - - while (n != rn_list && !route) { - rnet = list_entry(n, struct lnet_remotenet, - lrn_list); - - r = rnet->lrn_routes.next; - - while (r != &rnet->lrn_routes) { - struct lnet_route *re; - - re = list_entry(r, struct lnet_route, - lr_list); - if (!skip) { - route = re; - break; - } - - skip--; - r = r->next; - } - - n = n->next; - } - } - - if (route) { - __u32 net = rnet->lrn_net; - __u32 hops = route->lr_hops; - unsigned int priority = route->lr_priority; - lnet_nid_t nid = route->lr_gateway->lp_nid; - int alive = lnet_is_route_alive(route); - - s += snprintf(s, tmpstr + tmpsiz - s, - "%-8s %4u %8u %7s %s\n", - libcfs_net2str(net), hops, - priority, - alive ? 
"up" : "down", - libcfs_nid2str(nid)); - LASSERT(tmpstr + tmpsiz - s > 0); - } - - lnet_net_unlock(0); - } - - len = s - tmpstr; /* how many bytes was written */ - - if (len > *lenp) { /* linux-supplied buffer is too small */ - rc = -EINVAL; - } else if (len > 0) { /* wrote something */ - if (copy_to_user(buffer, tmpstr, len)) { - rc = -EFAULT; - } else { - off += 1; - *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); - } - } - - kfree(tmpstr); - - if (!rc) - *lenp = len; - - return rc; -} - -static int proc_lnet_routers(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - int rc = 0; - char *tmpstr; - char *s; - const int tmpsiz = 256; - int len; - int ver; - int off; - - off = LNET_PROC_HOFF_GET(*ppos); - ver = LNET_PROC_VER_GET(*ppos); - - LASSERT(!write); - - if (!*lenp) - return 0; - - tmpstr = kmalloc(tmpsiz, GFP_KERNEL); - if (!tmpstr) - return -ENOMEM; - - s = tmpstr; /* points to current position in tmpstr[] */ - - if (!*ppos) { - s += snprintf(s, tmpstr + tmpsiz - s, - "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n", - "ref", "rtr_ref", "alive_cnt", "state", - "last_ping", "ping_sent", "deadline", - "down_ni", "router"); - LASSERT(tmpstr + tmpsiz - s > 0); - - lnet_net_lock(0); - ver = (unsigned int)the_lnet.ln_routers_version; - lnet_net_unlock(0); - *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); - } else { - struct list_head *r; - struct lnet_peer *peer = NULL; - int skip = off - 1; - - lnet_net_lock(0); - - if (ver != LNET_PROC_VERSION(the_lnet.ln_routers_version)) { - lnet_net_unlock(0); - - kfree(tmpstr); - return -ESTALE; - } - - r = the_lnet.ln_routers.next; - - while (r != &the_lnet.ln_routers) { - struct lnet_peer *lp; - - lp = list_entry(r, struct lnet_peer, lp_rtr_list); - if (!skip) { - peer = lp; - break; - } - - skip--; - r = r->next; - } - - if (peer) { - lnet_nid_t nid = peer->lp_nid; - unsigned long now = jiffies; - unsigned long deadline = peer->lp_ping_deadline; - int nrefs = peer->lp_refcount; - int nrtrrefs = 
peer->lp_rtr_refcount; - int alive_cnt = peer->lp_alive_count; - int alive = peer->lp_alive; - int pingsent = !peer->lp_ping_notsent; - int last_ping = (now - peer->lp_ping_timestamp) / HZ; - int down_ni = 0; - struct lnet_route *rtr; - - if ((peer->lp_ping_feats & - LNET_PING_FEAT_NI_STATUS)) { - list_for_each_entry(rtr, &peer->lp_routes, - lr_gwlist) { - /* - * downis on any route should be the - * number of downis on the gateway - */ - if (rtr->lr_downis) { - down_ni = rtr->lr_downis; - break; - } - } - } - - if (!deadline) - s += snprintf(s, tmpstr + tmpsiz - s, - "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n", - nrefs, nrtrrefs, alive_cnt, - alive ? "up" : "down", last_ping, - pingsent, "NA", down_ni, - libcfs_nid2str(nid)); - else - s += snprintf(s, tmpstr + tmpsiz - s, - "%-4d %7d %9d %6s %12d %9d %8lu %7d %s\n", - nrefs, nrtrrefs, alive_cnt, - alive ? "up" : "down", last_ping, - pingsent, - (deadline - now) / HZ, - down_ni, libcfs_nid2str(nid)); - LASSERT(tmpstr + tmpsiz - s > 0); - } - - lnet_net_unlock(0); - } - - len = s - tmpstr; /* how many bytes was written */ - - if (len > *lenp) { /* linux-supplied buffer is too small */ - rc = -EINVAL; - } else if (len > 0) { /* wrote something */ - if (copy_to_user(buffer, tmpstr, len)) { - rc = -EFAULT; - } else { - off += 1; - *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); - } - } - - kfree(tmpstr); - - if (!rc) - *lenp = len; - - return rc; -} - -static int proc_lnet_peers(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - const int tmpsiz = 256; - struct lnet_peer_table *ptable; - char *tmpstr; - char *s; - int cpt = LNET_PROC_CPT_GET(*ppos); - int ver = LNET_PROC_VER_GET(*ppos); - int hash = LNET_PROC_HASH_GET(*ppos); - int hoff = LNET_PROC_HOFF_GET(*ppos); - int rc = 0; - int len; - - BUILD_BUG_ON(LNET_PROC_HASH_BITS < LNET_PEER_HASH_BITS); - LASSERT(!write); - - if (!*lenp) - return 0; - - if (cpt >= LNET_CPT_NUMBER) { - *lenp = 0; - return 0; - } - - tmpstr = kmalloc(tmpsiz, 
GFP_KERNEL); - if (!tmpstr) - return -ENOMEM; - - s = tmpstr; /* points to current position in tmpstr[] */ - - if (!*ppos) { - s += snprintf(s, tmpstr + tmpsiz - s, - "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n", - "nid", "refs", "state", "last", "max", - "rtr", "min", "tx", "min", "queue"); - LASSERT(tmpstr + tmpsiz - s > 0); - - hoff++; - } else { - struct lnet_peer *peer; - struct list_head *p; - int skip; - again: - p = NULL; - peer = NULL; - skip = hoff - 1; - - lnet_net_lock(cpt); - ptable = the_lnet.ln_peer_tables[cpt]; - if (hoff == 1) - ver = LNET_PROC_VERSION(ptable->pt_version); - - if (ver != LNET_PROC_VERSION(ptable->pt_version)) { - lnet_net_unlock(cpt); - kfree(tmpstr); - return -ESTALE; - } - - while (hash < LNET_PEER_HASH_SIZE) { - if (!p) - p = ptable->pt_hash[hash].next; - - while (p != &ptable->pt_hash[hash]) { - struct lnet_peer *lp; - - lp = list_entry(p, struct lnet_peer, - lp_hashlist); - if (!skip) { - peer = lp; - - /* - * minor optimization: start from idx+1 - * on next iteration if we've just - * drained lp_hashlist - */ - if (lp->lp_hashlist.next == - &ptable->pt_hash[hash]) { - hoff = 1; - hash++; - } else { - hoff++; - } - - break; - } - - skip--; - p = lp->lp_hashlist.next; - } - - if (peer) - break; - - p = NULL; - hoff = 1; - hash++; - } - - if (peer) { - lnet_nid_t nid = peer->lp_nid; - int nrefs = peer->lp_refcount; - int lastalive = -1; - char *aliveness = "NA"; - int maxcr = peer->lp_ni->ni_peertxcredits; - int txcr = peer->lp_txcredits; - int mintxcr = peer->lp_mintxcredits; - int rtrcr = peer->lp_rtrcredits; - int minrtrcr = peer->lp_minrtrcredits; - int txqnob = peer->lp_txqnob; - - if (lnet_isrouter(peer) || - lnet_peer_aliveness_enabled(peer)) - aliveness = peer->lp_alive ? 
"up" : "down"; - - if (lnet_peer_aliveness_enabled(peer)) { - unsigned long now = jiffies; - long delta; - - delta = now - peer->lp_last_alive; - lastalive = (delta) / HZ; - - /* No need to mess up peers contents with - * arbitrarily long integers - it suffices to - * know that lastalive is more than 10000s old - */ - if (lastalive >= 10000) - lastalive = 9999; - } - - lnet_net_unlock(cpt); - - s += snprintf(s, tmpstr + tmpsiz - s, - "%-24s %4d %5s %5d %5d %5d %5d %5d %5d %d\n", - libcfs_nid2str(nid), nrefs, aliveness, - lastalive, maxcr, rtrcr, minrtrcr, txcr, - mintxcr, txqnob); - LASSERT(tmpstr + tmpsiz - s > 0); - - } else { /* peer is NULL */ - lnet_net_unlock(cpt); - } - - if (hash == LNET_PEER_HASH_SIZE) { - cpt++; - hash = 0; - hoff = 1; - if (!peer && cpt < LNET_CPT_NUMBER) - goto again; - } - } - - len = s - tmpstr; /* how many bytes was written */ - - if (len > *lenp) { /* linux-supplied buffer is too small */ - rc = -EINVAL; - } else if (len > 0) { /* wrote something */ - if (copy_to_user(buffer, tmpstr, len)) - rc = -EFAULT; - else - *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff); - } - - kfree(tmpstr); - - if (!rc) - *lenp = len; - - return rc; -} - -static int __proc_lnet_buffers(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - char *s; - char *tmpstr; - int tmpsiz; - int idx; - int len; - int rc; - int i; - - LASSERT(!write); - - /* (4 %d) * 4 * LNET_CPT_NUMBER */ - tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER; - tmpstr = kvmalloc(tmpsiz, GFP_KERNEL); - if (!tmpstr) - return -ENOMEM; - - s = tmpstr; /* points to current position in tmpstr[] */ - - s += snprintf(s, tmpstr + tmpsiz - s, - "%5s %5s %7s %7s\n", - "pages", "count", "credits", "min"); - LASSERT(tmpstr + tmpsiz - s > 0); - - if (!the_lnet.ln_rtrpools) - goto out; /* I'm not a router */ - - for (idx = 0; idx < LNET_NRBPOOLS; idx++) { - struct lnet_rtrbufpool *rbp; - - lnet_net_lock(LNET_LOCK_EX); - cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) { - s += 
snprintf(s, tmpstr + tmpsiz - s, - "%5d %5d %7d %7d\n", - rbp[idx].rbp_npages, - rbp[idx].rbp_nbuffers, - rbp[idx].rbp_credits, - rbp[idx].rbp_mincredits); - LASSERT(tmpstr + tmpsiz - s > 0); - } - lnet_net_unlock(LNET_LOCK_EX); - } - - out: - len = s - tmpstr; - - if (pos >= min_t(int, len, strlen(tmpstr))) - rc = 0; - else - rc = cfs_trace_copyout_string(buffer, nob, - tmpstr + pos, NULL); - - kvfree(tmpstr); - return rc; -} - -static int proc_lnet_buffers(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_buffers); -} - -static int proc_lnet_nis(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - int tmpsiz = 128 * LNET_CPT_NUMBER; - int rc = 0; - char *tmpstr; - char *s; - int len; - - LASSERT(!write); - - if (!*lenp) - return 0; - - tmpstr = kvmalloc(tmpsiz, GFP_KERNEL); - if (!tmpstr) - return -ENOMEM; - - s = tmpstr; /* points to current position in tmpstr[] */ - - if (!*ppos) { - s += snprintf(s, tmpstr + tmpsiz - s, - "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n", - "nid", "status", "alive", "refs", "peer", - "rtr", "max", "tx", "min"); - LASSERT(tmpstr + tmpsiz - s > 0); - } else { - struct list_head *n; - struct lnet_ni *ni = NULL; - int skip = *ppos - 1; - - lnet_net_lock(0); - - n = the_lnet.ln_nis.next; - - while (n != &the_lnet.ln_nis) { - struct lnet_ni *a_ni; - - a_ni = list_entry(n, struct lnet_ni, ni_list); - if (!skip) { - ni = a_ni; - break; - } - - skip--; - n = n->next; - } - - if (ni) { - struct lnet_tx_queue *tq; - char *stat; - time64_t now = ktime_get_real_seconds(); - int last_alive = -1; - int i; - int j; - - if (the_lnet.ln_routing) - last_alive = now - ni->ni_last_alive; - - /* @lo forever alive */ - if (ni->ni_lnd->lnd_type == LOLND) - last_alive = 0; - - lnet_ni_lock(ni); - LASSERT(ni->ni_status); - stat = (ni->ni_status->ns_status == - LNET_NI_STATUS_UP) ? 
"up" : "down"; - lnet_ni_unlock(ni); - - /* - * we actually output credits information for - * TX queue of each partition - */ - cfs_percpt_for_each(tq, i, ni->ni_tx_queues) { - for (j = 0; ni->ni_cpts && - j < ni->ni_ncpts; j++) { - if (i == ni->ni_cpts[j]) - break; - } - - if (j == ni->ni_ncpts) - continue; - - if (i) - lnet_net_lock(i); - - s += snprintf(s, tmpstr + tmpsiz - s, - "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n", - libcfs_nid2str(ni->ni_nid), stat, - last_alive, *ni->ni_refs[i], - ni->ni_peertxcredits, - ni->ni_peerrtrcredits, - tq->tq_credits_max, - tq->tq_credits, - tq->tq_credits_min); - if (i) - lnet_net_unlock(i); - } - LASSERT(tmpstr + tmpsiz - s > 0); - } - - lnet_net_unlock(0); - } - - len = s - tmpstr; /* how many bytes was written */ - - if (len > *lenp) { /* linux-supplied buffer is too small */ - rc = -EINVAL; - } else if (len > 0) { /* wrote something */ - if (copy_to_user(buffer, tmpstr, len)) - rc = -EFAULT; - else - *ppos += 1; - } - - kvfree(tmpstr); - - if (!rc) - *lenp = len; - - return rc; -} - -struct lnet_portal_rotors { - int pr_value; - const char *pr_name; - const char *pr_desc; -}; - -static struct lnet_portal_rotors portal_rotors[] = { - { - .pr_value = LNET_PTL_ROTOR_OFF, - .pr_name = "OFF", - .pr_desc = "Turn off message rotor for wildcard portals" - }, - { - .pr_value = LNET_PTL_ROTOR_ON, - .pr_name = "ON", - .pr_desc = "round-robin dispatch all PUT messages for wildcard portals" - }, - { - .pr_value = LNET_PTL_ROTOR_RR_RT, - .pr_name = "RR_RT", - .pr_desc = "round-robin dispatch routed PUT message for wildcard portals" - }, - { - .pr_value = LNET_PTL_ROTOR_HASH_RT, - .pr_name = "HASH_RT", - .pr_desc = "dispatch routed PUT message by hashing source NID for wildcard portals" - }, - { - .pr_value = -1, - .pr_name = NULL, - .pr_desc = NULL - }, -}; - -static int __proc_lnet_portal_rotor(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - const int buf_len = 128; - char *buf; - char *tmp; - int rc; - int i; 
- - buf = kmalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (!write) { - lnet_res_lock(0); - - for (i = 0; portal_rotors[i].pr_value >= 0; i++) { - if (portal_rotors[i].pr_value == portal_rotor) - break; - } - - LASSERT(portal_rotors[i].pr_value == portal_rotor); - lnet_res_unlock(0); - - rc = snprintf(buf, buf_len, - "{\n\tportals: all\n" - "\trotor: %s\n\tdescription: %s\n}", - portal_rotors[i].pr_name, - portal_rotors[i].pr_desc); - - if (pos >= min_t(int, rc, buf_len)) { - rc = 0; - } else { - rc = cfs_trace_copyout_string(buffer, nob, - buf + pos, "\n"); - } - goto out; - } - - rc = cfs_trace_copyin_string(buf, buf_len, buffer, nob); - if (rc < 0) - goto out; - - tmp = strim(buf); - - rc = -EINVAL; - lnet_res_lock(0); - for (i = 0; portal_rotors[i].pr_name; i++) { - if (!strncasecmp(portal_rotors[i].pr_name, tmp, - strlen(portal_rotors[i].pr_name))) { - portal_rotor = portal_rotors[i].pr_value; - rc = 0; - break; - } - } - lnet_res_unlock(0); -out: - kfree(buf); - return rc; -} - -static int proc_lnet_portal_rotor(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos) -{ - return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_portal_rotor); -} - -static struct ctl_table lnet_table[] = { - /* - * NB No .strategy entries have been provided since sysctl(8) prefers - * to go via /proc for portability. 
- */ - { - .procname = "stats", - .mode = 0644, - .proc_handler = &proc_lnet_stats, - }, - { - .procname = "routes", - .mode = 0444, - .proc_handler = &proc_lnet_routes, - }, - { - .procname = "routers", - .mode = 0444, - .proc_handler = &proc_lnet_routers, - }, - { - .procname = "peers", - .mode = 0444, - .proc_handler = &proc_lnet_peers, - }, - { - .procname = "buffers", - .mode = 0444, - .proc_handler = &proc_lnet_buffers, - }, - { - .procname = "nis", - .mode = 0444, - .proc_handler = &proc_lnet_nis, - }, - { - .procname = "portal_rotor", - .mode = 0644, - .proc_handler = &proc_lnet_portal_rotor, - }, - { - } -}; - -void lnet_router_debugfs_init(void) -{ - lustre_insert_debugfs(lnet_table); -} - -void lnet_router_debugfs_fini(void) -{ -} diff --git a/drivers/staging/lustre/lnet/selftest/Makefile b/drivers/staging/lustre/lnet/selftest/Makefile deleted file mode 100644 index 3ccc8966b566..000000000000 --- a/drivers/staging/lustre/lnet/selftest/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LNET_SELFTEST) := lnet_selftest.o - -lnet_selftest-y := console.o conrpc.o conctl.o framework.o timer.o rpc.o \ - module.o ping_test.o brw_test.o diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c deleted file mode 100644 index f1ee219bc8f3..000000000000 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c +++ /dev/null @@ -1,526 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/selftest/brw_test.c - * - * Author: Isaac Huang - */ - -#include "selftest.h" - -static int brw_srv_workitems = SFW_TEST_WI_MAX; -module_param(brw_srv_workitems, int, 0644); -MODULE_PARM_DESC(brw_srv_workitems, "# BRW server workitems"); - -static int brw_inject_errors; -module_param(brw_inject_errors, int, 0644); -MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default"); - -#define BRW_POISON 0xbeefbeefbeefbeefULL -#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL -#define BRW_MSIZE sizeof(u64) - -static void -brw_client_fini(struct sfw_test_instance *tsi) -{ - struct srpc_bulk *bulk; - struct sfw_test_unit *tsu; - - LASSERT(tsi->tsi_is_client); - - list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { - bulk = tsu->tsu_private; - if (!bulk) - continue; - - srpc_free_bulk(bulk); - tsu->tsu_private = NULL; - } -} - -static int -brw_client_init(struct sfw_test_instance *tsi) -{ - struct sfw_session *sn = tsi->tsi_batch->bat_session; - int flags; - int off; - int npg; - int len; - int opc; - struct srpc_bulk *bulk; - struct sfw_test_unit *tsu; - - LASSERT(sn); - LASSERT(tsi->tsi_is_client); - - if (!(sn->sn_features & 
LST_FEAT_BULK_LEN)) { - struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0; - - opc = breq->blk_opc; - flags = breq->blk_flags; - npg = breq->blk_npg; - /* - * NB: this is not going to work for variable page size, - * but we have to keep it for compatibility - */ - len = npg * PAGE_SIZE; - off = 0; - } else { - struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1; - - /* - * I should never get this step if it's unknown feature - * because make_session will reject unknown feature - */ - LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); - - opc = breq->blk_opc; - flags = breq->blk_flags; - len = breq->blk_len; - off = breq->blk_offset & ~PAGE_MASK; - npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; - } - - if (off % BRW_MSIZE) - return -EINVAL; - - if (npg > LNET_MAX_IOV || npg <= 0) - return -EINVAL; - - if (opc != LST_BRW_READ && opc != LST_BRW_WRITE) - return -EINVAL; - - if (flags != LST_BRW_CHECK_NONE && - flags != LST_BRW_CHECK_FULL && flags != LST_BRW_CHECK_SIMPLE) - return -EINVAL; - - list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { - bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid), - off, npg, len, opc == LST_BRW_READ); - if (!bulk) { - brw_client_fini(tsi); - return -ENOMEM; - } - - tsu->tsu_private = bulk; - } - - return 0; -} - -static int brw_inject_one_error(void) -{ - struct timespec64 ts; - - if (brw_inject_errors <= 0) - return 0; - - ktime_get_ts64(&ts); - - if (!((ts.tv_nsec / NSEC_PER_USEC) & 1)) - return 0; - - return brw_inject_errors--; -} - -static void -brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic) -{ - char *addr = page_address(pg) + off; - int i; - - LASSERT(addr); - LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE)); - - if (pattern == LST_BRW_CHECK_NONE) - return; - - if (magic == BRW_MAGIC) - magic += brw_inject_one_error(); - - if (pattern == LST_BRW_CHECK_SIMPLE) { - memcpy(addr, &magic, BRW_MSIZE); - if (len > BRW_MSIZE) { - addr += PAGE_SIZE - BRW_MSIZE; - memcpy(addr, &magic, BRW_MSIZE); - } - 
return; - } - - if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < len; i += BRW_MSIZE) - memcpy(addr + i, &magic, BRW_MSIZE); - return; - } - - LBUG(); -} - -static int -brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic) -{ - char *addr = page_address(pg) + off; - __u64 data = 0; /* make compiler happy */ - int i; - - LASSERT(addr); - LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE)); - - if (pattern == LST_BRW_CHECK_NONE) - return 0; - - if (pattern == LST_BRW_CHECK_SIMPLE) { - data = *((__u64 *)addr); - if (data != magic) - goto bad_data; - - if (len > BRW_MSIZE) { - addr += PAGE_SIZE - BRW_MSIZE; - data = *((__u64 *)addr); - if (data != magic) - goto bad_data; - } - return 0; - } - - if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < len; i += BRW_MSIZE) { - data = *(u64 *)(addr + i); - if (data != magic) - goto bad_data; - } - return 0; - } - - LBUG(); - -bad_data: - CERROR("Bad data in page %p: %#llx, %#llx expected\n", - pg, data, magic); - return 1; -} - -static void -brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic) -{ - int i; - struct page *pg; - - for (i = 0; i < bk->bk_niov; i++) { - int off, len; - - pg = bk->bk_iovs[i].bv_page; - off = bk->bk_iovs[i].bv_offset; - len = bk->bk_iovs[i].bv_len; - brw_fill_page(pg, off, len, pattern, magic); - } -} - -static int -brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic) -{ - int i; - struct page *pg; - - for (i = 0; i < bk->bk_niov; i++) { - int off, len; - - pg = bk->bk_iovs[i].bv_page; - off = bk->bk_iovs[i].bv_offset; - len = bk->bk_iovs[i].bv_len; - if (brw_check_page(pg, off, len, pattern, magic)) { - CERROR("Bulk page %p (%d/%d) is corrupted!\n", - pg, i, bk->bk_niov); - return 1; - } - } - - return 0; -} - -static int -brw_client_prep_rpc(struct sfw_test_unit *tsu, struct lnet_process_id dest, - struct srpc_client_rpc **rpcpp) -{ - struct srpc_bulk *bulk = tsu->tsu_private; - struct sfw_test_instance *tsi = tsu->tsu_instance; - struct sfw_session 
*sn = tsi->tsi_batch->bat_session; - struct srpc_client_rpc *rpc; - struct srpc_brw_reqst *req; - int flags; - int npg; - int len; - int opc; - int rc; - - LASSERT(sn); - LASSERT(bulk); - - if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { - struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0; - - opc = breq->blk_opc; - flags = breq->blk_flags; - npg = breq->blk_npg; - len = npg * PAGE_SIZE; - } else { - struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1; - int off; - - /* - * I should never get this step if it's unknown feature - * because make_session will reject unknown feature - */ - LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); - - opc = breq->blk_opc; - flags = breq->blk_flags; - len = breq->blk_len; - off = breq->blk_offset; - npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; - } - - rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); - if (rc) - return rc; - - memcpy(&rpc->crpc_bulk, bulk, offsetof(struct srpc_bulk, bk_iovs[npg])); - if (opc == LST_BRW_WRITE) - brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC); - else - brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_POISON); - - req = &rpc->crpc_reqstmsg.msg_body.brw_reqst; - req->brw_flags = flags; - req->brw_rw = opc; - req->brw_len = len; - - *rpcpp = rpc; - return 0; -} - -static void -brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) -{ - __u64 magic = BRW_MAGIC; - struct sfw_test_instance *tsi = tsu->tsu_instance; - struct sfw_session *sn = tsi->tsi_batch->bat_session; - struct srpc_msg *msg = &rpc->crpc_replymsg; - struct srpc_brw_reply *reply = &msg->msg_body.brw_reply; - struct srpc_brw_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; - - LASSERT(sn); - - if (rpc->crpc_status) { - CERROR("BRW RPC to %s failed with %d\n", - libcfs_id2str(rpc->crpc_dest), rpc->crpc_status); - if (!tsi->tsi_stopping) /* rpc could have been aborted */ - atomic_inc(&sn->sn_brw_errors); - return; - } - - if (msg->msg_magic != SRPC_MSG_MAGIC) { - __swab64s(&magic); - 
__swab32s(&reply->brw_status); - } - - CDEBUG(reply->brw_status ? D_WARNING : D_NET, - "BRW RPC to %s finished with brw_status: %d\n", - libcfs_id2str(rpc->crpc_dest), reply->brw_status); - - if (reply->brw_status) { - atomic_inc(&sn->sn_brw_errors); - rpc->crpc_status = -(int)reply->brw_status; - return; - } - - if (reqst->brw_rw == LST_BRW_WRITE) - return; - - if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) { - CERROR("Bulk data from %s is corrupted!\n", - libcfs_id2str(rpc->crpc_dest)); - atomic_inc(&sn->sn_brw_errors); - rpc->crpc_status = -EBADMSG; - } -} - -static void -brw_server_rpc_done(struct srpc_server_rpc *rpc) -{ - struct srpc_bulk *blk = rpc->srpc_bulk; - - if (!blk) - return; - - if (rpc->srpc_status) - CERROR("Bulk transfer %s %s has failed: %d\n", - blk->bk_sink ? "from" : "to", - libcfs_id2str(rpc->srpc_peer), rpc->srpc_status); - else - CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n", - blk->bk_niov, blk->bk_sink ? "from" : "to", - libcfs_id2str(rpc->srpc_peer)); - - sfw_free_pages(rpc); -} - -static int -brw_bulk_ready(struct srpc_server_rpc *rpc, int status) -{ - __u64 magic = BRW_MAGIC; - struct srpc_brw_reply *reply = &rpc->srpc_replymsg.msg_body.brw_reply; - struct srpc_brw_reqst *reqst; - struct srpc_msg *reqstmsg; - - LASSERT(rpc->srpc_bulk); - LASSERT(rpc->srpc_reqstbuf); - - reqstmsg = &rpc->srpc_reqstbuf->buf_msg; - reqst = &reqstmsg->msg_body.brw_reqst; - - if (status) { - CERROR("BRW bulk %s failed for RPC from %s: %d\n", - reqst->brw_rw == LST_BRW_READ ? 
"READ" : "WRITE", - libcfs_id2str(rpc->srpc_peer), status); - return -EIO; - } - - if (reqst->brw_rw == LST_BRW_READ) - return 0; - - if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) - __swab64s(&magic); - - if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic)) { - CERROR("Bulk data from %s is corrupted!\n", - libcfs_id2str(rpc->srpc_peer)); - reply->brw_status = EBADMSG; - } - - return 0; -} - -static int -brw_server_handle(struct srpc_server_rpc *rpc) -{ - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - struct srpc_msg *replymsg = &rpc->srpc_replymsg; - struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; - struct srpc_brw_reply *reply = &replymsg->msg_body.brw_reply; - struct srpc_brw_reqst *reqst = &reqstmsg->msg_body.brw_reqst; - int npg; - int rc; - - LASSERT(sv->sv_id == SRPC_SERVICE_BRW); - - if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) { - LASSERT(reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC)); - - __swab32s(&reqst->brw_rw); - __swab32s(&reqst->brw_len); - __swab32s(&reqst->brw_flags); - __swab64s(&reqst->brw_rpyid); - __swab64s(&reqst->brw_bulkid); - } - LASSERT(reqstmsg->msg_type == (__u32)srpc_service2request(sv->sv_id)); - - reply->brw_status = 0; - rpc->srpc_done = brw_server_rpc_done; - - if ((reqst->brw_rw != LST_BRW_READ && reqst->brw_rw != LST_BRW_WRITE) || - (reqst->brw_flags != LST_BRW_CHECK_NONE && - reqst->brw_flags != LST_BRW_CHECK_FULL && - reqst->brw_flags != LST_BRW_CHECK_SIMPLE)) { - reply->brw_status = EINVAL; - return 0; - } - - if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) { - replymsg->msg_ses_feats = LST_FEATS_MASK; - reply->brw_status = EPROTO; - return 0; - } - - if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) { - /* compat with old version */ - if (reqst->brw_len & ~PAGE_MASK) { - reply->brw_status = EINVAL; - return 0; - } - npg = reqst->brw_len >> PAGE_SHIFT; - - } else { - npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT; - } - - replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; - - if (!reqst->brw_len || 
npg > LNET_MAX_IOV) { - reply->brw_status = EINVAL; - return 0; - } - - rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg, - reqst->brw_len, - reqst->brw_rw == LST_BRW_WRITE); - if (rc) - return rc; - - if (reqst->brw_rw == LST_BRW_READ) - brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_MAGIC); - else - brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_POISON); - - return 0; -} - -struct sfw_test_client_ops brw_test_client; - -void brw_init_test_client(void) -{ - brw_test_client.tso_init = brw_client_init; - brw_test_client.tso_fini = brw_client_fini; - brw_test_client.tso_prep_rpc = brw_client_prep_rpc; - brw_test_client.tso_done_rpc = brw_client_done_rpc; -}; - -struct srpc_service brw_test_service; - -void brw_init_test_service(void) -{ - brw_test_service.sv_id = SRPC_SERVICE_BRW; - brw_test_service.sv_name = "brw_test"; - brw_test_service.sv_handler = brw_server_handle; - brw_test_service.sv_bulk_ready = brw_bulk_ready; - brw_test_service.sv_wi_total = brw_srv_workitems; -} diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c deleted file mode 100644 index 906d82d90c0c..000000000000 --- a/drivers/staging/lustre/lnet/selftest/conctl.c +++ /dev/null @@ -1,801 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/selftest/conctl.c - * - * IOC handle in kernel - * - * Author: Liang Zhen - */ - -#include -#include -#include "console.h" - -static int -lst_session_new_ioctl(struct lstio_session_new_args *args) -{ - char name[LST_NAME_SIZE + 1]; - int rc; - - if (!args->lstio_ses_idp || /* address for output sid */ - !args->lstio_ses_key || /* no key is specified */ - !args->lstio_ses_namep || /* session name */ - args->lstio_ses_nmlen <= 0 || - args->lstio_ses_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (copy_from_user(name, args->lstio_ses_namep, - args->lstio_ses_nmlen)) { - return -EFAULT; - } - - name[args->lstio_ses_nmlen] = 0; - - rc = lstcon_session_new(name, - args->lstio_ses_key, - args->lstio_ses_feats, - args->lstio_ses_timeout, - args->lstio_ses_force, - args->lstio_ses_idp); - - return rc; -} - -static int -lst_session_end_ioctl(struct lstio_session_end_args *args) -{ - if (args->lstio_ses_key != console_session.ses_key) - return -EACCES; - - return lstcon_session_end(); -} - -static int -lst_session_info_ioctl(struct lstio_session_info_args *args) -{ - /* no checking of key */ - - if (!args->lstio_ses_idp || /* address for output sid */ - !args->lstio_ses_keyp || /* address for output key */ - !args->lstio_ses_featp || /* address for output features */ - !args->lstio_ses_ndinfo || /* address for output ndinfo */ - !args->lstio_ses_namep || /* address for output name */ - args->lstio_ses_nmlen <= 0 || - args->lstio_ses_nmlen > LST_NAME_SIZE) - return -EINVAL; - - return 
lstcon_session_info(args->lstio_ses_idp, - args->lstio_ses_keyp, - args->lstio_ses_featp, - args->lstio_ses_ndinfo, - args->lstio_ses_namep, - args->lstio_ses_nmlen); -} - -static int -lst_debug_ioctl(struct lstio_debug_args *args) -{ - char name[LST_NAME_SIZE + 1]; - int client = 1; - int rc; - - if (args->lstio_dbg_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_dbg_resultp) - return -EINVAL; - - if (args->lstio_dbg_namep && /* name of batch/group */ - (args->lstio_dbg_nmlen <= 0 || - args->lstio_dbg_nmlen > LST_NAME_SIZE)) - return -EINVAL; - - if (args->lstio_dbg_namep) { - - if (copy_from_user(name, args->lstio_dbg_namep, - args->lstio_dbg_nmlen)) - return -EFAULT; - - name[args->lstio_dbg_nmlen] = 0; - } - - rc = -EINVAL; - - switch (args->lstio_dbg_type) { - case LST_OPC_SESSION: - rc = lstcon_session_debug(args->lstio_dbg_timeout, - args->lstio_dbg_resultp); - break; - - case LST_OPC_BATCHSRV: - client = 0; - /* fall through */ - case LST_OPC_BATCHCLI: - if (!args->lstio_dbg_namep) - goto out; - - rc = lstcon_batch_debug(args->lstio_dbg_timeout, - name, client, args->lstio_dbg_resultp); - break; - - case LST_OPC_GROUP: - if (!args->lstio_dbg_namep) - goto out; - - rc = lstcon_group_debug(args->lstio_dbg_timeout, - name, args->lstio_dbg_resultp); - break; - - case LST_OPC_NODES: - if (args->lstio_dbg_count <= 0 || - !args->lstio_dbg_idsp) - goto out; - - rc = lstcon_nodes_debug(args->lstio_dbg_timeout, - args->lstio_dbg_count, - args->lstio_dbg_idsp, - args->lstio_dbg_resultp); - break; - - default: - break; - } - -out: - return rc; -} - -static int -lst_group_add_ioctl(struct lstio_group_add_args *args) -{ - char name[LST_NAME_SIZE + 1]; - int rc; - - if (args->lstio_grp_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_grp_namep || - args->lstio_grp_nmlen <= 0 || - args->lstio_grp_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (copy_from_user(name, args->lstio_grp_namep, - args->lstio_grp_nmlen)) - return 
-EFAULT; - - name[args->lstio_grp_nmlen] = 0; - - rc = lstcon_group_add(name); - - return rc; -} - -static int -lst_group_del_ioctl(struct lstio_group_del_args *args) -{ - int rc; - char name[LST_NAME_SIZE + 1]; - - if (args->lstio_grp_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_grp_namep || - args->lstio_grp_nmlen <= 0 || - args->lstio_grp_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (copy_from_user(name, args->lstio_grp_namep, - args->lstio_grp_nmlen)) - return -EFAULT; - - name[args->lstio_grp_nmlen] = 0; - - rc = lstcon_group_del(name); - - return rc; -} - -static int -lst_group_update_ioctl(struct lstio_group_update_args *args) -{ - int rc; - char name[LST_NAME_SIZE + 1]; - - if (args->lstio_grp_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_grp_resultp || - !args->lstio_grp_namep || - args->lstio_grp_nmlen <= 0 || - args->lstio_grp_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (copy_from_user(name, args->lstio_grp_namep, - args->lstio_grp_nmlen)) - return -EFAULT; - - name[args->lstio_grp_nmlen] = 0; - - switch (args->lstio_grp_opc) { - case LST_GROUP_CLEAN: - rc = lstcon_group_clean(name, args->lstio_grp_args); - break; - - case LST_GROUP_REFRESH: - rc = lstcon_group_refresh(name, args->lstio_grp_resultp); - break; - - case LST_GROUP_RMND: - if (args->lstio_grp_count <= 0 || - !args->lstio_grp_idsp) { - rc = -EINVAL; - break; - } - rc = lstcon_nodes_remove(name, args->lstio_grp_count, - args->lstio_grp_idsp, - args->lstio_grp_resultp); - break; - - default: - rc = -EINVAL; - break; - } - - return rc; -} - -static int -lst_nodes_add_ioctl(struct lstio_group_nodes_args *args) -{ - unsigned int feats; - int rc; - char name[LST_NAME_SIZE + 1]; - - if (args->lstio_grp_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_grp_idsp || /* array of ids */ - args->lstio_grp_count <= 0 || - !args->lstio_grp_resultp || - !args->lstio_grp_featp || - !args->lstio_grp_namep || - args->lstio_grp_nmlen 
<= 0 || - args->lstio_grp_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (copy_from_user(name, args->lstio_grp_namep, - args->lstio_grp_nmlen)) - return -EFAULT; - - name[args->lstio_grp_nmlen] = 0; - - rc = lstcon_nodes_add(name, args->lstio_grp_count, - args->lstio_grp_idsp, &feats, - args->lstio_grp_resultp); - - if (!rc && - copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) { - return -EINVAL; - } - - return rc; -} - -static int -lst_group_list_ioctl(struct lstio_group_list_args *args) -{ - if (args->lstio_grp_key != console_session.ses_key) - return -EACCES; - - if (args->lstio_grp_idx < 0 || - !args->lstio_grp_namep || - args->lstio_grp_nmlen <= 0 || - args->lstio_grp_nmlen > LST_NAME_SIZE) - return -EINVAL; - - return lstcon_group_list(args->lstio_grp_idx, - args->lstio_grp_nmlen, - args->lstio_grp_namep); -} - -static int -lst_group_info_ioctl(struct lstio_group_info_args *args) -{ - char name[LST_NAME_SIZE + 1]; - int ndent; - int index; - int rc; - - if (args->lstio_grp_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_grp_namep || - args->lstio_grp_nmlen <= 0 || - args->lstio_grp_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (!args->lstio_grp_entp && /* output: group entry */ - !args->lstio_grp_dentsp) /* output: node entry */ - return -EINVAL; - - if (args->lstio_grp_dentsp) { /* have node entry */ - if (!args->lstio_grp_idxp || /* node index */ - !args->lstio_grp_ndentp) /* # of node entry */ - return -EINVAL; - - if (copy_from_user(&ndent, args->lstio_grp_ndentp, - sizeof(ndent)) || - copy_from_user(&index, args->lstio_grp_idxp, - sizeof(index))) - return -EFAULT; - - if (ndent <= 0 || index < 0) - return -EINVAL; - } - - if (copy_from_user(name, args->lstio_grp_namep, - args->lstio_grp_nmlen)) - return -EFAULT; - - name[args->lstio_grp_nmlen] = 0; - - rc = lstcon_group_info(name, args->lstio_grp_entp, - &index, &ndent, args->lstio_grp_dentsp); - - if (rc) - return rc; - - if (args->lstio_grp_dentsp && - 
(copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) || - copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent)))) - return -EFAULT; - - return 0; -} - -static int -lst_batch_add_ioctl(struct lstio_batch_add_args *args) -{ - int rc; - char name[LST_NAME_SIZE + 1]; - - if (args->lstio_bat_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_bat_namep || - args->lstio_bat_nmlen <= 0 || - args->lstio_bat_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (copy_from_user(name, args->lstio_bat_namep, - args->lstio_bat_nmlen)) - return -EFAULT; - - name[args->lstio_bat_nmlen] = 0; - - rc = lstcon_batch_add(name); - - return rc; -} - -static int -lst_batch_run_ioctl(struct lstio_batch_run_args *args) -{ - int rc; - char name[LST_NAME_SIZE + 1]; - - if (args->lstio_bat_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_bat_namep || - args->lstio_bat_nmlen <= 0 || - args->lstio_bat_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (copy_from_user(name, args->lstio_bat_namep, - args->lstio_bat_nmlen)) - return -EFAULT; - - name[args->lstio_bat_nmlen] = 0; - - rc = lstcon_batch_run(name, args->lstio_bat_timeout, - args->lstio_bat_resultp); - - return rc; -} - -static int -lst_batch_stop_ioctl(struct lstio_batch_stop_args *args) -{ - int rc; - char name[LST_NAME_SIZE + 1]; - - if (args->lstio_bat_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_bat_resultp || - !args->lstio_bat_namep || - args->lstio_bat_nmlen <= 0 || - args->lstio_bat_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (copy_from_user(name, args->lstio_bat_namep, - args->lstio_bat_nmlen)) - return -EFAULT; - - name[args->lstio_bat_nmlen] = 0; - - rc = lstcon_batch_stop(name, args->lstio_bat_force, - args->lstio_bat_resultp); - - return rc; -} - -static int -lst_batch_query_ioctl(struct lstio_batch_query_args *args) -{ - char name[LST_NAME_SIZE + 1]; - int rc; - - if (args->lstio_bat_key != console_session.ses_key) - return -EACCES; - - if 
(!args->lstio_bat_resultp || - !args->lstio_bat_namep || - args->lstio_bat_nmlen <= 0 || - args->lstio_bat_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (args->lstio_bat_testidx < 0) - return -EINVAL; - - if (copy_from_user(name, args->lstio_bat_namep, - args->lstio_bat_nmlen)) - return -EFAULT; - - name[args->lstio_bat_nmlen] = 0; - - rc = lstcon_test_batch_query(name, - args->lstio_bat_testidx, - args->lstio_bat_client, - args->lstio_bat_timeout, - args->lstio_bat_resultp); - - return rc; -} - -static int -lst_batch_list_ioctl(struct lstio_batch_list_args *args) -{ - if (args->lstio_bat_key != console_session.ses_key) - return -EACCES; - - if (args->lstio_bat_idx < 0 || - !args->lstio_bat_namep || - args->lstio_bat_nmlen <= 0 || - args->lstio_bat_nmlen > LST_NAME_SIZE) - return -EINVAL; - - return lstcon_batch_list(args->lstio_bat_idx, - args->lstio_bat_nmlen, - args->lstio_bat_namep); -} - -static int -lst_batch_info_ioctl(struct lstio_batch_info_args *args) -{ - char name[LST_NAME_SIZE + 1]; - int rc; - int index; - int ndent; - - if (args->lstio_bat_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_bat_namep || /* batch name */ - args->lstio_bat_nmlen <= 0 || - args->lstio_bat_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (!args->lstio_bat_entp && /* output: batch entry */ - !args->lstio_bat_dentsp) /* output: node entry */ - return -EINVAL; - - if (args->lstio_bat_dentsp) { /* have node entry */ - if (!args->lstio_bat_idxp || /* node index */ - !args->lstio_bat_ndentp) /* # of node entry */ - return -EINVAL; - - if (copy_from_user(&index, args->lstio_bat_idxp, - sizeof(index)) || - copy_from_user(&ndent, args->lstio_bat_ndentp, - sizeof(ndent))) - return -EFAULT; - - if (ndent <= 0 || index < 0) - return -EINVAL; - } - - if (copy_from_user(name, args->lstio_bat_namep, - args->lstio_bat_nmlen)) - return -EFAULT; - - name[args->lstio_bat_nmlen] = 0; - - rc = lstcon_batch_info(name, args->lstio_bat_entp, - args->lstio_bat_server, 
args->lstio_bat_testidx, - &index, &ndent, args->lstio_bat_dentsp); - - if (rc) - return rc; - - if (args->lstio_bat_dentsp && - (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) || - copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent)))) - rc = -EFAULT; - - return rc; -} - -static int -lst_stat_query_ioctl(struct lstio_stat_args *args) -{ - int rc; - char name[LST_NAME_SIZE + 1]; - - /* TODO: not finished */ - if (args->lstio_sta_key != console_session.ses_key) - return -EACCES; - - if (!args->lstio_sta_resultp) - return -EINVAL; - - if (args->lstio_sta_idsp) { - if (args->lstio_sta_count <= 0) - return -EINVAL; - - rc = lstcon_nodes_stat(args->lstio_sta_count, - args->lstio_sta_idsp, - args->lstio_sta_timeout, - args->lstio_sta_resultp); - } else if (args->lstio_sta_namep) { - if (args->lstio_sta_nmlen <= 0 || - args->lstio_sta_nmlen > LST_NAME_SIZE) - return -EINVAL; - - rc = copy_from_user(name, args->lstio_sta_namep, - args->lstio_sta_nmlen); - if (!rc) - rc = lstcon_group_stat(name, args->lstio_sta_timeout, - args->lstio_sta_resultp); - else - rc = -EFAULT; - } else { - rc = -EINVAL; - } - - return rc; -} - -static int lst_test_add_ioctl(struct lstio_test_args *args) -{ - char batch_name[LST_NAME_SIZE + 1]; - char src_name[LST_NAME_SIZE + 1]; - char dst_name[LST_NAME_SIZE + 1]; - void *param = NULL; - int ret = 0; - int rc = -ENOMEM; - - if (!args->lstio_tes_resultp || - !args->lstio_tes_retp || - !args->lstio_tes_bat_name || /* no specified batch */ - args->lstio_tes_bat_nmlen <= 0 || - args->lstio_tes_bat_nmlen > LST_NAME_SIZE || - !args->lstio_tes_sgrp_name || /* no source group */ - args->lstio_tes_sgrp_nmlen <= 0 || - args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE || - !args->lstio_tes_dgrp_name || /* no target group */ - args->lstio_tes_dgrp_nmlen <= 0 || - args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE) - return -EINVAL; - - if (!args->lstio_tes_loop || /* negative is infinite */ - args->lstio_tes_concur <= 0 || - args->lstio_tes_dist <= 0 || - 
args->lstio_tes_span <= 0) - return -EINVAL; - - /* have parameter, check if parameter length is valid */ - if (args->lstio_tes_param && - (args->lstio_tes_param_len <= 0 || - args->lstio_tes_param_len > - PAGE_SIZE - sizeof(struct lstcon_test))) - return -EINVAL; - - /* Enforce zero parameter length if there's no parameter */ - if (!args->lstio_tes_param && args->lstio_tes_param_len) - return -EINVAL; - - if (args->lstio_tes_param) { - param = memdup_user(args->lstio_tes_param, - args->lstio_tes_param_len); - if (IS_ERR(param)) - return PTR_ERR(param); - } - - rc = -EFAULT; - if (copy_from_user(batch_name, args->lstio_tes_bat_name, - args->lstio_tes_bat_nmlen) || - copy_from_user(src_name, args->lstio_tes_sgrp_name, - args->lstio_tes_sgrp_nmlen) || - copy_from_user(dst_name, args->lstio_tes_dgrp_name, - args->lstio_tes_dgrp_nmlen)) - goto out; - - rc = lstcon_test_add(batch_name, args->lstio_tes_type, - args->lstio_tes_loop, args->lstio_tes_concur, - args->lstio_tes_dist, args->lstio_tes_span, - src_name, dst_name, param, - args->lstio_tes_param_len, - &ret, args->lstio_tes_resultp); - - if (!rc && ret) - rc = (copy_to_user(args->lstio_tes_retp, &ret, - sizeof(ret))) ? 
-EFAULT : 0; -out: - kfree(param); - - return rc; -} - -int -lstcon_ioctl_entry(struct notifier_block *nb, - unsigned long cmd, void *vdata) -{ - struct libcfs_ioctl_hdr *hdr = vdata; - char *buf = NULL; - struct libcfs_ioctl_data *data; - int opc; - int rc = -EINVAL; - - if (cmd != IOC_LIBCFS_LNETST) - goto err; - - data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr); - - opc = data->ioc_u32[0]; - - if (data->ioc_plen1 > PAGE_SIZE) - goto err; - - buf = kmalloc(data->ioc_plen1, GFP_KERNEL); - rc = -ENOMEM; - if (!buf) - goto err; - - /* copy in parameter */ - rc = -EFAULT; - if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) - goto err; - - mutex_lock(&console_session.ses_mutex); - - console_session.ses_laststamp = ktime_get_real_seconds(); - - if (console_session.ses_shutdown) { - rc = -ESHUTDOWN; - goto out; - } - - if (console_session.ses_expired) - lstcon_session_end(); - - if (opc != LSTIO_SESSION_NEW && - console_session.ses_state == LST_SESSION_NONE) { - CDEBUG(D_NET, "LST no active session\n"); - rc = -ESRCH; - goto out; - } - - memset(&console_session.ses_trans_stat, 0, sizeof(struct lstcon_trans_stat)); - - switch (opc) { - case LSTIO_SESSION_NEW: - rc = lst_session_new_ioctl((struct lstio_session_new_args *)buf); - break; - case LSTIO_SESSION_END: - rc = lst_session_end_ioctl((struct lstio_session_end_args *)buf); - break; - case LSTIO_SESSION_INFO: - rc = lst_session_info_ioctl((struct lstio_session_info_args *)buf); - break; - case LSTIO_DEBUG: - rc = lst_debug_ioctl((struct lstio_debug_args *)buf); - break; - case LSTIO_GROUP_ADD: - rc = lst_group_add_ioctl((struct lstio_group_add_args *)buf); - break; - case LSTIO_GROUP_DEL: - rc = lst_group_del_ioctl((struct lstio_group_del_args *)buf); - break; - case LSTIO_GROUP_UPDATE: - rc = lst_group_update_ioctl((struct lstio_group_update_args *)buf); - break; - case LSTIO_NODES_ADD: - rc = lst_nodes_add_ioctl((struct lstio_group_nodes_args *)buf); - break; - case LSTIO_GROUP_LIST: - rc = 
lst_group_list_ioctl((struct lstio_group_list_args *)buf); - break; - case LSTIO_GROUP_INFO: - rc = lst_group_info_ioctl((struct lstio_group_info_args *)buf); - break; - case LSTIO_BATCH_ADD: - rc = lst_batch_add_ioctl((struct lstio_batch_add_args *)buf); - break; - case LSTIO_BATCH_START: - rc = lst_batch_run_ioctl((struct lstio_batch_run_args *)buf); - break; - case LSTIO_BATCH_STOP: - rc = lst_batch_stop_ioctl((struct lstio_batch_stop_args *)buf); - break; - case LSTIO_BATCH_QUERY: - rc = lst_batch_query_ioctl((struct lstio_batch_query_args *)buf); - break; - case LSTIO_BATCH_LIST: - rc = lst_batch_list_ioctl((struct lstio_batch_list_args *)buf); - break; - case LSTIO_BATCH_INFO: - rc = lst_batch_info_ioctl((struct lstio_batch_info_args *)buf); - break; - case LSTIO_TEST_ADD: - rc = lst_test_add_ioctl((struct lstio_test_args *)buf); - break; - case LSTIO_STAT_QUERY: - rc = lst_stat_query_ioctl((struct lstio_stat_args *)buf); - break; - default: - rc = -EINVAL; - goto out; - } - - if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat, - sizeof(struct lstcon_trans_stat))) - rc = -EFAULT; -out: - mutex_unlock(&console_session.ses_mutex); -err: - kfree(buf); - - return notifier_from_ioctl_errno(rc); -} diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c deleted file mode 100644 index 0dabade3d091..000000000000 --- a/drivers/staging/lustre/lnet/selftest/conrpc.c +++ /dev/null @@ -1,1396 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/selftest/conctl.c - * - * Console framework rpcs - * - * Author: Liang Zhen - */ - -#include -#include "timer.h" -#include "conrpc.h" -#include "console.h" - -void lstcon_rpc_stat_reply(struct lstcon_rpc_trans *, struct srpc_msg *, - struct lstcon_node *, struct lstcon_trans_stat *); - -static void -lstcon_rpc_done(struct srpc_client_rpc *rpc) -{ - struct lstcon_rpc *crpc = (struct lstcon_rpc *)rpc->crpc_priv; - - LASSERT(crpc && rpc == crpc->crp_rpc); - LASSERT(crpc->crp_posted && !crpc->crp_finished); - - spin_lock(&rpc->crpc_lock); - - if (!crpc->crp_trans) { - /* - * Orphan RPC is not in any transaction, - * I'm just a poor body and nobody loves me - */ - spin_unlock(&rpc->crpc_lock); - - /* release it */ - lstcon_rpc_put(crpc); - return; - } - - /* not an orphan RPC */ - crpc->crp_finished = 1; - - if (!crpc->crp_stamp) { - /* not aborted */ - LASSERT(!crpc->crp_status); - - crpc->crp_stamp = jiffies; - crpc->crp_status = rpc->crpc_status; - } - - /* wakeup (transaction)thread if I'm the last RPC in the transaction */ - if (atomic_dec_and_test(&crpc->crp_trans->tas_remaining)) - wake_up(&crpc->crp_trans->tas_waitq); - - 
spin_unlock(&rpc->crpc_lock); -} - -static int -lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats, - int bulk_npg, int bulk_len, int embedded, - struct lstcon_rpc *crpc) -{ - crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service, - feats, bulk_npg, bulk_len, - lstcon_rpc_done, (void *)crpc); - if (!crpc->crp_rpc) - return -ENOMEM; - - crpc->crp_trans = NULL; - crpc->crp_node = nd; - crpc->crp_posted = 0; - crpc->crp_finished = 0; - crpc->crp_unpacked = 0; - crpc->crp_status = 0; - crpc->crp_stamp = 0; - crpc->crp_embedded = embedded; - INIT_LIST_HEAD(&crpc->crp_link); - - atomic_inc(&console_session.ses_rpc_counter); - - return 0; -} - -static int -lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats, - int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp) -{ - struct lstcon_rpc *crpc = NULL; - int rc; - - spin_lock(&console_session.ses_rpc_lock); - - crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist, - struct lstcon_rpc, crp_link); - if (crpc) - list_del_init(&crpc->crp_link); - - spin_unlock(&console_session.ses_rpc_lock); - - if (!crpc) { - crpc = kzalloc(sizeof(*crpc), GFP_NOFS); - if (!crpc) - return -ENOMEM; - } - - rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc); - if (!rc) { - *crpcpp = crpc; - return 0; - } - - kfree(crpc); - - return rc; -} - -void -lstcon_rpc_put(struct lstcon_rpc *crpc) -{ - struct srpc_bulk *bulk = &crpc->crp_rpc->crpc_bulk; - int i; - - LASSERT(list_empty(&crpc->crp_link)); - - for (i = 0; i < bulk->bk_niov; i++) { - if (!bulk->bk_iovs[i].bv_page) - continue; - - __free_page(bulk->bk_iovs[i].bv_page); - } - - srpc_client_rpc_decref(crpc->crp_rpc); - - if (crpc->crp_embedded) { - /* embedded RPC, don't recycle it */ - memset(crpc, 0, sizeof(*crpc)); - crpc->crp_embedded = 1; - - } else { - spin_lock(&console_session.ses_rpc_lock); - - list_add(&crpc->crp_link, - &console_session.ses_rpc_freelist); - - spin_unlock(&console_session.ses_rpc_lock); - } - - /* RPC 
is not alive now */ - atomic_dec(&console_session.ses_rpc_counter); -} - -static void -lstcon_rpc_post(struct lstcon_rpc *crpc) -{ - struct lstcon_rpc_trans *trans = crpc->crp_trans; - - LASSERT(trans); - - atomic_inc(&trans->tas_remaining); - crpc->crp_posted = 1; - - sfw_post_rpc(crpc->crp_rpc); -} - -static char * -lstcon_rpc_trans_name(int transop) -{ - if (transop == LST_TRANS_SESNEW) - return "SESNEW"; - - if (transop == LST_TRANS_SESEND) - return "SESEND"; - - if (transop == LST_TRANS_SESQRY) - return "SESQRY"; - - if (transop == LST_TRANS_SESPING) - return "SESPING"; - - if (transop == LST_TRANS_TSBCLIADD) - return "TSBCLIADD"; - - if (transop == LST_TRANS_TSBSRVADD) - return "TSBSRVADD"; - - if (transop == LST_TRANS_TSBRUN) - return "TSBRUN"; - - if (transop == LST_TRANS_TSBSTOP) - return "TSBSTOP"; - - if (transop == LST_TRANS_TSBCLIQRY) - return "TSBCLIQRY"; - - if (transop == LST_TRANS_TSBSRVQRY) - return "TSBSRVQRY"; - - if (transop == LST_TRANS_STATQRY) - return "STATQRY"; - - return "Unknown"; -} - -int -lstcon_rpc_trans_prep(struct list_head *translist, int transop, - struct lstcon_rpc_trans **transpp) -{ - struct lstcon_rpc_trans *trans; - - if (translist) { - list_for_each_entry(trans, translist, tas_link) { - /* - * Can't enqueue two private transaction on - * the same object - */ - if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE) - return -EPERM; - } - } - - /* create a trans group */ - trans = kzalloc(sizeof(*trans), GFP_NOFS); - if (!trans) - return -ENOMEM; - - trans->tas_opc = transop; - - if (!translist) - INIT_LIST_HEAD(&trans->tas_olink); - else - list_add_tail(&trans->tas_olink, translist); - - list_add_tail(&trans->tas_link, &console_session.ses_trans_list); - - INIT_LIST_HEAD(&trans->tas_rpcs_list); - atomic_set(&trans->tas_remaining, 0); - init_waitqueue_head(&trans->tas_waitq); - - spin_lock(&console_session.ses_rpc_lock); - trans->tas_features = console_session.ses_features; - spin_unlock(&console_session.ses_rpc_lock); - - 
*transpp = trans; - return 0; -} - -void -lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *crpc) -{ - list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list); - crpc->crp_trans = trans; -} - -void -lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error) -{ - struct srpc_client_rpc *rpc; - struct lstcon_rpc *crpc; - struct lstcon_node *nd; - - list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { - rpc = crpc->crp_rpc; - - spin_lock(&rpc->crpc_lock); - - if (!crpc->crp_posted || /* not posted */ - crpc->crp_stamp) { /* rpc done or aborted already */ - if (!crpc->crp_stamp) { - crpc->crp_stamp = jiffies; - crpc->crp_status = -EINTR; - } - spin_unlock(&rpc->crpc_lock); - continue; - } - - crpc->crp_stamp = jiffies; - crpc->crp_status = error; - - spin_unlock(&rpc->crpc_lock); - - sfw_abort_rpc(rpc); - - if (error != -ETIMEDOUT) - continue; - - nd = crpc->crp_node; - if (time_after(nd->nd_stamp, crpc->crp_stamp)) - continue; - - nd->nd_stamp = crpc->crp_stamp; - nd->nd_state = LST_NODE_DOWN; - } -} - -static int -lstcon_rpc_trans_check(struct lstcon_rpc_trans *trans) -{ - if (console_session.ses_shutdown && - !list_empty(&trans->tas_olink)) /* Not an end session RPC */ - return 1; - - return !atomic_read(&trans->tas_remaining) ? 1 : 0; -} - -int -lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout) -{ - struct lstcon_rpc *crpc; - int rc; - - if (list_empty(&trans->tas_rpcs_list)) - return 0; - - if (timeout < LST_TRANS_MIN_TIMEOUT) - timeout = LST_TRANS_MIN_TIMEOUT; - - CDEBUG(D_NET, "Transaction %s started\n", - lstcon_rpc_trans_name(trans->tas_opc)); - - /* post all requests */ - list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { - LASSERT(!crpc->crp_posted); - - lstcon_rpc_post(crpc); - } - - mutex_unlock(&console_session.ses_mutex); - - rc = wait_event_interruptible_timeout(trans->tas_waitq, - lstcon_rpc_trans_check(trans), - timeout * HZ); - rc = (rc > 0) ? 0 : ((rc < 0) ? 
-EINTR : -ETIMEDOUT); - - mutex_lock(&console_session.ses_mutex); - - if (console_session.ses_shutdown) - rc = -ESHUTDOWN; - - if (rc || atomic_read(&trans->tas_remaining)) { - /* treat short timeout as canceled */ - if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2) - rc = -EINTR; - - lstcon_rpc_trans_abort(trans, rc); - } - - CDEBUG(D_NET, "Transaction %s stopped: %d\n", - lstcon_rpc_trans_name(trans->tas_opc), rc); - - lstcon_rpc_trans_stat(trans, lstcon_trans_stat()); - - return rc; -} - -static int -lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp) -{ - struct lstcon_node *nd = crpc->crp_node; - struct srpc_client_rpc *rpc = crpc->crp_rpc; - struct srpc_generic_reply *rep; - - LASSERT(nd && rpc); - LASSERT(crpc->crp_stamp); - - if (crpc->crp_status) { - *msgpp = NULL; - return crpc->crp_status; - } - - *msgpp = &rpc->crpc_replymsg; - if (!crpc->crp_unpacked) { - sfw_unpack_message(*msgpp); - crpc->crp_unpacked = 1; - } - - if (time_after(nd->nd_stamp, crpc->crp_stamp)) - return 0; - - nd->nd_stamp = crpc->crp_stamp; - rep = &(*msgpp)->msg_body.reply; - - if (rep->sid.ses_nid == LNET_NID_ANY) - nd->nd_state = LST_NODE_UNKNOWN; - else if (lstcon_session_match(rep->sid)) - nd->nd_state = LST_NODE_ACTIVE; - else - nd->nd_state = LST_NODE_BUSY; - - return 0; -} - -void -lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, struct lstcon_trans_stat *stat) -{ - struct lstcon_rpc *crpc; - struct srpc_msg *rep; - int error; - - LASSERT(stat); - - memset(stat, 0, sizeof(*stat)); - - list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { - lstcon_rpc_stat_total(stat, 1); - - LASSERT(crpc->crp_stamp); - - error = lstcon_rpc_get_reply(crpc, &rep); - if (error) { - lstcon_rpc_stat_failure(stat, 1); - if (!stat->trs_rpc_errno) - stat->trs_rpc_errno = -error; - - continue; - } - - lstcon_rpc_stat_success(stat, 1); - - lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat); - } - - if (trans->tas_opc == LST_TRANS_SESNEW && 
!stat->trs_fwk_errno) { - stat->trs_fwk_errno = - lstcon_session_feats_check(trans->tas_features); - } - - CDEBUG(D_NET, "transaction %s : success %d, failure %d, total %d, RPC error(%d), Framework error(%d)\n", - lstcon_rpc_trans_name(trans->tas_opc), - lstcon_rpc_stat_success(stat, 0), - lstcon_rpc_stat_failure(stat, 0), - lstcon_rpc_stat_total(stat, 0), - stat->trs_rpc_errno, stat->trs_fwk_errno); -} - -int -lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans, - struct list_head __user *head_up, - lstcon_rpc_readent_func_t readent) -{ - struct list_head tmp; - struct list_head __user *next; - struct lstcon_rpc_ent *ent; - struct srpc_generic_reply *rep; - struct lstcon_rpc *crpc; - struct srpc_msg *msg; - struct lstcon_node *nd; - long dur; - struct timeval tv; - int error; - - LASSERT(head_up); - - next = head_up; - - list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { - if (copy_from_user(&tmp, next, - sizeof(struct list_head))) - return -EFAULT; - - next = tmp.next; - if (next == head_up) - return 0; - - ent = list_entry(next, struct lstcon_rpc_ent, rpe_link); - - LASSERT(crpc->crp_stamp); - - error = lstcon_rpc_get_reply(crpc, &msg); - - nd = crpc->crp_node; - - dur = (long)(crpc->crp_stamp - - (unsigned long)console_session.ses_id.ses_stamp); - jiffies_to_timeval(dur, &tv); - - if (copy_to_user(&ent->rpe_peer, &nd->nd_id, - sizeof(struct lnet_process_id)) || - copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) || - copy_to_user(&ent->rpe_state, &nd->nd_state, - sizeof(nd->nd_state)) || - copy_to_user(&ent->rpe_rpc_errno, &error, - sizeof(error))) - return -EFAULT; - - if (error) - continue; - - /* RPC is done */ - rep = (struct srpc_generic_reply *)&msg->msg_body.reply; - - if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(rep->sid)) || - copy_to_user(&ent->rpe_fwk_errno, &rep->status, - sizeof(rep->status))) - return -EFAULT; - - if (!readent) - continue; - - error = readent(trans->tas_opc, msg, ent); - if (error) - return error; - } - - 
return 0; -} - -void -lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans) -{ - struct srpc_client_rpc *rpc; - struct lstcon_rpc *crpc; - struct lstcon_rpc *tmp; - int count = 0; - - list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) { - rpc = crpc->crp_rpc; - - spin_lock(&rpc->crpc_lock); - - /* free it if not posted or finished already */ - if (!crpc->crp_posted || crpc->crp_finished) { - spin_unlock(&rpc->crpc_lock); - - list_del_init(&crpc->crp_link); - lstcon_rpc_put(crpc); - - continue; - } - - /* - * rpcs can be still not callbacked (even LNetMDUnlink is - * called) because huge timeout for inaccessible network, - * don't make user wait for them, just abandon them, they - * will be recycled in callback - */ - LASSERT(crpc->crp_status); - - crpc->crp_node = NULL; - crpc->crp_trans = NULL; - list_del_init(&crpc->crp_link); - count++; - - spin_unlock(&rpc->crpc_lock); - - atomic_dec(&trans->tas_remaining); - } - - LASSERT(!atomic_read(&trans->tas_remaining)); - - list_del(&trans->tas_link); - if (!list_empty(&trans->tas_olink)) - list_del(&trans->tas_olink); - - CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n", - lstcon_rpc_trans_name(trans->tas_opc), count); - - kfree(trans); -} - -int -lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, - unsigned int feats, struct lstcon_rpc **crpc) -{ - struct srpc_mksn_reqst *msrq; - struct srpc_rmsn_reqst *rsrq; - int rc; - - switch (transop) { - case LST_TRANS_SESNEW: - rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION, - feats, 0, 0, crpc); - if (rc) - return rc; - - msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst; - msrq->mksn_sid = console_session.ses_id; - msrq->mksn_force = console_session.ses_force; - strlcpy(msrq->mksn_name, console_session.ses_name, - sizeof(msrq->mksn_name)); - break; - - case LST_TRANS_SESEND: - rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION, - feats, 0, 0, crpc); - if (rc) - return rc; - - rsrq = 
&(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst; - rsrq->rmsn_sid = console_session.ses_id; - break; - - default: - LBUG(); - } - - return 0; -} - -int -lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned int feats, - struct lstcon_rpc **crpc) -{ - struct srpc_debug_reqst *drq; - int rc; - - rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc); - if (rc) - return rc; - - drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst; - - drq->dbg_sid = console_session.ses_id; - drq->dbg_flags = 0; - - return rc; -} - -int -lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats, - struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc) -{ - struct lstcon_batch *batch; - struct srpc_batch_reqst *brq; - int rc; - - rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc); - if (rc) - return rc; - - brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst; - - brq->bar_sid = console_session.ses_id; - brq->bar_bid = tsb->tsb_id; - brq->bar_testidx = tsb->tsb_index; - brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN : - (transop == LST_TRANS_TSBSTOP ? 
SRPC_BATCH_OPC_STOP : - SRPC_BATCH_OPC_QUERY); - - if (transop != LST_TRANS_TSBRUN && - transop != LST_TRANS_TSBSTOP) - return 0; - - LASSERT(!tsb->tsb_index); - - batch = (struct lstcon_batch *)tsb; - brq->bar_arg = batch->bat_arg; - - return 0; -} - -int -lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats, - struct lstcon_rpc **crpc) -{ - struct srpc_stat_reqst *srq; - int rc; - - rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc); - if (rc) - return rc; - - srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst; - - srq->str_sid = console_session.ses_id; - srq->str_type = 0; /* XXX remove it */ - - return 0; -} - -static struct lnet_process_id_packed * -lstcon_next_id(int idx, int nkiov, struct bio_vec *kiov) -{ - struct lnet_process_id_packed *pid; - int i; - - i = idx / SFW_ID_PER_PAGE; - - LASSERT(i < nkiov); - - pid = (struct lnet_process_id_packed *)page_address(kiov[i].bv_page); - - return &pid[idx % SFW_ID_PER_PAGE]; -} - -static int -lstcon_dstnodes_prep(struct lstcon_group *grp, int idx, - int dist, int span, int nkiov, struct bio_vec *kiov) -{ - struct lnet_process_id_packed *pid; - struct lstcon_ndlink *ndl; - struct lstcon_node *nd; - int start; - int end; - int i = 0; - - LASSERT(dist >= 1); - LASSERT(span >= 1); - LASSERT(grp->grp_nnode >= 1); - - if (span > grp->grp_nnode) - return -EINVAL; - - start = ((idx / dist) * span) % grp->grp_nnode; - end = ((idx / dist) * span + span - 1) % grp->grp_nnode; - - list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) { - nd = ndl->ndl_node; - if (i < start) { - i++; - continue; - } - - if (i > (end >= start ? 
end : grp->grp_nnode)) - break; - - pid = lstcon_next_id((i - start), nkiov, kiov); - pid->nid = nd->nd_id.nid; - pid->pid = nd->nd_id.pid; - i++; - } - - if (start <= end) /* done */ - return 0; - - list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) { - if (i > grp->grp_nnode + end) - break; - - nd = ndl->ndl_node; - pid = lstcon_next_id((i - start), nkiov, kiov); - pid->nid = nd->nd_id.nid; - pid->pid = nd->nd_id.pid; - i++; - } - - return 0; -} - -static int -lstcon_pingrpc_prep(struct lst_test_ping_param *param, struct srpc_test_reqst *req) -{ - struct test_ping_req *prq = &req->tsr_u.ping; - - prq->png_size = param->png_size; - prq->png_flags = param->png_flags; - /* TODO dest */ - return 0; -} - -static int -lstcon_bulkrpc_v0_prep(struct lst_test_bulk_param *param, - struct srpc_test_reqst *req) -{ - struct test_bulk_req *brq = &req->tsr_u.bulk_v0; - - brq->blk_opc = param->blk_opc; - brq->blk_npg = DIV_ROUND_UP(param->blk_size, PAGE_SIZE); - brq->blk_flags = param->blk_flags; - - return 0; -} - -static int -lstcon_bulkrpc_v1_prep(struct lst_test_bulk_param *param, bool is_client, - struct srpc_test_reqst *req) -{ - struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1; - - brq->blk_opc = param->blk_opc; - brq->blk_flags = param->blk_flags; - brq->blk_len = param->blk_size; - brq->blk_offset = is_client ? param->blk_cli_off : param->blk_srv_off; - - return 0; -} - -int -lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats, - struct lstcon_test *test, struct lstcon_rpc **crpc) -{ - struct lstcon_group *sgrp = test->tes_src_grp; - struct lstcon_group *dgrp = test->tes_dst_grp; - struct srpc_test_reqst *trq; - struct srpc_bulk *bulk; - int i; - int npg = 0; - int nob = 0; - int rc = 0; - - if (transop == LST_TRANS_TSBCLIADD) { - npg = sfw_id_pages(test->tes_span); - nob = !(feats & LST_FEAT_BULK_LEN) ? 
- npg * PAGE_SIZE : - sizeof(struct lnet_process_id_packed) * test->tes_span; - } - - rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc); - if (rc) - return rc; - - trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst; - - if (transop == LST_TRANS_TSBSRVADD) { - int ndist = DIV_ROUND_UP(sgrp->grp_nnode, test->tes_dist); - int nspan = DIV_ROUND_UP(dgrp->grp_nnode, test->tes_span); - int nmax = DIV_ROUND_UP(ndist, nspan); - - trq->tsr_ndest = 0; - trq->tsr_loop = nmax * test->tes_dist * test->tes_concur; - } else { - bulk = &(*crpc)->crp_rpc->crpc_bulk; - - for (i = 0; i < npg; i++) { - int len; - - LASSERT(nob > 0); - - len = !(feats & LST_FEAT_BULK_LEN) ? - PAGE_SIZE : - min_t(int, nob, PAGE_SIZE); - nob -= len; - - bulk->bk_iovs[i].bv_offset = 0; - bulk->bk_iovs[i].bv_len = len; - bulk->bk_iovs[i].bv_page = alloc_page(GFP_KERNEL); - - if (!bulk->bk_iovs[i].bv_page) { - lstcon_rpc_put(*crpc); - return -ENOMEM; - } - } - - bulk->bk_sink = 0; - - LASSERT(transop == LST_TRANS_TSBCLIADD); - - rc = lstcon_dstnodes_prep(test->tes_dst_grp, - test->tes_cliidx++, - test->tes_dist, - test->tes_span, - npg, &bulk->bk_iovs[0]); - if (rc) { - lstcon_rpc_put(*crpc); - return rc; - } - - trq->tsr_ndest = test->tes_span; - trq->tsr_loop = test->tes_loop; - } - - trq->tsr_sid = console_session.ses_id; - trq->tsr_bid = test->tes_hdr.tsb_id; - trq->tsr_concur = test->tes_concur; - trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 
1 : 0; - trq->tsr_stop_onerr = !!test->tes_stop_onerr; - - switch (test->tes_type) { - case LST_TEST_PING: - trq->tsr_service = SRPC_SERVICE_PING; - rc = lstcon_pingrpc_prep((struct lst_test_ping_param *) - &test->tes_param[0], trq); - break; - - case LST_TEST_BULK: - trq->tsr_service = SRPC_SERVICE_BRW; - if (!(feats & LST_FEAT_BULK_LEN)) { - rc = lstcon_bulkrpc_v0_prep((struct lst_test_bulk_param *) - &test->tes_param[0], trq); - } else { - rc = lstcon_bulkrpc_v1_prep((struct lst_test_bulk_param *) - &test->tes_param[0], - trq->tsr_is_client, trq); - } - - break; - default: - LBUG(); - break; - } - - return rc; -} - -static int -lstcon_sesnew_stat_reply(struct lstcon_rpc_trans *trans, - struct lstcon_node *nd, struct srpc_msg *reply) -{ - struct srpc_mksn_reply *mksn_rep = &reply->msg_body.mksn_reply; - int status = mksn_rep->mksn_status; - - if (!status && - (reply->msg_ses_feats & ~LST_FEATS_MASK)) { - mksn_rep->mksn_status = EPROTO; - status = EPROTO; - } - - if (status == EPROTO) { - CNETERR("session protocol error from %s: %u\n", - libcfs_nid2str(nd->nd_id.nid), - reply->msg_ses_feats); - } - - if (status) - return status; - - if (!trans->tas_feats_updated) { - spin_lock(&console_session.ses_rpc_lock); - if (!trans->tas_feats_updated) { /* recheck with lock */ - trans->tas_feats_updated = 1; - trans->tas_features = reply->msg_ses_feats; - } - spin_unlock(&console_session.ses_rpc_lock); - } - - if (reply->msg_ses_feats != trans->tas_features) { - CNETERR("Framework features %x from %s is different with features on this transaction: %x\n", - reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid), - trans->tas_features); - mksn_rep->mksn_status = EPROTO; - status = EPROTO; - } - - if (!status) { - /* session timeout on remote node */ - nd->nd_timeout = mksn_rep->mksn_timeout; - } - - return status; -} - -void -lstcon_rpc_stat_reply(struct lstcon_rpc_trans *trans, struct srpc_msg *msg, - struct lstcon_node *nd, struct lstcon_trans_stat *stat) -{ - struct 
srpc_rmsn_reply *rmsn_rep; - struct srpc_debug_reply *dbg_rep; - struct srpc_batch_reply *bat_rep; - struct srpc_test_reply *test_rep; - struct srpc_stat_reply *stat_rep; - int rc = 0; - - switch (trans->tas_opc) { - case LST_TRANS_SESNEW: - rc = lstcon_sesnew_stat_reply(trans, nd, msg); - if (!rc) { - lstcon_sesop_stat_success(stat, 1); - return; - } - - lstcon_sesop_stat_failure(stat, 1); - break; - - case LST_TRANS_SESEND: - rmsn_rep = &msg->msg_body.rmsn_reply; - /* ESRCH is not an error for end session */ - if (!rmsn_rep->rmsn_status || - rmsn_rep->rmsn_status == ESRCH) { - lstcon_sesop_stat_success(stat, 1); - return; - } - - lstcon_sesop_stat_failure(stat, 1); - rc = rmsn_rep->rmsn_status; - break; - - case LST_TRANS_SESQRY: - case LST_TRANS_SESPING: - dbg_rep = &msg->msg_body.dbg_reply; - - if (dbg_rep->dbg_status == ESRCH) { - lstcon_sesqry_stat_unknown(stat, 1); - return; - } - - if (lstcon_session_match(dbg_rep->dbg_sid)) - lstcon_sesqry_stat_active(stat, 1); - else - lstcon_sesqry_stat_busy(stat, 1); - return; - - case LST_TRANS_TSBRUN: - case LST_TRANS_TSBSTOP: - bat_rep = &msg->msg_body.bat_reply; - - if (!bat_rep->bar_status) { - lstcon_tsbop_stat_success(stat, 1); - return; - } - - if (bat_rep->bar_status == EPERM && - trans->tas_opc == LST_TRANS_TSBSTOP) { - lstcon_tsbop_stat_success(stat, 1); - return; - } - - lstcon_tsbop_stat_failure(stat, 1); - rc = bat_rep->bar_status; - break; - - case LST_TRANS_TSBCLIQRY: - case LST_TRANS_TSBSRVQRY: - bat_rep = &msg->msg_body.bat_reply; - - if (bat_rep->bar_active) - lstcon_tsbqry_stat_run(stat, 1); - else - lstcon_tsbqry_stat_idle(stat, 1); - - if (!bat_rep->bar_status) - return; - - lstcon_tsbqry_stat_failure(stat, 1); - rc = bat_rep->bar_status; - break; - - case LST_TRANS_TSBCLIADD: - case LST_TRANS_TSBSRVADD: - test_rep = &msg->msg_body.tes_reply; - - if (!test_rep->tsr_status) { - lstcon_tsbop_stat_success(stat, 1); - return; - } - - lstcon_tsbop_stat_failure(stat, 1); - rc = test_rep->tsr_status; - 
break; - - case LST_TRANS_STATQRY: - stat_rep = &msg->msg_body.stat_reply; - - if (!stat_rep->str_status) { - lstcon_statqry_stat_success(stat, 1); - return; - } - - lstcon_statqry_stat_failure(stat, 1); - rc = stat_rep->str_status; - break; - - default: - LBUG(); - } - - if (!stat->trs_fwk_errno) - stat->trs_fwk_errno = rc; -} - -int -lstcon_rpc_trans_ndlist(struct list_head *ndlist, - struct list_head *translist, int transop, - void *arg, lstcon_rpc_cond_func_t condition, - struct lstcon_rpc_trans **transpp) -{ - struct lstcon_rpc_trans *trans; - struct lstcon_ndlink *ndl; - struct lstcon_node *nd; - struct lstcon_rpc *rpc; - unsigned int feats; - int rc; - - /* Creating session RPG for list of nodes */ - - rc = lstcon_rpc_trans_prep(translist, transop, &trans); - if (rc) { - CERROR("Can't create transaction %d: %d\n", transop, rc); - return rc; - } - - feats = trans->tas_features; - list_for_each_entry(ndl, ndlist, ndl_link) { - rc = !condition ? 1 : - condition(transop, ndl->ndl_node, arg); - - if (!rc) - continue; - - if (rc < 0) { - CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n", - transop, rc); - break; - } - - nd = ndl->ndl_node; - - switch (transop) { - case LST_TRANS_SESNEW: - case LST_TRANS_SESEND: - rc = lstcon_sesrpc_prep(nd, transop, feats, &rpc); - break; - case LST_TRANS_SESQRY: - case LST_TRANS_SESPING: - rc = lstcon_dbgrpc_prep(nd, feats, &rpc); - break; - case LST_TRANS_TSBCLIADD: - case LST_TRANS_TSBSRVADD: - rc = lstcon_testrpc_prep(nd, transop, feats, - (struct lstcon_test *)arg, - &rpc); - break; - case LST_TRANS_TSBRUN: - case LST_TRANS_TSBSTOP: - case LST_TRANS_TSBCLIQRY: - case LST_TRANS_TSBSRVQRY: - rc = lstcon_batrpc_prep(nd, transop, feats, - (struct lstcon_tsb_hdr *)arg, - &rpc); - break; - case LST_TRANS_STATQRY: - rc = lstcon_statrpc_prep(nd, feats, &rpc); - break; - default: - rc = -EINVAL; - break; - } - - if (rc) { - CERROR("Failed to create RPC for transaction %s: %d\n", - 
lstcon_rpc_trans_name(transop), rc); - break; - } - - lstcon_rpc_trans_addreq(trans, rpc); - } - - if (!rc) { - *transpp = trans; - return 0; - } - - lstcon_rpc_trans_destroy(trans); - - return rc; -} - -static void -lstcon_rpc_pinger(void *arg) -{ - struct stt_timer *ptimer = (struct stt_timer *)arg; - struct lstcon_rpc_trans *trans; - struct lstcon_rpc *crpc; - struct srpc_msg *rep; - struct srpc_debug_reqst *drq; - struct lstcon_ndlink *ndl; - struct lstcon_node *nd; - int intv; - int count = 0; - int rc; - - /* - * RPC pinger is a special case of transaction, - * it's called by timer at 8 seconds interval. - */ - mutex_lock(&console_session.ses_mutex); - - if (console_session.ses_shutdown || console_session.ses_expired) { - mutex_unlock(&console_session.ses_mutex); - return; - } - - if (!console_session.ses_expired && - ktime_get_real_seconds() - console_session.ses_laststamp > - (time64_t)console_session.ses_timeout) - console_session.ses_expired = 1; - - trans = console_session.ses_ping; - - LASSERT(trans); - - list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) { - nd = ndl->ndl_node; - - if (console_session.ses_expired) { - /* idle console, end session on all nodes */ - if (nd->nd_state != LST_NODE_ACTIVE) - continue; - - rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND, - trans->tas_features, &crpc); - if (rc) { - CERROR("Out of memory\n"); - break; - } - - lstcon_rpc_trans_addreq(trans, crpc); - lstcon_rpc_post(crpc); - - continue; - } - - crpc = &nd->nd_ping; - - if (crpc->crp_rpc) { - LASSERT(crpc->crp_trans == trans); - LASSERT(!list_empty(&crpc->crp_link)); - - spin_lock(&crpc->crp_rpc->crpc_lock); - - LASSERT(crpc->crp_posted); - - if (!crpc->crp_finished) { - /* in flight */ - spin_unlock(&crpc->crp_rpc->crpc_lock); - continue; - } - - spin_unlock(&crpc->crp_rpc->crpc_lock); - - lstcon_rpc_get_reply(crpc, &rep); - - list_del_init(&crpc->crp_link); - - lstcon_rpc_put(crpc); - } - - if (nd->nd_state != LST_NODE_ACTIVE) - continue; - - intv = 
(jiffies - nd->nd_stamp) / msecs_to_jiffies(MSEC_PER_SEC); - if (intv < nd->nd_timeout / 2) - continue; - - rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG, - trans->tas_features, 0, 0, 1, crpc); - if (rc) { - CERROR("Out of memory\n"); - break; - } - - drq = &crpc->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst; - - drq->dbg_sid = console_session.ses_id; - drq->dbg_flags = 0; - - lstcon_rpc_trans_addreq(trans, crpc); - lstcon_rpc_post(crpc); - - count++; - } - - if (console_session.ses_expired) { - mutex_unlock(&console_session.ses_mutex); - return; - } - - CDEBUG(D_NET, "Ping %d nodes in session\n", count); - - ptimer->stt_expires = ktime_get_real_seconds() + LST_PING_INTERVAL; - stt_add_timer(ptimer); - - mutex_unlock(&console_session.ses_mutex); -} - -int -lstcon_rpc_pinger_start(void) -{ - struct stt_timer *ptimer; - int rc; - - LASSERT(list_empty(&console_session.ses_rpc_freelist)); - LASSERT(!atomic_read(&console_session.ses_rpc_counter)); - - rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING, - &console_session.ses_ping); - if (rc) { - CERROR("Failed to create console pinger\n"); - return rc; - } - - ptimer = &console_session.ses_ping_timer; - ptimer->stt_expires = ktime_get_real_seconds() + LST_PING_INTERVAL; - - stt_add_timer(ptimer); - - return 0; -} - -void -lstcon_rpc_pinger_stop(void) -{ - LASSERT(console_session.ses_shutdown); - - stt_del_timer(&console_session.ses_ping_timer); - - lstcon_rpc_trans_abort(console_session.ses_ping, -ESHUTDOWN); - lstcon_rpc_trans_stat(console_session.ses_ping, lstcon_trans_stat()); - lstcon_rpc_trans_destroy(console_session.ses_ping); - - memset(lstcon_trans_stat(), 0, sizeof(struct lstcon_trans_stat)); - - console_session.ses_ping = NULL; -} - -void -lstcon_rpc_cleanup_wait(void) -{ - struct lstcon_rpc_trans *trans; - struct lstcon_rpc *crpc; - struct lstcon_rpc *temp; - struct list_head *pacer; - struct list_head zlist; - - /* Called with hold of global mutex */ - - LASSERT(console_session.ses_shutdown); - - while 
(!list_empty(&console_session.ses_trans_list)) { - list_for_each(pacer, &console_session.ses_trans_list) { - trans = list_entry(pacer, struct lstcon_rpc_trans, - tas_link); - - CDEBUG(D_NET, "Session closed, wakeup transaction %s\n", - lstcon_rpc_trans_name(trans->tas_opc)); - - wake_up(&trans->tas_waitq); - } - - mutex_unlock(&console_session.ses_mutex); - - CWARN("Session is shutting down, waiting for termination of transactions\n"); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - - mutex_lock(&console_session.ses_mutex); - } - - spin_lock(&console_session.ses_rpc_lock); - - lst_wait_until(!atomic_read(&console_session.ses_rpc_counter), - console_session.ses_rpc_lock, - "Network is not accessible or target is down, waiting for %d console RPCs to being recycled\n", - atomic_read(&console_session.ses_rpc_counter)); - - list_add(&zlist, &console_session.ses_rpc_freelist); - list_del_init(&console_session.ses_rpc_freelist); - - spin_unlock(&console_session.ses_rpc_lock); - - list_for_each_entry_safe(crpc, temp, &zlist, crp_link) { - list_del(&crpc->crp_link); - kfree(crpc); - } -} - -int -lstcon_rpc_module_init(void) -{ - INIT_LIST_HEAD(&console_session.ses_ping_timer.stt_list); - console_session.ses_ping_timer.stt_func = lstcon_rpc_pinger; - console_session.ses_ping_timer.stt_data = &console_session.ses_ping_timer; - - console_session.ses_ping = NULL; - - spin_lock_init(&console_session.ses_rpc_lock); - atomic_set(&console_session.ses_rpc_counter, 0); - INIT_LIST_HEAD(&console_session.ses_rpc_freelist); - - return 0; -} - -void -lstcon_rpc_module_fini(void) -{ - LASSERT(list_empty(&console_session.ses_rpc_freelist)); - LASSERT(!atomic_read(&console_session.ses_rpc_counter)); -} diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h deleted file mode 100644 index ce2f92d04838..000000000000 --- a/drivers/staging/lustre/lnet/selftest/conrpc.h +++ /dev/null @@ -1,142 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * /lnet/selftest/conrpc.h - * - * Console rpc - * - * Author: Liang Zhen - */ - -#ifndef __LST_CONRPC_H__ -#define __LST_CONRPC_H__ - -#include -#include -#include "rpc.h" -#include "selftest.h" - -/* Console rpc and rpc transaction */ -#define LST_TRANS_TIMEOUT 30 -#define LST_TRANS_MIN_TIMEOUT 3 - -#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT) - -#define LST_PING_INTERVAL 8 - -struct lstcon_rpc_trans; -struct lstcon_tsb_hdr; -struct lstcon_test; -struct lstcon_node; - -struct lstcon_rpc { - struct list_head crp_link; /* chain on rpc transaction */ - struct srpc_client_rpc *crp_rpc; /* client rpc */ - struct lstcon_node *crp_node; /* destination node */ - struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */ - - unsigned int crp_posted:1; /* rpc is posted */ - unsigned int crp_finished:1; /* rpc is finished */ - unsigned int crp_unpacked:1; /* reply is unpacked */ - /** RPC is embedded in other structure and can't free it */ - unsigned int crp_embedded:1; - int crp_status; /* console rpc errors */ - unsigned long crp_stamp; /* replied time stamp */ -}; - -struct lstcon_rpc_trans { - struct list_head tas_olink; /* link chain on owner list */ - struct list_head tas_link; /* link chain on global list */ - int tas_opc; /* operation code of transaction */ - unsigned int tas_feats_updated; /* features mask is uptodate */ - unsigned int tas_features; /* test features mask */ - wait_queue_head_t tas_waitq; /* wait queue head */ - atomic_t tas_remaining; /* # of un-scheduled rpcs */ - struct list_head tas_rpcs_list; /* queued requests */ -}; - -#define LST_TRANS_PRIVATE 0x1000 - -#define LST_TRANS_SESNEW (LST_TRANS_PRIVATE | 0x01) -#define LST_TRANS_SESEND (LST_TRANS_PRIVATE | 0x02) -#define LST_TRANS_SESQRY 0x03 -#define LST_TRANS_SESPING 0x04 - -#define LST_TRANS_TSBCLIADD (LST_TRANS_PRIVATE | 0x11) -#define LST_TRANS_TSBSRVADD (LST_TRANS_PRIVATE | 0x12) -#define LST_TRANS_TSBRUN (LST_TRANS_PRIVATE | 0x13) -#define 
LST_TRANS_TSBSTOP (LST_TRANS_PRIVATE | 0x14) -#define LST_TRANS_TSBCLIQRY 0x15 -#define LST_TRANS_TSBSRVQRY 0x16 - -#define LST_TRANS_STATQRY 0x21 - -typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *); -typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *, - struct lstcon_rpc_ent __user *); - -int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, - unsigned int version, struct lstcon_rpc **crpc); -int lstcon_dbgrpc_prep(struct lstcon_node *nd, - unsigned int version, struct lstcon_rpc **crpc); -int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, - unsigned int version, struct lstcon_tsb_hdr *tsb, - struct lstcon_rpc **crpc); -int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, - unsigned int version, struct lstcon_test *test, - struct lstcon_rpc **crpc); -int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int version, - struct lstcon_rpc **crpc); -void lstcon_rpc_put(struct lstcon_rpc *crpc); -int lstcon_rpc_trans_prep(struct list_head *translist, - int transop, struct lstcon_rpc_trans **transpp); -int lstcon_rpc_trans_ndlist(struct list_head *ndlist, - struct list_head *translist, int transop, - void *arg, lstcon_rpc_cond_func_t condition, - struct lstcon_rpc_trans **transpp); -void lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, - struct lstcon_trans_stat *stat); -int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans, - struct list_head __user *head_up, - lstcon_rpc_readent_func_t readent); -void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error); -void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans); -void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, - struct lstcon_rpc *req); -int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout); -int lstcon_rpc_pinger_start(void); -void lstcon_rpc_pinger_stop(void); -void lstcon_rpc_cleanup_wait(void); -int lstcon_rpc_module_init(void); -void lstcon_rpc_module_fini(void); - -#endif diff --git 
a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c deleted file mode 100644 index 3c1c1b5997e0..000000000000 --- a/drivers/staging/lustre/lnet/selftest/console.c +++ /dev/null @@ -1,2104 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lnet/selftest/conctl.c - * - * Infrastructure of LST console - * - * Author: Liang Zhen - */ - -#include -#include "console.h" -#include "conrpc.h" - -#define LST_NODE_STATE_COUNTER(nd, p) \ -do { \ - if ((nd)->nd_state == LST_NODE_ACTIVE) \ - (p)->nle_nactive++; \ - else if ((nd)->nd_state == LST_NODE_BUSY) \ - (p)->nle_nbusy++; \ - else if ((nd)->nd_state == LST_NODE_DOWN) \ - (p)->nle_ndown++; \ - else \ - (p)->nle_nunknown++; \ - (p)->nle_nnode++; \ -} while (0) - -struct lstcon_session console_session; - -static void -lstcon_node_get(struct lstcon_node *nd) -{ - LASSERT(nd->nd_ref >= 1); - - nd->nd_ref++; -} - -static int -lstcon_node_find(struct lnet_process_id id, struct lstcon_node **ndpp, - int create) -{ - struct lstcon_ndlink *ndl; - unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE; - - LASSERT(id.nid != LNET_NID_ANY); - - list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], - ndl_hlink) { - if (ndl->ndl_node->nd_id.nid != id.nid || - ndl->ndl_node->nd_id.pid != id.pid) - continue; - - lstcon_node_get(ndl->ndl_node); - *ndpp = ndl->ndl_node; - return 0; - } - - if (!create) - return -ENOENT; - - *ndpp = kzalloc(sizeof(**ndpp) + sizeof(*ndl), GFP_KERNEL); - if (!*ndpp) - return -ENOMEM; - - ndl = (struct lstcon_ndlink *)(*ndpp + 1); - - ndl->ndl_node = *ndpp; - - ndl->ndl_node->nd_ref = 1; - ndl->ndl_node->nd_id = id; - ndl->ndl_node->nd_stamp = jiffies; - ndl->ndl_node->nd_state = LST_NODE_UNKNOWN; - ndl->ndl_node->nd_timeout = 0; - memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc)); - - /* - * queued in global hash & list, no refcount is taken by - * global hash & list, if caller release his refcount, - * node will be released - */ - list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]); - list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list); - - return 0; -} - -static void -lstcon_node_put(struct lstcon_node *nd) -{ - struct lstcon_ndlink *ndl; - - LASSERT(nd->nd_ref > 0); - - if 
(--nd->nd_ref > 0) - return; - - ndl = (struct lstcon_ndlink *)(nd + 1); - - LASSERT(!list_empty(&ndl->ndl_link)); - LASSERT(!list_empty(&ndl->ndl_hlink)); - - /* remove from session */ - list_del(&ndl->ndl_link); - list_del(&ndl->ndl_hlink); - - kfree(nd); -} - -static int -lstcon_ndlink_find(struct list_head *hash, struct lnet_process_id id, - struct lstcon_ndlink **ndlpp, int create) -{ - unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; - struct lstcon_ndlink *ndl; - struct lstcon_node *nd; - int rc; - - if (id.nid == LNET_NID_ANY) - return -EINVAL; - - /* search in hash */ - list_for_each_entry(ndl, &hash[idx], ndl_hlink) { - if (ndl->ndl_node->nd_id.nid != id.nid || - ndl->ndl_node->nd_id.pid != id.pid) - continue; - - *ndlpp = ndl; - return 0; - } - - if (!create) - return -ENOENT; - - /* find or create in session hash */ - rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0); - if (rc) - return rc; - - ndl = kzalloc(sizeof(struct lstcon_ndlink), GFP_NOFS); - if (!ndl) { - lstcon_node_put(nd); - return -ENOMEM; - } - - *ndlpp = ndl; - - ndl->ndl_node = nd; - INIT_LIST_HEAD(&ndl->ndl_link); - list_add_tail(&ndl->ndl_hlink, &hash[idx]); - - return 0; -} - -static void -lstcon_ndlink_release(struct lstcon_ndlink *ndl) -{ - LASSERT(list_empty(&ndl->ndl_link)); - LASSERT(!list_empty(&ndl->ndl_hlink)); - - list_del(&ndl->ndl_hlink); /* delete from hash */ - lstcon_node_put(ndl->ndl_node); - - kfree(ndl); -} - -static int -lstcon_group_alloc(char *name, struct lstcon_group **grpp) -{ - struct lstcon_group *grp; - int i; - - grp = kmalloc(offsetof(struct lstcon_group, - grp_ndl_hash[LST_NODE_HASHSIZE]), - GFP_KERNEL); - if (!grp) - return -ENOMEM; - - grp->grp_ref = 1; - if (name) { - if (strlen(name) > sizeof(grp->grp_name) - 1) { - kfree(grp); - return -E2BIG; - } - strncpy(grp->grp_name, name, sizeof(grp->grp_name)); - } - - INIT_LIST_HEAD(&grp->grp_link); - INIT_LIST_HEAD(&grp->grp_ndl_list); - INIT_LIST_HEAD(&grp->grp_trans_list); - - for (i = 0; i 
< LST_NODE_HASHSIZE; i++) - INIT_LIST_HEAD(&grp->grp_ndl_hash[i]); - - *grpp = grp; - - return 0; -} - -static void -lstcon_group_addref(struct lstcon_group *grp) -{ - grp->grp_ref++; -} - -static void lstcon_group_ndlink_release(struct lstcon_group *, - struct lstcon_ndlink *); - -static void -lstcon_group_drain(struct lstcon_group *grp, int keep) -{ - struct lstcon_ndlink *ndl; - struct lstcon_ndlink *tmp; - - list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) { - if (!(ndl->ndl_node->nd_state & keep)) - lstcon_group_ndlink_release(grp, ndl); - } -} - -static void -lstcon_group_decref(struct lstcon_group *grp) -{ - int i; - - if (--grp->grp_ref > 0) - return; - - if (!list_empty(&grp->grp_link)) - list_del(&grp->grp_link); - - lstcon_group_drain(grp, 0); - - for (i = 0; i < LST_NODE_HASHSIZE; i++) - LASSERT(list_empty(&grp->grp_ndl_hash[i])); - - kfree(grp); -} - -static int -lstcon_group_find(const char *name, struct lstcon_group **grpp) -{ - struct lstcon_group *grp; - - list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { - if (strncmp(grp->grp_name, name, LST_NAME_SIZE)) - continue; - - lstcon_group_addref(grp); /* +1 ref for caller */ - *grpp = grp; - return 0; - } - - return -ENOENT; -} - -static int -lstcon_group_ndlink_find(struct lstcon_group *grp, struct lnet_process_id id, - struct lstcon_ndlink **ndlpp, int create) -{ - int rc; - - rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create); - if (rc) - return rc; - - if (!list_empty(&(*ndlpp)->ndl_link)) - return 0; - - list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list); - grp->grp_nnode++; - - return 0; -} - -static void -lstcon_group_ndlink_release(struct lstcon_group *grp, struct lstcon_ndlink *ndl) -{ - list_del_init(&ndl->ndl_link); - lstcon_ndlink_release(ndl); - grp->grp_nnode--; -} - -static void -lstcon_group_ndlink_move(struct lstcon_group *old, - struct lstcon_group *new, struct lstcon_ndlink *ndl) -{ - unsigned int idx = 
LNET_NIDADDR(ndl->ndl_node->nd_id.nid) % - LST_NODE_HASHSIZE; - - list_del(&ndl->ndl_hlink); - list_del(&ndl->ndl_link); - old->grp_nnode--; - - list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]); - list_add_tail(&ndl->ndl_link, &new->grp_ndl_list); - new->grp_nnode++; -} - -static void -lstcon_group_move(struct lstcon_group *old, struct lstcon_group *new) -{ - struct lstcon_ndlink *ndl; - - while (!list_empty(&old->grp_ndl_list)) { - ndl = list_entry(old->grp_ndl_list.next, - struct lstcon_ndlink, ndl_link); - lstcon_group_ndlink_move(old, new, ndl); - } -} - -static int -lstcon_sesrpc_condition(int transop, struct lstcon_node *nd, void *arg) -{ - struct lstcon_group *grp = (struct lstcon_group *)arg; - - switch (transop) { - case LST_TRANS_SESNEW: - if (nd->nd_state == LST_NODE_ACTIVE) - return 0; - break; - - case LST_TRANS_SESEND: - if (nd->nd_state != LST_NODE_ACTIVE) - return 0; - - if (grp && nd->nd_ref > 1) - return 0; - break; - - case LST_TRANS_SESQRY: - break; - - default: - LBUG(); - } - - return 1; -} - -static int -lstcon_sesrpc_readent(int transop, struct srpc_msg *msg, - struct lstcon_rpc_ent __user *ent_up) -{ - struct srpc_debug_reply *rep; - - switch (transop) { - case LST_TRANS_SESNEW: - case LST_TRANS_SESEND: - return 0; - - case LST_TRANS_SESQRY: - rep = &msg->msg_body.dbg_reply; - - if (copy_to_user(&ent_up->rpe_priv[0], - &rep->dbg_timeout, sizeof(int)) || - copy_to_user(&ent_up->rpe_payload[0], - &rep->dbg_name, LST_NAME_SIZE)) - return -EFAULT; - - return 0; - - default: - LBUG(); - } - - return 0; -} - -static int -lstcon_group_nodes_add(struct lstcon_group *grp, - int count, struct lnet_process_id __user *ids_up, - unsigned int *featp, - struct list_head __user *result_up) -{ - struct lstcon_rpc_trans *trans; - struct lstcon_ndlink *ndl; - struct lstcon_group *tmp; - struct lnet_process_id id; - int i; - int rc; - - rc = lstcon_group_alloc(NULL, &tmp); - if (rc) { - CERROR("Out of memory\n"); - return -ENOMEM; - } - - for (i = 0 ; i 
< count; i++) { - if (copy_from_user(&id, &ids_up[i], sizeof(id))) { - rc = -EFAULT; - break; - } - - /* skip if it's in this group already */ - rc = lstcon_group_ndlink_find(grp, id, &ndl, 0); - if (!rc) - continue; - - /* add to tmp group */ - rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1); - if (rc) { - CERROR("Can't create ndlink, out of memory\n"); - break; - } - } - - if (rc) { - lstcon_group_decref(tmp); - return rc; - } - - rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list, - &tmp->grp_trans_list, LST_TRANS_SESNEW, - tmp, lstcon_sesrpc_condition, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - lstcon_group_decref(tmp); - return rc; - } - - /* post all RPCs */ - lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT); - - rc = lstcon_rpc_trans_interpreter(trans, result_up, - lstcon_sesrpc_readent); - *featp = trans->tas_features; - - /* destroy all RPGs */ - lstcon_rpc_trans_destroy(trans); - - lstcon_group_move(tmp, grp); - lstcon_group_decref(tmp); - - return rc; -} - -static int -lstcon_group_nodes_remove(struct lstcon_group *grp, - int count, struct lnet_process_id __user *ids_up, - struct list_head __user *result_up) -{ - struct lstcon_rpc_trans *trans; - struct lstcon_ndlink *ndl; - struct lstcon_group *tmp; - struct lnet_process_id id; - int rc; - int i; - - /* End session and remove node from the group */ - - rc = lstcon_group_alloc(NULL, &tmp); - if (rc) { - CERROR("Out of memory\n"); - return -ENOMEM; - } - - for (i = 0; i < count; i++) { - if (copy_from_user(&id, &ids_up[i], sizeof(id))) { - rc = -EFAULT; - goto error; - } - - /* move node to tmp group */ - if (!lstcon_group_ndlink_find(grp, id, &ndl, 0)) - lstcon_group_ndlink_move(grp, tmp, ndl); - } - - rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list, - &tmp->grp_trans_list, LST_TRANS_SESEND, - tmp, lstcon_sesrpc_condition, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - goto error; - } - - lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT); - - rc = 
lstcon_rpc_trans_interpreter(trans, result_up, NULL); - - lstcon_rpc_trans_destroy(trans); - /* release nodes anyway, because we can't rollback status */ - lstcon_group_decref(tmp); - - return rc; -error: - lstcon_group_move(tmp, grp); - lstcon_group_decref(tmp); - - return rc; -} - -int -lstcon_group_add(char *name) -{ - struct lstcon_group *grp; - int rc; - - rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST; - if (rc) { - /* find a group with same name */ - lstcon_group_decref(grp); - return rc; - } - - rc = lstcon_group_alloc(name, &grp); - if (rc) { - CERROR("Can't allocate descriptor for group %s\n", name); - return -ENOMEM; - } - - list_add_tail(&grp->grp_link, &console_session.ses_grp_list); - - return rc; -} - -int -lstcon_nodes_add(char *name, int count, struct lnet_process_id __user *ids_up, - unsigned int *featp, struct list_head __user *result_up) -{ - struct lstcon_group *grp; - int rc; - - LASSERT(count > 0); - LASSERT(ids_up); - - rc = lstcon_group_find(name, &grp); - if (rc) { - CDEBUG(D_NET, "Can't find group %s\n", name); - return rc; - } - - if (grp->grp_ref > 2) { - /* referred by other threads or test */ - CDEBUG(D_NET, "Group %s is busy\n", name); - lstcon_group_decref(grp); - - return -EBUSY; - } - - rc = lstcon_group_nodes_add(grp, count, ids_up, featp, result_up); - - lstcon_group_decref(grp); - - return rc; -} - -int -lstcon_group_del(char *name) -{ - struct lstcon_rpc_trans *trans; - struct lstcon_group *grp; - int rc; - - rc = lstcon_group_find(name, &grp); - if (rc) { - CDEBUG(D_NET, "Can't find group: %s\n", name); - return rc; - } - - if (grp->grp_ref > 2) { - /* referred by others threads or test */ - CDEBUG(D_NET, "Group %s is busy\n", name); - lstcon_group_decref(grp); - return -EBUSY; - } - - rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list, - &grp->grp_trans_list, LST_TRANS_SESEND, - grp, lstcon_sesrpc_condition, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - lstcon_group_decref(grp); - return rc; - } - - 
lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT); - - lstcon_rpc_trans_destroy(trans); - - lstcon_group_decref(grp); - /* - * -ref for session, it's destroyed, - * status can't be rolled back, destroy group anyway - */ - lstcon_group_decref(grp); - - return rc; -} - -int -lstcon_group_clean(char *name, int args) -{ - struct lstcon_group *grp = NULL; - int rc; - - rc = lstcon_group_find(name, &grp); - if (rc) { - CDEBUG(D_NET, "Can't find group %s\n", name); - return rc; - } - - if (grp->grp_ref > 2) { - /* referred by test */ - CDEBUG(D_NET, "Group %s is busy\n", name); - lstcon_group_decref(grp); - return -EBUSY; - } - - args = (LST_NODE_ACTIVE | LST_NODE_BUSY | - LST_NODE_DOWN | LST_NODE_UNKNOWN) & ~args; - - lstcon_group_drain(grp, args); - - lstcon_group_decref(grp); - /* release empty group */ - if (list_empty(&grp->grp_ndl_list)) - lstcon_group_decref(grp); - - return 0; -} - -int -lstcon_nodes_remove(char *name, int count, - struct lnet_process_id __user *ids_up, - struct list_head __user *result_up) -{ - struct lstcon_group *grp = NULL; - int rc; - - rc = lstcon_group_find(name, &grp); - if (rc) { - CDEBUG(D_NET, "Can't find group: %s\n", name); - return rc; - } - - if (grp->grp_ref > 2) { - /* referred by test */ - CDEBUG(D_NET, "Group %s is busy\n", name); - lstcon_group_decref(grp); - return -EBUSY; - } - - rc = lstcon_group_nodes_remove(grp, count, ids_up, result_up); - - lstcon_group_decref(grp); - /* release empty group */ - if (list_empty(&grp->grp_ndl_list)) - lstcon_group_decref(grp); - - return rc; -} - -int -lstcon_group_refresh(char *name, struct list_head __user *result_up) -{ - struct lstcon_rpc_trans *trans; - struct lstcon_group *grp; - int rc; - - rc = lstcon_group_find(name, &grp); - if (rc) { - CDEBUG(D_NET, "Can't find group: %s\n", name); - return rc; - } - - if (grp->grp_ref > 2) { - /* referred by test */ - CDEBUG(D_NET, "Group %s is busy\n", name); - lstcon_group_decref(grp); - return -EBUSY; - } - - /* re-invite all inactive 
nodes int the group */ - rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list, - &grp->grp_trans_list, LST_TRANS_SESNEW, - grp, lstcon_sesrpc_condition, &trans); - if (rc) { - /* local error, return */ - CDEBUG(D_NET, "Can't create transaction: %d\n", rc); - lstcon_group_decref(grp); - return rc; - } - - lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT); - - rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL); - - lstcon_rpc_trans_destroy(trans); - /* -ref for me */ - lstcon_group_decref(grp); - - return rc; -} - -int -lstcon_group_list(int index, int len, char __user *name_up) -{ - struct lstcon_group *grp; - - LASSERT(index >= 0); - LASSERT(name_up); - - list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { - if (!index--) { - return copy_to_user(name_up, grp->grp_name, len) ? - -EFAULT : 0; - } - } - - return -ENOENT; -} - -static int -lstcon_nodes_getent(struct list_head *head, int *index_p, - int *count_p, struct lstcon_node_ent __user *dents_up) -{ - struct lstcon_ndlink *ndl; - struct lstcon_node *nd; - int count = 0; - int index = 0; - - LASSERT(index_p && count_p); - LASSERT(dents_up); - LASSERT(*index_p >= 0); - LASSERT(*count_p > 0); - - list_for_each_entry(ndl, head, ndl_link) { - if (index++ < *index_p) - continue; - - if (count >= *count_p) - break; - - nd = ndl->ndl_node; - if (copy_to_user(&dents_up[count].nde_id, - &nd->nd_id, sizeof(nd->nd_id)) || - copy_to_user(&dents_up[count].nde_state, - &nd->nd_state, sizeof(nd->nd_state))) - return -EFAULT; - - count++; - } - - if (index <= *index_p) - return -ENOENT; - - *count_p = count; - *index_p = index; - - return 0; -} - -int -lstcon_group_info(char *name, struct lstcon_ndlist_ent __user *gents_p, - int *index_p, int *count_p, - struct lstcon_node_ent __user *dents_up) -{ - struct lstcon_ndlist_ent *gentp; - struct lstcon_group *grp; - struct lstcon_ndlink *ndl; - int rc; - - rc = lstcon_group_find(name, &grp); - if (rc) { - CDEBUG(D_NET, "Can't find group %s\n", name); - return rc; - 
} - - if (dents_up) { - /* verbose query */ - rc = lstcon_nodes_getent(&grp->grp_ndl_list, - index_p, count_p, dents_up); - lstcon_group_decref(grp); - - return rc; - } - - /* non-verbose query */ - gentp = kzalloc(sizeof(struct lstcon_ndlist_ent), GFP_NOFS); - if (!gentp) { - CERROR("Can't allocate ndlist_ent\n"); - lstcon_group_decref(grp); - - return -ENOMEM; - } - - list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) - LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp); - - rc = copy_to_user(gents_p, gentp, - sizeof(struct lstcon_ndlist_ent)) ? -EFAULT : 0; - - kfree(gentp); - - lstcon_group_decref(grp); - - return rc; -} - -static int -lstcon_batch_find(const char *name, struct lstcon_batch **batpp) -{ - struct lstcon_batch *bat; - - list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) { - if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) { - *batpp = bat; - return 0; - } - } - - return -ENOENT; -} - -int -lstcon_batch_add(char *name) -{ - struct lstcon_batch *bat; - int i; - int rc; - - rc = !lstcon_batch_find(name, &bat) ? 
-EEXIST : 0; - if (rc) { - CDEBUG(D_NET, "Batch %s already exists\n", name); - return rc; - } - - bat = kzalloc(sizeof(struct lstcon_batch), GFP_NOFS); - if (!bat) { - CERROR("Can't allocate descriptor for batch %s\n", name); - return -ENOMEM; - } - - bat->bat_cli_hash = kmalloc(sizeof(struct list_head) * LST_NODE_HASHSIZE, - GFP_KERNEL); - if (!bat->bat_cli_hash) { - CERROR("Can't allocate hash for batch %s\n", name); - kfree(bat); - - return -ENOMEM; - } - - bat->bat_srv_hash = kmalloc(sizeof(struct list_head) * LST_NODE_HASHSIZE, - GFP_KERNEL); - if (!bat->bat_srv_hash) { - CERROR("Can't allocate hash for batch %s\n", name); - kfree(bat->bat_cli_hash); - kfree(bat); - - return -ENOMEM; - } - - if (strlen(name) > sizeof(bat->bat_name) - 1) { - kfree(bat->bat_srv_hash); - kfree(bat->bat_cli_hash); - kfree(bat); - return -E2BIG; - } - strncpy(bat->bat_name, name, sizeof(bat->bat_name)); - bat->bat_hdr.tsb_index = 0; - bat->bat_hdr.tsb_id.bat_id = ++console_session.ses_id_cookie; - - bat->bat_ntest = 0; - bat->bat_state = LST_BATCH_IDLE; - - INIT_LIST_HEAD(&bat->bat_cli_list); - INIT_LIST_HEAD(&bat->bat_srv_list); - INIT_LIST_HEAD(&bat->bat_test_list); - INIT_LIST_HEAD(&bat->bat_trans_list); - - for (i = 0; i < LST_NODE_HASHSIZE; i++) { - INIT_LIST_HEAD(&bat->bat_cli_hash[i]); - INIT_LIST_HEAD(&bat->bat_srv_hash[i]); - } - - list_add_tail(&bat->bat_link, &console_session.ses_bat_list); - - return rc; -} - -int -lstcon_batch_list(int index, int len, char __user *name_up) -{ - struct lstcon_batch *bat; - - LASSERT(name_up); - LASSERT(index >= 0); - - list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) { - if (!index--) { - return copy_to_user(name_up, bat->bat_name, len) ? 
- -EFAULT : 0; - } - } - - return -ENOENT; -} - -int -lstcon_batch_info(char *name, struct lstcon_test_batch_ent __user *ent_up, - int server, int testidx, int *index_p, int *ndent_p, - struct lstcon_node_ent __user *dents_up) -{ - struct lstcon_test_batch_ent *entp; - struct list_head *clilst; - struct list_head *srvlst; - struct lstcon_test *test = NULL; - struct lstcon_batch *bat; - struct lstcon_ndlink *ndl; - int rc; - - rc = lstcon_batch_find(name, &bat); - if (rc) { - CDEBUG(D_NET, "Can't find batch %s\n", name); - return -ENOENT; - } - - if (testidx > 0) { - /* query test, test index start from 1 */ - list_for_each_entry(test, &bat->bat_test_list, tes_link) { - if (testidx-- == 1) - break; - } - - if (testidx > 0) { - CDEBUG(D_NET, "Can't find specified test in batch\n"); - return -ENOENT; - } - } - - clilst = !test ? &bat->bat_cli_list : - &test->tes_src_grp->grp_ndl_list; - srvlst = !test ? &bat->bat_srv_list : - &test->tes_dst_grp->grp_ndl_list; - - if (dents_up) { - rc = lstcon_nodes_getent((server ? srvlst : clilst), - index_p, ndent_p, dents_up); - return rc; - } - - /* non-verbose query */ - entp = kzalloc(sizeof(struct lstcon_test_batch_ent), GFP_NOFS); - if (!entp) - return -ENOMEM; - - if (!test) { - entp->u.tbe_batch.bae_ntest = bat->bat_ntest; - entp->u.tbe_batch.bae_state = bat->bat_state; - } else { - entp->u.tbe_test.tse_type = test->tes_type; - entp->u.tbe_test.tse_loop = test->tes_loop; - entp->u.tbe_test.tse_concur = test->tes_concur; - } - - list_for_each_entry(ndl, clilst, ndl_link) - LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_cli_nle); - - list_for_each_entry(ndl, srvlst, ndl_link) - LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle); - - rc = copy_to_user(ent_up, entp, - sizeof(struct lstcon_test_batch_ent)) ? 
-EFAULT : 0; - - kfree(entp); - - return rc; -} - -static int -lstcon_batrpc_condition(int transop, struct lstcon_node *nd, void *arg) -{ - switch (transop) { - case LST_TRANS_TSBRUN: - if (nd->nd_state != LST_NODE_ACTIVE) - return -ENETDOWN; - break; - - case LST_TRANS_TSBSTOP: - if (nd->nd_state != LST_NODE_ACTIVE) - return 0; - break; - - case LST_TRANS_TSBCLIQRY: - case LST_TRANS_TSBSRVQRY: - break; - } - - return 1; -} - -static int -lstcon_batch_op(struct lstcon_batch *bat, int transop, - struct list_head __user *result_up) -{ - struct lstcon_rpc_trans *trans; - int rc; - - rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list, - &bat->bat_trans_list, transop, - bat, lstcon_batrpc_condition, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - return rc; - } - - lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT); - - rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL); - - lstcon_rpc_trans_destroy(trans); - - return rc; -} - -int -lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up) -{ - struct lstcon_batch *bat; - int rc; - - if (lstcon_batch_find(name, &bat)) { - CDEBUG(D_NET, "Can't find batch %s\n", name); - return -ENOENT; - } - - bat->bat_arg = timeout; - - rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up); - - /* mark batch as running if it's started in any node */ - if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0)) - bat->bat_state = LST_BATCH_RUNNING; - - return rc; -} - -int -lstcon_batch_stop(char *name, int force, struct list_head __user *result_up) -{ - struct lstcon_batch *bat; - int rc; - - if (lstcon_batch_find(name, &bat)) { - CDEBUG(D_NET, "Can't find batch %s\n", name); - return -ENOENT; - } - - bat->bat_arg = force; - - rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up); - - /* mark batch as stopped if all RPCs finished */ - if (!lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0)) - bat->bat_state = LST_BATCH_IDLE; - - return rc; -} - -static void -lstcon_batch_destroy(struct 
lstcon_batch *bat) -{ - struct lstcon_ndlink *ndl; - struct lstcon_test *test; - int i; - - list_del(&bat->bat_link); - - while (!list_empty(&bat->bat_test_list)) { - test = list_entry(bat->bat_test_list.next, - struct lstcon_test, tes_link); - LASSERT(list_empty(&test->tes_trans_list)); - - list_del(&test->tes_link); - - lstcon_group_decref(test->tes_src_grp); - lstcon_group_decref(test->tes_dst_grp); - - kfree(test); - } - - LASSERT(list_empty(&bat->bat_trans_list)); - - while (!list_empty(&bat->bat_cli_list)) { - ndl = list_entry(bat->bat_cli_list.next, - struct lstcon_ndlink, ndl_link); - list_del_init(&ndl->ndl_link); - - lstcon_ndlink_release(ndl); - } - - while (!list_empty(&bat->bat_srv_list)) { - ndl = list_entry(bat->bat_srv_list.next, - struct lstcon_ndlink, ndl_link); - list_del_init(&ndl->ndl_link); - - lstcon_ndlink_release(ndl); - } - - for (i = 0; i < LST_NODE_HASHSIZE; i++) { - LASSERT(list_empty(&bat->bat_cli_hash[i])); - LASSERT(list_empty(&bat->bat_srv_hash[i])); - } - - kfree(bat->bat_cli_hash); - kfree(bat->bat_srv_hash); - kfree(bat); -} - -static int -lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg) -{ - struct lstcon_test *test; - struct lstcon_batch *batch; - struct lstcon_ndlink *ndl; - struct list_head *hash; - struct list_head *head; - - test = (struct lstcon_test *)arg; - LASSERT(test); - - batch = test->tes_batch; - LASSERT(batch); - - if (test->tes_oneside && - transop == LST_TRANS_TSBSRVADD) - return 0; - - if (nd->nd_state != LST_NODE_ACTIVE) - return -ENETDOWN; - - if (transop == LST_TRANS_TSBCLIADD) { - hash = batch->bat_cli_hash; - head = &batch->bat_cli_list; - - } else { - LASSERT(transop == LST_TRANS_TSBSRVADD); - - hash = batch->bat_srv_hash; - head = &batch->bat_srv_list; - } - - LASSERT(nd->nd_id.nid != LNET_NID_ANY); - - if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1)) - return -ENOMEM; - - if (list_empty(&ndl->ndl_link)) - list_add_tail(&ndl->ndl_link, head); - - return 1; -} - -static int 
-lstcon_test_nodes_add(struct lstcon_test *test, - struct list_head __user *result_up) -{ - struct lstcon_rpc_trans *trans; - struct lstcon_group *grp; - int transop; - int rc; - - LASSERT(test->tes_src_grp); - LASSERT(test->tes_dst_grp); - - transop = LST_TRANS_TSBSRVADD; - grp = test->tes_dst_grp; -again: - rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list, - &test->tes_trans_list, transop, - test, lstcon_testrpc_condition, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - return rc; - } - - lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT); - - if (lstcon_trans_stat()->trs_rpc_errno || - lstcon_trans_stat()->trs_fwk_errno) { - lstcon_rpc_trans_interpreter(trans, result_up, NULL); - - lstcon_rpc_trans_destroy(trans); - /* return if any error */ - CDEBUG(D_NET, "Failed to add test %s, RPC error %d, framework error %d\n", - transop == LST_TRANS_TSBCLIADD ? "client" : "server", - lstcon_trans_stat()->trs_rpc_errno, - lstcon_trans_stat()->trs_fwk_errno); - - return rc; - } - - lstcon_rpc_trans_destroy(trans); - - if (transop == LST_TRANS_TSBCLIADD) - return rc; - - transop = LST_TRANS_TSBCLIADD; - grp = test->tes_src_grp; - test->tes_cliidx = 0; - - /* requests to test clients */ - goto again; -} - -static int -lstcon_verify_batch(const char *name, struct lstcon_batch **batch) -{ - int rc; - - rc = lstcon_batch_find(name, batch); - if (rc) { - CDEBUG(D_NET, "Can't find batch %s\n", name); - return rc; - } - - if ((*batch)->bat_state != LST_BATCH_IDLE) { - CDEBUG(D_NET, "Can't change running batch %s\n", name); - return -EINVAL; - } - - return 0; -} - -static int -lstcon_verify_group(const char *name, struct lstcon_group **grp) -{ - int rc; - struct lstcon_ndlink *ndl; - - rc = lstcon_group_find(name, grp); - if (rc) { - CDEBUG(D_NET, "can't find group %s\n", name); - return rc; - } - - list_for_each_entry(ndl, &(*grp)->grp_ndl_list, ndl_link) { - if (ndl->ndl_node->nd_state == LST_NODE_ACTIVE) - return 0; - } - - CDEBUG(D_NET, "Group %s has no 
ACTIVE nodes\n", name); - - return -EINVAL; -} - -int -lstcon_test_add(char *batch_name, int type, int loop, - int concur, int dist, int span, - char *src_name, char *dst_name, - void *param, int paramlen, int *retp, - struct list_head __user *result_up) -{ - struct lstcon_test *test = NULL; - int rc; - struct lstcon_group *src_grp = NULL; - struct lstcon_group *dst_grp = NULL; - struct lstcon_batch *batch = NULL; - - /* - * verify that a batch of the given name exists, and the groups - * that will be part of the batch exist and have at least one - * active node - */ - rc = lstcon_verify_batch(batch_name, &batch); - if (rc) - goto out; - - rc = lstcon_verify_group(src_name, &src_grp); - if (rc) - goto out; - - rc = lstcon_verify_group(dst_name, &dst_grp); - if (rc) - goto out; - - if (dst_grp->grp_userland) - *retp = 1; - - test = kzalloc(offsetof(struct lstcon_test, tes_param[paramlen]), - GFP_KERNEL); - if (!test) { - CERROR("Can't allocate test descriptor\n"); - rc = -ENOMEM; - - goto out; - } - - test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id; - test->tes_batch = batch; - test->tes_type = type; - test->tes_oneside = 0; /* TODO */ - test->tes_loop = loop; - test->tes_concur = concur; - test->tes_stop_onerr = 1; /* TODO */ - test->tes_span = span; - test->tes_dist = dist; - test->tes_cliidx = 0; /* just used for creating RPC */ - test->tes_src_grp = src_grp; - test->tes_dst_grp = dst_grp; - INIT_LIST_HEAD(&test->tes_trans_list); - - if (param) { - test->tes_paramlen = paramlen; - memcpy(&test->tes_param[0], param, paramlen); - } - - rc = lstcon_test_nodes_add(test, result_up); - - if (rc) - goto out; - - if (lstcon_trans_stat()->trs_rpc_errno || - lstcon_trans_stat()->trs_fwk_errno) - CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type, - batch_name); - - /* add to test list anyway, so user can check what's going on */ - list_add_tail(&test->tes_link, &batch->bat_test_list); - - batch->bat_ntest++; - test->tes_hdr.tsb_index = batch->bat_ntest; - - /* hold 
groups so nobody can change them */ - return rc; -out: - kfree(test); - - if (dst_grp) - lstcon_group_decref(dst_grp); - - if (src_grp) - lstcon_group_decref(src_grp); - - return rc; -} - -static int -lstcon_test_find(struct lstcon_batch *batch, int idx, - struct lstcon_test **testpp) -{ - struct lstcon_test *test; - - list_for_each_entry(test, &batch->bat_test_list, tes_link) { - if (idx == test->tes_hdr.tsb_index) { - *testpp = test; - return 0; - } - } - - return -ENOENT; -} - -static int -lstcon_tsbrpc_readent(int transop, struct srpc_msg *msg, - struct lstcon_rpc_ent __user *ent_up) -{ - struct srpc_batch_reply *rep = &msg->msg_body.bat_reply; - - LASSERT(transop == LST_TRANS_TSBCLIQRY || - transop == LST_TRANS_TSBSRVQRY); - - /* positive errno, framework error code */ - if (copy_to_user(&ent_up->rpe_priv[0], &rep->bar_active, - sizeof(rep->bar_active))) - return -EFAULT; - - return 0; -} - -int -lstcon_test_batch_query(char *name, int testidx, int client, - int timeout, struct list_head __user *result_up) -{ - struct lstcon_rpc_trans *trans; - struct list_head *translist; - struct list_head *ndlist; - struct lstcon_tsb_hdr *hdr; - struct lstcon_batch *batch; - struct lstcon_test *test = NULL; - int transop; - int rc; - - rc = lstcon_batch_find(name, &batch); - if (rc) { - CDEBUG(D_NET, "Can't find batch: %s\n", name); - return rc; - } - - if (!testidx) { - translist = &batch->bat_trans_list; - ndlist = &batch->bat_cli_list; - hdr = &batch->bat_hdr; - } else { - /* query specified test only */ - rc = lstcon_test_find(batch, testidx, &test); - if (rc) { - CDEBUG(D_NET, "Can't find test: %d\n", testidx); - return rc; - } - - translist = &test->tes_trans_list; - ndlist = &test->tes_src_grp->grp_ndl_list; - hdr = &test->tes_hdr; - } - - transop = client ? 
LST_TRANS_TSBCLIQRY : LST_TRANS_TSBSRVQRY; - - rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr, - lstcon_batrpc_condition, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - return rc; - } - - lstcon_rpc_trans_postwait(trans, timeout); - - /* query a batch, not a test */ - if (!testidx && - !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) && - !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) { - /* all RPCs finished, and no active test */ - batch->bat_state = LST_BATCH_IDLE; - } - - rc = lstcon_rpc_trans_interpreter(trans, result_up, - lstcon_tsbrpc_readent); - lstcon_rpc_trans_destroy(trans); - - return rc; -} - -static int -lstcon_statrpc_readent(int transop, struct srpc_msg *msg, - struct lstcon_rpc_ent __user *ent_up) -{ - struct srpc_stat_reply *rep = &msg->msg_body.stat_reply; - struct sfw_counters __user *sfwk_stat; - struct srpc_counters __user *srpc_stat; - struct lnet_counters __user *lnet_stat; - - if (rep->str_status) - return 0; - - sfwk_stat = (struct sfw_counters __user *)&ent_up->rpe_payload[0]; - srpc_stat = (struct srpc_counters __user *)(sfwk_stat + 1); - lnet_stat = (struct lnet_counters __user *)(srpc_stat + 1); - - if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) || - copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) || - copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat))) - return -EFAULT; - - return 0; -} - -static int -lstcon_ndlist_stat(struct list_head *ndlist, - int timeout, struct list_head __user *result_up) -{ - struct list_head head; - struct lstcon_rpc_trans *trans; - int rc; - - INIT_LIST_HEAD(&head); - - rc = lstcon_rpc_trans_ndlist(ndlist, &head, - LST_TRANS_STATQRY, NULL, NULL, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - return rc; - } - - lstcon_rpc_trans_postwait(trans, LST_VALIDATE_TIMEOUT(timeout)); - - rc = lstcon_rpc_trans_interpreter(trans, result_up, - lstcon_statrpc_readent); - lstcon_rpc_trans_destroy(trans); - - return rc; -} - 
-int -lstcon_group_stat(char *grp_name, int timeout, - struct list_head __user *result_up) -{ - struct lstcon_group *grp; - int rc; - - rc = lstcon_group_find(grp_name, &grp); - if (rc) { - CDEBUG(D_NET, "Can't find group %s\n", grp_name); - return rc; - } - - rc = lstcon_ndlist_stat(&grp->grp_ndl_list, timeout, result_up); - - lstcon_group_decref(grp); - - return rc; -} - -int -lstcon_nodes_stat(int count, struct lnet_process_id __user *ids_up, - int timeout, struct list_head __user *result_up) -{ - struct lstcon_ndlink *ndl; - struct lstcon_group *tmp; - struct lnet_process_id id; - int i; - int rc; - - rc = lstcon_group_alloc(NULL, &tmp); - if (rc) { - CERROR("Out of memory\n"); - return -ENOMEM; - } - - for (i = 0 ; i < count; i++) { - if (copy_from_user(&id, &ids_up[i], sizeof(id))) { - rc = -EFAULT; - break; - } - - /* add to tmp group */ - rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2); - if (rc) { - CDEBUG((rc == -ENOMEM) ? D_ERROR : D_NET, - "Failed to find or create %s: %d\n", - libcfs_id2str(id), rc); - break; - } - } - - if (rc) { - lstcon_group_decref(tmp); - return rc; - } - - rc = lstcon_ndlist_stat(&tmp->grp_ndl_list, timeout, result_up); - - lstcon_group_decref(tmp); - - return rc; -} - -static int -lstcon_debug_ndlist(struct list_head *ndlist, - struct list_head *translist, - int timeout, struct list_head __user *result_up) -{ - struct lstcon_rpc_trans *trans; - int rc; - - rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY, - NULL, lstcon_sesrpc_condition, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - return rc; - } - - lstcon_rpc_trans_postwait(trans, LST_VALIDATE_TIMEOUT(timeout)); - - rc = lstcon_rpc_trans_interpreter(trans, result_up, - lstcon_sesrpc_readent); - lstcon_rpc_trans_destroy(trans); - - return rc; -} - -int -lstcon_session_debug(int timeout, struct list_head __user *result_up) -{ - return lstcon_debug_ndlist(&console_session.ses_ndl_list, - NULL, timeout, result_up); -} - -int 
-lstcon_batch_debug(int timeout, char *name, - int client, struct list_head __user *result_up) -{ - struct lstcon_batch *bat; - int rc; - - rc = lstcon_batch_find(name, &bat); - if (rc) - return -ENOENT; - - rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list : - &bat->bat_srv_list, - NULL, timeout, result_up); - - return rc; -} - -int -lstcon_group_debug(int timeout, char *name, - struct list_head __user *result_up) -{ - struct lstcon_group *grp; - int rc; - - rc = lstcon_group_find(name, &grp); - if (rc) - return -ENOENT; - - rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL, - timeout, result_up); - lstcon_group_decref(grp); - - return rc; -} - -int -lstcon_nodes_debug(int timeout, int count, - struct lnet_process_id __user *ids_up, - struct list_head __user *result_up) -{ - struct lnet_process_id id; - struct lstcon_ndlink *ndl; - struct lstcon_group *grp; - int i; - int rc; - - rc = lstcon_group_alloc(NULL, &grp); - if (rc) { - CDEBUG(D_NET, "Out of memory\n"); - return rc; - } - - for (i = 0; i < count; i++) { - if (copy_from_user(&id, &ids_up[i], sizeof(id))) { - rc = -EFAULT; - break; - } - - /* node is added to tmp group */ - rc = lstcon_group_ndlink_find(grp, id, &ndl, 1); - if (rc) { - CERROR("Can't create node link\n"); - break; - } - } - - if (rc) { - lstcon_group_decref(grp); - return rc; - } - - rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL, - timeout, result_up); - - lstcon_group_decref(grp); - - return rc; -} - -int -lstcon_session_match(struct lst_sid sid) -{ - return (console_session.ses_id.ses_nid == sid.ses_nid && - console_session.ses_id.ses_stamp == sid.ses_stamp) ? 
1 : 0; -} - -static void -lstcon_new_session_id(struct lst_sid *sid) -{ - struct lnet_process_id id; - - LASSERT(console_session.ses_state == LST_SESSION_NONE); - - LNetGetId(1, &id); - sid->ses_nid = id.nid; - sid->ses_stamp = jiffies; -} - -int -lstcon_session_new(char *name, int key, unsigned int feats, - int timeout, int force, struct lst_sid __user *sid_up) -{ - int rc = 0; - int i; - - if (console_session.ses_state != LST_SESSION_NONE) { - /* session exists */ - if (!force) { - CNETERR("Session %s already exists\n", - console_session.ses_name); - return -EEXIST; - } - - rc = lstcon_session_end(); - - /* lstcon_session_end() only return local error */ - if (rc) - return rc; - } - - if (feats & ~LST_FEATS_MASK) { - CNETERR("Unknown session features %x\n", - (feats & ~LST_FEATS_MASK)); - return -EINVAL; - } - - for (i = 0; i < LST_GLOBAL_HASHSIZE; i++) - LASSERT(list_empty(&console_session.ses_ndl_hash[i])); - - lstcon_new_session_id(&console_session.ses_id); - - console_session.ses_key = key; - console_session.ses_state = LST_SESSION_ACTIVE; - console_session.ses_force = !!force; - console_session.ses_features = feats; - console_session.ses_feats_updated = 0; - console_session.ses_timeout = (timeout <= 0) ? 
- LST_CONSOLE_TIMEOUT : timeout; - - if (strlen(name) > sizeof(console_session.ses_name) - 1) - return -E2BIG; - strlcpy(console_session.ses_name, name, - sizeof(console_session.ses_name)); - - rc = lstcon_batch_add(LST_DEFAULT_BATCH); - if (rc) - return rc; - - rc = lstcon_rpc_pinger_start(); - if (rc) { - struct lstcon_batch *bat = NULL; - - lstcon_batch_find(LST_DEFAULT_BATCH, &bat); - lstcon_batch_destroy(bat); - - return rc; - } - - if (!copy_to_user(sid_up, &console_session.ses_id, - sizeof(struct lst_sid))) - return rc; - - lstcon_session_end(); - - return -EFAULT; -} - -int -lstcon_session_info(struct lst_sid __user *sid_up, int __user *key_up, - unsigned __user *featp, - struct lstcon_ndlist_ent __user *ndinfo_up, - char __user *name_up, int len) -{ - struct lstcon_ndlist_ent *entp; - struct lstcon_ndlink *ndl; - int rc = 0; - - if (console_session.ses_state != LST_SESSION_ACTIVE) - return -ESRCH; - - entp = kzalloc(sizeof(*entp), GFP_NOFS); - if (!entp) - return -ENOMEM; - - list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) - LST_NODE_STATE_COUNTER(ndl->ndl_node, entp); - - if (copy_to_user(sid_up, &console_session.ses_id, - sizeof(*sid_up)) || - copy_to_user(key_up, &console_session.ses_key, - sizeof(*key_up)) || - copy_to_user(featp, &console_session.ses_features, - sizeof(*featp)) || - copy_to_user(ndinfo_up, entp, sizeof(*entp)) || - copy_to_user(name_up, console_session.ses_name, len)) - rc = -EFAULT; - - kfree(entp); - - return rc; -} - -int -lstcon_session_end(void) -{ - struct lstcon_rpc_trans *trans; - struct lstcon_group *grp; - struct lstcon_batch *bat; - int rc = 0; - - LASSERT(console_session.ses_state == LST_SESSION_ACTIVE); - - rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list, - NULL, LST_TRANS_SESEND, NULL, - lstcon_sesrpc_condition, &trans); - if (rc) { - CERROR("Can't create transaction: %d\n", rc); - return rc; - } - - console_session.ses_shutdown = 1; - - lstcon_rpc_pinger_stop(); - - 
lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT); - - lstcon_rpc_trans_destroy(trans); - /* User can do nothing even rpc failed, so go on */ - - /* waiting for orphan rpcs to die */ - lstcon_rpc_cleanup_wait(); - - console_session.ses_id = LST_INVALID_SID; - console_session.ses_state = LST_SESSION_NONE; - console_session.ses_key = 0; - console_session.ses_force = 0; - console_session.ses_feats_updated = 0; - - /* destroy all batches */ - while (!list_empty(&console_session.ses_bat_list)) { - bat = list_entry(console_session.ses_bat_list.next, - struct lstcon_batch, bat_link); - - lstcon_batch_destroy(bat); - } - - /* destroy all groups */ - while (!list_empty(&console_session.ses_grp_list)) { - grp = list_entry(console_session.ses_grp_list.next, - struct lstcon_group, grp_link); - LASSERT(grp->grp_ref == 1); - - lstcon_group_decref(grp); - } - - /* all nodes should be released */ - LASSERT(list_empty(&console_session.ses_ndl_list)); - - console_session.ses_shutdown = 0; - console_session.ses_expired = 0; - - return rc; -} - -int -lstcon_session_feats_check(unsigned int feats) -{ - int rc = 0; - - if (feats & ~LST_FEATS_MASK) { - CERROR("Can't support these features: %x\n", - (feats & ~LST_FEATS_MASK)); - return -EPROTO; - } - - spin_lock(&console_session.ses_rpc_lock); - - if (!console_session.ses_feats_updated) { - console_session.ses_feats_updated = 1; - console_session.ses_features = feats; - } - - if (console_session.ses_features != feats) - rc = -EPROTO; - - spin_unlock(&console_session.ses_rpc_lock); - - if (rc) { - CERROR("remote features %x do not match with session features %x of console\n", - feats, console_session.ses_features); - } - - return rc; -} - -static int -lstcon_acceptor_handle(struct srpc_server_rpc *rpc) -{ - struct srpc_msg *rep = &rpc->srpc_replymsg; - struct srpc_msg *req = &rpc->srpc_reqstbuf->buf_msg; - struct srpc_join_reqst *jreq = &req->msg_body.join_reqst; - struct srpc_join_reply *jrep = &rep->msg_body.join_reply; - struct 
lstcon_group *grp = NULL; - struct lstcon_ndlink *ndl; - int rc = 0; - - sfw_unpack_message(req); - - mutex_lock(&console_session.ses_mutex); - - jrep->join_sid = console_session.ses_id; - - if (console_session.ses_id.ses_nid == LNET_NID_ANY) { - jrep->join_status = ESRCH; - goto out; - } - - if (lstcon_session_feats_check(req->msg_ses_feats)) { - jrep->join_status = EPROTO; - goto out; - } - - if (jreq->join_sid.ses_nid != LNET_NID_ANY && - !lstcon_session_match(jreq->join_sid)) { - jrep->join_status = EBUSY; - goto out; - } - - if (lstcon_group_find(jreq->join_group, &grp)) { - rc = lstcon_group_alloc(jreq->join_group, &grp); - if (rc) { - CERROR("Out of memory\n"); - goto out; - } - - list_add_tail(&grp->grp_link, - &console_session.ses_grp_list); - lstcon_group_addref(grp); - } - - if (grp->grp_ref > 2) { - /* Group in using */ - jrep->join_status = EBUSY; - goto out; - } - - rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0); - if (!rc) { - jrep->join_status = EEXIST; - goto out; - } - - rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1); - if (rc) { - CERROR("Out of memory\n"); - goto out; - } - - ndl->ndl_node->nd_state = LST_NODE_ACTIVE; - ndl->ndl_node->nd_timeout = console_session.ses_timeout; - - if (!grp->grp_userland) - grp->grp_userland = 1; - - strlcpy(jrep->join_session, console_session.ses_name, - sizeof(jrep->join_session)); - jrep->join_timeout = console_session.ses_timeout; - jrep->join_status = 0; - -out: - rep->msg_ses_feats = console_session.ses_features; - if (grp) - lstcon_group_decref(grp); - - mutex_unlock(&console_session.ses_mutex); - - return rc; -} - -static struct srpc_service lstcon_acceptor_service; - -static void lstcon_init_acceptor_service(void) -{ - /* initialize selftest console acceptor service table */ - lstcon_acceptor_service.sv_name = "join session"; - lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle; - lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN; - lstcon_acceptor_service.sv_wi_total = 
SFW_FRWK_WI_MAX; -} - -static struct notifier_block lstcon_ioctl_handler = { - .notifier_call = lstcon_ioctl_entry, -}; - -/* initialize console */ -int -lstcon_console_init(void) -{ - int i; - int rc; - - memset(&console_session, 0, sizeof(struct lstcon_session)); - - console_session.ses_id = LST_INVALID_SID; - console_session.ses_state = LST_SESSION_NONE; - console_session.ses_timeout = 0; - console_session.ses_force = 0; - console_session.ses_expired = 0; - console_session.ses_feats_updated = 0; - console_session.ses_features = LST_FEATS_MASK; - console_session.ses_laststamp = ktime_get_real_seconds(); - - mutex_init(&console_session.ses_mutex); - - INIT_LIST_HEAD(&console_session.ses_ndl_list); - INIT_LIST_HEAD(&console_session.ses_grp_list); - INIT_LIST_HEAD(&console_session.ses_bat_list); - INIT_LIST_HEAD(&console_session.ses_trans_list); - - console_session.ses_ndl_hash = - kmalloc(sizeof(struct list_head) * LST_GLOBAL_HASHSIZE, GFP_KERNEL); - if (!console_session.ses_ndl_hash) - return -ENOMEM; - - for (i = 0; i < LST_GLOBAL_HASHSIZE; i++) - INIT_LIST_HEAD(&console_session.ses_ndl_hash[i]); - - /* initialize acceptor service table */ - lstcon_init_acceptor_service(); - - rc = srpc_add_service(&lstcon_acceptor_service); - LASSERT(rc != -EBUSY); - if (rc) { - kfree(console_session.ses_ndl_hash); - return rc; - } - - rc = srpc_service_add_buffers(&lstcon_acceptor_service, - lstcon_acceptor_service.sv_wi_total); - if (rc) { - rc = -ENOMEM; - goto out; - } - - rc = blocking_notifier_chain_register(&libcfs_ioctl_list, - &lstcon_ioctl_handler); - - if (!rc) { - lstcon_rpc_module_init(); - return 0; - } - -out: - srpc_shutdown_service(&lstcon_acceptor_service); - srpc_remove_service(&lstcon_acceptor_service); - - kfree(console_session.ses_ndl_hash); - - srpc_wait_service_shutdown(&lstcon_acceptor_service); - - return rc; -} - -int -lstcon_console_fini(void) -{ - int i; - - blocking_notifier_chain_unregister(&libcfs_ioctl_list, - &lstcon_ioctl_handler); - - 
mutex_lock(&console_session.ses_mutex); - - srpc_shutdown_service(&lstcon_acceptor_service); - srpc_remove_service(&lstcon_acceptor_service); - - if (console_session.ses_state != LST_SESSION_NONE) - lstcon_session_end(); - - lstcon_rpc_module_fini(); - - mutex_unlock(&console_session.ses_mutex); - - LASSERT(list_empty(&console_session.ses_ndl_list)); - LASSERT(list_empty(&console_session.ses_grp_list)); - LASSERT(list_empty(&console_session.ses_bat_list)); - LASSERT(list_empty(&console_session.ses_trans_list)); - - for (i = 0; i < LST_NODE_HASHSIZE; i++) - LASSERT(list_empty(&console_session.ses_ndl_hash[i])); - - kfree(console_session.ses_ndl_hash); - - srpc_wait_service_shutdown(&lstcon_acceptor_service); - - return 0; -} diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h deleted file mode 100644 index 2826205e36a1..000000000000 --- a/drivers/staging/lustre/lnet/selftest/console.h +++ /dev/null @@ -1,244 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/selftest/console.h - * - * kernel structure for LST console - * - * Author: Liang Zhen - */ - -#ifndef __LST_CONSOLE_H__ -#define __LST_CONSOLE_H__ - -#include -#include -#include "selftest.h" -#include "conrpc.h" - -/* node descriptor */ -struct lstcon_node { - struct lnet_process_id nd_id; /* id of the node */ - int nd_ref; /* reference count */ - int nd_state; /* state of the node */ - int nd_timeout; /* session timeout */ - unsigned long nd_stamp; /* timestamp of last replied RPC */ - struct lstcon_rpc nd_ping; /* ping rpc */ -}; - -/* node link descriptor */ -struct lstcon_ndlink { - struct list_head ndl_link; /* chain on list */ - struct list_head ndl_hlink; /* chain on hash */ - struct lstcon_node *ndl_node; /* pointer to node */ -}; - -/* (alias of nodes) group descriptor */ -struct lstcon_group { - struct list_head grp_link; /* chain on global group list - */ - int grp_ref; /* reference count */ - int grp_userland; /* has userland nodes */ - int grp_nnode; /* # of nodes */ - char grp_name[LST_NAME_SIZE]; /* group name */ - - struct list_head grp_trans_list; /* transaction list */ - struct list_head grp_ndl_list; /* nodes list */ - struct list_head grp_ndl_hash[0]; /* hash table for nodes */ -}; - -#define LST_BATCH_IDLE 0xB0 /* idle batch */ -#define LST_BATCH_RUNNING 0xB1 /* running batch */ - -struct lstcon_tsb_hdr { - struct lst_bid tsb_id; /* batch ID */ - int tsb_index; /* test index */ -}; - -/* (tests ) batch descriptor */ -struct lstcon_batch { - struct lstcon_tsb_hdr bat_hdr; /* test_batch header */ - struct list_head bat_link; /* chain on session's batches list */ - int bat_ntest; /* # of test */ - int bat_state; /* state of the batch */ - int bat_arg; /* parameter for run|stop, timeout - * for run, force for stop - */ - char bat_name[LST_NAME_SIZE];/* name of batch 
*/ - - struct list_head bat_test_list; /* list head of tests (struct lstcon_test) - */ - struct list_head bat_trans_list; /* list head of transaction */ - struct list_head bat_cli_list; /* list head of client nodes - * (struct lstcon_node) - */ - struct list_head *bat_cli_hash; /* hash table of client nodes */ - struct list_head bat_srv_list; /* list head of server nodes */ - struct list_head *bat_srv_hash; /* hash table of server nodes */ -}; - -/* a single test descriptor */ -struct lstcon_test { - struct lstcon_tsb_hdr tes_hdr; /* test batch header */ - struct list_head tes_link; /* chain on batch's tests list */ - struct lstcon_batch *tes_batch; /* pointer to batch */ - - int tes_type; /* type of the test, i.e: bulk, ping */ - int tes_stop_onerr; /* stop on error */ - int tes_oneside; /* one-sided test */ - int tes_concur; /* concurrency */ - int tes_loop; /* loop count */ - int tes_dist; /* nodes distribution of target group */ - int tes_span; /* nodes span of target group */ - int tes_cliidx; /* client index, used for RPC creating */ - - struct list_head tes_trans_list; /* transaction list */ - struct lstcon_group *tes_src_grp; /* group run the test */ - struct lstcon_group *tes_dst_grp; /* target group */ - - int tes_paramlen; /* test parameter length */ - char tes_param[0]; /* test parameter */ -}; - -#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */ -#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */ - -#define LST_SESSION_NONE 0x0 /* no session */ -#define LST_SESSION_ACTIVE 0x1 /* working session */ - -#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */ - -struct lstcon_session { - struct mutex ses_mutex; /* only 1 thread in session */ - struct lst_sid ses_id; /* global session id */ - int ses_key; /* local session key */ - int ses_state; /* state of session */ - int ses_timeout; /* timeout in seconds */ - time64_t ses_laststamp; /* last operation stamp (seconds) - */ - unsigned int ses_features; /* tests 
features of the session - */ - unsigned int ses_feats_updated:1; /* features are synced with - * remote test nodes - */ - unsigned int ses_force:1; /* force creating */ - unsigned int ses_shutdown:1; /* session is shutting down */ - unsigned int ses_expired:1; /* console is timedout */ - __u64 ses_id_cookie; /* batch id cookie */ - char ses_name[LST_NAME_SIZE];/* session name */ - struct lstcon_rpc_trans *ses_ping; /* session pinger */ - struct stt_timer ses_ping_timer; /* timer for pinger */ - struct lstcon_trans_stat ses_trans_stat; /* transaction stats */ - - struct list_head ses_trans_list; /* global list of transaction */ - struct list_head ses_grp_list; /* global list of groups */ - struct list_head ses_bat_list; /* global list of batches */ - struct list_head ses_ndl_list; /* global list of nodes */ - struct list_head *ses_ndl_hash; /* hash table of nodes */ - - spinlock_t ses_rpc_lock; /* serialize */ - atomic_t ses_rpc_counter; /* # of initialized RPCs */ - struct list_head ses_rpc_freelist; /* idle console rpc */ -}; /* session descriptor */ - -extern struct lstcon_session console_session; - -static inline struct lstcon_trans_stat * -lstcon_trans_stat(void) -{ - return &console_session.ses_trans_stat; -} - -static inline struct list_head * -lstcon_id2hash(struct lnet_process_id id, struct list_head *hash) -{ - unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; - - return &hash[idx]; -} - -int lstcon_ioctl_entry(struct notifier_block *nb, - unsigned long cmd, void *vdata); -int lstcon_console_init(void); -int lstcon_console_fini(void); -int lstcon_session_match(struct lst_sid sid); -int lstcon_session_new(char *name, int key, unsigned int version, - int timeout, int flags, struct lst_sid __user *sid_up); -int lstcon_session_info(struct lst_sid __user *sid_up, int __user *key, - unsigned __user *verp, struct lstcon_ndlist_ent __user *entp, - char __user *name_up, int len); -int lstcon_session_end(void); -int lstcon_session_debug(int timeout, 
struct list_head __user *result_up); -int lstcon_session_feats_check(unsigned int feats); -int lstcon_batch_debug(int timeout, char *name, - int client, struct list_head __user *result_up); -int lstcon_group_debug(int timeout, char *name, - struct list_head __user *result_up); -int lstcon_nodes_debug(int timeout, int nnd, - struct lnet_process_id __user *nds_up, - struct list_head __user *result_up); -int lstcon_group_add(char *name); -int lstcon_group_del(char *name); -int lstcon_group_clean(char *name, int args); -int lstcon_group_refresh(char *name, struct list_head __user *result_up); -int lstcon_nodes_add(char *name, int nnd, struct lnet_process_id __user *nds_up, - unsigned int *featp, struct list_head __user *result_up); -int lstcon_nodes_remove(char *name, int nnd, - struct lnet_process_id __user *nds_up, - struct list_head __user *result_up); -int lstcon_group_info(char *name, struct lstcon_ndlist_ent __user *gent_up, - int *index_p, int *ndent_p, - struct lstcon_node_ent __user *ndents_up); -int lstcon_group_list(int idx, int len, char __user *name_up); -int lstcon_batch_add(char *name); -int lstcon_batch_run(char *name, int timeout, - struct list_head __user *result_up); -int lstcon_batch_stop(char *name, int force, - struct list_head __user *result_up); -int lstcon_test_batch_query(char *name, int testidx, - int client, int timeout, - struct list_head __user *result_up); -int lstcon_batch_del(char *name); -int lstcon_batch_list(int idx, int namelen, char __user *name_up); -int lstcon_batch_info(char *name, struct lstcon_test_batch_ent __user *ent_up, - int server, int testidx, int *index_p, - int *ndent_p, struct lstcon_node_ent __user *dents_up); -int lstcon_group_stat(char *grp_name, int timeout, - struct list_head __user *result_up); -int lstcon_nodes_stat(int count, struct lnet_process_id __user *ids_up, - int timeout, struct list_head __user *result_up); -int lstcon_test_add(char *batch_name, int type, int loop, - int concur, int dist, int span, - 
char *src_name, char *dst_name, - void *param, int paramlen, int *retp, - struct list_head __user *result_up); -#endif diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c deleted file mode 100644 index 741af10560ad..000000000000 --- a/drivers/staging/lustre/lnet/selftest/framework.c +++ /dev/null @@ -1,1786 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lnet/selftest/framework.c - * - * Author: Isaac Huang - * Author: Liang Zhen - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "selftest.h" - -struct lst_sid LST_INVALID_SID = {LNET_NID_ANY, -1}; - -static int session_timeout = 100; -module_param(session_timeout, int, 0444); -MODULE_PARM_DESC(session_timeout, "test session timeout in seconds (100 by default, 0 == never)"); - -static int rpc_timeout = 64; -module_param(rpc_timeout, int, 0644); -MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)"); - -#define sfw_unpack_id(id) \ -do { \ - __swab64s(&(id).nid); \ - __swab32s(&(id).pid); \ -} while (0) - -#define sfw_unpack_sid(sid) \ -do { \ - __swab64s(&(sid).ses_nid); \ - __swab64s(&(sid).ses_stamp); \ -} while (0) - -#define sfw_unpack_fw_counters(fc) \ -do { \ - __swab32s(&(fc).running_ms); \ - __swab32s(&(fc).active_batches); \ - __swab32s(&(fc).zombie_sessions); \ - __swab32s(&(fc).brw_errors); \ - __swab32s(&(fc).ping_errors); \ -} while (0) - -#define sfw_unpack_rpc_counters(rc) \ -do { \ - __swab32s(&(rc).errors); \ - __swab32s(&(rc).rpcs_sent); \ - __swab32s(&(rc).rpcs_rcvd); \ - __swab32s(&(rc).rpcs_dropped); \ - __swab32s(&(rc).rpcs_expired); \ - __swab64s(&(rc).bulk_get); \ - __swab64s(&(rc).bulk_put); \ -} while (0) - -#define sfw_unpack_lnet_counters(lc) \ -do { \ - __swab32s(&(lc).errors); \ - __swab32s(&(lc).msgs_max); \ - __swab32s(&(lc).msgs_alloc); \ - __swab32s(&(lc).send_count); \ - __swab32s(&(lc).recv_count); \ - __swab32s(&(lc).drop_count); \ - __swab32s(&(lc).route_count); \ - __swab64s(&(lc).send_length); \ - __swab64s(&(lc).recv_length); \ - __swab64s(&(lc).drop_length); \ - __swab64s(&(lc).route_length); \ -} while (0) - -#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive)) -#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive)) - -static struct smoketest_framework { - struct list_head fw_zombie_rpcs; /* RPCs to be recycled */ - struct list_head fw_zombie_sessions; /* stopping sessions 
*/ - struct list_head fw_tests; /* registered test cases */ - atomic_t fw_nzombies; /* # zombie sessions */ - spinlock_t fw_lock; /* serialise */ - struct sfw_session *fw_session; /* _the_ session */ - int fw_shuttingdown; /* shutdown in progress */ - struct srpc_server_rpc *fw_active_srpc;/* running RPC */ -} sfw_data; - -/* forward ref's */ -int sfw_stop_batch(struct sfw_batch *tsb, int force); -void sfw_destroy_session(struct sfw_session *sn); - -static inline struct sfw_test_case * -sfw_find_test_case(int id) -{ - struct sfw_test_case *tsc; - - LASSERT(id <= SRPC_SERVICE_MAX_ID); - LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID); - - list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) { - if (tsc->tsc_srv_service->sv_id == id) - return tsc; - } - - return NULL; -} - -static int -sfw_register_test(struct srpc_service *service, - struct sfw_test_client_ops *cliops) -{ - struct sfw_test_case *tsc; - - if (sfw_find_test_case(service->sv_id)) { - CERROR("Failed to register test %s (%d)\n", - service->sv_name, service->sv_id); - return -EEXIST; - } - - tsc = kzalloc(sizeof(struct sfw_test_case), GFP_NOFS); - if (!tsc) - return -ENOMEM; - - tsc->tsc_cli_ops = cliops; - tsc->tsc_srv_service = service; - - list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests); - return 0; -} - -static void -sfw_add_session_timer(void) -{ - struct sfw_session *sn = sfw_data.fw_session; - struct stt_timer *timer = &sn->sn_timer; - - LASSERT(!sfw_data.fw_shuttingdown); - - if (!sn || !sn->sn_timeout) - return; - - LASSERT(!sn->sn_timer_active); - - sn->sn_timer_active = 1; - timer->stt_expires = ktime_get_real_seconds() + sn->sn_timeout; - stt_add_timer(timer); -} - -static int -sfw_del_session_timer(void) -{ - struct sfw_session *sn = sfw_data.fw_session; - - if (!sn || !sn->sn_timer_active) - return 0; - - LASSERT(sn->sn_timeout); - - if (stt_del_timer(&sn->sn_timer)) { /* timer defused */ - sn->sn_timer_active = 0; - return 0; - } - - return -EBUSY; /* racing with sfw_session_expired() */ -} 
- -static void -sfw_deactivate_session(void) -__must_hold(&sfw_data.fw_lock) -{ - struct sfw_session *sn = sfw_data.fw_session; - int nactive = 0; - struct sfw_batch *tsb; - struct sfw_test_case *tsc; - - if (!sn) - return; - - LASSERT(!sn->sn_timer_active); - - sfw_data.fw_session = NULL; - atomic_inc(&sfw_data.fw_nzombies); - list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions); - - spin_unlock(&sfw_data.fw_lock); - - list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) { - srpc_abort_service(tsc->tsc_srv_service); - } - - spin_lock(&sfw_data.fw_lock); - - list_for_each_entry(tsb, &sn->sn_batches, bat_list) { - if (sfw_batch_active(tsb)) { - nactive++; - sfw_stop_batch(tsb, 1); - } - } - - if (nactive) - return; /* wait for active batches to stop */ - - list_del_init(&sn->sn_list); - spin_unlock(&sfw_data.fw_lock); - - sfw_destroy_session(sn); - - spin_lock(&sfw_data.fw_lock); -} - -static void -sfw_session_expired(void *data) -{ - struct sfw_session *sn = data; - - spin_lock(&sfw_data.fw_lock); - - LASSERT(sn->sn_timer_active); - LASSERT(sn == sfw_data.fw_session); - - CWARN("Session expired! 
sid: %s-%llu, name: %s\n", - libcfs_nid2str(sn->sn_id.ses_nid), - sn->sn_id.ses_stamp, &sn->sn_name[0]); - - sn->sn_timer_active = 0; - sfw_deactivate_session(); - - spin_unlock(&sfw_data.fw_lock); -} - -static inline void -sfw_init_session(struct sfw_session *sn, struct lst_sid sid, - unsigned int features, const char *name) -{ - struct stt_timer *timer = &sn->sn_timer; - - memset(sn, 0, sizeof(struct sfw_session)); - INIT_LIST_HEAD(&sn->sn_list); - INIT_LIST_HEAD(&sn->sn_batches); - atomic_set(&sn->sn_refcount, 1); /* +1 for caller */ - atomic_set(&sn->sn_brw_errors, 0); - atomic_set(&sn->sn_ping_errors, 0); - strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name)); - - sn->sn_timer_active = 0; - sn->sn_id = sid; - sn->sn_features = features; - sn->sn_timeout = session_timeout; - sn->sn_started = jiffies; - - timer->stt_data = sn; - timer->stt_func = sfw_session_expired; - INIT_LIST_HEAD(&timer->stt_list); -} - -/* completion handler for incoming framework RPCs */ -static void -sfw_server_rpc_done(struct srpc_server_rpc *rpc) -{ - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - int status = rpc->srpc_status; - - CDEBUG(D_NET, "Incoming framework RPC done: service %s, peer %s, status %s:%d\n", - sv->sv_name, libcfs_id2str(rpc->srpc_peer), - swi_state2str(rpc->srpc_wi.swi_state), - status); - - if (rpc->srpc_bulk) - sfw_free_pages(rpc); -} - -static void -sfw_client_rpc_fini(struct srpc_client_rpc *rpc) -{ - LASSERT(!rpc->crpc_bulk.bk_niov); - LASSERT(list_empty(&rpc->crpc_list)); - LASSERT(!atomic_read(&rpc->crpc_refcount)); - - CDEBUG(D_NET, "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(rpc->crpc_wi.swi_state), - rpc->crpc_aborted, rpc->crpc_status); - - spin_lock(&sfw_data.fw_lock); - - /* my callers must finish all RPCs before shutting me down */ - LASSERT(!sfw_data.fw_shuttingdown); - list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs); - - 
spin_unlock(&sfw_data.fw_lock); -} - -static struct sfw_batch * -sfw_find_batch(struct lst_bid bid) -{ - struct sfw_session *sn = sfw_data.fw_session; - struct sfw_batch *bat; - - LASSERT(sn); - - list_for_each_entry(bat, &sn->sn_batches, bat_list) { - if (bat->bat_id.bat_id == bid.bat_id) - return bat; - } - - return NULL; -} - -static struct sfw_batch * -sfw_bid2batch(struct lst_bid bid) -{ - struct sfw_session *sn = sfw_data.fw_session; - struct sfw_batch *bat; - - LASSERT(sn); - - bat = sfw_find_batch(bid); - if (bat) - return bat; - - bat = kzalloc(sizeof(struct sfw_batch), GFP_NOFS); - if (!bat) - return NULL; - - bat->bat_error = 0; - bat->bat_session = sn; - bat->bat_id = bid; - atomic_set(&bat->bat_nactive, 0); - INIT_LIST_HEAD(&bat->bat_tests); - - list_add_tail(&bat->bat_list, &sn->sn_batches); - return bat; -} - -static int -sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply) -{ - struct sfw_session *sn = sfw_data.fw_session; - struct sfw_counters *cnt = &reply->str_fw; - struct sfw_batch *bat; - - reply->str_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; - - if (request->str_sid.ses_nid == LNET_NID_ANY) { - reply->str_status = EINVAL; - return 0; - } - - if (!sn || !sfw_sid_equal(request->str_sid, sn->sn_id)) { - reply->str_status = ESRCH; - return 0; - } - - lnet_counters_get(&reply->str_lnet); - srpc_get_counters(&reply->str_rpc); - - /* - * send over the msecs since the session was started - * with 32 bits to send, this is ~49 days - */ - cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started); - cnt->brw_errors = atomic_read(&sn->sn_brw_errors); - cnt->ping_errors = atomic_read(&sn->sn_ping_errors); - cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies); - - cnt->active_batches = 0; - list_for_each_entry(bat, &sn->sn_batches, bat_list) { - if (atomic_read(&bat->bat_nactive) > 0) - cnt->active_batches++; - } - - reply->str_status = 0; - return 0; -} - -int -sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply) -{ - struct sfw_session *sn = sfw_data.fw_session; - struct srpc_msg *msg = container_of(request, struct srpc_msg, - msg_body.mksn_reqst); - int cplen = 0; - - if (request->mksn_sid.ses_nid == LNET_NID_ANY) { - reply->mksn_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; - reply->mksn_status = EINVAL; - return 0; - } - - if (sn) { - reply->mksn_status = 0; - reply->mksn_sid = sn->sn_id; - reply->mksn_timeout = sn->sn_timeout; - - if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) { - atomic_inc(&sn->sn_refcount); - return 0; - } - - if (!request->mksn_force) { - reply->mksn_status = EBUSY; - cplen = strlcpy(&reply->mksn_name[0], &sn->sn_name[0], - sizeof(reply->mksn_name)); - if (cplen >= sizeof(reply->mksn_name)) - return -E2BIG; - return 0; - } - } - - /* - * reject the request if it requires unknown features - * NB: old version will always accept all features because it's not - * aware of srpc_msg::msg_ses_feats, it's a defect but it's also - * harmless because it will return zero feature to console, and it's - * console's responsibility to make sure all nodes in a session have - * same feature mask. - */ - if (msg->msg_ses_feats & ~LST_FEATS_MASK) { - reply->mksn_status = EPROTO; - return 0; - } - - /* brand new or create by force */ - sn = kzalloc(sizeof(struct sfw_session), GFP_NOFS); - if (!sn) { - CERROR("dropping RPC mksn under memory pressure\n"); - return -ENOMEM; - } - - sfw_init_session(sn, request->mksn_sid, - msg->msg_ses_feats, &request->mksn_name[0]); - - spin_lock(&sfw_data.fw_lock); - - sfw_deactivate_session(); - LASSERT(!sfw_data.fw_session); - sfw_data.fw_session = sn; - - spin_unlock(&sfw_data.fw_lock); - - reply->mksn_status = 0; - reply->mksn_sid = sn->sn_id; - reply->mksn_timeout = sn->sn_timeout; - return 0; -} - -static int -sfw_remove_session(struct srpc_rmsn_reqst *request, - struct srpc_rmsn_reply *reply) -{ - struct sfw_session *sn = sfw_data.fw_session; - - reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id; - - if (request->rmsn_sid.ses_nid == LNET_NID_ANY) { - reply->rmsn_status = EINVAL; - return 0; - } - - if (!sn || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) { - reply->rmsn_status = !sn ? 
ESRCH : EBUSY; - return 0; - } - - if (!atomic_dec_and_test(&sn->sn_refcount)) { - reply->rmsn_status = 0; - return 0; - } - - spin_lock(&sfw_data.fw_lock); - sfw_deactivate_session(); - spin_unlock(&sfw_data.fw_lock); - - reply->rmsn_status = 0; - reply->rmsn_sid = LST_INVALID_SID; - LASSERT(!sfw_data.fw_session); - return 0; -} - -static int -sfw_debug_session(struct srpc_debug_reqst *request, - struct srpc_debug_reply *reply) -{ - struct sfw_session *sn = sfw_data.fw_session; - - if (!sn) { - reply->dbg_status = ESRCH; - reply->dbg_sid = LST_INVALID_SID; - return 0; - } - - reply->dbg_status = 0; - reply->dbg_sid = sn->sn_id; - reply->dbg_timeout = sn->sn_timeout; - if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name)) - >= sizeof(reply->dbg_name)) - return -E2BIG; - - return 0; -} - -static void -sfw_test_rpc_fini(struct srpc_client_rpc *rpc) -{ - struct sfw_test_unit *tsu = rpc->crpc_priv; - struct sfw_test_instance *tsi = tsu->tsu_instance; - - /* Called with hold of tsi->tsi_lock */ - LASSERT(list_empty(&rpc->crpc_list)); - list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs); -} - -static inline int -sfw_test_buffers(struct sfw_test_instance *tsi) -{ - struct sfw_test_case *tsc; - struct srpc_service *svc; - int nbuf; - - LASSERT(tsi); - tsc = sfw_find_test_case(tsi->tsi_service); - LASSERT(tsc); - svc = tsc->tsc_srv_service; - LASSERT(svc); - - nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts; - return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA); -} - -static int -sfw_load_test(struct sfw_test_instance *tsi) -{ - struct sfw_test_case *tsc; - struct srpc_service *svc; - int nbuf; - int rc; - - LASSERT(tsi); - tsc = sfw_find_test_case(tsi->tsi_service); - nbuf = sfw_test_buffers(tsi); - LASSERT(tsc); - svc = tsc->tsc_srv_service; - - if (tsi->tsi_is_client) { - tsi->tsi_ops = tsc->tsc_cli_ops; - return 0; - } - - rc = srpc_service_add_buffers(svc, nbuf); - if (rc) { - CWARN("Failed to reserve enough buffers: service %s, %d needed: 
%d\n", - svc->sv_name, nbuf, rc); - /* - * NB: this error handler is not strictly correct, because - * it may release more buffers than already allocated, - * but it doesn't matter because request portal should - * be lazy portal and will grow buffers if necessary. - */ - srpc_service_remove_buffers(svc, nbuf); - return -ENOMEM; - } - - CDEBUG(D_NET, "Reserved %d buffers for test %s\n", - nbuf * (srpc_serv_is_framework(svc) ? - 2 : cfs_cpt_number(cfs_cpt_tab)), svc->sv_name); - return 0; -} - -static void -sfw_unload_test(struct sfw_test_instance *tsi) -{ - struct sfw_test_case *tsc; - - LASSERT(tsi); - tsc = sfw_find_test_case(tsi->tsi_service); - LASSERT(tsc); - - if (tsi->tsi_is_client) - return; - - /* - * shrink buffers, because request portal is lazy portal - * which can grow buffers at runtime so we may leave - * some buffers behind, but never mind... - */ - srpc_service_remove_buffers(tsc->tsc_srv_service, - sfw_test_buffers(tsi)); -} - -static void -sfw_destroy_test_instance(struct sfw_test_instance *tsi) -{ - struct srpc_client_rpc *rpc; - struct sfw_test_unit *tsu; - - if (!tsi->tsi_is_client) - goto clean; - - tsi->tsi_ops->tso_fini(tsi); - - LASSERT(!tsi->tsi_stopping); - LASSERT(list_empty(&tsi->tsi_active_rpcs)); - LASSERT(!sfw_test_active(tsi)); - - while (!list_empty(&tsi->tsi_units)) { - tsu = list_entry(tsi->tsi_units.next, - struct sfw_test_unit, tsu_list); - list_del(&tsu->tsu_list); - kfree(tsu); - } - - while (!list_empty(&tsi->tsi_free_rpcs)) { - rpc = list_entry(tsi->tsi_free_rpcs.next, - struct srpc_client_rpc, crpc_list); - list_del(&rpc->crpc_list); - kfree(rpc); - } - -clean: - sfw_unload_test(tsi); - kfree(tsi); -} - -static void -sfw_destroy_batch(struct sfw_batch *tsb) -{ - struct sfw_test_instance *tsi; - - LASSERT(!sfw_batch_active(tsb)); - LASSERT(list_empty(&tsb->bat_list)); - - while (!list_empty(&tsb->bat_tests)) { - tsi = list_entry(tsb->bat_tests.next, - struct sfw_test_instance, tsi_list); - list_del_init(&tsi->tsi_list); - 
sfw_destroy_test_instance(tsi); - } - - kfree(tsb); -} - -void -sfw_destroy_session(struct sfw_session *sn) -{ - struct sfw_batch *batch; - - LASSERT(list_empty(&sn->sn_list)); - LASSERT(sn != sfw_data.fw_session); - - while (!list_empty(&sn->sn_batches)) { - batch = list_entry(sn->sn_batches.next, - struct sfw_batch, bat_list); - list_del_init(&batch->bat_list); - sfw_destroy_batch(batch); - } - - kfree(sn); - atomic_dec(&sfw_data.fw_nzombies); -} - -static void -sfw_unpack_addtest_req(struct srpc_msg *msg) -{ - struct srpc_test_reqst *req = &msg->msg_body.tes_reqst; - - LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST); - LASSERT(req->tsr_is_client); - - if (msg->msg_magic == SRPC_MSG_MAGIC) - return; /* no flipping needed */ - - LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC)); - - if (req->tsr_service == SRPC_SERVICE_BRW) { - if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) { - struct test_bulk_req *bulk = &req->tsr_u.bulk_v0; - - __swab32s(&bulk->blk_opc); - __swab32s(&bulk->blk_npg); - __swab32s(&bulk->blk_flags); - - } else { - struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1; - - __swab16s(&bulk->blk_opc); - __swab16s(&bulk->blk_flags); - __swab32s(&bulk->blk_offset); - __swab32s(&bulk->blk_len); - } - - return; - } - - if (req->tsr_service == SRPC_SERVICE_PING) { - struct test_ping_req *ping = &req->tsr_u.ping; - - __swab32s(&ping->png_size); - __swab32s(&ping->png_flags); - return; - } - - LBUG(); -} - -static int -sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc) -{ - struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg; - struct srpc_test_reqst *req = &msg->msg_body.tes_reqst; - struct srpc_bulk *bk = rpc->srpc_bulk; - int ndest = req->tsr_ndest; - struct sfw_test_unit *tsu; - struct sfw_test_instance *tsi; - int i; - int rc; - - tsi = kzalloc(sizeof(*tsi), GFP_NOFS); - if (!tsi) { - CERROR("Can't allocate test instance for batch: %llu\n", - tsb->bat_id.bat_id); - return -ENOMEM; - } - - spin_lock_init(&tsi->tsi_lock); - 
atomic_set(&tsi->tsi_nactive, 0); - INIT_LIST_HEAD(&tsi->tsi_units); - INIT_LIST_HEAD(&tsi->tsi_free_rpcs); - INIT_LIST_HEAD(&tsi->tsi_active_rpcs); - - tsi->tsi_stopping = 0; - tsi->tsi_batch = tsb; - tsi->tsi_loop = req->tsr_loop; - tsi->tsi_concur = req->tsr_concur; - tsi->tsi_service = req->tsr_service; - tsi->tsi_is_client = !!(req->tsr_is_client); - tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr); - - rc = sfw_load_test(tsi); - if (rc) { - kfree(tsi); - return rc; - } - - LASSERT(!sfw_batch_active(tsb)); - - if (!tsi->tsi_is_client) { - /* it's test server, just add it to tsb */ - list_add_tail(&tsi->tsi_list, &tsb->bat_tests); - return 0; - } - - LASSERT(bk); - LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest); - LASSERT((unsigned int)bk->bk_len >= - sizeof(struct lnet_process_id_packed) * ndest); - - sfw_unpack_addtest_req(msg); - memcpy(&tsi->tsi_u, &req->tsr_u, sizeof(tsi->tsi_u)); - - for (i = 0; i < ndest; i++) { - struct lnet_process_id_packed *dests; - struct lnet_process_id_packed id; - int j; - - dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].bv_page); - LASSERT(dests); /* my pages are within KVM always */ - id = dests[i % SFW_ID_PER_PAGE]; - if (msg->msg_magic != SRPC_MSG_MAGIC) - sfw_unpack_id(id); - - for (j = 0; j < tsi->tsi_concur; j++) { - tsu = kzalloc(sizeof(struct sfw_test_unit), GFP_NOFS); - if (!tsu) { - rc = -ENOMEM; - CERROR("Can't allocate tsu for %d\n", - tsi->tsi_service); - goto error; - } - - tsu->tsu_dest.nid = id.nid; - tsu->tsu_dest.pid = id.pid; - tsu->tsu_instance = tsi; - tsu->tsu_private = NULL; - list_add_tail(&tsu->tsu_list, &tsi->tsi_units); - } - } - - rc = tsi->tsi_ops->tso_init(tsi); - if (!rc) { - list_add_tail(&tsi->tsi_list, &tsb->bat_tests); - return 0; - } - -error: - LASSERT(rc); - sfw_destroy_test_instance(tsi); - return rc; -} - -static void -sfw_test_unit_done(struct sfw_test_unit *tsu) -{ - struct sfw_test_instance *tsi = tsu->tsu_instance; - struct sfw_batch *tsb = tsi->tsi_batch; - 
struct sfw_session *sn = tsb->bat_session; - - LASSERT(sfw_test_active(tsi)); - - if (!atomic_dec_and_test(&tsi->tsi_nactive)) - return; - - /* the test instance is done */ - spin_lock(&tsi->tsi_lock); - - tsi->tsi_stopping = 0; - - spin_unlock(&tsi->tsi_lock); - - spin_lock(&sfw_data.fw_lock); - - if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */ - sn == sfw_data.fw_session) { /* sn also active */ - spin_unlock(&sfw_data.fw_lock); - return; - } - - LASSERT(!list_empty(&sn->sn_list)); /* I'm a zombie! */ - - list_for_each_entry(tsb, &sn->sn_batches, bat_list) { - if (sfw_batch_active(tsb)) { - spin_unlock(&sfw_data.fw_lock); - return; - } - } - - list_del_init(&sn->sn_list); - spin_unlock(&sfw_data.fw_lock); - - sfw_destroy_session(sn); -} - -static void -sfw_test_rpc_done(struct srpc_client_rpc *rpc) -{ - struct sfw_test_unit *tsu = rpc->crpc_priv; - struct sfw_test_instance *tsi = tsu->tsu_instance; - int done = 0; - - tsi->tsi_ops->tso_done_rpc(tsu, rpc); - - spin_lock(&tsi->tsi_lock); - - LASSERT(sfw_test_active(tsi)); - LASSERT(!list_empty(&rpc->crpc_list)); - - list_del_init(&rpc->crpc_list); - - /* batch is stopping or loop is done or get error */ - if (tsi->tsi_stopping || !tsu->tsu_loop || - (rpc->crpc_status && tsi->tsi_stoptsu_onerr)) - done = 1; - - /* dec ref for poster */ - srpc_client_rpc_decref(rpc); - - spin_unlock(&tsi->tsi_lock); - - if (!done) { - swi_schedule_workitem(&tsu->tsu_worker); - return; - } - - sfw_test_unit_done(tsu); -} - -int -sfw_create_test_rpc(struct sfw_test_unit *tsu, struct lnet_process_id peer, - unsigned int features, int nblk, int blklen, - struct srpc_client_rpc **rpcpp) -{ - struct srpc_client_rpc *rpc = NULL; - struct sfw_test_instance *tsi = tsu->tsu_instance; - - spin_lock(&tsi->tsi_lock); - - LASSERT(sfw_test_active(tsi)); - /* pick request from buffer */ - rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs, - struct srpc_client_rpc, crpc_list); - if (rpc) { - LASSERT(nblk == 
rpc->crpc_bulk.bk_niov); - list_del_init(&rpc->crpc_list); - } - - spin_unlock(&tsi->tsi_lock); - - if (!rpc) { - rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk, - blklen, sfw_test_rpc_done, - sfw_test_rpc_fini, tsu); - } else { - srpc_init_client_rpc(rpc, peer, tsi->tsi_service, nblk, - blklen, sfw_test_rpc_done, - sfw_test_rpc_fini, tsu); - } - - if (!rpc) { - CERROR("Can't create rpc for test %d\n", tsi->tsi_service); - return -ENOMEM; - } - - rpc->crpc_reqstmsg.msg_ses_feats = features; - *rpcpp = rpc; - - return 0; -} - -static void -sfw_run_test(struct swi_workitem *wi) -{ - struct sfw_test_unit *tsu = container_of(wi, struct sfw_test_unit, tsu_worker); - struct sfw_test_instance *tsi = tsu->tsu_instance; - struct srpc_client_rpc *rpc = NULL; - - if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) { - LASSERT(!rpc); - goto test_done; - } - - LASSERT(rpc); - - spin_lock(&tsi->tsi_lock); - - if (tsi->tsi_stopping) { - list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs); - spin_unlock(&tsi->tsi_lock); - goto test_done; - } - - if (tsu->tsu_loop > 0) - tsu->tsu_loop--; - - list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs); - spin_unlock(&tsi->tsi_lock); - - spin_lock(&rpc->crpc_lock); - rpc->crpc_timeout = rpc_timeout; - srpc_post_rpc(rpc); - spin_unlock(&rpc->crpc_lock); - return; - -test_done: - /* - * No one can schedule me now since: - * - previous RPC, if any, has done and - * - no new RPC is initiated. - * - my batch is still active; no one can run it again now. 
- * Cancel pending schedules and prevent future schedule attempts: - */ - sfw_test_unit_done(tsu); -} - -static int -sfw_run_batch(struct sfw_batch *tsb) -{ - struct swi_workitem *wi; - struct sfw_test_unit *tsu; - struct sfw_test_instance *tsi; - - if (sfw_batch_active(tsb)) { - CDEBUG(D_NET, "Batch already active: %llu (%d)\n", - tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive)); - return 0; - } - - list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) { - if (!tsi->tsi_is_client) /* skip server instances */ - continue; - - LASSERT(!tsi->tsi_stopping); - LASSERT(!sfw_test_active(tsi)); - - atomic_inc(&tsb->bat_nactive); - - list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { - atomic_inc(&tsi->tsi_nactive); - tsu->tsu_loop = tsi->tsi_loop; - wi = &tsu->tsu_worker; - swi_init_workitem(wi, sfw_run_test, - lst_test_wq[lnet_cpt_of_nid(tsu->tsu_dest.nid)]); - swi_schedule_workitem(wi); - } - } - - return 0; -} - -int -sfw_stop_batch(struct sfw_batch *tsb, int force) -{ - struct sfw_test_instance *tsi; - struct srpc_client_rpc *rpc; - - if (!sfw_batch_active(tsb)) { - CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id); - return 0; - } - - list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) { - spin_lock(&tsi->tsi_lock); - - if (!tsi->tsi_is_client || - !sfw_test_active(tsi) || tsi->tsi_stopping) { - spin_unlock(&tsi->tsi_lock); - continue; - } - - tsi->tsi_stopping = 1; - - if (!force) { - spin_unlock(&tsi->tsi_lock); - continue; - } - - /* abort launched rpcs in the test */ - list_for_each_entry(rpc, &tsi->tsi_active_rpcs, crpc_list) { - spin_lock(&rpc->crpc_lock); - - srpc_abort_rpc(rpc, -EINTR); - - spin_unlock(&rpc->crpc_lock); - } - - spin_unlock(&tsi->tsi_lock); - } - - return 0; -} - -static int -sfw_query_batch(struct sfw_batch *tsb, int testidx, - struct srpc_batch_reply *reply) -{ - struct sfw_test_instance *tsi; - - if (testidx < 0) - return -EINVAL; - - if (!testidx) { - reply->bar_active = atomic_read(&tsb->bat_nactive); - return 0; - } - - 
list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) { - if (testidx-- > 1) - continue; - - reply->bar_active = atomic_read(&tsi->tsi_nactive); - return 0; - } - - return -ENOENT; -} - -void -sfw_free_pages(struct srpc_server_rpc *rpc) -{ - srpc_free_bulk(rpc->srpc_bulk); - rpc->srpc_bulk = NULL; -} - -int -sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, - int sink) -{ - LASSERT(!rpc->srpc_bulk); - LASSERT(npages > 0 && npages <= LNET_MAX_IOV); - - rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink); - if (!rpc->srpc_bulk) - return -ENOMEM; - - return 0; -} - -static int -sfw_add_test(struct srpc_server_rpc *rpc) -{ - struct sfw_session *sn = sfw_data.fw_session; - struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply; - struct srpc_test_reqst *request; - int rc; - struct sfw_batch *bat; - - request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst; - reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id; - - if (!request->tsr_loop || - !request->tsr_concur || - request->tsr_sid.ses_nid == LNET_NID_ANY || - request->tsr_ndest > SFW_MAX_NDESTS || - (request->tsr_is_client && !request->tsr_ndest) || - request->tsr_concur > SFW_MAX_CONCUR || - request->tsr_service > SRPC_SERVICE_MAX_ID || - request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) { - reply->tsr_status = EINVAL; - return 0; - } - - if (!sn || !sfw_sid_equal(request->tsr_sid, sn->sn_id) || - !sfw_find_test_case(request->tsr_service)) { - reply->tsr_status = ENOENT; - return 0; - } - - bat = sfw_bid2batch(request->tsr_bid); - if (!bat) { - CERROR("dropping RPC %s from %s under memory pressure\n", - rpc->srpc_scd->scd_svc->sv_name, - libcfs_id2str(rpc->srpc_peer)); - return -ENOMEM; - } - - if (sfw_batch_active(bat)) { - reply->tsr_status = EBUSY; - return 0; - } - - if (request->tsr_is_client && !rpc->srpc_bulk) { - /* rpc will be resumed later in sfw_bulk_ready */ - int npg = sfw_id_pages(request->tsr_ndest); - int len; - - if (!(sn->sn_features & 
LST_FEAT_BULK_LEN)) { - len = npg * PAGE_SIZE; - - } else { - len = sizeof(struct lnet_process_id_packed) * - request->tsr_ndest; - } - - return sfw_alloc_pages(rpc, CFS_CPT_ANY, npg, len, 1); - } - - rc = sfw_add_test_instance(bat, rpc); - CDEBUG(!rc ? D_NET : D_WARNING, - "%s test: sv %d %s, loop %d, concur %d, ndest %d\n", - !rc ? "Added" : "Failed to add", request->tsr_service, - request->tsr_is_client ? "client" : "server", - request->tsr_loop, request->tsr_concur, request->tsr_ndest); - - reply->tsr_status = (rc < 0) ? -rc : rc; - return 0; -} - -static int -sfw_control_batch(struct srpc_batch_reqst *request, - struct srpc_batch_reply *reply) -{ - struct sfw_session *sn = sfw_data.fw_session; - int rc = 0; - struct sfw_batch *bat; - - reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id; - - if (!sn || !sfw_sid_equal(request->bar_sid, sn->sn_id)) { - reply->bar_status = ESRCH; - return 0; - } - - bat = sfw_find_batch(request->bar_bid); - if (!bat) { - reply->bar_status = ENOENT; - return 0; - } - - switch (request->bar_opc) { - case SRPC_BATCH_OPC_RUN: - rc = sfw_run_batch(bat); - break; - - case SRPC_BATCH_OPC_STOP: - rc = sfw_stop_batch(bat, request->bar_arg); - break; - - case SRPC_BATCH_OPC_QUERY: - rc = sfw_query_batch(bat, request->bar_testidx, reply); - break; - - default: - return -EINVAL; /* drop it */ - } - - reply->bar_status = (rc < 0) ? 
-rc : rc; - return 0; -} - -static int -sfw_handle_server_rpc(struct srpc_server_rpc *rpc) -{ - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - struct srpc_msg *reply = &rpc->srpc_replymsg; - struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg; - unsigned int features = LST_FEATS_MASK; - int rc = 0; - - LASSERT(!sfw_data.fw_active_srpc); - LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID); - - spin_lock(&sfw_data.fw_lock); - - if (sfw_data.fw_shuttingdown) { - spin_unlock(&sfw_data.fw_lock); - return -ESHUTDOWN; - } - - /* Remove timer to avoid racing with it or expiring active session */ - if (sfw_del_session_timer()) { - CERROR("dropping RPC %s from %s: racing with expiry timer\n", - sv->sv_name, libcfs_id2str(rpc->srpc_peer)); - spin_unlock(&sfw_data.fw_lock); - return -EAGAIN; - } - - sfw_data.fw_active_srpc = rpc; - spin_unlock(&sfw_data.fw_lock); - - sfw_unpack_message(request); - LASSERT(request->msg_type == srpc_service2request(sv->sv_id)); - - /* rpc module should have checked this */ - LASSERT(request->msg_version == SRPC_MSG_VERSION); - - if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION && - sv->sv_id != SRPC_SERVICE_DEBUG) { - struct sfw_session *sn = sfw_data.fw_session; - - if (sn && - sn->sn_features != request->msg_ses_feats) { - CNETERR("Features of framework RPC don't match features of current session: %x/%x\n", - request->msg_ses_feats, sn->sn_features); - reply->msg_body.reply.status = EPROTO; - reply->msg_body.reply.sid = sn->sn_id; - goto out; - } - - } else if (request->msg_ses_feats & ~LST_FEATS_MASK) { - /* - * NB: at this point, old version will ignore features and - * create new session anyway, so console should be able - * to handle this - */ - reply->msg_body.reply.status = EPROTO; - goto out; - } - - switch (sv->sv_id) { - default: - LBUG(); - case SRPC_SERVICE_TEST: - rc = sfw_add_test(rpc); - break; - - case SRPC_SERVICE_BATCH: - rc = sfw_control_batch(&request->msg_body.bat_reqst, - &reply->msg_body.bat_reply); - break; - - case 
SRPC_SERVICE_QUERY_STAT: - rc = sfw_get_stats(&request->msg_body.stat_reqst, - &reply->msg_body.stat_reply); - break; - - case SRPC_SERVICE_DEBUG: - rc = sfw_debug_session(&request->msg_body.dbg_reqst, - &reply->msg_body.dbg_reply); - break; - - case SRPC_SERVICE_MAKE_SESSION: - rc = sfw_make_session(&request->msg_body.mksn_reqst, - &reply->msg_body.mksn_reply); - break; - - case SRPC_SERVICE_REMOVE_SESSION: - rc = sfw_remove_session(&request->msg_body.rmsn_reqst, - &reply->msg_body.rmsn_reply); - break; - } - - if (sfw_data.fw_session) - features = sfw_data.fw_session->sn_features; - out: - reply->msg_ses_feats = features; - rpc->srpc_done = sfw_server_rpc_done; - spin_lock(&sfw_data.fw_lock); - - if (!sfw_data.fw_shuttingdown) - sfw_add_session_timer(); - - sfw_data.fw_active_srpc = NULL; - spin_unlock(&sfw_data.fw_lock); - return rc; -} - -static int -sfw_bulk_ready(struct srpc_server_rpc *rpc, int status) -{ - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - int rc; - - LASSERT(rpc->srpc_bulk); - LASSERT(sv->sv_id == SRPC_SERVICE_TEST); - LASSERT(!sfw_data.fw_active_srpc); - LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client); - - spin_lock(&sfw_data.fw_lock); - - if (status) { - CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n", - sv->sv_name, libcfs_id2str(rpc->srpc_peer), status); - spin_unlock(&sfw_data.fw_lock); - return -EIO; - } - - if (sfw_data.fw_shuttingdown) { - spin_unlock(&sfw_data.fw_lock); - return -ESHUTDOWN; - } - - if (sfw_del_session_timer()) { - CERROR("dropping RPC %s from %s: racing with expiry timer\n", - sv->sv_name, libcfs_id2str(rpc->srpc_peer)); - spin_unlock(&sfw_data.fw_lock); - return -EAGAIN; - } - - sfw_data.fw_active_srpc = rpc; - spin_unlock(&sfw_data.fw_lock); - - rc = sfw_add_test(rpc); - - spin_lock(&sfw_data.fw_lock); - - if (!sfw_data.fw_shuttingdown) - sfw_add_session_timer(); - - sfw_data.fw_active_srpc = NULL; - spin_unlock(&sfw_data.fw_lock); - return rc; -} - -struct 
srpc_client_rpc * -sfw_create_rpc(struct lnet_process_id peer, int service, - unsigned int features, int nbulkiov, int bulklen, - void (*done)(struct srpc_client_rpc *), void *priv) -{ - struct srpc_client_rpc *rpc = NULL; - - spin_lock(&sfw_data.fw_lock); - - LASSERT(!sfw_data.fw_shuttingdown); - LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID); - - if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) { - rpc = list_entry(sfw_data.fw_zombie_rpcs.next, - struct srpc_client_rpc, crpc_list); - list_del(&rpc->crpc_list); - - srpc_init_client_rpc(rpc, peer, service, 0, 0, - done, sfw_client_rpc_fini, priv); - } - - spin_unlock(&sfw_data.fw_lock); - - if (!rpc) { - rpc = srpc_create_client_rpc(peer, service, - nbulkiov, bulklen, done, - nbulkiov ? NULL : - sfw_client_rpc_fini, - priv); - } - - if (rpc) /* "session" is concept in framework */ - rpc->crpc_reqstmsg.msg_ses_feats = features; - - return rpc; -} - -void -sfw_unpack_message(struct srpc_msg *msg) -{ - if (msg->msg_magic == SRPC_MSG_MAGIC) - return; /* no flipping needed */ - - /* srpc module should guarantee I wouldn't get crap */ - LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC)); - - if (msg->msg_type == SRPC_MSG_STAT_REQST) { - struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst; - - __swab32s(&req->str_type); - __swab64s(&req->str_rpyid); - sfw_unpack_sid(req->str_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_STAT_REPLY) { - struct srpc_stat_reply *rep = &msg->msg_body.stat_reply; - - __swab32s(&rep->str_status); - sfw_unpack_sid(rep->str_sid); - sfw_unpack_fw_counters(rep->str_fw); - sfw_unpack_rpc_counters(rep->str_rpc); - sfw_unpack_lnet_counters(rep->str_lnet); - return; - } - - if (msg->msg_type == SRPC_MSG_MKSN_REQST) { - struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst; - - __swab64s(&req->mksn_rpyid); - __swab32s(&req->mksn_force); - sfw_unpack_sid(req->mksn_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_MKSN_REPLY) { - struct srpc_mksn_reply *rep = 
&msg->msg_body.mksn_reply; - - __swab32s(&rep->mksn_status); - __swab32s(&rep->mksn_timeout); - sfw_unpack_sid(rep->mksn_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_RMSN_REQST) { - struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst; - - __swab64s(&req->rmsn_rpyid); - sfw_unpack_sid(req->rmsn_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_RMSN_REPLY) { - struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply; - - __swab32s(&rep->rmsn_status); - sfw_unpack_sid(rep->rmsn_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_DEBUG_REQST) { - struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst; - - __swab64s(&req->dbg_rpyid); - __swab32s(&req->dbg_flags); - sfw_unpack_sid(req->dbg_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) { - struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply; - - __swab32s(&rep->dbg_nbatch); - __swab32s(&rep->dbg_timeout); - sfw_unpack_sid(rep->dbg_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_BATCH_REQST) { - struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst; - - __swab32s(&req->bar_opc); - __swab64s(&req->bar_rpyid); - __swab32s(&req->bar_testidx); - __swab32s(&req->bar_arg); - sfw_unpack_sid(req->bar_sid); - __swab64s(&req->bar_bid.bat_id); - return; - } - - if (msg->msg_type == SRPC_MSG_BATCH_REPLY) { - struct srpc_batch_reply *rep = &msg->msg_body.bat_reply; - - __swab32s(&rep->bar_status); - sfw_unpack_sid(rep->bar_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_TEST_REQST) { - struct srpc_test_reqst *req = &msg->msg_body.tes_reqst; - - __swab64s(&req->tsr_rpyid); - __swab64s(&req->tsr_bulkid); - __swab32s(&req->tsr_loop); - __swab32s(&req->tsr_ndest); - __swab32s(&req->tsr_concur); - __swab32s(&req->tsr_service); - sfw_unpack_sid(req->tsr_sid); - __swab64s(&req->tsr_bid.bat_id); - return; - } - - if (msg->msg_type == SRPC_MSG_TEST_REPLY) { - struct srpc_test_reply *rep = &msg->msg_body.tes_reply; - - __swab32s(&rep->tsr_status); - sfw_unpack_sid(rep->tsr_sid); - return; 
- } - - if (msg->msg_type == SRPC_MSG_JOIN_REQST) { - struct srpc_join_reqst *req = &msg->msg_body.join_reqst; - - __swab64s(&req->join_rpyid); - sfw_unpack_sid(req->join_sid); - return; - } - - if (msg->msg_type == SRPC_MSG_JOIN_REPLY) { - struct srpc_join_reply *rep = &msg->msg_body.join_reply; - - __swab32s(&rep->join_status); - __swab32s(&rep->join_timeout); - sfw_unpack_sid(rep->join_sid); - return; - } - - LBUG(); -} - -void -sfw_abort_rpc(struct srpc_client_rpc *rpc) -{ - LASSERT(atomic_read(&rpc->crpc_refcount) > 0); - LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID); - - spin_lock(&rpc->crpc_lock); - srpc_abort_rpc(rpc, -EINTR); - spin_unlock(&rpc->crpc_lock); -} - -void -sfw_post_rpc(struct srpc_client_rpc *rpc) -{ - spin_lock(&rpc->crpc_lock); - - LASSERT(!rpc->crpc_closed); - LASSERT(!rpc->crpc_aborted); - LASSERT(list_empty(&rpc->crpc_list)); - LASSERT(!sfw_data.fw_shuttingdown); - - rpc->crpc_timeout = rpc_timeout; - srpc_post_rpc(rpc); - - spin_unlock(&rpc->crpc_lock); -} - -static struct srpc_service sfw_services[] = { - { - /* sv_id */ SRPC_SERVICE_DEBUG, - /* sv_name */ "debug", - 0 - }, - { - /* sv_id */ SRPC_SERVICE_QUERY_STAT, - /* sv_name */ "query stats", - 0 - }, - { - /* sv_id */ SRPC_SERVICE_MAKE_SESSION, - /* sv_name */ "make session", - 0 - }, - { - /* sv_id */ SRPC_SERVICE_REMOVE_SESSION, - /* sv_name */ "remove session", - 0 - }, - { - /* sv_id */ SRPC_SERVICE_BATCH, - /* sv_name */ "batch service", - 0 - }, - { - /* sv_id */ SRPC_SERVICE_TEST, - /* sv_name */ "test service", - 0 - }, - { - /* sv_id */ 0, - /* sv_name */ NULL, - 0 - } -}; - -int -sfw_startup(void) -{ - int i; - int rc; - int error; - struct srpc_service *sv; - struct sfw_test_case *tsc; - - if (session_timeout < 0) { - CERROR("Session timeout must be non-negative: %d\n", - session_timeout); - return -EINVAL; - } - - if (rpc_timeout < 0) { - CERROR("RPC timeout must be non-negative: %d\n", - rpc_timeout); - return -EINVAL; - } - - if (!session_timeout) - 
CWARN("Zero session_timeout specified - test sessions never expire.\n"); - - if (!rpc_timeout) - CWARN("Zero rpc_timeout specified - test RPC never expire.\n"); - - memset(&sfw_data, 0, sizeof(struct smoketest_framework)); - - sfw_data.fw_session = NULL; - sfw_data.fw_active_srpc = NULL; - spin_lock_init(&sfw_data.fw_lock); - atomic_set(&sfw_data.fw_nzombies, 0); - INIT_LIST_HEAD(&sfw_data.fw_tests); - INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs); - INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions); - - brw_init_test_client(); - brw_init_test_service(); - rc = sfw_register_test(&brw_test_service, &brw_test_client); - LASSERT(!rc); - - ping_init_test_client(); - ping_init_test_service(); - rc = sfw_register_test(&ping_test_service, &ping_test_client); - LASSERT(!rc); - - error = 0; - list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) { - sv = tsc->tsc_srv_service; - - rc = srpc_add_service(sv); - LASSERT(rc != -EBUSY); - if (rc) { - CWARN("Failed to add %s service: %d\n", - sv->sv_name, rc); - error = rc; - } - } - - for (i = 0; ; i++) { - sv = &sfw_services[i]; - if (!sv->sv_name) - break; - - sv->sv_bulk_ready = NULL; - sv->sv_handler = sfw_handle_server_rpc; - sv->sv_wi_total = SFW_FRWK_WI_MAX; - if (sv->sv_id == SRPC_SERVICE_TEST) - sv->sv_bulk_ready = sfw_bulk_ready; - - rc = srpc_add_service(sv); - LASSERT(rc != -EBUSY); - if (rc) { - CWARN("Failed to add %s service: %d\n", - sv->sv_name, rc); - error = rc; - } - - /* about to sfw_shutdown, no need to add buffer */ - if (error) - continue; - - rc = srpc_service_add_buffers(sv, sv->sv_wi_total); - if (rc) { - CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n", - sv->sv_name, sv->sv_wi_total, rc); - error = -ENOMEM; - } - } - - if (error) - sfw_shutdown(); - return error; -} - -void -sfw_shutdown(void) -{ - struct srpc_service *sv; - struct sfw_test_case *tsc; - int i; - - spin_lock(&sfw_data.fw_lock); - - sfw_data.fw_shuttingdown = 1; - lst_wait_until(!sfw_data.fw_active_srpc, sfw_data.fw_lock, - 
"waiting for active RPC to finish.\n"); - - if (sfw_del_session_timer()) - lst_wait_until(!sfw_data.fw_session, sfw_data.fw_lock, - "waiting for session timer to explode.\n"); - - sfw_deactivate_session(); - lst_wait_until(!atomic_read(&sfw_data.fw_nzombies), - sfw_data.fw_lock, - "waiting for %d zombie sessions to die.\n", - atomic_read(&sfw_data.fw_nzombies)); - - spin_unlock(&sfw_data.fw_lock); - - for (i = 0; ; i++) { - sv = &sfw_services[i]; - if (!sv->sv_name) - break; - - srpc_shutdown_service(sv); - srpc_remove_service(sv); - } - - list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) { - sv = tsc->tsc_srv_service; - srpc_shutdown_service(sv); - srpc_remove_service(sv); - } - - while (!list_empty(&sfw_data.fw_zombie_rpcs)) { - struct srpc_client_rpc *rpc; - - rpc = list_entry(sfw_data.fw_zombie_rpcs.next, - struct srpc_client_rpc, crpc_list); - list_del(&rpc->crpc_list); - - kfree(rpc); - } - - for (i = 0; ; i++) { - sv = &sfw_services[i]; - if (!sv->sv_name) - break; - - srpc_wait_service_shutdown(sv); - } - - while (!list_empty(&sfw_data.fw_tests)) { - tsc = list_entry(sfw_data.fw_tests.next, - struct sfw_test_case, tsc_list); - - srpc_wait_service_shutdown(tsc->tsc_srv_service); - - list_del(&tsc->tsc_list); - kfree(tsc); - } -} diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c deleted file mode 100644 index 9ba65320f748..000000000000 --- a/drivers/staging/lustre/lnet/selftest/module.c +++ /dev/null @@ -1,169 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "selftest.h" -#include "console.h" - -enum { - LST_INIT_NONE = 0, - LST_INIT_WI_SERIAL, - LST_INIT_WI_TEST, - LST_INIT_RPC, - LST_INIT_FW, - LST_INIT_CONSOLE -}; - -static int lst_init_step = LST_INIT_NONE; - -struct workqueue_struct *lst_serial_wq; -struct workqueue_struct **lst_test_wq; - -static void -lnet_selftest_exit(void) -{ - int i; - - switch (lst_init_step) { - case LST_INIT_CONSOLE: - lstcon_console_fini(); - /* fall through */ - case LST_INIT_FW: - sfw_shutdown(); - /* fall through */ - case LST_INIT_RPC: - srpc_shutdown(); - /* fall through */ - case LST_INIT_WI_TEST: - for (i = 0; - i < cfs_cpt_number(lnet_cpt_table()); i++) { - if (!lst_test_wq[i]) - continue; - destroy_workqueue(lst_test_wq[i]); - } - kvfree(lst_test_wq); - lst_test_wq = NULL; - /* fall through */ - case LST_INIT_WI_SERIAL: - destroy_workqueue(lst_serial_wq); - lst_serial_wq = NULL; - case LST_INIT_NONE: - break; - default: - LBUG(); - } -} - -static int -lnet_selftest_init(void) -{ - int nscheds; - int rc; - int i; - - rc = libcfs_setup(); - if (rc) - return rc; - - lst_serial_wq = alloc_ordered_workqueue("lst_s", 0); - if 
(!lst_serial_wq) { - CERROR("Failed to create serial WI scheduler for LST\n"); - return -ENOMEM; - } - lst_init_step = LST_INIT_WI_SERIAL; - - nscheds = cfs_cpt_number(lnet_cpt_table()); - lst_test_wq = kvmalloc_array(nscheds, sizeof(lst_test_wq[0]), - GFP_KERNEL | __GFP_ZERO); - if (!lst_test_wq) { - rc = -ENOMEM; - goto error; - } - - lst_init_step = LST_INIT_WI_TEST; - for (i = 0; i < nscheds; i++) { - int nthrs = cfs_cpt_weight(lnet_cpt_table(), i); - struct workqueue_attrs attrs = {0}; - cpumask_var_t *mask = cfs_cpt_cpumask(lnet_cpt_table(), i); - - /* reserve at least one CPU for LND */ - nthrs = max(nthrs - 1, 1); - lst_test_wq[i] = alloc_workqueue("lst_t", WQ_UNBOUND, nthrs); - if (!lst_test_wq[i]) { - CWARN("Failed to create CPU partition affinity WI scheduler %d for LST\n", - i); - rc = -ENOMEM; - goto error; - } - - if (mask && alloc_cpumask_var(&attrs.cpumask, GFP_KERNEL)) { - cpumask_copy(attrs.cpumask, *mask); - apply_workqueue_attrs(lst_test_wq[i], &attrs); - free_cpumask_var(attrs.cpumask); - } - } - - rc = srpc_startup(); - if (rc) { - CERROR("LST can't startup rpc\n"); - goto error; - } - lst_init_step = LST_INIT_RPC; - - rc = sfw_startup(); - if (rc) { - CERROR("LST can't startup framework\n"); - goto error; - } - lst_init_step = LST_INIT_FW; - - rc = lstcon_console_init(); - if (rc) { - CERROR("LST can't startup console\n"); - goto error; - } - lst_init_step = LST_INIT_CONSOLE; - return 0; -error: - lnet_selftest_exit(); - return rc; -} - -MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("LNet Selftest"); -MODULE_VERSION("2.7.0"); -MODULE_LICENSE("GPL"); - -module_init(lnet_selftest_init); -module_exit(lnet_selftest_exit); diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c deleted file mode 100644 index f54bd630dbf8..000000000000 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c +++ /dev/null @@ -1,228 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lnet/selftest/conctl.c - * - * Test client & Server - * - * Author: Liang Zhen - */ - -#include "selftest.h" - -#define LST_PING_TEST_MAGIC 0xbabeface - -static int ping_srv_workitems = SFW_TEST_WI_MAX; -module_param(ping_srv_workitems, int, 0644); -MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems"); - -struct lst_ping_data { - spinlock_t pnd_lock; /* serialize */ - int pnd_counter; /* sequence counter */ -}; - -static struct lst_ping_data lst_ping_data; - -static int -ping_client_init(struct sfw_test_instance *tsi) -{ - struct sfw_session *sn = tsi->tsi_batch->bat_session; - - LASSERT(tsi->tsi_is_client); - LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK)); - - spin_lock_init(&lst_ping_data.pnd_lock); - lst_ping_data.pnd_counter = 0; - - return 0; -} - -static void -ping_client_fini(struct sfw_test_instance *tsi) -{ - struct sfw_session *sn = tsi->tsi_batch->bat_session; - int errors; - - LASSERT(sn); - LASSERT(tsi->tsi_is_client); - - errors = atomic_read(&sn->sn_ping_errors); - if (errors) - CWARN("%d pings have failed.\n", errors); - else - CDEBUG(D_NET, "Ping test finished OK.\n"); -} - -static int -ping_client_prep_rpc(struct sfw_test_unit *tsu, struct lnet_process_id dest, - struct srpc_client_rpc **rpc) -{ - struct srpc_ping_reqst *req; - struct sfw_test_instance *tsi = tsu->tsu_instance; - struct sfw_session *sn = tsi->tsi_batch->bat_session; - struct timespec64 ts; - int rc; - - LASSERT(sn); - LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); - - rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc); - if (rc) - return rc; - - req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst; - - req->pnr_magic = LST_PING_TEST_MAGIC; - - spin_lock(&lst_ping_data.pnd_lock); - req->pnr_seq = lst_ping_data.pnd_counter++; - spin_unlock(&lst_ping_data.pnd_lock); - - ktime_get_real_ts64(&ts); - req->pnr_time_sec = ts.tv_sec; - req->pnr_time_usec = ts.tv_nsec / NSEC_PER_USEC; - - return rc; -} - -static void -ping_client_done_rpc(struct 
sfw_test_unit *tsu, struct srpc_client_rpc *rpc) -{ - struct sfw_test_instance *tsi = tsu->tsu_instance; - struct sfw_session *sn = tsi->tsi_batch->bat_session; - struct srpc_ping_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst; - struct srpc_ping_reply *reply = &rpc->crpc_replymsg.msg_body.ping_reply; - struct timespec64 ts; - - LASSERT(sn); - - if (rpc->crpc_status) { - if (!tsi->tsi_stopping) /* rpc could have been aborted */ - atomic_inc(&sn->sn_ping_errors); - CERROR("Unable to ping %s (%d): %d\n", - libcfs_id2str(rpc->crpc_dest), - reqst->pnr_seq, rpc->crpc_status); - return; - } - - if (rpc->crpc_replymsg.msg_magic != SRPC_MSG_MAGIC) { - __swab32s(&reply->pnr_seq); - __swab32s(&reply->pnr_magic); - __swab32s(&reply->pnr_status); - } - - if (reply->pnr_magic != LST_PING_TEST_MAGIC) { - rpc->crpc_status = -EBADMSG; - atomic_inc(&sn->sn_ping_errors); - CERROR("Bad magic %u from %s, %u expected.\n", - reply->pnr_magic, libcfs_id2str(rpc->crpc_dest), - LST_PING_TEST_MAGIC); - return; - } - - if (reply->pnr_seq != reqst->pnr_seq) { - rpc->crpc_status = -EBADMSG; - atomic_inc(&sn->sn_ping_errors); - CERROR("Bad seq %u from %s, %u expected.\n", - reply->pnr_seq, libcfs_id2str(rpc->crpc_dest), - reqst->pnr_seq); - return; - } - - ktime_get_real_ts64(&ts); - CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq, - (unsigned int)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 + - (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec))); -} - -static int -ping_server_handle(struct srpc_server_rpc *rpc) -{ - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; - struct srpc_msg *replymsg = &rpc->srpc_replymsg; - struct srpc_ping_reqst *req = &reqstmsg->msg_body.ping_reqst; - struct srpc_ping_reply *rep = &rpc->srpc_replymsg.msg_body.ping_reply; - - LASSERT(sv->sv_id == SRPC_SERVICE_PING); - - if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) { - LASSERT(reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC)); - - 
__swab32s(&req->pnr_seq); - __swab32s(&req->pnr_magic); - __swab64s(&req->pnr_time_sec); - __swab64s(&req->pnr_time_usec); - } - LASSERT(reqstmsg->msg_type == srpc_service2request(sv->sv_id)); - - if (req->pnr_magic != LST_PING_TEST_MAGIC) { - CERROR("Unexpected magic %08x from %s\n", - req->pnr_magic, libcfs_id2str(rpc->srpc_peer)); - return -EINVAL; - } - - rep->pnr_seq = req->pnr_seq; - rep->pnr_magic = LST_PING_TEST_MAGIC; - - if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) { - replymsg->msg_ses_feats = LST_FEATS_MASK; - rep->pnr_status = EPROTO; - return 0; - } - - replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; - - CDEBUG(D_NET, "Get ping %d from %s\n", - req->pnr_seq, libcfs_id2str(rpc->srpc_peer)); - return 0; -} - -struct sfw_test_client_ops ping_test_client; - -void ping_init_test_client(void) -{ - ping_test_client.tso_init = ping_client_init; - ping_test_client.tso_fini = ping_client_fini; - ping_test_client.tso_prep_rpc = ping_client_prep_rpc; - ping_test_client.tso_done_rpc = ping_client_done_rpc; -} - -struct srpc_service ping_test_service; - -void ping_init_test_service(void) -{ - ping_test_service.sv_id = SRPC_SERVICE_PING; - ping_test_service.sv_name = "ping_test"; - ping_test_service.sv_handler = ping_server_handle; - ping_test_service.sv_wi_total = ping_srv_workitems; -} diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c deleted file mode 100644 index 9613b0a77007..000000000000 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ /dev/null @@ -1,1682 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/selftest/rpc.c - * - * Author: Isaac Huang - * - * 2012-05-13: Liang Zhen - * - percpt data for service to improve smp performance - * - code cleanup - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "selftest.h" - -enum srpc_state { - SRPC_STATE_NONE, - SRPC_STATE_NI_INIT, - SRPC_STATE_EQ_INIT, - SRPC_STATE_RUNNING, - SRPC_STATE_STOPPING, -}; - -static struct smoketest_rpc { - spinlock_t rpc_glock; /* global lock */ - struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1]; - struct lnet_handle_eq rpc_lnet_eq; /* _the_ LNet event queue */ - enum srpc_state rpc_state; - struct srpc_counters rpc_counters; - __u64 rpc_matchbits; /* matchbits counter */ -} srpc_data; - -static inline int -srpc_serv_portal(int svc_id) -{ - return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ? 
- SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL; -} - -/* forward ref's */ -void srpc_handle_rpc(struct swi_workitem *wi); - -void srpc_get_counters(struct srpc_counters *cnt) -{ - spin_lock(&srpc_data.rpc_glock); - *cnt = srpc_data.rpc_counters; - spin_unlock(&srpc_data.rpc_glock); -} - -void srpc_set_counters(const struct srpc_counters *cnt) -{ - spin_lock(&srpc_data.rpc_glock); - srpc_data.rpc_counters = *cnt; - spin_unlock(&srpc_data.rpc_glock); -} - -static int -srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off, - int nob) -{ - LASSERT(off < PAGE_SIZE); - LASSERT(nob > 0 && nob <= PAGE_SIZE); - - bk->bk_iovs[i].bv_offset = off; - bk->bk_iovs[i].bv_page = pg; - bk->bk_iovs[i].bv_len = nob; - return nob; -} - -void -srpc_free_bulk(struct srpc_bulk *bk) -{ - int i; - struct page *pg; - - LASSERT(bk); - - for (i = 0; i < bk->bk_niov; i++) { - pg = bk->bk_iovs[i].bv_page; - if (!pg) - break; - - __free_page(pg); - } - - kfree(bk); -} - -struct srpc_bulk * -srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg, - unsigned int bulk_len, int sink) -{ - struct srpc_bulk *bk; - int i; - - LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV); - - bk = kzalloc_cpt(offsetof(struct srpc_bulk, bk_iovs[bulk_npg]), - GFP_KERNEL, cpt); - if (!bk) { - CERROR("Can't allocate descriptor for %d pages\n", bulk_npg); - return NULL; - } - - memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg])); - bk->bk_sink = sink; - bk->bk_len = bulk_len; - bk->bk_niov = bulk_npg; - - for (i = 0; i < bulk_npg; i++) { - struct page *pg; - int nob; - - pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt), - GFP_KERNEL, 0); - if (!pg) { - CERROR("Can't allocate page %d of %d\n", i, bulk_npg); - srpc_free_bulk(bk); - return NULL; - } - - nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) - - bulk_off; - srpc_add_bulk_page(bk, pg, i, bulk_off, nob); - bulk_len -= nob; - bulk_off = 0; - } - - return bk; -} - -static inline __u64 
-srpc_next_id(void) -{ - __u64 id; - - spin_lock(&srpc_data.rpc_glock); - id = srpc_data.rpc_matchbits++; - spin_unlock(&srpc_data.rpc_glock); - return id; -} - -static void -srpc_init_server_rpc(struct srpc_server_rpc *rpc, - struct srpc_service_cd *scd, - struct srpc_buffer *buffer) -{ - memset(rpc, 0, sizeof(*rpc)); - swi_init_workitem(&rpc->srpc_wi, srpc_handle_rpc, - srpc_serv_is_framework(scd->scd_svc) ? - lst_serial_wq : lst_test_wq[scd->scd_cpt]); - - rpc->srpc_ev.ev_fired = 1; /* no event expected now */ - - rpc->srpc_scd = scd; - rpc->srpc_reqstbuf = buffer; - rpc->srpc_peer = buffer->buf_peer; - rpc->srpc_self = buffer->buf_self; - LNetInvalidateMDHandle(&rpc->srpc_replymdh); -} - -static void -srpc_service_fini(struct srpc_service *svc) -{ - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - struct srpc_buffer *buf; - struct list_head *q; - int i; - - if (!svc->sv_cpt_data) - return; - - cfs_percpt_for_each(scd, i, svc->sv_cpt_data) { - while (1) { - if (!list_empty(&scd->scd_buf_posted)) - q = &scd->scd_buf_posted; - else if (!list_empty(&scd->scd_buf_blocked)) - q = &scd->scd_buf_blocked; - else - break; - - while (!list_empty(q)) { - buf = list_entry(q->next, struct srpc_buffer, - buf_list); - list_del(&buf->buf_list); - kfree(buf); - } - } - - LASSERT(list_empty(&scd->scd_rpc_active)); - - while (!list_empty(&scd->scd_rpc_free)) { - rpc = list_entry(scd->scd_rpc_free.next, - struct srpc_server_rpc, - srpc_list); - list_del(&rpc->srpc_list); - kfree(rpc); - } - } - - cfs_percpt_free(svc->sv_cpt_data); - svc->sv_cpt_data = NULL; -} - -static int -srpc_service_nrpcs(struct srpc_service *svc) -{ - int nrpcs = svc->sv_wi_total / svc->sv_ncpts; - - return srpc_serv_is_framework(svc) ? 
- max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN); -} - -void srpc_add_buffer(struct swi_workitem *wi); - -static int -srpc_service_init(struct srpc_service *svc) -{ - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - int nrpcs; - int i; - int j; - - svc->sv_shuttingdown = 0; - - svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(**svc->sv_cpt_data)); - if (!svc->sv_cpt_data) - return -ENOMEM; - - svc->sv_ncpts = srpc_serv_is_framework(svc) ? - 1 : cfs_cpt_number(lnet_cpt_table()); - nrpcs = srpc_service_nrpcs(svc); - - cfs_percpt_for_each(scd, i, svc->sv_cpt_data) { - scd->scd_cpt = i; - scd->scd_svc = svc; - spin_lock_init(&scd->scd_lock); - INIT_LIST_HEAD(&scd->scd_rpc_free); - INIT_LIST_HEAD(&scd->scd_rpc_active); - INIT_LIST_HEAD(&scd->scd_buf_posted); - INIT_LIST_HEAD(&scd->scd_buf_blocked); - - scd->scd_ev.ev_data = scd; - scd->scd_ev.ev_type = SRPC_REQUEST_RCVD; - - /* - * NB: don't use lst_serial_wq for adding buffer, - * see details in srpc_service_add_buffers() - */ - swi_init_workitem(&scd->scd_buf_wi, - srpc_add_buffer, lst_test_wq[i]); - - if (i && srpc_serv_is_framework(svc)) { - /* - * NB: framework service only needs srpc_service_cd for - * one partition, but we allocate for all to make - * it easier to implement, it will waste a little - * memory but nobody should care about this - */ - continue; - } - - for (j = 0; j < nrpcs; j++) { - rpc = kzalloc_cpt(sizeof(*rpc), GFP_NOFS, i); - if (!rpc) { - srpc_service_fini(svc); - return -ENOMEM; - } - list_add(&rpc->srpc_list, &scd->scd_rpc_free); - } - } - - return 0; -} - -int -srpc_add_service(struct srpc_service *sv) -{ - int id = sv->sv_id; - - LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID); - - if (srpc_service_init(sv)) - return -ENOMEM; - - spin_lock(&srpc_data.rpc_glock); - - LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); - - if (srpc_data.rpc_services[id]) { - spin_unlock(&srpc_data.rpc_glock); - goto failed; - } - - srpc_data.rpc_services[id] = sv; - 
spin_unlock(&srpc_data.rpc_glock); - - CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name); - return 0; - - failed: - srpc_service_fini(sv); - return -EBUSY; -} - -int -srpc_remove_service(struct srpc_service *sv) -{ - int id = sv->sv_id; - - spin_lock(&srpc_data.rpc_glock); - - if (srpc_data.rpc_services[id] != sv) { - spin_unlock(&srpc_data.rpc_glock); - return -ENOENT; - } - - srpc_data.rpc_services[id] = NULL; - spin_unlock(&srpc_data.rpc_glock); - return 0; -} - -static int -srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf, - int len, int options, struct lnet_process_id peer, - struct lnet_handle_md *mdh, struct srpc_event *ev) -{ - int rc; - struct lnet_md md; - struct lnet_handle_me meh; - - rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK, - local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh); - if (rc) { - CERROR("LNetMEAttach failed: %d\n", rc); - LASSERT(rc == -ENOMEM); - return -ENOMEM; - } - - md.threshold = 1; - md.user_ptr = ev; - md.start = buf; - md.length = len; - md.options = options; - md.eq_handle = srpc_data.rpc_lnet_eq; - - rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh); - if (rc) { - CERROR("LNetMDAttach failed: %d\n", rc); - LASSERT(rc == -ENOMEM); - - rc = LNetMEUnlink(meh); - LASSERT(!rc); - return -ENOMEM; - } - - CDEBUG(D_NET, "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n", - libcfs_id2str(peer), portal, matchbits); - return 0; -} - -static int -srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, - int options, struct lnet_process_id peer, - lnet_nid_t self, struct lnet_handle_md *mdh, - struct srpc_event *ev) -{ - int rc; - struct lnet_md md; - - md.user_ptr = ev; - md.start = buf; - md.length = len; - md.eq_handle = srpc_data.rpc_lnet_eq; - md.threshold = options & LNET_MD_OP_GET ? 
2 : 1; - md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET); - - rc = LNetMDBind(md, LNET_UNLINK, mdh); - if (rc) { - CERROR("LNetMDBind failed: %d\n", rc); - LASSERT(rc == -ENOMEM); - return -ENOMEM; - } - - /* - * this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options. - * they're only meaningful for MDs attached to an ME (i.e. passive - * buffers... - */ - if (options & LNET_MD_OP_PUT) { - rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer, - portal, matchbits, 0, 0); - } else { - LASSERT(options & LNET_MD_OP_GET); - - rc = LNetGet(self, *mdh, peer, portal, matchbits, 0); - } - - if (rc) { - CERROR("LNet%s(%s, %d, %lld) failed: %d\n", - options & LNET_MD_OP_PUT ? "Put" : "Get", - libcfs_id2str(peer), portal, matchbits, rc); - - /* - * The forthcoming unlink event will complete this operation - * with failure, so fall through and return success here. - */ - rc = LNetMDUnlink(*mdh); - LASSERT(!rc); - } else { - CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n", - libcfs_id2str(peer), portal, matchbits); - } - return 0; -} - -static int -srpc_post_passive_rqtbuf(int service, int local, void *buf, int len, - struct lnet_handle_md *mdh, struct srpc_event *ev) -{ - struct lnet_process_id any = { 0 }; - - any.nid = LNET_NID_ANY; - any.pid = LNET_PID_ANY; - - return srpc_post_passive_rdma(srpc_serv_portal(service), - local, service, buf, len, - LNET_MD_OP_PUT, any, mdh, ev); -} - -static int -srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) -__must_hold(&scd->scd_lock) -{ - struct srpc_service *sv = scd->scd_svc; - struct srpc_msg *msg = &buf->buf_msg; - int rc; - - LNetInvalidateMDHandle(&buf->buf_mdh); - list_add(&buf->buf_list, &scd->scd_buf_posted); - scd->scd_buf_nposted++; - spin_unlock(&scd->scd_lock); - - rc = srpc_post_passive_rqtbuf(sv->sv_id, - !srpc_serv_is_framework(sv), - msg, sizeof(*msg), &buf->buf_mdh, - &scd->scd_ev); - - /* - * At this point, a RPC (new or delayed) may have arrived in - * 
msg and its event handler has been called. So we must add - * buf to scd_buf_posted _before_ dropping scd_lock - */ - spin_lock(&scd->scd_lock); - - if (!rc) { - if (!sv->sv_shuttingdown) - return 0; - - spin_unlock(&scd->scd_lock); - /* - * srpc_shutdown_service might have tried to unlink me - * when my buf_mdh was still invalid - */ - LNetMDUnlink(buf->buf_mdh); - spin_lock(&scd->scd_lock); - return 0; - } - - scd->scd_buf_nposted--; - if (sv->sv_shuttingdown) - return rc; /* don't allow to change scd_buf_posted */ - - list_del(&buf->buf_list); - spin_unlock(&scd->scd_lock); - - kfree(buf); - - spin_lock(&scd->scd_lock); - return rc; -} - -void -srpc_add_buffer(struct swi_workitem *wi) -{ - struct srpc_service_cd *scd = container_of(wi, struct srpc_service_cd, scd_buf_wi); - struct srpc_buffer *buf; - int rc = 0; - - /* - * it's called by workitem scheduler threads, these threads - * should have been set CPT affinity, so buffers will be posted - * on CPT local list of Portal - */ - spin_lock(&scd->scd_lock); - - while (scd->scd_buf_adjust > 0 && - !scd->scd_svc->sv_shuttingdown) { - scd->scd_buf_adjust--; /* consume it */ - scd->scd_buf_posting++; - - spin_unlock(&scd->scd_lock); - - buf = kzalloc(sizeof(*buf), GFP_NOFS); - if (!buf) { - CERROR("Failed to add new buf to service: %s\n", - scd->scd_svc->sv_name); - spin_lock(&scd->scd_lock); - rc = -ENOMEM; - break; - } - - spin_lock(&scd->scd_lock); - if (scd->scd_svc->sv_shuttingdown) { - spin_unlock(&scd->scd_lock); - kfree(buf); - - spin_lock(&scd->scd_lock); - rc = -ESHUTDOWN; - break; - } - - rc = srpc_service_post_buffer(scd, buf); - if (rc) - break; /* buf has been freed inside */ - - LASSERT(scd->scd_buf_posting > 0); - scd->scd_buf_posting--; - scd->scd_buf_total++; - scd->scd_buf_low = max(2, scd->scd_buf_total / 4); - } - - if (rc) { - scd->scd_buf_err_stamp = ktime_get_real_seconds(); - scd->scd_buf_err = rc; - - LASSERT(scd->scd_buf_posting > 0); - scd->scd_buf_posting--; - } - - 
spin_unlock(&scd->scd_lock); -} - -int -srpc_service_add_buffers(struct srpc_service *sv, int nbuffer) -{ - struct srpc_service_cd *scd; - int rc = 0; - int i; - - LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer); - - cfs_percpt_for_each(scd, i, sv->sv_cpt_data) { - spin_lock(&scd->scd_lock); - - scd->scd_buf_err = 0; - scd->scd_buf_err_stamp = 0; - scd->scd_buf_posting = 0; - scd->scd_buf_adjust = nbuffer; - /* start to post buffers */ - swi_schedule_workitem(&scd->scd_buf_wi); - spin_unlock(&scd->scd_lock); - - /* framework service only post buffer for one partition */ - if (srpc_serv_is_framework(sv)) - break; - } - - cfs_percpt_for_each(scd, i, sv->sv_cpt_data) { - spin_lock(&scd->scd_lock); - /* - * NB: srpc_service_add_buffers() can be called inside - * thread context of lst_serial_wq, and we don't normally - * allow to sleep inside thread context of WI scheduler - * because it will block current scheduler thread from doing - * anything else, even worse, it could deadlock if it's - * waiting on result from another WI of the same scheduler. - * However, it's safe at here because scd_buf_wi is scheduled - * by thread in a different WI scheduler (lst_test_wq), - * so we don't have any risk of deadlock, though this could - * block all WIs pending on lst_serial_wq for a moment - * which is not good but not fatal. 
- */ - lst_wait_until(scd->scd_buf_err || - (!scd->scd_buf_adjust && - !scd->scd_buf_posting), - scd->scd_lock, "waiting for adding buffer\n"); - - if (scd->scd_buf_err && !rc) - rc = scd->scd_buf_err; - - spin_unlock(&scd->scd_lock); - } - - return rc; -} - -void -srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer) -{ - struct srpc_service_cd *scd; - int num; - int i; - - LASSERT(!sv->sv_shuttingdown); - - cfs_percpt_for_each(scd, i, sv->sv_cpt_data) { - spin_lock(&scd->scd_lock); - - num = scd->scd_buf_total + scd->scd_buf_posting; - scd->scd_buf_adjust -= min(nbuffer, num); - - spin_unlock(&scd->scd_lock); - } -} - -/* returns 1 if sv has finished, otherwise 0 */ -int -srpc_finish_service(struct srpc_service *sv) -{ - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - int i; - - LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */ - - cfs_percpt_for_each(scd, i, sv->sv_cpt_data) { - swi_cancel_workitem(&scd->scd_buf_wi); - - spin_lock(&scd->scd_lock); - - if (scd->scd_buf_nposted > 0) { - CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n", - scd->scd_buf_nposted); - spin_unlock(&scd->scd_lock); - return 0; - } - - if (list_empty(&scd->scd_rpc_active)) { - spin_unlock(&scd->scd_lock); - continue; - } - - rpc = list_entry(scd->scd_rpc_active.next, - struct srpc_server_rpc, srpc_list); - CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s, ev fired %d type %d status %d lnet %d\n", - rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer), - swi_state2str(rpc->srpc_wi.swi_state), - rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type, - rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet); - spin_unlock(&scd->scd_lock); - return 0; - } - - /* no lock needed from now on */ - srpc_service_fini(sv); - return 1; -} - -/* called with sv->sv_lock held */ -static void -srpc_service_recycle_buffer(struct srpc_service_cd *scd, - struct srpc_buffer *buf) -__must_hold(&scd->scd_lock) -{ - if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) 
{ - if (srpc_service_post_buffer(scd, buf)) { - CWARN("Failed to post %s buffer\n", - scd->scd_svc->sv_name); - } - return; - } - - /* service is shutting down, or we want to recycle some buffers */ - scd->scd_buf_total--; - - if (scd->scd_buf_adjust < 0) { - scd->scd_buf_adjust++; - if (scd->scd_buf_adjust < 0 && - !scd->scd_buf_total && !scd->scd_buf_posting) { - CDEBUG(D_INFO, - "Try to recycle %d buffers but nothing left\n", - scd->scd_buf_adjust); - scd->scd_buf_adjust = 0; - } - } - - spin_unlock(&scd->scd_lock); - kfree(buf); - spin_lock(&scd->scd_lock); -} - -void -srpc_abort_service(struct srpc_service *sv) -{ - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - int i; - - CDEBUG(D_NET, "Aborting service: id %d, name %s\n", - sv->sv_id, sv->sv_name); - - cfs_percpt_for_each(scd, i, sv->sv_cpt_data) { - spin_lock(&scd->scd_lock); - - /* - * schedule in-flight RPCs to notice the abort, NB: - * racing with incoming RPCs; complete fix should make test - * RPCs carry session ID in its headers - */ - list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) { - rpc->srpc_aborted = 1; - swi_schedule_workitem(&rpc->srpc_wi); - } - - spin_unlock(&scd->scd_lock); - } -} - -void -srpc_shutdown_service(struct srpc_service *sv) -{ - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - struct srpc_buffer *buf; - int i; - - CDEBUG(D_NET, "Shutting down service: id %d, name %s\n", - sv->sv_id, sv->sv_name); - - cfs_percpt_for_each(scd, i, sv->sv_cpt_data) - spin_lock(&scd->scd_lock); - - sv->sv_shuttingdown = 1; /* i.e. 
no new active RPC */ - - cfs_percpt_for_each(scd, i, sv->sv_cpt_data) - spin_unlock(&scd->scd_lock); - - cfs_percpt_for_each(scd, i, sv->sv_cpt_data) { - spin_lock(&scd->scd_lock); - - /* schedule in-flight RPCs to notice the shutdown */ - list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) - swi_schedule_workitem(&rpc->srpc_wi); - - spin_unlock(&scd->scd_lock); - - /* - * OK to traverse scd_buf_posted without lock, since no one - * touches scd_buf_posted now - */ - list_for_each_entry(buf, &scd->scd_buf_posted, buf_list) - LNetMDUnlink(buf->buf_mdh); - } -} - -static int -srpc_send_request(struct srpc_client_rpc *rpc) -{ - struct srpc_event *ev = &rpc->crpc_reqstev; - int rc; - - ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REQUEST_SENT; - - rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service), - rpc->crpc_service, &rpc->crpc_reqstmsg, - sizeof(struct srpc_msg), LNET_MD_OP_PUT, - rpc->crpc_dest, LNET_NID_ANY, - &rpc->crpc_reqstmdh, ev); - if (rc) { - LASSERT(rc == -ENOMEM); - ev->ev_fired = 1; /* no more event expected */ - } - return rc; -} - -static int -srpc_prepare_reply(struct srpc_client_rpc *rpc) -{ - struct srpc_event *ev = &rpc->crpc_replyev; - __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid; - int rc; - - ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REPLY_RCVD; - - *id = srpc_next_id(); - - rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id, - &rpc->crpc_replymsg, - sizeof(struct srpc_msg), - LNET_MD_OP_PUT, rpc->crpc_dest, - &rpc->crpc_replymdh, ev); - if (rc) { - LASSERT(rc == -ENOMEM); - ev->ev_fired = 1; /* no more event expected */ - } - return rc; -} - -static int -srpc_prepare_bulk(struct srpc_client_rpc *rpc) -{ - struct srpc_bulk *bk = &rpc->crpc_bulk; - struct srpc_event *ev = &rpc->crpc_bulkev; - __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; - int rc; - int opt; - - LASSERT(bk->bk_niov <= LNET_MAX_IOV); - - if (!bk->bk_niov) - return 0; /* nothing to do */ - - opt = bk->bk_sink ? 
LNET_MD_OP_PUT : LNET_MD_OP_GET; - opt |= LNET_MD_KIOV; - - ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_BULK_REQ_RCVD; - - *id = srpc_next_id(); - - rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id, - &bk->bk_iovs[0], bk->bk_niov, opt, - rpc->crpc_dest, &bk->bk_mdh, ev); - if (rc) { - LASSERT(rc == -ENOMEM); - ev->ev_fired = 1; /* no more event expected */ - } - return rc; -} - -static int -srpc_do_bulk(struct srpc_server_rpc *rpc) -{ - struct srpc_event *ev = &rpc->srpc_ev; - struct srpc_bulk *bk = rpc->srpc_bulk; - __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid; - int rc; - int opt; - - LASSERT(bk); - - opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT; - opt |= LNET_MD_KIOV; - - ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT; - - rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id, - &bk->bk_iovs[0], bk->bk_niov, opt, - rpc->srpc_peer, rpc->srpc_self, - &bk->bk_mdh, ev); - if (rc) - ev->ev_fired = 1; /* no more event expected */ - return rc; -} - -/* only called from srpc_handle_rpc */ -static void -srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) -{ - struct srpc_service_cd *scd = rpc->srpc_scd; - struct srpc_service *sv = scd->scd_svc; - struct srpc_buffer *buffer; - - LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE); - - rpc->srpc_status = status; - - CDEBUG_LIMIT(!status ? 
D_NET : D_NETERROR, - "Server RPC %p done: service %s, peer %s, status %s:%d\n", - rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer), - swi_state2str(rpc->srpc_wi.swi_state), status); - - if (status) { - spin_lock(&srpc_data.rpc_glock); - srpc_data.rpc_counters.rpcs_dropped++; - spin_unlock(&srpc_data.rpc_glock); - } - - if (rpc->srpc_done) - (*rpc->srpc_done) (rpc); - LASSERT(!rpc->srpc_bulk); - - spin_lock(&scd->scd_lock); - - if (rpc->srpc_reqstbuf) { - /* - * NB might drop sv_lock in srpc_service_recycle_buffer, but - * sv won't go away for scd_rpc_active must not be empty - */ - srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf); - rpc->srpc_reqstbuf = NULL; - } - - list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */ - - /* - * No one can schedule me now since: - * - I'm not on scd_rpc_active. - * - all LNet events have been fired. - * Cancel pending schedules and prevent future schedule attempts: - */ - LASSERT(rpc->srpc_ev.ev_fired); - - if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) { - buffer = list_entry(scd->scd_buf_blocked.next, - struct srpc_buffer, buf_list); - list_del(&buffer->buf_list); - - srpc_init_server_rpc(rpc, scd, buffer); - list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active); - swi_schedule_workitem(&rpc->srpc_wi); - } else { - list_add(&rpc->srpc_list, &scd->scd_rpc_free); - } - - spin_unlock(&scd->scd_lock); -} - -/* handles an incoming RPC */ -void -srpc_handle_rpc(struct swi_workitem *wi) -{ - struct srpc_server_rpc *rpc = container_of(wi, struct srpc_server_rpc, srpc_wi); - struct srpc_service_cd *scd = rpc->srpc_scd; - struct srpc_service *sv = scd->scd_svc; - struct srpc_event *ev = &rpc->srpc_ev; - int rc = 0; - - LASSERT(wi == &rpc->srpc_wi); - - spin_lock(&scd->scd_lock); - - if (sv->sv_shuttingdown || rpc->srpc_aborted) { - spin_unlock(&scd->scd_lock); - - if (rpc->srpc_bulk) - LNetMDUnlink(rpc->srpc_bulk->bk_mdh); - LNetMDUnlink(rpc->srpc_replymdh); - - if (ev->ev_fired) { /* no more event, OK to finish 
*/ - srpc_server_rpc_done(rpc, -ESHUTDOWN); - } - return; - } - - spin_unlock(&scd->scd_lock); - - switch (wi->swi_state) { - default: - LBUG(); - case SWI_STATE_NEWBORN: { - struct srpc_msg *msg; - struct srpc_generic_reply *reply; - - msg = &rpc->srpc_reqstbuf->buf_msg; - reply = &rpc->srpc_replymsg.msg_body.reply; - - if (!msg->msg_magic) { - /* moaned already in srpc_lnet_ev_handler */ - srpc_server_rpc_done(rpc, EBADMSG); - return; - } - - srpc_unpack_msg_hdr(msg); - if (msg->msg_version != SRPC_MSG_VERSION) { - CWARN("Version mismatch: %u, %u expected, from %s\n", - msg->msg_version, SRPC_MSG_VERSION, - libcfs_id2str(rpc->srpc_peer)); - reply->status = EPROTO; - /* drop through and send reply */ - } else { - reply->status = 0; - rc = (*sv->sv_handler)(rpc); - LASSERT(!reply->status || !rpc->srpc_bulk); - if (rc) { - srpc_server_rpc_done(rpc, rc); - return; - } - } - - wi->swi_state = SWI_STATE_BULK_STARTED; - - if (rpc->srpc_bulk) { - rc = srpc_do_bulk(rpc); - if (!rc) - return; /* wait for bulk */ - - LASSERT(ev->ev_fired); - ev->ev_status = rc; - } - } - /* fall through */ - case SWI_STATE_BULK_STARTED: - LASSERT(!rpc->srpc_bulk || ev->ev_fired); - - if (rpc->srpc_bulk) { - rc = ev->ev_status; - - if (sv->sv_bulk_ready) - rc = (*sv->sv_bulk_ready) (rpc, rc); - - if (rc) { - srpc_server_rpc_done(rpc, rc); - return; - } - } - - wi->swi_state = SWI_STATE_REPLY_SUBMITTED; - rc = srpc_send_reply(rpc); - if (!rc) - return; /* wait for reply */ - srpc_server_rpc_done(rpc, rc); - return; - - case SWI_STATE_REPLY_SUBMITTED: - if (!ev->ev_fired) { - CERROR("RPC %p: bulk %p, service %d\n", - rpc, rpc->srpc_bulk, sv->sv_id); - CERROR("Event: status %d, type %d, lnet %d\n", - ev->ev_status, ev->ev_type, ev->ev_lnet); - LASSERT(ev->ev_fired); - } - - wi->swi_state = SWI_STATE_DONE; - srpc_server_rpc_done(rpc, ev->ev_status); - return; - } -} - -static void -srpc_client_rpc_expired(void *data) -{ - struct srpc_client_rpc *rpc = data; - - CWARN("Client RPC expired: service 
%d, peer %s, timeout %d.\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - rpc->crpc_timeout); - - spin_lock(&rpc->crpc_lock); - - rpc->crpc_timeout = 0; - srpc_abort_rpc(rpc, -ETIMEDOUT); - - spin_unlock(&rpc->crpc_lock); - - spin_lock(&srpc_data.rpc_glock); - srpc_data.rpc_counters.rpcs_expired++; - spin_unlock(&srpc_data.rpc_glock); -} - -static void -srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc) -{ - struct stt_timer *timer = &rpc->crpc_timer; - - if (!rpc->crpc_timeout) - return; - - INIT_LIST_HEAD(&timer->stt_list); - timer->stt_data = rpc; - timer->stt_func = srpc_client_rpc_expired; - timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout; - stt_add_timer(timer); -} - -/* - * Called with rpc->crpc_lock held. - * - * Upon exit the RPC expiry timer is not queued and the handler is not - * running on any CPU. - */ -static void -srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc) -{ - /* timer not planted or already exploded */ - if (!rpc->crpc_timeout) - return; - - /* timer successfully defused */ - if (stt_del_timer(&rpc->crpc_timer)) - return; - - /* timer detonated, wait for it to explode */ - while (rpc->crpc_timeout) { - spin_unlock(&rpc->crpc_lock); - - schedule(); - - spin_lock(&rpc->crpc_lock); - } -} - -static void -srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status) -{ - struct swi_workitem *wi = &rpc->crpc_wi; - - LASSERT(status || wi->swi_state == SWI_STATE_DONE); - - spin_lock(&rpc->crpc_lock); - - rpc->crpc_closed = 1; - if (!rpc->crpc_status) - rpc->crpc_status = status; - - srpc_del_client_rpc_timer(rpc); - - CDEBUG_LIMIT(!status ? D_NET : D_NETERROR, - "Client RPC done: service %d, peer %s, status %s:%d:%d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(wi->swi_state), rpc->crpc_aborted, status); - - /* - * No one can schedule me now since: - * - RPC timer has been defused. - * - all LNet events have been fired. 
- * - crpc_closed has been set, preventing srpc_abort_rpc from - * scheduling me. - * Cancel pending schedules and prevent future schedule attempts: - */ - LASSERT(!srpc_event_pending(rpc)); - - spin_unlock(&rpc->crpc_lock); - - (*rpc->crpc_done)(rpc); -} - -/* sends an outgoing RPC */ -void -srpc_send_rpc(struct swi_workitem *wi) -{ - int rc = 0; - struct srpc_client_rpc *rpc; - struct srpc_msg *reply; - int do_bulk; - - LASSERT(wi); - - rpc = container_of(wi, struct srpc_client_rpc, crpc_wi); - - LASSERT(rpc); - LASSERT(wi == &rpc->crpc_wi); - - reply = &rpc->crpc_replymsg; - do_bulk = rpc->crpc_bulk.bk_niov > 0; - - spin_lock(&rpc->crpc_lock); - - if (rpc->crpc_aborted) { - spin_unlock(&rpc->crpc_lock); - goto abort; - } - - spin_unlock(&rpc->crpc_lock); - - switch (wi->swi_state) { - default: - LBUG(); - case SWI_STATE_NEWBORN: - LASSERT(!srpc_event_pending(rpc)); - - rc = srpc_prepare_reply(rpc); - if (rc) { - srpc_client_rpc_done(rpc, rc); - return; - } - - rc = srpc_prepare_bulk(rpc); - if (rc) - break; - - wi->swi_state = SWI_STATE_REQUEST_SUBMITTED; - rc = srpc_send_request(rpc); - break; - - case SWI_STATE_REQUEST_SUBMITTED: - /* - * CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any - * order; however, they're processed in a strict order: - * rqt, rpy, and bulk. 
- */ - if (!rpc->crpc_reqstev.ev_fired) - break; - - rc = rpc->crpc_reqstev.ev_status; - if (rc) - break; - - wi->swi_state = SWI_STATE_REQUEST_SENT; - /* perhaps more events */ - /* fall through */ - case SWI_STATE_REQUEST_SENT: { - enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service); - - if (!rpc->crpc_replyev.ev_fired) - break; - - rc = rpc->crpc_replyev.ev_status; - if (rc) - break; - - srpc_unpack_msg_hdr(reply); - if (reply->msg_type != type || - (reply->msg_magic != SRPC_MSG_MAGIC && - reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) { - CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n", - libcfs_id2str(rpc->crpc_dest), - reply->msg_type, type, - reply->msg_magic, SRPC_MSG_MAGIC); - rc = -EBADMSG; - break; - } - - if (do_bulk && reply->msg_body.reply.status) { - CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n", - reply->msg_body.reply.status, - libcfs_id2str(rpc->crpc_dest)); - LNetMDUnlink(rpc->crpc_bulk.bk_mdh); - } - - wi->swi_state = SWI_STATE_REPLY_RECEIVED; - } - /* fall through */ - case SWI_STATE_REPLY_RECEIVED: - if (do_bulk && !rpc->crpc_bulkev.ev_fired) - break; - - rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0; - - /* - * Bulk buffer was unlinked due to remote error. Clear error - * since reply buffer still contains valid data. - * NB rpc->crpc_done shouldn't look into bulk data in case of - * remote error. 
- */ - if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK && - !rpc->crpc_status && reply->msg_body.reply.status) - rc = 0; - - wi->swi_state = SWI_STATE_DONE; - srpc_client_rpc_done(rpc, rc); - return; - } - - if (rc) { - spin_lock(&rpc->crpc_lock); - srpc_abort_rpc(rpc, rc); - spin_unlock(&rpc->crpc_lock); - } - -abort: - if (rpc->crpc_aborted) { - LNetMDUnlink(rpc->crpc_reqstmdh); - LNetMDUnlink(rpc->crpc_replymdh); - LNetMDUnlink(rpc->crpc_bulk.bk_mdh); - - if (!srpc_event_pending(rpc)) { - srpc_client_rpc_done(rpc, -EINTR); - return; - } - } -} - -struct srpc_client_rpc * -srpc_create_client_rpc(struct lnet_process_id peer, int service, - int nbulkiov, int bulklen, - void (*rpc_done)(struct srpc_client_rpc *), - void (*rpc_fini)(struct srpc_client_rpc *), void *priv) -{ - struct srpc_client_rpc *rpc; - - rpc = kzalloc(offsetof(struct srpc_client_rpc, - crpc_bulk.bk_iovs[nbulkiov]), GFP_KERNEL); - if (!rpc) - return NULL; - - srpc_init_client_rpc(rpc, peer, service, nbulkiov, - bulklen, rpc_done, rpc_fini, priv); - return rpc; -} - -/* called with rpc->crpc_lock held */ -void -srpc_abort_rpc(struct srpc_client_rpc *rpc, int why) -{ - LASSERT(why); - - if (rpc->crpc_aborted || /* already aborted */ - rpc->crpc_closed) /* callback imminent */ - return; - - CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(rpc->crpc_wi.swi_state), why); - - rpc->crpc_aborted = 1; - rpc->crpc_status = why; - swi_schedule_workitem(&rpc->crpc_wi); -} - -/* called with rpc->crpc_lock held */ -void -srpc_post_rpc(struct srpc_client_rpc *rpc) -{ - LASSERT(!rpc->crpc_aborted); - LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); - - CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n", - libcfs_id2str(rpc->crpc_dest), rpc->crpc_service, - rpc->crpc_timeout); - - srpc_add_client_rpc_timer(rpc); - swi_schedule_workitem(&rpc->crpc_wi); -} - -int -srpc_send_reply(struct 
srpc_server_rpc *rpc) -{ - struct srpc_event *ev = &rpc->srpc_ev; - struct srpc_msg *msg = &rpc->srpc_replymsg; - struct srpc_buffer *buffer = rpc->srpc_reqstbuf; - struct srpc_service_cd *scd = rpc->srpc_scd; - struct srpc_service *sv = scd->scd_svc; - __u64 rpyid; - int rc; - - LASSERT(buffer); - rpyid = buffer->buf_msg.msg_body.reqst.rpyid; - - spin_lock(&scd->scd_lock); - - if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) { - /* - * Repost buffer before replying since test client - * might send me another RPC once it gets the reply - */ - if (srpc_service_post_buffer(scd, buffer)) - CWARN("Failed to repost %s buffer\n", sv->sv_name); - rpc->srpc_reqstbuf = NULL; - } - - spin_unlock(&scd->scd_lock); - - ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REPLY_SENT; - - msg->msg_magic = SRPC_MSG_MAGIC; - msg->msg_version = SRPC_MSG_VERSION; - msg->msg_type = srpc_service2reply(sv->sv_id); - - rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg, - sizeof(*msg), LNET_MD_OP_PUT, - rpc->srpc_peer, rpc->srpc_self, - &rpc->srpc_replymdh, ev); - if (rc) - ev->ev_fired = 1; /* no more event expected */ - return rc; -} - -/* when in kernel always called with LNET_LOCK() held, and in thread context */ -static void -srpc_lnet_ev_handler(struct lnet_event *ev) -{ - struct srpc_service_cd *scd; - struct srpc_event *rpcev = ev->md.user_ptr; - struct srpc_client_rpc *crpc; - struct srpc_server_rpc *srpc; - struct srpc_buffer *buffer; - struct srpc_service *sv; - struct srpc_msg *msg; - enum srpc_msg_type type; - - LASSERT(!in_interrupt()); - - if (ev->status) { - __u32 errors; - - spin_lock(&srpc_data.rpc_glock); - if (ev->status != -ECANCELED) /* cancellation is not error */ - srpc_data.rpc_counters.errors++; - errors = srpc_data.rpc_counters.errors; - spin_unlock(&srpc_data.rpc_glock); - - CNETERR("LNet event status %d type %d, RPC errors %u\n", - ev->status, ev->type, errors); - } - - rpcev->ev_lnet = ev->type; - - switch (rpcev->ev_type) { - default: - 
CERROR("Unknown event: status %d, type %d, lnet %d\n", - rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet); - LBUG(); - case SRPC_REQUEST_SENT: - if (!ev->status && ev->type != LNET_EVENT_UNLINK) { - spin_lock(&srpc_data.rpc_glock); - srpc_data.rpc_counters.rpcs_sent++; - spin_unlock(&srpc_data.rpc_glock); - } - /* fall through */ - case SRPC_REPLY_RCVD: - case SRPC_BULK_REQ_RCVD: - crpc = rpcev->ev_data; - - if (rpcev != &crpc->crpc_reqstev && - rpcev != &crpc->crpc_replyev && - rpcev != &crpc->crpc_bulkev) { - CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n", - rpcev, crpc, &crpc->crpc_reqstev, - &crpc->crpc_replyev, &crpc->crpc_bulkev); - CERROR("Bad event: status %d, type %d, lnet %d\n", - rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet); - LBUG(); - } - - spin_lock(&crpc->crpc_lock); - - LASSERT(!rpcev->ev_fired); - rpcev->ev_fired = 1; - rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ? - -EINTR : ev->status; - swi_schedule_workitem(&crpc->crpc_wi); - - spin_unlock(&crpc->crpc_lock); - break; - - case SRPC_REQUEST_RCVD: - scd = rpcev->ev_data; - sv = scd->scd_svc; - - LASSERT(rpcev == &scd->scd_ev); - - spin_lock(&scd->scd_lock); - - LASSERT(ev->unlinked); - LASSERT(ev->type == LNET_EVENT_PUT || - ev->type == LNET_EVENT_UNLINK); - LASSERT(ev->type != LNET_EVENT_UNLINK || - sv->sv_shuttingdown); - - buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg); - buffer->buf_peer = ev->initiator; - buffer->buf_self = ev->target.nid; - - LASSERT(scd->scd_buf_nposted > 0); - scd->scd_buf_nposted--; - - if (sv->sv_shuttingdown) { - /* - * Leave buffer on scd->scd_buf_nposted since - * srpc_finish_service needs to traverse it. 
- */ - spin_unlock(&scd->scd_lock); - break; - } - - if (scd->scd_buf_err_stamp && - scd->scd_buf_err_stamp < ktime_get_real_seconds()) { - /* re-enable adding buffer */ - scd->scd_buf_err_stamp = 0; - scd->scd_buf_err = 0; - } - - if (!scd->scd_buf_err && /* adding buffer is enabled */ - !scd->scd_buf_adjust && - scd->scd_buf_nposted < scd->scd_buf_low) { - scd->scd_buf_adjust = max(scd->scd_buf_total / 2, - SFW_TEST_WI_MIN); - swi_schedule_workitem(&scd->scd_buf_wi); - } - - list_del(&buffer->buf_list); /* from scd->scd_buf_posted */ - msg = &buffer->buf_msg; - type = srpc_service2request(sv->sv_id); - - if (ev->status || ev->mlength != sizeof(*msg) || - (msg->msg_type != type && - msg->msg_type != __swab32(type)) || - (msg->msg_magic != SRPC_MSG_MAGIC && - msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) { - CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n", - sv->sv_name, libcfs_id2str(ev->initiator), - ev->status, ev->mlength, - msg->msg_type, msg->msg_magic); - - /* - * NB can't call srpc_service_recycle_buffer here since - * it may call LNetM[DE]Attach. 
The invalid magic tells - * srpc_handle_rpc to drop this RPC - */ - msg->msg_magic = 0; - } - - if (!list_empty(&scd->scd_rpc_free)) { - srpc = list_entry(scd->scd_rpc_free.next, - struct srpc_server_rpc, - srpc_list); - list_del(&srpc->srpc_list); - - srpc_init_server_rpc(srpc, scd, buffer); - list_add_tail(&srpc->srpc_list, - &scd->scd_rpc_active); - swi_schedule_workitem(&srpc->srpc_wi); - } else { - list_add_tail(&buffer->buf_list, - &scd->scd_buf_blocked); - } - - spin_unlock(&scd->scd_lock); - - spin_lock(&srpc_data.rpc_glock); - srpc_data.rpc_counters.rpcs_rcvd++; - spin_unlock(&srpc_data.rpc_glock); - break; - - case SRPC_BULK_GET_RPLD: - LASSERT(ev->type == LNET_EVENT_SEND || - ev->type == LNET_EVENT_REPLY || - ev->type == LNET_EVENT_UNLINK); - - if (!ev->unlinked) - break; /* wait for final event */ - /* fall through */ - case SRPC_BULK_PUT_SENT: - if (!ev->status && ev->type != LNET_EVENT_UNLINK) { - spin_lock(&srpc_data.rpc_glock); - - if (rpcev->ev_type == SRPC_BULK_GET_RPLD) - srpc_data.rpc_counters.bulk_get += ev->mlength; - else - srpc_data.rpc_counters.bulk_put += ev->mlength; - - spin_unlock(&srpc_data.rpc_glock); - } - /* fall through */ - case SRPC_REPLY_SENT: - srpc = rpcev->ev_data; - scd = srpc->srpc_scd; - - LASSERT(rpcev == &srpc->srpc_ev); - - spin_lock(&scd->scd_lock); - - rpcev->ev_fired = 1; - rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ? 
- -EINTR : ev->status; - swi_schedule_workitem(&srpc->srpc_wi); - - spin_unlock(&scd->scd_lock); - break; - } -} - -int -srpc_startup(void) -{ - int rc; - - memset(&srpc_data, 0, sizeof(struct smoketest_rpc)); - spin_lock_init(&srpc_data.rpc_glock); - - /* 1 second pause to avoid timestamp reuse */ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48; - - srpc_data.rpc_state = SRPC_STATE_NONE; - - rc = LNetNIInit(LNET_PID_LUSTRE); - if (rc < 0) { - CERROR("LNetNIInit() has failed: %d\n", rc); - return rc; - } - - srpc_data.rpc_state = SRPC_STATE_NI_INIT; - - LNetInvalidateEQHandle(&srpc_data.rpc_lnet_eq); - rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq); - if (rc) { - CERROR("LNetEQAlloc() has failed: %d\n", rc); - goto bail; - } - - rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL); - LASSERT(!rc); - rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL); - LASSERT(!rc); - - srpc_data.rpc_state = SRPC_STATE_EQ_INIT; - - rc = stt_startup(); - -bail: - if (rc) - srpc_shutdown(); - else - srpc_data.rpc_state = SRPC_STATE_RUNNING; - - return rc; -} - -void -srpc_shutdown(void) -{ - int i; - int rc; - int state; - - state = srpc_data.rpc_state; - srpc_data.rpc_state = SRPC_STATE_STOPPING; - - switch (state) { - default: - LBUG(); - case SRPC_STATE_RUNNING: - spin_lock(&srpc_data.rpc_glock); - - for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) { - struct srpc_service *sv = srpc_data.rpc_services[i]; - - LASSERTF(!sv, "service not empty: id %d, name %s\n", - i, sv->sv_name); - } - - spin_unlock(&srpc_data.rpc_glock); - - stt_shutdown(); - /* fall through */ - case SRPC_STATE_EQ_INIT: - rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL); - rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL); - LASSERT(!rc); - rc = LNetEQFree(srpc_data.rpc_lnet_eq); - LASSERT(!rc); /* the EQ should have no user by now */ - /* fall through */ - case SRPC_STATE_NI_INIT: - LNetNIFini(); - } -} diff --git 
a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h deleted file mode 100644 index 465b5b534423..000000000000 --- a/drivers/staging/lustre/lnet/selftest/rpc.h +++ /dev/null @@ -1,295 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef __SELFTEST_RPC_H__ -#define __SELFTEST_RPC_H__ - -#include - -/* - * LST wired structures - * - * XXX: *REPLY == *REQST + 1 - */ -enum srpc_msg_type { - SRPC_MSG_MKSN_REQST = 0, - SRPC_MSG_MKSN_REPLY = 1, - SRPC_MSG_RMSN_REQST = 2, - SRPC_MSG_RMSN_REPLY = 3, - SRPC_MSG_BATCH_REQST = 4, - SRPC_MSG_BATCH_REPLY = 5, - SRPC_MSG_STAT_REQST = 6, - SRPC_MSG_STAT_REPLY = 7, - SRPC_MSG_TEST_REQST = 8, - SRPC_MSG_TEST_REPLY = 9, - SRPC_MSG_DEBUG_REQST = 10, - SRPC_MSG_DEBUG_REPLY = 11, - SRPC_MSG_BRW_REQST = 12, - SRPC_MSG_BRW_REPLY = 13, - SRPC_MSG_PING_REQST = 14, - SRPC_MSG_PING_REPLY = 15, - SRPC_MSG_JOIN_REQST = 16, - SRPC_MSG_JOIN_REPLY = 17, -}; - -/* CAVEAT EMPTOR: - * All srpc_*_reqst_t's 1st field must be matchbits of reply buffer, - * and 2nd field matchbits of bulk buffer if any. - * - * All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field - * session id if needed. - */ -struct srpc_generic_reqst { - __u64 rpyid; /* reply buffer matchbits */ - __u64 bulkid; /* bulk buffer matchbits */ -} WIRE_ATTR; - -struct srpc_generic_reply { - __u32 status; - struct lst_sid sid; -} WIRE_ATTR; - -/* FRAMEWORK RPCs */ -struct srpc_mksn_reqst { - __u64 mksn_rpyid; /* reply buffer matchbits */ - struct lst_sid mksn_sid; /* session id */ - __u32 mksn_force; /* use brute force */ - char mksn_name[LST_NAME_SIZE]; -} WIRE_ATTR; /* make session request */ - -struct srpc_mksn_reply { - __u32 mksn_status; /* session status */ - struct lst_sid mksn_sid; /* session id */ - __u32 mksn_timeout; /* session timeout */ - char mksn_name[LST_NAME_SIZE]; -} WIRE_ATTR; /* make session reply */ - -struct srpc_rmsn_reqst { - __u64 rmsn_rpyid; /* reply buffer matchbits */ - struct lst_sid rmsn_sid; /* session id */ -} WIRE_ATTR; /* remove session request */ - -struct srpc_rmsn_reply { - __u32 rmsn_status; - struct lst_sid rmsn_sid; /* session id */ -} WIRE_ATTR; /* remove session reply */ - -struct srpc_join_reqst { - __u64 join_rpyid; /* reply buffer matchbits */ - 
struct lst_sid join_sid; /* session id to join */ - char join_group[LST_NAME_SIZE]; /* group name */ -} WIRE_ATTR; - -struct srpc_join_reply { - __u32 join_status; /* returned status */ - struct lst_sid join_sid; /* session id */ - __u32 join_timeout; /* # seconds' inactivity to - * expire - */ - char join_session[LST_NAME_SIZE]; /* session name */ -} WIRE_ATTR; - -struct srpc_debug_reqst { - __u64 dbg_rpyid; /* reply buffer matchbits */ - struct lst_sid dbg_sid; /* session id */ - __u32 dbg_flags; /* bitmap of debug */ -} WIRE_ATTR; - -struct srpc_debug_reply { - __u32 dbg_status; /* returned code */ - struct lst_sid dbg_sid; /* session id */ - __u32 dbg_timeout; /* session timeout */ - __u32 dbg_nbatch; /* # of batches in the node */ - char dbg_name[LST_NAME_SIZE]; /* session name */ -} WIRE_ATTR; - -#define SRPC_BATCH_OPC_RUN 1 -#define SRPC_BATCH_OPC_STOP 2 -#define SRPC_BATCH_OPC_QUERY 3 - -struct srpc_batch_reqst { - __u64 bar_rpyid; /* reply buffer matchbits */ - struct lst_sid bar_sid; /* session id */ - struct lst_bid bar_bid; /* batch id */ - __u32 bar_opc; /* create/start/stop batch */ - __u32 bar_testidx; /* index of test */ - __u32 bar_arg; /* parameters */ -} WIRE_ATTR; - -struct srpc_batch_reply { - __u32 bar_status; /* status of request */ - struct lst_sid bar_sid; /* session id */ - __u32 bar_active; /* # of active tests in batch/test */ - __u32 bar_time; /* remained time */ -} WIRE_ATTR; - -struct srpc_stat_reqst { - __u64 str_rpyid; /* reply buffer matchbits */ - struct lst_sid str_sid; /* session id */ - __u32 str_type; /* type of stat */ -} WIRE_ATTR; - -struct srpc_stat_reply { - __u32 str_status; - struct lst_sid str_sid; - struct sfw_counters str_fw; - struct srpc_counters str_rpc; - struct lnet_counters str_lnet; -} WIRE_ATTR; - -struct test_bulk_req { - __u32 blk_opc; /* bulk operation code */ - __u32 blk_npg; /* # of pages */ - __u32 blk_flags; /* reserved flags */ -} WIRE_ATTR; - -struct test_bulk_req_v1 { - __u16 blk_opc; /* bulk 
operation code */ - __u16 blk_flags; /* data check flags */ - __u32 blk_len; /* data length */ - __u32 blk_offset; /* offset */ -} WIRE_ATTR; - -struct test_ping_req { - __u32 png_size; /* size of ping message */ - __u32 png_flags; /* reserved flags */ -} WIRE_ATTR; - -struct srpc_test_reqst { - __u64 tsr_rpyid; /* reply buffer matchbits */ - __u64 tsr_bulkid; /* bulk buffer matchbits */ - struct lst_sid tsr_sid; /* session id */ - struct lst_bid tsr_bid; /* batch id */ - __u32 tsr_service; /* test type: bulk|ping|... */ - __u32 tsr_loop; /* test client loop count or - * # server buffers needed - */ - __u32 tsr_concur; /* concurrency of test */ - __u8 tsr_is_client; /* is test client or not */ - __u8 tsr_stop_onerr; /* stop on error */ - __u32 tsr_ndest; /* # of dest nodes */ - - union { - struct test_ping_req ping; - struct test_bulk_req bulk_v0; - struct test_bulk_req_v1 bulk_v1; - } tsr_u; -} WIRE_ATTR; - -struct srpc_test_reply { - __u32 tsr_status; /* returned code */ - struct lst_sid tsr_sid; -} WIRE_ATTR; - -/* TEST RPCs */ -struct srpc_ping_reqst { - __u64 pnr_rpyid; - __u32 pnr_magic; - __u32 pnr_seq; - __u64 pnr_time_sec; - __u64 pnr_time_usec; -} WIRE_ATTR; - -struct srpc_ping_reply { - __u32 pnr_status; - __u32 pnr_magic; - __u32 pnr_seq; -} WIRE_ATTR; - -struct srpc_brw_reqst { - __u64 brw_rpyid; /* reply buffer matchbits */ - __u64 brw_bulkid; /* bulk buffer matchbits */ - __u32 brw_rw; /* read or write */ - __u32 brw_len; /* bulk data len */ - __u32 brw_flags; /* bulk data patterns */ -} WIRE_ATTR; /* bulk r/w request */ - -struct srpc_brw_reply { - __u32 brw_status; -} WIRE_ATTR; /* bulk r/w reply */ - -#define SRPC_MSG_MAGIC 0xeeb0f00d -#define SRPC_MSG_VERSION 1 - -struct srpc_msg { - __u32 msg_magic; /* magic number */ - __u32 msg_version; /* message version number */ - __u32 msg_type; /* type of message body: srpc_msg_type */ - __u32 msg_reserved0; - __u32 msg_reserved1; - __u32 msg_ses_feats; /* test session features */ - union { - struct 
srpc_generic_reqst reqst; - struct srpc_generic_reply reply; - - struct srpc_mksn_reqst mksn_reqst; - struct srpc_mksn_reply mksn_reply; - struct srpc_rmsn_reqst rmsn_reqst; - struct srpc_rmsn_reply rmsn_reply; - struct srpc_debug_reqst dbg_reqst; - struct srpc_debug_reply dbg_reply; - struct srpc_batch_reqst bat_reqst; - struct srpc_batch_reply bat_reply; - struct srpc_stat_reqst stat_reqst; - struct srpc_stat_reply stat_reply; - struct srpc_test_reqst tes_reqst; - struct srpc_test_reply tes_reply; - struct srpc_join_reqst join_reqst; - struct srpc_join_reply join_reply; - - struct srpc_ping_reqst ping_reqst; - struct srpc_ping_reply ping_reply; - struct srpc_brw_reqst brw_reqst; - struct srpc_brw_reply brw_reply; - } msg_body; -} WIRE_ATTR; - -static inline void -srpc_unpack_msg_hdr(struct srpc_msg *msg) -{ - if (msg->msg_magic == SRPC_MSG_MAGIC) - return; /* no flipping needed */ - - /* - * We do not swap the magic number here as it is needed to - * determine whether the body needs to be swapped. - */ - /* __swab32s(&msg->msg_magic); */ - __swab32s(&msg->msg_type); - __swab32s(&msg->msg_version); - __swab32s(&msg->msg_ses_feats); - __swab32s(&msg->msg_reserved0); - __swab32s(&msg->msg_reserved1); -} - -#endif /* __SELFTEST_RPC_H__ */ diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h deleted file mode 100644 index 8737fa96b192..000000000000 --- a/drivers/staging/lustre/lnet/selftest/selftest.h +++ /dev/null @@ -1,622 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/selftest/selftest.h - * - * Author: Isaac Huang - */ -#ifndef __SELFTEST_SELFTEST_H__ -#define __SELFTEST_SELFTEST_H__ - -#define LNET_ONLY - -#include -#include -#include - -#include "rpc.h" -#include "timer.h" - -#ifndef MADE_WITHOUT_COMPROMISE -#define MADE_WITHOUT_COMPROMISE -#endif - -#define SWI_STATE_NEWBORN 0 -#define SWI_STATE_REPLY_SUBMITTED 1 -#define SWI_STATE_REPLY_SENT 2 -#define SWI_STATE_REQUEST_SUBMITTED 3 -#define SWI_STATE_REQUEST_SENT 4 -#define SWI_STATE_REPLY_RECEIVED 5 -#define SWI_STATE_BULK_STARTED 6 -#define SWI_STATE_DONE 10 - -/* forward refs */ -struct srpc_service; -struct srpc_service_cd; -struct sfw_test_unit; -struct sfw_test_instance; - -/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework - * services, e.g. create/modify session. 
- */ -#define SRPC_SERVICE_DEBUG 0 -#define SRPC_SERVICE_MAKE_SESSION 1 -#define SRPC_SERVICE_REMOVE_SESSION 2 -#define SRPC_SERVICE_BATCH 3 -#define SRPC_SERVICE_TEST 4 -#define SRPC_SERVICE_QUERY_STAT 5 -#define SRPC_SERVICE_JOIN 6 -#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10 -/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */ -#define SRPC_SERVICE_BRW 11 -#define SRPC_SERVICE_PING 12 -#define SRPC_SERVICE_MAX_ID 12 - -#define SRPC_REQUEST_PORTAL 50 -/* a lazy portal for framework RPC requests */ -#define SRPC_FRAMEWORK_REQUEST_PORTAL 51 -/* all reply/bulk RDMAs go to this portal */ -#define SRPC_RDMA_PORTAL 52 - -static inline enum srpc_msg_type -srpc_service2request(int service) -{ - switch (service) { - default: - LBUG(); - case SRPC_SERVICE_DEBUG: - return SRPC_MSG_DEBUG_REQST; - - case SRPC_SERVICE_MAKE_SESSION: - return SRPC_MSG_MKSN_REQST; - - case SRPC_SERVICE_REMOVE_SESSION: - return SRPC_MSG_RMSN_REQST; - - case SRPC_SERVICE_BATCH: - return SRPC_MSG_BATCH_REQST; - - case SRPC_SERVICE_TEST: - return SRPC_MSG_TEST_REQST; - - case SRPC_SERVICE_QUERY_STAT: - return SRPC_MSG_STAT_REQST; - - case SRPC_SERVICE_BRW: - return SRPC_MSG_BRW_REQST; - - case SRPC_SERVICE_PING: - return SRPC_MSG_PING_REQST; - - case SRPC_SERVICE_JOIN: - return SRPC_MSG_JOIN_REQST; - } -} - -static inline enum srpc_msg_type -srpc_service2reply(int service) -{ - return srpc_service2request(service) + 1; -} - -enum srpc_event_type { - SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) - * received - */ - SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */ - SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */ - SRPC_REPLY_RCVD = 4, /* incoming reply received */ - SRPC_REPLY_SENT = 5, /* outgoing reply sent */ - SRPC_REQUEST_RCVD = 6, /* incoming request received */ - SRPC_REQUEST_SENT = 7, /* outgoing request sent */ -}; - -/* RPC event */ -struct srpc_event { - enum srpc_event_type ev_type; /* what's up */ - enum lnet_event_kind ev_lnet; /* 
LNet event type */ - int ev_fired; /* LNet event fired? */ - int ev_status; /* LNet event status */ - void *ev_data; /* owning server/client RPC */ -}; - -/* bulk descriptor */ -struct srpc_bulk { - int bk_len; /* len of bulk data */ - struct lnet_handle_md bk_mdh; - int bk_sink; /* sink/source */ - int bk_niov; /* # iov in bk_iovs */ - struct bio_vec bk_iovs[0]; -}; - -/* message buffer descriptor */ -struct srpc_buffer { - struct list_head buf_list; /* chain on srpc_service::*_msgq */ - struct srpc_msg buf_msg; - struct lnet_handle_md buf_mdh; - lnet_nid_t buf_self; - struct lnet_process_id buf_peer; -}; - -struct swi_workitem; -typedef void (*swi_action_t) (struct swi_workitem *); - -struct swi_workitem { - struct workqueue_struct *swi_wq; - struct work_struct swi_work; - swi_action_t swi_action; - int swi_state; -}; - -/* server-side state of a RPC */ -struct srpc_server_rpc { - /* chain on srpc_service::*_rpcq */ - struct list_head srpc_list; - struct srpc_service_cd *srpc_scd; - struct swi_workitem srpc_wi; - struct srpc_event srpc_ev; /* bulk/reply event */ - lnet_nid_t srpc_self; - struct lnet_process_id srpc_peer; - struct srpc_msg srpc_replymsg; - struct lnet_handle_md srpc_replymdh; - struct srpc_buffer *srpc_reqstbuf; - struct srpc_bulk *srpc_bulk; - - unsigned int srpc_aborted; /* being given up */ - int srpc_status; - void (*srpc_done)(struct srpc_server_rpc *); -}; - -/* client-side state of a RPC */ -struct srpc_client_rpc { - struct list_head crpc_list; /* chain on user's lists */ - spinlock_t crpc_lock; /* serialize */ - int crpc_service; - atomic_t crpc_refcount; - int crpc_timeout; /* # seconds to wait for reply */ - struct stt_timer crpc_timer; - struct swi_workitem crpc_wi; - struct lnet_process_id crpc_dest; - - void (*crpc_done)(struct srpc_client_rpc *); - void (*crpc_fini)(struct srpc_client_rpc *); - int crpc_status; /* completion status */ - void *crpc_priv; /* caller data */ - - /* state flags */ - unsigned int crpc_aborted:1; /* being 
given up */ - unsigned int crpc_closed:1; /* completed */ - - /* RPC events */ - struct srpc_event crpc_bulkev; /* bulk event */ - struct srpc_event crpc_reqstev; /* request event */ - struct srpc_event crpc_replyev; /* reply event */ - - /* bulk, request(reqst), and reply exchanged on wire */ - struct srpc_msg crpc_reqstmsg; - struct srpc_msg crpc_replymsg; - struct lnet_handle_md crpc_reqstmdh; - struct lnet_handle_md crpc_replymdh; - struct srpc_bulk crpc_bulk; -}; - -#define srpc_client_rpc_size(rpc) \ -offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov]) - -#define srpc_client_rpc_addref(rpc) \ -do { \ - CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \ - (rpc), libcfs_id2str((rpc)->crpc_dest), \ - atomic_read(&(rpc)->crpc_refcount)); \ - LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ - atomic_inc(&(rpc)->crpc_refcount); \ -} while (0) - -#define srpc_client_rpc_decref(rpc) \ -do { \ - CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \ - (rpc), libcfs_id2str((rpc)->crpc_dest), \ - atomic_read(&(rpc)->crpc_refcount)); \ - LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ - if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \ - srpc_destroy_client_rpc(rpc); \ -} while (0) - -#define srpc_event_pending(rpc) (!(rpc)->crpc_bulkev.ev_fired || \ - !(rpc)->crpc_reqstev.ev_fired || \ - !(rpc)->crpc_replyev.ev_fired) - -/* CPU partition data of srpc service */ -struct srpc_service_cd { - /** serialize */ - spinlock_t scd_lock; - /** backref to service */ - struct srpc_service *scd_svc; - /** event buffer */ - struct srpc_event scd_ev; - /** free RPC descriptors */ - struct list_head scd_rpc_free; - /** in-flight RPCs */ - struct list_head scd_rpc_active; - /** workitem for posting buffer */ - struct swi_workitem scd_buf_wi; - /** CPT id */ - int scd_cpt; - /** error code for scd_buf_wi */ - int scd_buf_err; - /** timestamp for scd_buf_err */ - time64_t scd_buf_err_stamp; - /** total # request buffers */ - int scd_buf_total; - /** # posted request buffers */ - 
int scd_buf_nposted; - /** in progress of buffer posting */ - int scd_buf_posting; - /** allocate more buffers if scd_buf_nposted < scd_buf_low */ - int scd_buf_low; - /** increase/decrease some buffers */ - int scd_buf_adjust; - /** posted message buffers */ - struct list_head scd_buf_posted; - /** blocked for RPC descriptor */ - struct list_head scd_buf_blocked; -}; - -/* number of server workitems (mini-thread) for testing service */ -#define SFW_TEST_WI_MIN 256 -#define SFW_TEST_WI_MAX 2048 -/* extra buffers for tolerating buggy peers, or unbalanced number - * of peers between partitions - */ -#define SFW_TEST_WI_EXTRA 64 - -/* number of server workitems (mini-thread) for framework service */ -#define SFW_FRWK_WI_MIN 16 -#define SFW_FRWK_WI_MAX 256 - -struct srpc_service { - int sv_id; /* service id */ - const char *sv_name; /* human readable name */ - int sv_wi_total; /* total server workitems */ - int sv_shuttingdown; - int sv_ncpts; - /* percpt data for srpc_service */ - struct srpc_service_cd **sv_cpt_data; - /* Service callbacks: - * - sv_handler: process incoming RPC request - * - sv_bulk_ready: notify bulk data - */ - int (*sv_handler)(struct srpc_server_rpc *); - int (*sv_bulk_ready)(struct srpc_server_rpc *, int); -}; - -struct sfw_session { - struct list_head sn_list; /* chain on fw_zombie_sessions */ - struct lst_sid sn_id; /* unique identifier */ - unsigned int sn_timeout; /* # seconds' inactivity to expire */ - int sn_timer_active; - unsigned int sn_features; - struct stt_timer sn_timer; - struct list_head sn_batches; /* list of batches */ - char sn_name[LST_NAME_SIZE]; - atomic_t sn_refcount; - atomic_t sn_brw_errors; - atomic_t sn_ping_errors; - unsigned long sn_started; -}; - -#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \ - (sid0).ses_stamp == (sid1).ses_stamp) - -struct sfw_batch { - struct list_head bat_list; /* chain on sn_batches */ - struct lst_bid bat_id; /* batch id */ - int bat_error; /* error code of batch */ 
- struct sfw_session *bat_session; /* batch's session */ - atomic_t bat_nactive; /* # of active tests */ - struct list_head bat_tests; /* test instances */ -}; - -struct sfw_test_client_ops { - int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test - * client - */ - void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test - * client - */ - int (*tso_prep_rpc)(struct sfw_test_unit *tsu, - struct lnet_process_id dest, - struct srpc_client_rpc **rpc); /* prep a tests rpc */ - void (*tso_done_rpc)(struct sfw_test_unit *tsu, - struct srpc_client_rpc *rpc); /* done a test rpc */ -}; - -struct sfw_test_instance { - struct list_head tsi_list; /* chain on batch */ - int tsi_service; /* test type */ - struct sfw_batch *tsi_batch; /* batch */ - struct sfw_test_client_ops *tsi_ops; /* test client operation - */ - - /* public parameter for all test units */ - unsigned int tsi_is_client:1; /* is test client */ - unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */ - int tsi_concur; /* concurrency */ - int tsi_loop; /* loop count */ - - /* status of test instance */ - spinlock_t tsi_lock; /* serialize */ - unsigned int tsi_stopping:1; /* test is stopping */ - atomic_t tsi_nactive; /* # of active test - * unit - */ - struct list_head tsi_units; /* test units */ - struct list_head tsi_free_rpcs; /* free rpcs */ - struct list_head tsi_active_rpcs; /* active rpcs */ - - union { - struct test_ping_req ping; /* ping parameter */ - struct test_bulk_req bulk_v0; /* bulk parameter */ - struct test_bulk_req_v1 bulk_v1; /* bulk v1 parameter */ - } tsi_u; -}; - -/* - * XXX: trailing (PAGE_SIZE % sizeof(struct lnet_process_id)) bytes at the end - * of pages are not used - */ -#define SFW_MAX_CONCUR LST_MAX_CONCUR -#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(struct lnet_process_id_packed)) -#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) -#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) - -struct sfw_test_unit { - struct list_head 
tsu_list; /* chain on lst_test_instance */ - struct lnet_process_id tsu_dest; /* id of dest node */ - int tsu_loop; /* loop count of the test */ - struct sfw_test_instance *tsu_instance; /* pointer to test instance */ - void *tsu_private; /* private data */ - struct swi_workitem tsu_worker; /* workitem of the test unit */ -}; - -struct sfw_test_case { - struct list_head tsc_list; /* chain on fw_tests */ - struct srpc_service *tsc_srv_service; /* test service */ - struct sfw_test_client_ops *tsc_cli_ops; /* ops of test client */ -}; - -struct srpc_client_rpc * -sfw_create_rpc(struct lnet_process_id peer, int service, - unsigned int features, int nbulkiov, int bulklen, - void (*done)(struct srpc_client_rpc *), void *priv); -int sfw_create_test_rpc(struct sfw_test_unit *tsu, - struct lnet_process_id peer, unsigned int features, - int nblk, int blklen, struct srpc_client_rpc **rpc); -void sfw_abort_rpc(struct srpc_client_rpc *rpc); -void sfw_post_rpc(struct srpc_client_rpc *rpc); -void sfw_client_rpc_done(struct srpc_client_rpc *rpc); -void sfw_unpack_message(struct srpc_msg *msg); -void sfw_free_pages(struct srpc_server_rpc *rpc); -void sfw_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i); -int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, - int sink); -int sfw_make_session(struct srpc_mksn_reqst *request, - struct srpc_mksn_reply *reply); - -struct srpc_client_rpc * -srpc_create_client_rpc(struct lnet_process_id peer, int service, - int nbulkiov, int bulklen, - void (*rpc_done)(struct srpc_client_rpc *), - void (*rpc_fini)(struct srpc_client_rpc *), void *priv); -void srpc_post_rpc(struct srpc_client_rpc *rpc); -void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why); -void srpc_free_bulk(struct srpc_bulk *bk); -struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off, - unsigned int bulk_npg, unsigned int bulk_len, - int sink); -void srpc_send_rpc(struct swi_workitem *wi); -int srpc_send_reply(struct srpc_server_rpc 
*rpc); -int srpc_add_service(struct srpc_service *sv); -int srpc_remove_service(struct srpc_service *sv); -void srpc_shutdown_service(struct srpc_service *sv); -void srpc_abort_service(struct srpc_service *sv); -int srpc_finish_service(struct srpc_service *sv); -int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer); -void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer); -void srpc_get_counters(struct srpc_counters *cnt); -void srpc_set_counters(const struct srpc_counters *cnt); - -extern struct workqueue_struct *lst_serial_wq; -extern struct workqueue_struct **lst_test_wq; - -static inline int -srpc_serv_is_framework(struct srpc_service *svc) -{ - return svc->sv_id < SRPC_FRAMEWORK_SERVICE_MAX_ID; -} - -static void -swi_wi_action(struct work_struct *wi) -{ - struct swi_workitem *swi; - - swi = container_of(wi, struct swi_workitem, swi_work); - - swi->swi_action(swi); -} - -static inline void -swi_init_workitem(struct swi_workitem *swi, - swi_action_t action, struct workqueue_struct *wq) -{ - swi->swi_wq = wq; - swi->swi_action = action; - swi->swi_state = SWI_STATE_NEWBORN; - INIT_WORK(&swi->swi_work, swi_wi_action); -} - -static inline void -swi_schedule_workitem(struct swi_workitem *wi) -{ - queue_work(wi->swi_wq, &wi->swi_work); -} - -static inline int -swi_cancel_workitem(struct swi_workitem *swi) -{ - return cancel_work_sync(&swi->swi_work); -} - -int sfw_startup(void); -int srpc_startup(void); -void sfw_shutdown(void); -void srpc_shutdown(void); - -static inline void -srpc_destroy_client_rpc(struct srpc_client_rpc *rpc) -{ - LASSERT(rpc); - LASSERT(!srpc_event_pending(rpc)); - LASSERT(!atomic_read(&rpc->crpc_refcount)); - - if (!rpc->crpc_fini) - kfree(rpc); - else - (*rpc->crpc_fini)(rpc); -} - -static inline void -srpc_init_client_rpc(struct srpc_client_rpc *rpc, struct lnet_process_id peer, - int service, int nbulkiov, int bulklen, - void (*rpc_done)(struct srpc_client_rpc *), - void (*rpc_fini)(struct srpc_client_rpc *), 
void *priv) -{ - LASSERT(nbulkiov <= LNET_MAX_IOV); - - memset(rpc, 0, offsetof(struct srpc_client_rpc, - crpc_bulk.bk_iovs[nbulkiov])); - - INIT_LIST_HEAD(&rpc->crpc_list); - swi_init_workitem(&rpc->crpc_wi, srpc_send_rpc, - lst_test_wq[lnet_cpt_of_nid(peer.nid)]); - spin_lock_init(&rpc->crpc_lock); - atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */ - - rpc->crpc_dest = peer; - rpc->crpc_priv = priv; - rpc->crpc_service = service; - rpc->crpc_bulk.bk_len = bulklen; - rpc->crpc_bulk.bk_niov = nbulkiov; - rpc->crpc_done = rpc_done; - rpc->crpc_fini = rpc_fini; - LNetInvalidateMDHandle(&rpc->crpc_reqstmdh); - LNetInvalidateMDHandle(&rpc->crpc_replymdh); - LNetInvalidateMDHandle(&rpc->crpc_bulk.bk_mdh); - - /* no event is expected at this point */ - rpc->crpc_bulkev.ev_fired = 1; - rpc->crpc_reqstev.ev_fired = 1; - rpc->crpc_replyev.ev_fired = 1; - - rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC; - rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION; - rpc->crpc_reqstmsg.msg_type = srpc_service2request(service); -} - -static inline const char * -swi_state2str(int state) -{ -#define STATE2STR(x) case x: return #x - switch (state) { - default: - LBUG(); - STATE2STR(SWI_STATE_NEWBORN); - STATE2STR(SWI_STATE_REPLY_SUBMITTED); - STATE2STR(SWI_STATE_REPLY_SENT); - STATE2STR(SWI_STATE_REQUEST_SUBMITTED); - STATE2STR(SWI_STATE_REQUEST_SENT); - STATE2STR(SWI_STATE_REPLY_RECEIVED); - STATE2STR(SWI_STATE_BULK_STARTED); - STATE2STR(SWI_STATE_DONE); - } -#undef STATE2STR -} - -#define selftest_wait_events() \ - do { \ - set_current_state(TASK_UNINTERRUPTIBLE); \ - schedule_timeout(HZ / 10); \ - } while (0) - -#define lst_wait_until(cond, lock, fmt, ...) \ -do { \ - int __I = 2; \ - while (!(cond)) { \ - CDEBUG(is_power_of_2(++__I) ? 
D_WARNING : D_NET, \ - fmt, ## __VA_ARGS__); \ - spin_unlock(&(lock)); \ - \ - selftest_wait_events(); \ - \ - spin_lock(&(lock)); \ - } \ -} while (0) - -static inline void -srpc_wait_service_shutdown(struct srpc_service *sv) -{ - int i = 2; - - LASSERT(sv->sv_shuttingdown); - - while (!srpc_finish_service(sv)) { - i++; - CDEBUG(((i & -i) == i) ? D_WARNING : D_NET, - "Waiting for %s service to shutdown...\n", - sv->sv_name); - selftest_wait_events(); - } -} - -extern struct sfw_test_client_ops brw_test_client; -void brw_init_test_client(void); - -extern struct srpc_service brw_test_service; -void brw_init_test_service(void); - -extern struct sfw_test_client_ops ping_test_client; -void ping_init_test_client(void); - -extern struct srpc_service ping_test_service; -void ping_init_test_service(void); - -#endif /* __SELFTEST_SELFTEST_H__ */ diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c deleted file mode 100644 index 582f252b3e12..000000000000 --- a/drivers/staging/lustre/lnet/selftest/timer.c +++ /dev/null @@ -1,244 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/selftest/timer.c - * - * Author: Isaac Huang - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "selftest.h" - -/* - * Timers are implemented as a sorted queue of expiry times. The queue - * is slotted, with each slot holding timers which expire in a - * 2**STTIMER_MINPOLL (8) second period. The timers in each slot are - * sorted by increasing expiry time. The number of slots is 2**7 (128), - * to cover a time period of 1024 seconds into the future before wrapping. - */ -#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */ -#define STTIMER_SLOTTIME BIT(STTIMER_MINPOLL) -#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1)) -#define STTIMER_NSLOTS BIT(7) -#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \ - (STTIMER_NSLOTS - 1))]) - -static struct st_timer_data { - spinlock_t stt_lock; - unsigned long stt_prev_slot; /* start time of the slot processed - * previously - */ - struct list_head stt_hash[STTIMER_NSLOTS]; - int stt_shuttingdown; - wait_queue_head_t stt_waitq; - int stt_nthreads; -} stt_data; - -void -stt_add_timer(struct stt_timer *timer) -{ - struct list_head *pos; - - spin_lock(&stt_data.stt_lock); - - LASSERT(stt_data.stt_nthreads > 0); - LASSERT(!stt_data.stt_shuttingdown); - LASSERT(timer->stt_func); - LASSERT(list_empty(&timer->stt_list)); - LASSERT(timer->stt_expires > ktime_get_real_seconds()); - - /* a simple insertion sort */ - list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) { - struct stt_timer *old = 
list_entry(pos, struct stt_timer, - stt_list); - - if (timer->stt_expires >= old->stt_expires) - break; - } - list_add(&timer->stt_list, pos); - - spin_unlock(&stt_data.stt_lock); -} - -/* - * The function returns whether it has deactivated a pending timer or not. - * (ie. del_timer() of an inactive timer returns 0, del_timer() of an - * active timer returns 1.) - * - * CAVEAT EMPTOR: - * When 0 is returned, it is possible that timer->stt_func _is_ running on - * another CPU. - */ -int -stt_del_timer(struct stt_timer *timer) -{ - int ret = 0; - - spin_lock(&stt_data.stt_lock); - - LASSERT(stt_data.stt_nthreads > 0); - LASSERT(!stt_data.stt_shuttingdown); - - if (!list_empty(&timer->stt_list)) { - ret = 1; - list_del_init(&timer->stt_list); - } - - spin_unlock(&stt_data.stt_lock); - return ret; -} - -/* called with stt_data.stt_lock held */ -static int -stt_expire_list(struct list_head *slot, time64_t now) -{ - int expired = 0; - struct stt_timer *timer; - - while (!list_empty(slot)) { - timer = list_entry(slot->next, struct stt_timer, stt_list); - - if (timer->stt_expires > now) - break; - - list_del_init(&timer->stt_list); - spin_unlock(&stt_data.stt_lock); - - expired++; - (*timer->stt_func) (timer->stt_data); - - spin_lock(&stt_data.stt_lock); - } - - return expired; -} - -static int -stt_check_timers(unsigned long *last) -{ - int expired = 0; - time64_t now; - unsigned long this_slot; - - now = ktime_get_real_seconds(); - this_slot = now & STTIMER_SLOTTIMEMASK; - - spin_lock(&stt_data.stt_lock); - - while (time_after_eq(this_slot, *last)) { - expired += stt_expire_list(STTIMER_SLOT(this_slot), now); - this_slot = this_slot - STTIMER_SLOTTIME; - } - - *last = now & STTIMER_SLOTTIMEMASK; - spin_unlock(&stt_data.stt_lock); - return expired; -} - -static int -stt_timer_main(void *arg) -{ - int rc = 0; - - while (!stt_data.stt_shuttingdown) { - stt_check_timers(&stt_data.stt_prev_slot); - - rc = wait_event_timeout(stt_data.stt_waitq, - stt_data.stt_shuttingdown, - 
STTIMER_SLOTTIME * HZ); - } - - spin_lock(&stt_data.stt_lock); - stt_data.stt_nthreads--; - spin_unlock(&stt_data.stt_lock); - return rc; -} - -static int -stt_start_timer_thread(void) -{ - struct task_struct *task; - - LASSERT(!stt_data.stt_shuttingdown); - - task = kthread_run(stt_timer_main, NULL, "st_timer"); - if (IS_ERR(task)) - return PTR_ERR(task); - - spin_lock(&stt_data.stt_lock); - stt_data.stt_nthreads++; - spin_unlock(&stt_data.stt_lock); - return 0; -} - -int -stt_startup(void) -{ - int rc = 0; - int i; - - stt_data.stt_shuttingdown = 0; - stt_data.stt_prev_slot = ktime_get_real_seconds() & STTIMER_SLOTTIMEMASK; - - spin_lock_init(&stt_data.stt_lock); - for (i = 0; i < STTIMER_NSLOTS; i++) - INIT_LIST_HEAD(&stt_data.stt_hash[i]); - - stt_data.stt_nthreads = 0; - init_waitqueue_head(&stt_data.stt_waitq); - rc = stt_start_timer_thread(); - if (rc) - CERROR("Can't spawn timer thread: %d\n", rc); - - return rc; -} - -void -stt_shutdown(void) -{ - int i; - - spin_lock(&stt_data.stt_lock); - - for (i = 0; i < STTIMER_NSLOTS; i++) - LASSERT(list_empty(&stt_data.stt_hash[i])); - - stt_data.stt_shuttingdown = 1; - - wake_up(&stt_data.stt_waitq); - lst_wait_until(!stt_data.stt_nthreads, stt_data.stt_lock, - "waiting for %d threads to terminate\n", - stt_data.stt_nthreads); - - spin_unlock(&stt_data.stt_lock); -} diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h deleted file mode 100644 index 7f0ef9bd0cda..000000000000 --- a/drivers/staging/lustre/lnet/selftest/timer.h +++ /dev/null @@ -1,50 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lnet/selftest/timer.h - * - * Author: Isaac Huang - */ -#ifndef __SELFTEST_TIMER_H__ -#define __SELFTEST_TIMER_H__ - -struct stt_timer { - struct list_head stt_list; - time64_t stt_expires; - void (*stt_func)(void *); - void *stt_data; -}; - -void stt_add_timer(struct stt_timer *timer); -int stt_del_timer(struct stt_timer *timer); -int stt_startup(void); -void stt_shutdown(void); - -#endif /* __SELFTEST_TIMER_H__ */ diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig deleted file mode 100644 index ccb78a945995..000000000000 --- a/drivers/staging/lustre/lustre/Kconfig +++ /dev/null @@ -1,45 +0,0 @@ -config LUSTRE_FS - tristate "Lustre file system client support" - depends on LNET - select CRYPTO - select CRYPTO_CRC32 - select CRYPTO_CRC32_PCLMUL if X86 - select CRYPTO_CRC32C - select CRYPTO_MD5 - select CRYPTO_SHA1 - select CRYPTO_SHA256 - select CRYPTO_SHA512 - depends on MULTIUSER - help - This option enables Lustre file system client support. Choose Y - here if you want to access a Lustre file system cluster. To compile - this file system support as a module, choose M here: the module will - be called lustre. 
- - To mount Lustre file systems, you also need to install the user space - mount.lustre and other user space commands which can be found in the - lustre-client package, available from - http://downloads.whamcloud.com/public/lustre/ - - Lustre file system is the most popular cluster file system in high - performance computing. Source code of both kernel space and user space - Lustre components can also be found at - http://git.whamcloud.com/?p=fs/lustre-release.git;a=summary - - If unsure, say N. - - See also http://wiki.lustre.org/ - -config LUSTRE_DEBUG_EXPENSIVE_CHECK - bool "Enable Lustre DEBUG checks" - depends on LUSTRE_FS - help - This option is mainly for debug purpose. It enables Lustre code to do - expensive checks that may have a performance impact. - - Use with caution. If unsure, say N. - -config LUSTRE_TRANSLATE_ERRNOS - bool - depends on LUSTRE_FS && !X86 - default y diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile deleted file mode 100644 index 331e4fcdd5a2..000000000000 --- a/drivers/staging/lustre/lustre/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -obj-$(CONFIG_LUSTRE_FS) += obdclass/ ptlrpc/ fld/ osc/ mgc/ \ - fid/ lov/ mdc/ lmv/ llite/ obdecho/ diff --git a/drivers/staging/lustre/lustre/fid/Makefile b/drivers/staging/lustre/lustre/fid/Makefile deleted file mode 100644 index 77b65b92667d..000000000000 --- a/drivers/staging/lustre/lustre/fid/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include/ - -obj-$(CONFIG_LUSTRE_FS) += fid.o -fid-y := fid_request.o fid_lib.o lproc_fid.o diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h deleted file mode 100644 index 14569e969a31..000000000000 --- a/drivers/staging/lustre/lustre/fid/fid_internal.h +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START 
- * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/fid/fid_internal.h - * - * Author: Yury Umanets - */ -#ifndef __FID_INTERNAL_H -#define __FID_INTERNAL_H - -#include - -/* Functions used internally in module. */ - -extern struct lprocfs_vars seq_client_debugfs_list[]; - -#endif /* __FID_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c deleted file mode 100644 index ac52b378c155..000000000000 --- a/drivers/staging/lustre/lustre/fid/fid_lib.c +++ /dev/null @@ -1,87 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/fid/fid_lib.c - * - * Miscellaneous fid functions. - * - * Author: Nikita Danilov - * Author: Yury Umanets - */ - -#define DEBUG_SUBSYSTEM S_FID - -#include -#include - -/** - * A cluster-wide range from which fid-sequences are granted to servers and - * then clients. - * - * Fid namespace: - *
- * Normal FID:        seq:64 [2^33,2^64-1]      oid:32          ver:32
- * IGIF      :        0:32, ino:32              gen:32          0:32
- * IDIF      :        0:31, 1:1, ost-index:16,  objd:48         0:32
- * 
- * - * The first 0x400 sequences of normal FID are reserved for special purpose. - * FID_SEQ_START + 1 is for local file id generation. - * FID_SEQ_START + 2 is for .lustre directory and its objects - */ -const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE = { - .lsr_start = FID_SEQ_NORMAL, - .lsr_end = (__u64)~0ULL, -}; - -/* Zero range, used for init and other purposes. */ -const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE = { - .lsr_start = 0, -}; - -/* Lustre Big Fs Lock fid. */ -const struct lu_fid LUSTRE_BFL_FID = { .f_seq = FID_SEQ_SPECIAL, - .f_oid = FID_OID_SPECIAL_BFL, - .f_ver = 0x0000000000000000 }; -EXPORT_SYMBOL(LUSTRE_BFL_FID); - -/** Special fid for ".lustre" directory */ -const struct lu_fid LU_DOT_LUSTRE_FID = { .f_seq = FID_SEQ_DOT_LUSTRE, - .f_oid = FID_OID_DOT_LUSTRE, - .f_ver = 0x0000000000000000 }; -EXPORT_SYMBOL(LU_DOT_LUSTRE_FID); - -/** Special fid for "fid" special object in .lustre */ -const struct lu_fid LU_OBF_FID = { .f_seq = FID_SEQ_DOT_LUSTRE, - .f_oid = FID_OID_DOT_LUSTRE_OBF, - .f_ver = 0x0000000000000000 }; -EXPORT_SYMBOL(LU_OBF_FID); diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c deleted file mode 100644 index a34fd90ca5e5..000000000000 --- a/drivers/staging/lustre/lustre/fid/fid_request.c +++ /dev/null @@ -1,410 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/fid/fid_request.c - * - * Lustre Sequence Manager - * - * Author: Yury Umanets - */ - -#define DEBUG_SUBSYSTEM S_FID - -#include - -#include -#include -#include -#include -/* mdc RPC locks */ -#include -#include "fid_internal.h" - -static struct dentry *seq_debugfs_dir; - -static int seq_client_rpc(struct lu_client_seq *seq, - struct lu_seq_range *output, __u32 opc, - const char *opcname) -{ - struct obd_export *exp = seq->lcs_exp; - struct ptlrpc_request *req; - struct lu_seq_range *out, *in; - __u32 *op; - unsigned int debug_mask; - int rc; - - LASSERT(exp && !IS_ERR(exp)); - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY, - LUSTRE_MDS_VERSION, SEQ_QUERY); - if (!req) - return -ENOMEM; - - /* Init operation code */ - op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC); - *op = opc; - - /* Zero out input range, this is not recovery yet. 
*/ - in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE); - lu_seq_range_init(in); - - ptlrpc_request_set_replen(req); - - in->lsr_index = seq->lcs_space.lsr_index; - if (seq->lcs_type == LUSTRE_SEQ_METADATA) - fld_range_set_mdt(in); - else - fld_range_set_ost(in); - - if (opc == SEQ_ALLOC_SUPER) { - req->rq_request_portal = SEQ_CONTROLLER_PORTAL; - req->rq_reply_portal = MDC_REPLY_PORTAL; - /* During allocating super sequence for data object, - * the current thread might hold the export of MDT0(MDT0 - * precreating objects on this OST), and it will send the - * request to MDT0 here, so we can not keep resending the - * request here, otherwise if MDT0 is failed(umounted), - * it can not release the export of MDT0 - */ - if (seq->lcs_type == LUSTRE_SEQ_DATA) { - req->rq_no_delay = 1; - req->rq_no_resend = 1; - } - debug_mask = D_CONSOLE; - } else { - if (seq->lcs_type == LUSTRE_SEQ_METADATA) { - req->rq_reply_portal = MDC_REPLY_PORTAL; - req->rq_request_portal = SEQ_METADATA_PORTAL; - } else { - req->rq_reply_portal = OSC_REPLY_PORTAL; - req->rq_request_portal = SEQ_DATA_PORTAL; - } - debug_mask = D_INFO; - } - - ptlrpc_at_set_req_timeout(req); - - rc = ptlrpc_queue_wait(req); - if (rc) - goto out_req; - - out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE); - - if (!lu_seq_range_is_sane(out)) { - CERROR("%s: Invalid range received from server: " - DRANGE "\n", seq->lcs_name, PRANGE(out)); - rc = -EINVAL; - goto out_req; - } - - if (lu_seq_range_is_exhausted(out)) { - CERROR("%s: Range received from server is exhausted: " - DRANGE "]\n", seq->lcs_name, PRANGE(out)); - rc = -EINVAL; - goto out_req; - } - - *output = *out; - CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence " DRANGE "]\n", - seq->lcs_name, opcname, PRANGE(output)); - -out_req: - ptlrpc_req_finished(req); - return rc; -} - -/* Request sequence-controller node to allocate new meta-sequence. 
*/ -static int seq_client_alloc_meta(const struct lu_env *env, - struct lu_client_seq *seq) -{ - int rc; - - do { - /* If meta server return -EINPROGRESS or EAGAIN, - * it means meta server might not be ready to - * allocate super sequence from sequence controller - * (MDT0)yet - */ - rc = seq_client_rpc(seq, &seq->lcs_space, - SEQ_ALLOC_META, "meta"); - } while (rc == -EINPROGRESS || rc == -EAGAIN); - - return rc; -} - -/* Allocate new sequence for client. */ -static int seq_client_alloc_seq(const struct lu_env *env, - struct lu_client_seq *seq, u64 *seqnr) -{ - int rc; - - LASSERT(lu_seq_range_is_sane(&seq->lcs_space)); - - if (lu_seq_range_is_exhausted(&seq->lcs_space)) { - rc = seq_client_alloc_meta(env, seq); - if (rc) { - CERROR("%s: Can't allocate new meta-sequence, rc %d\n", - seq->lcs_name, rc); - *seqnr = U64_MAX; - return rc; - } - CDEBUG(D_INFO, "%s: New range - " DRANGE "\n", - seq->lcs_name, PRANGE(&seq->lcs_space)); - } else { - rc = 0; - } - - LASSERT(!lu_seq_range_is_exhausted(&seq->lcs_space)); - *seqnr = seq->lcs_space.lsr_start; - seq->lcs_space.lsr_start += 1; - - CDEBUG(D_INFO, "%s: Allocated sequence [%#llx]\n", seq->lcs_name, - *seqnr); - - return rc; -} - -/* Allocate new fid on passed client @seq and save it to @fid. */ -int seq_client_alloc_fid(const struct lu_env *env, - struct lu_client_seq *seq, struct lu_fid *fid) -{ - int rc; - - LASSERT(seq); - LASSERT(fid); - - spin_lock(&seq->lcs_lock); - - if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST)) - seq->lcs_fid.f_oid = seq->lcs_width; - - wait_event_cmd(seq->lcs_waitq, - (!fid_is_zero(&seq->lcs_fid) && - fid_oid(&seq->lcs_fid) < seq->lcs_width) || - !seq->lcs_update, - spin_unlock(&seq->lcs_lock), - spin_lock(&seq->lcs_lock)); - - if (!fid_is_zero(&seq->lcs_fid) && - fid_oid(&seq->lcs_fid) < seq->lcs_width) { - /* Just bump last allocated fid and return to caller. 
*/ - seq->lcs_fid.f_oid += 1; - rc = 0; - } else { - u64 seqnr; - - LASSERT(seq->lcs_update == 0); - seq->lcs_update = 1; - spin_unlock(&seq->lcs_lock); - - rc = seq_client_alloc_seq(env, seq, &seqnr); - - spin_lock(&seq->lcs_lock); - seq->lcs_update = 0; - wake_up(&seq->lcs_waitq); - - if (rc) { - CERROR("%s: Can't allocate new sequence, rc %d\n", - seq->lcs_name, rc); - spin_unlock(&seq->lcs_lock); - return rc; - } - - CDEBUG(D_INFO, "%s: Switch to sequence [0x%16.16llx]\n", - seq->lcs_name, seqnr); - - seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID; - seq->lcs_fid.f_seq = seqnr; - seq->lcs_fid.f_ver = 0; - - /* - * Inform caller that sequence switch is performed to allow it - * to setup FLD for it. - */ - rc = 1; - } - - *fid = seq->lcs_fid; - spin_unlock(&seq->lcs_lock); - - CDEBUG(D_INFO, - "%s: Allocated FID " DFID "\n", seq->lcs_name, PFID(fid)); - return rc; -} -EXPORT_SYMBOL(seq_client_alloc_fid); - -/* - * Finish the current sequence due to disconnect. - * See mdc_import_event() - */ -void seq_client_flush(struct lu_client_seq *seq) -{ - - LASSERT(seq); - spin_lock(&seq->lcs_lock); - - wait_event_cmd(seq->lcs_waitq, - !seq->lcs_update, - spin_unlock(&seq->lcs_lock), - spin_lock(&seq->lcs_lock)); - - fid_zero(&seq->lcs_fid); - /** - * this id shld not be used for seq range allocation. - * set to -1 for dgb check. 
- */ - - seq->lcs_space.lsr_index = -1; - - lu_seq_range_init(&seq->lcs_space); - spin_unlock(&seq->lcs_lock); -} -EXPORT_SYMBOL(seq_client_flush); - -static void seq_client_debugfs_fini(struct lu_client_seq *seq) -{ - debugfs_remove_recursive(seq->lcs_debugfs_entry); -} - -static void seq_client_debugfs_init(struct lu_client_seq *seq) -{ - seq->lcs_debugfs_entry = debugfs_create_dir(seq->lcs_name, - seq_debugfs_dir); - - ldebugfs_add_vars(seq->lcs_debugfs_entry, seq_client_debugfs_list, seq); -} - -static void seq_client_fini(struct lu_client_seq *seq) -{ - seq_client_debugfs_fini(seq); - - if (seq->lcs_exp) { - class_export_put(seq->lcs_exp); - seq->lcs_exp = NULL; - } -} - -static void seq_client_init(struct lu_client_seq *seq, struct obd_export *exp, - enum lu_cli_type type, const char *prefix) -{ - LASSERT(seq); - LASSERT(prefix); - - seq->lcs_type = type; - - spin_lock_init(&seq->lcs_lock); - if (type == LUSTRE_SEQ_METADATA) - seq->lcs_width = LUSTRE_METADATA_SEQ_MAX_WIDTH; - else - seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH; - - init_waitqueue_head(&seq->lcs_waitq); - /* Make sure that things are clear before work is started. 
*/ - seq_client_flush(seq); - - seq->lcs_exp = class_export_get(exp); - - snprintf(seq->lcs_name, sizeof(seq->lcs_name), - "cli-%s", prefix); - - seq_client_debugfs_init(seq); -} - -int client_fid_init(struct obd_device *obd, - struct obd_export *exp, enum lu_cli_type type) -{ - struct client_obd *cli = &obd->u.cli; - char *prefix; - int rc; - - cli->cl_seq = kzalloc(sizeof(*cli->cl_seq), GFP_NOFS); - if (!cli->cl_seq) - return -ENOMEM; - - prefix = kzalloc(MAX_OBD_NAME + 5, GFP_NOFS); - if (!prefix) { - rc = -ENOMEM; - goto out_free_seq; - } - - snprintf(prefix, MAX_OBD_NAME + 5, "cli-%s", obd->obd_name); - - /* Init client side sequence-manager */ - seq_client_init(cli->cl_seq, exp, type, prefix); - kfree(prefix); - - return 0; -out_free_seq: - kfree(cli->cl_seq); - cli->cl_seq = NULL; - return rc; -} -EXPORT_SYMBOL(client_fid_init); - -int client_fid_fini(struct obd_device *obd) -{ - struct client_obd *cli = &obd->u.cli; - - if (cli->cl_seq) { - seq_client_fini(cli->cl_seq); - kfree(cli->cl_seq); - cli->cl_seq = NULL; - } - - return 0; -} -EXPORT_SYMBOL(client_fid_fini); - -static int __init fid_init(void) -{ - int rc; - - rc = libcfs_setup(); - if (rc) - return rc; - - seq_debugfs_dir = debugfs_create_dir(LUSTRE_SEQ_NAME, - debugfs_lustre_root); - return 0; -} - -static void __exit fid_exit(void) -{ - debugfs_remove_recursive(seq_debugfs_dir); -} - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Lustre File IDentifier"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(fid_init); -module_exit(fid_exit); diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c deleted file mode 100644 index 0aabf473c9bd..000000000000 --- a/drivers/staging/lustre/lustre/fid/lproc_fid.c +++ /dev/null @@ -1,225 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/fid/lproc_fid.c - * - * Lustre Sequence Manager - * - * Author: Yury Umanets - */ - -#define DEBUG_SUBSYSTEM S_FID - -#include - -#include -#include -#include -#include -#include -#include "fid_internal.h" - -/* Format: [0x64BIT_INT - 0x64BIT_INT] + 32 bytes just in case */ -#define MAX_FID_RANGE_STRLEN (32 + 2 * 2 * sizeof(__u64)) -/* - * Note: this function is only used for testing, it is no safe for production - * use. 
- */ -static int -ldebugfs_fid_write_common(const char __user *buffer, size_t count, - struct lu_seq_range *range) -{ - struct lu_seq_range tmp; - int rc; - char kernbuf[MAX_FID_RANGE_STRLEN]; - - LASSERT(range); - - if (count >= sizeof(kernbuf)) - return -EINVAL; - - if (copy_from_user(kernbuf, buffer, count)) - return -EFAULT; - - kernbuf[count] = 0; - - if (count == 5 && strcmp(kernbuf, "clear") == 0) { - memset(range, 0, sizeof(*range)); - return count; - } - - /* of the form "[0x0000000240000400 - 0x000000028000400]" */ - rc = sscanf(kernbuf, "[%llx - %llx]\n", - (unsigned long long *)&tmp.lsr_start, - (unsigned long long *)&tmp.lsr_end); - if (rc != 2) - return -EINVAL; - if (!lu_seq_range_is_sane(&tmp) || lu_seq_range_is_zero(&tmp) || - tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end) - return -EINVAL; - *range = tmp; - return count; -} - -/* Client side debugfs stuff */ -static ssize_t -ldebugfs_fid_space_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct lu_client_seq *seq; - struct lu_seq_range range; - int rc; - - seq = ((struct seq_file *)file->private_data)->private; - - rc = ldebugfs_fid_write_common(buffer, count, &range); - - spin_lock(&seq->lcs_lock); - if (seq->lcs_update) - /* An RPC call is active to update lcs_space */ - rc = -EBUSY; - if (rc > 0) - seq->lcs_space = range; - spin_unlock(&seq->lcs_lock); - - if (rc > 0) { - CDEBUG(D_INFO, "%s: Space: " DRANGE "\n", - seq->lcs_name, PRANGE(&range)); - } - - return rc; -} - -static int -ldebugfs_fid_space_seq_show(struct seq_file *m, void *unused) -{ - struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - int rc = 0; - - spin_lock(&seq->lcs_lock); - if (seq->lcs_update) - rc = -EBUSY; - else - seq_printf(m, "[%#llx - %#llx]:%x:%s\n", PRANGE(&seq->lcs_space)); - spin_unlock(&seq->lcs_lock); - - return rc; -} - -static ssize_t -ldebugfs_fid_width_seq_write(struct file *file, - const char __user *buffer, - size_t count, 
loff_t *off) -{ - struct lu_client_seq *seq; - __u64 max; - int rc, val; - - seq = ((struct seq_file *)file->private_data)->private; - - rc = lprocfs_write_helper(buffer, count, &val); - if (rc) - return rc; - - spin_lock(&seq->lcs_lock); - if (seq->lcs_type == LUSTRE_SEQ_DATA) - max = LUSTRE_DATA_SEQ_MAX_WIDTH; - else - max = LUSTRE_METADATA_SEQ_MAX_WIDTH; - - if (val <= max && val > 0) { - seq->lcs_width = val; - - CDEBUG(D_INFO, "%s: Sequence size: %llu\n", seq->lcs_name, - seq->lcs_width); - } - - spin_unlock(&seq->lcs_lock); - - return count; -} - -static int -ldebugfs_fid_width_seq_show(struct seq_file *m, void *unused) -{ - struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - - spin_lock(&seq->lcs_lock); - seq_printf(m, "%llu\n", seq->lcs_width); - spin_unlock(&seq->lcs_lock); - - return 0; -} - -static int -ldebugfs_fid_fid_seq_show(struct seq_file *m, void *unused) -{ - struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - - spin_lock(&seq->lcs_lock); - seq_printf(m, DFID "\n", PFID(&seq->lcs_fid)); - spin_unlock(&seq->lcs_lock); - - return 0; -} - -static int -ldebugfs_fid_server_seq_show(struct seq_file *m, void *unused) -{ - struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - struct client_obd *cli; - - if (seq->lcs_exp) { - cli = &seq->lcs_exp->exp_obd->u.cli; - seq_printf(m, "%s\n", cli->cl_target_uuid.uuid); - } - - return 0; -} - -LPROC_SEQ_FOPS(ldebugfs_fid_space); -LPROC_SEQ_FOPS(ldebugfs_fid_width); -LPROC_SEQ_FOPS_RO(ldebugfs_fid_server); -LPROC_SEQ_FOPS_RO(ldebugfs_fid_fid); - -struct lprocfs_vars seq_client_debugfs_list[] = { - { .name = "space", - .fops = &ldebugfs_fid_space_fops }, - { .name = "width", - .fops = &ldebugfs_fid_width_fops }, - { .name = "server", - .fops = &ldebugfs_fid_server_fops }, - { .name = "fid", - .fops = &ldebugfs_fid_fid_fops }, - { NULL } -}; diff --git a/drivers/staging/lustre/lustre/fld/Makefile b/drivers/staging/lustre/lustre/fld/Makefile deleted file mode 100644 index 
426deba8b815..000000000000 --- a/drivers/staging/lustre/lustre/fld/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include/ - -obj-$(CONFIG_LUSTRE_FS) += fld.o -fld-y := fld_request.o fld_cache.o lproc_fld.o diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c deleted file mode 100644 index a7415c9a1c28..000000000000 --- a/drivers/staging/lustre/lustre/fld/fld_cache.c +++ /dev/null @@ -1,516 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2013, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/fld/fld_cache.c - * - * FLD (Fids Location Database) - * - * Author: Pravin Shelar - * Author: Yury Umanets - */ - -#define DEBUG_SUBSYSTEM S_FLD - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include "fld_internal.h" - -/** - * create fld cache. - */ -struct fld_cache *fld_cache_init(const char *name, - int cache_size, int cache_threshold) -{ - struct fld_cache *cache; - - LASSERT(name); - LASSERT(cache_threshold < cache_size); - - cache = kzalloc(sizeof(*cache), GFP_NOFS); - if (!cache) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&cache->fci_entries_head); - INIT_LIST_HEAD(&cache->fci_lru); - - cache->fci_cache_count = 0; - rwlock_init(&cache->fci_lock); - - strlcpy(cache->fci_name, name, - sizeof(cache->fci_name)); - - cache->fci_cache_size = cache_size; - cache->fci_threshold = cache_threshold; - - /* Init fld cache info. */ - memset(&cache->fci_stat, 0, sizeof(cache->fci_stat)); - - CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n", - cache->fci_name, cache_size, cache_threshold); - - return cache; -} - -/** - * destroy fld cache. - */ -void fld_cache_fini(struct fld_cache *cache) -{ - __u64 pct; - - LASSERT(cache); - fld_cache_flush(cache); - - if (cache->fci_stat.fst_count > 0) { - pct = cache->fci_stat.fst_cache * 100; - do_div(pct, cache->fci_stat.fst_count); - } else { - pct = 0; - } - - CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name); - CDEBUG(D_INFO, " Total reqs: %llu\n", cache->fci_stat.fst_count); - CDEBUG(D_INFO, " Cache reqs: %llu\n", cache->fci_stat.fst_cache); - CDEBUG(D_INFO, " Cache hits: %llu%%\n", pct); - - kfree(cache); -} - -/** - * delete given node from list. - */ -static void fld_cache_entry_delete(struct fld_cache *cache, - struct fld_cache_entry *node) -{ - list_del(&node->fce_list); - list_del(&node->fce_lru); - cache->fci_cache_count--; - kfree(node); -} - -/** - * fix list by checking new entry with NEXT entry in order. 
- */ -static void fld_fix_new_list(struct fld_cache *cache) -{ - struct fld_cache_entry *f_curr; - struct fld_cache_entry *f_next; - struct lu_seq_range *c_range; - struct lu_seq_range *n_range; - struct list_head *head = &cache->fci_entries_head; - -restart_fixup: - - list_for_each_entry_safe(f_curr, f_next, head, fce_list) { - c_range = &f_curr->fce_range; - n_range = &f_next->fce_range; - - LASSERT(lu_seq_range_is_sane(c_range)); - if (&f_next->fce_list == head) - break; - - if (c_range->lsr_flags != n_range->lsr_flags) - continue; - - LASSERTF(c_range->lsr_start <= n_range->lsr_start, - "cur lsr_start " DRANGE " next lsr_start " DRANGE "\n", - PRANGE(c_range), PRANGE(n_range)); - - /* check merge possibility with next range */ - if (c_range->lsr_end == n_range->lsr_start) { - if (c_range->lsr_index != n_range->lsr_index) - continue; - n_range->lsr_start = c_range->lsr_start; - fld_cache_entry_delete(cache, f_curr); - continue; - } - - /* check if current range overlaps with next range. */ - if (n_range->lsr_start < c_range->lsr_end) { - if (c_range->lsr_index == n_range->lsr_index) { - n_range->lsr_start = c_range->lsr_start; - n_range->lsr_end = max(c_range->lsr_end, - n_range->lsr_end); - fld_cache_entry_delete(cache, f_curr); - } else { - if (n_range->lsr_end <= c_range->lsr_end) { - *n_range = *c_range; - fld_cache_entry_delete(cache, f_curr); - } else { - n_range->lsr_start = c_range->lsr_end; - } - } - - /* we could have overlap over next - * range too. better restart. 
- */ - goto restart_fixup; - } - - /* kill duplicates */ - if (c_range->lsr_start == n_range->lsr_start && - c_range->lsr_end == n_range->lsr_end) - fld_cache_entry_delete(cache, f_curr); - } -} - -/** - * add node to fld cache - */ -static inline void fld_cache_entry_add(struct fld_cache *cache, - struct fld_cache_entry *f_new, - struct list_head *pos) -{ - list_add(&f_new->fce_list, pos); - list_add(&f_new->fce_lru, &cache->fci_lru); - - cache->fci_cache_count++; - fld_fix_new_list(cache); -} - -/** - * Check if cache needs to be shrunk. If so - do it. - * Remove one entry in list and so on until cache is shrunk enough. - */ -static int fld_cache_shrink(struct fld_cache *cache) -{ - int num = 0; - - if (cache->fci_cache_count < cache->fci_cache_size) - return 0; - - while (cache->fci_cache_count + cache->fci_threshold > - cache->fci_cache_size && - !list_empty(&cache->fci_lru)) { - struct fld_cache_entry *flde = - list_last_entry(&cache->fci_lru, - struct fld_cache_entry, fce_lru); - - fld_cache_entry_delete(cache, flde); - num++; - } - - CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n", - cache->fci_name, num); - - return 0; -} - -/** - * kill all fld cache entries. - */ -void fld_cache_flush(struct fld_cache *cache) -{ - write_lock(&cache->fci_lock); - cache->fci_cache_size = 0; - fld_cache_shrink(cache); - write_unlock(&cache->fci_lock); -} - -/** - * punch hole in existing range. divide this range and add new - * entry accordingly. - */ - -static void fld_cache_punch_hole(struct fld_cache *cache, - struct fld_cache_entry *f_curr, - struct fld_cache_entry *f_new) -{ - const struct lu_seq_range *range = &f_new->fce_range; - const u64 new_start = range->lsr_start; - const u64 new_end = range->lsr_end; - struct fld_cache_entry *fldt; - - fldt = kzalloc(sizeof(*fldt), GFP_ATOMIC); - if (!fldt) { - kfree(f_new); - /* overlap is not allowed, so don't mess up list. 
*/ - return; - } - /* break f_curr RANGE into three RANGES: - * f_curr, f_new , fldt - */ - - /* f_new = *range */ - - /* fldt */ - fldt->fce_range.lsr_start = new_end; - fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end; - fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index; - - /* f_curr */ - f_curr->fce_range.lsr_end = new_start; - - /* add these two entries to list */ - fld_cache_entry_add(cache, f_new, &f_curr->fce_list); - fld_cache_entry_add(cache, fldt, &f_new->fce_list); - - /* no need to fixup */ -} - -/** - * handle range overlap in fld cache. - */ -static void fld_cache_overlap_handle(struct fld_cache *cache, - struct fld_cache_entry *f_curr, - struct fld_cache_entry *f_new) -{ - const struct lu_seq_range *range = &f_new->fce_range; - const u64 new_start = range->lsr_start; - const u64 new_end = range->lsr_end; - const u32 mdt = range->lsr_index; - - /* this is overlap case, these case are checking overlapping with - * prev range only. fixup will handle overlapping with next range. - */ - - if (f_curr->fce_range.lsr_index == mdt) { - f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start, - new_start); - - f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end, - new_end); - - kfree(f_new); - fld_fix_new_list(cache); - - } else if (new_start <= f_curr->fce_range.lsr_start && - f_curr->fce_range.lsr_end <= new_end) { - /* case 1: new range completely overshadowed existing range. - * e.g. whole range migrated. update fld cache entry - */ - - f_curr->fce_range = *range; - kfree(f_new); - fld_fix_new_list(cache); - - } else if (f_curr->fce_range.lsr_start < new_start && - new_end < f_curr->fce_range.lsr_end) { - /* case 2: new range fit within existing range. 
*/ - - fld_cache_punch_hole(cache, f_curr, f_new); - - } else if (new_end <= f_curr->fce_range.lsr_end) { - /* case 3: overlap: - * [new_start [c_start new_end) c_end) - */ - - LASSERT(new_start <= f_curr->fce_range.lsr_start); - - f_curr->fce_range.lsr_start = new_end; - fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev); - - } else if (f_curr->fce_range.lsr_start <= new_start) { - /* case 4: overlap: - * [c_start [new_start c_end) new_end) - */ - - LASSERT(f_curr->fce_range.lsr_end <= new_end); - - f_curr->fce_range.lsr_end = new_start; - fld_cache_entry_add(cache, f_new, &f_curr->fce_list); - } else { - CERROR("NEW range =" DRANGE " curr = " DRANGE "\n", - PRANGE(range), PRANGE(&f_curr->fce_range)); - } -} - -struct fld_cache_entry -*fld_cache_entry_create(const struct lu_seq_range *range) -{ - struct fld_cache_entry *f_new; - - LASSERT(lu_seq_range_is_sane(range)); - - f_new = kzalloc(sizeof(*f_new), GFP_NOFS); - if (!f_new) - return ERR_PTR(-ENOMEM); - - f_new->fce_range = *range; - return f_new; -} - -/** - * Insert FLD entry in FLD cache. - * - * This function handles all cases of merging and breaking up of - * ranges. - */ -static int fld_cache_insert_nolock(struct fld_cache *cache, - struct fld_cache_entry *f_new) -{ - struct fld_cache_entry *f_curr; - struct fld_cache_entry *n; - struct list_head *head; - struct list_head *prev = NULL; - const u64 new_start = f_new->fce_range.lsr_start; - const u64 new_end = f_new->fce_range.lsr_end; - __u32 new_flags = f_new->fce_range.lsr_flags; - - /* - * Duplicate entries are eliminated in insert op. - * So we don't need to search new entry before starting - * insertion loop. 
- */ - - if (!cache->fci_no_shrink) - fld_cache_shrink(cache); - - head = &cache->fci_entries_head; - - list_for_each_entry_safe(f_curr, n, head, fce_list) { - /* add list if next is end of list */ - if (new_end < f_curr->fce_range.lsr_start || - (new_end == f_curr->fce_range.lsr_start && - new_flags != f_curr->fce_range.lsr_flags)) - break; - - prev = &f_curr->fce_list; - /* check if this range is to left of new range. */ - if (new_start < f_curr->fce_range.lsr_end && - new_flags == f_curr->fce_range.lsr_flags) { - fld_cache_overlap_handle(cache, f_curr, f_new); - goto out; - } - } - - if (!prev) - prev = head; - - CDEBUG(D_INFO, "insert range " DRANGE "\n", PRANGE(&f_new->fce_range)); - /* Add new entry to cache and lru list. */ - fld_cache_entry_add(cache, f_new, prev); -out: - return 0; -} - -int fld_cache_insert(struct fld_cache *cache, - const struct lu_seq_range *range) -{ - struct fld_cache_entry *flde; - int rc; - - flde = fld_cache_entry_create(range); - if (IS_ERR(flde)) - return PTR_ERR(flde); - - write_lock(&cache->fci_lock); - rc = fld_cache_insert_nolock(cache, flde); - write_unlock(&cache->fci_lock); - if (rc) - kfree(flde); - - return rc; -} - -/** - * Delete FLD entry in FLD cache. - * - */ - -struct fld_cache_entry -*fld_cache_entry_lookup_nolock(struct fld_cache *cache, - struct lu_seq_range *range) -{ - struct fld_cache_entry *flde; - struct fld_cache_entry *got = NULL; - struct list_head *head; - - head = &cache->fci_entries_head; - list_for_each_entry(flde, head, fce_list) { - if (range->lsr_start == flde->fce_range.lsr_start || - (range->lsr_end == flde->fce_range.lsr_end && - range->lsr_flags == flde->fce_range.lsr_flags)) { - got = flde; - break; - } - } - - return got; -} - -/** - * lookup \a seq sequence for range in fld cache. 
- */ -struct fld_cache_entry -*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range) -{ - struct fld_cache_entry *got = NULL; - - read_lock(&cache->fci_lock); - got = fld_cache_entry_lookup_nolock(cache, range); - read_unlock(&cache->fci_lock); - return got; -} - -/** - * lookup \a seq sequence for range in fld cache. - */ -int fld_cache_lookup(struct fld_cache *cache, - const u64 seq, struct lu_seq_range *range) -{ - struct fld_cache_entry *flde; - struct fld_cache_entry *prev = NULL; - struct list_head *head; - - read_lock(&cache->fci_lock); - head = &cache->fci_entries_head; - - cache->fci_stat.fst_count++; - list_for_each_entry(flde, head, fce_list) { - if (flde->fce_range.lsr_start > seq) { - if (prev) - *range = prev->fce_range; - break; - } - - prev = flde; - if (lu_seq_range_within(&flde->fce_range, seq)) { - *range = flde->fce_range; - - cache->fci_stat.fst_cache++; - read_unlock(&cache->fci_lock); - return 0; - } - } - read_unlock(&cache->fci_lock); - return -ENOENT; -} diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h deleted file mode 100644 index e1d6aaa5c2b4..000000000000 --- a/drivers/staging/lustre/lustre/fld/fld_internal.h +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/fld/fld_internal.h - * - * Subsystem Description: - * FLD is FID Location Database, which stores where (IE, on which MDT) - * FIDs are located. - * The database is basically a record file, each record consists of a FID - * sequence range, MDT/OST index, and flags. The FLD for the whole FS - * is only stored on the sequence controller(MDT0) right now, but each target - * also has its local FLD, which only stores the local sequence. - * - * The FLD subsystem usually has two tasks: - * 1. maintain the database, i.e. when the sequence controller allocates - * new sequence ranges to some nodes, it will call the FLD API to insert the - * location information in FLDB. - * - * 2. Handle requests from other nodes, i.e. if client needs to know where - * the FID is located, if it can not find the information in the local cache, - * it will send a FLD lookup RPC to the FLD service, and the FLD service will - * look up the FLDB entry and return the location information to client. 
- * - * - * Author: Yury Umanets - * Author: Tom WangDi - */ -#ifndef __FLD_INTERNAL_H -#define __FLD_INTERNAL_H - -#include - -#include -#include - -struct fld_stats { - __u64 fst_count; - __u64 fst_cache; - __u64 fst_inflight; -}; - -struct lu_fld_hash { - const char *fh_name; - int (*fh_hash_func)(struct lu_client_fld *, __u64); - struct lu_fld_target *(*fh_scan_func)(struct lu_client_fld *, __u64); -}; - -struct fld_cache_entry { - struct list_head fce_lru; - struct list_head fce_list; - /** fld cache entries are sorted on range->lsr_start field. */ - struct lu_seq_range fce_range; -}; - -struct fld_cache { - /** - * Cache guard, protects fci_hash mostly because others immutable after - * init is finished. - */ - rwlock_t fci_lock; - - /** Cache shrink threshold */ - int fci_threshold; - - /** Preferred number of cached entries */ - int fci_cache_size; - - /** Current number of cached entries. Protected by \a fci_lock */ - int fci_cache_count; - - /** LRU list fld entries. */ - struct list_head fci_lru; - - /** sorted fld entries. */ - struct list_head fci_entries_head; - - /** Cache statistics. */ - struct fld_stats fci_stat; - - /** Cache name used for debug and messages. */ - char fci_name[LUSTRE_MDT_MAXNAMELEN]; - unsigned int fci_no_shrink:1; -}; - -enum { - /* 4M of FLD cache will not hurt client a lot. */ - FLD_SERVER_CACHE_SIZE = (4 * 0x100000), - - /* 1M of FLD cache will not hurt client a lot. */ - FLD_CLIENT_CACHE_SIZE = (1 * 0x100000) -}; - -enum { - /* Cache threshold is 10 percent of size. */ - FLD_SERVER_CACHE_THRESHOLD = 10, - - /* Cache threshold is 10 percent of size. 
*/ - FLD_CLIENT_CACHE_THRESHOLD = 10 -}; - -extern struct lu_fld_hash fld_hash[]; - -int fld_client_rpc(struct obd_export *exp, - struct lu_seq_range *range, __u32 fld_op, - struct ptlrpc_request **reqp); - -extern struct lprocfs_vars fld_client_debugfs_list[]; - -struct fld_cache *fld_cache_init(const char *name, - int cache_size, int cache_threshold); - -void fld_cache_fini(struct fld_cache *cache); - -void fld_cache_flush(struct fld_cache *cache); - -int fld_cache_insert(struct fld_cache *cache, - const struct lu_seq_range *range); - -struct fld_cache_entry -*fld_cache_entry_create(const struct lu_seq_range *range); - -int fld_cache_lookup(struct fld_cache *cache, - const u64 seq, struct lu_seq_range *range); - -struct fld_cache_entry* -fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range); - -struct fld_cache_entry -*fld_cache_entry_lookup_nolock(struct fld_cache *cache, - struct lu_seq_range *range); - -static inline const char * -fld_target_name(struct lu_fld_target *tar) -{ - if (tar->ft_srv) - return tar->ft_srv->lsf_name; - - return (const char *)tar->ft_exp->exp_obd->obd_name; -} - -#endif /* __FLD_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c deleted file mode 100644 index 97f7ea632346..000000000000 --- a/drivers/staging/lustre/lustre/fld/fld_request.c +++ /dev/null @@ -1,446 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/fld/fld_request.c - * - * FLD (Fids Location Database) - * - * Author: Yury Umanets - */ - -#define DEBUG_SUBSYSTEM S_FLD - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include "fld_internal.h" - -static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq) -{ - LASSERT(fld->lcf_count > 0); - return do_div(seq, fld->lcf_count); -} - -static struct lu_fld_target * -fld_rrb_scan(struct lu_client_fld *fld, u64 seq) -{ - struct lu_fld_target *target; - int hash; - - /* Because almost all of special sequence located in MDT0, - * it should go to index 0 directly, instead of calculating - * hash again, and also if other MDTs is not being connected, - * the fld lookup requests(for seq on MDT0) should not be - * blocked because of other MDTs - */ - if (fid_seq_is_norm(seq)) - hash = fld_rrb_hash(fld, seq); - else - hash = 0; - -again: - list_for_each_entry(target, &fld->lcf_targets, ft_chain) { - if (target->ft_idx == hash) - return target; - } - - if (hash != 0) { - /* It is possible the remote target(MDT) are not connected to - * with client yet, so we will refer this to MDT0, which should - * be connected during mount - */ - hash = 0; - goto again; - } - - CERROR("%s: Can't find target by hash %d (seq %#llx). 
Targets (%d):\n", - fld->lcf_name, hash, seq, fld->lcf_count); - - list_for_each_entry(target, &fld->lcf_targets, ft_chain) { - const char *srv_name = target->ft_srv ? - target->ft_srv->lsf_name : ""; - const char *exp_name = target->ft_exp ? - (char *)target->ft_exp->exp_obd->obd_uuid.uuid : - ""; - - CERROR(" exp: 0x%p (%s), srv: 0x%p (%s), idx: %llu\n", - target->ft_exp, exp_name, target->ft_srv, - srv_name, target->ft_idx); - } - - /* - * If target is not found, there is logical error anyway, so here is - * LBUG() to catch this situation. - */ - LBUG(); - return NULL; -} - -struct lu_fld_hash fld_hash[] = { - { - .fh_name = "RRB", - .fh_hash_func = fld_rrb_hash, - .fh_scan_func = fld_rrb_scan - }, - { - NULL, - } -}; - -static struct lu_fld_target * -fld_client_get_target(struct lu_client_fld *fld, u64 seq) -{ - struct lu_fld_target *target; - - LASSERT(fld->lcf_hash); - - spin_lock(&fld->lcf_lock); - target = fld->lcf_hash->fh_scan_func(fld, seq); - spin_unlock(&fld->lcf_lock); - - if (target) { - CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n", - fld->lcf_name, target->ft_idx, seq); - } - - return target; -} - -/* - * Add export to FLD. This is usually done by CMM and LMV as they are main users - * of FLD module. 
- */ -int fld_client_add_target(struct lu_client_fld *fld, - struct lu_fld_target *tar) -{ - const char *name; - struct lu_fld_target *target, *tmp; - - LASSERT(tar); - name = fld_target_name(tar); - LASSERT(name); - LASSERT(tar->ft_srv || tar->ft_exp); - - CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n", - fld->lcf_name, name, tar->ft_idx); - - target = kzalloc(sizeof(*target), GFP_NOFS); - if (!target) - return -ENOMEM; - - spin_lock(&fld->lcf_lock); - list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) { - if (tmp->ft_idx == tar->ft_idx) { - spin_unlock(&fld->lcf_lock); - kfree(target); - CERROR("Target %s exists in FLD and known as %s:#%llu\n", - name, fld_target_name(tmp), tmp->ft_idx); - return -EEXIST; - } - } - - target->ft_exp = tar->ft_exp; - if (target->ft_exp) - class_export_get(target->ft_exp); - target->ft_srv = tar->ft_srv; - target->ft_idx = tar->ft_idx; - - list_add_tail(&target->ft_chain, &fld->lcf_targets); - - fld->lcf_count++; - spin_unlock(&fld->lcf_lock); - - return 0; -} -EXPORT_SYMBOL(fld_client_add_target); - -/* Remove export from FLD */ -int fld_client_del_target(struct lu_client_fld *fld, __u64 idx) -{ - struct lu_fld_target *target, *tmp; - - spin_lock(&fld->lcf_lock); - list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) { - if (target->ft_idx == idx) { - fld->lcf_count--; - list_del(&target->ft_chain); - spin_unlock(&fld->lcf_lock); - - if (target->ft_exp) - class_export_put(target->ft_exp); - - kfree(target); - return 0; - } - } - spin_unlock(&fld->lcf_lock); - return -ENOENT; -} - -static struct dentry *fld_debugfs_dir; - -static void fld_client_debugfs_init(struct lu_client_fld *fld) -{ - fld->lcf_debugfs_entry = debugfs_create_dir(fld->lcf_name, - fld_debugfs_dir); - - ldebugfs_add_vars(fld->lcf_debugfs_entry, fld_client_debugfs_list, fld); -} - -void fld_client_debugfs_fini(struct lu_client_fld *fld) -{ - debugfs_remove_recursive(fld->lcf_debugfs_entry); -} -EXPORT_SYMBOL(fld_client_debugfs_fini); - -static 
inline int hash_is_sane(int hash) -{ - return (hash >= 0 && hash < ARRAY_SIZE(fld_hash)); -} - -int fld_client_init(struct lu_client_fld *fld, - const char *prefix, int hash) -{ - int cache_size, cache_threshold; - int rc = 0; - - snprintf(fld->lcf_name, sizeof(fld->lcf_name), - "cli-%s", prefix); - - if (!hash_is_sane(hash)) { - CERROR("%s: Wrong hash function %#x\n", - fld->lcf_name, hash); - return -EINVAL; - } - - fld->lcf_count = 0; - spin_lock_init(&fld->lcf_lock); - fld->lcf_hash = &fld_hash[hash]; - INIT_LIST_HEAD(&fld->lcf_targets); - - cache_size = FLD_CLIENT_CACHE_SIZE / - sizeof(struct fld_cache_entry); - - cache_threshold = cache_size * - FLD_CLIENT_CACHE_THRESHOLD / 100; - - fld->lcf_cache = fld_cache_init(fld->lcf_name, - cache_size, cache_threshold); - if (IS_ERR(fld->lcf_cache)) { - rc = PTR_ERR(fld->lcf_cache); - fld->lcf_cache = NULL; - goto out; - } - - fld_client_debugfs_init(fld); -out: - CDEBUG(D_INFO, "%s: Using \"%s\" hash\n", - fld->lcf_name, fld->lcf_hash->fh_name); - return rc; -} -EXPORT_SYMBOL(fld_client_init); - -void fld_client_fini(struct lu_client_fld *fld) -{ - struct lu_fld_target *target, *tmp; - - spin_lock(&fld->lcf_lock); - list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) { - fld->lcf_count--; - list_del(&target->ft_chain); - if (target->ft_exp) - class_export_put(target->ft_exp); - kfree(target); - } - spin_unlock(&fld->lcf_lock); - - if (fld->lcf_cache) { - if (!IS_ERR(fld->lcf_cache)) - fld_cache_fini(fld->lcf_cache); - fld->lcf_cache = NULL; - } -} -EXPORT_SYMBOL(fld_client_fini); - -int fld_client_rpc(struct obd_export *exp, - struct lu_seq_range *range, __u32 fld_op, - struct ptlrpc_request **reqp) -{ - struct ptlrpc_request *req = NULL; - struct lu_seq_range *prange; - __u32 *op; - int rc = 0; - struct obd_import *imp; - - LASSERT(exp); - - imp = class_exp2cliimp(exp); - switch (fld_op) { - case FLD_QUERY: - req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, - LUSTRE_MDS_VERSION, FLD_QUERY); - if 
(!req) - return -ENOMEM; - - /* - * XXX: only needed when talking to old server(< 2.6), it should - * be removed when < 2.6 server is not supported - */ - op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC); - *op = FLD_LOOKUP; - - if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) - req->rq_allow_replay = 1; - break; - case FLD_READ: - req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_READ, - LUSTRE_MDS_VERSION, FLD_READ); - if (!req) - return -ENOMEM; - - req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, - RCL_SERVER, PAGE_SIZE); - break; - default: - rc = -EINVAL; - break; - } - if (rc) - return rc; - - prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD); - *prange = *range; - ptlrpc_request_set_replen(req); - req->rq_request_portal = FLD_REQUEST_PORTAL; - req->rq_reply_portal = MDC_REPLY_PORTAL; - ptlrpc_at_set_req_timeout(req); - - obd_get_request_slot(&exp->exp_obd->u.cli); - rc = ptlrpc_queue_wait(req); - obd_put_request_slot(&exp->exp_obd->u.cli); - if (rc) - goto out_req; - - if (fld_op == FLD_QUERY) { - prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD); - if (!prange) { - rc = -EFAULT; - goto out_req; - } - *range = *prange; - } - -out_req: - if (rc || !reqp) { - ptlrpc_req_finished(req); - req = NULL; - } - - if (reqp) - *reqp = req; - - return rc; -} - -int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds, - __u32 flags, const struct lu_env *env) -{ - struct lu_seq_range res = { 0 }; - struct lu_fld_target *target; - int rc; - - rc = fld_cache_lookup(fld->lcf_cache, seq, &res); - if (rc == 0) { - *mds = res.lsr_index; - return 0; - } - - /* Can not find it in the cache */ - target = fld_client_get_target(fld, seq); - LASSERT(target); - - CDEBUG(D_INFO, - "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n", - fld->lcf_name, seq, fld_target_name(target), target->ft_idx); - - res.lsr_start = seq; - fld_range_set_type(&res, flags); - rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL); - - if 
(rc == 0) { - *mds = res.lsr_index; - - fld_cache_insert(fld->lcf_cache, &res); - } - return rc; -} -EXPORT_SYMBOL(fld_client_lookup); - -void fld_client_flush(struct lu_client_fld *fld) -{ - fld_cache_flush(fld->lcf_cache); -} - -static int __init fld_init(void) -{ - int rc; - - rc = libcfs_setup(); - if (rc) - return rc; - - fld_debugfs_dir = debugfs_create_dir(LUSTRE_FLD_NAME, - debugfs_lustre_root); - return 0; -} - -static void __exit fld_exit(void) -{ - debugfs_remove_recursive(fld_debugfs_dir); -} - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Lustre FID Location Database"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(fld_init) -module_exit(fld_exit) diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c deleted file mode 100644 index 0bcfb26ef8aa..000000000000 --- a/drivers/staging/lustre/lustre/fld/lproc_fld.c +++ /dev/null @@ -1,154 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/fld/lproc_fld.c - * - * FLD (FIDs Location Database) - * - * Author: Yury Umanets - * Di Wang - */ - -#define DEBUG_SUBSYSTEM S_FLD - -#include - -#include -#include -#include -#include -#include -#include -#include "fld_internal.h" - -static int -fld_debugfs_targets_seq_show(struct seq_file *m, void *unused) -{ - struct lu_client_fld *fld = (struct lu_client_fld *)m->private; - struct lu_fld_target *target; - - spin_lock(&fld->lcf_lock); - list_for_each_entry(target, &fld->lcf_targets, ft_chain) - seq_printf(m, "%s\n", fld_target_name(target)); - spin_unlock(&fld->lcf_lock); - - return 0; -} - -static int -fld_debugfs_hash_seq_show(struct seq_file *m, void *unused) -{ - struct lu_client_fld *fld = (struct lu_client_fld *)m->private; - - spin_lock(&fld->lcf_lock); - seq_printf(m, "%s\n", fld->lcf_hash->fh_name); - spin_unlock(&fld->lcf_lock); - - return 0; -} - -static ssize_t -fld_debugfs_hash_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct lu_client_fld *fld; - struct lu_fld_hash *hash = NULL; - char fh_name[8]; - int i; - - if (count > sizeof(fh_name)) - return -ENAMETOOLONG; - - if (copy_from_user(fh_name, buffer, count) != 0) - return -EFAULT; - - fld = ((struct seq_file *)file->private_data)->private; - - for (i = 0; fld_hash[i].fh_name; i++) { - if (count != strlen(fld_hash[i].fh_name)) - continue; - - if (!strncmp(fld_hash[i].fh_name, fh_name, count)) { - hash = &fld_hash[i]; - break; - } - } - - if (hash) { - spin_lock(&fld->lcf_lock); - fld->lcf_hash = hash; - spin_unlock(&fld->lcf_lock); - - CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n", - fld->lcf_name, hash->fh_name); - } - - return count; -} - -static ssize_t -fld_debugfs_cache_flush_write(struct file *file, const char __user *buffer, - size_t count, loff_t *pos) -{ - struct lu_client_fld *fld = file->private_data; - - 
fld_cache_flush(fld->lcf_cache); - - CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name); - - return count; -} - -static int -fld_debugfs_cache_flush_release(struct inode *inode, struct file *file) -{ - file->private_data = NULL; - return 0; -} - -static const struct file_operations fld_debugfs_cache_flush_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .write = fld_debugfs_cache_flush_write, - .release = fld_debugfs_cache_flush_release, -}; - -LPROC_SEQ_FOPS_RO(fld_debugfs_targets); -LPROC_SEQ_FOPS(fld_debugfs_hash); - -struct lprocfs_vars fld_client_debugfs_list[] = { - { "targets", &fld_debugfs_targets_fops }, - { "hash", &fld_debugfs_hash_fops }, - { "cache_flush", &fld_debugfs_cache_flush_fops }, - { NULL } -}; diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h deleted file mode 100644 index 6f7b991be809..000000000000 --- a/drivers/staging/lustre/lustre/include/cl_object.h +++ /dev/null @@ -1,2463 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#ifndef _LUSTRE_CL_OBJECT_H -#define _LUSTRE_CL_OBJECT_H - -/** \defgroup clio clio - * - * Client objects implement io operations and cache pages. - * - * Examples: lov and osc are implementations of cl interface. - * - * Big Theory Statement. - * - * Layered objects. - * - * Client implementation is based on the following data-types: - * - * - cl_object - * - * - cl_page - * - * - cl_lock represents an extent lock on an object. - * - * - cl_io represents high-level i/o activity such as whole read/write - * system call, or write-out of pages from under the lock being - * canceled. cl_io has sub-ios that can be stopped and resumed - * independently, thus achieving high degree of transfer - * parallelism. Single cl_io can be advanced forward by - * the multiple threads (although in the most usual case of - * read/write system call it is associated with the single user - * thread, that issued the system call). - * - * Terminology - * - * - to avoid confusion high-level I/O operation like read or write system - * call is referred to as "an io", whereas low-level I/O operation, like - * RPC, is referred to as "a transfer" - * - * - "generic code" means generic (not file system specific) code in the - * hosting environment. "cl-code" means code (mostly in cl_*.c files) that - * is not layer specific. - * - * Locking. - * - * - i_mutex - * - PG_locked - * - cl_object_header::coh_page_guard - * - lu_site::ls_guard - * - * See the top comment in cl_object.c for the description of overall locking and - * reference-counting design. - * - * See comments below for the description of i/o, page, and dlm-locking - * design. - * - * @{ - */ - -/* - * super-class definitions. 
- */ -#include -#include -#include -#include -#include -#include -#include - -struct inode; - -struct cl_device; - -struct cl_object; - -struct cl_page; -struct cl_page_slice; -struct cl_lock; -struct cl_lock_slice; - -struct cl_lock_operations; -struct cl_page_operations; - -struct cl_io; -struct cl_io_slice; - -struct cl_req_attr; - -/** - * Device in the client stack. - * - * \see vvp_device, lov_device, lovsub_device, osc_device - */ -struct cl_device { - /** Super-class. */ - struct lu_device cd_lu_dev; -}; - -/** \addtogroup cl_object cl_object - * @{ - */ -/** - * "Data attributes" of cl_object. Data attributes can be updated - * independently for a sub-object, and top-object's attributes are calculated - * from sub-objects' ones. - */ -struct cl_attr { - /** Object size, in bytes */ - loff_t cat_size; - /** - * Known minimal size, in bytes. - * - * This is only valid when at least one DLM lock is held. - */ - loff_t cat_kms; - /** Modification time. Measured in seconds since epoch. */ - time64_t cat_mtime; - /** Access time. Measured in seconds since epoch. */ - time64_t cat_atime; - /** Change time. Measured in seconds since epoch. */ - time64_t cat_ctime; - /** - * Blocks allocated to this cl_object on the server file system. - * - * \todo XXX An interface for block size is needed. - */ - __u64 cat_blocks; - /** - * User identifier for quota purposes. - */ - uid_t cat_uid; - /** - * Group identifier for quota purposes. - */ - gid_t cat_gid; - - /* nlink of the directory */ - __u64 cat_nlink; -}; - -/** - * Fields in cl_attr that are being set. - */ -enum cl_attr_valid { - CAT_SIZE = 1 << 0, - CAT_KMS = 1 << 1, - CAT_MTIME = 1 << 3, - CAT_ATIME = 1 << 4, - CAT_CTIME = 1 << 5, - CAT_BLOCKS = 1 << 6, - CAT_UID = 1 << 7, - CAT_GID = 1 << 8 -}; - -/** - * Sub-class of lu_object with methods common for objects on the client - * stacks. - * - * cl_object: represents a regular file system object, both a file and a - * stripe. 
cl_object is based on lu_object: it is identified by a fid, - * layered, cached, hashed, and lrued. Important distinction with the server - * side, where md_object and dt_object are used, is that cl_object "fans out" - * at the lov/sns level: depending on the file layout, single file is - * represented as a set of "sub-objects" (stripes). At the implementation - * level, struct lov_object contains an array of cl_objects. Each sub-object - * is a full-fledged cl_object, having its fid, living in the lru and hash - * table. - * - * This leads to the next important difference with the server side: on the - * client, it's quite usual to have objects with the different sequence of - * layers. For example, typical top-object is composed of the following - * layers: - * - * - vvp - * - lov - * - * whereas its sub-objects are composed of - * - * - lovsub - * - osc - * - * layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep - * track of the object-subobject relationship. - * - * Sub-objects are not cached independently: when top-object is about to - * be discarded from the memory, all its sub-objects are torn-down and - * destroyed too. - * - * \see vvp_object, lov_object, lovsub_object, osc_object - */ -struct cl_object { - /** super class */ - struct lu_object co_lu; - /** per-object-layer operations */ - const struct cl_object_operations *co_ops; - /** offset of page slice in cl_page buffer */ - int co_slice_off; -}; - -/** - * Description of the client object configuration. This is used for the - * creation of a new client object that is identified by a more state than - * fid. - */ -struct cl_object_conf { - /** Super-class. */ - struct lu_object_conf coc_lu; - union { - /** - * Object layout. This is consumed by lov. - */ - struct lu_buf coc_layout; - /** - * Description of particular stripe location in the - * cluster. This is consumed by osc. - */ - struct lov_oinfo *coc_oinfo; - } u; - /** - * VFS inode. This is consumed by vvp. 
- */ - struct inode *coc_inode; - /** - * Layout lock handle. - */ - struct ldlm_lock *coc_lock; - /** - * Operation to handle layout, OBJECT_CONF_XYZ. - */ - int coc_opc; -}; - -enum { - /** configure layout, set up a new stripe, must be called while - * holding layout lock. - */ - OBJECT_CONF_SET = 0, - /** invalidate the current stripe configuration due to losing - * layout lock. - */ - OBJECT_CONF_INVALIDATE = 1, - /** wait for old layout to go away so that new layout can be set up. */ - OBJECT_CONF_WAIT = 2 -}; - -enum { - CL_LAYOUT_GEN_NONE = (u32)-2, /* layout lock was cancelled */ - CL_LAYOUT_GEN_EMPTY = (u32)-1, /* for empty layout */ -}; - -struct cl_layout { - /** the buffer to return the layout in lov_mds_md format. */ - struct lu_buf cl_buf; - /** size of layout in lov_mds_md format. */ - size_t cl_size; - /** Layout generation. */ - u32 cl_layout_gen; -}; - -/** - * Operations implemented for each cl object layer. - * - * \see vvp_ops, lov_ops, lovsub_ops, osc_ops - */ -struct cl_object_operations { - /** - * Initialize page slice for this layer. Called top-to-bottom through - * every object layer when a new cl_page is instantiated. Layer - * keeping private per-page data, or requiring its own page operations - * vector should allocate these data here, and attach then to the page - * by calling cl_page_slice_add(). \a vmpage is locked (in the VM - * sense). Optional. - * - * \retval NULL success. - * - * \retval ERR_PTR(errno) failure code. - * - * \retval valid-pointer pointer to already existing referenced page - * to be used instead of newly created. - */ - int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index); - /** - * Initialize lock slice for this layer. Called top-to-bottom through - * every object layer when a new cl_lock is instantiated. 
Layer - * keeping private per-lock data, or requiring its own lock operations - * vector should allocate these data here, and attach then to the lock - * by calling cl_lock_slice_add(). Mandatory. - */ - int (*coo_lock_init)(const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, - const struct cl_io *io); - /** - * Initialize io state for a given layer. - * - * called top-to-bottom once per io existence to initialize io - * state. If layer wants to keep some state for this type of io, it - * has to embed struct cl_io_slice in lu_env::le_ses, and register - * slice with cl_io_slice_add(). It is guaranteed that all threads - * participating in this io share the same session. - */ - int (*coo_io_init)(const struct lu_env *env, - struct cl_object *obj, struct cl_io *io); - /** - * Fill portion of \a attr that this layer controls. This method is - * called top-to-bottom through all object layers. - * - * \pre cl_object_header::coh_attr_guard of the top-object is locked. - * - * \return 0: to continue - * \return +ve: to stop iterating through layers (but 0 is returned - * from enclosing cl_object_attr_get()) - * \return -ve: to signal error - */ - int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); - /** - * Update attributes. - * - * \a valid is a bitmask composed from enum #cl_attr_valid, and - * indicating what attributes are to be set. - * - * \pre cl_object_header::coh_attr_guard of the top-object is locked. - * - * \return the same convention as for - * cl_object_operations::coo_attr_get() is used. - */ - int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned int valid); - /** - * Update object configuration. Called top-to-bottom to modify object - * configuration. - * - * XXX error conditions and handling. - */ - int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf); - /** - * Glimpse ast. 
Executed when glimpse ast arrives for a lock on this - * object. Layers are supposed to fill parts of \a lvb that will be - * shipped to the glimpse originator as a glimpse result. - * - * \see vvp_object_glimpse(), lovsub_object_glimpse(), - * \see osc_object_glimpse() - */ - int (*coo_glimpse)(const struct lu_env *env, - const struct cl_object *obj, struct ost_lvb *lvb); - /** - * Object prune method. Called when the layout is going to change on - * this object, therefore each layer has to clean up their cache, - * mainly pages and locks. - */ - int (*coo_prune)(const struct lu_env *env, struct cl_object *obj); - /** - * Object getstripe method. - */ - int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj, - struct lov_user_md __user *lum); - /** - * Get FIEMAP mapping from the object. - */ - int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj, - struct ll_fiemap_info_key *fmkey, - struct fiemap *fiemap, size_t *buflen); - /** - * Get layout and generation of the object. - */ - int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj, - struct cl_layout *layout); - /** - * Get maximum size of the object. - */ - loff_t (*coo_maxbytes)(struct cl_object *obj); - /** - * Set request attributes. - */ - void (*coo_req_attr_set)(const struct lu_env *env, - struct cl_object *obj, - struct cl_req_attr *attr); -}; - -/** - * Extended header for client object. - */ -struct cl_object_header { - /** Standard lu_object_header. cl_object::co_lu::lo_header points - * here. - */ - struct lu_object_header coh_lu; - - /** - * Parent object. It is assumed that an object has a well-defined - * parent, but not a well-defined child (there may be multiple - * sub-objects, for the same top-object). cl_object_header::coh_parent - * field allows certain code to be written generically, without - * limiting possible cl_object layouts unduly. 
- */ - struct cl_object_header *coh_parent; - /** - * Protects consistency between cl_attr of parent object and - * attributes of sub-objects, that the former is calculated ("merged") - * from. - * - * \todo XXX this can be read/write lock if needed. - */ - spinlock_t coh_attr_guard; - /** - * Size of cl_page + page slices - */ - unsigned short coh_page_bufsize; - /** - * Number of objects above this one: 0 for a top-object, 1 for its - * sub-object, etc. - */ - unsigned char coh_nesting; -}; - -/** - * Helper macro: iterate over all layers of the object \a obj, assigning every - * layer top-to-bottom to \a slice. - */ -#define cl_object_for_each(slice, obj) \ - list_for_each_entry((slice), \ - &(obj)->co_lu.lo_header->loh_layers, \ - co_lu.lo_linkage) -/** - * Helper macro: iterate over all layers of the object \a obj, assigning every - * layer bottom-to-top to \a slice. - */ -#define cl_object_for_each_reverse(slice, obj) \ - list_for_each_entry_reverse((slice), \ - &(obj)->co_lu.lo_header->loh_layers, \ - co_lu.lo_linkage) -/** @} cl_object */ - -#define CL_PAGE_EOF ((pgoff_t)~0ull) - -/** \addtogroup cl_page cl_page - * @{ - */ - -/** \struct cl_page - * Layered client page. - * - * cl_page: represents a portion of a file, cached in the memory. All pages - * of the given file are of the same size, and are kept in the radix tree - * hanging off the cl_object. cl_page doesn't fan out, but as sub-objects - * of the top-level file object are first class cl_objects, they have their - * own radix trees of pages and hence page is implemented as a sequence of - * struct cl_pages's, linked into double-linked list through - * cl_page::cp_parent and cl_page::cp_child pointers, each residing in the - * corresponding radix tree at the corresponding logical offset. - * - * cl_page is associated with VM page of the hosting environment (struct - * page in Linux kernel, for example), struct page. 
It is assumed, that this - * association is implemented by one of cl_page layers (top layer in the - * current design) that - * - * - intercepts per-VM-page call-backs made by the environment (e.g., - * memory pressure), - * - * - translates state (page flag bits) and locking between lustre and - * environment. - * - * The association between cl_page and struct page is immutable and - * established when cl_page is created. - * - * cl_page can be "owned" by a particular cl_io (see below), guaranteeing - * this io an exclusive access to this page w.r.t. other io attempts and - * various events changing page state (such as transfer completion, or - * eviction of the page from the memory). Note, that in general cl_io - * cannot be identified with a particular thread, and page ownership is not - * exactly equal to the current thread holding a lock on the page. Layer - * implementing association between cl_page and struct page has to implement - * ownership on top of available synchronization mechanisms. - * - * While lustre client maintains the notion of an page ownership by io, - * hosting MM/VM usually has its own page concurrency control - * mechanisms. For example, in Linux, page access is synchronized by the - * per-page PG_locked bit-lock, and generic kernel code (generic_file_*()) - * takes care to acquire and release such locks as necessary around the - * calls to the file system methods (->readpage(), ->prepare_write(), - * ->commit_write(), etc.). This leads to the situation when there are two - * different ways to own a page in the client: - * - * - client code explicitly and voluntary owns the page (cl_page_own()); - * - * - VM locks a page and then calls the client, that has "to assume" - * the ownership from the VM (cl_page_assume()). - * - * Dual methods to release ownership are cl_page_disown() and - * cl_page_unassume(). - * - * cl_page is reference counted (cl_page::cp_ref). 
When reference counter - * drops to 0, the page is returned to the cache, unless it is in - * cl_page_state::CPS_FREEING state, in which case it is immediately - * destroyed. - * - * The general logic guaranteeing the absence of "existential races" for - * pages is the following: - * - * - there are fixed known ways for a thread to obtain a new reference - * to a page: - * - * - by doing a lookup in the cl_object radix tree, protected by the - * spin-lock; - * - * - by starting from VM-locked struct page and following some - * hosting environment method (e.g., following ->private pointer in - * the case of Linux kernel), see cl_vmpage_page(); - * - * - when the page enters cl_page_state::CPS_FREEING state, all these - * ways are severed with the proper synchronization - * (cl_page_delete()); - * - * - entry into cl_page_state::CPS_FREEING is serialized by the VM page - * lock; - * - * - no new references to the page in cl_page_state::CPS_FREEING state - * are allowed (checked in cl_page_get()). - * - * Together this guarantees that when last reference to a - * cl_page_state::CPS_FREEING page is released, it is safe to destroy the - * page, as neither references to it can be acquired at that point, nor - * ones exist. - * - * cl_page is a state machine. States are enumerated in enum - * cl_page_state. Possible state transitions are enumerated in - * cl_page_state_set(). State transition process (i.e., actual changing of - * cl_page::cp_state field) is protected by the lock on the underlying VM - * page. - * - * Linux Kernel implementation. - * - * Binding between cl_page and struct page (which is a typedef for - * struct page) is implemented in the vvp layer. cl_page is attached to the - * ->private pointer of the struct page, together with the setting of - * PG_private bit in page->flags, and acquiring additional reference on the - * struct page (much like struct buffer_head, or any similar file system - * private data structures). 
- * - * PG_locked lock is used to implement both ownership and transfer - * synchronization, that is, page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}} - * states. No additional references are acquired for the duration of the - * transfer. - * - * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where - * write-out is "protected" by the special PG_writeback bit. - */ - -/** - * States of cl_page. cl_page.c assumes particular order here. - * - * The page state machine is rather crude, as it doesn't recognize finer page - * states like "dirty" or "up to date". This is because such states are not - * always well defined for the whole stack (see, for example, the - * implementation of the read-ahead, that hides page up-to-dateness to track - * cache hits accurately). Such sub-states are maintained by the layers that - * are interested in them. - */ -enum cl_page_state { - /** - * Page is in the cache, un-owned. Page leaves cached state in the - * following cases: - * - * - [cl_page_state::CPS_OWNED] io comes across the page and - * owns it; - * - * - [cl_page_state::CPS_PAGEOUT] page is dirty, the - * req-formation engine decides that it wants to include this page - * into an RPC being constructed, and yanks it from the cache; - * - * - [cl_page_state::CPS_FREEING] VM callback is executed to - * evict the page form the memory; - * - * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL - */ - CPS_CACHED, - /** - * Page is exclusively owned by some cl_io. 
Page may end up in this - * state as a result of - * - * - io creating new page and immediately owning it; - * - * - [cl_page_state::CPS_CACHED] io finding existing cached page - * and owning it; - * - * - [cl_page_state::CPS_OWNED] io finding existing owned page - * and waiting for owner to release the page; - * - * Page leaves owned state in the following cases: - * - * - [cl_page_state::CPS_CACHED] io decides to leave the page in - * the cache, doing nothing; - * - * - [cl_page_state::CPS_PAGEIN] io starts read transfer for - * this page; - * - * - [cl_page_state::CPS_PAGEOUT] io starts immediate write - * transfer for this page; - * - * - [cl_page_state::CPS_FREEING] io decides to destroy this - * page (e.g., as part of truncate or extent lock cancellation). - * - * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL - */ - CPS_OWNED, - /** - * Page is being written out, as a part of a transfer. This state is - * entered when req-formation logic decided that it wants this page to - * be sent through the wire _now_. Specifically, it means that once - * this state is achieved, transfer completion handler (with either - * success or failure indication) is guaranteed to be executed against - * this page independently of any locks and any scheduling decisions - * made by the hosting environment (that effectively means that the - * page is never put into cl_page_state::CPS_PAGEOUT state "in - * advance". This property is mentioned, because it is important when - * reasoning about possible dead-locks in the system). The page can - * enter this state as a result of - * - * - [cl_page_state::CPS_OWNED] an io requesting an immediate - * write-out of this page, or - * - * - [cl_page_state::CPS_CACHED] req-forming engine deciding - * that it has enough dirty pages cached to issue a "good" - * transfer. - * - * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer - * is completed---it is moved into cl_page_state::CPS_CACHED state. 
- * - * Underlying VM page is locked for the duration of transfer. - * - * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL - */ - CPS_PAGEOUT, - /** - * Page is being read in, as a part of a transfer. This is quite - * similar to the cl_page_state::CPS_PAGEOUT state, except that - * read-in is always "immediate"---there is no such thing a sudden - * construction of read request from cached, presumably not up to date, - * pages. - * - * Underlying VM page is locked for the duration of transfer. - * - * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL - */ - CPS_PAGEIN, - /** - * Page is being destroyed. This state is entered when client decides - * that page has to be deleted from its host object, as, e.g., a part - * of truncate. - * - * Once this state is reached, there is no way to escape it. - * - * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL - */ - CPS_FREEING, - CPS_NR -}; - -enum cl_page_type { - /** Host page, the page is from the host inode which the cl_page - * belongs to. - */ - CPT_CACHEABLE = 1, - - /** Transient page, the transient cl_page is used to bind a cl_page - * to vmpage which is not belonging to the same object of cl_page. - * it is used in DirectIO and lockless IO. - */ - CPT_TRANSIENT, -}; - -/** - * Fields are protected by the lock on struct page, except for atomics and - * immutables. - * - * \invariant Data type invariants are in cl_page_invariant(). Basically: - * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked - * list, consistent with the parent/child pointers in the cl_page::cp_obj and - * cl_page::cp_owner (when set). - */ -struct cl_page { - /** Reference counter. */ - atomic_t cp_ref; - /** An object this page is a part of. Immutable after creation. */ - struct cl_object *cp_obj; - /** vmpage */ - struct page *cp_vmpage; - /** Linkage of pages within group. Pages must be owned */ - struct list_head cp_batch; - /** List of slices. 
Immutable after creation. */ - struct list_head cp_layers; - /** - * Page state. This field is const to avoid accidental update, it is - * modified only internally within cl_page.c. Protected by a VM lock. - */ - const enum cl_page_state cp_state; - /** - * Page type. Only CPT_TRANSIENT is used so far. Immutable after - * creation. - */ - enum cl_page_type cp_type; - - /** - * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned - * by sub-io. Protected by a VM lock. - */ - struct cl_io *cp_owner; - /** List of references to this page, for debugging. */ - struct lu_ref cp_reference; - /** Link to an object, for debugging. */ - struct lu_ref_link cp_obj_ref; - /** Link to a queue, for debugging. */ - struct lu_ref_link cp_queue_ref; - /** Assigned if doing a sync_io */ - struct cl_sync_io *cp_sync_io; -}; - -/** - * Per-layer part of cl_page. - * - * \see vvp_page, lov_page, osc_page - */ -struct cl_page_slice { - struct cl_page *cpl_page; - pgoff_t cpl_index; - /** - * Object slice corresponding to this page slice. Immutable after - * creation. - */ - struct cl_object *cpl_obj; - const struct cl_page_operations *cpl_ops; - /** Linkage into cl_page::cp_layers. Immutable after creation. */ - struct list_head cpl_linkage; -}; - -/** - * Lock mode. For the client extent locks. - * - * \ingroup cl_lock - */ -enum cl_lock_mode { - CLM_READ, - CLM_WRITE, - CLM_GROUP -}; - -/** - * Requested transfer type. - */ -enum cl_req_type { - CRT_READ, - CRT_WRITE, - CRT_NR -}; - -/** - * Per-layer page operations. - * - * Methods taking an \a io argument are for the activity happening in the - * context of given \a io. Page is assumed to be owned by that io, except for - * the obvious cases (like cl_page_operations::cpo_own()). - * - * \see vvp_page_ops, lov_page_ops, osc_page_ops - */ -struct cl_page_operations { - /** - * cl_page<->struct page methods. Only one layer in the stack has to - * implement these. 
Current code assumes that this functionality is - * provided by the topmost layer, see cl_page_disown0() as an example. - */ - - /** - * Called when \a io acquires this page into the exclusive - * ownership. When this method returns, it is guaranteed that the is - * not owned by other io, and no transfer is going on against - * it. Optional. - * - * \see cl_page_own() - * \see vvp_page_own(), lov_page_own() - */ - int (*cpo_own)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io, int nonblock); - /** Called when ownership it yielded. Optional. - * - * \see cl_page_disown() - * \see vvp_page_disown() - */ - void (*cpo_disown)(const struct lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io); - /** - * Called for a page that is already "owned" by \a io from VM point of - * view. Optional. - * - * \see cl_page_assume() - * \see vvp_page_assume(), lov_page_assume() - */ - void (*cpo_assume)(const struct lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io); - /** Dual to cl_page_operations::cpo_assume(). Optional. Called - * bottom-to-top when IO releases a page without actually unlocking - * it. - * - * \see cl_page_unassume() - * \see vvp_page_unassume() - */ - void (*cpo_unassume)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io); - /** - * Announces whether the page contains valid data or not by \a uptodate. - * - * \see cl_page_export() - * \see vvp_page_export() - */ - void (*cpo_export)(const struct lu_env *env, - const struct cl_page_slice *slice, int uptodate); - /** - * Checks whether underlying VM page is locked (in the suitable - * sense). Used for assertions. - * - * \retval -EBUSY: page is protected by a lock of a given mode; - * \retval -ENODATA: page is not protected by a lock; - * \retval 0: this layer cannot decide. (Should never happen.) - */ - int (*cpo_is_vmlocked)(const struct lu_env *env, - const struct cl_page_slice *slice); - /** - * Page destruction. 
- */ - - /** - * Called when page is truncated from the object. Optional. - * - * \see cl_page_discard() - * \see vvp_page_discard(), osc_page_discard() - */ - void (*cpo_discard)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io); - /** - * Called when page is removed from the cache, and is about to being - * destroyed. Optional. - * - * \see cl_page_delete() - * \see vvp_page_delete(), osc_page_delete() - */ - void (*cpo_delete)(const struct lu_env *env, - const struct cl_page_slice *slice); - /** Destructor. Frees resources and slice itself. */ - void (*cpo_fini)(const struct lu_env *env, - struct cl_page_slice *slice); - /** - * Optional debugging helper. Prints given page slice. - * - * \see cl_page_print() - */ - int (*cpo_print)(const struct lu_env *env, - const struct cl_page_slice *slice, - void *cookie, lu_printer_t p); - /** - * \name transfer - * - * Transfer methods. - * - * @{ - */ - /** - * Request type dependent vector of operations. - * - * Transfer operations depend on transfer mode (cl_req_type). To avoid - * passing transfer mode to each and every of these methods, and to - * avoid branching on request type inside of the methods, separate - * methods for cl_req_type:CRT_READ and cl_req_type:CRT_WRITE are - * provided. That is, method invocation usually looks like - * - * slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...); - */ - struct { - /** - * Called when a page is submitted for a transfer as a part of - * cl_page_list. - * - * \return 0 : page is eligible for submission; - * \return -EALREADY : skip this page; - * \return -ve : error. - * - * \see cl_page_prep() - */ - int (*cpo_prep)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io); - /** - * Completion handler. This is guaranteed to be eventually - * fired after cl_page_operations::cpo_prep() or - * cl_page_operations::cpo_make_ready() call. - * - * This method can be called in a non-blocking context. 
It is - * guaranteed however, that the page involved and its object - * are pinned in memory (and, hence, calling cl_page_put() is - * safe). - * - * \see cl_page_completion() - */ - void (*cpo_completion)(const struct lu_env *env, - const struct cl_page_slice *slice, - int ioret); - /** - * Called when cached page is about to be added to the - * ptlrpc request as a part of req formation. - * - * \return 0 : proceed with this page; - * \return -EAGAIN : skip this page; - * \return -ve : error. - * - * \see cl_page_make_ready() - */ - int (*cpo_make_ready)(const struct lu_env *env, - const struct cl_page_slice *slice); - } io[CRT_NR]; - /** - * Tell transfer engine that only [to, from] part of a page should be - * transmitted. - * - * This is used for immediate transfers. - * - * \todo XXX this is not very good interface. It would be much better - * if all transfer parameters were supplied as arguments to - * cl_io_operations::cio_submit() call, but it is not clear how to do - * this for page queues. - * - * \see cl_page_clip() - */ - void (*cpo_clip)(const struct lu_env *env, - const struct cl_page_slice *slice, - int from, int to); - /** - * \pre the page was queued for transferring. - * \post page is removed from client's pending list, or -EBUSY - * is returned if it has already been in transferring. - * - * This is one of seldom page operation which is: - * 0. called from top level; - * 1. don't have vmpage locked; - * 2. every layer should synchronize execution of its ->cpo_cancel() - * with completion handlers. Osc uses client obd lock for this - * purpose. Based on there is no vvp_page_cancel and - * lov_page_cancel(), cpo_cancel is defacto protected by client lock. - * - * \see osc_page_cancel(). - */ - int (*cpo_cancel)(const struct lu_env *env, - const struct cl_page_slice *slice); - /** - * Write out a page by kernel. This is only called by ll_writepage - * right now. 
- * - * \see cl_page_flush() - */ - int (*cpo_flush)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io); - /** @} transfer */ -}; - -/** - * Helper macro, dumping detailed information about \a page into a log. - */ -#define CL_PAGE_DEBUG(mask, env, page, format, ...) \ -do { \ - if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ - cl_page_print(env, &msgdata, lu_cdebug_printer, page); \ - CDEBUG(mask, format, ## __VA_ARGS__); \ - } \ -} while (0) - -/** - * Helper macro, dumping shorter information about \a page into a log. - */ -#define CL_PAGE_HEADER(mask, env, page, format, ...) \ -do { \ - if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ - cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \ - CDEBUG(mask, format, ## __VA_ARGS__); \ - } \ -} while (0) - -static inline struct page *cl_page_vmpage(struct cl_page *page) -{ - LASSERT(page->cp_vmpage); - return page->cp_vmpage; -} - -/** - * Check if a cl_page is in use. - * - * Client cache holds a refcount, this refcount will be dropped when - * the page is taken out of cache, see vvp_page_delete(). - */ -static inline bool __page_in_use(const struct cl_page *page, int refc) -{ - return (atomic_read(&page->cp_ref) > refc + 1); -} - -/** - * Caller itself holds a refcount of cl_page. - */ -#define cl_page_in_use(pg) __page_in_use(pg, 1) -/** - * Caller doesn't hold a refcount. - */ -#define cl_page_in_use_noref(pg) __page_in_use(pg, 0) - -/** @} cl_page */ - -/** \addtogroup cl_lock cl_lock - * @{ - */ -/** \struct cl_lock - * - * Extent locking on the client. - * - * LAYERING - * - * The locking model of the new client code is built around - * - * struct cl_lock - * - * data-type representing an extent lock on a regular file. 
cl_lock is a - * layered object (much like cl_object and cl_page), it consists of a header - * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to - * cl_lock::cll_layers list through cl_lock_slice::cls_linkage. - * - * Typical cl_lock consists of the two layers: - * - * - vvp_lock (vvp specific data), and - * - lov_lock (lov specific data). - * - * lov_lock contains an array of sub-locks. Each of these sub-locks is a - * normal cl_lock: it has a header (struct cl_lock) and a list of layers: - * - * - lovsub_lock, and - * - osc_lock - * - * Each sub-lock is associated with a cl_object (representing stripe - * sub-object or the file to which top-level cl_lock is associated to), and is - * linked into that cl_object::coh_locks. In this respect cl_lock is similar to - * cl_object (that at lov layer also fans out into multiple sub-objects), and - * is different from cl_page, that doesn't fan out (there is usually exactly - * one osc_page for every vvp_page). We shall call vvp-lov portion of the lock - * a "top-lock" and its lovsub-osc portion a "sub-lock". - * - * LIFE CYCLE - * - * cl_lock is a cacheless data container for the requirements of locks to - * complete the IO. cl_lock is created before I/O starts and destroyed when the - * I/O is complete. - * - * cl_lock depends on LDLM lock to fulfill lock semantics. LDLM lock is attached - * to cl_lock at OSC layer. LDLM lock is still cacheable. - * - * INTERFACE AND USAGE - * - * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A - * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue() - * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock - * consists of multiple sub cl_locks, each sub locks will be enqueued - * correspondingly. At OSC layer, the lock enqueue request will tend to reuse - * cached LDLM lock; otherwise a new LDLM lock will have to be requested from - * OST side. 
- * - * cl_lock_cancel() must be called to release a cl_lock after use. clo_cancel() - * method will be called for each layer to release the resource held by this - * lock. At OSC layer, the reference count of LDLM lock, which is held at - * clo_enqueue time, is released. - * - * LDLM lock can only be canceled if there is no cl_lock using it. - * - * Overall process of the locking during IO operation is as following: - * - * - once parameters for IO are setup in cl_io, cl_io_operations::cio_lock() - * is called on each layer. Responsibility of this method is to add locks, - * needed by a given layer into cl_io.ci_lockset. - * - * - once locks for all layers were collected, they are sorted to avoid - * dead-locks (cl_io_locks_sort()), and enqueued. - * - * - when all locks are acquired, IO is performed; - * - * - locks are released after IO is complete. - * - * Striping introduces major additional complexity into locking. The - * fundamental problem is that it is generally unsafe to actively use (hold) - * two locks on the different OST servers at the same time, as this introduces - * inter-server dependency and can lead to cascading evictions. - * - * Basic solution is to sub-divide large read/write IOs into smaller pieces so - * that no multi-stripe locks are taken (note that this design abandons POSIX - * read/write semantics). Such pieces ideally can be executed concurrently. At - * the same time, certain types of IO cannot be sub-divived, without - * sacrificing correctness. This includes: - * - * - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee - * atomicity; - * - * - ftruncate(fd, offset), where [offset, EOF] lock has to be taken. - * - * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where - * buf is a part of memory mapped Lustre file, a lock or locks protecting buf - * has to be held together with the usual lock on [offset, offset + count]. 
- * - * Interaction with DLM - * - * In the expected setup, cl_lock is ultimately backed up by a collection of - * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is - * implemented in osc layer, that also matches DLM events (ASTs, cancellation, - * etc.) into cl_lock_operation calls. See struct osc_lock for a more detailed - * description of interaction with DLM. - */ - -/** - * Lock description. - */ -struct cl_lock_descr { - /** Object this lock is granted for. */ - struct cl_object *cld_obj; - /** Index of the first page protected by this lock. */ - pgoff_t cld_start; - /** Index of the last page (inclusive) protected by this lock. */ - pgoff_t cld_end; - /** Group ID, for group lock */ - __u64 cld_gid; - /** Lock mode. */ - enum cl_lock_mode cld_mode; - /** - * flags to enqueue lock. A combination of bit-flags from - * enum cl_enq_flags. - */ - __u32 cld_enq_flags; -}; - -#define DDESCR "%s(%d):[%lu, %lu]:%x" -#define PDESCR(descr) \ - cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \ - (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags - -const char *cl_lock_mode_name(const enum cl_lock_mode mode); - -/** - * Layered client lock. - */ -struct cl_lock { - /** List of slices. Immutable after creation. */ - struct list_head cll_layers; - /** lock attribute, extent, cl_object, etc. */ - struct cl_lock_descr cll_descr; -}; - -/** - * Per-layer part of cl_lock - * - * \see vvp_lock, lov_lock, lovsub_lock, osc_lock - */ -struct cl_lock_slice { - struct cl_lock *cls_lock; - /** Object slice corresponding to this lock slice. Immutable after - * creation. - */ - struct cl_object *cls_obj; - const struct cl_lock_operations *cls_ops; - /** Linkage into cl_lock::cll_layers. Immutable after creation. */ - struct list_head cls_linkage; -}; - -/** - * - * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops - */ -struct cl_lock_operations { - /** @{ */ - /** - * Attempts to enqueue the lock. Called top-to-bottom. 
- * - * \retval 0 this layer has enqueued the lock successfully - * \retval >0 this layer has enqueued the lock, but need to wait on - * @anchor for resources - * \retval -ve failure - * - * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(), - * \see osc_lock_enqueue() - */ - int (*clo_enqueue)(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_io *io, struct cl_sync_io *anchor); - /** - * Cancel a lock, release its DLM lock ref, while does not cancel the - * DLM lock - */ - void (*clo_cancel)(const struct lu_env *env, - const struct cl_lock_slice *slice); - /** @} */ - /** - * Destructor. Frees resources and the slice. - * - * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(), - * \see osc_lock_fini() - */ - void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice); - /** - * Optional debugging helper. Prints given lock slice. - */ - int (*clo_print)(const struct lu_env *env, - void *cookie, lu_printer_t p, - const struct cl_lock_slice *slice); -}; - -#define CL_LOCK_DEBUG(mask, env, lock, format, ...) \ -do { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ - \ - if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \ - CDEBUG(mask, format, ## __VA_ARGS__); \ - } \ -} while (0) - -#define CL_LOCK_ASSERT(expr, env, lock) do { \ - if (likely(expr)) \ - break; \ - \ - CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \ - LBUG(); \ -} while (0) - -/** @} cl_lock */ - -/** \addtogroup cl_page_list cl_page_list - * Page list used to perform collective operations on a group of pages. - * - * Pages are added to the list one by one. cl_page_list acquires a reference - * for every page in it. Page list is used to perform collective operations on - * pages: - * - * - submit pages for an immediate transfer, - * - * - own pages on behalf of certain io (waiting for each page in turn), - * - * - discard pages. 
- * - * When list is finalized, it releases references on all pages it still has. - * - * \todo XXX concurrency control. - * - * @{ - */ -struct cl_page_list { - unsigned int pl_nr; - struct list_head pl_pages; - struct task_struct *pl_owner; -}; - -/** - * A 2-queue of pages. A convenience data-type for common use case, 2-queue - * contains an incoming page list and an outgoing page list. - */ -struct cl_2queue { - struct cl_page_list c2_qin; - struct cl_page_list c2_qout; -}; - -/** @} cl_page_list */ - -/** \addtogroup cl_io cl_io - * @{ - */ -/** \struct cl_io - * I/O - * - * cl_io represents a high level I/O activity like - * read(2)/write(2)/truncate(2) system call, or cancellation of an extent - * lock. - * - * cl_io is a layered object, much like cl_{object,page,lock} but with one - * important distinction. We want to minimize number of calls to the allocator - * in the fast path, e.g., in the case of read(2) when everything is cached: - * client already owns the lock over region being read, and data are cached - * due to read-ahead. To avoid allocation of cl_io layers in such situations, - * per-layer io state is stored in the session, associated with the io, see - * struct {vvp,lov,osc}_io for example. Sessions allocation is amortized - * by using free-lists, see cl_env_get(). - * - * There is a small predefined number of possible io types, enumerated in enum - * cl_io_type. - * - * cl_io is a state machine, that can be advanced concurrently by the multiple - * threads. It is up to these threads to control the concurrency and, - * specifically, to detect when io is done, and its state can be safely - * released. 
- * - * For read/write io overall execution plan is as following: - * - * (0) initialize io state through all layers; - * - * (1) loop: prepare chunk of work to do - * - * (2) call all layers to collect locks they need to process current chunk - * - * (3) sort all locks to avoid dead-locks, and acquire them - * - * (4) process the chunk: call per-page methods - * cl_io_operations::cio_prepare_write(), - * cl_io_operations::cio_commit_write() for write) - * - * (5) release locks - * - * (6) repeat loop. - * - * To implement the "parallel IO mode", lov layer creates sub-io's (lazily to - * address allocation efficiency issues mentioned above), and returns with the - * special error condition from per-page method when current sub-io has to - * block. This causes io loop to be repeated, and lov switches to the next - * sub-io in its cl_io_operations::cio_iter_init() implementation. - */ - -/** IO types */ -enum cl_io_type { - /** read system call */ - CIT_READ = 1, - /** write system call */ - CIT_WRITE, - /** truncate, utime system calls */ - CIT_SETATTR, - /** get data version */ - CIT_DATA_VERSION, - /** - * page fault handling - */ - CIT_FAULT, - /** - * fsync system call handling - * To write out a range of file - */ - CIT_FSYNC, - /** - * Miscellaneous io. This is used for occasional io activity that - * doesn't fit into other types. Currently this is used for: - * - * - cancellation of an extent lock. This io exists as a context - * to write dirty pages from under the lock being canceled back - * to the server; - * - * - VM induced page write-out. An io context for writing page out - * for memory cleansing; - * - * - glimpse. An io context to acquire glimpse lock. - * - * - grouplock. An io context to acquire group lock. - * - * CIT_MISC io is used simply as a context in which locks and pages - * are manipulated. Such io has no internal "process", that is, - * cl_io_loop() is never called for it. 
- */ - CIT_MISC, - CIT_OP_NR -}; - -/** - * States of cl_io state machine - */ -enum cl_io_state { - /** Not initialized. */ - CIS_ZERO, - /** Initialized. */ - CIS_INIT, - /** IO iteration started. */ - CIS_IT_STARTED, - /** Locks taken. */ - CIS_LOCKED, - /** Actual IO is in progress. */ - CIS_IO_GOING, - /** IO for the current iteration finished. */ - CIS_IO_FINISHED, - /** Locks released. */ - CIS_UNLOCKED, - /** Iteration completed. */ - CIS_IT_ENDED, - /** cl_io finalized. */ - CIS_FINI -}; - -/** - * IO state private for a layer. - * - * This is usually embedded into layer session data, rather than allocated - * dynamically. - * - * \see vvp_io, lov_io, osc_io - */ -struct cl_io_slice { - struct cl_io *cis_io; - /** corresponding object slice. Immutable after creation. */ - struct cl_object *cis_obj; - /** io operations. Immutable after creation. */ - const struct cl_io_operations *cis_iop; - /** - * linkage into a list of all slices for a given cl_io, hanging off - * cl_io::ci_layers. Immutable after creation. - */ - struct list_head cis_linkage; -}; - -typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *, - struct cl_page *); - -struct cl_read_ahead { - /* - * Maximum page index the readahead window will end. - * This is determined DLM lock coverage, RPC and stripe boundary. - * cra_end is included. - */ - pgoff_t cra_end; - /* optimal RPC size for this read, by pages */ - unsigned long cra_rpc_size; - /* - * Release callback. If readahead holds resources underneath, this - * function should be called to release it. - */ - void (*cra_release)(const struct lu_env *env, void *cbdata); - /* Callback data for cra_release routine */ - void *cra_cbdata; -}; - -static inline void cl_read_ahead_release(const struct lu_env *env, - struct cl_read_ahead *ra) -{ - if (ra->cra_release) - ra->cra_release(env, ra->cra_cbdata); - memset(ra, 0, sizeof(*ra)); -} - -/** - * Per-layer io operations. 
- * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops - */ -struct cl_io_operations { - /** - * Vector of io state transition methods for every io type. - * - * \see cl_page_operations::io - */ - struct { - /** - * Prepare io iteration at a given layer. - * - * Called top-to-bottom at the beginning of each iteration of - * "io loop" (if it makes sense for this type of io). Here - * layer selects what work it will do during this iteration. - * - * \see cl_io_operations::cio_iter_fini() - */ - int (*cio_iter_init)(const struct lu_env *env, - const struct cl_io_slice *slice); - /** - * Finalize io iteration. - * - * Called bottom-to-top at the end of each iteration of "io - * loop". Here layers can decide whether IO has to be - * continued. - * - * \see cl_io_operations::cio_iter_init() - */ - void (*cio_iter_fini)(const struct lu_env *env, - const struct cl_io_slice *slice); - /** - * Collect locks for the current iteration of io. - * - * Called top-to-bottom to collect all locks necessary for - * this iteration. This methods shouldn't actually enqueue - * anything, instead it should post a lock through - * cl_io_lock_add(). Once all locks are collected, they are - * sorted and enqueued in the proper order. - */ - int (*cio_lock)(const struct lu_env *env, - const struct cl_io_slice *slice); - /** - * Finalize unlocking. - * - * Called bottom-to-top to finish layer specific unlocking - * functionality, after generic code released all locks - * acquired by cl_io_operations::cio_lock(). - */ - void (*cio_unlock)(const struct lu_env *env, - const struct cl_io_slice *slice); - /** - * Start io iteration. - * - * Once all locks are acquired, called top-to-bottom to - * commence actual IO. In the current implementation, - * top-level vvp_io_{read,write}_start() does all the work - * synchronously by calling generic_file_*(), so other layers - * are called when everything is done. 
- */ - int (*cio_start)(const struct lu_env *env, - const struct cl_io_slice *slice); - /** - * Called top-to-bottom at the end of io loop. Here layer - * might wait for an unfinished asynchronous io. - */ - void (*cio_end)(const struct lu_env *env, - const struct cl_io_slice *slice); - /** - * Called bottom-to-top to notify layers that read/write IO - * iteration finished, with \a nob bytes transferred. - */ - void (*cio_advance)(const struct lu_env *env, - const struct cl_io_slice *slice, - size_t nob); - /** - * Called once per io, bottom-to-top to release io resources. - */ - void (*cio_fini)(const struct lu_env *env, - const struct cl_io_slice *slice); - } op[CIT_OP_NR]; - - /** - * Submit pages from \a queue->c2_qin for IO, and move - * successfully submitted pages into \a queue->c2_qout. Return - * non-zero if failed to submit even the single page. If - * submission failed after some pages were moved into \a - * queue->c2_qout, completion callback with non-zero ioret is - * executed on them. - */ - int (*cio_submit)(const struct lu_env *env, - const struct cl_io_slice *slice, - enum cl_req_type crt, - struct cl_2queue *queue); - /** - * Queue async page for write. - * The difference between cio_submit and cio_queue is that - * cio_submit is for urgent request. - */ - int (*cio_commit_async)(const struct lu_env *env, - const struct cl_io_slice *slice, - struct cl_page_list *queue, int from, int to, - cl_commit_cbt cb); - /** - * Decide maximum read ahead extent - * - * \pre io->ci_type == CIT_READ - */ - int (*cio_read_ahead)(const struct lu_env *env, - const struct cl_io_slice *slice, - pgoff_t start, struct cl_read_ahead *ra); - /** - * Optional debugging helper. Print given io slice. - */ - int (*cio_print)(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct cl_io_slice *slice); -}; - -/** - * Flags to lock enqueue procedure. 
- * \ingroup cl_lock - */ -enum cl_enq_flags { - /** - * instruct server to not block, if conflicting lock is found. Instead - * -EWOULDBLOCK is returned immediately. - */ - CEF_NONBLOCK = 0x00000001, - /** - * take lock asynchronously (out of order), as it cannot - * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing. - */ - CEF_ASYNC = 0x00000002, - /** - * tell the server to instruct (though a flag in the blocking ast) an - * owner of the conflicting lock, that it can drop dirty pages - * protected by this lock, without sending them to the server. - */ - CEF_DISCARD_DATA = 0x00000004, - /** - * tell the sub layers that it must be a `real' lock. This is used for - * mmapped-buffer locks and glimpse locks that must be never converted - * into lockless mode. - * - * \see vvp_mmap_locks(), cl_glimpse_lock(). - */ - CEF_MUST = 0x00000008, - /** - * tell the sub layers that never request a `real' lock. This flag is - * not used currently. - * - * cl_io::ci_lockreq and CEF_{MUST,NEVER} flags specify lockless - * conversion policy: ci_lockreq describes generic information of lock - * requirement for this IO, especially for locks which belong to the - * object doing IO; however, lock itself may have precise requirements - * that are described by the enqueue flags. - */ - CEF_NEVER = 0x00000010, - /** - * for async glimpse lock. - */ - CEF_AGL = 0x00000020, - /** - * enqueue a lock to test DLM lock existence. - */ - CEF_PEEK = 0x00000040, - /** - * Lock match only. Used by group lock in I/O as group lock - * is known to exist. - */ - CEF_LOCK_MATCH = BIT(7), - /** - * mask of enq_flags. - */ - CEF_MASK = 0x000000ff, -}; - -/** - * Link between lock and io. Intermediate structure is needed, because the - * same lock can be part of multiple io's simultaneously. - */ -struct cl_io_lock_link { - /** linkage into one of cl_lockset lists. 
*/ - struct list_head cill_linkage; - struct cl_lock cill_lock; - /** optional destructor */ - void (*cill_fini)(const struct lu_env *env, - struct cl_io_lock_link *link); -}; -#define cill_descr cill_lock.cll_descr - -/** - * Lock-set represents a collection of locks, that io needs at a - * time. Generally speaking, client tries to avoid holding multiple locks when - * possible, because - * - * - holding extent locks over multiple ost's introduces the danger of - * "cascading timeouts"; - * - * - holding multiple locks over the same ost is still dead-lock prone, - * see comment in osc_lock_enqueue(), - * - * but there are certain situations where this is unavoidable: - * - * - O_APPEND writes have to take [0, EOF] lock for correctness; - * - * - truncate has to take [new-size, EOF] lock for correctness; - * - * - SNS has to take locks across full stripe for correctness; - * - * - in the case when user level buffer, supplied to {read,write}(file0), - * is a part of a memory mapped lustre file, client has to take a dlm - * locks on file0, and all files that back up the buffer (or a part of - * the buffer, that is being processed in the current chunk, in any - * case, there are situations where at least 2 locks are necessary). - * - * In such cases we at least try to take locks in the same consistent - * order. To this end, all locks are first collected, then sorted, and then - * enqueued. - */ -struct cl_lockset { - /** locks to be acquired. */ - struct list_head cls_todo; - /** locks acquired. */ - struct list_head cls_done; -}; - -/** - * Lock requirements(demand) for IO. It should be cl_io_lock_req, - * but 'req' is always to be thought as 'request' :-) - */ -enum cl_io_lock_dmd { - /** Always lock data (e.g., O_APPEND). */ - CILR_MANDATORY = 0, - /** Layers are free to decide between local and global locking. */ - CILR_MAYBE, - /** Never lock: there is no cache (e.g., lockless IO). 
*/ - CILR_NEVER -}; - -enum cl_fsync_mode { - /** start writeback, do not wait for them to finish */ - CL_FSYNC_NONE = 0, - /** start writeback and wait for them to finish */ - CL_FSYNC_LOCAL = 1, - /** discard all of dirty pages in a specific file range */ - CL_FSYNC_DISCARD = 2, - /** start writeback and make sure they have reached storage before - * return. OST_SYNC RPC must be issued and finished - */ - CL_FSYNC_ALL = 3 -}; - -struct cl_io_rw_common { - loff_t crw_pos; - size_t crw_count; - int crw_nonblock; -}; - -/** - * State for io. - * - * cl_io is shared by all threads participating in this IO (in current - * implementation only one thread advances IO, but parallel IO design and - * concurrent copy_*_user() require multiple threads acting on the same IO. It - * is up to these threads to serialize their activities, including updates to - * mutable cl_io fields. - */ -struct cl_io { - /** type of this IO. Immutable after creation. */ - enum cl_io_type ci_type; - /** current state of cl_io state machine. */ - enum cl_io_state ci_state; - /** main object this io is against. Immutable after creation. */ - struct cl_object *ci_obj; - /** - * Upper layer io, of which this io is a part of. Immutable after - * creation. - */ - struct cl_io *ci_parent; - /** List of slices. Immutable after creation. */ - struct list_head ci_layers; - /** list of locks (to be) acquired by this io. */ - struct cl_lockset ci_lockset; - /** lock requirements, this is just a help info for sublayers. 
*/ - enum cl_io_lock_dmd ci_lockreq; - union { - struct cl_rd_io { - struct cl_io_rw_common rd; - } ci_rd; - struct cl_wr_io { - struct cl_io_rw_common wr; - int wr_append; - int wr_sync; - } ci_wr; - struct cl_io_rw_common ci_rw; - struct cl_setattr_io { - struct ost_lvb sa_attr; - unsigned int sa_attr_flags; - unsigned int sa_valid; - int sa_stripe_index; - const struct lu_fid *sa_parent_fid; - } ci_setattr; - struct cl_data_version_io { - u64 dv_data_version; - int dv_flags; - } ci_data_version; - struct cl_fault_io { - /** page index within file. */ - pgoff_t ft_index; - /** bytes valid byte on a faulted page. */ - size_t ft_nob; - /** writable page? for nopage() only */ - int ft_writable; - /** page of an executable? */ - int ft_executable; - /** page_mkwrite() */ - int ft_mkwrite; - /** resulting page */ - struct cl_page *ft_page; - } ci_fault; - struct cl_fsync_io { - loff_t fi_start; - loff_t fi_end; - /** file system level fid */ - struct lu_fid *fi_fid; - enum cl_fsync_mode fi_mode; - /* how many pages were written/discarded */ - unsigned int fi_nr_written; - } ci_fsync; - } u; - struct cl_2queue ci_queue; - size_t ci_nob; - int ci_result; - unsigned int ci_continue:1, - /** - * This io has held grouplock, to inform sublayers that - * don't do lockless i/o. - */ - ci_no_srvlock:1, - /** - * The whole IO need to be restarted because layout has been changed - */ - ci_need_restart:1, - /** - * to not refresh layout - the IO issuer knows that the layout won't - * change(page operations, layout change causes all page to be - * discarded), or it doesn't matter if it changes(sync). - */ - ci_ignore_layout:1, - /** - * Check if layout changed after the IO finishes. Mainly for HSM - * requirement. If IO occurs to openning files, it doesn't need to - * verify layout because HSM won't release openning files. - * Right now, only two operations need to verify layout: glimpse - * and setattr. 
- */ - ci_verify_layout:1, - /** - * file is released, restore has to be triggered by vvp layer - */ - ci_restore_needed:1, - /** - * O_NOATIME - */ - ci_noatime:1; - /** - * Number of pages owned by this IO. For invariant checking. - */ - unsigned int ci_owned_nr; -}; - -/** @} cl_io */ - -/** - * Per-transfer attributes. - */ -struct cl_req_attr { - enum cl_req_type cra_type; - u64 cra_flags; - struct cl_page *cra_page; - - /** Generic attributes for the server consumption. */ - struct obdo *cra_oa; - /** Jobid */ - char cra_jobid[LUSTRE_JOBID_SIZE]; -}; - -enum cache_stats_item { - /** how many cache lookups were performed */ - CS_lookup = 0, - /** how many times cache lookup resulted in a hit */ - CS_hit, - /** how many entities are in the cache right now */ - CS_total, - /** how many entities in the cache are actively used (and cannot be - * evicted) right now - */ - CS_busy, - /** how many entities were created at all */ - CS_create, - CS_NR -}; - -#define CS_NAMES { "lookup", "hit", "total", "busy", "create" } - -/** - * Stats for a generic cache (similar to inode, lu_object, etc. caches). - */ -struct cache_stats { - const char *cs_name; - atomic_t cs_stats[CS_NR]; -}; - -/** These are not exported so far */ -void cache_stats_init(struct cache_stats *cs, const char *name); - -/** - * Client-side site. This represents particular client stack. "Global" - * variables should (directly or indirectly) be added here to allow multiple - * clients to co-exist in the single address space. - */ -struct cl_site { - struct lu_site cs_lu; - /** - * Statistical counters. Atomics do not scale, something better like - * per-cpu counters is needed. - * - * These are exported as /sys/kernel/debug/lustre/llite/.../site - * - * When interpreting keep in mind that both sub-locks (and sub-pages) - * and top-locks (and top-pages) are accounted here. 
- */ - struct cache_stats cs_pages; - atomic_t cs_pages_state[CPS_NR]; -}; - -int cl_site_init(struct cl_site *s, struct cl_device *top); -void cl_site_fini(struct cl_site *s); -void cl_stack_fini(const struct lu_env *env, struct cl_device *cl); - -/** - * Output client site statistical counters into a buffer. Suitable for - * ll_rd_*()-style functions. - */ -int cl_site_stats_print(const struct cl_site *site, struct seq_file *m); - -/** - * \name helpers - * - * Type conversion and accessory functions. - */ -/** @{ */ - -static inline struct cl_site *lu2cl_site(const struct lu_site *site) -{ - return container_of(site, struct cl_site, cs_lu); -} - -static inline int lu_device_is_cl(const struct lu_device *d) -{ - return d->ld_type->ldt_tags & LU_DEVICE_CL; -} - -static inline struct cl_device *lu2cl_dev(const struct lu_device *d) -{ - LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d)); - return container_of_safe(d, struct cl_device, cd_lu_dev); -} - -static inline struct lu_device *cl2lu_dev(struct cl_device *d) -{ - return &d->cd_lu_dev; -} - -static inline struct cl_object *lu2cl(const struct lu_object *o) -{ - LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); - return container_of_safe(o, struct cl_object, co_lu); -} - -static inline const struct cl_object_conf * -lu2cl_conf(const struct lu_object_conf *conf) -{ - return container_of_safe(conf, struct cl_object_conf, coc_lu); -} - -static inline struct cl_object *cl_object_next(const struct cl_object *obj) -{ - return obj ? 
lu2cl(lu_object_next(&obj->co_lu)) : NULL; -} - -static inline struct cl_device *cl_object_device(const struct cl_object *o) -{ - LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev)); - return container_of_safe(o->co_lu.lo_dev, struct cl_device, cd_lu_dev); -} - -static inline struct cl_object_header *luh2coh(const struct lu_object_header *h) -{ - return container_of_safe(h, struct cl_object_header, coh_lu); -} - -static inline struct cl_site *cl_object_site(const struct cl_object *obj) -{ - return lu2cl_site(obj->co_lu.lo_dev->ld_site); -} - -static inline -struct cl_object_header *cl_object_header(const struct cl_object *obj) -{ - return luh2coh(obj->co_lu.lo_header); -} - -static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t) -{ - return lu_device_init(&d->cd_lu_dev, t); -} - -static inline void cl_device_fini(struct cl_device *d) -{ - lu_device_fini(&d->cd_lu_dev); -} - -void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice, - struct cl_object *obj, pgoff_t index, - const struct cl_page_operations *ops); -void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, - struct cl_object *obj, - const struct cl_lock_operations *ops); -void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, - struct cl_object *obj, const struct cl_io_operations *ops); -/** @} helpers */ - -/** \defgroup cl_object cl_object - * @{ - */ -struct cl_object *cl_object_top(struct cl_object *o); -struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd, - const struct lu_fid *fid, - const struct cl_object_conf *c); - -int cl_object_header_init(struct cl_object_header *h); -void cl_object_put(const struct lu_env *env, struct cl_object *o); -void cl_object_get(struct cl_object *o); -void cl_object_attr_lock(struct cl_object *o); -void cl_object_attr_unlock(struct cl_object *o); -int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); -int 
cl_object_attr_update(const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned int valid); -int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, - struct ost_lvb *lvb); -int cl_conf_set(const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf); -int cl_object_prune(const struct lu_env *env, struct cl_object *obj); -void cl_object_kill(const struct lu_env *env, struct cl_object *obj); -int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, - struct lov_user_md __user *lum); -int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj, - struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap, - size_t *buflen); -int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj, - struct cl_layout *cl); -loff_t cl_object_maxbytes(struct cl_object *obj); - -/** - * Returns true, iff \a o0 and \a o1 are slices of the same object. - */ -static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1) -{ - return cl_object_header(o0) == cl_object_header(o1); -} - -static inline void cl_object_page_init(struct cl_object *clob, int size) -{ - clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize; - cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size); - WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512); -} - -static inline void *cl_object_page_slice(struct cl_object *clob, - struct cl_page *page) -{ - return (void *)((char *)page + clob->co_slice_off); -} - -/** - * Return refcount of cl_object. 
- */ -static inline int cl_object_refc(struct cl_object *clob) -{ - struct lu_object_header *header = clob->co_lu.lo_header; - - return atomic_read(&header->loh_ref); -} - -/** @} cl_object */ - -/** \defgroup cl_page cl_page - * @{ - */ -enum { - CLP_GANG_OKAY = 0, - CLP_GANG_RESCHED, - CLP_GANG_AGAIN, - CLP_GANG_ABORT -}; - -/* callback of cl_page_gang_lookup() */ -struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj, - pgoff_t idx, struct page *vmpage, - enum cl_page_type type); -struct cl_page *cl_page_alloc(const struct lu_env *env, - struct cl_object *o, pgoff_t ind, - struct page *vmpage, - enum cl_page_type type); -void cl_page_get(struct cl_page *page); -void cl_page_put(const struct lu_env *env, struct cl_page *page); -void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer, - const struct cl_page *pg); -void cl_page_header_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_page *pg); -struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj); - -const struct cl_page_slice *cl_page_at(const struct cl_page *page, - const struct lu_device_type *dtype); - -/** - * \name ownership - * - * Functions dealing with the ownership of page by io. 
- */ -/** @{ */ - -int cl_page_own(const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -int cl_page_own_try(const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_assume(const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_unassume(const struct lu_env *env, - struct cl_io *io, struct cl_page *pg); -void cl_page_disown(const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_disown0(const struct lu_env *env, - struct cl_io *io, struct cl_page *pg); -int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io); - -/** @} ownership */ - -/** - * \name transfer - * - * Functions dealing with the preparation of a page for a transfer, and - * tracking transfer state. - */ -/** @{ */ -int cl_page_prep(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt); -void cl_page_completion(const struct lu_env *env, - struct cl_page *pg, enum cl_req_type crt, int ioret); -int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg, - enum cl_req_type crt); -int cl_page_cache_add(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt); -void cl_page_clip(const struct lu_env *env, struct cl_page *pg, - int from, int to); -int cl_page_cancel(const struct lu_env *env, struct cl_page *page); -int cl_page_flush(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); - -/** @} transfer */ - -/** - * \name helper routines - * Functions to discard, delete and export a cl_page. 
- */ -/** @{ */ -void cl_page_discard(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -void cl_page_delete(const struct lu_env *env, struct cl_page *pg); -int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); -void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); -loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); -pgoff_t cl_index(const struct cl_object *obj, loff_t offset); -size_t cl_page_size(const struct cl_object *obj); -int cl_pages_prune(const struct lu_env *env, struct cl_object *obj); - -void cl_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_lock *lock); -void cl_lock_descr_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_lock_descr *descr); -/* @} helper */ - -/** - * Data structure managing a client's cached pages. A count of - * "unstable" pages is maintained, and an LRU of clean pages is - * maintained. "unstable" pages are pages pinned by the ptlrpc - * layer for recovery purposes. - */ -struct cl_client_cache { - /** - * # of client cache refcount - * # of users (OSCs) + 2 (held by llite and lov) - */ - atomic_t ccc_users; - /** - * # of threads are doing shrinking - */ - unsigned int ccc_lru_shrinkers; - /** - * # of LRU entries available - */ - atomic_long_t ccc_lru_left; - /** - * List of entities(OSCs) for this LRU cache - */ - struct list_head ccc_lru; - /** - * Max # of LRU entries - */ - unsigned long ccc_lru_max; - /** - * Lock to protect ccc_lru list - */ - spinlock_t ccc_lru_lock; - /** - * Set if unstable check is enabled - */ - unsigned int ccc_unstable_check:1; - /** - * # of unstable pages for this mount point - */ - atomic_long_t ccc_unstable_nr; - /** - * Waitq for awaiting unstable pages to reach zero. 
- * Used at umounting time and signaled on BRW commit - */ - wait_queue_head_t ccc_unstable_waitq; - -}; - -/** - * cl_cache functions - */ -struct cl_client_cache *cl_cache_init(unsigned long lru_page_max); -void cl_cache_incref(struct cl_client_cache *cache); -void cl_cache_decref(struct cl_client_cache *cache); - -/** @} cl_page */ - -/** \defgroup cl_lock cl_lock - * @{ - */ - -int cl_lock_request(const struct lu_env *env, struct cl_io *io, - struct cl_lock *lock); -int cl_lock_init(const struct lu_env *env, struct cl_lock *lock, - const struct cl_io *io); -void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock); -const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, - const struct lu_device_type *dtype); -void cl_lock_release(const struct lu_env *env, struct cl_lock *lock); -int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io, - struct cl_lock *lock, struct cl_sync_io *anchor); -void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); - -/** @} cl_lock */ - -/** \defgroup cl_io cl_io - * @{ - */ - -int cl_io_init(const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, loff_t pos, size_t count); -int cl_io_loop(const struct lu_env *env, struct cl_io *io); - -void cl_io_fini(const struct lu_env *env, struct cl_io *io); -int cl_io_iter_init(const struct lu_env *env, struct cl_io *io); -void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io); -int cl_io_lock(const struct lu_env *env, struct cl_io *io); -void cl_io_unlock(const struct lu_env *env, struct cl_io *io); -int cl_io_start(const struct lu_env *env, struct cl_io *io); -void cl_io_end(const struct lu_env *env, struct cl_io *io); -int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, - struct cl_io_lock_link *link); 
-int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr); -int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue); -int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - long timeout); -int cl_io_commit_async(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, int from, int to, - cl_commit_cbt cb); -int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io, - pgoff_t start, struct cl_read_ahead *ra); -int cl_io_is_going(const struct lu_env *env); - -/** - * True, iff \a io is an O_APPEND write(2). - */ -static inline int cl_io_is_append(const struct cl_io *io) -{ - return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append; -} - -static inline int cl_io_is_sync_write(const struct cl_io *io) -{ - return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync; -} - -static inline int cl_io_is_mkwrite(const struct cl_io *io) -{ - return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite; -} - -/** - * True, iff \a io is a truncate(2). - */ -static inline int cl_io_is_trunc(const struct cl_io *io) -{ - return io->ci_type == CIT_SETATTR && - (io->u.ci_setattr.sa_valid & ATTR_SIZE); -} - -struct cl_io *cl_io_top(struct cl_io *io); - -#define CL_IO_SLICE_CLEAN(foo_io, base) \ -do { \ - typeof(foo_io) __foo_io = (foo_io); \ - \ - BUILD_BUG_ON(offsetof(typeof(*__foo_io), base) != 0); \ - memset(&__foo_io->base + 1, 0, \ - sizeof(*__foo_io) - sizeof(__foo_io->base)); \ -} while (0) - -/** @} cl_io */ - -/** \defgroup cl_page_list cl_page_list - * @{ - */ - -/** - * Last page in the page list. 
- */ -static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) -{ - LASSERT(plist->pl_nr > 0); - return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch); -} - -static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist) -{ - LASSERT(plist->pl_nr > 0); - return list_entry(plist->pl_pages.next, struct cl_page, cp_batch); -} - -/** - * Iterate over pages in a page list. - */ -#define cl_page_list_for_each(page, list) \ - list_for_each_entry((page), &(list)->pl_pages, cp_batch) - -/** - * Iterate over pages in a page list, taking possible removals into account. - */ -#define cl_page_list_for_each_safe(page, temp, list) \ - list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) - -void cl_page_list_init(struct cl_page_list *plist); -void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page); -void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src, - struct cl_page *page); -void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src, - struct cl_page *page); -void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head); -void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist, - struct cl_page *page); -void cl_page_list_disown(const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); -void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist); - -void cl_2queue_init(struct cl_2queue *queue); -void cl_2queue_disown(const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_discard(const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue); -void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page); - -/** @} cl_page_list */ - -void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj, - struct cl_req_attr *attr); - -/** \defgroup cl_sync_io cl_sync_io - * 
@{ - */ - -/** - * Anchor for synchronous transfer. This is allocated on a stack by thread - * doing synchronous transfer, and a pointer to this structure is set up in - * every page submitted for transfer. Transfer completion routine updates - * anchor and wakes up waiting thread when transfer is complete. - */ -struct cl_sync_io { - /** number of pages yet to be transferred. */ - atomic_t csi_sync_nr; - /** error code. */ - int csi_sync_rc; - /** barrier of destroy this structure */ - atomic_t csi_barrier; - /** completion to be signaled when transfer is complete. */ - wait_queue_head_t csi_waitq; - /** callback to invoke when this IO is finished */ - void (*csi_end_io)(const struct lu_env *, - struct cl_sync_io *); -}; - -void cl_sync_io_init(struct cl_sync_io *anchor, int nr, - void (*end)(const struct lu_env *, struct cl_sync_io *)); -int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor, - long timeout); -void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor, - int ioret); -void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor); - -/** @} cl_sync_io */ - -/** \defgroup cl_env cl_env - * - * lu_env handling for a client. - * - * lu_env is an environment within which lustre code executes. Its major part - * is lu_context---a fast memory allocation mechanism that is used to conserve - * precious kernel stack space. Originally lu_env was designed for a server, - * where - * - * - there is a (mostly) fixed number of threads, and - * - * - call chains have no non-lustre portions inserted between lustre code. - * - * On a client both these assumption fails, because every user thread can - * potentially execute lustre code as part of a system call, and lustre calls - * into VFS or MM that call back into lustre. 
- * - * To deal with that, cl_env wrapper functions implement the following - * optimizations: - * - * - allocation and destruction of environment is amortized by caching no - * longer used environments instead of destroying them; - * - * \see lu_env, lu_context, lu_context_key - * @{ - */ - -struct lu_env *cl_env_get(u16 *refcheck); -struct lu_env *cl_env_alloc(u16 *refcheck, __u32 tags); -void cl_env_put(struct lu_env *env, u16 *refcheck); -unsigned int cl_env_cache_purge(unsigned int nr); -struct lu_env *cl_env_percpu_get(void); -void cl_env_percpu_put(struct lu_env *env); - -/** @} cl_env */ - -/* - * Misc - */ -void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb); - -struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, - struct lu_device_type *ldt, - struct lu_device *next); -/** @} clio */ - -int cl_global_init(void); -void cl_global_fini(void); - -#endif /* _LINUX_CL_OBJECT_H */ diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h deleted file mode 100644 index 7d119c1a0469..000000000000 --- a/drivers/staging/lustre/lustre/include/interval_tree.h +++ /dev/null @@ -1,119 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/interval_tree.h - * - * Author: Huang Wei - * Author: Jay Xiong - */ - -#ifndef _INTERVAL_H__ -#define _INTERVAL_H__ - -#include -#include -#include - -struct interval_node { - struct interval_node *in_left; - struct interval_node *in_right; - struct interval_node *in_parent; - unsigned in_color:1, - in_intree:1, /** set if the node is in tree */ - in_res1:30; - __u8 in_res2[4]; /** tags, 8-bytes aligned */ - __u64 in_max_high; - struct interval_node_extent { - __u64 start; - __u64 end; - } in_extent; -}; - -enum interval_iter { - INTERVAL_ITER_CONT = 1, - INTERVAL_ITER_STOP = 2 -}; - -static inline int interval_is_intree(struct interval_node *node) -{ - return node->in_intree == 1; -} - -static inline __u64 interval_low(struct interval_node *node) -{ - return node->in_extent.start; -} - -static inline __u64 interval_high(struct interval_node *node) -{ - return node->in_extent.end; -} - -static inline int interval_set(struct interval_node *node, - __u64 start, __u64 end) -{ - if (start > end) - return -ERANGE; - node->in_extent.start = start; - node->in_extent.end = end; - node->in_max_high = end; - return 0; -} - -/* - * Rules to write an interval callback. - * - the callback returns INTERVAL_ITER_STOP when it thinks the iteration - * should be stopped. It will then cause the iteration function to return - * immediately with return value INTERVAL_ITER_STOP. 
- * - callbacks for interval_iterate and interval_iterate_reverse: Every - * nodes in the tree will be set to @node before the callback being called - * - callback for interval_search: Only overlapped node will be set to @node - * before the callback being called. - */ -typedef enum interval_iter (*interval_callback_t)(struct interval_node *node, - void *args); - -struct interval_node *interval_insert(struct interval_node *node, - struct interval_node **root); -void interval_erase(struct interval_node *node, struct interval_node **root); - -/* - * Search the extents in the tree and call @func for each overlapped - * extents. - */ -enum interval_iter interval_search(struct interval_node *root, - struct interval_node_extent *ex, - interval_callback_t func, void *data); - -enum interval_iter interval_iterate_reverse(struct interval_node *root, - interval_callback_t func, - void *data); - -#endif diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h deleted file mode 100644 index 0433b79efdcb..000000000000 --- a/drivers/staging/lustre/lustre/include/llog_swab.h +++ /dev/null @@ -1,67 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2014, Intel Corporation. - * - * Copyright 2015 Cray Inc, all rights reserved. - * Author: Ben Evans. - * - * We assume all nodes are either little-endian or big-endian, and we - * always send messages in the sender's native format. The receiver - * detects the message format by checking the 'magic' field of the message - * (see lustre_msg_swabbed() below). - * - * Each type has corresponding 'lustre_swab_xxxtypexxx()' routines - * are implemented in ptlrpc/pack_generic.c. These 'swabbers' convert the - * type from "other" endian, in-place in the message buffer. - * - * A swabber takes a single pointer argument. The caller must already have - * verified that the length of the message buffer >= sizeof (type). - * - * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine - * may be defined that swabs just the variable part, after the caller has - * verified that the message buffer is large enough. 
- */ - -#ifndef _LLOG_SWAB_H_ -#define _LLOG_SWAB_H_ - -#include - -struct lustre_cfg; - -void lustre_swab_lu_fid(struct lu_fid *fid); -void lustre_swab_ost_id(struct ost_id *oid); -void lustre_swab_llogd_body(struct llogd_body *d); -void lustre_swab_llog_hdr(struct llog_log_hdr *h); -void lustre_swab_llogd_conn_body(struct llogd_conn_body *d); -void lustre_swab_llog_rec(struct llog_rec_hdr *rec); -void lustre_swab_lu_seq_range(struct lu_seq_range *range); -void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg); -void lustre_swab_cfg_marker(struct cfg_marker *marker, - int swab, int size); - -#endif diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h deleted file mode 100644 index 495e6f5f676b..000000000000 --- a/drivers/staging/lustre/lustre/include/lprocfs_status.h +++ /dev/null @@ -1,646 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lprocfs_status.h - * - * Top level header file for LProc SNMP - * - * Author: Hariharan Thantry thantry@users.sourceforge.net - */ -#ifndef _LPROCFS_SNMP_H -#define _LPROCFS_SNMP_H - -#include -#include -#include -#include -#include - -#include -#include - -struct lprocfs_vars { - const char *name; - const struct file_operations *fops; - void *data; - /** - * sysfs file mode. - */ - umode_t proc_mode; -}; - -struct lprocfs_static_vars { - struct lprocfs_vars *obd_vars; - const struct attribute_group *sysfs_vars; -}; - -/* if we find more consumers this could be generalized */ -#define OBD_HIST_MAX 32 -struct obd_histogram { - spinlock_t oh_lock; - unsigned long oh_buckets[OBD_HIST_MAX]; -}; - -enum { - BRW_R_PAGES = 0, - BRW_W_PAGES, - BRW_R_RPC_HIST, - BRW_W_RPC_HIST, - BRW_R_IO_TIME, - BRW_W_IO_TIME, - BRW_R_DISCONT_PAGES, - BRW_W_DISCONT_PAGES, - BRW_R_DISCONT_BLOCKS, - BRW_W_DISCONT_BLOCKS, - BRW_R_DISK_IOSIZE, - BRW_W_DISK_IOSIZE, - BRW_R_DIO_FRAGS, - BRW_W_DIO_FRAGS, - BRW_LAST, -}; - -struct brw_stats { - struct obd_histogram hist[BRW_LAST]; -}; - -enum { - RENAME_SAMEDIR_SIZE = 0, - RENAME_CROSSDIR_SRC_SIZE, - RENAME_CROSSDIR_TGT_SIZE, - RENAME_LAST, -}; - -struct rename_stats { - struct obd_histogram hist[RENAME_LAST]; -}; - -/* An lprocfs counter can be configured using the enum bit masks below. - * - * LPROCFS_CNTR_EXTERNALLOCK indicates that an external lock already - * protects this counter from concurrent updates. If not specified, - * lprocfs an internal per-counter lock variable. External locks are - * not used to protect counter increments, but are used to protect - * counter readout and resets. - * - * LPROCFS_CNTR_AVGMINMAX indicates a multi-valued counter samples, - * (i.e. counter can be incremented by more than "1"). 
When specified, - * the counter maintains min, max and sum in addition to a simple - * invocation count. This allows averages to be computed. - * If not specified, the counter is an increment-by-1 counter. - * min, max, sum, etc. are not maintained. - * - * LPROCFS_CNTR_STDDEV indicates that the counter should track sum of - * squares (for multi-valued counter samples only). This allows - * external computation of standard deviation, but involves a 64-bit - * multiply per counter increment. - */ - -enum { - LPROCFS_CNTR_EXTERNALLOCK = 0x0001, - LPROCFS_CNTR_AVGMINMAX = 0x0002, - LPROCFS_CNTR_STDDEV = 0x0004, - - /* counter data type */ - LPROCFS_TYPE_REGS = 0x0100, - LPROCFS_TYPE_BYTES = 0x0200, - LPROCFS_TYPE_PAGES = 0x0400, - LPROCFS_TYPE_CYCLE = 0x0800, -}; - -#define LC_MIN_INIT ((~(__u64)0) >> 1) - -struct lprocfs_counter_header { - unsigned int lc_config; - const char *lc_name; /* must be static */ - const char *lc_units; /* must be static */ -}; - -struct lprocfs_counter { - __s64 lc_count; - __s64 lc_min; - __s64 lc_max; - __s64 lc_sumsquare; - /* - * Every counter has lc_array_sum[0], while lc_array_sum[1] is only - * for irq context counter, i.e. 
stats with - * LPROCFS_STATS_FLAG_IRQ_SAFE flag, its counter need - * lc_array_sum[1] - */ - __s64 lc_array_sum[1]; -}; - -#define lc_sum lc_array_sum[0] -#define lc_sum_irq lc_array_sum[1] - -struct lprocfs_percpu { -#ifndef __GNUC__ - __s64 pad; -#endif - struct lprocfs_counter lp_cntr[0]; -}; - -enum lprocfs_stats_lock_ops { - LPROCFS_GET_NUM_CPU = 0x0001, /* number allocated per-CPU stats */ - LPROCFS_GET_SMP_ID = 0x0002, /* current stat to be updated */ -}; - -enum lprocfs_stats_flags { - LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */ - LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu - * area and need locking - */ - LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */ -}; - -enum lprocfs_fields_flags { - LPROCFS_FIELDS_FLAGS_CONFIG = 0x0001, - LPROCFS_FIELDS_FLAGS_SUM = 0x0002, - LPROCFS_FIELDS_FLAGS_MIN = 0x0003, - LPROCFS_FIELDS_FLAGS_MAX = 0x0004, - LPROCFS_FIELDS_FLAGS_AVG = 0x0005, - LPROCFS_FIELDS_FLAGS_SUMSQUARE = 0x0006, - LPROCFS_FIELDS_FLAGS_COUNT = 0x0007, -}; - -struct lprocfs_stats { - /* # of counters */ - unsigned short ls_num; - /* 1 + the biggest cpu # whose ls_percpu slot has been allocated */ - unsigned short ls_biggest_alloc_num; - enum lprocfs_stats_flags ls_flags; - /* Lock used when there are no percpu stats areas; For percpu stats, - * it is used to protect ls_biggest_alloc_num change - */ - spinlock_t ls_lock; - - /* has ls_num of counter headers */ - struct lprocfs_counter_header *ls_cnt_header; - struct lprocfs_percpu *ls_percpu[0]; -}; - -#define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC) - -/* Pack all opcodes down into a single monotonically increasing index */ -static inline int opcode_offset(__u32 opc) -{ - if (opc < OST_LAST_OPC) { - /* OST opcode */ - return (opc - OST_FIRST_OPC); - } else if (opc < MDS_LAST_OPC) { - /* MDS opcode */ - return (opc - MDS_FIRST_OPC + - OPC_RANGE(OST)); - } else if (opc < LDLM_LAST_OPC) { - /* LDLM Opcode */ - return (opc - LDLM_FIRST_OPC + - 
OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < MGS_LAST_OPC) { - /* MGS Opcode */ - return (opc - MGS_FIRST_OPC + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < OBD_LAST_OPC) { - /* OBD Ping */ - return (opc - OBD_FIRST_OPC + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < LLOG_LAST_OPC) { - /* LLOG Opcode */ - return (opc - LLOG_FIRST_OPC + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < QUOTA_LAST_OPC) { - /* LQUOTA Opcode */ - return (opc - QUOTA_FIRST_OPC + - OPC_RANGE(LLOG) + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < SEQ_LAST_OPC) { - /* SEQ opcode */ - return (opc - SEQ_FIRST_OPC + - OPC_RANGE(QUOTA) + - OPC_RANGE(LLOG) + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < SEC_LAST_OPC) { - /* SEC opcode */ - return (opc - SEC_FIRST_OPC + - OPC_RANGE(SEQ) + - OPC_RANGE(QUOTA) + - OPC_RANGE(LLOG) + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < FLD_LAST_OPC) { - /* FLD opcode */ - return (opc - FLD_FIRST_OPC + - OPC_RANGE(SEC) + - OPC_RANGE(SEQ) + - OPC_RANGE(QUOTA) + - OPC_RANGE(LLOG) + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else { - /* Unknown Opcode */ - return -1; - } -} - -#define LUSTRE_MAX_OPCODES (OPC_RANGE(OST) + \ - OPC_RANGE(MDS) + \ - OPC_RANGE(LDLM) + \ - OPC_RANGE(MGS) + \ - OPC_RANGE(OBD) + \ - OPC_RANGE(LLOG) + \ - OPC_RANGE(SEC) + \ - OPC_RANGE(SEQ) + \ - OPC_RANGE(SEC) + \ - OPC_RANGE(FLD)) - -#define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \ - OPC_RANGE(EXTRA)) - -enum { - PTLRPC_REQWAIT_CNTR = 0, - PTLRPC_REQQDEPTH_CNTR, - PTLRPC_REQACTIVE_CNTR, - PTLRPC_TIMEOUT, - PTLRPC_REQBUF_AVAIL_CNTR, - PTLRPC_LAST_CNTR 
-}; - -#define PTLRPC_FIRST_CNTR PTLRPC_REQWAIT_CNTR - -enum { - LDLM_GLIMPSE_ENQUEUE = 0, - LDLM_PLAIN_ENQUEUE, - LDLM_EXTENT_ENQUEUE, - LDLM_FLOCK_ENQUEUE, - LDLM_IBITS_ENQUEUE, - MDS_REINT_SETATTR, - MDS_REINT_CREATE, - MDS_REINT_LINK, - MDS_REINT_UNLINK, - MDS_REINT_RENAME, - MDS_REINT_OPEN, - MDS_REINT_SETXATTR, - BRW_READ_BYTES, - BRW_WRITE_BYTES, - EXTRA_LAST_OPC -}; - -#define EXTRA_FIRST_OPC LDLM_GLIMPSE_ENQUEUE -/* class_obd.c */ -extern struct dentry *debugfs_lustre_root; -extern struct kobject *lustre_kobj; - -struct obd_device; -struct obd_histogram; - -/* Days / hours / mins / seconds format */ -struct dhms { - int d, h, m, s; -}; - -static inline void s2dhms(struct dhms *ts, time64_t secs64) -{ - unsigned int secs; - - ts->d = div_u64_rem(secs64, 86400, &secs); - ts->h = secs / 3600; - secs = secs % 3600; - ts->m = secs / 60; - ts->s = secs % 60; -} - -#define DHMS_FMT "%dd%dh%02dm%02ds" -#define DHMS_VARS(x) (x)->d, (x)->h, (x)->m, (x)->s - -#define JOBSTATS_JOBID_VAR_MAX_LEN 20 -#define JOBSTATS_DISABLE "disable" -#define JOBSTATS_PROCNAME_UID "procname_uid" -#define JOBSTATS_NODELOCAL "nodelocal" - -/* obd_config.c */ -void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg)); - -int lprocfs_write_frac_helper(const char __user *buffer, - unsigned long count, int *val, int mult); -int lprocfs_read_frac_helper(char *buffer, unsigned long count, - long val, int mult); - -int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, - unsigned int cpuid); -int lprocfs_stats_lock(struct lprocfs_stats *stats, - enum lprocfs_stats_lock_ops opc, - unsigned long *flags); -void lprocfs_stats_unlock(struct lprocfs_stats *stats, - enum lprocfs_stats_lock_ops opc, - unsigned long *flags); - -static inline unsigned int -lprocfs_stats_counter_size(struct lprocfs_stats *stats) -{ - unsigned int percpusize; - - percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]); - - /* irq safe stats need lc_array_sum[1] */ - if ((stats->ls_flags 
& LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) - percpusize += stats->ls_num * sizeof(__s64); - - if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0) - percpusize = L1_CACHE_ALIGN(percpusize); - - return percpusize; -} - -static inline struct lprocfs_counter * -lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid, - int index) -{ - struct lprocfs_counter *cntr; - - cntr = &stats->ls_percpu[cpuid]->lp_cntr[index]; - - if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) - cntr = (void *)cntr + index * sizeof(__s64); - - return cntr; -} - -/* Two optimized LPROCFS counter increment functions are provided: - * lprocfs_counter_incr(cntr, value) - optimized for by-one counters - * lprocfs_counter_add(cntr) - use for multi-valued counters - * Counter data layout allows config flag, counter lock and the - * count itself to reside within a single cache line. - */ - -void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount); -void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount); - -#define lprocfs_counter_incr(stats, idx) \ - lprocfs_counter_add(stats, idx, 1) -#define lprocfs_counter_decr(stats, idx) \ - lprocfs_counter_sub(stats, idx, 1) - -__s64 lprocfs_read_helper(struct lprocfs_counter *lc, - struct lprocfs_counter_header *header, - enum lprocfs_stats_flags flags, - enum lprocfs_fields_flags field); -__u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx, - enum lprocfs_fields_flags field); - -extern struct lprocfs_stats * -lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags); -void lprocfs_clear_stats(struct lprocfs_stats *stats); -void lprocfs_free_stats(struct lprocfs_stats **stats); -void lprocfs_counter_init(struct lprocfs_stats *stats, int index, - unsigned int conf, const char *name, - const char *units); -struct obd_export; -int lprocfs_exp_cleanup(struct obd_export *exp); -extern const struct file_operations lprocfs_stats_seq_fops; - -/* lprocfs_status.c */ -void 
ldebugfs_add_vars(struct dentry *parent, struct lprocfs_vars *var, - void *data); - -int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list, - const struct attribute_group *attrs); -int lprocfs_obd_cleanup(struct obd_device *obd); - -/* Generic callbacks */ - -int lprocfs_rd_uint(struct seq_file *m, void *data); -int lprocfs_wr_uint(struct file *file, const char __user *buffer, - unsigned long count, void *data); -int lprocfs_rd_server_uuid(struct seq_file *m, void *data); -int lprocfs_rd_conn_uuid(struct seq_file *m, void *data); -int lprocfs_rd_import(struct seq_file *m, void *data); -int lprocfs_rd_state(struct seq_file *m, void *data); -int lprocfs_rd_connect_flags(struct seq_file *m, void *data); - -struct adaptive_timeout; -int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at); -int lprocfs_rd_timeouts(struct seq_file *m, void *data); -int lprocfs_wr_ping(struct file *file, const char __user *buffer, - size_t count, loff_t *off); -int lprocfs_wr_import(struct file *file, const char __user *buffer, - size_t count, loff_t *off); -int lprocfs_rd_pinger_recov(struct seq_file *m, void *n); -int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer, - size_t count, loff_t *off); - -/* Statfs helpers */ - -int lprocfs_write_helper(const char __user *buffer, unsigned long count, - int *val); -int lprocfs_write_u64_helper(const char __user *buffer, - unsigned long count, __u64 *val); -int lprocfs_write_frac_u64_helper(const char __user *buffer, - unsigned long count, - __u64 *val, int mult); -char *lprocfs_find_named_value(const char *buffer, const char *name, - size_t *count); -void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value); -void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value); -void lprocfs_oh_clear(struct obd_histogram *oh); -unsigned long lprocfs_oh_sum(struct obd_histogram *oh); - -void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, - struct 
lprocfs_counter *cnt); - -int lprocfs_single_release(struct inode *inode, struct file *file); -int lprocfs_seq_release(struct inode *inode, struct file *file); - -/* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only - * proc entries; otherwise, you will define name##_seq_write function also for - * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, - * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); - */ -#define __LPROC_SEQ_FOPS(name, custom_seq_write) \ -static int name##_single_open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, name##_seq_show, inode->i_private); \ -} \ -static const struct file_operations name##_fops = { \ - .owner = THIS_MODULE, \ - .open = name##_single_open, \ - .read = seq_read, \ - .write = custom_seq_write, \ - .llseek = seq_lseek, \ - .release = lprocfs_single_release, \ -} - -#define LPROC_SEQ_FOPS_RO(name) __LPROC_SEQ_FOPS(name, NULL) -#define LPROC_SEQ_FOPS(name) __LPROC_SEQ_FOPS(name, name##_seq_write) - -#define LPROC_SEQ_FOPS_RO_TYPE(name, type) \ - static int name##_##type##_seq_show(struct seq_file *m, void *v)\ - { \ - return lprocfs_rd_##type(m, m->private); \ - } \ - LPROC_SEQ_FOPS_RO(name##_##type) - -#define LPROC_SEQ_FOPS_RW_TYPE(name, type) \ - static int name##_##type##_seq_show(struct seq_file *m, void *v)\ - { \ - return lprocfs_rd_##type(m, m->private); \ - } \ - static ssize_t name##_##type##_seq_write(struct file *file, \ - const char __user *buffer, size_t count, \ - loff_t *off) \ - { \ - struct seq_file *seq = file->private_data; \ - return lprocfs_wr_##type(file, buffer, \ - count, seq->private); \ - } \ - LPROC_SEQ_FOPS(name##_##type) - -#define LPROC_SEQ_FOPS_WR_ONLY(name, type) \ - static ssize_t name##_##type##_write(struct file *file, \ - const char __user *buffer, size_t count, \ - loff_t *off) \ - { \ - return lprocfs_wr_##type(file, buffer, count, off); \ - } \ - static int name##_##type##_open(struct inode 
*inode, struct file *file) \ - { \ - return single_open(file, NULL, inode->i_private); \ - } \ - static const struct file_operations name##_##type##_fops = { \ - .open = name##_##type##_open, \ - .write = name##_##type##_write, \ - .release = lprocfs_single_release, \ - } - -struct lustre_attr { - struct attribute attr; - ssize_t (*show)(struct kobject *kobj, struct attribute *attr, - char *buf); - ssize_t (*store)(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t len); -}; - -#define LUSTRE_ATTR(name, mode, show, store) \ -static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store) - -#define LUSTRE_RO_ATTR(name) LUSTRE_ATTR(name, 0444, name##_show, NULL) -#define LUSTRE_RW_ATTR(name) LUSTRE_ATTR(name, 0644, name##_show, name##_store) - -extern const struct sysfs_ops lustre_sysfs_ops; - -struct root_squash_info; -int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count, - struct root_squash_info *squash, char *name); -int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count, - struct root_squash_info *squash, char *name); - -/* all quota proc functions */ -int lprocfs_quota_rd_bunit(char *page, char **start, - loff_t off, int count, - int *eof, void *data); -int lprocfs_quota_wr_bunit(struct file *file, const char *buffer, - unsigned long count, void *data); -int lprocfs_quota_rd_btune(char *page, char **start, - loff_t off, int count, - int *eof, void *data); -int lprocfs_quota_wr_btune(struct file *file, const char *buffer, - unsigned long count, void *data); -int lprocfs_quota_rd_iunit(char *page, char **start, - loff_t off, int count, - int *eof, void *data); -int lprocfs_quota_wr_iunit(struct file *file, const char *buffer, - unsigned long count, void *data); -int lprocfs_quota_rd_itune(char *page, char **start, - loff_t off, int count, - int *eof, void *data); -int lprocfs_quota_wr_itune(struct file *file, const char *buffer, - unsigned long count, void *data); -int 
lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count, - int *eof, void *data); -int lprocfs_quota_wr_type(struct file *file, const char *buffer, - unsigned long count, void *data); -int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off, - int count, int *eof, void *data); -int lprocfs_quota_wr_switch_seconds(struct file *file, - const char *buffer, - unsigned long count, void *data); -int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off, - int count, int *eof, void *data); -int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer, - unsigned long count, void *data); -int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off, - int count, int *eof, void *data); -int lprocfs_quota_wr_switch_qs(struct file *file, - const char *buffer, unsigned long count, - void *data); -int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off, - int count, int *eof, void *data); -int lprocfs_quota_wr_boundary_factor(struct file *file, - const char *buffer, unsigned long count, - void *data); -int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off, - int count, int *eof, void *data); -int lprocfs_quota_wr_least_bunit(struct file *file, - const char *buffer, unsigned long count, - void *data); -int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off, - int count, int *eof, void *data); -int lprocfs_quota_wr_least_iunit(struct file *file, - const char *buffer, unsigned long count, - void *data); -int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off, - int count, int *eof, void *data); -int lprocfs_quota_wr_qs_factor(struct file *file, - const char *buffer, unsigned long count, - void *data); -#endif /* LPROCFS_SNMP_H */ diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h deleted file mode 100644 index f29bbca5af65..000000000000 --- a/drivers/staging/lustre/lustre/include/lu_object.h +++ 
/dev/null @@ -1,1305 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef __LUSTRE_LU_OBJECT_H -#define __LUSTRE_LU_OBJECT_H - -#include -#include -#include -#include -#include - -struct seq_file; -struct lustre_cfg; -struct lprocfs_stats; - -/** \defgroup lu lu - * lu_* data-types represent server-side entities shared by data and meta-data - * stacks. - * - * Design goals: - * - * -# support for layering. - * - * Server side object is split into layers, one per device in the - * corresponding device stack. Individual layer is represented by struct - * lu_object. Compound layered object --- by struct lu_object_header. Most - * interface functions take lu_object as an argument and operate on the - * whole compound object. 
This decision was made due to the following - * reasons: - * - * - it's envisaged that lu_object will be used much more often than - * lu_object_header; - * - * - we want lower (non-top) layers to be able to initiate operations - * on the whole object. - * - * Generic code supports layering more complex than simple stacking, e.g., - * it is possible that at some layer object "spawns" multiple sub-objects - * on the lower layer. - * - * -# fid-based identification. - * - * Compound object is uniquely identified by its fid. Objects are indexed - * by their fids (hash table is used for index). - * - * -# caching and life-cycle management. - * - * Object's life-time is controlled by reference counting. When reference - * count drops to 0, object is returned to cache. Cached objects still - * retain their identity (i.e., fid), and can be recovered from cache. - * - * Objects are kept in the global LRU list, and lu_site_purge() function - * can be used to reclaim given number of unused objects from the tail of - * the LRU. - * - * -# avoiding recursion. - * - * Generic code tries to replace recursion through layers by iterations - * where possible. Additionally to the end of reducing stack consumption, - * data, when practically possible, are allocated through lu_context_key - * interface rather than on stack. - * @{ - */ - -struct lu_site; -struct lu_object; -struct lu_device; -struct lu_object_header; -struct lu_context; -struct lu_env; - -/** - * Operations common for data and meta-data devices. - */ -struct lu_device_operations { - /** - * Allocate object for the given device (without lower-layer - * parts). This is called by lu_object_operations::loo_object_init() - * from the parent layer, and should setup at least lu_object::lo_dev - * and lu_object::lo_ops fields of resulting lu_object. - * - * Object creation protocol. 
- * - * Due to design goal of avoiding recursion, object creation (see - * lu_object_alloc()) is somewhat involved: - * - * - first, lu_device_operations::ldo_object_alloc() method of the - * top-level device in the stack is called. It should allocate top - * level object (including lu_object_header), but without any - * lower-layer sub-object(s). - * - * - then lu_object_alloc() sets fid in the header of newly created - * object. - * - * - then lu_object_operations::loo_object_init() is called. It has - * to allocate lower-layer object(s). To do this, - * lu_object_operations::loo_object_init() calls ldo_object_alloc() - * of the lower-layer device(s). - * - * - for all new objects allocated by - * lu_object_operations::loo_object_init() (and inserted into object - * stack), lu_object_operations::loo_object_init() is called again - * repeatedly, until no new objects are created. - * - * \post ergo(!IS_ERR(result), result->lo_dev == d && - * result->lo_ops != NULL); - */ - struct lu_object *(*ldo_object_alloc)(const struct lu_env *env, - const struct lu_object_header *h, - struct lu_device *d); - /** - * process config specific for device. - */ - int (*ldo_process_config)(const struct lu_env *env, - struct lu_device *, struct lustre_cfg *); - int (*ldo_recovery_complete)(const struct lu_env *, - struct lu_device *); - - /** - * initialize local objects for device. this method called after layer - * has been initialized (after LCFG_SETUP stage) and before it starts - * serving user requests. - */ - - int (*ldo_prepare)(const struct lu_env *, - struct lu_device *parent, - struct lu_device *dev); - -}; - -/** - * For lu_object_conf flags - */ -enum loc_flags { - /* This is a new object to be allocated, or the file - * corresponding to the object does not exists. - */ - LOC_F_NEW = 0x00000001, -}; - -/** - * Object configuration, describing particulars of object being created. On - * server this is not used, as server objects are full identified by fid. 
On - * client configuration contains struct lustre_md. - */ -struct lu_object_conf { - /** - * Some hints for obj find and alloc. - */ - enum loc_flags loc_flags; -}; - -/** - * Type of "printer" function used by lu_object_operations::loo_object_print() - * method. - * - * Printer function is needed to provide some flexibility in (semi-)debugging - * output: possible implementations: printk, CDEBUG, sysfs/seq_file - */ -typedef int (*lu_printer_t)(const struct lu_env *env, - void *cookie, const char *format, ...) - __printf(3, 4); - -/** - * Operations specific for particular lu_object. - */ -struct lu_object_operations { - /** - * Allocate lower-layer parts of the object by calling - * lu_device_operations::ldo_object_alloc() of the corresponding - * underlying device. - * - * This method is called once for each object inserted into object - * stack. It's responsibility of this method to insert lower-layer - * object(s) it create into appropriate places of object stack. - */ - int (*loo_object_init)(const struct lu_env *env, - struct lu_object *o, - const struct lu_object_conf *conf); - /** - * Called (in top-to-bottom order) during object allocation after all - * layers were allocated and initialized. Can be used to perform - * initialization depending on lower layers. - */ - int (*loo_object_start)(const struct lu_env *env, - struct lu_object *o); - /** - * Called before lu_object_operations::loo_object_free() to signal - * that object is being destroyed. Dual to - * lu_object_operations::loo_object_init(). - */ - void (*loo_object_delete)(const struct lu_env *env, - struct lu_object *o); - /** - * Dual to lu_device_operations::ldo_object_alloc(). Called when - * object is removed from memory. - */ - void (*loo_object_free)(const struct lu_env *env, - struct lu_object *o); - /** - * Called when last active reference to the object is released (and - * object returns to the cache). This method is optional. 
- */ - void (*loo_object_release)(const struct lu_env *env, - struct lu_object *o); - /** - * Optional debugging helper. Print given object. - */ - int (*loo_object_print)(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o); - /** - * Optional debugging method. Returns true iff method is internally - * consistent. - */ - int (*loo_object_invariant)(const struct lu_object *o); -}; - -/** - * Type of lu_device. - */ -struct lu_device_type; - -/** - * Device: a layer in the server side abstraction stacking. - */ -struct lu_device { - /** - * reference count. This is incremented, in particular, on each object - * created at this layer. - * - * \todo XXX which means that atomic_t is probably too small. - */ - atomic_t ld_ref; - /** - * Pointer to device type. Never modified once set. - */ - struct lu_device_type *ld_type; - /** - * Operation vector for this device. - */ - const struct lu_device_operations *ld_ops; - /** - * Stack this device belongs to. - */ - struct lu_site *ld_site; - - /** \todo XXX: temporary back pointer into obd. */ - struct obd_device *ld_obd; - /** - * A list of references to this object, for debugging. - */ - struct lu_ref ld_reference; - /** - * Link the device to the site. - **/ - struct list_head ld_linkage; -}; - -struct lu_device_type_operations; - -/** - * Tag bits for device type. They are used to distinguish certain groups of - * device types. - */ -enum lu_device_tag { - /** this is meta-data device */ - LU_DEVICE_MD = (1 << 0), - /** this is data device */ - LU_DEVICE_DT = (1 << 1), - /** data device in the client stack */ - LU_DEVICE_CL = (1 << 2) -}; - -/** - * Type of device. - */ -struct lu_device_type { - /** - * Tag bits. Taken from enum lu_device_tag. Never modified once set. - */ - __u32 ldt_tags; - /** - * Name of this class. Unique system-wide. Never modified once set. - */ - char *ldt_name; - /** - * Operations for this type. 
- */ - const struct lu_device_type_operations *ldt_ops; - /** - * \todo XXX: temporary pointer to associated obd_type. - */ - struct obd_type *ldt_obd_type; - /** - * \todo XXX: temporary: context tags used by obd_*() calls. - */ - __u32 ldt_ctx_tags; - /** - * Number of existing device type instances. - */ - atomic_t ldt_device_nr; - /** - * Linkage into a global list of all device types. - * - * \see lu_device_types. - */ - struct list_head ldt_linkage; -}; - -/** - * Operations on a device type. - */ -struct lu_device_type_operations { - /** - * Allocate new device. - */ - struct lu_device *(*ldto_device_alloc)(const struct lu_env *env, - struct lu_device_type *t, - struct lustre_cfg *lcfg); - /** - * Free device. Dual to - * lu_device_type_operations::ldto_device_alloc(). Returns pointer to - * the next device in the stack. - */ - struct lu_device *(*ldto_device_free)(const struct lu_env *, - struct lu_device *); - - /** - * Initialize the devices after allocation - */ - int (*ldto_device_init)(const struct lu_env *env, - struct lu_device *, const char *, - struct lu_device *); - /** - * Finalize device. Dual to - * lu_device_type_operations::ldto_device_init(). Returns pointer to - * the next device in the stack. - */ - struct lu_device *(*ldto_device_fini)(const struct lu_env *env, - struct lu_device *); - /** - * Initialize device type. This is called on module load. - */ - int (*ldto_init)(struct lu_device_type *t); - /** - * Finalize device type. Dual to - * lu_device_type_operations::ldto_init(). Called on module unload. - */ - void (*ldto_fini)(struct lu_device_type *t); - /** - * Called when the first device is created. - */ - void (*ldto_start)(struct lu_device_type *t); - /** - * Called when number of devices drops to 0. - */ - void (*ldto_stop)(struct lu_device_type *t); -}; - -static inline int lu_device_is_md(const struct lu_device *d) -{ - return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD); -} - -/** - * Common object attributes. 
- */ -struct lu_attr { - /** size in bytes */ - __u64 la_size; - /** modification time in seconds since Epoch */ - s64 la_mtime; - /** access time in seconds since Epoch */ - s64 la_atime; - /** change time in seconds since Epoch */ - s64 la_ctime; - /** 512-byte blocks allocated to object */ - __u64 la_blocks; - /** permission bits and file type */ - __u32 la_mode; - /** owner id */ - __u32 la_uid; - /** group id */ - __u32 la_gid; - /** object flags */ - __u32 la_flags; - /** number of persistent references to this object */ - __u32 la_nlink; - /** blk bits of the object*/ - __u32 la_blkbits; - /** blk size of the object*/ - __u32 la_blksize; - /** real device */ - __u32 la_rdev; - /** - * valid bits - * - * \see enum la_valid - */ - __u64 la_valid; -}; - -/** Bit-mask of valid attributes */ -enum la_valid { - LA_ATIME = 1 << 0, - LA_MTIME = 1 << 1, - LA_CTIME = 1 << 2, - LA_SIZE = 1 << 3, - LA_MODE = 1 << 4, - LA_UID = 1 << 5, - LA_GID = 1 << 6, - LA_BLOCKS = 1 << 7, - LA_TYPE = 1 << 8, - LA_FLAGS = 1 << 9, - LA_NLINK = 1 << 10, - LA_RDEV = 1 << 11, - LA_BLKSIZE = 1 << 12, - LA_KILL_SUID = 1 << 13, - LA_KILL_SGID = 1 << 14, -}; - -/** - * Layer in the layered object. - */ -struct lu_object { - /** - * Header for this object. - */ - struct lu_object_header *lo_header; - /** - * Device for this layer. - */ - struct lu_device *lo_dev; - /** - * Operations for this object. - */ - const struct lu_object_operations *lo_ops; - /** - * Linkage into list of all layers. - */ - struct list_head lo_linkage; - /** - * Link to the device, for debugging. - */ - struct lu_ref_link lo_dev_ref; -}; - -enum lu_object_header_flags { - /** - * Don't keep this object in cache. Object will be destroyed as soon - * as last reference to it is released. This flag cannot be cleared - * once set. - */ - LU_OBJECT_HEARD_BANSHEE = 0, - /** - * Mark this object has already been taken out of cache. 
- */ - LU_OBJECT_UNHASHED = 1, -}; - -enum lu_object_header_attr { - LOHA_EXISTS = 1 << 0, - LOHA_REMOTE = 1 << 1, - /** - * UNIX file type is stored in S_IFMT bits. - */ - LOHA_FT_START = 001 << 12, /**< S_IFIFO */ - LOHA_FT_END = 017 << 12, /**< S_IFMT */ -}; - -/** - * "Compound" object, consisting of multiple layers. - * - * Compound object with given fid is unique with given lu_site. - * - * Note, that object does *not* necessary correspond to the real object in the - * persistent storage: object is an anchor for locking and method calling, so - * it is created for things like not-yet-existing child created by mkdir or - * create calls. lu_object_operations::loo_exists() can be used to check - * whether object is backed by persistent storage entity. - */ -struct lu_object_header { - /** - * Fid, uniquely identifying this object. - */ - struct lu_fid loh_fid; - /** - * Object flags from enum lu_object_header_flags. Set and checked - * atomically. - */ - unsigned long loh_flags; - /** - * Object reference count. Protected by lu_site::ls_guard. - */ - atomic_t loh_ref; - /** - * Common object attributes, cached for efficiency. From enum - * lu_object_header_attr. - */ - __u32 loh_attr; - /** - * Linkage into per-site hash table. Protected by lu_site::ls_guard. - */ - struct hlist_node loh_hash; - /** - * Linkage into per-site LRU list. Protected by lu_site::ls_guard. - */ - struct list_head loh_lru; - /** - * Linkage into list of layers. Never modified once set (except lately - * during object destruction). No locking is necessary. - */ - struct list_head loh_layers; - /** - * A list of references to this object, for debugging. 
- */ - struct lu_ref loh_reference; -}; - -struct fld; -struct lu_site_bkt_data; - -enum { - LU_SS_CREATED = 0, - LU_SS_CACHE_HIT, - LU_SS_CACHE_MISS, - LU_SS_CACHE_RACE, - LU_SS_CACHE_DEATH_RACE, - LU_SS_LRU_PURGED, - LU_SS_LAST_STAT -}; - -/** - * lu_site is a "compartment" within which objects are unique, and LRU - * discipline is maintained. - * - * lu_site exists so that multiple layered stacks can co-exist in the same - * address space. - * - * lu_site has the same relation to lu_device as lu_object_header to - * lu_object. - */ -struct lu_site { - /** - * objects hash table - */ - struct cfs_hash *ls_obj_hash; - /** - * index of bucket on hash table while purging - */ - unsigned int ls_purge_start; - /** - * Top-level device for this stack. - */ - struct lu_device *ls_top_dev; - /** - * Bottom-level device for this stack - */ - struct lu_device *ls_bottom_dev; - /** - * Linkage into global list of sites. - */ - struct list_head ls_linkage; - /** - * List for lu device for this site, protected - * by ls_ld_lock. - **/ - struct list_head ls_ld_linkage; - spinlock_t ls_ld_lock; - - /** - * Lock to serialize site purge. - */ - struct mutex ls_purge_mutex; - - /** - * lu_site stats - */ - struct lprocfs_stats *ls_stats; - /** - * XXX: a hack! fld has to find md_site via site, remove when possible - */ - struct seq_server_site *ld_seq_site; - /** - * Number of objects in lsb_lru_lists - used for shrinking - */ - struct percpu_counter ls_lru_len_counter; -}; - -wait_queue_head_t * -lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid); - -static inline struct seq_server_site *lu_site2seq(const struct lu_site *s) -{ - return s->ld_seq_site; -} - -/** \name ctors - * Constructors/destructors. 
- * @{ - */ - -int lu_site_init(struct lu_site *s, struct lu_device *d); -void lu_site_fini(struct lu_site *s); -int lu_site_init_finish(struct lu_site *s); -void lu_stack_fini(const struct lu_env *env, struct lu_device *top); -void lu_device_get(struct lu_device *d); -void lu_device_put(struct lu_device *d); -int lu_device_init(struct lu_device *d, struct lu_device_type *t); -void lu_device_fini(struct lu_device *d); -int lu_object_header_init(struct lu_object_header *h); -void lu_object_header_fini(struct lu_object_header *h); -int lu_object_init(struct lu_object *o, - struct lu_object_header *h, struct lu_device *d); -void lu_object_fini(struct lu_object *o); -void lu_object_add_top(struct lu_object_header *h, struct lu_object *o); -void lu_object_add(struct lu_object *before, struct lu_object *o); - -/** - * Helpers to initialize and finalize device types. - */ - -int lu_device_type_init(struct lu_device_type *ldt); -void lu_device_type_fini(struct lu_device_type *ldt); - -/** @} ctors */ - -/** \name caching - * Caching and reference counting. - * @{ - */ - -/** - * Acquire additional reference to the given object. This function is used to - * attain additional reference. To acquire initial reference use - * lu_object_find(). - */ -static inline void lu_object_get(struct lu_object *o) -{ - LASSERT(atomic_read(&o->lo_header->loh_ref) > 0); - atomic_inc(&o->lo_header->loh_ref); -} - -/** - * Return true of object will not be cached after last reference to it is - * released. 
- */ -static inline int lu_object_is_dying(const struct lu_object_header *h) -{ - return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags); -} - -void lu_object_put(const struct lu_env *env, struct lu_object *o); -void lu_object_unhash(const struct lu_env *env, struct lu_object *o); -int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s, int nr, - bool canblock); - -static inline int lu_site_purge(const struct lu_env *env, struct lu_site *s, - int nr) -{ - return lu_site_purge_objects(env, s, nr, true); -} - -void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie, - lu_printer_t printer); -struct lu_object *lu_object_find_at(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf); -struct lu_object *lu_object_find_slice(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf); -/** @} caching */ - -/** \name helpers - * Helpers. - * @{ - */ - -/** - * First (topmost) sub-object of given compound object - */ -static inline struct lu_object *lu_object_top(struct lu_object_header *h) -{ - LASSERT(!list_empty(&h->loh_layers)); - return list_first_entry(&h->loh_layers, struct lu_object, lo_linkage); -} - -/** - * Next sub-object in the layering - */ -static inline const struct lu_object *lu_object_next(const struct lu_object *o) -{ - return list_next_entry(o, lo_linkage); -} - -/** - * Pointer to the fid of this object. - */ -static inline const struct lu_fid *lu_object_fid(const struct lu_object *o) -{ - return &o->lo_header->loh_fid; -} - -/** - * return device operations vector for this object - */ -static inline const struct lu_device_operations * -lu_object_ops(const struct lu_object *o) -{ - return o->lo_dev->ld_ops; -} - -/** - * Given a compound object, find its slice, corresponding to the device type - * \a dtype. 
- */ -struct lu_object *lu_object_locate(struct lu_object_header *h, - const struct lu_device_type *dtype); - -/** - * Printer function emitting messages through libcfs_debug_msg(). - */ -int lu_cdebug_printer(const struct lu_env *env, - void *cookie, const char *format, ...); - -/** - * Print object description followed by a user-supplied message. - */ -#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \ -do { \ - if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ - lu_object_print(env, &msgdata, lu_cdebug_printer, object);\ - CDEBUG(mask, format "\n", ## __VA_ARGS__); \ - } \ -} while (0) - -/** - * Print short object description followed by a user-supplied message. - */ -#define LU_OBJECT_HEADER(mask, env, object, format, ...) \ -do { \ - if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ - lu_object_header_print(env, &msgdata, lu_cdebug_printer,\ - (object)->lo_header); \ - lu_cdebug_printer(env, &msgdata, "\n"); \ - CDEBUG(mask, format, ## __VA_ARGS__); \ - } \ -} while (0) - -void lu_object_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct lu_object *o); -void lu_object_header_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct lu_object_header *hdr); - -/** - * Check object consistency. - */ -int lu_object_invariant(const struct lu_object *o); - -/** - * Check whether object exists, no matter on local or remote storage. - * Note: LOHA_EXISTS will be set once some one created the object, - * and it does not needs to be committed to storage. - */ -#define lu_object_exists(o) ((o)->lo_header->loh_attr & LOHA_EXISTS) - -/** - * Check whether object on the remote storage. 
- */ -#define lu_object_remote(o) unlikely((o)->lo_header->loh_attr & LOHA_REMOTE) - -static inline int lu_object_assert_exists(const struct lu_object *o) -{ - return lu_object_exists(o); -} - -static inline int lu_object_assert_not_exists(const struct lu_object *o) -{ - return !lu_object_exists(o); -} - -/** - * Attr of this object. - */ -static inline __u32 lu_object_attr(const struct lu_object *o) -{ - LASSERT(lu_object_exists(o) != 0); - return o->lo_header->loh_attr; -} - -static inline void lu_object_ref_add(struct lu_object *o, - const char *scope, - const void *source) -{ - lu_ref_add(&o->lo_header->loh_reference, scope, source); -} - -static inline void lu_object_ref_add_at(struct lu_object *o, - struct lu_ref_link *link, - const char *scope, - const void *source) -{ - lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source); -} - -static inline void lu_object_ref_del(struct lu_object *o, - const char *scope, const void *source) -{ - lu_ref_del(&o->lo_header->loh_reference, scope, source); -} - -static inline void lu_object_ref_del_at(struct lu_object *o, - struct lu_ref_link *link, - const char *scope, const void *source) -{ - lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source); -} - -/** input params, should be filled out by mdt */ -struct lu_rdpg { - /** hash */ - __u64 rp_hash; - /** count in bytes */ - unsigned int rp_count; - /** number of pages */ - unsigned int rp_npages; - /** requested attr */ - __u32 rp_attrs; - /** pointers to pages */ - struct page **rp_pages; -}; - -enum lu_xattr_flags { - LU_XATTR_REPLACE = (1 << 0), - LU_XATTR_CREATE = (1 << 1) -}; - -/** @} helpers */ - -/** \name lu_context - * @{ - */ - -/** For lu_context health-checks */ -enum lu_context_state { - LCS_INITIALIZED = 1, - LCS_ENTERED, - LCS_LEFT, - LCS_FINALIZED -}; - -/** - * lu_context. Execution context for lu_object methods. Currently associated - * with thread. 
- * - * All lu_object methods, except device and device type methods (called during - * system initialization and shutdown) are executed "within" some - * lu_context. This means, that pointer to some "current" lu_context is passed - * as an argument to all methods. - * - * All service ptlrpc threads create lu_context as part of their - * initialization. It is possible to create "stand-alone" context for other - * execution environments (like system calls). - * - * lu_object methods mainly use lu_context through lu_context_key interface - * that allows each layer to associate arbitrary pieces of data with each - * context (see pthread_key_create(3) for similar interface). - * - * On a client, lu_context is bound to a thread, see cl_env_get(). - * - * \see lu_context_key - */ -struct lu_context { - /** - * lu_context is used on the client side too. Yet we don't want to - * allocate values of server-side keys for the client contexts and - * vice versa. - * - * To achieve this, set of tags in introduced. Contexts and keys are - * marked with tags. Key value are created only for context whose set - * of tags has non-empty intersection with one for key. Tags are taken - * from enum lu_context_tag. - */ - __u32 lc_tags; - enum lu_context_state lc_state; - /** - * Pointer to the home service thread. NULL for other execution - * contexts. - */ - struct ptlrpc_thread *lc_thread; - /** - * Pointer to an array with key values. Internal implementation - * detail. - */ - void **lc_value; - /** - * Linkage into a list of all remembered contexts. Only - * `non-transient' contexts, i.e., ones created for service threads - * are placed here. - */ - struct list_head lc_remember; - /** - * Version counter used to skip calls to lu_context_refill() when no - * keys were registered. - */ - unsigned int lc_version; - /** - * Debugging cookie. - */ - unsigned int lc_cookie; -}; - -/** - * lu_context_key interface. Similar to pthread_key. 
- */ - -enum lu_context_tag { - /** - * Thread on md server - */ - LCT_MD_THREAD = 1 << 0, - /** - * Thread on dt server - */ - LCT_DT_THREAD = 1 << 1, - /** - * Context for transaction handle - */ - LCT_TX_HANDLE = 1 << 2, - /** - * Thread on client - */ - LCT_CL_THREAD = 1 << 3, - /** - * A per-request session on a server, and a per-system-call session on - * a client. - */ - LCT_SESSION = 1 << 4, - /** - * A per-request data on OSP device - */ - LCT_OSP_THREAD = 1 << 5, - /** - * MGS device thread - */ - LCT_MG_THREAD = 1 << 6, - /** - * Context for local operations - */ - LCT_LOCAL = 1 << 7, - /** - * session for server thread - **/ - LCT_SERVER_SESSION = BIT(8), - /** - * Set when at least one of keys, having values in this context has - * non-NULL lu_context_key::lct_exit() method. This is used to - * optimize lu_context_exit() call. - */ - LCT_HAS_EXIT = 1 << 28, - /** - * Don't add references for modules creating key values in that context. - * This is only for contexts used internally by lu_object framework. - */ - LCT_NOREF = 1 << 29, - /** - * Key is being prepared for retiring, don't create new values for it. - */ - LCT_QUIESCENT = 1 << 30, - /** - * Context should be remembered. - */ - LCT_REMEMBER = 1 << 31, - /** - * Contexts usable in cache shrinker thread. - */ - LCT_SHRINKER = LCT_MD_THREAD | LCT_DT_THREAD | LCT_CL_THREAD | - LCT_NOREF -}; - -/** - * Key. Represents per-context value slot. - * - * Keys are usually registered when module owning the key is initialized, and - * de-registered when module is unloaded. Once key is registered, all new - * contexts with matching tags, will get key value. "Old" contexts, already - * initialized at the time of key registration, can be forced to get key value - * by calling lu_context_refill(). - * - * Every key value is counted in lu_context_key::lct_used and acquires a - * reference on an owning module. This means, that all key values have to be - * destroyed before module can be unloaded. 
This is usually achieved by - * stopping threads started by the module, that created contexts in their - * entry functions. Situation is complicated by the threads shared by multiple - * modules, like ptlrpcd daemon on a client. To work around this problem, - * contexts, created in such threads, are `remembered' (see - * LCT_REMEMBER)---i.e., added into a global list. When module is preparing - * for unloading it does the following: - * - * - marks its keys as `quiescent' (lu_context_tag::LCT_QUIESCENT) - * preventing new key values from being allocated in the new contexts, - * and - * - * - scans a list of remembered contexts, destroying values of module - * keys, thus releasing references to the module. - * - * This is done by lu_context_key_quiesce(). If module is re-activated - * before key has been de-registered, lu_context_key_revive() call clears - * `quiescent' marker. - * - * lu_context code doesn't provide any internal synchronization for these - * activities---it's assumed that startup (including threads start-up) and - * shutdown are serialized by some external means. - * - * \see lu_context - */ -struct lu_context_key { - /** - * Set of tags for which values of this key are to be instantiated. - */ - __u32 lct_tags; - /** - * Value constructor. This is called when new value is created for a - * context. Returns pointer to new value of error pointer. - */ - void *(*lct_init)(const struct lu_context *ctx, - struct lu_context_key *key); - /** - * Value destructor. Called when context with previously allocated - * value of this slot is destroyed. \a data is a value that was returned - * by a matching call to lu_context_key::lct_init(). - */ - void (*lct_fini)(const struct lu_context *ctx, - struct lu_context_key *key, void *data); - /** - * Optional method called on lu_context_exit() for all allocated - * keys. Can be used by debugging code checking that locks are - * released, etc. 
- */ - void (*lct_exit)(const struct lu_context *ctx, - struct lu_context_key *key, void *data); - /** - * Internal implementation detail: index within lu_context::lc_value[] - * reserved for this key. - */ - int lct_index; - /** - * Internal implementation detail: number of values created for this - * key. - */ - atomic_t lct_used; - /** - * Internal implementation detail: module for this key. - */ - struct module *lct_owner; - /** - * References to this key. For debugging. - */ - struct lu_ref lct_reference; -}; - -#define LU_KEY_INIT(mod, type) \ - static void *mod##_key_init(const struct lu_context *ctx, \ - struct lu_context_key *key) \ - { \ - type *value; \ - \ - BUILD_BUG_ON(sizeof(*value) > PAGE_SIZE); \ - \ - value = kzalloc(sizeof(*value), GFP_NOFS); \ - if (!value) \ - value = ERR_PTR(-ENOMEM); \ - \ - return value; \ - } \ - struct __##mod##__dummy_init {; } /* semicolon catcher */ - -#define LU_KEY_FINI(mod, type) \ - static void mod##_key_fini(const struct lu_context *ctx, \ - struct lu_context_key *key, void *data) \ - { \ - type *info = data; \ - \ - kfree(info); \ - } \ - struct __##mod##__dummy_fini {; } /* semicolon catcher */ - -#define LU_KEY_INIT_FINI(mod, type) \ - LU_KEY_INIT(mod, type); \ - LU_KEY_FINI(mod, type) - -#define LU_CONTEXT_KEY_DEFINE(mod, tags) \ - struct lu_context_key mod##_thread_key = { \ - .lct_tags = tags, \ - .lct_init = mod##_key_init, \ - .lct_fini = mod##_key_fini \ - } - -#define LU_CONTEXT_KEY_INIT(key) \ -do { \ - (key)->lct_owner = THIS_MODULE; \ -} while (0) - -int lu_context_key_register(struct lu_context_key *key); -void lu_context_key_degister(struct lu_context_key *key); -void *lu_context_key_get(const struct lu_context *ctx, - const struct lu_context_key *key); -void lu_context_key_quiesce(struct lu_context_key *key); -void lu_context_key_revive(struct lu_context_key *key); - -/* - * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an - * owning module. 
- */ - -#define LU_KEY_INIT_GENERIC(mod) \ - static void mod##_key_init_generic(struct lu_context_key *k, ...) \ - { \ - struct lu_context_key *key = k; \ - va_list args; \ - \ - va_start(args, k); \ - do { \ - LU_CONTEXT_KEY_INIT(key); \ - key = va_arg(args, struct lu_context_key *); \ - } while (key); \ - va_end(args); \ - } - -#define LU_TYPE_INIT(mod, ...) \ - LU_KEY_INIT_GENERIC(mod) \ - static int mod##_type_init(struct lu_device_type *t) \ - { \ - mod##_key_init_generic(__VA_ARGS__, NULL); \ - return lu_context_key_register_many(__VA_ARGS__, NULL); \ - } \ - struct __##mod##_dummy_type_init {; } - -#define LU_TYPE_FINI(mod, ...) \ - static void mod##_type_fini(struct lu_device_type *t) \ - { \ - lu_context_key_degister_many(__VA_ARGS__, NULL); \ - } \ - struct __##mod##_dummy_type_fini {; } - -#define LU_TYPE_START(mod, ...) \ - static void mod##_type_start(struct lu_device_type *t) \ - { \ - lu_context_key_revive_many(__VA_ARGS__, NULL); \ - } \ - struct __##mod##_dummy_type_start {; } - -#define LU_TYPE_STOP(mod, ...) \ - static void mod##_type_stop(struct lu_device_type *t) \ - { \ - lu_context_key_quiesce_many(__VA_ARGS__, NULL); \ - } \ - struct __##mod##_dummy_type_stop {; } - -#define LU_TYPE_INIT_FINI(mod, ...) \ - LU_TYPE_INIT(mod, __VA_ARGS__); \ - LU_TYPE_FINI(mod, __VA_ARGS__); \ - LU_TYPE_START(mod, __VA_ARGS__); \ - LU_TYPE_STOP(mod, __VA_ARGS__) - -int lu_context_init(struct lu_context *ctx, __u32 tags); -void lu_context_fini(struct lu_context *ctx); -void lu_context_enter(struct lu_context *ctx); -void lu_context_exit(struct lu_context *ctx); -int lu_context_refill(struct lu_context *ctx); - -/* - * Helper functions to operate on multiple keys. These are used by the default - * device type operations, defined by LU_TYPE_INIT_FINI(). 
- */ - -int lu_context_key_register_many(struct lu_context_key *k, ...); -void lu_context_key_degister_many(struct lu_context_key *k, ...); -void lu_context_key_revive_many(struct lu_context_key *k, ...); -void lu_context_key_quiesce_many(struct lu_context_key *k, ...); - -/** - * Environment. - */ -struct lu_env { - /** - * "Local" context, used to store data instead of stack. - */ - struct lu_context le_ctx; - /** - * "Session" context for per-request data. - */ - struct lu_context *le_ses; -}; - -int lu_env_init(struct lu_env *env, __u32 tags); -void lu_env_fini(struct lu_env *env); -int lu_env_refill(struct lu_env *env); - -/** @} lu_context */ - -/** - * Output site statistical counters into a buffer. Suitable for - * ll_rd_*()-style functions. - */ -int lu_site_stats_print(const struct lu_site *s, struct seq_file *m); - -/** - * Common name structure to be passed around for various name related methods. - */ -struct lu_name { - const char *ln_name; - int ln_namelen; -}; - -/** - * Validate names (path components) - * - * To be valid \a name must be non-empty, '\0' terminated of length \a - * name_len, and not contain '/'. The maximum length of a name (before - * say -ENAMETOOLONG will be returned) is really controlled by llite - * and the server. We only check for something insane coming from bad - * integer handling here. - */ -static inline bool lu_name_is_valid_2(const char *name, size_t name_len) -{ - return name && name_len > 0 && name_len < INT_MAX && - name[name_len] == '\0' && strlen(name) == name_len && - !memchr(name, '/', name_len); -} - -/** - * Common buffer structure to be passed around for various xattr_{s,g}et() - * methods. - */ -struct lu_buf { - void *lb_buf; - size_t lb_len; -}; - -/** - * One-time initializers, called at obdclass module initialization, not - * exported. - */ - -/** - * Initialization of global lu_* data. - */ -int lu_global_init(void); - -/** - * Dual to lu_global_init(). 
- */ -void lu_global_fini(void); - -struct lu_kmem_descr { - struct kmem_cache **ckd_cache; - const char *ckd_name; - const size_t ckd_size; -}; - -int lu_kmem_init(struct lu_kmem_descr *caches); -void lu_kmem_fini(struct lu_kmem_descr *caches); - -extern __u32 lu_context_tags_default; -extern __u32 lu_session_tags_default; - -/** @} lu */ -#endif /* __LUSTRE_LU_OBJECT_H */ diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h deleted file mode 100644 index ad0c24d29ffa..000000000000 --- a/drivers/staging/lustre/lustre/include/lu_ref.h +++ /dev/null @@ -1,178 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - * - * Author: Nikita Danilov - * - * This file is part of Lustre, http://www.lustre.org. - * - * Lustre is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * Lustre is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef __LUSTRE_LU_REF_H -#define __LUSTRE_LU_REF_H - -#include - -/** \defgroup lu_ref lu_ref - * - * An interface to track references between objects. Mostly for debugging. - * - * Suppose there is a reference counted data-structure struct foo. To track - * who acquired references to instance of struct foo, add lu_ref field to it: - * - * \code - * struct foo { - * atomic_t foo_refcount; - * struct lu_ref foo_reference; - * ... - * }; - * \endcode - * - * foo::foo_reference has to be initialized by calling - * lu_ref_init(). 
Typically there will be functions or macros to increment and - * decrement foo::foo_refcount, let's say they are foo_get(struct foo *foo) - * and foo_put(struct foo *foo), respectively. - * - * Whenever foo_get() is called to acquire a reference on a foo, lu_ref_add() - * has to be called to insert into foo::foo_reference a record, describing - * acquired reference. Dually, lu_ref_del() removes matching record. Typical - * usages are: - * - * \code - * struct bar *bar; - * - * // bar owns a reference to foo. - * bar->bar_foo = foo_get(foo); - * lu_ref_add(&foo->foo_reference, "bar", bar); - * - * ... - * - * // reference from bar to foo is released. - * lu_ref_del(&foo->foo_reference, "bar", bar); - * foo_put(bar->bar_foo); - * - * - * // current thread acquired a temporary reference to foo. - * foo_get(foo); - * lu_ref_add(&foo->reference, __func__, current); - * - * ... - * - * // temporary reference is released. - * lu_ref_del(&foo->reference, __func__, current); - * foo_put(foo); - * \endcode - * - * \e Et \e cetera. Often it makes sense to include lu_ref_add() and - * lu_ref_del() calls into foo_get() and foo_put(). When an instance of struct - * foo is destroyed, lu_ref_fini() has to be called that checks that no - * pending references remain. lu_ref_print() can be used to dump a list of - * pending references, while hunting down a leak. - * - * For objects to which a large number of references can be acquired, - * lu_ref_del() can become cpu consuming, as it has to scan the list of - * references. To work around this, remember result of lu_ref_add() (usually - * in the same place where pointer to struct foo is stored), and use - * lu_ref_del_at(): - * - * \code - * // There is a large number of bar's for a single foo. - * bar->bar_foo = foo_get(foo); - * bar->bar_foo_ref = lu_ref_add(&foo->foo_reference, "bar", bar); - * - * ... - * - * // reference from bar to foo is released. 
- * lu_ref_del_at(&foo->foo_reference, bar->bar_foo_ref, "bar", bar); - * foo_put(bar->bar_foo); - * \endcode - * - * lu_ref interface degrades gracefully in case of memory shortages. - * - * @{ - */ - -/* - * dummy data structures/functions to pass compile for now. - * We need to reimplement them with kref. - */ -struct lu_ref {}; -struct lu_ref_link {}; - -static inline void lu_ref_init(struct lu_ref *ref) -{ -} - -static inline void lu_ref_fini(struct lu_ref *ref) -{ -} - -static inline struct lu_ref_link *lu_ref_add(struct lu_ref *ref, - const char *scope, - const void *source) -{ - return NULL; -} - -static inline struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref, - const char *scope, - const void *source) -{ - return NULL; -} - -static inline void lu_ref_add_at(struct lu_ref *ref, - struct lu_ref_link *link, - const char *scope, - const void *source) -{ -} - -static inline void lu_ref_del(struct lu_ref *ref, const char *scope, - const void *source) -{ -} - -static inline void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link, - const char *scope, const void *source0, - const void *source1) -{ -} - -static inline void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link, - const char *scope, const void *source) -{ -} - -static inline int lu_ref_global_init(void) -{ - return 0; -} - -static inline void lu_ref_global_fini(void) -{ -} - -static inline void lu_ref_print(const struct lu_ref *ref) -{ -} - -static inline void lu_ref_print_all(void) -{ -} - -/** @} lu */ - -#endif /* __LUSTRE_LU_REF_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h deleted file mode 100644 index e7575a172b5f..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_acl.h +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre_acl.h - */ - -#ifndef _LUSTRE_ACL_H -#define _LUSTRE_ACL_H - -#include -#include -#ifdef CONFIG_FS_POSIX_ACL -#include - -#define LUSTRE_POSIX_ACL_MAX_ENTRIES 32 -#define LUSTRE_POSIX_ACL_MAX_SIZE_OLD \ - (sizeof(struct posix_acl_xattr_header) + \ - LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(struct posix_acl_xattr_entry)) - -#else /* ! CONFIG_FS_POSIX_ACL */ -#define LUSTRE_POSIX_ACL_MAX_SIZE_OLD 0 -#endif /* CONFIG_FS_POSIX_ACL */ - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h deleted file mode 100644 index 3c6db0d632dc..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_compat.h +++ /dev/null @@ -1,82 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef _LUSTRE_COMPAT_H -#define _LUSTRE_COMPAT_H - -#include -#include -#include -#include - -#include - -/* - * set ATTR_BLOCKS to a high value to avoid any risk of collision with other - * ATTR_* attributes (see bug 13828) - */ -#define ATTR_BLOCKS (1 << 27) - -#define current_ngroups current_cred()->group_info->ngroups -#define current_groups current_cred()->group_info->small_block - -/* - * OBD need working random driver, thus all our - * initialization routines must be called after device - * driver initialization - */ -#ifndef MODULE -#undef module_init -#define module_init(a) late_initcall(a) -#endif - -#define LTIME_S(time) (time.tv_sec) - -#ifndef QUOTA_OK -# define QUOTA_OK 0 -#endif -#ifndef NO_QUOTA -# define NO_QUOTA (-EDQUOT) -#endif - -#if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit) -# define ext2_set_bit __test_and_set_bit_le -# define ext2_clear_bit __test_and_clear_bit_le -# define ext2_test_bit test_bit_le -# define ext2_find_first_zero_bit find_first_zero_bit_le -# define ext2_find_next_zero_bit find_next_zero_bit_le -#endif - -#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) - -#endif /* _LUSTRE_COMPAT_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h deleted file mode 100644 index 721a81f923e3..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_debug.h +++ /dev/null @@ -1,52 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef _LUSTRE_DEBUG_H -#define _LUSTRE_DEBUG_H - -/** \defgroup debug debug - * - * @{ - */ - -#include -#include - -/* lib/debug.c */ -int dump_req(struct ptlrpc_request *req); -int block_debug_setup(void *addr, int len, __u64 off, __u64 id); -int block_debug_check(char *who, void *addr, int len, __u64 off, __u64 id); - -/** @} debug */ - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h deleted file mode 100644 index 886e817644d6..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_disk.h +++ /dev/null @@ -1,152 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre_disk.h - * - * Lustre disk format definitions. - * - * Author: Nathan Rutman - */ - -#ifndef _LUSTRE_DISK_H -#define _LUSTRE_DISK_H - -/** \defgroup disk disk - * - * @{ - */ - -#include -#include -#include - -/****************** persistent mount data *********************/ - -#define LDD_F_SV_TYPE_MDT 0x0001 -#define LDD_F_SV_TYPE_OST 0x0002 -#define LDD_F_SV_TYPE_MGS 0x0004 -#define LDD_F_SV_TYPE_MASK (LDD_F_SV_TYPE_MDT | \ - LDD_F_SV_TYPE_OST | \ - LDD_F_SV_TYPE_MGS) -#define LDD_F_SV_ALL 0x0008 - -/****************** mount command *********************/ - -/* The lmd is only used internally by Lustre; mount simply passes - * everything as string options - */ - -#define LMD_MAGIC 0xbdacbd03 -#define LMD_PARAMS_MAXLEN 4096 - -/* gleaned from the mount command - no persistent info here */ -struct lustre_mount_data { - __u32 lmd_magic; - __u32 lmd_flags; /* lustre mount flags */ - int lmd_mgs_failnodes; /* mgs failover node count */ - int lmd_exclude_count; - int lmd_recovery_time_soft; - int lmd_recovery_time_hard; - char *lmd_dev; /* device name */ - char *lmd_profile; /* client only */ - char *lmd_mgssec; /* sptlrpc flavor to mgs */ - char *lmd_opts; /* lustre mount options (as opposed to - * _device_ mount options) - */ - char *lmd_params; /* lustre params */ - __u32 *lmd_exclude; /* array of OSTs to ignore */ - 
char *lmd_mgs; /* MGS nid */ - char *lmd_osd_type; /* OSD type */ -}; - -#define LMD_FLG_SERVER 0x0001 /* Mounting a server */ -#define LMD_FLG_CLIENT 0x0002 /* Mounting a client */ -#define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */ -#define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers, - * no other services - */ -#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, - * reusing existing MGS services - */ -#define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */ -#define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */ -#define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */ -#define LMD_FLG_MGS 0x0200 /* Also start MGS along with server */ -#define LMD_FLG_IAM 0x0400 /* IAM dir */ -#define LMD_FLG_NO_PRIMNODE 0x0800 /* all nodes are service nodes */ -#define LMD_FLG_VIRGIN 0x1000 /* the service registers first time */ -#define LMD_FLG_UPDATE 0x2000 /* update parameters */ -#define LMD_FLG_HSM 0x4000 /* Start coordinator */ - -#define lmd_is_client(x) ((x)->lmd_flags & LMD_FLG_CLIENT) - -/****************** superblock additional info *********************/ - -struct ll_sb_info; - -struct lustre_sb_info { - int lsi_flags; - struct obd_device *lsi_mgc; /* mgc obd */ - struct lustre_mount_data *lsi_lmd; /* mount command info */ - struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */ - struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/ - atomic_t lsi_mounts; /* references to the srv_mnt */ - char lsi_svname[MTI_NAME_MAXLEN]; - char lsi_osd_obdname[64]; - char lsi_osd_uuid[64]; - struct obd_export *lsi_osd_exp; - char lsi_osd_type[16]; - char lsi_fstype[16]; -}; - -#define LSI_UMOUNT_FAILOVER 0x00200000 - -#define s2lsi(sb) ((struct lustre_sb_info *)((sb)->s_fs_info)) -#define s2lsi_nocast(sb) ((sb)->s_fs_info) - -#define get_profile_name(sb) (s2lsi(sb)->lsi_lmd->lmd_profile) - -/****************** prototypes *********************/ - -/* obd_mount.c */ - -int lustre_start_mgc(struct super_block *sb); -void 
lustre_register_super_ops(struct module *mod, - int (*cfs)(struct super_block *sb), - void (*ksc)(struct super_block *sb)); -int lustre_common_put_super(struct super_block *sb); - -int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type); - -/** @} disk */ - -#endif /* _LUSTRE_DISK_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h deleted file mode 100644 index 2c55241258cc..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h +++ /dev/null @@ -1,1346 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -/** \defgroup LDLM Lustre Distributed Lock Manager - * - * Lustre DLM is based on VAX DLM. - * Its two main roles are: - * - To provide locking assuring consistency of data on all Lustre nodes. 
- * - To allow clients to cache state protected by a lock by holding the - * lock until a conflicting lock is requested or it is expired by the LRU. - * - * @{ - */ - -#ifndef _LUSTRE_DLM_H__ -#define _LUSTRE_DLM_H__ - -#include -#include -#include -#include -#include /* for interval_node{}, ldlm_extent */ -#include - -#include "lustre_dlm_flags.h" - -struct obd_ops; -struct obd_device; - -#define OBD_LDLM_DEVICENAME "ldlm" - -#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus()) -#define LDLM_DEFAULT_MAX_ALIVE (65 * 60 * HZ) /* 65 min */ -#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024 - -/** - * LDLM non-error return states - */ -enum ldlm_error { - ELDLM_OK = 0, - ELDLM_LOCK_MATCHED = 1, - - ELDLM_LOCK_CHANGED = 300, - ELDLM_LOCK_ABORTED = 301, - ELDLM_LOCK_REPLACED = 302, - ELDLM_NO_LOCK_DATA = 303, - ELDLM_LOCK_WOULDBLOCK = 304, - - ELDLM_NAMESPACE_EXISTS = 400, - ELDLM_BAD_NAMESPACE = 401 -}; - -/** - * LDLM namespace type. - * The "client" type is actually an indication that this is a narrow local view - * into complete namespace on the server. Such namespaces cannot make any - * decisions about lack of conflicts or do any autonomous lock granting without - * first speaking to a server. - */ -enum ldlm_side { - LDLM_NAMESPACE_SERVER = 1 << 0, - LDLM_NAMESPACE_CLIENT = 1 << 1 -}; - -/** - * The blocking callback is overloaded to perform two functions. These flags - * indicate which operation should be performed. - */ -#define LDLM_CB_BLOCKING 1 -#define LDLM_CB_CANCELING 2 - -/** - * \name Lock Compatibility Matrix. - * - * A lock has both a type (extent, flock, inode bits, or plain) and a mode. - * Lock types are described in their respective implementation files: - * ldlm_{extent,flock,inodebits,plain}.c. - * - * There are six lock modes along with a compatibility matrix to indicate if - * two locks are compatible. - * - * - EX: Exclusive mode. Before a new file is created, MDS requests EX lock - * on the parent. 
- * - PW: Protective Write (normal write) mode. When a client requests a write - * lock from an OST, a lock with PW mode will be issued. - * - PR: Protective Read (normal read) mode. When a client requests a read from - * an OST, a lock with PR mode will be issued. Also, if the client opens a - * file for execution, it is granted a lock with PR mode. - * - CW: Concurrent Write mode. The type of lock that the MDS grants if a client - * requests a write lock during a file open operation. - * - CR Concurrent Read mode. When a client performs a path lookup, MDS grants - * an inodebit lock with the CR mode on the intermediate path component. - * - NL Null mode. - * - *
- *       NL  CR  CW  PR  PW  EX
- *  NL    1   1   1   1   1   1
- *  CR    1   1   1   1   1   0
- *  CW    1   1   1   0   0   0
- *  PR    1   1   0   1   0   0
- *  PW    1   1   0   0   0   0
- *  EX    1   0   0   0   0   0
- * 
- */ -/** @{ */ -#define LCK_COMPAT_EX LCK_NL -#define LCK_COMPAT_PW (LCK_COMPAT_EX | LCK_CR) -#define LCK_COMPAT_PR (LCK_COMPAT_PW | LCK_PR) -#define LCK_COMPAT_CW (LCK_COMPAT_PW | LCK_CW) -#define LCK_COMPAT_CR (LCK_COMPAT_CW | LCK_PR | LCK_PW) -#define LCK_COMPAT_NL (LCK_COMPAT_CR | LCK_EX | LCK_GROUP) -#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL) -#define LCK_COMPAT_COS (LCK_COS) -/** @} Lock Compatibility Matrix */ - -extern enum ldlm_mode lck_compat_array[]; - -static inline void lockmode_verify(enum ldlm_mode mode) -{ - LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); -} - -static inline int lockmode_compat(enum ldlm_mode exist_mode, - enum ldlm_mode new_mode) -{ - return (lck_compat_array[exist_mode] & new_mode); -} - -/* - * - * cluster name spaces - * - */ - -#define DLM_OST_NAMESPACE 1 -#define DLM_MDS_NAMESPACE 2 - -/* XXX - - do we just separate this by security domains and use a prefix for - multiple namespaces in the same domain? - - -*/ - -/** - * Locking rules for LDLM: - * - * lr_lock - * - * lr_lock - * waiting_locks_spinlock - * - * lr_lock - * led_lock - * - * lr_lock - * ns_lock - * - * lr_lvb_mutex - * lr_lock - * - */ - -struct ldlm_pool; -struct ldlm_lock; -struct ldlm_resource; -struct ldlm_namespace; - -/** - * Operations on LDLM pools. - * LDLM pool is a pool of locks in the namespace without any implicitly - * specified limits. - * Locks in the pool are organized in LRU. - * Local memory pressure or server instructions (e.g. mempressure on server) - * can trigger freeing of locks from the pool - */ -struct ldlm_pool_ops { - /** Recalculate pool \a pl usage */ - int (*po_recalc)(struct ldlm_pool *pl); - /** Cancel at least \a nr locks from pool \a pl */ - int (*po_shrink)(struct ldlm_pool *pl, int nr, - gfp_t gfp_mask); -}; - -/** One second for pools thread check interval. Each pool has own period. */ -#define LDLM_POOLS_THREAD_PERIOD (1) - -/** ~6% margin for modest pools. See ldlm_pool.c for details. 
*/ -#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4) - -/** Default recalc period for server side pools in sec. */ -#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1) - -/** Default recalc period for client side pools in sec. */ -#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10) - -/** - * LDLM pool structure to track granted locks. - * For purposes of determining when to release locks on e.g. memory pressure. - * This feature is commonly referred to as lru_resize. - */ -struct ldlm_pool { - /** Pool debugfs directory. */ - struct dentry *pl_debugfs_entry; - /** Pool name, must be long enough to hold compound proc entry name. */ - char pl_name[100]; - /** Lock for protecting SLV/CLV updates. */ - spinlock_t pl_lock; - /** Number of allowed locks in in pool, both, client and server side. */ - atomic_t pl_limit; - /** Number of granted locks in */ - atomic_t pl_granted; - /** Grant rate per T. */ - atomic_t pl_grant_rate; - /** Cancel rate per T. */ - atomic_t pl_cancel_rate; - /** Server lock volume (SLV). Protected by pl_lock. */ - __u64 pl_server_lock_volume; - /** Current biggest client lock volume. Protected by pl_lock. */ - __u64 pl_client_lock_volume; - /** Lock volume factor. SLV on client is calculated as following: - * server_slv * lock_volume_factor. - */ - atomic_t pl_lock_volume_factor; - /** Time when last SLV from server was obtained. */ - time64_t pl_recalc_time; - /** Recalculation period for pool. */ - time64_t pl_recalc_period; - /** Recalculation and shrink operations. */ - const struct ldlm_pool_ops *pl_ops; - /** Number of planned locks for next period. */ - int pl_grant_plan; - /** Pool statistics. */ - struct lprocfs_stats *pl_stats; - - /* sysfs object */ - struct kobject pl_kobj; - struct completion pl_kobj_unregister; -}; - -typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock); - -/** - * LVB operations. - * LVB is Lock Value Block. 
This is a special opaque (to LDLM) value that could - * be associated with an LDLM lock and transferred from client to server and - * back. - * - * Currently LVBs are used by: - * - OSC-OST code to maintain current object size/times - * - layout lock code to return the layout when the layout lock is granted - */ -struct ldlm_valblock_ops { - int (*lvbo_init)(struct ldlm_resource *res); - int (*lvbo_update)(struct ldlm_resource *res, - struct ptlrpc_request *r, - int increase); - int (*lvbo_free)(struct ldlm_resource *res); - /* Return size of lvb data appropriate RPC size can be reserved */ - int (*lvbo_size)(struct ldlm_lock *lock); - /* Called to fill in lvb data to RPC buffer @buf */ - int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen); -}; - -/** - * LDLM pools related, type of lock pool in the namespace. - * Greedy means release cached locks aggressively - */ -enum ldlm_appetite { - LDLM_NAMESPACE_GREEDY = 1 << 0, - LDLM_NAMESPACE_MODEST = 1 << 1 -}; - -struct ldlm_ns_bucket { - /** back pointer to namespace */ - struct ldlm_namespace *nsb_namespace; - /** - * Estimated lock callback time. Used by adaptive timeout code to - * avoid spurious client evictions due to unresponsiveness when in - * fact the network or overall system load is at fault - */ - struct adaptive_timeout nsb_at_estimate; -}; - -enum { - /** LDLM namespace lock stats */ - LDLM_NSS_LOCKS = 0, - LDLM_NSS_LAST -}; - -enum ldlm_ns_type { - /** invalid type */ - LDLM_NS_TYPE_UNKNOWN = 0, - /** mdc namespace */ - LDLM_NS_TYPE_MDC, - /** mds namespace */ - LDLM_NS_TYPE_MDT, - /** osc namespace */ - LDLM_NS_TYPE_OSC, - /** ost namespace */ - LDLM_NS_TYPE_OST, - /** mgc namespace */ - LDLM_NS_TYPE_MGC, - /** mgs namespace */ - LDLM_NS_TYPE_MGT, -}; - -/** - * LDLM Namespace. - * - * Namespace serves to contain locks related to a particular service. 
- * There are two kinds of namespaces: - * - Server namespace has knowledge of all locks and is therefore authoritative - * to make decisions like what locks could be granted and what conflicts - * exist during new lock enqueue. - * - Client namespace only has limited knowledge about locks in the namespace, - * only seeing locks held by the client. - * - * Every Lustre service has one server namespace present on the server serving - * that service. Every client connected to the service has a client namespace - * for it. - * Every lock obtained by client in that namespace is actually represented by - * two in-memory locks. One on the server and one on the client. The locks are - * linked by a special cookie by which one node can tell to the other which lock - * it actually means during communications. Such locks are called remote locks. - * The locks held by server only without any reference to a client are called - * local locks. - */ -struct ldlm_namespace { - /** Backward link to OBD, required for LDLM pool to store new SLV. */ - struct obd_device *ns_obd; - - /** Flag indicating if namespace is on client instead of server */ - enum ldlm_side ns_client; - - /** name of this namespace */ - char *ns_name; - - /** Resource hash table for namespace. */ - struct cfs_hash *ns_rs_hash; - - /** serialize */ - spinlock_t ns_lock; - - /** big refcount (by bucket) */ - atomic_t ns_bref; - - /** - * Namespace connect flags supported by server (may be changed via - * sysfs, LRU resize may be disabled/enabled). - */ - __u64 ns_connect_flags; - - /** Client side original connect flags supported by server. */ - __u64 ns_orig_connect_flags; - - /* namespace debugfs dir entry */ - struct dentry *ns_debugfs_entry; - - /** - * Position in global namespace list linking all namespaces on - * the node. - */ - struct list_head ns_list_chain; - - /** - * List of unused locks for this namespace. This list is also called - * LRU lock list. 
- * Unused locks are locks with zero reader/writer reference counts. - * This list is only used on clients for lock caching purposes. - * When we want to release some locks voluntarily or if server wants - * us to release some locks due to e.g. memory pressure, we take locks - * to release from the head of this list. - * Locks are linked via l_lru field in \see struct ldlm_lock. - */ - struct list_head ns_unused_list; - /** Number of locks in the LRU list above */ - int ns_nr_unused; - - /** - * Maximum number of locks permitted in the LRU. If 0, means locks - * are managed by pools and there is no preset limit, rather it is all - * controlled by available memory on this client and on server. - */ - unsigned int ns_max_unused; - /** Maximum allowed age (last used time) for locks in the LRU */ - unsigned int ns_max_age; - - /** - * Used to rate-limit ldlm_namespace_dump calls. - * \see ldlm_namespace_dump. Increased by 10 seconds every time - * it is called. - */ - unsigned long ns_next_dump; - - /** - * LVB operations for this namespace. - * \see struct ldlm_valblock_ops - */ - struct ldlm_valblock_ops *ns_lvbo; - - /** - * Used by filter code to store pointer to OBD of the service. - * Should be dropped in favor of \a ns_obd - */ - void *ns_lvbp; - - /** - * Wait queue used by __ldlm_namespace_free. Gets woken up every time - * a resource is removed. - */ - wait_queue_head_t ns_waitq; - /** LDLM pool structure for this namespace */ - struct ldlm_pool ns_pool; - /** Definition of how eagerly unused locks will be released from LRU */ - enum ldlm_appetite ns_appetite; - - /** Limit of parallel AST RPC count. */ - unsigned ns_max_parallel_ast; - - /** - * Callback to check if a lock is good to be canceled by ELC or - * during recovery. - */ - ldlm_cancel_cbt ns_cancel; - - /** LDLM lock stats */ - struct lprocfs_stats *ns_stats; - - /** - * Flag to indicate namespace is being freed. Used to determine if - * recalculation of LDLM pool statistics should be skipped. 
- */ - unsigned ns_stopping:1; - - struct kobject ns_kobj; /* sysfs object */ - struct completion ns_kobj_unregister; -}; - -/** - * Returns 1 if namespace \a ns supports early lock cancel (ELC). - */ -static inline int ns_connect_cancelset(struct ldlm_namespace *ns) -{ - return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET); -} - -/** - * Returns 1 if this namespace supports lru_resize. - */ -static inline int ns_connect_lru_resize(struct ldlm_namespace *ns) -{ - return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE); -} - -static inline void ns_register_cancel(struct ldlm_namespace *ns, - ldlm_cancel_cbt arg) -{ - ns->ns_cancel = arg; -} - -struct ldlm_lock; - -/** Type for blocking callback function of a lock. */ -typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock, - struct ldlm_lock_desc *new, void *data, - int flag); -/** Type for completion callback function of a lock. */ -typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags, - void *data); -/** Type for glimpse callback function of a lock. */ -typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data); - -/** Work list for sending GL ASTs to multiple locks. */ -struct ldlm_glimpse_work { - struct ldlm_lock *gl_lock; /* lock to glimpse */ - struct list_head gl_list; /* linkage to other gl work structs */ - __u32 gl_flags;/* see LDLM_GL_WORK_* below */ - union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in - * glimpse callback request - */ -}; - -/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */ -#define LDLM_GL_WORK_NOFREE 0x1 - -/** Interval node data for each LDLM_EXTENT lock. */ -struct ldlm_interval { - struct interval_node li_node; /* node for tree management */ - struct list_head li_group; /* the locks which have the same - * policy - group of the policy - */ -}; - -#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node) - -/** - * Interval tree for extent locks. 
- * The interval tree must be accessed under the resource lock. - * Interval trees are used for granted extent locks to speed up conflicts - * lookup. See ldlm/interval_tree.c for more details. - */ -struct ldlm_interval_tree { - /** Tree size. */ - int lit_size; - enum ldlm_mode lit_mode; /* lock mode */ - struct interval_node *lit_root; /* actual ldlm_interval */ -}; - -/** Whether to track references to exports by LDLM locks. */ -#define LUSTRE_TRACKS_LOCK_EXP_REFS (0) - -/** Cancel flags. */ -enum ldlm_cancel_flags { - LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */ - LCF_LOCAL = 0x2, /* Cancel locks locally, not notifing server */ - LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST - * in the same RPC - */ -}; - -struct ldlm_flock { - __u64 start; - __u64 end; - __u64 owner; - __u64 blocking_owner; - struct obd_export *blocking_export; - __u32 pid; -}; - -union ldlm_policy_data { - struct ldlm_extent l_extent; - struct ldlm_flock l_flock; - struct ldlm_inodebits l_inodebits; -}; - -void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, - const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy); - -enum lvb_type { - LVB_T_NONE = 0, - LVB_T_OST = 1, - LVB_T_LQUOTA = 2, - LVB_T_LAYOUT = 3, -}; - -/** - * LDLM_GID_ANY is used to match any group id in ldlm_lock_match(). - */ -#define LDLM_GID_ANY ((__u64)-1) - -/** - * LDLM lock structure - * - * Represents a single LDLM lock and its state in memory. Each lock is - * associated with a single ldlm_resource, the object which is being - * locked. There may be multiple ldlm_locks on a single resource, - * depending on the lock type and whether the locks are conflicting or - * not. - */ -struct ldlm_lock { - /** - * Local lock handle. - * When remote side wants to tell us about a lock, they address - * it by this opaque handle. The handle does not hold a - * reference on the ldlm_lock, so it can be safely passed to - * other threads or nodes. 
When the lock needs to be accessed - * from the handle, it is looked up again in the lock table, and - * may no longer exist. - * - * Must be first in the structure. - */ - struct portals_handle l_handle; - /** - * Lock reference count. - * This is how many users have pointers to actual structure, so that - * we do not accidentally free lock structure that is in use. - */ - atomic_t l_refc; - /** - * Internal spinlock protects l_resource. We should hold this lock - * first before taking res_lock. - */ - spinlock_t l_lock; - /** - * Pointer to actual resource this lock is in. - * ldlm_lock_change_resource() can change this. - */ - struct ldlm_resource *l_resource; - /** - * List item for client side LRU list. - * Protected by ns_lock in struct ldlm_namespace. - */ - struct list_head l_lru; - /** - * Linkage to resource's lock queues according to current lock state. - * (could be granted, waiting or converting) - * Protected by lr_lock in struct ldlm_resource. - */ - struct list_head l_res_link; - /** - * Tree node for ldlm_extent. - */ - struct ldlm_interval *l_tree_node; - /** - * Requested mode. - * Protected by lr_lock. - */ - enum ldlm_mode l_req_mode; - /** - * Granted mode, also protected by lr_lock. - */ - enum ldlm_mode l_granted_mode; - /** Lock completion handler pointer. Called when lock is granted. */ - ldlm_completion_callback l_completion_ast; - /** - * Lock blocking AST handler pointer. - * It plays two roles: - * - as a notification of an attempt to queue a conflicting lock (once) - * - as a notification when the lock is being cancelled. - * - * As such it's typically called twice: once for the initial conflict - * and then once more when the last user went away and the lock is - * cancelled (could happen recursively). - */ - ldlm_blocking_callback l_blocking_ast; - /** - * Lock glimpse handler. - * Glimpse handler is used to obtain LVB updates from a client by - * server - */ - ldlm_glimpse_callback l_glimpse_ast; - - /** - * Lock export. 
- * This is a pointer to actual client export for locks that were granted - * to clients. Used server-side. - */ - struct obd_export *l_export; - /** - * Lock connection export. - * Pointer to server export on a client. - */ - struct obd_export *l_conn_export; - - /** - * Remote lock handle. - * If the lock is remote, this is the handle of the other side lock - * (l_handle) - */ - struct lustre_handle l_remote_handle; - - /** - * Representation of private data specific for a lock type. - * Examples are: extent range for extent lock or bitmask for ibits locks - */ - union ldlm_policy_data l_policy_data; - - /** - * Lock state flags. Protected by lr_lock. - * \see lustre_dlm_flags.h where the bits are defined. - */ - __u64 l_flags; - - /** - * Lock r/w usage counters. - * Protected by lr_lock. - */ - __u32 l_readers; - __u32 l_writers; - /** - * If the lock is granted, a process sleeps on this waitq to learn when - * it's no longer in use. If the lock is not granted, a process sleeps - * on this waitq to learn when it becomes granted. - */ - wait_queue_head_t l_waitq; - - /** - * Seconds. It will be updated if there is any activity related to - * the lock, e.g. enqueue the lock or send blocking AST. - */ - time64_t l_last_activity; - - /** - * Time last used by e.g. being matched by lock match. - * Jiffies. Should be converted to time if needed. - */ - unsigned long l_last_used; - - /** Originally requested extent for the extent lock. */ - struct ldlm_extent l_req_extent; - - /* - * Client-side-only members. - */ - - enum lvb_type l_lvb_type; - - /** - * Temporary storage for a LVB received during an enqueue operation. - */ - __u32 l_lvb_len; - void *l_lvb_data; - - /** Private storage for lock user. Opaque to LDLM. */ - void *l_ast_data; - - /* - * Server-side-only members. - */ - - /** - * Connection cookie for the client originating the operation. - * Used by Commit on Share (COS) code. Currently only used for - * inodebits locks on MDS. 
- */ - __u64 l_client_cookie; - - /** - * List item for locks waiting for cancellation from clients. - * The lists this could be linked into are: - * waiting_locks_list (protected by waiting_locks_spinlock), - * then if the lock timed out, it is moved to - * expired_lock_thread.elt_expired_locks for further processing. - * Protected by elt_lock. - */ - struct list_head l_pending_chain; - - /** - * Set when lock is sent a blocking AST. Time in seconds when timeout - * is reached and client holding this lock could be evicted. - * This timeout could be further extended by e.g. certain IO activity - * under this lock. - * \see ost_rw_prolong_locks - */ - unsigned long l_callback_timeout; - - /** Local PID of process which created this lock. */ - __u32 l_pid; - - /** - * Number of times blocking AST was sent for this lock. - * This is for debugging. Valid values are 0 and 1, if there is an - * attempt to send blocking AST more than once, an assertion would be - * hit. \see ldlm_work_bl_ast_lock - */ - int l_bl_ast_run; - /** List item ldlm_add_ast_work_item() for case of blocking ASTs. */ - struct list_head l_bl_ast; - /** List item ldlm_add_ast_work_item() for case of completion ASTs. */ - struct list_head l_cp_ast; - /** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */ - struct list_head l_rk_ast; - - /** - * Pointer to a conflicting lock that caused blocking AST to be sent - * for this lock - */ - struct ldlm_lock *l_blocking_lock; - - /** - * Protected by lr_lock, linkages to "skip lists". - * For more explanations of skip lists see ldlm/ldlm_inodebits.c - */ - struct list_head l_sl_mode; - struct list_head l_sl_policy; - - /** Reference tracking structure to debug leaked locks. */ - struct lu_ref l_reference; -#if LUSTRE_TRACKS_LOCK_EXP_REFS - /* Debugging stuff for bug 20498, for tracking export references. 
*/ - /** number of export references taken */ - int l_exp_refs_nr; - /** link all locks referencing one export */ - struct list_head l_exp_refs_link; - /** referenced export object */ - struct obd_export *l_exp_refs_target; -#endif -}; - -/** - * LDLM resource description. - * Basically, resource is a representation for a single object. - * Object has a name which is currently 4 64-bit integers. LDLM user is - * responsible for creation of a mapping between objects it wants to be - * protected and resource names. - * - * A resource can only hold locks of a single lock type, though there may be - * multiple ldlm_locks on a single resource, depending on the lock type and - * whether the locks are conflicting or not. - */ -struct ldlm_resource { - struct ldlm_ns_bucket *lr_ns_bucket; - - /** - * List item for list in namespace hash. - * protected by ns_lock - */ - struct hlist_node lr_hash; - - /** Spinlock to protect locks under this resource. */ - spinlock_t lr_lock; - - /** - * protected by lr_lock - * @{ - */ - /** List of locks in granted state */ - struct list_head lr_granted; - /** - * List of locks that could not be granted due to conflicts and - * that are waiting for conflicts to go away - */ - struct list_head lr_waiting; - /** @} */ - - /** Type of locks this resource can hold. Only one type per resource. */ - enum ldlm_type lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ - - /** Resource name */ - struct ldlm_res_id lr_name; - /** Reference count for this resource */ - atomic_t lr_refcount; - - /** - * Interval trees (only for extent locks) for all modes of this resource - */ - struct ldlm_interval_tree lr_itree[LCK_MODE_NUM]; - - /** - * Server-side-only lock value block elements. - * To serialize lvbo_init. - */ - struct mutex lr_lvb_mutex; - int lr_lvb_len; - - /** When the resource was considered as contended. */ - unsigned long lr_contention_time; - /** List of references to this resource. For debugging. 
*/ - struct lu_ref lr_reference; - - struct inode *lr_lvb_inode; -}; - -static inline bool ldlm_has_layout(struct ldlm_lock *lock) -{ - return lock->l_resource->lr_type == LDLM_IBITS && - lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT; -} - -static inline char * -ldlm_ns_name(struct ldlm_namespace *ns) -{ - return ns->ns_name; -} - -static inline struct ldlm_namespace * -ldlm_res_to_ns(struct ldlm_resource *res) -{ - return res->lr_ns_bucket->nsb_namespace; -} - -static inline struct ldlm_namespace * -ldlm_lock_to_ns(struct ldlm_lock *lock) -{ - return ldlm_res_to_ns(lock->l_resource); -} - -static inline char * -ldlm_lock_to_ns_name(struct ldlm_lock *lock) -{ - return ldlm_ns_name(ldlm_lock_to_ns(lock)); -} - -static inline struct adaptive_timeout * -ldlm_lock_to_ns_at(struct ldlm_lock *lock) -{ - return &lock->l_resource->lr_ns_bucket->nsb_at_estimate; -} - -static inline int ldlm_lvbo_init(struct ldlm_resource *res) -{ - struct ldlm_namespace *ns = ldlm_res_to_ns(res); - - if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) - return ns->ns_lvbo->lvbo_init(res); - - return 0; -} - -static inline int ldlm_lvbo_size(struct ldlm_lock *lock) -{ - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - - if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size) - return ns->ns_lvbo->lvbo_size(lock); - - return 0; -} - -static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) -{ - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - - if (ns->ns_lvbo) - return ns->ns_lvbo->lvbo_fill(lock, buf, len); - - return 0; -} - -struct ldlm_ast_work { - struct ldlm_lock *w_lock; - int w_blocking; - struct ldlm_lock_desc w_desc; - struct list_head w_list; - int w_flags; - void *w_data; - int w_datalen; -}; - -/** - * Common ldlm_enqueue parameters - */ -struct ldlm_enqueue_info { - enum ldlm_type ei_type; /** Type of the lock being enqueued. */ - enum ldlm_mode ei_mode; /** Mode of the lock being enqueued. 
*/ - void *ei_cb_bl; /** blocking lock callback */ - void *ei_cb_cp; /** lock completion callback */ - void *ei_cb_gl; /** lock glimpse callback */ - void *ei_cbdata; /** Data to be passed into callbacks. */ - unsigned int ei_enq_slave:1; /* whether enqueue slave stripes */ -}; - -extern struct obd_ops ldlm_obd_ops; - -extern char *ldlm_lockname[]; -const char *ldlm_it2str(enum ldlm_intent_flags it); - -/** - * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG. - * For the cases where we do not have actual lock to print along - * with a debugging message that is ldlm-related - */ -#define LDLM_DEBUG_NOLOCK(format, a...) \ - CDEBUG(D_DLMTRACE, "### " format "\n", ##a) - -/** - * Support function for lock information printing into debug logs. - * \see LDLM_DEBUG - */ -#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do { \ - CFS_CHECK_STACK(msgdata, mask, cdls); \ - \ - if (((mask) & D_CANTMASK) != 0 || \ - ((libcfs_debug & (mask)) != 0 && \ - (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \ - _ldlm_lock_debug(lock, msgdata, fmt, ##a); \ -} while (0) - -void _ldlm_lock_debug(struct ldlm_lock *lock, - struct libcfs_debug_msg_data *data, - const char *fmt, ...) - __printf(3, 4); - -/** - * Rate-limited version of lock printing function. - */ -#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \ - static struct cfs_debug_limit_state _ldlm_cdls; \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls); \ - ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt, ##a);\ -} while (0) - -#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a) -#define LDLM_WARN(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a) - -/** Non-rate-limited lock printing function for debugging purposes. */ -#define LDLM_DEBUG(lock, fmt, a...) 
do { \ - if (likely(lock)) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \ - ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \ - "### " fmt, ##a); \ - } else { \ - LDLM_DEBUG_NOLOCK("no dlm lock: " fmt, ##a); \ - } \ -} while (0) - -typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags, - int first_enq, enum ldlm_error *err, - struct list_head *work_list); - -/** - * Return values for lock iterators. - * Also used during deciding of lock grants and cancellations. - */ -#define LDLM_ITER_CONTINUE 1 /* keep iterating */ -#define LDLM_ITER_STOP 2 /* stop iterating */ - -typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *); -typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *); - -/** \defgroup ldlm_iterator Lock iterators - * - * LDLM provides for a way to iterate through every lock on a resource or - * namespace or every resource in a namespace. - * @{ - */ -int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *, - ldlm_iterator_t iter, void *data); -/** @} ldlm_iterator */ - -int ldlm_replay_locks(struct obd_import *imp); - -/* ldlm_flock.c */ -int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); - -/* ldlm_extent.c */ -__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms); - -struct ldlm_callback_suite { - ldlm_completion_callback lcs_completion; - ldlm_blocking_callback lcs_blocking; - ldlm_glimpse_callback lcs_glimpse; -}; - -/* ldlm_lockd.c */ -int ldlm_get_ref(void); -void ldlm_put_ref(void); -struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req); - -/* ldlm_lock.c */ -void ldlm_lock2handle(const struct ldlm_lock *lock, - struct lustre_handle *lockh); -struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags); -void ldlm_cancel_callback(struct ldlm_lock *); -int ldlm_lock_remove_from_lru(struct ldlm_lock *); -int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data); - -/** - * Obtain a lock 
reference by its handle. - */ -static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h) -{ - return __ldlm_handle2lock(h, 0); -} - -#define LDLM_LOCK_REF_DEL(lock) \ - lu_ref_del(&lock->l_reference, "handle", current) - -static inline struct ldlm_lock * -ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags) -{ - struct ldlm_lock *lock; - - lock = __ldlm_handle2lock(h, flags); - if (lock) - LDLM_LOCK_REF_DEL(lock); - return lock; -} - -/** - * Update Lock Value Block Operations (LVBO) on a resource taking into account - * data from request \a r - */ -static inline int ldlm_res_lvbo_update(struct ldlm_resource *res, - struct ptlrpc_request *r, int increase) -{ - if (ldlm_res_to_ns(res)->ns_lvbo && - ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) { - return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r, - increase); - } - return 0; -} - -int ldlm_error2errno(enum ldlm_error error); - -#if LUSTRE_TRACKS_LOCK_EXP_REFS -void ldlm_dump_export_locks(struct obd_export *exp); -#endif - -/** - * Release a temporary lock reference obtained by ldlm_handle2lock() or - * __ldlm_handle2lock(). - */ -#define LDLM_LOCK_PUT(lock) \ -do { \ - LDLM_LOCK_REF_DEL(lock); \ - /*LDLM_DEBUG((lock), "put");*/ \ - ldlm_lock_put(lock); \ -} while (0) - -/** - * Release a lock reference obtained by some other means (see - * LDLM_LOCK_PUT()). 
- */ -#define LDLM_LOCK_RELEASE(lock) \ -do { \ - /*LDLM_DEBUG((lock), "put");*/ \ - ldlm_lock_put(lock); \ -} while (0) - -#define LDLM_LOCK_GET(lock) \ -({ \ - ldlm_lock_get(lock); \ - /*LDLM_DEBUG((lock), "get");*/ \ - lock; \ -}) - -#define ldlm_lock_list_put(head, member, count) \ -({ \ - struct ldlm_lock *_lock, *_next; \ - int c = count; \ - list_for_each_entry_safe(_lock, _next, head, member) { \ - if (c-- == 0) \ - break; \ - list_del_init(&_lock->member); \ - LDLM_LOCK_RELEASE(_lock); \ - } \ - LASSERT(c <= 0); \ -}) - -struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock); -void ldlm_lock_put(struct ldlm_lock *lock); -void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc); -void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode); -int ldlm_lock_addref_try(const struct lustre_handle *lockh, - enum ldlm_mode mode); -void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode); -void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, - enum ldlm_mode mode); -void ldlm_lock_fail_match_locked(struct ldlm_lock *lock); -void ldlm_lock_allow_match(struct ldlm_lock *lock); -void ldlm_lock_allow_match_locked(struct ldlm_lock *lock); -enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *, - enum ldlm_type type, union ldlm_policy_data *, - enum ldlm_mode mode, struct lustre_handle *, - int unref); -enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh, - __u64 *bits); -void ldlm_lock_cancel(struct ldlm_lock *lock); -void ldlm_lock_dump_handle(int level, const struct lustre_handle *); -void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); - -/* resource.c */ -struct ldlm_namespace * -ldlm_namespace_new(struct obd_device *obd, char *name, - enum ldlm_side client, enum ldlm_appetite apt, - enum ldlm_ns_type ns_type); -int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags); -void ldlm_namespace_free_prior(struct 
ldlm_namespace *ns, - struct obd_import *imp, - int force); -void ldlm_namespace_free_post(struct ldlm_namespace *ns); -void ldlm_namespace_get(struct ldlm_namespace *ns); -void ldlm_namespace_put(struct ldlm_namespace *ns); -void ldlm_debugfs_setup(void); -void ldlm_debugfs_cleanup(void); - -/* resource.c - internal */ -struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns, - struct ldlm_resource *parent, - const struct ldlm_res_id *, - enum ldlm_type type, int create); -void ldlm_resource_putref(struct ldlm_resource *res); -void ldlm_resource_add_lock(struct ldlm_resource *res, - struct list_head *head, - struct ldlm_lock *lock); -void ldlm_resource_unlink_lock(struct ldlm_lock *lock); -void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc); -void ldlm_dump_all_namespaces(enum ldlm_side client, int level); -void ldlm_namespace_dump(int level, struct ldlm_namespace *); -void ldlm_resource_dump(int level, struct ldlm_resource *); -int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *, - const struct ldlm_res_id *); - -#define LDLM_RESOURCE_ADDREF(res) do { \ - lu_ref_add_atomic(&(res)->lr_reference, __func__, current); \ -} while (0) - -#define LDLM_RESOURCE_DELREF(res) do { \ - lu_ref_del(&(res)->lr_reference, __func__, current); \ -} while (0) - -/* ldlm_request.c */ -/** \defgroup ldlm_local_ast Default AST handlers for local locks - * These AST handlers are typically used for server-side local locks and are - * also used by client-side lock handlers to perform minimum level base - * processing. - * @{ - */ -int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); -/** @} ldlm_local_ast */ - -/** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users. - * These are typically used by client and server (*_local versions) - * to obtain and release locks. 
- * @{ - */ -int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, - struct ldlm_enqueue_info *einfo, - const struct ldlm_res_id *res_id, - union ldlm_policy_data const *policy, __u64 *flags, - void *lvb, __u32 lvb_len, enum lvb_type lvb_type, - struct lustre_handle *lockh, int async); -int ldlm_prep_enqueue_req(struct obd_export *exp, - struct ptlrpc_request *req, - struct list_head *cancels, - int count); -int ldlm_prep_elc_req(struct obd_export *exp, - struct ptlrpc_request *req, - int version, int opc, int canceloff, - struct list_head *cancels, int count); - -int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - enum ldlm_type type, __u8 with_policy, - enum ldlm_mode mode, - __u64 *flags, void *lvb, __u32 lvb_len, - const struct lustre_handle *lockh, int rc); -int ldlm_cli_update_pool(struct ptlrpc_request *req); -int ldlm_cli_cancel(const struct lustre_handle *lockh, - enum ldlm_cancel_flags cancel_flags); -int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *, - enum ldlm_cancel_flags flags, void *opaque); -int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, - const struct ldlm_res_id *res_id, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - enum ldlm_cancel_flags flags, - void *opaque); -int ldlm_cancel_resource_local(struct ldlm_resource *res, - struct list_head *cancels, - union ldlm_policy_data *policy, - enum ldlm_mode mode, __u64 lock_flags, - enum ldlm_cancel_flags cancel_flags, - void *opaque); -int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - enum ldlm_cancel_flags flags); -int ldlm_cli_cancel_list(struct list_head *head, int count, - struct ptlrpc_request *req, - enum ldlm_cancel_flags flags); -/** @} ldlm_cli_api */ - -/* mds/handler.c */ -/* This has to be here because recursive inclusion sucks. 
*/ -int intent_disposition(struct ldlm_reply *rep, int flag); -void intent_set_disposition(struct ldlm_reply *rep, int flag); - -/** - * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more - * than one lock_res is dead-lock safe. - */ -enum lock_res_type { - LRT_NORMAL, - LRT_NEW -}; - -/** Lock resource. */ -static inline void lock_res(struct ldlm_resource *res) -{ - spin_lock(&res->lr_lock); -} - -/** Lock resource with a way to instruct lockdep code about nestedness-safe. */ -static inline void lock_res_nested(struct ldlm_resource *res, - enum lock_res_type mode) -{ - spin_lock_nested(&res->lr_lock, mode); -} - -/** Unlock resource. */ -static inline void unlock_res(struct ldlm_resource *res) -{ - spin_unlock(&res->lr_lock); -} - -/** Check if resource is already locked, assert if not. */ -static inline void check_res_locked(struct ldlm_resource *res) -{ - assert_spin_locked(&res->lr_lock); -} - -struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock); -void unlock_res_and_lock(struct ldlm_lock *lock); - -/* ldlm_pool.c */ -/** \defgroup ldlm_pools Various LDLM pool related functions - * There are not used outside of ldlm. 
- * @{ - */ -int ldlm_pools_init(void); -void ldlm_pools_fini(void); - -int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, - int idx, enum ldlm_side client); -void ldlm_pool_fini(struct ldlm_pool *pl); -void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock); -void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock); -/** @} */ - -static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1, - const struct ldlm_extent *ex2) -{ - return ex1->start <= ex2->end && ex2->start <= ex1->end; -} - -/* check if @ex1 contains @ex2 */ -static inline int ldlm_extent_contain(const struct ldlm_extent *ex1, - const struct ldlm_extent *ex2) -{ - return ex1->start <= ex2->start && ex1->end >= ex2->end; -} - -#endif -/** @} LDLM */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h deleted file mode 100644 index 53db031c4c8c..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h +++ /dev/null @@ -1,402 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* -*- buffer-read-only: t -*- vi: set ro: - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * Lustre is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see . - */ -/** - * \file lustre_dlm_flags.h - * The flags and collections of flags (masks) for \see struct ldlm_lock. - * - * \addtogroup LDLM Lustre Distributed Lock Manager - * @{ - * - * \name flags - * The flags and collections of flags (masks) for \see struct ldlm_lock. 
- * @{ - */ -#ifndef LDLM_ALL_FLAGS_MASK - -/** l_flags bits marked as "all_flags" bits */ -#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F932FULL - -/** extent, mode, or resource changed */ -#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */ -#define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG((_l), 1ULL << 0) -#define ldlm_set_lock_changed(_l) LDLM_SET_FLAG((_l), 1ULL << 0) -#define ldlm_clear_lock_changed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 0) - -/** - * Server placed lock on granted list, or a recovering client wants the - * lock added to the granted list, no questions asked. - */ -#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */ -#define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1) -#define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1) -#define ldlm_clear_block_granted(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 1) - -/** - * Server placed lock on conv list, or a recovering client wants the lock - * added to the conv list, no questions asked. - */ -#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */ -#define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2) -#define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2) -#define ldlm_clear_block_conv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 2) - -/** - * Server placed lock on wait list, or a recovering client wants the lock - * added to the wait list, no questions asked. - */ -#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */ -#define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3) -#define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3) -#define ldlm_clear_block_wait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 3) - -/** blocking or cancel packet was queued for sending. */ -#define LDLM_FL_AST_SENT 0x0000000000000020ULL /* bit 5 */ -#define ldlm_is_ast_sent(_l) LDLM_TEST_FLAG((_l), 1ULL << 5) -#define ldlm_set_ast_sent(_l) LDLM_SET_FLAG((_l), 1ULL << 5) -#define ldlm_clear_ast_sent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 5) - -/** - * Lock is being replayed. 
This could probably be implied by the fact that - * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. - */ -#define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */ -#define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8) -#define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8) -#define ldlm_clear_replay(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 8) - -/** Don't grant lock, just do intent. */ -#define LDLM_FL_INTENT_ONLY 0x0000000000000200ULL /* bit 9 */ -#define ldlm_is_intent_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 9) -#define ldlm_set_intent_only(_l) LDLM_SET_FLAG((_l), 1ULL << 9) -#define ldlm_clear_intent_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 9) - -/** lock request has intent */ -#define LDLM_FL_HAS_INTENT 0x0000000000001000ULL /* bit 12 */ -#define ldlm_is_has_intent(_l) LDLM_TEST_FLAG((_l), 1ULL << 12) -#define ldlm_set_has_intent(_l) LDLM_SET_FLAG((_l), 1ULL << 12) -#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12) - -/** flock deadlock detected */ -#define LDLM_FL_FLOCK_DEADLOCK 0x0000000000008000ULL /* bit 15 */ -#define ldlm_is_flock_deadlock(_l) LDLM_TEST_FLAG((_l), 1ULL << 15) -#define ldlm_set_flock_deadlock(_l) LDLM_SET_FLAG((_l), 1ULL << 15) -#define ldlm_clear_flock_deadlock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 15) - -/** discard (no writeback) on cancel */ -#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL /* bit 16 */ -#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 16) -#define ldlm_set_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 16) -#define ldlm_clear_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 16) - -/** Blocked by group lock - wait indefinitely */ -#define LDLM_FL_NO_TIMEOUT 0x0000000000020000ULL /* bit 17 */ -#define ldlm_is_no_timeout(_l) LDLM_TEST_FLAG((_l), 1ULL << 17) -#define ldlm_set_no_timeout(_l) LDLM_SET_FLAG((_l), 1ULL << 17) -#define ldlm_clear_no_timeout(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 17) - -/** - * Server told not to wait if blocked. 
For AGL, OST will not send glimpse - * callback. - */ -#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */ -#define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18) -#define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18) -#define ldlm_clear_block_nowait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 18) - -/** return blocking lock */ -#define LDLM_FL_TEST_LOCK 0x0000000000080000ULL /* bit 19 */ -#define ldlm_is_test_lock(_l) LDLM_TEST_FLAG((_l), 1ULL << 19) -#define ldlm_set_test_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 19) -#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19) - -/** match lock only */ -#define LDLM_FL_MATCH_LOCK 0x0000000000100000ULL /* bit 20 */ - -/** - * Immediately cancel such locks when they block some other locks. Send - * cancel notification to original lock holder, but expect no reply. This - * is for clients (like liblustre) that cannot be expected to reliably - * response to blocking AST. - */ -#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */ -#define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23) -#define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23) -#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23) - -/** - * measure lock contention and return -EUSERS if locking contention is high - */ -#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL /* bit 30 */ -#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG((_l), 1ULL << 30) -#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG((_l), 1ULL << 30) -#define ldlm_clear_deny_on_contention(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 30) - -/** - * These are flags that are mapped into the flags and ASTs of blocking - * locks Add FL_DISCARD to blocking ASTs - */ -#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL /* bit 31 */ -#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 31) -#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 31) -#define 
ldlm_clear_ast_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 31) - -/** - * Used for marking lock as a target for -EINTR while cp_ast sleep emulation - * + race with upcoming bl_ast. - */ -#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */ -#define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32) -#define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32) -#define ldlm_clear_fail_loc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 32) - -/** - * Used while processing the unused list to know that we have already - * handled this lock and decided to skip it. - */ -#define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */ -#define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33) -#define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33) -#define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 33) - -/** this lock is being destroyed */ -#define LDLM_FL_CBPENDING 0x0000000400000000ULL /* bit 34 */ -#define ldlm_is_cbpending(_l) LDLM_TEST_FLAG((_l), 1ULL << 34) -#define ldlm_set_cbpending(_l) LDLM_SET_FLAG((_l), 1ULL << 34) -#define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 34) - -/** not a real flag, not saved in lock */ -#define LDLM_FL_WAIT_NOREPROC 0x0000000800000000ULL /* bit 35 */ -#define ldlm_is_wait_noreproc(_l) LDLM_TEST_FLAG((_l), 1ULL << 35) -#define ldlm_set_wait_noreproc(_l) LDLM_SET_FLAG((_l), 1ULL << 35) -#define ldlm_clear_wait_noreproc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 35) - -/** cancellation callback already run */ -#define LDLM_FL_CANCEL 0x0000001000000000ULL /* bit 36 */ -#define ldlm_is_cancel(_l) LDLM_TEST_FLAG((_l), 1ULL << 36) -#define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36) -#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36) - -/** whatever it might mean -- never transmitted? 
*/ -#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */ -#define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37) -#define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37) -#define ldlm_clear_local_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 37) - -/** don't run the cancel callback under ldlm_cli_cancel_unused */ -#define LDLM_FL_FAILED 0x0000004000000000ULL /* bit 38 */ -#define ldlm_is_failed(_l) LDLM_TEST_FLAG((_l), 1ULL << 38) -#define ldlm_set_failed(_l) LDLM_SET_FLAG((_l), 1ULL << 38) -#define ldlm_clear_failed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 38) - -/** lock cancel has already been sent */ -#define LDLM_FL_CANCELING 0x0000008000000000ULL /* bit 39 */ -#define ldlm_is_canceling(_l) LDLM_TEST_FLAG((_l), 1ULL << 39) -#define ldlm_set_canceling(_l) LDLM_SET_FLAG((_l), 1ULL << 39) -#define ldlm_clear_canceling(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 39) - -/** local lock (ie, no srv/cli split) */ -#define LDLM_FL_LOCAL 0x0000010000000000ULL /* bit 40 */ -#define ldlm_is_local(_l) LDLM_TEST_FLAG((_l), 1ULL << 40) -#define ldlm_set_local(_l) LDLM_SET_FLAG((_l), 1ULL << 40) -#define ldlm_clear_local(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 40) - -/** - * XXX FIXME: This is being added to b_size as a low-risk fix to the - * fact that the LVB filling happens _after_ the lock has been granted, - * so another thread can match it before the LVB has been updated. As a - * dirty hack, we set LDLM_FL_LVB_READY only after we've done the LVB poop. - * this is only needed on LOV/OSC now, where LVB is actually used and - * callers must set it in input flags. - * - * The proper fix is to do the granting inside of the completion AST, - * which can be replaced with a LVB-aware wrapping function for OSC locks. - * That change is pretty high-risk, though, and would need a lot more - * testing. 
- */ -#define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */ -#define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41) -#define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41) -#define ldlm_clear_lvb_ready(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 41) - -/** - * A lock contributes to the known minimum size (KMS) calculation until it - * has finished the part of its cancellation that performs write back on its - * dirty pages. It can remain on the granted list during this whole time. - * Threads racing to update the KMS after performing their writeback need - * to know to exclude each other's locks from the calculation as they walk - * the granted list. - */ -#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */ -#define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42) -#define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42) -#define ldlm_clear_kms_ignore(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 42) - -/** completion AST to be executed */ -#define LDLM_FL_CP_REQD 0x0000080000000000ULL /* bit 43 */ -#define ldlm_is_cp_reqd(_l) LDLM_TEST_FLAG((_l), 1ULL << 43) -#define ldlm_set_cp_reqd(_l) LDLM_SET_FLAG((_l), 1ULL << 43) -#define ldlm_clear_cp_reqd(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 43) - -/** cleanup_resource has already handled the lock */ -#define LDLM_FL_CLEANED 0x0000100000000000ULL /* bit 44 */ -#define ldlm_is_cleaned(_l) LDLM_TEST_FLAG((_l), 1ULL << 44) -#define ldlm_set_cleaned(_l) LDLM_SET_FLAG((_l), 1ULL << 44) -#define ldlm_clear_cleaned(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 44) - -/** - * optimization hint: LDLM can run blocking callback from current context - * w/o involving separate thread. 
in order to decrease cs rate - */ -#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */ -#define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45) -#define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45) -#define ldlm_clear_atomic_cb(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 45) - -/** - * It may happen that a client initiates two operations, e.g. unlink and - * mkdir, such that the server sends a blocking AST for conflicting locks - * to this client for the first operation, whereas the second operation - * has canceled this lock and is waiting for rpc_lock which is taken by - * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in - * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it. - */ -#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */ -#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46) -#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46) -#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46) - -/** - * Set by ldlm_cancel_callback() when lock cache is dropped to let - * ldlm_callback_handler() return EINVAL to the server. It is used when - * ELC RPC is already prepared and is waiting for rpc_lock, too late to - * send a separate CANCEL RPC. - */ -#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */ -#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47) -#define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47) -#define ldlm_clear_bl_done(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 47) - -/** - * Don't put lock into the LRU list, so that it is not canceled due - * to aging. Used by MGC locks, they are cancelled only at unmount or - * by callback. - */ -#define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */ -#define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48) -#define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48) -#define ldlm_clear_no_lru(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 48) - -/** - * Set for locks that failed and where the server has been notified. 
- * - * Protected by lock and resource locks. - */ -#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */ -#define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49) -#define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49) -#define ldlm_clear_fail_notified(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 49) - -/** - * Set for locks that were removed from class hash table and will - * be destroyed when last reference to them is released. Set by - * ldlm_lock_destroy_internal(). - * - * Protected by lock and resource locks. - */ -#define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */ -#define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50) -#define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50) -#define ldlm_clear_destroyed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 50) - -/** flag whether this is a server namespace lock */ -#define LDLM_FL_SERVER_LOCK 0x0008000000000000ULL /* bit 51 */ -#define ldlm_is_server_lock(_l) LDLM_TEST_FLAG((_l), 1ULL << 51) -#define ldlm_set_server_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 51) -#define ldlm_clear_server_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 51) - -/** - * It's set in lock_res_and_lock() and unset in unlock_res_and_lock(). - * - * NB: compared with check_res_locked(), checking this bit is cheaper. - * Also, spin_is_locked() is deprecated for kernel code; one reason is - * because it works only for SMP so user needs to add extra macros like - * LASSERT_SPIN_LOCKED for uniprocessor kernels. - */ -#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */ -#define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52) -#define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52) -#define ldlm_clear_res_locked(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 52) - -/** - * It's set once we call ldlm_add_waiting_lock_res_locked() to start the - * lock-timeout timer and it will never be reset. - * - * Protected by lock and resource locks. 
- */ -#define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */ -#define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53) -#define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53) -#define ldlm_clear_waited(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 53) - -/** Flag whether this is a server namespace lock. */ -#define LDLM_FL_NS_SRV 0x0040000000000000ULL /* bit 54 */ -#define ldlm_is_ns_srv(_l) LDLM_TEST_FLAG((_l), 1ULL << 54) -#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG((_l), 1ULL << 54) -#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54) - -/** Flag whether this lock can be reused. Used by exclusive open. */ -#define LDLM_FL_EXCL 0x0080000000000000ULL /* bit 55 */ -#define ldlm_is_excl(_l) LDLM_TEST_FLAG((_l), 1ULL << 55) -#define ldlm_set_excl(_l) LDLM_SET_FLAG((_l), 1ULL << 55) -#define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55) - -/** l_flags bits marked as "ast" bits */ -#define LDLM_FL_AST_MASK (LDLM_FL_FLOCK_DEADLOCK |\ - LDLM_FL_AST_DISCARD_DATA) - -/** l_flags bits marked as "blocked" bits */ -#define LDLM_FL_BLOCKED_MASK (LDLM_FL_BLOCK_GRANTED |\ - LDLM_FL_BLOCK_CONV |\ - LDLM_FL_BLOCK_WAIT) - -/** l_flags bits marked as "gone" bits */ -#define LDLM_FL_GONE_MASK (LDLM_FL_DESTROYED |\ - LDLM_FL_FAILED) - -/** l_flags bits marked as "inherit" bits */ -/* Flags inherited from wire on enqueue/reply between client/server. */ -/* NO_TIMEOUT flag to force ldlm_lock_match() to wait with no timeout. */ -/* TEST_LOCK flag to not let TEST lock to be granted. */ -#define LDLM_FL_INHERIT_MASK (LDLM_FL_CANCEL_ON_BLOCK |\ - LDLM_FL_NO_TIMEOUT |\ - LDLM_FL_TEST_LOCK) - -/** test for ldlm_lock flag bit set */ -#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0) - -/** multi-bit test: are any of mask bits set? 
*/ -#define LDLM_HAVE_MASK(_l, _m) ((_l)->l_flags & LDLM_FL_##_m##_MASK) - -/** set a ldlm_lock flag bit */ -#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b)) - -/** clear a ldlm_lock flag bit */ -#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b)) - -/** @} subgroup */ -/** @} group */ - -#endif /* LDLM_ALL_FLAGS_MASK */ diff --git a/drivers/staging/lustre/lustre/include/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre_errno.h deleted file mode 100644 index 59fbb9f47ff1..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_errno.h +++ /dev/null @@ -1,198 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.txt - * - * GPL HEADER END - */ -/* - * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved. - * - * Copyright (c) 2013, Intel Corporation. - */ - -#ifndef LUSTRE_ERRNO_H -#define LUSTRE_ERRNO_H - -/* - * Only "network" errnos, which are defined below, are allowed on wire (or on - * disk). Generic routines exist to help translate between these and a subset - * of the "host" errnos. Some host errnos (e.g., EDEADLOCK) are intentionally - * left out. See also the comment on lustre_errno_hton_mapping[]. 
- * - * To maintain compatibility with existing x86 clients and servers, each of - * these network errnos has the same numerical value as its corresponding host - * errno on x86. - */ -#define LUSTRE_EPERM 1 /* Operation not permitted */ -#define LUSTRE_ENOENT 2 /* No such file or directory */ -#define LUSTRE_ESRCH 3 /* No such process */ -#define LUSTRE_EINTR 4 /* Interrupted system call */ -#define LUSTRE_EIO 5 /* I/O error */ -#define LUSTRE_ENXIO 6 /* No such device or address */ -#define LUSTRE_E2BIG 7 /* Argument list too long */ -#define LUSTRE_ENOEXEC 8 /* Exec format error */ -#define LUSTRE_EBADF 9 /* Bad file number */ -#define LUSTRE_ECHILD 10 /* No child processes */ -#define LUSTRE_EAGAIN 11 /* Try again */ -#define LUSTRE_ENOMEM 12 /* Out of memory */ -#define LUSTRE_EACCES 13 /* Permission denied */ -#define LUSTRE_EFAULT 14 /* Bad address */ -#define LUSTRE_ENOTBLK 15 /* Block device required */ -#define LUSTRE_EBUSY 16 /* Device or resource busy */ -#define LUSTRE_EEXIST 17 /* File exists */ -#define LUSTRE_EXDEV 18 /* Cross-device link */ -#define LUSTRE_ENODEV 19 /* No such device */ -#define LUSTRE_ENOTDIR 20 /* Not a directory */ -#define LUSTRE_EISDIR 21 /* Is a directory */ -#define LUSTRE_EINVAL 22 /* Invalid argument */ -#define LUSTRE_ENFILE 23 /* File table overflow */ -#define LUSTRE_EMFILE 24 /* Too many open files */ -#define LUSTRE_ENOTTY 25 /* Not a typewriter */ -#define LUSTRE_ETXTBSY 26 /* Text file busy */ -#define LUSTRE_EFBIG 27 /* File too large */ -#define LUSTRE_ENOSPC 28 /* No space left on device */ -#define LUSTRE_ESPIPE 29 /* Illegal seek */ -#define LUSTRE_EROFS 30 /* Read-only file system */ -#define LUSTRE_EMLINK 31 /* Too many links */ -#define LUSTRE_EPIPE 32 /* Broken pipe */ -#define LUSTRE_EDOM 33 /* Math argument out of func domain */ -#define LUSTRE_ERANGE 34 /* Math result not representable */ -#define LUSTRE_EDEADLK 35 /* Resource deadlock would occur */ -#define LUSTRE_ENAMETOOLONG 36 /* File name too long 
*/ -#define LUSTRE_ENOLCK 37 /* No record locks available */ -#define LUSTRE_ENOSYS 38 /* Function not implemented */ -#define LUSTRE_ENOTEMPTY 39 /* Directory not empty */ -#define LUSTRE_ELOOP 40 /* Too many symbolic links found */ -#define LUSTRE_ENOMSG 42 /* No message of desired type */ -#define LUSTRE_EIDRM 43 /* Identifier removed */ -#define LUSTRE_ECHRNG 44 /* Channel number out of range */ -#define LUSTRE_EL2NSYNC 45 /* Level 2 not synchronized */ -#define LUSTRE_EL3HLT 46 /* Level 3 halted */ -#define LUSTRE_EL3RST 47 /* Level 3 reset */ -#define LUSTRE_ELNRNG 48 /* Link number out of range */ -#define LUSTRE_EUNATCH 49 /* Protocol driver not attached */ -#define LUSTRE_ENOCSI 50 /* No CSI structure available */ -#define LUSTRE_EL2HLT 51 /* Level 2 halted */ -#define LUSTRE_EBADE 52 /* Invalid exchange */ -#define LUSTRE_EBADR 53 /* Invalid request descriptor */ -#define LUSTRE_EXFULL 54 /* Exchange full */ -#define LUSTRE_ENOANO 55 /* No anode */ -#define LUSTRE_EBADRQC 56 /* Invalid request code */ -#define LUSTRE_EBADSLT 57 /* Invalid slot */ -#define LUSTRE_EBFONT 59 /* Bad font file format */ -#define LUSTRE_ENOSTR 60 /* Device not a stream */ -#define LUSTRE_ENODATA 61 /* No data available */ -#define LUSTRE_ETIME 62 /* Timer expired */ -#define LUSTRE_ENOSR 63 /* Out of streams resources */ -#define LUSTRE_ENONET 64 /* Machine is not on the network */ -#define LUSTRE_ENOPKG 65 /* Package not installed */ -#define LUSTRE_EREMOTE 66 /* Object is remote */ -#define LUSTRE_ENOLINK 67 /* Link has been severed */ -#define LUSTRE_EADV 68 /* Advertise error */ -#define LUSTRE_ESRMNT 69 /* Srmount error */ -#define LUSTRE_ECOMM 70 /* Communication error on send */ -#define LUSTRE_EPROTO 71 /* Protocol error */ -#define LUSTRE_EMULTIHOP 72 /* Multihop attempted */ -#define LUSTRE_EDOTDOT 73 /* RFS specific error */ -#define LUSTRE_EBADMSG 74 /* Not a data message */ -#define LUSTRE_EOVERFLOW 75 /* Value too large for data type */ -#define LUSTRE_ENOTUNIQ 76 
/* Name not unique on network */ -#define LUSTRE_EBADFD 77 /* File descriptor in bad state */ -#define LUSTRE_EREMCHG 78 /* Remote address changed */ -#define LUSTRE_ELIBACC 79 /* Can't access needed shared library */ -#define LUSTRE_ELIBBAD 80 /* Access corrupted shared library */ -#define LUSTRE_ELIBSCN 81 /* .lib section in a.out corrupted */ -#define LUSTRE_ELIBMAX 82 /* Trying to link too many libraries */ -#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared lib directly */ -#define LUSTRE_EILSEQ 84 /* Illegal byte sequence */ -#define LUSTRE_ERESTART 85 /* Restart interrupted system call */ -#define LUSTRE_ESTRPIPE 86 /* Streams pipe error */ -#define LUSTRE_EUSERS 87 /* Too many users */ -#define LUSTRE_ENOTSOCK 88 /* Socket operation on non-socket */ -#define LUSTRE_EDESTADDRREQ 89 /* Destination address required */ -#define LUSTRE_EMSGSIZE 90 /* Message too long */ -#define LUSTRE_EPROTOTYPE 91 /* Protocol wrong type for socket */ -#define LUSTRE_ENOPROTOOPT 92 /* Protocol not available */ -#define LUSTRE_EPROTONOSUPPORT 93 /* Protocol not supported */ -#define LUSTRE_ESOCKTNOSUPPORT 94 /* Socket type not supported */ -#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported */ -#define LUSTRE_EPFNOSUPPORT 96 /* Protocol family not supported */ -#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported */ -#define LUSTRE_EADDRINUSE 98 /* Address already in use */ -#define LUSTRE_EADDRNOTAVAIL 99 /* Cannot assign requested address */ -#define LUSTRE_ENETDOWN 100 /* Network is down */ -#define LUSTRE_ENETUNREACH 101 /* Network is unreachable */ -#define LUSTRE_ENETRESET 102 /* Network connection drop for reset */ -#define LUSTRE_ECONNABORTED 103 /* Software caused connection abort */ -#define LUSTRE_ECONNRESET 104 /* Connection reset by peer */ -#define LUSTRE_ENOBUFS 105 /* No buffer space available */ -#define LUSTRE_EISCONN 106 /* Transport endpoint is connected */ -#define LUSTRE_ENOTCONN 107 /* Transport endpoint not connected */ -#define 
LUSTRE_ESHUTDOWN 108 /* Cannot send after shutdown */ -#define LUSTRE_ETOOMANYREFS 109 /* Too many references: cannot splice */ -#define LUSTRE_ETIMEDOUT 110 /* Connection timed out */ -#define LUSTRE_ECONNREFUSED 111 /* Connection refused */ -#define LUSTRE_EHOSTDOWN 112 /* Host is down */ -#define LUSTRE_EHOSTUNREACH 113 /* No route to host */ -#define LUSTRE_EALREADY 114 /* Operation already in progress */ -#define LUSTRE_EINPROGRESS 115 /* Operation now in progress */ -#define LUSTRE_ESTALE 116 /* Stale file handle */ -#define LUSTRE_EUCLEAN 117 /* Structure needs cleaning */ -#define LUSTRE_ENOTNAM 118 /* Not a XENIX named type file */ -#define LUSTRE_ENAVAIL 119 /* No XENIX semaphores available */ -#define LUSTRE_EISNAM 120 /* Is a named type file */ -#define LUSTRE_EREMOTEIO 121 /* Remote I/O error */ -#define LUSTRE_EDQUOT 122 /* Quota exceeded */ -#define LUSTRE_ENOMEDIUM 123 /* No medium found */ -#define LUSTRE_EMEDIUMTYPE 124 /* Wrong medium type */ -#define LUSTRE_ECANCELED 125 /* Operation Canceled */ -#define LUSTRE_ENOKEY 126 /* Required key not available */ -#define LUSTRE_EKEYEXPIRED 127 /* Key has expired */ -#define LUSTRE_EKEYREVOKED 128 /* Key has been revoked */ -#define LUSTRE_EKEYREJECTED 129 /* Key was rejected by service */ -#define LUSTRE_EOWNERDEAD 130 /* Owner died */ -#define LUSTRE_ENOTRECOVERABLE 131 /* State not recoverable */ -#define LUSTRE_ERESTARTSYS 512 -#define LUSTRE_ERESTARTNOINTR 513 -#define LUSTRE_ERESTARTNOHAND 514 /* restart if no handler.. 
*/ -#define LUSTRE_ENOIOCTLCMD 515 /* No ioctl command */ -#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart via sys_restart_syscall */ -#define LUSTRE_EBADHANDLE 521 /* Illegal NFS file handle */ -#define LUSTRE_ENOTSYNC 522 /* Update synchronization mismatch */ -#define LUSTRE_EBADCOOKIE 523 /* Cookie is stale */ -#define LUSTRE_ENOTSUPP 524 /* Operation is not supported */ -#define LUSTRE_ETOOSMALL 525 /* Buffer or request is too small */ -#define LUSTRE_ESERVERFAULT 526 /* An untranslatable error occurred */ -#define LUSTRE_EBADTYPE 527 /* Type not supported by server */ -#define LUSTRE_EJUKEBOX 528 /* Request won't finish until timeout */ -#define LUSTRE_EIOCBQUEUED 529 /* iocb queued await completion event */ -#define LUSTRE_EIOCBRETRY 530 /* iocb queued, will trigger a retry */ - -/* - * Translations are optimized away on x86. Host errnos that shouldn't be put - * on wire could leak through as a result. Do not count on this side effect. - */ -#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS -unsigned int lustre_errno_hton(unsigned int h); -unsigned int lustre_errno_ntoh(unsigned int n); -#else -#define lustre_errno_hton(h) (h) -#define lustre_errno_ntoh(n) (n) -#endif - -#endif /* LUSTRE_ERRNO_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h deleted file mode 100644 index 79ad5aae86b9..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_export.h +++ /dev/null @@ -1,250 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -/** \defgroup obd_export PortalRPC export definitions - * - * @{ - */ - -#ifndef __EXPORT_H -#define __EXPORT_H - -/** \defgroup export export - * - * @{ - */ - -#include -#include -#include - -enum obd_option { - OBD_OPT_FORCE = 0x0001, - OBD_OPT_FAILOVER = 0x0002, - OBD_OPT_ABORT_RECOV = 0x0004, -}; - -/** - * Export structure. Represents target-side of connection in portals. - * Also used in Lustre to connect between layers on the same node when - * there is no network-connection in-between. - * For every connected client there is an export structure on the server - * attached to the same obd device. - */ -struct obd_export { - /** - * Export handle, it's id is provided to client on connect - * Subsequent client RPCs contain this handle id to identify - * what export they are talking to. - */ - struct portals_handle exp_handle; - atomic_t exp_refcount; - /** - * Set of counters below is to track where export references are - * kept. The exp_rpc_count is used for reconnect handling also, - * the cb_count and locks_count are for debug purposes only for now. 
- * The sum of them should be less than exp_refcount by 3 - */ - atomic_t exp_rpc_count; /* RPC references */ - atomic_t exp_cb_count; /* Commit callback references */ - /** Number of queued replay requests to be processes */ - atomic_t exp_replay_count; - atomic_t exp_locks_count; /** Lock references */ -#if LUSTRE_TRACKS_LOCK_EXP_REFS - struct list_head exp_locks_list; - spinlock_t exp_locks_list_guard; -#endif - /** UUID of client connected to this export */ - struct obd_uuid exp_client_uuid; - /** To link all exports on an obd device */ - struct list_head exp_obd_chain; - /** work_struct for destruction of export */ - struct work_struct exp_zombie_work; - struct rhash_head exp_uuid_hash; /** uuid-export hash*/ - /** Obd device of this export */ - struct obd_device *exp_obd; - /** - * "reverse" import to send requests (e.g. from ldlm) back to client - * exp_lock protect its change - */ - struct obd_import *exp_imp_reverse; - struct lprocfs_stats *exp_md_stats; - /** Active connection */ - struct ptlrpc_connection *exp_connection; - /** Connection count value from last successful reconnect rpc */ - __u32 exp_conn_cnt; - struct list_head exp_outstanding_replies; - struct list_head exp_uncommitted_replies; - spinlock_t exp_uncommitted_replies_lock; - /** Last committed transno for this export */ - __u64 exp_last_committed; - /** On replay all requests waiting for replay are linked here */ - struct list_head exp_req_replay_queue; - /** - * protects exp_flags, exp_outstanding_replies and the change - * of exp_imp_reverse - */ - spinlock_t exp_lock; - /** Compatibility flags for this export are embedded into - * exp_connect_data - */ - struct obd_connect_data exp_connect_data; - enum obd_option exp_flags; - unsigned long exp_failed:1, - exp_disconnected:1, - exp_connecting:1, - exp_flvr_changed:1, - exp_flvr_adapt:1; - /* also protected by exp_lock */ - enum lustre_sec_part exp_sp_peer; - struct sptlrpc_flavor exp_flvr; /* current */ - struct sptlrpc_flavor 
exp_flvr_old[2]; /* about-to-expire */ - time64_t exp_flvr_expire[2]; /* seconds */ - - /** protects exp_hp_rpcs */ - spinlock_t exp_rpc_lock; - struct list_head exp_hp_rpcs; /* (potential) HP RPCs */ - - /** blocking dlm lock list, protected by exp_bl_list_lock */ - struct list_head exp_bl_list; - spinlock_t exp_bl_list_lock; -}; - -static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp) -{ - return &exp->exp_connect_data.ocd_connect_flags; -} - -static inline __u64 exp_connect_flags(struct obd_export *exp) -{ - return *exp_connect_flags_ptr(exp); -} - -static inline int exp_max_brw_size(struct obd_export *exp) -{ - if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE) - return exp->exp_connect_data.ocd_brw_size; - - return ONE_MB_BRW_SIZE; -} - -static inline int exp_connect_multibulk(struct obd_export *exp) -{ - return exp_max_brw_size(exp) > ONE_MB_BRW_SIZE; -} - -static inline int exp_connect_cancelset(struct obd_export *exp) -{ - return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET); -} - -static inline int exp_connect_lru_resize(struct obd_export *exp) -{ - return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE); -} - -static inline int exp_connect_vbr(struct obd_export *exp) -{ - return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR); -} - -static inline int exp_connect_som(struct obd_export *exp) -{ - return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM); -} - -static inline int exp_connect_umask(struct obd_export *exp) -{ - return !!(exp_connect_flags(exp) & OBD_CONNECT_UMASK); -} - -static inline int imp_connect_lru_resize(struct obd_import *imp) -{ - struct obd_connect_data *ocd; - - ocd = &imp->imp_connect_data; - return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE); -} - -static inline int exp_connect_layout(struct obd_export *exp) -{ - return !!(exp_connect_flags(exp) & OBD_CONNECT_LAYOUTLOCK); -} - -static inline bool exp_connect_lvb_type(struct obd_export *exp) -{ - if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE) - return true; - 
else - return false; -} - -static inline bool imp_connect_lvb_type(struct obd_import *imp) -{ - struct obd_connect_data *ocd; - - ocd = &imp->imp_connect_data; - if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE) - return true; - else - return false; -} - -static inline __u64 exp_connect_ibits(struct obd_export *exp) -{ - struct obd_connect_data *ocd; - - ocd = &exp->exp_connect_data; - return ocd->ocd_ibits_known; -} - -static inline bool imp_connect_disp_stripe(struct obd_import *imp) -{ - struct obd_connect_data *ocd; - - ocd = &imp->imp_connect_data; - return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE; -} - -struct obd_export *class_conn2export(struct lustre_handle *conn); - -#define KKUC_CT_DATA_MAGIC 0x092013cea -struct kkuc_ct_data { - __u32 kcd_magic; - struct obd_uuid kcd_uuid; - __u32 kcd_archive; -}; - -/** @} export */ - -#endif /* __EXPORT_H */ -/** @} obd_export */ diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h deleted file mode 100644 index 094ad282de2c..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_fid.h +++ /dev/null @@ -1,676 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre_fid.h - * - * Author: Yury Umanets - */ - -#ifndef __LUSTRE_FID_H -#define __LUSTRE_FID_H - -/** \defgroup fid fid - * - * @{ - * - * http://wiki.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs - * describes the FID namespace and interoperability requirements for FIDs. - * The important parts of that document are included here for reference. - * - * FID - * File IDentifier generated by client from range allocated by the SEQuence - * service and stored in struct lu_fid. The FID is composed of three parts: - * SEQuence, ObjectID, and VERsion. The SEQ component is a filesystem - * unique 64-bit integer, and only one client is ever assigned any SEQ value. - * The first 0x400 FID_SEQ_NORMAL [2^33, 2^33 + 0x400] values are reserved - * for system use. The OID component is a 32-bit value generated by the - * client on a per-SEQ basis to allow creating many unique FIDs without - * communication with the server. The VER component is a 32-bit value that - * distinguishes between different FID instantiations, such as snapshots or - * separate subtrees within the filesystem. FIDs with the same VER field - * are considered part of the same namespace. - * - * OLD filesystems are those upgraded from Lustre 1.x that predate FIDs, and - * MDTs use 32-bit ldiskfs internal inode/generation numbers (IGIFs), while - * OSTs use 64-bit Lustre object IDs and generation numbers. - * - * NEW filesystems are those formatted since the introduction of FIDs. 
- * - * IGIF - * Inode and Generation In FID, a surrogate FID used to globally identify - * an existing object on OLD formatted MDT file system. This would only be - * used on MDT0 in a DNE filesystem, because there cannot be more than one - * MDT in an OLD formatted filesystem. Belongs to sequence in [12, 2^32 - 1] - * range, where inode number is stored in SEQ, and inode generation is in OID. - * NOTE: This assumes no more than 2^32-1 inodes exist in the MDT filesystem, - * which is the maximum possible for an ldiskfs backend. It also assumes - * that the reserved ext3/ext4/ldiskfs inode numbers [0-11] are never visible - * to clients, which has always been true. - * - * IDIF - * object ID In FID, a surrogate FID used to globally identify an existing - * OST object on OLD formatted OST file system. Belongs to a sequence in - * [2^32, 2^33 - 1]. Sequence number is calculated as: - * - * 1 << 32 | (ost_index << 16) | ((objid >> 32) & 0xffff) - * - * that is, SEQ consists of 16-bit OST index, and higher 16 bits of object - * ID. The generation of unique SEQ values per OST allows the IDIF FIDs to - * be identified in the FLD correctly. The OID field is calculated as: - * - * objid & 0xffffffff - * - * that is, it consists of lower 32 bits of object ID. For objects within - * the IDIF range, object ID extraction will be: - * - * o_id = (fid->f_seq & 0x7fff) << 16 | fid->f_oid; - * o_seq = 0; // formerly group number - * - * NOTE: This assumes that no more than 2^48-1 objects have ever been created - * on any OST, and that no more than 65535 OSTs are in use. Both are very - * reasonable assumptions, i.e. an IDIF can uniquely map all objects assuming - * a maximum creation rate of 1M objects per second for a maximum of 9 years, - * or combinations thereof. - * - * OST_MDT0 - * Surrogate FID used to identify an existing object on OLD formatted OST - * filesystem. 
Belongs to the reserved SEQuence 0, and is used prior to - * the introduction of FID-on-OST, at which point IDIF will be used to - * identify objects as residing on a specific OST. - * - * LLOG - * For Lustre Log objects the object sequence 1 is used. This is compatible - * with both OLD and NEW namespaces, as this SEQ number is in the - * ext3/ldiskfs reserved inode range and does not conflict with IGIF - * sequence numbers. - * - * ECHO - * For testing OST IO performance the object sequence 2 is used. This is - * compatible with both OLD and NEW namespaces, as this SEQ number is in - * the ext3/ldiskfs reserved inode range and does not conflict with IGIF - * sequence numbers. - * - * OST_MDT1 .. OST_MAX - * For testing with multiple MDTs the object sequence 3 through 9 is used, - * allowing direct mapping of MDTs 1 through 7 respectively, for a total - * of 8 MDTs including OST_MDT0. This matches the legacy CMD project "group" - * mappings. However, this SEQ range is only for testing prior to any - * production DNE release, as the objects in this range conflict across all - * OSTs, as the OST index is not part of the FID. For production DNE usage, - * OST objects created by MDT1+ will use FID_SEQ_NORMAL FIDs. - * - * DLM OST objid to IDIF mapping - * For compatibility with existing OLD OST network protocol structures, the - * FID must map onto the o_id and o_seq in a manner that ensures existing - * objects are identified consistently for IO, as well as onto the LDLM - * namespace to ensure IDIFs there is only a single resource name for any - * object in the DLM. 
The OLD OST object DLM resource mapping is: - * - * resource[] = {o_id, o_seq, 0, 0}; // o_seq == 0 for production releases - * - * The NEW OST object DLM resource mapping is the same for both MDT and OST: - * - * resource[] = {SEQ, OID, VER, HASH}; - * - * NOTE: for mapping IDIF values to DLM resource names the o_id may be - * larger than the 2^33 reserved sequence numbers for IDIF, so it is possible - * for the o_id numbers to overlap FID SEQ numbers in the resource. However, - * in all production releases the OLD o_seq field is always zero, and all - * valid FID OID values are non-zero, so the lock resources will not collide. - * Even so, the MDT and OST resources are also in different LDLM namespaces. - */ - -#include -#include -#include -#include - -struct lu_env; -struct lu_site; -struct lu_context; -struct obd_device; -struct obd_export; - -/* Whole sequences space range and zero range definitions */ -extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE; -extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE; -extern const struct lu_fid LUSTRE_BFL_FID; -extern const struct lu_fid LU_OBF_FID; -extern const struct lu_fid LU_DOT_LUSTRE_FID; - -enum { - /* - * This is how may metadata FIDs may be allocated in one sequence(128k) - */ - LUSTRE_METADATA_SEQ_MAX_WIDTH = 0x0000000000020000ULL, - - /* - * This is how many data FIDs could be allocated in one sequence(4B - 1) - */ - LUSTRE_DATA_SEQ_MAX_WIDTH = 0x00000000FFFFFFFFULL, - - /* - * How many sequences to allocate to a client at once. - */ - LUSTRE_SEQ_META_WIDTH = 0x0000000000000001ULL, - - /* - * seq allocation pool size. - */ - LUSTRE_SEQ_BATCH_WIDTH = LUSTRE_SEQ_META_WIDTH * 1000, - - /* - * This is how many sequences may be in one super-sequence allocated to - * MDTs. 
- */ - LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH) -}; - -enum { - /** 2^6 FIDs for OI containers */ - OSD_OI_FID_OID_BITS = 6, - /** reserve enough FIDs in case we want more in the future */ - OSD_OI_FID_OID_BITS_MAX = 10, -}; - -/** special OID for local objects */ -enum local_oid { - /** \see fld_mod_init */ - FLD_INDEX_OID = 3UL, - /** \see fid_mod_init */ - FID_SEQ_CTL_OID = 4UL, - FID_SEQ_SRV_OID = 5UL, - /** \see mdd_mod_init */ - MDD_ROOT_INDEX_OID = 6UL, /* deprecated in 2.4 */ - MDD_ORPHAN_OID = 7UL, /* deprecated in 2.4 */ - MDD_LOV_OBJ_OID = 8UL, - MDD_CAPA_KEYS_OID = 9UL, - /** \see mdt_mod_init */ - LAST_RECV_OID = 11UL, - OSD_FS_ROOT_OID = 13UL, - ACCT_USER_OID = 15UL, - ACCT_GROUP_OID = 16UL, - LFSCK_BOOKMARK_OID = 17UL, - OTABLE_IT_OID = 18UL, - /* These two definitions are obsolete - * OFD_GROUP0_LAST_OID = 20UL, - * OFD_GROUP4K_LAST_OID = 20UL+4096, - */ - OFD_LAST_GROUP_OID = 4117UL, - LLOG_CATALOGS_OID = 4118UL, - MGS_CONFIGS_OID = 4119UL, - OFD_HEALTH_CHECK_OID = 4120UL, - MDD_LOV_OBJ_OSEQ = 4121UL, - LFSCK_NAMESPACE_OID = 4122UL, - REMOTE_PARENT_DIR_OID = 4123UL, - SLAVE_LLOG_CATALOGS_OID = 4124UL, -}; - -static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid) -{ - fid->f_seq = FID_SEQ_LOCAL_FILE; - fid->f_oid = oid; - fid->f_ver = 0; -} - -static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid) -{ - fid->f_seq = FID_SEQ_LOCAL_NAME; - fid->f_oid = oid; - fid->f_ver = 0; -} - -/* For new FS (>= 2.4), the root FID will be changed to - * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4), - * the root FID will still be IGIF - */ -static inline int fid_is_root(const struct lu_fid *fid) -{ - return unlikely((fid_seq(fid) == FID_SEQ_ROOT && - fid_oid(fid) == 1)); -} - -static inline int fid_is_dot_lustre(const struct lu_fid *fid) -{ - return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE && - fid_oid(fid) == FID_OID_DOT_LUSTRE); -} - -static inline int fid_is_obf(const struct lu_fid *fid) -{ - 
return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE && - fid_oid(fid) == FID_OID_DOT_LUSTRE_OBF); -} - -static inline int fid_is_otable_it(const struct lu_fid *fid) -{ - return unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE && - fid_oid(fid) == OTABLE_IT_OID); -} - -static inline int fid_is_acct(const struct lu_fid *fid) -{ - return fid_seq(fid) == FID_SEQ_LOCAL_FILE && - (fid_oid(fid) == ACCT_USER_OID || - fid_oid(fid) == ACCT_GROUP_OID); -} - -static inline int fid_is_quota(const struct lu_fid *fid) -{ - return fid_seq(fid) == FID_SEQ_QUOTA || - fid_seq(fid) == FID_SEQ_QUOTA_GLB; -} - -static inline int fid_seq_in_fldb(__u64 seq) -{ - return fid_seq_is_igif(seq) || fid_seq_is_norm(seq) || - fid_seq_is_root(seq) || fid_seq_is_dot(seq); -} - -static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx) -{ - if (fid_seq_is_mdt0(seq)) { - fid->f_seq = fid_idif_seq(0, ost_idx); - } else { - LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) || - fid_seq_is_idif(seq), "%#llx\n", seq); - fid->f_seq = seq; - } - fid->f_oid = 0; - fid->f_ver = 0; -} - -/* seq client type */ -enum lu_cli_type { - LUSTRE_SEQ_METADATA = 1, - LUSTRE_SEQ_DATA -}; - -enum lu_mgr_type { - LUSTRE_SEQ_SERVER, - LUSTRE_SEQ_CONTROLLER -}; - -/* Client sequence manager interface. */ -struct lu_client_seq { - /* Sequence-controller export. */ - struct obd_export *lcs_exp; - spinlock_t lcs_lock; - - /* - * Range of allowed for allocation sequences. When using lu_client_seq on - * clients, this contains meta-sequence range. And for servers this - * contains super-sequence range. - */ - struct lu_seq_range lcs_space; - - /* Seq related proc */ - struct dentry *lcs_debugfs_entry; - - /* This holds last allocated fid in last obtained seq */ - struct lu_fid lcs_fid; - - /* LUSTRE_SEQ_METADATA or LUSTRE_SEQ_DATA */ - enum lu_cli_type lcs_type; - - /* - * Service uuid, passed from MDT + seq name to form unique seq name to - * use it with procfs. 
- */ - char lcs_name[LUSTRE_MDT_MAXNAMELEN]; - - /* - * Sequence width, that is how many objects may be allocated in one - * sequence. Default value for it is LUSTRE_SEQ_MAX_WIDTH. - */ - __u64 lcs_width; - - /* wait queue for fid allocation and update indicator */ - wait_queue_head_t lcs_waitq; - int lcs_update; -}; - -/* Client methods */ -void seq_client_flush(struct lu_client_seq *seq); - -int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq, - struct lu_fid *fid); -/* Fids common stuff */ -int fid_is_local(const struct lu_env *env, - struct lu_site *site, const struct lu_fid *fid); - -enum lu_cli_type; -int client_fid_init(struct obd_device *obd, struct obd_export *exp, - enum lu_cli_type type); -int client_fid_fini(struct obd_device *obd); - -/* fid locking */ - -struct ldlm_namespace; - -/* - * Build (DLM) resource name from FID. - * - * NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2], - * but was moved into name[1] along with the OID to avoid consuming the - * renaming name[2,3] fields that need to be used for the quota identifier. - */ -static inline void -fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res) -{ - memset(res, 0, sizeof(*res)); - res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid); - res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid); -} - -/* - * Return true if resource is for object identified by FID. - */ -static inline bool fid_res_name_eq(const struct lu_fid *fid, - const struct ldlm_res_id *res) -{ - return res->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(fid) && - res->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(fid); -} - -/* - * Extract FID from LDLM resource. Reverse of fid_build_reg_res_name(). 
- */ -static inline void -fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res) -{ - fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF]; - fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]); - fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32); - LASSERT(fid_res_name_eq(fid, res)); -} - -/* - * Build (DLM) resource identifier from global quota FID and quota ID. - */ -static inline void -fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid, - struct ldlm_res_id *res) -{ - fid_build_reg_res_name(glb_fid, res); - res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid); - res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid); -} - -/* - * Extract global FID and quota ID from resource name - */ -static inline void fid_extract_from_quota_res(struct lu_fid *glb_fid, - union lquota_id *qid, - const struct ldlm_res_id *res) -{ - fid_extract_from_res_name(glb_fid, res); - qid->qid_fid.f_seq = res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF]; - qid->qid_fid.f_oid = (__u32)res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF]; - qid->qid_fid.f_ver = - (__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32); -} - -static inline void -fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash, - struct ldlm_res_id *res) -{ - fid_build_reg_res_name(fid, res); - res->name[LUSTRE_RES_ID_HSH_OFF] = hash; -} - -/** - * Build DLM resource name from object id & seq, which will be removed - * finally, when we replace ost_id with FID in data stack. - * - * Currently, resid from the old client, whose res[0] = object_id, - * res[1] = object_seq, is just opposite with Metatdata - * resid, where, res[0] = fid->f_seq, res[1] = fid->f_oid. - * To unify the resid identification, we will reverse the data - * resid to keep it same with Metadata resid, i.e. - * - * For resid from the old client, - * res[0] = objid, res[1] = 0, still keep the original order, - * for compatibility. 
- * - * For new resid - * res will be built from normal FID directly, i.e. res[0] = f_seq, - * res[1] = f_oid + f_ver. - */ -static inline void ostid_build_res_name(const struct ost_id *oi, - struct ldlm_res_id *name) -{ - memset(name, 0, sizeof(*name)); - if (fid_seq_is_mdt0(ostid_seq(oi))) { - name->name[LUSTRE_RES_ID_SEQ_OFF] = ostid_id(oi); - name->name[LUSTRE_RES_ID_VER_OID_OFF] = ostid_seq(oi); - } else { - fid_build_reg_res_name(&oi->oi_fid, name); - } -} - -/** - * Return true if the resource is for the object identified by this id & group. - */ -static inline int ostid_res_name_eq(const struct ost_id *oi, - const struct ldlm_res_id *name) -{ - /* Note: it is just a trick here to save some effort, probably the - * correct way would be turn them into the FID and compare - */ - if (fid_seq_is_mdt0(ostid_seq(oi))) { - return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) && - name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi); - } else { - return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_seq(oi) && - name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_id(oi); - } -} - -/** - * Note: we need check oi_seq to decide where to set oi_id, - * so oi_seq should always be set ahead of oi_id. 
- */ -static inline int ostid_set_id(struct ost_id *oi, __u64 oid) -{ - if (fid_seq_is_mdt0(oi->oi.oi_seq)) { - if (oid >= IDIF_MAX_OID) - return -E2BIG; - oi->oi.oi_id = oid; - } else if (fid_is_idif(&oi->oi_fid)) { - if (oid >= IDIF_MAX_OID) - return -E2BIG; - oi->oi_fid.f_seq = fid_idif_seq(oid, - fid_idif_ost_idx(&oi->oi_fid)); - oi->oi_fid.f_oid = oid; - oi->oi_fid.f_ver = oid >> 48; - } else { - if (oid >= OBIF_MAX_OID) - return -E2BIG; - oi->oi_fid.f_oid = oid; - } - return 0; -} - -/* pack any OST FID into an ostid (id/seq) for the wire/disk */ -static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid) -{ - int rc = 0; - - if (fid_seq_is_igif(fid->f_seq)) - return -EBADF; - - if (fid_is_idif(fid)) { - u64 objid = fid_idif_id(fid_seq(fid), fid_oid(fid), - fid_ver(fid)); - - ostid_set_seq_mdt0(ostid); - rc = ostid_set_id(ostid, objid); - } else { - ostid->oi_fid = *fid; - } - - return rc; -} - -/* The same as osc_build_res_name() */ -static inline void ost_fid_build_resid(const struct lu_fid *fid, - struct ldlm_res_id *resname) -{ - if (fid_is_mdt0(fid) || fid_is_idif(fid)) { - struct ost_id oi; - - oi.oi.oi_id = 0; /* gcc 4.7.2 complains otherwise */ - if (fid_to_ostid(fid, &oi) != 0) - return; - ostid_build_res_name(&oi, resname); - } else { - fid_build_reg_res_name(fid, resname); - } -} - -/** - * Flatten 128-bit FID values into a 64-bit value for use as an inode number. - * For non-IGIF FIDs this starts just over 2^32, and continues without - * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ - * into the range where there may not be many OID values in use, to minimize - * the risk of conflict. - * - * Suppose LUSTRE_SEQ_MAX_WIDTH less than (1 << 24) which is currently true, - * the time between re-used inode numbers is very long - 2^40 SEQ numbers, - * or about 2^40 client mounts, if clients create less than 2^24 files/mount. 
- */ -static inline __u64 fid_flatten(const struct lu_fid *fid) -{ - __u64 ino; - __u64 seq; - - if (fid_is_igif(fid)) { - ino = lu_igif_ino(fid); - return ino; - } - - seq = fid_seq(fid); - - ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid); - - return ino ? ino : fid_oid(fid); -} - -static inline __u32 fid_hash(const struct lu_fid *f, int bits) -{ - /* all objects with same id and different versions will belong to same - * collisions list. - */ - return hash_long(fid_flatten(f), bits); -} - -/** - * map fid to 32 bit value for ino on 32bit systems. - */ -static inline __u32 fid_flatten32(const struct lu_fid *fid) -{ - __u32 ino; - __u64 seq; - - if (fid_is_igif(fid)) { - ino = lu_igif_ino(fid); - return ino; - } - - seq = fid_seq(fid) - FID_SEQ_START; - - /* Map the high bits of the OID into higher bits of the inode number so - * that inodes generated at about the same time have a reduced chance - * of collisions. This will give a period of 2^12 = 1024 unique clients - * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects - * (from OID), or up to 128M inodes without collisions for new files. - */ - ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) + - (seq >> (64 - (40 - 8)) & 0xffffff00) + - (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8); - - return ino ? 
ino : fid_oid(fid); -} - -static inline int lu_fid_diff(const struct lu_fid *fid1, - const struct lu_fid *fid2) -{ - LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:" DFID ", fid2:" DFID "\n", - PFID(fid1), PFID(fid2)); - - if (fid_is_idif(fid1) && fid_is_idif(fid2)) - return fid_idif_id(fid1->f_seq, fid1->f_oid, fid1->f_ver) - - fid_idif_id(fid2->f_seq, fid2->f_oid, fid2->f_ver); - - return fid_oid(fid1) - fid_oid(fid2); -} - -#define LUSTRE_SEQ_SRV_NAME "seq_srv" -#define LUSTRE_SEQ_CTL_NAME "seq_ctl" - -/* Range common stuff */ -static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src) -{ - dst->lsr_start = cpu_to_le64(src->lsr_start); - dst->lsr_end = cpu_to_le64(src->lsr_end); - dst->lsr_index = cpu_to_le32(src->lsr_index); - dst->lsr_flags = cpu_to_le32(src->lsr_flags); -} - -static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src) -{ - dst->lsr_start = le64_to_cpu(src->lsr_start); - dst->lsr_end = le64_to_cpu(src->lsr_end); - dst->lsr_index = le32_to_cpu(src->lsr_index); - dst->lsr_flags = le32_to_cpu(src->lsr_flags); -} - -static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src) -{ - dst->lsr_start = cpu_to_be64(src->lsr_start); - dst->lsr_end = cpu_to_be64(src->lsr_end); - dst->lsr_index = cpu_to_be32(src->lsr_index); - dst->lsr_flags = cpu_to_be32(src->lsr_flags); -} - -static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src) -{ - dst->lsr_start = be64_to_cpu(src->lsr_start); - dst->lsr_end = be64_to_cpu(src->lsr_end); - dst->lsr_index = be32_to_cpu(src->lsr_index); - dst->lsr_flags = be32_to_cpu(src->lsr_flags); -} - -/** @} fid */ - -#endif /* __LUSTRE_FID_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h deleted file mode 100644 index f42122a4dfaa..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_fld.h +++ /dev/null @@ -1,137 
+0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef __LINUX_FLD_H -#define __LINUX_FLD_H - -/** \defgroup fld fld - * - * @{ - */ - -#include -#include - -struct lu_client_fld; -struct lu_server_fld; -struct lu_fld_hash; -struct fld_cache; - -extern const struct dt_index_features fld_index_features; -extern const char fld_index_name[]; - -/* - * FLD (Fid Location Database) interface. - */ -enum { - LUSTRE_CLI_FLD_HASH_DHT = 0, - LUSTRE_CLI_FLD_HASH_RRB -}; - -struct lu_fld_target { - struct list_head ft_chain; - struct obd_export *ft_exp; - struct lu_server_fld *ft_srv; - __u64 ft_idx; -}; - -struct lu_server_fld { - /** - * super sequence controller export, needed to forward fld - * lookup request. - */ - struct obd_export *lsf_control_exp; - - /** Client FLD cache. 
*/ - struct fld_cache *lsf_cache; - - /** Protect index modifications */ - struct mutex lsf_lock; - - /** Fld service name in form "fld-srv-lustre-MDTXXX" */ - char lsf_name[LUSTRE_MDT_MAXNAMELEN]; - -}; - -struct lu_client_fld { - /** Client side debugfs entry. */ - struct dentry *lcf_debugfs_entry; - - /** List of exports client FLD knows about. */ - struct list_head lcf_targets; - - /** Current hash to be used to chose an export. */ - struct lu_fld_hash *lcf_hash; - - /** Exports count. */ - int lcf_count; - - /** Lock protecting exports list and fld_hash. */ - spinlock_t lcf_lock; - - /** Client FLD cache. */ - struct fld_cache *lcf_cache; - - /** Client fld debugfs entry name. */ - char lcf_name[LUSTRE_MDT_MAXNAMELEN]; -}; - -/* Client methods */ -int fld_client_init(struct lu_client_fld *fld, - const char *prefix, int hash); - -void fld_client_fini(struct lu_client_fld *fld); - -void fld_client_flush(struct lu_client_fld *fld); - -int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds, - __u32 flags, const struct lu_env *env); - -int fld_client_create(struct lu_client_fld *fld, - struct lu_seq_range *range, - const struct lu_env *env); - -int fld_client_delete(struct lu_client_fld *fld, u64 seq, - const struct lu_env *env); - -int fld_client_add_target(struct lu_client_fld *fld, - struct lu_fld_target *tar); - -int fld_client_del_target(struct lu_client_fld *fld, - __u64 idx); - -void fld_client_debugfs_fini(struct lu_client_fld *fld); - -/** @} fld */ - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h deleted file mode 100644 index cbd68985ada9..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_ha.h +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef _LUSTRE_HA_H -#define _LUSTRE_HA_H - -/** \defgroup ha ha - * - * @{ - */ - -struct obd_import; -struct obd_export; -struct obd_device; -struct ptlrpc_request; - -int ptlrpc_replay(struct obd_import *imp); -int ptlrpc_resend(struct obd_import *imp); -void ptlrpc_free_committed(struct obd_import *imp); -void ptlrpc_wake_delayed(struct obd_import *imp); -int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async); -int ptlrpc_set_import_active(struct obd_import *imp, int active); -void ptlrpc_activate_import(struct obd_import *imp); -void ptlrpc_deactivate_import(struct obd_import *imp); -void ptlrpc_invalidate_import(struct obd_import *imp); -void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt); -void ptlrpc_pinger_force(struct obd_import *imp); - -/** @} ha */ - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h deleted file mode 100644 index 3556ce8d94e8..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_handles.h +++ /dev/null @@ -1,91 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef __LUSTRE_HANDLES_H_ -#define __LUSTRE_HANDLES_H_ - -/** \defgroup handles handles - * - * @{ - */ - -#include -#include -#include -#include -#include - -struct portals_handle_ops { - void (*hop_addref)(void *object); - void (*hop_free)(void *object, int size); -}; - -/* These handles are most easily used by having them appear at the very top of - * whatever object that you want to make handles for. ie: - * - * struct ldlm_lock { - * struct portals_handle handle; - * ... - * }; - * - * Now you're able to assign the results of cookie2handle directly to an - * ldlm_lock. If it's not at the top, you'll want to use container_of() - * to compute the start of the structure based on the handle field. - */ -struct portals_handle { - struct list_head h_link; - __u64 h_cookie; - const void *h_owner; - struct portals_handle_ops *h_ops; - - /* newly added fields to handle the RCU issue. 
-jxiong */ - struct rcu_head h_rcu; - spinlock_t h_lock; - unsigned int h_size:31; - unsigned int h_in:1; -}; - -/* handles.c */ - -/* Add a handle to the hash table */ -void class_handle_hash(struct portals_handle *, - struct portals_handle_ops *ops); -void class_handle_unhash(struct portals_handle *); -void *class_handle2object(__u64 cookie, const void *owner); -void class_handle_free_cb(struct rcu_head *rcu); -int class_handle_init(void); -void class_handle_cleanup(void); - -/** @} handles */ - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h deleted file mode 100644 index ac3805ead620..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_import.h +++ /dev/null @@ -1,369 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ -/** \defgroup obd_import PtlRPC import definitions - * Imports are client-side representation of remote obd target. - * - * @{ - */ - -#ifndef __IMPORT_H -#define __IMPORT_H - -/** \defgroup export export - * - * @{ - */ - -#include -#include -#include - -/** - * Adaptive Timeout stuff - * - * @{ - */ -#define D_ADAPTTO D_OTHER -#define AT_BINS 4 /* "bin" means "N seconds of history" */ -#define AT_FLG_NOHIST 0x1 /* use last reported value only */ - -struct adaptive_timeout { - time64_t at_binstart; /* bin start time */ - unsigned int at_hist[AT_BINS]; /* timeout history bins */ - unsigned int at_flags; - unsigned int at_current; /* current timeout value */ - unsigned int at_worst_ever; /* worst-ever timeout value */ - time64_t at_worst_time; /* worst-ever timeout timestamp */ - spinlock_t at_lock; -}; - -struct ptlrpc_at_array { - struct list_head *paa_reqs_array; /** array to hold requests */ - __u32 paa_size; /** the size of array */ - __u32 paa_count; /** the total count of reqs */ - time64_t paa_deadline; /** the earliest deadline of reqs */ - __u32 *paa_reqs_count; /** the count of reqs in each entry */ -}; - -#define IMP_AT_MAX_PORTALS 8 -struct imp_at { - int iat_portal[IMP_AT_MAX_PORTALS]; - struct adaptive_timeout iat_net_latency; - struct adaptive_timeout iat_service_estimate[IMP_AT_MAX_PORTALS]; -}; - -/** @} */ - -/** Possible import states */ -enum lustre_imp_state { - LUSTRE_IMP_CLOSED = 1, - LUSTRE_IMP_NEW = 2, - LUSTRE_IMP_DISCON = 3, - LUSTRE_IMP_CONNECTING = 4, - LUSTRE_IMP_REPLAY = 5, - LUSTRE_IMP_REPLAY_LOCKS = 6, - LUSTRE_IMP_REPLAY_WAIT = 7, - LUSTRE_IMP_RECOVER = 8, - LUSTRE_IMP_FULL = 9, - LUSTRE_IMP_EVICTED = 10, -}; - -/** Returns test string representation of numeric import state \a state */ -static inline char *ptlrpc_import_state_name(enum lustre_imp_state state) -{ - static char *import_state_names[] = { - "", "CLOSED", "NEW", "DISCONN", - "CONNECTING", "REPLAY", "REPLAY_LOCKS", "REPLAY_WAIT", - "RECOVER", "FULL", "EVICTED", - 
}; - - LASSERT(state <= LUSTRE_IMP_EVICTED); - return import_state_names[state]; -} - -/** - * List of import event types - */ -enum obd_import_event { - IMP_EVENT_DISCON = 0x808001, - IMP_EVENT_INACTIVE = 0x808002, - IMP_EVENT_INVALIDATE = 0x808003, - IMP_EVENT_ACTIVE = 0x808004, - IMP_EVENT_OCD = 0x808005, - IMP_EVENT_DEACTIVATE = 0x808006, - IMP_EVENT_ACTIVATE = 0x808007, -}; - -/** - * Definition of import connection structure - */ -struct obd_import_conn { - /** Item for linking connections together */ - struct list_head oic_item; - /** Pointer to actual PortalRPC connection */ - struct ptlrpc_connection *oic_conn; - /** uuid of remote side */ - struct obd_uuid oic_uuid; - /** - * Time (64 bit jiffies) of last connection attempt on this connection - */ - __u64 oic_last_attempt; -}; - -/* state history */ -#define IMP_STATE_HIST_LEN 16 -struct import_state_hist { - enum lustre_imp_state ish_state; - time64_t ish_time; -}; - -/** - * Definition of PortalRPC import structure. - * Imports are representing client-side view to remote target. - */ -struct obd_import { - /** Local handle (== id) for this import. */ - struct portals_handle imp_handle; - /** Reference counter */ - atomic_t imp_refcount; - struct lustre_handle imp_dlm_handle; /* client's ldlm export */ - /** Currently active connection */ - struct ptlrpc_connection *imp_connection; - /** PortalRPC client structure for this import */ - struct ptlrpc_client *imp_client; - /** List element for linking into pinger chain */ - struct list_head imp_pinger_chain; - /** work struct for destruction of import */ - struct work_struct imp_zombie_work; - - /** - * Lists of requests that are retained for replay, waiting for a reply, - * or waiting for recovery to complete, respectively. - * @{ - */ - struct list_head imp_replay_list; - struct list_head imp_sending_list; - struct list_head imp_delayed_list; - /** @} */ - - /** - * List of requests that are retained for committed open replay. 
Once - * open is committed, open replay request will be moved from the - * imp_replay_list into the imp_committed_list. - * The imp_replay_cursor is for accelerating searching during replay. - * @{ - */ - struct list_head imp_committed_list; - struct list_head *imp_replay_cursor; - /** @} */ - - /** List of not replied requests */ - struct list_head imp_unreplied_list; - /** Known maximal replied XID */ - __u64 imp_known_replied_xid; - - /** obd device for this import */ - struct obd_device *imp_obd; - - /** - * some seciruty-related fields - * @{ - */ - struct ptlrpc_sec *imp_sec; - struct mutex imp_sec_mutex; - time64_t imp_sec_expire; - /** @} */ - - /** Wait queue for those who need to wait for recovery completion */ - wait_queue_head_t imp_recovery_waitq; - - /** Number of requests currently in-flight */ - atomic_t imp_inflight; - /** Number of requests currently unregistering */ - atomic_t imp_unregistering; - /** Number of replay requests inflight */ - atomic_t imp_replay_inflight; - /** Number of currently happening import invalidations */ - atomic_t imp_inval_count; - /** Numbner of request timeouts */ - atomic_t imp_timeouts; - /** Current import state */ - enum lustre_imp_state imp_state; - /** Last replay state */ - enum lustre_imp_state imp_replay_state; - /** History of import states */ - struct import_state_hist imp_state_hist[IMP_STATE_HIST_LEN]; - int imp_state_hist_idx; - /** Current import generation. 
Incremented on every reconnect */ - int imp_generation; - /** Incremented every time we send reconnection request */ - __u32 imp_conn_cnt; - /** - * \see ptlrpc_free_committed remembers imp_generation value here - * after a check to save on unnecessary replay list iterations - */ - int imp_last_generation_checked; - /** Last transno we replayed */ - __u64 imp_last_replay_transno; - /** Last transno committed on remote side */ - __u64 imp_peer_committed_transno; - /** - * \see ptlrpc_free_committed remembers last_transno since its last - * check here and if last_transno did not change since last run of - * ptlrpc_free_committed and import generation is the same, we can - * skip looking for requests to remove from replay list as optimisation - */ - __u64 imp_last_transno_checked; - /** - * Remote export handle. This is how remote side knows what export - * we are talking to. Filled from response to connect request - */ - struct lustre_handle imp_remote_handle; - /** When to perform next ping. time in jiffies. */ - unsigned long imp_next_ping; - /** When we last successfully connected. time in 64bit jiffies */ - __u64 imp_last_success_conn; - - /** List of all possible connection for import. */ - struct list_head imp_conn_list; - /** - * Current connection. 
\a imp_connection is imp_conn_current->oic_conn - */ - struct obd_import_conn *imp_conn_current; - - /** Protects flags, level, generation, conn_cnt, *_list */ - spinlock_t imp_lock; - - /* flags */ - unsigned long imp_no_timeout:1, /* timeouts are disabled */ - imp_invalid:1, /* evicted */ - /* administratively disabled */ - imp_deactive:1, - /* try to recover the import */ - imp_replayable:1, - /* don't run recovery (timeout instead) */ - imp_dlm_fake:1, - /* use 1/2 timeout on MDS' OSCs */ - imp_server_timeout:1, - /* VBR: imp in delayed recovery */ - imp_delayed_recovery:1, - /* VBR: if gap was found then no lock replays - */ - imp_no_lock_replay:1, - /* recovery by versions was failed */ - imp_vbr_failed:1, - /* force an immediate ping */ - imp_force_verify:1, - /* force a scheduled ping */ - imp_force_next_verify:1, - /* pingable */ - imp_pingable:1, - /* resend for replay */ - imp_resend_replay:1, - /* disable normal recovery, for test only. */ - imp_no_pinger_recover:1, -#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE - /* need IR MNE swab */ - imp_need_mne_swab:1, -#endif - /* import must be reconnected instead of - * chosing new connection - */ - imp_force_reconnect:1, - /* import has tried to connect with server */ - imp_connect_tried:1, - /* connected but not FULL yet */ - imp_connected:1; - __u32 imp_connect_op; - struct obd_connect_data imp_connect_data; - __u64 imp_connect_flags_orig; - int imp_connect_error; - - __u32 imp_msg_magic; - __u32 imp_msghdr_flags; /* adjusted based on server capability */ - - struct imp_at imp_at; /* adaptive timeout data */ - time64_t imp_last_reply_time; /* for health check */ -}; - -/* import.c */ -static inline unsigned int at_est2timeout(unsigned int val) -{ - /* add an arbitrary minimum: 125% +5 sec */ - return (val + (val >> 2) + 5); -} - -static inline unsigned int at_timeout2est(unsigned int val) -{ - /* restore estimate value from timeout: e=4/5(t-5) */ - LASSERT(val); - return (max((val << 2) / 5, 5U) - 
4); -} - -static inline void at_reset(struct adaptive_timeout *at, int val) -{ - spin_lock(&at->at_lock); - at->at_current = val; - at->at_worst_ever = val; - at->at_worst_time = ktime_get_real_seconds(); - spin_unlock(&at->at_lock); -} - -static inline void at_init(struct adaptive_timeout *at, int val, int flags) -{ - memset(at, 0, sizeof(*at)); - spin_lock_init(&at->at_lock); - at->at_flags = flags; - at_reset(at, val); -} - -extern unsigned int at_min; -static inline int at_get(struct adaptive_timeout *at) -{ - return (at->at_current > at_min) ? at->at_current : at_min; -} - -int at_measured(struct adaptive_timeout *at, unsigned int val); -int import_at_get_index(struct obd_import *imp, int portal); -extern unsigned int at_max; -#define AT_OFF (at_max == 0) - -/* genops.c */ -struct obd_export; -struct obd_import *class_exp2cliimp(struct obd_export *); - -/** @} import */ - -#endif /* __IMPORT_H */ - -/** @} obd_import */ diff --git a/drivers/staging/lustre/lustre/include/lustre_intent.h b/drivers/staging/lustre/lustre/include/lustre_intent.h deleted file mode 100644 index 51e5c0e03872..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_intent.h +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef LUSTRE_INTENT_H -#define LUSTRE_INTENT_H - -#include - -/* intent IT_XXX are defined in lustre/include/obd.h */ - -struct lookup_intent { - int it_op; - int it_create_mode; - __u64 it_flags; - int it_disposition; - int it_status; - __u64 it_lock_handle; - __u64 it_lock_bits; - int it_lock_mode; - int it_remote_lock_mode; - __u64 it_remote_lock_handle; - struct ptlrpc_request *it_request; - unsigned int it_lock_set:1; -}; - -static inline int it_disposition(struct lookup_intent *it, int flag) -{ - return it->it_disposition & flag; -} - -static inline void it_set_disposition(struct lookup_intent *it, int flag) -{ - it->it_disposition |= flag; -} - -static inline void it_clear_disposition(struct lookup_intent *it, int flag) -{ - it->it_disposition &= ~flag; -} - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h deleted file mode 100644 index 2b3fa8430185..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h +++ /dev/null @@ -1,56 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2013 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * - * Author: Nathan Rutman - * - * Kernel <-> userspace communication routines. - * The definitions below are used in the kernel and userspace. - */ - -#ifndef __LUSTRE_KERNELCOMM_H__ -#define __LUSTRE_KERNELCOMM_H__ - -/* For declarations shared with userspace */ -#include - -/* prototype for callback function on kuc groups */ -typedef int (*libcfs_kkuc_cb_t)(void *data, void *cb_arg); - -/* Kernel methods */ -int libcfs_kkuc_msg_put(struct file *fp, void *payload); -int libcfs_kkuc_group_put(unsigned int group, void *payload); -int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group, - void *data, size_t data_len); -int libcfs_kkuc_group_rem(int uid, unsigned int group); -int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, - void *cb_arg); - -#endif /* __LUSTRE_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h deleted file mode 100644 index 87748e9902a7..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ /dev/null @@ -1,126 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre_lib.h - * - * Basic Lustre library routines. 
- */ - -#ifndef _LUSTRE_LIB_H -#define _LUSTRE_LIB_H - -/** \defgroup lib lib - * - * @{ - */ - -#include -#include -#include -#include -#include -#include - -/* target.c */ -struct ptlrpc_request; -struct obd_export; -struct lu_target; -struct l_wait_info; -#include -#include - -#define LI_POISON 0x5a5a5a5a -#if BITS_PER_LONG > 32 -# define LL_POISON 0x5a5a5a5a5a5a5a5aL -#else -# define LL_POISON 0x5a5a5a5aL -#endif -#define LP_POISON ((void *)LL_POISON) - -int target_pack_pool_reply(struct ptlrpc_request *req); -int do_set_info_async(struct obd_import *imp, - int opcode, int version, - u32 keylen, void *key, - u32 vallen, void *val, - struct ptlrpc_request_set *set); - -void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id); - -#define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | \ - sigmask(SIGTERM) | sigmask(SIGQUIT) | \ - sigmask(SIGALRM)) -static inline int l_fatal_signal_pending(struct task_struct *p) -{ - return signal_pending(p) && sigtestsetmask(&p->pending.signal, LUSTRE_FATAL_SIGS); -} - -/** @} lib */ - - - -/* l_wait_event_abortable() is a bit like wait_event_killable() - * except there is a fixed set of signals which will abort: - * LUSTRE_FATAL_SIGS - */ -#define l_wait_event_abortable(wq, condition) \ -({ \ - sigset_t __new_blocked, __old_blocked; \ - int __ret = 0; \ - siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \ - sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked); \ - __ret = wait_event_interruptible(wq, condition); \ - sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \ - __ret; \ -}) - -#define l_wait_event_abortable_timeout(wq, condition, timeout) \ -({ \ - sigset_t __new_blocked, __old_blocked; \ - int __ret = 0; \ - siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \ - sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked); \ - __ret = wait_event_interruptible_timeout(wq, condition, timeout);\ - sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \ - __ret; \ -}) - -#define l_wait_event_abortable_exclusive(wq, 
condition) \ -({ \ - sigset_t __new_blocked, __old_blocked; \ - int __ret = 0; \ - siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \ - sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked); \ - __ret = wait_event_interruptible_exclusive(wq, condition); \ - sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \ - __ret; \ -}) -#endif /* _LUSTRE_LIB_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h deleted file mode 100644 index 03db1511bfd3..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_linkea.h +++ /dev/null @@ -1,93 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2013, 2014, Intel Corporation. - * Use is subject to license terms. - * - * Author: di wang - */ - -/* There are several reasons to restrict the linkEA size: - * - * 1. Under DNE mode, if we do not restrict the linkEA size, and if there - * are too many cross-MDTs hard links to the same object, then it will - * casue the llog overflow. - * - * 2. Some backend has limited size for EA. For example, if without large - * EA enabled, the ldiskfs will make all EAs to share one (4K) EA block. 
- * - * 3. Too many entries in linkEA will seriously affect linkEA performance - * because we only support to locate linkEA entry consecutively. - */ -#define MAX_LINKEA_SIZE 4096 - -struct linkea_data { - /** - * Buffer to keep link EA body. - */ - struct lu_buf *ld_buf; - /** - * The matched header, entry and its length in the EA - */ - struct link_ea_header *ld_leh; - struct link_ea_entry *ld_lee; - int ld_reclen; -}; - -int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf); -int linkea_init(struct linkea_data *ldata); -int linkea_init_with_rec(struct linkea_data *ldata); -void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen, - struct lu_name *lname, struct lu_fid *pfid); -int linkea_entry_pack(struct link_ea_entry *lee, const struct lu_name *lname, - const struct lu_fid *pfid); -int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname, - const struct lu_fid *pfid); -void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname); -int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname, - const struct lu_fid *pfid); - -static inline void linkea_first_entry(struct linkea_data *ldata) -{ - LASSERT(ldata); - LASSERT(ldata->ld_leh); - - if (ldata->ld_leh->leh_reccount == 0) - ldata->ld_lee = NULL; - else - ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1); -} - -static inline void linkea_next_entry(struct linkea_data *ldata) -{ - LASSERT(ldata); - LASSERT(ldata->ld_leh); - - if (ldata->ld_lee) { - ldata->ld_lee = (struct link_ea_entry *)((char *)ldata->ld_lee + - ldata->ld_reclen); - if ((char *)ldata->ld_lee >= ((char *)ldata->ld_leh + - ldata->ld_leh->leh_len)) - ldata->ld_lee = NULL; - } -} diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h deleted file mode 100644 index 080ec1f8e19f..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_lmv.h +++ /dev/null @@ -1,174 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. A copy is - * included in the COPYING file that accompanied this code. - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * GPL HEADER END - */ -/* - * Copyright (c) 2013, Intel Corporation. - */ -/* - * lustre/include/lustre_lmv.h - * - * Lustre LMV structures and functions. 
- * - * Author: Di Wang - */ - -#ifndef _LUSTRE_LMV_H -#define _LUSTRE_LMV_H -#include - -struct lmv_oinfo { - struct lu_fid lmo_fid; - u32 lmo_mds; - struct inode *lmo_root; -}; - -struct lmv_stripe_md { - __u32 lsm_md_magic; - __u32 lsm_md_stripe_count; - __u32 lsm_md_master_mdt_index; - __u32 lsm_md_hash_type; - __u32 lsm_md_layout_version; - __u32 lsm_md_default_count; - __u32 lsm_md_default_index; - char lsm_md_pool_name[LOV_MAXPOOLNAME + 1]; - struct lmv_oinfo lsm_md_oinfo[0]; -}; - -static inline bool -lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2) -{ - __u32 idx; - - if (lsm1->lsm_md_magic != lsm2->lsm_md_magic || - lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count || - lsm1->lsm_md_master_mdt_index != lsm2->lsm_md_master_mdt_index || - lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type || - lsm1->lsm_md_layout_version != lsm2->lsm_md_layout_version || - strcmp(lsm1->lsm_md_pool_name, lsm2->lsm_md_pool_name) != 0) - return false; - - for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) { - if (!lu_fid_eq(&lsm1->lsm_md_oinfo[idx].lmo_fid, - &lsm2->lsm_md_oinfo[idx].lmo_fid)) - return false; - } - - return true; -} - -union lmv_mds_md; - -void lmv_free_memmd(struct lmv_stripe_md *lsm); - -static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst, - const struct lmv_mds_md_v1 *lmv_src) -{ - __u32 i; - - lmv_dst->lmv_magic = le32_to_cpu(lmv_src->lmv_magic); - lmv_dst->lmv_stripe_count = le32_to_cpu(lmv_src->lmv_stripe_count); - lmv_dst->lmv_master_mdt_index = - le32_to_cpu(lmv_src->lmv_master_mdt_index); - lmv_dst->lmv_hash_type = le32_to_cpu(lmv_src->lmv_hash_type); - lmv_dst->lmv_layout_version = le32_to_cpu(lmv_src->lmv_layout_version); - - for (i = 0; i < lmv_src->lmv_stripe_count; i++) - fid_le_to_cpu(&lmv_dst->lmv_stripe_fids[i], - &lmv_src->lmv_stripe_fids[i]); -} - -static inline void lmv_le_to_cpu(union lmv_mds_md *lmv_dst, - const union lmv_mds_md *lmv_src) -{ - switch (le32_to_cpu(lmv_src->lmv_magic)) { - case 
LMV_MAGIC_V1: - lmv1_le_to_cpu(&lmv_dst->lmv_md_v1, &lmv_src->lmv_md_v1); - break; - default: - break; - } -} - -/* This hash is only for testing purpose */ -static inline unsigned int -lmv_hash_all_chars(unsigned int count, const char *name, int namelen) -{ - const unsigned char *p = (const unsigned char *)name; - unsigned int c = 0; - - while (--namelen >= 0) - c += p[namelen]; - - c = c % count; - - return c; -} - -static inline unsigned int -lmv_hash_fnv1a(unsigned int count, const char *name, int namelen) -{ - __u64 hash; - - hash = lustre_hash_fnv_1a_64(name, namelen); - - return do_div(hash, count); -} - -static inline int lmv_name_to_stripe_index(__u32 lmv_hash_type, - unsigned int stripe_count, - const char *name, int namelen) -{ - __u32 hash_type = lmv_hash_type & LMV_HASH_TYPE_MASK; - int idx; - - LASSERT(namelen > 0); - if (stripe_count <= 1) - return 0; - - /* for migrating object, always start from 0 stripe */ - if (lmv_hash_type & LMV_HASH_FLAG_MIGRATION) - return 0; - - switch (hash_type) { - case LMV_HASH_TYPE_ALL_CHARS: - idx = lmv_hash_all_chars(stripe_count, name, namelen); - break; - case LMV_HASH_TYPE_FNV_1A_64: - idx = lmv_hash_fnv1a(stripe_count, name, namelen); - break; - default: - idx = -EBADFD; - break; - } - CDEBUG(D_INFO, "name %.*s hash_type %d idx %d\n", namelen, name, - hash_type, idx); - - return idx; -} - -static inline bool lmv_is_known_hash_type(__u32 type) -{ - return (type & LMV_HASH_TYPE_MASK) == LMV_HASH_TYPE_FNV_1A_64 || - (type & LMV_HASH_TYPE_MASK) == LMV_HASH_TYPE_ALL_CHARS; -} - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h deleted file mode 100644 index 07f4e600386b..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_log.h +++ /dev/null @@ -1,382 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre_log.h - * - * Generic infrastructure for managing a collection of logs. 
- * These logs are used for: - * - * - orphan recovery: OST adds record on create - * - mtime/size consistency: the OST adds a record on first write - * - open/unlinked objects: OST adds a record on destroy - * - * - mds unlink log: the MDS adds an entry upon delete - * - * - raid1 replication log between OST's - * - MDS replication logs - */ - -#ifndef _LUSTRE_LOG_H -#define _LUSTRE_LOG_H - -/** \defgroup log log - * - * @{ - */ - -#include -#include - -#define LOG_NAME_LIMIT(logname, name) \ - snprintf(logname, sizeof(logname), "LOGS/%s", name) -#define LLOG_EEMPTY 4711 - -enum llog_open_param { - LLOG_OPEN_EXISTS = 0x0000, - LLOG_OPEN_NEW = 0x0001, -}; - -struct plain_handle_data { - struct list_head phd_entry; - struct llog_handle *phd_cat_handle; - struct llog_cookie phd_cookie; /* cookie of this log in its cat */ -}; - -struct cat_handle_data { - struct list_head chd_head; - struct llog_handle *chd_current_log; /* currently open log */ - struct llog_handle *chd_next_log; /* llog to be used next */ -}; - -struct llog_handle; - -/* llog.c - general API */ -int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, - int flags, struct obd_uuid *uuid); -int llog_process(const struct lu_env *env, struct llog_handle *loghandle, - llog_cb_t cb, void *data, void *catdata); -int llog_process_or_fork(const struct lu_env *env, - struct llog_handle *loghandle, - llog_cb_t cb, void *data, void *catdata, bool fork); -int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt, - struct llog_handle **lgh, struct llog_logid *logid, - char *name, enum llog_open_param open_param); -int llog_close(const struct lu_env *env, struct llog_handle *cathandle); - -/* llog_process flags */ -#define LLOG_FLAG_NODEAMON 0x0001 - -/* llog_cat.c - catalog api */ -struct llog_process_data { - /** - * Any useful data needed while processing catalog. This is - * passed later to process callback. 
- */ - void *lpd_data; - /** - * Catalog process callback function, called for each record - * in catalog. - */ - llog_cb_t lpd_cb; - /** - * Start processing the catalog from startcat/startidx - */ - int lpd_startcat; - int lpd_startidx; -}; - -struct llog_process_cat_data { - /** - * Temporary stored first_idx while scanning log. - */ - int lpcd_first_idx; - /** - * Temporary stored last_idx while scanning log. - */ - int lpcd_last_idx; -}; - -struct thandle; - -int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle); -int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh, - llog_cb_t cb, void *data, int startcat, int startidx); - -/* llog_obd.c */ -int llog_setup(const struct lu_env *env, struct obd_device *obd, - struct obd_llog_group *olg, int index, - struct obd_device *disk_obd, struct llog_operations *op); -int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt); -int llog_cleanup(const struct lu_env *env, struct llog_ctxt *); - -/* llog_net.c */ -int llog_initiator_connect(struct llog_ctxt *ctxt); - -struct llog_operations { - int (*lop_next_block)(const struct lu_env *env, struct llog_handle *h, - int *curr_idx, int next_idx, __u64 *offset, - void *buf, int len); - int (*lop_prev_block)(const struct lu_env *env, struct llog_handle *h, - int prev_idx, void *buf, int len); - int (*lop_read_header)(const struct lu_env *env, - struct llog_handle *handle); - int (*lop_setup)(const struct lu_env *env, struct obd_device *obd, - struct obd_llog_group *olg, int ctxt_idx, - struct obd_device *disk_obd); - int (*lop_sync)(struct llog_ctxt *ctxt, struct obd_export *exp, - int flags); - int (*lop_cleanup)(const struct lu_env *env, struct llog_ctxt *ctxt); - int (*lop_cancel)(const struct lu_env *env, struct llog_ctxt *ctxt, - struct llog_cookie *cookies, int flags); - int (*lop_connect)(struct llog_ctxt *ctxt, struct llog_logid *logid, - struct llog_gen *gen, struct obd_uuid *uuid); - /** - * Any llog file must be 
opened first using llog_open(). Llog can be - * opened by name, logid or without both, in last case the new logid - * will be generated. - */ - int (*lop_open)(const struct lu_env *env, struct llog_handle *lgh, - struct llog_logid *logid, char *name, - enum llog_open_param); - /** - * Opened llog may not exist and this must be checked where needed using - * the llog_exist() call. - */ - int (*lop_exist)(struct llog_handle *lgh); - /** - * Close llog file and calls llog_free_handle() implicitly. - * Any opened llog must be closed by llog_close() call. - */ - int (*lop_close)(const struct lu_env *env, struct llog_handle *handle); - /** - * Create new llog file. The llog must be opened. - * Must be used only for local llog operations. - */ - int (*lop_declare_create)(const struct lu_env *env, - struct llog_handle *handle, - struct thandle *th); - /** - * write new record in llog. It appends records usually but can edit - * existing records too. - */ - int (*lop_declare_write_rec)(const struct lu_env *env, - struct llog_handle *lgh, - struct llog_rec_hdr *rec, - int idx, struct thandle *th); - int (*lop_write_rec)(const struct lu_env *env, - struct llog_handle *loghandle, - struct llog_rec_hdr *rec, - struct llog_cookie *cookie, int cookiecount, - void *buf, int idx, struct thandle *th); - /** - * Add new record in llog catalog. Does the same as llog_write_rec() - * but using llog catalog. 
- */ - int (*lop_declare_add)(const struct lu_env *env, - struct llog_handle *lgh, - struct llog_rec_hdr *rec, struct thandle *th); - int (*lop_add)(const struct lu_env *env, struct llog_handle *lgh, - struct llog_rec_hdr *rec, struct llog_cookie *cookie, - void *buf, struct thandle *th); -}; - -/* In-memory descriptor for a log object or log catalog */ -struct llog_handle { - struct rw_semaphore lgh_lock; - spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */ - struct llog_logid lgh_id; /* id of this log */ - struct llog_log_hdr *lgh_hdr; - size_t lgh_hdr_size; - int lgh_last_idx; - int lgh_cur_idx; /* used during llog_process */ - __u64 lgh_cur_offset; /* used during llog_process */ - struct llog_ctxt *lgh_ctxt; - union { - struct plain_handle_data phd; - struct cat_handle_data chd; - } u; - char *lgh_name; - void *private_data; - struct llog_operations *lgh_logops; - atomic_t lgh_refcount; -}; - -#define LLOG_CTXT_FLAG_UNINITIALIZED 0x00000001 -#define LLOG_CTXT_FLAG_STOP 0x00000002 - -struct llog_ctxt { - int loc_idx; /* my index the obd array of ctxt's */ - struct obd_device *loc_obd; /* points back to the containing obd*/ - struct obd_llog_group *loc_olg; /* group containing that ctxt */ - struct obd_export *loc_exp; /* parent "disk" export (e.g. 
MDS) */ - struct obd_import *loc_imp; /* to use in RPC's: can be backward - * pointing import - */ - struct llog_operations *loc_logops; - struct llog_handle *loc_handle; - struct mutex loc_mutex; /* protect loc_imp */ - atomic_t loc_refcount; - long loc_flags; /* flags, see above defines */ - /* - * llog chunk size, and llog record size can not be bigger than - * loc_chunk_size - */ - __u32 loc_chunk_size; -}; - -#define LLOG_PROC_BREAK 0x0001 -#define LLOG_DEL_RECORD 0x0002 - -static inline int llog_handle2ops(struct llog_handle *loghandle, - struct llog_operations **lop) -{ - if (!loghandle || !loghandle->lgh_logops) - return -EINVAL; - - *lop = loghandle->lgh_logops; - return 0; -} - -static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt) -{ - atomic_inc(&ctxt->loc_refcount); - CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt, - atomic_read(&ctxt->loc_refcount)); - return ctxt; -} - -static inline void llog_ctxt_put(struct llog_ctxt *ctxt) -{ - if (!ctxt) - return; - LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON); - CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt, - atomic_read(&ctxt->loc_refcount) - 1); - __llog_ctxt_put(NULL, ctxt); -} - -static inline void llog_group_init(struct obd_llog_group *olg) -{ - init_waitqueue_head(&olg->olg_waitq); - spin_lock_init(&olg->olg_lock); - mutex_init(&olg->olg_cat_processing); -} - -static inline int llog_group_set_ctxt(struct obd_llog_group *olg, - struct llog_ctxt *ctxt, int index) -{ - LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); - - spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index]) { - spin_unlock(&olg->olg_lock); - return -EEXIST; - } - olg->olg_ctxts[index] = ctxt; - spin_unlock(&olg->olg_lock); - return 0; -} - -static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg, - int index) -{ - struct llog_ctxt *ctxt; - - LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); - - spin_lock(&olg->olg_lock); - if (!olg->olg_ctxts[index]) - ctxt = NULL; - 
else - ctxt = llog_ctxt_get(olg->olg_ctxts[index]); - spin_unlock(&olg->olg_lock); - return ctxt; -} - -static inline void llog_group_clear_ctxt(struct obd_llog_group *olg, int index) -{ - LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); - spin_lock(&olg->olg_lock); - olg->olg_ctxts[index] = NULL; - spin_unlock(&olg->olg_lock); -} - -static inline struct llog_ctxt *llog_get_context(struct obd_device *obd, - int index) -{ - return llog_group_get_ctxt(&obd->obd_olg, index); -} - -static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index) -{ - return (!olg->olg_ctxts[index]); -} - -static inline int llog_ctxt_null(struct obd_device *obd, int index) -{ - return llog_group_ctxt_null(&obd->obd_olg, index); -} - -static inline int llog_next_block(const struct lu_env *env, - struct llog_handle *loghandle, int *cur_idx, - int next_idx, __u64 *cur_offset, void *buf, - int len) -{ - struct llog_operations *lop; - int rc; - - rc = llog_handle2ops(loghandle, &lop); - if (rc) - return rc; - if (!lop->lop_next_block) - return -EOPNOTSUPP; - - rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx, - cur_offset, buf, len); - return rc; -} - -/* llog.c */ -int llog_declare_write_rec(const struct lu_env *env, - struct llog_handle *handle, - struct llog_rec_hdr *rec, int idx, - struct thandle *th); -int llog_write_rec(const struct lu_env *env, struct llog_handle *handle, - struct llog_rec_hdr *rec, struct llog_cookie *logcookies, - int numcookies, void *buf, int idx, struct thandle *th); -int lustre_process_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg); -int lustre_end_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg); -/** @} log */ - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h deleted file mode 100644 index a9c9992a2502..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_mdc.h +++ /dev/null @@ -1,229 +0,0 @@ 
-// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre_mdc.h - * - * MDS data structures. - * See also lustre_idl.h for wire formats of requests. - */ - -#ifndef _LUSTRE_MDC_H -#define _LUSTRE_MDC_H - -/** \defgroup mdc mdc - * - * @{ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct ptlrpc_client; -struct obd_export; -struct ptlrpc_request; -struct obd_device; - -/** - * Serializes in-flight MDT-modifying RPC requests to preserve idempotency. - * - * This mutex is used to implement execute-once semantics on the MDT. - * The MDT stores the last transaction ID and result for every client in - * its last_rcvd file. If the client doesn't get a reply, it can safely - * resend the request and the MDT will reconstruct the reply being aware - * that the request has already been executed. 
Without this lock, - * execution status of concurrent in-flight requests would be - * overwritten. - * - * This design limits the extent to which we can keep a full pipeline of - * in-flight requests from a single client. This limitation could be - * overcome by allowing multiple slots per client in the last_rcvd file. - */ -struct mdc_rpc_lock { - /** Lock protecting in-flight RPC concurrency. */ - struct mutex rpcl_mutex; - /** Intent associated with currently executing request. */ - struct lookup_intent *rpcl_it; - /** Used for MDS/RPC load testing purposes. */ - int rpcl_fakes; -}; - -#define MDC_FAKE_RPCL_IT ((void *)0x2c0012bfUL) - -static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck) -{ - mutex_init(&lck->rpcl_mutex); - lck->rpcl_it = NULL; -} - -static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, - struct lookup_intent *it) -{ - if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT || it->it_op == IT_READDIR)) - return; - - /* This would normally block until the existing request finishes. - * If fail_loc is set it will block until the regular request is - * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set - * it will only be cleared when all fake requests are finished. - * Only when all fake requests are finished can normal requests - * be sent, to ensure they are recoverable again. - */ - again: - mutex_lock(&lck->rpcl_mutex); - - if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM)) { - lck->rpcl_it = MDC_FAKE_RPCL_IT; - lck->rpcl_fakes++; - mutex_unlock(&lck->rpcl_mutex); - return; - } - - /* This will only happen when the CFS_FAIL_CHECK() was - * just turned off but there are still requests in progress. - * Wait until they finish. It doesn't need to be efficient - * in this extremely rare case, just have low overhead in - * the common case when it isn't true. 
- */ - while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) { - mutex_unlock(&lck->rpcl_mutex); - schedule_timeout(HZ / 4); - goto again; - } - - LASSERT(!lck->rpcl_it); - lck->rpcl_it = it; -} - -static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck, - struct lookup_intent *it) -{ - if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT || it->it_op == IT_READDIR)) - return; - - if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */ - mutex_lock(&lck->rpcl_mutex); - - LASSERTF(lck->rpcl_fakes > 0, "%d\n", lck->rpcl_fakes); - lck->rpcl_fakes--; - - if (lck->rpcl_fakes == 0) - lck->rpcl_it = NULL; - - } else { - LASSERTF(it == lck->rpcl_it, "%p != %p\n", it, lck->rpcl_it); - lck->rpcl_it = NULL; - } - - mutex_unlock(&lck->rpcl_mutex); -} - -static inline void mdc_get_mod_rpc_slot(struct ptlrpc_request *req, - struct lookup_intent *it) -{ - struct client_obd *cli = &req->rq_import->imp_obd->u.cli; - u32 opc; - u16 tag; - - opc = lustre_msg_get_opc(req->rq_reqmsg); - tag = obd_get_mod_rpc_slot(cli, opc, it); - lustre_msg_set_tag(req->rq_reqmsg, tag); -} - -static inline void mdc_put_mod_rpc_slot(struct ptlrpc_request *req, - struct lookup_intent *it) -{ - struct client_obd *cli = &req->rq_import->imp_obd->u.cli; - u32 opc; - u16 tag; - - opc = lustre_msg_get_opc(req->rq_reqmsg); - tag = lustre_msg_get_tag(req->rq_reqmsg); - obd_put_mod_rpc_slot(cli, opc, it, tag); -} - -/** - * Update the maximum possible easize. - * - * This value is learned from ptlrpc replies sent by the MDT. The - * default easize is initialized to the minimum value but allowed - * to grow up to a single page in size if required to handle the - * common case. 
- * - * \see client_obd::cl_default_mds_easize - * - * \param[in] exp export for MDC device - * \param[in] body body of ptlrpc reply from MDT - * - */ -static inline void mdc_update_max_ea_from_body(struct obd_export *exp, - struct mdt_body *body) -{ - if (body->mbo_valid & OBD_MD_FLMODEASIZE) { - struct client_obd *cli = &exp->exp_obd->u.cli; - u32 def_easize; - - if (cli->cl_max_mds_easize < body->mbo_max_mdsize) - cli->cl_max_mds_easize = body->mbo_max_mdsize; - - def_easize = min_t(__u32, body->mbo_max_mdsize, - OBD_MAX_DEFAULT_EA_SIZE); - cli->cl_default_mds_easize = def_easize; - } -} - -/* mdc/mdc_locks.c */ -int it_open_error(int phase, struct lookup_intent *it); - -static inline bool cl_is_lov_delay_create(unsigned int flags) -{ - return (flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE; -} - -static inline void cl_lov_delay_create_clear(unsigned int *flags) -{ - if ((*flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE) - *flags &= ~O_LOV_DELAY_CREATE; -} - -/** @} mdc */ - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h deleted file mode 100644 index f665556556ec..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_mds.h +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre_mds.h - * - * MDS data structures. - * See also lustre_idl.h for wire formats of requests. - */ - -#ifndef _LUSTRE_MDS_H -#define _LUSTRE_MDS_H - -/** \defgroup mds mds - * - * @{ - */ - -#include -#include -#include -#include - -struct mds_group_info { - struct obd_uuid *uuid; - int group; -}; - -#define MDD_OBD_NAME "mdd_obd" -#define MDD_OBD_UUID "mdd_obd_uuid" - -/** @} mds */ - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h deleted file mode 100644 index 35b43a77eb18..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ /dev/null @@ -1,2360 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -/** \defgroup PtlRPC Portal RPC and networking module. - * - * PortalRPC is the layer used by rest of lustre code to achieve network - * communications: establish connections with corresponding export and import - * states, listen for a service, send and receive RPCs. - * PortalRPC also includes base recovery framework: packet resending and - * replaying, reconnections, pinger. - * - * PortalRPC utilizes LNet as its transport layer. - * - * @{ - */ - -#ifndef _LUSTRE_NET_H -#define _LUSTRE_NET_H - -/** \defgroup net net - * - * @{ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -/* MD flags we _always_ use */ -#define PTLRPC_MD_OPTIONS 0 - -/** - * log2 max # of bulk operations in one request: 2=4MB/RPC, 5=32MB/RPC, ... - * In order for the client and server to properly negotiate the maximum - * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two - * value. The client is free to limit the actual RPC size for any bulk - * transfer via cl_max_pages_per_rpc to some non-power-of-two value. - * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS. - */ -#define PTLRPC_BULK_OPS_BITS 4 -#if PTLRPC_BULK_OPS_BITS > 16 -#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS." 
-#endif -#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS) -/** - * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and - * should not be used on the server at all. Otherwise, it imposes a - * protocol limitation on the maximum RPC size that can be used by any - * RPC sent to that server in the future. Instead, the server should - * use the negotiated per-client ocd_brw_size to determine the bulk - * RPC count. - */ -#define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1)) - -/** - * Define maxima for bulk I/O. - * - * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT - * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the - * currently supported maximum between peers at connect via ocd_brw_size. - */ -#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) -#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) -#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT) - -#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT) -#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE -#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT) -#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) - -/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */ -# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) -# error "PTLRPC_MAX_BRW_PAGES isn't a power of two" -# endif -# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE)) -# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE" -# endif -# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) -# error "PTLRPC_MAX_BRW_SIZE too big" -# endif -# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT) -# error "PTLRPC_MAX_BRW_PAGES too big" -# endif - -#define PTLRPC_NTHRS_INIT 2 - -/** - * Buffer Constants - * - * Constants determine how memory is used to buffer incoming service requests. 
- * - * ?_NBUFS # buffers to allocate when growing the pool - * ?_BUFSIZE # bytes in a single request buffer - * ?_MAXREQSIZE # maximum request service will receive - * - * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk - * of ?_NBUFS is added to the pool. - * - * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are - * considered full when less than ?_MAXREQSIZE is left in them. - */ -/** - * Thread Constants - * - * Constants determine how threads are created for ptlrpc service. - * - * ?_NTHRS_INIT # threads to create for each service partition on - * initializing. If it's non-affinity service and - * there is only one partition, it's the overall # - * threads for the service while initializing. - * ?_NTHRS_BASE # threads should be created at least for each - * ptlrpc partition to keep the service healthy. - * It's the low-water mark of threads upper-limit - * for each partition. - * ?_THR_FACTOR # threads can be added on threads upper-limit for - * each CPU core. This factor is only for reference, - * we might decrease value of factor if number of cores - * per CPT is above a limit. - * ?_NTHRS_MAX # overall threads can be created for a service, - * it's a soft limit because if service is running - * on machine with hundreds of cores and tens of - * CPU partitions, we need to guarantee each partition - * has ?_NTHRS_BASE threads, which means total threads - * will be ?_NTHRS_BASE * number_of_cpts which can - * exceed ?_NTHRS_MAX. 
- * - * Examples - * - * #define MDS_NTHRS_INIT 2 - * #define MDS_NTHRS_BASE 64 - * #define MDS_NTHRS_FACTOR 8 - * #define MDS_NTHRS_MAX 1024 - * - * Example 1): - * --------------------------------------------------------------------- - * Server(A) has 16 cores, user configured it to 4 partitions so each - * partition has 4 cores, then actual number of service threads on each - * partition is: - * MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96 - * - * Total number of threads for the service is: - * 96 * partitions(4) = 384 - * - * Example 2): - * --------------------------------------------------------------------- - * Server(B) has 32 cores, user configured it to 4 partitions so each - * partition has 8 cores, then actual number of service threads on each - * partition is: - * MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128 - * - * Total number of threads for the service is: - * 128 * partitions(4) = 512 - * - * Example 3): - * --------------------------------------------------------------------- - * Server(B) has 96 cores, user configured it to 8 partitions so each - * partition has 12 cores, then actual number of service threads on each - * partition is: - * MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160 - * - * Total number of threads for the service is: - * 160 * partitions(8) = 1280 - * - * However, it's above the soft limit MDS_NTHRS_MAX, so we choose this number - * as upper limit of threads number for each partition: - * MDS_NTHRS_MAX(1024) / partitions(8) = 128 - * - * Example 4): - * --------------------------------------------------------------------- - * Server(C) have a thousand of cores and user configured it to 32 partitions - * MDS_NTHRS_BASE(64) * 32 = 2048 - * - * which is already above soft limit MDS_NTHRS_MAX(1024), but we still need - * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads - * to keep service healthy, so total number of threads will just be 2048. 
- * - * NB: we don't suggest to choose server with that many cores because backend - * filesystem itself, buffer cache, or underlying network stack might - * have some SMP scalability issues at that large scale. - * - * If user already has a fat machine with hundreds or thousands of cores, - * there are two choices for configuration: - * a) create CPU table from subset of all CPUs and run Lustre on - * top of this subset - * b) bind service threads on a few partitions, see modparameters of - * MDS and OSS for details -* - * NB: these calculations (and examples below) are simplified to help - * understanding, the real implementation is a little more complex, - * please see ptlrpc_server_nthreads_check() for details. - * - */ - - /* - * LDLM threads constants: - * - * Given 8 as factor and 24 as base threads number - * - * example 1) - * On 4-core machine we will have 24 + 8 * 4 = 56 threads. - * - * example 2) - * On 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56 - * threads for each partition and total threads number will be 112. - * - * example 3) - * On 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24) - * threads for each partition to keep service healthy, so total threads - * number should be 24 * 8 = 192. - * - * So with these constants, threads number will be at the similar level - * of old versions, unless target machine has over a hundred cores - */ -#define LDLM_THR_FACTOR 8 -#define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT -#define LDLM_NTHRS_BASE 24 -#define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128) - -#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT -#define LDLM_CLIENT_NBUFS 1 -#define LDLM_SERVER_NBUFS 64 -#define LDLM_BUFSIZE (8 * 1024) -#define LDLM_MAXREQSIZE (5 * 1024) -#define LDLM_MAXREPSIZE (1024) - -#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */ - -/** - * FIEMAP request can be 4K+ for now - */ -#define OST_MAXREQSIZE (16 * 1024) - -/* Macro to hide a typecast. 
*/ -#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args) - -struct ptlrpc_replay_async_args { - int praa_old_state; - int praa_old_status; -}; - -/** - * Structure to single define portal connection. - */ -struct ptlrpc_connection { - /** linkage for connections hash table */ - struct rhash_head c_hash; - /** Our own lnet nid for this connection */ - lnet_nid_t c_self; - /** Remote side nid for this connection */ - struct lnet_process_id c_peer; - /** UUID of the other side */ - struct obd_uuid c_remote_uuid; - /** reference counter for this connection */ - atomic_t c_refcount; -}; - -/** Client definition for PortalRPC */ -struct ptlrpc_client { - /** What lnet portal does this client send messages to by default */ - __u32 cli_request_portal; - /** What portal do we expect replies on */ - __u32 cli_reply_portal; - /** Name of the client */ - char *cli_name; -}; - -/** state flags of requests */ -/* XXX only ones left are those used by the bulk descs as well! */ -#define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */ -#define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */ - -#define REQ_MAX_ACK_LOCKS 8 - -union ptlrpc_async_args { - /** - * Scratchpad for passing args to completion interpreter. Users - * cast to the struct of their choosing, and BUILD_BUG_ON oversized - * arguments. For _tons_ of context, kmalloc a struct and store - * a pointer to it here. The pointer_arg ensures this struct is at - * least big enough for that. - */ - void *pointer_arg[11]; - __u64 space[7]; -}; - -struct ptlrpc_request_set; -typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int); -typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *); - -/** - * Definition of request set structure. - * Request set is a list of requests (not necessary to the same target) that - * once populated with RPCs could be sent in parallel. - * There are two kinds of request sets. 
General purpose and with dedicated - * serving thread. Example of the latter is ptlrpcd set. - * For general purpose sets once request set started sending it is impossible - * to add new requests to such set. - * Provides a way to call "completion callbacks" when all requests in the set - * returned. - */ -struct ptlrpc_request_set { - atomic_t set_refcount; - /** number of in queue requests */ - atomic_t set_new_count; - /** number of uncompleted requests */ - atomic_t set_remaining; - /** wait queue to wait on for request events */ - wait_queue_head_t set_waitq; - wait_queue_head_t *set_wakeup_ptr; - /** List of requests in the set */ - struct list_head set_requests; - /** - * List of completion callbacks to be called when the set is completed - * This is only used if \a set_interpret is NULL. - * Links struct ptlrpc_set_cbdata. - */ - struct list_head set_cblist; - /** Completion callback, if only one. */ - set_interpreter_func set_interpret; - /** opaq argument passed to completion \a set_interpret callback. */ - void *set_arg; - /** - * Lock for \a set_new_requests manipulations - * locked so that any old caller can communicate requests to - * the set holder who can then fold them into the lock-free set - */ - spinlock_t set_new_req_lock; - /** List of new yet unsent requests. Only used with ptlrpcd now. 
*/ - struct list_head set_new_requests; - - /** rq_status of requests that have been freed already */ - int set_rc; - /** Additional fields used by the flow control extension */ - /** Maximum number of RPCs in flight */ - int set_max_inflight; - /** Callback function used to generate RPCs */ - set_producer_func set_producer; - /** opaq argument passed to the producer callback */ - void *set_producer_arg; -}; - -/** - * Description of a single ptrlrpc_set callback - */ -struct ptlrpc_set_cbdata { - /** List linkage item */ - struct list_head psc_item; - /** Pointer to interpreting function */ - set_interpreter_func psc_interpret; - /** Opaq argument to pass to the callback */ - void *psc_data; -}; - -struct ptlrpc_bulk_desc; -struct ptlrpc_service_part; -struct ptlrpc_service; - -/** - * ptlrpc callback & work item stuff - */ -struct ptlrpc_cb_id { - void (*cbid_fn)(struct lnet_event *ev); /* specific callback fn */ - void *cbid_arg; /* additional arg */ -}; - -/** Maximum number of locks to fit into reply state */ -#define RS_MAX_LOCKS 8 -#define RS_DEBUG 0 - -/** - * Structure to define reply state on the server - * Reply state holds various reply message information. Also for "difficult" - * replies (rep-ack case) we store the state after sending reply and wait - * for the client to acknowledge the reception. In these cases locks could be - * added to the state for replay/failover consistency guarantees. 
- */ -struct ptlrpc_reply_state { - /** Callback description */ - struct ptlrpc_cb_id rs_cb_id; - /** Linkage for list of all reply states in a system */ - struct list_head rs_list; - /** Linkage for list of all reply states on same export */ - struct list_head rs_exp_list; - /** Linkage for list of all reply states for same obd */ - struct list_head rs_obd_list; -#if RS_DEBUG - struct list_head rs_debug_list; -#endif - /** A spinlock to protect the reply state flags */ - spinlock_t rs_lock; - /** Reply state flags */ - unsigned long rs_difficult:1; /* ACK/commit stuff */ - unsigned long rs_no_ack:1; /* no ACK, even for - * difficult requests - */ - unsigned long rs_scheduled:1; /* being handled? */ - unsigned long rs_scheduled_ever:1;/* any schedule attempts? */ - unsigned long rs_handled:1; /* been handled yet? */ - unsigned long rs_on_net:1; /* reply_out_callback pending? */ - unsigned long rs_prealloc:1; /* rs from prealloc list */ - unsigned long rs_committed:1;/* the transaction was committed - * and the rs was dispatched - */ - atomic_t rs_refcount; /* number of users */ - /** Number of locks awaiting client ACK */ - int rs_nlocks; - - /** Size of the state */ - int rs_size; - /** opcode */ - __u32 rs_opc; - /** Transaction number */ - __u64 rs_transno; - /** xid */ - __u64 rs_xid; - struct obd_export *rs_export; - struct ptlrpc_service_part *rs_svcpt; - /** Lnet metadata handle for the reply */ - struct lnet_handle_md rs_md_h; - - /** Context for the service thread */ - struct ptlrpc_svc_ctx *rs_svc_ctx; - /** Reply buffer (actually sent to the client), encoded if needed */ - struct lustre_msg *rs_repbuf; /* wrapper */ - /** Size of the reply buffer */ - int rs_repbuf_len; /* wrapper buf length */ - /** Size of the reply message */ - int rs_repdata_len; /* wrapper msg length */ - /** - * Actual reply message. Its content is encrypted (if needed) to - * produce reply buffer for actual sending. 
In simple case - * of no network encryption we just set \a rs_repbuf to \a rs_msg - */ - struct lustre_msg *rs_msg; /* reply message */ - - /** Handles of locks awaiting client reply ACK */ - struct lustre_handle rs_locks[RS_MAX_LOCKS]; - /** Lock modes of locks in \a rs_locks */ - enum ldlm_mode rs_modes[RS_MAX_LOCKS]; -}; - -struct ptlrpc_thread; - -/** RPC stages */ -enum rq_phase { - RQ_PHASE_NEW = 0xebc0de00, - RQ_PHASE_RPC = 0xebc0de01, - RQ_PHASE_BULK = 0xebc0de02, - RQ_PHASE_INTERPRET = 0xebc0de03, - RQ_PHASE_COMPLETE = 0xebc0de04, - RQ_PHASE_UNREG_RPC = 0xebc0de05, - RQ_PHASE_UNREG_BULK = 0xebc0de06, - RQ_PHASE_UNDEFINED = 0xebc0de07 -}; - -/** Type of request interpreter call-back */ -typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env, - struct ptlrpc_request *req, - void *arg, int rc); - -/** - * Definition of request pool structure. - * The pool is used to store empty preallocated requests for the case - * when we would actually need to send something without performing - * any allocations (to avoid e.g. OOM). - */ -struct ptlrpc_request_pool { - /** Locks the list */ - spinlock_t prp_lock; - /** list of ptlrpc_request structs */ - struct list_head prp_req_list; - /** Maximum message size that would fit into a request from this pool */ - int prp_rq_size; - /** Function to allocate more requests for this pool */ - int (*prp_populate)(struct ptlrpc_request_pool *, int); -}; - -struct lu_context; -struct lu_env; - -struct ldlm_lock; - -#include - -/** - * Basic request prioritization operations structure. - * The whole idea is centered around locks and RPCs that might affect locks. - * When a lock is contended we try to give priority to RPCs that might lead - * to fastest release of that lock. - * Currently only implemented for OSTs only in a way that makes all - * IO and truncate RPCs that are coming from a locked region where a lock is - * contended a priority over other requests. 
- */ -struct ptlrpc_hpreq_ops { - /** - * Check if the lock handle of the given lock is the same as - * taken from the request. - */ - int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *); - /** - * Check if the request is a high priority one. - */ - int (*hpreq_check)(struct ptlrpc_request *); - /** - * Called after the request has been handled. - */ - void (*hpreq_fini)(struct ptlrpc_request *); -}; - -struct ptlrpc_cli_req { - /** For bulk requests on client only: bulk descriptor */ - struct ptlrpc_bulk_desc *cr_bulk; - /** optional time limit for send attempts */ - long cr_delay_limit; - /** time request was first queued */ - unsigned long cr_queued_time; - /** request sent timeval */ - struct timespec64 cr_sent_tv; - /** time for request really sent out */ - time64_t cr_sent_out; - /** when req reply unlink must finish. */ - time64_t cr_reply_deadline; - /** when req bulk unlink must finish. */ - time64_t cr_bulk_deadline; - /** when req unlink must finish. */ - time64_t cr_req_deadline; - /** Portal to which this request would be sent */ - short cr_req_ptl; - /** Portal where to wait for reply and where reply would be sent */ - short cr_rep_ptl; - /** request resending number */ - unsigned int cr_resend_nr; - /** What was import generation when this request was sent */ - int cr_imp_gen; - enum lustre_imp_state cr_send_state; - /** Per-request waitq introduced by bug 21938 for recovery waiting */ - wait_queue_head_t cr_set_waitq; - /** Link item for request set lists */ - struct list_head cr_set_chain; - /** link to waited ctx */ - struct list_head cr_ctx_chain; - - /** client's half ctx */ - struct ptlrpc_cli_ctx *cr_cli_ctx; - /** Link back to the request set */ - struct ptlrpc_request_set *cr_set; - /** outgoing request MD handle */ - struct lnet_handle_md cr_req_md_h; - /** request-out callback parameter */ - struct ptlrpc_cb_id cr_req_cbid; - /** incoming reply MD handle */ - struct lnet_handle_md cr_reply_md_h; - wait_queue_head_t 
cr_reply_waitq; - /** reply callback parameter */ - struct ptlrpc_cb_id cr_reply_cbid; - /** Async completion handler, called when reply is received */ - ptlrpc_interpterer_t cr_reply_interp; - /** Async completion context */ - union ptlrpc_async_args cr_async_args; - /** Opaq data for replay and commit callbacks. */ - void *cr_cb_data; - /** Link to the imp->imp_unreplied_list */ - struct list_head cr_unreplied_list; - /** - * Commit callback, called when request is committed and about to be - * freed. - */ - void (*cr_commit_cb)(struct ptlrpc_request *); - /** Replay callback, called after request is replayed at recovery */ - void (*cr_replay_cb)(struct ptlrpc_request *); -}; - -/** client request member alias */ -/* NB: these alias should NOT be used by any new code, instead they should - * be removed step by step to avoid potential abuse - */ -#define rq_bulk rq_cli.cr_bulk -#define rq_delay_limit rq_cli.cr_delay_limit -#define rq_queued_time rq_cli.cr_queued_time -#define rq_sent_tv rq_cli.cr_sent_tv -#define rq_real_sent rq_cli.cr_sent_out -#define rq_reply_deadline rq_cli.cr_reply_deadline -#define rq_bulk_deadline rq_cli.cr_bulk_deadline -#define rq_req_deadline rq_cli.cr_req_deadline -#define rq_nr_resend rq_cli.cr_resend_nr -#define rq_request_portal rq_cli.cr_req_ptl -#define rq_reply_portal rq_cli.cr_rep_ptl -#define rq_import_generation rq_cli.cr_imp_gen -#define rq_send_state rq_cli.cr_send_state -#define rq_set_chain rq_cli.cr_set_chain -#define rq_ctx_chain rq_cli.cr_ctx_chain -#define rq_set rq_cli.cr_set -#define rq_set_waitq rq_cli.cr_set_waitq -#define rq_cli_ctx rq_cli.cr_cli_ctx -#define rq_req_md_h rq_cli.cr_req_md_h -#define rq_req_cbid rq_cli.cr_req_cbid -#define rq_reply_md_h rq_cli.cr_reply_md_h -#define rq_reply_waitq rq_cli.cr_reply_waitq -#define rq_reply_cbid rq_cli.cr_reply_cbid -#define rq_interpret_reply rq_cli.cr_reply_interp -#define rq_async_args rq_cli.cr_async_args -#define rq_cb_data rq_cli.cr_cb_data -#define 
rq_unreplied_list rq_cli.cr_unreplied_list -#define rq_commit_cb rq_cli.cr_commit_cb -#define rq_replay_cb rq_cli.cr_replay_cb - -struct ptlrpc_srv_req { - /** initial thread servicing this request */ - struct ptlrpc_thread *sr_svc_thread; - /** - * Server side list of incoming unserved requests sorted by arrival - * time. Traversed from time to time to notice about to expire - * requests and sent back "early replies" to clients to let them - * know server is alive and well, just very busy to service their - * requests in time - */ - struct list_head sr_timed_list; - /** server-side per-export list */ - struct list_head sr_exp_list; - /** server-side history, used for debuging purposes. */ - struct list_head sr_hist_list; - /** history sequence # */ - __u64 sr_hist_seq; - /** the index of service's srv_at_array into which request is linked */ - time64_t sr_at_index; - /** authed uid */ - uid_t sr_auth_uid; - /** authed uid mapped to */ - uid_t sr_auth_mapped_uid; - /** RPC is generated from what part of Lustre */ - enum lustre_sec_part sr_sp_from; - /** request session context */ - struct lu_context sr_ses; - /** \addtogroup nrs - * @{ - */ - /** stub for NRS request */ - struct ptlrpc_nrs_request sr_nrq; - /** @} nrs */ - /** request arrival time */ - struct timespec64 sr_arrival_time; - /** server's half ctx */ - struct ptlrpc_svc_ctx *sr_svc_ctx; - /** (server side), pointed directly into req buffer */ - struct ptlrpc_user_desc *sr_user_desc; - /** separated reply state */ - struct ptlrpc_reply_state *sr_reply_state; - /** server-side hp handlers */ - struct ptlrpc_hpreq_ops *sr_ops; - /** incoming request buffer */ - struct ptlrpc_request_buffer_desc *sr_rqbd; -}; - -/** server request member alias */ -/* NB: these alias should NOT be used by any new code, instead they should - * be removed step by step to avoid potential abuse - */ -#define rq_svc_thread rq_srv.sr_svc_thread -#define rq_timed_list rq_srv.sr_timed_list -#define rq_exp_list rq_srv.sr_exp_list 
-#define rq_history_list rq_srv.sr_hist_list -#define rq_history_seq rq_srv.sr_hist_seq -#define rq_at_index rq_srv.sr_at_index -#define rq_auth_uid rq_srv.sr_auth_uid -#define rq_auth_mapped_uid rq_srv.sr_auth_mapped_uid -#define rq_sp_from rq_srv.sr_sp_from -#define rq_session rq_srv.sr_ses -#define rq_nrq rq_srv.sr_nrq -#define rq_arrival_time rq_srv.sr_arrival_time -#define rq_reply_state rq_srv.sr_reply_state -#define rq_svc_ctx rq_srv.sr_svc_ctx -#define rq_user_desc rq_srv.sr_user_desc -#define rq_ops rq_srv.sr_ops -#define rq_rqbd rq_srv.sr_rqbd - -/** - * Represents remote procedure call. - * - * This is a staple structure used by everybody wanting to send a request - * in Lustre. - */ -struct ptlrpc_request { - /* Request type: one of PTL_RPC_MSG_* */ - int rq_type; - /** Result of request processing */ - int rq_status; - /** - * Linkage item through which this request is included into - * sending/delayed lists on client and into rqbd list on server - */ - struct list_head rq_list; - /** Lock to protect request flags and some other important bits, like - * rq_list - */ - spinlock_t rq_lock; - /** client-side flags are serialized by rq_lock @{ */ - unsigned int rq_intr:1, rq_replied:1, rq_err:1, - rq_timedout:1, rq_resend:1, rq_restart:1, - /** - * when ->rq_replay is set, request is kept by the client even - * after server commits corresponding transaction. This is - * used for operations that require sequence of multiple - * requests to be replayed. The only example currently is file - * open/close. When last request in such a sequence is - * committed, ->rq_replay is cleared on all requests in the - * sequence. 
- */ - rq_replay:1, - rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1, - rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1, - rq_early:1, - rq_req_unlinked:1, /* unlinked request buffer from lnet */ - rq_reply_unlinked:1, /* unlinked reply buffer from lnet */ - rq_memalloc:1, /* req originated from "kswapd" */ - rq_committed:1, - rq_reply_truncated:1, - /** whether the "rq_set" is a valid one */ - rq_invalid_rqset:1, - rq_generation_set:1, - /** do not resend request on -EINPROGRESS */ - rq_no_retry_einprogress:1, - /* allow the req to be sent if the import is in recovery - * status - */ - rq_allow_replay:1, - /* bulk request, sent to server, but uncommitted */ - rq_unstable:1; - /** @} */ - - /** server-side flags @{ */ - unsigned int - rq_hp:1, /**< high priority RPC */ - rq_at_linked:1, /**< link into service's srv_at_array */ - rq_packed_final:1; /**< packed final reply */ - /** @} */ - - /** one of RQ_PHASE_* */ - enum rq_phase rq_phase; - /** one of RQ_PHASE_* to be used next */ - enum rq_phase rq_next_phase; - /** - * client-side refcount for SENT race, server-side refcount - * for multiple replies - */ - atomic_t rq_refcount; - /** - * client-side: - * !rq_truncate : # reply bytes actually received, - * rq_truncate : required repbuf_len for resend - */ - int rq_nob_received; - /** Request length */ - int rq_reqlen; - /** Reply length */ - int rq_replen; - /** Pool if request is from preallocated list */ - struct ptlrpc_request_pool *rq_pool; - /** Request message - what client sent */ - struct lustre_msg *rq_reqmsg; - /** Reply message - server response */ - struct lustre_msg *rq_repmsg; - /** Transaction number */ - __u64 rq_transno; - /** xid */ - __u64 rq_xid; - /** bulk match bits */ - u64 rq_mbits; - /** - * List item to for replay list. Not yet committed requests get linked - * there. - * Also see \a rq_replay comment above. 
- * It's also link chain on obd_export::exp_req_replay_queue - */ - struct list_head rq_replay_list; - /** non-shared members for client & server request*/ - union { - struct ptlrpc_cli_req rq_cli; - struct ptlrpc_srv_req rq_srv; - }; - /** - * security and encryption data - * @{ - */ - /** description of flavors for client & server */ - struct sptlrpc_flavor rq_flvr; - - /* client/server security flags */ - unsigned int - rq_ctx_init:1, /* context initiation */ - rq_ctx_fini:1, /* context destroy */ - rq_bulk_read:1, /* request bulk read */ - rq_bulk_write:1, /* request bulk write */ - /* server authentication flags */ - rq_auth_gss:1, /* authenticated by gss */ - rq_auth_usr_root:1, /* authed as root */ - rq_auth_usr_mdt:1, /* authed as mdt */ - rq_auth_usr_ost:1, /* authed as ost */ - /* security tfm flags */ - rq_pack_udesc:1, - rq_pack_bulk:1, - /* doesn't expect reply FIXME */ - rq_no_reply:1, - rq_pill_init:1, /* pill initialized */ - rq_srv_req:1; /* server request */ - - /** various buffer pointers */ - struct lustre_msg *rq_reqbuf; /**< req wrapper */ - char *rq_repbuf; /**< rep buffer */ - struct lustre_msg *rq_repdata; /**< rep wrapper msg */ - /** only in priv mode */ - struct lustre_msg *rq_clrbuf; - int rq_reqbuf_len; /* req wrapper buf len */ - int rq_reqdata_len; /* req wrapper msg len */ - int rq_repbuf_len; /* rep buffer len */ - int rq_repdata_len; /* rep wrapper msg len */ - int rq_clrbuf_len; /* only in priv mode */ - int rq_clrdata_len; /* only in priv mode */ - - /** early replies go to offset 0, regular replies go after that */ - unsigned int rq_reply_off; - - /** @} */ - - /** Fields that help to see if request and reply were swabbed or not */ - __u32 rq_req_swab_mask; - __u32 rq_rep_swab_mask; - - /** how many early replies (for stats) */ - int rq_early_count; - - /** Server-side, export on which request was received */ - struct obd_export *rq_export; - /** import where request is being sent */ - struct obd_import *rq_import; - /** our 
LNet NID */ - lnet_nid_t rq_self; - /** Peer description (the other side) */ - struct lnet_process_id rq_peer; - /** - * service time estimate (secs) - * If the request is not served by this time, it is marked as timed out. - */ - int rq_timeout; - /** - * when request/reply sent (secs), or time when request should be sent - */ - time64_t rq_sent; - /** when request must finish. */ - time64_t rq_deadline; - /** request format description */ - struct req_capsule rq_pill; -}; - -/** - * Call completion handler for rpc if any, return it's status or original - * rc if there was no handler defined for this request. - */ -static inline int ptlrpc_req_interpret(const struct lu_env *env, - struct ptlrpc_request *req, int rc) -{ - if (req->rq_interpret_reply) { - req->rq_status = req->rq_interpret_reply(env, req, - &req->rq_async_args, - rc); - return req->rq_status; - } - return rc; -} - -/* - * Can the request be moved from the regular NRS head to the high-priority NRS - * head (of the same PTLRPC service partition), if any? - * - * For a reliable result, this should be checked under svcpt->scp_req lock. - */ -static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req) -{ - struct ptlrpc_nrs_request *nrq = &req->rq_nrq; - - /** - * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the - * request has been enqueued first, and ptlrpc_nrs_request::nr_started - * to make sure it has not been scheduled yet (analogous to previous - * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list). 
- */ - return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp; -} - -/** @} nrs */ - -/** - * Returns 1 if request buffer at offset \a index was already swabbed - */ -static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index) -{ - LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); - return req->rq_req_swab_mask & (1 << index); -} - -/** - * Returns 1 if request reply buffer at offset \a index was already swabbed - */ -static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index) -{ - LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); - return req->rq_rep_swab_mask & (1 << index); -} - -/** - * Returns 1 if request needs to be swabbed into local cpu byteorder - */ -static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req) -{ - return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); -} - -/** - * Returns 1 if request reply needs to be swabbed into local cpu byteorder - */ -static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req) -{ - return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); -} - -/** - * Mark request buffer at offset \a index that it was already swabbed - */ -static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, - size_t index) -{ - LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); - LASSERT((req->rq_req_swab_mask & (1 << index)) == 0); - req->rq_req_swab_mask |= 1 << index; -} - -/** - * Mark request reply buffer at offset \a index that it was already swabbed - */ -static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, - size_t index) -{ - LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); - LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0); - req->rq_rep_swab_mask |= 1 << index; -} - -/** - * Convert numerical request phase value \a phase into text string description - */ -static inline const char * -ptlrpc_phase2str(enum rq_phase phase) -{ - switch (phase) { - case RQ_PHASE_NEW: - return "New"; - case RQ_PHASE_RPC: - return "Rpc"; - case RQ_PHASE_BULK: - 
return "Bulk"; - case RQ_PHASE_INTERPRET: - return "Interpret"; - case RQ_PHASE_COMPLETE: - return "Complete"; - case RQ_PHASE_UNREG_RPC: - return "UnregRPC"; - case RQ_PHASE_UNREG_BULK: - return "UnregBULK"; - default: - return "?Phase?"; - } -} - -/** - * Convert numerical request phase of the request \a req into text stringi - * description - */ -static inline const char * -ptlrpc_rqphase2str(struct ptlrpc_request *req) -{ - return ptlrpc_phase2str(req->rq_phase); -} - -/** - * Debugging functions and helpers to print request structure into debug log - * @{ - */ -/* Spare the preprocessor, spoil the bugs. */ -#define FLAG(field, str) (field ? str : "") - -/** Convert bit flags into a string */ -#define DEBUG_REQ_FLAGS(req) \ - ptlrpc_rqphase2str(req), \ - FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \ - FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"), \ - FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \ - FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \ - FLAG(req->rq_no_resend, "N"), \ - FLAG(req->rq_waiting, "W"), \ - FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \ - FLAG(req->rq_committed, "M") - -#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s" - -void _debug_req(struct ptlrpc_request *req, - struct libcfs_debug_msg_data *data, const char *fmt, ...) - __printf(3, 4); - -/** - * Helper that decides if we need to print request according to current debug - * level settings - */ -#define debug_req(msgdata, mask, cdls, req, fmt, a...) \ -do { \ - CFS_CHECK_STACK(msgdata, mask, cdls); \ - \ - if (((mask) & D_CANTMASK) != 0 || \ - ((libcfs_debug & (mask)) != 0 && \ - (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \ - _debug_req((req), msgdata, fmt, ##a); \ -} while (0) - -/** - * This is the debug print function you need to use to print request structure - * content into lustre debug log. 
- * for most callers (level is a constant) this is resolved at compile time - */ -#define DEBUG_REQ(level, req, fmt, args...) \ -do { \ - if ((level) & (D_ERROR | D_WARNING)) { \ - static struct cfs_debug_limit_state cdls; \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \ - debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\ - } else { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \ - debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \ - } \ -} while (0) -/** @} */ - -/** - * Structure that defines a single page of a bulk transfer - */ -struct ptlrpc_bulk_page { - /** Linkage to list of pages in a bulk */ - struct list_head bp_link; - /** - * Number of bytes in a page to transfer starting from \a bp_pageoffset - */ - int bp_buflen; - /** offset within a page */ - int bp_pageoffset; - /** The page itself */ - struct page *bp_page; -}; - -enum ptlrpc_bulk_op_type { - PTLRPC_BULK_OP_ACTIVE = 0x00000001, - PTLRPC_BULK_OP_PASSIVE = 0x00000002, - PTLRPC_BULK_OP_PUT = 0x00000004, - PTLRPC_BULK_OP_GET = 0x00000008, - PTLRPC_BULK_BUF_KVEC = 0x00000010, - PTLRPC_BULK_BUF_KIOV = 0x00000020, - PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET, - PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT, - PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET, - PTLRPC_BULK_PUT_SOURCE = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT, -}; - -static inline bool ptlrpc_is_bulk_op_get(enum ptlrpc_bulk_op_type type) -{ - return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET; -} - -static inline bool ptlrpc_is_bulk_get_source(enum ptlrpc_bulk_op_type type) -{ - return (type & PTLRPC_BULK_GET_SOURCE) == PTLRPC_BULK_GET_SOURCE; -} - -static inline bool ptlrpc_is_bulk_put_sink(enum ptlrpc_bulk_op_type type) -{ - return (type & PTLRPC_BULK_PUT_SINK) == PTLRPC_BULK_PUT_SINK; -} - -static inline bool ptlrpc_is_bulk_get_sink(enum ptlrpc_bulk_op_type type) -{ - return (type & PTLRPC_BULK_GET_SINK) == 
PTLRPC_BULK_GET_SINK; -} - -static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type) -{ - return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE; -} - -static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type) -{ - return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV)) - == PTLRPC_BULK_BUF_KVEC; -} - -static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type) -{ - return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV)) - == PTLRPC_BULK_BUF_KIOV; -} - -static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type) -{ - return ((type & PTLRPC_BULK_OP_ACTIVE) | - (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_ACTIVE; -} - -static inline bool ptlrpc_is_bulk_op_passive(enum ptlrpc_bulk_op_type type) -{ - return ((type & PTLRPC_BULK_OP_ACTIVE) | - (type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_PASSIVE; -} - -struct ptlrpc_bulk_frag_ops { - /** - * Add a page \a page to the bulk descriptor \a desc - * Data to transfer in the page starts at offset \a pageoffset and - * amount of data to transfer from the page is \a len - */ - void (*add_kiov_frag)(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, int len); - - /* - * Add a \a fragment to the bulk descriptor \a desc. - * Data to transfer in the fragment is pointed to by \a frag - * The size of the fragment is \a len - */ - int (*add_iov_frag)(struct ptlrpc_bulk_desc *desc, void *frag, int len); - - /** - * Uninitialize and free bulk descriptor \a desc. - * Works on bulk descriptors both from server and client side. - */ - void (*release_frags)(struct ptlrpc_bulk_desc *desc); -}; - -extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops; -extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops; - -/** - * Definition of bulk descriptor. 
- * Bulks are special "Two phase" RPCs where initial request message - * is sent first and it is followed bt a transfer (o receiving) of a large - * amount of data to be settled into pages referenced from the bulk descriptors. - * Bulks transfers (the actual data following the small requests) are done - * on separate LNet portals. - * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs. - * Another user is readpage for MDT. - */ -struct ptlrpc_bulk_desc { - /** completed with failure */ - unsigned long bd_failure:1; - /** client side */ - unsigned long bd_registered:1; - /** For serialization with callback */ - spinlock_t bd_lock; - /** Import generation when request for this bulk was sent */ - int bd_import_generation; - /** {put,get}{source,sink}{kvec,kiov} */ - enum ptlrpc_bulk_op_type bd_type; - /** LNet portal for this bulk */ - __u32 bd_portal; - /** Server side - export this bulk created for */ - struct obd_export *bd_export; - /** Client side - import this bulk was sent on */ - struct obd_import *bd_import; - /** Back pointer to the request */ - struct ptlrpc_request *bd_req; - struct ptlrpc_bulk_frag_ops *bd_frag_ops; - wait_queue_head_t bd_waitq; /* server side only WQ */ - int bd_iov_count; /* # entries in bd_iov */ - int bd_max_iov; /* allocated size of bd_iov */ - int bd_nob; /* # bytes covered */ - int bd_nob_transferred; /* # bytes GOT/PUT */ - - u64 bd_last_mbits; - - struct ptlrpc_cb_id bd_cbid; /* network callback info */ - lnet_nid_t bd_sender; /* stash event::sender */ - int bd_md_count; /* # valid entries in bd_mds */ - int bd_md_max_brw; /* max entries in bd_mds */ - /** array of associated MDs */ - struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT]; - - union { - struct { - /* - * encrypt iov, size is either 0 or bd_iov_count. 
- */ - struct bio_vec *bd_enc_vec; - struct bio_vec *bd_vec; /* Array of bio_vecs */ - } bd_kiov; - - struct { - struct kvec *bd_enc_kvec; - struct kvec *bd_kvec; /* Array of kvecs */ - } bd_kvec; - } bd_u; -}; - -#define GET_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_vec) -#define BD_GET_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_vec[i]) -#define GET_ENC_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_enc_vec) -#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i]) -#define GET_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_kvec) -#define BD_GET_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_kvec[i]) -#define GET_ENC_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_enc_kvec) -#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i]) - -enum { - SVC_STOPPED = 1 << 0, - SVC_STOPPING = 1 << 1, - SVC_STARTING = 1 << 2, - SVC_RUNNING = 1 << 3, -}; - -#define PTLRPC_THR_NAME_LEN 32 -/** - * Definition of server service thread structure - */ -struct ptlrpc_thread { - /** - * List of active threads in svc->srv_threads - */ - struct list_head t_link; - /** - * thread-private data (preallocated memory) - */ - void *t_data; - __u32 t_flags; - /** - * service thread index, from ptlrpc_start_threads - */ - unsigned int t_id; - /** - * service thread pid - */ - pid_t t_pid; - /** - * put watchdog in the structure per thread b=14840 - * - * Lustre watchdog is removed for client in the hope - * of a generic watchdog can be merged in kernel. - * When that happens, we should add below back. 
- * - * struct lc_watchdog *t_watchdog; - */ - /** - * the svc this thread belonged to b=18582 - */ - struct ptlrpc_service_part *t_svcpt; - wait_queue_head_t t_ctl_waitq; - struct lu_env *t_env; - char t_name[PTLRPC_THR_NAME_LEN]; -}; - -static inline int thread_is_stopped(struct ptlrpc_thread *thread) -{ - return !!(thread->t_flags & SVC_STOPPED); -} - -static inline int thread_is_stopping(struct ptlrpc_thread *thread) -{ - return !!(thread->t_flags & SVC_STOPPING); -} - -static inline int thread_is_starting(struct ptlrpc_thread *thread) -{ - return !!(thread->t_flags & SVC_STARTING); -} - -static inline int thread_is_running(struct ptlrpc_thread *thread) -{ - return !!(thread->t_flags & SVC_RUNNING); -} - -static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags) -{ - thread->t_flags &= ~flags; -} - -static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags) -{ - thread->t_flags = flags; -} - -static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags) -{ - thread->t_flags |= flags; -} - -static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread, - __u32 flags) -{ - if (thread->t_flags & flags) { - thread->t_flags &= ~flags; - return 1; - } - return 0; -} - -/** - * Request buffer descriptor structure. - * This is a structure that contains one posted request buffer for service. - * Once data land into a buffer, event callback creates actual request and - * notifies wakes one of the service threads to process new incoming request. - * More than one request can fit into the buffer. 
- */ -struct ptlrpc_request_buffer_desc { - /** Link item for rqbds on a service */ - struct list_head rqbd_list; - /** History of requests for this buffer */ - struct list_head rqbd_reqs; - /** Back pointer to service for which this buffer is registered */ - struct ptlrpc_service_part *rqbd_svcpt; - /** LNet descriptor */ - struct lnet_handle_md rqbd_md_h; - int rqbd_refcount; - /** The buffer itself */ - char *rqbd_buffer; - struct ptlrpc_cb_id rqbd_cbid; - /** - * This "embedded" request structure is only used for the - * last request to fit into the buffer - */ - struct ptlrpc_request rqbd_req; -}; - -typedef int (*svc_handler_t)(struct ptlrpc_request *req); - -struct ptlrpc_service_ops { - /** - * if non-NULL called during thread creation (ptlrpc_start_thread()) - * to initialize service specific per-thread state. - */ - int (*so_thr_init)(struct ptlrpc_thread *thr); - /** - * if non-NULL called during thread shutdown (ptlrpc_main()) to - * destruct state created by ->srv_init(). - */ - void (*so_thr_done)(struct ptlrpc_thread *thr); - /** - * Handler function for incoming requests for this service - */ - int (*so_req_handler)(struct ptlrpc_request *req); - /** - * function to determine priority of the request, it's called - * on every new request - */ - int (*so_hpreq_handler)(struct ptlrpc_request *); - /** - * service-specific print fn - */ - void (*so_req_printer)(void *, struct ptlrpc_request *); -}; - -#ifndef __cfs_cacheline_aligned -/* NB: put it here for reducing patche dependence */ -# define __cfs_cacheline_aligned -#endif - -/** - * How many high priority requests to serve before serving one normal - * priority request - */ -#define PTLRPC_SVC_HP_RATIO 10 - -/** - * Definition of PortalRPC service. - * The service is listening on a particular portal (like tcp port) - * and perform actions for a specific server like IO service for OST - * or general metadata service for MDS. 
- */ -struct ptlrpc_service { - /** serialize sysfs operations */ - spinlock_t srv_lock; - /** most often accessed fields */ - /** chain thru all services */ - struct list_head srv_list; - /** service operations table */ - struct ptlrpc_service_ops srv_ops; - /** only statically allocated strings here; we don't clean them */ - char *srv_name; - /** only statically allocated strings here; we don't clean them */ - char *srv_thread_name; - /** service thread list */ - struct list_head srv_threads; - /** threads # should be created for each partition on initializing */ - int srv_nthrs_cpt_init; - /** limit of threads number for each partition */ - int srv_nthrs_cpt_limit; - /** Root of debugfs dir tree for this service */ - struct dentry *srv_debugfs_entry; - /** Pointer to statistic data for this service */ - struct lprocfs_stats *srv_stats; - /** # hp per lp reqs to handle */ - int srv_hpreq_ratio; - /** biggest request to receive */ - int srv_max_req_size; - /** biggest reply to send */ - int srv_max_reply_size; - /** size of individual buffers */ - int srv_buf_size; - /** # buffers to allocate in 1 group */ - int srv_nbuf_per_group; - /** Local portal on which to receive requests */ - __u32 srv_req_portal; - /** Portal on the client to send replies to */ - __u32 srv_rep_portal; - /** - * Tags for lu_context associated with this thread, see struct - * lu_context. 
- */ - __u32 srv_ctx_tags; - /** soft watchdog timeout multiplier */ - int srv_watchdog_factor; - /** under unregister_service */ - unsigned srv_is_stopping:1; - - /** max # request buffers in history per partition */ - int srv_hist_nrqbds_cpt_max; - /** number of CPTs this service bound on */ - int srv_ncpts; - /** CPTs array this service bound on */ - __u32 *srv_cpts; - /** 2^srv_cptab_bits >= cfs_cpt_numbert(srv_cptable) */ - int srv_cpt_bits; - /** CPT table this service is running over */ - struct cfs_cpt_table *srv_cptable; - - /* sysfs object */ - struct kobject srv_kobj; - struct completion srv_kobj_unregister; - /** - * partition data for ptlrpc service - */ - struct ptlrpc_service_part *srv_parts[0]; -}; - -/** - * Definition of PortalRPC service partition data. - * Although a service only has one instance of it right now, but we - * will have multiple instances very soon (instance per CPT). - * - * it has four locks: - * \a scp_lock - * serialize operations on rqbd and requests waiting for preprocess - * \a scp_req_lock - * serialize operations active requests sent to this portal - * \a scp_at_lock - * serialize adaptive timeout stuff - * \a scp_rep_lock - * serialize operations on RS list (reply states) - * - * We don't have any use-case to take two or more locks at the same time - * for now, so there is no lock order issue. 
- */ -struct ptlrpc_service_part { - /** back reference to owner */ - struct ptlrpc_service *scp_service __cfs_cacheline_aligned; - /* CPT id, reserved */ - int scp_cpt; - /** always increasing number */ - int scp_thr_nextid; - /** # of starting threads */ - int scp_nthrs_starting; - /** # of stopping threads, reserved for shrinking threads */ - int scp_nthrs_stopping; - /** # running threads */ - int scp_nthrs_running; - /** service threads list */ - struct list_head scp_threads; - - /** - * serialize the following fields, used for protecting - * rqbd list and incoming requests waiting for preprocess, - * threads starting & stopping are also protected by this lock. - */ - spinlock_t scp_lock __cfs_cacheline_aligned; - /** total # req buffer descs allocated */ - int scp_nrqbds_total; - /** # posted request buffers for receiving */ - int scp_nrqbds_posted; - /** in progress of allocating rqbd */ - int scp_rqbd_allocating; - /** # incoming reqs */ - int scp_nreqs_incoming; - /** request buffers to be reposted */ - struct list_head scp_rqbd_idle; - /** req buffers receiving */ - struct list_head scp_rqbd_posted; - /** incoming reqs */ - struct list_head scp_req_incoming; - /** timeout before re-posting reqs, in tick */ - long scp_rqbd_timeout; - /** - * all threads sleep on this. This wait-queue is signalled when new - * incoming request arrives and when difficult reply has to be handled. 
- */ - wait_queue_head_t scp_waitq; - - /** request history */ - struct list_head scp_hist_reqs; - /** request buffer history */ - struct list_head scp_hist_rqbds; - /** # request buffers in history */ - int scp_hist_nrqbds; - /** sequence number for request */ - __u64 scp_hist_seq; - /** highest seq culled from history */ - __u64 scp_hist_seq_culled; - - /** - * serialize the following fields, used for processing requests - * sent to this portal - */ - spinlock_t scp_req_lock __cfs_cacheline_aligned; - /** # reqs in either of the NRS heads below */ - /** # reqs being served */ - int scp_nreqs_active; - /** # HPreqs being served */ - int scp_nhreqs_active; - /** # hp requests handled */ - int scp_hreq_count; - - /** NRS head for regular requests */ - struct ptlrpc_nrs scp_nrs_reg; - /** NRS head for HP requests; this is only valid for services that can - * handle HP requests - */ - struct ptlrpc_nrs *scp_nrs_hp; - - /** AT stuff */ - /** @{ */ - /** - * serialize the following fields, used for changes on - * adaptive timeout - */ - spinlock_t scp_at_lock __cfs_cacheline_aligned; - /** estimated rpc service time */ - struct adaptive_timeout scp_at_estimate; - /** reqs waiting for replies */ - struct ptlrpc_at_array scp_at_array; - /** early reply timer */ - struct timer_list scp_at_timer; - /** debug */ - unsigned long scp_at_checktime; - /** check early replies */ - unsigned scp_at_check; - /** @} */ - - /** - * serialize the following fields, used for processing - * replies for this portal - */ - spinlock_t scp_rep_lock __cfs_cacheline_aligned; - /** all the active replies */ - struct list_head scp_rep_active; - /** List of free reply_states */ - struct list_head scp_rep_idle; - /** waitq to run, when adding stuff to srv_free_rs_list */ - wait_queue_head_t scp_rep_waitq; - /** # 'difficult' replies */ - atomic_t scp_nreps_difficult; -}; - -#define ptlrpc_service_for_each_part(part, i, svc) \ - for (i = 0; \ - i < (svc)->srv_ncpts && \ - (svc)->srv_parts && \ - 
((part) = (svc)->srv_parts[i]); i++) - -/** - * Declaration of ptlrpcd control structure - */ -struct ptlrpcd_ctl { - /** - * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE) - */ - unsigned long pc_flags; - /** - * Thread lock protecting structure fields. - */ - spinlock_t pc_lock; - /** - * Start completion. - */ - struct completion pc_starting; - /** - * Stop completion. - */ - struct completion pc_finishing; - /** - * Thread requests set. - */ - struct ptlrpc_request_set *pc_set; - /** - * Thread name used in kthread_run() - */ - char pc_name[16]; - /** - * CPT the thread is bound on. - */ - int pc_cpt; - /** - * Index of ptlrpcd thread in the array. - */ - int pc_index; - /** - * Pointer to the array of partners' ptlrpcd_ctl structure. - */ - struct ptlrpcd_ctl **pc_partners; - /** - * Number of the ptlrpcd's partners. - */ - int pc_npartners; - /** - * Record the partner index to be processed next. - */ - int pc_cursor; - /** - * Error code if the thread failed to fully start. - */ - int pc_error; -}; - -/* Bits for pc_flags */ -enum ptlrpcd_ctl_flags { - /** - * Ptlrpc thread start flag. - */ - LIOD_START = 1 << 0, - /** - * Ptlrpc thread stop flag. - */ - LIOD_STOP = 1 << 1, - /** - * Ptlrpc thread force flag (only stop force so far). - * This will cause aborting any inflight rpcs handled - * by thread if LIOD_STOP is specified. - */ - LIOD_FORCE = 1 << 2, - /** - * This is a recovery ptlrpc thread. - */ - LIOD_RECOVERY = 1 << 3, -}; - -/** - * \addtogroup nrs - * @{ - * - * Service compatibility function; the policy is compatible with all services. - * - * \param[in] svc The service the policy is attempting to register with. 
- * \param[in] desc The policy descriptor - * - * \retval true The policy is compatible with the service - * - * \see ptlrpc_nrs_pol_desc::pd_compat() - */ -static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc, - const struct ptlrpc_nrs_pol_desc *desc) -{ - return true; -} - -/** - * Service compatibility function; the policy is compatible with only a specific - * service which is identified by its human-readable name at - * ptlrpc_service::srv_name. - * - * \param[in] svc The service the policy is attempting to register with. - * \param[in] desc The policy descriptor - * - * \retval false The policy is not compatible with the service - * \retval true The policy is compatible with the service - * - * \see ptlrpc_nrs_pol_desc::pd_compat() - */ -static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc, - const struct ptlrpc_nrs_pol_desc *desc) -{ - return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0; -} - -/** @} nrs */ - -/* ptlrpc/events.c */ -extern struct lnet_handle_eq ptlrpc_eq_h; -int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, - struct lnet_process_id *peer, lnet_nid_t *self); -/** - * These callbacks are invoked by LNet when something happened to - * underlying buffer - * @{ - */ -void request_out_callback(struct lnet_event *ev); -void reply_in_callback(struct lnet_event *ev); -void client_bulk_callback(struct lnet_event *ev); -void request_in_callback(struct lnet_event *ev); -void reply_out_callback(struct lnet_event *ev); -/** @} */ - -/* ptlrpc/connection.c */ -struct ptlrpc_connection *ptlrpc_connection_get(struct lnet_process_id peer, - lnet_nid_t self, - struct obd_uuid *uuid); -int ptlrpc_connection_put(struct ptlrpc_connection *c); -struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *); -int ptlrpc_connection_init(void); -void ptlrpc_connection_fini(void); - -/* ptlrpc/niobuf.c */ -/** - * Actual interfacing with LNet to put/get/register/unregister stuff - * @{ - */ - -int 
ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async); - -static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) -{ - struct ptlrpc_bulk_desc *desc; - int rc; - - desc = req->rq_bulk; - - if (req->rq_bulk_deadline > ktime_get_real_seconds()) - return 1; - - if (!desc) - return 0; - - spin_lock(&desc->bd_lock); - rc = desc->bd_md_count; - spin_unlock(&desc->bd_lock); - return rc; -} - -#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01 -#define PTLRPC_REPLY_EARLY 0x02 -int ptlrpc_send_reply(struct ptlrpc_request *req, int flags); -int ptlrpc_reply(struct ptlrpc_request *req); -int ptlrpc_send_error(struct ptlrpc_request *req, int difficult); -int ptlrpc_error(struct ptlrpc_request *req); -int ptlrpc_at_get_net_latency(struct ptlrpc_request *req); -int ptl_send_rpc(struct ptlrpc_request *request, int noreply); -int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd); -/** @} */ - -/* ptlrpc/client.c */ -/** - * Client-side portals API. Everything to send requests, receive replies, - * request queues, request management, etc. 
- * @{ - */ -void ptlrpc_request_committed(struct ptlrpc_request *req, int force); - -int ptlrpc_inc_ref(void); -void ptlrpc_dec_ref(void); - -void ptlrpc_init_client(int req_portal, int rep_portal, char *name, - struct ptlrpc_client *); -struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid); - -int ptlrpc_queue_wait(struct ptlrpc_request *req); -int ptlrpc_replay_req(struct ptlrpc_request *req); -void ptlrpc_abort_inflight(struct obd_import *imp); -void ptlrpc_abort_set(struct ptlrpc_request_set *set); - -struct ptlrpc_request_set *ptlrpc_prep_set(void); -struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, - void *arg); -int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set); -int ptlrpc_set_wait(struct ptlrpc_request_set *); -void ptlrpc_mark_interrupted(struct ptlrpc_request *req); -void ptlrpc_set_destroy(struct ptlrpc_request_set *); -void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *); - -void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool); -int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq); - -struct ptlrpc_request_pool * -ptlrpc_init_rq_pool(int, int, - int (*populate_pool)(struct ptlrpc_request_pool *, int)); - -void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req); -struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, - const struct req_format *format); -struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, - struct ptlrpc_request_pool *, - const struct req_format *); -void ptlrpc_request_free(struct ptlrpc_request *request); -int ptlrpc_request_pack(struct ptlrpc_request *request, - __u32 version, int opcode); -struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *, - const struct req_format *, - __u32, int); -int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, - __u32 version, int opcode, char **bufs, - struct ptlrpc_cli_ctx *ctx); -void ptlrpc_req_finished(struct 
ptlrpc_request *request); -struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req); -struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - unsigned int nfrags, - unsigned int max_brw, - unsigned int type, - unsigned int portal, - const struct ptlrpc_bulk_frag_ops *ops); - -int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, - void *frag, int len); -void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, int len, - int pin); -static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, - int len) -{ - __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1); -} - -static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, - int len) -{ - __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0); -} - -void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk); - -static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc) -{ - int i; - - for (i = 0; i < desc->bd_iov_count ; i++) - put_page(BD_GET_KIOV(desc, i).bv_page); -} - -void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, - struct obd_import *imp); -__u64 ptlrpc_next_xid(void); -__u64 ptlrpc_sample_next_xid(void); -__u64 ptlrpc_req_xid(struct ptlrpc_request *request); - -/* Set of routines to run a function in ptlrpcd context */ -void *ptlrpcd_alloc_work(struct obd_import *imp, - int (*cb)(const struct lu_env *, void *), void *data); -void ptlrpcd_destroy_work(void *handler); -int ptlrpcd_queue_work(void *handler); - -/** @} */ -struct ptlrpc_service_buf_conf { - /* nbufs is buffers # to allocate when growing the pool */ - unsigned int bc_nbufs; - /* buffer size to post */ - unsigned int bc_buf_size; - /* portal to listed for requests on */ - unsigned int bc_req_portal; - /* portal of where to send replies to */ - unsigned int bc_rep_portal; - /* maximum request size to be accepted for this service 
*/ - unsigned int bc_req_max_size; - /* maximum reply size this service can ever send */ - unsigned int bc_rep_max_size; -}; - -struct ptlrpc_service_thr_conf { - /* threadname should be 8 characters or less - 6 will be added on */ - char *tc_thr_name; - /* threads increasing factor for each CPU */ - unsigned int tc_thr_factor; - /* service threads # to start on each partition while initializing */ - unsigned int tc_nthrs_init; - /* - * low water of threads # upper-limit on each partition while running, - * service availability may be impacted if threads number is lower - * than this value. It can be ZERO if the service doesn't require - * CPU affinity or there is only one partition. - */ - unsigned int tc_nthrs_base; - /* "soft" limit for total threads number */ - unsigned int tc_nthrs_max; - /* user specified threads number, it will be validated due to - * other members of this structure. - */ - unsigned int tc_nthrs_user; - /* set NUMA node affinity for service threads */ - unsigned int tc_cpu_affinity; - /* Tags for lu_context associated with service thread */ - __u32 tc_ctx_tags; -}; - -struct ptlrpc_service_cpt_conf { - struct cfs_cpt_table *cc_cptable; - /* string pattern to describe CPTs for a service */ - char *cc_pattern; -}; - -struct ptlrpc_service_conf { - /* service name */ - char *psc_name; - /* soft watchdog timeout multiplifier to print stuck service traces */ - unsigned int psc_watchdog_factor; - /* buffer information */ - struct ptlrpc_service_buf_conf psc_buf; - /* thread information */ - struct ptlrpc_service_thr_conf psc_thr; - /* CPU partition information */ - struct ptlrpc_service_cpt_conf psc_cpt; - /* function table */ - struct ptlrpc_service_ops psc_ops; -}; - -/* ptlrpc/service.c */ -/** - * Server-side services API. 
Register/unregister service, request state - * management, service thread management - * - * @{ - */ -void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs); -void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs); -struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf, - struct kset *parent, - struct dentry *debugfs_entry); - -int ptlrpc_start_threads(struct ptlrpc_service *svc); -int ptlrpc_unregister_service(struct ptlrpc_service *service); - -int ptlrpc_hr_init(void); -void ptlrpc_hr_fini(void); - -/** @} */ - -/* ptlrpc/import.c */ -/** - * Import API - * @{ - */ -int ptlrpc_connect_import(struct obd_import *imp); -int ptlrpc_init_import(struct obd_import *imp); -int ptlrpc_disconnect_import(struct obd_import *imp, int noclose); -int ptlrpc_import_recovery_state_machine(struct obd_import *imp); - -/* ptlrpc/pack_generic.c */ -int ptlrpc_reconnect_import(struct obd_import *imp); -/** @} */ - -/** - * ptlrpc msg buffer and swab interface - * - * @{ - */ -int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, - u32 index); -void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, - u32 index); -int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len); -int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len); - -void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens, - char **bufs); -int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count, - __u32 *lens, char **bufs); -int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens, - char **bufs); -int lustre_pack_reply_v2(struct ptlrpc_request *req, int count, - __u32 *lens, char **bufs, int flags); -#define LPRFL_EARLY_REPLY 1 -int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens, - char **bufs, int flags); -int lustre_shrink_msg(struct lustre_msg *msg, int segment, - unsigned int newlen, int move_data); -void lustre_free_reply_state(struct 
ptlrpc_reply_state *rs); -int __lustre_unpack_msg(struct lustre_msg *m, int len); -u32 lustre_msg_hdr_size(__u32 magic, u32 count); -u32 lustre_msg_size(__u32 magic, int count, __u32 *lengths); -u32 lustre_msg_size_v2(int count, __u32 *lengths); -u32 lustre_packed_msg_size(struct lustre_msg *msg); -u32 lustre_msg_early_size(void); -void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, u32 n, u32 min_size); -void *lustre_msg_buf(struct lustre_msg *m, u32 n, u32 minlen); -u32 lustre_msg_buflen(struct lustre_msg *m, u32 n); -u32 lustre_msg_bufcount(struct lustre_msg *m); -char *lustre_msg_string(struct lustre_msg *m, u32 n, u32 max_len); -__u32 lustre_msghdr_get_flags(struct lustre_msg *msg); -void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags); -__u32 lustre_msg_get_flags(struct lustre_msg *msg); -void lustre_msg_add_flags(struct lustre_msg *msg, u32 flags); -void lustre_msg_set_flags(struct lustre_msg *msg, u32 flags); -void lustre_msg_clear_flags(struct lustre_msg *msg, u32 flags); -__u32 lustre_msg_get_op_flags(struct lustre_msg *msg); -void lustre_msg_add_op_flags(struct lustre_msg *msg, u32 flags); -struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg); -__u32 lustre_msg_get_type(struct lustre_msg *msg); -void lustre_msg_add_version(struct lustre_msg *msg, u32 version); -__u32 lustre_msg_get_opc(struct lustre_msg *msg); -__u16 lustre_msg_get_tag(struct lustre_msg *msg); -__u64 lustre_msg_get_last_committed(struct lustre_msg *msg); -__u64 *lustre_msg_get_versions(struct lustre_msg *msg); -__u64 lustre_msg_get_transno(struct lustre_msg *msg); -__u64 lustre_msg_get_slv(struct lustre_msg *msg); -__u32 lustre_msg_get_limit(struct lustre_msg *msg); -void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv); -void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit); -int lustre_msg_get_status(struct lustre_msg *msg); -__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg); -__u32 lustre_msg_get_magic(struct lustre_msg *msg); -__u32 
lustre_msg_get_timeout(struct lustre_msg *msg); -__u32 lustre_msg_get_service_time(struct lustre_msg *msg); -__u32 lustre_msg_get_cksum(struct lustre_msg *msg); -__u32 lustre_msg_calc_cksum(struct lustre_msg *msg); -void lustre_msg_set_handle(struct lustre_msg *msg, - struct lustre_handle *handle); -void lustre_msg_set_type(struct lustre_msg *msg, __u32 type); -void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc); -void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid); -void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag); -void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions); -void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno); -void lustre_msg_set_status(struct lustre_msg *msg, __u32 status); -void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt); -void ptlrpc_request_set_replen(struct ptlrpc_request *req); -void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout); -void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time); -void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid); -void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum); -void lustre_msg_set_mbits(struct lustre_msg *msg, u64 mbits); - -static inline void -lustre_shrink_reply(struct ptlrpc_request *req, int segment, - unsigned int newlen, int move_data) -{ - LASSERT(req->rq_reply_state); - LASSERT(req->rq_repmsg); - req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment, - newlen, move_data); -} - -#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS - -static inline int ptlrpc_status_hton(int h) -{ - /* - * Positive errnos must be network errnos, such as LUSTRE_EDEADLK, - * ELDLM_LOCK_ABORTED, etc. - */ - if (h < 0) - return -lustre_errno_hton(-h); - else - return h; -} - -static inline int ptlrpc_status_ntoh(int n) -{ - /* - * See the comment in ptlrpc_status_hton(). 
- */ - if (n < 0) - return -lustre_errno_ntoh(-n); - else - return n; -} - -#else - -#define ptlrpc_status_hton(h) (h) -#define ptlrpc_status_ntoh(n) (n) - -#endif -/** @} */ - -/** Change request phase of \a req to \a new_phase */ -static inline void -ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase) -{ - if (req->rq_phase == new_phase) - return; - - if (new_phase == RQ_PHASE_UNREG_RPC || - new_phase == RQ_PHASE_UNREG_BULK) { - /* No embedded unregistering phases */ - if (req->rq_phase == RQ_PHASE_UNREG_RPC || - req->rq_phase == RQ_PHASE_UNREG_BULK) - return; - - req->rq_next_phase = req->rq_phase; - if (req->rq_import) - atomic_inc(&req->rq_import->imp_unregistering); - } - - if (req->rq_phase == RQ_PHASE_UNREG_RPC || - req->rq_phase == RQ_PHASE_UNREG_BULK) { - if (req->rq_import) - atomic_dec(&req->rq_import->imp_unregistering); - } - - DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"", - ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase)); - - req->rq_phase = new_phase; -} - -/** - * Returns true if request \a req got early reply and hard deadline is not met - */ -static inline int -ptlrpc_client_early(struct ptlrpc_request *req) -{ - return req->rq_early; -} - -/** - * Returns true if we got real reply from server for this request - */ -static inline int -ptlrpc_client_replied(struct ptlrpc_request *req) -{ - if (req->rq_reply_deadline > ktime_get_real_seconds()) - return 0; - return req->rq_replied; -} - -/** Returns true if request \a req is in process of receiving server reply */ -static inline int -ptlrpc_client_recv(struct ptlrpc_request *req) -{ - if (req->rq_reply_deadline > ktime_get_real_seconds()) - return 1; - return req->rq_receiving_reply; -} - -static inline int -ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) -{ - int rc; - - spin_lock(&req->rq_lock); - if (req->rq_reply_deadline > ktime_get_real_seconds()) { - spin_unlock(&req->rq_lock); - return 1; - } - if (req->rq_req_deadline > ktime_get_real_seconds()) { 
- spin_unlock(&req->rq_lock); - return 1; - } - rc = !req->rq_req_unlinked || !req->rq_reply_unlinked || - req->rq_receiving_reply; - spin_unlock(&req->rq_lock); - return rc; -} - -static inline void -ptlrpc_client_wake_req(struct ptlrpc_request *req) -{ - if (!req->rq_set) - wake_up(&req->rq_reply_waitq); - else - wake_up(&req->rq_set->set_waitq); -} - -static inline void -ptlrpc_rs_addref(struct ptlrpc_reply_state *rs) -{ - LASSERT(atomic_read(&rs->rs_refcount) > 0); - atomic_inc(&rs->rs_refcount); -} - -static inline void -ptlrpc_rs_decref(struct ptlrpc_reply_state *rs) -{ - LASSERT(atomic_read(&rs->rs_refcount) > 0); - if (atomic_dec_and_test(&rs->rs_refcount)) - lustre_free_reply_state(rs); -} - -/* Should only be called once per req */ -static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req) -{ - if (!req->rq_reply_state) - return; /* shouldn't occur */ - ptlrpc_rs_decref(req->rq_reply_state); - req->rq_reply_state = NULL; - req->rq_repmsg = NULL; -} - -static inline __u32 lustre_request_magic(struct ptlrpc_request *req) -{ - return lustre_msg_get_magic(req->rq_reqmsg); -} - -static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req) -{ - switch (req->rq_reqmsg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return req->rq_reqmsg->lm_repsize; - default: - LASSERTF(0, "incorrect message magic: %08x\n", - req->rq_reqmsg->lm_magic); - return -EFAULT; - } -} - -static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req) -{ - if (req->rq_delay_limit != 0 && - time_before(req->rq_queued_time + req->rq_delay_limit * HZ, - jiffies)) { - return 1; - } - return 0; -} - -static inline int ptlrpc_no_resend(struct ptlrpc_request *req) -{ - if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) { - spin_lock(&req->rq_lock); - req->rq_no_resend = 1; - spin_unlock(&req->rq_lock); - } - return req->rq_no_resend; -} - -static inline int -ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt) -{ - int at = AT_OFF ? 
0 : at_get(&svcpt->scp_at_estimate); - - return svcpt->scp_service->srv_watchdog_factor * - max_t(int, at, obd_timeout); -} - -static inline struct ptlrpc_service * -ptlrpc_req2svc(struct ptlrpc_request *req) -{ - return req->rq_rqbd->rqbd_svcpt->scp_service; -} - -/* ldlm/ldlm_lib.c */ -/** - * Target client logic - * @{ - */ -int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg); -int client_obd_cleanup(struct obd_device *obddev); -int client_connect_import(const struct lu_env *env, - struct obd_export **exp, struct obd_device *obd, - struct obd_uuid *cluuid, struct obd_connect_data *, - void *localdata); -int client_disconnect_export(struct obd_export *exp); -int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid, - int priority); -int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid); -int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer, - struct obd_uuid *uuid); -int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid); -void client_destroy_import(struct obd_import *imp); -/** @} */ - -/* ptlrpc/pinger.c */ -/** - * Pinger API (client side only) - * @{ - */ -enum timeout_event { - TIMEOUT_GRANT = 1 -}; - -struct timeout_item; -typedef int (*timeout_cb_t)(struct timeout_item *, void *); -int ptlrpc_pinger_add_import(struct obd_import *imp); -int ptlrpc_pinger_del_import(struct obd_import *imp); -int ptlrpc_add_timeout_client(int time, enum timeout_event event, - timeout_cb_t cb, void *data, - struct list_head *obd_list); -int ptlrpc_del_timeout_client(struct list_head *obd_list, - enum timeout_event event); -struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp); -int ptlrpc_obd_ping(struct obd_device *obd); -void ptlrpc_pinger_ir_up(void); -void ptlrpc_pinger_ir_down(void); -/** @} */ -int ptlrpc_pinger_suppress_pings(void); - -/* ptlrpc/ptlrpcd.c */ -void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force); -void ptlrpcd_free(struct ptlrpcd_ctl *pc); -void 
ptlrpcd_wake(struct ptlrpc_request *req); -void ptlrpcd_add_req(struct ptlrpc_request *req); -int ptlrpcd_addref(void); -void ptlrpcd_decref(void); - -/* ptlrpc/lproc_ptlrpc.c */ -/** - * procfs output related functions - * @{ - */ -const char *ll_opcode2str(__u32 opcode); -void ptlrpc_lprocfs_register_obd(struct obd_device *obd); -void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd); -void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes); -/** @} */ - -/* ptlrpc/llog_client.c */ -extern struct llog_operations llog_client_ops; -/** @} net */ - -#endif -/** @} PtlRPC */ diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h deleted file mode 100644 index ffa7317da35b..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_nrs.h +++ /dev/null @@ -1,718 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2014, Intel Corporation. 
- * - * Copyright 2012 Xyratex Technology Limited - */ -/* - * - * Network Request Scheduler (NRS) - * - */ - -#ifndef _LUSTRE_NRS_H -#define _LUSTRE_NRS_H - -/** - * \defgroup nrs Network Request Scheduler - * @{ - */ -struct ptlrpc_nrs_policy; -struct ptlrpc_nrs_resource; -struct ptlrpc_nrs_request; - -/** - * NRS control operations. - * - * These are common for all policies. - */ -enum ptlrpc_nrs_ctl { - /** - * Not a valid opcode. - */ - PTLRPC_NRS_CTL_INVALID, - /** - * Activate the policy. - */ - PTLRPC_NRS_CTL_START, - /** - * Reserved for multiple primary policies, which may be a possibility - * in the future. - */ - PTLRPC_NRS_CTL_STOP, - /** - * Policies can start using opcodes from this value and onwards for - * their own purposes; the assigned value itself is arbitrary. - */ - PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20, -}; - -/** - * NRS policy operations. - * - * These determine the behaviour of a policy, and are called in response to - * NRS core events. - */ -struct ptlrpc_nrs_pol_ops { - /** - * Called during policy registration; this operation is optional. - * - * \param[in,out] policy The policy being initialized - */ - int (*op_policy_init)(struct ptlrpc_nrs_policy *policy); - /** - * Called during policy unregistration; this operation is optional. - * - * \param[in,out] policy The policy being unregistered/finalized - */ - void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy); - /** - * Called when activating a policy via lprocfs; policies allocate and - * initialize their resources here; this operation is optional. 
- * - * \param[in,out] policy The policy being started - * - * \see nrs_policy_start_locked() - */ - int (*op_policy_start)(struct ptlrpc_nrs_policy *policy); - /** - * Called when deactivating a policy via lprocfs; policies deallocate - * their resources here; this operation is optional - * - * \param[in,out] policy The policy being stopped - * - * \see nrs_policy_stop0() - */ - void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy); - /** - * Used for policy-specific operations; i.e. not generic ones like - * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous - * to an ioctl; this operation is optional. - * - * \param[in,out] policy The policy carrying out operation \a opc - * \param[in] opc The command operation being carried out - * \param[in,out] arg An generic buffer for communication between the - * user and the control operation - * - * \retval -ve error - * \retval 0 success - * - * \see ptlrpc_nrs_policy_control() - */ - int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy, - enum ptlrpc_nrs_ctl opc, void *arg); - - /** - * Called when obtaining references to the resources of the resource - * hierarchy for a request that has arrived for handling at the PTLRPC - * service. Policies should return -ve for requests they do not wish - * to handle. This operation is mandatory. - * - * \param[in,out] policy The policy we're getting resources for. - * \param[in,out] nrq The request we are getting resources for. - * \param[in] parent The parent resource of the resource being - * requested; set to NULL if none. - * \param[out] resp The resource is to be returned here; the - * fallback policy in an NRS head should - * \e always return a non-NULL pointer value. - * \param[in] moving_req When set, signifies that this is an attempt - * to obtain resources for a request being moved - * to the high-priority NRS head by - * ldlm_lock_reorder_req(). - * This implies two things: - * 1. We are under obd_export::exp_rpc_lock and - * so should not sleep. 
- * 2. We should not perform non-idempotent or can - * skip performing idempotent operations that - * were carried out when resources were first - * taken for the request when it was initialized - * in ptlrpc_nrs_req_initialize(). - * - * \retval 0, +ve The level of the returned resource in the resource - * hierarchy; currently only 0 (for a non-leaf resource) - * and 1 (for a leaf resource) are supported by the - * framework. - * \retval -ve error - * - * \see ptlrpc_nrs_req_initialize() - * \see ptlrpc_nrs_hpreq_add_nolock() - * \see ptlrpc_nrs_req_hp_move() - */ - int (*op_res_get)(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq, - const struct ptlrpc_nrs_resource *parent, - struct ptlrpc_nrs_resource **resp, - bool moving_req); - /** - * Called when releasing references taken for resources in the resource - * hierarchy for the request; this operation is optional. - * - * \param[in,out] policy The policy the resource belongs to - * \param[in] res The resource to be freed - * - * \see ptlrpc_nrs_req_finalize() - * \see ptlrpc_nrs_hpreq_add_nolock() - * \see ptlrpc_nrs_req_hp_move() - */ - void (*op_res_put)(struct ptlrpc_nrs_policy *policy, - const struct ptlrpc_nrs_resource *res); - - /** - * Obtains a request for handling from the policy, and optionally - * removes the request from the policy; this operation is mandatory. - * - * \param[in,out] policy The policy to poll - * \param[in] peek When set, signifies that we just want to - * examine the request, and not handle it, so the - * request is not removed from the policy. - * \param[in] force When set, it will force a policy to return a - * request if it has one queued. 
- * - * \retval NULL No request available for handling - * \retval valid-pointer The request polled for handling - * - * \see ptlrpc_nrs_req_get_nolock() - */ - struct ptlrpc_nrs_request * - (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek, - bool force); - /** - * Called when attempting to add a request to a policy for later - * handling; this operation is mandatory. - * - * \param[in,out] policy The policy on which to enqueue \a nrq - * \param[in,out] nrq The request to enqueue - * - * \retval 0 success - * \retval != 0 error - * - * \see ptlrpc_nrs_req_add_nolock() - */ - int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Removes a request from the policy's set of pending requests. Normally - * called after a request has been polled successfully from the policy - * for handling; this operation is mandatory. - * - * \param[in,out] policy The policy the request \a nrq belongs to - * \param[in,out] nrq The request to dequeue - * - * \see ptlrpc_nrs_req_del_nolock() - */ - void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Called after the request being carried out. Could be used for - * job/resource control; this operation is optional. - * - * \param[in,out] policy The policy which is stopping to handle request - * \a nrq - * \param[in,out] nrq The request - * - * \pre assert_spin_locked(&svcpt->scp_req_lock) - * - * \see ptlrpc_nrs_req_stop_nolock() - */ - void (*op_req_stop)(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Registers the policy's lprocfs interface with a PTLRPC service. - * - * \param[in] svc The service - * - * \retval 0 success - * \retval != 0 error - */ - int (*op_lprocfs_init)(struct ptlrpc_service *svc); - /** - * Unegisters the policy's lprocfs interface with a PTLRPC service. 
- * - * In cases of failed policy registration in - * \e ptlrpc_nrs_policy_register(), this function may be called for a - * service which has not registered the policy successfully, so - * implementations of this method should make sure their operations are - * safe in such cases. - * - * \param[in] svc The service - */ - void (*op_lprocfs_fini)(struct ptlrpc_service *svc); -}; - -/** - * Policy flags - */ -enum nrs_policy_flags { - /** - * Fallback policy, use this flag only on a single supported policy per - * service. The flag cannot be used on policies that use - * \e PTLRPC_NRS_FL_REG_EXTERN - */ - PTLRPC_NRS_FL_FALLBACK = BIT(0), - /** - * Start policy immediately after registering. - */ - PTLRPC_NRS_FL_REG_START = BIT(1), - /** - * This is a policy registering from a module different to the one NRS - * core ships in (currently ptlrpc). - */ - PTLRPC_NRS_FL_REG_EXTERN = BIT(2), -}; - -/** - * NRS queue type. - * - * Denotes whether an NRS instance is for handling normal or high-priority - * RPCs, or whether an operation pertains to one or both of the NRS instances - * in a service. - */ -enum ptlrpc_nrs_queue_type { - PTLRPC_NRS_QUEUE_REG = BIT(0), - PTLRPC_NRS_QUEUE_HP = BIT(1), - PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP) -}; - -/** - * NRS head - * - * A PTLRPC service has at least one NRS head instance for handling normal - * priority RPCs, and may optionally have a second NRS head instance for - * handling high-priority RPCs. Each NRS head maintains a list of available - * policies, of which one and only one policy is acting as the fallback policy, - * and optionally a different policy may be acting as the primary policy. For - * all RPCs handled by this NRS head instance, NRS core will first attempt to - * enqueue the RPC using the primary policy (if any). 
The fallback policy is - * used in the following cases: - * - when there was no primary policy in the - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request - * was initialized. - * - when the primary policy that was at the - * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the - * RPC was initialized, denoted it did not wish, or for some other reason was - * not able to handle the request, by returning a non-valid NRS resource - * reference. - * - when the primary policy that was at the - * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the - * RPC was initialized, fails later during the request enqueueing stage. - * - * \see nrs_resource_get_safe() - * \see nrs_request_enqueue() - */ -struct ptlrpc_nrs { - spinlock_t nrs_lock; - /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */ - /** - * List of registered policies - */ - struct list_head nrs_policy_list; - /** - * List of policies with queued requests. Policies that have any - * outstanding requests are queued here, and this list is queried - * in a round-robin manner from NRS core when obtaining a request - * for handling. This ensures that requests from policies that at some - * point transition away from the - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained. 
- */ - struct list_head nrs_policy_queued; - /** - * Service partition for this NRS head - */ - struct ptlrpc_service_part *nrs_svcpt; - /** - * Primary policy, which is the preferred policy for handling RPCs - */ - struct ptlrpc_nrs_policy *nrs_policy_primary; - /** - * Fallback policy, which is the backup policy for handling RPCs - */ - struct ptlrpc_nrs_policy *nrs_policy_fallback; - /** - * This NRS head handles either HP or regular requests - */ - enum ptlrpc_nrs_queue_type nrs_queue_type; - /** - * # queued requests from all policies in this NRS head - */ - unsigned long nrs_req_queued; - /** - * # scheduled requests from all policies in this NRS head - */ - unsigned long nrs_req_started; - /** - * # policies on this NRS - */ - unsigned int nrs_num_pols; - /** - * This NRS head is in progress of starting a policy - */ - unsigned int nrs_policy_starting:1; - /** - * In progress of shutting down the whole NRS head; used during - * unregistration - */ - unsigned int nrs_stopping:1; - /** - * NRS policy is throttling request - */ - unsigned int nrs_throttling:1; -}; - -#define NRS_POL_NAME_MAX 16 -#define NRS_POL_ARG_MAX 16 - -struct ptlrpc_nrs_pol_desc; - -/** - * Service compatibility predicate; this determines whether a policy is adequate - * for handling RPCs of a particular PTLRPC service. - * - * XXX:This should give the same result during policy registration and - * unregistration, and for all partitions of a service; so the result should not - * depend on temporal service or other properties, that may influence the - * result. 
- */ -typedef bool (*nrs_pol_desc_compat_t)(const struct ptlrpc_service *svc, - const struct ptlrpc_nrs_pol_desc *desc); - -struct ptlrpc_nrs_pol_conf { - /** - * Human-readable policy name - */ - char nc_name[NRS_POL_NAME_MAX]; - /** - * NRS operations for this policy - */ - const struct ptlrpc_nrs_pol_ops *nc_ops; - /** - * Service compatibility predicate - */ - nrs_pol_desc_compat_t nc_compat; - /** - * Set for policies that support a single ptlrpc service, i.e. ones that - * have \a pd_compat set to nrs_policy_compat_one(). The variable value - * depicts the name of the single service that such policies are - * compatible with. - */ - const char *nc_compat_svc_name; - /** - * Owner module for this policy descriptor; policies registering from a - * different module to the one the NRS framework is held within - * (currently ptlrpc), should set this field to THIS_MODULE. - */ - struct module *nc_owner; - /** - * Policy registration flags; a bitmask of \e nrs_policy_flags - */ - unsigned int nc_flags; -}; - -/** - * NRS policy registering descriptor - * - * Is used to hold a description of a policy that can be passed to NRS core in - * order to register the policy with NRS heads in different PTLRPC services. - */ -struct ptlrpc_nrs_pol_desc { - /** - * Human-readable policy name - */ - char pd_name[NRS_POL_NAME_MAX]; - /** - * Link into nrs_core::nrs_policies - */ - struct list_head pd_list; - /** - * NRS operations for this policy - */ - const struct ptlrpc_nrs_pol_ops *pd_ops; - /** - * Service compatibility predicate - */ - nrs_pol_desc_compat_t pd_compat; - /** - * Set for policies that are compatible with only one PTLRPC service. - * - * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name - */ - const char *pd_compat_svc_name; - /** - * Owner module for this policy descriptor. - * - * We need to hold a reference to the module whenever we might make use - * of any of the module's contents, i.e. 
- * - If one or more instances of the policy are at a state where they - * might be handling a request, i.e. - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to - * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference - * is taken on the module when - * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it - * becomes 0, so that we hold only one reference to the module maximum - * at any time. - * - * We do not need to hold a reference to the module, even though we - * might use code and data from the module, in the following cases: - * - During external policy registration, because this should happen in - * the module's init() function, in which case the module is safe from - * removal because a reference is being held on the module by the - * kernel, and iirc kmod (and I guess module-init-tools also) will - * serialize any racing processes properly anyway. - * - During external policy unregistration, because this should happen - * in a module's exit() function, and any attempts to start a policy - * instance would need to take a reference on the module, and this is - * not possible once we have reached the point where the exit() - * handler is called. - * - During service registration and unregistration, as service setup - * and cleanup, and policy registration, unregistration and policy - * instance starting, are serialized by \e nrs_core::nrs_mutex, so - * as long as users adhere to the convention of registering policies - * in init() and unregistering them in module exit() functions, there - * should not be a race between these operations. - * - During any policy-specific lprocfs operations, because a reference - * is held by the kernel on a proc entry that has been entered by a - * syscall, so as long as proc entries are removed during - * unregistration time, then unregistration and lprocfs operations - * will be properly serialized. 
- */ - struct module *pd_owner; - /** - * Bitmask of \e nrs_policy_flags - */ - unsigned int pd_flags; - /** - * # of references on this descriptor - */ - atomic_t pd_refs; -}; - -/** - * NRS policy state - * - * Policies transition from one state to the other during their lifetime - */ -enum ptlrpc_nrs_pol_state { - /** - * Not a valid policy state. - */ - NRS_POL_STATE_INVALID, - /** - * Policies are at this state either at the start of their life, or - * transition here when the user selects a different policy to act - * as the primary one. - */ - NRS_POL_STATE_STOPPED, - /** - * Policy is progress of stopping - */ - NRS_POL_STATE_STOPPING, - /** - * Policy is in progress of starting - */ - NRS_POL_STATE_STARTING, - /** - * A policy is in this state in two cases: - * - it is the fallback policy, which is always in this state. - * - it has been activated by the user; i.e. it is the primary policy, - */ - NRS_POL_STATE_STARTED, -}; - -/** - * NRS policy information - * - * Used for obtaining information for the status of a policy via lprocfs - */ -struct ptlrpc_nrs_pol_info { - /** - * Policy name - */ - char pi_name[NRS_POL_NAME_MAX]; - /** - * Policy argument - */ - char pi_arg[NRS_POL_ARG_MAX]; - /** - * Current policy state - */ - enum ptlrpc_nrs_pol_state pi_state; - /** - * # RPCs enqueued for later dispatching by the policy - */ - long pi_req_queued; - /** - * # RPCs started for dispatch by the policy - */ - long pi_req_started; - /** - * Is this a fallback policy? - */ - unsigned pi_fallback:1; -}; - -/** - * NRS policy - * - * There is one instance of this for each policy in each NRS head of each - * PTLRPC service partition. 
- */ -struct ptlrpc_nrs_policy { - /** - * Linkage into the NRS head's list of policies, - * ptlrpc_nrs:nrs_policy_list - */ - struct list_head pol_list; - /** - * Linkage into the NRS head's list of policies with enqueued - * requests ptlrpc_nrs:nrs_policy_queued - */ - struct list_head pol_list_queued; - /** - * Current state of this policy - */ - enum ptlrpc_nrs_pol_state pol_state; - /** - * Bitmask of nrs_policy_flags - */ - unsigned int pol_flags; - /** - * # RPCs enqueued for later dispatching by the policy - */ - long pol_req_queued; - /** - * # RPCs started for dispatch by the policy - */ - long pol_req_started; - /** - * Usage Reference count taken on the policy instance - */ - long pol_ref; - /** - * Human-readable policy argument - */ - char pol_arg[NRS_POL_ARG_MAX]; - /** - * The NRS head this policy has been created at - */ - struct ptlrpc_nrs *pol_nrs; - /** - * Private policy data; varies by policy type - */ - void *pol_private; - /** - * Policy descriptor for this policy instance. - */ - struct ptlrpc_nrs_pol_desc *pol_desc; -}; - -/** - * NRS resource - * - * Resources are embedded into two types of NRS entities: - * - Inside NRS policies, in the policy's private data in - * ptlrpc_nrs_policy::pol_private - * - In objects that act as prime-level scheduling entities in different NRS - * policies; e.g. on a policy that performs round robin or similar order - * scheduling across client NIDs, there would be one NRS resource per unique - * client NID. On a policy which performs round robin scheduling across - * backend filesystem objects, there would be one resource associated with - * each of the backend filesystem objects partaking in the scheduling - * performed by the policy. - * - * NRS resources share a parent-child relationship, in which resources embedded - * in policy instances are the parent entities, with all scheduling entities - * a policy schedules across being the children, thus forming a simple resource - * hierarchy. 
This hierarchy may be extended with one or more levels in the - * future if the ability to have more than one primary policy is added. - * - * Upon request initialization, references to the then active NRS policies are - * taken and used to later handle the dispatching of the request with one of - * these policies. - * - * \see nrs_resource_get_safe() - * \see ptlrpc_nrs_req_add() - */ -struct ptlrpc_nrs_resource { - /** - * This NRS resource's parent; is NULL for resources embedded in NRS - * policy instances; i.e. those are top-level ones. - */ - struct ptlrpc_nrs_resource *res_parent; - /** - * The policy associated with this resource. - */ - struct ptlrpc_nrs_policy *res_policy; -}; - -enum { - NRS_RES_FALLBACK, - NRS_RES_PRIMARY, - NRS_RES_MAX -}; - -#include - -/** - * NRS request - * - * Instances of this object exist embedded within ptlrpc_request; the main - * purpose of this object is to hold references to the request's resources - * for the lifetime of the request, and to hold properties that policies use - * use for determining the request's scheduling priority. - **/ -struct ptlrpc_nrs_request { - /** - * The request's resource hierarchy. - */ - struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX]; - /** - * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the - * policy that was used to enqueue the request. - * - * \see nrs_request_enqueue() - */ - unsigned int nr_res_idx; - unsigned int nr_initialized:1; - unsigned int nr_enqueued:1; - unsigned int nr_started:1; - unsigned int nr_finalized:1; - - /** - * Policy-specific fields, used for determining a request's scheduling - * priority, and other supporting functionality. - */ - union { - /** - * Fields for the FIFO policy - */ - struct nrs_fifo_req fifo; - } nr_u; - /** - * Externally-registering policies may want to use this to allocate - * their own request properties. 
- */ - void *ext; -}; - -/** @} nrs */ -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h deleted file mode 100644 index b70d97d4acbb..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2014, Intel Corporation. - * - * Copyright 2012 Xyratex Technology Limited - */ -/* - * - * Network Request Scheduler (NRS) First-in First-out (FIFO) policy - * - */ - -#ifndef _LUSTRE_NRS_FIFO_H -#define _LUSTRE_NRS_FIFO_H - -/* \name fifo - * - * FIFO policy - * - * This policy is a logical wrapper around previous, non-NRS functionality. - * It dispatches RPCs in the same order as they arrive from the network. This - * policy is currently used as the fallback policy, and the only enabled policy - * on all NRS heads of all PTLRPC service partitions. - * @{ - */ - -/** - * Private data structure for the FIFO policy - */ -struct nrs_fifo_head { - /** - * Resource object for policy instance. - */ - struct ptlrpc_nrs_resource fh_res; - /** - * List of queued requests. - */ - struct list_head fh_list; - /** - * For debugging purposes. 
- */ - __u64 fh_sequence; -}; - -struct nrs_fifo_req { - struct list_head fr_list; - __u64 fr_sequence; -}; - -/** @} fifo */ -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_obdo.h b/drivers/staging/lustre/lustre/include/lustre_obdo.h deleted file mode 100644 index d67dcbb84f18..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_obdo.h +++ /dev/null @@ -1,55 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2014, Intel Corporation. - * - * Copyright 2015 Cray Inc, all rights reserved. - * Author: Ben Evans. - * - * Define obdo associated functions - * obdo: OBject Device o... 
- */ - -#ifndef _LUSTRE_OBDO_H_ -#define _LUSTRE_OBDO_H_ - -#include - -/** - * Create an obdo to send over the wire - */ -void lustre_set_wire_obdo(const struct obd_connect_data *ocd, - struct obdo *wobdo, - const struct obdo *lobdo); - -/** - * Create a local obdo from a wire based odbo - */ -void lustre_get_wire_obdo(const struct obd_connect_data *ocd, - struct obdo *lobdo, - const struct obdo *wobdo); - -#endif diff --git a/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h deleted file mode 100644 index 298476ea7557..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef LUSTRE_PATCHLESS_COMPAT_H -#define LUSTRE_PATCHLESS_COMPAT_H - -#include - -#include -#include -#include -#include - -#define ll_delete_from_page_cache(page) delete_from_page_cache(page) - -static inline void -truncate_complete_page(struct address_space *mapping, struct page *page) -{ - if (page->mapping != mapping) - return; - - if (PagePrivate(page)) - page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); - - cancel_dirty_page(page); - ClearPageMappedToDisk(page); - ll_delete_from_page_cache(page); -} - -#ifndef ATTR_CTIME_SET -/* - * set ATTR_CTIME_SET to a high value to avoid any risk of collision with other - * ATTR_* attributes (see bug 13828) - */ -#define ATTR_CTIME_SET (1 << 28) -#endif - -#endif /* LUSTRE_PATCHLESS_COMPAT_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h deleted file mode 100644 index 213d0a01adcf..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h +++ /dev/null @@ -1,307 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. 
All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/include/lustre_req_layout.h - * - * Lustre Metadata Target (mdt) request handler - * - * Author: Nikita Danilov - */ - -#ifndef _LUSTRE_REQ_LAYOUT_H__ -#define _LUSTRE_REQ_LAYOUT_H__ - -#include - -/** \defgroup req_layout req_layout - * - * @{ - */ - -struct req_msg_field; -struct req_format; -struct req_capsule; - -struct ptlrpc_request; - -enum req_location { - RCL_CLIENT, - RCL_SERVER, - RCL_NR -}; - -/* Maximal number of fields (buffers) in a request message. */ -#define REQ_MAX_FIELD_NR 9 - -struct req_capsule { - struct ptlrpc_request *rc_req; - const struct req_format *rc_fmt; - enum req_location rc_loc; - __u32 rc_area[RCL_NR][REQ_MAX_FIELD_NR]; -}; - -void req_capsule_init(struct req_capsule *pill, struct ptlrpc_request *req, - enum req_location location); -void req_capsule_fini(struct req_capsule *pill); - -void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt); -size_t req_capsule_filled_sizes(struct req_capsule *pill, - enum req_location loc); -int req_capsule_server_pack(struct req_capsule *pill); - -void *req_capsule_client_get(struct req_capsule *pill, - const struct req_msg_field *field); -void *req_capsule_client_swab_get(struct req_capsule *pill, - const struct req_msg_field *field, - void *swabber); -void *req_capsule_client_sized_get(struct req_capsule *pill, - const struct req_msg_field *field, - u32 len); -void *req_capsule_server_get(struct req_capsule *pill, - const struct req_msg_field *field); -void *req_capsule_server_sized_get(struct req_capsule *pill, - const struct req_msg_field *field, - u32 len); -void *req_capsule_server_swab_get(struct req_capsule *pill, - const struct req_msg_field *field, - void *swabber); -void *req_capsule_server_sized_swab_get(struct req_capsule 
*pill, - const struct req_msg_field *field, - u32 len, void *swabber); - -void req_capsule_set_size(struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc, u32 size); -u32 req_capsule_get_size(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc); -u32 req_capsule_msg_size(struct req_capsule *pill, enum req_location loc); -u32 req_capsule_fmt_size(__u32 magic, const struct req_format *fmt, - enum req_location loc); -void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt); - -int req_capsule_has_field(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc); -void req_capsule_shrink(struct req_capsule *pill, - const struct req_msg_field *field, - u32 newlen, enum req_location loc); -int req_layout_init(void); -void req_layout_fini(void); - -extern struct req_format RQF_OBD_PING; -extern struct req_format RQF_OBD_SET_INFO; -extern struct req_format RQF_SEC_CTX; -/* MGS req_format */ -extern struct req_format RQF_MGS_TARGET_REG; -extern struct req_format RQF_MGS_SET_INFO; -extern struct req_format RQF_MGS_CONFIG_READ; -/* fid/fld req_format */ -extern struct req_format RQF_SEQ_QUERY; -extern struct req_format RQF_FLD_QUERY; -extern struct req_format RQF_FLD_READ; -/* MDS req_format */ -extern struct req_format RQF_MDS_CONNECT; -extern struct req_format RQF_MDS_DISCONNECT; -extern struct req_format RQF_MDS_STATFS; -extern struct req_format RQF_MDS_GETSTATUS; -extern struct req_format RQF_MDS_SYNC; -extern struct req_format RQF_MDS_GETXATTR; -extern struct req_format RQF_MDS_GETATTR; - -/* - * This is format of direct (non-intent) MDS_GETATTR_NAME request. 
- */ -extern struct req_format RQF_MDS_GETATTR_NAME; -extern struct req_format RQF_MDS_CLOSE; -extern struct req_format RQF_MDS_INTENT_CLOSE; -extern struct req_format RQF_MDS_CONNECT; -extern struct req_format RQF_MDS_DISCONNECT; -extern struct req_format RQF_MDS_GET_INFO; -extern struct req_format RQF_MDS_READPAGE; -extern struct req_format RQF_MDS_WRITEPAGE; -extern struct req_format RQF_MDS_REINT; -extern struct req_format RQF_MDS_REINT_CREATE; -extern struct req_format RQF_MDS_REINT_CREATE_ACL; -extern struct req_format RQF_MDS_REINT_CREATE_SLAVE; -extern struct req_format RQF_MDS_REINT_CREATE_SYM; -extern struct req_format RQF_MDS_REINT_OPEN; -extern struct req_format RQF_MDS_REINT_UNLINK; -extern struct req_format RQF_MDS_REINT_LINK; -extern struct req_format RQF_MDS_REINT_RENAME; -extern struct req_format RQF_MDS_REINT_SETATTR; -extern struct req_format RQF_MDS_REINT_SETXATTR; -extern struct req_format RQF_MDS_QUOTACTL; -extern struct req_format RQF_MDS_SWAP_LAYOUTS; -extern struct req_format RQF_MDS_REINT_MIGRATE; -/* MDS hsm formats */ -extern struct req_format RQF_MDS_HSM_STATE_GET; -extern struct req_format RQF_MDS_HSM_STATE_SET; -extern struct req_format RQF_MDS_HSM_ACTION; -extern struct req_format RQF_MDS_HSM_PROGRESS; -extern struct req_format RQF_MDS_HSM_CT_REGISTER; -extern struct req_format RQF_MDS_HSM_CT_UNREGISTER; -extern struct req_format RQF_MDS_HSM_REQUEST; -/* OST req_format */ -extern struct req_format RQF_OST_CONNECT; -extern struct req_format RQF_OST_DISCONNECT; -extern struct req_format RQF_OST_QUOTACTL; -extern struct req_format RQF_OST_GETATTR; -extern struct req_format RQF_OST_SETATTR; -extern struct req_format RQF_OST_CREATE; -extern struct req_format RQF_OST_PUNCH; -extern struct req_format RQF_OST_SYNC; -extern struct req_format RQF_OST_DESTROY; -extern struct req_format RQF_OST_BRW_READ; -extern struct req_format RQF_OST_BRW_WRITE; -extern struct req_format RQF_OST_STATFS; -extern struct req_format RQF_OST_SET_GRANT_INFO; 
-extern struct req_format RQF_OST_GET_INFO; -extern struct req_format RQF_OST_GET_INFO_LAST_ID; -extern struct req_format RQF_OST_GET_INFO_LAST_FID; -extern struct req_format RQF_OST_SET_INFO_LAST_FID; -extern struct req_format RQF_OST_GET_INFO_FIEMAP; - -/* LDLM req_format */ -extern struct req_format RQF_LDLM_ENQUEUE; -extern struct req_format RQF_LDLM_ENQUEUE_LVB; -extern struct req_format RQF_LDLM_CONVERT; -extern struct req_format RQF_LDLM_INTENT; -extern struct req_format RQF_LDLM_INTENT_BASIC; -extern struct req_format RQF_LDLM_INTENT_LAYOUT; -extern struct req_format RQF_LDLM_INTENT_GETATTR; -extern struct req_format RQF_LDLM_INTENT_OPEN; -extern struct req_format RQF_LDLM_INTENT_CREATE; -extern struct req_format RQF_LDLM_INTENT_UNLINK; -extern struct req_format RQF_LDLM_INTENT_GETXATTR; -extern struct req_format RQF_LDLM_CANCEL; -extern struct req_format RQF_LDLM_CALLBACK; -extern struct req_format RQF_LDLM_CP_CALLBACK; -extern struct req_format RQF_LDLM_BL_CALLBACK; -extern struct req_format RQF_LDLM_GL_CALLBACK; -extern struct req_format RQF_LDLM_GL_DESC_CALLBACK; -/* LOG req_format */ -extern struct req_format RQF_LOG_CANCEL; -extern struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE; -extern struct req_format RQF_LLOG_ORIGIN_HANDLE_DESTROY; -extern struct req_format RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK; -extern struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK; -extern struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER; -extern struct req_format RQF_LLOG_ORIGIN_CONNECT; - -extern struct req_format RQF_CONNECT; - -extern struct req_msg_field RMF_GENERIC_DATA; -extern struct req_msg_field RMF_PTLRPC_BODY; -extern struct req_msg_field RMF_MDT_BODY; -extern struct req_msg_field RMF_MDT_EPOCH; -extern struct req_msg_field RMF_OBD_STATFS; -extern struct req_msg_field RMF_NAME; -extern struct req_msg_field RMF_SYMTGT; -extern struct req_msg_field RMF_TGTUUID; -extern struct req_msg_field RMF_CLUUID; -extern struct req_msg_field RMF_SETINFO_VAL; -extern struct 
req_msg_field RMF_SETINFO_KEY; -extern struct req_msg_field RMF_GETINFO_VAL; -extern struct req_msg_field RMF_GETINFO_VALLEN; -extern struct req_msg_field RMF_GETINFO_KEY; -extern struct req_msg_field RMF_CLOSE_DATA; - -/* - * connection handle received in MDS_CONNECT request. - */ -extern struct req_msg_field RMF_CONN; -extern struct req_msg_field RMF_CONNECT_DATA; -extern struct req_msg_field RMF_DLM_REQ; -extern struct req_msg_field RMF_DLM_REP; -extern struct req_msg_field RMF_DLM_LVB; -extern struct req_msg_field RMF_DLM_GL_DESC; -extern struct req_msg_field RMF_LDLM_INTENT; -extern struct req_msg_field RMF_LAYOUT_INTENT; -extern struct req_msg_field RMF_MDT_MD; -extern struct req_msg_field RMF_REC_REINT; -extern struct req_msg_field RMF_EADATA; -extern struct req_msg_field RMF_EAVALS; -extern struct req_msg_field RMF_EAVALS_LENS; -extern struct req_msg_field RMF_ACL; -extern struct req_msg_field RMF_LOGCOOKIES; -extern struct req_msg_field RMF_CAPA1; -extern struct req_msg_field RMF_CAPA2; -extern struct req_msg_field RMF_OBD_QUOTACHECK; -extern struct req_msg_field RMF_OBD_QUOTACTL; -extern struct req_msg_field RMF_STRING; -extern struct req_msg_field RMF_SWAP_LAYOUTS; -extern struct req_msg_field RMF_MDS_HSM_PROGRESS; -extern struct req_msg_field RMF_MDS_HSM_REQUEST; -extern struct req_msg_field RMF_MDS_HSM_USER_ITEM; -extern struct req_msg_field RMF_MDS_HSM_ARCHIVE; -extern struct req_msg_field RMF_HSM_USER_STATE; -extern struct req_msg_field RMF_HSM_STATE_SET; -extern struct req_msg_field RMF_MDS_HSM_CURRENT_ACTION; -extern struct req_msg_field RMF_MDS_HSM_REQUEST; - -/* seq-mgr fields */ -extern struct req_msg_field RMF_SEQ_OPC; -extern struct req_msg_field RMF_SEQ_RANGE; -extern struct req_msg_field RMF_FID_SPACE; - -/* FLD fields */ -extern struct req_msg_field RMF_FLD_OPC; -extern struct req_msg_field RMF_FLD_MDFLD; - -extern struct req_msg_field RMF_LLOGD_BODY; -extern struct req_msg_field RMF_LLOG_LOG_HDR; -extern struct req_msg_field 
RMF_LLOGD_CONN_BODY; - -extern struct req_msg_field RMF_MGS_TARGET_INFO; -extern struct req_msg_field RMF_MGS_SEND_PARAM; - -extern struct req_msg_field RMF_OST_BODY; -extern struct req_msg_field RMF_OBD_IOOBJ; -extern struct req_msg_field RMF_OBD_ID; -extern struct req_msg_field RMF_FID; -extern struct req_msg_field RMF_NIOBUF_REMOTE; -extern struct req_msg_field RMF_RCS; -extern struct req_msg_field RMF_FIEMAP_KEY; -extern struct req_msg_field RMF_FIEMAP_VAL; -extern struct req_msg_field RMF_OST_ID; - -/* MGS config read message format */ -extern struct req_msg_field RMF_MGS_CONFIG_BODY; -extern struct req_msg_field RMF_MGS_CONFIG_RES; - -/* generic uint32 */ -extern struct req_msg_field RMF_U32; - -/** @} req_layout */ - -#endif /* _LUSTRE_REQ_LAYOUT_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h deleted file mode 100644 index d35bcbc98831..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_sec.h +++ /dev/null @@ -1,1072 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. 
- * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef _LUSTRE_SEC_H_ -#define _LUSTRE_SEC_H_ - -#include - -/** \defgroup sptlrpc sptlrpc - * - * @{ - */ - -/* - * to avoid include - */ -struct obd_import; -struct obd_export; -struct ptlrpc_request; -struct ptlrpc_reply_state; -struct ptlrpc_bulk_desc; -struct brw_page; -/* Linux specific */ -struct key; -struct seq_file; -struct lustre_cfg; - -/* - * forward declaration - */ -struct ptlrpc_sec_policy; -struct ptlrpc_sec_cops; -struct ptlrpc_sec_sops; -struct ptlrpc_sec; -struct ptlrpc_svc_ctx; -struct ptlrpc_cli_ctx; -struct ptlrpc_ctx_ops; - -/** - * \addtogroup flavor flavor - * - * RPC flavor is represented by a 32 bits integer. Currently the high 12 bits - * are unused, must be set to 0 for future expansion. - *
- * ------------------------------------------------------------------------
- * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech)  | 4b (policy) |
- * ------------------------------------------------------------------------
- * 
- * - * @{ - */ - -/* - * flavor constants - */ -enum sptlrpc_policy { - SPTLRPC_POLICY_NULL = 0, - SPTLRPC_POLICY_PLAIN = 1, - SPTLRPC_POLICY_GSS = 2, - SPTLRPC_POLICY_MAX, -}; - -enum sptlrpc_mech_null { - SPTLRPC_MECH_NULL = 0, - SPTLRPC_MECH_NULL_MAX, -}; - -enum sptlrpc_mech_plain { - SPTLRPC_MECH_PLAIN = 0, - SPTLRPC_MECH_PLAIN_MAX, -}; - -enum sptlrpc_mech_gss { - SPTLRPC_MECH_GSS_NULL = 0, - SPTLRPC_MECH_GSS_KRB5 = 1, - SPTLRPC_MECH_GSS_MAX, -}; - -enum sptlrpc_service_type { - SPTLRPC_SVC_NULL = 0, /**< no security */ - SPTLRPC_SVC_AUTH = 1, /**< authentication only */ - SPTLRPC_SVC_INTG = 2, /**< integrity */ - SPTLRPC_SVC_PRIV = 3, /**< privacy */ - SPTLRPC_SVC_MAX, -}; - -enum sptlrpc_bulk_type { - SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */ - SPTLRPC_BULK_HASH = 1, /**< hash integrity */ - SPTLRPC_BULK_MAX, -}; - -enum sptlrpc_bulk_service { - SPTLRPC_BULK_SVC_NULL = 0, /**< no security */ - SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */ - SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */ - SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */ - SPTLRPC_BULK_SVC_MAX, -}; - -/* - * compose/extract macros - */ -#define FLVR_POLICY_OFFSET (0) -#define FLVR_MECH_OFFSET (4) -#define FLVR_SVC_OFFSET (8) -#define FLVR_BULK_TYPE_OFFSET (12) -#define FLVR_BULK_SVC_OFFSET (16) - -#define MAKE_FLVR(policy, mech, svc, btype, bsvc) \ - (((__u32)(policy) << FLVR_POLICY_OFFSET) | \ - ((__u32)(mech) << FLVR_MECH_OFFSET) | \ - ((__u32)(svc) << FLVR_SVC_OFFSET) | \ - ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) | \ - ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET)) - -/* - * extraction - */ -#define SPTLRPC_FLVR_POLICY(flavor) \ - ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF) -#define SPTLRPC_FLVR_MECH(flavor) \ - ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF) -#define SPTLRPC_FLVR_SVC(flavor) \ - ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF) -#define SPTLRPC_FLVR_BULK_TYPE(flavor) \ - ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF) -#define 
SPTLRPC_FLVR_BULK_SVC(flavor) \ - ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF) - -#define SPTLRPC_FLVR_BASE(flavor) \ - ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF) -#define SPTLRPC_FLVR_BASE_SUB(flavor) \ - ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF) - -/* - * gss subflavors - */ -#define MAKE_BASE_SUBFLVR(mech, svc) \ - ((__u32)(mech) | \ - ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET))) - -#define SPTLRPC_SUBFLVR_KRB5N \ - MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL) -#define SPTLRPC_SUBFLVR_KRB5A \ - MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH) -#define SPTLRPC_SUBFLVR_KRB5I \ - MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG) -#define SPTLRPC_SUBFLVR_KRB5P \ - MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV) - -/* - * "end user" flavors - */ -#define SPTLRPC_FLVR_NULL \ - MAKE_FLVR(SPTLRPC_POLICY_NULL, \ - SPTLRPC_MECH_NULL, \ - SPTLRPC_SVC_NULL, \ - SPTLRPC_BULK_DEFAULT, \ - SPTLRPC_BULK_SVC_NULL) -#define SPTLRPC_FLVR_PLAIN \ - MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \ - SPTLRPC_MECH_PLAIN, \ - SPTLRPC_SVC_NULL, \ - SPTLRPC_BULK_HASH, \ - SPTLRPC_BULK_SVC_INTG) -#define SPTLRPC_FLVR_KRB5N \ - MAKE_FLVR(SPTLRPC_POLICY_GSS, \ - SPTLRPC_MECH_GSS_KRB5, \ - SPTLRPC_SVC_NULL, \ - SPTLRPC_BULK_DEFAULT, \ - SPTLRPC_BULK_SVC_NULL) -#define SPTLRPC_FLVR_KRB5A \ - MAKE_FLVR(SPTLRPC_POLICY_GSS, \ - SPTLRPC_MECH_GSS_KRB5, \ - SPTLRPC_SVC_AUTH, \ - SPTLRPC_BULK_DEFAULT, \ - SPTLRPC_BULK_SVC_NULL) -#define SPTLRPC_FLVR_KRB5I \ - MAKE_FLVR(SPTLRPC_POLICY_GSS, \ - SPTLRPC_MECH_GSS_KRB5, \ - SPTLRPC_SVC_INTG, \ - SPTLRPC_BULK_DEFAULT, \ - SPTLRPC_BULK_SVC_INTG) -#define SPTLRPC_FLVR_KRB5P \ - MAKE_FLVR(SPTLRPC_POLICY_GSS, \ - SPTLRPC_MECH_GSS_KRB5, \ - SPTLRPC_SVC_PRIV, \ - SPTLRPC_BULK_DEFAULT, \ - SPTLRPC_BULK_SVC_PRIV) - -#define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL - -#define SPTLRPC_FLVR_INVALID ((__u32)0xFFFFFFFF) -#define SPTLRPC_FLVR_ANY ((__u32)0xFFF00000) - -/** - * extract the useful part from 
wire flavor - */ -#define WIRE_FLVR(wflvr) (((__u32)(wflvr)) & 0x000FFFFF) - -/** @} flavor */ - -static inline void flvr_set_svc(__u32 *flvr, __u32 svc) -{ - LASSERT(svc < SPTLRPC_SVC_MAX); - *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr), - SPTLRPC_FLVR_MECH(*flvr), - svc, - SPTLRPC_FLVR_BULK_TYPE(*flvr), - SPTLRPC_FLVR_BULK_SVC(*flvr)); -} - -static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc) -{ - LASSERT(svc < SPTLRPC_BULK_SVC_MAX); - *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr), - SPTLRPC_FLVR_MECH(*flvr), - SPTLRPC_FLVR_SVC(*flvr), - SPTLRPC_FLVR_BULK_TYPE(*flvr), - svc); -} - -struct bulk_spec_hash { - __u8 hash_alg; -}; - -/** - * Full description of flavors being used on a ptlrpc connection, include - * both regular RPC and bulk transfer parts. - */ -struct sptlrpc_flavor { - /** - * wire flavor, should be renamed to sf_wire. - */ - __u32 sf_rpc; - /** - * general flags of PTLRPC_SEC_FL_* - */ - __u32 sf_flags; - /** - * rpc flavor specification - */ - union { - /* nothing for now */ - } u_rpc; - /** - * bulk flavor specification - */ - union { - struct bulk_spec_hash hash; - } u_bulk; -}; - -/** - * identify the RPC is generated from what part of Lustre. It's encoded into - * RPC requests and to be checked by ptlrpc service. - */ -enum lustre_sec_part { - LUSTRE_SP_CLI = 0, - LUSTRE_SP_MDT, - LUSTRE_SP_OST, - LUSTRE_SP_MGC, - LUSTRE_SP_MGS, - LUSTRE_SP_ANY = 0xFF -}; - -enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd); - -/** - * A rule specifies a flavor to be used by a ptlrpc connection between - * two Lustre parts. - */ -struct sptlrpc_rule { - __u32 sr_netid; /* LNET network ID */ - __u8 sr_from; /* sec_part */ - __u8 sr_to; /* sec_part */ - __u16 sr_padding; - struct sptlrpc_flavor sr_flvr; -}; - -/** - * A set of rules in memory. - * - * Rules are generated and stored on MGS, and propagated to MDT, OST, - * and client when needed. 
- */ -struct sptlrpc_rule_set { - int srs_nslot; - int srs_nrule; - struct sptlrpc_rule *srs_rules; -}; - -int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr); -bool sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr); - -static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set) -{ - memset(set, 0, sizeof(*set)); -} - -int sptlrpc_process_config(struct lustre_cfg *lcfg); -void sptlrpc_conf_log_start(const char *logname); -void sptlrpc_conf_log_stop(const char *logname); -void sptlrpc_conf_log_update_begin(const char *logname); -void sptlrpc_conf_log_update_end(const char *logname); -void sptlrpc_conf_client_adapt(struct obd_device *obd); - -/* The maximum length of security payload. 1024 is enough for Kerberos 5, - * and should be enough for other future mechanisms but not sure. - * Only used by pre-allocated request/reply pool. - */ -#define SPTLRPC_MAX_PAYLOAD (1024) - -struct vfs_cred { - u32 vc_uid; - u32 vc_gid; -}; - -struct ptlrpc_ctx_ops { - /** - * To determine whether it's suitable to use the \a ctx for \a vcred. - */ - int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred); - - /** - * To bring the \a ctx uptodate. - */ - int (*refresh)(struct ptlrpc_cli_ctx *ctx); - - /** - * Validate the \a ctx. - */ - int (*validate)(struct ptlrpc_cli_ctx *ctx); - - /** - * Force the \a ctx to die. - */ - void (*force_die)(struct ptlrpc_cli_ctx *ctx, int grace); - int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize); - - /** - * Sign the request message using \a ctx. - * - * \pre req->rq_reqmsg point to request message. - * \pre req->rq_reqlen is the request message length. - * \post req->rq_reqbuf point to request message with signature. - * \post req->rq_reqdata_len is set to the final request message size. - * - * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign(). - */ - int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); - - /** - * Verify the reply message using \a ctx. 
- * - * \pre req->rq_repdata point to reply message with signature. - * \pre req->rq_repdata_len is the total reply message length. - * \post req->rq_repmsg point to reply message without signature. - * \post req->rq_replen is the reply message length. - * - * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify(). - */ - int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); - - /** - * Encrypt the request message using \a ctx. - * - * \pre req->rq_reqmsg point to request message in clear text. - * \pre req->rq_reqlen is the request message length. - * \post req->rq_reqbuf point to request message. - * \post req->rq_reqdata_len is set to the final request message size. - * - * \see gss_cli_ctx_seal(). - */ - int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); - - /** - * Decrypt the reply message using \a ctx. - * - * \pre req->rq_repdata point to encrypted reply message. - * \pre req->rq_repdata_len is the total cipher text length. - * \post req->rq_repmsg point to reply message in clear text. - * \post req->rq_replen is the reply message length in clear text. - * - * \see gss_cli_ctx_unseal(). - */ - int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); - - /** - * Wrap bulk request data. This is called before wrapping RPC - * request message. - * - * \pre bulk buffer is descripted by desc->bd_iov and - * desc->bd_iov_count. note for read it's just buffer, no data - * need to be sent; for write it contains data in clear text. - * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared - * (usually inside of RPC request message). - * - encryption: cipher text bulk buffer is descripted by - * desc->bd_enc_iov and desc->bd_iov_count (currently assume iov - * count remains the same). - * - otherwise: bulk buffer is still desc->bd_iov and - * desc->bd_iov_count. - * - * \return 0: success. - * \return -ev: error code. - * - * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk(). 
- */ - int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); - - /** - * Unwrap bulk reply data. This is called after wrapping RPC - * reply message. - * - * \pre bulk buffer is descripted by desc->bd_iov/desc->bd_enc_iov and - * desc->bd_iov_count, according to wrap_bulk(). - * \post final bulk data in clear text is placed in buffer described - * by desc->bd_iov and desc->bd_iov_count. - * \return +ve nob of actual bulk data in clear text. - * \return -ve error code. - * - * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk(). - */ - int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); -}; - -#define PTLRPC_CTX_NEW_BIT (0) /* newly created */ -#define PTLRPC_CTX_UPTODATE_BIT (1) /* uptodate */ -#define PTLRPC_CTX_DEAD_BIT (2) /* mark expired gracefully */ -#define PTLRPC_CTX_ERROR_BIT (3) /* fatal error (refresh, etc.) */ -#define PTLRPC_CTX_CACHED_BIT (8) /* in ctx cache (hash etc.) 
*/ -#define PTLRPC_CTX_ETERNAL_BIT (9) /* always valid */ - -#define PTLRPC_CTX_NEW (1 << PTLRPC_CTX_NEW_BIT) -#define PTLRPC_CTX_UPTODATE (1 << PTLRPC_CTX_UPTODATE_BIT) -#define PTLRPC_CTX_DEAD (1 << PTLRPC_CTX_DEAD_BIT) -#define PTLRPC_CTX_ERROR (1 << PTLRPC_CTX_ERROR_BIT) -#define PTLRPC_CTX_CACHED (1 << PTLRPC_CTX_CACHED_BIT) -#define PTLRPC_CTX_ETERNAL (1 << PTLRPC_CTX_ETERNAL_BIT) - -#define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_NEW_BIT | \ - PTLRPC_CTX_UPTODATE | \ - PTLRPC_CTX_DEAD | \ - PTLRPC_CTX_ERROR) - -struct ptlrpc_cli_ctx { - struct hlist_node cc_cache; /* linked into ctx cache */ - atomic_t cc_refcount; - struct ptlrpc_sec *cc_sec; - struct ptlrpc_ctx_ops *cc_ops; - unsigned long cc_expire; /* in seconds */ - unsigned int cc_early_expire:1; - unsigned long cc_flags; - struct vfs_cred cc_vcred; - spinlock_t cc_lock; - struct list_head cc_req_list; /* waiting reqs linked here */ - struct list_head cc_gc_chain; /* linked to gc chain */ -}; - -/** - * client side policy operation vector. - */ -struct ptlrpc_sec_cops { - /** - * Given an \a imp, create and initialize a ptlrpc_sec structure. - * \param ctx service context: - * - regular import: \a ctx should be NULL; - * - reverse import: \a ctx is obtained from incoming request. - * \param flavor specify what flavor to use. - * - * When necessary, policy module is responsible for taking reference - * on the import. - * - * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr(). - */ - struct ptlrpc_sec *(*create_sec)(struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx, - struct sptlrpc_flavor *flavor); - - /** - * Destructor of ptlrpc_sec. When called, refcount has been dropped - * to 0 and all contexts has been destroyed. - * - * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr(). - */ - void (*destroy_sec)(struct ptlrpc_sec *sec); - - /** - * Notify that this ptlrpc_sec is going to die. 
Optionally, policy - * module is supposed to set sec->ps_dying and whatever necessary - * actions. - * - * \see plain_kill_sec(), gss_sec_kill(). - */ - void (*kill_sec)(struct ptlrpc_sec *sec); - - /** - * Given \a vcred, lookup and/or create its context. The policy module - * is supposed to maintain its own context cache. - * XXX currently \a create and \a remove_dead is always 1, perhaps - * should be removed completely. - * - * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr(). - */ - struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec, - struct vfs_cred *vcred, - int create, int remove_dead); - - /** - * Called then the reference of \a ctx dropped to 0. The policy module - * is supposed to destroy this context or whatever else according to - * its cache maintenance mechanism. - * - * \param sync if zero, we shouldn't wait for the context being - * destroyed completely. - * - * \see plain_release_ctx(), gss_sec_release_ctx_kr(). - */ - void (*release_ctx)(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx, - int sync); - - /** - * Flush the context cache. - * - * \param uid context of which user, -1 means all contexts. - * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected - * contexts should be cleared immediately. - * \param force if zero, only idle contexts will be flushed. - * - * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr(). - */ - int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid, - int grace, int force); - - /** - * Called periodically by garbage collector to remove dead contexts - * from cache. - * - * \see gss_sec_gc_ctx_kr(). - */ - void (*gc_ctx)(struct ptlrpc_sec *sec); - - /** - * Given an context \a ctx, install a corresponding reverse service - * context on client side. - * XXX currently it's only used by GSS module, maybe we should remove - * this from general API. 
- */ - int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx); - - /** - * To allocate request buffer for \a req. - * - * \pre req->rq_reqmsg == NULL. - * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated, - * we are not supposed to free it. - * \post if success, req->rq_reqmsg point to a buffer with size - * at least \a lustre_msg_size. - * - * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf(). - */ - int (*alloc_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, - int lustre_msg_size); - - /** - * To free request buffer for \a req. - * - * \pre req->rq_reqbuf != NULL. - * - * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf(). - */ - void (*free_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); - - /** - * To allocate reply buffer for \a req. - * - * \pre req->rq_repbuf == NULL. - * \post if success, req->rq_repbuf point to a buffer with size - * req->rq_repbuf_len, the size should be large enough to receive - * reply which be transformed from \a lustre_msg_size of clear text. - * - * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf(). - */ - int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, - int lustre_msg_size); - - /** - * To free reply buffer for \a req. - * - * \pre req->rq_repbuf != NULL. - * \post req->rq_repbuf == NULL. - * \post req->rq_repbuf_len == 0. - * - * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf(). - */ - void (*free_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); - - /** - * To expand the request buffer of \a req, thus the \a segment in - * the request message pointed by req->rq_reqmsg can accommodate - * at least \a newsize of data. - * - * \pre req->rq_reqmsg->lm_buflens[segment] < newsize. - * - * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(), - * gss_enlarge_reqbuf(). 
- */ - int (*enlarge_reqbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int segment, int newsize); - /* - * misc - */ - int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq); -}; - -/** - * server side policy operation vector. - */ -struct ptlrpc_sec_sops { - /** - * verify an incoming request. - * - * \pre request message is pointed by req->rq_reqbuf, size is - * req->rq_reqdata_len; and the message has been unpacked to - * host byte order. - * - * \retval SECSVC_OK success, req->rq_reqmsg point to request message - * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set; - * req->rq_sp_from is decoded from request. - * \retval SECSVC_COMPLETE success, the request has been fully - * processed, and reply message has been prepared; req->rq_sp_from is - * decoded from request. - * \retval SECSVC_DROP failed, this request should be dropped. - * - * \see null_accept(), plain_accept(), gss_svc_accept_kr(). - */ - int (*accept)(struct ptlrpc_request *req); - - /** - * Perform security transformation upon reply message. - * - * \pre reply message is pointed by req->rq_reply_state->rs_msg, size - * is req->rq_replen. - * \post req->rs_repdata_len is the final message size. - * \post req->rq_reply_off is set. - * - * \see null_authorize(), plain_authorize(), gss_svc_authorize(). - */ - int (*authorize)(struct ptlrpc_request *req); - - /** - * Invalidate server context \a ctx. - * - * \see gss_svc_invalidate_ctx(). - */ - void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx); - - /** - * Allocate a ptlrpc_reply_state. - * - * \param msgsize size of the reply message in clear text. - * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we - * should simply use it; otherwise we'll responsible for allocating - * a new one. - * \post req->rq_reply_state != NULL; - * \post req->rq_reply_state->rs_msg != NULL; - * - * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs(). 
- */ - int (*alloc_rs)(struct ptlrpc_request *req, int msgsize); - - /** - * Free a ptlrpc_reply_state. - */ - void (*free_rs)(struct ptlrpc_reply_state *rs); - - /** - * Release the server context \a ctx. - * - * \see gss_svc_free_ctx(). - */ - void (*free_ctx)(struct ptlrpc_svc_ctx *ctx); - - /** - * Install a reverse context based on the server context \a ctx. - * - * \see gss_svc_install_rctx_kr(). - */ - int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx); - - /** - * Prepare buffer for incoming bulk write. - * - * \pre desc->bd_iov and desc->bd_iov_count describes the buffer - * intended to receive the write. - * - * \see gss_svc_prep_bulk(). - */ - int (*prep_bulk)(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); - - /** - * Unwrap the bulk write data. - * - * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk(). - */ - int (*unwrap_bulk)(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); - - /** - * Wrap the bulk read data. - * - * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk(). - */ - int (*wrap_bulk)(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); -}; - -struct ptlrpc_sec_policy { - struct module *sp_owner; - char *sp_name; - __u16 sp_policy; /* policy number */ - struct ptlrpc_sec_cops *sp_cops; /* client ops */ - struct ptlrpc_sec_sops *sp_sops; /* server ops */ -}; - -#define PTLRPC_SEC_FL_REVERSE 0x0001 /* reverse sec */ -#define PTLRPC_SEC_FL_ROOTONLY 0x0002 /* treat everyone as root */ -#define PTLRPC_SEC_FL_UDESC 0x0004 /* ship udesc */ -#define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */ -#define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */ - -/** - * The ptlrpc_sec represents the client side ptlrpc security facilities, - * each obd_import (both regular and reverse import) must associate with - * a ptlrpc_sec. - * - * \see sptlrpc_import_sec_adapt(). 
- */ -struct ptlrpc_sec { - struct ptlrpc_sec_policy *ps_policy; - atomic_t ps_refcount; - /** statistic only */ - atomic_t ps_nctx; - /** unique identifier */ - int ps_id; - struct sptlrpc_flavor ps_flvr; - enum lustre_sec_part ps_part; - /** after set, no more new context will be created */ - unsigned int ps_dying:1; - /** owning import */ - struct obd_import *ps_import; - spinlock_t ps_lock; - - /* - * garbage collection - */ - struct list_head ps_gc_list; - unsigned long ps_gc_interval; /* in seconds */ - time64_t ps_gc_next; /* in seconds */ -}; - -static inline int sec_is_reverse(struct ptlrpc_sec *sec) -{ - return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE); -} - -static inline int sec_is_rootonly(struct ptlrpc_sec *sec) -{ - return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY); -} - -struct ptlrpc_svc_ctx { - atomic_t sc_refcount; - struct ptlrpc_sec_policy *sc_policy; -}; - -/* - * user identity descriptor - */ -#define LUSTRE_MAX_GROUPS (128) - -struct ptlrpc_user_desc { - __u32 pud_uid; - __u32 pud_gid; - __u32 pud_fsuid; - __u32 pud_fsgid; - __u32 pud_cap; - __u32 pud_ngroups; - __u32 pud_groups[0]; -}; - -/* - * bulk flavors - */ -enum sptlrpc_bulk_hash_alg { - BULK_HASH_ALG_NULL = 0, - BULK_HASH_ALG_ADLER32, - BULK_HASH_ALG_CRC32, - BULK_HASH_ALG_MD5, - BULK_HASH_ALG_SHA1, - BULK_HASH_ALG_SHA256, - BULK_HASH_ALG_SHA384, - BULK_HASH_ALG_SHA512, - BULK_HASH_ALG_MAX -}; - -const char *sptlrpc_get_hash_name(__u8 hash_alg); -__u8 sptlrpc_get_hash_alg(const char *algname); - -enum { - BSD_FL_ERR = 1, -}; - -struct ptlrpc_bulk_sec_desc { - __u8 bsd_version; /* 0 */ - __u8 bsd_type; /* SPTLRPC_BULK_XXX */ - __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */ - __u8 bsd_flags; /* flags */ - __u32 bsd_nob; /* nob of bulk data */ - __u8 bsd_data[0]; /* policy-specific token */ -}; - -/* - * round size up to next power of 2, for slab allocation. 
- * @size must be sane (can't overflow after round up) - */ -static inline int size_roundup_power2(int size) -{ - size--; - size |= size >> 1; - size |= size >> 2; - size |= size >> 4; - size |= size >> 8; - size |= size >> 16; - size++; - return size; -} - -/* - * internal support libraries - */ -void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg, - int segment, int newsize); - -/* - * security policies - */ -int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy); -int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy); - -__u32 sptlrpc_name2flavor_base(const char *name); -const char *sptlrpc_flavor2name_base(__u32 flvr); -char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf, - char *buf, int bufsize); -char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize); - -static inline -struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy) -{ - __module_get(policy->sp_owner); - return policy; -} - -static inline -void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy) -{ - module_put(policy->sp_owner); -} - -/* - * client credential - */ -static inline -unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx) -{ - return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK); -} - -static inline -int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx) -{ - return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE); -} - -static inline -int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx) -{ - return (cli_ctx_status(ctx) != 0); -} - -static inline -int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx) -{ - return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0); -} - -static inline -int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx) -{ - return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0); -} - -static inline -int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx) -{ - return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0); -} - -static inline -int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx) -{ - return ((ctx->cc_flags & 
PTLRPC_CTX_ETERNAL) != 0); -} - -/* - * sec get/put - */ -void sptlrpc_sec_put(struct ptlrpc_sec *sec); - -/* - * internal apis which only used by policy implementation - */ -int sptlrpc_get_next_secid(void); - -/* - * exported client context api - */ -struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx); -void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync); - -/* - * exported client context wrap/buffers - */ -int sptlrpc_cli_wrap_request(struct ptlrpc_request *req); -int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req); -int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize); -void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req); -int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize); -void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req); -int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req, - int segment, int newsize); -int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, - struct ptlrpc_request **req_ret); -void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req); - -void sptlrpc_request_out_callback(struct ptlrpc_request *req); - -/* - * exported higher interface of import & request - */ -int sptlrpc_import_sec_adapt(struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx, - struct sptlrpc_flavor *flvr); -struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp); -void sptlrpc_import_sec_put(struct obd_import *imp); - -int sptlrpc_import_check_ctx(struct obd_import *imp); -void sptlrpc_import_flush_root_ctx(struct obd_import *imp); -void sptlrpc_import_flush_my_ctx(struct obd_import *imp); -void sptlrpc_import_flush_all_ctx(struct obd_import *imp); -int sptlrpc_req_get_ctx(struct ptlrpc_request *req); -void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync); -int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout); -void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode); - -/* gc */ -void sptlrpc_gc_add_sec(struct 
ptlrpc_sec *sec); -void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec); - -/* misc */ -const char *sec2target_str(struct ptlrpc_sec *sec); -/* - * lprocfs - */ -int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev); - -/* - * server side - */ -enum secsvc_accept_res { - SECSVC_OK = 0, - SECSVC_COMPLETE, - SECSVC_DROP, -}; - -int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req); -int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen); -int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req); -void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs); -void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req); -void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req); - -int sptlrpc_target_export_check(struct obd_export *exp, - struct ptlrpc_request *req); - -/* bulk security api */ -void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc); -int get_free_pages_in_pool(void); -int pool_is_at_full_capacity(void); - -int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); -int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc, - int nob); -int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); - -/* bulk helpers (internal use only by policies) */ -int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg, - void *buf, int buflen); - -int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed); - -/* user descriptor helpers */ -static inline int sptlrpc_user_desc_size(int ngroups) -{ - return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32); -} - -int sptlrpc_current_user_desc_size(void); -int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset); -int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed); - -enum { - LUSTRE_SEC_NONE = 0, - LUSTRE_SEC_REMOTE = 1, - LUSTRE_SEC_SPECIFY = 2, - LUSTRE_SEC_ALL = 3 -}; - -/** @} sptlrpc */ - -#endif /* _LUSTRE_SEC_H_ */ diff --git 
a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h deleted file mode 100644 index 9d786bbe7f3f..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre_swab.h +++ /dev/null @@ -1,109 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2014, Intel Corporation. - * - * Copyright 2015 Cray Inc, all rights reserved. - * Author: Ben Evans. - * - * We assume all nodes are either little-endian or big-endian, and we - * always send messages in the sender's native format. The receiver - * detects the message format by checking the 'magic' field of the message - * (see lustre_msg_swabbed() below). - * - * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines - * are implemented in ptlrpc/lustre_swab.c. These 'swabbers' convert the - * type from "other" endian, in-place in the message buffer. - * - * A swabber takes a single pointer argument. The caller must already have - * verified that the length of the message buffer >= sizeof (type). 
- * - * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine - * may be defined that swabs just the variable part, after the caller has - * verified that the message buffer is large enough. - */ - -#ifndef _LUSTRE_SWAB_H_ -#define _LUSTRE_SWAB_H_ - -#include - -void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); -void lustre_swab_connect(struct obd_connect_data *ocd); -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_state_set(struct hsm_state_set *hss); -void lustre_swab_obd_statfs(struct obd_statfs *os); -void lustre_swab_obd_ioobj(struct obd_ioobj *ioo); -void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); -void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb); -void lustre_swab_ost_lvb(struct ost_lvb *lvb); -void lustre_swab_obd_quotactl(struct obd_quotactl *q); -void lustre_swab_lquota_lvb(struct lquota_lvb *lvb); -void lustre_swab_generic_32s(__u32 *val); -void lustre_swab_mdt_body(struct mdt_body *b); -void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b); -void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); -void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); -void lustre_swab_lmv_desc(struct lmv_desc *ld); -void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm); -void lustre_swab_lov_desc(struct lov_desc *ld); -void lustre_swab_gl_desc(union ldlm_gl_desc *desc); -void lustre_swab_ldlm_intent(struct ldlm_intent *i); -void lustre_swab_ldlm_request(struct ldlm_request *rq); -void lustre_swab_ldlm_reply(struct ldlm_reply *r); -void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo); -void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo); -void lustre_swab_mgs_config_body(struct mgs_config_body *body); -void lustre_swab_mgs_config_res(struct mgs_config_res *body); -void lustre_swab_ost_body(struct ost_body *b); -void lustre_swab_ost_last_id(__u64 *id); -void lustre_swab_fiemap(struct fiemap *fiemap); -void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum); -void 
lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); -void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, - int stripe_count); -void lustre_swab_lov_mds_md(struct lov_mds_md *lmm); -void lustre_swab_lustre_capa(struct lustre_capa *c); -void lustre_swab_lustre_capa_key(struct lustre_capa_key *k); -void lustre_swab_fid2path(struct getinfo_fid2path *gf); -void lustre_swab_layout_intent(struct layout_intent *li); -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_current_action(struct hsm_current_action *action); -void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk); -void lustre_swab_hsm_user_state(struct hsm_user_state *hus); -void lustre_swab_hsm_user_item(struct hsm_user_item *hui); -void lustre_swab_hsm_request(struct hsm_request *hr); -void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl); -void lustre_swab_close_data(struct close_data *data); -void lustre_swab_lmv_user_md(struct lmv_user_md *lum); - -/* Functions for dumping PTLRPC fields */ -void dump_rniobuf(struct niobuf_remote *rnb); -void dump_ioo(struct obd_ioobj *nb); -void dump_ost_body(struct ost_body *ob); -void dump_rcs(__u32 *rc); - -#endif diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h deleted file mode 100644 index b1907bbffb19..000000000000 --- a/drivers/staging/lustre/lustre/include/obd.h +++ /dev/null @@ -1,1114 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef __OBD_H -#define __OBD_H - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define MAX_OBD_DEVICES 8192 - -struct osc_async_rc { - int ar_rc; - int ar_force_sync; - __u64 ar_min_xid; -}; - -struct lov_oinfo { /* per-stripe data structure */ - struct ost_id loi_oi; /* object ID/Sequence on the target OST */ - int loi_ost_idx; /* OST stripe index in lov_tgt_desc->tgts */ - int loi_ost_gen; /* generation of this loi_ost_idx */ - - unsigned long loi_kms_valid:1; - __u64 loi_kms; /* known minimum size */ - struct ost_lvb loi_lvb; - struct osc_async_rc loi_ar; -}; - -static inline void loi_kms_set(struct lov_oinfo *oinfo, __u64 kms) -{ - oinfo->loi_kms = kms; - oinfo->loi_kms_valid = 1; -} - -static inline void loi_init(struct lov_oinfo *loi) -{ -} - -struct lov_stripe_md; -struct obd_info; - -int lov_read_and_clear_async_rc(struct cl_object *clob); - -typedef int (*obd_enqueue_update_f)(void *cookie, int rc); - -/* obd info for a particular level (lov, osc). */ -struct obd_info { - /* OBD_STATFS_* flags */ - __u64 oi_flags; - /* lsm data specific for every OSC. */ - struct lov_stripe_md *oi_md; - /* statfs data specific for every OSC, if needed at all. */ - struct obd_statfs *oi_osfs; - /* An update callback which is called to update some data on upper - * level. E.g. 
it is used for update lsm->lsm_oinfo at every received - * request in osc level for enqueue requests. It is also possible to - * update some caller data from LOV layer if needed. - */ - obd_enqueue_update_f oi_cb_up; -}; - -struct obd_type { - struct list_head typ_chain; - struct obd_ops *typ_dt_ops; - struct md_ops *typ_md_ops; - struct dentry *typ_debugfs_entry; - char *typ_name; - int typ_refcnt; - struct lu_device_type *typ_lu; - spinlock_t obd_type_lock; - struct kobject *typ_kobj; -}; - -struct brw_page { - u64 off; - struct page *pg; - unsigned int count; - u32 flag; -}; - -struct timeout_item { - enum timeout_event ti_event; - unsigned long ti_timeout; - timeout_cb_t ti_cb; - void *ti_cb_data; - struct list_head ti_obd_list; - struct list_head ti_chain; -}; - -#define OBD_MAX_RIF_DEFAULT 8 -#define OBD_MAX_RIF_MAX 512 -#define OSC_MAX_RIF_MAX 256 -#define OSC_MAX_DIRTY_DEFAULT (OBD_MAX_RIF_DEFAULT * 4) -#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */ -#define OSC_DEFAULT_RESENDS 10 - -/* possible values for fo_sync_lock_cancel */ -enum { - NEVER_SYNC_ON_CANCEL = 0, - BLOCKING_SYNC_ON_CANCEL = 1, - ALWAYS_SYNC_ON_CANCEL = 2, - NUM_SYNC_ON_CANCEL_STATES -}; - -enum obd_cl_sem_lock_class { - OBD_CLI_SEM_NORMAL, - OBD_CLI_SEM_MGC, - OBD_CLI_SEM_MDCOSC, -}; - -/* - * Limit reply buffer size for striping data to one x86_64 page. This - * value is chosen to fit the striping data for common use cases while - * staying well below the limit at which the buffer must be backed by - * vmalloc(). Excessive use of vmalloc() may cause spinlock contention - * on the MDS. - */ -#define OBD_MAX_DEFAULT_EA_SIZE 4096 - -struct mdc_rpc_lock; -struct obd_import; -struct client_obd { - struct rw_semaphore cl_sem; - struct obd_uuid cl_target_uuid; - struct obd_import *cl_import; /* ptlrpc connection state */ - size_t cl_conn_count; - /* - * Cache maximum and default values for easize. 
This is - * strictly a performance optimization to minimize calls to - * obd_size_diskmd(). The default values are used to calculate the - * initial size of a request buffer. The ptlrpc layer will resize the - * buffer as needed to accommodate a larger reply from the - * server. The default values should be small enough to avoid wasted - * memory and excessive use of vmalloc(), yet large enough to avoid - * reallocating the buffer in the common use case. - */ - /* - * Default EA size for striping attributes. It is initialized at - * mount-time based on the default stripe width of the filesystem, - * then it tracks the largest observed EA size advertised by - * the MDT, up to a maximum value of OBD_MAX_DEFAULT_EA_SIZE. - */ - u32 cl_default_mds_easize; - /* Maximum possible EA size computed at mount-time based on - * the number of OSTs in the filesystem. May be increased at - * run-time if a larger observed size is advertised by the MDT. - */ - u32 cl_max_mds_easize; - - enum lustre_sec_part cl_sp_me; - enum lustre_sec_part cl_sp_to; - struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */ - - /* the grant values are protected by loi_list_lock below */ - unsigned long cl_dirty_pages; /* all _dirty_ in pages */ - unsigned long cl_dirty_max_pages; /* allowed w/o rpc */ - unsigned long cl_dirty_transit; /* dirty synchronous */ - unsigned long cl_avail_grant; /* bytes of credit for ost */ - unsigned long cl_lost_grant; /* lost credits (trunc) */ - - /* since we allocate grant by blocks, we don't know how many grant will - * be used to add a page into cache. As a solution, we reserve maximum - * grant before trying to dirty a page and unreserve the rest. - * See osc_{reserve|unreserve}_grant for details. 
- */ - long cl_reserved_grant; - struct list_head cl_cache_waiters; /* waiting for cache/grant */ - unsigned long cl_next_shrink_grant; /* jiffies */ - struct list_head cl_grant_shrink_list; /* Timeout event list */ - int cl_grant_shrink_interval; /* seconds */ - - /* A chunk is an optimal size used by osc_extent to determine - * the extent size. A chunk is max(PAGE_SIZE, OST block size) - */ - int cl_chunkbits; - unsigned int cl_extent_tax; /* extent overhead, by bytes */ - - /* keep track of objects that have lois that contain pages which - * have been queued for async brw. this lock also protects the - * lists of osc_client_pages that hang off of the loi - */ - /* - * ->cl_loi_list_lock protects consistency of - * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and - * ->ap_completion() call-backs are executed under this lock. As we - * cannot guarantee that these call-backs never block on all platforms - * (as a matter of fact they do block on Mac OS X), type of - * ->cl_loi_list_lock is platform dependent: it's a spin-lock on Linux - * and blocking mutex on Mac OS X. (Alternative is to make this lock - * blocking everywhere, but we don't want to slow down fast-path of - * our main platform.) - * - * NB by Jinshan: though field names are still _loi_, but actually - * osc_object{}s are in the list. 
- */ - spinlock_t cl_loi_list_lock; - struct list_head cl_loi_ready_list; - struct list_head cl_loi_hp_ready_list; - struct list_head cl_loi_write_list; - struct list_head cl_loi_read_list; - __u32 cl_r_in_flight; - __u32 cl_w_in_flight; - /* just a sum of the loi/lop pending numbers to be exported by sysfs */ - atomic_t cl_pending_w_pages; - atomic_t cl_pending_r_pages; - __u32 cl_max_pages_per_rpc; - __u32 cl_max_rpcs_in_flight; - struct obd_histogram cl_read_rpc_hist; - struct obd_histogram cl_write_rpc_hist; - struct obd_histogram cl_read_page_hist; - struct obd_histogram cl_write_page_hist; - struct obd_histogram cl_read_offset_hist; - struct obd_histogram cl_write_offset_hist; - - /* LRU for osc caching pages */ - struct cl_client_cache *cl_cache; - /** member of cl_cache->ccc_lru */ - struct list_head cl_lru_osc; - /** # of available LRU slots left in the per-OSC cache. - * Available LRU slots are shared by all OSCs of the same file system, - * therefore this is a pointer to cl_client_cache::ccc_lru_left. - */ - atomic_long_t *cl_lru_left; - /** # of busy LRU pages. A page is considered busy if it's in writeback - * queue, or in transfer. Busy pages can't be discarded so they are not - * in LRU cache. - */ - atomic_long_t cl_lru_busy; - /** # of LRU pages in the cache for this client_obd */ - atomic_long_t cl_lru_in_list; - /** # of threads are shrinking LRU cache. To avoid contention, it's not - * allowed to have multiple threads shrinking LRU cache. - */ - atomic_t cl_lru_shrinkers; - /** The time when this LRU cache was last used. */ - time64_t cl_lru_last_used; - /** stats: how many reclaims have happened for this client_obd. - * reclaim and shrink - shrink is async, voluntarily rebalancing; - * reclaim is sync, initiated by IO thread when the LRU slots are - * in shortage. 
- */ - u64 cl_lru_reclaim; - /** List of LRU pages for this client_obd */ - struct list_head cl_lru_list; - /** Lock for LRU page list */ - spinlock_t cl_lru_list_lock; - /** # of unstable pages in this client_obd. - * An unstable page is a page state that WRITE RPC has finished but - * the transaction has NOT yet committed. - */ - atomic_long_t cl_unstable_count; - /** Link to osc_shrinker_list */ - struct list_head cl_shrink_list; - - /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ - atomic_t cl_destroy_in_flight; - wait_queue_head_t cl_destroy_waitq; - - struct mdc_rpc_lock *cl_rpc_lock; - - /* modify rpcs in flight - * currently used for metadata only - */ - spinlock_t cl_mod_rpcs_lock; - u16 cl_max_mod_rpcs_in_flight; - u16 cl_mod_rpcs_in_flight; - u16 cl_close_rpcs_in_flight; - wait_queue_head_t cl_mod_rpcs_waitq; - unsigned long *cl_mod_tag_bitmap; - struct obd_histogram cl_mod_rpcs_hist; - - /* mgc datastruct */ - atomic_t cl_mgc_refcount; - struct obd_export *cl_mgc_mgsexp; - - /* checksumming for data sent over the network */ - unsigned int cl_checksum:1; /* 0 = disabled, 1 = enabled */ - /* supported checksum types that are worked out at connect time */ - __u32 cl_supp_cksum_types; - /* checksum algorithm to be used */ - enum cksum_type cl_cksum_type; - - /* also protected by the poorly named _loi_list_lock lock above */ - struct osc_async_rc cl_ar; - - /* sequence manager */ - struct lu_client_seq *cl_seq; - - atomic_t cl_resends; /* resend count */ - - /* ptlrpc work for writeback in ptlrpcd context */ - void *cl_writeback_work; - void *cl_lru_work; - /* hash tables for osc_quota_info */ - struct rhashtable cl_quota_hash[MAXQUOTAS]; -}; - -#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid) - -struct obd_id_info { - __u32 idx; - u64 *data; -}; - -struct echo_client_obd { - struct obd_export *ec_exp; /* the local connection to osc/lov */ - spinlock_t ec_lock; - struct list_head ec_objects; - struct list_head 
ec_locks; - __u64 ec_unique; -}; - -/* Generic subset of OSTs */ -struct ost_pool { - __u32 *op_array; /* array of index of lov_obd->lov_tgts */ - unsigned int op_count; /* number of OSTs in the array */ - unsigned int op_size; /* allocated size of lp_array */ - struct rw_semaphore op_rw_sem; /* to protect ost_pool use */ -}; - -/* allow statfs data caching for 1 second */ -#define OBD_STATFS_CACHE_SECONDS 1 - -struct lov_tgt_desc { - struct list_head ltd_kill; - struct obd_uuid ltd_uuid; - struct obd_device *ltd_obd; - struct obd_export *ltd_exp; - __u32 ltd_gen; - __u32 ltd_index; /* index in lov_obd->tgts */ - unsigned long ltd_active:1,/* is this target up for requests */ - ltd_activate:1,/* should target be activated */ - ltd_reap:1; /* should this target be deleted */ -}; - -struct lov_obd { - struct lov_desc desc; - struct lov_tgt_desc **lov_tgts; /* sparse array */ - struct ost_pool lov_packed; /* all OSTs in a packed array */ - struct mutex lov_lock; - struct obd_connect_data lov_ocd; - atomic_t lov_refcount; - __u32 lov_death_row;/* tgts scheduled to be deleted */ - __u32 lov_tgt_size; /* size of tgts array */ - int lov_connects; - int lov_pool_count; - struct rhashtable lov_pools_hash_body; /* used for key access */ - struct list_head lov_pool_list; /* used for sequential access */ - struct dentry *lov_pool_debugfs_entry; - enum lustre_sec_part lov_sp_me; - - /* Cached LRU and unstable data from upper layer */ - struct cl_client_cache *lov_cache; - - struct rw_semaphore lov_notify_lock; - - struct kobject *lov_tgts_kobj; -}; - -struct lmv_tgt_desc { - struct obd_uuid ltd_uuid; - struct obd_export *ltd_exp; - u32 ltd_idx; - struct mutex ltd_fid_mutex; - unsigned long ltd_active:1; /* target up for requests */ -}; - -struct lmv_obd { - struct lu_client_fld lmv_fld; - spinlock_t lmv_lock; - struct lmv_desc desc; - struct obd_uuid cluuid; - - struct mutex lmv_init_mutex; - int connected; - int max_easize; - int max_def_easize; - - u32 tgts_size; /* size of 
tgts array */ - struct lmv_tgt_desc **tgts; - - struct obd_connect_data conn_data; - struct kobject *lmv_tgts_kobj; -}; - -struct niobuf_local { - __u64 lnb_file_offset; - __u32 lnb_page_offset; - __u32 lnb_len; - __u32 lnb_flags; - int lnb_rc; - struct page *lnb_page; - void *lnb_data; -}; - -#define LUSTRE_FLD_NAME "fld" -#define LUSTRE_SEQ_NAME "seq" - -#define LUSTRE_MDD_NAME "mdd" -#define LUSTRE_OSD_LDISKFS_NAME "osd-ldiskfs" -#define LUSTRE_OSD_ZFS_NAME "osd-zfs" -#define LUSTRE_VVP_NAME "vvp" -#define LUSTRE_LMV_NAME "lmv" -#define LUSTRE_SLP_NAME "slp" -#define LUSTRE_LOD_NAME "lod" -#define LUSTRE_OSP_NAME "osp" -#define LUSTRE_LWP_NAME "lwp" - -/* obd device type names */ - /* FIXME all the references to LUSTRE_MDS_NAME should be swapped with LUSTRE_MDT_NAME */ -#define LUSTRE_MDS_NAME "mds" -#define LUSTRE_MDT_NAME "mdt" -#define LUSTRE_MDC_NAME "mdc" -#define LUSTRE_OSS_NAME "ost" /* FIXME change name to oss */ -#define LUSTRE_OST_NAME "obdfilter" /* FIXME change name to ost */ -#define LUSTRE_OSC_NAME "osc" -#define LUSTRE_LOV_NAME "lov" -#define LUSTRE_MGS_NAME "mgs" -#define LUSTRE_MGC_NAME "mgc" - -#define LUSTRE_ECHO_NAME "obdecho" -#define LUSTRE_ECHO_CLIENT_NAME "echo_client" -#define LUSTRE_QMT_NAME "qmt" - -/* Constant obd names (post-rename) */ -#define LUSTRE_MDS_OBDNAME "MDS" -#define LUSTRE_OSS_OBDNAME "OSS" -#define LUSTRE_MGS_OBDNAME "MGS" -#define LUSTRE_MGC_OBDNAME "MGC" - -/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */ -#define N_LOCAL_TEMP_PAGE 0x10000000 - -/* - * Events signalled through obd_notify() upcall-chain. 
- */ -enum obd_notify_event { - /* Device connect start */ - OBD_NOTIFY_CONNECT, - /* Device activated */ - OBD_NOTIFY_ACTIVE, - /* Device deactivated */ - OBD_NOTIFY_INACTIVE, - /* Connect data for import were changed */ - OBD_NOTIFY_OCD, - /* Sync request */ - OBD_NOTIFY_SYNC_NONBLOCK, - OBD_NOTIFY_SYNC, - /* Configuration event */ - OBD_NOTIFY_CONFIG, - /* Administratively deactivate/activate event */ - OBD_NOTIFY_DEACTIVATE, - OBD_NOTIFY_ACTIVATE -}; - -/* - * Data structure used to pass obd_notify()-event to non-obd listeners (llite - * being main example). - */ -struct obd_notify_upcall { - int (*onu_upcall)(struct obd_device *host, struct obd_device *watched, - enum obd_notify_event ev, void *owner, void *data); - /* Opaque datum supplied by upper layer listener */ - void *onu_owner; -}; - -struct target_recovery_data { - svc_handler_t trd_recovery_handler; - pid_t trd_processing_task; - struct completion trd_starting; - struct completion trd_finishing; -}; - -struct obd_llog_group { - struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS]; - wait_queue_head_t olg_waitq; - spinlock_t olg_lock; - struct mutex olg_cat_processing; -}; - -/* corresponds to one of the obd's */ -#define OBD_DEVICE_MAGIC 0XAB5CD6EF - -struct lvfs_run_ctxt { - struct dt_device *dt; -}; - -struct obd_device { - struct obd_type *obd_type; - u32 obd_magic; /* OBD_DEVICE_MAGIC */ - int obd_minor; /* device number: lctl dl */ - struct lu_device *obd_lu_dev; - - /* common and UUID name of this device */ - struct obd_uuid obd_uuid; - char obd_name[MAX_OBD_NAME]; - - /* bitfield modification is protected by obd_dev_lock */ - unsigned long obd_attached:1, /* finished attach */ - obd_set_up:1, /* finished setup */ - obd_version_recov:1, /* obd uses version checking */ - obd_replayable:1,/* recovery is enabled; inform clients */ - obd_no_transno:1, /* no committed-transno notification */ - obd_no_recov:1, /* fail instead of retry messages */ - obd_stopping:1, /* started cleanup */ - obd_starting:1, /* 
started setup */ - obd_force:1, /* cleanup with > 0 obd refcount */ - obd_fail:1, /* cleanup with failover */ - obd_no_conn:1, /* deny new connections */ - obd_inactive:1, /* device active/inactive - * (for sysfs status only!!) - */ - obd_no_ir:1, /* no imperative recovery. */ - obd_process_conf:1; /* device is processing mgs config */ - /* use separate field as it is set in interrupt to don't mess with - * protection of other bits using _bh lock - */ - unsigned long obd_recovery_expired:1; - /* uuid-export hash body */ - struct rhashtable obd_uuid_hash; - wait_queue_head_t obd_refcount_waitq; - struct list_head obd_exports; - struct list_head obd_unlinked_exports; - struct list_head obd_delayed_exports; - atomic_t obd_refcount; - int obd_num_exports; - spinlock_t obd_nid_lock; - struct ldlm_namespace *obd_namespace; - struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */ - /* a spinlock is OK for what we do now, may need a semaphore later */ - spinlock_t obd_dev_lock; /* protect OBD bitfield above */ - spinlock_t obd_osfs_lock; - struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */ - __u64 obd_osfs_age; - u64 obd_last_committed; - struct mutex obd_dev_mutex; - struct lvfs_run_ctxt obd_lvfs_ctxt; - struct obd_llog_group obd_olg; /* default llog group */ - struct obd_device *obd_observer; - struct rw_semaphore obd_observer_link_sem; - struct obd_notify_upcall obd_upcall; - struct obd_export *obd_self_export; - - union { - struct client_obd cli; - struct echo_client_obd echo_client; - struct lov_obd lov; - struct lmv_obd lmv; - } u; - - /* Fields used by LProcFS */ - struct lprocfs_stats *obd_stats; - unsigned int obd_cntr_base; - - struct lprocfs_stats *md_stats; - unsigned int md_cntr_base; - - struct dentry *obd_debugfs_entry; - struct dentry *obd_svc_debugfs_entry; - struct lprocfs_stats *obd_svc_stats; - atomic_t obd_evict_inprogress; - wait_queue_head_t obd_evict_inprogress_waitq; - struct list_head obd_evict_list; /* protected with pet_lock */ - - 
/** - * Ldlm pool part. Save last calculated SLV and Limit. - */ - rwlock_t obd_pool_lock; - u64 obd_pool_slv; - int obd_pool_limit; - - int obd_conn_inprogress; - - /** - * A list of outstanding class_incref()'s against this obd. For - * debugging. - */ - struct lu_ref obd_reference; - - struct kobject obd_kobj; /* sysfs object */ - struct completion obd_kobj_unregister; -}; - -int obd_uuid_add(struct obd_device *obd, struct obd_export *export); -void obd_uuid_del(struct obd_device *obd, struct obd_export *export); - -/* get/set_info keys */ -#define KEY_ASYNC "async" -#define KEY_CHANGELOG_CLEAR "changelog_clear" -#define KEY_FID2PATH "fid2path" -#define KEY_CHECKSUM "checksum" -#define KEY_CLEAR_FS "clear_fs" -#define KEY_CONN_DATA "conn_data" -#define KEY_EVICT_BY_NID "evict_by_nid" -#define KEY_FIEMAP "fiemap" -#define KEY_FLUSH_CTX "flush_ctx" -#define KEY_GRANT_SHRINK "grant_shrink" -#define KEY_HSM_COPYTOOL_SEND "hsm_send" -#define KEY_INIT_RECOV_BACKUP "init_recov_bk" -#define KEY_INTERMDS "inter_mds" -#define KEY_LAST_ID "last_id" -#define KEY_LAST_FID "last_fid" -#define KEY_MAX_EASIZE "max_easize" -#define KEY_DEFAULT_EASIZE "default_easize" -#define KEY_MGSSEC "mgssec" -#define KEY_READ_ONLY "read-only" -#define KEY_REGISTER_TARGET "register_target" -#define KEY_SET_FS "set_fs" -#define KEY_TGT_COUNT "tgt_count" -/* KEY_SET_INFO in lustre_idl.h */ -#define KEY_SPTLRPC_CONF "sptlrpc_conf" - -#define KEY_CACHE_SET "cache_set" -#define KEY_CACHE_LRU_SHRINK "cache_lru_shrink" - -struct lu_context; - -static inline int it_to_lock_mode(struct lookup_intent *it) -{ - /* CREAT needs to be tested before open (both could be set) */ - if (it->it_op & IT_CREAT) - return LCK_CW; - else if (it->it_op & (IT_GETATTR | IT_OPEN | IT_LOOKUP | - IT_LAYOUT)) - return LCK_CR; - else if (it->it_op & IT_READDIR) - return LCK_PR; - else if (it->it_op & IT_GETXATTR) - return LCK_PR; - else if (it->it_op & IT_SETXATTR) - return LCK_PW; - - LASSERTF(0, "Invalid it_op: %d\n", 
it->it_op); - return -EINVAL; -} - -enum md_op_flags { - MF_MDC_CANCEL_FID1 = BIT(0), - MF_MDC_CANCEL_FID2 = BIT(1), - MF_MDC_CANCEL_FID3 = BIT(2), - MF_MDC_CANCEL_FID4 = BIT(3), - MF_GET_MDT_IDX = BIT(4), -}; - -enum md_cli_flags { - CLI_SET_MEA = BIT(0), - CLI_RM_ENTRY = BIT(1), - CLI_HASH64 = BIT(2), - CLI_API32 = BIT(3), - CLI_MIGRATE = BIT(4), -}; - -/** - * GETXATTR is not included as only a couple of fields in the reply body - * is filled, but not FID which is needed for common intent handling in - * mdc_finish_intent_lock() - */ -static inline bool it_has_reply_body(const struct lookup_intent *it) -{ - return it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR); -} - -struct md_op_data { - struct lu_fid op_fid1; /* operation fid1 (usually parent) */ - struct lu_fid op_fid2; /* operation fid2 (usually child) */ - struct lu_fid op_fid3; /* 2 extra fids to find conflicting */ - struct lu_fid op_fid4; /* to the operation locks. */ - u32 op_mds; /* what mds server open will go to */ - struct lustre_handle op_handle; - s64 op_mod_time; - const char *op_name; - size_t op_namelen; - __u32 op_mode; - struct lmv_stripe_md *op_mea1; - struct lmv_stripe_md *op_mea2; - __u32 op_suppgids[2]; - __u32 op_fsuid; - __u32 op_fsgid; - kernel_cap_t op_cap; - void *op_data; - size_t op_data_size; - - /* iattr fields and blocks. */ - struct iattr op_attr; - unsigned int op_attr_flags; - __u64 op_valid; - loff_t op_attr_blocks; - - __u32 op_flags; - - /* Various operation flags. 
*/ - enum mds_op_bias op_bias; - - /* Used by readdir */ - __u64 op_offset; - - /* Used by readdir */ - __u32 op_max_pages; - - /* used to transfer info between the stacks of MD client - * see enum op_cli_flags - */ - enum md_cli_flags op_cli_flags; - - /* File object data version for HSM release, on client */ - __u64 op_data_version; - struct lustre_handle op_lease_handle; - - /* default stripe offset */ - __u32 op_default_stripe_offset; -}; - -struct md_callback { - int (*md_blocking_ast)(struct ldlm_lock *lock, - struct ldlm_lock_desc *desc, - void *data, int flag); -}; - -struct md_enqueue_info; -/* metadata stat-ahead */ - -struct md_enqueue_info { - struct md_op_data mi_data; - struct lookup_intent mi_it; - struct lustre_handle mi_lockh; - struct inode *mi_dir; - struct ldlm_enqueue_info mi_einfo; - int (*mi_cb)(struct ptlrpc_request *req, - struct md_enqueue_info *minfo, int rc); - void *mi_cbdata; -}; - -struct obd_ops { - struct module *owner; - int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void __user *uarg); - int (*get_info)(const struct lu_env *env, struct obd_export *, - __u32 keylen, void *key, __u32 *vallen, void *val); - int (*set_info_async)(const struct lu_env *, struct obd_export *, - __u32 keylen, void *key, - __u32 vallen, void *val, - struct ptlrpc_request_set *set); - int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg); - int (*precleanup)(struct obd_device *dev); - int (*cleanup)(struct obd_device *dev); - int (*process_config)(struct obd_device *dev, u32 len, void *data); - int (*postrecov)(struct obd_device *dev); - int (*add_conn)(struct obd_import *imp, struct obd_uuid *uuid, - int priority); - int (*del_conn)(struct obd_import *imp, struct obd_uuid *uuid); - /* connect to the target device with given connection - * data. @ocd->ocd_connect_flags is modified to reflect flags actually - * granted by the target, which are guaranteed to be a subset of flags - * asked for. 
If @ocd == NULL, use default parameters. - */ - int (*connect)(const struct lu_env *env, - struct obd_export **exp, struct obd_device *src, - struct obd_uuid *cluuid, struct obd_connect_data *ocd, - void *localdata); - int (*reconnect)(const struct lu_env *env, - struct obd_export *exp, struct obd_device *src, - struct obd_uuid *cluuid, - struct obd_connect_data *ocd, - void *localdata); - int (*disconnect)(struct obd_export *exp); - - /* Initialize/finalize fids infrastructure. */ - int (*fid_init)(struct obd_device *obd, - struct obd_export *exp, enum lu_cli_type type); - int (*fid_fini)(struct obd_device *obd); - - /* Allocate new fid according to passed @hint. */ - int (*fid_alloc)(const struct lu_env *env, struct obd_export *exp, - struct lu_fid *fid, struct md_op_data *op_data); - - /* - * Object with @fid is getting deleted, we may want to do something - * about this. - */ - int (*statfs)(const struct lu_env *, struct obd_export *exp, - struct obd_statfs *osfs, __u64 max_age, __u32 flags); - int (*statfs_async)(struct obd_export *exp, struct obd_info *oinfo, - __u64 max_age, struct ptlrpc_request_set *set); - int (*create)(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa); - int (*destroy)(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa); - int (*setattr)(const struct lu_env *, struct obd_export *exp, - struct obdo *oa); - int (*getattr)(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa); - int (*preprw)(const struct lu_env *env, int cmd, - struct obd_export *exp, struct obdo *oa, int objcount, - struct obd_ioobj *obj, struct niobuf_remote *remote, - int *nr_pages, struct niobuf_local *local); - int (*commitrw)(const struct lu_env *env, int cmd, - struct obd_export *exp, struct obdo *oa, - int objcount, struct obd_ioobj *obj, - struct niobuf_remote *remote, int pages, - struct niobuf_local *local, int rc); - int (*init_export)(struct obd_export *exp); - int (*destroy_export)(struct obd_export *exp); - 
- /* metadata-only methods */ - int (*import_event)(struct obd_device *, struct obd_import *, - enum obd_import_event); - - int (*notify)(struct obd_device *obd, struct obd_device *watched, - enum obd_notify_event ev, void *data); - - int (*health_check)(const struct lu_env *env, struct obd_device *); - struct obd_uuid *(*get_uuid)(struct obd_export *exp); - - /* quota methods */ - int (*quotactl)(struct obd_device *, struct obd_export *, - struct obd_quotactl *); - - /* pools methods */ - int (*pool_new)(struct obd_device *obd, char *poolname); - int (*pool_del)(struct obd_device *obd, char *poolname); - int (*pool_add)(struct obd_device *obd, char *poolname, - char *ostname); - int (*pool_rem)(struct obd_device *obd, char *poolname, - char *ostname); - void (*getref)(struct obd_device *obd); - void (*putref)(struct obd_device *obd); - /* - * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line - * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c. - * Also, add a wrapper function in include/linux/obd_class.h. 
- */ -}; - -/* lmv structures */ -struct lustre_md { - struct mdt_body *body; - struct lu_buf layout; - struct lmv_stripe_md *lmv; -#ifdef CONFIG_FS_POSIX_ACL - struct posix_acl *posix_acl; -#endif - struct mdt_remote_perm *remote_perm; -}; - -struct md_open_data { - struct obd_client_handle *mod_och; - struct ptlrpc_request *mod_open_req; - struct ptlrpc_request *mod_close_req; - atomic_t mod_refcount; - bool mod_is_create; -}; - -struct obd_client_handle { - struct lustre_handle och_fh; - struct lu_fid och_fid; - struct md_open_data *och_mod; - struct lustre_handle och_lease_handle; /* open lock for lease */ - __u32 och_magic; - fmode_t och_flags; -}; - -#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed - -struct lookup_intent; -struct cl_attr; - -struct md_ops { - int (*getstatus)(struct obd_export *, struct lu_fid *); - int (*null_inode)(struct obd_export *, const struct lu_fid *); - int (*close)(struct obd_export *, struct md_op_data *, - struct md_open_data *, struct ptlrpc_request **); - int (*create)(struct obd_export *, struct md_op_data *, - const void *, size_t, umode_t, uid_t, gid_t, - kernel_cap_t, __u64, struct ptlrpc_request **); - int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *, - const union ldlm_policy_data *, struct md_op_data *, - struct lustre_handle *, __u64); - int (*getattr)(struct obd_export *, struct md_op_data *, - struct ptlrpc_request **); - int (*getattr_name)(struct obd_export *, struct md_op_data *, - struct ptlrpc_request **); - int (*intent_lock)(struct obd_export *, struct md_op_data *, - struct lookup_intent *, - struct ptlrpc_request **, - ldlm_blocking_callback, __u64); - int (*link)(struct obd_export *, struct md_op_data *, - struct ptlrpc_request **); - int (*rename)(struct obd_export *, struct md_op_data *, - const char *, size_t, const char *, size_t, - struct ptlrpc_request **); - int (*setattr)(struct obd_export *, struct md_op_data *, void *, - size_t, struct ptlrpc_request **); - int (*sync)(struct obd_export *, 
const struct lu_fid *, - struct ptlrpc_request **); - int (*read_page)(struct obd_export *, struct md_op_data *, - struct md_callback *cb_op, __u64 hash_offset, - struct page **ppage); - int (*unlink)(struct obd_export *, struct md_op_data *, - struct ptlrpc_request **); - - int (*setxattr)(struct obd_export *, const struct lu_fid *, - u64, const char *, const void *, size_t, unsigned int, - u32, struct ptlrpc_request **); - - int (*getxattr)(struct obd_export *, const struct lu_fid *, - u64, const char *, size_t, struct ptlrpc_request **); - - int (*init_ea_size)(struct obd_export *, u32, u32); - - int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *, - struct obd_export *, struct obd_export *, - struct lustre_md *); - - int (*free_lustre_md)(struct obd_export *, struct lustre_md *); - - int (*merge_attr)(struct obd_export *, - const struct lmv_stripe_md *lsm, - struct cl_attr *attr, ldlm_blocking_callback); - - int (*set_open_replay_data)(struct obd_export *, - struct obd_client_handle *, - struct lookup_intent *); - int (*clear_open_replay_data)(struct obd_export *, - struct obd_client_handle *); - int (*set_lock_data)(struct obd_export *, const struct lustre_handle *, - void *, __u64 *); - - enum ldlm_mode (*lock_match)(struct obd_export *, __u64, - const struct lu_fid *, enum ldlm_type, - union ldlm_policy_data *, enum ldlm_mode, - struct lustre_handle *); - - int (*cancel_unused)(struct obd_export *, const struct lu_fid *, - union ldlm_policy_data *, enum ldlm_mode, - enum ldlm_cancel_flags flags, void *opaque); - - int (*get_fid_from_lsm)(struct obd_export *, - const struct lmv_stripe_md *, - const char *name, int namelen, - struct lu_fid *fid); - - int (*intent_getattr_async)(struct obd_export *, - struct md_enqueue_info *); - - int (*revalidate_lock)(struct obd_export *, struct lookup_intent *, - struct lu_fid *, __u64 *bits); - - int (*unpackmd)(struct obd_export *exp, struct lmv_stripe_md **plsm, - const union lmv_mds_md *lmv, size_t 
lmv_size); - /* - * NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to - * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a - * wrapper function in include/linux/obd_class.h. - */ -}; - -static inline struct md_open_data *obd_mod_alloc(void) -{ - struct md_open_data *mod; - - mod = kzalloc(sizeof(*mod), GFP_NOFS); - if (!mod) - return NULL; - atomic_set(&mod->mod_refcount, 1); - return mod; -} - -#define obd_mod_get(mod) atomic_inc(&(mod)->mod_refcount) -#define obd_mod_put(mod) \ -({ \ - if (atomic_dec_and_test(&(mod)->mod_refcount)) { \ - if ((mod)->mod_open_req) \ - ptlrpc_req_finished((mod)->mod_open_req); \ - kfree(mod); \ - } \ -}) - -void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid); -void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent); - -/* return 1 if client should be resend request */ -static inline int client_should_resend(int resend, struct client_obd *cli) -{ - return atomic_read(&cli->cl_resends) ? - atomic_read(&cli->cl_resends) > resend : 1; -} - -/** - * Return device name for this device - * - * XXX: lu_device is declared before obd_device, while a pointer pointing - * back to obd_device in lu_device, so this helper function defines here - * instead of in lu_object.h - */ -static inline const char *lu_dev_name(const struct lu_device *lu_dev) -{ - return lu_dev->ld_obd->obd_name; -} - -static inline bool filename_is_volatile(const char *name, size_t namelen, - int *idx) -{ - const char *start; - char *end; - - if (strncmp(name, LUSTRE_VOLATILE_HDR, LUSTRE_VOLATILE_HDR_LEN) != 0) - return false; - - /* caller does not care of idx */ - if (!idx) - return true; - - /* volatile file, the MDT can be set from name */ - /* name format is LUSTRE_VOLATILE_HDR:[idx]: */ - /* if no MDT is specified, use std way */ - if (namelen < LUSTRE_VOLATILE_HDR_LEN + 2) - goto bad_format; - /* test for no MDT idx case */ - if ((*(name + LUSTRE_VOLATILE_HDR_LEN) == ':') && - (*(name + 
LUSTRE_VOLATILE_HDR_LEN + 1) == ':')) { - *idx = -1; - return true; - } - /* we have an idx, read it */ - start = name + LUSTRE_VOLATILE_HDR_LEN + 1; - *idx = simple_strtoul(start, &end, 0); - /* error cases: - * no digit, no trailing :, negative value - */ - if (((*idx == 0) && (end == start)) || - (*end != ':') || (*idx < 0)) - goto bad_format; - - return true; -bad_format: - /* bad format of mdt idx, we cannot return an error - * to caller so we use hash algo - */ - CERROR("Bad volatile file name format: %s\n", - name + LUSTRE_VOLATILE_HDR_LEN); - return false; -} - -static inline int cli_brw_size(struct obd_device *obd) -{ - return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT; -} - -/* - * when RPC size or the max RPCs in flight is increased, the max dirty pages - * of the client should be increased accordingly to avoid sending fragmented - * RPCs over the network when the client runs out of the maximum dirty space - * when so many RPCs are being generated. - */ -static inline void client_adjust_max_dirty(struct client_obd *cli) -{ - /* initializing */ - if (cli->cl_dirty_max_pages <= 0) - cli->cl_dirty_max_pages = - (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) >> PAGE_SHIFT; - else { - unsigned long dirty_max = cli->cl_max_rpcs_in_flight * - cli->cl_max_pages_per_rpc; - - if (dirty_max > cli->cl_dirty_max_pages) - cli->cl_dirty_max_pages = dirty_max; - } - - if (cli->cl_dirty_max_pages > totalram_pages / 8) - cli->cl_dirty_max_pages = totalram_pages / 8; -} - -#endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h deleted file mode 100644 index e5f7bb20415d..000000000000 --- a/drivers/staging/lustre/lustre/include/obd_cksum.h +++ /dev/null @@ -1,153 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef __OBD_CKSUM -#define __OBD_CKSUM -#include -#include -#include - -static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type) -{ - switch (cksum_type) { - case OBD_CKSUM_CRC32: - return CFS_HASH_ALG_CRC32; - case OBD_CKSUM_ADLER: - return CFS_HASH_ALG_ADLER32; - case OBD_CKSUM_CRC32C: - return CFS_HASH_ALG_CRC32C; - default: - CERROR("Unknown checksum type (%x)!!!\n", cksum_type); - LBUG(); - } - return 0; -} - -/* The OBD_FL_CKSUM_* flags is packed into 5 bits of o_flags, since there can - * only be a single checksum type per RPC. - * - * The OBD_CHECKSUM_* type bits passed in ocd_cksum_types are a 32-bit bitmask - * since they need to represent the full range of checksum algorithms that - * both the client and server can understand. - * - * In case of an unsupported types/flags we fall back to ADLER - * because that is supported by all clients since 1.8 - * - * In case multiple algorithms are supported the best one is used. 
- */ -static inline u32 cksum_type_pack(enum cksum_type cksum_type) -{ - unsigned int performance = 0, tmp; - u32 flag = OBD_FL_CKSUM_ADLER; - - if (cksum_type & OBD_CKSUM_CRC32) { - tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)); - if (tmp > performance) { - performance = tmp; - flag = OBD_FL_CKSUM_CRC32; - } - } - if (cksum_type & OBD_CKSUM_CRC32C) { - tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)); - if (tmp > performance) { - performance = tmp; - flag = OBD_FL_CKSUM_CRC32C; - } - } - if (cksum_type & OBD_CKSUM_ADLER) { - tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER)); - if (tmp > performance) { - performance = tmp; - flag = OBD_FL_CKSUM_ADLER; - } - } - if (unlikely(cksum_type && !(cksum_type & (OBD_CKSUM_CRC32C | - OBD_CKSUM_CRC32 | - OBD_CKSUM_ADLER)))) - CWARN("unknown cksum type %x\n", cksum_type); - - return flag; -} - -static inline enum cksum_type cksum_type_unpack(u32 o_flags) -{ - switch (o_flags & OBD_FL_CKSUM_ALL) { - case OBD_FL_CKSUM_CRC32C: - return OBD_CKSUM_CRC32C; - case OBD_FL_CKSUM_CRC32: - return OBD_CKSUM_CRC32; - default: - break; - } - - return OBD_CKSUM_ADLER; -} - -/* Return a bitmask of the checksum types supported on this system. - * 1.8 supported ADLER it is base and not depend on hw - * Client uses all available local algos - */ -static inline enum cksum_type cksum_types_supported_client(void) -{ - enum cksum_type ret = OBD_CKSUM_ADLER; - - CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n", - cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)), - cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)), - cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER))); - - if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)) > 0) - ret |= OBD_CKSUM_CRC32C; - if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)) > 0) - ret |= OBD_CKSUM_CRC32; - - return ret; -} - -/* Select the best checksum algorithm among those supplied in the cksum_types - * input. 
- * - * Currently, calling cksum_type_pack() with a mask will return the fastest - * checksum type due to its benchmarking at libcfs module load. - * Caution is advised, however, since what is fastest on a single client may - * not be the fastest or most efficient algorithm on the server. - */ -static inline enum cksum_type cksum_type_select(enum cksum_type cksum_types) -{ - return cksum_type_unpack(cksum_type_pack(cksum_types)); -} - -/* Checksum algorithm names. Must be defined in the same order as the - * OBD_CKSUM_* flags. - */ -#define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"} - -#endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h deleted file mode 100644 index fc9c7720fee0..000000000000 --- a/drivers/staging/lustre/lustre/include/obd_class.h +++ /dev/null @@ -1,1603 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#ifndef __CLASS_OBD_H -#define __CLASS_OBD_H - -#include -#include -#include -#include -#include -#include - -/* requests should be send without delay and resends for avoid deadlocks */ -#define OBD_STATFS_NODELAY 0x0001 -/* the statfs callback should not update obd_osfs_age */ -#define OBD_STATFS_FROM_CACHE 0x0002 -/* the statfs is only for retrieving information from MDT0 */ -#define OBD_STATFS_FOR_MDT0 0x0004 - -/* OBD Device Declarations */ -extern struct obd_device *obd_devs[MAX_OBD_DEVICES]; -extern rwlock_t obd_dev_lock; - -/* OBD Operations Declarations */ -struct obd_device *class_exp2obd(struct obd_export *exp); -int class_handle_ioctl(unsigned int cmd, unsigned long arg); -int lustre_get_jobid(char *jobid); - -struct lu_device_type; - -/* genops.c */ -extern struct list_head obd_types; -struct obd_export *class_conn2export(struct lustre_handle *conn); -int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops, - const char *name, struct lu_device_type *ldt); -int class_unregister_type(const char *name); - -struct obd_device *class_newdev(const char *type_name, const char *name); -void class_release_dev(struct obd_device *obd); - -int class_name2dev(const char *name); -struct obd_device *class_name2obd(const char *name); -int class_uuid2dev(struct obd_uuid *uuid); -struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid); -struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, - int *next); -struct obd_device *class_num2obd(int num); - -int class_notify_sptlrpc_conf(const char *fsname, int namelen); - -int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep); - -int obd_zombie_impexp_init(void); -void obd_zombie_impexp_stop(void); -void obd_zombie_barrier(void); - -int obd_get_request_slot(struct client_obd *cli); -void 
obd_put_request_slot(struct client_obd *cli); -__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli); -int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max); -int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, u16 max); -int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq); - -u16 obd_get_mod_rpc_slot(struct client_obd *cli, u32 opc, - struct lookup_intent *it); -void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc, - struct lookup_intent *it, u16 tag); - -struct llog_handle; -struct llog_rec_hdr; -typedef int (*llog_cb_t)(const struct lu_env *, struct llog_handle *, - struct llog_rec_hdr *, void *); - -/* obd_config.c */ -char *lustre_cfg_string(struct lustre_cfg *lcfg, u32 index); -int class_process_config(struct lustre_cfg *lcfg); -int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, - struct lustre_cfg *lcfg, void *data); - -/* For interoperability */ -struct cfg_interop_param { - char *old_param; - char *new_param; -}; - -int class_find_param(char *buf, char *key, char **valp); -struct cfg_interop_param *class_find_old_param(const char *param, - struct cfg_interop_param *ptr); -int class_get_next_param(char **params, char *copy); -int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh); -int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh); -int class_parse_net(char *buf, u32 *net, char **endh); -int class_match_nid(char *buf, char *key, lnet_nid_t nid); -int class_match_net(char *buf, char *key, u32 net); - -struct obd_device *class_incref(struct obd_device *obd, - const char *scope, const void *source); -void class_decref(struct obd_device *obd, - const char *scope, const void *source); -int class_config_llog_handler(const struct lu_env *env, - struct llog_handle *handle, - struct llog_rec_hdr *rec, void *data); -int class_add_uuid(const char *uuid, __u64 nid); - -/* obdecho */ -void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars); - -#define CFG_F_START 0x01 
/* Set when we start updating from a log */ -#define CFG_F_MARKER 0x02 /* We are within a maker */ -#define CFG_F_SKIP 0x04 /* We should ignore this cfg command */ -#define CFG_F_COMPAT146 0x08 /* Allow old-style logs */ -#define CFG_F_EXCLUDE 0x10 /* OST exclusion list */ - -/* Passed as data param to class_config_parse_llog */ -struct config_llog_instance { - char *cfg_obdname; - void *cfg_instance; - struct super_block *cfg_sb; - struct obd_uuid cfg_uuid; - llog_cb_t cfg_callback; - int cfg_last_idx; /* for partial llog processing */ - int cfg_flags; -}; - -int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, - char *name, struct config_llog_instance *cfg); -enum { - CONFIG_T_CONFIG = 0, - CONFIG_T_SPTLRPC = 1, - CONFIG_T_RECOVER = 2, - CONFIG_T_PARAMS = 3, - CONFIG_T_MAX = 4 -}; - -#define PARAMS_FILENAME "params" -#define LCTL_UPCALL "lctl" - -/* list of active configuration logs */ -struct config_llog_data { - struct ldlm_res_id cld_resid; - struct config_llog_instance cld_cfg; - struct list_head cld_list_chain; - atomic_t cld_refcount; - struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */ - struct config_llog_data *cld_params; /* common parameters log */ - struct config_llog_data *cld_recover;/* imperative recover log */ - struct obd_export *cld_mgcexp; - struct mutex cld_lock; - int cld_type; - unsigned int cld_stopping:1, /* - * we were told to stop - * watching - */ - cld_lostlock:1; /* lock not requeued */ - char cld_logname[0]; -}; - -struct lustre_profile { - struct list_head lp_list; - char *lp_profile; - char *lp_dt; - char *lp_md; - int lp_refs; - bool lp_list_deleted; -}; - -struct lustre_profile *class_get_profile(const char *prof); -void class_del_profile(const char *prof); -void class_put_profile(struct lustre_profile *lprof); -void class_del_profiles(void); - -#if LUSTRE_TRACKS_LOCK_EXP_REFS - -void __class_export_add_lock_ref(struct obd_export *exp, - struct ldlm_lock *lock); -void 
__class_export_del_lock_ref(struct obd_export *exp, - struct ldlm_lock *lock); -extern void (*class_export_dump_hook)(struct obd_export *exp); - -#else - -#define __class_export_add_lock_ref(exp, lock) do {} while (0) -#define __class_export_del_lock_ref(exp, lock) do {} while (0) - -#endif - -/* genops.c */ -struct obd_export *class_export_get(struct obd_export *exp); -void class_export_put(struct obd_export *exp); -struct obd_export *class_new_export(struct obd_device *obddev, - struct obd_uuid *cluuid); -void class_unlink_export(struct obd_export *exp); - -struct obd_import *class_import_get(struct obd_import *imp); -void class_import_put(struct obd_import *imp); -struct obd_import *class_new_import(struct obd_device *obd); -void class_destroy_import(struct obd_import *exp); - -void class_put_type(struct obd_type *type); -int class_connect(struct lustre_handle *conn, struct obd_device *obd, - struct obd_uuid *cluuid); -int class_disconnect(struct obd_export *exp); -void class_fail_export(struct obd_export *exp); -int class_manual_cleanup(struct obd_device *obd); - -static inline void class_export_rpc_inc(struct obd_export *exp) -{ - atomic_inc(&(exp)->exp_rpc_count); - CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n", - (exp), atomic_read(&(exp)->exp_rpc_count)); -} - -static inline void class_export_rpc_dec(struct obd_export *exp) -{ - LASSERT_ATOMIC_POS(&exp->exp_rpc_count); - atomic_dec(&(exp)->exp_rpc_count); - CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n", - (exp), atomic_read(&(exp)->exp_rpc_count)); -} - -static inline struct obd_export *class_export_lock_get(struct obd_export *exp, - struct ldlm_lock *lock) -{ - atomic_inc(&(exp)->exp_locks_count); - __class_export_add_lock_ref(exp, lock); - CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n", - (exp), atomic_read(&(exp)->exp_locks_count)); - return class_export_get(exp); -} - -static inline void class_export_lock_put(struct obd_export *exp, - struct ldlm_lock *lock) 
-{ - LASSERT_ATOMIC_POS(&exp->exp_locks_count); - atomic_dec(&(exp)->exp_locks_count); - __class_export_del_lock_ref(exp, lock); - CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", - (exp), atomic_read(&(exp)->exp_locks_count)); - class_export_put(exp); -} - -static inline enum obd_option exp_flags_from_obd(struct obd_device *obd) -{ - return ((obd->obd_fail ? OBD_OPT_FAILOVER : 0) | - (obd->obd_force ? OBD_OPT_FORCE : 0) | - 0); -} - -static inline int lprocfs_climp_check(struct obd_device *obd) -{ - down_read(&(obd)->u.cli.cl_sem); - if (!(obd)->u.cli.cl_import) { - up_read(&(obd)->u.cli.cl_sem); - return -ENODEV; - } - return 0; -} - -struct inode; -struct lu_attr; -struct obdo; - -void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj); - -#define OBT(dev) ((dev)->obd_type) -#define OBP(dev, op) ((dev)->obd_type->typ_dt_ops->op) -#define MDP(dev, op) ((dev)->obd_type->typ_md_ops->op) -#define CTXTP(ctxt, op) ((ctxt)->loc_logops->lop_##op) - -/* - * Ensure obd_setup: used for cleanup which must be called - * while obd is stopping - */ -static inline int obd_check_dev(struct obd_device *obd) -{ - if (!obd) { - CERROR("NULL device\n"); - return -ENODEV; - } - return 0; -} - -/* ensure obd_setup and !obd_stopping */ -static inline int obd_check_dev_active(struct obd_device *obd) -{ - int rc; - - rc = obd_check_dev(obd); - if (rc) - return rc; - if (!obd->obd_set_up || obd->obd_stopping) { - CERROR("Device %d not setup\n", obd->obd_minor); - return -ENODEV; - } - return rc; -} - -#define OBD_COUNTER_OFFSET(op) \ - ((offsetof(struct obd_ops, op) - \ - offsetof(struct obd_ops, iocontrol)) \ - / sizeof(((struct obd_ops *)(0))->iocontrol)) - -#define OBD_COUNTER_INCREMENT(obdx, op) \ -do { \ - if ((obdx)->obd_stats) { \ - unsigned int coffset; \ - coffset = (unsigned int)((obdx)->obd_cntr_base) + \ - OBD_COUNTER_OFFSET(op); \ - LASSERT(coffset < (obdx)->obd_stats->ls_num); \ - lprocfs_counter_incr((obdx)->obd_stats, coffset); \ - } \ -} while 
(0) - -#define EXP_COUNTER_INCREMENT(export, op) \ -do { \ - if ((export)->exp_obd->obd_stats) { \ - unsigned int coffset; \ - coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \ - OBD_COUNTER_OFFSET(op); \ - LASSERT(coffset < (export)->exp_obd->obd_stats->ls_num); \ - lprocfs_counter_incr((export)->exp_obd->obd_stats, coffset); \ - } \ -} while (0) - -#define MD_COUNTER_OFFSET(op) \ - ((offsetof(struct md_ops, op) - \ - offsetof(struct md_ops, getstatus)) \ - / sizeof(((struct md_ops *)(0))->getstatus)) - -#define MD_COUNTER_INCREMENT(obdx, op) \ -do { \ - if ((obd)->md_stats) { \ - unsigned int coffset; \ - coffset = (unsigned int)((obdx)->md_cntr_base) + \ - MD_COUNTER_OFFSET(op); \ - LASSERT(coffset < (obdx)->md_stats->ls_num); \ - lprocfs_counter_incr((obdx)->md_stats, coffset); \ - } \ -} while (0) - -#define EXP_MD_COUNTER_INCREMENT(export, op) \ -do { \ - if ((export)->exp_obd->obd_stats) { \ - unsigned int coffset; \ - coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \ - MD_COUNTER_OFFSET(op); \ - LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \ - lprocfs_counter_incr((export)->exp_obd->md_stats, coffset); \ - if ((export)->exp_md_stats) \ - lprocfs_counter_incr( \ - (export)->exp_md_stats, coffset); \ - } \ -} while (0) - -#define EXP_CHECK_MD_OP(exp, op) \ -do { \ - if (!(exp)) { \ - CERROR("obd_" #op ": NULL export\n"); \ - return -ENODEV; \ - } \ - if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ - CERROR("obd_" #op ": cleaned up obd\n"); \ - return -EOPNOTSUPP; \ - } \ - if (!OBT((exp)->exp_obd) || !MDP((exp)->exp_obd, op)) { \ - CERROR("obd_" #op ": dev %s/%d no operation\n", \ - (exp)->exp_obd->obd_name, \ - (exp)->exp_obd->obd_minor); \ - return -EOPNOTSUPP; \ - } \ -} while (0) - -#define OBD_CHECK_DT_OP(obd, op, err) \ -do { \ - if (!OBT(obd) || !OBP((obd), op)) { \ - if (err) \ - CERROR("obd_" #op ": dev %d no operation\n", \ - obd->obd_minor); \ - return err; \ - } \ -} while (0) - -#define EXP_CHECK_DT_OP(exp, 
op) \ -do { \ - if (!(exp)) { \ - CERROR("obd_" #op ": NULL export\n"); \ - return -ENODEV; \ - } \ - if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ - CERROR("obd_" #op ": cleaned up obd\n"); \ - return -EOPNOTSUPP; \ - } \ - if (!OBT((exp)->exp_obd) || !OBP((exp)->exp_obd, op)) { \ - CERROR("obd_" #op ": dev %d no operation\n", \ - (exp)->exp_obd->obd_minor); \ - return -EOPNOTSUPP; \ - } \ -} while (0) - -#define CTXT_CHECK_OP(ctxt, op, err) \ -do { \ - if (!OBT(ctxt->loc_obd) || !CTXTP((ctxt), op)) { \ - if (err) \ - CERROR("lop_" #op ": dev %d no operation\n", \ - ctxt->loc_obd->obd_minor); \ - return err; \ - } \ -} while (0) - -static inline int class_devno_max(void) -{ - return MAX_OBD_DEVICES; -} - -static inline int obd_get_info(const struct lu_env *env, - struct obd_export *exp, __u32 keylen, - void *key, __u32 *vallen, void *val) -{ - int rc; - - EXP_CHECK_DT_OP(exp, get_info); - EXP_COUNTER_INCREMENT(exp, get_info); - - rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val); - return rc; -} - -static inline int obd_set_info_async(const struct lu_env *env, - struct obd_export *exp, u32 keylen, - void *key, u32 vallen, void *val, - struct ptlrpc_request_set *set) -{ - int rc; - - EXP_CHECK_DT_OP(exp, set_info_async); - EXP_COUNTER_INCREMENT(exp, set_info_async); - - rc = OBP(exp->exp_obd, set_info_async)(env, exp, keylen, key, vallen, - val, set); - return rc; -} - -/* - * obd-lu integration. - * - * Functionality is being moved into new lu_device-based layering, but some - * pieces of configuration process are still based on obd devices. - * - * Specifically, lu_device_type_operations::ldto_device_alloc() methods fully - * subsume ->o_setup() methods of obd devices they replace. The same for - * lu_device_operations::ldo_process_config() and ->o_process_config(). As a - * result, obd_setup() and obd_process_config() branch and call one XOR - * another. 
- * - * Yet neither lu_device_type_operations::ldto_device_fini() nor - * lu_device_type_operations::ldto_device_free() fully implement the - * functionality of ->o_precleanup() and ->o_cleanup() they override. Hence, - * obd_precleanup() and obd_cleanup() call both lu_device and obd operations. - */ - -static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg) -{ - int rc; - struct lu_device_type *ldt; - struct lu_device *d; - - ldt = obd->obd_type->typ_lu; - if (ldt) { - struct lu_context session_ctx; - struct lu_env env; - - lu_context_init(&session_ctx, LCT_SESSION | LCT_SERVER_SESSION); - session_ctx.lc_thread = NULL; - lu_context_enter(&session_ctx); - - rc = lu_env_init(&env, ldt->ldt_ctx_tags); - if (rc == 0) { - env.le_ses = &session_ctx; - d = ldt->ldt_ops->ldto_device_alloc(&env, ldt, cfg); - lu_env_fini(&env); - if (!IS_ERR(d)) { - obd->obd_lu_dev = d; - d->ld_obd = obd; - rc = 0; - } else { - rc = PTR_ERR(d); - } - } - lu_context_exit(&session_ctx); - lu_context_fini(&session_ctx); - - } else { - OBD_CHECK_DT_OP(obd, setup, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, setup); - rc = OBP(obd, setup)(obd, cfg); - } - return rc; -} - -static inline int obd_precleanup(struct obd_device *obd) -{ - int rc; - struct lu_device_type *ldt; - struct lu_device *d; - - rc = obd_check_dev(obd); - if (rc) - return rc; - ldt = obd->obd_type->typ_lu; - d = obd->obd_lu_dev; - if (ldt && d) { - struct lu_env env; - - rc = lu_env_init(&env, ldt->ldt_ctx_tags); - if (!rc) { - ldt->ldt_ops->ldto_device_fini(&env, d); - lu_env_fini(&env); - } - } - OBD_CHECK_DT_OP(obd, precleanup, 0); - OBD_COUNTER_INCREMENT(obd, precleanup); - - rc = OBP(obd, precleanup)(obd); - return rc; -} - -static inline int obd_cleanup(struct obd_device *obd) -{ - int rc; - struct lu_device_type *ldt; - struct lu_device *d; - - rc = obd_check_dev(obd); - if (rc) - return rc; - - ldt = obd->obd_type->typ_lu; - d = obd->obd_lu_dev; - if (ldt && d) { - struct lu_env env; - - rc = 
lu_env_init(&env, ldt->ldt_ctx_tags); - if (rc == 0) { - ldt->ldt_ops->ldto_device_free(&env, d); - lu_env_fini(&env); - obd->obd_lu_dev = NULL; - } - } - OBD_CHECK_DT_OP(obd, cleanup, 0); - OBD_COUNTER_INCREMENT(obd, cleanup); - - rc = OBP(obd, cleanup)(obd); - return rc; -} - -static inline void obd_cleanup_client_import(struct obd_device *obd) -{ - /* - * If we set up but never connected, the - * client import will not have been cleaned. - */ - down_write(&obd->u.cli.cl_sem); - if (obd->u.cli.cl_import) { - struct obd_import *imp; - - imp = obd->u.cli.cl_import; - CDEBUG(D_CONFIG, "%s: client import never connected\n", - obd->obd_name); - ptlrpc_invalidate_import(imp); - client_destroy_import(imp); - obd->u.cli.cl_import = NULL; - } - up_write(&obd->u.cli.cl_sem); -} - -static inline int -obd_process_config(struct obd_device *obd, int datalen, void *data) -{ - int rc; - struct lu_device_type *ldt; - struct lu_device *d; - - rc = obd_check_dev(obd); - if (rc) - return rc; - - obd->obd_process_conf = 1; - ldt = obd->obd_type->typ_lu; - d = obd->obd_lu_dev; - if (ldt && d) { - struct lu_env env; - - rc = lu_env_init(&env, ldt->ldt_ctx_tags); - if (rc == 0) { - rc = d->ld_ops->ldo_process_config(&env, d, data); - lu_env_fini(&env); - } - } else { - OBD_CHECK_DT_OP(obd, process_config, -EOPNOTSUPP); - rc = OBP(obd, process_config)(obd, datalen, data); - } - OBD_COUNTER_INCREMENT(obd, process_config); - obd->obd_process_conf = 0; - - return rc; -} - -static inline int obd_create(const struct lu_env *env, struct obd_export *exp, - struct obdo *obdo) -{ - int rc; - - EXP_CHECK_DT_OP(exp, create); - EXP_COUNTER_INCREMENT(exp, create); - - rc = OBP(exp->exp_obd, create)(env, exp, obdo); - return rc; -} - -static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp, - struct obdo *obdo) -{ - int rc; - - EXP_CHECK_DT_OP(exp, destroy); - EXP_COUNTER_INCREMENT(exp, destroy); - - rc = OBP(exp->exp_obd, destroy)(env, exp, obdo); - return rc; -} - -static 
inline int obd_getattr(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa) -{ - int rc; - - EXP_CHECK_DT_OP(exp, getattr); - EXP_COUNTER_INCREMENT(exp, getattr); - - rc = OBP(exp->exp_obd, getattr)(env, exp, oa); - return rc; -} - -static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa) -{ - int rc; - - EXP_CHECK_DT_OP(exp, setattr); - EXP_COUNTER_INCREMENT(exp, setattr); - - rc = OBP(exp->exp_obd, setattr)(env, exp, oa); - return rc; -} - -static inline int obd_add_conn(struct obd_import *imp, struct obd_uuid *uuid, - int priority) -{ - struct obd_device *obd = imp->imp_obd; - int rc; - - rc = obd_check_dev_active(obd); - if (rc) - return rc; - OBD_CHECK_DT_OP(obd, add_conn, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, add_conn); - - rc = OBP(obd, add_conn)(imp, uuid, priority); - return rc; -} - -static inline int obd_del_conn(struct obd_import *imp, struct obd_uuid *uuid) -{ - struct obd_device *obd = imp->imp_obd; - int rc; - - rc = obd_check_dev_active(obd); - if (rc) - return rc; - OBD_CHECK_DT_OP(obd, del_conn, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, del_conn); - - rc = OBP(obd, del_conn)(imp, uuid); - return rc; -} - -static inline struct obd_uuid *obd_get_uuid(struct obd_export *exp) -{ - struct obd_uuid *uuid; - - OBD_CHECK_DT_OP(exp->exp_obd, get_uuid, NULL); - EXP_COUNTER_INCREMENT(exp, get_uuid); - - uuid = OBP(exp->exp_obd, get_uuid)(exp); - return uuid; -} - -/* - * Create a new /a exp on device /a obd for the uuid /a cluuid - * @param exp New export handle - * @param d Connect data, supported flags are set, flags also understood - * by obd are returned. - */ -static inline int obd_connect(const struct lu_env *env, - struct obd_export **exp, struct obd_device *obd, - struct obd_uuid *cluuid, - struct obd_connect_data *data, - void *localdata) -{ - int rc; - __u64 ocf = data ? 
data->ocd_connect_flags : 0; /* - * for post-condition - * check - */ - - rc = obd_check_dev_active(obd); - if (rc) - return rc; - OBD_CHECK_DT_OP(obd, connect, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, connect); - - rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata); - /* check that only subset is granted */ - LASSERT(ergo(data, (data->ocd_connect_flags & ocf) == - data->ocd_connect_flags)); - return rc; -} - -static inline int obd_reconnect(const struct lu_env *env, - struct obd_export *exp, - struct obd_device *obd, - struct obd_uuid *cluuid, - struct obd_connect_data *d, - void *localdata) -{ - int rc; - __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition check */ - - rc = obd_check_dev_active(obd); - if (rc) - return rc; - OBD_CHECK_DT_OP(obd, reconnect, 0); - OBD_COUNTER_INCREMENT(obd, reconnect); - - rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata); - /* check that only subset is granted */ - LASSERT(ergo(d, (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); - return rc; -} - -static inline int obd_disconnect(struct obd_export *exp) -{ - int rc; - - EXP_CHECK_DT_OP(exp, disconnect); - EXP_COUNTER_INCREMENT(exp, disconnect); - - rc = OBP(exp->exp_obd, disconnect)(exp); - return rc; -} - -static inline int obd_fid_init(struct obd_device *obd, struct obd_export *exp, - enum lu_cli_type type) -{ - int rc; - - OBD_CHECK_DT_OP(obd, fid_init, 0); - OBD_COUNTER_INCREMENT(obd, fid_init); - - rc = OBP(obd, fid_init)(obd, exp, type); - return rc; -} - -static inline int obd_fid_fini(struct obd_device *obd) -{ - int rc; - - OBD_CHECK_DT_OP(obd, fid_fini, 0); - OBD_COUNTER_INCREMENT(obd, fid_fini); - - rc = OBP(obd, fid_fini)(obd); - return rc; -} - -static inline int obd_fid_alloc(const struct lu_env *env, - struct obd_export *exp, - struct lu_fid *fid, - struct md_op_data *op_data) -{ - int rc; - - EXP_CHECK_DT_OP(exp, fid_alloc); - EXP_COUNTER_INCREMENT(exp, fid_alloc); - - rc = OBP(exp->exp_obd, fid_alloc)(env, exp, fid, 
op_data); - return rc; -} - -static inline int obd_pool_new(struct obd_device *obd, char *poolname) -{ - int rc; - - OBD_CHECK_DT_OP(obd, pool_new, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, pool_new); - - rc = OBP(obd, pool_new)(obd, poolname); - return rc; -} - -static inline int obd_pool_del(struct obd_device *obd, char *poolname) -{ - int rc; - - OBD_CHECK_DT_OP(obd, pool_del, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, pool_del); - - rc = OBP(obd, pool_del)(obd, poolname); - return rc; -} - -static inline int obd_pool_add(struct obd_device *obd, - char *poolname, - char *ostname) -{ - int rc; - - OBD_CHECK_DT_OP(obd, pool_add, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, pool_add); - - rc = OBP(obd, pool_add)(obd, poolname, ostname); - return rc; -} - -static inline int obd_pool_rem(struct obd_device *obd, - char *poolname, - char *ostname) -{ - int rc; - - OBD_CHECK_DT_OP(obd, pool_rem, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, pool_rem); - - rc = OBP(obd, pool_rem)(obd, poolname, ostname); - return rc; -} - -static inline void obd_getref(struct obd_device *obd) -{ - if (OBT(obd) && OBP(obd, getref)) { - OBD_COUNTER_INCREMENT(obd, getref); - OBP(obd, getref)(obd); - } -} - -static inline void obd_putref(struct obd_device *obd) -{ - if (OBT(obd) && OBP(obd, putref)) { - OBD_COUNTER_INCREMENT(obd, putref); - OBP(obd, putref)(obd); - } -} - -static inline int obd_init_export(struct obd_export *exp) -{ - int rc = 0; - - if ((exp)->exp_obd && OBT((exp)->exp_obd) && - OBP((exp)->exp_obd, init_export)) - rc = OBP(exp->exp_obd, init_export)(exp); - return rc; -} - -static inline int obd_destroy_export(struct obd_export *exp) -{ - if ((exp)->exp_obd && OBT((exp)->exp_obd) && - OBP((exp)->exp_obd, destroy_export)) - OBP(exp->exp_obd, destroy_export)(exp); - return 0; -} - -/* - * @max_age is the oldest time in jiffies that we accept using a cached data. - * If the cache is older than @max_age we will get a new value from the - * target. 
Use a value of "jiffies + HZ" to guarantee freshness. - */ -static inline int obd_statfs_async(struct obd_export *exp, - struct obd_info *oinfo, - __u64 max_age, - struct ptlrpc_request_set *rqset) -{ - int rc = 0; - struct obd_device *obd; - - if (!exp || !exp->exp_obd) - return -EINVAL; - - obd = exp->exp_obd; - OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, statfs); - - CDEBUG(D_SUPER, "%s: osfs %p age %llu, max_age %llu\n", - obd->obd_name, &obd->obd_osfs, obd->obd_osfs_age, max_age); - if (time_before64(obd->obd_osfs_age, max_age)) { - rc = OBP(obd, statfs_async)(exp, oinfo, max_age, rqset); - } else { - CDEBUG(D_SUPER, - "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n", - obd->obd_name, &obd->obd_osfs, - obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks, - obd->obd_osfs.os_ffree, obd->obd_osfs.os_files); - spin_lock(&obd->obd_osfs_lock); - memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs)); - spin_unlock(&obd->obd_osfs_lock); - oinfo->oi_flags |= OBD_STATFS_FROM_CACHE; - if (oinfo->oi_cb_up) - oinfo->oi_cb_up(oinfo, 0); - } - return rc; -} - -static inline int obd_statfs_rqset(struct obd_export *exp, - struct obd_statfs *osfs, __u64 max_age, - __u32 flags) -{ - struct ptlrpc_request_set *set = NULL; - struct obd_info oinfo = { - .oi_osfs = osfs, - .oi_flags = flags, - }; - int rc = 0; - - set = ptlrpc_prep_set(); - if (!set) - return -ENOMEM; - - rc = obd_statfs_async(exp, &oinfo, max_age, set); - if (rc == 0) - rc = ptlrpc_set_wait(set); - ptlrpc_set_destroy(set); - return rc; -} - -/* - * @max_age is the oldest time in jiffies that we accept using a cached data. - * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "jiffies + HZ" to guarantee freshness. 
- */ -static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, - struct obd_statfs *osfs, __u64 max_age, - __u32 flags) -{ - int rc = 0; - struct obd_device *obd = exp->exp_obd; - - if (!obd) - return -EINVAL; - - OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP); - OBD_COUNTER_INCREMENT(obd, statfs); - - CDEBUG(D_SUPER, "osfs %llu, max_age %llu\n", - obd->obd_osfs_age, max_age); - if (time_before64(obd->obd_osfs_age, max_age)) { - rc = OBP(obd, statfs)(env, exp, osfs, max_age, flags); - if (rc == 0) { - spin_lock(&obd->obd_osfs_lock); - memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs)); - obd->obd_osfs_age = get_jiffies_64(); - spin_unlock(&obd->obd_osfs_lock); - } - } else { - CDEBUG(D_SUPER, - "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n", - obd->obd_name, &obd->obd_osfs, - obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks, - obd->obd_osfs.os_ffree, obd->obd_osfs.os_files); - spin_lock(&obd->obd_osfs_lock); - memcpy(osfs, &obd->obd_osfs, sizeof(*osfs)); - spin_unlock(&obd->obd_osfs_lock); - } - return rc; -} - -static inline int obd_preprw(const struct lu_env *env, int cmd, - struct obd_export *exp, struct obdo *oa, - int objcount, struct obd_ioobj *obj, - struct niobuf_remote *remote, int *pages, - struct niobuf_local *local) -{ - int rc; - - EXP_CHECK_DT_OP(exp, preprw); - EXP_COUNTER_INCREMENT(exp, preprw); - - rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote, - pages, local); - return rc; -} - -static inline int obd_commitrw(const struct lu_env *env, int cmd, - struct obd_export *exp, struct obdo *oa, - int objcount, struct obd_ioobj *obj, - struct niobuf_remote *rnb, int pages, - struct niobuf_local *local, int rc) -{ - EXP_CHECK_DT_OP(exp, commitrw); - EXP_COUNTER_INCREMENT(exp, commitrw); - - rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj, - rnb, pages, local, rc); - return rc; -} - -static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void 
__user *uarg) -{ - int rc; - - EXP_CHECK_DT_OP(exp, iocontrol); - EXP_COUNTER_INCREMENT(exp, iocontrol); - - rc = OBP(exp->exp_obd, iocontrol)(cmd, exp, len, karg, uarg); - return rc; -} - -static inline void obd_import_event(struct obd_device *obd, - struct obd_import *imp, - enum obd_import_event event) -{ - if (!obd) { - CERROR("NULL device\n"); - return; - } - if (obd->obd_set_up && OBP(obd, import_event)) { - OBD_COUNTER_INCREMENT(obd, import_event); - OBP(obd, import_event)(obd, imp, event); - } -} - -static inline int obd_notify(struct obd_device *obd, - struct obd_device *watched, - enum obd_notify_event ev, - void *data) -{ - int rc; - - rc = obd_check_dev(obd); - if (rc) - return rc; - - if (!obd->obd_set_up) { - CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name); - return -EINVAL; - } - - if (!OBP(obd, notify)) { - CDEBUG(D_HA, "obd %s has no notify handler\n", obd->obd_name); - return -ENOSYS; - } - - OBD_COUNTER_INCREMENT(obd, notify); - rc = OBP(obd, notify)(obd, watched, ev, data); - return rc; -} - -static inline int obd_notify_observer(struct obd_device *observer, - struct obd_device *observed, - enum obd_notify_event ev, - void *data) -{ - int rc1; - int rc2; - - struct obd_notify_upcall *onu; - - if (observer->obd_observer) - rc1 = obd_notify(observer->obd_observer, observed, ev, data); - else - rc1 = 0; - /* - * Also, call non-obd listener, if any - */ - onu = &observer->obd_upcall; - if (onu->onu_upcall) - rc2 = onu->onu_upcall(observer, observed, ev, - onu->onu_owner, NULL); - else - rc2 = 0; - - return rc1 ? 
rc1 : rc2; -} - -static inline int obd_quotactl(struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - int rc; - - EXP_CHECK_DT_OP(exp, quotactl); - EXP_COUNTER_INCREMENT(exp, quotactl); - - rc = OBP(exp->exp_obd, quotactl)(exp->exp_obd, exp, oqctl); - return rc; -} - -static inline int obd_health_check(const struct lu_env *env, - struct obd_device *obd) -{ - /* - * returns: 0 on healthy - * >0 on unhealthy + reason code/flag - * however the only supported reason == 1 right now - * We'll need to define some better reasons - * or flags in the future. - * <0 on error - */ - int rc; - - /* don't use EXP_CHECK_DT_OP, because NULL method is normal here */ - if (!obd || !OBT(obd)) { - CERROR("cleaned up obd\n"); - return -EOPNOTSUPP; - } - if (!obd->obd_set_up || obd->obd_stopping) - return 0; - if (!OBP(obd, health_check)) - return 0; - - rc = OBP(obd, health_check)(env, obd); - return rc; -} - -static inline int obd_register_observer(struct obd_device *obd, - struct obd_device *observer) -{ - int rc; - - rc = obd_check_dev(obd); - if (rc) - return rc; - down_write(&obd->obd_observer_link_sem); - if (obd->obd_observer && observer) { - up_write(&obd->obd_observer_link_sem); - return -EALREADY; - } - obd->obd_observer = observer; - up_write(&obd->obd_observer_link_sem); - return 0; -} - -/* metadata helpers */ -static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid) -{ - int rc; - - EXP_CHECK_MD_OP(exp, getstatus); - EXP_MD_COUNTER_INCREMENT(exp, getstatus); - rc = MDP(exp->exp_obd, getstatus)(exp, fid); - return rc; -} - -static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, getattr); - EXP_MD_COUNTER_INCREMENT(exp, getattr); - rc = MDP(exp->exp_obd, getattr)(exp, op_data, request); - return rc; -} - -static inline int md_null_inode(struct obd_export *exp, - const struct lu_fid *fid) -{ - int rc; - - EXP_CHECK_MD_OP(exp, null_inode); - 
EXP_MD_COUNTER_INCREMENT(exp, null_inode); - rc = MDP(exp->exp_obd, null_inode)(exp, fid); - return rc; -} - -static inline int md_close(struct obd_export *exp, struct md_op_data *op_data, - struct md_open_data *mod, - struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, close); - EXP_MD_COUNTER_INCREMENT(exp, close); - rc = MDP(exp->exp_obd, close)(exp, op_data, mod, request); - return rc; -} - -static inline int md_create(struct obd_export *exp, struct md_op_data *op_data, - const void *data, size_t datalen, umode_t mode, - uid_t uid, gid_t gid, kernel_cap_t cap_effective, - __u64 rdev, struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, create); - EXP_MD_COUNTER_INCREMENT(exp, create); - rc = MDP(exp->exp_obd, create)(exp, op_data, data, datalen, mode, - uid, gid, cap_effective, rdev, request); - return rc; -} - -static inline int md_enqueue(struct obd_export *exp, - struct ldlm_enqueue_info *einfo, - const union ldlm_policy_data *policy, - struct md_op_data *op_data, - struct lustre_handle *lockh, - __u64 extra_lock_flags) -{ - int rc; - - EXP_CHECK_MD_OP(exp, enqueue); - EXP_MD_COUNTER_INCREMENT(exp, enqueue); - rc = MDP(exp->exp_obd, enqueue)(exp, einfo, policy, op_data, lockh, - extra_lock_flags); - return rc; -} - -static inline int md_getattr_name(struct obd_export *exp, - struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, getattr_name); - EXP_MD_COUNTER_INCREMENT(exp, getattr_name); - rc = MDP(exp->exp_obd, getattr_name)(exp, op_data, request); - return rc; -} - -static inline int md_intent_lock(struct obd_export *exp, - struct md_op_data *op_data, - struct lookup_intent *it, - struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, - __u64 extra_lock_flags) -{ - int rc; - - EXP_CHECK_MD_OP(exp, intent_lock); - EXP_MD_COUNTER_INCREMENT(exp, intent_lock); - rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, it, reqp, - cb_blocking, extra_lock_flags); - return 
rc; -} - -static inline int md_link(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, link); - EXP_MD_COUNTER_INCREMENT(exp, link); - rc = MDP(exp->exp_obd, link)(exp, op_data, request); - return rc; -} - -static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data, - const char *old, size_t oldlen, const char *new, - size_t newlen, struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, rename); - EXP_MD_COUNTER_INCREMENT(exp, rename); - rc = MDP(exp->exp_obd, rename)(exp, op_data, old, oldlen, new, - newlen, request); - return rc; -} - -static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data, - void *ea, size_t ealen, - struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, setattr); - EXP_MD_COUNTER_INCREMENT(exp, setattr); - rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, request); - return rc; -} - -static inline int md_sync(struct obd_export *exp, const struct lu_fid *fid, - struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, sync); - EXP_MD_COUNTER_INCREMENT(exp, sync); - rc = MDP(exp->exp_obd, sync)(exp, fid, request); - return rc; -} - -static inline int md_read_page(struct obd_export *exp, - struct md_op_data *op_data, - struct md_callback *cb_op, - __u64 hash_offset, - struct page **ppage) -{ - int rc; - - EXP_CHECK_MD_OP(exp, read_page); - EXP_MD_COUNTER_INCREMENT(exp, read_page); - rc = MDP(exp->exp_obd, read_page)(exp, op_data, cb_op, hash_offset, - ppage); - return rc; -} - -static inline int md_unlink(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - int rc; - - EXP_CHECK_MD_OP(exp, unlink); - EXP_MD_COUNTER_INCREMENT(exp, unlink); - rc = MDP(exp->exp_obd, unlink)(exp, op_data, request); - return rc; -} - -static inline int md_get_lustre_md(struct obd_export *exp, - struct ptlrpc_request *req, - struct obd_export *dt_exp, - struct 
obd_export *md_exp, - struct lustre_md *md) -{ - EXP_CHECK_MD_OP(exp, get_lustre_md); - EXP_MD_COUNTER_INCREMENT(exp, get_lustre_md); - return MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md); -} - -static inline int md_free_lustre_md(struct obd_export *exp, - struct lustre_md *md) -{ - EXP_CHECK_MD_OP(exp, free_lustre_md); - EXP_MD_COUNTER_INCREMENT(exp, free_lustre_md); - return MDP(exp->exp_obd, free_lustre_md)(exp, md); -} - -static inline int md_merge_attr(struct obd_export *exp, - const struct lmv_stripe_md *lsm, - struct cl_attr *attr, - ldlm_blocking_callback cb) -{ - EXP_CHECK_MD_OP(exp, merge_attr); - EXP_MD_COUNTER_INCREMENT(exp, merge_attr); - return MDP(exp->exp_obd, merge_attr)(exp, lsm, attr, cb); -} - -static inline int md_setxattr(struct obd_export *exp, const struct lu_fid *fid, - u64 obd_md_valid, const char *name, - const char *value, size_t value_size, - unsigned int xattr_flags, u32 suppgid, - struct ptlrpc_request **request) -{ - EXP_CHECK_MD_OP(exp, setxattr); - EXP_MD_COUNTER_INCREMENT(exp, setxattr); - return MDP(exp->exp_obd, setxattr)(exp, fid, obd_md_valid, name, - value, value_size, xattr_flags, - suppgid, request); -} - -static inline int md_getxattr(struct obd_export *exp, const struct lu_fid *fid, - u64 obd_md_valid, const char *name, - size_t buf_size, struct ptlrpc_request **req) -{ - EXP_CHECK_MD_OP(exp, getxattr); - EXP_MD_COUNTER_INCREMENT(exp, getxattr); - return MDP(exp->exp_obd, getxattr)(exp, fid, obd_md_valid, name, - buf_size, req); -} - -static inline int md_set_open_replay_data(struct obd_export *exp, - struct obd_client_handle *och, - struct lookup_intent *it) -{ - EXP_CHECK_MD_OP(exp, set_open_replay_data); - EXP_MD_COUNTER_INCREMENT(exp, set_open_replay_data); - return MDP(exp->exp_obd, set_open_replay_data)(exp, och, it); -} - -static inline int md_clear_open_replay_data(struct obd_export *exp, - struct obd_client_handle *och) -{ - EXP_CHECK_MD_OP(exp, clear_open_replay_data); - 
EXP_MD_COUNTER_INCREMENT(exp, clear_open_replay_data); - return MDP(exp->exp_obd, clear_open_replay_data)(exp, och); -} - -static inline int md_set_lock_data(struct obd_export *exp, - const struct lustre_handle *lockh, - void *data, __u64 *bits) -{ - EXP_CHECK_MD_OP(exp, set_lock_data); - EXP_MD_COUNTER_INCREMENT(exp, set_lock_data); - return MDP(exp->exp_obd, set_lock_data)(exp, lockh, data, bits); -} - -static inline int md_cancel_unused(struct obd_export *exp, - const struct lu_fid *fid, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - enum ldlm_cancel_flags flags, - void *opaque) -{ - int rc; - - EXP_CHECK_MD_OP(exp, cancel_unused); - EXP_MD_COUNTER_INCREMENT(exp, cancel_unused); - - rc = MDP(exp->exp_obd, cancel_unused)(exp, fid, policy, mode, - flags, opaque); - return rc; -} - -static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, - enum ldlm_type type, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - struct lustre_handle *lockh) -{ - EXP_CHECK_MD_OP(exp, lock_match); - EXP_MD_COUNTER_INCREMENT(exp, lock_match); - return MDP(exp->exp_obd, lock_match)(exp, flags, fid, type, - policy, mode, lockh); -} - -static inline int md_init_ea_size(struct obd_export *exp, u32 easize, - u32 def_asize) -{ - EXP_CHECK_MD_OP(exp, init_ea_size); - EXP_MD_COUNTER_INCREMENT(exp, init_ea_size); - return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize); -} - -static inline int md_intent_getattr_async(struct obd_export *exp, - struct md_enqueue_info *minfo) -{ - int rc; - - EXP_CHECK_MD_OP(exp, intent_getattr_async); - EXP_MD_COUNTER_INCREMENT(exp, intent_getattr_async); - rc = MDP(exp->exp_obd, intent_getattr_async)(exp, minfo); - return rc; -} - -static inline int md_revalidate_lock(struct obd_export *exp, - struct lookup_intent *it, - struct lu_fid *fid, __u64 *bits) -{ - int rc; - - EXP_CHECK_MD_OP(exp, revalidate_lock); - EXP_MD_COUNTER_INCREMENT(exp, revalidate_lock); - rc = MDP(exp->exp_obd, 
revalidate_lock)(exp, it, fid, bits); - return rc; -} - -static inline int md_get_fid_from_lsm(struct obd_export *exp, - const struct lmv_stripe_md *lsm, - const char *name, int namelen, - struct lu_fid *fid) -{ - int rc; - - EXP_CHECK_MD_OP(exp, get_fid_from_lsm); - EXP_MD_COUNTER_INCREMENT(exp, get_fid_from_lsm); - rc = MDP(exp->exp_obd, get_fid_from_lsm)(exp, lsm, name, namelen, fid); - return rc; -} - -/* - * Unpack an MD struct from disk to in-memory format. - * Returns +ve size of unpacked MD (0 for free), or -ve error. - * - * If *plsm != NULL and lmm == NULL then *lsm will be freed. - * If *plsm == NULL then it will be allocated. - */ -static inline int md_unpackmd(struct obd_export *exp, - struct lmv_stripe_md **plsm, - const union lmv_mds_md *lmm, size_t lmm_size) -{ - int rc; - - EXP_CHECK_MD_OP(exp, unpackmd); - EXP_MD_COUNTER_INCREMENT(exp, unpackmd); - rc = MDP(exp->exp_obd, unpackmd)(exp, plsm, lmm, lmm_size); - return rc; -} - -/* OBD Metadata Support */ - -int obd_init_caches(void); -void obd_cleanup_caches(void); - -/* support routines */ -extern struct kmem_cache *obdo_cachep; - -typedef int (*register_lwp_cb)(void *data); - -struct lwp_register_item { - struct obd_export **lri_exp; - register_lwp_cb lri_cb_func; - void *lri_cb_data; - struct list_head lri_list; - char lri_name[MTI_NAME_MAXLEN]; -}; - -/* - * I'm as embarrassed about this as you are. 
- * - * // XXX do not look into _superhack with remaining eye - * // XXX if this were any uglier, I'd get my own show on MTV - */ -extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c); - -/* obd_mount.c */ -int lustre_unregister_fs(void); -int lustre_register_fs(void); -int lustre_check_exclusion(struct super_block *sb, char *svname); - -/* sysctl.c */ -int obd_sysctl_init(void); - -/* uuid.c */ -typedef __u8 class_uuid_t[16]; -void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out); - -/* lustre_peer.c */ -int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index); -int class_add_uuid(const char *uuid, __u64 nid); -int class_del_uuid(const char *uuid); -int class_check_uuid(struct obd_uuid *uuid, __u64 nid); -void class_init_uuidlist(void); -void class_exit_uuidlist(void); - -/* class_obd.c */ -extern char obd_jobid_node[]; -extern struct miscdevice obd_psdev; -extern spinlock_t obd_types_lock; -int class_procfs_init(void); -int class_procfs_clean(void); - -/* prng.c */ -#define ll_generate_random_uuid(uuid_out) \ - get_random_bytes(uuid_out, sizeof(class_uuid_t)) - -/* statfs_pack.c */ -struct kstatfs; -void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs); -void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs); - -/* root squash info */ -struct rw_semaphore; -struct root_squash_info { - uid_t rsi_uid; - gid_t rsi_gid; - struct list_head rsi_nosquash_nids; - struct rw_semaphore rsi_sem; -}; - -/* linux-module.c */ -int obd_ioctl_getdata(char **buf, int *len, void __user *arg); - -#endif /* __LINUX_OBD_CLASS_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h deleted file mode 100644 index 9e41633823f7..000000000000 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ /dev/null @@ -1,517 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE 
HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef _OBD_SUPPORT -#define _OBD_SUPPORT - -#include -#include - -#include -#include - -/* global variables */ -extern unsigned int obd_debug_peer_on_timeout; -extern unsigned int obd_dump_on_timeout; -extern unsigned int obd_dump_on_eviction; -/* obd_timeout should only be used for recovery, not for - * networking / disk / timings affected by load (use Adaptive Timeouts) - */ -extern unsigned int obd_timeout; /* seconds */ -extern unsigned int obd_timeout_set; -extern unsigned int at_min; -extern unsigned int at_max; -extern unsigned int at_history; -extern int at_early_margin; -extern int at_extra; -extern unsigned long obd_max_dirty_pages; -extern atomic_long_t obd_dirty_pages; -extern atomic_long_t obd_dirty_transit_pages; -extern char obd_jobid_var[]; - -/* Some hash init argument constants */ -/* Timeout definitions */ -#define OBD_TIMEOUT_DEFAULT 100 -/* Time to wait for all clients to reconnect during recovery (hard limit) */ -#define OBD_RECOVERY_TIME_HARD (obd_timeout * 9) -/* Time to wait for all clients to reconnect during recovery (soft limit) */ -/* Should be very conservative; must catch the first reconnect after reboot */ -#define OBD_RECOVERY_TIME_SOFT (obd_timeout * 3) -/* Change recovery-small 26b time if you change this */ -#define PING_INTERVAL max(obd_timeout / 4, 1U) -/* a bit more than maximal journal commit time in seconds */ -#define PING_INTERVAL_SHORT min(PING_INTERVAL, 7U) -/* Client may skip 1 ping; we must wait at least 2.5. But for multiple - * failover targets the client only pings one server at a time, and pings - * can be lost on a loaded network. Since eviction has serious consequences, - * and there's no urgent need to evict a client just because it's idle, we - * should be very conservative here. 
- */ -#define PING_EVICT_TIMEOUT (PING_INTERVAL * 6) -#define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */ -#define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */ -/* Max connect interval for nonresponsive servers; ~50s to avoid building up - * connect requests in the LND queues, but within obd_timeout so we don't - * miss the recovery window - */ -#define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout)) -#define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */ -/* In general this should be low to have quick detection of a system - * running on a backup server. (If it's too low, import_select_connection - * will increase the timeout anyhow.) - */ -#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout / 20) -/* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */ -#define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \ - INITIAL_CONNECT_TIMEOUT) -/* The min time a target should wait for clients to reconnect in recovery */ -#define OBD_RECOVERY_TIME_MIN (2 * RECONNECT_DELAY_MAX) -#define OBD_IR_FACTOR_MIN 1 -#define OBD_IR_FACTOR_MAX 10 -#define OBD_IR_FACTOR_DEFAULT (OBD_IR_FACTOR_MAX / 2) -/* default timeout for the MGS to become IR_FULL */ -#define OBD_IR_MGS_TIMEOUT (4 * obd_timeout) -#define LONG_UNLINK 300 /* Unlink should happen before now */ - -/** - * Time interval of shrink, if the client is "idle" more than this interval, - * then the ll_grant thread will return the requested grant space to filter - */ -#define GRANT_SHRINK_INTERVAL 1200/*20 minutes*/ - -#define OBD_FAIL_MDS 0x100 -#define OBD_FAIL_MDS_HANDLE_UNPACK 0x101 -#define OBD_FAIL_MDS_GETATTR_NET 0x102 -#define OBD_FAIL_MDS_GETATTR_PACK 0x103 -#define OBD_FAIL_MDS_READPAGE_NET 0x104 -#define OBD_FAIL_MDS_READPAGE_PACK 0x105 -#define OBD_FAIL_MDS_SENDPAGE 0x106 -#define OBD_FAIL_MDS_REINT_NET 0x107 -#define OBD_FAIL_MDS_REINT_UNPACK 0x108 -#define OBD_FAIL_MDS_REINT_SETATTR 0x109 
-#define OBD_FAIL_MDS_REINT_SETATTR_WRITE 0x10a -#define OBD_FAIL_MDS_REINT_CREATE 0x10b -#define OBD_FAIL_MDS_REINT_CREATE_WRITE 0x10c -#define OBD_FAIL_MDS_REINT_UNLINK 0x10d -#define OBD_FAIL_MDS_REINT_UNLINK_WRITE 0x10e -#define OBD_FAIL_MDS_REINT_LINK 0x10f -#define OBD_FAIL_MDS_REINT_LINK_WRITE 0x110 -#define OBD_FAIL_MDS_REINT_RENAME 0x111 -#define OBD_FAIL_MDS_REINT_RENAME_WRITE 0x112 -#define OBD_FAIL_MDS_OPEN_NET 0x113 -#define OBD_FAIL_MDS_OPEN_PACK 0x114 -#define OBD_FAIL_MDS_CLOSE_NET 0x115 -#define OBD_FAIL_MDS_CLOSE_PACK 0x116 -#define OBD_FAIL_MDS_CONNECT_NET 0x117 -#define OBD_FAIL_MDS_CONNECT_PACK 0x118 -#define OBD_FAIL_MDS_REINT_NET_REP 0x119 -#define OBD_FAIL_MDS_DISCONNECT_NET 0x11a -#define OBD_FAIL_MDS_GETSTATUS_NET 0x11b -#define OBD_FAIL_MDS_GETSTATUS_PACK 0x11c -#define OBD_FAIL_MDS_STATFS_PACK 0x11d -#define OBD_FAIL_MDS_STATFS_NET 0x11e -#define OBD_FAIL_MDS_GETATTR_NAME_NET 0x11f -#define OBD_FAIL_MDS_PIN_NET 0x120 -#define OBD_FAIL_MDS_UNPIN_NET 0x121 -#define OBD_FAIL_MDS_ALL_REPLY_NET 0x122 -#define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123 -#define OBD_FAIL_MDS_SYNC_NET 0x124 -#define OBD_FAIL_MDS_SYNC_PACK 0x125 -/* OBD_FAIL_MDS_DONE_WRITING_NET 0x126 obsolete since 2.8.0 */ -/* OBD_FAIL_MDS_DONE_WRITING_PACK 0x127 obsolete since 2.8.0 */ -#define OBD_FAIL_MDS_ALLOC_OBDO 0x128 -#define OBD_FAIL_MDS_PAUSE_OPEN 0x129 -#define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a -#define OBD_FAIL_MDS_OPEN_CREATE 0x12b -#define OBD_FAIL_MDS_OST_SETATTR 0x12c -/* OBD_FAIL_MDS_QUOTACHECK_NET 0x12d obsolete since 2.4 */ -#define OBD_FAIL_MDS_QUOTACTL_NET 0x12e -#define OBD_FAIL_MDS_CLIENT_ADD 0x12f -#define OBD_FAIL_MDS_GETXATTR_NET 0x130 -#define OBD_FAIL_MDS_GETXATTR_PACK 0x131 -#define OBD_FAIL_MDS_SETXATTR_NET 0x132 -#define OBD_FAIL_MDS_SETXATTR 0x133 -#define OBD_FAIL_MDS_SETXATTR_WRITE 0x134 -#define OBD_FAIL_MDS_FS_SETUP 0x135 -#define OBD_FAIL_MDS_RESEND 0x136 -#define OBD_FAIL_MDS_LLOG_CREATE_FAILED 0x137 -#define OBD_FAIL_MDS_LOV_SYNC_RACE 0x138 
-#define OBD_FAIL_MDS_OSC_PRECREATE 0x139 -#define OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a -#define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b -#define OBD_FAIL_MDS_BLOCK_QUOTA_REQ 0x13c -#define OBD_FAIL_MDS_DROP_QUOTA_REQ 0x13d -#define OBD_FAIL_MDS_REMOVE_COMMON_EA 0x13e -#define OBD_FAIL_MDS_ALLOW_COMMON_EA_SETTING 0x13f -#define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140 -#define OBD_FAIL_MDS_LOV_PREP_CREATE 0x141 -#define OBD_FAIL_MDS_REINT_DELAY 0x142 -#define OBD_FAIL_MDS_READLINK_EPROTO 0x143 -#define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x144 -#define OBD_FAIL_MDS_PDO_LOCK 0x145 -#define OBD_FAIL_MDS_PDO_LOCK2 0x146 -#define OBD_FAIL_MDS_OSC_CREATE_FAIL 0x147 -#define OBD_FAIL_MDS_NEGATIVE_POSITIVE 0x148 -#define OBD_FAIL_MDS_HSM_STATE_GET_NET 0x149 -#define OBD_FAIL_MDS_HSM_STATE_SET_NET 0x14a -#define OBD_FAIL_MDS_HSM_PROGRESS_NET 0x14b -#define OBD_FAIL_MDS_HSM_REQUEST_NET 0x14c -#define OBD_FAIL_MDS_HSM_CT_REGISTER_NET 0x14d -#define OBD_FAIL_MDS_HSM_CT_UNREGISTER_NET 0x14e -#define OBD_FAIL_MDS_SWAP_LAYOUTS_NET 0x14f -#define OBD_FAIL_MDS_HSM_ACTION_NET 0x150 -#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151 - -/* layout lock */ -#define OBD_FAIL_MDS_NO_LL_GETATTR 0x170 -#define OBD_FAIL_MDS_NO_LL_OPEN 0x171 -#define OBD_FAIL_MDS_LL_BLOCK 0x172 - -/* CMD */ -#define OBD_FAIL_MDS_IS_SUBDIR_NET 0x180 -#define OBD_FAIL_MDS_IS_SUBDIR_PACK 0x181 -#define OBD_FAIL_MDS_SET_INFO_NET 0x182 -#define OBD_FAIL_MDS_WRITEPAGE_NET 0x183 -#define OBD_FAIL_MDS_WRITEPAGE_PACK 0x184 -#define OBD_FAIL_MDS_RECOVERY_ACCEPTS_GAPS 0x185 -#define OBD_FAIL_MDS_GET_INFO_NET 0x186 -#define OBD_FAIL_MDS_DQACQ_NET 0x187 - -/* OI scrub */ -#define OBD_FAIL_OSD_SCRUB_DELAY 0x190 -#define OBD_FAIL_OSD_SCRUB_CRASH 0x191 -#define OBD_FAIL_OSD_SCRUB_FATAL 0x192 -#define OBD_FAIL_OSD_FID_MAPPING 0x193 -#define OBD_FAIL_OSD_LMA_INCOMPAT 0x194 -#define OBD_FAIL_OSD_COMPAT_INVALID_ENTRY 0x195 - -#define OBD_FAIL_OST 0x200 -#define OBD_FAIL_OST_CONNECT_NET 0x201 -#define OBD_FAIL_OST_DISCONNECT_NET 0x202 -#define 
OBD_FAIL_OST_GET_INFO_NET 0x203 -#define OBD_FAIL_OST_CREATE_NET 0x204 -#define OBD_FAIL_OST_DESTROY_NET 0x205 -#define OBD_FAIL_OST_GETATTR_NET 0x206 -#define OBD_FAIL_OST_SETATTR_NET 0x207 -#define OBD_FAIL_OST_OPEN_NET 0x208 -#define OBD_FAIL_OST_CLOSE_NET 0x209 -#define OBD_FAIL_OST_BRW_NET 0x20a -#define OBD_FAIL_OST_PUNCH_NET 0x20b -#define OBD_FAIL_OST_STATFS_NET 0x20c -#define OBD_FAIL_OST_HANDLE_UNPACK 0x20d -#define OBD_FAIL_OST_BRW_WRITE_BULK 0x20e -#define OBD_FAIL_OST_BRW_READ_BULK 0x20f -#define OBD_FAIL_OST_SYNC_NET 0x210 -#define OBD_FAIL_OST_ALL_REPLY_NET 0x211 -#define OBD_FAIL_OST_ALL_REQUEST_NET 0x212 -#define OBD_FAIL_OST_LDLM_REPLY_NET 0x213 -#define OBD_FAIL_OST_BRW_PAUSE_BULK 0x214 -#define OBD_FAIL_OST_ENOSPC 0x215 -#define OBD_FAIL_OST_EROFS 0x216 -#define OBD_FAIL_OST_ENOENT 0x217 -/* OBD_FAIL_OST_QUOTACHECK_NET 0x218 obsolete since 2.4 */ -#define OBD_FAIL_OST_QUOTACTL_NET 0x219 -#define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a -#define OBD_FAIL_OST_CHECKSUM_SEND 0x21b -#define OBD_FAIL_OST_BRW_SIZE 0x21c -#define OBD_FAIL_OST_DROP_REQ 0x21d -#define OBD_FAIL_OST_SETATTR_CREDITS 0x21e -#define OBD_FAIL_OST_HOLD_WRITE_RPC 0x21f -#define OBD_FAIL_OST_BRW_WRITE_BULK2 0x220 -#define OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221 -#define OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222 -#define OBD_FAIL_OST_PAUSE_CREATE 0x223 -#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224 -#define OBD_FAIL_OST_CONNECT_NET2 0x225 -#define OBD_FAIL_OST_NOMEM 0x226 -#define OBD_FAIL_OST_BRW_PAUSE_BULK2 0x227 -#define OBD_FAIL_OST_MAPBLK_ENOSPC 0x228 -#define OBD_FAIL_OST_ENOINO 0x229 -#define OBD_FAIL_OST_DQACQ_NET 0x230 -#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231 -#define OBD_FAIL_OST_SET_INFO_NET 0x232 - -#define OBD_FAIL_LDLM 0x300 -#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301 -#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302 -#define OBD_FAIL_LDLM_CONVERT_NET 0x303 -#define OBD_FAIL_LDLM_CANCEL_NET 0x304 -#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305 -#define OBD_FAIL_LDLM_CP_CALLBACK_NET 
0x306 -#define OBD_FAIL_LDLM_GL_CALLBACK_NET 0x307 -#define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308 -#define OBD_FAIL_LDLM_ENQUEUE_INTENT_ERR 0x309 -#define OBD_FAIL_LDLM_CREATE_RESOURCE 0x30a -#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b -#define OBD_FAIL_LDLM_REPLY 0x30c -#define OBD_FAIL_LDLM_RECOV_CLIENTS 0x30d -#define OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT 0x30e -#define OBD_FAIL_LDLM_GLIMPSE 0x30f -#define OBD_FAIL_LDLM_CANCEL_RACE 0x310 -#define OBD_FAIL_LDLM_CANCEL_EVICT_RACE 0x311 -#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312 -#define OBD_FAIL_LDLM_CLOSE_THREAD 0x313 -#define OBD_FAIL_LDLM_CANCEL_BL_CB_RACE 0x314 -#define OBD_FAIL_LDLM_CP_CB_WAIT 0x315 -#define OBD_FAIL_LDLM_OST_FAIL_RACE 0x316 -#define OBD_FAIL_LDLM_INTR_CP_AST 0x317 -#define OBD_FAIL_LDLM_CP_BL_RACE 0x318 -#define OBD_FAIL_LDLM_NEW_LOCK 0x319 -#define OBD_FAIL_LDLM_AGL_DELAY 0x31a -#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b -#define OBD_FAIL_LDLM_OST_LVB 0x31c -#define OBD_FAIL_LDLM_ENQUEUE_HANG 0x31d -#define OBD_FAIL_LDLM_PAUSE_CANCEL2 0x31f -#define OBD_FAIL_LDLM_CP_CB_WAIT2 0x320 -#define OBD_FAIL_LDLM_CP_CB_WAIT3 0x321 -#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322 -#define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323 - -#define OBD_FAIL_LDLM_GRANT_CHECK 0x32a - -/* LOCKLESS IO */ -#define OBD_FAIL_LDLM_SET_CONTENTION 0x385 - -#define OBD_FAIL_OSC 0x400 -#define OBD_FAIL_OSC_BRW_READ_BULK 0x401 -#define OBD_FAIL_OSC_BRW_WRITE_BULK 0x402 -#define OBD_FAIL_OSC_LOCK_BL_AST 0x403 -#define OBD_FAIL_OSC_LOCK_CP_AST 0x404 -#define OBD_FAIL_OSC_MATCH 0x405 -#define OBD_FAIL_OSC_BRW_PREP_REQ 0x406 -#define OBD_FAIL_OSC_SHUTDOWN 0x407 -#define OBD_FAIL_OSC_CHECKSUM_RECEIVE 0x408 -#define OBD_FAIL_OSC_CHECKSUM_SEND 0x409 -#define OBD_FAIL_OSC_BRW_PREP_REQ2 0x40a -#define OBD_FAIL_OSC_CONNECT_CKSUM 0x40b -#define OBD_FAIL_OSC_CKSUM_ADLER_ONLY 0x40c -#define OBD_FAIL_OSC_DIO_PAUSE 0x40d -#define OBD_FAIL_OSC_OBJECT_CONTENTION 0x40e -#define OBD_FAIL_OSC_CP_CANCEL_RACE 0x40f -#define OBD_FAIL_OSC_CP_ENQ_RACE 0x410 -#define 
OBD_FAIL_OSC_NO_GRANT 0x411 -#define OBD_FAIL_OSC_DELAY_SETTIME 0x412 -#define OBD_FAIL_OSC_DELAY_IO 0x414 - -#define OBD_FAIL_PTLRPC 0x500 -#define OBD_FAIL_PTLRPC_ACK 0x501 -#define OBD_FAIL_PTLRPC_RQBD 0x502 -#define OBD_FAIL_PTLRPC_BULK_GET_NET 0x503 -#define OBD_FAIL_PTLRPC_BULK_PUT_NET 0x504 -#define OBD_FAIL_PTLRPC_DROP_RPC 0x505 -#define OBD_FAIL_PTLRPC_DELAY_SEND 0x506 -#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507 -#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB 0x508 -#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a -#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c -#define OBD_FAIL_PTLRPC_IMP_DEACTIVE 0x50d -#define OBD_FAIL_PTLRPC_DUMP_LOG 0x50e -#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f -#define OBD_FAIL_PTLRPC_LONG_BULK_UNLINK 0x510 -#define OBD_FAIL_PTLRPC_HPREQ_TIMEOUT 0x511 -#define OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT 0x512 -#define OBD_FAIL_PTLRPC_DROP_REQ_OPC 0x513 -#define OBD_FAIL_PTLRPC_FINISH_REPLAY 0x514 -#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB2 0x515 -#define OBD_FAIL_PTLRPC_DELAY_IMP_FULL 0x516 -#define OBD_FAIL_PTLRPC_CANCEL_RESEND 0x517 -#define OBD_FAIL_PTLRPC_DROP_BULK 0x51a -#define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b -#define OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK 0x51c - -#define OBD_FAIL_OBD_PING_NET 0x600 -#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601 -#define OBD_FAIL_OBD_LOGD_NET 0x602 -/* OBD_FAIL_OBD_QC_CALLBACK_NET 0x603 obsolete since 2.4 */ -#define OBD_FAIL_OBD_DQACQ 0x604 -#define OBD_FAIL_OBD_LLOG_SETUP 0x605 -#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606 -#define OBD_FAIL_OBD_IDX_READ_NET 0x607 -#define OBD_FAIL_OBD_IDX_READ_BREAK 0x608 -#define OBD_FAIL_OBD_NO_LRU 0x609 - -#define OBD_FAIL_TGT_REPLY_NET 0x700 -#define OBD_FAIL_TGT_CONN_RACE 0x701 -#define OBD_FAIL_TGT_FORCE_RECONNECT 0x702 -#define OBD_FAIL_TGT_DELAY_CONNECT 0x703 -#define OBD_FAIL_TGT_DELAY_RECONNECT 0x704 -#define OBD_FAIL_TGT_DELAY_PRECREATE 0x705 -#define OBD_FAIL_TGT_TOOMANY_THREADS 0x706 -#define OBD_FAIL_TGT_REPLAY_DROP 0x707 -#define OBD_FAIL_TGT_FAKE_EXP 0x708 -#define 
OBD_FAIL_TGT_REPLAY_DELAY 0x709 -#define OBD_FAIL_TGT_LAST_REPLAY 0x710 -#define OBD_FAIL_TGT_CLIENT_ADD 0x711 -#define OBD_FAIL_TGT_RCVG_FLAG 0x712 -#define OBD_FAIL_TGT_DELAY_CONDITIONAL 0x713 - -#define OBD_FAIL_MDC_REVALIDATE_PAUSE 0x800 -#define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801 -#define OBD_FAIL_MDC_OLD_EXT_FLAGS 0x802 -#define OBD_FAIL_MDC_GETATTR_ENQUEUE 0x803 -#define OBD_FAIL_MDC_RPCS_SEM 0x804 -#define OBD_FAIL_MDC_LIGHTWEIGHT 0x805 -#define OBD_FAIL_MDC_CLOSE 0x806 - -#define OBD_FAIL_MGS 0x900 -#define OBD_FAIL_MGS_ALL_REQUEST_NET 0x901 -#define OBD_FAIL_MGS_ALL_REPLY_NET 0x902 -#define OBD_FAIL_MGC_PAUSE_PROCESS_LOG 0x903 -#define OBD_FAIL_MGS_PAUSE_REQ 0x904 -#define OBD_FAIL_MGS_PAUSE_TARGET_REG 0x905 -#define OBD_FAIL_MGS_CONNECT_NET 0x906 -#define OBD_FAIL_MGS_DISCONNECT_NET 0x907 -#define OBD_FAIL_MGS_SET_INFO_NET 0x908 -#define OBD_FAIL_MGS_EXCEPTION_NET 0x909 -#define OBD_FAIL_MGS_TARGET_REG_NET 0x90a -#define OBD_FAIL_MGS_TARGET_DEL_NET 0x90b -#define OBD_FAIL_MGS_CONFIG_READ_NET 0x90c - -#define OBD_FAIL_QUOTA_DQACQ_NET 0xA01 -#define OBD_FAIL_QUOTA_EDQUOT 0xA02 -#define OBD_FAIL_QUOTA_DELAY_REINT 0xA03 -#define OBD_FAIL_QUOTA_RECOVERABLE_ERR 0xA04 - -#define OBD_FAIL_LPROC_REMOVE 0xB00 - -#define OBD_FAIL_SEQ 0x1000 -#define OBD_FAIL_SEQ_QUERY_NET 0x1001 -#define OBD_FAIL_SEQ_EXHAUST 0x1002 - -#define OBD_FAIL_FLD 0x1100 -#define OBD_FAIL_FLD_QUERY_NET 0x1101 -#define OBD_FAIL_FLD_READ_NET 0x1102 - -#define OBD_FAIL_SEC_CTX 0x1200 -#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201 -#define OBD_FAIL_SEC_CTX_INIT_CONT_NET 0x1202 -#define OBD_FAIL_SEC_CTX_FINI_NET 0x1203 -#define OBD_FAIL_SEC_CTX_HDL_PAUSE 0x1204 - -#define OBD_FAIL_LLOG 0x1300 -#define OBD_FAIL_LLOG_ORIGIN_CONNECT_NET 0x1301 -#define OBD_FAIL_LLOG_ORIGIN_HANDLE_CREATE_NET 0x1302 -#define OBD_FAIL_LLOG_ORIGIN_HANDLE_DESTROY_NET 0x1303 -#define OBD_FAIL_LLOG_ORIGIN_HANDLE_READ_HEADER_NET 0x1304 -#define OBD_FAIL_LLOG_ORIGIN_HANDLE_NEXT_BLOCK_NET 0x1305 -#define 
OBD_FAIL_LLOG_ORIGIN_HANDLE_PREV_BLOCK_NET 0x1306 -#define OBD_FAIL_LLOG_ORIGIN_HANDLE_WRITE_REC_NET 0x1307 -#define OBD_FAIL_LLOG_ORIGIN_HANDLE_CLOSE_NET 0x1308 -#define OBD_FAIL_LLOG_CATINFO_NET 0x1309 -#define OBD_FAIL_MDS_SYNC_CAPA_SL 0x1310 -#define OBD_FAIL_SEQ_ALLOC 0x1311 - -#define OBD_FAIL_LLITE 0x1400 -#define OBD_FAIL_LLITE_FAULT_TRUNC_RACE 0x1401 -#define OBD_FAIL_LOCK_STATE_WAIT_INTR 0x1402 -#define OBD_FAIL_LOV_INIT 0x1403 -#define OBD_FAIL_GLIMPSE_DELAY 0x1404 -#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405 -#define OBD_FAIL_MAKE_LOVEA_HOLE 0x1406 -#define OBD_FAIL_LLITE_LOST_LAYOUT 0x1407 -#define OBD_FAIL_GETATTR_DELAY 0x1409 - -#define OBD_FAIL_FID_INDIR 0x1501 -#define OBD_FAIL_FID_INLMA 0x1502 -#define OBD_FAIL_FID_IGIF 0x1504 -#define OBD_FAIL_FID_LOOKUP 0x1505 -#define OBD_FAIL_FID_NOLMA 0x1506 - -/* LFSCK */ -#define OBD_FAIL_LFSCK_DELAY1 0x1600 -#define OBD_FAIL_LFSCK_DELAY2 0x1601 -#define OBD_FAIL_LFSCK_DELAY3 0x1602 -#define OBD_FAIL_LFSCK_LINKEA_CRASH 0x1603 -#define OBD_FAIL_LFSCK_LINKEA_MORE 0x1604 -#define OBD_FAIL_LFSCK_LINKEA_MORE2 0x1605 -#define OBD_FAIL_LFSCK_FATAL1 0x1608 -#define OBD_FAIL_LFSCK_FATAL2 0x1609 -#define OBD_FAIL_LFSCK_CRASH 0x160a -#define OBD_FAIL_LFSCK_NO_AUTO 0x160b -#define OBD_FAIL_LFSCK_NO_DOUBLESCAN 0x160c -#define OBD_FAIL_LFSCK_INVALID_PFID 0x1619 -#define OBD_FAIL_LFSCK_BAD_NAME_HASH 0x1628 - -/* UPDATE */ -#define OBD_FAIL_UPDATE_OBJ_NET 0x1700 -#define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701 - -/* LMV */ -#define OBD_FAIL_UNKNOWN_LMV_STRIPE 0x1901 - -/* Assign references to moved code to reduce code changes */ -#define OBD_FAIL_PRECHECK(id) CFS_FAIL_PRECHECK(id) -#define OBD_FAIL_CHECK(id) CFS_FAIL_CHECK(id) -#define OBD_FAIL_CHECK_VALUE(id, value) CFS_FAIL_CHECK_VALUE(id, value) -#define OBD_FAIL_CHECK_ORSET(id, value) CFS_FAIL_CHECK_ORSET(id, value) -#define OBD_FAIL_CHECK_RESET(id, value) CFS_FAIL_CHECK_RESET(id, value) -#define OBD_FAIL_RETURN(id, ret) CFS_FAIL_RETURN(id, ret) -#define OBD_FAIL_TIMEOUT(id, 
secs) CFS_FAIL_TIMEOUT(id, secs) -#define OBD_FAIL_TIMEOUT_MS(id, ms) CFS_FAIL_TIMEOUT_MS(id, ms) -#define OBD_FAIL_TIMEOUT_ORSET(id, value, secs) CFS_FAIL_TIMEOUT_ORSET(id, value, secs) -#define OBD_RACE(id) CFS_RACE(id) -#define OBD_FAIL_ONCE CFS_FAIL_ONCE -#define OBD_FAILED CFS_FAILED - -#ifdef CONFIG_DEBUG_SLAB -#define POISON(ptr, c, s) do {} while (0) -#define POISON_PTR(ptr) ((void)0) -#else -#define POISON(ptr, c, s) memset(ptr, c, s) -#define POISON_PTR(ptr) ((ptr) = (void *)0xdeadbeef) -#endif - -#ifdef POISON_BULK -#define POISON_PAGE(page, val) do { \ - memset(kmap(page), val, PAGE_SIZE); \ - kunmap(page); \ -} while (0) -#else -#define POISON_PAGE(page, val) do { } while (0) -#endif - -#define OBD_FREE_RCU(ptr, size, handle) \ -do { \ - struct portals_handle *__h = (handle); \ - \ - __h->h_cookie = (unsigned long)(ptr); \ - __h->h_size = (size); \ - call_rcu(&__h->h_rcu, class_handle_free_cb); \ - POISON_PTR(ptr); \ -} while (0) - -#define KEY_IS(str) \ - (keylen >= (sizeof(str) - 1) && \ - memcmp(key, str, (sizeof(str) - 1)) == 0) - -#endif diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h deleted file mode 100644 index 9450da728160..000000000000 --- a/drivers/staging/lustre/lustre/include/seq_range.h +++ /dev/null @@ -1,200 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2014, Intel Corporation. - * - * Copyright 2015 Cray Inc, all rights reserved. - * Author: Ben Evans. - * - * Define lu_seq_range associated functions - */ - -#ifndef _SEQ_RANGE_H_ -#define _SEQ_RANGE_H_ - -#include - -/** - * computes the sequence range type \a range - */ - -static inline unsigned int fld_range_type(const struct lu_seq_range *range) -{ - return range->lsr_flags & LU_SEQ_RANGE_MASK; -} - -/** - * Is this sequence range an OST? \a range - */ - -static inline bool fld_range_is_ost(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_OST; -} - -/** - * Is this sequence range an MDT? \a range - */ - -static inline bool fld_range_is_mdt(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_MDT; -} - -/** - * ANY range is only used when the fld client sends a fld query request, - * but it does not know whether the seq is an MDT or OST, so it will send the - * request with ANY type, which means any seq type from the lookup can be - * expected. 
/a range - */ -static inline unsigned int fld_range_is_any(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_ANY; -} - -/** - * Apply flags to range \a range \a flags - */ - -static inline void fld_range_set_type(struct lu_seq_range *range, - unsigned int flags) -{ - range->lsr_flags |= flags; -} - -/** - * Add MDT to range type \a range - */ - -static inline void fld_range_set_mdt(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_MDT); -} - -/** - * Add OST to range type \a range - */ - -static inline void fld_range_set_ost(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_OST); -} - -/** - * Add ANY to range type \a range - */ - -static inline void fld_range_set_any(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_ANY); -} - -/** - * computes width of given sequence range \a range - */ - -static inline u64 lu_seq_range_space(const struct lu_seq_range *range) -{ - return range->lsr_end - range->lsr_start; -} - -/** - * initialize range to zero \a range - */ - -static inline void lu_seq_range_init(struct lu_seq_range *range) -{ - memset(range, 0, sizeof(*range)); -} - -/** - * check if given seq id \a s is within given range \a range - */ - -static inline bool lu_seq_range_within(const struct lu_seq_range *range, - u64 seq) -{ - return seq >= range->lsr_start && seq < range->lsr_end; -} - -/** - * Is the range sane? Is the end after the beginning? \a range - */ - -static inline bool lu_seq_range_is_sane(const struct lu_seq_range *range) -{ - return range->lsr_end >= range->lsr_start; -} - -/** - * Is the range 0? \a range - */ - -static inline bool lu_seq_range_is_zero(const struct lu_seq_range *range) -{ - return range->lsr_start == 0 && range->lsr_end == 0; -} - -/** - * Is the range out of space? 
\a range - */ - -static inline bool lu_seq_range_is_exhausted(const struct lu_seq_range *range) -{ - return lu_seq_range_space(range) == 0; -} - -/** - * return 0 if two ranges have the same location, nonzero if they are - * different \a r1 \a r2 - */ - -static inline int lu_seq_range_compare_loc(const struct lu_seq_range *r1, - const struct lu_seq_range *r2) -{ - return r1->lsr_index != r2->lsr_index || - r1->lsr_flags != r2->lsr_flags; -} - -#if !defined(__REQ_LAYOUT_USER__) -/** - * byte swap range structure \a range - */ - -void lustre_swab_lu_seq_range(struct lu_seq_range *range); -#endif -/** - * printf string and argument list for sequence range - */ -#define DRANGE "[%#16.16llx-%#16.16llx]:%x:%s" - -#define PRANGE(range) \ - (range)->lsr_start, \ - (range)->lsr_end, \ - (range)->lsr_index, \ - fld_range_is_mdt(range) ? "mdt" : "ost" - -#endif diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c deleted file mode 100644 index 8df7a4463c21..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c +++ /dev/null @@ -1,599 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ldlm/interval_tree.c - * - * Interval tree library used by ldlm extent lock code - * - * Author: Huang Wei - * Author: Jay Xiong - */ -#include -#include -#include - -enum { - INTERVAL_RED = 0, - INTERVAL_BLACK = 1 -}; - -static inline int node_is_left_child(struct interval_node *node) -{ - return node == node->in_parent->in_left; -} - -static inline int node_is_right_child(struct interval_node *node) -{ - return node == node->in_parent->in_right; -} - -static inline int node_is_red(struct interval_node *node) -{ - return node->in_color == INTERVAL_RED; -} - -static inline int node_is_black(struct interval_node *node) -{ - return node->in_color == INTERVAL_BLACK; -} - -static inline int extent_compare(struct interval_node_extent *e1, - struct interval_node_extent *e2) -{ - int rc; - - if (e1->start == e2->start) { - if (e1->end < e2->end) - rc = -1; - else if (e1->end > e2->end) - rc = 1; - else - rc = 0; - } else { - if (e1->start < e2->start) - rc = -1; - else - rc = 1; - } - return rc; -} - -static inline int extent_equal(struct interval_node_extent *e1, - struct interval_node_extent *e2) -{ - return (e1->start == e2->start) && (e1->end == e2->end); -} - -static inline int extent_overlapped(struct interval_node_extent *e1, - struct interval_node_extent *e2) -{ - return (e1->start <= e2->end) && (e2->start <= e1->end); -} - -static inline int node_equal(struct interval_node *n1, struct interval_node *n2) -{ - return extent_equal(&n1->in_extent, &n2->in_extent); -} - -static struct interval_node *interval_first(struct 
interval_node *node) -{ - if (!node) - return NULL; - while (node->in_left) - node = node->in_left; - return node; -} - -static struct interval_node *interval_last(struct interval_node *node) -{ - if (!node) - return NULL; - while (node->in_right) - node = node->in_right; - return node; -} - -static struct interval_node *interval_next(struct interval_node *node) -{ - if (!node) - return NULL; - if (node->in_right) - return interval_first(node->in_right); - while (node->in_parent && node_is_right_child(node)) - node = node->in_parent; - return node->in_parent; -} - -static struct interval_node *interval_prev(struct interval_node *node) -{ - if (!node) - return NULL; - - if (node->in_left) - return interval_last(node->in_left); - - while (node->in_parent && node_is_left_child(node)) - node = node->in_parent; - - return node->in_parent; -} - -enum interval_iter interval_iterate_reverse(struct interval_node *root, - interval_callback_t func, - void *data) -{ - enum interval_iter rc = INTERVAL_ITER_CONT; - struct interval_node *node; - - for (node = interval_last(root); node; node = interval_prev(node)) { - rc = func(node, data); - if (rc == INTERVAL_ITER_STOP) - break; - } - - return rc; -} -EXPORT_SYMBOL(interval_iterate_reverse); - -static void __rotate_change_maxhigh(struct interval_node *node, - struct interval_node *rotate) -{ - __u64 left_max, right_max; - - rotate->in_max_high = node->in_max_high; - left_max = node->in_left ? node->in_left->in_max_high : 0; - right_max = node->in_right ? node->in_right->in_max_high : 0; - node->in_max_high = max(interval_high(node), - max(left_max, right_max)); -} - -/* The left rotation "pivots" around the link from node to node->right, and - * - node will be linked to node->right's left child, and - * - node->right's left child will be linked to node's right child. 
- */ -static void __rotate_left(struct interval_node *node, - struct interval_node **root) -{ - struct interval_node *right = node->in_right; - struct interval_node *parent = node->in_parent; - - node->in_right = right->in_left; - if (node->in_right) - right->in_left->in_parent = node; - - right->in_left = node; - right->in_parent = parent; - if (parent) { - if (node_is_left_child(node)) - parent->in_left = right; - else - parent->in_right = right; - } else { - *root = right; - } - node->in_parent = right; - - /* update max_high for node and right */ - __rotate_change_maxhigh(node, right); -} - -/* The right rotation "pivots" around the link from node to node->left, and - * - node will be linked to node->left's right child, and - * - node->left's right child will be linked to node's left child. - */ -static void __rotate_right(struct interval_node *node, - struct interval_node **root) -{ - struct interval_node *left = node->in_left; - struct interval_node *parent = node->in_parent; - - node->in_left = left->in_right; - if (node->in_left) - left->in_right->in_parent = node; - left->in_right = node; - - left->in_parent = parent; - if (parent) { - if (node_is_right_child(node)) - parent->in_right = left; - else - parent->in_left = left; - } else { - *root = left; - } - node->in_parent = left; - - /* update max_high for node and left */ - __rotate_change_maxhigh(node, left); -} - -#define interval_swap(a, b) do { \ - struct interval_node *c = a; a = b; b = c; \ -} while (0) - -/* - * Operations INSERT and DELETE, when run on a tree with n keys, - * take O(logN) time.Because they modify the tree, the result - * may violate the red-black properties.To restore these properties, - * we must change the colors of some of the nodes in the tree - * and also change the pointer structure. 
- */ -static void interval_insert_color(struct interval_node *node, - struct interval_node **root) -{ - struct interval_node *parent, *gparent; - - while ((parent = node->in_parent) && node_is_red(parent)) { - gparent = parent->in_parent; - /* Parent is RED, so gparent must not be NULL */ - if (node_is_left_child(parent)) { - struct interval_node *uncle; - - uncle = gparent->in_right; - if (uncle && node_is_red(uncle)) { - uncle->in_color = INTERVAL_BLACK; - parent->in_color = INTERVAL_BLACK; - gparent->in_color = INTERVAL_RED; - node = gparent; - continue; - } - - if (parent->in_right == node) { - __rotate_left(parent, root); - interval_swap(node, parent); - } - - parent->in_color = INTERVAL_BLACK; - gparent->in_color = INTERVAL_RED; - __rotate_right(gparent, root); - } else { - struct interval_node *uncle; - - uncle = gparent->in_left; - if (uncle && node_is_red(uncle)) { - uncle->in_color = INTERVAL_BLACK; - parent->in_color = INTERVAL_BLACK; - gparent->in_color = INTERVAL_RED; - node = gparent; - continue; - } - - if (node_is_left_child(node)) { - __rotate_right(parent, root); - interval_swap(node, parent); - } - - parent->in_color = INTERVAL_BLACK; - gparent->in_color = INTERVAL_RED; - __rotate_left(gparent, root); - } - } - - (*root)->in_color = INTERVAL_BLACK; -} - -struct interval_node *interval_insert(struct interval_node *node, - struct interval_node **root) - -{ - struct interval_node **p, *parent = NULL; - - LASSERT(!interval_is_intree(node)); - p = root; - while (*p) { - parent = *p; - if (node_equal(parent, node)) - return parent; - - /* max_high field must be updated after each iteration */ - if (parent->in_max_high < interval_high(node)) - parent->in_max_high = interval_high(node); - - if (extent_compare(&node->in_extent, &parent->in_extent) < 0) - p = &parent->in_left; - else - p = &parent->in_right; - } - - /* link node into the tree */ - node->in_parent = parent; - node->in_color = INTERVAL_RED; - node->in_left = NULL; - node->in_right = NULL; - 
*p = node; - - interval_insert_color(node, root); - node->in_intree = 1; - - return NULL; -} -EXPORT_SYMBOL(interval_insert); - -static inline int node_is_black_or_0(struct interval_node *node) -{ - return !node || node_is_black(node); -} - -static void interval_erase_color(struct interval_node *node, - struct interval_node *parent, - struct interval_node **root) -{ - struct interval_node *tmp; - - while (node_is_black_or_0(node) && node != *root) { - if (parent->in_left == node) { - tmp = parent->in_right; - if (node_is_red(tmp)) { - tmp->in_color = INTERVAL_BLACK; - parent->in_color = INTERVAL_RED; - __rotate_left(parent, root); - tmp = parent->in_right; - } - if (node_is_black_or_0(tmp->in_left) && - node_is_black_or_0(tmp->in_right)) { - tmp->in_color = INTERVAL_RED; - node = parent; - parent = node->in_parent; - } else { - if (node_is_black_or_0(tmp->in_right)) { - struct interval_node *o_left; - - o_left = tmp->in_left; - if (o_left) - o_left->in_color = INTERVAL_BLACK; - tmp->in_color = INTERVAL_RED; - __rotate_right(tmp, root); - tmp = parent->in_right; - } - tmp->in_color = parent->in_color; - parent->in_color = INTERVAL_BLACK; - if (tmp->in_right) - tmp->in_right->in_color = INTERVAL_BLACK; - __rotate_left(parent, root); - node = *root; - break; - } - } else { - tmp = parent->in_left; - if (node_is_red(tmp)) { - tmp->in_color = INTERVAL_BLACK; - parent->in_color = INTERVAL_RED; - __rotate_right(parent, root); - tmp = parent->in_left; - } - if (node_is_black_or_0(tmp->in_left) && - node_is_black_or_0(tmp->in_right)) { - tmp->in_color = INTERVAL_RED; - node = parent; - parent = node->in_parent; - } else { - if (node_is_black_or_0(tmp->in_left)) { - struct interval_node *o_right; - - o_right = tmp->in_right; - if (o_right) - o_right->in_color = INTERVAL_BLACK; - tmp->in_color = INTERVAL_RED; - __rotate_left(tmp, root); - tmp = parent->in_left; - } - tmp->in_color = parent->in_color; - parent->in_color = INTERVAL_BLACK; - if (tmp->in_left) - 
tmp->in_left->in_color = INTERVAL_BLACK; - __rotate_right(parent, root); - node = *root; - break; - } - } - } - if (node) - node->in_color = INTERVAL_BLACK; -} - -/* - * if the @max_high value of @node is changed, this function traverse a path - * from node up to the root to update max_high for the whole tree. - */ -static void update_maxhigh(struct interval_node *node, - __u64 old_maxhigh) -{ - __u64 left_max, right_max; - - while (node) { - left_max = node->in_left ? node->in_left->in_max_high : 0; - right_max = node->in_right ? node->in_right->in_max_high : 0; - node->in_max_high = max(interval_high(node), - max(left_max, right_max)); - - if (node->in_max_high >= old_maxhigh) - break; - node = node->in_parent; - } -} - -void interval_erase(struct interval_node *node, - struct interval_node **root) -{ - struct interval_node *child, *parent; - int color; - - LASSERT(interval_is_intree(node)); - node->in_intree = 0; - if (!node->in_left) { - child = node->in_right; - } else if (!node->in_right) { - child = node->in_left; - } else { /* Both left and right child are not NULL */ - struct interval_node *old = node; - - node = interval_next(node); - child = node->in_right; - parent = node->in_parent; - color = node->in_color; - - if (child) - child->in_parent = parent; - if (parent == old) - parent->in_right = child; - else - parent->in_left = child; - - node->in_color = old->in_color; - node->in_right = old->in_right; - node->in_left = old->in_left; - node->in_parent = old->in_parent; - - if (old->in_parent) { - if (node_is_left_child(old)) - old->in_parent->in_left = node; - else - old->in_parent->in_right = node; - } else { - *root = node; - } - - old->in_left->in_parent = node; - if (old->in_right) - old->in_right->in_parent = node; - update_maxhigh(child ? 
: parent, node->in_max_high); - update_maxhigh(node, old->in_max_high); - if (parent == old) - parent = node; - goto color; - } - parent = node->in_parent; - color = node->in_color; - - if (child) - child->in_parent = parent; - if (parent) { - if (node_is_left_child(node)) - parent->in_left = child; - else - parent->in_right = child; - } else { - *root = child; - } - - update_maxhigh(child ? : parent, node->in_max_high); - -color: - if (color == INTERVAL_BLACK) - interval_erase_color(child, parent, root); -} -EXPORT_SYMBOL(interval_erase); - -static inline int interval_may_overlap(struct interval_node *node, - struct interval_node_extent *ext) -{ - return (ext->start <= node->in_max_high && - ext->end >= interval_low(node)); -} - -/* - * This function finds all intervals that overlap interval ext, - * and calls func to handle resulted intervals one by one. - * in lustre, this function will find all conflicting locks in - * the granted queue and add these locks to the ast work list. - * - * { - * if (!node) - * return 0; - * if (ext->end < interval_low(node)) { - * interval_search(node->in_left, ext, func, data); - * } else if (interval_may_overlap(node, ext)) { - * if (extent_overlapped(ext, &node->in_extent)) - * func(node, data); - * interval_search(node->in_left, ext, func, data); - * interval_search(node->in_right, ext, func, data); - * } - * return 0; - * } - * - */ -enum interval_iter interval_search(struct interval_node *node, - struct interval_node_extent *ext, - interval_callback_t func, - void *data) -{ - enum interval_iter rc = INTERVAL_ITER_CONT; - struct interval_node *parent; - - LASSERT(ext); - LASSERT(func); - - while (node) { - if (ext->end < interval_low(node)) { - if (node->in_left) { - node = node->in_left; - continue; - } - } else if (interval_may_overlap(node, ext)) { - if (extent_overlapped(ext, &node->in_extent)) { - rc = func(node, data); - if (rc == INTERVAL_ITER_STOP) - break; - } - - if (node->in_left) { - node = node->in_left; - 
continue; - } - if (node->in_right) { - node = node->in_right; - continue; - } - } - - parent = node->in_parent; - while (parent) { - if (node_is_left_child(node) && - parent->in_right) { - /* - * If we ever got the left, it means that the - * parent met ext->endin_right; - break; - } - node = parent; - parent = parent->in_parent; - } - if (!parent || !interval_may_overlap(parent, ext)) - break; - } - - return rc; -} -EXPORT_SYMBOL(interval_search); diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c deleted file mode 100644 index 296259aa51e6..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/l_lock.c +++ /dev/null @@ -1,73 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include - -/** - * Lock a lock and its resource. 
- * - * LDLM locking uses resource to serialize access to locks - * but there is a case when we change resource of lock upon - * enqueue reply. We rely on lock->l_resource = new_res - * being an atomic operation. - */ -struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock) - __acquires(&lock->l_lock) - __acquires(&lock->l_resource->lr_lock) -{ - spin_lock(&lock->l_lock); - - lock_res(lock->l_resource); - - ldlm_set_res_locked(lock); - return lock->l_resource; -} -EXPORT_SYMBOL(lock_res_and_lock); - -/** - * Unlock a lock and its resource previously locked with lock_res_and_lock - */ -void unlock_res_and_lock(struct ldlm_lock *lock) - __releases(&lock->l_resource->lr_lock) - __releases(&lock->l_lock) -{ - /* on server-side resource of lock doesn't change */ - ldlm_clear_res_locked(lock); - - unlock_res(lock->l_resource); - spin_unlock(&lock->l_lock); -} -EXPORT_SYMBOL(unlock_res_and_lock); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c deleted file mode 100644 index 4da23ade2bb3..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c +++ /dev/null @@ -1,258 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ldlm/ldlm_extent.c - * - * Author: Peter Braam - * Author: Phil Schwan - */ - -/** - * This file contains implementation of EXTENT lock type - * - * EXTENT lock type is for locking a contiguous range of values, represented - * by 64-bit starting and ending offsets (inclusive). There are several extent - * lock modes, some of which may be mutually incompatible. Extent locks are - * considered incompatible if their modes are incompatible and their extents - * intersect. See the lock mode compatibility matrix in lustre_dlm.h. - */ - -#define DEBUG_SUBSYSTEM S_LDLM -#include -#include -#include -#include -#include -#include "ldlm_internal.h" - -/* When a lock is cancelled by a client, the KMS may undergo change if this - * is the "highest lock". This function returns the new KMS value. - * Caller must hold lr_lock already. - * - * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! 
- */ -__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) -{ - struct ldlm_resource *res = lock->l_resource; - struct ldlm_lock *lck; - __u64 kms = 0; - - /* don't let another thread in ldlm_extent_shift_kms race in - * just after we finish and take our lock into account in its - * calculation of the kms - */ - ldlm_set_kms_ignore(lock); - - list_for_each_entry(lck, &res->lr_granted, l_res_link) { - - if (ldlm_is_kms_ignore(lck)) - continue; - - if (lck->l_policy_data.l_extent.end >= old_kms) - return old_kms; - - /* This extent _has_ to be smaller than old_kms (checked above) - * so kms can only ever be smaller or the same as old_kms. - */ - if (lck->l_policy_data.l_extent.end + 1 > kms) - kms = lck->l_policy_data.l_extent.end + 1; - } - LASSERTF(kms <= old_kms, "kms %llu old_kms %llu\n", kms, old_kms); - - return kms; -} -EXPORT_SYMBOL(ldlm_extent_shift_kms); - -struct kmem_cache *ldlm_interval_slab; - -/* interval tree, for LDLM_EXTENT. */ -static void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l) -{ - LASSERT(!l->l_tree_node); - LASSERT(l->l_resource->lr_type == LDLM_EXTENT); - - list_add_tail(&l->l_sl_policy, &n->li_group); - l->l_tree_node = n; -} - -struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock) -{ - struct ldlm_interval *node; - - LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); - node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS); - if (!node) - return NULL; - - INIT_LIST_HEAD(&node->li_group); - ldlm_interval_attach(node, lock); - return node; -} - -void ldlm_interval_free(struct ldlm_interval *node) -{ - if (node) { - LASSERT(list_empty(&node->li_group)); - LASSERT(!interval_is_intree(&node->li_node)); - kmem_cache_free(ldlm_interval_slab, node); - } -} - -struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l) -{ - struct ldlm_interval *n = l->l_tree_node; - - if (!n) - return NULL; - - LASSERT(!list_empty(&n->li_group)); - l->l_tree_node = NULL; - list_del_init(&l->l_sl_policy); - - 
return list_empty(&n->li_group) ? n : NULL; -} - -static inline int lock_mode_to_index(enum ldlm_mode mode) -{ - int index; - - LASSERT(mode != 0); - LASSERT(is_power_of_2(mode)); - for (index = -1; mode; index++) - mode >>= 1; - LASSERT(index < LCK_MODE_NUM); - return index; -} - -/** Add newly granted lock into interval tree for the resource. */ -void ldlm_extent_add_lock(struct ldlm_resource *res, - struct ldlm_lock *lock) -{ - struct interval_node *found, **root; - struct ldlm_interval *node; - struct ldlm_extent *extent; - int idx, rc; - - LASSERT(lock->l_granted_mode == lock->l_req_mode); - - node = lock->l_tree_node; - LASSERT(node); - LASSERT(!interval_is_intree(&node->li_node)); - - idx = lock_mode_to_index(lock->l_granted_mode); - LASSERT(lock->l_granted_mode == 1 << idx); - LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode); - - /* node extent initialize */ - extent = &lock->l_policy_data.l_extent; - rc = interval_set(&node->li_node, extent->start, extent->end); - LASSERT(!rc); - - root = &res->lr_itree[idx].lit_root; - found = interval_insert(&node->li_node, root); - if (found) { /* The policy group found. */ - struct ldlm_interval *tmp; - - tmp = ldlm_interval_detach(lock); - ldlm_interval_free(tmp); - ldlm_interval_attach(to_ldlm_interval(found), lock); - } - res->lr_itree[idx].lit_size++; - - /* even though we use interval tree to manage the extent lock, we also - * add the locks into grant list, for debug purpose, .. 
- */ - ldlm_resource_add_lock(res, &res->lr_granted, lock); - - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) { - struct ldlm_lock *lck; - - list_for_each_entry_reverse(lck, &res->lr_granted, - l_res_link) { - if (lck == lock) - continue; - if (lockmode_compat(lck->l_granted_mode, - lock->l_granted_mode)) - continue; - if (ldlm_extent_overlap(&lck->l_req_extent, - &lock->l_req_extent)) { - CDEBUG(D_ERROR, - "granting conflicting lock %p %p\n", - lck, lock); - ldlm_resource_dump(D_ERROR, res); - LBUG(); - } - } - } -} - -/** Remove cancelled lock from resource interval tree. */ -void ldlm_extent_unlink_lock(struct ldlm_lock *lock) -{ - struct ldlm_resource *res = lock->l_resource; - struct ldlm_interval *node = lock->l_tree_node; - struct ldlm_interval_tree *tree; - int idx; - - if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */ - return; - - idx = lock_mode_to_index(lock->l_granted_mode); - LASSERT(lock->l_granted_mode == 1 << idx); - tree = &res->lr_itree[idx]; - - LASSERT(tree->lit_root); /* assure the tree is not null */ - - tree->lit_size--; - node = ldlm_interval_detach(lock); - if (node) { - interval_erase(&node->li_node, &tree->lit_root); - ldlm_interval_free(node); - } -} - -void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy) -{ - lpolicy->l_extent.start = wpolicy->l_extent.start; - lpolicy->l_extent.end = wpolicy->l_extent.end; - lpolicy->l_extent.gid = wpolicy->l_extent.gid; -} - -void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy) -{ - memset(wpolicy, 0, sizeof(*wpolicy)); - wpolicy->l_extent.start = lpolicy->l_extent.start; - wpolicy->l_extent.end = lpolicy->l_extent.end; - wpolicy->l_extent.gid = lpolicy->l_extent.gid; -} diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c deleted file mode 100644 index 94f3b1e49896..000000000000 --- 
a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +++ /dev/null @@ -1,486 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003 Hewlett-Packard Development Company LP. - * Developed under the sponsorship of the US Government under - * Subcontract No. B514193 - * - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -/** - * This file implements POSIX lock type for Lustre. - * Its policy properties are start and end of extent and PID. - * - * These locks are only done through MDS due to POSIX semantics requiring - * e.g. that locks could be only partially released and as such split into - * two parts, and also that two adjacent locks from the same process may be - * merged into a single wider lock. - * - * Lock modes are mapped like this: - * PR and PW for READ and WRITE locks - * NL to request a releasing of a portion of the lock - * - * These flock locks never timeout. 
- */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include -#include -#include -#include -#include "ldlm_internal.h" - -static inline int -ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new) -{ - return((new->l_policy_data.l_flock.owner == - lock->l_policy_data.l_flock.owner) && - (new->l_export == lock->l_export)); -} - -static inline int -ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new) -{ - return((new->l_policy_data.l_flock.start <= - lock->l_policy_data.l_flock.end) && - (new->l_policy_data.l_flock.end >= - lock->l_policy_data.l_flock.start)); -} - -static inline void -ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode) -{ - LDLM_DEBUG(lock, "%s(mode: %d)", - __func__, mode); - - list_del_init(&lock->l_res_link); - - /* client side - set a flag to prevent sending a CANCEL */ - lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING; - - /* when reaching here, it is under lock_res_and_lock(). Thus, - * need call the nolock version of ldlm_lock_decref_internal - */ - ldlm_lock_decref_internal_nolock(lock, mode); - - ldlm_lock_destroy_nolock(lock); -} - -/** - * Process a granting attempt for flock lock. - * Must be called under ns lock held. - * - * This function looks for any conflicts for \a lock in the granted or - * waiting queues. The lock is granted if no conflicts are found in - * either queue. - * - * It is also responsible for splitting a lock if a portion of the lock - * is released. 
- * - */ -static int ldlm_process_flock_lock(struct ldlm_lock *req) -{ - struct ldlm_resource *res = req->l_resource; - struct ldlm_namespace *ns = ldlm_res_to_ns(res); - struct ldlm_lock *tmp; - struct ldlm_lock *lock; - struct ldlm_lock *new = req; - struct ldlm_lock *new2 = NULL; - enum ldlm_mode mode = req->l_req_mode; - int added = (mode == LCK_NL); - int splitted = 0; - const struct ldlm_callback_suite null_cbs = { }; - - CDEBUG(D_DLMTRACE, - "owner %llu pid %u mode %u start %llu end %llu\n", - new->l_policy_data.l_flock.owner, - new->l_policy_data.l_flock.pid, mode, - req->l_policy_data.l_flock.start, - req->l_policy_data.l_flock.end); - - /* No blocking ASTs are sent to the clients for - * Posix file & record locks - */ - req->l_blocking_ast = NULL; - -reprocess: - /* This loop determines where this processes locks start - * in the resource lr_granted list. - */ - list_for_each_entry(lock, &res->lr_granted, l_res_link) - if (ldlm_same_flock_owner(lock, req)) - break; - - /* Scan the locks owned by this process to find the insertion point - * (as locks are ordered), and to handle overlaps. - * We may have to merge or split existing locks. - */ - list_for_each_entry_safe_from(lock, tmp, &res->lr_granted, l_res_link) { - - if (!ldlm_same_flock_owner(lock, new)) - break; - - if (lock->l_granted_mode == mode) { - /* If the modes are the same then we need to process - * locks that overlap OR adjoin the new lock. The extra - * logic condition is necessary to deal with arithmetic - * overflow and underflow. 
- */ - if ((new->l_policy_data.l_flock.start > - (lock->l_policy_data.l_flock.end + 1)) && - (lock->l_policy_data.l_flock.end != OBD_OBJECT_EOF)) - continue; - - if ((new->l_policy_data.l_flock.end < - (lock->l_policy_data.l_flock.start - 1)) && - (lock->l_policy_data.l_flock.start != 0)) - break; - - if (new->l_policy_data.l_flock.start < - lock->l_policy_data.l_flock.start) { - lock->l_policy_data.l_flock.start = - new->l_policy_data.l_flock.start; - } else { - new->l_policy_data.l_flock.start = - lock->l_policy_data.l_flock.start; - } - - if (new->l_policy_data.l_flock.end > - lock->l_policy_data.l_flock.end) { - lock->l_policy_data.l_flock.end = - new->l_policy_data.l_flock.end; - } else { - new->l_policy_data.l_flock.end = - lock->l_policy_data.l_flock.end; - } - - if (added) { - ldlm_flock_destroy(lock, mode); - } else { - new = lock; - added = 1; - } - continue; - } - - if (new->l_policy_data.l_flock.start > - lock->l_policy_data.l_flock.end) - continue; - - if (new->l_policy_data.l_flock.end < - lock->l_policy_data.l_flock.start) - break; - - if (new->l_policy_data.l_flock.start <= - lock->l_policy_data.l_flock.start) { - if (new->l_policy_data.l_flock.end < - lock->l_policy_data.l_flock.end) { - lock->l_policy_data.l_flock.start = - new->l_policy_data.l_flock.end + 1; - break; - } - ldlm_flock_destroy(lock, lock->l_req_mode); - continue; - } - if (new->l_policy_data.l_flock.end >= - lock->l_policy_data.l_flock.end) { - lock->l_policy_data.l_flock.end = - new->l_policy_data.l_flock.start - 1; - continue; - } - - /* split the existing lock into two locks */ - - /* if this is an F_UNLCK operation then we could avoid - * allocating a new lock and use the req lock passed in - * with the request but this would complicate the reply - * processing since updates to req get reflected in the - * reply. The client side replays the lock request so - * it must see the original lock data in the reply. 
- */ - - /* XXX - if ldlm_lock_new() can sleep we should - * release the lr_lock, allocate the new lock, - * and restart processing this lock. - */ - if (!new2) { - unlock_res_and_lock(req); - new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK, - lock->l_granted_mode, &null_cbs, - NULL, 0, LVB_T_NONE); - lock_res_and_lock(req); - if (IS_ERR(new2)) { - ldlm_flock_destroy(req, lock->l_granted_mode); - return LDLM_ITER_STOP; - } - goto reprocess; - } - - splitted = 1; - - new2->l_granted_mode = lock->l_granted_mode; - new2->l_policy_data.l_flock.pid = - new->l_policy_data.l_flock.pid; - new2->l_policy_data.l_flock.owner = - new->l_policy_data.l_flock.owner; - new2->l_policy_data.l_flock.start = - lock->l_policy_data.l_flock.start; - new2->l_policy_data.l_flock.end = - new->l_policy_data.l_flock.start - 1; - lock->l_policy_data.l_flock.start = - new->l_policy_data.l_flock.end + 1; - new2->l_conn_export = lock->l_conn_export; - if (lock->l_export) - new2->l_export = class_export_lock_get(lock->l_export, - new2); - ldlm_lock_addref_internal_nolock(new2, - lock->l_granted_mode); - - /* insert new2 at lock */ - ldlm_resource_add_lock(res, &lock->l_res_link, new2); - LDLM_LOCK_RELEASE(new2); - break; - } - - /* if new2 is created but never used, destroy it*/ - if (splitted == 0 && new2) - ldlm_lock_destroy_nolock(new2); - - /* At this point we're granting the lock request. */ - req->l_granted_mode = req->l_req_mode; - - if (!added) { - list_del_init(&req->l_res_link); - /* insert new lock before "lock", which might be the - * next lock for this owner, or might be the first - * lock for the next owner, or might not be a lock at - * all, but instead points at the head of the list - */ - ldlm_resource_add_lock(res, &lock->l_res_link, req); - } - - /* In case we're reprocessing the requested lock we can't destroy - * it until after calling ldlm_add_ast_work_item() above so that laawi() - * can bump the reference count on \a req. 
Otherwise \a req - * could be freed before the completion AST can be sent. - */ - if (added) - ldlm_flock_destroy(req, mode); - - ldlm_resource_dump(D_INFO, res); - return LDLM_ITER_CONTINUE; -} - -/** - * Flock completion callback function. - * - * \param lock [in,out]: A lock to be handled - * \param flags [in]: flags - * \param *data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg - * - * \retval 0 : success - * \retval <0 : failure - */ -int -ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) -{ - struct file_lock *getlk = lock->l_ast_data; - int rc = 0; - - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4); - if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) { - lock_res_and_lock(lock); - lock->l_flags |= LDLM_FL_FAIL_LOC; - unlock_res_and_lock(lock); - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4); - } - CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n", - flags, data, getlk); - - LASSERT(flags != LDLM_FL_WAIT_NOREPROC); - - if (flags & LDLM_FL_FAILED) - goto granted; - - if (!(flags & LDLM_FL_BLOCKED_MASK)) { - if (!data) - /* mds granted the lock in the reply */ - goto granted; - /* CP AST RPC: lock get granted, wake it up */ - wake_up(&lock->l_waitq); - return 0; - } - - LDLM_DEBUG(lock, - "client-side enqueue returned a blocked lock, sleeping"); - - /* Go to sleep until the lock is granted. 
*/ - rc = l_wait_event_abortable(lock->l_waitq, is_granted_or_cancelled(lock)); - - if (rc) { - lock_res_and_lock(lock); - - /* client side - set flag to prevent lock from being put on LRU list */ - ldlm_set_cbpending(lock); - unlock_res_and_lock(lock); - - LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)", - rc); - return rc; - } - -granted: - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10); - - if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT4)) { - lock_res_and_lock(lock); - /* DEADLOCK is always set with CBPENDING */ - lock->l_flags |= LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING; - unlock_res_and_lock(lock); - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT4, 4); - } - if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT5)) { - lock_res_and_lock(lock); - /* DEADLOCK is always set with CBPENDING */ - lock->l_flags |= LDLM_FL_FAIL_LOC | - LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING; - unlock_res_and_lock(lock); - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT5, 4); - } - - lock_res_and_lock(lock); - - /* - * Protect against race where lock could have been just destroyed - * due to overlap in ldlm_process_flock_lock(). - */ - if (ldlm_is_destroyed(lock)) { - unlock_res_and_lock(lock); - LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed"); - /* - * An error is still to be returned, to propagate it up to - * ldlm_cli_enqueue_fini() caller. - */ - return -EIO; - } - - /* ldlm_lock_enqueue() has already placed lock on the granted list. */ - ldlm_resource_unlink_lock(lock); - - /* - * Import invalidation. We need to actually release the lock - * references being held, so that it can go away. No point in - * holding the lock even if app still believes it has it, since - * server already dropped it anyway. Only for granted locks too. - */ - /* Do the same for DEADLOCK'ed locks. 
*/ - if (ldlm_is_failed(lock) || ldlm_is_flock_deadlock(lock)) { - int mode; - - if (flags & LDLM_FL_TEST_LOCK) - LASSERT(ldlm_is_test_lock(lock)); - - if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock)) - mode = getlk->fl_type; - else - mode = lock->l_granted_mode; - - if (ldlm_is_flock_deadlock(lock)) { - LDLM_DEBUG(lock, - "client-side enqueue deadlock received"); - rc = -EDEADLK; - } - ldlm_flock_destroy(lock, mode); - unlock_res_and_lock(lock); - - /* Need to wake up the waiter if we were evicted */ - wake_up(&lock->l_waitq); - - /* - * An error is still to be returned, to propagate it up to - * ldlm_cli_enqueue_fini() caller. - */ - return rc ? : -EIO; - } - - LDLM_DEBUG(lock, "client-side enqueue granted"); - - if (flags & LDLM_FL_TEST_LOCK) { - /* fcntl(F_GETLK) request */ - /* The old mode was saved in getlk->fl_type so that if the mode - * in the lock changes we can decref the appropriate refcount. - */ - LASSERT(ldlm_is_test_lock(lock)); - ldlm_flock_destroy(lock, getlk->fl_type); - switch (lock->l_granted_mode) { - case LCK_PR: - getlk->fl_type = F_RDLCK; - break; - case LCK_PW: - getlk->fl_type = F_WRLCK; - break; - default: - getlk->fl_type = F_UNLCK; - } - getlk->fl_pid = -(pid_t)lock->l_policy_data.l_flock.pid; - getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start; - getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end; - } else { - /* We need to reprocess the lock to do merges or splits - * with existing locks owned by this process. 
- */ - ldlm_process_flock_lock(lock); - } - unlock_res_and_lock(lock); - return rc; -} -EXPORT_SYMBOL(ldlm_flock_completion_ast); - -void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy) -{ - lpolicy->l_flock.start = wpolicy->l_flock.lfw_start; - lpolicy->l_flock.end = wpolicy->l_flock.lfw_end; - lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid; - lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner; -} - -void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy) -{ - memset(wpolicy, 0, sizeof(*wpolicy)); - wpolicy->l_flock.lfw_start = lpolicy->l_flock.start; - wpolicy->l_flock.lfw_end = lpolicy->l_flock.end; - wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid; - wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner; -} diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c deleted file mode 100644 index 2926208cdfa1..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ldlm/ldlm_inodebits.c - * - * Author: Peter Braam - * Author: Phil Schwan - */ - -/** - * This file contains implementation of IBITS lock type - * - * IBITS lock type contains a bit mask determining various properties of an - * object. The meanings of specific bits are specific to the caller and are - * opaque to LDLM code. - * - * Locks with intersecting bitmasks and conflicting lock modes (e.g. LCK_PW) - * are considered conflicting. See the lock mode compatibility matrix - * in lustre_dlm.h. - */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include -#include -#include "ldlm_internal.h" - -void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy) -{ - lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits; -} - -void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy) -{ - memset(wpolicy, 0, sizeof(*wpolicy)); - wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits; -} diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h deleted file mode 100644 index bc33ca100620..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h +++ /dev/null @@ -1,342 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define MAX_STRING_SIZE 128 - -extern int ldlm_srv_namespace_nr; -extern int ldlm_cli_namespace_nr; -extern struct mutex ldlm_srv_namespace_lock; -extern struct list_head ldlm_srv_namespace_list; -extern struct mutex ldlm_cli_namespace_lock; -extern struct list_head ldlm_cli_active_namespace_list; - -static inline int ldlm_namespace_nr_read(enum ldlm_side client) -{ - return client == LDLM_NAMESPACE_SERVER ? - ldlm_srv_namespace_nr : ldlm_cli_namespace_nr; -} - -static inline void ldlm_namespace_nr_inc(enum ldlm_side client) -{ - if (client == LDLM_NAMESPACE_SERVER) - ldlm_srv_namespace_nr++; - else - ldlm_cli_namespace_nr++; -} - -static inline void ldlm_namespace_nr_dec(enum ldlm_side client) -{ - if (client == LDLM_NAMESPACE_SERVER) - ldlm_srv_namespace_nr--; - else - ldlm_cli_namespace_nr--; -} - -static inline struct list_head *ldlm_namespace_list(enum ldlm_side client) -{ - return client == LDLM_NAMESPACE_SERVER ? 
- &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list; -} - -static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client) -{ - return client == LDLM_NAMESPACE_SERVER ? - &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock; -} - -/* ns_bref is the number of resources in this namespace */ -static inline int ldlm_ns_empty(struct ldlm_namespace *ns) -{ - return atomic_read(&ns->ns_bref) == 0; -} - -void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns, - enum ldlm_side client); -void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns, - enum ldlm_side client); -struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client); - -/* ldlm_request.c */ -/* Cancel lru flag, it indicates we cancel aged locks. */ -enum { - LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel old non-LRU resize locks */ - LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */ - LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */ - LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. 
*/ - LDLM_LRU_FLAG_NO_WAIT = BIT(4), /* Cancel locks w/o blocking (neither - * sending nor waiting for any rpcs) - */ - LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */ -}; - -int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, - enum ldlm_cancel_flags sync, int flags); -int ldlm_cancel_lru_local(struct ldlm_namespace *ns, - struct list_head *cancels, int count, int max, - enum ldlm_cancel_flags cancel_flags, int flags); -extern unsigned int ldlm_enqueue_min; -extern unsigned int ldlm_cancel_unused_locks_before_replay; - -/* ldlm_lock.c */ - -struct ldlm_cb_set_arg { - struct ptlrpc_request_set *set; - int type; /* LDLM_{CP,BL,GL}_CALLBACK */ - atomic_t restart; - struct list_head *list; - union ldlm_gl_desc *gl_desc; /* glimpse AST descriptor */ -}; - -enum ldlm_desc_ast_t { - LDLM_WORK_BL_AST, - LDLM_WORK_CP_AST, - LDLM_WORK_REVOKE_AST, - LDLM_WORK_GL_AST -}; - -void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list); -int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, - enum req_location loc, void *data, int size); -struct ldlm_lock * -ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *id, - enum ldlm_type type, enum ldlm_mode mode, - const struct ldlm_callback_suite *cbs, - void *data, __u32 lvb_len, enum lvb_type lvb_type); -enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns, - struct ldlm_lock **lock, void *cookie, - __u64 *flags); -void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode); -void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, - enum ldlm_mode mode); -void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode); -void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, - enum ldlm_mode mode); -int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, - enum ldlm_desc_ast_t ast_type); -int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use); -#define ldlm_lock_remove_from_lru(lock) 
ldlm_lock_remove_from_lru_check(lock, 0) -int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock); -void ldlm_lock_destroy_nolock(struct ldlm_lock *lock); - -/* ldlm_lockd.c */ -int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, - struct ldlm_lock *lock); -int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, - struct ldlm_lock_desc *ld, - struct list_head *cancels, int count, - enum ldlm_cancel_flags cancel_flags); -int ldlm_bl_thread_wakeup(void); - -void ldlm_handle_bl_callback(struct ldlm_namespace *ns, - struct ldlm_lock_desc *ld, struct ldlm_lock *lock); - -extern struct kmem_cache *ldlm_resource_slab; -extern struct kset *ldlm_ns_kset; - -/* ldlm_lockd.c & ldlm_lock.c */ -extern struct kmem_cache *ldlm_lock_slab; - -/* ldlm_extent.c */ -void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock); -void ldlm_extent_unlink_lock(struct ldlm_lock *lock); - -/* l_lock.c */ -void l_check_ns_lock(struct ldlm_namespace *ns); -void l_check_no_ns_lock(struct ldlm_namespace *ns); - -extern struct dentry *ldlm_svc_debugfs_dir; - -struct ldlm_state { - struct ptlrpc_service *ldlm_cb_service; - struct ptlrpc_service *ldlm_cancel_service; - struct ptlrpc_client *ldlm_client; - struct ptlrpc_connection *ldlm_server_conn; - struct ldlm_bl_pool *ldlm_bl_pool; -}; - -/* ldlm_pool.c */ -__u64 ldlm_pool_get_slv(struct ldlm_pool *pl); -void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv); -__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl); - -/* interval tree, for LDLM_EXTENT. 
*/ -extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */ -struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l); -struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock); -void ldlm_interval_free(struct ldlm_interval *node); -/* this function must be called with res lock held */ -static inline struct ldlm_extent * -ldlm_interval_extent(struct ldlm_interval *node) -{ - struct ldlm_lock *lock; - - LASSERT(!list_empty(&node->li_group)); - - lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy); - return &lock->l_policy_data.l_extent; -} - -int ldlm_init(void); -void ldlm_exit(void); - -enum ldlm_policy_res { - LDLM_POLICY_CANCEL_LOCK, - LDLM_POLICY_KEEP_LOCK, - LDLM_POLICY_SKIP_LOCK -}; - -#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v) -#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; } -#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v) -#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; } -#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v)) -#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b) - -#define LDLM_POOL_SYSFS_READER_SHOW(var, type) \ - static ssize_t var##_show(struct kobject *kobj, \ - struct attribute *attr, \ - char *buf) \ - { \ - struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \ - pl_kobj); \ - type tmp; \ - \ - spin_lock(&pl->pl_lock); \ - tmp = pl->pl_##var; \ - spin_unlock(&pl->pl_lock); \ - \ - return LDLM_POOL_SYSFS_PRINT_##type(tmp); \ - } \ - struct __##var##__dummy_read {; } /* semicolon catcher */ - -#define LDLM_POOL_SYSFS_WRITER_STORE(var, type) \ - static ssize_t var##_store(struct kobject *kobj, \ - struct attribute *attr, \ - const char *buffer, \ - size_t count) \ - { \ - struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \ - pl_kobj); \ - unsigned long tmp; \ - int rc; \ - \ - rc = kstrtoul(buffer, 10, &tmp); \ - if (rc < 0) { \ - return rc; \ - } \ - \ - spin_lock(&pl->pl_lock); \ - 
LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \ - spin_unlock(&pl->pl_lock); \ - \ - return count; \ - } \ - struct __##var##__dummy_write {; } /* semicolon catcher */ - -#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type) \ - static ssize_t var##_show(struct kobject *kobj, \ - struct attribute *attr, \ - char *buf) \ - { \ - struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \ - pl_kobj); \ - \ - return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var); \ - } \ - struct __##var##__dummy_read {; } /* semicolon catcher */ - -#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type) \ - static ssize_t var##_store(struct kobject *kobj, \ - struct attribute *attr, \ - const char *buffer, \ - size_t count) \ - { \ - struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \ - pl_kobj); \ - unsigned long tmp; \ - int rc; \ - \ - rc = kstrtoul(buffer, 10, &tmp); \ - if (rc < 0) { \ - return rc; \ - } \ - \ - LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \ - \ - return count; \ - } \ - struct __##var##__dummy_write {; } /* semicolon catcher */ - -static inline int is_granted_or_cancelled(struct ldlm_lock *lock) -{ - int ret = 0; - - lock_res_and_lock(lock); - if ((lock->l_req_mode == lock->l_granted_mode) && - !ldlm_is_cp_reqd(lock)) - ret = 1; - else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock)) - ret = 1; - unlock_res_and_lock(lock); - - return ret; -} - -typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *, - union ldlm_policy_data *); - -typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *, - union ldlm_wire_policy_data *); - -void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy); -void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy); -void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy); -void 
ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy); -void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy); -void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy); -void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy); -void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy); - -static inline bool ldlm_res_eq(const struct ldlm_res_id *res0, - const struct ldlm_res_id *res1) -{ - return memcmp(res0, res1, sizeof(*res0)) == 0; -} diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c deleted file mode 100644 index 0aa4f234a4f4..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c +++ /dev/null @@ -1,842 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -/** - * This file deals with various client/target related logic including recovery. - * - * TODO: This code more logically belongs in the ptlrpc module than in ldlm and - * should be moved. - */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include -#include -#include -#include -#include "ldlm_internal.h" - -/* @priority: If non-zero, move the selected connection to the list head. - * @create: If zero, only search in existing connections. - */ -static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid, - int priority, int create) -{ - struct ptlrpc_connection *ptlrpc_conn; - struct obd_import_conn *imp_conn = NULL, *item; - int rc = 0; - - if (!create && !priority) { - CDEBUG(D_HA, "Nothing to do\n"); - return -EINVAL; - } - - ptlrpc_conn = ptlrpc_uuid_to_connection(uuid); - if (!ptlrpc_conn) { - CDEBUG(D_HA, "can't find connection %s\n", uuid->uuid); - return -ENOENT; - } - - if (create) { - imp_conn = kzalloc(sizeof(*imp_conn), GFP_NOFS); - if (!imp_conn) { - rc = -ENOMEM; - goto out_put; - } - } - - spin_lock(&imp->imp_lock); - list_for_each_entry(item, &imp->imp_conn_list, oic_item) { - if (obd_uuid_equals(uuid, &item->oic_uuid)) { - if (priority) { - list_del(&item->oic_item); - list_add(&item->oic_item, - &imp->imp_conn_list); - item->oic_last_attempt = 0; - } - CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n", - imp, imp->imp_obd->obd_name, uuid->uuid, - (priority ? ", moved to head" : "")); - spin_unlock(&imp->imp_lock); - rc = 0; - goto out_free; - } - } - /* No existing import connection found for \a uuid. 
*/ - if (create) { - imp_conn->oic_conn = ptlrpc_conn; - imp_conn->oic_uuid = *uuid; - imp_conn->oic_last_attempt = 0; - if (priority) - list_add(&imp_conn->oic_item, &imp->imp_conn_list); - else - list_add_tail(&imp_conn->oic_item, - &imp->imp_conn_list); - CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n", - imp, imp->imp_obd->obd_name, uuid->uuid, - (priority ? "head" : "tail")); - } else { - spin_unlock(&imp->imp_lock); - rc = -ENOENT; - goto out_free; - } - - spin_unlock(&imp->imp_lock); - return 0; -out_free: - kfree(imp_conn); -out_put: - ptlrpc_connection_put(ptlrpc_conn); - return rc; -} - -int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid) -{ - return import_set_conn(imp, uuid, 1, 0); -} - -int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid, - int priority) -{ - return import_set_conn(imp, uuid, priority, 1); -} -EXPORT_SYMBOL(client_import_add_conn); - -int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid) -{ - struct obd_import_conn *imp_conn; - struct obd_export *dlmexp; - int rc = -ENOENT; - - spin_lock(&imp->imp_lock); - if (list_empty(&imp->imp_conn_list)) { - LASSERT(!imp->imp_connection); - goto out; - } - - list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) { - if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid)) - continue; - LASSERT(imp_conn->oic_conn); - - if (imp_conn == imp->imp_conn_current) { - LASSERT(imp_conn->oic_conn == imp->imp_connection); - - if (imp->imp_state != LUSTRE_IMP_CLOSED && - imp->imp_state != LUSTRE_IMP_DISCON) { - CERROR("can't remove current connection\n"); - rc = -EBUSY; - goto out; - } - - ptlrpc_connection_put(imp->imp_connection); - imp->imp_connection = NULL; - - dlmexp = class_conn2export(&imp->imp_dlm_handle); - if (dlmexp && dlmexp->exp_connection) { - LASSERT(dlmexp->exp_connection == - imp_conn->oic_conn); - ptlrpc_connection_put(dlmexp->exp_connection); - dlmexp->exp_connection = NULL; - } - - if (dlmexp) - class_export_put(dlmexp); - 
} - - list_del(&imp_conn->oic_item); - ptlrpc_connection_put(imp_conn->oic_conn); - kfree(imp_conn); - CDEBUG(D_HA, "imp %p@%s: remove connection %s\n", - imp, imp->imp_obd->obd_name, uuid->uuid); - rc = 0; - break; - } -out: - spin_unlock(&imp->imp_lock); - if (rc == -ENOENT) - CERROR("connection %s not found\n", uuid->uuid); - return rc; -} -EXPORT_SYMBOL(client_import_del_conn); - -/** - * Find conn UUID by peer NID. \a peer is a server NID. This function is used - * to find a conn uuid of \a imp which can reach \a peer. - */ -int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer, - struct obd_uuid *uuid) -{ - struct obd_import_conn *conn; - int rc = -ENOENT; - - spin_lock(&imp->imp_lock); - list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { - /* Check if conn UUID does have this peer NID. */ - if (class_check_uuid(&conn->oic_uuid, peer)) { - *uuid = conn->oic_uuid; - rc = 0; - break; - } - } - spin_unlock(&imp->imp_lock); - return rc; -} -EXPORT_SYMBOL(client_import_find_conn); - -void client_destroy_import(struct obd_import *imp) -{ - /* Drop security policy instance after all RPCs have finished/aborted - * to let all busy contexts be released. - */ - class_import_get(imp); - class_destroy_import(imp); - sptlrpc_import_sec_put(imp); - class_import_put(imp); -} -EXPORT_SYMBOL(client_destroy_import); - -/* Configure an RPC client OBD device. - * - * lcfg parameters: - * 1 - client UUID - * 2 - server UUID - * 3 - inactive-on-startup - */ -int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) -{ - struct client_obd *cli = &obddev->u.cli; - struct obd_import *imp; - struct obd_uuid server_uuid; - int rq_portal, rp_portal, connect_op; - char *name = obddev->obd_type->typ_name; - enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN; - int rc; - - /* In a more perfect world, we would hang a ptlrpc_client off of - * obd_type and just use the values from there. 
- */ - if (!strcmp(name, LUSTRE_OSC_NAME)) { - rq_portal = OST_REQUEST_PORTAL; - rp_portal = OSC_REPLY_PORTAL; - connect_op = OST_CONNECT; - cli->cl_sp_me = LUSTRE_SP_CLI; - cli->cl_sp_to = LUSTRE_SP_OST; - ns_type = LDLM_NS_TYPE_OSC; - } else if (!strcmp(name, LUSTRE_MDC_NAME) || - !strcmp(name, LUSTRE_LWP_NAME)) { - rq_portal = MDS_REQUEST_PORTAL; - rp_portal = MDC_REPLY_PORTAL; - connect_op = MDS_CONNECT; - cli->cl_sp_me = LUSTRE_SP_CLI; - cli->cl_sp_to = LUSTRE_SP_MDT; - ns_type = LDLM_NS_TYPE_MDC; - } else if (!strcmp(name, LUSTRE_MGC_NAME)) { - rq_portal = MGS_REQUEST_PORTAL; - rp_portal = MGC_REPLY_PORTAL; - connect_op = MGS_CONNECT; - cli->cl_sp_me = LUSTRE_SP_MGC; - cli->cl_sp_to = LUSTRE_SP_MGS; - cli->cl_flvr_mgc.sf_rpc = SPTLRPC_FLVR_INVALID; - ns_type = LDLM_NS_TYPE_MGC; - } else { - CERROR("unknown client OBD type \"%s\", can't setup\n", - name); - return -EINVAL; - } - - if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) { - CERROR("requires a TARGET UUID\n"); - return -EINVAL; - } - - if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) { - CERROR("client UUID must be less than 38 characters\n"); - return -EINVAL; - } - - if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) { - CERROR("setup requires a SERVER UUID\n"); - return -EINVAL; - } - - if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) { - CERROR("target UUID must be less than 38 characters\n"); - return -EINVAL; - } - - init_rwsem(&cli->cl_sem); - cli->cl_conn_count = 0; - memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2), - min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2), - sizeof(server_uuid))); - - cli->cl_dirty_pages = 0; - cli->cl_avail_grant = 0; - /* FIXME: Should limit this for the sum of all cl_dirty_max_pages. */ - /* - * cl_dirty_max_pages may be changed at connect time in - * ptlrpc_connect_interpret(). 
- */ - client_adjust_max_dirty(cli); - INIT_LIST_HEAD(&cli->cl_cache_waiters); - INIT_LIST_HEAD(&cli->cl_loi_ready_list); - INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); - INIT_LIST_HEAD(&cli->cl_loi_write_list); - INIT_LIST_HEAD(&cli->cl_loi_read_list); - spin_lock_init(&cli->cl_loi_list_lock); - atomic_set(&cli->cl_pending_w_pages, 0); - atomic_set(&cli->cl_pending_r_pages, 0); - cli->cl_r_in_flight = 0; - cli->cl_w_in_flight = 0; - - spin_lock_init(&cli->cl_read_rpc_hist.oh_lock); - spin_lock_init(&cli->cl_write_rpc_hist.oh_lock); - spin_lock_init(&cli->cl_read_page_hist.oh_lock); - spin_lock_init(&cli->cl_write_page_hist.oh_lock); - spin_lock_init(&cli->cl_read_offset_hist.oh_lock); - spin_lock_init(&cli->cl_write_offset_hist.oh_lock); - - /* lru for osc. */ - INIT_LIST_HEAD(&cli->cl_lru_osc); - atomic_set(&cli->cl_lru_shrinkers, 0); - atomic_long_set(&cli->cl_lru_busy, 0); - atomic_long_set(&cli->cl_lru_in_list, 0); - INIT_LIST_HEAD(&cli->cl_lru_list); - spin_lock_init(&cli->cl_lru_list_lock); - atomic_long_set(&cli->cl_unstable_count, 0); - INIT_LIST_HEAD(&cli->cl_shrink_list); - - init_waitqueue_head(&cli->cl_destroy_waitq); - atomic_set(&cli->cl_destroy_in_flight, 0); - /* Turn on checksumming by default. */ - cli->cl_checksum = 1; - /* - * The supported checksum types will be worked out at connect time - * Set cl_chksum* to CRC32 for now to avoid returning screwed info - * through procfs. - */ - cli->cl_cksum_type = OBD_CKSUM_CRC32; - cli->cl_supp_cksum_types = OBD_CKSUM_CRC32; - atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS); - - /* - * Set it to possible maximum size. It may be reduced by ocd_brw_size - * from OFD after connecting. - */ - cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES; - - /* - * set cl_chunkbits default value to PAGE_CACHE_SHIFT, - * it will be updated at OSC connection time. 
- */ - cli->cl_chunkbits = PAGE_SHIFT; - - if (!strcmp(name, LUSTRE_MDC_NAME)) - cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT; - else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) - cli->cl_max_rpcs_in_flight = 2; - else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) - cli->cl_max_rpcs_in_flight = 3; - else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) - cli->cl_max_rpcs_in_flight = 4; - else - cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT; - - spin_lock_init(&cli->cl_mod_rpcs_lock); - spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock); - cli->cl_max_mod_rpcs_in_flight = 0; - cli->cl_mod_rpcs_in_flight = 0; - cli->cl_close_rpcs_in_flight = 0; - init_waitqueue_head(&cli->cl_mod_rpcs_waitq); - cli->cl_mod_tag_bitmap = NULL; - - if (connect_op == MDS_CONNECT) { - cli->cl_max_mod_rpcs_in_flight = cli->cl_max_rpcs_in_flight - 1; - cli->cl_mod_tag_bitmap = kcalloc(BITS_TO_LONGS(OBD_MAX_RIF_MAX), - sizeof(long), GFP_NOFS); - if (!cli->cl_mod_tag_bitmap) { - rc = -ENOMEM; - goto err; - } - } - - rc = ldlm_get_ref(); - if (rc) { - CERROR("ldlm_get_ref failed: %d\n", rc); - goto err; - } - - ptlrpc_init_client(rq_portal, rp_portal, name, - &obddev->obd_ldlm_client); - - imp = class_new_import(obddev); - if (!imp) { - rc = -ENOENT; - goto err_ldlm; - } - imp->imp_client = &obddev->obd_ldlm_client; - imp->imp_connect_op = connect_op; - memcpy(cli->cl_target_uuid.uuid, lustre_cfg_buf(lcfg, 1), - LUSTRE_CFG_BUFLEN(lcfg, 1)); - class_import_put(imp); - - rc = client_import_add_conn(imp, &server_uuid, 1); - if (rc) { - CERROR("can't add initial connection\n"); - goto err_import; - } - - cli->cl_import = imp; - /* cli->cl_max_mds_easize updated by mdc_init_ea_size() */ - cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3); - - if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) { - if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) { - CDEBUG(D_HA, "marking %s %s->%s as inactive\n", - name, obddev->obd_name, - cli->cl_target_uuid.uuid); - 
spin_lock(&imp->imp_lock); - imp->imp_deactive = 1; - spin_unlock(&imp->imp_lock); - } - } - - obddev->obd_namespace = ldlm_namespace_new(obddev, obddev->obd_name, - LDLM_NAMESPACE_CLIENT, - LDLM_NAMESPACE_GREEDY, - ns_type); - if (!obddev->obd_namespace) { - CERROR("Unable to create client namespace - %s\n", - obddev->obd_name); - rc = -ENOMEM; - goto err_import; - } - - return rc; - -err_import: - class_destroy_import(imp); -err_ldlm: - ldlm_put_ref(); -err: - kfree(cli->cl_mod_tag_bitmap); - cli->cl_mod_tag_bitmap = NULL; - return rc; -} -EXPORT_SYMBOL(client_obd_setup); - -int client_obd_cleanup(struct obd_device *obddev) -{ - struct client_obd *cli = &obddev->u.cli; - - ldlm_namespace_free_post(obddev->obd_namespace); - obddev->obd_namespace = NULL; - - obd_cleanup_client_import(obddev); - LASSERT(!obddev->u.cli.cl_import); - - ldlm_put_ref(); - - kfree(cli->cl_mod_tag_bitmap); - cli->cl_mod_tag_bitmap = NULL; - - return 0; -} -EXPORT_SYMBOL(client_obd_cleanup); - -/* ->o_connect() method for client side (OSC and MDC and MGC) */ -int client_connect_import(const struct lu_env *env, - struct obd_export **exp, - struct obd_device *obd, struct obd_uuid *cluuid, - struct obd_connect_data *data, void *localdata) -{ - struct client_obd *cli = &obd->u.cli; - struct obd_import *imp = cli->cl_import; - struct obd_connect_data *ocd; - struct lustre_handle conn = { 0 }; - bool is_mdc = false; - int rc; - - *exp = NULL; - down_write(&cli->cl_sem); - if (cli->cl_conn_count > 0) { - rc = -EALREADY; - goto out_sem; - } - - rc = class_connect(&conn, obd, cluuid); - if (rc) - goto out_sem; - - cli->cl_conn_count++; - *exp = class_conn2export(&conn); - - LASSERT(obd->obd_namespace); - - imp->imp_dlm_handle = conn; - rc = ptlrpc_init_import(imp); - if (rc != 0) - goto out_ldlm; - - ocd = &imp->imp_connect_data; - if (data) { - *ocd = *data; - is_mdc = !strncmp(imp->imp_obd->obd_type->typ_name, - LUSTRE_MDC_NAME, 3); - if (is_mdc) - data->ocd_connect_flags |= 
OBD_CONNECT_MULTIMODRPCS; - imp->imp_connect_flags_orig = data->ocd_connect_flags; - } - - rc = ptlrpc_connect_import(imp); - if (rc != 0) { - if (data && is_mdc) - data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS; - LASSERT(imp->imp_state == LUSTRE_IMP_DISCON); - goto out_ldlm; - } - LASSERT(*exp && (*exp)->exp_connection); - - if (data) { - LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) == - ocd->ocd_connect_flags, "old %#llx, new %#llx\n", - data->ocd_connect_flags, ocd->ocd_connect_flags); - data->ocd_connect_flags = ocd->ocd_connect_flags; - /* clear the flag as it was not set and is not known - * by upper layers - */ - if (is_mdc) - data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS; - } - - ptlrpc_pinger_add_import(imp); - - if (rc) { -out_ldlm: - cli->cl_conn_count--; - class_disconnect(*exp); - *exp = NULL; - } -out_sem: - up_write(&cli->cl_sem); - - return rc; -} -EXPORT_SYMBOL(client_connect_import); - -int client_disconnect_export(struct obd_export *exp) -{ - struct obd_device *obd = class_exp2obd(exp); - struct client_obd *cli; - struct obd_import *imp; - int rc = 0, err; - - if (!obd) { - CERROR("invalid export for disconnect: exp %p cookie %#llx\n", - exp, exp ? exp->exp_handle.h_cookie : -1); - return -EINVAL; - } - - cli = &obd->u.cli; - imp = cli->cl_import; - - down_write(&cli->cl_sem); - CDEBUG(D_INFO, "disconnect %s - %zu\n", obd->obd_name, - cli->cl_conn_count); - - if (!cli->cl_conn_count) { - CERROR("disconnecting disconnected device (%s)\n", - obd->obd_name); - rc = -EINVAL; - goto out_disconnect; - } - - cli->cl_conn_count--; - if (cli->cl_conn_count) { - rc = 0; - goto out_disconnect; - } - - /* Mark import deactivated now, so we don't try to reconnect if any - * of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't - * fully deactivate the import, or that would drop all requests. 
- */ - spin_lock(&imp->imp_lock); - imp->imp_deactive = 1; - spin_unlock(&imp->imp_lock); - - /* Some non-replayable imports (MDS's OSCs) are pinged, so just - * delete it regardless. (It's safe to delete an import that was - * never added.) - */ - (void)ptlrpc_pinger_del_import(imp); - - if (obd->obd_namespace) { - /* obd_force == local only */ - ldlm_cli_cancel_unused(obd->obd_namespace, NULL, - obd->obd_force ? LCF_LOCAL : 0, NULL); - ldlm_namespace_free_prior(obd->obd_namespace, imp, - obd->obd_force); - } - - /* There's no need to hold sem while disconnecting an import, - * and it may actually cause deadlock in GSS. - */ - up_write(&cli->cl_sem); - rc = ptlrpc_disconnect_import(imp, 0); - down_write(&cli->cl_sem); - - ptlrpc_invalidate_import(imp); - -out_disconnect: - /* Use server style - class_disconnect should be always called for - * o_disconnect. - */ - err = class_disconnect(exp); - if (!rc && err) - rc = err; - - up_write(&cli->cl_sem); - - return rc; -} -EXPORT_SYMBOL(client_disconnect_export); - -/** - * Packs current SLV and Limit into \a req. - */ -int target_pack_pool_reply(struct ptlrpc_request *req) -{ - struct obd_device *obd; - - /* Check that we still have all structures alive as this may - * be some late RPC at shutdown time. - */ - if (unlikely(!req->rq_export || !req->rq_export->exp_obd || - !exp_connect_lru_resize(req->rq_export))) { - lustre_msg_set_slv(req->rq_repmsg, 0); - lustre_msg_set_limit(req->rq_repmsg, 0); - return 0; - } - - /* OBD is alive here as export is alive, which we checked above. 
*/ - obd = req->rq_export->exp_obd; - - read_lock(&obd->obd_pool_lock); - lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv); - lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit); - read_unlock(&obd->obd_pool_lock); - - return 0; -} -EXPORT_SYMBOL(target_pack_pool_reply); - -static int -target_send_reply_msg(struct ptlrpc_request *req, int rc, int fail_id) -{ - if (OBD_FAIL_CHECK_ORSET(fail_id & ~OBD_FAIL_ONCE, OBD_FAIL_ONCE)) { - DEBUG_REQ(D_ERROR, req, "dropping reply"); - return -ECOMM; - } - - if (unlikely(rc)) { - DEBUG_REQ(D_NET, req, "processing error (%d)", rc); - req->rq_status = rc; - return ptlrpc_send_error(req, 1); - } - - DEBUG_REQ(D_NET, req, "sending reply"); - return ptlrpc_send_reply(req, PTLRPC_REPLY_MAYBE_DIFFICULT); -} - -void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) -{ - struct ptlrpc_service_part *svcpt; - int netrc; - struct ptlrpc_reply_state *rs; - struct obd_export *exp; - - if (req->rq_no_reply) - return; - - svcpt = req->rq_rqbd->rqbd_svcpt; - rs = req->rq_reply_state; - if (!rs || !rs->rs_difficult) { - /* no notifiers */ - target_send_reply_msg(req, rc, fail_id); - return; - } - - /* must be an export if locks saved */ - LASSERT(req->rq_export); - /* req/reply consistent */ - LASSERT(rs->rs_svcpt == svcpt); - - /* "fresh" reply */ - LASSERT(!rs->rs_scheduled); - LASSERT(!rs->rs_scheduled_ever); - LASSERT(!rs->rs_handled); - LASSERT(!rs->rs_on_net); - LASSERT(!rs->rs_export); - LASSERT(list_empty(&rs->rs_obd_list)); - LASSERT(list_empty(&rs->rs_exp_list)); - - exp = class_export_get(req->rq_export); - - /* disable reply scheduling while I'm setting up */ - rs->rs_scheduled = 1; - rs->rs_on_net = 1; - rs->rs_xid = req->rq_xid; - rs->rs_transno = req->rq_transno; - rs->rs_export = exp; - rs->rs_opc = lustre_msg_get_opc(req->rq_reqmsg); - - spin_lock(&exp->exp_uncommitted_replies_lock); - CDEBUG(D_NET, "rs transno = %llu, last committed = %llu\n", - rs->rs_transno, exp->exp_last_committed); - if 
(rs->rs_transno > exp->exp_last_committed) { - /* not committed already */ - list_add_tail(&rs->rs_obd_list, - &exp->exp_uncommitted_replies); - } - spin_unlock(&exp->exp_uncommitted_replies_lock); - - spin_lock(&exp->exp_lock); - list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies); - spin_unlock(&exp->exp_lock); - - netrc = target_send_reply_msg(req, rc, fail_id); - - spin_lock(&svcpt->scp_rep_lock); - - atomic_inc(&svcpt->scp_nreps_difficult); - - if (netrc != 0) { - /* error sending: reply is off the net. Also we need +1 - * reply ref until ptlrpc_handle_rs() is done - * with the reply state (if the send was successful, there - * would have been +1 ref for the net, which - * reply_out_callback leaves alone) - */ - rs->rs_on_net = 0; - ptlrpc_rs_addref(rs); - } - - spin_lock(&rs->rs_lock); - if (rs->rs_transno <= exp->exp_last_committed || - (!rs->rs_on_net && !rs->rs_no_ack) || - list_empty(&rs->rs_exp_list) || /* completed already */ - list_empty(&rs->rs_obd_list)) { - CDEBUG(D_HA, "Schedule reply immediately\n"); - ptlrpc_dispatch_difficult_reply(rs); - } else { - list_add(&rs->rs_list, &svcpt->scp_rep_active); - rs->rs_scheduled = 0; /* allow notifier to schedule */ - } - spin_unlock(&rs->rs_lock); - spin_unlock(&svcpt->scp_rep_lock); -} -EXPORT_SYMBOL(target_send_reply); - -enum ldlm_mode lck_compat_array[] = { - [LCK_EX] = LCK_COMPAT_EX, - [LCK_PW] = LCK_COMPAT_PW, - [LCK_PR] = LCK_COMPAT_PR, - [LCK_CW] = LCK_COMPAT_CW, - [LCK_CR] = LCK_COMPAT_CR, - [LCK_NL] = LCK_COMPAT_NL, - [LCK_GROUP] = LCK_COMPAT_GROUP, - [LCK_COS] = LCK_COMPAT_COS, -}; - -/** - * Rather arbitrary mapping from LDLM error codes to errno values. This should - * not escape to the user level. 
- */ -int ldlm_error2errno(enum ldlm_error error) -{ - int result; - - switch (error) { - case ELDLM_OK: - case ELDLM_LOCK_MATCHED: - result = 0; - break; - case ELDLM_LOCK_CHANGED: - result = -ESTALE; - break; - case ELDLM_LOCK_ABORTED: - result = -ENAVAIL; - break; - case ELDLM_LOCK_REPLACED: - result = -ESRCH; - break; - case ELDLM_NO_LOCK_DATA: - result = -ENOENT; - break; - case ELDLM_NAMESPACE_EXISTS: - result = -EEXIST; - break; - case ELDLM_BAD_NAMESPACE: - result = -EBADF; - break; - default: - if (((int)error) < 0) /* cast to signed type */ - result = error; /* as enum ldlm_error can be unsigned */ - else { - CERROR("Invalid DLM result code: %d\n", error); - result = -EPROTO; - } - } - return result; -} -EXPORT_SYMBOL(ldlm_error2errno); - -#if LUSTRE_TRACKS_LOCK_EXP_REFS -void ldlm_dump_export_locks(struct obd_export *exp) -{ - spin_lock(&exp->exp_locks_list_guard); - if (!list_empty(&exp->exp_locks_list)) { - struct ldlm_lock *lock; - - CERROR("dumping locks for export %p,ignore if the unmount doesn't hang\n", - exp); - list_for_each_entry(lock, &exp->exp_locks_list, - l_exp_refs_link) - LDLM_ERROR(lock, "lock:"); - } - spin_unlock(&exp->exp_locks_list_guard); -} -#endif diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c deleted file mode 100644 index a644d133063b..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ /dev/null @@ -1,2135 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ldlm/ldlm_lock.c - * - * Author: Peter Braam - * Author: Phil Schwan - */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include -#include -#include "ldlm_internal.h" - -/* lock types */ -char *ldlm_lockname[] = { - [0] = "--", - [LCK_EX] = "EX", - [LCK_PW] = "PW", - [LCK_PR] = "PR", - [LCK_CW] = "CW", - [LCK_CR] = "CR", - [LCK_NL] = "NL", - [LCK_GROUP] = "GROUP", - [LCK_COS] = "COS", -}; -EXPORT_SYMBOL(ldlm_lockname); - -static char *ldlm_typename[] = { - [LDLM_PLAIN] = "PLN", - [LDLM_EXTENT] = "EXT", - [LDLM_FLOCK] = "FLK", - [LDLM_IBITS] = "IBT", -}; - -static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = { - [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local, - [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local, - [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire_to_local, - [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local, -}; - -static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = { - [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_local_to_wire, - [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire, - [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_local_to_wire, - [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_local_to_wire, -}; - -/** - * Converts lock policy from local format to on the wire lock_desc format - */ -static void 
ldlm_convert_policy_to_wire(enum ldlm_type type, - const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy) -{ - ldlm_policy_local_to_wire_t convert; - - convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE]; - - convert(lpolicy, wpolicy); -} - -/** - * Converts lock policy from on the wire lock_desc format to local format - */ -void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, - const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy) -{ - ldlm_policy_wire_to_local_t convert; - - convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE]; - - convert(wpolicy, lpolicy); -} - -const char *ldlm_it2str(enum ldlm_intent_flags it) -{ - switch (it) { - case IT_OPEN: - return "open"; - case IT_CREAT: - return "creat"; - case (IT_OPEN | IT_CREAT): - return "open|creat"; - case IT_READDIR: - return "readdir"; - case IT_GETATTR: - return "getattr"; - case IT_LOOKUP: - return "lookup"; - case IT_UNLINK: - return "unlink"; - case IT_GETXATTR: - return "getxattr"; - case IT_LAYOUT: - return "layout"; - default: - CERROR("Unknown intent 0x%08x\n", it); - return "UNKNOWN"; - } -} -EXPORT_SYMBOL(ldlm_it2str); - -/* - * REFCOUNTED LOCK OBJECTS - */ - -/** - * Get a reference on a lock. - * - * Lock refcounts, during creation: - * - one special one for allocation, dec'd only once in destroy - * - one for being a lock that's in-use - * - one for the addref associated with a new lock - */ -struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock) -{ - atomic_inc(&lock->l_refc); - return lock; -} -EXPORT_SYMBOL(ldlm_lock_get); - -/** - * Release lock reference. - * - * Also frees the lock if it was last reference. 
- */ -void ldlm_lock_put(struct ldlm_lock *lock) -{ - LASSERT(lock->l_resource != LP_POISON); - LASSERT(atomic_read(&lock->l_refc) > 0); - if (atomic_dec_and_test(&lock->l_refc)) { - struct ldlm_resource *res; - - LDLM_DEBUG(lock, - "final lock_put on destroyed lock, freeing it."); - - res = lock->l_resource; - LASSERT(ldlm_is_destroyed(lock)); - LASSERT(list_empty(&lock->l_res_link)); - LASSERT(list_empty(&lock->l_pending_chain)); - - lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats, - LDLM_NSS_LOCKS); - lu_ref_del(&res->lr_reference, "lock", lock); - ldlm_resource_putref(res); - lock->l_resource = NULL; - if (lock->l_export) { - class_export_lock_put(lock->l_export, lock); - lock->l_export = NULL; - } - - kfree(lock->l_lvb_data); - - ldlm_interval_free(ldlm_interval_detach(lock)); - lu_ref_fini(&lock->l_reference); - OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle); - } -} -EXPORT_SYMBOL(ldlm_lock_put); - -/** - * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked. - */ -int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) -{ - int rc = 0; - - if (!list_empty(&lock->l_lru)) { - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - - LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); - list_del_init(&lock->l_lru); - LASSERT(ns->ns_nr_unused > 0); - ns->ns_nr_unused--; - rc = 1; - } - return rc; -} - -/** - * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first. - * - * If \a last_use is non-zero, it will remove the lock from LRU only if - * it matches lock's l_last_used. - * - * \retval 0 if \a last_use is set, the lock is not in LRU list or \a last_use - * doesn't match lock's l_last_used; - * otherwise, the lock hasn't been in the LRU list. - * \retval 1 the lock was in LRU list and removed. 
- */ -int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use) -{ - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - int rc = 0; - - spin_lock(&ns->ns_lock); - if (last_use == 0 || last_use == lock->l_last_used) - rc = ldlm_lock_remove_from_lru_nolock(lock); - spin_unlock(&ns->ns_lock); - - return rc; -} - -/** - * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked. - */ -static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock) -{ - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - - lock->l_last_used = jiffies; - LASSERT(list_empty(&lock->l_lru)); - LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); - list_add_tail(&lock->l_lru, &ns->ns_unused_list); - ldlm_clear_skipped(lock); - LASSERT(ns->ns_nr_unused >= 0); - ns->ns_nr_unused++; -} - -/** - * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks - * first. - */ -static void ldlm_lock_add_to_lru(struct ldlm_lock *lock) -{ - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - - spin_lock(&ns->ns_lock); - ldlm_lock_add_to_lru_nolock(lock); - spin_unlock(&ns->ns_lock); -} - -/** - * Moves LDLM lock \a lock that is already in namespace LRU to the tail of - * the LRU. Performs necessary LRU locking - */ -static void ldlm_lock_touch_in_lru(struct ldlm_lock *lock) -{ - struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - - spin_lock(&ns->ns_lock); - if (!list_empty(&lock->l_lru)) { - ldlm_lock_remove_from_lru_nolock(lock); - ldlm_lock_add_to_lru_nolock(lock); - } - spin_unlock(&ns->ns_lock); -} - -/** - * Helper to destroy a locked lock. - * - * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock - * Must be called with l_lock and lr_lock held. - * - * Does not actually free the lock data, but rather marks the lock as - * destroyed by setting l_destroyed field in the lock to 1. Destroys a - * handle->lock association too, so that the lock can no longer be found - * and removes the lock from LRU list. 
Actual lock freeing occurs when - * last lock reference goes away. - * - * Original comment (of some historical value): - * This used to have a 'strict' flag, which recovery would use to mark an - * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I - * shall explain why it's gone: with the new hash table scheme, once you call - * ldlm_lock_destroy, you can never drop your final references on this lock. - * Because it's not in the hash table anymore. -phil - */ -static int ldlm_lock_destroy_internal(struct ldlm_lock *lock) -{ - if (lock->l_readers || lock->l_writers) { - LDLM_ERROR(lock, "lock still has references"); - LBUG(); - } - - if (!list_empty(&lock->l_res_link)) { - LDLM_ERROR(lock, "lock still on resource"); - LBUG(); - } - - if (ldlm_is_destroyed(lock)) { - LASSERT(list_empty(&lock->l_lru)); - return 0; - } - ldlm_set_destroyed(lock); - - ldlm_lock_remove_from_lru(lock); - class_handle_unhash(&lock->l_handle); - - return 1; -} - -/** - * Destroys a LDLM lock \a lock. Performs necessary locking first. - */ -static void ldlm_lock_destroy(struct ldlm_lock *lock) -{ - int first; - - lock_res_and_lock(lock); - first = ldlm_lock_destroy_internal(lock); - unlock_res_and_lock(lock); - - /* drop reference from hashtable only for first destroy */ - if (first) { - lu_ref_del(&lock->l_reference, "hash", lock); - LDLM_LOCK_RELEASE(lock); - } -} - -/** - * Destroys a LDLM lock \a lock that is already locked. 
- */ -void ldlm_lock_destroy_nolock(struct ldlm_lock *lock) -{ - int first; - - first = ldlm_lock_destroy_internal(lock); - /* drop reference from hashtable only for first destroy */ - if (first) { - lu_ref_del(&lock->l_reference, "hash", lock); - LDLM_LOCK_RELEASE(lock); - } -} - -/* this is called by portals_handle2object with the handle lock taken */ -static void lock_handle_addref(void *lock) -{ - LDLM_LOCK_GET((struct ldlm_lock *)lock); -} - -static void lock_handle_free(void *lock, int size) -{ - LASSERT(size == sizeof(struct ldlm_lock)); - kmem_cache_free(ldlm_lock_slab, lock); -} - -static struct portals_handle_ops lock_handle_ops = { - .hop_addref = lock_handle_addref, - .hop_free = lock_handle_free, -}; - -/** - * - * Allocate and initialize new lock structure. - * - * usage: pass in a resource on which you have done ldlm_resource_get - * new lock will take over the refcount. - * returns: lock with refcount 2 - one for current caller and one for remote - */ -static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) -{ - struct ldlm_lock *lock; - - LASSERT(resource); - - lock = kmem_cache_zalloc(ldlm_lock_slab, GFP_NOFS); - if (!lock) - return NULL; - - spin_lock_init(&lock->l_lock); - lock->l_resource = resource; - lu_ref_add(&resource->lr_reference, "lock", lock); - - atomic_set(&lock->l_refc, 2); - INIT_LIST_HEAD(&lock->l_res_link); - INIT_LIST_HEAD(&lock->l_lru); - INIT_LIST_HEAD(&lock->l_pending_chain); - INIT_LIST_HEAD(&lock->l_bl_ast); - INIT_LIST_HEAD(&lock->l_cp_ast); - INIT_LIST_HEAD(&lock->l_rk_ast); - init_waitqueue_head(&lock->l_waitq); - lock->l_blocking_lock = NULL; - INIT_LIST_HEAD(&lock->l_sl_mode); - INIT_LIST_HEAD(&lock->l_sl_policy); - - lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats, - LDLM_NSS_LOCKS); - INIT_LIST_HEAD(&lock->l_handle.h_link); - class_handle_hash(&lock->l_handle, &lock_handle_ops); - - lu_ref_init(&lock->l_reference); - lu_ref_add(&lock->l_reference, "hash", lock); - lock->l_callback_timeout = 0; 
- -#if LUSTRE_TRACKS_LOCK_EXP_REFS - INIT_LIST_HEAD(&lock->l_exp_refs_link); - lock->l_exp_refs_nr = 0; - lock->l_exp_refs_target = NULL; -#endif - - return lock; -} - -/** - * Moves LDLM lock \a lock to another resource. - * This is used on client when server returns some other lock than requested - * (typically as a result of intent operation) - */ -int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, - const struct ldlm_res_id *new_resid) -{ - struct ldlm_resource *oldres = lock->l_resource; - struct ldlm_resource *newres; - int type; - - lock_res_and_lock(lock); - if (memcmp(new_resid, &lock->l_resource->lr_name, - sizeof(lock->l_resource->lr_name)) == 0) { - /* Nothing to do */ - unlock_res_and_lock(lock); - return 0; - } - - LASSERT(new_resid->name[0] != 0); - - /* This function assumes that the lock isn't on any lists */ - LASSERT(list_empty(&lock->l_res_link)); - - type = oldres->lr_type; - unlock_res_and_lock(lock); - - newres = ldlm_resource_get(ns, NULL, new_resid, type, 1); - if (IS_ERR(newres)) - return PTR_ERR(newres); - - lu_ref_add(&newres->lr_reference, "lock", lock); - /* - * To flip the lock from the old to the new resource, lock, oldres and - * newres have to be locked. Resource spin-locks are nested within - * lock->l_lock, and are taken in the memory address order to avoid - * dead-locks. - */ - spin_lock(&lock->l_lock); - oldres = lock->l_resource; - if (oldres < newres) { - lock_res(oldres); - lock_res_nested(newres, LRT_NEW); - } else { - lock_res(newres); - lock_res_nested(oldres, LRT_NEW); - } - LASSERT(memcmp(new_resid, &oldres->lr_name, - sizeof(oldres->lr_name)) != 0); - lock->l_resource = newres; - unlock_res(oldres); - unlock_res_and_lock(lock); - - /* ...and the flowers are still standing! */ - lu_ref_del(&oldres->lr_reference, "lock", lock); - ldlm_resource_putref(oldres); - - return 0; -} - -/** \defgroup ldlm_handles LDLM HANDLES - * Ways to get hold of locks without any addresses. 
- * @{ - */ - -/** - * Fills in handle for LDLM lock \a lock into supplied \a lockh - * Does not take any references. - */ -void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh) -{ - lockh->cookie = lock->l_handle.h_cookie; -} -EXPORT_SYMBOL(ldlm_lock2handle); - -/** - * Obtain a lock reference by handle. - * - * if \a flags: atomically get the lock and set the flags. - * Return NULL if flag already set - */ -struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, - __u64 flags) -{ - struct ldlm_lock *lock; - - LASSERT(handle); - - lock = class_handle2object(handle->cookie, NULL); - if (!lock) - return NULL; - - if (lock->l_export && lock->l_export->exp_failed) { - CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n", - lock, lock->l_export); - LDLM_LOCK_PUT(lock); - return NULL; - } - - /* It's unlikely but possible that someone marked the lock as - * destroyed after we did handle2object on it - */ - if (flags == 0 && !ldlm_is_destroyed(lock)) { - lu_ref_add(&lock->l_reference, "handle", current); - return lock; - } - - lock_res_and_lock(lock); - - LASSERT(lock->l_resource); - - lu_ref_add_atomic(&lock->l_reference, "handle", current); - if (unlikely(ldlm_is_destroyed(lock))) { - unlock_res_and_lock(lock); - CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock); - LDLM_LOCK_PUT(lock); - return NULL; - } - - if (flags) { - if (lock->l_flags & flags) { - unlock_res_and_lock(lock); - LDLM_LOCK_PUT(lock); - return NULL; - } - - lock->l_flags |= flags; - } - - unlock_res_and_lock(lock); - return lock; -} -EXPORT_SYMBOL(__ldlm_handle2lock); -/** @} ldlm_handles */ - -/** - * Fill in "on the wire" representation for given LDLM lock into supplied - * lock descriptor \a desc structure. 
- */ -void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) -{ - ldlm_res2desc(lock->l_resource, &desc->l_resource); - desc->l_req_mode = lock->l_req_mode; - desc->l_granted_mode = lock->l_granted_mode; - ldlm_convert_policy_to_wire(lock->l_resource->lr_type, - &lock->l_policy_data, - &desc->l_policy_data); -} - -/** - * Add a lock to list of conflicting locks to send AST to. - * - * Only add if we have not sent a blocking AST to the lock yet. - */ -static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, - struct list_head *work_list) -{ - if (!ldlm_is_ast_sent(lock)) { - LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); - ldlm_set_ast_sent(lock); - /* If the enqueuing client said so, tell the AST recipient to - * discard dirty data, rather than writing back. - */ - if (ldlm_is_ast_discard_data(new)) - ldlm_set_discard_data(lock); - LASSERT(list_empty(&lock->l_bl_ast)); - list_add(&lock->l_bl_ast, work_list); - LDLM_LOCK_GET(lock); - LASSERT(!lock->l_blocking_lock); - lock->l_blocking_lock = LDLM_LOCK_GET(new); - } -} - -/** - * Add a lock to list of just granted locks to send completion AST to. - */ -static void ldlm_add_cp_work_item(struct ldlm_lock *lock, - struct list_head *work_list) -{ - if (!ldlm_is_cp_reqd(lock)) { - ldlm_set_cp_reqd(lock); - LDLM_DEBUG(lock, "lock granted; sending completion AST."); - LASSERT(list_empty(&lock->l_cp_ast)); - list_add(&lock->l_cp_ast, work_list); - LDLM_LOCK_GET(lock); - } -} - -/** - * Aggregator function to add AST work items into a list. Determines - * what sort of an AST work needs to be done and calls the proper - * adding function. - * Must be called with lr_lock held. 
- */ -static void ldlm_add_ast_work_item(struct ldlm_lock *lock, - struct ldlm_lock *new, - struct list_head *work_list) -{ - check_res_locked(lock->l_resource); - if (new) - ldlm_add_bl_work_item(lock, new, work_list); - else - ldlm_add_cp_work_item(lock, work_list); -} - -/** - * Add specified reader/writer reference to LDLM lock with handle \a lockh. - * r/w reference type is determined by \a mode - * Calls ldlm_lock_addref_internal. - */ -void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode) -{ - struct ldlm_lock *lock; - - lock = ldlm_handle2lock(lockh); - LASSERTF(lock, "Non-existing lock: %llx\n", lockh->cookie); - ldlm_lock_addref_internal(lock, mode); - LDLM_LOCK_PUT(lock); -} -EXPORT_SYMBOL(ldlm_lock_addref); - -/** - * Helper function. - * Add specified reader/writer reference to LDLM lock \a lock. - * r/w reference type is determined by \a mode - * Removes lock from LRU if it is there. - * Assumes the LDLM lock is already locked. - */ -void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, - enum ldlm_mode mode) -{ - ldlm_lock_remove_from_lru(lock); - if (mode & (LCK_NL | LCK_CR | LCK_PR)) { - lock->l_readers++; - lu_ref_add_atomic(&lock->l_reference, "reader", lock); - } - if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) { - lock->l_writers++; - lu_ref_add_atomic(&lock->l_reference, "writer", lock); - } - LDLM_LOCK_GET(lock); - lu_ref_add_atomic(&lock->l_reference, "user", lock); - LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]); -} - -/** - * Attempts to add reader/writer reference to a lock with handle \a lockh, and - * fails if lock is already LDLM_FL_CBPENDING or destroyed. - * - * \retval 0 success, lock was addref-ed - * - * \retval -EAGAIN lock is being canceled. 
- */ -int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode) -{ - struct ldlm_lock *lock; - int result; - - result = -EAGAIN; - lock = ldlm_handle2lock(lockh); - if (lock) { - lock_res_and_lock(lock); - if (lock->l_readers != 0 || lock->l_writers != 0 || - !ldlm_is_cbpending(lock)) { - ldlm_lock_addref_internal_nolock(lock, mode); - result = 0; - } - unlock_res_and_lock(lock); - LDLM_LOCK_PUT(lock); - } - return result; -} -EXPORT_SYMBOL(ldlm_lock_addref_try); - -/** - * Add specified reader/writer reference to LDLM lock \a lock. - * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work. - * Only called for local locks. - */ -void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode) -{ - lock_res_and_lock(lock); - ldlm_lock_addref_internal_nolock(lock, mode); - unlock_res_and_lock(lock); -} - -/** - * Removes reader/writer reference for LDLM lock \a lock. - * Assumes LDLM lock is already locked. - * only called in ldlm_flock_destroy and for local locks. - * Does NOT add lock to LRU if no r/w references left to accommodate flock locks - * that cannot be placed in LRU. - */ -void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, - enum ldlm_mode mode) -{ - LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); - if (mode & (LCK_NL | LCK_CR | LCK_PR)) { - LASSERT(lock->l_readers > 0); - lu_ref_del(&lock->l_reference, "reader", lock); - lock->l_readers--; - } - if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) { - LASSERT(lock->l_writers > 0); - lu_ref_del(&lock->l_reference, "writer", lock); - lock->l_writers--; - } - - lu_ref_del(&lock->l_reference, "user", lock); - LDLM_LOCK_RELEASE(lock); /* matches the LDLM_LOCK_GET() in addref */ -} - -/** - * Removes reader/writer reference for LDLM lock \a lock. - * Locks LDLM lock first. 
- * If the lock is determined to be client lock on a client and r/w refcount - * drops to zero and the lock is not blocked, the lock is added to LRU lock - * on the namespace. - * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called. - */ -void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode) -{ - struct ldlm_namespace *ns; - - lock_res_and_lock(lock); - - ns = ldlm_lock_to_ns(lock); - - ldlm_lock_decref_internal_nolock(lock, mode); - - if ((ldlm_is_local(lock) || lock->l_req_mode == LCK_GROUP) && - !lock->l_readers && !lock->l_writers) { - /* If this is a local lock on a server namespace and this was - * the last reference, cancel the lock. - * - * Group locks are special: - * They must not go in LRU, but they are not called back - * like non-group locks, instead they are manually released. - * They have an l_writers reference which they keep until - * they are manually released, so we remove them when they have - * no more reader or writer references. - LU-6368 - */ - ldlm_set_cbpending(lock); - } - - if (!lock->l_readers && !lock->l_writers && ldlm_is_cbpending(lock)) { - /* If we received a blocked AST and this was the last reference, - * run the callback. - */ - LDLM_DEBUG(lock, "final decref done on cbpending lock"); - - LDLM_LOCK_GET(lock); /* dropped by bl thread */ - ldlm_lock_remove_from_lru(lock); - unlock_res_and_lock(lock); - - if (ldlm_is_fail_loc(lock)) - OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); - - if (ldlm_is_atomic_cb(lock) || - ldlm_bl_to_thread_lock(ns, NULL, lock) != 0) - ldlm_handle_bl_callback(ns, NULL, lock); - } else if (!lock->l_readers && !lock->l_writers && - !ldlm_is_no_lru(lock) && !ldlm_is_bl_ast(lock)) { - LDLM_DEBUG(lock, "add lock into lru list"); - - /* If this is a client-side namespace and this was the last - * reference, put it on the LRU. 
- */ - ldlm_lock_add_to_lru(lock); - unlock_res_and_lock(lock); - - if (ldlm_is_fail_loc(lock)) - OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); - - /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE - * are not supported by the server, otherwise, it is done on - * enqueue. - */ - if (!exp_connect_cancelset(lock->l_conn_export) && - !ns_connect_lru_resize(ns)) - ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0); - } else { - LDLM_DEBUG(lock, "do not add lock into lru list"); - unlock_res_and_lock(lock); - } -} - -/** - * Decrease reader/writer refcount for LDLM lock with handle \a lockh - */ -void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode) -{ - struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); - - LASSERTF(lock, "Non-existing lock: %#llx\n", lockh->cookie); - ldlm_lock_decref_internal(lock, mode); - LDLM_LOCK_PUT(lock); -} -EXPORT_SYMBOL(ldlm_lock_decref); - -/** - * Decrease reader/writer refcount for LDLM lock with handle - * \a lockh and mark it for subsequent cancellation once r/w refcount - * drops to zero instead of putting into LRU. - */ -void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, - enum ldlm_mode mode) -{ - struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); - - LASSERT(lock); - - LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); - lock_res_and_lock(lock); - ldlm_set_cbpending(lock); - unlock_res_and_lock(lock); - ldlm_lock_decref_internal(lock, mode); - LDLM_LOCK_PUT(lock); -} -EXPORT_SYMBOL(ldlm_lock_decref_and_cancel); - -struct sl_insert_point { - struct list_head *res_link; - struct list_head *mode_link; - struct list_head *policy_link; -}; - -/** - * Finds a position to insert the new lock into granted lock list. - * - * Used for locks eligible for skiplist optimization. 
- * - * Parameters: - * queue [input]: the granted list where search acts on; - * req [input]: the lock whose position to be located; - * prev [output]: positions within 3 lists to insert @req to - * Return Value: - * filled @prev - * NOTE: called by - * - ldlm_grant_lock_with_skiplist - */ -static void search_granted_lock(struct list_head *queue, - struct ldlm_lock *req, - struct sl_insert_point *prev) -{ - struct ldlm_lock *lock, *mode_end, *policy_end; - - list_for_each_entry(lock, queue, l_res_link) { - - mode_end = list_prev_entry(lock, l_sl_mode); - - if (lock->l_req_mode != req->l_req_mode) { - /* jump to last lock of mode group */ - lock = mode_end; - continue; - } - - /* suitable mode group is found */ - if (lock->l_resource->lr_type == LDLM_PLAIN) { - /* insert point is last lock of the mode group */ - prev->res_link = &mode_end->l_res_link; - prev->mode_link = &mode_end->l_sl_mode; - prev->policy_link = &req->l_sl_policy; - return; - } - - if (lock->l_resource->lr_type == LDLM_IBITS) { - for (;;) { - policy_end = - list_prev_entry(lock, l_sl_policy); - - if (lock->l_policy_data.l_inodebits.bits == - req->l_policy_data.l_inodebits.bits) { - /* insert point is last lock of - * the policy group - */ - prev->res_link = - &policy_end->l_res_link; - prev->mode_link = - &policy_end->l_sl_mode; - prev->policy_link = - &policy_end->l_sl_policy; - return; - } - - if (policy_end == mode_end) - /* done with mode group */ - break; - - /* go to next policy group within mode group */ - lock = list_next_entry(policy_end, l_res_link); - } /* loop over policy groups within the mode group */ - - /* insert point is last lock of the mode group, - * new policy group is started - */ - prev->res_link = &mode_end->l_res_link; - prev->mode_link = &mode_end->l_sl_mode; - prev->policy_link = &req->l_sl_policy; - return; - } - - LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock"); - LBUG(); - } - - /* insert point is last lock on the queue, - * new mode group and new policy 
group are started - */ - prev->res_link = queue->prev; - prev->mode_link = &req->l_sl_mode; - prev->policy_link = &req->l_sl_policy; -} - -/** - * Add a lock into resource granted list after a position described by - * \a prev. - */ -static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, - struct sl_insert_point *prev) -{ - struct ldlm_resource *res = lock->l_resource; - - check_res_locked(res); - - ldlm_resource_dump(D_INFO, res); - LDLM_DEBUG(lock, "About to add lock:"); - - if (ldlm_is_destroyed(lock)) { - CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n"); - return; - } - - LASSERT(list_empty(&lock->l_res_link)); - LASSERT(list_empty(&lock->l_sl_mode)); - LASSERT(list_empty(&lock->l_sl_policy)); - - /* - * lock->link == prev->link means lock is first starting the group. - * Don't re-add to itself to suppress kernel warnings. - */ - if (&lock->l_res_link != prev->res_link) - list_add(&lock->l_res_link, prev->res_link); - if (&lock->l_sl_mode != prev->mode_link) - list_add(&lock->l_sl_mode, prev->mode_link); - if (&lock->l_sl_policy != prev->policy_link) - list_add(&lock->l_sl_policy, prev->policy_link); -} - -/** - * Add a lock to granted list on a resource maintaining skiplist - * correctness. - */ -static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock) -{ - struct sl_insert_point prev; - - LASSERT(lock->l_req_mode == lock->l_granted_mode); - - search_granted_lock(&lock->l_resource->lr_granted, lock, &prev); - ldlm_granted_list_add_lock(lock, &prev); -} - -/** - * Perform lock granting bookkeeping. - * - * Includes putting the lock into granted list and updating lock mode. 
- * NOTE: called by - * - ldlm_lock_enqueue - * - ldlm_reprocess_queue - * - ldlm_lock_convert - * - * must be called with lr_lock held - */ -void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) -{ - struct ldlm_resource *res = lock->l_resource; - - check_res_locked(res); - - lock->l_granted_mode = lock->l_req_mode; - - if (work_list && lock->l_completion_ast) - ldlm_add_ast_work_item(lock, NULL, work_list); - - if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) { - ldlm_grant_lock_with_skiplist(lock); - } else if (res->lr_type == LDLM_EXTENT) { - ldlm_extent_add_lock(res, lock); - } else if (res->lr_type == LDLM_FLOCK) { - /* - * We should not add locks to granted list in - * the following cases: - * - this is an UNLOCK but not a real lock; - * - this is a TEST lock; - * - this is a F_CANCELLK lock (async flock has req_mode == 0) - * - this is a deadlock (flock cannot be granted) - */ - if (!lock->l_req_mode || lock->l_req_mode == LCK_NL || - ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock)) - return; - ldlm_resource_add_lock(res, &res->lr_granted, lock); - } else { - LBUG(); - } - - ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); -} - -/** - * Describe the overlap between two locks. itree_overlap_cb data. - */ -struct lock_match_data { - struct ldlm_lock *lmd_old; - struct ldlm_lock *lmd_lock; - enum ldlm_mode *lmd_mode; - union ldlm_policy_data *lmd_policy; - __u64 lmd_flags; - int lmd_unref; -}; - -/** - * Check if the given @lock meets the criteria for a match. - * A reference on the lock is taken if matched. - * - * \param lock test-against this lock - * \param data parameters - */ -static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data) -{ - union ldlm_policy_data *lpol = &lock->l_policy_data; - enum ldlm_mode match; - - if (lock == data->lmd_old) - return INTERVAL_ITER_STOP; - - /* - * Check if this lock can be matched. 
- * Used by LU-2919(exclusive open) for open lease lock - */ - if (ldlm_is_excl(lock)) - return INTERVAL_ITER_CONT; - - /* - * llite sometimes wants to match locks that will be - * canceled when their users drop, but we allow it to match - * if it passes in CBPENDING and the lock still has users. - * this is generally only going to be used by children - * whose parents already hold a lock so forward progress - * can still happen. - */ - if (ldlm_is_cbpending(lock) && - !(data->lmd_flags & LDLM_FL_CBPENDING)) - return INTERVAL_ITER_CONT; - - if (!data->lmd_unref && ldlm_is_cbpending(lock) && - !lock->l_readers && !lock->l_writers) - return INTERVAL_ITER_CONT; - - if (!(lock->l_req_mode & *data->lmd_mode)) - return INTERVAL_ITER_CONT; - match = lock->l_req_mode; - - switch (lock->l_resource->lr_type) { - case LDLM_EXTENT: - if (lpol->l_extent.start > data->lmd_policy->l_extent.start || - lpol->l_extent.end < data->lmd_policy->l_extent.end) - return INTERVAL_ITER_CONT; - - if (unlikely(match == LCK_GROUP) && - data->lmd_policy->l_extent.gid != LDLM_GID_ANY && - lpol->l_extent.gid != data->lmd_policy->l_extent.gid) - return INTERVAL_ITER_CONT; - break; - case LDLM_IBITS: - /* - * We match if we have existing lock with same or wider set - * of bits. - */ - if ((lpol->l_inodebits.bits & - data->lmd_policy->l_inodebits.bits) != - data->lmd_policy->l_inodebits.bits) - return INTERVAL_ITER_CONT; - break; - default: - break; - } - /* - * We match if we have existing lock with same or wider set - * of bits. 
- */ - if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE)) - return INTERVAL_ITER_CONT; - - if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock))) - return INTERVAL_ITER_CONT; - - if (data->lmd_flags & LDLM_FL_TEST_LOCK) { - LDLM_LOCK_GET(lock); - ldlm_lock_touch_in_lru(lock); - } else { - ldlm_lock_addref_internal_nolock(lock, match); - } - - *data->lmd_mode = match; - data->lmd_lock = lock; - - return INTERVAL_ITER_STOP; -} - -static enum interval_iter itree_overlap_cb(struct interval_node *in, void *args) -{ - struct ldlm_interval *node = to_ldlm_interval(in); - struct lock_match_data *data = args; - struct ldlm_lock *lock; - int rc; - - list_for_each_entry(lock, &node->li_group, l_sl_policy) { - rc = lock_matches(lock, data); - if (rc == INTERVAL_ITER_STOP) - return INTERVAL_ITER_STOP; - } - return INTERVAL_ITER_CONT; -} - -/** - * Search for a lock with given parameters in interval trees. - * - * \param res search for a lock in this resource - * \param data parameters - * - * \retval a referenced lock or NULL. - */ -static struct ldlm_lock *search_itree(struct ldlm_resource *res, - struct lock_match_data *data) -{ - struct interval_node_extent ext = { - .start = data->lmd_policy->l_extent.start, - .end = data->lmd_policy->l_extent.end - }; - int idx; - - for (idx = 0; idx < LCK_MODE_NUM; idx++) { - struct ldlm_interval_tree *tree = &res->lr_itree[idx]; - - if (!tree->lit_root) - continue; - - if (!(tree->lit_mode & *data->lmd_mode)) - continue; - - interval_search(tree->lit_root, &ext, - itree_overlap_cb, data); - } - return data->lmd_lock; -} - -/** - * Search for a lock with given properties in a queue. - * - * \param queue search for a lock in this queue - * \param data parameters - * - * \retval a referenced lock or NULL. 
- */ -static struct ldlm_lock *search_queue(struct list_head *queue, - struct lock_match_data *data) -{ - struct ldlm_lock *lock; - int rc; - - list_for_each_entry(lock, queue, l_res_link) { - rc = lock_matches(lock, data); - if (rc == INTERVAL_ITER_STOP) - return data->lmd_lock; - } - return NULL; -} - -void ldlm_lock_fail_match_locked(struct ldlm_lock *lock) -{ - if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) { - lock->l_flags |= LDLM_FL_FAIL_NOTIFIED; - wake_up_all(&lock->l_waitq); - } -} - -/** - * Mark lock as "matchable" by OST. - * - * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB - * is not yet valid. - * Assumes LDLM lock is already locked. - */ -void ldlm_lock_allow_match_locked(struct ldlm_lock *lock) -{ - ldlm_set_lvb_ready(lock); - wake_up_all(&lock->l_waitq); -} -EXPORT_SYMBOL(ldlm_lock_allow_match_locked); - -/** - * Mark lock as "matchable" by OST. - * Locks the lock and then \see ldlm_lock_allow_match_locked - */ -void ldlm_lock_allow_match(struct ldlm_lock *lock) -{ - lock_res_and_lock(lock); - ldlm_lock_allow_match_locked(lock); - unlock_res_and_lock(lock); -} -EXPORT_SYMBOL(ldlm_lock_allow_match); - -/** - * Attempt to find a lock with specified properties. - * - * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is - * set in \a flags - * - * Can be called in two ways: - * - * If 'ns' is NULL, then lockh describes an existing lock that we want to look - * for a duplicate of. - * - * Otherwise, all of the fields must be filled in, to match against. 
- * - * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the - * server (ie, connh is NULL) - * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted - * list will be considered - * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked - * to be canceled can still be matched as long as they still have reader - * or writer referneces - * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock, - * just tell us if we would have matched. - * - * \retval 1 if it finds an already-existing lock that is compatible; in this - * case, lockh is filled in with a addref()ed lock - * - * We also check security context, and if that fails we simply return 0 (to - * keep caller code unchanged), the context failure will be discovered by - * caller sometime later. - */ -enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *res_id, - enum ldlm_type type, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - struct lustre_handle *lockh, int unref) -{ - struct lock_match_data data = { - .lmd_old = NULL, - .lmd_lock = NULL, - .lmd_mode = &mode, - .lmd_policy = policy, - .lmd_flags = flags, - .lmd_unref = unref, - }; - struct ldlm_resource *res; - struct ldlm_lock *lock; - int rc = 0; - - if (!ns) { - data.lmd_old = ldlm_handle2lock(lockh); - LASSERT(data.lmd_old); - - ns = ldlm_lock_to_ns(data.lmd_old); - res_id = &data.lmd_old->l_resource->lr_name; - type = data.lmd_old->l_resource->lr_type; - *data.lmd_mode = data.lmd_old->l_req_mode; - } - - res = ldlm_resource_get(ns, NULL, res_id, type, 0); - if (IS_ERR(res)) { - LASSERT(!data.lmd_old); - return 0; - } - - LDLM_RESOURCE_ADDREF(res); - lock_res(res); - - if (res->lr_type == LDLM_EXTENT) - lock = search_itree(res, &data); - else - lock = search_queue(&res->lr_granted, &data); - if (lock) { - rc = 1; - goto out; - } - if (flags & LDLM_FL_BLOCK_GRANTED) { - rc = 0; - goto out; - } - lock = 
search_queue(&res->lr_waiting, &data); - if (lock) { - rc = 1; - goto out; - } -out: - unlock_res(res); - LDLM_RESOURCE_DELREF(res); - ldlm_resource_putref(res); - - if (lock) { - ldlm_lock2handle(lock, lockh); - if ((flags & LDLM_FL_LVB_READY) && !ldlm_is_lvb_ready(lock)) { - __u64 wait_flags = LDLM_FL_LVB_READY | - LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED; - - if (lock->l_completion_ast) { - int err = lock->l_completion_ast(lock, - LDLM_FL_WAIT_NOREPROC, - NULL); - if (err) { - if (flags & LDLM_FL_TEST_LOCK) - LDLM_LOCK_RELEASE(lock); - else - ldlm_lock_decref_internal(lock, - mode); - rc = 0; - goto out2; - } - } - - /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */ - wait_event_idle_timeout(lock->l_waitq, - lock->l_flags & wait_flags, - obd_timeout * HZ); - if (!ldlm_is_lvb_ready(lock)) { - if (flags & LDLM_FL_TEST_LOCK) - LDLM_LOCK_RELEASE(lock); - else - ldlm_lock_decref_internal(lock, mode); - rc = 0; - } - } - } - out2: - if (rc) { - LDLM_DEBUG(lock, "matched (%llu %llu)", - (type == LDLM_PLAIN || type == LDLM_IBITS) ? - res_id->name[2] : policy->l_extent.start, - (type == LDLM_PLAIN || type == LDLM_IBITS) ? - res_id->name[3] : policy->l_extent.end); - - /* check user's security context */ - if (lock->l_conn_export && - sptlrpc_import_check_ctx( - class_exp2cliimp(lock->l_conn_export))) { - if (!(flags & LDLM_FL_TEST_LOCK)) - ldlm_lock_decref_internal(lock, mode); - rc = 0; - } - - if (flags & LDLM_FL_TEST_LOCK) - LDLM_LOCK_RELEASE(lock); - - } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/ - LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res %llu/%llu (%llu %llu)", - ns, type, mode, res_id->name[0], - res_id->name[1], - (type == LDLM_PLAIN || type == LDLM_IBITS) ? - res_id->name[2] : policy->l_extent.start, - (type == LDLM_PLAIN || type == LDLM_IBITS) ? - res_id->name[3] : policy->l_extent.end); - } - if (data.lmd_old) - LDLM_LOCK_PUT(data.lmd_old); - - return rc ? 
mode : 0; -} -EXPORT_SYMBOL(ldlm_lock_match); - -enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh, - __u64 *bits) -{ - struct ldlm_lock *lock; - enum ldlm_mode mode = 0; - - lock = ldlm_handle2lock(lockh); - if (lock) { - lock_res_and_lock(lock); - if (LDLM_HAVE_MASK(lock, GONE)) - goto out; - - if (ldlm_is_cbpending(lock) && - lock->l_readers == 0 && lock->l_writers == 0) - goto out; - - if (bits) - *bits = lock->l_policy_data.l_inodebits.bits; - mode = lock->l_granted_mode; - ldlm_lock_addref_internal_nolock(lock, mode); - } - -out: - if (lock) { - unlock_res_and_lock(lock); - LDLM_LOCK_PUT(lock); - } - return mode; -} -EXPORT_SYMBOL(ldlm_revalidate_lock_handle); - -/** The caller must guarantee that the buffer is large enough. */ -int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, - enum req_location loc, void *data, int size) -{ - void *lvb; - - LASSERT(data); - LASSERT(size >= 0); - - switch (lock->l_lvb_type) { - case LVB_T_OST: - if (size == sizeof(struct ost_lvb)) { - if (loc == RCL_CLIENT) - lvb = req_capsule_client_swab_get(pill, - &RMF_DLM_LVB, - lustre_swab_ost_lvb); - else - lvb = req_capsule_server_swab_get(pill, - &RMF_DLM_LVB, - lustre_swab_ost_lvb); - if (unlikely(!lvb)) { - LDLM_ERROR(lock, "no LVB"); - return -EPROTO; - } - - memcpy(data, lvb, size); - } else if (size == sizeof(struct ost_lvb_v1)) { - struct ost_lvb *olvb = data; - - if (loc == RCL_CLIENT) - lvb = req_capsule_client_swab_get(pill, - &RMF_DLM_LVB, - lustre_swab_ost_lvb_v1); - else - lvb = req_capsule_server_sized_swab_get(pill, - &RMF_DLM_LVB, size, - lustre_swab_ost_lvb_v1); - if (unlikely(!lvb)) { - LDLM_ERROR(lock, "no LVB"); - return -EPROTO; - } - - memcpy(data, lvb, size); - olvb->lvb_mtime_ns = 0; - olvb->lvb_atime_ns = 0; - olvb->lvb_ctime_ns = 0; - } else { - LDLM_ERROR(lock, "Replied unexpected ost LVB size %d", - size); - return -EINVAL; - } - break; - case LVB_T_LQUOTA: - if (size == sizeof(struct lquota_lvb)) { - if (loc == 
RCL_CLIENT) - lvb = req_capsule_client_swab_get(pill, - &RMF_DLM_LVB, - lustre_swab_lquota_lvb); - else - lvb = req_capsule_server_swab_get(pill, - &RMF_DLM_LVB, - lustre_swab_lquota_lvb); - if (unlikely(!lvb)) { - LDLM_ERROR(lock, "no LVB"); - return -EPROTO; - } - - memcpy(data, lvb, size); - } else { - LDLM_ERROR(lock, - "Replied unexpected lquota LVB size %d", - size); - return -EINVAL; - } - break; - case LVB_T_LAYOUT: - if (size == 0) - break; - - if (loc == RCL_CLIENT) - lvb = req_capsule_client_get(pill, &RMF_DLM_LVB); - else - lvb = req_capsule_server_get(pill, &RMF_DLM_LVB); - if (unlikely(!lvb)) { - LDLM_ERROR(lock, "no LVB"); - return -EPROTO; - } - - memcpy(data, lvb, size); - break; - default: - LDLM_ERROR(lock, "Unknown LVB type: %d", lock->l_lvb_type); - dump_stack(); - return -EINVAL; - } - - return 0; -} - -/** - * Create and fill in new LDLM lock with specified properties. - * Returns a referenced lock - */ -struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, - const struct ldlm_res_id *res_id, - enum ldlm_type type, - enum ldlm_mode mode, - const struct ldlm_callback_suite *cbs, - void *data, __u32 lvb_len, - enum lvb_type lvb_type) -{ - struct ldlm_lock *lock; - struct ldlm_resource *res; - int rc; - - res = ldlm_resource_get(ns, NULL, res_id, type, 1); - if (IS_ERR(res)) - return ERR_CAST(res); - - lock = ldlm_lock_new(res); - if (!lock) { - ldlm_resource_putref(res); - return ERR_PTR(-ENOMEM); - } - - lock->l_req_mode = mode; - lock->l_ast_data = data; - lock->l_pid = current->pid; - if (cbs) { - lock->l_blocking_ast = cbs->lcs_blocking; - lock->l_completion_ast = cbs->lcs_completion; - lock->l_glimpse_ast = cbs->lcs_glimpse; - } - - lock->l_tree_node = NULL; - /* if this is the extent lock, allocate the interval tree node */ - if (type == LDLM_EXTENT) { - if (!ldlm_interval_alloc(lock)) { - rc = -ENOMEM; - goto out; - } - } - - if (lvb_len) { - lock->l_lvb_len = lvb_len; - lock->l_lvb_data = kzalloc(lvb_len, GFP_NOFS); - if 
(!lock->l_lvb_data) { - rc = -ENOMEM; - goto out; - } - } - - lock->l_lvb_type = lvb_type; - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK)) { - rc = -ENOENT; - goto out; - } - - return lock; - -out: - ldlm_lock_destroy(lock); - LDLM_LOCK_RELEASE(lock); - return ERR_PTR(rc); -} - - - -/** - * Enqueue (request) a lock. - * On the client this is called from ldlm_cli_enqueue_fini - * after we already got an initial reply from the server with some status. - * - * Does not block. As a result of enqueue the lock would be put - * into granted or waiting list. - */ -enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns, - struct ldlm_lock **lockp, - void *cookie, __u64 *flags) -{ - struct ldlm_lock *lock = *lockp; - struct ldlm_resource *res = lock->l_resource; - - lock_res_and_lock(lock); - if (lock->l_req_mode == lock->l_granted_mode) { - /* The server returned a blocked lock, but it was granted - * before we got a chance to actually enqueue it. We don't - * need to do anything else. - */ - *flags &= ~LDLM_FL_BLOCKED_MASK; - goto out; - } - - ldlm_resource_unlink_lock(lock); - - /* Cannot happen unless on the server */ - if (res->lr_type == LDLM_EXTENT && !lock->l_tree_node) - LBUG(); - - /* Some flags from the enqueue want to make it into the AST, via the - * lock's l_flags. - */ - if (*flags & LDLM_FL_AST_DISCARD_DATA) - ldlm_set_ast_discard_data(lock); - if (*flags & LDLM_FL_TEST_LOCK) - ldlm_set_test_lock(lock); - - /* - * This distinction between local lock trees is very important; a client - * namespace only has information about locks taken by that client, and - * thus doesn't have enough information to decide for itself if it can - * be granted (below). In this case, we do exactly what the server - * tells us to do, as dictated by the 'flags'. 
- */ - if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED)) - ldlm_resource_add_lock(res, &res->lr_waiting, lock); - else - ldlm_grant_lock(lock, NULL); - -out: - unlock_res_and_lock(lock); - return ELDLM_OK; -} - -/** - * Process a call to blocking AST callback for a lock in ast_work list - */ -static int -ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) -{ - struct ldlm_cb_set_arg *arg = opaq; - struct ldlm_lock_desc d; - int rc; - struct ldlm_lock *lock; - - if (list_empty(arg->list)) - return -ENOENT; - - lock = list_first_entry(arg->list, struct ldlm_lock, l_bl_ast); - - /* nobody should touch l_bl_ast */ - lock_res_and_lock(lock); - list_del_init(&lock->l_bl_ast); - - LASSERT(ldlm_is_ast_sent(lock)); - LASSERT(lock->l_bl_ast_run == 0); - LASSERT(lock->l_blocking_lock); - lock->l_bl_ast_run++; - unlock_res_and_lock(lock); - - ldlm_lock2desc(lock->l_blocking_lock, &d); - - rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING); - LDLM_LOCK_RELEASE(lock->l_blocking_lock); - lock->l_blocking_lock = NULL; - LDLM_LOCK_RELEASE(lock); - - return rc; -} - -/** - * Process a call to completion AST callback for a lock in ast_work list - */ -static int -ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) -{ - struct ldlm_cb_set_arg *arg = opaq; - int rc = 0; - struct ldlm_lock *lock; - ldlm_completion_callback completion_callback; - - if (list_empty(arg->list)) - return -ENOENT; - - lock = list_first_entry(arg->list, struct ldlm_lock, l_cp_ast); - - /* It's possible to receive a completion AST before we've set - * the l_completion_ast pointer: either because the AST arrived - * before the reply, or simply because there's a small race - * window between receiving the reply and finishing the local - * enqueue. 
(bug 842) - * - * This can't happen with the blocking_ast, however, because we - * will never call the local blocking_ast until we drop our - * reader/writer reference, which we won't do until we get the - * reply and finish enqueueing. - */ - - /* nobody should touch l_cp_ast */ - lock_res_and_lock(lock); - list_del_init(&lock->l_cp_ast); - LASSERT(ldlm_is_cp_reqd(lock)); - /* save l_completion_ast since it can be changed by - * mds_intent_policy(), see bug 14225 - */ - completion_callback = lock->l_completion_ast; - ldlm_clear_cp_reqd(lock); - unlock_res_and_lock(lock); - - if (completion_callback) - rc = completion_callback(lock, 0, (void *)arg); - LDLM_LOCK_RELEASE(lock); - - return rc; -} - -/** - * Process a call to revocation AST callback for a lock in ast_work list - */ -static int -ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) -{ - struct ldlm_cb_set_arg *arg = opaq; - struct ldlm_lock_desc desc; - int rc; - struct ldlm_lock *lock; - - if (list_empty(arg->list)) - return -ENOENT; - - lock = list_first_entry(arg->list, struct ldlm_lock, l_rk_ast); - list_del_init(&lock->l_rk_ast); - - /* the desc just pretend to exclusive */ - ldlm_lock2desc(lock, &desc); - desc.l_req_mode = LCK_EX; - desc.l_granted_mode = 0; - - rc = lock->l_blocking_ast(lock, &desc, (void *)arg, LDLM_CB_BLOCKING); - LDLM_LOCK_RELEASE(lock); - - return rc; -} - -/** - * Process a call to glimpse AST callback for a lock in ast_work list - */ -static int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) -{ - struct ldlm_cb_set_arg *arg = opaq; - struct ldlm_glimpse_work *gl_work; - struct ldlm_lock *lock; - int rc = 0; - - if (list_empty(arg->list)) - return -ENOENT; - - gl_work = list_first_entry(arg->list, struct ldlm_glimpse_work, - gl_list); - list_del_init(&gl_work->gl_list); - - lock = gl_work->gl_lock; - - /* transfer the glimpse descriptor to ldlm_cb_set_arg */ - arg->gl_desc = gl_work->gl_desc; - - /* invoke the actual glimpse callback */ 
- if (lock->l_glimpse_ast(lock, (void *)arg) == 0) - rc = 1; - - LDLM_LOCK_RELEASE(lock); - - if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0) - kfree(gl_work); - - return rc; -} - -/** - * Process list of locks in need of ASTs being sent. - * - * Used on server to send multiple ASTs together instead of sending one by - * one. - */ -int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, - enum ldlm_desc_ast_t ast_type) -{ - struct ldlm_cb_set_arg *arg; - set_producer_func work_ast_lock; - int rc; - - if (list_empty(rpc_list)) - return 0; - - arg = kzalloc(sizeof(*arg), GFP_NOFS); - if (!arg) - return -ENOMEM; - - atomic_set(&arg->restart, 0); - arg->list = rpc_list; - - switch (ast_type) { - case LDLM_WORK_BL_AST: - arg->type = LDLM_BL_CALLBACK; - work_ast_lock = ldlm_work_bl_ast_lock; - break; - case LDLM_WORK_CP_AST: - arg->type = LDLM_CP_CALLBACK; - work_ast_lock = ldlm_work_cp_ast_lock; - break; - case LDLM_WORK_REVOKE_AST: - arg->type = LDLM_BL_CALLBACK; - work_ast_lock = ldlm_work_revoke_ast_lock; - break; - case LDLM_WORK_GL_AST: - arg->type = LDLM_GL_CALLBACK; - work_ast_lock = ldlm_work_gl_ast_lock; - break; - default: - LBUG(); - } - - /* We create a ptlrpc request set with flow control extension. - * This request set will use the work_ast_lock function to produce new - * requests and will send a new request each time one completes in order - * to keep the number of requests in flight to ns_max_parallel_ast - */ - arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX, - work_ast_lock, arg); - if (!arg->set) { - rc = -ENOMEM; - goto out; - } - - ptlrpc_set_wait(arg->set); - ptlrpc_set_destroy(arg->set); - - rc = atomic_read(&arg->restart) ? 
-ERESTART : 0; - goto out; -out: - kfree(arg); - return rc; -} - -static bool is_bl_done(struct ldlm_lock *lock) -{ - bool bl_done = true; - - if (!ldlm_is_bl_done(lock)) { - lock_res_and_lock(lock); - bl_done = ldlm_is_bl_done(lock); - unlock_res_and_lock(lock); - } - - return bl_done; -} - -/** - * Helper function to call blocking AST for LDLM lock \a lock in a - * "cancelling" mode. - */ -void ldlm_cancel_callback(struct ldlm_lock *lock) -{ - check_res_locked(lock->l_resource); - if (!ldlm_is_cancel(lock)) { - ldlm_set_cancel(lock); - if (lock->l_blocking_ast) { - unlock_res_and_lock(lock); - lock->l_blocking_ast(lock, NULL, lock->l_ast_data, - LDLM_CB_CANCELING); - lock_res_and_lock(lock); - } else { - LDLM_DEBUG(lock, "no blocking ast"); - } - /* only canceller can set bl_done bit */ - ldlm_set_bl_done(lock); - wake_up_all(&lock->l_waitq); - } else if (!ldlm_is_bl_done(lock)) { - /* - * The lock is guaranteed to have been canceled once - * returning from this function. - */ - unlock_res_and_lock(lock); - wait_event_idle(lock->l_waitq, is_bl_done(lock)); - lock_res_and_lock(lock); - } -} - -/** - * Remove skiplist-enabled LDLM lock \a req from granted list - */ -void ldlm_unlink_lock_skiplist(struct ldlm_lock *req) -{ - if (req->l_resource->lr_type != LDLM_PLAIN && - req->l_resource->lr_type != LDLM_IBITS) - return; - - list_del_init(&req->l_sl_policy); - list_del_init(&req->l_sl_mode); -} - -/** - * Attempts to cancel LDLM lock \a lock that has no reader/writer references. - */ -void ldlm_lock_cancel(struct ldlm_lock *lock) -{ - struct ldlm_resource *res; - struct ldlm_namespace *ns; - - lock_res_and_lock(lock); - - res = lock->l_resource; - ns = ldlm_res_to_ns(res); - - /* Please do not, no matter how tempting, remove this LBUG without - * talking to me first. -phik - */ - if (lock->l_readers || lock->l_writers) { - LDLM_ERROR(lock, "lock still has references"); - LBUG(); - } - - /* Releases cancel callback. 
*/ - ldlm_cancel_callback(lock); - - ldlm_resource_unlink_lock(lock); - ldlm_lock_destroy_nolock(lock); - - if (lock->l_granted_mode == lock->l_req_mode) - ldlm_pool_del(&ns->ns_pool, lock); - - /* Make sure we will not be called again for same lock what is possible - * if not to zero out lock->l_granted_mode - */ - lock->l_granted_mode = LCK_MINMODE; - unlock_res_and_lock(lock); -} -EXPORT_SYMBOL(ldlm_lock_cancel); - -/** - * Set opaque data into the lock that only makes sense to upper layer. - */ -int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data) -{ - struct ldlm_lock *lock = ldlm_handle2lock(lockh); - int rc = -EINVAL; - - if (lock) { - if (!lock->l_ast_data) - lock->l_ast_data = data; - if (lock->l_ast_data == data) - rc = 0; - LDLM_LOCK_PUT(lock); - } - return rc; -} -EXPORT_SYMBOL(ldlm_lock_set_data); - -struct export_cl_data { - struct obd_export *ecl_exp; - int ecl_loop; -}; - -/** - * Print lock with lock handle \a lockh description into debug log. - * - * Used when printing all locks on a resource for debug purposes. - */ -void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh) -{ - struct ldlm_lock *lock; - - if (!((libcfs_debug | D_ERROR) & level)) - return; - - lock = ldlm_handle2lock(lockh); - if (!lock) - return; - - LDLM_DEBUG_LIMIT(level, lock, "###"); - - LDLM_LOCK_PUT(lock); -} -EXPORT_SYMBOL(ldlm_lock_dump_handle); - -/** - * Print lock information with custom message into debug log. - * Helper function. - */ -void _ldlm_lock_debug(struct ldlm_lock *lock, - struct libcfs_debug_msg_data *msgdata, - const char *fmt, ...) 
-{ - va_list args; - struct obd_export *exp = lock->l_export; - struct ldlm_resource *resource = lock->l_resource; - char *nid = "local"; - - va_start(args, fmt); - - if (exp && exp->exp_connection) { - nid = libcfs_nid2str(exp->exp_connection->c_peer.nid); - } else if (exp && exp->exp_obd) { - struct obd_import *imp = exp->exp_obd->u.cli.cl_import; - - nid = libcfs_nid2str(imp->imp_connection->c_peer.nid); - } - - if (!resource) { - libcfs_debug_vmsg2(msgdata, fmt, args, - " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", - lock, - lock->l_handle.h_cookie, - atomic_read(&lock->l_refc), - lock->l_readers, lock->l_writers, - ldlm_lockname[lock->l_granted_mode], - ldlm_lockname[lock->l_req_mode], - lock->l_flags, nid, - lock->l_remote_handle.cookie, - exp ? atomic_read(&exp->exp_refcount) : -99, - lock->l_pid, lock->l_callback_timeout, - lock->l_lvb_type); - va_end(args); - return; - } - - switch (resource->lr_type) { - case LDLM_EXTENT: - libcfs_debug_vmsg2(msgdata, fmt, args, - " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", - ldlm_lock_to_ns_name(lock), lock, - lock->l_handle.h_cookie, - atomic_read(&lock->l_refc), - lock->l_readers, lock->l_writers, - ldlm_lockname[lock->l_granted_mode], - ldlm_lockname[lock->l_req_mode], - PLDLMRES(resource), - atomic_read(&resource->lr_refcount), - ldlm_typename[resource->lr_type], - lock->l_policy_data.l_extent.start, - lock->l_policy_data.l_extent.end, - lock->l_req_extent.start, - lock->l_req_extent.end, - lock->l_flags, nid, - lock->l_remote_handle.cookie, - exp ? 
atomic_read(&exp->exp_refcount) : -99, - lock->l_pid, lock->l_callback_timeout, - lock->l_lvb_type); - break; - - case LDLM_FLOCK: - libcfs_debug_vmsg2(msgdata, fmt, args, - " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n", - ldlm_lock_to_ns_name(lock), lock, - lock->l_handle.h_cookie, - atomic_read(&lock->l_refc), - lock->l_readers, lock->l_writers, - ldlm_lockname[lock->l_granted_mode], - ldlm_lockname[lock->l_req_mode], - PLDLMRES(resource), - atomic_read(&resource->lr_refcount), - ldlm_typename[resource->lr_type], - lock->l_policy_data.l_flock.pid, - lock->l_policy_data.l_flock.start, - lock->l_policy_data.l_flock.end, - lock->l_flags, nid, - lock->l_remote_handle.cookie, - exp ? atomic_read(&exp->exp_refcount) : -99, - lock->l_pid, lock->l_callback_timeout); - break; - - case LDLM_IBITS: - libcfs_debug_vmsg2(msgdata, fmt, args, - " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", - ldlm_lock_to_ns_name(lock), - lock, lock->l_handle.h_cookie, - atomic_read(&lock->l_refc), - lock->l_readers, lock->l_writers, - ldlm_lockname[lock->l_granted_mode], - ldlm_lockname[lock->l_req_mode], - PLDLMRES(resource), - lock->l_policy_data.l_inodebits.bits, - atomic_read(&resource->lr_refcount), - ldlm_typename[resource->lr_type], - lock->l_flags, nid, - lock->l_remote_handle.cookie, - exp ? 
atomic_read(&exp->exp_refcount) : -99, - lock->l_pid, lock->l_callback_timeout, - lock->l_lvb_type); - break; - - default: - libcfs_debug_vmsg2(msgdata, fmt, args, - " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", - ldlm_lock_to_ns_name(lock), - lock, lock->l_handle.h_cookie, - atomic_read(&lock->l_refc), - lock->l_readers, lock->l_writers, - ldlm_lockname[lock->l_granted_mode], - ldlm_lockname[lock->l_req_mode], - PLDLMRES(resource), - atomic_read(&resource->lr_refcount), - ldlm_typename[resource->lr_type], - lock->l_flags, nid, - lock->l_remote_handle.cookie, - exp ? atomic_read(&exp->exp_refcount) : -99, - lock->l_pid, lock->l_callback_timeout, - lock->l_lvb_type); - break; - } - va_end(args); -} -EXPORT_SYMBOL(_ldlm_lock_debug); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c deleted file mode 100644 index 5963e90d0938..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ /dev/null @@ -1,1163 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ldlm/ldlm_lockd.c - * - * Author: Peter Braam - * Author: Phil Schwan - */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include -#include -#include -#include -#include "ldlm_internal.h" - -static int ldlm_num_threads; -module_param(ldlm_num_threads, int, 0444); -MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start"); - -static char *ldlm_cpts; -module_param(ldlm_cpts, charp, 0444); -MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on"); - -static struct mutex ldlm_ref_mutex; -static int ldlm_refcount; - -static struct kobject *ldlm_kobj; -struct kset *ldlm_ns_kset; -static struct kset *ldlm_svc_kset; - -struct ldlm_cb_async_args { - struct ldlm_cb_set_arg *ca_set_arg; - struct ldlm_lock *ca_lock; -}; - -/* LDLM state */ - -static struct ldlm_state *ldlm_state; - -#define ELT_STOPPED 0 -#define ELT_READY 1 -#define ELT_TERMINATE 2 - -struct ldlm_bl_pool { - spinlock_t blp_lock; - - /* - * blp_prio_list is used for callbacks that should be handled - * as a priority. It is used for LDLM_FL_DISCARD_DATA requests. - * see bug 13843 - */ - struct list_head blp_prio_list; - - /* - * blp_list is used for all other callbacks which are likely - * to take longer to process. 
- */ - struct list_head blp_list; - - wait_queue_head_t blp_waitq; - struct completion blp_comp; - atomic_t blp_num_threads; - atomic_t blp_busy_threads; - int blp_min_threads; - int blp_max_threads; -}; - -struct ldlm_bl_work_item { - struct list_head blwi_entry; - struct ldlm_namespace *blwi_ns; - struct ldlm_lock_desc blwi_ld; - struct ldlm_lock *blwi_lock; - struct list_head blwi_head; - int blwi_count; - struct completion blwi_comp; - enum ldlm_cancel_flags blwi_flags; - int blwi_mem_pressure; -}; - -/** - * Callback handler for receiving incoming blocking ASTs. - * - * This can only happen on client side. - */ -void ldlm_handle_bl_callback(struct ldlm_namespace *ns, - struct ldlm_lock_desc *ld, struct ldlm_lock *lock) -{ - int do_ast; - - LDLM_DEBUG(lock, "client blocking AST callback handler"); - - lock_res_and_lock(lock); - ldlm_set_cbpending(lock); - - if (ldlm_is_cancel_on_block(lock)) - ldlm_set_cancel(lock); - - do_ast = !lock->l_readers && !lock->l_writers; - unlock_res_and_lock(lock); - - if (do_ast) { - CDEBUG(D_DLMTRACE, - "Lock %p already unused, calling callback (%p)\n", lock, - lock->l_blocking_ast); - if (lock->l_blocking_ast) - lock->l_blocking_ast(lock, ld, lock->l_ast_data, - LDLM_CB_BLOCKING); - } else { - CDEBUG(D_DLMTRACE, - "Lock %p is referenced, will be cancelled later\n", - lock); - } - - LDLM_DEBUG(lock, "client blocking callback handler END"); - LDLM_LOCK_RELEASE(lock); -} - -/** - * Callback handler for receiving incoming completion ASTs. - * - * This only can happen on client side. 
- */ -static void ldlm_handle_cp_callback(struct ptlrpc_request *req, - struct ldlm_namespace *ns, - struct ldlm_request *dlm_req, - struct ldlm_lock *lock) -{ - int lvb_len; - LIST_HEAD(ast_list); - int rc = 0; - - LDLM_DEBUG(lock, "client completion callback handler START"); - - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) { - int to = HZ; - - while (to > 0) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(to); - if (lock->l_granted_mode == lock->l_req_mode || - ldlm_is_destroyed(lock)) - break; - } - } - - lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT); - if (lvb_len < 0) { - LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len); - rc = lvb_len; - goto out; - } else if (lvb_len > 0) { - if (lock->l_lvb_len > 0) { - /* for extent lock, lvb contains ost_lvb{}. */ - LASSERT(lock->l_lvb_data); - - if (unlikely(lock->l_lvb_len < lvb_len)) { - LDLM_ERROR(lock, - "Replied LVB is larger than expectation, expected = %d, replied = %d", - lock->l_lvb_len, lvb_len); - rc = -EINVAL; - goto out; - } - } else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has - * variable length - */ - void *lvb_data; - - lvb_data = kzalloc(lvb_len, GFP_NOFS); - if (!lvb_data) { - LDLM_ERROR(lock, "No memory: %d.\n", lvb_len); - rc = -ENOMEM; - goto out; - } - - lock_res_and_lock(lock); - LASSERT(!lock->l_lvb_data); - lock->l_lvb_type = LVB_T_LAYOUT; - lock->l_lvb_data = lvb_data; - lock->l_lvb_len = lvb_len; - unlock_res_and_lock(lock); - } - } - - lock_res_and_lock(lock); - if (ldlm_is_destroyed(lock) || - lock->l_granted_mode == lock->l_req_mode) { - /* bug 11300: the lock has already been granted */ - unlock_res_and_lock(lock); - LDLM_DEBUG(lock, "Double grant race happened"); - rc = 0; - goto out; - } - - /* If we receive the completion AST before the actual enqueue returned, - * then we might need to switch lock modes, resources, or extents. 
- */ - if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) { - lock->l_req_mode = dlm_req->lock_desc.l_granted_mode; - LDLM_DEBUG(lock, "completion AST, new lock mode"); - } - - if (lock->l_resource->lr_type != LDLM_PLAIN) { - ldlm_convert_policy_to_local(req->rq_export, - dlm_req->lock_desc.l_resource.lr_type, - &dlm_req->lock_desc.l_policy_data, - &lock->l_policy_data); - LDLM_DEBUG(lock, "completion AST, new policy data"); - } - - ldlm_resource_unlink_lock(lock); - if (memcmp(&dlm_req->lock_desc.l_resource.lr_name, - &lock->l_resource->lr_name, - sizeof(lock->l_resource->lr_name)) != 0) { - unlock_res_and_lock(lock); - rc = ldlm_lock_change_resource(ns, lock, - &dlm_req->lock_desc.l_resource.lr_name); - if (rc < 0) { - LDLM_ERROR(lock, "Failed to allocate resource"); - goto out; - } - LDLM_DEBUG(lock, "completion AST, new resource"); - CERROR("change resource!\n"); - lock_res_and_lock(lock); - } - - if (dlm_req->lock_flags & LDLM_FL_AST_SENT) { - /* BL_AST locks are not needed in LRU. - * Let ldlm_cancel_lru() be fast. 
- */ - ldlm_lock_remove_from_lru(lock); - lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; - LDLM_DEBUG(lock, "completion AST includes blocking AST"); - } - - if (lock->l_lvb_len > 0) { - rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT, - lock->l_lvb_data, lvb_len); - if (rc < 0) { - unlock_res_and_lock(lock); - goto out; - } - } - - ldlm_grant_lock(lock, &ast_list); - unlock_res_and_lock(lock); - - LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work"); - - /* Let Enqueue to call osc_lock_upcall() and initialize l_ast_data */ - OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2); - - ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST); - - LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)", - lock); - goto out; - -out: - if (rc < 0) { - lock_res_and_lock(lock); - ldlm_set_failed(lock); - unlock_res_and_lock(lock); - wake_up(&lock->l_waitq); - } - LDLM_LOCK_RELEASE(lock); -} - -/** - * Callback handler for receiving incoming glimpse ASTs. - * - * This only can happen on client side. After handling the glimpse AST - * we also consider dropping the lock here if it is unused locally for a - * long time. 
- */ -static void ldlm_handle_gl_callback(struct ptlrpc_request *req, - struct ldlm_namespace *ns, - struct ldlm_request *dlm_req, - struct ldlm_lock *lock) -{ - int rc = -ENOSYS; - - LDLM_DEBUG(lock, "client glimpse AST callback handler"); - - if (lock->l_glimpse_ast) - rc = lock->l_glimpse_ast(lock, req); - - if (req->rq_repmsg) { - ptlrpc_reply(req); - } else { - req->rq_status = rc; - ptlrpc_error(req); - } - - lock_res_and_lock(lock); - if (lock->l_granted_mode == LCK_PW && - !lock->l_readers && !lock->l_writers && - time_after(jiffies, - lock->l_last_used + 10 * HZ)) { - unlock_res_and_lock(lock); - if (ldlm_bl_to_thread_lock(ns, NULL, lock)) - ldlm_handle_bl_callback(ns, NULL, lock); - - return; - } - unlock_res_and_lock(lock); - LDLM_LOCK_RELEASE(lock); -} - -static int ldlm_callback_reply(struct ptlrpc_request *req, int rc) -{ - if (req->rq_no_reply) - return 0; - - req->rq_status = rc; - if (!req->rq_packed_final) { - rc = lustre_pack_reply(req, 1, NULL, NULL); - if (rc) - return rc; - } - return ptlrpc_reply(req); -} - -static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, - enum ldlm_cancel_flags cancel_flags) -{ - struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; - - spin_lock(&blp->blp_lock); - if (blwi->blwi_lock && ldlm_is_discard_data(blwi->blwi_lock)) { - /* add LDLM_FL_DISCARD_DATA requests to the priority list */ - list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list); - } else { - /* other blocking callbacks are added to the regular list */ - list_add_tail(&blwi->blwi_entry, &blp->blp_list); - } - spin_unlock(&blp->blp_lock); - - wake_up(&blp->blp_waitq); - - /* can not check blwi->blwi_flags as blwi could be already freed in - * LCF_ASYNC mode - */ - if (!(cancel_flags & LCF_ASYNC)) - wait_for_completion(&blwi->blwi_comp); - - return 0; -} - -static inline void init_blwi(struct ldlm_bl_work_item *blwi, - struct ldlm_namespace *ns, - struct ldlm_lock_desc *ld, - struct list_head *cancels, int count, - struct ldlm_lock *lock, - enum 
ldlm_cancel_flags cancel_flags) -{ - init_completion(&blwi->blwi_comp); - INIT_LIST_HEAD(&blwi->blwi_head); - - if (current->flags & PF_MEMALLOC) - blwi->blwi_mem_pressure = 1; - - blwi->blwi_ns = ns; - blwi->blwi_flags = cancel_flags; - if (ld) - blwi->blwi_ld = *ld; - if (count) { - list_add(&blwi->blwi_head, cancels); - list_del_init(cancels); - blwi->blwi_count = count; - } else { - blwi->blwi_lock = lock; - } -} - -/** - * Queues a list of locks \a cancels containing \a count locks - * for later processing by a blocking thread. If \a count is zero, - * then the lock referenced as \a lock is queued instead. - * - * The blocking thread would then call ->l_blocking_ast callback in the lock. - * If list addition fails an error is returned and caller is supposed to - * call ->l_blocking_ast itself. - */ -static int ldlm_bl_to_thread(struct ldlm_namespace *ns, - struct ldlm_lock_desc *ld, - struct ldlm_lock *lock, - struct list_head *cancels, int count, - enum ldlm_cancel_flags cancel_flags) -{ - if (cancels && count == 0) - return 0; - - if (cancel_flags & LCF_ASYNC) { - struct ldlm_bl_work_item *blwi; - - blwi = kzalloc(sizeof(*blwi), GFP_NOFS); - if (!blwi) - return -ENOMEM; - init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags); - - return __ldlm_bl_to_thread(blwi, cancel_flags); - } else { - /* if it is synchronous call do minimum mem alloc, as it could - * be triggered from kernel shrinker - */ - struct ldlm_bl_work_item blwi; - - memset(&blwi, 0, sizeof(blwi)); - init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags); - return __ldlm_bl_to_thread(&blwi, cancel_flags); - } -} - -int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, - struct ldlm_lock *lock) -{ - return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC); -} - -int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, - struct list_head *cancels, int count, - enum ldlm_cancel_flags cancel_flags) -{ - return ldlm_bl_to_thread(ns, ld, 
NULL, cancels, count, cancel_flags); -} - -int ldlm_bl_thread_wakeup(void) -{ - wake_up(&ldlm_state->ldlm_bl_pool->blp_waitq); - return 0; -} - -/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */ -static int ldlm_handle_setinfo(struct ptlrpc_request *req) -{ - struct obd_device *obd = req->rq_export->exp_obd; - char *key; - void *val; - int keylen, vallen; - int rc = -ENOSYS; - - DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name); - - req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO); - - key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); - if (!key) { - DEBUG_REQ(D_IOCTL, req, "no set_info key"); - return -EFAULT; - } - keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY, - RCL_CLIENT); - val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL); - if (!val) { - DEBUG_REQ(D_IOCTL, req, "no set_info val"); - return -EFAULT; - } - vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL, - RCL_CLIENT); - - /* We are responsible for swabbing contents of val */ - - if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) - /* Pass it on to mdc (the "export" in this case) */ - rc = obd_set_info_async(req->rq_svc_thread->t_env, - req->rq_export, - sizeof(KEY_HSM_COPYTOOL_SEND), - KEY_HSM_COPYTOOL_SEND, - vallen, val, NULL); - else - DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key); - - return rc; -} - -static inline void ldlm_callback_errmsg(struct ptlrpc_request *req, - const char *msg, int rc, - const struct lustre_handle *handle) -{ - DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req, - "%s: [nid %s] [rc %d] [lock %#llx]", - msg, libcfs_id2str(req->rq_peer), rc, - handle ? 
handle->cookie : 0); - if (req->rq_no_reply) - CWARN("No reply was sent, maybe cause bug 21636.\n"); - else if (rc) - CWARN("Send reply failed, maybe cause bug 21636.\n"); -} - -/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */ -static int ldlm_callback_handler(struct ptlrpc_request *req) -{ - struct ldlm_namespace *ns; - struct ldlm_request *dlm_req; - struct ldlm_lock *lock; - int rc; - - /* Requests arrive in sender's byte order. The ptlrpc service - * handler has already checked and, if necessary, byte-swapped the - * incoming request message body, but I am responsible for the - * message buffers. - */ - - /* do nothing for sec context finalize */ - if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI) - return 0; - - req_capsule_init(&req->rq_pill, req, RCL_SERVER); - - if (!req->rq_export) { - rc = ldlm_callback_reply(req, -ENOTCONN); - ldlm_callback_errmsg(req, "Operate on unconnected server", - rc, NULL); - return 0; - } - - LASSERT(req->rq_export->exp_obd); - - switch (lustre_msg_get_opc(req->rq_reqmsg)) { - case LDLM_BL_CALLBACK: - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET)) { - if (cfs_fail_err) - ldlm_callback_reply(req, -(int)cfs_fail_err); - return 0; - } - break; - case LDLM_CP_CALLBACK: - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET)) - return 0; - break; - case LDLM_GL_CALLBACK: - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET)) - return 0; - break; - case LDLM_SET_INFO: - rc = ldlm_handle_setinfo(req); - ldlm_callback_reply(req, rc); - return 0; - default: - CERROR("unknown opcode %u\n", - lustre_msg_get_opc(req->rq_reqmsg)); - ldlm_callback_reply(req, -EPROTO); - return 0; - } - - ns = req->rq_export->exp_obd->obd_namespace; - LASSERT(ns); - - req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK); - - dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - if (!dlm_req) { - rc = ldlm_callback_reply(req, -EPROTO); - ldlm_callback_errmsg(req, "Operate without parameter", rc, - NULL); - return 0; - } - 
- /* Force a known safe race, send a cancel to the server for a lock - * which the server has already started a blocking callback on. - */ - if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) && - lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) { - rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0); - if (rc < 0) - CERROR("ldlm_cli_cancel: %d\n", rc); - } - - lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0); - if (!lock) { - CDEBUG(D_DLMTRACE, - "callback on lock %#llx - lock disappeared\n", - dlm_req->lock_handle[0].cookie); - rc = ldlm_callback_reply(req, -EINVAL); - ldlm_callback_errmsg(req, "Operate with invalid parameter", rc, - &dlm_req->lock_handle[0]); - return 0; - } - - if (ldlm_is_fail_loc(lock) && - lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) - OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE); - - /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */ - lock_res_and_lock(lock); - lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags & - LDLM_FL_AST_MASK); - if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) { - /* If somebody cancels lock and cache is already dropped, - * or lock is failed before cp_ast received on client, - * we can tell the server we have no lock. Otherwise, we - * should send cancel after dropping the cache. - */ - if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) || - ldlm_is_failed(lock)) { - LDLM_DEBUG(lock, - "callback on lock %#llx - lock disappeared", - dlm_req->lock_handle[0].cookie); - unlock_res_and_lock(lock); - LDLM_LOCK_RELEASE(lock); - rc = ldlm_callback_reply(req, -EINVAL); - ldlm_callback_errmsg(req, "Operate on stale lock", rc, - &dlm_req->lock_handle[0]); - return 0; - } - /* BL_AST locks are not needed in LRU. - * Let ldlm_cancel_lru() be fast. 
- */ - ldlm_lock_remove_from_lru(lock); - ldlm_set_bl_ast(lock); - } - unlock_res_and_lock(lock); - - /* We want the ost thread to get this reply so that it can respond - * to ost requests (write cache writeback) that might be triggered - * in the callback. - * - * But we'd also like to be able to indicate in the reply that we're - * cancelling right now, because it's unused, or have an intent result - * in the reply, so we might have to push the responsibility for sending - * the reply down into the AST handlers, alas. - */ - - switch (lustre_msg_get_opc(req->rq_reqmsg)) { - case LDLM_BL_CALLBACK: - CDEBUG(D_INODE, "blocking ast\n"); - req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK); - if (!ldlm_is_cancel_on_block(lock)) { - rc = ldlm_callback_reply(req, 0); - if (req->rq_no_reply || rc) - ldlm_callback_errmsg(req, "Normal process", rc, - &dlm_req->lock_handle[0]); - } - if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock)) - ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock); - break; - case LDLM_CP_CALLBACK: - CDEBUG(D_INODE, "completion ast\n"); - req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK); - ldlm_callback_reply(req, 0); - ldlm_handle_cp_callback(req, ns, dlm_req, lock); - break; - case LDLM_GL_CALLBACK: - CDEBUG(D_INODE, "glimpse ast\n"); - req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK); - ldlm_handle_gl_callback(req, ns, dlm_req, lock); - break; - default: - LBUG(); /* checked above */ - } - - return 0; -} - -static int ldlm_bl_get_work(struct ldlm_bl_pool *blp, - struct ldlm_bl_work_item **p_blwi, - struct obd_export **p_exp) -{ - int num_th = atomic_read(&blp->blp_num_threads); - struct ldlm_bl_work_item *blwi = NULL; - static unsigned int num_bl; - - spin_lock(&blp->blp_lock); - /* process a request from the blp_list at least every blp_num_threads */ - if (!list_empty(&blp->blp_list) && - (list_empty(&blp->blp_prio_list) || num_bl == 0)) - blwi = list_first_entry(&blp->blp_list, - struct ldlm_bl_work_item, 
blwi_entry); - else - if (!list_empty(&blp->blp_prio_list)) - blwi = list_first_entry(&blp->blp_prio_list, - struct ldlm_bl_work_item, - blwi_entry); - - if (blwi) { - if (++num_bl >= num_th) - num_bl = 0; - list_del(&blwi->blwi_entry); - } - spin_unlock(&blp->blp_lock); - *p_blwi = blwi; - - return (*p_blwi || *p_exp) ? 1 : 0; -} - -/* This only contains temporary data until the thread starts */ -struct ldlm_bl_thread_data { - struct ldlm_bl_pool *bltd_blp; - struct completion bltd_comp; - int bltd_num; -}; - -static int ldlm_bl_thread_main(void *arg); - -static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp, bool check_busy) -{ - struct ldlm_bl_thread_data bltd = { .bltd_blp = blp }; - struct task_struct *task; - - init_completion(&bltd.bltd_comp); - - bltd.bltd_num = atomic_inc_return(&blp->blp_num_threads); - if (bltd.bltd_num >= blp->blp_max_threads) { - atomic_dec(&blp->blp_num_threads); - return 0; - } - - LASSERTF(bltd.bltd_num > 0, "thread num:%d\n", bltd.bltd_num); - if (check_busy && - atomic_read(&blp->blp_busy_threads) < (bltd.bltd_num - 1)) { - atomic_dec(&blp->blp_num_threads); - return 0; - } - - task = kthread_run(ldlm_bl_thread_main, &bltd, "ldlm_bl_%02d", - bltd.bltd_num); - if (IS_ERR(task)) { - CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n", - bltd.bltd_num, PTR_ERR(task)); - atomic_dec(&blp->blp_num_threads); - return PTR_ERR(task); - } - wait_for_completion(&bltd.bltd_comp); - - return 0; -} - -/* Not fatal if racy and have a few too many threads */ -static int ldlm_bl_thread_need_create(struct ldlm_bl_pool *blp, - struct ldlm_bl_work_item *blwi) -{ - if (atomic_read(&blp->blp_num_threads) >= blp->blp_max_threads) - return 0; - - if (atomic_read(&blp->blp_busy_threads) < - atomic_read(&blp->blp_num_threads)) - return 0; - - if (blwi && (!blwi->blwi_ns || blwi->blwi_mem_pressure)) - return 0; - - return 1; -} - -static int ldlm_bl_thread_blwi(struct ldlm_bl_pool *blp, - struct ldlm_bl_work_item *blwi) -{ - unsigned int flags = 0; 
- - if (!blwi->blwi_ns) - /* added by ldlm_cleanup() */ - return LDLM_ITER_STOP; - - if (blwi->blwi_mem_pressure) - flags = memalloc_noreclaim_save(); - - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL2, 4); - - if (blwi->blwi_count) { - int count; - - /* - * The special case when we cancel locks in lru - * asynchronously, we pass the list of locks here. - * Thus locks are marked LDLM_FL_CANCELING, but NOT - * canceled locally yet. - */ - count = ldlm_cli_cancel_list_local(&blwi->blwi_head, - blwi->blwi_count, - LCF_BL_AST); - ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, - blwi->blwi_flags); - } else { - ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld, - blwi->blwi_lock); - } - if (blwi->blwi_mem_pressure) - memalloc_noreclaim_restore(flags); - - if (blwi->blwi_flags & LCF_ASYNC) - kfree(blwi); - else - complete(&blwi->blwi_comp); - - return 0; -} - -/** - * Main blocking requests processing thread. - * - * Callers put locks into its queue by calling ldlm_bl_to_thread. - * This thread in the end ends up doing actual call to ->l_blocking_ast - * for queued locks. 
- */ -static int ldlm_bl_thread_main(void *arg) -{ - struct ldlm_bl_pool *blp; - struct ldlm_bl_thread_data *bltd = arg; - - blp = bltd->bltd_blp; - - complete(&bltd->bltd_comp); - /* cannot use bltd after this, it is only on caller's stack */ - - while (1) { - struct ldlm_bl_work_item *blwi = NULL; - struct obd_export *exp = NULL; - int rc; - - rc = ldlm_bl_get_work(blp, &blwi, &exp); - if (!rc) - wait_event_idle_exclusive(blp->blp_waitq, - ldlm_bl_get_work(blp, &blwi, - &exp)); - atomic_inc(&blp->blp_busy_threads); - - if (ldlm_bl_thread_need_create(blp, blwi)) - /* discard the return value, we tried */ - ldlm_bl_thread_start(blp, true); - - if (blwi) - rc = ldlm_bl_thread_blwi(blp, blwi); - - atomic_dec(&blp->blp_busy_threads); - - if (rc == LDLM_ITER_STOP) - break; - } - - atomic_dec(&blp->blp_num_threads); - complete(&blp->blp_comp); - return 0; -} - -static int ldlm_setup(void); -static int ldlm_cleanup(void); - -int ldlm_get_ref(void) -{ - int rc = 0; - - rc = ptlrpc_inc_ref(); - if (rc) - return rc; - - mutex_lock(&ldlm_ref_mutex); - if (++ldlm_refcount == 1) { - rc = ldlm_setup(); - if (rc) - ldlm_refcount--; - } - mutex_unlock(&ldlm_ref_mutex); - - if (rc) - ptlrpc_dec_ref(); - - return rc; -} - -void ldlm_put_ref(void) -{ - int rc = 0; - mutex_lock(&ldlm_ref_mutex); - if (ldlm_refcount == 1) { - rc = ldlm_cleanup(); - - if (rc) - CERROR("ldlm_cleanup failed: %d\n", rc); - else - ldlm_refcount--; - } else { - ldlm_refcount--; - } - mutex_unlock(&ldlm_ref_mutex); - if (!rc) - ptlrpc_dec_ref(); -} - -static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - return sprintf(buf, "%d\n", ldlm_cancel_unused_locks_before_replay); -} - -static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - 
ldlm_cancel_unused_locks_before_replay = val; - - return count; -} -LUSTRE_RW_ATTR(cancel_unused_locks_before_replay); - -/* These are for root of /sys/fs/lustre/ldlm */ -static struct attribute *ldlm_attrs[] = { - &lustre_attr_cancel_unused_locks_before_replay.attr, - NULL, -}; - -static const struct attribute_group ldlm_attr_group = { - .attrs = ldlm_attrs, -}; - -static int ldlm_setup(void) -{ - static struct ptlrpc_service_conf conf; - struct ldlm_bl_pool *blp = NULL; - int rc = 0; - int i; - - if (ldlm_state) - return -EALREADY; - - ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS); - if (!ldlm_state) - return -ENOMEM; - - ldlm_kobj = kobject_create_and_add("ldlm", lustre_kobj); - if (!ldlm_kobj) { - rc = -ENOMEM; - goto out; - } - - rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group); - if (rc) - goto out; - - ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj); - if (!ldlm_ns_kset) { - rc = -ENOMEM; - goto out; - } - - ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj); - if (!ldlm_svc_kset) { - rc = -ENOMEM; - goto out; - } - - ldlm_debugfs_setup(); - - memset(&conf, 0, sizeof(conf)); - conf = (typeof(conf)) { - .psc_name = "ldlm_cbd", - .psc_watchdog_factor = 2, - .psc_buf = { - .bc_nbufs = LDLM_CLIENT_NBUFS, - .bc_buf_size = LDLM_BUFSIZE, - .bc_req_max_size = LDLM_MAXREQSIZE, - .bc_rep_max_size = LDLM_MAXREPSIZE, - .bc_req_portal = LDLM_CB_REQUEST_PORTAL, - .bc_rep_portal = LDLM_CB_REPLY_PORTAL, - }, - .psc_thr = { - .tc_thr_name = "ldlm_cb", - .tc_thr_factor = LDLM_THR_FACTOR, - .tc_nthrs_init = LDLM_NTHRS_INIT, - .tc_nthrs_base = LDLM_NTHRS_BASE, - .tc_nthrs_max = LDLM_NTHRS_MAX, - .tc_nthrs_user = ldlm_num_threads, - .tc_cpu_affinity = 1, - .tc_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD, - }, - .psc_cpt = { - .cc_pattern = ldlm_cpts, - }, - .psc_ops = { - .so_req_handler = ldlm_callback_handler, - }, - }; - ldlm_state->ldlm_cb_service = - ptlrpc_register_service(&conf, ldlm_svc_kset, - ldlm_svc_debugfs_dir); - if 
(IS_ERR(ldlm_state->ldlm_cb_service)) { - CERROR("failed to start service\n"); - rc = PTR_ERR(ldlm_state->ldlm_cb_service); - ldlm_state->ldlm_cb_service = NULL; - goto out; - } - - blp = kzalloc(sizeof(*blp), GFP_NOFS); - if (!blp) { - rc = -ENOMEM; - goto out; - } - ldlm_state->ldlm_bl_pool = blp; - - spin_lock_init(&blp->blp_lock); - INIT_LIST_HEAD(&blp->blp_list); - INIT_LIST_HEAD(&blp->blp_prio_list); - init_waitqueue_head(&blp->blp_waitq); - atomic_set(&blp->blp_num_threads, 0); - atomic_set(&blp->blp_busy_threads, 0); - - if (ldlm_num_threads == 0) { - blp->blp_min_threads = LDLM_NTHRS_INIT; - blp->blp_max_threads = LDLM_NTHRS_MAX; - } else { - blp->blp_min_threads = min_t(int, LDLM_NTHRS_MAX, - max_t(int, LDLM_NTHRS_INIT, - ldlm_num_threads)); - - blp->blp_max_threads = blp->blp_min_threads; - } - - for (i = 0; i < blp->blp_min_threads; i++) { - rc = ldlm_bl_thread_start(blp, false); - if (rc < 0) - goto out; - } - - rc = ldlm_pools_init(); - if (rc) { - CERROR("Failed to initialize LDLM pools: %d\n", rc); - goto out; - } - return 0; - - out: - ldlm_cleanup(); - return rc; -} - -static int ldlm_cleanup(void) -{ - if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) || - !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) { - CERROR("ldlm still has namespaces; clean these up first.\n"); - ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE); - ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE); - return -EBUSY; - } - - ldlm_pools_fini(); - - if (ldlm_state->ldlm_bl_pool) { - struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; - - while (atomic_read(&blp->blp_num_threads) > 0) { - struct ldlm_bl_work_item blwi = { .blwi_ns = NULL }; - - init_completion(&blp->blp_comp); - - spin_lock(&blp->blp_lock); - list_add_tail(&blwi.blwi_entry, &blp->blp_list); - wake_up(&blp->blp_waitq); - spin_unlock(&blp->blp_lock); - - wait_for_completion(&blp->blp_comp); - } - - kfree(blp); - } - - if (ldlm_state->ldlm_cb_service) - 
ptlrpc_unregister_service(ldlm_state->ldlm_cb_service); - - if (ldlm_ns_kset) - kset_unregister(ldlm_ns_kset); - if (ldlm_svc_kset) - kset_unregister(ldlm_svc_kset); - if (ldlm_kobj) { - sysfs_remove_group(ldlm_kobj, &ldlm_attr_group); - kobject_put(ldlm_kobj); - } - - ldlm_debugfs_cleanup(); - - kfree(ldlm_state); - ldlm_state = NULL; - - return 0; -} - -int ldlm_init(void) -{ - mutex_init(&ldlm_ref_mutex); - mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER)); - mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT)); - ldlm_resource_slab = kmem_cache_create("ldlm_resources", - sizeof(struct ldlm_resource), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!ldlm_resource_slab) - return -ENOMEM; - - ldlm_lock_slab = kmem_cache_create("ldlm_locks", - sizeof(struct ldlm_lock), 0, - SLAB_HWCACHE_ALIGN | - SLAB_TYPESAFE_BY_RCU, NULL); - if (!ldlm_lock_slab) { - kmem_cache_destroy(ldlm_resource_slab); - return -ENOMEM; - } - - ldlm_interval_slab = kmem_cache_create("interval_node", - sizeof(struct ldlm_interval), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (!ldlm_interval_slab) { - kmem_cache_destroy(ldlm_resource_slab); - kmem_cache_destroy(ldlm_lock_slab); - return -ENOMEM; - } -#if LUSTRE_TRACKS_LOCK_EXP_REFS - class_export_dump_hook = ldlm_dump_export_locks; -#endif - return 0; -} - -void ldlm_exit(void) -{ - if (ldlm_refcount) - CERROR("ldlm_refcount is %d in %s!\n", ldlm_refcount, __func__); - kmem_cache_destroy(ldlm_resource_slab); - /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call - * synchronize_rcu() to wait a grace period elapsed, so that - * ldlm_lock_free() get a chance to be called. 
- */ - synchronize_rcu(); - kmem_cache_destroy(ldlm_lock_slab); - kmem_cache_destroy(ldlm_interval_slab); -} diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c deleted file mode 100644 index 33b5a3f96fcb..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ldlm/ldlm_plain.c - * - * Author: Peter Braam - * Author: Phil Schwan - */ - -/** - * This file contains implementation of PLAIN lock type. - * - * PLAIN locks are the simplest form of LDLM locking, and are used when - * there only needs to be a single lock on a resource. This avoids some - * of the complexity of EXTENT and IBITS lock types, but doesn't allow - * different "parts" of a resource to be locked concurrently. 
Example - * use cases for PLAIN locks include locking of MGS configuration logs - * and (as of Lustre 2.4) quota records. - */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include -#include - -#include "ldlm_internal.h" - -void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, - union ldlm_policy_data *lpolicy) -{ - /* No policy for plain locks */ -} - -void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy, - union ldlm_wire_policy_data *wpolicy) -{ - /* No policy for plain locks */ -} diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c deleted file mode 100644 index 36d14ee4e5b1..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c +++ /dev/null @@ -1,1013 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/ldlm/ldlm_pool.c - * - * Author: Yury Umanets - */ - -/* - * Idea of this code is rather simple. Each second, for each server namespace - * we have SLV - server lock volume which is calculated on current number of - * granted locks, grant speed for past period, etc - that is, locking load. - * This SLV number may be thought as a flow definition for simplicity. It is - * sent to clients with each occasion to let them know what is current load - * situation on the server. By default, at the beginning, SLV on server is - * set max value which is calculated as the following: allow to one client - * have all locks of limit ->pl_limit for 10h. - * - * Next, on clients, number of cached locks is not limited artificially in any - * way as it was before. Instead, client calculates CLV, that is, client lock - * volume for each lock and compares it with last SLV from the server. CLV is - * calculated as the number of locks in LRU * lock live time in seconds. If - * CLV > SLV - lock is canceled. - * - * Client has LVF, that is, lock volume factor which regulates how much - * sensitive client should be about last SLV from server. The higher LVF is the - * more locks will be canceled on client. Default value for it is 1. Setting LVF - * to 2 means that client will cancel locks 2 times faster. - * - * Locks on a client will be canceled more intensively in these cases: - * (1) if SLV is smaller, that is, load is higher on the server; - * (2) client has a lot of locks (the more locks are held by client, the bigger - * chances that some of them should be canceled); - * (3) client has old locks (taken some time ago); - * - * Thus, according to flow paradigm that we use for better understanding SLV, - * CLV is the volume of particle in flow described by SLV. According to this, - * if flow is getting thinner, more and more particles become outside of it and - * as particles are locks, they should be canceled. 
- * - * General idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com). - * Andreas Dilger (adilger@clusterfs.com) proposed few nice ideas like using - * LVF and many cleanups. Flow definition to allow more easy understanding of - * the logic belongs to Nikita Danilov (nikita@clusterfs.com) as well as many - * cleanups and fixes. And design and implementation are done by Yury Umanets - * (umka@clusterfs.com). - * - * Glossary for terms used: - * - * pl_limit - Number of allowed locks in pool. Applies to server and client - * side (tunable); - * - * pl_granted - Number of granted locks (calculated); - * pl_grant_rate - Number of granted locks for last T (calculated); - * pl_cancel_rate - Number of canceled locks for last T (calculated); - * pl_grant_speed - Grant speed (GR - CR) for last T (calculated); - * pl_grant_plan - Planned number of granted locks for next T (calculated); - * pl_server_lock_volume - Current server lock volume (calculated); - * - * As it may be seen from list above, we have few possible tunables which may - * affect behavior much. They all may be modified via sysfs. However, they also - * give a possibility for constructing few pre-defined behavior policies. If - * none of predefines is suitable for a working pattern being used, new one may - * be "constructed" via sysfs tunables. - */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include -#include -#include -#include "ldlm_internal.h" - -/* - * 50 ldlm locks for 1MB of RAM. - */ -#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50) - -/* - * Maximal possible grant step plan in %. - */ -#define LDLM_POOL_MAX_GSP (30) - -/* - * Minimal possible grant step plan in %. - */ -#define LDLM_POOL_MIN_GSP (1) - -/* - * This controls the speed of reaching LDLM_POOL_MAX_GSP - * with increasing thread period. - */ -#define LDLM_POOL_GSP_STEP_SHIFT (2) - -/* - * LDLM_POOL_GSP% of all locks is default GP. 
- */ -#define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100) - -/* - * Max age for locks on clients. - */ -#define LDLM_POOL_MAX_AGE (36000) - -/* - * The granularity of SLV calculation. - */ -#define LDLM_POOL_SLV_SHIFT (10) - -static inline __u64 dru(__u64 val, __u32 shift, int round_up) -{ - return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift; -} - -static inline __u64 ldlm_pool_slv_max(__u32 L) -{ - /* - * Allow to have all locks for 1 client for 10 hrs. - * Formula is the following: limit * 10h / 1 client. - */ - __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1; - return lim; -} - -static inline __u64 ldlm_pool_slv_min(__u32 L) -{ - return 1; -} - -enum { - LDLM_POOL_FIRST_STAT = 0, - LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT, - LDLM_POOL_GRANT_STAT, - LDLM_POOL_CANCEL_STAT, - LDLM_POOL_GRANT_RATE_STAT, - LDLM_POOL_CANCEL_RATE_STAT, - LDLM_POOL_GRANT_PLAN_STAT, - LDLM_POOL_SLV_STAT, - LDLM_POOL_SHRINK_REQTD_STAT, - LDLM_POOL_SHRINK_FREED_STAT, - LDLM_POOL_RECALC_STAT, - LDLM_POOL_TIMING_STAT, - LDLM_POOL_LAST_STAT -}; - -/** - * Calculates suggested grant_step in % of available locks for passed - * \a period. This is later used in grant_plan calculations. - */ -static inline int ldlm_pool_t2gsp(unsigned int t) -{ - /* - * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP - * and up to 30% for anything higher than LDLM_POOL_GSP_STEP. - * - * How this will affect execution is the following: - * - * - for thread period 1s we will have grant_step 1% which good from - * pov of taking some load off from server and push it out to clients. - * This is like that because 1% for grant_step means that server will - * not allow clients to get lots of locks in short period of time and - * keep all old locks in their caches. 
Clients will always have to - * get some locks back if they want to take some new; - * - * - for thread period 10s (which is default) we will have 23% which - * means that clients will have enough of room to take some new locks - * without getting some back. All locks from this 23% which were not - * taken by clients in current period will contribute in SLV growing. - * SLV growing means more locks cached on clients until limit or grant - * plan is reached. - */ - return LDLM_POOL_MAX_GSP - - ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >> - (t >> LDLM_POOL_GSP_STEP_SHIFT)); -} - -/** - * Recalculates next stats on passed \a pl. - * - * \pre ->pl_lock is locked. - */ -static void ldlm_pool_recalc_stats(struct ldlm_pool *pl) -{ - int grant_plan = pl->pl_grant_plan; - __u64 slv = pl->pl_server_lock_volume; - int granted = atomic_read(&pl->pl_granted); - int grant_rate = atomic_read(&pl->pl_grant_rate); - int cancel_rate = atomic_read(&pl->pl_cancel_rate); - - lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT, - slv); - lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT, - granted); - lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT, - grant_rate); - lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT, - grant_plan); - lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT, - cancel_rate); -} - -/** - * Sets SLV and Limit from container_of(pl, struct ldlm_namespace, - * ns_pool)->ns_obd tp passed \a pl. - */ -static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl) -{ - struct obd_device *obd; - - /* - * Get new SLV and Limit from obd which is updated with coming - * RPCs. - */ - obd = container_of(pl, struct ldlm_namespace, - ns_pool)->ns_obd; - read_lock(&obd->obd_pool_lock); - pl->pl_server_lock_volume = obd->obd_pool_slv; - atomic_set(&pl->pl_limit, obd->obd_pool_limit); - read_unlock(&obd->obd_pool_lock); -} - -/** - * Recalculates client size pool \a pl according to current SLV and Limit. 
- */ -static int ldlm_cli_pool_recalc(struct ldlm_pool *pl) -{ - time64_t recalc_interval_sec; - int ret; - - recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time; - if (recalc_interval_sec < pl->pl_recalc_period) - return 0; - - spin_lock(&pl->pl_lock); - /* - * Check if we need to recalc lists now. - */ - recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time; - if (recalc_interval_sec < pl->pl_recalc_period) { - spin_unlock(&pl->pl_lock); - return 0; - } - - /* - * Make sure that pool knows last SLV and Limit from obd. - */ - ldlm_cli_pool_pop_slv(pl); - - spin_unlock(&pl->pl_lock); - - /* - * Do not cancel locks in case lru resize is disabled for this ns. - */ - if (!ns_connect_lru_resize(container_of(pl, struct ldlm_namespace, - ns_pool))) { - ret = 0; - goto out; - } - - /* - * In the time of canceling locks on client we do not need to maintain - * sharp timing, we only want to cancel locks asap according to new SLV. - * It may be called when SLV has changed much, this is why we do not - * take into account pl->pl_recalc_time here. - */ - ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool), - 0, LCF_ASYNC, LDLM_LRU_FLAG_LRUR); - -out: - spin_lock(&pl->pl_lock); - /* - * Time of LRU resizing might be longer than period, - * so update after LRU resizing rather than before it. - */ - pl->pl_recalc_time = ktime_get_real_seconds(); - lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT, - recalc_interval_sec); - spin_unlock(&pl->pl_lock); - return ret; -} - -/** - * This function is main entry point for memory pressure handling on client - * side. Main goal of this function is to cancel some number of locks on - * passed \a pl according to \a nr and \a gfp_mask. 
- */ -static int ldlm_cli_pool_shrink(struct ldlm_pool *pl, - int nr, gfp_t gfp_mask) -{ - struct ldlm_namespace *ns; - int unused; - - ns = container_of(pl, struct ldlm_namespace, ns_pool); - - /* - * Do not cancel locks in case lru resize is disabled for this ns. - */ - if (!ns_connect_lru_resize(ns)) - return 0; - - /* - * Make sure that pool knows last SLV and Limit from obd. - */ - ldlm_cli_pool_pop_slv(pl); - - spin_lock(&ns->ns_lock); - unused = ns->ns_nr_unused; - spin_unlock(&ns->ns_lock); - - if (nr == 0) - return (unused / 100) * sysctl_vfs_cache_pressure; - else - return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK); -} - -static const struct ldlm_pool_ops ldlm_cli_pool_ops = { - .po_recalc = ldlm_cli_pool_recalc, - .po_shrink = ldlm_cli_pool_shrink -}; - -/** - * Pool recalc wrapper. Will call either client or server pool recalc callback - * depending what pool \a pl is used. - */ -static int ldlm_pool_recalc(struct ldlm_pool *pl) -{ - u32 recalc_interval_sec; - int count; - - recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time; - if (recalc_interval_sec > 0) { - spin_lock(&pl->pl_lock); - recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time; - - if (recalc_interval_sec > 0) { - /* - * Update pool statistics every 1s. - */ - ldlm_pool_recalc_stats(pl); - - /* - * Zero out all rates and speed for the last period. 
- */ - atomic_set(&pl->pl_grant_rate, 0); - atomic_set(&pl->pl_cancel_rate, 0); - } - spin_unlock(&pl->pl_lock); - } - - if (pl->pl_ops->po_recalc) { - count = pl->pl_ops->po_recalc(pl); - lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT, - count); - } - - recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() + - pl->pl_recalc_period; - if (recalc_interval_sec <= 0) { - /* DEBUG: should be re-removed after LU-4536 is fixed */ - CDEBUG(D_DLMTRACE, - "%s: Negative interval(%ld), too short period(%ld)\n", - pl->pl_name, (long)recalc_interval_sec, - (long)pl->pl_recalc_period); - - /* Prevent too frequent recalculation. */ - recalc_interval_sec = 1; - } - - return recalc_interval_sec; -} - -/* - * Pool shrink wrapper. Will call either client or server pool recalc callback - * depending what pool pl is used. When nr == 0, just return the number of - * freeable locks. Otherwise, return the number of canceled locks. - */ -static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask) -{ - int cancel = 0; - - if (pl->pl_ops->po_shrink) { - cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask); - if (nr > 0) { - lprocfs_counter_add(pl->pl_stats, - LDLM_POOL_SHRINK_REQTD_STAT, - nr); - lprocfs_counter_add(pl->pl_stats, - LDLM_POOL_SHRINK_FREED_STAT, - cancel); - CDEBUG(D_DLMTRACE, - "%s: request to shrink %d locks, shrunk %d\n", - pl->pl_name, nr, cancel); - } - } - return cancel; -} - -static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused) -{ - int granted, grant_rate, cancel_rate; - int grant_speed, lvf; - struct ldlm_pool *pl = m->private; - __u64 slv, clv; - __u32 limit; - - spin_lock(&pl->pl_lock); - slv = pl->pl_server_lock_volume; - clv = pl->pl_client_lock_volume; - limit = atomic_read(&pl->pl_limit); - granted = atomic_read(&pl->pl_granted); - grant_rate = atomic_read(&pl->pl_grant_rate); - cancel_rate = atomic_read(&pl->pl_cancel_rate); - grant_speed = grant_rate - cancel_rate; - lvf = 
atomic_read(&pl->pl_lock_volume_factor); - spin_unlock(&pl->pl_lock); - - seq_printf(m, "LDLM pool state (%s):\n" - " SLV: %llu\n" - " CLV: %llu\n" - " LVF: %d\n", - pl->pl_name, slv, clv, lvf); - - seq_printf(m, " GR: %d\n CR: %d\n GS: %d\n" - " G: %d\n L: %d\n", - grant_rate, cancel_rate, grant_speed, - granted, limit); - - return 0; -} - -LPROC_SEQ_FOPS_RO(lprocfs_pool_state); - -static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, - pl_kobj); - - int grant_speed; - - spin_lock(&pl->pl_lock); - /* serialize with ldlm_pool_recalc */ - grant_speed = atomic_read(&pl->pl_grant_rate) - - atomic_read(&pl->pl_cancel_rate); - spin_unlock(&pl->pl_lock); - return sprintf(buf, "%d\n", grant_speed); -} -LUSTRE_RO_ATTR(grant_speed); - -LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int); -LUSTRE_RO_ATTR(grant_plan); - -LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int); -LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int); -LUSTRE_RW_ATTR(recalc_period); - -LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64); -LUSTRE_RO_ATTR(server_lock_volume); - -LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic); -LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic); -LUSTRE_RW_ATTR(limit); - -LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic); -LUSTRE_RO_ATTR(granted); - -LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic); -LUSTRE_RO_ATTR(cancel_rate); - -LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic); -LUSTRE_RO_ATTR(grant_rate); - -LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic); -LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic); -LUSTRE_RW_ATTR(lock_volume_factor); - -#define LDLM_POOL_ADD_VAR(name, var, ops) \ - do { \ - snprintf(var_name, MAX_STRING_SIZE, #name); \ - pool_vars[0].data = var; \ - pool_vars[0].fops = ops; \ - ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);\ - } while (0) - -/* These are for pools in 
/sys/fs/lustre/ldlm/namespaces/.../pool */ -static struct attribute *ldlm_pl_attrs[] = { - &lustre_attr_grant_speed.attr, - &lustre_attr_grant_plan.attr, - &lustre_attr_recalc_period.attr, - &lustre_attr_server_lock_volume.attr, - &lustre_attr_limit.attr, - &lustre_attr_granted.attr, - &lustre_attr_cancel_rate.attr, - &lustre_attr_grant_rate.attr, - &lustre_attr_lock_volume_factor.attr, - NULL, -}; - -static void ldlm_pl_release(struct kobject *kobj) -{ - struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, - pl_kobj); - complete(&pl->pl_kobj_unregister); -} - -static struct kobj_type ldlm_pl_ktype = { - .default_attrs = ldlm_pl_attrs, - .sysfs_ops = &lustre_sysfs_ops, - .release = ldlm_pl_release, -}; - -static int ldlm_pool_sysfs_init(struct ldlm_pool *pl) -{ - struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace, - ns_pool); - int err; - - init_completion(&pl->pl_kobj_unregister); - err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj, - "pool"); - - return err; -} - -static int ldlm_pool_debugfs_init(struct ldlm_pool *pl) -{ - struct ldlm_namespace *ns = container_of(pl, struct ldlm_namespace, - ns_pool); - struct dentry *debugfs_ns_parent; - struct lprocfs_vars pool_vars[2]; - char *var_name = NULL; - int rc = 0; - - var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS); - if (!var_name) - return -ENOMEM; - - debugfs_ns_parent = ns->ns_debugfs_entry; - if (IS_ERR_OR_NULL(debugfs_ns_parent)) { - CERROR("%s: debugfs entry is not initialized\n", - ldlm_ns_name(ns)); - rc = -EINVAL; - goto out_free_name; - } - pl->pl_debugfs_entry = debugfs_create_dir("pool", debugfs_ns_parent); - - var_name[MAX_STRING_SIZE] = '\0'; - memset(pool_vars, 0, sizeof(pool_vars)); - pool_vars[0].name = var_name; - - LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops); - - pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT - - LDLM_POOL_FIRST_STAT, 0); - if (!pl->pl_stats) { - rc = -ENOMEM; - goto out_free_name; - } - - 
lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "granted", "locks"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "grant", "locks"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "cancel", "locks"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "grant_rate", "locks/s"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "cancel_rate", "locks/s"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "grant_plan", "locks/s"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "slv", "slv"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "shrink_request", "locks"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "shrink_freed", "locks"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "recalc_freed", "locks"); - lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT, - LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV, - "recalc_timing", "sec"); - debugfs_create_file("stats", 0644, pl->pl_debugfs_entry, pl->pl_stats, - &lprocfs_stats_seq_fops); - -out_free_name: - kfree(var_name); - return rc; -} - -static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl) -{ - kobject_put(&pl->pl_kobj); - wait_for_completion(&pl->pl_kobj_unregister); -} - -static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl) -{ - if (pl->pl_stats) { - lprocfs_free_stats(&pl->pl_stats); - pl->pl_stats = NULL; - } - debugfs_remove_recursive(pl->pl_debugfs_entry); -} - -int 
ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, - int idx, enum ldlm_side client) -{ - int rc; - - spin_lock_init(&pl->pl_lock); - atomic_set(&pl->pl_granted, 0); - pl->pl_recalc_time = ktime_get_real_seconds(); - atomic_set(&pl->pl_lock_volume_factor, 1); - - atomic_set(&pl->pl_grant_rate, 0); - atomic_set(&pl->pl_cancel_rate, 0); - pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L); - - snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d", - ldlm_ns_name(ns), idx); - - atomic_set(&pl->pl_limit, 1); - pl->pl_server_lock_volume = 0; - pl->pl_ops = &ldlm_cli_pool_ops; - pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD; - pl->pl_client_lock_volume = 0; - rc = ldlm_pool_debugfs_init(pl); - if (rc) - return rc; - - rc = ldlm_pool_sysfs_init(pl); - if (rc) - return rc; - - CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name); - - return rc; -} - -void ldlm_pool_fini(struct ldlm_pool *pl) -{ - ldlm_pool_sysfs_fini(pl); - ldlm_pool_debugfs_fini(pl); - - /* - * Pool should not be used after this point. We can't free it here as - * it lives in struct ldlm_namespace, but still interested in catching - * any abnormal using cases. - */ - POISON(pl, 0x5a, sizeof(*pl)); -} - -/** - * Add new taken ldlm lock \a lock into pool \a pl accounting. - */ -void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock) -{ - /* - * FLOCK locks are special in a sense that they are almost never - * cancelled, instead special kind of lock is used to drop them. - * also there is no LRU for flock locks, so no point in tracking - * them anyway. - */ - if (lock->l_resource->lr_type == LDLM_FLOCK) - return; - - atomic_inc(&pl->pl_granted); - atomic_inc(&pl->pl_grant_rate); - lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT); - /* - * Do not do pool recalc for client side as all locks which - * potentially may be canceled has already been packed into - * enqueue/cancel rpc. Also we do not want to run out of stack - * with too long call paths. 
- */ -} - -/** - * Remove ldlm lock \a lock from pool \a pl accounting. - */ -void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock) -{ - /* - * Filter out FLOCK locks. Read above comment in ldlm_pool_add(). - */ - if (lock->l_resource->lr_type == LDLM_FLOCK) - return; - - LASSERT(atomic_read(&pl->pl_granted) > 0); - atomic_dec(&pl->pl_granted); - atomic_inc(&pl->pl_cancel_rate); - - lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT); -} - -/** - * Returns current \a pl SLV. - * - * \pre ->pl_lock is not locked. - */ -__u64 ldlm_pool_get_slv(struct ldlm_pool *pl) -{ - __u64 slv; - - spin_lock(&pl->pl_lock); - slv = pl->pl_server_lock_volume; - spin_unlock(&pl->pl_lock); - return slv; -} - -/** - * Sets passed \a clv to \a pl. - * - * \pre ->pl_lock is not locked. - */ -void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv) -{ - spin_lock(&pl->pl_lock); - pl->pl_client_lock_volume = clv; - spin_unlock(&pl->pl_lock); -} - -/** - * Returns current LVF from \a pl. - */ -__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl) -{ - return atomic_read(&pl->pl_lock_volume_factor); -} - -static int ldlm_pool_granted(struct ldlm_pool *pl) -{ - return atomic_read(&pl->pl_granted); -} - -/* - * count locks from all namespaces (if possible). Returns number of - * cached locks. - */ -static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask) -{ - unsigned long total = 0; - int nr_ns; - struct ldlm_namespace *ns; - struct ldlm_namespace *ns_old = NULL; /* loop detection */ - - if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) - return 0; - - CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n", - client == LDLM_NAMESPACE_CLIENT ? "client" : "server"); - - /* - * Find out how many resources we may release. 
- */ - for (nr_ns = ldlm_namespace_nr_read(client); - nr_ns > 0; nr_ns--) { - mutex_lock(ldlm_namespace_lock(client)); - if (list_empty(ldlm_namespace_list(client))) { - mutex_unlock(ldlm_namespace_lock(client)); - return 0; - } - ns = ldlm_namespace_first_locked(client); - - if (ns == ns_old) { - mutex_unlock(ldlm_namespace_lock(client)); - break; - } - - if (ldlm_ns_empty(ns)) { - ldlm_namespace_move_to_inactive_locked(ns, client); - mutex_unlock(ldlm_namespace_lock(client)); - continue; - } - - if (!ns_old) - ns_old = ns; - - ldlm_namespace_get(ns); - ldlm_namespace_move_to_active_locked(ns, client); - mutex_unlock(ldlm_namespace_lock(client)); - total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask); - ldlm_namespace_put(ns); - } - - return total; -} - -static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr, - gfp_t gfp_mask) -{ - unsigned long freed = 0; - int tmp, nr_ns; - struct ldlm_namespace *ns; - - if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) - return -1; - - /* - * Shrink at least ldlm_namespace_nr_read(client) namespaces. - */ - for (tmp = nr_ns = ldlm_namespace_nr_read(client); - tmp > 0; tmp--) { - int cancel, nr_locks; - - /* - * Do not call shrink under ldlm_namespace_lock(client) - */ - mutex_lock(ldlm_namespace_lock(client)); - if (list_empty(ldlm_namespace_list(client))) { - mutex_unlock(ldlm_namespace_lock(client)); - break; - } - ns = ldlm_namespace_first_locked(client); - ldlm_namespace_get(ns); - ldlm_namespace_move_to_active_locked(ns, client); - mutex_unlock(ldlm_namespace_lock(client)); - - nr_locks = ldlm_pool_granted(&ns->ns_pool); - /* - * We use to shrink propotionally but with new shrinker API, - * we lost the total number of freeable locks. - */ - cancel = 1 + min_t(int, nr_locks, nr / nr_ns); - freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask); - ldlm_namespace_put(ns); - } - /* - * we only decrease the SLV in server pools shrinker, return - * SHRINK_STOP to kernel to avoid needless loop. 
LU-1128 - */ - return freed; -} - -static unsigned long ldlm_pools_cli_count(struct shrinker *s, - struct shrink_control *sc) -{ - return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask); -} - -static unsigned long ldlm_pools_cli_scan(struct shrinker *s, - struct shrink_control *sc) -{ - return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan, - sc->gfp_mask); -} - -static void ldlm_pools_recalc(struct work_struct *ws); -static DECLARE_DELAYED_WORK(ldlm_recalc_pools, ldlm_pools_recalc); - -static void ldlm_pools_recalc(struct work_struct *ws) -{ - enum ldlm_side client = LDLM_NAMESPACE_CLIENT; - struct ldlm_namespace *ns; - struct ldlm_namespace *ns_old = NULL; - /* seconds of sleep if no active namespaces */ - int time = LDLM_POOL_CLI_DEF_RECALC_PERIOD; - int nr; - - /* - * Recalc at least ldlm_namespace_nr_read(client) namespaces. - */ - for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) { - int skip; - /* - * Lock the list, get first @ns in the list, getref, move it - * to the tail, unlock and call pool recalc. This way we avoid - * calling recalc under @ns lock what is really good as we get - * rid of potential deadlock on client nodes when canceling - * locks synchronously. - */ - mutex_lock(ldlm_namespace_lock(client)); - if (list_empty(ldlm_namespace_list(client))) { - mutex_unlock(ldlm_namespace_lock(client)); - break; - } - ns = ldlm_namespace_first_locked(client); - - if (ns_old == ns) { /* Full pass complete */ - mutex_unlock(ldlm_namespace_lock(client)); - break; - } - - /* We got an empty namespace, need to move it back to inactive - * list. - * The race with parallel resource creation is fine: - * - If they do namespace_get before our check, we fail the - * check and they move this item to the end of the list anyway - * - If we do the check and then they do namespace_get, then - * we move the namespace to inactive and they will move - * it back to active (synchronised by the lock, so no clash - * there). 
- */ - if (ldlm_ns_empty(ns)) { - ldlm_namespace_move_to_inactive_locked(ns, client); - mutex_unlock(ldlm_namespace_lock(client)); - continue; - } - - if (!ns_old) - ns_old = ns; - - spin_lock(&ns->ns_lock); - /* - * skip ns which is being freed, and we don't want to increase - * its refcount again, not even temporarily. bz21519 & LU-499. - */ - if (ns->ns_stopping) { - skip = 1; - } else { - skip = 0; - ldlm_namespace_get(ns); - } - spin_unlock(&ns->ns_lock); - - ldlm_namespace_move_to_active_locked(ns, client); - mutex_unlock(ldlm_namespace_lock(client)); - - /* - * After setup is done - recalc the pool. - */ - if (!skip) { - int ttime = ldlm_pool_recalc(&ns->ns_pool); - - if (ttime < time) - time = ttime; - - ldlm_namespace_put(ns); - } - } - - /* Wake up the blocking threads from time to time. */ - ldlm_bl_thread_wakeup(); - - schedule_delayed_work(&ldlm_recalc_pools, time * HZ); -} - -static int ldlm_pools_thread_start(void) -{ - schedule_delayed_work(&ldlm_recalc_pools, 0); - - return 0; -} - -static void ldlm_pools_thread_stop(void) -{ - cancel_delayed_work_sync(&ldlm_recalc_pools); -} - -static struct shrinker ldlm_pools_cli_shrinker = { - .count_objects = ldlm_pools_cli_count, - .scan_objects = ldlm_pools_cli_scan, - .seeks = DEFAULT_SEEKS, -}; - -int ldlm_pools_init(void) -{ - int rc; - - rc = ldlm_pools_thread_start(); - if (!rc) - rc = register_shrinker(&ldlm_pools_cli_shrinker); - - return rc; -} - -void ldlm_pools_fini(void) -{ - unregister_shrinker(&ldlm_pools_cli_shrinker); - - ldlm_pools_thread_stop(); -} diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c deleted file mode 100644 index cdc52eed6d85..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c +++ /dev/null @@ -1,2033 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -/** - * This file contains Asynchronous System Trap (AST) handlers and related - * LDLM request-processing routines. - * - * An AST is a callback issued on a lock when its state is changed. There are - * several different types of ASTs (callbacks) registered for each lock: - * - * - completion AST: when a lock is enqueued by some process, but cannot be - * granted immediately due to other conflicting locks on the same resource, - * the completion AST is sent to notify the caller when the lock is - * eventually granted - * - * - blocking AST: when a lock is granted to some process, if another process - * enqueues a conflicting (blocking) lock on a resource, a blocking AST is - * sent to notify the holder(s) of the lock(s) of the conflicting lock - * request. The lock holder(s) must release their lock(s) on that resource in - * a timely manner or be evicted by the server. 
- * - * - glimpse AST: this is used when a process wants information about a lock - * (i.e. the lock value block (LVB)) but does not necessarily require holding - * the lock. If the resource is locked, the lock holder(s) are sent glimpse - * ASTs and the LVB is returned to the caller, and lock holder(s) may CANCEL - * their lock(s) if they are idle. If the resource is not locked, the server - * may grant the lock. - */ - -#define DEBUG_SUBSYSTEM S_LDLM - -#include -#include -#include -#include -#include - -#include "ldlm_internal.h" - -unsigned int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT; -module_param(ldlm_enqueue_min, uint, 0644); -MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum"); - -/* in client side, whether the cached locks will be canceled before replay */ -unsigned int ldlm_cancel_unused_locks_before_replay = 1; - -struct ldlm_async_args { - struct lustre_handle lock_handle; -}; - -/** - * ldlm_request_bufsize - * - * @count: number of ldlm handles - * @type: ldlm opcode - * - * If opcode=LDLM_ENQUEUE, 1 slot is already occupied, - * LDLM_LOCKREQ_HANDLE -1 slots are available. - * Otherwise, LDLM_LOCKREQ_HANDLE slots are available. 
- * - * Return: size of the request buffer - */ -static int ldlm_request_bufsize(int count, int type) -{ - int avail = LDLM_LOCKREQ_HANDLES; - - if (type == LDLM_ENQUEUE) - avail -= LDLM_ENQUEUE_CANCEL_OFF; - - if (count > avail) - avail = (count - avail) * sizeof(struct lustre_handle); - else - avail = 0; - - return sizeof(struct ldlm_request) + avail; -} - -static void ldlm_expired_completion_wait(struct ldlm_lock *lock, __u32 conn_cnt) -{ - struct obd_import *imp; - struct obd_device *obd; - - if (!lock->l_conn_export) { - static unsigned long next_dump, last_dump; - - LDLM_ERROR(lock, - "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep", - (s64)lock->l_last_activity, - (s64)(ktime_get_real_seconds() - - lock->l_last_activity)); - if (time_after(jiffies, next_dump)) { - last_dump = next_dump; - next_dump = jiffies + 300 * HZ; - ldlm_namespace_dump(D_DLMTRACE, - ldlm_lock_to_ns(lock)); - if (last_dump == 0) - libcfs_debug_dumplog(); - } - return; - } - - obd = lock->l_conn_export->exp_obd; - imp = obd->u.cli.cl_import; - ptlrpc_fail_import(imp, conn_cnt); - LDLM_ERROR(lock, - "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s", - (s64)lock->l_last_activity, - (s64)(ktime_get_real_seconds() - lock->l_last_activity), - obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid); -} - -/** - * Calculate the Completion timeout (covering enqueue, BL AST, data flush, - * lock cancel, and their replies). Used for lock completion timeout on the - * client side. - * - * \param[in] lock lock which is waiting the completion callback - * - * \retval timeout in seconds to wait for the server reply - */ -/* We use the same basis for both server side and client side functions - * from a single node. 
- */ -static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock) -{ - unsigned int timeout; - - if (AT_OFF) - return obd_timeout; - - /* - * Wait a long time for enqueue - server may have to callback a - * lock from another client. Server will evict the other client if it - * doesn't respond reasonably, and then give us the lock. - */ - timeout = at_get(ldlm_lock_to_ns_at(lock)); - return max(3 * timeout, ldlm_enqueue_min); -} - -/** - * Helper function for ldlm_completion_ast(), updating timings when lock is - * actually granted. - */ -static int ldlm_completion_tail(struct ldlm_lock *lock, void *data) -{ - long delay; - int result = 0; - - if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) { - LDLM_DEBUG(lock, "client-side enqueue: destroyed"); - result = -EIO; - } else if (!data) { - LDLM_DEBUG(lock, "client-side enqueue: granted"); - } else { - /* Take into AT only CP RPC, not immediately granted locks */ - delay = ktime_get_real_seconds() - lock->l_last_activity; - LDLM_DEBUG(lock, "client-side enqueue: granted after %lds", - delay); - - /* Update our time estimate */ - at_measured(ldlm_lock_to_ns_at(lock), delay); - } - return result; -} - -/** - * Generic LDLM "completion" AST. This is called in several cases: - * - * - when a reply to an ENQUEUE RPC is received from the server - * (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at - * this point (determined by flags); - * - * - when LDLM_CP_CALLBACK RPC comes to client to notify it that lock has - * been granted; - * - * - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock - * gets correct lvb; - * - * - to force all locks when resource is destroyed (cleanup_resource()); - * - * - during lock conversion (not used currently). - * - * If lock is not granted in the first case, this function waits until second - * or penultimate cases happen in some other thread. 
- * - */ -int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) -{ - /* XXX ALLOCATE - 160 bytes */ - struct obd_device *obd; - struct obd_import *imp = NULL; - __u32 timeout; - __u32 conn_cnt = 0; - int rc = 0; - - if (flags == LDLM_FL_WAIT_NOREPROC) { - LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock"); - goto noreproc; - } - - if (!(flags & LDLM_FL_BLOCKED_MASK)) { - wake_up(&lock->l_waitq); - return 0; - } - - LDLM_DEBUG(lock, - "client-side enqueue returned a blocked lock, sleeping"); - -noreproc: - - obd = class_exp2obd(lock->l_conn_export); - - /* if this is a local lock, then there is no import */ - if (obd) - imp = obd->u.cli.cl_import; - - timeout = ldlm_cp_timeout(lock); - - lock->l_last_activity = ktime_get_real_seconds(); - - if (imp) { - spin_lock(&imp->imp_lock); - conn_cnt = imp->imp_conn_cnt; - spin_unlock(&imp->imp_lock); - } - if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST, - OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) { - ldlm_set_fail_loc(lock); - rc = -EINTR; - } else { - /* Go to sleep until the lock is granted or canceled. */ - if (!ldlm_is_no_timeout(lock)) { - /* Wait uninterruptible for a while first */ - rc = wait_event_idle_timeout(lock->l_waitq, - is_granted_or_cancelled(lock), - timeout * HZ); - if (rc == 0) - ldlm_expired_completion_wait(lock, conn_cnt); - } - /* Now wait abortable */ - if (rc == 0) - rc = l_wait_event_abortable(lock->l_waitq, - is_granted_or_cancelled(lock)); - else - rc = 0; - } - - if (rc) { - LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)", - rc); - return rc; - } - - return ldlm_completion_tail(lock, data); -} -EXPORT_SYMBOL(ldlm_completion_ast); - -static void failed_lock_cleanup(struct ldlm_namespace *ns, - struct ldlm_lock *lock, int mode) -{ - int need_cancel = 0; - - /* Set a flag to prevent us from sending a CANCEL (bug 407) */ - lock_res_and_lock(lock); - /* Check that lock is not granted or failed, we might race. 
*/ - if ((lock->l_req_mode != lock->l_granted_mode) && - !ldlm_is_failed(lock)) { - /* Make sure that this lock will not be found by raced - * bl_ast and -EINVAL reply is sent to server anyways. - * bug 17645 - */ - lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED | - LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING; - need_cancel = 1; - } - unlock_res_and_lock(lock); - - if (need_cancel) - LDLM_DEBUG(lock, - "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING"); - else - LDLM_DEBUG(lock, "lock was granted or failed in race"); - - /* XXX - HACK because we shouldn't call ldlm_lock_destroy() - * from llite/file.c/ll_file_flock(). - */ - /* This code makes for the fact that we do not have blocking handler on - * a client for flock locks. As such this is the place where we must - * completely kill failed locks. (interrupted and those that - * were waiting to be granted when server evicted us. - */ - if (lock->l_resource->lr_type == LDLM_FLOCK) { - lock_res_and_lock(lock); - if (!ldlm_is_destroyed(lock)) { - ldlm_resource_unlink_lock(lock); - ldlm_lock_decref_internal_nolock(lock, mode); - ldlm_lock_destroy_nolock(lock); - } - unlock_res_and_lock(lock); - } else { - ldlm_lock_decref_internal(lock, mode); - } -} - -/** - * Finishing portion of client lock enqueue code. - * - * Called after receiving reply from server. - */ -int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - enum ldlm_type type, __u8 with_policy, - enum ldlm_mode mode, - __u64 *flags, void *lvb, __u32 lvb_len, - const struct lustre_handle *lockh, int rc) -{ - struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; - int is_replay = *flags & LDLM_FL_REPLAY; - struct ldlm_lock *lock; - struct ldlm_reply *reply; - int cleanup_phase = 1; - - lock = ldlm_handle2lock(lockh); - /* ldlm_cli_enqueue is holding a reference on this lock. 
*/ - if (!lock) { - LASSERT(type == LDLM_FLOCK); - return -ENOLCK; - } - - LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len), - "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len); - - if (rc != ELDLM_OK) { - LASSERT(!is_replay); - LDLM_DEBUG(lock, "client-side enqueue END (%s)", - rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED"); - - if (rc != ELDLM_LOCK_ABORTED) - goto cleanup; - } - - /* Before we return, swab the reply */ - reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - if (!reply) { - rc = -EPROTO; - goto cleanup; - } - - if (lvb_len > 0) { - int size = 0; - - size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, - RCL_SERVER); - if (size < 0) { - LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", size); - rc = size; - goto cleanup; - } else if (unlikely(size > lvb_len)) { - LDLM_ERROR(lock, - "Replied LVB is larger than expectation, expected = %d, replied = %d", - lvb_len, size); - rc = -EINVAL; - goto cleanup; - } - lvb_len = size; - } - - if (rc == ELDLM_LOCK_ABORTED) { - if (lvb_len > 0 && lvb) - rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, - lvb, lvb_len); - if (rc == 0) - rc = ELDLM_LOCK_ABORTED; - goto cleanup; - } - - /* lock enqueued on the server */ - cleanup_phase = 0; - - lock_res_and_lock(lock); - lock->l_remote_handle = reply->lock_handle; - - *flags = ldlm_flags_from_wire(reply->lock_flags); - lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & - LDLM_FL_INHERIT_MASK); - unlock_res_and_lock(lock); - - CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: 0x%llx\n", - lock, reply->lock_handle.cookie, *flags); - - /* If enqueue returned a blocked lock but the completion handler has - * already run, then it fixed up the resource and we don't need to do it - * again. 
- */ - if ((*flags) & LDLM_FL_LOCK_CHANGED) { - int newmode = reply->lock_desc.l_req_mode; - - LASSERT(!is_replay); - if (newmode && newmode != lock->l_req_mode) { - LDLM_DEBUG(lock, "server returned different mode %s", - ldlm_lockname[newmode]); - lock->l_req_mode = newmode; - } - - if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name, - &lock->l_resource->lr_name)) { - CDEBUG(D_INFO, - "remote intent success, locking " DLDLMRES " instead of " DLDLMRES "\n", - PLDLMRES(&reply->lock_desc.l_resource), - PLDLMRES(lock->l_resource)); - - rc = ldlm_lock_change_resource(ns, lock, - &reply->lock_desc.l_resource.lr_name); - if (rc || !lock->l_resource) { - rc = -ENOMEM; - goto cleanup; - } - LDLM_DEBUG(lock, "client-side enqueue, new resource"); - } - if (with_policy) - if (!(type == LDLM_IBITS && - !(exp_connect_flags(exp) & OBD_CONNECT_IBITS))) - /* We assume lock type cannot change on server*/ - ldlm_convert_policy_to_local(exp, - lock->l_resource->lr_type, - &reply->lock_desc.l_policy_data, - &lock->l_policy_data); - if (type != LDLM_PLAIN) - LDLM_DEBUG(lock, - "client-side enqueue, new policy data"); - } - - if ((*flags) & LDLM_FL_AST_SENT) { - lock_res_and_lock(lock); - lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; - unlock_res_and_lock(lock); - LDLM_DEBUG(lock, "enqueue reply includes blocking AST"); - } - - /* If the lock has already been granted by a completion AST, don't - * clobber the LVB with an older one. - */ - if (lvb_len > 0) { - /* We must lock or a racing completion might update lvb without - * letting us know and we'll clobber the correct value. 
- * Cannot unlock after the check either, as that still leaves - * a tiny window for completion to get in - */ - lock_res_and_lock(lock); - if (lock->l_req_mode != lock->l_granted_mode) - rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, - lock->l_lvb_data, lvb_len); - unlock_res_and_lock(lock); - if (rc < 0) { - cleanup_phase = 1; - goto cleanup; - } - } - - if (!is_replay) { - rc = ldlm_lock_enqueue(ns, &lock, NULL, flags); - if (lock->l_completion_ast) { - int err = lock->l_completion_ast(lock, *flags, NULL); - - if (!rc) - rc = err; - if (rc) - cleanup_phase = 1; - } - } - - if (lvb_len > 0 && lvb) { - /* Copy the LVB here, and not earlier, because the completion - * AST (if any) can override what we got in the reply - */ - memcpy(lvb, lock->l_lvb_data, lvb_len); - } - - LDLM_DEBUG(lock, "client-side enqueue END"); -cleanup: - if (cleanup_phase == 1 && rc) - failed_lock_cleanup(ns, lock, mode); - /* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */ - LDLM_LOCK_PUT(lock); - LDLM_LOCK_RELEASE(lock); - return rc; -} -EXPORT_SYMBOL(ldlm_cli_enqueue_fini); - -/** - * Estimate number of lock handles that would fit into request of given - * size. PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into - * a single page on the send/receive side. XXX: 512 should be changed to - * more adequate value. 
- */ -static inline int ldlm_req_handles_avail(int req_size, int off) -{ - int avail; - - avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size; - if (likely(avail >= 0)) - avail /= (int)sizeof(struct lustre_handle); - else - avail = 0; - avail += LDLM_LOCKREQ_HANDLES - off; - - return avail; -} - -static inline int ldlm_capsule_handles_avail(struct req_capsule *pill, - enum req_location loc, - int off) -{ - u32 size = req_capsule_msg_size(pill, loc); - - return ldlm_req_handles_avail(size, off); -} - -static inline int ldlm_format_handles_avail(struct obd_import *imp, - const struct req_format *fmt, - enum req_location loc, int off) -{ - u32 size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc); - - return ldlm_req_handles_avail(size, off); -} - -/** - * Cancel LRU locks and pack them into the enqueue request. Pack there the given - * \a count locks in \a cancels. - * - * This is to be called by functions preparing their own requests that - * might contain lists of locks to cancel in addition to actual operation - * that needs to be performed. - */ -int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, - int version, int opc, int canceloff, - struct list_head *cancels, int count) -{ - struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; - struct req_capsule *pill = &req->rq_pill; - struct ldlm_request *dlm = NULL; - int flags, avail, to_free, pack = 0; - LIST_HEAD(head); - int rc; - - if (!cancels) - cancels = &head; - if (ns_connect_cancelset(ns)) { - /* Estimate the amount of available space in the request. */ - req_capsule_filled_sizes(pill, RCL_CLIENT); - avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff); - - flags = ns_connect_lru_resize(ns) ? - LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED; - to_free = !ns_connect_lru_resize(ns) && - opc == LDLM_ENQUEUE ? 1 : 0; - - /* Cancel LRU locks here _only_ if the server supports - * EARLY_CANCEL. 
Otherwise we have to send extra CANCEL - * RPC, which will make us slower. - */ - if (avail > count) - count += ldlm_cancel_lru_local(ns, cancels, to_free, - avail - count, 0, flags); - if (avail > count) - pack = count; - else - pack = avail; - req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT, - ldlm_request_bufsize(pack, opc)); - } - - rc = ptlrpc_request_pack(req, version, opc); - if (rc) { - ldlm_lock_list_put(cancels, l_bl_ast, count); - return rc; - } - - if (ns_connect_cancelset(ns)) { - if (canceloff) { - dlm = req_capsule_client_get(pill, &RMF_DLM_REQ); - LASSERT(dlm); - /* Skip first lock handler in ldlm_request_pack(), - * this method will increment @lock_count according - * to the lock handle amount actually written to - * the buffer. - */ - dlm->lock_count = canceloff; - } - /* Pack into the request @pack lock handles. */ - ldlm_cli_cancel_list(cancels, pack, req, 0); - /* Prepare and send separate cancel RPC for others. */ - ldlm_cli_cancel_list(cancels, count - pack, NULL, 0); - } else { - ldlm_lock_list_put(cancels, l_bl_ast, count); - } - return 0; -} -EXPORT_SYMBOL(ldlm_prep_elc_req); - -int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req, - struct list_head *cancels, int count) -{ - return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE, - LDLM_ENQUEUE_CANCEL_OFF, cancels, count); -} -EXPORT_SYMBOL(ldlm_prep_enqueue_req); - -static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, - int lvb_len) -{ - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE); - if (!req) - return ERR_PTR(-ENOMEM); - - rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); - if (rc) { - ptlrpc_request_free(req); - return ERR_PTR(rc); - } - - req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len); - ptlrpc_request_set_replen(req); - return req; -} - -/** - * Client-side lock enqueue. 
- * - * If a request has some specific initialisation it is passed in \a reqp, - * otherwise it is created in ldlm_cli_enqueue. - * - * Supports sync and async requests, pass \a async flag accordingly. If a - * request was created in ldlm_cli_enqueue and it is the async request, - * pass it to the caller in \a reqp. - */ -int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, - struct ldlm_enqueue_info *einfo, - const struct ldlm_res_id *res_id, - union ldlm_policy_data const *policy, __u64 *flags, - void *lvb, __u32 lvb_len, enum lvb_type lvb_type, - struct lustre_handle *lockh, int async) -{ - struct ldlm_namespace *ns; - struct ldlm_lock *lock; - struct ldlm_request *body; - int is_replay = *flags & LDLM_FL_REPLAY; - int req_passed_in = 1; - int rc, err; - struct ptlrpc_request *req; - - ns = exp->exp_obd->obd_namespace; - - /* If we're replaying this lock, just check some invariants. - * If we're creating a new lock, get everything all setup nicely. - */ - if (is_replay) { - lock = ldlm_handle2lock_long(lockh, 0); - LASSERT(lock); - LDLM_DEBUG(lock, "client-side enqueue START"); - LASSERT(exp == lock->l_conn_export); - } else { - const struct ldlm_callback_suite cbs = { - .lcs_completion = einfo->ei_cb_cp, - .lcs_blocking = einfo->ei_cb_bl, - .lcs_glimpse = einfo->ei_cb_gl - }; - lock = ldlm_lock_create(ns, res_id, einfo->ei_type, - einfo->ei_mode, &cbs, einfo->ei_cbdata, - lvb_len, lvb_type); - if (IS_ERR(lock)) - return PTR_ERR(lock); - /* for the local lock, add the reference */ - ldlm_lock_addref_internal(lock, einfo->ei_mode); - ldlm_lock2handle(lock, lockh); - if (policy) - lock->l_policy_data = *policy; - - if (einfo->ei_type == LDLM_EXTENT) { - /* extent lock without policy is a bug */ - if (!policy) - LBUG(); - - lock->l_req_extent = policy->l_extent; - } - LDLM_DEBUG(lock, "client-side enqueue START, flags %llx", - *flags); - } - - lock->l_conn_export = exp; - lock->l_export = NULL; - lock->l_blocking_ast = einfo->ei_cb_bl; - 
lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL)); - lock->l_last_activity = ktime_get_real_seconds(); - - /* lock not sent to server yet */ - if (!reqp || !*reqp) { - req = ldlm_enqueue_pack(exp, lvb_len); - if (IS_ERR(req)) { - failed_lock_cleanup(ns, lock, einfo->ei_mode); - LDLM_LOCK_RELEASE(lock); - return PTR_ERR(req); - } - - req_passed_in = 0; - if (reqp) - *reqp = req; - } else { - int len; - - req = *reqp; - len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, - RCL_CLIENT); - LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n", - DLM_LOCKREQ_OFF, len, (int)sizeof(*body)); - } - - /* Dump lock data into the request buffer */ - body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - ldlm_lock2desc(lock, &body->lock_desc); - body->lock_flags = ldlm_flags_to_wire(*flags); - body->lock_handle[0] = *lockh; - - if (async) { - LASSERT(reqp); - return 0; - } - - LDLM_DEBUG(lock, "sending request"); - - rc = ptlrpc_queue_wait(req); - - err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0, - einfo->ei_mode, flags, lvb, lvb_len, - lockh, rc); - - /* If ldlm_cli_enqueue_fini did not find the lock, we need to free - * one reference that we took - */ - if (err == -ENOLCK) - LDLM_LOCK_RELEASE(lock); - else - rc = err; - - if (!req_passed_in && req) { - ptlrpc_req_finished(req); - if (reqp) - *reqp = NULL; - } - - return rc; -} -EXPORT_SYMBOL(ldlm_cli_enqueue); - -/** - * Cancel locks locally. - * Returns: - * \retval LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server - * \retval LDLM_FL_CANCELING otherwise; - * \retval LDLM_FL_BL_AST if there is a need for a separate CANCEL RPC. 
- */ -static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock) -{ - __u64 rc = LDLM_FL_LOCAL_ONLY; - - if (lock->l_conn_export) { - bool local_only; - - LDLM_DEBUG(lock, "client-side cancel"); - /* Set this flag to prevent others from getting new references*/ - lock_res_and_lock(lock); - ldlm_set_cbpending(lock); - local_only = !!(lock->l_flags & - (LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK)); - ldlm_cancel_callback(lock); - rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING; - unlock_res_and_lock(lock); - - if (local_only) { - CDEBUG(D_DLMTRACE, - "not sending request (at caller's instruction)\n"); - rc = LDLM_FL_LOCAL_ONLY; - } - ldlm_lock_cancel(lock); - } else { - LDLM_ERROR(lock, "Trying to cancel local lock"); - LBUG(); - } - - return rc; -} - -/** - * Pack \a count locks in \a head into ldlm_request buffer of request \a req. - */ -static void ldlm_cancel_pack(struct ptlrpc_request *req, - struct list_head *head, int count) -{ - struct ldlm_request *dlm; - struct ldlm_lock *lock; - int max, packed = 0; - - dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - LASSERT(dlm); - - /* Check the room in the request buffer. */ - max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) - - sizeof(struct ldlm_request); - max /= sizeof(struct lustre_handle); - max += LDLM_LOCKREQ_HANDLES; - LASSERT(max >= dlm->lock_count + count); - - /* XXX: it would be better to pack lock handles grouped by resource. - * so that the server cancel would call filter_lvbo_update() less - * frequently. - */ - list_for_each_entry(lock, head, l_bl_ast) { - if (!count--) - break; - LASSERT(lock->l_conn_export); - /* Pack the lock handle to the given request buffer. */ - LDLM_DEBUG(lock, "packing"); - dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle; - packed++; - } - CDEBUG(D_DLMTRACE, "%d locks packed\n", packed); -} - -/** - * Prepare and send a batched cancel RPC. It will include \a count lock - * handles of locks given in \a cancels list. 
- */ -static int ldlm_cli_cancel_req(struct obd_export *exp, - struct list_head *cancels, - int count, enum ldlm_cancel_flags flags) -{ - struct ptlrpc_request *req = NULL; - struct obd_import *imp; - int free, sent = 0; - int rc = 0; - - LASSERT(exp); - LASSERT(count > 0); - - CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val); - - if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE)) - return count; - - free = ldlm_format_handles_avail(class_exp2cliimp(exp), - &RQF_LDLM_CANCEL, RCL_CLIENT, 0); - if (count > free) - count = free; - - while (1) { - imp = class_exp2cliimp(exp); - if (!imp || imp->imp_invalid) { - CDEBUG(D_DLMTRACE, - "skipping cancel on invalid import %p\n", imp); - return count; - } - - req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL); - if (!req) { - rc = -ENOMEM; - goto out; - } - - req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT); - req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT, - ldlm_request_bufsize(count, LDLM_CANCEL)); - - rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL); - if (rc) { - ptlrpc_request_free(req); - goto out; - } - - req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL; - req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL; - ptlrpc_at_set_req_timeout(req); - - ldlm_cancel_pack(req, cancels, count); - - ptlrpc_request_set_replen(req); - if (flags & LCF_ASYNC) { - ptlrpcd_add_req(req); - sent = count; - goto out; - } - - rc = ptlrpc_queue_wait(req); - if (rc == LUSTRE_ESTALE) { - CDEBUG(D_DLMTRACE, - "client/server (nid %s) out of sync -- not fatal\n", - libcfs_nid2str(req->rq_import-> - imp_connection->c_peer.nid)); - rc = 0; - } else if (rc == -ETIMEDOUT && /* check there was no reconnect*/ - req->rq_import_generation == imp->imp_generation) { - ptlrpc_req_finished(req); - continue; - } else if (rc != ELDLM_OK) { - /* -ESHUTDOWN is common on umount */ - CDEBUG_LIMIT(rc == -ESHUTDOWN ? 
D_DLMTRACE : D_ERROR, - "Got rc %d from cancel RPC: canceling anyway\n", - rc); - break; - } - sent = count; - break; - } - - ptlrpc_req_finished(req); -out: - return sent ? sent : rc; -} - -static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp) -{ - return &imp->imp_obd->obd_namespace->ns_pool; -} - -/** - * Update client's OBD pool related fields with new SLV and Limit from \a req. - */ -int ldlm_cli_update_pool(struct ptlrpc_request *req) -{ - struct obd_device *obd; - __u64 new_slv; - __u32 new_limit; - - if (unlikely(!req->rq_import || !req->rq_import->imp_obd || - !imp_connect_lru_resize(req->rq_import))) { - /* - * Do nothing for corner cases. - */ - return 0; - } - - /* In some cases RPC may contain SLV and limit zeroed out. This - * is the case when server does not support LRU resize feature. - * This is also possible in some recovery cases when server-side - * reqs have no reference to the OBD export and thus access to - * server-side namespace is not possible. - */ - if (lustre_msg_get_slv(req->rq_repmsg) == 0 || - lustre_msg_get_limit(req->rq_repmsg) == 0) { - DEBUG_REQ(D_HA, req, - "Zero SLV or Limit found (SLV: %llu, Limit: %u)", - lustre_msg_get_slv(req->rq_repmsg), - lustre_msg_get_limit(req->rq_repmsg)); - return 0; - } - - new_limit = lustre_msg_get_limit(req->rq_repmsg); - new_slv = lustre_msg_get_slv(req->rq_repmsg); - obd = req->rq_import->imp_obd; - - /* Set new SLV and limit in OBD fields to make them accessible - * to the pool thread. We do not access obd_namespace and pool - * directly here as there is no reliable way to make sure that - * they are still alive at cleanup time. Evil races are possible - * which may cause Oops at that time. - */ - write_lock(&obd->obd_pool_lock); - obd->obd_pool_slv = new_slv; - obd->obd_pool_limit = new_limit; - write_unlock(&obd->obd_pool_lock); - - return 0; -} - -/** - * Client side lock cancel. - * - * Lock must not have any readers or writers by this time. 
- */ -int ldlm_cli_cancel(const struct lustre_handle *lockh, - enum ldlm_cancel_flags cancel_flags) -{ - struct obd_export *exp; - int avail, flags, count = 1; - __u64 rc = 0; - struct ldlm_namespace *ns; - struct ldlm_lock *lock; - LIST_HEAD(cancels); - - lock = ldlm_handle2lock_long(lockh, 0); - if (!lock) { - LDLM_DEBUG_NOLOCK("lock is already being destroyed"); - return 0; - } - - lock_res_and_lock(lock); - /* Lock is being canceled and the caller doesn't want to wait */ - if (ldlm_is_canceling(lock) && (cancel_flags & LCF_ASYNC)) { - unlock_res_and_lock(lock); - LDLM_LOCK_RELEASE(lock); - return 0; - } - - ldlm_set_canceling(lock); - unlock_res_and_lock(lock); - - rc = ldlm_cli_cancel_local(lock); - if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) { - LDLM_LOCK_RELEASE(lock); - return 0; - } - /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL - * RPC which goes to canceld portal, so we can cancel other LRU locks - * here and send them all as one LDLM_CANCEL RPC. - */ - LASSERT(list_empty(&lock->l_bl_ast)); - list_add(&lock->l_bl_ast, &cancels); - - exp = lock->l_conn_export; - if (exp_connect_cancelset(exp)) { - avail = ldlm_format_handles_avail(class_exp2cliimp(exp), - &RQF_LDLM_CANCEL, - RCL_CLIENT, 0); - LASSERT(avail > 0); - - ns = ldlm_lock_to_ns(lock); - flags = ns_connect_lru_resize(ns) ? - LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED; - count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1, - LCF_BL_AST, flags); - } - ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags); - return 0; -} -EXPORT_SYMBOL(ldlm_cli_cancel); - -/** - * Locally cancel up to \a count locks in list \a cancels. - * Return the number of cancelled locks. 
- */ -int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - enum ldlm_cancel_flags flags) -{ - LIST_HEAD(head); - struct ldlm_lock *lock, *next; - int left = 0, bl_ast = 0; - __u64 rc; - - left = count; - list_for_each_entry_safe(lock, next, cancels, l_bl_ast) { - if (left-- == 0) - break; - - if (flags & LCF_LOCAL) { - rc = LDLM_FL_LOCAL_ONLY; - ldlm_lock_cancel(lock); - } else { - rc = ldlm_cli_cancel_local(lock); - } - /* Until we have compound requests and can send LDLM_CANCEL - * requests batched with generic RPCs, we need to send cancels - * with the LDLM_FL_BL_AST flag in a separate RPC from - * the one being generated now. - */ - if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) { - LDLM_DEBUG(lock, "Cancel lock separately"); - list_del_init(&lock->l_bl_ast); - list_add(&lock->l_bl_ast, &head); - bl_ast++; - continue; - } - if (rc == LDLM_FL_LOCAL_ONLY) { - /* CANCEL RPC should not be sent to server. */ - list_del_init(&lock->l_bl_ast); - LDLM_LOCK_RELEASE(lock); - count--; - } - } - if (bl_ast > 0) { - count -= bl_ast; - ldlm_cli_cancel_list(&head, bl_ast, NULL, 0); - } - - return count; -} - -/** - * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back - * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g. - * readahead requests, ...) - */ -static enum ldlm_policy_res -ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, - int unused, int added, int count) -{ - enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK; - - /* don't check added & count since we want to process all locks - * from unused list. - * It's fine to not take lock to access lock->l_resource since - * the lock has already been granted so it won't change. 
- */ - switch (lock->l_resource->lr_type) { - case LDLM_EXTENT: - case LDLM_IBITS: - if (ns->ns_cancel && ns->ns_cancel(lock) != 0) - break; - /* fall through */ - default: - result = LDLM_POLICY_SKIP_LOCK; - lock_res_and_lock(lock); - ldlm_set_skipped(lock); - unlock_res_and_lock(lock); - break; - } - - return result; -} - -/** - * Callback function for LRU-resize policy. Decides whether to keep - * \a lock in LRU for current \a LRU size \a unused, added in current - * scan \a added and number of locks to be preferably canceled \a count. - * - * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning - * - * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU - */ -static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, - struct ldlm_lock *lock, - int unused, int added, - int count) -{ - unsigned long cur = jiffies; - struct ldlm_pool *pl = &ns->ns_pool; - __u64 slv, lvf, lv; - unsigned long la; - - /* Stop LRU processing when we reach past @count or have checked all - * locks in LRU. - */ - if (count && added >= count) - return LDLM_POLICY_KEEP_LOCK; - - /* - * Despite of the LV, It doesn't make sense to keep the lock which - * is unused for ns_max_age time. - */ - if (time_after(jiffies, lock->l_last_used + ns->ns_max_age)) - return LDLM_POLICY_CANCEL_LOCK; - - slv = ldlm_pool_get_slv(pl); - lvf = ldlm_pool_get_lvf(pl); - la = (cur - lock->l_last_used) / HZ; - lv = lvf * la * unused; - - /* Inform pool about current CLV to see it via debugfs. */ - ldlm_pool_set_clv(pl, lv); - - /* Stop when SLV is not yet come from server or lv is smaller than - * it is. - */ - if (slv == 0 || lv < slv) - return LDLM_POLICY_KEEP_LOCK; - - return LDLM_POLICY_CANCEL_LOCK; -} - -/** - * Callback function for debugfs used policy. Makes decision whether to keep - * \a lock in LRU for current \a LRU size \a unused, added in current scan \a - * added and number of locks to be preferably canceled \a count. 
- * - * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning - * - * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU - */ -static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns, - struct ldlm_lock *lock, - int unused, int added, - int count) -{ - /* Stop LRU processing when we reach past @count or have checked all - * locks in LRU. - */ - return (added >= count) ? - LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; -} - -/** - * Callback function for aged policy. Makes decision whether to keep \a lock in - * LRU for current LRU size \a unused, added in current scan \a added and - * number of locks to be preferably canceled \a count. - * - * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning - * - * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU - */ -static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns, - struct ldlm_lock *lock, - int unused, int added, - int count) -{ - if ((added >= count) && - time_before(jiffies, lock->l_last_used + ns->ns_max_age)) - return LDLM_POLICY_KEEP_LOCK; - - return LDLM_POLICY_CANCEL_LOCK; -} - -static enum ldlm_policy_res -ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns, - struct ldlm_lock *lock, - int unused, int added, - int count) -{ - enum ldlm_policy_res result; - - result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count); - if (result == LDLM_POLICY_KEEP_LOCK) - return result; - - return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count); -} - -/** - * Callback function for default policy. Makes decision whether to keep \a lock - * in LRU for current LRU size \a unused, added in current scan \a added and - * number of locks to be preferably canceled \a count. 
- * - * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning - * - * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU - */ -static enum ldlm_policy_res -ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, - int unused, int added, int count) -{ - /* Stop LRU processing when we reach past count or have checked all - * locks in LRU. - */ - return (added >= count) ? - LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; -} - -typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)( - struct ldlm_namespace *, - struct ldlm_lock *, int, - int, int); - -static ldlm_cancel_lru_policy_t -ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags) -{ - if (flags & LDLM_LRU_FLAG_NO_WAIT) - return ldlm_cancel_no_wait_policy; - - if (ns_connect_lru_resize(ns)) { - if (flags & LDLM_LRU_FLAG_SHRINK) - /* We kill passed number of old locks. */ - return ldlm_cancel_passed_policy; - else if (flags & LDLM_LRU_FLAG_LRUR) - return ldlm_cancel_lrur_policy; - else if (flags & LDLM_LRU_FLAG_PASSED) - return ldlm_cancel_passed_policy; - else if (flags & LDLM_LRU_FLAG_LRUR_NO_WAIT) - return ldlm_cancel_lrur_no_wait_policy; - } else { - if (flags & LDLM_LRU_FLAG_AGED) - return ldlm_cancel_aged_policy; - } - - return ldlm_cancel_default_policy; -} - -/** - * - Free space in LRU for \a count new locks, - * redundant unused locks are canceled locally; - * - also cancel locally unused aged locks; - * - do not cancel more than \a max locks; - * - GET the found locks and add them into the \a cancels list. - * - * A client lock can be added to the l_bl_ast list only when it is - * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing - * CANCEL. There are the following use cases: - * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and - * ldlm_cli_cancel(), which check and set this flag properly. As any - * attempt to cancel a lock rely on this flag, l_bl_ast list is accessed - * later without any special locking. 
- * - * Calling policies for enabled LRU resize: - * ---------------------------------------- - * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to - * cancel not more than \a count locks; - * - * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located - * at the beginning of LRU list); - * - * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according - * to memory pressure policy function; - * - * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to - * "aged policy". - * - * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible - * (typically before replaying locks) w/o - * sending any RPCs or waiting for any - * outstanding RPC to complete. - */ -static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, - struct list_head *cancels, int count, int max, - int flags) -{ - ldlm_cancel_lru_policy_t pf; - struct ldlm_lock *lock, *next; - int added = 0, unused, remained; - int no_wait = flags & - (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT); - - spin_lock(&ns->ns_lock); - unused = ns->ns_nr_unused; - remained = unused; - - if (!ns_connect_lru_resize(ns)) - count += unused - ns->ns_max_unused; - - pf = ldlm_cancel_lru_policy(ns, flags); - LASSERT(pf); - - while (!list_empty(&ns->ns_unused_list)) { - enum ldlm_policy_res result; - time_t last_use = 0; - - /* all unused locks */ - if (remained-- <= 0) - break; - - /* For any flags, stop scanning if @max is reached. */ - if (max && added >= max) - break; - - list_for_each_entry_safe(lock, next, &ns->ns_unused_list, - l_lru) { - /* No locks which got blocking requests. */ - LASSERT(!ldlm_is_bl_ast(lock)); - - if (no_wait && ldlm_is_skipped(lock)) - /* already processed */ - continue; - - last_use = lock->l_last_used; - if (last_use == jiffies) - continue; - - /* Somebody is already doing CANCEL. No need for this - * lock in LRU, do not traverse it again. 
- */ - if (!ldlm_is_canceling(lock)) - break; - - ldlm_lock_remove_from_lru_nolock(lock); - } - if (&lock->l_lru == &ns->ns_unused_list) - break; - - LDLM_LOCK_GET(lock); - spin_unlock(&ns->ns_lock); - lu_ref_add(&lock->l_reference, __func__, current); - - /* Pass the lock through the policy filter and see if it - * should stay in LRU. - * - * Even for shrinker policy we stop scanning if - * we find a lock that should stay in the cache. - * We should take into account lock age anyway - * as a new lock is a valuable resource even if - * it has a low weight. - * - * That is, for shrinker policy we drop only - * old locks, but additionally choose them by - * their weight. Big extent locks will stay in - * the cache. - */ - result = pf(ns, lock, unused, added, count); - if (result == LDLM_POLICY_KEEP_LOCK) { - lu_ref_del(&lock->l_reference, - __func__, current); - LDLM_LOCK_RELEASE(lock); - spin_lock(&ns->ns_lock); - break; - } - if (result == LDLM_POLICY_SKIP_LOCK) { - lu_ref_del(&lock->l_reference, - __func__, current); - LDLM_LOCK_RELEASE(lock); - spin_lock(&ns->ns_lock); - continue; - } - - lock_res_and_lock(lock); - /* Check flags again under the lock. */ - if (ldlm_is_canceling(lock) || - (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) { - /* Another thread is removing lock from LRU, or - * somebody is already doing CANCEL, or there - * is a blocking request which will send cancel - * by itself, or the lock is no longer unused or - * the lock has been used since the pf() call and - * pages could be put under it. - */ - unlock_res_and_lock(lock); - lu_ref_del(&lock->l_reference, - __func__, current); - LDLM_LOCK_RELEASE(lock); - spin_lock(&ns->ns_lock); - continue; - } - LASSERT(!lock->l_readers && !lock->l_writers); - - /* If we have chosen to cancel this lock voluntarily, we - * better send cancel notification to server, so that it - * frees appropriate state. 
This might lead to a race - * where while we are doing cancel here, server is also - * silently cancelling this lock. - */ - ldlm_clear_cancel_on_block(lock); - - /* Setting the CBPENDING flag is a little misleading, - * but prevents an important race; namely, once - * CBPENDING is set, the lock can accumulate no more - * readers/writers. Since readers and writers are - * already zero here, ldlm_lock_decref() won't see - * this flag and call l_blocking_ast - */ - lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING; - - /* We can't re-add to l_lru as it confuses the - * refcounting in ldlm_lock_remove_from_lru() if an AST - * arrives after we drop lr_lock below. We use l_bl_ast - * and can't use l_pending_chain as it is used both on - * server and client nevertheless bug 5666 says it is - * used only on server - */ - LASSERT(list_empty(&lock->l_bl_ast)); - list_add(&lock->l_bl_ast, cancels); - unlock_res_and_lock(lock); - lu_ref_del(&lock->l_reference, __func__, current); - spin_lock(&ns->ns_lock); - added++; - unused--; - } - spin_unlock(&ns->ns_lock); - return added; -} - -int ldlm_cancel_lru_local(struct ldlm_namespace *ns, - struct list_head *cancels, int count, int max, - enum ldlm_cancel_flags cancel_flags, int flags) -{ - int added; - - added = ldlm_prepare_lru_list(ns, cancels, count, max, flags); - if (added <= 0) - return added; - return ldlm_cli_cancel_list_local(cancels, added, cancel_flags); -} - -/** - * Cancel at least \a nr locks from given namespace LRU. - * - * When called with LCF_ASYNC the blocking callback will be handled - * in a thread and this function will return after the thread has been - * asked to call the callback. When called with LCF_ASYNC the blocking - * callback will be performed in this function. - */ -int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, - enum ldlm_cancel_flags cancel_flags, - int flags) -{ - LIST_HEAD(cancels); - int count, rc; - - /* Just prepare the list of locks, do not actually cancel them yet. 
- * Locks are cancelled later in a separate thread. - */ - count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags); - rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags); - if (rc == 0) - return count; - - return 0; -} - -/** - * Find and cancel locally unused locks found on resource, matched to the - * given policy, mode. GET the found locks and add them into the \a cancels - * list. - */ -int ldlm_cancel_resource_local(struct ldlm_resource *res, - struct list_head *cancels, - union ldlm_policy_data *policy, - enum ldlm_mode mode, __u64 lock_flags, - enum ldlm_cancel_flags cancel_flags, - void *opaque) -{ - struct ldlm_lock *lock; - int count = 0; - - lock_res(res); - list_for_each_entry(lock, &res->lr_granted, l_res_link) { - if (opaque && lock->l_ast_data != opaque) { - LDLM_ERROR(lock, "data %p doesn't match opaque %p", - lock->l_ast_data, opaque); - continue; - } - - if (lock->l_readers || lock->l_writers) - continue; - - /* If somebody is already doing CANCEL, or blocking AST came, - * skip this lock. - */ - if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock)) - continue; - - if (lockmode_compat(lock->l_granted_mode, mode)) - continue; - - /* If policy is given and this is IBITS lock, add to list only - * those locks that match by policy. - */ - if (policy && (lock->l_resource->lr_type == LDLM_IBITS) && - !(lock->l_policy_data.l_inodebits.bits & - policy->l_inodebits.bits)) - continue; - - /* See CBPENDING comment in ldlm_cancel_lru */ - lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING | - lock_flags; - - LASSERT(list_empty(&lock->l_bl_ast)); - list_add(&lock->l_bl_ast, cancels); - LDLM_LOCK_GET(lock); - count++; - } - unlock_res(res); - - return ldlm_cli_cancel_list_local(cancels, count, cancel_flags); -} -EXPORT_SYMBOL(ldlm_cancel_resource_local); - -/** - * Cancel client-side locks from a list and send/prepare cancel RPCs to the - * server. 
- * If \a req is NULL, send CANCEL request to server with handles of locks - * in the \a cancels. If EARLY_CANCEL is not supported, send CANCEL requests - * separately per lock. - * If \a req is not NULL, put handles of locks in \a cancels into the request - * buffer at the offset \a off. - * Destroy \a cancels at the end. - */ -int ldlm_cli_cancel_list(struct list_head *cancels, int count, - struct ptlrpc_request *req, - enum ldlm_cancel_flags flags) -{ - struct ldlm_lock *lock; - int res = 0; - - if (list_empty(cancels) || count == 0) - return 0; - - /* XXX: requests (both batched and not) could be sent in parallel. - * Usually it is enough to have just 1 RPC, but it is possible that - * there are too many locks to be cancelled in LRU or on a resource. - * It would also speed up the case when the server does not support - * the feature. - */ - while (count > 0) { - LASSERT(!list_empty(cancels)); - lock = list_first_entry(cancels, struct ldlm_lock, l_bl_ast); - LASSERT(lock->l_conn_export); - - if (exp_connect_cancelset(lock->l_conn_export)) { - res = count; - if (req) - ldlm_cancel_pack(req, cancels, count); - else - res = ldlm_cli_cancel_req(lock->l_conn_export, - cancels, count, - flags); - } else { - res = ldlm_cli_cancel_req(lock->l_conn_export, - cancels, 1, flags); - } - - if (res < 0) { - CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR, - "%s: %d\n", __func__, res); - res = count; - } - - count -= res; - ldlm_lock_list_put(cancels, l_bl_ast, res); - } - LASSERT(count == 0); - return 0; -} -EXPORT_SYMBOL(ldlm_cli_cancel_list); - -/** - * Cancel all locks on a resource that have 0 readers/writers. - * - * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying - * to notify the server. 
- */ -int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, - const struct ldlm_res_id *res_id, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - enum ldlm_cancel_flags flags, - void *opaque) -{ - struct ldlm_resource *res; - LIST_HEAD(cancels); - int count; - int rc; - - res = ldlm_resource_get(ns, NULL, res_id, 0, 0); - if (IS_ERR(res)) { - /* This is not a problem. */ - CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]); - return 0; - } - - LDLM_RESOURCE_ADDREF(res); - count = ldlm_cancel_resource_local(res, &cancels, policy, mode, - 0, flags | LCF_BL_AST, opaque); - rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags); - if (rc != ELDLM_OK) - CERROR("canceling unused lock " DLDLMRES ": rc = %d\n", - PLDLMRES(res), rc); - - LDLM_RESOURCE_DELREF(res); - ldlm_resource_putref(res); - return 0; -} -EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource); - -struct ldlm_cli_cancel_arg { - int lc_flags; - void *lc_opaque; -}; - -static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, - struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *arg) -{ - struct ldlm_resource *res = cfs_hash_object(hs, hnode); - struct ldlm_cli_cancel_arg *lc = arg; - - ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name, - NULL, LCK_MINMODE, - lc->lc_flags, lc->lc_opaque); - /* must return 0 for hash iteration */ - return 0; -} - -/** - * Cancel all locks on a namespace (or a specific resource, if given) - * that have 0 readers/writers. - * - * If flags & LCF_LOCAL, throw the locks away without trying - * to notify the server. 
- */ -int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, - const struct ldlm_res_id *res_id, - enum ldlm_cancel_flags flags, void *opaque) -{ - struct ldlm_cli_cancel_arg arg = { - .lc_flags = flags, - .lc_opaque = opaque, - }; - - if (!ns) - return ELDLM_OK; - - if (res_id) { - return ldlm_cli_cancel_unused_resource(ns, res_id, NULL, - LCK_MINMODE, flags, - opaque); - } else { - cfs_hash_for_each_nolock(ns->ns_rs_hash, - ldlm_cli_hash_cancel_unused, &arg, 0); - return ELDLM_OK; - } -} -EXPORT_SYMBOL(ldlm_cli_cancel_unused); - -/* Lock iterators. */ - -static int ldlm_resource_foreach(struct ldlm_resource *res, - ldlm_iterator_t iter, void *closure) -{ - struct ldlm_lock *tmp; - struct ldlm_lock *lock; - int rc = LDLM_ITER_CONTINUE; - - if (!res) - return LDLM_ITER_CONTINUE; - - lock_res(res); - list_for_each_entry_safe(lock, tmp, &res->lr_granted, l_res_link) { - if (iter(lock, closure) == LDLM_ITER_STOP) { - rc = LDLM_ITER_STOP; - goto out; - } - } - - list_for_each_entry_safe(lock, tmp, &res->lr_waiting, l_res_link) { - if (iter(lock, closure) == LDLM_ITER_STOP) { - rc = LDLM_ITER_STOP; - goto out; - } - } - out: - unlock_res(res); - return rc; -} - -struct iter_helper_data { - ldlm_iterator_t iter; - void *closure; -}; - -static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure) -{ - struct iter_helper_data *helper = closure; - - return helper->iter(lock, helper->closure); -} - -static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *arg) - -{ - struct ldlm_resource *res = cfs_hash_object(hs, hnode); - - return ldlm_resource_foreach(res, ldlm_iter_helper, arg) == - LDLM_ITER_STOP; -} - -static void ldlm_namespace_foreach(struct ldlm_namespace *ns, - ldlm_iterator_t iter, void *closure) - -{ - struct iter_helper_data helper = { - .iter = iter, - .closure = closure, - }; - - cfs_hash_for_each_nolock(ns->ns_rs_hash, - ldlm_res_iter_helper, &helper, 0); -} - -/* non-blocking function to manipulate a 
lock whose cb_data is being put away. - * return 0: find no resource - * > 0: must be LDLM_ITER_STOP/LDLM_ITER_CONTINUE. - * < 0: errors - */ -int ldlm_resource_iterate(struct ldlm_namespace *ns, - const struct ldlm_res_id *res_id, - ldlm_iterator_t iter, void *data) -{ - struct ldlm_resource *res; - int rc; - - LASSERTF(ns, "must pass in namespace\n"); - - res = ldlm_resource_get(ns, NULL, res_id, 0, 0); - if (IS_ERR(res)) - return 0; - - LDLM_RESOURCE_ADDREF(res); - rc = ldlm_resource_foreach(res, iter, data); - LDLM_RESOURCE_DELREF(res); - ldlm_resource_putref(res); - return rc; -} -EXPORT_SYMBOL(ldlm_resource_iterate); - -/* Lock replay */ - -static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure) -{ - struct list_head *list = closure; - - /* we use l_pending_chain here, because it's unused on clients. */ - LASSERTF(list_empty(&lock->l_pending_chain), - "lock %p next %p prev %p\n", - lock, &lock->l_pending_chain.next, - &lock->l_pending_chain.prev); - /* bug 9573: don't replay locks left after eviction, or - * bug 17614: locks being actively cancelled. Get a reference - * on a lock so that it does not disappear under us (e.g. 
due to cancel) - */ - if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_BL_DONE))) { - list_add(&lock->l_pending_chain, list); - LDLM_LOCK_GET(lock); - } - - return LDLM_ITER_CONTINUE; -} - -static int replay_lock_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - struct ldlm_async_args *aa, int rc) -{ - struct ldlm_lock *lock; - struct ldlm_reply *reply; - struct obd_export *exp; - - atomic_dec(&req->rq_import->imp_replay_inflight); - if (rc != ELDLM_OK) - goto out; - - reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - if (!reply) { - rc = -EPROTO; - goto out; - } - - lock = ldlm_handle2lock(&aa->lock_handle); - if (!lock) { - CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n", - aa->lock_handle.cookie, reply->lock_handle.cookie, - req->rq_export->exp_client_uuid.uuid, - libcfs_id2str(req->rq_peer)); - rc = -ESTALE; - goto out; - } - - /* Key change rehash lock in per-export hash with new key */ - exp = req->rq_export; - lock->l_remote_handle = reply->lock_handle; - - LDLM_DEBUG(lock, "replayed lock:"); - ptlrpc_import_recovery_state_machine(req->rq_import); - LDLM_LOCK_PUT(lock); -out: - if (rc != ELDLM_OK) - ptlrpc_connect_import(req->rq_import); - - return rc; -} - -static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) -{ - struct ptlrpc_request *req; - struct ldlm_async_args *aa; - struct ldlm_request *body; - int flags; - - /* Bug 11974: Do not replay a lock which is actively being canceled */ - if (ldlm_is_bl_done(lock)) { - LDLM_DEBUG(lock, "Not replaying canceled lock:"); - return 0; - } - - /* If this is reply-less callback lock, we cannot replay it, since - * server might have long dropped it, but notification of that event was - * lost by network. 
(and server granted conflicting lock already) - */ - if (ldlm_is_cancel_on_block(lock)) { - LDLM_DEBUG(lock, "Not replaying reply-less lock:"); - ldlm_lock_cancel(lock); - return 0; - } - - /* - * If granted mode matches the requested mode, this lock is granted. - * - * If they differ, but we have a granted mode, then we were granted - * one mode and now want another: ergo, converting. - * - * If we haven't been granted anything and are on a resource list, - * then we're blocked/waiting. - * - * If we haven't been granted anything and we're NOT on a resource list, - * then we haven't got a reply yet and don't have a known disposition. - * This happens whenever a lock enqueue is the request that triggers - * recovery. - */ - if (lock->l_granted_mode == lock->l_req_mode) - flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED; - else if (lock->l_granted_mode) - flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV; - else if (!list_empty(&lock->l_res_link)) - flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT; - else - flags = LDLM_FL_REPLAY; - - req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE, - LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (!req) - return -ENOMEM; - - /* We're part of recovery, so don't wait for it. */ - req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS; - - body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - ldlm_lock2desc(lock, &body->lock_desc); - body->lock_flags = ldlm_flags_to_wire(flags); - - ldlm_lock2handle(lock, &body->lock_handle[0]); - if (lock->l_lvb_len > 0) - req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB); - req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, - lock->l_lvb_len); - ptlrpc_request_set_replen(req); - /* notify the server we've replayed all requests. - * also, we mark the request to be put on a dedicated - * queue to be processed after all request replayes. 
- * bug 6063 - */ - lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE); - - LDLM_DEBUG(lock, "replaying lock:"); - - atomic_inc(&req->rq_import->imp_replay_inflight); - BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - aa->lock_handle = body->lock_handle[0]; - req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret; - ptlrpcd_add_req(req); - - return 0; -} - -/** - * Cancel as many unused locks as possible before replay. since we are - * in recovery, we can't wait for any outstanding RPCs to send any RPC - * to the server. - * - * Called only in recovery before replaying locks. there is no need to - * replay locks that are unused. since the clients may hold thousands of - * cached unused locks, dropping the unused locks can greatly reduce the - * load on the servers at recovery time. - */ -static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns) -{ - int canceled; - LIST_HEAD(cancels); - - CDEBUG(D_DLMTRACE, - "Dropping as many unused locks as possible before replay for namespace %s (%d)\n", - ldlm_ns_name(ns), ns->ns_nr_unused); - - /* We don't need to care whether or not LRU resize is enabled - * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the - * count parameter - */ - canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0, - LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT); - - CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n", - canceled, ldlm_ns_name(ns)); -} - -int ldlm_replay_locks(struct obd_import *imp) -{ - struct ldlm_namespace *ns = imp->imp_obd->obd_namespace; - LIST_HEAD(list); - struct ldlm_lock *lock, *next; - int rc = 0; - - LASSERT(atomic_read(&imp->imp_replay_inflight) == 0); - - /* don't replay locks if import failed recovery */ - if (imp->imp_vbr_failed) - return 0; - - /* ensure this doesn't fall to 0 before all have been queued */ - atomic_inc(&imp->imp_replay_inflight); - - if (ldlm_cancel_unused_locks_before_replay) - 
ldlm_cancel_unused_locks_for_replay(ns); - - ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list); - - list_for_each_entry_safe(lock, next, &list, l_pending_chain) { - list_del_init(&lock->l_pending_chain); - if (rc) { - LDLM_LOCK_RELEASE(lock); - continue; /* or try to do the rest? */ - } - rc = replay_one_lock(imp, lock); - LDLM_LOCK_RELEASE(lock); - } - - atomic_dec(&imp->imp_replay_inflight); - - return rc; -} diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c deleted file mode 100644 index c93b019b8e37..000000000000 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c +++ /dev/null @@ -1,1318 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/ldlm/ldlm_resource.c - * - * Author: Phil Schwan - * Author: Peter Braam - */ - -#define DEBUG_SUBSYSTEM S_LDLM -#include -#include -#include -#include "ldlm_internal.h" -#include - -struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab; - -int ldlm_srv_namespace_nr; -int ldlm_cli_namespace_nr; - -struct mutex ldlm_srv_namespace_lock; -LIST_HEAD(ldlm_srv_namespace_list); - -struct mutex ldlm_cli_namespace_lock; -/* Client Namespaces that have active resources in them. - * Once all resources go away, ldlm_poold moves such namespaces to the - * inactive list - */ -LIST_HEAD(ldlm_cli_active_namespace_list); -/* Client namespaces that don't have any locks in them */ -static LIST_HEAD(ldlm_cli_inactive_namespace_list); - -static struct dentry *ldlm_debugfs_dir; -static struct dentry *ldlm_ns_debugfs_dir; -struct dentry *ldlm_svc_debugfs_dir; - -/* during debug dump certain amount of granted locks for one resource to avoid - * DDOS. - */ -static unsigned int ldlm_dump_granted_max = 256; - -static ssize_t -lprocfs_wr_dump_ns(struct file *file, const char __user *buffer, - size_t count, loff_t *off) -{ - ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE); - ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE); - return count; -} - -LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns); - -static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v) -{ - seq_printf(m, "%u\n", *(unsigned int *)m->private); - return 0; -} - -static ssize_t -ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer, - size_t count, loff_t *off) -{ - struct seq_file *seq = file->private_data; - - if (count == 0) - return 0; - return kstrtouint_from_user(buffer, count, 0, - (unsigned int *)seq->private); -} - -LPROC_SEQ_FOPS(ldlm_rw_uint); - -static struct lprocfs_vars ldlm_debugfs_list[] = { - { "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 }, - { "dump_granted_max", &ldlm_rw_uint_fops, &ldlm_dump_granted_max }, - { NULL } -}; - -void ldlm_debugfs_setup(void) -{ - 
ldlm_debugfs_dir = debugfs_create_dir(OBD_LDLM_DEVICENAME, - debugfs_lustre_root); - - ldlm_ns_debugfs_dir = debugfs_create_dir("namespaces", - ldlm_debugfs_dir); - - ldlm_svc_debugfs_dir = debugfs_create_dir("services", ldlm_debugfs_dir); - - ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL); -} - -void ldlm_debugfs_cleanup(void) -{ - debugfs_remove_recursive(ldlm_svc_debugfs_dir); - debugfs_remove_recursive(ldlm_ns_debugfs_dir); - debugfs_remove_recursive(ldlm_debugfs_dir); -} - -static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - __u64 res = 0; - struct cfs_hash_bd bd; - int i; - - /* result is not strictly consistent */ - cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i) - res += cfs_hash_bd_count_get(&bd); - return sprintf(buf, "%lld\n", res); -} -LUSTRE_RO_ATTR(resource_count); - -static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - __u64 locks; - - locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS, - LPROCFS_FIELDS_FLAGS_SUM); - return sprintf(buf, "%lld\n", locks); -} -LUSTRE_RO_ATTR(lock_count); - -static ssize_t lock_unused_count_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - - return sprintf(buf, "%d\n", ns->ns_nr_unused); -} -LUSTRE_RO_ATTR(lock_unused_count); - -static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - __u32 *nr = &ns->ns_max_unused; - - if (ns_connect_lru_resize(ns)) - nr = &ns->ns_nr_unused; - return sprintf(buf, "%u\n", *nr); -} - -static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t 
count) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - unsigned long tmp; - int lru_resize; - int err; - - if (strncmp(buffer, "clear", 5) == 0) { - CDEBUG(D_DLMTRACE, - "dropping all unused locks from namespace %s\n", - ldlm_ns_name(ns)); - if (ns_connect_lru_resize(ns)) { - int canceled, unused = ns->ns_nr_unused; - - /* Try to cancel all @ns_nr_unused locks. */ - canceled = ldlm_cancel_lru(ns, unused, 0, - LDLM_LRU_FLAG_PASSED); - if (canceled < unused) { - CDEBUG(D_DLMTRACE, - "not all requested locks are canceled, requested: %d, canceled: %d\n", - unused, - canceled); - return -EINVAL; - } - } else { - tmp = ns->ns_max_unused; - ns->ns_max_unused = 0; - ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED); - ns->ns_max_unused = tmp; - } - return count; - } - - err = kstrtoul(buffer, 10, &tmp); - if (err != 0) { - CERROR("lru_size: invalid value written\n"); - return -EINVAL; - } - lru_resize = (tmp == 0); - - if (ns_connect_lru_resize(ns)) { - if (!lru_resize) - ns->ns_max_unused = (unsigned int)tmp; - - if (tmp > ns->ns_nr_unused) - tmp = ns->ns_nr_unused; - tmp = ns->ns_nr_unused - tmp; - - CDEBUG(D_DLMTRACE, - "changing namespace %s unused locks from %u to %u\n", - ldlm_ns_name(ns), ns->ns_nr_unused, - (unsigned int)tmp); - ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED); - - if (!lru_resize) { - CDEBUG(D_DLMTRACE, - "disable lru_resize for namespace %s\n", - ldlm_ns_name(ns)); - ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE; - } - } else { - CDEBUG(D_DLMTRACE, - "changing namespace %s max_unused from %u to %u\n", - ldlm_ns_name(ns), ns->ns_max_unused, - (unsigned int)tmp); - ns->ns_max_unused = (unsigned int)tmp; - ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED); - - /* Make sure that LRU resize was originally supported before - * turning it on here. 
- */ - if (lru_resize && - (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) { - CDEBUG(D_DLMTRACE, - "enable lru_resize for namespace %s\n", - ldlm_ns_name(ns)); - ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE; - } - } - - return count; -} -LUSTRE_RW_ATTR(lru_size); - -static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - - return sprintf(buf, "%u\n", ns->ns_max_age); -} - -static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - unsigned long tmp; - int err; - - err = kstrtoul(buffer, 10, &tmp); - if (err != 0) - return -EINVAL; - - ns->ns_max_age = tmp; - - return count; -} -LUSTRE_RW_ATTR(lru_max_age); - -static ssize_t early_lock_cancel_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - - return sprintf(buf, "%d\n", ns_connect_cancelset(ns)); -} - -static ssize_t early_lock_cancel_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - unsigned long supp = -1; - int rc; - - rc = kstrtoul(buffer, 10, &supp); - if (rc < 0) - return rc; - - if (supp == 0) - ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET; - else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET) - ns->ns_connect_flags |= OBD_CONNECT_CANCELSET; - return count; -} -LUSTRE_RW_ATTR(early_lock_cancel); - -/* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */ -static struct attribute *ldlm_ns_attrs[] = { - &lustre_attr_resource_count.attr, - &lustre_attr_lock_count.attr, - &lustre_attr_lock_unused_count.attr, - &lustre_attr_lru_size.attr, - &lustre_attr_lru_max_age.attr, - 
&lustre_attr_early_lock_cancel.attr, - NULL, -}; - -static void ldlm_ns_release(struct kobject *kobj) -{ - struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, - ns_kobj); - complete(&ns->ns_kobj_unregister); -} - -static struct kobj_type ldlm_ns_ktype = { - .default_attrs = ldlm_ns_attrs, - .sysfs_ops = &lustre_sysfs_ops, - .release = ldlm_ns_release, -}; - -static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns) -{ - debugfs_remove_recursive(ns->ns_debugfs_entry); - - if (ns->ns_stats) - lprocfs_free_stats(&ns->ns_stats); -} - -static void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns) -{ - kobject_put(&ns->ns_kobj); - wait_for_completion(&ns->ns_kobj_unregister); -} - -static int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns) -{ - int err; - - ns->ns_kobj.kset = ldlm_ns_kset; - init_completion(&ns->ns_kobj_unregister); - err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL, - "%s", ldlm_ns_name(ns)); - - ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0); - if (!ns->ns_stats) { - kobject_put(&ns->ns_kobj); - return -ENOMEM; - } - - lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS, - LPROCFS_CNTR_AVGMINMAX, "locks", "locks"); - - return err; -} - -static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns) -{ - struct dentry *ns_entry; - - if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) { - ns_entry = ns->ns_debugfs_entry; - } else { - ns_entry = debugfs_create_dir(ldlm_ns_name(ns), - ldlm_ns_debugfs_dir); - if (!ns_entry) - return -ENOMEM; - ns->ns_debugfs_entry = ns_entry; - } - - return 0; -} - -#undef MAX_STRING_SIZE - -static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res) -{ - LASSERT(res); - LASSERT(res != LP_POISON); - atomic_inc(&res->lr_refcount); - CDEBUG(D_INFO, "getref res: %p count: %d\n", res, - atomic_read(&res->lr_refcount)); - return res; -} - -static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs, - const void *key, unsigned int mask) -{ - const 
struct ldlm_res_id *id = key; - unsigned int val = 0; - unsigned int i; - - for (i = 0; i < RES_NAME_SIZE; i++) - val += id->name[i]; - return val & mask; -} - -static unsigned int ldlm_res_hop_fid_hash(struct cfs_hash *hs, - const void *key, unsigned int mask) -{ - const struct ldlm_res_id *id = key; - struct lu_fid fid; - __u32 hash; - __u32 val; - - fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF]; - fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF]; - fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32); - - hash = fid_flatten32(&fid); - hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */ - if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) { - val = id->name[LUSTRE_RES_ID_HSH_OFF]; - hash += (val >> 5) + (val << 11); - } else { - val = fid_oid(&fid); - } - hash = hash_long(hash, hs->hs_bkt_bits); - /* give me another random factor */ - hash -= hash_long((unsigned long)hs, val % 11 + 3); - - hash <<= hs->hs_cur_bits - hs->hs_bkt_bits; - hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1); - - return hash & mask; -} - -static void *ldlm_res_hop_key(struct hlist_node *hnode) -{ - struct ldlm_resource *res; - - res = hlist_entry(hnode, struct ldlm_resource, lr_hash); - return &res->lr_name; -} - -static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode) -{ - struct ldlm_resource *res; - - res = hlist_entry(hnode, struct ldlm_resource, lr_hash); - return ldlm_res_eq((const struct ldlm_res_id *)key, - (const struct ldlm_res_id *)&res->lr_name); -} - -static void *ldlm_res_hop_object(struct hlist_node *hnode) -{ - return hlist_entry(hnode, struct ldlm_resource, lr_hash); -} - -static void ldlm_res_hop_get_locked(struct cfs_hash *hs, - struct hlist_node *hnode) -{ - struct ldlm_resource *res; - - res = hlist_entry(hnode, struct ldlm_resource, lr_hash); - ldlm_resource_getref(res); -} - -static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode) -{ - struct ldlm_resource *res; - - res = hlist_entry(hnode, struct 
ldlm_resource, lr_hash); - ldlm_resource_putref(res); -} - -static struct cfs_hash_ops ldlm_ns_hash_ops = { - .hs_hash = ldlm_res_hop_hash, - .hs_key = ldlm_res_hop_key, - .hs_keycmp = ldlm_res_hop_keycmp, - .hs_keycpy = NULL, - .hs_object = ldlm_res_hop_object, - .hs_get = ldlm_res_hop_get_locked, - .hs_put = ldlm_res_hop_put -}; - -static struct cfs_hash_ops ldlm_ns_fid_hash_ops = { - .hs_hash = ldlm_res_hop_fid_hash, - .hs_key = ldlm_res_hop_key, - .hs_keycmp = ldlm_res_hop_keycmp, - .hs_keycpy = NULL, - .hs_object = ldlm_res_hop_object, - .hs_get = ldlm_res_hop_get_locked, - .hs_put = ldlm_res_hop_put -}; - -struct ldlm_ns_hash_def { - enum ldlm_ns_type nsd_type; - /** hash bucket bits */ - unsigned int nsd_bkt_bits; - /** hash bits */ - unsigned int nsd_all_bits; - /** hash operations */ - struct cfs_hash_ops *nsd_hops; -}; - -static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = { - { - .nsd_type = LDLM_NS_TYPE_MDC, - .nsd_bkt_bits = 11, - .nsd_all_bits = 16, - .nsd_hops = &ldlm_ns_fid_hash_ops, - }, - { - .nsd_type = LDLM_NS_TYPE_MDT, - .nsd_bkt_bits = 14, - .nsd_all_bits = 21, - .nsd_hops = &ldlm_ns_fid_hash_ops, - }, - { - .nsd_type = LDLM_NS_TYPE_OSC, - .nsd_bkt_bits = 8, - .nsd_all_bits = 12, - .nsd_hops = &ldlm_ns_hash_ops, - }, - { - .nsd_type = LDLM_NS_TYPE_OST, - .nsd_bkt_bits = 11, - .nsd_all_bits = 17, - .nsd_hops = &ldlm_ns_hash_ops, - }, - { - .nsd_type = LDLM_NS_TYPE_MGC, - .nsd_bkt_bits = 4, - .nsd_all_bits = 4, - .nsd_hops = &ldlm_ns_hash_ops, - }, - { - .nsd_type = LDLM_NS_TYPE_MGT, - .nsd_bkt_bits = 4, - .nsd_all_bits = 4, - .nsd_hops = &ldlm_ns_hash_ops, - }, - { - .nsd_type = LDLM_NS_TYPE_UNKNOWN, - }, -}; - -/** Register \a ns in the list of namespaces */ -static void ldlm_namespace_register(struct ldlm_namespace *ns, - enum ldlm_side client) -{ - mutex_lock(ldlm_namespace_lock(client)); - LASSERT(list_empty(&ns->ns_list_chain)); - list_add(&ns->ns_list_chain, &ldlm_cli_inactive_namespace_list); - ldlm_namespace_nr_inc(client); - 
mutex_unlock(ldlm_namespace_lock(client)); -} - -/** - * Create and initialize new empty namespace. - */ -struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, - enum ldlm_side client, - enum ldlm_appetite apt, - enum ldlm_ns_type ns_type) -{ - struct ldlm_namespace *ns = NULL; - struct ldlm_ns_bucket *nsb; - struct ldlm_ns_hash_def *nsd; - struct cfs_hash_bd bd; - int idx; - int rc; - - LASSERT(obd); - - rc = ldlm_get_ref(); - if (rc) { - CERROR("ldlm_get_ref failed: %d\n", rc); - return NULL; - } - - for (idx = 0;; idx++) { - nsd = &ldlm_ns_hash_defs[idx]; - if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) { - CERROR("Unknown type %d for ns %s\n", ns_type, name); - goto out_ref; - } - - if (nsd->nsd_type == ns_type) - break; - } - - ns = kzalloc(sizeof(*ns), GFP_NOFS); - if (!ns) - goto out_ref; - - ns->ns_rs_hash = cfs_hash_create(name, - nsd->nsd_all_bits, nsd->nsd_all_bits, - nsd->nsd_bkt_bits, sizeof(*nsb), - CFS_HASH_MIN_THETA, - CFS_HASH_MAX_THETA, - nsd->nsd_hops, - CFS_HASH_DEPTH | - CFS_HASH_BIGNAME | - CFS_HASH_SPIN_BKTLOCK | - CFS_HASH_NO_ITEMREF); - if (!ns->ns_rs_hash) - goto out_ns; - - cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) { - nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd); - at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0); - nsb->nsb_namespace = ns; - } - - ns->ns_obd = obd; - ns->ns_appetite = apt; - ns->ns_client = client; - ns->ns_name = kstrdup(name, GFP_KERNEL); - if (!ns->ns_name) - goto out_hash; - - INIT_LIST_HEAD(&ns->ns_list_chain); - INIT_LIST_HEAD(&ns->ns_unused_list); - spin_lock_init(&ns->ns_lock); - atomic_set(&ns->ns_bref, 0); - init_waitqueue_head(&ns->ns_waitq); - - ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT; - ns->ns_nr_unused = 0; - ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE; - ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE; - ns->ns_orig_connect_flags = 0; - ns->ns_connect_flags = 0; - ns->ns_stopping = 0; - - rc = ldlm_namespace_sysfs_register(ns); - if (rc != 0) { - CERROR("Can't 
initialize ns sysfs, rc %d\n", rc); - goto out_hash; - } - - rc = ldlm_namespace_debugfs_register(ns); - if (rc != 0) { - CERROR("Can't initialize ns proc, rc %d\n", rc); - goto out_sysfs; - } - - idx = ldlm_namespace_nr_read(client); - rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client); - if (rc) { - CERROR("Can't initialize lock pool, rc %d\n", rc); - goto out_proc; - } - - ldlm_namespace_register(ns, client); - return ns; -out_proc: - ldlm_namespace_debugfs_unregister(ns); -out_sysfs: - ldlm_namespace_sysfs_unregister(ns); - ldlm_namespace_cleanup(ns, 0); -out_hash: - kfree(ns->ns_name); - cfs_hash_putref(ns->ns_rs_hash); -out_ns: - kfree(ns); -out_ref: - ldlm_put_ref(); - return NULL; -} -EXPORT_SYMBOL(ldlm_namespace_new); - -extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock); - -/** - * Cancel and destroy all locks on a resource. - * - * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just - * clean up. This is currently only used for recovery, and we make - * certain assumptions as a result--notably, that we shouldn't cancel - * locks with refs. - */ -static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, - __u64 flags) -{ - int rc = 0; - bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY); - - do { - struct ldlm_lock *lock = NULL, *tmp; - struct lustre_handle lockh; - - /* First, we look for non-cleaned-yet lock - * all cleaned locks are marked by CLEANED flag. - */ - lock_res(res); - list_for_each_entry(tmp, q, l_res_link) { - if (ldlm_is_cleaned(tmp)) - continue; - - lock = tmp; - LDLM_LOCK_GET(lock); - ldlm_set_cleaned(lock); - break; - } - - if (!lock) { - unlock_res(res); - break; - } - - /* Set CBPENDING so nothing in the cancellation path - * can match this lock. - */ - ldlm_set_cbpending(lock); - ldlm_set_failed(lock); - lock->l_flags |= flags; - - /* ... without sending a CANCEL message for local_only. 
*/ - if (local_only) - ldlm_set_local_only(lock); - - if (local_only && (lock->l_readers || lock->l_writers)) { - /* This is a little bit gross, but much better than the - * alternative: pretend that we got a blocking AST from - * the server, so that when the lock is decref'd, it - * will go away ... - */ - unlock_res(res); - LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY"); - if (lock->l_flags & LDLM_FL_FAIL_LOC) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(4 * HZ); - set_current_state(TASK_RUNNING); - } - if (lock->l_completion_ast) - lock->l_completion_ast(lock, LDLM_FL_FAILED, - NULL); - LDLM_LOCK_RELEASE(lock); - continue; - } - - unlock_res(res); - ldlm_lock2handle(lock, &lockh); - rc = ldlm_cli_cancel(&lockh, LCF_LOCAL); - if (rc) - CERROR("ldlm_cli_cancel: %d\n", rc); - LDLM_LOCK_RELEASE(lock); - } while (1); -} - -static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *arg) -{ - struct ldlm_resource *res = cfs_hash_object(hs, hnode); - __u64 flags = *(__u64 *)arg; - - cleanup_resource(res, &res->lr_granted, flags); - cleanup_resource(res, &res->lr_waiting, flags); - - return 0; -} - -static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *arg) -{ - struct ldlm_resource *res = cfs_hash_object(hs, hnode); - - lock_res(res); - CERROR("%s: namespace resource " DLDLMRES - " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n", - ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res, - atomic_read(&res->lr_refcount) - 1); - - ldlm_resource_dump(D_ERROR, res); - unlock_res(res); - return 0; -} - -/** - * Cancel and destroy all locks in the namespace. - * - * Typically used during evictions when server notified client that it was - * evicted and all of its state needs to be destroyed. - * Also used during shutdown. 
- */ -int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags) -{ - if (!ns) { - CDEBUG(D_INFO, "NULL ns, skipping cleanup\n"); - return ELDLM_OK; - } - - cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, - &flags, 0); - cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, - NULL, 0); - return ELDLM_OK; -} -EXPORT_SYMBOL(ldlm_namespace_cleanup); - -/** - * Attempts to free namespace. - * - * Only used when namespace goes away, like during an unmount. - */ -static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force) -{ - /* At shutdown time, don't call the cancellation callback */ - ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0); - - if (atomic_read(&ns->ns_bref) > 0) { - int rc; - - CDEBUG(D_DLMTRACE, - "dlm namespace %s free waiting on refcount %d\n", - ldlm_ns_name(ns), atomic_read(&ns->ns_bref)); -force_wait: - if (force) - rc = wait_event_idle_timeout(ns->ns_waitq, - atomic_read(&ns->ns_bref) == 0, - obd_timeout * HZ / 4) ? 0 : -ETIMEDOUT; - else - rc = l_wait_event_abortable(ns->ns_waitq, - atomic_read(&ns->ns_bref) == 0); - - /* Forced cleanups should be able to reclaim all references, - * so it's safe to wait forever... we can't leak locks... - */ - if (force && rc == -ETIMEDOUT) { - LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n", - ldlm_ns_name(ns), - atomic_read(&ns->ns_bref), rc); - goto force_wait; - } - - if (atomic_read(&ns->ns_bref)) { - LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n", - ldlm_ns_name(ns), - atomic_read(&ns->ns_bref), rc); - return ELDLM_NAMESPACE_EXISTS; - } - CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n", - ldlm_ns_name(ns)); - } - - return ELDLM_OK; -} - -/** - * Performs various cleanups for passed \a ns to make it drop refc and be - * ready for freeing. Waits for refc == 0. 
- * - * The following is done: - * (0) Unregister \a ns from its list to make inaccessible for potential - * users like pools thread and others; - * (1) Clear all locks in \a ns. - */ -void ldlm_namespace_free_prior(struct ldlm_namespace *ns, - struct obd_import *imp, - int force) -{ - int rc; - - if (!ns) - return; - - spin_lock(&ns->ns_lock); - ns->ns_stopping = 1; - spin_unlock(&ns->ns_lock); - - /* - * Can fail with -EINTR when force == 0 in which case try harder. - */ - rc = __ldlm_namespace_free(ns, force); - if (rc != ELDLM_OK) { - if (imp) { - ptlrpc_disconnect_import(imp, 0); - ptlrpc_invalidate_import(imp); - } - - /* - * With all requests dropped and the import inactive - * we are guaranteed all reference will be dropped. - */ - rc = __ldlm_namespace_free(ns, 1); - LASSERT(rc == 0); - } -} - -/** Unregister \a ns from the list of namespaces. */ -static void ldlm_namespace_unregister(struct ldlm_namespace *ns, - enum ldlm_side client) -{ - mutex_lock(ldlm_namespace_lock(client)); - LASSERT(!list_empty(&ns->ns_list_chain)); - /* Some asserts and possibly other parts of the code are still - * using list_empty(&ns->ns_list_chain). This is why it is - * important to use list_del_init() here. - */ - list_del_init(&ns->ns_list_chain); - ldlm_namespace_nr_dec(client); - mutex_unlock(ldlm_namespace_lock(client)); -} - -/** - * Performs freeing memory structures related to \a ns. This is only done - * when ldlm_namespce_free_prior() successfully removed all resources - * referencing \a ns and its refc == 0. - */ -void ldlm_namespace_free_post(struct ldlm_namespace *ns) -{ - if (!ns) - return; - - /* Make sure that nobody can find this ns in its list. */ - ldlm_namespace_unregister(ns, ns->ns_client); - /* Fini pool _before_ parent proc dir is removed. This is important as - * ldlm_pool_fini() removes own proc dir which is child to @dir. - * Removing it after @dir may cause oops. 
- */ - ldlm_pool_fini(&ns->ns_pool); - - ldlm_namespace_debugfs_unregister(ns); - ldlm_namespace_sysfs_unregister(ns); - cfs_hash_putref(ns->ns_rs_hash); - kfree(ns->ns_name); - /* Namespace \a ns should be not on list at this time, otherwise - * this will cause issues related to using freed \a ns in poold - * thread. - */ - LASSERT(list_empty(&ns->ns_list_chain)); - kfree(ns); - ldlm_put_ref(); -} - -void ldlm_namespace_get(struct ldlm_namespace *ns) -{ - atomic_inc(&ns->ns_bref); -} - -/* This is only for callers that care about refcount */ -static int ldlm_namespace_get_return(struct ldlm_namespace *ns) -{ - return atomic_inc_return(&ns->ns_bref); -} - -void ldlm_namespace_put(struct ldlm_namespace *ns) -{ - if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) { - wake_up(&ns->ns_waitq); - spin_unlock(&ns->ns_lock); - } -} - -/** Should be called with ldlm_namespace_lock(client) taken. */ -void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns, - enum ldlm_side client) -{ - LASSERT(!list_empty(&ns->ns_list_chain)); - LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); - list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client)); -} - -/** Should be called with ldlm_namespace_lock(client) taken. */ -void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns, - enum ldlm_side client) -{ - LASSERT(!list_empty(&ns->ns_list_chain)); - LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); - list_move_tail(&ns->ns_list_chain, &ldlm_cli_inactive_namespace_list); -} - -/** Should be called with ldlm_namespace_lock(client) taken. */ -struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client) -{ - LASSERT(mutex_is_locked(ldlm_namespace_lock(client))); - LASSERT(!list_empty(ldlm_namespace_list(client))); - return container_of(ldlm_namespace_list(client)->next, - struct ldlm_namespace, ns_list_chain); -} - -/** Create and initialize new resource. 
*/ -static struct ldlm_resource *ldlm_resource_new(void) -{ - struct ldlm_resource *res; - int idx; - - res = kmem_cache_zalloc(ldlm_resource_slab, GFP_NOFS); - if (!res) - return NULL; - - INIT_LIST_HEAD(&res->lr_granted); - INIT_LIST_HEAD(&res->lr_waiting); - - /* Initialize interval trees for each lock mode. */ - for (idx = 0; idx < LCK_MODE_NUM; idx++) { - res->lr_itree[idx].lit_size = 0; - res->lr_itree[idx].lit_mode = 1 << idx; - res->lr_itree[idx].lit_root = NULL; - } - - atomic_set(&res->lr_refcount, 1); - spin_lock_init(&res->lr_lock); - lu_ref_init(&res->lr_reference); - - /* The creator of the resource must unlock the mutex after LVB - * initialization. - */ - mutex_init(&res->lr_lvb_mutex); - mutex_lock(&res->lr_lvb_mutex); - - return res; -} - -/** - * Return a reference to resource with given name, creating it if necessary. - * Args: namespace with ns_lock unlocked - * Locks: takes and releases NS hash-lock and res->lr_lock - * Returns: referenced, unlocked ldlm_resource or NULL - */ -struct ldlm_resource * -ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, - const struct ldlm_res_id *name, enum ldlm_type type, - int create) -{ - struct hlist_node *hnode; - struct ldlm_resource *res = NULL; - struct cfs_hash_bd bd; - __u64 version; - int ns_refcount = 0; - int rc; - - LASSERT(!parent); - LASSERT(ns->ns_rs_hash); - LASSERT(name->name[0] != 0); - - cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0); - hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name); - if (hnode) { - cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0); - goto lvbo_init; - } - - version = cfs_hash_bd_version_get(&bd); - cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0); - - if (create == 0) - return ERR_PTR(-ENOENT); - - LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE, - "type: %d\n", type); - res = ldlm_resource_new(); - if (!res) - return ERR_PTR(-ENOMEM); - - res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd); - res->lr_name 
= *name; - res->lr_type = type; - - cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1); - hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL : - cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name); - - if (hnode) { - /* Someone won the race and already added the resource. */ - cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1); - /* Clean lu_ref for failed resource. */ - lu_ref_fini(&res->lr_reference); - /* We have taken lr_lvb_mutex. Drop it. */ - mutex_unlock(&res->lr_lvb_mutex); - kmem_cache_free(ldlm_resource_slab, res); -lvbo_init: - res = hlist_entry(hnode, struct ldlm_resource, lr_hash); - /* Synchronize with regard to resource creation. */ - if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) { - mutex_lock(&res->lr_lvb_mutex); - mutex_unlock(&res->lr_lvb_mutex); - } - - if (unlikely(res->lr_lvb_len < 0)) { - rc = res->lr_lvb_len; - ldlm_resource_putref(res); - res = ERR_PTR(rc); - } - return res; - } - /* We won! Let's add the resource. */ - cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash); - if (cfs_hash_bd_count_get(&bd) == 1) - ns_refcount = ldlm_namespace_get_return(ns); - - cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1); - if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) { - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2); - rc = ns->ns_lvbo->lvbo_init(res); - if (rc < 0) { - CERROR("%s: lvbo_init failed for resource %#llx:%#llx: rc = %d\n", - ns->ns_obd->obd_name, name->name[0], - name->name[1], rc); - res->lr_lvb_len = rc; - mutex_unlock(&res->lr_lvb_mutex); - ldlm_resource_putref(res); - return ERR_PTR(rc); - } - } - - /* We create resource with locked lr_lvb_mutex. */ - mutex_unlock(&res->lr_lvb_mutex); - - /* Let's see if we happened to be the very first resource in this - * namespace. If so, and this is a client namespace, we need to move - * the namespace into the active namespaces list to be patrolled by - * the ldlm_poold. 
- */ - if (ns_refcount == 1) { - mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT)); - ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT); - mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT)); - } - - return res; -} -EXPORT_SYMBOL(ldlm_resource_get); - -static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd, - struct ldlm_resource *res) -{ - struct ldlm_ns_bucket *nsb = res->lr_ns_bucket; - struct ldlm_namespace *ns = nsb->nsb_namespace; - - if (!list_empty(&res->lr_granted)) { - ldlm_resource_dump(D_ERROR, res); - LBUG(); - } - - if (!list_empty(&res->lr_waiting)) { - ldlm_resource_dump(D_ERROR, res); - LBUG(); - } - - cfs_hash_bd_del_locked(ns->ns_rs_hash, - bd, &res->lr_hash); - lu_ref_fini(&res->lr_reference); - cfs_hash_bd_unlock(ns->ns_rs_hash, bd, 1); - if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free) - ns->ns_lvbo->lvbo_free(res); - if (cfs_hash_bd_count_get(bd) == 0) - ldlm_namespace_put(ns); - kmem_cache_free(ldlm_resource_slab, res); -} - -void ldlm_resource_putref(struct ldlm_resource *res) -{ - struct ldlm_namespace *ns = ldlm_res_to_ns(res); - struct cfs_hash_bd bd; - - LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON); - CDEBUG(D_INFO, "putref res: %p count: %d\n", - res, atomic_read(&res->lr_refcount) - 1); - - cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd); - if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) - __ldlm_resource_putref_final(&bd, res); -} -EXPORT_SYMBOL(ldlm_resource_putref); - -/** - * Add a lock into a given resource into specified lock list. 
- */ -void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head, - struct ldlm_lock *lock) -{ - check_res_locked(res); - - LDLM_DEBUG(lock, "About to add this lock:"); - - if (ldlm_is_destroyed(lock)) { - CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n"); - return; - } - - LASSERT(list_empty(&lock->l_res_link)); - - list_add_tail(&lock->l_res_link, head); -} - -void ldlm_resource_unlink_lock(struct ldlm_lock *lock) -{ - int type = lock->l_resource->lr_type; - - check_res_locked(lock->l_resource); - if (type == LDLM_IBITS || type == LDLM_PLAIN) - ldlm_unlink_lock_skiplist(lock); - else if (type == LDLM_EXTENT) - ldlm_extent_unlink_lock(lock); - list_del_init(&lock->l_res_link); -} -EXPORT_SYMBOL(ldlm_resource_unlink_lock); - -void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc) -{ - desc->lr_type = res->lr_type; - desc->lr_name = res->lr_name; -} - -/** - * Print information about all locks in all namespaces on this node to debug - * log. - */ -void ldlm_dump_all_namespaces(enum ldlm_side client, int level) -{ - struct ldlm_namespace *ns; - - if (!((libcfs_debug | D_ERROR) & level)) - return; - - mutex_lock(ldlm_namespace_lock(client)); - - list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) - ldlm_namespace_dump(level, ns); - - mutex_unlock(ldlm_namespace_lock(client)); -} - -static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *arg) -{ - struct ldlm_resource *res = cfs_hash_object(hs, hnode); - int level = (int)(unsigned long)arg; - - lock_res(res); - ldlm_resource_dump(level, res); - unlock_res(res); - - return 0; -} - -/** - * Print information about all locks in this namespace on this node to debug - * log. 
- */ -void ldlm_namespace_dump(int level, struct ldlm_namespace *ns) -{ - if (!((libcfs_debug | D_ERROR) & level)) - return; - - CDEBUG(level, "--- Namespace: %s (rc: %d, side: client)\n", - ldlm_ns_name(ns), atomic_read(&ns->ns_bref)); - - if (time_before(jiffies, ns->ns_next_dump)) - return; - - cfs_hash_for_each_nolock(ns->ns_rs_hash, - ldlm_res_hash_dump, - (void *)(unsigned long)level, 0); - spin_lock(&ns->ns_lock); - ns->ns_next_dump = jiffies + 10 * HZ; - spin_unlock(&ns->ns_lock); -} - -/** - * Print information about all locks in this resource to debug log. - */ -void ldlm_resource_dump(int level, struct ldlm_resource *res) -{ - struct ldlm_lock *lock; - unsigned int granted = 0; - - BUILD_BUG_ON(RES_NAME_SIZE != 4); - - if (!((libcfs_debug | D_ERROR) & level)) - return; - - CDEBUG(level, "--- Resource: " DLDLMRES " (%p) refcount = %d\n", - PLDLMRES(res), res, atomic_read(&res->lr_refcount)); - - if (!list_empty(&res->lr_granted)) { - CDEBUG(level, "Granted locks (in reverse order):\n"); - list_for_each_entry_reverse(lock, &res->lr_granted, - l_res_link) { - LDLM_DEBUG_LIMIT(level, lock, "###"); - if (!(level & D_CANTMASK) && - ++granted > ldlm_dump_granted_max) { - CDEBUG(level, - "only dump %d granted locks to avoid DDOS.\n", - granted); - break; - } - } - } - if (!list_empty(&res->lr_waiting)) { - CDEBUG(level, "Waiting locks:\n"); - list_for_each_entry(lock, &res->lr_waiting, l_res_link) - LDLM_DEBUG_LIMIT(level, lock, "###"); - } -} -EXPORT_SYMBOL(ldlm_resource_dump); diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile deleted file mode 100644 index 5200924182ae..000000000000 --- a/drivers/staging/lustre/lustre/llite/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += lustre.o -lustre-y := dcache.o dir.o file.o 
llite_lib.o llite_nfs.o \ - rw.o rw26.o namei.o symlink.o llite_mmap.o range_lock.o \ - xattr.o xattr_cache.o xattr_security.o \ - super25.o statahead.o glimpse.o lcommon_cl.o lcommon_misc.o \ - vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o \ - lproc_llite.o - -lustre-$(CONFIG_FS_POSIX_ACL) += acl.o diff --git a/drivers/staging/lustre/lustre/llite/acl.c b/drivers/staging/lustre/lustre/llite/acl.c deleted file mode 100644 index 2ee9ff931236..000000000000 --- a/drivers/staging/lustre/lustre/llite/acl.c +++ /dev/null @@ -1,108 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/llite/acl.c - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include "llite_internal.h" - -struct posix_acl *ll_get_acl(struct inode *inode, int type) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct posix_acl *acl = NULL; - - spin_lock(&lli->lli_lock); - /* VFS' acl_permission_check->check_acl will release the refcount */ - acl = posix_acl_dup(lli->lli_posix_acl); - spin_unlock(&lli->lli_lock); - - return acl; -} - -int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ptlrpc_request *req = NULL; - const char *name = NULL; - size_t value_size = 0; - char *value = NULL; - int rc = 0; - - switch (type) { - case ACL_TYPE_ACCESS: - name = XATTR_NAME_POSIX_ACL_ACCESS; - if (acl) - rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); - break; - - case ACL_TYPE_DEFAULT: - name = XATTR_NAME_POSIX_ACL_DEFAULT; - if (!S_ISDIR(inode->i_mode)) - rc = acl ? -EACCES : 0; - break; - - default: - rc = -EINVAL; - break; - } - if (rc) - return rc; - - if (acl) { - value_size = posix_acl_xattr_size(acl->a_count); - value = kmalloc(value_size, GFP_NOFS); - if (!value) { - rc = -ENOMEM; - goto out; - } - - rc = posix_acl_to_xattr(&init_user_ns, acl, value, value_size); - if (rc < 0) - goto out_value; - } - - rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), - value ? OBD_MD_FLXATTR : OBD_MD_FLXATTRRM, - name, value, value_size, 0, 0, &req); - - ptlrpc_req_finished(req); -out_value: - kfree(value); -out: - if (rc) - forget_cached_acl(inode, type); - else - set_cached_acl(inode, type, acl); - return rc; -} diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c deleted file mode 100644 index 11b82c639bfe..000000000000 --- a/drivers/staging/lustre/lustre/llite/dcache.c +++ /dev/null @@ -1,300 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#include -#include -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include - -#include "llite_internal.h" - -static void free_dentry_data(struct rcu_head *head) -{ - struct ll_dentry_data *lld; - - lld = container_of(head, struct ll_dentry_data, lld_rcu_head); - kfree(lld); -} - -/* should NOT be called with the dcache lock, see fs/dcache.c */ -static void ll_release(struct dentry *de) -{ - struct ll_dentry_data *lld; - - LASSERT(de); - lld = ll_d2d(de); - if (lld->lld_it) { - ll_intent_release(lld->lld_it); - kfree(lld->lld_it); - } - - de->d_fsdata = NULL; - call_rcu(&lld->lld_rcu_head, free_dentry_data); -} - -/* Compare if two dentries are the same. Don't match if the existing dentry - * is marked invalid. Returns 1 if different, 0 if the same. - * - * This avoids a race where ll_lookup_it() instantiates a dentry, but we get - * an AST before calling d_revalidate_it(). 
The dentry still exists (marked - * INVALID) so d_lookup() matches it, but we have no lock on it (so - * lock_match() fails) and we spin around real_lookup(). - * - * This race doesn't apply to lookups in d_alloc_parallel(), and for - * those we want to ensure that only one dentry with a given name is - * in ll_lookup_nd() at a time. So allow invalid dentries to match - * while d_in_lookup(). We will be called again when the lookup - * completes, and can give a different answer then. - */ -static int ll_dcompare(const struct dentry *dentry, - unsigned int len, const char *str, - const struct qstr *name) -{ - if (len != name->len) - return 1; - - if (memcmp(str, name->name, len)) - return 1; - - CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n", - name->len, name->name, dentry, dentry->d_flags, - d_count(dentry)); - - /* mountpoint is always valid */ - if (d_mountpoint(dentry)) - return 0; - - /* ensure exclusion against parallel lookup of the same name */ - if (d_in_lookup((struct dentry *)dentry)) - return 0; - - if (d_lustre_invalid(dentry)) - return 1; - - return 0; -} - -/** - * Called when last reference to a dentry is dropped and dcache wants to know - * whether or not it should cache it: - * - return 1 to delete the dentry immediately - * - return 0 to cache the dentry - * Should NOT be called with the dcache lock, see fs/dcache.c - */ -static int ll_ddelete(const struct dentry *de) -{ - LASSERT(de); - - CDEBUG(D_DENTRY, "%s dentry %pd (%p, parent %p, inode %p) %s%s\n", - d_lustre_invalid(de) ? "deleting" : "keeping", - de, de, de->d_parent, d_inode(de), - d_unhashed(de) ? "" : "hashed,", - list_empty(&de->d_subdirs) ? "" : "subdirs"); - - /* kernel >= 2.6.38 last refcount is decreased after this function. 
*/ - LASSERT(d_count(de) == 1); - - if (d_lustre_invalid(de)) - return 1; - return 0; -} - -static int ll_d_init(struct dentry *de) -{ - struct ll_dentry_data *lld = kzalloc(sizeof(*lld), GFP_KERNEL); - - if (unlikely(!lld)) - return -ENOMEM; - lld->lld_invalid = 1; - de->d_fsdata = lld; - return 0; -} - -void ll_intent_drop_lock(struct lookup_intent *it) -{ - if (it->it_op && it->it_lock_mode) { - struct lustre_handle handle; - - handle.cookie = it->it_lock_handle; - - CDEBUG(D_DLMTRACE, - "releasing lock with cookie %#llx from it %p\n", - handle.cookie, it); - ldlm_lock_decref(&handle, it->it_lock_mode); - - /* bug 494: intent_release may be called multiple times, from - * this thread and we don't want to double-decref this lock - */ - it->it_lock_mode = 0; - if (it->it_remote_lock_mode != 0) { - handle.cookie = it->it_remote_lock_handle; - - CDEBUG(D_DLMTRACE, - "releasing remote lock with cookie%#llx from it %p\n", - handle.cookie, it); - ldlm_lock_decref(&handle, - it->it_remote_lock_mode); - it->it_remote_lock_mode = 0; - } - } -} - -void ll_intent_release(struct lookup_intent *it) -{ - CDEBUG(D_INFO, "intent %p released\n", it); - ll_intent_drop_lock(it); - /* We are still holding extra reference on a request, need to free it */ - if (it_disposition(it, DISP_ENQ_OPEN_REF)) - ptlrpc_req_finished(it->it_request); /* ll_file_open */ - - if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */ - ptlrpc_req_finished(it->it_request); - - it->it_disposition = 0; - it->it_request = NULL; -} - -void ll_invalidate_aliases(struct inode *inode) -{ - struct dentry *dentry; - - CDEBUG(D_INODE, "marking dentries for ino " DFID "(%p) invalid\n", - PFID(ll_inode2fid(inode)), inode); - - spin_lock(&inode->i_lock); - hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { - CDEBUG(D_DENTRY, - "dentry in drop %pd (%p) parent %p inode %p flags %d\n", - dentry, dentry, dentry->d_parent, - d_inode(dentry), dentry->d_flags); - - d_lustre_invalidate(dentry, 0); - } - 
spin_unlock(&inode->i_lock); -} - -int ll_revalidate_it_finish(struct ptlrpc_request *request, - struct lookup_intent *it, - struct inode *inode) -{ - int rc = 0; - - if (!request) - return 0; - - if (it_disposition(it, DISP_LOOKUP_NEG)) - return -ENOENT; - - rc = ll_prep_inode(&inode, request, NULL, it); - - return rc; -} - -void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) -{ - if (it->it_lock_mode && inode) { - struct ll_sb_info *sbi = ll_i2sbi(inode); - - CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "(%p)\n", - PFID(ll_inode2fid(inode)), inode); - ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); - } - - /* drop lookup or getattr locks immediately */ - if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) { - /* on 2.6 there are situation when several lookups and - * revalidations may be requested during single operation. - * therefore, we don't release intent here -bzzz - */ - ll_intent_drop_lock(it); - } -} - -static int ll_revalidate_dentry(struct dentry *dentry, - unsigned int lookup_flags) -{ - struct inode *dir = d_inode(dentry->d_parent); - - /* If this is intermediate component path lookup and we were able to get - * to this dentry, then its lock has not been revoked and the - * path component is valid. - */ - if (lookup_flags & LOOKUP_PARENT) - return 1; - - /* Symlink - always valid as long as the dentry was found */ - if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) - return 1; - - /* - * VFS warns us that this is the second go around and previous - * operation failed (most likely open|creat), so this time - * we better talk to the server via the lookup path by name, - * not by fid. - */ - if (lookup_flags & LOOKUP_REVAL) - return 0; - - if (!dentry_may_statahead(dir, dentry)) - return 1; - - if (lookup_flags & LOOKUP_RCU) - return -ECHILD; - - ll_statahead(dir, &dentry, !d_inode(dentry)); - return 1; -} - -/* - * Always trust cached dentries. Update statahead window if necessary. 
- */ -static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags) -{ - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, flags=%u\n", - dentry, flags); - - return ll_revalidate_dentry(dentry, flags); -} - -const struct dentry_operations ll_d_ops = { - .d_init = ll_d_init, - .d_revalidate = ll_revalidate_nd, - .d_release = ll_release, - .d_delete = ll_ddelete, - .d_compare = ll_dcompare, -}; diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c deleted file mode 100644 index 688dddf3ca47..000000000000 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ /dev/null @@ -1,1708 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/llite/dir.c - * - * Directory code for lustre client. 
- */ - -#include -#include -#include -#include -#include /* for wait_on_buffer */ -#include -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "llite_internal.h" - -/* - * (new) readdir implementation overview. - * - * Original lustre readdir implementation cached exact copy of raw directory - * pages on the client. These pages were indexed in client page cache by - * logical offset in the directory file. This design, while very simple and - * intuitive had some inherent problems: - * - * . it implies that byte offset to the directory entry serves as a - * telldir(3)/seekdir(3) cookie, but that offset is not stable: in - * ext3/htree directory entries may move due to splits, and more - * importantly, - * - * . it is incompatible with the design of split directories for cmd3, - * that assumes that names are distributed across nodes based on their - * hash, and so readdir should be done in hash order. - * - * New readdir implementation does readdir in hash order, and uses hash of a - * file name as a telldir/seekdir cookie. This led to number of complications: - * - * . hash is not unique, so it cannot be used to index cached directory - * pages on the client (note, that it requires a whole pageful of hash - * collided entries to cause two pages to have identical hashes); - * - * . hash is not unique, so it cannot, strictly speaking, be used as an - * entry cookie. ext3/htree has the same problem and lustre implementation - * mimics their solution: seekdir(hash) positions directory at the first - * entry with the given hash. - * - * Client side. - * - * 0. caching - * - * Client caches directory pages using hash of the first entry as an index. As - * noted above hash is not unique, so this solution doesn't work as is: - * special processing is needed for "page hash chains" (i.e., sequences of - * pages filled with entries all having the same hash value). 
- * - * First, such chains have to be detected. To this end, server returns to the - * client the hash of the first entry on the page next to one returned. When - * client detects that this hash is the same as hash of the first entry on the - * returned page, page hash collision has to be handled. Pages in the - * hash chain, except first one, are termed "overflow pages". - * - * Solution to index uniqueness problem is to not cache overflow - * pages. Instead, when page hash collision is detected, all overflow pages - * from emerging chain are immediately requested from the server and placed in - * a special data structure (struct ll_dir_chain). This data structure is used - * by ll_readdir() to process entries from overflow pages. When readdir - * invocation finishes, overflow pages are discarded. If page hash collision - * chain weren't completely processed, next call to readdir will again detect - * page hash collision, again read overflow pages in, process next portion of - * entries and again discard the pages. This is not as wasteful as it looks, - * because, given reasonable hash, page hash collisions are extremely rare. - * - * 1. directory positioning - * - * When seekdir(hash) is called, original - * - * - * - * - * - * - * - * - * Server. - * - * identification of and access to overflow pages - * - * page format - * - * Page in MDS_READPAGE RPC is packed in LU_PAGE_SIZE, and each page contains - * a header lu_dirpage which describes the start/end hash, and whether this - * page is empty (contains no dir entry) or hash collide with next page. - * After client receives reply, several pages will be integrated into dir page - * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage - * for this integrated page will be adjusted. See lmv_adjust_dirpages(). 
- * - */ -struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data, - __u64 offset) -{ - struct md_callback cb_op; - struct page *page; - int rc; - - cb_op.md_blocking_ast = ll_md_blocking_ast; - rc = md_read_page(ll_i2mdexp(dir), op_data, &cb_op, offset, &page); - if (rc) - return ERR_PTR(rc); - - return page; -} - -void ll_release_page(struct inode *inode, struct page *page, bool remove) -{ - kunmap(page); - - /* - * Always remove the page for striped dir, because the page is - * built from temporarily in LMV layer - */ - if (inode && S_ISDIR(inode->i_mode) && - ll_i2info(inode)->lli_lsm_md) { - __free_page(page); - return; - } - - if (remove) { - lock_page(page); - if (likely(page->mapping)) - truncate_complete_page(page->mapping, page); - unlock_page(page); - } - put_page(page); -} - -/** - * return IF_* type for given lu_dirent entry. - * IF_* flag shld be converted to particular OS file type in - * platform llite module. - */ -static __u16 ll_dirent_type_get(struct lu_dirent *ent) -{ - __u16 type = 0; - struct luda_type *lt; - int len = 0; - - if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) { - const unsigned int align = sizeof(struct luda_type) - 1; - - len = le16_to_cpu(ent->lde_namelen); - len = (len + align) & ~align; - lt = (void *)ent->lde_name + len; - type = IFTODT(le16_to_cpu(lt->lt_type)); - } - return type; -} - -int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data, - struct dir_context *ctx) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - __u64 pos = *ppos; - int is_api32 = ll_need_32bit_api(sbi); - int is_hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; - struct page *page; - bool done = false; - int rc = 0; - - page = ll_get_dir_page(inode, op_data, pos); - - while (rc == 0 && !done) { - struct lu_dirpage *dp; - struct lu_dirent *ent; - __u64 hash; - __u64 next; - - if (IS_ERR(page)) { - rc = PTR_ERR(page); - break; - } - - hash = MDS_DIR_END_OFF; - dp = page_address(page); - for (ent = lu_dirent_start(dp); ent 
&& !done; - ent = lu_dirent_next(ent)) { - __u16 type; - int namelen; - struct lu_fid fid; - __u64 lhash; - __u64 ino; - - hash = le64_to_cpu(ent->lde_hash); - if (hash < pos) - /* - * Skip until we find target hash - * value. - */ - continue; - - namelen = le16_to_cpu(ent->lde_namelen); - if (namelen == 0) - /* - * Skip dummy record. - */ - continue; - - if (is_api32 && is_hash64) - lhash = hash >> 32; - else - lhash = hash; - fid_le_to_cpu(&fid, &ent->lde_fid); - ino = cl_fid_build_ino(&fid, is_api32); - type = ll_dirent_type_get(ent); - ctx->pos = lhash; - /* For 'll_nfs_get_name_filldir()', it will try - * to access the 'ent' through its 'lde_name', - * so the parameter 'name' for 'ctx->actor()' - * must be part of the 'ent'. - */ - done = !dir_emit(ctx, ent->lde_name, - namelen, ino, type); - } - - if (done) { - pos = hash; - ll_release_page(inode, page, false); - break; - } - - next = le64_to_cpu(dp->ldp_hash_end); - pos = next; - if (pos == MDS_DIR_END_OFF) { - /* - * End of directory reached. - */ - done = 1; - ll_release_page(inode, page, false); - } else { - /* - * Normal case: continue to the next - * page. - */ - ll_release_page(inode, page, - le32_to_cpu(dp->ldp_flags) & - LDF_COLLIDE); - next = pos; - page = ll_get_dir_page(inode, op_data, pos); - } - } - - ctx->pos = pos; - return rc; -} - -static int ll_readdir(struct file *filp, struct dir_context *ctx) -{ - struct inode *inode = file_inode(filp); - struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp); - struct ll_sb_info *sbi = ll_i2sbi(inode); - __u64 pos = lfd ? lfd->lfd_pos : 0; - int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; - int api32 = ll_need_32bit_api(sbi); - struct md_op_data *op_data; - int rc; - - CDEBUG(D_VFSTRACE, - "VFS Op:inode=" DFID "(%p) pos/size %lu/%llu 32bit_api %d\n", - PFID(ll_inode2fid(inode)), inode, (unsigned long)pos, - i_size_read(inode), api32); - - if (pos == MDS_DIR_END_OFF) { - /* - * end-of-file. 
- */ - rc = 0; - goto out; - } - - op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0, - LUSTRE_OPC_ANY, inode); - if (IS_ERR(op_data)) { - rc = PTR_ERR(op_data); - goto out; - } - - if (unlikely(op_data->op_mea1)) { - /* - * This is only needed for striped dir to fill .., - * see lmv_read_page - */ - if (file_dentry(filp)->d_parent && - file_dentry(filp)->d_parent->d_inode) { - __u64 ibits = MDS_INODELOCK_UPDATE; - struct inode *parent; - - parent = file_dentry(filp)->d_parent->d_inode; - if (ll_have_md_lock(parent, &ibits, LCK_MINMODE)) - op_data->op_fid3 = *ll_inode2fid(parent); - } - - /* - * If it can not find in cache, do lookup .. on the master - * object - */ - if (fid_is_zero(&op_data->op_fid3)) { - rc = ll_dir_get_parent_fid(inode, &op_data->op_fid3); - if (rc) { - ll_finish_md_op_data(op_data); - return rc; - } - } - } - op_data->op_max_pages = sbi->ll_md_brw_pages; - ctx->pos = pos; - rc = ll_dir_read(inode, &pos, op_data, ctx); - pos = ctx->pos; - if (lfd) - lfd->lfd_pos = pos; - - if (pos == MDS_DIR_END_OFF) { - if (api32) - pos = LL_DIR_END_OFF_32BIT; - else - pos = LL_DIR_END_OFF; - } else { - if (api32 && hash64) - pos >>= 32; - } - ctx->pos = pos; - ll_finish_md_op_data(op_data); -out: - if (!rc) - ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1); - - return rc; -} - -static int ll_send_mgc_param(struct obd_export *mgc, char *string) -{ - struct mgs_send_param *msp; - int rc = 0; - - msp = kzalloc(sizeof(*msp), GFP_NOFS); - if (!msp) - return -ENOMEM; - - strlcpy(msp->mgs_param, string, sizeof(msp->mgs_param)); - rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO, - sizeof(struct mgs_send_param), msp, NULL); - if (rc) - CERROR("Failed to set parameter: %d\n", rc); - kfree(msp); - - return rc; -} - -/** - * Create striped directory with specified stripe(@lump) - * - * param[in] parent the parent of the directory. - * param[in] lump the specified stripes. - * param[in] dirname the name of the directory. 
- * param[in] mode the specified mode of the directory. - * - * retval =0 if striped directory is being created successfully. - * <0 if the creation is failed. - */ -static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump, - const char *dirname, umode_t mode) -{ - struct ptlrpc_request *request = NULL; - struct md_op_data *op_data; - struct ll_sb_info *sbi = ll_i2sbi(parent); - struct inode *inode = NULL; - struct dentry dentry; - int err; - - if (unlikely(lump->lum_magic != LMV_USER_MAGIC)) - return -EINVAL; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p) name %s stripe_offset %d, stripe_count: %u\n", - PFID(ll_inode2fid(parent)), parent, dirname, - (int)lump->lum_stripe_offset, lump->lum_stripe_count); - - if (lump->lum_stripe_count > 1 && - !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE)) - return -EINVAL; - - if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC)) - lustre_swab_lmv_user_md(lump); - - if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent))) - mode &= ~current_umask(); - mode = (mode & (0777 | S_ISVTX)) | S_IFDIR; - op_data = ll_prep_md_op_data(NULL, parent, NULL, dirname, - strlen(dirname), mode, LUSTRE_OPC_MKDIR, - lump); - if (IS_ERR(op_data)) { - err = PTR_ERR(op_data); - goto err_exit; - } - - op_data->op_cli_flags |= CLI_SET_MEA; - err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode, - from_kuid(&init_user_ns, current_fsuid()), - from_kgid(&init_user_ns, current_fsgid()), - current_cap(), 0, &request); - ll_finish_md_op_data(op_data); - - err = ll_prep_inode(&inode, request, parent->i_sb, NULL); - if (err) - goto err_exit; - - memset(&dentry, 0, sizeof(dentry)); - dentry.d_inode = inode; - - err = ll_init_security(&dentry, inode, parent); - iput(inode); - -err_exit: - ptlrpc_req_finished(request); - return err; -} - -int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, - int set_default) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct md_op_data *op_data; - 
struct ptlrpc_request *req = NULL; - int rc = 0; - struct lustre_sb_info *lsi = s2lsi(inode->i_sb); - struct obd_device *mgc = lsi->lsi_mgc; - int lum_size; - - if (lump) { - /* - * This is coming from userspace, so should be in - * local endian. But the MDS would like it in little - * endian, so we swab it before we send it. - */ - switch (lump->lmm_magic) { - case LOV_USER_MAGIC_V1: { - if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1)) - lustre_swab_lov_user_md_v1(lump); - lum_size = sizeof(struct lov_user_md_v1); - break; - } - case LOV_USER_MAGIC_V3: { - if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3)) - lustre_swab_lov_user_md_v3( - (struct lov_user_md_v3 *)lump); - lum_size = sizeof(struct lov_user_md_v3); - break; - } - case LMV_USER_MAGIC: { - if (lump->lmm_magic != cpu_to_le32(LMV_USER_MAGIC)) - lustre_swab_lmv_user_md( - (struct lmv_user_md *)lump); - lum_size = sizeof(struct lmv_user_md); - break; - } - default: { - CDEBUG(D_IOCTL, - "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n", - lump->lmm_magic, LOV_USER_MAGIC_V1, - LOV_USER_MAGIC_V3); - return -EINVAL; - } - } - } else { - lum_size = sizeof(struct lov_user_md_v1); - } - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - /* swabbing is done in lov_setstripe() on server side */ - rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req); - ll_finish_md_op_data(op_data); - ptlrpc_req_finished(req); - if (rc) - return rc; - -#if OBD_OCD_VERSION(2, 13, 53, 0) > LUSTRE_VERSION_CODE - /* - * 2.9 server has stored filesystem default stripe in ROOT xattr, - * and it's stored into system config for backward compatibility. 
- * - * In the following we use the fact that LOV_USER_MAGIC_V1 and - * LOV_USER_MAGIC_V3 have the same initial fields so we do not - * need to make the distinction between the 2 versions - */ - if (set_default && mgc->u.cli.cl_mgc_mgsexp) { - char *param = NULL; - char *buf; - - param = kzalloc(MGS_PARAM_MAXLEN, GFP_NOFS); - if (!param) - return -ENOMEM; - - buf = param; - /* Get fsname and assume devname to be -MDT0000. */ - ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN); - strcat(buf, "-MDT0000.lov"); - buf += strlen(buf); - - /* Set root stripesize */ - sprintf(buf, ".stripesize=%u", - lump ? le32_to_cpu(lump->lmm_stripe_size) : 0); - rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param); - if (rc) - goto end; - - /* Set root stripecount */ - sprintf(buf, ".stripecount=%hd", - lump ? le16_to_cpu(lump->lmm_stripe_count) : 0); - rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param); - if (rc) - goto end; - - /* Set root stripeoffset */ - sprintf(buf, ".stripeoffset=%hd", - lump ? le16_to_cpu(lump->lmm_stripe_offset) : - (typeof(lump->lmm_stripe_offset))(-1)); - rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param); - -end: - kfree(param); - } -#endif - return rc; -} - -/** - * This function will be used to get default LOV/LMV/Default LMV - * @valid will be used to indicate which stripe it will retrieve - * OBD_MD_MEA LMV stripe EA - * OBD_MD_DEFAULT_MEA Default LMV stripe EA - * otherwise Default LOV EA. 
- * Each time, it can only retrieve 1 stripe EA - **/ -int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size, - struct ptlrpc_request **request, u64 valid) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct mdt_body *body; - struct lov_mds_md *lmm = NULL; - struct ptlrpc_request *req = NULL; - int rc, lmmsize; - struct md_op_data *op_data; - - rc = ll_get_max_mdsize(sbi, &lmmsize); - if (rc) - return rc; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, - 0, lmmsize, LUSTRE_OPC_ANY, - NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA; - rc = md_getattr(sbi->ll_md_exp, op_data, &req); - ll_finish_md_op_data(op_data); - if (rc < 0) { - CDEBUG(D_INFO, "md_getattr failed on inode " DFID ": rc %d\n", - PFID(ll_inode2fid(inode)), rc); - goto out; - } - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - - lmmsize = body->mbo_eadatasize; - - if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) || - lmmsize == 0) { - rc = -ENODATA; - goto out; - } - - lmm = req_capsule_server_sized_get(&req->rq_pill, - &RMF_MDT_MD, lmmsize); - LASSERT(lmm); - - /* - * This is coming from the MDS, so is probably in - * little endian. We convert it to host endian before - * passing it to userspace. 
- */ - /* We don't swab objects for directories */ - switch (le32_to_cpu(lmm->lmm_magic)) { - case LOV_MAGIC_V1: - if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) - lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm); - break; - case LOV_MAGIC_V3: - if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) - lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm); - break; - case LMV_MAGIC_V1: - if (cpu_to_le32(LMV_MAGIC) != LMV_MAGIC) - lustre_swab_lmv_mds_md((union lmv_mds_md *)lmm); - break; - case LMV_USER_MAGIC: - if (cpu_to_le32(LMV_USER_MAGIC) != LMV_USER_MAGIC) - lustre_swab_lmv_user_md((struct lmv_user_md *)lmm); - break; - default: - CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic); - rc = -EPROTO; - } -out: - *plmm = lmm; - *plmm_size = lmmsize; - *request = req; - return rc; -} - -int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid) -{ - struct md_op_data *op_data; - int mdt_index, rc; - - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) - return -ENOMEM; - - op_data->op_flags |= MF_GET_MDT_IDX; - op_data->op_fid1 = *fid; - rc = md_getattr(sbi->ll_md_exp, op_data, NULL); - mdt_index = op_data->op_mds; - kvfree(op_data); - if (rc < 0) - return rc; - - return mdt_index; -} - -/* - * Get MDT index for the inode. - */ -int ll_get_mdt_idx(struct inode *inode) -{ - return ll_get_mdt_idx_by_fid(ll_i2sbi(inode), ll_inode2fid(inode)); -} - -/** - * Generic handler to do any pre-copy work. - * - * It sends a first hsm_progress (with extent length == 0) to coordinator as a - * first information for it that real work has started. - * - * Moreover, for a ARCHIVE request, it will sample the file data version and - * store it in \a copy. - * - * \return 0 on success. - */ -static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct hsm_progress_kernel hpk; - int rc2, rc = 0; - - /* Forge a hsm_progress based on data from copy. 
*/ - hpk.hpk_fid = copy->hc_hai.hai_fid; - hpk.hpk_cookie = copy->hc_hai.hai_cookie; - hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset; - hpk.hpk_extent.length = 0; - hpk.hpk_flags = 0; - hpk.hpk_errval = 0; - hpk.hpk_data_version = 0; - - /* For archive request, we need to read the current file version. */ - if (copy->hc_hai.hai_action == HSMA_ARCHIVE) { - struct inode *inode; - __u64 data_version = 0; - - /* Get inode for this fid */ - inode = search_inode_for_lustre(sb, ©->hc_hai.hai_fid); - if (IS_ERR(inode)) { - hpk.hpk_flags |= HP_FLAG_RETRY; - /* hpk_errval is >= 0 */ - hpk.hpk_errval = -PTR_ERR(inode); - rc = PTR_ERR(inode); - goto progress; - } - - /* Read current file data version */ - rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH); - iput(inode); - if (rc != 0) { - CDEBUG(D_HSM, - "Could not read file data version of " DFID " (rc = %d). Archive request (%#llx) could not be done.\n", - PFID(©->hc_hai.hai_fid), rc, - copy->hc_hai.hai_cookie); - hpk.hpk_flags |= HP_FLAG_RETRY; - /* hpk_errval must be >= 0 */ - hpk.hpk_errval = -rc; - goto progress; - } - - /* Store in the hsm_copy for later copytool use. - * Always modified even if no lsm. - */ - copy->hc_data_version = data_version; - } - -progress: - /* On error, the request should be considered as completed */ - if (hpk.hpk_errval > 0) - hpk.hpk_flags |= HP_FLAG_COMPLETED; - rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), - &hpk, NULL); - - return rc ? rc : rc2; -} - -/** - * Generic handler to do any post-copy work. - * - * It will send the last hsm_progress update to coordinator to inform it - * that copy is finished and whether it was successful or not. - * - * Moreover, - * - for ARCHIVE request, it will sample the file data version and compare it - * with the version saved in ll_ioc_copy_start(). If they do not match, copy - * will be considered as failed. 
- * - for RESTORE request, it will sample the file data version and send it to - * coordinator which is useful if the file was imported as 'released'. - * - * \return 0 on success. - */ -static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct hsm_progress_kernel hpk; - int rc2, rc = 0; - - /* If you modify the logic here, also check llapi_hsm_copy_end(). */ - /* Take care: copy->hc_hai.hai_action, len, gid and data are not - * initialized if copy_end was called with copy == NULL. - */ - - /* Forge a hsm_progress based on data from copy. */ - hpk.hpk_fid = copy->hc_hai.hai_fid; - hpk.hpk_cookie = copy->hc_hai.hai_cookie; - hpk.hpk_extent = copy->hc_hai.hai_extent; - hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED; - hpk.hpk_errval = copy->hc_errval; - hpk.hpk_data_version = 0; - - /* For archive request, we need to check the file data was not changed. - * - * For restore request, we need to send the file data version, this is - * useful when the file was created using hsm_import. - */ - if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) || - (copy->hc_hai.hai_action == HSMA_RESTORE)) && - (copy->hc_errval == 0)) { - struct inode *inode; - __u64 data_version = 0; - - /* Get lsm for this fid */ - inode = search_inode_for_lustre(sb, ©->hc_hai.hai_fid); - if (IS_ERR(inode)) { - hpk.hpk_flags |= HP_FLAG_RETRY; - /* hpk_errval must be >= 0 */ - hpk.hpk_errval = -PTR_ERR(inode); - rc = PTR_ERR(inode); - goto progress; - } - - rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH); - iput(inode); - if (rc) { - CDEBUG(D_HSM, - "Could not read file data version. Request could not be confirmed.\n"); - if (hpk.hpk_errval == 0) - hpk.hpk_errval = -rc; - goto progress; - } - - /* Store in the hsm_copy for later copytool use. - * Always modified even if no lsm. - */ - hpk.hpk_data_version = data_version; - - /* File could have been stripped during archiving, so we need - * to check anyway. 
- */ - if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) && - (copy->hc_data_version != data_version)) { - CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. " DFID ", start:%#llx current:%#llx\n", - PFID(©->hc_hai.hai_fid), - copy->hc_data_version, data_version); - /* File was changed, send error to cdt. Do not ask for - * retry because if a file is modified frequently, - * the cdt will loop on retried archive requests. - * The policy engine will ask for a new archive later - * when the file will not be modified for some tunable - * time - */ - hpk.hpk_flags &= ~HP_FLAG_RETRY; - rc = -EBUSY; - /* hpk_errval must be >= 0 */ - hpk.hpk_errval = -rc; - } - } - -progress: - rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk), - &hpk, NULL); - - return rc ? rc : rc2; -} - -static int copy_and_ioctl(int cmd, struct obd_export *exp, - const void __user *data, size_t size) -{ - void *copy; - int rc; - - copy = memdup_user(data, size); - if (IS_ERR(copy)) - return PTR_ERR(copy); - - rc = obd_iocontrol(cmd, exp, size, copy, NULL); - kfree(copy); - - return rc; -} - -static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) -{ - int cmd = qctl->qc_cmd; - int type = qctl->qc_type; - int id = qctl->qc_id; - int valid = qctl->qc_valid; - int rc = 0; - - switch (cmd) { - case Q_SETQUOTA: - case Q_SETINFO: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - break; - case Q_GETQUOTA: - if (((type == USRQUOTA && - !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || - (type == GRPQUOTA && - !in_egroup_p(make_kgid(&init_user_ns, id)))) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - break; - case Q_GETINFO: - break; - default: - CERROR("unsupported quotactl op: %#x\n", cmd); - return -ENOTTY; - } - - if (valid != QC_GENERAL) { - if (cmd == Q_GETINFO) - qctl->qc_cmd = Q_GETOINFO; - else if (cmd == Q_GETQUOTA) - qctl->qc_cmd = Q_GETOQUOTA; - else - return -EINVAL; - - switch (valid) { - case QC_MDTIDX: - rc = 
obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp, - sizeof(*qctl), qctl, NULL); - break; - case QC_OSTIDX: - rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp, - sizeof(*qctl), qctl, NULL); - break; - case QC_UUID: - rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp, - sizeof(*qctl), qctl, NULL); - if (rc == -EAGAIN) - rc = obd_iocontrol(OBD_IOC_QUOTACTL, - sbi->ll_dt_exp, - sizeof(*qctl), qctl, NULL); - break; - default: - rc = -EINVAL; - break; - } - - if (rc) - return rc; - - qctl->qc_cmd = cmd; - } else { - struct obd_quotactl *oqctl; - - oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS); - if (!oqctl) - return -ENOMEM; - - QCTL_COPY(oqctl, qctl); - rc = obd_quotactl(sbi->ll_md_exp, oqctl); - if (rc) { - kfree(oqctl); - return rc; - } - /* If QIF_SPACE is not set, client should collect the - * space usage from OSSs by itself - */ - if (cmd == Q_GETQUOTA && - !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) && - !oqctl->qc_dqblk.dqb_curspace) { - struct obd_quotactl *oqctl_tmp; - - oqctl_tmp = kzalloc(sizeof(*oqctl_tmp), GFP_NOFS); - if (!oqctl_tmp) { - rc = -ENOMEM; - goto out; - } - - oqctl_tmp->qc_cmd = Q_GETOQUOTA; - oqctl_tmp->qc_id = oqctl->qc_id; - oqctl_tmp->qc_type = oqctl->qc_type; - - /* collect space usage from OSTs */ - oqctl_tmp->qc_dqblk.dqb_curspace = 0; - rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp); - if (!rc || rc == -EREMOTEIO) { - oqctl->qc_dqblk.dqb_curspace = - oqctl_tmp->qc_dqblk.dqb_curspace; - oqctl->qc_dqblk.dqb_valid |= QIF_SPACE; - } - - /* collect space & inode usage from MDTs */ - oqctl_tmp->qc_dqblk.dqb_curspace = 0; - oqctl_tmp->qc_dqblk.dqb_curinodes = 0; - rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp); - if (!rc || rc == -EREMOTEIO) { - oqctl->qc_dqblk.dqb_curspace += - oqctl_tmp->qc_dqblk.dqb_curspace; - oqctl->qc_dqblk.dqb_curinodes = - oqctl_tmp->qc_dqblk.dqb_curinodes; - oqctl->qc_dqblk.dqb_valid |= QIF_INODES; - } else { - oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE; - } - - kfree(oqctl_tmp); - } -out: - QCTL_COPY(qctl, oqctl); - 
kfree(oqctl); - } - - return rc; -} - -/* This function tries to get a single name component, - * to send to the server. No actual path traversal involved, - * so we limit to NAME_MAX - */ -static char *ll_getname(const char __user *filename) -{ - int ret = 0, len; - char *tmp; - - tmp = kzalloc(NAME_MAX + 1, GFP_KERNEL); - if (!tmp) - return ERR_PTR(-ENOMEM); - - len = strncpy_from_user(tmp, filename, NAME_MAX + 1); - if (len < 0) - ret = len; - else if (len == 0) - ret = -ENOENT; - else if (len > NAME_MAX && tmp[NAME_MAX] != 0) - ret = -ENAMETOOLONG; - - if (ret) { - kfree(tmp); - tmp = ERR_PTR(ret); - } - return tmp; -} - -#define ll_putname(filename) kfree(filename) - -static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct inode *inode = file_inode(file); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct obd_ioctl_data *data; - int rc = 0; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), cmd=%#x\n", - PFID(ll_inode2fid(inode)), inode, cmd); - - /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */ - if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */ - return -ENOTTY; - - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1); - switch (cmd) { - case FSFILT_IOC_GETFLAGS: - case FSFILT_IOC_SETFLAGS: - return ll_iocontrol(inode, file, cmd, arg); - case FSFILT_IOC_GETVERSION_OLD: - case FSFILT_IOC_GETVERSION: - return put_user(inode->i_generation, (int __user *)arg); - /* We need to special case any other ioctls we want to handle, - * to send them to the MDS/OST as appropriate and to properly - * network encode the arg field. 
- case FSFILT_IOC_SETVERSION_OLD: - case FSFILT_IOC_SETVERSION: - */ - case LL_IOC_GET_MDTIDX: { - int mdtidx; - - mdtidx = ll_get_mdt_idx(inode); - if (mdtidx < 0) - return mdtidx; - - if (put_user((int)mdtidx, (int __user *)arg)) - return -EFAULT; - - return 0; - } - case IOC_MDC_LOOKUP: { - int namelen, len = 0; - char *buf = NULL; - char *filename; - - rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); - if (rc) - return rc; - data = (void *)buf; - - filename = data->ioc_inlbuf1; - namelen = strlen(filename); - - if (namelen < 1) { - CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n"); - rc = -EINVAL; - goto out_free; - } - - rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL); - if (rc < 0) { - CERROR("%s: lookup %.*s failed: rc = %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), namelen, - filename, rc); - goto out_free; - } -out_free: - kvfree(buf); - return rc; - } - case LL_IOC_LMV_SETSTRIPE: { - struct lmv_user_md *lum; - char *buf = NULL; - char *filename; - int namelen = 0; - int lumlen = 0; - umode_t mode; - int len; - int rc; - - rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); - if (rc) - return rc; - - data = (void *)buf; - if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 || - data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) { - rc = -EINVAL; - goto lmv_out_free; - } - - filename = data->ioc_inlbuf1; - namelen = data->ioc_inllen1; - - if (namelen < 1) { - CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n"); - rc = -EINVAL; - goto lmv_out_free; - } - lum = (struct lmv_user_md *)data->ioc_inlbuf2; - lumlen = data->ioc_inllen2; - - if (lum->lum_magic != LMV_USER_MAGIC || - lumlen != sizeof(*lum)) { - CERROR("%s: wrong lum magic %x or size %d: rc = %d\n", - filename, lum->lum_magic, lumlen, -EFAULT); - rc = -EINVAL; - goto lmv_out_free; - } - -#if OBD_OCD_VERSION(2, 9, 50, 0) > LUSTRE_VERSION_CODE - mode = data->ioc_type != 0 ? 
data->ioc_type : 0777; -#else - mode = data->ioc_type; -#endif - rc = ll_dir_setdirstripe(inode, lum, filename, mode); -lmv_out_free: - kvfree(buf); - return rc; - } - case LL_IOC_LMV_SET_DEFAULT_STRIPE: { - struct lmv_user_md __user *ulump; - struct lmv_user_md lum; - int rc; - - ulump = (struct lmv_user_md __user *)arg; - if (copy_from_user(&lum, ulump, sizeof(lum))) - return -EFAULT; - - if (lum.lum_magic != LMV_USER_MAGIC) - return -EINVAL; - - rc = ll_dir_setstripe(inode, (struct lov_user_md *)&lum, 0); - - return rc; - } - case LL_IOC_LOV_SETSTRIPE: { - struct lov_user_md_v3 lumv3; - struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; - struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; - struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; - - int set_default = 0; - - LASSERT(sizeof(lumv3) == sizeof(*lumv3p)); - LASSERT(sizeof(lumv3.lmm_objects[0]) == - sizeof(lumv3p->lmm_objects[0])); - /* first try with v1 which is smaller than v3 */ - if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1))) - return -EFAULT; - - if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) { - if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3))) - return -EFAULT; - if (lumv3.lmm_magic != LOV_USER_MAGIC_V3) - return -EINVAL; - } - - if (is_root_inode(inode)) - set_default = 1; - - /* in v1 and v3 cases lumv1 points to data */ - rc = ll_dir_setstripe(inode, lumv1, set_default); - - return rc; - } - case LL_IOC_LMV_GETSTRIPE: { - struct lmv_user_md __user *ulmv; - struct lmv_user_md lum; - struct ptlrpc_request *request = NULL; - struct lmv_user_md *tmp = NULL; - union lmv_mds_md *lmm = NULL; - u64 valid = 0; - int max_stripe_count; - int stripe_count; - int mdt_index; - int lum_size; - int lmmsize; - int rc; - int i; - - ulmv = (struct lmv_user_md __user *)arg; - if (copy_from_user(&lum, ulmv, sizeof(*ulmv))) - return -EFAULT; - - max_stripe_count = lum.lum_stripe_count; - /* - * lum_magic will indicate which stripe the ioctl will like - * to get, LMV_MAGIC_V1 is for normal 
LMV stripe, LMV_USER_MAGIC - * is for default LMV stripe - */ - if (lum.lum_magic == LMV_MAGIC_V1) - valid |= OBD_MD_MEA; - else if (lum.lum_magic == LMV_USER_MAGIC) - valid |= OBD_MD_DEFAULT_MEA; - else - return -EINVAL; - - rc = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize, &request, - valid); - if (rc) - goto finish_req; - - /* Get default LMV EA */ - if (lum.lum_magic == LMV_USER_MAGIC) { - if (lmmsize > sizeof(*ulmv)) { - rc = -EINVAL; - goto finish_req; - } - - if (copy_to_user(ulmv, lmm, lmmsize)) - rc = -EFAULT; - - goto finish_req; - } - - stripe_count = lmv_mds_md_stripe_count_get(lmm); - if (max_stripe_count < stripe_count) { - lum.lum_stripe_count = stripe_count; - if (copy_to_user(ulmv, &lum, sizeof(lum))) { - rc = -EFAULT; - goto finish_req; - } - rc = -E2BIG; - goto finish_req; - } - - lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1); - tmp = kzalloc(lum_size, GFP_NOFS); - if (!tmp) { - rc = -ENOMEM; - goto finish_req; - } - - mdt_index = ll_get_mdt_idx(inode); - if (mdt_index < 0) { - rc = -ENOMEM; - goto out_tmp; - } - tmp->lum_magic = LMV_MAGIC_V1; - tmp->lum_stripe_count = 0; - tmp->lum_stripe_offset = mdt_index; - for (i = 0; i < stripe_count; i++) { - struct lu_fid fid; - - fid_le_to_cpu(&fid, &lmm->lmv_md_v1.lmv_stripe_fids[i]); - mdt_index = ll_get_mdt_idx_by_fid(sbi, &fid); - if (mdt_index < 0) { - rc = mdt_index; - goto out_tmp; - } - tmp->lum_objects[i].lum_mds = mdt_index; - tmp->lum_objects[i].lum_fid = fid; - tmp->lum_stripe_count++; - } - - if (copy_to_user(ulmv, tmp, lum_size)) { - rc = -EFAULT; - goto out_tmp; - } -out_tmp: - kfree(tmp); -finish_req: - ptlrpc_req_finished(request); - return rc; - } - - case LL_IOC_LOV_SWAP_LAYOUTS: - return -EPERM; - case IOC_OBD_STATFS: - return ll_obd_statfs(inode, (void __user *)arg); - case LL_IOC_LOV_GETSTRIPE: - case LL_IOC_MDC_GETINFO: - case IOC_MDC_GETFILEINFO: - case IOC_MDC_GETFILESTRIPE: { - struct ptlrpc_request *request = NULL; - struct lov_user_md __user *lump; - struct 
lov_mds_md *lmm = NULL; - struct mdt_body *body; - char *filename = NULL; - int lmmsize; - - if (cmd == IOC_MDC_GETFILEINFO || - cmd == IOC_MDC_GETFILESTRIPE) { - filename = ll_getname((const char __user *)arg); - if (IS_ERR(filename)) - return PTR_ERR(filename); - - rc = ll_lov_getstripe_ea_info(inode, filename, &lmm, - &lmmsize, &request); - } else { - rc = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize, - &request, 0); - } - - if (request) { - body = req_capsule_server_get(&request->rq_pill, - &RMF_MDT_BODY); - LASSERT(body); - } else { - goto out_req; - } - - if (rc < 0) { - if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO || - cmd == LL_IOC_MDC_GETINFO)) { - rc = 0; - goto skip_lmm; - } - - goto out_req; - } - - if (cmd == IOC_MDC_GETFILESTRIPE || - cmd == LL_IOC_LOV_GETSTRIPE) { - lump = (struct lov_user_md __user *)arg; - } else { - struct lov_user_mds_data __user *lmdp; - - lmdp = (struct lov_user_mds_data __user *)arg; - lump = &lmdp->lmd_lmm; - } - if (copy_to_user(lump, lmm, lmmsize)) { - if (copy_to_user(lump, lmm, sizeof(*lump))) { - rc = -EFAULT; - goto out_req; - } - rc = -EOVERFLOW; - } -skip_lmm: - if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) { - struct lov_user_mds_data __user *lmdp; - lstat_t st = { 0 }; - - st.st_dev = inode->i_sb->s_dev; - st.st_mode = body->mbo_mode; - st.st_nlink = body->mbo_nlink; - st.st_uid = body->mbo_uid; - st.st_gid = body->mbo_gid; - st.st_rdev = body->mbo_rdev; - st.st_size = body->mbo_size; - st.st_blksize = PAGE_SIZE; - st.st_blocks = body->mbo_blocks; - st.st_atime = body->mbo_atime; - st.st_mtime = body->mbo_mtime; - st.st_ctime = body->mbo_ctime; - st.st_ino = cl_fid_build_ino(&body->mbo_fid1, - sbi->ll_flags & - LL_SBI_32BIT_API); - - lmdp = (struct lov_user_mds_data __user *)arg; - if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) { - rc = -EFAULT; - goto out_req; - } - } - -out_req: - ptlrpc_req_finished(request); - if (filename) - ll_putname(filename); - return rc; - } - case 
OBD_IOC_QUOTACTL: { - struct if_quotactl *qctl; - - qctl = kzalloc(sizeof(*qctl), GFP_NOFS); - if (!qctl) - return -ENOMEM; - - if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) { - rc = -EFAULT; - goto out_quotactl; - } - - rc = quotactl_ioctl(sbi, qctl); - - if (rc == 0 && copy_to_user((void __user *)arg, qctl, - sizeof(*qctl))) - rc = -EFAULT; - -out_quotactl: - kfree(qctl); - return rc; - } - case OBD_IOC_GETDTNAME: - case OBD_IOC_GETMDNAME: - return ll_get_obd_name(inode, cmd, arg); - case LL_IOC_FLUSHCTX: - return ll_flush_ctx(inode); - case LL_IOC_GETOBDCOUNT: { - int count, vallen; - struct obd_export *exp; - - if (copy_from_user(&count, (int __user *)arg, sizeof(int))) - return -EFAULT; - - /* get ost count when count is zero, get mdt count otherwise */ - exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp; - vallen = sizeof(count); - rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT), - KEY_TGT_COUNT, &vallen, &count); - if (rc) { - CERROR("get target count failed: %d\n", rc); - return rc; - } - - if (copy_to_user((int __user *)arg, &count, sizeof(int))) - return -EFAULT; - - return 0; - } - case LL_IOC_PATH2FID: - if (copy_to_user((void __user *)arg, ll_inode2fid(inode), - sizeof(struct lu_fid))) - return -EFAULT; - return 0; - case LL_IOC_GET_CONNECT_FLAGS: { - return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, - (void __user *)arg); - } - case OBD_IOC_CHANGELOG_SEND: - case OBD_IOC_CHANGELOG_CLEAR: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, - sizeof(struct ioc_changelog)); - return rc; - case OBD_IOC_FID2PATH: - return ll_fid2path(inode, (void __user *)arg); - case LL_IOC_GETPARENT: - return ll_getparent(file, (void __user *)arg); - case LL_IOC_FID2MDTIDX: { - struct obd_export *exp = ll_i2mdexp(inode); - struct lu_fid fid; - __u32 index; - - if (copy_from_user(&fid, (const struct lu_fid __user *)arg, - sizeof(fid))) - return -EFAULT; - - /* Call mdc_iocontrol */ - rc = 
obd_iocontrol(LL_IOC_FID2MDTIDX, exp, sizeof(fid), &fid, - &index); - if (rc) - return rc; - - return index; - } - case LL_IOC_HSM_REQUEST: { - struct hsm_user_request *hur; - ssize_t totalsize; - - hur = memdup_user((void __user *)arg, sizeof(*hur)); - if (IS_ERR(hur)) - return PTR_ERR(hur); - - /* Compute the whole struct size */ - totalsize = hur_len(hur); - kfree(hur); - if (totalsize < 0) - return -E2BIG; - - /* Final size will be more than double totalsize */ - if (totalsize >= MDS_MAXREQSIZE / 3) - return -E2BIG; - - hur = kzalloc(totalsize, GFP_NOFS); - if (!hur) - return -ENOMEM; - - /* Copy the whole struct */ - if (copy_from_user(hur, (void __user *)arg, totalsize)) { - kvfree(hur); - return -EFAULT; - } - - if (hur->hur_request.hr_action == HUA_RELEASE) { - const struct lu_fid *fid; - struct inode *f; - int i; - - for (i = 0; i < hur->hur_request.hr_itemcount; i++) { - fid = &hur->hur_user_item[i].hui_fid; - f = search_inode_for_lustre(inode->i_sb, fid); - if (IS_ERR(f)) { - rc = PTR_ERR(f); - break; - } - - rc = ll_hsm_release(f); - iput(f); - if (rc != 0) - break; - } - } else { - rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize, - hur, NULL); - } - - kvfree(hur); - - return rc; - } - case LL_IOC_HSM_PROGRESS: { - struct hsm_progress_kernel hpk; - struct hsm_progress hp; - - if (copy_from_user(&hp, (void __user *)arg, sizeof(hp))) - return -EFAULT; - - hpk.hpk_fid = hp.hp_fid; - hpk.hpk_cookie = hp.hp_cookie; - hpk.hpk_extent = hp.hp_extent; - hpk.hpk_flags = hp.hp_flags; - hpk.hpk_errval = hp.hp_errval; - hpk.hpk_data_version = 0; - - /* File may not exist in Lustre; all progress - * reported to Lustre root - */ - rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk, - NULL); - return rc; - } - case LL_IOC_HSM_CT_START: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, - sizeof(struct lustre_kernelcomm)); - return rc; - - case LL_IOC_HSM_COPY_START: { - struct hsm_copy *copy; - 
int rc; - - copy = memdup_user((char __user *)arg, sizeof(*copy)); - if (IS_ERR(copy)) - return PTR_ERR(copy); - - rc = ll_ioc_copy_start(inode->i_sb, copy); - if (copy_to_user((char __user *)arg, copy, sizeof(*copy))) - rc = -EFAULT; - - kfree(copy); - return rc; - } - case LL_IOC_HSM_COPY_END: { - struct hsm_copy *copy; - int rc; - - copy = memdup_user((char __user *)arg, sizeof(*copy)); - if (IS_ERR(copy)) - return PTR_ERR(copy); - - rc = ll_ioc_copy_end(inode->i_sb, copy); - if (copy_to_user((char __user *)arg, copy, sizeof(*copy))) - rc = -EFAULT; - - kfree(copy); - return rc; - } - case LL_IOC_MIGRATE: { - char *buf = NULL; - const char *filename; - int namelen = 0; - int len; - int rc; - int mdtidx; - - rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); - if (rc < 0) - return rc; - - data = (struct obd_ioctl_data *)buf; - if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 || - !data->ioc_inllen1 || !data->ioc_inllen2) { - rc = -EINVAL; - goto migrate_free; - } - - filename = data->ioc_inlbuf1; - namelen = data->ioc_inllen1; - if (namelen < 1 || namelen != strlen(filename) + 1) { - rc = -EINVAL; - goto migrate_free; - } - - if (data->ioc_inllen2 != sizeof(mdtidx)) { - rc = -EINVAL; - goto migrate_free; - } - mdtidx = *(int *)data->ioc_inlbuf2; - - rc = ll_migrate(inode, file, mdtidx, filename, namelen - 1); -migrate_free: - kvfree(buf); - - return rc; - } - - default: - return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, - (void __user *)arg); - } -} - -static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin) -{ - struct inode *inode = file->f_mapping->host; - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct ll_sb_info *sbi = ll_i2sbi(inode); - int api32 = ll_need_32bit_api(sbi); - loff_t ret = -EINVAL; - - switch (origin) { - case SEEK_SET: - break; - case SEEK_CUR: - offset += file->f_pos; - break; - case SEEK_END: - if (offset > 0) - goto out; - if (api32) - offset += LL_DIR_END_OFF_32BIT; - else - offset += LL_DIR_END_OFF; - break; - 
default: - goto out; - } - - if (offset >= 0 && - ((api32 && offset <= LL_DIR_END_OFF_32BIT) || - (!api32 && offset <= LL_DIR_END_OFF))) { - if (offset != file->f_pos) { - if ((api32 && offset == LL_DIR_END_OFF_32BIT) || - (!api32 && offset == LL_DIR_END_OFF)) - fd->lfd_pos = MDS_DIR_END_OFF; - else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH) - fd->lfd_pos = offset << 32; - else - fd->lfd_pos = offset; - file->f_pos = offset; - } - ret = offset; - } - goto out; - -out: - return ret; -} - -static int ll_dir_open(struct inode *inode, struct file *file) -{ - return ll_file_open(inode, file); -} - -static int ll_dir_release(struct inode *inode, struct file *file) -{ - return ll_file_release(inode, file); -} - -const struct file_operations ll_dir_operations = { - .llseek = ll_dir_seek, - .open = ll_dir_open, - .release = ll_dir_release, - .read = generic_read_dir, - .iterate_shared = ll_readdir, - .unlocked_ioctl = ll_dir_ioctl, - .fsync = ll_fsync, -}; diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c deleted file mode 100644 index 02295931883b..000000000000 --- a/drivers/staging/lustre/lustre/llite/file.c +++ /dev/null @@ -1,3580 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/llite/file.c - * - * Author: Peter Braam - * Author: Phil Schwan - * Author: Andreas Dilger - */ - -#define DEBUG_SUBSYSTEM S_LLITE -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "llite_internal.h" - -static int -ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg); - -static int ll_lease_close(struct obd_client_handle *och, struct inode *inode, - bool *lease_broken); - -static enum llioc_iter -ll_iocontrol_call(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg, int *rcp); - -static struct ll_file_data *ll_file_data_get(void) -{ - struct ll_file_data *fd; - - fd = kmem_cache_zalloc(ll_file_data_slab, GFP_NOFS); - if (!fd) - return NULL; - fd->fd_write_failed = false; - return fd; -} - -static void ll_file_data_put(struct ll_file_data *fd) -{ - if (fd) - kmem_cache_free(ll_file_data_slab, fd); -} - -/** - * Packs all the attributes into @op_data for the CLOSE rpc. 
- */ -static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data, - struct obd_client_handle *och) -{ - struct ll_inode_info *lli = ll_i2info(inode); - - ll_prep_md_op_data(op_data, inode, NULL, NULL, - 0, 0, LUSTRE_OPC_ANY, NULL); - - op_data->op_attr.ia_mode = inode->i_mode; - op_data->op_attr.ia_atime = inode->i_atime; - op_data->op_attr.ia_mtime = inode->i_mtime; - op_data->op_attr.ia_ctime = inode->i_ctime; - op_data->op_attr.ia_size = i_size_read(inode); - op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET | - ATTR_MTIME | ATTR_MTIME_SET | - ATTR_CTIME | ATTR_CTIME_SET; - op_data->op_attr_blocks = inode->i_blocks; - op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags); - op_data->op_handle = och->och_fh; - - /* - * For HSM: if inode data has been modified, pack it so that - * MDT can set data dirty flag in the archive. - */ - if (och->och_flags & FMODE_WRITE && - test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) - op_data->op_bias |= MDS_DATA_MODIFIED; -} - -/** - * Perform a close, possibly with a bias. - * The meaning of "data" depends on the value of "bias". - * - * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version. - * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to - * swap layouts with. 
- */ -static int ll_close_inode_openhandle(struct inode *inode, - struct obd_client_handle *och, - enum mds_op_bias bias, - void *data) -{ - const struct ll_inode_info *lli = ll_i2info(inode); - struct obd_export *md_exp = ll_i2mdexp(inode); - struct md_op_data *op_data; - struct ptlrpc_request *req = NULL; - int rc; - - if (!class_exp2obd(md_exp)) { - CERROR("%s: invalid MDC connection handle closing " DFID "\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&lli->lli_fid)); - rc = 0; - goto out; - } - - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - /* - * We leak openhandle and request here on error, but not much to be - * done in OOM case since app won't retry close on error either. - */ - if (!op_data) { - rc = -ENOMEM; - goto out; - } - - ll_prepare_close(inode, op_data, och); - switch (bias) { - case MDS_CLOSE_LAYOUT_SWAP: - LASSERT(data); - op_data->op_bias |= MDS_CLOSE_LAYOUT_SWAP; - op_data->op_data_version = 0; - op_data->op_lease_handle = och->och_lease_handle; - op_data->op_fid2 = *ll_inode2fid(data); - break; - - case MDS_HSM_RELEASE: - LASSERT(data); - op_data->op_bias |= MDS_HSM_RELEASE; - op_data->op_data_version = *(__u64 *)data; - op_data->op_lease_handle = och->och_lease_handle; - op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS; - break; - - default: - LASSERT(!data); - break; - } - - rc = md_close(md_exp, op_data, och->och_mod, &req); - if (rc && rc != -EINTR) { - CERROR("%s: inode " DFID " mdc close failed: rc = %d\n", - md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc); - } - - if (op_data->op_bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP) && - !rc) { - struct mdt_body *body; - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED)) - rc = -EBUSY; - } - - ll_finish_md_op_data(op_data); - -out: - md_clear_open_replay_data(md_exp, och); - och->och_fh.cookie = DEAD_HANDLE_MAGIC; - kfree(och); - - ptlrpc_req_finished(req); - return rc; -} - -int ll_md_real_close(struct inode 
*inode, fmode_t fmode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct obd_client_handle **och_p; - struct obd_client_handle *och; - __u64 *och_usecount; - int rc = 0; - - if (fmode & FMODE_WRITE) { - och_p = &lli->lli_mds_write_och; - och_usecount = &lli->lli_open_fd_write_count; - } else if (fmode & FMODE_EXEC) { - och_p = &lli->lli_mds_exec_och; - och_usecount = &lli->lli_open_fd_exec_count; - } else { - LASSERT(fmode & FMODE_READ); - och_p = &lli->lli_mds_read_och; - och_usecount = &lli->lli_open_fd_read_count; - } - - mutex_lock(&lli->lli_och_mutex); - if (*och_usecount > 0) { - /* There are still users of this handle, so skip - * freeing it. - */ - mutex_unlock(&lli->lli_och_mutex); - return 0; - } - - och = *och_p; - *och_p = NULL; - mutex_unlock(&lli->lli_och_mutex); - - if (och) { - /* There might be a race and this handle may already - * be closed. - */ - rc = ll_close_inode_openhandle(inode, och, 0, NULL); - } - - return rc; -} - -static int ll_md_close(struct inode *inode, struct file *file) -{ - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct ll_inode_info *lli = ll_i2info(inode); - int lockmode; - __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK; - struct lustre_handle lockh; - union ldlm_policy_data policy = { - .l_inodebits = { MDS_INODELOCK_OPEN } - }; - int rc = 0; - - /* clear group lock, if present */ - if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED)) - ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid); - - if (fd->fd_lease_och) { - bool lease_broken; - - /* Usually the lease is not released when the - * application crashed, we need to release here. - */ - rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken); - CDEBUG(rc ? 
D_ERROR : D_INODE, - "Clean up lease " DFID " %d/%d\n", - PFID(&lli->lli_fid), rc, lease_broken); - - fd->fd_lease_och = NULL; - } - - if (fd->fd_och) { - rc = ll_close_inode_openhandle(inode, fd->fd_och, 0, NULL); - fd->fd_och = NULL; - goto out; - } - - /* Let's see if we have good enough OPEN lock on the file and if - * we can skip talking to MDS - */ - - mutex_lock(&lli->lli_och_mutex); - if (fd->fd_omode & FMODE_WRITE) { - lockmode = LCK_CW; - LASSERT(lli->lli_open_fd_write_count); - lli->lli_open_fd_write_count--; - } else if (fd->fd_omode & FMODE_EXEC) { - lockmode = LCK_PR; - LASSERT(lli->lli_open_fd_exec_count); - lli->lli_open_fd_exec_count--; - } else { - lockmode = LCK_CR; - LASSERT(lli->lli_open_fd_read_count); - lli->lli_open_fd_read_count--; - } - mutex_unlock(&lli->lli_och_mutex); - - if (!md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode), - LDLM_IBITS, &policy, lockmode, &lockh)) - rc = ll_md_real_close(inode, fd->fd_omode); - -out: - LUSTRE_FPRIVATE(file) = NULL; - ll_file_data_put(fd); - - return rc; -} - -/* While this returns an error code, fput() the caller does not, so we need - * to make every effort to clean up all of our state here. Also, applications - * rarely check close errors and even if an error is returned they will not - * re-try the close call. - */ -int ll_file_release(struct inode *inode, struct file *file) -{ - struct ll_file_data *fd; - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ll_inode_info *lli = ll_i2info(inode); - int rc; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n", - PFID(ll_inode2fid(inode)), inode); - - if (!is_root_inode(inode)) - ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1); - fd = LUSTRE_FPRIVATE(file); - LASSERT(fd); - - /* The last ref on @file, maybe not be the owner pid of statahead, - * because parent and child process can share the same file handle. 
- */ - if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd) - ll_deauthorize_statahead(inode, fd); - - if (is_root_inode(inode)) { - LUSTRE_FPRIVATE(file) = NULL; - ll_file_data_put(fd); - return 0; - } - - if (!S_ISDIR(inode->i_mode)) { - if (lli->lli_clob) - lov_read_and_clear_async_rc(lli->lli_clob); - lli->lli_async_rc = 0; - } - - rc = ll_md_close(inode, file); - - if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val)) - libcfs_debug_dumplog(); - - return rc; -} - -static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize, - struct lookup_intent *itp) -{ - struct inode *inode = d_inode(de); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct dentry *parent = de->d_parent; - const char *name = NULL; - struct md_op_data *op_data; - struct ptlrpc_request *req = NULL; - int len = 0, rc; - - LASSERT(parent); - LASSERT(itp->it_flags & MDS_OPEN_BY_FID); - - /* - * if server supports open-by-fid, or file name is invalid, don't pack - * name in open request - */ - if (!(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID) && - lu_name_is_valid_2(de->d_name.name, de->d_name.len)) { - name = de->d_name.name; - len = de->d_name.len; - } - - op_data = ll_prep_md_op_data(NULL, d_inode(parent), inode, name, len, - O_RDWR, LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - op_data->op_data = lmm; - op_data->op_data_size = lmmsize; - - rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req, - &ll_md_blocking_ast, 0); - ll_finish_md_op_data(op_data); - if (rc == -ESTALE) { - /* reason for keep own exit path - don`t flood log - * with messages with -ESTALE errors. - */ - if (!it_disposition(itp, DISP_OPEN_OPEN) || - it_open_error(DISP_OPEN_OPEN, itp)) - goto out; - ll_release_openhandle(inode, itp); - goto out; - } - - if (it_disposition(itp, DISP_LOOKUP_NEG)) { - rc = -ENOENT; - goto out; - } - - if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) { - rc = rc ? 
rc : it_open_error(DISP_OPEN_OPEN, itp); - CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc); - goto out; - } - - rc = ll_prep_inode(&inode, req, NULL, itp); - if (!rc && itp->it_lock_mode) - ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL); - -out: - ptlrpc_req_finished(req); - ll_intent_drop_lock(itp); - - /* - * We did open by fid, but by the time we got to the server, - * the object disappeared. If this is a create, we cannot really - * tell the userspace that the file it was trying to create - * does not exist. Instead let's return -ESTALE, and the VFS will - * retry the create with LOOKUP_REVAL that we are going to catch - * in ll_revalidate_dentry() and use lookup then. - */ - if (rc == -ENOENT && itp->it_op & IT_CREAT) - rc = -ESTALE; - - return rc; -} - -static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it, - struct obd_client_handle *och) -{ - struct mdt_body *body; - - body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY); - och->och_fh = body->mbo_handle; - och->och_fid = body->mbo_fid1; - och->och_lease_handle.cookie = it->it_lock_handle; - och->och_magic = OBD_CLIENT_HANDLE_MAGIC; - och->och_flags = it->it_flags; - - return md_set_open_replay_data(md_exp, och, it); -} - -static int ll_local_open(struct file *file, struct lookup_intent *it, - struct ll_file_data *fd, struct obd_client_handle *och) -{ - struct inode *inode = file_inode(file); - - LASSERT(!LUSTRE_FPRIVATE(file)); - - LASSERT(fd); - - if (och) { - int rc; - - rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och); - if (rc != 0) - return rc; - } - - LUSTRE_FPRIVATE(file) = fd; - ll_readahead_init(inode, &fd->fd_ras); - fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); - - /* ll_cl_context initialize */ - rwlock_init(&fd->fd_lock); - INIT_LIST_HEAD(&fd->fd_lccs); - - return 0; -} - -/* Open a file, and (for the very first open) create objects on the OSTs at - * this time. 
If opened with O_LOV_DELAY_CREATE, then we don't do the object - * creation or open until ll_lov_setstripe() ioctl is called. - * - * If we already have the stripe MD locally then we don't request it in - * md_open(), by passing a lmm_size = 0. - * - * It is up to the application to ensure no other processes open this file - * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be - * used. We might be able to avoid races of that sort by getting lli_open_sem - * before returning in the O_LOV_DELAY_CREATE case and dropping it here - * or in ll_file_release(), but I'm not sure that is desirable/necessary. - */ -int ll_file_open(struct inode *inode, struct file *file) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct lookup_intent *it, oit = { .it_op = IT_OPEN, - .it_flags = file->f_flags }; - struct obd_client_handle **och_p = NULL; - __u64 *och_usecount = NULL; - struct ll_file_data *fd; - int rc = 0; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), flags %o\n", - PFID(ll_inode2fid(inode)), inode, file->f_flags); - - it = file->private_data; /* XXX: compat macro */ - file->private_data = NULL; /* prevent ll_local_open assertion */ - - fd = ll_file_data_get(); - if (!fd) { - rc = -ENOMEM; - goto out_openerr; - } - - fd->fd_file = file; - if (S_ISDIR(inode->i_mode)) - ll_authorize_statahead(inode, fd); - - if (is_root_inode(inode)) { - LUSTRE_FPRIVATE(file) = fd; - return 0; - } - - if (!it || !it->it_disposition) { - /* Convert f_flags into access mode. We cannot use file->f_mode, - * because everything but O_ACCMODE mask was stripped from - * there - */ - if ((oit.it_flags + 1) & O_ACCMODE) - oit.it_flags++; - if (file->f_flags & O_TRUNC) - oit.it_flags |= FMODE_WRITE; - - /* kernel only call f_op->open in dentry_open. filp_open calls - * dentry_open after call to open_namei that checks permissions. - * Only nfsd_open call dentry_open directly without checking - * permissions and because of that this code below is safe. 
- */ - if (oit.it_flags & (FMODE_WRITE | FMODE_READ)) - oit.it_flags |= MDS_OPEN_OWNEROVERRIDE; - - /* We do not want O_EXCL here, presumably we opened the file - * already? XXX - NFS implications? - */ - oit.it_flags &= ~O_EXCL; - - /* bug20584, if "it_flags" contains O_CREAT, the file will be - * created if necessary, then "IT_CREAT" should be set to keep - * consistent with it - */ - if (oit.it_flags & O_CREAT) - oit.it_op |= IT_CREAT; - - it = &oit; - } - -restart: - /* Let's see if we have file open on MDS already. */ - if (it->it_flags & FMODE_WRITE) { - och_p = &lli->lli_mds_write_och; - och_usecount = &lli->lli_open_fd_write_count; - } else if (it->it_flags & FMODE_EXEC) { - och_p = &lli->lli_mds_exec_och; - och_usecount = &lli->lli_open_fd_exec_count; - } else { - och_p = &lli->lli_mds_read_och; - och_usecount = &lli->lli_open_fd_read_count; - } - - mutex_lock(&lli->lli_och_mutex); - if (*och_p) { /* Open handle is present */ - if (it_disposition(it, DISP_OPEN_OPEN)) { - /* Well, there's extra open request that we do not need, - * let's close it somehow. This will decref request. - */ - rc = it_open_error(DISP_OPEN_OPEN, it); - if (rc) { - mutex_unlock(&lli->lli_och_mutex); - goto out_openerr; - } - - ll_release_openhandle(inode, it); - } - (*och_usecount)++; - - rc = ll_local_open(file, it, fd, NULL); - if (rc) { - (*och_usecount)--; - mutex_unlock(&lli->lli_och_mutex); - goto out_openerr; - } - } else { - LASSERT(*och_usecount == 0); - if (!it->it_disposition) { - /* We cannot just request lock handle now, new ELC code - * means that one of other OPEN locks for this file - * could be cancelled, and since blocking ast handler - * would attempt to grab och_mutex as well, that would - * result in a deadlock - */ - mutex_unlock(&lli->lli_och_mutex); - /* - * Normally called under two situations: - * 1. NFS export. - * 2. revalidate with IT_OPEN (revalidate doesn't - * execute this intent any more). - * - * Always fetch MDS_OPEN_LOCK if this is not setstripe. 
- * - * Always specify MDS_OPEN_BY_FID because we don't want - * to get file with different fid. - */ - it->it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID; - rc = ll_intent_file_open(file->f_path.dentry, - NULL, 0, it); - if (rc) - goto out_openerr; - - goto restart; - } - *och_p = kzalloc(sizeof(struct obd_client_handle), GFP_NOFS); - if (!*och_p) { - rc = -ENOMEM; - goto out_och_free; - } - - (*och_usecount)++; - - /* md_intent_lock() didn't get a request ref if there was an - * open error, so don't do cleanup on the request here - * (bug 3430) - */ - /* XXX (green): Should not we bail out on any error here, not - * just open error? - */ - rc = it_open_error(DISP_OPEN_OPEN, it); - if (rc) - goto out_och_free; - - LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF), - "inode %p: disposition %x, status %d\n", inode, - it_disposition(it, ~0), it->it_status); - - rc = ll_local_open(file, it, fd, *och_p); - if (rc) - goto out_och_free; - } - mutex_unlock(&lli->lli_och_mutex); - fd = NULL; - - /* Must do this outside lli_och_mutex lock to prevent deadlock where - * different kind of OPEN lock for this same inode gets cancelled - * by ldlm_cancel_lru - */ - if (!S_ISREG(inode->i_mode)) - goto out_och_free; - - cl_lov_delay_create_clear(&file->f_flags); - goto out_och_free; - -out_och_free: - if (rc) { - if (och_p && *och_p) { - kfree(*och_p); - *och_p = NULL; - (*och_usecount)--; - } - mutex_unlock(&lli->lli_och_mutex); - -out_openerr: - if (lli->lli_opendir_key == fd) - ll_deauthorize_statahead(inode, fd); - if (fd) - ll_file_data_put(fd); - } else { - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1); - } - - if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) { - ptlrpc_req_finished(it->it_request); - it_clear_disposition(it, DISP_ENQ_OPEN_REF); - } - - return rc; -} - -static int ll_md_blocking_lease_ast(struct ldlm_lock *lock, - struct ldlm_lock_desc *desc, - void *data, int flag) -{ - int rc; - struct lustre_handle lockh; - - switch (flag) { - case LDLM_CB_BLOCKING: - 
ldlm_lock2handle(lock, &lockh); - rc = ldlm_cli_cancel(&lockh, LCF_ASYNC); - if (rc < 0) { - CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc); - return rc; - } - break; - case LDLM_CB_CANCELING: - /* do nothing */ - break; - } - return 0; -} - -/** - * Acquire a lease and open the file. - */ -static struct obd_client_handle * -ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, - __u64 open_flags) -{ - struct lookup_intent it = { .it_op = IT_OPEN }; - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct md_op_data *op_data; - struct ptlrpc_request *req = NULL; - struct lustre_handle old_handle = { 0 }; - struct obd_client_handle *och = NULL; - int rc; - int rc2; - - if (fmode != FMODE_WRITE && fmode != FMODE_READ) - return ERR_PTR(-EINVAL); - - if (file) { - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct obd_client_handle **och_p; - __u64 *och_usecount; - - if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC)) - return ERR_PTR(-EPERM); - - /* Get the openhandle of the file */ - rc = -EBUSY; - mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och) { - mutex_unlock(&lli->lli_och_mutex); - return ERR_PTR(rc); - } - - if (!fd->fd_och) { - if (file->f_mode & FMODE_WRITE) { - LASSERT(lli->lli_mds_write_och); - och_p = &lli->lli_mds_write_och; - och_usecount = &lli->lli_open_fd_write_count; - } else { - LASSERT(lli->lli_mds_read_och); - och_p = &lli->lli_mds_read_och; - och_usecount = &lli->lli_open_fd_read_count; - } - if (*och_usecount == 1) { - fd->fd_och = *och_p; - *och_p = NULL; - *och_usecount = 0; - rc = 0; - } - } - mutex_unlock(&lli->lli_och_mutex); - if (rc < 0) /* more than 1 opener */ - return ERR_PTR(rc); - - LASSERT(fd->fd_och); - old_handle = fd->fd_och->och_fh; - } - - och = kzalloc(sizeof(*och), GFP_NOFS); - if (!och) - return ERR_PTR(-ENOMEM); - - op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) { - rc = 
PTR_ERR(op_data); - goto out; - } - - /* To tell the MDT this openhandle is from the same owner */ - op_data->op_handle = old_handle; - - it.it_flags = fmode | open_flags; - it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE; - rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req, - &ll_md_blocking_lease_ast, - /* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise - * it can be cancelled which may mislead applications that the lease is - * broken; - * LDLM_FL_EXCL: Set this flag so that it won't be matched by normal - * open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast - * doesn't deal with openhandle, so normal openhandle will be leaked. - */ - LDLM_FL_NO_LRU | LDLM_FL_EXCL); - ll_finish_md_op_data(op_data); - ptlrpc_req_finished(req); - if (rc < 0) - goto out_release_it; - - if (it_disposition(&it, DISP_LOOKUP_NEG)) { - rc = -ENOENT; - goto out_release_it; - } - - rc = it_open_error(DISP_OPEN_OPEN, &it); - if (rc) - goto out_release_it; - - LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF)); - ll_och_fill(sbi->ll_md_exp, &it, och); - - if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? 
*/ { - rc = -EOPNOTSUPP; - goto out_close; - } - - /* already get lease, handle lease lock */ - ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL); - if (it.it_lock_mode == 0 || - it.it_lock_bits != MDS_INODELOCK_OPEN) { - /* open lock must return for lease */ - CERROR(DFID "lease granted but no open lock, %d/%llu.\n", - PFID(ll_inode2fid(inode)), it.it_lock_mode, - it.it_lock_bits); - rc = -EPROTO; - goto out_close; - } - - ll_intent_release(&it); - return och; - -out_close: - /* Cancel open lock */ - if (it.it_lock_mode != 0) { - ldlm_lock_decref_and_cancel(&och->och_lease_handle, - it.it_lock_mode); - it.it_lock_mode = 0; - och->och_lease_handle.cookie = 0ULL; - } - rc2 = ll_close_inode_openhandle(inode, och, 0, NULL); - if (rc2 < 0) - CERROR("%s: error closing file " DFID ": %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&ll_i2info(inode)->lli_fid), rc2); - och = NULL; /* och has been freed in ll_close_inode_openhandle() */ -out_release_it: - ll_intent_release(&it); -out: - kfree(och); - return ERR_PTR(rc); -} - -/** - * Check whether a layout swap can be done between two inodes. 
- * - * \param[in] inode1 First inode to check - * \param[in] inode2 Second inode to check - * - * \retval 0 on success, layout swap can be performed between both inodes - * \retval negative error code if requirements are not met - */ -static int ll_check_swap_layouts_validity(struct inode *inode1, - struct inode *inode2) -{ - if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode)) - return -EINVAL; - - if (inode_permission(inode1, MAY_WRITE) || - inode_permission(inode2, MAY_WRITE)) - return -EPERM; - - if (inode1->i_sb != inode2->i_sb) - return -EXDEV; - - return 0; -} - -static int ll_swap_layouts_close(struct obd_client_handle *och, - struct inode *inode, struct inode *inode2) -{ - const struct lu_fid *fid1 = ll_inode2fid(inode); - const struct lu_fid *fid2; - int rc; - - CDEBUG(D_INODE, "%s: biased close of file " DFID "\n", - ll_get_fsname(inode->i_sb, NULL, 0), PFID(fid1)); - - rc = ll_check_swap_layouts_validity(inode, inode2); - if (rc < 0) - goto out_free_och; - - /* We now know that inode2 is a lustre inode */ - fid2 = ll_inode2fid(inode2); - - rc = lu_fid_cmp(fid1, fid2); - if (!rc) { - rc = -EINVAL; - goto out_free_och; - } - - /* - * Close the file and swap layouts between inode & inode2. - * NB: lease lock handle is released in mdc_close_layout_swap_pack() - * because we still need it to pack l_remote_handle to MDT. - */ - rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP, - inode2); - - och = NULL; /* freed in ll_close_inode_openhandle() */ - -out_free_och: - kfree(och); - return rc; -} - -/** - * Release lease and close the file. - * It will check if the lease has ever broken. 
- */ -static int ll_lease_close(struct obd_client_handle *och, struct inode *inode, - bool *lease_broken) -{ - struct ldlm_lock *lock; - bool cancelled = true; - - lock = ldlm_handle2lock(&och->och_lease_handle); - if (lock) { - lock_res_and_lock(lock); - cancelled = ldlm_is_cancel(lock); - unlock_res_and_lock(lock); - LDLM_LOCK_PUT(lock); - } - - CDEBUG(D_INODE, "lease for " DFID " broken? %d\n", - PFID(&ll_i2info(inode)->lli_fid), cancelled); - - if (!cancelled) - ldlm_cli_cancel(&och->och_lease_handle, 0); - if (lease_broken) - *lease_broken = cancelled; - - return ll_close_inode_openhandle(inode, och, 0, NULL); -} - -int ll_merge_attr(const struct lu_env *env, struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct cl_object *obj = lli->lli_clob; - struct cl_attr *attr = vvp_env_thread_attr(env); - s64 atime; - s64 mtime; - s64 ctime; - int rc = 0; - - ll_inode_size_lock(inode); - - /* merge timestamps the most recently obtained from mds with - * timestamps obtained from osts - */ - LTIME_S(inode->i_atime) = lli->lli_atime; - LTIME_S(inode->i_mtime) = lli->lli_mtime; - LTIME_S(inode->i_ctime) = lli->lli_ctime; - - mtime = LTIME_S(inode->i_mtime); - atime = LTIME_S(inode->i_atime); - ctime = LTIME_S(inode->i_ctime); - - cl_object_attr_lock(obj); - rc = cl_object_attr_get(env, obj, attr); - cl_object_attr_unlock(obj); - - if (rc != 0) - goto out_size_unlock; - - if (atime < attr->cat_atime) - atime = attr->cat_atime; - - if (ctime < attr->cat_ctime) - ctime = attr->cat_ctime; - - if (mtime < attr->cat_mtime) - mtime = attr->cat_mtime; - - CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n", - PFID(&lli->lli_fid), attr->cat_size); - - i_size_write(inode, attr->cat_size); - - inode->i_blocks = attr->cat_blocks; - - LTIME_S(inode->i_mtime) = mtime; - LTIME_S(inode->i_atime) = atime; - LTIME_S(inode->i_ctime) = ctime; - -out_size_unlock: - ll_inode_size_unlock(inode); - - return rc; -} - -static bool file_is_noatime(const struct file *file) -{ 
- const struct vfsmount *mnt = file->f_path.mnt; - const struct inode *inode = file_inode(file); - - /* Adapted from file_accessed() and touch_atime().*/ - if (file->f_flags & O_NOATIME) - return true; - - if (inode->i_flags & S_NOATIME) - return true; - - if (IS_NOATIME(inode)) - return true; - - if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY)) - return true; - - if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)) - return true; - - if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)) - return true; - - return false; -} - -static void ll_io_init(struct cl_io *io, const struct file *file, int write) -{ - struct inode *inode = file_inode(file); - - io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK; - if (write) { - io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND); - io->u.ci_wr.wr_sync = file->f_flags & O_SYNC || - file->f_flags & O_DIRECT || - IS_SYNC(inode); - } - io->ci_obj = ll_i2info(inode)->lli_clob; - io->ci_lockreq = CILR_MAYBE; - if (ll_file_nolock(file)) { - io->ci_lockreq = CILR_NEVER; - io->ci_no_srvlock = 1; - } else if (file->f_flags & O_APPEND) { - io->ci_lockreq = CILR_MANDATORY; - } - - io->ci_noatime = file_is_noatime(file); -} - -static ssize_t -ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, - struct file *file, enum cl_io_type iot, - loff_t *ppos, size_t count) -{ - struct ll_inode_info *lli = ll_i2info(file_inode(file)); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct vvp_io *vio = vvp_env_io(env); - struct range_lock range; - struct cl_io *io; - ssize_t result = 0; - int rc = 0; - - CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n", - file, iot, *ppos, count); - -restart: - io = vvp_env_thread_io(env); - ll_io_init(io, file, iot == CIT_WRITE); - - if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) { - struct vvp_io *vio = vvp_env_io(env); - bool range_locked = false; - - if (file->f_flags & O_APPEND) - range_lock_init(&range, 0, LUSTRE_EOF); - else - 
range_lock_init(&range, *ppos, *ppos + count - 1); - - vio->vui_fd = LUSTRE_FPRIVATE(file); - vio->vui_iter = args->u.normal.via_iter; - vio->vui_iocb = args->u.normal.via_iocb; - /* - * Direct IO reads must also take range lock, - * or multiple reads will try to work on the same pages - * See LU-6227 for details. - */ - if (((iot == CIT_WRITE) || - (iot == CIT_READ && (file->f_flags & O_DIRECT))) && - !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) { - CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n", - range.rl_node.in_extent.start, - range.rl_node.in_extent.end); - rc = range_lock(&lli->lli_write_tree, &range); - if (rc < 0) - goto out; - - range_locked = true; - } - ll_cl_add(file, env, io); - rc = cl_io_loop(env, io); - ll_cl_remove(file, env); - if (range_locked) { - CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n", - range.rl_node.in_extent.start, - range.rl_node.in_extent.end); - range_unlock(&lli->lli_write_tree, &range); - } - } else { - /* cl_io_rw_init() handled IO */ - rc = io->ci_result; - } - - if (io->ci_nob > 0) { - result = io->ci_nob; - count -= io->ci_nob; - *ppos = io->u.ci_wr.wr.crw_pos; - - /* prepare IO restart */ - if (count > 0) - args->u.normal.via_iter = vio->vui_iter; - } -out: - cl_io_fini(env, io); - - if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) { - CDEBUG(D_VFSTRACE, - "%s: restart %s from %lld, count:%zu, result: %zd\n", - file_dentry(file)->d_name.name, - iot == CIT_READ ? 
"read" : "write", - *ppos, count, result); - goto restart; - } - - if (iot == CIT_READ) { - if (result >= 0) - ll_stats_ops_tally(ll_i2sbi(file_inode(file)), - LPROC_LL_READ_BYTES, result); - } else if (iot == CIT_WRITE) { - if (result >= 0) { - ll_stats_ops_tally(ll_i2sbi(file_inode(file)), - LPROC_LL_WRITE_BYTES, result); - fd->fd_write_failed = false; - } else if (!result && !rc) { - rc = io->ci_result; - if (rc < 0) - fd->fd_write_failed = true; - else - fd->fd_write_failed = false; - } else if (rc != -ERESTARTSYS) { - fd->fd_write_failed = true; - } - } - CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result); - - return result > 0 ? result : rc; -} - -static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to) -{ - struct lu_env *env; - struct vvp_io_args *args; - ssize_t result; - u16 refcheck; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - args = ll_env_args(env); - args->u.normal.via_iter = to; - args->u.normal.via_iocb = iocb; - - result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ, - &iocb->ki_pos, iov_iter_count(to)); - cl_env_put(env, &refcheck); - return result; -} - -/* - * Write to a file (through the page cache). 
- */ -static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from) -{ - struct lu_env *env; - struct vvp_io_args *args; - ssize_t result; - u16 refcheck; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - args = ll_env_args(env); - args->u.normal.via_iter = from; - args->u.normal.via_iocb = iocb; - - result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE, - &iocb->ki_pos, iov_iter_count(from)); - cl_env_put(env, &refcheck); - return result; -} - -int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, - __u64 flags, struct lov_user_md *lum, - int lum_size) -{ - struct lookup_intent oit = { - .it_op = IT_OPEN, - .it_flags = flags | MDS_OPEN_BY_FID, - }; - int rc = 0; - - ll_inode_size_lock(inode); - rc = ll_intent_file_open(dentry, lum, lum_size, &oit); - if (rc < 0) - goto out_unlock; - - ll_release_openhandle(inode, &oit); - -out_unlock: - ll_inode_size_unlock(inode); - ll_intent_release(&oit); - return rc; -} - -int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, - struct lov_mds_md **lmmp, int *lmm_size, - struct ptlrpc_request **request) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct mdt_body *body; - struct lov_mds_md *lmm = NULL; - struct ptlrpc_request *req = NULL; - struct md_op_data *op_data; - int rc, lmmsize; - - rc = ll_get_default_mdsize(sbi, &lmmsize); - if (rc) - return rc; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, - strlen(filename), lmmsize, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA; - rc = md_getattr_name(sbi->ll_md_exp, op_data, &req); - ll_finish_md_op_data(op_data); - if (rc < 0) { - CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n", - filename, rc); - goto out; - } - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - - lmmsize = body->mbo_eadatasize; - - if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) || - 
lmmsize == 0) { - rc = -ENODATA; - goto out; - } - - lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); - - if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) && - (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) { - rc = -EPROTO; - goto out; - } - - /* - * This is coming from the MDS, so is probably in - * little endian. We convert it to host endian before - * passing it to userspace. - */ - if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) { - int stripe_count; - - stripe_count = le16_to_cpu(lmm->lmm_stripe_count); - if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED) - stripe_count = 0; - - /* if function called for directory - we should - * avoid swab not existent lsm objects - */ - if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) { - lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm); - if (S_ISREG(body->mbo_mode)) - lustre_swab_lov_user_md_objects( - ((struct lov_user_md_v1 *)lmm)->lmm_objects, - stripe_count); - } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) { - lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm); - if (S_ISREG(body->mbo_mode)) - lustre_swab_lov_user_md_objects( - ((struct lov_user_md_v3 *)lmm)->lmm_objects, - stripe_count); - } - } - -out: - *lmmp = lmm; - *lmm_size = lmmsize; - *request = req; - return rc; -} - -static int ll_lov_setea(struct inode *inode, struct file *file, - unsigned long arg) -{ - __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE; - struct lov_user_md *lump; - int lum_size = sizeof(struct lov_user_md) + - sizeof(struct lov_user_ost_data); - int rc; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - lump = kzalloc(lum_size, GFP_NOFS); - if (!lump) - return -ENOMEM; - - if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size)) { - kvfree(lump); - return -EFAULT; - } - - rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lump, - lum_size); - cl_lov_delay_create_clear(&file->f_flags); - - kvfree(lump); - return rc; -} - -static int ll_file_getstripe(struct inode 
*inode, - struct lov_user_md __user *lum) -{ - struct lu_env *env; - u16 refcheck; - int rc; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum); - cl_env_put(env, &refcheck); - return rc; -} - -static int ll_lov_setstripe(struct inode *inode, struct file *file, - unsigned long arg) -{ - struct lov_user_md __user *lum = (struct lov_user_md __user *)arg; - struct lov_user_md *klum; - int lum_size, rc; - __u64 flags = FMODE_WRITE; - - rc = ll_copy_user_md(lum, &klum); - if (rc < 0) - return rc; - - lum_size = rc; - rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, klum, - lum_size); - cl_lov_delay_create_clear(&file->f_flags); - if (rc == 0) { - __u32 gen; - - put_user(0, &lum->lmm_stripe_count); - - ll_layout_refresh(inode, &gen); - rc = ll_file_getstripe(inode, (struct lov_user_md __user *)arg); - } - - kfree(klum); - return rc; -} - -static int -ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct ll_grouplock grouplock; - int rc; - - if (arg == 0) { - CWARN("group id for group lock must not be 0\n"); - return -EINVAL; - } - - if (ll_file_nolock(file)) - return -EOPNOTSUPP; - - spin_lock(&lli->lli_lock); - if (fd->fd_flags & LL_FILE_GROUP_LOCKED) { - CWARN("group lock already existed with gid %lu\n", - fd->fd_grouplock.lg_gid); - spin_unlock(&lli->lli_lock); - return -EINVAL; - } - LASSERT(!fd->fd_grouplock.lg_lock); - spin_unlock(&lli->lli_lock); - - rc = cl_get_grouplock(ll_i2info(inode)->lli_clob, - arg, (file->f_flags & O_NONBLOCK), &grouplock); - if (rc) - return rc; - - spin_lock(&lli->lli_lock); - if (fd->fd_flags & LL_FILE_GROUP_LOCKED) { - spin_unlock(&lli->lli_lock); - CERROR("another thread just won the race\n"); - cl_put_grouplock(&grouplock); - return -EINVAL; - } - - fd->fd_flags |= LL_FILE_GROUP_LOCKED; - 
fd->fd_grouplock = grouplock; - spin_unlock(&lli->lli_lock); - - CDEBUG(D_INFO, "group lock %lu obtained\n", arg); - return 0; -} - -static int ll_put_grouplock(struct inode *inode, struct file *file, - unsigned long arg) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct ll_grouplock grouplock; - - spin_lock(&lli->lli_lock); - if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) { - spin_unlock(&lli->lli_lock); - CWARN("no group lock held\n"); - return -EINVAL; - } - LASSERT(fd->fd_grouplock.lg_lock); - - if (fd->fd_grouplock.lg_gid != arg) { - CWARN("group lock %lu doesn't match current id %lu\n", - arg, fd->fd_grouplock.lg_gid); - spin_unlock(&lli->lli_lock); - return -EINVAL; - } - - grouplock = fd->fd_grouplock; - memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock)); - fd->fd_flags &= ~LL_FILE_GROUP_LOCKED; - spin_unlock(&lli->lli_lock); - - cl_put_grouplock(&grouplock); - CDEBUG(D_INFO, "group lock %lu released\n", arg); - return 0; -} - -/** - * Close inode open handle - * - * \param inode [in] inode in question - * \param it [in,out] intent which contains open info and result - * - * \retval 0 success - * \retval <0 failure - */ -int ll_release_openhandle(struct inode *inode, struct lookup_intent *it) -{ - struct obd_client_handle *och; - int rc; - - LASSERT(inode); - - /* Root ? Do nothing. */ - if (is_root_inode(inode)) - return 0; - - /* No open handle to close? 
Move away */ - if (!it_disposition(it, DISP_OPEN_OPEN)) - return 0; - - LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0); - - och = kzalloc(sizeof(*och), GFP_NOFS); - if (!och) { - rc = -ENOMEM; - goto out; - } - - ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och); - - rc = ll_close_inode_openhandle(inode, och, 0, NULL); -out: - /* this one is in place of ll_file_open */ - if (it_disposition(it, DISP_ENQ_OPEN_REF)) { - ptlrpc_req_finished(it->it_request); - it_clear_disposition(it, DISP_ENQ_OPEN_REF); - } - return rc; -} - -/** - * Get size for inode for which FIEMAP mapping is requested. - * Make the FIEMAP get_info call and returns the result. - * - * \param fiemap kernel buffer to hold extens - * \param num_bytes kernel buffer size - */ -static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap, - size_t num_bytes) -{ - struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, }; - struct lu_env *env; - u16 refcheck; - int rc = 0; - - /* Checks for fiemap flags */ - if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) { - fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT; - return -EBADR; - } - - /* Check for FIEMAP_FLAG_SYNC */ - if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) { - rc = filemap_fdatawrite(inode->i_mapping); - if (rc) - return rc; - } - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - if (i_size_read(inode) == 0) { - rc = ll_glimpse_size(inode); - if (rc) - goto out; - } - - fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP; - obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE); - obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid); - - /* If filesize is 0, then there would be no objects for mapping */ - if (fmkey.lfik_oa.o_size == 0) { - fiemap->fm_mapped_extents = 0; - rc = 0; - goto out; - } - - memcpy(&fmkey.lfik_fiemap, fiemap, sizeof(*fiemap)); - - rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob, - &fmkey, fiemap, &num_bytes); -out: - cl_env_put(env, &refcheck); - return rc; -} - -int 
ll_fid2path(struct inode *inode, void __user *arg) -{ - struct obd_export *exp = ll_i2mdexp(inode); - const struct getinfo_fid2path __user *gfin = arg; - struct getinfo_fid2path *gfout; - u32 pathlen; - size_t outsize; - int rc; - - if (!capable(CAP_DAC_READ_SEARCH) && - !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH)) - return -EPERM; - - /* Only need to get the buflen */ - if (get_user(pathlen, &gfin->gf_pathlen)) - return -EFAULT; - - if (pathlen > PATH_MAX) - return -EINVAL; - - outsize = sizeof(*gfout) + pathlen; - - gfout = kzalloc(outsize, GFP_NOFS); - if (!gfout) - return -ENOMEM; - - if (copy_from_user(gfout, arg, sizeof(*gfout))) { - rc = -EFAULT; - goto gf_free; - } - - /* Call mdc_iocontrol */ - rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL); - if (rc != 0) - goto gf_free; - - if (copy_to_user(arg, gfout, outsize)) - rc = -EFAULT; - -gf_free: - kfree(gfout); - return rc; -} - -/* - * Read the data_version for inode. - * - * This value is computed using stripe object version on OST. - * Version is computed using server side locking. - * - * @param flags if do sync on the OST side; - * 0: no sync - * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs - * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs - */ -int ll_data_version(struct inode *inode, __u64 *data_version, int flags) -{ - struct cl_object *obj = ll_i2info(inode)->lli_clob; - struct lu_env *env; - struct cl_io *io; - u16 refcheck; - int result; - - /* If no file object initialized, we consider its version is 0. 
*/ - if (!obj) { - *data_version = 0; - return 0; - } - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - io = vvp_env_thread_io(env); - io->ci_obj = obj; - io->u.ci_data_version.dv_data_version = 0; - io->u.ci_data_version.dv_flags = flags; - -restart: - if (!cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj)) - result = cl_io_loop(env, io); - else - result = io->ci_result; - - *data_version = io->u.ci_data_version.dv_data_version; - - cl_io_fini(env, io); - - if (unlikely(io->ci_need_restart)) - goto restart; - - cl_env_put(env, &refcheck); - - return result; -} - -/* - * Trigger a HSM release request for the provided inode. - */ -int ll_hsm_release(struct inode *inode) -{ - struct lu_env *env; - struct obd_client_handle *och = NULL; - __u64 data_version = 0; - int rc; - u16 refcheck; - - CDEBUG(D_INODE, "%s: Releasing file " DFID ".\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&ll_i2info(inode)->lli_fid)); - - och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE); - if (IS_ERR(och)) { - rc = PTR_ERR(och); - goto out; - } - - /* Grab latest data_version and [am]time values */ - rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH); - if (rc != 0) - goto out; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) { - rc = PTR_ERR(env); - goto out; - } - - ll_merge_attr(env, inode); - cl_env_put(env, &refcheck); - - /* Release the file. - * NB: lease lock handle is released in mdc_hsm_release_pack() because - * we still need it to pack l_remote_handle to MDT. 
- */ - rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE, - &data_version); - och = NULL; - -out: - if (och && !IS_ERR(och)) /* close the file */ - ll_lease_close(och, inode, NULL); - - return rc; -} - -struct ll_swap_stack { - u64 dv1; - u64 dv2; - struct inode *inode1; - struct inode *inode2; - bool check_dv1; - bool check_dv2; -}; - -static int ll_swap_layouts(struct file *file1, struct file *file2, - struct lustre_swap_layouts *lsl) -{ - struct mdc_swap_layouts msl; - struct md_op_data *op_data; - __u32 gid; - __u64 dv; - struct ll_swap_stack *llss = NULL; - int rc; - - llss = kzalloc(sizeof(*llss), GFP_NOFS); - if (!llss) - return -ENOMEM; - - llss->inode1 = file_inode(file1); - llss->inode2 = file_inode(file2); - - rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2); - if (rc < 0) - goto free; - - /* we use 2 bool because it is easier to swap than 2 bits */ - if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1) - llss->check_dv1 = true; - - if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2) - llss->check_dv2 = true; - - /* we cannot use lsl->sl_dvX directly because we may swap them */ - llss->dv1 = lsl->sl_dv1; - llss->dv2 = lsl->sl_dv2; - - rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2)); - if (!rc) /* same file, done! 
*/ - goto free; - - if (rc < 0) { /* sequentialize it */ - swap(llss->inode1, llss->inode2); - swap(file1, file2); - swap(llss->dv1, llss->dv2); - swap(llss->check_dv1, llss->check_dv2); - } - - gid = lsl->sl_gid; - if (gid != 0) { /* application asks to flush dirty cache */ - rc = ll_get_grouplock(llss->inode1, file1, gid); - if (rc < 0) - goto free; - - rc = ll_get_grouplock(llss->inode2, file2, gid); - if (rc < 0) { - ll_put_grouplock(llss->inode1, file1, gid); - goto free; - } - } - - /* ultimate check, before swapping the layouts we check if - * dataversion has changed (if requested) - */ - if (llss->check_dv1) { - rc = ll_data_version(llss->inode1, &dv, 0); - if (rc) - goto putgl; - if (dv != llss->dv1) { - rc = -EAGAIN; - goto putgl; - } - } - - if (llss->check_dv2) { - rc = ll_data_version(llss->inode2, &dv, 0); - if (rc) - goto putgl; - if (dv != llss->dv2) { - rc = -EAGAIN; - goto putgl; - } - } - - /* struct md_op_data is used to send the swap args to the mdt - * only flags is missing, so we use struct mdc_swap_layouts - * through the md_op_data->op_data - */ - /* flags from user space have to be converted before they are send to - * server, no flag is sent today, they are only used on the client - */ - msl.msl_flags = 0; - rc = -ENOMEM; - op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0, - 0, LUSTRE_OPC_ANY, &msl); - if (IS_ERR(op_data)) { - rc = PTR_ERR(op_data); - goto free; - } - - rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1), - sizeof(*op_data), op_data, NULL); - ll_finish_md_op_data(op_data); - -putgl: - if (gid != 0) { - ll_put_grouplock(llss->inode2, file2, gid); - ll_put_grouplock(llss->inode1, file1, gid); - } - -free: - kfree(llss); - - return rc; -} - -int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss) -{ - struct md_op_data *op_data; - int rc; - - /* Detect out-of range masks */ - if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK) - return -EINVAL; - - /* Non-root 
users are forbidden to set or clear flags which are - * NOT defined in HSM_USER_MASK. - */ - if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - - /* Detect out-of range archive id */ - if ((hss->hss_valid & HSS_ARCHIVE_ID) && - (hss->hss_archive_id > LL_HSM_MAX_ARCHIVE)) - return -EINVAL; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, hss); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, ll_i2mdexp(inode), - sizeof(*op_data), op_data, NULL); - - ll_finish_md_op_data(op_data); - - return rc; -} - -static int ll_hsm_import(struct inode *inode, struct file *file, - struct hsm_user_import *hui) -{ - struct hsm_state_set *hss = NULL; - struct iattr *attr = NULL; - int rc; - - if (!S_ISREG(inode->i_mode)) - return -EINVAL; - - /* set HSM flags */ - hss = kzalloc(sizeof(*hss), GFP_NOFS); - if (!hss) - return -ENOMEM; - - hss->hss_valid = HSS_SETMASK | HSS_ARCHIVE_ID; - hss->hss_archive_id = hui->hui_archive_id; - hss->hss_setmask = HS_ARCHIVED | HS_EXISTS | HS_RELEASED; - rc = ll_hsm_state_set(inode, hss); - if (rc != 0) - goto free_hss; - - attr = kzalloc(sizeof(*attr), GFP_NOFS); - if (!attr) { - rc = -ENOMEM; - goto free_hss; - } - - attr->ia_mode = hui->hui_mode & 0777; - attr->ia_mode |= S_IFREG; - attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid); - attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid); - attr->ia_size = hui->hui_size; - attr->ia_mtime.tv_sec = hui->hui_mtime; - attr->ia_mtime.tv_nsec = hui->hui_mtime_ns; - attr->ia_atime.tv_sec = hui->hui_atime; - attr->ia_atime.tv_nsec = hui->hui_atime_ns; - - attr->ia_valid = ATTR_SIZE | ATTR_MODE | ATTR_FORCE | - ATTR_UID | ATTR_GID | - ATTR_MTIME | ATTR_MTIME_SET | - ATTR_ATIME | ATTR_ATIME_SET; - - inode_lock(inode); - - rc = ll_setattr_raw(file->f_path.dentry, attr, true); - if (rc == -ENODATA) - rc = 0; - - inode_unlock(inode); - - kfree(attr); -free_hss: - 
kfree(hss); - return rc; -} - -static inline long ll_lease_type_from_fmode(fmode_t fmode) -{ - return ((fmode & FMODE_READ) ? LL_LEASE_RDLCK : 0) | - ((fmode & FMODE_WRITE) ? LL_LEASE_WRLCK : 0); -} - -static long -ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct inode *inode = file_inode(file); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - int flags, rc; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p),cmd=%x\n", - PFID(ll_inode2fid(inode)), inode, cmd); - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1); - - /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */ - if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */ - return -ENOTTY; - - switch (cmd) { - case LL_IOC_GETFLAGS: - /* Get the current value of the file flags */ - return put_user(fd->fd_flags, (int __user *)arg); - case LL_IOC_SETFLAGS: - case LL_IOC_CLRFLAGS: - /* Set or clear specific file flags */ - /* XXX This probably needs checks to ensure the flags are - * not abused, and to handle any flag side effects. 
- */ - if (get_user(flags, (int __user *)arg)) - return -EFAULT; - - if (cmd == LL_IOC_SETFLAGS) { - if ((flags & LL_FILE_IGNORE_LOCK) && - !(file->f_flags & O_DIRECT)) { - CERROR("%s: unable to disable locking on non-O_DIRECT file\n", - current->comm); - return -EINVAL; - } - - fd->fd_flags |= flags; - } else { - fd->fd_flags &= ~flags; - } - return 0; - case LL_IOC_LOV_SETSTRIPE: - return ll_lov_setstripe(inode, file, arg); - case LL_IOC_LOV_SETEA: - return ll_lov_setea(inode, file, arg); - case LL_IOC_LOV_SWAP_LAYOUTS: { - struct file *file2; - struct lustre_swap_layouts lsl; - - if (copy_from_user(&lsl, (char __user *)arg, - sizeof(struct lustre_swap_layouts))) - return -EFAULT; - - if ((file->f_flags & O_ACCMODE) == O_RDONLY) - return -EPERM; - - file2 = fget(lsl.sl_fd); - if (!file2) - return -EBADF; - - /* O_WRONLY or O_RDWR */ - if ((file2->f_flags & O_ACCMODE) == O_RDONLY) { - rc = -EPERM; - goto out; - } - - if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) { - struct obd_client_handle *och = NULL; - struct ll_inode_info *lli; - struct inode *inode2; - - if (lsl.sl_flags != SWAP_LAYOUTS_CLOSE) { - rc = -EINVAL; - goto out; - } - - lli = ll_i2info(inode); - mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och) { - och = fd->fd_lease_och; - fd->fd_lease_och = NULL; - } - mutex_unlock(&lli->lli_och_mutex); - if (!och) { - rc = -ENOLCK; - goto out; - } - inode2 = file_inode(file2); - rc = ll_swap_layouts_close(och, inode, inode2); - } else { - rc = ll_swap_layouts(file, file2, &lsl); - } -out: - fput(file2); - return rc; - } - case LL_IOC_LOV_GETSTRIPE: - return ll_file_getstripe(inode, - (struct lov_user_md __user *)arg); - case FSFILT_IOC_GETFLAGS: - case FSFILT_IOC_SETFLAGS: - return ll_iocontrol(inode, file, cmd, arg); - case FSFILT_IOC_GETVERSION_OLD: - case FSFILT_IOC_GETVERSION: - return put_user(inode->i_generation, (int __user *)arg); - case LL_IOC_GROUP_LOCK: - return ll_get_grouplock(inode, file, arg); - case LL_IOC_GROUP_UNLOCK: - return 
ll_put_grouplock(inode, file, arg); - case IOC_OBD_STATFS: - return ll_obd_statfs(inode, (void __user *)arg); - - /* We need to special case any other ioctls we want to handle, - * to send them to the MDS/OST as appropriate and to properly - * network encode the arg field. - case FSFILT_IOC_SETVERSION_OLD: - case FSFILT_IOC_SETVERSION: - */ - case LL_IOC_FLUSHCTX: - return ll_flush_ctx(inode); - case LL_IOC_PATH2FID: { - if (copy_to_user((void __user *)arg, ll_inode2fid(inode), - sizeof(struct lu_fid))) - return -EFAULT; - - return 0; - } - case LL_IOC_GETPARENT: - return ll_getparent(file, (struct getparent __user *)arg); - case OBD_IOC_FID2PATH: - return ll_fid2path(inode, (void __user *)arg); - case LL_IOC_DATA_VERSION: { - struct ioc_data_version idv; - int rc; - - if (copy_from_user(&idv, (char __user *)arg, sizeof(idv))) - return -EFAULT; - - idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH; - rc = ll_data_version(inode, &idv.idv_version, idv.idv_flags); - if (rc == 0 && copy_to_user((char __user *)arg, &idv, - sizeof(idv))) - return -EFAULT; - - return rc; - } - - case LL_IOC_GET_MDTIDX: { - int mdtidx; - - mdtidx = ll_get_mdt_idx(inode); - if (mdtidx < 0) - return mdtidx; - - if (put_user(mdtidx, (int __user *)arg)) - return -EFAULT; - - return 0; - } - case OBD_IOC_GETDTNAME: - case OBD_IOC_GETMDNAME: - return ll_get_obd_name(inode, cmd, arg); - case LL_IOC_HSM_STATE_GET: { - struct md_op_data *op_data; - struct hsm_user_state *hus; - int rc; - - hus = kzalloc(sizeof(*hus), GFP_NOFS); - if (!hus) - return -ENOMEM; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, hus); - if (IS_ERR(op_data)) { - kfree(hus); - return PTR_ERR(op_data); - } - - rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), - op_data, NULL); - - if (copy_to_user((void __user *)arg, hus, sizeof(*hus))) - rc = -EFAULT; - - ll_finish_md_op_data(op_data); - kfree(hus); - return rc; - } - case LL_IOC_HSM_STATE_SET: { - struct hsm_state_set *hss; - int 
rc; - - hss = memdup_user((char __user *)arg, sizeof(*hss)); - if (IS_ERR(hss)) - return PTR_ERR(hss); - - rc = ll_hsm_state_set(inode, hss); - - kfree(hss); - return rc; - } - case LL_IOC_HSM_ACTION: { - struct md_op_data *op_data; - struct hsm_current_action *hca; - int rc; - - hca = kzalloc(sizeof(*hca), GFP_NOFS); - if (!hca) - return -ENOMEM; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, hca); - if (IS_ERR(op_data)) { - kfree(hca); - return PTR_ERR(op_data); - } - - rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), - op_data, NULL); - - if (copy_to_user((char __user *)arg, hca, sizeof(*hca))) - rc = -EFAULT; - - ll_finish_md_op_data(op_data); - kfree(hca); - return rc; - } - case LL_IOC_SET_LEASE: { - struct ll_inode_info *lli = ll_i2info(inode); - struct obd_client_handle *och = NULL; - bool lease_broken; - fmode_t fmode; - - switch (arg) { - case LL_LEASE_WRLCK: - if (!(file->f_mode & FMODE_WRITE)) - return -EPERM; - fmode = FMODE_WRITE; - break; - case LL_LEASE_RDLCK: - if (!(file->f_mode & FMODE_READ)) - return -EPERM; - fmode = FMODE_READ; - break; - case LL_LEASE_UNLCK: - mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och) { - och = fd->fd_lease_och; - fd->fd_lease_och = NULL; - } - mutex_unlock(&lli->lli_och_mutex); - - if (!och) - return -ENOLCK; - - fmode = och->och_flags; - rc = ll_lease_close(och, inode, &lease_broken); - if (rc < 0) - return rc; - - if (lease_broken) - fmode = 0; - - return ll_lease_type_from_fmode(fmode); - default: - return -EINVAL; - } - - CDEBUG(D_INODE, "Set lease with mode %u\n", fmode); - - /* apply for lease */ - och = ll_lease_open(inode, file, fmode, 0); - if (IS_ERR(och)) - return PTR_ERR(och); - - rc = 0; - mutex_lock(&lli->lli_och_mutex); - if (!fd->fd_lease_och) { - fd->fd_lease_och = och; - och = NULL; - } - mutex_unlock(&lli->lli_och_mutex); - if (och) { - /* impossible now that only excl is supported for now */ - ll_lease_close(och, inode, &lease_broken); - rc = 
-EBUSY; - } - return rc; - } - case LL_IOC_GET_LEASE: { - struct ll_inode_info *lli = ll_i2info(inode); - struct ldlm_lock *lock = NULL; - fmode_t fmode = 0; - - mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och) { - struct obd_client_handle *och = fd->fd_lease_och; - - lock = ldlm_handle2lock(&och->och_lease_handle); - if (lock) { - lock_res_and_lock(lock); - if (!ldlm_is_cancel(lock)) - fmode = och->och_flags; - unlock_res_and_lock(lock); - LDLM_LOCK_PUT(lock); - } - } - mutex_unlock(&lli->lli_och_mutex); - return ll_lease_type_from_fmode(fmode); - } - case LL_IOC_HSM_IMPORT: { - struct hsm_user_import *hui; - - hui = memdup_user((void __user *)arg, sizeof(*hui)); - if (IS_ERR(hui)) - return PTR_ERR(hui); - - rc = ll_hsm_import(inode, file, hui); - - kfree(hui); - return rc; - } - default: { - int err; - - if (ll_iocontrol_call(inode, file, cmd, arg, &err) == - LLIOC_STOP) - return err; - - return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL, - (void __user *)arg); - } - } -} - -static loff_t ll_file_seek(struct file *file, loff_t offset, int origin) -{ - struct inode *inode = file_inode(file); - loff_t retval, eof = 0; - - retval = offset + ((origin == SEEK_END) ? i_size_read(inode) : - (origin == SEEK_CUR) ? 
file->f_pos : 0); - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), to=%llu=%#llx(%d)\n", - PFID(ll_inode2fid(inode)), inode, retval, retval, origin); - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1); - - if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) { - retval = ll_glimpse_size(inode); - if (retval != 0) - return retval; - eof = i_size_read(inode); - } - - return generic_file_llseek_size(file, offset, origin, - ll_file_maxbytes(inode), eof); -} - -static int ll_flush(struct file *file, fl_owner_t id) -{ - struct inode *inode = file_inode(file); - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - int rc, err; - - LASSERT(!S_ISDIR(inode->i_mode)); - - /* catch async errors that were recorded back when async writeback - * failed for pages in this mapping. - */ - rc = lli->lli_async_rc; - lli->lli_async_rc = 0; - if (lli->lli_clob) { - err = lov_read_and_clear_async_rc(lli->lli_clob); - if (!rc) - rc = err; - } - - /* The application has been told about write failure already. - * Do not report failure again. - */ - if (fd->fd_write_failed) - return 0; - return rc ? -EIO : 0; -} - -/** - * Called to make sure a portion of file has been written out. - * if @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to OST. - * - * Return how many pages have been written. 
- */ -int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, - enum cl_fsync_mode mode, int ignore_layout) -{ - struct lu_env *env; - struct cl_io *io; - struct cl_fsync_io *fio; - int result; - u16 refcheck; - - if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL && - mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL) - return -EINVAL; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - io = vvp_env_thread_io(env); - io->ci_obj = ll_i2info(inode)->lli_clob; - io->ci_ignore_layout = ignore_layout; - - /* initialize parameters for sync */ - fio = &io->u.ci_fsync; - fio->fi_start = start; - fio->fi_end = end; - fio->fi_fid = ll_inode2fid(inode); - fio->fi_mode = mode; - fio->fi_nr_written = 0; - - if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0) - result = cl_io_loop(env, io); - else - result = io->ci_result; - if (result == 0) - result = fio->fi_nr_written; - cl_io_fini(env, io); - cl_env_put(env, &refcheck); - - return result; -} - -int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) -{ - struct inode *inode = file_inode(file); - struct ll_inode_info *lli = ll_i2info(inode); - struct ptlrpc_request *req; - int rc, err; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n", - PFID(ll_inode2fid(inode)), inode); - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1); - - rc = file_write_and_wait_range(file, start, end); - inode_lock(inode); - - /* catch async errors that were recorded back when async writeback - * failed for pages in this mapping. 
- */ - if (!S_ISDIR(inode->i_mode)) { - err = lli->lli_async_rc; - lli->lli_async_rc = 0; - if (rc == 0) - rc = err; - if (lli->lli_clob) { - err = lov_read_and_clear_async_rc(lli->lli_clob); - if (rc == 0) - rc = err; - } - } - - err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req); - if (!rc) - rc = err; - if (!err) - ptlrpc_req_finished(req); - - if (S_ISREG(inode->i_mode)) { - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - - err = cl_sync_file_range(inode, start, end, CL_FSYNC_ALL, 0); - if (rc == 0 && err < 0) - rc = err; - if (rc < 0) - fd->fd_write_failed = true; - else - fd->fd_write_failed = false; - } - - inode_unlock(inode); - return rc; -} - -static int -ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) -{ - struct inode *inode = file_inode(file); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ldlm_enqueue_info einfo = { - .ei_type = LDLM_FLOCK, - .ei_cb_cp = ldlm_flock_completion_ast, - .ei_cbdata = file_lock, - }; - struct md_op_data *op_data; - struct lustre_handle lockh = {0}; - union ldlm_policy_data flock = { { 0 } }; - int fl_type = file_lock->fl_type; - __u64 flags = 0; - int rc; - int rc2 = 0; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID " file_lock=%p\n", - PFID(ll_inode2fid(inode)), file_lock); - - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1); - - if (file_lock->fl_flags & FL_FLOCK) - LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK)); - else if (!(file_lock->fl_flags & FL_POSIX)) - return -EINVAL; - - flock.l_flock.owner = (unsigned long)file_lock->fl_owner; - flock.l_flock.pid = file_lock->fl_pid; - flock.l_flock.start = file_lock->fl_start; - flock.l_flock.end = file_lock->fl_end; - - /* Somewhat ugly workaround for svc lockd. - * lockd installs custom fl_lmops->lm_compare_owner that checks - * for the fl_owner to be the same (which it always is on local node - * I guess between lockd processes) and then compares pid. 
- * As such we assign pid to the owner field to make it all work, - * conflict with normal locks is unlikely since pid space and - * pointer space for current->files are not intersecting - */ - if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner) - flock.l_flock.owner = (unsigned long)file_lock->fl_pid; - - switch (fl_type) { - case F_RDLCK: - einfo.ei_mode = LCK_PR; - break; - case F_UNLCK: - /* An unlock request may or may not have any relation to - * existing locks so we may not be able to pass a lock handle - * via a normal ldlm_lock_cancel() request. The request may even - * unlock a byte range in the middle of an existing lock. In - * order to process an unlock request we need all of the same - * information that is given with a normal read or write record - * lock request. To avoid creating another ldlm unlock (cancel) - * message we'll treat a LCK_NL flock request as an unlock. - */ - einfo.ei_mode = LCK_NL; - break; - case F_WRLCK: - einfo.ei_mode = LCK_PW; - break; - default: - CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", fl_type); - return -ENOTSUPP; - } - - switch (cmd) { - case F_SETLKW: -#ifdef F_SETLKW64 - case F_SETLKW64: -#endif - flags = 0; - break; - case F_SETLK: -#ifdef F_SETLK64 - case F_SETLK64: -#endif - flags = LDLM_FL_BLOCK_NOWAIT; - break; - case F_GETLK: -#ifdef F_GETLK64 - case F_GETLK64: -#endif - flags = LDLM_FL_TEST_LOCK; - break; - default: - CERROR("unknown fcntl lock command: %d\n", cmd); - return -EINVAL; - } - - /* - * Save the old mode so that if the mode in the lock changes we - * can decrement the appropriate reader or writer refcount. 
- */ - file_lock->fl_type = einfo.ei_mode; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - CDEBUG(D_DLMTRACE, "inode=" DFID ", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n", - PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags, - einfo.ei_mode, flock.l_flock.start, flock.l_flock.end); - - rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, &lockh, - flags); - - /* Restore the file lock type if not TEST lock. */ - if (!(flags & LDLM_FL_TEST_LOCK)) - file_lock->fl_type = fl_type; - - if ((rc == 0 || file_lock->fl_type == F_UNLCK) && - !(flags & LDLM_FL_TEST_LOCK)) - rc2 = locks_lock_file_wait(file, file_lock); - - if (rc2 && file_lock->fl_type != F_UNLCK) { - einfo.ei_mode = LCK_NL; - md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, - &lockh, flags); - rc = rc2; - } - - ll_finish_md_op_data(op_data); - - return rc; -} - -int ll_get_fid_by_name(struct inode *parent, const char *name, - int namelen, struct lu_fid *fid, - struct inode **inode) -{ - struct md_op_data *op_data = NULL; - struct ptlrpc_request *req; - struct mdt_body *body; - int rc; - - op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE; - rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req); - ll_finish_md_op_data(op_data); - if (rc < 0) - return rc; - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (!body) { - rc = -EFAULT; - goto out_req; - } - if (fid) - *fid = body->mbo_fid1; - - if (inode) - rc = ll_prep_inode(inode, req, parent->i_sb, NULL); -out_req: - ptlrpc_req_finished(req); - return rc; -} - -int ll_migrate(struct inode *parent, struct file *file, int mdtidx, - const char *name, int namelen) -{ - struct ptlrpc_request *request = NULL; - struct obd_client_handle *och = NULL; - struct inode *child_inode = NULL; - 
struct dentry *dchild = NULL; - struct md_op_data *op_data; - struct mdt_body *body; - u64 data_version = 0; - struct qstr qstr; - int rc; - - CDEBUG(D_VFSTRACE, "migrate %s under " DFID " to MDT%d\n", - name, PFID(ll_inode2fid(parent)), mdtidx); - - op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, - 0, LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - /* Get child FID first */ - qstr.hash = full_name_hash(parent, name, namelen); - qstr.name = name; - qstr.len = namelen; - dchild = d_lookup(file_dentry(file), &qstr); - if (dchild) { - op_data->op_fid3 = *ll_inode2fid(dchild->d_inode); - if (dchild->d_inode) - child_inode = igrab(dchild->d_inode); - dput(dchild); - } - - if (!child_inode) { - rc = ll_get_fid_by_name(parent, name, namelen, - &op_data->op_fid3, &child_inode); - if (rc) - goto out_free; - } - - if (!child_inode) { - rc = -EINVAL; - goto out_free; - } - - inode_lock(child_inode); - op_data->op_fid3 = *ll_inode2fid(child_inode); - if (!fid_is_sane(&op_data->op_fid3)) { - CERROR("%s: migrate %s, but fid " DFID " is insane\n", - ll_get_fsname(parent->i_sb, NULL, 0), name, - PFID(&op_data->op_fid3)); - rc = -EINVAL; - goto out_unlock; - } - - rc = ll_get_mdt_idx_by_fid(ll_i2sbi(parent), &op_data->op_fid3); - if (rc < 0) - goto out_unlock; - - if (rc == mdtidx) { - CDEBUG(D_INFO, "%s: " DFID " is already on MDT%d.\n", name, - PFID(&op_data->op_fid3), mdtidx); - rc = 0; - goto out_unlock; - } -again: - if (S_ISREG(child_inode->i_mode)) { - och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0); - if (IS_ERR(och)) { - rc = PTR_ERR(och); - och = NULL; - goto out_unlock; - } - - rc = ll_data_version(child_inode, &data_version, - LL_DV_WR_FLUSH); - if (rc) - goto out_close; - - op_data->op_handle = och->och_fh; - op_data->op_data = och->och_mod; - op_data->op_data_version = data_version; - op_data->op_lease_handle = och->och_lease_handle; - op_data->op_bias |= MDS_RENAME_MIGRATE; - } - - op_data->op_mds = mdtidx; - 
op_data->op_cli_flags = CLI_MIGRATE; - rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name, - namelen, name, namelen, &request); - if (!rc) { - LASSERT(request); - ll_update_times(request, parent); - - body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); - LASSERT(body); - - /* - * If the server does release layout lock, then we cleanup - * the client och here, otherwise release it in out_close: - */ - if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) { - obd_mod_put(och->och_mod); - md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp, - och); - och->och_fh.cookie = DEAD_HANDLE_MAGIC; - kfree(och); - och = NULL; - } - } - - if (request) { - ptlrpc_req_finished(request); - request = NULL; - } - - /* Try again if the file layout has changed. */ - if (rc == -EAGAIN && S_ISREG(child_inode->i_mode)) - goto again; - -out_close: - if (och) /* close the file */ - ll_lease_close(och, child_inode, NULL); - if (!rc) - clear_nlink(child_inode); -out_unlock: - inode_unlock(child_inode); - iput(child_inode); -out_free: - ll_finish_md_op_data(op_data); - return rc; -} - -static int -ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock) -{ - return -ENOSYS; -} - -/** - * test if some locks matching bits and l_req_mode are acquired - * - bits can be in different locks - * - if found clear the common lock bits in *bits - * - the bits not found, are kept in *bits - * \param inode [IN] - * \param bits [IN] searched lock bits [IN] - * \param l_req_mode [IN] searched lock mode - * \retval boolean, true iff all bits are found - */ -int ll_have_md_lock(struct inode *inode, __u64 *bits, - enum ldlm_mode l_req_mode) -{ - struct lustre_handle lockh; - union ldlm_policy_data policy; - enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ? 
- (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode; - struct lu_fid *fid; - __u64 flags; - int i; - - if (!inode) - return 0; - - fid = &ll_i2info(inode)->lli_fid; - CDEBUG(D_INFO, "trying to match res " DFID " mode %s\n", PFID(fid), - ldlm_lockname[mode]); - - flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK; - for (i = 0; i <= MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) { - policy.l_inodebits.bits = *bits & (1 << i); - if (policy.l_inodebits.bits == 0) - continue; - - if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS, - &policy, mode, &lockh)) { - struct ldlm_lock *lock; - - lock = ldlm_handle2lock(&lockh); - if (lock) { - *bits &= - ~(lock->l_policy_data.l_inodebits.bits); - LDLM_LOCK_PUT(lock); - } else { - *bits &= ~policy.l_inodebits.bits; - } - } - } - return *bits == 0; -} - -enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, - struct lustre_handle *lockh, __u64 flags, - enum ldlm_mode mode) -{ - union ldlm_policy_data policy = { .l_inodebits = { bits } }; - struct lu_fid *fid; - - fid = &ll_i2info(inode)->lli_fid; - CDEBUG(D_INFO, "trying to match res " DFID "\n", PFID(fid)); - - return md_lock_match(ll_i2mdexp(inode), flags | LDLM_FL_BLOCK_GRANTED, - fid, LDLM_IBITS, &policy, mode, lockh); -} - -static int ll_inode_revalidate_fini(struct inode *inode, int rc) -{ - /* Already unlinked. Just update nlink and return success */ - if (rc == -ENOENT) { - clear_nlink(inode); - /* If it is striped directory, and there is bad stripe - * Let's revalidate the dentry again, instead of returning - * error - */ - if (S_ISDIR(inode->i_mode) && ll_i2info(inode)->lli_lsm_md) - return 0; - - /* This path cannot be hit for regular files unless in - * case of obscure races, so no need to validate size. - */ - if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) - return 0; - } else if (rc != 0) { - CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? 
D_INFO : D_ERROR, - "%s: revalidate FID " DFID " error: rc = %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), rc); - } - - return rc; -} - -static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) -{ - struct inode *inode = d_inode(dentry); - struct ptlrpc_request *req = NULL; - struct obd_export *exp; - int rc = 0; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p),name=%pd\n", - PFID(ll_inode2fid(inode)), inode, dentry); - - exp = ll_i2mdexp(inode); - - /* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC. - * But under CMD case, it caused some lock issues, should be fixed - * with new CMD ibits lock. See bug 12718 - */ - if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) { - struct lookup_intent oit = { .it_op = IT_GETATTR }; - struct md_op_data *op_data; - - if (ibits == MDS_INODELOCK_LOOKUP) - oit.it_op = IT_LOOKUP; - - /* Call getattr by fid, so do not provide name at all. */ - op_data = ll_prep_md_op_data(NULL, inode, - inode, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - rc = md_intent_lock(exp, op_data, &oit, &req, - &ll_md_blocking_ast, 0); - ll_finish_md_op_data(op_data); - if (rc < 0) { - rc = ll_inode_revalidate_fini(inode, rc); - goto out; - } - - rc = ll_revalidate_it_finish(req, &oit, inode); - if (rc != 0) { - ll_intent_release(&oit); - goto out; - } - - /* Unlinked? Unhash dentry, so it is not picked up later by - * do_lookup() -> ll_revalidate_it(). We cannot use d_drop - * here to preserve get_cwd functionality on 2.6. 
- * Bug 10503 - */ - if (!d_inode(dentry)->i_nlink) { - spin_lock(&inode->i_lock); - d_lustre_invalidate(dentry, 0); - spin_unlock(&inode->i_lock); - } - - ll_lookup_finish_locks(&oit, inode); - } else if (!ll_have_md_lock(d_inode(dentry), &ibits, LCK_MINMODE)) { - struct ll_sb_info *sbi = ll_i2sbi(d_inode(dentry)); - u64 valid = OBD_MD_FLGETATTR; - struct md_op_data *op_data; - int ealen = 0; - - if (S_ISREG(inode->i_mode)) { - rc = ll_get_default_mdsize(sbi, &ealen); - if (rc) - return rc; - valid |= OBD_MD_FLEASIZE | OBD_MD_FLMODEASIZE; - } - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, - 0, ealen, LUSTRE_OPC_ANY, - NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - op_data->op_valid = valid; - rc = md_getattr(sbi->ll_md_exp, op_data, &req); - ll_finish_md_op_data(op_data); - if (rc) - return ll_inode_revalidate_fini(inode, rc); - - rc = ll_prep_inode(&inode, req, NULL, NULL); - } -out: - ptlrpc_req_finished(req); - return rc; -} - -static int ll_merge_md_attr(struct inode *inode) -{ - struct cl_attr attr = { 0 }; - int rc; - - LASSERT(ll_i2info(inode)->lli_lsm_md); - rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md, - &attr, ll_md_blocking_ast); - if (rc) - return rc; - - set_nlink(inode, attr.cat_nlink); - inode->i_blocks = attr.cat_blocks; - i_size_write(inode, attr.cat_size); - - ll_i2info(inode)->lli_atime = attr.cat_atime; - ll_i2info(inode)->lli_mtime = attr.cat_mtime; - ll_i2info(inode)->lli_ctime = attr.cat_ctime; - - return 0; -} - -static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits) -{ - struct inode *inode = d_inode(dentry); - int rc; - - rc = __ll_inode_revalidate(dentry, ibits); - if (rc != 0) - return rc; - - /* if object isn't regular file, don't validate size */ - if (!S_ISREG(inode->i_mode)) { - if (S_ISDIR(inode->i_mode) && - ll_i2info(inode)->lli_lsm_md) { - rc = ll_merge_md_attr(inode); - if (rc) - return rc; - } - - LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_atime; - 
LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime; - LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime; - } else { - struct ll_inode_info *lli = ll_i2info(inode); - - /* In case of restore, the MDT has the right size and has - * already send it back without granting the layout lock, - * inode is up-to-date so glimpse is useless. - * Also to glimpse we need the layout, in case of a running - * restore the MDT holds the layout lock so the glimpse will - * block up to the end of restore (getattr will block) - */ - if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) - rc = ll_glimpse_size(inode); - } - return rc; -} - -int ll_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int flags) -{ - struct inode *inode = d_inode(path->dentry); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ll_inode_info *lli = ll_i2info(inode); - int res; - - res = ll_inode_revalidate(path->dentry, - MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP); - ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1); - - if (res) - return res; - - OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30); - - stat->dev = inode->i_sb->s_dev; - if (ll_need_32bit_api(sbi)) - stat->ino = cl_fid_build_ino(&lli->lli_fid, 1); - else - stat->ino = inode->i_ino; - stat->mode = inode->i_mode; - stat->uid = inode->i_uid; - stat->gid = inode->i_gid; - stat->rdev = inode->i_rdev; - stat->atime = inode->i_atime; - stat->mtime = inode->i_mtime; - stat->ctime = inode->i_ctime; - stat->blksize = 1 << inode->i_blkbits; - - stat->nlink = inode->i_nlink; - stat->size = i_size_read(inode); - stat->blocks = inode->i_blocks; - - return 0; -} - -static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - __u64 start, __u64 len) -{ - int rc; - size_t num_bytes; - struct fiemap *fiemap; - unsigned int extent_count = fieinfo->fi_extents_max; - - num_bytes = sizeof(*fiemap) + (extent_count * - sizeof(struct fiemap_extent)); - fiemap = kvzalloc(num_bytes, GFP_KERNEL); - if (!fiemap) - return 
-ENOMEM; - - fiemap->fm_flags = fieinfo->fi_flags; - fiemap->fm_extent_count = fieinfo->fi_extents_max; - fiemap->fm_start = start; - fiemap->fm_length = len; - - if (extent_count > 0 && - copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start, - sizeof(struct fiemap_extent))) { - rc = -EFAULT; - goto out; - } - - rc = ll_do_fiemap(inode, fiemap, num_bytes); - - fieinfo->fi_flags = fiemap->fm_flags; - fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents; - if (extent_count > 0 && - copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0], - fiemap->fm_mapped_extents * - sizeof(struct fiemap_extent))) { - rc = -EFAULT; - goto out; - } -out: - kvfree(fiemap); - return rc; -} - -int ll_inode_permission(struct inode *inode, int mask) -{ - struct ll_sb_info *sbi; - struct root_squash_info *squash; - const struct cred *old_cred = NULL; - struct cred *cred = NULL; - bool squash_id = false; - int rc = 0; - - if (mask & MAY_NOT_BLOCK) - return -ECHILD; - - /* as root inode are NOT getting validated in lookup operation, - * need to do it before permission check. 
- */ - - if (is_root_inode(inode)) { - rc = __ll_inode_revalidate(inode->i_sb->s_root, - MDS_INODELOCK_LOOKUP); - if (rc) - return rc; - } - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), inode mode %x mask %o\n", - PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask); - - /* squash fsuid/fsgid if needed */ - sbi = ll_i2sbi(inode); - squash = &sbi->ll_squash; - if (unlikely(squash->rsi_uid && - uid_eq(current_fsuid(), GLOBAL_ROOT_UID) && - !(sbi->ll_flags & LL_SBI_NOROOTSQUASH))) { - squash_id = true; - } - - if (squash_id) { - CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n", - __kuid_val(current_fsuid()), __kgid_val(current_fsgid()), - squash->rsi_uid, squash->rsi_gid); - - /* - * update current process's credentials - * and FS capability - */ - cred = prepare_creds(); - if (!cred) - return -ENOMEM; - - cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid); - cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid); - cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective); - cred->cap_effective = cap_drop_fs_set(cred->cap_effective); - - old_cred = override_creds(cred); - } - - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1); - rc = generic_permission(inode, mask); - - /* restore current process's credentials and FS capability */ - if (squash_id) { - revert_creds(old_cred); - put_cred(cred); - } - - return rc; -} - -/* -o localflock - only provides locally consistent flock locks */ -const struct file_operations ll_file_operations = { - .read_iter = ll_file_read_iter, - .write_iter = ll_file_write_iter, - .unlocked_ioctl = ll_file_ioctl, - .open = ll_file_open, - .release = ll_file_release, - .mmap = ll_file_mmap, - .llseek = ll_file_seek, - .splice_read = generic_file_splice_read, - .fsync = ll_fsync, - .flush = ll_flush -}; - -const struct file_operations ll_file_operations_flock = { - .read_iter = ll_file_read_iter, - .write_iter = ll_file_write_iter, - .unlocked_ioctl = ll_file_ioctl, - .open = ll_file_open, - .release = 
ll_file_release, - .mmap = ll_file_mmap, - .llseek = ll_file_seek, - .splice_read = generic_file_splice_read, - .fsync = ll_fsync, - .flush = ll_flush, - .flock = ll_file_flock, - .lock = ll_file_flock -}; - -/* These are for -o noflock - to return ENOSYS on flock calls */ -const struct file_operations ll_file_operations_noflock = { - .read_iter = ll_file_read_iter, - .write_iter = ll_file_write_iter, - .unlocked_ioctl = ll_file_ioctl, - .open = ll_file_open, - .release = ll_file_release, - .mmap = ll_file_mmap, - .llseek = ll_file_seek, - .splice_read = generic_file_splice_read, - .fsync = ll_fsync, - .flush = ll_flush, - .flock = ll_file_noflock, - .lock = ll_file_noflock -}; - -const struct inode_operations ll_file_inode_operations = { - .setattr = ll_setattr, - .getattr = ll_getattr, - .permission = ll_inode_permission, - .listxattr = ll_listxattr, - .fiemap = ll_fiemap, - .get_acl = ll_get_acl, -}; - -/* dynamic ioctl number support routines */ -static struct llioc_ctl_data { - struct rw_semaphore ioc_sem; - struct list_head ioc_head; -} llioc = { - __RWSEM_INITIALIZER(llioc.ioc_sem), - LIST_HEAD_INIT(llioc.ioc_head) -}; - -struct llioc_data { - struct list_head iocd_list; - unsigned int iocd_size; - llioc_callback_t iocd_cb; - unsigned int iocd_count; - unsigned int iocd_cmd[0]; -}; - -void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd) -{ - unsigned int size; - struct llioc_data *in_data = NULL; - - if (!cb || !cmd || count > LLIOC_MAX_CMD || count < 0) - return NULL; - - size = sizeof(*in_data) + count * sizeof(unsigned int); - in_data = kzalloc(size, GFP_NOFS); - if (!in_data) - return NULL; - - in_data->iocd_size = size; - in_data->iocd_cb = cb; - in_data->iocd_count = count; - memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count); - - down_write(&llioc.ioc_sem); - list_add_tail(&in_data->iocd_list, &llioc.ioc_head); - up_write(&llioc.ioc_sem); - - return in_data; -} -EXPORT_SYMBOL(ll_iocontrol_register); - -void 
ll_iocontrol_unregister(void *magic) -{ - struct llioc_data *tmp; - - if (!magic) - return; - - down_write(&llioc.ioc_sem); - list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) { - if (tmp == magic) { - list_del(&tmp->iocd_list); - up_write(&llioc.ioc_sem); - - kfree(tmp); - return; - } - } - up_write(&llioc.ioc_sem); - - CWARN("didn't find iocontrol register block with magic: %p\n", magic); -} -EXPORT_SYMBOL(ll_iocontrol_unregister); - -static enum llioc_iter -ll_iocontrol_call(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg, int *rcp) -{ - enum llioc_iter ret = LLIOC_CONT; - struct llioc_data *data; - int rc = -EINVAL, i; - - down_read(&llioc.ioc_sem); - list_for_each_entry(data, &llioc.ioc_head, iocd_list) { - for (i = 0; i < data->iocd_count; i++) { - if (cmd != data->iocd_cmd[i]) - continue; - - ret = data->iocd_cb(inode, file, cmd, arg, data, &rc); - break; - } - - if (ret == LLIOC_STOP) - break; - } - up_read(&llioc.ioc_sem); - - if (rcp) - *rcp = rc; - return ret; -} - -int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct cl_object *obj = lli->lli_clob; - struct lu_env *env; - int rc; - u16 refcheck; - - if (!obj) - return 0; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - rc = cl_conf_set(env, obj, conf); - if (rc < 0) - goto out; - - if (conf->coc_opc == OBJECT_CONF_SET) { - struct ldlm_lock *lock = conf->coc_lock; - struct cl_layout cl = { - .cl_layout_gen = 0, - }; - - LASSERT(lock); - LASSERT(ldlm_has_layout(lock)); - - /* it can only be allowed to match after layout is - * applied to inode otherwise false layout would be - * seen. Applying layout should happen before dropping - * the intent lock. 
- */ - ldlm_lock_allow_match(lock); - - rc = cl_object_layout_get(env, obj, &cl); - if (rc < 0) - goto out; - - CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n", - PFID(&lli->lli_fid), ll_layout_version_get(lli), - cl.cl_layout_gen); - ll_layout_version_set(lli, cl.cl_layout_gen); - } -out: - cl_env_put(env, &refcheck); - return rc; -} - -/* Fetch layout from MDT with getxattr request, if it's not ready yet */ -static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) - -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ptlrpc_request *req; - struct mdt_body *body; - void *lvbdata; - void *lmm; - int lmmsize; - int rc; - - CDEBUG(D_INODE, DFID " LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n", - PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock), - lock->l_lvb_data, lock->l_lvb_len); - - if (lock->l_lvb_data && ldlm_is_lvb_ready(lock)) - return 0; - - /* if layout lock was granted right away, the layout is returned - * within DLM_LVB of dlm reply; otherwise if the lock was ever - * blocked and then granted via completion ast, we have to fetch - * layout here. 
Please note that we can't use the LVB buffer in - * completion AST because it doesn't have a large enough buffer - */ - rc = ll_get_default_mdsize(sbi, &lmmsize); - if (rc == 0) - rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), - OBD_MD_FLXATTR, XATTR_NAME_LOV, lmmsize, &req); - if (rc < 0) - return rc; - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (!body) { - rc = -EPROTO; - goto out; - } - - lmmsize = body->mbo_eadatasize; - if (lmmsize == 0) /* empty layout */ { - rc = 0; - goto out; - } - - lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize); - if (!lmm) { - rc = -EFAULT; - goto out; - } - - lvbdata = kvzalloc(lmmsize, GFP_NOFS); - if (!lvbdata) { - rc = -ENOMEM; - goto out; - } - - memcpy(lvbdata, lmm, lmmsize); - lock_res_and_lock(lock); - if (lock->l_lvb_data) - kvfree(lock->l_lvb_data); - - lock->l_lvb_data = lvbdata; - lock->l_lvb_len = lmmsize; - unlock_res_and_lock(lock); - -out: - ptlrpc_req_finished(req); - return rc; -} - -/** - * Apply the layout to the inode. Layout lock is held and will be released - * in this function. - */ -static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, - struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ldlm_lock *lock; - struct cl_object_conf conf; - int rc = 0; - bool lvb_ready; - bool wait_layout = false; - - LASSERT(lustre_handle_is_used(lockh)); - - lock = ldlm_handle2lock(lockh); - LASSERT(lock); - LASSERT(ldlm_has_layout(lock)); - - LDLM_DEBUG(lock, "File " DFID "(%p) being reconfigured", - PFID(&lli->lli_fid), inode); - - /* in case this is a caching lock and reinstate with new inode */ - md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL); - - lock_res_and_lock(lock); - lvb_ready = ldlm_is_lvb_ready(lock); - unlock_res_and_lock(lock); - /* checking lvb_ready is racy but this is okay. 
The worst case is - * that multi processes may configure the file on the same time. - */ - if (lvb_ready) { - rc = 0; - goto out; - } - - rc = ll_layout_fetch(inode, lock); - if (rc < 0) - goto out; - - /* for layout lock, lmm is returned in lock's lvb. - * lvb_data is immutable if the lock is held so it's safe to access it - * without res lock. - * - * set layout to file. Unlikely this will fail as old layout was - * surely eliminated - */ - memset(&conf, 0, sizeof(conf)); - conf.coc_opc = OBJECT_CONF_SET; - conf.coc_inode = inode; - conf.coc_lock = lock; - conf.u.coc_layout.lb_buf = lock->l_lvb_data; - conf.u.coc_layout.lb_len = lock->l_lvb_len; - rc = ll_layout_conf(inode, &conf); - - /* refresh layout failed, need to wait */ - wait_layout = rc == -EBUSY; - -out: - LDLM_LOCK_PUT(lock); - ldlm_lock_decref(lockh, mode); - - /* wait for IO to complete if it's still being used. */ - if (wait_layout) { - CDEBUG(D_INODE, "%s: " DFID "(%p) wait for layout reconf\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&lli->lli_fid), inode); - - memset(&conf, 0, sizeof(conf)); - conf.coc_opc = OBJECT_CONF_WAIT; - conf.coc_inode = inode; - rc = ll_layout_conf(inode, &conf); - if (rc == 0) - rc = -EAGAIN; - - CDEBUG(D_INODE, - "%s: file=" DFID " waiting layout return: %d.\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&lli->lli_fid), rc); - } - return rc; -} - -static int ll_layout_refresh_locked(struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct md_op_data *op_data; - struct lookup_intent it; - struct lustre_handle lockh; - enum ldlm_mode mode; - struct ptlrpc_request *req; - int rc; - -again: - /* mostly layout lock is caching on the local side, so try to match - * it before grabbing layout lock mutex. 
- */ - mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0, - LCK_CR | LCK_CW | LCK_PR | LCK_PW); - if (mode != 0) { /* hit cached lock */ - rc = ll_layout_lock_set(&lockh, mode, inode); - if (rc == -EAGAIN) - goto again; - return rc; - } - - op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, - 0, 0, LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - /* have to enqueue one */ - memset(&it, 0, sizeof(it)); - it.it_op = IT_LAYOUT; - - LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file " DFID "(%p)", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&lli->lli_fid), inode); - - rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req, - &ll_md_blocking_ast, 0); - ptlrpc_req_finished(it.it_request); - it.it_request = NULL; - - ll_finish_md_op_data(op_data); - - mode = it.it_lock_mode; - it.it_lock_mode = 0; - ll_intent_drop_lock(&it); - - if (rc == 0) { - /* set lock data in case this is a new lock */ - ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL); - lockh.cookie = it.it_lock_handle; - rc = ll_layout_lock_set(&lockh, mode, inode); - if (rc == -EAGAIN) - goto again; - } - - return rc; -} - -/** - * This function checks if there exists a LAYOUT lock on the client side, - * or enqueues it if it doesn't have one in cache. - * - * This function will not hold layout lock so it may be revoked any time after - * this function returns. Any operations depend on layout should be redone - * in that case. - * - * This function should be called before lov_io_init() to get an uptodate - * layout version, the caller should save the version number and after IO - * is finished, this function should be called again to verify that layout - * is not changed during IO time. 
- */ -int ll_layout_refresh(struct inode *inode, __u32 *gen) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_sb_info *sbi = ll_i2sbi(inode); - int rc; - - *gen = ll_layout_version_get(lli); - if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE) - return 0; - - /* sanity checks */ - LASSERT(fid_is_sane(ll_inode2fid(inode))); - LASSERT(S_ISREG(inode->i_mode)); - - /* take layout lock mutex to enqueue layout lock exclusively. */ - mutex_lock(&lli->lli_layout_mutex); - - rc = ll_layout_refresh_locked(inode); - if (rc < 0) - goto out; - - *gen = ll_layout_version_get(lli); -out: - mutex_unlock(&lli->lli_layout_mutex); - - return rc; -} - -/** - * This function send a restore request to the MDT - */ -int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length) -{ - struct hsm_user_request *hur; - int len, rc; - - len = sizeof(struct hsm_user_request) + - sizeof(struct hsm_user_item); - hur = kzalloc(len, GFP_NOFS); - if (!hur) - return -ENOMEM; - - hur->hur_request.hr_action = HUA_RESTORE; - hur->hur_request.hr_archive_id = 0; - hur->hur_request.hr_flags = 0; - memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid, - sizeof(hur->hur_user_item[0].hui_fid)); - hur->hur_user_item[0].hui_extent.offset = offset; - hur->hur_user_item[0].hui_extent.length = length; - hur->hur_request.hr_itemcount = 1; - rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp, - len, hur, NULL); - kfree(hur); - return rc; -} diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c deleted file mode 100644 index ce0d51767da3..000000000000 --- a/drivers/staging/lustre/lustre/llite/glimpse.c +++ /dev/null @@ -1,205 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * glimpse code shared between vvp and liblustre (and other Lustre clients in - * the future). - * - * Author: Nikita Danilov - * Author: Oleg Drokin - */ - -#include -#include -#include - -#include -#include -#include -#include - -#include -#include "llite_internal.h" - -static const struct cl_lock_descr whole_file = { - .cld_start = 0, - .cld_end = CL_PAGE_EOF, - .cld_mode = CLM_READ -}; - -/* - * Check whether file has possible unwriten pages. - * - * \retval 1 file is mmap-ed or has dirty pages - * 0 otherwise - */ -blkcnt_t dirty_cnt(struct inode *inode) -{ - blkcnt_t cnt = 0; - struct vvp_object *vob = cl_inode2vvp(inode); - void *results[1]; - - if (inode->i_mapping) - cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->i_pages, - results, 0, 1, - PAGECACHE_TAG_DIRTY); - if (cnt == 0 && atomic_read(&vob->vob_mmap_cnt) > 0) - cnt = 1; - - return (cnt > 0) ? 
1 : 0; -} - -int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, - struct inode *inode, struct cl_object *clob, int agl) -{ - const struct lu_fid *fid = lu_object_fid(&clob->co_lu); - struct cl_lock *lock = vvp_env_lock(env); - struct cl_lock_descr *descr = &lock->cll_descr; - int result = 0; - - CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid)); - - /* NOTE: this looks like DLM lock request, but it may - * not be one. Due to CEF_ASYNC flag (translated - * to LDLM_FL_HAS_INTENT by osc), this is - * glimpse request, that won't revoke any - * conflicting DLM locks held. Instead, - * ll_glimpse_callback() will be called on each - * client holding a DLM lock against this file, - * and resulting size will be returned for each - * stripe. DLM lock on [0, EOF] is acquired only - * if there were no conflicting locks. If there - * were conflicting locks, enqueuing or waiting - * fails with -ENAVAIL, but valid inode - * attributes are returned anyway. - */ - *descr = whole_file; - descr->cld_obj = clob; - descr->cld_mode = CLM_READ; - descr->cld_enq_flags = CEF_ASYNC | CEF_MUST; - if (agl) - descr->cld_enq_flags |= CEF_AGL; - /* - * CEF_ASYNC is used because glimpse sub-locks cannot - * deadlock (because they never conflict with other - * locks) and, hence, can be enqueued out-of-order. - * - * CEF_MUST protects glimpse lock from conversion into - * a lockless mode. - */ - result = cl_lock_request(env, io, lock); - if (result < 0) - return result; - - if (!agl) { - ll_merge_attr(env, inode); - if (i_size_read(inode) > 0 && !inode->i_blocks) { - /* - * LU-417: Add dirty pages block count - * lest i_blocks reports 0, some "cp" or - * "tar" may think it's a completely - * sparse file and skip it. 
- */ - inode->i_blocks = dirty_cnt(inode); - } - } - - cl_lock_release(env, lock); - - return result; -} - -static int cl_io_get(struct inode *inode, struct lu_env **envout, - struct cl_io **ioout, u16 *refcheck) -{ - struct lu_env *env; - struct cl_io *io; - struct ll_inode_info *lli = ll_i2info(inode); - struct cl_object *clob = lli->lli_clob; - int result; - - if (S_ISREG(inode->i_mode)) { - env = cl_env_get(refcheck); - if (!IS_ERR(env)) { - io = vvp_env_thread_io(env); - io->ci_obj = clob; - *envout = env; - *ioout = io; - result = 1; - } else { - result = PTR_ERR(env); - } - } else { - result = 0; - } - return result; -} - -int cl_glimpse_size0(struct inode *inode, int agl) -{ - /* - * We don't need ast_flags argument to cl_glimpse_size(), because - * osc_lock_enqueue() takes care of the possible deadlock that said - * argument was introduced to avoid. - */ - /* - * XXX but note that ll_file_seek() passes LDLM_FL_BLOCK_NOWAIT to - * cl_glimpse_size(), which doesn't make sense: glimpse locks are not - * blocking anyway. - */ - struct lu_env *env = NULL; - struct cl_io *io = NULL; - int result; - u16 refcheck; - - result = cl_io_get(inode, &env, &io, &refcheck); - if (result > 0) { -again: - io->ci_verify_layout = 1; - result = cl_io_init(env, io, CIT_MISC, io->ci_obj); - if (result > 0) - /* - * nothing to do for this io. This currently happens - * when stripe sub-object's are not yet created. 
- */ - result = io->ci_result; - else if (result == 0) - result = cl_glimpse_lock(env, io, inode, io->ci_obj, - agl); - - OBD_FAIL_TIMEOUT(OBD_FAIL_GLIMPSE_DELAY, 2); - cl_io_fini(env, io); - if (unlikely(io->ci_need_restart)) - goto again; - cl_env_put(env, &refcheck); - } - return result; -} diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c deleted file mode 100644 index d7ea39ce0cb2..000000000000 --- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c +++ /dev/null @@ -1,292 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * cl code shared between vvp and liblustre (and other Lustre clients in the - * future). 
- * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "llite_internal.h" - -/* - * ccc_ prefix stands for "Common Client Code". - */ - -/***************************************************************************** - * - * Vvp device and device type functions. - * - */ - -/** - * An `emergency' environment used by cl_inode_fini() when cl_env_get() - * fails. Access to this environment is serialized by cl_inode_fini_guard - * mutex. - */ -struct lu_env *cl_inode_fini_env; -u16 cl_inode_fini_refcheck; - -/** - * A mutex serializing calls to slp_inode_fini() under extreme memory - * pressure, when environments cannot be allocated. - */ -static DEFINE_MUTEX(cl_inode_fini_guard); - -int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr, - unsigned int attr_flags) -{ - struct lu_env *env; - struct cl_io *io; - int result; - u16 refcheck; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - io = vvp_env_thread_io(env); - io->ci_obj = obj; - io->ci_verify_layout = 1; - - io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime); - io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime); - io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime); - io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size; - io->u.ci_setattr.sa_attr_flags = attr_flags; - io->u.ci_setattr.sa_valid = attr->ia_valid; - io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu); - -again: - if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) { - struct vvp_io *vio = vvp_env_io(env); - - if (attr->ia_valid & ATTR_FILE) - /* populate the file descriptor for ftruncate to honor - * group lock - see LU-787 - */ - vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file); - - result = cl_io_loop(env, io); - } else { - result = io->ci_result; - } - cl_io_fini(env, io); - if (unlikely(io->ci_need_restart)) - 
goto again; - - cl_env_put(env, &refcheck); - return result; -} - -/** - * Initialize or update CLIO structures for regular files when new - * meta-data arrives from the server. - * - * \param inode regular file inode - * \param md new file metadata from MDS - * - allocates cl_object if necessary, - * - updated layout, if object was already here. - */ -int cl_file_inode_init(struct inode *inode, struct lustre_md *md) -{ - struct lu_env *env; - struct ll_inode_info *lli; - struct cl_object *clob; - struct lu_site *site; - struct lu_fid *fid; - struct cl_object_conf conf = { - .coc_inode = inode, - .u = { - .coc_layout = md->layout, - } - }; - int result = 0; - u16 refcheck; - - LASSERT(md->body->mbo_valid & OBD_MD_FLID); - LASSERT(S_ISREG(inode->i_mode)); - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - site = ll_i2sbi(inode)->ll_site; - lli = ll_i2info(inode); - fid = &lli->lli_fid; - LASSERT(fid_is_sane(fid)); - - if (!lli->lli_clob) { - /* clob is slave of inode, empty lli_clob means for new inode, - * there is no clob in cache with the given fid, so it is - * unnecessary to perform lookup-alloc-lookup-insert, just - * alloc and insert directly. - */ - LASSERT(inode->i_state & I_NEW); - conf.coc_lu.loc_flags = LOC_F_NEW; - clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev), - fid, &conf); - if (!IS_ERR(clob)) { - /* - * No locking is necessary, as new inode is - * locked by I_NEW bit. - */ - lli->lli_clob = clob; - lu_object_ref_add(&clob->co_lu, "inode", inode); - } else { - result = PTR_ERR(clob); - } - } else { - result = cl_conf_set(env, lli->lli_clob, &conf); - } - - cl_env_put(env, &refcheck); - - if (result != 0) - CERROR("Failure to initialize cl object " DFID ": %d\n", - PFID(fid), result); - return result; -} - -/** - * Wait for others drop their references of the object at first, then we drop - * the last one, which will lead to the object be destroyed immediately. 
- * Must be called after cl_object_kill() against this object. - * - * The reason we want to do this is: destroying top object will wait for sub - * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs) - * to initiate top object destroying which may deadlock. See bz22520. - */ -static void cl_object_put_last(struct lu_env *env, struct cl_object *obj) -{ - struct lu_object_header *header = obj->co_lu.lo_header; - wait_queue_entry_t waiter; - - if (unlikely(atomic_read(&header->loh_ref) != 1)) { - struct lu_site *site = obj->co_lu.lo_dev->ld_site; - wait_queue_head_t *wq; - - wq = lu_site_wq_from_fid(site, &header->loh_fid); - - init_waitqueue_entry(&waiter, current); - add_wait_queue(wq, &waiter); - - while (1) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (atomic_read(&header->loh_ref) == 1) - break; - schedule(); - } - - set_current_state(TASK_RUNNING); - remove_wait_queue(wq, &waiter); - } - - cl_object_put(env, obj); -} - -void cl_inode_fini(struct inode *inode) -{ - struct lu_env *env; - struct ll_inode_info *lli = ll_i2info(inode); - struct cl_object *clob = lli->lli_clob; - u16 refcheck; - int emergency; - - if (clob) { - env = cl_env_get(&refcheck); - emergency = IS_ERR(env); - if (emergency) { - mutex_lock(&cl_inode_fini_guard); - LASSERT(cl_inode_fini_env); - env = cl_inode_fini_env; - } - /* - * cl_object cache is a slave to inode cache (which, in turn - * is a slave to dentry cache), don't keep cl_object in memory - * when its master is evicted. 
- */ - cl_object_kill(env, clob); - lu_object_ref_del(&clob->co_lu, "inode", inode); - cl_object_put_last(env, clob); - lli->lli_clob = NULL; - if (emergency) - mutex_unlock(&cl_inode_fini_guard); - else - cl_env_put(env, &refcheck); - } -} - -/** - * build inode number from passed @fid - */ -__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32) -{ - if (BITS_PER_LONG == 32 || api32) - return fid_flatten32(fid); - else - return fid_flatten(fid); -} - -/** - * build inode generation from passed @fid. If our FID overflows the 32-bit - * inode number then return a non-zero generation to distinguish them. - */ -__u32 cl_fid_build_gen(const struct lu_fid *fid) -{ - __u32 gen; - - if (fid_is_igif(fid)) { - gen = lu_igif_gen(fid); - return gen; - } - - gen = fid_flatten(fid) >> 32; - return gen; -} diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c deleted file mode 100644 index a246b955306e..000000000000 --- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c +++ /dev/null @@ -1,186 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * cl code shared between vvp and liblustre (and other Lustre clients in the - * future). - * - */ -#define DEBUG_SUBSYSTEM S_LLITE -#include -#include -#include -#include - -#include "llite_internal.h" - -/* Initialize the default and maximum LOV EA and cookie sizes. This allows - * us to make MDS RPCs with large enough reply buffers to hold the - * maximum-sized (= maximum striped) EA and cookie without having to - * calculate this (via a call into the LOV + OSCs) each time we make an RPC. - */ -int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) -{ - u32 val_size, max_easize, def_easize; - int rc; - - val_size = sizeof(max_easize); - rc = obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE), KEY_MAX_EASIZE, - &val_size, &max_easize); - if (rc) - return rc; - - val_size = sizeof(def_easize); - rc = obd_get_info(NULL, dt_exp, sizeof(KEY_DEFAULT_EASIZE), - KEY_DEFAULT_EASIZE, &val_size, &def_easize); - if (rc) - return rc; - - /* - * default cookiesize is 0 because from 2.4 server doesn't send - * llog cookies to client. - */ - CDEBUG(D_HA, "updating def/max_easize: %d/%d\n", - def_easize, max_easize); - - rc = md_init_ea_size(md_exp, max_easize, def_easize); - return rc; -} - -/** - * This function is used as an upcall-callback hooked by liblustre and llite - * clients into obd_notify() listeners chain to handle notifications about - * change of import connect_flags. See llu_fsswop_mount() and - * lustre_common_fill_super(). 
- */ -int cl_ocd_update(struct obd_device *host, - struct obd_device *watched, - enum obd_notify_event ev, void *owner, void *data) -{ - struct lustre_client_ocd *lco; - struct client_obd *cli; - __u64 flags; - int result; - - if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME) && - watched->obd_set_up && !watched->obd_stopping) { - cli = &watched->u.cli; - lco = owner; - flags = cli->cl_import->imp_connect_data.ocd_connect_flags; - CDEBUG(D_SUPER, "Changing connect_flags: %#llx -> %#llx\n", - lco->lco_flags, flags); - mutex_lock(&lco->lco_lock); - lco->lco_flags &= flags; - /* for each osc event update ea size */ - if (lco->lco_dt_exp) - cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp); - - mutex_unlock(&lco->lco_lock); - result = 0; - } else { - CERROR("unexpected notification from %s %s (setup:%d,stopping:%d)!\n", - watched->obd_type->typ_name, - watched->obd_name, watched->obd_set_up, - watched->obd_stopping); - result = -EINVAL; - } - return result; -} - -#define GROUPLOCK_SCOPE "grouplock" - -int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock, - struct ll_grouplock *cg) -{ - struct lu_env *env; - struct cl_io *io; - struct cl_lock *lock; - struct cl_lock_descr *descr; - __u32 enqflags; - u16 refcheck; - int rc; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - io = vvp_env_thread_io(env); - io->ci_obj = obj; - - rc = cl_io_init(env, io, CIT_MISC, io->ci_obj); - if (rc != 0) { - cl_io_fini(env, io); - cl_env_put(env, &refcheck); - /* Does not make sense to take GL for released layout */ - if (rc > 0) - rc = -ENOTSUPP; - return rc; - } - - lock = vvp_env_lock(env); - descr = &lock->cll_descr; - descr->cld_obj = obj; - descr->cld_start = 0; - descr->cld_end = CL_PAGE_EOF; - descr->cld_gid = gid; - descr->cld_mode = CLM_GROUP; - - enqflags = CEF_MUST | (nonblock ? 
CEF_NONBLOCK : 0); - descr->cld_enq_flags = enqflags; - - rc = cl_lock_request(env, io, lock); - if (rc < 0) { - cl_io_fini(env, io); - cl_env_put(env, &refcheck); - return rc; - } - - cg->lg_env = env; - cg->lg_io = io; - cg->lg_lock = lock; - cg->lg_gid = gid; - - return 0; -} - -void cl_put_grouplock(struct ll_grouplock *cg) -{ - struct lu_env *env = cg->lg_env; - struct cl_io *io = cg->lg_io; - struct cl_lock *lock = cg->lg_lock; - - LASSERT(cg->lg_env); - LASSERT(cg->lg_gid); - - cl_lock_release(env, lock); - cl_io_fini(env, io); - cl_env_put(env, NULL); -} diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h deleted file mode 100644 index c08a6e14b6d7..000000000000 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ /dev/null @@ -1,1344 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef LLITE_INTERNAL_H -#define LLITE_INTERNAL_H -#include -#include -#include /* for s2sbi */ -#include - -/* for struct cl_lock_descr and struct cl_io */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "vvp_internal.h" -#include "range_lock.h" - -#ifndef FMODE_EXEC -#define FMODE_EXEC 0 -#endif - -#ifndef VM_FAULT_RETRY -#define VM_FAULT_RETRY 0 -#endif - -/** Only used on client-side for indicating the tail of dir hash/offset. */ -#define LL_DIR_END_OFF 0x7fffffffffffffffULL -#define LL_DIR_END_OFF_32BIT 0x7fffffffUL - -/* 4UL * 1024 * 1024 */ -#define LL_MAX_BLKSIZE_BITS 22 - -#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0") -#define LUSTRE_FPRIVATE(file) ((file)->private_data) - -struct ll_dentry_data { - struct lookup_intent *lld_it; - unsigned int lld_sa_generation; - unsigned int lld_invalid:1; - unsigned int lld_nfs_dentry:1; - struct rcu_head lld_rcu_head; -}; - -#define ll_d2d(de) ((struct ll_dentry_data *)((de)->d_fsdata)) - -#define LLI_INODE_MAGIC 0x111d0de5 -#define LLI_INODE_DEAD 0xdeadd00d - -struct ll_getname_data { - struct dir_context ctx; - char *lgd_name; /* points to buffer with NAME_MAX+1 size */ - struct lu_fid lgd_fid; /* target fid we are looking for */ - int lgd_found; /* inode matched? */ -}; - -struct ll_grouplock { - struct lu_env *lg_env; - struct cl_io *lg_io; - struct cl_lock *lg_lock; - unsigned long lg_gid; -}; - -enum ll_file_flags { - /* File data is modified. */ - LLIF_DATA_MODIFIED = 0, - /* File is being restored */ - LLIF_FILE_RESTORING = 1, - /* Xattr cache is attached to the file */ - LLIF_XATTR_CACHE = 2, -}; - -struct ll_inode_info { - __u32 lli_inode_magic; - - spinlock_t lli_lock; - unsigned long lli_flags; - struct posix_acl *lli_posix_acl; - - /* identifying fields for both metadata and data stacks. 
*/ - struct lu_fid lli_fid; - /* master inode fid for stripe directory */ - struct lu_fid lli_pfid; - - /* We need all three because every inode may be opened in different - * modes - */ - struct obd_client_handle *lli_mds_read_och; - struct obd_client_handle *lli_mds_write_och; - struct obd_client_handle *lli_mds_exec_och; - __u64 lli_open_fd_read_count; - __u64 lli_open_fd_write_count; - __u64 lli_open_fd_exec_count; - /* Protects access to och pointers and their usage counters */ - struct mutex lli_och_mutex; - - struct inode lli_vfs_inode; - - /* the most recent timestamps obtained from mds */ - s64 lli_atime; - s64 lli_mtime; - s64 lli_ctime; - spinlock_t lli_agl_lock; - - /* Try to make the d::member and f::member are aligned. Before using - * these members, make clear whether it is directory or not. - */ - union { - /* for directory */ - struct { - /* serialize normal readdir and statahead-readdir. */ - struct mutex lli_readdir_mutex; - - /* metadata statahead */ - /* since parent-child threads can share the same @file - * struct, "opendir_key" is the token when dir close for - * case of parent exit before child -- it is me should - * cleanup the dir readahead. - */ - void *lli_opendir_key; - struct ll_statahead_info *lli_sai; - /* protect statahead stuff. */ - spinlock_t lli_sa_lock; - /* "opendir_pid" is the token when lookup/revalidate - * -- I am the owner of dir statahead. - */ - pid_t lli_opendir_pid; - /* stat will try to access statahead entries or start - * statahead if this flag is set, and this flag will be - * set upon dir open, and cleared when dir is closed, - * statahead hit ratio is too low, or start statahead - * thread failed. - */ - unsigned int lli_sa_enabled:1; - /* generation for statahead */ - unsigned int lli_sa_generation; - /* directory stripe information */ - struct lmv_stripe_md *lli_lsm_md; - /* default directory stripe offset. 
This is extracted - * from the "dmv" xattr in order to decide which MDT to - * create a subdirectory on. The MDS itself fetches - * "dmv" and gets the rest of the default layout itself - * (count, hash, etc). - */ - __u32 lli_def_stripe_offset; - }; - - /* for non-directory */ - struct { - struct mutex lli_size_mutex; - char *lli_symlink_name; - /* - * struct rw_semaphore { - * signed long count; // align d.d_def_acl - * spinlock_t wait_lock; // align d.d_sa_lock - * struct list_head wait_list; - * } - */ - struct rw_semaphore lli_trunc_sem; - struct range_lock_tree lli_write_tree; - - struct rw_semaphore lli_glimpse_sem; - unsigned long lli_glimpse_time; - struct list_head lli_agl_list; - __u64 lli_agl_index; - - /* for writepage() only to communicate to fsync */ - int lli_async_rc; - - /* - * whenever a process try to read/write the file, the - * jobid of the process will be saved here, and it'll - * be packed into the write PRC when flush later. - * - * so the read/write statistics for jobid will not be - * accurate if the file is shared by different jobs. - */ - char lli_jobid[LUSTRE_JOBID_SIZE]; - }; - }; - - /* XXX: For following frequent used members, although they maybe special - * used for non-directory object, it is some time-wasting to check - * whether the object is directory or not before using them. On the - * other hand, currently, sizeof(f) > sizeof(d), it cannot reduce - * the "ll_inode_info" size even if moving those members into u.f. - * So keep them out side. - * - * In the future, if more members are added only for directory, - * some of the following members can be moved into u.f. - */ - struct cl_object *lli_clob; - - /* mutex to request for layout lock exclusively. 
*/ - struct mutex lli_layout_mutex; - /* Layout version, protected by lli_layout_lock */ - __u32 lli_layout_gen; - spinlock_t lli_layout_lock; - - struct rw_semaphore lli_xattrs_list_rwsem; - struct mutex lli_xattrs_enq_lock; - struct list_head lli_xattrs;/* ll_xattr_entry->xe_list */ -}; - -static inline __u32 ll_layout_version_get(struct ll_inode_info *lli) -{ - __u32 gen; - - spin_lock(&lli->lli_layout_lock); - gen = lli->lli_layout_gen; - spin_unlock(&lli->lli_layout_lock); - - return gen; -} - -static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen) -{ - spin_lock(&lli->lli_layout_lock); - lli->lli_layout_gen = gen; - spin_unlock(&lli->lli_layout_lock); -} - -int ll_xattr_cache_destroy(struct inode *inode); - -int ll_xattr_cache_get(struct inode *inode, const char *name, - char *buffer, size_t size, __u64 valid); - -int ll_init_security(struct dentry *dentry, struct inode *inode, - struct inode *dir); - -/* - * Locking to guarantee consistency of non-atomic updates to long long i_size, - * consistency between file size and KMS. - * - * Implemented by ->lli_size_mutex and ->lsm_lock, nested in that order. - */ - -void ll_inode_size_lock(struct inode *inode); -void ll_inode_size_unlock(struct inode *inode); - -/* FIXME: replace the name of this with LL_I to conform to kernel stuff */ -/* static inline struct ll_inode_info *LL_I(struct inode *inode) */ -static inline struct ll_inode_info *ll_i2info(struct inode *inode) -{ - return container_of(inode, struct ll_inode_info, lli_vfs_inode); -} - -/* default to about 64M of readahead on a given system. 
*/ -#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_SHIFT)) - -/* default to read-ahead full files smaller than 2MB on the second read */ -#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT)) - -enum ra_stat { - RA_STAT_HIT = 0, - RA_STAT_MISS, - RA_STAT_DISTANT_READPAGE, - RA_STAT_MISS_IN_WINDOW, - RA_STAT_FAILED_GRAB_PAGE, - RA_STAT_FAILED_MATCH, - RA_STAT_DISCARDED, - RA_STAT_ZERO_LEN, - RA_STAT_ZERO_WINDOW, - RA_STAT_EOF, - RA_STAT_MAX_IN_FLIGHT, - RA_STAT_WRONG_GRAB_PAGE, - RA_STAT_FAILED_REACH_END, - _NR_RA_STAT, -}; - -struct ll_ra_info { - atomic_t ra_cur_pages; - unsigned long ra_max_pages; - unsigned long ra_max_pages_per_file; - unsigned long ra_max_read_ahead_whole_pages; -}; - -/* ra_io_arg will be filled in the beginning of ll_readahead with - * ras_lock, then the following ll_read_ahead_pages will read RA - * pages according to this arg, all the items in this structure are - * counted by page index. - */ -struct ra_io_arg { - unsigned long ria_start; /* start offset of read-ahead*/ - unsigned long ria_end; /* end offset of read-ahead*/ - unsigned long ria_reserved; /* reserved pages for read-ahead */ - unsigned long ria_end_min; /* minimum end to cover current read */ - bool ria_eof; /* reach end of file */ - /* If stride read pattern is detected, ria_stoff means where - * stride read is started. Note: for normal read-ahead, the - * value here is meaningless, and also it will not be accessed - */ - pgoff_t ria_stoff; - /* ria_length and ria_pages are the length and pages length in the - * stride I/O mode. 
And they will also be used to check whether - * it is stride I/O read-ahead in the read-ahead pages - */ - unsigned long ria_length; - unsigned long ria_pages; -}; - -/* LL_HIST_MAX=32 causes an overflow */ -#define LL_HIST_MAX 28 -#define LL_HIST_START 12 /* buckets start at 2^12 = 4k */ -#define LL_PROCESS_HIST_MAX 10 -struct per_process_info { - pid_t pid; - struct obd_histogram pp_r_hist; - struct obd_histogram pp_w_hist; -}; - -/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */ -struct ll_rw_extents_info { - struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1]; -}; - -#define LL_OFFSET_HIST_MAX 100 -struct ll_rw_process_info { - pid_t rw_pid; - int rw_op; - loff_t rw_range_start; - loff_t rw_range_end; - loff_t rw_last_file_pos; - loff_t rw_offset; - size_t rw_smallest_extent; - size_t rw_largest_extent; - struct ll_file_data *rw_last_file; -}; - -enum stats_track_type { - STATS_TRACK_ALL = 0, /* track all processes */ - STATS_TRACK_PID, /* track process with this pid */ - STATS_TRACK_PPID, /* track processes with this ppid */ - STATS_TRACK_GID, /* track processes with this gid */ - STATS_TRACK_LAST, -}; - -/* flags for sbi->ll_flags */ -#define LL_SBI_NOLCK 0x01 /* DLM locking disabled (directio-only) */ -#define LL_SBI_CHECKSUM 0x02 /* checksum each page as it's written */ -#define LL_SBI_FLOCK 0x04 -#define LL_SBI_USER_XATTR 0x08 /* support user xattr */ -#define LL_SBI_ACL 0x10 /* support ACL */ -/* LL_SBI_RMT_CLIENT 0x40 remote client */ -#define LL_SBI_MDS_CAPA 0x80 /* support mds capa, obsolete */ -#define LL_SBI_OSS_CAPA 0x100 /* support oss capa, obsolete */ -#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */ -#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */ -#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */ -/* LL_SBI_SOM_PREVIEW 0x1000 SOM preview mount option, obsolete */ -#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. 
*/ -#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */ -#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */ -#define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */ -#define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */ -#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */ -#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */ -#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */ -#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server - * suppress_pings - */ - -#define LL_SBI_FLAGS { \ - "nolck", \ - "checksum", \ - "flock", \ - "user_xattr", \ - "acl", \ - "???", \ - "???", \ - "mds_capa", \ - "oss_capa", \ - "flock", \ - "lru_resize", \ - "lazy_statfs", \ - "som", \ - "32bit_api", \ - "64bit_hash", \ - "agl", \ - "verbose", \ - "layout", \ - "user_fid2path",\ - "xattr_cache", \ - "norootsquash", \ - "always_ping", \ -} - -/* - * This is embedded into llite super-blocks to keep track of connect - * flags (capabilities) supported by all imports given mount is - * connected to. - */ -struct lustre_client_ocd { - /* - * This is conjunction of connect_flags across all imports - * (LOVs) this mount is connected to. This field is updated by - * cl_ocd_update() under ->lco_lock. - */ - __u64 lco_flags; - struct mutex lco_lock; - struct obd_export *lco_md_exp; - struct obd_export *lco_dt_exp; -}; - -struct ll_sb_info { - /* this protects pglist and ra_info. 
It isn't safe to - * grab from interrupt contexts - */ - spinlock_t ll_lock; - spinlock_t ll_pp_extent_lock; /* pp_extent entry*/ - spinlock_t ll_process_lock; /* ll_rw_process_info */ - struct obd_uuid ll_sb_uuid; - struct obd_export *ll_md_exp; - struct obd_export *ll_dt_exp; - struct dentry *ll_debugfs_entry; - struct lu_fid ll_root_fid; /* root object fid */ - - int ll_flags; - unsigned int ll_umounting:1, - ll_xattr_cache_enabled:1, - ll_client_common_fill_super_succeeded:1; - - struct lustre_client_ocd ll_lco; - - struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ - - /* - * Used to track "unstable" pages on a client, and maintain a - * LRU list of clean pages. An "unstable" page is defined as - * any page which is sent to a server as part of a bulk request, - * but is uncommitted to stable storage. - */ - struct cl_client_cache *ll_cache; - - struct lprocfs_stats *ll_ra_stats; - - struct ll_ra_info ll_ra_info; - unsigned int ll_namelen; - const struct file_operations *ll_fop; - - unsigned int ll_md_brw_pages; /* readdir pages per RPC */ - - struct lu_site *ll_site; - struct cl_device *ll_cl; - /* Statistics */ - struct ll_rw_extents_info ll_rw_extents_info; - int ll_extent_process_count; - struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX]; - unsigned int ll_offset_process_count; - struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX]; - unsigned int ll_rw_offset_entry_count; - int ll_stats_track_id; - enum stats_track_type ll_stats_track_type; - int ll_rw_stats_on; - - /* metadata stat-ahead */ - unsigned int ll_sa_max; /* max statahead RPCs */ - atomic_t ll_sa_total; /* statahead thread started - * count - */ - atomic_t ll_sa_wrong; /* statahead thread stopped for - * low hit ratio - */ - atomic_t ll_sa_running; /* running statahead thread - * count - */ - atomic_t ll_agl_total; /* AGL thread started count */ - - dev_t ll_sdev_orig; /* save s_dev before assign for - * clustered nfs - */ - /* root squash */ - struct 
root_squash_info ll_squash; - struct path ll_mnt; - - __kernel_fsid_t ll_fsid; - struct kobject ll_kobj; /* sysfs object */ - struct super_block *ll_sb; /* struct super_block (for sysfs code)*/ - struct completion ll_kobj_unregister; -}; - -/* - * per file-descriptor read-ahead data. - */ -struct ll_readahead_state { - spinlock_t ras_lock; - /* - * index of the last page that read(2) needed and that wasn't in the - * cache. Used by ras_update() to detect seeks. - * - * XXX nikita: if access seeks into cached region, Lustre doesn't see - * this. - */ - unsigned long ras_last_readpage; - /* - * number of pages read after last read-ahead window reset. As window - * is reset on each seek, this is effectively a number of consecutive - * accesses. Maybe ->ras_accessed_in_window is better name. - * - * XXX nikita: window is also reset (by ras_update()) when Lustre - * believes that memory pressure evicts read-ahead pages. In that - * case, it probably doesn't make sense to expand window to - * PTLRPC_MAX_BRW_PAGES on the third access. - */ - unsigned long ras_consecutive_pages; - /* - * number of read requests after the last read-ahead window reset - * As window is reset on each seek, this is effectively the number - * on consecutive read request and is used to trigger read-ahead. - */ - unsigned long ras_consecutive_requests; - /* - * Parameters of current read-ahead window. Handled by - * ras_update(). On the initial access to the file or after a seek, - * window is reset to 0. After 3 consecutive accesses, window is - * expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by - * PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages. - */ - unsigned long ras_window_start, ras_window_len; - /* - * Optimal RPC size. It decides how many pages will be sent - * for each read-ahead. - */ - unsigned long ras_rpc_size; - /* - * Where next read-ahead should start at. This lies within read-ahead - * window. 
Read-ahead window is read in pieces rather than at once - * because: 1. lustre limits total number of pages under read-ahead by - * ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages - * not covered by DLM lock. - */ - unsigned long ras_next_readahead; - /* - * Total number of ll_file_read requests issued, reads originating - * due to mmap are not counted in this total. This value is used to - * trigger full file read-ahead after multiple reads to a small file. - */ - unsigned long ras_requests; - /* - * Page index with respect to the current request, these value - * will not be accurate when dealing with reads issued via mmap. - */ - unsigned long ras_request_index; - /* - * The following 3 items are used for detecting the stride I/O - * mode. - * In stride I/O mode, - * ...............|-----data-----|****gap*****|--------|******|.... - * offset |-stride_pages-|-stride_gap-| - * ras_stride_offset = offset; - * ras_stride_length = stride_pages + stride_gap; - * ras_stride_pages = stride_pages; - * Note: all these three items are counted by pages. - */ - unsigned long ras_stride_length; - unsigned long ras_stride_pages; - pgoff_t ras_stride_offset; - /* - * number of consecutive stride request count, and it is similar as - * ras_consecutive_requests, but used for stride I/O mode. - * Note: only more than 2 consecutive stride request are detected, - * stride read-ahead will be enable - */ - unsigned long ras_consecutive_stride_requests; -}; - -extern struct kmem_cache *ll_file_data_slab; -struct lustre_handle; -struct ll_file_data { - struct ll_readahead_state fd_ras; - struct ll_grouplock fd_grouplock; - __u64 lfd_pos; - __u32 fd_flags; - fmode_t fd_omode; - /* openhandle if lease exists for this file. - * Borrow lli->lli_och_mutex to protect assignment - */ - struct obd_client_handle *fd_lease_och; - struct obd_client_handle *fd_och; - struct file *fd_file; - /* Indicate whether need to report failure when close. 
- * true: failure is known, not report again. - * false: unknown failure, should report. - */ - bool fd_write_failed; - rwlock_t fd_lock; /* protect lcc list */ - struct list_head fd_lccs; /* list of ll_cl_context */ -}; - -extern struct dentry *llite_root; -extern struct kset *llite_kset; - -static inline struct inode *ll_info2i(struct ll_inode_info *lli) -{ - return &lli->lli_vfs_inode; -} - -__u32 ll_i2suppgid(struct inode *i); -void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2); - -static inline int ll_need_32bit_api(struct ll_sb_info *sbi) -{ -#if BITS_PER_LONG == 32 - return 1; -#elif defined(CONFIG_COMPAT) - return unlikely(in_compat_syscall() || - (sbi->ll_flags & LL_SBI_32BIT_API)); -#else - return unlikely(sbi->ll_flags & LL_SBI_32BIT_API); -#endif -} - -void ll_ras_enter(struct file *f); - -/* llite/lcommon_misc.c */ -int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp); -int cl_ocd_update(struct obd_device *host, - struct obd_device *watched, - enum obd_notify_event ev, void *owner, void *data); -int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock, - struct ll_grouplock *cg); -void cl_put_grouplock(struct ll_grouplock *cg); - -/* llite/lproc_llite.c */ -int ldebugfs_register_mountpoint(struct dentry *parent, - struct super_block *sb, char *osc, char *mdc); -void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi); -void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count); -void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars); -void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid, - struct ll_file_data *file, loff_t pos, - size_t count, int rw); - -enum { - LPROC_LL_DIRTY_HITS, - LPROC_LL_DIRTY_MISSES, - LPROC_LL_READ_BYTES, - LPROC_LL_WRITE_BYTES, - LPROC_LL_BRW_READ, - LPROC_LL_BRW_WRITE, - LPROC_LL_IOCTL, - LPROC_LL_OPEN, - LPROC_LL_RELEASE, - LPROC_LL_MAP, - LPROC_LL_LLSEEK, - LPROC_LL_FSYNC, - LPROC_LL_READDIR, - LPROC_LL_SETATTR, - LPROC_LL_TRUNC, - 
LPROC_LL_FLOCK, - LPROC_LL_GETATTR, - LPROC_LL_CREATE, - LPROC_LL_LINK, - LPROC_LL_UNLINK, - LPROC_LL_SYMLINK, - LPROC_LL_MKDIR, - LPROC_LL_RMDIR, - LPROC_LL_MKNOD, - LPROC_LL_RENAME, - LPROC_LL_STAFS, - LPROC_LL_ALLOC_INODE, - LPROC_LL_SETXATTR, - LPROC_LL_GETXATTR, - LPROC_LL_GETXATTR_HITS, - LPROC_LL_LISTXATTR, - LPROC_LL_REMOVEXATTR, - LPROC_LL_INODE_PERM, - LPROC_LL_FILE_OPCODES -}; - -/* llite/dir.c */ -extern const struct file_operations ll_dir_operations; -extern const struct inode_operations ll_dir_inode_operations; -int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data, - struct dir_context *ctx); -int ll_get_mdt_idx(struct inode *inode); -int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid); -struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data, - __u64 offset); -void ll_release_page(struct inode *inode, struct page *page, bool remove); - -/* llite/namei.c */ -extern const struct inode_operations ll_special_inode_operations; - -struct inode *ll_iget(struct super_block *sb, ino_t hash, - struct lustre_md *lic); -int ll_test_inode_by_fid(struct inode *inode, void *opaque); -int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, - void *data, int flag); -struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de); -void ll_update_times(struct ptlrpc_request *request, struct inode *inode); - -/* llite/rw.c */ -int ll_writepage(struct page *page, struct writeback_control *wbc); -int ll_writepages(struct address_space *mapping, struct writeback_control *wbc); -int ll_readpage(struct file *file, struct page *page); -void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras); -int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io); -struct ll_cl_context *ll_cl_find(struct file *file); -void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io); -void ll_cl_remove(struct file *file, const struct lu_env *env); - 
-extern const struct address_space_operations ll_aops; - -/* llite/file.c */ -extern const struct file_operations ll_file_operations; -extern const struct file_operations ll_file_operations_flock; -extern const struct file_operations ll_file_operations_noflock; -extern const struct inode_operations ll_file_inode_operations; -int ll_have_md_lock(struct inode *inode, __u64 *bits, - enum ldlm_mode l_req_mode); -enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, - struct lustre_handle *lockh, __u64 flags, - enum ldlm_mode mode); -int ll_file_open(struct inode *inode, struct file *file); -int ll_file_release(struct inode *inode, struct file *file); -int ll_release_openhandle(struct inode *inode, struct lookup_intent *it); -int ll_md_real_close(struct inode *inode, fmode_t fmode); -int ll_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int flags); -#ifdef CONFIG_FS_POSIX_ACL -struct posix_acl *ll_get_acl(struct inode *inode, int type); -int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type); -#else -#define ll_get_acl NULL -#define ll_set_acl NULL -#endif /* CONFIG_FS_POSIX_ACL */ - -int ll_migrate(struct inode *parent, struct file *file, int mdtidx, - const char *name, int namelen); -int ll_get_fid_by_name(struct inode *parent, const char *name, - int namelen, struct lu_fid *fid, struct inode **inode); -int ll_inode_permission(struct inode *inode, int mask); - -int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, - __u64 flags, struct lov_user_md *lum, - int lum_size); -int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, - struct lov_mds_md **lmm, int *lmm_size, - struct ptlrpc_request **request); -int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, - int set_default); -int ll_dir_getstripe(struct inode *inode, void **lmmp, int *lmm_size, - struct ptlrpc_request **request, u64 valid); -int ll_fsync(struct file *file, loff_t start, loff_t end, int data); 
-int ll_merge_attr(const struct lu_env *env, struct inode *inode); -int ll_fid2path(struct inode *inode, void __user *arg); -int ll_data_version(struct inode *inode, __u64 *data_version, int flags); -int ll_hsm_release(struct inode *inode); -int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss); - -/* llite/dcache.c */ - -extern const struct dentry_operations ll_d_ops; -void ll_intent_drop_lock(struct lookup_intent *it); -void ll_intent_release(struct lookup_intent *it); -void ll_invalidate_aliases(struct inode *inode); -void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode); -int ll_revalidate_it_finish(struct ptlrpc_request *request, - struct lookup_intent *it, struct inode *inode); - -/* llite/llite_lib.c */ -extern struct super_operations lustre_super_operations; - -void ll_lli_init(struct ll_inode_info *lli); -int ll_fill_super(struct super_block *sb); -void ll_put_super(struct super_block *sb); -void ll_kill_super(struct super_block *sb); -struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock); -void ll_dir_clear_lsm_md(struct inode *inode); -void ll_clear_inode(struct inode *inode); -int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import); -int ll_setattr(struct dentry *de, struct iattr *attr); -int ll_statfs(struct dentry *de, struct kstatfs *sfs); -int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs, - __u64 max_age, __u32 flags); -int ll_update_inode(struct inode *inode, struct lustre_md *md); -int ll_read_inode2(struct inode *inode, void *opaque); -void ll_delete_inode(struct inode *inode); -int ll_iocontrol(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg); -int ll_flush_ctx(struct inode *inode); -void ll_umount_begin(struct super_block *sb); -int ll_remount_fs(struct super_block *sb, int *flags, char *data); -int ll_show_options(struct seq_file *seq, struct dentry *dentry); -void ll_dirty_page_discard_warn(struct page *page, int ioret); 
-int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, - struct super_block *sb, struct lookup_intent *it); -int ll_obd_statfs(struct inode *inode, void __user *arg); -int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize); -int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize); -int ll_set_default_mdsize(struct ll_sb_info *sbi, int default_mdsize); -int ll_process_config(struct lustre_cfg *lcfg); - -enum { - LUSTRE_OPC_MKDIR = 0, - LUSTRE_OPC_SYMLINK = 1, - LUSTRE_OPC_MKNOD = 2, - LUSTRE_OPC_CREATE = 3, - LUSTRE_OPC_ANY = 5, -}; - -struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, - struct inode *i1, struct inode *i2, - const char *name, size_t namelen, - u32 mode, __u32 opc, void *data); -void ll_finish_md_op_data(struct md_op_data *op_data); -int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg); -char *ll_get_fsname(struct super_block *sb, char *buf, int buflen); -void ll_compute_rootsquash_state(struct ll_sb_info *sbi); -void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req); -ssize_t ll_copy_user_md(const struct lov_user_md __user *md, - struct lov_user_md **kbuf); - -/* Compute expected user md size when passing in a md from user space */ -static inline ssize_t ll_lov_user_md_size(const struct lov_user_md *lum) -{ - switch (lum->lmm_magic) { - case LOV_USER_MAGIC_V1: - return sizeof(struct lov_user_md_v1); - case LOV_USER_MAGIC_V3: - return sizeof(struct lov_user_md_v3); - case LOV_USER_MAGIC_SPECIFIC: - if (lum->lmm_stripe_count > LOV_MAX_STRIPE_COUNT) - return -EINVAL; - - return lov_user_md_size(lum->lmm_stripe_count, - LOV_USER_MAGIC_SPECIFIC); - } - return -EINVAL; -} - -/* llite/llite_nfs.c */ -extern const struct export_operations lustre_export_operations; -__u32 get_uuid2int(const char *name, int len); -void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid); -struct inode *search_inode_for_lustre(struct super_block *sb, - const struct 
lu_fid *fid); -int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid); - -/* llite/symlink.c */ -extern const struct inode_operations ll_fast_symlink_inode_operations; - -/** - * IO arguments for various VFS I/O interfaces. - */ -struct vvp_io_args { - /** normal/splice */ - union { - struct { - struct kiocb *via_iocb; - struct iov_iter *via_iter; - } normal; - } u; -}; - -struct ll_cl_context { - struct list_head lcc_list; - void *lcc_cookie; - const struct lu_env *lcc_env; - struct cl_io *lcc_io; - struct cl_page *lcc_page; -}; - -struct ll_thread_info { - struct vvp_io_args lti_args; - struct ra_io_arg lti_ria; - struct ll_cl_context lti_io_ctx; -}; - -extern struct lu_context_key ll_thread_key; -static inline struct ll_thread_info *ll_env_info(const struct lu_env *env) -{ - struct ll_thread_info *lti; - - lti = lu_context_key_get(&env->le_ctx, &ll_thread_key); - LASSERT(lti); - return lti; -} - -static inline struct vvp_io_args *ll_env_args(const struct lu_env *env) -{ - return &ll_env_info(env)->lti_args; -} - -/* llite/llite_mmap.c */ - -int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last); -int ll_file_mmap(struct file *file, struct vm_area_struct *vma); -void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma, - unsigned long addr, size_t count); -struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, - size_t count); - -static inline void ll_invalidate_page(struct page *vmpage) -{ - struct address_space *mapping = vmpage->mapping; - loff_t offset = vmpage->index << PAGE_SHIFT; - - LASSERT(PageLocked(vmpage)); - if (!mapping) - return; - - /* - * truncate_complete_page() calls - * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete(). 
- */ - ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE); - truncate_complete_page(mapping, vmpage); -} - -#define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi) - -/* don't need an addref as the sb_info should be holding one */ -static inline struct obd_export *ll_s2dtexp(struct super_block *sb) -{ - return ll_s2sbi(sb)->ll_dt_exp; -} - -/* don't need an addref as the sb_info should be holding one */ -static inline struct obd_export *ll_s2mdexp(struct super_block *sb) -{ - return ll_s2sbi(sb)->ll_md_exp; -} - -static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi) -{ - struct obd_device *obd = sbi->ll_md_exp->exp_obd; - - if (!obd) - LBUG(); - return &obd->u.cli; -} - -/* FIXME: replace the name of this with LL_SB to conform to kernel stuff */ -static inline struct ll_sb_info *ll_i2sbi(struct inode *inode) -{ - return ll_s2sbi(inode->i_sb); -} - -static inline struct obd_export *ll_i2dtexp(struct inode *inode) -{ - return ll_s2dtexp(inode->i_sb); -} - -static inline struct obd_export *ll_i2mdexp(struct inode *inode) -{ - return ll_s2mdexp(inode->i_sb); -} - -static inline struct lu_fid *ll_inode2fid(struct inode *inode) -{ - struct lu_fid *fid; - - LASSERT(inode); - fid = &ll_i2info(inode)->lli_fid; - - return fid; -} - -static inline loff_t ll_file_maxbytes(struct inode *inode) -{ - struct cl_object *obj = ll_i2info(inode)->lli_clob; - - if (!obj) - return MAX_LFS_FILESIZE; - - return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE); -} - -/* llite/xattr.c */ -extern const struct xattr_handler *ll_xattr_handlers[]; - -#define XATTR_USER_T 1 -#define XATTR_TRUSTED_T 2 -#define XATTR_SECURITY_T 3 -#define XATTR_ACL_ACCESS_T 4 -#define XATTR_ACL_DEFAULT_T 5 -#define XATTR_LUSTRE_T 6 -#define XATTR_OTHER_T 7 - -ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); -int ll_xattr_list(struct inode *inode, const char *name, int type, - void *buffer, size_t size, __u64 valid); -const struct xattr_handler *get_xattr_type(const char *name); - 
-/** - * Common IO arguments for various VFS I/O interfaces. - */ -int cl_sb_init(struct super_block *sb); -int cl_sb_fini(struct super_block *sb); - -enum ras_update_flags { - LL_RAS_HIT = 0x1, - LL_RAS_MMAP = 0x2 -}; -void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len); -void ll_ra_stats_inc(struct inode *inode, enum ra_stat which); - -/* statahead.c */ -#define LL_SA_RPC_MIN 2 -#define LL_SA_RPC_DEF 32 -#define LL_SA_RPC_MAX 8192 - -#define LL_SA_CACHE_BIT 5 -#define LL_SA_CACHE_SIZE (1 << LL_SA_CACHE_BIT) -#define LL_SA_CACHE_MASK (LL_SA_CACHE_SIZE - 1) - -/* per inode struct, for dir only */ -struct ll_statahead_info { - struct dentry *sai_dentry; - atomic_t sai_refcount; /* when access this struct, hold - * refcount - */ - unsigned int sai_max; /* max ahead of lookup */ - __u64 sai_sent; /* stat requests sent count */ - __u64 sai_replied; /* stat requests which received - * reply - */ - __u64 sai_index; /* index of statahead entry */ - __u64 sai_index_wait; /* index of entry which is the - * caller is waiting for - */ - __u64 sai_hit; /* hit count */ - __u64 sai_miss; /* miss count: - * for "ls -al" case, it includes - * hidden dentry miss; - * for "ls -l" case, it does not - * include hidden dentry miss. - * "sai_miss_hidden" is used for - * the later case. 
- */ - unsigned int sai_consecutive_miss; /* consecutive miss */ - unsigned int sai_miss_hidden;/* "ls -al", but first dentry - * is not a hidden one - */ - unsigned int sai_skip_hidden;/* skipped hidden dentry count */ - unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for - * hidden entries - */ - sai_agl_valid:1,/* AGL is valid for the dir */ - sai_in_readpage:1;/* statahead in readdir() */ - wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ - struct task_struct *sai_task; /* stat-ahead thread */ - struct task_struct *sai_agl_task; /* AGL thread */ - struct list_head sai_interim_entries; /* entries which got async - * stat reply, but not - * instantiated - */ - struct list_head sai_entries; /* completed entries */ - struct list_head sai_agls; /* AGLs to be sent */ - struct list_head sai_cache[LL_SA_CACHE_SIZE]; - spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE]; - atomic_t sai_cache_count; /* entry count in cache */ -}; - -int ll_statahead(struct inode *dir, struct dentry **dentry, bool unplug); -void ll_authorize_statahead(struct inode *dir, void *key); -void ll_deauthorize_statahead(struct inode *dir, void *key); - -blkcnt_t dirty_cnt(struct inode *inode); - -int cl_glimpse_size0(struct inode *inode, int agl); -int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, - struct inode *inode, struct cl_object *clob, int agl); - -static inline int cl_glimpse_size(struct inode *inode) -{ - return cl_glimpse_size0(inode, 0); -} - -static inline int cl_agl(struct inode *inode) -{ - return cl_glimpse_size0(inode, 1); -} - -static inline int ll_glimpse_size(struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - int rc; - - down_read(&lli->lli_glimpse_sem); - rc = cl_glimpse_size(inode); - lli->lli_glimpse_time = jiffies; - up_read(&lli->lli_glimpse_sem); - return rc; -} - -/* - * dentry may statahead when statahead is enabled and current process has opened - * parent directory, and this dentry hasn't accessed statahead cache before - */ 
-static inline bool -dentry_may_statahead(struct inode *dir, struct dentry *dentry) -{ - struct ll_inode_info *lli; - struct ll_dentry_data *ldd; - - if (ll_i2sbi(dir)->ll_sa_max == 0) - return false; - - lli = ll_i2info(dir); - - /* - * statahead is not allowed for this dir, there may be three causes: - * 1. dir is not opened. - * 2. statahead hit ratio is too low. - * 3. previous stat started statahead thread failed. - */ - if (!lli->lli_sa_enabled) - return false; - - /* not the same process, don't statahead */ - if (lli->lli_opendir_pid != current->pid) - return false; - - /* - * When stating a dentry, kernel may trigger 'revalidate' or 'lookup' - * multiple times, eg. for 'getattr', 'getxattr' and etc. - * For patchless client, lookup intent is not accurate, which may - * misguide statahead. For example: - * The 'revalidate' call for 'getattr' and 'getxattr' of a dentry will - * have the same intent -- IT_GETATTR, while one dentry should access - * statahead cache once, otherwise statahead windows is messed up. - * The solution is as following: - * Assign 'lld_sa_generation' with 'lli_sa_generation' when a dentry - * IT_GETATTR for the first time, and subsequent IT_GETATTR will - * bypass interacting with statahead cache by checking - * 'lld_sa_generation == lli->lli_sa_generation'. - */ - ldd = ll_d2d(dentry); - if (ldd->lld_sa_generation == lli->lli_sa_generation) - return false; - - return true; -} - -/* llite ioctl register support routine */ -enum llioc_iter { - LLIOC_CONT = 0, - LLIOC_STOP -}; - -#define LLIOC_MAX_CMD 256 - -/* - * Rules to write a callback function: - * - * Parameters: - * @magic: Dynamic ioctl call routine will feed this value with the pointer - * returned to ll_iocontrol_register. Callback functions should use this - * data to check the potential collasion of ioctl cmd. If collasion is - * found, callback function should return LLIOC_CONT. - * @rcp: The result of ioctl command. 
- * - * Return values: - * If @magic matches the pointer returned by ll_iocontrol_data, the - * callback should return LLIOC_STOP; return LLIOC_STOP otherwise. - */ -typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode, - struct file *file, unsigned int cmd, unsigned long arg, - void *magic, int *rcp); - -/* export functions */ -/* Register ioctl block dynamatically for a regular file. - * - * @cmd: the array of ioctl command set - * @count: number of commands in the @cmd - * @cb: callback function, it will be called if an ioctl command is found to - * belong to the command list @cmd. - * - * Return value: - * A magic pointer will be returned if success; - * otherwise, NULL will be returned. - */ -void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd); -void ll_iocontrol_unregister(void *magic); - -int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, - enum cl_fsync_mode mode, int ignore_layout); - -/** direct write pages */ -struct ll_dio_pages { - /** page array to be written. we don't support - * partial pages except the last one. - */ - struct page **ldp_pages; - /* offset of each page */ - loff_t *ldp_offsets; - /** if ldp_offsets is NULL, it means a sequential - * pages to be written, then this is the file offset - * of the first page. - */ - loff_t ldp_start_offset; - /** how many bytes are to be written. */ - size_t ldp_size; - /** # of pages in the array. 
*/ - int ldp_nr; -}; - -ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, - int rw, struct inode *inode, - struct ll_dio_pages *pv); - -static inline int ll_file_nolock(const struct file *file) -{ - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct inode *inode = file_inode(file); - - return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) || - (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK)); -} - -static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, - struct lookup_intent *it, __u64 *bits) -{ - if (!it->it_lock_set) { - struct lustre_handle handle; - - /* If this inode is a remote object, it will get two - * separate locks in different namespaces, Master MDT, - * where the name entry is, will grant LOOKUP lock, - * remote MDT, where the object is, will grant - * UPDATE|PERM lock. The inode will be attached to both - * LOOKUP and PERM locks, so revoking either locks will - * case the dcache being cleared - */ - if (it->it_remote_lock_mode) { - handle.cookie = it->it_remote_lock_handle; - CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "%p for remote lock %#llx\n", - PFID(ll_inode2fid(inode)), inode, - handle.cookie); - md_set_lock_data(exp, &handle, inode, NULL); - } - - handle.cookie = it->it_lock_handle; - - CDEBUG(D_DLMTRACE, - "setting l_data to inode " DFID "%p for lock %#llx\n", - PFID(ll_inode2fid(inode)), inode, handle.cookie); - - md_set_lock_data(exp, &handle, inode, &it->it_lock_bits); - it->it_lock_set = 1; - } - - if (bits) - *bits = it->it_lock_bits; -} - -static inline int d_lustre_invalid(const struct dentry *dentry) -{ - return ll_d2d(dentry)->lld_invalid; -} - -/* - * Mark dentry INVALID, if dentry refcount is zero (this is normally case for - * ll_md_blocking_ast), unhash this dentry, and let dcache to reclaim it later; - * else dput() of the last refcount will unhash this dentry and kill it. 
- */ -static inline void d_lustre_invalidate(struct dentry *dentry, int nested) -{ - CDEBUG(D_DENTRY, - "invalidate dentry %pd (%p) parent %p inode %p refc %d\n", - dentry, dentry, - dentry->d_parent, d_inode(dentry), d_count(dentry)); - - spin_lock_nested(&dentry->d_lock, - nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL); - ll_d2d(dentry)->lld_invalid = 1; - if (d_count(dentry) == 0) - __d_drop(dentry); - spin_unlock(&dentry->d_lock); -} - -static inline void d_lustre_revalidate(struct dentry *dentry) -{ - spin_lock(&dentry->d_lock); - LASSERT(ll_d2d(dentry)); - ll_d2d(dentry)->lld_invalid = 0; - spin_unlock(&dentry->d_lock); -} - -int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf); -int ll_layout_refresh(struct inode *inode, __u32 *gen); -int ll_layout_restore(struct inode *inode, loff_t start, __u64 length); - -int ll_xattr_init(void); -void ll_xattr_fini(void); - -int ll_page_sync_io(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, enum cl_req_type crt); - -int ll_getparent(struct file *file, struct getparent __user *arg); - -/* lcommon_cl.c */ -int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr, - unsigned int attr_flags); - -extern struct lu_env *cl_inode_fini_env; -extern u16 cl_inode_fini_refcheck; - -int cl_file_inode_init(struct inode *inode, struct lustre_md *md); -void cl_inode_fini(struct inode *inode); - -__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32); -__u32 cl_fid_build_gen(const struct lu_fid *fid); - -#endif /* LLITE_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c deleted file mode 100644 index 36066c839160..000000000000 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ /dev/null @@ -1,2668 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/llite/llite_lib.c - * - * Lustre Light Super operations - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "llite_internal.h" - -struct kmem_cache *ll_file_data_slab; -struct dentry *llite_root; -struct kset *llite_kset; - -#ifndef log2 -#define log2(n) ffz(~(n)) -#endif - -static struct ll_sb_info *ll_init_sbi(struct super_block *sb) -{ - struct ll_sb_info *sbi = NULL; - unsigned long pages; - unsigned long lru_page_max; - struct sysinfo si; - class_uuid_t uuid; - int i; - - sbi = kzalloc(sizeof(*sbi), GFP_NOFS); - if (!sbi) - return NULL; - - spin_lock_init(&sbi->ll_lock); - mutex_init(&sbi->ll_lco.lco_lock); - spin_lock_init(&sbi->ll_pp_extent_lock); - spin_lock_init(&sbi->ll_process_lock); - sbi->ll_rw_stats_on = 0; - - si_meminfo(&si); - pages = si.totalram - si.totalhigh; - lru_page_max = pages / 2; - - sbi->ll_cache = cl_cache_init(lru_page_max); - if (!sbi->ll_cache) { - kfree(sbi); - return NULL; - } - - sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32, - SBI_DEFAULT_READAHEAD_MAX); - sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file; - sbi->ll_ra_info.ra_max_read_ahead_whole_pages = - SBI_DEFAULT_READAHEAD_WHOLE_MAX; - - ll_generate_random_uuid(uuid); - class_uuid_unparse(uuid, &sbi->ll_sb_uuid); - CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid); - - sbi->ll_flags |= LL_SBI_VERBOSE; - sbi->ll_flags |= LL_SBI_CHECKSUM; - - sbi->ll_flags |= LL_SBI_LRU_RESIZE; - sbi->ll_flags |= LL_SBI_LAZYSTATFS; - - for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) { - spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. - pp_r_hist.oh_lock); - spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. 
- pp_w_hist.oh_lock); - } - - /* metadata statahead is enabled by default */ - sbi->ll_sa_max = LL_SA_RPC_DEF; - atomic_set(&sbi->ll_sa_total, 0); - atomic_set(&sbi->ll_sa_wrong, 0); - atomic_set(&sbi->ll_sa_running, 0); - atomic_set(&sbi->ll_agl_total, 0); - sbi->ll_flags |= LL_SBI_AGL_ENABLED; - - /* root squash */ - sbi->ll_squash.rsi_uid = 0; - sbi->ll_squash.rsi_gid = 0; - INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids); - init_rwsem(&sbi->ll_squash.rsi_sem); - - sbi->ll_sb = sb; - - return sbi; -} - -static void ll_free_sbi(struct super_block *sb) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - - if (sbi->ll_cache) { - if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids)) - cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids); - cl_cache_decref(sbi->ll_cache); - sbi->ll_cache = NULL; - } - - kfree(sbi); -} - -static int client_common_fill_super(struct super_block *sb, char *md, char *dt) -{ - struct inode *root = NULL; - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct obd_device *obd; - struct obd_statfs *osfs = NULL; - struct ptlrpc_request *request = NULL; - struct obd_connect_data *data = NULL; - struct obd_uuid *uuid; - struct md_op_data *op_data; - struct lustre_md lmd; - u64 valid; - int size, err, checksum; - - obd = class_name2obd(md); - if (!obd) { - CERROR("MD %s: not setup or attached\n", md); - return -EINVAL; - } - - data = kzalloc(sizeof(*data), GFP_NOFS); - if (!data) - return -ENOMEM; - - osfs = kzalloc(sizeof(*osfs), GFP_NOFS); - if (!osfs) { - kfree(data); - return -ENOMEM; - } - - /* indicate the features supported by this client */ - data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH | - OBD_CONNECT_ATTRFID | - OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE | - OBD_CONNECT_CANCELSET | OBD_CONNECT_FID | - OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 | - OBD_CONNECT_VBR | OBD_CONNECT_FULL20 | - OBD_CONNECT_64BITHASH | - OBD_CONNECT_EINPROGRESS | - OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE | - OBD_CONNECT_LAYOUTLOCK | - OBD_CONNECT_PINGLESS 
| - OBD_CONNECT_MAX_EASIZE | - OBD_CONNECT_FLOCK_DEAD | - OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | - OBD_CONNECT_OPEN_BY_FID | - OBD_CONNECT_DIR_STRIPE | - OBD_CONNECT_BULK_MBITS; - - if (sbi->ll_flags & LL_SBI_LRU_RESIZE) - data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE; -#ifdef CONFIG_FS_POSIX_ACL - data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK | - OBD_CONNECT_LARGE_ACL; -#endif - - if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT)) - /* flag mdc connection as lightweight, only used for test - * purpose, use with care - */ - data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT; - - data->ocd_ibits_known = MDS_INODELOCK_FULL; - data->ocd_version = LUSTRE_VERSION_CODE; - - if (sb_rdonly(sb)) - data->ocd_connect_flags |= OBD_CONNECT_RDONLY; - if (sbi->ll_flags & LL_SBI_USER_XATTR) - data->ocd_connect_flags |= OBD_CONNECT_XATTR; - - if (sbi->ll_flags & LL_SBI_FLOCK) - sbi->ll_fop = &ll_file_operations_flock; - else if (sbi->ll_flags & LL_SBI_LOCALFLOCK) - sbi->ll_fop = &ll_file_operations; - else - sbi->ll_fop = &ll_file_operations_noflock; - - /* always ping even if server suppress_pings */ - if (sbi->ll_flags & LL_SBI_ALWAYS_PING) - data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS; - - data->ocd_brw_size = MD_MAX_BRW_SIZE; - - err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, - data, NULL); - if (err == -EBUSY) { - LCONSOLE_ERROR_MSG(0x14f, - "An MDT (md %s) is performing recovery, of which this client is not a part. 
Please wait for recovery to complete, abort, or time out.\n", - md); - goto out; - } - - if (err) { - CERROR("cannot connect to %s: rc = %d\n", md, err); - goto out; - } - - sbi->ll_md_exp->exp_connect_data = *data; - - err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp, - LUSTRE_SEQ_METADATA); - if (err) { - CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n", - sbi->ll_md_exp->exp_obd->obd_name, err); - goto out_md; - } - - /* For mount, we only need fs info from MDT0, and also in DNE, it - * can make sure the client can be mounted as long as MDT0 is - * available - */ - err = obd_statfs(NULL, sbi->ll_md_exp, osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_FOR_MDT0); - if (err) - goto out_md_fid; - - /* This needs to be after statfs to ensure connect has finished. - * Note that "data" does NOT contain the valid connect reply. - * If connecting to a 1.8 server there will be no LMV device, so - * we can access the MDC export directly and exp_connect_flags will - * be non-zero, but if accessing an upgraded 2.1 server it will - * have the correct flags filled in. - * XXX: fill in the LMV exp_connect_flags from MDC(s). - */ - valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD; - if (exp_connect_flags(sbi->ll_md_exp) != 0 && - valid != CLIENT_CONNECT_MDT_REQD) { - char *buf; - - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) { - err = -ENOMEM; - goto out_md_fid; - } - obd_connect_flags2str(buf, PAGE_SIZE, - valid ^ CLIENT_CONNECT_MDT_REQD, ","); - LCONSOLE_ERROR_MSG(0x170, - "Server %s does not support feature(s) needed for correct operation of this client (%s). 
Please upgrade server or downgrade client.\n", - sbi->ll_md_exp->exp_obd->obd_name, buf); - kfree(buf); - err = -EPROTO; - goto out_md_fid; - } - - size = sizeof(*data); - err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA), - KEY_CONN_DATA, &size, data); - if (err) { - CERROR("%s: Get connect data failed: rc = %d\n", - sbi->ll_md_exp->exp_obd->obd_name, err); - goto out_md_fid; - } - - LASSERT(osfs->os_bsize); - sb->s_blocksize = osfs->os_bsize; - sb->s_blocksize_bits = log2(osfs->os_bsize); - sb->s_magic = LL_SUPER_MAGIC; - sb->s_maxbytes = MAX_LFS_FILESIZE; - sbi->ll_namelen = osfs->os_namelen; - sbi->ll_mnt.mnt = current->fs->root.mnt; - - if ((sbi->ll_flags & LL_SBI_USER_XATTR) && - !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) { - LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n"); - sbi->ll_flags &= ~LL_SBI_USER_XATTR; - } - - if (data->ocd_connect_flags & OBD_CONNECT_ACL) { - sb->s_flags |= SB_POSIXACL; - sbi->ll_flags |= LL_SBI_ACL; - } else { - LCONSOLE_INFO("client wants to enable acl, but mdt not!\n"); - sb->s_flags &= ~SB_POSIXACL; - sbi->ll_flags &= ~LL_SBI_ACL; - } - - if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH) - sbi->ll_flags |= LL_SBI_64BIT_HASH; - - if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) - sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT; - else - sbi->ll_md_brw_pages = 1; - - if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) - sbi->ll_flags |= LL_SBI_LAYOUT_LOCK; - - if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) { - if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) { - LCONSOLE_INFO( - "%s: disabling xattr cache due to unknown maximum xattr size.\n", - dt); - } else { - sbi->ll_flags |= LL_SBI_XATTR_CACHE; - sbi->ll_xattr_cache_enabled = 1; - } - } - - obd = class_name2obd(dt); - if (!obd) { - CERROR("DT %s: not setup or attached\n", dt); - err = -ENODEV; - goto out_md_fid; - } - - data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION | - 
OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE | - OBD_CONNECT_CANCELSET | OBD_CONNECT_FID | - OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK| - OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA | - OBD_CONNECT_VBR | OBD_CONNECT_FULL20 | - OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | - OBD_CONNECT_EINPROGRESS | - OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE | - OBD_CONNECT_LAYOUTLOCK | - OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK | - OBD_CONNECT_BULK_MBITS; - - if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) { - /* OBD_CONNECT_CKSUM should always be set, even if checksums are - * disabled by default, because it can still be enabled on the - * fly via /sys. As a consequence, we still need to come to an - * agreement on the supported algorithms at connect time - */ - data->ocd_connect_flags |= OBD_CONNECT_CKSUM; - - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY)) - data->ocd_cksum_types = OBD_CKSUM_ADLER; - else - data->ocd_cksum_types = cksum_types_supported_client(); - } - - data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE; - - /* always ping even if server suppress_pings */ - if (sbi->ll_flags & LL_SBI_ALWAYS_PING) - data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS; - - CDEBUG(D_RPCTRACE, - "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n", - data->ocd_connect_flags, - data->ocd_version, data->ocd_grant); - - obd->obd_upcall.onu_owner = &sbi->ll_lco; - obd->obd_upcall.onu_upcall = cl_ocd_update; - - data->ocd_brw_size = DT_MAX_BRW_SIZE; - - err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data, - NULL); - if (err == -EBUSY) { - LCONSOLE_ERROR_MSG(0x150, - "An OST (dt %s) is performing recovery, of which this client is not a part. 
Please wait for recovery to complete, abort, or time out.\n", - dt); - goto out_md_fid; - } else if (err) { - CERROR("%s: Cannot connect to %s: rc = %d\n", - sbi->ll_dt_exp->exp_obd->obd_name, dt, err); - goto out_md_fid; - } - - sbi->ll_dt_exp->exp_connect_data = *data; - - err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp, - LUSTRE_SEQ_METADATA); - if (err) { - CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n", - sbi->ll_dt_exp->exp_obd->obd_name, err); - goto out_dt; - } - - mutex_lock(&sbi->ll_lco.lco_lock); - sbi->ll_lco.lco_flags = data->ocd_connect_flags; - sbi->ll_lco.lco_md_exp = sbi->ll_md_exp; - sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp; - mutex_unlock(&sbi->ll_lco.lco_lock); - - fid_zero(&sbi->ll_root_fid); - err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid); - if (err) { - CERROR("cannot mds_connect: rc = %d\n", err); - goto out_lock_cn_cb; - } - if (!fid_is_sane(&sbi->ll_root_fid)) { - CERROR("%s: Invalid root fid " DFID " during mount\n", - sbi->ll_md_exp->exp_obd->obd_name, - PFID(&sbi->ll_root_fid)); - err = -EINVAL; - goto out_lock_cn_cb; - } - CDEBUG(D_SUPER, "rootfid " DFID "\n", PFID(&sbi->ll_root_fid)); - - sb->s_op = &lustre_super_operations; - sb->s_xattr = ll_xattr_handlers; -#if THREAD_SIZE >= 8192 /*b=17630*/ - sb->s_export_op = &lustre_export_operations; -#endif - - /* make root inode - * XXX: move this to after cbd setup? 
- */ - valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE; - if (sbi->ll_flags & LL_SBI_ACL) - valid |= OBD_MD_FLACL; - - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) { - err = -ENOMEM; - goto out_lock_cn_cb; - } - - op_data->op_fid1 = sbi->ll_root_fid; - op_data->op_mode = 0; - op_data->op_valid = valid; - - err = md_getattr(sbi->ll_md_exp, op_data, &request); - kfree(op_data); - if (err) { - CERROR("%s: md_getattr failed for root: rc = %d\n", - sbi->ll_md_exp->exp_obd->obd_name, err); - goto out_lock_cn_cb; - } - - err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp, - sbi->ll_md_exp, &lmd); - if (err) { - CERROR("failed to understand root inode md: rc = %d\n", err); - ptlrpc_req_finished(request); - goto out_lock_cn_cb; - } - - LASSERT(fid_is_sane(&sbi->ll_root_fid)); - root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid, - sbi->ll_flags & LL_SBI_32BIT_API), - &lmd); - md_free_lustre_md(sbi->ll_md_exp, &lmd); - ptlrpc_req_finished(request); - - if (IS_ERR(root)) { -#ifdef CONFIG_FS_POSIX_ACL - if (lmd.posix_acl) { - posix_acl_release(lmd.posix_acl); - lmd.posix_acl = NULL; - } -#endif - err = -EBADF; - CERROR("lustre_lite: bad iget4 for root\n"); - goto out_root; - } - - checksum = sbi->ll_flags & LL_SBI_CHECKSUM; - err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM), - KEY_CHECKSUM, sizeof(checksum), &checksum, - NULL); - if (err) { - CERROR("%s: Set checksum failed: rc = %d\n", - sbi->ll_dt_exp->exp_obd->obd_name, err); - goto out_root; - } - cl_sb_init(sb); - - err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET), - KEY_CACHE_SET, sizeof(*sbi->ll_cache), - sbi->ll_cache, NULL); - if (err) { - CERROR("%s: Set cache_set failed: rc = %d\n", - sbi->ll_dt_exp->exp_obd->obd_name, err); - goto out_root; - } - - sb->s_root = d_make_root(root); - if (!sb->s_root) { - CERROR("%s: can't make root dentry\n", - ll_get_fsname(sb, NULL, 0)); - err = -ENOMEM; - goto out_lock_cn_cb; - } - - 
sbi->ll_sdev_orig = sb->s_dev; - - /* We set sb->s_dev equal on all lustre clients in order to support - * NFS export clustering. NFSD requires that the FSID be the same - * on all clients. - */ - /* s_dev is also used in lt_compare() to compare two fs, but that is - * only a node-local comparison. - */ - uuid = obd_get_uuid(sbi->ll_md_exp); - if (uuid) { - sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid)); - get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid); - } - - kfree(data); - kfree(osfs); - - if (llite_root) { - err = ldebugfs_register_mountpoint(llite_root, sb, dt, md); - if (err < 0) { - CERROR("%s: could not register mount in debugfs: " - "rc = %d\n", ll_get_fsname(sb, NULL, 0), err); - err = 0; - } - } - - return err; -out_root: - iput(root); -out_lock_cn_cb: - obd_fid_fini(sbi->ll_dt_exp->exp_obd); -out_dt: - obd_disconnect(sbi->ll_dt_exp); - sbi->ll_dt_exp = NULL; -out_md_fid: - obd_fid_fini(sbi->ll_md_exp->exp_obd); -out_md: - obd_disconnect(sbi->ll_md_exp); - sbi->ll_md_exp = NULL; -out: - kfree(data); - kfree(osfs); - return err; -} - -int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize) -{ - int size, rc; - - size = sizeof(*lmmsize); - rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE), - KEY_MAX_EASIZE, &size, lmmsize); - if (rc) { - CERROR("%s: cannot get max LOV EA size: rc = %d\n", - sbi->ll_dt_exp->exp_obd->obd_name, rc); - return rc; - } - - size = sizeof(int); - rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE), - KEY_MAX_EASIZE, &size, lmmsize); - if (rc) - CERROR("Get max mdsize error rc %d\n", rc); - - return rc; -} - -/** - * Get the value of the default_easize parameter. 
- * - * \see client_obd::cl_default_mds_easize - * - * \param[in] sbi superblock info for this filesystem - * \param[out] lmmsize pointer to storage location for value - * - * \retval 0 on success - * \retval negative negated errno on failure - */ -int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize) -{ - int size, rc; - - size = sizeof(int); - rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE), - KEY_DEFAULT_EASIZE, &size, lmmsize); - if (rc) - CERROR("Get default mdsize error rc %d\n", rc); - - return rc; -} - -/** - * Set the default_easize parameter to the given value. - * - * \see client_obd::cl_default_mds_easize - * - * \param[in] sbi superblock info for this filesystem - * \param[in] lmmsize the size to set - * - * \retval 0 on success - * \retval negative negated errno on failure - */ -int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize) -{ - if (lmmsize < sizeof(struct lov_mds_md) || - lmmsize > OBD_MAX_DEFAULT_EA_SIZE) - return -EINVAL; - - return obd_set_info_async(NULL, sbi->ll_md_exp, - sizeof(KEY_DEFAULT_EASIZE), - KEY_DEFAULT_EASIZE, - sizeof(int), &lmmsize, NULL); -} - -static void client_common_put_super(struct super_block *sb) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - - cl_sb_fini(sb); - - obd_fid_fini(sbi->ll_dt_exp->exp_obd); - obd_disconnect(sbi->ll_dt_exp); - sbi->ll_dt_exp = NULL; - - ldebugfs_unregister_mountpoint(sbi); - - obd_fid_fini(sbi->ll_md_exp->exp_obd); - obd_disconnect(sbi->ll_md_exp); - sbi->ll_md_exp = NULL; -} - -void ll_kill_super(struct super_block *sb) -{ - struct ll_sb_info *sbi; - - /* not init sb ?*/ - if (!(sb->s_flags & SB_ACTIVE)) - return; - - sbi = ll_s2sbi(sb); - /* we need to restore s_dev from changed for clustered NFS before - * put_super because new kernels have cached s_dev and change sb->s_dev - * in put_super not affected real removing devices - */ - if (sbi) { - sb->s_dev = sbi->ll_sdev_orig; - sbi->ll_umounting = 1; - - /* wait running statahead threads to quit */ - 
while (atomic_read(&sbi->ll_sa_running) > 0) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3)); - } - } -} - -static inline int ll_set_opt(const char *opt, char *data, int fl) -{ - if (strncmp(opt, data, strlen(opt)) != 0) - return 0; - else - return fl; -} - -/* non-client-specific mount options are parsed in lmd_parse */ -static int ll_options(char *options, int *flags) -{ - int tmp; - char *s1 = options, *s2; - - if (!options) - return 0; - - CDEBUG(D_CONFIG, "Parsing opts %s\n", options); - - while (*s1) { - CDEBUG(D_SUPER, "next opt=%s\n", s1); - tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("noflock", s1, - LL_SBI_FLOCK | LL_SBI_LOCALFLOCK); - if (tmp) { - *flags &= ~tmp; - goto next; - } - tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR); - if (tmp) { - *flags &= ~tmp; - goto next; - } - tmp = ll_set_opt("context", s1, 1); - if (tmp) - goto next; - tmp = ll_set_opt("fscontext", s1, 1); - if (tmp) - goto next; - tmp = ll_set_opt("defcontext", s1, 1); - if (tmp) - goto next; - tmp = ll_set_opt("rootcontext", s1, 1); - if (tmp) - goto next; - tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH); - if (tmp) { - *flags &= ~tmp; - goto next; - } - - tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM); - if (tmp) { - *flags &= ~tmp; - goto next; - } - tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE); - if (tmp) { - *flags |= 
tmp; - goto next; - } - tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE); - if (tmp) { - *flags &= ~tmp; - goto next; - } - tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS); - if (tmp) { - *flags &= ~tmp; - goto next; - } - tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE); - if (tmp) { - *flags &= ~tmp; - goto next; - } - tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING); - if (tmp) { - *flags |= tmp; - goto next; - } - LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n", - s1); - return -EINVAL; - -next: - /* Find next opt */ - s2 = strchr(s1, ','); - if (!s2) - break; - s1 = s2 + 1; - } - return 0; -} - -void ll_lli_init(struct ll_inode_info *lli) -{ - lli->lli_inode_magic = LLI_INODE_MAGIC; - lli->lli_flags = 0; - spin_lock_init(&lli->lli_lock); - lli->lli_posix_acl = NULL; - /* Do not set lli_fid, it has been initialized already. 
*/ - fid_zero(&lli->lli_pfid); - lli->lli_mds_read_och = NULL; - lli->lli_mds_write_och = NULL; - lli->lli_mds_exec_och = NULL; - lli->lli_open_fd_read_count = 0; - lli->lli_open_fd_write_count = 0; - lli->lli_open_fd_exec_count = 0; - mutex_init(&lli->lli_och_mutex); - spin_lock_init(&lli->lli_agl_lock); - spin_lock_init(&lli->lli_layout_lock); - ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE); - lli->lli_clob = NULL; - - init_rwsem(&lli->lli_xattrs_list_rwsem); - mutex_init(&lli->lli_xattrs_enq_lock); - - LASSERT(lli->lli_vfs_inode.i_mode != 0); - if (S_ISDIR(lli->lli_vfs_inode.i_mode)) { - mutex_init(&lli->lli_readdir_mutex); - lli->lli_opendir_key = NULL; - lli->lli_sai = NULL; - spin_lock_init(&lli->lli_sa_lock); - lli->lli_opendir_pid = 0; - lli->lli_sa_enabled = 0; - lli->lli_def_stripe_offset = -1; - } else { - mutex_init(&lli->lli_size_mutex); - lli->lli_symlink_name = NULL; - init_rwsem(&lli->lli_trunc_sem); - range_lock_tree_init(&lli->lli_write_tree); - init_rwsem(&lli->lli_glimpse_sem); - lli->lli_glimpse_time = 0; - INIT_LIST_HEAD(&lli->lli_agl_list); - lli->lli_agl_index = 0; - lli->lli_async_rc = 0; - } - mutex_init(&lli->lli_layout_mutex); -} - -int ll_fill_super(struct super_block *sb) -{ - struct lustre_profile *lprof = NULL; - struct lustre_sb_info *lsi = s2lsi(sb); - struct ll_sb_info *sbi; - char *dt = NULL, *md = NULL; - char *profilenm = get_profile_name(sb); - struct config_llog_instance *cfg; - int err; - static atomic_t ll_bdi_num = ATOMIC_INIT(0); - - CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb); - - err = ptlrpc_inc_ref(); - if (err) - return err; - - cfg = kzalloc(sizeof(*cfg), GFP_NOFS); - if (!cfg) { - err = -ENOMEM; - goto out_put; - } - - try_module_get(THIS_MODULE); - - /* client additional sb info */ - sbi = ll_init_sbi(sb); - lsi->lsi_llsbi = sbi; - if (!sbi) { - module_put(THIS_MODULE); - kfree(cfg); - err = -ENOMEM; - goto out_put; - } - - err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags); - if (err) - goto out_free; - - 
err = super_setup_bdi_name(sb, "lustre-%d", - atomic_inc_return(&ll_bdi_num)); - if (err) - goto out_free; - - /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */ - sb->s_d_op = &ll_d_ops; - - /* Generate a string unique to this super, in case some joker tries - * to mount the same fs at two mount points. - * Use the address of the super itself. - */ - cfg->cfg_instance = sb; - cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid; - cfg->cfg_callback = class_config_llog_handler; - /* set up client obds */ - err = lustre_process_log(sb, profilenm, cfg); - if (err < 0) - goto out_free; - - /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */ - lprof = class_get_profile(profilenm); - if (!lprof) { - LCONSOLE_ERROR_MSG(0x156, - "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n", - profilenm); - err = -EINVAL; - goto out_free; - } - CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm, - lprof->lp_md, lprof->lp_dt); - - dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance); - if (!dt) { - err = -ENOMEM; - goto out_free; - } - - md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance); - if (!md) { - err = -ENOMEM; - goto out_free; - } - - /* connections, registrations, sb setup */ - err = client_common_fill_super(sb, md, dt); - if (!err) - sbi->ll_client_common_fill_super_succeeded = 1; - -out_free: - kfree(md); - kfree(dt); - if (lprof) - class_put_profile(lprof); - if (err) - ll_put_super(sb); - else if (sbi->ll_flags & LL_SBI_VERBOSE) - LCONSOLE_WARN("Mounted %s\n", profilenm); - - kfree(cfg); -out_put: - if (err) - ptlrpc_dec_ref(); - return err; -} /* ll_fill_super */ - -void ll_put_super(struct super_block *sb) -{ - struct config_llog_instance cfg, params_cfg; - struct obd_device *obd; - struct lustre_sb_info *lsi = s2lsi(sb); - struct ll_sb_info *sbi = ll_s2sbi(sb); - char *profilenm = get_profile_name(sb); - int next, force = 1, rc = 0; - long ccc_count; - - 
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm); - - cfg.cfg_instance = sb; - lustre_end_log(sb, profilenm, &cfg); - - params_cfg.cfg_instance = sb; - lustre_end_log(sb, PARAMS_FILENAME, ¶ms_cfg); - - if (sbi->ll_md_exp) { - obd = class_exp2obd(sbi->ll_md_exp); - if (obd) - force = obd->obd_force; - } - - /* Wait for unstable pages to be committed to stable storage */ - if (!force) - rc = l_wait_event_abortable(sbi->ll_cache->ccc_unstable_waitq, - !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr)); - - ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr); - if (!force && rc != -ERESTARTSYS) - LASSERTF(!ccc_count, "count: %li\n", ccc_count); - - /* We need to set force before the lov_disconnect in - * lustre_common_put_super, since l_d cleans up osc's as well. - */ - if (force) { - next = 0; - while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, - &next)) != NULL) { - obd->obd_force = force; - } - } - - if (sbi->ll_client_common_fill_super_succeeded) { - /* Only if client_common_fill_super succeeded */ - client_common_put_super(sb); - } - - next = 0; - while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next))) - class_manual_cleanup(obd); - - if (sbi->ll_flags & LL_SBI_VERBOSE) - LCONSOLE_WARN("Unmounted %s\n", profilenm ? 
profilenm : ""); - - if (profilenm) - class_del_profile(profilenm); - - ll_free_sbi(sb); - lsi->lsi_llsbi = NULL; - - lustre_common_put_super(sb); - - cl_env_cache_purge(~0); - - module_put(THIS_MODULE); - - ptlrpc_dec_ref(); -} /* client_put_super */ - -struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock) -{ - struct inode *inode = NULL; - - /* NOTE: we depend on atomic igrab() -bzzz */ - lock_res_and_lock(lock); - if (lock->l_resource->lr_lvb_inode) { - struct ll_inode_info *lli; - - lli = ll_i2info(lock->l_resource->lr_lvb_inode); - if (lli->lli_inode_magic == LLI_INODE_MAGIC) { - inode = igrab(lock->l_resource->lr_lvb_inode); - } else { - inode = lock->l_resource->lr_lvb_inode; - LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO : - D_WARNING, lock, - "lr_lvb_inode %p is bogus: magic %08x", - lock->l_resource->lr_lvb_inode, - lli->lli_inode_magic); - inode = NULL; - } - } - unlock_res_and_lock(lock); - return inode; -} - -void ll_dir_clear_lsm_md(struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - - LASSERT(S_ISDIR(inode->i_mode)); - - if (lli->lli_lsm_md) { - lmv_free_memmd(lli->lli_lsm_md); - lli->lli_lsm_md = NULL; - } -} - -static struct inode *ll_iget_anon_dir(struct super_block *sb, - const struct lu_fid *fid, - struct lustre_md *md) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct mdt_body *body = md->body; - struct inode *inode; - ino_t ino; - - ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API); - inode = iget_locked(sb, ino); - if (!inode) { - CERROR("%s: failed get simple inode " DFID ": rc = -ENOENT\n", - ll_get_fsname(sb, NULL, 0), PFID(fid)); - return ERR_PTR(-ENOENT); - } - - if (inode->i_state & I_NEW) { - struct ll_inode_info *lli = ll_i2info(inode); - struct lmv_stripe_md *lsm = md->lmv; - - inode->i_mode = (inode->i_mode & ~S_IFMT) | - (body->mbo_mode & S_IFMT); - LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode " DFID "\n", - PFID(fid)); - - LTIME_S(inode->i_mtime) = 0; - 
LTIME_S(inode->i_atime) = 0; - LTIME_S(inode->i_ctime) = 0; - inode->i_rdev = 0; - - inode->i_op = &ll_dir_inode_operations; - inode->i_fop = &ll_dir_operations; - lli->lli_fid = *fid; - ll_lli_init(lli); - - LASSERT(lsm); - /* master object FID */ - lli->lli_pfid = body->mbo_fid1; - CDEBUG(D_INODE, "lli %p slave " DFID " master " DFID "\n", - lli, PFID(fid), PFID(&lli->lli_pfid)); - unlock_new_inode(inode); - } - - return inode; -} - -static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md) -{ - struct lmv_stripe_md *lsm = md->lmv; - struct lu_fid *fid; - int i; - - LASSERT(lsm); - /* - * XXX sigh, this lsm_root initialization should be in - * LMV layer, but it needs ll_iget right now, so we - * put this here right now. - */ - for (i = 0; i < lsm->lsm_md_stripe_count; i++) { - fid = &lsm->lsm_md_oinfo[i].lmo_fid; - LASSERT(!lsm->lsm_md_oinfo[i].lmo_root); - /* Unfortunately ll_iget will call ll_update_inode, - * where the initialization of slave inode is slightly - * different, so it reset lsm_md to NULL to avoid - * initializing lsm for slave inode. 
- */ - /* For migrating inode, master stripe and master object will - * be same, so we only need assign this inode - */ - if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i) - lsm->lsm_md_oinfo[i].lmo_root = inode; - else - lsm->lsm_md_oinfo[i].lmo_root = - ll_iget_anon_dir(inode->i_sb, fid, md); - if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) { - int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root); - - lsm->lsm_md_oinfo[i].lmo_root = NULL; - return rc; - } - } - - return 0; -} - -static inline int lli_lsm_md_eq(const struct lmv_stripe_md *lsm_md1, - const struct lmv_stripe_md *lsm_md2) -{ - return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic && - lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count && - lsm_md1->lsm_md_master_mdt_index == - lsm_md2->lsm_md_master_mdt_index && - lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type && - lsm_md1->lsm_md_layout_version == - lsm_md2->lsm_md_layout_version && - !strcmp(lsm_md1->lsm_md_pool_name, - lsm_md2->lsm_md_pool_name); -} - -static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct lmv_stripe_md *lsm = md->lmv; - int rc; - - LASSERT(S_ISDIR(inode->i_mode)); - CDEBUG(D_INODE, "update lsm %p of " DFID "\n", lli->lli_lsm_md, - PFID(ll_inode2fid(inode))); - - /* no striped information from request. 
*/ - if (!lsm) { - if (!lli->lli_lsm_md) { - return 0; - } else if (lli->lli_lsm_md->lsm_md_hash_type & - LMV_HASH_FLAG_MIGRATION) { - /* - * migration is done, the temporay MIGRATE layout has - * been removed - */ - CDEBUG(D_INODE, DFID " finish migration.\n", - PFID(ll_inode2fid(inode))); - lmv_free_memmd(lli->lli_lsm_md); - lli->lli_lsm_md = NULL; - return 0; - } - /* - * The lustre_md from req does not include stripeEA, - * see ll_md_setattr - */ - return 0; - } - - /* set the directory layout */ - if (!lli->lli_lsm_md) { - struct cl_attr *attr; - - rc = ll_init_lsm_md(inode, md); - if (rc) - return rc; - - /* - * set lsm_md to NULL, so the following free lustre_md - * will not free this lsm - */ - md->lmv = NULL; - lli->lli_lsm_md = lsm; - - attr = kzalloc(sizeof(*attr), GFP_NOFS); - if (!attr) - return -ENOMEM; - - /* validate the lsm */ - rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr, - ll_md_blocking_ast); - if (rc) { - kfree(attr); - return rc; - } - - if (md->body->mbo_valid & OBD_MD_FLNLINK) - md->body->mbo_nlink = attr->cat_nlink; - if (md->body->mbo_valid & OBD_MD_FLSIZE) - md->body->mbo_size = attr->cat_size; - if (md->body->mbo_valid & OBD_MD_FLATIME) - md->body->mbo_atime = attr->cat_atime; - if (md->body->mbo_valid & OBD_MD_FLCTIME) - md->body->mbo_ctime = attr->cat_ctime; - if (md->body->mbo_valid & OBD_MD_FLMTIME) - md->body->mbo_mtime = attr->cat_mtime; - - kfree(attr); - - CDEBUG(D_INODE, "Set lsm %p magic %x to " DFID "\n", lsm, - lsm->lsm_md_magic, PFID(ll_inode2fid(inode))); - return 0; - } - - /* Compare the old and new stripe information */ - if (!lsm_md_eq(lli->lli_lsm_md, lsm)) { - struct lmv_stripe_md *old_lsm = lli->lli_lsm_md; - int idx; - - CERROR("%s: inode " DFID "(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n", - ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), - inode, lsm, old_lsm, - lsm->lsm_md_magic, old_lsm->lsm_md_magic, 
- lsm->lsm_md_stripe_count, - old_lsm->lsm_md_stripe_count, - lsm->lsm_md_master_mdt_index, - old_lsm->lsm_md_master_mdt_index, - lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type, - lsm->lsm_md_layout_version, - old_lsm->lsm_md_layout_version, - lsm->lsm_md_pool_name, - old_lsm->lsm_md_pool_name); - - for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) { - CERROR("%s: sub FIDs in old lsm idx %d, old: " DFID "\n", - ll_get_fsname(inode->i_sb, NULL, 0), idx, - PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid)); - } - - for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) { - CERROR("%s: sub FIDs in new lsm idx %d, new: " DFID "\n", - ll_get_fsname(inode->i_sb, NULL, 0), idx, - PFID(&lsm->lsm_md_oinfo[idx].lmo_fid)); - } - - return -EIO; - } - - return 0; -} - -void ll_clear_inode(struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_sb_info *sbi = ll_i2sbi(inode); - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n", - PFID(ll_inode2fid(inode)), inode); - - if (S_ISDIR(inode->i_mode)) { - /* these should have been cleared in ll_file_release */ - LASSERT(!lli->lli_opendir_key); - LASSERT(!lli->lli_sai); - LASSERT(lli->lli_opendir_pid == 0); - } - - md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode)); - - LASSERT(!lli->lli_open_fd_write_count); - LASSERT(!lli->lli_open_fd_read_count); - LASSERT(!lli->lli_open_fd_exec_count); - - if (lli->lli_mds_write_och) - ll_md_real_close(inode, FMODE_WRITE); - if (lli->lli_mds_exec_och) - ll_md_real_close(inode, FMODE_EXEC); - if (lli->lli_mds_read_och) - ll_md_real_close(inode, FMODE_READ); - - if (S_ISLNK(inode->i_mode)) { - kfree(lli->lli_symlink_name); - lli->lli_symlink_name = NULL; - } - - ll_xattr_cache_destroy(inode); - -#ifdef CONFIG_FS_POSIX_ACL - forget_all_cached_acls(inode); - if (lli->lli_posix_acl) { - posix_acl_release(lli->lli_posix_acl); - lli->lli_posix_acl = NULL; - } -#endif - lli->lli_inode_magic = LLI_INODE_DEAD; - - if (S_ISDIR(inode->i_mode)) - ll_dir_clear_lsm_md(inode); - if 
(S_ISREG(inode->i_mode) && !is_bad_inode(inode)) - LASSERT(list_empty(&lli->lli_agl_list)); - - /* - * XXX This has to be done before lsm is freed below, because - * cl_object still uses inode lsm. - */ - cl_inode_fini(inode); -} - -#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) - -static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data) -{ - struct lustre_md md; - struct inode *inode = d_inode(dentry); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ptlrpc_request *request = NULL; - int rc, ia_valid; - - op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request); - if (rc) { - ptlrpc_req_finished(request); - if (rc == -ENOENT) { - clear_nlink(inode); - /* Unlinked special device node? Or just a race? - * Pretend we did everything. - */ - if (!S_ISREG(inode->i_mode) && - !S_ISDIR(inode->i_mode)) { - ia_valid = op_data->op_attr.ia_valid; - op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS; - rc = simple_setattr(dentry, &op_data->op_attr); - op_data->op_attr.ia_valid = ia_valid; - } - } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) { - CERROR("md_setattr fails: rc = %d\n", rc); - } - return rc; - } - - rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp, - sbi->ll_md_exp, &md); - if (rc) { - ptlrpc_req_finished(request); - return rc; - } - - ia_valid = op_data->op_attr.ia_valid; - /* inode size will be in cl_setattr_ost, can't do it now since dirty - * cache is not cleared yet. 
- */ - op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE); - if (S_ISREG(inode->i_mode)) - inode_lock(inode); - rc = simple_setattr(dentry, &op_data->op_attr); - if (S_ISREG(inode->i_mode)) - inode_unlock(inode); - op_data->op_attr.ia_valid = ia_valid; - - rc = ll_update_inode(inode, &md); - ptlrpc_req_finished(request); - - return rc; -} - -/* If this inode has objects allocated to it (lsm != NULL), then the OST - * object(s) determine the file size and mtime. Otherwise, the MDS will - * keep these values until such a time that objects are allocated for it. - * We do the MDS operations first, as it is checking permissions for us. - * We don't to the MDS RPC if there is nothing that we want to store there, - * otherwise there is no harm in updating mtime/atime on the MDS if we are - * going to do an RPC anyways. - * - * If we are doing a truncate, we will send the mtime and ctime updates - * to the OST with the punch RPC, otherwise we do an explicit setattr RPC. - * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE - * at the same time. - * - * In case of HSMimport, we only set attr on MDS. - */ -int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) -{ - struct inode *inode = d_inode(dentry); - struct ll_inode_info *lli = ll_i2info(inode); - struct md_op_data *op_data = NULL; - int rc = 0; - - CDEBUG(D_VFSTRACE, "%s: setattr inode " DFID "(%p) from %llu to %llu, valid %x, hsm_import %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode, - i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import); - - if (attr->ia_valid & ATTR_SIZE) { - /* Check new size against VFS/VM file size limit and rlimit */ - rc = inode_newsize_ok(inode, attr->ia_size); - if (rc) - return rc; - - /* The maximum Lustre file size is variable, based on the - * OST maximum object size and number of stripes. This - * needs another check in addition to the VFS check above. 
- */ - if (attr->ia_size > ll_file_maxbytes(inode)) { - CDEBUG(D_INODE, "file " DFID " too large %llu > %llu\n", - PFID(&lli->lli_fid), attr->ia_size, - ll_file_maxbytes(inode)); - return -EFBIG; - } - - attr->ia_valid |= ATTR_MTIME | ATTR_CTIME; - } - - /* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */ - if (attr->ia_valid & TIMES_SET_FLAGS) { - if ((!uid_eq(current_fsuid(), inode->i_uid)) && - !capable(CAP_FOWNER)) - return -EPERM; - } - - /* We mark all of the fields "set" so MDS/OST does not re-set them */ - if (attr->ia_valid & ATTR_CTIME) { - attr->ia_ctime = current_time(inode); - attr->ia_valid |= ATTR_CTIME_SET; - } - if (!(attr->ia_valid & ATTR_ATIME_SET) && - (attr->ia_valid & ATTR_ATIME)) { - attr->ia_atime = current_time(inode); - attr->ia_valid |= ATTR_ATIME_SET; - } - if (!(attr->ia_valid & ATTR_MTIME_SET) && - (attr->ia_valid & ATTR_MTIME)) { - attr->ia_mtime = current_time(inode); - attr->ia_valid |= ATTR_MTIME_SET; - } - - if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME)) - CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n", - LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime), - (s64)ktime_get_real_seconds()); - - if (S_ISREG(inode->i_mode)) - inode_unlock(inode); - - /* - * We always do an MDS RPC, even if we're only changing the size; - * only the MDS knows whether truncate() should fail with -ETXTBUSY - */ - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) { - rc = -ENOMEM; - goto out; - } - - if (!hsm_import && attr->ia_valid & ATTR_SIZE) { - /* - * If we are changing file size, file content is - * modified, flag it. 
- */ - attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE; - op_data->op_bias |= MDS_DATA_MODIFIED; - clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags); - } - - op_data->op_attr = *attr; - - rc = ll_md_setattr(dentry, op_data); - if (rc) - goto out; - - if (!S_ISREG(inode->i_mode) || hsm_import) { - rc = 0; - goto out; - } - - if (attr->ia_valid & (ATTR_SIZE | - ATTR_ATIME | ATTR_ATIME_SET | - ATTR_MTIME | ATTR_MTIME_SET)) { - /* For truncate and utimes sending attributes to OSTs, setting - * mtime/atime to the past will be performed under PW [0:EOF] - * extent lock (new_size:EOF for truncate). It may seem - * excessive to send mtime/atime updates to OSTs when not - * setting times to past, but it is necessary due to possible - * time de-synchronization between MDT inode and OST objects - */ - rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0); - } - - /* - * If the file was restored, it needs to set dirty flag. - * - * We've already sent MDS_DATA_MODIFIED flag in - * ll_md_setattr() for truncate. However, the MDT refuses to - * set the HS_DIRTY flag on released files, so we have to set - * it again if the file has been restored. Please check how - * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini(). - * - * Please notice that if the file is not released, the previous - * MDS_DATA_MODIFIED has taken effect and usually - * LLIF_DATA_MODIFIED is not set(see vvp_io_setattr_fini()). - * This way we can save an RPC for common open + trunc - * operation. - */ - if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) { - struct hsm_state_set hss = { - .hss_valid = HSS_SETMASK, - .hss_setmask = HS_DIRTY, - }; - int rc2; - - rc2 = ll_hsm_state_set(inode, &hss); - /* - * truncate and write can happen at the same time, so that - * the file can be set modified even though the file is not - * restored from released state, and ll_hsm_state_set() is - * not applicable for the file, and rc2 < 0 is normal in this - * case. 
- */ - if (rc2 < 0) - CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n", - PFID(ll_inode2fid(inode)), rc2); - } - -out: - if (op_data) - ll_finish_md_op_data(op_data); - - if (S_ISREG(inode->i_mode)) { - inode_lock(inode); - if ((attr->ia_valid & ATTR_SIZE) && !hsm_import) - inode_dio_wait(inode); - } - - ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ? - LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1); - - return rc; -} - -int ll_setattr(struct dentry *de, struct iattr *attr) -{ - int mode = d_inode(de)->i_mode; - - if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) == - (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) - attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE; - - if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) == - (ATTR_SIZE | ATTR_MODE)) && - (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) || - (((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) && - !(attr->ia_mode & S_ISGID)))) - attr->ia_valid |= ATTR_FORCE; - - if ((attr->ia_valid & ATTR_MODE) && - (mode & S_ISUID) && - !(attr->ia_mode & S_ISUID) && - !(attr->ia_valid & ATTR_KILL_SUID)) - attr->ia_valid |= ATTR_KILL_SUID; - - if ((attr->ia_valid & ATTR_MODE) && - ((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) && - !(attr->ia_mode & S_ISGID) && - !(attr->ia_valid & ATTR_KILL_SGID)) - attr->ia_valid |= ATTR_KILL_SGID; - - return ll_setattr_raw(de, attr, false); -} - -int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs, - __u64 max_age, __u32 flags) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct obd_statfs obd_osfs; - int rc; - - rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags); - if (rc) { - CERROR("md_statfs fails: rc = %d\n", rc); - return rc; - } - - osfs->os_type = sb->s_magic; - - CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n", - osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, - osfs->os_files); - - if (sbi->ll_flags & LL_SBI_LAZYSTATFS) - flags |= OBD_STATFS_NODELAY; - - rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, 
flags); - if (rc) { - CERROR("obd_statfs fails: rc = %d\n", rc); - return rc; - } - - CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n", - obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree, - obd_osfs.os_files); - - osfs->os_bsize = obd_osfs.os_bsize; - osfs->os_blocks = obd_osfs.os_blocks; - osfs->os_bfree = obd_osfs.os_bfree; - osfs->os_bavail = obd_osfs.os_bavail; - - /* If we don't have as many objects free on the OST as inodes - * on the MDS, we reduce the total number of inodes to - * compensate, so that the "inodes in use" number is correct. - */ - if (obd_osfs.os_ffree < osfs->os_ffree) { - osfs->os_files = (osfs->os_files - osfs->os_ffree) + - obd_osfs.os_ffree; - osfs->os_ffree = obd_osfs.os_ffree; - } - - return rc; -} - -int ll_statfs(struct dentry *de, struct kstatfs *sfs) -{ - struct super_block *sb = de->d_sb; - struct obd_statfs osfs; - int rc; - - CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64()); - ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1); - - /* Some amount of caching on the client is allowed */ - rc = ll_statfs_internal(sb, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - 0); - if (rc) - return rc; - - statfs_unpack(sfs, &osfs); - - /* We need to downshift for all 32-bit kernels, because we can't - * tell if the kernel is being called via sys_statfs64() or not. - * Stop before overflowing f_bsize - in which case it is better - * to just risk EOVERFLOW if caller is using old sys_statfs(). 
- */ - if (sizeof(long) < 8) { - while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) { - sfs->f_bsize <<= 1; - - osfs.os_blocks >>= 1; - osfs.os_bfree >>= 1; - osfs.os_bavail >>= 1; - } - } - - sfs->f_blocks = osfs.os_blocks; - sfs->f_bfree = osfs.os_bfree; - sfs->f_bavail = osfs.os_bavail; - sfs->f_fsid = ll_s2sbi(sb)->ll_fsid; - return 0; -} - -void ll_inode_size_lock(struct inode *inode) -{ - struct ll_inode_info *lli; - - LASSERT(!S_ISDIR(inode->i_mode)); - - lli = ll_i2info(inode); - mutex_lock(&lli->lli_size_mutex); -} - -void ll_inode_size_unlock(struct inode *inode) -{ - struct ll_inode_info *lli; - - lli = ll_i2info(inode); - mutex_unlock(&lli->lli_size_mutex); -} - -int ll_update_inode(struct inode *inode, struct lustre_md *md) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct mdt_body *body = md->body; - struct ll_sb_info *sbi = ll_i2sbi(inode); - - if (body->mbo_valid & OBD_MD_FLEASIZE) - cl_file_inode_init(inode, md); - - if (S_ISDIR(inode->i_mode)) { - int rc; - - rc = ll_update_lsm_md(inode, md); - if (rc) - return rc; - } - -#ifdef CONFIG_FS_POSIX_ACL - if (body->mbo_valid & OBD_MD_FLACL) { - spin_lock(&lli->lli_lock); - if (lli->lli_posix_acl) - posix_acl_release(lli->lli_posix_acl); - lli->lli_posix_acl = md->posix_acl; - spin_unlock(&lli->lli_lock); - } -#endif - inode->i_ino = cl_fid_build_ino(&body->mbo_fid1, - sbi->ll_flags & LL_SBI_32BIT_API); - inode->i_generation = cl_fid_build_gen(&body->mbo_fid1); - - if (body->mbo_valid & OBD_MD_FLATIME) { - if (body->mbo_atime > LTIME_S(inode->i_atime)) - LTIME_S(inode->i_atime) = body->mbo_atime; - lli->lli_atime = body->mbo_atime; - } - if (body->mbo_valid & OBD_MD_FLMTIME) { - if (body->mbo_mtime > LTIME_S(inode->i_mtime)) { - CDEBUG(D_INODE, - "setting ino %lu mtime from %lu to %llu\n", - inode->i_ino, LTIME_S(inode->i_mtime), - body->mbo_mtime); - LTIME_S(inode->i_mtime) = body->mbo_mtime; - } - lli->lli_mtime = body->mbo_mtime; - } - if (body->mbo_valid & OBD_MD_FLCTIME) { - if 
(body->mbo_ctime > LTIME_S(inode->i_ctime)) - LTIME_S(inode->i_ctime) = body->mbo_ctime; - lli->lli_ctime = body->mbo_ctime; - } - if (body->mbo_valid & OBD_MD_FLMODE) - inode->i_mode = (inode->i_mode & S_IFMT) | - (body->mbo_mode & ~S_IFMT); - if (body->mbo_valid & OBD_MD_FLTYPE) - inode->i_mode = (inode->i_mode & ~S_IFMT) | - (body->mbo_mode & S_IFMT); - LASSERT(inode->i_mode != 0); - if (S_ISREG(inode->i_mode)) - inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, - LL_MAX_BLKSIZE_BITS); - else - inode->i_blkbits = inode->i_sb->s_blocksize_bits; - if (body->mbo_valid & OBD_MD_FLUID) - inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid); - if (body->mbo_valid & OBD_MD_FLGID) - inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid); - if (body->mbo_valid & OBD_MD_FLFLAGS) - inode->i_flags = ll_ext_to_inode_flags(body->mbo_flags); - if (body->mbo_valid & OBD_MD_FLNLINK) - set_nlink(inode, body->mbo_nlink); - if (body->mbo_valid & OBD_MD_FLRDEV) - inode->i_rdev = old_decode_dev(body->mbo_rdev); - - if (body->mbo_valid & OBD_MD_FLID) { - /* FID shouldn't be changed! 
*/ - if (fid_is_sane(&lli->lli_fid)) { - LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1), - "Trying to change FID " DFID " to the " DFID ", inode " DFID "(%p)\n", - PFID(&lli->lli_fid), PFID(&body->mbo_fid1), - PFID(ll_inode2fid(inode)), inode); - } else { - lli->lli_fid = body->mbo_fid1; - } - } - - LASSERT(fid_seq(&lli->lli_fid) != 0); - - if (body->mbo_valid & OBD_MD_FLSIZE) { - i_size_write(inode, body->mbo_size); - - CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n", - PFID(ll_inode2fid(inode)), - (unsigned long long)body->mbo_size); - - if (body->mbo_valid & OBD_MD_FLBLOCKS) - inode->i_blocks = body->mbo_blocks; - } - - if (body->mbo_valid & OBD_MD_TSTATE) { - if (body->mbo_t_state & MS_RESTORE) - set_bit(LLIF_FILE_RESTORING, &lli->lli_flags); - } - - return 0; -} - -int ll_read_inode2(struct inode *inode, void *opaque) -{ - struct lustre_md *md = opaque; - struct ll_inode_info *lli = ll_i2info(inode); - int rc; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n", - PFID(&lli->lli_fid), inode); - - /* Core attributes from the MDS first. This is a new inode, and - * the VFS doesn't zero times in the core inode so we have to do - * it ourselves. They will be overwritten by either MDS or OST - * attributes - we just need to make sure they aren't newer. 
- */ - LTIME_S(inode->i_mtime) = 0; - LTIME_S(inode->i_atime) = 0; - LTIME_S(inode->i_ctime) = 0; - inode->i_rdev = 0; - rc = ll_update_inode(inode, md); - if (rc) - return rc; - - /* OIDEBUG(inode); */ - - if (S_ISREG(inode->i_mode)) { - struct ll_sb_info *sbi = ll_i2sbi(inode); - - inode->i_op = &ll_file_inode_operations; - inode->i_fop = sbi->ll_fop; - inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops; - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = &ll_dir_inode_operations; - inode->i_fop = &ll_dir_operations; - } else if (S_ISLNK(inode->i_mode)) { - inode->i_op = &ll_fast_symlink_inode_operations; - } else { - inode->i_op = &ll_special_inode_operations; - - init_special_inode(inode, inode->i_mode, - inode->i_rdev); - } - - return 0; -} - -void ll_delete_inode(struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - - if (S_ISREG(inode->i_mode) && lli->lli_clob) - /* discard all dirty pages before truncating them, required by - * osc_extent implementation at LU-1030. 
- */ - cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, - CL_FSYNC_LOCAL, 1); - - truncate_inode_pages_final(&inode->i_data); - - LASSERTF(!inode->i_data.nrpages, - "inode=" DFID "(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n", - PFID(ll_inode2fid(inode)), inode, inode->i_data.nrpages); - - ll_clear_inode(inode); - clear_inode(inode); -} - -int ll_iocontrol(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ptlrpc_request *req = NULL; - int rc, flags = 0; - - switch (cmd) { - case FSFILT_IOC_GETFLAGS: { - struct mdt_body *body; - struct md_op_data *op_data; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, - 0, 0, LUSTRE_OPC_ANY, - NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - op_data->op_valid = OBD_MD_FLFLAGS; - rc = md_getattr(sbi->ll_md_exp, op_data, &req); - ll_finish_md_op_data(op_data); - if (rc) { - CERROR("%s: failure inode " DFID ": rc = %d\n", - sbi->ll_md_exp->exp_obd->obd_name, - PFID(ll_inode2fid(inode)), rc); - return -abs(rc); - } - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - - flags = body->mbo_flags; - - ptlrpc_req_finished(req); - - return put_user(flags, (int __user *)arg); - } - case FSFILT_IOC_SETFLAGS: { - struct md_op_data *op_data; - struct cl_object *obj; - struct iattr *attr; - - if (get_user(flags, (int __user *)arg)) - return -EFAULT; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - op_data->op_attr_flags = flags; - op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG; - rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req); - ll_finish_md_op_data(op_data); - ptlrpc_req_finished(req); - if (rc) - return rc; - - inode->i_flags = ll_ext_to_inode_flags(flags); - - obj = ll_i2info(inode)->lli_clob; - if (!obj) - return 0; - - attr = kzalloc(sizeof(*attr), GFP_NOFS); - if (!attr) - return -ENOMEM; - - attr->ia_valid 
= ATTR_ATTR_FLAG; - rc = cl_setattr_ost(obj, attr, flags); - kfree(attr); - return rc; - } - default: - return -ENOSYS; - } - - return 0; -} - -int ll_flush_ctx(struct inode *inode) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - - CDEBUG(D_SEC, "flush context for user %d\n", - from_kuid(&init_user_ns, current_uid())); - - obd_set_info_async(NULL, sbi->ll_md_exp, - sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX, - 0, NULL, NULL); - obd_set_info_async(NULL, sbi->ll_dt_exp, - sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX, - 0, NULL, NULL); - return 0; -} - -/* umount -f client means force down, don't save state */ -void ll_umount_begin(struct super_block *sb) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct obd_device *obd; - struct obd_ioctl_data *ioc_data; - int cnt = 0; - - CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb, - sb->s_count, atomic_read(&sb->s_active)); - - obd = class_exp2obd(sbi->ll_md_exp); - if (!obd) { - CERROR("Invalid MDC connection handle %#llx\n", - sbi->ll_md_exp->exp_handle.h_cookie); - return; - } - obd->obd_force = 1; - - obd = class_exp2obd(sbi->ll_dt_exp); - if (!obd) { - CERROR("Invalid LOV connection handle %#llx\n", - sbi->ll_dt_exp->exp_handle.h_cookie); - return; - } - obd->obd_force = 1; - - ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS); - if (ioc_data) { - obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp, - sizeof(*ioc_data), ioc_data, NULL); - - obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp, - sizeof(*ioc_data), ioc_data, NULL); - - kfree(ioc_data); - } - - /* Really, we'd like to wait until there are no requests outstanding, - * and then continue. For now, we just periodically checking for vfs - * to decrement mnt_cnt and hope to finish it within 10sec. 
- */ - while (cnt < 10 && !may_umount(sbi->ll_mnt.mnt)) { - schedule_timeout_uninterruptible(HZ); - cnt++; - } - - schedule(); -} - -int ll_remount_fs(struct super_block *sb, int *flags, char *data) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - char *profilenm = get_profile_name(sb); - int err; - __u32 read_only; - - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { - read_only = *flags & SB_RDONLY; - err = obd_set_info_async(NULL, sbi->ll_md_exp, - sizeof(KEY_READ_ONLY), - KEY_READ_ONLY, sizeof(read_only), - &read_only, NULL); - if (err) { - LCONSOLE_WARN("Failed to remount %s %s (%d)\n", - profilenm, read_only ? - "read-only" : "read-write", err); - return err; - } - - if (read_only) - sb->s_flags |= SB_RDONLY; - else - sb->s_flags &= ~SB_RDONLY; - - if (sbi->ll_flags & LL_SBI_VERBOSE) - LCONSOLE_WARN("Remounted %s %s\n", profilenm, - read_only ? "read-only" : "read-write"); - } - return 0; -} - -/** - * Cleanup the open handle that is cached on MDT-side. - * - * For open case, the client side open handling thread may hit error - * after the MDT grant the open. Under such case, the client should - * send close RPC to the MDT as cleanup; otherwise, the open handle - * on the MDT will be leaked there until the client umount or evicted. - * - * In further, if someone unlinked the file, because the open handle - * holds the reference on such file/object, then it will block the - * subsequent threads that want to locate such object via FID. 
- * - * \param[in] sb super block for this file-system - * \param[in] open_req pointer to the original open request - */ -void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req) -{ - struct mdt_body *body; - struct md_op_data *op_data; - struct ptlrpc_request *close_req = NULL; - struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp; - - body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) - return; - - op_data->op_fid1 = body->mbo_fid1; - op_data->op_handle = body->mbo_handle; - op_data->op_mod_time = get_seconds(); - md_close(exp, op_data, NULL, &close_req); - ptlrpc_req_finished(close_req); - ll_finish_md_op_data(op_data); -} - -int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, - struct super_block *sb, struct lookup_intent *it) -{ - struct ll_sb_info *sbi = NULL; - struct lustre_md md = { NULL }; - int rc; - - LASSERT(*inode || sb); - sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode); - rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp, - sbi->ll_md_exp, &md); - if (rc) - goto cleanup; - - if (*inode) { - rc = ll_update_inode(*inode, &md); - if (rc) - goto out; - } else { - LASSERT(sb); - - /* - * At this point server returns to client's same fid as client - * generated for creating. So using ->fid1 is okay here. - */ - if (!fid_is_sane(&md.body->mbo_fid1)) { - CERROR("%s: Fid is insane " DFID "\n", - ll_get_fsname(sb, NULL, 0), - PFID(&md.body->mbo_fid1)); - rc = -EINVAL; - goto out; - } - - *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1, - sbi->ll_flags & LL_SBI_32BIT_API), - &md); - if (IS_ERR(*inode)) { -#ifdef CONFIG_FS_POSIX_ACL - if (md.posix_acl) { - posix_acl_release(md.posix_acl); - md.posix_acl = NULL; - } -#endif - rc = PTR_ERR(*inode); - CERROR("new_inode -fatal: rc %d\n", rc); - goto out; - } - } - - /* Handling piggyback layout lock. - * Layout lock can be piggybacked by getattr and open request. 
- * The lsm can be applied to inode only if it comes with a layout lock - * otherwise correct layout may be overwritten, for example: - * 1. proc1: mdt returns a lsm but not granting layout - * 2. layout was changed by another client - * 3. proc2: refresh layout and layout lock granted - * 4. proc1: to apply a stale layout - */ - if (it && it->it_lock_mode != 0) { - struct lustre_handle lockh; - struct ldlm_lock *lock; - - lockh.cookie = it->it_lock_handle; - lock = ldlm_handle2lock(&lockh); - LASSERT(lock); - if (ldlm_has_layout(lock)) { - struct cl_object_conf conf; - - memset(&conf, 0, sizeof(conf)); - conf.coc_opc = OBJECT_CONF_SET; - conf.coc_inode = *inode; - conf.coc_lock = lock; - conf.u.coc_layout = md.layout; - (void)ll_layout_conf(*inode, &conf); - } - LDLM_LOCK_PUT(lock); - } - -out: - md_free_lustre_md(sbi->ll_md_exp, &md); -cleanup: - if (rc != 0 && it && it->it_op & IT_OPEN) - ll_open_cleanup(sb ? sb : (*inode)->i_sb, req); - - return rc; -} - -int ll_obd_statfs(struct inode *inode, void __user *arg) -{ - struct ll_sb_info *sbi = NULL; - struct obd_export *exp; - char *buf = NULL; - struct obd_ioctl_data *data = NULL; - __u32 type; - int len = 0, rc; - - if (!inode) { - rc = -EINVAL; - goto out_statfs; - } - - sbi = ll_i2sbi(inode); - if (!sbi) { - rc = -EINVAL; - goto out_statfs; - } - - rc = obd_ioctl_getdata(&buf, &len, arg); - if (rc) - goto out_statfs; - - data = (void *)buf; - if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 || - !data->ioc_pbuf1 || !data->ioc_pbuf2) { - rc = -EINVAL; - goto out_statfs; - } - - if (data->ioc_inllen1 != sizeof(__u32) || - data->ioc_inllen2 != sizeof(__u32) || - data->ioc_plen1 != sizeof(struct obd_statfs) || - data->ioc_plen2 != sizeof(struct obd_uuid)) { - rc = -EINVAL; - goto out_statfs; - } - - memcpy(&type, data->ioc_inlbuf1, sizeof(__u32)); - if (type & LL_STATFS_LMV) { - exp = sbi->ll_md_exp; - } else if (type & LL_STATFS_LOV) { - exp = sbi->ll_dt_exp; - } else { - rc = -ENODEV; - goto out_statfs; - } - - rc = 
obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL); - if (rc) - goto out_statfs; -out_statfs: - kvfree(buf); - return rc; -} - -int ll_process_config(struct lustre_cfg *lcfg) -{ - char *ptr; - void *sb; - struct lprocfs_static_vars lvars; - unsigned long x; - int rc = 0; - - lprocfs_llite_init_vars(&lvars); - - /* The instance name contains the sb: lustre-client-aacfe000 */ - ptr = strrchr(lustre_cfg_string(lcfg, 0), '-'); - if (!ptr || !*(++ptr)) - return -EINVAL; - rc = kstrtoul(ptr, 16, &x); - if (rc != 0) - return -EINVAL; - sb = (void *)x; - /* This better be a real Lustre superblock! */ - LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == - LMD_MAGIC); - - /* Note we have not called client_common_fill_super yet, so - * proc fns must be able to handle that! - */ - rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars, - lcfg, sb); - if (rc > 0) - rc = 0; - return rc; -} - -/* this function prepares md_op_data hint for passing ot down to MD stack. */ -struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, - struct inode *i1, struct inode *i2, - const char *name, size_t namelen, - u32 mode, __u32 opc, void *data) -{ - if (!name) { - /* Do not reuse namelen for something else. 
*/ - if (namelen) - return ERR_PTR(-EINVAL); - } else { - if (namelen > ll_i2sbi(i1)->ll_namelen) - return ERR_PTR(-ENAMETOOLONG); - - if (!lu_name_is_valid_2(name, namelen)) - return ERR_PTR(-EINVAL); - } - - if (!op_data) - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - - if (!op_data) - return ERR_PTR(-ENOMEM); - - ll_i2gids(op_data->op_suppgids, i1, i2); - op_data->op_fid1 = *ll_inode2fid(i1); - op_data->op_default_stripe_offset = -1; - if (S_ISDIR(i1->i_mode)) { - op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md; - if (opc == LUSTRE_OPC_MKDIR) - op_data->op_default_stripe_offset = - ll_i2info(i1)->lli_def_stripe_offset; - } - - if (i2) { - op_data->op_fid2 = *ll_inode2fid(i2); - if (S_ISDIR(i2->i_mode)) - op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md; - } else { - fid_zero(&op_data->op_fid2); - } - - if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH) - op_data->op_cli_flags |= CLI_HASH64; - - if (ll_need_32bit_api(ll_i2sbi(i1))) - op_data->op_cli_flags |= CLI_API32; - - op_data->op_name = name; - op_data->op_namelen = namelen; - op_data->op_mode = mode; - op_data->op_mod_time = ktime_get_real_seconds(); - op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); - op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); - op_data->op_cap = current_cap(); - if ((opc == LUSTRE_OPC_CREATE) && name && - filename_is_volatile(name, namelen, &op_data->op_mds)) - op_data->op_bias |= MDS_CREATE_VOLATILE; - else - op_data->op_mds = 0; - op_data->op_data = data; - - return op_data; -} - -void ll_finish_md_op_data(struct md_op_data *op_data) -{ - kfree(op_data); -} - -int ll_show_options(struct seq_file *seq, struct dentry *dentry) -{ - struct ll_sb_info *sbi; - - LASSERT(seq && dentry); - sbi = ll_s2sbi(dentry->d_sb); - - if (sbi->ll_flags & LL_SBI_NOLCK) - seq_puts(seq, ",nolock"); - - if (sbi->ll_flags & LL_SBI_FLOCK) - seq_puts(seq, ",flock"); - - if (sbi->ll_flags & LL_SBI_LOCALFLOCK) - seq_puts(seq, ",localflock"); - - if (sbi->ll_flags & LL_SBI_USER_XATTR) - 
seq_puts(seq, ",user_xattr"); - - if (sbi->ll_flags & LL_SBI_LAZYSTATFS) - seq_puts(seq, ",lazystatfs"); - - if (sbi->ll_flags & LL_SBI_USER_FID2PATH) - seq_puts(seq, ",user_fid2path"); - - if (sbi->ll_flags & LL_SBI_ALWAYS_PING) - seq_puts(seq, ",always_ping"); - - return 0; -} - -/** - * Get obd name by cmd, and copy out to user space - */ -int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct obd_device *obd; - - if (cmd == OBD_IOC_GETDTNAME) - obd = class_exp2obd(sbi->ll_dt_exp); - else if (cmd == OBD_IOC_GETMDNAME) - obd = class_exp2obd(sbi->ll_md_exp); - else - return -EINVAL; - - if (!obd) - return -ENOENT; - - if (copy_to_user((void __user *)arg, obd->obd_name, - strlen(obd->obd_name) + 1)) - return -EFAULT; - - return 0; -} - -/** - * Get lustre file system name by \a sbi. If \a buf is provided(non-NULL), the - * fsname will be returned in this buffer; otherwise, a static buffer will be - * used to store the fsname and returned to caller. - */ -char *ll_get_fsname(struct super_block *sb, char *buf, int buflen) -{ - static char fsname_static[MTI_NAME_MAXLEN]; - struct lustre_sb_info *lsi = s2lsi(sb); - char *ptr; - int len; - - if (!buf) { - /* this means the caller wants to use static buffer - * and it doesn't care about race. Usually this is - * in error reporting path - */ - buf = fsname_static; - buflen = sizeof(fsname_static); - } - - len = strlen(lsi->lsi_lmd->lmd_profile); - ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-'); - if (ptr && (strcmp(ptr, "-client") == 0)) - len -= 7; - - if (unlikely(len >= buflen)) - len = buflen - 1; - strncpy(buf, lsi->lsi_lmd->lmd_profile, len); - buf[len] = '\0'; - - return buf; -} - -void ll_dirty_page_discard_warn(struct page *page, int ioret) -{ - char *buf, *path = NULL; - struct dentry *dentry = NULL; - struct vvp_object *obj = cl_inode2vvp(page->mapping->host); - - /* this can be called inside spin lock so use GFP_ATOMIC. 
*/ - buf = (char *)__get_free_page(GFP_ATOMIC); - if (buf) { - dentry = d_find_alias(page->mapping->host); - if (dentry) - path = dentry_path_raw(dentry, buf, PAGE_SIZE); - } - - CDEBUG(D_WARNING, - "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n", - ll_get_fsname(page->mapping->host->i_sb, NULL, 0), - s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev, - PFID(&obj->vob_header.coh_lu.loh_fid), - (path && !IS_ERR(path)) ? path : "", ioret); - - if (dentry) - dput(dentry); - - if (buf) - free_page((unsigned long)buf); -} - -ssize_t ll_copy_user_md(const struct lov_user_md __user *md, - struct lov_user_md **kbuf) -{ - struct lov_user_md lum; - ssize_t lum_size; - - if (copy_from_user(&lum, md, sizeof(lum))) { - lum_size = -EFAULT; - goto no_kbuf; - } - - lum_size = ll_lov_user_md_size(&lum); - if (lum_size < 0) - goto no_kbuf; - - *kbuf = kzalloc(lum_size, GFP_NOFS); - if (!*kbuf) { - lum_size = -ENOMEM; - goto no_kbuf; - } - - if (copy_from_user(*kbuf, md, lum_size) != 0) { - kfree(*kbuf); - *kbuf = NULL; - lum_size = -EFAULT; - } -no_kbuf: - return lum_size; -} - -/* - * Compute llite root squash state after a change of root squash - * configuration setting or add/remove of a lnet nid - */ -void ll_compute_rootsquash_state(struct ll_sb_info *sbi) -{ - struct root_squash_info *squash = &sbi->ll_squash; - struct lnet_process_id id; - bool matched; - int i; - - /* Update norootsquash flag */ - down_write(&squash->rsi_sem); - if (list_empty(&squash->rsi_nosquash_nids)) { - sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH; - } else { - /* - * Do not apply root squash as soon as one of our NIDs is - * in the nosquash_nids list - */ - matched = false; - i = 0; - - while (LNetGetId(i++, &id) != -ENOENT) { - if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND) - continue; - if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) { - matched = true; - break; - } - } - if (matched) - sbi->ll_flags |= LL_SBI_NOROOTSQUASH; - else - sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH; - 
} - up_write(&squash->rsi_sem); -} - -/** - * Parse linkea content to extract information about a given hardlink - * - * \param[in] ldata - Initialized linkea data - * \param[in] linkno - Link identifier - * \param[out] parent_fid - The entry's parent FID - * \param[in] size - Entry name destination buffer - * - * \retval 0 on success - * \retval Appropriate negative error code on failure - */ -static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno, - struct lu_fid *parent_fid, struct lu_name *ln) -{ - unsigned int idx; - int rc; - - rc = linkea_init_with_rec(ldata); - if (rc < 0) - return rc; - - if (linkno >= ldata->ld_leh->leh_reccount) - /* beyond last link */ - return -ENODATA; - - linkea_first_entry(ldata); - for (idx = 0; ldata->ld_lee; idx++) { - linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln, - parent_fid); - if (idx == linkno) - break; - - linkea_next_entry(ldata); - } - - if (idx < linkno) - return -ENODATA; - - return 0; -} - -/** - * Get parent FID and name of an identified link. Operation is performed for - * a given link number, letting the caller iterate over linkno to list one or - * all links of an entry. - * - * \param[in] file - File descriptor against which to perform the operation - * \param[in,out] arg - User-filled structure containing the linkno to operate - * on and the available size. 
It is eventually filled - * with the requested information or left untouched on - * error - * - * \retval - 0 on success - * \retval - Appropriate negative error code on failure - */ -int ll_getparent(struct file *file, struct getparent __user *arg) -{ - struct inode *inode = file_inode(file); - struct linkea_data *ldata; - struct lu_fid parent_fid; - struct lu_buf buf = { - .lb_buf = NULL, - .lb_len = 0 - }; - struct lu_name ln; - u32 name_size; - u32 linkno; - int rc; - - if (!capable(CAP_DAC_READ_SEARCH) && - !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH)) - return -EPERM; - - if (get_user(name_size, &arg->gp_name_size)) - return -EFAULT; - - if (get_user(linkno, &arg->gp_linkno)) - return -EFAULT; - - if (name_size > PATH_MAX) - return -EINVAL; - - ldata = kzalloc(sizeof(*ldata), GFP_NOFS); - if (!ldata) - return -ENOMEM; - - rc = linkea_data_new(ldata, &buf); - if (rc < 0) - goto ldata_free; - - rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf, - buf.lb_len, OBD_MD_FLXATTR); - if (rc < 0) - goto lb_free; - - rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln); - if (rc < 0) - goto lb_free; - - if (ln.ln_namelen >= name_size) { - rc = -EOVERFLOW; - goto lb_free; - } - - if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid))) { - rc = -EFAULT; - goto lb_free; - } - - if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen)) { - rc = -EFAULT; - goto lb_free; - } - - if (put_user('\0', arg->gp_name + ln.ln_namelen)) { - rc = -EFAULT; - goto lb_free; - } - -lb_free: - kvfree(buf.lb_buf); -ldata_free: - kfree(ldata); - return rc; -} diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c deleted file mode 100644 index d7fb5533f707..000000000000 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ /dev/null @@ -1,480 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include "llite_internal.h" - -static const struct vm_operations_struct ll_file_vm_ops; - -void policy_from_vma(union ldlm_policy_data *policy, - struct vm_area_struct *vma, unsigned long addr, - size_t count) -{ - policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) + - (vma->vm_pgoff << PAGE_SHIFT); - policy->l_extent.end = (policy->l_extent.start + count - 1) | - ~PAGE_MASK; -} - -struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, - size_t count) -{ - struct vm_area_struct *vma, *ret = NULL; - - /* mmap_sem must have been held by caller. 
*/ - LASSERT(!down_write_trylock(&mm->mmap_sem)); - - for (vma = find_vma(mm, addr); - vma && vma->vm_start < (addr + count); vma = vma->vm_next) { - if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && - vma->vm_flags & VM_SHARED) { - ret = vma; - break; - } - } - return ret; -} - -/** - * API independent part for page fault initialization. - * \param vma - virtual memory area addressed to page fault - * \param env - corespondent lu_env to processing - * \param index - page index corespondent to fault. - * \parm ra_flags - vma readahead flags. - * - * \return error codes from cl_io_init. - */ -static struct cl_io * -ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma, - pgoff_t index, unsigned long *ra_flags) -{ - struct file *file = vma->vm_file; - struct inode *inode = file_inode(file); - struct cl_io *io; - struct cl_fault_io *fio; - int rc; - - if (ll_file_nolock(file)) - return ERR_PTR(-EOPNOTSUPP); - -restart: - io = vvp_env_thread_io(env); - io->ci_obj = ll_i2info(inode)->lli_clob; - LASSERT(io->ci_obj); - - fio = &io->u.ci_fault; - fio->ft_index = index; - fio->ft_executable = vma->vm_flags & VM_EXEC; - - /* - * disable VM_SEQ_READ and use VM_RAND_READ to make sure that - * the kernel will not read other pages not covered by ldlm in - * filemap_nopage. we do our readahead in ll_readpage. - */ - if (ra_flags) - *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ); - vma->vm_flags &= ~VM_SEQ_READ; - vma->vm_flags |= VM_RAND_READ; - - CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags, - fio->ft_index, fio->ft_executable); - - rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj); - if (rc == 0) { - struct vvp_io *vio = vvp_env_io(env); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - - LASSERT(vio->vui_cl.cis_io == io); - - /* mmap lock must be MANDATORY it has to cache pages. 
*/ - io->ci_lockreq = CILR_MANDATORY; - vio->vui_fd = fd; - } else { - LASSERT(rc < 0); - cl_io_fini(env, io); - if (io->ci_need_restart) - goto restart; - - io = ERR_PTR(rc); - } - - return io; -} - -/* Sharing code of page_mkwrite method for rhel5 and rhel6 */ -static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, - bool *retry) -{ - struct lu_env *env; - struct cl_io *io; - struct vvp_io *vio; - int result; - u16 refcheck; - sigset_t old, new; - struct inode *inode; - struct ll_inode_info *lli; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - io = ll_fault_io_init(env, vma, vmpage->index, NULL); - if (IS_ERR(io)) { - result = PTR_ERR(io); - goto out; - } - - result = io->ci_result; - if (result < 0) - goto out_io; - - io->u.ci_fault.ft_mkwrite = 1; - io->u.ci_fault.ft_writable = 1; - - vio = vvp_env_io(env); - vio->u.fault.ft_vma = vma; - vio->u.fault.ft_vmpage = vmpage; - - siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM)); - sigprocmask(SIG_BLOCK, &new, &old); - - inode = vvp_object_inode(io->ci_obj); - lli = ll_i2info(inode); - - result = cl_io_loop(env, io); - - sigprocmask(SIG_SETMASK, &old, NULL); - - if (result == 0) { - struct inode *inode = file_inode(vma->vm_file); - struct ll_inode_info *lli = ll_i2info(inode); - - lock_page(vmpage); - if (!vmpage->mapping) { - unlock_page(vmpage); - - /* page was truncated and lock was cancelled, return - * ENODATA so that VM_FAULT_NOPAGE will be returned - * to handle_mm_fault(). - */ - if (result == 0) - result = -ENODATA; - } else if (!PageDirty(vmpage)) { - /* race, the page has been cleaned by ptlrpcd after - * it was unlocked, it has to be added into dirty - * cache again otherwise this soon-to-dirty page won't - * consume any grants, even worse if this page is being - * transferred because it will break RPC checksum. 
- */ - unlock_page(vmpage); - - CDEBUG(D_MMAP, - "Race on page_mkwrite %p/%lu, page has been written out, retry.\n", - vmpage, vmpage->index); - - *retry = true; - result = -EAGAIN; - } - - if (!result) - set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags); - } - -out_io: - cl_io_fini(env, io); -out: - cl_env_put(env, &refcheck); - CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result); - LASSERT(ergo(result == 0, PageLocked(vmpage))); - - return result; -} - -static inline int to_fault_error(int result) -{ - switch (result) { - case 0: - result = VM_FAULT_LOCKED; - break; - case -EFAULT: - result = VM_FAULT_NOPAGE; - break; - case -ENOMEM: - result = VM_FAULT_OOM; - break; - default: - result = VM_FAULT_SIGBUS; - break; - } - return result; -} - -/** - * Lustre implementation of a vm_operations_struct::fault() method, called by - * VM to server page fault (both in kernel and user space). - * - * \param vma - is virtual area struct related to page fault - * \param vmf - structure which describe type and address where hit fault - * - * \return allocated and filled _locked_ page for address - * \retval VM_FAULT_ERROR on general error - * \retval NOPAGE_OOM not have memory for allocate new page - */ -static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct lu_env *env; - struct cl_io *io; - struct vvp_io *vio = NULL; - struct page *vmpage; - unsigned long ra_flags; - int result = 0; - int fault_ret = 0; - u16 refcheck; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags); - if (IS_ERR(io)) { - result = to_fault_error(PTR_ERR(io)); - goto out; - } - - result = io->ci_result; - if (result == 0) { - vio = vvp_env_io(env); - vio->u.fault.ft_vma = vma; - vio->u.fault.ft_vmpage = NULL; - vio->u.fault.ft_vmf = vmf; - vio->u.fault.ft_flags = 0; - vio->u.fault.ft_flags_valid = false; - - /* May call ll_readpage() */ - ll_cl_add(vma->vm_file, env, io); - - result = 
cl_io_loop(env, io); - - ll_cl_remove(vma->vm_file, env); - - /* ft_flags are only valid if we reached - * the call to filemap_fault - */ - if (vio->u.fault.ft_flags_valid) - fault_ret = vio->u.fault.ft_flags; - - vmpage = vio->u.fault.ft_vmpage; - if (result != 0 && vmpage) { - put_page(vmpage); - vmf->page = NULL; - } - } - cl_io_fini(env, io); - - vma->vm_flags |= ra_flags; - -out: - cl_env_put(env, &refcheck); - if (result != 0 && !(fault_ret & VM_FAULT_RETRY)) - fault_ret |= to_fault_error(result); - - CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result); - return fault_ret; -} - -static int ll_fault(struct vm_fault *vmf) -{ - int count = 0; - bool printed = false; - int result; - sigset_t old, new; - - /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite - * so that it can be killed by admin but not cause segfault by - * other signals. - */ - siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM)); - sigprocmask(SIG_BLOCK, &new, &old); - -restart: - result = ll_fault0(vmf->vma, vmf); - LASSERT(!(result & VM_FAULT_LOCKED)); - if (result == 0) { - struct page *vmpage = vmf->page; - - /* check if this page has been truncated */ - lock_page(vmpage); - if (unlikely(!vmpage->mapping)) { /* unlucky */ - unlock_page(vmpage); - put_page(vmpage); - vmf->page = NULL; - - if (!printed && ++count > 16) { - CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n", - current->comm); - printed = true; - } - - goto restart; - } - - result = VM_FAULT_LOCKED; - } - sigprocmask(SIG_SETMASK, &old, NULL); - return result; -} - -static int ll_page_mkwrite(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - int count = 0; - bool printed = false; - bool retry; - int result; - - file_update_time(vma->vm_file); - do { - retry = false; - result = ll_page_mkwrite0(vma, vmf->page, &retry); - - if (!printed && ++count > 16) { - const struct dentry *de = vma->vm_file->f_path.dentry; - - CWARN("app(%s): the page %lu of 
file " DFID " is under heavy contention\n", - current->comm, vmf->pgoff, - PFID(ll_inode2fid(de->d_inode))); - printed = true; - } - } while (retry); - - switch (result) { - case 0: - LASSERT(PageLocked(vmf->page)); - result = VM_FAULT_LOCKED; - break; - case -ENODATA: - case -EAGAIN: - case -EFAULT: - result = VM_FAULT_NOPAGE; - break; - case -ENOMEM: - result = VM_FAULT_OOM; - break; - default: - result = VM_FAULT_SIGBUS; - break; - } - - return result; -} - -/** - * To avoid cancel the locks covering mmapped region for lock cache pressure, - * we track the mapped vma count in vvp_object::vob_mmap_cnt. - */ -static void ll_vm_open(struct vm_area_struct *vma) -{ - struct inode *inode = file_inode(vma->vm_file); - struct vvp_object *vob = cl_inode2vvp(inode); - - LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0); - atomic_inc(&vob->vob_mmap_cnt); -} - -/** - * Dual to ll_vm_open(). - */ -static void ll_vm_close(struct vm_area_struct *vma) -{ - struct inode *inode = file_inode(vma->vm_file); - struct vvp_object *vob = cl_inode2vvp(inode); - - atomic_dec(&vob->vob_mmap_cnt); - LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0); -} - -/* XXX put nice comment here. 
talk about __free_pte -> dirty pages and - * nopage's reference passing to the pte - */ -int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) -{ - int rc = -ENOENT; - - LASSERTF(last > first, "last %llu first %llu\n", last, first); - if (mapping_mapped(mapping)) { - rc = 0; - unmap_mapping_range(mapping, first + PAGE_SIZE - 1, - last - first + 1, 0); - } - - return rc; -} - -static const struct vm_operations_struct ll_file_vm_ops = { - .fault = ll_fault, - .page_mkwrite = ll_page_mkwrite, - .open = ll_vm_open, - .close = ll_vm_close, -}; - -int ll_file_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct inode *inode = file_inode(file); - int rc; - - if (ll_file_nolock(file)) - return -EOPNOTSUPP; - - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1); - rc = generic_file_mmap(file, vma); - if (rc == 0) { - vma->vm_ops = &ll_file_vm_ops; - vma->vm_ops->open(vma); - /* update the inode's size and mtime */ - rc = ll_glimpse_size(inode); - } - - return rc; -} diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c deleted file mode 100644 index 14172688d55f..000000000000 --- a/drivers/staging/lustre/lustre/llite/llite_nfs.c +++ /dev/null @@ -1,375 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/lustre/llite/llite_nfs.c - * - * NFS export of Lustre Light File System - * - * Author: Yury Umanets - * Author: Huang Hua - */ - -#define DEBUG_SUBSYSTEM S_LLITE -#include "llite_internal.h" -#include - -__u32 get_uuid2int(const char *name, int len) -{ - __u32 key0 = 0x12a3fe2d, key1 = 0x37abe8f9; - - while (len--) { - __u32 key = key1 + (key0 ^ (*name++ * 7152373)); - - if (key & 0x80000000) - key -= 0x7fffffff; - key1 = key0; - key0 = key; - } - return (key0 << 1); -} - -void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid) -{ - __u64 key = 0, key0 = 0x12a3fe2d, key1 = 0x37abe8f9; - - while (len--) { - key = key1 + (key0 ^ (*name++ * 7152373)); - if (key & 0x8000000000000000ULL) - key -= 0x7fffffffffffffffULL; - key1 = key0; - key0 = key; - } - - fsid->val[0] = key; - fsid->val[1] = key >> 32; -} - -struct inode *search_inode_for_lustre(struct super_block *sb, - const struct lu_fid *fid) -{ - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct ptlrpc_request *req = NULL; - struct inode *inode = NULL; - int eadatalen = 0; - unsigned long hash = cl_fid_build_ino(fid, - ll_need_32bit_api(sbi)); - struct md_op_data *op_data; - int rc; - - CDEBUG(D_INFO, "searching inode for:(%lu," DFID ")\n", hash, PFID(fid)); - - inode = ilookup5(sb, hash, ll_test_inode_by_fid, (void *)fid); - if (inode) - return inode; - - rc = ll_get_default_mdsize(sbi, &eadatalen); - if (rc) - return ERR_PTR(rc); - - /* Because inode is NULL, ll_prep_md_op_data can not - * be used here. 
So we allocate op_data ourselves - */ - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) - return ERR_PTR(-ENOMEM); - - op_data->op_fid1 = *fid; - op_data->op_mode = eadatalen; - op_data->op_valid = OBD_MD_FLEASIZE; - - /* mds_fid2dentry ignores f_type */ - rc = md_getattr(sbi->ll_md_exp, op_data, &req); - kfree(op_data); - if (rc) { - CDEBUG(D_INFO, "can't get object attrs, fid " DFID ", rc %d\n", - PFID(fid), rc); - return ERR_PTR(rc); - } - rc = ll_prep_inode(&inode, req, sb, NULL); - ptlrpc_req_finished(req); - if (rc) - return ERR_PTR(rc); - - return inode; -} - -struct lustre_nfs_fid { - struct lu_fid lnf_child; - struct lu_fid lnf_parent; -}; - -static struct dentry * -ll_iget_for_nfs(struct super_block *sb, - struct lu_fid *fid, struct lu_fid *parent) -{ - struct inode *inode; - struct dentry *result; - - if (!fid_is_sane(fid)) - return ERR_PTR(-ESTALE); - - CDEBUG(D_INFO, "Get dentry for fid: " DFID "\n", PFID(fid)); - - inode = search_inode_for_lustre(sb, fid); - if (IS_ERR(inode)) - return ERR_CAST(inode); - - if (is_bad_inode(inode)) { - /* we didn't find the right inode.. */ - iput(inode); - return ERR_PTR(-ESTALE); - } - - result = d_obtain_alias(inode); - if (IS_ERR(result)) { - iput(inode); - return result; - } - - /** - * In case d_obtain_alias() found a disconnected dentry, always update - * lli_pfid to allow later operation (normally open) have parent fid, - * which may be used by MDS to create data. - */ - if (parent) { - struct ll_inode_info *lli = ll_i2info(inode); - - spin_lock(&lli->lli_lock); - lli->lli_pfid = *parent; - spin_unlock(&lli->lli_lock); - } - - /* N.B. 
d_obtain_alias() drops inode ref on error */ - result = d_obtain_alias(inode); - if (!IS_ERR(result)) { - /* - * Need to signal to the ll_intent_file_open that - * we came from NFS and so opencache needs to be - * enabled for this one - */ - ll_d2d(result)->lld_nfs_dentry = 1; - } - - return result; -} - -/** - * \a connectable - is nfsd will connect himself or this should be done - * at lustre - * - * The return value is file handle type: - * 1 -- contains child file handle; - * 2 -- contains child file handle and parent file handle; - * 255 -- error. - */ -static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen, - struct inode *parent) -{ - int fileid_len = sizeof(struct lustre_nfs_fid) / 4; - struct lustre_nfs_fid *nfs_fid = (void *)fh; - - CDEBUG(D_INFO, "%s: encoding for (" DFID ") maxlen=%d minlen=%d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), *plen, fileid_len); - - if (*plen < fileid_len) { - *plen = fileid_len; - return FILEID_INVALID; - } - - nfs_fid->lnf_child = *ll_inode2fid(inode); - if (parent) - nfs_fid->lnf_parent = *ll_inode2fid(parent); - else - fid_zero(&nfs_fid->lnf_parent); - *plen = fileid_len; - - return FILEID_LUSTRE; -} - -static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, - int namelen, loff_t hash, u64 ino, - unsigned int type) -{ - /* It is hack to access lde_fid for comparison with lgd_fid. - * So the input 'name' must be part of the 'lu_dirent'. 
- */ - struct lu_dirent *lde = container_of((void*)name, struct lu_dirent, lde_name); - struct ll_getname_data *lgd = - container_of(ctx, struct ll_getname_data, ctx); - struct lu_fid fid; - - fid_le_to_cpu(&fid, &lde->lde_fid); - if (lu_fid_eq(&fid, &lgd->lgd_fid)) { - memcpy(lgd->lgd_name, name, namelen); - lgd->lgd_name[namelen] = 0; - lgd->lgd_found = 1; - } - return lgd->lgd_found; -} - -static int ll_get_name(struct dentry *dentry, char *name, - struct dentry *child) -{ - struct inode *dir = d_inode(dentry); - int rc; - struct ll_getname_data lgd = { - .lgd_name = name, - .lgd_fid = ll_i2info(d_inode(child))->lli_fid, - .ctx.actor = ll_nfs_get_name_filldir, - }; - struct md_op_data *op_data; - __u64 pos = 0; - - if (!dir || !S_ISDIR(dir->i_mode)) { - rc = -ENOTDIR; - goto out; - } - - if (!dir->i_fop) { - rc = -EINVAL; - goto out; - } - - op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0, - LUSTRE_OPC_ANY, dir); - if (IS_ERR(op_data)) { - rc = PTR_ERR(op_data); - goto out; - } - - op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages; - inode_lock(dir); - rc = ll_dir_read(dir, &pos, op_data, &lgd.ctx); - inode_unlock(dir); - ll_finish_md_op_data(op_data); - if (!rc && !lgd.lgd_found) - rc = -ENOENT; -out: - return rc; -} - -static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid, - int fh_len, int fh_type) -{ - struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid; - - if (fh_type != FILEID_LUSTRE) - return ERR_PTR(-EPROTO); - - return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent); -} - -static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid, - int fh_len, int fh_type) -{ - struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid; - - if (fh_type != FILEID_LUSTRE) - return ERR_PTR(-EPROTO); - - return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL); -} - -int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid) -{ - struct ptlrpc_request *req = NULL; - struct 
ll_sb_info *sbi; - struct mdt_body *body; - static const char dotdot[] = ".."; - struct md_op_data *op_data; - int rc; - int lmmsize; - - LASSERT(dir && S_ISDIR(dir->i_mode)); - - sbi = ll_s2sbi(dir->i_sb); - - CDEBUG(D_INFO, "%s: getting parent for (" DFID ")\n", - ll_get_fsname(dir->i_sb, NULL, 0), - PFID(ll_inode2fid(dir))); - - rc = ll_get_default_mdsize(sbi, &lmmsize); - if (rc != 0) - return rc; - - op_data = ll_prep_md_op_data(NULL, dir, NULL, dotdot, - strlen(dotdot), lmmsize, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - rc = md_getattr_name(sbi->ll_md_exp, op_data, &req); - ll_finish_md_op_data(op_data); - if (rc) { - CERROR("%s: failure inode " DFID " get parent: rc = %d\n", - ll_get_fsname(dir->i_sb, NULL, 0), - PFID(ll_inode2fid(dir)), rc); - return rc; - } - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - /* - * LU-3952: MDT may lost the FID of its parent, we should not crash - * the NFS server, ll_iget_for_nfs() will handle the error. 
- */ - if (body->mbo_valid & OBD_MD_FLID) { - CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n", - PFID(ll_inode2fid(dir)), PFID(&body->mbo_fid1)); - *parent_fid = body->mbo_fid1; - } - - ptlrpc_req_finished(req); - return 0; -} - -static struct dentry *ll_get_parent(struct dentry *dchild) -{ - struct lu_fid parent_fid = { 0 }; - struct dentry *dentry; - int rc; - - rc = ll_dir_get_parent_fid(dchild->d_inode, &parent_fid); - if (rc) - return ERR_PTR(rc); - - dentry = ll_iget_for_nfs(dchild->d_inode->i_sb, &parent_fid, NULL); - - return dentry; -} - -const struct export_operations lustre_export_operations = { - .get_parent = ll_get_parent, - .encode_fh = ll_encode_fh, - .get_name = ll_get_name, - .fh_to_dentry = ll_fh_to_dentry, - .fh_to_parent = ll_fh_to_parent, -}; diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c deleted file mode 100644 index 49bf1b7ee311..000000000000 --- a/drivers/staging/lustre/lustre/llite/lproc_llite.c +++ /dev/null @@ -1,1659 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 
- * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include - -#include "llite_internal.h" -#include "vvp_internal.h" - -/* debugfs llite mount point registration */ -static const struct file_operations ll_rw_extents_stats_fops; -static const struct file_operations ll_rw_extents_stats_pp_fops; -static const struct file_operations ll_rw_offset_stats_fops; - -static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - struct obd_statfs osfs; - int rc; - - rc = ll_statfs_internal(sbi->ll_sb, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) - return sprintf(buf, "%u\n", osfs.os_bsize); - - return rc; -} -LUSTRE_RO_ATTR(blocksize); - -static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - struct obd_statfs osfs; - int rc; - - rc = ll_statfs_internal(sbi->ll_sb, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) { - __u32 blk_size = osfs.os_bsize >> 10; - __u64 result = osfs.os_blocks; - - while (blk_size >>= 1) - result <<= 1; - - rc = sprintf(buf, "%llu\n", result); - } - - return rc; -} -LUSTRE_RO_ATTR(kbytestotal); - -static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - struct obd_statfs osfs; - int rc; - - rc = ll_statfs_internal(sbi->ll_sb, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) { - __u32 blk_size = osfs.os_bsize >> 10; - __u64 result = osfs.os_bfree; - - while (blk_size 
>>= 1) - result <<= 1; - - rc = sprintf(buf, "%llu\n", result); - } - - return rc; -} -LUSTRE_RO_ATTR(kbytesfree); - -static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - struct obd_statfs osfs; - int rc; - - rc = ll_statfs_internal(sbi->ll_sb, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) { - __u32 blk_size = osfs.os_bsize >> 10; - __u64 result = osfs.os_bavail; - - while (blk_size >>= 1) - result <<= 1; - - rc = sprintf(buf, "%llu\n", result); - } - - return rc; -} -LUSTRE_RO_ATTR(kbytesavail); - -static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - struct obd_statfs osfs; - int rc; - - rc = ll_statfs_internal(sbi->ll_sb, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) - return sprintf(buf, "%llu\n", osfs.os_files); - - return rc; -} -LUSTRE_RO_ATTR(filestotal); - -static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - struct obd_statfs osfs; - int rc; - - rc = ll_statfs_internal(sbi->ll_sb, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) - return sprintf(buf, "%llu\n", osfs.os_ffree); - - return rc; -} -LUSTRE_RO_ATTR(filesfree); - -static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - return sprintf(buf, "local client\n"); -} -LUSTRE_RO_ATTR(client_type); - -static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - - return sprintf(buf, "%s\n", sbi->ll_sb->s_type->name); -} -LUSTRE_RO_ATTR(fstype); - -static ssize_t 
uuid_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - - return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid); -} -LUSTRE_RO_ATTR(uuid); - -static int ll_site_stats_seq_show(struct seq_file *m, void *v) -{ - struct super_block *sb = m->private; - - /* - * See description of statistical counters in struct cl_site, and - * struct lu_site. - */ - return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m); -} - -LPROC_SEQ_FOPS_RO(ll_site_stats); - -static ssize_t max_read_ahead_mb_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - long pages_number; - int mult; - - spin_lock(&sbi->ll_lock); - pages_number = sbi->ll_ra_info.ra_max_pages; - spin_unlock(&sbi->ll_lock); - - mult = 1 << (20 - PAGE_SHIFT); - return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); -} - -static ssize_t max_read_ahead_mb_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long pages_number; - - rc = kstrtoul(buffer, 10, &pages_number); - if (rc) - return rc; - - pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ - - if (pages_number > totalram_pages / 2) { - CERROR("can't set file readahead more than %lu MB\n", - totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/ - return -ERANGE; - } - - spin_lock(&sbi->ll_lock); - sbi->ll_ra_info.ra_max_pages = pages_number; - spin_unlock(&sbi->ll_lock); - - return count; -} -LUSTRE_RW_ATTR(max_read_ahead_mb); - -static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - long pages_number; - int mult; - - spin_lock(&sbi->ll_lock); - pages_number = 
sbi->ll_ra_info.ra_max_pages_per_file; - spin_unlock(&sbi->ll_lock); - - mult = 1 << (20 - PAGE_SHIFT); - return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); -} - -static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long pages_number; - - rc = kstrtoul(buffer, 10, &pages_number); - if (rc) - return rc; - - if (pages_number > sbi->ll_ra_info.ra_max_pages) { - CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n", - sbi->ll_ra_info.ra_max_pages); - return -ERANGE; - } - - spin_lock(&sbi->ll_lock); - sbi->ll_ra_info.ra_max_pages_per_file = pages_number; - spin_unlock(&sbi->ll_lock); - - return count; -} -LUSTRE_RW_ATTR(max_read_ahead_per_file_mb); - -static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - long pages_number; - int mult; - - spin_lock(&sbi->ll_lock); - pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages; - spin_unlock(&sbi->ll_lock); - - mult = 1 << (20 - PAGE_SHIFT); - return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); -} - -static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long pages_number; - - rc = kstrtoul(buffer, 10, &pages_number); - if (rc) - return rc; - - /* Cap this at the current max readahead window size, the readahead - * algorithm does this anyway so it's pointless to set it larger. 
- */ - if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { - CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n", - sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT)); - return -ERANGE; - } - - spin_lock(&sbi->ll_lock); - sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number; - spin_unlock(&sbi->ll_lock); - - return count; -} -LUSTRE_RW_ATTR(max_read_ahead_whole_mb); - -static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v) -{ - struct super_block *sb = m->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct cl_client_cache *cache = sbi->ll_cache; - int shift = 20 - PAGE_SHIFT; - long max_cached_mb; - long unused_mb; - - max_cached_mb = cache->ccc_lru_max >> shift; - unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift; - seq_printf(m, - "users: %d\n" - "max_cached_mb: %ld\n" - "used_mb: %ld\n" - "unused_mb: %ld\n" - "reclaim_count: %u\n", - atomic_read(&cache->ccc_users), - max_cached_mb, - max_cached_mb - unused_mb, - unused_mb, - cache->ccc_lru_shrinkers); - return 0; -} - -static ssize_t ll_max_cached_mb_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct super_block *sb = ((struct seq_file *)file->private_data)->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct cl_client_cache *cache = sbi->ll_cache; - struct lu_env *env; - long diff = 0; - long nrpages = 0; - u16 refcheck; - long pages_number; - int mult; - long rc; - u64 val; - char kernbuf[128]; - - if (count >= sizeof(kernbuf)) - return -EINVAL; - - if (copy_from_user(kernbuf, buffer, count)) - return -EFAULT; - kernbuf[count] = 0; - - mult = 1 << (20 - PAGE_SHIFT); - buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) - - kernbuf; - rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult); - if (rc) - return rc; - - if (val > LONG_MAX) - return -ERANGE; - pages_number = (long)val; - - if (pages_number < 0 || pages_number > totalram_pages) { - 
CERROR("%s: can't set max cache more than %lu MB\n", - ll_get_fsname(sb, NULL, 0), - totalram_pages >> (20 - PAGE_SHIFT)); - return -ERANGE; - } - - spin_lock(&sbi->ll_lock); - diff = pages_number - cache->ccc_lru_max; - spin_unlock(&sbi->ll_lock); - - /* easy - add more LRU slots. */ - if (diff >= 0) { - atomic_long_add(diff, &cache->ccc_lru_left); - rc = 0; - goto out; - } - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return 0; - - diff = -diff; - while (diff > 0) { - long tmp; - - /* reduce LRU budget from free slots. */ - do { - long ov, nv; - - ov = atomic_long_read(&cache->ccc_lru_left); - if (ov == 0) - break; - - nv = ov > diff ? ov - diff : 0; - rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv); - if (likely(ov == rc)) { - diff -= ov - nv; - nrpages += ov - nv; - break; - } - } while (1); - - if (diff <= 0) - break; - - if (!sbi->ll_dt_exp) { /* being initialized */ - rc = 0; - goto out; - } - - /* difficult - have to ask OSCs to drop LRU slots. */ - tmp = diff << 1; - rc = obd_set_info_async(env, sbi->ll_dt_exp, - sizeof(KEY_CACHE_LRU_SHRINK), - KEY_CACHE_LRU_SHRINK, - sizeof(tmp), &tmp, NULL); - if (rc < 0) - break; - } - cl_env_put(env, &refcheck); - -out: - if (rc >= 0) { - spin_lock(&sbi->ll_lock); - cache->ccc_lru_max = pages_number; - spin_unlock(&sbi->ll_lock); - rc = count; - } else { - atomic_long_add(nrpages, &cache->ccc_lru_left); - } - return rc; -} - -LPROC_SEQ_FOPS(ll_max_cached_mb); - -static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - - return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 
1 : 0); -} - -static ssize_t checksum_pages_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long val; - - if (!sbi->ll_dt_exp) - /* Not set up yet */ - return -EAGAIN; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - if (val) - sbi->ll_flags |= LL_SBI_CHECKSUM; - else - sbi->ll_flags &= ~LL_SBI_CHECKSUM; - - rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM), - KEY_CHECKSUM, sizeof(val), &val, NULL); - if (rc) - CWARN("Failed to set OSC checksum flags: %d\n", rc); - - return count; -} -LUSTRE_RW_ATTR(checksum_pages); - -static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf, - enum stats_track_type type) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - - if (sbi->ll_stats_track_type == type) - return sprintf(buf, "%d\n", sbi->ll_stats_track_id); - else if (sbi->ll_stats_track_type == STATS_TRACK_ALL) - return sprintf(buf, "0 (all)\n"); - else - return sprintf(buf, "untracked\n"); -} - -static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer, - size_t count, - enum stats_track_type type) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long pid; - - rc = kstrtoul(buffer, 10, &pid); - if (rc) - return rc; - sbi->ll_stats_track_id = pid; - if (pid == 0) - sbi->ll_stats_track_type = STATS_TRACK_ALL; - else - sbi->ll_stats_track_type = type; - lprocfs_clear_stats(sbi->ll_stats); - return count; -} - -static ssize_t stats_track_pid_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - return ll_rd_track_id(kobj, buf, STATS_TRACK_PID); -} - -static ssize_t stats_track_pid_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID); -} -LUSTRE_RW_ATTR(stats_track_pid); - -static 
ssize_t stats_track_ppid_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID); -} - -static ssize_t stats_track_ppid_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID); -} -LUSTRE_RW_ATTR(stats_track_ppid); - -static ssize_t stats_track_gid_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - return ll_rd_track_id(kobj, buf, STATS_TRACK_GID); -} - -static ssize_t stats_track_gid_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID); -} -LUSTRE_RW_ATTR(stats_track_gid); - -static ssize_t statahead_max_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - - return sprintf(buf, "%u\n", sbi->ll_sa_max); -} - -static ssize_t statahead_max_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - if (val <= LL_SA_RPC_MAX) - sbi->ll_sa_max = val; - else - CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n", - val, LL_SA_RPC_MAX); - - return count; -} -LUSTRE_RW_ATTR(statahead_max); - -static ssize_t statahead_agl_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - - return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 
1 : 0); -} - -static ssize_t statahead_agl_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - if (val) - sbi->ll_flags |= LL_SBI_AGL_ENABLED; - else - sbi->ll_flags &= ~LL_SBI_AGL_ENABLED; - - return count; -} -LUSTRE_RW_ATTR(statahead_agl); - -static int ll_statahead_stats_seq_show(struct seq_file *m, void *v) -{ - struct super_block *sb = m->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - - seq_printf(m, - "statahead total: %u\n" - "statahead wrong: %u\n" - "agl total: %u\n", - atomic_read(&sbi->ll_sa_total), - atomic_read(&sbi->ll_sa_wrong), - atomic_read(&sbi->ll_agl_total)); - return 0; -} - -LPROC_SEQ_FOPS_RO(ll_statahead_stats); - -static ssize_t lazystatfs_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - - return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_LAZYSTATFS ? 1 : 0); -} - -static ssize_t lazystatfs_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - if (val) - sbi->ll_flags |= LL_SBI_LAZYSTATFS; - else - sbi->ll_flags &= ~LL_SBI_LAZYSTATFS; - - return count; -} -LUSTRE_RW_ATTR(lazystatfs); - -static ssize_t max_easize_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - unsigned int ealen; - int rc; - - rc = ll_get_max_mdsize(sbi, &ealen); - if (rc) - return rc; - - return sprintf(buf, "%u\n", ealen); -} -LUSTRE_RO_ATTR(max_easize); - -/** - * Get default_easize. 
- * - * \see client_obd::cl_default_mds_easize - * - * \param[in] kobj kernel object for sysfs tree - * \param[in] attr attribute of this kernel object - * \param[in] buf buffer to write data into - * - * \retval positive \a count on success - * \retval negative negated errno on failure - */ -static ssize_t default_easize_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - unsigned int ealen; - int rc; - - rc = ll_get_default_mdsize(sbi, &ealen); - if (rc) - return rc; - - return sprintf(buf, "%u\n", ealen); -} - -/** - * Set default_easize. - * - * Range checking on the passed value is handled by - * ll_set_default_mdsize(). - * - * \see client_obd::cl_default_mds_easize - * - * \param[in] kobj kernel object for sysfs tree - * \param[in] attr attribute of this kernel object - * \param[in] buffer string passed from user space - * \param[in] count \a buffer length - * - * \retval positive \a count on success - * \retval negative negated errno on failure - */ -static ssize_t default_easize_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - unsigned long val; - int rc; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - rc = ll_set_default_mdsize(sbi, val); - if (rc) - return rc; - - return count; -} -LUSTRE_RW_ATTR(default_easize); - -static int ll_sbi_flags_seq_show(struct seq_file *m, void *v) -{ - const char *str[] = LL_SBI_FLAGS; - struct super_block *sb = m->private; - int flags = ll_s2sbi(sb)->ll_flags; - int i = 0; - - while (flags != 0) { - if (ARRAY_SIZE(str) <= i) { - CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n", - ll_get_fsname(sb, NULL, 0)); - return -EINVAL; - } - - if (flags & 0x1) - seq_printf(m, "%s ", str[i]); - flags >>= 1; - ++i; - } - seq_puts(m, "\b\n"); - return 0; -} - 
-LPROC_SEQ_FOPS_RO(ll_sbi_flags); - -static ssize_t xattr_cache_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - - return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled); -} - -static ssize_t xattr_cache_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - if (val != 0 && val != 1) - return -ERANGE; - - if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE)) - return -ENOTSUPP; - - sbi->ll_xattr_cache_enabled = val; - - return count; -} -LUSTRE_RW_ATTR(xattr_cache); - -static int ll_unstable_stats_seq_show(struct seq_file *m, void *v) -{ - struct super_block *sb = m->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct cl_client_cache *cache = sbi->ll_cache; - long pages; - int mb; - - pages = atomic_long_read(&cache->ccc_unstable_nr); - mb = (pages * PAGE_SIZE) >> 20; - - seq_printf(m, - "unstable_check: %8d\n" - "unstable_pages: %12ld\n" - "unstable_mb: %8d\n", - cache->ccc_unstable_check, pages, mb); - - return 0; -} - -static ssize_t ll_unstable_stats_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct super_block *sb = ((struct seq_file *)file->private_data)->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - char kernbuf[128]; - int val, rc; - - if (!count) - return 0; - if (count >= sizeof(kernbuf)) - return -EINVAL; - - if (copy_from_user(kernbuf, buffer, count)) - return -EFAULT; - kernbuf[count] = 0; - - buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) - - kernbuf; - rc = lprocfs_write_helper(buffer, count, &val); - if (rc < 0) - return rc; - - /* borrow lru lock to set the value */ - spin_lock(&sbi->ll_cache->ccc_lru_lock); - sbi->ll_cache->ccc_unstable_check = !!val; - 
spin_unlock(&sbi->ll_cache->ccc_lru_lock); - - return count; -} -LPROC_SEQ_FOPS(ll_unstable_stats); - -static int ll_root_squash_seq_show(struct seq_file *m, void *v) -{ - struct super_block *sb = m->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct root_squash_info *squash = &sbi->ll_squash; - - seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid); - return 0; -} - -static ssize_t ll_root_squash_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct seq_file *m = file->private_data; - struct super_block *sb = m->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct root_squash_info *squash = &sbi->ll_squash; - - return lprocfs_wr_root_squash(buffer, count, squash, - ll_get_fsname(sb, NULL, 0)); -} -LPROC_SEQ_FOPS(ll_root_squash); - -static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v) -{ - struct super_block *sb = m->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct root_squash_info *squash = &sbi->ll_squash; - int len; - - down_read(&squash->rsi_sem); - if (!list_empty(&squash->rsi_nosquash_nids)) { - len = cfs_print_nidlist(m->buf + m->count, m->size - m->count, - &squash->rsi_nosquash_nids); - m->count += len; - seq_puts(m, "\n"); - } else { - seq_puts(m, "NONE\n"); - } - up_read(&squash->rsi_sem); - - return 0; -} - -static ssize_t ll_nosquash_nids_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct seq_file *m = file->private_data; - struct super_block *sb = m->private; - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct root_squash_info *squash = &sbi->ll_squash; - int rc; - - rc = lprocfs_wr_nosquash_nids(buffer, count, squash, - ll_get_fsname(sb, NULL, 0)); - if (rc < 0) - return rc; - - ll_compute_rootsquash_state(sbi); - - return rc; -} - -LPROC_SEQ_FOPS(ll_nosquash_nids); - -static struct lprocfs_vars lprocfs_llite_obd_vars[] = { - /* { "mntpt_path", ll_rd_path, 0, 0 }, */ - { "site", &ll_site_stats_fops, NULL, 0 }, - /* { 
"filegroups", lprocfs_rd_filegroups, 0, 0 }, */ - { "max_cached_mb", &ll_max_cached_mb_fops, NULL }, - { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 }, - { "unstable_stats", &ll_unstable_stats_fops, NULL }, - { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 }, - { .name = "root_squash", - .fops = &ll_root_squash_fops }, - { .name = "nosquash_nids", - .fops = &ll_nosquash_nids_fops }, - { NULL } -}; - -#define MAX_STRING_SIZE 128 - -static struct attribute *llite_attrs[] = { - &lustre_attr_blocksize.attr, - &lustre_attr_kbytestotal.attr, - &lustre_attr_kbytesfree.attr, - &lustre_attr_kbytesavail.attr, - &lustre_attr_filestotal.attr, - &lustre_attr_filesfree.attr, - &lustre_attr_client_type.attr, - &lustre_attr_fstype.attr, - &lustre_attr_uuid.attr, - &lustre_attr_max_read_ahead_mb.attr, - &lustre_attr_max_read_ahead_per_file_mb.attr, - &lustre_attr_max_read_ahead_whole_mb.attr, - &lustre_attr_checksum_pages.attr, - &lustre_attr_stats_track_pid.attr, - &lustre_attr_stats_track_ppid.attr, - &lustre_attr_stats_track_gid.attr, - &lustre_attr_statahead_max.attr, - &lustre_attr_statahead_agl.attr, - &lustre_attr_lazystatfs.attr, - &lustre_attr_max_easize.attr, - &lustre_attr_default_easize.attr, - &lustre_attr_xattr_cache.attr, - NULL, -}; - -static void llite_sb_release(struct kobject *kobj) -{ - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); - complete(&sbi->ll_kobj_unregister); -} - -static struct kobj_type llite_ktype = { - .default_attrs = llite_attrs, - .sysfs_ops = &lustre_sysfs_ops, - .release = llite_sb_release, -}; - -static const struct llite_file_opcode { - __u32 opcode; - __u32 type; - const char *opname; -} llite_opcode_table[LPROC_LL_FILE_OPCODES] = { - /* file operation */ - { LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" }, - { LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" }, - { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES, - "read_bytes" }, - { LPROC_LL_WRITE_BYTES, 
LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES, - "write_bytes" }, - { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES, - "brw_read" }, - { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES, - "brw_write" }, - { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" }, - { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" }, - { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" }, - { LPROC_LL_MAP, LPROCFS_TYPE_REGS, "mmap" }, - { LPROC_LL_LLSEEK, LPROCFS_TYPE_REGS, "seek" }, - { LPROC_LL_FSYNC, LPROCFS_TYPE_REGS, "fsync" }, - { LPROC_LL_READDIR, LPROCFS_TYPE_REGS, "readdir" }, - /* inode operation */ - { LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" }, - { LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" }, - { LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" }, - { LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" }, - /* dir inode operation */ - { LPROC_LL_CREATE, LPROCFS_TYPE_REGS, "create" }, - { LPROC_LL_LINK, LPROCFS_TYPE_REGS, "link" }, - { LPROC_LL_UNLINK, LPROCFS_TYPE_REGS, "unlink" }, - { LPROC_LL_SYMLINK, LPROCFS_TYPE_REGS, "symlink" }, - { LPROC_LL_MKDIR, LPROCFS_TYPE_REGS, "mkdir" }, - { LPROC_LL_RMDIR, LPROCFS_TYPE_REGS, "rmdir" }, - { LPROC_LL_MKNOD, LPROCFS_TYPE_REGS, "mknod" }, - { LPROC_LL_RENAME, LPROCFS_TYPE_REGS, "rename" }, - /* special inode operation */ - { LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" }, - { LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" }, - { LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" }, - { LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" }, - { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" }, - { LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" }, - { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" }, - { LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" }, -}; - -void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) -{ - if (!sbi->ll_stats) - return; - if (sbi->ll_stats_track_type == STATS_TRACK_ALL) - lprocfs_counter_add(sbi->ll_stats, op, count); - else if 
(sbi->ll_stats_track_type == STATS_TRACK_PID && - sbi->ll_stats_track_id == current->pid) - lprocfs_counter_add(sbi->ll_stats, op, count); - else if (sbi->ll_stats_track_type == STATS_TRACK_PPID && - sbi->ll_stats_track_id == current->real_parent->pid) - lprocfs_counter_add(sbi->ll_stats, op, count); - else if (sbi->ll_stats_track_type == STATS_TRACK_GID && - sbi->ll_stats_track_id == - from_kgid(&init_user_ns, current_gid())) - lprocfs_counter_add(sbi->ll_stats, op, count); -} -EXPORT_SYMBOL(ll_stats_ops_tally); - -static const char *ra_stat_string[] = { - [RA_STAT_HIT] = "hits", - [RA_STAT_MISS] = "misses", - [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive", - [RA_STAT_MISS_IN_WINDOW] = "miss inside window", - [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page", - [RA_STAT_FAILED_MATCH] = "failed lock match", - [RA_STAT_DISCARDED] = "read but discarded", - [RA_STAT_ZERO_LEN] = "zero length file", - [RA_STAT_ZERO_WINDOW] = "zero size window", - [RA_STAT_EOF] = "read-ahead to EOF", - [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue", - [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page", - [RA_STAT_FAILED_REACH_END] = "failed to reach end" -}; - -int ldebugfs_register_mountpoint(struct dentry *parent, - struct super_block *sb, char *osc, char *mdc) -{ - struct lustre_sb_info *lsi = s2lsi(sb); - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct obd_device *obd; - struct dentry *dir; - char name[MAX_STRING_SIZE + 1], *ptr; - int err, id, len; - - name[MAX_STRING_SIZE] = '\0'; - - LASSERT(sbi); - LASSERT(mdc); - LASSERT(osc); - - /* Get fsname */ - len = strlen(lsi->lsi_lmd->lmd_profile); - ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-'); - if (ptr && (strcmp(ptr, "-client") == 0)) - len -= 7; - - /* Mount info */ - snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len, - lsi->lsi_lmd->lmd_profile, sb); - - dir = debugfs_create_dir(name, parent); - sbi->ll_debugfs_entry = dir; - - debugfs_create_file("dump_page_cache", 0444, dir, sbi, - 
&vvp_dump_pgcache_file_ops); - debugfs_create_file("extents_stats", 0644, dir, sbi, - &ll_rw_extents_stats_fops); - debugfs_create_file("extents_stats_per_process", 0644, - dir, sbi, &ll_rw_extents_stats_pp_fops); - debugfs_create_file("offset_stats", 0644, dir, sbi, - &ll_rw_offset_stats_fops); - - /* File operations stats */ - sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES, - LPROCFS_STATS_FLAG_NONE); - if (!sbi->ll_stats) { - err = -ENOMEM; - goto out; - } - /* do counter init */ - for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) { - __u32 type = llite_opcode_table[id].type; - void *ptr = NULL; - - if (type & LPROCFS_TYPE_REGS) - ptr = "regs"; - else if (type & LPROCFS_TYPE_BYTES) - ptr = "bytes"; - else if (type & LPROCFS_TYPE_PAGES) - ptr = "pages"; - lprocfs_counter_init(sbi->ll_stats, - llite_opcode_table[id].opcode, - (type & LPROCFS_CNTR_AVGMINMAX), - llite_opcode_table[id].opname, ptr); - } - - debugfs_create_file("stats", 0644, sbi->ll_debugfs_entry, sbi->ll_stats, - &lprocfs_stats_seq_fops); - - sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string), - LPROCFS_STATS_FLAG_NONE); - if (!sbi->ll_ra_stats) { - err = -ENOMEM; - goto out; - } - - for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++) - lprocfs_counter_init(sbi->ll_ra_stats, id, 0, - ra_stat_string[id], "pages"); - - debugfs_create_file("stats", 0644, sbi->ll_debugfs_entry, - sbi->ll_ra_stats, &lprocfs_stats_seq_fops); - - ldebugfs_add_vars(sbi->ll_debugfs_entry, lprocfs_llite_obd_vars, sb); - - sbi->ll_kobj.kset = llite_kset; - init_completion(&sbi->ll_kobj_unregister); - err = kobject_init_and_add(&sbi->ll_kobj, &llite_ktype, NULL, - "%s", name); - if (err) - goto out; - - /* MDC info */ - obd = class_name2obd(mdc); - - err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj, - obd->obd_type->typ_name); - if (err) - goto out; - - /* OSC */ - obd = class_name2obd(osc); - - err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj, - obd->obd_type->typ_name); -out: - if (err) { - 
debugfs_remove_recursive(sbi->ll_debugfs_entry); - lprocfs_free_stats(&sbi->ll_ra_stats); - lprocfs_free_stats(&sbi->ll_stats); - } - return err; -} - -void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi) -{ - debugfs_remove_recursive(sbi->ll_debugfs_entry); - kobject_put(&sbi->ll_kobj); - wait_for_completion(&sbi->ll_kobj_unregister); - lprocfs_free_stats(&sbi->ll_ra_stats); - lprocfs_free_stats(&sbi->ll_stats); -} - -#undef MAX_STRING_SIZE - -#define pct(a, b) (b ? a * 100 / b : 0) - -static void ll_display_extents_info(struct ll_rw_extents_info *io_extents, - struct seq_file *seq, int which) -{ - unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum; - unsigned long start, end, r, w; - char *unitp = "KMGTPEZY"; - int i, units = 10; - struct per_process_info *pp_info = &io_extents->pp_extents[which]; - - read_cum = 0; - write_cum = 0; - start = 0; - - for (i = 0; i < LL_HIST_MAX; i++) { - read_tot += pp_info->pp_r_hist.oh_buckets[i]; - write_tot += pp_info->pp_w_hist.oh_buckets[i]; - } - - for (i = 0; i < LL_HIST_MAX; i++) { - r = pp_info->pp_r_hist.oh_buckets[i]; - w = pp_info->pp_w_hist.oh_buckets[i]; - read_cum += r; - write_cum += w; - end = 1 << (i + LL_HIST_START - units); - seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n", - start, *unitp, end, *unitp, - (i == LL_HIST_MAX - 1) ? 
'+' : ' ', - r, pct(r, read_tot), pct(read_cum, read_tot), - w, pct(w, write_tot), pct(write_cum, write_tot)); - start = end; - if (start == 1024) { - start = 1; - units += 10; - unitp++; - } - if (read_cum == read_tot && write_cum == write_tot) - break; - } -} - -static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v) -{ - struct timespec64 now; - struct ll_sb_info *sbi = seq->private; - struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info; - int k; - - ktime_get_real_ts64(&now); - - if (!sbi->ll_rw_stats_on) { - seq_printf(seq, "disabled\n" - "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n"); - return 0; - } - seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n", - (s64)now.tv_sec, (unsigned long)now.tv_nsec); - seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write"); - seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n", - "extents", "calls", "%", "cum%", - "calls", "%", "cum%"); - spin_lock(&sbi->ll_pp_extent_lock); - for (k = 0; k < LL_PROCESS_HIST_MAX; k++) { - if (io_extents->pp_extents[k].pid != 0) { - seq_printf(seq, "\nPID: %d\n", - io_extents->pp_extents[k].pid); - ll_display_extents_info(io_extents, seq, k); - } - } - spin_unlock(&sbi->ll_pp_extent_lock); - return 0; -} - -static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file, - const char __user *buf, - size_t len, - loff_t *off) -{ - struct seq_file *seq = file->private_data; - struct ll_sb_info *sbi = seq->private; - struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info; - int i; - int value = 1, rc = 0; - - if (len == 0) - return -EINVAL; - - rc = lprocfs_write_helper(buf, len, &value); - if (rc < 0 && len < 16) { - char kernbuf[16]; - - if (copy_from_user(kernbuf, buf, len)) - return -EFAULT; - kernbuf[len] = 0; - - if (kernbuf[len - 1] == '\n') - kernbuf[len - 1] = 0; - - if (strcmp(kernbuf, "disabled") == 0 || - strcmp(kernbuf, "Disabled") == 0) - value = 0; - } - - if (value == 0) - 
sbi->ll_rw_stats_on = 0; - else - sbi->ll_rw_stats_on = 1; - - spin_lock(&sbi->ll_pp_extent_lock); - for (i = 0; i < LL_PROCESS_HIST_MAX; i++) { - io_extents->pp_extents[i].pid = 0; - lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist); - lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist); - } - spin_unlock(&sbi->ll_pp_extent_lock); - return len; -} - -LPROC_SEQ_FOPS(ll_rw_extents_stats_pp); - -static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v) -{ - struct timespec64 now; - struct ll_sb_info *sbi = seq->private; - struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info; - - ktime_get_real_ts64(&now); - - if (!sbi->ll_rw_stats_on) { - seq_printf(seq, "disabled\n" - "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n"); - return 0; - } - seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n", - (u64)now.tv_sec, (unsigned long)now.tv_nsec); - - seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write"); - seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n", - "extents", "calls", "%", "cum%", - "calls", "%", "cum%"); - spin_lock(&sbi->ll_lock); - ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX); - spin_unlock(&sbi->ll_lock); - - return 0; -} - -static ssize_t ll_rw_extents_stats_seq_write(struct file *file, - const char __user *buf, - size_t len, loff_t *off) -{ - struct seq_file *seq = file->private_data; - struct ll_sb_info *sbi = seq->private; - struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info; - int i; - int value = 1, rc = 0; - - if (len == 0) - return -EINVAL; - - rc = lprocfs_write_helper(buf, len, &value); - if (rc < 0 && len < 16) { - char kernbuf[16]; - - if (copy_from_user(kernbuf, buf, len)) - return -EFAULT; - kernbuf[len] = 0; - - if (kernbuf[len - 1] == '\n') - kernbuf[len - 1] = 0; - - if (strcmp(kernbuf, "disabled") == 0 || - strcmp(kernbuf, "Disabled") == 0) - value = 0; - } - - if (value == 0) - sbi->ll_rw_stats_on = 0; - else - sbi->ll_rw_stats_on 
= 1; - - spin_lock(&sbi->ll_pp_extent_lock); - for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) { - io_extents->pp_extents[i].pid = 0; - lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist); - lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist); - } - spin_unlock(&sbi->ll_pp_extent_lock); - - return len; -} - -LPROC_SEQ_FOPS(ll_rw_extents_stats); - -void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid, - struct ll_file_data *file, loff_t pos, - size_t count, int rw) -{ - int i, cur = -1; - struct ll_rw_process_info *process; - struct ll_rw_process_info *offset; - int *off_count = &sbi->ll_rw_offset_entry_count; - int *process_count = &sbi->ll_offset_process_count; - struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info; - - if (!sbi->ll_rw_stats_on) - return; - process = sbi->ll_rw_process_info; - offset = sbi->ll_rw_offset_info; - - spin_lock(&sbi->ll_pp_extent_lock); - /* Extent statistics */ - for (i = 0; i < LL_PROCESS_HIST_MAX; i++) { - if (io_extents->pp_extents[i].pid == pid) { - cur = i; - break; - } - } - - if (cur == -1) { - /* new process */ - sbi->ll_extent_process_count = - (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX; - cur = sbi->ll_extent_process_count; - io_extents->pp_extents[cur].pid = pid; - lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist); - lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist); - } - - for (i = 0; (count >= (1 << LL_HIST_START << i)) && - (i < (LL_HIST_MAX - 1)); i++) - ; - if (rw == 0) { - io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++; - io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++; - } else { - io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++; - io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++; - } - spin_unlock(&sbi->ll_pp_extent_lock); - - spin_lock(&sbi->ll_process_lock); - /* Offset statistics */ - for (i = 0; i < LL_PROCESS_HIST_MAX; i++) { - if (process[i].rw_pid == pid) { - if (process[i].rw_last_file != file) { - 
process[i].rw_range_start = pos; - process[i].rw_last_file_pos = pos + count; - process[i].rw_smallest_extent = count; - process[i].rw_largest_extent = count; - process[i].rw_offset = 0; - process[i].rw_last_file = file; - spin_unlock(&sbi->ll_process_lock); - return; - } - if (process[i].rw_last_file_pos != pos) { - *off_count = - (*off_count + 1) % LL_OFFSET_HIST_MAX; - offset[*off_count].rw_op = process[i].rw_op; - offset[*off_count].rw_pid = pid; - offset[*off_count].rw_range_start = - process[i].rw_range_start; - offset[*off_count].rw_range_end = - process[i].rw_last_file_pos; - offset[*off_count].rw_smallest_extent = - process[i].rw_smallest_extent; - offset[*off_count].rw_largest_extent = - process[i].rw_largest_extent; - offset[*off_count].rw_offset = - process[i].rw_offset; - process[i].rw_op = rw; - process[i].rw_range_start = pos; - process[i].rw_smallest_extent = count; - process[i].rw_largest_extent = count; - process[i].rw_offset = pos - - process[i].rw_last_file_pos; - } - if (process[i].rw_smallest_extent > count) - process[i].rw_smallest_extent = count; - if (process[i].rw_largest_extent < count) - process[i].rw_largest_extent = count; - process[i].rw_last_file_pos = pos + count; - spin_unlock(&sbi->ll_process_lock); - return; - } - } - *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX; - process[*process_count].rw_pid = pid; - process[*process_count].rw_op = rw; - process[*process_count].rw_range_start = pos; - process[*process_count].rw_last_file_pos = pos + count; - process[*process_count].rw_smallest_extent = count; - process[*process_count].rw_largest_extent = count; - process[*process_count].rw_offset = 0; - process[*process_count].rw_last_file = file; - spin_unlock(&sbi->ll_process_lock); -} - -static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v) -{ - struct timespec64 now; - struct ll_sb_info *sbi = seq->private; - struct ll_rw_process_info *offset = sbi->ll_rw_offset_info; - struct ll_rw_process_info *process = 
sbi->ll_rw_process_info; - int i; - - ktime_get_real_ts64(&now); - - if (!sbi->ll_rw_stats_on) { - seq_printf(seq, "disabled\n" - "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n"); - return 0; - } - spin_lock(&sbi->ll_process_lock); - - seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n", - (s64)now.tv_sec, (unsigned long)now.tv_nsec); - seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n", - "R/W", "PID", "RANGE START", "RANGE END", - "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET"); - /* We stored the discontiguous offsets here; print them first */ - for (i = 0; i < LL_OFFSET_HIST_MAX; i++) { - if (offset[i].rw_pid != 0) - seq_printf(seq, - "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu", - offset[i].rw_op == READ ? 'R' : 'W', - offset[i].rw_pid, - offset[i].rw_range_start, - offset[i].rw_range_end, - (unsigned long)offset[i].rw_smallest_extent, - (unsigned long)offset[i].rw_largest_extent, - offset[i].rw_offset); - } - /* Then print the current offsets for each process */ - for (i = 0; i < LL_PROCESS_HIST_MAX; i++) { - if (process[i].rw_pid != 0) - seq_printf(seq, - "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu", - process[i].rw_op == READ ? 
'R' : 'W', - process[i].rw_pid, - process[i].rw_range_start, - process[i].rw_last_file_pos, - (unsigned long)process[i].rw_smallest_extent, - (unsigned long)process[i].rw_largest_extent, - process[i].rw_offset); - } - spin_unlock(&sbi->ll_process_lock); - - return 0; -} - -static ssize_t ll_rw_offset_stats_seq_write(struct file *file, - const char __user *buf, - size_t len, loff_t *off) -{ - struct seq_file *seq = file->private_data; - struct ll_sb_info *sbi = seq->private; - struct ll_rw_process_info *process_info = sbi->ll_rw_process_info; - struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info; - int value = 1, rc = 0; - - if (len == 0) - return -EINVAL; - - rc = lprocfs_write_helper(buf, len, &value); - - if (rc < 0 && len < 16) { - char kernbuf[16]; - - if (copy_from_user(kernbuf, buf, len)) - return -EFAULT; - kernbuf[len] = 0; - - if (kernbuf[len - 1] == '\n') - kernbuf[len - 1] = 0; - - if (strcmp(kernbuf, "disabled") == 0 || - strcmp(kernbuf, "Disabled") == 0) - value = 0; - } - - if (value == 0) - sbi->ll_rw_stats_on = 0; - else - sbi->ll_rw_stats_on = 1; - - spin_lock(&sbi->ll_process_lock); - sbi->ll_offset_process_count = 0; - sbi->ll_rw_offset_entry_count = 0; - memset(process_info, 0, sizeof(struct ll_rw_process_info) * - LL_PROCESS_HIST_MAX); - memset(offset_info, 0, sizeof(struct ll_rw_process_info) * - LL_OFFSET_HIST_MAX); - spin_unlock(&sbi->ll_process_lock); - - return len; -} - -LPROC_SEQ_FOPS(ll_rw_offset_stats); - -void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars) -{ - lvars->obd_vars = lprocfs_llite_obd_vars; -} diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c deleted file mode 100644 index d5f6d20afe8c..000000000000 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ /dev/null @@ -1,1207 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include -#include "llite_internal.h" - -static int ll_create_it(struct inode *dir, struct dentry *dentry, - struct lookup_intent *it); - -/* called from iget5_locked->find_inode() under inode_hash_lock spinlock */ -static int ll_test_inode(struct inode *inode, void *opaque) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct lustre_md *md = opaque; - - if (unlikely(!(md->body->mbo_valid & OBD_MD_FLID))) { - CERROR("MDS body missing FID\n"); - return 0; - } - - if (!lu_fid_eq(&lli->lli_fid, &md->body->mbo_fid1)) - return 0; - - return 1; -} - -static int ll_set_inode(struct inode *inode, void *opaque) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct mdt_body *body = ((struct lustre_md *)opaque)->body; - - if (unlikely(!(body->mbo_valid & OBD_MD_FLID))) { - CERROR("MDS body missing FID\n"); - return -EINVAL; - } - - lli->lli_fid = body->mbo_fid1; - if (unlikely(!(body->mbo_valid & OBD_MD_FLTYPE))) { - CERROR("Can not initialize inode " DFID - " without object type: valid = %#llx\n", - PFID(&lli->lli_fid), body->mbo_valid); - return -EINVAL; - } - - inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mbo_mode & S_IFMT); - if (unlikely(inode->i_mode == 0)) { - CERROR("Invalid inode " DFID " type\n", PFID(&lli->lli_fid)); - return -EINVAL; - } - - ll_lli_init(lli); - - return 0; -} - -/** - * Get an inode by inode number(@hash), which is already instantiated by - * the intent lookup). 
- */ -struct inode *ll_iget(struct super_block *sb, ino_t hash, - struct lustre_md *md) -{ - struct inode *inode; - int rc = 0; - - LASSERT(hash != 0); - inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md); - if (!inode) - return ERR_PTR(-ENOMEM); - - if (inode->i_state & I_NEW) { - rc = ll_read_inode2(inode, md); - if (!rc && S_ISREG(inode->i_mode) && - !ll_i2info(inode)->lli_clob) - rc = cl_file_inode_init(inode, md); - - if (rc) { - /* - * Let's clear directory lsm here, otherwise - * make_bad_inode() will reset the inode mode - * to regular, then ll_clear_inode will not - * be able to clear lsm_md - */ - if (S_ISDIR(inode->i_mode)) - ll_dir_clear_lsm_md(inode); - make_bad_inode(inode); - unlock_new_inode(inode); - iput(inode); - inode = ERR_PTR(rc); - } else { - unlock_new_inode(inode); - } - } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) { - rc = ll_update_inode(inode, md); - CDEBUG(D_VFSTRACE, "got inode: " DFID "(%p): rc = %d\n", - PFID(&md->body->mbo_fid1), inode, rc); - if (rc) { - if (S_ISDIR(inode->i_mode)) - ll_dir_clear_lsm_md(inode); - iput(inode); - inode = ERR_PTR(rc); - } - } - return inode; -} - -static void ll_invalidate_negative_children(struct inode *dir) -{ - struct dentry *dentry, *tmp_subdir; - - spin_lock(&dir->i_lock); - hlist_for_each_entry(dentry, &dir->i_dentry, d_u.d_alias) { - spin_lock(&dentry->d_lock); - if (!list_empty(&dentry->d_subdirs)) { - struct dentry *child; - - list_for_each_entry_safe(child, tmp_subdir, - &dentry->d_subdirs, - d_child) { - if (d_really_is_negative(child)) - d_lustre_invalidate(child, 1); - } - } - spin_unlock(&dentry->d_lock); - } - spin_unlock(&dir->i_lock); -} - -int ll_test_inode_by_fid(struct inode *inode, void *opaque) -{ - return lu_fid_eq(&ll_i2info(inode)->lli_fid, opaque); -} - -int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, - void *data, int flag) -{ - struct lustre_handle lockh; - int rc; - - switch (flag) { - case LDLM_CB_BLOCKING: - 
ldlm_lock2handle(lock, &lockh); - rc = ldlm_cli_cancel(&lockh, LCF_ASYNC); - if (rc < 0) { - CDEBUG(D_INODE, "ldlm_cli_cancel: rc = %d\n", rc); - return rc; - } - break; - case LDLM_CB_CANCELING: { - struct inode *inode = ll_inode_from_resource_lock(lock); - __u64 bits = lock->l_policy_data.l_inodebits.bits; - - /* Inode is set to lock->l_resource->lr_lvb_inode - * for mdc - bug 24555 - */ - LASSERT(!lock->l_ast_data); - - if (!inode) - break; - - /* Invalidate all dentries associated with this inode */ - LASSERT(ldlm_is_canceling(lock)); - - if (!fid_res_name_eq(ll_inode2fid(inode), - &lock->l_resource->lr_name)) { - LDLM_ERROR(lock, - "data mismatch with object " DFID "(%p)", - PFID(ll_inode2fid(inode)), inode); - LBUG(); - } - - if (bits & MDS_INODELOCK_XATTR) { - if (S_ISDIR(inode->i_mode)) - ll_i2info(inode)->lli_def_stripe_offset = -1; - ll_xattr_cache_destroy(inode); - bits &= ~MDS_INODELOCK_XATTR; - } - - /* For OPEN locks we differentiate between lock modes - * LCK_CR, LCK_CW, LCK_PR - bug 22891 - */ - if (bits & MDS_INODELOCK_OPEN) - ll_have_md_lock(inode, &bits, lock->l_req_mode); - - if (bits & MDS_INODELOCK_OPEN) { - fmode_t fmode; - - switch (lock->l_req_mode) { - case LCK_CW: - fmode = FMODE_WRITE; - break; - case LCK_PR: - fmode = FMODE_EXEC; - break; - case LCK_CR: - fmode = FMODE_READ; - break; - default: - LDLM_ERROR(lock, "bad lock mode for OPEN lock"); - LBUG(); - } - - ll_md_real_close(inode, fmode); - } - - if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE | - MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM)) - ll_have_md_lock(inode, &bits, LCK_MINMODE); - - if (bits & MDS_INODELOCK_LAYOUT) { - struct cl_object_conf conf = { - .coc_opc = OBJECT_CONF_INVALIDATE, - .coc_inode = inode, - }; - - rc = ll_layout_conf(inode, &conf); - if (rc < 0) - CDEBUG(D_INODE, "cannot invalidate layout of " - DFID ": rc = %d\n", - PFID(ll_inode2fid(inode)), rc); - } - - if (bits & MDS_INODELOCK_UPDATE) { - struct ll_inode_info *lli = ll_i2info(inode); - - 
spin_lock(&lli->lli_lock); - LTIME_S(inode->i_mtime) = 0; - LTIME_S(inode->i_atime) = 0; - LTIME_S(inode->i_ctime) = 0; - spin_unlock(&lli->lli_lock); - } - - if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) { - struct ll_inode_info *lli = ll_i2info(inode); - - CDEBUG(D_INODE, "invalidating inode " DFID " lli = %p, pfid = " DFID "\n", - PFID(ll_inode2fid(inode)), lli, - PFID(&lli->lli_pfid)); - - truncate_inode_pages(inode->i_mapping, 0); - - if (unlikely(!fid_is_zero(&lli->lli_pfid))) { - struct inode *master_inode = NULL; - unsigned long hash; - - /* - * This is slave inode, since all of the child - * dentry is connected on the master inode, so - * we have to invalidate the negative children - * on master inode - */ - CDEBUG(D_INODE, - "Invalidate s" DFID " m" DFID "\n", - PFID(ll_inode2fid(inode)), - PFID(&lli->lli_pfid)); - - hash = cl_fid_build_ino(&lli->lli_pfid, - ll_need_32bit_api(ll_i2sbi(inode))); - /* - * Do not lookup the inode with ilookup5, - * otherwise it will cause dead lock, - * - * 1. Client1 send chmod req to the MDT0, then - * on MDT0, it enqueues master and all of its - * slaves lock, (mdt_attr_set() -> - * mdt_lock_slaves()), after gets master and - * stripe0 lock, it will send the enqueue req - * (for stripe1) to MDT1, then MDT1 finds the - * lock has been granted to client2. Then MDT1 - * sends blocking ast to client2. - * - * 2. At the same time, client2 tries to unlink - * the striped dir (rm -rf striped_dir), and - * during lookup, it will hold the master inode - * of the striped directory, whose inode state - * is NEW, then tries to revalidate all of its - * slaves, (ll_prep_inode()->ll_iget()-> - * ll_read_inode2()-> ll_update_inode().). And - * it will be blocked on the server side because - * of 1. - * - * 3. Then the client get the blocking_ast req, - * cancel the lock, but being blocked if using - * ->ilookup5()), because master inode state is - * NEW. 
- */ - master_inode = ilookup5_nowait(inode->i_sb, - hash, - ll_test_inode_by_fid, - (void *)&lli->lli_pfid); - if (master_inode) { - ll_invalidate_negative_children(master_inode); - iput(master_inode); - } - } else { - ll_invalidate_negative_children(inode); - } - } - - if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) && - inode->i_sb->s_root && - !is_root_inode(inode)) - ll_invalidate_aliases(inode); - - iput(inode); - break; - } - default: - LBUG(); - } - - return 0; -} - -__u32 ll_i2suppgid(struct inode *i) -{ - if (in_group_p(i->i_gid)) - return (__u32)from_kgid(&init_user_ns, i->i_gid); - else - return (__u32)(-1); -} - -/* Pack the required supplementary groups into the supplied groups array. - * If we don't need to use the groups from the target inode(s) then we - * instead pack one or more groups from the user's supplementary group - * array in case it might be useful. Not needed if doing an MDS-side upcall. - */ -void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2) -{ - LASSERT(i1); - - suppgids[0] = ll_i2suppgid(i1); - - if (i2) - suppgids[1] = ll_i2suppgid(i2); - else - suppgids[1] = -1; -} - -/* - * Try to reuse unhashed or invalidated dentries. - * This is very similar to d_exact_alias(), and any changes in one should be - * considered for inclusion in the other. The differences are that we don't - * need an unhashed alias, and we don't want d_compare to be used for - * comparison. - */ -static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry) -{ - struct dentry *alias; - - if (hlist_empty(&inode->i_dentry)) - return NULL; - - spin_lock(&inode->i_lock); - hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { - LASSERT(alias != dentry); - /* - * Don't need alias->d_lock here, because aliases with - * d_parent == entry->d_parent are not subject to name or - * parent changes, because the parent inode i_mutex is held. 
- */ - - if (alias->d_parent != dentry->d_parent) - continue; - if (alias->d_name.hash != dentry->d_name.hash) - continue; - if (alias->d_name.len != dentry->d_name.len || - memcmp(alias->d_name.name, dentry->d_name.name, - dentry->d_name.len) != 0) - continue; - spin_lock(&alias->d_lock); - dget_dlock(alias); - spin_unlock(&alias->d_lock); - spin_unlock(&inode->i_lock); - return alias; - } - spin_unlock(&inode->i_lock); - - return NULL; -} - -/* - * Similar to d_splice_alias(), but lustre treats invalid alias - * similar to DCACHE_DISCONNECTED, and tries to use it anyway. - */ -struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de) -{ - if (inode && !S_ISDIR(inode->i_mode)) { - struct dentry *new = ll_find_alias(inode, de); - - if (new) { - d_move(new, de); - iput(inode); - CDEBUG(D_DENTRY, - "Reuse dentry %p inode %p refc %d flags %#x\n", - new, d_inode(new), d_count(new), new->d_flags); - return new; - } - d_add(de, inode); - } else { - struct dentry *new = d_splice_alias(inode, de); - - if (IS_ERR(new)) - CDEBUG(D_DENTRY, - "splice inode %p as %pd gives error %lu\n", - inode, de, PTR_ERR(new)); - if (new) - de = new; - } - if (!IS_ERR(de)) - CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n", - de, d_inode(de), d_count(de), de->d_flags); - return de; -} - -static int ll_lookup_it_finish(struct ptlrpc_request *request, - struct lookup_intent *it, - struct inode *parent, struct dentry **de) -{ - struct inode *inode = NULL; - __u64 bits = 0; - int rc = 0; - struct dentry *alias; - - /* NB 1 request reference will be taken away by ll_intent_lock() - * when I return - */ - CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it, - it->it_disposition); - if (!it_disposition(it, DISP_LOOKUP_NEG)) { - rc = ll_prep_inode(&inode, request, (*de)->d_sb, it); - if (rc) - return rc; - - ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits); - - /* We used to query real size from OSTs here, but actually - * this is not needed. 
For stat() calls size would be updated - * from subsequent do_revalidate()->ll_inode_revalidate_it() in - * 2.4 and - * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6 - * Everybody else who needs correct file size would call - * ll_glimpse_size or some equivalent themselves anyway. - * Also see bug 7198. - */ - } - - alias = ll_splice_alias(inode, *de); - if (IS_ERR(alias)) { - rc = PTR_ERR(alias); - goto out; - } - *de = alias; - - if (!it_disposition(it, DISP_LOOKUP_NEG)) { - /* We have the "lookup" lock, so unhide dentry */ - if (bits & MDS_INODELOCK_LOOKUP) - d_lustre_revalidate(*de); - } else if (!it_disposition(it, DISP_OPEN_CREATE)) { - /* If file created on server, don't depend on parent UPDATE - * lock to unhide it. It is left hidden and next lookup can - * find it in ll_splice_alias. - */ - /* Check that parent has UPDATE lock. */ - struct lookup_intent parent_it = { - .it_op = IT_GETATTR, - .it_lock_handle = 0 }; - struct lu_fid fid = ll_i2info(parent)->lli_fid; - - /* If it is striped directory, get the real stripe parent */ - if (unlikely(ll_i2info(parent)->lli_lsm_md)) { - rc = md_get_fid_from_lsm(ll_i2mdexp(parent), - ll_i2info(parent)->lli_lsm_md, - (*de)->d_name.name, - (*de)->d_name.len, &fid); - if (rc) - return rc; - } - - if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it, &fid, - NULL)) { - d_lustre_revalidate(*de); - ll_intent_release(&parent_it); - } - } - -out: - if (rc != 0 && it->it_op & IT_OPEN) - ll_open_cleanup((*de)->d_sb, request); - - return rc; -} - -static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, - struct lookup_intent *it) -{ - struct lookup_intent lookup_it = { .it_op = IT_LOOKUP }; - struct dentry *save = dentry, *retval; - struct ptlrpc_request *req = NULL; - struct md_op_data *op_data = NULL; - struct inode *inode; - __u32 opc; - int rc; - - if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen) - return ERR_PTR(-ENAMETOOLONG); - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" 
DFID "(%p),intent=%s\n", - dentry, PFID(ll_inode2fid(parent)), parent, LL_IT2STR(it)); - - if (d_mountpoint(dentry)) - CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it)); - - if (!it || it->it_op == IT_GETXATTR) - it = &lookup_it; - - if (it->it_op == IT_GETATTR && dentry_may_statahead(parent, dentry)) { - rc = ll_statahead(parent, &dentry, 0); - if (rc == 1) { - if (dentry == save) - retval = NULL; - else - retval = dentry; - goto out; - } - } - - if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && sb_rdonly(dentry->d_sb)) - return ERR_PTR(-EROFS); - - if (it->it_op & IT_CREAT) - opc = LUSTRE_OPC_CREATE; - else - opc = LUSTRE_OPC_ANY; - - op_data = ll_prep_md_op_data(NULL, parent, NULL, dentry->d_name.name, - dentry->d_name.len, 0, opc, NULL); - if (IS_ERR(op_data)) - return (void *)op_data; - - /* enforce umask if acl disabled or MDS doesn't support umask */ - if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent))) - it->it_create_mode &= ~current_umask(); - - rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req, - &ll_md_blocking_ast, 0); - /* - * If the MDS allows the client to chgrp (CFS_SETGRP_PERM), but the - * client does not know which suppgid should be sent to the MDS, or - * some other(s) changed the target file's GID after this RPC sent - * to the MDS with the suppgid as the original GID, then we should - * try again with right suppgid. 
- */ - if (rc == -EACCES && it->it_op & IT_OPEN && - it_disposition(it, DISP_OPEN_DENY)) { - struct mdt_body *body; - - LASSERT(req); - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (op_data->op_suppgids[0] == body->mbo_gid || - op_data->op_suppgids[1] == body->mbo_gid || - !in_group_p(make_kgid(&init_user_ns, body->mbo_gid))) { - retval = ERR_PTR(-EACCES); - goto out; - } - - fid_zero(&op_data->op_fid2); - op_data->op_suppgids[1] = body->mbo_gid; - ptlrpc_req_finished(req); - req = NULL; - ll_intent_release(it); - rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req, - ll_md_blocking_ast, 0); - } - - if (rc < 0) { - retval = ERR_PTR(rc); - goto out; - } - - rc = ll_lookup_it_finish(req, it, parent, &dentry); - if (rc != 0) { - ll_intent_release(it); - retval = ERR_PTR(rc); - goto out; - } - - inode = d_inode(dentry); - if ((it->it_op & IT_OPEN) && inode && - !S_ISREG(inode->i_mode) && - !S_ISDIR(inode->i_mode)) { - ll_release_openhandle(inode, it); - } - ll_lookup_finish_locks(it, inode); - - if (dentry == save) - retval = NULL; - else - retval = dentry; -out: - if (op_data && !IS_ERR(op_data)) - ll_finish_md_op_data(op_data); - - ptlrpc_req_finished(req); - return retval; -} - -static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry, - unsigned int flags) -{ - struct lookup_intent *itp, it = { .it_op = IT_GETATTR }; - struct dentry *de; - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),flags=%u\n", - dentry, PFID(ll_inode2fid(parent)), parent, flags); - - /* Optimize away (CREATE && !OPEN). Let .create handle the race. - * but only if we have write permissions there, otherwise we need - * to proceed with lookup. 
LU-4185 - */ - if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN) && - (inode_permission(parent, MAY_WRITE | MAY_EXEC) == 0)) - return NULL; - - if (flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE)) - itp = NULL; - else - itp = ⁢ - de = ll_lookup_it(parent, dentry, itp); - - if (itp) - ll_intent_release(itp); - - return de; -} - -/* - * For cached negative dentry and new dentry, handle lookup/create/open - * together. - */ -static int ll_atomic_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned int open_flags, - umode_t mode, int *opened) -{ - struct lookup_intent *it; - struct dentry *de; - int rc = 0; - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),file %p,open_flags %x,mode %x opened %d\n", - dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode, - *opened); - - /* Only negative dentries enter here */ - LASSERT(!d_inode(dentry)); - - if (!d_in_lookup(dentry)) { - /* A valid negative dentry that just passed revalidation, - * there's little point to try and open it server-side, - * even though there's a minuscle chance it might succeed. - * Either way it's a valid race to just return -ENOENT here. - */ - if (!(open_flags & O_CREAT)) - return -ENOENT; - - /* Otherwise we just unhash it to be rehashed afresh via - * lookup if necessary - */ - d_drop(dentry); - } - - it = kzalloc(sizeof(*it), GFP_NOFS); - if (!it) - return -ENOMEM; - - it->it_op = IT_OPEN; - if (open_flags & O_CREAT) - it->it_op |= IT_CREAT; - it->it_create_mode = (mode & S_IALLUGO) | S_IFREG; - it->it_flags = (open_flags & ~O_ACCMODE) | OPEN_FMODE(open_flags); - it->it_flags &= ~MDS_OPEN_FL_INTERNAL; - - /* Dentry added to dcache tree in ll_lookup_it */ - de = ll_lookup_it(dir, dentry, it); - if (IS_ERR(de)) - rc = PTR_ERR(de); - else if (de) - dentry = de; - - if (!rc) { - if (it_disposition(it, DISP_OPEN_CREATE)) { - /* Dentry instantiated in ll_create_it. */ - rc = ll_create_it(dir, dentry, it); - if (rc) { - /* We dget in ll_splice_alias. 
*/ - if (de) - dput(de); - goto out_release; - } - - *opened |= FILE_CREATED; - } - if (d_really_is_positive(dentry) && - it_disposition(it, DISP_OPEN_OPEN)) { - /* Open dentry. */ - if (S_ISFIFO(d_inode(dentry)->i_mode)) { - /* We cannot call open here as it might - * deadlock. This case is unreachable in - * practice because of OBD_CONNECT_NODEVOH. - */ - rc = finish_no_open(file, de); - } else { - file->private_data = it; - rc = finish_open(file, dentry, NULL, opened); - /* We dget in ll_splice_alias. finish_open takes - * care of dget for fd open. - */ - if (de) - dput(de); - } - } else { - rc = finish_no_open(file, de); - } - } - -out_release: - ll_intent_release(it); - kfree(it); - - return rc; -} - -/* We depend on "mode" being set with the proper file type/umask by now */ -static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it) -{ - struct inode *inode = NULL; - struct ptlrpc_request *request = NULL; - struct ll_sb_info *sbi = ll_i2sbi(dir); - int rc; - - LASSERT(it && it->it_disposition); - - LASSERT(it_disposition(it, DISP_ENQ_CREATE_REF)); - request = it->it_request; - it_clear_disposition(it, DISP_ENQ_CREATE_REF); - rc = ll_prep_inode(&inode, request, dir->i_sb, it); - if (rc) { - inode = ERR_PTR(rc); - goto out; - } - - LASSERT(hlist_empty(&inode->i_dentry)); - - /* We asked for a lock on the directory, but were granted a - * lock on the inode. Since we finally have an inode pointer, - * stuff it in the lock. - */ - CDEBUG(D_DLMTRACE, "setting l_ast_data to inode " DFID "(%p)\n", - PFID(ll_inode2fid(dir)), inode); - ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); - out: - ptlrpc_req_finished(request); - return inode; -} - -/* - * By the time this is called, we already have created the directory cache - * entry for the new file, but it is so far negative - it has no inode. 
- * - * We defer creating the OBD object(s) until open, to keep the intent and - * non-intent code paths similar, and also because we do not have the MDS - * inode number before calling ll_create_node() (which is needed for LOV), - * so we would need to do yet another RPC to the MDS to store the LOV EA - * data on the MDS. If needed, we would pass the PACKED lmm as data and - * lmm_size in datalen (the MDS still has code which will handle that). - * - * If the create succeeds, we fill in the inode information - * with d_instantiate(). - */ -static int ll_create_it(struct inode *dir, struct dentry *dentry, - struct lookup_intent *it) -{ - struct inode *inode; - int rc = 0; - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p), intent=%s\n", - dentry, PFID(ll_inode2fid(dir)), dir, LL_IT2STR(it)); - - rc = it_open_error(DISP_OPEN_CREATE, it); - if (rc) - return rc; - - inode = ll_create_node(dir, it); - if (IS_ERR(inode)) - return PTR_ERR(inode); - - d_instantiate(dentry, inode); - - return ll_init_security(dentry, inode, dir); -} - -void ll_update_times(struct ptlrpc_request *request, struct inode *inode) -{ - struct mdt_body *body = req_capsule_server_get(&request->rq_pill, - &RMF_MDT_BODY); - - LASSERT(body); - if (body->mbo_valid & OBD_MD_FLMTIME && - body->mbo_mtime > LTIME_S(inode->i_mtime)) { - CDEBUG(D_INODE, "setting fid " DFID " mtime from %lu to %llu\n", - PFID(ll_inode2fid(inode)), LTIME_S(inode->i_mtime), - body->mbo_mtime); - LTIME_S(inode->i_mtime) = body->mbo_mtime; - } - if (body->mbo_valid & OBD_MD_FLCTIME && - body->mbo_ctime > LTIME_S(inode->i_ctime)) - LTIME_S(inode->i_ctime) = body->mbo_ctime; -} - -static int ll_new_node(struct inode *dir, struct dentry *dentry, - const char *tgt, umode_t mode, int rdev, - __u32 opc) -{ - struct ptlrpc_request *request = NULL; - struct md_op_data *op_data; - struct inode *inode = NULL; - struct ll_sb_info *sbi = ll_i2sbi(dir); - int tgt_len = 0; - int err; - - if (unlikely(tgt)) - tgt_len = strlen(tgt) + 1; 
-again: - op_data = ll_prep_md_op_data(NULL, dir, NULL, - dentry->d_name.name, - dentry->d_name.len, - 0, opc, NULL); - if (IS_ERR(op_data)) { - err = PTR_ERR(op_data); - goto err_exit; - } - - err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode, - from_kuid(&init_user_ns, current_fsuid()), - from_kgid(&init_user_ns, current_fsgid()), - current_cap(), rdev, &request); - ll_finish_md_op_data(op_data); - if (err < 0 && err != -EREMOTE) - goto err_exit; - - /* - * If the client doesn't know where to create a subdirectory (or - * in case of a race that sends the RPC to the wrong MDS), the - * MDS will return -EREMOTE and the client will fetch the layout - * of the directory, then create the directory on the right MDT. - */ - if (unlikely(err == -EREMOTE)) { - struct ll_inode_info *lli = ll_i2info(dir); - struct lmv_user_md *lum; - int lumsize, err2; - - ptlrpc_req_finished(request); - request = NULL; - - err2 = ll_dir_getstripe(dir, (void **)&lum, &lumsize, &request, - OBD_MD_DEFAULT_MEA); - if (!err2) { - /* Update stripe_offset and retry */ - lli->lli_def_stripe_offset = lum->lum_stripe_offset; - } else if (err2 == -ENODATA && - lli->lli_def_stripe_offset != -1) { - /* - * If there are no default stripe EA on the MDT, but the - * client has default stripe, then it probably means - * default stripe EA has just been deleted. 
- */ - lli->lli_def_stripe_offset = -1; - } else { - goto err_exit; - } - - ptlrpc_req_finished(request); - request = NULL; - goto again; - } - - ll_update_times(request, dir); - - err = ll_prep_inode(&inode, request, dir->i_sb, NULL); - if (err) - goto err_exit; - - d_instantiate(dentry, inode); - - err = ll_init_security(dentry, inode, dir); -err_exit: - if (request) - ptlrpc_req_finished(request); - - return err; -} - -static int ll_mknod(struct inode *dir, struct dentry *dchild, - umode_t mode, dev_t rdev) -{ - int err; - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p) mode %o dev %x\n", - dchild, PFID(ll_inode2fid(dir)), dir, mode, - old_encode_dev(rdev)); - - if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir))) - mode &= ~current_umask(); - - switch (mode & S_IFMT) { - case 0: - mode |= S_IFREG; - /* for mode = 0 case */ - /* fall through */ - case S_IFREG: - case S_IFCHR: - case S_IFBLK: - case S_IFIFO: - case S_IFSOCK: - err = ll_new_node(dir, dchild, NULL, mode, - old_encode_dev(rdev), - LUSTRE_OPC_MKNOD); - break; - case S_IFDIR: - err = -EPERM; - break; - default: - err = -EINVAL; - } - - if (!err) - ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKNOD, 1); - - return err; -} - -/* - * Plain create. Intent create is handled in atomic_open. 
- */ -static int ll_create_nd(struct inode *dir, struct dentry *dentry, - umode_t mode, bool want_excl) -{ - int rc; - - CDEBUG(D_VFSTRACE, - "VFS Op:name=%pd, dir=" DFID "(%p), flags=%u, excl=%d\n", - dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl); - - rc = ll_mknod(dir, dentry, mode, 0); - - ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1); - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, unhashed %d\n", - dentry, d_unhashed(dentry)); - - return rc; -} - -static int ll_unlink(struct inode *dir, struct dentry *dchild) -{ - struct ptlrpc_request *request = NULL; - struct md_op_data *op_data; - int rc; - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n", - dchild, dir->i_ino, dir->i_generation, dir); - - op_data = ll_prep_md_op_data(NULL, dir, NULL, - dchild->d_name.name, - dchild->d_name.len, - 0, LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - if (dchild->d_inode) - op_data->op_fid3 = *ll_inode2fid(dchild->d_inode); - - op_data->op_fid2 = op_data->op_fid3; - rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request); - ll_finish_md_op_data(op_data); - if (rc) - goto out; - - ll_update_times(request, dir); - ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_UNLINK, 1); - - out: - ptlrpc_req_finished(request); - return rc; -} - -static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) -{ - int err; - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir" DFID "(%p)\n", - dentry, PFID(ll_inode2fid(dir)), dir); - - if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir))) - mode &= ~current_umask(); - mode = (mode & (0777 | S_ISVTX)) | S_IFDIR; - - err = ll_new_node(dir, dentry, NULL, mode, 0, LUSTRE_OPC_MKDIR); - if (!err) - ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1); - - return err; -} - -static int ll_rmdir(struct inode *dir, struct dentry *dchild) -{ - struct ptlrpc_request *request = NULL; - struct md_op_data *op_data; - int rc; - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p)\n", - dchild, 
PFID(ll_inode2fid(dir)), dir); - - op_data = ll_prep_md_op_data(NULL, dir, NULL, - dchild->d_name.name, - dchild->d_name.len, - S_IFDIR, LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - if (dchild->d_inode) - op_data->op_fid3 = *ll_inode2fid(dchild->d_inode); - - op_data->op_fid2 = op_data->op_fid3; - rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request); - ll_finish_md_op_data(op_data); - if (rc == 0) { - ll_update_times(request, dir); - ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1); - } - - ptlrpc_req_finished(request); - return rc; -} - -static int ll_symlink(struct inode *dir, struct dentry *dentry, - const char *oldname) -{ - int err; - - CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),target=%.*s\n", - dentry, PFID(ll_inode2fid(dir)), dir, 3000, oldname); - - err = ll_new_node(dir, dentry, oldname, S_IFLNK | 0777, - 0, LUSTRE_OPC_SYMLINK); - - if (!err) - ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1); - - return err; -} - -static int ll_link(struct dentry *old_dentry, struct inode *dir, - struct dentry *new_dentry) -{ - struct inode *src = d_inode(old_dentry); - struct ll_sb_info *sbi = ll_i2sbi(dir); - struct ptlrpc_request *request = NULL; - struct md_op_data *op_data; - int err; - - CDEBUG(D_VFSTRACE, - "VFS Op: inode=" DFID "(%p), dir=" DFID "(%p), target=%pd\n", - PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir, - new_dentry); - - op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name, - new_dentry->d_name.len, - 0, LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - err = md_link(sbi->ll_md_exp, op_data, &request); - ll_finish_md_op_data(op_data); - if (err) - goto out; - - ll_update_times(request, dir); - ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1); -out: - ptlrpc_req_finished(request); - return err; -} - -static int ll_rename(struct inode *src, struct dentry *src_dchild, - struct inode *tgt, struct dentry *tgt_dchild, - unsigned int flags) -{ - 
struct ptlrpc_request *request = NULL; - struct ll_sb_info *sbi = ll_i2sbi(src); - struct md_op_data *op_data; - int err; - - if (flags) - return -EINVAL; - - CDEBUG(D_VFSTRACE, - "VFS Op:oldname=%pd, src_dir=" DFID "(%p), newname=%pd, tgt_dir=" DFID "(%p)\n", - src_dchild, PFID(ll_inode2fid(src)), src, - tgt_dchild, PFID(ll_inode2fid(tgt)), tgt); - - op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - if (src_dchild->d_inode) - op_data->op_fid3 = *ll_inode2fid(src_dchild->d_inode); - if (tgt_dchild->d_inode) - op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode); - - err = md_rename(sbi->ll_md_exp, op_data, - src_dchild->d_name.name, - src_dchild->d_name.len, - tgt_dchild->d_name.name, - tgt_dchild->d_name.len, &request); - ll_finish_md_op_data(op_data); - if (!err) { - ll_update_times(request, src); - ll_update_times(request, tgt); - ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1); - } - - ptlrpc_req_finished(request); - if (!err) - d_move(src_dchild, tgt_dchild); - return err; -} - -const struct inode_operations ll_dir_inode_operations = { - .mknod = ll_mknod, - .atomic_open = ll_atomic_open, - .lookup = ll_lookup_nd, - .create = ll_create_nd, - /* We need all these non-raw things for NFSD, to not patch it. 
*/ - .unlink = ll_unlink, - .mkdir = ll_mkdir, - .rmdir = ll_rmdir, - .symlink = ll_symlink, - .link = ll_link, - .rename = ll_rename, - .setattr = ll_setattr, - .getattr = ll_getattr, - .permission = ll_inode_permission, - .listxattr = ll_listxattr, - .get_acl = ll_get_acl, -}; - -const struct inode_operations ll_special_inode_operations = { - .setattr = ll_setattr, - .getattr = ll_getattr, - .permission = ll_inode_permission, - .listxattr = ll_listxattr, - .get_acl = ll_get_acl, -}; diff --git a/drivers/staging/lustre/lustre/llite/range_lock.c b/drivers/staging/lustre/lustre/llite/range_lock.c deleted file mode 100644 index 008a8874118d..000000000000 --- a/drivers/staging/lustre/lustre/llite/range_lock.c +++ /dev/null @@ -1,241 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Range lock is used to allow multiple threads writing a single shared - * file given each thread is writing to a non-overlapping portion of the - * file. - * - * Refer to the possible upstream kernel version of range lock by - * Jan Kara : https://lkml.org/lkml/2013/1/31/480 - * - * This file could later replaced by the upstream kernel version. 
- */ -/* - * Author: Prakash Surya - * Author: Bobi Jam - */ -#include "range_lock.h" -#include -#include - -/** - * Initialize a range lock tree - * - * \param tree [in] an empty range lock tree - * - * Pre: Caller should have allocated the range lock tree. - * Post: The range lock tree is ready to function. - */ -void range_lock_tree_init(struct range_lock_tree *tree) -{ - tree->rlt_root = NULL; - tree->rlt_sequence = 0; - spin_lock_init(&tree->rlt_lock); -} - -/** - * Initialize a range lock node - * - * \param lock [in] an empty range lock node - * \param start [in] start of the covering region - * \param end [in] end of the covering region - * - * Pre: Caller should have allocated the range lock node. - * Post: The range lock node is meant to cover [start, end] region - */ -int range_lock_init(struct range_lock *lock, __u64 start, __u64 end) -{ - int rc; - - memset(&lock->rl_node, 0, sizeof(lock->rl_node)); - if (end != LUSTRE_EOF) - end >>= PAGE_SHIFT; - rc = interval_set(&lock->rl_node, start >> PAGE_SHIFT, end); - if (rc) - return rc; - - INIT_LIST_HEAD(&lock->rl_next_lock); - lock->rl_task = NULL; - lock->rl_lock_count = 0; - lock->rl_blocking_ranges = 0; - lock->rl_sequence = 0; - return rc; -} - -static inline struct range_lock *next_lock(struct range_lock *lock) -{ - return list_entry(lock->rl_next_lock.next, typeof(*lock), rl_next_lock); -} - -/** - * Helper function of range_unlock() - * - * \param node [in] a range lock found overlapped during interval node - * search - * \param arg [in] the range lock to be tested - * - * \retval INTERVAL_ITER_CONT indicate to continue the search for next - * overlapping range node - * \retval INTERVAL_ITER_STOP indicate to stop the search - */ -static enum interval_iter range_unlock_cb(struct interval_node *node, void *arg) -{ - struct range_lock *lock = arg; - struct range_lock *overlap = node2rangelock(node); - struct range_lock *iter; - - list_for_each_entry(iter, &overlap->rl_next_lock, rl_next_lock) { - if 
(iter->rl_sequence > lock->rl_sequence) { - --iter->rl_blocking_ranges; - LASSERT(iter->rl_blocking_ranges > 0); - } - } - if (overlap->rl_sequence > lock->rl_sequence) { - --overlap->rl_blocking_ranges; - if (overlap->rl_blocking_ranges == 0) - wake_up_process(overlap->rl_task); - } - return INTERVAL_ITER_CONT; -} - -/** - * Unlock a range lock, wake up locks blocked by this lock. - * - * \param tree [in] range lock tree - * \param lock [in] range lock to be deleted - * - * If this lock has been granted, relase it; if not, just delete it from - * the tree or the same region lock list. Wake up those locks only blocked - * by this lock through range_unlock_cb(). - */ -void range_unlock(struct range_lock_tree *tree, struct range_lock *lock) -{ - spin_lock(&tree->rlt_lock); - if (!list_empty(&lock->rl_next_lock)) { - struct range_lock *next; - - if (interval_is_intree(&lock->rl_node)) { /* first lock */ - /* Insert the next same range lock into the tree */ - next = next_lock(lock); - next->rl_lock_count = lock->rl_lock_count - 1; - interval_erase(&lock->rl_node, &tree->rlt_root); - interval_insert(&next->rl_node, &tree->rlt_root); - } else { - /* find the first lock in tree */ - list_for_each_entry(next, &lock->rl_next_lock, - rl_next_lock) { - if (!interval_is_intree(&next->rl_node)) - continue; - - LASSERT(next->rl_lock_count > 0); - next->rl_lock_count--; - break; - } - } - list_del_init(&lock->rl_next_lock); - } else { - LASSERT(interval_is_intree(&lock->rl_node)); - interval_erase(&lock->rl_node, &tree->rlt_root); - } - - interval_search(tree->rlt_root, &lock->rl_node.in_extent, - range_unlock_cb, lock); - spin_unlock(&tree->rlt_lock); -} - -/** - * Helper function of range_lock() - * - * \param node [in] a range lock found overlapped during interval node - * search - * \param arg [in] the range lock to be tested - * - * \retval INTERVAL_ITER_CONT indicate to continue the search for next - * overlapping range node - * \retval INTERVAL_ITER_STOP indicate to stop 
the search - */ -static enum interval_iter range_lock_cb(struct interval_node *node, void *arg) -{ - struct range_lock *lock = arg; - struct range_lock *overlap = node2rangelock(node); - - lock->rl_blocking_ranges += overlap->rl_lock_count + 1; - return INTERVAL_ITER_CONT; -} - -/** - * Lock a region - * - * \param tree [in] range lock tree - * \param lock [in] range lock node containing the region span - * - * \retval 0 get the range lock - * \retval <0 error code while not getting the range lock - * - * If there exists overlapping range lock, the new lock will wait and - * retry, if later it find that it is not the chosen one to wake up, - * it wait again. - */ -int range_lock(struct range_lock_tree *tree, struct range_lock *lock) -{ - struct interval_node *node; - int rc = 0; - - spin_lock(&tree->rlt_lock); - /* - * We need to check for all conflicting intervals - * already in the tree. - */ - interval_search(tree->rlt_root, &lock->rl_node.in_extent, - range_lock_cb, lock); - /* - * Insert to the tree if I am unique, otherwise I've been linked to - * the rl_next_lock of another lock which has the same range as mine - * in range_lock_cb(). 
- */ - node = interval_insert(&lock->rl_node, &tree->rlt_root); - if (node) { - struct range_lock *tmp = node2rangelock(node); - - list_add_tail(&lock->rl_next_lock, &tmp->rl_next_lock); - tmp->rl_lock_count++; - } - lock->rl_sequence = ++tree->rlt_sequence; - - while (lock->rl_blocking_ranges > 0) { - lock->rl_task = current; - __set_current_state(TASK_INTERRUPTIBLE); - spin_unlock(&tree->rlt_lock); - schedule(); - - if (signal_pending(current)) { - range_unlock(tree, lock); - rc = -EINTR; - goto out; - } - spin_lock(&tree->rlt_lock); - } - spin_unlock(&tree->rlt_lock); -out: - return rc; -} diff --git a/drivers/staging/lustre/lustre/llite/range_lock.h b/drivers/staging/lustre/lustre/llite/range_lock.h deleted file mode 100644 index 9ebac09160f2..000000000000 --- a/drivers/staging/lustre/lustre/llite/range_lock.h +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Range lock is used to allow multiple threads writing a single shared - * file given each thread is writing to a non-overlapping portion of the - * file. 
- * - * Refer to the possible upstream kernel version of range lock by - * Jan Kara : https://lkml.org/lkml/2013/1/31/480 - * - * This file could later replaced by the upstream kernel version. - */ -/* - * Author: Prakash Surya - * Author: Bobi Jam - */ -#ifndef _RANGE_LOCK_H -#define _RANGE_LOCK_H - -#include -#include - -struct range_lock { - struct interval_node rl_node; - /** - * Process to enqueue this lock. - */ - struct task_struct *rl_task; - /** - * List of locks with the same range. - */ - struct list_head rl_next_lock; - /** - * Number of locks in the list rl_next_lock - */ - unsigned int rl_lock_count; - /** - * Number of ranges which are blocking acquisition of the lock - */ - unsigned int rl_blocking_ranges; - /** - * Sequence number of range lock. This number is used to get to know - * the order the locks are queued; this is required for range_cancel(). - */ - __u64 rl_sequence; -}; - -static inline struct range_lock *node2rangelock(const struct interval_node *n) -{ - return container_of(n, struct range_lock, rl_node); -} - -struct range_lock_tree { - struct interval_node *rlt_root; - spinlock_t rlt_lock; /* protect range lock tree */ - __u64 rlt_sequence; -}; - -void range_lock_tree_init(struct range_lock_tree *tree); -int range_lock_init(struct range_lock *lock, __u64 start, __u64 end); -int range_lock(struct range_lock_tree *tree, struct range_lock *lock); -void range_unlock(struct range_lock_tree *tree, struct range_lock *lock); -#endif diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c deleted file mode 100644 index 3e008ce7275d..000000000000 --- a/drivers/staging/lustre/lustre/llite/rw.c +++ /dev/null @@ -1,1214 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/llite/rw.c - * - * Lustre Lite I/O page cache routines shared by different kernel revs - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -/* current_is_kswapd() */ -#include -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include "llite_internal.h" - -static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which); - -/** - * Get readahead pages from the filesystem readahead pool of the client for a - * thread. - * - * /param sbi superblock for filesystem readahead state ll_ra_info - * /param ria per-thread readahead state - * /param pages number of pages requested for readahead for the thread. - * - * WARNING: This algorithm is used to reduce contention on sbi->ll_lock. - * It should work well if the ra_max_pages is much greater than the single - * file's read-ahead window, and not too many threads contending for - * these readahead pages. 
- * - * TODO: There may be a 'global sync problem' if many threads are trying - * to get an ra budget that is larger than the remaining readahead pages - * and reach here at exactly the same time. They will compute /a ret to - * consume the remaining pages, but will fail at atomic_add_return() and - * get a zero ra window, although there is still ra space remaining. - Jay - */ -static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, - struct ra_io_arg *ria, - unsigned long pages, unsigned long min) -{ - struct ll_ra_info *ra = &sbi->ll_ra_info; - long ret; - - /* If read-ahead pages left are less than 1M, do not do read-ahead, - * otherwise it will form small read RPC(< 1M), which hurt server - * performance a lot. - */ - ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages); - if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) { - ret = 0; - goto out; - } - - if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) { - atomic_sub(ret, &ra->ra_cur_pages); - ret = 0; - } - -out: - if (ret < min) { - /* override ra limit for maximum performance */ - atomic_add(min - ret, &ra->ra_cur_pages); - ret = min; - } - return ret; -} - -void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len) -{ - struct ll_ra_info *ra = &sbi->ll_ra_info; - - atomic_sub(len, &ra->ra_cur_pages); -} - -static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which) -{ - LASSERTF(which < _NR_RA_STAT, "which: %u\n", which); - lprocfs_counter_incr(sbi->ll_ra_stats, which); -} - -void ll_ra_stats_inc(struct inode *inode, enum ra_stat which) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - - ll_ra_stats_inc_sbi(sbi, which); -} - -#define RAS_CDEBUG(ras) \ - CDEBUG(D_READA, \ - "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu rpc %lu " \ - "r %lu ri %lu csr %lu sf %lu sp %lu sl %lu\n", \ - ras->ras_last_readpage, ras->ras_consecutive_requests, \ - ras->ras_consecutive_pages, ras->ras_window_start, \ - ras->ras_window_len, ras->ras_next_readahead, 
\ - ras->ras_rpc_size, \ - ras->ras_requests, ras->ras_request_index, \ - ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \ - ras->ras_stride_pages, ras->ras_stride_length) - -static int index_in_window(unsigned long index, unsigned long point, - unsigned long before, unsigned long after) -{ - unsigned long start = point - before, end = point + after; - - if (start > point) - start = 0; - if (end < point) - end = ~0; - - return start <= index && index <= end; -} - -void ll_ras_enter(struct file *f) -{ - struct ll_file_data *fd = LUSTRE_FPRIVATE(f); - struct ll_readahead_state *ras = &fd->fd_ras; - - spin_lock(&ras->ras_lock); - ras->ras_requests++; - ras->ras_request_index = 0; - ras->ras_consecutive_requests++; - spin_unlock(&ras->ras_lock); -} - -/** - * Initiates read-ahead of a page with given index. - * - * \retval +ve: page was already uptodate so it will be skipped - * from being added; - * \retval -ve: page wasn't added to \a queue for error; - * \retval 0: page was added into \a queue for read ahead. 
- */ -static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, pgoff_t index) -{ - enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */ - struct cl_object *clob = io->ci_obj; - struct inode *inode = vvp_object_inode(clob); - const char *msg = NULL; - struct cl_page *page; - struct vvp_page *vpg; - struct page *vmpage; - int rc = 0; - - vmpage = grab_cache_page_nowait(inode->i_mapping, index); - if (!vmpage) { - which = RA_STAT_FAILED_GRAB_PAGE; - msg = "g_c_p_n failed"; - rc = -EBUSY; - goto out; - } - - /* Check if vmpage was truncated or reclaimed */ - if (vmpage->mapping != inode->i_mapping) { - which = RA_STAT_WRONG_GRAB_PAGE; - msg = "g_c_p_n returned invalid page"; - rc = -EBUSY; - goto out; - } - - page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE); - if (IS_ERR(page)) { - which = RA_STAT_FAILED_GRAB_PAGE; - msg = "cl_page_find failed"; - rc = PTR_ERR(page); - goto out; - } - - lu_ref_add(&page->cp_reference, "ra", current); - cl_page_assume(env, io, page); - vpg = cl2vvp_page(cl_object_page_slice(clob, page)); - if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) { - vpg->vpg_defer_uptodate = 1; - vpg->vpg_ra_used = 0; - cl_page_list_add(queue, page); - } else { - /* skip completed pages */ - cl_page_unassume(env, io, page); - /* This page is already uptodate, returning a positive number - * to tell the callers about this - */ - rc = 1; - } - - lu_ref_del(&page->cp_reference, "ra", current); - cl_page_put(env, page); -out: - if (vmpage) { - if (rc) - unlock_page(vmpage); - put_page(vmpage); - } - if (msg) { - ll_ra_stats_inc(inode, which); - CDEBUG(D_READA, "%s\n", msg); - } - return rc; -} - -#define RIA_DEBUG(ria) \ - CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \ - ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\ - ria->ria_pages) - -static inline int stride_io_mode(struct ll_readahead_state *ras) -{ - return ras->ras_consecutive_stride_requests > 1; -} - -/* 
The function calculates how much pages will be read in - * [off, off + length], in such stride IO area, - * stride_offset = st_off, stride_length = st_len, - * stride_pages = st_pgs - * - * |------------------|*****|------------------|*****|------------|*****|.... - * st_off - * |--- st_pgs ---| - * |----- st_len -----| - * - * How many pages it should read in such pattern - * |-------------------------------------------------------------| - * off - * |<------ length ------->| - * - * = |<----->| + |-------------------------------------| + |---| - * start_left st_pgs * i end_left - */ -static unsigned long -stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs, - unsigned long off, unsigned long length) -{ - __u64 start = off > st_off ? off - st_off : 0; - __u64 end = off + length > st_off ? off + length - st_off : 0; - unsigned long start_left = 0; - unsigned long end_left = 0; - unsigned long pg_count; - - if (st_len == 0 || length == 0 || end == 0) - return length; - - start_left = do_div(start, st_len); - if (start_left < st_pgs) - start_left = st_pgs - start_left; - else - start_left = 0; - - end_left = do_div(end, st_len); - if (end_left > st_pgs) - end_left = st_pgs; - - CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n", - start, end, start_left, end_left); - - if (start == end) - pg_count = end_left - (st_pgs - start_left); - else - pg_count = start_left + st_pgs * (end - start - 1) + end_left; - - CDEBUG(D_READA, - "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n", - st_off, st_len, st_pgs, off, length, pg_count); - - return pg_count; -} - -static int ria_page_count(struct ra_io_arg *ria) -{ - __u64 length = ria->ria_end >= ria->ria_start ? 
- ria->ria_end - ria->ria_start + 1 : 0; - - return stride_pg_count(ria->ria_stoff, ria->ria_length, - ria->ria_pages, ria->ria_start, - length); -} - -static unsigned long ras_align(struct ll_readahead_state *ras, - unsigned long index, - unsigned long *remainder) -{ - unsigned long rem = index % ras->ras_rpc_size; - - if (remainder) - *remainder = rem; - return index - rem; -} - -/*Check whether the index is in the defined ra-window */ -static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria) -{ - /* If ria_length == ria_pages, it means non-stride I/O mode, - * idx should always inside read-ahead window in this case - * For stride I/O mode, just check whether the idx is inside - * the ria_pages. - */ - return ria->ria_length == 0 || ria->ria_length == ria->ria_pages || - (idx >= ria->ria_stoff && (idx - ria->ria_stoff) % - ria->ria_length < ria->ria_pages); -} - -static unsigned long -ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, struct ll_readahead_state *ras, - struct ra_io_arg *ria) -{ - struct cl_read_ahead ra = { 0 }; - unsigned long ra_end = 0; - bool stride_ria; - pgoff_t page_idx; - int rc; - - LASSERT(ria); - RIA_DEBUG(ria); - - stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0; - for (page_idx = ria->ria_start; - page_idx <= ria->ria_end && ria->ria_reserved > 0; page_idx++) { - if (ras_inside_ra_window(page_idx, ria)) { - if (!ra.cra_end || ra.cra_end < page_idx) { - unsigned long end; - - cl_read_ahead_release(env, &ra); - - rc = cl_io_read_ahead(env, io, page_idx, &ra); - if (rc < 0) - break; - - CDEBUG(D_READA, "idx: %lu, ra: %lu, rpc: %lu\n", - page_idx, ra.cra_end, ra.cra_rpc_size); - LASSERTF(ra.cra_end >= page_idx, - "object: %p, indcies %lu / %lu\n", - io->ci_obj, ra.cra_end, page_idx); - /* - * update read ahead RPC size. 
- * NB: it's racy but doesn't matter - */ - if (ras->ras_rpc_size > ra.cra_rpc_size && - ra.cra_rpc_size > 0) - ras->ras_rpc_size = ra.cra_rpc_size; - /* trim it to align with optimal RPC size */ - end = ras_align(ras, ria->ria_end + 1, NULL); - if (end > 0 && !ria->ria_eof) - ria->ria_end = end - 1; - if (ria->ria_end < ria->ria_end_min) - ria->ria_end = ria->ria_end_min; - if (ria->ria_end > ra.cra_end) - ria->ria_end = ra.cra_end; - } - - /* If the page is inside the read-ahead window */ - rc = ll_read_ahead_page(env, io, queue, page_idx); - if (rc < 0) - break; - - ra_end = page_idx; - if (!rc) - ria->ria_reserved--; - } else if (stride_ria) { - /* If it is not in the read-ahead window, and it is - * read-ahead mode, then check whether it should skip - * the stride gap - */ - pgoff_t offset; - /* FIXME: This assertion only is valid when it is for - * forward read-ahead, it will be fixed when backward - * read-ahead is implemented - */ - LASSERTF(page_idx >= ria->ria_stoff, - "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n", - page_idx, - ria->ria_start, ria->ria_end, ria->ria_stoff, - ria->ria_length, ria->ria_pages); - offset = page_idx - ria->ria_stoff; - offset = offset % (ria->ria_length); - if (offset > ria->ria_pages) { - page_idx += ria->ria_length - offset; - CDEBUG(D_READA, "i %lu skip %lu\n", page_idx, - ria->ria_length - offset); - continue; - } - } - } - cl_read_ahead_release(env, &ra); - - return ra_end; -} - -static int ll_readahead(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, - struct ll_readahead_state *ras, bool hit) -{ - struct vvp_io *vio = vvp_env_io(env); - struct ll_thread_info *lti = ll_env_info(env); - struct cl_attr *attr = vvp_env_thread_attr(env); - unsigned long len, mlen = 0; - pgoff_t ra_end, start = 0, end = 0; - struct inode *inode; - struct ra_io_arg *ria = <i->lti_ria; - struct cl_object *clob; - int ret = 0; - __u64 kms; - - clob = io->ci_obj; - inode = vvp_object_inode(clob); - - 
memset(ria, 0, sizeof(*ria)); - - cl_object_attr_lock(clob); - ret = cl_object_attr_get(env, clob, attr); - cl_object_attr_unlock(clob); - - if (ret != 0) - return ret; - kms = attr->cat_kms; - if (kms == 0) { - ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN); - return 0; - } - - spin_lock(&ras->ras_lock); - - /** - * Note: other thread might rollback the ras_next_readahead, - * if it can not get the full size of prepared pages, see the - * end of this function. For stride read ahead, it needs to - * make sure the offset is no less than ras_stride_offset, - * so that stride read ahead can work correctly. - */ - if (stride_io_mode(ras)) - start = max(ras->ras_next_readahead, ras->ras_stride_offset); - else - start = ras->ras_next_readahead; - - if (ras->ras_window_len > 0) - end = ras->ras_window_start + ras->ras_window_len - 1; - - /* Enlarge the RA window to encompass the full read */ - if (vio->vui_ra_valid && - end < vio->vui_ra_start + vio->vui_ra_count - 1) - end = vio->vui_ra_start + vio->vui_ra_count - 1; - - if (end) { - unsigned long end_index; - - /* Truncate RA window to end of file */ - end_index = (unsigned long)((kms - 1) >> PAGE_SHIFT); - if (end_index <= end) { - end = end_index; - ria->ria_eof = true; - } - - ras->ras_next_readahead = max(end, end + 1); - RAS_CDEBUG(ras); - } - ria->ria_start = start; - ria->ria_end = end; - /* If stride I/O mode is detected, get stride window*/ - if (stride_io_mode(ras)) { - ria->ria_stoff = ras->ras_stride_offset; - ria->ria_length = ras->ras_stride_length; - ria->ria_pages = ras->ras_stride_pages; - } - spin_unlock(&ras->ras_lock); - - if (end == 0) { - ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW); - return 0; - } - len = ria_page_count(ria); - if (len == 0) { - ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW); - return 0; - } - - CDEBUG(D_READA, DFID ": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n", - PFID(lu_object_fid(&clob->co_lu)), - ria->ria_start, ria->ria_end, - vio->vui_ra_valid ? 
vio->vui_ra_start : 0, - vio->vui_ra_valid ? vio->vui_ra_count : 0, - hit); - - /* at least to extend the readahead window to cover current read */ - if (!hit && vio->vui_ra_valid && - vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) { - unsigned long remainder; - - /* to the end of current read window. */ - mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start; - /* trim to RPC boundary */ - ras_align(ras, ria->ria_start, &remainder); - mlen = min(mlen, ras->ras_rpc_size - remainder); - ria->ria_end_min = ria->ria_start + mlen; - } - - ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen); - if (ria->ria_reserved < len) - ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT); - - CDEBUG(D_READA, "reserved pages %lu/%lu/%lu, ra_cur %d, ra_max %lu\n", - ria->ria_reserved, len, mlen, - atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages), - ll_i2sbi(inode)->ll_ra_info.ra_max_pages); - - ra_end = ll_read_ahead_pages(env, io, queue, ras, ria); - - if (ria->ria_reserved) - ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved); - - if (ra_end == end && ra_end == (kms >> PAGE_SHIFT)) - ll_ra_stats_inc(inode, RA_STAT_EOF); - - /* if we didn't get to the end of the region we reserved from - * the ras we need to go back and update the ras so that the - * next read-ahead tries from where we left off. 
we only do so - * if the region we failed to issue read-ahead on is still ahead - * of the app and behind the next index to start read-ahead from - */ - CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n", - ra_end, end, ria->ria_end, ret); - - if (ra_end > 0 && ra_end != end) { - ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END); - spin_lock(&ras->ras_lock); - if (ra_end <= ras->ras_next_readahead && - index_in_window(ra_end, ras->ras_window_start, 0, - ras->ras_window_len)) { - ras->ras_next_readahead = ra_end + 1; - RAS_CDEBUG(ras); - } - spin_unlock(&ras->ras_lock); - } - - return ret; -} - -static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras, - unsigned long index) -{ - ras->ras_window_start = ras_align(ras, index, NULL); -} - -/* called with the ras_lock held or from places where it doesn't matter */ -static void ras_reset(struct inode *inode, struct ll_readahead_state *ras, - unsigned long index) -{ - ras->ras_last_readpage = index; - ras->ras_consecutive_requests = 0; - ras->ras_consecutive_pages = 0; - ras->ras_window_len = 0; - ras_set_start(inode, ras, index); - ras->ras_next_readahead = max(ras->ras_window_start, index + 1); - - RAS_CDEBUG(ras); -} - -/* called with the ras_lock held or from places where it doesn't matter */ -static void ras_stride_reset(struct ll_readahead_state *ras) -{ - ras->ras_consecutive_stride_requests = 0; - ras->ras_stride_length = 0; - ras->ras_stride_pages = 0; - RAS_CDEBUG(ras); -} - -void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras) -{ - spin_lock_init(&ras->ras_lock); - ras->ras_rpc_size = PTLRPC_MAX_BRW_PAGES; - ras_reset(inode, ras, 0); - ras->ras_requests = 0; -} - -/* - * Check whether the read request is in the stride window. - * If it is in the stride window, return 1, otherwise return 0. 
- */ -static int index_in_stride_window(struct ll_readahead_state *ras, - unsigned long index) -{ - unsigned long stride_gap; - - if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 || - ras->ras_stride_pages == ras->ras_stride_length) - return 0; - - stride_gap = index - ras->ras_last_readpage - 1; - - /* If it is contiguous read */ - if (stride_gap == 0) - return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages; - - /* Otherwise check the stride by itself */ - return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap && - ras->ras_consecutive_pages == ras->ras_stride_pages; -} - -static void ras_update_stride_detector(struct ll_readahead_state *ras, - unsigned long index) -{ - unsigned long stride_gap = index - ras->ras_last_readpage - 1; - - if ((stride_gap != 0 || ras->ras_consecutive_stride_requests == 0) && - !stride_io_mode(ras)) { - ras->ras_stride_pages = ras->ras_consecutive_pages; - ras->ras_stride_length = ras->ras_consecutive_pages + - stride_gap; - } - LASSERT(ras->ras_request_index == 0); - LASSERT(ras->ras_consecutive_stride_requests == 0); - - if (index <= ras->ras_last_readpage) { - /*Reset stride window for forward read*/ - ras_stride_reset(ras); - return; - } - - ras->ras_stride_pages = ras->ras_consecutive_pages; - ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages; - - RAS_CDEBUG(ras); -} - -/* Stride Read-ahead window will be increased inc_len according to - * stride I/O pattern - */ -static void ras_stride_increase_window(struct ll_readahead_state *ras, - struct ll_ra_info *ra, - unsigned long inc_len) -{ - unsigned long left, step, window_len; - unsigned long stride_len; - - LASSERT(ras->ras_stride_length > 0); - LASSERTF(ras->ras_window_start + ras->ras_window_len >= - ras->ras_stride_offset, - "window_start %lu, window_len %lu stride_offset %lu\n", - ras->ras_window_start, - ras->ras_window_len, ras->ras_stride_offset); - - stride_len = ras->ras_window_start + ras->ras_window_len - - 
ras->ras_stride_offset; - - left = stride_len % ras->ras_stride_length; - window_len = ras->ras_window_len - left; - - if (left < ras->ras_stride_pages) - left += inc_len; - else - left = ras->ras_stride_pages + inc_len; - - LASSERT(ras->ras_stride_pages != 0); - - step = left / ras->ras_stride_pages; - left %= ras->ras_stride_pages; - - window_len += step * ras->ras_stride_length + left; - - if (stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length, - ras->ras_stride_pages, ras->ras_stride_offset, - window_len) <= ra->ra_max_pages_per_file) - ras->ras_window_len = window_len; - - RAS_CDEBUG(ras); -} - -static void ras_increase_window(struct inode *inode, - struct ll_readahead_state *ras, - struct ll_ra_info *ra) -{ - /* The stretch of ra-window should be aligned with max rpc_size - * but current clio architecture does not support retrieve such - * information from lower layer. FIXME later - */ - if (stride_io_mode(ras)) { - ras_stride_increase_window(ras, ra, ras->ras_rpc_size); - } else { - unsigned long wlen; - - wlen = min(ras->ras_window_len + ras->ras_rpc_size, - ra->ra_max_pages_per_file); - ras->ras_window_len = ras_align(ras, wlen, NULL); - } -} - -static void ras_update(struct ll_sb_info *sbi, struct inode *inode, - struct ll_readahead_state *ras, unsigned long index, - enum ras_update_flags flags) -{ - struct ll_ra_info *ra = &sbi->ll_ra_info; - int zero = 0, stride_detect = 0, ra_miss = 0; - bool hit = flags & LL_RAS_HIT; - - spin_lock(&ras->ras_lock); - - if (!hit) - CDEBUG(D_READA, DFID " pages at %lu miss.\n", - PFID(ll_inode2fid(inode)), index); - - ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS); - - /* reset the read-ahead window in two cases. First when the app seeks - * or reads to some other part of the file. Secondly if we get a - * read-ahead miss that we think we've previously issued. This can - * be a symptom of there being so many read-ahead pages that the VM is - * reclaiming it before we get to it. 
- */ - if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) { - zero = 1; - ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE); - } else if (!hit && ras->ras_window_len && - index < ras->ras_next_readahead && - index_in_window(index, ras->ras_window_start, 0, - ras->ras_window_len)) { - ra_miss = 1; - ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW); - } - - /* On the second access to a file smaller than the tunable - * ra_max_read_ahead_whole_pages trigger RA on all pages in the - * file up to ra_max_pages_per_file. This is simply a best effort - * and only occurs once per open file. Normal RA behavior is reverted - * to for subsequent IO. The mmap case does not increment - * ras_requests and thus can never trigger this behavior. - */ - if (ras->ras_requests >= 2 && !ras->ras_request_index) { - __u64 kms_pages; - - kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >> - PAGE_SHIFT; - - CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages, - ra->ra_max_read_ahead_whole_pages, - ra->ra_max_pages_per_file); - - if (kms_pages && - kms_pages <= ra->ra_max_read_ahead_whole_pages) { - ras->ras_window_start = 0; - ras->ras_next_readahead = index + 1; - ras->ras_window_len = min(ra->ra_max_pages_per_file, - ra->ra_max_read_ahead_whole_pages); - goto out_unlock; - } - } - if (zero) { - /* check whether it is in stride I/O mode*/ - if (!index_in_stride_window(ras, index)) { - if (ras->ras_consecutive_stride_requests == 0 && - ras->ras_request_index == 0) { - ras_update_stride_detector(ras, index); - ras->ras_consecutive_stride_requests++; - } else { - ras_stride_reset(ras); - } - ras_reset(inode, ras, index); - ras->ras_consecutive_pages++; - goto out_unlock; - } else { - ras->ras_consecutive_pages = 0; - ras->ras_consecutive_requests = 0; - if (++ras->ras_consecutive_stride_requests > 1) - stride_detect = 1; - RAS_CDEBUG(ras); - } - } else { - if (ra_miss) { - if (index_in_stride_window(ras, index) && - stride_io_mode(ras)) { - if (index != ras->ras_last_readpage + 
1) - ras->ras_consecutive_pages = 0; - ras_reset(inode, ras, index); - - /* If stride-RA hit cache miss, the stride - * detector will not be reset to avoid the - * overhead of redetecting read-ahead mode, - * but on the condition that the stride window - * is still intersect with normal sequential - * read-ahead window. - */ - if (ras->ras_window_start < - ras->ras_stride_offset) - ras_stride_reset(ras); - RAS_CDEBUG(ras); - } else { - /* Reset both stride window and normal RA - * window - */ - ras_reset(inode, ras, index); - ras->ras_consecutive_pages++; - ras_stride_reset(ras); - goto out_unlock; - } - } else if (stride_io_mode(ras)) { - /* If this is contiguous read but in stride I/O mode - * currently, check whether stride step still is valid, - * if invalid, it will reset the stride ra window - */ - if (!index_in_stride_window(ras, index)) { - /* Shrink stride read-ahead window to be zero */ - ras_stride_reset(ras); - ras->ras_window_len = 0; - ras->ras_next_readahead = index; - } - } - } - ras->ras_consecutive_pages++; - ras->ras_last_readpage = index; - ras_set_start(inode, ras, index); - - if (stride_io_mode(ras)) { - /* Since stride readahead is sensitive to the offset - * of read-ahead, so we use original offset here, - * instead of ras_window_start, which is RPC aligned - */ - ras->ras_next_readahead = max(index, ras->ras_next_readahead); - ras->ras_window_start = max(ras->ras_stride_offset, - ras->ras_window_start); - } else { - if (ras->ras_next_readahead < ras->ras_window_start) - ras->ras_next_readahead = ras->ras_window_start; - if (!hit) - ras->ras_next_readahead = index + 1; - } - RAS_CDEBUG(ras); - - /* Trigger RA in the mmap case where ras_consecutive_requests - * is not incremented and thus can't be used to trigger RA - */ - if (ras->ras_consecutive_pages >= 4 && flags & LL_RAS_MMAP) { - ras_increase_window(inode, ras, ra); - /* - * reset consecutive pages so that the readahead window can - * grow gradually. 
- */ - ras->ras_consecutive_pages = 0; - goto out_unlock; - } - - /* Initially reset the stride window offset to next_readahead*/ - if (ras->ras_consecutive_stride_requests == 2 && stride_detect) { - /** - * Once stride IO mode is detected, next_readahead should be - * reset to make sure next_readahead > stride offset - */ - ras->ras_next_readahead = max(index, ras->ras_next_readahead); - ras->ras_stride_offset = index; - ras->ras_window_start = max(index, ras->ras_window_start); - } - - /* The initial ras_window_len is set to the request size. To avoid - * uselessly reading and discarding pages for random IO the window is - * only increased once per consecutive request received. - */ - if ((ras->ras_consecutive_requests > 1 || stride_detect) && - !ras->ras_request_index) - ras_increase_window(inode, ras, ra); -out_unlock: - RAS_CDEBUG(ras); - ras->ras_request_index++; - spin_unlock(&ras->ras_lock); -} - -int ll_writepage(struct page *vmpage, struct writeback_control *wbc) -{ - struct inode *inode = vmpage->mapping->host; - struct ll_inode_info *lli = ll_i2info(inode); - struct lu_env *env; - struct cl_io *io; - struct cl_page *page; - struct cl_object *clob; - bool redirtied = false; - bool unlocked = false; - int result; - u16 refcheck; - - LASSERT(PageLocked(vmpage)); - LASSERT(!PageWriteback(vmpage)); - - LASSERT(ll_i2dtexp(inode)); - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) { - result = PTR_ERR(env); - goto out; - } - - clob = ll_i2info(inode)->lli_clob; - LASSERT(clob); - - io = vvp_env_thread_io(env); - io->ci_obj = clob; - io->ci_ignore_layout = 1; - result = cl_io_init(env, io, CIT_MISC, clob); - if (result == 0) { - page = cl_page_find(env, clob, vmpage->index, - vmpage, CPT_CACHEABLE); - if (!IS_ERR(page)) { - lu_ref_add(&page->cp_reference, "writepage", - current); - cl_page_assume(env, io, page); - result = cl_page_flush(env, io, page); - if (result != 0) { - /* - * Re-dirty page on error so it retries write, - * but not in case when IO has 
actually - * occurred and completed with an error. - */ - if (!PageError(vmpage)) { - redirty_page_for_writepage(wbc, vmpage); - result = 0; - redirtied = true; - } - } - cl_page_disown(env, io, page); - unlocked = true; - lu_ref_del(&page->cp_reference, - "writepage", current); - cl_page_put(env, page); - } else { - result = PTR_ERR(page); - } - } - cl_io_fini(env, io); - - if (redirtied && wbc->sync_mode == WB_SYNC_ALL) { - loff_t offset = cl_offset(clob, vmpage->index); - - /* Flush page failed because the extent is being written out. - * Wait for the write of extent to be finished to avoid - * breaking kernel which assumes ->writepage should mark - * PageWriteback or clean the page. - */ - result = cl_sync_file_range(inode, offset, - offset + PAGE_SIZE - 1, - CL_FSYNC_LOCAL, 1); - if (result > 0) { - /* actually we may have written more than one page. - * decreasing this page because the caller will count - * it. - */ - wbc->nr_to_write -= result - 1; - result = 0; - } - } - - cl_env_put(env, &refcheck); - goto out; - -out: - if (result < 0) { - if (!lli->lli_async_rc) - lli->lli_async_rc = result; - SetPageError(vmpage); - if (!unlocked) - unlock_page(vmpage); - } - return result; -} - -int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) -{ - struct inode *inode = mapping->host; - struct ll_sb_info *sbi = ll_i2sbi(inode); - loff_t start; - loff_t end; - enum cl_fsync_mode mode; - int range_whole = 0; - int result; - int ignore_layout = 0; - - if (wbc->range_cyclic) { - start = mapping->writeback_index << PAGE_SHIFT; - end = OBD_OBJECT_EOF; - } else { - start = wbc->range_start; - end = wbc->range_end; - if (end == LLONG_MAX) { - end = OBD_OBJECT_EOF; - range_whole = start == 0; - } - } - - mode = CL_FSYNC_NONE; - if (wbc->sync_mode == WB_SYNC_ALL) - mode = CL_FSYNC_LOCAL; - - if (sbi->ll_umounting) - /* if the mountpoint is being umounted, all pages have to be - * evicted to avoid hitting LBUG when truncate_inode_pages() - * is 
called later on. - */ - ignore_layout = 1; - - if (!ll_i2info(inode)->lli_clob) - return 0; - - result = cl_sync_file_range(inode, start, end, mode, ignore_layout); - if (result > 0) { - wbc->nr_to_write -= result; - result = 0; - } - - if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) { - if (end == OBD_OBJECT_EOF) - mapping->writeback_index = 0; - else - mapping->writeback_index = (end >> PAGE_SHIFT) + 1; - } - return result; -} - -struct ll_cl_context *ll_cl_find(struct file *file) -{ - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct ll_cl_context *lcc; - struct ll_cl_context *found = NULL; - - read_lock(&fd->fd_lock); - list_for_each_entry(lcc, &fd->fd_lccs, lcc_list) { - if (lcc->lcc_cookie == current) { - found = lcc; - break; - } - } - read_unlock(&fd->fd_lock); - - return found; -} - -void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io) -{ - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx; - - memset(lcc, 0, sizeof(*lcc)); - INIT_LIST_HEAD(&lcc->lcc_list); - lcc->lcc_cookie = current; - lcc->lcc_env = env; - lcc->lcc_io = io; - - write_lock(&fd->fd_lock); - list_add(&lcc->lcc_list, &fd->fd_lccs); - write_unlock(&fd->fd_lock); -} - -void ll_cl_remove(struct file *file, const struct lu_env *env) -{ - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx; - - write_lock(&fd->fd_lock); - list_del_init(&lcc->lcc_list); - write_unlock(&fd->fd_lock); -} - -static int ll_io_read_page(const struct lu_env *env, struct cl_io *io, - struct cl_page *page) -{ - struct inode *inode = vvp_object_inode(page->cp_obj); - struct ll_file_data *fd = vvp_env_io(env)->vui_fd; - struct ll_readahead_state *ras = &fd->fd_ras; - struct cl_2queue *queue = &io->ci_queue; - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct vvp_page *vpg; - bool uptodate; - int rc = 0; - - vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, 
page)); - uptodate = vpg->vpg_defer_uptodate; - - if (sbi->ll_ra_info.ra_max_pages_per_file > 0 && - sbi->ll_ra_info.ra_max_pages > 0) { - struct vvp_io *vio = vvp_env_io(env); - enum ras_update_flags flags = 0; - - if (uptodate) - flags |= LL_RAS_HIT; - if (!vio->vui_ra_valid) - flags |= LL_RAS_MMAP; - ras_update(sbi, inode, ras, vvp_index(vpg), flags); - } - - cl_2queue_init(queue); - if (uptodate) { - vpg->vpg_ra_used = 1; - cl_page_export(env, page, 1); - cl_page_disown(env, io, page); - } else { - cl_page_list_add(&queue->c2_qin, page); - } - - if (sbi->ll_ra_info.ra_max_pages_per_file > 0 && - sbi->ll_ra_info.ra_max_pages > 0) { - int rc2; - - rc2 = ll_readahead(env, io, &queue->c2_qin, ras, - uptodate); - CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n", - PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg)); - } - - if (queue->c2_qin.pl_nr > 0) - rc = cl_io_submit_rw(env, io, CRT_READ, queue); - - /* - * Unlock unsent pages in case of error. - */ - cl_page_list_disown(env, io, &queue->c2_qin); - cl_2queue_fini(env, queue); - - return rc; -} - -int ll_readpage(struct file *file, struct page *vmpage) -{ - struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob; - struct ll_cl_context *lcc; - const struct lu_env *env; - struct cl_io *io; - struct cl_page *page; - int result; - - lcc = ll_cl_find(file); - if (!lcc) { - unlock_page(vmpage); - return -EIO; - } - - env = lcc->lcc_env; - io = lcc->lcc_io; - LASSERT(io->ci_state == CIS_IO_GOING); - page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE); - if (!IS_ERR(page)) { - LASSERT(page->cp_type == CPT_CACHEABLE); - if (likely(!PageUptodate(vmpage))) { - cl_page_assume(env, io, page); - result = ll_io_read_page(env, io, page); - } else { - /* Page from a non-object file. 
*/ - unlock_page(vmpage); - result = 0; - } - cl_page_put(env, page); - } else { - unlock_page(vmpage); - result = PTR_ERR(page); - } - return result; -} - -int ll_page_sync_io(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, enum cl_req_type crt) -{ - struct cl_2queue *queue; - int result; - - LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); - - queue = &io->ci_queue; - cl_2queue_init_page(queue, page); - - result = cl_io_submit_sync(env, io, crt, queue, 0); - LASSERT(cl_page_is_owned(page, io)); - - if (crt == CRT_READ) - /* - * in CRT_WRITE case page is left locked even in case of - * error. - */ - cl_page_list_disown(env, io, &queue->c2_qin); - cl_2queue_fini(env, queue); - - return result; -} diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c deleted file mode 100644 index 722e5ea1af5f..000000000000 --- a/drivers/staging/lustre/lustre/llite/rw26.c +++ /dev/null @@ -1,641 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/lustre/llite/rw26.c - * - * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version - */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include "llite_internal.h" - -/** - * Implements Linux VM address_space::invalidatepage() method. This method is - * called when the page is truncate from a file, either as a result of - * explicit truncate, or when inode is removed from memory (as a result of - * final iput(), umount, or memory pressure induced icache shrinking). - * - * [0, offset] bytes of the page remain valid (this is for a case of not-page - * aligned truncate). Lustre leaves partially truncated page in the cache, - * relying on struct inode::i_size to limit further accesses. 
- */ -static void ll_invalidatepage(struct page *vmpage, unsigned int offset, - unsigned int length) -{ - struct inode *inode; - struct lu_env *env; - struct cl_page *page; - struct cl_object *obj; - - LASSERT(PageLocked(vmpage)); - LASSERT(!PageWriteback(vmpage)); - - /* - * It is safe to not check anything in invalidatepage/releasepage - * below because they are run with page locked and all our io is - * happening with locked page too - */ - if (offset == 0 && length == PAGE_SIZE) { - /* See the comment in ll_releasepage() */ - env = cl_env_percpu_get(); - LASSERT(!IS_ERR(env)); - inode = vmpage->mapping->host; - obj = ll_i2info(inode)->lli_clob; - if (obj) { - page = cl_vmpage_page(vmpage, obj); - if (page) { - cl_page_delete(env, page); - cl_page_put(env, page); - } - } else { - LASSERT(vmpage->private == 0); - } - cl_env_percpu_put(env); - } -} - -static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) -{ - struct lu_env *env; - struct cl_object *obj; - struct cl_page *page; - struct address_space *mapping; - int result = 0; - - LASSERT(PageLocked(vmpage)); - if (PageWriteback(vmpage) || PageDirty(vmpage)) - return 0; - - mapping = vmpage->mapping; - if (!mapping) - return 1; - - obj = ll_i2info(mapping->host)->lli_clob; - if (!obj) - return 1; - - /* 1 for caller, 1 for cl_page and 1 for page cache */ - if (page_count(vmpage) > 3) - return 0; - - page = cl_vmpage_page(vmpage, obj); - if (!page) - return 1; - - env = cl_env_percpu_get(); - LASSERT(!IS_ERR(env)); - - if (!cl_page_in_use(page)) { - result = 1; - cl_page_delete(env, page); - } - - /* To use percpu env array, the call path can not be rescheduled; - * otherwise percpu array will be messed if ll_releaspage() called - * again on the same CPU. - * - * If this page holds the last refc of cl_object, the following - * call path may cause reschedule: - * cl_page_put -> cl_page_free -> cl_object_put -> - * lu_object_put -> lu_object_free -> lov_delete_raid0. 
- * - * However, the kernel can't get rid of this inode until all pages have - * been cleaned up. Now that we hold page lock here, it's pretty safe - * that we won't get into object delete path. - */ - LASSERT(cl_object_refc(obj) > 1); - cl_page_put(env, page); - - cl_env_percpu_put(env); - return result; -} - -#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL) - -/* ll_free_user_pages - tear down page struct array - * @pages: array of page struct pointers underlying target buffer - */ -static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) -{ - int i; - - for (i = 0; i < npages; i++) { - if (do_dirty) - set_page_dirty_lock(pages[i]); - put_page(pages[i]); - } - kvfree(pages); -} - -ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, - int rw, struct inode *inode, - struct ll_dio_pages *pv) -{ - struct cl_page *clp; - struct cl_2queue *queue; - struct cl_object *obj = io->ci_obj; - int i; - ssize_t rc = 0; - loff_t file_offset = pv->ldp_start_offset; - size_t size = pv->ldp_size; - int page_count = pv->ldp_nr; - struct page **pages = pv->ldp_pages; - size_t page_size = cl_page_size(obj); - bool do_io; - int io_pages = 0; - - queue = &io->ci_queue; - cl_2queue_init(queue); - for (i = 0; i < page_count; i++) { - if (pv->ldp_offsets) - file_offset = pv->ldp_offsets[i]; - - LASSERT(!(file_offset & (page_size - 1))); - clp = cl_page_find(env, obj, cl_index(obj, file_offset), - pv->ldp_pages[i], CPT_TRANSIENT); - if (IS_ERR(clp)) { - rc = PTR_ERR(clp); - break; - } - - rc = cl_page_own(env, io, clp); - if (rc) { - LASSERT(clp->cp_state == CPS_FREEING); - cl_page_put(env, clp); - break; - } - - do_io = true; - - /* check the page type: if the page is a host page, then do - * write directly - */ - if (clp->cp_type == CPT_CACHEABLE) { - struct page *vmpage = cl_page_vmpage(clp); - struct page *src_page; - struct page *dst_page; - void *src; - void *dst; - - src_page = (rw == WRITE) ? pages[i] : vmpage; - dst_page = (rw == WRITE) ? 
vmpage : pages[i]; - - src = kmap_atomic(src_page); - dst = kmap_atomic(dst_page); - memcpy(dst, src, min(page_size, size)); - kunmap_atomic(dst); - kunmap_atomic(src); - - /* make sure page will be added to the transfer by - * cl_io_submit()->...->vvp_page_prep_write(). - */ - if (rw == WRITE) - set_page_dirty(vmpage); - - if (rw == READ) { - /* do not issue the page for read, since it - * may reread a ra page which has NOT uptodate - * bit set. - */ - cl_page_disown(env, io, clp); - do_io = false; - } - } - - if (likely(do_io)) { - /* - * Add a page to the incoming page list of 2-queue. - */ - cl_page_list_add(&queue->c2_qin, clp); - - /* - * Set page clip to tell transfer formation engine - * that page has to be sent even if it is beyond KMS. - */ - cl_page_clip(env, clp, 0, min(size, page_size)); - - ++io_pages; - } - - /* drop the reference count for cl_page_find */ - cl_page_put(env, clp); - size -= page_size; - file_offset += page_size; - } - - if (rc == 0 && io_pages) { - rc = cl_io_submit_sync(env, io, - rw == READ ? CRT_READ : CRT_WRITE, - queue, 0); - } - if (rc == 0) - rc = pv->ldp_size; - - cl_2queue_discard(env, io, queue); - cl_2queue_disown(env, io, queue); - cl_2queue_fini(env, queue); - return rc; -} -EXPORT_SYMBOL(ll_direct_rw_pages); - -static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, - int rw, struct inode *inode, - struct address_space *mapping, - size_t size, loff_t file_offset, - struct page **pages, int page_count) -{ - struct ll_dio_pages pvec = { - .ldp_pages = pages, - .ldp_nr = page_count, - .ldp_size = size, - .ldp_offsets = NULL, - .ldp_start_offset = file_offset - }; - - return ll_direct_rw_pages(env, io, rw, inode, &pvec); -} - -/* This is the maximum size of a single O_DIRECT request, based on the - * kmalloc limit. We need to fit all of the brw_page structs, each one - * representing PAGE_SIZE worth of user data, into a single buffer, and - * then truncate this to be a full-sized RPC. 
For 4kB PAGE_SIZE this is - * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. - */ -#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \ - PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) -static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter) -{ - struct ll_cl_context *lcc; - const struct lu_env *env; - struct cl_io *io; - struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - loff_t file_offset = iocb->ki_pos; - ssize_t count = iov_iter_count(iter); - ssize_t tot_bytes = 0, result = 0; - long size = MAX_DIO_SIZE; - - /* Check EOF by ourselves */ - if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode)) - return 0; - - /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */ - if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK)) - return -EINVAL; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", - PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE, - file_offset, file_offset, count >> PAGE_SHIFT, - MAX_DIO_SIZE >> PAGE_SHIFT); - - /* Check that all user buffers are aligned as well */ - if (iov_iter_alignment(iter) & ~PAGE_MASK) - return -EINVAL; - - lcc = ll_cl_find(file); - if (!lcc) - return -EIO; - - env = lcc->lcc_env; - LASSERT(!IS_ERR(env)); - io = lcc->lcc_io; - LASSERT(io); - - while (iov_iter_count(iter)) { - struct page **pages; - size_t offs; - - count = min_t(size_t, iov_iter_count(iter), size); - if (iov_iter_rw(iter) == READ) { - if (file_offset >= i_size_read(inode)) - break; - if (file_offset + count > i_size_read(inode)) - count = i_size_read(inode) - file_offset; - } - - result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); - if (likely(result > 0)) { - int n = DIV_ROUND_UP(result + offs, PAGE_SIZE); - - result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter), - inode, file->f_mapping, - result, file_offset, pages, - n); - ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ); - } - if 
(unlikely(result <= 0)) { - /* If we can't allocate a large enough buffer - * for the request, shrink it to a smaller - * PAGE_SIZE multiple and try again. - * We should always be able to kmalloc for a - * page worth of page pointers = 4MB on i386. - */ - if (result == -ENOMEM && - size > (PAGE_SIZE / sizeof(*pages)) * - PAGE_SIZE) { - size = ((((size / 2) - 1) | - ~PAGE_MASK) + 1) & - PAGE_MASK; - CDEBUG(D_VFSTRACE, "DIO size now %lu\n", - size); - continue; - } - - goto out; - } - iov_iter_advance(iter, result); - tot_bytes += result; - file_offset += result; - } -out: - if (tot_bytes > 0) { - struct vvp_io *vio = vvp_env_io(env); - - /* no commit async for direct IO */ - vio->u.write.vui_written += tot_bytes; - } - - return tot_bytes ? tot_bytes : result; -} - -/** - * Prepare partially written-to page for a write. - */ -static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg) -{ - struct cl_attr *attr = vvp_env_thread_attr(env); - struct cl_object *obj = io->ci_obj; - struct vvp_page *vpg = cl_object_page_slice(obj, pg); - loff_t offset = cl_offset(obj, vvp_index(vpg)); - int result; - - cl_object_attr_lock(obj); - result = cl_object_attr_get(env, obj, attr); - cl_object_attr_unlock(obj); - if (result == 0) { - /* - * If are writing to a new page, no need to read old data. - * The extent locking will have updated the KMS, and for our - * purposes here we can treat it like i_size. 
- */ - if (attr->cat_kms <= offset) { - char *kaddr = kmap_atomic(vpg->vpg_page); - - memset(kaddr, 0, cl_page_size(obj)); - kunmap_atomic(kaddr); - } else if (vpg->vpg_defer_uptodate) { - vpg->vpg_ra_used = 1; - } else { - result = ll_page_sync_io(env, io, pg, CRT_READ); - } - } - return result; -} - -static int ll_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int flags, - struct page **pagep, void **fsdata) -{ - struct ll_cl_context *lcc; - const struct lu_env *env = NULL; - struct cl_io *io; - struct cl_page *page = NULL; - struct cl_object *clob = ll_i2info(mapping->host)->lli_clob; - pgoff_t index = pos >> PAGE_SHIFT; - struct page *vmpage = NULL; - unsigned int from = pos & (PAGE_SIZE - 1); - unsigned int to = from + len; - int result = 0; - - CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len); - - lcc = ll_cl_find(file); - if (!lcc) { - io = NULL; - result = -EIO; - goto out; - } - - env = lcc->lcc_env; - io = lcc->lcc_io; - - /* To avoid deadlock, try to lock page first. */ - vmpage = grab_cache_page_nowait(mapping, index); - if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) { - struct vvp_io *vio = vvp_env_io(env); - struct cl_page_list *plist = &vio->u.write.vui_queue; - - /* if the page is already in dirty cache, we have to commit - * the pages right now; otherwise, it may cause deadlock - * because it holds page lock of a dirty page and request for - * more grants. It's okay for the dirty page to be the first - * one in commit page list, though. 
- */ - if (vmpage && plist->pl_nr > 0) { - unlock_page(vmpage); - put_page(vmpage); - vmpage = NULL; - } - - /* commit pages and then wait for page lock */ - result = vvp_io_write_commit(env, io); - if (result < 0) - goto out; - - if (!vmpage) { - vmpage = grab_cache_page_write_begin(mapping, index, - flags); - if (!vmpage) { - result = -ENOMEM; - goto out; - } - } - } - - page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE); - if (IS_ERR(page)) { - result = PTR_ERR(page); - goto out; - } - - lcc->lcc_page = page; - lu_ref_add(&page->cp_reference, "cl_io", io); - - cl_page_assume(env, io, page); - if (!PageUptodate(vmpage)) { - /* - * We're completely overwriting an existing page, - * so _don't_ set it up to date until commit_write - */ - if (from == 0 && to == PAGE_SIZE) { - CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n"); - POISON_PAGE(vmpage, 0x11); - } else { - /* TODO: can be optimized at OSC layer to check if it - * is a lockless IO. In that case, it's not necessary - * to read the data. 
- */ - result = ll_prepare_partial_page(env, io, page); - if (result == 0) - SetPageUptodate(vmpage); - } - } - if (result < 0) - cl_page_unassume(env, io, page); -out: - if (result < 0) { - if (vmpage) { - unlock_page(vmpage); - put_page(vmpage); - } - if (!IS_ERR_OR_NULL(page)) { - lu_ref_del(&page->cp_reference, "cl_io", io); - cl_page_put(env, page); - } - if (io) - io->ci_result = result; - } else { - *pagep = vmpage; - *fsdata = lcc; - } - return result; -} - -static int ll_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int copied, - struct page *vmpage, void *fsdata) -{ - struct ll_cl_context *lcc = fsdata; - const struct lu_env *env; - struct cl_io *io; - struct vvp_io *vio; - struct cl_page *page; - unsigned int from = pos & (PAGE_SIZE - 1); - bool unplug = false; - int result = 0; - - put_page(vmpage); - - env = lcc->lcc_env; - page = lcc->lcc_page; - io = lcc->lcc_io; - vio = vvp_env_io(env); - - LASSERT(cl_page_is_owned(page, io)); - if (copied > 0) { - struct cl_page_list *plist = &vio->u.write.vui_queue; - - lcc->lcc_page = NULL; /* page will be queued */ - - /* Add it into write queue */ - cl_page_list_add(plist, page); - if (plist->pl_nr == 1) /* first page */ - vio->u.write.vui_from = from; - else - LASSERT(from == 0); - vio->u.write.vui_to = from + copied; - - /* - * To address the deadlock in balance_dirty_pages() where - * this dirty page may be written back in the same thread. 
- */ - if (PageDirty(vmpage)) - unplug = true; - - /* We may have one full RPC, commit it soon */ - if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES) - unplug = true; - - CL_PAGE_DEBUG(D_VFSTRACE, env, page, - "queued page: %d.\n", plist->pl_nr); - } else { - cl_page_disown(env, io, page); - - lcc->lcc_page = NULL; - lu_ref_del(&page->cp_reference, "cl_io", io); - cl_page_put(env, page); - - /* page list is not contiguous now, commit it now */ - unplug = true; - } - - if (unplug || - file->f_flags & O_SYNC || IS_SYNC(file_inode(file))) - result = vvp_io_write_commit(env, io); - - if (result < 0) - io->ci_result = result; - return result >= 0 ? copied : result; -} - -#ifdef CONFIG_MIGRATION -static int ll_migratepage(struct address_space *mapping, - struct page *newpage, struct page *page, - enum migrate_mode mode - ) -{ - /* Always fail page migration until we have a proper implementation */ - return -EIO; -} -#endif - -const struct address_space_operations ll_aops = { - .readpage = ll_readpage, - .direct_IO = ll_direct_IO_26, - .writepage = ll_writepage, - .writepages = ll_writepages, - .set_page_dirty = __set_page_dirty_nobuffers, - .write_begin = ll_write_begin, - .write_end = ll_write_end, - .invalidatepage = ll_invalidatepage, - .releasepage = (void *)ll_releasepage, -#ifdef CONFIG_MIGRATION - .migratepage = ll_migratepage, -#endif -}; diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c deleted file mode 100644 index d864f5f36d85..000000000000 --- a/drivers/staging/lustre/lustre/llite/statahead.c +++ /dev/null @@ -1,1577 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#include -#include -#include -#include -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include "llite_internal.h" - -#define SA_OMITTED_ENTRY_MAX 8ULL - -enum se_stat { - /** negative values are for error cases */ - SA_ENTRY_INIT = 0, /** init entry */ - SA_ENTRY_SUCC = 1, /** stat succeed */ - SA_ENTRY_INVA = 2, /** invalid entry */ -}; - -/* - * sa_entry is not refcounted: statahead thread allocates it and do async stat, - * and in async stat callback ll_statahead_interpret() will add it into - * sai_interim_entries, later statahead thread will call sa_handle_callback() to - * instantiate entry and move it into sai_entries, and then only scanner process - * can access and free it. 
- */ -struct sa_entry { - /* link into sai_interim_entries or sai_entries */ - struct list_head se_list; - /* link into sai hash table locally */ - struct list_head se_hash; - /* entry index in the sai */ - __u64 se_index; - /* low layer ldlm lock handle */ - __u64 se_handle; - /* entry status */ - enum se_stat se_state; - /* entry size, contains name */ - int se_size; - /* pointer to async getattr enqueue info */ - struct md_enqueue_info *se_minfo; - /* pointer to the async getattr request */ - struct ptlrpc_request *se_req; - /* pointer to the target inode */ - struct inode *se_inode; - /* entry name */ - struct qstr se_qstr; - /* entry fid */ - struct lu_fid se_fid; -}; - -static unsigned int sai_generation; -static DEFINE_SPINLOCK(sai_generation_lock); - -/* sa_entry is ready to use */ -static inline int sa_ready(struct sa_entry *entry) -{ - smp_rmb(); - return (entry->se_state != SA_ENTRY_INIT); -} - -/* hash value to put in sai_cache */ -static inline int sa_hash(int val) -{ - return val & LL_SA_CACHE_MASK; -} - -/* hash entry into sai_cache */ -static inline void -sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry) -{ - int i = sa_hash(entry->se_qstr.hash); - - spin_lock(&sai->sai_cache_lock[i]); - list_add_tail(&entry->se_hash, &sai->sai_cache[i]); - spin_unlock(&sai->sai_cache_lock[i]); -} - -/* - * Remove entry from SA table. 
- */ -static inline void -sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry) -{ - int i = sa_hash(entry->se_qstr.hash); - - spin_lock(&sai->sai_cache_lock[i]); - list_del_init(&entry->se_hash); - spin_unlock(&sai->sai_cache_lock[i]); -} - -static inline int agl_should_run(struct ll_statahead_info *sai, - struct inode *inode) -{ - return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid); -} - -/* statahead window is full */ -static inline int sa_sent_full(struct ll_statahead_info *sai) -{ - return atomic_read(&sai->sai_cache_count) >= sai->sai_max; -} - -/* got async stat replies */ -static inline int sa_has_callback(struct ll_statahead_info *sai) -{ - return !list_empty(&sai->sai_interim_entries); -} - -static inline int agl_list_empty(struct ll_statahead_info *sai) -{ - return list_empty(&sai->sai_agls); -} - -/** - * (1) hit ratio less than 80% - * or - * (2) consecutive miss more than 8 - * then means low hit. - */ -static inline int sa_low_hit(struct ll_statahead_info *sai) -{ - return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) || - (sai->sai_consecutive_miss > 8)); -} - -/* - * if the given index is behind of statahead window more than - * SA_OMITTED_ENTRY_MAX, then it is old. 
- */ -static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index) -{ - return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX < - sai->sai_index); -} - -/* allocate sa_entry and hash it to allow scanner process to find it */ -static struct sa_entry * -sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index, - const char *name, int len, const struct lu_fid *fid) -{ - struct ll_inode_info *lli; - struct sa_entry *entry; - int entry_size; - char *dname; - - entry_size = sizeof(struct sa_entry) + (len & ~3) + 4; - entry = kzalloc(entry_size, GFP_NOFS); - if (unlikely(!entry)) - return ERR_PTR(-ENOMEM); - - CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n", - len, name, entry, index); - - entry->se_index = index; - entry->se_state = SA_ENTRY_INIT; - entry->se_size = entry_size; - dname = (char *)entry + sizeof(struct sa_entry); - memcpy(dname, name, len); - dname[len] = 0; - - entry->se_qstr.hash = full_name_hash(parent, name, len); - entry->se_qstr.len = len; - entry->se_qstr.name = dname; - entry->se_fid = *fid; - - lli = ll_i2info(sai->sai_dentry->d_inode); - spin_lock(&lli->lli_sa_lock); - INIT_LIST_HEAD(&entry->se_list); - sa_rehash(sai, entry); - spin_unlock(&lli->lli_sa_lock); - - atomic_inc(&sai->sai_cache_count); - - return entry; -} - -/* free sa_entry, which should have been unhashed and not in any list */ -static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry) -{ - CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n", - entry->se_qstr.len, entry->se_qstr.name, entry, - entry->se_index); - - LASSERT(list_empty(&entry->se_list)); - LASSERT(list_empty(&entry->se_hash)); - - kfree(entry); - atomic_dec(&sai->sai_cache_count); -} - -/* - * find sa_entry by name, used by directory scanner, lock is not needed because - * only scanner can remove the entry from cache. 
- */ -static struct sa_entry * -sa_get(struct ll_statahead_info *sai, const struct qstr *qstr) -{ - struct sa_entry *entry; - int i = sa_hash(qstr->hash); - - list_for_each_entry(entry, &sai->sai_cache[i], se_hash) { - if (entry->se_qstr.hash == qstr->hash && - entry->se_qstr.len == qstr->len && - memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0) - return entry; - } - return NULL; -} - -/* unhash and unlink sa_entry, and then free it */ -static inline void -sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry) -{ - struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode); - - LASSERT(!list_empty(&entry->se_hash)); - LASSERT(!list_empty(&entry->se_list)); - LASSERT(sa_ready(entry)); - - sa_unhash(sai, entry); - - spin_lock(&lli->lli_sa_lock); - list_del_init(&entry->se_list); - spin_unlock(&lli->lli_sa_lock); - - if (entry->se_inode) - iput(entry->se_inode); - - sa_free(sai, entry); -} - -/* called by scanner after use, sa_entry will be killed */ -static void -sa_put(struct ll_statahead_info *sai, struct sa_entry *entry, struct ll_inode_info *lli) -{ - struct sa_entry *tmp, *next; - - if (entry && entry->se_state == SA_ENTRY_SUCC) { - struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode); - - sai->sai_hit++; - sai->sai_consecutive_miss = 0; - sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max); - } else { - sai->sai_miss++; - sai->sai_consecutive_miss++; - } - - if (entry) - sa_kill(sai, entry); - - /* - * kill old completed entries, only scanner process does this, no need - * to lock - */ - list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) { - if (!is_omitted_entry(sai, tmp->se_index)) - break; - sa_kill(sai, tmp); - } - - spin_lock(&lli->lli_sa_lock); - if (sai->sai_task) - wake_up_process(sai->sai_task); - spin_unlock(&lli->lli_sa_lock); - -} - -/* - * update state and sort add entry to sai_entries by index, return true if - * scanner is waiting on this entry. 
- */ -static bool -__sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret) -{ - struct list_head *pos = &sai->sai_entries; - __u64 index = entry->se_index; - struct sa_entry *se; - - LASSERT(!sa_ready(entry)); - LASSERT(list_empty(&entry->se_list)); - - list_for_each_entry_reverse(se, &sai->sai_entries, se_list) { - if (se->se_index < entry->se_index) { - pos = &se->se_list; - break; - } - } - list_add(&entry->se_list, pos); - entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC; - - return (index == sai->sai_index_wait); -} - -/* - * release resources used in async stat RPC, update entry state and wakeup if - * scanner process it waiting on this entry. - */ -static void -sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret) -{ - struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode); - struct md_enqueue_info *minfo = entry->se_minfo; - struct ptlrpc_request *req = entry->se_req; - bool wakeup; - - /* release resources used in RPC */ - if (minfo) { - entry->se_minfo = NULL; - ll_intent_release(&minfo->mi_it); - iput(minfo->mi_dir); - kfree(minfo); - } - - if (req) { - entry->se_req = NULL; - ptlrpc_req_finished(req); - } - - spin_lock(&lli->lli_sa_lock); - wakeup = __sa_make_ready(sai, entry, ret); - spin_unlock(&lli->lli_sa_lock); - - if (wakeup) - wake_up(&sai->sai_waitq); -} - -/* Insert inode into the list of sai_agls. 
*/ -static void ll_agl_add(struct ll_statahead_info *sai, - struct inode *inode, int index) -{ - struct ll_inode_info *child = ll_i2info(inode); - struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode); - int added = 0; - - spin_lock(&child->lli_agl_lock); - if (child->lli_agl_index == 0) { - child->lli_agl_index = index; - spin_unlock(&child->lli_agl_lock); - - LASSERT(list_empty(&child->lli_agl_list)); - - igrab(inode); - spin_lock(&parent->lli_agl_lock); - if (list_empty(&sai->sai_agls)) - added = 1; - list_add_tail(&child->lli_agl_list, &sai->sai_agls); - spin_unlock(&parent->lli_agl_lock); - } else { - spin_unlock(&child->lli_agl_lock); - } - - if (added > 0) - wake_up_process(sai->sai_agl_task); -} - -/* allocate sai */ -static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry) -{ - struct ll_inode_info *lli = ll_i2info(dentry->d_inode); - struct ll_statahead_info *sai; - int i; - - sai = kzalloc(sizeof(*sai), GFP_NOFS); - if (!sai) - return NULL; - - sai->sai_dentry = dget(dentry); - atomic_set(&sai->sai_refcount, 1); - - sai->sai_max = LL_SA_RPC_MIN; - sai->sai_index = 1; - init_waitqueue_head(&sai->sai_waitq); - - INIT_LIST_HEAD(&sai->sai_interim_entries); - INIT_LIST_HEAD(&sai->sai_entries); - INIT_LIST_HEAD(&sai->sai_agls); - - for (i = 0; i < LL_SA_CACHE_SIZE; i++) { - INIT_LIST_HEAD(&sai->sai_cache[i]); - spin_lock_init(&sai->sai_cache_lock[i]); - } - atomic_set(&sai->sai_cache_count, 0); - - spin_lock(&sai_generation_lock); - lli->lli_sa_generation = ++sai_generation; - if (unlikely(!sai_generation)) - lli->lli_sa_generation = ++sai_generation; - spin_unlock(&sai_generation_lock); - - return sai; -} - -/* free sai */ -static inline void ll_sai_free(struct ll_statahead_info *sai) -{ - LASSERT(sai->sai_dentry); - dput(sai->sai_dentry); - kfree(sai); -} - -/* - * take refcount of sai if sai for @dir exists, which means statahead is on for - * this directory. 
- */ -static inline struct ll_statahead_info *ll_sai_get(struct inode *dir) -{ - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai = NULL; - - spin_lock(&lli->lli_sa_lock); - sai = lli->lli_sai; - if (sai) - atomic_inc(&sai->sai_refcount); - spin_unlock(&lli->lli_sa_lock); - - return sai; -} - -/* - * put sai refcount after use, if refcount reaches zero, free sai and sa_entries - * attached to it. - */ -static void ll_sai_put(struct ll_statahead_info *sai) -{ - struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode); - - if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) { - struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode); - struct sa_entry *entry, *next; - - lli->lli_sai = NULL; - spin_unlock(&lli->lli_sa_lock); - - LASSERT(sai->sai_task == NULL); - LASSERT(sai->sai_agl_task == NULL); - LASSERT(sai->sai_sent == sai->sai_replied); - LASSERT(!sa_has_callback(sai)); - - list_for_each_entry_safe(entry, next, &sai->sai_entries, - se_list) - sa_kill(sai, entry); - - LASSERT(atomic_read(&sai->sai_cache_count) == 0); - LASSERT(list_empty(&sai->sai_agls)); - - ll_sai_free(sai); - atomic_dec(&sbi->ll_sa_running); - } -} - -/* Do NOT forget to drop inode refcount when into sai_agls. */ -static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai) -{ - struct ll_inode_info *lli = ll_i2info(inode); - __u64 index = lli->lli_agl_index; - int rc; - - LASSERT(list_empty(&lli->lli_agl_list)); - - /* AGL maybe fall behind statahead with one entry */ - if (is_omitted_entry(sai, index + 1)) { - lli->lli_agl_index = 0; - iput(inode); - return; - } - - /* Someone is in glimpse (sync or async), do nothing. */ - rc = down_write_trylock(&lli->lli_glimpse_sem); - if (rc == 0) { - lli->lli_agl_index = 0; - iput(inode); - return; - } - - /* - * Someone triggered glimpse within 1 sec before. 
- * 1) The former glimpse succeeded with glimpse lock granted by OST, and - * if the lock is still cached on client, AGL needs to do nothing. If - * it is cancelled by other client, AGL maybe cannot obtain new lock - * for no glimpse callback triggered by AGL. - * 2) The former glimpse succeeded, but OST did not grant glimpse lock. - * Under such case, it is quite possible that the OST will not grant - * glimpse lock for AGL also. - * 3) The former glimpse failed, compared with other two cases, it is - * relative rare. AGL can ignore such case, and it will not muchly - * affect the performance. - */ - if (lli->lli_glimpse_time != 0 && - time_before(jiffies - 1 * HZ, lli->lli_glimpse_time)) { - up_write(&lli->lli_glimpse_sem); - lli->lli_agl_index = 0; - iput(inode); - return; - } - - CDEBUG(D_READA, "Handling (init) async glimpse: inode = " - DFID ", idx = %llu\n", PFID(&lli->lli_fid), index); - - cl_agl(inode); - lli->lli_agl_index = 0; - lli->lli_glimpse_time = jiffies; - up_write(&lli->lli_glimpse_sem); - - CDEBUG(D_READA, "Handled (init) async glimpse: inode= " - DFID ", idx = %llu, rc = %d\n", - PFID(&lli->lli_fid), index, rc); - - iput(inode); -} - -/* - * prepare inode for sa entry, add it into agl list, now sa_entry is ready - * to be used by scanner process. 
- */ -static void sa_instantiate(struct ll_statahead_info *sai, - struct sa_entry *entry) -{ - struct inode *dir = sai->sai_dentry->d_inode; - struct inode *child; - struct md_enqueue_info *minfo; - struct lookup_intent *it; - struct ptlrpc_request *req; - struct mdt_body *body; - int rc = 0; - - LASSERT(entry->se_handle != 0); - - minfo = entry->se_minfo; - it = &minfo->mi_it; - req = entry->se_req; - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (!body) { - rc = -EFAULT; - goto out; - } - - child = entry->se_inode; - if (child) { - /* revalidate; unlinked and re-created with the same name */ - if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->mbo_fid1))) { - entry->se_inode = NULL; - iput(child); - child = NULL; - } - } - - it->it_lock_handle = entry->se_handle; - rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL); - if (rc != 1) { - rc = -EAGAIN; - goto out; - } - - rc = ll_prep_inode(&child, req, dir->i_sb, it); - if (rc) - goto out; - - CDEBUG(D_READA, "%s: setting %.*s" DFID " l_data to inode %p\n", - ll_get_fsname(child->i_sb, NULL, 0), - entry->se_qstr.len, entry->se_qstr.name, - PFID(ll_inode2fid(child)), child); - ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL); - - entry->se_inode = child; - - if (agl_should_run(sai, child)) - ll_agl_add(sai, child, entry->se_index); - -out: - /* - * sa_make_ready() will drop ldlm ibits lock refcount by calling - * ll_intent_drop_lock() in spite of failures. Do not worry about - * calling ll_intent_drop_lock() more than once. 
- */ - sa_make_ready(sai, entry, rc); -} - -/* once there are async stat replies, instantiate sa_entry from replies */ -static void sa_handle_callback(struct ll_statahead_info *sai) -{ - struct ll_inode_info *lli; - - lli = ll_i2info(sai->sai_dentry->d_inode); - - while (sa_has_callback(sai)) { - struct sa_entry *entry; - - spin_lock(&lli->lli_sa_lock); - if (unlikely(!sa_has_callback(sai))) { - spin_unlock(&lli->lli_sa_lock); - break; - } - entry = list_entry(sai->sai_interim_entries.next, - struct sa_entry, se_list); - list_del_init(&entry->se_list); - spin_unlock(&lli->lli_sa_lock); - - sa_instantiate(sai, entry); - } -} - -/* - * callback for async stat, because this is called in ptlrpcd context, we only - * put sa_entry in sai_cb_entries list, and let sa_handle_callback() to really - * prepare inode and instantiate sa_entry later. - */ -static int ll_statahead_interpret(struct ptlrpc_request *req, - struct md_enqueue_info *minfo, int rc) -{ - struct lookup_intent *it = &minfo->mi_it; - struct inode *dir = minfo->mi_dir; - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai = lli->lli_sai; - struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata; - __u64 handle = 0; - - if (it_disposition(it, DISP_LOOKUP_NEG)) - rc = -ENOENT; - - /* - * because statahead thread will wait for all inflight RPC to finish, - * sai should be always valid, no need to refcount - */ - LASSERT(sai); - LASSERT(entry); - - CDEBUG(D_READA, "sa_entry %.*s rc %d\n", - entry->se_qstr.len, entry->se_qstr.name, rc); - - if (rc) { - ll_intent_release(it); - iput(dir); - kfree(minfo); - } else { - /* - * release ibits lock ASAP to avoid deadlock when statahead - * thread enqueues lock on parent in readdir and another - * process enqueues lock on child with parent lock held, eg. - * unlink. 
- */ - handle = it->it_lock_handle; - ll_intent_drop_lock(it); - } - - spin_lock(&lli->lli_sa_lock); - if (rc) { - if (__sa_make_ready(sai, entry, rc)) - wake_up(&sai->sai_waitq); - } else { - int first = 0; - entry->se_minfo = minfo; - entry->se_req = ptlrpc_request_addref(req); - /* - * Release the async ibits lock ASAP to avoid deadlock - * when statahead thread tries to enqueue lock on parent - * for readpage and other tries to enqueue lock on child - * with parent's lock held, for example: unlink. - */ - entry->se_handle = handle; - if (!sa_has_callback(sai)) - first = 1; - - list_add_tail(&entry->se_list, &sai->sai_interim_entries); - - if (first && sai->sai_task) - wake_up_process(sai->sai_task); - } - sai->sai_replied++; - - spin_unlock(&lli->lli_sa_lock); - - return rc; -} - -/* finish async stat RPC arguments */ -static void sa_fini_data(struct md_enqueue_info *minfo) -{ - iput(minfo->mi_dir); - kfree(minfo); -} - -/** - * prepare arguments for async stat RPC. - */ -static struct md_enqueue_info * -sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry) -{ - struct md_enqueue_info *minfo; - struct ldlm_enqueue_info *einfo; - struct md_op_data *op_data; - - minfo = kzalloc(sizeof(*minfo), GFP_NOFS); - if (!minfo) - return ERR_PTR(-ENOMEM); - - op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) { - kfree(minfo); - return (struct md_enqueue_info *)op_data; - } - - if (!child) - op_data->op_fid2 = entry->se_fid; - - minfo->mi_it.it_op = IT_GETATTR; - minfo->mi_dir = igrab(dir); - minfo->mi_cb = ll_statahead_interpret; - minfo->mi_cbdata = entry; - - einfo = &minfo->mi_einfo; - einfo->ei_type = LDLM_IBITS; - einfo->ei_mode = it_to_lock_mode(&minfo->mi_it); - einfo->ei_cb_bl = ll_md_blocking_ast; - einfo->ei_cb_cp = ldlm_completion_ast; - einfo->ei_cb_gl = NULL; - einfo->ei_cbdata = NULL; - - return minfo; -} - -/* async stat for file not found in dcache */ -static int 
sa_lookup(struct inode *dir, struct sa_entry *entry) -{ - struct md_enqueue_info *minfo; - int rc; - - minfo = sa_prep_data(dir, NULL, entry); - if (IS_ERR(minfo)) - return PTR_ERR(minfo); - - rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo); - if (rc) - sa_fini_data(minfo); - - return rc; -} - -/** - * async stat for file found in dcache, similar to .revalidate - * - * \retval 1 dentry valid, no RPC sent - * \retval 0 dentry invalid, will send async stat RPC - * \retval negative number upon error - */ -static int sa_revalidate(struct inode *dir, struct sa_entry *entry, - struct dentry *dentry) -{ - struct inode *inode = d_inode(dentry); - struct lookup_intent it = { .it_op = IT_GETATTR, - .it_lock_handle = 0 }; - struct md_enqueue_info *minfo; - int rc; - - if (unlikely(!inode)) - return 1; - - if (d_mountpoint(dentry)) - return 1; - - entry->se_inode = igrab(inode); - rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode), - NULL); - if (rc == 1) { - entry->se_handle = it.it_lock_handle; - ll_intent_release(&it); - return 1; - } - - minfo = sa_prep_data(dir, inode, entry); - if (IS_ERR(minfo)) { - entry->se_inode = NULL; - iput(inode); - return PTR_ERR(minfo); - } - - rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo); - if (rc) { - entry->se_inode = NULL; - iput(inode); - sa_fini_data(minfo); - } - - return rc; -} - -/* async stat for file with @name */ -static void sa_statahead(struct dentry *parent, const char *name, int len, - const struct lu_fid *fid) -{ - struct inode *dir = d_inode(parent); - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai = lli->lli_sai; - struct dentry *dentry = NULL; - struct sa_entry *entry; - int rc; - - entry = sa_alloc(parent, sai, sai->sai_index, name, len, fid); - if (IS_ERR(entry)) - return; - - dentry = d_lookup(parent, &entry->se_qstr); - if (!dentry) { - rc = sa_lookup(dir, entry); - } else { - rc = sa_revalidate(dir, entry, dentry); - if (rc == 1 && agl_should_run(sai, 
d_inode(dentry))) - ll_agl_add(sai, d_inode(dentry), entry->se_index); - } - - if (dentry) - dput(dentry); - - if (rc) - sa_make_ready(sai, entry, rc); - else - sai->sai_sent++; - - sai->sai_index++; -} - -/* async glimpse (agl) thread main function */ -static int ll_agl_thread(void *arg) -{ - struct dentry *parent = arg; - struct inode *dir = d_inode(parent); - struct ll_inode_info *plli = ll_i2info(dir); - struct ll_inode_info *clli; - /* We already own this reference, so it is safe to take it without a lock. */ - struct ll_statahead_info *sai = plli->lli_sai; - - CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n", - sai, parent); - - while (!kthread_should_stop()) { - - spin_lock(&plli->lli_agl_lock); - /* The statahead thread maybe help to process AGL entries, - * so check whether list empty again. - */ - if (!list_empty(&sai->sai_agls)) { - clli = list_entry(sai->sai_agls.next, - struct ll_inode_info, lli_agl_list); - list_del_init(&clli->lli_agl_list); - spin_unlock(&plli->lli_agl_lock); - ll_agl_trigger(&clli->lli_vfs_inode, sai); - } else { - spin_unlock(&plli->lli_agl_lock); - } - - set_current_state(TASK_IDLE); - if (list_empty(&sai->sai_agls) && - !kthread_should_stop()) - schedule(); - __set_current_state(TASK_RUNNING); - } - - spin_lock(&plli->lli_agl_lock); - sai->sai_agl_valid = 0; - while (!list_empty(&sai->sai_agls)) { - clli = list_entry(sai->sai_agls.next, - struct ll_inode_info, lli_agl_list); - list_del_init(&clli->lli_agl_list); - spin_unlock(&plli->lli_agl_lock); - clli->lli_agl_index = 0; - iput(&clli->lli_vfs_inode); - spin_lock(&plli->lli_agl_lock); - } - spin_unlock(&plli->lli_agl_lock); - CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n", - sai, parent); - ll_sai_put(sai); - return 0; -} - -/* start agl thread */ -static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai) -{ - struct ll_inode_info *plli; - struct task_struct *task; - - CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n", - sai, 
parent); - - plli = ll_i2info(d_inode(parent)); - task = kthread_create(ll_agl_thread, parent, "ll_agl_%u", - plli->lli_opendir_pid); - if (IS_ERR(task)) { - CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task)); - return; - } - - sai->sai_agl_task = task; - atomic_inc(&ll_i2sbi(d_inode(parent))->ll_agl_total); - spin_lock(&plli->lli_agl_lock); - sai->sai_agl_valid = 1; - spin_unlock(&plli->lli_agl_lock); - /* Get an extra reference that the thread holds */ - ll_sai_get(d_inode(parent)); - - wake_up_process(task); -} - -/* statahead thread main function */ -static int ll_statahead_thread(void *arg) -{ - struct dentry *parent = arg; - struct inode *dir = d_inode(parent); - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_sb_info *sbi = ll_i2sbi(dir); - struct ll_statahead_info *sai = lli->lli_sai; - struct page *page = NULL; - __u64 pos = 0; - int first = 0; - int rc = 0; - struct md_op_data *op_data; - - CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n", - sai, parent); - - op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0, - LUSTRE_OPC_ANY, dir); - if (IS_ERR(op_data)) { - rc = PTR_ERR(op_data); - goto out; - } - - op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages; - - while (pos != MDS_DIR_END_OFF && sai->sai_task) { - struct lu_dirpage *dp; - struct lu_dirent *ent; - - sai->sai_in_readpage = 1; - page = ll_get_dir_page(dir, op_data, pos); - sai->sai_in_readpage = 0; - if (IS_ERR(page)) { - rc = PTR_ERR(page); - CDEBUG(D_READA, "error reading dir " DFID " at %llu/%llu: opendir_pid = %u: rc = %d\n", - PFID(ll_inode2fid(dir)), pos, sai->sai_index, - lli->lli_opendir_pid, rc); - break; - } - - dp = page_address(page); - for (ent = lu_dirent_start(dp); - ent && sai->sai_task && !sa_low_hit(sai); - ent = lu_dirent_next(ent)) { - struct lu_fid fid; - __u64 hash; - int namelen; - char *name; - - hash = le64_to_cpu(ent->lde_hash); - if (unlikely(hash < pos)) - /* - * Skip until we find target hash value. 
- */ - continue; - - namelen = le16_to_cpu(ent->lde_namelen); - if (unlikely(namelen == 0)) - /* - * Skip dummy record. - */ - continue; - - name = ent->lde_name; - if (name[0] == '.') { - if (namelen == 1) { - /* - * skip "." - */ - continue; - } else if (name[1] == '.' && namelen == 2) { - /* - * skip ".." - */ - continue; - } else if (!sai->sai_ls_all) { - /* - * skip hidden files. - */ - sai->sai_skip_hidden++; - continue; - } - } - - /* - * don't stat-ahead first entry. - */ - if (unlikely(++first == 1)) - continue; - - fid_le_to_cpu(&fid, &ent->lde_fid); - - do { - sa_handle_callback(sai); - - spin_lock(&lli->lli_agl_lock); - while (sa_sent_full(sai) && - !agl_list_empty(sai)) { - struct ll_inode_info *clli; - - clli = list_entry(sai->sai_agls.next, - struct ll_inode_info, - lli_agl_list); - list_del_init(&clli->lli_agl_list); - spin_unlock(&lli->lli_agl_lock); - - ll_agl_trigger(&clli->lli_vfs_inode, - sai); - - spin_lock(&lli->lli_agl_lock); - } - spin_unlock(&lli->lli_agl_lock); - - set_current_state(TASK_IDLE); - if (sa_sent_full(sai) && - !sa_has_callback(sai) && - agl_list_empty(sai) && - sai->sai_task) - /* wait for spare statahead window */ - schedule(); - __set_current_state(TASK_RUNNING); - } while (sa_sent_full(sai) && sai->sai_task); - - sa_statahead(parent, name, namelen, &fid); - } - - pos = le64_to_cpu(dp->ldp_hash_end); - ll_release_page(dir, page, - le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE); - - if (sa_low_hit(sai)) { - rc = -EFAULT; - atomic_inc(&sbi->ll_sa_wrong); - CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n", - PFID(&lli->lli_fid), sai->sai_hit, - sai->sai_miss, sai->sai_sent, - sai->sai_replied, current->pid); - break; - } - } - ll_finish_md_op_data(op_data); - - if (rc < 0) { - spin_lock(&lli->lli_sa_lock); - sai->sai_task = NULL; - lli->lli_sa_enabled = 0; - spin_unlock(&lli->lli_sa_lock); - } - - /* - * statahead is finished, but 
statahead entries need to be cached, wait - * for file release to stop me. - */ - while (sai->sai_task) { - sa_handle_callback(sai); - - set_current_state(TASK_IDLE); - if (!sa_has_callback(sai) && - sai->sai_task) - schedule(); - __set_current_state(TASK_RUNNING); - } -out: - if (sai->sai_agl_task) { - kthread_stop(sai->sai_agl_task); - - CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n", - sai, (unsigned int)sai->sai_agl_task->pid); - sai->sai_agl_task = NULL; - } - /* - * wait for inflight statahead RPCs to finish, and then we can free sai - * safely because statahead RPC will access sai data - */ - while (sai->sai_sent != sai->sai_replied) { - /* in case we're not woken up, timeout wait */ - schedule_timeout_idle(HZ>>3); - } - - /* release resources held by statahead RPCs */ - sa_handle_callback(sai); - - CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n", - sai, parent); - - spin_lock(&lli->lli_sa_lock); - sai->sai_task = NULL; - spin_unlock(&lli->lli_sa_lock); - - wake_up(&sai->sai_waitq); - ll_sai_put(sai); - - do_exit(rc); -} - -/* authorize opened dir handle @key to statahead */ -void ll_authorize_statahead(struct inode *dir, void *key) -{ - struct ll_inode_info *lli = ll_i2info(dir); - - spin_lock(&lli->lli_sa_lock); - if (!lli->lli_opendir_key && !lli->lli_sai) { - /* - * if lli_sai is not NULL, it means previous statahead is not - * finished yet, we'd better not start a new statahead for now. - */ - LASSERT(!lli->lli_opendir_pid); - lli->lli_opendir_key = key; - lli->lli_opendir_pid = current->pid; - lli->lli_sa_enabled = 1; - } - spin_unlock(&lli->lli_sa_lock); -} - -/* - * deauthorize opened dir handle @key to statahead, but statahead thread may - * still be running, notify it to quit. 
- */ -void ll_deauthorize_statahead(struct inode *dir, void *key) -{ - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai; - - LASSERT(lli->lli_opendir_key == key); - LASSERT(lli->lli_opendir_pid); - - CDEBUG(D_READA, "deauthorize statahead for " DFID "\n", - PFID(&lli->lli_fid)); - - spin_lock(&lli->lli_sa_lock); - lli->lli_opendir_key = NULL; - lli->lli_opendir_pid = 0; - lli->lli_sa_enabled = 0; - sai = lli->lli_sai; - if (sai && sai->sai_task) { - /* - * statahead thread may not quit yet because it needs to cache - * entries, now it's time to tell it to quit. - */ - wake_up_process(sai->sai_task); - sai->sai_task = NULL; - } - spin_unlock(&lli->lli_sa_lock); -} - -enum { - /** - * not first dirent, or is "." - */ - LS_NOT_FIRST_DE = 0, - /** - * the first non-hidden dirent - */ - LS_FIRST_DE, - /** - * the first hidden dirent, that is "." - */ - LS_FIRST_DOT_DE -}; - -/* file is first dirent under @dir */ -static int is_first_dirent(struct inode *dir, struct dentry *dentry) -{ - const struct qstr *target = &dentry->d_name; - struct md_op_data *op_data; - struct page *page; - __u64 pos = 0; - int dot_de; - int rc = LS_NOT_FIRST_DE; - - op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0, - LUSTRE_OPC_ANY, dir); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - /** - * FIXME choose the start offset of the readdir - */ - op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages; - - page = ll_get_dir_page(dir, op_data, pos); - - while (1) { - struct lu_dirpage *dp; - struct lu_dirent *ent; - - if (IS_ERR(page)) { - struct ll_inode_info *lli = ll_i2info(dir); - - rc = PTR_ERR(page); - CERROR("%s: error reading dir " DFID " at %llu: opendir_pid = %u : rc = %d\n", - ll_get_fsname(dir->i_sb, NULL, 0), - PFID(ll_inode2fid(dir)), pos, - lli->lli_opendir_pid, rc); - break; - } - - dp = page_address(page); - for (ent = lu_dirent_start(dp); ent; - ent = lu_dirent_next(ent)) { - __u64 hash; - int namelen; - char *name; - - hash = 
le64_to_cpu(ent->lde_hash); - /* The ll_get_dir_page() can return any page containing - * the given hash which may be not the start hash. - */ - if (unlikely(hash < pos)) - continue; - - namelen = le16_to_cpu(ent->lde_namelen); - if (unlikely(namelen == 0)) - /* - * skip dummy record. - */ - continue; - - name = ent->lde_name; - if (name[0] == '.') { - if (namelen == 1) - /* - * skip "." - */ - continue; - else if (name[1] == '.' && namelen == 2) - /* - * skip ".." - */ - continue; - else - dot_de = 1; - } else { - dot_de = 0; - } - - if (dot_de && target->name[0] != '.') { - CDEBUG(D_READA, "%.*s skip hidden file %.*s\n", - target->len, target->name, - namelen, name); - continue; - } - - if (target->len != namelen || - memcmp(target->name, name, namelen) != 0) - rc = LS_NOT_FIRST_DE; - else if (!dot_de) - rc = LS_FIRST_DE; - else - rc = LS_FIRST_DOT_DE; - - ll_release_page(dir, page, false); - goto out; - } - pos = le64_to_cpu(dp->ldp_hash_end); - if (pos == MDS_DIR_END_OFF) { - /* - * End of directory reached. - */ - ll_release_page(dir, page, false); - goto out; - } else { - /* - * chain is exhausted - * Normal case: continue to the next page. 
- */ - ll_release_page(dir, page, - le32_to_cpu(dp->ldp_flags) & - LDF_COLLIDE); - page = ll_get_dir_page(dir, op_data, pos); - } - } -out: - ll_finish_md_op_data(op_data); - return rc; -} - -/** - * revalidate @dentryp from statahead cache - * - * \param[in] dir parent directory - * \param[in] sai sai structure - * \param[out] dentryp pointer to dentry which will be revalidated - * \param[in] unplug unplug statahead window only (normally for negative - * dentry) - * \retval 1 on success, dentry is saved in @dentryp - * \retval 0 if revalidation failed (no proper lock on client) - * \retval negative number upon error - */ -static int revalidate_statahead_dentry(struct inode *dir, - struct ll_statahead_info *sai, - struct dentry **dentryp, - bool unplug) -{ - struct ll_inode_info *lli = ll_i2info(dir); - struct sa_entry *entry = NULL; - struct ll_dentry_data *ldd; - int rc = 0; - - if ((*dentryp)->d_name.name[0] == '.') { - if (sai->sai_ls_all || - sai->sai_miss_hidden >= sai->sai_skip_hidden) { - /* - * Hidden dentry is the first one, or statahead - * thread does not skip so many hidden dentries - * before "sai_ls_all" enabled as below. - */ - } else { - if (!sai->sai_ls_all) - /* - * It maybe because hidden dentry is not - * the first one, "sai_ls_all" was not - * set, then "ls -al" missed. Enable - * "sai_ls_all" for such case. - */ - sai->sai_ls_all = 1; - - /* - * Such "getattr" has been skipped before - * "sai_ls_all" enabled as above. 
- */ - sai->sai_miss_hidden++; - return -EAGAIN; - } - } - - if (unplug) { - rc = 1; - goto out_unplug; - } - - entry = sa_get(sai, &(*dentryp)->d_name); - if (!entry) { - rc = -EAGAIN; - goto out_unplug; - } - - /* if statahead is busy in readdir, help it do post-work */ - if (!sa_ready(entry) && sai->sai_in_readpage) - sa_handle_callback(sai); - - if (!sa_ready(entry)) { - spin_lock(&lli->lli_sa_lock); - sai->sai_index_wait = entry->se_index; - spin_unlock(&lli->lli_sa_lock); - if (0 == wait_event_idle_timeout(sai->sai_waitq, - sa_ready(entry), 30 * HZ)) { - /* - * entry may not be ready, so it may be used by inflight - * statahead RPC, don't free it. - */ - entry = NULL; - rc = -EAGAIN; - goto out_unplug; - } - } - - if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode) { - struct inode *inode = entry->se_inode; - struct lookup_intent it = { .it_op = IT_GETATTR, - .it_lock_handle = entry->se_handle }; - __u64 bits; - - rc = md_revalidate_lock(ll_i2mdexp(dir), &it, - ll_inode2fid(inode), &bits); - if (rc == 1) { - if (!(*dentryp)->d_inode) { - struct dentry *alias; - - alias = ll_splice_alias(inode, *dentryp); - if (IS_ERR(alias)) { - ll_intent_release(&it); - rc = PTR_ERR(alias); - goto out_unplug; - } - *dentryp = alias; - /** - * statahead prepared this inode, transfer inode - * refcount from sa_entry to dentry - */ - entry->se_inode = NULL; - } else if ((*dentryp)->d_inode != inode) { - /* revalidate, but inode is recreated */ - CDEBUG(D_READA, - "%s: stale dentry %pd inode " DFID ", statahead inode " DFID "\n", - ll_get_fsname((*dentryp)->d_inode->i_sb, - NULL, 0), - *dentryp, - PFID(ll_inode2fid((*dentryp)->d_inode)), - PFID(ll_inode2fid(inode))); - ll_intent_release(&it); - rc = -ESTALE; - goto out_unplug; - } - - if ((bits & MDS_INODELOCK_LOOKUP) && - d_lustre_invalid(*dentryp)) - d_lustre_revalidate(*dentryp); - ll_intent_release(&it); - } - } -out_unplug: - /* - * statahead cached sa_entry can be used only once, and will be killed - * right after 
use, so if lookup/revalidate accessed statahead cache, - * set dentry ldd_sa_generation to parent lli_sa_generation, later if we - * stat this file again, we know we've done statahead before, see - * dentry_may_statahead(). - */ - ldd = ll_d2d(*dentryp); - ldd->lld_sa_generation = lli->lli_sa_generation; - sa_put(sai, entry, lli); - return rc; -} - -/** - * start statahead thread - * - * \param[in] dir parent directory - * \param[in] dentry dentry that triggers statahead, normally the first - * dirent under @dir - * \retval -EAGAIN on success, because when this function is - * called, it's already in lookup call, so client should - * do it itself instead of waiting for statahead thread - * to do it asynchronously. - * \retval negative number upon error - */ -static int start_statahead_thread(struct inode *dir, struct dentry *dentry) -{ - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai = NULL; - struct task_struct *task; - struct dentry *parent = dentry->d_parent; - int rc; - - /* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */ - rc = is_first_dirent(dir, dentry); - if (rc == LS_NOT_FIRST_DE) { - /* It is not "ls -{a}l" operation, no need statahead for it. */ - rc = -EFAULT; - goto out; - } - - sai = ll_sai_alloc(parent); - if (!sai) { - rc = -ENOMEM; - goto out; - } - - sai->sai_ls_all = (rc == LS_FIRST_DOT_DE); - /* - * if current lli_opendir_key was deauthorized, or dir re-opened by - * another process, don't start statahead, otherwise the newly spawned - * statahead thread won't be notified to quit. 
- */ - spin_lock(&lli->lli_sa_lock); - if (unlikely(lli->lli_sai || lli->lli_opendir_key || - lli->lli_opendir_pid != current->pid)) { - spin_unlock(&lli->lli_sa_lock); - rc = -EPERM; - goto out; - } - lli->lli_sai = sai; - spin_unlock(&lli->lli_sa_lock); - - atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running); - - CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %pd]\n", - current->pid, parent); - - task = kthread_create(ll_statahead_thread, parent, "ll_sa_%u", - lli->lli_opendir_pid); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - CERROR("can't start ll_sa thread, rc : %d\n", rc); - goto out; - } - - if (ll_i2sbi(parent->d_inode)->ll_flags & LL_SBI_AGL_ENABLED) - ll_start_agl(parent, sai); - - atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_total); - sai->sai_task = task; - - wake_up_process(task); - - /* - * We don't stat-ahead for the first dirent since we are already in - * lookup. - */ - return -EAGAIN; - -out: - /* - * once we start statahead thread failed, disable statahead so - * that subsequent stat won't waste time to try it. - */ - spin_lock(&lli->lli_sa_lock); - lli->lli_sa_enabled = 0; - lli->lli_sai = NULL; - spin_unlock(&lli->lli_sa_lock); - if (sai) - ll_sai_free(sai); - return rc; -} - -/** - * statahead entry function, this is called when client getattr on a file, it - * will start statahead thread if this is the first dir entry, else revalidate - * dentry from statahead cache. 
- * - * \param[in] dir parent directory - * \param[out] dentryp dentry to getattr - * \param[in] unplug unplug statahead window only (normally for negative - * dentry) - * \retval 1 on success - * \retval 0 revalidation from statahead cache failed, caller needs - * to getattr from server directly - * \retval negative number on error, caller often ignores this and - * then getattr from server - */ -int ll_statahead(struct inode *dir, struct dentry **dentryp, bool unplug) -{ - struct ll_statahead_info *sai; - - sai = ll_sai_get(dir); - if (sai) { - int rc; - - rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug); - CDEBUG(D_READA, "revalidate statahead %pd: %d.\n", - *dentryp, rc); - ll_sai_put(sai); - return rc; - } - return start_statahead_thread(dir, *dentryp); -} diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c deleted file mode 100644 index d335f29556c2..000000000000 --- a/drivers/staging/lustre/lustre/llite/super25.c +++ /dev/null @@ -1,189 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 
- * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include -#include -#include -#include -#include -#include "llite_internal.h" - -static struct kmem_cache *ll_inode_cachep; - -static struct inode *ll_alloc_inode(struct super_block *sb) -{ - struct ll_inode_info *lli; - - ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1); - lli = kmem_cache_zalloc(ll_inode_cachep, GFP_NOFS); - if (!lli) - return NULL; - - inode_init_once(&lli->lli_vfs_inode); - return &lli->lli_vfs_inode; -} - -static void ll_inode_destroy_callback(struct rcu_head *head) -{ - struct inode *inode = container_of(head, struct inode, i_rcu); - struct ll_inode_info *ptr = ll_i2info(inode); - - kmem_cache_free(ll_inode_cachep, ptr); -} - -static void ll_destroy_inode(struct inode *inode) -{ - call_rcu(&inode->i_rcu, ll_inode_destroy_callback); -} - -/* exported operations */ -struct super_operations lustre_super_operations = { - .alloc_inode = ll_alloc_inode, - .destroy_inode = ll_destroy_inode, - .evict_inode = ll_delete_inode, - .put_super = ll_put_super, - .statfs = ll_statfs, - .umount_begin = ll_umount_begin, - .remount_fs = ll_remount_fs, - .show_options = ll_show_options, -}; -MODULE_ALIAS_FS("lustre"); - -static int __init lustre_init(void) -{ - int rc; - - BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) != - LUSTRE_VOLATILE_HDR_LEN + 1); - - rc = libcfs_setup(); - if (rc) - return rc; - - /* print an address of _any_ initialized kernel symbol from this - * module, to allow debugging with gdb that doesn't support data - * symbols from modules. 
- */ - CDEBUG(D_INFO, "Lustre client module (%p).\n", - &lustre_super_operations); - - rc = -ENOMEM; - ll_inode_cachep = kmem_cache_create("lustre_inode_cache", - sizeof(struct ll_inode_info), 0, - SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, - NULL); - if (!ll_inode_cachep) - goto out_cache; - - ll_file_data_slab = kmem_cache_create("ll_file_data", - sizeof(struct ll_file_data), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!ll_file_data_slab) - goto out_cache; - - llite_root = debugfs_create_dir("llite", debugfs_lustre_root); - if (IS_ERR_OR_NULL(llite_root)) { - rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM; - llite_root = NULL; - goto out_cache; - } - - llite_kset = kset_create_and_add("llite", NULL, lustre_kobj); - if (!llite_kset) { - rc = -ENOMEM; - goto out_debugfs; - } - - rc = vvp_global_init(); - if (rc != 0) - goto out_sysfs; - - cl_inode_fini_env = cl_env_alloc(&cl_inode_fini_refcheck, - LCT_REMEMBER | LCT_NOREF); - if (IS_ERR(cl_inode_fini_env)) { - rc = PTR_ERR(cl_inode_fini_env); - goto out_vvp; - } - - cl_inode_fini_env->le_ctx.lc_cookie = 0x4; - - rc = ll_xattr_init(); - if (rc != 0) - goto out_inode_fini_env; - - lustre_register_super_ops(THIS_MODULE, ll_fill_super, ll_kill_super); - lustre_register_client_process_config(ll_process_config); - - return 0; - -out_inode_fini_env: - cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck); -out_vvp: - vvp_global_fini(); -out_sysfs: - kset_unregister(llite_kset); -out_debugfs: - debugfs_remove(llite_root); -out_cache: - kmem_cache_destroy(ll_inode_cachep); - kmem_cache_destroy(ll_file_data_slab); - return rc; -} - -static void __exit lustre_exit(void) -{ - lustre_register_super_ops(NULL, NULL, NULL); - lustre_register_client_process_config(NULL); - - debugfs_remove(llite_root); - kset_unregister(llite_kset); - - ll_xattr_fini(); - cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck); - vvp_global_fini(); - - kmem_cache_destroy(ll_inode_cachep); - kmem_cache_destroy(ll_file_data_slab); -} - -MODULE_AUTHOR("OpenSFS, 
Inc. "); -MODULE_DESCRIPTION("Lustre Client File System"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(lustre_init); -module_exit(lustre_exit); diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c deleted file mode 100644 index 0690fdbf49f5..000000000000 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ /dev/null @@ -1,159 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#include -#include -#include -#define DEBUG_SUBSYSTEM S_LLITE - -#include "llite_internal.h" - -static int ll_readlink_internal(struct inode *inode, - struct ptlrpc_request **request, char **symname) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_sb_info *sbi = ll_i2sbi(inode); - int rc, symlen = i_size_read(inode) + 1; - struct mdt_body *body; - struct md_op_data *op_data; - - *request = NULL; - - if (lli->lli_symlink_name) { - int print_limit = min_t(int, PAGE_SIZE - 128, symlen); - - *symname = lli->lli_symlink_name; - /* If the total CDEBUG() size is larger than a page, it - * will print a warning to the console, avoid this by - * printing just the last part of the symlink. - */ - CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n", - print_limit < symlen ? "..." : "", print_limit, - (*symname) + symlen - print_limit, symlen); - return 0; - } - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, symlen, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - return PTR_ERR(op_data); - - op_data->op_valid = OBD_MD_LINKNAME; - rc = md_getattr(sbi->ll_md_exp, op_data, request); - ll_finish_md_op_data(op_data); - if (rc) { - if (rc != -ENOENT) - CERROR("%s: inode " DFID ": rc = %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), rc); - goto failed; - } - - body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - if ((body->mbo_valid & OBD_MD_LINKNAME) == 0) { - CERROR("OBD_MD_LINKNAME not set on reply\n"); - rc = -EPROTO; - goto failed; - } - - LASSERT(symlen != 0); - if (body->mbo_eadatasize != symlen) { - CERROR("%s: inode " DFID ": symlink length %d not expected %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), body->mbo_eadatasize - 1, - symlen - 1); - rc = -EPROTO; - goto failed; - } - - *symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD); - if (!*symname || - strnlen(*symname, symlen) != symlen - 1) { - /* not full/NULL terminated */ - CERROR("inode 
%lu: symlink not NULL terminated string of length %d\n", - inode->i_ino, symlen - 1); - rc = -EPROTO; - goto failed; - } - - lli->lli_symlink_name = kzalloc(symlen, GFP_NOFS); - /* do not return an error if we cannot cache the symlink locally */ - if (lli->lli_symlink_name) { - memcpy(lli->lli_symlink_name, *symname, symlen); - *symname = lli->lli_symlink_name; - } - return 0; - -failed: - return rc; -} - -static void ll_put_link(void *p) -{ - ptlrpc_req_finished(p); -} - -static const char *ll_get_link(struct dentry *dentry, - struct inode *inode, - struct delayed_call *done) -{ - struct ptlrpc_request *request = NULL; - int rc; - char *symname = NULL; - - if (!dentry) - return ERR_PTR(-ECHILD); - - CDEBUG(D_VFSTRACE, "VFS Op\n"); - ll_inode_size_lock(inode); - rc = ll_readlink_internal(inode, &request, &symname); - ll_inode_size_unlock(inode); - if (rc) { - ptlrpc_req_finished(request); - return ERR_PTR(rc); - } - - /* symname may contain a pointer to the request message buffer, - * we delay request releasing then. - */ - set_delayed_call(done, ll_put_link, request); - return symname; -} - -const struct inode_operations ll_fast_symlink_inode_operations = { - .setattr = ll_setattr, - .get_link = ll_get_link, - .getattr = ll_getattr, - .permission = ll_inode_permission, - .listxattr = ll_listxattr, -}; diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c deleted file mode 100644 index 31dc3c0ade01..000000000000 --- a/drivers/staging/lustre/lustre/llite/vvp_dev.c +++ /dev/null @@ -1,640 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * cl_device and cl_device_type implementation for VVP layer. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include "llite_internal.h" -#include "vvp_internal.h" - -/***************************************************************************** - * - * Vvp device and device type functions. - * - */ - -/* - * vvp_ prefix stands for "Vfs Vm Posix". It corresponds to historical - * "llite_" (var. "ll_") prefix. 
- */ - -static struct kmem_cache *ll_thread_kmem; -struct kmem_cache *vvp_lock_kmem; -struct kmem_cache *vvp_object_kmem; -static struct kmem_cache *vvp_session_kmem; -static struct kmem_cache *vvp_thread_kmem; - -static struct lu_kmem_descr vvp_caches[] = { - { - .ckd_cache = &ll_thread_kmem, - .ckd_name = "ll_thread_kmem", - .ckd_size = sizeof(struct ll_thread_info), - }, - { - .ckd_cache = &vvp_lock_kmem, - .ckd_name = "vvp_lock_kmem", - .ckd_size = sizeof(struct vvp_lock), - }, - { - .ckd_cache = &vvp_object_kmem, - .ckd_name = "vvp_object_kmem", - .ckd_size = sizeof(struct vvp_object), - }, - { - .ckd_cache = &vvp_session_kmem, - .ckd_name = "vvp_session_kmem", - .ckd_size = sizeof(struct vvp_session) - }, - { - .ckd_cache = &vvp_thread_kmem, - .ckd_name = "vvp_thread_kmem", - .ckd_size = sizeof(struct vvp_thread_info), - }, - { - .ckd_cache = NULL - } -}; - -static void *ll_thread_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct vvp_thread_info *info; - - info = kmem_cache_zalloc(ll_thread_kmem, GFP_NOFS); - if (!info) - info = ERR_PTR(-ENOMEM); - return info; -} - -static void ll_thread_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct vvp_thread_info *info = data; - - kmem_cache_free(ll_thread_kmem, info); -} - -struct lu_context_key ll_thread_key = { - .lct_tags = LCT_CL_THREAD, - .lct_init = ll_thread_key_init, - .lct_fini = ll_thread_key_fini -}; - -static void *vvp_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct vvp_session *session; - - session = kmem_cache_zalloc(vvp_session_kmem, GFP_NOFS); - if (!session) - session = ERR_PTR(-ENOMEM); - return session; -} - -static void vvp_session_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct vvp_session *session = data; - - kmem_cache_free(vvp_session_kmem, session); -} - -struct lu_context_key vvp_session_key = { - .lct_tags = LCT_SESSION, - .lct_init = 
vvp_session_key_init, - .lct_fini = vvp_session_key_fini -}; - -static void *vvp_thread_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct vvp_thread_info *vti; - - vti = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS); - if (!vti) - vti = ERR_PTR(-ENOMEM); - return vti; -} - -static void vvp_thread_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct vvp_thread_info *vti = data; - - kmem_cache_free(vvp_thread_kmem, vti); -} - -struct lu_context_key vvp_thread_key = { - .lct_tags = LCT_CL_THREAD, - .lct_init = vvp_thread_key_init, - .lct_fini = vvp_thread_key_fini -}; - -/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */ -LU_TYPE_INIT_FINI(vvp, &vvp_thread_key, &ll_thread_key, &vvp_session_key); - -static const struct lu_device_operations vvp_lu_ops = { - .ldo_object_alloc = vvp_object_alloc -}; - -static struct lu_device *vvp_device_free(const struct lu_env *env, - struct lu_device *d) -{ - struct vvp_device *vdv = lu2vvp_dev(d); - struct cl_site *site = lu2cl_site(d->ld_site); - struct lu_device *next = cl2lu_dev(vdv->vdv_next); - - if (d->ld_site) { - cl_site_fini(site); - kfree(site); - } - cl_device_fini(lu2cl_dev(d)); - kfree(vdv); - return next; -} - -static struct lu_device *vvp_device_alloc(const struct lu_env *env, - struct lu_device_type *t, - struct lustre_cfg *cfg) -{ - struct vvp_device *vdv; - struct lu_device *lud; - struct cl_site *site; - int rc; - - vdv = kzalloc(sizeof(*vdv), GFP_NOFS); - if (!vdv) - return ERR_PTR(-ENOMEM); - - lud = &vdv->vdv_cl.cd_lu_dev; - cl_device_init(&vdv->vdv_cl, t); - vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops; - - site = kzalloc(sizeof(*site), GFP_NOFS); - if (site) { - rc = cl_site_init(site, &vdv->vdv_cl); - if (rc == 0) { - rc = lu_site_init_finish(&site->cs_lu); - } else { - LASSERT(!lud->ld_site); - CERROR("Cannot init lu_site, rc %d.\n", rc); - kfree(site); - } - } else { - rc = -ENOMEM; - } - if (rc != 0) { - vvp_device_free(env, 
lud); - lud = ERR_PTR(rc); - } - return lud; -} - -static int vvp_device_init(const struct lu_env *env, struct lu_device *d, - const char *name, struct lu_device *next) -{ - struct vvp_device *vdv; - int rc; - - vdv = lu2vvp_dev(d); - vdv->vdv_next = lu2cl_dev(next); - - LASSERT(d->ld_site && next->ld_type); - next->ld_site = d->ld_site; - rc = next->ld_type->ldt_ops->ldto_device_init(env, next, - next->ld_type->ldt_name, - NULL); - if (rc == 0) { - lu_device_get(next); - lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init); - } - return rc; -} - -static struct lu_device *vvp_device_fini(const struct lu_env *env, - struct lu_device *d) -{ - return cl2lu_dev(lu2vvp_dev(d)->vdv_next); -} - -static const struct lu_device_type_operations vvp_device_type_ops = { - .ldto_init = vvp_type_init, - .ldto_fini = vvp_type_fini, - - .ldto_start = vvp_type_start, - .ldto_stop = vvp_type_stop, - - .ldto_device_alloc = vvp_device_alloc, - .ldto_device_free = vvp_device_free, - .ldto_device_init = vvp_device_init, - .ldto_device_fini = vvp_device_fini, -}; - -struct lu_device_type vvp_device_type = { - .ldt_tags = LU_DEVICE_CL, - .ldt_name = LUSTRE_VVP_NAME, - .ldt_ops = &vvp_device_type_ops, - .ldt_ctx_tags = LCT_CL_THREAD -}; - -/** - * A mutex serializing calls to vvp_inode_fini() under extreme memory - * pressure, when environments cannot be allocated. - */ -int vvp_global_init(void) -{ - int rc; - - rc = lu_kmem_init(vvp_caches); - if (rc != 0) - return rc; - - rc = lu_device_type_init(&vvp_device_type); - if (rc != 0) - goto out_kmem; - - return 0; - -out_kmem: - lu_kmem_fini(vvp_caches); - - return rc; -} - -void vvp_global_fini(void) -{ - lu_device_type_fini(&vvp_device_type); - lu_kmem_fini(vvp_caches); -} - -/***************************************************************************** - * - * mirror obd-devices into cl devices. 
- * - */ - -int cl_sb_init(struct super_block *sb) -{ - struct ll_sb_info *sbi; - struct cl_device *cl; - struct lu_env *env; - int rc = 0; - u16 refcheck; - - sbi = ll_s2sbi(sb); - env = cl_env_get(&refcheck); - if (!IS_ERR(env)) { - cl = cl_type_setup(env, NULL, &vvp_device_type, - sbi->ll_dt_exp->exp_obd->obd_lu_dev); - if (!IS_ERR(cl)) { - sbi->ll_cl = cl; - sbi->ll_site = cl2lu_dev(cl)->ld_site; - } - cl_env_put(env, &refcheck); - } else { - rc = PTR_ERR(env); - } - return rc; -} - -int cl_sb_fini(struct super_block *sb) -{ - struct ll_sb_info *sbi; - struct lu_env *env; - struct cl_device *cld; - u16 refcheck; - int result; - - sbi = ll_s2sbi(sb); - env = cl_env_get(&refcheck); - if (!IS_ERR(env)) { - cld = sbi->ll_cl; - - if (cld) { - cl_stack_fini(env, cld); - sbi->ll_cl = NULL; - sbi->ll_site = NULL; - } - cl_env_put(env, &refcheck); - result = 0; - } else { - CERROR("Cannot cleanup cl-stack due to memory shortage.\n"); - result = PTR_ERR(env); - } - return result; -} - -/**************************************************************************** - * - * debugfs/lustre/llite/$MNT/dump_page_cache - * - ****************************************************************************/ - -/* - * To represent contents of a page cache as a byte stream, following - * information if encoded in 64bit offset: - * - * - file hash bucket in lu_site::ls_hash[] 28bits - * - * - how far file is from bucket head 4bits - * - * - page index 32bits - * - * First two data identify a file in the cache uniquely. 
- */ - -#define PGC_OBJ_SHIFT (32 + 4) -#define PGC_DEPTH_SHIFT (32) - -struct vvp_pgcache_id { - unsigned int vpi_bucket; - unsigned int vpi_depth; - u32 vpi_index; - - unsigned int vpi_curdep; - struct lu_object_header *vpi_obj; -}; - -struct seq_private { - struct ll_sb_info *sbi; - struct lu_env *env; - u16 refcheck; - struct cl_object *clob; -}; - -static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id) -{ - BUILD_BUG_ON(sizeof(pos) != sizeof(__u64)); - - id->vpi_index = pos & 0xffffffff; - id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf; - id->vpi_bucket = (unsigned long long)pos >> PGC_OBJ_SHIFT; -} - -static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id) -{ - return - ((__u64)id->vpi_index) | - ((__u64)id->vpi_depth << PGC_DEPTH_SHIFT) | - ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT); -} - -static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *data) -{ - struct vvp_pgcache_id *id = data; - struct lu_object_header *hdr = cfs_hash_object(hs, hnode); - - if (id->vpi_curdep-- > 0) - return 0; /* continue */ - - if (lu_object_is_dying(hdr)) - return 1; - - cfs_hash_get(hs, hnode); - id->vpi_obj = hdr; - return 1; -} - -static struct cl_object *vvp_pgcache_obj(const struct lu_env *env, - struct lu_device *dev, - struct vvp_pgcache_id *id) -{ - LASSERT(lu_device_is_cl(dev)); - - id->vpi_depth &= 0xf; - id->vpi_obj = NULL; - id->vpi_curdep = id->vpi_depth; - - cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket, - vvp_pgcache_obj_get, id); - if (id->vpi_obj) { - struct lu_object *lu_obj; - - lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type); - if (lu_obj) { - lu_object_ref_add(lu_obj, "dump", current); - return lu2cl(lu_obj); - } - lu_object_put(env, lu_object_top(id->vpi_obj)); - - } else if (id->vpi_curdep > 0) { - id->vpi_depth = 0xf; - } - return NULL; -} - -static struct page *vvp_pgcache_find(const struct lu_env *env, - struct lu_device *dev, - struct cl_object 
**clobp, loff_t *pos) -{ - struct cl_object *clob; - struct lu_site *site; - struct vvp_pgcache_id id; - - site = dev->ld_site; - vvp_pgcache_id_unpack(*pos, &id); - - while (1) { - if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash)) - return NULL; - clob = vvp_pgcache_obj(env, dev, &id); - if (clob) { - struct inode *inode = vvp_object_inode(clob); - struct page *vmpage; - int nr; - - nr = find_get_pages_contig(inode->i_mapping, - id.vpi_index, 1, &vmpage); - if (nr > 0) { - id.vpi_index = vmpage->index; - /* Cant support over 16T file */ - if (vmpage->index <= 0xffffffff) { - *clobp = clob; - *pos = vvp_pgcache_id_pack(&id); - return vmpage; - } - put_page(vmpage); - } - - lu_object_ref_del(&clob->co_lu, "dump", current); - cl_object_put(env, clob); - } - /* to the next object. */ - ++id.vpi_depth; - id.vpi_depth &= 0xf; - if (id.vpi_depth == 0 && ++id.vpi_bucket == 0) - return NULL; - id.vpi_index = 0; - } -} - -#define seq_page_flag(seq, page, flag, has_flags) do { \ - if (test_bit(PG_##flag, &(page)->flags)) { \ - seq_printf(seq, "%s"#flag, has_flags ? "|" : ""); \ - has_flags = 1; \ - } \ -} while (0) - -static void vvp_pgcache_page_show(const struct lu_env *env, - struct seq_file *seq, struct cl_page *page) -{ - struct vvp_page *vpg; - struct page *vmpage; - int has_flags; - - vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type)); - vmpage = vpg->vpg_page; - seq_printf(seq, " %5i | %p %p %s %s %s | %p " DFID "(%p) %lu %u [", - 0 /* gen */, - vpg, page, - "none", - vpg->vpg_defer_uptodate ? "du" : "- ", - PageWriteback(vmpage) ? 
"wb" : "-", - vmpage, PFID(ll_inode2fid(vmpage->mapping->host)), - vmpage->mapping->host, vmpage->index, - page_count(vmpage)); - has_flags = 0; - seq_page_flag(seq, vmpage, locked, has_flags); - seq_page_flag(seq, vmpage, error, has_flags); - seq_page_flag(seq, vmpage, referenced, has_flags); - seq_page_flag(seq, vmpage, uptodate, has_flags); - seq_page_flag(seq, vmpage, dirty, has_flags); - seq_page_flag(seq, vmpage, writeback, has_flags); - seq_printf(seq, "%s]\n", has_flags ? "" : "-"); -} - -static int vvp_pgcache_show(struct seq_file *f, void *v) -{ - struct seq_private *priv = f->private; - struct page *vmpage = v; - struct cl_page *page; - - seq_printf(f, "%8lx@" DFID ": ", vmpage->index, - PFID(lu_object_fid(&priv->clob->co_lu))); - lock_page(vmpage); - page = cl_vmpage_page(vmpage, priv->clob); - unlock_page(vmpage); - put_page(vmpage); - - if (page) { - vvp_pgcache_page_show(priv->env, f, page); - cl_page_put(priv->env, page); - } else { - seq_puts(f, "missing\n"); - } - lu_object_ref_del(&priv->clob->co_lu, "dump", current); - cl_object_put(priv->env, priv->clob); - - return 0; -} - -static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos) -{ - struct seq_private *priv = f->private; - struct page *ret; - - if (priv->sbi->ll_site->ls_obj_hash->hs_cur_bits > - 64 - PGC_OBJ_SHIFT) - ret = ERR_PTR(-EFBIG); - else - ret = vvp_pgcache_find(priv->env, &priv->sbi->ll_cl->cd_lu_dev, - &priv->clob, pos); - - return ret; -} - -static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos) -{ - struct seq_private *priv = f->private; - struct page *ret; - - *pos += 1; - ret = vvp_pgcache_find(priv->env, &priv->sbi->ll_cl->cd_lu_dev, - &priv->clob, pos); - return ret; -} - -static void vvp_pgcache_stop(struct seq_file *f, void *v) -{ - /* Nothing to do */ -} - -static const struct seq_operations vvp_pgcache_ops = { - .start = vvp_pgcache_start, - .next = vvp_pgcache_next, - .stop = vvp_pgcache_stop, - .show = vvp_pgcache_show -}; - -static int 
vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp) -{ - struct seq_private *priv; - - priv = __seq_open_private(filp, &vvp_pgcache_ops, sizeof(*priv)); - if (!priv) - return -ENOMEM; - - priv->sbi = inode->i_private; - priv->env = cl_env_get(&priv->refcheck); - if (IS_ERR(priv->env)) { - int err = PTR_ERR(priv->env); - - seq_release_private(inode, filp); - return err; - } - return 0; -} - -static int vvp_dump_pgcache_seq_release(struct inode *inode, struct file *file) -{ - struct seq_file *seq = file->private_data; - struct seq_private *priv = seq->private; - - cl_env_put(priv->env, &priv->refcheck); - return seq_release_private(inode, file); -} - -const struct file_operations vvp_dump_pgcache_file_ops = { - .owner = THIS_MODULE, - .open = vvp_dump_pgcache_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = vvp_dump_pgcache_seq_release, -}; diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h deleted file mode 100644 index 7d3abb43584a..000000000000 --- a/drivers/staging/lustre/lustre/llite/vvp_internal.h +++ /dev/null @@ -1,321 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2013, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Internal definitions for VVP layer. - * - * Author: Nikita Danilov - */ - -#ifndef VVP_INTERNAL_H -#define VVP_INTERNAL_H - -#include -#include - -enum obd_notify_event; -struct inode; -struct lustre_md; -struct obd_device; -struct obd_export; -struct page; - -/** - * IO state private to IO state private to VVP layer. - */ -struct vvp_io { - /** super class */ - struct cl_io_slice vui_cl; - struct cl_io_lock_link vui_link; - /** - * I/O vector information to or from which read/write is going. - */ - struct iov_iter *vui_iter; - /** - * Total size for the left IO. - */ - size_t vui_tot_count; - - union { - struct vvp_fault_io { - /** - * Inode modification time that is checked across DLM - * lock request. - */ - time64_t ft_mtime; - struct vm_area_struct *ft_vma; - /** - * locked page returned from vvp_io - */ - struct page *ft_vmpage; - /** - * kernel fault info - */ - struct vm_fault *ft_vmf; - /** - * fault API used bitflags for return code. - */ - unsigned int ft_flags; - /** - * check that flags are from filemap_fault - */ - bool ft_flags_valid; - } fault; - struct { - struct cl_page_list vui_queue; - unsigned long vui_written; - int vui_from; - int vui_to; - } write; - } u; - - /** - * Layout version when this IO is initialized - */ - __u32 vui_layout_gen; - /** - * File descriptor against which IO is done. - */ - struct ll_file_data *vui_fd; - struct kiocb *vui_iocb; - - /* Readahead state. 
*/ - pgoff_t vui_ra_start; - pgoff_t vui_ra_count; - /* Set when vui_ra_{start,count} have been initialized. */ - bool vui_ra_valid; -}; - -extern struct lu_device_type vvp_device_type; - -extern struct lu_context_key vvp_session_key; -extern struct lu_context_key vvp_thread_key; - -extern struct kmem_cache *vvp_lock_kmem; -extern struct kmem_cache *vvp_object_kmem; - -struct vvp_thread_info { - struct cl_lock vti_lock; - struct cl_lock_descr vti_descr; - struct cl_io vti_io; - struct cl_attr vti_attr; -}; - -static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env) -{ - struct vvp_thread_info *vti; - - vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key); - LASSERT(vti); - - return vti; -} - -static inline struct cl_lock *vvp_env_lock(const struct lu_env *env) -{ - struct cl_lock *lock = &vvp_env_info(env)->vti_lock; - - memset(lock, 0, sizeof(*lock)); - return lock; -} - -static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env) -{ - struct cl_attr *attr = &vvp_env_info(env)->vti_attr; - - memset(attr, 0, sizeof(*attr)); - - return attr; -} - -static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env) -{ - struct cl_io *io = &vvp_env_info(env)->vti_io; - - memset(io, 0, sizeof(*io)); - - return io; -} - -struct vvp_session { - struct vvp_io cs_ios; -}; - -static inline struct vvp_session *vvp_env_session(const struct lu_env *env) -{ - struct vvp_session *ses; - - ses = lu_context_key_get(env->le_ses, &vvp_session_key); - LASSERT(ses); - - return ses; -} - -static inline struct vvp_io *vvp_env_io(const struct lu_env *env) -{ - return &vvp_env_session(env)->cs_ios; -} - -/** - * ccc-private object state. - */ -struct vvp_object { - struct cl_object_header vob_header; - struct cl_object vob_cl; - struct inode *vob_inode; - - /** - * Number of transient pages. This is no longer protected by i_sem, - * and needs to be atomic. This is not actually used for anything, - * and can probably be removed. 
- */ - atomic_t vob_transient_pages; - - /** - * Number of outstanding mmaps on this file. - * - * \see ll_vm_open(), ll_vm_close(). - */ - atomic_t vob_mmap_cnt; - - /** - * various flags - * vob_discard_page_warned - * if pages belonging to this object are discarded when a client - * is evicted, some debug info will be printed, this flag will be set - * during processing the first discarded page, then avoid flooding - * debug message for lots of discarded pages. - * - * \see ll_dirty_page_discard_warn. - */ - unsigned int vob_discard_page_warned:1; -}; - -/** - * VVP-private page state. - */ -struct vvp_page { - struct cl_page_slice vpg_cl; - unsigned int vpg_defer_uptodate:1, - vpg_ra_used:1; - /** VM page */ - struct page *vpg_page; -}; - -static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice) -{ - return container_of(slice, struct vvp_page, vpg_cl); -} - -static inline pgoff_t vvp_index(struct vvp_page *vvp) -{ - return vvp->vpg_cl.cpl_index; -} - -struct vvp_device { - struct cl_device vdv_cl; - struct cl_device *vdv_next; -}; - -struct vvp_lock { - struct cl_lock_slice vlk_cl; -}; - -void *ccc_key_init(const struct lu_context *ctx, - struct lu_context_key *key); -void ccc_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data); - -void ccc_umount(const struct lu_env *env, struct cl_device *dev); - -static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv) -{ - return &vdv->vdv_cl.cd_lu_dev; -} - -static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d) -{ - return container_of_safe(d, struct vvp_device, vdv_cl.cd_lu_dev); -} - -static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d) -{ - return container_of_safe(d, struct vvp_device, vdv_cl); -} - -static inline struct vvp_object *cl2vvp(const struct cl_object *obj) -{ - return container_of_safe(obj, struct vvp_object, vob_cl); -} - -static inline struct vvp_object *lu2vvp(const struct lu_object *obj) -{ - return 
container_of_safe(obj, struct vvp_object, vob_cl.co_lu); -} - -static inline struct inode *vvp_object_inode(const struct cl_object *obj) -{ - return cl2vvp(obj)->vob_inode; -} - -int vvp_object_invariant(const struct cl_object *obj); -struct vvp_object *cl_inode2vvp(struct inode *inode); - -static inline struct page *cl2vm_page(const struct cl_page_slice *slice) -{ - return cl2vvp_page(slice)->vpg_page; -} - -static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice) -{ - return container_of(slice, struct vvp_lock, vlk_cl); -} - -# define CLOBINVRNT(env, clob, expr) \ - ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr))) - -int vvp_io_init(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io); -int vvp_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int vvp_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index); -struct lu_object *vvp_object_alloc(const struct lu_env *env, - const struct lu_object_header *hdr, - struct lu_device *dev); - -int vvp_global_init(void); -void vvp_global_fini(void); - -extern const struct file_operations vvp_dump_pgcache_file_ops; - -#endif /* VVP_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c deleted file mode 100644 index e7a4778e02e4..000000000000 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ /dev/null @@ -1,1374 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_io for VVP layer. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include - -#include "llite_internal.h" -#include "vvp_internal.h" - -static struct vvp_io *cl2vvp_io(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct vvp_io *vio; - - vio = container_of(slice, struct vvp_io, vui_cl); - LASSERT(vio == vvp_env_io(env)); - - return vio; -} - -/** - * For swapping layout. The file's layout may have changed. - * To avoid populating pages to a wrong stripe, we have to verify the - * correctness of layout. It works because swapping layout processes - * have to acquire group lock. 
- */ -static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, - struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct vvp_io *vio = vvp_env_io(env); - bool rc = true; - - switch (io->ci_type) { - case CIT_READ: - case CIT_WRITE: - /* don't need lock here to check lli_layout_gen as we have held - * extent lock and GROUP lock has to hold to swap layout - */ - if (ll_layout_version_get(lli) != vio->vui_layout_gen || - OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) { - io->ci_need_restart = 1; - /* this will cause a short read/write */ - io->ci_continue = 0; - rc = false; - } - case CIT_FAULT: - /* fault is okay because we've already had a page. */ - default: - break; - } - - return rc; -} - -static void vvp_object_size_lock(struct cl_object *obj) -{ - struct inode *inode = vvp_object_inode(obj); - - ll_inode_size_lock(inode); - cl_object_attr_lock(obj); -} - -static void vvp_object_size_unlock(struct cl_object *obj) -{ - struct inode *inode = vvp_object_inode(obj); - - cl_object_attr_unlock(obj); - ll_inode_size_unlock(inode); -} - -/** - * Helper function that if necessary adjusts file size (inode->i_size), when - * position at the offset \a pos is accessed. File size can be arbitrary stale - * on a Lustre client, but client at least knows KMS. If accessed area is - * inside [0, KMS], set file size to KMS, otherwise glimpse file size. - * - * Locking: cl_isize_lock is used to serialize changes to inode size and to - * protect consistency between inode size and cl_object - * attributes. cl_object_size_lock() protects consistency between cl_attr's of - * top-object and sub-objects. 
- */ -static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io, loff_t start, size_t count, - int *exceed) -{ - struct cl_attr *attr = vvp_env_thread_attr(env); - struct inode *inode = vvp_object_inode(obj); - loff_t pos = start + count - 1; - loff_t kms; - int result; - - /* - * Consistency guarantees: following possibilities exist for the - * relation between region being accessed and real file size at this - * moment: - * - * (A): the region is completely inside of the file; - * - * (B-x): x bytes of region are inside of the file, the rest is - * outside; - * - * (C): the region is completely outside of the file. - * - * This classification is stable under DLM lock already acquired by - * the caller, because to change the class, other client has to take - * DLM lock conflicting with our lock. Also, any updates to ->i_size - * by other threads on this client are serialized by - * ll_inode_size_lock(). This guarantees that short reads are handled - * correctly in the face of concurrent writes and truncates. - */ - vvp_object_size_lock(obj); - result = cl_object_attr_get(env, obj, attr); - if (result == 0) { - kms = attr->cat_kms; - if (pos > kms) { - /* - * A glimpse is necessary to determine whether we - * return a short read (B) or some zeroes at the end - * of the buffer (C) - */ - vvp_object_size_unlock(obj); - result = cl_glimpse_lock(env, io, inode, obj, 0); - if (result == 0 && exceed) { - /* If objective page index exceed end-of-file - * page index, return directly. Do not expect - * kernel will check such case correctly. - * linux-2.6.18-128.1.1 miss to do that. - * --bug 17336 - */ - loff_t size = i_size_read(inode); - loff_t cur_index = start >> PAGE_SHIFT; - loff_t size_index = (size - 1) >> PAGE_SHIFT; - - if ((size == 0 && cur_index != 0) || - size_index < cur_index) - *exceed = 1; - } - return result; - } - /* - * region is within kms and, hence, within real file - * size (A). 
We need to increase i_size to cover the - * read region so that generic_file_read() will do its - * job, but that doesn't mean the kms size is - * _correct_, it is only the _minimum_ size. If - * someone does a stat they will get the correct size - * which will always be >= the kms value here. - * b=11081 - */ - if (i_size_read(inode) < kms) { - i_size_write(inode, kms); - CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n", - PFID(lu_object_fid(&obj->co_lu)), - (__u64)i_size_read(inode)); - } - } - - vvp_object_size_unlock(obj); - - return result; -} - -/***************************************************************************** - * - * io operations. - * - */ - -static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io, - __u32 enqflags, enum cl_lock_mode mode, - pgoff_t start, pgoff_t end) -{ - struct vvp_io *vio = vvp_env_io(env); - struct cl_lock_descr *descr = &vio->vui_link.cill_descr; - struct cl_object *obj = io->ci_obj; - - CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - - CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end); - - memset(&vio->vui_link, 0, sizeof(vio->vui_link)); - - if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) { - descr->cld_mode = CLM_GROUP; - descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid; - enqflags |= CEF_LOCK_MATCH; - } else { - descr->cld_mode = mode; - } - descr->cld_obj = obj; - descr->cld_start = start; - descr->cld_end = end; - descr->cld_enq_flags = enqflags; - - cl_io_lock_add(env, io, &vio->vui_link); - return 0; -} - -static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io, - __u32 enqflags, enum cl_lock_mode mode, - loff_t start, loff_t end) -{ - struct cl_object *obj = io->ci_obj; - - return vvp_io_one_lock_index(env, io, enqflags, mode, - cl_index(obj, start), cl_index(obj, end)); -} - -static int vvp_io_write_iter_init(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct vvp_io *vio = cl2vvp_io(env, ios); - - 
cl_page_list_init(&vio->u.write.vui_queue); - vio->u.write.vui_written = 0; - vio->u.write.vui_from = 0; - vio->u.write.vui_to = PAGE_SIZE; - - return 0; -} - -static void vvp_io_write_iter_fini(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct vvp_io *vio = cl2vvp_io(env, ios); - - LASSERT(vio->u.write.vui_queue.pl_nr == 0); -} - -static int vvp_io_fault_iter_init(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct vvp_io *vio = cl2vvp_io(env, ios); - struct inode *inode = vvp_object_inode(ios->cis_obj); - - LASSERT(inode == file_inode(vio->vui_fd->fd_file)); - vio->u.fault.ft_mtime = inode->i_mtime.tv_sec; - return 0; -} - -static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - struct cl_object *obj = io->ci_obj; - struct vvp_io *vio = cl2vvp_io(env, ios); - struct inode *inode = vvp_object_inode(obj); - - CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - - CDEBUG(D_VFSTRACE, DFID - " ignore/verify layout %d/%d, layout version %d restore needed %d\n", - PFID(lu_object_fid(&obj->co_lu)), - io->ci_ignore_layout, io->ci_verify_layout, - vio->vui_layout_gen, io->ci_restore_needed); - - if (io->ci_restore_needed) { - int rc; - - /* file was detected release, we need to restore it - * before finishing the io - */ - rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF); - /* if restore registration failed, no restart, - * we will return -ENODATA - */ - /* The layout will change after restore, so we need to - * block on layout lock hold by the MDT - * as MDT will not send new layout in lvb (see LU-3124) - * we have to explicitly fetch it, all this will be done - * by ll_layout_refresh() - */ - if (rc == 0) { - io->ci_restore_needed = 0; - io->ci_need_restart = 1; - io->ci_verify_layout = 1; - } else { - io->ci_restore_needed = 1; - io->ci_need_restart = 0; - io->ci_verify_layout = 0; - io->ci_result = rc; - } - } - - if (!io->ci_ignore_layout && io->ci_verify_layout) { - 
__u32 gen = 0; - - /* check layout version */ - ll_layout_refresh(inode, &gen); - io->ci_need_restart = vio->vui_layout_gen != gen; - if (io->ci_need_restart) { - CDEBUG(D_VFSTRACE, - DFID " layout changed from %d to %d.\n", - PFID(lu_object_fid(&obj->co_lu)), - vio->vui_layout_gen, gen); - /* today successful restore is the only possible case */ - /* restore was done, clear restoring state */ - clear_bit(LLIF_FILE_RESTORING, - &ll_i2info(inode)->lli_flags); - } - } -} - -static void vvp_io_fault_fini(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - struct cl_page *page = io->u.ci_fault.ft_page; - - CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj)); - - if (page) { - lu_ref_del(&page->cp_reference, "fault", io); - cl_page_put(env, page); - io->u.ci_fault.ft_page = NULL; - } - vvp_io_fini(env, ios); -} - -static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma) -{ - /* - * we only want to hold PW locks if the mmap() can generate - * writes back to the file and that only happens in shared - * writable vmas - */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return CLM_WRITE; - return CLM_READ; -} - -static int vvp_mmap_locks(const struct lu_env *env, - struct vvp_io *vio, struct cl_io *io) -{ - struct vvp_thread_info *cti = vvp_env_info(env); - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - struct cl_lock_descr *descr = &cti->vti_descr; - union ldlm_policy_data policy; - unsigned long addr; - ssize_t count; - int result = 0; - struct iov_iter i; - struct iovec iov; - - LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); - - if (!vio->vui_iter) /* nfs or loop back device write */ - return 0; - - /* No MM (e.g. NFS)? No vmas too. 
*/ - if (!mm) - return 0; - - iov_for_each(iov, i, *vio->vui_iter) { - addr = (unsigned long)iov.iov_base; - count = iov.iov_len; - if (count == 0) - continue; - - count += addr & (~PAGE_MASK); - addr &= PAGE_MASK; - - down_read(&mm->mmap_sem); - while ((vma = our_vma(mm, addr, count)) != NULL) { - struct inode *inode = file_inode(vma->vm_file); - int flags = CEF_MUST; - - if (ll_file_nolock(vma->vm_file)) { - /* - * For no lock case is not allowed for mmap - */ - result = -EINVAL; - break; - } - - /* - * XXX: Required lock mode can be weakened: CIT_WRITE - * io only ever reads user level buffer, and CIT_READ - * only writes on it. - */ - policy_from_vma(&policy, vma, addr, count); - descr->cld_mode = vvp_mode_from_vma(vma); - descr->cld_obj = ll_i2info(inode)->lli_clob; - descr->cld_start = cl_index(descr->cld_obj, - policy.l_extent.start); - descr->cld_end = cl_index(descr->cld_obj, - policy.l_extent.end); - descr->cld_enq_flags = flags; - result = cl_io_lock_alloc_add(env, io, descr); - - CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", - descr->cld_mode, descr->cld_start, - descr->cld_end); - - if (result < 0) - break; - - if (vma->vm_end - addr >= count) - break; - - count -= vma->vm_end - addr; - addr = vma->vm_end; - } - up_read(&mm->mmap_sem); - if (result < 0) - break; - } - return result; -} - -static void vvp_io_advance(const struct lu_env *env, - const struct cl_io_slice *ios, - size_t nob) -{ - struct cl_object *obj = ios->cis_io->ci_obj; - struct vvp_io *vio = cl2vvp_io(env, ios); - - CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - - vio->vui_tot_count -= nob; - iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count); -} - -static void vvp_io_update_iov(const struct lu_env *env, - struct vvp_io *vio, struct cl_io *io) -{ - size_t size = io->u.ci_rw.crw_count; - - if (!vio->vui_iter) - return; - - iov_iter_truncate(vio->vui_iter, size); -} - -static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io, - enum cl_lock_mode mode, loff_t start, 
loff_t end) -{ - struct vvp_io *vio = vvp_env_io(env); - int result; - int ast_flags = 0; - - LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); - - vvp_io_update_iov(env, vio, io); - - if (io->u.ci_rw.crw_nonblock) - ast_flags |= CEF_NONBLOCK; - result = vvp_mmap_locks(env, vio, io); - if (result == 0) - result = vvp_io_one_lock(env, io, ast_flags, mode, start, end); - return result; -} - -static int vvp_io_read_lock(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - struct cl_io_rw_common *rd = &io->u.ci_rd.rd; - int result; - - result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos, - rd->crw_pos + rd->crw_count - 1); - - return result; -} - -static int vvp_io_fault_lock(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - struct vvp_io *vio = cl2vvp_io(env, ios); - /* - * XXX LDLM_FL_CBPENDING - */ - return vvp_io_one_lock_index(env, - io, 0, - vvp_mode_from_vma(vio->u.fault.ft_vma), - io->u.ci_fault.ft_index, - io->u.ci_fault.ft_index); -} - -static int vvp_io_write_lock(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - loff_t start; - loff_t end; - - if (io->u.ci_wr.wr_append) { - start = 0; - end = OBD_OBJECT_EOF; - } else { - start = io->u.ci_wr.wr.crw_pos; - end = start + io->u.ci_wr.wr.crw_count - 1; - } - return vvp_io_rw_lock(env, io, CLM_WRITE, start, end); -} - -static int vvp_io_setattr_iter_init(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - return 0; -} - -/** - * Implementation of cl_io_operations::vio_lock() method for CIT_SETATTR io. - * - * Handles "lockless io" mode when extent locking is done by server. 
- */ -static int vvp_io_setattr_lock(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - __u64 new_size; - __u32 enqflags = 0; - - if (cl_io_is_trunc(io)) { - new_size = io->u.ci_setattr.sa_attr.lvb_size; - if (new_size == 0) - enqflags = CEF_DISCARD_DATA; - } else { - unsigned int valid = io->u.ci_setattr.sa_valid; - - if (!(valid & TIMES_SET_FLAGS)) - return 0; - - if ((!(valid & ATTR_MTIME) || - io->u.ci_setattr.sa_attr.lvb_mtime >= - io->u.ci_setattr.sa_attr.lvb_ctime) && - (!(valid & ATTR_ATIME) || - io->u.ci_setattr.sa_attr.lvb_atime >= - io->u.ci_setattr.sa_attr.lvb_ctime)) - return 0; - new_size = 0; - } - - return vvp_io_one_lock(env, io, enqflags, CLM_WRITE, - new_size, OBD_OBJECT_EOF); -} - -static int vvp_do_vmtruncate(struct inode *inode, size_t size) -{ - int result; - /* - * Only ll_inode_size_lock is taken at this level. - */ - ll_inode_size_lock(inode); - result = inode_newsize_ok(inode, size); - if (result < 0) { - ll_inode_size_unlock(inode); - return result; - } - truncate_setsize(inode, size); - ll_inode_size_unlock(inode); - return result; -} - -static int vvp_io_setattr_time(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - struct cl_object *obj = io->ci_obj; - struct cl_attr *attr = vvp_env_thread_attr(env); - int result; - unsigned valid = CAT_CTIME; - - cl_object_attr_lock(obj); - attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime; - if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) { - attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime; - valid |= CAT_ATIME; - } - if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) { - attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime; - valid |= CAT_MTIME; - } - result = cl_object_attr_update(env, obj, attr, valid); - cl_object_attr_unlock(obj); - - return result; -} - -static int vvp_io_setattr_start(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - struct inode 
*inode = vvp_object_inode(io->ci_obj); - struct ll_inode_info *lli = ll_i2info(inode); - - if (cl_io_is_trunc(io)) { - down_write(&lli->lli_trunc_sem); - inode_lock(inode); - inode_dio_wait(inode); - } else { - inode_lock(inode); - } - - if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS) - return vvp_io_setattr_time(env, ios); - - return 0; -} - -static void vvp_io_setattr_end(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - struct inode *inode = vvp_object_inode(io->ci_obj); - struct ll_inode_info *lli = ll_i2info(inode); - - if (cl_io_is_trunc(io)) { - /* Truncate in memory pages - they must be clean pages - * because osc has already notified to destroy osc_extents. - */ - vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); - inode_unlock(inode); - up_write(&lli->lli_trunc_sem); - } else { - inode_unlock(inode); - } -} - -static void vvp_io_setattr_fini(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - bool restore_needed = ios->cis_io->ci_restore_needed; - struct inode *inode = vvp_object_inode(ios->cis_obj); - - vvp_io_fini(env, ios); - - if (restore_needed && !ios->cis_io->ci_restore_needed) { - /* restore finished, set data modified flag for HSM */ - set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags); - } -} - -static int vvp_io_read_start(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct vvp_io *vio = cl2vvp_io(env, ios); - struct cl_io *io = ios->cis_io; - struct cl_object *obj = io->ci_obj; - struct inode *inode = vvp_object_inode(obj); - struct ll_inode_info *lli = ll_i2info(inode); - struct file *file = vio->vui_fd->fd_file; - - int result; - loff_t pos = io->u.ci_rd.rd.crw_pos; - long cnt = io->u.ci_rd.rd.crw_count; - long tot = vio->vui_tot_count; - int exceed = 0; - - CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - - CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt); - - down_read(&lli->lli_trunc_sem); - - if (!can_populate_pages(env, io, 
inode)) - return 0; - - result = vvp_prep_size(env, obj, io, pos, tot, &exceed); - if (result != 0) - return result; - if (exceed != 0) - goto out; - - LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, - "Read ino %lu, %lu bytes, offset %lld, size %llu\n", - inode->i_ino, cnt, pos, i_size_read(inode)); - - /* turn off the kernel's read-ahead */ - vio->vui_fd->fd_file->f_ra.ra_pages = 0; - - /* initialize read-ahead window once per syscall */ - if (!vio->vui_ra_valid) { - vio->vui_ra_valid = true; - vio->vui_ra_start = cl_index(obj, pos); - vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1); - ll_ras_enter(file); - } - - /* BUG: 5972 */ - file_accessed(file); - LASSERT(vio->vui_iocb->ki_pos == pos); - result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter); - -out: - if (result >= 0) { - if (result < cnt) - io->ci_continue = 0; - io->ci_nob += result; - ll_rw_stats_tally(ll_i2sbi(inode), current->pid, - vio->vui_fd, pos, result, READ); - result = 0; - } - return result; -} - -static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *plist, int from, int to) -{ - struct cl_2queue *queue = &io->ci_queue; - struct cl_page *page; - unsigned int bytes = 0; - int rc = 0; - - if (plist->pl_nr == 0) - return 0; - - if (from > 0 || to != PAGE_SIZE) { - page = cl_page_list_first(plist); - if (plist->pl_nr == 1) { - cl_page_clip(env, page, from, to); - } else { - if (from > 0) - cl_page_clip(env, page, from, PAGE_SIZE); - if (to != PAGE_SIZE) { - page = cl_page_list_last(plist); - cl_page_clip(env, page, 0, to); - } - } - } - - cl_2queue_init(queue); - cl_page_list_splice(plist, &queue->c2_qin); - rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0); - - /* plist is not sorted any more */ - cl_page_list_splice(&queue->c2_qin, plist); - cl_page_list_splice(&queue->c2_qout, plist); - cl_2queue_fini(env, queue); - - if (rc == 0) { - /* calculate bytes */ - bytes = plist->pl_nr << PAGE_SHIFT; - bytes -= from + PAGE_SIZE - to; - - while 
(plist->pl_nr > 0) { - page = cl_page_list_first(plist); - cl_page_list_del(env, plist, page); - - cl_page_clip(env, page, 0, PAGE_SIZE); - - SetPageUptodate(cl_page_vmpage(page)); - cl_page_disown(env, io, page); - - /* held in ll_cl_init() */ - lu_ref_del(&page->cp_reference, "cl_io", io); - cl_page_put(env, page); - } - } - - return bytes > 0 ? bytes : rc; -} - -static void write_commit_callback(const struct lu_env *env, struct cl_io *io, - struct cl_page *page) -{ - struct page *vmpage = page->cp_vmpage; - - SetPageUptodate(vmpage); - set_page_dirty(vmpage); - - cl_page_disown(env, io, page); - - /* held in ll_cl_init() */ - lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io)); - cl_page_put(env, page); -} - -/* make sure the page list is contiguous */ -static bool page_list_sanity_check(struct cl_object *obj, - struct cl_page_list *plist) -{ - struct cl_page *page; - pgoff_t index = CL_PAGE_EOF; - - cl_page_list_for_each(page, plist) { - struct vvp_page *vpg = cl_object_page_slice(obj, page); - - if (index == CL_PAGE_EOF) { - index = vvp_index(vpg); - continue; - } - - ++index; - if (index == vvp_index(vpg)) - continue; - - return false; - } - return true; -} - -/* Return how many bytes have queued or written */ -int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io) -{ - struct cl_object *obj = io->ci_obj; - struct inode *inode = vvp_object_inode(obj); - struct vvp_io *vio = vvp_env_io(env); - struct cl_page_list *queue = &vio->u.write.vui_queue; - struct cl_page *page; - int rc = 0; - int bytes = 0; - unsigned int npages = vio->u.write.vui_queue.pl_nr; - - if (npages == 0) - return 0; - - CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n", - npages, vio->u.write.vui_from, vio->u.write.vui_to); - - LASSERT(page_list_sanity_check(obj, queue)); - - /* submit IO with async write */ - rc = cl_io_commit_async(env, io, queue, - vio->u.write.vui_from, vio->u.write.vui_to, - write_commit_callback); - npages -= queue->pl_nr; /* already 
committed pages */ - if (npages > 0) { - /* calculate how many bytes were written */ - bytes = npages << PAGE_SHIFT; - - /* first page */ - bytes -= vio->u.write.vui_from; - if (queue->pl_nr == 0) /* last page */ - bytes -= PAGE_SIZE - vio->u.write.vui_to; - LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages); - - vio->u.write.vui_written += bytes; - - CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n", - npages, bytes, vio->u.write.vui_written); - - /* the first page must have been written. */ - vio->u.write.vui_from = 0; - } - LASSERT(page_list_sanity_check(obj, queue)); - LASSERT(ergo(rc == 0, queue->pl_nr == 0)); - - /* out of quota, try sync write */ - if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) { - rc = vvp_io_commit_sync(env, io, queue, - vio->u.write.vui_from, - vio->u.write.vui_to); - if (rc > 0) { - vio->u.write.vui_written += rc; - rc = 0; - } - } - - /* update inode size */ - ll_merge_attr(env, inode); - - /* Now the pages in queue were failed to commit, discard them - * unless they were dirtied before. 
- */ - while (queue->pl_nr > 0) { - page = cl_page_list_first(queue); - cl_page_list_del(env, queue, page); - - if (!PageDirty(cl_page_vmpage(page))) - cl_page_discard(env, io, page); - - cl_page_disown(env, io, page); - - /* held in ll_cl_init() */ - lu_ref_del(&page->cp_reference, "cl_io", io); - cl_page_put(env, page); - } - cl_page_list_fini(env, queue); - - return rc; -} - -static int vvp_io_write_start(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct vvp_io *vio = cl2vvp_io(env, ios); - struct cl_io *io = ios->cis_io; - struct cl_object *obj = io->ci_obj; - struct inode *inode = vvp_object_inode(obj); - struct ll_inode_info *lli = ll_i2info(inode); - ssize_t result = 0; - loff_t pos = io->u.ci_wr.wr.crw_pos; - size_t cnt = io->u.ci_wr.wr.crw_count; - - down_read(&lli->lli_trunc_sem); - - if (!can_populate_pages(env, io, inode)) - return 0; - - if (cl_io_is_append(io)) { - /* - * PARALLEL IO This has to be changed for parallel IO doing - * out-of-order writes. - */ - ll_merge_attr(env, inode); - pos = i_size_read(inode); - io->u.ci_wr.wr.crw_pos = pos; - vio->vui_iocb->ki_pos = pos; - } else { - LASSERT(vio->vui_iocb->ki_pos == pos); - } - - CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); - - /* - * The maximum Lustre file size is variable, based on the OST maximum - * object size and number of stripes. This needs another check in - * addition to the VFS checks earlier. - */ - if (pos + cnt > ll_file_maxbytes(inode)) { - CDEBUG(D_INODE, - "%s: file " DFID " offset %llu > maxbytes %llu\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(ll_inode2fid(inode)), pos + cnt, - ll_file_maxbytes(inode)); - return -EFBIG; - } - - if (!vio->vui_iter) { - /* from a temp io in ll_cl_init(). */ - result = 0; - } else { - /* - * When using the locked AIO function (generic_file_aio_write()) - * testing has shown the inode mutex to be a limiting factor - * with multi-threaded single shared file performance. 
To get - * around this, we now use the lockless version. To maintain - * consistency, proper locking to protect against writes, - * trucates, etc. is handled in the higher layers of lustre. - */ - bool lock_node = !IS_NOSEC(inode); - - if (lock_node) - inode_lock(inode); - result = __generic_file_write_iter(vio->vui_iocb, - vio->vui_iter); - if (lock_node) - inode_unlock(inode); - - if (result > 0 || result == -EIOCBQUEUED) - result = generic_write_sync(vio->vui_iocb, result); - } - - if (result > 0) { - result = vvp_io_write_commit(env, io); - if (vio->u.write.vui_written > 0) { - result = vio->u.write.vui_written; - io->ci_nob += result; - - CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n", - io->ci_nob, result); - } - } - if (result > 0) { - set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags); - - if (result < cnt) - io->ci_continue = 0; - ll_rw_stats_tally(ll_i2sbi(inode), current->pid, - vio->vui_fd, pos, result, WRITE); - result = 0; - } - return result; -} - -static void vvp_io_rw_end(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct inode *inode = vvp_object_inode(ios->cis_obj); - struct ll_inode_info *lli = ll_i2info(inode); - - up_read(&lli->lli_trunc_sem); -} - -static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) -{ - struct vm_fault *vmf = cfio->ft_vmf; - - cfio->ft_flags = filemap_fault(vmf); - cfio->ft_flags_valid = 1; - - if (vmf->page) { - CDEBUG(D_PAGE, - "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n", - vmf->page, vmf->page->mapping, vmf->page->index, - (long)vmf->page->flags, page_count(vmf->page), - page_private(vmf->page), (void *)vmf->address); - if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) { - lock_page(vmf->page); - cfio->ft_flags |= VM_FAULT_LOCKED; - } - - cfio->ft_vmpage = vmf->page; - return 0; - } - - if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { - CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address); - return -EFAULT; - } - - if 
(cfio->ft_flags & VM_FAULT_OOM) { - CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address); - return -ENOMEM; - } - - if (cfio->ft_flags & VM_FAULT_RETRY) - return -EAGAIN; - - CERROR("Unknown error in page fault %d!\n", cfio->ft_flags); - return -EINVAL; -} - -static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io, - struct cl_page *page) -{ - set_page_dirty(page->cp_vmpage); -} - -static int vvp_io_fault_start(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct vvp_io *vio = cl2vvp_io(env, ios); - struct cl_io *io = ios->cis_io; - struct cl_object *obj = io->ci_obj; - struct inode *inode = vvp_object_inode(obj); - struct ll_inode_info *lli = ll_i2info(inode); - struct cl_fault_io *fio = &io->u.ci_fault; - struct vvp_fault_io *cfio = &vio->u.fault; - loff_t offset; - int result = 0; - struct page *vmpage = NULL; - struct cl_page *page; - loff_t size; - pgoff_t last_index; - - down_read(&lli->lli_trunc_sem); - - /* offset of the last byte on the page */ - offset = cl_offset(obj, fio->ft_index + 1) - 1; - LASSERT(cl_index(obj, offset) == fio->ft_index); - result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL); - if (result != 0) - return result; - - /* must return locked page */ - if (fio->ft_mkwrite) { - LASSERT(cfio->ft_vmpage); - lock_page(cfio->ft_vmpage); - } else { - result = vvp_io_kernel_fault(cfio); - if (result != 0) - return result; - } - - vmpage = cfio->ft_vmpage; - LASSERT(PageLocked(vmpage)); - - if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE)) - ll_invalidate_page(vmpage); - - size = i_size_read(inode); - /* Though we have already held a cl_lock upon this page, but - * it still can be truncated locally. - */ - if (unlikely((vmpage->mapping != inode->i_mapping) || - (page_offset(vmpage) > size))) { - CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n"); - - /* return +1 to stop cl_io_loop() and ll_fault() will catch - * and retry. 
- */ - result = 1; - goto out; - } - - last_index = cl_index(obj, size - 1); - - if (fio->ft_mkwrite) { - /* - * Capture the size while holding the lli_trunc_sem from above - * we want to make sure that we complete the mkwrite action - * while holding this lock. We need to make sure that we are - * not past the end of the file. - */ - if (last_index < fio->ft_index) { - CDEBUG(D_PAGE, - "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n", - vmpage->mapping, fio->ft_index, last_index); - /* - * We need to return if we are - * passed the end of the file. This will propagate - * up the call stack to ll_page_mkwrite where - * we will return VM_FAULT_NOPAGE. Any non-negative - * value returned here will be silently - * converted to 0. If the vmpage->mapping is null - * the error code would be converted back to ENODATA - * in ll_page_mkwrite0. Thus we return -ENODATA - * to handle both cases - */ - result = -ENODATA; - goto out; - } - } - - page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE); - if (IS_ERR(page)) { - result = PTR_ERR(page); - goto out; - } - - /* if page is going to be written, we should add this page into cache - * earlier. - */ - if (fio->ft_mkwrite) { - wait_on_page_writeback(vmpage); - if (!PageDirty(vmpage)) { - struct cl_page_list *plist = &io->ci_queue.c2_qin; - struct vvp_page *vpg = cl_object_page_slice(obj, page); - int to = PAGE_SIZE; - - /* vvp_page_assume() calls wait_on_page_writeback(). */ - cl_page_assume(env, io, page); - - cl_page_list_init(plist); - cl_page_list_add(plist, page); - - /* size fixup */ - if (last_index == vvp_index(vpg)) - to = size & ~PAGE_MASK; - - /* Do not set Dirty bit here so that in case IO is - * started before the page is really made dirty, we - * still have chance to detect it. 
- */ - result = cl_io_commit_async(env, io, plist, 0, to, - mkwrite_commit_callback); - LASSERT(cl_page_is_owned(page, io)); - cl_page_list_fini(env, plist); - - vmpage = NULL; - if (result < 0) { - cl_page_discard(env, io, page); - cl_page_disown(env, io, page); - - cl_page_put(env, page); - - /* we're in big trouble, what can we do now? */ - if (result == -EDQUOT) - result = -ENOSPC; - goto out; - } else { - cl_page_disown(env, io, page); - } - } - } - - /* - * The ft_index is only used in the case of - * a mkwrite action. We need to check - * our assertions are correct, since - * we should have caught this above - */ - LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index); - if (fio->ft_index == last_index) - /* - * Last page is mapped partially. - */ - fio->ft_nob = size - cl_offset(obj, fio->ft_index); - else - fio->ft_nob = cl_page_size(obj); - - lu_ref_add(&page->cp_reference, "fault", io); - fio->ft_page = page; - -out: - /* return unlocked vmpage to avoid deadlocking */ - if (vmpage) - unlock_page(vmpage); - - cfio->ft_flags &= ~VM_FAULT_LOCKED; - - return result; -} - -static void vvp_io_fault_end(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct inode *inode = vvp_object_inode(ios->cis_obj); - struct ll_inode_info *lli = ll_i2info(inode); - - CLOBINVRNT(env, ios->cis_io->ci_obj, - vvp_object_invariant(ios->cis_io->ci_obj)); - up_read(&lli->lli_trunc_sem); -} - -static int vvp_io_fsync_start(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - /* we should mark TOWRITE bit to each dirty page in radix tree to - * verify pages have been written, but this is difficult because of - * race. 
- */ - return 0; -} - -static int vvp_io_read_ahead(const struct lu_env *env, - const struct cl_io_slice *ios, - pgoff_t start, struct cl_read_ahead *ra) -{ - int result = 0; - - if (ios->cis_io->ci_type == CIT_READ || - ios->cis_io->ci_type == CIT_FAULT) { - struct vvp_io *vio = cl2vvp_io(env, ios); - - if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) { - ra->cra_end = CL_PAGE_EOF; - result = 1; /* no need to call down */ - } - } - - return result; -} - -static const struct cl_io_operations vvp_io_ops = { - .op = { - [CIT_READ] = { - .cio_fini = vvp_io_fini, - .cio_lock = vvp_io_read_lock, - .cio_start = vvp_io_read_start, - .cio_end = vvp_io_rw_end, - .cio_advance = vvp_io_advance, - }, - [CIT_WRITE] = { - .cio_fini = vvp_io_fini, - .cio_iter_init = vvp_io_write_iter_init, - .cio_iter_fini = vvp_io_write_iter_fini, - .cio_lock = vvp_io_write_lock, - .cio_start = vvp_io_write_start, - .cio_end = vvp_io_rw_end, - .cio_advance = vvp_io_advance, - }, - [CIT_SETATTR] = { - .cio_fini = vvp_io_setattr_fini, - .cio_iter_init = vvp_io_setattr_iter_init, - .cio_lock = vvp_io_setattr_lock, - .cio_start = vvp_io_setattr_start, - .cio_end = vvp_io_setattr_end - }, - [CIT_FAULT] = { - .cio_fini = vvp_io_fault_fini, - .cio_iter_init = vvp_io_fault_iter_init, - .cio_lock = vvp_io_fault_lock, - .cio_start = vvp_io_fault_start, - .cio_end = vvp_io_fault_end, - }, - [CIT_FSYNC] = { - .cio_start = vvp_io_fsync_start, - .cio_fini = vvp_io_fini - }, - [CIT_MISC] = { - .cio_fini = vvp_io_fini - } - }, - .cio_read_ahead = vvp_io_read_ahead, -}; - -int vvp_io_init(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) -{ - struct vvp_io *vio = vvp_env_io(env); - struct inode *inode = vvp_object_inode(obj); - int result; - - CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - - CDEBUG(D_VFSTRACE, DFID - " ignore/verify layout %d/%d, layout version %d restore needed %d\n", - PFID(lu_object_fid(&obj->co_lu)), - io->ci_ignore_layout, io->ci_verify_layout, - 
vio->vui_layout_gen, io->ci_restore_needed); - - CL_IO_SLICE_CLEAN(vio, vui_cl); - cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops); - vio->vui_ra_valid = false; - result = 0; - if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) { - size_t count; - struct ll_inode_info *lli = ll_i2info(inode); - - count = io->u.ci_rw.crw_count; - /* "If nbyte is 0, read() will return 0 and have no other - * results." -- Single Unix Spec - */ - if (count == 0) - result = 1; - else - vio->vui_tot_count = count; - - /* for read/write, we store the jobid in the inode, and - * it'll be fetched by osc when building RPC. - * - * it's not accurate if the file is shared by different - * jobs. - */ - lustre_get_jobid(lli->lli_jobid); - } else if (io->ci_type == CIT_SETATTR) { - if (!cl_io_is_trunc(io)) - io->ci_lockreq = CILR_MANDATORY; - } - - /* Enqueue layout lock and get layout version. We need to do this - * even for operations requiring to open file, such as read and write, - * because it might not grant layout lock in IT_OPEN. - */ - if (result == 0 && !io->ci_ignore_layout) { - result = ll_layout_refresh(inode, &vio->vui_layout_gen); - if (result == -ENOENT) - /* If the inode on MDS has been removed, but the objects - * on OSTs haven't been destroyed (async unlink), layout - * fetch will return -ENOENT, we'd ignore this error - * and continue with dirty flush. LU-3230. - */ - result = 0; - if (result < 0) - CERROR("%s: refresh file layout " DFID " error %d.\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(lu_object_fid(&obj->co_lu)), result); - } - - return result; -} diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c deleted file mode 100644 index 4b6c7143bd2c..000000000000 --- a/drivers/staging/lustre/lustre/llite/vvp_lock.c +++ /dev/null @@ -1,87 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2014, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_lock for VVP layer. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include - -#include "vvp_internal.h" - -/***************************************************************************** - * - * Vvp lock functions. 
- * - */ - -static void vvp_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice) -{ - struct vvp_lock *vlk = cl2vvp_lock(slice); - - kmem_cache_free(vvp_lock_kmem, vlk); -} - -static int vvp_lock_enqueue(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_io *unused, struct cl_sync_io *anchor) -{ - CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj)); - - return 0; -} - -static const struct cl_lock_operations vvp_lock_ops = { - .clo_fini = vvp_lock_fini, - .clo_enqueue = vvp_lock_enqueue, -}; - -int vvp_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *unused) -{ - struct vvp_lock *vlk; - int result; - - CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - - vlk = kmem_cache_zalloc(vvp_lock_kmem, GFP_NOFS); - if (vlk) { - cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops); - result = 0; - } else { - result = -ENOMEM; - } - return result; -} diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c deleted file mode 100644 index b2cb51c8f7f4..000000000000 --- a/drivers/staging/lustre/lustre/llite/vvp_object.c +++ /dev/null @@ -1,303 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * cl_object implementation for VVP layer. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include - -#include "llite_internal.h" -#include "vvp_internal.h" - -/***************************************************************************** - * - * Object operations. - * - */ - -int vvp_object_invariant(const struct cl_object *obj) -{ - struct inode *inode = vvp_object_inode(obj); - struct ll_inode_info *lli = ll_i2info(inode); - - return (S_ISREG(inode->i_mode) || inode->i_mode == 0) && - lli->lli_clob == obj; -} - -static int vvp_object_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) -{ - struct vvp_object *obj = lu2vvp(o); - struct inode *inode = obj->vob_inode; - struct ll_inode_info *lli; - - (*p)(env, cookie, "(%d %d) inode: %p ", - atomic_read(&obj->vob_transient_pages), - atomic_read(&obj->vob_mmap_cnt), inode); - if (inode) { - lli = ll_i2info(inode); - (*p)(env, cookie, "%lu/%u %o %u %d %p " DFID, - inode->i_ino, inode->i_generation, inode->i_mode, - inode->i_nlink, atomic_read(&inode->i_count), - lli->lli_clob, PFID(&lli->lli_fid)); - } - return 0; -} - -static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr) -{ - struct inode *inode = vvp_object_inode(obj); - - /* - * lov overwrites most of these fields in - * lov_attr_get()->...lov_merge_lvb_kms(), except when inode - * attributes are newer. 
- */ - - attr->cat_size = i_size_read(inode); - attr->cat_mtime = inode->i_mtime.tv_sec; - attr->cat_atime = inode->i_atime.tv_sec; - attr->cat_ctime = inode->i_ctime.tv_sec; - attr->cat_blocks = inode->i_blocks; - attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid); - attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid); - /* KMS is not known by this layer */ - return 0; /* layers below have to fill in the rest */ -} - -static int vvp_attr_update(const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned int valid) -{ - struct inode *inode = vvp_object_inode(obj); - - if (valid & CAT_UID) - inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid); - if (valid & CAT_GID) - inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid); - if (valid & CAT_ATIME) - inode->i_atime.tv_sec = attr->cat_atime; - if (valid & CAT_MTIME) - inode->i_mtime.tv_sec = attr->cat_mtime; - if (valid & CAT_CTIME) - inode->i_ctime.tv_sec = attr->cat_ctime; - if (0 && valid & CAT_SIZE) - i_size_write(inode, attr->cat_size); - /* not currently necessary */ - if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE)) - mark_inode_dirty(inode); - return 0; -} - -static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf) -{ - struct ll_inode_info *lli = ll_i2info(conf->coc_inode); - - if (conf->coc_opc == OBJECT_CONF_INVALIDATE) { - CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n", - PFID(&lli->lli_fid)); - - ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE); - - /* Clean up page mmap for this inode. - * The reason for us to do this is that if the page has - * already been installed into memory space, the process - * can access it without interacting with lustre, so this - * page may be stale due to layout change, and the process - * will never be notified. - * This operation is expensive but mmap processes have to pay - * a price themselves. 
- */ - unmap_mapping_range(conf->coc_inode->i_mapping, - 0, OBD_OBJECT_EOF, 0); - } - - return 0; -} - -static int vvp_prune(const struct lu_env *env, struct cl_object *obj) -{ - struct inode *inode = vvp_object_inode(obj); - int rc; - - rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1); - if (rc < 0) { - CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n", - PFID(lu_object_fid(&obj->co_lu)), rc); - return rc; - } - - truncate_inode_pages(inode->i_mapping, 0); - return 0; -} - -static int vvp_object_glimpse(const struct lu_env *env, - const struct cl_object *obj, struct ost_lvb *lvb) -{ - struct inode *inode = vvp_object_inode(obj); - - lvb->lvb_mtime = LTIME_S(inode->i_mtime); - lvb->lvb_atime = LTIME_S(inode->i_atime); - lvb->lvb_ctime = LTIME_S(inode->i_ctime); - /* - * LU-417: Add dirty pages block count lest i_blocks reports 0, some - * "cp" or "tar" on remote node may think it's a completely sparse file - * and skip it. - */ - if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0) - lvb->lvb_blocks = dirty_cnt(inode); - return 0; -} - -static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj, - struct cl_req_attr *attr) -{ - u64 valid_flags = OBD_MD_FLTYPE; - struct inode *inode; - struct obdo *oa; - - oa = attr->cra_oa; - inode = vvp_object_inode(obj); - - if (attr->cra_type == CRT_WRITE) - valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME | - OBD_MD_FLUID | OBD_MD_FLGID; - obdo_from_inode(oa, inode, valid_flags & attr->cra_flags); - obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid); - if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID)) - oa->o_parent_oid++; - memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, LUSTRE_JOBID_SIZE); -} - -static const struct cl_object_operations vvp_ops = { - .coo_page_init = vvp_page_init, - .coo_lock_init = vvp_lock_init, - .coo_io_init = vvp_io_init, - .coo_attr_get = vvp_attr_get, - .coo_attr_update = vvp_attr_update, - .coo_conf_set = vvp_conf_set, - .coo_prune = vvp_prune, - .coo_glimpse = 
vvp_object_glimpse, - .coo_req_attr_set = vvp_req_attr_set -}; - -static int vvp_object_init0(const struct lu_env *env, - struct vvp_object *vob, - const struct cl_object_conf *conf) -{ - vob->vob_inode = conf->coc_inode; - atomic_set(&vob->vob_transient_pages, 0); - cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page)); - return 0; -} - -static int vvp_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf) -{ - struct vvp_device *dev = lu2vvp_dev(obj->lo_dev); - struct vvp_object *vob = lu2vvp(obj); - struct lu_object *below; - struct lu_device *under; - int result; - - under = &dev->vdv_next->cd_lu_dev; - below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below) { - const struct cl_object_conf *cconf; - - cconf = lu2cl_conf(conf); - lu_object_add(obj, below); - result = vvp_object_init0(env, vob, cconf); - } else { - result = -ENOMEM; - } - - return result; -} - -static void vvp_object_free(const struct lu_env *env, struct lu_object *obj) -{ - struct vvp_object *vob = lu2vvp(obj); - - lu_object_fini(obj); - lu_object_header_fini(obj->lo_header); - kmem_cache_free(vvp_object_kmem, vob); -} - -static const struct lu_object_operations vvp_lu_obj_ops = { - .loo_object_init = vvp_object_init, - .loo_object_free = vvp_object_free, - .loo_object_print = vvp_object_print, -}; - -struct vvp_object *cl_inode2vvp(struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct cl_object *obj = lli->lli_clob; - struct lu_object *lu; - - lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type); - LASSERT(lu); - return lu2vvp(lu); -} - -struct lu_object *vvp_object_alloc(const struct lu_env *env, - const struct lu_object_header *unused, - struct lu_device *dev) -{ - struct vvp_object *vob; - struct lu_object *obj; - - vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS); - if (vob) { - struct cl_object_header *hdr; - - obj = &vob->vob_cl.co_lu; - hdr = &vob->vob_header; - 
cl_object_header_init(hdr); - hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page)); - - lu_object_init(obj, &hdr->coh_lu, dev); - lu_object_add_top(&hdr->coh_lu, obj); - - vob->vob_cl.co_ops = &vvp_ops; - obj->lo_ops = &vvp_lu_obj_ops; - } else { - obj = NULL; - } - return obj; -} diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c deleted file mode 100644 index 6eb0565ddc22..000000000000 --- a/drivers/staging/lustre/lustre/llite/vvp_page.c +++ /dev/null @@ -1,523 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_page for VVP layer. 
- * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include -#include -#include -#include - -#include "llite_internal.h" -#include "vvp_internal.h" - -/***************************************************************************** - * - * Page operations. - * - */ - -static void vvp_page_fini_common(struct vvp_page *vpg) -{ - struct page *vmpage = vpg->vpg_page; - - LASSERT(vmpage); - put_page(vmpage); -} - -static void vvp_page_fini(const struct lu_env *env, - struct cl_page_slice *slice) -{ - struct vvp_page *vpg = cl2vvp_page(slice); - struct page *vmpage = vpg->vpg_page; - - /* - * vmpage->private was already cleared when page was moved into - * VPG_FREEING state. - */ - LASSERT((struct cl_page *)vmpage->private != slice->cpl_page); - vvp_page_fini_common(vpg); -} - -static int vvp_page_own(const struct lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io, - int nonblock) -{ - struct vvp_page *vpg = cl2vvp_page(slice); - struct page *vmpage = vpg->vpg_page; - - LASSERT(vmpage); - if (nonblock) { - if (!trylock_page(vmpage)) - return -EAGAIN; - - if (unlikely(PageWriteback(vmpage))) { - unlock_page(vmpage); - return -EAGAIN; - } - - return 0; - } - - lock_page(vmpage); - wait_on_page_writeback(vmpage); - - return 0; -} - -static void vvp_page_assume(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - struct page *vmpage = cl2vm_page(slice); - - LASSERT(vmpage); - LASSERT(PageLocked(vmpage)); - wait_on_page_writeback(vmpage); -} - -static void vvp_page_unassume(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - struct page *vmpage = cl2vm_page(slice); - - LASSERT(vmpage); - LASSERT(PageLocked(vmpage)); -} - -static void vvp_page_disown(const struct lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io) -{ - struct page *vmpage = cl2vm_page(slice); - - LASSERT(vmpage); - 
LASSERT(PageLocked(vmpage)); - - unlock_page(cl2vm_page(slice)); -} - -static void vvp_page_discard(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - struct page *vmpage = cl2vm_page(slice); - struct vvp_page *vpg = cl2vvp_page(slice); - - LASSERT(vmpage); - LASSERT(PageLocked(vmpage)); - - if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used) - ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED); - - ll_invalidate_page(vmpage); -} - -static void vvp_page_delete(const struct lu_env *env, - const struct cl_page_slice *slice) -{ - struct page *vmpage = cl2vm_page(slice); - struct inode *inode = vmpage->mapping->host; - struct cl_object *obj = slice->cpl_obj; - struct cl_page *page = slice->cpl_page; - int refc; - - LASSERT(PageLocked(vmpage)); - LASSERT((struct cl_page *)vmpage->private == page); - LASSERT(inode == vvp_object_inode(obj)); - - /* Drop the reference count held in vvp_page_init */ - refc = atomic_dec_return(&page->cp_ref); - LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc); - - ClearPagePrivate(vmpage); - vmpage->private = 0; - /* - * Reference from vmpage to cl_page is removed, but the reference back - * is still here. It is removed later in vvp_page_fini(). - */ -} - -static void vvp_page_export(const struct lu_env *env, - const struct cl_page_slice *slice, - int uptodate) -{ - struct page *vmpage = cl2vm_page(slice); - - LASSERT(vmpage); - LASSERT(PageLocked(vmpage)); - if (uptodate) - SetPageUptodate(vmpage); - else - ClearPageUptodate(vmpage); -} - -static int vvp_page_is_vmlocked(const struct lu_env *env, - const struct cl_page_slice *slice) -{ - return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA; -} - -static int vvp_page_prep_read(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - /* Skip the page already marked as PG_uptodate. */ - return PageUptodate(cl2vm_page(slice)) ? 
-EALREADY : 0; -} - -static int vvp_page_prep_write(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - struct page *vmpage = cl2vm_page(slice); - struct cl_page *pg = slice->cpl_page; - - LASSERT(PageLocked(vmpage)); - LASSERT(!PageDirty(vmpage)); - - /* ll_writepage path is not a sync write, so need to set page writeback - * flag - */ - if (!pg->cp_sync_io) - set_page_writeback(vmpage); - - return 0; -} - -/** - * Handles page transfer errors at VM level. - * - * This takes inode as a separate argument, because inode on which error is to - * be set can be different from \a vmpage inode in case of direct-io. - */ -static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, - int ioret) -{ - struct vvp_object *obj = cl_inode2vvp(inode); - - if (ioret == 0) { - ClearPageError(vmpage); - obj->vob_discard_page_warned = 0; - } else { - SetPageError(vmpage); - mapping_set_error(inode->i_mapping, ioret); - - if ((ioret == -ESHUTDOWN || ioret == -EINTR) && - obj->vob_discard_page_warned == 0) { - obj->vob_discard_page_warned = 1; - ll_dirty_page_discard_warn(vmpage, ioret); - } - } -} - -static void vvp_page_completion_read(const struct lu_env *env, - const struct cl_page_slice *slice, - int ioret) -{ - struct vvp_page *vpg = cl2vvp_page(slice); - struct page *vmpage = vpg->vpg_page; - struct cl_page *page = slice->cpl_page; - struct inode *inode = vvp_object_inode(page->cp_obj); - - LASSERT(PageLocked(vmpage)); - CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret); - - if (vpg->vpg_defer_uptodate) - ll_ra_count_put(ll_i2sbi(inode), 1); - - if (ioret == 0) { - if (!vpg->vpg_defer_uptodate) - cl_page_export(env, page, 1); - } else { - vpg->vpg_defer_uptodate = 0; - } - - if (!page->cp_sync_io) - unlock_page(vmpage); -} - -static void vvp_page_completion_write(const struct lu_env *env, - const struct cl_page_slice *slice, - int ioret) -{ - struct vvp_page *vpg = cl2vvp_page(slice); - struct cl_page *pg = 
slice->cpl_page; - struct page *vmpage = vpg->vpg_page; - - CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret); - - if (pg->cp_sync_io) { - LASSERT(PageLocked(vmpage)); - LASSERT(!PageWriteback(vmpage)); - } else { - LASSERT(PageWriteback(vmpage)); - /* - * Only mark the page error only when it's an async write - * because applications won't wait for IO to finish. - */ - vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret); - - end_page_writeback(vmpage); - } -} - -/** - * Implements cl_page_operations::cpo_make_ready() method. - * - * This is called to yank a page from the transfer cache and to send it out as - * a part of transfer. This function try-locks the page. If try-lock failed, - * page is owned by some concurrent IO, and should be skipped (this is bad, - * but hopefully rare situation, as it usually results in transfer being - * shorter than possible). - * - * \retval 0 success, page can be placed into transfer - * - * \retval -EAGAIN page is either used by concurrent IO has been - * truncated. Skip it. - */ -static int vvp_page_make_ready(const struct lu_env *env, - const struct cl_page_slice *slice) -{ - struct page *vmpage = cl2vm_page(slice); - struct cl_page *pg = slice->cpl_page; - int result = 0; - - lock_page(vmpage); - if (clear_page_dirty_for_io(vmpage)) { - LASSERT(pg->cp_state == CPS_CACHED); - /* This actually clears the dirty bit in the radix tree. */ - set_page_writeback(vmpage); - CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); - } else if (pg->cp_state == CPS_PAGEOUT) { - /* is it possible for osc_flush_async_page() to already - * make it ready? 
- */ - result = -EALREADY; - } else { - CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n", - pg->cp_state); - LBUG(); - } - unlock_page(vmpage); - return result; -} - -static int vvp_page_print(const struct lu_env *env, - const struct cl_page_slice *slice, - void *cookie, lu_printer_t printer) -{ - struct vvp_page *vpg = cl2vvp_page(slice); - struct page *vmpage = vpg->vpg_page; - - (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d) vm@%p ", - vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage); - if (vmpage) { - (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru", - (long)vmpage->flags, page_count(vmpage), - page_mapcount(vmpage), vmpage->private, - vmpage->index, - list_empty(&vmpage->lru) ? "not-" : ""); - } - - (*printer)(env, cookie, "\n"); - - return 0; -} - -static int vvp_page_fail(const struct lu_env *env, - const struct cl_page_slice *slice) -{ - /* - * Cached read? - */ - LBUG(); - - return 0; -} - -static const struct cl_page_operations vvp_page_ops = { - .cpo_own = vvp_page_own, - .cpo_assume = vvp_page_assume, - .cpo_unassume = vvp_page_unassume, - .cpo_disown = vvp_page_disown, - .cpo_discard = vvp_page_discard, - .cpo_delete = vvp_page_delete, - .cpo_export = vvp_page_export, - .cpo_is_vmlocked = vvp_page_is_vmlocked, - .cpo_fini = vvp_page_fini, - .cpo_print = vvp_page_print, - .io = { - [CRT_READ] = { - .cpo_prep = vvp_page_prep_read, - .cpo_completion = vvp_page_completion_read, - .cpo_make_ready = vvp_page_fail, - }, - [CRT_WRITE] = { - .cpo_prep = vvp_page_prep_write, - .cpo_completion = vvp_page_completion_write, - .cpo_make_ready = vvp_page_make_ready, - }, - }, -}; - -static int vvp_transient_page_prep(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - /* transient page should always be sent. 
*/ - return 0; -} - -static int vvp_transient_page_own(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused, int nonblock) -{ - return 0; -} - -static void vvp_transient_page_assume(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ -} - -static void vvp_transient_page_unassume(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ -} - -static void vvp_transient_page_disown(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ -} - -static void vvp_transient_page_discard(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - struct cl_page *page = slice->cpl_page; - - /* - * For transient pages, remove it from the radix tree. - */ - cl_page_delete(env, page); -} - -static int vvp_transient_page_is_vmlocked(const struct lu_env *env, - const struct cl_page_slice *slice) -{ - struct inode *inode = vvp_object_inode(slice->cpl_obj); - int locked; - - locked = !inode_trylock(inode); - if (!locked) - inode_unlock(inode); - return locked ? 
-EBUSY : -ENODATA; -} - -static void -vvp_transient_page_completion(const struct lu_env *env, - const struct cl_page_slice *slice, - int ioret) -{ -} - -static void vvp_transient_page_fini(const struct lu_env *env, - struct cl_page_slice *slice) -{ - struct vvp_page *vpg = cl2vvp_page(slice); - struct cl_page *clp = slice->cpl_page; - struct vvp_object *clobj = cl2vvp(clp->cp_obj); - - vvp_page_fini_common(vpg); - atomic_dec(&clobj->vob_transient_pages); -} - -static const struct cl_page_operations vvp_transient_page_ops = { - .cpo_own = vvp_transient_page_own, - .cpo_assume = vvp_transient_page_assume, - .cpo_unassume = vvp_transient_page_unassume, - .cpo_disown = vvp_transient_page_disown, - .cpo_discard = vvp_transient_page_discard, - .cpo_fini = vvp_transient_page_fini, - .cpo_is_vmlocked = vvp_transient_page_is_vmlocked, - .cpo_print = vvp_page_print, - .io = { - [CRT_READ] = { - .cpo_prep = vvp_transient_page_prep, - .cpo_completion = vvp_transient_page_completion, - }, - [CRT_WRITE] = { - .cpo_prep = vvp_transient_page_prep, - .cpo_completion = vvp_transient_page_completion, - } - } -}; - -int vvp_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index) -{ - struct vvp_page *vpg = cl_object_page_slice(obj, page); - struct page *vmpage = page->cp_vmpage; - - CLOBINVRNT(env, obj, vvp_object_invariant(obj)); - - vpg->vpg_page = vmpage; - get_page(vmpage); - - if (page->cp_type == CPT_CACHEABLE) { - /* in cache, decref in vvp_page_delete */ - atomic_inc(&page->cp_ref); - SetPagePrivate(vmpage); - vmpage->private = (unsigned long)page; - cl_page_slice_add(page, &vpg->vpg_cl, obj, index, - &vvp_page_ops); - } else { - struct vvp_object *clobj = cl2vvp(obj); - - cl_page_slice_add(page, &vpg->vpg_cl, obj, index, - &vvp_transient_page_ops); - atomic_inc(&clobj->vob_transient_pages); - } - return 0; -} diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c deleted file mode 100644 
index 7fa0a419c094..000000000000 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ /dev/null @@ -1,665 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#include -#include -#include -#include -#include - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include - -#include "llite_internal.h" - -const struct xattr_handler *get_xattr_type(const char *name) -{ - int i; - - for (i = 0; ll_xattr_handlers[i]; i++) { - const char *prefix = xattr_prefix(ll_xattr_handlers[i]); - size_t prefix_len = strlen(prefix); - - if (!strncmp(prefix, name, prefix_len)) - return ll_xattr_handlers[i]; - } - - return NULL; -} - -static int xattr_type_filter(struct ll_sb_info *sbi, - const struct xattr_handler *handler) -{ - /* No handler means XATTR_OTHER_T */ - if (!handler) - return -EOPNOTSUPP; - - if ((handler->flags == XATTR_ACL_ACCESS_T || - handler->flags == XATTR_ACL_DEFAULT_T) && - !(sbi->ll_flags & LL_SBI_ACL)) - return -EOPNOTSUPP; - - if (handler->flags == XATTR_USER_T && - !(sbi->ll_flags & LL_SBI_USER_XATTR)) - return -EOPNOTSUPP; - - if (handler->flags == XATTR_TRUSTED_T && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - - return 0; -} - -static int ll_xattr_set_common(const struct xattr_handler *handler, - struct dentry *dentry, struct inode *inode, - const char *name, const void *value, size_t size, - int flags) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ptlrpc_request *req = NULL; - const char *pv = value; - char *fullname; - u64 valid; - int rc; - - /* When setxattr() is called with a size of 0 the value is - * unconditionally replaced by "". When removexattr() is - * called we get a NULL value and XATTR_REPLACE for flags. 
- */ - if (!value && flags == XATTR_REPLACE) { - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1); - valid = OBD_MD_FLXATTRRM; - } else { - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1); - valid = OBD_MD_FLXATTR; - } - - rc = xattr_type_filter(sbi, handler); - if (rc) - return rc; - - if ((handler->flags == XATTR_ACL_ACCESS_T || - handler->flags == XATTR_ACL_DEFAULT_T) && - !inode_owner_or_capable(inode)) - return -EPERM; - - /* b10667: ignore lustre special xattr for now */ - if (!strcmp(name, "hsm") || - ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) || - (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov")))) - return 0; - - /* LU-549: Disable security.selinux when selinux is disabled */ - if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() && - strcmp(name, "selinux") == 0) - return -EOPNOTSUPP; - - /*FIXME: enable IMA when the conditions are ready */ - if (handler->flags == XATTR_SECURITY_T && - (!strcmp(name, "ima") || !strcmp(name, "evm"))) - return -EOPNOTSUPP; - - /* - * In user.* namespace, only regular files and directories can have - * extended attributes. 
- */ - if (handler->flags == XATTR_USER_T) { - if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) - return -EPERM; - } - - fullname = kasprintf(GFP_KERNEL, "%s%s", handler->prefix, name); - if (!fullname) - return -ENOMEM; - - rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), valid, fullname, - pv, size, flags, ll_i2suppgid(inode), &req); - kfree(fullname); - if (rc) { - if (rc == -EOPNOTSUPP && handler->flags == XATTR_USER_T) { - LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n"); - sbi->ll_flags &= ~LL_SBI_USER_XATTR; - } - return rc; - } - - ptlrpc_req_finished(req); - return 0; -} - -static int get_hsm_state(struct inode *inode, u32 *hus_states) -{ - struct md_op_data *op_data; - struct hsm_user_state *hus; - int rc; - - hus = kzalloc(sizeof(*hus), GFP_NOFS); - if (!hus) - return -ENOMEM; - - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, hus); - if (!IS_ERR(op_data)) { - rc = obd_iocontrol(LL_IOC_HSM_STATE_GET, ll_i2mdexp(inode), - sizeof(*op_data), op_data, NULL); - if (!rc) - *hus_states = hus->hus_states; - else - CDEBUG(D_VFSTRACE, "obd_iocontrol failed. rc = %d\n", - rc); - - ll_finish_md_op_data(op_data); - } else { - rc = PTR_ERR(op_data); - CDEBUG(D_VFSTRACE, "Could not prepare the opdata. rc = %d\n", - rc); - } - kfree(hus); - return rc; -} - -static int ll_adjust_lum(struct inode *inode, struct lov_user_md *lump) -{ - int rc = 0; - - if (!lump) - return 0; - - /* Attributes that are saved via getxattr will always have - * the stripe_offset as 0. Instead, the MDS should be - * allowed to pick the starting OST index. b=17846 - */ - if (lump->lmm_stripe_offset == 0) - lump->lmm_stripe_offset = -1; - - /* Avoid anyone directly setting the RELEASED flag. */ - if (lump->lmm_pattern & LOV_PATTERN_F_RELEASED) { - /* Only if we have a released flag check if the file - * was indeed archived. 
- */ - u32 state = HS_NONE; - - rc = get_hsm_state(inode, &state); - if (rc) - return rc; - - if (!(state & HS_ARCHIVED)) { - CDEBUG(D_VFSTRACE, - "hus_states state = %x, pattern = %x\n", - state, lump->lmm_pattern); - /* - * Here the state is: real file is not - * archived but user is requesting to set - * the RELEASED flag so we mask off the - * released flag from the request - */ - lump->lmm_pattern ^= LOV_PATTERN_F_RELEASED; - } - } - - return rc; -} - -static int ll_setstripe_ea(struct dentry *dentry, struct lov_user_md *lump, - size_t size) -{ - struct inode *inode = d_inode(dentry); - int rc = 0; - - /* - * It is possible to set an xattr to a "" value of zero size. - * For this case we are going to treat it as a removal. - */ - if (!size && lump) - lump = NULL; - - rc = ll_adjust_lum(inode, lump); - if (rc) - return rc; - - if (lump && S_ISREG(inode->i_mode)) { - u64 it_flags = FMODE_WRITE; - ssize_t lum_size; - - lum_size = ll_lov_user_md_size(lump); - if (lum_size < 0 || size < lum_size) - return -ERANGE; - - rc = ll_lov_setstripe_ea_info(inode, dentry, it_flags, lump, - lum_size); - /** - * b=10667: ignore -EEXIST. - * Silently eat error on setting trusted.lov/lustre.lov - * attribute for platforms that added the default option - * to copy all attributes in 'cp' command. Both rsync and - * tar --xattrs also will try to set LOVEA for existing - * files. 
- */ - if (rc == -EEXIST) - rc = 0; - } else if (S_ISDIR(inode->i_mode)) { - if (size != 0 && size < sizeof(struct lov_user_md)) - return -EINVAL; - - rc = ll_dir_setstripe(inode, lump, 0); - } - - return rc; -} - -static int ll_xattr_set(const struct xattr_handler *handler, - struct dentry *dentry, struct inode *inode, - const char *name, const void *value, size_t size, - int flags) -{ - LASSERT(inode); - LASSERT(name); - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n", - PFID(ll_inode2fid(inode)), inode, name); - - /* lustre/trusted.lov.xxx would be passed through xattr API */ - if (!strcmp(name, "lov")) { - int op_type = flags == XATTR_REPLACE ? LPROC_LL_REMOVEXATTR : - LPROC_LL_SETXATTR; - - ll_stats_ops_tally(ll_i2sbi(inode), op_type, 1); - - return ll_setstripe_ea(dentry, (struct lov_user_md *)value, - size); - } else if (!strcmp(name, "lma") || !strcmp(name, "link")) { - int op_type = flags == XATTR_REPLACE ? LPROC_LL_REMOVEXATTR : - LPROC_LL_SETXATTR; - - ll_stats_ops_tally(ll_i2sbi(inode), op_type, 1); - return 0; - } - - return ll_xattr_set_common(handler, dentry, inode, name, value, size, - flags); -} - -int ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer, - size_t size, u64 valid) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ptlrpc_request *req = NULL; - struct mdt_body *body; - void *xdata; - int rc; - - if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T && - (type != XATTR_SECURITY_T || strcmp(name, "security.selinux"))) { - rc = ll_xattr_cache_get(inode, name, buffer, size, valid); - if (rc == -EAGAIN) - goto getxattr_nocache; - if (rc < 0) - goto out_xattr; - - /* Add "system.posix_acl_access" to the list */ - if (lli->lli_posix_acl && valid & OBD_MD_FLXATTRLS) { - if (size == 0) { - rc += sizeof(XATTR_NAME_ACL_ACCESS); - } else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) { - memcpy(buffer + rc, XATTR_NAME_ACL_ACCESS, - 
sizeof(XATTR_NAME_ACL_ACCESS)); - rc += sizeof(XATTR_NAME_ACL_ACCESS); - } else { - rc = -ERANGE; - goto out_xattr; - } - } - } else { -getxattr_nocache: - rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), valid, - name, size, &req); - if (rc < 0) - goto out_xattr; - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body); - - /* only detect the xattr size */ - if (size == 0) { - rc = body->mbo_eadatasize; - goto out; - } - - if (size < body->mbo_eadatasize) { - CERROR("server bug: replied size %u > %u\n", - body->mbo_eadatasize, (int)size); - rc = -ERANGE; - goto out; - } - - if (body->mbo_eadatasize == 0) { - rc = -ENODATA; - goto out; - } - - /* do not need swab xattr data */ - xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, - body->mbo_eadatasize); - if (!xdata) { - rc = -EFAULT; - goto out; - } - - memcpy(buffer, xdata, body->mbo_eadatasize); - rc = body->mbo_eadatasize; - } - -out_xattr: - if (rc == -EOPNOTSUPP && type == XATTR_USER_T) { - LCONSOLE_INFO( - "%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), rc); - sbi->ll_flags &= ~LL_SBI_USER_XATTR; - } -out: - ptlrpc_req_finished(req); - return rc; -} - -static int ll_xattr_get_common(const struct xattr_handler *handler, - struct dentry *dentry, struct inode *inode, - const char *name, void *buffer, size_t size) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - char *fullname; - int rc; - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n", - PFID(ll_inode2fid(inode)), inode); - - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); - - rc = xattr_type_filter(sbi, handler); - if (rc) - return rc; - - /* LU-549: Disable security.selinux when selinux is disabled */ - if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() && - !strcmp(name, "selinux")) - return -EOPNOTSUPP; - -#ifdef CONFIG_FS_POSIX_ACL - /* posix acl is under protection of LOOKUP lock. 
when calling to this, - * we just have path resolution to the target inode, so we have great - * chance that cached ACL is uptodate. - */ - if (handler->flags == XATTR_ACL_ACCESS_T) { - struct ll_inode_info *lli = ll_i2info(inode); - struct posix_acl *acl; - - spin_lock(&lli->lli_lock); - acl = posix_acl_dup(lli->lli_posix_acl); - spin_unlock(&lli->lli_lock); - - if (!acl) - return -ENODATA; - - rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); - posix_acl_release(acl); - return rc; - } - if (handler->flags == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode)) - return -ENODATA; -#endif - fullname = kasprintf(GFP_KERNEL, "%s%s", handler->prefix, name); - if (!fullname) - return -ENOMEM; - - rc = ll_xattr_list(inode, fullname, handler->flags, buffer, size, - OBD_MD_FLXATTR); - kfree(fullname); - return rc; -} - -static ssize_t ll_getxattr_lov(struct inode *inode, void *buf, size_t buf_size) -{ - ssize_t rc; - - if (S_ISREG(inode->i_mode)) { - struct cl_object *obj = ll_i2info(inode)->lli_clob; - struct cl_layout cl = { - .cl_buf.lb_buf = buf, - .cl_buf.lb_len = buf_size, - }; - struct lu_env *env; - u16 refcheck; - - if (!obj) - return -ENODATA; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - rc = cl_object_layout_get(env, obj, &cl); - if (rc < 0) - goto out_env; - - if (!cl.cl_size) { - rc = -ENODATA; - goto out_env; - } - - rc = cl.cl_size; - - if (!buf_size) - goto out_env; - - LASSERT(buf && rc <= buf_size); - - /* - * Do not return layout gen for getxattr() since - * otherwise it would confuse tar --xattr by - * recognizing layout gen as stripe offset when the - * file is restored. See LU-2809. 
- */ - ((struct lov_mds_md *)buf)->lmm_layout_gen = 0; -out_env: - cl_env_put(env, &refcheck); - - return rc; - } else if (S_ISDIR(inode->i_mode)) { - struct ptlrpc_request *req = NULL; - struct lov_mds_md *lmm = NULL; - int lmm_size = 0; - - rc = ll_dir_getstripe(inode, (void **)&lmm, &lmm_size, - &req, 0); - if (rc < 0) - goto out_req; - - if (!buf_size) { - rc = lmm_size; - goto out_req; - } - - if (buf_size < lmm_size) { - rc = -ERANGE; - goto out_req; - } - - memcpy(buf, lmm, lmm_size); - rc = lmm_size; -out_req: - if (req) - ptlrpc_req_finished(req); - - return rc; - } else { - return -ENODATA; - } -} - -static int ll_xattr_get(const struct xattr_handler *handler, - struct dentry *dentry, struct inode *inode, - const char *name, void *buffer, size_t size) -{ - LASSERT(inode); - LASSERT(name); - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n", - PFID(ll_inode2fid(inode)), inode, name); - - if (!strcmp(name, "lov")) { - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1); - - return ll_getxattr_lov(inode, buffer, size); - } - - return ll_xattr_get_common(handler, dentry, inode, name, buffer, size); -} - -ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) -{ - struct inode *inode = d_inode(dentry); - struct ll_sb_info *sbi = ll_i2sbi(inode); - char *xattr_name; - ssize_t rc, rc2; - size_t len, rem; - - LASSERT(inode); - - CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n", - PFID(ll_inode2fid(inode)), inode); - - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1); - - rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size, - OBD_MD_FLXATTRLS); - if (rc < 0) - return rc; - - /* - * If we're being called to get the size of the xattr list - * (size == 0) then just assume that a lustre.lov xattr - * exists. 
- */ - if (!size) - return rc + sizeof(XATTR_LUSTRE_LOV); - - xattr_name = buffer; - rem = rc; - - while (rem > 0) { - len = strnlen(xattr_name, rem - 1) + 1; - rem -= len; - if (!xattr_type_filter(sbi, get_xattr_type(xattr_name))) { - /* Skip OK xattr type, leave it in buffer. */ - xattr_name += len; - continue; - } - - /* - * Move up remaining xattrs in buffer - * removing the xattr that is not OK. - */ - memmove(xattr_name, xattr_name + len, rem); - rc -= len; - } - - rc2 = ll_getxattr_lov(inode, NULL, 0); - if (rc2 == -ENODATA) - return rc; - - if (rc2 < 0) - return rc2; - - if (size < rc + sizeof(XATTR_LUSTRE_LOV)) - return -ERANGE; - - memcpy(buffer + rc, XATTR_LUSTRE_LOV, sizeof(XATTR_LUSTRE_LOV)); - - return rc + sizeof(XATTR_LUSTRE_LOV); -} - -static const struct xattr_handler ll_user_xattr_handler = { - .prefix = XATTR_USER_PREFIX, - .flags = XATTR_USER_T, - .get = ll_xattr_get_common, - .set = ll_xattr_set_common, -}; - -static const struct xattr_handler ll_trusted_xattr_handler = { - .prefix = XATTR_TRUSTED_PREFIX, - .flags = XATTR_TRUSTED_T, - .get = ll_xattr_get, - .set = ll_xattr_set, -}; - -static const struct xattr_handler ll_security_xattr_handler = { - .prefix = XATTR_SECURITY_PREFIX, - .flags = XATTR_SECURITY_T, - .get = ll_xattr_get_common, - .set = ll_xattr_set_common, -}; - -static const struct xattr_handler ll_acl_access_xattr_handler = { - .name = XATTR_NAME_POSIX_ACL_ACCESS, - .flags = XATTR_ACL_ACCESS_T, - .get = ll_xattr_get_common, - .set = ll_xattr_set_common, -}; - -static const struct xattr_handler ll_acl_default_xattr_handler = { - .name = XATTR_NAME_POSIX_ACL_DEFAULT, - .flags = XATTR_ACL_DEFAULT_T, - .get = ll_xattr_get_common, - .set = ll_xattr_set_common, -}; - -static const struct xattr_handler ll_lustre_xattr_handler = { - .prefix = XATTR_LUSTRE_PREFIX, - .flags = XATTR_LUSTRE_T, - .get = ll_xattr_get, - .set = ll_xattr_set, -}; - -const struct xattr_handler *ll_xattr_handlers[] = { - &ll_user_xattr_handler, - 
&ll_trusted_xattr_handler, - &ll_security_xattr_handler, -#ifdef CONFIG_FS_POSIX_ACL - &ll_acl_access_xattr_handler, - &ll_acl_default_xattr_handler, -#endif - &ll_lustre_xattr_handler, - NULL, -}; diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c deleted file mode 100644 index 5da69ba088c4..000000000000 --- a/drivers/staging/lustre/lustre/llite/xattr_cache.c +++ /dev/null @@ -1,504 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2012 Xyratex Technology Limited - * - * Copyright (c) 2013, 2015, Intel Corporation. - * - * Author: Andrew Perepechko - * - */ - -#define DEBUG_SUBSYSTEM S_LLITE - -#include -#include -#include -#include -#include -#include "llite_internal.h" - -/* If we ever have hundreds of extended attributes, we might want to consider - * using a hash or a tree structure instead of list for faster lookups. - */ -struct ll_xattr_entry { - struct list_head xe_list; /* protected with - * lli_xattrs_list_rwsem - */ - char *xe_name; /* xattr name, \0-terminated */ - char *xe_value; /* xattr value */ - unsigned int xe_namelen; /* strlen(xe_name) + 1 */ - unsigned int xe_vallen; /* xattr value length */ -}; - -static struct kmem_cache *xattr_kmem; -static struct lu_kmem_descr xattr_caches[] = { - { - .ckd_cache = &xattr_kmem, - .ckd_name = "xattr_kmem", - .ckd_size = sizeof(struct ll_xattr_entry) - }, - { - .ckd_cache = NULL - } -}; - -int ll_xattr_init(void) -{ - return lu_kmem_init(xattr_caches); -} - -void ll_xattr_fini(void) -{ - lu_kmem_fini(xattr_caches); -} - -/** - * Initializes xattr cache for an inode. - * - * This initializes the xattr list and marks cache presence. - */ -static void ll_xattr_cache_init(struct ll_inode_info *lli) -{ - INIT_LIST_HEAD(&lli->lli_xattrs); - set_bit(LLIF_XATTR_CACHE, &lli->lli_flags); -} - -/** - * This looks for a specific extended attribute. 
- * - * Find in @cache and return @xattr_name attribute in @xattr, - * for the NULL @xattr_name return the first cached @xattr. - * - * \retval 0 success - * \retval -ENODATA if not found - */ -static int ll_xattr_cache_find(struct list_head *cache, - const char *xattr_name, - struct ll_xattr_entry **xattr) -{ - struct ll_xattr_entry *entry; - - list_for_each_entry(entry, cache, xe_list) { - /* xattr_name == NULL means look for any entry */ - if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) { - *xattr = entry; - CDEBUG(D_CACHE, "find: [%s]=%.*s\n", - entry->xe_name, entry->xe_vallen, - entry->xe_value); - return 0; - } - } - - return -ENODATA; -} - -/** - * This adds an xattr. - * - * Add @xattr_name attr with @xattr_val value and @xattr_val_len length, - * - * \retval 0 success - * \retval -ENOMEM if no memory could be allocated for the cached attr - * \retval -EPROTO if duplicate xattr is being added - */ -static int ll_xattr_cache_add(struct list_head *cache, - const char *xattr_name, - const char *xattr_val, - unsigned int xattr_val_len) -{ - struct ll_xattr_entry *xattr; - - if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) { - CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name); - return -EPROTO; - } - - xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS); - if (!xattr) { - CDEBUG(D_CACHE, "failed to allocate xattr\n"); - return -ENOMEM; - } - - xattr->xe_name = kstrdup(xattr_name, GFP_NOFS); - if (!xattr->xe_name) { - CDEBUG(D_CACHE, "failed to alloc xattr name %s\n", - xattr_name); - goto err_name; - } - xattr->xe_namelen = strlen(xattr_name) + 1; - - xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS); - if (!xattr->xe_value) - goto err_value; - - xattr->xe_vallen = xattr_val_len; - list_add(&xattr->xe_list, cache); - - CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len, - xattr_val); - - return 0; -err_value: - kfree(xattr->xe_name); -err_name: - kmem_cache_free(xattr_kmem, xattr); - - return -ENOMEM; -} - -/** - * 
This removes an extended attribute from cache. - * - * Remove @xattr_name attribute from @cache. - * - * \retval 0 success - * \retval -ENODATA if @xattr_name is not cached - */ -static int ll_xattr_cache_del(struct list_head *cache, - const char *xattr_name) -{ - struct ll_xattr_entry *xattr; - - CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name); - - if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) { - list_del(&xattr->xe_list); - kfree(xattr->xe_name); - kfree(xattr->xe_value); - kmem_cache_free(xattr_kmem, xattr); - - return 0; - } - - return -ENODATA; -} - -/** - * This iterates cached extended attributes. - * - * Walk over cached attributes in @cache and - * fill in @xld_buffer or only calculate buffer - * size if @xld_buffer is NULL. - * - * \retval >= 0 buffer list size - * \retval -ENODATA if the list cannot fit @xld_size buffer - */ -static int ll_xattr_cache_list(struct list_head *cache, - char *xld_buffer, - int xld_size) -{ - struct ll_xattr_entry *xattr, *tmp; - int xld_tail = 0; - - list_for_each_entry_safe(xattr, tmp, cache, xe_list) { - CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n", - xld_buffer, xld_tail, xattr->xe_name); - - if (xld_buffer) { - xld_size -= xattr->xe_namelen; - if (xld_size < 0) - break; - memcpy(&xld_buffer[xld_tail], - xattr->xe_name, xattr->xe_namelen); - } - xld_tail += xattr->xe_namelen; - } - - if (xld_size < 0) - return -ERANGE; - - return xld_tail; -} - -/** - * Check if the xattr cache is initialized (filled). - * - * \retval 0 @cache is not initialized - * \retval 1 @cache is initialized - */ -static int ll_xattr_cache_valid(struct ll_inode_info *lli) -{ - return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags); -} - -/** - * This finalizes the xattr cache. - * - * Free all xattr memory. @lli is the inode info pointer. 
- * - * \retval 0 no error occurred - */ -static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli) -{ - if (!ll_xattr_cache_valid(lli)) - return 0; - - while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0) - ; /* empty loop */ - - clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags); - - return 0; -} - -int ll_xattr_cache_destroy(struct inode *inode) -{ - struct ll_inode_info *lli = ll_i2info(inode); - int rc; - - down_write(&lli->lli_xattrs_list_rwsem); - rc = ll_xattr_cache_destroy_locked(lli); - up_write(&lli->lli_xattrs_list_rwsem); - - return rc; -} - -/** - * Match or enqueue a PR lock. - * - * Find or request an LDLM lock with xattr data. - * Since LDLM does not provide API for atomic match_or_enqueue, - * the function handles it with a separate enq lock. - * If successful, the function exits with the list lock held. - * - * \retval 0 no error occurred - * \retval -ENOMEM not enough memory - */ -static int ll_xattr_find_get_lock(struct inode *inode, - struct lookup_intent *oit, - struct ptlrpc_request **req) -{ - enum ldlm_mode mode; - struct lustre_handle lockh = { 0 }; - struct md_op_data *op_data; - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct obd_export *exp = sbi->ll_md_exp; - int rc; - - mutex_lock(&lli->lli_xattrs_enq_lock); - /* inode may have been shrunk and recreated, so data is gone, match lock - * only when data exists. - */ - if (ll_xattr_cache_valid(lli)) { - /* Try matching first. */ - mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0, - LCK_PR); - if (mode != 0) { - /* fake oit in mdc_revalidate_lock() manner */ - oit->it_lock_handle = lockh.cookie; - oit->it_lock_mode = mode; - goto out; - } - } - - /* Enqueue if the lock isn't cached locally. 
*/ - op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) { - mutex_unlock(&lli->lli_xattrs_enq_lock); - return PTR_ERR(op_data); - } - - op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS; - - rc = md_intent_lock(exp, op_data, oit, req, &ll_md_blocking_ast, 0); - ll_finish_md_op_data(op_data); - *req = oit->it_request; - - if (rc < 0) { - CDEBUG(D_CACHE, - "md_intent_lock failed with %d for fid " DFID "\n", - rc, PFID(ll_inode2fid(inode))); - mutex_unlock(&lli->lli_xattrs_enq_lock); - return rc; - } - -out: - down_write(&lli->lli_xattrs_list_rwsem); - mutex_unlock(&lli->lli_xattrs_enq_lock); - - return 0; -} - -/** - * Refill the xattr cache. - * - * Fetch and cache the whole of xattrs for @inode, acquiring a read lock. - * - * \retval 0 no error occurred - * \retval -EPROTO network protocol error - * \retval -ENOMEM not enough memory for the cache - */ -static int ll_xattr_cache_refill(struct inode *inode) -{ - struct lookup_intent oit = { .it_op = IT_GETXATTR }; - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ptlrpc_request *req = NULL; - const char *xdata, *xval, *xtail, *xvtail; - struct ll_inode_info *lli = ll_i2info(inode); - struct mdt_body *body; - __u32 *xsizes; - int rc, i; - - rc = ll_xattr_find_get_lock(inode, &oit, &req); - if (rc) - goto err_req; - - /* Do we have the data at this point? */ - if (ll_xattr_cache_valid(lli)) { - ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1); - ll_intent_drop_lock(&oit); - rc = 0; - goto err_req; - } - - /* Matched but no cache? Cancelled on error by a parallel refill. 
*/ - if (unlikely(!req)) { - CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n"); - ll_intent_drop_lock(&oit); - rc = -EAGAIN; - goto err_unlock; - } - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (!body) { - CERROR("no MDT BODY in the refill xattr reply\n"); - rc = -EPROTO; - goto err_cancel; - } - /* do not need swab xattr data */ - xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, - body->mbo_eadatasize); - xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS, - body->mbo_aclsize); - xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS, - body->mbo_max_mdsize * sizeof(__u32)); - if (!xdata || !xval || !xsizes) { - CERROR("wrong setxattr reply\n"); - rc = -EPROTO; - goto err_cancel; - } - - xtail = xdata + body->mbo_eadatasize; - xvtail = xval + body->mbo_aclsize; - - CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail); - - ll_xattr_cache_init(lli); - - for (i = 0; i < body->mbo_max_mdsize; i++) { - CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval); - /* Perform consistency checks: attr names and vals in pill */ - if (!memchr(xdata, 0, xtail - xdata)) { - CERROR("xattr protocol violation (names are broken)\n"); - rc = -EPROTO; - } else if (xval + *xsizes > xvtail) { - CERROR("xattr protocol violation (vals are broken)\n"); - rc = -EPROTO; - } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) { - rc = -ENOMEM; - } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) { - /* Filter out ACL ACCESS since it's cached separately */ - CDEBUG(D_CACHE, "not caching %s\n", - XATTR_NAME_ACL_ACCESS); - rc = 0; - } else if (!strcmp(xdata, "security.selinux")) { - /* Filter out security.selinux, it is cached in slab */ - CDEBUG(D_CACHE, "not caching security.selinux\n"); - rc = 0; - } else { - rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval, - *xsizes); - } - if (rc < 0) { - ll_xattr_cache_destroy_locked(lli); - goto err_cancel; - } - xdata += strlen(xdata) + 1; - xval += *xsizes; - 
xsizes++; - } - - if (xdata != xtail || xval != xvtail) - CERROR("a hole in xattr data\n"); - - ll_set_lock_data(sbi->ll_md_exp, inode, &oit, NULL); - ll_intent_drop_lock(&oit); - - ptlrpc_req_finished(req); - return rc; - -err_cancel: - ldlm_lock_decref_and_cancel((struct lustre_handle *) - &oit.it_lock_handle, - oit.it_lock_mode); -err_unlock: - up_write(&lli->lli_xattrs_list_rwsem); -err_req: - if (rc == -ERANGE) - rc = -EAGAIN; - - ptlrpc_req_finished(req); - return rc; -} - -/** - * Get an xattr value or list xattrs using the write-through cache. - * - * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or - * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode. - * The resulting value/list is stored in @buffer if the former - * is not larger than @size. - * - * \retval 0 no error occurred - * \retval -EPROTO network protocol error - * \retval -ENOMEM not enough memory for the cache - * \retval -ERANGE the buffer is not large enough - * \retval -ENODATA no such attr or the list is empty - */ -int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer, - size_t size, __u64 valid) -{ - struct ll_inode_info *lli = ll_i2info(inode); - int rc = 0; - - LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS)); - - down_read(&lli->lli_xattrs_list_rwsem); - if (!ll_xattr_cache_valid(lli)) { - up_read(&lli->lli_xattrs_list_rwsem); - rc = ll_xattr_cache_refill(inode); - if (rc) - return rc; - downgrade_write(&lli->lli_xattrs_list_rwsem); - } else { - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1); - } - - if (valid & OBD_MD_FLXATTR) { - struct ll_xattr_entry *xattr; - - rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr); - if (rc == 0) { - rc = xattr->xe_vallen; - /* zero size means we are only requested size in rc */ - if (size != 0) { - if (size >= xattr->xe_vallen) - memcpy(buffer, xattr->xe_value, - xattr->xe_vallen); - else - rc = -ERANGE; - } - } - } else if (valid & OBD_MD_FLXATTRLS) { - rc = 
ll_xattr_cache_list(&lli->lli_xattrs, - size ? buffer : NULL, size); - } - - goto out; -out: - up_read(&lli->lli_xattrs_list_rwsem); - - return rc; -} diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c deleted file mode 100644 index 93ec07531ac7..000000000000 --- a/drivers/staging/lustre/lustre/llite/xattr_security.c +++ /dev/null @@ -1,96 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * GPL HEADER END - */ - -/* - * Copyright (c) 2014 Bull SAS - * Author: Sebastien Buisson sebastien.buisson@bull.net - */ - -/* - * lustre/llite/xattr_security.c - * Handler for storing security labels as extended attributes. - */ - -#include -#include -#include -#include -#include "llite_internal.h" - -/** - * A helper function for ll_security_inode_init_security() - * that takes care of setting xattrs - * - * Get security context of @inode from @xattr_array, - * and put it in 'security.xxx' xattr of dentry - * stored in @fs_info. 
- * - * \retval 0 success - * \retval -ENOMEM if no memory could be allocated for xattr name - * \retval < 0 failure to set xattr - */ -static int -ll_initxattrs(struct inode *inode, const struct xattr *xattr_array, - void *fs_info) -{ - struct dentry *dentry = fs_info; - const struct xattr *xattr; - int err = 0; - - for (xattr = xattr_array; xattr->name; xattr++) { - char *full_name; - - full_name = kasprintf(GFP_KERNEL, "%s%s", - XATTR_SECURITY_PREFIX, xattr->name); - if (!full_name) { - err = -ENOMEM; - break; - } - - err = __vfs_setxattr(dentry, inode, full_name, xattr->value, - xattr->value_len, XATTR_CREATE); - kfree(full_name); - if (err < 0) - break; - } - return err; -} - -/** - * Initializes security context - * - * Get security context of @inode in @dir, - * and put it in 'security.xxx' xattr of @dentry. - * - * \retval 0 success, or SELinux is disabled - * \retval -ENOMEM if no memory could be allocated for xattr name - * \retval < 0 failure to get security context or set xattr - */ -int -ll_init_security(struct dentry *dentry, struct inode *inode, struct inode *dir) -{ - if (!selinux_is_enabled()) - return 0; - - return security_inode_init_security(inode, dir, NULL, - &ll_initxattrs, dentry); -} diff --git a/drivers/staging/lustre/lustre/lmv/Makefile b/drivers/staging/lustre/lustre/lmv/Makefile deleted file mode 100644 index 91c99114aa13..000000000000 --- a/drivers/staging/lustre/lustre/lmv/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += lmv.o -lmv-y := lmv_obd.o lmv_intent.o lmv_fld.o lproc_lmv.o diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c deleted file mode 100644 index 00dc858c10c9..000000000000 --- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c +++ /dev/null @@ -1,82 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - 
* - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2013, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LMV -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include "lmv_internal.h" - -int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds) -{ - struct obd_device *obd = lmv2obd_dev(lmv); - int rc; - - /* - * FIXME: Currently ZFS still use local seq for ROOT unfortunately, and - * this fid_is_local check should be removed once LU-2240 is fixed - */ - if (!fid_is_sane(fid) || !(fid_seq_in_fldb(fid_seq(fid)) || - fid_seq_is_local_file(fid_seq(fid)))) { - CERROR("%s: invalid FID " DFID "\n", obd->obd_name, PFID(fid)); - return -EINVAL; - } - - rc = fld_client_lookup(&lmv->lmv_fld, fid_seq(fid), mds, - LU_SEQ_RANGE_MDT, NULL); - if (rc) { - CERROR("Error while looking for mds number. 
Seq %#llx, err = %d\n", - fid_seq(fid), rc); - return rc; - } - - CDEBUG(D_INODE, "FLD lookup got mds #%x for fid=" DFID "\n", - *mds, PFID(fid)); - - if (*mds >= lmv->desc.ld_tgt_count) { - CERROR("FLD lookup got invalid mds #%x (max: %x) for fid=" DFID "\n", *mds, lmv->desc.ld_tgt_count, - PFID(fid)); - rc = -EINVAL; - } - return rc; -} diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c deleted file mode 100644 index 1e850fdbc623..000000000000 --- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c +++ /dev/null @@ -1,521 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_LMV -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "lmv_internal.h" - -static int lmv_intent_remote(struct obd_export *exp, struct lookup_intent *it, - const struct lu_fid *parent_fid, - struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, - __u64 extra_lock_flags) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct ptlrpc_request *req = NULL; - struct lustre_handle plock; - struct md_op_data *op_data; - struct lmv_tgt_desc *tgt; - struct mdt_body *body; - int pmode; - int rc = 0; - - body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (!body) - return -EPROTO; - - LASSERT((body->mbo_valid & OBD_MD_MDS)); - - /* - * Unfortunately, we have to lie to MDC/MDS to retrieve - * attributes llite needs and provideproper locking. - */ - if (it->it_op & IT_LOOKUP) - it->it_op = IT_GETATTR; - - /* - * We got LOOKUP lock, but we really need attrs. 
- */ - pmode = it->it_lock_mode; - if (pmode) { - plock.cookie = it->it_lock_handle; - it->it_lock_mode = 0; - it->it_request = NULL; - } - - LASSERT(fid_is_sane(&body->mbo_fid1)); - - tgt = lmv_find_target(lmv, &body->mbo_fid1); - if (IS_ERR(tgt)) { - rc = PTR_ERR(tgt); - goto out; - } - - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) { - rc = -ENOMEM; - goto out; - } - - op_data->op_fid1 = body->mbo_fid1; - /* Sent the parent FID to the remote MDT */ - if (parent_fid) { - /* The parent fid is only for remote open to - * check whether the open is from OBF, - * see mdt_cross_open - */ - LASSERT(it->it_op & IT_OPEN); - op_data->op_fid2 = *parent_fid; - } - - op_data->op_bias = MDS_CROSS_REF; - CDEBUG(D_INODE, "REMOTE_INTENT with fid=" DFID " -> mds #%u\n", - PFID(&body->mbo_fid1), tgt->ltd_idx); - - rc = md_intent_lock(tgt->ltd_exp, op_data, it, &req, cb_blocking, - extra_lock_flags); - if (rc) - goto out_free_op_data; - - /* - * LLite needs LOOKUP lock to track dentry revocation in order to - * maintain dcache consistency. Thus drop UPDATE|PERM lock here - * and put LOOKUP in request. 
- */ - if (it->it_lock_mode != 0) { - it->it_remote_lock_handle = - it->it_lock_handle; - it->it_remote_lock_mode = it->it_lock_mode; - } - - if (pmode) { - it->it_lock_handle = plock.cookie; - it->it_lock_mode = pmode; - } - -out_free_op_data: - kfree(op_data); -out: - if (rc && pmode) - ldlm_lock_decref(&plock, pmode); - - ptlrpc_req_finished(*reqp); - *reqp = req; - return rc; -} - -int lmv_revalidate_slaves(struct obd_export *exp, - const struct lmv_stripe_md *lsm, - ldlm_blocking_callback cb_blocking, - int extra_lock_flags) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct ptlrpc_request *req = NULL; - struct mdt_body *body; - struct md_op_data *op_data; - int rc = 0, i; - - /** - * revalidate slaves has some problems, temporarily return, - * we may not need that - */ - op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) - return -ENOMEM; - - /** - * Loop over the stripe information, check validity and update them - * from MDS if needed. - */ - for (i = 0; i < lsm->lsm_md_stripe_count; i++) { - struct lookup_intent it = { .it_op = IT_GETATTR }; - struct lustre_handle *lockh = NULL; - struct lmv_tgt_desc *tgt = NULL; - struct inode *inode; - struct lu_fid fid; - - fid = lsm->lsm_md_oinfo[i].lmo_fid; - inode = lsm->lsm_md_oinfo[i].lmo_root; - - /* - * Prepare op_data for revalidating. Note that @fid2 shluld be - * defined otherwise it will go to server and take new lock - * which is not needed here. 
- */ - memset(op_data, 0, sizeof(*op_data)); - op_data->op_fid1 = fid; - op_data->op_fid2 = fid; - - tgt = lmv_locate_mds(lmv, op_data, &fid); - if (IS_ERR(tgt)) { - rc = PTR_ERR(tgt); - goto cleanup; - } - - CDEBUG(D_INODE, "Revalidate slave " DFID " -> mds #%u\n", - PFID(&fid), tgt->ltd_idx); - - if (req) { - ptlrpc_req_finished(req); - req = NULL; - } - - rc = md_intent_lock(tgt->ltd_exp, op_data, &it, &req, - cb_blocking, extra_lock_flags); - if (rc < 0) - goto cleanup; - - lockh = (struct lustre_handle *)&it.it_lock_handle; - if (rc > 0 && !req) { - /* slave inode is still valid */ - CDEBUG(D_INODE, "slave " DFID " is still valid.\n", - PFID(&fid)); - rc = 0; - } else { - /* refresh slave from server */ - body = req_capsule_server_get(&req->rq_pill, - &RMF_MDT_BODY); - if (!body) { - if (it.it_lock_mode && lockh) { - ldlm_lock_decref(lockh, it.it_lock_mode); - it.it_lock_mode = 0; - } - - rc = -ENOENT; - goto cleanup; - } - - i_size_write(inode, body->mbo_size); - inode->i_blocks = body->mbo_blocks; - set_nlink(inode, body->mbo_nlink); - LTIME_S(inode->i_atime) = body->mbo_atime; - LTIME_S(inode->i_ctime) = body->mbo_ctime; - LTIME_S(inode->i_mtime) = body->mbo_mtime; - } - - md_set_lock_data(tgt->ltd_exp, lockh, inode, NULL); - - if (it.it_lock_mode && lockh) { - ldlm_lock_decref(lockh, it.it_lock_mode); - it.it_lock_mode = 0; - } - } - -cleanup: - if (req) - ptlrpc_req_finished(req); - - kfree(op_data); - return rc; -} - -/* - * IT_OPEN is intended to open (and create, possible) an object. Parent (pid) - * may be split dir. 
- */ -static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data, - struct lookup_intent *it, - struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, - __u64 extra_lock_flags) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - struct mdt_body *body; - int rc; - - if (it->it_flags & MDS_OPEN_BY_FID) { - LASSERT(fid_is_sane(&op_data->op_fid2)); - - /* - * for striped directory, we can't know parent stripe fid - * without name, but we can set it to child fid, and MDT - * will obtain it from linkea in open in such case. - */ - if (op_data->op_mea1) - op_data->op_fid1 = op_data->op_fid2; - - tgt = lmv_find_target(lmv, &op_data->op_fid2); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - op_data->op_mds = tgt->ltd_idx; - } else { - LASSERT(fid_is_sane(&op_data->op_fid1)); - LASSERT(fid_is_zero(&op_data->op_fid2)); - LASSERT(op_data->op_name); - - tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - } - - /* If it is ready to open the file by FID, do not need - * allocate FID at all, otherwise it will confuse MDT - */ - if ((it->it_op & IT_CREAT) && !(it->it_flags & MDS_OPEN_BY_FID)) { - /* - * For lookup(IT_CREATE) cases allocate new fid and setup FLD - * for it. - */ - rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); - if (rc != 0) - return rc; - } - - CDEBUG(D_INODE, "OPEN_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%u\n", - PFID(&op_data->op_fid1), - PFID(&op_data->op_fid2), op_data->op_name, tgt->ltd_idx); - - rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp, cb_blocking, - extra_lock_flags); - if (rc != 0) - return rc; - /* - * Nothing is found, do not access body->mbo_fid1 as it is zero and thus - * pointless. 
- */ - if ((it->it_disposition & DISP_LOOKUP_NEG) && - !(it->it_disposition & DISP_OPEN_CREATE) && - !(it->it_disposition & DISP_OPEN_OPEN)) - return rc; - - body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (!body) - return -EPROTO; - - /* Not cross-ref case, just get out of here. */ - if (unlikely((body->mbo_valid & OBD_MD_MDS))) { - rc = lmv_intent_remote(exp, it, &op_data->op_fid1, reqp, - cb_blocking, extra_lock_flags); - if (rc != 0) - return rc; - - body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (!body) - return -EPROTO; - } - - return rc; -} - -/* - * Handler for: getattr, lookup and revalidate cases. - */ -static int lmv_intent_lookup(struct obd_export *exp, - struct md_op_data *op_data, - struct lookup_intent *it, - struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, - __u64 extra_lock_flags) -{ - struct lmv_stripe_md *lsm = op_data->op_mea1; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt = NULL; - struct mdt_body *body; - int rc = 0; - - /* - * If it returns ERR_PTR(-EBADFD) then it is an unknown hash type - * it will try all stripes to locate the object - */ - tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); - if (IS_ERR(tgt) && (PTR_ERR(tgt) != -EBADFD)) - return PTR_ERR(tgt); - - /* - * Both migrating dir and unknown hash dir need to try - * all of sub-stripes - */ - if (lsm && !lmv_is_known_hash_type(lsm->lsm_md_hash_type)) { - struct lmv_oinfo *oinfo = &lsm->lsm_md_oinfo[0]; - - op_data->op_fid1 = oinfo->lmo_fid; - op_data->op_mds = oinfo->lmo_mds; - tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - } - - if (!fid_is_sane(&op_data->op_fid2)) - fid_zero(&op_data->op_fid2); - - CDEBUG(D_INODE, "LOOKUP_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%u lsm=%p lsm_magic=%x\n", - PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), - op_data->op_name ? 
op_data->op_name : "", - tgt->ltd_idx, lsm, !lsm ? -1 : lsm->lsm_md_magic); - - op_data->op_bias &= ~MDS_CROSS_REF; - - rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp, cb_blocking, - extra_lock_flags); - if (rc < 0) - return rc; - - if (!*reqp) { - /* - * If RPC happens, lsm information will be revalidated - * during update_inode process (see ll_update_lsm_md) - */ - if (op_data->op_mea2) { - rc = lmv_revalidate_slaves(exp, op_data->op_mea2, - cb_blocking, - extra_lock_flags); - if (rc != 0) - return rc; - } - return rc; - } else if (it_disposition(it, DISP_LOOKUP_NEG) && lsm && - lmv_need_try_all_stripes(lsm)) { - /* - * For migrating and unknown hash type directory, it will - * try to target the entry on other stripes - */ - int stripe_index; - - for (stripe_index = 1; - stripe_index < lsm->lsm_md_stripe_count && - it_disposition(it, DISP_LOOKUP_NEG); stripe_index++) { - struct lmv_oinfo *oinfo; - - /* release the previous request */ - ptlrpc_req_finished(*reqp); - it->it_request = NULL; - *reqp = NULL; - - oinfo = &lsm->lsm_md_oinfo[stripe_index]; - tgt = lmv_find_target(lmv, &oinfo->lmo_fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - CDEBUG(D_INODE, "Try other stripes " DFID "\n", - PFID(&oinfo->lmo_fid)); - - op_data->op_fid1 = oinfo->lmo_fid; - it->it_disposition &= ~DISP_ENQ_COMPLETE; - rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp, - cb_blocking, extra_lock_flags); - if (rc) - return rc; - } - } - - if (!it_has_reply_body(it)) - return 0; - - /* - * MDS has returned success. Probably name has been resolved in - * remote inode. Let's check this. - */ - body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (!body) - return -EPROTO; - - /* Not cross-ref case, just get out of here. 
*/ - if (unlikely((body->mbo_valid & OBD_MD_MDS))) { - rc = lmv_intent_remote(exp, it, NULL, reqp, cb_blocking, - extra_lock_flags); - if (rc != 0) - return rc; - body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (!body) - return -EPROTO; - } - - return rc; -} - -int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data, - struct lookup_intent *it, struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, - __u64 extra_lock_flags) -{ - int rc; - - LASSERT(fid_is_sane(&op_data->op_fid1)); - - CDEBUG(D_INODE, "INTENT LOCK '%s' for " DFID " '%*s' on " DFID "\n", - LL_IT2STR(it), PFID(&op_data->op_fid2), - (int)op_data->op_namelen, op_data->op_name, - PFID(&op_data->op_fid1)); - - if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT | IT_GETXATTR)) - rc = lmv_intent_lookup(exp, op_data, it, reqp, cb_blocking, - extra_lock_flags); - else if (it->it_op & IT_OPEN) - rc = lmv_intent_open(exp, op_data, it, reqp, cb_blocking, - extra_lock_flags); - else - LBUG(); - - if (rc < 0) { - struct lustre_handle lock_handle; - - if (it->it_lock_mode) { - lock_handle.cookie = it->it_lock_handle; - ldlm_lock_decref_and_cancel(&lock_handle, - it->it_lock_mode); - } - - it->it_lock_handle = 0; - it->it_lock_mode = 0; - - if (it->it_remote_lock_mode) { - lock_handle.cookie = it->it_remote_lock_handle; - ldlm_lock_decref_and_cancel(&lock_handle, - it->it_remote_lock_mode); - } - - it->it_remote_lock_handle = 0; - it->it_remote_lock_mode = 0; - } - - return rc; -} diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h deleted file mode 100644 index 68a99170c424..000000000000 --- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h +++ /dev/null @@ -1,164 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef _LMV_INTERNAL_H_ -#define _LMV_INTERNAL_H_ - -#include -#include -#include - -#define LMV_MAX_TGT_COUNT 128 - -#define LL_IT2STR(it) \ - ((it) ? 
ldlm_it2str((it)->it_op) : "0") - -int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data, - struct lookup_intent *it, struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, - __u64 extra_lock_flags); - -int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds); -int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds); -int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp, - struct lu_fid *fid, struct md_op_data *op_data); - -int lmv_revalidate_slaves(struct obd_export *exp, - const struct lmv_stripe_md *lsm, - ldlm_blocking_callback cb_blocking, - int extra_lock_flags); - -static inline struct obd_device *lmv2obd_dev(struct lmv_obd *lmv) -{ - return container_of_safe(lmv, struct obd_device, u.lmv); -} - -static inline struct lmv_tgt_desc * -lmv_get_target(struct lmv_obd *lmv, u32 mdt_idx, int *index) -{ - int i; - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (!lmv->tgts[i]) - continue; - - if (lmv->tgts[i]->ltd_idx == mdt_idx) { - if (index) - *index = i; - return lmv->tgts[i]; - } - } - - return ERR_PTR(-ENODEV); -} - -static inline int -lmv_find_target_index(struct lmv_obd *lmv, const struct lu_fid *fid) -{ - struct lmv_tgt_desc *ltd; - u32 mdt_idx = 0; - int index = 0; - - if (lmv->desc.ld_tgt_count > 1) { - int rc; - - rc = lmv_fld_lookup(lmv, fid, &mdt_idx); - if (rc < 0) - return rc; - } - - ltd = lmv_get_target(lmv, mdt_idx, &index); - if (IS_ERR(ltd)) - return PTR_ERR(ltd); - - return index; -} - -static inline struct lmv_tgt_desc * -lmv_find_target(struct lmv_obd *lmv, const struct lu_fid *fid) -{ - int index; - - index = lmv_find_target_index(lmv, fid); - if (index < 0) - return ERR_PTR(index); - - return lmv->tgts[index]; -} - -static inline int lmv_stripe_md_size(int stripe_count) -{ - struct lmv_stripe_md *lsm; - - return sizeof(*lsm) + stripe_count * sizeof(lsm->lsm_md_oinfo[0]); -} - -int lmv_name_to_stripe_index(enum lmv_hash_type hashtype, - unsigned int 
max_mdt_index, - const char *name, int namelen); - -static inline const struct lmv_oinfo * -lsm_name_to_stripe_info(const struct lmv_stripe_md *lsm, const char *name, - int namelen) -{ - int stripe_index; - - stripe_index = lmv_name_to_stripe_index(lsm->lsm_md_hash_type, - lsm->lsm_md_stripe_count, - name, namelen); - if (stripe_index < 0) - return ERR_PTR(stripe_index); - - LASSERTF(stripe_index < lsm->lsm_md_stripe_count, - "stripe_index = %d, stripe_count = %d hash_type = %x name = %.*s\n", - stripe_index, lsm->lsm_md_stripe_count, - lsm->lsm_md_hash_type, namelen, name); - - return &lsm->lsm_md_oinfo[stripe_index]; -} - -static inline bool lmv_need_try_all_stripes(const struct lmv_stripe_md *lsm) -{ - return !lmv_is_known_hash_type(lsm->lsm_md_hash_type) || - lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION; -} - -struct lmv_tgt_desc -*lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data, - struct lu_fid *fid); -/* lproc_lmv.c */ -void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars); - -extern const struct file_operations lmv_proc_target_fops; - -#endif diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c deleted file mode 100644 index 65f94e6ecaad..000000000000 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ /dev/null @@ -1,3131 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LMV -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "lmv_internal.h" - -static int lmv_check_connect(struct obd_device *obd); - -static void lmv_activate_target(struct lmv_obd *lmv, - struct lmv_tgt_desc *tgt, - int activate) -{ - if (tgt->ltd_active == activate) - return; - - tgt->ltd_active = activate; - lmv->desc.ld_active_tgt_count += (activate ? 1 : -1); - tgt->ltd_exp->exp_obd->obd_inactive = !activate; -} - -/** - * Error codes: - * - * -EINVAL : UUID can't be found in the LMV's target list - * -ENOTCONN: The UUID is found, but the target connection is bad (!) - * -EBADF : The UUID is found, but the OBD of the wrong type (!) 
- */ -static int lmv_set_mdc_active(struct lmv_obd *lmv, const struct obd_uuid *uuid, - int activate) -{ - struct lmv_tgt_desc *tgt = NULL; - struct obd_device *obd; - u32 i; - int rc = 0; - - CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n", - lmv, uuid->uuid, activate); - - spin_lock(&lmv->lmv_lock); - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - tgt = lmv->tgts[i]; - if (!tgt || !tgt->ltd_exp) - continue; - - CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i, - tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie); - - if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) - break; - } - - if (i == lmv->desc.ld_tgt_count) { - rc = -EINVAL; - goto out_lmv_lock; - } - - obd = class_exp2obd(tgt->ltd_exp); - if (!obd) { - rc = -ENOTCONN; - goto out_lmv_lock; - } - - CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n", - obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd, - obd->obd_type->typ_name, i); - LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0); - - if (tgt->ltd_active == activate) { - CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd, - activate ? "" : "in"); - goto out_lmv_lock; - } - - CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd, - activate ? "" : "in"); - lmv_activate_target(lmv, tgt, activate); - - out_lmv_lock: - spin_unlock(&lmv->lmv_lock); - return rc; -} - -static struct obd_uuid *lmv_get_uuid(struct obd_export *exp) -{ - struct lmv_obd *lmv = &exp->exp_obd->u.lmv; - struct lmv_tgt_desc *tgt = lmv->tgts[0]; - - return tgt ? 
obd_get_uuid(tgt->ltd_exp) : NULL; -} - -static int lmv_notify(struct obd_device *obd, struct obd_device *watched, - enum obd_notify_event ev, void *data) -{ - struct obd_connect_data *conn_data; - struct lmv_obd *lmv = &obd->u.lmv; - struct obd_uuid *uuid; - int rc = 0; - - if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) { - CERROR("unexpected notification of %s %s!\n", - watched->obd_type->typ_name, - watched->obd_name); - return -EINVAL; - } - - uuid = &watched->u.cli.cl_target_uuid; - if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) { - /* - * Set MDC as active before notifying the observer, so the - * observer can use the MDC normally. - */ - rc = lmv_set_mdc_active(lmv, uuid, - ev == OBD_NOTIFY_ACTIVE); - if (rc) { - CERROR("%sactivation of %s failed: %d\n", - ev == OBD_NOTIFY_ACTIVE ? "" : "de", - uuid->uuid, rc); - return rc; - } - } else if (ev == OBD_NOTIFY_OCD) { - conn_data = &watched->u.cli.cl_import->imp_connect_data; - /* - * XXX: Make sure that ocd_connect_flags from all targets are - * the same. Otherwise one of MDTs runs wrong version or - * something like this. --umka - */ - obd->obd_self_export->exp_connect_data = *conn_data; - } - - /* - * Pass the notification up the chain. 
- */ - if (obd->obd_observer) - rc = obd_notify(obd->obd_observer, watched, ev, data); - - return rc; -} - -static int lmv_connect(const struct lu_env *env, - struct obd_export **pexp, struct obd_device *obd, - struct obd_uuid *cluuid, struct obd_connect_data *data, - void *localdata) -{ - struct lmv_obd *lmv = &obd->u.lmv; - struct lustre_handle conn = { 0 }; - struct obd_export *exp; - int rc = 0; - - rc = class_connect(&conn, obd, cluuid); - if (rc) { - CERROR("class_connection() returned %d\n", rc); - return rc; - } - - exp = class_conn2export(&conn); - - lmv->connected = 0; - lmv->cluuid = *cluuid; - lmv->conn_data = *data; - - lmv->lmv_tgts_kobj = kobject_create_and_add("target_obds", - &obd->obd_kobj); - rc = lmv_check_connect(obd); - if (rc) - goto out_sysfs; - - *pexp = exp; - - return rc; - -out_sysfs: - if (lmv->lmv_tgts_kobj) - kobject_put(lmv->lmv_tgts_kobj); - - class_disconnect(exp); - - return rc; -} - -static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - u32 i; - int rc = 0; - int change = 0; - - if (lmv->max_easize < easize) { - lmv->max_easize = easize; - change = 1; - } - if (lmv->max_def_easize < def_easize) { - lmv->max_def_easize = def_easize; - change = 1; - } - - if (change == 0) - return 0; - - if (lmv->connected == 0) - return 0; - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - struct lmv_tgt_desc *tgt = lmv->tgts[i]; - - if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) { - CWARN("%s: NULL export for %d\n", obd->obd_name, i); - continue; - } - - rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize); - if (rc) { - CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n", - obd->obd_name, i, rc); - break; - } - } - return rc; -} - -#define MAX_STRING_SIZE 128 - -static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) -{ - struct lmv_obd *lmv = &obd->u.lmv; - struct obd_uuid *cluuid = &lmv->cluuid; - 
struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" }; - struct obd_device *mdc_obd; - struct obd_export *mdc_exp; - struct lu_fld_target target; - int rc; - - mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME, - &obd->obd_uuid); - if (!mdc_obd) { - CERROR("target %s not attached\n", tgt->ltd_uuid.uuid); - return -EINVAL; - } - - CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, cluuid->uuid); - - if (!mdc_obd->obd_set_up) { - CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid); - return -EINVAL; - } - - rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid, - &lmv->conn_data, NULL); - if (rc) { - CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc); - return rc; - } - - /* - * Init fid sequence client for this mdc and add new fld target. - */ - rc = obd_fid_init(mdc_obd, mdc_exp, LUSTRE_SEQ_METADATA); - if (rc) - return rc; - - target.ft_srv = NULL; - target.ft_exp = mdc_exp; - target.ft_idx = tgt->ltd_idx; - - fld_client_add_target(&lmv->lmv_fld, &target); - - rc = obd_register_observer(mdc_obd, obd); - if (rc) { - obd_disconnect(mdc_exp); - CERROR("target %s register_observer error %d\n", - tgt->ltd_uuid.uuid, rc); - return rc; - } - - if (obd->obd_observer) { - /* - * Tell the observer about the new target. 
- */ - rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd, - OBD_NOTIFY_ACTIVE, - (void *)(tgt - lmv->tgts[0])); - if (rc) { - obd_disconnect(mdc_exp); - return rc; - } - } - - tgt->ltd_active = 1; - tgt->ltd_exp = mdc_exp; - lmv->desc.ld_active_tgt_count++; - - md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize); - - CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - atomic_read(&obd->obd_refcount)); - - if (lmv->lmv_tgts_kobj) - /* Even if we failed to create the link, that's fine */ - rc = sysfs_create_link(lmv->lmv_tgts_kobj, &mdc_obd->obd_kobj, - mdc_obd->obd_name); - return 0; -} - -static void lmv_del_target(struct lmv_obd *lmv, int index) -{ - if (!lmv->tgts[index]) - return; - - kfree(lmv->tgts[index]); - lmv->tgts[index] = NULL; -} - -static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, - __u32 index, int gen) -{ - struct lmv_obd *lmv = &obd->u.lmv; - struct obd_device *mdc_obd; - struct lmv_tgt_desc *tgt; - int orig_tgt_count = 0; - int rc = 0; - - CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index); - - mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME, - &obd->obd_uuid); - if (!mdc_obd) { - CERROR("%s: Target %s not attached: rc = %d\n", - obd->obd_name, uuidp->uuid, -EINVAL); - return -EINVAL; - } - - mutex_lock(&lmv->lmv_init_mutex); - - if ((index < lmv->tgts_size) && lmv->tgts[index]) { - tgt = lmv->tgts[index]; - CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n", - obd->obd_name, - obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST); - mutex_unlock(&lmv->lmv_init_mutex); - return -EEXIST; - } - - if (index >= lmv->tgts_size) { - /* We need to reallocate the lmv target array. 
*/ - struct lmv_tgt_desc **newtgts, **old = NULL; - __u32 newsize = 1; - __u32 oldsize = 0; - - while (newsize < index + 1) - newsize <<= 1; - newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); - if (!newtgts) { - mutex_unlock(&lmv->lmv_init_mutex); - return -ENOMEM; - } - - if (lmv->tgts_size) { - memcpy(newtgts, lmv->tgts, - sizeof(*newtgts) * lmv->tgts_size); - old = lmv->tgts; - oldsize = lmv->tgts_size; - } - - lmv->tgts = newtgts; - lmv->tgts_size = newsize; - smp_rmb(); - kfree(old); - - CDEBUG(D_CONFIG, "tgts: %p size: %d\n", lmv->tgts, - lmv->tgts_size); - } - - tgt = kzalloc(sizeof(*tgt), GFP_NOFS); - if (!tgt) { - mutex_unlock(&lmv->lmv_init_mutex); - return -ENOMEM; - } - - mutex_init(&tgt->ltd_fid_mutex); - tgt->ltd_idx = index; - tgt->ltd_uuid = *uuidp; - tgt->ltd_active = 0; - lmv->tgts[index] = tgt; - if (index >= lmv->desc.ld_tgt_count) { - orig_tgt_count = lmv->desc.ld_tgt_count; - lmv->desc.ld_tgt_count = index + 1; - } - - if (!lmv->connected) { - /* lmv_check_connect() will connect this target. 
*/ - mutex_unlock(&lmv->lmv_init_mutex); - return rc; - } - - /* Otherwise let's connect it ourselves */ - mutex_unlock(&lmv->lmv_init_mutex); - rc = lmv_connect_mdc(obd, tgt); - if (rc) { - spin_lock(&lmv->lmv_lock); - if (lmv->desc.ld_tgt_count == index + 1) - lmv->desc.ld_tgt_count = orig_tgt_count; - memset(tgt, 0, sizeof(*tgt)); - spin_unlock(&lmv->lmv_lock); - } else { - int easize = sizeof(struct lmv_stripe_md) + - lmv->desc.ld_tgt_count * sizeof(struct lu_fid); - lmv_init_ea_size(obd->obd_self_export, easize, 0); - } - - return rc; -} - -static int lmv_check_connect(struct obd_device *obd) -{ - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - u32 i; - int rc; - int easize; - - if (lmv->connected) - return 0; - - mutex_lock(&lmv->lmv_init_mutex); - if (lmv->connected) { - mutex_unlock(&lmv->lmv_init_mutex); - return 0; - } - - if (lmv->desc.ld_tgt_count == 0) { - mutex_unlock(&lmv->lmv_init_mutex); - CERROR("%s: no targets configured.\n", obd->obd_name); - return -EINVAL; - } - - LASSERT(lmv->tgts); - - if (!lmv->tgts[0]) { - mutex_unlock(&lmv->lmv_init_mutex); - CERROR("%s: no target configured for index 0.\n", - obd->obd_name); - return -EINVAL; - } - - CDEBUG(D_CONFIG, "Time to connect %s to %s\n", - lmv->cluuid.uuid, obd->obd_name); - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - tgt = lmv->tgts[i]; - if (!tgt) - continue; - rc = lmv_connect_mdc(obd, tgt); - if (rc) - goto out_disc; - } - - lmv->connected = 1; - easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC); - lmv_init_ea_size(obd->obd_self_export, easize, 0); - mutex_unlock(&lmv->lmv_init_mutex); - return 0; - - out_disc: - while (i-- > 0) { - int rc2; - - tgt = lmv->tgts[i]; - if (!tgt) - continue; - tgt->ltd_active = 0; - if (tgt->ltd_exp) { - --lmv->desc.ld_active_tgt_count; - rc2 = obd_disconnect(tgt->ltd_exp); - if (rc2) { - CERROR("LMV target %s disconnect on MDC idx %d: error %d\n", - tgt->ltd_uuid.uuid, i, rc2); - } - } - } - - 
mutex_unlock(&lmv->lmv_init_mutex); - return rc; -} - -static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) -{ - struct lmv_obd *lmv = &obd->u.lmv; - struct obd_device *mdc_obd; - int rc; - - mdc_obd = class_exp2obd(tgt->ltd_exp); - - if (mdc_obd) { - mdc_obd->obd_force = obd->obd_force; - mdc_obd->obd_fail = obd->obd_fail; - mdc_obd->obd_no_recov = obd->obd_no_recov; - - if (lmv->lmv_tgts_kobj) - sysfs_remove_link(lmv->lmv_tgts_kobj, - mdc_obd->obd_name); - } - - rc = obd_fid_fini(tgt->ltd_exp->exp_obd); - if (rc) - CERROR("Can't finalize fids factory\n"); - - CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n", - tgt->ltd_exp->exp_obd->obd_name, - tgt->ltd_exp->exp_obd->obd_uuid.uuid); - - obd_register_observer(tgt->ltd_exp->exp_obd, NULL); - rc = obd_disconnect(tgt->ltd_exp); - if (rc) { - if (tgt->ltd_active) { - CERROR("Target %s disconnect error %d\n", - tgt->ltd_uuid.uuid, rc); - } - } - - lmv_activate_target(lmv, tgt, 0); - tgt->ltd_exp = NULL; - return 0; -} - -static int lmv_disconnect(struct obd_export *exp) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - int rc; - u32 i; - - if (!lmv->tgts) - goto out_local; - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) - continue; - - lmv_disconnect_mdc(obd, lmv->tgts[i]); - } - - if (lmv->lmv_tgts_kobj) - kobject_put(lmv->lmv_tgts_kobj); - -out_local: - /* - * This is the case when no real connection is established by - * lmv_check_connect(). 
- */ - if (!lmv->connected) - class_export_put(exp); - rc = class_disconnect(exp); - lmv->connected = 0; - return rc; -} - -static int lmv_fid2path(struct obd_export *exp, int len, void *karg, - void __user *uarg) -{ - struct obd_device *obddev = class_exp2obd(exp); - struct lmv_obd *lmv = &obddev->u.lmv; - struct getinfo_fid2path *gf; - struct lmv_tgt_desc *tgt; - struct getinfo_fid2path *remote_gf = NULL; - int remote_gf_size = 0; - int rc; - - gf = karg; - tgt = lmv_find_target(lmv, &gf->gf_fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - -repeat_fid2path: - rc = obd_iocontrol(OBD_IOC_FID2PATH, tgt->ltd_exp, len, gf, uarg); - if (rc != 0 && rc != -EREMOTE) - goto out_fid2path; - - /* If remote_gf != NULL, it means just building the - * path on the remote MDT, copy this path segment to gf - */ - if (remote_gf) { - struct getinfo_fid2path *ori_gf; - char *ptr; - - ori_gf = karg; - if (strlen(ori_gf->gf_path) + 1 + - strlen(gf->gf_path) + 1 > ori_gf->gf_pathlen) { - rc = -EOVERFLOW; - goto out_fid2path; - } - - ptr = ori_gf->gf_path; - - memmove(ptr + strlen(gf->gf_path) + 1, ptr, - strlen(ori_gf->gf_path)); - - strncpy(ptr, gf->gf_path, strlen(gf->gf_path)); - ptr += strlen(gf->gf_path); - *ptr = '/'; - } - - CDEBUG(D_INFO, "%s: get path %s " DFID " rec: %llu ln: %u\n", - tgt->ltd_exp->exp_obd->obd_name, - gf->gf_path, PFID(&gf->gf_fid), gf->gf_recno, - gf->gf_linkno); - - if (rc == 0) - goto out_fid2path; - - /* sigh, has to go to another MDT to do path building further */ - if (!remote_gf) { - remote_gf_size = sizeof(*remote_gf) + PATH_MAX; - remote_gf = kzalloc(remote_gf_size, GFP_NOFS); - if (!remote_gf) { - rc = -ENOMEM; - goto out_fid2path; - } - remote_gf->gf_pathlen = PATH_MAX; - } - - if (!fid_is_sane(&gf->gf_fid)) { - CERROR("%s: invalid FID " DFID ": rc = %d\n", - tgt->ltd_exp->exp_obd->obd_name, - PFID(&gf->gf_fid), -EINVAL); - rc = -EINVAL; - goto out_fid2path; - } - - tgt = lmv_find_target(lmv, &gf->gf_fid); - if (IS_ERR(tgt)) { - rc = -EINVAL; - 
goto out_fid2path; - } - - remote_gf->gf_fid = gf->gf_fid; - remote_gf->gf_recno = -1; - remote_gf->gf_linkno = -1; - memset(remote_gf->gf_path, 0, remote_gf->gf_pathlen); - gf = remote_gf; - goto repeat_fid2path; - -out_fid2path: - kfree(remote_gf); - return rc; -} - -static int lmv_hsm_req_count(struct lmv_obd *lmv, - const struct hsm_user_request *hur, - const struct lmv_tgt_desc *tgt_mds) -{ - u32 i, nr = 0; - struct lmv_tgt_desc *curr_tgt; - - /* count how many requests must be sent to the given target */ - for (i = 0; i < hur->hur_request.hr_itemcount; i++) { - curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid); - if (IS_ERR(curr_tgt)) - return PTR_ERR(curr_tgt); - if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) - nr++; - } - return nr; -} - -static int lmv_hsm_req_build(struct lmv_obd *lmv, - struct hsm_user_request *hur_in, - const struct lmv_tgt_desc *tgt_mds, - struct hsm_user_request *hur_out) -{ - int i, nr_out; - struct lmv_tgt_desc *curr_tgt; - - /* build the hsm_user_request for the given target */ - hur_out->hur_request = hur_in->hur_request; - nr_out = 0; - for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) { - curr_tgt = lmv_find_target(lmv, - &hur_in->hur_user_item[i].hui_fid); - if (IS_ERR(curr_tgt)) - return PTR_ERR(curr_tgt); - if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) { - hur_out->hur_user_item[nr_out] = - hur_in->hur_user_item[i]; - nr_out++; - } - } - hur_out->hur_request.hr_itemcount = nr_out; - memcpy(hur_data(hur_out), hur_data(hur_in), - hur_in->hur_request.hr_data_len); - - return 0; -} - -static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len, - struct lustre_kernelcomm *lk, - void __user *uarg) -{ - __u32 i; - - /* unregister request (call from llapi_hsm_copytool_fini) */ - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - struct lmv_tgt_desc *tgt = lmv->tgts[i]; - - if (!tgt || !tgt->ltd_exp) - continue; - - /* best effort: try to clean as much as possible - 
* (continue on error) - */ - obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg); - } - - /* Whatever the result, remove copytool from kuc groups. - * Unreached coordinators will get EPIPE on next requests - * and will unregister automatically. - */ - return libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group); -} - -static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, - struct lustre_kernelcomm *lk, void __user *uarg) -{ - struct file *filp; - __u32 i, j; - int err, rc = 0; - bool any_set = false; - struct kkuc_ct_data kcd = { 0 }; - - /* All or nothing: try to register to all MDS. - * In case of failure, unregister from previous MDS, - * except if it because of inactive target. - */ - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - struct lmv_tgt_desc *tgt = lmv->tgts[i]; - - if (!tgt || !tgt->ltd_exp) - continue; - - err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg); - if (err) { - if (tgt->ltd_active) { - /* permanent error */ - CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n", - tgt->ltd_uuid.uuid, i, cmd, err); - rc = err; - lk->lk_flags |= LK_FLG_STOP; - /* unregister from previous MDS */ - for (j = 0; j < i; j++) { - tgt = lmv->tgts[j]; - - if (!tgt || !tgt->ltd_exp) - continue; - obd_iocontrol(cmd, tgt->ltd_exp, len, - lk, uarg); - } - return rc; - } - /* else: transient error. 
- * kuc will register to the missing MDT when it is back - */ - } else { - any_set = true; - } - } - - if (!any_set) - /* no registration done: return error */ - return -ENOTCONN; - - /* at least one registration done, with no failure */ - filp = fget(lk->lk_wfd); - if (!filp) - return -EBADF; - - kcd.kcd_magic = KKUC_CT_DATA_MAGIC; - kcd.kcd_uuid = lmv->cluuid; - kcd.kcd_archive = lk->lk_data; - - rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, - &kcd, sizeof(kcd)); - if (rc) - fput(filp); - - return rc; -} - -static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void __user *uarg) -{ - struct obd_device *obddev = class_exp2obd(exp); - struct lmv_obd *lmv = &obddev->u.lmv; - struct lmv_tgt_desc *tgt = NULL; - u32 i = 0; - int rc = 0; - int set = 0; - u32 count = lmv->desc.ld_tgt_count; - - if (count == 0) - return -ENOTTY; - - switch (cmd) { - case IOC_OBD_STATFS: { - struct obd_ioctl_data *data = karg; - struct obd_device *mdc_obd; - struct obd_statfs stat_buf = {0}; - __u32 index; - - memcpy(&index, data->ioc_inlbuf2, sizeof(__u32)); - if (index >= count) - return -ENODEV; - - tgt = lmv->tgts[index]; - if (!tgt || !tgt->ltd_active) - return -ENODATA; - - mdc_obd = class_exp2obd(tgt->ltd_exp); - if (!mdc_obd) - return -EINVAL; - - /* copy UUID */ - if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd), - min((int)data->ioc_plen2, - (int)sizeof(struct obd_uuid)))) - return -EFAULT; - - rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - 0); - if (rc) - return rc; - if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int)data->ioc_plen1, - (int)sizeof(stat_buf)))) - return -EFAULT; - break; - } - case OBD_IOC_QUOTACTL: { - struct if_quotactl *qctl = karg; - struct obd_quotactl *oqctl; - - if (qctl->qc_valid == QC_MDTIDX) { - if (count <= qctl->qc_idx) - return -EINVAL; - - tgt = lmv->tgts[qctl->qc_idx]; - if (!tgt || !tgt->ltd_exp) - return -EINVAL; - } else if 
(qctl->qc_valid == QC_UUID) { - for (i = 0; i < count; i++) { - tgt = lmv->tgts[i]; - if (!tgt) - continue; - if (!obd_uuid_equals(&tgt->ltd_uuid, - &qctl->obd_uuid)) - continue; - - if (!tgt->ltd_exp) - return -EINVAL; - - break; - } - } else { - return -EINVAL; - } - - if (i >= count) - return -EAGAIN; - - LASSERT(tgt && tgt->ltd_exp); - oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS); - if (!oqctl) - return -ENOMEM; - - QCTL_COPY(oqctl, qctl); - rc = obd_quotactl(tgt->ltd_exp, oqctl); - if (rc == 0) { - QCTL_COPY(qctl, oqctl); - qctl->qc_valid = QC_MDTIDX; - qctl->obd_uuid = tgt->ltd_uuid; - } - kfree(oqctl); - break; - } - case OBD_IOC_CHANGELOG_SEND: - case OBD_IOC_CHANGELOG_CLEAR: { - struct ioc_changelog *icc = karg; - - if (icc->icc_mdtindex >= count) - return -ENODEV; - - tgt = lmv->tgts[icc->icc_mdtindex]; - if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) - return -ENODEV; - rc = obd_iocontrol(cmd, tgt->ltd_exp, sizeof(*icc), icc, NULL); - break; - } - case LL_IOC_GET_CONNECT_FLAGS: { - tgt = lmv->tgts[0]; - - if (!tgt || !tgt->ltd_exp) - return -ENODATA; - rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); - break; - } - case LL_IOC_FID2MDTIDX: { - struct lu_fid *fid = karg; - int mdt_index; - - rc = lmv_fld_lookup(lmv, fid, &mdt_index); - if (rc) - return rc; - - /* - * Note: this is from llite(see ll_dir_ioctl()), @uarg does not - * point to user space memory for FID2MDTIDX. 
- */ - *(__u32 *)uarg = mdt_index; - break; - } - case OBD_IOC_FID2PATH: { - rc = lmv_fid2path(exp, len, karg, uarg); - break; - } - case LL_IOC_HSM_STATE_GET: - case LL_IOC_HSM_STATE_SET: - case LL_IOC_HSM_ACTION: { - struct md_op_data *op_data = karg; - - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - if (!tgt->ltd_exp) - return -EINVAL; - - rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); - break; - } - case LL_IOC_HSM_PROGRESS: { - const struct hsm_progress_kernel *hpk = karg; - - tgt = lmv_find_target(lmv, &hpk->hpk_fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); - break; - } - case LL_IOC_HSM_REQUEST: { - struct hsm_user_request *hur = karg; - unsigned int reqcount = hur->hur_request.hr_itemcount; - - if (reqcount == 0) - return 0; - - /* if the request is about a single fid - * or if there is a single MDS, no need to split - * the request. - */ - if (reqcount == 1 || count == 1) { - tgt = lmv_find_target(lmv, - &hur->hur_user_item[0].hui_fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); - } else { - /* split fid list to their respective MDS */ - for (i = 0; i < count; i++) { - struct hsm_user_request *req; - size_t reqlen; - int nr, rc1; - - tgt = lmv->tgts[i]; - if (!tgt || !tgt->ltd_exp) - continue; - - nr = lmv_hsm_req_count(lmv, hur, tgt); - if (nr < 0) - return nr; - if (nr == 0) /* nothing for this MDS */ - continue; - - /* build a request with fids for this MDS */ - reqlen = offsetof(typeof(*hur), - hur_user_item[nr]) - + hur->hur_request.hr_data_len; - req = kvzalloc(reqlen, GFP_NOFS); - if (!req) - return -ENOMEM; - - rc1 = lmv_hsm_req_build(lmv, hur, tgt, req); - if (rc1 < 0) - goto hsm_req_err; - - rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen, - req, uarg); -hsm_req_err: - if (rc1 != 0 && rc == 0) - rc = rc1; - kvfree(req); - } - } - break; - } - case LL_IOC_LOV_SWAP_LAYOUTS: { - 
struct md_op_data *op_data = karg; - struct lmv_tgt_desc *tgt1, *tgt2; - - tgt1 = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt1)) - return PTR_ERR(tgt1); - - tgt2 = lmv_find_target(lmv, &op_data->op_fid2); - if (IS_ERR(tgt2)) - return PTR_ERR(tgt2); - - if (!tgt1->ltd_exp || !tgt2->ltd_exp) - return -EINVAL; - - /* only files on same MDT can have their layouts swapped */ - if (tgt1->ltd_idx != tgt2->ltd_idx) - return -EPERM; - - rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg); - break; - } - case LL_IOC_HSM_CT_START: { - struct lustre_kernelcomm *lk = karg; - - if (lk->lk_flags & LK_FLG_STOP) - rc = lmv_hsm_ct_unregister(lmv, cmd, len, lk, uarg); - else - rc = lmv_hsm_ct_register(lmv, cmd, len, lk, uarg); - break; - } - default: - for (i = 0; i < count; i++) { - struct obd_device *mdc_obd; - int err; - - tgt = lmv->tgts[i]; - if (!tgt || !tgt->ltd_exp) - continue; - /* ll_umount_begin() sets force flag but for lmv, not - * mdc. Let's pass it through - */ - mdc_obd = class_exp2obd(tgt->ltd_exp); - mdc_obd->obd_force = obddev->obd_force; - err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); - if (err) { - if (tgt->ltd_active) { - CERROR("%s: error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n", - lmv2obd_dev(lmv)->obd_name, - tgt->ltd_uuid.uuid, i, cmd, err); - if (!rc) - rc = err; - } - } else { - set = 1; - } - } - if (!set && !rc) - rc = -EIO; - } - return rc; -} - -/** - * This is _inode_ placement policy function (not name). - */ -static int lmv_placement_policy(struct obd_device *obd, - struct md_op_data *op_data, u32 *mds) -{ - struct lmv_obd *lmv = &obd->u.lmv; - - LASSERT(mds); - - if (lmv->desc.ld_tgt_count == 1) { - *mds = 0; - return 0; - } - - if (op_data->op_default_stripe_offset != -1) { - *mds = op_data->op_default_stripe_offset; - return 0; - } - - /** - * If stripe_offset is provided during setdirstripe - * (setdirstripe -i xx), xx MDS will be chosen. 
- */ - if (op_data->op_cli_flags & CLI_SET_MEA && op_data->op_data) { - struct lmv_user_md *lum; - - lum = op_data->op_data; - if (le32_to_cpu(lum->lum_stripe_offset) != (__u32)-1) { - *mds = le32_to_cpu(lum->lum_stripe_offset); - } else { - /* - * -1 means default, which will be in the same MDT with - * the stripe - */ - *mds = op_data->op_mds; - lum->lum_stripe_offset = cpu_to_le32(op_data->op_mds); - } - } else { - /* - * Allocate new fid on target according to operation type and - * parent home mds. - */ - *mds = op_data->op_mds; - } - - return 0; -} - -int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds) -{ - struct lmv_tgt_desc *tgt; - int rc; - - tgt = lmv_get_target(lmv, mds, NULL); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - /* - * New seq alloc and FLD setup should be atomic. Otherwise we may find - * on server that seq in new allocated fid is not yet known. - */ - mutex_lock(&tgt->ltd_fid_mutex); - - if (tgt->ltd_active == 0 || !tgt->ltd_exp) { - rc = -ENODEV; - goto out; - } - - /* - * Asking underlaying tgt layer to allocate new fid. 
- */ - rc = obd_fid_alloc(NULL, tgt->ltd_exp, fid, NULL); - if (rc > 0) { - LASSERT(fid_is_sane(fid)); - rc = 0; - } - -out: - mutex_unlock(&tgt->ltd_fid_mutex); - return rc; -} - -int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp, - struct lu_fid *fid, struct md_op_data *op_data) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - u32 mds = 0; - int rc; - - LASSERT(op_data); - LASSERT(fid); - - rc = lmv_placement_policy(obd, op_data, &mds); - if (rc) { - CERROR("Can't get target for allocating fid, rc %d\n", - rc); - return rc; - } - - rc = __lmv_fid_alloc(lmv, fid, mds); - if (rc) { - CERROR("Can't alloc new fid, rc %d\n", rc); - return rc; - } - - return rc; -} - -static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - struct lmv_obd *lmv = &obd->u.lmv; - struct lprocfs_static_vars lvars = { NULL }; - struct lmv_desc *desc; - int rc; - - if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) { - CERROR("LMV setup requires a descriptor\n"); - return -EINVAL; - } - - desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1); - if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) { - CERROR("Lmv descriptor size wrong: %d > %d\n", - (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1)); - return -EINVAL; - } - - lmv->tgts_size = 32U; - lmv->tgts = kcalloc(lmv->tgts_size, sizeof(*lmv->tgts), GFP_NOFS); - if (!lmv->tgts) - return -ENOMEM; - - obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid); - lmv->desc.ld_tgt_count = 0; - lmv->desc.ld_active_tgt_count = 0; - lmv->max_def_easize = 0; - lmv->max_easize = 0; - - spin_lock_init(&lmv->lmv_lock); - mutex_init(&lmv->lmv_init_mutex); - - lprocfs_lmv_init_vars(&lvars); - - lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars); - debugfs_create_file("target_obd", 0444, obd->obd_debugfs_entry, obd, - &lmv_proc_target_fops); - rc = fld_client_init(&lmv->lmv_fld, obd->obd_name, - LUSTRE_CLI_FLD_HASH_DHT); - if (rc) { - CERROR("Can't init FLD, err %d\n", rc); - goto out; - } - - return 0; 
- -out: - return rc; -} - -static int lmv_cleanup(struct obd_device *obd) -{ - struct lmv_obd *lmv = &obd->u.lmv; - - fld_client_fini(&lmv->lmv_fld); - if (lmv->tgts) { - int i; - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (!lmv->tgts[i]) - continue; - lmv_del_target(lmv, i); - } - kfree(lmv->tgts); - lmv->tgts_size = 0; - } - return 0; -} - -static int lmv_process_config(struct obd_device *obd, u32 len, void *buf) -{ - struct lustre_cfg *lcfg = buf; - struct obd_uuid obd_uuid; - int gen; - __u32 index; - int rc; - - switch (lcfg->lcfg_command) { - case LCFG_ADD_MDC: - /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID - * 2:0 3:1 4:lustre-MDT0000-mdc_UUID - */ - if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) { - rc = -EINVAL; - goto out; - } - - obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1)); - - if (sscanf(lustre_cfg_buf(lcfg, 2), "%u", &index) != 1) { - rc = -EINVAL; - goto out; - } - if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", &gen) != 1) { - rc = -EINVAL; - goto out; - } - rc = lmv_add_target(obd, &obd_uuid, index, gen); - goto out; - default: - CERROR("Unknown command: %d\n", lcfg->lcfg_command); - rc = -EINVAL; - goto out; - } -out: - return rc; -} - -static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, - struct obd_statfs *osfs, __u64 max_age, __u32 flags) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - struct obd_statfs *temp; - int rc = 0; - u32 i; - - temp = kzalloc(sizeof(*temp), GFP_NOFS); - if (!temp) - return -ENOMEM; - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) - continue; - - rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp, - max_age, flags); - if (rc) { - CERROR("can't stat MDS #%d (%s), error %d\n", i, - lmv->tgts[i]->ltd_exp->exp_obd->obd_name, - rc); - goto out_free_temp; - } - - if (i == 0) { - *osfs = *temp; - /* If the statfs is from mount, it will needs - * retrieve necessary information from MDT0. 
- * i.e. mount does not need the merged osfs - * from all of MDT. - * And also clients can be mounted as long as - * MDT0 is in service - */ - if (flags & OBD_STATFS_FOR_MDT0) - goto out_free_temp; - } else { - osfs->os_bavail += temp->os_bavail; - osfs->os_blocks += temp->os_blocks; - osfs->os_ffree += temp->os_ffree; - osfs->os_files += temp->os_files; - } - } - -out_free_temp: - kfree(temp); - return rc; -} - -static int lmv_getstatus(struct obd_export *exp, - struct lu_fid *fid) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - - return md_getstatus(lmv->tgts[0]->ltd_exp, fid); -} - -static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid, - u64 obd_md_valid, const char *name, size_t buf_size, - struct ptlrpc_request **req) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - return md_getxattr(tgt->ltd_exp, fid, obd_md_valid, name, buf_size, - req); -} - -static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid, - u64 obd_md_valid, const char *name, - const void *value, size_t value_size, - unsigned int xattr_flags, u32 suppgid, - struct ptlrpc_request **req) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - return md_setxattr(tgt->ltd_exp, fid, obd_md_valid, name, - value, value_size, xattr_flags, suppgid, req); -} - -static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - if (op_data->op_flags & MF_GET_MDT_IDX) { - op_data->op_mds = tgt->ltd_idx; - return 0; - } 
- - return md_getattr(tgt->ltd_exp, op_data, request); -} - -static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - u32 i; - - CDEBUG(D_INODE, "CBDATA for " DFID "\n", PFID(fid)); - - /* - * With DNE every object can have two locks in different namespaces: - * lookup lock in space of MDT storing direntry and update/open lock in - * space of MDT storing inode. - */ - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) - continue; - md_null_inode(lmv->tgts[i]->ltd_exp, fid); - } - - return 0; -} - -static int lmv_close(struct obd_export *exp, struct md_op_data *op_data, - struct md_open_data *mod, struct ptlrpc_request **request) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - CDEBUG(D_INODE, "CLOSE " DFID "\n", PFID(&op_data->op_fid1)); - return md_close(tgt->ltd_exp, op_data, mod, request); -} - -/** - * Choosing the MDT by name or FID in @op_data. - * For non-striped directory, it will locate MDT by fid. - * For striped-directory, it will locate MDT by name. And also - * it will reset op_fid1 with the FID of the chosen stripe. 
- **/ -static struct lmv_tgt_desc * -lmv_locate_target_for_name(struct lmv_obd *lmv, struct lmv_stripe_md *lsm, - const char *name, int namelen, struct lu_fid *fid, - u32 *mds) -{ - const struct lmv_oinfo *oinfo; - struct lmv_tgt_desc *tgt; - - if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NAME_HASH)) { - if (cfs_fail_val >= lsm->lsm_md_stripe_count) - return ERR_PTR(-EBADF); - oinfo = &lsm->lsm_md_oinfo[cfs_fail_val]; - } else { - oinfo = lsm_name_to_stripe_info(lsm, name, namelen); - if (IS_ERR(oinfo)) - return ERR_CAST(oinfo); - } - - if (fid) - *fid = oinfo->lmo_fid; - if (mds) - *mds = oinfo->lmo_mds; - - tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL); - - CDEBUG(D_INFO, "locate on mds %u " DFID "\n", oinfo->lmo_mds, - PFID(&oinfo->lmo_fid)); - return tgt; -} - -/** - * Locate mds by fid or name - * - * For striped directory (lsm != NULL), it will locate the stripe - * by name hash (see lsm_name_to_stripe_info()). Note: if the hash_type - * is unknown, it will return -EBADFD, and lmv_intent_lookup might need - * walk through all of stripes to locate the entry. - * - * For normal direcotry, it will locate MDS by FID directly. - * \param[in] lmv LMV device - * \param[in] op_data client MD stack parameters, name, namelen - * mds_num etc. - * \param[in] fid object FID used to locate MDS. - * - * retval pointer to the lmv_tgt_desc if succeed. - * ERR_PTR(errno) if failed. - */ -struct lmv_tgt_desc* -lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data, - struct lu_fid *fid) -{ - struct lmv_stripe_md *lsm = op_data->op_mea1; - struct lmv_tgt_desc *tgt; - - /* - * During creating VOLATILE file, it should honor the mdt - * index if the file under striped dir is being restored, see - * ct_restore(). 
- */ - if (op_data->op_bias & MDS_CREATE_VOLATILE && - (int)op_data->op_mds != -1) { - int i; - - tgt = lmv_get_target(lmv, op_data->op_mds, NULL); - if (IS_ERR(tgt)) - return tgt; - - if (lsm) { - /* refill the right parent fid */ - for (i = 0; i < lsm->lsm_md_stripe_count; i++) { - struct lmv_oinfo *oinfo; - - oinfo = &lsm->lsm_md_oinfo[i]; - if (oinfo->lmo_mds == op_data->op_mds) { - *fid = oinfo->lmo_fid; - break; - } - } - - if (i == lsm->lsm_md_stripe_count) - *fid = lsm->lsm_md_oinfo[0].lmo_fid; - } - - return tgt; - } - - if (!lsm || !op_data->op_namelen) { - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - return tgt; - - op_data->op_mds = tgt->ltd_idx; - - return tgt; - } - - return lmv_locate_target_for_name(lmv, lsm, op_data->op_name, - op_data->op_namelen, fid, - &op_data->op_mds); -} - -static int lmv_create(struct obd_export *exp, struct md_op_data *op_data, - const void *data, size_t datalen, umode_t mode, - uid_t uid, gid_t gid, kernel_cap_t cap_effective, - __u64 rdev, struct ptlrpc_request **request) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - int rc; - - if (!lmv->desc.ld_active_tgt_count) - return -EIO; - - tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - CDEBUG(D_INODE, "CREATE name '%.*s' on " DFID " -> mds #%x\n", - (int)op_data->op_namelen, op_data->op_name, - PFID(&op_data->op_fid1), op_data->op_mds); - - rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); - if (rc) - return rc; - - if (exp_connect_flags(exp) & OBD_CONNECT_DIR_STRIPE) { - /* - * Send the create request to the MDT where the object - * will be located - */ - tgt = lmv_find_target(lmv, &op_data->op_fid2); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - op_data->op_mds = tgt->ltd_idx; - } else { - CDEBUG(D_CONFIG, "Server doesn't support striped dirs\n"); - } - - CDEBUG(D_INODE, "CREATE obj " DFID " -> mds #%x\n", - PFID(&op_data->op_fid1), 
op_data->op_mds); - - op_data->op_flags |= MF_MDC_CANCEL_FID1; - rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid, - cap_effective, rdev, request); - - if (rc == 0) { - if (!*request) - return rc; - CDEBUG(D_INODE, "Created - " DFID "\n", PFID(&op_data->op_fid2)); - } - return rc; -} - -static int -lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - const union ldlm_policy_data *policy, struct md_op_data *op_data, - struct lustre_handle *lockh, __u64 extra_lock_flags) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - CDEBUG(D_INODE, "ENQUEUE on " DFID "\n", PFID(&op_data->op_fid1)); - - tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - CDEBUG(D_INODE, "ENQUEUE on " DFID " -> mds #%u\n", - PFID(&op_data->op_fid1), tgt->ltd_idx); - - return md_enqueue(tgt->ltd_exp, einfo, policy, op_data, lockh, - extra_lock_flags); -} - -static int -lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **preq) -{ - struct ptlrpc_request *req = NULL; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - struct mdt_body *body; - int rc; - - tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - CDEBUG(D_INODE, "GETATTR_NAME for %*s on " DFID " -> mds #%u\n", - (int)op_data->op_namelen, op_data->op_name, - PFID(&op_data->op_fid1), tgt->ltd_idx); - - rc = md_getattr_name(tgt->ltd_exp, op_data, preq); - if (rc != 0) - return rc; - - body = req_capsule_server_get(&(*preq)->rq_pill, &RMF_MDT_BODY); - if (body->mbo_valid & OBD_MD_MDS) { - struct lu_fid rid = body->mbo_fid1; - - CDEBUG(D_INODE, "Request attrs for " DFID "\n", - PFID(&rid)); - - tgt = lmv_find_target(lmv, &rid); - if (IS_ERR(tgt)) { - ptlrpc_req_finished(*preq); - *preq = NULL; - return PTR_ERR(tgt); - } - - op_data->op_fid1 = rid; - 
op_data->op_valid |= OBD_MD_FLCROSSREF; - op_data->op_namelen = 0; - op_data->op_name = NULL; - rc = md_getattr_name(tgt->ltd_exp, op_data, &req); - ptlrpc_req_finished(*preq); - *preq = req; - } - - return rc; -} - -#define md_op_data_fid(op_data, fl) \ - (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \ - fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \ - fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \ - fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \ - NULL) - -static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt, - struct md_op_data *op_data, int op_tgt, - enum ldlm_mode mode, int bits, int flag) -{ - struct lu_fid *fid = md_op_data_fid(op_data, flag); - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - union ldlm_policy_data policy = { { 0 } }; - int rc = 0; - - if (!fid_is_sane(fid)) - return 0; - - if (!tgt) { - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - } - - if (tgt->ltd_idx != op_tgt) { - CDEBUG(D_INODE, "EARLY_CANCEL on " DFID "\n", PFID(fid)); - policy.l_inodebits.bits = bits; - rc = md_cancel_unused(tgt->ltd_exp, fid, &policy, - mode, LCF_ASYNC, NULL); - } else { - CDEBUG(D_INODE, - "EARLY_CANCEL skip operation target %d on " DFID "\n", - op_tgt, PFID(fid)); - op_data->op_flags |= flag; - rc = 0; - } - - return rc; -} - -/* - * llite passes fid of an target inode in op_data->op_fid1 and id of directory in - * op_data->op_fid2 - */ -static int lmv_link(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - int rc; - - LASSERT(op_data->op_namelen != 0); - - CDEBUG(D_INODE, "LINK " DFID ":%*s to " DFID "\n", - PFID(&op_data->op_fid2), (int)op_data->op_namelen, - op_data->op_name, PFID(&op_data->op_fid1)); - - op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); - op_data->op_fsgid = from_kgid(&init_user_ns, 
current_fsgid()); - op_data->op_cap = current_cap(); - if (op_data->op_mea2) { - struct lmv_stripe_md *lsm = op_data->op_mea2; - const struct lmv_oinfo *oinfo; - - oinfo = lsm_name_to_stripe_info(lsm, op_data->op_name, - op_data->op_namelen); - if (IS_ERR(oinfo)) - return PTR_ERR(oinfo); - - op_data->op_fid2 = oinfo->lmo_fid; - } - - tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - /* - * Cancel UPDATE lock on child (fid1). - */ - op_data->op_flags |= MF_MDC_CANCEL_FID2; - rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX, - MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1); - if (rc != 0) - return rc; - - return md_link(tgt->ltd_exp, op_data, request); -} - -static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data, - const char *old, size_t oldlen, - const char *new, size_t newlen, - struct ptlrpc_request **request) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct obd_export *target_exp; - struct lmv_tgt_desc *src_tgt; - struct lmv_tgt_desc *tgt_tgt; - struct mdt_body *body; - int rc; - - LASSERT(oldlen != 0); - - CDEBUG(D_INODE, "RENAME %.*s in " DFID ":%d to %.*s in " DFID ":%d\n", - (int)oldlen, old, PFID(&op_data->op_fid1), - op_data->op_mea1 ? op_data->op_mea1->lsm_md_stripe_count : 0, - (int)newlen, new, PFID(&op_data->op_fid2), - op_data->op_mea2 ? 
op_data->op_mea2->lsm_md_stripe_count : 0); - - op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); - op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); - op_data->op_cap = current_cap(); - - if (op_data->op_cli_flags & CLI_MIGRATE) { - LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID " DFID "\n", - PFID(&op_data->op_fid3)); - - if (op_data->op_mea1) { - struct lmv_stripe_md *lsm = op_data->op_mea1; - struct lmv_tgt_desc *tmp; - - /* Fix the parent fid for striped dir */ - tmp = lmv_locate_target_for_name(lmv, lsm, old, - oldlen, - &op_data->op_fid1, - NULL); - if (IS_ERR(tmp)) - return PTR_ERR(tmp); - } - - rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); - if (rc) - return rc; - src_tgt = lmv_find_target(lmv, &op_data->op_fid3); - if (IS_ERR(src_tgt)) - return PTR_ERR(src_tgt); - - target_exp = src_tgt->ltd_exp; - } else { - if (op_data->op_mea1) { - struct lmv_stripe_md *lsm = op_data->op_mea1; - - src_tgt = lmv_locate_target_for_name(lmv, lsm, old, - oldlen, - &op_data->op_fid1, - &op_data->op_mds); - } else { - src_tgt = lmv_find_target(lmv, &op_data->op_fid1); - } - if (IS_ERR(src_tgt)) - return PTR_ERR(src_tgt); - - if (op_data->op_mea2) { - struct lmv_stripe_md *lsm = op_data->op_mea2; - - tgt_tgt = lmv_locate_target_for_name(lmv, lsm, new, - newlen, - &op_data->op_fid2, - &op_data->op_mds); - } else { - tgt_tgt = lmv_find_target(lmv, &op_data->op_fid2); - } - if (IS_ERR(tgt_tgt)) - return PTR_ERR(tgt_tgt); - - target_exp = tgt_tgt->ltd_exp; - } - - /* - * LOOKUP lock on src child (fid3) should also be cancelled for - * src_tgt in mdc_rename. - */ - op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3; - - /* - * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its - * own target. - */ - rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx, - LCK_EX, MDS_INODELOCK_UPDATE, - MF_MDC_CANCEL_FID2); - if (rc) - return rc; - /* - * Cancel LOOKUP locks on source child (fid3) for parent tgt_tgt. 
- */ - if (fid_is_sane(&op_data->op_fid3)) { - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - /* Cancel LOOKUP lock on its parent */ - rc = lmv_early_cancel(exp, tgt, op_data, src_tgt->ltd_idx, - LCK_EX, MDS_INODELOCK_LOOKUP, - MF_MDC_CANCEL_FID3); - if (rc) - return rc; - - rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx, - LCK_EX, MDS_INODELOCK_FULL, - MF_MDC_CANCEL_FID3); - if (rc) - return rc; - } - -retry_rename: - /* - * Cancel all the locks on tgt child (fid4). - */ - if (fid_is_sane(&op_data->op_fid4)) { - struct lmv_tgt_desc *tgt; - - rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx, - LCK_EX, MDS_INODELOCK_FULL, - MF_MDC_CANCEL_FID4); - if (rc) - return rc; - - tgt = lmv_find_target(lmv, &op_data->op_fid4); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - /* - * Since the target child might be destroyed, and it might - * become orphan, and we can only check orphan on the local - * MDT right now, so we send rename request to the MDT where - * target child is located. If target child does not exist, - * then it will send the request to the target parent - */ - target_exp = tgt->ltd_exp; - } - - rc = md_rename(target_exp, op_data, old, oldlen, new, newlen, request); - if (rc && rc != -EREMOTE) - return rc; - - body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - if (!body) - return -EPROTO; - - /* Not cross-ref case, just get out of here. 
*/ - if (likely(!(body->mbo_valid & OBD_MD_MDS))) - return rc; - - CDEBUG(D_INODE, "%s: try rename to another MDT for " DFID "\n", - exp->exp_obd->obd_name, PFID(&body->mbo_fid1)); - - op_data->op_fid4 = body->mbo_fid1; - ptlrpc_req_finished(*request); - *request = NULL; - goto retry_rename; -} - -static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data, - void *ea, size_t ealen, struct ptlrpc_request **request) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - CDEBUG(D_INODE, "SETATTR for " DFID ", valid 0x%x\n", - PFID(&op_data->op_fid1), op_data->op_attr.ia_valid); - - op_data->op_flags |= MF_MDC_CANCEL_FID1; - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - return md_setattr(tgt->ltd_exp, op_data, ea, ealen, request); -} - -static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, - struct ptlrpc_request **request) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - return md_sync(tgt->ltd_exp, fid, request); -} - -/** - * Get current minimum entry from striped directory - * - * This function will search the dir entry, whose hash value is the - * closest(>=) to @hash_offset, from all of sub-stripes, and it is - * only being called for striped directory. - * - * \param[in] exp export of LMV - * \param[in] op_data parameters transferred beween client MD stack - * stripe_information will be included in this - * parameter - * \param[in] cb_op ldlm callback being used in enqueue in - * mdc_read_page - * \param[in] hash_offset the hash value, which is used to locate - * minum(closet) dir entry - * \param[in|out] stripe_offset the caller use this to indicate the stripe - * index of last entry, so to avoid hash conflict - * between stripes. 
It will also be used to - * return the stripe index of current dir entry. - * \param[in|out] entp the minum entry and it also is being used - * to input the last dir entry to resolve the - * hash conflict - * - * \param[out] ppage the page which holds the minum entry - * - * \retval = 0 get the entry successfully - * negative errno (< 0) does not get the entry - */ -static int lmv_get_min_striped_entry(struct obd_export *exp, - struct md_op_data *op_data, - struct md_callback *cb_op, - __u64 hash_offset, int *stripe_offset, - struct lu_dirent **entp, - struct page **ppage) -{ - struct lmv_stripe_md *lsm = op_data->op_mea1; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lu_dirent *min_ent = NULL; - struct page *min_page = NULL; - struct lmv_tgt_desc *tgt; - int stripe_count; - int min_idx = 0; - int rc = 0; - int i; - - stripe_count = lsm->lsm_md_stripe_count; - for (i = 0; i < stripe_count; i++) { - __u64 stripe_hash = hash_offset; - struct lu_dirent *ent = NULL; - struct page *page = NULL; - struct lu_dirpage *dp; - - tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds, NULL); - if (IS_ERR(tgt)) { - rc = PTR_ERR(tgt); - goto out; - } - - /* - * op_data will be shared by each stripe, so we need - * reset these value for each stripe - */ - op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid; - op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid; - op_data->op_data = lsm->lsm_md_oinfo[i].lmo_root; -next: - rc = md_read_page(tgt->ltd_exp, op_data, cb_op, stripe_hash, - &page); - if (rc) - goto out; - - dp = page_address(page); - for (ent = lu_dirent_start(dp); ent; - ent = lu_dirent_next(ent)) { - /* Skip dummy entry */ - if (!le16_to_cpu(ent->lde_namelen)) - continue; - - if (le64_to_cpu(ent->lde_hash) < hash_offset) - continue; - - if (le64_to_cpu(ent->lde_hash) == hash_offset && - (*entp == ent || i < *stripe_offset)) - continue; - - /* skip . and .. 
for other stripes */ - if (i && (!strncmp(ent->lde_name, ".", - le16_to_cpu(ent->lde_namelen)) || - !strncmp(ent->lde_name, "..", - le16_to_cpu(ent->lde_namelen)))) - continue; - break; - } - - if (!ent) { - stripe_hash = le64_to_cpu(dp->ldp_hash_end); - - kunmap(page); - put_page(page); - page = NULL; - - /* - * reach the end of current stripe, go to next stripe - */ - if (stripe_hash == MDS_DIR_END_OFF) - continue; - else - goto next; - } - - if (min_ent) { - if (le64_to_cpu(min_ent->lde_hash) > - le64_to_cpu(ent->lde_hash)) { - min_ent = ent; - kunmap(min_page); - put_page(min_page); - min_idx = i; - min_page = page; - } else { - kunmap(page); - put_page(page); - page = NULL; - } - } else { - min_ent = ent; - min_page = page; - min_idx = i; - } - } - -out: - if (*ppage) { - kunmap(*ppage); - put_page(*ppage); - } - *stripe_offset = min_idx; - *entp = min_ent; - *ppage = min_page; - return rc; -} - -/** - * Build dir entry page from a striped directory - * - * This function gets one entry by @offset from a striped directory. It will - * read entries from all of stripes, and choose one closest to the required - * offset(&offset). A few notes - * 1. skip . and .. for non-zero stripes, because there can only have one . - * and .. in a directory. - * 2. op_data will be shared by all of stripes, instead of allocating new - * one, so need to restore before reusing. - * 3. release the entry page if that is not being chosen. - * - * \param[in] exp obd export refer to LMV - * \param[in] op_data hold those MD parameters of read_entry - * \param[in] cb_op ldlm callback being used in enqueue in mdc_read_entry - * \param[out] ldp the entry being read - * \param[out] ppage the page holding the entry. Note: because the entry - * will be accessed in upper layer, so we need hold the - * page until the usages of entry is finished, see - * ll_dir_entry_next. 
- * - * retval =0 if get entry successfully - * <0 cannot get entry - */ -static int lmv_read_striped_page(struct obd_export *exp, - struct md_op_data *op_data, - struct md_callback *cb_op, - __u64 offset, struct page **ppage) -{ - struct inode *master_inode = op_data->op_data; - struct lu_fid master_fid = op_data->op_fid1; - __u64 hash_offset = offset; - __u32 ldp_flags; - struct page *min_ent_page = NULL; - struct page *ent_page = NULL; - struct lu_dirent *min_ent = NULL; - struct lu_dirent *last_ent; - struct lu_dirent *ent; - struct lu_dirpage *dp; - size_t left_bytes; - int ent_idx = 0; - void *area; - int rc; - - /* - * Allocate a page and read entries from all of stripes and fill - * the page by hash order - */ - ent_page = alloc_page(GFP_KERNEL); - if (!ent_page) - return -ENOMEM; - - /* Initialize the entry page */ - dp = kmap(ent_page); - memset(dp, 0, sizeof(*dp)); - dp->ldp_hash_start = cpu_to_le64(offset); - ldp_flags = LDF_COLLIDE; - - area = dp + 1; - left_bytes = PAGE_SIZE - sizeof(*dp); - ent = area; - last_ent = ent; - do { - __u16 ent_size; - - /* Find the minum entry from all sub-stripes */ - rc = lmv_get_min_striped_entry(exp, op_data, cb_op, hash_offset, - &ent_idx, &min_ent, - &min_ent_page); - if (rc) - goto out; - - /* - * If it can not get minum entry, it means it already reaches - * the end of this directory - */ - if (!min_ent) { - last_ent->lde_reclen = 0; - hash_offset = MDS_DIR_END_OFF; - goto out; - } - - ent_size = le16_to_cpu(min_ent->lde_reclen); - - /* - * the last entry lde_reclen is 0, but it might not - * the end of this entry of this temporay entry - */ - if (!ent_size) - ent_size = lu_dirent_calc_size( - le16_to_cpu(min_ent->lde_namelen), - le32_to_cpu(min_ent->lde_attrs)); - if (ent_size > left_bytes) { - last_ent->lde_reclen = cpu_to_le16(0); - hash_offset = le64_to_cpu(min_ent->lde_hash); - goto out; - } - - memcpy(ent, min_ent, ent_size); - - /* - * Replace . with master FID and Replace .. 
with the parent FID - * of master object - */ - if (!strncmp(ent->lde_name, ".", - le16_to_cpu(ent->lde_namelen)) && - le16_to_cpu(ent->lde_namelen) == 1) - fid_cpu_to_le(&ent->lde_fid, &master_fid); - else if (!strncmp(ent->lde_name, "..", - le16_to_cpu(ent->lde_namelen)) && - le16_to_cpu(ent->lde_namelen) == 2) - fid_cpu_to_le(&ent->lde_fid, &op_data->op_fid3); - - left_bytes -= ent_size; - ent->lde_reclen = cpu_to_le16(ent_size); - last_ent = ent; - ent = (void *)ent + ent_size; - hash_offset = le64_to_cpu(min_ent->lde_hash); - if (hash_offset == MDS_DIR_END_OFF) { - last_ent->lde_reclen = 0; - break; - } - } while (1); -out: - if (min_ent_page) { - kunmap(min_ent_page); - put_page(min_ent_page); - } - - if (unlikely(rc)) { - __free_page(ent_page); - ent_page = NULL; - } else { - if (ent == area) - ldp_flags |= LDF_EMPTY; - dp->ldp_flags |= cpu_to_le32(ldp_flags); - dp->ldp_hash_end = cpu_to_le64(hash_offset); - } - - /* - * We do not want to allocate md_op_data during each - * dir entry reading, so op_data will be shared by every stripe, - * then we need to restore it back to original value before - * return to the upper layer - */ - op_data->op_fid1 = master_fid; - op_data->op_fid2 = master_fid; - op_data->op_data = master_inode; - - *ppage = ent_page; - - return rc; -} - -static int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data, - struct md_callback *cb_op, __u64 offset, - struct page **ppage) -{ - struct lmv_stripe_md *lsm = op_data->op_mea1; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - if (unlikely(lsm)) - return lmv_read_striped_page(exp, op_data, cb_op, offset, ppage); - - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - return md_read_page(tgt->ltd_exp, op_data, cb_op, offset, ppage); -} - -/** - * Unlink a file/directory - * - * Unlink a file or directory under the parent dir. 
The unlink request - * usually will be sent to the MDT where the child is located, but if - * the client does not have the child FID then request will be sent to the - * MDT where the parent is located. - * - * If the parent is a striped directory then it also needs to locate which - * stripe the name of the child is located, and replace the parent FID - * (@op->op_fid1) with the stripe FID. Note: if the stripe is unknown, - * it will walk through all of sub-stripes until the child is being - * unlinked finally. - * - * \param[in] exp export refer to LMV - * \param[in] op_data different parameters transferred beween client - * MD stacks, name, namelen, FIDs etc. - * op_fid1 is the parent FID, op_fid2 is the child - * FID. - * \param[out] request point to the request of unlink. - * - * retval 0 if succeed - * negative errno if failed. - */ -static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - struct lmv_stripe_md *lsm = op_data->op_mea1; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *parent_tgt = NULL; - struct lmv_tgt_desc *tgt = NULL; - struct mdt_body *body; - int stripe_index = 0; - int rc; - -retry_unlink: - /* For striped dir, we need to locate the parent as well */ - if (lsm) { - struct lmv_tgt_desc *tmp; - - LASSERT(op_data->op_name && op_data->op_namelen); - - tmp = lmv_locate_target_for_name(lmv, lsm, - op_data->op_name, - op_data->op_namelen, - &op_data->op_fid1, - &op_data->op_mds); - - /* - * return -EBADFD means unknown hash type, might - * need try all sub-stripe here - */ - if (IS_ERR(tmp) && PTR_ERR(tmp) != -EBADFD) - return PTR_ERR(tmp); - - /* - * Note: both migrating dir and unknown hash dir need to - * try all of sub-stripes, so we need start search the - * name from stripe 0, but migrating dir is already handled - * inside lmv_locate_target_for_name(), so we only check - * unknown hash type directory here - */ - if 
(!lmv_is_known_hash_type(lsm->lsm_md_hash_type)) { - struct lmv_oinfo *oinfo; - - oinfo = &lsm->lsm_md_oinfo[stripe_index]; - - op_data->op_fid1 = oinfo->lmo_fid; - op_data->op_mds = oinfo->lmo_mds; - } - } - -try_next_stripe: - /* Send unlink requests to the MDT where the child is located */ - if (likely(!fid_is_zero(&op_data->op_fid2))) - tgt = lmv_find_target(lmv, &op_data->op_fid2); - else if (lsm) - tgt = lmv_get_target(lmv, op_data->op_mds, NULL); - else - tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); - - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); - op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); - op_data->op_cap = current_cap(); - - /* - * If child's fid is given, cancel unused locks for it if it is from - * another export than parent. - * - * LOOKUP lock for child (fid3) should also be cancelled on parent - * tgt_tgt in mdc_unlink(). - */ - op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3; - - /* - * Cancel FULL locks on child (fid3). - */ - parent_tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(parent_tgt)) - return PTR_ERR(parent_tgt); - - if (parent_tgt != tgt) { - rc = lmv_early_cancel(exp, parent_tgt, op_data, tgt->ltd_idx, - LCK_EX, MDS_INODELOCK_LOOKUP, - MF_MDC_CANCEL_FID3); - } - - rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX, - MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3); - if (rc != 0) - return rc; - - CDEBUG(D_INODE, "unlink with fid=" DFID "/" DFID " -> mds #%u\n", - PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx); - - rc = md_unlink(tgt->ltd_exp, op_data, request); - if (rc != 0 && rc != -EREMOTE && rc != -ENOENT) - return rc; - - /* Try next stripe if it is needed. 
*/ - if (rc == -ENOENT && lsm && lmv_need_try_all_stripes(lsm)) { - struct lmv_oinfo *oinfo; - - stripe_index++; - if (stripe_index >= lsm->lsm_md_stripe_count) - return rc; - - oinfo = &lsm->lsm_md_oinfo[stripe_index]; - - op_data->op_fid1 = oinfo->lmo_fid; - op_data->op_mds = oinfo->lmo_mds; - - ptlrpc_req_finished(*request); - *request = NULL; - - goto try_next_stripe; - } - - body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - if (!body) - return -EPROTO; - - /* Not cross-ref case, just get out of here. */ - if (likely(!(body->mbo_valid & OBD_MD_MDS))) - return rc; - - CDEBUG(D_INODE, "%s: try unlink to another MDT for " DFID "\n", - exp->exp_obd->obd_name, PFID(&body->mbo_fid1)); - - /* This is a remote object, try remote MDT, Note: it may - * try more than 1 time here, Considering following case - * /mnt/lustre is root on MDT0, remote1 is on MDT1 - * 1. Initially A does not know where remote1 is, it send - * unlink RPC to MDT0, MDT0 return -EREMOTE, it will - * resend unlink RPC to MDT1 (retry 1st time). - * - * 2. During the unlink RPC in flight, - * client B mv /mnt/lustre/remote1 /mnt/lustre/remote2 - * and create new remote1, but on MDT0 - * - * 3. MDT1 get unlink RPC(from A), then do remote lock on - * /mnt/lustre, then lookup get fid of remote1, and find - * it is remote dir again, and replay -EREMOTE again. - * - * 4. Then A will resend unlink RPC to MDT0. (retry 2nd times). - * - * In theory, it might try unlimited time here, but it should - * be very rare case. - */ - op_data->op_fid2 = body->mbo_fid1; - ptlrpc_req_finished(*request); - *request = NULL; - - goto retry_unlink; -} - -static int lmv_precleanup(struct obd_device *obd) -{ - fld_client_debugfs_fini(&obd->u.lmv.lmv_fld); - lprocfs_obd_cleanup(obd); - return 0; -} - -/** - * Get by key a value associated with a LMV device. - * - * Dispatch request to lower-layer devices as needed. 
- * - * \param[in] env execution environment for this thread - * \param[in] exp export for the LMV device - * \param[in] keylen length of key identifier - * \param[in] key identifier of key to get value for - * \param[in] vallen size of \a val - * \param[out] val pointer to storage location for value - * - * \retval 0 on success - * \retval negative negated errno on failure - */ -static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val) -{ - struct obd_device *obd; - struct lmv_obd *lmv; - int rc = 0; - - obd = class_exp2obd(exp); - if (!obd) { - CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n", - exp->exp_handle.h_cookie); - return -EINVAL; - } - - lmv = &obd->u.lmv; - if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) { - int i; - - LASSERT(*vallen == sizeof(__u32)); - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - struct lmv_tgt_desc *tgt = lmv->tgts[i]; - - /* - * All tgts should be connected when this gets called. - */ - if (!tgt || !tgt->ltd_exp) - continue; - - if (!obd_get_info(env, tgt->ltd_exp, keylen, key, - vallen, val)) - return 0; - } - return -EINVAL; - } else if (KEY_IS(KEY_MAX_EASIZE) || - KEY_IS(KEY_DEFAULT_EASIZE) || - KEY_IS(KEY_CONN_DATA)) { - /* - * Forwarding this request to first MDS, it should know LOV - * desc. - */ - rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key, - vallen, val); - if (!rc && KEY_IS(KEY_CONN_DATA)) - exp->exp_connect_data = *(struct obd_connect_data *)val; - return rc; - } else if (KEY_IS(KEY_TGT_COUNT)) { - *((int *)val) = lmv->desc.ld_tgt_count; - return 0; - } - - CDEBUG(D_IOCTL, "Invalid key\n"); - return -EINVAL; -} - -/** - * Asynchronously set by key a value associated with a LMV device. - * - * Dispatch request to lower-layer devices as needed. 
- * - * \param[in] env execution environment for this thread - * \param[in] exp export for the LMV device - * \param[in] keylen length of key identifier - * \param[in] key identifier of key to store value for - * \param[in] vallen size of value to store - * \param[in] val pointer to data to be stored - * \param[in] set optional list of related ptlrpc requests - * - * \retval 0 on success - * \retval negative negated errno on failure - */ -static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, - u32 keylen, void *key, u32 vallen, - void *val, struct ptlrpc_request_set *set) -{ - struct lmv_tgt_desc *tgt; - struct obd_device *obd; - struct lmv_obd *lmv; - int rc = 0; - - obd = class_exp2obd(exp); - if (!obd) { - CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n", - exp->exp_handle.h_cookie); - return -EINVAL; - } - lmv = &obd->u.lmv; - - if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX) || - KEY_IS(KEY_DEFAULT_EASIZE)) { - int i, err = 0; - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - tgt = lmv->tgts[i]; - - if (!tgt || !tgt->ltd_exp) - continue; - - err = obd_set_info_async(env, tgt->ltd_exp, - keylen, key, vallen, val, set); - if (err && rc == 0) - rc = err; - } - - return rc; - } - - return -EINVAL; -} - -static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm, - const struct lmv_mds_md_v1 *lmm1) -{ - struct lmv_obd *lmv = &exp->exp_obd->u.lmv; - int stripe_count; - int rc = 0; - int cplen; - int i; - - lsm->lsm_md_magic = le32_to_cpu(lmm1->lmv_magic); - lsm->lsm_md_stripe_count = le32_to_cpu(lmm1->lmv_stripe_count); - lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index); - if (OBD_FAIL_CHECK(OBD_FAIL_UNKNOWN_LMV_STRIPE)) - lsm->lsm_md_hash_type = LMV_HASH_TYPE_UNKNOWN; - else - lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type); - lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version); - cplen = strlcpy(lsm->lsm_md_pool_name, lmm1->lmv_pool_name, - 
sizeof(lsm->lsm_md_pool_name)); - - if (cplen >= sizeof(lsm->lsm_md_pool_name)) - return -E2BIG; - - CDEBUG(D_INFO, "unpack lsm count %d, master %d hash_type %d layout_version %d\n", - lsm->lsm_md_stripe_count, lsm->lsm_md_master_mdt_index, - lsm->lsm_md_hash_type, lsm->lsm_md_layout_version); - - stripe_count = le32_to_cpu(lmm1->lmv_stripe_count); - for (i = 0; i < stripe_count; i++) { - fid_le_to_cpu(&lsm->lsm_md_oinfo[i].lmo_fid, - &lmm1->lmv_stripe_fids[i]); - rc = lmv_fld_lookup(lmv, &lsm->lsm_md_oinfo[i].lmo_fid, - &lsm->lsm_md_oinfo[i].lmo_mds); - if (rc) - return rc; - CDEBUG(D_INFO, "unpack fid #%d " DFID "\n", i, - PFID(&lsm->lsm_md_oinfo[i].lmo_fid)); - } - - return rc; -} - -static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp, - const union lmv_mds_md *lmm, size_t lmm_size) -{ - struct lmv_stripe_md *lsm; - bool allocated = false; - int lsm_size, rc; - - LASSERT(lsmp); - - lsm = *lsmp; - /* Free memmd */ - if (lsm && !lmm) { - int i; - - for (i = 0; i < lsm->lsm_md_stripe_count; i++) { - /* - * For migrating inode, the master stripe and master - * object will be the same, so do not need iput, see - * ll_update_lsm_md - */ - if (!(lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && - !i) && lsm->lsm_md_oinfo[i].lmo_root) - iput(lsm->lsm_md_oinfo[i].lmo_root); - } - - kvfree(lsm); - *lsmp = NULL; - return 0; - } - - if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE) - return -EPERM; - - /* Unpack memmd */ - if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 && - le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) { - CERROR("%s: invalid lmv magic %x: rc = %d\n", - exp->exp_obd->obd_name, le32_to_cpu(lmm->lmv_magic), - -EIO); - return -EIO; - } - - if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1) - lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm)); - else - /** - * Unpack default dirstripe(lmv_user_md) to lmv_stripe_md, - * stripecount should be 0 then. 
- */ - lsm_size = lmv_stripe_md_size(0); - - if (!lsm) { - lsm = kvzalloc(lsm_size, GFP_NOFS); - if (!lsm) - return -ENOMEM; - allocated = true; - *lsmp = lsm; - } - - switch (le32_to_cpu(lmm->lmv_magic)) { - case LMV_MAGIC_V1: - rc = lmv_unpack_md_v1(exp, lsm, &lmm->lmv_md_v1); - break; - default: - CERROR("%s: unrecognized magic %x\n", exp->exp_obd->obd_name, - le32_to_cpu(lmm->lmv_magic)); - rc = -EINVAL; - break; - } - - if (rc && allocated) { - kvfree(lsm); - *lsmp = NULL; - lsm_size = rc; - } - return lsm_size; -} - -void lmv_free_memmd(struct lmv_stripe_md *lsm) -{ - lmv_unpackmd(NULL, &lsm, NULL, 0); -} -EXPORT_SYMBOL(lmv_free_memmd); - -static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - union ldlm_policy_data *policy, - enum ldlm_mode mode, enum ldlm_cancel_flags flags, - void *opaque) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - int rc = 0; - int err; - u32 i; - - LASSERT(fid); - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - struct lmv_tgt_desc *tgt = lmv->tgts[i]; - - if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) - continue; - - err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags, - opaque); - if (!rc) - rc = err; - } - return rc; -} - -static int lmv_set_lock_data(struct obd_export *exp, - const struct lustre_handle *lockh, - void *data, __u64 *bits) -{ - struct lmv_obd *lmv = &exp->exp_obd->u.lmv; - struct lmv_tgt_desc *tgt = lmv->tgts[0]; - - if (!tgt || !tgt->ltd_exp) - return -EINVAL; - - return md_set_lock_data(tgt->ltd_exp, lockh, data, bits); -} - -static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, - enum ldlm_type type, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - struct lustre_handle *lockh) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - enum ldlm_mode rc; - int tgt; - u32 i; - - CDEBUG(D_INODE, "Lock match for " DFID "\n", PFID(fid)); - - /* - * With DNE every 
object can have two locks in different namespaces: - * lookup lock in space of MDT storing direntry and update/open lock in - * space of MDT storing inode. Try the MDT that the FID maps to first, - * since this can be easily found, and only try others if that fails. - */ - for (i = 0, tgt = lmv_find_target_index(lmv, fid); - i < lmv->desc.ld_tgt_count; - i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) { - if (tgt < 0) { - CDEBUG(D_HA, "%s: " DFID " is inaccessible: rc = %d\n", - obd->obd_name, PFID(fid), tgt); - tgt = 0; - } - - if (!lmv->tgts[tgt] || !lmv->tgts[tgt]->ltd_exp || - !lmv->tgts[tgt]->ltd_active) - continue; - - rc = md_lock_match(lmv->tgts[tgt]->ltd_exp, flags, fid, - type, policy, mode, lockh); - if (rc) - return rc; - } - - return 0; -} - -static int lmv_get_lustre_md(struct obd_export *exp, - struct ptlrpc_request *req, - struct obd_export *dt_exp, - struct obd_export *md_exp, - struct lustre_md *md) -{ - struct lmv_obd *lmv = &exp->exp_obd->u.lmv; - struct lmv_tgt_desc *tgt = lmv->tgts[0]; - - if (!tgt || !tgt->ltd_exp) - return -EINVAL; - return md_get_lustre_md(tgt->ltd_exp, req, dt_exp, md_exp, md); -} - -static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt = lmv->tgts[0]; - - if (md->lmv) { - lmv_free_memmd(md->lmv); - md->lmv = NULL; - } - if (!tgt || !tgt->ltd_exp) - return -EINVAL; - return md_free_lustre_md(tgt->ltd_exp, md); -} - -static int lmv_set_open_replay_data(struct obd_export *exp, - struct obd_client_handle *och, - struct lookup_intent *it) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, &och->och_fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - return md_set_open_replay_data(tgt->ltd_exp, och, it); -} - -static int lmv_clear_open_replay_data(struct obd_export *exp, - struct obd_client_handle *och) -{ - 
struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, &och->och_fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - return md_clear_open_replay_data(tgt->ltd_exp, och); -} - -static int lmv_intent_getattr_async(struct obd_export *exp, - struct md_enqueue_info *minfo) -{ - struct md_op_data *op_data = &minfo->mi_data; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *ptgt = NULL; - struct lmv_tgt_desc *ctgt = NULL; - - if (!fid_is_sane(&op_data->op_fid2)) - return -EINVAL; - - ptgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); - if (IS_ERR(ptgt)) - return PTR_ERR(ptgt); - - ctgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2); - if (IS_ERR(ctgt)) - return PTR_ERR(ctgt); - - /* - * if child is on remote MDT, we need 2 async RPCs to fetch both LOOKUP - * lock on parent, and UPDATE lock on child MDT, which makes all - * complicated. Considering remote dir is rare case, and not supporting - * it in statahead won't cause any issue, drop its support for now. 
- */ - if (ptgt != ctgt) - return -ENOTSUPP; - - return md_intent_getattr_async(ptgt->ltd_exp, minfo); -} - -static int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, - struct lu_fid *fid, __u64 *bits) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - return PTR_ERR(tgt); - - return md_revalidate_lock(tgt->ltd_exp, it, fid, bits); -} - -static int -lmv_get_fid_from_lsm(struct obd_export *exp, - const struct lmv_stripe_md *lsm, - const char *name, int namelen, struct lu_fid *fid) -{ - const struct lmv_oinfo *oinfo; - - LASSERT(lsm); - oinfo = lsm_name_to_stripe_info(lsm, name, namelen); - if (IS_ERR(oinfo)) - return PTR_ERR(oinfo); - - *fid = oinfo->lmo_fid; - - return 0; -} - -/** - * For lmv, only need to send request to master MDT, and the master MDT will - * process with other slave MDTs. The only exception is Q_GETOQUOTA for which - * we directly fetch data from the slave MDTs. - */ -static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt = lmv->tgts[0]; - int rc = 0; - __u64 curspace = 0, curinodes = 0; - u32 i; - - if (!tgt || !tgt->ltd_exp || !tgt->ltd_active || - !lmv->desc.ld_tgt_count) { - CERROR("master lmv inactive\n"); - return -EIO; - } - - if (oqctl->qc_cmd != Q_GETOQUOTA) - return obd_quotactl(tgt->ltd_exp, oqctl); - - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - int err; - - tgt = lmv->tgts[i]; - - if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) - continue; - - err = obd_quotactl(tgt->ltd_exp, oqctl); - if (err) { - CERROR("getquota on mdt %d failed. 
%d\n", i, err); - if (!rc) - rc = err; - } else { - curspace += oqctl->qc_dqblk.dqb_curspace; - curinodes += oqctl->qc_dqblk.dqb_curinodes; - } - } - oqctl->qc_dqblk.dqb_curspace = curspace; - oqctl->qc_dqblk.dqb_curinodes = curinodes; - - return rc; -} - -static int lmv_merge_attr(struct obd_export *exp, - const struct lmv_stripe_md *lsm, - struct cl_attr *attr, - ldlm_blocking_callback cb_blocking) -{ - int rc, i; - - rc = lmv_revalidate_slaves(exp, lsm, cb_blocking, 0); - if (rc < 0) - return rc; - - for (i = 0; i < lsm->lsm_md_stripe_count; i++) { - struct inode *inode = lsm->lsm_md_oinfo[i].lmo_root; - - CDEBUG(D_INFO, "" DFID " size %llu, blocks %llu nlink %u, atime %lu ctime %lu, mtime %lu.\n", - PFID(&lsm->lsm_md_oinfo[i].lmo_fid), - i_size_read(inode), (unsigned long long)inode->i_blocks, - inode->i_nlink, LTIME_S(inode->i_atime), - LTIME_S(inode->i_ctime), LTIME_S(inode->i_mtime)); - - /* for slave stripe, it needs to subtract nlink for . and .. */ - if (i) - attr->cat_nlink += inode->i_nlink - 2; - else - attr->cat_nlink = inode->i_nlink; - - attr->cat_size += i_size_read(inode); - attr->cat_blocks += inode->i_blocks; - - if (attr->cat_atime < LTIME_S(inode->i_atime)) - attr->cat_atime = LTIME_S(inode->i_atime); - - if (attr->cat_ctime < LTIME_S(inode->i_ctime)) - attr->cat_ctime = LTIME_S(inode->i_ctime); - - if (attr->cat_mtime < LTIME_S(inode->i_mtime)) - attr->cat_mtime = LTIME_S(inode->i_mtime); - } - return 0; -} - -static struct obd_ops lmv_obd_ops = { - .owner = THIS_MODULE, - .setup = lmv_setup, - .cleanup = lmv_cleanup, - .precleanup = lmv_precleanup, - .process_config = lmv_process_config, - .connect = lmv_connect, - .disconnect = lmv_disconnect, - .statfs = lmv_statfs, - .get_info = lmv_get_info, - .set_info_async = lmv_set_info_async, - .notify = lmv_notify, - .get_uuid = lmv_get_uuid, - .iocontrol = lmv_iocontrol, - .quotactl = lmv_quotactl -}; - -static struct md_ops lmv_md_ops = { - .getstatus = lmv_getstatus, - .null_inode = 
lmv_null_inode, - .close = lmv_close, - .create = lmv_create, - .enqueue = lmv_enqueue, - .getattr = lmv_getattr, - .getxattr = lmv_getxattr, - .getattr_name = lmv_getattr_name, - .intent_lock = lmv_intent_lock, - .link = lmv_link, - .rename = lmv_rename, - .setattr = lmv_setattr, - .setxattr = lmv_setxattr, - .sync = lmv_sync, - .read_page = lmv_read_page, - .unlink = lmv_unlink, - .init_ea_size = lmv_init_ea_size, - .cancel_unused = lmv_cancel_unused, - .set_lock_data = lmv_set_lock_data, - .lock_match = lmv_lock_match, - .get_lustre_md = lmv_get_lustre_md, - .free_lustre_md = lmv_free_lustre_md, - .merge_attr = lmv_merge_attr, - .set_open_replay_data = lmv_set_open_replay_data, - .clear_open_replay_data = lmv_clear_open_replay_data, - .intent_getattr_async = lmv_intent_getattr_async, - .revalidate_lock = lmv_revalidate_lock, - .get_fid_from_lsm = lmv_get_fid_from_lsm, - .unpackmd = lmv_unpackmd, -}; - -static int __init lmv_init(void) -{ - struct lprocfs_static_vars lvars; - int rc; - - lprocfs_lmv_init_vars(&lvars); - - rc = libcfs_setup(); - if (rc) - return rc; - - return class_register_type(&lmv_obd_ops, &lmv_md_ops, - LUSTRE_LMV_NAME, NULL); -} - -static void lmv_exit(void) -{ - class_unregister_type(LUSTRE_LMV_NAME); -} - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Lustre Logical Metadata Volume"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(lmv_init); -module_exit(lmv_exit); diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c deleted file mode 100644 index 30727b7acccc..000000000000 --- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c +++ /dev/null @@ -1,173 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include "lmv_internal.h" - -static ssize_t numobd_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct lmv_desc *desc; - - desc = &dev->u.lmv.desc; - return sprintf(buf, "%u\n", desc->ld_tgt_count); -} -LUSTRE_RO_ATTR(numobd); - -static ssize_t activeobd_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct lmv_desc *desc; - - desc = &dev->u.lmv.desc; - return sprintf(buf, "%u\n", desc->ld_active_tgt_count); -} -LUSTRE_RO_ATTR(activeobd); - -static int lmv_desc_uuid_seq_show(struct seq_file *m, void *v) -{ - struct obd_device *dev = (struct obd_device *)m->private; - struct lmv_obd *lmv; - - LASSERT(dev); - lmv = &dev->u.lmv; - seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid); - return 0; -} - -LPROC_SEQ_FOPS_RO(lmv_desc_uuid); - -static void *lmv_tgt_seq_start(struct seq_file *p, loff_t *pos) -{ - struct obd_device *dev = p->private; - struct lmv_obd *lmv = &dev->u.lmv; - - while (*pos < lmv->tgts_size) { - if (lmv->tgts[*pos]) - return lmv->tgts[*pos]; - ++*pos; - } - - return NULL; -} - -static void lmv_tgt_seq_stop(struct seq_file *p, void *v) -{ -} - -static void *lmv_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos) -{ - struct obd_device *dev = p->private; - struct lmv_obd *lmv = &dev->u.lmv; - - ++*pos; - while (*pos < lmv->tgts_size) { - if (lmv->tgts[*pos]) - return lmv->tgts[*pos]; - ++*pos; - } - - return NULL; -} - -static int lmv_tgt_seq_show(struct seq_file *p, void *v) -{ - struct lmv_tgt_desc *tgt = v; - - if (!tgt) - return 0; - seq_printf(p, "%u: %s %sACTIVE\n", - tgt->ltd_idx, tgt->ltd_uuid.uuid, - tgt->ltd_active ? 
"" : "IN"); - return 0; -} - -static const struct seq_operations lmv_tgt_sops = { - .start = lmv_tgt_seq_start, - .stop = lmv_tgt_seq_stop, - .next = lmv_tgt_seq_next, - .show = lmv_tgt_seq_show, -}; - -static int lmv_target_seq_open(struct inode *inode, struct file *file) -{ - struct seq_file *seq; - int rc; - - rc = seq_open(file, &lmv_tgt_sops); - if (rc) - return rc; - - seq = file->private_data; - seq->private = inode->i_private; - - return 0; -} - -static struct lprocfs_vars lprocfs_lmv_obd_vars[] = { - { "desc_uuid", &lmv_desc_uuid_fops, NULL, 0 }, - { NULL } -}; - -const struct file_operations lmv_proc_target_fops = { - .owner = THIS_MODULE, - .open = lmv_target_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static struct attribute *lmv_attrs[] = { - &lustre_attr_activeobd.attr, - &lustre_attr_numobd.attr, - NULL, -}; - -static const struct attribute_group lmv_attr_group = { - .attrs = lmv_attrs, -}; - -void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars) -{ - lvars->sysfs_vars = &lmv_attr_group; - lvars->obd_vars = lprocfs_lmv_obd_vars; -} diff --git a/drivers/staging/lustre/lustre/lov/Makefile b/drivers/staging/lustre/lustre/lov/Makefile deleted file mode 100644 index 1ebf0193f61a..000000000000 --- a/drivers/staging/lustre/lustre/lov/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += lov.o -lov-y := lov_obd.o lov_pack.o lov_offset.o lov_merge.o \ - lov_request.o lov_ea.o lov_dev.o lov_object.o lov_page.o \ - lov_lock.o lov_io.o lovsub_dev.o lovsub_object.o lovsub_page.o \ - lovsub_lock.o lov_pool.o lproc_lov.o diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h deleted file mode 100644 index e4f762137a4a..000000000000 --- 
a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h +++ /dev/null @@ -1,639 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Internal interfaces of LOV layer. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#ifndef LOV_CL_INTERNAL_H -#define LOV_CL_INTERNAL_H - -#include -#include -#include "lov_internal.h" - -/** \defgroup lov lov - * Logical object volume layer. This layer implements data striping (raid0). 
- * - * At the lov layer top-entity (object, page, lock, io) is connected to one or - * more sub-entities: top-object, representing a file is connected to a set of - * sub-objects, each representing a stripe, file-level top-lock is connected - * to a set of per-stripe sub-locks, top-page is connected to a (single) - * sub-page, and a top-level IO is connected to a set of (potentially - * concurrent) sub-IO's. - * - * Sub-object, sub-page, and sub-io have well-defined top-object and top-page - * respectively, while a single sub-lock can be part of multiple top-locks. - * - * Reference counting models are different for different types of entities: - * - * - top-object keeps a reference to its sub-objects, and destroys them - * when it is destroyed. - * - * - top-page keeps a reference to its sub-page, and destroys it when it - * is destroyed. - * - * - IO's are not reference counted. - * - * To implement a connection between top and sub entities, lov layer is split - * into two pieces: lov ("upper half"), and lovsub ("bottom half"), both - * implementing full set of cl-interfaces. For example, top-object has vvp and - * lov layers, and it's sub-object has lovsub and osc layers. lovsub layer is - * used to track child-parent relationship. - * - * @{ - */ - -struct lovsub_device; -struct lovsub_object; -struct lovsub_lock; - -enum lov_device_flags { - LOV_DEV_INITIALIZED = 1 << 0 -}; - -/* - * Upper half. - */ - -struct lov_device { - /* - * XXX Locking of lov-private data is missing. - */ - struct cl_device ld_cl; - struct lov_obd *ld_lov; - /** size of lov_device::ld_target[] array */ - __u32 ld_target_nr; - struct lovsub_device **ld_target; - __u32 ld_flags; -}; - -/** - * Layout type. 
- */ -enum lov_layout_type { - LLT_EMPTY, /** empty file without body (mknod + truncate) */ - LLT_RAID0, /** striped file */ - LLT_RELEASED, /** file with no objects (data in HSM) */ - LLT_NR -}; - -static inline char *llt2str(enum lov_layout_type llt) -{ - switch (llt) { - case LLT_EMPTY: - return "EMPTY"; - case LLT_RAID0: - return "RAID0"; - case LLT_RELEASED: - return "RELEASED"; - case LLT_NR: - LBUG(); - } - LBUG(); - return ""; -} - -/** - * lov-specific file state. - * - * lov object has particular layout type, determining how top-object is built - * on top of sub-objects. Layout type can change dynamically. When this - * happens, lov_object::lo_type_guard semaphore is taken in exclusive mode, - * all state pertaining to the old layout type is destroyed, and new state is - * constructed. All object methods take said semaphore in the shared mode, - * providing serialization against transition between layout types. - * - * To avoid multiple `if' or `switch' statements, selecting behavior for the - * current layout type, object methods perform double-dispatch, invoking - * function corresponding to the current layout type. - */ -struct lov_object { - struct cl_object lo_cl; - /** - * Serializes object operations with transitions between layout types. - * - * This semaphore is taken in shared mode by all object methods, and - * is taken in exclusive mode when object type is changed. - * - * \see lov_object::lo_type - */ - struct rw_semaphore lo_type_guard; - /** - * Type of an object. Protected by lov_object::lo_type_guard. - */ - enum lov_layout_type lo_type; - /** - * True if layout is invalid. This bit is cleared when layout lock - * is lost. - */ - bool lo_layout_invalid; - /** - * How many IOs are on going on this object. Layout can be changed - * only if there is no active IO. - */ - atomic_t lo_active_ios; - /** - * Waitq - wait for no one else is using lo_lsm - */ - wait_queue_head_t lo_waitq; - /** - * Layout metadata. NULL if empty layout. 
- */ - struct lov_stripe_md *lo_lsm; - - union lov_layout_state { - struct lov_layout_raid0 { - unsigned int lo_nr; - /** - * When this is true, lov_object::lo_attr contains - * valid up to date attributes for a top-level - * object. This field is reset to 0 when attributes of - * any sub-object change. - */ - int lo_attr_valid; - /** - * Array of sub-objects. Allocated when top-object is - * created (lov_init_raid0()). - * - * Top-object is a strict master of its sub-objects: - * it is created before them, and outlives its - * children (this later is necessary so that basic - * functions like cl_object_top() always - * work). Top-object keeps a reference on every - * sub-object. - * - * When top-object is destroyed (lov_delete_raid0()) - * it releases its reference to a sub-object and waits - * until the latter is finally destroyed. - */ - struct lovsub_object **lo_sub; - /** - * protect lo_sub - */ - spinlock_t lo_sub_lock; - /** - * Cached object attribute, built from sub-object - * attributes. - */ - struct cl_attr lo_attr; - } raid0; - struct lov_layout_state_empty { - } empty; - struct lov_layout_state_released { - } released; - } u; - /** - * Thread that acquired lov_object::lo_type_guard in an exclusive - * mode. - */ - struct task_struct *lo_owner; -}; - -/** - * State lov_lock keeps for each sub-lock. - */ -struct lov_lock_sub { - /** sub-lock itself */ - struct cl_lock sub_lock; - /** Set if the sublock has ever been enqueued, meaning it may - * hold resources of underlying layers - */ - unsigned int sub_is_enqueued:1, - sub_initialized:1; - int sub_stripe; -}; - -/** - * lov-specific lock state. - */ -struct lov_lock { - struct cl_lock_slice lls_cl; - /** Number of sub-locks in this lock */ - int lls_nr; - /** sublock array */ - struct lov_lock_sub lls_sub[0]; -}; - -struct lov_page { - struct cl_page_slice lps_cl; - unsigned int lps_stripe; /* stripe index */ -}; - -/* - * Bottom half. 
- */ - -struct lovsub_device { - struct cl_device acid_cl; - struct cl_device *acid_next; -}; - -struct lovsub_object { - struct cl_object_header lso_header; - struct cl_object lso_cl; - struct lov_object *lso_super; - int lso_index; -}; - -/** - * Lock state at lovsub layer. - */ -struct lovsub_lock { - struct cl_lock_slice lss_cl; -}; - -/** - * Describe the environment settings for sublocks. - */ -struct lov_sublock_env { - const struct lu_env *lse_env; - struct cl_io *lse_io; -}; - -struct lovsub_page { - struct cl_page_slice lsb_cl; -}; - -struct lov_thread_info { - struct cl_object_conf lti_stripe_conf; - struct lu_fid lti_fid; - struct ost_lvb lti_lvb; - struct cl_2queue lti_cl2q; - struct cl_page_list lti_plist; - wait_queue_entry_t lti_waiter; -}; - -/** - * State that lov_io maintains for every sub-io. - */ -struct lov_io_sub { - u16 sub_stripe; - /** - * environment's refcheck. - * - * \see cl_env_get() - */ - u16 sub_refcheck; - /** - * true, iff cl_io_init() was successfully executed against - * lov_io_sub::sub_io. - */ - u16 sub_io_initialized:1, - /** - * True, iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't - * allocated, but borrowed from a per-device emergency pool. - */ - sub_borrowed:1; - /** - * Linkage into a list (hanging off lov_io::lis_active) of all - * sub-io's active for the current IO iteration. - */ - struct list_head sub_linkage; - /** - * sub-io for a stripe. Ideally sub-io's can be stopped and resumed - * independently, with lov acting as a scheduler to maximize overall - * throughput. - */ - struct cl_io *sub_io; - /** - * environment, in which sub-io executes. - */ - struct lu_env *sub_env; -}; - -/** - * IO state private for LOV. - */ -struct lov_io { - /** super-class */ - struct cl_io_slice lis_cl; - /** - * Pointer to the object slice. This is a duplicate of - * lov_io::lis_cl::cis_object. 
- */ - struct lov_object *lis_object; - /** - * Original end-of-io position for this IO, set by the upper layer as - * cl_io::u::ci_rw::pos + cl_io::u::ci_rw::count. lov remembers this, - * changes pos and count to fit IO into a single stripe and uses saved - * value to determine when IO iterations have to stop. - * - * This is used only for CIT_READ and CIT_WRITE io's. - */ - loff_t lis_io_endpos; - - /** - * starting position within a file, for the current io loop iteration - * (stripe), used by ci_io_loop(). - */ - u64 lis_pos; - /** - * end position with in a file, for the current stripe io. This is - * exclusive (i.e., next offset after last byte affected by io). - */ - u64 lis_endpos; - - int lis_stripe_count; - int lis_active_subios; - - /** - * the index of ls_single_subio in ls_subios array - */ - int lis_single_subio_index; - struct cl_io lis_single_subio; - - /** - * size of ls_subios array, actually the highest stripe # - */ - int lis_nr_subios; - struct lov_io_sub *lis_subs; - /** - * List of active sub-io's. 
- */ - struct list_head lis_active; -}; - -struct lov_session { - struct lov_io ls_io; - struct lov_sublock_env ls_subenv; -}; - -extern struct lu_device_type lov_device_type; -extern struct lu_device_type lovsub_device_type; - -extern struct lu_context_key lov_key; -extern struct lu_context_key lov_session_key; - -extern struct kmem_cache *lov_lock_kmem; -extern struct kmem_cache *lov_object_kmem; -extern struct kmem_cache *lov_thread_kmem; -extern struct kmem_cache *lov_session_kmem; - -extern struct kmem_cache *lovsub_lock_kmem; -extern struct kmem_cache *lovsub_object_kmem; - -int lov_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf); -int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf); -int lov_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_io_init(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); - -int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); - -struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio, - int stripe); - -int lov_page_init(const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, pgoff_t index); -int lovsub_page_init(const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, pgoff_t index); -int 
lov_page_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index); -int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index); -struct lu_object *lov_object_alloc(const struct lu_env *env, - const struct lu_object_header *hdr, - struct lu_device *dev); -struct lu_object *lovsub_object_alloc(const struct lu_env *env, - const struct lu_object_header *hdr, - struct lu_device *dev); - -struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov); -int lov_page_stripe(const struct cl_page *page); - -#define lov_foreach_target(lov, var) \ - for (var = 0; var < lov_targets_nr(lov); ++var) - -/***************************************************************************** - * - * Type conversions. - * - * Accessors. - * - */ - -static inline struct lov_session *lov_env_session(const struct lu_env *env) -{ - struct lov_session *ses; - - ses = lu_context_key_get(env->le_ses, &lov_session_key); - LASSERT(ses); - return ses; -} - -static inline struct lov_io *lov_env_io(const struct lu_env *env) -{ - return &lov_env_session(env)->ls_io; -} - -static inline int lov_is_object(const struct lu_object *obj) -{ - return obj->lo_dev->ld_type == &lov_device_type; -} - -static inline int lovsub_is_object(const struct lu_object *obj) -{ - return obj->lo_dev->ld_type == &lovsub_device_type; -} - -static inline struct lu_device *lov2lu_dev(struct lov_device *lov) -{ - return &lov->ld_cl.cd_lu_dev; -} - -static inline struct lov_device *lu2lov_dev(const struct lu_device *d) -{ - LINVRNT(d->ld_type == &lov_device_type); - return container_of(d, struct lov_device, ld_cl.cd_lu_dev); -} - -static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub) -{ - return &lovsub->acid_cl; -} - -static inline struct lu_device *lovsub2lu_dev(struct lovsub_device *lovsub) -{ - return &lovsub2cl_dev(lovsub)->cd_lu_dev; -} - -static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d) 
-{ - LINVRNT(d->ld_type == &lovsub_device_type); - return container_of(d, struct lovsub_device, acid_cl.cd_lu_dev); -} - -static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d) -{ - LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type); - return container_of(d, struct lovsub_device, acid_cl); -} - -static inline struct lu_object *lov2lu(struct lov_object *lov) -{ - return &lov->lo_cl.co_lu; -} - -static inline struct cl_object *lov2cl(struct lov_object *lov) -{ - return &lov->lo_cl; -} - -static inline struct lov_object *lu2lov(const struct lu_object *obj) -{ - LINVRNT(lov_is_object(obj)); - return container_of(obj, struct lov_object, lo_cl.co_lu); -} - -static inline struct lov_object *cl2lov(const struct cl_object *obj) -{ - LINVRNT(lov_is_object(&obj->co_lu)); - return container_of(obj, struct lov_object, lo_cl); -} - -static inline struct lu_object *lovsub2lu(struct lovsub_object *los) -{ - return &los->lso_cl.co_lu; -} - -static inline struct cl_object *lovsub2cl(struct lovsub_object *los) -{ - return &los->lso_cl; -} - -static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj) -{ - LINVRNT(lovsub_is_object(&obj->co_lu)); - return container_of(obj, struct lovsub_object, lso_cl); -} - -static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj) -{ - LINVRNT(lovsub_is_object(obj)); - return container_of(obj, struct lovsub_object, lso_cl.co_lu); -} - -static inline struct lovsub_lock * -cl2lovsub_lock(const struct cl_lock_slice *slice) -{ - LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu)); - return container_of(slice, struct lovsub_lock, lss_cl); -} - -static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock) -{ - const struct cl_lock_slice *slice; - - slice = cl_lock_at(lock, &lovsub_device_type); - LASSERT(slice); - return cl2lovsub_lock(slice); -} - -static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice) -{ - LINVRNT(lov_is_object(&slice->cls_obj->co_lu)); - return 
container_of(slice, struct lov_lock, lls_cl); -} - -static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice) -{ - LINVRNT(lov_is_object(&slice->cpl_obj->co_lu)); - return container_of(slice, struct lov_page, lps_cl); -} - -static inline struct lovsub_page * -cl2lovsub_page(const struct cl_page_slice *slice) -{ - LINVRNT(lovsub_is_object(&slice->cpl_obj->co_lu)); - return container_of(slice, struct lovsub_page, lsb_cl); -} - -static inline struct lov_io *cl2lov_io(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct lov_io *lio; - - lio = container_of(ios, struct lov_io, lis_cl); - LASSERT(lio == lov_env_io(env)); - return lio; -} - -static inline int lov_targets_nr(const struct lov_device *lov) -{ - return lov->ld_lov->desc.ld_tgt_count; -} - -static inline struct lov_thread_info *lov_env_info(const struct lu_env *env) -{ - struct lov_thread_info *info; - - info = lu_context_key_get(&env->le_ctx, &lov_key); - LASSERT(info); - return info; -} - -static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov) -{ - LASSERT(lov->lo_type == LLT_RAID0); - LASSERT(lov->lo_lsm->lsm_magic == LOV_MAGIC || - lov->lo_lsm->lsm_magic == LOV_MAGIC_V3); - return &lov->u.raid0; -} - -/* lov_pack.c */ -int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, - struct lov_user_md __user *lump); - -/** @} lov */ - -#endif diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c deleted file mode 100644 index c7db23472346..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_dev.c +++ /dev/null @@ -1,384 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_device and cl_device_type for LOV layer. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LOV - -/* class_name2obd() */ -#include - -#include "lov_cl_internal.h" -#include "lov_internal.h" - -struct kmem_cache *lov_lock_kmem; -struct kmem_cache *lov_object_kmem; -struct kmem_cache *lov_thread_kmem; -struct kmem_cache *lov_session_kmem; - -struct kmem_cache *lovsub_lock_kmem; -struct kmem_cache *lovsub_object_kmem; - -struct lu_kmem_descr lov_caches[] = { - { - .ckd_cache = &lov_lock_kmem, - .ckd_name = "lov_lock_kmem", - .ckd_size = sizeof(struct lov_lock) - }, - { - .ckd_cache = &lov_object_kmem, - .ckd_name = "lov_object_kmem", - .ckd_size = sizeof(struct lov_object) - }, - { - .ckd_cache = &lov_thread_kmem, - .ckd_name = "lov_thread_kmem", - .ckd_size = sizeof(struct lov_thread_info) - }, - { - .ckd_cache = &lov_session_kmem, - .ckd_name = "lov_session_kmem", - .ckd_size = sizeof(struct lov_session) - }, - { - .ckd_cache = &lovsub_lock_kmem, - .ckd_name = "lovsub_lock_kmem", - .ckd_size = sizeof(struct lovsub_lock) - }, - { - .ckd_cache = &lovsub_object_kmem, - .ckd_name = "lovsub_object_kmem", - .ckd_size = 
sizeof(struct lovsub_object) - }, - { - .ckd_cache = NULL - } -}; - -/***************************************************************************** - * - * Lov device and device type functions. - * - */ - -static void *lov_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct lov_thread_info *info; - - info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS); - if (!info) - info = ERR_PTR(-ENOMEM); - return info; -} - -static void lov_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct lov_thread_info *info = data; - - kmem_cache_free(lov_thread_kmem, info); -} - -struct lu_context_key lov_key = { - .lct_tags = LCT_CL_THREAD, - .lct_init = lov_key_init, - .lct_fini = lov_key_fini -}; - -static void *lov_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct lov_session *info; - - info = kmem_cache_zalloc(lov_session_kmem, GFP_NOFS); - if (!info) - info = ERR_PTR(-ENOMEM); - return info; -} - -static void lov_session_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct lov_session *info = data; - - kmem_cache_free(lov_session_kmem, info); -} - -struct lu_context_key lov_session_key = { - .lct_tags = LCT_SESSION, - .lct_init = lov_session_key_init, - .lct_fini = lov_session_key_fini -}; - -/* type constructor/destructor: lov_type_{init,fini,start,stop}() */ -LU_TYPE_INIT_FINI(lov, &lov_key, &lov_session_key); - -static struct lu_device *lov_device_fini(const struct lu_env *env, - struct lu_device *d) -{ - int i; - struct lov_device *ld = lu2lov_dev(d); - - LASSERT(ld->ld_lov); - if (!ld->ld_target) - return NULL; - - lov_foreach_target(ld, i) { - struct lovsub_device *lsd; - - lsd = ld->ld_target[i]; - if (lsd) { - cl_stack_fini(env, lovsub2cl_dev(lsd)); - ld->ld_target[i] = NULL; - } - } - return NULL; -} - -static int lov_device_init(const struct lu_env *env, struct lu_device *d, - const char *name, struct lu_device *next) -{ - 
struct lov_device *ld = lu2lov_dev(d); - int i; - int rc = 0; - - LASSERT(d->ld_site); - if (!ld->ld_target) - return rc; - - lov_foreach_target(ld, i) { - struct lovsub_device *lsd; - struct cl_device *cl; - struct lov_tgt_desc *desc; - - desc = ld->ld_lov->lov_tgts[i]; - if (!desc) - continue; - - cl = cl_type_setup(env, d->ld_site, &lovsub_device_type, - desc->ltd_obd->obd_lu_dev); - if (IS_ERR(cl)) { - rc = PTR_ERR(cl); - break; - } - lsd = cl2lovsub_dev(cl); - ld->ld_target[i] = lsd; - } - - if (rc) - lov_device_fini(env, d); - else - ld->ld_flags |= LOV_DEV_INITIALIZED; - - return rc; -} - -static struct lu_device *lov_device_free(const struct lu_env *env, - struct lu_device *d) -{ - struct lov_device *ld = lu2lov_dev(d); - - cl_device_fini(lu2cl_dev(d)); - kfree(ld->ld_target); - kfree(ld); - return NULL; -} - -static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev, - __u32 index) -{ - struct lov_device *ld = lu2lov_dev(dev); - - if (ld->ld_target[index]) { - cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index])); - ld->ld_target[index] = NULL; - } -} - -static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) -{ - int result; - __u32 tgt_size; - __u32 sub_size; - - result = 0; - tgt_size = dev->ld_lov->lov_tgt_size; - sub_size = dev->ld_target_nr; - if (sub_size < tgt_size) { - struct lovsub_device **newd; - const size_t sz = sizeof(newd[0]); - - newd = kcalloc(tgt_size, sz, GFP_NOFS); - if (newd) { - if (sub_size > 0) { - memcpy(newd, dev->ld_target, sub_size * sz); - kfree(dev->ld_target); - } - dev->ld_target = newd; - dev->ld_target_nr = tgt_size; - } else { - result = -ENOMEM; - } - } - return result; -} - -static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev, - __u32 index) -{ - struct obd_device *obd = dev->ld_obd; - struct lov_device *ld = lu2lov_dev(dev); - struct lov_tgt_desc *tgt; - struct lovsub_device *lsd; - struct cl_device *cl; - int rc; - - obd_getref(obd); - - tgt = 
obd->u.lov.lov_tgts[index]; - - if (!tgt->ltd_obd->obd_set_up) { - CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid)); - return -EINVAL; - } - - rc = lov_expand_targets(env, ld); - if (rc == 0 && ld->ld_flags & LOV_DEV_INITIALIZED) { - LASSERT(dev->ld_site); - - cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type, - tgt->ltd_obd->obd_lu_dev); - if (!IS_ERR(cl)) { - lsd = cl2lovsub_dev(cl); - ld->ld_target[index] = lsd; - } else { - CERROR("add failed (%d), deleting %s\n", rc, - obd_uuid2str(&tgt->ltd_uuid)); - lov_cl_del_target(env, dev, index); - rc = PTR_ERR(cl); - } - } - obd_putref(obd); - return rc; -} - -static int lov_process_config(const struct lu_env *env, - struct lu_device *d, struct lustre_cfg *cfg) -{ - struct obd_device *obd = d->ld_obd; - int cmd; - int rc; - int gen; - __u32 index; - - obd_getref(obd); - - cmd = cfg->lcfg_command; - rc = lov_process_config_base(d->ld_obd, cfg, &index, &gen); - if (rc == 0) { - switch (cmd) { - case LCFG_LOV_ADD_OBD: - case LCFG_LOV_ADD_INA: - rc = lov_cl_add_target(env, d, index); - if (rc != 0) - lov_del_target(d->ld_obd, index, NULL, 0); - break; - case LCFG_LOV_DEL_OBD: - lov_cl_del_target(env, d, index); - break; - } - } - obd_putref(obd); - return rc; -} - -static const struct lu_device_operations lov_lu_ops = { - .ldo_object_alloc = lov_object_alloc, - .ldo_process_config = lov_process_config, -}; - -static struct lu_device *lov_device_alloc(const struct lu_env *env, - struct lu_device_type *t, - struct lustre_cfg *cfg) -{ - struct lu_device *d; - struct lov_device *ld; - struct obd_device *obd; - int rc; - - ld = kzalloc(sizeof(*ld), GFP_NOFS); - if (!ld) - return ERR_PTR(-ENOMEM); - - cl_device_init(&ld->ld_cl, t); - d = lov2lu_dev(ld); - d->ld_ops = &lov_lu_ops; - - /* setup the LOV OBD */ - obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd); - rc = lov_setup(obd, cfg); - if (rc) { - lov_device_free(env, d); - return ERR_PTR(rc); - } - - ld->ld_lov = &obd->u.lov; - return d; 
-} - -static const struct lu_device_type_operations lov_device_type_ops = { - .ldto_init = lov_type_init, - .ldto_fini = lov_type_fini, - - .ldto_start = lov_type_start, - .ldto_stop = lov_type_stop, - - .ldto_device_alloc = lov_device_alloc, - .ldto_device_free = lov_device_free, - - .ldto_device_init = lov_device_init, - .ldto_device_fini = lov_device_fini -}; - -struct lu_device_type lov_device_type = { - .ldt_tags = LU_DEVICE_CL, - .ldt_name = LUSTRE_LOV_NAME, - .ldt_ops = &lov_device_type_ops, - .ldt_ctx_tags = LCT_CL_THREAD -}; - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c deleted file mode 100644 index c80320ab0858..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_ea.c +++ /dev/null @@ -1,331 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/lov/lov_ea.c - * - * Author: Wang Di - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include - -#include -#include - -#include "lov_internal.h" - -static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes, - __u16 stripe_count) -{ - if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) { - CERROR("bad stripe count %d\n", stripe_count); - lov_dump_lmm_common(D_WARNING, lmm); - return -EINVAL; - } - - if (lmm_oi_id(&lmm->lmm_oi) == 0) { - CERROR("zero object id\n"); - lov_dump_lmm_common(D_WARNING, lmm); - return -EINVAL; - } - - if (lov_pattern(le32_to_cpu(lmm->lmm_pattern)) != LOV_PATTERN_RAID0) { - CERROR("bad striping pattern\n"); - lov_dump_lmm_common(D_WARNING, lmm); - return -EINVAL; - } - - if (lmm->lmm_stripe_size == 0 || - (le32_to_cpu(lmm->lmm_stripe_size) & - (LOV_MIN_STRIPE_SIZE - 1)) != 0) { - CERROR("bad stripe size %u\n", - le32_to_cpu(lmm->lmm_stripe_size)); - lov_dump_lmm_common(D_WARNING, lmm); - return -EINVAL; - } - return 0; -} - -struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count) -{ - size_t oinfo_ptrs_size, lsm_size; - struct lov_stripe_md *lsm; - struct lov_oinfo *loi; - int i; - - LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT); - - oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count; - lsm_size = sizeof(*lsm) + oinfo_ptrs_size; - - lsm = kvzalloc(lsm_size, GFP_NOFS); - if (!lsm) - return NULL; - - for (i = 0; i < stripe_count; i++) { - loi = kmem_cache_zalloc(lov_oinfo_slab, GFP_NOFS); - if (!loi) - goto err; - lsm->lsm_oinfo[i] = loi; - } - lsm->lsm_stripe_count = stripe_count; - return lsm; - -err: - while (--i >= 0) - kmem_cache_free(lov_oinfo_slab, lsm->lsm_oinfo[i]); - kvfree(lsm); - return NULL; -} - -void lsm_free_plain(struct lov_stripe_md *lsm) -{ - __u16 stripe_count = lsm->lsm_stripe_count; - int i; - - for (i = 0; i < stripe_count; i++) - kmem_cache_free(lov_oinfo_slab, lsm->lsm_oinfo[i]); - kvfree(lsm); -} - -/* - * Find minimum stripe maxbytes value. 
For inactive or - * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES. - */ -static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt) -{ - loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; - struct obd_import *imp; - - if (!tgt->ltd_active) - return maxbytes; - - imp = tgt->ltd_obd->u.cli.cl_import; - if (!imp) - return maxbytes; - - spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_FULL && - (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) && - imp->imp_connect_data.ocd_maxbytes > 0) - maxbytes = imp->imp_connect_data.ocd_maxbytes; - - spin_unlock(&imp->imp_lock); - - return maxbytes; -} - -static int lsm_unpackmd_common(struct lov_obd *lov, - struct lov_stripe_md *lsm, - struct lov_mds_md *lmm, - struct lov_ost_data_v1 *objects) -{ - loff_t min_stripe_maxbytes = 0; - unsigned int stripe_count; - struct lov_oinfo *loi; - loff_t lov_bytes; - unsigned int i; - - /* - * This supposes lov_mds_md_v1/v3 first fields are - * are the same - */ - lmm_oi_le_to_cpu(&lsm->lsm_oi, &lmm->lmm_oi); - lsm->lsm_stripe_size = le32_to_cpu(lmm->lmm_stripe_size); - lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern); - lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen); - lsm->lsm_pool_name[0] = '\0'; - - stripe_count = lsm_is_released(lsm) ? 
0 : lsm->lsm_stripe_count; - - for (i = 0; i < stripe_count; i++) { - loi = lsm->lsm_oinfo[i]; - ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi); - loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx); - loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen); - if (lov_oinfo_is_dummy(loi)) - continue; - - if (loi->loi_ost_idx >= lov->desc.ld_tgt_count && - !lov2obd(lov)->obd_process_conf) { - CERROR("%s: OST index %d more than OST count %d\n", - (char *)lov->desc.ld_uuid.uuid, - loi->loi_ost_idx, lov->desc.ld_tgt_count); - lov_dump_lmm_v1(D_WARNING, lmm); - return -EINVAL; - } - - if (!lov->lov_tgts[loi->loi_ost_idx]) { - CERROR("%s: OST index %d missing\n", - (char *)lov->desc.ld_uuid.uuid, - loi->loi_ost_idx); - lov_dump_lmm_v1(D_WARNING, lmm); - continue; - } - - lov_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]); - if (min_stripe_maxbytes == 0 || lov_bytes < min_stripe_maxbytes) - min_stripe_maxbytes = lov_bytes; - } - - if (min_stripe_maxbytes == 0) - min_stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; - - stripe_count = lsm->lsm_stripe_count ?: lov->desc.ld_tgt_count; - lov_bytes = min_stripe_maxbytes * stripe_count; - - if (lov_bytes < min_stripe_maxbytes) /* handle overflow */ - lsm->lsm_maxbytes = MAX_LFS_FILESIZE; - else - lsm->lsm_maxbytes = lov_bytes; - - return 0; -} - -static void -lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno, - loff_t *lov_off, loff_t *swidth) -{ - if (swidth) - *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; -} - -static void -lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno, - loff_t *lov_off, loff_t *swidth) -{ - if (swidth) - *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; -} - -static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes, - __u16 *stripe_count) -{ - if (lmm_bytes < sizeof(*lmm)) { - CERROR("lov_mds_md_v1 too small: %d, need at least %d\n", - lmm_bytes, (int)sizeof(*lmm)); - return -EINVAL; - } - - *stripe_count = 
le16_to_cpu(lmm->lmm_stripe_count); - if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED) - *stripe_count = 0; - - if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V1)) { - CERROR("LOV EA V1 too small: %d, need %d\n", - lmm_bytes, lov_mds_md_size(*stripe_count, LOV_MAGIC_V1)); - lov_dump_lmm_common(D_WARNING, lmm); - return -EINVAL; - } - - return lsm_lmm_verify_common(lmm, lmm_bytes, *stripe_count); -} - -static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md_v1 *lmm) -{ - return lsm_unpackmd_common(lov, lsm, lmm, lmm->lmm_objects); -} - -const struct lsm_operations lsm_v1_ops = { - .lsm_free = lsm_free_plain, - .lsm_stripe_by_index = lsm_stripe_by_index_plain, - .lsm_stripe_by_offset = lsm_stripe_by_offset_plain, - .lsm_lmm_verify = lsm_lmm_verify_v1, - .lsm_unpackmd = lsm_unpackmd_v1, -}; - -static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes, - __u16 *stripe_count) -{ - struct lov_mds_md_v3 *lmm; - - lmm = (struct lov_mds_md_v3 *)lmmv1; - - if (lmm_bytes < sizeof(*lmm)) { - CERROR("lov_mds_md_v3 too small: %d, need at least %d\n", - lmm_bytes, (int)sizeof(*lmm)); - return -EINVAL; - } - - *stripe_count = le16_to_cpu(lmm->lmm_stripe_count); - if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED) - *stripe_count = 0; - - if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V3)) { - CERROR("LOV EA V3 too small: %d, need %d\n", - lmm_bytes, lov_mds_md_size(*stripe_count, LOV_MAGIC_V3)); - lov_dump_lmm_common(D_WARNING, lmm); - return -EINVAL; - } - - return lsm_lmm_verify_common((struct lov_mds_md_v1 *)lmm, lmm_bytes, - *stripe_count); -} - -static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md *lmm) -{ - struct lov_mds_md_v3 *lmm_v3 = (struct lov_mds_md_v3 *)lmm; - size_t cplen = 0; - int rc; - - rc = lsm_unpackmd_common(lov, lsm, lmm, lmm_v3->lmm_objects); - if (rc) - return rc; - - cplen = strlcpy(lsm->lsm_pool_name, lmm_v3->lmm_pool_name, 
- sizeof(lsm->lsm_pool_name)); - if (cplen >= sizeof(lsm->lsm_pool_name)) - return -E2BIG; - - return 0; -} - -const struct lsm_operations lsm_v3_ops = { - .lsm_free = lsm_free_plain, - .lsm_stripe_by_index = lsm_stripe_by_index_plain, - .lsm_stripe_by_offset = lsm_stripe_by_offset_plain, - .lsm_lmm_verify = lsm_lmm_verify_v3, - .lsm_unpackmd = lsm_unpackmd_v3, -}; - -void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm) -{ - CDEBUG(level, "lsm %p, objid " DOSTID ", maxbytes %#llx, magic 0x%08X, stripe_size %u, stripe_count %u, refc: %d, layout_gen %u, pool [" LOV_POOLNAMEF "]\n", - lsm, - POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic, - lsm->lsm_stripe_size, lsm->lsm_stripe_count, - atomic_read(&lsm->lsm_refc), lsm->lsm_layout_gen, - lsm->lsm_pool_name); -} diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h deleted file mode 100644 index 47042f27ca90..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_internal.h +++ /dev/null @@ -1,286 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 
- * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef LOV_INTERNAL_H -#define LOV_INTERNAL_H - -#include -#include - -/* - * If we are unable to get the maximum object size from the OST in - * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using - * the old maximum object size from ext3. - */ -#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL - -struct lov_stripe_md { - atomic_t lsm_refc; - spinlock_t lsm_lock; - pid_t lsm_lock_owner; /* debugging */ - - /* - * maximum possible file size, might change as OSTs status changes, - * e.g. disconnected, deactivated - */ - loff_t lsm_maxbytes; - struct ost_id lsm_oi; - u32 lsm_magic; - u32 lsm_stripe_size; - u32 lsm_pattern; /* RAID0, RAID1, released, ... */ - u16 lsm_stripe_count; - u16 lsm_layout_gen; - char lsm_pool_name[LOV_MAXPOOLNAME + 1]; - struct lov_oinfo *lsm_oinfo[0]; -}; - -static inline bool lsm_is_released(struct lov_stripe_md *lsm) -{ - return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED); -} - -static inline bool lsm_has_objects(struct lov_stripe_md *lsm) -{ - if (!lsm) - return false; - - if (lsm_is_released(lsm)) - return false; - - return true; -} - -struct lsm_operations { - void (*lsm_free)(struct lov_stripe_md *); - void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, loff_t *, - loff_t *); - void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, loff_t *, - loff_t *); - int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes, - u16 *stripe_count); - int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md *lmm); -}; - -extern const struct lsm_operations lsm_v1_ops; -extern const struct lsm_operations lsm_v3_ops; - -static inline const struct lsm_operations *lsm_op_find(int magic) -{ - switch (magic) { - case LOV_MAGIC_V1: - return &lsm_v1_ops; - case LOV_MAGIC_V3: - 
return &lsm_v3_ops; - default: - CERROR("unrecognized lsm_magic %08x\n", magic); - return NULL; - } -} - -/* lov_do_div64(a, b) returns a % b, and a = a / b. - * The 32-bit code is LOV-specific due to knowing about stripe limits in - * order to reduce the divisor to a 32-bit number. If the divisor is - * already a 32-bit value the compiler handles this directly. - */ -#if BITS_PER_LONG == 64 -# define lov_do_div64(n, base) ({ \ - u64 __base = (base); \ - u64 __rem; \ - __rem = ((u64)(n)) % __base; \ - (n) = ((u64)(n)) / __base; \ - __rem; \ -}) -#elif BITS_PER_LONG == 32 -# define lov_do_div64(n, base) ({ \ - u64 __rem; \ - if ((sizeof(base) > 4) && (((base) & 0xffffffff00000000ULL) != 0)) { \ - int __remainder; \ - LASSERTF(!((base) & (LOV_MIN_STRIPE_SIZE - 1)), "64 bit lov " \ - "division %llu / %llu\n", (n), (u64)(base)); \ - __remainder = (n) & (LOV_MIN_STRIPE_SIZE - 1); \ - (n) >>= LOV_MIN_STRIPE_BITS; \ - __rem = do_div(n, (base) >> LOV_MIN_STRIPE_BITS); \ - __rem <<= LOV_MIN_STRIPE_BITS; \ - __rem += __remainder; \ - } else { \ - __rem = do_div(n, base); \ - } \ - __rem; \ -}) -#endif - -#define pool_tgt_size(p) ((p)->pool_obds.op_size) -#define pool_tgt_count(p) ((p)->pool_obds.op_count) -#define pool_tgt_array(p) ((p)->pool_obds.op_array) -#define pool_tgt_rw_sem(p) ((p)->pool_obds.op_rw_sem) - -struct pool_desc { - char pool_name[LOV_MAXPOOLNAME + 1]; - struct ost_pool pool_obds; - atomic_t pool_refcount; - struct rhash_head pool_hash; /* access by poolname */ - union { - struct list_head pool_list; /* serial access */ - struct rcu_head rcu; /* delayed free */ - }; - struct dentry *pool_debugfs_entry; /* file in debugfs */ - struct obd_device *pool_lobd; /* owner */ -}; -int lov_pool_hash_init(struct rhashtable *tbl); -void lov_pool_hash_destroy(struct rhashtable *tbl); - -struct lov_request { - struct obd_info rq_oi; - struct lov_request_set *rq_rqset; - - struct list_head rq_link; - - int rq_idx; /* index in lov->tgts array */ -}; - -struct 
lov_request_set { - struct obd_info *set_oi; - struct obd_device *set_obd; - int set_count; - atomic_t set_completes; - atomic_t set_success; - struct list_head set_list; -}; - -extern struct kmem_cache *lov_oinfo_slab; - -extern struct lu_kmem_descr lov_caches[]; - -#define lov_uuid2str(lv, index) \ - (char *)((lv)->lov_tgts[index]->ltd_uuid.uuid) - -/* lov_merge.c */ -int lov_merge_lvb_kms(struct lov_stripe_md *lsm, - struct ost_lvb *lvb, __u64 *kms_place); - -/* lov_offset.c */ -u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno); -int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, - int stripeno, u64 *u64); -u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, int stripeno); -int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, - u64 start, u64 end, - u64 *obd_start, u64 *obd_end); -int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off); -pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index, - int stripe); - -/* lov_request.c */ -int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, - struct lov_request_set **reqset); -int lov_fini_statfs_set(struct lov_request_set *set); - -/* lov_obd.c */ -void lov_stripe_lock(struct lov_stripe_md *md); -void lov_stripe_unlock(struct lov_stripe_md *md); -void lov_fix_desc(struct lov_desc *desc); -void lov_fix_desc_stripe_size(__u64 *val); -void lov_fix_desc_stripe_count(__u32 *val); -void lov_fix_desc_pattern(__u32 *val); -void lov_fix_desc_qos_maxage(__u32 *val); -__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count); -int lov_connect_obd(struct obd_device *obd, __u32 index, int activate, - struct obd_connect_data *data); -int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg); -int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg, - __u32 *indexp, int *genp); -int lov_del_target(struct obd_device *obd, __u32 index, - struct obd_uuid *uuidp, int gen); - -/* lov_pack.c 
*/ -ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf, - size_t buf_size); -struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm, - size_t lmm_size); -int lov_free_memmd(struct lov_stripe_md **lsmp); - -void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm); -void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm); -void lov_dump_lmm_common(int level, void *lmmp); - -/* lov_ea.c */ -struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count); -void lsm_free_plain(struct lov_stripe_md *lsm); -void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm); - -/* lproc_lov.c */ -extern const struct file_operations lov_proc_target_fops; -void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars); - -/* lov_cl.c */ -extern struct lu_device_type lov_device_type; - -/* ost_pool methods */ -int lov_ost_pool_init(struct ost_pool *op, unsigned int count); -int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count); -int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count); -int lov_ost_pool_remove(struct ost_pool *op, __u32 idx); -int lov_ost_pool_free(struct ost_pool *op); - -/* high level pool methods */ -int lov_pool_new(struct obd_device *obd, char *poolname); -int lov_pool_del(struct obd_device *obd, char *poolname); -int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname); -int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname); -void lov_pool_putref(struct pool_desc *pool); - -static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm) -{ - LASSERT(atomic_read(&lsm->lsm_refc) > 0); - atomic_inc(&lsm->lsm_refc); - return lsm; -} - -static inline bool lov_oinfo_is_dummy(const struct lov_oinfo *loi) -{ - if (unlikely(loi->loi_oi.oi.oi_id == 0 && - loi->loi_oi.oi.oi_seq == 0 && - loi->loi_ost_idx == 0 && - loi->loi_ost_gen == 0)) - return true; - - return false; -} - -static inline struct obd_device *lov2obd(const struct lov_obd *lov) -{ - 
return container_of_safe(lov, struct obd_device, u.lov); -} - -#endif diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c deleted file mode 100644 index b823f8a21856..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_io.c +++ /dev/null @@ -1,1023 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_io for LOV layer. 
- * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -/** \addtogroup lov - * @{ - */ - -static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio, - struct lov_io_sub *sub) -{ - if (sub->sub_io) { - if (sub->sub_io_initialized) { - cl_io_fini(sub->sub_env, sub->sub_io); - sub->sub_io_initialized = 0; - lio->lis_active_subios--; - } - if (sub->sub_stripe == lio->lis_single_subio_index) - lio->lis_single_subio_index = -1; - else if (!sub->sub_borrowed) - kfree(sub->sub_io); - sub->sub_io = NULL; - } - if (!IS_ERR_OR_NULL(sub->sub_env)) { - if (!sub->sub_borrowed) - cl_env_put(sub->sub_env, &sub->sub_refcheck); - sub->sub_env = NULL; - } -} - -static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio, - int stripe, loff_t start, loff_t end) -{ - struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; - struct cl_io *parent = lio->lis_cl.cis_io; - - switch (io->ci_type) { - case CIT_SETATTR: { - io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr; - io->u.ci_setattr.sa_attr_flags = - parent->u.ci_setattr.sa_attr_flags; - io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid; - io->u.ci_setattr.sa_stripe_index = stripe; - io->u.ci_setattr.sa_parent_fid = - parent->u.ci_setattr.sa_parent_fid; - if (cl_io_is_trunc(io)) { - loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size; - - new_size = lov_size_to_stripe(lsm, new_size, stripe); - io->u.ci_setattr.sa_attr.lvb_size = new_size; - } - break; - } - case CIT_DATA_VERSION: { - io->u.ci_data_version.dv_data_version = 0; - io->u.ci_data_version.dv_flags = - parent->u.ci_data_version.dv_flags; - break; - } - case CIT_FAULT: { - struct cl_object *obj = parent->ci_obj; - loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index); - - io->u.ci_fault = parent->u.ci_fault; - off = lov_size_to_stripe(lsm, off, stripe); - io->u.ci_fault.ft_index = cl_index(obj, off); - break; - } - case CIT_FSYNC: { - io->u.ci_fsync.fi_start = 
start; - io->u.ci_fsync.fi_end = end; - io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid; - io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode; - break; - } - case CIT_READ: - case CIT_WRITE: { - io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent); - if (cl_io_is_append(parent)) { - io->u.ci_wr.wr_append = 1; - } else { - io->u.ci_rw.crw_pos = start; - io->u.ci_rw.crw_count = end - start; - } - break; - } - default: - break; - } -} - -static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio, - struct lov_io_sub *sub) -{ - struct lov_object *lov = lio->lis_object; - struct cl_io *sub_io; - struct cl_object *sub_obj; - struct cl_io *io = lio->lis_cl.cis_io; - int stripe = sub->sub_stripe; - int rc; - - LASSERT(!sub->sub_io); - LASSERT(!sub->sub_env); - LASSERT(sub->sub_stripe < lio->lis_stripe_count); - - if (unlikely(!lov_r0(lov)->lo_sub[stripe])) - return -EIO; - - sub->sub_io_initialized = 0; - sub->sub_borrowed = 0; - - /* obtain new environment */ - sub->sub_env = cl_env_get(&sub->sub_refcheck); - if (IS_ERR(sub->sub_env)) { - rc = PTR_ERR(sub->sub_env); - goto fini_lov_io; - } - - /* - * First sub-io. Use ->lis_single_subio to - * avoid dynamic allocation. 
- */ - if (lio->lis_active_subios == 0) { - sub->sub_io = &lio->lis_single_subio; - lio->lis_single_subio_index = stripe; - } else { - sub->sub_io = kzalloc(sizeof(*sub->sub_io), - GFP_NOFS); - if (!sub->sub_io) { - rc = -ENOMEM; - goto fini_lov_io; - } - } - - sub_obj = lovsub2cl(lov_r0(lov)->lo_sub[stripe]); - sub_io = sub->sub_io; - - sub_io->ci_obj = sub_obj; - sub_io->ci_result = 0; - sub_io->ci_parent = io; - sub_io->ci_lockreq = io->ci_lockreq; - sub_io->ci_type = io->ci_type; - sub_io->ci_no_srvlock = io->ci_no_srvlock; - sub_io->ci_noatime = io->ci_noatime; - - rc = cl_io_sub_init(sub->sub_env, sub_io, io->ci_type, sub_obj); - if (rc >= 0) { - lio->lis_active_subios++; - sub->sub_io_initialized = 1; - rc = 0; - } -fini_lov_io: - if (rc) - lov_io_sub_fini(env, lio, sub); - return rc; -} - -struct lov_io_sub *lov_sub_get(const struct lu_env *env, - struct lov_io *lio, int stripe) -{ - int rc; - struct lov_io_sub *sub = &lio->lis_subs[stripe]; - - LASSERT(stripe < lio->lis_stripe_count); - - if (!sub->sub_io_initialized) { - sub->sub_stripe = stripe; - rc = lov_io_sub_init(env, lio, sub); - } else { - rc = 0; - } - if (rc < 0) - sub = ERR_PTR(rc); - - return sub; -} - -/***************************************************************************** - * - * Lov io operations. - * - */ - -int lov_page_stripe(const struct cl_page *page) -{ - const struct cl_page_slice *slice; - - slice = cl_page_at(page, &lov_device_type); - LASSERT(slice->cpl_obj); - - return cl2lov_page(slice)->lps_stripe; -} - -static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, - struct cl_io *io) -{ - struct lov_stripe_md *lsm; - int result; - - LASSERT(lio->lis_object); - lsm = lio->lis_object->lo_lsm; - - /* - * Need to be optimized, we can't afford to allocate a piece of memory - * when writing a page. 
-jay - */ - lio->lis_subs = - kvzalloc(lsm->lsm_stripe_count * - sizeof(lio->lis_subs[0]), - GFP_NOFS); - if (lio->lis_subs) { - lio->lis_nr_subios = lio->lis_stripe_count; - lio->lis_single_subio_index = -1; - lio->lis_active_subios = 0; - result = 0; - } else { - result = -ENOMEM; - } - return result; -} - -static int lov_io_slice_init(struct lov_io *lio, struct lov_object *obj, - struct cl_io *io) -{ - io->ci_result = 0; - lio->lis_object = obj; - - lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count; - - switch (io->ci_type) { - case CIT_READ: - case CIT_WRITE: - lio->lis_pos = io->u.ci_rw.crw_pos; - lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count; - lio->lis_io_endpos = lio->lis_endpos; - if (cl_io_is_append(io)) { - LASSERT(io->ci_type == CIT_WRITE); - - /* - * If there is LOV EA hole, then we may cannot locate - * the current file-tail exactly. - */ - if (unlikely(obj->lo_lsm->lsm_pattern & - LOV_PATTERN_F_HOLE)) - return -EIO; - - lio->lis_pos = 0; - lio->lis_endpos = OBD_OBJECT_EOF; - } - break; - - case CIT_SETATTR: - if (cl_io_is_trunc(io)) - lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size; - else - lio->lis_pos = 0; - lio->lis_endpos = OBD_OBJECT_EOF; - break; - - case CIT_DATA_VERSION: - lio->lis_pos = 0; - lio->lis_endpos = OBD_OBJECT_EOF; - break; - - case CIT_FAULT: { - pgoff_t index = io->u.ci_fault.ft_index; - - lio->lis_pos = cl_offset(io->ci_obj, index); - lio->lis_endpos = cl_offset(io->ci_obj, index + 1); - break; - } - - case CIT_FSYNC: { - lio->lis_pos = io->u.ci_fsync.fi_start; - lio->lis_endpos = io->u.ci_fsync.fi_end; - break; - } - - case CIT_MISC: - lio->lis_pos = 0; - lio->lis_endpos = OBD_OBJECT_EOF; - break; - - default: - LBUG(); - } - return 0; -} - -static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) -{ - struct lov_io *lio = cl2lov_io(env, ios); - struct lov_object *lov = cl2lov(ios->cis_obj); - int i; - - if (lio->lis_subs) { - for (i = 0; i < lio->lis_nr_subios; i++) - 
lov_io_sub_fini(env, lio, &lio->lis_subs[i]); - kvfree(lio->lis_subs); - lio->lis_nr_subios = 0; - } - - LASSERT(atomic_read(&lov->lo_active_ios) > 0); - if (atomic_dec_and_test(&lov->lo_active_ios)) - wake_up_all(&lov->lo_waitq); -} - -static u64 lov_offset_mod(u64 val, int delta) -{ - if (val != OBD_OBJECT_EOF) - val += delta; - return val; -} - -static int lov_io_iter_init(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct lov_io *lio = cl2lov_io(env, ios); - struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; - struct lov_io_sub *sub; - u64 endpos; - u64 start; - u64 end; - int stripe; - int rc = 0; - - endpos = lov_offset_mod(lio->lis_endpos, -1); - for (stripe = 0; stripe < lio->lis_stripe_count; stripe++) { - if (!lov_stripe_intersects(lsm, stripe, lio->lis_pos, - endpos, &start, &end)) - continue; - - if (unlikely(!lov_r0(lio->lis_object)->lo_sub[stripe])) { - if (ios->cis_io->ci_type == CIT_READ || - ios->cis_io->ci_type == CIT_WRITE || - ios->cis_io->ci_type == CIT_FAULT) - return -EIO; - - continue; - } - - end = lov_offset_mod(end, 1); - sub = lov_sub_get(env, lio, stripe); - if (IS_ERR(sub)) { - rc = PTR_ERR(sub); - break; - } - - lov_io_sub_inherit(sub->sub_io, lio, stripe, start, end); - rc = cl_io_iter_init(sub->sub_env, sub->sub_io); - if (rc) { - cl_io_iter_fini(sub->sub_env, sub->sub_io); - break; - } - CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n", - stripe, start, end); - - list_add_tail(&sub->sub_linkage, &lio->lis_active); - } - return rc; -} - -static int lov_io_rw_iter_init(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct lov_io *lio = cl2lov_io(env, ios); - struct cl_io *io = ios->cis_io; - struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; - __u64 start = io->u.ci_rw.crw_pos; - loff_t next; - unsigned long ssize = lsm->lsm_stripe_size; - - LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); - - /* fast path for common case. 
*/ - if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) { - lov_do_div64(start, ssize); - next = (start + 1) * ssize; - if (next <= start * ssize) - next = ~0ull; - - io->ci_continue = next < lio->lis_io_endpos; - io->u.ci_rw.crw_count = min_t(loff_t, lio->lis_io_endpos, - next) - io->u.ci_rw.crw_pos; - lio->lis_pos = io->u.ci_rw.crw_pos; - lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count; - CDEBUG(D_VFSTRACE, "stripe: %llu chunk: [%llu, %llu) %llu\n", - (__u64)start, lio->lis_pos, lio->lis_endpos, - (__u64)lio->lis_io_endpos); - } - /* - * XXX The following call should be optimized: we know, that - * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe. - */ - return lov_io_iter_init(env, ios); -} - -static int lov_io_call(const struct lu_env *env, struct lov_io *lio, - int (*iofunc)(const struct lu_env *, struct cl_io *)) -{ - struct cl_io *parent = lio->lis_cl.cis_io; - struct lov_io_sub *sub; - int rc = 0; - - list_for_each_entry(sub, &lio->lis_active, sub_linkage) { - rc = iofunc(sub->sub_env, sub->sub_io); - if (rc) - break; - - if (parent->ci_result == 0) - parent->ci_result = sub->sub_io->ci_result; - } - return rc; -} - -static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios) -{ - return lov_io_call(env, cl2lov_io(env, ios), cl_io_lock); -} - -static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios) -{ - return lov_io_call(env, cl2lov_io(env, ios), cl_io_start); -} - -static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io) -{ - /* - * It's possible that lov_io_start() wasn't called against this - * sub-io, either because previous sub-io failed, or upper layer - * completed IO. 
- */ - if (io->ci_state == CIS_IO_GOING) - cl_io_end(env, io); - else - io->ci_state = CIS_IO_FINISHED; - return 0; -} - -static void -lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios) -{ - struct lov_io *lio = cl2lov_io(env, ios); - struct cl_io *parent = lio->lis_cl.cis_io; - struct lov_io_sub *sub; - - list_for_each_entry(sub, &lio->lis_active, sub_linkage) { - lov_io_end_wrapper(sub->sub_env, sub->sub_io); - - parent->u.ci_data_version.dv_data_version += - sub->sub_io->u.ci_data_version.dv_data_version; - - if (!parent->ci_result) - parent->ci_result = sub->sub_io->ci_result; - } -} - -static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io) -{ - cl_io_iter_fini(env, io); - return 0; -} - -static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io) -{ - cl_io_unlock(env, io); - return 0; -} - -static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios) -{ - int rc; - - rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper); - LASSERT(rc == 0); -} - -static void lov_io_iter_fini(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct lov_io *lio = cl2lov_io(env, ios); - int rc; - - rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper); - LASSERT(rc == 0); - while (!list_empty(&lio->lis_active)) - list_del_init(lio->lis_active.next); -} - -static void lov_io_unlock(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - int rc; - - rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper); - LASSERT(rc == 0); -} - -static int lov_io_read_ahead(const struct lu_env *env, - const struct cl_io_slice *ios, - pgoff_t start, struct cl_read_ahead *ra) -{ - struct lov_io *lio = cl2lov_io(env, ios); - struct lov_object *loo = lio->lis_object; - struct cl_object *obj = lov2cl(loo); - struct lov_layout_raid0 *r0 = lov_r0(loo); - unsigned int pps; /* pages per stripe */ - struct lov_io_sub *sub; - pgoff_t ra_end; - loff_t suboff; - int stripe; - int 
rc; - - stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start)); - if (unlikely(!r0->lo_sub[stripe])) - return -EIO; - - sub = lov_sub_get(env, lio, stripe); - if (IS_ERR(sub)) - return PTR_ERR(sub); - - lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff); - rc = cl_io_read_ahead(sub->sub_env, sub->sub_io, - cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff), - ra); - - CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n", - PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc); - if (rc) - return rc; - - /** - * Adjust the stripe index by layout of raid0. ra->cra_end is - * the maximum page index covered by an underlying DLM lock. - * This function converts cra_end from stripe level to file - * level, and make sure it's not beyond stripe boundary. - */ - if (r0->lo_nr == 1) /* single stripe file */ - return 0; - - /* cra_end is stripe level, convert it into file level */ - ra_end = ra->cra_end; - if (ra_end != CL_PAGE_EOF) - ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe); - - pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT; - - CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, stripe_size = %u, stripe no = %u, start index = %lu\n", - PFID(lu_object_fid(lov2lu(loo))), ra_end, pps, - loo->lo_lsm->lsm_stripe_size, stripe, start); - - /* never exceed the end of the stripe */ - ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1); - return 0; -} - -/** - * lov implementation of cl_operations::cio_submit() method. It takes a list - * of pages in \a queue, splits it into per-stripe sub-lists, invokes - * cl_io_submit() on underlying devices to submit sub-lists, and then splices - * everything back. - * - * Major complication of this function is a need to handle memory cleansing: - * cl_io_submit() is called to write out pages as a part of VM memory - * reclamation, and hence it may not fail due to memory shortages (system - * dead-locks otherwise). 
To deal with this, some resources (sub-lists, - * sub-environment, etc.) are allocated per-device on "startup" (i.e., in a - * not-memory cleansing context), and in case of memory shortage, these - * pre-allocated resources are used by lov_io_submit() under - * lov_device::ld_mutex mutex. - */ -static int lov_io_submit(const struct lu_env *env, - const struct cl_io_slice *ios, - enum cl_req_type crt, struct cl_2queue *queue) -{ - struct cl_page_list *qin = &queue->c2_qin; - struct lov_io *lio = cl2lov_io(env, ios); - struct lov_io_sub *sub; - struct cl_page_list *plist = &lov_env_info(env)->lti_plist; - struct cl_page *page; - int stripe; - - int rc = 0; - - if (lio->lis_active_subios == 1) { - int idx = lio->lis_single_subio_index; - - LASSERT(idx < lio->lis_nr_subios); - sub = lov_sub_get(env, lio, idx); - LASSERT(!IS_ERR(sub)); - LASSERT(sub->sub_io == &lio->lis_single_subio); - rc = cl_io_submit_rw(sub->sub_env, sub->sub_io, - crt, queue); - return rc; - } - - LASSERT(lio->lis_subs); - - cl_page_list_init(plist); - while (qin->pl_nr > 0) { - struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q; - - cl_2queue_init(cl2q); - - page = cl_page_list_first(qin); - cl_page_list_move(&cl2q->c2_qin, qin, page); - - stripe = lov_page_stripe(page); - while (qin->pl_nr > 0) { - page = cl_page_list_first(qin); - if (stripe != lov_page_stripe(page)) - break; - - cl_page_list_move(&cl2q->c2_qin, qin, page); - } - - sub = lov_sub_get(env, lio, stripe); - if (!IS_ERR(sub)) { - rc = cl_io_submit_rw(sub->sub_env, sub->sub_io, - crt, cl2q); - } else { - rc = PTR_ERR(sub); - } - - cl_page_list_splice(&cl2q->c2_qin, plist); - cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout); - cl_2queue_fini(env, cl2q); - - if (rc != 0) - break; - } - - cl_page_list_splice(plist, qin); - cl_page_list_fini(env, plist); - - return rc; -} - -static int lov_io_commit_async(const struct lu_env *env, - const struct cl_io_slice *ios, - struct cl_page_list *queue, int from, int to, - cl_commit_cbt cb) 
-{ - struct cl_page_list *plist = &lov_env_info(env)->lti_plist; - struct lov_io *lio = cl2lov_io(env, ios); - struct lov_io_sub *sub; - struct cl_page *page; - int rc = 0; - - if (lio->lis_active_subios == 1) { - int idx = lio->lis_single_subio_index; - - LASSERT(idx < lio->lis_nr_subios); - sub = lov_sub_get(env, lio, idx); - LASSERT(!IS_ERR(sub)); - LASSERT(sub->sub_io == &lio->lis_single_subio); - rc = cl_io_commit_async(sub->sub_env, sub->sub_io, queue, - from, to, cb); - return rc; - } - - LASSERT(lio->lis_subs); - - cl_page_list_init(plist); - while (queue->pl_nr > 0) { - int stripe_to = to; - int stripe; - - LASSERT(plist->pl_nr == 0); - page = cl_page_list_first(queue); - cl_page_list_move(plist, queue, page); - - stripe = lov_page_stripe(page); - while (queue->pl_nr > 0) { - page = cl_page_list_first(queue); - if (stripe != lov_page_stripe(page)) - break; - - cl_page_list_move(plist, queue, page); - } - - if (queue->pl_nr > 0) /* still has more pages */ - stripe_to = PAGE_SIZE; - - sub = lov_sub_get(env, lio, stripe); - if (!IS_ERR(sub)) { - rc = cl_io_commit_async(sub->sub_env, sub->sub_io, - plist, from, stripe_to, cb); - } else { - rc = PTR_ERR(sub); - break; - } - - if (plist->pl_nr > 0) /* short write */ - break; - - from = 0; - } - - /* for error case, add the page back into the qin list */ - LASSERT(ergo(rc == 0, plist->pl_nr == 0)); - while (plist->pl_nr > 0) { - /* error occurred, add the uncommitted pages back into queue */ - page = cl_page_list_last(plist); - cl_page_list_move_head(queue, plist, page); - } - - return rc; -} - -static int lov_io_fault_start(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_fault_io *fio; - struct lov_io *lio; - struct lov_io_sub *sub; - - fio = &ios->cis_io->u.ci_fault; - lio = cl2lov_io(env, ios); - sub = lov_sub_get(env, lio, lov_page_stripe(fio->ft_page)); - if (IS_ERR(sub)) - return PTR_ERR(sub); - sub->sub_io->u.ci_fault.ft_nob = fio->ft_nob; - return lov_io_start(env, ios); -} - 
-static void lov_io_fsync_end(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct lov_io *lio = cl2lov_io(env, ios); - struct lov_io_sub *sub; - unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written; - - *written = 0; - list_for_each_entry(sub, &lio->lis_active, sub_linkage) { - struct cl_io *subio = sub->sub_io; - - lov_io_end_wrapper(sub->sub_env, subio); - - if (subio->ci_result == 0) - *written += subio->u.ci_fsync.fi_nr_written; - } -} - -static const struct cl_io_operations lov_io_ops = { - .op = { - [CIT_READ] = { - .cio_fini = lov_io_fini, - .cio_iter_init = lov_io_rw_iter_init, - .cio_iter_fini = lov_io_iter_fini, - .cio_lock = lov_io_lock, - .cio_unlock = lov_io_unlock, - .cio_start = lov_io_start, - .cio_end = lov_io_end - }, - [CIT_WRITE] = { - .cio_fini = lov_io_fini, - .cio_iter_init = lov_io_rw_iter_init, - .cio_iter_fini = lov_io_iter_fini, - .cio_lock = lov_io_lock, - .cio_unlock = lov_io_unlock, - .cio_start = lov_io_start, - .cio_end = lov_io_end - }, - [CIT_SETATTR] = { - .cio_fini = lov_io_fini, - .cio_iter_init = lov_io_iter_init, - .cio_iter_fini = lov_io_iter_fini, - .cio_lock = lov_io_lock, - .cio_unlock = lov_io_unlock, - .cio_start = lov_io_start, - .cio_end = lov_io_end - }, - [CIT_DATA_VERSION] = { - .cio_fini = lov_io_fini, - .cio_iter_init = lov_io_iter_init, - .cio_iter_fini = lov_io_iter_fini, - .cio_lock = lov_io_lock, - .cio_unlock = lov_io_unlock, - .cio_start = lov_io_start, - .cio_end = lov_io_data_version_end, - }, - [CIT_FAULT] = { - .cio_fini = lov_io_fini, - .cio_iter_init = lov_io_iter_init, - .cio_iter_fini = lov_io_iter_fini, - .cio_lock = lov_io_lock, - .cio_unlock = lov_io_unlock, - .cio_start = lov_io_fault_start, - .cio_end = lov_io_end - }, - [CIT_FSYNC] = { - .cio_fini = lov_io_fini, - .cio_iter_init = lov_io_iter_init, - .cio_iter_fini = lov_io_iter_fini, - .cio_lock = lov_io_lock, - .cio_unlock = lov_io_unlock, - .cio_start = lov_io_start, - .cio_end = lov_io_fsync_end - }, - 
[CIT_MISC] = { - .cio_fini = lov_io_fini - } - }, - .cio_read_ahead = lov_io_read_ahead, - .cio_submit = lov_io_submit, - .cio_commit_async = lov_io_commit_async, -}; - -/***************************************************************************** - * - * Empty lov io operations. - * - */ - -static void lov_empty_io_fini(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct lov_object *lov = cl2lov(ios->cis_obj); - - if (atomic_dec_and_test(&lov->lo_active_ios)) - wake_up_all(&lov->lo_waitq); -} - -static int lov_empty_io_submit(const struct lu_env *env, - const struct cl_io_slice *ios, - enum cl_req_type crt, struct cl_2queue *queue) -{ - return -EBADF; -} - -static void lov_empty_impossible(const struct lu_env *env, - struct cl_io_slice *ios) -{ - LBUG(); -} - -#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible) - -/** - * An io operation vector for files without stripes. - */ -static const struct cl_io_operations lov_empty_io_ops = { - .op = { - [CIT_READ] = { - .cio_fini = lov_empty_io_fini, - }, - [CIT_WRITE] = { - .cio_fini = lov_empty_io_fini, - .cio_iter_init = LOV_EMPTY_IMPOSSIBLE, - .cio_lock = LOV_EMPTY_IMPOSSIBLE, - .cio_start = LOV_EMPTY_IMPOSSIBLE, - .cio_end = LOV_EMPTY_IMPOSSIBLE - }, - [CIT_SETATTR] = { - .cio_fini = lov_empty_io_fini, - .cio_iter_init = LOV_EMPTY_IMPOSSIBLE, - .cio_lock = LOV_EMPTY_IMPOSSIBLE, - .cio_start = LOV_EMPTY_IMPOSSIBLE, - .cio_end = LOV_EMPTY_IMPOSSIBLE - }, - [CIT_FAULT] = { - .cio_fini = lov_empty_io_fini, - .cio_iter_init = LOV_EMPTY_IMPOSSIBLE, - .cio_lock = LOV_EMPTY_IMPOSSIBLE, - .cio_start = LOV_EMPTY_IMPOSSIBLE, - .cio_end = LOV_EMPTY_IMPOSSIBLE - }, - [CIT_FSYNC] = { - .cio_fini = lov_empty_io_fini - }, - [CIT_MISC] = { - .cio_fini = lov_empty_io_fini - } - }, - .cio_submit = lov_empty_io_submit, - .cio_commit_async = LOV_EMPTY_IMPOSSIBLE -}; - -int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) -{ - struct lov_io *lio = lov_env_io(env); - struct 
lov_object *lov = cl2lov(obj); - - INIT_LIST_HEAD(&lio->lis_active); - io->ci_result = lov_io_slice_init(lio, lov, io); - if (io->ci_result == 0) { - io->ci_result = lov_io_subio_init(env, lio, io); - if (io->ci_result == 0) { - cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops); - atomic_inc(&lov->lo_active_ios); - } - } - return io->ci_result; -} - -int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) -{ - struct lov_object *lov = cl2lov(obj); - struct lov_io *lio = lov_env_io(env); - int result; - - lio->lis_object = lov; - switch (io->ci_type) { - default: - LBUG(); - case CIT_MISC: - case CIT_READ: - result = 0; - break; - case CIT_FSYNC: - case CIT_SETATTR: - case CIT_DATA_VERSION: - result = 1; - break; - case CIT_WRITE: - result = -EBADF; - break; - case CIT_FAULT: - result = -EFAULT; - CERROR("Page fault on a file without stripes: " DFID "\n", - PFID(lu_object_fid(&obj->co_lu))); - break; - } - if (result == 0) { - cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops); - atomic_inc(&lov->lo_active_ios); - } - - io->ci_result = result < 0 ? 
result : 0; - return result; -} - -int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) -{ - struct lov_object *lov = cl2lov(obj); - struct lov_io *lio = lov_env_io(env); - int result; - - LASSERT(lov->lo_lsm); - lio->lis_object = lov; - - switch (io->ci_type) { - default: - LASSERTF(0, "invalid type %d\n", io->ci_type); - result = -EOPNOTSUPP; - break; - case CIT_MISC: - case CIT_FSYNC: - case CIT_DATA_VERSION: - result = 1; - break; - case CIT_SETATTR: - /* the truncate to 0 is managed by MDT: - * - in open, for open O_TRUNC - * - in setattr, for truncate - */ - /* the truncate is for size > 0 so triggers a restore */ - if (cl_io_is_trunc(io)) { - io->ci_restore_needed = 1; - result = -ENODATA; - } else { - result = 1; - } - break; - case CIT_READ: - case CIT_WRITE: - case CIT_FAULT: - io->ci_restore_needed = 1; - result = -ENODATA; - break; - } - if (result == 0) { - cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops); - atomic_inc(&lov->lo_active_ios); - } - - io->ci_result = result < 0 ? result : 0; - return result; -} - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c deleted file mode 100644 index b0292100bf26..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_lock.c +++ /dev/null @@ -1,348 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_lock for LOV layer. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -/** \addtogroup lov - * @{ - */ - -/***************************************************************************** - * - * Lov lock operations. - * - */ - -static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env, - const struct cl_lock *parent, - struct lov_lock_sub *lls) -{ - struct lov_sublock_env *subenv; - struct lov_io *lio = lov_env_io(env); - struct cl_io *io = lio->lis_cl.cis_io; - struct lov_io_sub *sub; - - subenv = &lov_env_session(env)->ls_subenv; - - /* - * FIXME: We tend to use the subio's env & io to call the sublock - * lock operations because osc lock sometimes stores some control - * variables in thread's IO information(Now only lockless information). - * However, if the lock's host(object) is different from the object - * for current IO, we have no way to get the subenv and subio because - * they are not initialized at all. As a temp fix, in this case, - * we still borrow the parent's env to call sublock operations. 
- */ - if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) { - subenv->lse_env = env; - subenv->lse_io = io; - } else { - sub = lov_sub_get(env, lio, lls->sub_stripe); - if (!IS_ERR(sub)) { - subenv->lse_env = sub->sub_env; - subenv->lse_io = sub->sub_io; - } else { - subenv = (void *)sub; - } - } - return subenv; -} - -static int lov_sublock_init(const struct lu_env *env, - const struct cl_lock *parent, - struct lov_lock_sub *lls) -{ - struct lov_sublock_env *subenv; - int result; - - subenv = lov_sublock_env_get(env, parent, lls); - if (!IS_ERR(subenv)) { - result = cl_lock_init(subenv->lse_env, &lls->sub_lock, - subenv->lse_io); - } else { - /* error occurs. */ - result = PTR_ERR(subenv); - } - return result; -} - -/** - * Creates sub-locks for a given lov_lock for the first time. - * - * Goes through all sub-objects of top-object, and creates sub-locks on every - * sub-object intersecting with top-lock extent. This is complicated by the - * fact that top-lock (that is being created) can be accessed concurrently - * through already created sub-locks (possibly shared with other top-locks). - */ -static struct lov_lock *lov_lock_sub_init(const struct lu_env *env, - const struct cl_object *obj, - struct cl_lock *lock) -{ - int result = 0; - int i; - int nr; - u64 start; - u64 end; - u64 file_start; - u64 file_end; - - struct lov_object *loo = cl2lov(obj); - struct lov_layout_raid0 *r0 = lov_r0(loo); - struct lov_lock *lovlck; - - CDEBUG(D_INODE, "%p: lock/io FID " DFID "/" DFID ", lock/io clobj %p/%p\n", - loo, PFID(lu_object_fid(lov2lu(loo))), - PFID(lu_object_fid(&obj->co_lu)), - lov2cl(loo), obj); - - file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start); - file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1; - - for (i = 0, nr = 0; i < r0->lo_nr; i++) { - /* - * XXX for wide striping smarter algorithm is desirable, - * breaking out of the loop, early. 
- */ - if (likely(r0->lo_sub[i]) && /* spare layout */ - lov_stripe_intersects(loo->lo_lsm, i, - file_start, file_end, &start, &end)) - nr++; - } - LASSERT(nr > 0); - lovlck = kvzalloc(offsetof(struct lov_lock, lls_sub[nr]), - GFP_NOFS); - if (!lovlck) - return ERR_PTR(-ENOMEM); - - lovlck->lls_nr = nr; - for (i = 0, nr = 0; i < r0->lo_nr; ++i) { - if (likely(r0->lo_sub[i]) && - lov_stripe_intersects(loo->lo_lsm, i, - file_start, file_end, &start, &end)) { - struct lov_lock_sub *lls = &lovlck->lls_sub[nr]; - struct cl_lock_descr *descr; - - descr = &lls->sub_lock.cll_descr; - - LASSERT(!descr->cld_obj); - descr->cld_obj = lovsub2cl(r0->lo_sub[i]); - descr->cld_start = cl_index(descr->cld_obj, start); - descr->cld_end = cl_index(descr->cld_obj, end); - descr->cld_mode = lock->cll_descr.cld_mode; - descr->cld_gid = lock->cll_descr.cld_gid; - descr->cld_enq_flags = lock->cll_descr.cld_enq_flags; - lls->sub_stripe = i; - - /* initialize sub lock */ - result = lov_sublock_init(env, lock, lls); - if (result < 0) - break; - - lls->sub_initialized = 1; - nr++; - } - } - LASSERT(ergo(result == 0, nr == lovlck->lls_nr)); - - if (result != 0) { - for (i = 0; i < nr; ++i) { - if (!lovlck->lls_sub[i].sub_initialized) - break; - - cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock); - } - kvfree(lovlck); - lovlck = ERR_PTR(result); - } - - return lovlck; -} - -static void lov_lock_fini(const struct lu_env *env, - struct cl_lock_slice *slice) -{ - struct lov_lock *lovlck; - int i; - - lovlck = cl2lov_lock(slice); - for (i = 0; i < lovlck->lls_nr; ++i) { - LASSERT(!lovlck->lls_sub[i].sub_is_enqueued); - if (lovlck->lls_sub[i].sub_initialized) - cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock); - } - kvfree(lovlck); -} - -/** - * Implementation of cl_lock_operations::clo_enqueue() for lov layer. 
This - * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock - * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock - * state machines in the face of sub-locks sharing (by multiple top-locks), - * and concurrent sub-lock cancellations. - */ -static int lov_lock_enqueue(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_io *io, struct cl_sync_io *anchor) -{ - struct cl_lock *lock = slice->cls_lock; - struct lov_lock *lovlck = cl2lov_lock(slice); - int i; - int rc = 0; - - for (i = 0; i < lovlck->lls_nr; ++i) { - struct lov_lock_sub *lls = &lovlck->lls_sub[i]; - struct lov_sublock_env *subenv; - - subenv = lov_sublock_env_get(env, lock, lls); - if (IS_ERR(subenv)) { - rc = PTR_ERR(subenv); - break; - } - rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io, - &lls->sub_lock, anchor); - if (rc != 0) - break; - - lls->sub_is_enqueued = 1; - } - return rc; -} - -static void lov_lock_cancel(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct cl_lock *lock = slice->cls_lock; - struct lov_lock *lovlck = cl2lov_lock(slice); - int i; - - for (i = 0; i < lovlck->lls_nr; ++i) { - struct lov_lock_sub *lls = &lovlck->lls_sub[i]; - struct cl_lock *sublock = &lls->sub_lock; - struct lov_sublock_env *subenv; - - if (!lls->sub_is_enqueued) - continue; - - lls->sub_is_enqueued = 0; - subenv = lov_sublock_env_get(env, lock, lls); - if (!IS_ERR(subenv)) { - cl_lock_cancel(subenv->lse_env, sublock); - } else { - CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock, - "%s fails with %ld.\n", - __func__, PTR_ERR(subenv)); - } - } -} - -static int lov_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct cl_lock_slice *slice) -{ - struct lov_lock *lck = cl2lov_lock(slice); - int i; - - (*p)(env, cookie, "%d\n", lck->lls_nr); - for (i = 0; i < lck->lls_nr; ++i) { - struct lov_lock_sub *sub; - - sub = &lck->lls_sub[i]; - (*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued); - 
cl_lock_print(env, cookie, p, &sub->sub_lock); - } - return 0; -} - -static const struct cl_lock_operations lov_lock_ops = { - .clo_fini = lov_lock_fini, - .clo_enqueue = lov_lock_enqueue, - .clo_cancel = lov_lock_cancel, - .clo_print = lov_lock_print -}; - -int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io) -{ - struct lov_lock *lck; - int result = 0; - - lck = lov_lock_sub_init(env, obj, lock); - if (!IS_ERR(lck)) - cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops); - else - result = PTR_ERR(lck); - return result; -} - -static void lov_empty_lock_fini(const struct lu_env *env, - struct cl_lock_slice *slice) -{ - struct lov_lock *lck = cl2lov_lock(slice); - - kmem_cache_free(lov_lock_kmem, lck); -} - -static int lov_empty_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t p, - const struct cl_lock_slice *slice) -{ - (*p)(env, cookie, "empty\n"); - return 0; -} - -/* XXX: more methods will be added later. */ -static const struct cl_lock_operations lov_empty_lock_ops = { - .clo_fini = lov_empty_lock_fini, - .clo_print = lov_empty_lock_print -}; - -int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io) -{ - struct lov_lock *lck; - int result = -ENOMEM; - - lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); - if (lck) { - cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops); - result = 0; - } - return result; -} - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c deleted file mode 100644 index 006717cf7a41..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_merge.c +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include -#include "lov_internal.h" - -/** Merge the lock value block(&lvb) attributes and KMS from each of the - * stripes in a file into a single lvb. It is expected that the caller - * initializes the current atime, mtime, ctime to avoid regressing a more - * uptodate time on the local client. 
- */ -int lov_merge_lvb_kms(struct lov_stripe_md *lsm, - struct ost_lvb *lvb, __u64 *kms_place) -{ - __u64 size = 0; - __u64 kms = 0; - __u64 blocks = 0; - s64 current_mtime = lvb->lvb_mtime; - s64 current_atime = lvb->lvb_atime; - s64 current_ctime = lvb->lvb_ctime; - int i; - int rc = 0; - - assert_spin_locked(&lsm->lsm_lock); - LASSERT(lsm->lsm_lock_owner == current->pid); - - CDEBUG(D_INODE, "MDT ID " DOSTID " initial value: s=%llu m=%llu a=%llu c=%llu b=%llu\n", - POSTID(&lsm->lsm_oi), lvb->lvb_size, lvb->lvb_mtime, - lvb->lvb_atime, lvb->lvb_ctime, lvb->lvb_blocks); - for (i = 0; i < lsm->lsm_stripe_count; i++) { - struct lov_oinfo *loi = lsm->lsm_oinfo[i]; - u64 lov_size, tmpsize; - - if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks)) { - rc = OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks); - continue; - } - - tmpsize = loi->loi_kms; - lov_size = lov_stripe_size(lsm, tmpsize, i); - if (lov_size > kms) - kms = lov_size; - - if (loi->loi_lvb.lvb_size > tmpsize) - tmpsize = loi->loi_lvb.lvb_size; - - lov_size = lov_stripe_size(lsm, tmpsize, i); - if (lov_size > size) - size = lov_size; - /* merge blocks, mtime, atime */ - blocks += loi->loi_lvb.lvb_blocks; - if (loi->loi_lvb.lvb_mtime > current_mtime) - current_mtime = loi->loi_lvb.lvb_mtime; - if (loi->loi_lvb.lvb_atime > current_atime) - current_atime = loi->loi_lvb.lvb_atime; - if (loi->loi_lvb.lvb_ctime > current_ctime) - current_ctime = loi->loi_lvb.lvb_ctime; - - CDEBUG(D_INODE, "MDT ID " DOSTID " on OST[%u]: s=%llu m=%llu a=%llu c=%llu b=%llu\n", - POSTID(&lsm->lsm_oi), loi->loi_ost_idx, - loi->loi_lvb.lvb_size, loi->loi_lvb.lvb_mtime, - loi->loi_lvb.lvb_atime, loi->loi_lvb.lvb_ctime, - loi->loi_lvb.lvb_blocks); - } - - *kms_place = kms; - lvb->lvb_size = size; - lvb->lvb_blocks = blocks; - lvb->lvb_mtime = current_mtime; - lvb->lvb_atime = current_atime; - lvb->lvb_ctime = current_ctime; - return rc; -} diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c deleted 
file mode 100644 index 344ff4b20168..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_obd.c +++ /dev/null @@ -1,1444 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/lov/lov_obd.c - * - * Author: Phil Schwan - * Author: Peter Braam - * Author: Mike Shaver - * Author: Nathan Rutman - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "lov_internal.h" - -/* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion. - * Any function that expects lov_tgts to remain stationary must take a ref. 
- */ -static void lov_getref(struct obd_device *obd) -{ - struct lov_obd *lov = &obd->u.lov; - - /* nobody gets through here until lov_putref is done */ - mutex_lock(&lov->lov_lock); - atomic_inc(&lov->lov_refcount); - mutex_unlock(&lov->lov_lock); -} - -static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt); - -static void lov_putref(struct obd_device *obd) -{ - struct lov_obd *lov = &obd->u.lov; - - mutex_lock(&lov->lov_lock); - /* ok to dec to 0 more than once -- ltd_exp's will be null */ - if (atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) { - LIST_HEAD(kill); - int i; - struct lov_tgt_desc *tgt, *n; - - CDEBUG(D_CONFIG, "destroying %d lov targets\n", - lov->lov_death_row); - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - tgt = lov->lov_tgts[i]; - - if (!tgt || !tgt->ltd_reap) - continue; - list_add(&tgt->ltd_kill, &kill); - /* XXX - right now there is a dependency on ld_tgt_count - * being the maximum tgt index for computing the - * mds_max_easize. So we can't shrink it. 
- */ - lov_ost_pool_remove(&lov->lov_packed, i); - lov->lov_tgts[i] = NULL; - lov->lov_death_row--; - } - mutex_unlock(&lov->lov_lock); - - list_for_each_entry_safe(tgt, n, &kill, ltd_kill) { - list_del(&tgt->ltd_kill); - /* Disconnect */ - __lov_del_obd(obd, tgt); - } - - if (lov->lov_tgts_kobj) - kobject_put(lov->lov_tgts_kobj); - - } else { - mutex_unlock(&lov->lov_lock); - } -} - -static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid, - enum obd_notify_event ev); -static int lov_notify(struct obd_device *obd, struct obd_device *watched, - enum obd_notify_event ev, void *data); - -int lov_connect_obd(struct obd_device *obd, __u32 index, int activate, - struct obd_connect_data *data) -{ - struct lov_obd *lov = &obd->u.lov; - struct obd_uuid *tgt_uuid; - struct obd_device *tgt_obd; - static struct obd_uuid lov_osc_uuid = { "LOV_OSC_UUID" }; - struct obd_import *imp; - int rc; - - if (!lov->lov_tgts[index]) - return -EINVAL; - - tgt_uuid = &lov->lov_tgts[index]->ltd_uuid; - tgt_obd = lov->lov_tgts[index]->ltd_obd; - - if (!tgt_obd->obd_set_up) { - CERROR("Target %s not set up\n", obd_uuid2str(tgt_uuid)); - return -EINVAL; - } - - /* override the sp_me from lov */ - tgt_obd->u.cli.cl_sp_me = lov->lov_sp_me; - - if (data && (data->ocd_connect_flags & OBD_CONNECT_INDEX)) - data->ocd_index = index; - - /* - * Divine LOV knows that OBDs under it are OSCs. - */ - imp = tgt_obd->u.cli.cl_import; - - if (activate) { - tgt_obd->obd_no_recov = 0; - /* FIXME this is probably supposed to be - * ptlrpc_set_import_active. Horrible naming. 
- */ - ptlrpc_activate_import(imp); - } - - rc = obd_register_observer(tgt_obd, obd); - if (rc) { - CERROR("Target %s register_observer error %d\n", - obd_uuid2str(tgt_uuid), rc); - return rc; - } - - if (imp->imp_invalid) { - CDEBUG(D_CONFIG, "not connecting OSC %s; administratively disabled\n", - obd_uuid2str(tgt_uuid)); - return 0; - } - - rc = obd_connect(NULL, &lov->lov_tgts[index]->ltd_exp, tgt_obd, - &lov_osc_uuid, data, NULL); - if (rc || !lov->lov_tgts[index]->ltd_exp) { - CERROR("Target %s connect error %d\n", - obd_uuid2str(tgt_uuid), rc); - return -ENODEV; - } - - lov->lov_tgts[index]->ltd_reap = 0; - - CDEBUG(D_CONFIG, "Connected tgt idx %d %s (%s) %sactive\n", index, - obd_uuid2str(tgt_uuid), tgt_obd->obd_name, activate ? "":"in"); - - if (lov->lov_tgts_kobj) - /* Even if we failed, that's ok */ - rc = sysfs_create_link(lov->lov_tgts_kobj, &tgt_obd->obd_kobj, - tgt_obd->obd_name); - - return 0; -} - -static int lov_connect(const struct lu_env *env, - struct obd_export **exp, struct obd_device *obd, - struct obd_uuid *cluuid, struct obd_connect_data *data, - void *localdata) -{ - struct lov_obd *lov = &obd->u.lov; - struct lov_tgt_desc *tgt; - struct lustre_handle conn; - int i, rc; - - CDEBUG(D_CONFIG, "connect #%d\n", lov->lov_connects); - - rc = class_connect(&conn, obd, cluuid); - if (rc) - return rc; - - *exp = class_conn2export(&conn); - - /* Why should there ever be more than 1 connect? 
*/ - lov->lov_connects++; - LASSERT(lov->lov_connects == 1); - - memset(&lov->lov_ocd, 0, sizeof(lov->lov_ocd)); - if (data) - lov->lov_ocd = *data; - - obd_getref(obd); - - lov->lov_tgts_kobj = kobject_create_and_add("target_obds", - &obd->obd_kobj); - - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - tgt = lov->lov_tgts[i]; - if (!tgt || obd_uuid_empty(&tgt->ltd_uuid)) - continue; - /* Flags will be lowest common denominator */ - rc = lov_connect_obd(obd, i, tgt->ltd_activate, &lov->lov_ocd); - if (rc) { - CERROR("%s: lov connect tgt %d failed: %d\n", - obd->obd_name, i, rc); - continue; - } - /* connect to administrative disabled ost */ - if (!lov->lov_tgts[i]->ltd_exp) - continue; - - rc = lov_notify(obd, lov->lov_tgts[i]->ltd_exp->exp_obd, - OBD_NOTIFY_CONNECT, (void *)&i); - if (rc) { - CERROR("%s error sending notify %d\n", - obd->obd_name, rc); - } - } - obd_putref(obd); - - return 0; -} - -static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt) -{ - struct lov_obd *lov = &obd->u.lov; - struct obd_device *osc_obd; - int rc; - - osc_obd = class_exp2obd(tgt->ltd_exp); - CDEBUG(D_CONFIG, "%s: disconnecting target %s\n", - obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL"); - - if (tgt->ltd_active) { - tgt->ltd_active = 0; - lov->desc.ld_active_tgt_count--; - tgt->ltd_exp->exp_obd->obd_inactive = 1; - } - - if (osc_obd) { - if (lov->lov_tgts_kobj) - sysfs_remove_link(lov->lov_tgts_kobj, - osc_obd->obd_name); - - /* Pass it on to our clients. - * XXX This should be an argument to disconnect, - * XXX not a back-door flag on the OBD. Ah well. 
- */ - osc_obd->obd_force = obd->obd_force; - osc_obd->obd_fail = obd->obd_fail; - osc_obd->obd_no_recov = obd->obd_no_recov; - } - - obd_register_observer(osc_obd, NULL); - - rc = obd_disconnect(tgt->ltd_exp); - if (rc) { - CERROR("Target %s disconnect error %d\n", - tgt->ltd_uuid.uuid, rc); - rc = 0; - } - - tgt->ltd_exp = NULL; - return 0; -} - -static int lov_disconnect(struct obd_export *exp) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lov_obd *lov = &obd->u.lov; - int i, rc; - - if (!lov->lov_tgts) - goto out; - - /* Only disconnect the underlying layers on the final disconnect. */ - lov->lov_connects--; - if (lov->lov_connects != 0) { - /* why should there be more than 1 connect? */ - CERROR("disconnect #%d\n", lov->lov_connects); - goto out; - } - - /* Let's hold another reference so lov_del_obd doesn't spin through - * putref every time - */ - obd_getref(obd); - - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - if (lov->lov_tgts[i] && lov->lov_tgts[i]->ltd_exp) { - /* Disconnection is the last we know about an obd */ - lov_del_target(obd, i, NULL, lov->lov_tgts[i]->ltd_gen); - } - } - - obd_putref(obd); - -out: - rc = class_disconnect(exp); /* bz 9811 */ - return rc; -} - -/* Error codes: - * - * -EINVAL : UUID can't be found in the LOV's target list - * -ENOTCONN: The UUID is found, but the target connection is bad (!) - * -EBADF : The UUID is found, but the OBD is the wrong type (!) - * any >= 0 : is log target index - */ -static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid, - enum obd_notify_event ev) -{ - struct lov_obd *lov = &obd->u.lov; - struct lov_tgt_desc *tgt; - int index, activate, active; - - CDEBUG(D_INFO, "Searching in lov %p for uuid %s event(%d)\n", - lov, uuid->uuid, ev); - - obd_getref(obd); - for (index = 0; index < lov->desc.ld_tgt_count; index++) { - tgt = lov->lov_tgts[index]; - if (!tgt) - continue; - /* - * LU-642, initially inactive OSC could miss the obd_connect, - * we make up for it here. 
- */ - if (ev == OBD_NOTIFY_ACTIVATE && !tgt->ltd_exp && - obd_uuid_equals(uuid, &tgt->ltd_uuid)) { - struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"}; - - obd_connect(NULL, &tgt->ltd_exp, tgt->ltd_obd, - &lov_osc_uuid, &lov->lov_ocd, NULL); - } - if (!tgt->ltd_exp) - continue; - - CDEBUG(D_INFO, "lov idx %d is %s conn %#llx\n", - index, obd_uuid2str(&tgt->ltd_uuid), - tgt->ltd_exp->exp_handle.h_cookie); - if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) - break; - } - - if (index == lov->desc.ld_tgt_count) { - index = -EINVAL; - goto out; - } - - if (ev == OBD_NOTIFY_DEACTIVATE || ev == OBD_NOTIFY_ACTIVATE) { - activate = (ev == OBD_NOTIFY_ACTIVATE) ? 1 : 0; - - if (lov->lov_tgts[index]->ltd_activate == activate) { - CDEBUG(D_INFO, "OSC %s already %sactivate!\n", - uuid->uuid, activate ? "" : "de"); - } else { - lov->lov_tgts[index]->ltd_activate = activate; - CDEBUG(D_CONFIG, "%sactivate OSC %s\n", - activate ? "" : "de", obd_uuid2str(uuid)); - } - - } else if (ev == OBD_NOTIFY_INACTIVE || ev == OBD_NOTIFY_ACTIVE) { - active = (ev == OBD_NOTIFY_ACTIVE) ? 1 : 0; - - if (lov->lov_tgts[index]->ltd_active == active) { - CDEBUG(D_INFO, "OSC %s already %sactive!\n", - uuid->uuid, active ? "" : "in"); - goto out; - } - CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n", - obd_uuid2str(uuid), active ? 
"" : "in"); - - lov->lov_tgts[index]->ltd_active = active; - if (active) { - lov->desc.ld_active_tgt_count++; - lov->lov_tgts[index]->ltd_exp->exp_obd->obd_inactive = 0; - } else { - lov->desc.ld_active_tgt_count--; - lov->lov_tgts[index]->ltd_exp->exp_obd->obd_inactive = 1; - } - } else { - CERROR("Unknown event(%d) for uuid %s", ev, uuid->uuid); - } - - out: - obd_putref(obd); - return index; -} - -static int lov_notify(struct obd_device *obd, struct obd_device *watched, - enum obd_notify_event ev, void *data) -{ - int rc = 0; - struct lov_obd *lov = &obd->u.lov; - - down_read(&lov->lov_notify_lock); - if (!lov->lov_connects) { - up_read(&lov->lov_notify_lock); - return rc; - } - - if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE || - ev == OBD_NOTIFY_ACTIVATE || ev == OBD_NOTIFY_DEACTIVATE) { - struct obd_uuid *uuid; - - LASSERT(watched); - - if (strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) { - up_read(&lov->lov_notify_lock); - CERROR("unexpected notification of %s %s!\n", - watched->obd_type->typ_name, - watched->obd_name); - return -EINVAL; - } - uuid = &watched->u.cli.cl_target_uuid; - - /* Set OSC as active before notifying the observer, so the - * observer can use the OSC normally. - */ - rc = lov_set_osc_active(obd, uuid, ev); - if (rc < 0) { - up_read(&lov->lov_notify_lock); - CERROR("event(%d) of %s failed: %d\n", ev, - obd_uuid2str(uuid), rc); - return rc; - } - /* active event should be pass lov target index as data */ - data = &rc; - } - - /* Pass the notification up the chain. 
*/ - if (watched) { - rc = obd_notify_observer(obd, watched, ev, data); - } else { - /* NULL watched means all osc's in the lov (only for syncs) */ - /* sync event should be send lov idx as data */ - struct lov_obd *lov = &obd->u.lov; - int i, is_sync; - - data = &i; - is_sync = (ev == OBD_NOTIFY_SYNC) || - (ev == OBD_NOTIFY_SYNC_NONBLOCK); - - obd_getref(obd); - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - if (!lov->lov_tgts[i]) - continue; - - /* don't send sync event if target not - * connected/activated - */ - if (is_sync && !lov->lov_tgts[i]->ltd_active) - continue; - - rc = obd_notify_observer(obd, lov->lov_tgts[i]->ltd_obd, - ev, data); - if (rc) { - CERROR("%s: notify %s of %s failed %d\n", - obd->obd_name, - obd->obd_observer->obd_name, - lov->lov_tgts[i]->ltd_obd->obd_name, - rc); - } - } - obd_putref(obd); - } - - up_read(&lov->lov_notify_lock); - return rc; -} - -static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, - __u32 index, int gen, int active) -{ - struct lov_obd *lov = &obd->u.lov; - struct lov_tgt_desc *tgt; - struct obd_device *tgt_obd; - int rc; - - CDEBUG(D_CONFIG, "uuid:%s idx:%d gen:%d active:%d\n", - uuidp->uuid, index, gen, active); - - if (gen <= 0) { - CERROR("request to add OBD %s with invalid generation: %d\n", - uuidp->uuid, gen); - return -EINVAL; - } - - tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME, - &obd->obd_uuid); - if (!tgt_obd) - return -EINVAL; - - mutex_lock(&lov->lov_lock); - - if ((index < lov->lov_tgt_size) && lov->lov_tgts[index]) { - tgt = lov->lov_tgts[index]; - CERROR("UUID %s already assigned at LOV target index %d\n", - obd_uuid2str(&tgt->ltd_uuid), index); - mutex_unlock(&lov->lov_lock); - return -EEXIST; - } - - if (index >= lov->lov_tgt_size) { - /* We need to reallocate the lov target array. 
*/ - struct lov_tgt_desc **newtgts, **old = NULL; - __u32 newsize, oldsize = 0; - - newsize = max_t(__u32, lov->lov_tgt_size, 2); - while (newsize < index + 1) - newsize <<= 1; - newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); - if (!newtgts) { - mutex_unlock(&lov->lov_lock); - return -ENOMEM; - } - - if (lov->lov_tgt_size) { - memcpy(newtgts, lov->lov_tgts, sizeof(*newtgts) * - lov->lov_tgt_size); - old = lov->lov_tgts; - oldsize = lov->lov_tgt_size; - } - - lov->lov_tgts = newtgts; - lov->lov_tgt_size = newsize; - smp_rmb(); - kfree(old); - - CDEBUG(D_CONFIG, "tgts: %p size: %d\n", - lov->lov_tgts, lov->lov_tgt_size); - } - - tgt = kzalloc(sizeof(*tgt), GFP_NOFS); - if (!tgt) { - mutex_unlock(&lov->lov_lock); - return -ENOMEM; - } - - rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size); - if (rc) { - mutex_unlock(&lov->lov_lock); - kfree(tgt); - return rc; - } - - tgt->ltd_uuid = *uuidp; - tgt->ltd_obd = tgt_obd; - /* XXX - add a sanity check on the generation number. */ - tgt->ltd_gen = gen; - tgt->ltd_index = index; - tgt->ltd_activate = active; - lov->lov_tgts[index] = tgt; - if (index >= lov->desc.ld_tgt_count) - lov->desc.ld_tgt_count = index + 1; - - mutex_unlock(&lov->lov_lock); - - CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n", - index, tgt->ltd_gen, lov->desc.ld_tgt_count); - - if (lov->lov_connects == 0) { - /* lov_connect hasn't been called yet. We'll do the - * lov_connect_obd on this target when that fn first runs, - * because we don't know the connect flags yet. 
- */ - return 0; - } - - obd_getref(obd); - - rc = lov_connect_obd(obd, index, active, &lov->lov_ocd); - if (rc) - goto out; - - /* connect to administrative disabled ost */ - if (!tgt->ltd_exp) { - rc = 0; - goto out; - } - - if (lov->lov_cache) { - rc = obd_set_info_async(NULL, tgt->ltd_exp, - sizeof(KEY_CACHE_SET), KEY_CACHE_SET, - sizeof(struct cl_client_cache), - lov->lov_cache, NULL); - if (rc < 0) - goto out; - } - - rc = lov_notify(obd, tgt->ltd_exp->exp_obd, - active ? OBD_NOTIFY_CONNECT : OBD_NOTIFY_INACTIVE, - (void *)&index); - -out: - if (rc) { - CERROR("add failed (%d), deleting %s\n", rc, - obd_uuid2str(&tgt->ltd_uuid)); - lov_del_target(obd, index, NULL, 0); - } - obd_putref(obd); - return rc; -} - -/* Schedule a target for deletion */ -int lov_del_target(struct obd_device *obd, __u32 index, - struct obd_uuid *uuidp, int gen) -{ - struct lov_obd *lov = &obd->u.lov; - int count = lov->desc.ld_tgt_count; - int rc = 0; - - if (index >= count) { - CERROR("LOV target index %d >= number of LOV OBDs %d.\n", - index, count); - return -EINVAL; - } - - /* to make sure there's no ongoing lov_notify() now */ - down_write(&lov->lov_notify_lock); - obd_getref(obd); - - if (!lov->lov_tgts[index]) { - CERROR("LOV target at index %d is not setup.\n", index); - rc = -EINVAL; - goto out; - } - - if (uuidp && !obd_uuid_equals(uuidp, &lov->lov_tgts[index]->ltd_uuid)) { - CERROR("LOV target UUID %s at index %d doesn't match %s.\n", - lov_uuid2str(lov, index), index, - obd_uuid2str(uuidp)); - rc = -EINVAL; - goto out; - } - - CDEBUG(D_CONFIG, "uuid: %s idx: %d gen: %d exp: %p active: %d\n", - lov_uuid2str(lov, index), index, - lov->lov_tgts[index]->ltd_gen, lov->lov_tgts[index]->ltd_exp, - lov->lov_tgts[index]->ltd_active); - - lov->lov_tgts[index]->ltd_reap = 1; - lov->lov_death_row++; - /* we really delete it from obd_putref */ -out: - obd_putref(obd); - up_write(&lov->lov_notify_lock); - - return rc; -} - -static void __lov_del_obd(struct obd_device *obd, struct 
lov_tgt_desc *tgt) -{ - struct obd_device *osc_obd; - - LASSERT(tgt); - LASSERT(tgt->ltd_reap); - - osc_obd = class_exp2obd(tgt->ltd_exp); - - CDEBUG(D_CONFIG, "Removing tgt %s : %s\n", - tgt->ltd_uuid.uuid, - osc_obd ? osc_obd->obd_name : ""); - - if (tgt->ltd_exp) - lov_disconnect_obd(obd, tgt); - - kfree(tgt); - - /* Manual cleanup - no cleanup logs to clean up the osc's. We must - * do it ourselves. And we can't do it from lov_cleanup, - * because we just lost our only reference to it. - */ - if (osc_obd) - class_manual_cleanup(osc_obd); -} - -void lov_fix_desc_stripe_size(__u64 *val) -{ - if (*val < LOV_MIN_STRIPE_SIZE) { - if (*val != 0) - LCONSOLE_INFO("Increasing default stripe size to minimum %u\n", - LOV_DESC_STRIPE_SIZE_DEFAULT); - *val = LOV_DESC_STRIPE_SIZE_DEFAULT; - } else if (*val & (LOV_MIN_STRIPE_SIZE - 1)) { - *val &= ~(LOV_MIN_STRIPE_SIZE - 1); - LCONSOLE_WARN("Changing default stripe size to %llu (a multiple of %u)\n", - *val, LOV_MIN_STRIPE_SIZE); - } -} - -void lov_fix_desc_stripe_count(__u32 *val) -{ - if (*val == 0) - *val = 1; -} - -void lov_fix_desc_pattern(__u32 *val) -{ - /* from lov_setstripe */ - if ((*val != 0) && (*val != LOV_PATTERN_RAID0)) { - LCONSOLE_WARN("Unknown stripe pattern: %#x\n", *val); - *val = 0; - } -} - -void lov_fix_desc_qos_maxage(__u32 *val) -{ - if (*val == 0) - *val = LOV_DESC_QOS_MAXAGE_DEFAULT; -} - -void lov_fix_desc(struct lov_desc *desc) -{ - lov_fix_desc_stripe_size(&desc->ld_default_stripe_size); - lov_fix_desc_stripe_count(&desc->ld_default_stripe_count); - lov_fix_desc_pattern(&desc->ld_pattern); - lov_fix_desc_qos_maxage(&desc->ld_qos_maxage); -} - -int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - struct lprocfs_static_vars lvars = { NULL }; - struct lov_desc *desc; - struct lov_obd *lov = &obd->u.lov; - int rc; - - if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) { - CERROR("LOV setup requires a descriptor\n"); - return -EINVAL; - } - - desc = (struct lov_desc *)lustre_cfg_buf(lcfg, 1); - - if 
(sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) { - CERROR("descriptor size wrong: %d > %d\n", - (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1)); - return -EINVAL; - } - - if (desc->ld_magic != LOV_DESC_MAGIC) { - if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) { - CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n", - obd->obd_name, desc); - lustre_swab_lov_desc(desc); - } else { - CERROR("%s: Bad lov desc magic: %#x\n", - obd->obd_name, desc->ld_magic); - return -EINVAL; - } - } - - lov_fix_desc(desc); - - desc->ld_active_tgt_count = 0; - lov->desc = *desc; - lov->lov_tgt_size = 0; - - mutex_init(&lov->lov_lock); - atomic_set(&lov->lov_refcount, 0); - lov->lov_sp_me = LUSTRE_SP_CLI; - - init_rwsem(&lov->lov_notify_lock); - - INIT_LIST_HEAD(&lov->lov_pool_list); - lov->lov_pool_count = 0; - rc = lov_pool_hash_init(&lov->lov_pools_hash_body); - if (rc) - goto out; - rc = lov_ost_pool_init(&lov->lov_packed, 0); - if (rc) - goto out; - - lprocfs_lov_init_vars(&lvars); - lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars); - - debugfs_create_file("target_obd", 0444, obd->obd_debugfs_entry, obd, - &lov_proc_target_fops); - - lov->lov_pool_debugfs_entry = debugfs_create_dir("pools", - obd->obd_debugfs_entry); - return 0; - -out: - return rc; -} - -static int lov_cleanup(struct obd_device *obd) -{ - struct lov_obd *lov = &obd->u.lov; - struct pool_desc *pool, *tmp; - - list_for_each_entry_safe(pool, tmp, &lov->lov_pool_list, pool_list) { - /* free pool structs */ - CDEBUG(D_INFO, "delete pool %p\n", pool); - /* In the function below, .hs_keycmp resolves to - * pool_hashkey_keycmp() - */ - /* coverity[overrun-buffer-val] */ - lov_pool_del(obd, pool->pool_name); - } - lov_pool_hash_destroy(&lov->lov_pools_hash_body); - lov_ost_pool_free(&lov->lov_packed); - - lprocfs_obd_cleanup(obd); - if (lov->lov_tgts) { - int i; - - obd_getref(obd); - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - if (!lov->lov_tgts[i]) - continue; - - /* Inactive targets may never have connected */ - if 
(lov->lov_tgts[i]->ltd_active || - atomic_read(&lov->lov_refcount)) - /* We should never get here - these - * should have been removed in the - * disconnect. - */ - CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n", - i, lov->lov_death_row, - atomic_read(&lov->lov_refcount)); - lov_del_target(obd, i, NULL, 0); - } - obd_putref(obd); - kfree(lov->lov_tgts); - lov->lov_tgt_size = 0; - } - - if (lov->lov_cache) { - cl_cache_decref(lov->lov_cache); - lov->lov_cache = NULL; - } - - return 0; -} - -int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg, - __u32 *indexp, int *genp) -{ - struct obd_uuid obd_uuid; - int cmd; - int rc = 0; - - switch (cmd = lcfg->lcfg_command) { - case LCFG_LOV_ADD_OBD: - case LCFG_LOV_ADD_INA: - case LCFG_LOV_DEL_OBD: { - __u32 index; - int gen; - /* lov_modify_tgts add 0:lov_mdsA 1:ost1_UUID 2:0 3:1 */ - if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) { - rc = -EINVAL; - goto out; - } - - obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1)); - - rc = kstrtoint(lustre_cfg_buf(lcfg, 2), 10, indexp); - if (rc < 0) - goto out; - rc = kstrtoint(lustre_cfg_buf(lcfg, 3), 10, genp); - if (rc < 0) - goto out; - index = *indexp; - gen = *genp; - if (cmd == LCFG_LOV_ADD_OBD) - rc = lov_add_target(obd, &obd_uuid, index, gen, 1); - else if (cmd == LCFG_LOV_ADD_INA) - rc = lov_add_target(obd, &obd_uuid, index, gen, 0); - else - rc = lov_del_target(obd, index, &obd_uuid, gen); - goto out; - } - case LCFG_PARAM: { - struct lprocfs_static_vars lvars = { NULL }; - struct lov_desc *desc = &obd->u.lov.desc; - - if (!desc) { - rc = -EINVAL; - goto out; - } - - lprocfs_lov_init_vars(&lvars); - - rc = class_process_proc_param(PARAM_LOV, lvars.obd_vars, - lcfg, obd); - if (rc > 0) - rc = 0; - goto out; - } - case LCFG_POOL_NEW: - case LCFG_POOL_ADD: - case LCFG_POOL_DEL: - case LCFG_POOL_REM: - goto out; - - default: { - CERROR("Unknown command: %d\n", lcfg->lcfg_command); - rc = -EINVAL; - goto out; - } - } -out: - return rc; -} - 
-static int -lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc) -{ - struct lov_request_set *lovset = (struct lov_request_set *)data; - int err; - - if (rc) - atomic_set(&lovset->set_completes, 0); - - err = lov_fini_statfs_set(lovset); - return rc ? rc : err; -} - -static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo, - __u64 max_age, struct ptlrpc_request_set *rqset) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lov_request_set *set; - struct lov_request *req; - struct lov_obd *lov; - int rc = 0; - - LASSERT(oinfo->oi_osfs); - - lov = &obd->u.lov; - rc = lov_prep_statfs_set(obd, oinfo, &set); - if (rc) - return rc; - - list_for_each_entry(req, &set->set_list, rq_link) { - rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp, - &req->rq_oi, max_age, rqset); - if (rc) - break; - } - - if (rc || list_empty(&rqset->set_requests)) { - int err; - - if (rc) - atomic_set(&set->set_completes, 0); - err = lov_fini_statfs_set(set); - return rc ? 
rc : err; - } - - LASSERT(!rqset->set_interpret); - rqset->set_interpret = lov_statfs_interpret; - rqset->set_arg = (void *)set; - return 0; -} - -static int lov_statfs(const struct lu_env *env, struct obd_export *exp, - struct obd_statfs *osfs, __u64 max_age, __u32 flags) -{ - struct ptlrpc_request_set *set = NULL; - struct obd_info oinfo = { - .oi_osfs = osfs, - .oi_flags = flags, - }; - int rc = 0; - - /* for obdclass we forbid using obd_statfs_rqset, but prefer using async - * statfs requests - */ - set = ptlrpc_prep_set(); - if (!set) - return -ENOMEM; - - rc = lov_statfs_async(exp, &oinfo, max_age, set); - if (rc == 0) - rc = ptlrpc_set_wait(set); - ptlrpc_set_destroy(set); - - return rc; -} - -static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void __user *uarg) -{ - struct obd_device *obddev = class_exp2obd(exp); - struct lov_obd *lov = &obddev->u.lov; - int i = 0, rc = 0, count = lov->desc.ld_tgt_count; - struct obd_uuid *uuidp; - - switch (cmd) { - case IOC_OBD_STATFS: { - struct obd_ioctl_data *data = karg; - struct obd_device *osc_obd; - struct obd_statfs stat_buf = {0}; - __u32 index; - __u32 flags; - - memcpy(&index, data->ioc_inlbuf2, sizeof(__u32)); - if (index >= count) - return -ENODEV; - - if (!lov->lov_tgts[index]) - /* Try again with the next index */ - return -EAGAIN; - if (!lov->lov_tgts[index]->ltd_active) - return -ENODATA; - - osc_obd = class_exp2obd(lov->lov_tgts[index]->ltd_exp); - if (!osc_obd) - return -EINVAL; - - /* copy UUID */ - if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd), - min_t(unsigned long, data->ioc_plen2, - sizeof(struct obd_uuid)))) - return -EFAULT; - - memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32)); - flags = flags & LL_STATFS_NODELAY ? 
OBD_STATFS_NODELAY : 0; - - /* got statfs data */ - rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - flags); - if (rc) - return rc; - if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min_t(unsigned long, data->ioc_plen1, - sizeof(stat_buf)))) - return -EFAULT; - break; - } - case OBD_IOC_LOV_GET_CONFIG: { - struct obd_ioctl_data *data; - struct lov_desc *desc; - char *buf = NULL; - __u32 *genp; - - len = 0; - if (obd_ioctl_getdata(&buf, &len, uarg)) - return -EINVAL; - - data = (struct obd_ioctl_data *)buf; - - if (sizeof(*desc) > data->ioc_inllen1) { - kvfree(buf); - return -EINVAL; - } - - if (sizeof(uuidp->uuid) * count > data->ioc_inllen2) { - kvfree(buf); - return -EINVAL; - } - - if (sizeof(__u32) * count > data->ioc_inllen3) { - kvfree(buf); - return -EINVAL; - } - - desc = (struct lov_desc *)data->ioc_inlbuf1; - memcpy(desc, &lov->desc, sizeof(*desc)); - - uuidp = (struct obd_uuid *)data->ioc_inlbuf2; - genp = (__u32 *)data->ioc_inlbuf3; - /* the uuid will be empty for deleted OSTs */ - for (i = 0; i < count; i++, uuidp++, genp++) { - if (!lov->lov_tgts[i]) - continue; - *uuidp = lov->lov_tgts[i]->ltd_uuid; - *genp = lov->lov_tgts[i]->ltd_gen; - } - - if (copy_to_user(uarg, buf, len)) - rc = -EFAULT; - kvfree(buf); - break; - } - case OBD_IOC_QUOTACTL: { - struct if_quotactl *qctl = karg; - struct lov_tgt_desc *tgt = NULL; - struct obd_quotactl *oqctl; - - if (qctl->qc_valid == QC_OSTIDX) { - if (count <= qctl->qc_idx) - return -EINVAL; - - tgt = lov->lov_tgts[qctl->qc_idx]; - if (!tgt || !tgt->ltd_exp) - return -EINVAL; - } else if (qctl->qc_valid == QC_UUID) { - for (i = 0; i < count; i++) { - tgt = lov->lov_tgts[i]; - if (!tgt || - !obd_uuid_equals(&tgt->ltd_uuid, - &qctl->obd_uuid)) - continue; - - if (!tgt->ltd_exp) - return -EINVAL; - - break; - } - } else { - return -EINVAL; - } - - if (i >= count) - return -EAGAIN; - - LASSERT(tgt && tgt->ltd_exp); - oqctl = kzalloc(sizeof(*oqctl), 
GFP_NOFS); - if (!oqctl) - return -ENOMEM; - - QCTL_COPY(oqctl, qctl); - rc = obd_quotactl(tgt->ltd_exp, oqctl); - if (rc == 0) { - QCTL_COPY(qctl, oqctl); - qctl->qc_valid = QC_OSTIDX; - qctl->obd_uuid = tgt->ltd_uuid; - } - kfree(oqctl); - break; - } - default: { - int set = 0; - - if (count == 0) - return -ENOTTY; - - for (i = 0; i < count; i++) { - int err; - struct obd_device *osc_obd; - - /* OST was disconnected */ - if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_exp) - continue; - - /* ll_umount_begin() sets force flag but for lov, not - * osc. Let's pass it through - */ - osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp); - osc_obd->obd_force = obddev->obd_force; - err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp, - len, karg, uarg); - if (err) { - if (lov->lov_tgts[i]->ltd_active) { - CDEBUG(err == -ENOTTY ? - D_IOCTL : D_WARNING, - "iocontrol OSC %s on OST idx %d cmd %x: err = %d\n", - lov_uuid2str(lov, i), - i, cmd, err); - if (!rc) - rc = err; - } - } else { - set = 1; - } - } - if (!set && !rc) - rc = -EIO; - } - } - - return rc; -} - -static int lov_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val) -{ - struct obd_device *obddev = class_exp2obd(exp); - struct lov_obd *lov = &obddev->u.lov; - struct lov_desc *ld = &lov->desc; - int rc = 0; - - if (!vallen || !val) - return -EFAULT; - - obd_getref(obddev); - - if (KEY_IS(KEY_MAX_EASIZE)) { - u32 max_stripe_count = min_t(u32, ld->ld_active_tgt_count, - LOV_MAX_STRIPE_COUNT); - - *((u32 *)val) = lov_mds_md_size(max_stripe_count, LOV_MAGIC_V3); - } else if (KEY_IS(KEY_DEFAULT_EASIZE)) { - u32 def_stripe_count = min_t(u32, ld->ld_default_stripe_count, - LOV_MAX_STRIPE_COUNT); - - *((u32 *)val) = lov_mds_md_size(def_stripe_count, LOV_MAGIC_V3); - } else if (KEY_IS(KEY_TGT_COUNT)) { - *((int *)val) = lov->desc.ld_tgt_count; - } else { - rc = -EINVAL; - } - - obd_putref(obddev); - return rc; -} - -static int lov_set_info_async(const struct 
lu_env *env, struct obd_export *exp, - u32 keylen, void *key, u32 vallen, - void *val, struct ptlrpc_request_set *set) -{ - struct obd_device *obddev = class_exp2obd(exp); - struct lov_obd *lov = &obddev->u.lov; - u32 count; - int i, rc = 0, err; - struct lov_tgt_desc *tgt; - int do_inactive = 0, no_set = 0; - - if (!set) { - no_set = 1; - set = ptlrpc_prep_set(); - if (!set) - return -ENOMEM; - } - - obd_getref(obddev); - count = lov->desc.ld_tgt_count; - - if (KEY_IS(KEY_CHECKSUM)) { - do_inactive = 1; - } else if (KEY_IS(KEY_CACHE_SET)) { - LASSERT(!lov->lov_cache); - lov->lov_cache = val; - do_inactive = 1; - cl_cache_incref(lov->lov_cache); - } - - for (i = 0; i < count; i++) { - tgt = lov->lov_tgts[i]; - - /* OST was disconnected */ - if (!tgt || !tgt->ltd_exp) - continue; - - /* OST is inactive and we don't want inactive OSCs */ - if (!tgt->ltd_active && !do_inactive) - continue; - - err = obd_set_info_async(env, tgt->ltd_exp, keylen, key, - vallen, val, set); - if (!rc) - rc = err; - } - - obd_putref(obddev); - if (no_set) { - err = ptlrpc_set_wait(set); - if (!rc) - rc = err; - ptlrpc_set_destroy(set); - } - return rc; -} - -void lov_stripe_lock(struct lov_stripe_md *md) - __acquires(&md->lsm_lock) -{ - LASSERT(md->lsm_lock_owner != current->pid); - spin_lock(&md->lsm_lock); - LASSERT(md->lsm_lock_owner == 0); - md->lsm_lock_owner = current->pid; -} - -void lov_stripe_unlock(struct lov_stripe_md *md) - __releases(&md->lsm_lock) -{ - LASSERT(md->lsm_lock_owner == current->pid); - md->lsm_lock_owner = 0; - spin_unlock(&md->lsm_lock); -} - -static int lov_quotactl(struct obd_device *obd, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct lov_obd *lov = &obd->u.lov; - struct lov_tgt_desc *tgt; - __u64 curspace = 0; - __u64 bhardlimit = 0; - int i, rc = 0; - - if (oqctl->qc_cmd != Q_GETOQUOTA && - oqctl->qc_cmd != LUSTRE_Q_SETQUOTA) { - CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd); - return -EFAULT; - } - - /* for lov tgt */ - 
obd_getref(obd); - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - int err; - - tgt = lov->lov_tgts[i]; - - if (!tgt) - continue; - - if (!tgt->ltd_active || tgt->ltd_reap) { - if (oqctl->qc_cmd == Q_GETOQUOTA && - lov->lov_tgts[i]->ltd_activate) { - rc = -EREMOTEIO; - CERROR("ost %d is inactive\n", i); - } else { - CDEBUG(D_HA, "ost %d is inactive\n", i); - } - continue; - } - - err = obd_quotactl(tgt->ltd_exp, oqctl); - if (err) { - if (tgt->ltd_active && !rc) - rc = err; - continue; - } - - if (oqctl->qc_cmd == Q_GETOQUOTA) { - curspace += oqctl->qc_dqblk.dqb_curspace; - bhardlimit += oqctl->qc_dqblk.dqb_bhardlimit; - } - } - obd_putref(obd); - - if (oqctl->qc_cmd == Q_GETOQUOTA) { - oqctl->qc_dqblk.dqb_curspace = curspace; - oqctl->qc_dqblk.dqb_bhardlimit = bhardlimit; - } - return rc; -} - -static struct obd_ops lov_obd_ops = { - .owner = THIS_MODULE, - .setup = lov_setup, - .cleanup = lov_cleanup, - /*.process_config = lov_process_config,*/ - .connect = lov_connect, - .disconnect = lov_disconnect, - .statfs = lov_statfs, - .statfs_async = lov_statfs_async, - .iocontrol = lov_iocontrol, - .get_info = lov_get_info, - .set_info_async = lov_set_info_async, - .notify = lov_notify, - .pool_new = lov_pool_new, - .pool_rem = lov_pool_remove, - .pool_add = lov_pool_add, - .pool_del = lov_pool_del, - .getref = lov_getref, - .putref = lov_putref, - .quotactl = lov_quotactl, -}; - -struct kmem_cache *lov_oinfo_slab; - -static int __init lov_init(void) -{ - struct lprocfs_static_vars lvars = { NULL }; - int rc; - - /* print an address of _any_ initialized kernel symbol from this - * module, to allow debugging with gdb that doesn't support data - * symbols from modules. 
- */ - CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches); - - rc = libcfs_setup(); - if (rc) - return rc; - - rc = lu_kmem_init(lov_caches); - if (rc) - return rc; - - lov_oinfo_slab = kmem_cache_create("lov_oinfo", - sizeof(struct lov_oinfo), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (!lov_oinfo_slab) { - lu_kmem_fini(lov_caches); - return -ENOMEM; - } - lprocfs_lov_init_vars(&lvars); - - rc = class_register_type(&lov_obd_ops, NULL, - LUSTRE_LOV_NAME, &lov_device_type); - - if (rc) { - kmem_cache_destroy(lov_oinfo_slab); - lu_kmem_fini(lov_caches); - } - - return rc; -} - -static void /*__exit*/ lov_exit(void) -{ - class_unregister_type(LUSTRE_LOV_NAME); - kmem_cache_destroy(lov_oinfo_slab); - - lu_kmem_fini(lov_caches); -} - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Lustre Logical Object Volume"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(LUSTRE_VERSION_STRING); - -module_init(lov_init); -module_exit(lov_exit); diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c deleted file mode 100644 index adc90f310fd7..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_object.c +++ /dev/null @@ -1,1625 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_object for LOV layer. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -static inline struct lov_device *lov_object_dev(struct lov_object *obj) -{ - return lu2lov_dev(obj->lo_cl.co_lu.lo_dev); -} - -/** \addtogroup lov - * @{ - */ - -/***************************************************************************** - * - * Layout operations. - * - */ - -struct lov_layout_operations { - int (*llo_init)(const struct lu_env *env, struct lov_device *dev, - struct lov_object *lov, struct lov_stripe_md *lsm, - const struct cl_object_conf *conf, - union lov_layout_state *state); - int (*llo_delete)(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state); - void (*llo_fini)(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state); - void (*llo_install)(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state); - int (*llo_print)(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o); - int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index); - int (*llo_lock_init)(const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, - const struct cl_io *io); - int (*llo_io_init)(const struct lu_env *env, - struct cl_object *obj, struct cl_io *io); - int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj, - struct 
cl_attr *attr); -}; - -static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov); - -static void lov_lsm_put(struct lov_stripe_md *lsm) -{ - if (lsm) - lov_free_memmd(&lsm); -} - -/***************************************************************************** - * - * Lov object layout operations. - * - */ - -static void lov_install_empty(const struct lu_env *env, - struct lov_object *lov, - union lov_layout_state *state) -{ - /* - * File without objects. - */ -} - -static int lov_init_empty(const struct lu_env *env, struct lov_device *dev, - struct lov_object *lov, struct lov_stripe_md *lsm, - const struct cl_object_conf *conf, - union lov_layout_state *state) -{ - return 0; -} - -static void lov_install_raid0(const struct lu_env *env, - struct lov_object *lov, - union lov_layout_state *state) -{ -} - -static struct cl_object *lov_sub_find(const struct lu_env *env, - struct cl_device *dev, - const struct lu_fid *fid, - const struct cl_object_conf *conf) -{ - struct lu_object *o; - - o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu); - LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type)); - return lu2cl(o); -} - -static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, - struct cl_object *stripe, struct lov_layout_raid0 *r0, - int idx) -{ - struct cl_object_header *hdr; - struct cl_object_header *subhdr; - struct cl_object_header *parent; - struct lov_oinfo *oinfo; - int result; - - if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) { - /* For sanity:test_206. - * Do not leave the object in cache to avoid accessing - * freed memory. This is because osc_object is referring to - * lov_oinfo of lsm_stripe_data which will be freed due to - * this failure. 
- */ - cl_object_kill(env, stripe); - cl_object_put(env, stripe); - return -EIO; - } - - hdr = cl_object_header(lov2cl(lov)); - subhdr = cl_object_header(stripe); - - oinfo = lov->lo_lsm->lsm_oinfo[idx]; - CDEBUG(D_INODE, DFID "@%p[%d] -> " DFID "@%p: ostid: " DOSTID " idx: %d gen: %d\n", - PFID(&subhdr->coh_lu.loh_fid), subhdr, idx, - PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi), - oinfo->loi_ost_idx, oinfo->loi_ost_gen); - - /* reuse ->coh_attr_guard to protect coh_parent change */ - spin_lock(&subhdr->coh_attr_guard); - parent = subhdr->coh_parent; - if (!parent) { - subhdr->coh_parent = hdr; - spin_unlock(&subhdr->coh_attr_guard); - subhdr->coh_nesting = hdr->coh_nesting + 1; - lu_object_ref_add(&stripe->co_lu, "lov-parent", lov); - r0->lo_sub[idx] = cl2lovsub(stripe); - r0->lo_sub[idx]->lso_super = lov; - r0->lo_sub[idx]->lso_index = idx; - result = 0; - } else { - struct lu_object *old_obj; - struct lov_object *old_lov; - unsigned int mask = D_INODE; - - spin_unlock(&subhdr->coh_attr_guard); - old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type); - LASSERT(old_obj); - old_lov = cl2lov(lu2cl(old_obj)); - if (old_lov->lo_layout_invalid) { - /* the object's layout has already changed but isn't - * refreshed - */ - lu_object_unhash(env, &stripe->co_lu); - result = -EAGAIN; - } else { - mask = D_ERROR; - result = -EIO; - } - - LU_OBJECT_DEBUG(mask, env, &stripe->co_lu, - "stripe %d is already owned.", idx); - LU_OBJECT_DEBUG(mask, env, old_obj, "owned."); - LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n"); - cl_object_put(env, stripe); - } - return result; -} - -static int lov_page_slice_fixup(struct lov_object *lov, - struct cl_object *stripe) -{ - struct cl_object_header *hdr = cl_object_header(&lov->lo_cl); - struct cl_object *o; - - if (!stripe) - return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off - - cfs_size_round(sizeof(struct lov_page)); - - cl_object_for_each(o, stripe) - o->co_slice_off += hdr->coh_page_bufsize; - - 
return cl_object_header(stripe)->coh_page_bufsize; -} - -static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev, - struct lov_object *lov, struct lov_stripe_md *lsm, - const struct cl_object_conf *conf, - union lov_layout_state *state) -{ - int result; - int i; - - struct cl_object *stripe; - struct lov_thread_info *lti = lov_env_info(env); - struct cl_object_conf *subconf = <i->lti_stripe_conf; - struct lu_fid *ofid = <i->lti_fid; - struct lov_layout_raid0 *r0 = &state->raid0; - - if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { - dump_lsm(D_ERROR, lsm); - LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n", - LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic); - } - - LASSERT(!lov->lo_lsm); - lov->lo_lsm = lsm_addref(lsm); - lov->lo_layout_invalid = true; - r0->lo_nr = lsm->lsm_stripe_count; - LASSERT(r0->lo_nr <= lov_targets_nr(dev)); - - r0->lo_sub = kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]), - GFP_NOFS); - if (r0->lo_sub) { - int psz = 0; - - result = 0; - subconf->coc_inode = conf->coc_inode; - spin_lock_init(&r0->lo_sub_lock); - /* - * Create stripe cl_objects. 
- */ - for (i = 0; i < r0->lo_nr && result == 0; ++i) { - struct cl_device *subdev; - struct lov_oinfo *oinfo = lsm->lsm_oinfo[i]; - int ost_idx = oinfo->loi_ost_idx; - - if (lov_oinfo_is_dummy(oinfo)) - continue; - - result = ostid_to_fid(ofid, &oinfo->loi_oi, - oinfo->loi_ost_idx); - if (result != 0) - goto out; - - if (!dev->ld_target[ost_idx]) { - CERROR("%s: OST %04x is not initialized\n", - lov2obd(dev->ld_lov)->obd_name, ost_idx); - result = -EIO; - goto out; - } - - subdev = lovsub2cl_dev(dev->ld_target[ost_idx]); - subconf->u.coc_oinfo = oinfo; - LASSERTF(subdev, "not init ost %d\n", ost_idx); - /* In the function below, .hs_keycmp resolves to - * lu_obj_hop_keycmp() - */ - /* coverity[overrun-buffer-val] */ - stripe = lov_sub_find(env, subdev, ofid, subconf); - if (!IS_ERR(stripe)) { - result = lov_init_sub(env, lov, stripe, r0, i); - if (result == -EAGAIN) { /* try again */ - --i; - result = 0; - continue; - } - } else { - result = PTR_ERR(stripe); - } - - if (result == 0) { - int sz = lov_page_slice_fixup(lov, stripe); - - LASSERT(ergo(psz > 0, psz == sz)); - psz = sz; - } - } - if (result == 0) - cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz; - } else { - result = -ENOMEM; - } -out: - return result; -} - -static int lov_init_released(const struct lu_env *env, struct lov_device *dev, - struct lov_object *lov, struct lov_stripe_md *lsm, - const struct cl_object_conf *conf, - union lov_layout_state *state) -{ - LASSERT(lsm); - LASSERT(lsm_is_released(lsm)); - LASSERT(!lov->lo_lsm); - - lov->lo_lsm = lsm_addref(lsm); - return 0; -} - -static struct cl_object *lov_find_subobj(const struct lu_env *env, - struct lov_object *lov, - struct lov_stripe_md *lsm, - int stripe_idx) -{ - struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev); - struct lov_oinfo *oinfo = lsm->lsm_oinfo[stripe_idx]; - struct lov_thread_info *lti = lov_env_info(env); - struct lu_fid *ofid = <i->lti_fid; - struct cl_device *subdev; - struct cl_object *result; - int ost_idx; - 
int rc; - - if (lov->lo_type != LLT_RAID0) { - result = NULL; - goto out; - } - - ost_idx = oinfo->loi_ost_idx; - rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx); - if (rc) { - result = NULL; - goto out; - } - - subdev = lovsub2cl_dev(dev->ld_target[ost_idx]); - result = lov_sub_find(env, subdev, ofid, NULL); -out: - if (!result) - result = ERR_PTR(-EINVAL); - return result; -} - -static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) -{ - LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED); - - lov_layout_wait(env, lov); - return 0; -} - -static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, - struct lovsub_object *los, int idx) -{ - struct cl_object *sub; - struct lov_layout_raid0 *r0; - struct lu_site *site; - wait_queue_head_t *wq; - wait_queue_entry_t *waiter; - - r0 = &lov->u.raid0; - LASSERT(r0->lo_sub[idx] == los); - - sub = lovsub2cl(los); - site = sub->co_lu.lo_dev->ld_site; - wq = lu_site_wq_from_fid(site, &sub->co_lu.lo_header->loh_fid); - - cl_object_kill(env, sub); - /* release a reference to the sub-object and ... */ - lu_object_ref_del(&sub->co_lu, "lov-parent", lov); - cl_object_put(env, sub); - - /* ... wait until it is actually destroyed---sub-object clears its - * ->lo_sub[] slot in lovsub_object_fini() - */ - if (r0->lo_sub[idx] == los) { - waiter = &lov_env_info(env)->lti_waiter; - init_waitqueue_entry(waiter, current); - add_wait_queue(wq, waiter); - set_current_state(TASK_UNINTERRUPTIBLE); - while (1) { - /* this wait-queue is signaled at the end of - * lu_object_free(). 
- */ - set_current_state(TASK_UNINTERRUPTIBLE); - spin_lock(&r0->lo_sub_lock); - if (r0->lo_sub[idx] == los) { - spin_unlock(&r0->lo_sub_lock); - schedule(); - } else { - spin_unlock(&r0->lo_sub_lock); - set_current_state(TASK_RUNNING); - break; - } - } - remove_wait_queue(wq, waiter); - } - LASSERT(!r0->lo_sub[idx]); -} - -static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) -{ - struct lov_layout_raid0 *r0 = &state->raid0; - struct lov_stripe_md *lsm = lov->lo_lsm; - int i; - - dump_lsm(D_INODE, lsm); - - lov_layout_wait(env, lov); - if (r0->lo_sub) { - for (i = 0; i < r0->lo_nr; ++i) { - struct lovsub_object *los = r0->lo_sub[i]; - - if (los) { - cl_object_prune(env, &los->lso_cl); - /* - * If top-level object is to be evicted from - * the cache, so are its sub-objects. - */ - lov_subobject_kill(env, lov, los, i); - } - } - } - return 0; -} - -static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) -{ - LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED); -} - -static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) -{ - struct lov_layout_raid0 *r0 = &state->raid0; - - if (r0->lo_sub) { - kvfree(r0->lo_sub); - r0->lo_sub = NULL; - } - - dump_lsm(D_INODE, lov->lo_lsm); - lov_free_memmd(&lov->lo_lsm); -} - -static void lov_fini_released(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) -{ - dump_lsm(D_INODE, lov->lo_lsm); - lov_free_memmd(&lov->lo_lsm); -} - -static int lov_print_empty(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) -{ - (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid); - return 0; -} - -static int lov_print_raid0(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) -{ - struct lov_object *lov = lu2lov(o); - struct lov_layout_raid0 *r0 = lov_r0(lov); - 
struct lov_stripe_md *lsm = lov->lo_lsm; - int i; - - (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n", - r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm, - lsm->lsm_magic, atomic_read(&lsm->lsm_refc), - lsm->lsm_stripe_count, lsm->lsm_layout_gen); - for (i = 0; i < r0->lo_nr; ++i) { - struct lu_object *sub; - - if (r0->lo_sub[i]) { - sub = lovsub2lu(r0->lo_sub[i]); - lu_object_print(env, cookie, p, sub); - } else { - (*p)(env, cookie, "sub %d absent\n", i); - } - } - return 0; -} - -static int lov_print_released(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) -{ - struct lov_object *lov = lu2lov(o); - struct lov_stripe_md *lsm = lov->lo_lsm; - - (*p)(env, cookie, - "released: %s, lsm{%p 0x%08X %d %u %u}:\n", - lov->lo_layout_invalid ? "invalid" : "valid", lsm, - lsm->lsm_magic, atomic_read(&lsm->lsm_refc), - lsm->lsm_stripe_count, lsm->lsm_layout_gen); - return 0; -} - -/** - * Implements cl_object_operations::coo_attr_get() method for an object - * without stripes (LLT_EMPTY layout type). - * - * The only attributes this layer is authoritative in this case is - * cl_attr::cat_blocks---it's 0. - */ -static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr) -{ - attr->cat_blocks = 0; - return 0; -} - -static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr) -{ - struct lov_object *lov = cl2lov(obj); - struct lov_layout_raid0 *r0 = lov_r0(lov); - struct cl_attr *lov_attr = &r0->lo_attr; - int result = 0; - - /* this is called w/o holding type guard mutex, so it must be inside - * an on going IO otherwise lsm may be replaced. - * LU-2117: it turns out there exists one exception. For mmaped files, - * the lock of those files may be requested in the other file's IO - * context, and this function is called in ccc_lock_state(), it will - * hit this assertion. 
- * Anyway, it's still okay to call attr_get w/o type guard as layout - * can't go if locks exist. - */ - /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */ - - if (!r0->lo_attr_valid) { - struct lov_stripe_md *lsm = lov->lo_lsm; - struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb; - __u64 kms = 0; - - memset(lvb, 0, sizeof(*lvb)); - /* XXX: timestamps can be negative by sanity:test_39m, - * how can it be? - */ - lvb->lvb_atime = LLONG_MIN; - lvb->lvb_ctime = LLONG_MIN; - lvb->lvb_mtime = LLONG_MIN; - - /* - * XXX that should be replaced with a loop over sub-objects, - * doing cl_object_attr_get() on them. But for now, let's - * reuse old lov code. - */ - - /* - * XXX take lsm spin-lock to keep lov_merge_lvb_kms() - * happy. It's not needed, because new code uses - * ->coh_attr_guard spin-lock to protect consistency of - * sub-object attributes. - */ - lov_stripe_lock(lsm); - result = lov_merge_lvb_kms(lsm, lvb, &kms); - lov_stripe_unlock(lsm); - if (result == 0) { - cl_lvb2attr(lov_attr, lvb); - lov_attr->cat_kms = kms; - r0->lo_attr_valid = 1; - } - } - if (result == 0) { /* merge results */ - attr->cat_blocks = lov_attr->cat_blocks; - attr->cat_size = lov_attr->cat_size; - attr->cat_kms = lov_attr->cat_kms; - if (attr->cat_atime < lov_attr->cat_atime) - attr->cat_atime = lov_attr->cat_atime; - if (attr->cat_ctime < lov_attr->cat_ctime) - attr->cat_ctime = lov_attr->cat_ctime; - if (attr->cat_mtime < lov_attr->cat_mtime) - attr->cat_mtime = lov_attr->cat_mtime; - } - return result; -} - -static const struct lov_layout_operations lov_dispatch[] = { - [LLT_EMPTY] = { - .llo_init = lov_init_empty, - .llo_delete = lov_delete_empty, - .llo_fini = lov_fini_empty, - .llo_install = lov_install_empty, - .llo_print = lov_print_empty, - .llo_page_init = lov_page_init_empty, - .llo_lock_init = lov_lock_init_empty, - .llo_io_init = lov_io_init_empty, - .llo_getattr = lov_attr_get_empty - }, - [LLT_RAID0] = { - .llo_init = lov_init_raid0, - .llo_delete = lov_delete_raid0, - 
.llo_fini = lov_fini_raid0, - .llo_install = lov_install_raid0, - .llo_print = lov_print_raid0, - .llo_page_init = lov_page_init_raid0, - .llo_lock_init = lov_lock_init_raid0, - .llo_io_init = lov_io_init_raid0, - .llo_getattr = lov_attr_get_raid0 - }, - [LLT_RELEASED] = { - .llo_init = lov_init_released, - .llo_delete = lov_delete_empty, - .llo_fini = lov_fini_released, - .llo_install = lov_install_empty, - .llo_print = lov_print_released, - .llo_page_init = lov_page_init_empty, - .llo_lock_init = lov_lock_init_empty, - .llo_io_init = lov_io_init_released, - .llo_getattr = lov_attr_get_empty - } -}; - -/** - * Performs a double-dispatch based on the layout type of an object. - */ -#define LOV_2DISPATCH_NOLOCK(obj, op, ...) \ -({ \ - struct lov_object *__obj = (obj); \ - enum lov_layout_type __llt; \ - \ - __llt = __obj->lo_type; \ - LASSERT(__llt < ARRAY_SIZE(lov_dispatch)); \ - lov_dispatch[__llt].op(__VA_ARGS__); \ -}) - -/** - * Return lov_layout_type associated with a given lsm - */ -static enum lov_layout_type lov_type(struct lov_stripe_md *lsm) -{ - if (!lsm) - return LLT_EMPTY; - if (lsm_is_released(lsm)) - return LLT_RELEASED; - return LLT_RAID0; -} - -static inline void lov_conf_freeze(struct lov_object *lov) -{ - CDEBUG(D_INODE, "To take share lov(%p) owner %p/%p\n", - lov, lov->lo_owner, current); - if (lov->lo_owner != current) - down_read(&lov->lo_type_guard); -} - -static inline void lov_conf_thaw(struct lov_object *lov) -{ - CDEBUG(D_INODE, "To release share lov(%p) owner %p/%p\n", - lov, lov->lo_owner, current); - if (lov->lo_owner != current) - up_read(&lov->lo_type_guard); -} - -#define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) 
\ -({ \ - struct lov_object *__obj = (obj); \ - int __lock = !!(lock); \ - typeof(lov_dispatch[0].op(__VA_ARGS__)) __result; \ - \ - if (__lock) \ - lov_conf_freeze(__obj); \ - __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \ - if (__lock) \ - lov_conf_thaw(__obj); \ - __result; \ -}) - -/** - * Performs a locked double-dispatch based on the layout type of an object. - */ -#define LOV_2DISPATCH(obj, op, ...) \ - LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__) - -#define LOV_2DISPATCH_VOID(obj, op, ...) \ -do { \ - struct lov_object *__obj = (obj); \ - enum lov_layout_type __llt; \ - \ - lov_conf_freeze(__obj); \ - __llt = __obj->lo_type; \ - LASSERT(__llt < ARRAY_SIZE(lov_dispatch)); \ - lov_dispatch[__llt].op(__VA_ARGS__); \ - lov_conf_thaw(__obj); \ -} while (0) - -static void lov_conf_lock(struct lov_object *lov) -{ - LASSERT(lov->lo_owner != current); - down_write(&lov->lo_type_guard); - LASSERT(!lov->lo_owner); - lov->lo_owner = current; - CDEBUG(D_INODE, "Took exclusive lov(%p) owner %p\n", - lov, lov->lo_owner); -} - -static void lov_conf_unlock(struct lov_object *lov) -{ - CDEBUG(D_INODE, "To release exclusive lov(%p) owner %p\n", - lov, lov->lo_owner); - lov->lo_owner = NULL; - up_write(&lov->lo_type_guard); -} - -static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov) -{ - while (atomic_read(&lov->lo_active_ios) > 0) { - CDEBUG(D_INODE, "file:" DFID " wait for active IO, now: %d.\n", - PFID(lu_object_fid(lov2lu(lov))), - atomic_read(&lov->lo_active_ios)); - - wait_event_idle(lov->lo_waitq, - atomic_read(&lov->lo_active_ios) == 0); - } - return 0; -} - -static int lov_layout_change(const struct lu_env *unused, - struct lov_object *lov, struct lov_stripe_md *lsm, - const struct cl_object_conf *conf) -{ - struct lov_device *lov_dev = lov_object_dev(lov); - enum lov_layout_type llt = lov_type(lsm); - union lov_layout_state *state = &lov->u; - const struct lov_layout_operations *old_ops; - const struct lov_layout_operations 
*new_ops; - struct lu_env *env; - u16 refcheck; - int rc; - - LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch)); - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - LASSERT(llt < ARRAY_SIZE(lov_dispatch)); - - CDEBUG(D_INODE, DFID " from %s to %s\n", - PFID(lu_object_fid(lov2lu(lov))), - llt2str(lov->lo_type), llt2str(llt)); - - old_ops = &lov_dispatch[lov->lo_type]; - new_ops = &lov_dispatch[llt]; - - rc = cl_object_prune(env, &lov->lo_cl); - if (rc) - goto out; - - rc = old_ops->llo_delete(env, lov, &lov->u); - if (rc) - goto out; - - old_ops->llo_fini(env, lov, &lov->u); - - LASSERT(!atomic_read(&lov->lo_active_ios)); - - CDEBUG(D_INODE, DFID "Apply new layout lov %p, type %d\n", - PFID(lu_object_fid(lov2lu(lov))), lov, llt); - - lov->lo_type = LLT_EMPTY; - - /* page bufsize fixup */ - cl_object_header(&lov->lo_cl)->coh_page_bufsize -= - lov_page_slice_fixup(lov, NULL); - - rc = new_ops->llo_init(env, lov_dev, lov, lsm, conf, state); - if (rc) { - struct obd_device *obd = lov2obd(lov_dev->ld_lov); - - CERROR("%s: cannot apply new layout on " DFID " : rc = %d\n", - obd->obd_name, PFID(lu_object_fid(lov2lu(lov))), rc); - new_ops->llo_delete(env, lov, state); - new_ops->llo_fini(env, lov, state); - /* this file becomes an EMPTY file. */ - goto out; - } - - new_ops->llo_install(env, lov, state); - lov->lo_type = llt; -out: - cl_env_put(env, &refcheck); - return rc; -} - -/***************************************************************************** - * - * Lov object operations. 
- * - */ -int lov_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf) -{ - struct lov_object *lov = lu2lov(obj); - struct lov_device *dev = lov_object_dev(lov); - const struct cl_object_conf *cconf = lu2cl_conf(conf); - union lov_layout_state *set = &lov->u; - const struct lov_layout_operations *ops; - struct lov_stripe_md *lsm = NULL; - int rc; - - init_rwsem(&lov->lo_type_guard); - atomic_set(&lov->lo_active_ios, 0); - init_waitqueue_head(&lov->lo_waitq); - cl_object_page_init(lu2cl(obj), sizeof(struct lov_page)); - - lov->lo_type = LLT_EMPTY; - if (cconf->u.coc_layout.lb_buf) { - lsm = lov_unpackmd(dev->ld_lov, - cconf->u.coc_layout.lb_buf, - cconf->u.coc_layout.lb_len); - if (IS_ERR(lsm)) - return PTR_ERR(lsm); - } - - /* no locking is necessary, as object is being created */ - lov->lo_type = lov_type(lsm); - ops = &lov_dispatch[lov->lo_type]; - rc = ops->llo_init(env, dev, lov, lsm, cconf, set); - if (!rc) - ops->llo_install(env, lov, set); - - lov_lsm_put(lsm); - - return rc; -} - -static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf) -{ - struct lov_stripe_md *lsm = NULL; - struct lov_object *lov = cl2lov(obj); - int result = 0; - - if (conf->coc_opc == OBJECT_CONF_SET && - conf->u.coc_layout.lb_buf) { - lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov, - conf->u.coc_layout.lb_buf, - conf->u.coc_layout.lb_len); - if (IS_ERR(lsm)) - return PTR_ERR(lsm); - } - - lov_conf_lock(lov); - if (conf->coc_opc == OBJECT_CONF_INVALIDATE) { - lov->lo_layout_invalid = true; - result = 0; - goto out; - } - - if (conf->coc_opc == OBJECT_CONF_WAIT) { - if (lov->lo_layout_invalid && - atomic_read(&lov->lo_active_ios) > 0) { - lov_conf_unlock(lov); - result = lov_layout_wait(env, lov); - lov_conf_lock(lov); - } - goto out; - } - - LASSERT(conf->coc_opc == OBJECT_CONF_SET); - - if ((!lsm && !lov->lo_lsm) || - ((lsm && lov->lo_lsm) && - (lov->lo_lsm->lsm_layout_gen == 
lsm->lsm_layout_gen) && - (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) { - /* same version of layout */ - lov->lo_layout_invalid = false; - result = 0; - goto out; - } - - /* will change layout - check if there still exists active IO. */ - if (atomic_read(&lov->lo_active_ios) > 0) { - lov->lo_layout_invalid = true; - result = -EBUSY; - goto out; - } - - result = lov_layout_change(env, lov, lsm, conf); - lov->lo_layout_invalid = result != 0; - -out: - lov_conf_unlock(lov); - lov_lsm_put(lsm); - CDEBUG(D_INODE, DFID " lo_layout_invalid=%d\n", - PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid); - return result; -} - -static void lov_object_delete(const struct lu_env *env, struct lu_object *obj) -{ - struct lov_object *lov = lu2lov(obj); - - LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u); -} - -static void lov_object_free(const struct lu_env *env, struct lu_object *obj) -{ - struct lov_object *lov = lu2lov(obj); - - LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u); - lu_object_fini(obj); - kmem_cache_free(lov_object_kmem, lov); -} - -static int lov_object_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) -{ - return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o); -} - -int lov_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index) -{ - return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page, - index); -} - -/** - * Implements cl_object_operations::clo_io_init() method for lov - * layer. Dispatches to the appropriate layout io initialization method. 
- */ -int lov_io_init(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) -{ - CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl); - - CDEBUG(D_INODE, DFID "io %p type %d ignore/verify layout %d/%d\n", - PFID(lu_object_fid(&obj->co_lu)), io, io->ci_type, - io->ci_ignore_layout, io->ci_verify_layout); - - return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init, - !io->ci_ignore_layout, env, obj, io); -} - -/** - * An implementation of cl_object_operations::clo_attr_get() method for lov - * layer. For raid0 layout this collects and merges attributes of all - * sub-objects. - */ -static int lov_attr_get(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr) -{ - /* do not take lock, as this function is called under a - * spin-lock. Layout is protected from changing by ongoing IO. - */ - return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr); -} - -static int lov_attr_update(const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned int valid) -{ - /* - * No dispatch is required here, as no layout implements this. - */ - return 0; -} - -int lov_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io) -{ - /* No need to lock because we've taken one refcount of layout. */ - return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock, - io); -} - -/** - * We calculate on which OST the mapping will end. If the length of mapping - * is greater than (stripe_size * stripe_count) then the last_stripe will - * will be one just before start_stripe. Else we check if the mapping - * intersects each OST and find last_stripe. 
- * This function returns the last_stripe and also sets the stripe_count - * over which the mapping is spread - * - * \param lsm [in] striping information for the file - * \param fm_start [in] logical start of mapping - * \param fm_end [in] logical end of mapping - * \param start_stripe [in] starting stripe of the mapping - * \param stripe_count [out] the number of stripes across which to map is - * returned - * - * \retval last_stripe return the last stripe of the mapping - */ -static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, - u64 fm_start, u64 fm_end, - int start_stripe, int *stripe_count) -{ - int last_stripe; - u64 obd_start; - u64 obd_end; - int i, j; - - if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) { - last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 : - start_stripe - 1); - *stripe_count = lsm->lsm_stripe_count; - } else { - for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count; - i = (i + 1) % lsm->lsm_stripe_count, j++) { - if (!(lov_stripe_intersects(lsm, i, fm_start, fm_end, - &obd_start, &obd_end))) - break; - } - *stripe_count = j; - last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count; - } - - return last_stripe; -} - -/** - * Set fe_device and copy extents from local buffer into main return buffer. 
- * - * \param fiemap [out] fiemap to hold all extents - * \param lcl_fm_ext [in] array of fiemap extents get from OSC layer - * \param ost_index [in] OST index to be written into the fm_device - * field for each extent - * \param ext_count [in] number of extents to be copied - * \param current_extent [in] where to start copying in the extent array - */ -static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap, - struct fiemap_extent *lcl_fm_ext, - int ost_index, unsigned int ext_count, - int current_extent) -{ - unsigned int ext; - char *to; - - for (ext = 0; ext < ext_count; ext++) { - lcl_fm_ext[ext].fe_device = ost_index; - lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET; - } - - /* Copy fm_extent's from fm_local to return buffer */ - to = (char *)fiemap + fiemap_count_to_size(current_extent); - memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent)); -} - -#define FIEMAP_BUFFER_SIZE 4096 - -/** - * Non-zero fe_logical indicates that this is a continuation FIEMAP - * call. The local end offset and the device are sent in the first - * fm_extent. This function calculates the stripe number from the index. - * This function returns a stripe_no on which mapping is to be restarted. - * - * This function returns fm_end_offset which is the in-OST offset at which - * mapping should be restarted. If fm_end_offset=0 is returned then caller - * will re-calculate proper offset in next stripe. - * Note that the first extent is passed to lov_get_info via the value field. 
- * - * \param fiemap [in] fiemap request header - * \param lsm [in] striping information for the file - * \param fm_start [in] logical start of mapping - * \param fm_end [in] logical end of mapping - * \param start_stripe [out] starting stripe will be returned in this - */ -static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap, - struct lov_stripe_md *lsm, - u64 fm_start, u64 fm_end, - int *start_stripe) -{ - u64 local_end = fiemap->fm_extents[0].fe_logical; - u64 lun_start, lun_end; - u64 fm_end_offset; - int stripe_no = -1; - int i; - - if (!fiemap->fm_extent_count || !fiemap->fm_extents[0].fe_logical) - return 0; - - /* Find out stripe_no from ost_index saved in the fe_device */ - for (i = 0; i < lsm->lsm_stripe_count; i++) { - struct lov_oinfo *oinfo = lsm->lsm_oinfo[i]; - - if (lov_oinfo_is_dummy(oinfo)) - continue; - - if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) { - stripe_no = i; - break; - } - } - - if (stripe_no == -1) - return -EINVAL; - - /* - * If we have finished mapping on previous device, shift logical - * offset to start of next device - */ - if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end, - &lun_start, &lun_end) && - local_end < lun_end) { - fm_end_offset = local_end; - *start_stripe = stripe_no; - } else { - /* This is a special value to indicate that caller should - * calculate offset in next stripe. 
- */ - fm_end_offset = 0; - *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count; - } - - return fm_end_offset; -} - -struct fiemap_state { - struct fiemap *fs_fm; - u64 fs_start; - u64 fs_length; - u64 fs_end; - u64 fs_end_offset; - int fs_cur_extent; - int fs_cnt_need; - int fs_start_stripe; - int fs_last_stripe; - bool fs_device_done; - bool fs_finish; - bool fs_enough; -}; - -static int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj, - struct lov_stripe_md *lsm, - struct fiemap *fiemap, size_t *buflen, - struct ll_fiemap_info_key *fmkey, int stripeno, - struct fiemap_state *fs) -{ - struct cl_object *subobj; - struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov; - struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0]; - u64 req_fm_len; /* Stores length of required mapping */ - u64 len_mapped_single_call; - u64 lun_start; - u64 lun_end; - u64 obd_object_end; - unsigned int ext_count; - /* EOF for object */ - bool ost_eof = false; - /* done with required mapping for this OST? 
*/ - bool ost_done = false; - int ost_index; - int rc = 0; - - fs->fs_device_done = false; - /* Find out range of mapping on this stripe */ - if ((lov_stripe_intersects(lsm, stripeno, fs->fs_start, fs->fs_end, - &lun_start, &obd_object_end)) == 0) - return 0; - - if (lov_oinfo_is_dummy(lsm->lsm_oinfo[stripeno])) - return -EIO; - - /* If this is a continuation FIEMAP call and we are on - * starting stripe then lun_start needs to be set to - * end_offset - */ - if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe) - lun_start = fs->fs_end_offset; - - lun_end = fs->fs_length; - if (lun_end != ~0ULL) { - /* Handle fs->fs_start + fs->fs_length overflow */ - if (fs->fs_start + fs->fs_length < fs->fs_start) - fs->fs_length = ~0ULL - fs->fs_start; - lun_end = lov_size_to_stripe(lsm, fs->fs_start + fs->fs_length, - stripeno); - } - - if (lun_start == lun_end) - return 0; - - req_fm_len = obd_object_end - lun_start; - fs->fs_fm->fm_length = 0; - len_mapped_single_call = 0; - - /* find lobsub object */ - subobj = lov_find_subobj(env, cl2lov(obj), lsm, stripeno); - if (IS_ERR(subobj)) - return PTR_ERR(subobj); - /* If the output buffer is very large and the objects have many - * extents we may need to loop on a single OST repeatedly - */ - do { - if (fiemap->fm_extent_count > 0) { - /* Don't get too many extents. */ - if (fs->fs_cur_extent + fs->fs_cnt_need > - fiemap->fm_extent_count) - fs->fs_cnt_need = fiemap->fm_extent_count - - fs->fs_cur_extent; - } - - lun_start += len_mapped_single_call; - fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call; - req_fm_len = fs->fs_fm->fm_length; - fs->fs_fm->fm_extent_count = fs->fs_enough ? - 1 : fs->fs_cnt_need; - fs->fs_fm->fm_mapped_extents = 0; - fs->fs_fm->fm_flags = fiemap->fm_flags; - - ost_index = lsm->lsm_oinfo[stripeno]->loi_ost_idx; - - if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count) { - rc = -EINVAL; - goto obj_put; - } - /* If OST is inactive, return extent with UNKNOWN flag. 
*/ - if (!lov->lov_tgts[ost_index]->ltd_active) { - fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST; - fs->fs_fm->fm_mapped_extents = 1; - - fm_ext[0].fe_logical = lun_start; - fm_ext[0].fe_length = obd_object_end - lun_start; - fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN; - - goto inactive_tgt; - } - - fs->fs_fm->fm_start = lun_start; - fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER; - memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm)); - *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count); - - rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen); - if (rc) - goto obj_put; -inactive_tgt: - ext_count = fs->fs_fm->fm_mapped_extents; - if (ext_count == 0) { - ost_done = true; - fs->fs_device_done = true; - /* If last stripe has hold at the end, - * we need to return - */ - if (stripeno == fs->fs_last_stripe) { - fiemap->fm_mapped_extents = 0; - fs->fs_finish = true; - goto obj_put; - } - break; - } else if (fs->fs_enough) { - /* - * We've collected enough extents and there are - * more extents after it. - */ - fs->fs_finish = true; - goto obj_put; - } - - /* If we just need num of extents, got to next device */ - if (fiemap->fm_extent_count == 0) { - fs->fs_cur_extent += ext_count; - break; - } - - /* prepare to copy retrived map extents */ - len_mapped_single_call = fm_ext[ext_count - 1].fe_logical + - fm_ext[ext_count - 1].fe_length - - lun_start; - - /* Have we finished mapping on this device? 
*/ - if (req_fm_len <= len_mapped_single_call) { - ost_done = true; - fs->fs_device_done = true; - } - - /* Clear the EXTENT_LAST flag which can be present on - * the last extent - */ - if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST) - fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST; - if (lov_stripe_size(lsm, fm_ext[ext_count - 1].fe_logical + - fm_ext[ext_count - 1].fe_length, - stripeno) >= fmkey->lfik_oa.o_size) { - ost_eof = true; - fs->fs_device_done = true; - } - - fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index, - ext_count, fs->fs_cur_extent); - fs->fs_cur_extent += ext_count; - - /* Ran out of available extents? */ - if (fs->fs_cur_extent >= fiemap->fm_extent_count) - fs->fs_enough = true; - } while (!ost_done && !ost_eof); - - if (stripeno == fs->fs_last_stripe) - fs->fs_finish = true; -obj_put: - cl_object_put(env, subobj); - - return rc; -} - -/** - * Break down the FIEMAP request and send appropriate calls to individual OSTs. - * This also handles the restarting of FIEMAP calls in case mapping overflows - * the available number of extents in single call. 
- * - * \param env [in] lustre environment - * \param obj [in] file object - * \param fmkey [in] fiemap request header and other info - * \param fiemap [out] fiemap buffer holding retrived map extents - * \param buflen [in/out] max buffer length of @fiemap, when iterate - * each OST, it is used to limit max map needed - * \retval 0 success - * \retval < 0 error - */ -static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj, - struct ll_fiemap_info_key *fmkey, - struct fiemap *fiemap, size_t *buflen) -{ - unsigned int buffer_size = FIEMAP_BUFFER_SIZE; - struct fiemap *fm_local = NULL; - struct lov_stripe_md *lsm; - int rc = 0; - int cur_stripe; - int stripe_count; - struct fiemap_state fs = { NULL }; - - lsm = lov_lsm_addref(cl2lov(obj)); - if (!lsm) - return -ENODATA; - - /** - * If the stripe_count > 1 and the application does not understand - * DEVICE_ORDER flag, it cannot interpret the extents correctly. - */ - if (lsm->lsm_stripe_count > 1 && - !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) { - rc = -ENOTSUPP; - goto out; - } - - if (lsm_is_released(lsm)) { - if (fiemap->fm_start < fmkey->lfik_oa.o_size) { - /** - * released file, return a minimal FIEMAP if - * request fits in file-size. 
- */ - fiemap->fm_mapped_extents = 1; - fiemap->fm_extents[0].fe_logical = fiemap->fm_start; - if (fiemap->fm_start + fiemap->fm_length < - fmkey->lfik_oa.o_size) - fiemap->fm_extents[0].fe_length = - fiemap->fm_length; - else - fiemap->fm_extents[0].fe_length = - fmkey->lfik_oa.o_size - - fiemap->fm_start; - fiemap->fm_extents[0].fe_flags |= - FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST; - } - rc = 0; - goto out; - } - - if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size) - buffer_size = fiemap_count_to_size(fiemap->fm_extent_count); - - fm_local = kvzalloc(buffer_size, GFP_NOFS); - if (!fm_local) { - rc = -ENOMEM; - goto out; - } - fs.fs_fm = fm_local; - fs.fs_cnt_need = fiemap_size_to_count(buffer_size); - - fs.fs_start = fiemap->fm_start; - /* fs_start is beyond the end of the file */ - if (fs.fs_start > fmkey->lfik_oa.o_size) { - rc = -EINVAL; - goto out; - } - /* Calculate start stripe, last stripe and length of mapping */ - fs.fs_start_stripe = lov_stripe_number(lsm, fs.fs_start); - fs.fs_end = (fs.fs_length == ~0ULL) ? fmkey->lfik_oa.o_size : - fs.fs_start + fs.fs_length - 1; - /* If fs_length != ~0ULL but fs_start+fs_length-1 exceeds file size */ - if (fs.fs_end > fmkey->lfik_oa.o_size) { - fs.fs_end = fmkey->lfik_oa.o_size; - fs.fs_length = fs.fs_end - fs.fs_start; - } - - fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, fs.fs_start, fs.fs_end, - fs.fs_start_stripe, - &stripe_count); - fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fs.fs_start, - fs.fs_end, - &fs.fs_start_stripe); - if (fs.fs_end_offset == -EINVAL) { - rc = -EINVAL; - goto out; - } - - - /** - * Requested extent count exceeds the fiemap buffer size, shrink our - * ambition. 
- */ - if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen) - fiemap->fm_extent_count = fiemap_size_to_count(*buflen); - if (!fiemap->fm_extent_count) - fs.fs_cnt_need = 0; - - fs.fs_finish = false; - fs.fs_enough = false; - fs.fs_cur_extent = 0; - - /* Check each stripe */ - for (cur_stripe = fs.fs_start_stripe; stripe_count > 0; - --stripe_count, - cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) { - rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen, fmkey, - cur_stripe, &fs); - if (rc < 0) - goto out; - if (fs.fs_finish) - break; - } /* for each stripe */ - /* - * Indicate that we are returning device offsets unless file just has - * single stripe - */ - if (lsm->lsm_stripe_count > 1) - fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER; - - if (!fiemap->fm_extent_count) - goto skip_last_device_calc; - - /* - * Check if we have reached the last stripe and whether mapping for that - * stripe is done. - */ - if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done) - fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |= - FIEMAP_EXTENT_LAST; -skip_last_device_calc: - fiemap->fm_mapped_extents = fs.fs_cur_extent; -out: - kvfree(fm_local); - lov_lsm_put(lsm); - return rc; -} - -static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj, - struct lov_user_md __user *lum) -{ - struct lov_object *lov = cl2lov(obj); - struct lov_stripe_md *lsm; - int rc = 0; - - lsm = lov_lsm_addref(lov); - if (!lsm) - return -ENODATA; - - rc = lov_getstripe(cl2lov(obj), lsm, lum); - lov_lsm_put(lsm); - return rc; -} - -static int lov_object_layout_get(const struct lu_env *env, - struct cl_object *obj, - struct cl_layout *cl) -{ - struct lov_object *lov = cl2lov(obj); - struct lov_stripe_md *lsm = lov_lsm_addref(lov); - struct lu_buf *buf = &cl->cl_buf; - ssize_t rc; - - if (!lsm) { - cl->cl_size = 0; - cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY; - return 0; - } - - cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic); - cl->cl_layout_gen = 
lsm->lsm_layout_gen; - - rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len); - lov_lsm_put(lsm); - - return rc < 0 ? rc : 0; -} - -static loff_t lov_object_maxbytes(struct cl_object *obj) -{ - struct lov_object *lov = cl2lov(obj); - struct lov_stripe_md *lsm = lov_lsm_addref(lov); - loff_t maxbytes; - - if (!lsm) - return LLONG_MAX; - - maxbytes = lsm->lsm_maxbytes; - - lov_lsm_put(lsm); - - return maxbytes; -} - -static const struct cl_object_operations lov_ops = { - .coo_page_init = lov_page_init, - .coo_lock_init = lov_lock_init, - .coo_io_init = lov_io_init, - .coo_attr_get = lov_attr_get, - .coo_attr_update = lov_attr_update, - .coo_conf_set = lov_conf_set, - .coo_getstripe = lov_object_getstripe, - .coo_layout_get = lov_object_layout_get, - .coo_maxbytes = lov_object_maxbytes, - .coo_fiemap = lov_object_fiemap, -}; - -static const struct lu_object_operations lov_lu_obj_ops = { - .loo_object_init = lov_object_init, - .loo_object_delete = lov_object_delete, - .loo_object_release = NULL, - .loo_object_free = lov_object_free, - .loo_object_print = lov_object_print, - .loo_object_invariant = NULL -}; - -struct lu_object *lov_object_alloc(const struct lu_env *env, - const struct lu_object_header *unused, - struct lu_device *dev) -{ - struct lov_object *lov; - struct lu_object *obj; - - lov = kmem_cache_zalloc(lov_object_kmem, GFP_NOFS); - if (lov) { - obj = lov2lu(lov); - lu_object_init(obj, NULL, dev); - lov->lo_cl.co_ops = &lov_ops; - lov->lo_type = -1; /* invalid, to catch uninitialized type */ - /* - * object io operation vector (cl_object::co_iop) is installed - * later in lov_object_init(), as different vectors are used - * for object with different layouts. 
- */ - obj->lo_ops = &lov_lu_obj_ops; - } else { - obj = NULL; - } - return obj; -} - -struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov) -{ - struct lov_stripe_md *lsm = NULL; - - lov_conf_freeze(lov); - if (lov->lo_lsm) { - lsm = lsm_addref(lov->lo_lsm); - CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n", - lsm, atomic_read(&lsm->lsm_refc), - lov->lo_layout_invalid, current); - } - lov_conf_thaw(lov); - return lsm; -} - -int lov_read_and_clear_async_rc(struct cl_object *clob) -{ - struct lu_object *luobj; - int rc = 0; - - luobj = lu_object_locate(&cl_object_header(clob)->coh_lu, - &lov_device_type); - if (luobj) { - struct lov_object *lov = lu2lov(luobj); - - lov_conf_freeze(lov); - switch (lov->lo_type) { - case LLT_RAID0: { - struct lov_stripe_md *lsm; - int i; - - lsm = lov->lo_lsm; - for (i = 0; i < lsm->lsm_stripe_count; i++) { - struct lov_oinfo *loi = lsm->lsm_oinfo[i]; - - if (lov_oinfo_is_dummy(loi)) - continue; - - if (loi->loi_ar.ar_rc && !rc) - rc = loi->loi_ar.ar_rc; - loi->loi_ar.ar_rc = 0; - } - } - case LLT_RELEASED: - case LLT_EMPTY: - break; - default: - LBUG(); - } - lov_conf_thaw(lov); - } - return rc; -} -EXPORT_SYMBOL(lov_read_and_clear_async_rc); - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c deleted file mode 100644 index a5f00f6ec347..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_offset.c +++ /dev/null @@ -1,269 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include - -#include "lov_internal.h" - -/* compute object size given "stripeno" and the ost size */ -u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno) -{ - unsigned long ssize = lsm->lsm_stripe_size; - unsigned long stripe_size; - u64 swidth; - u64 lov_size; - int magic = lsm->lsm_magic; - - if (ost_size == 0) - return 0; - - lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth); - - /* lov_do_div64(a, b) returns a % b, and a = a / b */ - stripe_size = lov_do_div64(ost_size, ssize); - if (stripe_size) - lov_size = ost_size * swidth + stripeno * ssize + stripe_size; - else - lov_size = (ost_size - 1) * swidth + (stripeno + 1) * ssize; - - return lov_size; -} - -/** - * Compute file level page index by stripe level page offset - */ -pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index, - int stripe) -{ - loff_t offset; - - offset = lov_stripe_size(lsm, (stripe_index << PAGE_SHIFT) + 1, stripe); - return offset >> PAGE_SHIFT; -} - -/* we have an offset in file backed by an lov and want to find out where - * that offset lands in our given stripe of the file. for the easy - * case where the offset is within the stripe, we just have to scale the - * offset down to make it relative to the stripe instead of the lov. 
- * - * the harder case is what to do when the offset doesn't intersect the - * stripe. callers will want start offsets clamped ahead to the start - * of the nearest stripe in the file. end offsets similarly clamped to the - * nearest ending byte of a stripe in the file: - * - * all this function does is move offsets to the nearest region of the - * stripe, and it does its work "mod" the full length of all the stripes. - * consider a file with 3 stripes: - * - * S E - * --------------------------------------------------------------------- - * | 0 | 1 | 2 | 0 | 1 | 2 | - * --------------------------------------------------------------------- - * - * to find stripe 1's offsets for S and E, it divides by the full stripe - * width and does its math in the context of a single set of stripes: - * - * S E - * ----------------------------------- - * | 0 | 1 | 2 | - * ----------------------------------- - * - * it'll notice that E is outside stripe 1 and clamp it to the end of the - * stripe, then multiply it back out by lov_off to give the real offsets in - * the stripe: - * - * S E - * --------------------------------------------------------------------- - * | 1 | 1 | 1 | 1 | 1 | 1 | - * --------------------------------------------------------------------- - * - * it would have done similarly and pulled S forward to the start of a 1 - * stripe if, say, S had landed in a 0 stripe. - * - * this rounding isn't always correct. consider an E lov offset that lands - * on a 0 stripe, the "mod stripe width" math will pull it forward to the - * start of a 1 stripe, when in fact it wanted to be rounded back to the end - * of a previous 1 stripe. this logic is handled by callers and this is why: - * - * this function returns < 0 when the offset was "before" the stripe and - * was moved forward to the start of the stripe in question; 0 when it - * falls in the stripe and no shifting was done; > 0 when the offset - * was outside the stripe and was pulled back to its final byte. 
- */ -int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, - int stripeno, u64 *obdoff) -{ - unsigned long ssize = lsm->lsm_stripe_size; - u64 stripe_off, this_stripe, swidth; - int magic = lsm->lsm_magic; - int ret = 0; - - if (lov_off == OBD_OBJECT_EOF) { - *obdoff = OBD_OBJECT_EOF; - return 0; - } - - lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off, - &swidth); - - /* lov_do_div64(a, b) returns a % b, and a = a / b */ - stripe_off = lov_do_div64(lov_off, swidth); - - this_stripe = (u64)stripeno * ssize; - if (stripe_off < this_stripe) { - stripe_off = 0; - ret = -1; - } else { - stripe_off -= this_stripe; - - if (stripe_off >= ssize) { - stripe_off = ssize; - ret = 1; - } - } - - *obdoff = lov_off * ssize + stripe_off; - return ret; -} - -/* Given a whole-file size and a stripe number, give the file size which - * corresponds to the individual object of that stripe. - * - * This behaves basically in the same was as lov_stripe_offset, except that - * file sizes falling before the beginning of a stripe are clamped to the end - * of the previous stripe, not the beginning of the next: - * - * S - * --------------------------------------------------------------------- - * | 0 | 1 | 2 | 0 | 1 | 2 | - * --------------------------------------------------------------------- - * - * if clamped to stripe 2 becomes: - * - * S - * --------------------------------------------------------------------- - * | 0 | 1 | 2 | 0 | 1 | 2 | - * --------------------------------------------------------------------- - */ -u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, - int stripeno) -{ - unsigned long ssize = lsm->lsm_stripe_size; - u64 stripe_off, this_stripe, swidth; - int magic = lsm->lsm_magic; - - if (file_size == OBD_OBJECT_EOF) - return OBD_OBJECT_EOF; - - lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size, - &swidth); - - /* lov_do_div64(a, b) returns a % b, and a = a / b */ - stripe_off = lov_do_div64(file_size, swidth); 
- - this_stripe = (u64)stripeno * ssize; - if (stripe_off < this_stripe) { - /* Move to end of previous stripe, or zero */ - if (file_size > 0) { - file_size--; - stripe_off = ssize; - } else { - stripe_off = 0; - } - } else { - stripe_off -= this_stripe; - - if (stripe_off >= ssize) { - /* Clamp to end of this stripe */ - stripe_off = ssize; - } - } - - return (file_size * ssize + stripe_off); -} - -/* given an extent in an lov and a stripe, calculate the extent of the stripe - * that is contained within the lov extent. this returns true if the given - * stripe does intersect with the lov extent. - */ -int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, - u64 start, u64 end, u64 *obd_start, u64 *obd_end) -{ - int start_side, end_side; - - start_side = lov_stripe_offset(lsm, start, stripeno, obd_start); - end_side = lov_stripe_offset(lsm, end, stripeno, obd_end); - - CDEBUG(D_INODE, "[%llu->%llu] -> [(%d) %llu->%llu (%d)]\n", - start, end, start_side, *obd_start, *obd_end, end_side); - - /* this stripe doesn't intersect the file extent when neither - * start or the end intersected the stripe and obd_start and - * obd_end got rounded up to the save value. - */ - if (start_side != 0 && end_side != 0 && *obd_start == *obd_end) - return 0; - - /* as mentioned in the lov_stripe_offset commentary, end - * might have been shifted in the wrong direction. This - * happens when an end offset is before the stripe when viewed - * through the "mod stripe size" math. we detect it being shifted - * in the wrong direction and touch it up. - * interestingly, this can't underflow since end must be > start - * if we passed through the previous check. - * (should we assert for that somewhere?) 
- */ - if (end_side != 0) - (*obd_end)--; - - return 1; -} - -/* compute which stripe number "lov_off" will be written into */ -int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off) -{ - unsigned long ssize = lsm->lsm_stripe_size; - u64 stripe_off, swidth; - int magic = lsm->lsm_magic; - - lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth); - - stripe_off = lov_do_div64(lov_off, swidth); - - /* Puts stripe_off/ssize result into stripe_off */ - lov_do_div64(stripe_off, ssize); - - return stripe_off; -} diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c deleted file mode 100644 index b1060d02a164..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_pack.c +++ /dev/null @@ -1,400 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/lov/lov_pack.c - * - * (Un)packing of OST/MDS requests - * - * Author: Andreas Dilger - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include -#include -#include -#include -#include - -#include "lov_cl_internal.h" -#include "lov_internal.h" - -void lov_dump_lmm_common(int level, void *lmmp) -{ - struct lov_mds_md *lmm = lmmp; - struct ost_id oi; - - lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi); - CDEBUG(level, "objid " DOSTID ", magic 0x%08x, pattern %#x\n", - POSTID(&oi), le32_to_cpu(lmm->lmm_magic), - le32_to_cpu(lmm->lmm_pattern)); - CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n", - le32_to_cpu(lmm->lmm_stripe_size), - le16_to_cpu(lmm->lmm_stripe_count), - le16_to_cpu(lmm->lmm_layout_gen)); -} - -static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod, - int stripe_count) -{ - int i; - - if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) { - CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n", - stripe_count, LOV_V1_INSANE_STRIPE_COUNT); - return; - } - - for (i = 0; i < stripe_count; ++i, ++lod) { - struct ost_id oi; - - ostid_le_to_cpu(&lod->l_ost_oi, &oi); - CDEBUG(level, "stripe %u idx %u subobj " DOSTID "\n", i, - le32_to_cpu(lod->l_ost_idx), POSTID(&oi)); - } -} - -void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm) -{ - lov_dump_lmm_common(level, lmm); - lov_dump_lmm_objects(level, lmm->lmm_objects, - le16_to_cpu(lmm->lmm_stripe_count)); -} - -void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm) -{ - lov_dump_lmm_common(level, lmm); - CDEBUG(level, "pool_name " LOV_POOLNAMEF "\n", lmm->lmm_pool_name); - lov_dump_lmm_objects(level, lmm->lmm_objects, - le16_to_cpu(lmm->lmm_stripe_count)); -} - -/** - * Pack LOV striping metadata for disk storage format (in little - * endian byte order). - * - * This follows the getxattr() conventions. If \a buf_size is zero - * then return the size needed. If \a buf_size is too small then - * return -ERANGE. Otherwise return the size of the result. 
- */ -ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf, - size_t buf_size) -{ - struct lov_ost_data_v1 *lmm_objects; - struct lov_mds_md_v1 *lmmv1 = buf; - struct lov_mds_md_v3 *lmmv3 = buf; - size_t lmm_size; - unsigned int i; - - lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic); - if (!buf_size) - return lmm_size; - - if (buf_size < lmm_size) - return -ERANGE; - - /* - * lmmv1 and lmmv3 point to the same struct and have the - * same first fields - */ - lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic); - lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi); - lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size); - lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count); - lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern); - lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen); - - if (lsm->lsm_magic == LOV_MAGIC_V3) { - BUILD_BUG_ON(sizeof(lsm->lsm_pool_name) != - sizeof(lmmv3->lmm_pool_name)); - strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name, - sizeof(lmmv3->lmm_pool_name)); - lmm_objects = lmmv3->lmm_objects; - } else { - lmm_objects = lmmv1->lmm_objects; - } - - for (i = 0; i < lsm->lsm_stripe_count; i++) { - struct lov_oinfo *loi = lsm->lsm_oinfo[i]; - - ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi); - lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen); - lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx); - } - - return lmm_size; -} - -/* Find the max stripecount we should use */ -__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count) -{ - __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD; - - if (!stripe_count) - stripe_count = lov->desc.ld_default_stripe_count; - if (stripe_count > lov->desc.ld_active_tgt_count) - stripe_count = lov->desc.ld_active_tgt_count; - if (!stripe_count) - stripe_count = 1; - - /* stripe count is based on whether ldiskfs can handle - * larger EA sizes - */ - if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE && - lov->lov_ocd.ocd_max_easize) 
- max_stripes = lov_mds_md_max_stripe_count( - lov->lov_ocd.ocd_max_easize, magic); - - if (stripe_count > max_stripes) - stripe_count = max_stripes; - - return stripe_count; -} - -static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count) -{ - int rc; - - if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) { - CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n", - le32_to_cpu(*(__u32 *)lmm), lmm_bytes); - CERROR("%*phN\n", lmm_bytes, lmm); - return -EINVAL; - } - rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm, - lmm_bytes, - stripe_count); - return rc; -} - -static struct lov_stripe_md *lov_lsm_alloc(u16 stripe_count, u32 pattern, - u32 magic) -{ - struct lov_stripe_md *lsm; - unsigned int i; - - CDEBUG(D_INFO, "alloc lsm, stripe_count %u\n", stripe_count); - - lsm = lsm_alloc_plain(stripe_count); - if (!lsm) { - CERROR("cannot allocate LSM stripe_count %u\n", stripe_count); - return ERR_PTR(-ENOMEM); - } - - atomic_set(&lsm->lsm_refc, 1); - spin_lock_init(&lsm->lsm_lock); - lsm->lsm_magic = magic; - lsm->lsm_stripe_count = stripe_count; - lsm->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count; - lsm->lsm_pattern = pattern; - lsm->lsm_pool_name[0] = '\0'; - lsm->lsm_layout_gen = 0; - if (stripe_count > 0) - lsm->lsm_oinfo[0]->loi_ost_idx = ~0; - - for (i = 0; i < stripe_count; i++) - loi_init(lsm->lsm_oinfo[i]); - - return lsm; -} - -int lov_free_memmd(struct lov_stripe_md **lsmp) -{ - struct lov_stripe_md *lsm = *lsmp; - int refc; - - *lsmp = NULL; - LASSERT(atomic_read(&lsm->lsm_refc) > 0); - refc = atomic_dec_return(&lsm->lsm_refc); - if (refc == 0) - lsm_op_find(lsm->lsm_magic)->lsm_free(lsm); - - return refc; -} - -/* Unpack LOV object metadata from disk storage. It is packed in LE byte - * order and is opaque to the networking layer. 
- */ -struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm, - size_t lmm_size) -{ - struct lov_stripe_md *lsm; - u16 stripe_count; - u32 pattern; - u32 magic; - int rc; - - rc = lov_verify_lmm(lmm, lmm_size, &stripe_count); - if (rc) - return ERR_PTR(rc); - - magic = le32_to_cpu(lmm->lmm_magic); - pattern = le32_to_cpu(lmm->lmm_pattern); - - lsm = lov_lsm_alloc(stripe_count, pattern, magic); - if (IS_ERR(lsm)) - return lsm; - - LASSERT(lsm_op_find(magic)); - rc = lsm_op_find(magic)->lsm_unpackmd(lov, lsm, lmm); - if (rc) { - lov_free_memmd(&lsm); - return ERR_PTR(rc); - } - - return lsm; -} - -/* Retrieve object striping information. - * - * @lump is a pointer to an in-core struct with lmm_ost_count indicating - * the maximum number of OST indices which will fit in the user buffer. - * lmm_magic must be LOV_USER_MAGIC. - */ -int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, - struct lov_user_md __user *lump) -{ - /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */ - struct lov_user_md_v3 lum; - struct lov_mds_md *lmmk; - u32 stripe_count; - ssize_t lmm_size; - size_t lmmk_size; - size_t lum_size; - int rc; - - if (!lsm) - return -ENODATA; - - if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { - CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", - lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); - rc = -EIO; - goto out; - } - - if (!lsm_is_released(lsm)) - stripe_count = lsm->lsm_stripe_count; - else - stripe_count = 0; - - /* we only need the header part from user space to get lmm_magic and - * lmm_stripe_count, (the header part is common to v1 and v3) - */ - lum_size = sizeof(struct lov_user_md_v1); - if (copy_from_user(&lum, lump, lum_size)) { - rc = -EFAULT; - goto out; - } - if (lum.lmm_magic != LOV_USER_MAGIC_V1 && - lum.lmm_magic != LOV_USER_MAGIC_V3 && - lum.lmm_magic != LOV_USER_MAGIC_SPECIFIC) { - rc = -EINVAL; - goto out; - } - - if (lum.lmm_stripe_count && - 
(lum.lmm_stripe_count < lsm->lsm_stripe_count)) { - /* Return right size of stripe to user */ - lum.lmm_stripe_count = stripe_count; - rc = copy_to_user(lump, &lum, lum_size); - rc = -EOVERFLOW; - goto out; - } - lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic); - - - lmmk = kvzalloc(lmmk_size, GFP_NOFS); - if (!lmmk) { - rc = -ENOMEM; - goto out; - } - - lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size); - if (lmm_size < 0) { - rc = lmm_size; - goto out_free; - } - - /* FIXME: Bug 1185 - copy fields properly when structs change */ - /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */ - BUILD_BUG_ON(sizeof(lum) != sizeof(struct lov_mds_md_v3)); - BUILD_BUG_ON(sizeof(lum.lmm_objects[0]) != sizeof(lmmk->lmm_objects[0])); - - if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC && - (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) || - lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) { - lustre_swab_lov_mds_md(lmmk); - lustre_swab_lov_user_md_objects( - (struct lov_user_ost_data *)lmmk->lmm_objects, - lmmk->lmm_stripe_count); - } - - if (lum.lmm_magic == LOV_USER_MAGIC) { - /* User request for v1, we need skip lmm_pool_name */ - if (lmmk->lmm_magic == LOV_MAGIC_V3) { - memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects, - ((struct lov_mds_md_v3 *)lmmk)->lmm_objects, - lmmk->lmm_stripe_count * - sizeof(struct lov_ost_data_v1)); - lmm_size -= LOV_MAXPOOLNAME; - } - } else { - /* if v3 we just have to update the lum_size */ - lum_size = sizeof(struct lov_user_md_v3); - } - - /* User wasn't expecting this many OST entries */ - if (lum.lmm_stripe_count == 0) { - lmm_size = lum_size; - } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) { - rc = -EOVERFLOW; - goto out_free; - } - /* - * Have a difference between lov_mds_md & lov_user_md. - * So we have to re-order the data before copy to user. 
- */ - lum.lmm_stripe_count = lmmk->lmm_stripe_count; - lum.lmm_layout_gen = lmmk->lmm_layout_gen; - ((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen; - ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count; - if (copy_to_user(lump, lmmk, lmm_size)) - rc = -EFAULT; - else - rc = 0; - -out_free: - kvfree(lmmk); -out: - return rc; -} diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c deleted file mode 100644 index cfae1294d77a..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_page.c +++ /dev/null @@ -1,136 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_page for LOV layer. 
- * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -/** \addtogroup lov - * @{ - */ - -/***************************************************************************** - * - * Lov page operations. - * - */ - -static int lov_raid0_page_print(const struct lu_env *env, - const struct cl_page_slice *slice, - void *cookie, lu_printer_t printer) -{ - struct lov_page *lp = cl2lov_page(slice); - - return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, raid0\n", lp); -} - -static const struct cl_page_operations lov_raid0_page_ops = { - .cpo_print = lov_raid0_page_print -}; - -int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index) -{ - struct lov_object *loo = cl2lov(obj); - struct lov_layout_raid0 *r0 = lov_r0(loo); - struct lov_io *lio = lov_env_io(env); - struct cl_object *subobj; - struct cl_object *o; - struct lov_io_sub *sub; - struct lov_page *lpg = cl_object_page_slice(obj, page); - loff_t offset; - u64 suboff; - int stripe; - int rc; - - offset = cl_offset(obj, index); - stripe = lov_stripe_number(loo->lo_lsm, offset); - LASSERT(stripe < r0->lo_nr); - rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff); - LASSERT(rc == 0); - - lpg->lps_stripe = stripe; - cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops); - - sub = lov_sub_get(env, lio, stripe); - if (IS_ERR(sub)) - return PTR_ERR(sub); - - subobj = lovsub2cl(r0->lo_sub[stripe]); - list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers, - co_lu.lo_linkage) { - if (o->co_ops->coo_page_init) { - rc = o->co_ops->coo_page_init(sub->sub_env, o, page, - cl_index(subobj, suboff)); - if (rc != 0) - break; - } - } - - return rc; -} - -static int lov_empty_page_print(const struct lu_env *env, - const struct cl_page_slice *slice, - void *cookie, lu_printer_t printer) -{ - struct lov_page *lp = cl2lov_page(slice); - - return (*printer)(env, cookie, 
LUSTRE_LOV_NAME "-page@%p, empty.\n", - lp); -} - -static const struct cl_page_operations lov_empty_page_ops = { - .cpo_print = lov_empty_page_print -}; - -int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index) -{ - struct lov_page *lpg = cl_object_page_slice(obj, page); - void *addr; - - cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops); - addr = kmap(page->cp_vmpage); - memset(addr, 0, cl_page_size(obj)); - kunmap(page->cp_vmpage); - cl_page_export(env, page, 1); - return 0; -} - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c deleted file mode 100644 index b2a88ba72eb2..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_pool.c +++ /dev/null @@ -1,546 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/lov/lov_pool.c - * - * OST pool methods - * - * Author: Jacques-Charles LAFOUCRIERE - * Author: Alex Lyashkov - * Author: Nathaniel Rutman - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include -#include "lov_internal.h" - -#define pool_tgt(_p, _i) \ - _p->pool_lobd->u.lov.lov_tgts[_p->pool_obds.op_array[_i]] - -static u32 pool_hashfh(const void *data, u32 len, u32 seed) -{ - const char *pool_name = data; - return hashlen_hash(hashlen_string((void*)(unsigned long)seed, pool_name)); -} - -static int pool_cmpfn(struct rhashtable_compare_arg *arg, const void *obj) -{ - const struct pool_desc *pool = obj; - const char *pool_name = arg->key; - return strcmp(pool_name, pool->pool_name); -} - -static const struct rhashtable_params pools_hash_params = { - .key_len = 1, /* actually variable */ - .key_offset = offsetof(struct pool_desc, pool_name), - .head_offset = offsetof(struct pool_desc, pool_hash), - .hashfn = pool_hashfh, - .obj_cmpfn = pool_cmpfn, - .automatic_shrinking = true, -}; - -static void lov_pool_getref(struct pool_desc *pool) -{ - CDEBUG(D_INFO, "pool %p\n", pool); - atomic_inc(&pool->pool_refcount); -} - -void lov_pool_putref(struct pool_desc *pool) -{ - CDEBUG(D_INFO, "pool %p\n", pool); - if (atomic_dec_and_test(&pool->pool_refcount)) { - LASSERT(list_empty(&pool->pool_list)); - lov_ost_pool_free(&pool->pool_obds); - kfree_rcu(pool, rcu); - } -} - -/* - * pool debugfs seq_file methods - */ -/* - * iterator is used to go through the target pool entries - * index is the current entry index in the lp_array[] array - * index >= pos returned to the seq_file interface - * pos is from 0 to (pool->pool_obds.op_count - 1) - */ -#define POOL_IT_MAGIC 0xB001CEA0 -struct pool_iterator { - int magic; - struct pool_desc *pool; - int idx; /* from 0 to pool_tgt_size - 1 */ -}; - -static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos) -{ - struct pool_iterator *iter = (struct pool_iterator *)s->private; - int prev_idx; - - LASSERTF(iter->magic == 
POOL_IT_MAGIC, "%08X\n", iter->magic); - - /* test if end of file */ - if (*pos >= pool_tgt_count(iter->pool)) - return NULL; - - /* iterate to find a non empty entry */ - prev_idx = iter->idx; - down_read(&pool_tgt_rw_sem(iter->pool)); - iter->idx++; - if (iter->idx == pool_tgt_count(iter->pool)) { - iter->idx = prev_idx; /* we stay on the last entry */ - up_read(&pool_tgt_rw_sem(iter->pool)); - return NULL; - } - up_read(&pool_tgt_rw_sem(iter->pool)); - (*pos)++; - /* return != NULL to continue */ - return iter; -} - -static void *pool_proc_start(struct seq_file *s, loff_t *pos) -{ - struct pool_desc *pool = (struct pool_desc *)s->private; - struct pool_iterator *iter; - - lov_pool_getref(pool); - if ((pool_tgt_count(pool) == 0) || - (*pos >= pool_tgt_count(pool))) { - /* iter is not created, so stop() has no way to - * find pool to dec ref - */ - lov_pool_putref(pool); - return NULL; - } - - iter = kzalloc(sizeof(*iter), GFP_NOFS); - if (!iter) - return ERR_PTR(-ENOMEM); - iter->magic = POOL_IT_MAGIC; - iter->pool = pool; - iter->idx = 0; - - /* we use seq_file private field to memorized iterator so - * we can free it at stop() - */ - /* /!\ do not forget to restore it to pool before freeing it */ - s->private = iter; - if (*pos > 0) { - loff_t i; - void *ptr; - - i = 0; - do { - ptr = pool_proc_next(s, &iter, &i); - } while ((i < *pos) && ptr); - return ptr; - } - return iter; -} - -static void pool_proc_stop(struct seq_file *s, void *v) -{ - struct pool_iterator *iter = (struct pool_iterator *)s->private; - - /* in some cases stop() method is called 2 times, without - * calling start() method (see seq_read() from fs/seq_file.c) - * we have to free only if s->private is an iterator - */ - if ((iter) && (iter->magic == POOL_IT_MAGIC)) { - /* we restore s->private so next call to pool_proc_start() - * will work - */ - s->private = iter->pool; - lov_pool_putref(iter->pool); - kfree(iter); - } -} - -static int pool_proc_show(struct seq_file *s, void *v) -{ - struct 
pool_iterator *iter = (struct pool_iterator *)v; - struct lov_tgt_desc *tgt; - - LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic); - LASSERT(iter->pool); - LASSERT(iter->idx <= pool_tgt_count(iter->pool)); - - down_read(&pool_tgt_rw_sem(iter->pool)); - tgt = pool_tgt(iter->pool, iter->idx); - up_read(&pool_tgt_rw_sem(iter->pool)); - if (tgt) - seq_printf(s, "%s\n", obd_uuid2str(&tgt->ltd_uuid)); - - return 0; -} - -static const struct seq_operations pool_proc_ops = { - .start = pool_proc_start, - .next = pool_proc_next, - .stop = pool_proc_stop, - .show = pool_proc_show, -}; - -static int pool_proc_open(struct inode *inode, struct file *file) -{ - int rc; - - rc = seq_open(file, &pool_proc_ops); - if (!rc) { - struct seq_file *s = file->private_data; - - s->private = inode->i_private; - } - return rc; -} - -static const struct file_operations pool_proc_operations = { - .open = pool_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -#define LOV_POOL_INIT_COUNT 2 -int lov_ost_pool_init(struct ost_pool *op, unsigned int count) -{ - if (count == 0) - count = LOV_POOL_INIT_COUNT; - op->op_array = NULL; - op->op_count = 0; - init_rwsem(&op->op_rw_sem); - op->op_size = count; - op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS); - if (!op->op_array) { - op->op_size = 0; - return -ENOMEM; - } - return 0; -} - -/* Caller must hold write op_rwlock */ -int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count) -{ - __u32 *new; - int new_size; - - LASSERT(min_count != 0); - - if (op->op_count < op->op_size) - return 0; - - new_size = max(min_count, 2 * op->op_size); - new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS); - if (!new) - return -ENOMEM; - - /* copy old array to new one */ - memcpy(new, op->op_array, op->op_size * sizeof(op->op_array[0])); - kfree(op->op_array); - op->op_array = new; - op->op_size = new_size; - return 0; -} - -int lov_ost_pool_add(struct ost_pool *op, __u32 idx, 
unsigned int min_count) -{ - int rc = 0, i; - - down_write(&op->op_rw_sem); - - rc = lov_ost_pool_extend(op, min_count); - if (rc) - goto out; - - /* search ost in pool array */ - for (i = 0; i < op->op_count; i++) { - if (op->op_array[i] == idx) { - rc = -EEXIST; - goto out; - } - } - /* ost not found we add it */ - op->op_array[op->op_count] = idx; - op->op_count++; -out: - up_write(&op->op_rw_sem); - return rc; -} - -int lov_ost_pool_remove(struct ost_pool *op, __u32 idx) -{ - int i; - - down_write(&op->op_rw_sem); - - for (i = 0; i < op->op_count; i++) { - if (op->op_array[i] == idx) { - memmove(&op->op_array[i], &op->op_array[i + 1], - (op->op_count - i - 1) * sizeof(op->op_array[0])); - op->op_count--; - up_write(&op->op_rw_sem); - return 0; - } - } - - up_write(&op->op_rw_sem); - return -EINVAL; -} - -int lov_ost_pool_free(struct ost_pool *op) -{ - if (op->op_size == 0) - return 0; - - down_write(&op->op_rw_sem); - - kfree(op->op_array); - op->op_array = NULL; - op->op_count = 0; - op->op_size = 0; - - up_write(&op->op_rw_sem); - return 0; -} - -static void -pools_hash_exit(void *vpool, void *data) -{ - struct pool_desc *pool = vpool; - lov_pool_putref(pool); -} - -int lov_pool_hash_init(struct rhashtable *tbl) -{ - return rhashtable_init(tbl, &pools_hash_params); -} - -void lov_pool_hash_destroy(struct rhashtable *tbl) -{ - rhashtable_free_and_destroy(tbl, pools_hash_exit, NULL); -} - -int lov_pool_new(struct obd_device *obd, char *poolname) -{ - struct lov_obd *lov; - struct pool_desc *new_pool; - int rc; - - lov = &obd->u.lov; - - if (strlen(poolname) > LOV_MAXPOOLNAME) - return -ENAMETOOLONG; - - new_pool = kzalloc(sizeof(*new_pool), GFP_NOFS); - if (!new_pool) - return -ENOMEM; - - strlcpy(new_pool->pool_name, poolname, sizeof(new_pool->pool_name)); - new_pool->pool_lobd = obd; - /* ref count init to 1 because when created a pool is always used - * up to deletion - */ - atomic_set(&new_pool->pool_refcount, 1); - rc = 
lov_ost_pool_init(&new_pool->pool_obds, 0); - if (rc) - goto out_err; - - /* get ref for debugfs file */ - lov_pool_getref(new_pool); - - new_pool->pool_debugfs_entry = debugfs_create_file(poolname, 0444, - lov->lov_pool_debugfs_entry, - new_pool, - &pool_proc_operations); - - spin_lock(&obd->obd_dev_lock); - list_add_tail(&new_pool->pool_list, &lov->lov_pool_list); - lov->lov_pool_count++; - spin_unlock(&obd->obd_dev_lock); - - /* Add to hash table only when it is fully ready. */ - rc = rhashtable_lookup_insert_fast(&lov->lov_pools_hash_body, - &new_pool->pool_hash, pools_hash_params); - if (rc) { - if (rc != -EEXIST) - /* - * Hide -E2BIG and -EBUSY which - * are not helpful. - */ - rc = -ENOMEM; - goto out_err; - } - - CDEBUG(D_CONFIG, LOV_POOLNAMEF " is pool #%d\n", - poolname, lov->lov_pool_count); - - return 0; - -out_err: - spin_lock(&obd->obd_dev_lock); - list_del_init(&new_pool->pool_list); - lov->lov_pool_count--; - spin_unlock(&obd->obd_dev_lock); - debugfs_remove_recursive(new_pool->pool_debugfs_entry); - lov_ost_pool_free(&new_pool->pool_obds); - kfree(new_pool); - - return rc; -} - -int lov_pool_del(struct obd_device *obd, char *poolname) -{ - struct lov_obd *lov; - struct pool_desc *pool; - - lov = &obd->u.lov; - - /* lookup and kill hash reference */ - rcu_read_lock(); - pool = rhashtable_lookup(&lov->lov_pools_hash_body, poolname, pools_hash_params); - if (pool) - if (rhashtable_remove_fast(&lov->lov_pools_hash_body, - &pool->pool_hash, pools_hash_params) != 0) - pool = NULL; - rcu_read_unlock(); - if (!pool) - return -ENOENT; - - debugfs_remove_recursive(pool->pool_debugfs_entry); - lov_pool_putref(pool); - - spin_lock(&obd->obd_dev_lock); - list_del_init(&pool->pool_list); - lov->lov_pool_count--; - spin_unlock(&obd->obd_dev_lock); - - /* release last reference */ - lov_pool_putref(pool); - - return 0; -} - -int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname) -{ - struct obd_uuid ost_uuid; - struct lov_obd *lov; - struct 
pool_desc *pool; - unsigned int lov_idx; - int rc; - - lov = &obd->u.lov; - - rcu_read_lock(); - pool = rhashtable_lookup(&lov->lov_pools_hash_body, poolname, pools_hash_params); - if (pool && !atomic_inc_not_zero(&pool->pool_refcount)) - pool = NULL; - rcu_read_unlock(); - if (!pool) - return -ENOENT; - - obd_str2uuid(&ost_uuid, ostname); - - /* search ost in lov array */ - obd_getref(obd); - for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) { - if (!lov->lov_tgts[lov_idx]) - continue; - if (obd_uuid_equals(&ost_uuid, - &lov->lov_tgts[lov_idx]->ltd_uuid)) - break; - } - /* test if ost found in lov */ - if (lov_idx == lov->desc.ld_tgt_count) { - rc = -EINVAL; - goto out; - } - - rc = lov_ost_pool_add(&pool->pool_obds, lov_idx, lov->lov_tgt_size); - if (rc) - goto out; - - CDEBUG(D_CONFIG, "Added %s to " LOV_POOLNAMEF " as member %d\n", - ostname, poolname, pool_tgt_count(pool)); - -out: - obd_putref(obd); - lov_pool_putref(pool); - return rc; -} - -int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname) -{ - struct obd_uuid ost_uuid; - struct lov_obd *lov; - struct pool_desc *pool; - unsigned int lov_idx; - int rc = 0; - - lov = &obd->u.lov; - - rcu_read_lock(); - pool = rhashtable_lookup(&lov->lov_pools_hash_body, poolname, pools_hash_params); - if (pool && !atomic_inc_not_zero(&pool->pool_refcount)) - pool = NULL; - rcu_read_unlock(); - if (!pool) - return -ENOENT; - - obd_str2uuid(&ost_uuid, ostname); - - obd_getref(obd); - /* search ost in lov array, to get index */ - for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) { - if (!lov->lov_tgts[lov_idx]) - continue; - - if (obd_uuid_equals(&ost_uuid, - &lov->lov_tgts[lov_idx]->ltd_uuid)) - break; - } - - /* test if ost found in lov */ - if (lov_idx == lov->desc.ld_tgt_count) { - rc = -EINVAL; - goto out; - } - - lov_ost_pool_remove(&pool->pool_obds, lov_idx); - - CDEBUG(D_CONFIG, "%s removed from " LOV_POOLNAMEF "\n", ostname, - poolname); - -out: - obd_putref(obd); - 
lov_pool_putref(pool); - return rc; -} diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c deleted file mode 100644 index cb8567f20ea7..000000000000 --- a/drivers/staging/lustre/lustre/lov/lov_request.c +++ /dev/null @@ -1,354 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include -#include -#include "lov_internal.h" - -static void lov_init_set(struct lov_request_set *set) -{ - set->set_count = 0; - atomic_set(&set->set_completes, 0); - atomic_set(&set->set_success, 0); - INIT_LIST_HEAD(&set->set_list); -} - -static void lov_finish_set(struct lov_request_set *set) -{ - struct lov_request *req; - - LASSERT(set); - while ((req = list_first_entry_or_null(&set->set_list, - struct lov_request, - rq_link)) != NULL) { - list_del_init(&req->rq_link); - kfree(req->rq_oi.oi_osfs); - kfree(req); - } - kfree(set); -} - -static void lov_update_set(struct lov_request_set *set, - struct lov_request *req, int rc) -{ - atomic_inc(&set->set_completes); - if (rc == 0) - atomic_inc(&set->set_success); -} - -static void lov_set_add_req(struct lov_request *req, - struct lov_request_set *set) -{ - list_add_tail(&req->rq_link, &set->set_list); - set->set_count++; - req->rq_rqset = set; -} - -static int lov_check_set(struct lov_obd *lov, int idx) -{ - int rc; - struct lov_tgt_desc *tgt; - - mutex_lock(&lov->lov_lock); - tgt = lov->lov_tgts[idx]; - rc = !tgt || tgt->ltd_active || - (tgt->ltd_exp && - class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried); - mutex_unlock(&lov->lov_lock); - - return rc; -} - -/* Check if the OSC connection exists and is active. - * If the OSC has not yet had a chance to connect to the OST the first time, - * wait once for it to connect instead of returning an error. 
- */ -static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) -{ - int cnt = 0; - struct lov_tgt_desc *tgt; - int rc = 0; - - mutex_lock(&lov->lov_lock); - - tgt = lov->lov_tgts[ost_idx]; - - if (unlikely(!tgt)) { - rc = 0; - goto out; - } - - if (likely(tgt->ltd_active)) { - rc = 1; - goto out; - } - - if (tgt->ltd_exp && class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried) { - rc = 0; - goto out; - } - - mutex_unlock(&lov->lov_lock); - - while (cnt < obd_timeout && !lov_check_set(lov, ost_idx)) { - schedule_timeout_uninterruptible(HZ); - cnt++; - } - if (tgt->ltd_active) - return 1; - - return 0; - -out: - mutex_unlock(&lov->lov_lock); - return rc; -} - -#define LOV_U64_MAX ((__u64)~0ULL) -#define LOV_SUM_MAX(tot, add) \ - do { \ - if ((tot) + (add) < (tot)) \ - (tot) = LOV_U64_MAX; \ - else \ - (tot) += (add); \ - } while (0) - -static int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, - int success) -{ - if (success) { - __u32 expected_stripes = lov_get_stripecnt(&obd->u.lov, - LOV_MAGIC, 0); - if (osfs->os_files != LOV_U64_MAX) - lov_do_div64(osfs->os_files, expected_stripes); - if (osfs->os_ffree != LOV_U64_MAX) - lov_do_div64(osfs->os_ffree, expected_stripes); - - spin_lock(&obd->obd_osfs_lock); - memcpy(&obd->obd_osfs, osfs, sizeof(*osfs)); - obd->obd_osfs_age = get_jiffies_64(); - spin_unlock(&obd->obd_osfs_lock); - return 0; - } - - return -EIO; -} - -int lov_fini_statfs_set(struct lov_request_set *set) -{ - int rc = 0; - - if (!set) - return 0; - - if (atomic_read(&set->set_completes)) { - rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs, - atomic_read(&set->set_success)); - } - - lov_finish_set(set); - - return rc; -} - -static void lov_update_statfs(struct obd_statfs *osfs, - struct obd_statfs *lov_sfs, - int success) -{ - int shift = 0, quit = 0; - __u64 tmp; - - if (success == 0) { - memcpy(osfs, lov_sfs, sizeof(*lov_sfs)); - } else { - if (osfs->os_bsize != lov_sfs->os_bsize) { - /* assume all block sizes are 
always powers of 2 */ - /* get the bits difference */ - tmp = osfs->os_bsize | lov_sfs->os_bsize; - for (shift = 0; shift <= 64; ++shift) { - if (tmp & 1) { - if (quit) - break; - quit = 1; - shift = 0; - } - tmp >>= 1; - } - } - - if (osfs->os_bsize < lov_sfs->os_bsize) { - osfs->os_bsize = lov_sfs->os_bsize; - - osfs->os_bfree >>= shift; - osfs->os_bavail >>= shift; - osfs->os_blocks >>= shift; - } else if (shift != 0) { - lov_sfs->os_bfree >>= shift; - lov_sfs->os_bavail >>= shift; - lov_sfs->os_blocks >>= shift; - } - osfs->os_bfree += lov_sfs->os_bfree; - osfs->os_bavail += lov_sfs->os_bavail; - osfs->os_blocks += lov_sfs->os_blocks; - /* XXX not sure about this one - depends on policy. - * - could be minimum if we always stripe on all OBDs - * (but that would be wrong for any other policy, - * if one of the OBDs has no more objects left) - * - could be sum if we stripe whole objects - * - could be average, just to give a nice number - * - * To give a "reasonable" (if not wholly accurate) - * number, we divide the total number of free objects - * by expected stripe count (watch out for overflow). - */ - LOV_SUM_MAX(osfs->os_files, lov_sfs->os_files); - LOV_SUM_MAX(osfs->os_ffree, lov_sfs->os_ffree); - } -} - -/* The callback for osc_statfs_async that finalizes a request info when a - * response is received. - */ -static int cb_statfs_update(void *cookie, int rc) -{ - struct obd_info *oinfo = cookie; - struct lov_request *lovreq; - struct lov_request_set *set; - struct obd_statfs *osfs, *lov_sfs; - struct lov_obd *lov; - struct lov_tgt_desc *tgt; - struct obd_device *lovobd, *tgtobd; - int success; - - lovreq = container_of(oinfo, struct lov_request, rq_oi); - set = lovreq->rq_rqset; - lovobd = set->set_obd; - lov = &lovobd->u.lov; - osfs = set->set_oi->oi_osfs; - lov_sfs = oinfo->oi_osfs; - success = atomic_read(&set->set_success); - /* XXX: the same is done in lov_update_common_set, however - * lovset->set_exp is not initialized. 
- */ - lov_update_set(set, lovreq, rc); - if (rc) - goto out; - - obd_getref(lovobd); - tgt = lov->lov_tgts[lovreq->rq_idx]; - if (!tgt || !tgt->ltd_active) - goto out_update; - - tgtobd = class_exp2obd(tgt->ltd_exp); - spin_lock(&tgtobd->obd_osfs_lock); - memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs)); - if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0) - tgtobd->obd_osfs_age = get_jiffies_64(); - spin_unlock(&tgtobd->obd_osfs_lock); - -out_update: - lov_update_statfs(osfs, lov_sfs, success); - obd_putref(lovobd); -out: - return 0; -} - -int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, - struct lov_request_set **reqset) -{ - struct lov_request_set *set; - struct lov_obd *lov = &obd->u.lov; - int rc = 0, i; - - set = kzalloc(sizeof(*set), GFP_NOFS); - if (!set) - return -ENOMEM; - lov_init_set(set); - - set->set_obd = obd; - set->set_oi = oinfo; - - /* We only get block data from the OBD */ - for (i = 0; i < lov->desc.ld_tgt_count; i++) { - struct lov_request *req; - - if (!lov->lov_tgts[i] || - (oinfo->oi_flags & OBD_STATFS_NODELAY && - !lov->lov_tgts[i]->ltd_active)) { - CDEBUG(D_HA, "lov idx %d inactive\n", i); - continue; - } - - /* skip targets that have been explicitly disabled by the - * administrator - */ - if (!lov->lov_tgts[i]->ltd_exp) { - CDEBUG(D_HA, "lov idx %d administratively disabled\n", i); - continue; - } - - if (!lov->lov_tgts[i]->ltd_active) - lov_check_and_wait_active(lov, i); - - req = kzalloc(sizeof(*req), GFP_NOFS); - if (!req) { - rc = -ENOMEM; - goto out_set; - } - - req->rq_oi.oi_osfs = kzalloc(sizeof(*req->rq_oi.oi_osfs), - GFP_NOFS); - if (!req->rq_oi.oi_osfs) { - kfree(req); - rc = -ENOMEM; - goto out_set; - } - - req->rq_idx = i; - req->rq_oi.oi_cb_up = cb_statfs_update; - req->rq_oi.oi_flags = oinfo->oi_flags; - - lov_set_add_req(req, set); - } - if (!set->set_count) { - rc = -EIO; - goto out_set; - } - *reqset = set; - return rc; -out_set: - lov_fini_statfs_set(set); - return rc; -} diff --git 
a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c deleted file mode 100644 index 7e89a2e485fc..000000000000 --- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c +++ /dev/null @@ -1,147 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2013, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_device and cl_device_type for LOVSUB layer. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -/** \addtogroup lov - * @{ - */ - -/***************************************************************************** - * - * Lov-sub device and device type functions. 
- * - */ - -static int lovsub_device_init(const struct lu_env *env, struct lu_device *d, - const char *name, struct lu_device *next) -{ - struct lovsub_device *lsd = lu2lovsub_dev(d); - struct lu_device_type *ldt; - int rc; - - next->ld_site = d->ld_site; - ldt = next->ld_type; - rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL); - if (rc) { - next->ld_site = NULL; - return rc; - } - - lu_device_get(next); - lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init); - lsd->acid_next = lu2cl_dev(next); - return rc; -} - -static struct lu_device *lovsub_device_fini(const struct lu_env *env, - struct lu_device *d) -{ - struct lu_device *next; - struct lovsub_device *lsd; - - lsd = lu2lovsub_dev(d); - next = cl2lu_dev(lsd->acid_next); - lsd->acid_next = NULL; - return next; -} - -static struct lu_device *lovsub_device_free(const struct lu_env *env, - struct lu_device *d) -{ - struct lovsub_device *lsd = lu2lovsub_dev(d); - struct lu_device *next = cl2lu_dev(lsd->acid_next); - - if (atomic_read(&d->ld_ref) && d->ld_site) { - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL); - lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer); - } - cl_device_fini(lu2cl_dev(d)); - kfree(lsd); - return next; -} - -static const struct lu_device_operations lovsub_lu_ops = { - .ldo_object_alloc = lovsub_object_alloc, - .ldo_process_config = NULL, - .ldo_recovery_complete = NULL -}; - -static struct lu_device *lovsub_device_alloc(const struct lu_env *env, - struct lu_device_type *t, - struct lustre_cfg *cfg) -{ - struct lu_device *d; - struct lovsub_device *lsd; - - lsd = kzalloc(sizeof(*lsd), GFP_NOFS); - if (lsd) { - int result; - - result = cl_device_init(&lsd->acid_cl, t); - if (result == 0) { - d = lovsub2lu_dev(lsd); - d->ld_ops = &lovsub_lu_ops; - } else { - d = ERR_PTR(result); - } - } else { - d = ERR_PTR(-ENOMEM); - } - return d; -} - -static const struct lu_device_type_operations lovsub_device_type_ops = { - .ldto_device_alloc = lovsub_device_alloc, - 
.ldto_device_free = lovsub_device_free, - - .ldto_device_init = lovsub_device_init, - .ldto_device_fini = lovsub_device_fini -}; - -#define LUSTRE_LOVSUB_NAME "lovsub" - -struct lu_device_type lovsub_device_type = { - .ldt_tags = LU_DEVICE_CL, - .ldt_name = LUSTRE_LOVSUB_NAME, - .ldt_ops = &lovsub_device_type_ops, - .ldt_ctx_tags = LCT_CL_THREAD -}; - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c deleted file mode 100644 index ea492be2eef3..000000000000 --- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c +++ /dev/null @@ -1,81 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_lock for LOVSUB layer. 
- * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -/** \addtogroup lov - * @{ - */ - -/***************************************************************************** - * - * Lovsub lock operations. - * - */ - -static void lovsub_lock_fini(const struct lu_env *env, - struct cl_lock_slice *slice) -{ - struct lovsub_lock *lsl; - - lsl = cl2lovsub_lock(slice); - kmem_cache_free(lovsub_lock_kmem, lsl); -} - -static const struct cl_lock_operations lovsub_lock_ops = { - .clo_fini = lovsub_lock_fini, -}; - -int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io) -{ - struct lovsub_lock *lsk; - int result; - - lsk = kmem_cache_zalloc(lovsub_lock_kmem, GFP_NOFS); - if (lsk) { - cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); - result = 0; - } else { - result = -ENOMEM; - } - return result; -} - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c deleted file mode 100644 index 13d452086b61..000000000000 --- a/drivers/staging/lustre/lustre/lov/lovsub_object.c +++ /dev/null @@ -1,180 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_object for LOVSUB layer. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -/** \addtogroup lov - * @{ - */ - -/***************************************************************************** - * - * Lovsub object operations. - * - */ - -int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf) -{ - struct lovsub_device *dev = lu2lovsub_dev(obj->lo_dev); - struct lu_object *below; - struct lu_device *under; - - int result; - - under = &dev->acid_next->cd_lu_dev; - below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below) { - lu_object_add(obj, below); - cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page)); - result = 0; - } else { - result = -ENOMEM; - } - return result; -} - -static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj) -{ - struct lovsub_object *los = lu2lovsub(obj); - struct lov_object *lov = los->lso_super; - - /* We can't assume lov was assigned here, because of the shadow - * object handling in lu_object_find. 
- */ - if (lov) { - LASSERT(lov->lo_type == LLT_RAID0); - LASSERT(lov->u.raid0.lo_sub[los->lso_index] == los); - spin_lock(&lov->u.raid0.lo_sub_lock); - lov->u.raid0.lo_sub[los->lso_index] = NULL; - spin_unlock(&lov->u.raid0.lo_sub_lock); - } - - lu_object_fini(obj); - lu_object_header_fini(&los->lso_header.coh_lu); - kmem_cache_free(lovsub_object_kmem, los); -} - -static int lovsub_object_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *obj) -{ - struct lovsub_object *los = lu2lovsub(obj); - - return (*p)(env, cookie, "[%d]", los->lso_index); -} - -static int lovsub_attr_update(const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned int valid) -{ - struct lov_object *lov = cl2lovsub(obj)->lso_super; - - lov_r0(lov)->lo_attr_valid = 0; - return 0; -} - -static int lovsub_object_glimpse(const struct lu_env *env, - const struct cl_object *obj, - struct ost_lvb *lvb) -{ - struct lovsub_object *los = cl2lovsub(obj); - - return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb); -} - -/** - * Implementation of struct cl_object_operations::coo_req_attr_set() for lovsub - * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx - * field, which is filled there. - */ -static void lovsub_req_attr_set(const struct lu_env *env, struct cl_object *obj, - struct cl_req_attr *attr) -{ - struct lovsub_object *subobj = cl2lovsub(obj); - - cl_req_attr_set(env, &subobj->lso_super->lo_cl, attr); - - /* - * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it - * unconditionally. It never changes anyway. 
- */ - attr->cra_oa->o_stripe_idx = subobj->lso_index; -} - -static const struct cl_object_operations lovsub_ops = { - .coo_page_init = lovsub_page_init, - .coo_lock_init = lovsub_lock_init, - .coo_attr_update = lovsub_attr_update, - .coo_glimpse = lovsub_object_glimpse, - .coo_req_attr_set = lovsub_req_attr_set -}; - -static const struct lu_object_operations lovsub_lu_obj_ops = { - .loo_object_init = lovsub_object_init, - .loo_object_delete = NULL, - .loo_object_release = NULL, - .loo_object_free = lovsub_object_free, - .loo_object_print = lovsub_object_print, - .loo_object_invariant = NULL -}; - -struct lu_object *lovsub_object_alloc(const struct lu_env *env, - const struct lu_object_header *unused, - struct lu_device *dev) -{ - struct lovsub_object *los; - struct lu_object *obj; - - los = kmem_cache_zalloc(lovsub_object_kmem, GFP_NOFS); - if (los) { - struct cl_object_header *hdr; - - obj = lovsub2lu(los); - hdr = &los->lso_header; - cl_object_header_init(hdr); - lu_object_init(obj, &hdr->coh_lu, dev); - lu_object_add_top(&hdr->coh_lu, obj); - los->lso_cl.co_ops = &lovsub_ops; - obj->lo_ops = &lovsub_lu_obj_ops; - } else { - obj = NULL; - } - return obj; -} - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c deleted file mode 100644 index 915520bcdd60..000000000000 --- a/drivers/staging/lustre/lustre/lov/lovsub_page.c +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_page for LOVSUB layer. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -/** \addtogroup lov - * @{ - */ - -/***************************************************************************** - * - * Lovsub page operations. - * - */ - -static void lovsub_page_fini(const struct lu_env *env, - struct cl_page_slice *slice) -{ -} - -static const struct cl_page_operations lovsub_page_ops = { - .cpo_fini = lovsub_page_fini -}; - -int lovsub_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index) -{ - struct lovsub_page *lsb = cl_object_page_slice(obj, page); - - cl_page_slice_add(page, &lsb->lsb_cl, obj, index, &lovsub_page_ops); - return 0; -} - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c deleted file mode 100644 index 721440feef72..000000000000 --- a/drivers/staging/lustre/lustre/lov/lproc_lov.c +++ /dev/null @@ -1,299 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include "lov_internal.h" - -static int lov_stripesize_seq_show(struct seq_file *m, void *v) -{ - struct obd_device *dev = (struct obd_device *)m->private; - struct lov_desc *desc; - - LASSERT(dev); - desc = &dev->u.lov.desc; - seq_printf(m, "%llu\n", desc->ld_default_stripe_size); - return 0; -} - -static ssize_t lov_stripesize_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct obd_device *dev = ((struct seq_file *)file->private_data)->private; - struct lov_desc *desc; - __u64 val; - int rc; - - LASSERT(dev); - desc = &dev->u.lov.desc; - rc = lprocfs_write_u64_helper(buffer, count, &val); - if (rc) - return rc; - - lov_fix_desc_stripe_size(&val); - desc->ld_default_stripe_size = val; - return count; -} - -LPROC_SEQ_FOPS(lov_stripesize); - -static int lov_stripeoffset_seq_show(struct seq_file *m, void *v) -{ - struct obd_device *dev = (struct obd_device *)m->private; - struct lov_desc *desc; - - LASSERT(dev); - desc = &dev->u.lov.desc; - seq_printf(m, "%llu\n", desc->ld_default_stripe_offset); - return 0; -} - -static ssize_t 
lov_stripeoffset_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct obd_device *dev = ((struct seq_file *)file->private_data)->private; - struct lov_desc *desc; - __u64 val; - int rc; - - LASSERT(dev); - desc = &dev->u.lov.desc; - rc = lprocfs_write_u64_helper(buffer, count, &val); - if (rc) - return rc; - - desc->ld_default_stripe_offset = val; - return count; -} - -LPROC_SEQ_FOPS(lov_stripeoffset); - -static int lov_stripetype_seq_show(struct seq_file *m, void *v) -{ - struct obd_device *dev = (struct obd_device *)m->private; - struct lov_desc *desc; - - LASSERT(dev); - desc = &dev->u.lov.desc; - seq_printf(m, "%u\n", desc->ld_pattern); - return 0; -} - -static ssize_t lov_stripetype_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct obd_device *dev = ((struct seq_file *)file->private_data)->private; - struct lov_desc *desc; - int val, rc; - - LASSERT(dev); - desc = &dev->u.lov.desc; - rc = lprocfs_write_helper(buffer, count, &val); - if (rc) - return rc; - - lov_fix_desc_pattern(&val); - desc->ld_pattern = val; - return count; -} - -LPROC_SEQ_FOPS(lov_stripetype); - -static int lov_stripecount_seq_show(struct seq_file *m, void *v) -{ - struct obd_device *dev = (struct obd_device *)m->private; - struct lov_desc *desc; - - LASSERT(dev); - desc = &dev->u.lov.desc; - seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1); - return 0; -} - -static ssize_t lov_stripecount_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct obd_device *dev = ((struct seq_file *)file->private_data)->private; - struct lov_desc *desc; - int val, rc; - - LASSERT(dev); - desc = &dev->u.lov.desc; - rc = lprocfs_write_helper(buffer, count, &val); - if (rc) - return rc; - - lov_fix_desc_stripe_count(&val); - desc->ld_default_stripe_count = val; - return count; -} - -LPROC_SEQ_FOPS(lov_stripecount); - -static ssize_t numobd_show(struct 
kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct lov_desc *desc; - - desc = &dev->u.lov.desc; - return sprintf(buf, "%u\n", desc->ld_tgt_count); -} -LUSTRE_RO_ATTR(numobd); - -static ssize_t activeobd_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct lov_desc *desc; - - desc = &dev->u.lov.desc; - return sprintf(buf, "%u\n", desc->ld_active_tgt_count); -} -LUSTRE_RO_ATTR(activeobd); - -static int lov_desc_uuid_seq_show(struct seq_file *m, void *v) -{ - struct obd_device *dev = (struct obd_device *)m->private; - struct lov_obd *lov; - - LASSERT(dev); - lov = &dev->u.lov; - seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid); - return 0; -} - -LPROC_SEQ_FOPS_RO(lov_desc_uuid); - -static void *lov_tgt_seq_start(struct seq_file *p, loff_t *pos) -{ - struct obd_device *dev = p->private; - struct lov_obd *lov = &dev->u.lov; - - while (*pos < lov->desc.ld_tgt_count) { - if (lov->lov_tgts[*pos]) - return lov->lov_tgts[*pos]; - ++*pos; - } - return NULL; -} - -static void lov_tgt_seq_stop(struct seq_file *p, void *v) -{ -} - -static void *lov_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos) -{ - struct obd_device *dev = p->private; - struct lov_obd *lov = &dev->u.lov; - - while (++*pos < lov->desc.ld_tgt_count) { - if (lov->lov_tgts[*pos]) - return lov->lov_tgts[*pos]; - } - return NULL; -} - -static int lov_tgt_seq_show(struct seq_file *p, void *v) -{ - struct lov_tgt_desc *tgt = v; - - seq_printf(p, "%d: %s %sACTIVE\n", - tgt->ltd_index, obd_uuid2str(&tgt->ltd_uuid), - tgt->ltd_active ? 
"" : "IN"); - return 0; -} - -static const struct seq_operations lov_tgt_sops = { - .start = lov_tgt_seq_start, - .stop = lov_tgt_seq_stop, - .next = lov_tgt_seq_next, - .show = lov_tgt_seq_show, -}; - -static int lov_target_seq_open(struct inode *inode, struct file *file) -{ - struct seq_file *seq; - int rc; - - rc = seq_open(file, &lov_tgt_sops); - if (rc) - return rc; - - seq = file->private_data; - seq->private = inode->i_private; - return 0; -} - -static struct lprocfs_vars lprocfs_lov_obd_vars[] = { - { "stripesize", &lov_stripesize_fops, NULL }, - { "stripeoffset", &lov_stripeoffset_fops, NULL }, - { "stripecount", &lov_stripecount_fops, NULL }, - { "stripetype", &lov_stripetype_fops, NULL }, - /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/ - { "desc_uuid", &lov_desc_uuid_fops, NULL, 0 }, - { NULL } -}; - -static struct attribute *lov_attrs[] = { - &lustre_attr_activeobd.attr, - &lustre_attr_numobd.attr, - NULL, -}; - -static const struct attribute_group lov_attr_group = { - .attrs = lov_attrs, -}; - -void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars) -{ - lvars->sysfs_vars = &lov_attr_group; - lvars->obd_vars = lprocfs_lov_obd_vars; -} - -const struct file_operations lov_proc_target_fops = { - .owner = THIS_MODULE, - .open = lov_target_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = lprocfs_seq_release, -}; diff --git a/drivers/staging/lustre/lustre/mdc/Makefile b/drivers/staging/lustre/lustre/mdc/Makefile deleted file mode 100644 index c7bc3351ccb0..000000000000 --- a/drivers/staging/lustre/lustre/mdc/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += mdc.o -mdc-y := mdc_request.o mdc_reint.o mdc_lib.o mdc_locks.o lproc_mdc.o diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c deleted file mode 100644 index 
6cce32491eb5..000000000000 --- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c +++ /dev/null @@ -1,231 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include "mdc_internal.h" - -static ssize_t active_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - - return sprintf(buf, "%u\n", !dev->u.cli.cl_import->imp_deactive); -} - -static ssize_t active_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - unsigned long val; - int rc; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - if (val > 1) - return -ERANGE; - - /* opposite senses */ - if (dev->u.cli.cl_import->imp_deactive == val) { - rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val); - if (rc) - count = rc; - } else { - CDEBUG(D_CONFIG, "activate %lu: ignoring repeat request\n", val); - } - return count; -} -LUSTRE_RW_ATTR(active); - -static ssize_t max_rpcs_in_flight_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - int len; - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - __u32 max; - - max = obd_get_max_rpcs_in_flight(&dev->u.cli); - len = sprintf(buf, "%u\n", max); - - return len; -} - -static ssize_t max_rpcs_in_flight_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - rc = obd_set_max_rpcs_in_flight(&dev->u.cli, val); - if (rc) - count = rc; - - return count; -} -LUSTRE_RW_ATTR(max_rpcs_in_flight); - -static ssize_t max_mod_rpcs_in_flight_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - u16 max; - int len; - - max = dev->u.cli.cl_max_mod_rpcs_in_flight; - len = sprintf(buf, "%hu\n", max); - - 
return len; -} - -static ssize_t max_mod_rpcs_in_flight_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - u16 val; - int rc; - - rc = kstrtou16(buffer, 10, &val); - if (rc) - return rc; - - rc = obd_set_max_mod_rpcs_in_flight(&dev->u.cli, val); - if (rc) - count = rc; - - return count; -} -LUSTRE_RW_ATTR(max_mod_rpcs_in_flight); - -static int mdc_rpc_stats_seq_show(struct seq_file *seq, void *v) -{ - struct obd_device *dev = seq->private; - - return obd_mod_rpc_stats_seq_show(&dev->u.cli, seq); -} - -static ssize_t mdc_rpc_stats_seq_write(struct file *file, - const char __user *buf, - size_t len, loff_t *off) -{ - struct seq_file *seq = file->private_data; - struct obd_device *dev = seq->private; - struct client_obd *cli = &dev->u.cli; - - lprocfs_oh_clear(&cli->cl_mod_rpcs_hist); - - return len; -} -LPROC_SEQ_FOPS(mdc_rpc_stats); - -LPROC_SEQ_FOPS_WR_ONLY(mdc, ping); - -LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags); -LPROC_SEQ_FOPS_RO_TYPE(mdc, server_uuid); -LPROC_SEQ_FOPS_RO_TYPE(mdc, conn_uuid); -LPROC_SEQ_FOPS_RO_TYPE(mdc, timeouts); -LPROC_SEQ_FOPS_RO_TYPE(mdc, state); - -/* - * Note: below sysfs entry is provided, but not currently in use, instead - * sbi->sb_md_brw_size is used, the per obd variable should be used - * when DNE is enabled, and dir pages are managed in MDC layer. - * Don't forget to enable sysfs store function then. 
- */ -static ssize_t max_pages_per_rpc_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - - return sprintf(buf, "%d\n", cli->cl_max_pages_per_rpc); -} -LUSTRE_RO_ATTR(max_pages_per_rpc); - -LPROC_SEQ_FOPS_RW_TYPE(mdc, import); -LPROC_SEQ_FOPS_RW_TYPE(mdc, pinger_recov); - -static struct lprocfs_vars lprocfs_mdc_obd_vars[] = { - { "ping", &mdc_ping_fops, NULL, 0222 }, - { "connect_flags", &mdc_connect_flags_fops, NULL, 0 }, - /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/ - { "mds_server_uuid", &mdc_server_uuid_fops, NULL, 0 }, - { "mds_conn_uuid", &mdc_conn_uuid_fops, NULL, 0 }, - { "timeouts", &mdc_timeouts_fops, NULL, 0 }, - { "import", &mdc_import_fops, NULL, 0 }, - { "state", &mdc_state_fops, NULL, 0 }, - { "pinger_recov", &mdc_pinger_recov_fops, NULL, 0 }, - { .name = "rpc_stats", - .fops = &mdc_rpc_stats_fops }, - { NULL } -}; - -static struct attribute *mdc_attrs[] = { - &lustre_attr_active.attr, - &lustre_attr_max_rpcs_in_flight.attr, - &lustre_attr_max_mod_rpcs_in_flight.attr, - &lustre_attr_max_pages_per_rpc.attr, - NULL, -}; - -static const struct attribute_group mdc_attr_group = { - .attrs = mdc_attrs, -}; - -void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars) -{ - lvars->sysfs_vars = &mdc_attr_group; - lvars->obd_vars = lprocfs_mdc_obd_vars; -} diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h deleted file mode 100644 index 28924e927b50..000000000000 --- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h +++ /dev/null @@ -1,144 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef _MDC_INTERNAL_H -#define _MDC_INTERNAL_H - -#include - -void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars); - -void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid, - __u64 valid, size_t ea_size, __u32 suppgid, u32 flags); -void mdc_swap_layouts_pack(struct ptlrpc_request *req, - struct md_op_data *op_data); -void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size, - const struct lu_fid *fid); -void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags, - struct md_op_data *data, size_t ea_size); -void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - void *ea, size_t ealen); -void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - const void *data, size_t datalen, umode_t mode, uid_t uid, - gid_t gid, kernel_cap_t capability, __u64 rdev); -void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - umode_t mode, __u64 rdev, __u64 flags, const void *data, - size_t datalen); -void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data); -void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data); -void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - const char *old, size_t oldlen, - const char *new, size_t newlen); -void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data); - -/* mdc/mdc_locks.c */ -int mdc_set_lock_data(struct obd_export *exp, - const struct lustre_handle *lockh, - void *data, __u64 *bits); - -int mdc_null_inode(struct obd_export *exp, const struct lu_fid *fid); - -int mdc_intent_lock(struct obd_export *exp, - struct md_op_data *op_data, - struct lookup_intent *it, - struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, - __u64 extra_lock_flags); - -int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - const union ldlm_policy_data *policy, - struct md_op_data *op_data, - struct lustre_handle 
*lockh, u64 extra_lock_flags); - -int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, - struct list_head *cancels, enum ldlm_mode mode, - __u64 bits); -/* mdc/mdc_request.c */ -int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp, - struct lu_fid *fid, struct md_op_data *op_data); -struct obd_client_handle; - -int mdc_set_open_replay_data(struct obd_export *exp, - struct obd_client_handle *och, - struct lookup_intent *it); - -void mdc_commit_open(struct ptlrpc_request *req); -void mdc_replay_open(struct ptlrpc_request *req); - -int mdc_create(struct obd_export *exp, struct md_op_data *op_data, - const void *data, size_t datalen, umode_t mode, uid_t uid, - gid_t gid, kernel_cap_t capability, __u64 rdev, - struct ptlrpc_request **request); -int mdc_link(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request); -int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, - const char *old, size_t oldlen, - const char *new, size_t newlen, - struct ptlrpc_request **request); -int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, - void *ea, size_t ealen, struct ptlrpc_request **request); -int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request); -int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - union ldlm_policy_data *policy, enum ldlm_mode mode, - enum ldlm_cancel_flags flags, void *opaque); - -int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, - struct lu_fid *fid, __u64 *bits); - -int mdc_intent_getattr_async(struct obd_export *exp, - struct md_enqueue_info *minfo); - -enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, enum ldlm_type type, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - struct lustre_handle *lockh); - -static inline int mdc_prep_elc_req(struct obd_export *exp, - struct ptlrpc_request *req, int opc, - struct 
list_head *cancels, int count) -{ - return ldlm_prep_elc_req(exp, req, LUSTRE_MDS_VERSION, opc, 0, cancels, - count); -} - -static inline unsigned long hash_x_index(__u64 hash, int hash64) -{ - if (BITS_PER_LONG == 32 && hash64) - hash >>= 32; - /* save hash 0 with hash 1 */ - return ~0UL - (hash + !hash); -} - -#endif diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c deleted file mode 100644 index d582968987ff..000000000000 --- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c +++ /dev/null @@ -1,498 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_MDC -#include -#include -#include "mdc_internal.h" - -static void set_mrc_cr_flags(struct mdt_rec_create *mrc, u64 flags) -{ - mrc->cr_flags_l = (u32)(flags & 0xFFFFFFFFUll); - mrc->cr_flags_h = (u32)(flags >> 32); -} - -static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid) -{ - b->mbo_suppgid = suppgid; - b->mbo_uid = from_kuid(&init_user_ns, current_uid()); - b->mbo_gid = from_kgid(&init_user_ns, current_gid()); - b->mbo_fsuid = from_kuid(&init_user_ns, current_fsuid()); - b->mbo_fsgid = from_kgid(&init_user_ns, current_fsgid()); - b->mbo_capability = current_cap().cap[0]; -} - -void mdc_swap_layouts_pack(struct ptlrpc_request *req, - struct md_op_data *op_data) -{ - struct mdt_body *b = req_capsule_client_get(&req->rq_pill, - &RMF_MDT_BODY); - - __mdc_pack_body(b, op_data->op_suppgids[0]); - b->mbo_fid1 = op_data->op_fid1; - b->mbo_fid2 = op_data->op_fid2; - b->mbo_valid |= OBD_MD_FLID; -} - -void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid, - __u64 valid, size_t ea_size, __u32 suppgid, u32 flags) -{ - struct mdt_body *b = req_capsule_client_get(&req->rq_pill, - &RMF_MDT_BODY); - b->mbo_valid = valid; - b->mbo_eadatasize = ea_size; - b->mbo_flags = flags; - __mdc_pack_body(b, suppgid); - if (fid) { - b->mbo_fid1 = *fid; - b->mbo_valid |= OBD_MD_FLID; - } -} - -/** - * Pack a name (path component) into a request - * - * \param[in] req request - * \param[in] field request field (usually RMF_NAME) - * \param[in] name path component - * \param[in] name_len length of path component - * - * \a field must be present in \a req and of size \a name_len + 1. - * - * \a name must be '\0' terminated of length \a name_len and represent - * a single path component (not contain '/'). 
- */ -static void mdc_pack_name(struct ptlrpc_request *req, - const struct req_msg_field *field, - const char *name, size_t name_len) -{ - size_t buf_size; - size_t cpy_len; - char *buf; - - buf = req_capsule_client_get(&req->rq_pill, field); - buf_size = req_capsule_get_size(&req->rq_pill, field, RCL_CLIENT); - - LASSERT(name && name_len && buf && buf_size == name_len + 1); - - cpy_len = strlcpy(buf, name, buf_size); - - LASSERT(cpy_len == name_len && lu_name_is_valid_2(buf, cpy_len)); -} - -void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size, - const struct lu_fid *fid) -{ - struct mdt_body *b = req_capsule_client_get(&req->rq_pill, - &RMF_MDT_BODY); - b->mbo_fid1 = *fid; - b->mbo_valid |= OBD_MD_FLID; - b->mbo_size = pgoff; /* !! */ - b->mbo_nlink = size; /* !! */ - __mdc_pack_body(b, -1); - b->mbo_mode = LUDA_FID | LUDA_TYPE; -} - -/* packing of MDS records */ -void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - const void *data, size_t datalen, umode_t mode, - uid_t uid, gid_t gid, kernel_cap_t cap_effective, - __u64 rdev) -{ - struct mdt_rec_create *rec; - char *tmp; - __u64 flags; - - BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_create)); - rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - - rec->cr_opcode = REINT_CREATE; - rec->cr_fsuid = uid; - rec->cr_fsgid = gid; - rec->cr_cap = cap_effective.cap[0]; - rec->cr_fid1 = op_data->op_fid1; - rec->cr_fid2 = op_data->op_fid2; - rec->cr_mode = mode; - rec->cr_rdev = rdev; - rec->cr_time = op_data->op_mod_time; - rec->cr_suppgid1 = op_data->op_suppgids[0]; - rec->cr_suppgid2 = op_data->op_suppgids[1]; - flags = 0; - if (op_data->op_bias & MDS_CREATE_VOLATILE) - flags |= MDS_OPEN_VOLATILE; - set_mrc_cr_flags(rec, flags); - rec->cr_bias = op_data->op_bias; - rec->cr_umask = current_umask(); - - mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen); - if (data) { - tmp = req_capsule_client_get(&req->rq_pill, 
&RMF_EADATA); - memcpy(tmp, data, datalen); - } -} - -static inline __u64 mds_pack_open_flags(__u64 flags) -{ - __u64 cr_flags = (flags & (FMODE_READ | FMODE_WRITE | - MDS_OPEN_FL_INTERNAL)); - if (flags & O_CREAT) - cr_flags |= MDS_OPEN_CREAT; - if (flags & O_EXCL) - cr_flags |= MDS_OPEN_EXCL; - if (flags & O_TRUNC) - cr_flags |= MDS_OPEN_TRUNC; - if (flags & O_APPEND) - cr_flags |= MDS_OPEN_APPEND; - if (flags & O_SYNC) - cr_flags |= MDS_OPEN_SYNC; - if (flags & O_DIRECTORY) - cr_flags |= MDS_OPEN_DIRECTORY; - if (flags & __FMODE_EXEC) - cr_flags |= MDS_FMODE_EXEC; - if (cl_is_lov_delay_create(flags)) - cr_flags |= MDS_OPEN_DELAY_CREATE; - - if (flags & O_NONBLOCK) - cr_flags |= MDS_OPEN_NORESTORE; - - return cr_flags; -} - -/* packing of MDS records */ -void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - umode_t mode, __u64 rdev, __u64 flags, const void *lmm, - size_t lmmlen) -{ - struct mdt_rec_create *rec; - char *tmp; - __u64 cr_flags; - - BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_create)); - rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - - /* XXX do something about time, uid, gid */ - rec->cr_opcode = REINT_OPEN; - rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid()); - rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid()); - rec->cr_cap = current_cap().cap[0]; - rec->cr_fid1 = op_data->op_fid1; - rec->cr_fid2 = op_data->op_fid2; - - rec->cr_mode = mode; - cr_flags = mds_pack_open_flags(flags); - rec->cr_rdev = rdev; - rec->cr_time = op_data->op_mod_time; - rec->cr_suppgid1 = op_data->op_suppgids[0]; - rec->cr_suppgid2 = op_data->op_suppgids[1]; - rec->cr_bias = op_data->op_bias; - rec->cr_umask = current_umask(); - rec->cr_old_handle = op_data->op_handle; - - if (op_data->op_name) { - mdc_pack_name(req, &RMF_NAME, op_data->op_name, - op_data->op_namelen); - - if (op_data->op_bias & MDS_CREATE_VOLATILE) - cr_flags |= MDS_OPEN_VOLATILE; - } - - if (lmm) { - cr_flags |= MDS_OPEN_HAS_EA; 
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); - memcpy(tmp, lmm, lmmlen); - } - set_mrc_cr_flags(rec, cr_flags); -} - -static inline __u64 attr_pack(unsigned int ia_valid) -{ - __u64 sa_valid = 0; - - if (ia_valid & ATTR_MODE) - sa_valid |= MDS_ATTR_MODE; - if (ia_valid & ATTR_UID) - sa_valid |= MDS_ATTR_UID; - if (ia_valid & ATTR_GID) - sa_valid |= MDS_ATTR_GID; - if (ia_valid & ATTR_SIZE) - sa_valid |= MDS_ATTR_SIZE; - if (ia_valid & ATTR_ATIME) - sa_valid |= MDS_ATTR_ATIME; - if (ia_valid & ATTR_MTIME) - sa_valid |= MDS_ATTR_MTIME; - if (ia_valid & ATTR_CTIME) - sa_valid |= MDS_ATTR_CTIME; - if (ia_valid & ATTR_ATIME_SET) - sa_valid |= MDS_ATTR_ATIME_SET; - if (ia_valid & ATTR_MTIME_SET) - sa_valid |= MDS_ATTR_MTIME_SET; - if (ia_valid & ATTR_FORCE) - sa_valid |= MDS_ATTR_FORCE; - if (ia_valid & ATTR_ATTR_FLAG) - sa_valid |= MDS_ATTR_ATTR_FLAG; - if (ia_valid & ATTR_KILL_SUID) - sa_valid |= MDS_ATTR_KILL_SUID; - if (ia_valid & ATTR_KILL_SGID) - sa_valid |= MDS_ATTR_KILL_SGID; - if (ia_valid & ATTR_CTIME_SET) - sa_valid |= MDS_ATTR_CTIME_SET; - if (ia_valid & ATTR_OPEN) - sa_valid |= MDS_ATTR_FROM_OPEN; - if (ia_valid & ATTR_BLOCKS) - sa_valid |= MDS_ATTR_BLOCKS; - if (ia_valid & MDS_OPEN_OWNEROVERRIDE) - /* NFSD hack (see bug 5781) */ - sa_valid |= MDS_OPEN_OWNEROVERRIDE; - return sa_valid; -} - -static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec, - struct md_op_data *op_data) -{ - rec->sa_opcode = REINT_SETATTR; - rec->sa_fsuid = from_kuid(&init_user_ns, current_fsuid()); - rec->sa_fsgid = from_kgid(&init_user_ns, current_fsgid()); - rec->sa_cap = current_cap().cap[0]; - rec->sa_suppgid = -1; - - rec->sa_fid = op_data->op_fid1; - rec->sa_valid = attr_pack(op_data->op_attr.ia_valid); - rec->sa_mode = op_data->op_attr.ia_mode; - rec->sa_uid = from_kuid(&init_user_ns, op_data->op_attr.ia_uid); - rec->sa_gid = from_kgid(&init_user_ns, op_data->op_attr.ia_gid); - rec->sa_size = op_data->op_attr.ia_size; - rec->sa_blocks = 
op_data->op_attr_blocks; - rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime); - rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime); - rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime); - rec->sa_attr_flags = op_data->op_attr_flags; - if ((op_data->op_attr.ia_valid & ATTR_GID) && - in_group_p(op_data->op_attr.ia_gid)) - rec->sa_suppgid = - from_kgid(&init_user_ns, op_data->op_attr.ia_gid); - else - rec->sa_suppgid = op_data->op_suppgids[0]; - - rec->sa_bias = op_data->op_bias; -} - -static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch, - struct md_op_data *op_data) -{ - epoch->mio_handle = op_data->op_handle; - epoch->mio_unused1 = 0; - epoch->mio_unused2 = 0; - epoch->mio_padding = 0; -} - -void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - void *ea, size_t ealen) -{ - struct mdt_rec_setattr *rec; - struct lov_user_md *lum = NULL; - - BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != - sizeof(struct mdt_rec_setattr)); - rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - mdc_setattr_pack_rec(rec, op_data); - - if (ealen == 0) - return; - - lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); - if (!ea) { /* Remove LOV EA */ - lum->lmm_magic = cpu_to_le32(LOV_USER_MAGIC_V1); - lum->lmm_stripe_size = 0; - lum->lmm_stripe_count = 0; - lum->lmm_stripe_offset = (typeof(lum->lmm_stripe_offset))(-1); - } else { - memcpy(lum, ea, ealen); - } -} - -void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) -{ - struct mdt_rec_unlink *rec; - - BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_unlink)); - rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - - rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ? 
- REINT_RMENTRY : REINT_UNLINK; - rec->ul_fsuid = op_data->op_fsuid; - rec->ul_fsgid = op_data->op_fsgid; - rec->ul_cap = op_data->op_cap.cap[0]; - rec->ul_mode = op_data->op_mode; - rec->ul_suppgid1 = op_data->op_suppgids[0]; - rec->ul_suppgid2 = -1; - rec->ul_fid1 = op_data->op_fid1; - rec->ul_fid2 = op_data->op_fid2; - rec->ul_time = op_data->op_mod_time; - rec->ul_bias = op_data->op_bias; - - mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen); -} - -void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data) -{ - struct mdt_rec_link *rec; - - BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_link)); - rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - - rec->lk_opcode = REINT_LINK; - rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid; */ - rec->lk_fsgid = op_data->op_fsgid; /* current->fsgid; */ - rec->lk_cap = op_data->op_cap.cap[0]; /* current->cap_effective; */ - rec->lk_suppgid1 = op_data->op_suppgids[0]; - rec->lk_suppgid2 = op_data->op_suppgids[1]; - rec->lk_fid1 = op_data->op_fid1; - rec->lk_fid2 = op_data->op_fid2; - rec->lk_time = op_data->op_mod_time; - rec->lk_bias = op_data->op_bias; - - mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen); -} - -static void mdc_intent_close_pack(struct ptlrpc_request *req, - struct md_op_data *op_data) -{ - enum mds_op_bias bias = op_data->op_bias; - struct close_data *data; - struct ldlm_lock *lock; - - if (!(bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP | - MDS_RENAME_MIGRATE))) - return; - - data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA); - LASSERT(data); - - lock = ldlm_handle2lock(&op_data->op_lease_handle); - if (lock) { - data->cd_handle = lock->l_remote_handle; - LDLM_LOCK_PUT(lock); - } - ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL); - - data->cd_data_version = op_data->op_data_version; - data->cd_fid = op_data->op_fid2; -} - -void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data, 
- const char *old, size_t oldlen, - const char *new, size_t newlen) -{ - struct mdt_rec_rename *rec; - - BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_rename)); - rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - - /* XXX do something about time, uid, gid */ - rec->rn_opcode = op_data->op_cli_flags & CLI_MIGRATE ? - REINT_MIGRATE : REINT_RENAME; - rec->rn_opcode = REINT_RENAME; - rec->rn_fsuid = op_data->op_fsuid; - rec->rn_fsgid = op_data->op_fsgid; - rec->rn_cap = op_data->op_cap.cap[0]; - rec->rn_suppgid1 = op_data->op_suppgids[0]; - rec->rn_suppgid2 = op_data->op_suppgids[1]; - rec->rn_fid1 = op_data->op_fid1; - rec->rn_fid2 = op_data->op_fid2; - rec->rn_time = op_data->op_mod_time; - rec->rn_mode = op_data->op_mode; - rec->rn_bias = op_data->op_bias; - - mdc_pack_name(req, &RMF_NAME, old, oldlen); - - if (new) - mdc_pack_name(req, &RMF_SYMTGT, new, newlen); - - if (op_data->op_cli_flags & CLI_MIGRATE && - op_data->op_bias & MDS_RENAME_MIGRATE) { - struct mdt_ioepoch *epoch; - - mdc_intent_close_pack(req, op_data); - epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); - mdc_ioepoch_pack(epoch, op_data); - } -} - -void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags, - struct md_op_data *op_data, size_t ea_size) -{ - struct mdt_body *b = req_capsule_client_get(&req->rq_pill, - &RMF_MDT_BODY); - - b->mbo_valid = valid; - if (op_data->op_bias & MDS_CHECK_SPLIT) - b->mbo_valid |= OBD_MD_FLCKSPLIT; - if (op_data->op_bias & MDS_CROSS_REF) - b->mbo_valid |= OBD_MD_FLCROSSREF; - b->mbo_eadatasize = ea_size; - b->mbo_flags = flags; - __mdc_pack_body(b, op_data->op_suppgids[0]); - - b->mbo_fid1 = op_data->op_fid1; - b->mbo_fid2 = op_data->op_fid2; - b->mbo_valid |= OBD_MD_FLID; - - if (op_data->op_name) - mdc_pack_name(req, &RMF_NAME, op_data->op_name, - op_data->op_namelen); -} - -void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data) -{ - struct mdt_ioepoch *epoch; - struct 
mdt_rec_setattr *rec; - - epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); - rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - - mdc_setattr_pack_rec(rec, op_data); - /* - * The client will zero out local timestamps when losing the IBITS lock - * so any new RPC timestamps will update the client inode's timestamps. - * There was a defect on the server side which allowed the atime to be - * overwritten by a zeroed-out atime packed into the close RPC. - * - * Proactively clear the MDS_ATTR_ATIME flag in the RPC in this case - * to avoid zeroing the atime on old unpatched servers. See LU-8041. - */ - if (rec->sa_atime == 0) - rec->sa_valid &= ~MDS_ATTR_ATIME; - - mdc_ioepoch_pack(epoch, op_data); - mdc_intent_close_pack(req, op_data); -} diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c deleted file mode 100644 index a8aa0fa5e87a..000000000000 --- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c +++ /dev/null @@ -1,1239 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 
- * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_MDC - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mdc_internal.h" - -struct mdc_getattr_args { - struct obd_export *ga_exp; - struct md_enqueue_info *ga_minfo; -}; - -int it_open_error(int phase, struct lookup_intent *it) -{ - if (it_disposition(it, DISP_OPEN_LEASE)) { - if (phase >= DISP_OPEN_LEASE) - return it->it_status; - else - return 0; - } - if (it_disposition(it, DISP_OPEN_OPEN)) { - if (phase >= DISP_OPEN_OPEN) - return it->it_status; - else - return 0; - } - - if (it_disposition(it, DISP_OPEN_CREATE)) { - if (phase >= DISP_OPEN_CREATE) - return it->it_status; - else - return 0; - } - - if (it_disposition(it, DISP_LOOKUP_EXECD)) { - if (phase >= DISP_LOOKUP_EXECD) - return it->it_status; - else - return 0; - } - - if (it_disposition(it, DISP_IT_EXECD)) { - if (phase >= DISP_IT_EXECD) - return it->it_status; - else - return 0; - } - CERROR("it disp: %X, status: %d\n", it->it_disposition, - it->it_status); - LBUG(); - return 0; -} -EXPORT_SYMBOL(it_open_error); - -/* this must be called on a lockh that is known to have a referenced lock */ -int mdc_set_lock_data(struct obd_export *exp, const struct lustre_handle *lockh, - void *data, __u64 *bits) -{ - struct ldlm_lock *lock; - struct inode *new_inode = data; - - if (bits) - *bits = 0; - - if (!lustre_handle_is_used(lockh)) - return 0; - - lock = ldlm_handle2lock(lockh); - - LASSERT(lock); - lock_res_and_lock(lock); - if (lock->l_resource->lr_lvb_inode && - lock->l_resource->lr_lvb_inode != data) { - struct inode *old_inode = lock->l_resource->lr_lvb_inode; - - LASSERTF(old_inode->i_state & I_FREEING, - "Found existing inode %p/%lu/%u state %lu in lock: setting data to %p/%lu/%u\n", - old_inode, 
old_inode->i_ino, old_inode->i_generation, - old_inode->i_state, new_inode, new_inode->i_ino, - new_inode->i_generation); - } - lock->l_resource->lr_lvb_inode = new_inode; - if (bits) - *bits = lock->l_policy_data.l_inodebits.bits; - - unlock_res_and_lock(lock); - LDLM_LOCK_PUT(lock); - - return 0; -} - -enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, enum ldlm_type type, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - struct lustre_handle *lockh) -{ - struct ldlm_res_id res_id; - enum ldlm_mode rc; - - fid_build_reg_res_name(fid, &res_id); - /* LU-4405: Clear bits not supported by server */ - policy->l_inodebits.bits &= exp_connect_ibits(exp); - rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags, - &res_id, type, policy, mode, lockh, 0); - return rc; -} - -int mdc_cancel_unused(struct obd_export *exp, - const struct lu_fid *fid, - union ldlm_policy_data *policy, - enum ldlm_mode mode, - enum ldlm_cancel_flags flags, - void *opaque) -{ - struct ldlm_res_id res_id; - struct obd_device *obd = class_exp2obd(exp); - int rc; - - fid_build_reg_res_name(fid, &res_id); - rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id, - policy, mode, flags, opaque); - return rc; -} - -int mdc_null_inode(struct obd_export *exp, - const struct lu_fid *fid) -{ - struct ldlm_res_id res_id; - struct ldlm_resource *res; - struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace; - - LASSERTF(ns, "no namespace passed\n"); - - fid_build_reg_res_name(fid, &res_id); - - res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (IS_ERR(res)) - return 0; - - lock_res(res); - res->lr_lvb_inode = NULL; - unlock_res(res); - - ldlm_resource_putref(res); - return 0; -} - -static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc) -{ - /* Don't hold error requests for replay. 
*/ - if (req->rq_replay) { - spin_lock(&req->rq_lock); - req->rq_replay = 0; - spin_unlock(&req->rq_lock); - } - if (rc && req->rq_transno != 0) { - DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc); - LBUG(); - } -} - -/* Save a large LOV EA into the request buffer so that it is available - * for replay. We don't do this in the initial request because the - * original request doesn't need this buffer (at most it sends just the - * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty - * buffer and may also be difficult to allocate and save a very large - * request buffer for each open. (bug 5707) - * - * OOM here may cause recovery failure if lmm is needed (only for the - * original open if the MDS crashed just when this client also OOM'd) - * but this is incredibly unlikely, and questionable whether the client - * could do MDS recovery under OOM anyways... - */ -static void mdc_realloc_openmsg(struct ptlrpc_request *req, - struct mdt_body *body) -{ - int rc; - - /* FIXME: remove this explicit offset. */ - rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4, - body->mbo_eadatasize); - if (rc) { - CERROR("Can't enlarge segment %d size to %d\n", - DLM_INTENT_REC_OFF + 4, body->mbo_eadatasize); - body->mbo_valid &= ~OBD_MD_FLEASIZE; - body->mbo_eadatasize = 0; - } -} - -static struct ptlrpc_request * -mdc_intent_open_pack(struct obd_export *exp, struct lookup_intent *it, - struct md_op_data *op_data) -{ - struct ptlrpc_request *req; - struct obd_device *obddev = class_exp2obd(exp); - struct ldlm_intent *lit; - const void *lmm = op_data->op_data; - u32 lmmsize = op_data->op_data_size; - LIST_HEAD(cancels); - int count = 0; - int mode; - int rc; - - it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG; - - /* XXX: openlock is not cancelled for cross-refs. */ - /* If inode is known, cancel conflicting OPEN locks. 
*/ - if (fid_is_sane(&op_data->op_fid2)) { - if (it->it_flags & MDS_OPEN_LEASE) { /* try to get lease */ - if (it->it_flags & FMODE_WRITE) - mode = LCK_EX; - else - mode = LCK_PR; - } else { - if (it->it_flags & (FMODE_WRITE | MDS_OPEN_TRUNC)) - mode = LCK_CW; - else if (it->it_flags & __FMODE_EXEC) - mode = LCK_PR; - else - mode = LCK_CR; - } - count = mdc_resource_get_unused(exp, &op_data->op_fid2, - &cancels, mode, - MDS_INODELOCK_OPEN); - } - - /* If CREATE, cancel parent's UPDATE lock. */ - if (it->it_op & IT_CREAT) - mode = LCK_EX; - else - mode = LCK_CR; - count += mdc_resource_get_unused(exp, &op_data->op_fid1, - &cancels, mode, - MDS_INODELOCK_UPDATE); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_OPEN); - if (!req) { - ldlm_lock_list_put(&cancels, l_bl_ast, count); - return ERR_PTR(-ENOMEM); - } - - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - op_data->op_namelen + 1); - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, - max(lmmsize, obddev->u.cli.cl_default_mds_easize)); - - rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); - if (rc < 0) { - ptlrpc_request_free(req); - return ERR_PTR(rc); - } - - spin_lock(&req->rq_lock); - req->rq_replay = req->rq_import->imp_replayable; - spin_unlock(&req->rq_lock); - - /* pack the intent */ - lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); - lit->opc = (__u64)it->it_op; - - /* pack the intended request */ - mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm, - lmmsize); - - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - obddev->u.cli.cl_max_mds_easize); - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, - req->rq_import->imp_connect_data.ocd_max_easize); - - ptlrpc_request_set_replen(req); - return req; -} - -#define GA_DEFAULT_EA_NAME_LEN 20 -#define GA_DEFAULT_EA_VAL_LEN 250 -#define GA_DEFAULT_EA_NUM 10 - -static struct ptlrpc_request * -mdc_intent_getxattr_pack(struct obd_export *exp, - struct 
lookup_intent *it, - struct md_op_data *op_data) -{ - struct ptlrpc_request *req; - struct ldlm_intent *lit; - int rc, count = 0; - LIST_HEAD(cancels); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_GETXATTR); - if (!req) - return ERR_PTR(-ENOMEM); - - rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return ERR_PTR(rc); - } - - /* pack the intent */ - lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); - lit->opc = IT_GETXATTR; - - /* pack the intended request */ - mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, - GA_DEFAULT_EA_NAME_LEN * GA_DEFAULT_EA_NUM, -1, 0); - - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, - GA_DEFAULT_EA_NAME_LEN * GA_DEFAULT_EA_NUM); - - req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, RCL_SERVER, - GA_DEFAULT_EA_NAME_LEN * GA_DEFAULT_EA_NUM); - - req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS, RCL_SERVER, - sizeof(u32) * GA_DEFAULT_EA_NUM); - - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0); - - ptlrpc_request_set_replen(req); - - return req; -} - -static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, - struct lookup_intent *it, - struct md_op_data *op_data) -{ - struct ptlrpc_request *req; - struct obd_device *obddev = class_exp2obd(exp); - struct ldlm_intent *lit; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_UNLINK); - if (!req) - return ERR_PTR(-ENOMEM); - - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - op_data->op_namelen + 1); - - rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); - if (rc) { - ptlrpc_request_free(req); - return ERR_PTR(rc); - } - - /* pack the intent */ - lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); - lit->opc = (__u64)it->it_op; - - /* pack the intended request */ - mdc_unlink_pack(req, op_data); - - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - obddev->u.cli.cl_default_mds_easize); 
- ptlrpc_request_set_replen(req); - return req; -} - -static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, - struct lookup_intent *it, - struct md_op_data *op_data) -{ - struct ptlrpc_request *req; - struct obd_device *obddev = class_exp2obd(exp); - u64 valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE | - OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA | - OBD_MD_MEA | OBD_MD_FLACL; - struct ldlm_intent *lit; - int rc; - u32 easize; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_GETATTR); - if (!req) - return ERR_PTR(-ENOMEM); - - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - op_data->op_namelen + 1); - - rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); - if (rc) { - ptlrpc_request_free(req); - return ERR_PTR(rc); - } - - /* pack the intent */ - lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); - lit->opc = (__u64)it->it_op; - - if (obddev->u.cli.cl_default_mds_easize > 0) - easize = obddev->u.cli.cl_default_mds_easize; - else - easize = obddev->u.cli.cl_max_mds_easize; - - /* pack the intended request */ - mdc_getattr_pack(req, valid, it->it_flags, op_data, easize); - - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize); - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, - req->rq_import->imp_connect_data.ocd_max_easize); - ptlrpc_request_set_replen(req); - return req; -} - -static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp, - struct lookup_intent *it, - struct md_op_data *unused) -{ - struct obd_device *obd = class_exp2obd(exp); - struct ptlrpc_request *req; - struct ldlm_intent *lit; - struct layout_intent *layout; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_LAYOUT); - if (!req) - return ERR_PTR(-ENOMEM); - - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0); - rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); - if (rc) { - ptlrpc_request_free(req); - return ERR_PTR(rc); - } - - /* pack the intent */ - 
lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); - lit->opc = (__u64)it->it_op; - - /* pack the layout intent request */ - layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT); - /* LAYOUT_INTENT_ACCESS is generic, specific operation will be - * set for replication - */ - layout->li_opc = LAYOUT_INTENT_ACCESS; - - req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, - obd->u.cli.cl_default_mds_easize); - ptlrpc_request_set_replen(req); - return req; -} - -static struct ptlrpc_request * -mdc_enqueue_pack(struct obd_export *exp, int lvb_len) -{ - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE); - if (!req) - return ERR_PTR(-ENOMEM); - - rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); - if (rc) { - ptlrpc_request_free(req); - return ERR_PTR(rc); - } - - req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len); - ptlrpc_request_set_replen(req); - return req; -} - -static int mdc_finish_enqueue(struct obd_export *exp, - struct ptlrpc_request *req, - struct ldlm_enqueue_info *einfo, - struct lookup_intent *it, - struct lustre_handle *lockh, - int rc) -{ - struct req_capsule *pill = &req->rq_pill; - struct ldlm_request *lockreq; - struct ldlm_reply *lockrep; - struct ldlm_lock *lock; - void *lvb_data = NULL; - u32 lvb_len = 0; - - LASSERT(rc >= 0); - /* Similarly, if we're going to replay this request, we don't want to - * actually get a lock, just perform the intent. - */ - if (req->rq_transno || req->rq_replay) { - lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ); - lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY); - } - - if (rc == ELDLM_LOCK_ABORTED) { - einfo->ei_mode = 0; - memset(lockh, 0, sizeof(*lockh)); - rc = 0; - } else { /* rc = 0 */ - lock = ldlm_handle2lock(lockh); - - /* If the server gave us back a different lock mode, we should - * fix up our variables. 
- */ - if (lock->l_req_mode != einfo->ei_mode) { - ldlm_lock_addref(lockh, lock->l_req_mode); - ldlm_lock_decref(lockh, einfo->ei_mode); - einfo->ei_mode = lock->l_req_mode; - } - LDLM_LOCK_PUT(lock); - } - - lockrep = req_capsule_server_get(pill, &RMF_DLM_REP); - - it->it_disposition = (int)lockrep->lock_policy_res1; - it->it_status = (int)lockrep->lock_policy_res2; - it->it_lock_mode = einfo->ei_mode; - it->it_lock_handle = lockh->cookie; - it->it_request = req; - - /* Technically speaking rq_transno must already be zero if - * it_status is in error, so the check is a bit redundant - */ - if ((!req->rq_transno || it->it_status < 0) && req->rq_replay) - mdc_clear_replay_flag(req, it->it_status); - - /* If we're doing an IT_OPEN which did not result in an actual - * successful open, then we need to remove the bit which saves - * this request for unconditional replay. - * - * It's important that we do this first! Otherwise we might exit the - * function without doing so, and try to replay a failed create - * (bug 3440) - */ - if (it->it_op & IT_OPEN && req->rq_replay && - (!it_disposition(it, DISP_OPEN_OPEN) || it->it_status != 0)) - mdc_clear_replay_flag(req, it->it_status); - - DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d", - it->it_op, it->it_disposition, it->it_status); - - /* We know what to expect, so we do any byte flipping required here */ - if (it_has_reply_body(it)) { - struct mdt_body *body; - - body = req_capsule_server_get(pill, &RMF_MDT_BODY); - if (!body) { - CERROR("Can't swab mdt_body\n"); - return -EPROTO; - } - - if (it_disposition(it, DISP_OPEN_OPEN) && - !it_open_error(DISP_OPEN_OPEN, it)) { - /* - * If this is a successful OPEN request, we need to set - * replay handler and data early, so that if replay - * happens immediately after swabbing below, new reply - * is swabbed by that handler correctly. 
- */ - mdc_set_open_replay_data(NULL, NULL, it); - } - - if ((body->mbo_valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) { - void *eadata; - - mdc_update_max_ea_from_body(exp, body); - - /* - * The eadata is opaque; just check that it is there. - * Eventually, obd_unpackmd() will check the contents. - */ - eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, - body->mbo_eadatasize); - if (!eadata) - return -EPROTO; - - /* save lvb data and length in case this is for layout - * lock - */ - lvb_data = eadata; - lvb_len = body->mbo_eadatasize; - - /* - * We save the reply LOV EA in case we have to replay a - * create for recovery. If we didn't allocate a large - * enough request buffer above we need to reallocate it - * here to hold the actual LOV EA. - * - * To not save LOV EA if request is not going to replay - * (for example error one). - */ - if ((it->it_op & IT_OPEN) && req->rq_replay) { - void *lmm; - - if (req_capsule_get_size(pill, &RMF_EADATA, - RCL_CLIENT) < - body->mbo_eadatasize) - mdc_realloc_openmsg(req, body); - else - req_capsule_shrink(pill, &RMF_EADATA, - body->mbo_eadatasize, - RCL_CLIENT); - - req_capsule_set_size(pill, &RMF_EADATA, - RCL_CLIENT, - body->mbo_eadatasize); - - lmm = req_capsule_client_get(pill, &RMF_EADATA); - if (lmm) - memcpy(lmm, eadata, body->mbo_eadatasize); - } - } - } else if (it->it_op & IT_LAYOUT) { - /* maybe the lock was granted right away and layout - * is packed into RMF_DLM_LVB of req - */ - lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER); - if (lvb_len > 0) { - lvb_data = req_capsule_server_sized_get(pill, - &RMF_DLM_LVB, - lvb_len); - if (!lvb_data) - return -EPROTO; - } - } - - /* fill in stripe data for layout lock */ - lock = ldlm_handle2lock(lockh); - if (lock && ldlm_has_layout(lock) && lvb_data) { - void *lmm; - - LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d", - ldlm_it2str(it->it_op), lvb_len); - - lmm = kvzalloc(lvb_len, GFP_NOFS); - if (!lmm) { - LDLM_LOCK_PUT(lock); - return 
-ENOMEM; - } - memcpy(lmm, lvb_data, lvb_len); - - /* install lvb_data */ - lock_res_and_lock(lock); - if (!lock->l_lvb_data) { - lock->l_lvb_type = LVB_T_LAYOUT; - lock->l_lvb_data = lmm; - lock->l_lvb_len = lvb_len; - lmm = NULL; - } - unlock_res_and_lock(lock); - if (lmm) - kvfree(lmm); - } - if (lock) - LDLM_LOCK_PUT(lock); - - return rc; -} - -/* We always reserve enough space in the reply packet for a stripe MD, because - * we don't know in advance the file type. - */ -int mdc_enqueue_base(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - const union ldlm_policy_data *policy, - struct lookup_intent *it, struct md_op_data *op_data, - struct lustre_handle *lockh, u64 extra_lock_flags) -{ - static const union ldlm_policy_data lookup_policy = { - .l_inodebits = { MDS_INODELOCK_LOOKUP } - }; - static const union ldlm_policy_data update_policy = { - .l_inodebits = { MDS_INODELOCK_UPDATE } - }; - static const union ldlm_policy_data layout_policy = { - .l_inodebits = { MDS_INODELOCK_LAYOUT } - }; - static const union ldlm_policy_data getxattr_policy = { - .l_inodebits = { MDS_INODELOCK_XATTR } - }; - struct obd_device *obddev = class_exp2obd(exp); - struct ptlrpc_request *req = NULL; - u64 flags, saved_flags = extra_lock_flags; - struct ldlm_res_id res_id; - int generation, resends = 0; - struct ldlm_reply *lockrep; - enum lvb_type lvb_type = LVB_T_NONE; - int rc; - - LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n", - einfo->ei_type); - fid_build_reg_res_name(&op_data->op_fid1, &res_id); - - if (it) { - LASSERT(!policy); - - saved_flags |= LDLM_FL_HAS_INTENT; - if (it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR)) - policy = &update_policy; - else if (it->it_op & IT_LAYOUT) - policy = &layout_policy; - else if (it->it_op & (IT_GETXATTR | IT_SETXATTR)) - policy = &getxattr_policy; - else - policy = &lookup_policy; - } - - generation = obddev->u.cli.cl_import->imp_generation; -resend: - flags = saved_flags; - if (!it) { - /* The only way right 
now is FLOCK. */ - LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n", - einfo->ei_type); - res_id.name[3] = LDLM_FLOCK; - } else if (it->it_op & IT_OPEN) { - req = mdc_intent_open_pack(exp, it, op_data); - } else if (it->it_op & IT_UNLINK) { - req = mdc_intent_unlink_pack(exp, it, op_data); - } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) { - req = mdc_intent_getattr_pack(exp, it, op_data); - } else if (it->it_op & IT_READDIR) { - req = mdc_enqueue_pack(exp, 0); - } else if (it->it_op & IT_LAYOUT) { - if (!imp_connect_lvb_type(class_exp2cliimp(exp))) - return -EOPNOTSUPP; - req = mdc_intent_layout_pack(exp, it, op_data); - lvb_type = LVB_T_LAYOUT; - } else if (it->it_op & IT_GETXATTR) { - req = mdc_intent_getxattr_pack(exp, it, op_data); - } else { - LBUG(); - return -EINVAL; - } - - if (IS_ERR(req)) - return PTR_ERR(req); - - if (resends) { - req->rq_generation_set = 1; - req->rq_import_generation = generation; - req->rq_sent = ktime_get_real_seconds() + resends; - } - - /* It is important to obtain modify RPC slot first (if applicable), so - * that threads that are waiting for a modify RPC slot are not polluting - * our rpcs in flight counter. - * We do not do flock request limiting, though - */ - if (it) { - mdc_get_mod_rpc_slot(req, it); - rc = obd_get_request_slot(&obddev->u.cli); - if (rc != 0) { - mdc_put_mod_rpc_slot(req, it); - mdc_clear_replay_flag(req, 0); - ptlrpc_req_finished(req); - return rc; - } - } - - rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL, - 0, lvb_type, lockh, 0); - if (!it) { - /* For flock requests we immediately return without further - * delay and let caller deal with the rest, since rest of - * this function metadata processing makes no sense for flock - * requests anyway. 
But in case of problem during comms with - * Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we - * can not rely on caller and this mainly for F_UNLCKs - * (explicits or automatically generated by Kernel to clean - * current FLocks upon exit) that can't be trashed - */ - if (((rc == -EINTR) || (rc == -ETIMEDOUT)) && - (einfo->ei_type == LDLM_FLOCK) && - (einfo->ei_mode == LCK_NL)) - goto resend; - return rc; - } - - obd_put_request_slot(&obddev->u.cli); - mdc_put_mod_rpc_slot(req, it); - - if (rc < 0) { - CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n", - obddev->obd_name, rc); - - mdc_clear_replay_flag(req, rc); - ptlrpc_req_finished(req); - return rc; - } - - lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - - lockrep->lock_policy_res2 = - ptlrpc_status_ntoh(lockrep->lock_policy_res2); - - /* - * Retry infinitely when the server returns -EINPROGRESS for the - * intent operation, when server returns -EINPROGRESS for acquiring - * intent lock, we'll retry in after_reply(). 
- */ - if (it->it_op && (int)lockrep->lock_policy_res2 == -EINPROGRESS) { - mdc_clear_replay_flag(req, rc); - ptlrpc_req_finished(req); - resends++; - - CDEBUG(D_HA, "%s: resend:%d op:%d " DFID "/" DFID "\n", - obddev->obd_name, resends, it->it_op, - PFID(&op_data->op_fid1), PFID(&op_data->op_fid2)); - - if (generation == obddev->u.cli.cl_import->imp_generation) { - goto resend; - } else { - CDEBUG(D_HA, "resend cross eviction\n"); - return -EIO; - } - } - - rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); - if (rc < 0) { - if (lustre_handle_is_used(lockh)) { - ldlm_lock_decref(lockh, einfo->ei_mode); - memset(lockh, 0, sizeof(*lockh)); - } - ptlrpc_req_finished(req); - - it->it_lock_handle = 0; - it->it_lock_mode = 0; - it->it_request = NULL; - } - - return rc; -} - -int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - const union ldlm_policy_data *policy, - struct md_op_data *op_data, - struct lustre_handle *lockh, u64 extra_lock_flags) -{ - return mdc_enqueue_base(exp, einfo, policy, NULL, - op_data, lockh, extra_lock_flags); -} - -static int mdc_finish_intent_lock(struct obd_export *exp, - struct ptlrpc_request *request, - struct md_op_data *op_data, - struct lookup_intent *it, - struct lustre_handle *lockh) -{ - struct lustre_handle old_lock; - struct ldlm_lock *lock; - int rc = 0; - - LASSERT(request != LP_POISON); - LASSERT(request->rq_repmsg != LP_POISON); - - if (it->it_op & IT_READDIR) - return 0; - - if (it->it_op & (IT_GETXATTR | IT_LAYOUT)) { - if (it->it_status != 0) { - rc = it->it_status; - goto out; - } - goto matching_lock; - } - - if (!it_disposition(it, DISP_IT_EXECD)) { - /* The server failed before it even started executing the - * intent, i.e. because it couldn't unpack the request. 
- */ - LASSERT(it->it_status != 0); - rc = it->it_status; - goto out; - } - - rc = it_open_error(DISP_IT_EXECD, it); - if (rc) - goto out; - - rc = it_open_error(DISP_LOOKUP_EXECD, it); - if (rc) - goto out; - - /* keep requests around for the multiple phases of the call - * this shows the DISP_XX must guarantee we make it into the call - */ - if (!it_disposition(it, DISP_ENQ_CREATE_REF) && - it_disposition(it, DISP_OPEN_CREATE) && - !it_open_error(DISP_OPEN_CREATE, it)) { - it_set_disposition(it, DISP_ENQ_CREATE_REF); - ptlrpc_request_addref(request); /* balanced in ll_create_node */ - } - if (!it_disposition(it, DISP_ENQ_OPEN_REF) && - it_disposition(it, DISP_OPEN_OPEN) && - !it_open_error(DISP_OPEN_OPEN, it)) { - it_set_disposition(it, DISP_ENQ_OPEN_REF); - ptlrpc_request_addref(request); /* balanced in ll_file_open */ - /* BUG 11546 - eviction in the middle of open rpc processing */ - OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout); - } - - if (it->it_op & IT_CREAT) - /* XXX this belongs in ll_create_it */ - ; - else if (it->it_op == IT_OPEN) - LASSERT(!it_disposition(it, DISP_OPEN_CREATE)); - else - LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP)); - -matching_lock: - /* If we already have a matching lock, then cancel the new - * one. We have to set the data here instead of in - * mdc_enqueue, because we need to use the child's inode as - * the l_ast_data to match, and that's not available until - * intent_finish has performed the iget().) 
- */ - lock = ldlm_handle2lock(lockh); - if (lock) { - union ldlm_policy_data policy = lock->l_policy_data; - - LDLM_DEBUG(lock, "matching against this"); - - if (it_has_reply_body(it)) { - struct mdt_body *body; - - body = req_capsule_server_get(&request->rq_pill, - &RMF_MDT_BODY); - - /* mdc_enqueue checked */ - LASSERT(body); - LASSERTF(fid_res_name_eq(&body->mbo_fid1, - &lock->l_resource->lr_name), - "Lock res_id: " DLDLMRES ", fid: " DFID "\n", - PLDLMRES(lock->l_resource), - PFID(&body->mbo_fid1)); - } - LDLM_LOCK_PUT(lock); - - memcpy(&old_lock, lockh, sizeof(*lockh)); - if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL, - LDLM_IBITS, &policy, LCK_NL, - &old_lock, 0)) { - ldlm_lock_decref_and_cancel(lockh, - it->it_lock_mode); - memcpy(lockh, &old_lock, sizeof(old_lock)); - it->it_lock_handle = lockh->cookie; - } - } -out: - CDEBUG(D_DENTRY, - "D_IT dentry %.*s intent: %s status %d disp %x rc %d\n", - (int)op_data->op_namelen, op_data->op_name, - ldlm_it2str(it->it_op), it->it_status, it->it_disposition, rc); - return rc; -} - -int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, - struct lu_fid *fid, __u64 *bits) -{ - /* We could just return 1 immediately, but since we should only - * be called in revalidate_it if we already have a lock, let's - * verify that. - */ - struct ldlm_res_id res_id; - struct lustre_handle lockh; - union ldlm_policy_data policy; - enum ldlm_mode mode; - - if (it->it_lock_handle) { - lockh.cookie = it->it_lock_handle; - mode = ldlm_revalidate_lock_handle(&lockh, bits); - } else { - fid_build_reg_res_name(fid, &res_id); - switch (it->it_op) { - case IT_GETATTR: - /* File attributes are held under multiple bits: - * nlink is under lookup lock, size and times are - * under UPDATE lock and recently we've also got - * a separate permissions lock for owner/group/acl that - * were protected by lookup lock before. - * Getattr must provide all of that information, - * so we need to ensure we have all of those locks. 
- * Unfortunately, if the bits are split across multiple - * locks, there's no easy way to match all of them here, - * so an extra RPC would be performed to fetch all - * of those bits at once for now. - */ - /* For new MDTs(> 2.4), UPDATE|PERM should be enough, - * but for old MDTs (< 2.4), permission is covered - * by LOOKUP lock, so it needs to match all bits here. - */ - policy.l_inodebits.bits = MDS_INODELOCK_UPDATE | - MDS_INODELOCK_LOOKUP | - MDS_INODELOCK_PERM; - break; - case IT_READDIR: - policy.l_inodebits.bits = MDS_INODELOCK_UPDATE; - break; - case IT_LAYOUT: - policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT; - break; - default: - policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP; - break; - } - - mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid, - LDLM_IBITS, &policy, - LCK_CR | LCK_CW | LCK_PR | LCK_PW, - &lockh); - } - - if (mode) { - it->it_lock_handle = lockh.cookie; - it->it_lock_mode = mode; - } else { - it->it_lock_handle = 0; - it->it_lock_mode = 0; - } - - return !!mode; -} - -/* - * This long block is all about fixing up the lock and request state - * so that it is correct as of the moment _before_ the operation was - * applied; that way, the VFS will think that everything is normal and - * call Lustre's regular VFS methods. - * - * If we're performing a creation, that means that unless the creation - * failed with EEXIST, we should fake up a negative dentry. - * - * For everything else, we want the lookup to succeed. - * - * One additional note: if CREATE or OPEN succeeded, we add an extra - * reference to the request because we need to keep it around until - * ll_create/ll_open gets called. - * - * The server will return to us, in it_disposition, an indication of - * exactly what it_status refers to. - * - * If DISP_OPEN_OPEN is set, then it_status refers to the open() call, - * otherwise if DISP_OPEN_CREATE is set, then it_status is the - * creation failure mode. 
In either case, one of DISP_LOOKUP_NEG or - * DISP_LOOKUP_POS will be set, indicating whether the child lookup - * was successful. - * - * Else, if DISP_LOOKUP_EXECD then it_status is the rc of the - * child lookup. - */ -int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data, - struct lookup_intent *it, struct ptlrpc_request **reqp, - ldlm_blocking_callback cb_blocking, __u64 extra_lock_flags) -{ - struct ldlm_enqueue_info einfo = { - .ei_type = LDLM_IBITS, - .ei_mode = it_to_lock_mode(it), - .ei_cb_bl = cb_blocking, - .ei_cb_cp = ldlm_completion_ast, - }; - struct lustre_handle lockh; - int rc = 0; - - LASSERT(it); - - CDEBUG(D_DLMTRACE, "(name: %.*s," DFID ") in obj " DFID - ", intent: %s flags %#Lo\n", (int)op_data->op_namelen, - op_data->op_name, PFID(&op_data->op_fid2), - PFID(&op_data->op_fid1), ldlm_it2str(it->it_op), - it->it_flags); - - lockh.cookie = 0; - if (fid_is_sane(&op_data->op_fid2) && - (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_READDIR))) { - /* We could just return 1 immediately, but since we should only - * be called in revalidate_it if we already have a lock, let's - * verify that. - */ - it->it_lock_handle = 0; - rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL); - /* Only return failure if it was not GETATTR by cfid - * (from inode_revalidate) - */ - if (rc || op_data->op_namelen != 0) - return rc; - } - - /* For case if upper layer did not alloc fid, do it now. 
*/ - if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) { - rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); - if (rc < 0) { - CERROR("Can't alloc new fid, rc %d\n", rc); - return rc; - } - } - - rc = mdc_enqueue_base(exp, &einfo, NULL, it, op_data, &lockh, - extra_lock_flags); - if (rc < 0) - return rc; - - *reqp = it->it_request; - rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh); - return rc; -} - -static int mdc_intent_getattr_async_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - void *args, int rc) -{ - struct mdc_getattr_args *ga = args; - struct obd_export *exp = ga->ga_exp; - struct md_enqueue_info *minfo = ga->ga_minfo; - struct ldlm_enqueue_info *einfo = &minfo->mi_einfo; - struct lookup_intent *it; - struct lustre_handle *lockh; - struct obd_device *obddev; - struct ldlm_reply *lockrep; - __u64 flags = LDLM_FL_HAS_INTENT; - - it = &minfo->mi_it; - lockh = &minfo->mi_lockh; - - obddev = class_exp2obd(exp); - - obd_put_request_slot(&obddev->u.cli); - if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GETATTR_ENQUEUE)) - rc = -ETIMEDOUT; - - rc = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, 1, einfo->ei_mode, - &flags, NULL, 0, lockh, rc); - if (rc < 0) { - CERROR("ldlm_cli_enqueue_fini: %d\n", rc); - mdc_clear_replay_flag(req, rc); - goto out; - } - - lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - - lockrep->lock_policy_res2 = - ptlrpc_status_ntoh(lockrep->lock_policy_res2); - - rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); - if (rc) - goto out; - - rc = mdc_finish_intent_lock(exp, req, &minfo->mi_data, it, lockh); - -out: - minfo->mi_cb(req, minfo, rc); - return 0; -} - -int mdc_intent_getattr_async(struct obd_export *exp, - struct md_enqueue_info *minfo) -{ - struct md_op_data *op_data = &minfo->mi_data; - struct lookup_intent *it = &minfo->mi_it; - struct ptlrpc_request *req; - struct mdc_getattr_args *ga; - struct obd_device *obddev = class_exp2obd(exp); - struct ldlm_res_id res_id; - 
union ldlm_policy_data policy = { - .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE } - }; - int rc = 0; - __u64 flags = LDLM_FL_HAS_INTENT; - - CDEBUG(D_DLMTRACE, - "name: %.*s in inode " DFID ", intent: %s flags %#Lo\n", - (int)op_data->op_namelen, op_data->op_name, - PFID(&op_data->op_fid1), ldlm_it2str(it->it_op), it->it_flags); - - fid_build_reg_res_name(&op_data->op_fid1, &res_id); - req = mdc_intent_getattr_pack(exp, it, op_data); - if (IS_ERR(req)) - return PTR_ERR(req); - - rc = obd_get_request_slot(&obddev->u.cli); - if (rc != 0) { - ptlrpc_req_finished(req); - return rc; - } - - rc = ldlm_cli_enqueue(exp, &req, &minfo->mi_einfo, &res_id, &policy, - &flags, NULL, 0, LVB_T_NONE, &minfo->mi_lockh, 1); - if (rc < 0) { - obd_put_request_slot(&obddev->u.cli); - ptlrpc_req_finished(req); - return rc; - } - - BUILD_BUG_ON(sizeof(*ga) > sizeof(req->rq_async_args)); - ga = ptlrpc_req_async_args(req); - ga->ga_exp = exp; - ga->ga_minfo = minfo; - - req->rq_interpret_reply = mdc_intent_getattr_async_interpret; - ptlrpcd_add_req(req); - - return 0; -} diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c deleted file mode 100644 index e77c00df0693..000000000000 --- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c +++ /dev/null @@ -1,421 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_MDC - -# include -# include - -#include -#include "mdc_internal.h" -#include - -/* mdc_setattr does its own semaphore handling */ -static int mdc_reint(struct ptlrpc_request *request, int level) -{ - int rc; - - request->rq_send_state = level; - - mdc_get_mod_rpc_slot(request, NULL); - rc = ptlrpc_queue_wait(request); - mdc_put_mod_rpc_slot(request, NULL); - if (rc) - CDEBUG(D_INFO, "error in handling %d\n", rc); - else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY)) - rc = -EPROTO; - - return rc; -} - -/* Find and cancel locally locks matched by inode @bits & @mode in the resource - * found by @fid. Found locks are added into @cancel list. Returns the amount of - * locks added to @cancels list. - */ -int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, - struct list_head *cancels, enum ldlm_mode mode, - __u64 bits) -{ - struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; - union ldlm_policy_data policy = {}; - struct ldlm_res_id res_id; - struct ldlm_resource *res; - int count; - - /* Return, i.e. cancel nothing, only if ELC is supported (flag in - * export) but disabled through procfs (flag in NS). - * - * This distinguishes from a case when ELC is not supported originally, - * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. 
- */ - if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) - return 0; - - fid_build_reg_res_name(fid, &res_id); - res = ldlm_resource_get(exp->exp_obd->obd_namespace, - NULL, &res_id, 0, 0); - if (IS_ERR(res)) - return 0; - LDLM_RESOURCE_ADDREF(res); - /* Initialize ibits lock policy. */ - policy.l_inodebits.bits = bits; - count = ldlm_cancel_resource_local(res, cancels, &policy, - mode, 0, 0, NULL); - LDLM_RESOURCE_DELREF(res); - ldlm_resource_putref(res); - return count; -} - -int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, - void *ea, size_t ealen, struct ptlrpc_request **request) -{ - LIST_HEAD(cancels); - struct ptlrpc_request *req; - int count = 0, rc; - __u64 bits; - - bits = MDS_INODELOCK_UPDATE; - if (op_data->op_attr.ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) - bits |= MDS_INODELOCK_LOOKUP; - if ((op_data->op_flags & MF_MDC_CANCEL_FID1) && - (fid_is_sane(&op_data->op_fid1))) - count = mdc_resource_get_unused(exp, &op_data->op_fid1, - &cancels, LCK_EX, bits); - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_REINT_SETATTR); - if (!req) { - ldlm_lock_list_put(&cancels, l_bl_ast, count); - return -ENOMEM; - } - req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0); - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen); - req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0); - - rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME)) - CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n", - LTIME_S(op_data->op_attr.ia_mtime), - LTIME_S(op_data->op_attr.ia_ctime)); - mdc_setattr_pack(req, op_data, ea, ealen); - - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, - req->rq_import->imp_connect_data.ocd_max_easize); - ptlrpc_request_set_replen(req); - - rc = mdc_reint(req, LUSTRE_IMP_FULL); - - if (rc == -ERESTARTSYS) - rc = 0; - - *request = req; 
- - return rc; -} - -int mdc_create(struct obd_export *exp, struct md_op_data *op_data, - const void *data, size_t datalen, umode_t mode, - uid_t uid, gid_t gid, kernel_cap_t cap_effective, - __u64 rdev, struct ptlrpc_request **request) -{ - struct ptlrpc_request *req; - int level, rc; - int count, resends = 0; - struct obd_import *import = exp->exp_obd->u.cli.cl_import; - int generation = import->imp_generation; - LIST_HEAD(cancels); - - /* For case if upper layer did not alloc fid, do it now. */ - if (!fid_is_sane(&op_data->op_fid2)) { - /* - * mdc_fid_alloc() may return errno 1 in case of switch to new - * sequence, handle this. - */ - rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); - if (rc < 0) - return rc; - } - -rebuild: - count = 0; - if ((op_data->op_flags & MF_MDC_CANCEL_FID1) && - (fid_is_sane(&op_data->op_fid1))) - count = mdc_resource_get_unused(exp, &op_data->op_fid1, - &cancels, LCK_EX, - MDS_INODELOCK_UPDATE); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_REINT_CREATE_ACL); - if (!req) { - ldlm_lock_list_put(&cancels, l_bl_ast, count); - return -ENOMEM; - } - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - op_data->op_namelen + 1); - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, - data && datalen ? datalen : 0); - - rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - /* - * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with - * tgt, for symlinks or lov MD data. 
- */ - mdc_create_pack(req, op_data, data, datalen, mode, uid, - gid, cap_effective, rdev); - - ptlrpc_request_set_replen(req); - - /* ask ptlrpc not to resend on EINPROGRESS since we have our own retry - * logic here - */ - req->rq_no_retry_einprogress = 1; - - if (resends) { - req->rq_generation_set = 1; - req->rq_import_generation = generation; - req->rq_sent = ktime_get_real_seconds() + resends; - } - level = LUSTRE_IMP_FULL; - resend: - rc = mdc_reint(req, level); - - /* Resend if we were told to. */ - if (rc == -ERESTARTSYS) { - level = LUSTRE_IMP_RECOVER; - goto resend; - } else if (rc == -EINPROGRESS) { - /* Retry create infinitely until succeed or get other - * error code. - */ - ptlrpc_req_finished(req); - resends++; - - CDEBUG(D_HA, "%s: resend:%d create on " DFID "/" DFID "\n", - exp->exp_obd->obd_name, resends, - PFID(&op_data->op_fid1), PFID(&op_data->op_fid2)); - - if (generation == import->imp_generation) { - goto rebuild; - } else { - CDEBUG(D_HA, "resend cross eviction\n"); - return -EIO; - } - } - - *request = req; - return rc; -} - -int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - LIST_HEAD(cancels); - struct obd_device *obd = class_exp2obd(exp); - struct ptlrpc_request *req = *request; - int count = 0, rc; - - LASSERT(!req); - - if ((op_data->op_flags & MF_MDC_CANCEL_FID1) && - (fid_is_sane(&op_data->op_fid1))) - count = mdc_resource_get_unused(exp, &op_data->op_fid1, - &cancels, LCK_EX, - MDS_INODELOCK_UPDATE); - if ((op_data->op_flags & MF_MDC_CANCEL_FID3) && - (fid_is_sane(&op_data->op_fid3))) - count += mdc_resource_get_unused(exp, &op_data->op_fid3, - &cancels, LCK_EX, - MDS_INODELOCK_FULL); - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_REINT_UNLINK); - if (!req) { - ldlm_lock_list_put(&cancels, l_bl_ast, count); - return -ENOMEM; - } - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - op_data->op_namelen + 1); - - rc = mdc_prep_elc_req(exp, req, 
MDS_REINT, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_unlink_pack(req, op_data); - - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - obd->u.cli.cl_default_mds_easize); - ptlrpc_request_set_replen(req); - - *request = req; - - rc = mdc_reint(req, LUSTRE_IMP_FULL); - if (rc == -ERESTARTSYS) - rc = 0; - return rc; -} - -int mdc_link(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - LIST_HEAD(cancels); - struct ptlrpc_request *req; - int count = 0, rc; - - if ((op_data->op_flags & MF_MDC_CANCEL_FID2) && - (fid_is_sane(&op_data->op_fid2))) - count = mdc_resource_get_unused(exp, &op_data->op_fid2, - &cancels, LCK_EX, - MDS_INODELOCK_UPDATE); - if ((op_data->op_flags & MF_MDC_CANCEL_FID1) && - (fid_is_sane(&op_data->op_fid1))) - count += mdc_resource_get_unused(exp, &op_data->op_fid1, - &cancels, LCK_EX, - MDS_INODELOCK_UPDATE); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK); - if (!req) { - ldlm_lock_list_put(&cancels, l_bl_ast, count); - return -ENOMEM; - } - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - op_data->op_namelen + 1); - - rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_link_pack(req, op_data); - ptlrpc_request_set_replen(req); - - rc = mdc_reint(req, LUSTRE_IMP_FULL); - *request = req; - if (rc == -ERESTARTSYS) - rc = 0; - - return rc; -} - -int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, - const char *old, size_t oldlen, const char *new, size_t newlen, - struct ptlrpc_request **request) -{ - LIST_HEAD(cancels); - struct obd_device *obd = exp->exp_obd; - struct ptlrpc_request *req; - int count = 0, rc; - - if ((op_data->op_flags & MF_MDC_CANCEL_FID1) && - (fid_is_sane(&op_data->op_fid1))) - count = mdc_resource_get_unused(exp, &op_data->op_fid1, - &cancels, LCK_EX, - MDS_INODELOCK_UPDATE); - if ((op_data->op_flags & 
MF_MDC_CANCEL_FID2) && - (fid_is_sane(&op_data->op_fid2))) - count += mdc_resource_get_unused(exp, &op_data->op_fid2, - &cancels, LCK_EX, - MDS_INODELOCK_UPDATE); - if ((op_data->op_flags & MF_MDC_CANCEL_FID3) && - (fid_is_sane(&op_data->op_fid3))) - count += mdc_resource_get_unused(exp, &op_data->op_fid3, - &cancels, LCK_EX, - MDS_INODELOCK_LOOKUP); - if ((op_data->op_flags & MF_MDC_CANCEL_FID4) && - (fid_is_sane(&op_data->op_fid4))) - count += mdc_resource_get_unused(exp, &op_data->op_fid4, - &cancels, LCK_EX, - MDS_INODELOCK_FULL); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - op_data->op_cli_flags & CLI_MIGRATE ? - &RQF_MDS_REINT_MIGRATE : &RQF_MDS_REINT_RENAME); - if (!req) { - ldlm_lock_list_put(&cancels, l_bl_ast, count); - return -ENOMEM; - } - - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, oldlen + 1); - req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT, - newlen + 1); - - rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - if (op_data->op_cli_flags & CLI_MIGRATE && op_data->op_data) { - struct md_open_data *mod = op_data->op_data; - - LASSERTF(mod->mod_open_req && - mod->mod_open_req->rq_type != LI_POISON, - "POISONED open %p!\n", mod->mod_open_req); - - DEBUG_REQ(D_HA, mod->mod_open_req, "matched open"); - /* - * We no longer want to preserve this open for replay even - * though the open was committed. 
b=3632, b=3633 - */ - spin_lock(&mod->mod_open_req->rq_lock); - mod->mod_open_req->rq_replay = 0; - spin_unlock(&mod->mod_open_req->rq_lock); - } - - if (exp_connect_cancelset(exp) && req) - ldlm_cli_cancel_list(&cancels, count, req, 0); - - mdc_rename_pack(req, op_data, old, oldlen, new, newlen); - - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - obd->u.cli.cl_default_mds_easize); - ptlrpc_request_set_replen(req); - - rc = mdc_reint(req, LUSTRE_IMP_FULL); - *request = req; - if (rc == -ERESTARTSYS) - rc = 0; - - return rc; -} diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c deleted file mode 100644 index cff31cb0a9ac..000000000000 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ /dev/null @@ -1,2770 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_MDC - -# include -# include -# include -# include -# include -# include -# include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mdc_internal.h" - -#define REQUEST_MINOR 244 - -static int mdc_cleanup(struct obd_device *obd); - -static inline int mdc_queue_wait(struct ptlrpc_request *req) -{ - struct client_obd *cli = &req->rq_import->imp_obd->u.cli; - int rc; - - /* obd_get_request_slot() ensures that this client has no more - * than cl_max_rpcs_in_flight RPCs simultaneously inf light - * against an MDT. - */ - rc = obd_get_request_slot(cli); - if (rc != 0) - return rc; - - rc = ptlrpc_queue_wait(req); - obd_put_request_slot(cli); - - return rc; -} - -static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid) -{ - struct ptlrpc_request *req; - struct mdt_body *body; - int rc; - - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_MDS_GETSTATUS, - LUSTRE_MDS_VERSION, MDS_GETSTATUS); - if (!req) - return -ENOMEM; - - mdc_pack_body(req, NULL, 0, 0, -1, 0); - req->rq_send_state = LUSTRE_IMP_FULL; - - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (!body) { - rc = -EPROTO; - goto out; - } - - *rootfid = body->mbo_fid1; - CDEBUG(D_NET, - "root fid=" DFID ", last_committed=%llu\n", - PFID(rootfid), - lustre_msg_get_last_committed(req->rq_repmsg)); -out: - ptlrpc_req_finished(req); - return rc; -} - -/* - * This function now is known to always saying that it will receive 4 buffers - * from server. Even for cases when acl_size and md_size is zero, RPC header - * will contain 4 fields and RPC itself will contain zero size fields. This is - * because mdt_getattr*() _always_ returns 4 fields, but if acl is not needed - * and thus zero, it shrinks it, making zero size. The same story about - * md_size. 
And this is course of problem when client waits for smaller number - * of fields. This issue will be fixed later when client gets aware of RPC - * layouts. --umka - */ -static int mdc_getattr_common(struct obd_export *exp, - struct ptlrpc_request *req) -{ - struct req_capsule *pill = &req->rq_pill; - struct mdt_body *body; - void *eadata; - int rc; - - /* Request message already built. */ - rc = ptlrpc_queue_wait(req); - if (rc != 0) - return rc; - - /* sanity check for the reply */ - body = req_capsule_server_get(pill, &RMF_MDT_BODY); - if (!body) - return -EPROTO; - - CDEBUG(D_NET, "mode: %o\n", body->mbo_mode); - - mdc_update_max_ea_from_body(exp, body); - if (body->mbo_eadatasize != 0) { - eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, - body->mbo_eadatasize); - if (!eadata) - return -EPROTO; - } - - return 0; -} - -static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - struct ptlrpc_request *req; - int rc; - - /* Single MDS without an LMV case */ - if (op_data->op_flags & MF_GET_MDT_IDX) { - op_data->op_mds = 0; - return 0; - } - *request = NULL; - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, - op_data->op_mode, -1, 0); - - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, - req->rq_import->imp_connect_data.ocd_max_easize); - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - op_data->op_mode); - ptlrpc_request_set_replen(req); - - rc = mdc_getattr_common(exp, req); - if (rc) - ptlrpc_req_finished(req); - else - *request = req; - return rc; -} - -static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - struct ptlrpc_request *req; - int rc; - - *request = NULL; - req = 
ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_GETATTR_NAME); - if (!req) - return -ENOMEM; - - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - op_data->op_namelen + 1); - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, - op_data->op_mode, op_data->op_suppgids[0], 0); - - if (op_data->op_name) { - char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME); - - LASSERT(strnlen(op_data->op_name, op_data->op_namelen) == - op_data->op_namelen); - memcpy(name, op_data->op_name, op_data->op_namelen); - } - - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - op_data->op_mode); - req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, - req->rq_import->imp_connect_data.ocd_max_easize); - ptlrpc_request_set_replen(req); - - rc = mdc_getattr_common(exp, req); - if (rc) - ptlrpc_req_finished(req); - else - *request = req; - return rc; -} - -static int mdc_xattr_common(struct obd_export *exp, - const struct req_format *fmt, - const struct lu_fid *fid, - int opcode, u64 valid, - const char *xattr_name, const char *input, - int input_size, int output_size, int flags, - __u32 suppgid, struct ptlrpc_request **request) -{ - struct ptlrpc_request *req; - int xattr_namelen = 0; - char *tmp; - int rc; - - *request = NULL; - req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt); - if (!req) - return -ENOMEM; - - if (xattr_name) { - xattr_namelen = strlen(xattr_name) + 1; - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - xattr_namelen); - } - if (input_size) { - LASSERT(input); - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, - input_size); - } - - /* Flush local XATTR locks to get rid of a possible cancel RPC */ - if (opcode == MDS_REINT && fid_is_sane(fid) && - exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) { - LIST_HEAD(cancels); - int count; - - /* Without that 
packing would fail */ - if (input_size == 0) - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, - RCL_CLIENT, 0); - - count = mdc_resource_get_unused(exp, fid, - &cancels, LCK_EX, - MDS_INODELOCK_XATTR); - - rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - } else { - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - } - - if (opcode == MDS_REINT) { - struct mdt_rec_setxattr *rec; - - BUILD_BUG_ON(sizeof(struct mdt_rec_setxattr) != - sizeof(struct mdt_rec_reint)); - rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - rec->sx_opcode = REINT_SETXATTR; - rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid()); - rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid()); - rec->sx_cap = current_cap().cap[0]; - rec->sx_suppgid1 = suppgid; - rec->sx_suppgid2 = -1; - rec->sx_fid = *fid; - rec->sx_valid = valid | OBD_MD_FLCTIME; - rec->sx_time = ktime_get_real_seconds(); - rec->sx_size = output_size; - rec->sx_flags = flags; - - } else { - mdc_pack_body(req, fid, valid, output_size, suppgid, flags); - } - - if (xattr_name) { - tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); - memcpy(tmp, xattr_name, xattr_namelen); - } - if (input_size) { - tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); - memcpy(tmp, input, input_size); - } - - if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER)) - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, - RCL_SERVER, output_size); - ptlrpc_request_set_replen(req); - - /* make rpc */ - if (opcode == MDS_REINT) - mdc_get_mod_rpc_slot(req, NULL); - - rc = ptlrpc_queue_wait(req); - - if (opcode == MDS_REINT) - mdc_put_mod_rpc_slot(req, NULL); - - if (rc) - ptlrpc_req_finished(req); - else - *request = req; - return rc; -} - -static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid, - u64 obd_md_valid, const char *name, - const void *value, size_t 
value_size, - unsigned int xattr_flags, u32 suppgid, - struct ptlrpc_request **req) -{ - LASSERT(obd_md_valid == OBD_MD_FLXATTR || - obd_md_valid == OBD_MD_FLXATTRRM); - - return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR, - fid, MDS_REINT, obd_md_valid, name, - value, value_size, 0, xattr_flags, suppgid, - req); -} - -static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid, - u64 obd_md_valid, const char *name, size_t buf_size, - struct ptlrpc_request **req) -{ - LASSERT(obd_md_valid == OBD_MD_FLXATTR || - obd_md_valid == OBD_MD_FLXATTRLS); - - return mdc_xattr_common(exp, &RQF_MDS_GETXATTR, fid, MDS_GETXATTR, - obd_md_valid, name, NULL, 0, buf_size, 0, -1, - req); -} - -#ifdef CONFIG_FS_POSIX_ACL -static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md) -{ - struct req_capsule *pill = &req->rq_pill; - struct mdt_body *body = md->body; - struct posix_acl *acl; - void *buf; - int rc; - - if (!body->mbo_aclsize) - return 0; - - buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->mbo_aclsize); - - if (!buf) - return -EPROTO; - - acl = posix_acl_from_xattr(&init_user_ns, buf, body->mbo_aclsize); - if (!acl) - return 0; - - if (IS_ERR(acl)) { - rc = PTR_ERR(acl); - CERROR("convert xattr to acl: %d\n", rc); - return rc; - } - - rc = posix_acl_valid(&init_user_ns, acl); - if (rc) { - CERROR("validate acl: %d\n", rc); - posix_acl_release(acl); - return rc; - } - - md->posix_acl = acl; - return 0; -} -#else -#define mdc_unpack_acl(req, md) 0 -#endif - -static int mdc_get_lustre_md(struct obd_export *exp, - struct ptlrpc_request *req, - struct obd_export *dt_exp, - struct obd_export *md_exp, - struct lustre_md *md) -{ - struct req_capsule *pill = &req->rq_pill; - int rc; - - LASSERT(md); - memset(md, 0, sizeof(*md)); - - md->body = req_capsule_server_get(pill, &RMF_MDT_BODY); - - if (md->body->mbo_valid & OBD_MD_FLEASIZE) { - if (!S_ISREG(md->body->mbo_mode)) { - CDEBUG(D_INFO, - "OBD_MD_FLEASIZE set, should be a regular file, but 
is not\n"); - rc = -EPROTO; - goto out; - } - - if (md->body->mbo_eadatasize == 0) { - CDEBUG(D_INFO, - "OBD_MD_FLEASIZE set, but eadatasize 0\n"); - rc = -EPROTO; - goto out; - } - - md->layout.lb_len = md->body->mbo_eadatasize; - md->layout.lb_buf = req_capsule_server_sized_get(pill, - &RMF_MDT_MD, - md->layout.lb_len); - if (!md->layout.lb_buf) { - rc = -EPROTO; - goto out; - } - } else if (md->body->mbo_valid & OBD_MD_FLDIREA) { - const union lmv_mds_md *lmv; - size_t lmv_size; - - if (!S_ISDIR(md->body->mbo_mode)) { - CDEBUG(D_INFO, - "OBD_MD_FLDIREA set, should be a directory, but is not\n"); - rc = -EPROTO; - goto out; - } - - lmv_size = md->body->mbo_eadatasize; - if (!lmv_size) { - CDEBUG(D_INFO, - "OBD_MD_FLDIREA is set, but eadatasize 0\n"); - return -EPROTO; - } - if (md->body->mbo_valid & OBD_MD_MEA) { - lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD, - lmv_size); - if (!lmv) { - rc = -EPROTO; - goto out; - } - - rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size); - if (rc < 0) - goto out; - - if (rc < (typeof(rc))sizeof(*md->lmv)) { - CDEBUG(D_INFO, - "size too small: rc < sizeof(*md->lmv) (%d < %d)\n", - rc, (int)sizeof(*md->lmv)); - rc = -EPROTO; - goto out; - } - } - } - rc = 0; - - if (md->body->mbo_valid & OBD_MD_FLACL) { - /* for ACL, it's possible that FLACL is set but aclsize is zero. - * only when aclsize != 0 there's an actual segment for ACL - * in reply buffer. 
- */ - if (md->body->mbo_aclsize) { - rc = mdc_unpack_acl(req, md); - if (rc) - goto out; -#ifdef CONFIG_FS_POSIX_ACL - } else { - md->posix_acl = NULL; -#endif - } - } - -out: - if (rc) { -#ifdef CONFIG_FS_POSIX_ACL - posix_acl_release(md->posix_acl); -#endif - } - return rc; -} - -static int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md) -{ - return 0; -} - -void mdc_replay_open(struct ptlrpc_request *req) -{ - struct md_open_data *mod = req->rq_cb_data; - struct ptlrpc_request *close_req; - struct obd_client_handle *och; - struct lustre_handle old; - struct mdt_body *body; - - if (!mod) { - DEBUG_REQ(D_ERROR, req, - "Can't properly replay without open data."); - return; - } - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - - och = mod->mod_och; - if (och) { - struct lustre_handle *file_fh; - - LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC); - - file_fh = &och->och_fh; - CDEBUG(D_HA, "updating handle from %#llx to %#llx\n", - file_fh->cookie, body->mbo_handle.cookie); - old = *file_fh; - *file_fh = body->mbo_handle; - } - close_req = mod->mod_close_req; - if (close_req) { - __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg); - struct mdt_ioepoch *epoch; - - LASSERT(opc == MDS_CLOSE); - epoch = req_capsule_client_get(&close_req->rq_pill, - &RMF_MDT_EPOCH); - LASSERT(epoch); - - if (och) - LASSERT(!memcmp(&old, &epoch->mio_handle, sizeof(old))); - DEBUG_REQ(D_HA, close_req, "updating close body with new fh"); - epoch->mio_handle = body->mbo_handle; - } -} - -void mdc_commit_open(struct ptlrpc_request *req) -{ - struct md_open_data *mod = req->rq_cb_data; - - if (!mod) - return; - - /** - * No need to touch md_open_data::mod_och, it holds a reference on - * \var mod and will zero references to each other, \var mod will be - * freed after that when md_open_data::mod_och will put the reference. 
- */ - - /** - * Do not let open request to disappear as it still may be needed - * for close rpc to happen (it may happen on evict only, otherwise - * ptlrpc_request::rq_replay does not let mdc_commit_open() to be - * called), just mark this rpc as committed to distinguish these 2 - * cases, see mdc_close() for details. The open request reference will - * be put along with freeing \var mod. - */ - ptlrpc_request_addref(req); - spin_lock(&req->rq_lock); - req->rq_committed = 1; - spin_unlock(&req->rq_lock); - req->rq_cb_data = NULL; - obd_mod_put(mod); -} - -int mdc_set_open_replay_data(struct obd_export *exp, - struct obd_client_handle *och, - struct lookup_intent *it) -{ - struct md_open_data *mod; - struct mdt_rec_create *rec; - struct mdt_body *body; - struct ptlrpc_request *open_req = it->it_request; - struct obd_import *imp = open_req->rq_import; - - if (!open_req->rq_replay) - return 0; - - rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT); - body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); - LASSERT(rec); - /* Incoming message in my byte order (it's been swabbed). */ - /* Outgoing messages always in my byte order. */ - LASSERT(body); - - /* Only if the import is replayable, we set replay_open data */ - if (och && imp->imp_replayable) { - mod = obd_mod_alloc(); - if (!mod) { - DEBUG_REQ(D_ERROR, open_req, - "Can't allocate md_open_data"); - return 0; - } - - /** - * Take a reference on \var mod, to be freed on mdc_close(). - * It protects \var mod from being freed on eviction (commit - * callback is called despite rq_replay flag). - * Another reference for \var och. 
- */ - obd_mod_get(mod); - obd_mod_get(mod); - - spin_lock(&open_req->rq_lock); - och->och_mod = mod; - mod->mod_och = och; - mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) || - it_disposition(it, DISP_OPEN_STRIPE); - mod->mod_open_req = open_req; - open_req->rq_cb_data = mod; - open_req->rq_commit_cb = mdc_commit_open; - spin_unlock(&open_req->rq_lock); - } - - rec->cr_fid2 = body->mbo_fid1; - rec->cr_ioepoch = body->mbo_ioepoch; - rec->cr_old_handle.cookie = body->mbo_handle.cookie; - open_req->rq_replay_cb = mdc_replay_open; - if (!fid_is_sane(&body->mbo_fid1)) { - DEBUG_REQ(D_ERROR, open_req, - "Saving replay request with insane fid"); - LBUG(); - } - - DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data"); - return 0; -} - -static void mdc_free_open(struct md_open_data *mod) -{ - int committed = 0; - - if (mod->mod_is_create == 0 && - imp_connect_disp_stripe(mod->mod_open_req->rq_import)) - committed = 1; - - /* - * No reason to asssert here if the open request has - * rq_replay == 1. It means that mdc_close failed, and - * close request wasn`t sent. It is not fatal to client. - * The worst thing is eviction if the client gets open lock - */ - DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, - "free open request rq_replay = %d\n", - mod->mod_open_req->rq_replay); - - ptlrpc_request_committed(mod->mod_open_req, committed); - if (mod->mod_close_req) - ptlrpc_request_committed(mod->mod_close_req, committed); -} - -static int mdc_clear_open_replay_data(struct obd_export *exp, - struct obd_client_handle *och) -{ - struct md_open_data *mod = och->och_mod; - - /** - * It is possible to not have \var mod in a case of eviction between - * lookup and ll_file_open(). 
- **/ - if (!mod) - return 0; - - LASSERT(mod != LP_POISON); - LASSERT(mod->mod_open_req); - mdc_free_open(mod); - - mod->mod_och = NULL; - och->och_mod = NULL; - obd_mod_put(mod); - - return 0; -} - -static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, - struct md_open_data *mod, struct ptlrpc_request **request) -{ - struct obd_device *obd = class_exp2obd(exp); - struct ptlrpc_request *req; - struct req_format *req_fmt; - int rc; - int saved_rc = 0; - - if (op_data->op_bias & MDS_HSM_RELEASE) { - req_fmt = &RQF_MDS_INTENT_CLOSE; - - /* allocate a FID for volatile file */ - rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); - if (rc < 0) { - CERROR("%s: " DFID " failed to allocate FID: %d\n", - obd->obd_name, PFID(&op_data->op_fid1), rc); - /* save the errcode and proceed to close */ - saved_rc = rc; - } - } else if (op_data->op_bias & MDS_CLOSE_LAYOUT_SWAP) { - req_fmt = &RQF_MDS_INTENT_CLOSE; - } else { - req_fmt = &RQF_MDS_CLOSE; - } - - *request = NULL; - if (OBD_FAIL_CHECK(OBD_FAIL_MDC_CLOSE)) - req = NULL; - else - req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt); - - /* Ensure that this close's handle is fixed up during replay. */ - if (likely(mod)) { - LASSERTF(mod->mod_open_req && - mod->mod_open_req->rq_type != LI_POISON, - "POISONED open %p!\n", mod->mod_open_req); - - mod->mod_close_req = req; - - DEBUG_REQ(D_HA, mod->mod_open_req, "matched open"); - /* We no longer want to preserve this open for replay even - * though the open was committed. 
b=3632, b=3633 - */ - spin_lock(&mod->mod_open_req->rq_lock); - mod->mod_open_req->rq_replay = 0; - spin_unlock(&mod->mod_open_req->rq_lock); - } else { - CDEBUG(D_HA, - "couldn't find open req; expecting close error\n"); - } - if (!req) { - /* - * TODO: repeat close after errors - */ - CWARN("%s: close of FID " DFID " failed, file reference will be dropped when this client unmounts or is evicted\n", - obd->obd_name, PFID(&op_data->op_fid1)); - rc = -ENOMEM; - goto out; - } - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE); - if (rc) { - ptlrpc_request_free(req); - req = NULL; - goto out; - } - - /* - * To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a - * portal whose threads are not taking any DLM locks and are therefore - * always progressing - */ - req->rq_request_portal = MDS_READPAGE_PORTAL; - ptlrpc_at_set_req_timeout(req); - - mdc_close_pack(req, op_data); - - req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, - obd->u.cli.cl_default_mds_easize); - - ptlrpc_request_set_replen(req); - - mdc_get_mod_rpc_slot(req, NULL); - rc = ptlrpc_queue_wait(req); - mdc_put_mod_rpc_slot(req, NULL); - - if (!req->rq_repmsg) { - CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req, - req->rq_status); - if (rc == 0) - rc = req->rq_status ?: -EIO; - } else if (rc == 0 || rc == -EAGAIN) { - struct mdt_body *body; - - rc = lustre_msg_get_status(req->rq_repmsg); - if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { - DEBUG_REQ(D_ERROR, req, - "type == PTL_RPC_MSG_ERR, err = %d", rc); - if (rc > 0) - rc = -rc; - } - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (!body) - rc = -EPROTO; - } else if (rc == -ESTALE) { - /** - * it can be allowed error after 3633 if open was committed and - * server failed before close was sent. 
Let's check if mod - * exists and return no error in that case - */ - if (mod) { - DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc); - if (mod->mod_open_req->rq_committed) - rc = 0; - } - } - -out: - if (mod) { - if (rc != 0) - mod->mod_close_req = NULL; - /* Since now, mod is accessed through open_req only, - * thus close req does not keep a reference on mod anymore. - */ - obd_mod_put(mod); - } - *request = req; - return rc < 0 ? rc : saved_rc; -} - -static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid, - u64 offset, struct page **pages, int npages, - struct ptlrpc_request **request) -{ - struct ptlrpc_bulk_desc *desc; - struct ptlrpc_request *req; - wait_queue_head_t waitq; - int resends = 0; - int rc; - int i; - - *request = NULL; - init_waitqueue_head(&waitq); - -restart_bulk: - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - req->rq_request_portal = MDS_READPAGE_PORTAL; - ptlrpc_at_set_req_timeout(req); - - desc = ptlrpc_prep_bulk_imp(req, npages, 1, - PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV, - MDS_BULK_PORTAL, - &ptlrpc_bulk_kiov_pin_ops); - if (!desc) { - ptlrpc_request_free(req); - return -ENOMEM; - } - - /* NB req now owns desc and will free it when it gets freed */ - for (i = 0; i < npages; i++) - desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE); - - mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid); - - ptlrpc_request_set_replen(req); - rc = ptlrpc_queue_wait(req); - if (rc) { - ptlrpc_req_finished(req); - if (rc != -ETIMEDOUT) - return rc; - - resends++; - if (!client_should_resend(resends, &exp->exp_obd->u.cli)) { - CERROR("%s: too many resend retries: rc = %d\n", - exp->exp_obd->obd_name, -EIO); - return -EIO; - } - wait_event_idle_timeout(waitq, 0, resends * HZ); - - goto restart_bulk; - } - - rc = sptlrpc_cli_unwrap_bulk_read(req, 
req->rq_bulk, - req->rq_bulk->bd_nob_transferred); - if (rc < 0) { - ptlrpc_req_finished(req); - return rc; - } - - if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { - CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n", - exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred, - PAGE_SIZE * npages); - ptlrpc_req_finished(req); - return -EPROTO; - } - - *request = req; - return 0; -} - -static void mdc_release_page(struct page *page, int remove) -{ - if (remove) { - lock_page(page); - if (likely(page->mapping)) - truncate_complete_page(page->mapping, page); - unlock_page(page); - } - put_page(page); -} - -static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash, - __u64 *start, __u64 *end, int hash64) -{ - /* - * Complement of hash is used as an index so that - * radix_tree_gang_lookup() can be used to find a page with starting - * hash _smaller_ than one we are looking for. - */ - unsigned long offset = hash_x_index(*hash, hash64); - struct page *page; - int found; - - xa_lock_irq(&mapping->i_pages); - found = radix_tree_gang_lookup(&mapping->i_pages, - (void **)&page, offset, 1); - if (found > 0 && !radix_tree_exceptional_entry(page)) { - struct lu_dirpage *dp; - - get_page(page); - xa_unlock_irq(&mapping->i_pages); - /* - * In contrast to find_lock_page() we are sure that directory - * page cannot be truncated (while DLM lock is held) and, - * hence, can avoid restart. - * - * In fact, page cannot be locked here at all, because - * mdc_read_page_remote does synchronous io. 
- */ - wait_on_page_locked(page); - if (PageUptodate(page)) { - dp = kmap(page); - if (BITS_PER_LONG == 32 && hash64) { - *start = le64_to_cpu(dp->ldp_hash_start) >> 32; - *end = le64_to_cpu(dp->ldp_hash_end) >> 32; - *hash = *hash >> 32; - } else { - *start = le64_to_cpu(dp->ldp_hash_start); - *end = le64_to_cpu(dp->ldp_hash_end); - } - if (unlikely(*start == 1 && *hash == 0)) - *hash = *start; - else - LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n", - *start, *end, *hash); - CDEBUG(D_VFSTRACE, "offset %lx [%#llx %#llx], hash %#llx\n", - offset, *start, *end, *hash); - if (*hash > *end) { - kunmap(page); - mdc_release_page(page, 0); - page = NULL; - } else if (*end != *start && *hash == *end) { - /* - * upon hash collision, remove this page, - * otherwise put page reference, and - * mdc_read_page_remote() will issue RPC to - * fetch the page we want. - */ - kunmap(page); - mdc_release_page(page, - le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE); - page = NULL; - } - } else { - put_page(page); - page = ERR_PTR(-EIO); - } - } else { - xa_unlock_irq(&mapping->i_pages); - page = NULL; - } - return page; -} - -/* - * Adjust a set of pages, each page containing an array of lu_dirpages, - * so that each page can be used as a single logical lu_dirpage. - * - * A lu_dirpage is laid out as follows, where s = ldp_hash_start, - * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a - * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end - * value is used as a cookie to request the next lu_dirpage in a - * directory listing that spans multiple pages (two in this example): - * ________ - * | | - * .|--------v------- -----. - * |s|e|f|p|ent|ent| ... |ent| - * '--|-------------- -----' Each PAGE contains a single - * '------. lu_dirpage. - * .---------v------- -----. - * |s|e|f|p|ent| 0 | ... 
| 0 | - * '----------------- -----' - * - * However, on hosts where the native VM page size (PAGE_SIZE) is - * larger than LU_PAGE_SIZE, a single host page may contain multiple - * lu_dirpages. After reading the lu_dirpages from the MDS, the - * ldp_hash_end of the first lu_dirpage refers to the one immediately - * after it in the same PAGE (arrows simplified for brevity, but - * in general e0==s1, e1==s2, etc.): - * - * .-------------------- -----. - * |s0|e0|f0|p|ent|ent| ... |ent| - * |---v---------------- -----| - * |s1|e1|f1|p|ent|ent| ... |ent| - * |---v---------------- -----| Here, each PAGE contains - * ... multiple lu_dirpages. - * |---v---------------- -----| - * |s'|e'|f'|p|ent|ent| ... |ent| - * '---|---------------- -----' - * v - * .----------------------------. - * | next PAGE | - * - * This structure is transformed into a single logical lu_dirpage as follows: - * - * - Replace e0 with e' so the request for the next lu_dirpage gets the page - * labeled 'next PAGE'. - * - * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether - * a hash collision with the next page exists. - * - * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span - * to the first entry of the next lu_dirpage. - */ -#if PAGE_SIZE > LU_PAGE_SIZE -static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs) -{ - int i; - - for (i = 0; i < cfs_pgs; i++) { - struct lu_dirpage *dp = kmap(pages[i]); - __u64 hash_end = le64_to_cpu(dp->ldp_hash_end); - __u32 flags = le32_to_cpu(dp->ldp_flags); - struct lu_dirpage *first = dp; - - while (--lu_pgs > 0) { - struct lu_dirent *end_dirent = NULL; - struct lu_dirent *ent; - - for (ent = lu_dirent_start(dp); ent; - ent = lu_dirent_next(ent)) - end_dirent = ent; - - /* Advance dp to next lu_dirpage. */ - dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE); - - /* Check if we've reached the end of the CFS_PAGE. 
*/ - if (!((unsigned long)dp & ~PAGE_MASK)) - break; - - /* Save the hash and flags of this lu_dirpage. */ - hash_end = le64_to_cpu(dp->ldp_hash_end); - flags = le32_to_cpu(dp->ldp_flags); - - /* Check if lu_dirpage contains no entries. */ - if (!end_dirent) - break; - - /* - * Enlarge the end entry lde_reclen from 0 to - * first entry of next lu_dirpage. - */ - LASSERT(!le16_to_cpu(end_dirent->lde_reclen)); - end_dirent->lde_reclen = - cpu_to_le16((char *)(dp->ldp_entries) - - (char *)end_dirent); - } - - first->ldp_hash_end = hash_end; - first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE); - first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE); - - kunmap(pages[i]); - } - LASSERTF(lu_pgs == 0, "left = %d", lu_pgs); -} -#else -#define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0) -#endif /* PAGE_SIZE > LU_PAGE_SIZE */ - -/* parameters for readdir page */ -struct readpage_param { - struct md_op_data *rp_mod; - __u64 rp_off; - int rp_hash64; - struct obd_export *rp_exp; - struct md_callback *rp_cb; -}; - -/** - * Read pages from server. - * - * Page in MDS_READPAGE RPC is packed in LU_PAGE_SIZE, and each page contains - * a header lu_dirpage which describes the start/end hash, and whether this - * page is empty (contains no dir entry) or hash collide with next page. - * After client receives reply, several pages will be integrated into dir page - * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the - * lu_dirpage for this integrated page will be adjusted. 
- **/ -static int mdc_read_page_remote(void *data, struct page *page0) -{ - struct readpage_param *rp = data; - struct page **page_pool; - struct page *page; - struct lu_dirpage *dp; - int rd_pgs = 0; /* number of pages read actually */ - int npages; - struct md_op_data *op_data = rp->rp_mod; - struct ptlrpc_request *req; - int max_pages = op_data->op_max_pages; - struct inode *inode; - struct lu_fid *fid; - int i; - int rc; - - LASSERT(max_pages > 0 && max_pages <= PTLRPC_MAX_BRW_PAGES); - inode = op_data->op_data; - fid = &op_data->op_fid1; - LASSERT(inode); - - page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS); - if (page_pool) { - page_pool[0] = page0; - } else { - page_pool = &page0; - max_pages = 1; - } - - for (npages = 1; npages < max_pages; npages++) { - page = page_cache_alloc(inode->i_mapping); - if (!page) - break; - page_pool[npages] = page; - } - - rc = mdc_getpage(rp->rp_exp, fid, rp->rp_off, page_pool, npages, &req); - if (!rc) { - int lu_pgs = req->rq_bulk->bd_nob_transferred; - - rd_pgs = (req->rq_bulk->bd_nob_transferred + - PAGE_SIZE - 1) >> PAGE_SHIFT; - lu_pgs >>= LU_PAGE_SHIFT; - LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); - - CDEBUG(D_INODE, "read %d(%d) pages\n", rd_pgs, lu_pgs); - - mdc_adjust_dirpages(page_pool, rd_pgs, lu_pgs); - - SetPageUptodate(page0); - } - - unlock_page(page0); - ptlrpc_req_finished(req); - CDEBUG(D_CACHE, "read %d/%d pages\n", rd_pgs, npages); - for (i = 1; i < npages; i++) { - unsigned long offset; - __u64 hash; - int ret; - - page = page_pool[i]; - - if (rc < 0 || i >= rd_pgs) { - put_page(page); - continue; - } - - SetPageUptodate(page); - - dp = kmap(page); - hash = le64_to_cpu(dp->ldp_hash_start); - kunmap(page); - - offset = hash_x_index(hash, rp->rp_hash64); - - prefetchw(&page->flags); - ret = add_to_page_cache_lru(page, inode->i_mapping, offset, - GFP_KERNEL); - if (!ret) - unlock_page(page); - else - CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: rc = %d\n", - offset, ret); 
- put_page(page); - } - - if (page_pool != &page0) - kfree(page_pool); - - return rc; -} - -/** - * Read dir page from cache first, if it can not find it, read it from - * server and add into the cache. - * - * \param[in] exp MDC export - * \param[in] op_data client MD stack parameters, transferring parameters - * between different layers on client MD stack. - * \param[in] cb_op callback required for ldlm lock enqueue during - * read page - * \param[in] hash_offset the hash offset of the page to be read - * \param[in] ppage the page to be read - * - * retval = 0 get the page successfully - * errno(<0) get the page failed - */ -static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data, - struct md_callback *cb_op, __u64 hash_offset, - struct page **ppage) -{ - struct lookup_intent it = { .it_op = IT_READDIR }; - struct page *page; - struct inode *dir = op_data->op_data; - struct address_space *mapping; - struct lu_dirpage *dp; - __u64 start = 0; - __u64 end = 0; - struct lustre_handle lockh; - struct ptlrpc_request *enq_req = NULL; - struct readpage_param rp_param; - int rc; - - *ppage = NULL; - - LASSERT(dir); - mapping = dir->i_mapping; - - rc = mdc_intent_lock(exp, op_data, &it, &enq_req, - cb_op->md_blocking_ast, 0); - if (enq_req) - ptlrpc_req_finished(enq_req); - - if (rc < 0) { - CERROR("%s: " DFID " lock enqueue fails: rc = %d\n", - exp->exp_obd->obd_name, PFID(&op_data->op_fid1), rc); - return rc; - } - - rc = 0; - lockh.cookie = it.it_lock_handle; - mdc_set_lock_data(exp, &lockh, dir, NULL); - - rp_param.rp_off = hash_offset; - rp_param.rp_hash64 = op_data->op_cli_flags & CLI_HASH64; - page = mdc_page_locate(mapping, &rp_param.rp_off, &start, &end, - rp_param.rp_hash64); - if (IS_ERR(page)) { - CDEBUG(D_INFO, "%s: dir page locate: " DFID " at %llu: rc %ld\n", - exp->exp_obd->obd_name, PFID(&op_data->op_fid1), - rp_param.rp_off, PTR_ERR(page)); - rc = PTR_ERR(page); - goto out_unlock; - } else if (page) { - /* - * XXX nikita: not entirely 
correct handling of a corner case: - * suppose hash chain of entries with hash value HASH crosses - * border between pages P0 and P1. First both P0 and P1 are - * cached, seekdir() is called for some entry from the P0 part - * of the chain. Later P0 goes out of cache. telldir(HASH) - * happens and finds P1, as it starts with matching hash - * value. Remaining entries from P0 part of the chain are - * skipped. (Is that really a bug?) - * - * Possible solutions: 0. don't cache P1 is such case, handle - * it as an "overflow" page. 1. invalidate all pages at - * once. 2. use HASH|1 as an index for P1. - */ - goto hash_collision; - } - - rp_param.rp_exp = exp; - rp_param.rp_mod = op_data; - page = read_cache_page(mapping, - hash_x_index(rp_param.rp_off, - rp_param.rp_hash64), - mdc_read_page_remote, &rp_param); - if (IS_ERR(page)) { - CERROR("%s: read cache page: " DFID " at %llu: rc %ld\n", - exp->exp_obd->obd_name, PFID(&op_data->op_fid1), - rp_param.rp_off, PTR_ERR(page)); - rc = PTR_ERR(page); - goto out_unlock; - } - - wait_on_page_locked(page); - (void)kmap(page); - if (!PageUptodate(page)) { - CERROR("%s: page not updated: " DFID " at %llu: rc %d\n", - exp->exp_obd->obd_name, PFID(&op_data->op_fid1), - rp_param.rp_off, -5); - goto fail; - } - if (!PageChecked(page)) - SetPageChecked(page); - if (PageError(page)) { - CERROR("%s: page error: " DFID " at %llu: rc %d\n", - exp->exp_obd->obd_name, PFID(&op_data->op_fid1), - rp_param.rp_off, -5); - goto fail; - } - -hash_collision: - dp = page_address(page); - if (BITS_PER_LONG == 32 && rp_param.rp_hash64) { - start = le64_to_cpu(dp->ldp_hash_start) >> 32; - end = le64_to_cpu(dp->ldp_hash_end) >> 32; - rp_param.rp_off = hash_offset >> 32; - } else { - start = le64_to_cpu(dp->ldp_hash_start); - end = le64_to_cpu(dp->ldp_hash_end); - rp_param.rp_off = hash_offset; - } - if (end == start) { - LASSERT(start == rp_param.rp_off); - CWARN("Page-wide hash collision: %#lx\n", (unsigned long)end); -#if BITS_PER_LONG == 32 - 
CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n", - le64_to_cpu(dp->ldp_hash_start), - le64_to_cpu(dp->ldp_hash_end), hash_offset); -#endif - /* - * Fetch whole overflow chain... - * - * XXX not yet. - */ - goto fail; - } - *ppage = page; -out_unlock: - ldlm_lock_decref(&lockh, it.it_lock_mode); - return rc; -fail: - kunmap(page); - mdc_release_page(page, 1); - rc = -EIO; - goto out_unlock; -} - -static int mdc_statfs(const struct lu_env *env, - struct obd_export *exp, struct obd_statfs *osfs, - __u64 max_age, __u32 flags) -{ - struct obd_device *obd = class_exp2obd(exp); - struct ptlrpc_request *req; - struct obd_statfs *msfs; - struct obd_import *imp = NULL; - int rc; - - /* - * Since the request might also come from lprocfs, so we need - * sync this with client_disconnect_export Bug15684 - */ - down_read(&obd->u.cli.cl_sem); - if (obd->u.cli.cl_import) - imp = class_import_get(obd->u.cli.cl_import); - up_read(&obd->u.cli.cl_sem); - if (!imp) - return -ENODEV; - - req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS, - LUSTRE_MDS_VERSION, MDS_STATFS); - if (!req) { - rc = -ENOMEM; - goto output; - } - - ptlrpc_request_set_replen(req); - - if (flags & OBD_STATFS_NODELAY) { - /* procfs requests not want stay in wait for avoid deadlock */ - req->rq_no_resend = 1; - req->rq_no_delay = 1; - } - - rc = ptlrpc_queue_wait(req); - if (rc) { - /* check connection error first */ - if (imp->imp_connect_error) - rc = imp->imp_connect_error; - goto out; - } - - msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (!msfs) { - rc = -EPROTO; - goto out; - } - - *osfs = *msfs; -out: - ptlrpc_req_finished(req); -output: - class_import_put(imp); - return rc; -} - -static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf) -{ - __u32 keylen, vallen; - void *key; - int rc; - - if (gf->gf_pathlen > PATH_MAX) - return -ENAMETOOLONG; - if (gf->gf_pathlen < 2) - return -EOVERFLOW; - - /* Key is KEY_FID2PATH + getinfo_fid2path 
description */ - keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf); - key = kzalloc(keylen, GFP_NOFS); - if (!key) - return -ENOMEM; - memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH)); - memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf)); - - CDEBUG(D_IOCTL, "path get " DFID " from %llu #%d\n", - PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno); - - if (!fid_is_sane(&gf->gf_fid)) { - rc = -EINVAL; - goto out; - } - - /* Val is struct getinfo_fid2path result plus path */ - vallen = sizeof(*gf) + gf->gf_pathlen; - - rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf); - if (rc != 0 && rc != -EREMOTE) - goto out; - - if (vallen <= sizeof(*gf)) { - rc = -EPROTO; - goto out; - } else if (vallen > sizeof(*gf) + gf->gf_pathlen) { - rc = -EOVERFLOW; - goto out; - } - - CDEBUG(D_IOCTL, "path got " DFID " from %llu #%d: %s\n", - PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, - gf->gf_pathlen < 512 ? gf->gf_path : - /* only log the last 512 characters of the path */ - gf->gf_path + gf->gf_pathlen - 512); - -out: - kfree(key); - return rc; -} - -static int mdc_ioc_hsm_progress(struct obd_export *exp, - struct hsm_progress_kernel *hpk) -{ - struct obd_import *imp = class_exp2cliimp(exp); - struct hsm_progress_kernel *req_hpk; - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS, - LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS); - if (!req) { - rc = -ENOMEM; - goto out; - } - - mdc_pack_body(req, NULL, 0, 0, -1, 0); - - /* Copy hsm_progress struct */ - req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS); - if (!req_hpk) { - rc = -EPROTO; - goto out; - } - - *req_hpk = *hpk; - req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval); - - ptlrpc_request_set_replen(req); - - mdc_get_mod_rpc_slot(req, NULL); - rc = ptlrpc_queue_wait(req); - mdc_put_mod_rpc_slot(req, NULL); -out: - ptlrpc_req_finished(req); - return rc; -} - -static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 
archives) -{ - __u32 *archive_mask; - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER, - LUSTRE_MDS_VERSION, - MDS_HSM_CT_REGISTER); - if (!req) { - rc = -ENOMEM; - goto out; - } - - mdc_pack_body(req, NULL, 0, 0, -1, 0); - - /* Copy hsm_progress struct */ - archive_mask = req_capsule_client_get(&req->rq_pill, - &RMF_MDS_HSM_ARCHIVE); - if (!archive_mask) { - rc = -EPROTO; - goto out; - } - - *archive_mask = archives; - - ptlrpc_request_set_replen(req); - - rc = mdc_queue_wait(req); -out: - ptlrpc_req_finished(req); - return rc; -} - -static int mdc_ioc_hsm_current_action(struct obd_export *exp, - struct md_op_data *op_data) -{ - struct hsm_current_action *hca = op_data->op_data; - struct hsm_current_action *req_hca; - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_HSM_ACTION); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_pack_body(req, &op_data->op_fid1, 0, 0, - op_data->op_suppgids[0], 0); - - ptlrpc_request_set_replen(req); - - rc = mdc_queue_wait(req); - if (rc) - goto out; - - req_hca = req_capsule_server_get(&req->rq_pill, - &RMF_MDS_HSM_CURRENT_ACTION); - if (!req_hca) { - rc = -EPROTO; - goto out; - } - - *hca = *req_hca; - -out: - ptlrpc_req_finished(req); - return rc; -} - -static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp) -{ - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER, - LUSTRE_MDS_VERSION, - MDS_HSM_CT_UNREGISTER); - if (!req) { - rc = -ENOMEM; - goto out; - } - - mdc_pack_body(req, NULL, 0, 0, -1, 0); - - ptlrpc_request_set_replen(req); - - rc = mdc_queue_wait(req); -out: - ptlrpc_req_finished(req); - return rc; -} - -static int mdc_ioc_hsm_state_get(struct obd_export *exp, - struct md_op_data *op_data) -{ - struct hsm_user_state *hus = 
op_data->op_data; - struct hsm_user_state *req_hus; - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_HSM_STATE_GET); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET); - if (rc != 0) { - ptlrpc_request_free(req); - return rc; - } - - mdc_pack_body(req, &op_data->op_fid1, 0, 0, - op_data->op_suppgids[0], 0); - - ptlrpc_request_set_replen(req); - - rc = mdc_queue_wait(req); - if (rc) - goto out; - - req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE); - if (!req_hus) { - rc = -EPROTO; - goto out; - } - - *hus = *req_hus; - -out: - ptlrpc_req_finished(req); - return rc; -} - -static int mdc_ioc_hsm_state_set(struct obd_export *exp, - struct md_op_data *op_data) -{ - struct hsm_state_set *hss = op_data->op_data; - struct hsm_state_set *req_hss; - struct ptlrpc_request *req; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_HSM_STATE_SET); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_pack_body(req, &op_data->op_fid1, 0, 0, - op_data->op_suppgids[0], 0); - - /* Copy states */ - req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET); - if (!req_hss) { - rc = -EPROTO; - goto out; - } - *req_hss = *hss; - - ptlrpc_request_set_replen(req); - - mdc_get_mod_rpc_slot(req, NULL); - rc = ptlrpc_queue_wait(req); - mdc_put_mod_rpc_slot(req, NULL); -out: - ptlrpc_req_finished(req); - return rc; -} - -static int mdc_ioc_hsm_request(struct obd_export *exp, - struct hsm_user_request *hur) -{ - struct obd_import *imp = class_exp2cliimp(exp); - struct ptlrpc_request *req; - struct hsm_request *req_hr; - struct hsm_user_item *req_hui; - char *req_opaque; - int rc; - - req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST); - if (!req) { - rc = -ENOMEM; - goto out; - } - - req_capsule_set_size(&req->rq_pill, 
&RMF_MDS_HSM_USER_ITEM, RCL_CLIENT, - hur->hur_request.hr_itemcount - * sizeof(struct hsm_user_item)); - req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT, - hur->hur_request.hr_data_len); - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_pack_body(req, NULL, 0, 0, -1, 0); - - /* Copy hsm_request struct */ - req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST); - if (!req_hr) { - rc = -EPROTO; - goto out; - } - *req_hr = hur->hur_request; - - /* Copy hsm_user_item structs */ - req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM); - if (!req_hui) { - rc = -EPROTO; - goto out; - } - memcpy(req_hui, hur->hur_user_item, - hur->hur_request.hr_itemcount * sizeof(struct hsm_user_item)); - - /* Copy opaque field */ - req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA); - if (!req_opaque) { - rc = -EPROTO; - goto out; - } - memcpy(req_opaque, hur_data(hur), hur->hur_request.hr_data_len); - - ptlrpc_request_set_replen(req); - - mdc_get_mod_rpc_slot(req, NULL); - rc = ptlrpc_queue_wait(req); - mdc_put_mod_rpc_slot(req, NULL); -out: - ptlrpc_req_finished(req); - return rc; -} - -static struct kuc_hdr *changelog_kuc_hdr(char *buf, size_t len, u32 flags) -{ - struct kuc_hdr *lh = (struct kuc_hdr *)buf; - - LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE); - - lh->kuc_magic = KUC_MAGIC; - lh->kuc_transport = KUC_TRANSPORT_CHANGELOG; - lh->kuc_flags = flags; - lh->kuc_msgtype = CL_RECORD; - lh->kuc_msglen = len; - return lh; -} - -struct changelog_show { - __u64 cs_startrec; - enum changelog_send_flag cs_flags; - struct file *cs_fp; - char *cs_buf; - struct obd_device *cs_obd; -}; - -static inline char *cs_obd_name(struct changelog_show *cs) -{ - return cs->cs_obd->obd_name; -} - -static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh, - struct llog_rec_hdr *hdr, void *data) -{ - struct changelog_show *cs = 
data; - struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr; - struct kuc_hdr *lh; - size_t len; - int rc; - - if (rec->cr_hdr.lrh_type != CHANGELOG_REC) { - rc = -EINVAL; - CERROR("%s: not a changelog rec %x/%d: rc = %d\n", - cs_obd_name(cs), rec->cr_hdr.lrh_type, - rec->cr.cr_type, rc); - return rc; - } - - if (rec->cr.cr_index < cs->cs_startrec) { - /* Skip entries earlier than what we are interested in */ - CDEBUG(D_HSM, "rec=%llu start=%llu\n", - rec->cr.cr_index, cs->cs_startrec); - return 0; - } - - CDEBUG(D_HSM, "%llu %02d%-5s %llu 0x%x t=" DFID " p=" DFID - " %.*s\n", rec->cr.cr_index, rec->cr.cr_type, - changelog_type2str(rec->cr.cr_type), rec->cr.cr_time, - rec->cr.cr_flags & CLF_FLAGMASK, - PFID(&rec->cr.cr_tfid), PFID(&rec->cr.cr_pfid), - rec->cr.cr_namelen, changelog_rec_name(&rec->cr)); - - len = sizeof(*lh) + changelog_rec_size(&rec->cr) + rec->cr.cr_namelen; - - /* Set up the message */ - lh = changelog_kuc_hdr(cs->cs_buf, len, cs->cs_flags); - memcpy(lh + 1, &rec->cr, len - sizeof(*lh)); - - rc = libcfs_kkuc_msg_put(cs->cs_fp, lh); - CDEBUG(D_HSM, "kucmsg fp %p len %zu rc %d\n", cs->cs_fp, len, rc); - - return rc; -} - -static int mdc_changelog_send_thread(void *csdata) -{ - enum llog_flag flags = LLOG_F_IS_CAT; - struct changelog_show *cs = csdata; - struct llog_ctxt *ctxt = NULL; - struct llog_handle *llh = NULL; - struct kuc_hdr *kuch; - int rc; - - CDEBUG(D_HSM, "changelog to fp=%p start %llu\n", - cs->cs_fp, cs->cs_startrec); - - cs->cs_buf = kzalloc(KUC_CHANGELOG_MSG_MAXSIZE, GFP_NOFS); - if (!cs->cs_buf) { - rc = -ENOMEM; - goto out; - } - - /* Set up the remote catalog handle */ - ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT); - if (!ctxt) { - rc = -ENOENT; - goto out; - } - rc = llog_open(NULL, ctxt, &llh, NULL, CHANGELOG_CATALOG, - LLOG_OPEN_EXISTS); - if (rc) { - CERROR("%s: fail to open changelog catalog: rc = %d\n", - cs_obd_name(cs), rc); - goto out; - } - - if (cs->cs_flags & CHANGELOG_FLAG_JOBID) - 
flags |= LLOG_F_EXT_JOBID; - - rc = llog_init_handle(NULL, llh, flags, NULL); - if (rc) { - CERROR("llog_init_handle failed %d\n", rc); - goto out; - } - - rc = llog_cat_process(NULL, llh, changelog_kkuc_cb, cs, 0, 0); - - /* Send EOF no matter what our result */ - kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags); - kuch->kuc_msgtype = CL_EOF; - libcfs_kkuc_msg_put(cs->cs_fp, kuch); - -out: - fput(cs->cs_fp); - if (llh) - llog_cat_close(NULL, llh); - if (ctxt) - llog_ctxt_put(ctxt); - kfree(cs->cs_buf); - kfree(cs); - return rc; -} - -static int mdc_ioc_changelog_send(struct obd_device *obd, - struct ioc_changelog *icc) -{ - struct changelog_show *cs; - struct task_struct *task; - int rc; - - /* Freed in mdc_changelog_send_thread */ - cs = kzalloc(sizeof(*cs), GFP_NOFS); - if (!cs) - return -ENOMEM; - - cs->cs_obd = obd; - cs->cs_startrec = icc->icc_recno; - /* matching fput in mdc_changelog_send_thread */ - cs->cs_fp = fget(icc->icc_id); - cs->cs_flags = icc->icc_flags; - - /* - * New thread because we should return to user app before - * writing into our pipe - */ - task = kthread_run(mdc_changelog_send_thread, cs, - "mdc_clg_send_thread"); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - CERROR("%s: can't start changelog thread: rc = %d\n", - cs_obd_name(cs), rc); - kfree(cs); - } else { - rc = 0; - CDEBUG(D_HSM, "%s: started changelog thread\n", - cs_obd_name(cs)); - } - - CERROR("Failed to start changelog thread: %d\n", rc); - return rc; -} - -static int mdc_ioc_hsm_ct_start(struct obd_export *exp, - struct lustre_kernelcomm *lk); - -static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct ptlrpc_request *req; - struct obd_quotactl *oqc; - int rc; - - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION, - MDS_QUOTACTL); - if (!req) - return -ENOMEM; - - oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - *oqc = *oqctl; - - 
ptlrpc_request_set_replen(req); - ptlrpc_at_set_req_timeout(req); - req->rq_no_resend = 1; - - rc = ptlrpc_queue_wait(req); - if (rc) - CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc); - - if (req->rq_repmsg) { - oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - if (oqc) { - *oqctl = *oqc; - } else if (!rc) { - CERROR("Can't unpack obd_quotactl\n"); - rc = -EPROTO; - } - } else if (!rc) { - CERROR("Can't unpack obd_quotactl\n"); - rc = -EPROTO; - } - ptlrpc_req_finished(req); - - return rc; -} - -static int mdc_ioc_swap_layouts(struct obd_export *exp, - struct md_op_data *op_data) -{ - LIST_HEAD(cancels); - struct ptlrpc_request *req; - int rc, count; - struct mdc_swap_layouts *msl, *payload; - - msl = op_data->op_data; - - /* When the MDT will get the MDS_SWAP_LAYOUTS RPC the - * first thing it will do is to cancel the 2 layout - * locks hold by this client. - * So the client must cancel its layout locks on the 2 fids - * with the request RPC to avoid extra RPC round trips - */ - count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels, - LCK_CR, MDS_INODELOCK_LAYOUT | - MDS_INODELOCK_XATTR); - count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels, - LCK_CR, MDS_INODELOCK_LAYOUT | - MDS_INODELOCK_XATTR); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_MDS_SWAP_LAYOUTS); - if (!req) { - ldlm_lock_list_put(&cancels, l_bl_ast, count); - return -ENOMEM; - } - - rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_swap_layouts_pack(req, op_data); - - payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS); - LASSERT(payload); - - *payload = *msl; - - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - - ptlrpc_req_finished(req); - return rc; -} - -static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void __user *uarg) -{ - struct obd_device *obd = exp->exp_obd; - struct 
obd_ioctl_data *data = karg; - struct obd_import *imp = obd->u.cli.cl_import; - int rc; - - if (!try_module_get(THIS_MODULE)) { - CERROR("%s: cannot get module '%s'\n", obd->obd_name, - module_name(THIS_MODULE)); - return -EINVAL; - } - switch (cmd) { - case OBD_IOC_CHANGELOG_SEND: - rc = mdc_ioc_changelog_send(obd, karg); - goto out; - case OBD_IOC_CHANGELOG_CLEAR: { - struct ioc_changelog *icc = karg; - struct changelog_setinfo cs = { - .cs_recno = icc->icc_recno, - .cs_id = icc->icc_id - }; - - rc = obd_set_info_async(NULL, exp, strlen(KEY_CHANGELOG_CLEAR), - KEY_CHANGELOG_CLEAR, sizeof(cs), &cs, - NULL); - goto out; - } - case OBD_IOC_FID2PATH: - rc = mdc_ioc_fid2path(exp, karg); - goto out; - case LL_IOC_HSM_CT_START: - rc = mdc_ioc_hsm_ct_start(exp, karg); - /* ignore if it was already registered on this MDS. */ - if (rc == -EEXIST) - rc = 0; - goto out; - case LL_IOC_HSM_PROGRESS: - rc = mdc_ioc_hsm_progress(exp, karg); - goto out; - case LL_IOC_HSM_STATE_GET: - rc = mdc_ioc_hsm_state_get(exp, karg); - goto out; - case LL_IOC_HSM_STATE_SET: - rc = mdc_ioc_hsm_state_set(exp, karg); - goto out; - case LL_IOC_HSM_ACTION: - rc = mdc_ioc_hsm_current_action(exp, karg); - goto out; - case LL_IOC_HSM_REQUEST: - rc = mdc_ioc_hsm_request(exp, karg); - goto out; - case OBD_IOC_CLIENT_RECOVER: - rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0); - if (rc < 0) - goto out; - rc = 0; - goto out; - case IOC_OSC_SET_ACTIVE: - rc = ptlrpc_set_import_active(imp, data->ioc_offset); - goto out; - case OBD_IOC_PING_TARGET: - rc = ptlrpc_obd_ping(obd); - goto out; - /* - * Normally IOC_OBD_STATFS, OBD_IOC_QUOTACTL iocontrol are handled by - * LMV instead of MDC. But when the cluster is upgraded from 1.8, - * there'd be no LMV layer thus we might be called here. Eventually - * this code should be removed. - * bz20731, LU-592. 
- */ - case IOC_OBD_STATFS: { - struct obd_statfs stat_buf = {0}; - - if (*((__u32 *)data->ioc_inlbuf2) != 0) { - rc = -ENODEV; - goto out; - } - - /* copy UUID */ - if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd), - min_t(size_t, data->ioc_plen2, - sizeof(struct obd_uuid)))) { - rc = -EFAULT; - goto out; - } - - rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - 0); - if (rc != 0) - goto out; - - if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min_t(size_t, data->ioc_plen1, - sizeof(stat_buf)))) { - rc = -EFAULT; - goto out; - } - - rc = 0; - goto out; - } - case OBD_IOC_QUOTACTL: { - struct if_quotactl *qctl = karg; - struct obd_quotactl *oqctl; - - oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS); - if (!oqctl) { - rc = -ENOMEM; - goto out; - } - - QCTL_COPY(oqctl, qctl); - rc = obd_quotactl(exp, oqctl); - if (rc == 0) { - QCTL_COPY(qctl, oqctl); - qctl->qc_valid = QC_MDTIDX; - qctl->obd_uuid = obd->u.cli.cl_target_uuid; - } - - kfree(oqctl); - goto out; - } - case LL_IOC_GET_CONNECT_FLAGS: - if (copy_to_user(uarg, exp_connect_flags_ptr(exp), - sizeof(*exp_connect_flags_ptr(exp)))) { - rc = -EFAULT; - goto out; - } - - rc = 0; - goto out; - case LL_IOC_LOV_SWAP_LAYOUTS: - rc = mdc_ioc_swap_layouts(exp, karg); - goto out; - default: - CERROR("unrecognised ioctl: cmd = %#x\n", cmd); - rc = -ENOTTY; - goto out; - } -out: - module_put(THIS_MODULE); - - return rc; -} - -static int mdc_get_info_rpc(struct obd_export *exp, - u32 keylen, void *key, - int vallen, void *val) -{ - struct obd_import *imp = class_exp2cliimp(exp); - struct ptlrpc_request *req; - char *tmp; - int rc = -EINVAL; - - req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO); - if (!req) - return -ENOMEM; - - req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, - RCL_CLIENT, keylen); - req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN, - RCL_CLIENT, sizeof(__u32)); - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO); - if (rc) { 
- ptlrpc_request_free(req); - return rc; - } - - tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY); - memcpy(tmp, key, keylen); - tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN); - memcpy(tmp, &vallen, sizeof(__u32)); - - req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL, - RCL_SERVER, vallen); - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - /* -EREMOTE means the get_info result is partial, and it needs to - * continue on another MDT, see fid2path part in lmv_iocontrol - */ - if (rc == 0 || rc == -EREMOTE) { - tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL); - memcpy(val, tmp, vallen); - if (ptlrpc_rep_need_swab(req)) { - if (KEY_IS(KEY_FID2PATH)) - lustre_swab_fid2path(val); - } - } - ptlrpc_req_finished(req); - - return rc; -} - -static void lustre_swab_hai(struct hsm_action_item *h) -{ - __swab32s(&h->hai_len); - __swab32s(&h->hai_action); - lustre_swab_lu_fid(&h->hai_fid); - lustre_swab_lu_fid(&h->hai_dfid); - __swab64s(&h->hai_cookie); - __swab64s(&h->hai_extent.offset); - __swab64s(&h->hai_extent.length); - __swab64s(&h->hai_gid); -} - -static void lustre_swab_hal(struct hsm_action_list *h) -{ - struct hsm_action_item *hai; - u32 i; - - __swab32s(&h->hal_version); - __swab32s(&h->hal_count); - __swab32s(&h->hal_archive_id); - __swab64s(&h->hal_flags); - hai = hai_first(h); - for (i = 0; i < h->hal_count; i++, hai = hai_next(hai)) - lustre_swab_hai(hai); -} - -static void lustre_swab_kuch(struct kuc_hdr *l) -{ - __swab16s(&l->kuc_magic); - /* __u8 l->kuc_transport */ - __swab16s(&l->kuc_msgtype); - __swab16s(&l->kuc_msglen); -} - -static int mdc_ioc_hsm_ct_start(struct obd_export *exp, - struct lustre_kernelcomm *lk) -{ - struct obd_import *imp = class_exp2cliimp(exp); - __u32 archive = lk->lk_data; - int rc = 0; - - if (lk->lk_group != KUC_GRP_HSM) { - CERROR("Bad copytool group %d\n", lk->lk_group); - return -EINVAL; - } - - CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, 
lk->lk_wfd, - lk->lk_uid, lk->lk_group, lk->lk_flags); - - if (lk->lk_flags & LK_FLG_STOP) { - /* Unregister with the coordinator */ - rc = mdc_ioc_hsm_ct_unregister(imp); - } else { - rc = mdc_ioc_hsm_ct_register(imp, archive); - } - - return rc; -} - -/** - * Send a message to any listening copytools - * @param val KUC message (kuc_hdr + hsm_action_list) - * @param len total length of message - */ -static int mdc_hsm_copytool_send(size_t len, void *val) -{ - struct kuc_hdr *lh = (struct kuc_hdr *)val; - struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1); - - if (len < sizeof(*lh) + sizeof(*hal)) { - CERROR("Short HSM message %zu < %zu\n", len, - sizeof(*lh) + sizeof(*hal)); - return -EPROTO; - } - if (lh->kuc_magic == __swab16(KUC_MAGIC)) { - lustre_swab_kuch(lh); - lustre_swab_hal(hal); - } else if (lh->kuc_magic != KUC_MAGIC) { - CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC); - return -EPROTO; - } - - CDEBUG(D_HSM, - "Received message mg=%x t=%d m=%d l=%d actions=%d on %s\n", - lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype, - lh->kuc_msglen, hal->hal_count, hal->hal_fsname); - - /* Broadcast to HSM listeners */ - return libcfs_kkuc_group_put(KUC_GRP_HSM, lh); -} - -/** - * callback function passed to kuc for re-registering each HSM copytool - * running on MDC, after MDT shutdown/recovery. 
- * @param data copytool registration data - * @param cb_arg callback argument (obd_import) - */ -static int mdc_hsm_ct_reregister(void *data, void *cb_arg) -{ - struct kkuc_ct_data *kcd = data; - struct obd_import *imp = (struct obd_import *)cb_arg; - int rc; - - if (!kcd || kcd->kcd_magic != KKUC_CT_DATA_MAGIC) - return -EPROTO; - - if (!obd_uuid_equals(&kcd->kcd_uuid, &imp->imp_obd->obd_uuid)) - return 0; - - CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n", - imp->imp_obd->obd_name, kcd->kcd_archive); - rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_archive); - - /* ignore error if the copytool is already registered */ - return (rc == -EEXIST) ? 0 : rc; -} - -static int mdc_set_info_async(const struct lu_env *env, - struct obd_export *exp, - u32 keylen, void *key, - u32 vallen, void *val, - struct ptlrpc_request_set *set) -{ - struct obd_import *imp = class_exp2cliimp(exp); - int rc; - - if (KEY_IS(KEY_READ_ONLY)) { - if (vallen != sizeof(int)) - return -EINVAL; - - spin_lock(&imp->imp_lock); - if (*((int *)val)) { - imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY; - imp->imp_connect_data.ocd_connect_flags |= - OBD_CONNECT_RDONLY; - } else { - imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY; - imp->imp_connect_data.ocd_connect_flags &= - ~OBD_CONNECT_RDONLY; - } - spin_unlock(&imp->imp_lock); - - return do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION, - keylen, key, vallen, val, set); - } - if (KEY_IS(KEY_SPTLRPC_CONF)) { - sptlrpc_conf_client_adapt(exp->exp_obd); - return 0; - } - if (KEY_IS(KEY_FLUSH_CTX)) { - sptlrpc_import_flush_my_ctx(imp); - return 0; - } - if (KEY_IS(KEY_CHANGELOG_CLEAR)) { - rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION, - keylen, key, vallen, val, set); - return rc; - } - if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) { - rc = mdc_hsm_copytool_send(vallen, val); - return rc; - } - if (KEY_IS(KEY_DEFAULT_EASIZE)) { - u32 *default_easize = val; - - exp->exp_obd->u.cli.cl_default_mds_easize = 
*default_easize; - return 0; - } - - CERROR("Unknown key %s\n", (char *)key); - return -EINVAL; -} - -static int mdc_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val) -{ - int rc = -EINVAL; - - if (KEY_IS(KEY_MAX_EASIZE)) { - u32 mdsize, *max_easize; - - if (*vallen != sizeof(int)) - return -EINVAL; - mdsize = *(u32 *)val; - if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize) - exp->exp_obd->u.cli.cl_max_mds_easize = mdsize; - max_easize = val; - *max_easize = exp->exp_obd->u.cli.cl_max_mds_easize; - return 0; - } else if (KEY_IS(KEY_DEFAULT_EASIZE)) { - u32 *default_easize; - - if (*vallen != sizeof(int)) - return -EINVAL; - default_easize = val; - *default_easize = exp->exp_obd->u.cli.cl_default_mds_easize; - return 0; - } else if (KEY_IS(KEY_CONN_DATA)) { - struct obd_import *imp = class_exp2cliimp(exp); - struct obd_connect_data *data = val; - - if (*vallen != sizeof(*data)) - return -EINVAL; - - *data = imp->imp_connect_data; - return 0; - } else if (KEY_IS(KEY_TGT_COUNT)) { - *((u32 *)val) = 1; - return 0; - } - - rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val); - - return rc; -} - -static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid, - struct ptlrpc_request **request) -{ - struct ptlrpc_request *req; - int rc; - - *request = NULL; - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - mdc_pack_body(req, fid, 0, 0, -1, 0); - - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - if (rc) - ptlrpc_req_finished(req); - else - *request = req; - return rc; -} - -static int mdc_import_event(struct obd_device *obd, struct obd_import *imp, - enum obd_import_event event) -{ - int rc = 0; - - LASSERT(imp->imp_obd == obd); - - switch (event) { - case IMP_EVENT_INACTIVE: { - struct client_obd *cli = 
&obd->u.cli; - /* - * Flush current sequence to make client obtain new one - * from server in case of disconnect/reconnect. - */ - if (cli->cl_seq) - seq_client_flush(cli->cl_seq); - - rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL); - break; - } - case IMP_EVENT_INVALIDATE: { - struct ldlm_namespace *ns = obd->obd_namespace; - - ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); - - break; - } - case IMP_EVENT_ACTIVE: - rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL); - /* redo the kuc registration after reconnecting */ - if (rc == 0) - /* re-register HSM agents */ - rc = libcfs_kkuc_group_foreach(KUC_GRP_HSM, - mdc_hsm_ct_reregister, - (void *)imp); - break; - case IMP_EVENT_OCD: - rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL); - break; - case IMP_EVENT_DISCON: - case IMP_EVENT_DEACTIVATE: - case IMP_EVENT_ACTIVATE: - break; - default: - CERROR("Unknown import event %x\n", event); - LBUG(); - } - return rc; -} - -int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp, - struct lu_fid *fid, struct md_op_data *op_data) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - struct lu_client_seq *seq = cli->cl_seq; - - return seq_client_alloc_fid(env, seq, fid); -} - -static struct obd_uuid *mdc_get_uuid(struct obd_export *exp) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - - return &cli->cl_target_uuid; -} - -/** - * Determine whether the lock can be canceled before replaying it during - * recovery, non zero value will be return if the lock can be canceled, - * or zero returned for not - */ -static int mdc_cancel_weight(struct ldlm_lock *lock) -{ - if (lock->l_resource->lr_type != LDLM_IBITS) - return 0; - - /* FIXME: if we ever get into a situation where there are too many - * opened files with open locks on a single node, then we really - * should replay these open locks to reget it - */ - if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN) - return 0; - - return 1; -} - -static int 
mdc_resource_inode_free(struct ldlm_resource *res) -{ - if (res->lr_lvb_inode) - res->lr_lvb_inode = NULL; - - return 0; -} - -static struct ldlm_valblock_ops inode_lvbo = { - .lvbo_free = mdc_resource_inode_free, -}; - -static int mdc_llog_init(struct obd_device *obd) -{ - struct obd_llog_group *olg = &obd->obd_olg; - struct llog_ctxt *ctxt; - int rc; - - rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, obd, - &llog_client_ops); - if (rc) - return rc; - - ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT); - llog_initiator_connect(ctxt); - llog_ctxt_put(ctxt); - - return 0; -} - -static void mdc_llog_finish(struct obd_device *obd) -{ - struct llog_ctxt *ctxt; - - ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT); - if (ctxt) - llog_cleanup(NULL, ctxt); -} - -static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg) -{ - struct lprocfs_static_vars lvars = { NULL }; - int rc; - - rc = ptlrpcd_addref(); - if (rc < 0) - return rc; - - rc = client_obd_setup(obd, cfg); - if (rc) - goto err_ptlrpcd_decref; - - lprocfs_mdc_init_vars(&lvars); - lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars); - sptlrpc_lprocfs_cliobd_attach(obd); - ptlrpc_lprocfs_register_obd(obd); - - ns_register_cancel(obd->obd_namespace, mdc_cancel_weight); - - obd->obd_namespace->ns_lvbo = &inode_lvbo; - - rc = mdc_llog_init(obd); - if (rc) { - mdc_cleanup(obd); - CERROR("failed to setup llogging subsystems\n"); - return rc; - } - - return rc; - -err_ptlrpcd_decref: - ptlrpcd_decref(); - return rc; -} - -/* Initialize the default and maximum LOV EA sizes. This allows - * us to make MDS RPCs with large enough reply buffers to hold a default - * sized EA without having to calculate this (via a call into the - * LOV + OSCs) each time we make an RPC. The maximum size is also tracked - * but not used to avoid wastefully vmalloc()'ing large reply buffers when - * a large number of stripes is possible. 
If a larger reply buffer is - * required it will be reallocated in the ptlrpc layer due to overflow. - */ -static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize) -{ - struct obd_device *obd = exp->exp_obd; - struct client_obd *cli = &obd->u.cli; - - if (cli->cl_max_mds_easize < easize) - cli->cl_max_mds_easize = easize; - - if (cli->cl_default_mds_easize < def_easize) - cli->cl_default_mds_easize = def_easize; - - return 0; -} - -static int mdc_precleanup(struct obd_device *obd) -{ - /* Failsafe, ok if racy */ - if (obd->obd_type->typ_refcnt <= 1) - libcfs_kkuc_group_rem(0, KUC_GRP_HSM); - - obd_cleanup_client_import(obd); - ptlrpc_lprocfs_unregister_obd(obd); - lprocfs_obd_cleanup(obd); - mdc_llog_finish(obd); - return 0; -} - -static int mdc_cleanup(struct obd_device *obd) -{ - ptlrpcd_decref(); - - return client_obd_cleanup(obd); -} - -static int mdc_process_config(struct obd_device *obd, u32 len, void *buf) -{ - struct lustre_cfg *lcfg = buf; - struct lprocfs_static_vars lvars = { NULL }; - int rc = 0; - - lprocfs_mdc_init_vars(&lvars); - switch (lcfg->lcfg_command) { - default: - rc = class_process_proc_param(PARAM_MDC, lvars.obd_vars, - lcfg, obd); - if (rc > 0) - rc = 0; - break; - } - return rc; -} - -static struct obd_ops mdc_obd_ops = { - .owner = THIS_MODULE, - .setup = mdc_setup, - .precleanup = mdc_precleanup, - .cleanup = mdc_cleanup, - .add_conn = client_import_add_conn, - .del_conn = client_import_del_conn, - .connect = client_connect_import, - .disconnect = client_disconnect_export, - .iocontrol = mdc_iocontrol, - .set_info_async = mdc_set_info_async, - .statfs = mdc_statfs, - .fid_init = client_fid_init, - .fid_fini = client_fid_fini, - .fid_alloc = mdc_fid_alloc, - .import_event = mdc_import_event, - .get_info = mdc_get_info, - .process_config = mdc_process_config, - .get_uuid = mdc_get_uuid, - .quotactl = mdc_quotactl, -}; - -static struct md_ops mdc_md_ops = { - .getstatus = mdc_getstatus, - .null_inode = 
mdc_null_inode, - .close = mdc_close, - .create = mdc_create, - .enqueue = mdc_enqueue, - .getattr = mdc_getattr, - .getattr_name = mdc_getattr_name, - .intent_lock = mdc_intent_lock, - .link = mdc_link, - .rename = mdc_rename, - .setattr = mdc_setattr, - .setxattr = mdc_setxattr, - .getxattr = mdc_getxattr, - .sync = mdc_sync, - .read_page = mdc_read_page, - .unlink = mdc_unlink, - .cancel_unused = mdc_cancel_unused, - .init_ea_size = mdc_init_ea_size, - .set_lock_data = mdc_set_lock_data, - .lock_match = mdc_lock_match, - .get_lustre_md = mdc_get_lustre_md, - .free_lustre_md = mdc_free_lustre_md, - .set_open_replay_data = mdc_set_open_replay_data, - .clear_open_replay_data = mdc_clear_open_replay_data, - .intent_getattr_async = mdc_intent_getattr_async, - .revalidate_lock = mdc_revalidate_lock -}; - -static int __init mdc_init(void) -{ - struct lprocfs_static_vars lvars = { NULL }; - int rc; - - rc = libcfs_setup(); - if (rc) - return rc; - - lprocfs_mdc_init_vars(&lvars); - - return class_register_type(&mdc_obd_ops, &mdc_md_ops, - LUSTRE_MDC_NAME, NULL); -} - -static void /*__exit*/ mdc_exit(void) -{ - class_unregister_type(LUSTRE_MDC_NAME); -} - -MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("Lustre Metadata Client"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(mdc_init); -module_exit(mdc_exit); diff --git a/drivers/staging/lustre/lustre/mgc/Makefile b/drivers/staging/lustre/lustre/mgc/Makefile deleted file mode 100644 index 8abf108dbcf7..000000000000 --- a/drivers/staging/lustre/lustre/mgc/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += mgc.o -mgc-y := mgc_request.o lproc_mgc.o diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c deleted file mode 100644 index 636770624e8f..000000000000 --- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include "mgc_internal.h" - -LPROC_SEQ_FOPS_RO_TYPE(mgc, connect_flags); -LPROC_SEQ_FOPS_RO_TYPE(mgc, server_uuid); -LPROC_SEQ_FOPS_RO_TYPE(mgc, conn_uuid); -LPROC_SEQ_FOPS_RO_TYPE(mgc, import); -LPROC_SEQ_FOPS_RO_TYPE(mgc, state); - -LPROC_SEQ_FOPS_WR_ONLY(mgc, ping); - -static int mgc_ir_state_seq_show(struct seq_file *m, void *v) -{ - return lprocfs_mgc_rd_ir_state(m, m->private); -} - -LPROC_SEQ_FOPS_RO(mgc_ir_state); - -static struct lprocfs_vars lprocfs_mgc_obd_vars[] = { - { "ping", &mgc_ping_fops, NULL, 0222 }, - { "connect_flags", &mgc_connect_flags_fops, NULL, 0 }, - { "mgs_server_uuid", &mgc_server_uuid_fops, NULL, 0 }, - { "mgs_conn_uuid", &mgc_conn_uuid_fops, NULL, 0 }, - { "import", &mgc_import_fops, NULL, 0 }, - { "state", &mgc_state_fops, NULL, 0 }, - { "ir_state", &mgc_ir_state_fops, NULL, 0 }, - { NULL } -}; - -void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars) -{ - lvars->obd_vars = lprocfs_mgc_obd_vars; -} diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h deleted file mode 100644 index 9541892b67c7..000000000000 --- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef _MGC_INTERNAL_H -#define _MGC_INTERNAL_H - -#include -#include -#include -#include - -void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars); -int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data); - -int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld); - -static inline int cld_is_sptlrpc(struct config_llog_data *cld) -{ - return cld->cld_type == CONFIG_T_SPTLRPC; -} - -static inline int cld_is_recover(struct config_llog_data *cld) -{ - return cld->cld_type == CONFIG_T_RECOVER; -} - -#endif /* _MGC_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c deleted file mode 100644 index 32df804614d3..000000000000 --- a/drivers/staging/lustre/lustre/mgc/mgc_request.c +++ /dev/null @@ -1,1851 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/mgc/mgc_request.c - * - * Author: Nathan Rutman - */ - -#define DEBUG_SUBSYSTEM S_MGC -#define D_MGC D_CONFIG /*|D_WARNING*/ - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "mgc_internal.h" - -static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id, - int type) -{ - __u64 resname = 0; - - if (len > sizeof(resname)) { - CERROR("name too long: %s\n", name); - return -EINVAL; - } - if (len <= 0) { - CERROR("missing name: %s\n", name); - return -EINVAL; - } - memcpy(&resname, name, len); - - /* Always use the same endianness for the resid */ - memset(res_id, 0, sizeof(*res_id)); - res_id->name[0] = cpu_to_le64(resname); - /* XXX: unfortunately, sptlprc and config llog share one lock */ - switch (type) { - case CONFIG_T_CONFIG: - case CONFIG_T_SPTLRPC: - resname = 0; - break; - case CONFIG_T_RECOVER: - case CONFIG_T_PARAMS: - resname = type; - break; - default: - LBUG(); - } - res_id->name[1] = cpu_to_le64(resname); - CDEBUG(D_MGC, "log %s to resid %#llx/%#llx (%.8s)\n", name, - res_id->name[0], res_id->name[1], (char *)&res_id->name[0]); - return 0; -} - -int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type) -{ - /* fsname is at most 8 chars long, maybe contain "-". - * e.g. 
"lustre", "SUN-000" - */ - return mgc_name2resid(fsname, strlen(fsname), res_id, type); -} -EXPORT_SYMBOL(mgc_fsname2resid); - -static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type) -{ - char *name_end; - int len; - - /* logname consists of "fsname-nodetype". - * e.g. "lustre-MDT0001", "SUN-000-client" - * there is an exception: llog "params" - */ - name_end = strrchr(logname, '-'); - if (!name_end) - len = strlen(logname); - else - len = name_end - logname; - return mgc_name2resid(logname, len, res_id, type); -} - -/********************** config llog list **********************/ -static LIST_HEAD(config_llog_list); -static DEFINE_SPINLOCK(config_list_lock); - -/* Take a reference to a config log */ -static int config_log_get(struct config_llog_data *cld) -{ - atomic_inc(&cld->cld_refcount); - CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname, - atomic_read(&cld->cld_refcount)); - return 0; -} - -/* Drop a reference to a config log. When no longer referenced, - * we can free the config log data - */ -static void config_log_put(struct config_llog_data *cld) -{ - CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname, - atomic_read(&cld->cld_refcount)); - LASSERT(atomic_read(&cld->cld_refcount) > 0); - - /* spinlock to make sure no item with 0 refcount in the list */ - if (atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) { - list_del(&cld->cld_list_chain); - spin_unlock(&config_list_lock); - - CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname); - - if (cld->cld_recover) - config_log_put(cld->cld_recover); - if (cld->cld_params) - config_log_put(cld->cld_params); - if (cld->cld_sptlrpc) - config_log_put(cld->cld_sptlrpc); - if (cld_is_sptlrpc(cld)) - sptlrpc_conf_log_stop(cld->cld_logname); - - class_export_put(cld->cld_mgcexp); - kfree(cld); - } -} - -/* Find a config log by name */ -static -struct config_llog_data *config_log_find(char *logname, - struct config_llog_instance *cfg) -{ - struct config_llog_data *cld; - 
struct config_llog_data *found = NULL; - void *instance; - - LASSERT(logname); - - instance = cfg ? cfg->cfg_instance : NULL; - spin_lock(&config_list_lock); - list_for_each_entry(cld, &config_llog_list, cld_list_chain) { - /* check if instance equals */ - if (instance != cld->cld_cfg.cfg_instance) - continue; - - /* instance may be NULL, should check name */ - if (strcmp(logname, cld->cld_logname) == 0) { - found = cld; - config_log_get(found); - break; - } - } - spin_unlock(&config_list_lock); - return found; -} - -static -struct config_llog_data *do_config_log_add(struct obd_device *obd, - char *logname, - int type, - struct config_llog_instance *cfg, - struct super_block *sb) -{ - struct config_llog_data *cld; - int rc; - - CDEBUG(D_MGC, "do adding config log %s:%p\n", logname, - cfg ? cfg->cfg_instance : NULL); - - cld = kzalloc(sizeof(*cld) + strlen(logname) + 1, GFP_NOFS); - if (!cld) - return ERR_PTR(-ENOMEM); - - rc = mgc_logname2resid(logname, &cld->cld_resid, type); - if (rc) { - kfree(cld); - return ERR_PTR(rc); - } - - strcpy(cld->cld_logname, logname); - if (cfg) - cld->cld_cfg = *cfg; - else - cld->cld_cfg.cfg_callback = class_config_llog_handler; - mutex_init(&cld->cld_lock); - cld->cld_cfg.cfg_last_idx = 0; - cld->cld_cfg.cfg_flags = 0; - cld->cld_cfg.cfg_sb = sb; - cld->cld_type = type; - atomic_set(&cld->cld_refcount, 1); - - /* Keep the mgc around until we are done */ - cld->cld_mgcexp = class_export_get(obd->obd_self_export); - - if (cld_is_sptlrpc(cld)) { - sptlrpc_conf_log_start(logname); - cld->cld_cfg.cfg_obdname = obd->obd_name; - } - - spin_lock(&config_list_lock); - list_add(&cld->cld_list_chain, &config_llog_list); - spin_unlock(&config_list_lock); - - if (cld_is_sptlrpc(cld)) { - rc = mgc_process_log(obd, cld); - if (rc && rc != -ENOENT) - CERROR("failed processing sptlrpc log: %d\n", rc); - } - - return cld; -} - -static struct config_llog_data * -config_recover_log_add(struct obd_device *obd, char *fsname, - struct 
config_llog_instance *cfg, - struct super_block *sb) -{ - struct config_llog_instance lcfg = *cfg; - struct config_llog_data *cld; - char logname[32]; - - /* we have to use different llog for clients and mdts for cmd - * where only clients are notified if one of cmd server restarts - */ - LASSERT(strlen(fsname) < sizeof(logname) / 2); - strcpy(logname, fsname); - LASSERT(lcfg.cfg_instance); - strcat(logname, "-cliir"); - - cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb); - return cld; -} - -static struct config_llog_data * -config_params_log_add(struct obd_device *obd, - struct config_llog_instance *cfg, struct super_block *sb) -{ - struct config_llog_instance lcfg = *cfg; - struct config_llog_data *cld; - - lcfg.cfg_instance = sb; - - cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS, - &lcfg, sb); - - return cld; -} - -/** Add this log to the list of active logs watched by an MGC. - * Active means we're watching for updates. - * We have one active log per "mount" - client instance or servername. - * Each instance may be at a different point in the log. - */ -static struct config_llog_data * -config_log_add(struct obd_device *obd, char *logname, - struct config_llog_instance *cfg, struct super_block *sb) -{ - struct lustre_sb_info *lsi = s2lsi(sb); - struct config_llog_data *cld; - struct config_llog_data *sptlrpc_cld; - struct config_llog_data *params_cld; - struct config_llog_data *recover_cld = NULL; - char seclogname[32]; - char *ptr; - int rc; - - CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance); - - /* - * for each regular log, the depended sptlrpc log name is - * -sptlrpc. multiple regular logs may share one sptlrpc log. 
- */ - ptr = strrchr(logname, '-'); - if (!ptr || ptr - logname > 8) { - CERROR("logname %s is too long\n", logname); - return ERR_PTR(-EINVAL); - } - - memcpy(seclogname, logname, ptr - logname); - strcpy(seclogname + (ptr - logname), "-sptlrpc"); - - sptlrpc_cld = config_log_find(seclogname, NULL); - if (!sptlrpc_cld) { - sptlrpc_cld = do_config_log_add(obd, seclogname, - CONFIG_T_SPTLRPC, NULL, NULL); - if (IS_ERR(sptlrpc_cld)) { - CERROR("can't create sptlrpc log: %s\n", seclogname); - rc = PTR_ERR(sptlrpc_cld); - goto out_err; - } - } - params_cld = config_params_log_add(obd, cfg, sb); - if (IS_ERR(params_cld)) { - rc = PTR_ERR(params_cld); - CERROR("%s: can't create params log: rc = %d\n", - obd->obd_name, rc); - goto out_sptlrpc; - } - - cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb); - if (IS_ERR(cld)) { - CERROR("can't create log: %s\n", logname); - rc = PTR_ERR(cld); - goto out_params; - } - - LASSERT(lsi->lsi_lmd); - if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) { - ptr = strrchr(seclogname, '-'); - if (ptr) { - *ptr = 0; - } else { - CERROR("%s: sptlrpc log name not correct, %s: rc = %d\n", - obd->obd_name, seclogname, -EINVAL); - rc = -EINVAL; - goto out_cld; - } - recover_cld = config_recover_log_add(obd, seclogname, cfg, sb); - if (IS_ERR(recover_cld)) { - rc = PTR_ERR(recover_cld); - goto out_cld; - } - } - - mutex_lock(&cld->cld_lock); - cld->cld_recover = recover_cld; - cld->cld_params = params_cld; - cld->cld_sptlrpc = sptlrpc_cld; - mutex_unlock(&cld->cld_lock); - - return cld; - -out_cld: - config_log_put(cld); - -out_params: - config_log_put(params_cld); - -out_sptlrpc: - config_log_put(sptlrpc_cld); - -out_err: - return ERR_PTR(rc); -} - -static DEFINE_MUTEX(llog_process_lock); - -static inline void config_mark_cld_stop(struct config_llog_data *cld) -{ - mutex_lock(&cld->cld_lock); - spin_lock(&config_list_lock); - cld->cld_stopping = 1; - spin_unlock(&config_list_lock); - mutex_unlock(&cld->cld_lock); -} - -/** Stop 
watching for updates on this log. - */ -static int config_log_end(char *logname, struct config_llog_instance *cfg) -{ - struct config_llog_data *cld; - struct config_llog_data *cld_sptlrpc = NULL; - struct config_llog_data *cld_params = NULL; - struct config_llog_data *cld_recover = NULL; - int rc = 0; - - cld = config_log_find(logname, cfg); - if (!cld) - return -ENOENT; - - mutex_lock(&cld->cld_lock); - /* - * if cld_stopping is set, it means we didn't start the log thus - * not owning the start ref. this can happen after previous umount: - * the cld still hanging there waiting for lock cancel, and we - * remount again but failed in the middle and call log_end without - * calling start_log. - */ - if (unlikely(cld->cld_stopping)) { - mutex_unlock(&cld->cld_lock); - /* drop the ref from the find */ - config_log_put(cld); - return rc; - } - - spin_lock(&config_list_lock); - cld->cld_stopping = 1; - spin_unlock(&config_list_lock); - - cld_recover = cld->cld_recover; - cld->cld_recover = NULL; - - cld_params = cld->cld_params; - cld->cld_params = NULL; - cld_sptlrpc = cld->cld_sptlrpc; - cld->cld_sptlrpc = NULL; - mutex_unlock(&cld->cld_lock); - - if (cld_recover) { - config_mark_cld_stop(cld_recover); - config_log_put(cld_recover); - } - - if (cld_params) { - config_mark_cld_stop(cld_params); - config_log_put(cld_params); - } - - if (cld_sptlrpc) - config_log_put(cld_sptlrpc); - - /* drop the ref from the find */ - config_log_put(cld); - /* drop the start ref */ - config_log_put(cld); - - CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? 
logname : "client", - rc); - return rc; -} - -int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data) -{ - struct obd_device *obd = data; - struct obd_import *imp; - struct obd_connect_data *ocd; - struct config_llog_data *cld; - int rc; - - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - imp = obd->u.cli.cl_import; - ocd = &imp->imp_connect_data; - - seq_printf(m, "imperative_recovery: %s\n", - OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED"); - seq_printf(m, "client_state:\n"); - - spin_lock(&config_list_lock); - list_for_each_entry(cld, &config_llog_list, cld_list_chain) { - if (!cld->cld_recover) - continue; - seq_printf(m, " - { client: %s, nidtbl_version: %u }\n", - cld->cld_logname, - cld->cld_recover->cld_cfg.cfg_last_idx); - } - spin_unlock(&config_list_lock); - - up_read(&obd->u.cli.cl_sem); - return 0; -} - -/* reenqueue any lost locks */ -#define RQ_RUNNING 0x1 -#define RQ_NOW 0x2 -#define RQ_LATER 0x4 -#define RQ_STOP 0x8 -#define RQ_PRECLEANUP 0x10 -static int rq_state; -static wait_queue_head_t rq_waitq; -static DECLARE_COMPLETION(rq_exit); -static DECLARE_COMPLETION(rq_start); - -static void do_requeue(struct config_llog_data *cld) -{ - LASSERT(atomic_read(&cld->cld_refcount) > 0); - - /* Do not run mgc_process_log on a disconnected export or an - * export which is being disconnected. Take the client - * semaphore to make the check non-racy. 
- */ - down_read_nested(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem, - OBD_CLI_SEM_MGC); - - if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) { - int rc; - - CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname); - rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld); - if (rc && rc != -ENOENT) - CERROR("failed processing log: %d\n", rc); - } else { - CDEBUG(D_MGC, "disconnecting, won't update log %s\n", - cld->cld_logname); - } - up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem); -} - -/* this timeout represents how many seconds MGC should wait before - * requeue config and recover lock to the MGS. We need to randomize this - * in order to not flood the MGS. - */ -#define MGC_TIMEOUT_MIN_SECONDS 5 -#define MGC_TIMEOUT_RAND_CENTISEC 500 - -static int mgc_requeue_thread(void *data) -{ - bool first = true; - - CDEBUG(D_MGC, "Starting requeue thread\n"); - - /* Keep trying failed locks periodically */ - spin_lock(&config_list_lock); - rq_state |= RQ_RUNNING; - while (!(rq_state & RQ_STOP)) { - struct config_llog_data *cld, *cld_prev; - int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC); - int to; - - /* Any new or requeued lostlocks will change the state */ - rq_state &= ~(RQ_NOW | RQ_LATER); - spin_unlock(&config_list_lock); - - if (first) { - first = false; - complete(&rq_start); - } - - /* Always wait a few seconds to allow the server who - * caused the lock revocation to finish its setup, plus some - * random so everyone doesn't try to reconnect at once. - */ - to = msecs_to_jiffies(MGC_TIMEOUT_MIN_SECONDS * MSEC_PER_SEC); - /* rand is centi-seconds */ - to += msecs_to_jiffies(rand * MSEC_PER_SEC / 100); - wait_event_idle_timeout(rq_waitq, - rq_state & (RQ_STOP | RQ_PRECLEANUP), - to); - - /* - * iterate & processing through the list. for each cld, process - * its depending sptlrpc cld firstly (if any) and then itself. 
- * - * it's guaranteed any item in the list must have - * reference > 0; and if cld_lostlock is set, at - * least one reference is taken by the previous enqueue. - */ - cld_prev = NULL; - - spin_lock(&config_list_lock); - rq_state &= ~RQ_PRECLEANUP; - list_for_each_entry(cld, &config_llog_list, cld_list_chain) { - if (!cld->cld_lostlock || cld->cld_stopping) - continue; - - /* - * hold reference to avoid being freed during - * subsequent processing. - */ - config_log_get(cld); - cld->cld_lostlock = 0; - spin_unlock(&config_list_lock); - - if (cld_prev) - config_log_put(cld_prev); - cld_prev = cld; - - if (likely(!(rq_state & RQ_STOP))) { - do_requeue(cld); - spin_lock(&config_list_lock); - } else { - spin_lock(&config_list_lock); - break; - } - } - spin_unlock(&config_list_lock); - if (cld_prev) - config_log_put(cld_prev); - - /* Wait a bit to see if anyone else needs a requeue */ - wait_event_idle(rq_waitq, rq_state & (RQ_NOW | RQ_STOP)); - spin_lock(&config_list_lock); - } - - /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */ - rq_state &= ~RQ_RUNNING; - spin_unlock(&config_list_lock); - - complete(&rq_exit); - - CDEBUG(D_MGC, "Ending requeue thread\n"); - return 0; -} - -/* Add a cld to the list to requeue. Start the requeue thread if needed. - * We are responsible for dropping the config log reference from here on out. 
- */ -static void mgc_requeue_add(struct config_llog_data *cld) -{ - bool wakeup = false; - - CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n", - cld->cld_logname, atomic_read(&cld->cld_refcount), - cld->cld_stopping, rq_state); - LASSERT(atomic_read(&cld->cld_refcount) > 0); - - mutex_lock(&cld->cld_lock); - spin_lock(&config_list_lock); - if (!(rq_state & RQ_STOP) && !cld->cld_stopping && !cld->cld_lostlock) { - cld->cld_lostlock = 1; - rq_state |= RQ_NOW; - wakeup = true; - } - spin_unlock(&config_list_lock); - mutex_unlock(&cld->cld_lock); - if (wakeup) - wake_up(&rq_waitq); -} - -static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd) -{ - struct llog_ctxt *ctxt; - int rc; - - /* setup only remote ctxt, the local disk context is switched per each - * filesystem during mgc_fs_setup() - */ - rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd, - &llog_client_ops); - if (rc) - return rc; - - ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT); - LASSERT(ctxt); - - llog_initiator_connect(ctxt); - llog_ctxt_put(ctxt); - - return 0; -} - -static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd) -{ - struct llog_ctxt *ctxt; - - ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT); - if (ctxt) - llog_cleanup(env, ctxt); - - return 0; -} - -static atomic_t mgc_count = ATOMIC_INIT(0); -static int mgc_precleanup(struct obd_device *obd) -{ - int rc = 0; - int temp; - - if (atomic_dec_and_test(&mgc_count)) { - LASSERT(rq_state & RQ_RUNNING); - /* stop requeue thread */ - temp = RQ_STOP; - } else { - /* wakeup requeue thread to clean our cld */ - temp = RQ_NOW | RQ_PRECLEANUP; - } - - spin_lock(&config_list_lock); - rq_state |= temp; - spin_unlock(&config_list_lock); - wake_up(&rq_waitq); - - if (temp & RQ_STOP) - wait_for_completion(&rq_exit); - obd_cleanup_client_import(obd); - - rc = mgc_llog_fini(NULL, obd); - if (rc) - CERROR("failed to cleanup llogging subsystems\n"); - - return rc; -} - -static int 
mgc_cleanup(struct obd_device *obd) -{ - /* COMPAT_146 - old config logs may have added profiles we don't - * know about - */ - if (obd->obd_type->typ_refcnt <= 1) - /* Only for the last mgc */ - class_del_profiles(); - - lprocfs_obd_cleanup(obd); - ptlrpcd_decref(); - - return client_obd_cleanup(obd); -} - -static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - struct lprocfs_static_vars lvars = { NULL }; - struct task_struct *task; - int rc; - - rc = ptlrpcd_addref(); - if (rc < 0) - goto err_noref; - - rc = client_obd_setup(obd, lcfg); - if (rc) - goto err_decref; - - rc = mgc_llog_init(NULL, obd); - if (rc) { - CERROR("failed to setup llogging subsystems\n"); - goto err_cleanup; - } - - lprocfs_mgc_init_vars(&lvars); - lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars); - sptlrpc_lprocfs_cliobd_attach(obd); - - if (atomic_inc_return(&mgc_count) == 1) { - rq_state = 0; - init_waitqueue_head(&rq_waitq); - - /* start requeue thread */ - task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue"); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - CERROR("%s: cannot start requeue thread: rc = %d; no more log updates\n", - obd->obd_name, rc); - goto err_cleanup; - } - /* rc is the task_struct pointer of mgc_requeue_thread. */ - rc = 0; - wait_for_completion(&rq_start); - } - - return rc; - -err_cleanup: - client_obd_cleanup(obd); -err_decref: - ptlrpcd_decref(); -err_noref: - return rc; -} - -/* based on ll_mdc_blocking_ast */ -static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, - void *data, int flag) -{ - struct lustre_handle lockh; - struct config_llog_data *cld = data; - int rc = 0; - - switch (flag) { - case LDLM_CB_BLOCKING: - /* mgs wants the lock, give it up... */ - LDLM_DEBUG(lock, "MGC blocking CB"); - ldlm_lock2handle(lock, &lockh); - rc = ldlm_cli_cancel(&lockh, LCF_ASYNC); - break; - case LDLM_CB_CANCELING: - /* We've given up the lock, prepare ourselves to update. 
*/ - LDLM_DEBUG(lock, "MGC cancel CB"); - - CDEBUG(D_MGC, "Lock res " DLDLMRES " (%.8s)\n", - PLDLMRES(lock->l_resource), - (char *)&lock->l_resource->lr_name.name[0]); - - if (!cld) { - CDEBUG(D_INFO, "missing data, won't requeue\n"); - break; - } - - /* held at mgc_process_log(). */ - LASSERT(atomic_read(&cld->cld_refcount) > 0); - - lock->l_ast_data = NULL; - /* Are we done with this log? */ - if (cld->cld_stopping) { - CDEBUG(D_MGC, "log %s: stopping, won't requeue\n", - cld->cld_logname); - config_log_put(cld); - break; - } - /* Make sure not to re-enqueue when the mgc is stopping - * (we get called from client_disconnect_export) - */ - if (!lock->l_conn_export || - !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) { - CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n", - cld->cld_logname); - config_log_put(cld); - break; - } - - /* Re-enqueue now */ - mgc_requeue_add(cld); - config_log_put(cld); - break; - default: - LBUG(); - } - - return rc; -} - -/* Not sure where this should go... */ -/* This is the timeout value for MGS_CONNECT request plus a ping interval, such - * that we can have a chance to try the secondary MGS if any. - */ -#define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 
0 : at_min) \ - + PING_INTERVAL) -#define MGC_TARGET_REG_LIMIT 10 -#define MGC_SEND_PARAM_LIMIT 10 - -/* Send parameter to MGS*/ -static int mgc_set_mgs_param(struct obd_export *exp, - struct mgs_send_param *msp) -{ - struct ptlrpc_request *req; - struct mgs_send_param *req_msp, *rep_msp; - int rc; - - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_MGS_SET_INFO, LUSTRE_MGS_VERSION, - MGS_SET_INFO); - if (!req) - return -ENOMEM; - - req_msp = req_capsule_client_get(&req->rq_pill, &RMF_MGS_SEND_PARAM); - if (!req_msp) { - ptlrpc_req_finished(req); - return -ENOMEM; - } - - memcpy(req_msp, msp, sizeof(*req_msp)); - ptlrpc_request_set_replen(req); - - /* Limit how long we will wait for the enqueue to complete */ - req->rq_delay_limit = MGC_SEND_PARAM_LIMIT; - rc = ptlrpc_queue_wait(req); - if (!rc) { - rep_msp = req_capsule_server_get(&req->rq_pill, &RMF_MGS_SEND_PARAM); - memcpy(msp, rep_msp, sizeof(*rep_msp)); - } - - ptlrpc_req_finished(req); - - return rc; -} - -/* Take a config lock so we can get cancel notifications */ -static int mgc_enqueue(struct obd_export *exp, __u32 type, - union ldlm_policy_data *policy, __u32 mode, - __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb, - void *data, __u32 lvb_len, void *lvb_swabber, - struct lustre_handle *lockh) -{ - struct config_llog_data *cld = data; - struct ldlm_enqueue_info einfo = { - .ei_type = type, - .ei_mode = mode, - .ei_cb_bl = mgc_blocking_ast, - .ei_cb_cp = ldlm_completion_ast, - }; - struct ptlrpc_request *req; - int short_limit = cld_is_sptlrpc(cld); - int rc; - - CDEBUG(D_MGC, "Enqueue for %s (res %#llx)\n", cld->cld_logname, - cld->cld_resid.name[0]); - - /* We need a callback for every lockholder, so don't try to - * ldlm_lock_match (see rev 1.1.2.11.2.47) - */ - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, - LDLM_ENQUEUE); - if (!req) - return -ENOMEM; - - req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0); - 
ptlrpc_request_set_replen(req); - - /* Limit how long we will wait for the enqueue to complete */ - req->rq_delay_limit = short_limit ? 5 : MGC_ENQUEUE_LIMIT; - rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags, - NULL, 0, LVB_T_NONE, lockh, 0); - /* A failed enqueue should still call the mgc_blocking_ast, - * where it will be requeued if needed ("grant failed"). - */ - ptlrpc_req_finished(req); - return rc; -} - -static void mgc_notify_active(struct obd_device *unused) -{ - /* wakeup mgc_requeue_thread to requeue mgc lock */ - spin_lock(&config_list_lock); - rq_state |= RQ_NOW; - spin_unlock(&config_list_lock); - wake_up(&rq_waitq); - - /* TODO: Help the MGS rebuild nidtbl. -jay */ -} - -/* Send target_reg message to MGS */ -static int mgc_target_register(struct obd_export *exp, - struct mgs_target_info *mti) -{ - struct ptlrpc_request *req; - struct mgs_target_info *req_mti, *rep_mti; - int rc; - - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION, - MGS_TARGET_REG); - if (!req) - return -ENOMEM; - - req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO); - if (!req_mti) { - ptlrpc_req_finished(req); - return -ENOMEM; - } - - memcpy(req_mti, mti, sizeof(*req_mti)); - ptlrpc_request_set_replen(req); - CDEBUG(D_MGC, "register %s\n", mti->mti_svname); - /* Limit how long we will wait for the enqueue to complete */ - req->rq_delay_limit = MGC_TARGET_REG_LIMIT; - - rc = ptlrpc_queue_wait(req); - if (!rc) { - rep_mti = req_capsule_server_get(&req->rq_pill, - &RMF_MGS_TARGET_INFO); - memcpy(mti, rep_mti, sizeof(*rep_mti)); - CDEBUG(D_MGC, "register %s got index = %d\n", - mti->mti_svname, mti->mti_stripe_index); - } - ptlrpc_req_finished(req); - - return rc; -} - -static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, - u32 keylen, void *key, u32 vallen, - void *val, struct ptlrpc_request_set *set) -{ - int rc = -EINVAL; - - /* Turn off initial_recov after we try 
all backup servers once */ - if (KEY_IS(KEY_INIT_RECOV_BACKUP)) { - struct obd_import *imp = class_exp2cliimp(exp); - int value; - - if (vallen != sizeof(int)) - return -EINVAL; - value = *(int *)val; - CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n", - imp->imp_obd->obd_name, value, - imp->imp_deactive, imp->imp_invalid, - imp->imp_replayable, imp->imp_obd->obd_replayable, - ptlrpc_import_state_name(imp->imp_state)); - /* Resurrect if we previously died */ - if ((imp->imp_state != LUSTRE_IMP_FULL && - imp->imp_state != LUSTRE_IMP_NEW) || value > 1) - ptlrpc_reconnect_import(imp); - return 0; - } - if (KEY_IS(KEY_SET_INFO)) { - struct mgs_send_param *msp; - - msp = val; - rc = mgc_set_mgs_param(exp, msp); - return rc; - } - if (KEY_IS(KEY_MGSSEC)) { - struct client_obd *cli = &exp->exp_obd->u.cli; - struct sptlrpc_flavor flvr; - - /* - * empty string means using current flavor, if which haven't - * been set yet, set it as null. - * - * if flavor has been set previously, check the asking flavor - * must match the existing one. 
- */ - if (vallen == 0) { - if (cli->cl_flvr_mgc.sf_rpc != SPTLRPC_FLVR_INVALID) - return 0; - val = "null"; - vallen = 4; - } - - rc = sptlrpc_parse_flavor(val, &flvr); - if (rc) { - CERROR("invalid sptlrpc flavor %s to MGS\n", - (char *)val); - return rc; - } - - /* - * caller already hold a mutex - */ - if (cli->cl_flvr_mgc.sf_rpc == SPTLRPC_FLVR_INVALID) { - cli->cl_flvr_mgc = flvr; - } else if (memcmp(&cli->cl_flvr_mgc, &flvr, - sizeof(flvr)) != 0) { - char str[20]; - - sptlrpc_flavor2name(&cli->cl_flvr_mgc, - str, sizeof(str)); - LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n", - (char *)val, str); - rc = -EPERM; - } - return rc; - } - - return rc; -} - -static int mgc_get_info(const struct lu_env *env, struct obd_export *exp, - __u32 keylen, void *key, __u32 *vallen, void *val) -{ - int rc = -EINVAL; - - if (KEY_IS(KEY_CONN_DATA)) { - struct obd_import *imp = class_exp2cliimp(exp); - struct obd_connect_data *data = val; - - if (*vallen == sizeof(*data)) { - *data = imp->imp_connect_data; - rc = 0; - } - } - - return rc; -} - -static int mgc_import_event(struct obd_device *obd, - struct obd_import *imp, - enum obd_import_event event) -{ - LASSERT(imp->imp_obd == obd); - CDEBUG(D_MGC, "import event %#x\n", event); - - switch (event) { - case IMP_EVENT_DISCON: - /* MGC imports should not wait for recovery */ - if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV)) - ptlrpc_pinger_ir_down(); - break; - case IMP_EVENT_INACTIVE: - break; - case IMP_EVENT_INVALIDATE: { - struct ldlm_namespace *ns = obd->obd_namespace; - - ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); - break; - } - case IMP_EVENT_ACTIVE: - CDEBUG(D_INFO, "%s: Reactivating import\n", obd->obd_name); - /* Clearing obd_no_recov allows us to continue pinging */ - obd->obd_no_recov = 0; - mgc_notify_active(obd); - if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV)) - ptlrpc_pinger_ir_up(); - break; - case IMP_EVENT_OCD: - break; - case IMP_EVENT_DEACTIVATE: - case 
IMP_EVENT_ACTIVATE: - break; - default: - CERROR("Unknown import event %#x\n", event); - LBUG(); - } - return 0; -} - -enum { - CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT), - CONFIG_READ_NRPAGES = 4 -}; - -static int mgc_apply_recover_logs(struct obd_device *mgc, - struct config_llog_data *cld, - __u64 max_version, - void *data, int datalen, bool mne_swab) -{ - struct config_llog_instance *cfg = &cld->cld_cfg; - struct mgs_nidtbl_entry *entry; - struct lustre_cfg *lcfg; - struct lustre_cfg_bufs bufs; - u64 prev_version = 0; - char *inst; - char *buf; - int bufsz; - int pos; - int rc = 0; - int off = 0; - - LASSERT(cfg->cfg_instance); - LASSERT(cfg->cfg_sb == cfg->cfg_instance); - - inst = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!inst) - return -ENOMEM; - - pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance); - if (pos >= PAGE_SIZE) { - kfree(inst); - return -E2BIG; - } - - ++pos; - buf = inst + pos; - bufsz = PAGE_SIZE - pos; - - while (datalen > 0) { - int entry_len = sizeof(*entry); - int is_ost, i; - struct obd_device *obd; - char *obdname; - char *cname; - char *params; - char *uuid; - size_t len; - - rc = -EINVAL; - if (datalen < sizeof(*entry)) - break; - - entry = (typeof(entry))(data + off); - - /* sanity check */ - if (entry->mne_nid_type != 0) /* only support type 0 for ipv4 */ - break; - if (entry->mne_nid_count == 0) /* at least one nid entry */ - break; - if (entry->mne_nid_size != sizeof(lnet_nid_t)) - break; - - entry_len += entry->mne_nid_count * entry->mne_nid_size; - if (datalen < entry_len) /* must have entry_len at least */ - break; - - /* Keep this swab for normal mixed endian handling. 
LU-1644 */ - if (mne_swab) - lustre_swab_mgs_nidtbl_entry(entry); - if (entry->mne_length > PAGE_SIZE) { - CERROR("MNE too large (%u)\n", entry->mne_length); - break; - } - - if (entry->mne_length < entry_len) - break; - - off += entry->mne_length; - datalen -= entry->mne_length; - if (datalen < 0) - break; - - if (entry->mne_version > max_version) { - CERROR("entry index(%lld) is over max_index(%lld)\n", - entry->mne_version, max_version); - break; - } - - if (prev_version >= entry->mne_version) { - CERROR("index unsorted, prev %lld, now %lld\n", - prev_version, entry->mne_version); - break; - } - prev_version = entry->mne_version; - - /* - * Write a string with format "nid::instance" to - * lustre//--/import. - */ - - is_ost = entry->mne_type == LDD_F_SV_TYPE_OST; - memset(buf, 0, bufsz); - obdname = buf; - pos = 0; - - /* lustre-OST0001-osc- */ - strcpy(obdname, cld->cld_logname); - cname = strrchr(obdname, '-'); - if (!cname) { - CERROR("mgc %s: invalid logname %s\n", - mgc->obd_name, obdname); - break; - } - - pos = cname - obdname; - obdname[pos] = 0; - pos += sprintf(obdname + pos, "-%s%04x", - is_ost ? "OST" : "MDT", entry->mne_index); - - cname = is_ost ? 
"osc" : "mdc"; - pos += sprintf(obdname + pos, "-%s-%s", cname, inst); - lustre_cfg_bufs_reset(&bufs, obdname); - - /* find the obd by obdname */ - obd = class_name2obd(obdname); - if (!obd) { - CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n", - mgc->obd_name, obdname); - rc = 0; - /* this is a safe race, when the ost is starting up...*/ - continue; - } - - /* osc.import = "connection=::" */ - ++pos; - params = buf + pos; - pos += sprintf(params, "%s.import=%s", cname, "connection="); - uuid = buf + pos; - - down_read(&obd->u.cli.cl_sem); - if (!obd->u.cli.cl_import) { - /* client does not connect to the OST yet */ - up_read(&obd->u.cli.cl_sem); - rc = 0; - continue; - } - - /* iterate all nids to find one */ - /* find uuid by nid */ - rc = -ENOENT; - for (i = 0; i < entry->mne_nid_count; i++) { - rc = client_import_find_conn(obd->u.cli.cl_import, - entry->u.nids[0], - (struct obd_uuid *)uuid); - if (!rc) - break; - } - - up_read(&obd->u.cli.cl_sem); - if (rc < 0) { - CERROR("mgc: cannot find uuid by nid %s\n", - libcfs_nid2str(entry->u.nids[0])); - break; - } - - CDEBUG(D_INFO, "Find uuid %s by nid %s\n", - uuid, libcfs_nid2str(entry->u.nids[0])); - - pos += strlen(uuid); - pos += sprintf(buf + pos, "::%u", entry->mne_instance); - LASSERT(pos < bufsz); - - lustre_cfg_bufs_set_string(&bufs, 1, params); - - rc = -ENOMEM; - len = lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen); - lcfg = kzalloc(len, GFP_NOFS); - if (!lcfg) { - rc = -ENOMEM; - break; - } - lustre_cfg_init(lcfg, LCFG_PARAM, &bufs); - - CDEBUG(D_INFO, "ir apply logs %lld/%lld for %s -> %s\n", - prev_version, max_version, obdname, params); - - rc = class_process_config(lcfg); - kfree(lcfg); - if (rc) - CDEBUG(D_INFO, "process config for %s error %d\n", - obdname, rc); - - /* continue, even one with error */ - } - - kfree(inst); - return rc; -} - -/** - * This function is called if this client was notified for target restarting - * by the MGS. 
A CONFIG_READ RPC is going to send to fetch recovery logs. - */ -static int mgc_process_recover_log(struct obd_device *obd, - struct config_llog_data *cld) -{ - struct ptlrpc_request *req = NULL; - struct config_llog_instance *cfg = &cld->cld_cfg; - struct mgs_config_body *body; - struct mgs_config_res *res; - struct ptlrpc_bulk_desc *desc; - struct page **pages; - int nrpages; - bool eof = true; - bool mne_swab; - int i; - int ealen; - int rc; - - /* allocate buffer for bulk transfer. - * if this is the first time for this mgs to read logs, - * CONFIG_READ_NRPAGES_INIT will be used since it will read all logs - * once; otherwise, it only reads increment of logs, this should be - * small and CONFIG_READ_NRPAGES will be used. - */ - nrpages = CONFIG_READ_NRPAGES; - if (cfg->cfg_last_idx == 0) /* the first time */ - nrpages = CONFIG_READ_NRPAGES_INIT; - - pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL); - if (!pages) { - rc = -ENOMEM; - goto out; - } - - for (i = 0; i < nrpages; i++) { - pages[i] = alloc_page(GFP_KERNEL); - if (!pages[i]) { - rc = -ENOMEM; - goto out; - } - } - -again: - LASSERT(cld_is_recover(cld)); - LASSERT(mutex_is_locked(&cld->cld_lock)); - req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp), - &RQF_MGS_CONFIG_READ); - if (!req) { - rc = -ENOMEM; - goto out; - } - - rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ); - if (rc) - goto out; - - /* pack request */ - body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY); - LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname)); - if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name)) - >= sizeof(body->mcb_name)) { - rc = -E2BIG; - goto out; - } - body->mcb_offset = cfg->cfg_last_idx + 1; - body->mcb_type = cld->cld_type; - body->mcb_bits = PAGE_SHIFT; - body->mcb_units = nrpages; - - /* allocate bulk transfer descriptor */ - desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, - PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV, - MGS_BULK_PORTAL, - 
&ptlrpc_bulk_kiov_pin_ops); - if (!desc) { - rc = -ENOMEM; - goto out; - } - - for (i = 0; i < nrpages; i++) - desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE); - - ptlrpc_request_set_replen(req); - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES); - if (res->mcr_size < res->mcr_offset) { - rc = -EINVAL; - goto out; - } - - /* always update the index even though it might have errors with - * handling the recover logs - */ - cfg->cfg_last_idx = res->mcr_offset; - eof = res->mcr_offset == res->mcr_size; - - CDEBUG(D_INFO, "Latest version %lld, more %d.\n", - res->mcr_offset, eof == false); - - ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0); - if (ealen < 0) { - rc = ealen; - goto out; - } - - if (ealen > nrpages << PAGE_SHIFT) { - rc = -EINVAL; - goto out; - } - - if (ealen == 0) { /* no logs transferred */ - if (!eof) - rc = -EINVAL; - goto out; - } - - mne_swab = !!ptlrpc_rep_need_swab(req); -#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE - /* This import flag means the server did an extra swab of IR MNE - * records (fixed in LU-1252), reverse it here if needed. 
LU-1644 - */ - if (unlikely(req->rq_import->imp_need_mne_swab)) - mne_swab = !mne_swab; -#endif - - for (i = 0; i < nrpages && ealen > 0; i++) { - int rc2; - void *ptr; - - ptr = kmap(pages[i]); - rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, - min_t(int, ealen, PAGE_SIZE), - mne_swab); - kunmap(pages[i]); - if (rc2 < 0) { - CWARN("Process recover log %s error %d\n", - cld->cld_logname, rc2); - break; - } - - ealen -= PAGE_SIZE; - } - -out: - if (req) - ptlrpc_req_finished(req); - - if (rc == 0 && !eof) - goto again; - - if (pages) { - for (i = 0; i < nrpages; i++) { - if (!pages[i]) - break; - __free_page(pages[i]); - } - kfree(pages); - } - return rc; -} - -/* local_only means it cannot get remote llogs */ -static int mgc_process_cfg_log(struct obd_device *mgc, - struct config_llog_data *cld, int local_only) -{ - struct llog_ctxt *ctxt; - struct lustre_sb_info *lsi = NULL; - int rc = 0; - bool sptlrpc_started = false; - struct lu_env *env; - - LASSERT(cld); - LASSERT(mutex_is_locked(&cld->cld_lock)); - - /* - * local copy of sptlrpc log is controlled elsewhere, don't try to - * read it up here. - */ - if (cld_is_sptlrpc(cld) && local_only) - return 0; - - if (cld->cld_cfg.cfg_sb) - lsi = s2lsi(cld->cld_cfg.cfg_sb); - - env = kzalloc(sizeof(*env), GFP_KERNEL); - if (!env) - return -ENOMEM; - - rc = lu_env_init(env, LCT_MG_THREAD); - if (rc) - goto out_free; - - ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT); - LASSERT(ctxt); - - if (local_only) /* no local log at client side */ { - rc = -EIO; - goto out_pop; - } - - if (cld_is_sptlrpc(cld)) { - sptlrpc_conf_log_update_begin(cld->cld_logname); - sptlrpc_started = true; - } - - /* logname and instance info should be the same, so use our - * copy of the instance for the update. The cfg_last_idx will - * be updated here. - */ - rc = class_config_parse_llog(env, ctxt, cld->cld_logname, - &cld->cld_cfg); - -out_pop: - __llog_ctxt_put(env, ctxt); - - /* - * update settings on existing OBDs. 
doing it inside - * of llog_process_lock so no device is attaching/detaching - * in parallel. - * the logname must be -sptlrpc - */ - if (sptlrpc_started) { - LASSERT(cld_is_sptlrpc(cld)); - sptlrpc_conf_log_update_end(cld->cld_logname); - class_notify_sptlrpc_conf(cld->cld_logname, - strlen(cld->cld_logname) - - strlen("-sptlrpc")); - } - - lu_env_fini(env); -out_free: - kfree(env); - return rc; -} - -static bool mgc_import_in_recovery(struct obd_import *imp) -{ - bool in_recovery = true; - - spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_FULL || - imp->imp_state == LUSTRE_IMP_CLOSED) - in_recovery = false; - spin_unlock(&imp->imp_lock); - - return in_recovery; -} - -/** - * Get a configuration log from the MGS and process it. - * - * This function is called for both clients and servers to process the - * configuration log from the MGS. The MGC enqueues a DLM lock on the - * log from the MGS, and if the lock gets revoked the MGC will be notified - * by the lock cancellation callback that the config log has changed, - * and will enqueue another MGS lock on it, and then continue processing - * the new additions to the end of the log. - * - * Since the MGC import is not replayable, if the import is being evicted - * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry to process - * the log until recovery is finished or the import is closed. - * - * Make a local copy of the log before parsing it if appropriate (non-MGS - * server) so that the server can start even when the MGS is down. - * - * There shouldn't be multiple processes running process_log at once -- - * sounds like badness. It actually might be fine, as long as they're not - * trying to update from the same log simultaneously, in which case we - * should use a per-log semaphore instead of cld_lock. 
- * - * \param[in] mgc MGC device by which to fetch the configuration log - * \param[in] cld log processing state (stored in lock callback data) - * - * \retval 0 on success - * \retval negative errno on failure - */ -int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) -{ - struct lustre_handle lockh = { 0 }; - __u64 flags = LDLM_FL_NO_LRU; - bool retry = false; - int rc = 0, rcl; - - LASSERT(cld); - - /* I don't want multiple processes running process_log at once -- - * sounds like badness. It actually might be fine, as long as - * we're not trying to update from the same log - * simultaneously (in which case we should use a per-log sem.) - */ -restart: - mutex_lock(&cld->cld_lock); - if (cld->cld_stopping) { - mutex_unlock(&cld->cld_lock); - return 0; - } - - OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20); - - CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname, - cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1); - - /* Get the cfg lock on the llog */ - rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL, - LCK_CR, &flags, NULL, NULL, NULL, - cld, 0, NULL, &lockh); - if (rcl == 0) { - /* Get the cld, it will be released in mgc_blocking_ast. 
*/ - config_log_get(cld); - rc = ldlm_lock_set_data(&lockh, (void *)cld); - LASSERT(rc == 0); - } else { - CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl); - - if (rcl == -ESHUTDOWN && - atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) { - struct obd_import *imp; - - mutex_unlock(&cld->cld_lock); - imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp); - - /* - * Let's force the pinger, and wait the import to be - * connected, note: since mgc import is non-replayable, - * and even the import state is disconnected, it does - * not mean the "recovery" is stopped, so we will keep - * waitting until timeout or the import state is - * FULL or closed - */ - ptlrpc_pinger_force(imp); - - wait_event_idle_timeout(imp->imp_recovery_waitq, - !mgc_import_in_recovery(imp), - obd_timeout * HZ); - - if (imp->imp_state == LUSTRE_IMP_FULL) { - retry = true; - goto restart; - } else { - mutex_lock(&cld->cld_lock); - spin_lock(&config_list_lock); - cld->cld_lostlock = 1; - spin_unlock(&config_list_lock); - } - } else { - /* mark cld_lostlock so that it will requeue - * after MGC becomes available. - */ - spin_lock(&config_list_lock); - cld->cld_lostlock = 1; - spin_unlock(&config_list_lock); - } - } - - if (cld_is_recover(cld)) { - rc = 0; /* this is not a fatal error for recover log */ - if (!rcl) { - rc = mgc_process_recover_log(mgc, cld); - if (rc) { - CERROR("%s: recover log %s failed: rc = %d not fatal.\n", - mgc->obd_name, cld->cld_logname, rc); - rc = 0; - spin_lock(&config_list_lock); - cld->cld_lostlock = 1; - spin_unlock(&config_list_lock); - } - } - } else { - rc = mgc_process_cfg_log(mgc, cld, rcl != 0); - } - - CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n", - mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc); - - mutex_unlock(&cld->cld_lock); - - /* Now drop the lock so MGS can revoke it */ - if (!rcl) - ldlm_lock_decref(&lockh, LCK_CR); - - return rc; -} - -/** Called from lustre_process_log. 
- * LCFG_LOG_START gets the config log from the MGS, processes it to start - * any services, and adds it to the list logs to watch (follow). - */ -static int mgc_process_config(struct obd_device *obd, u32 len, void *buf) -{ - struct lustre_cfg *lcfg = buf; - struct config_llog_instance *cfg = NULL; - char *logname; - int rc = 0; - - switch (lcfg->lcfg_command) { - case LCFG_LOV_ADD_OBD: { - /* Overloading this cfg command: register a new target */ - struct mgs_target_info *mti; - - if (LUSTRE_CFG_BUFLEN(lcfg, 1) != - sizeof(struct mgs_target_info)) { - rc = -EINVAL; - goto out; - } - - mti = (struct mgs_target_info *)lustre_cfg_buf(lcfg, 1); - CDEBUG(D_MGC, "add_target %s %#x\n", - mti->mti_svname, mti->mti_flags); - rc = mgc_target_register(obd->u.cli.cl_mgc_mgsexp, mti); - break; - } - case LCFG_LOV_DEL_OBD: - /* Unregister has no meaning at the moment. */ - CERROR("lov_del_obd unimplemented\n"); - rc = -ENOSYS; - break; - case LCFG_SPTLRPC_CONF: { - rc = sptlrpc_process_config(lcfg); - break; - } - case LCFG_LOG_START: { - struct config_llog_data *cld; - struct super_block *sb; - - logname = lustre_cfg_string(lcfg, 1); - cfg = (struct config_llog_instance *)lustre_cfg_buf(lcfg, 2); - sb = *(struct super_block **)lustre_cfg_buf(lcfg, 3); - - CDEBUG(D_MGC, "parse_log %s from %d\n", logname, - cfg->cfg_last_idx); - - /* We're only called through here on the initial mount */ - cld = config_log_add(obd, logname, cfg, sb); - if (IS_ERR(cld)) { - rc = PTR_ERR(cld); - break; - } - - /* COMPAT_146 */ - /* FIXME only set this for old logs! 
Right now this forces - * us to always skip the "inside markers" check - */ - cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146; - - rc = mgc_process_log(obd, cld); - if (rc == 0 && cld->cld_recover) { - if (OCD_HAS_FLAG(&obd->u.cli.cl_import-> - imp_connect_data, IMP_RECOV)) { - rc = mgc_process_log(obd, cld->cld_recover); - } else { - struct config_llog_data *cir; - - mutex_lock(&cld->cld_lock); - cir = cld->cld_recover; - cld->cld_recover = NULL; - mutex_unlock(&cld->cld_lock); - config_log_put(cir); - } - - if (rc) - CERROR("Cannot process recover llog %d\n", rc); - } - - if (rc == 0 && cld->cld_params) { - rc = mgc_process_log(obd, cld->cld_params); - if (rc == -ENOENT) { - CDEBUG(D_MGC, - "There is no params config file yet\n"); - rc = 0; - } - /* params log is optional */ - if (rc) - CERROR( - "%s: can't process params llog: rc = %d\n", - obd->obd_name, rc); - } - - break; - } - case LCFG_LOG_END: { - logname = lustre_cfg_string(lcfg, 1); - - if (lcfg->lcfg_bufcount >= 2) - cfg = (struct config_llog_instance *)lustre_cfg_buf( - lcfg, 2); - rc = config_log_end(logname, cfg); - break; - } - default: { - CERROR("Unknown command: %d\n", lcfg->lcfg_command); - rc = -EINVAL; - goto out; - } - } -out: - return rc; -} - -static struct obd_ops mgc_obd_ops = { - .owner = THIS_MODULE, - .setup = mgc_setup, - .precleanup = mgc_precleanup, - .cleanup = mgc_cleanup, - .add_conn = client_import_add_conn, - .del_conn = client_import_del_conn, - .connect = client_connect_import, - .disconnect = client_disconnect_export, - .set_info_async = mgc_set_info_async, - .get_info = mgc_get_info, - .import_event = mgc_import_event, - .process_config = mgc_process_config, -}; - -static int __init mgc_init(void) -{ - int rc; - - rc = libcfs_setup(); - if (rc) - return rc; - - return class_register_type(&mgc_obd_ops, NULL, - LUSTRE_MGC_NAME, NULL); -} - -static void /*__exit*/ mgc_exit(void) -{ - class_unregister_type(LUSTRE_MGC_NAME); -} - -MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("Lustre Management Client"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(mgc_init); -module_exit(mgc_exit); diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile deleted file mode 100644 index e3fa9acff4c4..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += obdclass.o - -obdclass-y := linux/linux-module.o linux/linux-sysctl.o \ - llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \ - genops.o uuid.o lprocfs_status.o lprocfs_counters.o \ - lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \ - obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \ - cl_object.o cl_page.o cl_lock.o cl_io.o kernelcomm.o diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h deleted file mode 100644 index a0db830ca841..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Internal cl interfaces. - * - * Author: Nikita Danilov - */ -#ifndef _CL_INTERNAL_H -#define _CL_INTERNAL_H - -#define CLT_PVEC_SIZE (14) - -/** - * Possible levels of the nesting. Currently this is 2: there are "top" - * entities (files, extent locks), and "sub" entities (stripes and stripe - * locks). This is used only for debugging counters right now. - */ -enum clt_nesting_level { - CNL_TOP, - CNL_SUB, - CNL_NR -}; - -/** - * Thread local state internal for generic cl-code. - */ -struct cl_thread_info { - /* - * Common fields. - */ - struct cl_io clt_io; - struct cl_2queue clt_queue; - - /* - * Fields used by cl_lock.c - */ - struct cl_lock_descr clt_descr; - struct cl_page_list clt_list; - /** @} debugging */ - - /* - * Fields used by cl_page.c - */ - struct cl_page *clt_pvec[CLT_PVEC_SIZE]; - - /* - * Fields used by cl_io.c - */ - /** - * Pointer to the topmost ongoing IO in this thread. - */ - struct cl_io *clt_current_io; - /** - * Used for submitting a sync io. - */ - struct cl_sync_io clt_anchor; - /** - * Fields used by cl_lock_discard_pages(). 
- */ - pgoff_t clt_next_index; - pgoff_t clt_fn_index; /* first non-overlapped index */ -}; - -struct cl_thread_info *cl_env_info(const struct lu_env *env); - -#endif /* _CL_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c deleted file mode 100644 index fcdae6029258..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/cl_io.c +++ /dev/null @@ -1,1151 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Client IO. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include -#include -#include "cl_internal.h" - -/***************************************************************************** - * - * cl_io interface. 
- * - */ - -#define cl_io_for_each(slice, io) \ - list_for_each_entry((slice), &io->ci_layers, cis_linkage) -#define cl_io_for_each_reverse(slice, io) \ - list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage) - -static inline int cl_io_type_is_valid(enum cl_io_type type) -{ - return CIT_READ <= type && type < CIT_OP_NR; -} - -static inline int cl_io_is_loopable(const struct cl_io *io) -{ - return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC; -} - -/** - * Returns true iff there is an IO ongoing in the given environment. - */ -int cl_io_is_going(const struct lu_env *env) -{ - return cl_env_info(env)->clt_current_io != NULL; -} - -/** - * cl_io invariant that holds at all times when exported cl_io_*() functions - * are entered and left. - */ -static int cl_io_invariant(const struct cl_io *io) -{ - struct cl_io *up; - - up = io->ci_parent; - return - /* - * io can own pages only when it is ongoing. Sub-io might - * still be in CIS_LOCKED state when top-io is in - * CIS_IO_GOING. - */ - ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING || - (io->ci_state == CIS_LOCKED && up)); -} - -/** - * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top. - */ -void cl_io_fini(const struct lu_env *env, struct cl_io *io) -{ - struct cl_io_slice *slice; - struct cl_thread_info *info; - - LINVRNT(cl_io_type_is_valid(io->ci_type)); - LINVRNT(cl_io_invariant(io)); - - while (!list_empty(&io->ci_layers)) { - slice = container_of(io->ci_layers.prev, struct cl_io_slice, - cis_linkage); - list_del_init(&slice->cis_linkage); - if (slice->cis_iop->op[io->ci_type].cio_fini) - slice->cis_iop->op[io->ci_type].cio_fini(env, slice); - /* - * Invalidate slice to catch use after free. This assumes that - * slices are allocated within session and can be touched - * after ->cio_fini() returns. 
- */ - slice->cis_io = NULL; - } - io->ci_state = CIS_FINI; - info = cl_env_info(env); - if (info->clt_current_io == io) - info->clt_current_io = NULL; - - /* sanity check for layout change */ - switch (io->ci_type) { - case CIT_READ: - case CIT_WRITE: - case CIT_DATA_VERSION: - break; - case CIT_FAULT: - break; - case CIT_FSYNC: - LASSERT(!io->ci_need_restart); - break; - case CIT_SETATTR: - case CIT_MISC: - /* Check ignore layout change conf */ - LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout, - !io->ci_need_restart)); - break; - default: - LBUG(); - } -} -EXPORT_SYMBOL(cl_io_fini); - -static int cl_io_init0(const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj) -{ - struct cl_object *scan; - int result; - - LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI); - LINVRNT(cl_io_type_is_valid(iot)); - LINVRNT(cl_io_invariant(io)); - - io->ci_type = iot; - INIT_LIST_HEAD(&io->ci_lockset.cls_todo); - INIT_LIST_HEAD(&io->ci_lockset.cls_done); - INIT_LIST_HEAD(&io->ci_layers); - - result = 0; - cl_object_for_each(scan, obj) { - if (scan->co_ops->coo_io_init) { - result = scan->co_ops->coo_io_init(env, scan, io); - if (result != 0) - break; - } - } - if (result == 0) - io->ci_state = CIS_INIT; - return result; -} - -/** - * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom. - * - * \pre obj != cl_object_top(obj) - */ -int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj) -{ - struct cl_thread_info *info = cl_env_info(env); - - LASSERT(obj != cl_object_top(obj)); - if (!info->clt_current_io) - info->clt_current_io = io; - return cl_io_init0(env, io, iot, obj); -} -EXPORT_SYMBOL(cl_io_sub_init); - -/** - * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom. - * - * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter - * what the latter returned. 
- * - * \pre obj == cl_object_top(obj) - * \pre cl_io_type_is_valid(iot) - * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot - */ -int cl_io_init(const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj) -{ - struct cl_thread_info *info = cl_env_info(env); - - LASSERT(obj == cl_object_top(obj)); - LASSERT(!info->clt_current_io); - - info->clt_current_io = io; - return cl_io_init0(env, io, iot, obj); -} -EXPORT_SYMBOL(cl_io_init); - -/** - * Initialize read or write io. - * - * \pre iot == CIT_READ || iot == CIT_WRITE - */ -int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, loff_t pos, size_t count) -{ - LINVRNT(iot == CIT_READ || iot == CIT_WRITE); - LINVRNT(io->ci_obj); - - LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu, - "io range: %u [%llu, %llu) %u %u\n", - iot, (__u64)pos, (__u64)pos + count, - io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append); - io->u.ci_rw.crw_pos = pos; - io->u.ci_rw.crw_count = count; - return cl_io_init(env, io, iot, io->ci_obj); -} -EXPORT_SYMBOL(cl_io_rw_init); - -static int cl_lock_descr_sort(const struct cl_lock_descr *d0, - const struct cl_lock_descr *d1) -{ - return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu), - lu_object_fid(&d1->cld_obj->co_lu)); -} - -/* - * Sort locks in lexicographical order of their (fid, start-offset) pairs. - */ -static void cl_io_locks_sort(struct cl_io *io) -{ - int done = 0; - - /* hidden treasure: bubble sort for now. */ - do { - struct cl_io_lock_link *curr; - struct cl_io_lock_link *prev; - struct cl_io_lock_link *temp; - - done = 1; - prev = NULL; - - list_for_each_entry_safe(curr, temp, - &io->ci_lockset.cls_todo, - cill_linkage) { - if (prev) { - switch (cl_lock_descr_sort(&prev->cill_descr, - &curr->cill_descr)) { - case 0: - /* - * IMPOSSIBLE: Identical locks are - * already removed at - * this point. 
- */ - default: - LBUG(); - case 1: - list_move_tail(&curr->cill_linkage, - &prev->cill_linkage); - done = 0; - continue; /* don't change prev: it's - * still "previous" - */ - case -1: /* already in order */ - break; - } - } - prev = curr; - } - } while (!done); -} - -static void cl_lock_descr_merge(struct cl_lock_descr *d0, - const struct cl_lock_descr *d1) -{ - d0->cld_start = min(d0->cld_start, d1->cld_start); - d0->cld_end = max(d0->cld_end, d1->cld_end); - - if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE) - d0->cld_mode = CLM_WRITE; - - if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP) - d0->cld_mode = CLM_GROUP; -} - -static int cl_lockset_merge(const struct cl_lockset *set, - const struct cl_lock_descr *need) -{ - struct cl_io_lock_link *scan; - - list_for_each_entry(scan, &set->cls_todo, cill_linkage) { - if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj)) - continue; - - /* Merge locks for the same object because ldlm lock server - * may expand the lock extent, otherwise there is a deadlock - * case if two conflicted locks are queueud for the same object - * and lock server expands one lock to overlap the another. - * The side effect is that it can generate a multi-stripe lock - * that may cause casacading problem - */ - cl_lock_descr_merge(&scan->cill_descr, need); - CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", - scan->cill_descr.cld_mode, scan->cill_descr.cld_start, - scan->cill_descr.cld_end); - return 1; - } - return 0; -} - -static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, - struct cl_lockset *set) -{ - struct cl_io_lock_link *link; - struct cl_io_lock_link *temp; - int result; - - result = 0; - list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) { - result = cl_lock_request(env, io, &link->cill_lock); - if (result < 0) - break; - - list_move(&link->cill_linkage, &set->cls_done); - } - return result; -} - -/** - * Takes locks necessary for the current iteration of io. 
- * - * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required - * by layers for the current iteration. Then sort locks (to avoid dead-locks), - * and acquire them. - */ -int cl_io_lock(const struct lu_env *env, struct cl_io *io) -{ - const struct cl_io_slice *scan; - int result = 0; - - LINVRNT(cl_io_is_loopable(io)); - LINVRNT(io->ci_state == CIS_IT_STARTED); - LINVRNT(cl_io_invariant(io)); - - cl_io_for_each(scan, io) { - if (!scan->cis_iop->op[io->ci_type].cio_lock) - continue; - result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan); - if (result != 0) - break; - } - if (result == 0) { - cl_io_locks_sort(io); - result = cl_lockset_lock(env, io, &io->ci_lockset); - } - if (result != 0) - cl_io_unlock(env, io); - else - io->ci_state = CIS_LOCKED; - return result; -} -EXPORT_SYMBOL(cl_io_lock); - -/** - * Release locks takes by io. - */ -void cl_io_unlock(const struct lu_env *env, struct cl_io *io) -{ - struct cl_lockset *set; - struct cl_io_lock_link *link; - struct cl_io_lock_link *temp; - const struct cl_io_slice *scan; - - LASSERT(cl_io_is_loopable(io)); - LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED); - LINVRNT(cl_io_invariant(io)); - - set = &io->ci_lockset; - - list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) { - list_del_init(&link->cill_linkage); - if (link->cill_fini) - link->cill_fini(env, link); - } - - list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) { - list_del_init(&link->cill_linkage); - cl_lock_release(env, &link->cill_lock); - if (link->cill_fini) - link->cill_fini(env, link); - } - - cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_unlock) - scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); - } - io->ci_state = CIS_UNLOCKED; -} -EXPORT_SYMBOL(cl_io_unlock); - -/** - * Prepares next iteration of io. - * - * Calls cl_io_operations::cio_iter_init() top-to-bottom. 
This exists to give - * layers a chance to modify io parameters, e.g., so that lov can restrict io - * to a single stripe. - */ -int cl_io_iter_init(const struct lu_env *env, struct cl_io *io) -{ - const struct cl_io_slice *scan; - int result; - - LINVRNT(cl_io_is_loopable(io)); - LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED); - LINVRNT(cl_io_invariant(io)); - - result = 0; - cl_io_for_each(scan, io) { - if (!scan->cis_iop->op[io->ci_type].cio_iter_init) - continue; - result = scan->cis_iop->op[io->ci_type].cio_iter_init(env, - scan); - if (result != 0) - break; - } - if (result == 0) - io->ci_state = CIS_IT_STARTED; - return result; -} -EXPORT_SYMBOL(cl_io_iter_init); - -/** - * Finalizes io iteration. - * - * Calls cl_io_operations::cio_iter_fini() bottom-to-top. - */ -void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io) -{ - const struct cl_io_slice *scan; - - LINVRNT(cl_io_is_loopable(io)); - LINVRNT(io->ci_state == CIS_UNLOCKED); - LINVRNT(cl_io_invariant(io)); - - cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_iter_fini) - scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan); - } - io->ci_state = CIS_IT_ENDED; -} -EXPORT_SYMBOL(cl_io_iter_fini); - -/** - * Records that read or write io progressed \a nob bytes forward. - */ -static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, - size_t nob) -{ - const struct cl_io_slice *scan; - - LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE || - nob == 0); - LINVRNT(cl_io_is_loopable(io)); - LINVRNT(cl_io_invariant(io)); - - io->u.ci_rw.crw_pos += nob; - io->u.ci_rw.crw_count -= nob; - - /* layers have to be notified. */ - cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_advance) - scan->cis_iop->op[io->ci_type].cio_advance(env, scan, - nob); - } -} - -/** - * Adds a lock to a lockset. 
- */ -int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, - struct cl_io_lock_link *link) -{ - int result; - - if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) { - result = 1; - } else { - list_add(&link->cill_linkage, &io->ci_lockset.cls_todo); - result = 0; - } - return result; -} -EXPORT_SYMBOL(cl_io_lock_add); - -static void cl_free_io_lock_link(const struct lu_env *env, - struct cl_io_lock_link *link) -{ - kfree(link); -} - -/** - * Allocates new lock link, and uses it to add a lock to a lockset. - */ -int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr) -{ - struct cl_io_lock_link *link; - int result; - - link = kzalloc(sizeof(*link), GFP_NOFS); - if (link) { - link->cill_descr = *descr; - link->cill_fini = cl_free_io_lock_link; - result = cl_io_lock_add(env, io, link); - if (result) /* lock match */ - link->cill_fini(env, link); - } else { - result = -ENOMEM; - } - - return result; -} -EXPORT_SYMBOL(cl_io_lock_alloc_add); - -/** - * Starts io by calling cl_io_operations::cio_start() top-to-bottom. - */ -int cl_io_start(const struct lu_env *env, struct cl_io *io) -{ - const struct cl_io_slice *scan; - int result = 0; - - LINVRNT(cl_io_is_loopable(io)); - LINVRNT(io->ci_state == CIS_LOCKED); - LINVRNT(cl_io_invariant(io)); - - io->ci_state = CIS_IO_GOING; - cl_io_for_each(scan, io) { - if (!scan->cis_iop->op[io->ci_type].cio_start) - continue; - result = scan->cis_iop->op[io->ci_type].cio_start(env, scan); - if (result != 0) - break; - } - if (result >= 0) - result = 0; - return result; -} -EXPORT_SYMBOL(cl_io_start); - -/** - * Wait until current io iteration is finished by calling - * cl_io_operations::cio_end() bottom-to-top. 
- */ -void cl_io_end(const struct lu_env *env, struct cl_io *io) -{ - const struct cl_io_slice *scan; - - LINVRNT(cl_io_is_loopable(io)); - LINVRNT(io->ci_state == CIS_IO_GOING); - LINVRNT(cl_io_invariant(io)); - - cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_end) - scan->cis_iop->op[io->ci_type].cio_end(env, scan); - /* TODO: error handling. */ - } - io->ci_state = CIS_IO_FINISHED; -} -EXPORT_SYMBOL(cl_io_end); - -/** - * Called by read io, to decide the readahead extent - * - * \see cl_io_operations::cio_read_ahead() - */ -int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io, - pgoff_t start, struct cl_read_ahead *ra) -{ - const struct cl_io_slice *scan; - int result = 0; - - LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT); - LINVRNT(cl_io_invariant(io)); - - cl_io_for_each(scan, io) { - if (!scan->cis_iop->cio_read_ahead) - continue; - - result = scan->cis_iop->cio_read_ahead(env, scan, start, ra); - if (result) - break; - } - return result > 0 ? 0 : result; -} -EXPORT_SYMBOL(cl_io_read_ahead); - -/** - * Commit a list of contiguous pages into writeback cache. - * - * \returns 0 if all pages committed, or errcode if error occurred. - * \see cl_io_operations::cio_commit_async() - */ -int cl_io_commit_async(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *queue, int from, int to, - cl_commit_cbt cb) -{ - const struct cl_io_slice *scan; - int result = 0; - - cl_io_for_each(scan, io) { - if (!scan->cis_iop->cio_commit_async) - continue; - result = scan->cis_iop->cio_commit_async(env, scan, queue, - from, to, cb); - if (result != 0) - break; - } - return result; -} -EXPORT_SYMBOL(cl_io_commit_async); - -/** - * Submits a list of pages for immediate io. - * - * After the function gets returned, The submitted pages are moved to - * queue->c2_qout queue, and queue->c2_qin contain both the pages don't need - * to be submitted, and the pages are errant to submit. 
- * - * \returns 0 if at least one page was submitted, error code otherwise. - * \see cl_io_operations::cio_submit() - */ -int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, - enum cl_req_type crt, struct cl_2queue *queue) -{ - const struct cl_io_slice *scan; - int result = 0; - - cl_io_for_each(scan, io) { - if (!scan->cis_iop->cio_submit) - continue; - result = scan->cis_iop->cio_submit(env, scan, crt, queue); - if (result != 0) - break; - } - /* - * If ->cio_submit() failed, no pages were sent. - */ - LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages))); - return result; -} -EXPORT_SYMBOL(cl_io_submit_rw); - -static void cl_page_list_assume(const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); - -/** - * Submit a sync_io and wait for the IO to be finished, or error happens. - * If \a timeout is zero, it means to wait for the IO unconditionally. - */ -int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - long timeout) -{ - struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor; - struct cl_page *pg; - int rc; - - cl_page_list_for_each(pg, &queue->c2_qin) { - LASSERT(!pg->cp_sync_io); - pg->cp_sync_io = anchor; - } - - cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end); - rc = cl_io_submit_rw(env, io, iot, queue); - if (rc == 0) { - /* - * If some pages weren't sent for any reason (e.g., - * read found up-to-date pages in the cache, or write found - * clean pages), count them as completed to avoid infinite - * wait. - */ - cl_page_list_for_each(pg, &queue->c2_qin) { - pg->cp_sync_io = NULL; - cl_sync_io_note(env, anchor, 1); - } - - /* wait for the IO to be finished. 
*/ - rc = cl_sync_io_wait(env, anchor, timeout); - cl_page_list_assume(env, io, &queue->c2_qout); - } else { - LASSERT(list_empty(&queue->c2_qout.pl_pages)); - cl_page_list_for_each(pg, &queue->c2_qin) - pg->cp_sync_io = NULL; - } - return rc; -} -EXPORT_SYMBOL(cl_io_submit_sync); - -/** - * Main io loop. - * - * Pumps io through iterations calling - * - * - cl_io_iter_init() - * - * - cl_io_lock() - * - * - cl_io_start() - * - * - cl_io_end() - * - * - cl_io_unlock() - * - * - cl_io_iter_fini() - * - * repeatedly until there is no more io to do. - */ -int cl_io_loop(const struct lu_env *env, struct cl_io *io) -{ - int result = 0; - - LINVRNT(cl_io_is_loopable(io)); - - do { - size_t nob; - - io->ci_continue = 0; - result = cl_io_iter_init(env, io); - if (result == 0) { - nob = io->ci_nob; - result = cl_io_lock(env, io); - if (result == 0) { - /* - * Notify layers that locks has been taken, - * and do actual i/o. - * - * - llite: kms, short read; - * - llite: generic_file_read(); - */ - result = cl_io_start(env, io); - /* - * Send any remaining pending - * io, etc. - * - * - llite: ll_rw_stats_tally. - */ - cl_io_end(env, io); - cl_io_unlock(env, io); - cl_io_rw_advance(env, io, io->ci_nob - nob); - } - } - cl_io_iter_fini(env, io); - } while (result == 0 && io->ci_continue); - if (result == 0) - result = io->ci_result; - return result < 0 ? result : 0; -} -EXPORT_SYMBOL(cl_io_loop); - -/** - * Adds io slice to the cl_io. - * - * This is called by cl_object_operations::coo_io_init() methods to add a - * per-layer state to the io. New state is added at the end of - * cl_io::ci_layers list, that is, it is at the bottom of the stack. 
- * - * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add() - */ -void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, - struct cl_object *obj, - const struct cl_io_operations *ops) -{ - struct list_head *linkage = &slice->cis_linkage; - - LASSERT((!linkage->prev && !linkage->next) || - list_empty(linkage)); - - list_add_tail(linkage, &io->ci_layers); - slice->cis_io = io; - slice->cis_obj = obj; - slice->cis_iop = ops; -} -EXPORT_SYMBOL(cl_io_slice_add); - -/** - * Initializes page list. - */ -void cl_page_list_init(struct cl_page_list *plist) -{ - plist->pl_nr = 0; - INIT_LIST_HEAD(&plist->pl_pages); - plist->pl_owner = current; -} -EXPORT_SYMBOL(cl_page_list_init); - -/** - * Adds a page to a page list. - */ -void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page) -{ - /* it would be better to check that page is owned by "current" io, but - * it is not passed here. - */ - LASSERT(page->cp_owner); - LINVRNT(plist->pl_owner == current); - - LASSERT(list_empty(&page->cp_batch)); - list_add_tail(&page->cp_batch, &plist->pl_pages); - ++plist->pl_nr; - lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist); - cl_page_get(page); -} -EXPORT_SYMBOL(cl_page_list_add); - -/** - * Removes a page from a page list. - */ -void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist, - struct cl_page *page) -{ - LASSERT(plist->pl_nr > 0); - LASSERT(cl_page_is_vmlocked(env, page)); - LINVRNT(plist->pl_owner == current); - - list_del_init(&page->cp_batch); - --plist->pl_nr; - lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist); - cl_page_put(env, page); -} -EXPORT_SYMBOL(cl_page_list_del); - -/** - * Moves a page from one page list to another. 
- */ -void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src, - struct cl_page *page) -{ - LASSERT(src->pl_nr > 0); - LINVRNT(dst->pl_owner == current); - LINVRNT(src->pl_owner == current); - - list_move_tail(&page->cp_batch, &dst->pl_pages); - --src->pl_nr; - ++dst->pl_nr; - lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue", - src, dst); -} -EXPORT_SYMBOL(cl_page_list_move); - -/** - * Moves a page from one page list to the head of another list. - */ -void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src, - struct cl_page *page) -{ - LASSERT(src->pl_nr > 0); - LINVRNT(dst->pl_owner == current); - LINVRNT(src->pl_owner == current); - - list_move(&page->cp_batch, &dst->pl_pages); - --src->pl_nr; - ++dst->pl_nr; - lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue", - src, dst); -} -EXPORT_SYMBOL(cl_page_list_move_head); - -/** - * splice the cl_page_list, just as list head does - */ -void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head) -{ - struct cl_page *page; - struct cl_page *tmp; - - LINVRNT(list->pl_owner == current); - LINVRNT(head->pl_owner == current); - - cl_page_list_for_each_safe(page, tmp, list) - cl_page_list_move(head, list, page); -} -EXPORT_SYMBOL(cl_page_list_splice); - - -/** - * Disowns pages in a queue. - */ -void cl_page_list_disown(const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist) -{ - struct cl_page *page; - struct cl_page *temp; - - LINVRNT(plist->pl_owner == current); - - cl_page_list_for_each_safe(page, temp, plist) { - LASSERT(plist->pl_nr > 0); - - list_del_init(&page->cp_batch); - --plist->pl_nr; - /* - * cl_page_disown0 rather than usual cl_page_disown() is used, - * because pages are possibly in CPS_FREEING state already due - * to the call to cl_page_list_discard(). - */ - /* - * XXX cl_page_disown0() will fail if page is not locked. 
- */ - cl_page_disown0(env, io, page); - lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", - plist); - cl_page_put(env, page); - } -} -EXPORT_SYMBOL(cl_page_list_disown); - -/** - * Releases pages from queue. - */ -void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist) -{ - struct cl_page *page; - struct cl_page *temp; - - LINVRNT(plist->pl_owner == current); - - cl_page_list_for_each_safe(page, temp, plist) - cl_page_list_del(env, plist, page); - LASSERT(plist->pl_nr == 0); -} -EXPORT_SYMBOL(cl_page_list_fini); - -/** - * Assumes all pages in a queue. - */ -static void cl_page_list_assume(const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist) -{ - struct cl_page *page; - - LINVRNT(plist->pl_owner == current); - - cl_page_list_for_each(page, plist) - cl_page_assume(env, io, page); -} - -/** - * Discards all pages in a queue. - */ -static void cl_page_list_discard(const struct lu_env *env, struct cl_io *io, - struct cl_page_list *plist) -{ - struct cl_page *page; - - LINVRNT(plist->pl_owner == current); - cl_page_list_for_each(page, plist) - cl_page_discard(env, io, page); -} - -/** - * Initialize dual page queue. - */ -void cl_2queue_init(struct cl_2queue *queue) -{ - cl_page_list_init(&queue->c2_qin); - cl_page_list_init(&queue->c2_qout); -} -EXPORT_SYMBOL(cl_2queue_init); - -/** - * Disown pages in both lists of a 2-queue. - */ -void cl_2queue_disown(const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue) -{ - cl_page_list_disown(env, io, &queue->c2_qin); - cl_page_list_disown(env, io, &queue->c2_qout); -} -EXPORT_SYMBOL(cl_2queue_disown); - -/** - * Discard (truncate) pages in both lists of a 2-queue. - */ -void cl_2queue_discard(const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue) -{ - cl_page_list_discard(env, io, &queue->c2_qin); - cl_page_list_discard(env, io, &queue->c2_qout); -} -EXPORT_SYMBOL(cl_2queue_discard); - -/** - * Finalize both page lists of a 2-queue. 
- */ -void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue) -{ - cl_page_list_fini(env, &queue->c2_qout); - cl_page_list_fini(env, &queue->c2_qin); -} -EXPORT_SYMBOL(cl_2queue_fini); - -/** - * Initialize a 2-queue to contain \a page in its incoming page list. - */ -void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page) -{ - cl_2queue_init(queue); - /* - * Add a page to the incoming page list of 2-queue. - */ - cl_page_list_add(&queue->c2_qin, page); -} -EXPORT_SYMBOL(cl_2queue_init_page); - -/** - * Returns top-level io. - * - * \see cl_object_top() - */ -struct cl_io *cl_io_top(struct cl_io *io) -{ - while (io->ci_parent) - io = io->ci_parent; - return io; -} -EXPORT_SYMBOL(cl_io_top); - -/** - * Fills in attributes that are passed to server together with transfer. Only - * attributes from \a flags may be touched. This can be called multiple times - * for the same request. - */ -void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj, - struct cl_req_attr *attr) -{ - struct cl_object *scan; - - cl_object_for_each(scan, obj) { - if (scan->co_ops->coo_req_attr_set) - scan->co_ops->coo_req_attr_set(env, scan, attr); - } -} -EXPORT_SYMBOL(cl_req_attr_set); - -/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to - * wait for the IO to finish. 
- */ -void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor) -{ - wake_up_all(&anchor->csi_waitq); - - /* it's safe to nuke or reuse anchor now */ - atomic_set(&anchor->csi_barrier, 0); -} -EXPORT_SYMBOL(cl_sync_io_end); - -/** - * Initialize synchronous io wait anchor - */ -void cl_sync_io_init(struct cl_sync_io *anchor, int nr, - void (*end)(const struct lu_env *, struct cl_sync_io *)) -{ - memset(anchor, 0, sizeof(*anchor)); - init_waitqueue_head(&anchor->csi_waitq); - atomic_set(&anchor->csi_sync_nr, nr); - atomic_set(&anchor->csi_barrier, nr > 0); - anchor->csi_sync_rc = 0; - anchor->csi_end_io = end; - LASSERT(end); -} -EXPORT_SYMBOL(cl_sync_io_init); - -/** - * Wait until all IO completes. Transfer completion routine has to call - * cl_sync_io_note() for every entity. - */ -int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor, - long timeout) -{ - int rc = 1; - - LASSERT(timeout >= 0); - - if (timeout == 0) - wait_event_idle(anchor->csi_waitq, - atomic_read(&anchor->csi_sync_nr) == 0); - else - rc = wait_event_idle_timeout(anchor->csi_waitq, - atomic_read(&anchor->csi_sync_nr) == 0, - timeout * HZ); - if (rc == 0) { - rc = -ETIMEDOUT; - CERROR("IO failed: %d, still wait for %d remaining entries\n", - rc, atomic_read(&anchor->csi_sync_nr)); - - wait_event_idle(anchor->csi_waitq, - atomic_read(&anchor->csi_sync_nr) == 0); - } else { - rc = anchor->csi_sync_rc; - } - LASSERT(atomic_read(&anchor->csi_sync_nr) == 0); - - /* wait until cl_sync_io_note() has done wakeup */ - while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) - cpu_relax(); - - - return rc; -} -EXPORT_SYMBOL(cl_sync_io_wait); - -/** - * Indicate that transfer of a single page completed. 
- */ -void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor, - int ioret) -{ - if (anchor->csi_sync_rc == 0 && ioret < 0) - anchor->csi_sync_rc = ioret; - /* - * Synchronous IO done without releasing page lock (e.g., as a part of - * ->{prepare,commit}_write(). Completion is used to signal the end of - * IO. - */ - LASSERT(atomic_read(&anchor->csi_sync_nr) > 0); - if (atomic_dec_and_test(&anchor->csi_sync_nr)) { - LASSERT(anchor->csi_end_io); - anchor->csi_end_io(env, anchor); - /* Can't access anchor any more */ - } -} -EXPORT_SYMBOL(cl_sync_io_note); diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c deleted file mode 100644 index 9ca29a26a38b..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c +++ /dev/null @@ -1,275 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * Client Extent Lock. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include -#include "cl_internal.h" - -static void cl_lock_trace0(int level, const struct lu_env *env, - const char *prefix, const struct cl_lock *lock, - const char *func, const int line) -{ - struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj); - - CDEBUG(level, "%s: %p (%p/%d) at %s():%d\n", - prefix, lock, env, h->coh_nesting, func, line); -} -#define cl_lock_trace(level, env, prefix, lock) \ - cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__) - -/** - * Adds lock slice to the compound lock. - * - * This is called by cl_object_operations::coo_lock_init() methods to add a - * per-layer state to the lock. New state is added at the end of - * cl_lock::cll_layers list, that is, it is at the bottom of the stack. - * - * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add() - */ -void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, - struct cl_object *obj, - const struct cl_lock_operations *ops) -{ - slice->cls_lock = lock; - list_add_tail(&slice->cls_linkage, &lock->cll_layers); - slice->cls_obj = obj; - slice->cls_ops = ops; -} -EXPORT_SYMBOL(cl_lock_slice_add); - -void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock) -{ - struct cl_lock_slice *slice; - cl_lock_trace(D_DLMTRACE, env, "destroy lock", lock); - - while ((slice = list_first_entry_or_null(&lock->cll_layers, - struct cl_lock_slice, - cls_linkage)) != NULL) { - list_del_init(lock->cll_layers.next); - slice->cls_ops->clo_fini(env, slice); - } - POISON(lock, 0x5a, sizeof(*lock)); -} -EXPORT_SYMBOL(cl_lock_fini); - -int cl_lock_init(const struct lu_env *env, struct cl_lock *lock, - const struct cl_io *io) -{ - struct cl_object *obj = lock->cll_descr.cld_obj; - struct cl_object *scan; - int result = 0; - - /* Make sure cl_lock::cll_descr is initialized. 
*/ - LASSERT(obj); - - INIT_LIST_HEAD(&lock->cll_layers); - list_for_each_entry(scan, &obj->co_lu.lo_header->loh_layers, - co_lu.lo_linkage) { - result = scan->co_ops->coo_lock_init(env, scan, lock, io); - if (result != 0) { - cl_lock_fini(env, lock); - break; - } - } - - return result; -} -EXPORT_SYMBOL(cl_lock_init); - -/** - * Returns a slice with a lock, corresponding to the given layer in the - * device stack. - * - * \see cl_page_at() - */ -const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, - const struct lu_device_type *dtype) -{ - const struct cl_lock_slice *slice; - - list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype) - return slice; - } - return NULL; -} -EXPORT_SYMBOL(cl_lock_at); - -void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock) -{ - const struct cl_lock_slice *slice; - - cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock); - list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_cancel) - slice->cls_ops->clo_cancel(env, slice); - } -} -EXPORT_SYMBOL(cl_lock_cancel); - -/** - * Enqueue a lock. - * \param anchor: if we need to wait for resources before getting the lock, - * use @anchor for the purpose. - * \retval 0 enqueue successfully - * \retval <0 error code - */ -int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io, - struct cl_lock *lock, struct cl_sync_io *anchor) -{ - const struct cl_lock_slice *slice; - int rc = -ENOSYS; - - list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (!slice->cls_ops->clo_enqueue) - continue; - - rc = slice->cls_ops->clo_enqueue(env, slice, io, anchor); - if (rc != 0) - break; - } - return rc; -} -EXPORT_SYMBOL(cl_lock_enqueue); - -/** - * Main high-level entry point of cl_lock interface that finds existing or - * enqueues new lock matching given description. 
- */ -int cl_lock_request(const struct lu_env *env, struct cl_io *io, - struct cl_lock *lock) -{ - struct cl_sync_io *anchor = NULL; - __u32 enq_flags = lock->cll_descr.cld_enq_flags; - int rc; - - rc = cl_lock_init(env, lock, io); - if (rc < 0) - return rc; - - if ((enq_flags & CEF_ASYNC) && !(enq_flags & CEF_AGL)) { - anchor = &cl_env_info(env)->clt_anchor; - cl_sync_io_init(anchor, 1, cl_sync_io_end); - } - - rc = cl_lock_enqueue(env, io, lock, anchor); - - if (anchor) { - int rc2; - - /* drop the reference count held at initialization time */ - cl_sync_io_note(env, anchor, 0); - rc2 = cl_sync_io_wait(env, anchor, 0); - if (rc2 < 0 && rc == 0) - rc = rc2; - } - - if (rc < 0) - cl_lock_release(env, lock); - - return rc; -} -EXPORT_SYMBOL(cl_lock_request); - -/** - * Releases a hold and a reference on a lock, obtained by cl_lock_hold(). - */ -void cl_lock_release(const struct lu_env *env, struct cl_lock *lock) -{ - cl_lock_trace(D_DLMTRACE, env, "release lock", lock); - cl_lock_cancel(env, lock); - cl_lock_fini(env, lock); -} -EXPORT_SYMBOL(cl_lock_release); - -const char *cl_lock_mode_name(const enum cl_lock_mode mode) -{ - static const char * const names[] = { - [CLM_READ] = "R", - [CLM_WRITE] = "W", - [CLM_GROUP] = "G" - }; - if (0 <= mode && mode < ARRAY_SIZE(names)) - return names[mode]; - else - return "U"; -} -EXPORT_SYMBOL(cl_lock_mode_name); - -/** - * Prints human readable representation of a lock description. - */ -void cl_lock_descr_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_lock_descr *descr) -{ - const struct lu_fid *fid; - - fid = lu_object_fid(&descr->cld_obj->co_lu); - (*printer)(env, cookie, DDESCR "@" DFID, PDESCR(descr), PFID(fid)); -} -EXPORT_SYMBOL(cl_lock_descr_print); - -/** - * Prints human readable representation of \a lock to the \a f. 
- */ -void cl_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_lock *lock) -{ - const struct cl_lock_slice *slice; - - (*printer)(env, cookie, "lock@%p", lock); - cl_lock_descr_print(env, cookie, printer, &lock->cll_descr); - (*printer)(env, cookie, " {\n"); - - list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - (*printer)(env, cookie, " %s@%p: ", - slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name, - slice); - if (slice->cls_ops->clo_print) - slice->cls_ops->clo_print(env, cookie, printer, slice); - (*printer)(env, cookie, "\n"); - } - (*printer)(env, cookie, "} lock@%p\n", lock); -} -EXPORT_SYMBOL(cl_lock_print); diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c deleted file mode 100644 index 42cce2dc5a45..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/cl_object.c +++ /dev/null @@ -1,1059 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Client Lustre Object. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -/* - * Locking. - * - * i_mutex - * PG_locked - * ->coh_attr_guard - * ->ls_guard - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -/* class_put_type() */ -#include -#include -#include -#include -#include -#include -#include "cl_internal.h" - -static struct kmem_cache *cl_env_kmem; - -/** Lock class of cl_object_header::coh_attr_guard */ -static struct lock_class_key cl_attr_guard_class; - -/** - * Initialize cl_object_header. - */ -int cl_object_header_init(struct cl_object_header *h) -{ - int result; - - result = lu_object_header_init(&h->coh_lu); - if (result == 0) { - spin_lock_init(&h->coh_attr_guard); - lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class); - h->coh_page_bufsize = 0; - } - return result; -} -EXPORT_SYMBOL(cl_object_header_init); - -/** - * Returns a cl_object with a given \a fid. - * - * Returns either cached or newly created object. Additional reference on the - * returned object is acquired. - * - * \see lu_object_find(), cl_page_find(), cl_lock_find() - */ -struct cl_object *cl_object_find(const struct lu_env *env, - struct cl_device *cd, const struct lu_fid *fid, - const struct cl_object_conf *c) -{ - might_sleep(); - return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu)); -} -EXPORT_SYMBOL(cl_object_find); - -/** - * Releases a reference on \a o. - * - * When last reference is released object is returned to the cache, unless - * lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in its header. - * - * \see cl_page_put(), cl_lock_put(). - */ -void cl_object_put(const struct lu_env *env, struct cl_object *o) -{ - lu_object_put(env, &o->co_lu); -} -EXPORT_SYMBOL(cl_object_put); - -/** - * Acquire an additional reference to the object \a o. 
- * - * This can only be used to acquire _additional_ reference, i.e., caller - * already has to possess at least one reference to \a o before calling this. - * - * \see cl_page_get(), cl_lock_get(). - */ -void cl_object_get(struct cl_object *o) -{ - lu_object_get(&o->co_lu); -} -EXPORT_SYMBOL(cl_object_get); - -/** - * Returns the top-object for a given \a o. - * - * \see cl_io_top() - */ -struct cl_object *cl_object_top(struct cl_object *o) -{ - struct cl_object_header *hdr = cl_object_header(o); - struct cl_object *top; - - while (hdr->coh_parent) - hdr = hdr->coh_parent; - - top = lu2cl(lu_object_top(&hdr->coh_lu)); - CDEBUG(D_TRACE, "%p -> %p\n", o, top); - return top; -} -EXPORT_SYMBOL(cl_object_top); - -/** - * Returns pointer to the lock protecting data-attributes for the given object - * \a o. - * - * Data-attributes are protected by the cl_object_header::coh_attr_guard - * spin-lock in the top-object. - * - * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get(). - */ -static spinlock_t *cl_object_attr_guard(struct cl_object *o) -{ - return &cl_object_header(cl_object_top(o))->coh_attr_guard; -} - -/** - * Locks data-attributes. - * - * Prevents data-attributes from changing, until lock is released by - * cl_object_attr_unlock(). This has to be called before calls to - * cl_object_attr_get(), cl_object_attr_update(). - */ -void cl_object_attr_lock(struct cl_object *o) - __acquires(cl_object_attr_guard(o)) -{ - spin_lock(cl_object_attr_guard(o)); -} -EXPORT_SYMBOL(cl_object_attr_lock); - -/** - * Releases data-attributes lock, acquired by cl_object_attr_lock(). - */ -void cl_object_attr_unlock(struct cl_object *o) - __releases(cl_object_attr_guard(o)) -{ - spin_unlock(cl_object_attr_guard(o)); -} -EXPORT_SYMBOL(cl_object_attr_unlock); - -/** - * Returns data-attributes of an object \a obj. 
- * - * Every layer is asked (by calling cl_object_operations::coo_attr_get()) - * top-to-bottom to fill in parts of \a attr that this layer is responsible - * for. - */ -int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr) -{ - struct lu_object_header *top; - int result; - - assert_spin_locked(cl_object_attr_guard(obj)); - - top = obj->co_lu.lo_header; - result = 0; - list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_get) { - result = obj->co_ops->coo_attr_get(env, obj, attr); - if (result != 0) { - if (result > 0) - result = 0; - break; - } - } - } - return result; -} -EXPORT_SYMBOL(cl_object_attr_get); - -/** - * Updates data-attributes of an object \a obj. - * - * Only attributes, mentioned in a validness bit-mask \a v are - * updated. Calls cl_object_operations::coo_attr_update() on every layer, - * bottom to top. - */ -int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned int v) -{ - struct lu_object_header *top; - int result; - - assert_spin_locked(cl_object_attr_guard(obj)); - - top = obj->co_lu.lo_header; - result = 0; - list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_update) { - result = obj->co_ops->coo_attr_update(env, obj, attr, - v); - if (result != 0) { - if (result > 0) - result = 0; - break; - } - } - } - return result; -} -EXPORT_SYMBOL(cl_object_attr_update); - -/** - * Notifies layers (bottom-to-top) that glimpse AST was received. - * - * Layers have to fill \a lvb fields with information that will be shipped - * back to glimpse issuer. 
- * - * \see cl_lock_operations::clo_glimpse() - */ -int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, - struct ost_lvb *lvb) -{ - struct lu_object_header *top; - int result; - - top = obj->co_lu.lo_header; - result = 0; - list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_glimpse) { - result = obj->co_ops->coo_glimpse(env, obj, lvb); - if (result != 0) - break; - } - } - LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top), - "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu\n", - lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime, - lvb->lvb_ctime, lvb->lvb_blocks); - return result; -} -EXPORT_SYMBOL(cl_object_glimpse); - -/** - * Updates a configuration of an object \a obj. - */ -int cl_conf_set(const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf) -{ - struct lu_object_header *top; - int result; - - top = obj->co_lu.lo_header; - result = 0; - list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_conf_set) { - result = obj->co_ops->coo_conf_set(env, obj, conf); - if (result != 0) - break; - } - } - return result; -} -EXPORT_SYMBOL(cl_conf_set); - -/** - * Prunes caches of pages and locks for this object. - */ -int cl_object_prune(const struct lu_env *env, struct cl_object *obj) -{ - struct lu_object_header *top; - struct cl_object *o; - int result; - - top = obj->co_lu.lo_header; - result = 0; - list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) { - if (o->co_ops->coo_prune) { - result = o->co_ops->coo_prune(env, o); - if (result != 0) - break; - } - } - - return result; -} -EXPORT_SYMBOL(cl_object_prune); - -/** - * Get stripe information of this object. 
- */ -int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj, - struct lov_user_md __user *uarg) -{ - struct lu_object_header *top; - int result = 0; - - top = obj->co_lu.lo_header; - list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_getstripe) { - result = obj->co_ops->coo_getstripe(env, obj, uarg); - if (result) - break; - } - } - return result; -} -EXPORT_SYMBOL(cl_object_getstripe); - -/** - * Get fiemap extents from file object. - * - * \param env [in] lustre environment - * \param obj [in] file object - * \param key [in] fiemap request argument - * \param fiemap [out] fiemap extents mapping retrived - * \param buflen [in] max buffer length of @fiemap - * - * \retval 0 success - * \retval < 0 error - */ -int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj, - struct ll_fiemap_info_key *key, - struct fiemap *fiemap, size_t *buflen) -{ - struct lu_object_header *top; - int result = 0; - - top = obj->co_lu.lo_header; - list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_fiemap) { - result = obj->co_ops->coo_fiemap(env, obj, key, fiemap, - buflen); - if (result) - break; - } - } - return result; -} -EXPORT_SYMBOL(cl_object_fiemap); - -int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj, - struct cl_layout *cl) -{ - struct lu_object_header *top = obj->co_lu.lo_header; - - list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_layout_get) - return obj->co_ops->coo_layout_get(env, obj, cl); - } - - return -EOPNOTSUPP; -} -EXPORT_SYMBOL(cl_object_layout_get); - -loff_t cl_object_maxbytes(struct cl_object *obj) -{ - struct lu_object_header *top = obj->co_lu.lo_header; - loff_t maxbytes = LLONG_MAX; - - list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_maxbytes) - maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj), - maxbytes); - } - - return maxbytes; -} 
-EXPORT_SYMBOL(cl_object_maxbytes); - -/** - * Helper function removing all object locks, and marking object for - * deletion. All object pages must have been deleted at this point. - * - * This is called by cl_inode_fini() and lov_object_delete() to destroy top- - * and sub- objects respectively. - */ -void cl_object_kill(const struct lu_env *env, struct cl_object *obj) -{ - struct cl_object_header *hdr = cl_object_header(obj); - - set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags); -} -EXPORT_SYMBOL(cl_object_kill); - -void cache_stats_init(struct cache_stats *cs, const char *name) -{ - int i; - - cs->cs_name = name; - for (i = 0; i < CS_NR; i++) - atomic_set(&cs->cs_stats[i], 0); -} - -static int cache_stats_print(const struct cache_stats *cs, - struct seq_file *m, int h) -{ - int i; - /* - * lookup hit total cached create - * env: ...... ...... ...... ...... ...... - */ - if (h) { - const char *names[CS_NR] = CS_NAMES; - - seq_printf(m, "%6s", " "); - for (i = 0; i < CS_NR; i++) - seq_printf(m, "%8s", names[i]); - seq_printf(m, "\n"); - } - - seq_printf(m, "%5.5s:", cs->cs_name); - for (i = 0; i < CS_NR; i++) - seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i])); - return 0; -} - -static void cl_env_percpu_refill(void); - -/** - * Initialize client site. - * - * Perform common initialization (lu_site_init()), and initialize statistical - * counters. Also perform global initializations on the first call. - */ -int cl_site_init(struct cl_site *s, struct cl_device *d) -{ - size_t i; - int result; - - result = lu_site_init(&s->cs_lu, &d->cd_lu_dev); - if (result == 0) { - cache_stats_init(&s->cs_pages, "pages"); - for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i) - atomic_set(&s->cs_pages_state[0], 0); - cl_env_percpu_refill(); - } - return result; -} -EXPORT_SYMBOL(cl_site_init); - -/** - * Finalize client site. Dual to cl_site_init(). 
- */ -void cl_site_fini(struct cl_site *s) -{ - lu_site_fini(&s->cs_lu); -} -EXPORT_SYMBOL(cl_site_fini); - -static struct cache_stats cl_env_stats = { - .cs_name = "envs", - .cs_stats = { ATOMIC_INIT(0), } -}; - -/** - * Outputs client site statistical counters into a buffer. Suitable for - * ll_rd_*()-style functions. - */ -int cl_site_stats_print(const struct cl_site *site, struct seq_file *m) -{ - size_t i; - static const char * const pstate[] = { - [CPS_CACHED] = "c", - [CPS_OWNED] = "o", - [CPS_PAGEOUT] = "w", - [CPS_PAGEIN] = "r", - [CPS_FREEING] = "f" - }; -/* - lookup hit total busy create -pages: ...... ...... ...... ...... ...... [...... ...... ...... ......] -locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......] - env: ...... ...... ...... ...... ...... - */ - lu_site_stats_print(&site->cs_lu, m); - cache_stats_print(&site->cs_pages, m, 1); - seq_puts(m, " ["); - for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i) - seq_printf(m, "%s: %u ", pstate[i], - atomic_read(&site->cs_pages_state[i])); - seq_puts(m, "]\n"); - cache_stats_print(&cl_env_stats, m, 0); - seq_puts(m, "\n"); - return 0; -} -EXPORT_SYMBOL(cl_site_stats_print); - -/***************************************************************************** - * - * lu_env handling on client. - * - */ - -/** - * The most efficient way is to store cl_env pointer in task specific - * structures. On Linux, it wont' be easy to use task_struct->journal_info - * because Lustre code may call into other fs which has certain assumptions - * about journal_info. Currently following fields in task_struct are identified - * can be used for this purpose: - * - tux_info: only on RedHat kernel. - * - ... - * \note As long as we use task_struct to store cl_env, we assume that once - * called into Lustre, we'll never call into the other part of the kernel - * which will use those fields in task_struct without explicitly exiting - * Lustre. 
- * - * If there's no space in task_struct is available, hash will be used. - * bz20044, bz22683. - */ - -static unsigned int cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit - * for now. - */ -static struct cl_env_cache { - rwlock_t cec_guard; - unsigned int cec_count; - struct list_head cec_envs; -} *cl_envs = NULL; - -struct cl_env { - void *ce_magic; - struct lu_env ce_lu; - struct lu_context ce_ses; - - /* - * Linkage into global list of all client environments. Used for - * garbage collection. - */ - struct list_head ce_linkage; - /* - * - */ - int ce_ref; - /* - * Debugging field: address of the caller who made original - * allocation. - */ - void *ce_debug; -}; - -#define CL_ENV_INC(counter) -#define CL_ENV_DEC(counter) - -static void cl_env_init0(struct cl_env *cle, void *debug) -{ - LASSERT(cle->ce_ref == 0); - LASSERT(cle->ce_magic == &cl_env_init0); - LASSERT(!cle->ce_debug); - - cle->ce_ref = 1; - cle->ce_debug = debug; - CL_ENV_INC(busy); -} - -static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) -{ - struct lu_env *env; - struct cl_env *cle; - - cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS); - if (cle) { - int rc; - - INIT_LIST_HEAD(&cle->ce_linkage); - cle->ce_magic = &cl_env_init0; - env = &cle->ce_lu; - rc = lu_env_init(env, ctx_tags | LCT_CL_THREAD); - if (rc == 0) { - rc = lu_context_init(&cle->ce_ses, - ses_tags | LCT_SESSION); - if (rc == 0) { - lu_context_enter(&cle->ce_ses); - env->le_ses = &cle->ce_ses; - cl_env_init0(cle, debug); - } else { - lu_env_fini(env); - } - } - if (rc != 0) { - kmem_cache_free(cl_env_kmem, cle); - env = ERR_PTR(rc); - } else { - CL_ENV_INC(create); - CL_ENV_INC(total); - } - } else { - env = ERR_PTR(-ENOMEM); - } - return env; -} - -static void cl_env_fini(struct cl_env *cle) -{ - CL_ENV_DEC(total); - lu_context_fini(&cle->ce_lu.le_ctx); - lu_context_fini(&cle->ce_ses); - kmem_cache_free(cl_env_kmem, cle); -} - -static struct lu_env *cl_env_obtain(void *debug) -{ - struct 
cl_env *cle; - struct lu_env *env; - int cpu = get_cpu(); - - read_lock(&cl_envs[cpu].cec_guard); - LASSERT(equi(cl_envs[cpu].cec_count == 0, - list_empty(&cl_envs[cpu].cec_envs))); - if (cl_envs[cpu].cec_count > 0) { - int rc; - - cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env, - ce_linkage); - list_del_init(&cle->ce_linkage); - cl_envs[cpu].cec_count--; - read_unlock(&cl_envs[cpu].cec_guard); - put_cpu(); - - env = &cle->ce_lu; - rc = lu_env_refill(env); - if (rc == 0) { - cl_env_init0(cle, debug); - lu_context_enter(&env->le_ctx); - lu_context_enter(&cle->ce_ses); - } else { - cl_env_fini(cle); - env = ERR_PTR(rc); - } - } else { - read_unlock(&cl_envs[cpu].cec_guard); - put_cpu(); - env = cl_env_new(lu_context_tags_default, - lu_session_tags_default, debug); - } - return env; -} - -static inline struct cl_env *cl_env_container(struct lu_env *env) -{ - return container_of(env, struct cl_env, ce_lu); -} - -/** - * Returns lu_env: if there already is an environment associated with the - * current thread, it is returned, otherwise, new environment is allocated. - * - * Allocations are amortized through the global cache of environments. - * - * \param refcheck pointer to a counter used to detect environment leaks. In - * the usual case cl_env_get() and cl_env_put() are called in the same lexical - * scope and pointer to the same integer is passed as \a refcheck. This is - * used to detect missed cl_env_put(). - * - * \see cl_env_put() - */ -struct lu_env *cl_env_get(u16 *refcheck) -{ - struct lu_env *env; - - env = cl_env_obtain(__builtin_return_address(0)); - if (!IS_ERR(env)) { - struct cl_env *cle; - - cle = cl_env_container(env); - *refcheck = cle->ce_ref; - CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); - } - return env; -} -EXPORT_SYMBOL(cl_env_get); - -/** - * Forces an allocation of a fresh environment with given tags. 
- * - * \see cl_env_get() - */ -struct lu_env *cl_env_alloc(u16 *refcheck, u32 tags) -{ - struct lu_env *env; - - env = cl_env_new(tags, tags, __builtin_return_address(0)); - if (!IS_ERR(env)) { - struct cl_env *cle; - - cle = cl_env_container(env); - *refcheck = cle->ce_ref; - CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); - } - return env; -} -EXPORT_SYMBOL(cl_env_alloc); - -static void cl_env_exit(struct cl_env *cle) -{ - lu_context_exit(&cle->ce_lu.le_ctx); - lu_context_exit(&cle->ce_ses); -} - -/** - * Finalizes and frees a given number of cached environments. This is done to - * (1) free some memory (not currently hooked into VM), or (2) release - * references to modules. - */ -unsigned int cl_env_cache_purge(unsigned int nr) -{ - struct cl_env *cle; - unsigned int i; - - for_each_possible_cpu(i) { - write_lock(&cl_envs[i].cec_guard); - for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) { - cle = container_of(cl_envs[i].cec_envs.next, - struct cl_env, ce_linkage); - list_del_init(&cle->ce_linkage); - LASSERT(cl_envs[i].cec_count > 0); - cl_envs[i].cec_count--; - write_unlock(&cl_envs[i].cec_guard); - - cl_env_fini(cle); - write_lock(&cl_envs[i].cec_guard); - } - LASSERT(equi(cl_envs[i].cec_count == 0, - list_empty(&cl_envs[i].cec_envs))); - write_unlock(&cl_envs[i].cec_guard); - } - return nr; -} -EXPORT_SYMBOL(cl_env_cache_purge); - -/** - * Release an environment. - * - * Decrement \a env reference counter. When counter drops to 0, nothing in - * this thread is using environment and it is returned to the allocation - * cache, or freed straight away, if cache is large enough. 
- */ -void cl_env_put(struct lu_env *env, u16 *refcheck) -{ - struct cl_env *cle; - - cle = cl_env_container(env); - - LASSERT(cle->ce_ref > 0); - LASSERT(ergo(refcheck, cle->ce_ref == *refcheck)); - - CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); - if (--cle->ce_ref == 0) { - int cpu = get_cpu(); - - CL_ENV_DEC(busy); - cle->ce_debug = NULL; - cl_env_exit(cle); - /* - * Don't bother to take a lock here. - * - * Return environment to the cache only when it was allocated - * with the standard tags. - */ - if (cl_envs[cpu].cec_count < cl_envs_cached_max && - (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD && - (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) { - read_lock(&cl_envs[cpu].cec_guard); - list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs); - cl_envs[cpu].cec_count++; - read_unlock(&cl_envs[cpu].cec_guard); - } else { - cl_env_fini(cle); - } - put_cpu(); - } -} -EXPORT_SYMBOL(cl_env_put); - -/** - * Converts struct ost_lvb to struct cl_attr. - * - * \see cl_attr2lvb - */ -void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb) -{ - attr->cat_size = lvb->lvb_size; - attr->cat_mtime = lvb->lvb_mtime; - attr->cat_atime = lvb->lvb_atime; - attr->cat_ctime = lvb->lvb_ctime; - attr->cat_blocks = lvb->lvb_blocks; -} -EXPORT_SYMBOL(cl_lvb2attr); - -static struct cl_env cl_env_percpu[NR_CPUS]; - -static int cl_env_percpu_init(void) -{ - struct cl_env *cle; - int tags = LCT_REMEMBER | LCT_NOREF; - int i, j; - int rc = 0; - - for_each_possible_cpu(i) { - struct lu_env *env; - - rwlock_init(&cl_envs[i].cec_guard); - INIT_LIST_HEAD(&cl_envs[i].cec_envs); - cl_envs[i].cec_count = 0; - - cle = &cl_env_percpu[i]; - env = &cle->ce_lu; - - INIT_LIST_HEAD(&cle->ce_linkage); - cle->ce_magic = &cl_env_init0; - rc = lu_env_init(env, LCT_CL_THREAD | tags); - if (rc == 0) { - rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags); - if (rc == 0) { - lu_context_enter(&cle->ce_ses); - env->le_ses = &cle->ce_ses; - } else { - lu_env_fini(env); - } - } - if (rc 
!= 0) - break; - } - if (rc != 0) { - /* Indices 0 to i (excluding i) were correctly initialized, - * thus we must uninitialize up to i, the rest are undefined. - */ - for (j = 0; j < i; j++) { - cle = &cl_env_percpu[j]; - lu_context_exit(&cle->ce_ses); - lu_context_fini(&cle->ce_ses); - lu_env_fini(&cle->ce_lu); - } - } - - return rc; -} - -static void cl_env_percpu_fini(void) -{ - int i; - - for_each_possible_cpu(i) { - struct cl_env *cle = &cl_env_percpu[i]; - - lu_context_exit(&cle->ce_ses); - lu_context_fini(&cle->ce_ses); - lu_env_fini(&cle->ce_lu); - } -} - -static void cl_env_percpu_refill(void) -{ - int i; - - for_each_possible_cpu(i) - lu_env_refill(&cl_env_percpu[i].ce_lu); -} - -void cl_env_percpu_put(struct lu_env *env) -{ - struct cl_env *cle; - int cpu; - - cpu = smp_processor_id(); - cle = cl_env_container(env); - LASSERT(cle == &cl_env_percpu[cpu]); - - cle->ce_ref--; - LASSERT(cle->ce_ref == 0); - - CL_ENV_DEC(busy); - cle->ce_debug = NULL; - - put_cpu(); -} -EXPORT_SYMBOL(cl_env_percpu_put); - -struct lu_env *cl_env_percpu_get(void) -{ - struct cl_env *cle; - - cle = &cl_env_percpu[get_cpu()]; - cl_env_init0(cle, __builtin_return_address(0)); - - return &cle->ce_lu; -} -EXPORT_SYMBOL(cl_env_percpu_get); - -/***************************************************************************** - * - * Temporary prototype thing: mirror obd-devices into cl devices. 
- * - */ - -struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, - struct lu_device_type *ldt, - struct lu_device *next) -{ - const char *typename; - struct lu_device *d; - - typename = ldt->ldt_name; - d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL); - if (!IS_ERR(d)) { - int rc; - - if (site) - d->ld_site = site; - rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next); - if (rc == 0) { - lu_device_get(d); - lu_ref_add(&d->ld_reference, - "lu-stack", &lu_site_init); - } else { - ldt->ldt_ops->ldto_device_free(env, d); - CERROR("can't init device '%s', %d\n", typename, rc); - d = ERR_PTR(rc); - } - } else { - CERROR("Cannot allocate device: '%s'\n", typename); - } - return lu2cl_dev(d); -} -EXPORT_SYMBOL(cl_type_setup); - -/** - * Finalize device stack by calling lu_stack_fini(). - */ -void cl_stack_fini(const struct lu_env *env, struct cl_device *cl) -{ - lu_stack_fini(env, cl2lu_dev(cl)); -} -EXPORT_SYMBOL(cl_stack_fini); - -static struct lu_context_key cl_key; - -struct cl_thread_info *cl_env_info(const struct lu_env *env) -{ - return lu_context_key_get(&env->le_ctx, &cl_key); -} - -/* defines cl0_key_{init,fini}() */ -LU_KEY_INIT_FINI(cl0, struct cl_thread_info); - -static void *cl_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - return cl0_key_init(ctx, key); -} - -static void cl_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - cl0_key_fini(ctx, key, data); -} - -static struct lu_context_key cl_key = { - .lct_tags = LCT_CL_THREAD, - .lct_init = cl_key_init, - .lct_fini = cl_key_fini, -}; - -static struct lu_kmem_descr cl_object_caches[] = { - { - .ckd_cache = &cl_env_kmem, - .ckd_name = "cl_env_kmem", - .ckd_size = sizeof(struct cl_env) - }, - { - .ckd_cache = NULL - } -}; - -/** - * Global initialization of cl-data. Create kmem caches, register - * lu_context_key's, etc. 
- * - * \see cl_global_fini() - */ -int cl_global_init(void) -{ - int result; - - cl_envs = kcalloc(num_possible_cpus(), sizeof(*cl_envs), GFP_KERNEL); - if (!cl_envs) { - result = -ENOMEM; - goto out; - } - - result = lu_kmem_init(cl_object_caches); - if (result) - goto out_envs; - - LU_CONTEXT_KEY_INIT(&cl_key); - result = lu_context_key_register(&cl_key); - if (result) - goto out_kmem; - - result = cl_env_percpu_init(); - if (result) - /* no cl_env_percpu_fini on error */ - goto out_keys; - - return 0; - -out_keys: - lu_context_key_degister(&cl_key); -out_kmem: - lu_kmem_fini(cl_object_caches); -out_envs: - kfree(cl_envs); -out: - return result; -} - -/** - * Finalization of global cl-data. Dual to cl_global_init(). - */ -void cl_global_fini(void) -{ - cl_env_percpu_fini(); - lu_context_key_degister(&cl_key); - lu_kmem_fini(cl_object_caches); - kfree(cl_envs); -} diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c deleted file mode 100644 index 916cf81c5997..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/cl_page.c +++ /dev/null @@ -1,1045 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Client Lustre Page. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include - -#include -#include "cl_internal.h" - -static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg); - -# define PASSERT(env, page, expr) \ - do { \ - if (unlikely(!(expr))) { \ - CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \ - LASSERT(0); \ - } \ - } while (0) - -# define PINVRNT(env, page, exp) \ - ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp)) - -/** - * Internal version of cl_page_get(). - * - * This function can be used to obtain initial reference to previously - * unreferenced cached object. It can be called only if concurrent page - * reclamation is somehow prevented, e.g., by keeping a lock on a VM page, - * associated with \a page. - * - * Use with care! Not exported. - */ -static void cl_page_get_trust(struct cl_page *page) -{ - LASSERT(atomic_read(&page->cp_ref) > 0); - atomic_inc(&page->cp_ref); -} - -/** - * Returns a slice within a page, corresponding to the given layer in the - * device stack. 
- * - * \see cl_lock_at() - */ -static const struct cl_page_slice * -cl_page_at_trusted(const struct cl_page *page, - const struct lu_device_type *dtype) -{ - const struct cl_page_slice *slice; - - list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { - if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype) - return slice; - } - return NULL; -} - -static void cl_page_free(const struct lu_env *env, struct cl_page *page) -{ - struct cl_object *obj = page->cp_obj; - - PASSERT(env, page, list_empty(&page->cp_batch)); - PASSERT(env, page, !page->cp_owner); - PASSERT(env, page, page->cp_state == CPS_FREEING); - - while (!list_empty(&page->cp_layers)) { - struct cl_page_slice *slice; - - slice = list_entry(page->cp_layers.next, - struct cl_page_slice, cpl_linkage); - list_del_init(page->cp_layers.next); - if (unlikely(slice->cpl_ops->cpo_fini)) - slice->cpl_ops->cpo_fini(env, slice); - } - lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page); - cl_object_put(env, obj); - lu_ref_fini(&page->cp_reference); - kfree(page); -} - -/** - * Helper function updating page state. This is the only place in the code - * where cl_page::cp_state field is mutated. - */ -static inline void cl_page_state_set_trust(struct cl_page *page, - enum cl_page_state state) -{ - /* bypass const. 
*/ - *(enum cl_page_state *)&page->cp_state = state; -} - -struct cl_page *cl_page_alloc(const struct lu_env *env, - struct cl_object *o, pgoff_t ind, - struct page *vmpage, - enum cl_page_type type) -{ - struct cl_page *page; - struct lu_object_header *head; - - page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS); - if (page) { - int result = 0; - - atomic_set(&page->cp_ref, 1); - page->cp_obj = o; - cl_object_get(o); - lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page", - page); - page->cp_vmpage = vmpage; - cl_page_state_set_trust(page, CPS_CACHED); - page->cp_type = type; - INIT_LIST_HEAD(&page->cp_layers); - INIT_LIST_HEAD(&page->cp_batch); - lu_ref_init(&page->cp_reference); - head = o->co_lu.lo_header; - list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) { - if (o->co_ops->coo_page_init) { - result = o->co_ops->coo_page_init(env, o, page, - ind); - if (result != 0) { - cl_page_delete0(env, page); - cl_page_free(env, page); - page = ERR_PTR(result); - break; - } - } - } - } else { - page = ERR_PTR(-ENOMEM); - } - return page; -} - -/** - * Returns a cl_page with index \a idx at the object \a o, and associated with - * the VM page \a vmpage. - * - * This is the main entry point into the cl_page caching interface. First, a - * cache (implemented as a per-object radix tree) is consulted. If page is - * found there, it is returned immediately. Otherwise new page is allocated - * and returned. In any case, additional reference to page is acquired. 
- * - * \see cl_object_find(), cl_lock_find() - */ -struct cl_page *cl_page_find(const struct lu_env *env, - struct cl_object *o, - pgoff_t idx, struct page *vmpage, - enum cl_page_type type) -{ - struct cl_page *page = NULL; - struct cl_object_header *hdr; - - LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT); - might_sleep(); - - hdr = cl_object_header(o); - - CDEBUG(D_PAGE, "%lu@" DFID " %p %lx %d\n", - idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type); - /* fast path. */ - if (type == CPT_CACHEABLE) { - /* - * vmpage lock is used to protect the child/parent - * relationship - */ - LASSERT(PageLocked(vmpage)); - /* - * cl_vmpage_page() can be called here without any locks as - * - * - "vmpage" is locked (which prevents ->private from - * concurrent updates), and - * - * - "o" cannot be destroyed while current thread holds a - * reference on it. - */ - page = cl_vmpage_page(vmpage, o); - - if (page) - return page; - } - - /* allocate and initialize cl_page */ - page = cl_page_alloc(env, o, idx, vmpage, type); - return page; -} -EXPORT_SYMBOL(cl_page_find); - -static inline int cl_page_invariant(const struct cl_page *pg) -{ - return cl_page_in_use_noref(pg); -} - -static void cl_page_state_set0(const struct lu_env *env, - struct cl_page *page, enum cl_page_state state) -{ - enum cl_page_state old; - - /* - * Matrix of allowed state transitions [old][new], for sanity - * checking. 
- */ - static const int allowed_transitions[CPS_NR][CPS_NR] = { - [CPS_CACHED] = { - [CPS_CACHED] = 0, - [CPS_OWNED] = 1, /* io finds existing cached page */ - [CPS_PAGEIN] = 0, - [CPS_PAGEOUT] = 1, /* write-out from the cache */ - [CPS_FREEING] = 1, /* eviction on the memory pressure */ - }, - [CPS_OWNED] = { - [CPS_CACHED] = 1, /* release to the cache */ - [CPS_OWNED] = 0, - [CPS_PAGEIN] = 1, /* start read immediately */ - [CPS_PAGEOUT] = 1, /* start write immediately */ - [CPS_FREEING] = 1, /* lock invalidation or truncate */ - }, - [CPS_PAGEIN] = { - [CPS_CACHED] = 1, /* io completion */ - [CPS_OWNED] = 0, - [CPS_PAGEIN] = 0, - [CPS_PAGEOUT] = 0, - [CPS_FREEING] = 0, - }, - [CPS_PAGEOUT] = { - [CPS_CACHED] = 1, /* io completion */ - [CPS_OWNED] = 0, - [CPS_PAGEIN] = 0, - [CPS_PAGEOUT] = 0, - [CPS_FREEING] = 0, - }, - [CPS_FREEING] = { - [CPS_CACHED] = 0, - [CPS_OWNED] = 0, - [CPS_PAGEIN] = 0, - [CPS_PAGEOUT] = 0, - [CPS_FREEING] = 0, - } - }; - - old = page->cp_state; - PASSERT(env, page, allowed_transitions[old][state]); - CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state); - PASSERT(env, page, page->cp_state == old); - PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner)); - cl_page_state_set_trust(page, state); -} - -static void cl_page_state_set(const struct lu_env *env, - struct cl_page *page, enum cl_page_state state) -{ - cl_page_state_set0(env, page, state); -} - -/** - * Acquires an additional reference to a page. - * - * This can be called only by caller already possessing a reference to \a - * page. - * - * \see cl_object_get(), cl_lock_get(). - */ -void cl_page_get(struct cl_page *page) -{ - cl_page_get_trust(page); -} -EXPORT_SYMBOL(cl_page_get); - -/** - * Releases a reference to a page. - * - * When last reference is released, page is returned to the cache, unless it - * is in cl_page_state::CPS_FREEING state, in which case it is immediately - * destroyed. - * - * \see cl_object_put(), cl_lock_put(). 
- */ -void cl_page_put(const struct lu_env *env, struct cl_page *page) -{ - CL_PAGE_HEADER(D_TRACE, env, page, "%d\n", - atomic_read(&page->cp_ref)); - - if (atomic_dec_and_test(&page->cp_ref)) { - LASSERT(page->cp_state == CPS_FREEING); - - LASSERT(atomic_read(&page->cp_ref) == 0); - PASSERT(env, page, !page->cp_owner); - PASSERT(env, page, list_empty(&page->cp_batch)); - /* - * Page is no longer reachable by other threads. Tear - * it down. - */ - cl_page_free(env, page); - } -} -EXPORT_SYMBOL(cl_page_put); - -/** - * Returns a cl_page associated with a VM page, and given cl_object. - */ -struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj) -{ - struct cl_page *page; - - LASSERT(PageLocked(vmpage)); - - /* - * NOTE: absence of races and liveness of data are guaranteed by page - * lock on a "vmpage". That works because object destruction has - * bottom-to-top pass. - */ - - page = (struct cl_page *)vmpage->private; - if (page) { - cl_page_get_trust(page); - LASSERT(page->cp_type == CPT_CACHEABLE); - } - return page; -} -EXPORT_SYMBOL(cl_vmpage_page); - -const struct cl_page_slice *cl_page_at(const struct cl_page *page, - const struct lu_device_type *dtype) -{ - return cl_page_at_trusted(page, dtype); -} -EXPORT_SYMBOL(cl_page_at); - -#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname) - -#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \ -({ \ - const struct lu_env *__env = (_env); \ - struct cl_page *__page = (_page); \ - const struct cl_page_slice *__scan; \ - int __result; \ - ptrdiff_t __op = (_op); \ - int (*__method)_proto; \ - \ - __result = 0; \ - list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \ - __method = *(void **)((char *)__scan->cpl_ops + __op); \ - if (__method) { \ - __result = (*__method)(__env, __scan, ## __VA_ARGS__); \ - if (__result != 0) \ - break; \ - } \ - } \ - if (__result > 0) \ - __result = 0; \ - __result; \ -}) - -#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) 
\ -do { \ - const struct lu_env *__env = (_env); \ - struct cl_page *__page = (_page); \ - const struct cl_page_slice *__scan; \ - ptrdiff_t __op = (_op); \ - void (*__method)_proto; \ - \ - list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \ - __method = *(void **)((char *)__scan->cpl_ops + __op); \ - if (__method) \ - (*__method)(__env, __scan, ## __VA_ARGS__); \ - } \ -} while (0) - -#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \ -do { \ - const struct lu_env *__env = (_env); \ - struct cl_page *__page = (_page); \ - const struct cl_page_slice *__scan; \ - ptrdiff_t __op = (_op); \ - void (*__method)_proto; \ - \ - list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \ - __method = *(void **)((char *)__scan->cpl_ops + __op); \ - if (__method) \ - (*__method)(__env, __scan, ## __VA_ARGS__); \ - } \ -} while (0) - -static int cl_page_invoke(const struct lu_env *env, - struct cl_io *io, struct cl_page *page, ptrdiff_t op) - -{ - PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj)); - return CL_PAGE_INVOKE(env, page, op, - (const struct lu_env *, - const struct cl_page_slice *, struct cl_io *), - io); -} - -static void cl_page_invoid(const struct lu_env *env, - struct cl_io *io, struct cl_page *page, ptrdiff_t op) - -{ - PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj)); - CL_PAGE_INVOID(env, page, op, - (const struct lu_env *, - const struct cl_page_slice *, struct cl_io *), io); -} - -static void cl_page_owner_clear(struct cl_page *page) -{ - if (page->cp_owner) { - LASSERT(page->cp_owner->ci_owned_nr > 0); - page->cp_owner->ci_owned_nr--; - page->cp_owner = NULL; - } -} - -static void cl_page_owner_set(struct cl_page *page) -{ - page->cp_owner->ci_owned_nr++; -} - -void cl_page_disown0(const struct lu_env *env, - struct cl_io *io, struct cl_page *pg) -{ - enum cl_page_state state; - - state = pg->cp_state; - PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING); - PINVRNT(env, pg, 
cl_page_invariant(pg) || state == CPS_FREEING); - cl_page_owner_clear(pg); - - if (state == CPS_OWNED) - cl_page_state_set(env, pg, CPS_CACHED); - /* - * Completion call-backs are executed in the bottom-up order, so that - * uppermost layer (llite), responsible for VFS/VM interaction runs - * last and can release locks safely. - */ - CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown), - (const struct lu_env *, - const struct cl_page_slice *, struct cl_io *), - io); -} - -/** - * returns true, iff page is owned by the given io. - */ -int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io) -{ - struct cl_io *top = cl_io_top((struct cl_io *)io); - - LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj)); - return pg->cp_state == CPS_OWNED && pg->cp_owner == top; -} -EXPORT_SYMBOL(cl_page_is_owned); - -/** - * Try to own a page by IO. - * - * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it - * into cl_page_state::CPS_OWNED state. - * - * \pre !cl_page_is_owned(pg, io) - * \post result == 0 iff cl_page_is_owned(pg, io) - * - * \retval 0 success - * - * \retval -ve failure, e.g., page was destroyed (and landed in - * cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED). - * or, page was owned by another thread, or in IO. 
- * - * \see cl_page_disown() - * \see cl_page_operations::cpo_own() - * \see cl_page_own_try() - * \see cl_page_own - */ -static int cl_page_own0(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, int nonblock) -{ - int result; - - PINVRNT(env, pg, !cl_page_is_owned(pg, io)); - - io = cl_io_top(io); - - if (pg->cp_state == CPS_FREEING) { - result = -ENOENT; - } else { - result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own), - (const struct lu_env *, - const struct cl_page_slice *, - struct cl_io *, int), - io, nonblock); - if (result == 0) { - PASSERT(env, pg, !pg->cp_owner); - pg->cp_owner = cl_io_top(io); - cl_page_owner_set(pg); - if (pg->cp_state != CPS_FREEING) { - cl_page_state_set(env, pg, CPS_OWNED); - } else { - cl_page_disown0(env, io, pg); - result = -ENOENT; - } - } - } - PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg))); - return result; -} - -/** - * Own a page, might be blocked. - * - * \see cl_page_own0() - */ -int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) -{ - return cl_page_own0(env, io, pg, 0); -} -EXPORT_SYMBOL(cl_page_own); - -/** - * Nonblock version of cl_page_own(). - * - * \see cl_page_own0() - */ -int cl_page_own_try(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg) -{ - return cl_page_own0(env, io, pg, 1); -} -EXPORT_SYMBOL(cl_page_own_try); - -/** - * Assume page ownership. - * - * Called when page is already locked by the hosting VM. 
- * - * \pre !cl_page_is_owned(pg, io) - * \post cl_page_is_owned(pg, io) - * - * \see cl_page_operations::cpo_assume() - */ -void cl_page_assume(const struct lu_env *env, - struct cl_io *io, struct cl_page *pg) -{ - PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj)); - - io = cl_io_top(io); - - cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume)); - PASSERT(env, pg, !pg->cp_owner); - pg->cp_owner = cl_io_top(io); - cl_page_owner_set(pg); - cl_page_state_set(env, pg, CPS_OWNED); -} -EXPORT_SYMBOL(cl_page_assume); - -/** - * Releases page ownership without unlocking the page. - * - * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the - * underlying VM page (as VM is supposed to do this itself). - * - * \pre cl_page_is_owned(pg, io) - * \post !cl_page_is_owned(pg, io) - * - * \see cl_page_assume() - */ -void cl_page_unassume(const struct lu_env *env, - struct cl_io *io, struct cl_page *pg) -{ - PINVRNT(env, pg, cl_page_is_owned(pg, io)); - PINVRNT(env, pg, cl_page_invariant(pg)); - - io = cl_io_top(io); - cl_page_owner_clear(pg); - cl_page_state_set(env, pg, CPS_CACHED); - CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume), - (const struct lu_env *, - const struct cl_page_slice *, struct cl_io *), - io); -} -EXPORT_SYMBOL(cl_page_unassume); - -/** - * Releases page ownership. - * - * Moves page into cl_page_state::CPS_CACHED. - * - * \pre cl_page_is_owned(pg, io) - * \post !cl_page_is_owned(pg, io) - * - * \see cl_page_own() - * \see cl_page_operations::cpo_disown() - */ -void cl_page_disown(const struct lu_env *env, - struct cl_io *io, struct cl_page *pg) -{ - PINVRNT(env, pg, cl_page_is_owned(pg, io) || - pg->cp_state == CPS_FREEING); - - io = cl_io_top(io); - cl_page_disown0(env, io, pg); -} -EXPORT_SYMBOL(cl_page_disown); - -/** - * Called when page is to be removed from the object, e.g., as a result of - * truncate. - * - * Calls cl_page_operations::cpo_discard() top-to-bottom. 
- * - * \pre cl_page_is_owned(pg, io) - * - * \see cl_page_operations::cpo_discard() - */ -void cl_page_discard(const struct lu_env *env, - struct cl_io *io, struct cl_page *pg) -{ - PINVRNT(env, pg, cl_page_is_owned(pg, io)); - PINVRNT(env, pg, cl_page_invariant(pg)); - - cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard)); -} -EXPORT_SYMBOL(cl_page_discard); - -/** - * Version of cl_page_delete() that can be called for not fully constructed - * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0() - * path. Doesn't check page invariant. - */ -static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg) -{ - PASSERT(env, pg, pg->cp_state != CPS_FREEING); - - /* - * Sever all ways to obtain new pointers to @pg. - */ - cl_page_owner_clear(pg); - - cl_page_state_set0(env, pg, CPS_FREEING); - - CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete), - (const struct lu_env *, - const struct cl_page_slice *)); -} - -/** - * Called when a decision is made to throw page out of memory. - * - * Notifies all layers about page destruction by calling - * cl_page_operations::cpo_delete() method top-to-bottom. - * - * Moves page into cl_page_state::CPS_FREEING state (this is the only place - * where transition to this state happens). - * - * Eliminates all venues through which new references to the page can be - * obtained: - * - * - removes page from the radix trees, - * - * - breaks linkage from VM page to cl_page. - * - * Once page reaches cl_page_state::CPS_FREEING, all remaining references will - * drain after some time, at which point page will be recycled. - * - * \pre VM page is locked - * \post pg->cp_state == CPS_FREEING - * - * \see cl_page_operations::cpo_delete() - */ -void cl_page_delete(const struct lu_env *env, struct cl_page *pg) -{ - PINVRNT(env, pg, cl_page_invariant(pg)); - cl_page_delete0(env, pg); -} -EXPORT_SYMBOL(cl_page_delete); - -/** - * Marks page up-to-date. 
- * - * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The - * layer responsible for VM interaction has to mark/clear page as up-to-date - * by the \a uptodate argument. - * - * \see cl_page_operations::cpo_export() - */ -void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate) -{ - PINVRNT(env, pg, cl_page_invariant(pg)); - CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export), - (const struct lu_env *, - const struct cl_page_slice *, int), uptodate); -} -EXPORT_SYMBOL(cl_page_export); - -/** - * Returns true, iff \a pg is VM locked in a suitable sense by the calling - * thread. - */ -int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg) -{ - int result; - const struct cl_page_slice *slice; - - slice = container_of(pg->cp_layers.next, - const struct cl_page_slice, cpl_linkage); - PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked); - /* - * Call ->cpo_is_vmlocked() directly instead of going through - * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by - * cl_page_invariant(). - */ - result = slice->cpl_ops->cpo_is_vmlocked(env, slice); - PASSERT(env, pg, result == -EBUSY || result == -ENODATA); - return result == -EBUSY; -} -EXPORT_SYMBOL(cl_page_is_vmlocked); - -static enum cl_page_state cl_req_type_state(enum cl_req_type crt) -{ - return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN; -} - -static void cl_page_io_start(const struct lu_env *env, - struct cl_page *pg, enum cl_req_type crt) -{ - /* - * Page is queued for IO, change its state. - */ - cl_page_owner_clear(pg); - cl_page_state_set(env, pg, cl_req_type_state(crt)); -} - -/** - * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is - * called top-to-bottom. Every layer either agrees to submit this page (by - * returning 0), or requests to omit this page (by returning -EALREADY). Layer - * handling interactions with the VM also has to inform VM that page is under - * transfer now. 
- */ -int cl_page_prep(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt) -{ - int result; - - PINVRNT(env, pg, cl_page_is_owned(pg, io)); - PINVRNT(env, pg, cl_page_invariant(pg)); - PINVRNT(env, pg, crt < CRT_NR); - - /* - * XXX this has to be called bottom-to-top, so that llite can set up - * PG_writeback without risking other layers deciding to skip this - * page. - */ - if (crt >= CRT_NR) - return -EINVAL; - result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep)); - if (result == 0) - cl_page_io_start(env, pg, crt); - - CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result); - return result; -} -EXPORT_SYMBOL(cl_page_prep); - -/** - * Notify layers about transfer completion. - * - * Invoked by transfer sub-system (which is a part of osc) to notify layers - * that a transfer, of which this page is a part of has completed. - * - * Completion call-backs are executed in the bottom-up order, so that - * uppermost layer (llite), responsible for the VFS/VM interaction runs last - * and can release locks safely. 
- * - * \pre pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT - * \post pg->cp_state == CPS_CACHED - * - * \see cl_page_operations::cpo_completion() - */ -void cl_page_completion(const struct lu_env *env, - struct cl_page *pg, enum cl_req_type crt, int ioret) -{ - struct cl_sync_io *anchor = pg->cp_sync_io; - - PASSERT(env, pg, crt < CRT_NR); - PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt)); - - CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret); - - cl_page_state_set(env, pg, CPS_CACHED); - if (crt >= CRT_NR) - return; - CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion), - (const struct lu_env *, - const struct cl_page_slice *, int), ioret); - if (anchor) { - LASSERT(pg->cp_sync_io == anchor); - pg->cp_sync_io = NULL; - cl_sync_io_note(env, anchor, ioret); - } -} -EXPORT_SYMBOL(cl_page_completion); - -/** - * Notify layers that transfer formation engine decided to yank this page from - * the cache and to make it a part of a transfer. - * - * \pre pg->cp_state == CPS_CACHED - * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT - * - * \see cl_page_operations::cpo_make_ready() - */ -int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg, - enum cl_req_type crt) -{ - int result; - - PINVRNT(env, pg, crt < CRT_NR); - - if (crt >= CRT_NR) - return -EINVAL; - result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready), - (const struct lu_env *, - const struct cl_page_slice *)); - if (result == 0) { - PASSERT(env, pg, pg->cp_state == CPS_CACHED); - cl_page_io_start(env, pg, crt); - } - CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result); - return result; -} -EXPORT_SYMBOL(cl_page_make_ready); - -/** - * Called if a pge is being written back by kernel's intention. 
- * - * \pre cl_page_is_owned(pg, io) - * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT) - * - * \see cl_page_operations::cpo_flush() - */ -int cl_page_flush(const struct lu_env *env, struct cl_io *io, - struct cl_page *pg) -{ - int result; - - PINVRNT(env, pg, cl_page_is_owned(pg, io)); - PINVRNT(env, pg, cl_page_invariant(pg)); - - result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush)); - - CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result); - return result; -} -EXPORT_SYMBOL(cl_page_flush); - -/** - * Tells transfer engine that only part of a page is to be transmitted. - * - * \see cl_page_operations::cpo_clip() - */ -void cl_page_clip(const struct lu_env *env, struct cl_page *pg, - int from, int to) -{ - PINVRNT(env, pg, cl_page_invariant(pg)); - - CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to); - CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip), - (const struct lu_env *, - const struct cl_page_slice *, int, int), - from, to); -} -EXPORT_SYMBOL(cl_page_clip); - -/** - * Prints human readable representation of \a pg to the \a f. - */ -void cl_page_header_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_page *pg) -{ - (*printer)(env, cookie, - "page@%p[%d %p %d %d %p]\n", - pg, atomic_read(&pg->cp_ref), pg->cp_obj, - pg->cp_state, pg->cp_type, - pg->cp_owner); -} -EXPORT_SYMBOL(cl_page_header_print); - -/** - * Prints human readable representation of \a pg to the \a f. - */ -void cl_page_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_page *pg) -{ - cl_page_header_print(env, cookie, printer, pg); - CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print), - (const struct lu_env *env, - const struct cl_page_slice *slice, - void *cookie, lu_printer_t p), cookie, printer); - (*printer)(env, cookie, "end page@%p\n", pg); -} -EXPORT_SYMBOL(cl_page_print); - -/** - * Cancel a page which is still in a transfer. 
- */ -int cl_page_cancel(const struct lu_env *env, struct cl_page *page) -{ - return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel), - (const struct lu_env *, - const struct cl_page_slice *)); -} - -/** - * Converts a byte offset within object \a obj into a page index. - */ -loff_t cl_offset(const struct cl_object *obj, pgoff_t idx) -{ - /* - * XXX for now. - */ - return (loff_t)idx << PAGE_SHIFT; -} -EXPORT_SYMBOL(cl_offset); - -/** - * Converts a page index into a byte offset within object \a obj. - */ -pgoff_t cl_index(const struct cl_object *obj, loff_t offset) -{ - /* - * XXX for now. - */ - return offset >> PAGE_SHIFT; -} -EXPORT_SYMBOL(cl_index); - -size_t cl_page_size(const struct cl_object *obj) -{ - return 1UL << PAGE_SHIFT; -} -EXPORT_SYMBOL(cl_page_size); - -/** - * Adds page slice to the compound page. - * - * This is called by cl_object_operations::coo_page_init() methods to add a - * per-layer state to the page. New state is added at the end of - * cl_page::cp_layers list, that is, it is at the bottom of the stack. - * - * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add() - */ -void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice, - struct cl_object *obj, pgoff_t index, - const struct cl_page_operations *ops) -{ - list_add_tail(&slice->cpl_linkage, &page->cp_layers); - slice->cpl_obj = obj; - slice->cpl_index = index; - slice->cpl_ops = ops; - slice->cpl_page = page; -} -EXPORT_SYMBOL(cl_page_slice_add); - -/** - * Allocate and initialize cl_cache, called by ll_init_sbi(). 
- */ -struct cl_client_cache *cl_cache_init(unsigned long lru_page_max) -{ - struct cl_client_cache *cache = NULL; - - cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (!cache) - return NULL; - - /* Initialize cache data */ - atomic_set(&cache->ccc_users, 1); - cache->ccc_lru_max = lru_page_max; - atomic_long_set(&cache->ccc_lru_left, lru_page_max); - spin_lock_init(&cache->ccc_lru_lock); - INIT_LIST_HEAD(&cache->ccc_lru); - - atomic_long_set(&cache->ccc_unstable_nr, 0); - init_waitqueue_head(&cache->ccc_unstable_waitq); - - return cache; -} -EXPORT_SYMBOL(cl_cache_init); - -/** - * Increase cl_cache refcount - */ -void cl_cache_incref(struct cl_client_cache *cache) -{ - atomic_inc(&cache->ccc_users); -} -EXPORT_SYMBOL(cl_cache_incref); - -/** - * Decrease cl_cache refcount and free the cache if refcount=0. - * Since llite, lov and osc all hold cl_cache refcount, - * the free will not cause race. (LU-6173) - */ -void cl_cache_decref(struct cl_client_cache *cache) -{ - if (atomic_dec_and_test(&cache->ccc_users)) - kfree(cache); -} -EXPORT_SYMBOL(cl_cache_decref); diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c deleted file mode 100644 index d6c46858941b..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/class_obd.c +++ /dev/null @@ -1,544 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "llog_internal.h" - -struct obd_device *obd_devs[MAX_OBD_DEVICES]; -struct list_head obd_types; -DEFINE_RWLOCK(obd_dev_lock); - -/* The following are visible and mutable through /sys/fs/lustre. */ -unsigned int obd_debug_peer_on_timeout; -EXPORT_SYMBOL(obd_debug_peer_on_timeout); -unsigned int obd_dump_on_timeout; -EXPORT_SYMBOL(obd_dump_on_timeout); -unsigned int obd_dump_on_eviction; -EXPORT_SYMBOL(obd_dump_on_eviction); -unsigned long obd_max_dirty_pages; -EXPORT_SYMBOL(obd_max_dirty_pages); -atomic_long_t obd_dirty_pages; -EXPORT_SYMBOL(obd_dirty_pages); -unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */ -EXPORT_SYMBOL(obd_timeout); -unsigned int obd_timeout_set; -EXPORT_SYMBOL(obd_timeout_set); -/* Adaptive timeout defs here instead of ptlrpc module for /sys/fs/ access */ -unsigned int at_min; -EXPORT_SYMBOL(at_min); -unsigned int at_max = 600; -EXPORT_SYMBOL(at_max); -unsigned int at_history = 600; -EXPORT_SYMBOL(at_history); -int at_early_margin = 5; -EXPORT_SYMBOL(at_early_margin); -int at_extra = 30; -EXPORT_SYMBOL(at_extra); - -atomic_long_t obd_dirty_transit_pages; -EXPORT_SYMBOL(obd_dirty_transit_pages); - -char obd_jobid_var[JOBSTATS_JOBID_VAR_MAX_LEN + 
1] = JOBSTATS_DISABLE; -char obd_jobid_node[LUSTRE_JOBID_SIZE + 1]; - -/* Get jobid of current process from stored variable or calculate - * it from pid and user_id. - * - * Historically this was also done by reading the environment variable - * stored in between the "env_start" & "env_end" of task struct. - * This is now deprecated. - */ -int lustre_get_jobid(char *jobid) -{ - memset(jobid, 0, LUSTRE_JOBID_SIZE); - /* Jobstats isn't enabled */ - if (strcmp(obd_jobid_var, JOBSTATS_DISABLE) == 0) - return 0; - - /* Use process name + fsuid as jobid */ - if (strcmp(obd_jobid_var, JOBSTATS_PROCNAME_UID) == 0) { - snprintf(jobid, LUSTRE_JOBID_SIZE, "%s.%u", - current->comm, - from_kuid(&init_user_ns, current_fsuid())); - return 0; - } - - /* Whole node dedicated to single job */ - if (strcmp(obd_jobid_var, JOBSTATS_NODELOCAL) == 0) { - strcpy(jobid, obd_jobid_node); - return 0; - } - - return -ENOENT; -} -EXPORT_SYMBOL(lustre_get_jobid); - -static int class_resolve_dev_name(__u32 len, const char *name) -{ - int rc; - int dev; - - if (!len || !name) { - CERROR("No name passed,!\n"); - rc = -EINVAL; - goto out; - } - if (name[len - 1] != 0) { - CERROR("Name not nul terminated!\n"); - rc = -EINVAL; - goto out; - } - - CDEBUG(D_IOCTL, "device name %s\n", name); - dev = class_name2dev(name); - if (dev == -1) { - CDEBUG(D_IOCTL, "No device for name %s!\n", name); - rc = -EINVAL; - goto out; - } - - CDEBUG(D_IOCTL, "device name %s, dev %d\n", name, dev); - rc = dev; - -out: - return rc; -} - -int class_handle_ioctl(unsigned int cmd, unsigned long arg) -{ - char *buf = NULL; - struct obd_ioctl_data *data; - struct libcfs_debug_ioctl_data *debug_data; - struct obd_device *obd = NULL; - int err = 0, len = 0; - - /* only for debugging */ - if (cmd == LIBCFS_IOC_DEBUG_MASK) { - debug_data = (struct libcfs_debug_ioctl_data *)arg; - libcfs_subsystem_debug = debug_data->subs; - libcfs_debug = debug_data->debug; - return 0; - } - - CDEBUG(D_IOCTL, "cmd = %x\n", cmd); - if 
(obd_ioctl_getdata(&buf, &len, (void __user *)arg)) { - CERROR("OBD ioctl: data error\n"); - return -EINVAL; - } - data = (struct obd_ioctl_data *)buf; - - switch (cmd) { - case OBD_IOC_PROCESS_CFG: { - struct lustre_cfg *lcfg; - - if (!data->ioc_plen1 || !data->ioc_pbuf1) { - CERROR("No config buffer passed!\n"); - err = -EINVAL; - goto out; - } - lcfg = kzalloc(data->ioc_plen1, GFP_NOFS); - if (!lcfg) { - err = -ENOMEM; - goto out; - } - if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1)) - err = -EFAULT; - if (!err) - err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1); - if (!err) - err = class_process_config(lcfg); - - kfree(lcfg); - goto out; - } - - case OBD_GET_VERSION: - if (!data->ioc_inlbuf1) { - CERROR("No buffer passed in ioctl\n"); - err = -EINVAL; - goto out; - } - - if (strlen(LUSTRE_VERSION_STRING) + 1 > data->ioc_inllen1) { - CERROR("ioctl buffer too small to hold version\n"); - err = -EINVAL; - goto out; - } - - memcpy(data->ioc_bulk, LUSTRE_VERSION_STRING, - strlen(LUSTRE_VERSION_STRING) + 1); - - if (copy_to_user((void __user *)arg, data, len)) - err = -EFAULT; - goto out; - - case OBD_IOC_NAME2DEV: { - /* Resolve a device name. This does not change the - * currently selected device. - */ - int dev; - - dev = class_resolve_dev_name(data->ioc_inllen1, - data->ioc_inlbuf1); - data->ioc_dev = dev; - if (dev < 0) { - err = -EINVAL; - goto out; - } - - if (copy_to_user((void __user *)arg, data, sizeof(*data))) - err = -EFAULT; - goto out; - } - - case OBD_IOC_UUID2DEV: { - /* Resolve a device uuid. This does not change the - * currently selected device. 
- */ - int dev; - struct obd_uuid uuid; - - if (!data->ioc_inllen1 || !data->ioc_inlbuf1) { - CERROR("No UUID passed!\n"); - err = -EINVAL; - goto out; - } - if (data->ioc_inlbuf1[data->ioc_inllen1 - 1] != 0) { - CERROR("UUID not NUL terminated!\n"); - err = -EINVAL; - goto out; - } - - CDEBUG(D_IOCTL, "device name %s\n", data->ioc_inlbuf1); - obd_str2uuid(&uuid, data->ioc_inlbuf1); - dev = class_uuid2dev(&uuid); - data->ioc_dev = dev; - if (dev == -1) { - CDEBUG(D_IOCTL, "No device for UUID %s!\n", - data->ioc_inlbuf1); - err = -EINVAL; - goto out; - } - - CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1, - dev); - - if (copy_to_user((void __user *)arg, data, sizeof(*data))) - err = -EFAULT; - goto out; - } - - case OBD_IOC_GETDEVICE: { - int index = data->ioc_count; - char *status, *str; - - if (!data->ioc_inlbuf1) { - CERROR("No buffer passed in ioctl\n"); - err = -EINVAL; - goto out; - } - if (data->ioc_inllen1 < 128) { - CERROR("ioctl buffer too small to hold version\n"); - err = -EINVAL; - goto out; - } - - obd = class_num2obd(index); - if (!obd) { - err = -ENOENT; - goto out; - } - - if (obd->obd_stopping) - status = "ST"; - else if (obd->obd_set_up) - status = "UP"; - else if (obd->obd_attached) - status = "AT"; - else - status = "--"; - str = (char *)data->ioc_bulk; - snprintf(str, len - sizeof(*data), "%3d %s %s %s %s %d", - (int)index, status, obd->obd_type->typ_name, - obd->obd_name, obd->obd_uuid.uuid, - atomic_read(&obd->obd_refcount)); - - if (copy_to_user((void __user *)arg, data, len)) - err = -EFAULT; - goto out; - } - } - - if (data->ioc_dev == OBD_DEV_BY_DEVNAME) { - if (data->ioc_inllen4 <= 0 || !data->ioc_inlbuf4) { - err = -EINVAL; - goto out; - } - if (strnlen(data->ioc_inlbuf4, MAX_OBD_NAME) >= MAX_OBD_NAME) { - err = -EINVAL; - goto out; - } - obd = class_name2obd(data->ioc_inlbuf4); - } else if (data->ioc_dev < class_devno_max()) { - obd = class_num2obd(data->ioc_dev); - } else { - CERROR("OBD ioctl: No device\n"); - err = 
-EINVAL; - goto out; - } - - if (!obd) { - CERROR("OBD ioctl : No Device %d\n", data->ioc_dev); - err = -EINVAL; - goto out; - } - LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); - - if (!obd->obd_set_up || obd->obd_stopping) { - CERROR("OBD ioctl: device not setup %d\n", data->ioc_dev); - err = -EINVAL; - goto out; - } - - switch (cmd) { - case OBD_IOC_NO_TRANSNO: { - if (!obd->obd_attached) { - CERROR("Device %d not attached\n", obd->obd_minor); - err = -ENODEV; - goto out; - } - CDEBUG(D_HA, "%s: disabling committed-transno notification\n", - obd->obd_name); - obd->obd_no_transno = 1; - err = 0; - goto out; - } - - default: { - err = obd_iocontrol(cmd, obd->obd_self_export, len, data, NULL); - if (err) - goto out; - - if (copy_to_user((void __user *)arg, data, len)) - err = -EFAULT; - goto out; - } - } - - out: - kvfree(buf); - return err; -} /* class_handle_ioctl */ - -#define OBD_INIT_CHECK -static int obd_init_checks(void) -{ - __u64 u64val, div64val; - char buf[64]; - int len, ret = 0; - - CDEBUG(D_INFO, "LPU64=%s, LPD64=%s, LPX64=%s\n", "%llu", "%lld", - "%#llx"); - - CDEBUG(D_INFO, "OBD_OBJECT_EOF = %#llx\n", (__u64)OBD_OBJECT_EOF); - - u64val = OBD_OBJECT_EOF; - CDEBUG(D_INFO, "u64val OBD_OBJECT_EOF = %#llx\n", u64val); - if (u64val != OBD_OBJECT_EOF) { - CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n", - u64val, (int)sizeof(u64val)); - ret = -EINVAL; - } - len = snprintf(buf, sizeof(buf), "%#llx", u64val); - if (len != 18) { - CWARN("LPX64 wrong length! 
strlen(%s)=%d != 18\n", buf, len); - ret = -EINVAL; - } - - div64val = OBD_OBJECT_EOF; - CDEBUG(D_INFO, "u64val OBD_OBJECT_EOF = %#llx\n", u64val); - if (u64val != OBD_OBJECT_EOF) { - CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n", - u64val, (int)sizeof(u64val)); - ret = -EOVERFLOW; - } - if (u64val >> 8 != OBD_OBJECT_EOF >> 8) { - CERROR("__u64 %#llx(%d) != 0xffffffffffffffff\n", - u64val, (int)sizeof(u64val)); - return -EOVERFLOW; - } - if (do_div(div64val, 256) != (u64val & 255)) { - CERROR("do_div(%#llx,256) != %llu\n", u64val, u64val & 255); - return -EOVERFLOW; - } - if (u64val >> 8 != div64val) { - CERROR("do_div(%#llx,256) %llu != %llu\n", - u64val, div64val, u64val >> 8); - return -EOVERFLOW; - } - len = snprintf(buf, sizeof(buf), "%#llx", u64val); - if (len != 18) { - CWARN("LPX64 wrong length! strlen(%s)=%d != 18\n", buf, len); - ret = -EINVAL; - } - len = snprintf(buf, sizeof(buf), "%llu", u64val); - if (len != 20) { - CWARN("LPU64 wrong length! strlen(%s)=%d != 20\n", buf, len); - ret = -EINVAL; - } - len = snprintf(buf, sizeof(buf), "%lld", u64val); - if (len != 2) { - CWARN("LPD64 wrong length! 
strlen(%s)=%d != 2\n", buf, len); - ret = -EINVAL; - } - if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) { - CWARN("mask failed: u64val %llu >= %llu\n", u64val, - (__u64)PAGE_SIZE); - ret = -EINVAL; - } - - return ret; -} - -static int __init obdclass_init(void) -{ - int i, err; - - LCONSOLE_INFO("Lustre: Build Version: " LUSTRE_VERSION_STRING "\n"); - - spin_lock_init(&obd_types_lock); - - err = libcfs_setup(); - if (err) - return err; - - obd_zombie_impexp_init(); - - err = obd_init_checks(); - if (err) - return err; - - class_init_uuidlist(); - err = class_handle_init(); - if (err) - return err; - - INIT_LIST_HEAD(&obd_types); - - err = misc_register(&obd_psdev); - if (err) { - CERROR("cannot register OBD miscdevices: err %d\n", err); - return err; - } - - /* This struct is already zeroed for us (static global) */ - for (i = 0; i < class_devno_max(); i++) - obd_devs[i] = NULL; - - /* Default the dirty page cache cap to 1/2 of system memory. - * For clients with less memory, a larger fraction is needed - * for other purposes (mostly for BGL). - */ - if (totalram_pages <= 512 << (20 - PAGE_SHIFT)) - obd_max_dirty_pages = totalram_pages / 4; - else - obd_max_dirty_pages = totalram_pages / 2; - - err = obd_init_caches(); - if (err) - return err; - - err = class_procfs_init(); - if (err) - return err; - - err = obd_sysctl_init(); - if (err) - return err; - - err = lu_global_init(); - if (err) - return err; - - err = cl_global_init(); - if (err != 0) - return err; - - err = llog_info_init(); - if (err) - return err; - - err = lustre_register_fs(); - - return err; -} - -static void obdclass_exit(void) -{ - lustre_unregister_fs(); - - misc_deregister(&obd_psdev); - llog_info_fini(); - cl_global_fini(); - lu_global_fini(); - - obd_cleanup_caches(); - - class_procfs_clean(); - - class_handle_cleanup(); - class_exit_uuidlist(); - obd_zombie_impexp_stop(); -} - -MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("Lustre Class Driver"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(obdclass_init); -module_exit(obdclass_exit); diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c deleted file mode 100644 index 2156a82a613a..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/debug.c +++ /dev/null @@ -1,96 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/debug.c - * - * Helper routines for dumping data structs for debugging. 
- */ - -#define DEBUG_SUBSYSTEM D_OTHER - -#include - -#include -#include -#include - -#define LPDS sizeof(__u64) -int block_debug_setup(void *addr, int len, __u64 off, __u64 id) -{ - LASSERT(addr); - - put_unaligned_le64(off, addr); - put_unaligned_le64(id, addr + LPDS); - addr += len - LPDS - LPDS; - put_unaligned_le64(off, addr); - put_unaligned_le64(id, addr + LPDS); - - return 0; -} -EXPORT_SYMBOL(block_debug_setup); - -int block_debug_check(char *who, void *addr, int end, __u64 off, __u64 id) -{ - __u64 ne_off; - int err = 0; - - LASSERT(addr); - - ne_off = le64_to_cpu(off); - id = le64_to_cpu(id); - if (memcmp(addr, (char *)&ne_off, LPDS)) { - CDEBUG(D_ERROR, "%s: id %#llx offset %llu off: %#llx != %#llx\n", - who, id, off, *(__u64 *)addr, ne_off); - err = -EINVAL; - } - if (memcmp(addr + LPDS, (char *)&id, LPDS)) { - CDEBUG(D_ERROR, "%s: id %#llx offset %llu id: %#llx != %#llx\n", - who, id, off, *(__u64 *)(addr + LPDS), id); - err = -EINVAL; - } - - addr += end - LPDS - LPDS; - if (memcmp(addr, (char *)&ne_off, LPDS)) { - CDEBUG(D_ERROR, "%s: id %#llx offset %llu end off: %#llx != %#llx\n", - who, id, off, *(__u64 *)addr, ne_off); - err = -EINVAL; - } - if (memcmp(addr + LPDS, (char *)&id, LPDS)) { - CDEBUG(D_ERROR, "%s: id %#llx offset %llu end id: %#llx != %#llx\n", - who, id, off, *(__u64 *)(addr + LPDS), id); - err = -EINVAL; - } - - return err; -} -EXPORT_SYMBOL(block_debug_check); -#undef LPDS diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c deleted file mode 100644 index 234f383ce6d9..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/genops.c +++ /dev/null @@ -1,1480 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/obdclass/genops.c - * - * These are the only exported functions, they provide some generic - * infrastructure for managing object devices - */ - -#define DEBUG_SUBSYSTEM S_CLASS -#include -#include -#include - -spinlock_t obd_types_lock; - -static struct kmem_cache *obd_device_cachep; -struct kmem_cache *obdo_cachep; -EXPORT_SYMBOL(obdo_cachep); -static struct kmem_cache *import_cachep; - -static struct workqueue_struct *zombie_wq; -static void obd_zombie_export_add(struct obd_export *exp); -static void obd_zombie_import_add(struct obd_import *imp); - -int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c); -EXPORT_SYMBOL(ptlrpc_put_connection_superhack); - -/* - * support functions: we could use inter-module communication, but this - * is more portable to other OS's - */ -static struct obd_device *obd_device_alloc(void) -{ - struct obd_device *obd; - - obd = kmem_cache_zalloc(obd_device_cachep, GFP_NOFS); - if (obd) - obd->obd_magic = OBD_DEVICE_MAGIC; - return obd; -} - -static void obd_device_free(struct obd_device *obd) -{ - LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n", - obd, obd->obd_magic, OBD_DEVICE_MAGIC); - if (obd->obd_namespace) { - CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n", - obd, obd->obd_namespace, obd->obd_force); - LBUG(); - } - lu_ref_fini(&obd->obd_reference); - kmem_cache_free(obd_device_cachep, obd); -} - -static struct obd_type *class_search_type(const char *name) -{ - struct list_head *tmp; - struct obd_type *type; - - spin_lock(&obd_types_lock); - list_for_each(tmp, &obd_types) { - type = list_entry(tmp, struct obd_type, typ_chain); - if (strcmp(type->typ_name, name) == 0) { - spin_unlock(&obd_types_lock); - return type; - } - } - spin_unlock(&obd_types_lock); - return NULL; -} - -static struct obd_type *class_get_type(const char *name) -{ - struct obd_type *type = class_search_type(name); - - if (!type) { - const char *modname = name; - - if 
(!request_module("%s", modname)) { - CDEBUG(D_INFO, "Loaded module '%s'\n", modname); - type = class_search_type(name); - } else { - LCONSOLE_ERROR_MSG(0x158, "Can't load module '%s'\n", - modname); - } - } - if (type) { - spin_lock(&type->obd_type_lock); - type->typ_refcnt++; - try_module_get(type->typ_dt_ops->owner); - spin_unlock(&type->obd_type_lock); - } - return type; -} - -void class_put_type(struct obd_type *type) -{ - LASSERT(type); - spin_lock(&type->obd_type_lock); - type->typ_refcnt--; - module_put(type->typ_dt_ops->owner); - spin_unlock(&type->obd_type_lock); -} - -#define CLASS_MAX_NAME 1024 - -int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops, - const char *name, - struct lu_device_type *ldt) -{ - struct obd_type *type; - int rc; - - /* sanity check */ - LASSERT(strnlen(name, CLASS_MAX_NAME) < CLASS_MAX_NAME); - - if (class_search_type(name)) { - CDEBUG(D_IOCTL, "Type %s already registered\n", name); - return -EEXIST; - } - - rc = -ENOMEM; - type = kzalloc(sizeof(*type), GFP_NOFS); - if (!type) - return rc; - - type->typ_dt_ops = kzalloc(sizeof(*type->typ_dt_ops), GFP_NOFS); - type->typ_md_ops = kzalloc(sizeof(*type->typ_md_ops), GFP_NOFS); - type->typ_name = kzalloc(strlen(name) + 1, GFP_NOFS); - - if (!type->typ_dt_ops || - !type->typ_md_ops || - !type->typ_name) - goto failed; - - *type->typ_dt_ops = *dt_ops; - /* md_ops is optional */ - if (md_ops) - *type->typ_md_ops = *md_ops; - strcpy(type->typ_name, name); - spin_lock_init(&type->obd_type_lock); - - type->typ_debugfs_entry = debugfs_create_dir(type->typ_name, - debugfs_lustre_root); - - type->typ_kobj = kobject_create_and_add(type->typ_name, lustre_kobj); - if (!type->typ_kobj) { - rc = -ENOMEM; - goto failed; - } - - if (ldt) { - type->typ_lu = ldt; - rc = lu_device_type_init(ldt); - if (rc != 0) - goto failed; - } - - spin_lock(&obd_types_lock); - list_add(&type->typ_chain, &obd_types); - spin_unlock(&obd_types_lock); - - return 0; - - failed: - if (type->typ_kobj) - 
kobject_put(type->typ_kobj); - kfree(type->typ_name); - kfree(type->typ_md_ops); - kfree(type->typ_dt_ops); - kfree(type); - return rc; -} -EXPORT_SYMBOL(class_register_type); - -int class_unregister_type(const char *name) -{ - struct obd_type *type = class_search_type(name); - - if (!type) { - CERROR("unknown obd type\n"); - return -EINVAL; - } - - if (type->typ_refcnt) { - CERROR("type %s has refcount (%d)\n", name, type->typ_refcnt); - /* This is a bad situation, let's make the best of it */ - /* Remove ops, but leave the name for debugging */ - kfree(type->typ_dt_ops); - kfree(type->typ_md_ops); - return -EBUSY; - } - - if (type->typ_kobj) - kobject_put(type->typ_kobj); - - debugfs_remove_recursive(type->typ_debugfs_entry); - - if (type->typ_lu) - lu_device_type_fini(type->typ_lu); - - spin_lock(&obd_types_lock); - list_del(&type->typ_chain); - spin_unlock(&obd_types_lock); - kfree(type->typ_name); - kfree(type->typ_dt_ops); - kfree(type->typ_md_ops); - kfree(type); - return 0; -} /* class_unregister_type */ -EXPORT_SYMBOL(class_unregister_type); - -/** - * Create a new obd device. - * - * Find an empty slot in ::obd_devs[], create a new obd device in it. - * - * \param[in] type_name obd device type string. - * \param[in] name obd device name. - * - * \retval NULL if create fails, otherwise return the obd device - * pointer created. 
- */ -struct obd_device *class_newdev(const char *type_name, const char *name) -{ - struct obd_device *result = NULL; - struct obd_device *newdev; - struct obd_type *type = NULL; - int i; - int new_obd_minor = 0; - - if (strlen(name) >= MAX_OBD_NAME) { - CERROR("name/uuid must be < %u bytes long\n", MAX_OBD_NAME); - return ERR_PTR(-EINVAL); - } - - type = class_get_type(type_name); - if (!type) { - CERROR("OBD: unknown type: %s\n", type_name); - return ERR_PTR(-ENODEV); - } - - newdev = obd_device_alloc(); - if (!newdev) { - result = ERR_PTR(-ENOMEM); - goto out_type; - } - - LASSERT(newdev->obd_magic == OBD_DEVICE_MAGIC); - - write_lock(&obd_dev_lock); - for (i = 0; i < class_devno_max(); i++) { - struct obd_device *obd = class_num2obd(i); - - if (obd && (strcmp(name, obd->obd_name) == 0)) { - CERROR("Device %s already exists at %d, won't add\n", - name, i); - if (result) { - LASSERTF(result->obd_magic == OBD_DEVICE_MAGIC, - "%p obd_magic %08x != %08x\n", result, - result->obd_magic, OBD_DEVICE_MAGIC); - LASSERTF(result->obd_minor == new_obd_minor, - "%p obd_minor %d != %d\n", result, - result->obd_minor, new_obd_minor); - - obd_devs[result->obd_minor] = NULL; - result->obd_name[0] = '\0'; - } - result = ERR_PTR(-EEXIST); - break; - } - if (!result && !obd) { - result = newdev; - result->obd_minor = i; - new_obd_minor = i; - result->obd_type = type; - strncpy(result->obd_name, name, - sizeof(result->obd_name) - 1); - obd_devs[i] = result; - } - } - write_unlock(&obd_dev_lock); - - if (!result && i >= class_devno_max()) { - CERROR("all %u OBD devices used, increase MAX_OBD_DEVICES\n", - class_devno_max()); - result = ERR_PTR(-EOVERFLOW); - goto out; - } - - if (IS_ERR(result)) - goto out; - - CDEBUG(D_IOCTL, "Adding new device %s (%p)\n", - result->obd_name, result); - - return result; -out: - obd_device_free(newdev); -out_type: - class_put_type(type); - return result; -} - -void class_release_dev(struct obd_device *obd) -{ - struct obd_type *obd_type = 
obd->obd_type; - - LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "%p obd_magic %08x != %08x\n", - obd, obd->obd_magic, OBD_DEVICE_MAGIC); - LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n", - obd, obd->obd_minor, obd_devs[obd->obd_minor]); - LASSERT(obd_type); - - CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n", - obd->obd_name, obd->obd_minor, obd->obd_type->typ_name); - - write_lock(&obd_dev_lock); - obd_devs[obd->obd_minor] = NULL; - write_unlock(&obd_dev_lock); - obd_device_free(obd); - - class_put_type(obd_type); -} - -int class_name2dev(const char *name) -{ - int i; - - if (!name) - return -1; - - read_lock(&obd_dev_lock); - for (i = 0; i < class_devno_max(); i++) { - struct obd_device *obd = class_num2obd(i); - - if (obd && strcmp(name, obd->obd_name) == 0) { - /* Make sure we finished attaching before we give - * out any references - */ - LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); - if (obd->obd_attached) { - read_unlock(&obd_dev_lock); - return i; - } - break; - } - } - read_unlock(&obd_dev_lock); - - return -1; -} - -struct obd_device *class_name2obd(const char *name) -{ - int dev = class_name2dev(name); - - if (dev < 0 || dev > class_devno_max()) - return NULL; - return class_num2obd(dev); -} -EXPORT_SYMBOL(class_name2obd); - -int class_uuid2dev(struct obd_uuid *uuid) -{ - int i; - - read_lock(&obd_dev_lock); - for (i = 0; i < class_devno_max(); i++) { - struct obd_device *obd = class_num2obd(i); - - if (obd && obd_uuid_equals(uuid, &obd->obd_uuid)) { - LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); - read_unlock(&obd_dev_lock); - return i; - } - } - read_unlock(&obd_dev_lock); - - return -1; -} - -/** - * Get obd device from ::obd_devs[] - * - * \param num [in] array index - * - * \retval NULL if ::obd_devs[\a num] does not contains an obd device - * otherwise return the obd device there. 
- */ -struct obd_device *class_num2obd(int num) -{ - struct obd_device *obd = NULL; - - if (num < class_devno_max()) { - obd = obd_devs[num]; - if (!obd) - return NULL; - - LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, - "%p obd_magic %08x != %08x\n", - obd, obd->obd_magic, OBD_DEVICE_MAGIC); - LASSERTF(obd->obd_minor == num, - "%p obd_minor %0d != %0d\n", - obd, obd->obd_minor, num); - } - - return obd; -} - -/* Search for a client OBD connected to tgt_uuid. If grp_uuid is - * specified, then only the client with that uuid is returned, - * otherwise any client connected to the tgt is returned. - */ -struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid) -{ - int i; - - read_lock(&obd_dev_lock); - for (i = 0; i < class_devno_max(); i++) { - struct obd_device *obd = class_num2obd(i); - - if (!obd) - continue; - if ((strncmp(obd->obd_type->typ_name, typ_name, - strlen(typ_name)) == 0)) { - if (obd_uuid_equals(tgt_uuid, - &obd->u.cli.cl_target_uuid) && - ((grp_uuid) ? obd_uuid_equals(grp_uuid, - &obd->obd_uuid) : 1)) { - read_unlock(&obd_dev_lock); - return obd; - } - } - } - read_unlock(&obd_dev_lock); - - return NULL; -} -EXPORT_SYMBOL(class_find_client_obd); - -/* Iterate the obd_device list looking devices have grp_uuid. Start - * searching at *next, and if a device is found, the next index to look - * at is saved in *next. If next is NULL, then the first matching device - * will always be returned. 
- */ -struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next) -{ - int i; - - if (!next) - i = 0; - else if (*next >= 0 && *next < class_devno_max()) - i = *next; - else - return NULL; - - read_lock(&obd_dev_lock); - for (; i < class_devno_max(); i++) { - struct obd_device *obd = class_num2obd(i); - - if (!obd) - continue; - if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) { - if (next) - *next = i + 1; - read_unlock(&obd_dev_lock); - return obd; - } - } - read_unlock(&obd_dev_lock); - - return NULL; -} -EXPORT_SYMBOL(class_devices_in_group); - -/** - * to notify sptlrpc log for \a fsname has changed, let every relevant OBD - * adjust sptlrpc settings accordingly. - */ -int class_notify_sptlrpc_conf(const char *fsname, int namelen) -{ - struct obd_device *obd; - const char *type; - int i, rc = 0, rc2; - - LASSERT(namelen > 0); - - read_lock(&obd_dev_lock); - for (i = 0; i < class_devno_max(); i++) { - obd = class_num2obd(i); - - if (!obd || obd->obd_set_up == 0 || obd->obd_stopping) - continue; - - /* only notify mdc, osc, mdt, ost */ - type = obd->obd_type->typ_name; - if (strcmp(type, LUSTRE_MDC_NAME) != 0 && - strcmp(type, LUSTRE_OSC_NAME) != 0 && - strcmp(type, LUSTRE_MDT_NAME) != 0 && - strcmp(type, LUSTRE_OST_NAME) != 0) - continue; - - if (strncmp(obd->obd_name, fsname, namelen)) - continue; - - class_incref(obd, __func__, obd); - read_unlock(&obd_dev_lock); - rc2 = obd_set_info_async(NULL, obd->obd_self_export, - sizeof(KEY_SPTLRPC_CONF), - KEY_SPTLRPC_CONF, 0, NULL, NULL); - rc = rc ? 
rc : rc2; - class_decref(obd, __func__, obd); - read_lock(&obd_dev_lock); - } - read_unlock(&obd_dev_lock); - return rc; -} -EXPORT_SYMBOL(class_notify_sptlrpc_conf); - -void obd_cleanup_caches(void) -{ - kmem_cache_destroy(obd_device_cachep); - obd_device_cachep = NULL; - kmem_cache_destroy(obdo_cachep); - obdo_cachep = NULL; - kmem_cache_destroy(import_cachep); - import_cachep = NULL; -} - -int obd_init_caches(void) -{ - LASSERT(!obd_device_cachep); - obd_device_cachep = kmem_cache_create("ll_obd_dev_cache", - sizeof(struct obd_device), - 0, 0, NULL); - if (!obd_device_cachep) - goto out; - - LASSERT(!obdo_cachep); - obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo), - 0, 0, NULL); - if (!obdo_cachep) - goto out; - - LASSERT(!import_cachep); - import_cachep = kmem_cache_create("ll_import_cache", - sizeof(struct obd_import), - 0, 0, NULL); - if (!import_cachep) - goto out; - - return 0; - out: - obd_cleanup_caches(); - return -ENOMEM; -} - -/* map connection to client */ -struct obd_export *class_conn2export(struct lustre_handle *conn) -{ - struct obd_export *export; - - if (!conn) { - CDEBUG(D_CACHE, "looking for null handle\n"); - return NULL; - } - - if (conn->cookie == -1) { /* this means assign a new connection */ - CDEBUG(D_CACHE, "want a new connection\n"); - return NULL; - } - - CDEBUG(D_INFO, "looking for export cookie %#llx\n", conn->cookie); - export = class_handle2object(conn->cookie, NULL); - return export; -} -EXPORT_SYMBOL(class_conn2export); - -struct obd_device *class_exp2obd(struct obd_export *exp) -{ - if (exp) - return exp->exp_obd; - return NULL; -} -EXPORT_SYMBOL(class_exp2obd); - -struct obd_import *class_exp2cliimp(struct obd_export *exp) -{ - struct obd_device *obd = exp->exp_obd; - - if (!obd) - return NULL; - return obd->u.cli.cl_import; -} -EXPORT_SYMBOL(class_exp2cliimp); - -/* Export management functions */ -static void class_export_destroy(struct obd_export *exp) -{ - struct obd_device *obd = exp->exp_obd; - - 
LASSERT_ATOMIC_ZERO(&exp->exp_refcount); - LASSERT(obd); - - CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp, - exp->exp_client_uuid.uuid, obd->obd_name); - - /* "Local" exports (lctl, LOV->{mdc,osc}) have no connection. */ - if (exp->exp_connection) - ptlrpc_put_connection_superhack(exp->exp_connection); - - LASSERT(list_empty(&exp->exp_outstanding_replies)); - LASSERT(list_empty(&exp->exp_uncommitted_replies)); - LASSERT(list_empty(&exp->exp_req_replay_queue)); - LASSERT(list_empty(&exp->exp_hp_rpcs)); - obd_destroy_export(exp); - class_decref(obd, "export", exp); - - OBD_FREE_RCU(exp, sizeof(*exp), &exp->exp_handle); -} - -static void export_handle_addref(void *export) -{ - class_export_get(export); -} - -static struct portals_handle_ops export_handle_ops = { - .hop_addref = export_handle_addref, - .hop_free = NULL, -}; - -struct obd_export *class_export_get(struct obd_export *exp) -{ - atomic_inc(&exp->exp_refcount); - CDEBUG(D_INFO, "GETting export %p : new refcount %d\n", exp, - atomic_read(&exp->exp_refcount)); - return exp; -} -EXPORT_SYMBOL(class_export_get); - -void class_export_put(struct obd_export *exp) -{ - LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON); - CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp, - atomic_read(&exp->exp_refcount) - 1); - - if (atomic_dec_and_test(&exp->exp_refcount)) { - LASSERT(!list_empty(&exp->exp_obd_chain)); - CDEBUG(D_IOCTL, "final put %p/%s\n", - exp, exp->exp_client_uuid.uuid); - - /* release nid stat refererence */ - lprocfs_exp_cleanup(exp); - - obd_zombie_export_add(exp); - } -} -EXPORT_SYMBOL(class_export_put); - -static void obd_zombie_exp_cull(struct work_struct *ws) -{ - struct obd_export *export = container_of(ws, struct obd_export, exp_zombie_work); - - class_export_destroy(export); -} - -/* Creates a new export, adds it to the hash table, and returns a - * pointer to it. The refcount is 2: one for the hash reference, and - * one for the pointer returned by this function. 
- */ -struct obd_export *class_new_export(struct obd_device *obd, - struct obd_uuid *cluuid) -{ - struct obd_export *export; - int rc = 0; - - export = kzalloc(sizeof(*export), GFP_NOFS); - if (!export) - return ERR_PTR(-ENOMEM); - - export->exp_conn_cnt = 0; - atomic_set(&export->exp_refcount, 2); - atomic_set(&export->exp_rpc_count, 0); - atomic_set(&export->exp_cb_count, 0); - atomic_set(&export->exp_locks_count, 0); -#if LUSTRE_TRACKS_LOCK_EXP_REFS - INIT_LIST_HEAD(&export->exp_locks_list); - spin_lock_init(&export->exp_locks_list_guard); -#endif - atomic_set(&export->exp_replay_count, 0); - export->exp_obd = obd; - INIT_LIST_HEAD(&export->exp_outstanding_replies); - spin_lock_init(&export->exp_uncommitted_replies_lock); - INIT_LIST_HEAD(&export->exp_uncommitted_replies); - INIT_LIST_HEAD(&export->exp_req_replay_queue); - INIT_LIST_HEAD(&export->exp_handle.h_link); - INIT_LIST_HEAD(&export->exp_hp_rpcs); - class_handle_hash(&export->exp_handle, &export_handle_ops); - spin_lock_init(&export->exp_lock); - spin_lock_init(&export->exp_rpc_lock); - spin_lock_init(&export->exp_bl_list_lock); - INIT_LIST_HEAD(&export->exp_bl_list); - INIT_WORK(&export->exp_zombie_work, obd_zombie_exp_cull); - - export->exp_sp_peer = LUSTRE_SP_ANY; - export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID; - export->exp_client_uuid = *cluuid; - obd_init_export(export); - - spin_lock(&obd->obd_dev_lock); - /* shouldn't happen, but might race */ - if (obd->obd_stopping) { - rc = -ENODEV; - goto exit_unlock; - } - - if (!obd_uuid_equals(cluuid, &obd->obd_uuid)) { - rc = obd_uuid_add(obd, export); - if (rc) { - LCONSOLE_WARN("%s: denying duplicate export for %s, %d\n", - obd->obd_name, cluuid->uuid, rc); - goto exit_unlock; - } - } - - class_incref(obd, "export", export); - list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports); - export->exp_obd->obd_num_exports++; - spin_unlock(&obd->obd_dev_lock); - return export; - -exit_unlock: - spin_unlock(&obd->obd_dev_lock); - 
class_handle_unhash(&export->exp_handle); - obd_destroy_export(export); - kfree(export); - return ERR_PTR(rc); -} -EXPORT_SYMBOL(class_new_export); - -void class_unlink_export(struct obd_export *exp) -{ - class_handle_unhash(&exp->exp_handle); - - spin_lock(&exp->exp_obd->obd_dev_lock); - /* delete an uuid-export hashitem from hashtables */ - if (exp != exp->exp_obd->obd_self_export) - obd_uuid_del(exp->exp_obd, exp); - - list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports); - exp->exp_obd->obd_num_exports--; - spin_unlock(&exp->exp_obd->obd_dev_lock); - class_export_put(exp); -} - -/* Import management functions */ -static void class_import_destroy(struct obd_import *imp) -{ - CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp, - imp->imp_obd->obd_name); - - LASSERT_ATOMIC_ZERO(&imp->imp_refcount); - - ptlrpc_put_connection_superhack(imp->imp_connection); - - while (!list_empty(&imp->imp_conn_list)) { - struct obd_import_conn *imp_conn; - - imp_conn = list_entry(imp->imp_conn_list.next, - struct obd_import_conn, oic_item); - list_del_init(&imp_conn->oic_item); - ptlrpc_put_connection_superhack(imp_conn->oic_conn); - kfree(imp_conn); - } - - LASSERT(!imp->imp_sec); - class_decref(imp->imp_obd, "import", imp); - OBD_FREE_RCU(imp, sizeof(*imp), &imp->imp_handle); -} - -static void import_handle_addref(void *import) -{ - class_import_get(import); -} - -static struct portals_handle_ops import_handle_ops = { - .hop_addref = import_handle_addref, - .hop_free = NULL, -}; - -struct obd_import *class_import_get(struct obd_import *import) -{ - atomic_inc(&import->imp_refcount); - CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", import, - atomic_read(&import->imp_refcount), - import->imp_obd->obd_name); - return import; -} -EXPORT_SYMBOL(class_import_get); - -void class_import_put(struct obd_import *imp) -{ - LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON); - - CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp, - atomic_read(&imp->imp_refcount) - 1, 
- imp->imp_obd->obd_name); - - if (atomic_dec_and_test(&imp->imp_refcount)) { - CDEBUG(D_INFO, "final put import %p\n", imp); - obd_zombie_import_add(imp); - } - - /* catch possible import put race */ - LASSERT_ATOMIC_GE_LT(&imp->imp_refcount, 0, LI_POISON); -} -EXPORT_SYMBOL(class_import_put); - -static void init_imp_at(struct imp_at *at) -{ - int i; - - at_init(&at->iat_net_latency, 0, 0); - for (i = 0; i < IMP_AT_MAX_PORTALS; i++) { - /* max service estimates are tracked on the server side, so - * don't use the AT history here, just use the last reported - * val. (But keep hist for proc histogram, worst_ever) - */ - at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT, - AT_FLG_NOHIST); - } -} - -static void obd_zombie_imp_cull(struct work_struct *ws) -{ - struct obd_import *import = container_of(ws, struct obd_import, imp_zombie_work); - - class_import_destroy(import); -} - -struct obd_import *class_new_import(struct obd_device *obd) -{ - struct obd_import *imp; - - imp = kzalloc(sizeof(*imp), GFP_NOFS); - if (!imp) - return NULL; - - INIT_LIST_HEAD(&imp->imp_pinger_chain); - INIT_LIST_HEAD(&imp->imp_replay_list); - INIT_LIST_HEAD(&imp->imp_sending_list); - INIT_LIST_HEAD(&imp->imp_delayed_list); - INIT_LIST_HEAD(&imp->imp_committed_list); - INIT_LIST_HEAD(&imp->imp_unreplied_list); - imp->imp_known_replied_xid = 0; - imp->imp_replay_cursor = &imp->imp_committed_list; - spin_lock_init(&imp->imp_lock); - imp->imp_last_success_conn = 0; - imp->imp_state = LUSTRE_IMP_NEW; - imp->imp_obd = class_incref(obd, "import", imp); - mutex_init(&imp->imp_sec_mutex); - init_waitqueue_head(&imp->imp_recovery_waitq); - INIT_WORK(&imp->imp_zombie_work, obd_zombie_imp_cull); - - atomic_set(&imp->imp_refcount, 2); - atomic_set(&imp->imp_unregistering, 0); - atomic_set(&imp->imp_inflight, 0); - atomic_set(&imp->imp_replay_inflight, 0); - atomic_set(&imp->imp_inval_count, 0); - INIT_LIST_HEAD(&imp->imp_conn_list); - INIT_LIST_HEAD(&imp->imp_handle.h_link); - 
class_handle_hash(&imp->imp_handle, &import_handle_ops); - init_imp_at(&imp->imp_at); - - /* the default magic is V2, will be used in connect RPC, and - * then adjusted according to the flags in request/reply. - */ - imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2; - - return imp; -} -EXPORT_SYMBOL(class_new_import); - -void class_destroy_import(struct obd_import *import) -{ - LASSERT(import); - LASSERT(import != LP_POISON); - - class_handle_unhash(&import->imp_handle); - - spin_lock(&import->imp_lock); - import->imp_generation++; - spin_unlock(&import->imp_lock); - class_import_put(import); -} -EXPORT_SYMBOL(class_destroy_import); - -#if LUSTRE_TRACKS_LOCK_EXP_REFS - -void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock) -{ - spin_lock(&exp->exp_locks_list_guard); - - LASSERT(lock->l_exp_refs_nr >= 0); - - if (lock->l_exp_refs_target && lock->l_exp_refs_target != exp) { - LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n", - exp, lock, lock->l_exp_refs_target); - } - if ((lock->l_exp_refs_nr++) == 0) { - list_add(&lock->l_exp_refs_link, &exp->exp_locks_list); - lock->l_exp_refs_target = exp; - } - CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n", - lock, exp, lock->l_exp_refs_nr); - spin_unlock(&exp->exp_locks_list_guard); -} - -void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock) -{ - spin_lock(&exp->exp_locks_list_guard); - LASSERT(lock->l_exp_refs_nr > 0); - if (lock->l_exp_refs_target != exp) { - LCONSOLE_WARN("lock %p, mismatching export pointers: %p, %p\n", - lock, lock->l_exp_refs_target, exp); - } - if (-- lock->l_exp_refs_nr == 0) { - list_del_init(&lock->l_exp_refs_link); - lock->l_exp_refs_target = NULL; - } - CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n", - lock, exp, lock->l_exp_refs_nr); - spin_unlock(&exp->exp_locks_list_guard); -} -#endif - -/* A connection defines an export context in which preallocation can - * be managed. 
This releases the export pointer reference, and returns - * the export handle, so the export refcount is 1 when this function - * returns. - */ -int class_connect(struct lustre_handle *conn, struct obd_device *obd, - struct obd_uuid *cluuid) -{ - struct obd_export *export; - - LASSERT(conn); - LASSERT(obd); - LASSERT(cluuid); - - export = class_new_export(obd, cluuid); - if (IS_ERR(export)) - return PTR_ERR(export); - - conn->cookie = export->exp_handle.h_cookie; - class_export_put(export); - - CDEBUG(D_IOCTL, "connect: client %s, cookie %#llx\n", - cluuid->uuid, conn->cookie); - return 0; -} -EXPORT_SYMBOL(class_connect); - -/* This function removes 1-3 references from the export: - * 1 - for export pointer passed - * and if disconnect really need - * 2 - removing from hash - * 3 - in client_unlink_export - * The export pointer passed to this function can destroyed - */ -int class_disconnect(struct obd_export *export) -{ - int already_disconnected; - - if (!export) { - CWARN("attempting to free NULL export %p\n", export); - return -EINVAL; - } - - spin_lock(&export->exp_lock); - already_disconnected = export->exp_disconnected; - export->exp_disconnected = 1; - spin_unlock(&export->exp_lock); - - /* class_cleanup(), abort_recovery(), and class_fail_export() - * all end up in here, and if any of them race we shouldn't - * call extra class_export_puts(). 
- */ - if (already_disconnected) - goto no_disconn; - - CDEBUG(D_IOCTL, "disconnect: cookie %#llx\n", - export->exp_handle.h_cookie); - - class_unlink_export(export); -no_disconn: - class_export_put(export); - return 0; -} -EXPORT_SYMBOL(class_disconnect); - -void class_fail_export(struct obd_export *exp) -{ - int rc, already_failed; - - spin_lock(&exp->exp_lock); - already_failed = exp->exp_failed; - exp->exp_failed = 1; - spin_unlock(&exp->exp_lock); - - if (already_failed) { - CDEBUG(D_HA, "disconnecting dead export %p/%s; skipping\n", - exp, exp->exp_client_uuid.uuid); - return; - } - - CDEBUG(D_HA, "disconnecting export %p/%s\n", - exp, exp->exp_client_uuid.uuid); - - if (obd_dump_on_timeout) - libcfs_debug_dumplog(); - - /* need for safe call CDEBUG after obd_disconnect */ - class_export_get(exp); - - /* Most callers into obd_disconnect are removing their own reference - * (request, for example) in addition to the one from the hash table. - * We don't have such a reference here, so make one. - */ - class_export_get(exp); - rc = obd_disconnect(exp); - if (rc) - CERROR("disconnecting export %p failed: %d\n", exp, rc); - else - CDEBUG(D_HA, "disconnected export %p/%s\n", - exp, exp->exp_client_uuid.uuid); - class_export_put(exp); -} -EXPORT_SYMBOL(class_fail_export); - -#if LUSTRE_TRACKS_LOCK_EXP_REFS -void (*class_export_dump_hook)(struct obd_export *) = NULL; -#endif - -/** - * Add export to the obd_zombie thread and notify it. - */ -static void obd_zombie_export_add(struct obd_export *exp) -{ - spin_lock(&exp->exp_obd->obd_dev_lock); - LASSERT(!list_empty(&exp->exp_obd_chain)); - list_del_init(&exp->exp_obd_chain); - spin_unlock(&exp->exp_obd->obd_dev_lock); - queue_work(zombie_wq, &exp->exp_zombie_work); -} - -/** - * Add import to the obd_zombie thread and notify it. 
- */ -static void obd_zombie_import_add(struct obd_import *imp) -{ - LASSERT(!imp->imp_sec); - queue_work(zombie_wq, &imp->imp_zombie_work); -} - -/** - * wait when obd_zombie import/export queues become empty - */ -void obd_zombie_barrier(void) -{ - flush_workqueue(zombie_wq); -} -EXPORT_SYMBOL(obd_zombie_barrier); - -/** - * start destroy zombie import/export thread - */ -int obd_zombie_impexp_init(void) -{ - zombie_wq = alloc_workqueue("obd_zombid", 0, 0); - if (!zombie_wq) - return -ENOMEM; - - return 0; -} - -/** - * stop destroy zombie import/export thread - */ -void obd_zombie_impexp_stop(void) -{ - destroy_workqueue(zombie_wq); -} - -struct obd_request_slot_waiter { - struct list_head orsw_entry; - wait_queue_head_t orsw_waitq; - bool orsw_signaled; -}; - -static bool obd_request_slot_avail(struct client_obd *cli, - struct obd_request_slot_waiter *orsw) -{ - bool avail; - - spin_lock(&cli->cl_loi_list_lock); - avail = !!list_empty(&orsw->orsw_entry); - spin_unlock(&cli->cl_loi_list_lock); - - return avail; -}; - -/* - * For network flow control, the RPC sponsor needs to acquire a credit - * before sending the RPC. The credits count for a connection is defined - * by the "cl_max_rpcs_in_flight". If all the credits are occpuied, then - * the subsequent RPC sponsors need to wait until others released their - * credits, or the administrator increased the "cl_max_rpcs_in_flight". 
- */ -int obd_get_request_slot(struct client_obd *cli) -{ - struct obd_request_slot_waiter orsw; - int rc; - - spin_lock(&cli->cl_loi_list_lock); - if (cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight) { - cli->cl_r_in_flight++; - spin_unlock(&cli->cl_loi_list_lock); - return 0; - } - - init_waitqueue_head(&orsw.orsw_waitq); - list_add_tail(&orsw.orsw_entry, &cli->cl_loi_read_list); - orsw.orsw_signaled = false; - spin_unlock(&cli->cl_loi_list_lock); - - rc = l_wait_event_abortable(orsw.orsw_waitq, - obd_request_slot_avail(cli, &orsw) || - orsw.orsw_signaled); - - /* - * Here, we must take the lock to avoid the on-stack 'orsw' to be - * freed but other (such as obd_put_request_slot) is using it. - */ - spin_lock(&cli->cl_loi_list_lock); - if (rc) { - if (!orsw.orsw_signaled) { - if (list_empty(&orsw.orsw_entry)) - cli->cl_r_in_flight--; - else - list_del(&orsw.orsw_entry); - } - } - - if (orsw.orsw_signaled) { - LASSERT(list_empty(&orsw.orsw_entry)); - - rc = -EINTR; - } - spin_unlock(&cli->cl_loi_list_lock); - - return rc; -} -EXPORT_SYMBOL(obd_get_request_slot); - -void obd_put_request_slot(struct client_obd *cli) -{ - struct obd_request_slot_waiter *orsw; - - spin_lock(&cli->cl_loi_list_lock); - cli->cl_r_in_flight--; - - /* If there is free slot, wakeup the first waiter. 
*/ - if (!list_empty(&cli->cl_loi_read_list) && - likely(cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight)) { - orsw = list_entry(cli->cl_loi_read_list.next, - struct obd_request_slot_waiter, orsw_entry); - list_del_init(&orsw->orsw_entry); - cli->cl_r_in_flight++; - wake_up(&orsw->orsw_waitq); - } - spin_unlock(&cli->cl_loi_list_lock); -} -EXPORT_SYMBOL(obd_put_request_slot); - -__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli) -{ - return cli->cl_max_rpcs_in_flight; -} -EXPORT_SYMBOL(obd_get_max_rpcs_in_flight); - -int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max) -{ - struct obd_request_slot_waiter *orsw; - const char *typ_name; - __u32 old; - int diff; - int rc; - int i; - - if (max > OBD_MAX_RIF_MAX || max < 1) - return -ERANGE; - - typ_name = cli->cl_import->imp_obd->obd_type->typ_name; - if (!strcmp(typ_name, LUSTRE_MDC_NAME)) { - /* - * adjust max_mod_rpcs_in_flight to ensure it is always - * strictly lower that max_rpcs_in_flight - */ - if (max < 2) { - CERROR("%s: cannot set max_rpcs_in_flight to 1 because it must be higher than max_mod_rpcs_in_flight value\n", - cli->cl_import->imp_obd->obd_name); - return -ERANGE; - } - if (max <= cli->cl_max_mod_rpcs_in_flight) { - rc = obd_set_max_mod_rpcs_in_flight(cli, max - 1); - if (rc) - return rc; - } - } - - spin_lock(&cli->cl_loi_list_lock); - old = cli->cl_max_rpcs_in_flight; - cli->cl_max_rpcs_in_flight = max; - diff = max - old; - - /* We increase the max_rpcs_in_flight, then wakeup some waiters. 
*/ - for (i = 0; i < diff; i++) { - if (list_empty(&cli->cl_loi_read_list)) - break; - - orsw = list_entry(cli->cl_loi_read_list.next, - struct obd_request_slot_waiter, orsw_entry); - list_del_init(&orsw->orsw_entry); - cli->cl_r_in_flight++; - wake_up(&orsw->orsw_waitq); - } - spin_unlock(&cli->cl_loi_list_lock); - - return 0; -} -EXPORT_SYMBOL(obd_set_max_rpcs_in_flight); - -int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, __u16 max) -{ - struct obd_connect_data *ocd; - u16 maxmodrpcs; - u16 prev; - - if (max > OBD_MAX_RIF_MAX || max < 1) - return -ERANGE; - - /* cannot exceed or equal max_rpcs_in_flight */ - if (max >= cli->cl_max_rpcs_in_flight) { - CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher or equal to max_rpcs_in_flight value (%u)\n", - cli->cl_import->imp_obd->obd_name, - max, cli->cl_max_rpcs_in_flight); - return -ERANGE; - } - - /* cannot exceed max modify RPCs in flight supported by the server */ - ocd = &cli->cl_import->imp_connect_data; - if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) - maxmodrpcs = ocd->ocd_maxmodrpcs; - else - maxmodrpcs = 1; - if (max > maxmodrpcs) { - CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than max_mod_rpcs_per_client value (%hu) returned by the server at connection\n", - cli->cl_import->imp_obd->obd_name, - max, maxmodrpcs); - return -ERANGE; - } - - spin_lock(&cli->cl_mod_rpcs_lock); - - prev = cli->cl_max_mod_rpcs_in_flight; - cli->cl_max_mod_rpcs_in_flight = max; - - /* wakeup waiters if limit has been increased */ - if (cli->cl_max_mod_rpcs_in_flight > prev) - wake_up(&cli->cl_mod_rpcs_waitq); - - spin_unlock(&cli->cl_mod_rpcs_lock); - - return 0; -} -EXPORT_SYMBOL(obd_set_max_mod_rpcs_in_flight); - -#define pct(a, b) (b ? 
(a * 100) / b : 0) - -int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq) -{ - unsigned long mod_tot = 0, mod_cum; - struct timespec64 now; - int i; - - ktime_get_real_ts64(&now); - - spin_lock(&cli->cl_mod_rpcs_lock); - - seq_printf(seq, "snapshot_time: %llu.%9lu (secs.nsecs)\n", - (s64)now.tv_sec, (unsigned long)now.tv_nsec); - seq_printf(seq, "modify_RPCs_in_flight: %hu\n", - cli->cl_mod_rpcs_in_flight); - - seq_puts(seq, "\n\t\t\tmodify\n"); - seq_puts(seq, "rpcs in flight rpcs %% cum %%\n"); - - mod_tot = lprocfs_oh_sum(&cli->cl_mod_rpcs_hist); - - mod_cum = 0; - for (i = 0; i < OBD_HIST_MAX; i++) { - unsigned long mod = cli->cl_mod_rpcs_hist.oh_buckets[i]; - - mod_cum += mod; - seq_printf(seq, "%d:\t\t%10lu %3lu %3lu\n", - i, mod, pct(mod, mod_tot), - pct(mod_cum, mod_tot)); - if (mod_cum == mod_tot) - break; - } - - spin_unlock(&cli->cl_mod_rpcs_lock); - - return 0; -} -EXPORT_SYMBOL(obd_mod_rpc_stats_seq_show); -#undef pct - -/* - * The number of modify RPCs sent in parallel is limited - * because the server has a finite number of slots per client to - * store request result and ensure reply reconstruction when needed. - * On the client, this limit is stored in cl_max_mod_rpcs_in_flight - * that takes into account server limit and cl_max_rpcs_in_flight - * value. - * On the MDC client, to avoid a potential deadlock (see Bugzilla 3462), - * one close request is allowed above the maximum. 
- */ -static inline bool obd_mod_rpc_slot_avail_locked(struct client_obd *cli, - bool close_req) -{ - bool avail; - - /* A slot is available if - * - number of modify RPCs in flight is less than the max - * - it's a close RPC and no other close request is in flight - */ - avail = cli->cl_mod_rpcs_in_flight < cli->cl_max_mod_rpcs_in_flight || - (close_req && !cli->cl_close_rpcs_in_flight); - - return avail; -} - -static inline bool obd_mod_rpc_slot_avail(struct client_obd *cli, - bool close_req) -{ - bool avail; - - spin_lock(&cli->cl_mod_rpcs_lock); - avail = obd_mod_rpc_slot_avail_locked(cli, close_req); - spin_unlock(&cli->cl_mod_rpcs_lock); - return avail; -} - -/* Get a modify RPC slot from the obd client @cli according - * to the kind of operation @opc that is going to be sent - * and the intent @it of the operation if it applies. - * If the maximum number of modify RPCs in flight is reached - * the thread is put to sleep. - * Returns the tag to be set in the request message. Tag 0 - * is reserved for non-modifying requests. 
- */ -u16 obd_get_mod_rpc_slot(struct client_obd *cli, __u32 opc, - struct lookup_intent *it) -{ - bool close_req = false; - u16 i, max; - - /* read-only metadata RPCs don't consume a slot on MDT - * for reply reconstruction - */ - if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT || it->it_op == IT_READDIR)) - return 0; - - if (opc == MDS_CLOSE) - close_req = true; - - do { - spin_lock(&cli->cl_mod_rpcs_lock); - max = cli->cl_max_mod_rpcs_in_flight; - if (obd_mod_rpc_slot_avail_locked(cli, close_req)) { - /* there is a slot available */ - cli->cl_mod_rpcs_in_flight++; - if (close_req) - cli->cl_close_rpcs_in_flight++; - lprocfs_oh_tally(&cli->cl_mod_rpcs_hist, - cli->cl_mod_rpcs_in_flight); - /* find a free tag */ - i = find_first_zero_bit(cli->cl_mod_tag_bitmap, - max + 1); - LASSERT(i < OBD_MAX_RIF_MAX); - LASSERT(!test_and_set_bit(i, cli->cl_mod_tag_bitmap)); - spin_unlock(&cli->cl_mod_rpcs_lock); - /* tag 0 is reserved for non-modify RPCs */ - return i + 1; - } - spin_unlock(&cli->cl_mod_rpcs_lock); - - CDEBUG(D_RPCTRACE, "%s: sleeping for a modify RPC slot opc %u, max %hu\n", - cli->cl_import->imp_obd->obd_name, opc, max); - - wait_event_idle(cli->cl_mod_rpcs_waitq, - obd_mod_rpc_slot_avail(cli, close_req)); - } while (true); -} -EXPORT_SYMBOL(obd_get_mod_rpc_slot); - -/* - * Put a modify RPC slot from the obd client @cli according - * to the kind of operation @opc that has been sent and the - * intent @it of the operation if it applies. 
- */ -void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc, - struct lookup_intent *it, u16 tag) -{ - bool close_req = false; - - if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT || it->it_op == IT_READDIR)) - return; - - if (opc == MDS_CLOSE) - close_req = true; - - spin_lock(&cli->cl_mod_rpcs_lock); - cli->cl_mod_rpcs_in_flight--; - if (close_req) - cli->cl_close_rpcs_in_flight--; - /* release the tag in the bitmap */ - LASSERT(tag - 1 < OBD_MAX_RIF_MAX); - LASSERT(test_and_clear_bit(tag - 1, cli->cl_mod_tag_bitmap) != 0); - spin_unlock(&cli->cl_mod_rpcs_lock); - wake_up(&cli->cl_mod_rpcs_waitq); -} -EXPORT_SYMBOL(obd_put_mod_rpc_slot); diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c deleted file mode 100644 index 63067a7f1e19..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c +++ /dev/null @@ -1,240 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Author: Nathan Rutman - * - * Kernel <-> userspace communication routines. - * Using pipes for all arches. - */ - -#define DEBUG_SUBSYSTEM S_CLASS -#define D_KUC D_OTHER - -#include -#include -#include -#include - -/** - * libcfs_kkuc_msg_put - send an message from kernel to userspace - * @param fp to send the message to - * @param payload Payload data. First field of payload is always - * struct kuc_hdr - */ -int libcfs_kkuc_msg_put(struct file *filp, void *payload) -{ - struct kuc_hdr *kuch = (struct kuc_hdr *)payload; - ssize_t count = kuch->kuc_msglen; - loff_t offset = 0; - int rc = -ENXIO; - - if (IS_ERR_OR_NULL(filp)) - return -EBADF; - - if (kuch->kuc_magic != KUC_MAGIC) { - CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic); - return rc; - } - - while (count > 0) { - rc = kernel_write(filp, payload, count, &offset); - if (rc < 0) - break; - count -= rc; - payload += rc; - rc = 0; - } - - if (rc < 0) - CWARN("message send failed (%d)\n", rc); - else - CDEBUG(D_KUC, "Sent message rc=%d, fp=%p\n", rc, filp); - - return rc; -} -EXPORT_SYMBOL(libcfs_kkuc_msg_put); - -/* - * Broadcast groups are global across all mounted filesystems; - * i.e. 
registering for a group on 1 fs will get messages for that - * group from any fs - */ -/** A single group registration has a uid and a file pointer */ -struct kkuc_reg { - struct list_head kr_chain; - int kr_uid; - struct file *kr_fp; - char kr_data[0]; -}; - -static struct list_head kkuc_groups[KUC_GRP_MAX + 1] = {}; -/* Protect message sending against remove and adds */ -static DECLARE_RWSEM(kg_sem); - -/** Add a receiver to a broadcast group - * @param filp pipe to write into - * @param uid identifier for this receiver - * @param group group number - * @param data user data - */ -int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, - void *data, size_t data_len) -{ - struct kkuc_reg *reg; - - if (group > KUC_GRP_MAX) { - CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); - return -EINVAL; - } - - /* fput in group_rem */ - if (!filp) - return -EBADF; - - /* freed in group_rem */ - reg = kmalloc(sizeof(*reg) + data_len, 0); - if (!reg) - return -ENOMEM; - - reg->kr_fp = filp; - reg->kr_uid = uid; - memcpy(reg->kr_data, data, data_len); - - down_write(&kg_sem); - if (!kkuc_groups[group].next) - INIT_LIST_HEAD(&kkuc_groups[group]); - list_add(®->kr_chain, &kkuc_groups[group]); - up_write(&kg_sem); - - CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group); - - return 0; -} -EXPORT_SYMBOL(libcfs_kkuc_group_add); - -int libcfs_kkuc_group_rem(int uid, unsigned int group) -{ - struct kkuc_reg *reg, *next; - - if (!kkuc_groups[group].next) - return 0; - - if (!uid) { - /* Broadcast a shutdown message */ - struct kuc_hdr lh; - - lh.kuc_magic = KUC_MAGIC; - lh.kuc_transport = KUC_TRANSPORT_GENERIC; - lh.kuc_msgtype = KUC_MSG_SHUTDOWN; - lh.kuc_msglen = sizeof(lh); - libcfs_kkuc_group_put(group, &lh); - } - - down_write(&kg_sem); - list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { - if (!uid || (uid == reg->kr_uid)) { - list_del(®->kr_chain); - CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", - reg->kr_uid, 
reg->kr_fp, group); - if (reg->kr_fp) - fput(reg->kr_fp); - kfree(reg); - } - } - up_write(&kg_sem); - - return 0; -} -EXPORT_SYMBOL(libcfs_kkuc_group_rem); - -int libcfs_kkuc_group_put(unsigned int group, void *payload) -{ - struct kkuc_reg *reg; - int rc = 0; - int one_success = 0; - - down_write(&kg_sem); - list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp) { - rc = libcfs_kkuc_msg_put(reg->kr_fp, payload); - if (!rc) { - one_success = 1; - } else if (rc == -EPIPE) { - fput(reg->kr_fp); - reg->kr_fp = NULL; - } - } - } - up_write(&kg_sem); - - /* - * don't return an error if the message has been delivered - * at least to one agent - */ - if (one_success) - rc = 0; - - return rc; -} -EXPORT_SYMBOL(libcfs_kkuc_group_put); - -/** - * Calls a callback function for each link of the given kuc group. - * @param group the group to call the function on. - * @param cb_func the function to be called. - * @param cb_arg extra argument to be passed to the callback function. - */ -int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, - void *cb_arg) -{ - struct kkuc_reg *reg; - int rc = 0; - - if (group > KUC_GRP_MAX) { - CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); - return -EINVAL; - } - - /* no link for this group */ - if (!kkuc_groups[group].next) - return 0; - - down_read(&kg_sem); - list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp) - rc = cb_func(reg->kr_data, cb_arg); - } - up_read(&kg_sem); - - return rc; -} -EXPORT_SYMBOL(libcfs_kkuc_group_foreach); diff --git a/drivers/staging/lustre/lustre/obdclass/linkea.c b/drivers/staging/lustre/lustre/obdclass/linkea.c deleted file mode 100644 index 74c99ee216bb..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/linkea.c +++ /dev/null @@ -1,249 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2013, 2014, Intel Corporation. - * Use is subject to license terms. - * - * Author: Di Wang - */ - -#include -#include -#include - -int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf) -{ - buf->lb_buf = kzalloc(PAGE_SIZE, GFP_NOFS); - if (!buf->lb_buf) - return -ENOMEM; - buf->lb_len = PAGE_SIZE; - ldata->ld_buf = buf; - ldata->ld_leh = ldata->ld_buf->lb_buf; - ldata->ld_leh->leh_magic = LINK_EA_MAGIC; - ldata->ld_leh->leh_len = sizeof(struct link_ea_header); - ldata->ld_leh->leh_reccount = 0; - ldata->ld_leh->leh_overflow_time = 0; - ldata->ld_leh->leh_padding = 0; - return 0; -} -EXPORT_SYMBOL(linkea_data_new); - -int linkea_init(struct linkea_data *ldata) -{ - struct link_ea_header *leh; - - LASSERT(ldata->ld_buf); - leh = ldata->ld_buf->lb_buf; - if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) { - leh->leh_magic = LINK_EA_MAGIC; - leh->leh_reccount = __swab32(leh->leh_reccount); - leh->leh_len = __swab64(leh->leh_len); - leh->leh_overflow_time = __swab32(leh->leh_overflow_time); - leh->leh_padding = __swab32(leh->leh_padding); - /* individual entries are swabbed by linkea_entry_unpack() */ - } - - if (leh->leh_magic != LINK_EA_MAGIC) - return -EINVAL; - - if (leh->leh_reccount == 0 && leh->leh_overflow_time 
== 0) - return -ENODATA; - - ldata->ld_leh = leh; - return 0; -} -EXPORT_SYMBOL(linkea_init); - -int linkea_init_with_rec(struct linkea_data *ldata) -{ - int rc; - - rc = linkea_init(ldata); - if (!rc && ldata->ld_leh->leh_reccount == 0) - rc = -ENODATA; - - return rc; -} -EXPORT_SYMBOL(linkea_init_with_rec); - -/** - * Pack a link_ea_entry. - * All elements are stored as chars to avoid alignment issues. - * Numbers are always big-endian - * \retval record length - */ -int linkea_entry_pack(struct link_ea_entry *lee, const struct lu_name *lname, - const struct lu_fid *pfid) -{ - struct lu_fid tmpfid; - int reclen; - - tmpfid = *pfid; - if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_CRASH)) - tmpfid.f_ver = ~0; - fid_cpu_to_be(&tmpfid, &tmpfid); - memcpy(&lee->lee_parent_fid, &tmpfid, sizeof(tmpfid)); - memcpy(lee->lee_name, lname->ln_name, lname->ln_namelen); - reclen = sizeof(struct link_ea_entry) + lname->ln_namelen; - - lee->lee_reclen[0] = (reclen >> 8) & 0xff; - lee->lee_reclen[1] = reclen & 0xff; - return reclen; -} -EXPORT_SYMBOL(linkea_entry_pack); - -void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen, - struct lu_name *lname, struct lu_fid *pfid) -{ - LASSERT(lee); - - *reclen = (lee->lee_reclen[0] << 8) | lee->lee_reclen[1]; - memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid)); - fid_be_to_cpu(pfid, pfid); - if (lname) { - lname->ln_name = lee->lee_name; - lname->ln_namelen = *reclen - sizeof(struct link_ea_entry); - } -} -EXPORT_SYMBOL(linkea_entry_unpack); - -/** - * Add a record to the end of link ea buf - **/ -int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname, - const struct lu_fid *pfid) -{ - struct link_ea_header *leh = ldata->ld_leh; - int reclen; - - LASSERT(leh); - - if (!lname || !pfid) - return -EINVAL; - - reclen = lname->ln_namelen + sizeof(struct link_ea_entry); - if (unlikely(leh->leh_len + reclen > MAX_LINKEA_SIZE)) { - /* - * Use 32-bits to save the overflow time, although it will - * shrink the 
ktime_get_real_seconds() returned 64-bits value - * to 32-bits value, it is still quite large and can be used - * for about 140 years. That is enough. - */ - leh->leh_overflow_time = ktime_get_real_seconds(); - if (unlikely(leh->leh_overflow_time == 0)) - leh->leh_overflow_time++; - - CDEBUG(D_INODE, "No enough space to hold linkea entry '" DFID ": %.*s' at %u\n", - PFID(pfid), lname->ln_namelen, - lname->ln_name, leh->leh_overflow_time); - return 0; - } - - if (leh->leh_len + reclen > ldata->ld_buf->lb_len) { - /* Note: this never happens as MAX_LINKEA_SIZE is 4096, while - * the initial allocation is PAGE_SIZE. - */ - void *b = krealloc(ldata->ld_buf->lb_buf, leh->leh_len + reclen, GFP_NOFS); - if (!b) - return -ENOMEM; - - ldata->ld_buf->lb_len = leh->leh_len + reclen; - leh = ldata->ld_leh = ldata->ld_buf->lb_buf = b; - } - - ldata->ld_lee = ldata->ld_buf->lb_buf + leh->leh_len; - ldata->ld_reclen = linkea_entry_pack(ldata->ld_lee, lname, pfid); - leh->leh_len += ldata->ld_reclen; - leh->leh_reccount++; - CDEBUG(D_INODE, "New link_ea name '" DFID ":%.*s' is added\n", - PFID(pfid), lname->ln_namelen, lname->ln_name); - return 0; -} -EXPORT_SYMBOL(linkea_add_buf); - -/** Del the current record from the link ea buf */ -void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname) -{ - LASSERT(ldata->ld_leh && ldata->ld_lee); - LASSERT(ldata->ld_leh->leh_reccount > 0); - - ldata->ld_leh->leh_reccount--; - ldata->ld_leh->leh_len -= ldata->ld_reclen; - memmove(ldata->ld_lee, (char *)ldata->ld_lee + ldata->ld_reclen, - (char *)ldata->ld_leh + ldata->ld_leh->leh_len - - (char *)ldata->ld_lee); - CDEBUG(D_INODE, "Old link_ea name '%.*s' is removed\n", - lname->ln_namelen, lname->ln_name); - - if ((char *)ldata->ld_lee >= ((char *)ldata->ld_leh + - ldata->ld_leh->leh_len)) - ldata->ld_lee = NULL; -} -EXPORT_SYMBOL(linkea_del_buf); - -/** - * Check if such a link exists in linkEA. 
- * - * \param ldata link data the search to be done on - * \param lname name in the parent's directory entry pointing to this object - * \param pfid parent fid the link to be found for - * - * \retval 0 success - * \retval -ENOENT link does not exist - * \retval -ve on error - */ -int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname, - const struct lu_fid *pfid) -{ - struct lu_name tmpname; - struct lu_fid tmpfid; - int count; - - LASSERT(ldata->ld_leh); - - /* link #0, if leh_reccount == 0 we skip the loop and return -ENOENT */ - if (likely(ldata->ld_leh->leh_reccount > 0)) - ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1); - - for (count = 0; count < ldata->ld_leh->leh_reccount; count++) { - linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, - &tmpname, &tmpfid); - if (tmpname.ln_namelen == lname->ln_namelen && - lu_fid_eq(&tmpfid, pfid) && - (strncmp(tmpname.ln_name, lname->ln_name, - tmpname.ln_namelen) == 0)) - break; - ldata->ld_lee = (struct link_ea_entry *)((char *)ldata->ld_lee + - ldata->ld_reclen); - } - - if (count == ldata->ld_leh->leh_reccount) { - CDEBUG(D_INODE, "Old link_ea name '%.*s' not found\n", - lname->ln_namelen, lname->ln_name); - ldata->ld_lee = NULL; - ldata->ld_reclen = 0; - return -ENOENT; - } - return 0; -} -EXPORT_SYMBOL(linkea_links_find); diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c deleted file mode 100644 index 9c800580053b..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c +++ /dev/null @@ -1,514 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/linux/linux-module.c - * - * Object Devices Class Driver - * These are the only exported functions, they provide some generic - * infrastructure for managing object devices - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#define OBD_MAX_IOCTL_BUFFER 8192 - -static int obd_ioctl_is_invalid(struct obd_ioctl_data *data) -{ - if (data->ioc_len > BIT(30)) { - CERROR("OBD ioctl: ioc_len larger than 1<<30\n"); - return 1; - } - - if (data->ioc_inllen1 > BIT(30)) { - CERROR("OBD ioctl: ioc_inllen1 larger than 1<<30\n"); - return 1; - } - - if (data->ioc_inllen2 > BIT(30)) { - CERROR("OBD ioctl: ioc_inllen2 larger than 1<<30\n"); - return 1; - } - - if (data->ioc_inllen3 > BIT(30)) { - CERROR("OBD ioctl: ioc_inllen3 larger than 1<<30\n"); - return 1; - } - - if (data->ioc_inllen4 > BIT(30)) { - CERROR("OBD ioctl: ioc_inllen4 larger than 1<<30\n"); - return 1; - } - - if 
(data->ioc_inlbuf1 && data->ioc_inllen1 == 0) { - CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n"); - return 1; - } - - if (data->ioc_inlbuf2 && data->ioc_inllen2 == 0) { - CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n"); - return 1; - } - - if (data->ioc_inlbuf3 && data->ioc_inllen3 == 0) { - CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n"); - return 1; - } - - if (data->ioc_inlbuf4 && data->ioc_inllen4 == 0) { - CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n"); - return 1; - } - - if (data->ioc_pbuf1 && data->ioc_plen1 == 0) { - CERROR("OBD ioctl: pbuf1 pointer but 0 length\n"); - return 1; - } - - if (data->ioc_pbuf2 && data->ioc_plen2 == 0) { - CERROR("OBD ioctl: pbuf2 pointer but 0 length\n"); - return 1; - } - - if (!data->ioc_pbuf1 && data->ioc_plen1 != 0) { - CERROR("OBD ioctl: plen1 set but NULL pointer\n"); - return 1; - } - - if (!data->ioc_pbuf2 && data->ioc_plen2 != 0) { - CERROR("OBD ioctl: plen2 set but NULL pointer\n"); - return 1; - } - - if (obd_ioctl_packlen(data) > data->ioc_len) { - CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n", - obd_ioctl_packlen(data), data->ioc_len); - return 1; - } - - return 0; -} - -/* buffer MUST be at least the size of obd_ioctl_hdr */ -int obd_ioctl_getdata(char **buf, int *len, void __user *arg) -{ - struct obd_ioctl_hdr hdr; - struct obd_ioctl_data *data; - int err; - int offset = 0; - - if (copy_from_user(&hdr, arg, sizeof(hdr))) - return -EFAULT; - - if (hdr.ioc_version != OBD_IOCTL_VERSION) { - CERROR("Version mismatch kernel (%x) vs application (%x)\n", - OBD_IOCTL_VERSION, hdr.ioc_version); - return -EINVAL; - } - - if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) { - CERROR("User buffer len %d exceeds %d max buffer\n", - hdr.ioc_len, OBD_MAX_IOCTL_BUFFER); - return -EINVAL; - } - - if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) { - CERROR("User buffer too small for ioctl (%d)\n", hdr.ioc_len); - return -EINVAL; - } - - /* When there are lots of processes calling vmalloc on multi-core - * 
system, the high lock contention will hurt performance badly, - * obdfilter-survey is an example, which relies on ioctl. So we'd - * better avoid vmalloc on ioctl path. LU-66 - */ - *buf = kvzalloc(hdr.ioc_len, GFP_KERNEL); - if (!*buf) { - CERROR("Cannot allocate control buffer of len %d\n", - hdr.ioc_len); - return -EINVAL; - } - *len = hdr.ioc_len; - data = (struct obd_ioctl_data *)*buf; - - if (copy_from_user(*buf, arg, hdr.ioc_len)) { - err = -EFAULT; - goto free_buf; - } - if (hdr.ioc_len != data->ioc_len) { - err = -EINVAL; - goto free_buf; - } - - if (obd_ioctl_is_invalid(data)) { - CERROR("ioctl not correctly formatted\n"); - err = -EINVAL; - goto free_buf; - } - - if (data->ioc_inllen1) { - data->ioc_inlbuf1 = &data->ioc_bulk[0]; - offset += cfs_size_round(data->ioc_inllen1); - } - - if (data->ioc_inllen2) { - data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset; - offset += cfs_size_round(data->ioc_inllen2); - } - - if (data->ioc_inllen3) { - data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset; - offset += cfs_size_round(data->ioc_inllen3); - } - - if (data->ioc_inllen4) - data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset; - - return 0; - -free_buf: - kvfree(*buf); - return err; -} -EXPORT_SYMBOL(obd_ioctl_getdata); - -/* opening /dev/obd */ -static int obd_class_open(struct inode *inode, struct file *file) -{ - try_module_get(THIS_MODULE); - return 0; -} - -/* closing /dev/obd */ -static int obd_class_release(struct inode *inode, struct file *file) -{ - module_put(THIS_MODULE); - return 0; -} - -/* to control /dev/obd */ -static long obd_class_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg) -{ - int err = 0; - - /* Allow non-root access for OBD_IOC_PING_TARGET - used by lfs check */ - if (!capable(CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET)) - return err = -EACCES; - if ((cmd & 0xffffff00) == ((int)'T') << 8) /* ignore all tty ioctls */ - return err = -ENOTTY; - - err = class_handle_ioctl(cmd, (unsigned long)arg); - - return err; -} - -/* 
declare character device */ -static const struct file_operations obd_psdev_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = obd_class_ioctl, /* unlocked_ioctl */ - .open = obd_class_open, /* open */ - .release = obd_class_release, /* release */ -}; - -/* modules setup */ -struct miscdevice obd_psdev = { - .minor = MISC_DYNAMIC_MINOR, - .name = OBD_DEV_NAME, - .fops = &obd_psdev_fops, -}; - -static ssize_t version_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - return sprintf(buf, "%s\n", LUSTRE_VERSION_STRING); -} - -static ssize_t pinger_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - return sprintf(buf, "%s\n", "on"); -} - -static ssize_t -health_check_show(struct kobject *kobj, struct attribute *attr, char *buf) -{ - bool healthy = true; - int i; - size_t len = 0; - - if (libcfs_catastrophe) - return sprintf(buf, "LBUG\n"); - - read_lock(&obd_dev_lock); - for (i = 0; i < class_devno_max(); i++) { - struct obd_device *obd; - - obd = class_num2obd(i); - if (!obd || !obd->obd_attached || !obd->obd_set_up) - continue; - - LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); - if (obd->obd_stopping) - continue; - - class_incref(obd, __func__, current); - read_unlock(&obd_dev_lock); - - if (obd_health_check(NULL, obd)) - healthy = false; - class_decref(obd, __func__, current); - read_lock(&obd_dev_lock); - } - read_unlock(&obd_dev_lock); - - if (healthy) - len = sprintf(buf, "healthy\n"); - else - len = sprintf(buf, "NOT HEALTHY\n"); - - return len; -} - -static ssize_t jobid_var_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%s\n", obd_jobid_var); -} - -static ssize_t jobid_var_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, - size_t count) -{ - if (!count || count > JOBSTATS_JOBID_VAR_MAX_LEN) - return -EINVAL; - - memset(obd_jobid_var, 0, JOBSTATS_JOBID_VAR_MAX_LEN + 1); - - memcpy(obd_jobid_var, buffer, count); - - /* Trim the trailing '\n' if any 
*/ - if (obd_jobid_var[count - 1] == '\n') - obd_jobid_var[count - 1] = 0; - - return count; -} - -static ssize_t jobid_name_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%s\n", obd_jobid_node); -} - -static ssize_t jobid_name_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, - size_t count) -{ - if (!count || count > LUSTRE_JOBID_SIZE) - return -EINVAL; - - memcpy(obd_jobid_node, buffer, count); - - obd_jobid_node[count] = 0; - - /* Trim the trailing '\n' if any */ - if (obd_jobid_node[count - 1] == '\n') - obd_jobid_node[count - 1] = 0; - - return count; -} - -/* Root for /sys/kernel/debug/lustre */ -struct dentry *debugfs_lustre_root; -EXPORT_SYMBOL_GPL(debugfs_lustre_root); - -LUSTRE_RO_ATTR(version); -LUSTRE_RO_ATTR(pinger); -LUSTRE_RO_ATTR(health_check); -LUSTRE_RW_ATTR(jobid_var); -LUSTRE_RW_ATTR(jobid_name); - -static struct attribute *lustre_attrs[] = { - &lustre_attr_version.attr, - &lustre_attr_pinger.attr, - &lustre_attr_health_check.attr, - &lustre_attr_jobid_name.attr, - &lustre_attr_jobid_var.attr, - NULL, -}; - -static void *obd_device_list_seq_start(struct seq_file *p, loff_t *pos) -{ - if (*pos >= class_devno_max()) - return NULL; - - return pos; -} - -static void obd_device_list_seq_stop(struct seq_file *p, void *v) -{ -} - -static void *obd_device_list_seq_next(struct seq_file *p, void *v, loff_t *pos) -{ - ++*pos; - if (*pos >= class_devno_max()) - return NULL; - - return pos; -} - -static int obd_device_list_seq_show(struct seq_file *p, void *v) -{ - loff_t index = *(loff_t *)v; - struct obd_device *obd = class_num2obd((int)index); - char *status; - - if (!obd) - return 0; - - LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); - if (obd->obd_stopping) - status = "ST"; - else if (obd->obd_inactive) - status = "IN"; - else if (obd->obd_set_up) - status = "UP"; - else if (obd->obd_attached) - status = "AT"; - else - status = "--"; - - seq_printf(p, "%3d %s %s %s %s %d\n", - 
(int)index, status, obd->obd_type->typ_name, - obd->obd_name, obd->obd_uuid.uuid, - atomic_read(&obd->obd_refcount)); - return 0; -} - -static const struct seq_operations obd_device_list_sops = { - .start = obd_device_list_seq_start, - .stop = obd_device_list_seq_stop, - .next = obd_device_list_seq_next, - .show = obd_device_list_seq_show, -}; - -static int obd_device_list_open(struct inode *inode, struct file *file) -{ - struct seq_file *seq; - int rc = seq_open(file, &obd_device_list_sops); - - if (rc) - return rc; - - seq = file->private_data; - seq->private = inode->i_private; - - return 0; -} - -static const struct file_operations obd_device_list_fops = { - .owner = THIS_MODULE, - .open = obd_device_list_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -struct kobject *lustre_kobj; -EXPORT_SYMBOL_GPL(lustre_kobj); - -static const struct attribute_group lustre_attr_group = { - .attrs = lustre_attrs, -}; - -int class_procfs_init(void) -{ - int rc = -ENOMEM; - - lustre_kobj = kobject_create_and_add("lustre", fs_kobj); - if (!lustre_kobj) - goto out; - - /* Create the files associated with this kobject */ - rc = sysfs_create_group(lustre_kobj, &lustre_attr_group); - if (rc) { - kobject_put(lustre_kobj); - goto out; - } - - debugfs_lustre_root = debugfs_create_dir("lustre", NULL); - - debugfs_create_file("devices", 0444, debugfs_lustre_root, NULL, - &obd_device_list_fops); -out: - return rc; -} - -int class_procfs_clean(void) -{ - debugfs_remove_recursive(debugfs_lustre_root); - - debugfs_lustre_root = NULL; - - sysfs_remove_group(lustre_kobj, &lustre_attr_group); - kobject_put(lustre_kobj); - - return 0; -} diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c deleted file mode 100644 index e5e8687784ee..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c +++ /dev/null @@ -1,162 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* 
- * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include - -struct static_lustre_uintvalue_attr { - struct { - struct attribute attr; - ssize_t (*show)(struct kobject *kobj, struct attribute *attr, - char *buf); - ssize_t (*store)(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t len); - } u; - int *value; -}; - -static ssize_t static_uintvalue_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct static_lustre_uintvalue_attr *lattr = (void *)attr; - - return sprintf(buf, "%d\n", *lattr->value); -} - -static ssize_t static_uintvalue_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, size_t count) -{ - struct static_lustre_uintvalue_attr *lattr = (void *)attr; - int rc; - unsigned int val; - - rc = kstrtouint(buffer, 10, &val); - if (rc) - return rc; - - *lattr->value = val; - - return count; -} - -#define LUSTRE_STATIC_UINT_ATTR(name, value) \ -static struct static_lustre_uintvalue_attr lustre_sattr_##name = \ - {__ATTR(name, 0644, \ - static_uintvalue_show, \ - static_uintvalue_store),\ - value } - -LUSTRE_STATIC_UINT_ATTR(timeout, &obd_timeout); - -static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - return sprintf(buf, "%lu\n", - obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT))); -} - -static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */ - - if (val > ((totalram_pages / 10) * 9)) { - /* Somebody wants to assign too much memory to dirty pages */ - return -EINVAL; - } - - if (val < 4 << (20 - PAGE_SHIFT)) { - /* Less than 4 Mb for dirty cache is also bad */ - return -EINVAL; - } - - obd_max_dirty_pages = val; - - return count; 
-} -LUSTRE_RW_ATTR(max_dirty_mb); - -LUSTRE_STATIC_UINT_ATTR(debug_peer_on_timeout, &obd_debug_peer_on_timeout); -LUSTRE_STATIC_UINT_ATTR(dump_on_timeout, &obd_dump_on_timeout); -LUSTRE_STATIC_UINT_ATTR(dump_on_eviction, &obd_dump_on_eviction); -LUSTRE_STATIC_UINT_ATTR(at_min, &at_min); -LUSTRE_STATIC_UINT_ATTR(at_max, &at_max); -LUSTRE_STATIC_UINT_ATTR(at_extra, &at_extra); -LUSTRE_STATIC_UINT_ATTR(at_early_margin, &at_early_margin); -LUSTRE_STATIC_UINT_ATTR(at_history, &at_history); - -static struct attribute *lustre_attrs[] = { - &lustre_sattr_timeout.u.attr, - &lustre_attr_max_dirty_mb.attr, - &lustre_sattr_debug_peer_on_timeout.u.attr, - &lustre_sattr_dump_on_timeout.u.attr, - &lustre_sattr_dump_on_eviction.u.attr, - &lustre_sattr_at_min.u.attr, - &lustre_sattr_at_max.u.attr, - &lustre_sattr_at_extra.u.attr, - &lustre_sattr_at_early_margin.u.attr, - &lustre_sattr_at_history.u.attr, - NULL, -}; - -static const struct attribute_group lustre_attr_group = { - .attrs = lustre_attrs, -}; - -int obd_sysctl_init(void) -{ - return sysfs_create_group(lustre_kobj, &lustre_attr_group); -} diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c deleted file mode 100644 index bba84eae1e19..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/llog.c +++ /dev/null @@ -1,524 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/llog.c - * - * OST<->MDS recovery logging infrastructure. - * Invariants in implementation: - * - we do not share logs among different OST<->MDS connections, so that - * if an OST or MDS fails it need only look at log(s) relevant to itself - * - * Author: Andreas Dilger - * Author: Alex Zhuravlev - * Author: Mikhail Pershin - */ - -#define DEBUG_SUBSYSTEM S_LOG - -#include -#include -#include -#include -#include "llog_internal.h" - -/* - * Allocate a new log or catalog handle - * Used inside llog_open(). - */ -static struct llog_handle *llog_alloc_handle(void) -{ - struct llog_handle *loghandle; - - loghandle = kzalloc(sizeof(*loghandle), GFP_NOFS); - if (!loghandle) - return NULL; - - init_rwsem(&loghandle->lgh_lock); - spin_lock_init(&loghandle->lgh_hdr_lock); - INIT_LIST_HEAD(&loghandle->u.phd.phd_entry); - atomic_set(&loghandle->lgh_refcount, 1); - - return loghandle; -} - -/* - * Free llog handle and header data if exists. 
Used in llog_close() only - */ -static void llog_free_handle(struct llog_handle *loghandle) -{ - /* failed llog_init_handle */ - if (!loghandle->lgh_hdr) - goto out; - - if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN) - LASSERT(list_empty(&loghandle->u.phd.phd_entry)); - else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) - LASSERT(list_empty(&loghandle->u.chd.chd_head)); - kvfree(loghandle->lgh_hdr); -out: - kfree(loghandle); -} - -void llog_handle_get(struct llog_handle *loghandle) -{ - atomic_inc(&loghandle->lgh_refcount); -} - -void llog_handle_put(struct llog_handle *loghandle) -{ - LASSERT(atomic_read(&loghandle->lgh_refcount) > 0); - if (atomic_dec_and_test(&loghandle->lgh_refcount)) - llog_free_handle(loghandle); -} - -static int llog_read_header(const struct lu_env *env, - struct llog_handle *handle, - struct obd_uuid *uuid) -{ - struct llog_operations *lop; - int rc; - - rc = llog_handle2ops(handle, &lop); - if (rc) - return rc; - - if (!lop->lop_read_header) - return -EOPNOTSUPP; - - rc = lop->lop_read_header(env, handle); - if (rc == LLOG_EEMPTY) { - struct llog_log_hdr *llh = handle->lgh_hdr; - size_t len; - - /* lrh_len should be initialized in llog_init_handle */ - handle->lgh_last_idx = 0; /* header is record with index 0 */ - llh->llh_count = 1; /* for the header record */ - llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC; - LASSERT(handle->lgh_ctxt->loc_chunk_size >= LLOG_MIN_CHUNK_SIZE); - llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size; - llh->llh_hdr.lrh_index = 0; - llh->llh_timestamp = ktime_get_real_seconds(); - if (uuid) - memcpy(&llh->llh_tgtuuid, uuid, - sizeof(llh->llh_tgtuuid)); - llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap); - /* - * Since update llog header might also call this function, - * let's reset the bitmap to 0 here - */ - len = llh->llh_hdr.lrh_len - llh->llh_bitmap_offset; - memset(LLOG_HDR_BITMAP(llh), 0, len - sizeof(llh->llh_tail)); - ext2_set_bit(0, LLOG_HDR_BITMAP(llh)); - 
LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len; - LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index; - rc = 0; - } - return rc; -} - -int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, - int flags, struct obd_uuid *uuid) -{ - int chunk_size = handle->lgh_ctxt->loc_chunk_size; - enum llog_flag fmt = flags & LLOG_F_EXT_MASK; - struct llog_log_hdr *llh; - int rc; - - LASSERT(!handle->lgh_hdr); - - LASSERT(chunk_size >= LLOG_MIN_CHUNK_SIZE); - llh = kvzalloc(sizeof(*llh), GFP_KERNEL); - if (!llh) - return -ENOMEM; - handle->lgh_hdr = llh; - handle->lgh_hdr_size = chunk_size; - /* first assign flags to use llog_client_ops */ - llh->llh_flags = flags; - rc = llog_read_header(env, handle, uuid); - if (rc == 0) { - if (unlikely((llh->llh_flags & LLOG_F_IS_PLAIN && - flags & LLOG_F_IS_CAT) || - (llh->llh_flags & LLOG_F_IS_CAT && - flags & LLOG_F_IS_PLAIN))) { - CERROR("%s: llog type is %s but initializing %s\n", - handle->lgh_ctxt->loc_obd->obd_name, - llh->llh_flags & LLOG_F_IS_CAT ? - "catalog" : "plain", - flags & LLOG_F_IS_CAT ? 
"catalog" : "plain"); - rc = -EINVAL; - goto out; - } else if (llh->llh_flags & - (LLOG_F_IS_PLAIN | LLOG_F_IS_CAT)) { - /* - * it is possible to open llog without specifying llog - * type so it is taken from llh_flags - */ - flags = llh->llh_flags; - } else { - /* for some reason the llh_flags has no type set */ - CERROR("llog type is not specified!\n"); - rc = -EINVAL; - goto out; - } - if (unlikely(uuid && - !obd_uuid_equals(uuid, &llh->llh_tgtuuid))) { - CERROR("%s: llog uuid mismatch: %s/%s\n", - handle->lgh_ctxt->loc_obd->obd_name, - (char *)uuid->uuid, - (char *)llh->llh_tgtuuid.uuid); - rc = -EEXIST; - goto out; - } - } - if (flags & LLOG_F_IS_CAT) { - LASSERT(list_empty(&handle->u.chd.chd_head)); - INIT_LIST_HEAD(&handle->u.chd.chd_head); - llh->llh_size = sizeof(struct llog_logid_rec); - llh->llh_flags |= LLOG_F_IS_FIXSIZE; - } else if (!(flags & LLOG_F_IS_PLAIN)) { - CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n", - handle->lgh_ctxt->loc_obd->obd_name, - flags, LLOG_F_IS_CAT, LLOG_F_IS_PLAIN); - rc = -EINVAL; - } - llh->llh_flags |= fmt; -out: - if (rc) { - kvfree(llh); - handle->lgh_hdr = NULL; - } - return rc; -} -EXPORT_SYMBOL(llog_init_handle); - -static int llog_process_thread(void *arg) -{ - struct llog_process_info *lpi = arg; - struct llog_handle *loghandle = lpi->lpi_loghandle; - struct llog_log_hdr *llh = loghandle->lgh_hdr; - struct llog_process_cat_data *cd = lpi->lpi_catdata; - char *buf; - u64 cur_offset, tmp_offset; - int chunk_size; - int rc = 0, index = 1, last_index; - int saved_index = 0; - int last_called_index = 0; - - if (!llh) - return -EINVAL; - - cur_offset = llh->llh_hdr.lrh_len; - chunk_size = llh->llh_hdr.lrh_len; - /* expect chunk_size to be power of two */ - LASSERT(is_power_of_2(chunk_size)); - - buf = kvzalloc(chunk_size, GFP_NOFS); - if (!buf) { - lpi->lpi_rc = -ENOMEM; - return 0; - } - - if (cd) { - last_called_index = cd->lpcd_first_idx; - index = cd->lpcd_first_idx + 1; - } - if (cd && cd->lpcd_last_idx) - 
last_index = cd->lpcd_last_idx; - else - last_index = LLOG_HDR_BITMAP_SIZE(llh) - 1; - - while (rc == 0) { - unsigned int buf_offset = 0; - struct llog_rec_hdr *rec; - bool partial_chunk; - off_t chunk_offset; - - /* skip records not set in bitmap */ - while (index <= last_index && - !ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) - ++index; - - if (index > last_index) - break; - - CDEBUG(D_OTHER, "index: %d last_index %d\n", - index, last_index); -repeat: - /* get the buf with our target record; avoid old garbage */ - memset(buf, 0, chunk_size); - rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index, - index, &cur_offset, buf, chunk_size); - if (rc) - goto out; - - /* - * NB: after llog_next_block() call the cur_offset is the - * offset of the next block after read one. - * The absolute offset of the current chunk is calculated - * from cur_offset value and stored in chunk_offset variable. - */ - tmp_offset = cur_offset; - if (do_div(tmp_offset, chunk_size)) { - partial_chunk = true; - chunk_offset = cur_offset & ~(chunk_size - 1); - } else { - partial_chunk = false; - chunk_offset = cur_offset - chunk_size; - } - - /* NB: when rec->lrh_len is accessed it is already swabbed - * since it is used at the "end" of the loop and the rec - * swabbing is done at the beginning of the loop. - */ - for (rec = (struct llog_rec_hdr *)(buf + buf_offset); - (char *)rec < buf + chunk_size; - rec = llog_rec_hdr_next(rec)) { - CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n", - rec, rec->lrh_type); - - if (LLOG_REC_HDR_NEEDS_SWABBING(rec)) - lustre_swab_llog_rec(rec); - - CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n", - rec->lrh_type, rec->lrh_index); - - /* - * for partial chunk the end of it is zeroed, check - * for index 0 to distinguish it. - */ - if (partial_chunk && !rec->lrh_index) { - /* concurrent llog_add() might add new records - * while llog_processing, check this is not - * the case and re-read the current chunk - * otherwise. 
- */ - if (index > loghandle->lgh_last_idx) { - rc = 0; - goto out; - } - CDEBUG(D_OTHER, "Re-read last llog buffer for new records, index %u, last %u\n", - index, loghandle->lgh_last_idx); - /* save offset inside buffer for the re-read */ - buf_offset = (char *)rec - (char *)buf; - cur_offset = chunk_offset; - goto repeat; - } - - if (!rec->lrh_len || rec->lrh_len > chunk_size) { - CWARN("invalid length %d in llog record for index %d/%d\n", - rec->lrh_len, - rec->lrh_index, index); - rc = -EINVAL; - goto out; - } - - if (rec->lrh_index < index) { - CDEBUG(D_OTHER, "skipping lrh_index %d\n", - rec->lrh_index); - continue; - } - - if (rec->lrh_index != index) { - CERROR("%s: Invalid record: index %u but expected %u\n", - loghandle->lgh_ctxt->loc_obd->obd_name, - rec->lrh_index, index); - rc = -ERANGE; - goto out; - } - - CDEBUG(D_OTHER, - "lrh_index: %d lrh_len: %d (%d remains)\n", - rec->lrh_index, rec->lrh_len, - (int)(buf + chunk_size - (char *)rec)); - - loghandle->lgh_cur_idx = rec->lrh_index; - loghandle->lgh_cur_offset = (char *)rec - (char *)buf + - chunk_offset; - - /* if set, process the callback on this record */ - if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) { - rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec, - lpi->lpi_cbdata); - last_called_index = index; - if (rc) - goto out; - } - - /* exit if the last index is reached */ - if (index >= last_index) { - rc = 0; - goto out; - } - index++; - } - } - -out: - if (cd) - cd->lpcd_last_idx = last_called_index; - - kvfree(buf); - lpi->lpi_rc = rc; - return 0; -} - -static int llog_process_thread_daemonize(void *arg) -{ - struct llog_process_info *lpi = arg; - struct lu_env env; - int rc; - - unshare_fs_struct(); - - /* client env has no keys, tags is just 0 */ - rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD); - if (rc) - goto out; - lpi->lpi_env = &env; - - rc = llog_process_thread(arg); - - lu_env_fini(&env); -out: - complete(&lpi->lpi_completion); - return rc; -} - -int llog_process_or_fork(const 
struct lu_env *env, - struct llog_handle *loghandle, - llog_cb_t cb, void *data, void *catdata, bool fork) -{ - struct llog_process_info *lpi; - int rc; - - lpi = kzalloc(sizeof(*lpi), GFP_NOFS); - if (!lpi) - return -ENOMEM; - lpi->lpi_loghandle = loghandle; - lpi->lpi_cb = cb; - lpi->lpi_cbdata = data; - lpi->lpi_catdata = catdata; - - if (fork) { - struct task_struct *task; - - /* The new thread can't use parent env, - * init the new one in llog_process_thread_daemonize. - */ - lpi->lpi_env = NULL; - init_completion(&lpi->lpi_completion); - task = kthread_run(llog_process_thread_daemonize, lpi, - "llog_process_thread"); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - CERROR("%s: cannot start thread: rc = %d\n", - loghandle->lgh_ctxt->loc_obd->obd_name, rc); - goto out_lpi; - } - wait_for_completion(&lpi->lpi_completion); - } else { - lpi->lpi_env = env; - llog_process_thread(lpi); - } - rc = lpi->lpi_rc; -out_lpi: - kfree(lpi); - return rc; -} -EXPORT_SYMBOL(llog_process_or_fork); - -int llog_process(const struct lu_env *env, struct llog_handle *loghandle, - llog_cb_t cb, void *data, void *catdata) -{ - return llog_process_or_fork(env, loghandle, cb, data, catdata, true); -} -EXPORT_SYMBOL(llog_process); - -int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt, - struct llog_handle **lgh, struct llog_logid *logid, - char *name, enum llog_open_param open_param) -{ - const struct cred *old_cred = NULL; - int rc; - - LASSERT(ctxt); - LASSERT(ctxt->loc_logops); - - if (!ctxt->loc_logops->lop_open) { - *lgh = NULL; - return -EOPNOTSUPP; - } - - *lgh = llog_alloc_handle(); - if (!*lgh) - return -ENOMEM; - (*lgh)->lgh_ctxt = ctxt; - (*lgh)->lgh_logops = ctxt->loc_logops; - - if (cap_raised(current_cap(), CAP_SYS_RESOURCE)) { - struct cred *cred = prepare_creds(); - - if (cred) { - cap_raise(cred->cap_effective, CAP_SYS_RESOURCE); - old_cred = override_creds(cred); - } - } - rc = ctxt->loc_logops->lop_open(env, *lgh, logid, name, open_param); - if (old_cred) - 
revert_creds(old_cred); - - if (rc) { - llog_free_handle(*lgh); - *lgh = NULL; - } - return rc; -} -EXPORT_SYMBOL(llog_open); - -int llog_close(const struct lu_env *env, struct llog_handle *loghandle) -{ - struct llog_operations *lop; - int rc; - - rc = llog_handle2ops(loghandle, &lop); - if (rc) - goto out; - if (!lop->lop_close) { - rc = -EOPNOTSUPP; - goto out; - } - rc = lop->lop_close(env, loghandle); -out: - llog_handle_put(loghandle); - return rc; -} -EXPORT_SYMBOL(llog_close); diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c deleted file mode 100644 index d9c63adff206..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c +++ /dev/null @@ -1,236 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/llog_cat.c - * - * OST<->MDS recovery logging infrastructure. 
- * - * Invariants in implementation: - * - we do not share logs among different OST<->MDS connections, so that - * if an OST or MDS fails it need only look at log(s) relevant to itself - * - * Author: Andreas Dilger - * Author: Alexey Zhuravlev - * Author: Mikhail Pershin - */ - -#define DEBUG_SUBSYSTEM S_LOG - -#include - -#include "llog_internal.h" - -/* Open an existent log handle and add it to the open list. - * This log handle will be closed when all of the records in it are removed. - * - * Assumes caller has already pushed us into the kernel context and is locking. - * We return a lock on the handle to ensure nobody yanks it from us. - * - * This takes extra reference on llog_handle via llog_handle_get() and require - * this reference to be put by caller using llog_handle_put() - */ -static int llog_cat_id2handle(const struct lu_env *env, - struct llog_handle *cathandle, - struct llog_handle **res, - struct llog_logid *logid) -{ - struct llog_handle *loghandle; - enum llog_flag fmt; - int rc = 0; - - if (!cathandle) - return -EBADF; - - fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK; - down_write(&cathandle->lgh_lock); - list_for_each_entry(loghandle, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { - struct llog_logid *cgl = &loghandle->lgh_id; - - if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) && - ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) { - if (cgl->lgl_ogen != logid->lgl_ogen) { - CERROR("%s: log " DOSTID " generation %x != %x\n", - loghandle->lgh_ctxt->loc_obd->obd_name, - POSTID(&logid->lgl_oi), cgl->lgl_ogen, - logid->lgl_ogen); - continue; - } - loghandle->u.phd.phd_cat_handle = cathandle; - up_write(&cathandle->lgh_lock); - rc = 0; - goto out; - } - } - up_write(&cathandle->lgh_lock); - - rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL, - LLOG_OPEN_EXISTS); - if (rc < 0) { - CERROR("%s: error opening log id " DOSTID ":%x: rc = %d\n", - cathandle->lgh_ctxt->loc_obd->obd_name, - POSTID(&logid->lgl_oi), 
logid->lgl_ogen, rc); - return rc; - } - - rc = llog_init_handle(env, loghandle, fmt | LLOG_F_IS_PLAIN, NULL); - if (rc < 0) { - llog_close(env, loghandle); - loghandle = NULL; - return rc; - } - - down_write(&cathandle->lgh_lock); - list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head); - up_write(&cathandle->lgh_lock); - - loghandle->u.phd.phd_cat_handle = cathandle; - loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id; - loghandle->u.phd.phd_cookie.lgc_index = - loghandle->lgh_hdr->llh_cat_idx; -out: - llog_handle_get(loghandle); - *res = loghandle; - return 0; -} - -int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle) -{ - struct llog_handle *loghandle, *n; - - list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { - /* unlink open-not-created llogs */ - list_del_init(&loghandle->u.phd.phd_entry); - llog_close(env, loghandle); - } - /* if handle was stored in ctxt, remove it too */ - if (cathandle->lgh_ctxt->loc_handle == cathandle) - cathandle->lgh_ctxt->loc_handle = NULL; - return llog_close(env, cathandle); -} -EXPORT_SYMBOL(llog_cat_close); - -static int llog_cat_process_cb(const struct lu_env *env, - struct llog_handle *cat_llh, - struct llog_rec_hdr *rec, void *data) -{ - struct llog_process_data *d = data; - struct llog_logid_rec *lir = (struct llog_logid_rec *)rec; - struct llog_handle *llh; - int rc; - - if (rec->lrh_type != LLOG_LOGID_MAGIC) { - CERROR("invalid record in catalog\n"); - return -EINVAL; - } - CDEBUG(D_HA, "processing log " DOSTID ":%x at index %u of catalog " - DOSTID "\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen, - rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi)); - - rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id); - if (rc) { - CERROR("%s: cannot find handle for llog " DOSTID ": %d\n", - cat_llh->lgh_ctxt->loc_obd->obd_name, - POSTID(&lir->lid_id.lgl_oi), rc); - return rc; - } - - if (rec->lrh_index < d->lpd_startcat) - /* Skip 
processing of the logs until startcat */ - rc = 0; - else if (d->lpd_startidx > 0) { - struct llog_process_cat_data cd; - - cd.lpcd_first_idx = d->lpd_startidx; - cd.lpcd_last_idx = 0; - rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data, - &cd, false); - /* Continue processing the next log from idx 0 */ - d->lpd_startidx = 0; - } else { - rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data, - NULL, false); - } - - llog_handle_put(llh); - - return rc; -} - -static int llog_cat_process_or_fork(const struct lu_env *env, - struct llog_handle *cat_llh, - llog_cb_t cb, void *data, int startcat, - int startidx, bool fork) -{ - struct llog_process_data d; - struct llog_log_hdr *llh = cat_llh->lgh_hdr; - int rc; - - LASSERT(llh->llh_flags & LLOG_F_IS_CAT); - d.lpd_data = data; - d.lpd_cb = cb; - d.lpd_startcat = startcat; - d.lpd_startidx = startidx; - - if (llh->llh_cat_idx > cat_llh->lgh_last_idx) { - struct llog_process_cat_data cd; - - CWARN("catlog " DOSTID " crosses index zero\n", - POSTID(&cat_llh->lgh_id.lgl_oi)); - - cd.lpcd_first_idx = llh->llh_cat_idx; - cd.lpcd_last_idx = 0; - rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb, - &d, &cd, fork); - if (rc != 0) - return rc; - - cd.lpcd_first_idx = 0; - cd.lpcd_last_idx = cat_llh->lgh_last_idx; - rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb, - &d, &cd, fork); - } else { - rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb, - &d, NULL, fork); - } - - return rc; -} - -int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh, - llog_cb_t cb, void *data, int startcat, int startidx) -{ - return llog_cat_process_or_fork(env, cat_llh, cb, data, startcat, - startidx, false); -} -EXPORT_SYMBOL(llog_cat_process); diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h deleted file mode 100644 index 4991d4e589dc..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/llog_internal.h +++ 
/dev/null @@ -1,79 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef __LLOG_INTERNAL_H__ -#define __LLOG_INTERNAL_H__ - -#include - -struct llog_process_info { - struct llog_handle *lpi_loghandle; - llog_cb_t lpi_cb; - void *lpi_cbdata; - void *lpi_catdata; - int lpi_rc; - struct completion lpi_completion; - const struct lu_env *lpi_env; - -}; - -struct llog_thread_info { - struct lu_attr lgi_attr; - struct lu_fid lgi_fid; - struct lu_buf lgi_buf; - loff_t lgi_off; - struct llog_rec_hdr lgi_lrh; - struct llog_rec_tail lgi_tail; -}; - -extern struct lu_context_key llog_thread_key; - -int llog_info_init(void); -void llog_info_fini(void); - -void llog_handle_get(struct llog_handle *loghandle); -void llog_handle_put(struct llog_handle *loghandle); -int class_config_dump_handler(const struct lu_env *env, - struct llog_handle *handle, - struct llog_rec_hdr *rec, void *data); -int llog_process_or_fork(const struct lu_env *env, - struct llog_handle *loghandle, - llog_cb_t cb, void *data, void *catdata, bool fork); -int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle, - struct llog_handle *loghandle, int index); - -static inline struct llog_rec_hdr *llog_rec_hdr_next(struct llog_rec_hdr *rec) -{ - return (struct llog_rec_hdr *)((char *)rec + rec->lrh_len); -} -#endif diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c deleted file mode 100644 index 26aea114a29b..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c +++ /dev/null @@ -1,225 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LOG - -#include -#include -#include "llog_internal.h" - -/* helper functions for calling the llog obd methods */ -static struct llog_ctxt *llog_new_ctxt(struct obd_device *obd) -{ - struct llog_ctxt *ctxt; - - ctxt = kzalloc(sizeof(*ctxt), GFP_NOFS); - if (!ctxt) - return NULL; - - ctxt->loc_obd = obd; - atomic_set(&ctxt->loc_refcount, 1); - - return ctxt; -} - -static void llog_ctxt_destroy(struct llog_ctxt *ctxt) -{ - if (ctxt->loc_exp) { - class_export_put(ctxt->loc_exp); - ctxt->loc_exp = NULL; - } - if (ctxt->loc_imp) { - class_import_put(ctxt->loc_imp); - ctxt->loc_imp = NULL; - } - kfree(ctxt); -} - -int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt) -{ - struct obd_llog_group *olg = ctxt->loc_olg; - struct obd_device *obd; - int rc = 0; - - spin_lock(&olg->olg_lock); - if (!atomic_dec_and_test(&ctxt->loc_refcount)) { - spin_unlock(&olg->olg_lock); - return rc; - } - olg->olg_ctxts[ctxt->loc_idx] = NULL; - spin_unlock(&olg->olg_lock); - - obd = ctxt->loc_obd; - spin_lock(&obd->obd_dev_lock); - /* sync with llog ctxt user thread */ - spin_unlock(&obd->obd_dev_lock); - - /* obd->obd_starting is 
needed for the case of cleanup - * in error case while obd is starting up. - */ - LASSERTF(obd->obd_starting == 1 || - obd->obd_stopping == 1 || obd->obd_set_up == 0, - "wrong obd state: %d/%d/%d\n", !!obd->obd_starting, - !!obd->obd_stopping, !!obd->obd_set_up); - - /* cleanup the llog ctxt here */ - if (CTXTP(ctxt, cleanup)) - rc = CTXTP(ctxt, cleanup)(env, ctxt); - - llog_ctxt_destroy(ctxt); - wake_up(&olg->olg_waitq); - return rc; -} -EXPORT_SYMBOL(__llog_ctxt_put); - -int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt) -{ - struct obd_llog_group *olg; - int rc, idx; - - olg = ctxt->loc_olg; - LASSERT(olg); - LASSERT(olg != LP_POISON); - - idx = ctxt->loc_idx; - - /* - * Banlance the ctxt get when calling llog_cleanup() - */ - LASSERT(atomic_read(&ctxt->loc_refcount) < LI_POISON); - LASSERT(atomic_read(&ctxt->loc_refcount) > 1); - llog_ctxt_put(ctxt); - - /* - * Try to free the ctxt. - */ - rc = __llog_ctxt_put(env, ctxt); - if (rc) - CERROR("Error %d while cleaning up ctxt %p\n", - rc, ctxt); - - l_wait_event_abortable(olg->olg_waitq, - llog_group_ctxt_null(olg, idx)); - - return rc; -} -EXPORT_SYMBOL(llog_cleanup); - -int llog_setup(const struct lu_env *env, struct obd_device *obd, - struct obd_llog_group *olg, int index, - struct obd_device *disk_obd, struct llog_operations *op) -{ - struct llog_ctxt *ctxt; - int rc = 0; - - if (index < 0 || index >= LLOG_MAX_CTXTS) - return -EINVAL; - - LASSERT(olg); - - ctxt = llog_new_ctxt(obd); - if (!ctxt) - return -ENOMEM; - - ctxt->loc_obd = obd; - ctxt->loc_olg = olg; - ctxt->loc_idx = index; - ctxt->loc_logops = op; - mutex_init(&ctxt->loc_mutex); - ctxt->loc_exp = class_export_get(disk_obd->obd_self_export); - ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED; - ctxt->loc_chunk_size = LLOG_MIN_CHUNK_SIZE; - - rc = llog_group_set_ctxt(olg, ctxt, index); - if (rc) { - llog_ctxt_destroy(ctxt); - if (rc == -EEXIST) { - ctxt = llog_group_get_ctxt(olg, index); - if (ctxt) { - /* - * mds_lov_update_desc() 
might call here multiple - * times. So if the llog is already set up then - * don't to do it again. - */ - CDEBUG(D_CONFIG, "obd %s ctxt %d already set up\n", - obd->obd_name, index); - LASSERT(ctxt->loc_olg == olg); - LASSERT(ctxt->loc_obd == obd); - LASSERT(ctxt->loc_exp == disk_obd->obd_self_export); - LASSERT(ctxt->loc_logops == op); - llog_ctxt_put(ctxt); - } - rc = 0; - } - return rc; - } - - if (op->lop_setup) { - if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LLOG_SETUP)) - rc = -EOPNOTSUPP; - else - rc = op->lop_setup(env, obd, olg, index, disk_obd); - } - - if (rc) { - CERROR("%s: ctxt %d lop_setup=%p failed: rc = %d\n", - obd->obd_name, index, op->lop_setup, rc); - llog_group_clear_ctxt(olg, index); - llog_ctxt_destroy(ctxt); - } else { - CDEBUG(D_CONFIG, "obd %s ctxt %d is initialized\n", - obd->obd_name, index); - ctxt->loc_flags &= ~LLOG_CTXT_FLAG_UNINITIALIZED; - } - - return rc; -} -EXPORT_SYMBOL(llog_setup); - -/* context key constructor/destructor: llog_key_init, llog_key_fini */ -LU_KEY_INIT_FINI(llog, struct llog_thread_info); -/* context key: llog_thread_key */ -LU_CONTEXT_KEY_DEFINE(llog, LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL); -LU_KEY_INIT_GENERIC(llog); - -int llog_info_init(void) -{ - llog_key_init_generic(&llog_thread_key, NULL); - lu_context_key_register(&llog_thread_key); - return 0; -} - -void llog_info_fini(void) -{ - lu_context_key_degister(&llog_thread_key); -} diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c deleted file mode 100644 index b431c3408fe4..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c +++ /dev/null @@ -1,412 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/llog_swab.c - * - * Swabbing of llog datatypes (from disk or over the wire). 
- * - * Author: jacob berkman - */ - -#define DEBUG_SUBSYSTEM S_LOG - -#include -#include - -static void print_llogd_body(struct llogd_body *d) -{ - CDEBUG(D_OTHER, "llogd body: %p\n", d); - CDEBUG(D_OTHER, "\tlgd_logid.lgl_oi: " DOSTID "\n", - POSTID(&d->lgd_logid.lgl_oi)); - CDEBUG(D_OTHER, "\tlgd_logid.lgl_ogen: %#x\n", d->lgd_logid.lgl_ogen); - CDEBUG(D_OTHER, "\tlgd_ctxt_idx: %#x\n", d->lgd_ctxt_idx); - CDEBUG(D_OTHER, "\tlgd_llh_flags: %#x\n", d->lgd_llh_flags); - CDEBUG(D_OTHER, "\tlgd_index: %#x\n", d->lgd_index); - CDEBUG(D_OTHER, "\tlgd_saved_index: %#x\n", d->lgd_saved_index); - CDEBUG(D_OTHER, "\tlgd_len: %#x\n", d->lgd_len); - CDEBUG(D_OTHER, "\tlgd_cur_offset: %#llx\n", d->lgd_cur_offset); -} - -void lustre_swab_lu_fid(struct lu_fid *fid) -{ - __swab64s(&fid->f_seq); - __swab32s(&fid->f_oid); - __swab32s(&fid->f_ver); -} -EXPORT_SYMBOL(lustre_swab_lu_fid); - -void lustre_swab_ost_id(struct ost_id *oid) -{ - if (fid_seq_is_mdt0(oid->oi.oi_seq)) { - __swab64s(&oid->oi.oi_id); - __swab64s(&oid->oi.oi_seq); - } else { - lustre_swab_lu_fid(&oid->oi_fid); - } -} -EXPORT_SYMBOL(lustre_swab_ost_id); - -static void lustre_swab_llog_id(struct llog_logid *log_id) -{ - __swab64s(&log_id->lgl_oi.oi.oi_id); - __swab64s(&log_id->lgl_oi.oi.oi_seq); - __swab32s(&log_id->lgl_ogen); -} - -void lustre_swab_llogd_body(struct llogd_body *d) -{ - print_llogd_body(d); - lustre_swab_llog_id(&d->lgd_logid); - __swab32s(&d->lgd_ctxt_idx); - __swab32s(&d->lgd_llh_flags); - __swab32s(&d->lgd_index); - __swab32s(&d->lgd_saved_index); - __swab32s(&d->lgd_len); - __swab64s(&d->lgd_cur_offset); - print_llogd_body(d); -} -EXPORT_SYMBOL(lustre_swab_llogd_body); - -void lustre_swab_llogd_conn_body(struct llogd_conn_body *d) -{ - __swab64s(&d->lgdc_gen.mnt_cnt); - __swab64s(&d->lgdc_gen.conn_cnt); - lustre_swab_llog_id(&d->lgdc_logid); - __swab32s(&d->lgdc_ctxt_idx); -} -EXPORT_SYMBOL(lustre_swab_llogd_conn_body); - -static void lustre_swab_ll_fid(struct ll_fid *fid) -{ - 
__swab64s(&fid->id); - __swab32s(&fid->generation); - __swab32s(&fid->f_type); -} - -void lustre_swab_lu_seq_range(struct lu_seq_range *range) -{ - __swab64s(&range->lsr_start); - __swab64s(&range->lsr_end); - __swab32s(&range->lsr_index); - __swab32s(&range->lsr_flags); -} -EXPORT_SYMBOL(lustre_swab_lu_seq_range); - -void lustre_swab_llog_rec(struct llog_rec_hdr *rec) -{ - struct llog_rec_tail *tail = NULL; - - __swab32s(&rec->lrh_len); - __swab32s(&rec->lrh_index); - __swab32s(&rec->lrh_type); - __swab32s(&rec->lrh_id); - - switch (rec->lrh_type) { - case OST_SZ_REC: - { - struct llog_size_change_rec *lsc = - (struct llog_size_change_rec *)rec; - - lustre_swab_ll_fid(&lsc->lsc_fid); - __swab32s(&lsc->lsc_ioepoch); - tail = &lsc->lsc_tail; - break; - } - case MDS_UNLINK_REC: - { - struct llog_unlink_rec *lur = (struct llog_unlink_rec *)rec; - - __swab64s(&lur->lur_oid); - __swab32s(&lur->lur_oseq); - __swab32s(&lur->lur_count); - tail = &lur->lur_tail; - break; - } - case MDS_UNLINK64_REC: - { - struct llog_unlink64_rec *lur = - (struct llog_unlink64_rec *)rec; - - lustre_swab_lu_fid(&lur->lur_fid); - __swab32s(&lur->lur_count); - tail = &lur->lur_tail; - break; - } - case CHANGELOG_REC: - { - struct llog_changelog_rec *cr = - (struct llog_changelog_rec *)rec; - - __swab16s(&cr->cr.cr_namelen); - __swab16s(&cr->cr.cr_flags); - __swab32s(&cr->cr.cr_type); - __swab64s(&cr->cr.cr_index); - __swab64s(&cr->cr.cr_prev); - __swab64s(&cr->cr.cr_time); - lustre_swab_lu_fid(&cr->cr.cr_tfid); - lustre_swab_lu_fid(&cr->cr.cr_pfid); - if (cr->cr.cr_flags & CLF_RENAME) { - struct changelog_ext_rename *rnm = - changelog_rec_rename(&cr->cr); - - lustre_swab_lu_fid(&rnm->cr_sfid); - lustre_swab_lu_fid(&rnm->cr_spfid); - } - /* - * Because the tail follows a variable-length structure we need - * to compute its location at runtime - */ - tail = (struct llog_rec_tail *)((char *)&cr->cr + - changelog_rec_size(&cr->cr) + - cr->cr.cr_namelen); - break; - } - - case CHANGELOG_USER_REC: - 
{ - struct llog_changelog_user_rec *cur = - (struct llog_changelog_user_rec *)rec; - - __swab32s(&cur->cur_id); - __swab64s(&cur->cur_endrec); - tail = &cur->cur_tail; - break; - } - - case HSM_AGENT_REC: { - struct llog_agent_req_rec *arr = - (struct llog_agent_req_rec *)rec; - - __swab32s(&arr->arr_hai.hai_len); - __swab32s(&arr->arr_hai.hai_action); - lustre_swab_lu_fid(&arr->arr_hai.hai_fid); - lustre_swab_lu_fid(&arr->arr_hai.hai_dfid); - __swab64s(&arr->arr_hai.hai_cookie); - __swab64s(&arr->arr_hai.hai_extent.offset); - __swab64s(&arr->arr_hai.hai_extent.length); - __swab64s(&arr->arr_hai.hai_gid); - /* no swabing for opaque data */ - /* hai_data[0]; */ - break; - } - - case MDS_SETATTR64_REC: - { - struct llog_setattr64_rec *lsr = - (struct llog_setattr64_rec *)rec; - - lustre_swab_ost_id(&lsr->lsr_oi); - __swab32s(&lsr->lsr_uid); - __swab32s(&lsr->lsr_uid_h); - __swab32s(&lsr->lsr_gid); - __swab32s(&lsr->lsr_gid_h); - __swab64s(&lsr->lsr_valid); - tail = &lsr->lsr_tail; - break; - } - case OBD_CFG_REC: - /* these are swabbed as they are consumed */ - break; - case LLOG_HDR_MAGIC: - { - struct llog_log_hdr *llh = (struct llog_log_hdr *)rec; - - __swab64s(&llh->llh_timestamp); - __swab32s(&llh->llh_count); - __swab32s(&llh->llh_bitmap_offset); - __swab32s(&llh->llh_flags); - __swab32s(&llh->llh_size); - __swab32s(&llh->llh_cat_idx); - tail = LLOG_HDR_TAIL(llh); - break; - } - case LLOG_LOGID_MAGIC: - { - struct llog_logid_rec *lid = (struct llog_logid_rec *)rec; - - lustre_swab_llog_id(&lid->lid_id); - tail = &lid->lid_tail; - break; - } - case LLOG_GEN_REC: - { - struct llog_gen_rec *lgr = (struct llog_gen_rec *)rec; - - __swab64s(&lgr->lgr_gen.mnt_cnt); - __swab64s(&lgr->lgr_gen.conn_cnt); - tail = &lgr->lgr_tail; - break; - } - case LLOG_PAD_MAGIC: - break; - default: - CERROR("Unknown llog rec type %#x swabbing rec %p\n", - rec->lrh_type, rec); - } - - if (tail) { - __swab32s(&tail->lrt_len); - __swab32s(&tail->lrt_index); - } -} 
-EXPORT_SYMBOL(lustre_swab_llog_rec); - -static void print_llog_hdr(struct llog_log_hdr *h) -{ - CDEBUG(D_OTHER, "llog header: %p\n", h); - CDEBUG(D_OTHER, "\tllh_hdr.lrh_index: %#x\n", h->llh_hdr.lrh_index); - CDEBUG(D_OTHER, "\tllh_hdr.lrh_len: %#x\n", h->llh_hdr.lrh_len); - CDEBUG(D_OTHER, "\tllh_hdr.lrh_type: %#x\n", h->llh_hdr.lrh_type); - CDEBUG(D_OTHER, "\tllh_timestamp: %#llx\n", h->llh_timestamp); - CDEBUG(D_OTHER, "\tllh_count: %#x\n", h->llh_count); - CDEBUG(D_OTHER, "\tllh_bitmap_offset: %#x\n", h->llh_bitmap_offset); - CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags); - CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size); - CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx); - CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", - LLOG_HDR_TAIL(h)->lrt_index); - CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", - LLOG_HDR_TAIL(h)->lrt_len); -} - -void lustre_swab_llog_hdr(struct llog_log_hdr *h) -{ - print_llog_hdr(h); - - lustre_swab_llog_rec(&h->llh_hdr); - - print_llog_hdr(h); -} -EXPORT_SYMBOL(lustre_swab_llog_hdr); - -static void print_lustre_cfg(struct lustre_cfg *lcfg) -{ - int i; - - if (!(libcfs_debug & D_OTHER)) /* don't loop on nothing */ - return; - CDEBUG(D_OTHER, "lustre_cfg: %p\n", lcfg); - CDEBUG(D_OTHER, "\tlcfg->lcfg_version: %#x\n", lcfg->lcfg_version); - - CDEBUG(D_OTHER, "\tlcfg->lcfg_command: %#x\n", lcfg->lcfg_command); - CDEBUG(D_OTHER, "\tlcfg->lcfg_num: %#x\n", lcfg->lcfg_num); - CDEBUG(D_OTHER, "\tlcfg->lcfg_flags: %#x\n", lcfg->lcfg_flags); - CDEBUG(D_OTHER, "\tlcfg->lcfg_nid: %s\n", libcfs_nid2str(lcfg->lcfg_nid)); - - CDEBUG(D_OTHER, "\tlcfg->lcfg_bufcount: %d\n", lcfg->lcfg_bufcount); - if (lcfg->lcfg_bufcount < LUSTRE_CFG_MAX_BUFCOUNT) - for (i = 0; i < lcfg->lcfg_bufcount; i++) - CDEBUG(D_OTHER, "\tlcfg->lcfg_buflens[%d]: %d\n", - i, lcfg->lcfg_buflens[i]); -} - -void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg) -{ - int i; - - __swab32s(&lcfg->lcfg_version); - - if (lcfg->lcfg_version != LUSTRE_CFG_VERSION) { - 
CERROR("not swabbing lustre_cfg version %#x (expecting %#x)\n", - lcfg->lcfg_version, LUSTRE_CFG_VERSION); - return; - } - - __swab32s(&lcfg->lcfg_command); - __swab32s(&lcfg->lcfg_num); - __swab32s(&lcfg->lcfg_flags); - __swab64s(&lcfg->lcfg_nid); - __swab32s(&lcfg->lcfg_bufcount); - for (i = 0; i < lcfg->lcfg_bufcount && i < LUSTRE_CFG_MAX_BUFCOUNT; i++) - __swab32s(&lcfg->lcfg_buflens[i]); - - print_lustre_cfg(lcfg); -} - -/* used only for compatibility with old on-disk cfg_marker data */ -struct cfg_marker32 { - __u32 cm_step; - __u32 cm_flags; - __u32 cm_vers; - __u32 padding; - __u32 cm_createtime; - __u32 cm_canceltime; - char cm_tgtname[MTI_NAME_MAXLEN]; - char cm_comment[MTI_NAME_MAXLEN]; -}; - -#define MTI_NAMELEN32 (MTI_NAME_MAXLEN - \ - (sizeof(struct cfg_marker) - sizeof(struct cfg_marker32))) - -void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size) -{ - struct cfg_marker32 *cm32 = (struct cfg_marker32 *)marker; - - if (swab) { - __swab32s(&marker->cm_step); - __swab32s(&marker->cm_flags); - __swab32s(&marker->cm_vers); - } - if (size == sizeof(*cm32)) { - __u32 createtime, canceltime; - /* There was a problem with the original declaration of - * cfg_marker on 32-bit systems because it used time_t as - * a wire protocol structure, and didn't verify this in - * wirecheck. We now have to convert the offsets of the - * later fields in order to work on 32- and 64-bit systems. - * - * Fortunately, the cm_comment field has no functional use - * so can be sacrificed when converting the timestamp size. - * - * Overwrite fields from the end first, so they are not - * clobbered, and use memmove() instead of memcpy() because - * the source and target buffers overlap. 
bug 16771 - */ - createtime = cm32->cm_createtime; - canceltime = cm32->cm_canceltime; - memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32); - marker->cm_comment[MTI_NAMELEN32 - 1] = '\0'; - memmove(marker->cm_tgtname, cm32->cm_tgtname, - sizeof(marker->cm_tgtname)); - if (swab) { - __swab32s(&createtime); - __swab32s(&canceltime); - } - marker->cm_createtime = createtime; - marker->cm_canceltime = canceltime; - CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) for target %s, converting\n", - marker->cm_tgtname); - } else if (swab) { - __swab64s(&marker->cm_createtime); - __swab64s(&marker->cm_canceltime); - } -} diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c deleted file mode 100644 index 85f09aff6e83..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c +++ /dev/null @@ -1,134 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2013, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/lprocfs_counters.c - * - * Lustre lprocfs counter routines - * - * Author: Andreas Dilger - */ - -#include -#include -#include -#include - -void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount) -{ - struct lprocfs_counter *percpu_cntr; - struct lprocfs_counter_header *header; - int smp_id; - unsigned long flags = 0; - - if (!stats) - return; - - LASSERTF(0 <= idx && idx < stats->ls_num, - "idx %d, ls_num %hu\n", idx, stats->ls_num); - - /* With per-client stats, statistics are allocated only for - * single CPU area, so the smp_id should be 0 always. - */ - smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags); - if (smp_id < 0) - return; - - header = &stats->ls_cnt_header[idx]; - percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx); - percpu_cntr->lc_count++; - - if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) { - /* - * lprocfs_counter_add() can be called in interrupt context, - * as memory allocation could trigger memory shrinker call - * ldlm_pool_shrink(), which calls lprocfs_counter_add(). - * LU-1727. 
- * - */ - if (in_interrupt() && - (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) - percpu_cntr->lc_sum_irq += amount; - else - percpu_cntr->lc_sum += amount; - - if (header->lc_config & LPROCFS_CNTR_STDDEV) - percpu_cntr->lc_sumsquare += (__s64)amount * amount; - if (amount < percpu_cntr->lc_min) - percpu_cntr->lc_min = amount; - if (amount > percpu_cntr->lc_max) - percpu_cntr->lc_max = amount; - } - lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags); -} -EXPORT_SYMBOL(lprocfs_counter_add); - -void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount) -{ - struct lprocfs_counter *percpu_cntr; - struct lprocfs_counter_header *header; - int smp_id; - unsigned long flags = 0; - - if (!stats) - return; - - LASSERTF(0 <= idx && idx < stats->ls_num, - "idx %d, ls_num %hu\n", idx, stats->ls_num); - - /* With per-client stats, statistics are allocated only for - * single CPU area, so the smp_id should be 0 always. - */ - smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags); - if (smp_id < 0) - return; - - header = &stats->ls_cnt_header[idx]; - percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx); - if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) { - /* - * Sometimes we use RCU callbacks to free memory which calls - * lprocfs_counter_sub(), and RCU callbacks may execute in - * softirq context - right now that's the only case we're in - * softirq context here, use separate counter for that. - * bz20650. 
- * - */ - if (in_interrupt() && - (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) - percpu_cntr->lc_sum_irq -= amount; - else - percpu_cntr->lc_sum -= amount; - } - lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags); -} -EXPORT_SYMBOL(lprocfs_counter_sub); diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c deleted file mode 100644 index bdbe6f52031a..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c +++ /dev/null @@ -1,1698 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/obdclass/lprocfs_status.c - * - * Author: Hariharan Thantry - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include - -static const char * const obd_connect_names[] = { - "read_only", - "lov_index", - "connect_from_mds", - "write_grant", - "server_lock", - "version", - "request_portal", - "acl", - "xattr", - "create_on_write", - "truncate_lock", - "initial_transno", - "inode_bit_locks", - "join_file(obsolete)", - "getattr_by_fid", - "no_oh_for_devices", - "remote_client", - "remote_client_by_force", - "max_byte_per_rpc", - "64bit_qdata", - "mds_capability", - "oss_capability", - "early_lock_cancel", - "som", - "adaptive_timeouts", - "lru_resize", - "mds_mds_connection", - "real_conn", - "change_qunit_size", - "alt_checksum_algorithm", - "fid_is_enabled", - "version_recovery", - "pools", - "grant_shrink", - "skip_orphan", - "large_ea", - "full20", - "layout_lock", - "64bithash", - "object_max_bytes", - "imp_recov", - "jobstats", - "umask", - "einprogress", - "grant_param", - "flock_owner", - "lvb_type", - "nanoseconds_times", - "lightweight_conn", - "short_io", - "pingless", - "flock_deadlock", - "disp_stripe", - "open_by_fid", - "lfsck", - "unknown", - "unlink_close", - "multi_mod_rpcs", - "dir_stripe", - "subtree", - "lock_ahead", - "bulk_mbits", - "compact_obdo", - "second_flags", - NULL -}; - -int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep) -{ - __u64 mask = 1; - int i, ret = 0; - - for (i = 0; obd_connect_names[i]; i++, mask <<= 1) { - if (flags & mask) - ret += snprintf(page + ret, count - ret, "%s%s", - ret ? sep : "", obd_connect_names[i]); - } - if (flags & ~(mask - 1)) - ret += snprintf(page + ret, count - ret, - "%sunknown flags %#llx", - ret ? 
sep : "", flags & ~(mask - 1)); - return ret; -} -EXPORT_SYMBOL(obd_connect_flags2str); - -static void obd_connect_data_seqprint(struct seq_file *m, - struct obd_connect_data *ocd) -{ - u64 flags; - - LASSERT(ocd); - flags = ocd->ocd_connect_flags; - - seq_printf(m, " connect_data:\n" - " flags: %llx\n" - " instance: %u\n", - ocd->ocd_connect_flags, - ocd->ocd_instance); - if (flags & OBD_CONNECT_VERSION) - seq_printf(m, " target_version: %u.%u.%u.%u\n", - OBD_OCD_VERSION_MAJOR(ocd->ocd_version), - OBD_OCD_VERSION_MINOR(ocd->ocd_version), - OBD_OCD_VERSION_PATCH(ocd->ocd_version), - OBD_OCD_VERSION_FIX(ocd->ocd_version)); - if (flags & OBD_CONNECT_MDS) - seq_printf(m, " mdt_index: %d\n", ocd->ocd_group); - if (flags & OBD_CONNECT_GRANT) - seq_printf(m, " initial_grant: %d\n", ocd->ocd_grant); - if (flags & OBD_CONNECT_INDEX) - seq_printf(m, " target_index: %u\n", ocd->ocd_index); - if (flags & OBD_CONNECT_BRW_SIZE) - seq_printf(m, " max_brw_size: %d\n", ocd->ocd_brw_size); - if (flags & OBD_CONNECT_IBITS) - seq_printf(m, " ibits_known: %llx\n", - ocd->ocd_ibits_known); - if (flags & OBD_CONNECT_GRANT_PARAM) - seq_printf(m, " grant_block_size: %d\n" - " grant_inode_size: %d\n" - " grant_extent_overhead: %d\n", - ocd->ocd_blocksize, - ocd->ocd_inodespace, - ocd->ocd_grant_extent); - if (flags & OBD_CONNECT_TRANSNO) - seq_printf(m, " first_transno: %llx\n", - ocd->ocd_transno); - if (flags & OBD_CONNECT_CKSUM) - seq_printf(m, " cksum_types: %#x\n", - ocd->ocd_cksum_types); - if (flags & OBD_CONNECT_MAX_EASIZE) - seq_printf(m, " max_easize: %d\n", ocd->ocd_max_easize); - if (flags & OBD_CONNECT_MAXBYTES) - seq_printf(m, " max_object_bytes: %llx\n", - ocd->ocd_maxbytes); - if (flags & OBD_CONNECT_MULTIMODRPCS) - seq_printf(m, " max_mod_rpcs: %hu\n", - ocd->ocd_maxmodrpcs); -} - -int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val, - int mult) -{ - long decimal_val, frac_val; - int prtn; - - if (count < 10) - return -EINVAL; - - decimal_val = val / 
mult; - prtn = snprintf(buffer, count, "%ld", decimal_val); - frac_val = val % mult; - - if (prtn < (count - 4) && frac_val > 0) { - long temp_frac; - int i, temp_mult = 1, frac_bits = 0; - - temp_frac = frac_val * 10; - buffer[prtn++] = '.'; - while (frac_bits < 2 && (temp_frac / mult) < 1) { - /* only reserved 2 bits fraction */ - buffer[prtn++] = '0'; - temp_frac *= 10; - frac_bits++; - } - /* - * Need to think these cases : - * 1. #echo x.00 > /sys/xxx output result : x - * 2. #echo x.0x > /sys/xxx output result : x.0x - * 3. #echo x.x0 > /sys/xxx output result : x.x - * 4. #echo x.xx > /sys/xxx output result : x.xx - * Only reserved 2 bits fraction. - */ - for (i = 0; i < (5 - prtn); i++) - temp_mult *= 10; - - frac_bits = min((int)count - prtn, 3 - frac_bits); - prtn += snprintf(buffer + prtn, frac_bits, "%ld", - frac_val * temp_mult / mult); - - prtn--; - while (buffer[prtn] < '1' || buffer[prtn] > '9') { - prtn--; - if (buffer[prtn] == '.') { - prtn--; - break; - } - } - prtn++; - } - buffer[prtn++] = '\n'; - return prtn; -} -EXPORT_SYMBOL(lprocfs_read_frac_helper); - -int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count, - int *val, int mult) -{ - char kernbuf[20], *end, *pbuf; - - if (count > (sizeof(kernbuf) - 1)) - return -EINVAL; - - if (copy_from_user(kernbuf, buffer, count)) - return -EFAULT; - - kernbuf[count] = '\0'; - pbuf = kernbuf; - if (*pbuf == '-') { - mult = -mult; - pbuf++; - } - - *val = (int)simple_strtoul(pbuf, &end, 10) * mult; - if (pbuf == end) - return -EINVAL; - - if (end && *end == '.') { - int temp_val, pow = 1; - int i; - - pbuf = end + 1; - if (strlen(pbuf) > 5) - pbuf[5] = '\0'; /*only allow 5bits fractional*/ - - temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult; - - if (pbuf < end) { - for (i = 0; i < (end - pbuf); i++) - pow *= 10; - - *val += temp_val / pow; - } - } - return 0; -} -EXPORT_SYMBOL(lprocfs_write_frac_helper); - -static int lprocfs_no_percpu_stats; 
-module_param(lprocfs_no_percpu_stats, int, 0644); -MODULE_PARM_DESC(lprocfs_no_percpu_stats, "Do not alloc percpu data for lprocfs stats"); - -#define MAX_STRING_SIZE 128 - -int lprocfs_single_release(struct inode *inode, struct file *file) -{ - return single_release(inode, file); -} -EXPORT_SYMBOL(lprocfs_single_release); - -int lprocfs_seq_release(struct inode *inode, struct file *file) -{ - return seq_release(inode, file); -} -EXPORT_SYMBOL(lprocfs_seq_release); - -/* lprocfs API calls */ - -static const struct file_operations lprocfs_generic_fops = { }; - -void ldebugfs_add_vars(struct dentry *parent, struct lprocfs_vars *list, - void *data) -{ - if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list)) - return; - - while (list->name) { - umode_t mode = 0; - - if (list->proc_mode != 0000) { - mode = list->proc_mode; - } else if (list->fops) { - if (list->fops->read) - mode = 0444; - if (list->fops->write) - mode |= 0200; - } - debugfs_create_file(list->name, mode, parent, - list->data ?: data, - list->fops ?: &lprocfs_generic_fops); - list++; - } - return; -} -EXPORT_SYMBOL_GPL(ldebugfs_add_vars); - -/* Generic callbacks */ -static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - - return sprintf(buf, "%s\n", obd->obd_uuid.uuid); -} -LUSTRE_RO_ATTR(uuid); - -static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct obd_statfs osfs; - int rc = obd_statfs(NULL, obd->obd_self_export, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) - return sprintf(buf, "%u\n", osfs.os_bsize); - - return rc; -} -LUSTRE_RO_ATTR(blocksize); - -static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - 
struct obd_statfs osfs; - int rc = obd_statfs(NULL, obd->obd_self_export, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) { - __u32 blk_size = osfs.os_bsize >> 10; - __u64 result = osfs.os_blocks; - - while (blk_size >>= 1) - result <<= 1; - - return sprintf(buf, "%llu\n", result); - } - - return rc; -} -LUSTRE_RO_ATTR(kbytestotal); - -static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct obd_statfs osfs; - int rc = obd_statfs(NULL, obd->obd_self_export, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) { - __u32 blk_size = osfs.os_bsize >> 10; - __u64 result = osfs.os_bfree; - - while (blk_size >>= 1) - result <<= 1; - - return sprintf(buf, "%llu\n", result); - } - - return rc; -} -LUSTRE_RO_ATTR(kbytesfree); - -static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct obd_statfs osfs; - int rc = obd_statfs(NULL, obd->obd_self_export, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) { - __u32 blk_size = osfs.os_bsize >> 10; - __u64 result = osfs.os_bavail; - - while (blk_size >>= 1) - result <<= 1; - - return sprintf(buf, "%llu\n", result); - } - - return rc; -} -LUSTRE_RO_ATTR(kbytesavail); - -static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct obd_statfs osfs; - int rc = obd_statfs(NULL, obd->obd_self_export, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) - return sprintf(buf, "%llu\n", osfs.os_files); - - return rc; -} -LUSTRE_RO_ATTR(filestotal); - -static ssize_t filesfree_show(struct kobject *kobj, struct 
attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct obd_statfs osfs; - int rc = obd_statfs(NULL, obd->obd_self_export, &osfs, - get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ, - OBD_STATFS_NODELAY); - if (!rc) - return sprintf(buf, "%llu\n", osfs.os_ffree); - - return rc; -} -LUSTRE_RO_ATTR(filesfree); - -int lprocfs_rd_server_uuid(struct seq_file *m, void *data) -{ - struct obd_device *obd = data; - struct obd_import *imp; - char *imp_state_name = NULL; - int rc; - - LASSERT(obd); - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - imp = obd->u.cli.cl_import; - imp_state_name = ptlrpc_import_state_name(imp->imp_state); - seq_printf(m, "%s\t%s%s\n", - obd2cli_tgt(obd), imp_state_name, - imp->imp_deactive ? "\tDEACTIVATED" : ""); - - up_read(&obd->u.cli.cl_sem); - - return 0; -} -EXPORT_SYMBOL(lprocfs_rd_server_uuid); - -int lprocfs_rd_conn_uuid(struct seq_file *m, void *data) -{ - struct obd_device *obd = data; - struct ptlrpc_connection *conn; - int rc; - - LASSERT(obd); - - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - conn = obd->u.cli.cl_import->imp_connection; - if (conn && obd->u.cli.cl_import) - seq_printf(m, "%s\n", conn->c_remote_uuid.uuid); - else - seq_puts(m, "\n"); - - up_read(&obd->u.cli.cl_sem); - - return 0; -} -EXPORT_SYMBOL(lprocfs_rd_conn_uuid); - -/** - * Lock statistics structure for access, possibly only on this CPU. - * - * The statistics struct may be allocated with per-CPU structures for - * efficient concurrent update (usually only on server-wide stats), or - * as a single global struct (e.g. for per-client or per-job statistics), - * so the required locking depends on the type of structure allocated. - * - * For per-CPU statistics, pin the thread to the current cpuid so that - * will only access the statistics for that CPU. If the stats structure - * for the current CPU has not been allocated (or previously freed), - * allocate it now. 
The per-CPU statistics do not need locking since - * the thread is pinned to the CPU during update. - * - * For global statistics, lock the stats structure to prevent concurrent update. - * - * \param[in] stats statistics structure to lock - * \param[in] opc type of operation: - * LPROCFS_GET_SMP_ID: "lock" and return current CPU index - * for incrementing statistics for that CPU - * LPROCFS_GET_NUM_CPU: "lock" and return number of used - * CPU indices to iterate over all indices - * \param[out] flags CPU interrupt saved state for IRQ-safe locking - * - * \retval cpuid of current thread or number of allocated structs - * \retval negative on error (only for opc LPROCFS_GET_SMP_ID + per-CPU stats) - */ -int lprocfs_stats_lock(struct lprocfs_stats *stats, - enum lprocfs_stats_lock_ops opc, - unsigned long *flags) -{ - if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) - spin_lock_irqsave(&stats->ls_lock, *flags); - else - spin_lock(&stats->ls_lock); - return opc == LPROCFS_GET_NUM_CPU ? 1 : 0; - } - - switch (opc) { - case LPROCFS_GET_SMP_ID: { - unsigned int cpuid = get_cpu(); - - if (unlikely(!stats->ls_percpu[cpuid])) { - int rc = lprocfs_stats_alloc_one(stats, cpuid); - - if (rc < 0) { - put_cpu(); - return rc; - } - } - return cpuid; - } - case LPROCFS_GET_NUM_CPU: - return stats->ls_biggest_alloc_num; - default: - LBUG(); - } -} - -/** - * Unlock statistics structure after access. - * - * Unlock the lock acquired via lprocfs_stats_lock() for global statistics, - * or unpin this thread from the current cpuid for per-CPU statistics. - * - * This function must be called using the same arguments as used when calling - * lprocfs_stats_lock() so that the correct operation can be performed. 
- * - * \param[in] stats statistics structure to unlock - * \param[in] opc type of operation (current cpuid or number of structs) - * \param[in] flags CPU interrupt saved state for IRQ-safe locking - */ -void lprocfs_stats_unlock(struct lprocfs_stats *stats, - enum lprocfs_stats_lock_ops opc, - unsigned long *flags) -{ - if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) - spin_unlock_irqrestore(&stats->ls_lock, *flags); - else - spin_unlock(&stats->ls_lock); - } else if (opc == LPROCFS_GET_SMP_ID) { - put_cpu(); - } -} - -/** add up per-cpu counters */ -void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, - struct lprocfs_counter *cnt) -{ - unsigned int num_entry; - struct lprocfs_counter *percpu_cntr; - int i; - unsigned long flags = 0; - - memset(cnt, 0, sizeof(*cnt)); - - if (!stats) { - /* set count to 1 to avoid divide-by-zero errs in callers */ - cnt->lc_count = 1; - return; - } - - cnt->lc_min = LC_MIN_INIT; - - num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); - - for (i = 0; i < num_entry; i++) { - if (!stats->ls_percpu[i]) - continue; - percpu_cntr = lprocfs_stats_counter_get(stats, i, idx); - - cnt->lc_count += percpu_cntr->lc_count; - cnt->lc_sum += percpu_cntr->lc_sum; - if (percpu_cntr->lc_min < cnt->lc_min) - cnt->lc_min = percpu_cntr->lc_min; - if (percpu_cntr->lc_max > cnt->lc_max) - cnt->lc_max = percpu_cntr->lc_max; - cnt->lc_sumsquare += percpu_cntr->lc_sumsquare; - } - - lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags); -} - -/** - * Append a space separated list of current set flags to str. - */ -#define flag2str(flag, first) \ - do { \ - if (imp->imp_##flag) \ - seq_printf(m, "%s" #flag, first ? 
"" : ", "); \ - } while (0) -static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m) -{ - bool first = true; - - if (imp->imp_obd->obd_no_recov) { - seq_puts(m, "no_recov"); - first = false; - } - - flag2str(invalid, first); - first = false; - flag2str(deactive, first); - flag2str(replayable, first); - flag2str(pingable, first); - return 0; -} - -#undef flags2str - -static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep) -{ - __u64 mask = 1; - int i; - bool first = true; - - for (i = 0; obd_connect_names[i]; i++, mask <<= 1) { - if (flags & mask) { - seq_printf(m, "%s%s", - first ? sep : "", obd_connect_names[i]); - first = false; - } - } - if (flags & ~(mask - 1)) - seq_printf(m, "%sunknown flags %#llx", - first ? sep : "", flags & ~(mask - 1)); -} - -int lprocfs_rd_import(struct seq_file *m, void *data) -{ - char nidstr[LNET_NIDSTR_SIZE]; - struct lprocfs_counter ret; - struct lprocfs_counter_header *header; - struct obd_device *obd = data; - struct obd_import *imp; - struct obd_import_conn *conn; - struct obd_connect_data *ocd; - int j; - int k; - int rw = 0; - int rc; - - LASSERT(obd); - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - imp = obd->u.cli.cl_import; - ocd = &imp->imp_connect_data; - - seq_printf(m, "import:\n" - " name: %s\n" - " target: %s\n" - " state: %s\n" - " instance: %u\n" - " connect_flags: [ ", - obd->obd_name, - obd2cli_tgt(obd), - ptlrpc_import_state_name(imp->imp_state), - imp->imp_connect_data.ocd_instance); - obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, - ", "); - seq_puts(m, " ]\n"); - obd_connect_data_seqprint(m, ocd); - seq_puts(m, " import_flags: [ "); - obd_import_flags2str(imp, m); - - seq_puts(m, - " ]\n" - " connection:\n" - " failover_nids: [ "); - spin_lock(&imp->imp_lock); - j = 0; - list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { - libcfs_nid2str_r(conn->oic_conn->c_peer.nid, - nidstr, sizeof(nidstr)); - seq_printf(m, "%s%s", j ? 
", " : "", nidstr); - j++; - } - if (imp->imp_connection) - libcfs_nid2str_r(imp->imp_connection->c_peer.nid, - nidstr, sizeof(nidstr)); - else - strncpy(nidstr, "", sizeof(nidstr)); - seq_printf(m, - " ]\n" - " current_connection: %s\n" - " connection_attempts: %u\n" - " generation: %u\n" - " in-progress_invalidations: %u\n", - nidstr, - imp->imp_conn_cnt, - imp->imp_generation, - atomic_read(&imp->imp_inval_count)); - spin_unlock(&imp->imp_lock); - - if (!obd->obd_svc_stats) - goto out_climp; - - header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR]; - lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret); - if (ret.lc_count != 0) { - /* first argument to do_div MUST be __u64 */ - __u64 sum = ret.lc_sum; - - do_div(sum, ret.lc_count); - ret.lc_sum = sum; - } else { - ret.lc_sum = 0; - } - seq_printf(m, - " rpcs:\n" - " inflight: %u\n" - " unregistering: %u\n" - " timeouts: %u\n" - " avg_waittime: %llu %s\n", - atomic_read(&imp->imp_inflight), - atomic_read(&imp->imp_unregistering), - atomic_read(&imp->imp_timeouts), - ret.lc_sum, header->lc_units); - - k = 0; - for (j = 0; j < IMP_AT_MAX_PORTALS; j++) { - if (imp->imp_at.iat_portal[j] == 0) - break; - k = max_t(unsigned int, k, - at_get(&imp->imp_at.iat_service_estimate[j])); - } - seq_printf(m, - " service_estimates:\n" - " services: %u sec\n" - " network: %u sec\n", - k, - at_get(&imp->imp_at.iat_net_latency)); - - seq_printf(m, - " transactions:\n" - " last_replay: %llu\n" - " peer_committed: %llu\n" - " last_checked: %llu\n", - imp->imp_last_replay_transno, - imp->imp_peer_committed_transno, - imp->imp_last_transno_checked); - - /* avg data rates */ - for (rw = 0; rw <= 1; rw++) { - lprocfs_stats_collect(obd->obd_svc_stats, - PTLRPC_LAST_CNTR + BRW_READ_BYTES + rw, - &ret); - if (ret.lc_sum > 0 && ret.lc_count > 0) { - /* first argument to do_div MUST be __u64 */ - __u64 sum = ret.lc_sum; - - do_div(sum, ret.lc_count); - ret.lc_sum = sum; - seq_printf(m, - " %s_data_averages:\n" - " 
bytes_per_rpc: %llu\n", - rw ? "write" : "read", - ret.lc_sum); - } - k = (int)ret.lc_sum; - j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES; - header = &obd->obd_svc_stats->ls_cnt_header[j]; - lprocfs_stats_collect(obd->obd_svc_stats, j, &ret); - if (ret.lc_sum > 0 && ret.lc_count != 0) { - /* first argument to do_div MUST be __u64 */ - __u64 sum = ret.lc_sum; - - do_div(sum, ret.lc_count); - ret.lc_sum = sum; - seq_printf(m, - " %s_per_rpc: %llu\n", - header->lc_units, ret.lc_sum); - j = (int)ret.lc_sum; - if (j > 0) - seq_printf(m, - " MB_per_sec: %u.%.02u\n", - k / j, (100 * k / j) % 100); - } - } - -out_climp: - up_read(&obd->u.cli.cl_sem); - return 0; -} -EXPORT_SYMBOL(lprocfs_rd_import); - -int lprocfs_rd_state(struct seq_file *m, void *data) -{ - struct obd_device *obd = data; - struct obd_import *imp; - int j, k, rc; - - LASSERT(obd); - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - imp = obd->u.cli.cl_import; - - seq_printf(m, "current_state: %s\n", - ptlrpc_import_state_name(imp->imp_state)); - seq_puts(m, "state_history:\n"); - k = imp->imp_state_hist_idx; - for (j = 0; j < IMP_STATE_HIST_LEN; j++) { - struct import_state_hist *ish = - &imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN]; - if (ish->ish_state == 0) - continue; - seq_printf(m, " - [ %lld, %s ]\n", (s64)ish->ish_time, - ptlrpc_import_state_name(ish->ish_state)); - } - - up_read(&obd->u.cli.cl_sem); - return 0; -} -EXPORT_SYMBOL(lprocfs_rd_state); - -int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at) -{ - int i; - - for (i = 0; i < AT_BINS; i++) - seq_printf(m, "%3u ", at->at_hist[i]); - seq_puts(m, "\n"); - return 0; -} -EXPORT_SYMBOL(lprocfs_at_hist_helper); - -/* See also ptlrpc_lprocfs_rd_timeouts */ -int lprocfs_rd_timeouts(struct seq_file *m, void *data) -{ - struct obd_device *obd = data; - struct obd_import *imp; - unsigned int cur, worst; - time64_t now, worstt; - struct dhms ts; - int i, rc; - - LASSERT(obd); - rc = lprocfs_climp_check(obd); 
- if (rc) - return rc; - - imp = obd->u.cli.cl_import; - - now = ktime_get_real_seconds(); - - /* Some network health info for kicks */ - s2dhms(&ts, now - imp->imp_last_reply_time); - seq_printf(m, "%-10s : %lld, " DHMS_FMT " ago\n", - "last reply", (s64)imp->imp_last_reply_time, DHMS_VARS(&ts)); - - cur = at_get(&imp->imp_at.iat_net_latency); - worst = imp->imp_at.iat_net_latency.at_worst_ever; - worstt = imp->imp_at.iat_net_latency.at_worst_time; - s2dhms(&ts, now - worstt); - seq_printf(m, "%-10s : cur %3u worst %3u (at %lld, " DHMS_FMT " ago) ", - "network", cur, worst, (s64)worstt, DHMS_VARS(&ts)); - lprocfs_at_hist_helper(m, &imp->imp_at.iat_net_latency); - - for (i = 0; i < IMP_AT_MAX_PORTALS; i++) { - if (imp->imp_at.iat_portal[i] == 0) - break; - cur = at_get(&imp->imp_at.iat_service_estimate[i]); - worst = imp->imp_at.iat_service_estimate[i].at_worst_ever; - worstt = imp->imp_at.iat_service_estimate[i].at_worst_time; - s2dhms(&ts, now - worstt); - seq_printf(m, "portal %-2d : cur %3u worst %3u (at %lld, " - DHMS_FMT " ago) ", imp->imp_at.iat_portal[i], - cur, worst, (s64)worstt, DHMS_VARS(&ts)); - lprocfs_at_hist_helper(m, &imp->imp_at.iat_service_estimate[i]); - } - - up_read(&obd->u.cli.cl_sem); - return 0; -} -EXPORT_SYMBOL(lprocfs_rd_timeouts); - -int lprocfs_rd_connect_flags(struct seq_file *m, void *data) -{ - struct obd_device *obd = data; - __u64 flags; - int rc; - - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - flags = obd->u.cli.cl_import->imp_connect_data.ocd_connect_flags; - seq_printf(m, "flags=%#llx\n", flags); - obd_connect_seq_flags2str(m, flags, "\n"); - seq_puts(m, "\n"); - up_read(&obd->u.cli.cl_sem); - return 0; -} -EXPORT_SYMBOL(lprocfs_rd_connect_flags); - -static struct attribute *obd_def_attrs[] = { - &lustre_attr_blocksize.attr, - &lustre_attr_kbytestotal.attr, - &lustre_attr_kbytesfree.attr, - &lustre_attr_kbytesavail.attr, - &lustre_attr_filestotal.attr, - &lustre_attr_filesfree.attr, - &lustre_attr_uuid.attr, - 
NULL, -}; - -static void obd_sysfs_release(struct kobject *kobj) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - - complete(&obd->obd_kobj_unregister); -} - -static struct kobj_type obd_ktype = { - .default_attrs = obd_def_attrs, - .sysfs_ops = &lustre_sysfs_ops, - .release = obd_sysfs_release, -}; - -int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list, - const struct attribute_group *attrs) -{ - int rc = 0; - - init_completion(&obd->obd_kobj_unregister); - rc = kobject_init_and_add(&obd->obd_kobj, &obd_ktype, - obd->obd_type->typ_kobj, - "%s", obd->obd_name); - if (rc) - return rc; - - if (attrs) { - rc = sysfs_create_group(&obd->obd_kobj, attrs); - if (rc) { - kobject_put(&obd->obd_kobj); - return rc; - } - } - - obd->obd_debugfs_entry = debugfs_create_dir(obd->obd_name, - obd->obd_type->typ_debugfs_entry); - ldebugfs_add_vars(obd->obd_debugfs_entry, list, obd); - - return rc; -} -EXPORT_SYMBOL_GPL(lprocfs_obd_setup); - -int lprocfs_obd_cleanup(struct obd_device *obd) -{ - if (!obd) - return -EINVAL; - - debugfs_remove_recursive(obd->obd_debugfs_entry); - - kobject_put(&obd->obd_kobj); - wait_for_completion(&obd->obd_kobj_unregister); - - return 0; -} -EXPORT_SYMBOL_GPL(lprocfs_obd_cleanup); - -int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid) -{ - struct lprocfs_counter *cntr; - unsigned int percpusize; - int rc = -ENOMEM; - unsigned long flags = 0; - int i; - - LASSERT(!stats->ls_percpu[cpuid]); - LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0); - - percpusize = lprocfs_stats_counter_size(stats); - stats->ls_percpu[cpuid] = kzalloc(percpusize, GFP_ATOMIC); - if (stats->ls_percpu[cpuid]) { - rc = 0; - if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) - spin_lock_irqsave(&stats->ls_lock, flags); - else - spin_lock(&stats->ls_lock); - if (stats->ls_biggest_alloc_num <= cpuid) - stats->ls_biggest_alloc_num = cpuid 
+ 1; - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) - spin_unlock_irqrestore(&stats->ls_lock, flags); - else - spin_unlock(&stats->ls_lock); - } - /* initialize the ls_percpu[cpuid] non-zero counter */ - for (i = 0; i < stats->ls_num; ++i) { - cntr = lprocfs_stats_counter_get(stats, cpuid, i); - cntr->lc_min = LC_MIN_INIT; - } - } - return rc; -} - -struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num, - enum lprocfs_stats_flags flags) -{ - struct lprocfs_stats *stats; - unsigned int num_entry; - unsigned int percpusize = 0; - int i; - - if (num == 0) - return NULL; - - if (lprocfs_no_percpu_stats != 0) - flags |= LPROCFS_STATS_FLAG_NOPERCPU; - - if (flags & LPROCFS_STATS_FLAG_NOPERCPU) - num_entry = 1; - else - num_entry = num_possible_cpus(); - - /* alloc percpu pointers for all possible cpu slots */ - stats = kvzalloc(offsetof(typeof(*stats), ls_percpu[num_entry]), - GFP_KERNEL); - if (!stats) - return NULL; - - stats->ls_num = num; - stats->ls_flags = flags; - spin_lock_init(&stats->ls_lock); - - /* alloc num of counter headers */ - stats->ls_cnt_header = kvmalloc_array(stats->ls_num, - sizeof(struct lprocfs_counter_header), - GFP_KERNEL | __GFP_ZERO); - if (!stats->ls_cnt_header) - goto fail; - - if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) { - /* contains only one set counters */ - percpusize = lprocfs_stats_counter_size(stats); - stats->ls_percpu[0] = kzalloc(percpusize, GFP_ATOMIC); - if (!stats->ls_percpu[0]) - goto fail; - stats->ls_biggest_alloc_num = 1; - } else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) { - /* alloc all percpu data */ - for (i = 0; i < num_entry; ++i) - if (lprocfs_stats_alloc_one(stats, i) < 0) - goto fail; - } - - return stats; - -fail: - lprocfs_free_stats(&stats); - return NULL; -} -EXPORT_SYMBOL(lprocfs_alloc_stats); - -void lprocfs_free_stats(struct lprocfs_stats **statsh) -{ - struct lprocfs_stats *stats = *statsh; - unsigned int num_entry; - unsigned int percpusize; - unsigned int i; - - if (!stats || 
stats->ls_num == 0) - return; - *statsh = NULL; - - if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) - num_entry = 1; - else - num_entry = num_possible_cpus(); - - percpusize = lprocfs_stats_counter_size(stats); - for (i = 0; i < num_entry; i++) - kfree(stats->ls_percpu[i]); - kvfree(stats->ls_cnt_header); - kvfree(stats); -} -EXPORT_SYMBOL(lprocfs_free_stats); - -__u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx, - enum lprocfs_fields_flags field) -{ - unsigned int i; - unsigned int num_cpu; - unsigned long flags = 0; - __u64 ret = 0; - - LASSERT(stats); - - num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); - for (i = 0; i < num_cpu; i++) { - if (!stats->ls_percpu[i]) - continue; - ret += lprocfs_read_helper( - lprocfs_stats_counter_get(stats, i, idx), - &stats->ls_cnt_header[idx], stats->ls_flags, - field); - } - lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags); - return ret; -} -EXPORT_SYMBOL(lprocfs_stats_collector); - -void lprocfs_clear_stats(struct lprocfs_stats *stats) -{ - struct lprocfs_counter *percpu_cntr; - int i; - int j; - unsigned int num_entry; - unsigned long flags = 0; - - num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); - - for (i = 0; i < num_entry; i++) { - if (!stats->ls_percpu[i]) - continue; - for (j = 0; j < stats->ls_num; j++) { - percpu_cntr = lprocfs_stats_counter_get(stats, i, j); - percpu_cntr->lc_count = 0; - percpu_cntr->lc_min = LC_MIN_INIT; - percpu_cntr->lc_max = 0; - percpu_cntr->lc_sumsquare = 0; - percpu_cntr->lc_sum = 0; - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) - percpu_cntr->lc_sum_irq = 0; - } - } - - lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags); -} -EXPORT_SYMBOL(lprocfs_clear_stats); - -static ssize_t lprocfs_stats_seq_write(struct file *file, - const char __user *buf, - size_t len, loff_t *off) -{ - struct seq_file *seq = file->private_data; - struct lprocfs_stats *stats = seq->private; - - lprocfs_clear_stats(stats); - - return len; -} 
- -static void *lprocfs_stats_seq_start(struct seq_file *p, loff_t *pos) -{ - struct lprocfs_stats *stats = p->private; - - return (*pos < stats->ls_num) ? pos : NULL; -} - -static void lprocfs_stats_seq_stop(struct seq_file *p, void *v) -{ -} - -static void *lprocfs_stats_seq_next(struct seq_file *p, void *v, loff_t *pos) -{ - (*pos)++; - return lprocfs_stats_seq_start(p, pos); -} - -/* seq file export of one lprocfs counter */ -static int lprocfs_stats_seq_show(struct seq_file *p, void *v) -{ - struct lprocfs_stats *stats = p->private; - struct lprocfs_counter_header *hdr; - struct lprocfs_counter ctr; - int idx = *(loff_t *)v; - - if (idx == 0) { - struct timespec64 now; - - ktime_get_real_ts64(&now); - seq_printf(p, "%-25s %llu.%9lu secs.usecs\n", - "snapshot_time", - (s64)now.tv_sec, (unsigned long)now.tv_nsec); - } - - hdr = &stats->ls_cnt_header[idx]; - lprocfs_stats_collect(stats, idx, &ctr); - - if (ctr.lc_count != 0) { - seq_printf(p, "%-25s %lld samples [%s]", - hdr->lc_name, ctr.lc_count, hdr->lc_units); - - if ((hdr->lc_config & LPROCFS_CNTR_AVGMINMAX) && - (ctr.lc_count > 0)) { - seq_printf(p, " %lld %lld %lld", - ctr.lc_min, ctr.lc_max, ctr.lc_sum); - if (hdr->lc_config & LPROCFS_CNTR_STDDEV) - seq_printf(p, " %lld", ctr.lc_sumsquare); - } - seq_putc(p, '\n'); - } - - return 0; -} - -static const struct seq_operations lprocfs_stats_seq_sops = { - .start = lprocfs_stats_seq_start, - .stop = lprocfs_stats_seq_stop, - .next = lprocfs_stats_seq_next, - .show = lprocfs_stats_seq_show, -}; - -static int lprocfs_stats_seq_open(struct inode *inode, struct file *file) -{ - struct seq_file *seq; - int rc; - - rc = seq_open(file, &lprocfs_stats_seq_sops); - if (rc) - return rc; - - seq = file->private_data; - seq->private = inode->i_private; - - return 0; -} - -const struct file_operations lprocfs_stats_seq_fops = { - .owner = THIS_MODULE, - .open = lprocfs_stats_seq_open, - .read = seq_read, - .write = lprocfs_stats_seq_write, - .llseek = seq_lseek, - .release 
= lprocfs_seq_release, -}; -EXPORT_SYMBOL_GPL(lprocfs_stats_seq_fops); - -void lprocfs_counter_init(struct lprocfs_stats *stats, int index, - unsigned int conf, const char *name, - const char *units) -{ - struct lprocfs_counter_header *header; - struct lprocfs_counter *percpu_cntr; - unsigned long flags = 0; - unsigned int i; - unsigned int num_cpu; - - header = &stats->ls_cnt_header[index]; - LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n", - index, name, units); - - header->lc_config = conf; - header->lc_name = name; - header->lc_units = units; - - num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); - for (i = 0; i < num_cpu; ++i) { - if (!stats->ls_percpu[i]) - continue; - percpu_cntr = lprocfs_stats_counter_get(stats, i, index); - percpu_cntr->lc_count = 0; - percpu_cntr->lc_min = LC_MIN_INIT; - percpu_cntr->lc_max = 0; - percpu_cntr->lc_sumsquare = 0; - percpu_cntr->lc_sum = 0; - if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) - percpu_cntr->lc_sum_irq = 0; - } - lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags); -} -EXPORT_SYMBOL(lprocfs_counter_init); - -int lprocfs_exp_cleanup(struct obd_export *exp) -{ - return 0; -} -EXPORT_SYMBOL(lprocfs_exp_cleanup); - -__s64 lprocfs_read_helper(struct lprocfs_counter *lc, - struct lprocfs_counter_header *header, - enum lprocfs_stats_flags flags, - enum lprocfs_fields_flags field) -{ - __s64 ret = 0; - - if (!lc || !header) - return 0; - - switch (field) { - case LPROCFS_FIELDS_FLAGS_CONFIG: - ret = header->lc_config; - break; - case LPROCFS_FIELDS_FLAGS_SUM: - ret = lc->lc_sum; - if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) - ret += lc->lc_sum_irq; - break; - case LPROCFS_FIELDS_FLAGS_MIN: - ret = lc->lc_min; - break; - case LPROCFS_FIELDS_FLAGS_MAX: - ret = lc->lc_max; - break; - case LPROCFS_FIELDS_FLAGS_AVG: - ret = (lc->lc_max - lc->lc_min) / 2; - break; - case LPROCFS_FIELDS_FLAGS_SUMSQUARE: - ret = lc->lc_sumsquare; - break; - case LPROCFS_FIELDS_FLAGS_COUNT: - ret 
= lc->lc_count; - break; - default: - break; - } - - return 0; -} -EXPORT_SYMBOL(lprocfs_read_helper); - -int lprocfs_write_helper(const char __user *buffer, unsigned long count, - int *val) -{ - return lprocfs_write_frac_helper(buffer, count, val, 1); -} -EXPORT_SYMBOL(lprocfs_write_helper); - -int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, - __u64 *val) -{ - return lprocfs_write_frac_u64_helper(buffer, count, val, 1); -} -EXPORT_SYMBOL(lprocfs_write_u64_helper); - -int lprocfs_write_frac_u64_helper(const char __user *buffer, - unsigned long count, __u64 *val, int mult) -{ - char kernbuf[22], *end, *pbuf; - __u64 whole, frac = 0, units; - unsigned int frac_d = 1; - int sign = 1; - - if (count > (sizeof(kernbuf) - 1)) - return -EINVAL; - - if (copy_from_user(kernbuf, buffer, count)) - return -EFAULT; - - kernbuf[count] = '\0'; - pbuf = kernbuf; - if (*pbuf == '-') { - sign = -1; - pbuf++; - } - - whole = simple_strtoull(pbuf, &end, 10); - if (pbuf == end) - return -EINVAL; - - if (*end == '.') { - int i; - - pbuf = end + 1; - - /* need to limit frac_d to a __u32 */ - if (strlen(pbuf) > 10) - pbuf[10] = '\0'; - - frac = simple_strtoull(pbuf, &end, 10); - /* count decimal places */ - for (i = 0; i < (end - pbuf); i++) - frac_d *= 10; - } - - units = 1; - if (end) { - switch (tolower(*end)) { - case 'p': - units <<= 10; - /* fall through */ - case 't': - units <<= 10; - /* fall through */ - case 'g': - units <<= 10; - /* fall through */ - case 'm': - units <<= 10; - /* fall through */ - case 'k': - units <<= 10; - } - } - /* Specified units override the multiplier */ - if (units > 1) - mult = units; - - frac *= mult; - do_div(frac, frac_d); - *val = sign * (whole * mult + frac); - return 0; -} -EXPORT_SYMBOL(lprocfs_write_frac_u64_helper); - -static char *lprocfs_strnstr(const char *s1, const char *s2, size_t len) -{ - size_t l2; - - l2 = strlen(s2); - if (!l2) - return (char *)s1; - while (len >= l2) { - len--; - if (!memcmp(s1, s2, l2)) 
- return (char *)s1; - s1++; - } - return NULL; -} - -/** - * Find the string \a name in the input \a buffer, and return a pointer to the - * value immediately following \a name, reducing \a count appropriately. - * If \a name is not found the original \a buffer is returned. - */ -char *lprocfs_find_named_value(const char *buffer, const char *name, - size_t *count) -{ - char *val; - size_t buflen = *count; - - /* there is no strnstr() in rhel5 and ubuntu kernels */ - val = lprocfs_strnstr(buffer, name, buflen); - if (!val) - return (char *)buffer; - - val += strlen(name); /* skip prefix */ - while (val < buffer + buflen && isspace(*val)) /* skip separator */ - val++; - - *count = 0; - while (val < buffer + buflen && isalnum(*val)) { - ++*count; - ++val; - } - - return val - *count; -} -EXPORT_SYMBOL(lprocfs_find_named_value); - -void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value) -{ - if (value >= OBD_HIST_MAX) - value = OBD_HIST_MAX - 1; - - spin_lock(&oh->oh_lock); - oh->oh_buckets[value]++; - spin_unlock(&oh->oh_lock); -} -EXPORT_SYMBOL(lprocfs_oh_tally); - -void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value) -{ - unsigned int val = 0; - - if (likely(value != 0)) - val = min(fls(value - 1), OBD_HIST_MAX); - - lprocfs_oh_tally(oh, val); -} -EXPORT_SYMBOL(lprocfs_oh_tally_log2); - -unsigned long lprocfs_oh_sum(struct obd_histogram *oh) -{ - unsigned long ret = 0; - int i; - - for (i = 0; i < OBD_HIST_MAX; i++) - ret += oh->oh_buckets[i]; - return ret; -} -EXPORT_SYMBOL(lprocfs_oh_sum); - -void lprocfs_oh_clear(struct obd_histogram *oh) -{ - spin_lock(&oh->oh_lock); - memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets)); - spin_unlock(&oh->oh_lock); -} -EXPORT_SYMBOL(lprocfs_oh_clear); - -int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count, - struct root_squash_info *squash, char *name) -{ - char kernbuf[64], *tmp, *errmsg; - unsigned long uid, gid; - int rc; - - if (count >= sizeof(kernbuf)) { - errmsg = 
"string too long"; - rc = -EINVAL; - goto failed_noprint; - } - if (copy_from_user(kernbuf, buffer, count)) { - errmsg = "bad address"; - rc = -EFAULT; - goto failed_noprint; - } - kernbuf[count] = '\0'; - - /* look for uid gid separator */ - tmp = strchr(kernbuf, ':'); - if (!tmp) { - errmsg = "needs uid:gid format"; - rc = -EINVAL; - goto failed; - } - *tmp = '\0'; - tmp++; - - /* parse uid */ - if (kstrtoul(kernbuf, 0, &uid) != 0) { - errmsg = "bad uid"; - rc = -EINVAL; - goto failed; - } - /* parse gid */ - if (kstrtoul(tmp, 0, &gid) != 0) { - errmsg = "bad gid"; - rc = -EINVAL; - goto failed; - } - - squash->rsi_uid = uid; - squash->rsi_gid = gid; - - LCONSOLE_INFO("%s: root_squash is set to %u:%u\n", - name, squash->rsi_uid, squash->rsi_gid); - return count; - -failed: - if (tmp) { - tmp--; - *tmp = ':'; - } - CWARN("%s: failed to set root_squash to \"%s\", %s, rc = %d\n", - name, kernbuf, errmsg, rc); - return rc; -failed_noprint: - CWARN("%s: failed to set root_squash due to %s, rc = %d\n", - name, errmsg, rc); - return rc; -} -EXPORT_SYMBOL(lprocfs_wr_root_squash); - -int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count, - struct root_squash_info *squash, char *name) -{ - char *kernbuf = NULL, *errmsg; - struct list_head tmp; - int len = count; - int rc; - - if (count > 4096) { - errmsg = "string too long"; - rc = -EINVAL; - goto failed; - } - - kernbuf = kzalloc(count + 1, GFP_NOFS); - if (!kernbuf) { - errmsg = "no memory"; - rc = -ENOMEM; - goto failed; - } - - if (copy_from_user(kernbuf, buffer, count)) { - errmsg = "bad address"; - rc = -EFAULT; - goto failed; - } - kernbuf[count] = '\0'; - - if (count > 0 && kernbuf[count - 1] == '\n') - len = count - 1; - - if ((len == 4 && !strncmp(kernbuf, "NONE", len)) || - (len == 5 && !strncmp(kernbuf, "clear", len))) { - /* empty string is special case */ - down_write(&squash->rsi_sem); - if (!list_empty(&squash->rsi_nosquash_nids)) - cfs_free_nidlist(&squash->rsi_nosquash_nids); - 
up_write(&squash->rsi_sem); - LCONSOLE_INFO("%s: nosquash_nids is cleared\n", name); - kfree(kernbuf); - return count; - } - - INIT_LIST_HEAD(&tmp); - if (cfs_parse_nidlist(kernbuf, count, &tmp) <= 0) { - errmsg = "can't parse"; - rc = -EINVAL; - goto failed; - } - LCONSOLE_INFO("%s: nosquash_nids set to %s\n", - name, kernbuf); - kfree(kernbuf); - kernbuf = NULL; - - down_write(&squash->rsi_sem); - if (!list_empty(&squash->rsi_nosquash_nids)) - cfs_free_nidlist(&squash->rsi_nosquash_nids); - list_splice(&tmp, &squash->rsi_nosquash_nids); - up_write(&squash->rsi_sem); - - return count; - -failed: - if (kernbuf) { - CWARN("%s: failed to set nosquash_nids to \"%s\", %s rc = %d\n", - name, kernbuf, errmsg, rc); - kfree(kernbuf); - kernbuf = NULL; - } else { - CWARN("%s: failed to set nosquash_nids due to %s rc = %d\n", - name, errmsg, rc); - } - return rc; -} -EXPORT_SYMBOL(lprocfs_wr_nosquash_nids); - -static ssize_t lustre_attr_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct lustre_attr *a = container_of(attr, struct lustre_attr, attr); - - return a->show ? a->show(kobj, attr, buf) : 0; -} - -static ssize_t lustre_attr_store(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t len) -{ - struct lustre_attr *a = container_of(attr, struct lustre_attr, attr); - - return a->store ? a->store(kobj, attr, buf, len) : len; -} - -const struct sysfs_ops lustre_sysfs_ops = { - .show = lustre_attr_show, - .store = lustre_attr_store, -}; -EXPORT_SYMBOL_GPL(lustre_sysfs_ops); diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c deleted file mode 100644 index aa9d74e087f4..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/lu_object.c +++ /dev/null @@ -1,2056 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/lu_object.c - * - * Lustre Object. - * These are the only exported functions, they provide some generic - * infrastructure for managing object devices - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include - -/* hash_long() */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct lu_site_bkt_data { - /** - * LRU list, updated on each access to object. Protected by - * bucket lock of lu_site::ls_obj_hash. - * - * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are - * moved to the lu_site::ls_lru.prev (this is due to the non-existence - * of list_for_each_entry_safe_reverse()). - */ - struct list_head lsb_lru; - /** - * Wait-queue signaled when an object in this site is ultimately - * destroyed (lu_object_free()). It is used by lu_object_find() to - * wait before re-trying when object in the process of destruction is - * found in the hash table. 
- * - * \see htable_lookup(). - */ - wait_queue_head_t lsb_marche_funebre; -}; - -enum { - LU_CACHE_PERCENT_MAX = 50, - LU_CACHE_PERCENT_DEFAULT = 20 -}; - -#define LU_CACHE_NR_MAX_ADJUST 512 -#define LU_CACHE_NR_UNLIMITED -1 -#define LU_CACHE_NR_DEFAULT LU_CACHE_NR_UNLIMITED -#define LU_CACHE_NR_LDISKFS_LIMIT LU_CACHE_NR_UNLIMITED -#define LU_CACHE_NR_ZFS_LIMIT 256 - -#define LU_SITE_BITS_MIN 12 -#define LU_SITE_BITS_MAX 24 -#define LU_SITE_BITS_MAX_CL 19 -/** - * total 256 buckets, we don't want too many buckets because: - * - consume too much memory - * - avoid unbalanced LRU list - */ -#define LU_SITE_BKT_BITS 8 - -static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; -module_param(lu_cache_percent, int, 0644); -MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache"); - -static long lu_cache_nr = LU_CACHE_NR_DEFAULT; -module_param(lu_cache_nr, long, 0644); -MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache"); - -static void lu_object_free(const struct lu_env *env, struct lu_object *o); -static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx); - -wait_queue_head_t * -lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid) -{ - struct cfs_hash_bd bd; - struct lu_site_bkt_data *bkt; - - cfs_hash_bd_get(site->ls_obj_hash, fid, &bd); - bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd); - return &bkt->lsb_marche_funebre; -} -EXPORT_SYMBOL(lu_site_wq_from_fid); - -/** - * Decrease reference counter on object. If last reference is freed, return - * object to the cache, unless lu_object_is_dying(o) holds. In the latter - * case, free object immediately. 
- */ -void lu_object_put(const struct lu_env *env, struct lu_object *o) -{ - struct lu_site_bkt_data *bkt; - struct lu_object_header *top; - struct lu_site *site; - struct lu_object *orig; - struct cfs_hash_bd bd; - const struct lu_fid *fid; - - top = o->lo_header; - site = o->lo_dev->ld_site; - orig = o; - - /* - * till we have full fids-on-OST implemented anonymous objects - * are possible in OSP. such an object isn't listed in the site - * so we should not remove it from the site. - */ - fid = lu_object_fid(o); - if (fid_is_zero(fid)) { - LASSERT(!top->loh_hash.next && !top->loh_hash.pprev); - LASSERT(list_empty(&top->loh_lru)); - if (!atomic_dec_and_test(&top->loh_ref)) - return; - list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release) - o->lo_ops->loo_object_release(env, o); - } - lu_object_free(env, orig); - return; - } - - cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd); - bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd); - - if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) { - if (lu_object_is_dying(top)) { - /* - * somebody may be waiting for this, currently only - * used for cl_object, see cl_object_put_last(). - */ - wake_up_all(&bkt->lsb_marche_funebre); - } - return; - } - - /* - * When last reference is released, iterate over object - * layers, and notify them that object is no longer busy. - */ - list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release) - o->lo_ops->loo_object_release(env, o); - } - - if (!lu_object_is_dying(top)) { - LASSERT(list_empty(&top->loh_lru)); - list_add_tail(&top->loh_lru, &bkt->lsb_lru); - percpu_counter_inc(&site->ls_lru_len_counter); - CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p\n", - o, site->ls_obj_hash, bkt); - cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); - return; - } - - /* - * If object is dying (will not be cached), then removed it - * from hash table and LRU. 
- * - * This is done with hash table and LRU lists locked. As the only - * way to acquire first reference to previously unreferenced - * object is through hash-table lookup (lu_object_find()), - * or LRU scanning (lu_site_purge()), that are done under hash-table - * and LRU lock, no race with concurrent object lookup is possible - * and we can safely destroy object below. - */ - if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) - cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash); - cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); - /* - * Object was already removed from hash and lru above, can - * kill it. - */ - lu_object_free(env, orig); -} -EXPORT_SYMBOL(lu_object_put); - -/** - * Kill the object and take it out of LRU cache. - * Currently used by client code for layout change. - */ -void lu_object_unhash(const struct lu_env *env, struct lu_object *o) -{ - struct lu_object_header *top; - - top = o->lo_header; - set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags); - if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) { - struct lu_site *site = o->lo_dev->ld_site; - struct cfs_hash *obj_hash = site->ls_obj_hash; - struct cfs_hash_bd bd; - - cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1); - if (!list_empty(&top->loh_lru)) { - struct lu_site_bkt_data *bkt; - - list_del_init(&top->loh_lru); - bkt = cfs_hash_bd_extra_get(obj_hash, &bd); - percpu_counter_dec(&site->ls_lru_len_counter); - } - cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash); - cfs_hash_bd_unlock(obj_hash, &bd, 1); - } -} -EXPORT_SYMBOL(lu_object_unhash); - -/** - * Allocate new object. - * - * This follows object creation protocol, described in the comment within - * struct lu_device_operations definition. 
- */ -static struct lu_object *lu_object_alloc(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf) -{ - struct lu_object *scan; - struct lu_object *top; - struct list_head *layers; - unsigned int init_mask = 0; - unsigned int init_flag; - int clean; - int result; - - /* - * Create top-level object slice. This will also create - * lu_object_header. - */ - top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); - if (!top) - return ERR_PTR(-ENOMEM); - if (IS_ERR(top)) - return top; - /* - * This is the only place where object fid is assigned. It's constant - * after this point. - */ - top->lo_header->loh_fid = *f; - layers = &top->lo_header->loh_layers; - - do { - /* - * Call ->loo_object_init() repeatedly, until no more new - * object slices are created. - */ - clean = 1; - init_flag = 1; - list_for_each_entry(scan, layers, lo_linkage) { - if (init_mask & init_flag) - goto next; - clean = 0; - scan->lo_header = top->lo_header; - result = scan->lo_ops->loo_object_init(env, scan, conf); - if (result != 0) { - lu_object_free(env, top); - return ERR_PTR(result); - } - init_mask |= init_flag; -next: - init_flag <<= 1; - } - } while (!clean); - - list_for_each_entry_reverse(scan, layers, lo_linkage) { - if (scan->lo_ops->loo_object_start) { - result = scan->lo_ops->loo_object_start(env, scan); - if (result != 0) { - lu_object_free(env, top); - return ERR_PTR(result); - } - } - } - - lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED); - return top; -} - -/** - * Free an object. - */ -static void lu_object_free(const struct lu_env *env, struct lu_object *o) -{ - wait_queue_head_t *wq; - struct lu_site *site; - struct lu_object *scan; - struct list_head *layers; - struct list_head splice; - - site = o->lo_dev->ld_site; - layers = &o->lo_header->loh_layers; - wq = lu_site_wq_from_fid(site, &o->lo_header->loh_fid); - /* - * First call ->loo_object_delete() method to release all resources. 
- */ - list_for_each_entry_reverse(scan, layers, lo_linkage) { - if (scan->lo_ops->loo_object_delete) - scan->lo_ops->loo_object_delete(env, scan); - } - - /* - * Then, splice object layers into stand-alone list, and call - * ->loo_object_free() on all layers to free memory. Splice is - * necessary, because lu_object_header is freed together with the - * top-level slice. - */ - INIT_LIST_HEAD(&splice); - list_splice_init(layers, &splice); - while (!list_empty(&splice)) { - /* - * Free layers in bottom-to-top order, so that object header - * lives as long as possible and ->loo_object_free() methods - * can look at its contents. - */ - o = container_of(splice.prev, struct lu_object, lo_linkage); - list_del_init(&o->lo_linkage); - o->lo_ops->loo_object_free(env, o); - } - - if (waitqueue_active(wq)) - wake_up_all(wq); -} - -/** - * Free \a nr objects from the cold end of the site LRU list. - * if canblock is false, then don't block awaiting for another - * instance of lu_site_purge() to complete - */ -int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s, - int nr, bool canblock) -{ - struct lu_object_header *h; - struct lu_object_header *temp; - struct lu_site_bkt_data *bkt; - struct cfs_hash_bd bd; - struct cfs_hash_bd bd2; - struct list_head dispose; - int did_sth; - unsigned int start = 0; - int count; - int bnr; - unsigned int i; - - if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU)) - return 0; - - INIT_LIST_HEAD(&dispose); - /* - * Under LRU list lock, scan LRU list and move unreferenced objects to - * the dispose list, removing them from LRU and hash table. - */ - if (nr != ~0) - start = s->ls_purge_start; - bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1; - again: - /* - * It doesn't make any sense to make purge threads parallel, that can - * only bring troubles to us. See LU-5331. 
- */ - if (canblock) - mutex_lock(&s->ls_purge_mutex); - else if (!mutex_trylock(&s->ls_purge_mutex)) - goto out; - - did_sth = 0; - cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { - if (i < start) - continue; - count = bnr; - cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1); - bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); - - list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { - LASSERT(atomic_read(&h->loh_ref) == 0); - - cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2); - LASSERT(bd.bd_bucket == bd2.bd_bucket); - - cfs_hash_bd_del_locked(s->ls_obj_hash, - &bd2, &h->loh_hash); - list_move(&h->loh_lru, &dispose); - percpu_counter_dec(&s->ls_lru_len_counter); - if (did_sth == 0) - did_sth = 1; - - if (nr != ~0 && --nr == 0) - break; - - if (count > 0 && --count == 0) - break; - } - cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1); - cond_resched(); - /* - * Free everything on the dispose list. This is safe against - * races due to the reasons described in lu_object_put(). - */ - while (!list_empty(&dispose)) { - h = container_of(dispose.next, - struct lu_object_header, loh_lru); - list_del_init(&h->loh_lru); - lu_object_free(env, lu_object_top(h)); - lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); - } - - if (nr == 0) - break; - } - mutex_unlock(&s->ls_purge_mutex); - - if (nr != 0 && did_sth && start != 0) { - start = 0; /* restart from the first bucket */ - goto again; - } - /* race on s->ls_purge_start, but nobody cares */ - s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash); -out: - return nr; -} -EXPORT_SYMBOL(lu_site_purge_objects); - -/* - * Object printing. - * - * Code below has to jump through certain loops to output object description - * into libcfs_debug_msg-based log. The problem is that lu_object_print() - * composes object description from strings that are parts of _lines_ of - * output (i.e., strings that are not terminated by newline). 
This doesn't fit - * very well into libcfs_debug_msg() interface that assumes that each message - * supplied to it is a self-contained output line. - * - * To work around this, strings are collected in a temporary buffer - * (implemented as a value of lu_cdebug_key key), until terminating newline - * character is detected. - * - */ - -enum { - /** - * Maximal line size. - * - * XXX overflow is not handled correctly. - */ - LU_CDEBUG_LINE = 512 -}; - -struct lu_cdebug_data { - /** - * Temporary buffer. - */ - char lck_area[LU_CDEBUG_LINE]; -}; - -/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */ -LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data); - -/** - * Key, holding temporary buffer. This key is registered very early by - * lu_global_init(). - */ -static struct lu_context_key lu_global_key = { - .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | - LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL, - .lct_init = lu_global_key_init, - .lct_fini = lu_global_key_fini -}; - -/** - * Printer function emitting messages through libcfs_debug_msg(). - */ -int lu_cdebug_printer(const struct lu_env *env, - void *cookie, const char *format, ...) -{ - struct libcfs_debug_msg_data *msgdata = cookie; - struct lu_cdebug_data *key; - int used; - int complete; - va_list args; - - va_start(args, format); - - key = lu_context_key_get(&env->le_ctx, &lu_global_key); - - used = strlen(key->lck_area); - complete = format[strlen(format) - 1] == '\n'; - /* - * Append new chunk to the buffer. - */ - vsnprintf(key->lck_area + used, - ARRAY_SIZE(key->lck_area) - used, format, args); - if (complete) { - if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) - libcfs_debug_msg(msgdata, "%s\n", key->lck_area); - key->lck_area[0] = 0; - } - va_end(args); - return 0; -} -EXPORT_SYMBOL(lu_cdebug_printer); - -/** - * Print object header. 
- */ -void lu_object_header_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct lu_object_header *hdr) -{ - (*printer)(env, cookie, "header@%p[%#lx, %d, " DFID "%s%s%s]", - hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref), - PFID(&hdr->loh_fid), - hlist_unhashed(&hdr->loh_hash) ? "" : " hash", - list_empty((struct list_head *)&hdr->loh_lru) ? \ - "" : " lru", - hdr->loh_attr & LOHA_EXISTS ? " exist":""); -} -EXPORT_SYMBOL(lu_object_header_print); - -/** - * Print human readable representation of the \a o to the \a printer. - */ -void lu_object_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct lu_object *o) -{ - static const char ruler[] = "........................................"; - struct lu_object_header *top; - int depth = 4; - - top = o->lo_header; - lu_object_header_print(env, cookie, printer, top); - (*printer)(env, cookie, "{\n"); - - list_for_each_entry(o, &top->loh_layers, lo_linkage) { - /* - * print `.' \a depth times followed by type name and address - */ - (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, - o->lo_dev->ld_type->ldt_name, o); - - if (o->lo_ops->loo_object_print) - (*o->lo_ops->loo_object_print)(env, cookie, printer, o); - - (*printer)(env, cookie, "\n"); - } - - (*printer)(env, cookie, "} header@%p\n", top); -} -EXPORT_SYMBOL(lu_object_print); - -/* - * NOTE: htable_lookup() is called with the relevant - * hash bucket locked, but might drop and re-acquire the lock. - */ -static struct lu_object *htable_lookup(struct lu_site *s, - struct cfs_hash_bd *bd, - const struct lu_fid *f, - __u64 *version) -{ - struct lu_site_bkt_data *bkt; - struct lu_object_header *h; - struct hlist_node *hnode; - u64 ver = cfs_hash_bd_version_get(bd); - - if (*version == ver) - return ERR_PTR(-ENOENT); - - *version = ver; - bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); - /* cfs_hash_bd_peek_locked is a somehow "internal" function - * of cfs_hash, it doesn't add refcount on object. 
- */ - hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); - if (!hnode) { - lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); - return ERR_PTR(-ENOENT); - } - - h = container_of(hnode, struct lu_object_header, loh_hash); - cfs_hash_get(s->ls_obj_hash, hnode); - lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); - if (!list_empty(&h->loh_lru)) { - list_del_init(&h->loh_lru); - percpu_counter_dec(&s->ls_lru_len_counter); - } - return lu_object_top(h); -} - -/** - * Search cache for an object with the fid \a f. If such object is found, - * return it. Otherwise, create new object, insert it into cache and return - * it. In any case, additional reference is acquired on the returned object. - */ -static struct lu_object *lu_object_find(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf) -{ - return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf); -} - -/* - * Limit the lu_object cache to a maximum of lu_cache_nr objects. Because - * the calculation for the number of objects to reclaim is not covered by - * a lock the maximum number of objects is capped by LU_CACHE_MAX_ADJUST. - * This ensures that many concurrent threads will not accidentally purge - * the entire cache. - */ -static void lu_object_limit(const struct lu_env *env, struct lu_device *dev) -{ - __u64 size, nr; - - if (lu_cache_nr == LU_CACHE_NR_UNLIMITED) - return; - - size = cfs_hash_size_get(dev->ld_site->ls_obj_hash); - nr = (__u64)lu_cache_nr; - if (size <= nr) - return; - - lu_site_purge_objects(env, dev->ld_site, - min_t(__u64, size - nr, LU_CACHE_NR_MAX_ADJUST), - false); -} - -/** - * Core logic of lu_object_find*() functions. - * - * Much like lu_object_find(), but top level device of object is specifically - * \a dev rather than top level device of the site. This interface allows - * objects of different "stacking" to be created within the same site. 
- */ -struct lu_object *lu_object_find_at(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf) -{ - struct lu_object *o; - struct lu_object *shadow; - struct lu_site *s; - struct cfs_hash *hs; - struct cfs_hash_bd bd; - __u64 version = 0; - - /* - * This uses standard index maintenance protocol: - * - * - search index under lock, and return object if found; - * - otherwise, unlock index, allocate new object; - * - lock index and search again; - * - if nothing is found (usual case), insert newly created - * object into index; - * - otherwise (race: other thread inserted object), free - * object just allocated. - * - unlock index; - * - return object. - * - * For "LOC_F_NEW" case, we are sure the object is new established. - * It is unnecessary to perform lookup-alloc-lookup-insert, instead, - * just alloc and insert directly. - * - */ - s = dev->ld_site; - hs = s->ls_obj_hash; - - cfs_hash_bd_get(hs, f, &bd); - if (!(conf && conf->loc_flags & LOC_F_NEW)) { - cfs_hash_bd_lock(hs, &bd, 1); - o = htable_lookup(s, &bd, f, &version); - cfs_hash_bd_unlock(hs, &bd, 1); - - if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT) - return o; - } - /* - * Allocate new object. This may result in rather complicated - * operations, including fld queries, inode loading, etc. 
- */ - o = lu_object_alloc(env, dev, f, conf); - if (IS_ERR(o)) - return o; - - LASSERT(lu_fid_eq(lu_object_fid(o), f)); - - cfs_hash_bd_lock(hs, &bd, 1); - - if (conf && conf->loc_flags & LOC_F_NEW) - shadow = ERR_PTR(-ENOENT); - else - shadow = htable_lookup(s, &bd, f, &version); - if (likely(PTR_ERR(shadow) == -ENOENT)) { - cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); - cfs_hash_bd_unlock(hs, &bd, 1); - - lu_object_limit(env, dev); - - return o; - } - - lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE); - cfs_hash_bd_unlock(hs, &bd, 1); - lu_object_free(env, o); - return shadow; -} -EXPORT_SYMBOL(lu_object_find_at); - -/** - * Find object with given fid, and return its slice belonging to given device. - */ -struct lu_object *lu_object_find_slice(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf) -{ - struct lu_object *top; - struct lu_object *obj; - - top = lu_object_find(env, dev, f, conf); - if (IS_ERR(top)) - return top; - - obj = lu_object_locate(top->lo_header, dev->ld_type); - if (unlikely(!obj)) { - lu_object_put(env, top); - obj = ERR_PTR(-ENOENT); - } - - return obj; -} -EXPORT_SYMBOL(lu_object_find_slice); - -/** - * Global list of all device types. 
- */ -static LIST_HEAD(lu_device_types); - -int lu_device_type_init(struct lu_device_type *ldt) -{ - int result = 0; - - atomic_set(&ldt->ldt_device_nr, 0); - INIT_LIST_HEAD(&ldt->ldt_linkage); - if (ldt->ldt_ops->ldto_init) - result = ldt->ldt_ops->ldto_init(ldt); - - if (!result) { - spin_lock(&obd_types_lock); - list_add(&ldt->ldt_linkage, &lu_device_types); - spin_unlock(&obd_types_lock); - } - - return result; -} -EXPORT_SYMBOL(lu_device_type_init); - -void lu_device_type_fini(struct lu_device_type *ldt) -{ - spin_lock(&obd_types_lock); - list_del_init(&ldt->ldt_linkage); - spin_unlock(&obd_types_lock); - if (ldt->ldt_ops->ldto_fini) - ldt->ldt_ops->ldto_fini(ldt); -} -EXPORT_SYMBOL(lu_device_type_fini); - -/** - * Global list of all sites on this node - */ -static LIST_HEAD(lu_sites); -static DECLARE_RWSEM(lu_sites_guard); - -/** - * Global environment used by site shrinker. - */ -static struct lu_env lu_shrink_env; - -struct lu_site_print_arg { - struct lu_env *lsp_env; - void *lsp_cookie; - lu_printer_t lsp_printer; -}; - -static int -lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *data) -{ - struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; - struct lu_object_header *h; - - h = hlist_entry(hnode, struct lu_object_header, loh_hash); - if (!list_empty(&h->loh_layers)) { - const struct lu_object *o; - - o = lu_object_top(h); - lu_object_print(arg->lsp_env, arg->lsp_cookie, - arg->lsp_printer, o); - } else { - lu_object_header_print(arg->lsp_env, arg->lsp_cookie, - arg->lsp_printer, h); - } - return 0; -} - -/** - * Print all objects in \a s. 
- */ -void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie, - lu_printer_t printer) -{ - struct lu_site_print_arg arg = { - .lsp_env = (struct lu_env *)env, - .lsp_cookie = cookie, - .lsp_printer = printer, - }; - - cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg); -} -EXPORT_SYMBOL(lu_site_print); - -/** - * Return desired hash table order. - */ -static unsigned long lu_htable_order(struct lu_device *top) -{ - unsigned long bits_max = LU_SITE_BITS_MAX; - unsigned long cache_size; - unsigned long bits; - - if (!strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME)) - bits_max = LU_SITE_BITS_MAX_CL; - - /* - * Calculate hash table size, assuming that we want reasonable - * performance when 20% of total memory is occupied by cache of - * lu_objects. - * - * Size of lu_object is (arbitrary) taken as 1K (together with inode). - */ - cache_size = totalram_pages; - -#if BITS_PER_LONG == 32 - /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - PAGE_SHIFT)) - cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4; -#endif - - /* clear off unreasonable cache setting. */ - if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) { - CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. 
Will use default value: %u.\n", - lu_cache_percent, LU_CACHE_PERCENT_MAX, - LU_CACHE_PERCENT_DEFAULT); - - lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; - } - cache_size = cache_size / 100 * lu_cache_percent * - (PAGE_SIZE / 1024); - - for (bits = 1; (1 << bits) < cache_size; ++bits) - ; - return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max); -} - -static unsigned int lu_obj_hop_hash(struct cfs_hash *hs, - const void *key, unsigned int mask) -{ - struct lu_fid *fid = (struct lu_fid *)key; - __u32 hash; - - hash = fid_flatten32(fid); - hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */ - hash = hash_long(hash, hs->hs_bkt_bits); - - /* give me another random factor */ - hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3); - - hash <<= hs->hs_cur_bits - hs->hs_bkt_bits; - hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1); - - return hash & mask; -} - -static void *lu_obj_hop_object(struct hlist_node *hnode) -{ - return hlist_entry(hnode, struct lu_object_header, loh_hash); -} - -static void *lu_obj_hop_key(struct hlist_node *hnode) -{ - struct lu_object_header *h; - - h = hlist_entry(hnode, struct lu_object_header, loh_hash); - return &h->loh_fid; -} - -static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode) -{ - struct lu_object_header *h; - - h = hlist_entry(hnode, struct lu_object_header, loh_hash); - return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); -} - -static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode) -{ - struct lu_object_header *h; - - h = hlist_entry(hnode, struct lu_object_header, loh_hash); - atomic_inc(&h->loh_ref); -} - -static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode) -{ - LBUG(); /* we should never called it */ -} - -static struct cfs_hash_ops lu_site_hash_ops = { - .hs_hash = lu_obj_hop_hash, - .hs_key = lu_obj_hop_key, - .hs_keycmp = lu_obj_hop_keycmp, - .hs_object = lu_obj_hop_object, - .hs_get = lu_obj_hop_get, - .hs_put_locked = 
lu_obj_hop_put_locked, -}; - -static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d) -{ - spin_lock(&s->ls_ld_lock); - if (list_empty(&d->ld_linkage)) - list_add(&d->ld_linkage, &s->ls_ld_linkage); - spin_unlock(&s->ls_ld_lock); -} - -/** - * Initialize site \a s, with \a d as the top level device. - */ -int lu_site_init(struct lu_site *s, struct lu_device *top) -{ - struct lu_site_bkt_data *bkt; - struct cfs_hash_bd bd; - unsigned long bits; - unsigned long i; - char name[16]; - int rc; - - memset(s, 0, sizeof(*s)); - mutex_init(&s->ls_purge_mutex); - - rc = percpu_counter_init(&s->ls_lru_len_counter, 0, GFP_NOFS); - if (rc) - return -ENOMEM; - - snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name); - for (bits = lu_htable_order(top); bits >= LU_SITE_BITS_MIN; bits--) { - s->ls_obj_hash = cfs_hash_create(name, bits, bits, - bits - LU_SITE_BKT_BITS, - sizeof(*bkt), 0, 0, - &lu_site_hash_ops, - CFS_HASH_SPIN_BKTLOCK | - CFS_HASH_NO_ITEMREF | - CFS_HASH_DEPTH | - CFS_HASH_ASSERT_EMPTY | - CFS_HASH_COUNTER); - if (s->ls_obj_hash) - break; - } - - if (!s->ls_obj_hash) { - CERROR("failed to create lu_site hash with bits: %lu\n", bits); - return -ENOMEM; - } - - cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { - bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); - INIT_LIST_HEAD(&bkt->lsb_lru); - init_waitqueue_head(&bkt->lsb_marche_funebre); - } - - s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0); - if (!s->ls_stats) { - cfs_hash_putref(s->ls_obj_hash); - s->ls_obj_hash = NULL; - return -ENOMEM; - } - - lprocfs_counter_init(s->ls_stats, LU_SS_CREATED, - 0, "created", "created"); - lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT, - 0, "cache_hit", "cache_hit"); - lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS, - 0, "cache_miss", "cache_miss"); - lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE, - 0, "cache_race", "cache_race"); - lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE, - 0, "cache_death_race", 
"cache_death_race"); - lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED, - 0, "lru_purged", "lru_purged"); - - INIT_LIST_HEAD(&s->ls_linkage); - s->ls_top_dev = top; - top->ld_site = s; - lu_device_get(top); - lu_ref_add(&top->ld_reference, "site-top", s); - - INIT_LIST_HEAD(&s->ls_ld_linkage); - spin_lock_init(&s->ls_ld_lock); - - lu_dev_add_linkage(s, top); - - return 0; -} -EXPORT_SYMBOL(lu_site_init); - -/** - * Finalize \a s and release its resources. - */ -void lu_site_fini(struct lu_site *s) -{ - down_write(&lu_sites_guard); - list_del_init(&s->ls_linkage); - up_write(&lu_sites_guard); - - percpu_counter_destroy(&s->ls_lru_len_counter); - - if (s->ls_obj_hash) { - cfs_hash_putref(s->ls_obj_hash); - s->ls_obj_hash = NULL; - } - - if (s->ls_top_dev) { - s->ls_top_dev->ld_site = NULL; - lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s); - lu_device_put(s->ls_top_dev); - s->ls_top_dev = NULL; - } - - if (s->ls_stats) - lprocfs_free_stats(&s->ls_stats); -} -EXPORT_SYMBOL(lu_site_fini); - -/** - * Called when initialization of stack for this site is completed. - */ -int lu_site_init_finish(struct lu_site *s) -{ - int result; - - down_write(&lu_sites_guard); - result = lu_context_refill(&lu_shrink_env.le_ctx); - if (result == 0) - list_add(&s->ls_linkage, &lu_sites); - up_write(&lu_sites_guard); - return result; -} -EXPORT_SYMBOL(lu_site_init_finish); - -/** - * Acquire additional reference on device \a d - */ -void lu_device_get(struct lu_device *d) -{ - atomic_inc(&d->ld_ref); -} -EXPORT_SYMBOL(lu_device_get); - -/** - * Release reference on device \a d. - */ -void lu_device_put(struct lu_device *d) -{ - LASSERT(atomic_read(&d->ld_ref) > 0); - atomic_dec(&d->ld_ref); -} -EXPORT_SYMBOL(lu_device_put); - -/** - * Initialize device \a d of type \a t. 
- */ -int lu_device_init(struct lu_device *d, struct lu_device_type *t) -{ - if (atomic_inc_return(&t->ldt_device_nr) == 1 && - t->ldt_ops->ldto_start) - t->ldt_ops->ldto_start(t); - - memset(d, 0, sizeof(*d)); - atomic_set(&d->ld_ref, 0); - d->ld_type = t; - lu_ref_init(&d->ld_reference); - INIT_LIST_HEAD(&d->ld_linkage); - return 0; -} -EXPORT_SYMBOL(lu_device_init); - -/** - * Finalize device \a d. - */ -void lu_device_fini(struct lu_device *d) -{ - struct lu_device_type *t = d->ld_type; - - if (d->ld_obd) { - d->ld_obd->obd_lu_dev = NULL; - d->ld_obd = NULL; - } - - lu_ref_fini(&d->ld_reference); - LASSERTF(atomic_read(&d->ld_ref) == 0, - "Refcount is %u\n", atomic_read(&d->ld_ref)); - LASSERT(atomic_read(&t->ldt_device_nr) > 0); - - if (atomic_dec_and_test(&t->ldt_device_nr) && - t->ldt_ops->ldto_stop) - t->ldt_ops->ldto_stop(t); -} -EXPORT_SYMBOL(lu_device_fini); - -/** - * Initialize object \a o that is part of compound object \a h and was created - * by device \a d. - */ -int lu_object_init(struct lu_object *o, struct lu_object_header *h, - struct lu_device *d) -{ - memset(o, 0, sizeof(*o)); - o->lo_header = h; - o->lo_dev = d; - lu_device_get(d); - lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o); - INIT_LIST_HEAD(&o->lo_linkage); - - return 0; -} -EXPORT_SYMBOL(lu_object_init); - -/** - * Finalize object and release its resources. - */ -void lu_object_fini(struct lu_object *o) -{ - struct lu_device *dev = o->lo_dev; - - LASSERT(list_empty(&o->lo_linkage)); - - if (dev) { - lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, - "lu_object", o); - lu_device_put(dev); - o->lo_dev = NULL; - } -} -EXPORT_SYMBOL(lu_object_fini); - -/** - * Add object \a o as first layer of compound object \a h - * - * This is typically called by the ->ldo_object_alloc() method of top-level - * device. 
- */ -void lu_object_add_top(struct lu_object_header *h, struct lu_object *o) -{ - list_move(&o->lo_linkage, &h->loh_layers); -} -EXPORT_SYMBOL(lu_object_add_top); - -/** - * Add object \a o as a layer of compound object, going after \a before. - * - * This is typically called by the ->ldo_object_alloc() method of \a - * before->lo_dev. - */ -void lu_object_add(struct lu_object *before, struct lu_object *o) -{ - list_move(&o->lo_linkage, &before->lo_linkage); -} -EXPORT_SYMBOL(lu_object_add); - -/** - * Initialize compound object. - */ -int lu_object_header_init(struct lu_object_header *h) -{ - memset(h, 0, sizeof(*h)); - atomic_set(&h->loh_ref, 1); - INIT_HLIST_NODE(&h->loh_hash); - INIT_LIST_HEAD(&h->loh_lru); - INIT_LIST_HEAD(&h->loh_layers); - lu_ref_init(&h->loh_reference); - return 0; -} -EXPORT_SYMBOL(lu_object_header_init); - -/** - * Finalize compound object. - */ -void lu_object_header_fini(struct lu_object_header *h) -{ - LASSERT(list_empty(&h->loh_layers)); - LASSERT(list_empty(&h->loh_lru)); - LASSERT(hlist_unhashed(&h->loh_hash)); - lu_ref_fini(&h->loh_reference); -} -EXPORT_SYMBOL(lu_object_header_fini); - -/** - * Given a compound object, find its slice, corresponding to the device type - * \a dtype. - */ -struct lu_object *lu_object_locate(struct lu_object_header *h, - const struct lu_device_type *dtype) -{ - struct lu_object *o; - - list_for_each_entry(o, &h->loh_layers, lo_linkage) { - if (o->lo_dev->ld_type == dtype) - return o; - } - return NULL; -} -EXPORT_SYMBOL(lu_object_locate); - -/** - * Finalize and free devices in the device stack. - * - * Finalize device stack by purging object cache, and calling - * lu_device_type_operations::ldto_device_fini() and - * lu_device_type_operations::ldto_device_free() on all devices in the stack. 
- */ -void lu_stack_fini(const struct lu_env *env, struct lu_device *top) -{ - struct lu_site *site = top->ld_site; - struct lu_device *scan; - struct lu_device *next; - - lu_site_purge(env, site, ~0); - for (scan = top; scan; scan = next) { - next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan); - lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init); - lu_device_put(scan); - } - - /* purge again. */ - lu_site_purge(env, site, ~0); - - for (scan = top; scan; scan = next) { - const struct lu_device_type *ldt = scan->ld_type; - struct obd_type *type; - - next = ldt->ldt_ops->ldto_device_free(env, scan); - type = ldt->ldt_obd_type; - if (type) { - type->typ_refcnt--; - class_put_type(type); - } - } -} - -enum { - /** - * Maximal number of tld slots. - */ - LU_CONTEXT_KEY_NR = 40 -}; - -static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, }; - -static DEFINE_RWLOCK(lu_keys_guard); -static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0); - -/** - * Global counter incremented whenever key is registered, unregistered, - * revived or quiesced. This is used to void unnecessary calls to - * lu_context_refill(). No locking is provided, as initialization and shutdown - * are supposed to be externally serialized. - */ -static unsigned int key_set_version; - -/** - * Register new key. 
- */ -int lu_context_key_register(struct lu_context_key *key) -{ - int result; - unsigned int i; - - LASSERT(key->lct_init); - LASSERT(key->lct_fini); - LASSERT(key->lct_tags != 0); - - result = -ENFILE; - write_lock(&lu_keys_guard); - for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (!lu_keys[i]) { - key->lct_index = i; - atomic_set(&key->lct_used, 1); - lu_keys[i] = key; - lu_ref_init(&key->lct_reference); - result = 0; - ++key_set_version; - break; - } - } - write_unlock(&lu_keys_guard); - return result; -} -EXPORT_SYMBOL(lu_context_key_register); - -static void key_fini(struct lu_context *ctx, int index) -{ - if (ctx->lc_value && ctx->lc_value[index]) { - struct lu_context_key *key; - - key = lu_keys[index]; - LASSERT(atomic_read(&key->lct_used) > 1); - - key->lct_fini(ctx, key, ctx->lc_value[index]); - lu_ref_del(&key->lct_reference, "ctx", ctx); - atomic_dec(&key->lct_used); - - if ((ctx->lc_tags & LCT_NOREF) == 0) - module_put(key->lct_owner); - ctx->lc_value[index] = NULL; - } -} - -/** - * Deregister key. - */ -void lu_context_key_degister(struct lu_context_key *key) -{ - LASSERT(atomic_read(&key->lct_used) >= 1); - LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); - - lu_context_key_quiesce(key); - - write_lock(&lu_keys_guard); - ++key_set_version; - key_fini(&lu_shrink_env.le_ctx, key->lct_index); - - /** - * Wait until all transient contexts referencing this key have - * run lu_context_key::lct_fini() method. 
- */ - while (atomic_read(&key->lct_used) > 1) { - write_unlock(&lu_keys_guard); - CDEBUG(D_INFO, "%s: \"%s\" %p, %d\n", - __func__, module_name(key->lct_owner), - key, atomic_read(&key->lct_used)); - schedule(); - write_lock(&lu_keys_guard); - } - if (lu_keys[key->lct_index]) { - lu_keys[key->lct_index] = NULL; - lu_ref_fini(&key->lct_reference); - } - write_unlock(&lu_keys_guard); - - LASSERTF(atomic_read(&key->lct_used) == 1, - "key has instances: %d\n", - atomic_read(&key->lct_used)); -} -EXPORT_SYMBOL(lu_context_key_degister); - -/** - * Register a number of keys. This has to be called after all keys have been - * initialized by a call to LU_CONTEXT_KEY_INIT(). - */ -int lu_context_key_register_many(struct lu_context_key *k, ...) -{ - struct lu_context_key *key = k; - va_list args; - int result; - - va_start(args, k); - do { - result = lu_context_key_register(key); - if (result) - break; - key = va_arg(args, struct lu_context_key *); - } while (key); - va_end(args); - - if (result != 0) { - va_start(args, k); - while (k != key) { - lu_context_key_degister(k); - k = va_arg(args, struct lu_context_key *); - } - va_end(args); - } - - return result; -} -EXPORT_SYMBOL(lu_context_key_register_many); - -/** - * De-register a number of keys. This is a dual to - * lu_context_key_register_many(). - */ -void lu_context_key_degister_many(struct lu_context_key *k, ...) -{ - va_list args; - - va_start(args, k); - do { - lu_context_key_degister(k); - k = va_arg(args, struct lu_context_key*); - } while (k); - va_end(args); -} -EXPORT_SYMBOL(lu_context_key_degister_many); - -/** - * Revive a number of keys. - */ -void lu_context_key_revive_many(struct lu_context_key *k, ...) -{ - va_list args; - - va_start(args, k); - do { - lu_context_key_revive(k); - k = va_arg(args, struct lu_context_key*); - } while (k); - va_end(args); -} -EXPORT_SYMBOL(lu_context_key_revive_many); - -/** - * Quiescent a number of keys. - */ -void lu_context_key_quiesce_many(struct lu_context_key *k, ...) 
-{ - va_list args; - - va_start(args, k); - do { - lu_context_key_quiesce(k); - k = va_arg(args, struct lu_context_key*); - } while (k); - va_end(args); -} -EXPORT_SYMBOL(lu_context_key_quiesce_many); - -/** - * Return value associated with key \a key in context \a ctx. - */ -void *lu_context_key_get(const struct lu_context *ctx, - const struct lu_context_key *key) -{ - LINVRNT(ctx->lc_state == LCS_ENTERED); - LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); - LASSERT(lu_keys[key->lct_index] == key); - return ctx->lc_value[key->lct_index]; -} -EXPORT_SYMBOL(lu_context_key_get); - -/** - * List of remembered contexts. XXX document me. - */ -static LIST_HEAD(lu_context_remembered); - -/** - * Destroy \a key in all remembered contexts. This is used to destroy key - * values in "shared" contexts (like service threads), when a module owning - * the key is about to be unloaded. - */ -void lu_context_key_quiesce(struct lu_context_key *key) -{ - struct lu_context *ctx; - - if (!(key->lct_tags & LCT_QUIESCENT)) { - /* - * XXX memory barrier has to go here. - */ - write_lock(&lu_keys_guard); - key->lct_tags |= LCT_QUIESCENT; - - /** - * Wait until all lu_context_key::lct_init() methods - * have completed. 
- */ - while (atomic_read(&lu_key_initing_cnt) > 0) { - write_unlock(&lu_keys_guard); - CDEBUG(D_INFO, "%s: \"%s\" %p, %d (%d)\n", - __func__, - module_name(key->lct_owner), - key, atomic_read(&key->lct_used), - atomic_read(&lu_key_initing_cnt)); - schedule(); - write_lock(&lu_keys_guard); - } - - list_for_each_entry(ctx, &lu_context_remembered, lc_remember) - key_fini(ctx, key->lct_index); - - ++key_set_version; - write_unlock(&lu_keys_guard); - } -} - -void lu_context_key_revive(struct lu_context_key *key) -{ - write_lock(&lu_keys_guard); - key->lct_tags &= ~LCT_QUIESCENT; - ++key_set_version; - write_unlock(&lu_keys_guard); -} - -static void keys_fini(struct lu_context *ctx) -{ - unsigned int i; - - if (!ctx->lc_value) - return; - - for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) - key_fini(ctx, i); - - kfree(ctx->lc_value); - ctx->lc_value = NULL; -} - -static int keys_fill(struct lu_context *ctx) -{ - unsigned int pre_version; - unsigned int i; - - /* - * A serialisation with lu_context_key_quiesce() is needed, but some - * "key->lct_init()" are calling kernel memory allocation routine and - * can't be called while holding a spin_lock. - * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt" - * to ensure the start of the serialisation. - * An atomic_t variable is still used, in order not to reacquire the - * lock when decrementing the counter. - */ - read_lock(&lu_keys_guard); - atomic_inc(&lu_key_initing_cnt); - pre_version = key_set_version; - read_unlock(&lu_keys_guard); - -refill: - LINVRNT(ctx->lc_value); - for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - struct lu_context_key *key; - - key = lu_keys[i]; - if (!ctx->lc_value[i] && key && - (key->lct_tags & ctx->lc_tags) && - /* - * Don't create values for a LCT_QUIESCENT key, as this - * will pin module owning a key. 
- */ - !(key->lct_tags & LCT_QUIESCENT)) { - void *value; - - LINVRNT(key->lct_init); - LINVRNT(key->lct_index == i); - - if (!(ctx->lc_tags & LCT_NOREF) && - !try_module_get(key->lct_owner)) { - /* module is unloading, skip this key */ - continue; - } - - value = key->lct_init(ctx, key); - if (unlikely(IS_ERR(value))) { - atomic_dec(&lu_key_initing_cnt); - return PTR_ERR(value); - } - - lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); - atomic_inc(&key->lct_used); - /* - * This is the only place in the code, where an - * element of ctx->lc_value[] array is set to non-NULL - * value. - */ - ctx->lc_value[i] = value; - if (key->lct_exit) - ctx->lc_tags |= LCT_HAS_EXIT; - } - } - - read_lock(&lu_keys_guard); - if (pre_version != key_set_version) { - pre_version = key_set_version; - read_unlock(&lu_keys_guard); - goto refill; - } - ctx->lc_version = key_set_version; - atomic_dec(&lu_key_initing_cnt); - read_unlock(&lu_keys_guard); - return 0; -} - -static int keys_init(struct lu_context *ctx) -{ - ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]), - GFP_NOFS); - if (likely(ctx->lc_value)) - return keys_fill(ctx); - - return -ENOMEM; -} - -/** - * Initialize context data-structure. Create values for all keys. - */ -int lu_context_init(struct lu_context *ctx, __u32 tags) -{ - int rc; - - memset(ctx, 0, sizeof(*ctx)); - ctx->lc_state = LCS_INITIALIZED; - ctx->lc_tags = tags; - if (tags & LCT_REMEMBER) { - write_lock(&lu_keys_guard); - list_add(&ctx->lc_remember, &lu_context_remembered); - write_unlock(&lu_keys_guard); - } else { - INIT_LIST_HEAD(&ctx->lc_remember); - } - - rc = keys_init(ctx); - if (rc != 0) - lu_context_fini(ctx); - - return rc; -} -EXPORT_SYMBOL(lu_context_init); - -/** - * Finalize context data-structure. Destroy key values. 
- */ -void lu_context_fini(struct lu_context *ctx) -{ - LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT); - ctx->lc_state = LCS_FINALIZED; - - if ((ctx->lc_tags & LCT_REMEMBER) == 0) { - LASSERT(list_empty(&ctx->lc_remember)); - keys_fini(ctx); - - } else { /* could race with key degister */ - write_lock(&lu_keys_guard); - keys_fini(ctx); - list_del_init(&ctx->lc_remember); - write_unlock(&lu_keys_guard); - } -} -EXPORT_SYMBOL(lu_context_fini); - -/** - * Called before entering context. - */ -void lu_context_enter(struct lu_context *ctx) -{ - LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT); - ctx->lc_state = LCS_ENTERED; -} -EXPORT_SYMBOL(lu_context_enter); - -/** - * Called after exiting from \a ctx - */ -void lu_context_exit(struct lu_context *ctx) -{ - unsigned int i; - - LINVRNT(ctx->lc_state == LCS_ENTERED); - ctx->lc_state = LCS_LEFT; - if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) { - /* could race with key quiescency */ - if (ctx->lc_tags & LCT_REMEMBER) - read_lock(&lu_keys_guard); - - for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (ctx->lc_value[i]) { - struct lu_context_key *key; - - key = lu_keys[i]; - if (key->lct_exit) - key->lct_exit(ctx, - key, ctx->lc_value[i]); - } - } - - if (ctx->lc_tags & LCT_REMEMBER) - read_unlock(&lu_keys_guard); - } -} -EXPORT_SYMBOL(lu_context_exit); - -/** - * Allocate for context all missing keys that were registered after context - * creation. key_set_version is only changed in rare cases when modules - * are loaded and removed. - */ -int lu_context_refill(struct lu_context *ctx) -{ - read_lock(&lu_keys_guard); - if (likely(ctx->lc_version == key_set_version)) { - read_unlock(&lu_keys_guard); - return 0; - } - - read_unlock(&lu_keys_guard); - return keys_fill(ctx); -} - -/** - * lu_ctx_tags/lu_ses_tags will be updated if there are new types of - * obd being added. 
Currently, this is only used on client side, specifically - * for echo device client, for other stack (like ptlrpc threads), context are - * predefined when the lu_device type are registered, during the module probe - * phase. - */ -__u32 lu_context_tags_default; -__u32 lu_session_tags_default; - -int lu_env_init(struct lu_env *env, __u32 tags) -{ - int result; - - env->le_ses = NULL; - result = lu_context_init(&env->le_ctx, tags); - if (likely(result == 0)) - lu_context_enter(&env->le_ctx); - return result; -} -EXPORT_SYMBOL(lu_env_init); - -void lu_env_fini(struct lu_env *env) -{ - lu_context_exit(&env->le_ctx); - lu_context_fini(&env->le_ctx); - env->le_ses = NULL; -} -EXPORT_SYMBOL(lu_env_fini); - -int lu_env_refill(struct lu_env *env) -{ - int result; - - result = lu_context_refill(&env->le_ctx); - if (result == 0 && env->le_ses) - result = lu_context_refill(env->le_ses); - return result; -} -EXPORT_SYMBOL(lu_env_refill); - -struct lu_site_stats { - unsigned int lss_populated; - unsigned int lss_max_search; - unsigned int lss_total; - unsigned int lss_busy; -}; - -static void lu_site_stats_get(const struct lu_site *s, - struct lu_site_stats *stats, int populated) -{ - struct cfs_hash *hs = s->ls_obj_hash; - struct cfs_hash_bd bd; - unsigned int i; - /* - * percpu_counter_sum_positive() won't accept a const pointer - * as it does modify the struct by taking a spinlock - */ - struct lu_site *s2 = (struct lu_site *)s; - - stats->lss_busy += cfs_hash_size_get(hs) - - percpu_counter_sum_positive(&s2->ls_lru_len_counter); - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - cfs_hash_bd_lock(hs, &bd, 1); - stats->lss_total += cfs_hash_bd_count_get(&bd); - stats->lss_max_search = max((int)stats->lss_max_search, - cfs_hash_bd_depmax_get(&bd)); - if (!populated) { - cfs_hash_bd_unlock(hs, &bd, 1); - continue; - } - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - if (!hlist_empty(hhead)) - stats->lss_populated++; - } - cfs_hash_bd_unlock(hs, &bd, 1); 
- } -} - -/* - * lu_cache_shrink_count() returns an approximate number of cached objects - * that can be freed by shrink_slab(). A counter, which tracks the - * number of items in the site's lru, is maintained in a percpu_counter - * for each site. The percpu values are incremented and decremented as - * objects are added or removed from the lru. The percpu values are summed - * and saved whenever a percpu value exceeds a threshold. Thus the saved, - * summed value at any given time may not accurately reflect the current - * lru length. But this value is sufficiently accurate for the needs of - * a shrinker. - * - * Using a per cpu counter is a compromise solution to concurrent access: - * lu_object_put() can update the counter without locking the site and - * lu_cache_shrink_count can sum the counters without locking each - * ls_obj_hash bucket. - */ -static unsigned long lu_cache_shrink_count(struct shrinker *sk, - struct shrink_control *sc) -{ - struct lu_site *s; - struct lu_site *tmp; - unsigned long cached = 0; - - if (!(sc->gfp_mask & __GFP_FS)) - return 0; - - down_read(&lu_sites_guard); - list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) - cached += percpu_counter_read_positive(&s->ls_lru_len_counter); - up_read(&lu_sites_guard); - - cached = (cached / 100) * sysctl_vfs_cache_pressure; - CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n", - cached, sysctl_vfs_cache_pressure); - - return cached; -} - -static unsigned long lu_cache_shrink_scan(struct shrinker *sk, - struct shrink_control *sc) -{ - struct lu_site *s; - struct lu_site *tmp; - unsigned long remain = sc->nr_to_scan, freed = 0; - LIST_HEAD(splice); - - if (!(sc->gfp_mask & __GFP_FS)) - /* We must not take the lu_sites_guard lock when - * __GFP_FS is *not* set because of the deadlock - * possibility detailed above. Additionally, - * since we cannot determine the number of - * objects in the cache without taking this - * lock, we're in a particularly tough spot. 
As - * a result, we'll just lie and say our cache is - * empty. This _should_ be ok, as we can't - * reclaim objects when __GFP_FS is *not* set - * anyways. - */ - return SHRINK_STOP; - - down_write(&lu_sites_guard); - list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { - freed = lu_site_purge(&lu_shrink_env, s, remain); - remain -= freed; - /* - * Move just shrunk site to the tail of site list to - * assure shrinking fairness. - */ - list_move_tail(&s->ls_linkage, &splice); - } - list_splice(&splice, lu_sites.prev); - up_write(&lu_sites_guard); - - return sc->nr_to_scan - remain; -} - -/** - * Debugging printer function using printk(). - */ -static struct shrinker lu_site_shrinker = { - .count_objects = lu_cache_shrink_count, - .scan_objects = lu_cache_shrink_scan, - .seeks = DEFAULT_SEEKS, -}; - -/** - * Initialization of global lu_* data. - */ -int lu_global_init(void) -{ - int result; - - CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys); - - result = lu_ref_global_init(); - if (result != 0) - return result; - - LU_CONTEXT_KEY_INIT(&lu_global_key); - result = lu_context_key_register(&lu_global_key); - if (result != 0) { - lu_ref_global_fini(); - return result; - } - - /* - * At this level, we don't know what tags are needed, so allocate them - * conservatively. This should not be too bad, because this - * environment is global. - */ - down_write(&lu_sites_guard); - result = lu_env_init(&lu_shrink_env, LCT_SHRINKER); - up_write(&lu_sites_guard); - if (result != 0) { - lu_context_key_degister(&lu_global_key); - lu_ref_global_fini(); - return result; - } - - /* - * seeks estimation: 3 seeks to read a record from oi, one to read - * inode, one for ea. Unfortunately setting this high value results in - * lu_object/inode cache consuming all the memory. - */ - result = register_shrinker(&lu_site_shrinker); - if (result != 0) { - /* Order explained in lu_global_fini(). 
*/ - lu_context_key_degister(&lu_global_key); - - down_write(&lu_sites_guard); - lu_env_fini(&lu_shrink_env); - up_write(&lu_sites_guard); - - lu_ref_global_fini(); - return result; - } - - return 0; -} - -/** - * Dual to lu_global_init(). - */ -void lu_global_fini(void) -{ - unregister_shrinker(&lu_site_shrinker); - lu_context_key_degister(&lu_global_key); - - /* - * Tear shrinker environment down _after_ de-registering - * lu_global_key, because the latter has a value in the former. - */ - down_write(&lu_sites_guard); - lu_env_fini(&lu_shrink_env); - up_write(&lu_sites_guard); - - lu_ref_global_fini(); -} - -static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) -{ - struct lprocfs_counter ret; - - lprocfs_stats_collect(stats, idx, &ret); - return (__u32)ret.lc_count; -} - -/** - * Output site statistical counters into a buffer. Suitable for - * lprocfs_rd_*()-style functions. - */ -int lu_site_stats_print(const struct lu_site *s, struct seq_file *m) -{ - struct lu_site_stats stats; - - memset(&stats, 0, sizeof(stats)); - lu_site_stats_get(s, &stats, 1); - - seq_printf(m, "%d/%d %d/%ld %d %d %d %d %d %d %d\n", - stats.lss_busy, - stats.lss_total, - stats.lss_populated, - CFS_HASH_NHLIST(s->ls_obj_hash), - stats.lss_max_search, - ls_stats_read(s->ls_stats, LU_SS_CREATED), - ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), - ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), - ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), - ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), - ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); - return 0; -} -EXPORT_SYMBOL(lu_site_stats_print); - -/** - * Helper function to initialize a number of kmem slab caches at once. 
- */ -int lu_kmem_init(struct lu_kmem_descr *caches) -{ - int result; - struct lu_kmem_descr *iter = caches; - - for (result = 0; iter->ckd_cache; ++iter) { - *iter->ckd_cache = kmem_cache_create(iter->ckd_name, - iter->ckd_size, - 0, 0, NULL); - if (!*iter->ckd_cache) { - result = -ENOMEM; - /* free all previously allocated caches */ - lu_kmem_fini(caches); - break; - } - } - return result; -} -EXPORT_SYMBOL(lu_kmem_init); - -/** - * Helper function to finalize a number of kmem slab cached at once. Dual to - * lu_kmem_init(). - */ -void lu_kmem_fini(struct lu_kmem_descr *caches) -{ - for (; caches->ckd_cache; ++caches) { - kmem_cache_destroy(*caches->ckd_cache); - *caches->ckd_cache = NULL; - } -} -EXPORT_SYMBOL(lu_kmem_fini); diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ref.c b/drivers/staging/lustre/lustre/obdclass/lu_ref.c deleted file mode 100644 index f67cb89ea0ba..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/lu_ref.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/lu_ref.c - * - * Lustre reference. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c deleted file mode 100644 index cdc8dc10690d..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c +++ /dev/null @@ -1,241 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/obdclass/lustre_handles.c - * - * Author: Phil Schwan - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include - -static __u64 handle_base; -#define HANDLE_INCR 7 -static spinlock_t handle_base_lock; - -static struct handle_bucket { - spinlock_t lock; - struct list_head head; -} *handle_hash; - -#define HANDLE_HASH_SIZE (1 << 16) -#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1) - -/* - * Generate a unique 64bit cookie (hash) for a handle and insert it into - * global (per-node) hash-table. - */ -void class_handle_hash(struct portals_handle *h, - struct portals_handle_ops *ops) -{ - struct handle_bucket *bucket; - - LASSERT(h); - LASSERT(list_empty(&h->h_link)); - - /* - * This is fast, but simplistic cookie generation algorithm, it will - * need a re-do at some point in the future for security. - */ - spin_lock(&handle_base_lock); - handle_base += HANDLE_INCR; - - if (unlikely(handle_base == 0)) { - /* - * Cookie of zero is "dangerous", because in many places it's - * assumed that 0 means "unassigned" handle, not bound to any - * object. 
- */ - CWARN("The universe has been exhausted: cookie wrap-around.\n"); - handle_base += HANDLE_INCR; - } - h->h_cookie = handle_base; - spin_unlock(&handle_base_lock); - - h->h_ops = ops; - spin_lock_init(&h->h_lock); - - bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK]; - spin_lock(&bucket->lock); - list_add_rcu(&h->h_link, &bucket->head); - h->h_in = 1; - spin_unlock(&bucket->lock); - - CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n", - h, h->h_cookie); -} -EXPORT_SYMBOL(class_handle_hash); - -static void class_handle_unhash_nolock(struct portals_handle *h) -{ - if (list_empty(&h->h_link)) { - CERROR("removing an already-removed handle (%#llx)\n", - h->h_cookie); - return; - } - - CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n", - h, h->h_cookie); - - spin_lock(&h->h_lock); - if (h->h_in == 0) { - spin_unlock(&h->h_lock); - return; - } - h->h_in = 0; - spin_unlock(&h->h_lock); - list_del_rcu(&h->h_link); -} - -void class_handle_unhash(struct portals_handle *h) -{ - struct handle_bucket *bucket; - - bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK); - - spin_lock(&bucket->lock); - class_handle_unhash_nolock(h); - spin_unlock(&bucket->lock); -} -EXPORT_SYMBOL(class_handle_unhash); - -void *class_handle2object(__u64 cookie, const void *owner) -{ - struct handle_bucket *bucket; - struct portals_handle *h; - void *retval = NULL; - - LASSERT(handle_hash); - - /* Be careful when you want to change this code. See the - * rcu_read_lock() definition on top this file. 
- jxiong - */ - bucket = handle_hash + (cookie & HANDLE_HASH_MASK); - - rcu_read_lock(); - list_for_each_entry_rcu(h, &bucket->head, h_link) { - if (h->h_cookie != cookie || h->h_owner != owner) - continue; - - spin_lock(&h->h_lock); - if (likely(h->h_in != 0)) { - h->h_ops->hop_addref(h); - retval = h; - } - spin_unlock(&h->h_lock); - break; - } - rcu_read_unlock(); - - return retval; -} -EXPORT_SYMBOL(class_handle2object); - -void class_handle_free_cb(struct rcu_head *rcu) -{ - struct portals_handle *h; - void *ptr; - - h = container_of(rcu, struct portals_handle, h_rcu); - ptr = (void *)(unsigned long)h->h_cookie; - - if (h->h_ops->hop_free) - h->h_ops->hop_free(ptr, h->h_size); - else - kfree(ptr); -} -EXPORT_SYMBOL(class_handle_free_cb); - -int class_handle_init(void) -{ - struct handle_bucket *bucket; - - LASSERT(!handle_hash); - - handle_hash = kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE, - GFP_KERNEL); - if (!handle_hash) - return -ENOMEM; - - spin_lock_init(&handle_base_lock); - for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash; - bucket--) { - INIT_LIST_HEAD(&bucket->head); - spin_lock_init(&bucket->lock); - } - - get_random_bytes(&handle_base, sizeof(handle_base)); - LASSERT(handle_base != 0ULL); - - return 0; -} - -static int cleanup_all_handles(void) -{ - int rc; - int i; - - for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) { - struct portals_handle *h; - - spin_lock(&handle_hash[i].lock); - list_for_each_entry_rcu(h, &handle_hash[i].head, h_link) { - CERROR("force clean handle %#llx addr %p ops %p\n", - h->h_cookie, h, h->h_ops); - - class_handle_unhash_nolock(h); - rc++; - } - spin_unlock(&handle_hash[i].lock); - } - - return rc; -} - -void class_handle_cleanup(void) -{ - int count; - - LASSERT(handle_hash); - - count = cleanup_all_handles(); - - kvfree(handle_hash); - handle_hash = NULL; - - if (count != 0) - CERROR("handle_count at cleanup: %d\n", count); -} diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c 
b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c deleted file mode 100644 index e286a2665423..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c +++ /dev/null @@ -1,214 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include -#include -#include -#include -#include -#include - -#define NIDS_MAX 32 - -struct uuid_nid_data { - struct list_head un_list; - struct obd_uuid un_uuid; - int un_nid_count; - lnet_nid_t un_nids[NIDS_MAX]; -}; - -/* FIXME: This should probably become more elegant than a global linked list */ -static struct list_head g_uuid_list; -static spinlock_t g_uuid_lock; - -void class_init_uuidlist(void) -{ - INIT_LIST_HEAD(&g_uuid_list); - spin_lock_init(&g_uuid_lock); -} - -void class_exit_uuidlist(void) -{ - /* delete all */ - class_del_uuid(NULL); -} - -int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index) -{ - struct uuid_nid_data *data; - struct obd_uuid tmp; - int rc = -ENOENT; - - obd_str2uuid(&tmp, uuid); - spin_lock(&g_uuid_lock); - list_for_each_entry(data, &g_uuid_list, un_list) { - if (obd_uuid_equals(&data->un_uuid, &tmp)) { - if (index >= data->un_nid_count) - break; - - rc = 0; - *peer_nid = data->un_nids[index]; - break; - } - } - spin_unlock(&g_uuid_lock); - return rc; -} -EXPORT_SYMBOL(lustre_uuid_to_peer); - -/* Add a nid to a niduuid. Multiple nids can be added to a single uuid; - * LNET will choose the best one. 
- */ -int class_add_uuid(const char *uuid, __u64 nid) -{ - struct uuid_nid_data *data, *entry; - int found = 0; - - LASSERT(nid != 0); /* valid newconfig NID is never zero */ - - if (strlen(uuid) > UUID_MAX - 1) - return -EOVERFLOW; - - data = kzalloc(sizeof(*data), GFP_NOFS); - if (!data) - return -ENOMEM; - - obd_str2uuid(&data->un_uuid, uuid); - data->un_nids[0] = nid; - data->un_nid_count = 1; - - spin_lock(&g_uuid_lock); - list_for_each_entry(entry, &g_uuid_list, un_list) { - if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) { - int i; - - found = 1; - for (i = 0; i < entry->un_nid_count; i++) - if (nid == entry->un_nids[i]) - break; - - if (i == entry->un_nid_count) { - LASSERT(entry->un_nid_count < NIDS_MAX); - entry->un_nids[entry->un_nid_count++] = nid; - } - break; - } - } - if (!found) - list_add(&data->un_list, &g_uuid_list); - spin_unlock(&g_uuid_lock); - - if (found) { - CDEBUG(D_INFO, "found uuid %s %s cnt=%d\n", uuid, - libcfs_nid2str(nid), entry->un_nid_count); - kfree(data); - } else { - CDEBUG(D_INFO, "add uuid %s %s\n", uuid, libcfs_nid2str(nid)); - } - return 0; -} - -/* Delete the nids for one uuid if specified, otherwise delete all */ -int class_del_uuid(const char *uuid) -{ - LIST_HEAD(deathrow); - struct uuid_nid_data *data; - struct uuid_nid_data *temp; - - spin_lock(&g_uuid_lock); - if (uuid) { - struct obd_uuid tmp; - - obd_str2uuid(&tmp, uuid); - list_for_each_entry(data, &g_uuid_list, un_list) { - if (obd_uuid_equals(&data->un_uuid, &tmp)) { - list_move(&data->un_list, &deathrow); - break; - } - } - } else { - list_splice_init(&g_uuid_list, &deathrow); - } - spin_unlock(&g_uuid_lock); - - if (uuid && list_empty(&deathrow)) { - CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid); - return -EINVAL; - } - - list_for_each_entry_safe(data, temp, &deathrow, un_list) { - list_del(&data->un_list); - - CDEBUG(D_INFO, "del uuid %s %s/%d\n", - obd_uuid2str(&data->un_uuid), - libcfs_nid2str(data->un_nids[0]), - data->un_nid_count); 
- - kfree(data); - } - - return 0; -} - -/* check if @nid exists in nid list of @uuid */ -int class_check_uuid(struct obd_uuid *uuid, __u64 nid) -{ - struct uuid_nid_data *entry; - int found = 0; - - CDEBUG(D_INFO, "check if uuid %s has %s.\n", - obd_uuid2str(uuid), libcfs_nid2str(nid)); - - spin_lock(&g_uuid_lock); - list_for_each_entry(entry, &g_uuid_list, un_list) { - int i; - - if (!obd_uuid_equals(&entry->un_uuid, uuid)) - continue; - - /* found the uuid, check if it has @nid */ - for (i = 0; i < entry->un_nid_count; i++) { - if (entry->un_nids[i] == nid) { - found = 1; - break; - } - } - break; - } - spin_unlock(&g_uuid_lock); - return found; -} -EXPORT_SYMBOL(class_check_uuid); diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c deleted file mode 100644 index ffc1814398a5..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/obd_config.c +++ /dev/null @@ -1,1538 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/obd_config.c - * - * Config API - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include - -#include -#include -#include -#include -#include -#include - -#include "llog_internal.h" - -/* - * uuid<->export lustre hash operations - */ -/* - * NOTE: It is impossible to find an export that is in failed - * state with this function - */ -static int -uuid_keycmp(struct rhashtable_compare_arg *arg, const void *obj) -{ - const struct obd_uuid *uuid = arg->key; - const struct obd_export *exp = obj; - - if (obd_uuid_equals(uuid, &exp->exp_client_uuid) && - !exp->exp_failed) - return 0; - return -ESRCH; -} - -static void -uuid_export_exit(void *vexport, void *data) -{ - struct obd_export *exp = vexport; - - class_export_put(exp); -} - -static const struct rhashtable_params uuid_hash_params = { - .key_len = sizeof(struct obd_uuid), - .key_offset = offsetof(struct obd_export, exp_client_uuid), - .head_offset = offsetof(struct obd_export, exp_uuid_hash), - .obj_cmpfn = uuid_keycmp, - .automatic_shrinking = true, -}; - -int obd_uuid_add(struct obd_device *obd, struct obd_export *export) -{ - int rc; - - rc = rhashtable_lookup_insert_fast(&obd->obd_uuid_hash, - &export->exp_uuid_hash, - uuid_hash_params); - if (rc == 0) - class_export_get(export); - else if (rc == -EEXIST) - rc = -EALREADY; - else - /* map obscure error codes to -ENOMEM */ - rc = -ENOMEM; - return rc; -} - -void obd_uuid_del(struct obd_device *obd, struct obd_export *export) -{ - int rc; - - rc = rhashtable_remove_fast(&obd->obd_uuid_hash, - &export->exp_uuid_hash, - uuid_hash_params); - - if (rc == 0) - class_export_put(export); -} - -/*********** string parsing utils *********/ - -/* returns 0 if we find this key in the buffer, else 1 */ -int class_find_param(char *buf, char *key, char **valp) -{ - char *ptr; - - if (!buf) - 
return 1; - - ptr = strstr(buf, key); - if (!ptr) - return 1; - - if (valp) - *valp = ptr + strlen(key); - - return 0; -} -EXPORT_SYMBOL(class_find_param); - -/* returns 0 if this is the first key in the buffer, else 1. - * valp points to first char after key. - */ -static int class_match_param(char *buf, const char *key, char **valp) -{ - if (!buf) - return 1; - - if (memcmp(buf, key, strlen(key)) != 0) - return 1; - - if (valp) - *valp = buf + strlen(key); - - return 0; -} - -static int parse_nid(char *buf, void *value, int quiet) -{ - lnet_nid_t *nid = value; - - *nid = libcfs_str2nid(buf); - if (*nid != LNET_NID_ANY) - return 0; - - if (!quiet) - LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", buf); - return -EINVAL; -} - -static int parse_net(char *buf, void *value) -{ - __u32 *net = value; - - *net = libcfs_str2net(buf); - CDEBUG(D_INFO, "Net %s\n", libcfs_net2str(*net)); - return 0; -} - -enum { - CLASS_PARSE_NID = 1, - CLASS_PARSE_NET, -}; - -/* 0 is good nid, - * 1 not found - * < 0 error - * endh is set to next separator - */ -static int class_parse_value(char *buf, int opc, void *value, char **endh, - int quiet) -{ - char *endp; - char tmp; - int rc = 0; - - if (!buf) - return 1; - while (*buf == ',' || *buf == ':') - buf++; - if (*buf == ' ' || *buf == '/' || *buf == '\0') - return 1; - - /* nid separators or end of nids */ - endp = strpbrk(buf, ",: /"); - if (!endp) - endp = buf + strlen(buf); - - tmp = *endp; - *endp = '\0'; - switch (opc) { - default: - LBUG(); - case CLASS_PARSE_NID: - rc = parse_nid(buf, value, quiet); - break; - case CLASS_PARSE_NET: - rc = parse_net(buf, value); - break; - } - *endp = tmp; - if (rc != 0) - return rc; - if (endh) - *endh = endp; - return 0; -} - -int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh) -{ - return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 0); -} -EXPORT_SYMBOL(class_parse_nid); - -int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh) -{ - return 
class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 1); -} -EXPORT_SYMBOL(class_parse_nid_quiet); - -char *lustre_cfg_string(struct lustre_cfg *lcfg, u32 index) -{ - char *s; - - if (!lcfg->lcfg_buflens[index]) - return NULL; - - s = lustre_cfg_buf(lcfg, index); - if (!s) - return NULL; - - /* - * make sure it's NULL terminated, even if this kills a char - * of data. Try to use the padding first though. - */ - if (s[lcfg->lcfg_buflens[index] - 1] != '\0') { - size_t last = ALIGN(lcfg->lcfg_buflens[index], 8) - 1; - char lost; - - /* Use the smaller value */ - if (last > lcfg->lcfg_buflens[index]) - last = lcfg->lcfg_buflens[index]; - - lost = s[last]; - s[last] = '\0'; - if (lost != '\0') { - CWARN("Truncated buf %d to '%s' (lost '%c'...)\n", - index, s, lost); - } - } - return s; -} -EXPORT_SYMBOL(lustre_cfg_string); - -/********************** class fns **********************/ - -/** - * Create a new obd device and set the type, name and uuid. If successful, - * the new device can be accessed by either name or uuid. 
- */ -static int class_attach(struct lustre_cfg *lcfg) -{ - struct obd_device *obd = NULL; - char *typename, *name, *uuid; - int rc, len; - - if (!LUSTRE_CFG_BUFLEN(lcfg, 1)) { - CERROR("No type passed!\n"); - return -EINVAL; - } - typename = lustre_cfg_string(lcfg, 1); - - if (!LUSTRE_CFG_BUFLEN(lcfg, 0)) { - CERROR("No name passed!\n"); - return -EINVAL; - } - name = lustre_cfg_string(lcfg, 0); - - if (!LUSTRE_CFG_BUFLEN(lcfg, 2)) { - CERROR("No UUID passed!\n"); - return -EINVAL; - } - uuid = lustre_cfg_string(lcfg, 2); - - CDEBUG(D_IOCTL, "attach type %s name: %s uuid: %s\n", - typename, name, uuid); - - obd = class_newdev(typename, name); - if (IS_ERR(obd)) { - /* Already exists or out of obds */ - rc = PTR_ERR(obd); - obd = NULL; - CERROR("Cannot create device %s of type %s : %d\n", - name, typename, rc); - goto out; - } - LASSERTF(obd, "Cannot get obd device %s of type %s\n", - name, typename); - LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, - "obd %p obd_magic %08X != %08X\n", - obd, obd->obd_magic, OBD_DEVICE_MAGIC); - LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0, - "%p obd_name %s != %s\n", obd, obd->obd_name, name); - - rwlock_init(&obd->obd_pool_lock); - obd->obd_pool_limit = 0; - obd->obd_pool_slv = 0; - - INIT_LIST_HEAD(&obd->obd_exports); - INIT_LIST_HEAD(&obd->obd_unlinked_exports); - INIT_LIST_HEAD(&obd->obd_delayed_exports); - spin_lock_init(&obd->obd_nid_lock); - spin_lock_init(&obd->obd_dev_lock); - mutex_init(&obd->obd_dev_mutex); - spin_lock_init(&obd->obd_osfs_lock); - /* obd->obd_osfs_age must be set to a value in the distant - * past to guarantee a fresh statfs is fetched on mount. 
- */ - obd->obd_osfs_age = get_jiffies_64() - 1000 * HZ; - - /* XXX belongs in setup not attach */ - init_rwsem(&obd->obd_observer_link_sem); - /* recovery data */ - init_waitqueue_head(&obd->obd_evict_inprogress_waitq); - - llog_group_init(&obd->obd_olg); - - obd->obd_conn_inprogress = 0; - - len = strlen(uuid); - if (len >= sizeof(obd->obd_uuid)) { - CERROR("uuid must be < %d bytes long\n", - (int)sizeof(obd->obd_uuid)); - rc = -EINVAL; - goto out; - } - memcpy(obd->obd_uuid.uuid, uuid, len); - - /* Detach drops this */ - spin_lock(&obd->obd_dev_lock); - atomic_set(&obd->obd_refcount, 1); - spin_unlock(&obd->obd_dev_lock); - lu_ref_init(&obd->obd_reference); - lu_ref_add(&obd->obd_reference, "attach", obd); - - obd->obd_attached = 1; - CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n", - obd->obd_minor, typename, atomic_read(&obd->obd_refcount)); - return 0; - out: - if (obd) - class_release_dev(obd); - - return rc; -} - -/** Create hashes, self-export, and call type-specific setup. - * Setup is effectively the "start this obd" call. - */ -static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - int err = 0; - struct obd_export *exp; - - LASSERT(obd); - LASSERTF(obd == class_num2obd(obd->obd_minor), - "obd %p != obd_devs[%d] %p\n", - obd, obd->obd_minor, class_num2obd(obd->obd_minor)); - LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, - "obd %p obd_magic %08x != %08x\n", - obd, obd->obd_magic, OBD_DEVICE_MAGIC); - - /* have we attached a type to this device? */ - if (!obd->obd_attached) { - CERROR("Device %d not attached\n", obd->obd_minor); - return -ENODEV; - } - - if (obd->obd_set_up) { - CERROR("Device %d already setup (type %s)\n", - obd->obd_minor, obd->obd_type->typ_name); - return -EEXIST; - } - - /* is someone else setting us up right now? 
(attach inits spinlock) */ - spin_lock(&obd->obd_dev_lock); - if (obd->obd_starting) { - spin_unlock(&obd->obd_dev_lock); - CERROR("Device %d setup in progress (type %s)\n", - obd->obd_minor, obd->obd_type->typ_name); - return -EEXIST; - } - /* just leave this on forever. I can't use obd_set_up here because - * other fns check that status, and we're not actually set up yet. - */ - obd->obd_starting = 1; - spin_unlock(&obd->obd_dev_lock); - - /* create an uuid-export lustre hash */ - err = rhashtable_init(&obd->obd_uuid_hash, &uuid_hash_params); - - if (err) - goto err_hash; - - exp = class_new_export(obd, &obd->obd_uuid); - if (IS_ERR(exp)) { - err = PTR_ERR(exp); - goto err_new; - } - - obd->obd_self_export = exp; - class_export_put(exp); - - err = obd_setup(obd, lcfg); - if (err) - goto err_exp; - - obd->obd_set_up = 1; - - spin_lock(&obd->obd_dev_lock); - /* cleanup drops this */ - class_incref(obd, "setup", obd); - spin_unlock(&obd->obd_dev_lock); - - CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n", - obd->obd_name, obd->obd_uuid.uuid); - - return 0; -err_exp: - if (obd->obd_self_export) { - class_unlink_export(obd->obd_self_export); - obd->obd_self_export = NULL; - } -err_new: - rhashtable_destroy(&obd->obd_uuid_hash); -err_hash: - obd->obd_starting = 0; - CERROR("setup %s failed (%d)\n", obd->obd_name, err); - return err; -} - -/** We have finished using this obd and are ready to destroy it. - * There can be no more references to this obd. 
- */ -static int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - if (obd->obd_set_up) { - CERROR("OBD device %d still set up\n", obd->obd_minor); - return -EBUSY; - } - - spin_lock(&obd->obd_dev_lock); - if (!obd->obd_attached) { - spin_unlock(&obd->obd_dev_lock); - CERROR("OBD device %d not attached\n", obd->obd_minor); - return -ENODEV; - } - obd->obd_attached = 0; - spin_unlock(&obd->obd_dev_lock); - - CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n", - obd->obd_name, obd->obd_uuid.uuid); - - class_decref(obd, "attach", obd); - return 0; -} - -/** Start shutting down the obd. There may be in-progress ops when - * this is called. We tell them to start shutting down with a call - * to class_disconnect_exports(). - */ -static int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - int err = 0; - char *flag; - - OBD_RACE(OBD_FAIL_LDLM_RECOV_CLIENTS); - - if (!obd->obd_set_up) { - CERROR("Device %d not setup\n", obd->obd_minor); - return -ENODEV; - } - - spin_lock(&obd->obd_dev_lock); - if (obd->obd_stopping) { - spin_unlock(&obd->obd_dev_lock); - CERROR("OBD %d already stopping\n", obd->obd_minor); - return -ENODEV; - } - /* Leave this on forever */ - obd->obd_stopping = 1; - spin_unlock(&obd->obd_dev_lock); - - while (obd->obd_conn_inprogress > 0) - cond_resched(); - smp_rmb(); - - if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) { - for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++) - switch (*flag) { - case 'F': - obd->obd_force = 1; - break; - case 'A': - LCONSOLE_WARN("Failing over %s\n", - obd->obd_name); - obd->obd_fail = 1; - obd->obd_no_transno = 1; - obd->obd_no_recov = 1; - if (OBP(obd, iocontrol)) { - obd_iocontrol(OBD_IOC_SYNC, - obd->obd_self_export, - 0, NULL, NULL); - } - break; - default: - CERROR("Unrecognised flag '%c'\n", *flag); - } - } - - LASSERT(obd->obd_self_export); - - /* Precleanup, we must make sure all exports get destroyed. 
*/ - err = obd_precleanup(obd); - if (err) - CERROR("Precleanup %s returned %d\n", - obd->obd_name, err); - - /* destroy an uuid-export hash body */ - rhashtable_free_and_destroy(&obd->obd_uuid_hash, uuid_export_exit, NULL); - - class_decref(obd, "setup", obd); - obd->obd_set_up = 0; - - return 0; -} - -struct obd_device *class_incref(struct obd_device *obd, - const char *scope, const void *source) -{ - lu_ref_add_atomic(&obd->obd_reference, scope, source); - atomic_inc(&obd->obd_refcount); - CDEBUG(D_INFO, "incref %s (%p) now %d\n", obd->obd_name, obd, - atomic_read(&obd->obd_refcount)); - - return obd; -} -EXPORT_SYMBOL(class_incref); - -void class_decref(struct obd_device *obd, const char *scope, const void *source) -{ - int err; - int refs; - - spin_lock(&obd->obd_dev_lock); - atomic_dec(&obd->obd_refcount); - refs = atomic_read(&obd->obd_refcount); - spin_unlock(&obd->obd_dev_lock); - lu_ref_del(&obd->obd_reference, scope, source); - - CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs); - - if ((refs == 1) && obd->obd_stopping) { - /* All exports have been destroyed; there should - * be no more in-progress ops by this point. - */ - - spin_lock(&obd->obd_self_export->exp_lock); - obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd); - spin_unlock(&obd->obd_self_export->exp_lock); - - /* note that we'll recurse into class_decref again */ - class_unlink_export(obd->obd_self_export); - return; - } - - if (refs == 0) { - CDEBUG(D_CONFIG, "finishing cleanup of obd %s (%s)\n", - obd->obd_name, obd->obd_uuid.uuid); - LASSERT(!obd->obd_attached); - if (obd->obd_stopping) { - /* If we're not stopping, we were never set up */ - err = obd_cleanup(obd); - if (err) - CERROR("Cleanup %s returned %d\n", - obd->obd_name, err); - } - class_release_dev(obd); - } -} -EXPORT_SYMBOL(class_decref); - -/** Add a failover nid location. - * Client obd types contact server obd types using this nid list. 
- */ -static int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - struct obd_import *imp; - struct obd_uuid uuid; - int rc; - - if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 || - LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) { - CERROR("invalid conn_uuid\n"); - return -EINVAL; - } - if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) && - strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) && - strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME) && - strcmp(obd->obd_type->typ_name, LUSTRE_LWP_NAME) && - strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) { - CERROR("can't add connection on non-client dev\n"); - return -EINVAL; - } - - imp = obd->u.cli.cl_import; - if (!imp) { - CERROR("try to add conn on immature client dev\n"); - return -EINVAL; - } - - obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1)); - rc = obd_add_conn(imp, &uuid, lcfg->lcfg_num); - - return rc; -} - -/** Remove a failover nid location. - */ -static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - struct obd_import *imp; - struct obd_uuid uuid; - int rc; - - if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 || - LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) { - CERROR("invalid conn_uuid\n"); - return -EINVAL; - } - if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) && - strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME)) { - CERROR("can't del connection on non-client dev\n"); - return -EINVAL; - } - - imp = obd->u.cli.cl_import; - if (!imp) { - CERROR("try to del conn on immature client dev\n"); - return -EINVAL; - } - - obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1)); - rc = obd_del_conn(imp, &uuid); - - return rc; -} - -static LIST_HEAD(lustre_profile_list); -static DEFINE_SPINLOCK(lustre_profile_list_lock); - -struct lustre_profile *class_get_profile(const char *prof) -{ - struct lustre_profile *lprof; - - spin_lock(&lustre_profile_list_lock); - list_for_each_entry(lprof, &lustre_profile_list, lp_list) { - if (!strcmp(lprof->lp_profile, prof)) { - lprof->lp_refs++; 
- spin_unlock(&lustre_profile_list_lock); - return lprof; - } - } - spin_unlock(&lustre_profile_list_lock); - return NULL; -} -EXPORT_SYMBOL(class_get_profile); - -/** Create a named "profile". - * This defines the mdc and osc names to use for a client. - * This also is used to define the lov to be used by a mdt. - */ -static int class_add_profile(int proflen, char *prof, int osclen, char *osc, - int mdclen, char *mdc) -{ - struct lustre_profile *lprof; - int err = 0; - - CDEBUG(D_CONFIG, "Add profile %s\n", prof); - - lprof = kzalloc(sizeof(*lprof), GFP_NOFS); - if (!lprof) - return -ENOMEM; - INIT_LIST_HEAD(&lprof->lp_list); - - LASSERT(proflen == (strlen(prof) + 1)); - lprof->lp_profile = kmemdup(prof, proflen, GFP_NOFS); - if (!lprof->lp_profile) { - err = -ENOMEM; - goto free_lprof; - } - - LASSERT(osclen == (strlen(osc) + 1)); - lprof->lp_dt = kmemdup(osc, osclen, GFP_NOFS); - if (!lprof->lp_dt) { - err = -ENOMEM; - goto free_lp_profile; - } - - if (mdclen > 0) { - LASSERT(mdclen == (strlen(mdc) + 1)); - lprof->lp_md = kmemdup(mdc, mdclen, GFP_NOFS); - if (!lprof->lp_md) { - err = -ENOMEM; - goto free_lp_dt; - } - } - - spin_lock(&lustre_profile_list_lock); - lprof->lp_refs = 1; - lprof->lp_list_deleted = false; - list_add(&lprof->lp_list, &lustre_profile_list); - spin_unlock(&lustre_profile_list_lock); - return err; - -free_lp_dt: - kfree(lprof->lp_dt); -free_lp_profile: - kfree(lprof->lp_profile); -free_lprof: - kfree(lprof); - return err; -} - -void class_del_profile(const char *prof) -{ - struct lustre_profile *lprof; - - CDEBUG(D_CONFIG, "Del profile %s\n", prof); - - lprof = class_get_profile(prof); - if (lprof) { - spin_lock(&lustre_profile_list_lock); - /* because get profile increments the ref counter */ - lprof->lp_refs--; - list_del(&lprof->lp_list); - lprof->lp_list_deleted = true; - spin_unlock(&lustre_profile_list_lock); - - class_put_profile(lprof); - } -} -EXPORT_SYMBOL(class_del_profile); - -void class_put_profile(struct lustre_profile 
*lprof) -{ - spin_lock(&lustre_profile_list_lock); - if (--lprof->lp_refs > 0) { - LASSERT(lprof->lp_refs > 0); - spin_unlock(&lustre_profile_list_lock); - return; - } - spin_unlock(&lustre_profile_list_lock); - - /* confirm not a negative number */ - LASSERT(!lprof->lp_refs); - - /* - * At least one class_del_profile/profiles must be called - * on the target profile or lustre_profile_list will corrupt - */ - LASSERT(lprof->lp_list_deleted); - kfree(lprof->lp_profile); - kfree(lprof->lp_dt); - kfree(lprof->lp_md); - kfree(lprof); -} -EXPORT_SYMBOL(class_put_profile); - -/* COMPAT_146 */ -void class_del_profiles(void) -{ - struct lustre_profile *lprof, *n; - - spin_lock(&lustre_profile_list_lock); - list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) { - list_del(&lprof->lp_list); - lprof->lp_list_deleted = true; - spin_unlock(&lustre_profile_list_lock); - - class_put_profile(lprof); - - spin_lock(&lustre_profile_list_lock); - } - spin_unlock(&lustre_profile_list_lock); -} -EXPORT_SYMBOL(class_del_profiles); - -static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg) -{ - if (class_match_param(ptr, PARAM_AT_MIN, NULL) == 0) - at_min = val; - else if (class_match_param(ptr, PARAM_AT_MAX, NULL) == 0) - at_max = val; - else if (class_match_param(ptr, PARAM_AT_EXTRA, NULL) == 0) - at_extra = val; - else if (class_match_param(ptr, PARAM_AT_EARLY_MARGIN, NULL) == 0) - at_early_margin = val; - else if (class_match_param(ptr, PARAM_AT_HISTORY, NULL) == 0) - at_history = val; - else if (class_match_param(ptr, PARAM_JOBID_VAR, NULL) == 0) - strlcpy(obd_jobid_var, lustre_cfg_string(lcfg, 2), - JOBSTATS_JOBID_VAR_MAX_LEN + 1); - else - return -EINVAL; - - CDEBUG(D_IOCTL, "global %s = %d\n", ptr, val); - return 0; -} - -/* We can't call ll_process_config or lquota_process_config directly because - * it lives in a module that must be loaded after this one. 
- */ -static int (*client_process_config)(struct lustre_cfg *lcfg); -static int (*quota_process_config)(struct lustre_cfg *lcfg); - -void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg)) -{ - client_process_config = cpc; -} -EXPORT_SYMBOL(lustre_register_client_process_config); - -static int process_param2_config(struct lustre_cfg *lcfg) -{ - char *param = lustre_cfg_string(lcfg, 1); - char *upcall = lustre_cfg_string(lcfg, 2); - char *argv[] = { - [0] = "/usr/sbin/lctl", - [1] = "set_param", - [2] = param, - [3] = NULL - }; - ktime_t start; - ktime_t end; - int rc; - - /* Add upcall processing here. Now only lctl is supported */ - if (strcmp(upcall, LCTL_UPCALL) != 0) { - CERROR("Unsupported upcall %s\n", upcall); - return -EINVAL; - } - - start = ktime_get(); - rc = call_usermodehelper(argv[0], argv, NULL, UMH_WAIT_PROC); - end = ktime_get(); - - if (rc < 0) { - CERROR( - "lctl: error invoking upcall %s %s %s: rc = %d; time %ldus\n", - argv[0], argv[1], argv[2], rc, - (long)ktime_us_delta(end, start)); - } else { - CDEBUG(D_HA, "lctl: invoked upcall %s %s %s, time %ldus\n", - argv[0], argv[1], argv[2], - (long)ktime_us_delta(end, start)); - rc = 0; - } - - return rc; -} - -/** Process configuration commands given in lustre_cfg form. - * These may come from direct calls (e.g. class_manual_cleanup) - * or processing the config llog, or ioctl from lctl. 
- */ -int class_process_config(struct lustre_cfg *lcfg) -{ - struct obd_device *obd; - int err; - - LASSERT(lcfg && !IS_ERR(lcfg)); - CDEBUG(D_IOCTL, "processing cmd: %x\n", lcfg->lcfg_command); - - /* Commands that don't need a device */ - switch (lcfg->lcfg_command) { - case LCFG_ATTACH: { - err = class_attach(lcfg); - goto out; - } - case LCFG_ADD_UUID: { - CDEBUG(D_IOCTL, "adding mapping from uuid %s to nid %#llx (%s)\n", - lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid, - libcfs_nid2str(lcfg->lcfg_nid)); - - err = class_add_uuid(lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid); - goto out; - } - case LCFG_DEL_UUID: { - CDEBUG(D_IOCTL, "removing mappings for uuid %s\n", - (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) == 0) - ? "" : lustre_cfg_string(lcfg, 1)); - - err = class_del_uuid(lustre_cfg_string(lcfg, 1)); - goto out; - } - case LCFG_MOUNTOPT: { - CDEBUG(D_IOCTL, "mountopt: profile %s osc %s mdc %s\n", - lustre_cfg_string(lcfg, 1), - lustre_cfg_string(lcfg, 2), - lustre_cfg_string(lcfg, 3)); - /* set these mount options somewhere, so ll_fill_super - * can find them. 
- */ - err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1), - lustre_cfg_string(lcfg, 1), - LUSTRE_CFG_BUFLEN(lcfg, 2), - lustre_cfg_string(lcfg, 2), - LUSTRE_CFG_BUFLEN(lcfg, 3), - lustre_cfg_string(lcfg, 3)); - goto out; - } - case LCFG_DEL_MOUNTOPT: { - CDEBUG(D_IOCTL, "mountopt: profile %s\n", - lustre_cfg_string(lcfg, 1)); - class_del_profile(lustre_cfg_string(lcfg, 1)); - err = 0; - goto out; - } - case LCFG_SET_TIMEOUT: { - CDEBUG(D_IOCTL, "changing lustre timeout from %d to %d\n", - obd_timeout, lcfg->lcfg_num); - obd_timeout = max(lcfg->lcfg_num, 1U); - obd_timeout_set = 1; - err = 0; - goto out; - } - case LCFG_SET_LDLM_TIMEOUT: { - /* ldlm_timeout is not used on the client */ - err = 0; - goto out; - } - case LCFG_SET_UPCALL: { - LCONSOLE_ERROR_MSG(0x15a, "recovery upcall is deprecated\n"); - /* COMPAT_146 Don't fail on old configs */ - err = 0; - goto out; - } - case LCFG_MARKER: { - struct cfg_marker *marker; - - marker = lustre_cfg_buf(lcfg, 1); - CDEBUG(D_IOCTL, "marker %d (%#x) %.16s %s\n", marker->cm_step, - marker->cm_flags, marker->cm_tgtname, marker->cm_comment); - err = 0; - goto out; - } - case LCFG_PARAM: { - char *tmp; - /* llite has no obd */ - if ((class_match_param(lustre_cfg_string(lcfg, 1), - PARAM_LLITE, NULL) == 0) && - client_process_config) { - err = (*client_process_config)(lcfg); - goto out; - } else if ((class_match_param(lustre_cfg_string(lcfg, 1), - PARAM_SYS, &tmp) == 0)) { - /* Global param settings */ - err = class_set_global(tmp, lcfg->lcfg_num, lcfg); - /* - * Client or server should not fail to mount if - * it hits an unknown configuration parameter. 
- */ - if (err != 0) - CWARN("Ignoring unknown param %s\n", tmp); - - err = 0; - goto out; - } else if ((class_match_param(lustre_cfg_string(lcfg, 1), - PARAM_QUOTA, &tmp) == 0) && - quota_process_config) { - err = (*quota_process_config)(lcfg); - goto out; - } - - break; - } - case LCFG_SET_PARAM: { - err = process_param2_config(lcfg); - goto out; - } - } - /* Commands that require a device */ - obd = class_name2obd(lustre_cfg_string(lcfg, 0)); - if (!obd) { - if (!LUSTRE_CFG_BUFLEN(lcfg, 0)) - CERROR("this lcfg command requires a device name\n"); - else - CERROR("no device for: %s\n", - lustre_cfg_string(lcfg, 0)); - - err = -EINVAL; - goto out; - } - - switch (lcfg->lcfg_command) { - case LCFG_SETUP: { - err = class_setup(obd, lcfg); - goto out; - } - case LCFG_DETACH: { - err = class_detach(obd, lcfg); - err = 0; - goto out; - } - case LCFG_CLEANUP: { - err = class_cleanup(obd, lcfg); - err = 0; - goto out; - } - case LCFG_ADD_CONN: { - err = class_add_conn(obd, lcfg); - err = 0; - goto out; - } - case LCFG_DEL_CONN: { - err = class_del_conn(obd, lcfg); - err = 0; - goto out; - } - case LCFG_POOL_NEW: { - err = obd_pool_new(obd, lustre_cfg_string(lcfg, 2)); - err = 0; - goto out; - } - case LCFG_POOL_ADD: { - err = obd_pool_add(obd, lustre_cfg_string(lcfg, 2), - lustre_cfg_string(lcfg, 3)); - err = 0; - goto out; - } - case LCFG_POOL_REM: { - err = obd_pool_rem(obd, lustre_cfg_string(lcfg, 2), - lustre_cfg_string(lcfg, 3)); - err = 0; - goto out; - } - case LCFG_POOL_DEL: { - err = obd_pool_del(obd, lustre_cfg_string(lcfg, 2)); - err = 0; - goto out; - } - default: { - err = obd_process_config(obd, sizeof(*lcfg), lcfg); - goto out; - } - } -out: - if ((err < 0) && !(lcfg->lcfg_command & LCFG_REQUIRED)) { - CWARN("Ignoring error %d on optional command %#x\n", err, - lcfg->lcfg_command); - err = 0; - } - return err; -} -EXPORT_SYMBOL(class_process_config); - -int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, - struct lustre_cfg *lcfg, void 
*data) -{ - struct lprocfs_vars *var; - struct file fakefile; - struct seq_file fake_seqfile; - char *key, *sval; - int i, keylen, vallen; - int matched = 0, j = 0; - int rc = 0; - int skip = 0; - - if (lcfg->lcfg_command != LCFG_PARAM) { - CERROR("Unknown command: %d\n", lcfg->lcfg_command); - return -EINVAL; - } - - /* fake a seq file so that var->fops->write can work... */ - fakefile.private_data = &fake_seqfile; - fake_seqfile.private = data; - /* e.g. tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt - * or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar - * or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 - */ - for (i = 1; i < lcfg->lcfg_bufcount; i++) { - key = lustre_cfg_buf(lcfg, i); - /* Strip off prefix */ - if (class_match_param(key, prefix, &key)) { - /* - * If the prefix doesn't match, return error so we - * can pass it down the stack - */ - return -ENOSYS; - } - sval = strchr(key, '='); - if (!sval || (*(sval + 1) == 0)) { - CERROR("Can't parse param %s (missing '=')\n", key); - /* rc = -EINVAL; continue parsing other params */ - continue; - } - keylen = sval - key; - sval++; - vallen = strlen(sval); - matched = 0; - j = 0; - /* Search proc entries */ - while (lvars[j].name) { - var = &lvars[j]; - if (!class_match_param(key, var->name, NULL) && - keylen == strlen(var->name)) { - matched++; - rc = -EROFS; - if (var->fops && var->fops->write) { - mm_segment_t oldfs; - - oldfs = get_fs(); - set_fs(KERNEL_DS); - rc = var->fops->write(&fakefile, - (const char __user *)sval, - vallen, NULL); - set_fs(oldfs); - } - break; - } - j++; - } - if (!matched) { - CERROR("%.*s: %s unknown param %s\n", - (int)strlen(prefix) - 1, prefix, - (char *)lustre_cfg_string(lcfg, 0), key); - /* rc = -EINVAL; continue parsing other params */ - skip++; - } else if (rc < 0) { - CERROR("%s: error writing proc entry '%s': rc = %d\n", - prefix, var->name, rc); - rc = 0; - } else { - CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n", - lustre_cfg_string(lcfg, 
0), - (int)strlen(prefix) - 1, prefix, - (int)(sval - key - 1), key, sval); - } - } - - if (rc > 0) - rc = 0; - if (!rc && skip) - rc = skip; - return rc; -} -EXPORT_SYMBOL(class_process_proc_param); - -/** Parse a configuration llog, doing various manipulations on them - * for various reasons, (modifications for compatibility, skip obsolete - * records, change uuids, etc), then class_process_config() resulting - * net records. - */ -int class_config_llog_handler(const struct lu_env *env, - struct llog_handle *handle, - struct llog_rec_hdr *rec, void *data) -{ - struct config_llog_instance *clli = data; - int cfg_len = rec->lrh_len; - char *cfg_buf = (char *)(rec + 1); - int rc = 0; - - switch (rec->lrh_type) { - case OBD_CFG_REC: { - struct lustre_cfg *lcfg, *lcfg_new; - struct lustre_cfg_bufs bufs; - char *inst_name = NULL; - int inst_len = 0; - size_t lcfg_len; - int swab = 0; - - lcfg = (struct lustre_cfg *)cfg_buf; - if (lcfg->lcfg_version == __swab32(LUSTRE_CFG_VERSION)) { - lustre_swab_lustre_cfg(lcfg); - swab = 1; - } - - rc = lustre_cfg_sanity_check(cfg_buf, cfg_len); - if (rc) - goto out; - - /* Figure out config state info */ - if (lcfg->lcfg_command == LCFG_MARKER) { - struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1); - - lustre_swab_cfg_marker(marker, swab, - LUSTRE_CFG_BUFLEN(lcfg, 1)); - CDEBUG(D_CONFIG, "Marker, inst_flg=%#x mark_flg=%#x\n", - clli->cfg_flags, marker->cm_flags); - if (marker->cm_flags & CM_START) { - /* all previous flags off */ - clli->cfg_flags = CFG_F_MARKER; - if (marker->cm_flags & CM_SKIP) { - clli->cfg_flags |= CFG_F_SKIP; - CDEBUG(D_CONFIG, "SKIP #%d\n", - marker->cm_step); - } else if ((marker->cm_flags & CM_EXCLUDE) || - (clli->cfg_sb && - lustre_check_exclusion(clli->cfg_sb, - marker->cm_tgtname))) { - clli->cfg_flags |= CFG_F_EXCLUDE; - CDEBUG(D_CONFIG, "EXCLUDE %d\n", - marker->cm_step); - } - } else if (marker->cm_flags & CM_END) { - clli->cfg_flags = 0; - } - } - /* A config command without a start marker before it 
is - * illegal (post 146) - */ - if (!(clli->cfg_flags & CFG_F_COMPAT146) && - !(clli->cfg_flags & CFG_F_MARKER) && - (lcfg->lcfg_command != LCFG_MARKER)) { - CWARN("Config not inside markers, ignoring! (inst: %p, uuid: %s, flags: %#x)\n", - clli->cfg_instance, - clli->cfg_uuid.uuid, clli->cfg_flags); - clli->cfg_flags |= CFG_F_SKIP; - } - if (clli->cfg_flags & CFG_F_SKIP) { - CDEBUG(D_CONFIG, "skipping %#x\n", - clli->cfg_flags); - rc = 0; - /* No processing! */ - break; - } - - /* - * For interoperability between 1.8 and 2.0, - * rename "mds" obd device type to "mdt". - */ - { - char *typename = lustre_cfg_string(lcfg, 1); - char *index = lustre_cfg_string(lcfg, 2); - - if ((lcfg->lcfg_command == LCFG_ATTACH && typename && - strcmp(typename, "mds") == 0)) { - CWARN("For 1.8 interoperability, rename obd type from mds to mdt\n"); - typename[2] = 't'; - } - if ((lcfg->lcfg_command == LCFG_SETUP && index && - strcmp(index, "type") == 0)) { - CDEBUG(D_INFO, "For 1.8 interoperability, set this index to '0'\n"); - index[0] = '0'; - index[1] = 0; - } - } - - if (clli->cfg_flags & CFG_F_EXCLUDE) { - CDEBUG(D_CONFIG, "cmd: %x marked EXCLUDED\n", - lcfg->lcfg_command); - if (lcfg->lcfg_command == LCFG_LOV_ADD_OBD) - /* Add inactive instead */ - lcfg->lcfg_command = LCFG_LOV_ADD_INA; - } - - lustre_cfg_bufs_init(&bufs, lcfg); - - if (clli && clli->cfg_instance && - LUSTRE_CFG_BUFLEN(lcfg, 0) > 0) { - inst_len = LUSTRE_CFG_BUFLEN(lcfg, 0) + - sizeof(clli->cfg_instance) * 2 + 4; - inst_name = kasprintf(GFP_NOFS, "%s-%p", - lustre_cfg_string(lcfg, 0), - clli->cfg_instance); - if (!inst_name) { - rc = -ENOMEM; - goto out; - } - lustre_cfg_bufs_set_string(&bufs, 0, inst_name); - CDEBUG(D_CONFIG, "cmd %x, instance name: %s\n", - lcfg->lcfg_command, inst_name); - } - - /* we override the llog's uuid for clients, to insure they - * are unique - */ - if (clli && clli->cfg_instance && - lcfg->lcfg_command == LCFG_ATTACH) { - lustre_cfg_bufs_set_string(&bufs, 2, - clli->cfg_uuid.uuid); 
- } - /* - * sptlrpc config record, we expect 2 data segments: - * [0]: fs_name/target_name, - * [1]: rule string - * moving them to index [1] and [2], and insert MGC's - * obdname at index [0]. - */ - if (clli && !clli->cfg_instance && - lcfg->lcfg_command == LCFG_SPTLRPC_CONF) { - lustre_cfg_bufs_set(&bufs, 2, bufs.lcfg_buf[1], - bufs.lcfg_buflen[1]); - lustre_cfg_bufs_set(&bufs, 1, bufs.lcfg_buf[0], - bufs.lcfg_buflen[0]); - lustre_cfg_bufs_set_string(&bufs, 0, - clli->cfg_obdname); - } - - lcfg_len = lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen); - lcfg_new = kzalloc(lcfg_len, GFP_NOFS); - if (!lcfg_new) { - rc = -ENOMEM; - goto out; - } - - lustre_cfg_init(lcfg_new, lcfg->lcfg_command, &bufs); - lcfg_new->lcfg_num = lcfg->lcfg_num; - lcfg_new->lcfg_flags = lcfg->lcfg_flags; - - /* XXX Hack to try to remain binary compatible with - * pre-newconfig logs - */ - if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */ - (lcfg->lcfg_nid >> 32) == 0) { - __u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff); - - lcfg_new->lcfg_nid = - LNET_MKNID(LNET_MKNET(lcfg->lcfg_nal, 0), addr); - CWARN("Converted pre-newconfig NAL %d NID %x to %s\n", - lcfg->lcfg_nal, addr, - libcfs_nid2str(lcfg_new->lcfg_nid)); - } else { - lcfg_new->lcfg_nid = lcfg->lcfg_nid; - } - - lcfg_new->lcfg_nal = 0; /* illegal value for obsolete field */ - - rc = class_process_config(lcfg_new); - kfree(lcfg_new); - kfree(inst_name); - break; - } - default: - CERROR("Unknown llog record type %#x encountered\n", - rec->lrh_type); - break; - } -out: - if (rc) { - CERROR("%s: cfg command failed: rc = %d\n", - handle->lgh_ctxt->loc_obd->obd_name, rc); - class_config_dump_handler(NULL, handle, rec, data); - } - return rc; -} -EXPORT_SYMBOL(class_config_llog_handler); - -int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, - char *name, struct config_llog_instance *cfg) -{ - struct llog_process_cat_data cd = {0, 0}; - struct llog_handle *llh; - llog_cb_t callback; - int rc; - - 
CDEBUG(D_INFO, "looking up llog %s\n", name); - rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS); - if (rc) - return rc; - - rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL); - if (rc) - goto parse_out; - - /* continue processing from where we last stopped to end-of-log */ - if (cfg) { - cd.lpcd_first_idx = cfg->cfg_last_idx; - callback = cfg->cfg_callback; - LASSERT(callback); - } else { - callback = class_config_llog_handler; - } - - cd.lpcd_last_idx = 0; - - rc = llog_process(env, llh, callback, cfg, &cd); - - CDEBUG(D_CONFIG, "Processed log %s gen %d-%d (rc=%d)\n", name, - cd.lpcd_first_idx + 1, cd.lpcd_last_idx, rc); - if (cfg) - cfg->cfg_last_idx = cd.lpcd_last_idx; - -parse_out: - llog_close(env, llh); - return rc; -} -EXPORT_SYMBOL(class_config_parse_llog); - -/** - * parse config record and output dump in supplied buffer. - * This is separated from class_config_dump_handler() to use - * for ioctl needs as well - */ -static int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf, - int size) -{ - struct lustre_cfg *lcfg = (struct lustre_cfg *)(rec + 1); - char *ptr = buf; - char *end = buf + size; - int rc = 0; - - LASSERT(rec->lrh_type == OBD_CFG_REC); - rc = lustre_cfg_sanity_check(lcfg, rec->lrh_len); - if (rc < 0) - return rc; - - ptr += snprintf(ptr, end - ptr, "cmd=%05x ", lcfg->lcfg_command); - if (lcfg->lcfg_flags) - ptr += snprintf(ptr, end - ptr, "flags=%#08x ", - lcfg->lcfg_flags); - - if (lcfg->lcfg_num) - ptr += snprintf(ptr, end - ptr, "num=%#08x ", lcfg->lcfg_num); - - if (lcfg->lcfg_nid) { - char nidstr[LNET_NIDSTR_SIZE]; - - libcfs_nid2str_r(lcfg->lcfg_nid, nidstr, sizeof(nidstr)); - ptr += snprintf(ptr, end - ptr, "nid=%s(%#llx)\n ", - nidstr, lcfg->lcfg_nid); - } - - if (lcfg->lcfg_command == LCFG_MARKER) { - struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1); - - ptr += snprintf(ptr, end - ptr, "marker=%d(%#x)%s '%s'", - marker->cm_step, marker->cm_flags, - marker->cm_tgtname, marker->cm_comment); - } else { - 
int i; - - for (i = 0; i < lcfg->lcfg_bufcount; i++) { - ptr += snprintf(ptr, end - ptr, "%d:%s ", i, - lustre_cfg_string(lcfg, i)); - } - } - ptr += snprintf(ptr, end - ptr, "\n"); - /* return consumed bytes */ - rc = ptr - buf; - return rc; -} - -int class_config_dump_handler(const struct lu_env *env, - struct llog_handle *handle, - struct llog_rec_hdr *rec, void *data) -{ - char *outstr; - int rc = 0; - - outstr = kzalloc(256, GFP_NOFS); - if (!outstr) - return -ENOMEM; - - if (rec->lrh_type == OBD_CFG_REC) { - class_config_parse_rec(rec, outstr, 256); - LCONSOLE(D_WARNING, " %s", outstr); - } else { - LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type); - rc = -EINVAL; - } - - kfree(outstr); - return rc; -} - -/** Call class_cleanup and class_detach. - * "Manual" only in the sense that we're faking lcfg commands. - */ -int class_manual_cleanup(struct obd_device *obd) -{ - char flags[3] = ""; - struct lustre_cfg *lcfg; - struct lustre_cfg_bufs bufs; - int rc; - - if (!obd) { - CERROR("empty cleanup\n"); - return -EALREADY; - } - - if (obd->obd_force) - strcat(flags, "F"); - if (obd->obd_fail) - strcat(flags, "A"); - - CDEBUG(D_CONFIG, "Manual cleanup of %s (flags='%s')\n", - obd->obd_name, flags); - - lustre_cfg_bufs_reset(&bufs, obd->obd_name); - lustre_cfg_bufs_set_string(&bufs, 1, flags); - lcfg = kzalloc(lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen), - GFP_NOFS); - if (!lcfg) - return -ENOMEM; - lustre_cfg_init(lcfg, LCFG_CLEANUP, &bufs); - - rc = class_process_config(lcfg); - if (rc) { - CERROR("cleanup failed %d: %s\n", rc, obd->obd_name); - goto out; - } - - /* the lcfg is almost the same for both ops */ - lcfg->lcfg_command = LCFG_DETACH; - rc = class_process_config(lcfg); - if (rc) - CERROR("detach failed %d: %s\n", rc, obd->obd_name); -out: - kfree(lcfg); - return rc; -} -EXPORT_SYMBOL(class_manual_cleanup); diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c deleted 
file mode 100644 index 06c38fdef7ba..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c +++ /dev/null @@ -1,1245 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/obd_mount.c - * - * Client mount routines - * - * Author: Nathan Rutman - */ - -#define DEBUG_SUBSYSTEM S_CLASS -#define D_MOUNT (D_SUPER | D_CONFIG/*|D_WARNING */) -#define PRINT_CMD CDEBUG - -#include -#include -#include -#include -#include -#include -#include -#include - -static DEFINE_SPINLOCK(client_lock); -static struct module *client_mod; -static int (*client_fill_super)(struct super_block *sb); -static void (*kill_super_cb)(struct super_block *sb); - -/**************** config llog ********************/ - -/** Get a config log from the MGS and process it. - * This func is called for both clients and servers. 
- * Continue to process new statements appended to the logs - * (whenever the config lock is revoked) until lustre_end_log - * is called. - * @param sb The superblock is used by the MGC to write to the local copy of - * the config log - * @param logname The name of the llog to replicate from the MGS - * @param cfg Since the same mgc may be used to follow multiple config logs - * (e.g. ost1, ost2, client), the config_llog_instance keeps the state for - * this log, and is added to the mgc's list of logs to follow. - */ -int lustre_process_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg) -{ - struct lustre_cfg *lcfg; - struct lustre_cfg_bufs *bufs; - struct lustre_sb_info *lsi = s2lsi(sb); - struct obd_device *mgc = lsi->lsi_mgc; - int rc; - - LASSERT(mgc); - LASSERT(cfg); - - bufs = kzalloc(sizeof(*bufs), GFP_NOFS); - if (!bufs) - return -ENOMEM; - - /* mgc_process_config */ - lustre_cfg_bufs_reset(bufs, mgc->obd_name); - lustre_cfg_bufs_set_string(bufs, 1, logname); - lustre_cfg_bufs_set(bufs, 2, cfg, sizeof(*cfg)); - lustre_cfg_bufs_set(bufs, 3, &sb, sizeof(sb)); - lcfg = kzalloc(lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen), - GFP_NOFS); - if (!lcfg) { - rc = -ENOMEM; - goto out; - } - lustre_cfg_init(lcfg, LCFG_LOG_START, bufs); - - rc = obd_process_config(mgc, sizeof(*lcfg), lcfg); - kfree(lcfg); -out: - kfree(bufs); - - if (rc == -EINVAL) - LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n", - mgc->obd_name, logname, rc); - - else if (rc) - LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. 
See the syslog for more information.\n", - mgc->obd_name, logname, - rc); - - /* class_obd_list(); */ - return rc; -} -EXPORT_SYMBOL(lustre_process_log); - -/* Stop watching this config log for updates */ -int lustre_end_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg) -{ - struct lustre_cfg *lcfg; - struct lustre_cfg_bufs bufs; - struct lustre_sb_info *lsi = s2lsi(sb); - struct obd_device *mgc = lsi->lsi_mgc; - int rc; - - if (!mgc) - return -ENOENT; - - /* mgc_process_config */ - lustre_cfg_bufs_reset(&bufs, mgc->obd_name); - lustre_cfg_bufs_set_string(&bufs, 1, logname); - if (cfg) - lustre_cfg_bufs_set(&bufs, 2, cfg, sizeof(*cfg)); - lcfg = kzalloc(lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen), - GFP_NOFS); - if (!lcfg) - return -ENOMEM; - lustre_cfg_init(lcfg, LCFG_LOG_END, &bufs); - - rc = obd_process_config(mgc, sizeof(*lcfg), lcfg); - kfree(lcfg); - return rc; -} -EXPORT_SYMBOL(lustre_end_log); - -/**************** obd start *******************/ - -/** lustre_cfg_bufs are a holdover from 1.4; we can still set these up from - * lctl (and do for echo cli/srv. - */ -static int do_lcfg(char *cfgname, lnet_nid_t nid, int cmd, - char *s1, char *s2, char *s3, char *s4) -{ - struct lustre_cfg_bufs bufs; - struct lustre_cfg *lcfg = NULL; - int rc; - - CDEBUG(D_TRACE, "lcfg %s %#x %s %s %s %s\n", cfgname, - cmd, s1, s2, s3, s4); - - lustre_cfg_bufs_reset(&bufs, cfgname); - if (s1) - lustre_cfg_bufs_set_string(&bufs, 1, s1); - if (s2) - lustre_cfg_bufs_set_string(&bufs, 2, s2); - if (s3) - lustre_cfg_bufs_set_string(&bufs, 3, s3); - if (s4) - lustre_cfg_bufs_set_string(&bufs, 4, s4); - - lcfg = kzalloc(lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen), - GFP_NOFS); - if (!lcfg) - return -ENOMEM; - lustre_cfg_init(lcfg, cmd, &bufs); - lcfg->lcfg_nid = nid; - rc = class_process_config(lcfg); - kfree(lcfg); - return rc; -} - -/** Call class_attach and class_setup. These methods in turn call - * obd type-specific methods. 
- */ -static int lustre_start_simple(char *obdname, char *type, char *uuid, - char *s1, char *s2, char *s3, char *s4) -{ - int rc; - - CDEBUG(D_MOUNT, "Starting obd %s (typ=%s)\n", obdname, type); - - rc = do_lcfg(obdname, 0, LCFG_ATTACH, type, uuid, NULL, NULL); - if (rc) { - CERROR("%s attach error %d\n", obdname, rc); - return rc; - } - rc = do_lcfg(obdname, 0, LCFG_SETUP, s1, s2, s3, s4); - if (rc) { - CERROR("%s setup error %d\n", obdname, rc); - do_lcfg(obdname, 0, LCFG_DETACH, NULL, NULL, NULL, NULL); - } - return rc; -} - -static DEFINE_MUTEX(mgc_start_lock); - -/** Set up a mgc obd to process startup logs - * - * \param sb [in] super block of the mgc obd - * - * \retval 0 success, otherwise error code - */ -int lustre_start_mgc(struct super_block *sb) -{ - struct obd_connect_data *data = NULL; - struct lustre_sb_info *lsi = s2lsi(sb); - struct obd_device *obd; - struct obd_export *exp; - struct obd_uuid *uuid; - class_uuid_t uuidc; - lnet_nid_t nid; - char nidstr[LNET_NIDSTR_SIZE]; - char *mgcname = NULL, *niduuid = NULL, *mgssec = NULL; - char *ptr; - int rc = 0, i = 0, j; - - LASSERT(lsi->lsi_lmd); - - /* Use nids from mount line: uml1,1@elan:uml2,2@elan:/lustre */ - ptr = lsi->lsi_lmd->lmd_dev; - if (class_parse_nid(ptr, &nid, &ptr) == 0) - i++; - if (i == 0) { - CERROR("No valid MGS nids found.\n"); - return -EINVAL; - } - - mutex_lock(&mgc_start_lock); - - libcfs_nid2str_r(nid, nidstr, sizeof(nidstr)); - mgcname = kasprintf(GFP_NOFS, - "%s%s", LUSTRE_MGC_OBDNAME, nidstr); - niduuid = kasprintf(GFP_NOFS, "%s_%x", mgcname, 0); - if (!mgcname || !niduuid) { - rc = -ENOMEM; - goto out_free; - } - - mgssec = lsi->lsi_lmd->lmd_mgssec ? 
lsi->lsi_lmd->lmd_mgssec : ""; - - data = kzalloc(sizeof(*data), GFP_NOFS); - if (!data) { - rc = -ENOMEM; - goto out_free; - } - - obd = class_name2obd(mgcname); - if (obd && !obd->obd_stopping) { - int recov_bk; - - rc = obd_set_info_async(NULL, obd->obd_self_export, - strlen(KEY_MGSSEC), KEY_MGSSEC, - strlen(mgssec), mgssec, NULL); - if (rc) - goto out_free; - - /* Re-using an existing MGC */ - atomic_inc(&obd->u.cli.cl_mgc_refcount); - - /* IR compatibility check, only for clients */ - if (lmd_is_client(lsi->lsi_lmd)) { - int has_ir; - int vallen = sizeof(*data); - __u32 *flags = &lsi->lsi_lmd->lmd_flags; - - rc = obd_get_info(NULL, obd->obd_self_export, - strlen(KEY_CONN_DATA), KEY_CONN_DATA, - &vallen, data); - LASSERT(rc == 0); - has_ir = OCD_HAS_FLAG(data, IMP_RECOV); - if (has_ir ^ !(*flags & LMD_FLG_NOIR)) { - /* LMD_FLG_NOIR is for test purpose only */ - LCONSOLE_WARN( - "Trying to mount a client with IR setting not compatible with current mgc. Force to use current mgc setting that is IR %s.\n", - has_ir ? "enabled" : "disabled"); - if (has_ir) - *flags &= ~LMD_FLG_NOIR; - else - *flags |= LMD_FLG_NOIR; - } - } - - recov_bk = 0; - - /* Try all connections, but only once (again). - * We don't want to block another target from starting - * (using its local copy of the log), but we do want to connect - * if at all possible. 
- */ - recov_bk++; - CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname, - recov_bk); - rc = obd_set_info_async(NULL, obd->obd_self_export, - sizeof(KEY_INIT_RECOV_BACKUP), - KEY_INIT_RECOV_BACKUP, - sizeof(recov_bk), &recov_bk, NULL); - rc = 0; - goto out; - } - - CDEBUG(D_MOUNT, "Start MGC '%s'\n", mgcname); - - /* Add the primary nids for the MGS */ - i = 0; - /* Use nids from mount line: uml1,1@elan:uml2,2@elan:/lustre */ - ptr = lsi->lsi_lmd->lmd_dev; - while (class_parse_nid(ptr, &nid, &ptr) == 0) { - rc = do_lcfg(mgcname, nid, - LCFG_ADD_UUID, niduuid, NULL, NULL, NULL); - if (!rc) - i++; - /* Stop at the first failover nid */ - if (*ptr == ':') - break; - } - if (i == 0) { - CERROR("No valid MGS nids found.\n"); - rc = -EINVAL; - goto out_free; - } - lsi->lsi_lmd->lmd_mgs_failnodes = 1; - - /* Random uuid for MGC allows easier reconnects */ - uuid = kzalloc(sizeof(*uuid), GFP_NOFS); - if (!uuid) { - rc = -ENOMEM; - goto out_free; - } - - ll_generate_random_uuid(uuidc); - class_uuid_unparse(uuidc, uuid); - - /* Start the MGC */ - rc = lustre_start_simple(mgcname, LUSTRE_MGC_NAME, - (char *)uuid->uuid, LUSTRE_MGS_OBDNAME, - niduuid, NULL, NULL); - kfree(uuid); - if (rc) - goto out_free; - - /* Add any failover MGS nids */ - i = 1; - while (ptr && ((*ptr == ':' || - class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) { - /* New failover node */ - sprintf(niduuid, "%s_%x", mgcname, i); - j = 0; - while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) { - rc = do_lcfg(mgcname, nid, LCFG_ADD_UUID, niduuid, - NULL, NULL, NULL); - if (!rc) - ++j; - if (*ptr == ':') - break; - } - if (j > 0) { - rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN, - niduuid, NULL, NULL, NULL); - if (!rc) - i++; - } else { - /* at ":/fsname" */ - break; - } - } - lsi->lsi_lmd->lmd_mgs_failnodes = i; - - obd = class_name2obd(mgcname); - if (!obd) { - CERROR("Can't find mgcobd %s\n", mgcname); - rc = -ENOTCONN; - goto out_free; - } - - rc = obd_set_info_async(NULL, obd->obd_self_export, - 
strlen(KEY_MGSSEC), KEY_MGSSEC, - strlen(mgssec), mgssec, NULL); - if (rc) - goto out_free; - - /* Keep a refcount of servers/clients who started with "mount", - * so we know when we can get rid of the mgc. - */ - atomic_set(&obd->u.cli.cl_mgc_refcount, 1); - - /* We connect to the MGS at setup, and don't disconnect until cleanup */ - data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT | - OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | - OBD_CONNECT_LVB_TYPE | OBD_CONNECT_BULK_MBITS; - -#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE - data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB; -#endif - - if (lmd_is_client(lsi->lsi_lmd) && - lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR) - data->ocd_connect_flags &= ~OBD_CONNECT_IMP_RECOV; - data->ocd_version = LUSTRE_VERSION_CODE; - rc = obd_connect(NULL, &exp, obd, &obd->obd_uuid, data, NULL); - if (rc) { - CERROR("connect failed %d\n", rc); - goto out; - } - - obd->u.cli.cl_mgc_mgsexp = exp; - -out: - /* Keep the mgc info in the sb. Note that many lsi's can point - * to the same mgc. - */ - lsi->lsi_mgc = obd; -out_free: - mutex_unlock(&mgc_start_lock); - - kfree(data); - kfree(mgcname); - kfree(niduuid); - return rc; -} - -static int lustre_stop_mgc(struct super_block *sb) -{ - struct lustre_sb_info *lsi = s2lsi(sb); - struct obd_device *obd; - char *niduuid = NULL, *ptr = NULL; - int i, rc = 0, len = 0; - - if (!lsi) - return -ENOENT; - obd = lsi->lsi_mgc; - if (!obd) - return -ENOENT; - lsi->lsi_mgc = NULL; - - mutex_lock(&mgc_start_lock); - LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0); - if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) { - /* This is not fatal, every client that stops - * will call in here. - */ - CDEBUG(D_MOUNT, "mgc still has %d references.\n", - atomic_read(&obd->u.cli.cl_mgc_refcount)); - rc = -EBUSY; - goto out; - } - - /* The MGC has no recoverable data in any case. 
- * force shutdown set in umount_begin - */ - obd->obd_no_recov = 1; - - if (obd->u.cli.cl_mgc_mgsexp) { - /* An error is not fatal, if we are unable to send the - * disconnect mgs ping evictor cleans up the export - */ - rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp); - if (rc) - CDEBUG(D_MOUNT, "disconnect failed %d\n", rc); - } - - /* Save the obdname for cleaning the nid uuids, which are obdname_XX */ - len = strlen(obd->obd_name) + 6; - niduuid = kzalloc(len, GFP_NOFS); - if (niduuid) { - strcpy(niduuid, obd->obd_name); - ptr = niduuid + strlen(niduuid); - } - - rc = class_manual_cleanup(obd); - if (rc) - goto out; - - /* Clean the nid uuids */ - if (!niduuid) { - rc = -ENOMEM; - goto out; - } - - for (i = 0; i < lsi->lsi_lmd->lmd_mgs_failnodes; i++) { - sprintf(ptr, "_%x", i); - rc = do_lcfg(LUSTRE_MGC_OBDNAME, 0, LCFG_DEL_UUID, - niduuid, NULL, NULL, NULL); - if (rc) - CERROR("del MDC UUID %s failed: rc = %d\n", - niduuid, rc); - } -out: - kfree(niduuid); - - /* class_import_put will get rid of the additional connections */ - mutex_unlock(&mgc_start_lock); - return rc; -} - -/***************** lustre superblock **************/ - -static struct lustre_sb_info *lustre_init_lsi(struct super_block *sb) -{ - struct lustre_sb_info *lsi; - - lsi = kzalloc(sizeof(*lsi), GFP_NOFS); - if (!lsi) - return NULL; - lsi->lsi_lmd = kzalloc(sizeof(*lsi->lsi_lmd), GFP_NOFS); - if (!lsi->lsi_lmd) { - kfree(lsi); - return NULL; - } - - lsi->lsi_lmd->lmd_exclude_count = 0; - lsi->lsi_lmd->lmd_recovery_time_soft = 0; - lsi->lsi_lmd->lmd_recovery_time_hard = 0; - s2lsi_nocast(sb) = lsi; - /* we take 1 extra ref for our setup */ - atomic_set(&lsi->lsi_mounts, 1); - - /* Default umount style */ - lsi->lsi_flags = LSI_UMOUNT_FAILOVER; - - return lsi; -} - -static int lustre_free_lsi(struct super_block *sb) -{ - struct lustre_sb_info *lsi = s2lsi(sb); - - CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi); - - /* someone didn't call server_put_mount. 
*/ - LASSERT(atomic_read(&lsi->lsi_mounts) == 0); - - if (lsi->lsi_lmd) { - kfree(lsi->lsi_lmd->lmd_dev); - kfree(lsi->lsi_lmd->lmd_profile); - kfree(lsi->lsi_lmd->lmd_mgssec); - kfree(lsi->lsi_lmd->lmd_opts); - if (lsi->lsi_lmd->lmd_exclude_count) - kfree(lsi->lsi_lmd->lmd_exclude); - kfree(lsi->lsi_lmd->lmd_mgs); - kfree(lsi->lsi_lmd->lmd_osd_type); - kfree(lsi->lsi_lmd->lmd_params); - - kfree(lsi->lsi_lmd); - } - - LASSERT(!lsi->lsi_llsbi); - kfree(lsi); - s2lsi_nocast(sb) = NULL; - - return 0; -} - -/* The lsi has one reference for every server that is using the disk - - * e.g. MDT, MGS, and potentially MGC - */ -static int lustre_put_lsi(struct super_block *sb) -{ - struct lustre_sb_info *lsi = s2lsi(sb); - - CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts)); - if (atomic_dec_and_test(&lsi->lsi_mounts)) { - lustre_free_lsi(sb); - return 1; - } - return 0; -} - -/*** SERVER NAME *** - * - * FSNAME is between 1 and 8 characters (inclusive). - * Excluded characters are '/' and ':' - * SEPARATOR is either ':' or '-' - * TYPE: "OST", "MDT", etc. - * INDEX: Hex representation of the index - */ - -/** Get the fsname ("lustre") from the server name ("lustre-OST003F"). - * @param [in] svname server name including type and index - * @param [out] fsname Buffer to copy filesystem name prefix into. - * Must have at least 'strlen(fsname) + 1' chars. - * @param [out] endptr if endptr isn't NULL it is set to end of fsname - * rc < 0 on error - */ -static int server_name2fsname(const char *svname, char *fsname, - const char **endptr) -{ - const char *dash; - - dash = svname + strnlen(svname, 8); /* max fsname length is 8 */ - for (; dash > svname && *dash != '-' && *dash != ':'; dash--) - ; - if (dash == svname) - return -EINVAL; - - if (fsname) { - strncpy(fsname, svname, dash - svname); - fsname[dash - svname] = '\0'; - } - - if (endptr) - *endptr = dash; - - return 0; -} - -/* Get the index from the obd name. 
- * rc = server type, or - * rc < 0 on error - * if endptr isn't NULL it is set to end of name - */ -static int server_name2index(const char *svname, __u32 *idx, - const char **endptr) -{ - unsigned long index; - int rc; - const char *dash; - - /* We use server_name2fsname() just for parsing */ - rc = server_name2fsname(svname, NULL, &dash); - if (rc != 0) - return rc; - - dash++; - - if (strncmp(dash, "MDT", 3) == 0) - rc = LDD_F_SV_TYPE_MDT; - else if (strncmp(dash, "OST", 3) == 0) - rc = LDD_F_SV_TYPE_OST; - else - return -EINVAL; - - dash += 3; - - if (strncmp(dash, "all", 3) == 0) { - if (endptr) - *endptr = dash + 3; - return rc | LDD_F_SV_ALL; - } - - index = simple_strtoul(dash, (char **)endptr, 16); - if (idx) - *idx = index; - - /* Account for -mdc after index that is possible when specifying mdt */ - if (endptr && strncmp(LUSTRE_MDC_NAME, *endptr + 1, - sizeof(LUSTRE_MDC_NAME) - 1) == 0) - *endptr += sizeof(LUSTRE_MDC_NAME); - - return rc; -} - -/*************** mount common between server and client ***************/ - -/* Common umount */ -int lustre_common_put_super(struct super_block *sb) -{ - int rc; - - CDEBUG(D_MOUNT, "dropping sb %p\n", sb); - - /* Drop a ref to the MGC */ - rc = lustre_stop_mgc(sb); - if (rc && (rc != -ENOENT)) { - if (rc != -EBUSY) { - CERROR("Can't stop MGC: %d\n", rc); - return rc; - } - /* BUSY just means that there's some other obd that - * needs the mgc. Let him clean it up. 
- */ - CDEBUG(D_MOUNT, "MGC still in use\n"); - } - /* Drop a ref to the mounted disk */ - lustre_put_lsi(sb); - return rc; -} -EXPORT_SYMBOL(lustre_common_put_super); - -static void lmd_print(struct lustre_mount_data *lmd) -{ - int i; - - PRINT_CMD(D_MOUNT, " mount data:\n"); - if (lmd_is_client(lmd)) - PRINT_CMD(D_MOUNT, "profile: %s\n", lmd->lmd_profile); - PRINT_CMD(D_MOUNT, "device: %s\n", lmd->lmd_dev); - PRINT_CMD(D_MOUNT, "flags: %x\n", lmd->lmd_flags); - - if (lmd->lmd_opts) - PRINT_CMD(D_MOUNT, "options: %s\n", lmd->lmd_opts); - - if (lmd->lmd_recovery_time_soft) - PRINT_CMD(D_MOUNT, "recovery time soft: %d\n", - lmd->lmd_recovery_time_soft); - - if (lmd->lmd_recovery_time_hard) - PRINT_CMD(D_MOUNT, "recovery time hard: %d\n", - lmd->lmd_recovery_time_hard); - - for (i = 0; i < lmd->lmd_exclude_count; i++) { - PRINT_CMD(D_MOUNT, "exclude %d: OST%04x\n", i, - lmd->lmd_exclude[i]); - } -} - -/* Is this server on the exclusion list */ -int lustre_check_exclusion(struct super_block *sb, char *svname) -{ - struct lustre_sb_info *lsi = s2lsi(sb); - struct lustre_mount_data *lmd = lsi->lsi_lmd; - __u32 index; - int i, rc; - - rc = server_name2index(svname, &index, NULL); - if (rc != LDD_F_SV_TYPE_OST) - /* Only exclude OSTs */ - return 0; - - CDEBUG(D_MOUNT, "Check exclusion %s (%d) in %d of %s\n", svname, - index, lmd->lmd_exclude_count, lmd->lmd_dev); - - for (i = 0; i < lmd->lmd_exclude_count; i++) { - if (index == lmd->lmd_exclude[i]) { - CWARN("Excluding %s (on exclusion list)\n", svname); - return 1; - } - } - return 0; -} - -/* mount -v -o exclude=lustre-OST0001:lustre-OST0002 -t lustre ... */ -static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr) -{ - const char *s1 = ptr, *s2; - __u32 index = 0, *exclude_list; - int rc = 0, devmax; - - /* The shortest an ost name can be is 8 chars: -OST0000. - * We don't actually know the fsname at this time, so in fact - * a user could specify any fsname. 
- */ - devmax = strlen(ptr) / 8 + 1; - - /* temp storage until we figure out how many we have */ - exclude_list = kcalloc(devmax, sizeof(index), GFP_NOFS); - if (!exclude_list) - return -ENOMEM; - - /* we enter this fn pointing at the '=' */ - while (*s1 && *s1 != ' ' && *s1 != ',') { - s1++; - rc = server_name2index(s1, &index, &s2); - if (rc < 0) { - CERROR("Can't parse server name '%s': rc = %d\n", - s1, rc); - break; - } - if (rc == LDD_F_SV_TYPE_OST) - exclude_list[lmd->lmd_exclude_count++] = index; - else - CDEBUG(D_MOUNT, "ignoring exclude %.*s: type = %#x\n", - (uint)(s2 - s1), s1, rc); - s1 = s2; - /* now we are pointing at ':' (next exclude) - * or ',' (end of excludes) - */ - if (lmd->lmd_exclude_count >= devmax) - break; - } - if (rc >= 0) /* non-err */ - rc = 0; - - if (lmd->lmd_exclude_count) { - /* permanent, freed in lustre_free_lsi */ - lmd->lmd_exclude = kcalloc(lmd->lmd_exclude_count, - sizeof(index), GFP_NOFS); - if (lmd->lmd_exclude) { - memcpy(lmd->lmd_exclude, exclude_list, - sizeof(index) * lmd->lmd_exclude_count); - } else { - rc = -ENOMEM; - lmd->lmd_exclude_count = 0; - } - } - kfree(exclude_list); - return rc; -} - -static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr) -{ - char *tail; - int length; - - kfree(lmd->lmd_mgssec); - lmd->lmd_mgssec = NULL; - - tail = strchr(ptr, ','); - if (!tail) - length = strlen(ptr); - else - length = tail - ptr; - - lmd->lmd_mgssec = kzalloc(length + 1, GFP_NOFS); - if (!lmd->lmd_mgssec) - return -ENOMEM; - - memcpy(lmd->lmd_mgssec, ptr, length); - lmd->lmd_mgssec[length] = '\0'; - return 0; -} - -static int lmd_parse_string(char **handle, char *ptr) -{ - char *tail; - int length; - - if (!handle || !ptr) - return -EINVAL; - - kfree(*handle); - *handle = NULL; - - tail = strchr(ptr, ','); - if (!tail) - length = strlen(ptr); - else - length = tail - ptr; - - *handle = kzalloc(length + 1, GFP_NOFS); - if (!*handle) - return -ENOMEM; - - memcpy(*handle, ptr, length); - (*handle)[length] = 
'\0'; - - return 0; -} - -/* Collect multiple values for mgsnid specifiers */ -static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr) -{ - lnet_nid_t nid; - char *tail = *ptr; - char *mgsnid; - int length; - int oldlen = 0; - - /* Find end of nidlist */ - while (class_parse_nid_quiet(tail, &nid, &tail) == 0) - ; - length = tail - *ptr; - if (length == 0) { - LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", *ptr); - return -EINVAL; - } - - if (lmd->lmd_mgs) - oldlen = strlen(lmd->lmd_mgs) + 1; - - mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS); - if (!mgsnid) - return -ENOMEM; - - if (lmd->lmd_mgs) { - /* Multiple mgsnid= are taken to mean failover locations */ - memcpy(mgsnid, lmd->lmd_mgs, oldlen); - mgsnid[oldlen - 1] = ':'; - kfree(lmd->lmd_mgs); - } - memcpy(mgsnid + oldlen, *ptr, length); - mgsnid[oldlen + length] = '\0'; - lmd->lmd_mgs = mgsnid; - *ptr = tail; - - return 0; -} - -/** Parse mount line options - * e.g. mount -v -t lustre -o abort_recov uml1:uml2:/lustre-client /mnt/lustre - * dev is passed as device=uml1:/lustre by mount.lustre - */ -static int lmd_parse(char *options, struct lustre_mount_data *lmd) -{ - char *s1, *s2, *devname = NULL; - struct lustre_mount_data *raw = (struct lustre_mount_data *)options; - int rc = 0; - - LASSERT(lmd); - if (!options) { - LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that /sbin/mount.lustre is installed.\n"); - return -EINVAL; - } - - /* Options should be a string - try to detect old lmd data */ - if ((raw->lmd_magic & 0xffffff00) == (LMD_MAGIC & 0xffffff00)) { - LCONSOLE_ERROR_MSG(0x163, "You're using an old version of /sbin/mount.lustre. 
Please install version %s\n", - LUSTRE_VERSION_STRING); - return -EINVAL; - } - lmd->lmd_magic = LMD_MAGIC; - - lmd->lmd_params = kzalloc(LMD_PARAMS_MAXLEN, GFP_NOFS); - if (!lmd->lmd_params) - return -ENOMEM; - lmd->lmd_params[0] = '\0'; - - /* Set default flags here */ - - s1 = options; - while (*s1) { - int clear = 0; - int time_min = OBD_RECOVERY_TIME_MIN; - char *s3; - - /* Skip whitespace and extra commas */ - while (*s1 == ' ' || *s1 == ',') - s1++; - s3 = s1; - - /* Client options are parsed in ll_options: eg. flock, - * user_xattr, acl - */ - - /* Parse non-ldiskfs options here. Rather than modifying - * ldiskfs, we just zero these out here - */ - if (strncmp(s1, "abort_recov", 11) == 0) { - lmd->lmd_flags |= LMD_FLG_ABORT_RECOV; - clear++; - } else if (strncmp(s1, "recovery_time_soft=", 19) == 0) { - lmd->lmd_recovery_time_soft = max_t(int, - simple_strtoul(s1 + 19, NULL, 10), time_min); - clear++; - } else if (strncmp(s1, "recovery_time_hard=", 19) == 0) { - lmd->lmd_recovery_time_hard = max_t(int, - simple_strtoul(s1 + 19, NULL, 10), time_min); - clear++; - } else if (strncmp(s1, "noir", 4) == 0) { - lmd->lmd_flags |= LMD_FLG_NOIR; /* test purpose only. */ - clear++; - } else if (strncmp(s1, "nosvc", 5) == 0) { - lmd->lmd_flags |= LMD_FLG_NOSVC; - clear++; - } else if (strncmp(s1, "nomgs", 5) == 0) { - lmd->lmd_flags |= LMD_FLG_NOMGS; - clear++; - } else if (strncmp(s1, "noscrub", 7) == 0) { - lmd->lmd_flags |= LMD_FLG_NOSCRUB; - clear++; - } else if (strncmp(s1, PARAM_MGSNODE, - sizeof(PARAM_MGSNODE) - 1) == 0) { - s2 = s1 + sizeof(PARAM_MGSNODE) - 1; - /* Assume the next mount opt is the first - * invalid nid we get to. 
- */ - rc = lmd_parse_mgs(lmd, &s2); - if (rc) - goto invalid; - clear++; - } else if (strncmp(s1, "writeconf", 9) == 0) { - lmd->lmd_flags |= LMD_FLG_WRITECONF; - clear++; - } else if (strncmp(s1, "update", 6) == 0) { - lmd->lmd_flags |= LMD_FLG_UPDATE; - clear++; - } else if (strncmp(s1, "virgin", 6) == 0) { - lmd->lmd_flags |= LMD_FLG_VIRGIN; - clear++; - } else if (strncmp(s1, "noprimnode", 10) == 0) { - lmd->lmd_flags |= LMD_FLG_NO_PRIMNODE; - clear++; - } else if (strncmp(s1, "mgssec=", 7) == 0) { - rc = lmd_parse_mgssec(lmd, s1 + 7); - if (rc) - goto invalid; - s3 = s2; - clear++; - /* ost exclusion list */ - } else if (strncmp(s1, "exclude=", 8) == 0) { - rc = lmd_make_exclusion(lmd, s1 + 7); - if (rc) - goto invalid; - clear++; - } else if (strncmp(s1, "mgs", 3) == 0) { - /* We are an MGS */ - lmd->lmd_flags |= LMD_FLG_MGS; - clear++; - } else if (strncmp(s1, "svname=", 7) == 0) { - rc = lmd_parse_string(&lmd->lmd_profile, s1 + 7); - if (rc) - goto invalid; - clear++; - } else if (strncmp(s1, "param=", 6) == 0) { - size_t length, params_length; - char *tail = strchr(s1 + 6, ','); - - if (!tail) { - length = strlen(s1); - } else { - lnet_nid_t nid; - char *param_str = tail + 1; - int supplementary = 1; - - while (!class_parse_nid_quiet(param_str, &nid, - ¶m_str)) { - supplementary = 0; - } - length = param_str - s1 - supplementary; - } - length -= 6; - params_length = strlen(lmd->lmd_params); - if (params_length + length + 1 >= LMD_PARAMS_MAXLEN) - return -E2BIG; - strncat(lmd->lmd_params, s1 + 6, length); - lmd->lmd_params[params_length + length] = '\0'; - strlcat(lmd->lmd_params, " ", LMD_PARAMS_MAXLEN); - s3 = s1 + 6 + length; - clear++; - } else if (strncmp(s1, "osd=", 4) == 0) { - rc = lmd_parse_string(&lmd->lmd_osd_type, s1 + 4); - if (rc) - goto invalid; - clear++; - } - /* Linux 2.4 doesn't pass the device, so we stuck it at the - * end of the options. 
- */ - else if (strncmp(s1, "device=", 7) == 0) { - devname = s1 + 7; - /* terminate options right before device. device - * must be the last one. - */ - *s1 = '\0'; - break; - } - - /* Find next opt */ - s2 = strchr(s1, ','); - if (!s2) { - if (clear) - *s1 = '\0'; - break; - } - s2++; - if (clear) - memmove(s1, s2, strlen(s2) + 1); - else - s1 = s2; - } - - if (!devname) { - LCONSOLE_ERROR_MSG(0x164, "Can't find the device name (need mount option 'device=...')\n"); - goto invalid; - } - - s1 = strstr(devname, ":/"); - if (s1) { - ++s1; - lmd->lmd_flags |= LMD_FLG_CLIENT; - /* Remove leading /s from fsname */ - while (*++s1 == '/') - ; - /* Freed in lustre_free_lsi */ - lmd->lmd_profile = kasprintf(GFP_NOFS, "%s-client", s1); - if (!lmd->lmd_profile) - return -ENOMEM; - } - - /* Freed in lustre_free_lsi */ - lmd->lmd_dev = kzalloc(strlen(devname) + 1, GFP_NOFS); - if (!lmd->lmd_dev) - return -ENOMEM; - strcpy(lmd->lmd_dev, devname); - - /* Save mount options */ - s1 = options + strlen(options) - 1; - while (s1 >= options && (*s1 == ',' || *s1 == ' ')) - *s1-- = 0; - if (*options != 0) { - /* Freed in lustre_free_lsi */ - lmd->lmd_opts = kzalloc(strlen(options) + 1, GFP_NOFS); - if (!lmd->lmd_opts) - return -ENOMEM; - strcpy(lmd->lmd_opts, options); - } - - lmd_print(lmd); - lmd->lmd_magic = LMD_MAGIC; - - return rc; - -invalid: - CERROR("Bad mount options %s\n", options); - return -EINVAL; -} - -/** This is the entry point for the mount call into Lustre. - * This is called when a server or client is mounted, - * and this is where we start setting things up. - * @param data Mount options (e.g. 
-o flock,abort_recov) - */ -static int lustre_fill_super(struct super_block *sb, void *lmd2_data, int silent) -{ - struct lustre_mount_data *lmd; - struct lustre_sb_info *lsi; - int rc; - - CDEBUG(D_MOUNT | D_VFSTRACE, "VFS Op: sb %p\n", sb); - - lsi = lustre_init_lsi(sb); - if (!lsi) - return -ENOMEM; - lmd = lsi->lsi_lmd; - - /* - * Disable lockdep during mount, because mount locking patterns are - * `special'. - */ - lockdep_off(); - - /* - * LU-639: the obd cleanup of last mount may not finish yet, wait here. - */ - obd_zombie_barrier(); - - /* Figure out the lmd from the mount options */ - if (lmd_parse(lmd2_data, lmd)) { - lustre_put_lsi(sb); - rc = -EINVAL; - goto out; - } - - if (lmd_is_client(lmd)) { - bool have_client = false; - CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile); - if (!client_fill_super) - request_module("lustre"); - spin_lock(&client_lock); - if (client_fill_super && try_module_get(client_mod)) - have_client = true; - spin_unlock(&client_lock); - if (!have_client) { - LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n"); - lustre_put_lsi(sb); - rc = -ENODEV; - } else { - rc = lustre_start_mgc(sb); - if (rc) { - lustre_common_put_super(sb); - goto out; - } - /* Connect and start */ - /* (should always be ll_fill_super) */ - rc = (*client_fill_super)(sb); - /* c_f_s will call lustre_common_put_super on failure, otherwise - * c_f_s will have taken another reference to the module */ - module_put(client_mod); - } - } else { - CERROR("This is client-side-only module, cannot handle server mount.\n"); - rc = -EINVAL; - } - - /* If error happens in fill_super() call, @lsi will be killed there. - * This is why we do not put it here. - */ - goto out; -out: - if (rc) { - CERROR("Unable to mount %s (%d)\n", - s2lsi(sb) ? 
lmd->lmd_dev : "", rc); - } else { - CDEBUG(D_SUPER, "Mount %s complete\n", - lmd->lmd_dev); - } - lockdep_on(); - return rc; -} - -/* We can't call ll_fill_super by name because it lives in a module that - * must be loaded after this one. - */ -void lustre_register_super_ops(struct module *mod, - int (*cfs)(struct super_block *sb), - void (*ksc)(struct super_block *sb)) -{ - spin_lock(&client_lock); - client_mod = mod; - client_fill_super = cfs; - kill_super_cb = ksc; - spin_unlock(&client_lock); -} -EXPORT_SYMBOL(lustre_register_super_ops); - -/***************** FS registration ******************/ -static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags, - const char *devname, void *data) -{ - return mount_nodev(fs_type, flags, data, lustre_fill_super); -} - -static void lustre_kill_super(struct super_block *sb) -{ - struct lustre_sb_info *lsi = s2lsi(sb); - - if (kill_super_cb && lsi) - (*kill_super_cb)(sb); - - kill_anon_super(sb); -} - -/** Register the "lustre" fs type - */ -static struct file_system_type lustre_fs_type = { - .owner = THIS_MODULE, - .name = "lustre", - .mount = lustre_mount, - .kill_sb = lustre_kill_super, - .fs_flags = FS_RENAME_DOES_D_MOVE, -}; -MODULE_ALIAS_FS("lustre"); - -int lustre_register_fs(void) -{ - return register_filesystem(&lustre_fs_type); -} - -int lustre_unregister_fs(void) -{ - return unregister_filesystem(&lustre_fs_type); -} diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c deleted file mode 100644 index c4503bc36591..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/obdo.c +++ /dev/null @@ -1,181 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/obdo.c - * - * Object Devices Class Driver - * These are the only exported functions, they provide some generic - * infrastructure for managing object devices - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include - -void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent) -{ - dst->o_parent_oid = fid_oid(parent); - dst->o_parent_seq = fid_seq(parent); - dst->o_parent_ver = fid_ver(parent); - dst->o_valid |= OBD_MD_FLGENER | OBD_MD_FLFID; -} -EXPORT_SYMBOL(obdo_set_parent_fid); - -/* WARNING: the file systems must take care not to tinker with - * attributes they don't manage (such as blocks). 
- */ -void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid) -{ - u32 newvalid = 0; - - if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME)) - CDEBUG(D_INODE, "valid %x, new time %lu/%lu\n", - valid, LTIME_S(src->i_mtime), - LTIME_S(src->i_ctime)); - - if (valid & OBD_MD_FLATIME) { - dst->o_atime = LTIME_S(src->i_atime); - newvalid |= OBD_MD_FLATIME; - } - if (valid & OBD_MD_FLMTIME) { - dst->o_mtime = LTIME_S(src->i_mtime); - newvalid |= OBD_MD_FLMTIME; - } - if (valid & OBD_MD_FLCTIME) { - dst->o_ctime = LTIME_S(src->i_ctime); - newvalid |= OBD_MD_FLCTIME; - } - if (valid & OBD_MD_FLSIZE) { - dst->o_size = i_size_read(src); - newvalid |= OBD_MD_FLSIZE; - } - if (valid & OBD_MD_FLBLOCKS) { /* allocation of space (x512 bytes) */ - dst->o_blocks = src->i_blocks; - newvalid |= OBD_MD_FLBLOCKS; - } - if (valid & OBD_MD_FLBLKSZ) { /* optimal block size */ - dst->o_blksize = 1 << src->i_blkbits; - newvalid |= OBD_MD_FLBLKSZ; - } - if (valid & OBD_MD_FLTYPE) { - dst->o_mode = (dst->o_mode & S_IALLUGO) | - (src->i_mode & S_IFMT); - newvalid |= OBD_MD_FLTYPE; - } - if (valid & OBD_MD_FLMODE) { - dst->o_mode = (dst->o_mode & S_IFMT) | - (src->i_mode & S_IALLUGO); - newvalid |= OBD_MD_FLMODE; - } - if (valid & OBD_MD_FLUID) { - dst->o_uid = from_kuid(&init_user_ns, src->i_uid); - newvalid |= OBD_MD_FLUID; - } - if (valid & OBD_MD_FLGID) { - dst->o_gid = from_kgid(&init_user_ns, src->i_gid); - newvalid |= OBD_MD_FLGID; - } - if (valid & OBD_MD_FLFLAGS) { - dst->o_flags = src->i_flags; - newvalid |= OBD_MD_FLFLAGS; - } - dst->o_valid |= newvalid; -} -EXPORT_SYMBOL(obdo_from_inode); - -void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj) -{ - ioobj->ioo_oid = oa->o_oi; - if (unlikely(!(oa->o_valid & OBD_MD_FLGROUP))) - ostid_set_seq_mdt0(&ioobj->ioo_oid); - - /* Since 2.4 this does not contain o_mode in the low 16 bits. 
- * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs - */ - ioobj->ioo_max_brw = 0; -} -EXPORT_SYMBOL(obdo_to_ioobj); - -/** - * Create an obdo to send over the wire - */ -void lustre_set_wire_obdo(const struct obd_connect_data *ocd, - struct obdo *wobdo, const struct obdo *lobdo) -{ - *wobdo = *lobdo; - wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; - if (!ocd) - return; - - if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && - fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) { - /* - * Currently OBD_FL_OSTID will only be used when 2.4 echo - * client communicate with pre-2.4 server - */ - wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid); - wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid); - } -} -EXPORT_SYMBOL(lustre_set_wire_obdo); - -/** - * Create a local obdo from a wire based odbo - */ -void lustre_get_wire_obdo(const struct obd_connect_data *ocd, - struct obdo *lobdo, const struct obdo *wobdo) -{ - u32 local_flags = 0; - - if (lobdo->o_valid & OBD_MD_FLFLAGS) - local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; - - *lobdo = *wobdo; - if (local_flags) { - lobdo->o_valid |= OBD_MD_FLFLAGS; - lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; - lobdo->o_flags |= local_flags; - } - if (!ocd) - return; - - if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && - fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) { - /* see above */ - lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq; - lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id; - lobdo->o_oi.oi_fid.f_ver = 0; - } -} -EXPORT_SYMBOL(lustre_get_wire_obdo); diff --git a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c deleted file mode 100644 index 355e888885f4..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c +++ /dev/null @@ -1,58 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/statfs_pack.c - * - * (Un)packing of OST/MDS requests - * - * Author: Andreas Dilger - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include - -void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs) -{ - memset(sfs, 0, sizeof(*sfs)); - sfs->f_type = osfs->os_type; - sfs->f_blocks = osfs->os_blocks; - sfs->f_bfree = osfs->os_bfree; - sfs->f_bavail = osfs->os_bavail; - sfs->f_files = osfs->os_files; - sfs->f_ffree = osfs->os_ffree; - sfs->f_bsize = osfs->os_bsize; - sfs->f_namelen = osfs->os_namelen; -} -EXPORT_SYMBOL(statfs_unpack); diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c deleted file mode 100644 index ec8c6dc5c9a7..000000000000 --- a/drivers/staging/lustre/lustre/obdclass/uuid.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdclass/uuid.c - * - * Public include file for the UUID library - */ - -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include - -void class_uuid_unparse(class_uuid_t uu, struct obd_uuid *out) -{ - sprintf(out->uuid, "%pU", uu); -} -EXPORT_SYMBOL(class_uuid_unparse); diff --git a/drivers/staging/lustre/lustre/obdecho/Makefile b/drivers/staging/lustre/lustre/obdecho/Makefile deleted file mode 100644 index 6be66fbab872..000000000000 --- a/drivers/staging/lustre/lustre/obdecho/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += obdecho.o -obdecho-y := echo_client.o diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c deleted file mode 100644 index b692e76e7108..000000000000 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c +++ /dev/null @@ -1,1729 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_ECHO - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "echo_internal.h" - -/** \defgroup echo_client Echo Client - * @{ - */ - -struct echo_device { - struct cl_device ed_cl; - struct echo_client_obd *ed_ec; - - struct cl_site ed_site_myself; - struct lu_site *ed_site; - struct lu_device *ed_next; -}; - -struct echo_object { - struct cl_object eo_cl; - struct cl_object_header eo_hdr; - - struct echo_device *eo_dev; - struct list_head eo_obj_chain; - struct lov_oinfo *eo_oinfo; - atomic_t eo_npages; - int eo_deleted; -}; - -struct echo_object_conf { - struct cl_object_conf eoc_cl; - struct lov_oinfo **eoc_oinfo; -}; - -struct echo_page { - struct cl_page_slice ep_cl; - struct mutex ep_lock; -}; - -struct echo_lock { - struct cl_lock_slice el_cl; - struct list_head el_chain; - struct echo_object *el_object; - __u64 el_cookie; - atomic_t el_refcount; -}; - -static int echo_client_setup(const struct lu_env *env, - struct obd_device *obddev, - struct lustre_cfg *lcfg); -static int echo_client_cleanup(struct obd_device *obddev); - -/** \defgroup echo_helpers Helper functions - * @{ - */ -static inline struct echo_device *cl2echo_dev(const struct cl_device *dev) -{ - return container_of_safe(dev, struct echo_device, ed_cl); -} - -static inline struct cl_device *echo_dev2cl(struct echo_device *d) -{ - return &d->ed_cl; -} - -static inline struct echo_device *obd2echo_dev(const struct obd_device *obd) -{ - return cl2echo_dev(lu2cl_dev(obd->obd_lu_dev)); -} - -static inline struct cl_object *echo_obj2cl(struct echo_object *eco) -{ - return &eco->eo_cl; -} - -static inline struct echo_object *cl2echo_obj(const struct cl_object *o) -{ - return container_of(o, struct echo_object, eo_cl); -} - -static inline struct echo_page *cl2echo_page(const struct cl_page_slice *s) -{ - return container_of(s, struct echo_page, ep_cl); -} - -static inline struct echo_lock *cl2echo_lock(const 
struct cl_lock_slice *s) -{ - return container_of(s, struct echo_lock, el_cl); -} - -static inline struct cl_lock *echo_lock2cl(const struct echo_lock *ecl) -{ - return ecl->el_cl.cls_lock; -} - -static struct lu_context_key echo_thread_key; -static inline struct echo_thread_info *echo_env_info(const struct lu_env *env) -{ - struct echo_thread_info *info; - - info = lu_context_key_get(&env->le_ctx, &echo_thread_key); - LASSERT(info); - return info; -} - -static inline -struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c) -{ - return container_of(c, struct echo_object_conf, eoc_cl); -} - -/** @} echo_helpers */ -static int cl_echo_object_put(struct echo_object *eco); -static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, - struct page **pages, int npages, int async); - -struct echo_thread_info { - struct echo_object_conf eti_conf; - struct lustre_md eti_md; - - struct cl_2queue eti_queue; - struct cl_io eti_io; - struct cl_lock eti_lock; - struct lu_fid eti_fid; - struct lu_fid eti_fid2; -}; - -/* No session used right now */ -struct echo_session_info { - unsigned long dummy; -}; - -static struct kmem_cache *echo_lock_kmem; -static struct kmem_cache *echo_object_kmem; -static struct kmem_cache *echo_thread_kmem; -static struct kmem_cache *echo_session_kmem; - -static struct lu_kmem_descr echo_caches[] = { - { - .ckd_cache = &echo_lock_kmem, - .ckd_name = "echo_lock_kmem", - .ckd_size = sizeof(struct echo_lock) - }, - { - .ckd_cache = &echo_object_kmem, - .ckd_name = "echo_object_kmem", - .ckd_size = sizeof(struct echo_object) - }, - { - .ckd_cache = &echo_thread_kmem, - .ckd_name = "echo_thread_kmem", - .ckd_size = sizeof(struct echo_thread_info) - }, - { - .ckd_cache = &echo_session_kmem, - .ckd_name = "echo_session_kmem", - .ckd_size = sizeof(struct echo_session_info) - }, - { - .ckd_cache = NULL - } -}; - -/** \defgroup echo_page Page operations - * - * Echo page operations. 
- * - * @{ - */ -static int echo_page_own(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io, int nonblock) -{ - struct echo_page *ep = cl2echo_page(slice); - - if (!nonblock) - mutex_lock(&ep->ep_lock); - else if (!mutex_trylock(&ep->ep_lock)) - return -EAGAIN; - return 0; -} - -static void echo_page_disown(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io) -{ - struct echo_page *ep = cl2echo_page(slice); - - LASSERT(mutex_is_locked(&ep->ep_lock)); - mutex_unlock(&ep->ep_lock); -} - -static void echo_page_discard(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - cl_page_delete(env, slice->cpl_page); -} - -static int echo_page_is_vmlocked(const struct lu_env *env, - const struct cl_page_slice *slice) -{ - if (mutex_is_locked(&cl2echo_page(slice)->ep_lock)) - return -EBUSY; - return -ENODATA; -} - -static void echo_page_completion(const struct lu_env *env, - const struct cl_page_slice *slice, - int ioret) -{ - LASSERT(slice->cpl_page->cp_sync_io); -} - -static void echo_page_fini(const struct lu_env *env, - struct cl_page_slice *slice) -{ - struct echo_object *eco = cl2echo_obj(slice->cpl_obj); - - atomic_dec(&eco->eo_npages); - put_page(slice->cpl_page->cp_vmpage); -} - -static int echo_page_prep(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - return 0; -} - -static int echo_page_print(const struct lu_env *env, - const struct cl_page_slice *slice, - void *cookie, lu_printer_t printer) -{ - struct echo_page *ep = cl2echo_page(slice); - - (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME "-page@%p %d vm@%p\n", - ep, mutex_is_locked(&ep->ep_lock), - slice->cpl_page->cp_vmpage); - return 0; -} - -static const struct cl_page_operations echo_page_ops = { - .cpo_own = echo_page_own, - .cpo_disown = echo_page_disown, - .cpo_discard = echo_page_discard, - .cpo_fini = echo_page_fini, - .cpo_print = echo_page_print, - 
.cpo_is_vmlocked = echo_page_is_vmlocked, - .io = { - [CRT_READ] = { - .cpo_prep = echo_page_prep, - .cpo_completion = echo_page_completion, - }, - [CRT_WRITE] = { - .cpo_prep = echo_page_prep, - .cpo_completion = echo_page_completion, - } - } -}; - -/** @} echo_page */ - -/** \defgroup echo_lock Locking - * - * echo lock operations - * - * @{ - */ -static void echo_lock_fini(const struct lu_env *env, - struct cl_lock_slice *slice) -{ - struct echo_lock *ecl = cl2echo_lock(slice); - - LASSERT(list_empty(&ecl->el_chain)); - kmem_cache_free(echo_lock_kmem, ecl); -} - -static const struct cl_lock_operations echo_lock_ops = { - .clo_fini = echo_lock_fini, -}; - -/** @} echo_lock */ - -/** \defgroup echo_cl_ops cl_object operations - * - * operations for cl_object - * - * @{ - */ -static int echo_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index) -{ - struct echo_page *ep = cl_object_page_slice(obj, page); - struct echo_object *eco = cl2echo_obj(obj); - - get_page(page->cp_vmpage); - mutex_init(&ep->ep_lock); - cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops); - atomic_inc(&eco->eo_npages); - return 0; -} - -static int echo_io_init(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) -{ - return 0; -} - -static int echo_lock_init(const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, - const struct cl_io *unused) -{ - struct echo_lock *el; - - el = kmem_cache_zalloc(echo_lock_kmem, GFP_NOFS); - if (el) { - cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops); - el->el_object = cl2echo_obj(obj); - INIT_LIST_HEAD(&el->el_chain); - atomic_set(&el->el_refcount, 0); - } - return !el ? 
-ENOMEM : 0; -} - -static int echo_conf_set(const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf) -{ - return 0; -} - -static const struct cl_object_operations echo_cl_obj_ops = { - .coo_page_init = echo_page_init, - .coo_lock_init = echo_lock_init, - .coo_io_init = echo_io_init, - .coo_conf_set = echo_conf_set -}; - -/** @} echo_cl_ops */ - -/** \defgroup echo_lu_ops lu_object operations - * - * operations for echo lu object. - * - * @{ - */ -static int echo_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf) -{ - struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev)); - struct echo_client_obd *ec = ed->ed_ec; - struct echo_object *eco = cl2echo_obj(lu2cl(obj)); - const struct cl_object_conf *cconf; - struct echo_object_conf *econf; - - if (ed->ed_next) { - struct lu_object *below; - struct lu_device *under; - - under = ed->ed_next; - below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, - under); - if (!below) - return -ENOMEM; - lu_object_add(obj, below); - } - - cconf = lu2cl_conf(conf); - econf = cl2echo_conf(cconf); - - LASSERT(econf->eoc_oinfo); - /* - * Transfer the oinfo pointer to eco that it won't be - * freed. 
- */ - eco->eo_oinfo = *econf->eoc_oinfo; - *econf->eoc_oinfo = NULL; - - eco->eo_dev = ed; - atomic_set(&eco->eo_npages, 0); - cl_object_page_init(lu2cl(obj), sizeof(struct echo_page)); - - spin_lock(&ec->ec_lock); - list_add_tail(&eco->eo_obj_chain, &ec->ec_objects); - spin_unlock(&ec->ec_lock); - - return 0; -} - -static void echo_object_free(const struct lu_env *env, struct lu_object *obj) -{ - struct echo_object *eco = cl2echo_obj(lu2cl(obj)); - struct echo_client_obd *ec = eco->eo_dev->ed_ec; - - LASSERT(atomic_read(&eco->eo_npages) == 0); - - spin_lock(&ec->ec_lock); - list_del_init(&eco->eo_obj_chain); - spin_unlock(&ec->ec_lock); - - lu_object_fini(obj); - lu_object_header_fini(obj->lo_header); - - kfree(eco->eo_oinfo); - kmem_cache_free(echo_object_kmem, eco); -} - -static int echo_object_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) -{ - struct echo_object *obj = cl2echo_obj(lu2cl(o)); - - return (*p)(env, cookie, "echoclient-object@%p", obj); -} - -static const struct lu_object_operations echo_lu_obj_ops = { - .loo_object_init = echo_object_init, - .loo_object_delete = NULL, - .loo_object_release = NULL, - .loo_object_free = echo_object_free, - .loo_object_print = echo_object_print, - .loo_object_invariant = NULL -}; - -/** @} echo_lu_ops */ - -/** \defgroup echo_lu_dev_ops lu_device operations - * - * Operations for echo lu device. - * - * @{ - */ -static struct lu_object *echo_object_alloc(const struct lu_env *env, - const struct lu_object_header *hdr, - struct lu_device *dev) -{ - struct echo_object *eco; - struct lu_object *obj = NULL; - - /* we're the top dev. 
*/ - LASSERT(!hdr); - eco = kmem_cache_zalloc(echo_object_kmem, GFP_NOFS); - if (eco) { - struct cl_object_header *hdr = &eco->eo_hdr; - - obj = &echo_obj2cl(eco)->co_lu; - cl_object_header_init(hdr); - hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page)); - - lu_object_init(obj, &hdr->coh_lu, dev); - lu_object_add_top(&hdr->coh_lu, obj); - - eco->eo_cl.co_ops = &echo_cl_obj_ops; - obj->lo_ops = &echo_lu_obj_ops; - } - return obj; -} - -static const struct lu_device_operations echo_device_lu_ops = { - .ldo_object_alloc = echo_object_alloc, -}; - -/** @} echo_lu_dev_ops */ - -/** \defgroup echo_init Setup and teardown - * - * Init and fini functions for echo client. - * - * @{ - */ -static int echo_site_init(const struct lu_env *env, struct echo_device *ed) -{ - struct cl_site *site = &ed->ed_site_myself; - int rc; - - /* initialize site */ - rc = cl_site_init(site, &ed->ed_cl); - if (rc) { - CERROR("Cannot initialize site for echo client(%d)\n", rc); - return rc; - } - - rc = lu_site_init_finish(&site->cs_lu); - if (rc) { - cl_site_fini(site); - return rc; - } - - ed->ed_site = &site->cs_lu; - return 0; -} - -static void echo_site_fini(const struct lu_env *env, struct echo_device *ed) -{ - if (ed->ed_site) { - lu_site_fini(ed->ed_site); - ed->ed_site = NULL; - } -} - -static void *echo_thread_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct echo_thread_info *info; - - info = kmem_cache_zalloc(echo_thread_kmem, GFP_NOFS); - if (!info) - info = ERR_PTR(-ENOMEM); - return info; -} - -static void echo_thread_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct echo_thread_info *info = data; - - kmem_cache_free(echo_thread_kmem, info); -} - -static struct lu_context_key echo_thread_key = { - .lct_tags = LCT_CL_THREAD, - .lct_init = echo_thread_key_init, - .lct_fini = echo_thread_key_fini, -}; - -static void *echo_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) 
-{ - struct echo_session_info *session; - - session = kmem_cache_zalloc(echo_session_kmem, GFP_NOFS); - if (!session) - session = ERR_PTR(-ENOMEM); - return session; -} - -static void echo_session_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct echo_session_info *session = data; - - kmem_cache_free(echo_session_kmem, session); -} - -static struct lu_context_key echo_session_key = { - .lct_tags = LCT_SESSION, - .lct_init = echo_session_key_init, - .lct_fini = echo_session_key_fini, -}; - -LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key); - -static struct lu_device *echo_device_alloc(const struct lu_env *env, - struct lu_device_type *t, - struct lustre_cfg *cfg) -{ - struct lu_device *next; - struct echo_device *ed; - struct cl_device *cd; - struct obd_device *obd = NULL; /* to keep compiler happy */ - struct obd_device *tgt; - const char *tgt_type_name; - int rc, err; - - ed = kzalloc(sizeof(*ed), GFP_NOFS); - if (!ed) { - rc = -ENOMEM; - goto out; - } - - cd = &ed->ed_cl; - rc = cl_device_init(cd, t); - if (rc) - goto out_free; - - cd->cd_lu_dev.ld_ops = &echo_device_lu_ops; - - obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd); - LASSERT(env); - - tgt = class_name2obd(lustre_cfg_string(cfg, 1)); - if (!tgt) { - CERROR("Can not find tgt device %s\n", - lustre_cfg_string(cfg, 1)); - rc = -ENODEV; - goto out_device_fini; - } - - next = tgt->obd_lu_dev; - if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) { - CERROR("echo MDT client must be run on server\n"); - rc = -EOPNOTSUPP; - goto out_device_fini; - } - - rc = echo_site_init(env, ed); - if (rc) - goto out_device_fini; - - rc = echo_client_setup(env, obd, cfg); - if (rc) - goto out_site_fini; - - ed->ed_ec = &obd->u.echo_client; - - /* if echo client is to be stacked upon ost device, the next is - * NULL since ost is not a clio device so far - */ - if (next && !lu_device_is_cl(next)) - next = NULL; - - tgt_type_name = 
tgt->obd_type->typ_name; - if (next) { - if (next->ld_site) { - rc = -EBUSY; - goto out_cleanup; - } - - next->ld_site = ed->ed_site; - rc = next->ld_type->ldt_ops->ldto_device_init(env, next, - next->ld_type->ldt_name, - NULL); - if (rc) - goto out_cleanup; - - } else { - LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0); - } - - ed->ed_next = next; - return &cd->cd_lu_dev; - -out_cleanup: - err = echo_client_cleanup(obd); - if (err) - CERROR("Cleanup obd device %s error(%d)\n", - obd->obd_name, err); -out_site_fini: - echo_site_fini(env, ed); -out_device_fini: - cl_device_fini(&ed->ed_cl); -out_free: - kfree(ed); -out: - return ERR_PTR(rc); -} - -static int echo_device_init(const struct lu_env *env, struct lu_device *d, - const char *name, struct lu_device *next) -{ - LBUG(); - return 0; -} - -static struct lu_device *echo_device_fini(const struct lu_env *env, - struct lu_device *d) -{ - struct echo_device *ed = cl2echo_dev(lu2cl_dev(d)); - struct lu_device *next = ed->ed_next; - - while (next) - next = next->ld_type->ldt_ops->ldto_device_fini(env, next); - return NULL; -} - -static void echo_lock_release(const struct lu_env *env, - struct echo_lock *ecl, - int still_used) -{ - struct cl_lock *clk = echo_lock2cl(ecl); - - cl_lock_release(env, clk); -} - -static struct lu_device *echo_device_free(const struct lu_env *env, - struct lu_device *d) -{ - struct echo_device *ed = cl2echo_dev(lu2cl_dev(d)); - struct echo_client_obd *ec = ed->ed_ec; - struct echo_object *eco; - struct lu_device *next = ed->ed_next; - - CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n", - ed, next); - - lu_site_purge(env, ed->ed_site, -1); - - /* check if there are objects still alive. - * It shouldn't have any object because lu_site_purge would cleanup - * all of cached objects. Anyway, probably the echo device is being - * parallelly accessed. 
- */ - spin_lock(&ec->ec_lock); - list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain) - eco->eo_deleted = 1; - spin_unlock(&ec->ec_lock); - - /* purge again */ - lu_site_purge(env, ed->ed_site, -1); - - CDEBUG(D_INFO, - "Waiting for the reference of echo object to be dropped\n"); - - /* Wait for the last reference to be dropped. */ - spin_lock(&ec->ec_lock); - while (!list_empty(&ec->ec_objects)) { - spin_unlock(&ec->ec_lock); - CERROR("echo_client still has objects at cleanup time, wait for 1 second\n"); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - lu_site_purge(env, ed->ed_site, -1); - spin_lock(&ec->ec_lock); - } - spin_unlock(&ec->ec_lock); - - LASSERT(list_empty(&ec->ec_locks)); - - CDEBUG(D_INFO, "No object exists, exiting...\n"); - - echo_client_cleanup(d->ld_obd); - - while (next) - next = next->ld_type->ldt_ops->ldto_device_free(env, next); - - LASSERT(ed->ed_site == d->ld_site); - echo_site_fini(env, ed); - cl_device_fini(&ed->ed_cl); - kfree(ed); - - cl_env_cache_purge(~0); - - return NULL; -} - -static const struct lu_device_type_operations echo_device_type_ops = { - .ldto_init = echo_type_init, - .ldto_fini = echo_type_fini, - - .ldto_start = echo_type_start, - .ldto_stop = echo_type_stop, - - .ldto_device_alloc = echo_device_alloc, - .ldto_device_free = echo_device_free, - .ldto_device_init = echo_device_init, - .ldto_device_fini = echo_device_fini -}; - -static struct lu_device_type echo_device_type = { - .ldt_tags = LU_DEVICE_CL, - .ldt_name = LUSTRE_ECHO_CLIENT_NAME, - .ldt_ops = &echo_device_type_ops, - .ldt_ctx_tags = LCT_CL_THREAD, -}; - -/** @} echo_init */ - -/** \defgroup echo_exports Exported operations - * - * exporting functions to echo client - * - * @{ - */ - -/* Interfaces to echo client obd device */ -static struct echo_object * -cl_echo_object_find(struct echo_device *d, const struct ost_id *oi) -{ - struct lu_env *env; - struct echo_thread_info *info; - struct echo_object_conf *conf; - struct lov_oinfo 
*oinfo = NULL; - struct echo_object *eco; - struct cl_object *obj; - struct lu_fid *fid; - u16 refcheck; - int rc; - - LASSERTF(ostid_id(oi), DOSTID "\n", POSTID(oi)); - LASSERTF(ostid_seq(oi) == FID_SEQ_ECHO, DOSTID "\n", POSTID(oi)); - - /* Never return an object if the obd is to be freed. */ - if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping) - return ERR_PTR(-ENODEV); - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return (void *)env; - - info = echo_env_info(env); - conf = &info->eti_conf; - if (d->ed_next) { - oinfo = kzalloc(sizeof(*oinfo), GFP_NOFS); - if (!oinfo) { - eco = ERR_PTR(-ENOMEM); - goto out; - } - - oinfo->loi_oi = *oi; - conf->eoc_cl.u.coc_oinfo = oinfo; - } - - /* - * If echo_object_init() is successful then ownership of oinfo - * is transferred to the object. - */ - conf->eoc_oinfo = &oinfo; - - fid = &info->eti_fid; - rc = ostid_to_fid(fid, (struct ost_id *)oi, 0); - if (rc != 0) { - eco = ERR_PTR(rc); - goto out; - } - - /* In the function below, .hs_keycmp resolves to - * lu_obj_hop_keycmp() - */ - /* coverity[overrun-buffer-val] */ - obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl); - if (IS_ERR(obj)) { - eco = (void *)obj; - goto out; - } - - eco = cl2echo_obj(obj); - if (eco->eo_deleted) { - cl_object_put(env, obj); - eco = ERR_PTR(-EAGAIN); - } - -out: - kfree(oinfo); - cl_env_put(env, &refcheck); - return eco; -} - -static int cl_echo_object_put(struct echo_object *eco) -{ - struct lu_env *env; - struct cl_object *obj = echo_obj2cl(eco); - u16 refcheck; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - /* an external function to kill an object? 
*/ - if (eco->eo_deleted) { - struct lu_object_header *loh = obj->co_lu.lo_header; - - LASSERT(&eco->eo_hdr == luh2coh(loh)); - set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags); - } - - cl_object_put(env, obj); - cl_env_put(env, &refcheck); - return 0; -} - -static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco, - u64 start, u64 end, int mode, - __u64 *cookie, __u32 enqflags) -{ - struct cl_io *io; - struct cl_lock *lck; - struct cl_object *obj; - struct cl_lock_descr *descr; - struct echo_thread_info *info; - int rc = -ENOMEM; - - info = echo_env_info(env); - io = &info->eti_io; - lck = &info->eti_lock; - obj = echo_obj2cl(eco); - - memset(lck, 0, sizeof(*lck)); - descr = &lck->cll_descr; - descr->cld_obj = obj; - descr->cld_start = cl_index(obj, start); - descr->cld_end = cl_index(obj, end); - descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ; - descr->cld_enq_flags = enqflags; - io->ci_obj = obj; - - rc = cl_lock_request(env, io, lck); - if (rc == 0) { - struct echo_client_obd *ec = eco->eo_dev->ed_ec; - struct echo_lock *el; - - el = cl2echo_lock(cl_lock_at(lck, &echo_device_type)); - spin_lock(&ec->ec_lock); - if (list_empty(&el->el_chain)) { - list_add(&el->el_chain, &ec->ec_locks); - el->el_cookie = ++ec->ec_unique; - } - atomic_inc(&el->el_refcount); - *cookie = el->el_cookie; - spin_unlock(&ec->ec_lock); - } - return rc; -} - -static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, - __u64 cookie) -{ - struct echo_client_obd *ec = ed->ed_ec; - struct echo_lock *ecl = NULL; - struct list_head *el; - int found = 0, still_used = 0; - - spin_lock(&ec->ec_lock); - list_for_each(el, &ec->ec_locks) { - ecl = list_entry(el, struct echo_lock, el_chain); - CDEBUG(D_INFO, "ecl: %p, cookie: %#llx\n", ecl, ecl->el_cookie); - found = (ecl->el_cookie == cookie); - if (found) { - if (atomic_dec_and_test(&ecl->el_refcount)) - list_del_init(&ecl->el_chain); - else - still_used = 1; - break; - } - } - spin_unlock(&ec->ec_lock); - - if 
(!found) - return -ENOENT; - - echo_lock_release(env, ecl, still_used); - return 0; -} - -static void echo_commit_callback(const struct lu_env *env, struct cl_io *io, - struct cl_page *page) -{ - struct echo_thread_info *info; - struct cl_2queue *queue; - - info = echo_env_info(env); - LASSERT(io == &info->eti_io); - - queue = &info->eti_queue; - cl_page_list_add(&queue->c2_qout, page); -} - -static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, - struct page **pages, int npages, int async) -{ - struct lu_env *env; - struct echo_thread_info *info; - struct cl_object *obj = echo_obj2cl(eco); - struct echo_device *ed = eco->eo_dev; - struct cl_2queue *queue; - struct cl_io *io; - struct cl_page *clp; - struct lustre_handle lh = { 0 }; - size_t page_size = cl_page_size(obj); - u16 refcheck; - int rc; - int i; - - LASSERT((offset & ~PAGE_MASK) == 0); - LASSERT(ed->ed_next); - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - info = echo_env_info(env); - io = &info->eti_io; - queue = &info->eti_queue; - - cl_2queue_init(queue); - - io->ci_ignore_layout = 1; - rc = cl_io_init(env, io, CIT_MISC, obj); - if (rc < 0) - goto out; - LASSERT(rc == 0); - - rc = cl_echo_enqueue0(env, eco, offset, - offset + npages * PAGE_SIZE - 1, - rw == READ ? LCK_PR : LCK_PW, &lh.cookie, - CEF_NEVER); - if (rc < 0) - goto error_lock; - - for (i = 0; i < npages; i++) { - LASSERT(pages[i]); - clp = cl_page_find(env, obj, cl_index(obj, offset), - pages[i], CPT_TRANSIENT); - if (IS_ERR(clp)) { - rc = PTR_ERR(clp); - break; - } - LASSERT(clp->cp_type == CPT_TRANSIENT); - - rc = cl_page_own(env, io, clp); - if (rc) { - LASSERT(clp->cp_state == CPS_FREEING); - cl_page_put(env, clp); - break; - } - /* - * Add a page to the incoming page list of 2-queue. - */ - cl_page_list_add(&queue->c2_qin, clp); - - /* drop the reference count for cl_page_find, so that the page - * will be freed in cl_2queue_fini. 
- */ - cl_page_put(env, clp); - cl_page_clip(env, clp, 0, page_size); - - offset += page_size; - } - - if (rc == 0) { - enum cl_req_type typ = rw == READ ? CRT_READ : CRT_WRITE; - - async = async && (typ == CRT_WRITE); - if (async) - rc = cl_io_commit_async(env, io, &queue->c2_qin, - 0, PAGE_SIZE, - echo_commit_callback); - else - rc = cl_io_submit_sync(env, io, typ, queue, 0); - CDEBUG(D_INFO, "echo_client %s write returns %d\n", - async ? "async" : "sync", rc); - } - - cl_echo_cancel0(env, ed, lh.cookie); -error_lock: - cl_2queue_discard(env, io, queue); - cl_2queue_disown(env, io, queue); - cl_2queue_fini(env, queue); - cl_io_fini(env, io); -out: - cl_env_put(env, &refcheck); - return rc; -} - -/** @} echo_exports */ - -static u64 last_object_id; - -static int echo_create_object(const struct lu_env *env, struct echo_device *ed, - struct obdo *oa) -{ - struct echo_object *eco; - struct echo_client_obd *ec = ed->ed_ec; - int rc; - int created = 0; - - if (!(oa->o_valid & OBD_MD_FLID) || - !(oa->o_valid & OBD_MD_FLGROUP) || - !fid_seq_is_echo(ostid_seq(&oa->o_oi))) { - CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi)); - return -EINVAL; - } - - if (!ostid_id(&oa->o_oi)) { - rc = ostid_set_id(&oa->o_oi, ++last_object_id); - if (rc) - goto failed; - } - - rc = obd_create(env, ec->ec_exp, oa); - if (rc != 0) { - CERROR("Cannot create objects: rc = %d\n", rc); - goto failed; - } - created = 1; - - oa->o_valid |= OBD_MD_FLID; - - eco = cl_echo_object_find(ed, &oa->o_oi); - if (IS_ERR(eco)) { - rc = PTR_ERR(eco); - goto failed; - } - cl_echo_object_put(eco); - - CDEBUG(D_INFO, "oa oid " DOSTID "\n", POSTID(&oa->o_oi)); - - failed: - if (created && rc) - obd_destroy(env, ec->ec_exp, oa); - if (rc) - CERROR("create object failed with: rc = %d\n", rc); - return rc; -} - -static int echo_get_object(struct echo_object **ecop, struct echo_device *ed, - struct obdo *oa) -{ - struct echo_object *eco; - int rc; - - if (!(oa->o_valid & OBD_MD_FLID) || !(oa->o_valid & 
OBD_MD_FLGROUP) || - !ostid_id(&oa->o_oi)) { - CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi)); - return -EINVAL; - } - - rc = 0; - eco = cl_echo_object_find(ed, &oa->o_oi); - if (!IS_ERR(eco)) - *ecop = eco; - else - rc = PTR_ERR(eco); - return rc; -} - -static void echo_put_object(struct echo_object *eco) -{ - int rc; - - rc = cl_echo_object_put(eco); - if (rc) - CERROR("%s: echo client drop an object failed: rc = %d\n", - eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc); -} - -static void -echo_client_page_debug_setup(struct page *page, int rw, u64 id, - u64 offset, u64 count) -{ - char *addr; - u64 stripe_off; - u64 stripe_id; - int delta; - - /* no partial pages on the client */ - LASSERT(count == PAGE_SIZE); - - addr = kmap(page); - - for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { - if (rw == OBD_BRW_WRITE) { - stripe_off = offset + delta; - stripe_id = id; - } else { - stripe_off = 0xdeadbeef00c0ffeeULL; - stripe_id = 0xdeadbeef00c0ffeeULL; - } - block_debug_setup(addr + delta, OBD_ECHO_BLOCK_SIZE, - stripe_off, stripe_id); - } - - kunmap(page); -} - -static int echo_client_page_debug_check(struct page *page, u64 id, - u64 offset, u64 count) -{ - u64 stripe_off; - u64 stripe_id; - char *addr; - int delta; - int rc; - int rc2; - - /* no partial pages on the client */ - LASSERT(count == PAGE_SIZE); - - addr = kmap(page); - - for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { - stripe_off = offset + delta; - stripe_id = id; - - rc2 = block_debug_check("test_brw", - addr + delta, OBD_ECHO_BLOCK_SIZE, - stripe_off, stripe_id); - if (rc2 != 0) { - CERROR("Error in echo object %#llx\n", id); - rc = rc2; - } - } - - kunmap(page); - return rc; -} - -static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, - struct echo_object *eco, u64 offset, - u64 count, int async) -{ - u32 npages; - struct brw_page *pga; - struct brw_page *pgp; - struct page **pages; - u64 off; - int i; - int rc; - int verify; - 
gfp_t gfp_mask; - int brw_flags = 0; - - verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID && - (oa->o_valid & OBD_MD_FLFLAGS) != 0 && - (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0); - - gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER; - - LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ); - - if (count <= 0 || - (count & (~PAGE_MASK)) != 0) - return -EINVAL; - - /* XXX think again with misaligned I/O */ - npages = count >> PAGE_SHIFT; - - if (rw == OBD_BRW_WRITE) - brw_flags = OBD_BRW_ASYNC; - - pga = kcalloc(npages, sizeof(*pga), GFP_NOFS); - if (!pga) - return -ENOMEM; - - pages = kcalloc(npages, sizeof(*pages), GFP_NOFS); - if (!pages) { - kfree(pga); - return -ENOMEM; - } - - for (i = 0, pgp = pga, off = offset; - i < npages; - i++, pgp++, off += PAGE_SIZE) { - LASSERT(!pgp->pg); /* for cleanup */ - - rc = -ENOMEM; - pgp->pg = alloc_page(gfp_mask); - if (!pgp->pg) - goto out; - - pages[i] = pgp->pg; - pgp->count = PAGE_SIZE; - pgp->off = off; - pgp->flag = brw_flags; - - if (verify) - echo_client_page_debug_setup(pgp->pg, rw, - ostid_id(&oa->o_oi), off, - pgp->count); - } - - /* brw mode can only be used at client */ - LASSERT(ed->ed_next); - rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async); - - out: - if (rc != 0 || rw != OBD_BRW_READ) - verify = 0; - - for (i = 0, pgp = pga; i < npages; i++, pgp++) { - if (!pgp->pg) - continue; - - if (verify) { - int vrc; - - vrc = echo_client_page_debug_check(pgp->pg, - ostid_id(&oa->o_oi), - pgp->off, pgp->count); - if (vrc != 0 && rc == 0) - rc = vrc; - } - __free_page(pgp->pg); - } - kfree(pga); - kfree(pages); - return rc; -} - -static int echo_client_prep_commit(const struct lu_env *env, - struct obd_export *exp, int rw, - struct obdo *oa, struct echo_object *eco, - u64 offset, u64 count, - u64 batch, int async) -{ - struct obd_ioobj ioo; - struct niobuf_local *lnb; - struct niobuf_remote rnb; - u64 off; - u64 npages, tot_pages; - int i, ret = 0, brw_flags = 0; - - if (count <= 
0 || (count & (~PAGE_MASK)) != 0) - return -EINVAL; - - npages = batch >> PAGE_SHIFT; - tot_pages = count >> PAGE_SHIFT; - - lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); - if (!lnb) { - ret = -ENOMEM; - goto out; - } - - if (rw == OBD_BRW_WRITE && async) - brw_flags |= OBD_BRW_ASYNC; - - obdo_to_ioobj(oa, &ioo); - - off = offset; - - for (; tot_pages > 0; tot_pages -= npages) { - int lpages; - - if (tot_pages < npages) - npages = tot_pages; - - rnb.rnb_offset = off; - rnb.rnb_len = npages * PAGE_SIZE; - rnb.rnb_flags = brw_flags; - ioo.ioo_bufcnt = 1; - off += npages * PAGE_SIZE; - - lpages = npages; - ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb); - if (ret != 0) - goto out; - - for (i = 0; i < lpages; i++) { - struct page *page = lnb[i].lnb_page; - - /* read past eof? */ - if (!page && lnb[i].lnb_rc == 0) - continue; - - if (async) - lnb[i].lnb_flags |= OBD_BRW_ASYNC; - - if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID || - (oa->o_valid & OBD_MD_FLFLAGS) == 0 || - (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0) - continue; - - if (rw == OBD_BRW_WRITE) - echo_client_page_debug_setup(page, rw, - ostid_id(&oa->o_oi), - lnb[i].lnb_file_offset, - lnb[i].lnb_len); - else - echo_client_page_debug_check(page, - ostid_id(&oa->o_oi), - lnb[i].lnb_file_offset, - lnb[i].lnb_len); - } - - ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb, - ret); - if (ret != 0) - goto out; - - /* Reuse env context. 
*/ - lu_context_exit((struct lu_context *)&env->le_ctx); - lu_context_enter((struct lu_context *)&env->le_ctx); - } - -out: - kfree(lnb); - return ret; -} - -static int echo_client_brw_ioctl(const struct lu_env *env, int rw, - struct obd_export *exp, - struct obd_ioctl_data *data) -{ - struct obd_device *obd = class_exp2obd(exp); - struct echo_device *ed = obd2echo_dev(obd); - struct echo_client_obd *ec = ed->ed_ec; - struct obdo *oa = &data->ioc_obdo1; - struct echo_object *eco; - int rc; - int async = 1; - long test_mode; - - LASSERT(oa->o_valid & OBD_MD_FLGROUP); - - rc = echo_get_object(&eco, ed, oa); - if (rc) - return rc; - - oa->o_valid &= ~OBD_MD_FLHANDLE; - - /* OFD/obdfilter works only via prep/commit */ - test_mode = (long)data->ioc_pbuf1; - if (test_mode == 1) - async = 0; - - if (!ed->ed_next && test_mode != 3) { - test_mode = 3; - data->ioc_plen1 = data->ioc_count; - } - - /* Truncate batch size to maximum */ - if (data->ioc_plen1 > PTLRPC_MAX_BRW_SIZE) - data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE; - - switch (test_mode) { - case 1: - /* fall through */ - case 2: - rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset, - data->ioc_count, async); - break; - case 3: - rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco, - data->ioc_offset, data->ioc_count, - data->ioc_plen1, async); - break; - default: - rc = -EINVAL; - } - echo_put_object(eco); - return rc; -} - -static int -echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void __user *uarg) -{ - struct obd_device *obd = exp->exp_obd; - struct echo_device *ed = obd2echo_dev(obd); - struct echo_client_obd *ec = ed->ed_ec; - struct echo_object *eco; - struct obd_ioctl_data *data = karg; - struct lu_env *env; - struct obdo *oa; - struct lu_fid fid; - int rw = OBD_BRW_READ; - int rc = 0; - - oa = &data->ioc_obdo1; - if (!(oa->o_valid & OBD_MD_FLGROUP)) { - oa->o_valid |= OBD_MD_FLGROUP; - ostid_set_seq_echo(&oa->o_oi); - } - - /* This FID is unpacked just for 
validation at this point */ - rc = ostid_to_fid(&fid, &oa->o_oi, 0); - if (rc < 0) - return rc; - - env = kzalloc(sizeof(*env), GFP_NOFS); - if (!env) - return -ENOMEM; - - rc = lu_env_init(env, LCT_DT_THREAD); - if (rc) { - rc = -ENOMEM; - goto out; - } - - switch (cmd) { - case OBD_IOC_CREATE: /* may create echo object */ - if (!capable(CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - rc = echo_create_object(env, ed, oa); - goto out; - - case OBD_IOC_DESTROY: - if (!capable(CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - rc = obd_destroy(env, ec->ec_exp, oa); - if (rc == 0) - eco->eo_deleted = 1; - echo_put_object(eco); - } - goto out; - - case OBD_IOC_GETATTR: - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - rc = obd_getattr(env, ec->ec_exp, oa); - echo_put_object(eco); - } - goto out; - - case OBD_IOC_SETATTR: - if (!capable(CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - rc = obd_setattr(env, ec->ec_exp, oa); - echo_put_object(eco); - } - goto out; - - case OBD_IOC_BRW_WRITE: - if (!capable(CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - rw = OBD_BRW_WRITE; - /* fall through */ - case OBD_IOC_BRW_READ: - rc = echo_client_brw_ioctl(env, rw, exp, data); - goto out; - - default: - CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd); - rc = -ENOTTY; - goto out; - } - -out: - lu_env_fini(env); - kfree(env); - - return rc; -} - -static int echo_client_setup(const struct lu_env *env, - struct obd_device *obddev, struct lustre_cfg *lcfg) -{ - struct echo_client_obd *ec = &obddev->u.echo_client; - struct obd_device *tgt; - struct obd_uuid echo_uuid = { "ECHO_UUID" }; - struct obd_connect_data *ocd = NULL; - int rc; - - if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) { - CERROR("requires a TARGET OBD name\n"); - return -EINVAL; - } - - tgt = class_name2obd(lustre_cfg_string(lcfg, 1)); - if (!tgt || !tgt->obd_attached 
|| !tgt->obd_set_up) { - CERROR("device not attached or not set up (%s)\n", - lustre_cfg_string(lcfg, 1)); - return -EINVAL; - } - - spin_lock_init(&ec->ec_lock); - INIT_LIST_HEAD(&ec->ec_objects); - INIT_LIST_HEAD(&ec->ec_locks); - ec->ec_unique = 0; - - ocd = kzalloc(sizeof(*ocd), GFP_NOFS); - if (!ocd) - return -ENOMEM; - - ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL | - OBD_CONNECT_BRW_SIZE | - OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 | - OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE | - OBD_CONNECT_FID; - ocd->ocd_brw_size = DT_MAX_BRW_SIZE; - ocd->ocd_version = LUSTRE_VERSION_CODE; - ocd->ocd_group = FID_SEQ_ECHO; - - rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL); - - kfree(ocd); - - if (rc != 0) { - CERROR("fail to connect to device %s\n", - lustre_cfg_string(lcfg, 1)); - return rc; - } - - return rc; -} - -static int echo_client_cleanup(struct obd_device *obddev) -{ - struct echo_client_obd *ec = &obddev->u.echo_client; - int rc; - - if (!list_empty(&obddev->obd_exports)) { - CERROR("still has clients!\n"); - return -EBUSY; - } - - LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0); - rc = obd_disconnect(ec->ec_exp); - if (rc != 0) - CERROR("fail to disconnect device: %d\n", rc); - - return rc; -} - -static int echo_client_connect(const struct lu_env *env, - struct obd_export **exp, - struct obd_device *src, struct obd_uuid *cluuid, - struct obd_connect_data *data, void *localdata) -{ - int rc; - struct lustre_handle conn = { 0 }; - - rc = class_connect(&conn, src, cluuid); - if (rc == 0) - *exp = class_conn2export(&conn); - - return rc; -} - -static int echo_client_disconnect(struct obd_export *exp) -{ - int rc; - - if (!exp) { - rc = -EINVAL; - goto out; - } - - rc = class_disconnect(exp); - goto out; - out: - return rc; -} - -static struct obd_ops echo_client_obd_ops = { - .owner = THIS_MODULE, - .iocontrol = echo_client_iocontrol, - .connect = echo_client_connect, - .disconnect = echo_client_disconnect -}; - 
-static int echo_client_init(void) -{ - int rc; - - rc = lu_kmem_init(echo_caches); - if (rc == 0) { - rc = class_register_type(&echo_client_obd_ops, NULL, - LUSTRE_ECHO_CLIENT_NAME, - &echo_device_type); - if (rc) - lu_kmem_fini(echo_caches); - } - return rc; -} - -static void echo_client_exit(void) -{ - class_unregister_type(LUSTRE_ECHO_CLIENT_NAME); - lu_kmem_fini(echo_caches); -} - -static int __init obdecho_init(void) -{ - int rc; - - LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n"); - - LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); - - rc = libcfs_setup(); - if (rc) - return rc; - - return echo_client_init(); -} - -static void /*__exit*/ obdecho_exit(void) -{ - echo_client_exit(); -} - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Lustre Echo Client test driver"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(obdecho_init); -module_exit(obdecho_exit); - -/** @} echo_client */ diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h deleted file mode 100644 index 42faa164fabb..000000000000 --- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 
- * Use is subject to license terms. - * - * Copyright (c) 2012, Whamcloud, Inc. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/obdecho/echo_internal.h - */ - -#ifndef _ECHO_INTERNAL_H -#define _ECHO_INTERNAL_H - -/* The persistent object (i.e. actually stores stuff!) */ -#define ECHO_PERSISTENT_OBJID 1ULL -#define ECHO_PERSISTENT_SIZE ((__u64)(1 << 20)) - -/* block size to use for data verification */ -#define OBD_ECHO_BLOCK_SIZE (4 << 10) - -#endif diff --git a/drivers/staging/lustre/lustre/osc/Makefile b/drivers/staging/lustre/lustre/osc/Makefile deleted file mode 100644 index 30dec90e64e8..000000000000 --- a/drivers/staging/lustre/lustre/osc/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += osc.o -osc-y := osc_request.o osc_dev.o osc_object.o \ - osc_page.o osc_lock.o osc_io.o osc_quota.o osc_cache.o lproc_osc.o diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c deleted file mode 100644 index 6a705bc5420c..000000000000 --- a/drivers/staging/lustre/lustre/osc/lproc_osc.c +++ /dev/null @@ -1,838 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include -#include "osc_internal.h" - -static ssize_t active_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - - return sprintf(buf, "%d\n", !dev->u.cli.cl_import->imp_deactive); -} - -static ssize_t active_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - if (val > 1) - return -ERANGE; - - /* opposite senses */ - if (dev->u.cli.cl_import->imp_deactive == val) - rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val); - else - CDEBUG(D_CONFIG, "activate %ld: ignoring repeat request\n", - val); - - return count; -} -LUSTRE_RW_ATTR(active); - -static ssize_t max_rpcs_in_flight_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - - return sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight); -} - -static ssize_t max_rpcs_in_flight_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - int rc; - unsigned long val; - 
int adding, added, req_count; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - if (val < 1 || val > OSC_MAX_RIF_MAX) - return -ERANGE; - - adding = val - cli->cl_max_rpcs_in_flight; - req_count = atomic_read(&osc_pool_req_count); - if (adding > 0 && req_count < osc_reqpool_maxreqcount) { - /* - * There might be some race which will cause over-limit - * allocation, but it is fine. - */ - if (req_count + adding > osc_reqpool_maxreqcount) - adding = osc_reqpool_maxreqcount - req_count; - - added = osc_rq_pool->prp_populate(osc_rq_pool, adding); - atomic_add(added, &osc_pool_req_count); - } - - spin_lock(&cli->cl_loi_list_lock); - cli->cl_max_rpcs_in_flight = val; - client_adjust_max_dirty(cli); - spin_unlock(&cli->cl_loi_list_lock); - - return count; -} -LUSTRE_RW_ATTR(max_rpcs_in_flight); - -static ssize_t max_dirty_mb_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - long val; - int mult; - - spin_lock(&cli->cl_loi_list_lock); - val = cli->cl_dirty_max_pages; - spin_unlock(&cli->cl_loi_list_lock); - - mult = 1 << (20 - PAGE_SHIFT); - return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult); -} - -static ssize_t max_dirty_mb_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - int rc; - unsigned long pages_number; - - rc = kstrtoul(buffer, 10, &pages_number); - if (rc) - return rc; - - pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ - - if (pages_number <= 0 || - pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) || - pages_number > totalram_pages / 4) /* 1/4 of RAM */ - return -ERANGE; - - spin_lock(&cli->cl_loi_list_lock); - cli->cl_dirty_max_pages = pages_number; - osc_wake_cache_waiters(cli); - spin_unlock(&cli->cl_loi_list_lock); - - 
return count; -} -LUSTRE_RW_ATTR(max_dirty_mb); - -static int osc_cached_mb_seq_show(struct seq_file *m, void *v) -{ - struct obd_device *dev = m->private; - struct client_obd *cli = &dev->u.cli; - int shift = 20 - PAGE_SHIFT; - - seq_printf(m, - "used_mb: %ld\n" - "busy_cnt: %ld\n" - "reclaim: %llu\n", - (atomic_long_read(&cli->cl_lru_in_list) + - atomic_long_read(&cli->cl_lru_busy)) >> shift, - atomic_long_read(&cli->cl_lru_busy), - cli->cl_lru_reclaim); - - return 0; -} - -/* shrink the number of caching pages to a specific number */ -static ssize_t osc_cached_mb_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct obd_device *dev = ((struct seq_file *)file->private_data)->private; - struct client_obd *cli = &dev->u.cli; - long pages_number, rc; - char kernbuf[128]; - int mult; - u64 val; - - if (count >= sizeof(kernbuf)) - return -EINVAL; - - if (copy_from_user(kernbuf, buffer, count)) - return -EFAULT; - kernbuf[count] = 0; - - mult = 1 << (20 - PAGE_SHIFT); - buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) - - kernbuf; - rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult); - if (rc) - return rc; - - if (val > LONG_MAX) - return -ERANGE; - pages_number = (long)val; - - if (pages_number < 0) - return -ERANGE; - - rc = atomic_long_read(&cli->cl_lru_in_list) - pages_number; - if (rc > 0) { - struct lu_env *env; - u16 refcheck; - - env = cl_env_get(&refcheck); - if (!IS_ERR(env)) { - (void)osc_lru_shrink(env, cli, rc, true); - cl_env_put(env, &refcheck); - } - } - - return count; -} - -LPROC_SEQ_FOPS(osc_cached_mb); - -static ssize_t cur_dirty_bytes_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - int len; - - spin_lock(&cli->cl_loi_list_lock); - len = sprintf(buf, "%lu\n", cli->cl_dirty_pages << PAGE_SHIFT); - spin_unlock(&cli->cl_loi_list_lock); - - 
return len; -} -LUSTRE_RO_ATTR(cur_dirty_bytes); - -static ssize_t cur_grant_bytes_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - int len; - - spin_lock(&cli->cl_loi_list_lock); - len = sprintf(buf, "%lu\n", cli->cl_avail_grant); - spin_unlock(&cli->cl_loi_list_lock); - - return len; -} - -static ssize_t cur_grant_bytes_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &obd->u.cli; - int rc; - unsigned long long val; - - rc = kstrtoull(buffer, 10, &val); - if (rc) - return rc; - - /* this is only for shrinking grant */ - spin_lock(&cli->cl_loi_list_lock); - if (val >= cli->cl_avail_grant) { - spin_unlock(&cli->cl_loi_list_lock); - return -EINVAL; - } - spin_unlock(&cli->cl_loi_list_lock); - - if (cli->cl_import->imp_state == LUSTRE_IMP_FULL) - rc = osc_shrink_grant_to_target(cli, val); - if (rc) - return rc; - return count; -} -LUSTRE_RW_ATTR(cur_grant_bytes); - -static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - int len; - - spin_lock(&cli->cl_loi_list_lock); - len = sprintf(buf, "%lu\n", cli->cl_lost_grant); - spin_unlock(&cli->cl_loi_list_lock); - - return len; -} -LUSTRE_RO_ATTR(cur_lost_grant_bytes); - -static ssize_t grant_shrink_interval_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - - return sprintf(buf, "%d\n", obd->u.cli.cl_grant_shrink_interval); -} - -static ssize_t grant_shrink_interval_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct 
obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - if (val <= 0) - return -ERANGE; - - obd->u.cli.cl_grant_shrink_interval = val; - - return count; -} -LUSTRE_RW_ATTR(grant_shrink_interval); - -static ssize_t checksums_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - - return sprintf(buf, "%d\n", obd->u.cli.cl_checksum ? 1 : 0); -} - -static ssize_t checksums_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - obd->u.cli.cl_checksum = (val ? 1 : 0); - - return count; -} -LUSTRE_RW_ATTR(checksums); - -static int osc_checksum_type_seq_show(struct seq_file *m, void *v) -{ - struct obd_device *obd = m->private; - int i; - - DECLARE_CKSUM_NAME; - - if (!obd) - return 0; - - for (i = 0; i < ARRAY_SIZE(cksum_name); i++) { - if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0) - continue; - if (obd->u.cli.cl_cksum_type == (1 << i)) - seq_printf(m, "[%s] ", cksum_name[i]); - else - seq_printf(m, "%s ", cksum_name[i]); - } - seq_putc(m, '\n'); - return 0; -} - -static ssize_t osc_checksum_type_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct obd_device *obd = ((struct seq_file *)file->private_data)->private; - int i; - - DECLARE_CKSUM_NAME; - char kernbuf[10]; - - if (!obd) - return 0; - - if (count > sizeof(kernbuf) - 1) - return -EINVAL; - if (copy_from_user(kernbuf, buffer, count)) - return -EFAULT; - if (count > 0 && kernbuf[count - 1] == '\n') - kernbuf[count - 1] = '\0'; - else - kernbuf[count] = '\0'; - - for (i = 0; i < ARRAY_SIZE(cksum_name); i++) { - if (((1 << i) & 
obd->u.cli.cl_supp_cksum_types) == 0) - continue; - if (!strcmp(kernbuf, cksum_name[i])) { - obd->u.cli.cl_cksum_type = 1 << i; - return count; - } - } - return -EINVAL; -} - -LPROC_SEQ_FOPS(osc_checksum_type); - -static ssize_t resend_count_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - - return sprintf(buf, "%u\n", atomic_read(&obd->u.cli.cl_resends)); -} - -static ssize_t resend_count_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - int rc; - unsigned long val; - - rc = kstrtoul(buffer, 10, &val); - if (rc) - return rc; - - atomic_set(&obd->u.cli.cl_resends, val); - - return count; -} -LUSTRE_RW_ATTR(resend_count); - -static ssize_t contention_seconds_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct osc_device *od = obd2osc_dev(obd); - - return sprintf(buf, "%u\n", od->od_contention_time); -} - -static ssize_t contention_seconds_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct osc_device *od = obd2osc_dev(obd); - int rc; - int val; - - rc = kstrtoint(buffer, 10, &val); - if (rc) - return rc; - - if (val < 0) - return -EINVAL; - - od->od_contention_time = val; - - return count; -} -LUSTRE_RW_ATTR(contention_seconds); - -static ssize_t lockless_truncate_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct osc_device *od = obd2osc_dev(obd); - - return sprintf(buf, "%u\n", od->od_lockless_truncate); -} - -static ssize_t lockless_truncate_store(struct kobject *kobj, - struct attribute *attr, - const char 
*buffer, - size_t count) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - struct osc_device *od = obd2osc_dev(obd); - int rc; - unsigned int val; - - rc = kstrtouint(buffer, 10, &val); - if (rc) - return rc; - - od->od_lockless_truncate = val; - - return count; -} -LUSTRE_RW_ATTR(lockless_truncate); - -static ssize_t destroys_in_flight_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *obd = container_of(kobj, struct obd_device, - obd_kobj); - - return sprintf(buf, "%u\n", - atomic_read(&obd->u.cli.cl_destroy_in_flight)); -} -LUSTRE_RO_ATTR(destroys_in_flight); - -static ssize_t max_pages_per_rpc_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - - return sprintf(buf, "%d\n", cli->cl_max_pages_per_rpc); -} - -static ssize_t max_pages_per_rpc_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - struct obd_connect_data *ocd = &cli->cl_import->imp_connect_data; - int chunk_mask, rc; - unsigned long long val; - - rc = kstrtoull(buffer, 10, &val); - if (rc) - return rc; - - /* if the max_pages is specified in bytes, convert to pages */ - if (val >= ONE_MB_BRW_SIZE) - val >>= PAGE_SHIFT; - - chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1); - /* max_pages_per_rpc must be chunk aligned */ - val = (val + ~chunk_mask) & chunk_mask; - if (!val || (ocd->ocd_brw_size && - val > ocd->ocd_brw_size >> PAGE_SHIFT)) { - return -ERANGE; - } - spin_lock(&cli->cl_loi_list_lock); - cli->cl_max_pages_per_rpc = val; - client_adjust_max_dirty(cli); - spin_unlock(&cli->cl_loi_list_lock); - - return count; -} -LUSTRE_RW_ATTR(max_pages_per_rpc); - -static ssize_t unstable_stats_show(struct kobject *kobj, - struct 
attribute *attr, - char *buf) -{ - struct obd_device *dev = container_of(kobj, struct obd_device, - obd_kobj); - struct client_obd *cli = &dev->u.cli; - long pages; - int mb; - - pages = atomic_long_read(&cli->cl_unstable_count); - mb = (pages * PAGE_SIZE) >> 20; - - return sprintf(buf, "unstable_pages: %20ld\n" - "unstable_mb: %10d\n", pages, mb); -} -LUSTRE_RO_ATTR(unstable_stats); - -LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags); -LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid); -LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid); -LPROC_SEQ_FOPS_RO_TYPE(osc, timeouts); -LPROC_SEQ_FOPS_RO_TYPE(osc, state); - -LPROC_SEQ_FOPS_WR_ONLY(osc, ping); - -LPROC_SEQ_FOPS_RW_TYPE(osc, import); -LPROC_SEQ_FOPS_RW_TYPE(osc, pinger_recov); - -static struct lprocfs_vars lprocfs_osc_obd_vars[] = { - { "ping", &osc_ping_fops, NULL, 0222 }, - { "connect_flags", &osc_connect_flags_fops, NULL, 0 }, - /*{ "filegroups", lprocfs_rd_filegroups, NULL, 0 },*/ - { "ost_server_uuid", &osc_server_uuid_fops, NULL, 0 }, - { "ost_conn_uuid", &osc_conn_uuid_fops, NULL, 0 }, - { "osc_cached_mb", &osc_cached_mb_fops, NULL }, - { "checksum_type", &osc_checksum_type_fops, NULL }, - { "timeouts", &osc_timeouts_fops, NULL, 0 }, - { "import", &osc_import_fops, NULL }, - { "state", &osc_state_fops, NULL, 0 }, - { "pinger_recov", &osc_pinger_recov_fops, NULL }, - { NULL } -}; - -#define pct(a, b) (b ? 
a * 100 / b : 0) - -static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) -{ - struct timespec64 now; - struct obd_device *dev = seq->private; - struct client_obd *cli = &dev->u.cli; - unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum; - int i; - - ktime_get_real_ts64(&now); - - spin_lock(&cli->cl_loi_list_lock); - - seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n", - (s64)now.tv_sec, (unsigned long)now.tv_nsec); - seq_printf(seq, "read RPCs in flight: %d\n", - cli->cl_r_in_flight); - seq_printf(seq, "write RPCs in flight: %d\n", - cli->cl_w_in_flight); - seq_printf(seq, "pending write pages: %d\n", - atomic_read(&cli->cl_pending_w_pages)); - seq_printf(seq, "pending read pages: %d\n", - atomic_read(&cli->cl_pending_r_pages)); - - seq_puts(seq, "\n\t\t\tread\t\t\twrite\n"); - seq_puts(seq, "pages per rpc rpcs % cum % |"); - seq_puts(seq, " rpcs % cum %\n"); - - read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist); - write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist); - - read_cum = 0; - write_cum = 0; - for (i = 0; i < OBD_HIST_MAX; i++) { - unsigned long r = cli->cl_read_page_hist.oh_buckets[i]; - unsigned long w = cli->cl_write_page_hist.oh_buckets[i]; - - read_cum += r; - write_cum += w; - seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - 1 << i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); - if (read_cum == read_tot && write_cum == write_tot) - break; - } - - seq_puts(seq, "\n\t\t\tread\t\t\twrite\n"); - seq_puts(seq, "rpcs in flight rpcs % cum % |"); - seq_puts(seq, " rpcs % cum %\n"); - - read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist); - write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist); - - read_cum = 0; - write_cum = 0; - for (i = 0; i < OBD_HIST_MAX; i++) { - unsigned long r = cli->cl_read_rpc_hist.oh_buckets[i]; - unsigned long w = cli->cl_write_rpc_hist.oh_buckets[i]; - - read_cum += r; - write_cum += w; - seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | 
%10lu %3lu %3lu\n", - i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); - if (read_cum == read_tot && write_cum == write_tot) - break; - } - - seq_puts(seq, "\n\t\t\tread\t\t\twrite\n"); - seq_puts(seq, "offset rpcs % cum % |"); - seq_puts(seq, " rpcs % cum %\n"); - - read_tot = lprocfs_oh_sum(&cli->cl_read_offset_hist); - write_tot = lprocfs_oh_sum(&cli->cl_write_offset_hist); - - read_cum = 0; - write_cum = 0; - for (i = 0; i < OBD_HIST_MAX; i++) { - unsigned long r = cli->cl_read_offset_hist.oh_buckets[i]; - unsigned long w = cli->cl_write_offset_hist.oh_buckets[i]; - - read_cum += r; - write_cum += w; - seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - (i == 0) ? 0 : 1 << (i - 1), - r, pct(r, read_tot), pct(read_cum, read_tot), - w, pct(w, write_tot), pct(write_cum, write_tot)); - if (read_cum == read_tot && write_cum == write_tot) - break; - } - - spin_unlock(&cli->cl_loi_list_lock); - - return 0; -} - -#undef pct - -static ssize_t osc_rpc_stats_seq_write(struct file *file, - const char __user *buf, - size_t len, loff_t *off) -{ - struct seq_file *seq = file->private_data; - struct obd_device *dev = seq->private; - struct client_obd *cli = &dev->u.cli; - - lprocfs_oh_clear(&cli->cl_read_rpc_hist); - lprocfs_oh_clear(&cli->cl_write_rpc_hist); - lprocfs_oh_clear(&cli->cl_read_page_hist); - lprocfs_oh_clear(&cli->cl_write_page_hist); - lprocfs_oh_clear(&cli->cl_read_offset_hist); - lprocfs_oh_clear(&cli->cl_write_offset_hist); - - return len; -} - -LPROC_SEQ_FOPS(osc_rpc_stats); - -static int osc_stats_seq_show(struct seq_file *seq, void *v) -{ - struct timespec64 now; - struct obd_device *dev = seq->private; - struct osc_stats *stats = &obd2osc_dev(dev)->od_stats; - - ktime_get_real_ts64(&now); - - seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n", - (s64)now.tv_sec, (unsigned long)now.tv_nsec); - seq_printf(seq, "lockless_write_bytes\t\t%llu\n", - stats->os_lockless_writes); - 
seq_printf(seq, "lockless_read_bytes\t\t%llu\n", - stats->os_lockless_reads); - seq_printf(seq, "lockless_truncate\t\t%llu\n", - stats->os_lockless_truncates); - return 0; -} - -static ssize_t osc_stats_seq_write(struct file *file, - const char __user *buf, - size_t len, loff_t *off) -{ - struct seq_file *seq = file->private_data; - struct obd_device *dev = seq->private; - struct osc_stats *stats = &obd2osc_dev(dev)->od_stats; - - memset(stats, 0, sizeof(*stats)); - return len; -} - -LPROC_SEQ_FOPS(osc_stats); - -void lproc_osc_attach_seqstat(struct obd_device *dev) -{ - debugfs_create_file("osc_stats", 0644, dev->obd_debugfs_entry, dev, - &osc_stats_fops); - debugfs_create_file("rpc_stats", 0644, dev->obd_debugfs_entry, dev, - &osc_rpc_stats_fops); -} - -static struct attribute *osc_attrs[] = { - &lustre_attr_active.attr, - &lustre_attr_checksums.attr, - &lustre_attr_contention_seconds.attr, - &lustre_attr_cur_dirty_bytes.attr, - &lustre_attr_cur_grant_bytes.attr, - &lustre_attr_cur_lost_grant_bytes.attr, - &lustre_attr_destroys_in_flight.attr, - &lustre_attr_grant_shrink_interval.attr, - &lustre_attr_lockless_truncate.attr, - &lustre_attr_max_dirty_mb.attr, - &lustre_attr_max_pages_per_rpc.attr, - &lustre_attr_max_rpcs_in_flight.attr, - &lustre_attr_resend_count.attr, - &lustre_attr_unstable_stats.attr, - NULL, -}; - -static const struct attribute_group osc_attr_group = { - .attrs = osc_attrs, -}; - -void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars) -{ - lvars->sysfs_vars = &osc_attr_group; - lvars->obd_vars = lprocfs_osc_obd_vars; -} diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c deleted file mode 100644 index f26983004843..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ /dev/null @@ -1,3306 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - * - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * osc cache management. 
- * - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_OSC - -#include "osc_cl_internal.h" -#include "osc_internal.h" - -static int extent_debug; /* set it to be true for more debug */ - -static void osc_update_pending(struct osc_object *obj, int cmd, int delta); -static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext, - enum osc_extent_state state); -static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, - struct osc_async_page *oap, int sent, int rc); -static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap, - int cmd); -static int osc_refresh_count(const struct lu_env *env, - struct osc_async_page *oap, int cmd); -static int osc_io_unplug_async(const struct lu_env *env, - struct client_obd *cli, struct osc_object *osc); -static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, - unsigned int lost_grant); - -static void osc_extent_tree_dump0(int level, struct osc_object *obj, - const char *func, int line); -#define osc_extent_tree_dump(lvl, obj) \ - osc_extent_tree_dump0(lvl, obj, __func__, __LINE__) - -/** \addtogroup osc - * @{ - */ - -/* ------------------ osc extent ------------------ */ -static inline char *ext_flags(struct osc_extent *ext, char *flags) -{ - char *buf = flags; - *buf++ = ext->oe_rw ? 'r' : 'w'; - if (ext->oe_intree) - *buf++ = 'i'; - if (ext->oe_sync) - *buf++ = 'S'; - if (ext->oe_srvlock) - *buf++ = 's'; - if (ext->oe_hp) - *buf++ = 'h'; - if (ext->oe_urgent) - *buf++ = 'u'; - if (ext->oe_memalloc) - *buf++ = 'm'; - if (ext->oe_trunc_pending) - *buf++ = 't'; - if (ext->oe_fsync_wait) - *buf++ = 'Y'; - *buf = 0; - return flags; -} - -static inline char list_empty_marker(struct list_head *list) -{ - return list_empty(list) ? 
'-' : '+'; -} - -#define EXTSTR "[%lu -> %lu/%lu]" -#define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end -static const char *oes_strings[] = { - "inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL }; - -#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \ - struct osc_extent *__ext = (extent); \ - char __buf[16]; \ - \ - CDEBUG(lvl, \ - "extent %p@{" EXTSTR ", " \ - "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \ - /* ----- extent part 0 ----- */ \ - __ext, EXTPARA(__ext), \ - /* ----- part 1 ----- */ \ - atomic_read(&__ext->oe_refc), \ - atomic_read(&__ext->oe_users), \ - list_empty_marker(&__ext->oe_link), \ - oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \ - __ext->oe_obj, \ - /* ----- part 2 ----- */ \ - __ext->oe_grants, __ext->oe_nr_pages, \ - list_empty_marker(&__ext->oe_pages), \ - waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \ - __ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner, \ - /* ----- part 4 ----- */ \ - ## __VA_ARGS__); \ - if (lvl == D_ERROR && __ext->oe_dlmlock) \ - LDLM_ERROR(__ext->oe_dlmlock, "extent: %p", __ext); \ - else \ - LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p", __ext); \ -} while (0) - -#undef EASSERTF -#define EASSERTF(expr, ext, fmt, args...) 
do { \ - if (!(expr)) { \ - OSC_EXTENT_DUMP(D_ERROR, (ext), fmt, ##args); \ - osc_extent_tree_dump(D_ERROR, (ext)->oe_obj); \ - LASSERT(expr); \ - } \ -} while (0) - -#undef EASSERT -#define EASSERT(expr, ext) EASSERTF(expr, ext, "\n") - -static inline struct osc_extent *rb_extent(struct rb_node *n) -{ - return rb_entry_safe(n, struct osc_extent, oe_node); -} - -static inline struct osc_extent *next_extent(struct osc_extent *ext) -{ - if (!ext) - return NULL; - - LASSERT(ext->oe_intree); - return rb_extent(rb_next(&ext->oe_node)); -} - -static inline struct osc_extent *prev_extent(struct osc_extent *ext) -{ - if (!ext) - return NULL; - - LASSERT(ext->oe_intree); - return rb_extent(rb_prev(&ext->oe_node)); -} - -static inline struct osc_extent *first_extent(struct osc_object *obj) -{ - return rb_extent(rb_first(&obj->oo_root)); -} - -/* object must be locked by caller. */ -static int osc_extent_sanity_check0(struct osc_extent *ext, - const char *func, const int line) -{ - struct osc_object *obj = ext->oe_obj; - struct osc_async_page *oap; - size_t page_count; - int rc = 0; - - if (!osc_object_is_locked(obj)) { - rc = 9; - goto out; - } - - if (ext->oe_state >= OES_STATE_MAX) { - rc = 10; - goto out; - } - - if (atomic_read(&ext->oe_refc) <= 0) { - rc = 20; - goto out; - } - - if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users)) { - rc = 30; - goto out; - } - - switch (ext->oe_state) { - case OES_INV: - if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages)) - rc = 35; - else - rc = 0; - goto out; - case OES_ACTIVE: - if (atomic_read(&ext->oe_users) == 0) { - rc = 40; - goto out; - } - if (ext->oe_hp) { - rc = 50; - goto out; - } - if (ext->oe_fsync_wait && !ext->oe_urgent) { - rc = 55; - goto out; - } - break; - case OES_CACHE: - if (ext->oe_grants == 0) { - rc = 60; - goto out; - } - if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp) { - rc = 65; - goto out; - } - /* fall through */ - default: - if (atomic_read(&ext->oe_users) > 0) { - rc = 70; - 
goto out; - } - } - - if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start) { - rc = 80; - goto out; - } - - if (ext->oe_sync && ext->oe_grants > 0) { - rc = 90; - goto out; - } - - if (ext->oe_dlmlock && !ldlm_is_failed(ext->oe_dlmlock)) { - struct ldlm_extent *extent; - - extent = &ext->oe_dlmlock->l_policy_data.l_extent; - if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) && - extent->end >= cl_offset(osc2cl(obj), ext->oe_max_end))) { - rc = 100; - goto out; - } - - if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))) { - rc = 102; - goto out; - } - } - - if (ext->oe_nr_pages > ext->oe_mppr) { - rc = 105; - goto out; - } - - /* Do not verify page list if extent is in RPC. This is because an - * in-RPC extent is supposed to be exclusively accessible w/o lock. - */ - if (ext->oe_state > OES_CACHE) { - rc = 0; - goto out; - } - - if (!extent_debug) { - rc = 0; - goto out; - } - - page_count = 0; - list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { - pgoff_t index = osc_index(oap2osc(oap)); - ++page_count; - if (index > ext->oe_end || index < ext->oe_start) { - rc = 110; - goto out; - } - } - if (page_count != ext->oe_nr_pages) { - rc = 120; - goto out; - } - -out: - if (rc != 0) - OSC_EXTENT_DUMP(D_ERROR, ext, - "%s:%d sanity check %p failed with rc = %d\n", - func, line, ext, rc); - return rc; -} - -#define sanity_check_nolock(ext) \ - osc_extent_sanity_check0(ext, __func__, __LINE__) - -#define sanity_check(ext) ({ \ - int __res; \ - osc_object_lock((ext)->oe_obj); \ - __res = sanity_check_nolock(ext); \ - osc_object_unlock((ext)->oe_obj); \ - __res; \ -}) - -/** - * sanity check - to make sure there is no overlapped extent in the tree. 
- */ -static int osc_extent_is_overlapped(struct osc_object *obj, - struct osc_extent *ext) -{ - struct osc_extent *tmp; - - LASSERT(osc_object_is_locked(obj)); - - if (!extent_debug) - return 0; - - for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) { - if (tmp == ext) - continue; - if (tmp->oe_end >= ext->oe_start && - tmp->oe_start <= ext->oe_end) - return 1; - } - return 0; -} - -static void osc_extent_state_set(struct osc_extent *ext, int state) -{ - LASSERT(osc_object_is_locked(ext->oe_obj)); - LASSERT(state >= OES_INV && state < OES_STATE_MAX); - - /* Never try to sanity check a state changing extent :-) */ - /* LASSERT(sanity_check_nolock(ext) == 0); */ - - /* TODO: validate the state machine */ - ext->oe_state = state; - wake_up_all(&ext->oe_waitq); -} - -static struct osc_extent *osc_extent_alloc(struct osc_object *obj) -{ - struct osc_extent *ext; - - ext = kmem_cache_zalloc(osc_extent_kmem, GFP_NOFS); - if (!ext) - return NULL; - - RB_CLEAR_NODE(&ext->oe_node); - ext->oe_obj = obj; - cl_object_get(osc2cl(obj)); - atomic_set(&ext->oe_refc, 1); - atomic_set(&ext->oe_users, 0); - INIT_LIST_HEAD(&ext->oe_link); - ext->oe_state = OES_INV; - INIT_LIST_HEAD(&ext->oe_pages); - init_waitqueue_head(&ext->oe_waitq); - ext->oe_dlmlock = NULL; - - return ext; -} - -static void osc_extent_free(struct osc_extent *ext) -{ - kmem_cache_free(osc_extent_kmem, ext); -} - -static struct osc_extent *osc_extent_get(struct osc_extent *ext) -{ - LASSERT(atomic_read(&ext->oe_refc) >= 0); - atomic_inc(&ext->oe_refc); - return ext; -} - -static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext) -{ - LASSERT(atomic_read(&ext->oe_refc) > 0); - if (atomic_dec_and_test(&ext->oe_refc)) { - LASSERT(list_empty(&ext->oe_link)); - LASSERT(atomic_read(&ext->oe_users) == 0); - LASSERT(ext->oe_state == OES_INV); - LASSERT(!ext->oe_intree); - - if (ext->oe_dlmlock) { - lu_ref_add(&ext->oe_dlmlock->l_reference, - "osc_extent", ext); - LDLM_LOCK_PUT(ext->oe_dlmlock); - 
ext->oe_dlmlock = NULL; - } - cl_object_put(env, osc2cl(ext->oe_obj)); - osc_extent_free(ext); - } -} - -/** - * osc_extent_put_trust() is a special version of osc_extent_put() when - * it's known that the caller is not the last user. This is to address the - * problem of lacking of lu_env ;-). - */ -static void osc_extent_put_trust(struct osc_extent *ext) -{ - LASSERT(atomic_read(&ext->oe_refc) > 1); - LASSERT(osc_object_is_locked(ext->oe_obj)); - atomic_dec(&ext->oe_refc); -} - -/** - * Return the extent which includes pgoff @index, or return the greatest - * previous extent in the tree. - */ -static struct osc_extent *osc_extent_search(struct osc_object *obj, - pgoff_t index) -{ - struct rb_node *n = obj->oo_root.rb_node; - struct osc_extent *tmp, *p = NULL; - - LASSERT(osc_object_is_locked(obj)); - while (n) { - tmp = rb_extent(n); - if (index < tmp->oe_start) { - n = n->rb_left; - } else if (index > tmp->oe_end) { - p = rb_extent(n); - n = n->rb_right; - } else { - return tmp; - } - } - return p; -} - -/* - * Return the extent covering @index, otherwise return NULL. - * caller must have held object lock. - */ -static struct osc_extent *osc_extent_lookup(struct osc_object *obj, - pgoff_t index) -{ - struct osc_extent *ext; - - ext = osc_extent_search(obj, index); - if (ext && ext->oe_start <= index && index <= ext->oe_end) - return osc_extent_get(ext); - return NULL; -} - -/* caller must have held object lock. 
*/ -static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) -{ - struct rb_node **n = &obj->oo_root.rb_node; - struct rb_node *parent = NULL; - struct osc_extent *tmp; - - LASSERT(ext->oe_intree == 0); - LASSERT(ext->oe_obj == obj); - LASSERT(osc_object_is_locked(obj)); - while (*n) { - tmp = rb_extent(*n); - parent = *n; - - if (ext->oe_end < tmp->oe_start) - n = &(*n)->rb_left; - else if (ext->oe_start > tmp->oe_end) - n = &(*n)->rb_right; - else - EASSERTF(0, tmp, EXTSTR "\n", EXTPARA(ext)); - } - rb_link_node(&ext->oe_node, parent, n); - rb_insert_color(&ext->oe_node, &obj->oo_root); - osc_extent_get(ext); - ext->oe_intree = 1; -} - -/* caller must have held object lock. */ -static void osc_extent_erase(struct osc_extent *ext) -{ - struct osc_object *obj = ext->oe_obj; - - LASSERT(osc_object_is_locked(obj)); - if (ext->oe_intree) { - rb_erase(&ext->oe_node, &obj->oo_root); - ext->oe_intree = 0; - /* rbtree held a refcount */ - osc_extent_put_trust(ext); - } -} - -static struct osc_extent *osc_extent_hold(struct osc_extent *ext) -{ - struct osc_object *obj = ext->oe_obj; - - LASSERT(osc_object_is_locked(obj)); - LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE); - if (ext->oe_state == OES_CACHE) { - osc_extent_state_set(ext, OES_ACTIVE); - osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages); - } - atomic_inc(&ext->oe_users); - list_del_init(&ext->oe_link); - return osc_extent_get(ext); -} - -static void __osc_extent_remove(struct osc_extent *ext) -{ - LASSERT(osc_object_is_locked(ext->oe_obj)); - LASSERT(list_empty(&ext->oe_pages)); - osc_extent_erase(ext); - list_del_init(&ext->oe_link); - osc_extent_state_set(ext, OES_INV); - OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n"); -} - -static void osc_extent_remove(struct osc_extent *ext) -{ - struct osc_object *obj = ext->oe_obj; - - osc_object_lock(obj); - __osc_extent_remove(ext); - osc_object_unlock(obj); -} - -/** - * This function is used to merge extents to get 
better performance. It checks - * if @cur and @victim are contiguous at chunk level. - */ -static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, - struct osc_extent *victim) -{ - struct osc_object *obj = cur->oe_obj; - pgoff_t chunk_start; - pgoff_t chunk_end; - int ppc_bits; - - LASSERT(cur->oe_state == OES_CACHE); - LASSERT(osc_object_is_locked(obj)); - if (!victim) - return -EINVAL; - - if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait) - return -EBUSY; - - if (cur->oe_max_end != victim->oe_max_end) - return -ERANGE; - - LASSERT(cur->oe_dlmlock == victim->oe_dlmlock); - ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT; - chunk_start = cur->oe_start >> ppc_bits; - chunk_end = cur->oe_end >> ppc_bits; - if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && - chunk_end + 1 != victim->oe_start >> ppc_bits) - return -ERANGE; - - OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur); - - cur->oe_start = min(cur->oe_start, victim->oe_start); - cur->oe_end = max(cur->oe_end, victim->oe_end); - cur->oe_grants += victim->oe_grants; - cur->oe_nr_pages += victim->oe_nr_pages; - /* only the following bits are needed to merge */ - cur->oe_urgent |= victim->oe_urgent; - cur->oe_memalloc |= victim->oe_memalloc; - list_splice_init(&victim->oe_pages, &cur->oe_pages); - list_del_init(&victim->oe_link); - victim->oe_nr_pages = 0; - - osc_extent_get(victim); - __osc_extent_remove(victim); - osc_extent_put(env, victim); - - OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim); - return 0; -} - -/** - * Drop user count of osc_extent, and unplug IO asynchronously. 
- */ -void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) -{ - struct osc_object *obj = ext->oe_obj; - - LASSERT(atomic_read(&ext->oe_users) > 0); - LASSERT(sanity_check(ext) == 0); - LASSERT(ext->oe_grants > 0); - - if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) { - LASSERT(ext->oe_state == OES_ACTIVE); - if (ext->oe_trunc_pending) { - /* a truncate process is waiting for this extent. - * This may happen due to a race, check - * osc_cache_truncate_start(). - */ - osc_extent_state_set(ext, OES_TRUNC); - ext->oe_trunc_pending = 0; - } else { - osc_extent_state_set(ext, OES_CACHE); - osc_update_pending(obj, OBD_BRW_WRITE, - ext->oe_nr_pages); - - /* try to merge the previous and next extent. */ - osc_extent_merge(env, ext, prev_extent(ext)); - osc_extent_merge(env, ext, next_extent(ext)); - - if (ext->oe_urgent) - list_move_tail(&ext->oe_link, - &obj->oo_urgent_exts); - } - osc_object_unlock(obj); - - osc_io_unplug_async(env, osc_cli(obj), obj); - } - osc_extent_put(env, ext); -} - -static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2) -{ - return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start); -} - -/** - * Find or create an extent which includes @index, core function to manage - * extent tree. 
- */ -static struct osc_extent *osc_extent_find(const struct lu_env *env, - struct osc_object *obj, pgoff_t index, - unsigned int *grants) -{ - struct client_obd *cli = osc_cli(obj); - struct osc_lock *olck; - struct cl_lock_descr *descr; - struct osc_extent *cur; - struct osc_extent *ext; - struct osc_extent *conflict = NULL; - struct osc_extent *found = NULL; - pgoff_t chunk; - pgoff_t max_end; - unsigned int max_pages; /* max_pages_per_rpc */ - unsigned int chunksize; - int ppc_bits; /* pages per chunk bits */ - pgoff_t chunk_mask; - int rc; - - cur = osc_extent_alloc(obj); - if (!cur) - return ERR_PTR(-ENOMEM); - - olck = osc_env_io(env)->oi_write_osclock; - LASSERTF(olck, "page %lu is not covered by lock\n", index); - LASSERT(olck->ols_state == OLS_GRANTED); - - descr = &olck->ols_cl.cls_lock->cll_descr; - LASSERT(descr->cld_mode >= CLM_WRITE); - - LASSERT(cli->cl_chunkbits >= PAGE_SHIFT); - ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; - chunk_mask = ~((1 << ppc_bits) - 1); - chunksize = 1 << cli->cl_chunkbits; - chunk = index >> ppc_bits; - - /* align end to rpc edge, rpc size may not be a power 2 integer. 
*/ - max_pages = cli->cl_max_pages_per_rpc; - LASSERT((max_pages & ~chunk_mask) == 0); - max_end = index - (index % max_pages) + max_pages - 1; - max_end = min_t(pgoff_t, max_end, descr->cld_end); - - /* initialize new extent by parameters so far */ - cur->oe_max_end = max_end; - cur->oe_start = index & chunk_mask; - cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1; - if (cur->oe_start < descr->cld_start) - cur->oe_start = descr->cld_start; - if (cur->oe_end > max_end) - cur->oe_end = max_end; - cur->oe_grants = 0; - cur->oe_mppr = max_pages; - if (olck->ols_dlmlock) { - LASSERT(olck->ols_hold); - cur->oe_dlmlock = LDLM_LOCK_GET(olck->ols_dlmlock); - lu_ref_add(&olck->ols_dlmlock->l_reference, "osc_extent", cur); - } - - /* grants has been allocated by caller */ - LASSERTF(*grants >= chunksize + cli->cl_extent_tax, - "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax); - LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR "\n", - EXTPARA(cur)); - -restart: - osc_object_lock(obj); - ext = osc_extent_search(obj, cur->oe_start); - if (!ext) - ext = first_extent(obj); - while (ext) { - pgoff_t ext_chk_start = ext->oe_start >> ppc_bits; - pgoff_t ext_chk_end = ext->oe_end >> ppc_bits; - - LASSERT(sanity_check_nolock(ext) == 0); - if (chunk > ext_chk_end + 1) - break; - - /* if covering by different locks, no chance to match */ - if (olck->ols_dlmlock != ext->oe_dlmlock) { - EASSERTF(!overlapped(ext, cur), ext, - EXTSTR "\n", EXTPARA(cur)); - - ext = next_extent(ext); - continue; - } - - /* discontiguous chunks? */ - if (chunk + 1 < ext_chk_start) { - ext = next_extent(ext); - continue; - } - - /* ok, from now on, ext and cur have these attrs: - * 1. covered by the same lock - * 2. contiguous at chunk level or overlapping. - */ - - if (overlapped(ext, cur)) { - /* cur is the minimum unit, so overlapping means - * full contain. 
- */ - EASSERTF((ext->oe_start <= cur->oe_start && - ext->oe_end >= cur->oe_end), - ext, EXTSTR "\n", EXTPARA(cur)); - - if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) { - /* for simplicity, we wait for this extent to - * finish before going forward. - */ - conflict = osc_extent_get(ext); - break; - } - - found = osc_extent_hold(ext); - break; - } - - /* non-overlapped extent */ - if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) { - /* we can't do anything for a non OES_CACHE extent, or - * if there is someone waiting for this extent to be - * flushed, try next one. - */ - ext = next_extent(ext); - continue; - } - - /* check if they belong to the same rpc slot before trying to - * merge. the extents are not overlapped and contiguous at - * chunk level to get here. - */ - if (ext->oe_max_end != max_end) { - /* if they don't belong to the same RPC slot or - * max_pages_per_rpc has ever changed, do not merge. - */ - ext = next_extent(ext); - continue; - } - - /* it's required that an extent must be contiguous at chunk - * level so that we know the whole extent is covered by grant - * (the pages in the extent are NOT required to be contiguous). - * Otherwise, it will be too much difficult to know which - * chunks have grants allocated. 
- */ - - /* try to do front merge - extend ext's start */ - if (chunk + 1 == ext_chk_start) { - /* ext must be chunk size aligned */ - EASSERT((ext->oe_start & ~chunk_mask) == 0, ext); - - /* pull ext's start back to cover cur */ - ext->oe_start = cur->oe_start; - ext->oe_grants += chunksize; - LASSERT(*grants >= chunksize); - *grants -= chunksize; - - found = osc_extent_hold(ext); - } else if (chunk == ext_chk_end + 1) { - /* rear merge */ - ext->oe_end = cur->oe_end; - ext->oe_grants += chunksize; - LASSERT(*grants >= chunksize); - *grants -= chunksize; - - /* try to merge with the next one because we just fill - * in a gap - */ - if (osc_extent_merge(env, ext, next_extent(ext)) == 0) - /* we can save extent tax from next extent */ - *grants += cli->cl_extent_tax; - - found = osc_extent_hold(ext); - } - if (found) - break; - - ext = next_extent(ext); - } - - osc_extent_tree_dump(D_CACHE, obj); - if (found) { - LASSERT(!conflict); - if (!IS_ERR(found)) { - LASSERT(found->oe_dlmlock == cur->oe_dlmlock); - OSC_EXTENT_DUMP(D_CACHE, found, - "found caching ext for %lu.\n", index); - } - } else if (!conflict) { - /* create a new extent */ - EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur); - cur->oe_grants = chunksize + cli->cl_extent_tax; - LASSERT(*grants >= cur->oe_grants); - *grants -= cur->oe_grants; - - cur->oe_state = OES_CACHE; - found = osc_extent_hold(cur); - osc_extent_insert(obj, cur); - OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n", - index, descr->cld_end); - } - osc_object_unlock(obj); - - if (conflict) { - LASSERT(!found); - - /* waiting for IO to finish. Please notice that it's impossible - * to be an OES_TRUNC extent. - */ - rc = osc_extent_wait(env, conflict, OES_INV); - osc_extent_put(env, conflict); - conflict = NULL; - if (rc < 0) { - found = ERR_PTR(rc); - goto out; - } - - goto restart; - } - -out: - osc_extent_put(env, cur); - return found; -} - -/** - * Called when IO is finished to an extent. 
- */ -int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, - int sent, int rc) -{ - struct client_obd *cli = osc_cli(ext->oe_obj); - struct osc_async_page *oap; - struct osc_async_page *tmp; - int nr_pages = ext->oe_nr_pages; - int lost_grant = 0; - int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096; - __u64 last_off = 0; - int last_count = -1; - - OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n"); - - ext->oe_rc = rc ?: ext->oe_nr_pages; - EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext); - - osc_lru_add_batch(cli, &ext->oe_pages); - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { - list_del_init(&oap->oap_rpc_item); - list_del_init(&oap->oap_pending_item); - if (last_off <= oap->oap_obj_off) { - last_off = oap->oap_obj_off; - last_count = oap->oap_count; - } - - --ext->oe_nr_pages; - osc_ap_completion(env, cli, oap, sent, rc); - } - EASSERT(ext->oe_nr_pages == 0, ext); - - if (!sent) { - lost_grant = ext->oe_grants; - } else if (blocksize < PAGE_SIZE && - last_count != PAGE_SIZE) { - /* For short writes we shouldn't count parts of pages that - * span a whole chunk on the OST side, or our accounting goes - * wrong. Should match the code in filter_grant_check. - */ - int offset = last_off & ~PAGE_MASK; - int count = last_count + (offset & (blocksize - 1)); - int end = (offset + last_count) & (blocksize - 1); - - if (end) - count += blocksize - end; - - lost_grant = PAGE_SIZE - count; - } - if (ext->oe_grants > 0) - osc_free_grant(cli, nr_pages, lost_grant); - - osc_extent_remove(ext); - /* put the refcount for RPC */ - osc_extent_put(env, ext); - return 0; -} - -static int extent_wait_cb(struct osc_extent *ext, enum osc_extent_state state) -{ - int ret; - - osc_object_lock(ext->oe_obj); - ret = ext->oe_state == state; - osc_object_unlock(ext->oe_obj); - - return ret; -} - -/** - * Wait for the extent's state to become @state. 
- */ -static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext, - enum osc_extent_state state) -{ - struct osc_object *obj = ext->oe_obj; - int rc = 0; - - osc_object_lock(obj); - LASSERT(sanity_check_nolock(ext) == 0); - /* `Kick' this extent only if the caller is waiting for it to be - * written out. - */ - if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp && - !ext->oe_trunc_pending) { - if (ext->oe_state == OES_ACTIVE) { - ext->oe_urgent = 1; - } else if (ext->oe_state == OES_CACHE) { - ext->oe_urgent = 1; - osc_extent_hold(ext); - rc = 1; - } - } - osc_object_unlock(obj); - if (rc == 1) - osc_extent_release(env, ext); - - /* wait for the extent until its state becomes @state */ - rc = wait_event_idle_timeout(ext->oe_waitq, - extent_wait_cb(ext, state), 600 * HZ); - if (rc == 0) { - OSC_EXTENT_DUMP(D_ERROR, ext, - "%s: wait ext to %u timedout, recovery in progress?\n", - cli_name(osc_cli(obj)), state); - - wait_event_idle(ext->oe_waitq, extent_wait_cb(ext, state)); - } - if (ext->oe_rc < 0) - rc = ext->oe_rc; - else - rc = 0; - return rc; -} - -/** - * Discard pages with index greater than @size. If @ext is overlapped with - * @size, then partial truncate happens. - */ -static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, - bool partial) -{ - struct lu_env *env; - struct cl_io *io; - struct osc_object *obj = ext->oe_obj; - struct client_obd *cli = osc_cli(obj); - struct osc_async_page *oap; - struct osc_async_page *tmp; - int pages_in_chunk = 0; - int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; - __u64 trunc_chunk = trunc_index >> ppc_bits; - int grants = 0; - int nr_pages = 0; - int rc = 0; - u16 refcheck; - - LASSERT(sanity_check(ext) == 0); - EASSERT(ext->oe_state == OES_TRUNC, ext); - EASSERT(!ext->oe_urgent, ext); - - /* Request new lu_env. - * We can't use that env from osc_cache_truncate_start() because - * it's from lov_io_sub and not fully initialized. 
- */ - env = cl_env_get(&refcheck); - io = &osc_env_info(env)->oti_io; - io->ci_obj = cl_object_top(osc2cl(obj)); - io->ci_ignore_layout = 1; - rc = cl_io_init(env, io, CIT_MISC, io->ci_obj); - if (rc < 0) - goto out; - - /* discard all pages with index greater then trunc_index */ - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { - pgoff_t index = osc_index(oap2osc(oap)); - struct cl_page *page = oap2cl_page(oap); - - LASSERT(list_empty(&oap->oap_rpc_item)); - - /* only discard the pages with their index greater than - * trunc_index, and ... - */ - if (index < trunc_index || - (index == trunc_index && partial)) { - /* accounting how many pages remaining in the chunk - * so that we can calculate grants correctly. */ - if (index >> ppc_bits == trunc_chunk) - ++pages_in_chunk; - continue; - } - - list_del_init(&oap->oap_pending_item); - - cl_page_get(page); - lu_ref_add(&page->cp_reference, "truncate", current); - - if (cl_page_own(env, io, page) == 0) { - cl_page_discard(env, io, page); - cl_page_disown(env, io, page); - } else { - LASSERT(page->cp_state == CPS_FREEING); - LASSERT(0); - } - - lu_ref_del(&page->cp_reference, "truncate", current); - cl_page_put(env, page); - - --ext->oe_nr_pages; - ++nr_pages; - } - EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial, - ext->oe_nr_pages == 0), - ext, "trunc_index %lu, partial %d\n", trunc_index, partial); - - osc_object_lock(obj); - if (ext->oe_nr_pages == 0) { - LASSERT(pages_in_chunk == 0); - grants = ext->oe_grants; - ext->oe_grants = 0; - } else { /* calculate how many grants we can free */ - int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk; - pgoff_t last_index; - - /* if there is no pages in this chunk, we can also free grants - * for the last chunk - */ - if (pages_in_chunk == 0) { - /* if this is the 1st chunk and no pages in this chunk, - * ext->oe_nr_pages must be zero, so we should be in - * the other if-clause. 
- */ - LASSERT(trunc_chunk > 0); - --trunc_chunk; - ++chunks; - } - - /* this is what we can free from this extent */ - grants = chunks << cli->cl_chunkbits; - ext->oe_grants -= grants; - last_index = ((trunc_chunk + 1) << ppc_bits) - 1; - ext->oe_end = min(last_index, ext->oe_max_end); - LASSERT(ext->oe_end >= ext->oe_start); - LASSERT(ext->oe_grants > 0); - } - osc_object_unlock(obj); - - if (grants > 0 || nr_pages > 0) - osc_free_grant(cli, nr_pages, grants); - -out: - cl_io_fini(env, io); - cl_env_put(env, &refcheck); - return rc; -} - -/** - * This function is used to make the extent prepared for transfer. - * A race with flushing page - ll_writepage() has to be handled cautiously. - */ -static int osc_extent_make_ready(const struct lu_env *env, - struct osc_extent *ext) -{ - struct osc_async_page *oap; - struct osc_async_page *last = NULL; - struct osc_object *obj = ext->oe_obj; - unsigned int page_count = 0; - int rc; - - /* we're going to grab page lock, so object lock must not be taken. */ - LASSERT(sanity_check(ext) == 0); - /* in locking state, any process should not touch this extent. */ - EASSERT(ext->oe_state == OES_LOCKING, ext); - EASSERT(ext->oe_owner, ext); - - OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n"); - - list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { - ++page_count; - if (!last || last->oap_obj_off < oap->oap_obj_off) - last = oap; - - /* checking ASYNC_READY is race safe */ - if ((oap->oap_async_flags & ASYNC_READY) != 0) - continue; - - rc = osc_make_ready(env, oap, OBD_BRW_WRITE); - switch (rc) { - case 0: - spin_lock(&oap->oap_lock); - oap->oap_async_flags |= ASYNC_READY; - spin_unlock(&oap->oap_lock); - break; - case -EALREADY: - LASSERT((oap->oap_async_flags & ASYNC_READY) != 0); - break; - default: - LASSERTF(0, "unknown return code: %d\n", rc); - } - } - - LASSERT(page_count == ext->oe_nr_pages); - LASSERT(last); - /* the last page is the only one we need to refresh its count by - * the size of file. 
- */ - if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { - int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); - - LASSERT(last_oap_count > 0); - LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE); - last->oap_count = last_oap_count; - spin_lock(&last->oap_lock); - last->oap_async_flags |= ASYNC_COUNT_STABLE; - spin_unlock(&last->oap_lock); - } - - /* for the rest of pages, we don't need to call osf_refresh_count() - * because it's known they are not the last page - */ - list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { - if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { - oap->oap_count = PAGE_SIZE - oap->oap_page_off; - spin_lock(&last->oap_lock); - oap->oap_async_flags |= ASYNC_COUNT_STABLE; - spin_unlock(&last->oap_lock); - } - } - - osc_object_lock(obj); - osc_extent_state_set(ext, OES_RPC); - osc_object_unlock(obj); - /* get a refcount for RPC. */ - osc_extent_get(ext); - - return 0; -} - -/** - * Quick and simple version of osc_extent_find(). This function is frequently - * called to expand the extent for the same IO. To expand the extent, the - * page index must be in the same or next chunk of ext->oe_end. 
- */ -static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, - unsigned int *grants) -{ - struct osc_object *obj = ext->oe_obj; - struct client_obd *cli = osc_cli(obj); - struct osc_extent *next; - int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; - pgoff_t chunk = index >> ppc_bits; - pgoff_t end_chunk; - pgoff_t end_index; - unsigned int chunksize = 1 << cli->cl_chunkbits; - int rc = 0; - - LASSERT(ext->oe_max_end >= index && ext->oe_start <= index); - osc_object_lock(obj); - LASSERT(sanity_check_nolock(ext) == 0); - end_chunk = ext->oe_end >> ppc_bits; - if (chunk > end_chunk + 1) { - rc = -ERANGE; - goto out; - } - - if (end_chunk >= chunk) { - rc = 0; - goto out; - } - - LASSERT(end_chunk + 1 == chunk); - /* try to expand this extent to cover @index */ - end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1); - - next = next_extent(ext); - if (next && next->oe_start <= end_index) { - /* complex mode - overlapped with the next extent, - * this case will be handled by osc_extent_find() - */ - rc = -EAGAIN; - goto out; - } - - ext->oe_end = end_index; - ext->oe_grants += chunksize; - LASSERT(*grants >= chunksize); - *grants -= chunksize; - EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext, - "overlapped after expanding for %lu.\n", index); - -out: - osc_object_unlock(obj); - return rc; -} - -static void osc_extent_tree_dump0(int level, struct osc_object *obj, - const char *func, int line) -{ - struct osc_extent *ext; - int cnt; - - CDEBUG(level, "Dump object %p extents at %s:%d, mppr: %u.\n", - obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc); - - /* osc_object_lock(obj); */ - cnt = 1; - for (ext = first_extent(obj); ext; ext = next_extent(ext)) - OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++); - - cnt = 1; - list_for_each_entry(ext, &obj->oo_hp_exts, oe_link) - OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++); - - cnt = 1; - list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link) - OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++); 
- - cnt = 1; - list_for_each_entry(ext, &obj->oo_reading_exts, oe_link) - OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++); - /* osc_object_unlock(obj); */ -} - -/* ------------------ osc extent end ------------------ */ - -static inline int osc_is_ready(struct osc_object *osc) -{ - return !list_empty(&osc->oo_ready_item) || - !list_empty(&osc->oo_hp_ready_item); -} - -#define OSC_IO_DEBUG(OSC, STR, args...) \ - CDEBUG(D_CACHE, "obj %p ready %d|%c|%c wr %d|%c|%c rd %d|%c " STR, \ - (OSC), osc_is_ready(OSC), \ - list_empty_marker(&(OSC)->oo_hp_ready_item), \ - list_empty_marker(&(OSC)->oo_ready_item), \ - atomic_read(&(OSC)->oo_nr_writes), \ - list_empty_marker(&(OSC)->oo_hp_exts), \ - list_empty_marker(&(OSC)->oo_urgent_exts), \ - atomic_read(&(OSC)->oo_nr_reads), \ - list_empty_marker(&(OSC)->oo_reading_exts), \ - ##args) - -static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap, - int cmd) -{ - struct osc_page *opg = oap2osc_page(oap); - struct cl_page *page = oap2cl_page(oap); - int result; - - LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */ - - result = cl_page_make_ready(env, page, CRT_WRITE); - if (result == 0) - opg->ops_submit_time = jiffies; - return result; -} - -static int osc_refresh_count(const struct lu_env *env, - struct osc_async_page *oap, int cmd) -{ - struct osc_page *opg = oap2osc_page(oap); - pgoff_t index = osc_index(oap2osc(oap)); - struct cl_object *obj; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - - int result; - loff_t kms; - - /* readpage queues with _COUNT_STABLE, shouldn't get here. 
*/ - LASSERT(!(cmd & OBD_BRW_READ)); - obj = opg->ops_cl.cpl_obj; - - cl_object_attr_lock(obj); - result = cl_object_attr_get(env, obj, attr); - cl_object_attr_unlock(obj); - if (result < 0) - return result; - kms = attr->cat_kms; - if (cl_offset(obj, index) >= kms) - /* catch race with truncate */ - return 0; - else if (cl_offset(obj, index + 1) > kms) - /* catch sub-page write at end of file */ - return kms % PAGE_SIZE; - else - return PAGE_SIZE; -} - -static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, - int cmd, int rc) -{ - struct osc_page *opg = oap2osc_page(oap); - struct cl_page *page = oap2cl_page(oap); - enum cl_req_type crt; - int srvlock; - - cmd &= ~OBD_BRW_NOQUOTA; - LASSERTF(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ), - "cp_state:%u, cmd:%d\n", page->cp_state, cmd); - LASSERTF(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE), - "cp_state:%u, cmd:%d\n", page->cp_state, cmd); - LASSERT(opg->ops_transfer_pinned); - - crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE; - /* Clear opg->ops_transfer_pinned before VM lock is released. */ - opg->ops_transfer_pinned = 0; - - opg->ops_submit_time = 0; - srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK; - - /* statistic */ - if (rc == 0 && srvlock) { - struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev; - struct osc_stats *stats = &lu2osc_dev(ld)->od_stats; - size_t bytes = oap->oap_count; - - if (crt == CRT_READ) - stats->os_lockless_reads += bytes; - else - stats->os_lockless_writes += bytes; - } - - /* - * This has to be the last operation with the page, as locks are - * released in cl_page_completion() and nothing except for the - * reference counter protects page from concurrent reclaim. - */ - lu_ref_del(&page->cp_reference, "transfer", page); - - cl_page_completion(env, page, crt, rc); - cl_page_put(env, page); - - return 0; -} - -#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) 
do { \ - struct client_obd *__tmp = (cli); \ - CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu " \ - "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \ - "lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \ - cli_name(__tmp), \ - __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \ - atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \ - __tmp->cl_lost_grant, __tmp->cl_avail_grant, \ - __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \ - atomic_long_read(&__tmp->cl_lru_in_list), \ - atomic_long_read(&__tmp->cl_lru_busy), \ - atomic_read(&__tmp->cl_lru_shrinkers), ##args); \ -} while (0) - -/* caller must hold loi_list_lock */ -static void osc_consume_write_grant(struct client_obd *cli, - struct brw_page *pga) -{ - assert_spin_locked(&cli->cl_loi_list_lock); - LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); - atomic_long_inc(&obd_dirty_pages); - cli->cl_dirty_pages++; - pga->flag |= OBD_BRW_FROM_GRANT; - CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", - PAGE_SIZE, pga, pga->pg); - osc_update_next_shrink(cli); -} - -/* the companion to osc_consume_write_grant, called when a brw has completed. - * must be called with the loi lock held. - */ -static void osc_release_write_grant(struct client_obd *cli, - struct brw_page *pga) -{ - assert_spin_locked(&cli->cl_loi_list_lock); - if (!(pga->flag & OBD_BRW_FROM_GRANT)) - return; - - pga->flag &= ~OBD_BRW_FROM_GRANT; - atomic_long_dec(&obd_dirty_pages); - cli->cl_dirty_pages--; - if (pga->flag & OBD_BRW_NOCACHE) { - pga->flag &= ~OBD_BRW_NOCACHE; - atomic_long_dec(&obd_dirty_transit_pages); - cli->cl_dirty_transit--; - } -} - -/** - * To avoid sleeping with object lock held, it's good for us allocate enough - * grants before entering into critical section. 
- * - * spin_lock held by caller - */ -static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes) -{ - int rc = -EDQUOT; - - if (cli->cl_avail_grant >= bytes) { - cli->cl_avail_grant -= bytes; - cli->cl_reserved_grant += bytes; - rc = 0; - } - return rc; -} - -static void __osc_unreserve_grant(struct client_obd *cli, - unsigned int reserved, unsigned int unused) -{ - /* it's quite normal for us to get more grant than reserved. - * Thinking about a case that two extents merged by adding a new - * chunk, we can save one extent tax. If extent tax is greater than - * one chunk, we can save more grant by adding a new chunk - */ - cli->cl_reserved_grant -= reserved; - if (unused > reserved) { - cli->cl_avail_grant += reserved; - cli->cl_lost_grant += unused - reserved; - } else { - cli->cl_avail_grant += unused; - } -} - -static void osc_unreserve_grant(struct client_obd *cli, - unsigned int reserved, unsigned int unused) -{ - spin_lock(&cli->cl_loi_list_lock); - __osc_unreserve_grant(cli, reserved, unused); - if (unused > 0) - osc_wake_cache_waiters(cli); - spin_unlock(&cli->cl_loi_list_lock); -} - -/** - * Free grant after IO is finished or canceled. - * - * @lost_grant is used to remember how many grants we have allocated but not - * used, we should return these grants to OST. There're two cases where grants - * can be lost: - * 1. truncate; - * 2. blocksize at OST is less than PAGE_SIZE and a partial page was - * written. In this case OST may use less chunks to serve this partial - * write. OSTs don't actually know the page size on the client side. so - * clients have to calculate lost grant by the blocksize on the OST. - * See filter_grant_check() for details. 
- */ -static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, - unsigned int lost_grant) -{ - unsigned long grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; - - spin_lock(&cli->cl_loi_list_lock); - atomic_long_sub(nr_pages, &obd_dirty_pages); - cli->cl_dirty_pages -= nr_pages; - cli->cl_lost_grant += lost_grant; - if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { - /* borrow some grant from truncate to avoid the case that - * truncate uses up all avail grant - */ - cli->cl_lost_grant -= grant; - cli->cl_avail_grant += grant; - } - osc_wake_cache_waiters(cli); - spin_unlock(&cli->cl_loi_list_lock); - CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n", - lost_grant, cli->cl_lost_grant, - cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_SHIFT); -} - -/** - * The companion to osc_enter_cache(), called when @oap is no longer part of - * the dirty accounting due to error. - */ -static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap) -{ - spin_lock(&cli->cl_loi_list_lock); - osc_release_write_grant(cli, &oap->oap_brw_page); - spin_unlock(&cli->cl_loi_list_lock); -} - -/** - * Non-blocking version of osc_enter_cache() that consumes grant only when it - * is available. 
- */ -static int osc_enter_cache_try(struct client_obd *cli, - struct osc_async_page *oap, - int bytes, int transient) -{ - int rc; - - OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes); - - rc = osc_reserve_grant(cli, bytes); - if (rc < 0) - return 0; - - if (cli->cl_dirty_pages < cli->cl_dirty_max_pages && - atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { - osc_consume_write_grant(cli, &oap->oap_brw_page); - if (transient) { - cli->cl_dirty_transit++; - atomic_long_inc(&obd_dirty_transit_pages); - oap->oap_brw_flags |= OBD_BRW_NOCACHE; - } - rc = 1; - } else { - __osc_unreserve_grant(cli, bytes, bytes); - rc = 0; - } - return rc; -} - -static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw) -{ - int rc; - - spin_lock(&cli->cl_loi_list_lock); - rc = list_empty(&ocw->ocw_entry); - spin_unlock(&cli->cl_loi_list_lock); - return rc; -} - -/** - * The main entry to reserve dirty page accounting. Usually the grant reserved - * in this function will be freed in bulk in osc_free_grant() unless it fails - * to add osc cache, in that case, it will be freed in osc_exit_cache(). - * - * The process will be put into sleep if it's already run out of grant. - */ -static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, - struct osc_async_page *oap, int bytes) -{ - struct osc_object *osc = oap->oap_obj; - struct lov_oinfo *loi = osc->oo_oinfo; - struct osc_cache_waiter ocw; - unsigned long timeout = (AT_OFF ? obd_timeout : at_max) * HZ; - int rc = -EDQUOT; - - OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes); - - spin_lock(&cli->cl_loi_list_lock); - - /* force the caller to try sync io. 
this can jump the list - * of queued writes and create a discontiguous rpc stream - */ - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || - !cli->cl_dirty_max_pages || cli->cl_ar.ar_force_sync || - loi->loi_ar.ar_force_sync) { - OSC_DUMP_GRANT(D_CACHE, cli, "forced sync i/o\n"); - rc = -EDQUOT; - goto out; - } - - /* Hopefully normal case - cache space and write credits available */ - if (osc_enter_cache_try(cli, oap, bytes, 0)) { - OSC_DUMP_GRANT(D_CACHE, cli, "granted from cache\n"); - rc = 0; - goto out; - } - - /* We can get here for two reasons: too many dirty pages in cache, or - * run out of grants. In both cases we should write dirty pages out. - * Adding a cache waiter will trigger urgent write-out no matter what - * RPC size will be. - * The exiting condition is no avail grants and no dirty pages caching, - * that really means there is no space on the OST. - */ - init_waitqueue_head(&ocw.ocw_waitq); - ocw.ocw_oap = oap; - ocw.ocw_grant = bytes; - while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) { - list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters); - ocw.ocw_rc = 0; - spin_unlock(&cli->cl_loi_list_lock); - - osc_io_unplug_async(env, cli, NULL); - - CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n", - cli_name(cli), &ocw, oap); - - rc = wait_event_idle_timeout(ocw.ocw_waitq, - ocw_granted(cli, &ocw), timeout); - - spin_lock(&cli->cl_loi_list_lock); - - if (rc == 0) { - /* wait_event is interrupted by signal, or timed out */ - list_del_init(&ocw.ocw_entry); - rc = -ETIMEDOUT; - break; - } - LASSERT(list_empty(&ocw.ocw_entry)); - rc = ocw.ocw_rc; - - if (rc != -EDQUOT) - break; - if (osc_enter_cache_try(cli, oap, bytes, 0)) { - rc = 0; - break; - } - } - - switch (rc) { - case 0: - OSC_DUMP_GRANT(D_CACHE, cli, "finally got grant space\n"); - break; - case -ETIMEDOUT: - OSC_DUMP_GRANT(D_CACHE, cli, - "timeout, fall back to sync i/o\n"); - osc_extent_tree_dump(D_CACHE, osc); - /* fall back to synchronous I/O */ - rc = -EDQUOT; - break; - 
case -EINTR: - /* Ensures restartability - LU-3581 */ - OSC_DUMP_GRANT(D_CACHE, cli, "interrupted\n"); - rc = -ERESTARTSYS; - break; - case -EDQUOT: - OSC_DUMP_GRANT(D_CACHE, cli, - "no grant space, fall back to sync i/o\n"); - break; - default: - CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n", - cli_name(cli), &ocw, rc); - break; - } -out: - spin_unlock(&cli->cl_loi_list_lock); - return rc; -} - -/* caller must hold loi_list_lock */ -void osc_wake_cache_waiters(struct client_obd *cli) -{ - struct list_head *l, *tmp; - struct osc_cache_waiter *ocw; - - list_for_each_safe(l, tmp, &cli->cl_cache_waiters) { - ocw = list_entry(l, struct osc_cache_waiter, ocw_entry); - list_del_init(&ocw->ocw_entry); - - ocw->ocw_rc = -EDQUOT; - /* we can't dirty more */ - if ((cli->cl_dirty_pages > cli->cl_dirty_max_pages) || - (atomic_long_read(&obd_dirty_pages) + 1 > - obd_max_dirty_pages)) { - CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %ld\n", - cli->cl_dirty_pages, cli->cl_dirty_max_pages, - obd_max_dirty_pages); - goto wakeup; - } - - if (osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0)) - ocw->ocw_rc = 0; -wakeup: - CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n", - ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc); - - wake_up(&ocw->ocw_waitq); - } -} - -static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc) -{ - int hprpc = !!list_empty(&osc->oo_hp_exts); - - return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc; -} - -/* This maintains the lists of pending pages to read/write for a given object - * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint() - * to quickly find objects that are ready to send an RPC. 
- */ -static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, - int cmd) -{ - int invalid_import = 0; - - /* if we have an invalid import we want to drain the queued pages - * by forcing them through rpcs that immediately fail and complete - * the pages. recovery relies on this to empty the queued pages - * before canceling the locks and evicting down the llite pages - */ - if (!cli->cl_import || cli->cl_import->imp_invalid) - invalid_import = 1; - - if (cmd & OBD_BRW_WRITE) { - if (atomic_read(&osc->oo_nr_writes) == 0) - return 0; - if (invalid_import) { - CDEBUG(D_CACHE, "invalid import forcing RPC\n"); - return 1; - } - if (!list_empty(&osc->oo_hp_exts)) { - CDEBUG(D_CACHE, "high prio request forcing RPC\n"); - return 1; - } - if (!list_empty(&osc->oo_urgent_exts)) { - CDEBUG(D_CACHE, "urgent request forcing RPC\n"); - return 1; - } - /* trigger a write rpc stream as long as there are dirtiers - * waiting for space. as they're waiting, they're not going to - * create more pages to coalesce with what's waiting.. - */ - if (!list_empty(&cli->cl_cache_waiters)) { - CDEBUG(D_CACHE, "cache waiters forcing RPC\n"); - return 1; - } - if (atomic_read(&osc->oo_nr_writes) >= - cli->cl_max_pages_per_rpc) - return 1; - } else { - if (atomic_read(&osc->oo_nr_reads) == 0) - return 0; - if (invalid_import) { - CDEBUG(D_CACHE, "invalid import forcing RPC\n"); - return 1; - } - /* all read are urgent. 
*/ - if (!list_empty(&osc->oo_reading_exts)) - return 1; - } - - return 0; -} - -static void osc_update_pending(struct osc_object *obj, int cmd, int delta) -{ - struct client_obd *cli = osc_cli(obj); - - if (cmd & OBD_BRW_WRITE) { - atomic_add(delta, &obj->oo_nr_writes); - atomic_add(delta, &cli->cl_pending_w_pages); - LASSERT(atomic_read(&obj->oo_nr_writes) >= 0); - } else { - atomic_add(delta, &obj->oo_nr_reads); - atomic_add(delta, &cli->cl_pending_r_pages); - LASSERT(atomic_read(&obj->oo_nr_reads) >= 0); - } - OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta); -} - -static int osc_makes_hprpc(struct osc_object *obj) -{ - return !list_empty(&obj->oo_hp_exts); -} - -static void on_list(struct list_head *item, struct list_head *list, int should_be_on) -{ - if (list_empty(item) && should_be_on) - list_add_tail(item, list); - else if (!list_empty(item) && !should_be_on) - list_del_init(item); -} - -/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc - * can find pages to build into rpcs quickly - */ -static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc) -{ - if (osc_makes_hprpc(osc)) { - /* HP rpc */ - on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, 0); - on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 1); - } else { - on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 0); - on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, - osc_makes_rpc(cli, osc, OBD_BRW_WRITE) || - osc_makes_rpc(cli, osc, OBD_BRW_READ)); - } - - on_list(&osc->oo_write_item, &cli->cl_loi_write_list, - atomic_read(&osc->oo_nr_writes) > 0); - - on_list(&osc->oo_read_item, &cli->cl_loi_read_list, - atomic_read(&osc->oo_nr_reads) > 0); - - return osc_is_ready(osc); -} - -static int osc_list_maint(struct client_obd *cli, struct osc_object *osc) -{ - int is_ready; - - spin_lock(&cli->cl_loi_list_lock); - is_ready = __osc_list_maint(cli, osc); - spin_unlock(&cli->cl_loi_list_lock); - - return is_ready; -} - 
-/* this is trying to propagate async writeback errors back up to the - * application. As an async write fails we record the error code for later if - * the app does an fsync. As long as errors persist we force future rpcs to be - * sync so that the app can get a sync error and break the cycle of queueing - * pages for which writeback will fail. - */ -static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, - int rc) -{ - if (rc) { - if (!ar->ar_rc) - ar->ar_rc = rc; - - ar->ar_force_sync = 1; - ar->ar_min_xid = ptlrpc_sample_next_xid(); - return; - } - - if (ar->ar_force_sync && (xid >= ar->ar_min_xid)) - ar->ar_force_sync = 0; -} - -/* this must be called holding the loi list lock to give coverage to exit_cache, - * async_flag maintenance, and oap_request - */ -static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, - struct osc_async_page *oap, int sent, int rc) -{ - struct osc_object *osc = oap->oap_obj; - struct lov_oinfo *loi = osc->oo_oinfo; - __u64 xid = 0; - - if (oap->oap_request) { - xid = ptlrpc_req_xid(oap->oap_request); - ptlrpc_req_finished(oap->oap_request); - oap->oap_request = NULL; - } - - /* As the transfer for this page is being done, clear the flags */ - spin_lock(&oap->oap_lock); - oap->oap_async_flags = 0; - spin_unlock(&oap->oap_lock); - oap->oap_interrupted = 0; - - if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) { - spin_lock(&cli->cl_loi_list_lock); - osc_process_ar(&cli->cl_ar, xid, rc); - osc_process_ar(&loi->loi_ar, xid, rc); - spin_unlock(&cli->cl_loi_list_lock); - } - - rc = osc_completion(env, oap, oap->oap_cmd, rc); - if (rc) - CERROR("completion on oap %p obj %p returns %d.\n", - oap, osc, rc); -} - -struct extent_rpc_data { - struct list_head *erd_rpc_list; - unsigned int erd_page_count; - unsigned int erd_max_pages; - unsigned int erd_max_chunks; - unsigned int erd_max_extents; -}; - -static inline unsigned int osc_extent_chunks(const struct osc_extent *ext) -{ - struct client_obd *cli = 
osc_cli(ext->oe_obj); - unsigned int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; - - return (ext->oe_end >> ppc_bits) - (ext->oe_start >> ppc_bits) + 1; -} - -/** - * Try to add extent to one RPC. We need to think about the following things: - * - # of pages must not be over max_pages_per_rpc - * - extent must be compatible with previous ones - */ -static int try_to_add_extent_for_io(struct client_obd *cli, - struct osc_extent *ext, - struct extent_rpc_data *data) -{ - struct osc_extent *tmp; - unsigned int chunk_count; - struct osc_async_page *oap = list_first_entry(&ext->oe_pages, - struct osc_async_page, - oap_pending_item); - - EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE), - ext); - - if (!data->erd_max_extents) - return 0; - - chunk_count = osc_extent_chunks(ext); - EASSERTF(data->erd_page_count != 0 || - chunk_count <= data->erd_max_chunks, ext, - "The first extent to be fit in a RPC contains %u chunks, which is over the limit %u.\n", - chunk_count, data->erd_max_chunks); - - if (chunk_count > data->erd_max_chunks) - return 0; - - data->erd_max_pages = max(ext->oe_mppr, data->erd_max_pages); - EASSERTF(data->erd_page_count != 0 || - ext->oe_nr_pages <= data->erd_max_pages, ext, - "The first extent to be fit in a RPC contains %u pages, which is over the limit %u.\n", - ext->oe_nr_pages, data->erd_max_pages); - if (data->erd_page_count + ext->oe_nr_pages > data->erd_max_pages) - return 0; - - list_for_each_entry(tmp, data->erd_rpc_list, oe_link) { - struct osc_async_page *oap2; - - oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page, - oap_pending_item); - EASSERT(tmp->oe_owner == current, tmp); - if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) { - CDEBUG(D_CACHE, "Do not permit different type of IO in one RPC\n"); - return 0; - } - - if (tmp->oe_srvlock != ext->oe_srvlock || - !tmp->oe_grants != !ext->oe_grants || - tmp->oe_no_merge || ext->oe_no_merge) - return 0; - - /* remove break for strict check */ - break; 
- } - - data->erd_max_extents--; - data->erd_max_chunks -= chunk_count; - data->erd_page_count += ext->oe_nr_pages; - list_move_tail(&ext->oe_link, data->erd_rpc_list); - ext->oe_owner = current; - return 1; -} - -static inline unsigned int osc_max_write_chunks(const struct client_obd *cli) -{ - /* - * LU-8135: - * - * The maximum size of a single transaction is about 64MB in ZFS. - * #define DMU_MAX_ACCESS (64 * 1024 * 1024) - * - * Since ZFS is a copy-on-write file system, a single dirty page in - * a chunk will result in the rewrite of the whole chunk, therefore - * an RPC shouldn't be allowed to contain too many chunks otherwise - * it will make transaction size much bigger than 64MB, especially - * with big block size for ZFS. - * - * This piece of code is to make sure that OSC won't send write RPCs - * with too many chunks. The maximum chunk size that an RPC can cover - * is set to PTLRPC_MAX_BRW_SIZE, which is defined to 16MB. Ideally - * OST should tell the client what the biggest transaction size is, - * but it's good enough for now. - * - * This limitation doesn't apply to ldiskfs, which allows as many - * chunks in one RPC as we want. However, it won't have any benefits - * to have too many discontiguous pages in one RPC. - * - * An osc_extent won't cover over a RPC size, so the chunks in an - * osc_extent won't bigger than PTLRPC_MAX_BRW_SIZE >> chunkbits. - */ - return PTLRPC_MAX_BRW_SIZE >> cli->cl_chunkbits; -} - -/** - * In order to prevent multiple ptlrpcd from breaking contiguous extents, - * get_write_extent() takes all appropriate extents in atomic. - * - * The following policy is used to collect extents for IO: - * 1. Add as many HP extents as possible; - * 2. Add the first urgent extent in urgent extent list and take it out of - * urgent list; - * 3. Add subsequent extents of this urgent extent; - * 4. If urgent list is not empty, goto 2; - * 5. Traverse the extent tree from the 1st extent; - * 6. 
Above steps exit if there is no space in this RPC. - */ -static unsigned int get_write_extents(struct osc_object *obj, - struct list_head *rpclist) -{ - struct client_obd *cli = osc_cli(obj); - struct osc_extent *ext; - struct osc_extent *temp; - struct extent_rpc_data data = { - .erd_rpc_list = rpclist, - .erd_page_count = 0, - .erd_max_pages = cli->cl_max_pages_per_rpc, - .erd_max_chunks = osc_max_write_chunks(cli), - .erd_max_extents = 256, - }; - - LASSERT(osc_object_is_locked(obj)); - list_for_each_entry_safe(ext, temp, &obj->oo_hp_exts, oe_link) { - LASSERT(ext->oe_state == OES_CACHE); - if (!try_to_add_extent_for_io(cli, ext, &data)) - return data.erd_page_count; - EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext); - } - if (data.erd_page_count == data.erd_max_pages) - return data.erd_page_count; - - while (!list_empty(&obj->oo_urgent_exts)) { - ext = list_entry(obj->oo_urgent_exts.next, - struct osc_extent, oe_link); - if (!try_to_add_extent_for_io(cli, ext, &data)) - return data.erd_page_count; - - if (!ext->oe_intree) - continue; - - while ((ext = next_extent(ext)) != NULL) { - if ((ext->oe_state != OES_CACHE) || - (!list_empty(&ext->oe_link) && - ext->oe_owner)) - continue; - - if (!try_to_add_extent_for_io(cli, ext, &data)) - return data.erd_page_count; - } - } - if (data.erd_page_count == data.erd_max_pages) - return data.erd_page_count; - - ext = first_extent(obj); - while (ext) { - if ((ext->oe_state != OES_CACHE) || - /* this extent may be already in current rpclist */ - (!list_empty(&ext->oe_link) && ext->oe_owner)) { - ext = next_extent(ext); - continue; - } - - if (!try_to_add_extent_for_io(cli, ext, &data)) - return data.erd_page_count; - - ext = next_extent(ext); - } - return data.erd_page_count; -} - -static int -osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, - struct osc_object *osc) - __must_hold(osc) -{ - LIST_HEAD(rpclist); - struct osc_extent *ext; - struct osc_extent *tmp; - struct osc_extent *first = NULL; - u32 
page_count = 0; - int srvlock = 0; - int rc = 0; - - LASSERT(osc_object_is_locked(osc)); - - page_count = get_write_extents(osc, &rpclist); - LASSERT(equi(page_count == 0, list_empty(&rpclist))); - - if (list_empty(&rpclist)) - return 0; - - osc_update_pending(osc, OBD_BRW_WRITE, -page_count); - - list_for_each_entry(ext, &rpclist, oe_link) { - LASSERT(ext->oe_state == OES_CACHE || - ext->oe_state == OES_LOCK_DONE); - if (ext->oe_state == OES_CACHE) - osc_extent_state_set(ext, OES_LOCKING); - else - osc_extent_state_set(ext, OES_RPC); - } - - /* we're going to grab page lock, so release object lock because - * lock order is page lock -> object lock. - */ - osc_object_unlock(osc); - - list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) { - if (ext->oe_state == OES_LOCKING) { - rc = osc_extent_make_ready(env, ext); - if (unlikely(rc < 0)) { - list_del_init(&ext->oe_link); - osc_extent_finish(env, ext, 0, rc); - continue; - } - } - if (!first) { - first = ext; - srvlock = ext->oe_srvlock; - } else { - LASSERT(srvlock == ext->oe_srvlock); - } - } - - if (!list_empty(&rpclist)) { - LASSERT(page_count > 0); - rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE); - LASSERT(list_empty(&rpclist)); - } - - osc_object_lock(osc); - return rc; -} - -/** - * prepare pages for ASYNC io and put pages in send queue. - * - * \param cmd OBD_BRW_* macroses - * \param lop pending pages - * - * \return zero if no page added to send queue. - * \return 1 if pages successfully added to send queue. - * \return negative on errors. 
- */ -static int -osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, - struct osc_object *osc) - __must_hold(osc) -{ - struct osc_extent *ext; - struct osc_extent *next; - LIST_HEAD(rpclist); - struct extent_rpc_data data = { - .erd_rpc_list = &rpclist, - .erd_page_count = 0, - .erd_max_pages = cli->cl_max_pages_per_rpc, - .erd_max_chunks = UINT_MAX, - .erd_max_extents = UINT_MAX, - }; - int rc = 0; - - LASSERT(osc_object_is_locked(osc)); - list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) { - EASSERT(ext->oe_state == OES_LOCK_DONE, ext); - if (!try_to_add_extent_for_io(cli, ext, &data)) - break; - osc_extent_state_set(ext, OES_RPC); - EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext); - } - LASSERT(data.erd_page_count <= data.erd_max_pages); - - osc_update_pending(osc, OBD_BRW_READ, -data.erd_page_count); - - if (!list_empty(&rpclist)) { - osc_object_unlock(osc); - - rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ); - LASSERT(list_empty(&rpclist)); - - osc_object_lock(osc); - } - return rc; -} - -#define list_to_obj(list, item) ({ \ - struct list_head *__tmp = (list)->next; \ - list_del_init(__tmp); \ - list_entry(__tmp, struct osc_object, oo_##item); \ -}) - -/* This is called by osc_check_rpcs() to find which objects have pages that - * we could be sending. These lists are maintained by osc_makes_rpc(). - */ -static struct osc_object *osc_next_obj(struct client_obd *cli) -{ - /* First return objects that have blocked locks so that they - * will be flushed quickly and other clients can get the lock, - * then objects which have pages ready to be stuffed into RPCs - */ - if (!list_empty(&cli->cl_loi_hp_ready_list)) - return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item); - if (!list_empty(&cli->cl_loi_ready_list)) - return list_to_obj(&cli->cl_loi_ready_list, ready_item); - - /* then if we have cache waiters, return all objects with queued - * writes. 
This is especially important when many small files - * have filled up the cache and not been fired into rpcs because - * they don't pass the nr_pending/object threshold - */ - if (!list_empty(&cli->cl_cache_waiters) && - !list_empty(&cli->cl_loi_write_list)) - return list_to_obj(&cli->cl_loi_write_list, write_item); - - /* then return all queued objects when we have an invalid import - * so that they get flushed - */ - if (!cli->cl_import || cli->cl_import->imp_invalid) { - if (!list_empty(&cli->cl_loi_write_list)) - return list_to_obj(&cli->cl_loi_write_list, write_item); - if (!list_empty(&cli->cl_loi_read_list)) - return list_to_obj(&cli->cl_loi_read_list, read_item); - } - return NULL; -} - -/* called with the loi list lock held */ -static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) - __must_hold(&cli->cl_loi_list_lock) -{ - struct osc_object *osc; - int rc = 0; - - while ((osc = osc_next_obj(cli)) != NULL) { - struct cl_object *obj = osc2cl(osc); - struct lu_ref_link link; - - OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli)); - - if (osc_max_rpc_in_flight(cli, osc)) { - __osc_list_maint(cli, osc); - break; - } - - cl_object_get(obj); - spin_unlock(&cli->cl_loi_list_lock); - lu_object_ref_add_at(&obj->co_lu, &link, "check", current); - - /* attempt some read/write balancing by alternating between - * reads and writes in an object. The makes_rpc checks here - * would be redundant if we were getting read/write work items - * instead of objects. we don't want send_oap_rpc to drain a - * partial read pending queue when we're given this object to - * do io on writes while there are cache waiters - */ - osc_object_lock(osc); - if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) { - rc = osc_send_write_rpc(env, cli, osc); - if (rc < 0) { - CERROR("Write request failed with %d\n", rc); - - /* osc_send_write_rpc failed, mostly because of - * memory pressure. 
- * - * It can't break here, because if: - * - a page was submitted by osc_io_submit, so - * page locked; - * - no request in flight - * - no subsequent request - * The system will be in live-lock state, - * because there is no chance to call - * osc_io_unplug() and osc_check_rpcs() any - * more. pdflush can't help in this case, - * because it might be blocked at grabbing - * the page lock as we mentioned. - * - * Anyway, continue to drain pages. - */ - /* break; */ - } - } - if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) { - rc = osc_send_read_rpc(env, cli, osc); - if (rc < 0) - CERROR("Read request failed with %d\n", rc); - } - osc_object_unlock(osc); - - osc_list_maint(cli, osc); - lu_object_ref_del_at(&obj->co_lu, &link, "check", current); - cl_object_put(env, obj); - - spin_lock(&cli->cl_loi_list_lock); - } -} - -static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, - struct osc_object *osc, int async) -{ - int rc = 0; - - if (osc && osc_list_maint(cli, osc) == 0) - return 0; - - if (!async) { - spin_lock(&cli->cl_loi_list_lock); - osc_check_rpcs(env, cli); - spin_unlock(&cli->cl_loi_list_lock); - } else { - CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); - LASSERT(cli->cl_writeback_work); - rc = ptlrpcd_queue_work(cli->cl_writeback_work); - } - return rc; -} - -static int osc_io_unplug_async(const struct lu_env *env, - struct client_obd *cli, struct osc_object *osc) -{ - return osc_io_unplug0(env, cli, osc, 1); -} - -void osc_io_unplug(const struct lu_env *env, struct client_obd *cli, - struct osc_object *osc) -{ - (void)osc_io_unplug0(env, cli, osc, 0); -} - -int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops, - struct page *page, loff_t offset) -{ - struct obd_export *exp = osc_export(osc); - struct osc_async_page *oap = &ops->ops_oap; - - if (!page) - return cfs_size_round(sizeof(*oap)); - - oap->oap_magic = OAP_MAGIC; - oap->oap_cli = &exp->exp_obd->u.cli; - oap->oap_obj = osc; - - oap->oap_page = page; - 
oap->oap_obj_off = offset; - LASSERT(!(offset & ~PAGE_MASK)); - - if (capable(CAP_SYS_RESOURCE)) - oap->oap_brw_flags = OBD_BRW_NOQUOTA; - - INIT_LIST_HEAD(&oap->oap_pending_item); - INIT_LIST_HEAD(&oap->oap_rpc_item); - - spin_lock_init(&oap->oap_lock); - CDEBUG(D_INFO, "oap %p page %p obj off %llu\n", - oap, page, oap->oap_obj_off); - return 0; -} - -int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, - struct osc_page *ops) -{ - struct osc_io *oio = osc_env_io(env); - struct osc_extent *ext = NULL; - struct osc_async_page *oap = &ops->ops_oap; - struct client_obd *cli = oap->oap_cli; - struct osc_object *osc = oap->oap_obj; - pgoff_t index; - unsigned int grants = 0, tmp; - int brw_flags = OBD_BRW_ASYNC; - int cmd = OBD_BRW_WRITE; - int need_release = 0; - int rc = 0; - - if (oap->oap_magic != OAP_MAGIC) - return -EINVAL; - - if (!cli->cl_import || cli->cl_import->imp_invalid) - return -EIO; - - if (!list_empty(&oap->oap_pending_item) || - !list_empty(&oap->oap_rpc_item)) - return -EBUSY; - - /* Set the OBD_BRW_SRVLOCK before the page is queued. */ - brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0; - if (capable(CAP_SYS_RESOURCE)) { - brw_flags |= OBD_BRW_NOQUOTA; - cmd |= OBD_BRW_NOQUOTA; - } - - /* check if the file's owner/group is over quota */ - if (!(cmd & OBD_BRW_NOQUOTA)) { - struct cl_object *obj; - struct cl_attr *attr; - unsigned int qid[MAXQUOTAS]; - - obj = cl_object_top(&osc->oo_cl); - attr = &osc_env_info(env)->oti_attr; - - cl_object_attr_lock(obj); - rc = cl_object_attr_get(env, obj, attr); - cl_object_attr_unlock(obj); - - qid[USRQUOTA] = attr->cat_uid; - qid[GRPQUOTA] = attr->cat_gid; - if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA) - rc = -EDQUOT; - if (rc) - return rc; - } - - oap->oap_cmd = cmd; - oap->oap_page_off = ops->ops_from; - oap->oap_count = ops->ops_to - ops->ops_from; - /* - * No need to hold a lock here, - * since this page is not in any list yet. 
- */ - oap->oap_async_flags = 0; - oap->oap_brw_flags = brw_flags; - - OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n", - oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK); - - index = osc_index(oap2osc(oap)); - - /* Add this page into extent by the following steps: - * 1. if there exists an active extent for this IO, mostly this page - * can be added to the active extent and sometimes we need to - * expand extent to accommodate this page; - * 2. otherwise, a new extent will be allocated. - */ - - ext = oio->oi_active; - if (ext && ext->oe_start <= index && ext->oe_max_end >= index) { - /* one chunk plus extent overhead must be enough to write this - * page - */ - grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; - if (ext->oe_end >= index) - grants = 0; - - /* it doesn't need any grant to dirty this page */ - spin_lock(&cli->cl_loi_list_lock); - rc = osc_enter_cache_try(cli, oap, grants, 0); - spin_unlock(&cli->cl_loi_list_lock); - if (rc == 0) { /* try failed */ - grants = 0; - need_release = 1; - } else if (ext->oe_end < index) { - tmp = grants; - /* try to expand this extent */ - rc = osc_extent_expand(ext, index, &tmp); - if (rc < 0) { - need_release = 1; - /* don't free reserved grant */ - } else { - OSC_EXTENT_DUMP(D_CACHE, ext, - "expanded for %lu.\n", index); - osc_unreserve_grant(cli, grants, tmp); - grants = 0; - } - } - rc = 0; - } else if (ext) { - /* index is located outside of active extent */ - need_release = 1; - } - if (need_release) { - osc_extent_release(env, ext); - oio->oi_active = NULL; - ext = NULL; - } - - if (!ext) { - tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; - - /* try to find new extent to cover this page */ - LASSERT(!oio->oi_active); - /* we may have allocated grant for this page if we failed - * to expand the previous active extent. - */ - LASSERT(ergo(grants > 0, grants >= tmp)); - - rc = 0; - if (grants == 0) { - /* we haven't allocated grant for this page. 
*/ - rc = osc_enter_cache(env, cli, oap, tmp); - if (rc == 0) - grants = tmp; - } - - tmp = grants; - if (rc == 0) { - ext = osc_extent_find(env, osc, index, &tmp); - if (IS_ERR(ext)) { - LASSERT(tmp == grants); - osc_exit_cache(cli, oap); - rc = PTR_ERR(ext); - ext = NULL; - } else { - oio->oi_active = ext; - } - } - if (grants > 0) - osc_unreserve_grant(cli, grants, tmp); - } - - LASSERT(ergo(rc == 0, ext)); - if (ext) { - EASSERTF(ext->oe_end >= index && ext->oe_start <= index, - ext, "index = %lu.\n", index); - LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0); - - osc_object_lock(osc); - if (ext->oe_nr_pages == 0) - ext->oe_srvlock = ops->ops_srvlock; - else - LASSERT(ext->oe_srvlock == ops->ops_srvlock); - ++ext->oe_nr_pages; - list_add_tail(&oap->oap_pending_item, &ext->oe_pages); - osc_object_unlock(osc); - } - return rc; -} - -int osc_teardown_async_page(const struct lu_env *env, - struct osc_object *obj, struct osc_page *ops) -{ - struct osc_async_page *oap = &ops->ops_oap; - int rc = 0; - - LASSERT(oap->oap_magic == OAP_MAGIC); - - CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n", - oap, ops, osc_index(oap2osc(oap))); - - if (!list_empty(&oap->oap_rpc_item)) { - CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap); - rc = -EBUSY; - } else if (!list_empty(&oap->oap_pending_item)) { - struct osc_extent *ext = NULL; - - osc_object_lock(obj); - ext = osc_extent_lookup(obj, osc_index(oap2osc(oap))); - osc_object_unlock(obj); - /* only truncated pages are allowed to be taken out. - * See osc_extent_truncate() and osc_cache_truncate_start() - * for details. - */ - if (ext && ext->oe_state != OES_TRUNC) { - OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n", - osc_index(oap2osc(oap))); - rc = -EBUSY; - } - if (ext) - osc_extent_put(env, ext); - } - return rc; -} - -/** - * This is called when a page is picked up by kernel to write out. - * - * We should find out the corresponding extent and add the whole extent - * into urgent list. 
The extent may be being truncated or used, handle it - * carefully. - */ -int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, - struct osc_page *ops) -{ - struct osc_extent *ext = NULL; - struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj); - struct cl_page *cp = ops->ops_cl.cpl_page; - pgoff_t index = osc_index(ops); - struct osc_async_page *oap = &ops->ops_oap; - bool unplug = false; - int rc = 0; - - osc_object_lock(obj); - ext = osc_extent_lookup(obj, index); - if (!ext) { - osc_extent_tree_dump(D_ERROR, obj); - LASSERTF(0, "page index %lu is NOT covered.\n", index); - } - - switch (ext->oe_state) { - case OES_RPC: - case OES_LOCK_DONE: - CL_PAGE_DEBUG(D_ERROR, env, cp, "flush an in-rpc page?\n"); - LASSERT(0); - break; - case OES_LOCKING: - /* If we know this extent is being written out, we should abort - * so that the writer can make this page ready. Otherwise, there - * exists a deadlock problem because other process can wait for - * page writeback bit holding page lock; and meanwhile in - * vvp_page_make_ready(), we need to grab page lock before - * really sending the RPC. - */ - case OES_TRUNC: - /* race with truncate, page will be redirtied */ - case OES_ACTIVE: - /* The extent is active so we need to abort and let the caller - * re-dirty the page. If we continued on here, and we were the - * one making the extent active, we could deadlock waiting for - * the page writeback to clear but it won't because the extent - * is active and won't be written out. 
- */ - rc = -EAGAIN; - goto out; - default: - break; - } - - rc = cl_page_prep(env, io, cp, CRT_WRITE); - if (rc) - goto out; - - spin_lock(&oap->oap_lock); - oap->oap_async_flags |= ASYNC_READY | ASYNC_URGENT; - spin_unlock(&oap->oap_lock); - - if (current->flags & PF_MEMALLOC) - ext->oe_memalloc = 1; - - ext->oe_urgent = 1; - if (ext->oe_state == OES_CACHE) { - OSC_EXTENT_DUMP(D_CACHE, ext, - "flush page %p make it urgent.\n", oap); - if (list_empty(&ext->oe_link)) - list_add_tail(&ext->oe_link, &obj->oo_urgent_exts); - unplug = true; - } - rc = 0; - -out: - osc_object_unlock(obj); - osc_extent_put(env, ext); - if (unplug) - osc_io_unplug_async(env, osc_cli(obj), obj); - return rc; -} - -/** - * this is called when a sync waiter receives an interruption. Its job is to - * get the caller woken as soon as possible. If its page hasn't been put in an - * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as - * desiring interruption which will forcefully complete the rpc once the rpc - * has timed out. - */ -int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) -{ - struct osc_async_page *oap = &ops->ops_oap; - struct osc_object *obj = oap->oap_obj; - struct client_obd *cli = osc_cli(obj); - struct osc_extent *ext; - struct osc_extent *found = NULL; - struct list_head *plist; - pgoff_t index = osc_index(ops); - int rc = -EBUSY; - int cmd; - - LASSERT(!oap->oap_interrupted); - oap->oap_interrupted = 1; - - /* Find out the caching extent */ - osc_object_lock(obj); - if (oap->oap_cmd & OBD_BRW_WRITE) { - plist = &obj->oo_urgent_exts; - cmd = OBD_BRW_WRITE; - } else { - plist = &obj->oo_reading_exts; - cmd = OBD_BRW_READ; - } - list_for_each_entry(ext, plist, oe_link) { - if (ext->oe_start <= index && ext->oe_end >= index) { - LASSERT(ext->oe_state == OES_LOCK_DONE); - /* For OES_LOCK_DONE state extent, it has already held - * a refcount for RPC. 
- */ - found = osc_extent_get(ext); - break; - } - } - if (found) { - list_del_init(&found->oe_link); - osc_update_pending(obj, cmd, -found->oe_nr_pages); - osc_object_unlock(obj); - - osc_extent_finish(env, found, 0, -EINTR); - osc_extent_put(env, found); - rc = 0; - } else { - osc_object_unlock(obj); - /* ok, it's been put in an rpc. only one oap gets a request - * reference - */ - if (oap->oap_request) { - ptlrpc_mark_interrupted(oap->oap_request); - ptlrpcd_wake(oap->oap_request); - ptlrpc_req_finished(oap->oap_request); - oap->oap_request = NULL; - } - } - - osc_list_maint(cli, obj); - return rc; -} - -int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, - struct list_head *list, int cmd, int brw_flags) -{ - struct client_obd *cli = osc_cli(obj); - struct osc_extent *ext; - struct osc_async_page *oap, *tmp; - int page_count = 0; - int mppr = cli->cl_max_pages_per_rpc; - bool can_merge = true; - pgoff_t start = CL_PAGE_EOF; - pgoff_t end = 0; - - list_for_each_entry(oap, list, oap_pending_item) { - struct osc_page *opg = oap2osc_page(oap); - pgoff_t index = osc_index(opg); - - if (index > end) - end = index; - if (index < start) - start = index; - ++page_count; - mppr <<= (page_count > mppr); - - if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE)) - can_merge = false; - } - - ext = osc_extent_alloc(obj); - if (!ext) { - list_for_each_entry_safe(oap, tmp, list, oap_pending_item) { - list_del_init(&oap->oap_pending_item); - osc_ap_completion(env, cli, oap, 0, -ENOMEM); - } - return -ENOMEM; - } - - ext->oe_rw = !!(cmd & OBD_BRW_READ); - ext->oe_sync = 1; - ext->oe_no_merge = !can_merge; - ext->oe_urgent = 1; - ext->oe_start = start; - ext->oe_end = end; - ext->oe_max_end = end; - ext->oe_obj = obj; - ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK); - ext->oe_nr_pages = page_count; - ext->oe_mppr = mppr; - list_splice_init(list, &ext->oe_pages); - - osc_object_lock(obj); - /* Reuse the initial refcount for RPC, don't drop it */ - 
osc_extent_state_set(ext, OES_LOCK_DONE); - if (cmd & OBD_BRW_WRITE) { - list_add_tail(&ext->oe_link, &obj->oo_urgent_exts); - osc_update_pending(obj, OBD_BRW_WRITE, page_count); - } else { - list_add_tail(&ext->oe_link, &obj->oo_reading_exts); - osc_update_pending(obj, OBD_BRW_READ, page_count); - } - osc_object_unlock(obj); - - osc_io_unplug_async(env, cli, obj); - return 0; -} - -/** - * Called by osc_io_setattr_start() to freeze and destroy covering extents. - */ -int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj, - u64 size, struct osc_extent **extp) -{ - struct client_obd *cli = osc_cli(obj); - struct osc_extent *ext; - struct osc_extent *temp; - struct osc_extent *waiting = NULL; - pgoff_t index; - LIST_HEAD(list); - int result = 0; - bool partial; - - /* pages with index greater or equal to index will be truncated. */ - index = cl_index(osc2cl(obj), size); - partial = size > cl_offset(osc2cl(obj), index); - -again: - osc_object_lock(obj); - ext = osc_extent_search(obj, index); - if (!ext) - ext = first_extent(obj); - else if (ext->oe_end < index) - ext = next_extent(ext); - while (ext) { - EASSERT(ext->oe_state != OES_TRUNC, ext); - - if (ext->oe_state > OES_CACHE || ext->oe_urgent) { - /* if ext is in urgent state, it means there must exist - * a page already having been flushed by write_page(). - * We have to wait for this extent because we can't - * truncate that page. - */ - OSC_EXTENT_DUMP(D_CACHE, ext, - "waiting for busy extent\n"); - waiting = osc_extent_get(ext); - break; - } - - OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:%llu.\n", size); - - osc_extent_get(ext); - if (ext->oe_state == OES_ACTIVE) { - /* though we grab inode mutex for write path, but we - * release it before releasing extent(in osc_io_end()), - * so there is a race window that an extent is still - * in OES_ACTIVE when truncate starts. 
- */ - LASSERT(!ext->oe_trunc_pending); - ext->oe_trunc_pending = 1; - } else { - EASSERT(ext->oe_state == OES_CACHE, ext); - osc_extent_state_set(ext, OES_TRUNC); - osc_update_pending(obj, OBD_BRW_WRITE, - -ext->oe_nr_pages); - } - EASSERT(list_empty(&ext->oe_link), ext); - list_add_tail(&ext->oe_link, &list); - - ext = next_extent(ext); - } - osc_object_unlock(obj); - - osc_list_maint(cli, obj); - - list_for_each_entry_safe(ext, temp, &list, oe_link) { - int rc; - - list_del_init(&ext->oe_link); - - /* extent may be in OES_ACTIVE state because inode mutex - * is released before osc_io_end() in file write case - */ - if (ext->oe_state != OES_TRUNC) - osc_extent_wait(env, ext, OES_TRUNC); - - rc = osc_extent_truncate(ext, index, partial); - if (rc < 0) { - if (result == 0) - result = rc; - - OSC_EXTENT_DUMP(D_ERROR, ext, - "truncate error %d\n", rc); - } else if (ext->oe_nr_pages == 0) { - osc_extent_remove(ext); - } else { - /* this must be an overlapped extent which means only - * part of pages in this extent have been truncated. - */ - EASSERTF(ext->oe_start <= index, ext, - "trunc index = %lu/%d.\n", index, partial); - /* fix index to skip this partially truncated extent */ - index = ext->oe_end + 1; - partial = false; - - /* we need to hold this extent in OES_TRUNC state so - * that no writeback will happen. This is to avoid - * BUG 17397. - * Only partial truncate can reach here, if @size is - * not zero, the caller should provide a valid @extp. - */ - LASSERT(!*extp); - *extp = osc_extent_get(ext); - OSC_EXTENT_DUMP(D_CACHE, ext, - "trunc at %llu\n", size); - } - osc_extent_put(env, ext); - } - if (waiting) { - int rc; - - /* ignore the result of osc_extent_wait the write initiator - * should take care of it. 
- */ - rc = osc_extent_wait(env, waiting, OES_INV); - if (rc < 0) - OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc); - - osc_extent_put(env, waiting); - waiting = NULL; - goto again; - } - return result; -} - -/** - * Called after osc_io_setattr_end to add oio->oi_trunc back to cache. - */ -void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext) -{ - if (ext) { - struct osc_object *obj = ext->oe_obj; - bool unplug = false; - - EASSERT(ext->oe_nr_pages > 0, ext); - EASSERT(ext->oe_state == OES_TRUNC, ext); - EASSERT(!ext->oe_urgent, ext); - - OSC_EXTENT_DUMP(D_CACHE, ext, "trunc -> cache.\n"); - osc_object_lock(obj); - osc_extent_state_set(ext, OES_CACHE); - if (ext->oe_fsync_wait && !ext->oe_urgent) { - ext->oe_urgent = 1; - list_move_tail(&ext->oe_link, &obj->oo_urgent_exts); - unplug = true; - } - osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages); - osc_object_unlock(obj); - osc_extent_put(env, ext); - - if (unplug) - osc_io_unplug_async(env, osc_cli(obj), obj); - } -} - -/** - * Wait for extents in a specific range to be written out. - * The caller must have called osc_cache_writeback_range() to issue IO - * otherwise it will take a long time for this function to finish. - * - * Caller must hold inode_mutex , or cancel exclusive dlm lock so that - * nobody else can dirty this range of file while we're waiting for - * extents to be written. 
- */ -int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj, - pgoff_t start, pgoff_t end) -{ - struct osc_extent *ext; - pgoff_t index = start; - int result = 0; - -again: - osc_object_lock(obj); - ext = osc_extent_search(obj, index); - if (!ext) - ext = first_extent(obj); - else if (ext->oe_end < index) - ext = next_extent(ext); - while (ext) { - int rc; - - if (ext->oe_start > end) - break; - - if (!ext->oe_fsync_wait) { - ext = next_extent(ext); - continue; - } - - EASSERT(ergo(ext->oe_state == OES_CACHE, - ext->oe_hp || ext->oe_urgent), ext); - EASSERT(ergo(ext->oe_state == OES_ACTIVE, - !ext->oe_hp && ext->oe_urgent), ext); - - index = ext->oe_end + 1; - osc_extent_get(ext); - osc_object_unlock(obj); - - rc = osc_extent_wait(env, ext, OES_INV); - if (result == 0) - result = rc; - osc_extent_put(env, ext); - goto again; - } - osc_object_unlock(obj); - - OSC_IO_DEBUG(obj, "sync file range.\n"); - return result; -} - -/** - * Called to write out a range of osc object. - * - * @hp : should be set this is caused by lock cancel; - * @discard: is set if dirty pages should be dropped - file will be deleted or - * truncated, this implies there is no partially discarding extents. - * - * Return how many pages will be issued, or error code if error occurred. 
- */ -int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, - pgoff_t start, pgoff_t end, int hp, int discard) -{ - struct osc_extent *ext; - LIST_HEAD(discard_list); - bool unplug = false; - int result = 0; - - osc_object_lock(obj); - ext = osc_extent_search(obj, start); - if (!ext) - ext = first_extent(obj); - else if (ext->oe_end < start) - ext = next_extent(ext); - while (ext) { - if (ext->oe_start > end) - break; - - ext->oe_fsync_wait = 1; - switch (ext->oe_state) { - case OES_CACHE: - result += ext->oe_nr_pages; - if (!discard) { - struct list_head *list = NULL; - - if (hp) { - EASSERT(!ext->oe_hp, ext); - ext->oe_hp = 1; - list = &obj->oo_hp_exts; - } else if (!ext->oe_urgent) { - ext->oe_urgent = 1; - list = &obj->oo_urgent_exts; - } - if (list) - list_move_tail(&ext->oe_link, list); - unplug = true; - } else { - /* the only discarder is lock cancelling, so - * [start, end] must contain this extent - */ - EASSERT(ext->oe_start >= start && - ext->oe_max_end <= end, ext); - osc_extent_state_set(ext, OES_LOCKING); - ext->oe_owner = current; - list_move_tail(&ext->oe_link, &discard_list); - osc_update_pending(obj, OBD_BRW_WRITE, - -ext->oe_nr_pages); - } - break; - case OES_ACTIVE: - /* It's pretty bad to wait for ACTIVE extents, because - * we don't know how long we will wait for it to be - * flushed since it may be blocked at awaiting more - * grants. We do this for the correctness of fsync. - */ - LASSERT(hp == 0 && discard == 0); - ext->oe_urgent = 1; - break; - case OES_TRUNC: - /* this extent is being truncated, can't do anything - * for it now. it will be set to urgent after truncate - * is finished in osc_cache_truncate_end(). 
- */ - default: - break; - } - ext = next_extent(ext); - } - osc_object_unlock(obj); - - LASSERT(ergo(!discard, list_empty(&discard_list))); - if (!list_empty(&discard_list)) { - struct osc_extent *tmp; - int rc; - - osc_list_maint(osc_cli(obj), obj); - list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) { - list_del_init(&ext->oe_link); - EASSERT(ext->oe_state == OES_LOCKING, ext); - - /* Discard caching pages. We don't actually write this - * extent out but we complete it as if we did. - */ - rc = osc_extent_make_ready(env, ext); - if (unlikely(rc < 0)) { - OSC_EXTENT_DUMP(D_ERROR, ext, - "make_ready returned %d\n", rc); - if (result >= 0) - result = rc; - } - - /* finish the extent as if the pages were sent */ - osc_extent_finish(env, ext, 0, 0); - } - } - - if (unplug) - osc_io_unplug(env, osc_cli(obj), obj); - - if (hp || discard) { - int rc; - - rc = osc_cache_wait_range(env, obj, start, end); - if (result >= 0 && rc < 0) - result = rc; - } - - OSC_IO_DEBUG(obj, "pageout [%lu, %lu], %d.\n", start, end, result); - return result; -} - -/** - * Returns a list of pages by a given [start, end] of \a obj. - * - * \param resched If not NULL, then we give up before hogging CPU for too - * long and set *resched = 1, in that case caller should implement a retry - * logic. - * - * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely - * crucial in the face of [offset, EOF] locks. - * - * Return at least one page in @queue unless there is no covered page. 
- */ -int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io, - struct osc_object *osc, pgoff_t start, pgoff_t end, - osc_page_gang_cbt cb, void *cbdata) -{ - struct osc_page *ops; - void **pvec; - pgoff_t idx; - unsigned int nr; - unsigned int i; - unsigned int j; - int res = CLP_GANG_OKAY; - bool tree_lock = true; - - idx = start; - pvec = osc_env_info(env)->oti_pvec; - spin_lock(&osc->oo_tree_lock); - while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec, - idx, OTI_PVEC_SIZE)) > 0) { - struct cl_page *page; - bool end_of_region = false; - - for (i = 0, j = 0; i < nr; ++i) { - ops = pvec[i]; - pvec[i] = NULL; - - idx = osc_index(ops); - if (idx > end) { - end_of_region = true; - break; - } - - page = ops->ops_cl.cpl_page; - LASSERT(page->cp_type == CPT_CACHEABLE); - if (page->cp_state == CPS_FREEING) - continue; - - cl_page_get(page); - lu_ref_add_atomic(&page->cp_reference, - "gang_lookup", current); - pvec[j++] = ops; - } - ++idx; - - /* - * Here a delicate locking dance is performed. Current thread - * holds a reference to a page, but has to own it before it - * can be placed into queue. Owning implies waiting, so - * radix-tree lock is to be released. After a wait one has to - * check that pages weren't truncated (cl_page_own() returns - * error in the latter case). - */ - spin_unlock(&osc->oo_tree_lock); - tree_lock = false; - - for (i = 0; i < j; ++i) { - ops = pvec[i]; - if (res == CLP_GANG_OKAY) - res = (*cb)(env, io, ops, cbdata); - - page = ops->ops_cl.cpl_page; - lu_ref_del(&page->cp_reference, "gang_lookup", current); - cl_page_put(env, page); - } - if (nr < OTI_PVEC_SIZE || end_of_region) - break; - - if (res == CLP_GANG_OKAY && need_resched()) - res = CLP_GANG_RESCHED; - if (res != CLP_GANG_OKAY) - break; - - spin_lock(&osc->oo_tree_lock); - tree_lock = true; - } - if (tree_lock) - spin_unlock(&osc->oo_tree_lock); - return res; -} - -/** - * Check if page @page is covered by an extra lock or discard it. 
- */ -static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io, - struct osc_page *ops, void *cbdata) -{ - struct osc_thread_info *info = osc_env_info(env); - struct osc_object *osc = cbdata; - pgoff_t index; - - index = osc_index(ops); - if (index >= info->oti_fn_index) { - struct ldlm_lock *tmp; - struct cl_page *page = ops->ops_cl.cpl_page; - - /* refresh non-overlapped index */ - tmp = osc_dlmlock_at_pgoff(env, osc, index, - OSC_DAP_FL_TEST_LOCK); - if (tmp) { - __u64 end = tmp->l_policy_data.l_extent.end; - /* Cache the first-non-overlapped index so as to skip - * all pages within [index, oti_fn_index). This is safe - * because if tmp lock is canceled, it will discard - * these pages. - */ - info->oti_fn_index = cl_index(osc2cl(osc), end + 1); - if (end == OBD_OBJECT_EOF) - info->oti_fn_index = CL_PAGE_EOF; - LDLM_LOCK_PUT(tmp); - } else if (cl_page_own(env, io, page) == 0) { - /* discard the page */ - cl_page_discard(env, io, page); - cl_page_disown(env, io, page); - } else { - LASSERT(page->cp_state == CPS_FREEING); - } - } - - info->oti_next_index = index + 1; - return CLP_GANG_OKAY; -} - -static int discard_cb(const struct lu_env *env, struct cl_io *io, - struct osc_page *ops, void *cbdata) -{ - struct osc_thread_info *info = osc_env_info(env); - struct cl_page *page = ops->ops_cl.cpl_page; - - /* page is top page. */ - info->oti_next_index = osc_index(ops) + 1; - if (cl_page_own(env, io, page) == 0) { - if (page->cp_type == CPT_CACHEABLE && - PageDirty(cl_page_vmpage(page))) - CL_PAGE_DEBUG(D_ERROR, env, page, - "discard dirty page?\n"); - - /* discard the page */ - cl_page_discard(env, io, page); - cl_page_disown(env, io, page); - } else { - LASSERT(page->cp_state == CPS_FREEING); - } - - return CLP_GANG_OKAY; -} - -/** - * Discard pages protected by the given lock. This function traverses radix - * tree to find all covering pages and discard them. If a page is being covered - * by other locks, it should remain in cache. 
- * - * If error happens on any step, the process continues anyway (the reasoning - * behind this being that lock cancellation cannot be delayed indefinitely). - */ -int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc, - pgoff_t start, pgoff_t end, enum cl_lock_mode mode) -{ - struct osc_thread_info *info = osc_env_info(env); - struct cl_io *io = &info->oti_io; - osc_page_gang_cbt cb; - int res; - int result; - - io->ci_obj = cl_object_top(osc2cl(osc)); - io->ci_ignore_layout = 1; - result = cl_io_init(env, io, CIT_MISC, io->ci_obj); - if (result != 0) - goto out; - - cb = mode == CLM_READ ? check_and_discard_cb : discard_cb; - info->oti_fn_index = start; - info->oti_next_index = start; - do { - res = osc_page_gang_lookup(env, io, osc, - info->oti_next_index, end, cb, osc); - if (info->oti_next_index > end) - break; - - if (res == CLP_GANG_RESCHED) - cond_resched(); - } while (res != CLP_GANG_OKAY); -out: - cl_io_fini(env, io); - return result; -} - -/** @} osc */ diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h deleted file mode 100644 index 2d3cba16ef34..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h +++ /dev/null @@ -1,681 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Internal interfaces of OSC layer. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#ifndef OSC_CL_INTERNAL_H -#define OSC_CL_INTERNAL_H - -#include -/* osc_build_res_name() */ -#include -#include "osc_internal.h" - -/** \defgroup osc osc - * @{ - */ - -struct osc_extent; - -/** - * State maintained by osc layer for each IO context. - */ -struct osc_io { - /** super class */ - struct cl_io_slice oi_cl; - /** true if this io is lockless. */ - unsigned int oi_lockless:1, - /** true if this io is counted as active IO */ - oi_is_active:1; - /** how many LRU pages are reserved for this IO */ - unsigned long oi_lru_reserved; - - /** active extents, we know how many bytes is going to be written, - * so having an active extent will prevent it from being fragmented - */ - struct osc_extent *oi_active; - /** partially truncated extent, we need to hold this extent to prevent - * page writeback from happening. - */ - struct osc_extent *oi_trunc; - - /** write osc_lock for this IO, used by osc_extent_find(). */ - struct osc_lock *oi_write_osclock; - struct obdo oi_oa; - struct osc_async_cbargs { - bool opc_rpc_sent; - int opc_rc; - struct completion opc_sync; - } oi_cbarg; -}; - -/** - * State maintained by osc layer for the duration of a system call. 
- */ -struct osc_session { - struct osc_io os_io; -}; - -#define OTI_PVEC_SIZE 256 -struct osc_thread_info { - struct ldlm_res_id oti_resname; - union ldlm_policy_data oti_policy; - struct cl_lock_descr oti_descr; - struct cl_attr oti_attr; - struct lustre_handle oti_handle; - struct cl_page_list oti_plist; - struct cl_io oti_io; - void *oti_pvec[OTI_PVEC_SIZE]; - /** - * Fields used by cl_lock_discard_pages(). - */ - pgoff_t oti_next_index; - pgoff_t oti_fn_index; /* first non-overlapped index */ - struct cl_sync_io oti_anchor; - struct cl_req_attr oti_req_attr; -}; - -struct osc_object { - struct cl_object oo_cl; - struct lov_oinfo *oo_oinfo; - /** - * True if locking against this stripe got -EUSERS. - */ - int oo_contended; - unsigned long oo_contention_time; - /** - * used by the osc to keep track of what objects to build into rpcs. - * Protected by client_obd->cli_loi_list_lock. - */ - struct list_head oo_ready_item; - struct list_head oo_hp_ready_item; - struct list_head oo_write_item; - struct list_head oo_read_item; - - /** - * extent is a red black tree to manage (async) dirty pages. - */ - struct rb_root oo_root; - /** - * Manage write(dirty) extents. - */ - struct list_head oo_hp_exts; /* list of hp extents */ - struct list_head oo_urgent_exts; /* list of writeback extents */ - struct list_head oo_rpc_exts; - - struct list_head oo_reading_exts; - - atomic_t oo_nr_reads; - atomic_t oo_nr_writes; - - /** Protect extent tree. Will be used to protect - * oo_{read|write}_pages soon. 
- */ - spinlock_t oo_lock; - - /** - * Radix tree for caching pages - */ - struct radix_tree_root oo_tree; - spinlock_t oo_tree_lock; - unsigned long oo_npages; - - /* Protect osc_lock this osc_object has */ - spinlock_t oo_ol_spin; - struct list_head oo_ol_list; - - /** number of active IOs of this object */ - atomic_t oo_nr_ios; - wait_queue_head_t oo_io_waitq; -}; - -static inline void osc_object_lock(struct osc_object *obj) -{ - spin_lock(&obj->oo_lock); -} - -static inline int osc_object_trylock(struct osc_object *obj) -{ - return spin_trylock(&obj->oo_lock); -} - -static inline void osc_object_unlock(struct osc_object *obj) -{ - spin_unlock(&obj->oo_lock); -} - -static inline int osc_object_is_locked(struct osc_object *obj) -{ -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) - return spin_is_locked(&obj->oo_lock); -#else - /* - * It is not perfect to return true all the time. - * But since this function is only used for assertion - * and checking, it seems OK. - */ - return 1; -#endif -} - -/* - * Lock "micro-states" for osc layer. - */ -enum osc_lock_state { - OLS_NEW, - OLS_ENQUEUED, - OLS_UPCALL_RECEIVED, - OLS_GRANTED, - OLS_CANCELLED -}; - -/** - * osc-private state of cl_lock. - * - * Interaction with DLM. - * - * Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in - * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_dlmlock. - * - * This pointer is protected through a reference, acquired by - * osc_lock_upcall0(). Also, an additional reference is acquired by - * ldlm_lock_addref() call protecting the lock from cancellation, until - * osc_lock_unuse() releases it. - * - * Below is a description of how lock references are acquired and released - * inside of DLM. - * - * - When new lock is created and enqueued to the server (ldlm_cli_enqueue()) - * - ldlm_lock_create() - * - ldlm_lock_new(): initializes a lock with 2 references. 
One for - * the caller (released when reply from the server is received, or on - * error), and another for the hash table. - * - ldlm_lock_addref_internal(): protects the lock from cancellation. - * - * - When reply is received from the server (osc_enqueue_interpret()) - * - ldlm_cli_enqueue_fini() - * - LDLM_LOCK_PUT(): releases caller reference acquired by - * ldlm_lock_new(). - * - if (rc != 0) - * ldlm_lock_decref(): error case: matches ldlm_cli_enqueue(). - * - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue(). - * - * - When lock is being cancelled (ldlm_lock_cancel()) - * - ldlm_lock_destroy() - * - LDLM_LOCK_PUT(): releases hash-table reference acquired by - * ldlm_lock_new(). - * - * osc_lock is detached from ldlm_lock by osc_lock_detach() that is called - * either when lock is cancelled (osc_lock_blocking()), or when locks is - * deleted without cancellation (e.g., from cl_locks_prune()). In the latter - * case ldlm lock remains in memory, and can be re-attached to osc_lock in the - * future. - */ -struct osc_lock { - struct cl_lock_slice ols_cl; - /** Internal lock to protect states, etc. */ - spinlock_t ols_lock; - /** Owner sleeps on this channel for state change */ - struct cl_sync_io *ols_owner; - /** waiting list for this lock to be cancelled */ - struct list_head ols_waiting_list; - /** wait entry of ols_waiting_list */ - struct list_head ols_wait_entry; - /** list entry for osc_object::oo_ol_list */ - struct list_head ols_nextlock_oscobj; - - /** underlying DLM lock */ - struct ldlm_lock *ols_dlmlock; - /** DLM flags with which osc_lock::ols_lock was enqueued */ - __u64 ols_flags; - /** osc_lock::ols_lock handle */ - struct lustre_handle ols_handle; - struct ldlm_enqueue_info ols_einfo; - enum osc_lock_state ols_state; - /** lock value block */ - struct ost_lvb ols_lvb; - - /** - * true, if ldlm_lock_addref() was called against - * osc_lock::ols_lock. This is used for sanity checking. 
- * - * \see osc_lock::ols_has_ref - */ - unsigned ols_hold :1, - /** - * this is much like osc_lock::ols_hold, except that this bit is - * cleared _after_ reference in released in osc_lock_unuse(). This - * fine distinction is needed because: - * - * - if ldlm lock still has a reference, osc_ast_data_get() needs - * to return associated cl_lock (so that a flag is needed that is - * cleared after ldlm_lock_decref() returned), and - * - * - ldlm_lock_decref() can invoke blocking ast (for a - * LDLM_FL_CBPENDING lock), and osc_lock functions like - * osc_lock_cancel() called from there need to know whether to - * release lock reference (so that a flag is needed that is - * cleared before ldlm_lock_decref() is called). - */ - ols_has_ref:1, - /** - * inherit the lockless attribute from top level cl_io. - * If true, osc_lock_enqueue is able to tolerate the -EUSERS error. - */ - ols_locklessable:1, - /** - * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat - * the EVAVAIL error as tolerable, this will make upper logic happy - * to wait all glimpse locks to each OSTs to be completed. - * Glimpse lock converts to normal lock if the server lock is - * granted. - * Glimpse lock should be destroyed immediately after use. - */ - ols_glimpse:1, - /** - * For async glimpse lock. - */ - ols_agl:1; -}; - -/** - * Page state private for osc layer. - */ -struct osc_page { - struct cl_page_slice ops_cl; - /** - * Page queues used by osc to detect when RPC can be formed. - */ - struct osc_async_page ops_oap; - /** - * An offset within page from which next transfer starts. This is used - * by cl_page_clip() to submit partial page transfers. - */ - int ops_from; - /** - * An offset within page at which next transfer ends. - * - * \see osc_page::ops_from. - */ - int ops_to; - /** - * Boolean, true iff page is under transfer. Used for sanity checking. - */ - unsigned ops_transfer_pinned:1, - /** - * in LRU? 
- */ - ops_in_lru:1, - /** - * Set if the page must be transferred with OBD_BRW_SRVLOCK. - */ - ops_srvlock:1; - /** - * lru page list. See osc_lru_{del|use}() in osc_page.c for usage. - */ - struct list_head ops_lru; - /** - * Submit time - the time when the page is starting RPC. For debugging. - */ - unsigned long ops_submit_time; -}; - -extern struct kmem_cache *osc_lock_kmem; -extern struct kmem_cache *osc_object_kmem; -extern struct kmem_cache *osc_thread_kmem; -extern struct kmem_cache *osc_session_kmem; -extern struct kmem_cache *osc_extent_kmem; - -extern struct lu_device_type osc_device_type; -extern struct lu_context_key osc_key; -extern struct lu_context_key osc_session_key; - -#define OSC_FLAGS (ASYNC_URGENT | ASYNC_READY) - -int osc_lock_init(const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, - const struct cl_io *io); -int osc_io_init(const struct lu_env *env, - struct cl_object *obj, struct cl_io *io); -struct lu_object *osc_object_alloc(const struct lu_env *env, - const struct lu_object_header *hdr, - struct lu_device *dev); -int osc_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t ind); - -void osc_index2policy(union ldlm_policy_data *policy, - const struct cl_object *obj, - pgoff_t start, pgoff_t end); -int osc_lvb_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct ost_lvb *lvb); - -void osc_lru_add_batch(struct client_obd *cli, struct list_head *list); -void osc_page_submit(const struct lu_env *env, struct osc_page *opg, - enum cl_req_type crt, int brw_flags); -int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops); -int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg, - u32 async_flags); -int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops, - struct page *page, loff_t offset); -int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, - struct osc_page *ops); -int osc_page_cache_add(const struct 
lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io); -int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj, - struct osc_page *ops); -int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, - struct osc_page *ops); -int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, - struct list_head *list, int cmd, int brw_flags); -int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj, - u64 size, struct osc_extent **extp); -void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext); -int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, - pgoff_t start, pgoff_t end, int hp, int discard); -int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj, - pgoff_t start, pgoff_t end); -void osc_io_unplug(const struct lu_env *env, struct client_obd *cli, - struct osc_object *osc); -int lru_queue_work(const struct lu_env *env, void *data); - -void osc_object_set_contended(struct osc_object *obj); -void osc_object_clear_contended(struct osc_object *obj); -int osc_object_is_contended(struct osc_object *obj); - -int osc_lock_is_lockless(const struct osc_lock *olck); - -/***************************************************************************** - * - * Accessors. 
- * - */ - -static inline struct osc_thread_info *osc_env_info(const struct lu_env *env) -{ - struct osc_thread_info *info; - - info = lu_context_key_get(&env->le_ctx, &osc_key); - LASSERT(info); - return info; -} - -static inline struct osc_session *osc_env_session(const struct lu_env *env) -{ - struct osc_session *ses; - - ses = lu_context_key_get(env->le_ses, &osc_session_key); - LASSERT(ses); - return ses; -} - -static inline struct osc_io *osc_env_io(const struct lu_env *env) -{ - return &osc_env_session(env)->os_io; -} - -static inline int osc_is_object(const struct lu_object *obj) -{ - return obj->lo_dev->ld_type == &osc_device_type; -} - -static inline struct osc_device *lu2osc_dev(const struct lu_device *d) -{ - LINVRNT(d->ld_type == &osc_device_type); - return container_of(d, struct osc_device, od_cl.cd_lu_dev); -} - -static inline struct obd_export *osc_export(const struct osc_object *obj) -{ - return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->od_exp; -} - -static inline struct client_obd *osc_cli(const struct osc_object *obj) -{ - return &osc_export(obj)->exp_obd->u.cli; -} - -static inline struct osc_object *cl2osc(const struct cl_object *obj) -{ - LINVRNT(osc_is_object(&obj->co_lu)); - return container_of(obj, struct osc_object, oo_cl); -} - -static inline struct cl_object *osc2cl(const struct osc_object *obj) -{ - return (struct cl_object *)&obj->oo_cl; -} - -static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode) -{ - LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP); - if (mode == CLM_READ) - return LCK_PR; - else if (mode == CLM_WRITE) - return LCK_PW; - else - return LCK_GROUP; -} - -static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode) -{ - LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP); - if (mode == LCK_PR) - return CLM_READ; - else if (mode == LCK_PW) - return CLM_WRITE; - else - return CLM_GROUP; -} - -static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice) -{ - 
LINVRNT(osc_is_object(&slice->cpl_obj->co_lu)); - return container_of(slice, struct osc_page, ops_cl); -} - -static inline struct osc_page *oap2osc(struct osc_async_page *oap) -{ - return container_of_safe(oap, struct osc_page, ops_oap); -} - -static inline pgoff_t osc_index(struct osc_page *opg) -{ - return opg->ops_cl.cpl_index; -} - -static inline struct cl_page *oap2cl_page(struct osc_async_page *oap) -{ - return oap2osc(oap)->ops_cl.cpl_page; -} - -static inline struct osc_page *oap2osc_page(struct osc_async_page *oap) -{ - return (struct osc_page *)container_of(oap, struct osc_page, ops_oap); -} - -static inline struct osc_page * -osc_cl_page_osc(struct cl_page *page, struct osc_object *osc) -{ - const struct cl_page_slice *slice; - - LASSERT(osc); - slice = cl_object_page_slice(&osc->oo_cl, page); - return cl2osc_page(slice); -} - -static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice) -{ - LINVRNT(osc_is_object(&slice->cls_obj->co_lu)); - return container_of(slice, struct osc_lock, ols_cl); -} - -static inline struct osc_lock *osc_lock_at(const struct cl_lock *lock) -{ - return cl2osc_lock(cl_lock_at(lock, &osc_device_type)); -} - -static inline int osc_io_srvlock(struct osc_io *oio) -{ - return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock); -} - -enum osc_extent_state { - OES_INV = 0, /** extent is just initialized or destroyed */ - OES_ACTIVE = 1, /** process is using this extent */ - OES_CACHE = 2, /** extent is ready for IO */ - OES_LOCKING = 3, /** locking page to prepare IO */ - OES_LOCK_DONE = 4, /** locking finished, ready to send */ - OES_RPC = 5, /** in RPC */ - OES_TRUNC = 6, /** being truncated */ - OES_STATE_MAX -}; - -/** - * osc_extent data to manage dirty pages. - * osc_extent has the following attributes: - * 1. all pages in the same must be in one RPC in write back; - * 2. # of pages must be less than max_pages_per_rpc - implied by 1; - * 3. must be covered by only 1 osc_lock; - * 4. exclusive. 
It's impossible to have overlapped osc_extent. - * - * The lifetime of an extent is from when the 1st page is dirtied to when - * all pages inside it are written out. - * - * LOCKING ORDER - * ============= - * page lock -> cl_loi_list_lock -> object lock(osc_object::oo_lock) - */ -struct osc_extent { - /** red-black tree node */ - struct rb_node oe_node; - /** osc_object of this extent */ - struct osc_object *oe_obj; - /** refcount, removed from red-black tree if reaches zero. */ - atomic_t oe_refc; - /** busy if non-zero */ - atomic_t oe_users; - /** link list of osc_object's oo_{hp|urgent|locking}_exts. */ - struct list_head oe_link; - /** state of this extent */ - enum osc_extent_state oe_state; - /** flags for this extent. */ - unsigned int oe_intree:1, - /** 0 is write, 1 is read */ - oe_rw:1, - /** sync extent, queued by osc_queue_sync_pages() */ - oe_sync:1, - /** set if this extent has partial, sync pages. - * Extents with partial page(s) can't merge with others in RPC - */ - oe_no_merge:1, - oe_srvlock:1, - oe_memalloc:1, - /** an ACTIVE extent is going to be truncated, so when this extent - * is released, it will turn into TRUNC state instead of CACHE. - */ - oe_trunc_pending:1, - /** this extent should be written asap and someone may wait for the - * write to finish. This bit is usually set along with urgent if - * the extent was CACHE state. - * fsync_wait extent can't be merged because new extent region may - * exceed fsync range. - */ - oe_fsync_wait:1, - /** covering lock is being canceled */ - oe_hp:1, - /** this extent should be written back asap. set if one of pages is - * called by page WB daemon, or sync write or reading requests. - */ - oe_urgent:1; - /** how many grants allocated for this extent. - * Grant allocated for this extent. There is no grant allocated - * for reading extents and sync write extents. - */ - unsigned int oe_grants; - /** # of dirty pages in this extent */ - unsigned int oe_nr_pages; - /** list of pending oap pages. 
Pages in this list are NOT sorted. */ - struct list_head oe_pages; - /** Since an extent has to be written out in atomic, this is used to - * remember the next page need to be locked to write this extent out. - * Not used right now. - */ - struct osc_page *oe_next_page; - /** start and end index of this extent, include start and end - * themselves. Page offset here is the page index of osc_pages. - * oe_start is used as keyword for red-black tree. - */ - pgoff_t oe_start; - pgoff_t oe_end; - /** maximum ending index of this extent, this is limited by - * max_pages_per_rpc, lock extent and chunk size. - */ - pgoff_t oe_max_end; - /** waitqueue - for those who want to be notified if this extent's - * state has changed. - */ - wait_queue_head_t oe_waitq; - /** lock covering this extent */ - struct ldlm_lock *oe_dlmlock; - /** terminator of this extent. Must be true if this extent is in IO. */ - struct task_struct *oe_owner; - /** return value of writeback. If somebody is waiting for this extent, - * this value can be known by outside world. 
- */ - int oe_rc; - /** max pages per rpc when this extent was created */ - unsigned int oe_mppr; -}; - -int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, - int sent, int rc); -void osc_extent_release(const struct lu_env *env, struct osc_extent *ext); - -int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc, - pgoff_t start, pgoff_t end, enum cl_lock_mode mode); - -typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *, - struct osc_page *, void *); -int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io, - struct osc_object *osc, pgoff_t start, pgoff_t end, - osc_page_gang_cbt cb, void *cbdata); -/** @} osc */ - -#endif /* OSC_CL_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c deleted file mode 100644 index 2b5f324743e2..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_dev.c +++ /dev/null @@ -1,246 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_device, for OSC layer. - * - * Author: Nikita Danilov - */ - -#define DEBUG_SUBSYSTEM S_OSC - -/* class_name2obd() */ -#include - -#include "osc_cl_internal.h" - -/** \addtogroup osc - * @{ - */ - -struct kmem_cache *osc_lock_kmem; -struct kmem_cache *osc_object_kmem; -struct kmem_cache *osc_thread_kmem; -struct kmem_cache *osc_session_kmem; -struct kmem_cache *osc_extent_kmem; -struct kmem_cache *osc_quota_kmem; - -struct lu_kmem_descr osc_caches[] = { - { - .ckd_cache = &osc_lock_kmem, - .ckd_name = "osc_lock_kmem", - .ckd_size = sizeof(struct osc_lock) - }, - { - .ckd_cache = &osc_object_kmem, - .ckd_name = "osc_object_kmem", - .ckd_size = sizeof(struct osc_object) - }, - { - .ckd_cache = &osc_thread_kmem, - .ckd_name = "osc_thread_kmem", - .ckd_size = sizeof(struct osc_thread_info) - }, - { - .ckd_cache = &osc_session_kmem, - .ckd_name = "osc_session_kmem", - .ckd_size = sizeof(struct osc_session) - }, - { - .ckd_cache = &osc_extent_kmem, - .ckd_name = "osc_extent_kmem", - .ckd_size = sizeof(struct osc_extent) - }, - { - .ckd_cache = &osc_quota_kmem, - .ckd_name = "osc_quota_kmem", - .ckd_size = sizeof(struct osc_quota_info) - }, - { - .ckd_cache = NULL - } -}; - -/***************************************************************************** - * - * Type conversions. - * - */ - -static struct lu_device *osc2lu_dev(struct osc_device *osc) -{ - return &osc->od_cl.cd_lu_dev; -} - -/***************************************************************************** - * - * Osc device and device type functions. 
- * - */ - -static void *osc_key_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct osc_thread_info *info; - - info = kmem_cache_zalloc(osc_thread_kmem, GFP_NOFS); - if (!info) - info = ERR_PTR(-ENOMEM); - return info; -} - -static void osc_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct osc_thread_info *info = data; - - kmem_cache_free(osc_thread_kmem, info); -} - -struct lu_context_key osc_key = { - .lct_tags = LCT_CL_THREAD, - .lct_init = osc_key_init, - .lct_fini = osc_key_fini -}; - -static void *osc_session_init(const struct lu_context *ctx, - struct lu_context_key *key) -{ - struct osc_session *info; - - info = kmem_cache_zalloc(osc_session_kmem, GFP_NOFS); - if (!info) - info = ERR_PTR(-ENOMEM); - return info; -} - -static void osc_session_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) -{ - struct osc_session *info = data; - - kmem_cache_free(osc_session_kmem, info); -} - -struct lu_context_key osc_session_key = { - .lct_tags = LCT_SESSION, - .lct_init = osc_session_init, - .lct_fini = osc_session_fini -}; - -/* type constructor/destructor: osc_type_{init,fini,start,stop}(). 
*/ -LU_TYPE_INIT_FINI(osc, &osc_key, &osc_session_key); - -static int osc_cl_process_config(const struct lu_env *env, - struct lu_device *d, struct lustre_cfg *cfg) -{ - return osc_process_config_base(d->ld_obd, cfg); -} - -static const struct lu_device_operations osc_lu_ops = { - .ldo_object_alloc = osc_object_alloc, - .ldo_process_config = osc_cl_process_config, - .ldo_recovery_complete = NULL -}; - -static int osc_device_init(const struct lu_env *env, struct lu_device *d, - const char *name, struct lu_device *next) -{ - return 0; -} - -static struct lu_device *osc_device_fini(const struct lu_env *env, - struct lu_device *d) -{ - return NULL; -} - -static struct lu_device *osc_device_free(const struct lu_env *env, - struct lu_device *d) -{ - struct osc_device *od = lu2osc_dev(d); - - cl_device_fini(lu2cl_dev(d)); - kfree(od); - return NULL; -} - -static struct lu_device *osc_device_alloc(const struct lu_env *env, - struct lu_device_type *t, - struct lustre_cfg *cfg) -{ - struct lu_device *d; - struct osc_device *od; - struct obd_device *obd; - int rc; - - od = kzalloc(sizeof(*od), GFP_NOFS); - if (!od) - return ERR_PTR(-ENOMEM); - - cl_device_init(&od->od_cl, t); - d = osc2lu_dev(od); - d->ld_ops = &osc_lu_ops; - - /* Setup OSC OBD */ - obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd); - rc = osc_setup(obd, cfg); - if (rc) { - osc_device_free(env, d); - return ERR_PTR(rc); - } - od->od_exp = obd->obd_self_export; - return d; -} - -static const struct lu_device_type_operations osc_device_type_ops = { - .ldto_init = osc_type_init, - .ldto_fini = osc_type_fini, - - .ldto_start = osc_type_start, - .ldto_stop = osc_type_stop, - - .ldto_device_alloc = osc_device_alloc, - .ldto_device_free = osc_device_free, - - .ldto_device_init = osc_device_init, - .ldto_device_fini = osc_device_fini -}; - -struct lu_device_type osc_device_type = { - .ldt_tags = LU_DEVICE_CL, - .ldt_name = LUSTRE_OSC_NAME, - .ldt_ops = &osc_device_type_ops, - .ldt_ctx_tags = 
LCT_CL_THREAD -}; - -/** @} osc */ diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h deleted file mode 100644 index 4ddba1354bef..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_internal.h +++ /dev/null @@ -1,237 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#ifndef OSC_INTERNAL_H -#define OSC_INTERNAL_H - -#define OAP_MAGIC 8675309 - -extern atomic_t osc_pool_req_count; -extern unsigned int osc_reqpool_maxreqcount; -extern struct ptlrpc_request_pool *osc_rq_pool; - -struct lu_env; - -enum async_flags { - ASYNC_READY = 0x1, /* ap_make_ready will not be called before this - * page is added to an rpc - */ - ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */ - ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called - * to give the caller a chance to update - * or cancel the size of the io - */ - ASYNC_HP = 0x10, -}; - -struct osc_async_page { - int oap_magic; - unsigned short oap_cmd; - unsigned short oap_interrupted:1; - - struct list_head oap_pending_item; - struct list_head oap_rpc_item; - - u64 oap_obj_off; - unsigned int oap_page_off; - enum async_flags oap_async_flags; - - struct brw_page oap_brw_page; - - struct ptlrpc_request *oap_request; - struct client_obd *oap_cli; - struct osc_object *oap_obj; - - spinlock_t oap_lock; -}; - -#define oap_page oap_brw_page.pg -#define oap_count oap_brw_page.count -#define oap_brw_flags oap_brw_page.flag - -static inline struct osc_async_page *brw_page2oap(struct brw_page *pga) -{ - return (struct osc_async_page *)container_of(pga, struct osc_async_page, - oap_brw_page); -} - -struct osc_cache_waiter { - struct list_head ocw_entry; - wait_queue_head_t ocw_waitq; - struct osc_async_page *ocw_oap; - int ocw_grant; - int ocw_rc; -}; - -void osc_wake_cache_waiters(struct client_obd *cli); -int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes); -void osc_update_next_shrink(struct client_obd *cli); - -/* - * cl integration. 
- */ -#include - -extern struct ptlrpc_request_set *PTLRPCD_SET; - -typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh, - int rc); - -int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, - __u64 *flags, union ldlm_policy_data *policy, - struct ost_lvb *lvb, int kms_valid, - osc_enqueue_upcall_f upcall, - void *cookie, struct ldlm_enqueue_info *einfo, - struct ptlrpc_request_set *rqset, int async, int agl); - -int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, - enum ldlm_type type, union ldlm_policy_data *policy, - enum ldlm_mode mode, __u64 *flags, void *data, - struct lustre_handle *lockh, int unref); - -int osc_setattr_async(struct obd_export *exp, struct obdo *oa, - obd_enqueue_update_f upcall, void *cookie, - struct ptlrpc_request_set *rqset); -int osc_punch_base(struct obd_export *exp, struct obdo *oa, - obd_enqueue_update_f upcall, void *cookie, - struct ptlrpc_request_set *rqset); -int osc_sync_base(struct osc_object *exp, struct obdo *oa, - obd_enqueue_update_f upcall, void *cookie, - struct ptlrpc_request_set *rqset); - -int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg); -int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, - struct list_head *ext_list, int cmd); -long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, - long target, bool force); -unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages); -void osc_lru_unreserve(struct client_obd *cli, unsigned long npages); - -unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock); - -int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg); - -void lproc_osc_attach_seqstat(struct obd_device *dev); -void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars); - -extern struct lu_device_type osc_device_type; - -static inline int osc_recoverable_error(int rc) -{ - return (rc == -EIO || rc == -EROFS || rc == -ENOMEM || - rc == -EAGAIN || rc == 
-EINPROGRESS); -} - -static inline unsigned long rpcs_in_flight(struct client_obd *cli) -{ - return cli->cl_r_in_flight + cli->cl_w_in_flight; -} - -static inline char *cli_name(struct client_obd *cli) -{ - return cli->cl_import->imp_obd->obd_name; -} - -struct osc_device { - struct cl_device od_cl; - struct obd_export *od_exp; - - /* Write stats is actually protected by client_obd's lock. */ - struct osc_stats { - u64 os_lockless_writes; /* by bytes */ - u64 os_lockless_reads; /* by bytes */ - u64 os_lockless_truncates; /* by times */ - } od_stats; - - /* configuration item(s) */ - int od_contention_time; - int od_lockless_truncate; -}; - -static inline struct osc_device *obd2osc_dev(const struct obd_device *d) -{ - return container_of_safe(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev); -} - -extern struct lu_kmem_descr osc_caches[]; - -extern struct kmem_cache *osc_quota_kmem; -struct osc_quota_info { - /** linkage for quota hash table */ - struct rhash_head oqi_hash; - u32 oqi_id; - struct rcu_head rcu; -}; - -int osc_quota_setup(struct obd_device *obd); -int osc_quota_cleanup(struct obd_device *obd); -int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], - u32 valid, u32 flags); -int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]); -int osc_quotactl(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl); -void osc_inc_unstable_pages(struct ptlrpc_request *req); -void osc_dec_unstable_pages(struct ptlrpc_request *req); -bool osc_over_unstable_soft_limit(struct client_obd *cli); - -/** - * Bit flags for osc_dlm_lock_at_pageoff(). - */ -enum osc_dap_flags { - /** - * Just check if the desired lock exists, it won't hold reference - * count on lock. - */ - OSC_DAP_FL_TEST_LOCK = BIT(0), - /** - * Return the lock even if it is being canceled. 
- */ - OSC_DAP_FL_CANCELING = BIT(1), -}; - -struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, - struct osc_object *obj, pgoff_t index, - enum osc_dap_flags flags); - -int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc); - -/** osc shrink list to link all osc client obd */ -extern struct list_head osc_shrink_list; -/** spin lock to protect osc_shrink_list */ -extern spinlock_t osc_shrink_lock; -unsigned long osc_cache_shrink_count(struct shrinker *sk, - struct shrink_control *sc); -unsigned long osc_cache_shrink_scan(struct shrinker *sk, - struct shrink_control *sc); - -#endif /* OSC_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c deleted file mode 100644 index 67734a8ed331..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_io.c +++ /dev/null @@ -1,918 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_io for OSC layer. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_OSC - -#include - -#include "osc_cl_internal.h" - -/** \addtogroup osc - * @{ - */ - -/***************************************************************************** - * - * Type conversions. - * - */ - -static struct osc_io *cl2osc_io(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct osc_io *oio = container_of_safe(slice, struct osc_io, oi_cl); - - LINVRNT(oio == osc_env_io(env)); - return oio; -} - -/***************************************************************************** - * - * io operations. - * - */ - -static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io) -{ -} - -static void osc_read_ahead_release(const struct lu_env *env, void *cbdata) -{ - struct ldlm_lock *dlmlock = cbdata; - struct lustre_handle lockh; - - ldlm_lock2handle(dlmlock, &lockh); - ldlm_lock_decref(&lockh, LCK_PR); - LDLM_LOCK_PUT(dlmlock); -} - -static int osc_io_read_ahead(const struct lu_env *env, - const struct cl_io_slice *ios, - pgoff_t start, struct cl_read_ahead *ra) -{ - struct osc_object *osc = cl2osc(ios->cis_obj); - struct ldlm_lock *dlmlock; - int result = -ENODATA; - - dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0); - if (dlmlock) { - LASSERT(dlmlock->l_ast_data == osc); - if (dlmlock->l_req_mode != LCK_PR) { - struct lustre_handle lockh; - - ldlm_lock2handle(dlmlock, &lockh); - ldlm_lock_addref(&lockh, LCK_PR); - ldlm_lock_decref(&lockh, dlmlock->l_req_mode); - } - - ra->cra_rpc_size = osc_cli(osc)->cl_max_pages_per_rpc; - ra->cra_end = cl_index(osc2cl(osc), - dlmlock->l_policy_data.l_extent.end); - ra->cra_release = osc_read_ahead_release; - ra->cra_cbdata = dlmlock; - result = 0; - } - - return result; -} - -/** - * An implementation of cl_io_operations::cio_io_submit() 
method for osc - * layer. Iterates over pages in the in-queue, prepares each for io by calling - * cl_page_prep() and then either submits them through osc_io_submit_page() - * or, if page is already submitted, changes osc flags through - * osc_set_async_flags(). - */ -static int osc_io_submit(const struct lu_env *env, - const struct cl_io_slice *ios, - enum cl_req_type crt, struct cl_2queue *queue) -{ - struct cl_page *page; - struct cl_page *tmp; - struct client_obd *cli = NULL; - struct osc_object *osc = NULL; /* to keep gcc happy */ - struct osc_page *opg; - struct cl_io *io; - LIST_HEAD(list); - - struct cl_page_list *qin = &queue->c2_qin; - struct cl_page_list *qout = &queue->c2_qout; - unsigned int queued = 0; - int result = 0; - int cmd; - int brw_flags; - unsigned int max_pages; - - LASSERT(qin->pl_nr > 0); - - CDEBUG(D_CACHE | D_READA, "%d %d\n", qin->pl_nr, crt); - - osc = cl2osc(ios->cis_obj); - cli = osc_cli(osc); - max_pages = cli->cl_max_pages_per_rpc; - - cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ; - brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0; - - /* - * NOTE: here @page is a top-level page. This is done to avoid - * creation of sub-page-list. - */ - cl_page_list_for_each_safe(page, tmp, qin) { - struct osc_async_page *oap; - - /* Top level IO. */ - io = page->cp_owner; - LASSERT(io); - - opg = osc_cl_page_osc(page, osc); - oap = &opg->ops_oap; - LASSERT(osc == oap->oap_obj); - - if (!list_empty(&oap->oap_pending_item) || - !list_empty(&oap->oap_rpc_item)) { - CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n", - oap, opg); - result = -EBUSY; - break; - } - - result = cl_page_prep(env, io, page, crt); - if (result != 0) { - LASSERT(result < 0); - if (result != -EALREADY) - break; - /* - * Handle -EALREADY error: for read case, the page is - * already in UPTODATE state; for write, the page - * is not dirty. 
- */ - result = 0; - continue; - } - - spin_lock(&oap->oap_lock); - oap->oap_async_flags = ASYNC_URGENT | ASYNC_READY; - oap->oap_async_flags |= ASYNC_COUNT_STABLE; - spin_unlock(&oap->oap_lock); - - osc_page_submit(env, opg, crt, brw_flags); - list_add_tail(&oap->oap_pending_item, &list); - - if (page->cp_sync_io) - cl_page_list_move(qout, qin, page); - else /* async IO */ - cl_page_list_del(env, qin, page); - - if (++queued == max_pages) { - queued = 0; - result = osc_queue_sync_pages(env, osc, &list, cmd, - brw_flags); - if (result < 0) - break; - } - } - - if (queued > 0) - result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags); - - /* Update c/mtime for sync write. LU-7310 */ - if (qout->pl_nr > 0 && !result) { - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - struct cl_object *obj = ios->cis_obj; - - cl_object_attr_lock(obj); - attr->cat_mtime = ktime_get_real_seconds(); - attr->cat_ctime = attr->cat_mtime; - cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME); - cl_object_attr_unlock(obj); - } - - CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result); - return qout->pl_nr > 0 ? 0 : result; -} - -/** - * This is called when a page is accessed within file in a way that creates - * new page, if one were missing (i.e., if there were a hole at that place in - * the file, or accessed page is beyond the current file size). - * - * Expand stripe KMS if necessary. - */ -static void osc_page_touch_at(const struct lu_env *env, - struct cl_object *obj, pgoff_t idx, size_t to) -{ - struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - int valid; - __u64 kms; - - /* offset within stripe */ - kms = cl_offset(obj, idx) + to; - - cl_object_attr_lock(obj); - /* - * XXX old code used - * - * ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm); - * - * here - */ - CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n", - kms > loi->loi_kms ? 
"" : "not ", loi->loi_kms, kms, - loi->loi_lvb.lvb_size); - - attr->cat_ctime = ktime_get_real_seconds(); - attr->cat_mtime = attr->cat_ctime; - valid = CAT_MTIME | CAT_CTIME; - if (kms > loi->loi_kms) { - attr->cat_kms = kms; - valid |= CAT_KMS; - } - if (kms > loi->loi_lvb.lvb_size) { - attr->cat_size = kms; - valid |= CAT_SIZE; - } - cl_object_attr_update(env, obj, attr, valid); - cl_object_attr_unlock(obj); -} - -static int osc_io_commit_async(const struct lu_env *env, - const struct cl_io_slice *ios, - struct cl_page_list *qin, int from, int to, - cl_commit_cbt cb) -{ - struct cl_io *io = ios->cis_io; - struct osc_io *oio = cl2osc_io(env, ios); - struct osc_object *osc = cl2osc(ios->cis_obj); - struct cl_page *page; - struct cl_page *last_page; - struct osc_page *opg; - int result = 0; - - LASSERT(qin->pl_nr > 0); - - /* Handle partial page cases */ - last_page = cl_page_list_last(qin); - if (oio->oi_lockless) { - page = cl_page_list_first(qin); - if (page == last_page) { - cl_page_clip(env, page, from, to); - } else { - if (from != 0) - cl_page_clip(env, page, from, PAGE_SIZE); - if (to != PAGE_SIZE) - cl_page_clip(env, last_page, 0, to); - } - } - - while (qin->pl_nr > 0) { - struct osc_async_page *oap; - - page = cl_page_list_first(qin); - opg = osc_cl_page_osc(page, osc); - oap = &opg->ops_oap; - - if (!list_empty(&oap->oap_rpc_item)) { - CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n", - oap, opg); - result = -EBUSY; - break; - } - - /* The page may be already in dirty cache. */ - if (list_empty(&oap->oap_pending_item)) { - result = osc_page_cache_add(env, &opg->ops_cl, io); - if (result != 0) - break; - } - - osc_page_touch_at(env, osc2cl(osc), osc_index(opg), - page == last_page ? to : PAGE_SIZE); - - cl_page_list_del(env, qin, page); - - (*cb)(env, io, page); - /* Can't access page any more. Page can be in transfer and - * complete at any time. 
- */ - } - - /* for sync write, kernel will wait for this page to be flushed before - * osc_io_end() is called, so release it earlier. - * for mkwrite(), it's known there is no further pages. - */ - if (cl_io_is_sync_write(io) && oio->oi_active) { - osc_extent_release(env, oio->oi_active); - oio->oi_active = NULL; - } - - CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result); - return result; -} - -static int osc_io_iter_init(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct osc_object *osc = cl2osc(ios->cis_obj); - struct obd_import *imp = osc_cli(osc)->cl_import; - int rc = -EIO; - - spin_lock(&imp->imp_lock); - if (likely(!imp->imp_invalid)) { - struct osc_io *oio = osc_env_io(env); - - atomic_inc(&osc->oo_nr_ios); - oio->oi_is_active = 1; - rc = 0; - } - spin_unlock(&imp->imp_lock); - - return rc; -} - -static int osc_io_write_iter_init(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io = ios->cis_io; - struct osc_io *oio = osc_env_io(env); - struct osc_object *osc = cl2osc(ios->cis_obj); - unsigned long npages; - - if (cl_io_is_append(io)) - return osc_io_iter_init(env, ios); - - npages = io->u.ci_rw.crw_count >> PAGE_SHIFT; - if (io->u.ci_rw.crw_pos & ~PAGE_MASK) - ++npages; - - oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages); - - return osc_io_iter_init(env, ios); -} - -static void osc_io_iter_fini(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct osc_io *oio = osc_env_io(env); - - if (oio->oi_is_active) { - struct osc_object *osc = cl2osc(ios->cis_obj); - - oio->oi_is_active = 0; - LASSERT(atomic_read(&osc->oo_nr_ios) > 0); - if (atomic_dec_and_test(&osc->oo_nr_ios)) - wake_up_all(&osc->oo_io_waitq); - } -} - -static void osc_io_write_iter_fini(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct osc_io *oio = osc_env_io(env); - struct osc_object *osc = cl2osc(ios->cis_obj); - - if (oio->oi_lru_reserved > 0) { - osc_lru_unreserve(osc_cli(osc), 
oio->oi_lru_reserved); - oio->oi_lru_reserved = 0; - } - oio->oi_write_osclock = NULL; - - osc_io_iter_fini(env, ios); -} - -static int osc_io_fault_start(const struct lu_env *env, - const struct cl_io_slice *ios) -{ - struct cl_io *io; - struct cl_fault_io *fio; - - io = ios->cis_io; - fio = &io->u.ci_fault; - CDEBUG(D_INFO, "%lu %d %zu\n", - fio->ft_index, fio->ft_writable, fio->ft_nob); - /* - * If mapping is writeable, adjust kms to cover this page, - * but do not extend kms beyond actual file size. - * See bug 10919. - */ - if (fio->ft_writable) - osc_page_touch_at(env, ios->cis_obj, - fio->ft_index, fio->ft_nob); - return 0; -} - -static int osc_async_upcall(void *a, int rc) -{ - struct osc_async_cbargs *args = a; - - args->opc_rc = rc; - complete(&args->opc_sync); - return 0; -} - -/** - * Checks that there are no pages being written in the extent being truncated. - */ -static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, - struct osc_page *ops, void *cbdata) -{ - struct cl_page *page = ops->ops_cl.cpl_page; - struct osc_async_page *oap; - __u64 start = *(__u64 *)cbdata; - - oap = &ops->ops_oap; - if (oap->oap_cmd & OBD_BRW_WRITE && - !list_empty(&oap->oap_pending_item)) - CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n", - start, current->comm); - - if (PageLocked(page->cp_vmpage)) - CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n", - ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK); - - return CLP_GANG_OKAY; -} - -static void osc_trunc_check(const struct lu_env *env, struct cl_io *io, - struct osc_io *oio, __u64 size) -{ - struct cl_object *clob; - int partial; - pgoff_t start; - - clob = oio->oi_cl.cis_obj; - start = cl_index(clob, size); - partial = cl_offset(clob, start) < size; - - /* - * Complain if there are pages in the truncated region. 
- */ - osc_page_gang_lookup(env, io, cl2osc(clob), - start + partial, CL_PAGE_EOF, - trunc_check_cb, (void *)&size); -} - -static int osc_io_setattr_start(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct cl_io *io = slice->cis_io; - struct osc_io *oio = cl2osc_io(env, slice); - struct cl_object *obj = slice->cis_obj; - struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - struct obdo *oa = &oio->oi_oa; - struct osc_async_cbargs *cbargs = &oio->oi_cbarg; - __u64 size = io->u.ci_setattr.sa_attr.lvb_size; - unsigned int ia_valid = io->u.ci_setattr.sa_valid; - int result = 0; - - /* truncate cache dirty pages first */ - if (cl_io_is_trunc(io)) - result = osc_cache_truncate_start(env, cl2osc(obj), size, - &oio->oi_trunc); - - if (result == 0 && oio->oi_lockless == 0) { - cl_object_attr_lock(obj); - result = cl_object_attr_get(env, obj, attr); - if (result == 0) { - struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr; - unsigned int cl_valid = 0; - - if (ia_valid & ATTR_SIZE) { - attr->cat_size = size; - attr->cat_kms = size; - cl_valid = CAT_SIZE | CAT_KMS; - } - if (ia_valid & ATTR_MTIME_SET) { - attr->cat_mtime = lvb->lvb_mtime; - cl_valid |= CAT_MTIME; - } - if (ia_valid & ATTR_ATIME_SET) { - attr->cat_atime = lvb->lvb_atime; - cl_valid |= CAT_ATIME; - } - if (ia_valid & ATTR_CTIME_SET) { - attr->cat_ctime = lvb->lvb_ctime; - cl_valid |= CAT_CTIME; - } - result = cl_object_attr_update(env, obj, attr, - cl_valid); - } - cl_object_attr_unlock(obj); - } - memset(oa, 0, sizeof(*oa)); - if (result == 0) { - oa->o_oi = loi->loi_oi; - obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid); - oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index; - oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP; - if (ia_valid & ATTR_CTIME) { - oa->o_valid |= OBD_MD_FLCTIME; - oa->o_ctime = attr->cat_ctime; - } - if (ia_valid & ATTR_ATIME) { - oa->o_valid |= OBD_MD_FLATIME; - oa->o_atime = attr->cat_atime; - } - if (ia_valid & 
ATTR_MTIME) { - oa->o_valid |= OBD_MD_FLMTIME; - oa->o_mtime = attr->cat_mtime; - } - if (ia_valid & ATTR_SIZE) { - oa->o_size = size; - oa->o_blocks = OBD_OBJECT_EOF; - oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; - - if (oio->oi_lockless) { - oa->o_flags = OBD_FL_SRVLOCK; - oa->o_valid |= OBD_MD_FLFLAGS; - } - } else { - LASSERT(oio->oi_lockless == 0); - } - if (ia_valid & ATTR_ATTR_FLAG) { - oa->o_flags = io->u.ci_setattr.sa_attr_flags; - oa->o_valid |= OBD_MD_FLFLAGS; - } - - init_completion(&cbargs->opc_sync); - - if (ia_valid & ATTR_SIZE) - result = osc_punch_base(osc_export(cl2osc(obj)), - oa, osc_async_upcall, - cbargs, PTLRPCD_SET); - else - result = osc_setattr_async(osc_export(cl2osc(obj)), - oa, osc_async_upcall, - cbargs, PTLRPCD_SET); - cbargs->opc_rpc_sent = result == 0; - } - return result; -} - -static void osc_io_setattr_end(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct cl_io *io = slice->cis_io; - struct osc_io *oio = cl2osc_io(env, slice); - struct cl_object *obj = slice->cis_obj; - struct osc_async_cbargs *cbargs = &oio->oi_cbarg; - int result = 0; - - if (cbargs->opc_rpc_sent) { - wait_for_completion(&cbargs->opc_sync); - result = cbargs->opc_rc; - io->ci_result = cbargs->opc_rc; - } - if (result == 0) { - if (oio->oi_lockless) { - /* lockless truncate */ - struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev); - - LASSERT(cl_io_is_trunc(io)); - /* XXX: Need a lock. 
*/ - osd->od_stats.os_lockless_truncates++; - } - } - - if (cl_io_is_trunc(io)) { - __u64 size = io->u.ci_setattr.sa_attr.lvb_size; - - osc_trunc_check(env, io, oio, size); - osc_cache_truncate_end(env, oio->oi_trunc); - oio->oi_trunc = NULL; - } -} - -struct osc_data_version_args { - struct osc_io *dva_oio; -}; - -static int -osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req, - void *arg, int rc) -{ - struct osc_data_version_args *dva = arg; - struct osc_io *oio = dva->dva_oio; - const struct ost_body *body; - - if (rc < 0) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (!body) { - rc = -EPROTO; - goto out; - } - - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa, - &body->oa); -out: - oio->oi_cbarg.opc_rc = rc; - complete(&oio->oi_cbarg.opc_sync); - - return 0; -} - -static int osc_io_data_version_start(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version; - struct osc_io *oio = cl2osc_io(env, slice); - struct osc_async_cbargs *cbargs = &oio->oi_cbarg; - struct osc_object *obj = cl2osc(slice->cis_obj); - struct obd_export *exp = osc_export(obj); - struct lov_oinfo *loi = obj->oo_oinfo; - struct osc_data_version_args *dva; - struct obdo *oa = &oio->oi_oa; - struct ptlrpc_request *req; - struct ost_body *body; - int rc; - - memset(oa, 0, sizeof(*oa)); - oa->o_oi = loi->loi_oi; - oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP; - - if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) { - oa->o_valid |= OBD_MD_FLFLAGS; - oa->o_flags |= OBD_FL_SRVLOCK; - if (dv->dv_flags & LL_DV_WR_FLUSH) - oa->o_flags |= OBD_FL_FLUSH; - } - - init_completion(&cbargs->opc_sync); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); - if (rc < 0) { - ptlrpc_request_free(req); - return rc; - } - - body = 
req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); - - ptlrpc_request_set_replen(req); - req->rq_interpret_reply = osc_data_version_interpret; - BUILD_BUG_ON(sizeof(*dva) > sizeof(req->rq_async_args)); - dva = ptlrpc_req_async_args(req); - dva->dva_oio = oio; - - ptlrpcd_add_req(req); - - return 0; -} - -static void osc_io_data_version_end(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version; - struct osc_io *oio = cl2osc_io(env, slice); - struct osc_async_cbargs *cbargs = &oio->oi_cbarg; - - wait_for_completion(&cbargs->opc_sync); - - if (cbargs->opc_rc) { - slice->cis_io->ci_result = cbargs->opc_rc; - } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) { - slice->cis_io->ci_result = -EOPNOTSUPP; - } else { - dv->dv_data_version = oio->oi_oa.o_data_version; - slice->cis_io->ci_result = 0; - } -} - -static int osc_io_read_start(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct cl_object *obj = slice->cis_obj; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - int rc = 0; - - if (!slice->cis_io->ci_noatime) { - cl_object_attr_lock(obj); - attr->cat_atime = ktime_get_real_seconds(); - rc = cl_object_attr_update(env, obj, attr, CAT_ATIME); - cl_object_attr_unlock(obj); - } - return rc; -} - -static int osc_io_write_start(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct cl_object *obj = slice->cis_obj; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - int rc = 0; - - OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1); - cl_object_attr_lock(obj); - attr->cat_ctime = ktime_get_real_seconds(); - attr->cat_mtime = attr->cat_ctime; - rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME); - cl_object_attr_unlock(obj); - - return rc; -} - -static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj, - struct cl_fsync_io *fio) -{ - 
struct osc_io *oio = osc_env_io(env); - struct obdo *oa = &oio->oi_oa; - struct lov_oinfo *loi = obj->oo_oinfo; - struct osc_async_cbargs *cbargs = &oio->oi_cbarg; - int rc = 0; - - memset(oa, 0, sizeof(*oa)); - oa->o_oi = loi->loi_oi; - oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP; - - /* reload size abd blocks for start and end of sync range */ - oa->o_size = fio->fi_start; - oa->o_blocks = fio->fi_end; - oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; - - obdo_set_parent_fid(oa, fio->fi_fid); - - init_completion(&cbargs->opc_sync); - - rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET); - return rc; -} - -static int osc_io_fsync_start(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct cl_io *io = slice->cis_io; - struct cl_fsync_io *fio = &io->u.ci_fsync; - struct cl_object *obj = slice->cis_obj; - struct osc_object *osc = cl2osc(obj); - pgoff_t start = cl_index(obj, fio->fi_start); - pgoff_t end = cl_index(obj, fio->fi_end); - int result = 0; - - if (fio->fi_end == OBD_OBJECT_EOF) - end = CL_PAGE_EOF; - - result = osc_cache_writeback_range(env, osc, start, end, 0, - fio->fi_mode == CL_FSYNC_DISCARD); - if (result > 0) { - fio->fi_nr_written += result; - result = 0; - } - if (fio->fi_mode == CL_FSYNC_ALL) { - int rc; - - /* we have to wait for writeback to finish before we can - * send OST_SYNC RPC. This is bad because it causes extents - * to be written osc by osc. However, we usually start - * writeback before CL_FSYNC_ALL so this won't have any real - * problem. 
- */ - rc = osc_cache_wait_range(env, osc, start, end); - if (result == 0) - result = rc; - rc = osc_fsync_ost(env, osc, fio); - if (result == 0) - result = rc; - } - - return result; -} - -static void osc_io_fsync_end(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync; - struct cl_object *obj = slice->cis_obj; - pgoff_t start = cl_index(obj, fio->fi_start); - pgoff_t end = cl_index(obj, fio->fi_end); - int result = 0; - - if (fio->fi_mode == CL_FSYNC_LOCAL) { - result = osc_cache_wait_range(env, cl2osc(obj), start, end); - } else if (fio->fi_mode == CL_FSYNC_ALL) { - struct osc_io *oio = cl2osc_io(env, slice); - struct osc_async_cbargs *cbargs = &oio->oi_cbarg; - - wait_for_completion(&cbargs->opc_sync); - if (result == 0) - result = cbargs->opc_rc; - } - slice->cis_io->ci_result = result; -} - -static void osc_io_end(const struct lu_env *env, - const struct cl_io_slice *slice) -{ - struct osc_io *oio = cl2osc_io(env, slice); - - if (oio->oi_active) { - osc_extent_release(env, oio->oi_active); - oio->oi_active = NULL; - } -} - -static const struct cl_io_operations osc_io_ops = { - .op = { - [CIT_READ] = { - .cio_iter_init = osc_io_iter_init, - .cio_iter_fini = osc_io_iter_fini, - .cio_start = osc_io_read_start, - .cio_fini = osc_io_fini - }, - [CIT_WRITE] = { - .cio_iter_init = osc_io_write_iter_init, - .cio_iter_fini = osc_io_write_iter_fini, - .cio_start = osc_io_write_start, - .cio_end = osc_io_end, - .cio_fini = osc_io_fini - }, - [CIT_SETATTR] = { - .cio_iter_init = osc_io_iter_init, - .cio_iter_fini = osc_io_iter_fini, - .cio_start = osc_io_setattr_start, - .cio_end = osc_io_setattr_end - }, - [CIT_DATA_VERSION] = { - .cio_start = osc_io_data_version_start, - .cio_end = osc_io_data_version_end, - }, - [CIT_FAULT] = { - .cio_iter_init = osc_io_iter_init, - .cio_iter_fini = osc_io_iter_fini, - .cio_start = osc_io_fault_start, - .cio_end = osc_io_end, - .cio_fini = osc_io_fini - }, - 
[CIT_FSYNC] = { - .cio_start = osc_io_fsync_start, - .cio_end = osc_io_fsync_end, - .cio_fini = osc_io_fini - }, - [CIT_MISC] = { - .cio_fini = osc_io_fini - } - }, - .cio_read_ahead = osc_io_read_ahead, - .cio_submit = osc_io_submit, - .cio_commit_async = osc_io_commit_async -}; - -/***************************************************************************** - * - * Transfer operations. - * - */ - -int osc_io_init(const struct lu_env *env, - struct cl_object *obj, struct cl_io *io) -{ - struct osc_io *oio = osc_env_io(env); - - CL_IO_SLICE_CLEAN(oio, oi_cl); - cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops); - return 0; -} - -/** @} osc */ diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c deleted file mode 100644 index d93d33dc8dc4..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_lock.c +++ /dev/null @@ -1,1230 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_lock for OSC layer. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_OSC - -/* fid_build_reg_res_name() */ -#include - -#include "osc_cl_internal.h" - -/** \addtogroup osc - * @{ - */ - -/***************************************************************************** - * - * Type conversions. - * - */ - -static const struct cl_lock_operations osc_lock_ops; -static const struct cl_lock_operations osc_lock_lockless_ops; -static void osc_lock_to_lockless(const struct lu_env *env, - struct osc_lock *ols, int force); - -int osc_lock_is_lockless(const struct osc_lock *olck) -{ - return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops); -} - -/** - * Returns a weak pointer to the ldlm lock identified by a handle. Returned - * pointer cannot be dereferenced, as lock is not protected from concurrent - * reclaim. This function is a helper for osc_lock_invariant(). - */ -static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle) -{ - struct ldlm_lock *lock; - - lock = ldlm_handle2lock(handle); - if (lock) - LDLM_LOCK_PUT(lock); - return lock; -} - -/** - * Invariant that has to be true all of the time. 
- */ -static int osc_lock_invariant(struct osc_lock *ols) -{ - struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle); - struct ldlm_lock *olock = ols->ols_dlmlock; - int handle_used = lustre_handle_is_used(&ols->ols_handle); - - if (ergo(osc_lock_is_lockless(ols), - ols->ols_locklessable && !ols->ols_dlmlock)) - return 1; - - /* - * If all the following "ergo"s are true, return 1, otherwise 0 - */ - if (!ergo(olock, handle_used)) - return 0; - - if (!ergo(olock, olock->l_handle.h_cookie == ols->ols_handle.cookie)) - return 0; - - if (!ergo(handle_used, - ergo(lock && olock, lock == olock) && - ergo(!lock, !olock))) - return 0; - /* - * Check that ->ols_handle and ->ols_dlmlock are consistent, but - * take into account that they are set at the different time. - */ - if (!ergo(ols->ols_state == OLS_CANCELLED, - !olock && !handle_used)) - return 0; - /* - * DLM lock is destroyed only after we have seen cancellation - * ast. - */ - if (!ergo(olock && ols->ols_state < OLS_CANCELLED, - !ldlm_is_destroyed(olock))) - return 0; - - if (!ergo(ols->ols_state == OLS_GRANTED, - olock && olock->l_req_mode == olock->l_granted_mode && - ols->ols_hold)) - return 0; - return 1; -} - -/***************************************************************************** - * - * Lock operations. 
- * - */ - -static void osc_lock_fini(const struct lu_env *env, - struct cl_lock_slice *slice) -{ - struct osc_lock *ols = cl2osc_lock(slice); - - LINVRNT(osc_lock_invariant(ols)); - LASSERT(!ols->ols_dlmlock); - - kmem_cache_free(osc_lock_kmem, ols); -} - -static void osc_lock_build_policy(const struct lu_env *env, - const struct cl_lock *lock, - union ldlm_policy_data *policy) -{ - const struct cl_lock_descr *d = &lock->cll_descr; - - osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end); - policy->l_extent.gid = d->cld_gid; -} - -static __u64 osc_enq2ldlm_flags(__u32 enqflags) -{ - __u64 result = 0; - - LASSERT((enqflags & ~CEF_MASK) == 0); - - if (enqflags & CEF_NONBLOCK) - result |= LDLM_FL_BLOCK_NOWAIT; - if (enqflags & CEF_ASYNC) - result |= LDLM_FL_HAS_INTENT; - if (enqflags & CEF_DISCARD_DATA) - result |= LDLM_FL_AST_DISCARD_DATA; - if (enqflags & CEF_PEEK) - result |= LDLM_FL_TEST_LOCK; - if (enqflags & CEF_LOCK_MATCH) - result |= LDLM_FL_MATCH_LOCK; - return result; -} - -/** - * Updates object attributes from a lock value block (lvb) received together - * with the DLM lock reply from the server. Copy of osc_update_enqueue() - * logic. - * - * This can be optimized to not update attributes when lock is a result of a - * local match. - * - * Called under lock and resource spin-locks. 
- */ -static void osc_lock_lvb_update(const struct lu_env *env, - struct osc_object *osc, - struct ldlm_lock *dlmlock, - struct ost_lvb *lvb) -{ - struct cl_object *obj = osc2cl(osc); - struct lov_oinfo *oinfo = osc->oo_oinfo; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - unsigned int valid; - - valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE; - if (!lvb) - lvb = dlmlock->l_lvb_data; - - cl_lvb2attr(attr, lvb); - - cl_object_attr_lock(obj); - if (dlmlock) { - __u64 size; - - check_res_locked(dlmlock->l_resource); - LASSERT(lvb == dlmlock->l_lvb_data); - size = lvb->lvb_size; - - /* Extend KMS up to the end of this lock and no further - * A lock on [x,y] means a KMS of up to y + 1 bytes! - */ - if (size > dlmlock->l_policy_data.l_extent.end) - size = dlmlock->l_policy_data.l_extent.end + 1; - if (size >= oinfo->loi_kms) { - LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu, kms=%llu", - lvb->lvb_size, size); - valid |= CAT_KMS; - attr->cat_kms = size; - } else { - LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu; leaving kms=%llu, end=%llu", - lvb->lvb_size, oinfo->loi_kms, - dlmlock->l_policy_data.l_extent.end); - } - ldlm_lock_allow_match_locked(dlmlock); - } - - cl_object_attr_update(env, obj, attr, valid); - cl_object_attr_unlock(obj); -} - -static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl, - struct lustre_handle *lockh, bool lvb_update) -{ - struct ldlm_lock *dlmlock; - - dlmlock = ldlm_handle2lock_long(lockh, 0); - LASSERT(dlmlock); - - /* lock reference taken by ldlm_handle2lock_long() is - * owned by osc_lock and released in osc_lock_detach() - */ - lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl); - oscl->ols_has_ref = 1; - - LASSERT(!oscl->ols_dlmlock); - oscl->ols_dlmlock = dlmlock; - - /* This may be a matched lock for glimpse request, do not hold - * lock reference in that case. 
- */ - if (!oscl->ols_glimpse) { - /* hold a refc for non glimpse lock which will - * be released in osc_lock_cancel() - */ - lustre_handle_copy(&oscl->ols_handle, lockh); - ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode); - oscl->ols_hold = 1; - } - - /* Lock must have been granted. */ - lock_res_and_lock(dlmlock); - if (dlmlock->l_granted_mode == dlmlock->l_req_mode) { - struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent; - struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr; - - /* extend the lock extent, otherwise it will have problem when - * we decide whether to grant a lockless lock. - */ - descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode); - descr->cld_start = cl_index(descr->cld_obj, ext->start); - descr->cld_end = cl_index(descr->cld_obj, ext->end); - descr->cld_gid = ext->gid; - - /* no lvb update for matched lock */ - if (lvb_update) { - LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY); - osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj), - dlmlock, NULL); - } - LINVRNT(osc_lock_invariant(oscl)); - } - unlock_res_and_lock(dlmlock); - - LASSERT(oscl->ols_state != OLS_GRANTED); - oscl->ols_state = OLS_GRANTED; -} - -/** - * Lock upcall function that is executed either when a reply to ENQUEUE rpc is - * received from a server, or after osc_enqueue_base() matched a local DLM - * lock. - */ -static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh, - int errcode) -{ - struct osc_lock *oscl = cookie; - struct cl_lock_slice *slice = &oscl->ols_cl; - struct lu_env *env; - int rc; - u16 refcheck; - - env = cl_env_get(&refcheck); - /* should never happen, similar to osc_ldlm_blocking_ast(). 
*/ - LASSERT(!IS_ERR(env)); - - rc = ldlm_error2errno(errcode); - if (oscl->ols_state == OLS_ENQUEUED) { - oscl->ols_state = OLS_UPCALL_RECEIVED; - } else if (oscl->ols_state == OLS_CANCELLED) { - rc = -EIO; - } else { - CERROR("Impossible state: %d\n", oscl->ols_state); - LBUG(); - } - - if (rc == 0) - osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK); - - /* Error handling, some errors are tolerable. */ - if (oscl->ols_locklessable && rc == -EUSERS) { - /* This is a tolerable error, turn this lock into - * lockless lock. - */ - osc_object_set_contended(cl2osc(slice->cls_obj)); - LASSERT(slice->cls_ops == &osc_lock_ops); - - /* Change this lock to ldlmlock-less lock. */ - osc_lock_to_lockless(env, oscl, 1); - oscl->ols_state = OLS_GRANTED; - rc = 0; - } else if (oscl->ols_glimpse && rc == -ENAVAIL) { - LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY); - osc_lock_lvb_update(env, cl2osc(slice->cls_obj), - NULL, &oscl->ols_lvb); - /* Hide the error. */ - rc = 0; - } - - if (oscl->ols_owner) - cl_sync_io_note(env, oscl->ols_owner, rc); - cl_env_put(env, &refcheck); - - return rc; -} - -static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh, - int errcode) -{ - struct osc_object *osc = cookie; - struct ldlm_lock *dlmlock; - struct lu_env *env; - u16 refcheck; - - env = cl_env_get(&refcheck); - LASSERT(!IS_ERR(env)); - - if (errcode == ELDLM_LOCK_MATCHED) { - errcode = ELDLM_OK; - goto out; - } - - if (errcode != ELDLM_OK) - goto out; - - dlmlock = ldlm_handle2lock(lockh); - LASSERT(dlmlock); - - lock_res_and_lock(dlmlock); - LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode); - - /* there is no osc_lock associated with AGL lock */ - osc_lock_lvb_update(env, osc, dlmlock, NULL); - - unlock_res_and_lock(dlmlock); - LDLM_LOCK_PUT(dlmlock); - -out: - cl_object_put(env, osc2cl(osc)); - cl_env_put(env, &refcheck); - return ldlm_error2errno(errcode); -} - -static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end, - enum 
cl_lock_mode mode, int discard) -{ - struct lu_env *env; - u16 refcheck; - int rc = 0; - int rc2 = 0; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - if (mode == CLM_WRITE) { - rc = osc_cache_writeback_range(env, obj, start, end, 1, - discard); - CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n", - obj, start, end, rc, - discard ? "discarded" : "written back"); - if (rc > 0) - rc = 0; - } - - rc2 = osc_lock_discard_pages(env, obj, start, end, mode); - if (rc == 0 && rc2 < 0) - rc = rc2; - - cl_env_put(env, &refcheck); - return rc; -} - -/** - * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock - * and ldlm_lock caches. - */ -static int osc_dlm_blocking_ast0(const struct lu_env *env, - struct ldlm_lock *dlmlock, - void *data, int flag) -{ - struct cl_object *obj = NULL; - int result = 0; - int discard; - enum cl_lock_mode mode = CLM_READ; - - LASSERT(flag == LDLM_CB_CANCELING); - - lock_res_and_lock(dlmlock); - if (dlmlock->l_granted_mode != dlmlock->l_req_mode) { - dlmlock->l_ast_data = NULL; - unlock_res_and_lock(dlmlock); - return 0; - } - - discard = ldlm_is_discard_data(dlmlock); - if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP)) - mode = CLM_WRITE; - - if (dlmlock->l_ast_data) { - obj = osc2cl(dlmlock->l_ast_data); - dlmlock->l_ast_data = NULL; - - cl_object_get(obj); - } - - unlock_res_and_lock(dlmlock); - - /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or - * the object has been destroyed. - */ - if (obj) { - struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - __u64 old_kms; - - /* Destroy pages covered by the extent of the DLM lock */ - result = osc_lock_flush(cl2osc(obj), - cl_index(obj, extent->start), - cl_index(obj, extent->end), - mode, discard); - - /* losing a lock, update kms */ - lock_res_and_lock(dlmlock); - cl_object_attr_lock(obj); - /* Must get the value under the lock to avoid race. 
*/ - old_kms = cl2osc(obj)->oo_oinfo->loi_kms; - /* Update the kms. Need to loop all granted locks. - * Not a problem for the client - */ - attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); - - cl_object_attr_update(env, obj, attr, CAT_KMS); - cl_object_attr_unlock(obj); - unlock_res_and_lock(dlmlock); - - cl_object_put(env, obj); - } - return result; -} - -/** - * Blocking ast invoked by ldlm when dlm lock is either blocking progress of - * some other lock, or is canceled. This function is installed as a - * ldlm_lock::l_blocking_ast() for client extent locks. - * - * Control flow is tricky, because ldlm uses the same call-back - * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's. - * - * \param dlmlock lock for which ast occurred. - * - * \param new description of a conflicting lock in case of blocking ast. - * - * \param data value of dlmlock->l_ast_data - * - * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish - * cancellation and blocking ast's. - * - * Possible use cases: - * - * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel - * lock due to lock lru pressure, or explicit user request to purge - * locks. - * - * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify - * us that dlmlock conflicts with another lock that some client is - * enqueing. Lock is canceled. - * - * - cl_lock_cancel() is called. osc_lock_cancel() calls - * ldlm_cli_cancel() that calls - * - * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) - * - * recursively entering osc_ldlm_blocking_ast(). 
- * - * - client cancels lock voluntary (e.g., as a part of early cancellation): - * - * cl_lock_cancel()-> - * osc_lock_cancel()-> - * ldlm_cli_cancel()-> - * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) - * - */ -static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock, - struct ldlm_lock_desc *new, void *data, - int flag) -{ - int result = 0; - - switch (flag) { - case LDLM_CB_BLOCKING: { - struct lustre_handle lockh; - - ldlm_lock2handle(dlmlock, &lockh); - result = ldlm_cli_cancel(&lockh, LCF_ASYNC); - if (result == -ENODATA) - result = 0; - break; - } - case LDLM_CB_CANCELING: { - struct lu_env *env; - u16 refcheck; - - /* - * This can be called in the context of outer IO, e.g., - * - * osc_enqueue_base()->... - * ->ldlm_prep_elc_req()->... - * ->ldlm_cancel_callback()->... - * ->osc_ldlm_blocking_ast() - * - * new environment has to be created to not corrupt outer - * context. - */ - env = cl_env_get(&refcheck); - if (IS_ERR(env)) { - result = PTR_ERR(env); - break; - } - - result = osc_dlm_blocking_ast0(env, dlmlock, data, flag); - cl_env_put(env, &refcheck); - break; - } - default: - LBUG(); - } - return result; -} - -static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) -{ - struct ptlrpc_request *req = data; - struct lu_env *env; - struct ost_lvb *lvb; - struct req_capsule *cap; - struct cl_object *obj = NULL; - int result; - u16 refcheck; - - LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK); - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) { - result = PTR_ERR(env); - goto out; - } - - lock_res_and_lock(dlmlock); - if (dlmlock->l_ast_data) { - obj = osc2cl(dlmlock->l_ast_data); - cl_object_get(obj); - } - unlock_res_and_lock(dlmlock); - - if (obj) { - /* Do not grab the mutex of cl_lock for glimpse. - * See LU-1274 for details. - * BTW, it's okay for cl_lock to be cancelled during - * this period because server can handle this race. - * See ldlm_server_glimpse_ast() for details. 
- * cl_lock_mutex_get(env, lock); - */ - cap = &req->rq_pill; - req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); - req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, - sizeof(*lvb)); - result = req_capsule_server_pack(cap); - if (result == 0) { - lvb = req_capsule_server_get(cap, &RMF_DLM_LVB); - result = cl_object_glimpse(env, obj, lvb); - } - if (!exp_connect_lvb_type(req->rq_export)) { - req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, - sizeof(struct ost_lvb_v1), - RCL_SERVER); - } - cl_object_put(env, obj); - } else { - /* - * These errors are normal races, so we don't want to - * fill the console with messages by calling - * ptlrpc_error() - */ - lustre_pack_reply(req, 1, NULL, NULL); - result = -ELDLM_NO_LOCK_DATA; - } - cl_env_put(env, &refcheck); - -out: - req->rq_status = result; - return result; -} - -static int weigh_cb(const struct lu_env *env, struct cl_io *io, - struct osc_page *ops, void *cbdata) -{ - struct cl_page *page = ops->ops_cl.cpl_page; - - if (cl_page_is_vmlocked(env, page) || - PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage) - ) - return CLP_GANG_ABORT; - - *(pgoff_t *)cbdata = osc_index(ops) + 1; - return CLP_GANG_OKAY; -} - -static unsigned long osc_lock_weight(const struct lu_env *env, - struct osc_object *oscobj, - struct ldlm_extent *extent) -{ - struct cl_io *io = &osc_env_info(env)->oti_io; - struct cl_object *obj = cl_object_top(&oscobj->oo_cl); - pgoff_t page_index; - int result; - - io->ci_obj = obj; - io->ci_ignore_layout = 1; - result = cl_io_init(env, io, CIT_MISC, io->ci_obj); - if (result != 0) - return result; - - page_index = cl_index(obj, extent->start); - do { - result = osc_page_gang_lookup(env, io, oscobj, - page_index, - cl_index(obj, extent->end), - weigh_cb, (void *)&page_index); - if (result == CLP_GANG_ABORT) - break; - if (result == CLP_GANG_RESCHED) - cond_resched(); - } while (result != CLP_GANG_OKAY); - cl_io_fini(env, io); - - return result == CLP_GANG_ABORT ? 
1 : 0; -} - -/** - * Get the weight of dlm lock for early cancellation. - */ -unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) -{ - struct lu_env *env; - struct osc_object *obj; - struct osc_lock *oscl; - unsigned long weight; - bool found = false; - u16 refcheck; - - might_sleep(); - /* - * osc_ldlm_weigh_ast has a complex context since it might be called - * because of lock canceling, or from user's input. We have to make - * a new environment for it. Probably it is implementation safe to use - * the upper context because cl_lock_put don't modify environment - * variables. But just in case .. - */ - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - /* Mostly because lack of memory, do not eliminate this lock */ - return 1; - - LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT); - obj = dlmlock->l_ast_data; - if (!obj) { - weight = 1; - goto out; - } - - spin_lock(&obj->oo_ol_spin); - list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) { - if (oscl->ols_dlmlock && oscl->ols_dlmlock != dlmlock) - continue; - found = true; - } - spin_unlock(&obj->oo_ol_spin); - if (found) { - /* - * If the lock is being used by an IO, definitely not cancel it. - */ - weight = 1; - goto out; - } - - weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent); - -out: - cl_env_put(env, &refcheck); - return weight; -} - -static void osc_lock_build_einfo(const struct lu_env *env, - const struct cl_lock *lock, - struct osc_object *osc, - struct ldlm_enqueue_info *einfo) -{ - einfo->ei_type = LDLM_EXTENT; - einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode); - einfo->ei_cb_bl = osc_ldlm_blocking_ast; - einfo->ei_cb_cp = ldlm_completion_ast; - einfo->ei_cb_gl = osc_ldlm_glimpse_ast; - einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */ -} - -/** - * Determine if the lock should be converted into a lockless lock. 
- * - * Steps to check: - * - if the lock has an explicit requirement for a non-lockless lock; - * - if the io lock request type ci_lockreq; - * - send the enqueue rpc to ost to make the further decision; - * - special treat to truncate lockless lock - * - * Additional policy can be implemented here, e.g., never do lockless-io - * for large extents. - */ -static void osc_lock_to_lockless(const struct lu_env *env, - struct osc_lock *ols, int force) -{ - struct cl_lock_slice *slice = &ols->ols_cl; - - LASSERT(ols->ols_state == OLS_NEW || - ols->ols_state == OLS_UPCALL_RECEIVED); - - if (force) { - ols->ols_locklessable = 1; - slice->cls_ops = &osc_lock_lockless_ops; - } else { - struct osc_io *oio = osc_env_io(env); - struct cl_io *io = oio->oi_cl.cis_io; - struct cl_object *obj = slice->cls_obj; - struct osc_object *oob = cl2osc(obj); - const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev); - struct obd_connect_data *ocd; - - LASSERT(io->ci_lockreq == CILR_MANDATORY || - io->ci_lockreq == CILR_MAYBE || - io->ci_lockreq == CILR_NEVER); - - ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data; - ols->ols_locklessable = (io->ci_type != CIT_SETATTR) && - (io->ci_lockreq == CILR_MAYBE) && - (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK); - if (io->ci_lockreq == CILR_NEVER || - /* lockless IO */ - (ols->ols_locklessable && osc_object_is_contended(oob)) || - /* lockless truncate */ - (cl_io_is_trunc(io) && - (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) && - osd->od_lockless_truncate)) { - ols->ols_locklessable = 1; - slice->cls_ops = &osc_lock_lockless_ops; - } - } - LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols))); -} - -static bool osc_lock_compatible(const struct osc_lock *qing, - const struct osc_lock *qed) -{ - struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr; - struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr; - - if (qed->ols_glimpse) - return true; - - if (qing_descr->cld_mode == CLM_READ && 
qed_descr->cld_mode == CLM_READ) - return true; - - if (qed->ols_state < OLS_GRANTED) - return true; - - if (qed_descr->cld_mode >= qing_descr->cld_mode && - qed_descr->cld_start <= qing_descr->cld_start && - qed_descr->cld_end >= qing_descr->cld_end) - return true; - - return false; -} - -static void osc_lock_wake_waiters(const struct lu_env *env, - struct osc_object *osc, - struct osc_lock *oscl) -{ - spin_lock(&osc->oo_ol_spin); - list_del_init(&oscl->ols_nextlock_oscobj); - spin_unlock(&osc->oo_ol_spin); - - spin_lock(&oscl->ols_lock); - while (!list_empty(&oscl->ols_waiting_list)) { - struct osc_lock *scan; - - scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock, - ols_wait_entry); - list_del_init(&scan->ols_wait_entry); - - cl_sync_io_note(env, scan->ols_owner, 0); - } - spin_unlock(&oscl->ols_lock); -} - -static int osc_lock_enqueue_wait(const struct lu_env *env, - struct osc_object *obj, - struct osc_lock *oscl) -{ - struct osc_lock *tmp_oscl; - struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr; - struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor; - int rc = 0; - - spin_lock(&obj->oo_ol_spin); - list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list); - -restart: - list_for_each_entry(tmp_oscl, &obj->oo_ol_list, - ols_nextlock_oscobj) { - struct cl_lock_descr *descr; - - if (tmp_oscl == oscl) - break; - - descr = &tmp_oscl->ols_cl.cls_lock->cll_descr; - if (descr->cld_start > need->cld_end || - descr->cld_end < need->cld_start) - continue; - - /* We're not supposed to give up group lock */ - if (descr->cld_mode == CLM_GROUP) - break; - - if (!osc_lock_is_lockless(oscl) && - osc_lock_compatible(oscl, tmp_oscl)) - continue; - - /* wait for conflicting lock to be canceled */ - cl_sync_io_init(waiter, 1, cl_sync_io_end); - oscl->ols_owner = waiter; - - spin_lock(&tmp_oscl->ols_lock); - /* add oscl into tmp's ols_waiting list */ - list_add_tail(&oscl->ols_wait_entry, - &tmp_oscl->ols_waiting_list); - 
spin_unlock(&tmp_oscl->ols_lock); - - spin_unlock(&obj->oo_ol_spin); - rc = cl_sync_io_wait(env, waiter, 0); - spin_lock(&obj->oo_ol_spin); - if (rc < 0) - break; - - oscl->ols_owner = NULL; - goto restart; - } - spin_unlock(&obj->oo_ol_spin); - - return rc; -} - -/** - * Implementation of cl_lock_operations::clo_enqueue() method for osc - * layer. This initiates ldlm enqueue: - * - * - cancels conflicting locks early (osc_lock_enqueue_wait()); - * - * - calls osc_enqueue_base() to do actual enqueue. - * - * osc_enqueue_base() is supplied with an upcall function that is executed - * when lock is received either after a local cached ldlm lock is matched, or - * when a reply from the server is received. - * - * This function does not wait for the network communication to complete. - */ -static int osc_lock_enqueue(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_io *unused, struct cl_sync_io *anchor) -{ - struct osc_thread_info *info = osc_env_info(env); - struct osc_io *oio = osc_env_io(env); - struct osc_object *osc = cl2osc(slice->cls_obj); - struct osc_lock *oscl = cl2osc_lock(slice); - struct cl_lock *lock = slice->cls_lock; - struct ldlm_res_id *resname = &info->oti_resname; - union ldlm_policy_data *policy = &info->oti_policy; - osc_enqueue_upcall_f upcall = osc_lock_upcall; - void *cookie = oscl; - bool async = false; - int result; - - LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ), - "lock = %p, ols = %p\n", lock, oscl); - - if (oscl->ols_state == OLS_GRANTED) - return 0; - - if (oscl->ols_flags & LDLM_FL_TEST_LOCK) - goto enqueue_base; - - if (oscl->ols_glimpse) { - LASSERT(equi(oscl->ols_agl, !anchor)); - async = true; - goto enqueue_base; - } - - result = osc_lock_enqueue_wait(env, osc, oscl); - if (result < 0) - goto out; - - /* we can grant lockless lock right after all conflicting locks - * are canceled. 
- */ - if (osc_lock_is_lockless(oscl)) { - oscl->ols_state = OLS_GRANTED; - oio->oi_lockless = 1; - return 0; - } - -enqueue_base: - oscl->ols_state = OLS_ENQUEUED; - if (anchor) { - atomic_inc(&anchor->csi_sync_nr); - oscl->ols_owner = anchor; - } - - /** - * DLM lock's ast data must be osc_object; - * if glimpse or AGL lock, async of osc_enqueue_base() must be true, - * DLM's enqueue callback set to osc_lock_upcall() with cookie as - * osc_lock. - */ - ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname); - osc_lock_build_policy(env, lock, policy); - if (oscl->ols_agl) { - oscl->ols_einfo.ei_cbdata = NULL; - /* hold a reference for callback */ - cl_object_get(osc2cl(osc)); - upcall = osc_lock_upcall_agl; - cookie = osc; - } - result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags, - policy, &oscl->ols_lvb, - osc->oo_oinfo->loi_kms_valid, - upcall, cookie, - &oscl->ols_einfo, PTLRPCD_SET, async, - oscl->ols_agl); - if (!result) { - if (osc_lock_is_lockless(oscl)) { - oio->oi_lockless = 1; - } else if (!async) { - LASSERT(oscl->ols_state == OLS_GRANTED); - LASSERT(oscl->ols_hold); - LASSERT(oscl->ols_dlmlock); - } - } else if (oscl->ols_agl) { - cl_object_put(env, osc2cl(osc)); - result = 0; - } - -out: - if (result < 0) { - oscl->ols_state = OLS_CANCELLED; - osc_lock_wake_waiters(env, osc, oscl); - - if (anchor) - cl_sync_io_note(env, anchor, result); - } - return result; -} - -/** - * Breaks a link between osc_lock and dlm_lock. - */ -static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) -{ - struct ldlm_lock *dlmlock; - - dlmlock = olck->ols_dlmlock; - if (!dlmlock) - return; - - if (olck->ols_hold) { - olck->ols_hold = 0; - ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode); - olck->ols_handle.cookie = 0ULL; - } - - olck->ols_dlmlock = NULL; - - /* release a reference taken in osc_lock_upcall(). 
*/ - LASSERT(olck->ols_has_ref); - lu_ref_del(&dlmlock->l_reference, "osc_lock", olck); - LDLM_LOCK_RELEASE(dlmlock); - olck->ols_has_ref = 0; -} - -/** - * Implements cl_lock_operations::clo_cancel() method for osc layer. This is - * called (as part of cl_lock_cancel()) when lock is canceled either voluntary - * (LRU pressure, early cancellation, umount, etc.) or due to the conflict - * with some other lock some where in the cluster. This function does the - * following: - * - * - invalidates all pages protected by this lock (after sending dirty - * ones to the server, as necessary); - * - * - decref's underlying ldlm lock; - * - * - cancels ldlm lock (ldlm_cli_cancel()). - */ -static void osc_lock_cancel(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_object *obj = cl2osc(slice->cls_obj); - struct osc_lock *oscl = cl2osc_lock(slice); - - LINVRNT(osc_lock_invariant(oscl)); - - osc_lock_detach(env, oscl); - oscl->ols_state = OLS_CANCELLED; - oscl->ols_flags &= ~LDLM_FL_LVB_READY; - - osc_lock_wake_waiters(env, obj, oscl); -} - -static int osc_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct cl_lock_slice *slice) -{ - struct osc_lock *lock = cl2osc_lock(slice); - - (*p)(env, cookie, "%p %#16llx %#llx %d %p ", - lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie, - lock->ols_state, lock->ols_owner); - osc_lvb_print(env, cookie, p, &lock->ols_lvb); - return 0; -} - -static const struct cl_lock_operations osc_lock_ops = { - .clo_fini = osc_lock_fini, - .clo_enqueue = osc_lock_enqueue, - .clo_cancel = osc_lock_cancel, - .clo_print = osc_lock_print, -}; - -static void osc_lock_lockless_cancel(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_lock *ols = cl2osc_lock(slice); - struct osc_object *osc = cl2osc(slice->cls_obj); - struct cl_lock_descr *descr = &slice->cls_lock->cll_descr; - int result; - - LASSERT(!ols->ols_dlmlock); - result = osc_lock_flush(osc, 
descr->cld_start, descr->cld_end, - descr->cld_mode, 0); - if (result) - CERROR("Pages for lockless lock %p were not purged(%d)\n", - ols, result); - - osc_lock_wake_waiters(env, osc, ols); -} - -static const struct cl_lock_operations osc_lock_lockless_ops = { - .clo_fini = osc_lock_fini, - .clo_enqueue = osc_lock_enqueue, - .clo_cancel = osc_lock_lockless_cancel, - .clo_print = osc_lock_print -}; - -static void osc_lock_set_writer(const struct lu_env *env, - const struct cl_io *io, - struct cl_object *obj, struct osc_lock *oscl) -{ - struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr; - pgoff_t io_start; - pgoff_t io_end; - - if (!cl_object_same(io->ci_obj, obj)) - return; - - if (likely(io->ci_type == CIT_WRITE)) { - io_start = cl_index(obj, io->u.ci_rw.crw_pos); - io_end = cl_index(obj, io->u.ci_rw.crw_pos + - io->u.ci_rw.crw_count - 1); - if (cl_io_is_append(io)) { - io_start = 0; - io_end = CL_PAGE_EOF; - } - } else { - LASSERT(cl_io_is_mkwrite(io)); - io_start = io->u.ci_fault.ft_index; - io_end = io->u.ci_fault.ft_index; - } - - if (descr->cld_mode >= CLM_WRITE && - descr->cld_start <= io_start && descr->cld_end >= io_end) { - struct osc_io *oio = osc_env_io(env); - - /* There must be only one lock to match the write region */ - LASSERT(!oio->oi_write_osclock); - oio->oi_write_osclock = oscl; - } -} - -int osc_lock_init(const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, - const struct cl_io *io) -{ - struct osc_lock *oscl; - __u32 enqflags = lock->cll_descr.cld_enq_flags; - - oscl = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS); - if (!oscl) - return -ENOMEM; - - oscl->ols_state = OLS_NEW; - spin_lock_init(&oscl->ols_lock); - INIT_LIST_HEAD(&oscl->ols_waiting_list); - INIT_LIST_HEAD(&oscl->ols_wait_entry); - INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj); - - oscl->ols_flags = osc_enq2ldlm_flags(enqflags); - oscl->ols_agl = !!(enqflags & CEF_AGL); - if (oscl->ols_agl) - oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT; - if 
(oscl->ols_flags & LDLM_FL_HAS_INTENT) { - oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED; - oscl->ols_glimpse = 1; - } - osc_lock_build_einfo(env, lock, cl2osc(obj), &oscl->ols_einfo); - - cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops); - - if (!(enqflags & CEF_MUST)) - /* try to convert this lock to a lockless lock */ - osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER)); - if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA)) - oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; - - if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io)) - osc_lock_set_writer(env, io, obj, oscl); - - - LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx", - lock, oscl, oscl->ols_flags); - - return 0; -} - -/** - * Finds an existing lock covering given index and optionally different from a - * given \a except lock. - */ -struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, - struct osc_object *obj, pgoff_t index, - enum osc_dap_flags dap_flags) -{ - struct osc_thread_info *info = osc_env_info(env); - struct ldlm_res_id *resname = &info->oti_resname; - union ldlm_policy_data *policy = &info->oti_policy; - struct lustre_handle lockh; - struct ldlm_lock *lock = NULL; - enum ldlm_mode mode; - __u64 flags; - - ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname); - osc_index2policy(policy, osc2cl(obj), index, index); - policy->l_extent.gid = LDLM_GID_ANY; - - flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING; - if (dap_flags & OSC_DAP_FL_TEST_LOCK) - flags |= LDLM_FL_TEST_LOCK; - - /* - * It is fine to match any group lock since there could be only one - * with a uniq gid and it conflicts with all other lock modes too - */ -again: - mode = osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy, - LCK_PR | LCK_PW | LCK_GROUP, &flags, obj, &lockh, - dap_flags & OSC_DAP_FL_CANCELING); - if (mode != 0) { - lock = ldlm_handle2lock(&lockh); - /* RACE: the lock is cancelled so let's try again */ - if (unlikely(!lock)) - goto again; - } - return lock; -} - -/** 
@} osc */ diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c deleted file mode 100644 index 84240181c7ea..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_object.c +++ /dev/null @@ -1,473 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_object for OSC layer. - * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_OSC - -#include "osc_cl_internal.h" - -/** \addtogroup osc - * @{ - */ - -/***************************************************************************** - * - * Type conversions. 
- * - */ - -static struct lu_object *osc2lu(struct osc_object *osc) -{ - return &osc->oo_cl.co_lu; -} - -static struct osc_object *lu2osc(const struct lu_object *obj) -{ - LINVRNT(osc_is_object(obj)); - return container_of(obj, struct osc_object, oo_cl.co_lu); -} - -/***************************************************************************** - * - * Object operations. - * - */ - -static int osc_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf) -{ - struct osc_object *osc = lu2osc(obj); - const struct cl_object_conf *cconf = lu2cl_conf(conf); - - osc->oo_oinfo = cconf->u.coc_oinfo; - INIT_LIST_HEAD(&osc->oo_ready_item); - INIT_LIST_HEAD(&osc->oo_hp_ready_item); - INIT_LIST_HEAD(&osc->oo_write_item); - INIT_LIST_HEAD(&osc->oo_read_item); - - atomic_set(&osc->oo_nr_ios, 0); - init_waitqueue_head(&osc->oo_io_waitq); - - osc->oo_root.rb_node = NULL; - INIT_LIST_HEAD(&osc->oo_hp_exts); - INIT_LIST_HEAD(&osc->oo_urgent_exts); - INIT_LIST_HEAD(&osc->oo_rpc_exts); - INIT_LIST_HEAD(&osc->oo_reading_exts); - atomic_set(&osc->oo_nr_reads, 0); - atomic_set(&osc->oo_nr_writes, 0); - spin_lock_init(&osc->oo_lock); - spin_lock_init(&osc->oo_tree_lock); - spin_lock_init(&osc->oo_ol_spin); - INIT_LIST_HEAD(&osc->oo_ol_list); - - cl_object_page_init(lu2cl(obj), sizeof(struct osc_page)); - - return 0; -} - -static void osc_object_free(const struct lu_env *env, struct lu_object *obj) -{ - struct osc_object *osc = lu2osc(obj); - - LASSERT(list_empty(&osc->oo_ready_item)); - LASSERT(list_empty(&osc->oo_hp_ready_item)); - LASSERT(list_empty(&osc->oo_write_item)); - LASSERT(list_empty(&osc->oo_read_item)); - - LASSERT(!osc->oo_root.rb_node); - LASSERT(list_empty(&osc->oo_hp_exts)); - LASSERT(list_empty(&osc->oo_urgent_exts)); - LASSERT(list_empty(&osc->oo_rpc_exts)); - LASSERT(list_empty(&osc->oo_reading_exts)); - LASSERT(atomic_read(&osc->oo_nr_reads) == 0); - LASSERT(atomic_read(&osc->oo_nr_writes) == 0); - 
LASSERT(list_empty(&osc->oo_ol_list)); - LASSERT(!atomic_read(&osc->oo_nr_ios)); - - lu_object_fini(obj); - kmem_cache_free(osc_object_kmem, osc); -} - -int osc_lvb_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct ost_lvb *lvb) -{ - return (*p)(env, cookie, "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu", - lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime, - lvb->lvb_ctime, lvb->lvb_blocks); -} - -static int osc_object_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *obj) -{ - struct osc_object *osc = lu2osc(obj); - struct lov_oinfo *oinfo = osc->oo_oinfo; - struct osc_async_rc *ar = &oinfo->loi_ar; - - (*p)(env, cookie, "id: " DOSTID " idx: %d gen: %d kms_valid: %u kms %llu rc: %d force_sync: %d min_xid: %llu ", - POSTID(&oinfo->loi_oi), oinfo->loi_ost_idx, - oinfo->loi_ost_gen, oinfo->loi_kms_valid, oinfo->loi_kms, - ar->ar_rc, ar->ar_force_sync, ar->ar_min_xid); - osc_lvb_print(env, cookie, p, &oinfo->loi_lvb); - return 0; -} - -static int osc_attr_get(const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr) -{ - struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo; - - cl_lvb2attr(attr, &oinfo->loi_lvb); - attr->cat_kms = oinfo->loi_kms_valid ? 
oinfo->loi_kms : 0; - return 0; -} - -static int osc_attr_update(const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned int valid) -{ - struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo; - struct ost_lvb *lvb = &oinfo->loi_lvb; - - if (valid & CAT_SIZE) - lvb->lvb_size = attr->cat_size; - if (valid & CAT_MTIME) - lvb->lvb_mtime = attr->cat_mtime; - if (valid & CAT_ATIME) - lvb->lvb_atime = attr->cat_atime; - if (valid & CAT_CTIME) - lvb->lvb_ctime = attr->cat_ctime; - if (valid & CAT_BLOCKS) - lvb->lvb_blocks = attr->cat_blocks; - if (valid & CAT_KMS) { - CDEBUG(D_CACHE, "set kms from %llu to %llu\n", - oinfo->loi_kms, (__u64)attr->cat_kms); - loi_kms_set(oinfo, attr->cat_kms); - } - return 0; -} - -static int osc_object_glimpse(const struct lu_env *env, - const struct cl_object *obj, struct ost_lvb *lvb) -{ - struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo; - - lvb->lvb_size = oinfo->loi_kms; - lvb->lvb_blocks = oinfo->loi_lvb.lvb_blocks; - return 0; -} - -static int osc_object_ast_clear(struct ldlm_lock *lock, void *data) -{ - if (lock->l_ast_data == data) - lock->l_ast_data = NULL; - return LDLM_ITER_CONTINUE; -} - -static int osc_object_prune(const struct lu_env *env, struct cl_object *obj) -{ - struct osc_object *osc = cl2osc(obj); - struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname; - - /* DLM locks don't hold a reference of osc_object so we have to - * clear it before the object is being destroyed. 
- */ - ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname); - ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname, - osc_object_ast_clear, osc); - return 0; -} - -static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj, - struct ll_fiemap_info_key *fmkey, - struct fiemap *fiemap, size_t *buflen) -{ - struct obd_export *exp = osc_export(cl2osc(obj)); - union ldlm_policy_data policy; - struct ptlrpc_request *req; - struct lustre_handle lockh; - struct ldlm_res_id resid; - enum ldlm_mode mode = 0; - struct fiemap *reply; - char *tmp; - int rc; - - fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi; - if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC)) - goto skip_locking; - - policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK; - - if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <= - fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1) - policy.l_extent.end = OBD_OBJECT_EOF; - else - policy.l_extent.end = (fmkey->lfik_fiemap.fm_start + - fmkey->lfik_fiemap.fm_length + - PAGE_SIZE - 1) & PAGE_MASK; - - ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid); - mode = ldlm_lock_match(exp->exp_obd->obd_namespace, - LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY, - &resid, LDLM_EXTENT, &policy, - LCK_PR | LCK_PW, &lockh, 0); - if (mode) { /* lock is cached on client */ - if (mode != LCK_PR) { - ldlm_lock_addref(&lockh, LCK_PR); - ldlm_lock_decref(&lockh, LCK_PW); - } - } else { /* no cached lock, needs acquire lock on server side */ - fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS; - fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK; - } - -skip_locking: - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_OST_GET_INFO_FIEMAP); - if (!req) { - rc = -ENOMEM; - goto drop_lock; - } - - req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT, - sizeof(*fmkey)); - req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT, - *buflen); - req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER, - *buflen); - - rc = 
ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO); - if (rc) { - ptlrpc_request_free(req); - goto drop_lock; - } - tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY); - memcpy(tmp, fmkey, sizeof(*fmkey)); - tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL); - memcpy(tmp, fiemap, *buflen); - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - if (rc) - goto fini_req; - - reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL); - if (!reply) { - rc = -EPROTO; - goto fini_req; - } - - memcpy(fiemap, reply, *buflen); -fini_req: - ptlrpc_req_finished(req); -drop_lock: - if (mode) - ldlm_lock_decref(&lockh, LCK_PR); - return rc; -} - -void osc_object_set_contended(struct osc_object *obj) -{ - obj->oo_contention_time = jiffies; - /* mb(); */ - obj->oo_contended = 1; -} - -void osc_object_clear_contended(struct osc_object *obj) -{ - obj->oo_contended = 0; -} - -int osc_object_is_contended(struct osc_object *obj) -{ - struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev); - int osc_contention_time = dev->od_contention_time; - unsigned long cur_time = jiffies; - unsigned long retry_time; - - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION)) - return 1; - - if (!obj->oo_contended) - return 0; - - /* - * I like copy-paste. the code is copied from - * ll_file_is_contended. - */ - retry_time = obj->oo_contention_time + osc_contention_time * HZ; - if (time_after(cur_time, retry_time)) { - osc_object_clear_contended(obj); - return 0; - } - return 1; -} - -/** - * Implementation of struct cl_object_operations::coo_req_attr_set() for osc - * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq - * fields. 
- */ -static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj, - struct cl_req_attr *attr) -{ - u64 flags = attr->cra_flags; - struct lov_oinfo *oinfo; - struct ost_lvb *lvb; - struct obdo *oa; - - oinfo = cl2osc(obj)->oo_oinfo; - lvb = &oinfo->loi_lvb; - oa = attr->cra_oa; - - if (flags & OBD_MD_FLMTIME) { - oa->o_mtime = lvb->lvb_mtime; - oa->o_valid |= OBD_MD_FLMTIME; - } - if (flags & OBD_MD_FLATIME) { - oa->o_atime = lvb->lvb_atime; - oa->o_valid |= OBD_MD_FLATIME; - } - if (flags & OBD_MD_FLCTIME) { - oa->o_ctime = lvb->lvb_ctime; - oa->o_valid |= OBD_MD_FLCTIME; - } - if (flags & OBD_MD_FLGROUP) { - ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi)); - oa->o_valid |= OBD_MD_FLGROUP; - } - if (flags & OBD_MD_FLID) { - int rc; - - rc = ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi)); - if (rc) { - CERROR("Bad %llu to set " DOSTID " : rc %d\n", - (unsigned long long)ostid_id(&oinfo->loi_oi), - POSTID(&oa->o_oi), rc); - } - oa->o_valid |= OBD_MD_FLID; - } - if (flags & OBD_MD_FLHANDLE) { - struct ldlm_lock *lock; - struct osc_page *opg; - - opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj)); - lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg), - OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING); - if (!lock && !opg->ops_srvlock) { - struct ldlm_resource *res; - struct ldlm_res_id *resname; - - CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page, - "uncovered page!\n"); - - resname = &osc_env_info(env)->oti_resname; - ostid_build_res_name(&oinfo->loi_oi, resname); - res = ldlm_resource_get( - osc_export(cl2osc(obj))->exp_obd->obd_namespace, - NULL, resname, LDLM_EXTENT, 0); - ldlm_resource_dump(D_ERROR, res); - - LBUG(); - } - - /* check for lockless io. 
*/ - if (lock) { - oa->o_handle = lock->l_remote_handle; - oa->o_valid |= OBD_MD_FLHANDLE; - LDLM_LOCK_PUT(lock); - } - } -} - -static const struct cl_object_operations osc_ops = { - .coo_page_init = osc_page_init, - .coo_lock_init = osc_lock_init, - .coo_io_init = osc_io_init, - .coo_attr_get = osc_attr_get, - .coo_attr_update = osc_attr_update, - .coo_glimpse = osc_object_glimpse, - .coo_prune = osc_object_prune, - .coo_fiemap = osc_object_fiemap, - .coo_req_attr_set = osc_req_attr_set -}; - -static const struct lu_object_operations osc_lu_obj_ops = { - .loo_object_init = osc_object_init, - .loo_object_release = NULL, - .loo_object_free = osc_object_free, - .loo_object_print = osc_object_print, - .loo_object_invariant = NULL -}; - -struct lu_object *osc_object_alloc(const struct lu_env *env, - const struct lu_object_header *unused, - struct lu_device *dev) -{ - struct osc_object *osc; - struct lu_object *obj; - - osc = kmem_cache_zalloc(osc_object_kmem, GFP_NOFS); - if (osc) { - obj = osc2lu(osc); - lu_object_init(obj, NULL, dev); - osc->oo_cl.co_ops = &osc_ops; - obj->lo_ops = &osc_lu_obj_ops; - } else { - obj = NULL; - } - return obj; -} - -int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc) -{ - CDEBUG(D_INODE, "Invalidate osc object: %p, # of active IOs: %d\n", - osc, atomic_read(&osc->oo_nr_ios)); - - wait_event_idle(osc->oo_io_waitq, !atomic_read(&osc->oo_nr_ios)); - - /* Discard all dirty pages of this object. */ - osc_cache_truncate_start(env, osc, 0, NULL); - - /* Discard all caching pages */ - osc_lock_discard_pages(env, osc, 0, CL_PAGE_EOF, CLM_WRITE); - - /* Clear ast data of dlm lock. 
Do this after discarding all pages */ - osc_object_prune(env, osc2cl(osc)); - - return 0; -} - -/** @} osc */ diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c deleted file mode 100644 index 20c553ef3a5e..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_page.c +++ /dev/null @@ -1,1094 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_page for OSC layer. 
- * - * Author: Nikita Danilov - * Author: Jinshan Xiong - */ - -#define DEBUG_SUBSYSTEM S_OSC - -#include -#include "osc_cl_internal.h" - -static void osc_lru_del(struct client_obd *cli, struct osc_page *opg); -static void osc_lru_use(struct client_obd *cli, struct osc_page *opg); -static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli, - struct osc_page *opg); - -/** \addtogroup osc - * @{ - */ - -/***************************************************************************** - * - * Page operations. - * - */ -static void osc_page_transfer_get(struct osc_page *opg, const char *label) -{ - struct cl_page *page = opg->ops_cl.cpl_page; - - LASSERT(!opg->ops_transfer_pinned); - cl_page_get(page); - lu_ref_add_atomic(&page->cp_reference, label, page); - opg->ops_transfer_pinned = 1; -} - -static void osc_page_transfer_put(const struct lu_env *env, - struct osc_page *opg) -{ - struct cl_page *page = opg->ops_cl.cpl_page; - - if (opg->ops_transfer_pinned) { - opg->ops_transfer_pinned = 0; - lu_ref_del(&page->cp_reference, "transfer", page); - cl_page_put(env, page); - } -} - -/** - * This is called once for every page when it is submitted for a transfer - * either opportunistic (osc_page_cache_add()), or immediate - * (osc_page_submit()). 
- */ -static void osc_page_transfer_add(const struct lu_env *env, - struct osc_page *opg, enum cl_req_type crt) -{ - struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); - - osc_lru_use(osc_cli(obj), opg); -} - -int osc_page_cache_add(const struct lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io) -{ - struct osc_page *opg = cl2osc_page(slice); - int result; - - osc_page_transfer_get(opg, "transfer\0cache"); - result = osc_queue_async_io(env, io, opg); - if (result != 0) - osc_page_transfer_put(env, opg); - else - osc_page_transfer_add(env, opg, CRT_WRITE); - - return result; -} - -void osc_index2policy(union ldlm_policy_data *policy, - const struct cl_object *obj, - pgoff_t start, pgoff_t end) -{ - memset(policy, 0, sizeof(*policy)); - policy->l_extent.start = cl_offset(obj, start); - policy->l_extent.end = cl_offset(obj, end + 1) - 1; -} - -static const char *osc_list(struct list_head *head) -{ - return list_empty(head) ? "-" : "+"; -} - -static inline unsigned long osc_submit_duration(struct osc_page *opg) -{ - if (opg->ops_submit_time == 0) - return 0; - - return (jiffies - opg->ops_submit_time); -} - -static int osc_page_print(const struct lu_env *env, - const struct cl_page_slice *slice, - void *cookie, lu_printer_t printer) -{ - struct osc_page *opg = cl2osc_page(slice); - struct osc_async_page *oap = &opg->ops_oap; - struct osc_object *obj = cl2osc(slice->cpl_obj); - struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli; - - return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n", - opg, osc_index(opg), - /* 1 */ - oap->oap_magic, oap->oap_cmd, - oap->oap_interrupted, - osc_list(&oap->oap_pending_item), - osc_list(&oap->oap_rpc_item), - /* 2 */ - oap->oap_obj_off, oap->oap_page_off, oap->oap_count, - oap->oap_async_flags, oap->oap_brw_flags, - oap->oap_request, oap->oap_cli, obj, - /* 3 
*/ - opg->ops_transfer_pinned, - osc_submit_duration(opg), opg->ops_srvlock, - /* 4 */ - cli->cl_r_in_flight, cli->cl_w_in_flight, - cli->cl_max_rpcs_in_flight, - cli->cl_avail_grant, - osc_list(&cli->cl_cache_waiters), - osc_list(&cli->cl_loi_ready_list), - osc_list(&cli->cl_loi_hp_ready_list), - osc_list(&cli->cl_loi_write_list), - osc_list(&cli->cl_loi_read_list), - /* 5 */ - osc_list(&obj->oo_ready_item), - osc_list(&obj->oo_hp_ready_item), - osc_list(&obj->oo_write_item), - osc_list(&obj->oo_read_item), - atomic_read(&obj->oo_nr_reads), - osc_list(&obj->oo_reading_exts), - atomic_read(&obj->oo_nr_writes), - osc_list(&obj->oo_hp_exts), - osc_list(&obj->oo_urgent_exts)); -} - -static void osc_page_delete(const struct lu_env *env, - const struct cl_page_slice *slice) -{ - struct osc_page *opg = cl2osc_page(slice); - struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); - int rc; - - CDEBUG(D_TRACE, "%p\n", opg); - osc_page_transfer_put(env, opg); - rc = osc_teardown_async_page(env, obj, opg); - if (rc) { - CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, - "Trying to teardown failed: %d\n", rc); - LASSERT(0); - } - - osc_lru_del(osc_cli(obj), opg); - - if (slice->cpl_page->cp_type == CPT_CACHEABLE) { - void *value; - - spin_lock(&obj->oo_tree_lock); - value = radix_tree_delete(&obj->oo_tree, osc_index(opg)); - if (value) - --obj->oo_npages; - spin_unlock(&obj->oo_tree_lock); - - LASSERT(ergo(value, value == opg)); - } -} - -static void osc_page_clip(const struct lu_env *env, - const struct cl_page_slice *slice, int from, int to) -{ - struct osc_page *opg = cl2osc_page(slice); - struct osc_async_page *oap = &opg->ops_oap; - - opg->ops_from = from; - opg->ops_to = to; - spin_lock(&oap->oap_lock); - oap->oap_async_flags |= ASYNC_COUNT_STABLE; - spin_unlock(&oap->oap_lock); -} - -static int osc_page_cancel(const struct lu_env *env, - const struct cl_page_slice *slice) -{ - struct osc_page *opg = cl2osc_page(slice); - int rc = 0; - - /* Check if the transferring against 
this page - * is completed, or not even queued. - */ - if (opg->ops_transfer_pinned) - /* FIXME: may not be interrupted.. */ - rc = osc_cancel_async_page(env, opg); - LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0)); - return rc; -} - -static int osc_page_flush(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io) -{ - struct osc_page *opg = cl2osc_page(slice); - int rc; - - rc = osc_flush_async_page(env, io, opg); - return rc; -} - -static const struct cl_page_operations osc_page_ops = { - .cpo_print = osc_page_print, - .cpo_delete = osc_page_delete, - .cpo_clip = osc_page_clip, - .cpo_cancel = osc_page_cancel, - .cpo_flush = osc_page_flush -}; - -int osc_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, pgoff_t index) -{ - struct osc_object *osc = cl2osc(obj); - struct osc_page *opg = cl_object_page_slice(obj, page); - int result; - - opg->ops_from = 0; - opg->ops_to = PAGE_SIZE; - - result = osc_prep_async_page(osc, opg, page->cp_vmpage, - cl_offset(obj, index)); - if (result == 0) { - struct osc_io *oio = osc_env_io(env); - - opg->ops_srvlock = osc_io_srvlock(oio); - cl_page_slice_add(page, &opg->ops_cl, obj, index, - &osc_page_ops); - } - INIT_LIST_HEAD(&opg->ops_lru); - - /* reserve an LRU space for this page */ - if (page->cp_type == CPT_CACHEABLE && result == 0) { - result = osc_lru_alloc(env, osc_cli(osc), opg); - if (result == 0) { - spin_lock(&osc->oo_tree_lock); - result = radix_tree_insert(&osc->oo_tree, index, opg); - if (result == 0) - ++osc->oo_npages; - spin_unlock(&osc->oo_tree_lock); - LASSERT(result == 0); - } - } - - return result; -} - -/** - * Helper function called by osc_io_submit() for every page in an immediate - * transfer (i.e., transferred synchronously). 
- */ -void osc_page_submit(const struct lu_env *env, struct osc_page *opg, - enum cl_req_type crt, int brw_flags) -{ - struct osc_async_page *oap = &opg->ops_oap; - - LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n", - oap, oap->oap_magic); - LASSERT(oap->oap_async_flags & ASYNC_READY); - LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE); - - oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ; - oap->oap_page_off = opg->ops_from; - oap->oap_count = opg->ops_to - opg->ops_from; - oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC; - - if (capable(CAP_SYS_RESOURCE)) { - oap->oap_brw_flags |= OBD_BRW_NOQUOTA; - oap->oap_cmd |= OBD_BRW_NOQUOTA; - } - - opg->ops_submit_time = jiffies; - osc_page_transfer_get(opg, "transfer\0imm"); - osc_page_transfer_add(env, opg, crt); -} - -/* --------------- LRU page management ------------------ */ - -/* OSC is a natural place to manage LRU pages as applications are specialized - * to write OSC by OSC. Ideally, if one OSC is used more frequently it should - * occupy more LRU slots. On the other hand, we should avoid using up all LRU - * slots (client_obd::cl_lru_left) otherwise process has to be put into sleep - * for free LRU slots - this will be very bad so the algorithm requires each - * OSC to free slots voluntarily to maintain a reasonable number of free slots - * at any time. - */ -static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq); - -/** - * LRU pages are freed in batch mode. OSC should at least free this - * number of pages to avoid running out of LRU slots. - */ -static inline int lru_shrink_min(struct client_obd *cli) -{ - return cli->cl_max_pages_per_rpc * 2; -} - -/** - * free this number at most otherwise it will take too long time to finish. - */ -static inline int lru_shrink_max(struct client_obd *cli) -{ - return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight; -} - -/** - * Check if we can free LRU slots from this OSC. 
If there exists LRU waiters, - * we should free slots aggressively. In this way, slots are freed in a steady - * step to maintain fairness among OSCs. - * - * Return how many LRU pages should be freed. - */ -static int osc_cache_too_much(struct client_obd *cli) -{ - struct cl_client_cache *cache = cli->cl_cache; - long pages = atomic_long_read(&cli->cl_lru_in_list); - unsigned long budget; - - budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2); - - /* if it's going to run out LRU slots, we should free some, but not - * too much to maintain fairness among OSCs. - */ - if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) { - if (pages >= budget) - return lru_shrink_max(cli); - else if (pages >= budget / 2) - return lru_shrink_min(cli); - } else { - time64_t duration = ktime_get_real_seconds(); - long timediff; - - /* knock out pages by duration of no IO activity */ - duration -= cli->cl_lru_last_used; - /* - * The difference shouldn't be more than 70 years - * so we can safely case to a long. Round to - * approximately 1 minute. 
- */ - timediff = (long)(duration >> 6); - if (timediff > 0 && pages >= budget / timediff) - return lru_shrink_min(cli); - } - return 0; -} - -int lru_queue_work(const struct lu_env *env, void *data) -{ - struct client_obd *cli = data; - int count; - - CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli)); - - count = osc_cache_too_much(cli); - if (count > 0) { - int rc = osc_lru_shrink(env, cli, count, false); - - CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n", - cli_name(cli), rc, count); - if (rc >= count) { - CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli)); - ptlrpcd_queue_work(cli->cl_lru_work); - } - } - - return 0; -} - -void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist) -{ - LIST_HEAD(lru); - struct osc_async_page *oap; - long npages = 0; - - list_for_each_entry(oap, plist, oap_pending_item) { - struct osc_page *opg = oap2osc_page(oap); - - if (!opg->ops_in_lru) - continue; - - ++npages; - LASSERT(list_empty(&opg->ops_lru)); - list_add(&opg->ops_lru, &lru); - } - - if (npages > 0) { - spin_lock(&cli->cl_lru_list_lock); - list_splice_tail(&lru, &cli->cl_lru_list); - atomic_long_sub(npages, &cli->cl_lru_busy); - atomic_long_add(npages, &cli->cl_lru_in_list); - cli->cl_lru_last_used = ktime_get_real_seconds(); - spin_unlock(&cli->cl_lru_list_lock); - - if (waitqueue_active(&osc_lru_waitq)) - (void)ptlrpcd_queue_work(cli->cl_lru_work); - } -} - -static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg) -{ - LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0); - list_del_init(&opg->ops_lru); - atomic_long_dec(&cli->cl_lru_in_list); -} - -/** - * Page is being destroyed. The page may be not in LRU list, if the transfer - * has never finished(error occurred). 
- */ -static void osc_lru_del(struct client_obd *cli, struct osc_page *opg) -{ - if (opg->ops_in_lru) { - spin_lock(&cli->cl_lru_list_lock); - if (!list_empty(&opg->ops_lru)) { - __osc_lru_del(cli, opg); - } else { - LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0); - atomic_long_dec(&cli->cl_lru_busy); - } - spin_unlock(&cli->cl_lru_list_lock); - - atomic_long_inc(cli->cl_lru_left); - /* this is a great place to release more LRU pages if - * this osc occupies too many LRU pages and kernel is - * stealing one of them. - */ - if (osc_cache_too_much(cli)) { - CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli)); - (void)ptlrpcd_queue_work(cli->cl_lru_work); - } - wake_up(&osc_lru_waitq); - } else { - LASSERT(list_empty(&opg->ops_lru)); - } -} - -/** - * Delete page from LRUlist for redirty. - */ -static void osc_lru_use(struct client_obd *cli, struct osc_page *opg) -{ - /* If page is being transferred for the first time, - * ops_lru should be empty - */ - if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) { - spin_lock(&cli->cl_lru_list_lock); - __osc_lru_del(cli, opg); - spin_unlock(&cli->cl_lru_list_lock); - atomic_long_inc(&cli->cl_lru_busy); - } -} - -static void discard_pagevec(const struct lu_env *env, struct cl_io *io, - struct cl_page **pvec, int max_index) -{ - int i; - - for (i = 0; i < max_index; i++) { - struct cl_page *page = pvec[i]; - - LASSERT(cl_page_is_owned(page, io)); - cl_page_delete(env, page); - cl_page_discard(env, io, page); - cl_page_disown(env, io, page); - cl_page_put(env, page); - - pvec[i] = NULL; - } -} - -/** - * Check if a cl_page can be released, i.e, it's not being used. - * - * If unstable account is turned on, bulk transfer may hold one refcount - * for recovery so we need to check vmpage refcount as well; otherwise, - * even we can destroy cl_page but the corresponding vmpage can't be reused. 
- */ -static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page) -{ - if (cl_page_in_use_noref(page)) - return true; - - if (cli->cl_cache->ccc_unstable_check) { - struct page *vmpage = cl_page_vmpage(page); - - /* vmpage have two known users: cl_page and VM page cache */ - if (page_count(vmpage) - page_mapcount(vmpage) > 2) - return true; - } - return false; -} - -/** - * Drop @target of pages from LRU at most. - */ -long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, - long target, bool force) -{ - struct cl_io *io; - struct cl_object *clobj = NULL; - struct cl_page **pvec; - struct osc_page *opg; - int maxscan = 0; - long count = 0; - int index = 0; - int rc = 0; - - LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0); - if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0) - return 0; - - CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n", - cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force); - if (!force) { - if (atomic_read(&cli->cl_lru_shrinkers) > 0) - return -EBUSY; - - if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) { - atomic_dec(&cli->cl_lru_shrinkers); - return -EBUSY; - } - } else { - atomic_inc(&cli->cl_lru_shrinkers); - } - - pvec = (struct cl_page **)osc_env_info(env)->oti_pvec; - io = &osc_env_info(env)->oti_io; - - spin_lock(&cli->cl_lru_list_lock); - if (force) - cli->cl_lru_reclaim++; - maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list)); - while (!list_empty(&cli->cl_lru_list)) { - struct cl_page *page; - bool will_free = false; - - if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1) - break; - - if (--maxscan < 0) - break; - - opg = list_entry(cli->cl_lru_list.next, struct osc_page, - ops_lru); - page = opg->ops_cl.cpl_page; - if (lru_page_busy(cli, page)) { - list_move_tail(&opg->ops_lru, &cli->cl_lru_list); - continue; - } - - LASSERT(page->cp_obj); - if (clobj != page->cp_obj) { - struct cl_object *tmp = page->cp_obj; - - cl_object_get(tmp); - 
spin_unlock(&cli->cl_lru_list_lock); - - if (clobj) { - discard_pagevec(env, io, pvec, index); - index = 0; - - cl_io_fini(env, io); - cl_object_put(env, clobj); - clobj = NULL; - } - - clobj = tmp; - io->ci_obj = clobj; - io->ci_ignore_layout = 1; - rc = cl_io_init(env, io, CIT_MISC, clobj); - - spin_lock(&cli->cl_lru_list_lock); - - if (rc != 0) - break; - - ++maxscan; - continue; - } - - if (cl_page_own_try(env, io, page) == 0) { - if (!lru_page_busy(cli, page)) { - /* remove it from lru list earlier to avoid - * lock contention - */ - __osc_lru_del(cli, opg); - opg->ops_in_lru = 0; /* will be discarded */ - - cl_page_get(page); - will_free = true; - } else { - cl_page_disown(env, io, page); - } - } - - if (!will_free) { - list_move_tail(&opg->ops_lru, &cli->cl_lru_list); - continue; - } - - /* Don't discard and free the page with cl_lru_list held */ - pvec[index++] = page; - if (unlikely(index == OTI_PVEC_SIZE)) { - spin_unlock(&cli->cl_lru_list_lock); - discard_pagevec(env, io, pvec, index); - index = 0; - - spin_lock(&cli->cl_lru_list_lock); - } - - if (++count >= target) - break; - } - spin_unlock(&cli->cl_lru_list_lock); - - if (clobj) { - discard_pagevec(env, io, pvec, index); - - cl_io_fini(env, io); - cl_object_put(env, clobj); - } - - atomic_dec(&cli->cl_lru_shrinkers); - if (count > 0) { - atomic_long_add(count, cli->cl_lru_left); - wake_up_all(&osc_lru_waitq); - } - return count > 0 ? count : rc; -} - -/** - * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least - * \@npages of LRU slots. For performance consideration, it's better to drop - * LRU pages in batch. Therefore, the actual number is adjusted at least - * max_pages_per_rpc. 
- */ -static long osc_lru_reclaim(struct client_obd *cli, unsigned long npages) -{ - struct lu_env *env; - struct cl_client_cache *cache = cli->cl_cache; - int max_scans; - u16 refcheck; - long rc = 0; - - LASSERT(cache); - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return 0; - - npages = max_t(int, npages, cli->cl_max_pages_per_rpc); - CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n", - cli_name(cli), npages); - rc = osc_lru_shrink(env, cli, npages, true); - if (rc >= npages) { - CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n", - cli_name(cli), rc, npages); - if (osc_cache_too_much(cli) > 0) - ptlrpcd_queue_work(cli->cl_lru_work); - goto out; - } else if (rc > 0) { - npages -= rc; - } - - CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n", - cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list), - atomic_long_read(&cli->cl_lru_busy), npages); - - /* Reclaim LRU slots from other client_obd as it can't free enough - * from its own. This should rarely happen. 
- */ - spin_lock(&cache->ccc_lru_lock); - LASSERT(!list_empty(&cache->ccc_lru)); - - cache->ccc_lru_shrinkers++; - list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru); - - max_scans = atomic_read(&cache->ccc_users) - 2; - while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) { - cli = list_entry(cache->ccc_lru.next, struct client_obd, - cl_lru_osc); - - CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n", - cli_name(cli), cli, - atomic_long_read(&cli->cl_lru_in_list), - atomic_long_read(&cli->cl_lru_busy)); - - list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru); - if (osc_cache_too_much(cli) > 0) { - spin_unlock(&cache->ccc_lru_lock); - - rc = osc_lru_shrink(env, cli, npages, true); - spin_lock(&cache->ccc_lru_lock); - if (rc >= npages) - break; - if (rc > 0) - npages -= rc; - } - } - spin_unlock(&cache->ccc_lru_lock); - -out: - cl_env_put(env, &refcheck); - CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n", - cli_name(cli), cli, rc); - return rc; -} - -/** - * osc_lru_alloc() is called to reserve an LRU slot for a cl_page. - * - * Usually the LRU slots are reserved in osc_io_iter_rw_init(). - * Only in the case that the LRU slots are in extreme shortage, it should - * have reserved enough slots for an IO. 
- */ -static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli, - struct osc_page *opg) -{ - struct osc_io *oio = osc_env_io(env); - int rc = 0; - - if (!cli->cl_cache) /* shall not be in LRU */ - return 0; - - if (oio->oi_lru_reserved > 0) { - --oio->oi_lru_reserved; - goto out; - } - - LASSERT(atomic_long_read(cli->cl_lru_left) >= 0); - while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) { - /* run out of LRU spaces, try to drop some by itself */ - rc = osc_lru_reclaim(cli, 1); - if (rc < 0) - break; - if (rc > 0) - continue; - - cond_resched(); - - rc = l_wait_event_abortable(osc_lru_waitq, - atomic_long_read(cli->cl_lru_left) > 0); - - if (rc < 0) - break; - } - -out: - if (rc >= 0) { - atomic_long_inc(&cli->cl_lru_busy); - opg->ops_in_lru = 1; - rc = 0; - } - - return rc; -} - -/** - * osc_lru_reserve() is called to reserve enough LRU slots for I/O. - * - * The benefit of doing this is to reduce contention against atomic counter - * cl_lru_left by changing it from per-page access to per-IO access. - */ -unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages) -{ - unsigned long reserved = 0; - unsigned long max_pages; - unsigned long c; - - /* - * reserve a full RPC window at most to avoid that a thread accidentally - * consumes too many LRU slots - */ - max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight; - if (npages > max_pages) - npages = max_pages; - - c = atomic_long_read(cli->cl_lru_left); - if (c < npages && osc_lru_reclaim(cli, npages) > 0) - c = atomic_long_read(cli->cl_lru_left); - while (c >= npages) { - if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) { - reserved = npages; - break; - } - c = atomic_long_read(cli->cl_lru_left); - } - if (atomic_long_read(cli->cl_lru_left) < max_pages) { - /* - * If there aren't enough pages in the per-OSC LRU then - * wake up the LRU thread to try and clear out space, so - * we don't block if pages are being dirtied quickly. 
- */ - CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n", - cli_name(cli), atomic_long_read(cli->cl_lru_left), - max_pages); - (void)ptlrpcd_queue_work(cli->cl_lru_work); - } - - return reserved; -} - -/** - * osc_lru_unreserve() is called to unreserve LRU slots. - * - * LRU slots reserved by osc_lru_reserve() may have entries left due to several - * reasons such as page already existing or I/O error. Those reserved slots - * should be freed by calling this function. - */ -void osc_lru_unreserve(struct client_obd *cli, unsigned long npages) -{ - atomic_long_add(npages, cli->cl_lru_left); - wake_up_all(&osc_lru_waitq); -} - -/** - * Atomic operations are expensive. We accumulate the accounting for the - * same page pgdat to get better performance. - * In practice this can work pretty good because the pages in the same RPC - * are likely from the same page zone. - */ -static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc, - int factor) -{ - int page_count = desc->bd_iov_count; - pg_data_t *last = NULL; - int count = 0; - int i; - - LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); - - for (i = 0; i < page_count; i++) { - pg_data_t *pgdat = page_pgdat(BD_GET_KIOV(desc, i).bv_page); - - if (likely(pgdat == last)) { - ++count; - continue; - } - - if (count > 0) { - mod_node_page_state(pgdat, NR_UNSTABLE_NFS, - factor * count); - count = 0; - } - last = pgdat; - ++count; - } - if (count > 0) - mod_node_page_state(last, NR_UNSTABLE_NFS, factor * count); -} - -static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc) -{ - unstable_page_accounting(desc, 1); -} - -static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc) -{ - unstable_page_accounting(desc, -1); -} - -/** - * Performs "unstable" page accounting. This function balances the - * increment operations performed in osc_inc_unstable_pages. It is - * registered as the RPC request callback, and is executed when the - * bulk RPC is committed on the server. 
Thus at this point, the pages - * involved in the bulk transfer are no longer considered unstable. - * - * If this function is called, the request should have been committed - * or req:rq_unstable must have been set; it implies that the unstable - * statistic have been added. - */ -void osc_dec_unstable_pages(struct ptlrpc_request *req) -{ - struct client_obd *cli = &req->rq_import->imp_obd->u.cli; - struct ptlrpc_bulk_desc *desc = req->rq_bulk; - int page_count = desc->bd_iov_count; - long unstable_count; - - LASSERT(page_count >= 0); - dec_unstable_page_accounting(desc); - - unstable_count = atomic_long_sub_return(page_count, - &cli->cl_unstable_count); - LASSERT(unstable_count >= 0); - - unstable_count = atomic_long_sub_return(page_count, - &cli->cl_cache->ccc_unstable_nr); - LASSERT(unstable_count >= 0); - if (!unstable_count) - wake_up_all(&cli->cl_cache->ccc_unstable_waitq); - - if (waitqueue_active(&osc_lru_waitq)) - (void)ptlrpcd_queue_work(cli->cl_lru_work); -} - -/** - * "unstable" page accounting. See: osc_dec_unstable_pages. - */ -void osc_inc_unstable_pages(struct ptlrpc_request *req) -{ - struct client_obd *cli = &req->rq_import->imp_obd->u.cli; - struct ptlrpc_bulk_desc *desc = req->rq_bulk; - long page_count = desc->bd_iov_count; - - /* No unstable page tracking */ - if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check) - return; - - add_unstable_page_accounting(desc); - atomic_long_add(page_count, &cli->cl_unstable_count); - atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr); - - /* - * If the request has already been committed (i.e. brw_commit - * called via rq_commit_cb), we need to undo the unstable page - * increments we just performed because rq_commit_cb wont be - * called again. 
- */ - spin_lock(&req->rq_lock); - if (unlikely(req->rq_committed)) { - spin_unlock(&req->rq_lock); - - osc_dec_unstable_pages(req); - } else { - req->rq_unstable = 1; - spin_unlock(&req->rq_lock); - } -} - -/** - * Check if it piggybacks SOFT_SYNC flag to OST from this OSC. - * This function will be called by every BRW RPC so it's critical - * to make this function fast. - */ -bool osc_over_unstable_soft_limit(struct client_obd *cli) -{ - long unstable_nr, osc_unstable_count; - - /* Can't check cli->cl_unstable_count, therefore, no soft limit */ - if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check) - return false; - - osc_unstable_count = atomic_long_read(&cli->cl_unstable_count); - unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr); - - CDEBUG(D_CACHE, - "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n", - cli_name(cli), cli, unstable_nr, osc_unstable_count); - - /* - * If the LRU slots are in shortage - 25% remaining AND this OSC - * has one full RPC window of unstable pages, it's a good chance - * to piggyback a SOFT_SYNC flag. - * Please notice that the OST won't take immediate response for the - * SOFT_SYNC request so active OSCs will have more chance to carry - * the flag, this is reasonable. 
- */ - return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 && - osc_unstable_count > cli->cl_max_pages_per_rpc * - cli->cl_max_rpcs_in_flight; -} - -/** - * Return how many LRU pages in the cache of all OSC devices - * - * Return: return # of cached LRU pages times reclaimation tendency - * SHRINK_STOP if it cannot do any scanning in this time - */ -unsigned long osc_cache_shrink_count(struct shrinker *sk, - struct shrink_control *sc) -{ - struct client_obd *cli; - unsigned long cached = 0; - - spin_lock(&osc_shrink_lock); - list_for_each_entry(cli, &osc_shrink_list, cl_shrink_list) - cached += atomic_long_read(&cli->cl_lru_in_list); - spin_unlock(&osc_shrink_lock); - - return (cached * sysctl_vfs_cache_pressure) / 100; -} - -/** - * Scan and try to reclaim sc->nr_to_scan cached LRU pages - * - * Return: number of cached LRU pages reclaimed - * SHRINK_STOP if it cannot do any scanning in this time - * - * Linux kernel will loop calling this shrinker scan routine with - * sc->nr_to_scan = SHRINK_BATCH(128 for now) until kernel got enough memory. - * - * If sc->nr_to_scan is 0, the VM is querying the cache size, we don't need - * to scan and try to reclaim LRU pages, just return 0 and - * osc_cache_shrink_count() will report the LRU page number. 
- */ -unsigned long osc_cache_shrink_scan(struct shrinker *sk, - struct shrink_control *sc) -{ - struct client_obd *stop_anchor = NULL; - struct client_obd *cli; - struct lu_env *env; - long shrank = 0; - u16 refcheck; - int rc; - - if (!sc->nr_to_scan) - return 0; - - if (!(sc->gfp_mask & __GFP_FS)) - return SHRINK_STOP; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return SHRINK_STOP; - - spin_lock(&osc_shrink_lock); - while (!list_empty(&osc_shrink_list)) { - cli = list_entry(osc_shrink_list.next, struct client_obd, - cl_shrink_list); - - if (!stop_anchor) - stop_anchor = cli; - else if (cli == stop_anchor) - break; - - list_move_tail(&cli->cl_shrink_list, &osc_shrink_list); - spin_unlock(&osc_shrink_lock); - - /* shrink no more than max_pages_per_rpc for an OSC */ - rc = osc_lru_shrink(env, cli, (sc->nr_to_scan - shrank) > - cli->cl_max_pages_per_rpc ? - cli->cl_max_pages_per_rpc : - sc->nr_to_scan - shrank, true); - if (rc > 0) - shrank += rc; - - if (shrank >= sc->nr_to_scan) - goto out; - - spin_lock(&osc_shrink_lock); - } - spin_unlock(&osc_shrink_lock); - -out: - cl_env_put(env, &refcheck); - - return shrank; -} - -/** @} osc */ diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c deleted file mode 100644 index 723ec2fb18bf..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_quota.c +++ /dev/null @@ -1,236 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * GPL HEADER END - */ -/* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2011, 2015, Intel Corporation. - * - * Code originally extracted from quota directory - */ - -#include -#include "osc_internal.h" - -static const struct rhashtable_params quota_hash_params = { - .key_len = sizeof(u32), - .key_offset = offsetof(struct osc_quota_info, oqi_id), - .head_offset = offsetof(struct osc_quota_info, oqi_hash), - .automatic_shrinking = true, -}; - -static inline struct osc_quota_info *osc_oqi_alloc(u32 id) -{ - struct osc_quota_info *oqi; - - oqi = kmem_cache_zalloc(osc_quota_kmem, GFP_NOFS); - if (oqi) - oqi->oqi_id = id; - - return oqi; -} - -int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]) -{ - int type; - - for (type = 0; type < MAXQUOTAS; type++) { - struct osc_quota_info *oqi; - - oqi = rhashtable_lookup_fast(&cli->cl_quota_hash[type], &qid[type], - quota_hash_params); - if (oqi) { - /* Must not access oqi here, it could have been - * freed by osc_quota_setdq() - */ - - /* the slot is busy, the user is about to run out of - * quota space on this OST - */ - CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n", - type == USRQUOTA ? "user" : "grout", qid[type]); - return NO_QUOTA; - } - } - - return QUOTA_OK; -} - -static void osc_quota_free(struct rcu_head *head) -{ - struct osc_quota_info *oqi = container_of(head, struct osc_quota_info, rcu); - - kmem_cache_free(osc_quota_kmem, oqi); -} - - -#define MD_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_MD_FLUSRQUOTA \ - : OBD_MD_FLGRPQUOTA) -#define FL_QUOTA_FLAG(type) ((type == USRQUOTA) ? 
OBD_FL_NO_USRQUOTA \ - : OBD_FL_NO_GRPQUOTA) - -int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], - u32 valid, u32 flags) -{ - int type; - int rc = 0; - - if ((valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) == 0) - return 0; - - for (type = 0; type < MAXQUOTAS; type++) { - struct osc_quota_info *oqi; - - if ((valid & MD_QUOTA_FLAG(type)) == 0) - continue; - - /* lookup the ID in the per-type hash table */ - rcu_read_lock(); - oqi = rhashtable_lookup_fast(&cli->cl_quota_hash[type], &qid[type], - quota_hash_params); - if ((flags & FL_QUOTA_FLAG(type)) != 0) { - /* This ID is getting close to its quota limit, let's - * switch to sync I/O - */ - rcu_read_unlock(); - if (oqi) - continue; - - oqi = osc_oqi_alloc(qid[type]); - if (!oqi) { - rc = -ENOMEM; - break; - } - - rc = rhashtable_lookup_insert_fast(&cli->cl_quota_hash[type], - &oqi->oqi_hash, quota_hash_params); - /* race with others? */ - if (rc) { - kmem_cache_free(osc_quota_kmem, oqi); - if (rc != -EEXIST) { - rc = -ENOMEM; - break; - } - rc = 0; - } - - CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n", - cli_name(cli), - type == USRQUOTA ? "user" : "group", - qid[type], rc); - } else { - /* This ID is now off the hook, let's remove it from - * the hash table - */ - if (!oqi) { - rcu_read_unlock(); - continue; - } - if (rhashtable_remove_fast(&cli->cl_quota_hash[type], - &oqi->oqi_hash, quota_hash_params) == 0) - call_rcu(&oqi->rcu, osc_quota_free); - rcu_read_unlock(); - CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n", - cli_name(cli), - type == USRQUOTA ? 
"user" : "group", - qid[type], oqi); - } - } - - return rc; -} - -static void -oqi_exit(void *vquota, void *data) -{ - struct osc_quota_info *oqi = vquota; - - osc_quota_free(&oqi->rcu); -} - -int osc_quota_setup(struct obd_device *obd) -{ - struct client_obd *cli = &obd->u.cli; - int i, type; - - for (type = 0; type < MAXQUOTAS; type++) { - if (rhashtable_init(&cli->cl_quota_hash[type], "a_hash_params) != 0) - break; - } - - if (type == MAXQUOTAS) - return 0; - - for (i = 0; i < type; i++) - rhashtable_destroy(&cli->cl_quota_hash[i]); - - return -ENOMEM; -} - -int osc_quota_cleanup(struct obd_device *obd) -{ - struct client_obd *cli = &obd->u.cli; - int type; - - for (type = 0; type < MAXQUOTAS; type++) - rhashtable_free_and_destroy(&cli->cl_quota_hash[type], - oqi_exit, NULL); - - return 0; -} - -int osc_quotactl(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct ptlrpc_request *req; - struct obd_quotactl *oqc; - int rc; - - req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), - &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION, - OST_QUOTACTL); - if (!req) - return -ENOMEM; - - oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - *oqc = *oqctl; - - ptlrpc_request_set_replen(req); - ptlrpc_at_set_req_timeout(req); - req->rq_no_resend = 1; - - rc = ptlrpc_queue_wait(req); - if (rc) - CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc); - - if (req->rq_repmsg) { - oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - if (oqc) { - *oqctl = *oqc; - } else if (!rc) { - CERROR("Can't unpack obd_quotactl\n"); - rc = -EPROTO; - } - } else if (!rc) { - CERROR("Can't unpack obd_quotactl\n"); - rc = -EPROTO; - } - ptlrpc_req_finished(req); - - return rc; -} diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c deleted file mode 100644 index 61ef6c8d7a12..000000000000 --- a/drivers/staging/lustre/lustre/osc/osc_request.c +++ /dev/null @@ -1,2907 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_OSC - -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "osc_internal.h" -#include "osc_cl_internal.h" - -atomic_t osc_pool_req_count; -unsigned int osc_reqpool_maxreqcount; -struct ptlrpc_request_pool *osc_rq_pool; - -/* max memory used for request pool, unit is MB */ -static unsigned int osc_reqpool_mem_max = 5; -module_param(osc_reqpool_mem_max, uint, 0444); - -struct osc_brw_async_args { - struct obdo *aa_oa; - int aa_requested_nob; - int aa_nio_count; - u32 aa_page_count; - int aa_resends; - struct brw_page **aa_ppga; - struct client_obd *aa_cli; - struct list_head aa_oaps; - struct list_head aa_exts; -}; - -struct osc_async_args { - struct obd_info *aa_oi; -}; - -struct osc_setattr_args { - struct obdo *sa_oa; - obd_enqueue_update_f sa_upcall; - void *sa_cookie; -}; - -struct osc_fsync_args { - struct osc_object *fa_obj; - struct obdo *fa_oa; - obd_enqueue_update_f fa_upcall; - void *fa_cookie; -}; - -struct osc_enqueue_args { - struct obd_export *oa_exp; - enum ldlm_type oa_type; - enum ldlm_mode oa_mode; - __u64 *oa_flags; - osc_enqueue_upcall_f oa_upcall; - void *oa_cookie; - struct ost_lvb *oa_lvb; - struct lustre_handle oa_lockh; - unsigned int oa_agl:1; -}; - -static void osc_release_ppga(struct brw_page **ppga, u32 count); -static int brw_interpret(const struct lu_env *env, - struct ptlrpc_request *req, void *data, int rc); - -static inline void osc_pack_req_body(struct ptlrpc_request *req, - struct obdo *oa) -{ - struct ost_body *body; - - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - LASSERT(body); - - lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); -} - -static int osc_getattr(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa) -{ - struct ptlrpc_request *req; - struct ost_body *body; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), 
&RQF_OST_GETATTR); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - osc_pack_req_body(req, oa); - - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (!body) { - rc = -EPROTO; - goto out; - } - - CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode); - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, - &body->oa); - - oa->o_blksize = cli_brw_size(exp->exp_obd); - oa->o_valid |= OBD_MD_FLBLKSZ; - - out: - ptlrpc_req_finished(req); - return rc; -} - -static int osc_setattr(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa) -{ - struct ptlrpc_request *req; - struct ost_body *body; - int rc; - - LASSERT(oa->o_valid & OBD_MD_FLGROUP); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - osc_pack_req_body(req, oa); - - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (!body) { - rc = -EPROTO; - goto out; - } - - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, - &body->oa); - -out: - ptlrpc_req_finished(req); - return rc; -} - -static int osc_setattr_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - struct osc_setattr_args *sa, int rc) -{ - struct ost_body *body; - - if (rc != 0) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (!body) { - rc = -EPROTO; - goto out; - } - - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa, - &body->oa); -out: - rc = sa->sa_upcall(sa->sa_cookie, rc); - return rc; -} - -int osc_setattr_async(struct obd_export *exp, struct obdo *oa, - 
obd_enqueue_update_f upcall, void *cookie, - struct ptlrpc_request_set *rqset) -{ - struct ptlrpc_request *req; - struct osc_setattr_args *sa; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - osc_pack_req_body(req, oa); - - ptlrpc_request_set_replen(req); - - /* do mds to ost setattr asynchronously */ - if (!rqset) { - /* Do not wait for response. */ - ptlrpcd_add_req(req); - } else { - req->rq_interpret_reply = - (ptlrpc_interpterer_t)osc_setattr_interpret; - - BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args)); - sa = ptlrpc_req_async_args(req); - sa->sa_oa = oa; - sa->sa_upcall = upcall; - sa->sa_cookie = cookie; - - if (rqset == PTLRPCD_SET) - ptlrpcd_add_req(req); - else - ptlrpc_set_add_req(rqset, req); - } - - return 0; -} - -static int osc_create(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa) -{ - struct ptlrpc_request *req; - struct ost_body *body; - int rc; - - LASSERT(oa); - LASSERT(oa->o_valid & OBD_MD_FLGROUP); - LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi))); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE); - if (!req) { - rc = -ENOMEM; - goto out; - } - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE); - if (rc) { - ptlrpc_request_free(req); - goto out; - } - - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - LASSERT(body); - - lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); - - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - if (rc) - goto out_req; - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (!body) { - rc = -EPROTO; - goto out_req; - } - - CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags); - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa); - - oa->o_blksize = cli_brw_size(exp->exp_obd); - 
oa->o_valid |= OBD_MD_FLBLKSZ; - - CDEBUG(D_HA, "transno: %lld\n", - lustre_msg_get_transno(req->rq_repmsg)); -out_req: - ptlrpc_req_finished(req); -out: - return rc; -} - -int osc_punch_base(struct obd_export *exp, struct obdo *oa, - obd_enqueue_update_f upcall, void *cookie, - struct ptlrpc_request_set *rqset) -{ - struct ptlrpc_request *req; - struct osc_setattr_args *sa; - struct ost_body *body; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ - ptlrpc_at_set_req_timeout(req); - - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - LASSERT(body); - lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, - oa); - - ptlrpc_request_set_replen(req); - - req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret; - BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args)); - sa = ptlrpc_req_async_args(req); - sa->sa_oa = oa; - sa->sa_upcall = upcall; - sa->sa_cookie = cookie; - if (rqset == PTLRPCD_SET) - ptlrpcd_add_req(req); - else - ptlrpc_set_add_req(rqset, req); - - return 0; -} - -static int osc_sync_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - void *arg, int rc) -{ - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - struct osc_fsync_args *fa = arg; - unsigned long valid = 0; - struct ost_body *body; - struct cl_object *obj; - - if (rc) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (!body) { - CERROR("can't unpack ost_body\n"); - rc = -EPROTO; - goto out; - } - - *fa->fa_oa = body->oa; - obj = osc2cl(fa->fa_obj); - - /* Update osc object's blocks attribute */ - cl_object_attr_lock(obj); - if (body->oa.o_valid & OBD_MD_FLBLOCKS) { - attr->cat_blocks = body->oa.o_blocks; - valid |= CAT_BLOCKS; - } - - if (valid) - 
cl_object_attr_update(env, obj, attr, valid); - cl_object_attr_unlock(obj); - -out: - rc = fa->fa_upcall(fa->fa_cookie, rc); - return rc; -} - -int osc_sync_base(struct osc_object *obj, struct obdo *oa, - obd_enqueue_update_f upcall, void *cookie, - struct ptlrpc_request_set *rqset) -{ - struct obd_export *exp = osc_export(obj); - struct ptlrpc_request *req; - struct ost_body *body; - struct osc_fsync_args *fa; - int rc; - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - /* overload the size and blocks fields in the oa with start/end */ - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - LASSERT(body); - lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, - oa); - - ptlrpc_request_set_replen(req); - req->rq_interpret_reply = osc_sync_interpret; - - BUILD_BUG_ON(sizeof(*fa) > sizeof(req->rq_async_args)); - fa = ptlrpc_req_async_args(req); - fa->fa_obj = obj; - fa->fa_oa = oa; - fa->fa_upcall = upcall; - fa->fa_cookie = cookie; - - if (rqset == PTLRPCD_SET) - ptlrpcd_add_req(req); - else - ptlrpc_set_add_req(rqset, req); - - return 0; -} - -/* Find and cancel locally locks matched by @mode in the resource found by - * @objid. Found locks are added into @cancel list. Returns the amount of - * locks added to @cancels list. - */ -static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, - struct list_head *cancels, - enum ldlm_mode mode, __u64 lock_flags) -{ - struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; - struct ldlm_res_id res_id; - struct ldlm_resource *res; - int count; - - /* Return, i.e. cancel nothing, only if ELC is supported (flag in - * export) but disabled through procfs (flag in NS). 
- * - * This distinguishes from a case when ELC is not supported originally, - * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. - */ - if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) - return 0; - - ostid_build_res_name(&oa->o_oi, &res_id); - res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (IS_ERR(res)) - return 0; - - LDLM_RESOURCE_ADDREF(res); - count = ldlm_cancel_resource_local(res, cancels, NULL, mode, - lock_flags, 0, NULL); - LDLM_RESOURCE_DELREF(res); - ldlm_resource_putref(res); - return count; -} - -static int osc_destroy_interpret(const struct lu_env *env, - struct ptlrpc_request *req, void *data, - int rc) -{ - struct client_obd *cli = &req->rq_import->imp_obd->u.cli; - - atomic_dec(&cli->cl_destroy_in_flight); - wake_up(&cli->cl_destroy_waitq); - return 0; -} - -static int osc_can_send_destroy(struct client_obd *cli) -{ - if (atomic_inc_return(&cli->cl_destroy_in_flight) <= - cli->cl_max_rpcs_in_flight) { - /* The destroy request can be sent */ - return 1; - } - if (atomic_dec_return(&cli->cl_destroy_in_flight) < - cli->cl_max_rpcs_in_flight) { - /* - * The counter has been modified between the two atomic - * operations. 
- */ - wake_up(&cli->cl_destroy_waitq); - } - return 0; -} - -static int osc_destroy(const struct lu_env *env, struct obd_export *exp, - struct obdo *oa) -{ - struct client_obd *cli = &exp->exp_obd->u.cli; - struct ptlrpc_request *req; - struct ost_body *body; - LIST_HEAD(cancels); - int rc, count; - - if (!oa) { - CDEBUG(D_INFO, "oa NULL\n"); - return -EINVAL; - } - - count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW, - LDLM_FL_DISCARD_DATA); - - req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY); - if (!req) { - ldlm_lock_list_put(&cancels, l_bl_ast, count); - return -ENOMEM; - } - - rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY, - 0, &cancels, count); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ - ptlrpc_at_set_req_timeout(req); - - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - LASSERT(body); - lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); - - ptlrpc_request_set_replen(req); - - req->rq_interpret_reply = osc_destroy_interpret; - if (!osc_can_send_destroy(cli)) { - /* - * Wait until the number of on-going destroy RPCs drops - * under max_rpc_in_flight - */ - l_wait_event_abortable_exclusive(cli->cl_destroy_waitq, - osc_can_send_destroy(cli)); - } - - /* Do not wait for response */ - ptlrpcd_add_req(req); - return 0; -} - -static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, - long writing_bytes) -{ - u32 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT; - - LASSERT(!(oa->o_valid & bits)); - - oa->o_valid |= bits; - spin_lock(&cli->cl_loi_list_lock); - oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT; - if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit > - cli->cl_dirty_max_pages)) { - CERROR("dirty %lu - %lu > dirty_max %lu\n", - cli->cl_dirty_pages, cli->cl_dirty_transit, - cli->cl_dirty_max_pages); - oa->o_undirty = 0; - } else if (unlikely(atomic_long_read(&obd_dirty_pages) - - 
atomic_long_read(&obd_dirty_transit_pages) > - (long)(obd_max_dirty_pages + 1))) { - /* The atomic_read() allowing the atomic_inc() are - * not covered by a lock thus they may safely race and trip - * this CERROR() unless we add in a small fudge factor (+1). - */ - CERROR("%s: dirty %ld + %ld > system dirty_max %ld\n", - cli_name(cli), atomic_long_read(&obd_dirty_pages), - atomic_long_read(&obd_dirty_transit_pages), - obd_max_dirty_pages); - oa->o_undirty = 0; - } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages > - 0x7fffffff)) { - CERROR("dirty %lu - dirty_max %lu too big???\n", - cli->cl_dirty_pages, cli->cl_dirty_max_pages); - oa->o_undirty = 0; - } else { - unsigned long max_in_flight; - - max_in_flight = (cli->cl_max_pages_per_rpc << PAGE_SHIFT) * - (cli->cl_max_rpcs_in_flight + 1); - oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_SHIFT, - max_in_flight); - } - oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant; - oa->o_dropped = cli->cl_lost_grant; - cli->cl_lost_grant = 0; - spin_unlock(&cli->cl_loi_list_lock); - CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n", - oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant); -} - -void osc_update_next_shrink(struct client_obd *cli) -{ - cli->cl_next_shrink_grant = - jiffies + cli->cl_grant_shrink_interval * HZ; - CDEBUG(D_CACHE, "next time %ld to shrink grant\n", - cli->cl_next_shrink_grant); -} - -static void __osc_update_grant(struct client_obd *cli, u64 grant) -{ - spin_lock(&cli->cl_loi_list_lock); - cli->cl_avail_grant += grant; - spin_unlock(&cli->cl_loi_list_lock); -} - -static void osc_update_grant(struct client_obd *cli, struct ost_body *body) -{ - if (body->oa.o_valid & OBD_MD_FLGRANT) { - CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant); - __osc_update_grant(cli, body->oa.o_grant); - } -} - -static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, - u32 keylen, void *key, u32 vallen, - void *val, struct ptlrpc_request_set 
*set); - -static int osc_shrink_grant_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - void *aa, int rc) -{ - struct client_obd *cli = &req->rq_import->imp_obd->u.cli; - struct obdo *oa = ((struct osc_brw_async_args *)aa)->aa_oa; - struct ost_body *body; - - if (rc != 0) { - __osc_update_grant(cli, oa->o_grant); - goto out; - } - - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - LASSERT(body); - osc_update_grant(cli, body); -out: - kmem_cache_free(obdo_cachep, oa); - return rc; -} - -static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) -{ - spin_lock(&cli->cl_loi_list_lock); - oa->o_grant = cli->cl_avail_grant / 4; - cli->cl_avail_grant -= oa->o_grant; - spin_unlock(&cli->cl_loi_list_lock); - if (!(oa->o_valid & OBD_MD_FLFLAGS)) { - oa->o_valid |= OBD_MD_FLFLAGS; - oa->o_flags = 0; - } - oa->o_flags |= OBD_FL_SHRINK_GRANT; - osc_update_next_shrink(cli); -} - -/* Shrink the current grant, either from some large amount to enough for a - * full set of in-flight RPCs, or if we have already shrunk to that limit - * then to enough for a single RPC. This avoids keeping more grant than - * needed, and avoids shrinking the grant piecemeal. - */ -static int osc_shrink_grant(struct client_obd *cli) -{ - __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * - (cli->cl_max_pages_per_rpc << PAGE_SHIFT); - - spin_lock(&cli->cl_loi_list_lock); - if (cli->cl_avail_grant <= target_bytes) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; - spin_unlock(&cli->cl_loi_list_lock); - - return osc_shrink_grant_to_target(cli, target_bytes); -} - -int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) -{ - int rc = 0; - struct ost_body *body; - - spin_lock(&cli->cl_loi_list_lock); - /* Don't shrink if we are already above or below the desired limit - * We don't want to shrink below a single RPC, as that will negatively - * impact block allocation and long-term performance. 
- */ - if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; - - if (target_bytes >= cli->cl_avail_grant) { - spin_unlock(&cli->cl_loi_list_lock); - return 0; - } - spin_unlock(&cli->cl_loi_list_lock); - - body = kzalloc(sizeof(*body), GFP_NOFS); - if (!body) - return -ENOMEM; - - osc_announce_cached(cli, &body->oa, 0); - - spin_lock(&cli->cl_loi_list_lock); - body->oa.o_grant = cli->cl_avail_grant - target_bytes; - cli->cl_avail_grant = target_bytes; - spin_unlock(&cli->cl_loi_list_lock); - if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) { - body->oa.o_valid |= OBD_MD_FLFLAGS; - body->oa.o_flags = 0; - } - body->oa.o_flags |= OBD_FL_SHRINK_GRANT; - osc_update_next_shrink(cli); - - rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export, - sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK, - sizeof(*body), body, NULL); - if (rc != 0) - __osc_update_grant(cli, body->oa.o_grant); - kfree(body); - return rc; -} - -static int osc_should_shrink_grant(struct client_obd *client) -{ - unsigned long time = jiffies; - unsigned long next_shrink = client->cl_next_shrink_grant; - - if ((client->cl_import->imp_connect_data.ocd_connect_flags & - OBD_CONNECT_GRANT_SHRINK) == 0) - return 0; - - if (time_after_eq(time, next_shrink - 5)) { - /* Get the current RPC size directly, instead of going via: - * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) - * Keep comment here so that it can be found by searching. 
- */ - int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT; - - if (client->cl_import->imp_state == LUSTRE_IMP_FULL && - client->cl_avail_grant > brw_size) - return 1; - - osc_update_next_shrink(client); - } - return 0; -} - -static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data) -{ - struct client_obd *client; - - list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) { - if (osc_should_shrink_grant(client)) - osc_shrink_grant(client); - } - return 0; -} - -static int osc_add_shrink_grant(struct client_obd *client) -{ - int rc; - - rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval, - TIMEOUT_GRANT, - osc_grant_shrink_grant_cb, NULL, - &client->cl_grant_shrink_list); - if (rc) { - CERROR("add grant client %s error %d\n", cli_name(client), rc); - return rc; - } - CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client)); - osc_update_next_shrink(client); - return 0; -} - -static int osc_del_shrink_grant(struct client_obd *client) -{ - return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list, - TIMEOUT_GRANT); -} - -static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) -{ - /* - * ocd_grant is the total grant amount we're expect to hold: if we've - * been evicted, it's the new avail_grant amount, cl_dirty_pages will - * drop to 0 as inflight RPCs fail out; otherwise, it's avail_grant + - * dirty. - * - * race is tolerable here: if we're evicted, but imp_state already - * left EVICTED state, then cl_dirty_pages must be 0 already. - */ - spin_lock(&cli->cl_loi_list_lock); - if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED) - cli->cl_avail_grant = ocd->ocd_grant; - else - cli->cl_avail_grant = ocd->ocd_grant - - (cli->cl_dirty_pages << PAGE_SHIFT); - - /* determine the appropriate chunk size used by osc_extent. 
*/ - cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize); - spin_unlock(&cli->cl_loi_list_lock); - - CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", - cli_name(cli), cli->cl_avail_grant, cli->cl_lost_grant, - cli->cl_chunkbits); - - if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK && - list_empty(&cli->cl_grant_shrink_list)) - osc_add_shrink_grant(cli); -} - -/* We assume that the reason this OSC got a short read is because it read - * beyond the end of a stripe file; i.e. lustre is reading a sparse file - * via the LOV, and it _knows_ it's reading inside the file, it's just that - * this stripe never got written at or beyond this stripe offset yet. - */ -static void handle_short_read(int nob_read, u32 page_count, - struct brw_page **pga) -{ - char *ptr; - int i = 0; - - /* skip bytes read OK */ - while (nob_read > 0) { - LASSERT(page_count > 0); - - if (pga[i]->count > nob_read) { - /* EOF inside this page */ - ptr = kmap(pga[i]->pg) + - (pga[i]->off & ~PAGE_MASK); - memset(ptr + nob_read, 0, pga[i]->count - nob_read); - kunmap(pga[i]->pg); - page_count--; - i++; - break; - } - - nob_read -= pga[i]->count; - page_count--; - i++; - } - - /* zero remaining pages */ - while (page_count-- > 0) { - ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK); - memset(ptr, 0, pga[i]->count); - kunmap(pga[i]->pg); - i++; - } -} - -static int check_write_rcs(struct ptlrpc_request *req, - int requested_nob, int niocount, - u32 page_count, struct brw_page **pga) -{ - int i; - __u32 *remote_rcs; - - remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS, - sizeof(*remote_rcs) * - niocount); - if (!remote_rcs) { - CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n"); - return -EPROTO; - } - - /* return error if any niobuf was in error */ - for (i = 0; i < niocount; i++) { - if ((int)remote_rcs[i] < 0) - return remote_rcs[i]; - - if (remote_rcs[i] != 0) { - CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n", - i, 
remote_rcs[i], req); - return -EPROTO; - } - } - - if (req->rq_bulk->bd_nob_transferred != requested_nob) { - CERROR("Unexpected # bytes transferred: %d (requested %d)\n", - req->rq_bulk->bd_nob_transferred, requested_nob); - return -EPROTO; - } - - return 0; -} - -static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) -{ - if (p1->flag != p2->flag) { - unsigned int mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE | - OBD_BRW_SYNC | OBD_BRW_ASYNC | - OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC); - - /* warn if we try to combine flags that we don't know to be - * safe to combine - */ - if (unlikely((p1->flag & mask) != (p2->flag & mask))) { - CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n", - p1->flag, p2->flag); - } - return 0; - } - - return (p1->off + p1->count == p2->off); -} - -static u32 osc_checksum_bulk(int nob, u32 pg_count, - struct brw_page **pga, int opc, - enum cksum_type cksum_type) -{ - __u32 cksum; - int i = 0; - struct ahash_request *hdesc; - unsigned int bufsize; - unsigned char cfs_alg = cksum_obd2cfs(cksum_type); - - LASSERT(pg_count > 0); - - hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0); - if (IS_ERR(hdesc)) { - CERROR("Unable to initialize checksum hash %s\n", - cfs_crypto_hash_name(cfs_alg)); - return PTR_ERR(hdesc); - } - - while (nob > 0 && pg_count > 0) { - unsigned int count = pga[i]->count > nob ? 
nob : pga[i]->count; - - /* corrupt the data before we compute the checksum, to - * simulate an OST->client data error - */ - if (i == 0 && opc == OST_READ && - OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) { - unsigned char *ptr = kmap(pga[i]->pg); - int off = pga[i]->off & ~PAGE_MASK; - - memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob)); - kunmap(pga[i]->pg); - } - cfs_crypto_hash_update_page(hdesc, pga[i]->pg, - pga[i]->off & ~PAGE_MASK, - count); - CDEBUG(D_PAGE, - "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n", - pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index, - (long)pga[i]->pg->flags, page_count(pga[i]->pg), - page_private(pga[i]->pg), - (int)(pga[i]->off & ~PAGE_MASK)); - - nob -= pga[i]->count; - pg_count--; - i++; - } - - bufsize = sizeof(cksum); - cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize); - - /* For sending we only compute the wrong checksum instead - * of corrupting the data so it is still correct on a redo - */ - if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND)) - cksum++; - - return cksum; -} - -static int osc_brw_prep_request(int cmd, struct client_obd *cli, - struct obdo *oa, u32 page_count, - struct brw_page **pga, - struct ptlrpc_request **reqp, - int reserve, - int resend) -{ - struct ptlrpc_request *req; - struct ptlrpc_bulk_desc *desc; - struct ost_body *body; - struct obd_ioobj *ioobj; - struct niobuf_remote *niobuf; - int niocount, i, requested_nob, opc, rc; - struct osc_brw_async_args *aa; - struct req_capsule *pill; - struct brw_page *pg_prev; - - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ)) - return -ENOMEM; /* Recoverable */ - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2)) - return -EINVAL; /* Fatal */ - - if ((cmd & OBD_BRW_WRITE) != 0) { - opc = OST_WRITE; - req = ptlrpc_request_alloc_pool(cli->cl_import, - osc_rq_pool, - &RQF_OST_BRW_WRITE); - } else { - opc = OST_READ; - req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ); - } - if (!req) - return 
-ENOMEM; - - for (niocount = i = 1; i < page_count; i++) { - if (!can_merge_pages(pga[i - 1], pga[i])) - niocount++; - } - - pill = &req->rq_pill; - req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT, - sizeof(*ioobj)); - req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT, - niocount * sizeof(*niobuf)); - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ - ptlrpc_at_set_req_timeout(req); - /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own - * retry logic - */ - req->rq_no_retry_einprogress = 1; - - desc = ptlrpc_prep_bulk_imp(req, page_count, - cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS, - (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE : - PTLRPC_BULK_PUT_SINK) | PTLRPC_BULK_BUF_KIOV, OST_BULK_PORTAL, - &ptlrpc_bulk_kiov_pin_ops); - - if (!desc) { - rc = -ENOMEM; - goto out; - } - /* NB request now owns desc and will free it when it gets freed */ - - body = req_capsule_client_get(pill, &RMF_OST_BODY); - ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ); - niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE); - LASSERT(body && ioobj && niobuf); - - lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); - - obdo_to_ioobj(oa, ioobj); - ioobj->ioo_bufcnt = niocount; - /* The high bits of ioo_max_brw tells server _maximum_ number of bulks - * that might be send for this request. The actual number is decided - * when the RPC is finally sent in ptlrpc_register_bulk(). It sends - * "max - 1" for old client compatibility sending "0", and also so the - * the actual maximum is a power-of-two number, not one less. 
LU-1431 - */ - ioobj_max_brw_set(ioobj, desc->bd_md_max_brw); - LASSERT(page_count > 0); - pg_prev = pga[0]; - for (requested_nob = i = 0; i < page_count; i++, niobuf++) { - struct brw_page *pg = pga[i]; - int poff = pg->off & ~PAGE_MASK; - - LASSERT(pg->count > 0); - /* make sure there is no gap in the middle of page array */ - LASSERTF(page_count == 1 || - (ergo(i == 0, poff + pg->count == PAGE_SIZE) && - ergo(i > 0 && i < page_count - 1, - poff == 0 && pg->count == PAGE_SIZE) && - ergo(i == page_count - 1, poff == 0)), - "i: %d/%d pg: %p off: %llu, count: %u\n", - i, page_count, pg, pg->off, pg->count); - LASSERTF(i == 0 || pg->off > pg_prev->off, - "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n", - i, page_count, - pg->pg, page_private(pg->pg), pg->pg->index, pg->off, - pg_prev->pg, page_private(pg_prev->pg), - pg_prev->pg->index, pg_prev->off); - LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) == - (pg->flag & OBD_BRW_SRVLOCK)); - - desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff, pg->count); - requested_nob += pg->count; - - if (i > 0 && can_merge_pages(pg_prev, pg)) { - niobuf--; - niobuf->rnb_len += pg->count; - } else { - niobuf->rnb_offset = pg->off; - niobuf->rnb_len = pg->count; - niobuf->rnb_flags = pg->flag; - } - pg_prev = pg; - } - - LASSERTF((void *)(niobuf - niocount) == - req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE), - "want %p - real %p\n", req_capsule_client_get(&req->rq_pill, - &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount)); - - osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? 
requested_nob:0); - if (resend) { - if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) { - body->oa.o_valid |= OBD_MD_FLFLAGS; - body->oa.o_flags = 0; - } - body->oa.o_flags |= OBD_FL_RECOV_RESEND; - } - - if (osc_should_shrink_grant(cli)) - osc_shrink_grant_local(cli, &body->oa); - - /* size[REQ_REC_OFF] still sizeof (*body) */ - if (opc == OST_WRITE) { - if (cli->cl_checksum && - !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { - /* store cl_cksum_type in a local variable since - * it can be changed via lprocfs - */ - enum cksum_type cksum_type = cli->cl_cksum_type; - - if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) { - oa->o_flags &= OBD_FL_LOCAL_MASK; - body->oa.o_flags = 0; - } - body->oa.o_flags |= cksum_type_pack(cksum_type); - body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS; - body->oa.o_cksum = osc_checksum_bulk(requested_nob, - page_count, pga, - OST_WRITE, - cksum_type); - CDEBUG(D_PAGE, "checksum at write origin: %x\n", - body->oa.o_cksum); - /* save this in 'oa', too, for later checking */ - oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS; - oa->o_flags |= cksum_type_pack(cksum_type); - } else { - /* clear out the checksum flag, in case this is a - * resend but cl_checksum is no longer set. 
b=11238 - */ - oa->o_valid &= ~OBD_MD_FLCKSUM; - } - oa->o_cksum = body->oa.o_cksum; - /* 1 RC per niobuf */ - req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER, - sizeof(__u32) * niocount); - } else { - if (cli->cl_checksum && - !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { - if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) - body->oa.o_flags = 0; - body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type); - body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS; - } - } - ptlrpc_request_set_replen(req); - - BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - aa->aa_oa = oa; - aa->aa_requested_nob = requested_nob; - aa->aa_nio_count = niocount; - aa->aa_page_count = page_count; - aa->aa_resends = 0; - aa->aa_ppga = pga; - aa->aa_cli = cli; - INIT_LIST_HEAD(&aa->aa_oaps); - - *reqp = req; - niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE); - CDEBUG(D_RPCTRACE, "brw rpc %p - object " DOSTID " offset %lld<>%lld\n", - req, POSTID(&oa->o_oi), niobuf[0].rnb_offset, - niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len); - - return 0; - - out: - ptlrpc_req_finished(req); - return rc; -} - -static int check_write_checksum(struct obdo *oa, - const struct lnet_process_id *peer, - __u32 client_cksum, __u32 server_cksum, int nob, - u32 page_count, struct brw_page **pga, - enum cksum_type client_cksum_type) -{ - __u32 new_cksum; - char *msg; - enum cksum_type cksum_type; - - if (server_cksum == client_cksum) { - CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum); - return 0; - } - - cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ? 
- oa->o_flags : 0); - new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE, - cksum_type); - - if (cksum_type != client_cksum_type) - msg = "the server did not use the checksum type specified in the original request - likely a protocol problem" - ; - else if (new_cksum == server_cksum) - msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)" - ; - else if (new_cksum == client_cksum) - msg = "changed in transit before arrival at OST"; - else - msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)" - ; - - LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode " DFID " object " DOSTID " extent [%llu-%llu]\n", - msg, libcfs_nid2str(peer->nid), - oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0, - oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0, - oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0, - POSTID(&oa->o_oi), pga[0]->off, - pga[page_count - 1]->off + - pga[page_count - 1]->count - 1); - CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n", - client_cksum, client_cksum_type, - server_cksum, cksum_type, new_cksum); - return 1; -} - -/* Note rc enters this function as number of bytes transferred */ -static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) -{ - struct osc_brw_async_args *aa = (void *)&req->rq_async_args; - const struct lnet_process_id *peer = - &req->rq_import->imp_connection->c_peer; - struct client_obd *cli = aa->aa_cli; - struct ost_body *body; - __u32 client_cksum = 0; - - if (rc < 0 && rc != -EDQUOT) { - DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc); - return rc; - } - - LASSERTF(req->rq_repmsg, "rc = %d\n", rc); - body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (!body) { - DEBUG_REQ(D_INFO, req, "Can't unpack body\n"); - return -EPROTO; - } - - /* set/clear over quota flag for a uid/gid */ - if (lustre_msg_get_opc(req->rq_reqmsg) 
== OST_WRITE && - body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) { - unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid }; - - CDEBUG(D_QUOTA, "setdq for [%u %u] with valid %#llx, flags %x\n", - body->oa.o_uid, body->oa.o_gid, body->oa.o_valid, - body->oa.o_flags); - osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags); - } - - osc_update_grant(cli, body); - - if (rc < 0) - return rc; - - if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM) - client_cksum = aa->aa_oa->o_cksum; /* save for later */ - - if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) { - if (rc > 0) { - CERROR("Unexpected +ve rc %d\n", rc); - return -EPROTO; - } - LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob); - - if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk)) - return -EAGAIN; - - if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum && - check_write_checksum(&body->oa, peer, client_cksum, - body->oa.o_cksum, aa->aa_requested_nob, - aa->aa_page_count, aa->aa_ppga, - cksum_type_unpack(aa->aa_oa->o_flags))) - return -EAGAIN; - - rc = check_write_rcs(req, aa->aa_requested_nob, - aa->aa_nio_count, - aa->aa_page_count, aa->aa_ppga); - goto out; - } - - /* The rest of this function executes only for OST_READs */ - - /* if unwrap_bulk failed, return -EAGAIN to retry */ - rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc); - if (rc < 0) { - rc = -EAGAIN; - goto out; - } - - if (rc > aa->aa_requested_nob) { - CERROR("Unexpected rc %d (%d requested)\n", rc, - aa->aa_requested_nob); - return -EPROTO; - } - - if (rc != req->rq_bulk->bd_nob_transferred) { - CERROR("Unexpected rc %d (%d transferred)\n", - rc, req->rq_bulk->bd_nob_transferred); - return -EPROTO; - } - - if (rc < aa->aa_requested_nob) - handle_short_read(rc, aa->aa_page_count, aa->aa_ppga); - - if (body->oa.o_valid & OBD_MD_FLCKSUM) { - static int cksum_counter; - __u32 server_cksum = body->oa.o_cksum; - char *via = ""; - char *router = ""; - enum cksum_type cksum_type; - - cksum_type = 
cksum_type_unpack(body->oa.o_valid & - OBD_MD_FLFLAGS ? - body->oa.o_flags : 0); - client_cksum = osc_checksum_bulk(rc, aa->aa_page_count, - aa->aa_ppga, OST_READ, - cksum_type); - - if (peer->nid != req->rq_bulk->bd_sender) { - via = " via "; - router = libcfs_nid2str(req->rq_bulk->bd_sender); - } - - if (server_cksum != client_cksum) { - LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode " DFID " object " DOSTID " extent [%llu-%llu]\n", - req->rq_import->imp_obd->obd_name, - libcfs_nid2str(peer->nid), - via, router, - body->oa.o_valid & OBD_MD_FLFID ? - body->oa.o_parent_seq : (__u64)0, - body->oa.o_valid & OBD_MD_FLFID ? - body->oa.o_parent_oid : 0, - body->oa.o_valid & OBD_MD_FLFID ? - body->oa.o_parent_ver : 0, - POSTID(&body->oa.o_oi), - aa->aa_ppga[0]->off, - aa->aa_ppga[aa->aa_page_count-1]->off + - aa->aa_ppga[aa->aa_page_count-1]->count - - 1); - CERROR("client %x, server %x, cksum_type %x\n", - client_cksum, server_cksum, cksum_type); - cksum_counter = 0; - aa->aa_oa->o_cksum = client_cksum; - rc = -EAGAIN; - } else { - cksum_counter++; - CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum); - rc = 0; - } - } else if (unlikely(client_cksum)) { - static int cksum_missed; - - cksum_missed++; - if ((cksum_missed & (-cksum_missed)) == cksum_missed) - CERROR("Checksum %u requested from %s but not sent\n", - cksum_missed, libcfs_nid2str(peer->nid)); - } else { - rc = 0; - } -out: - if (rc >= 0) - lustre_get_wire_obdo(&req->rq_import->imp_connect_data, - aa->aa_oa, &body->oa); - - return rc; -} - -static int osc_brw_redo_request(struct ptlrpc_request *request, - struct osc_brw_async_args *aa, int rc) -{ - struct ptlrpc_request *new_req; - struct osc_brw_async_args *new_aa; - struct osc_async_page *oap; - - DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request, - "redo for recoverable error %d", rc); - - rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) == - OST_WRITE ? 
OBD_BRW_WRITE : OBD_BRW_READ, - aa->aa_cli, aa->aa_oa, - aa->aa_page_count, aa->aa_ppga, - &new_req, 0, 1); - if (rc) - return rc; - - list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { - if (oap->oap_request) { - LASSERTF(request == oap->oap_request, - "request %p != oap_request %p\n", - request, oap->oap_request); - if (oap->oap_interrupted) { - ptlrpc_req_finished(new_req); - return -EINTR; - } - } - } - /* New request takes over pga and oaps from old request. - * Note that copying a list_head doesn't work, need to move it... - */ - aa->aa_resends++; - new_req->rq_interpret_reply = request->rq_interpret_reply; - new_req->rq_async_args = request->rq_async_args; - new_req->rq_commit_cb = request->rq_commit_cb; - /* cap resend delay to the current request timeout, this is similar to - * what ptlrpc does (see after_reply()) - */ - if (aa->aa_resends > new_req->rq_timeout) - new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout; - else - new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends; - new_req->rq_generation_set = 1; - new_req->rq_import_generation = request->rq_import_generation; - - new_aa = ptlrpc_req_async_args(new_req); - - INIT_LIST_HEAD(&new_aa->aa_oaps); - list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps); - INIT_LIST_HEAD(&new_aa->aa_exts); - list_splice_init(&aa->aa_exts, &new_aa->aa_exts); - new_aa->aa_resends = aa->aa_resends; - - list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) { - if (oap->oap_request) { - ptlrpc_req_finished(oap->oap_request); - oap->oap_request = ptlrpc_request_addref(new_req); - } - } - - /* XXX: This code will run into problem if we're going to support - * to add a series of BRW RPCs into a self-defined ptlrpc_request_set - * and wait for all of them to be finished. We should inherit request - * set from old request. - */ - ptlrpcd_add_req(new_req); - - DEBUG_REQ(D_INFO, new_req, "new request"); - return 0; -} - -/* - * ugh, we want disk allocation on the target to happen in offset order. 
we'll - * follow sedgewicks advice and stick to the dead simple shellsort -- it'll do - * fine for our small page arrays and doesn't require allocation. its an - * insertion sort that swaps elements that are strides apart, shrinking the - * stride down until its '1' and the array is sorted. - */ -static void sort_brw_pages(struct brw_page **array, int num) -{ - int stride, i, j; - struct brw_page *tmp; - - if (num == 1) - return; - for (stride = 1; stride < num ; stride = (stride * 3) + 1) - ; - - do { - stride /= 3; - for (i = stride ; i < num ; i++) { - tmp = array[i]; - j = i; - while (j >= stride && array[j - stride]->off > tmp->off) { - array[j] = array[j - stride]; - j -= stride; - } - array[j] = tmp; - } - } while (stride > 1); -} - -static void osc_release_ppga(struct brw_page **ppga, u32 count) -{ - LASSERT(ppga); - kfree(ppga); -} - -static int brw_interpret(const struct lu_env *env, - struct ptlrpc_request *req, void *data, int rc) -{ - struct osc_brw_async_args *aa = data; - struct osc_extent *ext; - struct osc_extent *tmp; - struct client_obd *cli = aa->aa_cli; - - rc = osc_brw_fini_request(req, rc); - CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc); - /* When server return -EINPROGRESS, client should always retry - * regardless of the number of times the bulk was resent already. 
- */ - if (osc_recoverable_error(rc)) { - if (req->rq_import_generation != - req->rq_import->imp_generation) { - CDEBUG(D_HA, "%s: resend cross eviction for object: " DOSTID ", rc = %d.\n", - req->rq_import->imp_obd->obd_name, - POSTID(&aa->aa_oa->o_oi), rc); - } else if (rc == -EINPROGRESS || - client_should_resend(aa->aa_resends, aa->aa_cli)) { - rc = osc_brw_redo_request(req, aa, rc); - } else { - CERROR("%s: too many resent retries for object: %llu:%llu, rc = %d.\n", - req->rq_import->imp_obd->obd_name, - POSTID(&aa->aa_oa->o_oi), rc); - } - - if (rc == 0) - return 0; - else if (rc == -EAGAIN || rc == -EINPROGRESS) - rc = -EIO; - } - - if (rc == 0) { - struct obdo *oa = aa->aa_oa; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - unsigned long valid = 0; - struct cl_object *obj; - struct osc_async_page *last; - - last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]); - obj = osc2cl(last->oap_obj); - - cl_object_attr_lock(obj); - if (oa->o_valid & OBD_MD_FLBLOCKS) { - attr->cat_blocks = oa->o_blocks; - valid |= CAT_BLOCKS; - } - if (oa->o_valid & OBD_MD_FLMTIME) { - attr->cat_mtime = oa->o_mtime; - valid |= CAT_MTIME; - } - if (oa->o_valid & OBD_MD_FLATIME) { - attr->cat_atime = oa->o_atime; - valid |= CAT_ATIME; - } - if (oa->o_valid & OBD_MD_FLCTIME) { - attr->cat_ctime = oa->o_ctime; - valid |= CAT_CTIME; - } - - if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) { - struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo; - loff_t last_off = last->oap_count + last->oap_obj_off + - last->oap_page_off; - - /* Change file size if this is an out of quota or - * direct IO write and it extends the file size - */ - if (loi->loi_lvb.lvb_size < last_off) { - attr->cat_size = last_off; - valid |= CAT_SIZE; - } - /* Extend KMS if it's not a lockless write */ - if (loi->loi_kms < last_off && - oap2osc_page(last)->ops_srvlock == 0) { - attr->cat_kms = last_off; - valid |= CAT_KMS; - } - } - - if (valid != 0) - cl_object_attr_update(env, obj, attr, valid); - 
cl_object_attr_unlock(obj); - } - kmem_cache_free(obdo_cachep, aa->aa_oa); - - if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0) - osc_inc_unstable_pages(req); - - list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) { - list_del_init(&ext->oe_link); - osc_extent_finish(env, ext, 1, rc); - } - LASSERT(list_empty(&aa->aa_exts)); - LASSERT(list_empty(&aa->aa_oaps)); - - osc_release_ppga(aa->aa_ppga, aa->aa_page_count); - ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred); - - spin_lock(&cli->cl_loi_list_lock); - /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters - * is called so we know whether to go to sync BRWs or wait for more - * RPCs to complete - */ - if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) - cli->cl_w_in_flight--; - else - cli->cl_r_in_flight--; - osc_wake_cache_waiters(cli); - spin_unlock(&cli->cl_loi_list_lock); - - osc_io_unplug(env, cli, NULL); - return rc; -} - -static void brw_commit(struct ptlrpc_request *req) -{ - /* - * If osc_inc_unstable_pages (via osc_extent_finish) races with - * this called via the rq_commit_cb, I need to ensure - * osc_dec_unstable_pages is still called. Otherwise unstable - * pages may be leaked. - */ - spin_lock(&req->rq_lock); - if (unlikely(req->rq_unstable)) { - req->rq_unstable = 0; - spin_unlock(&req->rq_lock); - osc_dec_unstable_pages(req); - } else { - req->rq_committed = 1; - spin_unlock(&req->rq_lock); - } -} - -/** - * Build an RPC by the list of extent @ext_list. The caller must ensure - * that the total pages in this list are NOT over max pages per RPC. - * Extents in the list must be in OES_RPC state. 
- */ -int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, - struct list_head *ext_list, int cmd) -{ - struct ptlrpc_request *req = NULL; - struct osc_extent *ext; - struct brw_page **pga = NULL; - struct osc_brw_async_args *aa = NULL; - struct obdo *oa = NULL; - struct osc_async_page *oap; - struct osc_object *obj = NULL; - struct cl_req_attr *crattr = NULL; - u64 starting_offset = OBD_OBJECT_EOF; - u64 ending_offset = 0; - unsigned int mpflag = 0; - int mem_tight = 0; - int page_count = 0; - bool soft_sync = false; - bool interrupted = false; - int i; - int rc; - struct ost_body *body; - LIST_HEAD(rpc_list); - - LASSERT(!list_empty(ext_list)); - - /* add pages into rpc_list to build BRW rpc */ - list_for_each_entry(ext, ext_list, oe_link) { - LASSERT(ext->oe_state == OES_RPC); - mem_tight |= ext->oe_memalloc; - page_count += ext->oe_nr_pages; - if (!obj) - obj = ext->oe_obj; - } - - soft_sync = osc_over_unstable_soft_limit(cli); - if (mem_tight) - mpflag = memalloc_noreclaim_save(); - - pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS); - if (!pga) { - rc = -ENOMEM; - goto out; - } - - oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); - if (!oa) { - rc = -ENOMEM; - goto out; - } - - i = 0; - list_for_each_entry(ext, ext_list, oe_link) { - list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { - if (mem_tight) - oap->oap_brw_flags |= OBD_BRW_MEMALLOC; - if (soft_sync) - oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC; - pga[i] = &oap->oap_brw_page; - pga[i]->off = oap->oap_obj_off + oap->oap_page_off; - i++; - - list_add_tail(&oap->oap_rpc_item, &rpc_list); - if (starting_offset == OBD_OBJECT_EOF || - starting_offset > oap->oap_obj_off) - starting_offset = oap->oap_obj_off; - else - LASSERT(!oap->oap_page_off); - if (ending_offset < oap->oap_obj_off + oap->oap_count) - ending_offset = oap->oap_obj_off + - oap->oap_count; - else - LASSERT(oap->oap_page_off + oap->oap_count == - PAGE_SIZE); - if (oap->oap_interrupted) - interrupted = true; - } - } - - /* 
first page in the list */ - oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item); - - crattr = &osc_env_info(env)->oti_req_attr; - memset(crattr, 0, sizeof(*crattr)); - crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ; - crattr->cra_flags = ~0ULL; - crattr->cra_page = oap2cl_page(oap); - crattr->cra_oa = oa; - cl_req_attr_set(env, osc2cl(obj), crattr); - - sort_brw_pages(pga, page_count); - rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 1, 0); - if (rc != 0) { - CERROR("prep_req failed: %d\n", rc); - goto out; - } - - req->rq_commit_cb = brw_commit; - req->rq_interpret_reply = brw_interpret; - - req->rq_memalloc = mem_tight != 0; - oap->oap_request = ptlrpc_request_addref(req); - if (interrupted && !req->rq_intr) - ptlrpc_mark_interrupted(req); - - /* Need to update the timestamps after the request is built in case - * we race with setattr (locally or in queue at OST). If OST gets - * later setattr before earlier BRW (as determined by the request xid), - * the OST will not use BRW timestamps. Sadly, there is no obvious - * way to do this in a single call. 
bug 10150 - */ - body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); - crattr->cra_oa = &body->oa; - crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME; - cl_req_attr_set(env, osc2cl(obj), crattr); - lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid); - - BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - INIT_LIST_HEAD(&aa->aa_oaps); - list_splice_init(&rpc_list, &aa->aa_oaps); - INIT_LIST_HEAD(&aa->aa_exts); - list_splice_init(ext_list, &aa->aa_exts); - - spin_lock(&cli->cl_loi_list_lock); - starting_offset >>= PAGE_SHIFT; - if (cmd == OBD_BRW_READ) { - cli->cl_r_in_flight++; - lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); - lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight); - lprocfs_oh_tally_log2(&cli->cl_read_offset_hist, - starting_offset + 1); - } else { - cli->cl_w_in_flight++; - lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count); - lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight); - lprocfs_oh_tally_log2(&cli->cl_write_offset_hist, - starting_offset + 1); - } - spin_unlock(&cli->cl_loi_list_lock); - - DEBUG_REQ(D_INODE, req, "%d pages, aa %p. 
now %ur/%dw in flight", - page_count, aa, cli->cl_r_in_flight, - cli->cl_w_in_flight); - OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val); - - ptlrpcd_add_req(req); - rc = 0; - -out: - if (mem_tight != 0) - memalloc_noreclaim_restore(mpflag); - - if (rc != 0) { - LASSERT(!req); - - if (oa) - kmem_cache_free(obdo_cachep, oa); - kfree(pga); - /* this should happen rarely and is pretty bad, it makes the - * pending list not follow the dirty order - */ - while (!list_empty(ext_list)) { - ext = list_entry(ext_list->next, struct osc_extent, - oe_link); - list_del_init(&ext->oe_link); - osc_extent_finish(env, ext, 0, rc); - } - } - return rc; -} - -static int osc_set_lock_data(struct ldlm_lock *lock, void *data) -{ - int set = 0; - - LASSERT(lock); - - lock_res_and_lock(lock); - - if (!lock->l_ast_data) - lock->l_ast_data = data; - if (lock->l_ast_data == data) - set = 1; - - unlock_res_and_lock(lock); - - return set; -} - -static int osc_enqueue_fini(struct ptlrpc_request *req, - osc_enqueue_upcall_f upcall, void *cookie, - struct lustre_handle *lockh, enum ldlm_mode mode, - __u64 *flags, int agl, int errcode) -{ - bool intent = *flags & LDLM_FL_HAS_INTENT; - int rc; - - /* The request was created before ldlm_cli_enqueue call. */ - if (intent && errcode == ELDLM_LOCK_ABORTED) { - struct ldlm_reply *rep; - - rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - - rep->lock_policy_res1 = - ptlrpc_status_ntoh(rep->lock_policy_res1); - if (rep->lock_policy_res1) - errcode = rep->lock_policy_res1; - if (!agl) - *flags |= LDLM_FL_LVB_READY; - } else if (errcode == ELDLM_OK) { - *flags |= LDLM_FL_LVB_READY; - } - - /* Call the update callback. 
*/ - rc = (*upcall)(cookie, lockh, errcode); - /* release the reference taken in ldlm_cli_enqueue() */ - if (errcode == ELDLM_LOCK_MATCHED) - errcode = ELDLM_OK; - if (errcode == ELDLM_OK && lustre_handle_is_used(lockh)) - ldlm_lock_decref(lockh, mode); - - return rc; -} - -static int osc_enqueue_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - struct osc_enqueue_args *aa, int rc) -{ - struct ldlm_lock *lock; - struct lustre_handle *lockh = &aa->oa_lockh; - enum ldlm_mode mode = aa->oa_mode; - struct ost_lvb *lvb = aa->oa_lvb; - __u32 lvb_len = sizeof(*lvb); - __u64 flags = 0; - - - /* ldlm_cli_enqueue is holding a reference on the lock, so it must - * be valid. - */ - lock = ldlm_handle2lock(lockh); - LASSERTF(lock, "lockh %llx, req %p, aa %p - client evicted?\n", - lockh->cookie, req, aa); - - /* Take an additional reference so that a blocking AST that - * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed - * to arrive after an upcall has been executed by - * osc_enqueue_fini(). - */ - ldlm_lock_addref(lockh, mode); - - /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */ - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2); - - /* Let CP AST to grant the lock first. */ - OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1); - - if (aa->oa_agl) { - LASSERT(!aa->oa_lvb); - LASSERT(!aa->oa_flags); - aa->oa_flags = &flags; - } - - /* Complete obtaining the lock procedure. */ - rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1, - aa->oa_mode, aa->oa_flags, lvb, lvb_len, - lockh, rc); - /* Complete osc stuff. 
*/ - rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode, - aa->oa_flags, aa->oa_agl, rc); - - OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10); - - ldlm_lock_decref(lockh, mode); - LDLM_LOCK_PUT(lock); - return rc; -} - -struct ptlrpc_request_set *PTLRPCD_SET = (void *)1; - -/* When enqueuing asynchronously, locks are not ordered, we can obtain a lock - * from the 2nd OSC before a lock from the 1st one. This does not deadlock with - * other synchronous requests, however keeping some locks and trying to obtain - * others may take a considerable amount of time in a case of ost failure; and - * when other sync requests do not get released lock from a client, the client - * is evicted from the cluster -- such scenaries make the life difficult, so - * release locks just after they are obtained. - */ -int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, - __u64 *flags, union ldlm_policy_data *policy, - struct ost_lvb *lvb, int kms_valid, - osc_enqueue_upcall_f upcall, void *cookie, - struct ldlm_enqueue_info *einfo, - struct ptlrpc_request_set *rqset, int async, int agl) -{ - struct obd_device *obd = exp->exp_obd; - struct lustre_handle lockh = { 0 }; - struct ptlrpc_request *req = NULL; - int intent = *flags & LDLM_FL_HAS_INTENT; - __u64 match_flags = *flags; - enum ldlm_mode mode; - int rc; - - /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother. - */ - policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK; - policy->l_extent.end |= ~PAGE_MASK; - - /* - * kms is not valid when either object is completely fresh (so that no - * locks are cached), or object was evicted. In the latter case cached - * lock cannot be used, because it would prime inode state with - * potentially stale LVB. 
- */ - if (!kms_valid) - goto no_match; - - /* Next, search for already existing extent locks that will cover us */ - /* If we're trying to read, we also search for an existing PW lock. The - * VFS and page cache already protect us locally, so lots of readers/ - * writers can share a single PW lock. - * - * There are problems with conversion deadlocks, so instead of - * converting a read lock to a write lock, we'll just enqueue a new - * one. - * - * At some point we should cancel the read lock instead of making them - * send us a blocking callback, but there are problems with canceling - * locks out from other users right now, too. - */ - mode = einfo->ei_mode; - if (einfo->ei_mode == LCK_PR) - mode |= LCK_PW; - if (agl == 0) - match_flags |= LDLM_FL_LVB_READY; - if (intent != 0) - match_flags |= LDLM_FL_BLOCK_GRANTED; - mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id, - einfo->ei_type, policy, mode, &lockh, 0); - if (mode) { - struct ldlm_lock *matched; - - if (*flags & LDLM_FL_TEST_LOCK) - return ELDLM_OK; - - matched = ldlm_handle2lock(&lockh); - if (agl) { - /* AGL enqueues DLM locks speculatively. Therefore if - * it already exists a DLM lock, it wll just inform the - * caller to cancel the AGL process for this stripe. - */ - ldlm_lock_decref(&lockh, mode); - LDLM_LOCK_PUT(matched); - return -ECANCELED; - } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) { - *flags |= LDLM_FL_LVB_READY; - /* We already have a lock, and it's referenced. 
*/ - (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED); - - ldlm_lock_decref(&lockh, mode); - LDLM_LOCK_PUT(matched); - return ELDLM_OK; - } else { - ldlm_lock_decref(&lockh, mode); - LDLM_LOCK_PUT(matched); - } - } - -no_match: - if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK)) - return -ENOLCK; - if (intent) { - req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_ENQUEUE_LVB); - if (!req) - return -ENOMEM; - - rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, - sizeof(*lvb)); - ptlrpc_request_set_replen(req); - } - - /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */ - *flags &= ~LDLM_FL_BLOCK_GRANTED; - - rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb, - sizeof(*lvb), LVB_T_OST, &lockh, async); - if (async) { - if (!rc) { - struct osc_enqueue_args *aa; - - BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - aa->oa_exp = exp; - aa->oa_mode = einfo->ei_mode; - aa->oa_type = einfo->ei_type; - lustre_handle_copy(&aa->oa_lockh, &lockh); - aa->oa_upcall = upcall; - aa->oa_cookie = cookie; - aa->oa_agl = !!agl; - if (!agl) { - aa->oa_flags = flags; - aa->oa_lvb = lvb; - } else { - /* AGL is essentially to enqueue an DLM lock - * in advance, so we don't care about the - * result of AGL enqueue. 
- */ - aa->oa_lvb = NULL; - aa->oa_flags = NULL; - } - - req->rq_interpret_reply = - (ptlrpc_interpterer_t)osc_enqueue_interpret; - if (rqset == PTLRPCD_SET) - ptlrpcd_add_req(req); - else - ptlrpc_set_add_req(rqset, req); - } else if (intent) { - ptlrpc_req_finished(req); - } - return rc; - } - - rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode, - flags, agl, rc); - if (intent) - ptlrpc_req_finished(req); - - return rc; -} - -int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, - enum ldlm_type type, union ldlm_policy_data *policy, - enum ldlm_mode mode, __u64 *flags, void *data, - struct lustre_handle *lockh, int unref) -{ - struct obd_device *obd = exp->exp_obd; - __u64 lflags = *flags; - enum ldlm_mode rc; - - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH)) - return -EIO; - - /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother - */ - policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK; - policy->l_extent.end |= ~PAGE_MASK; - - /* Next, search for already existing extent locks that will cover us */ - /* If we're trying to read, we also search for an existing PW lock. The - * VFS and page cache already protect us locally, so lots of readers/ - * writers can share a single PW lock. - */ - rc = mode; - if (mode == LCK_PR) - rc |= LCK_PW; - rc = ldlm_lock_match(obd->obd_namespace, lflags, - res_id, type, policy, rc, lockh, unref); - if (!rc || lflags & LDLM_FL_TEST_LOCK) - return rc; - - if (data) { - struct ldlm_lock *lock = ldlm_handle2lock(lockh); - - LASSERT(lock); - if (!osc_set_lock_data(lock, data)) { - ldlm_lock_decref(lockh, rc); - rc = 0; - } - LDLM_LOCK_PUT(lock); - } - return rc; -} - -static int osc_statfs_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - struct osc_async_args *aa, int rc) -{ - struct obd_statfs *msfs; - - if (rc == -EBADR) - /* The request has in fact never been sent - * due to issues at a higher level (LOV). 
- * Exit immediately since the caller is - * aware of the problem and takes care - * of the clean up - */ - return rc; - - if ((rc == -ENOTCONN || rc == -EAGAIN) && - (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) { - rc = 0; - goto out; - } - - if (rc != 0) - goto out; - - msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (!msfs) { - rc = -EPROTO; - goto out; - } - - *aa->aa_oi->oi_osfs = *msfs; -out: - rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc); - return rc; -} - -static int osc_statfs_async(struct obd_export *exp, - struct obd_info *oinfo, __u64 max_age, - struct ptlrpc_request_set *rqset) -{ - struct obd_device *obd = class_exp2obd(exp); - struct ptlrpc_request *req; - struct osc_async_args *aa; - int rc; - - /* We could possibly pass max_age in the request (as an absolute - * timestamp or a "seconds.usec ago") so the target can avoid doing - * extra calls into the filesystem if that isn't necessary (e.g. - * during mount that would help a bit). Having relative timestamps - * is not so great if request processing is slow, while absolute - * timestamps are not ideal because they need time synchronization. 
- */ - req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS); - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - ptlrpc_request_set_replen(req); - req->rq_request_portal = OST_CREATE_PORTAL; - ptlrpc_at_set_req_timeout(req); - - if (oinfo->oi_flags & OBD_STATFS_NODELAY) { - /* procfs requests not want stat in wait for avoid deadlock */ - req->rq_no_resend = 1; - req->rq_no_delay = 1; - } - - req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret; - BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - aa->aa_oi = oinfo; - - ptlrpc_set_add_req(rqset, req); - return 0; -} - -static int osc_statfs(const struct lu_env *env, struct obd_export *exp, - struct obd_statfs *osfs, __u64 max_age, __u32 flags) -{ - struct obd_device *obd = class_exp2obd(exp); - struct obd_statfs *msfs; - struct ptlrpc_request *req; - struct obd_import *imp = NULL; - int rc; - - /* Since the request might also come from lprocfs, so we need - * sync this with client_disconnect_export Bug15684 - */ - down_read(&obd->u.cli.cl_sem); - if (obd->u.cli.cl_import) - imp = class_import_get(obd->u.cli.cl_import); - up_read(&obd->u.cli.cl_sem); - if (!imp) - return -ENODEV; - - /* We could possibly pass max_age in the request (as an absolute - * timestamp or a "seconds.usec ago") so the target can avoid doing - * extra calls into the filesystem if that isn't necessary (e.g. - * during mount that would help a bit). Having relative timestamps - * is not so great if request processing is slow, while absolute - * timestamps are not ideal because they need time synchronization. 
- */ - req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS); - - class_import_put(imp); - - if (!req) - return -ENOMEM; - - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - ptlrpc_request_set_replen(req); - req->rq_request_portal = OST_CREATE_PORTAL; - ptlrpc_at_set_req_timeout(req); - - if (flags & OBD_STATFS_NODELAY) { - /* procfs requests not want stat in wait for avoid deadlock */ - req->rq_no_resend = 1; - req->rq_no_delay = 1; - } - - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (!msfs) { - rc = -EPROTO; - goto out; - } - - *osfs = *msfs; - - out: - ptlrpc_req_finished(req); - return rc; -} - -static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void __user *uarg) -{ - struct obd_device *obd = exp->exp_obd; - struct obd_ioctl_data *data = karg; - int err = 0; - - if (!try_module_get(THIS_MODULE)) { - CERROR("%s: cannot get module '%s'\n", obd->obd_name, - module_name(THIS_MODULE)); - return -EINVAL; - } - switch (cmd) { - case OBD_IOC_CLIENT_RECOVER: - err = ptlrpc_recover_import(obd->u.cli.cl_import, - data->ioc_inlbuf1, 0); - if (err > 0) - err = 0; - goto out; - case IOC_OSC_SET_ACTIVE: - err = ptlrpc_set_import_active(obd->u.cli.cl_import, - data->ioc_offset); - goto out; - case OBD_IOC_PING_TARGET: - err = ptlrpc_obd_ping(obd); - goto out; - default: - CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n", - cmd, current->comm); - err = -ENOTTY; - goto out; - } -out: - module_put(THIS_MODULE); - return err; -} - -static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, - u32 keylen, void *key, u32 vallen, - void *val, struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req; - struct obd_device *obd = exp->exp_obd; - struct obd_import *imp = class_exp2cliimp(exp); - char *tmp; - int rc; - - OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10); - - if 
(KEY_IS(KEY_CHECKSUM)) { - if (vallen != sizeof(int)) - return -EINVAL; - exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0; - return 0; - } - - if (KEY_IS(KEY_SPTLRPC_CONF)) { - sptlrpc_conf_client_adapt(obd); - return 0; - } - - if (KEY_IS(KEY_FLUSH_CTX)) { - sptlrpc_import_flush_my_ctx(imp); - return 0; - } - - if (KEY_IS(KEY_CACHE_SET)) { - struct client_obd *cli = &obd->u.cli; - - LASSERT(!cli->cl_cache); /* only once */ - cli->cl_cache = val; - cl_cache_incref(cli->cl_cache); - cli->cl_lru_left = &cli->cl_cache->ccc_lru_left; - - /* add this osc into entity list */ - LASSERT(list_empty(&cli->cl_lru_osc)); - spin_lock(&cli->cl_cache->ccc_lru_lock); - list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru); - spin_unlock(&cli->cl_cache->ccc_lru_lock); - - return 0; - } - - if (KEY_IS(KEY_CACHE_LRU_SHRINK)) { - struct client_obd *cli = &obd->u.cli; - long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1; - long target = *(long *)val; - - nr = osc_lru_shrink(env, cli, min(nr, target), true); - *(long *)val -= nr; - return 0; - } - - if (!set && !KEY_IS(KEY_GRANT_SHRINK)) - return -EINVAL; - - /* We pass all other commands directly to OST. Since nobody calls osc - * methods directly and everybody is supposed to go through LOV, we - * assume lov checked invalid values for us. - * The only recognised values so far are evict_by_nid and mds_conn. - * Even if something bad goes through, we'd get a -EINVAL from OST - * anyway. - */ - - req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ? 
- &RQF_OST_SET_GRANT_INFO : - &RQF_OBD_SET_INFO); - if (!req) - return -ENOMEM; - - req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, - RCL_CLIENT, keylen); - if (!KEY_IS(KEY_GRANT_SHRINK)) - req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL, - RCL_CLIENT, vallen); - rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); - memcpy(tmp, key, keylen); - tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ? - &RMF_OST_BODY : - &RMF_SETINFO_VAL); - memcpy(tmp, val, vallen); - - if (KEY_IS(KEY_GRANT_SHRINK)) { - struct osc_brw_async_args *aa; - struct obdo *oa; - - BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); - if (!oa) { - ptlrpc_req_finished(req); - return -ENOMEM; - } - *oa = ((struct ost_body *)val)->oa; - aa->aa_oa = oa; - req->rq_interpret_reply = osc_shrink_grant_interpret; - } - - ptlrpc_request_set_replen(req); - if (!KEY_IS(KEY_GRANT_SHRINK)) { - LASSERT(set); - ptlrpc_set_add_req(set, req); - ptlrpc_check_set(NULL, set); - } else { - ptlrpcd_add_req(req); - } - - return 0; -} - -static int osc_reconnect(const struct lu_env *env, - struct obd_export *exp, struct obd_device *obd, - struct obd_uuid *cluuid, - struct obd_connect_data *data, - void *localdata) -{ - struct client_obd *cli = &obd->u.cli; - - if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { - long lost_grant; - - spin_lock(&cli->cl_loi_list_lock); - data->ocd_grant = (cli->cl_avail_grant + - (cli->cl_dirty_pages << PAGE_SHIFT)) ?: - 2 * cli_brw_size(obd); - lost_grant = cli->cl_lost_grant; - cli->cl_lost_grant = 0; - spin_unlock(&cli->cl_loi_list_lock); - - CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n", - data->ocd_connect_flags, - data->ocd_version, data->ocd_grant, lost_grant); - } - - return 0; 
-} - -static int osc_disconnect(struct obd_export *exp) -{ - struct obd_device *obd = class_exp2obd(exp); - int rc; - - rc = client_disconnect_export(exp); - /** - * Initially we put del_shrink_grant before disconnect_export, but it - * causes the following problem if setup (connect) and cleanup - * (disconnect) are tangled together. - * connect p1 disconnect p2 - * ptlrpc_connect_import - * ............... class_manual_cleanup - * osc_disconnect - * del_shrink_grant - * ptlrpc_connect_interrupt - * init_grant_shrink - * add this client to shrink list - * cleanup_osc - * Bang! pinger trigger the shrink. - * So the osc should be disconnected from the shrink list, after we - * are sure the import has been destroyed. BUG18662 - */ - if (!obd->u.cli.cl_import) - osc_del_shrink_grant(&obd->u.cli); - return rc; -} - -static int osc_ldlm_resource_invalidate(struct cfs_hash *hs, - struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *arg) -{ - struct ldlm_resource *res = cfs_hash_object(hs, hnode); - struct osc_object *osc = NULL; - struct lu_env *env = arg; - struct ldlm_lock *lock; - - lock_res(res); - list_for_each_entry(lock, &res->lr_granted, l_res_link) { - if (lock->l_ast_data && !osc) { - osc = lock->l_ast_data; - cl_object_get(osc2cl(osc)); - } - - /* - * clear LDLM_FL_CLEANED flag to make sure it will be canceled - * by the 2nd round of ldlm_namespace_clean() call in - * osc_import_event(). 
- */ - ldlm_clear_cleaned(lock); - } - unlock_res(res); - - if (osc) { - osc_object_invalidate(env, osc); - cl_object_put(env, osc2cl(osc)); - } - - return 0; -} - -static int osc_import_event(struct obd_device *obd, - struct obd_import *imp, - enum obd_import_event event) -{ - struct client_obd *cli; - int rc = 0; - - LASSERT(imp->imp_obd == obd); - - switch (event) { - case IMP_EVENT_DISCON: { - cli = &obd->u.cli; - spin_lock(&cli->cl_loi_list_lock); - cli->cl_avail_grant = 0; - cli->cl_lost_grant = 0; - spin_unlock(&cli->cl_loi_list_lock); - break; - } - case IMP_EVENT_INACTIVE: { - rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL); - break; - } - case IMP_EVENT_INVALIDATE: { - struct ldlm_namespace *ns = obd->obd_namespace; - struct lu_env *env; - u16 refcheck; - - ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); - - env = cl_env_get(&refcheck); - if (!IS_ERR(env)) { - osc_io_unplug(env, &obd->u.cli, NULL); - - cfs_hash_for_each_nolock(ns->ns_rs_hash, - osc_ldlm_resource_invalidate, - env, 0); - cl_env_put(env, &refcheck); - - ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); - } else { - rc = PTR_ERR(env); - } - break; - } - case IMP_EVENT_ACTIVE: { - rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL); - break; - } - case IMP_EVENT_OCD: { - struct obd_connect_data *ocd = &imp->imp_connect_data; - - if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT) - osc_init_grant(&obd->u.cli, ocd); - - /* See bug 7198 */ - if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL) - imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL; - - rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL); - break; - } - case IMP_EVENT_DEACTIVATE: { - rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL); - break; - } - case IMP_EVENT_ACTIVATE: { - rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL); - break; - } - default: - CERROR("Unknown import event %d\n", event); - LBUG(); - } - return rc; -} - -/** - * Determine whether the lock can be canceled 
before replaying the lock - * during recovery, see bug16774 for detailed information. - * - * \retval zero the lock can't be canceled - * \retval other ok to cancel - */ -static int osc_cancel_weight(struct ldlm_lock *lock) -{ - /* - * Cancel all unused and granted extent lock. - */ - if (lock->l_resource->lr_type == LDLM_EXTENT && - lock->l_granted_mode == lock->l_req_mode && - osc_ldlm_weigh_ast(lock) == 0) - return 1; - - return 0; -} - -static int brw_queue_work(const struct lu_env *env, void *data) -{ - struct client_obd *cli = data; - - CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli); - - osc_io_unplug(env, cli, NULL); - return 0; -} - -int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - struct lprocfs_static_vars lvars = { NULL }; - struct client_obd *cli = &obd->u.cli; - void *handler; - int rc; - int adding; - int added; - int req_count; - - rc = ptlrpcd_addref(); - if (rc) - return rc; - - rc = client_obd_setup(obd, lcfg); - if (rc) - goto out_ptlrpcd; - - handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli); - if (IS_ERR(handler)) { - rc = PTR_ERR(handler); - goto out_client_setup; - } - cli->cl_writeback_work = handler; - - handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli); - if (IS_ERR(handler)) { - rc = PTR_ERR(handler); - goto out_ptlrpcd_work; - } - - cli->cl_lru_work = handler; - - rc = osc_quota_setup(obd); - if (rc) - goto out_ptlrpcd_work; - - cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL; - lprocfs_osc_init_vars(&lvars); - if (lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars) == 0) { - lproc_osc_attach_seqstat(obd); - sptlrpc_lprocfs_cliobd_attach(obd); - ptlrpc_lprocfs_register_obd(obd); - } - - /* - * We try to control the total number of requests with a upper limit - * osc_reqpool_maxreqcount. There might be some race which will cause - * over-limit allocation, but it is fine. 
- */ - req_count = atomic_read(&osc_pool_req_count); - if (req_count < osc_reqpool_maxreqcount) { - adding = cli->cl_max_rpcs_in_flight + 2; - if (req_count + adding > osc_reqpool_maxreqcount) - adding = osc_reqpool_maxreqcount - req_count; - - added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding); - atomic_add(added, &osc_pool_req_count); - } - - INIT_LIST_HEAD(&cli->cl_grant_shrink_list); - ns_register_cancel(obd->obd_namespace, osc_cancel_weight); - - spin_lock(&osc_shrink_lock); - list_add_tail(&cli->cl_shrink_list, &osc_shrink_list); - spin_unlock(&osc_shrink_lock); - - return rc; - -out_ptlrpcd_work: - if (cli->cl_writeback_work) { - ptlrpcd_destroy_work(cli->cl_writeback_work); - cli->cl_writeback_work = NULL; - } - if (cli->cl_lru_work) { - ptlrpcd_destroy_work(cli->cl_lru_work); - cli->cl_lru_work = NULL; - } -out_client_setup: - client_obd_cleanup(obd); -out_ptlrpcd: - ptlrpcd_decref(); - return rc; -} - -static int osc_precleanup(struct obd_device *obd) -{ - struct client_obd *cli = &obd->u.cli; - - /* LU-464 - * for echo client, export may be on zombie list, wait for - * zombie thread to cull it, because cli.cl_import will be - * cleared in client_disconnect_export(): - * class_export_destroy() -> obd_cleanup() -> - * echo_device_free() -> echo_client_cleanup() -> - * obd_disconnect() -> osc_disconnect() -> - * client_disconnect_export() - */ - obd_zombie_barrier(); - if (cli->cl_writeback_work) { - ptlrpcd_destroy_work(cli->cl_writeback_work); - cli->cl_writeback_work = NULL; - } - - if (cli->cl_lru_work) { - ptlrpcd_destroy_work(cli->cl_lru_work); - cli->cl_lru_work = NULL; - } - - obd_cleanup_client_import(obd); - ptlrpc_lprocfs_unregister_obd(obd); - lprocfs_obd_cleanup(obd); - return 0; -} - -static int osc_cleanup(struct obd_device *obd) -{ - struct client_obd *cli = &obd->u.cli; - int rc; - - spin_lock(&osc_shrink_lock); - list_del(&cli->cl_shrink_list); - spin_unlock(&osc_shrink_lock); - - /* lru cleanup */ - if (cli->cl_cache) { - 
LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0); - spin_lock(&cli->cl_cache->ccc_lru_lock); - list_del_init(&cli->cl_lru_osc); - spin_unlock(&cli->cl_cache->ccc_lru_lock); - cli->cl_lru_left = NULL; - cl_cache_decref(cli->cl_cache); - cli->cl_cache = NULL; - } - - /* free memory of osc quota cache */ - osc_quota_cleanup(obd); - - rc = client_obd_cleanup(obd); - - ptlrpcd_decref(); - return rc; -} - -int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg) -{ - struct lprocfs_static_vars lvars = { NULL }; - int rc = 0; - - lprocfs_osc_init_vars(&lvars); - - switch (lcfg->lcfg_command) { - default: - rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars, - lcfg, obd); - if (rc > 0) - rc = 0; - break; - } - - return rc; -} - -static int osc_process_config(struct obd_device *obd, u32 len, void *buf) -{ - return osc_process_config_base(obd, buf); -} - -static struct obd_ops osc_obd_ops = { - .owner = THIS_MODULE, - .setup = osc_setup, - .precleanup = osc_precleanup, - .cleanup = osc_cleanup, - .add_conn = client_import_add_conn, - .del_conn = client_import_del_conn, - .connect = client_connect_import, - .reconnect = osc_reconnect, - .disconnect = osc_disconnect, - .statfs = osc_statfs, - .statfs_async = osc_statfs_async, - .create = osc_create, - .destroy = osc_destroy, - .getattr = osc_getattr, - .setattr = osc_setattr, - .iocontrol = osc_iocontrol, - .set_info_async = osc_set_info_async, - .import_event = osc_import_event, - .process_config = osc_process_config, - .quotactl = osc_quotactl, -}; - -struct list_head osc_shrink_list = LIST_HEAD_INIT(osc_shrink_list); -DEFINE_SPINLOCK(osc_shrink_lock); - -static struct shrinker osc_cache_shrinker = { - .count_objects = osc_cache_shrink_count, - .scan_objects = osc_cache_shrink_scan, - .seeks = DEFAULT_SEEKS, -}; - -static int __init osc_init(void) -{ - struct lprocfs_static_vars lvars = { NULL }; - unsigned int reqpool_size; - unsigned int reqsize; - int rc; - - /* print an address of _any_ 
initialized kernel symbol from this - * module, to allow debugging with gdb that doesn't support data - * symbols from modules. - */ - CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches); - - rc = libcfs_setup(); - if (rc) - return rc; - - rc = lu_kmem_init(osc_caches); - if (rc) - return rc; - - lprocfs_osc_init_vars(&lvars); - - rc = register_shrinker(&osc_cache_shrinker); - if (rc) - goto err; - - /* This is obviously too much memory, only prevent overflow here */ - if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) { - rc = -EINVAL; - goto err; - } - - reqpool_size = osc_reqpool_mem_max << 20; - - reqsize = 1; - while (reqsize < OST_MAXREQSIZE) - reqsize = reqsize << 1; - - /* - * We don't enlarge the request count in OSC pool according to - * cl_max_rpcs_in_flight. The allocation from the pool will only be - * tried after normal allocation failed. So a small OSC pool won't - * cause much performance degression in most of cases. - */ - osc_reqpool_maxreqcount = reqpool_size / reqsize; - - atomic_set(&osc_pool_req_count, 0); - osc_rq_pool = ptlrpc_init_rq_pool(0, OST_MAXREQSIZE, - ptlrpc_add_rqs_to_pool); - - rc = -ENOMEM; - - if (!osc_rq_pool) - goto err; - - rc = class_register_type(&osc_obd_ops, NULL, - LUSTRE_OSC_NAME, &osc_device_type); - if (rc) - goto err; - - return rc; - -err: - if (osc_rq_pool) - ptlrpc_free_rq_pool(osc_rq_pool); - unregister_shrinker(&osc_cache_shrinker); - lu_kmem_fini(osc_caches); - return rc; -} - -static void /*__exit*/ osc_exit(void) -{ - unregister_shrinker(&osc_cache_shrinker); - class_unregister_type(LUSTRE_OSC_NAME); - lu_kmem_fini(osc_caches); - ptlrpc_free_rq_pool(osc_rq_pool); -} - -MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(LUSTRE_VERSION_STRING); - -module_init(osc_init); -module_exit(osc_exit); diff --git a/drivers/staging/lustre/lustre/ptlrpc/Makefile b/drivers/staging/lustre/lustre/ptlrpc/Makefile deleted file mode 100644 index 1deb1971b39e..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include -subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include - -obj-$(CONFIG_LUSTRE_FS) += ptlrpc.o -LDLM := ../../lustre/ldlm/ - -ldlm_objs := $(LDLM)l_lock.o $(LDLM)ldlm_lock.o -ldlm_objs += $(LDLM)ldlm_resource.o $(LDLM)ldlm_lib.o -ldlm_objs += $(LDLM)ldlm_plain.o $(LDLM)ldlm_extent.o -ldlm_objs += $(LDLM)ldlm_request.o $(LDLM)ldlm_lockd.o -ldlm_objs += $(LDLM)ldlm_flock.o $(LDLM)ldlm_inodebits.o -ldlm_objs += $(LDLM)ldlm_pool.o -ldlm_objs += $(LDLM)interval_tree.o -ptlrpc_objs := client.o recover.o connection.o niobuf.o pack_generic.o -ptlrpc_objs += events.o ptlrpc_module.o service.o pinger.o -ptlrpc_objs += llog_net.o llog_client.o import.o ptlrpcd.o -ptlrpc_objs += pers.o lproc_ptlrpc.o wiretest.o layout.o -ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o -ptlrpc_objs += sec_null.o sec_plain.o nrs.o nrs_fifo.o - -ptlrpc-y := $(ldlm_objs) $(ptlrpc_objs) sec_lproc.o -ptlrpc-$(CONFIG_LUSTRE_TRANSLATE_ERRNOS) += errno.o diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c deleted file mode 100644 index c1b82bf20f08..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/client.c +++ /dev/null @@ -1,3271 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -/** Implementation of client-side PortalRPC interfaces */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = { - .add_kiov_frag = ptlrpc_prep_bulk_page_pin, - .release_frags = ptlrpc_release_bulk_page_pin, -}; -EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops); - -const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = { - .add_kiov_frag = ptlrpc_prep_bulk_page_nopin, - .release_frags = NULL, -}; -EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops); - -static int ptlrpc_send_new_req(struct ptlrpc_request *req); -static int ptlrpcd_check_work(struct ptlrpc_request *req); -static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async); - -/** - * Initialize passed in client structure \a cl. 
- */ -void ptlrpc_init_client(int req_portal, int rep_portal, char *name, - struct ptlrpc_client *cl) -{ - cl->cli_request_portal = req_portal; - cl->cli_reply_portal = rep_portal; - cl->cli_name = name; -} -EXPORT_SYMBOL(ptlrpc_init_client); - -/** - * Return PortalRPC connection for remote uud \a uuid - */ -struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid) -{ - struct ptlrpc_connection *c; - lnet_nid_t self; - struct lnet_process_id peer; - int err; - - /* - * ptlrpc_uuid_to_peer() initializes its 2nd parameter - * before accessing its values. - * coverity[uninit_use_in_call] - */ - err = ptlrpc_uuid_to_peer(uuid, &peer, &self); - if (err != 0) { - CNETERR("cannot find peer %s!\n", uuid->uuid); - return NULL; - } - - c = ptlrpc_connection_get(peer, self, uuid); - if (c) { - memcpy(c->c_remote_uuid.uuid, - uuid->uuid, sizeof(c->c_remote_uuid.uuid)); - } - - CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c); - - return c; -} - -/** - * Allocate and initialize new bulk descriptor on the sender. - * Returns pointer to the descriptor or NULL on error. 
- */ -struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags, - unsigned int max_brw, - enum ptlrpc_bulk_op_type type, - unsigned int portal, - const struct ptlrpc_bulk_frag_ops *ops) -{ - struct ptlrpc_bulk_desc *desc; - int i; - - /* ensure that only one of KIOV or IOVEC is set but not both */ - LASSERT((ptlrpc_is_bulk_desc_kiov(type) && ops->add_kiov_frag) || - (ptlrpc_is_bulk_desc_kvec(type) && ops->add_iov_frag)); - - desc = kzalloc(sizeof(*desc), GFP_NOFS); - if (!desc) - return NULL; - - if (type & PTLRPC_BULK_BUF_KIOV) { - GET_KIOV(desc) = kcalloc(nfrags, sizeof(*GET_KIOV(desc)), - GFP_NOFS); - if (!GET_KIOV(desc)) - goto free_desc; - } else { - GET_KVEC(desc) = kcalloc(nfrags, sizeof(*GET_KVEC(desc)), - GFP_NOFS); - if (!GET_KVEC(desc)) - goto free_desc; - } - - spin_lock_init(&desc->bd_lock); - init_waitqueue_head(&desc->bd_waitq); - desc->bd_max_iov = nfrags; - desc->bd_iov_count = 0; - desc->bd_portal = portal; - desc->bd_type = type; - desc->bd_md_count = 0; - desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *)ops; - LASSERT(max_brw > 0); - desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT); - /* - * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this - * node. Negotiated ocd_brw_size will always be <= this number. - */ - for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++) - LNetInvalidateMDHandle(&desc->bd_mds[i]); - - return desc; -free_desc: - kfree(desc); - return NULL; -} - -/** - * Prepare bulk descriptor for specified outgoing request \a req that - * can fit \a nfrags * pages. \a type is bulk type. \a portal is where - * the bulk to be sent. Used on client-side. - * Returns pointer to newly allocated initialized bulk descriptor or NULL on - * error. 
- */ -struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - unsigned int nfrags, - unsigned int max_brw, - unsigned int type, - unsigned int portal, - const struct ptlrpc_bulk_frag_ops *ops) -{ - struct obd_import *imp = req->rq_import; - struct ptlrpc_bulk_desc *desc; - - LASSERT(ptlrpc_is_bulk_op_passive(type)); - - desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops); - if (!desc) - return NULL; - - desc->bd_import_generation = req->rq_import_generation; - desc->bd_import = class_import_get(imp); - desc->bd_req = req; - - desc->bd_cbid.cbid_fn = client_bulk_callback; - desc->bd_cbid.cbid_arg = desc; - - /* This makes req own desc, and free it when she frees herself */ - req->rq_bulk = desc; - - return desc; -} -EXPORT_SYMBOL(ptlrpc_prep_bulk_imp); - -void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, int len, int pin) -{ - struct bio_vec *kiov; - - LASSERT(desc->bd_iov_count < desc->bd_max_iov); - LASSERT(page); - LASSERT(pageoffset >= 0); - LASSERT(len > 0); - LASSERT(pageoffset + len <= PAGE_SIZE); - LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); - - kiov = &BD_GET_KIOV(desc, desc->bd_iov_count); - - desc->bd_nob += len; - - if (pin) - get_page(page); - - kiov->bv_page = page; - kiov->bv_offset = pageoffset; - kiov->bv_len = len; - - desc->bd_iov_count++; -} -EXPORT_SYMBOL(__ptlrpc_prep_bulk_page); - -int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, - void *frag, int len) -{ - struct kvec *iovec; - - LASSERT(desc->bd_iov_count < desc->bd_max_iov); - LASSERT(frag); - LASSERT(len > 0); - LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type)); - - iovec = &BD_GET_KVEC(desc, desc->bd_iov_count); - - desc->bd_nob += len; - - iovec->iov_base = frag; - iovec->iov_len = len; - - desc->bd_iov_count++; - - return desc->bd_nob; -} -EXPORT_SYMBOL(ptlrpc_prep_bulk_frag); - -void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc) -{ - LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ 
- LASSERT(desc->bd_md_count == 0); /* network hands off */ - LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); - LASSERT(desc->bd_frag_ops); - - if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) - sptlrpc_enc_pool_put_pages(desc); - - if (desc->bd_export) - class_export_put(desc->bd_export); - else - class_import_put(desc->bd_import); - - if (desc->bd_frag_ops->release_frags) - desc->bd_frag_ops->release_frags(desc); - - if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) - kfree(GET_KIOV(desc)); - else - kfree(GET_KVEC(desc)); - - kfree(desc); -} -EXPORT_SYMBOL(ptlrpc_free_bulk); - -/** - * Set server timelimit for this req, i.e. how long are we willing to wait - * for reply before timing out this request. - */ -void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req) -{ - __u32 serv_est; - int idx; - struct imp_at *at; - - LASSERT(req->rq_import); - - if (AT_OFF) { - /* - * non-AT settings - * - * \a imp_server_timeout means this is reverse import and - * we send (currently only) ASTs to the client and cannot afford - * to wait too long for the reply, otherwise the other client - * (because of which we are sending this request) would - * timeout waiting for us - */ - req->rq_timeout = req->rq_import->imp_server_timeout ? - obd_timeout / 2 : obd_timeout; - } else { - at = &req->rq_import->imp_at; - idx = import_at_get_index(req->rq_import, - req->rq_request_portal); - serv_est = at_get(&at->iat_service_estimate[idx]); - req->rq_timeout = at_est2timeout(serv_est); - } - /* - * We could get even fancier here, using history to predict increased - * loading... 
- */ - - /* - * Let the server know what this RPC timeout is by putting it in the - * reqmsg - */ - lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); -} -EXPORT_SYMBOL(ptlrpc_at_set_req_timeout); - -/* Adjust max service estimate based on server value */ -static void ptlrpc_at_adj_service(struct ptlrpc_request *req, - unsigned int serv_est) -{ - int idx; - unsigned int oldse; - struct imp_at *at; - - LASSERT(req->rq_import); - at = &req->rq_import->imp_at; - - idx = import_at_get_index(req->rq_import, req->rq_request_portal); - /* - * max service estimates are tracked on the server side, - * so just keep minimal history here - */ - oldse = at_measured(&at->iat_service_estimate[idx], serv_est); - if (oldse != 0) - CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d has changed from %d to %d\n", - req->rq_import->imp_obd->obd_name, req->rq_request_portal, - oldse, at_get(&at->iat_service_estimate[idx])); -} - -/* Expected network latency per remote node (secs) */ -int ptlrpc_at_get_net_latency(struct ptlrpc_request *req) -{ - return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency); -} - -/* Adjust expected network latency */ -void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, - unsigned int service_time) -{ - unsigned int nl, oldnl; - struct imp_at *at; - time64_t now = ktime_get_real_seconds(); - - LASSERT(req->rq_import); - - if (service_time > now - req->rq_sent + 3) { - /* - * bz16408, however, this can also happen if early reply - * is lost and client RPC is expired and resent, early reply - * or reply of original RPC can still be fit in reply buffer - * of resent RPC, now client is measuring time from the - * resent time, but server sent back service time of original - * RPC. - */ - CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ? 
- D_ADAPTTO : D_WARNING, - "Reported service time %u > total measured time %lld\n", - service_time, now - req->rq_sent); - return; - } - - /* Network latency is total time less server processing time */ - nl = max_t(int, now - req->rq_sent - - service_time, 0) + 1; /* st rounding */ - at = &req->rq_import->imp_at; - - oldnl = at_measured(&at->iat_net_latency, nl); - if (oldnl != 0) - CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) has changed from %d to %d\n", - req->rq_import->imp_obd->obd_name, - obd_uuid2str( - &req->rq_import->imp_connection->c_remote_uuid), - oldnl, at_get(&at->iat_net_latency)); -} - -static int unpack_reply(struct ptlrpc_request *req) -{ - int rc; - - if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) { - rc = ptlrpc_unpack_rep_msg(req, req->rq_replen); - if (rc) { - DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc); - return -EPROTO; - } - } - - rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF); - if (rc) { - DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc); - return -EPROTO; - } - return 0; -} - -/** - * Handle an early reply message, called with the rq_lock held. - * If anything goes wrong just ignore it - same as if it never happened - */ -static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) - __must_hold(&req->rq_lock) -{ - struct ptlrpc_request *early_req; - time64_t olddl; - int rc; - - req->rq_early = 0; - spin_unlock(&req->rq_lock); - - rc = sptlrpc_cli_unwrap_early_reply(req, &early_req); - if (rc) { - spin_lock(&req->rq_lock); - return rc; - } - - rc = unpack_reply(early_req); - if (rc) { - sptlrpc_cli_finish_early_reply(early_req); - spin_lock(&req->rq_lock); - return rc; - } - - /* - * Use new timeout value just to adjust the local value for this - * request, don't include it into at_history. It is unclear yet why - * service time increased and should it be counted or skipped, e.g. 
- * that can be recovery case or some error or server, the real reply - * will add all new data if it is worth to add. - */ - req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg); - lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); - - /* Network latency can be adjusted, it is pure network delays */ - ptlrpc_at_adj_net_latency(req, - lustre_msg_get_service_time(early_req->rq_repmsg)); - - sptlrpc_cli_finish_early_reply(early_req); - - spin_lock(&req->rq_lock); - olddl = req->rq_deadline; - /* - * server assumes it now has rq_timeout from when the request - * arrived, so the client should give it at least that long. - * since we don't know the arrival time we'll use the original - * sent time - */ - req->rq_deadline = req->rq_sent + req->rq_timeout + - ptlrpc_at_get_net_latency(req); - - DEBUG_REQ(D_ADAPTTO, req, - "Early reply #%d, new deadline in %lds (%lds)", - req->rq_early_count, - (long)(req->rq_deadline - ktime_get_real_seconds()), - (long)(req->rq_deadline - olddl)); - - return rc; -} - -static struct kmem_cache *request_cache; - -int ptlrpc_request_cache_init(void) -{ - request_cache = kmem_cache_create("ptlrpc_cache", - sizeof(struct ptlrpc_request), - 0, SLAB_HWCACHE_ALIGN, NULL); - return !request_cache ? -ENOMEM : 0; -} - -void ptlrpc_request_cache_fini(void) -{ - kmem_cache_destroy(request_cache); -} - -struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags) -{ - struct ptlrpc_request *req; - - req = kmem_cache_zalloc(request_cache, flags); - return req; -} - -void ptlrpc_request_cache_free(struct ptlrpc_request *req) -{ - kmem_cache_free(request_cache, req); -} - -/** - * Wind down request pool \a pool. 
- * Frees all requests from the pool too - */ -void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) -{ - struct ptlrpc_request *req; - - while ((req = list_first_entry_or_null(&pool->prp_req_list, - struct ptlrpc_request, rq_list))) { - list_del(&req->rq_list); - LASSERT(req->rq_reqbuf); - LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); - kvfree(req->rq_reqbuf); - ptlrpc_request_cache_free(req); - } - kfree(pool); -} -EXPORT_SYMBOL(ptlrpc_free_rq_pool); - -/** - * Allocates, initializes and adds \a num_rq requests to the pool \a pool - */ -int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) -{ - int i; - int size = 1; - - while (size < pool->prp_rq_size) - size <<= 1; - - LASSERTF(list_empty(&pool->prp_req_list) || - size == pool->prp_rq_size, - "Trying to change pool size with nonempty pool from %d to %d bytes\n", - pool->prp_rq_size, size); - - spin_lock(&pool->prp_lock); - pool->prp_rq_size = size; - for (i = 0; i < num_rq; i++) { - struct ptlrpc_request *req; - struct lustre_msg *msg; - - spin_unlock(&pool->prp_lock); - req = ptlrpc_request_cache_alloc(GFP_KERNEL); - if (!req) - return i; - msg = kvzalloc(size, GFP_KERNEL); - if (!msg) { - ptlrpc_request_cache_free(req); - return i; - } - req->rq_reqbuf = msg; - req->rq_reqbuf_len = size; - req->rq_pool = pool; - spin_lock(&pool->prp_lock); - list_add_tail(&req->rq_list, &pool->prp_req_list); - } - spin_unlock(&pool->prp_lock); - return num_rq; -} -EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool); - -/** - * Create and initialize new request pool with given attributes: - * \a num_rq - initial number of requests to create for the pool - * \a msgsize - maximum message size possible for requests in thid pool - * \a populate_pool - function to be called when more requests need to be added - * to the pool - * Returns pointer to newly created pool or NULL on error. 
- */ -struct ptlrpc_request_pool * -ptlrpc_init_rq_pool(int num_rq, int msgsize, - int (*populate_pool)(struct ptlrpc_request_pool *, int)) -{ - struct ptlrpc_request_pool *pool; - - pool = kzalloc(sizeof(struct ptlrpc_request_pool), GFP_NOFS); - if (!pool) - return NULL; - - /* - * Request next power of two for the allocation, because internally - * kernel would do exactly this - */ - - spin_lock_init(&pool->prp_lock); - INIT_LIST_HEAD(&pool->prp_req_list); - pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD; - pool->prp_populate = populate_pool; - - populate_pool(pool, num_rq); - - return pool; -} -EXPORT_SYMBOL(ptlrpc_init_rq_pool); - -/** - * Fetches one request from pool \a pool - */ -static struct ptlrpc_request * -ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool) -{ - struct ptlrpc_request *request; - struct lustre_msg *reqbuf; - - if (!pool) - return NULL; - - spin_lock(&pool->prp_lock); - - /* - * See if we have anything in a pool, and bail out if nothing, - * in writeout path, where this matters, this is safe to do, because - * nothing is lost in this case, and when some in-flight requests - * complete, this code will be called again. - */ - if (unlikely(list_empty(&pool->prp_req_list))) { - spin_unlock(&pool->prp_lock); - return NULL; - } - - request = list_entry(pool->prp_req_list.next, struct ptlrpc_request, - rq_list); - list_del_init(&request->rq_list); - spin_unlock(&pool->prp_lock); - - LASSERT(request->rq_reqbuf); - LASSERT(request->rq_pool); - - reqbuf = request->rq_reqbuf; - memset(request, 0, sizeof(*request)); - request->rq_reqbuf = reqbuf; - request->rq_reqbuf_len = pool->prp_rq_size; - request->rq_pool = pool; - - return request; -} - -/** - * Returns freed \a request to pool. 
- */ -static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request) -{ - struct ptlrpc_request_pool *pool = request->rq_pool; - - spin_lock(&pool->prp_lock); - LASSERT(list_empty(&request->rq_list)); - LASSERT(!request->rq_receiving_reply); - list_add_tail(&request->rq_list, &pool->prp_req_list); - spin_unlock(&pool->prp_lock); -} - -void ptlrpc_add_unreplied(struct ptlrpc_request *req) -{ - struct obd_import *imp = req->rq_import; - struct ptlrpc_request *iter; - - assert_spin_locked(&imp->imp_lock); - LASSERT(list_empty(&req->rq_unreplied_list)); - - /* unreplied list is sorted by xid in ascending order */ - list_for_each_entry_reverse(iter, &imp->imp_unreplied_list, rq_unreplied_list) { - - LASSERT(req->rq_xid != iter->rq_xid); - if (req->rq_xid < iter->rq_xid) - continue; - list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list); - return; - } - list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list); -} - -void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req) -{ - req->rq_xid = ptlrpc_next_xid(); - ptlrpc_add_unreplied(req); -} - -static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req) -{ - spin_lock(&req->rq_import->imp_lock); - ptlrpc_assign_next_xid_nolock(req); - spin_unlock(&req->rq_import->imp_lock); -} - -int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, - __u32 version, int opcode, char **bufs, - struct ptlrpc_cli_ctx *ctx) -{ - int count; - struct obd_import *imp; - __u32 *lengths; - int rc; - - count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT); - imp = request->rq_import; - lengths = request->rq_pill.rc_area[RCL_CLIENT]; - - if (unlikely(ctx)) { - request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx); - } else { - rc = sptlrpc_req_get_ctx(request); - if (rc) - goto out_free; - } - sptlrpc_req_set_flavor(request, opcode); - - rc = lustre_pack_request(request, imp->imp_msg_magic, count, - lengths, bufs); - if (rc) - goto out_ctx; - - lustre_msg_add_version(request->rq_reqmsg, version); - 
request->rq_send_state = LUSTRE_IMP_FULL; - request->rq_type = PTL_RPC_MSG_REQUEST; - - request->rq_req_cbid.cbid_fn = request_out_callback; - request->rq_req_cbid.cbid_arg = request; - - request->rq_reply_cbid.cbid_fn = reply_in_callback; - request->rq_reply_cbid.cbid_arg = request; - - request->rq_reply_deadline = 0; - request->rq_bulk_deadline = 0; - request->rq_req_deadline = 0; - request->rq_phase = RQ_PHASE_NEW; - request->rq_next_phase = RQ_PHASE_UNDEFINED; - - request->rq_request_portal = imp->imp_client->cli_request_portal; - request->rq_reply_portal = imp->imp_client->cli_reply_portal; - - ptlrpc_at_set_req_timeout(request); - - lustre_msg_set_opc(request->rq_reqmsg, opcode); - ptlrpc_assign_next_xid(request); - - /* Let's setup deadline for req/reply/bulk unlink for opcode. */ - if (cfs_fail_val == opcode) { - time64_t *fail_t = NULL, *fail2_t = NULL; - - if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) { - fail_t = &request->rq_bulk_deadline; - } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { - fail_t = &request->rq_reply_deadline; - } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) { - fail_t = &request->rq_req_deadline; - } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) { - fail_t = &request->rq_reply_deadline; - fail2_t = &request->rq_bulk_deadline; - } - - if (fail_t) { - *fail_t = ktime_get_real_seconds() + LONG_UNLINK; - - if (fail2_t) - *fail2_t = ktime_get_real_seconds() + - LONG_UNLINK; - - /* The RPC is infected, let the test change the - * fail_loc - */ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(2 * HZ); - set_current_state(TASK_RUNNING); - } - } - - return 0; - -out_ctx: - LASSERT(!request->rq_pool); - sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1); -out_free: - class_import_put(imp); - return rc; -} -EXPORT_SYMBOL(ptlrpc_request_bufs_pack); - -/** - * Pack request buffers for network transfer, performing necessary encryption - * steps if necessary. 
- */ -int ptlrpc_request_pack(struct ptlrpc_request *request, - __u32 version, int opcode) -{ - int rc; - - rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL); - if (rc) - return rc; - - /* - * For some old 1.8 clients (< 1.8.7), they will LASSERT the size of - * ptlrpc_body sent from server equal to local ptlrpc_body size, so we - * have to send old ptlrpc_body to keep interoperability with these - * clients. - * - * Only three kinds of server->client RPCs so far: - * - LDLM_BL_CALLBACK - * - LDLM_CP_CALLBACK - * - LDLM_GL_CALLBACK - * - * XXX This should be removed whenever we drop the interoperability with - * the these old clients. - */ - if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK || - opcode == LDLM_GL_CALLBACK) - req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY, - sizeof(struct ptlrpc_body_v2), RCL_CLIENT); - - return rc; -} -EXPORT_SYMBOL(ptlrpc_request_pack); - -/** - * Helper function to allocate new request on import \a imp - * and possibly using existing request from pool \a pool if provided. - * Returns allocated request structure with import field filled or - * NULL on error. - */ -static inline -struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp, - struct ptlrpc_request_pool *pool) -{ - struct ptlrpc_request *request; - - request = ptlrpc_request_cache_alloc(GFP_NOFS); - - if (!request && pool) - request = ptlrpc_prep_req_from_pool(pool); - - if (request) { - ptlrpc_cli_req_init(request); - - LASSERTF((unsigned long)imp > 0x1000, "%p", imp); - LASSERT(imp != LP_POISON); - LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n", - imp->imp_client); - LASSERT(imp->imp_client != LP_POISON); - - request->rq_import = class_import_get(imp); - } else { - CERROR("request allocation out of memory\n"); - } - - return request; -} - -/** - * Helper function for creating a request. 
- * Calls __ptlrpc_request_alloc to allocate new request structure and inits - * buffer structures according to capsule template \a format. - * Returns allocated request structure pointer or NULL on error. - */ -static struct ptlrpc_request * -ptlrpc_request_alloc_internal(struct obd_import *imp, - struct ptlrpc_request_pool *pool, - const struct req_format *format) -{ - struct ptlrpc_request *request; - - request = __ptlrpc_request_alloc(imp, pool); - if (!request) - return NULL; - - req_capsule_init(&request->rq_pill, request, RCL_CLIENT); - req_capsule_set(&request->rq_pill, format); - return request; -} - -/** - * Allocate new request structure for import \a imp and initialize its - * buffer structure according to capsule template \a format. - */ -struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, - const struct req_format *format) -{ - return ptlrpc_request_alloc_internal(imp, NULL, format); -} -EXPORT_SYMBOL(ptlrpc_request_alloc); - -/** - * Allocate new request structure for import \a imp from pool \a pool and - * initialize its buffer structure according to capsule template \a format. - */ -struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, - struct ptlrpc_request_pool *pool, - const struct req_format *format) -{ - return ptlrpc_request_alloc_internal(imp, pool, format); -} -EXPORT_SYMBOL(ptlrpc_request_alloc_pool); - -/** - * For requests not from pool, free memory of the request structure. - * For requests obtained from a pool earlier, return request back to pool. - */ -void ptlrpc_request_free(struct ptlrpc_request *request) -{ - if (request->rq_pool) - __ptlrpc_free_req_to_pool(request); - else - ptlrpc_request_cache_free(request); -} -EXPORT_SYMBOL(ptlrpc_request_free); - -/** - * Allocate new request for operation \a opcode and immediately pack it for - * network transfer. - * Only used for simple requests like OBD_PING where the only important - * part of the request is operation itself. 
- * Returns allocated request or NULL on error. - */ -struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, - const struct req_format *format, - __u32 version, int opcode) -{ - struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format); - int rc; - - if (req) { - rc = ptlrpc_request_pack(req, version, opcode); - if (rc) { - ptlrpc_request_free(req); - req = NULL; - } - } - return req; -} -EXPORT_SYMBOL(ptlrpc_request_alloc_pack); - -/** - * Allocate and initialize new request set structure on the current CPT. - * Returns a pointer to the newly allocated set structure or NULL on error. - */ -struct ptlrpc_request_set *ptlrpc_prep_set(void) -{ - struct ptlrpc_request_set *set; - int cpt; - - cpt = cfs_cpt_current(cfs_cpt_tab, 0); - set = kzalloc_node(sizeof(*set), GFP_NOFS, - cfs_cpt_spread_node(cfs_cpt_tab, cpt)); - if (!set) - return NULL; - atomic_set(&set->set_refcount, 1); - INIT_LIST_HEAD(&set->set_requests); - init_waitqueue_head(&set->set_waitq); - atomic_set(&set->set_new_count, 0); - atomic_set(&set->set_remaining, 0); - spin_lock_init(&set->set_new_req_lock); - INIT_LIST_HEAD(&set->set_new_requests); - INIT_LIST_HEAD(&set->set_cblist); - set->set_max_inflight = UINT_MAX; - set->set_producer = NULL; - set->set_producer_arg = NULL; - set->set_rc = 0; - - return set; -} -EXPORT_SYMBOL(ptlrpc_prep_set); - -/** - * Allocate and initialize new request set structure with flow control - * extension. This extension allows to control the number of requests in-flight - * for the whole set. A callback function to generate requests must be provided - * and the request set will keep the number of requests sent over the wire to - * @max_inflight. - * Returns a pointer to the newly allocated set structure or NULL on error. 
- */ -struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, - void *arg) - -{ - struct ptlrpc_request_set *set; - - set = ptlrpc_prep_set(); - if (!set) - return NULL; - - set->set_max_inflight = max; - set->set_producer = func; - set->set_producer_arg = arg; - - return set; -} - -/** - * Wind down and free request set structure previously allocated with - * ptlrpc_prep_set. - * Ensures that all requests on the set have completed and removes - * all requests from the request list in a set. - * If any unsent request happen to be on the list, pretends that they got - * an error in flight and calls their completion handler. - */ -void ptlrpc_set_destroy(struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req; - int expected_phase; - int n = 0; - - /* Requests on the set should either all be completed, or all be new */ - expected_phase = (atomic_read(&set->set_remaining) == 0) ? - RQ_PHASE_COMPLETE : RQ_PHASE_NEW; - list_for_each_entry(req, &set->set_requests, rq_set_chain) { - LASSERT(req->rq_phase == expected_phase); - n++; - } - - LASSERTF(atomic_read(&set->set_remaining) == 0 || - atomic_read(&set->set_remaining) == n, "%d / %d\n", - atomic_read(&set->set_remaining), n); - - while ((req = list_first_entry_or_null(&set->set_requests, - struct ptlrpc_request, - rq_set_chain))) { - list_del_init(&req->rq_set_chain); - - LASSERT(req->rq_phase == expected_phase); - - if (req->rq_phase == RQ_PHASE_NEW) { - ptlrpc_req_interpret(NULL, req, -EBADR); - atomic_dec(&set->set_remaining); - } - - spin_lock(&req->rq_lock); - req->rq_set = NULL; - req->rq_invalid_rqset = 0; - spin_unlock(&req->rq_lock); - - ptlrpc_req_finished(req); - } - - LASSERT(atomic_read(&set->set_remaining) == 0); - - ptlrpc_reqset_put(set); -} -EXPORT_SYMBOL(ptlrpc_set_destroy); - -/** - * Add a new request to the general purpose request set. - * Assumes request reference from the caller. 
- */ -void ptlrpc_set_add_req(struct ptlrpc_request_set *set, - struct ptlrpc_request *req) -{ - LASSERT(list_empty(&req->rq_set_chain)); - - /* The set takes over the caller's request reference */ - list_add_tail(&req->rq_set_chain, &set->set_requests); - req->rq_set = set; - atomic_inc(&set->set_remaining); - req->rq_queued_time = jiffies; - - if (req->rq_reqmsg) - lustre_msg_set_jobid(req->rq_reqmsg, NULL); - - if (set->set_producer) - /* - * If the request set has a producer callback, the RPC must be - * sent straight away - */ - ptlrpc_send_new_req(req); -} -EXPORT_SYMBOL(ptlrpc_set_add_req); - -/** - * Add a request to a request with dedicated server thread - * and wake the thread to make any necessary processing. - * Currently only used for ptlrpcd. - */ -void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, - struct ptlrpc_request *req) -{ - struct ptlrpc_request_set *set = pc->pc_set; - int count, i; - - LASSERT(!req->rq_set); - LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0); - - spin_lock(&set->set_new_req_lock); - /* The set takes over the caller's request reference. */ - req->rq_set = set; - req->rq_queued_time = jiffies; - list_add_tail(&req->rq_set_chain, &set->set_new_requests); - count = atomic_inc_return(&set->set_new_count); - spin_unlock(&set->set_new_req_lock); - - /* Only need to call wakeup once for the first entry. */ - if (count == 1) { - wake_up(&set->set_waitq); - - /* - * XXX: It maybe unnecessary to wakeup all the partners. But to - * guarantee the async RPC can be processed ASAP, we have - * no other better choice. It maybe fixed in future. - */ - for (i = 0; i < pc->pc_npartners; i++) - wake_up(&pc->pc_partners[i]->pc_set->set_waitq); - } -} - -/** - * Based on the current state of the import, determine if the request - * can be sent, is an error, or should be delayed. - * - * Returns true if this request should be delayed. If false, and - * *status is set, then the request can not be sent and *status is the - * error code. 
If false and status is 0, then request can be sent. - * - * The imp->imp_lock must be held. - */ -static int ptlrpc_import_delay_req(struct obd_import *imp, - struct ptlrpc_request *req, int *status) -{ - int delay = 0; - - *status = 0; - - if (req->rq_ctx_init || req->rq_ctx_fini) { - /* always allow ctx init/fini rpc go through */ - } else if (imp->imp_state == LUSTRE_IMP_NEW) { - DEBUG_REQ(D_ERROR, req, "Uninitialized import."); - *status = -EIO; - } else if (imp->imp_state == LUSTRE_IMP_CLOSED) { - /* pings may safely race with umount */ - DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ? - D_HA : D_ERROR, req, "IMP_CLOSED "); - *status = -EIO; - } else if (ptlrpc_send_limit_expired(req)) { - /* probably doesn't need to be a D_ERROR after initial testing */ - DEBUG_REQ(D_HA, req, "send limit expired "); - *status = -ETIMEDOUT; - } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING && - imp->imp_state == LUSTRE_IMP_CONNECTING) { - /* allow CONNECT even if import is invalid */ - if (atomic_read(&imp->imp_inval_count) != 0) { - DEBUG_REQ(D_ERROR, req, "invalidate in flight"); - *status = -EIO; - } - } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) { - if (!imp->imp_deactive) - DEBUG_REQ(D_NET, req, "IMP_INVALID"); - *status = -ESHUTDOWN; /* bz 12940 */ - } else if (req->rq_import_generation != imp->imp_generation) { - DEBUG_REQ(D_ERROR, req, "req wrong generation:"); - *status = -EIO; - } else if (req->rq_send_state != imp->imp_state) { - /* invalidate in progress - any requests should be drop */ - if (atomic_read(&imp->imp_inval_count) != 0) { - DEBUG_REQ(D_ERROR, req, "invalidate in flight"); - *status = -EIO; - } else if (req->rq_no_delay) { - *status = -EWOULDBLOCK; - } else if (req->rq_allow_replay && - (imp->imp_state == LUSTRE_IMP_REPLAY || - imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS || - imp->imp_state == LUSTRE_IMP_REPLAY_WAIT || - imp->imp_state == LUSTRE_IMP_RECOVER)) { - DEBUG_REQ(D_HA, req, "allow during recovery.\n"); - } else { 
- delay = 1; - } - } - - return delay; -} - -/** - * Decide if the error message should be printed to the console or not. - * Makes its decision based on request type, status, and failure frequency. - * - * \param[in] req request that failed and may need a console message - * - * \retval false if no message should be printed - * \retval true if console message should be printed - */ -static bool ptlrpc_console_allow(struct ptlrpc_request *req) -{ - __u32 opc; - - LASSERT(req->rq_reqmsg); - opc = lustre_msg_get_opc(req->rq_reqmsg); - - /* Suppress particular reconnect errors which are to be expected. */ - if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) { - int err; - - /* Suppress timed out reconnect requests */ - if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) || - req->rq_timedout) - return false; - - /* - * Suppress most unavailable/again reconnect requests, but - * print occasionally so it is clear client is trying to - * connect to a server where no target is running. - */ - err = lustre_msg_get_status(req->rq_repmsg); - if ((err == -ENODEV || err == -EAGAIN) && - req->rq_import->imp_conn_cnt % 30 != 20) - return false; - } - - return true; -} - -/** - * Check request processing status. - * Returns the status. - */ -static int ptlrpc_check_status(struct ptlrpc_request *req) -{ - int err; - - err = lustre_msg_get_status(req->rq_repmsg); - if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { - struct obd_import *imp = req->rq_import; - lnet_nid_t nid = imp->imp_connection->c_peer.nid; - __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); - - /* -EAGAIN is normal when using POSIX flocks */ - if (ptlrpc_console_allow(req) && - !(opc == LDLM_ENQUEUE && err == -EAGAIN)) - LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n", - imp->imp_obd->obd_name, - ll_opcode2str(opc), - libcfs_nid2str(nid), err); - return err < 0 ? 
err : -EINVAL; - } - - if (err < 0) - DEBUG_REQ(D_INFO, req, "status is %d", err); - else if (err > 0) - /* XXX: translate this error from net to host */ - DEBUG_REQ(D_INFO, req, "status is %d", err); - - return err; -} - -/** - * save pre-versions of objects into request for replay. - * Versions are obtained from server reply. - * used for VBR. - */ -static void ptlrpc_save_versions(struct ptlrpc_request *req) -{ - struct lustre_msg *repmsg = req->rq_repmsg; - struct lustre_msg *reqmsg = req->rq_reqmsg; - __u64 *versions = lustre_msg_get_versions(repmsg); - - if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) - return; - - LASSERT(versions); - lustre_msg_set_versions(reqmsg, versions); - CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n", - versions[0], versions[1]); -} - -__u64 ptlrpc_known_replied_xid(struct obd_import *imp) -{ - struct ptlrpc_request *req; - - assert_spin_locked(&imp->imp_lock); - if (list_empty(&imp->imp_unreplied_list)) - return 0; - - req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request, - rq_unreplied_list); - LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid); - - if (imp->imp_known_replied_xid < req->rq_xid - 1) - imp->imp_known_replied_xid = req->rq_xid - 1; - - return req->rq_xid - 1; -} - -/** - * Callback function called when client receives RPC reply for \a req. - * Returns 0 on success or error code. - * The return value would be assigned to req->rq_status by the caller - * as request processing status. - * This function also decides if the request needs to be saved for later replay. 
- */ -static int after_reply(struct ptlrpc_request *req) -{ - struct obd_import *imp = req->rq_import; - struct obd_device *obd = req->rq_import->imp_obd; - int rc; - struct timespec64 work_start; - long timediff; - u64 committed; - - LASSERT(obd); - /* repbuf must be unlinked */ - LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked); - - if (req->rq_reply_truncated) { - if (ptlrpc_no_resend(req)) { - DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d", - req->rq_nob_received, req->rq_repbuf_len); - return -EOVERFLOW; - } - - sptlrpc_cli_free_repbuf(req); - /* - * Pass the required reply buffer size (include space for early - * reply). NB: no need to round up because alloc_repbuf will - * round it up - */ - req->rq_replen = req->rq_nob_received; - req->rq_nob_received = 0; - spin_lock(&req->rq_lock); - req->rq_resend = 1; - spin_unlock(&req->rq_lock); - return 0; - } - - ktime_get_real_ts64(&work_start); - timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC + - (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) / - NSEC_PER_USEC; - /* - * NB Until this point, the whole of the incoming message, - * including buflens, status etc is in the sender's byte order. - */ - rc = sptlrpc_cli_unwrap_reply(req); - if (rc) { - DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc); - return rc; - } - - /* Security layer unwrap might ask resend this request. 
*/ - if (req->rq_resend) - return 0; - - rc = unpack_reply(req); - if (rc) - return rc; - - /* retry indefinitely on EINPROGRESS */ - if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS && - ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) { - time64_t now = ktime_get_real_seconds(); - - DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS"); - spin_lock(&req->rq_lock); - req->rq_resend = 1; - spin_unlock(&req->rq_lock); - req->rq_nr_resend++; - - /* Readjust the timeout for current conditions */ - ptlrpc_at_set_req_timeout(req); - /* - * delay resend to give a chance to the server to get ready. - * The delay is increased by 1s on every resend and is capped to - * the current request timeout (i.e. obd_timeout if AT is off, - * or AT service time x 125% + 5s, see at_est2timeout) - */ - if (req->rq_nr_resend > req->rq_timeout) - req->rq_sent = now + req->rq_timeout; - else - req->rq_sent = now + req->rq_nr_resend; - - /* Resend for EINPROGRESS will use a new XID */ - spin_lock(&imp->imp_lock); - list_del_init(&req->rq_unreplied_list); - spin_unlock(&imp->imp_lock); - - return 0; - } - - if (obd->obd_svc_stats) { - lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, - timediff); - ptlrpc_lprocfs_rpc_sent(req, timediff); - } - - if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY && - lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) { - DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)", - lustre_msg_get_type(req->rq_repmsg)); - return -EPROTO; - } - - if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING) - CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val); - ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg)); - ptlrpc_at_adj_net_latency(req, - lustre_msg_get_service_time(req->rq_repmsg)); - - rc = ptlrpc_check_status(req); - imp->imp_connect_error = rc; - - if (rc) { - /* - * Either we've been evicted, or the server has failed for - * some reason. 
Try to reconnect, and if that fails, punt to - * the upcall. - */ - if (ptlrpc_recoverable_error(rc)) { - if (req->rq_send_state != LUSTRE_IMP_FULL || - imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) { - return rc; - } - ptlrpc_request_handle_notconn(req); - return rc; - } - } else { - /* - * Let's look if server sent slv. Do it only for RPC with - * rc == 0. - */ - ldlm_cli_update_pool(req); - } - - /* Store transno in reqmsg for replay. */ - if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) { - req->rq_transno = lustre_msg_get_transno(req->rq_repmsg); - lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno); - } - - if (imp->imp_replayable) { - spin_lock(&imp->imp_lock); - /* - * No point in adding already-committed requests to the replay - * list, we will just remove them immediately. b=9829 - */ - if (req->rq_transno != 0 && - (req->rq_transno > - lustre_msg_get_last_committed(req->rq_repmsg) || - req->rq_replay)) { - /* version recovery */ - ptlrpc_save_versions(req); - ptlrpc_retain_replayable_request(req, imp); - } else if (req->rq_commit_cb && - list_empty(&req->rq_replay_list)) { - /* - * NB: don't call rq_commit_cb if it's already on - * rq_replay_list, ptlrpc_free_committed() will call - * it later, see LU-3618 for details - */ - spin_unlock(&imp->imp_lock); - req->rq_commit_cb(req); - spin_lock(&imp->imp_lock); - } - - /* Replay-enabled imports return commit-status information. */ - committed = lustre_msg_get_last_committed(req->rq_repmsg); - if (likely(committed > imp->imp_peer_committed_transno)) - imp->imp_peer_committed_transno = committed; - - ptlrpc_free_committed(imp); - - if (!list_empty(&imp->imp_replay_list)) { - struct ptlrpc_request *last; - - last = list_entry(imp->imp_replay_list.prev, - struct ptlrpc_request, - rq_replay_list); - /* - * Requests with rq_replay stay on the list even if no - * commit is expected. 
- */ - if (last->rq_transno > imp->imp_peer_committed_transno) - ptlrpc_pinger_commit_expected(imp); - } - - spin_unlock(&imp->imp_lock); - } - - return rc; -} - -/** - * Helper function to send request \a req over the network for the first time - * Also adjusts request phase. - * Returns 0 on success or error code. - */ -static int ptlrpc_send_new_req(struct ptlrpc_request *req) -{ - struct obd_import *imp = req->rq_import; - u64 min_xid = 0; - int rc; - - LASSERT(req->rq_phase == RQ_PHASE_NEW); - - /* do not try to go further if there is not enough memory in enc_pool */ - if (req->rq_sent && req->rq_bulk) - if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() && - pool_is_at_full_capacity()) - return -ENOMEM; - - if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) && - (!req->rq_generation_set || - req->rq_import_generation == imp->imp_generation)) - return 0; - - ptlrpc_rqphase_move(req, RQ_PHASE_RPC); - - spin_lock(&imp->imp_lock); - - LASSERT(req->rq_xid); - LASSERT(!list_empty(&req->rq_unreplied_list)); - - if (!req->rq_generation_set) - req->rq_import_generation = imp->imp_generation; - - if (ptlrpc_import_delay_req(imp, req, &rc)) { - spin_lock(&req->rq_lock); - req->rq_waiting = 1; - spin_unlock(&req->rq_lock); - - DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)", - lustre_msg_get_status(req->rq_reqmsg), - ptlrpc_import_state_name(req->rq_send_state), - ptlrpc_import_state_name(imp->imp_state)); - LASSERT(list_empty(&req->rq_list)); - list_add_tail(&req->rq_list, &imp->imp_delayed_list); - atomic_inc(&req->rq_import->imp_inflight); - spin_unlock(&imp->imp_lock); - return 0; - } - - if (rc != 0) { - spin_unlock(&imp->imp_lock); - req->rq_status = rc; - ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - return rc; - } - - LASSERT(list_empty(&req->rq_list)); - list_add_tail(&req->rq_list, &imp->imp_sending_list); - atomic_inc(&req->rq_import->imp_inflight); - - /* find the known replied XID from the unreplied list, CONNECT - 
* and DISCONNECT requests are skipped to make the sanity check - * on server side happy. see process_req_last_xid(). - * - * For CONNECT: Because replay requests have lower XID, it'll - * break the sanity check if CONNECT bump the exp_last_xid on - * server. - * - * For DISCONNECT: Since client will abort inflight RPC before - * sending DISCONNECT, DISCONNECT may carry an XID which higher - * than the inflight RPC. - */ - if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req)) - min_xid = ptlrpc_known_replied_xid(imp); - spin_unlock(&imp->imp_lock); - - lustre_msg_set_last_xid(req->rq_reqmsg, min_xid); - - lustre_msg_set_status(req->rq_reqmsg, current->pid); - - rc = sptlrpc_req_refresh_ctx(req, -1); - if (rc) { - if (req->rq_err) { - req->rq_status = rc; - return 1; - } - spin_lock(&req->rq_lock); - req->rq_wait_ctx = 1; - spin_unlock(&req->rq_lock); - return 0; - } - - CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n", - current->comm, - imp->imp_obd->obd_uuid.uuid, - lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, - libcfs_nid2str(imp->imp_connection->c_peer.nid), - lustre_msg_get_opc(req->rq_reqmsg)); - - rc = ptl_send_rpc(req, 0); - if (rc == -ENOMEM) { - spin_lock(&imp->imp_lock); - if (!list_empty(&req->rq_list)) { - list_del_init(&req->rq_list); - if (atomic_dec_and_test(&req->rq_import->imp_inflight)) - wake_up_all(&req->rq_import->imp_recovery_waitq); - } - spin_unlock(&imp->imp_lock); - ptlrpc_rqphase_move(req, RQ_PHASE_NEW); - return rc; - } - if (rc) { - DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc); - spin_lock(&req->rq_lock); - req->rq_net_err = 1; - spin_unlock(&req->rq_lock); - return rc; - } - return 0; -} - -static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set) -{ - int remaining, rc; - - LASSERT(set->set_producer); - - remaining = atomic_read(&set->set_remaining); - - /* - * populate the ->set_requests list with requests until we - * reach the maximum number of RPCs 
in flight for this set - */ - while (atomic_read(&set->set_remaining) < set->set_max_inflight) { - rc = set->set_producer(set, set->set_producer_arg); - if (rc == -ENOENT) { - /* no more RPC to produce */ - set->set_producer = NULL; - set->set_producer_arg = NULL; - return 0; - } - } - - return (atomic_read(&set->set_remaining) - remaining); -} - -/** - * this sends any unsent RPCs in \a set and returns 1 if all are sent - * and no more replies are expected. - * (it is possible to get less replies than requests sent e.g. due to timed out - * requests or requests that we had trouble to send out) - * - * NOTE: This function contains a potential schedule point (cond_resched()). - */ -int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req, *next; - struct list_head comp_reqs; - int force_timer_recalc = 0; - - if (atomic_read(&set->set_remaining) == 0) - return 1; - - INIT_LIST_HEAD(&comp_reqs); - list_for_each_entry_safe(req, next, &set->set_requests, rq_set_chain) { - struct obd_import *imp = req->rq_import; - int unregistered = 0; - int rc = 0; - - /* - * This schedule point is mainly for the ptlrpcd caller of this - * function. Most ptlrpc sets are not long-lived and unbounded - * in length, but at the least the set used by the ptlrpcd is. - * Since the processing time is unbounded, we need to insert an - * explicit schedule point to make the thread well-behaved. 
- */ - cond_resched(); - - if (req->rq_phase == RQ_PHASE_NEW && - ptlrpc_send_new_req(req)) { - force_timer_recalc = 1; - } - - /* delayed send - skip */ - if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent) - continue; - - /* delayed resend - skip */ - if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend && - req->rq_sent > ktime_get_real_seconds()) - continue; - - if (!(req->rq_phase == RQ_PHASE_RPC || - req->rq_phase == RQ_PHASE_BULK || - req->rq_phase == RQ_PHASE_INTERPRET || - req->rq_phase == RQ_PHASE_UNREG_RPC || - req->rq_phase == RQ_PHASE_UNREG_BULK || - req->rq_phase == RQ_PHASE_COMPLETE)) { - DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase); - LBUG(); - } - - if (req->rq_phase == RQ_PHASE_UNREG_RPC || - req->rq_phase == RQ_PHASE_UNREG_BULK) { - LASSERT(req->rq_next_phase != req->rq_phase); - LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED); - - if (req->rq_req_deadline && - !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) - req->rq_req_deadline = 0; - if (req->rq_reply_deadline && - !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) - req->rq_reply_deadline = 0; - if (req->rq_bulk_deadline && - !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) - req->rq_bulk_deadline = 0; - - /* - * Skip processing until reply is unlinked. We - * can't return to pool before that and we can't - * call interpret before that. We need to make - * sure that all rdma transfers finished and will - * not corrupt any data. - */ - if (req->rq_phase == RQ_PHASE_UNREG_RPC && - ptlrpc_client_recv_or_unlink(req)) - continue; - if (req->rq_phase == RQ_PHASE_UNREG_BULK && - ptlrpc_client_bulk_active(req)) - continue; - - /* - * Turn fail_loc off to prevent it from looping - * forever. 
- */ - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { - OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK, - OBD_FAIL_ONCE); - } - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) { - OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK, - OBD_FAIL_ONCE); - } - - /* Move to next phase if reply was successfully - * unlinked. - */ - ptlrpc_rqphase_move(req, req->rq_next_phase); - } - - if (req->rq_phase == RQ_PHASE_COMPLETE) { - list_move_tail(&req->rq_set_chain, &comp_reqs); - continue; - } - - if (req->rq_phase == RQ_PHASE_INTERPRET) - goto interpret; - - /* Note that this also will start async reply unlink. */ - if (req->rq_net_err && !req->rq_timedout) { - ptlrpc_expire_one_request(req, 1); - - /* Check if we still need to wait for unlink. */ - if (ptlrpc_client_recv_or_unlink(req) || - ptlrpc_client_bulk_active(req)) - continue; - /* If there is no need to resend, fail it now. */ - if (req->rq_no_resend) { - if (req->rq_status == 0) - req->rq_status = -EIO; - ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - goto interpret; - } else { - continue; - } - } - - if (req->rq_err) { - spin_lock(&req->rq_lock); - req->rq_replied = 0; - spin_unlock(&req->rq_lock); - if (req->rq_status == 0) - req->rq_status = -EIO; - ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - goto interpret; - } - - /* - * ptlrpc_set_wait allow signal to abort the timeout - * so it sets rq_intr regardless of individual rpc - * timeouts. The synchronous IO waiting path sets - * rq_intr irrespective of whether ptlrpcd - * has seen a timeout. Our policy is to only interpret - * interrupted rpcs after they have timed out, so we - * need to enforce that here. 
- */ - - if (req->rq_intr && (req->rq_timedout || req->rq_waiting || - req->rq_wait_ctx)) { - req->rq_status = -EINTR; - ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - goto interpret; - } - - if (req->rq_phase == RQ_PHASE_RPC) { - if (req->rq_timedout || req->rq_resend || - req->rq_waiting || req->rq_wait_ctx) { - int status; - - if (!ptlrpc_unregister_reply(req, 1)) { - ptlrpc_unregister_bulk(req, 1); - continue; - } - - spin_lock(&imp->imp_lock); - if (ptlrpc_import_delay_req(imp, req, - &status)) { - /* - * put on delay list - only if we wait - * recovery finished - before send - */ - list_del_init(&req->rq_list); - list_add_tail(&req->rq_list, - &imp->imp_delayed_list); - spin_unlock(&imp->imp_lock); - continue; - } - - if (status != 0) { - req->rq_status = status; - ptlrpc_rqphase_move(req, - RQ_PHASE_INTERPRET); - spin_unlock(&imp->imp_lock); - goto interpret; - } - if (ptlrpc_no_resend(req) && - !req->rq_wait_ctx) { - req->rq_status = -ENOTCONN; - ptlrpc_rqphase_move(req, - RQ_PHASE_INTERPRET); - spin_unlock(&imp->imp_lock); - goto interpret; - } - - list_del_init(&req->rq_list); - list_add_tail(&req->rq_list, - &imp->imp_sending_list); - - spin_unlock(&imp->imp_lock); - - spin_lock(&req->rq_lock); - req->rq_waiting = 0; - spin_unlock(&req->rq_lock); - - if (req->rq_timedout || req->rq_resend) { - /* This is re-sending anyway, let's mark req as resend. */ - spin_lock(&req->rq_lock); - req->rq_resend = 1; - spin_unlock(&req->rq_lock); - if (req->rq_bulk && - !ptlrpc_unregister_bulk(req, 1)) - continue; - } - /* - * rq_wait_ctx is only touched by ptlrpcd, - * so no lock is needed here. 
- */ - status = sptlrpc_req_refresh_ctx(req, -1); - if (status) { - if (req->rq_err) { - req->rq_status = status; - spin_lock(&req->rq_lock); - req->rq_wait_ctx = 0; - spin_unlock(&req->rq_lock); - force_timer_recalc = 1; - } else { - spin_lock(&req->rq_lock); - req->rq_wait_ctx = 1; - spin_unlock(&req->rq_lock); - } - - continue; - } else { - spin_lock(&req->rq_lock); - req->rq_wait_ctx = 0; - spin_unlock(&req->rq_lock); - } - - rc = ptl_send_rpc(req, 0); - if (rc == -ENOMEM) { - spin_lock(&imp->imp_lock); - if (!list_empty(&req->rq_list)) - list_del_init(&req->rq_list); - spin_unlock(&imp->imp_lock); - ptlrpc_rqphase_move(req, RQ_PHASE_NEW); - continue; - } - if (rc) { - DEBUG_REQ(D_HA, req, - "send failed: rc = %d", rc); - force_timer_recalc = 1; - spin_lock(&req->rq_lock); - req->rq_net_err = 1; - spin_unlock(&req->rq_lock); - continue; - } - /* need to reset the timeout */ - force_timer_recalc = 1; - } - - spin_lock(&req->rq_lock); - - if (ptlrpc_client_early(req)) { - ptlrpc_at_recv_early_reply(req); - spin_unlock(&req->rq_lock); - continue; - } - - /* Still waiting for a reply? */ - if (ptlrpc_client_recv(req)) { - spin_unlock(&req->rq_lock); - continue; - } - - /* Did we actually receive a reply? */ - if (!ptlrpc_client_replied(req)) { - spin_unlock(&req->rq_lock); - continue; - } - - spin_unlock(&req->rq_lock); - - /* - * unlink from net because we are going to - * swab in-place of reply buffer - */ - unregistered = ptlrpc_unregister_reply(req, 1); - if (!unregistered) - continue; - - req->rq_status = after_reply(req); - if (req->rq_resend) - continue; - - /* - * If there is no bulk associated with this request, - * then we're done and should let the interpreter - * process the reply. Similarly if the RPC returned - * an error, and therefore the bulk will never arrive. 
- */ - if (!req->rq_bulk || req->rq_status < 0) { - ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - goto interpret; - } - - ptlrpc_rqphase_move(req, RQ_PHASE_BULK); - } - - LASSERT(req->rq_phase == RQ_PHASE_BULK); - if (ptlrpc_client_bulk_active(req)) - continue; - - if (req->rq_bulk->bd_failure) { - /* - * The RPC reply arrived OK, but the bulk screwed - * up! Dead weird since the server told us the RPC - * was good after getting the REPLY for her GET or - * the ACK for her PUT. - */ - DEBUG_REQ(D_ERROR, req, "bulk transfer failed"); - req->rq_status = -EIO; - } - - ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - -interpret: - LASSERT(req->rq_phase == RQ_PHASE_INTERPRET); - - /* - * This moves to "unregistering" phase we need to wait for - * reply unlink. - */ - if (!unregistered && !ptlrpc_unregister_reply(req, 1)) { - /* start async bulk unlink too */ - ptlrpc_unregister_bulk(req, 1); - continue; - } - - if (!ptlrpc_unregister_bulk(req, 1)) - continue; - - /* When calling interpret receive should already be finished. */ - LASSERT(!req->rq_receiving_reply); - - ptlrpc_req_interpret(env, req, req->rq_status); - - if (ptlrpcd_check_work(req)) { - atomic_dec(&set->set_remaining); - continue; - } - ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); - - CDEBUG(req->rq_reqmsg ? D_RPCTRACE : 0, - "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n", - current->comm, imp->imp_obd->obd_uuid.uuid, - lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, - libcfs_nid2str(imp->imp_connection->c_peer.nid), - lustre_msg_get_opc(req->rq_reqmsg)); - - spin_lock(&imp->imp_lock); - /* - * Request already may be not on sending or delaying list. This - * may happen in the case of marking it erroneous for the case - * ptlrpc_import_delay_req(req, status) find it impossible to - * allow sending this rpc and returns *status != 0. 
- */ - if (!list_empty(&req->rq_list)) { - list_del_init(&req->rq_list); - atomic_dec(&imp->imp_inflight); - } - list_del_init(&req->rq_unreplied_list); - spin_unlock(&imp->imp_lock); - - atomic_dec(&set->set_remaining); - wake_up_all(&imp->imp_recovery_waitq); - - if (set->set_producer) { - /* produce a new request if possible */ - if (ptlrpc_set_producer(set) > 0) - force_timer_recalc = 1; - - /* - * free the request that has just been completed - * in order not to pollute set->set_requests - */ - list_del_init(&req->rq_set_chain); - spin_lock(&req->rq_lock); - req->rq_set = NULL; - req->rq_invalid_rqset = 0; - spin_unlock(&req->rq_lock); - - /* record rq_status to compute the final status later */ - if (req->rq_status != 0) - set->set_rc = req->rq_status; - ptlrpc_req_finished(req); - } else { - list_move_tail(&req->rq_set_chain, &comp_reqs); - } - } - - /* - * move completed request at the head of list so it's easier for - * caller to find them - */ - list_splice(&comp_reqs, &set->set_requests); - - /* If we hit an error, we want to recover promptly. */ - return atomic_read(&set->set_remaining) == 0 || force_timer_recalc; -} -EXPORT_SYMBOL(ptlrpc_check_set); - -/** - * Time out request \a req. is \a async_unlink is set, that means do not wait - * until LNet actually confirms network buffer unlinking. - * Return 1 if we should give up further retrying attempts or 0 otherwise. - */ -int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) -{ - struct obd_import *imp = req->rq_import; - int rc = 0; - - spin_lock(&req->rq_lock); - req->rq_timedout = 1; - spin_unlock(&req->rq_lock); - - DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]", - req->rq_net_err ? "failed due to network error" : - ((req->rq_real_sent == 0 || - req->rq_real_sent < req->rq_sent || - req->rq_real_sent >= req->rq_deadline) ? 
- "timed out for sent delay" : "timed out for slow reply"), - (s64)req->rq_sent, (s64)req->rq_real_sent); - - if (imp && obd_debug_peer_on_timeout) - LNetDebugPeer(imp->imp_connection->c_peer); - - ptlrpc_unregister_reply(req, async_unlink); - ptlrpc_unregister_bulk(req, async_unlink); - - if (obd_dump_on_timeout) - libcfs_debug_dumplog(); - - if (!imp) { - DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?"); - return 1; - } - - atomic_inc(&imp->imp_timeouts); - - /* The DLM server doesn't want recovery run on its imports. */ - if (imp->imp_dlm_fake) - return 1; - - /* - * If this request is for recovery or other primordial tasks, - * then error it out here. - */ - if (req->rq_ctx_init || req->rq_ctx_fini || - req->rq_send_state != LUSTRE_IMP_FULL || - imp->imp_obd->obd_no_recov) { - DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)", - ptlrpc_import_state_name(req->rq_send_state), - ptlrpc_import_state_name(imp->imp_state)); - spin_lock(&req->rq_lock); - req->rq_status = -ETIMEDOUT; - req->rq_err = 1; - spin_unlock(&req->rq_lock); - return 1; - } - - /* - * if a request can't be resent we can't wait for an answer after - * the timeout - */ - if (ptlrpc_no_resend(req)) { - DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:"); - rc = 1; - } - - ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg)); - - return rc; -} - -/** - * Time out all uncompleted requests in request set pointed by \a data - * Called when wait_event_idle_timeout times out. - */ -void ptlrpc_expired_set(struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req; - time64_t now = ktime_get_real_seconds(); - - /* A timeout expired. See which reqs it applies to... */ - list_for_each_entry(req, &set->set_requests, rq_set_chain) { - - /* don't expire request waiting for context */ - if (req->rq_wait_ctx) - continue; - - /* Request in-flight? 
*/ - if (!((req->rq_phase == RQ_PHASE_RPC && - !req->rq_waiting && !req->rq_resend) || - (req->rq_phase == RQ_PHASE_BULK))) - continue; - - if (req->rq_timedout || /* already dealt with */ - req->rq_deadline > now) /* not expired */ - continue; - - /* - * Deal with this guy. Do it asynchronously to not block - * ptlrpcd thread. - */ - ptlrpc_expire_one_request(req, 1); - } -} - -/** - * Sets rq_intr flag in \a req under spinlock. - */ -void ptlrpc_mark_interrupted(struct ptlrpc_request *req) -{ - spin_lock(&req->rq_lock); - req->rq_intr = 1; - spin_unlock(&req->rq_lock); -} -EXPORT_SYMBOL(ptlrpc_mark_interrupted); - -/** - * Interrupts (sets interrupted flag) all uncompleted requests in - * a set \a data. Called when l_wait_event_abortable_timeout receives signal. - */ -static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req; - CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); - - list_for_each_entry(req, &set->set_requests, rq_set_chain) { - - if (req->rq_phase != RQ_PHASE_RPC && - req->rq_phase != RQ_PHASE_UNREG_RPC) - continue; - - ptlrpc_mark_interrupted(req); - } -} - -/** - * Get the smallest timeout in the set; this does NOT set a timeout. - */ -int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) -{ - time64_t now = ktime_get_real_seconds(); - int timeout = 0; - struct ptlrpc_request *req; - time64_t deadline; - - list_for_each_entry(req, &set->set_requests, rq_set_chain) { - - /* Request in-flight? */ - if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) || - (req->rq_phase == RQ_PHASE_BULK) || - (req->rq_phase == RQ_PHASE_NEW))) - continue; - - /* Already timed out. */ - if (req->rq_timedout) - continue; - - /* Waiting for ctx. 
*/ - if (req->rq_wait_ctx) - continue; - - if (req->rq_phase == RQ_PHASE_NEW) - deadline = req->rq_sent; - else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend) - deadline = req->rq_sent; - else - deadline = req->rq_sent + req->rq_timeout; - - if (deadline <= now) /* actually expired already */ - timeout = 1; /* ASAP */ - else if (timeout == 0 || timeout > deadline - now) - timeout = deadline - now; - } - return timeout; -} - -/** - * Send all unset request from the set and then wait until all - * requests in the set complete (either get a reply, timeout, get an - * error or otherwise be interrupted). - * Returns 0 on success or error code otherwise. - */ -int ptlrpc_set_wait(struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req; - int rc, timeout; - - if (set->set_producer) - (void)ptlrpc_set_producer(set); - else - list_for_each_entry(req, &set->set_requests, rq_set_chain) { - if (req->rq_phase == RQ_PHASE_NEW) - (void)ptlrpc_send_new_req(req); - } - - if (list_empty(&set->set_requests)) - return 0; - - do { - timeout = ptlrpc_set_next_timeout(set); - - /* - * wait until all complete, interrupted, or an in-flight - * req times out - */ - CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n", - set, timeout); - - if (timeout == 0 && !signal_pending(current)) { - /* - * No requests are in-flight (ether timed out - * or delayed), so we can allow interrupts. - * We still want to block for a limited time, - * so we allow interrupts during the timeout. - */ - rc = l_wait_event_abortable_timeout(set->set_waitq, - ptlrpc_check_set(NULL, set), - HZ); - if (rc == 0) { - rc = -ETIMEDOUT; - ptlrpc_expired_set(set); - } else if (rc < 0) { - rc = -EINTR; - ptlrpc_interrupted_set(set); - } else - rc = 0; - } else { - /* - * At least one request is in flight, so no - * interrupts are allowed. Wait until all - * complete, or an in-flight req times out. - */ - rc = wait_event_idle_timeout(set->set_waitq, - ptlrpc_check_set(NULL, set), - (timeout ? 
timeout : 1) * HZ); - if (rc == 0) { - ptlrpc_expired_set(set); - rc = -ETIMEDOUT; - /* - * LU-769 - if we ignored the signal - * because it was already pending when - * we started, we need to handle it - * now or we risk it being ignored - * forever - */ - if (l_fatal_signal_pending(current)) - ptlrpc_interrupted_set(set); - } else - rc = 0; - } - - LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT); - - /* - * -EINTR => all requests have been flagged rq_intr so next - * check completes. - * -ETIMEDOUT => someone timed out. When all reqs have - * timed out, signals are enabled allowing completion with - * EINTR. - * I don't really care if we go once more round the loop in - * the error cases -eeb. - */ - if (rc == 0 && atomic_read(&set->set_remaining) == 0) { - list_for_each_entry(req, &set->set_requests, rq_set_chain) { - spin_lock(&req->rq_lock); - req->rq_invalid_rqset = 1; - spin_unlock(&req->rq_lock); - } - } - } while (rc != 0 || atomic_read(&set->set_remaining) != 0); - - LASSERT(atomic_read(&set->set_remaining) == 0); - - rc = set->set_rc; /* rq_status of already freed requests if any */ - list_for_each_entry(req, &set->set_requests, rq_set_chain) { - LASSERT(req->rq_phase == RQ_PHASE_COMPLETE); - if (req->rq_status != 0) - rc = req->rq_status; - } - - if (set->set_interpret) { - int (*interpreter)(struct ptlrpc_request_set *set, void *, int) = - set->set_interpret; - rc = interpreter(set, set->set_arg, rc); - } else { - struct ptlrpc_set_cbdata *cbdata, *n; - int err; - - list_for_each_entry_safe(cbdata, n, - &set->set_cblist, psc_item) { - list_del_init(&cbdata->psc_item); - err = cbdata->psc_interpret(set, cbdata->psc_data, rc); - if (err && !rc) - rc = err; - kfree(cbdata); - } - } - - return rc; -} -EXPORT_SYMBOL(ptlrpc_set_wait); - -/** - * Helper function for request freeing. - * Called when request count reached zero and request needs to be freed. 
- * Removes request from all sorts of sending/replay lists it might be on, - * frees network buffers if any are present. - * If \a locked is set, that means caller is already holding import imp_lock - * and so we no longer need to reobtain it (for certain lists manipulations) - */ -static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) -{ - if (!request) - return; - LASSERT(!request->rq_srv_req); - LASSERT(!request->rq_export); - LASSERTF(!request->rq_receiving_reply, "req %p\n", request); - LASSERTF(list_empty(&request->rq_list), "req %p\n", request); - LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); - LASSERTF(!request->rq_replay, "req %p\n", request); - - req_capsule_fini(&request->rq_pill); - - /* - * We must take it off the imp_replay_list first. Otherwise, we'll set - * request->rq_reqmsg to NULL while osc_close is dereferencing it. - */ - if (request->rq_import) { - if (!locked) - spin_lock(&request->rq_import->imp_lock); - list_del_init(&request->rq_replay_list); - list_del_init(&request->rq_unreplied_list); - if (!locked) - spin_unlock(&request->rq_import->imp_lock); - } - LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request); - - if (atomic_read(&request->rq_refcount) != 0) { - DEBUG_REQ(D_ERROR, request, - "freeing request with nonzero refcount"); - LBUG(); - } - - if (request->rq_repbuf) - sptlrpc_cli_free_repbuf(request); - - if (request->rq_import) { - class_import_put(request->rq_import); - request->rq_import = NULL; - } - if (request->rq_bulk) - ptlrpc_free_bulk(request->rq_bulk); - - if (request->rq_reqbuf || request->rq_clrbuf) - sptlrpc_cli_free_reqbuf(request); - - if (request->rq_cli_ctx) - sptlrpc_req_put_ctx(request, !locked); - - if (request->rq_pool) - __ptlrpc_free_req_to_pool(request); - else - ptlrpc_request_cache_free(request); -} - -/** - * Helper function - * Drops one reference count for request \a request. - * \a locked set indicates that caller holds import imp_lock. 
- * Frees the request when reference count reaches zero. - */ -static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked) -{ - if (!request) - return 1; - - if (request == LP_POISON || - request->rq_reqmsg == LP_POISON) { - CERROR("dereferencing freed request (bug 575)\n"); - LBUG(); - return 1; - } - - DEBUG_REQ(D_INFO, request, "refcount now %u", - atomic_read(&request->rq_refcount) - 1); - - if (atomic_dec_and_test(&request->rq_refcount)) { - __ptlrpc_free_req(request, locked); - return 1; - } - - return 0; -} - -/** - * Drops one reference count for a request. - */ -void ptlrpc_req_finished(struct ptlrpc_request *request) -{ - __ptlrpc_req_finished(request, 0); -} -EXPORT_SYMBOL(ptlrpc_req_finished); - -/** - * Returns xid of a \a request - */ -__u64 ptlrpc_req_xid(struct ptlrpc_request *request) -{ - return request->rq_xid; -} -EXPORT_SYMBOL(ptlrpc_req_xid); - -/** - * Disengage the client's reply buffer from the network - * NB does _NOT_ unregister any client-side bulk. - * IDEMPOTENT, but _not_ safe against concurrent callers. - * The request owner (i.e. the thread doing the I/O) must call... - * Returns 0 on success or 1 if unregistering cannot be made. - */ -static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) -{ - int rc; - wait_queue_head_t *wq; - - /* Might sleep. */ - LASSERT(!in_interrupt()); - - /* Let's setup deadline for reply unlink. */ - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && - async && request->rq_reply_deadline == 0 && cfs_fail_val == 0) - request->rq_reply_deadline = - ktime_get_real_seconds() + LONG_UNLINK; - - /* Nothing left to do. */ - if (!ptlrpc_client_recv_or_unlink(request)) - return 1; - - LNetMDUnlink(request->rq_reply_md_h); - - /* Let's check it once again. */ - if (!ptlrpc_client_recv_or_unlink(request)) - return 1; - - /* Move to "Unregistering" phase as reply was not unlinked yet. 
*/ - ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC); - - /* Do not wait for unlink to finish. */ - if (async) - return 0; - - /* - * We have to wait_event_idle_timeout() whatever the result, to give liblustre - * a chance to run reply_in_callback(), and to make sure we've - * unlinked before returning a req to the pool. - */ - if (request->rq_set) - wq = &request->rq_set->set_waitq; - else - wq = &request->rq_reply_waitq; - - for (;;) { - /* - * Network access will complete in finite time but the HUGE - * timeout lets us CWARN for visibility of sluggish NALs - */ - int cnt = 0; - while (cnt < LONG_UNLINK && - (rc = wait_event_idle_timeout(*wq, - !ptlrpc_client_recv_or_unlink(request), - HZ)) == 0) - cnt += 1; - if (rc > 0) { - ptlrpc_rqphase_move(request, request->rq_next_phase); - return 1; - } - - DEBUG_REQ(D_WARNING, request, - "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d", - request->rq_receiving_reply, - request->rq_req_unlinked, - request->rq_reply_unlinked); - } - return 0; -} - -static void ptlrpc_free_request(struct ptlrpc_request *req) -{ - spin_lock(&req->rq_lock); - req->rq_replay = 0; - spin_unlock(&req->rq_lock); - - if (req->rq_commit_cb) - req->rq_commit_cb(req); - list_del_init(&req->rq_replay_list); - - __ptlrpc_req_finished(req, 1); -} - -/** - * the request is committed and dropped from the replay list of its import - */ -void ptlrpc_request_committed(struct ptlrpc_request *req, int force) -{ - struct obd_import *imp = req->rq_import; - - spin_lock(&imp->imp_lock); - if (list_empty(&req->rq_replay_list)) { - spin_unlock(&imp->imp_lock); - return; - } - - if (force || req->rq_transno <= imp->imp_peer_committed_transno) - ptlrpc_free_request(req); - - spin_unlock(&imp->imp_lock); -} -EXPORT_SYMBOL(ptlrpc_request_committed); - -/** - * Iterates through replay_list on import and prunes - * all requests have transno smaller than last_committed for the - * import and don't have rq_replay set. 
- * Since requests are sorted in transno order, stops when meeting first - * transno bigger than last_committed. - * caller must hold imp->imp_lock - */ -void ptlrpc_free_committed(struct obd_import *imp) -{ - struct ptlrpc_request *req, *saved; - struct ptlrpc_request *last_req = NULL; /* temporary fire escape */ - bool skip_committed_list = true; - - assert_spin_locked(&imp->imp_lock); - - if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked && - imp->imp_generation == imp->imp_last_generation_checked) { - CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n", - imp->imp_obd->obd_name, imp->imp_peer_committed_transno); - return; - } - CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n", - imp->imp_obd->obd_name, imp->imp_peer_committed_transno, - imp->imp_generation); - - if (imp->imp_generation != imp->imp_last_generation_checked || - !imp->imp_last_transno_checked) - skip_committed_list = false; - - imp->imp_last_transno_checked = imp->imp_peer_committed_transno; - imp->imp_last_generation_checked = imp->imp_generation; - - list_for_each_entry_safe(req, saved, &imp->imp_replay_list, - rq_replay_list) { - /* XXX ok to remove when 1357 resolved - rread 05/29/03 */ - LASSERT(req != last_req); - last_req = req; - - if (req->rq_transno == 0) { - DEBUG_REQ(D_EMERG, req, "zero transno during replay"); - LBUG(); - } - if (req->rq_import_generation < imp->imp_generation) { - DEBUG_REQ(D_RPCTRACE, req, "free request with old gen"); - goto free_req; - } - - /* not yet committed */ - if (req->rq_transno > imp->imp_peer_committed_transno) { - DEBUG_REQ(D_RPCTRACE, req, "stopping search"); - break; - } - - if (req->rq_replay) { - DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)"); - list_move_tail(&req->rq_replay_list, - &imp->imp_committed_list); - continue; - } - - DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)", - imp->imp_peer_committed_transno); -free_req: - ptlrpc_free_request(req); - } - if (skip_committed_list) - return; - - 
list_for_each_entry_safe(req, saved, &imp->imp_committed_list, - rq_replay_list) { - LASSERT(req->rq_transno != 0); - if (req->rq_import_generation < imp->imp_generation || - !req->rq_replay) { - DEBUG_REQ(D_RPCTRACE, req, "free %s open request", - req->rq_import_generation < - imp->imp_generation ? "stale" : "closed"); - - if (imp->imp_replay_cursor == &req->rq_replay_list) - imp->imp_replay_cursor = - req->rq_replay_list.next; - - ptlrpc_free_request(req); - } - } -} - -/** - * Schedule previously sent request for resend. - * For bulk requests we assign new xid (to avoid problems with - * lost replies and therefore several transfers landing into same buffer - * from different sending attempts). - */ -void ptlrpc_resend_req(struct ptlrpc_request *req) -{ - DEBUG_REQ(D_HA, req, "going to resend"); - spin_lock(&req->rq_lock); - - /* - * Request got reply but linked to the import list still. - * Let ptlrpc_check_set() to process it. - */ - if (ptlrpc_client_replied(req)) { - spin_unlock(&req->rq_lock); - DEBUG_REQ(D_HA, req, "it has reply, so skip it"); - return; - } - - lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 }); - req->rq_status = -EAGAIN; - - req->rq_resend = 1; - req->rq_net_err = 0; - req->rq_timedout = 0; - ptlrpc_client_wake_req(req); - spin_unlock(&req->rq_lock); -} - -/** - * Grab additional reference on a request \a req - */ -struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req) -{ - atomic_inc(&req->rq_refcount); - return req; -} -EXPORT_SYMBOL(ptlrpc_request_addref); - -/** - * Add a request to import replay_list. 
- * Must be called under imp_lock - */ -void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, - struct obd_import *imp) -{ - struct ptlrpc_request *iter; - assert_spin_locked(&imp->imp_lock); - - if (req->rq_transno == 0) { - DEBUG_REQ(D_EMERG, req, "saving request with zero transno"); - LBUG(); - } - - /* - * clear this for new requests that were resent as well - * as resent replayed requests. - */ - lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT); - - /* don't re-add requests that have been replayed */ - if (!list_empty(&req->rq_replay_list)) - return; - - lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); - - spin_lock(&req->rq_lock); - req->rq_resend = 0; - spin_unlock(&req->rq_lock); - - LASSERT(imp->imp_replayable); - /* Balanced in ptlrpc_free_committed, usually. */ - ptlrpc_request_addref(req); - list_for_each_entry_reverse(iter, &imp->imp_replay_list, rq_replay_list) { - /* - * We may have duplicate transnos if we create and then - * open a file, or for closes retained if to match creating - * opens, so use req->rq_xid as a secondary key. - * (See bugs 684, 685, and 428.) - * XXX no longer needed, but all opens need transnos! - */ - if (iter->rq_transno > req->rq_transno) - continue; - - if (iter->rq_transno == req->rq_transno) { - LASSERT(iter->rq_xid != req->rq_xid); - if (iter->rq_xid > req->rq_xid) - continue; - } - - list_add(&req->rq_replay_list, &iter->rq_replay_list); - return; - } - - list_add(&req->rq_replay_list, &imp->imp_replay_list); -} - -/** - * Send request and wait until it completes. - * Returns request processing status. 
- */ -int ptlrpc_queue_wait(struct ptlrpc_request *req) -{ - struct ptlrpc_request_set *set; - int rc; - - LASSERT(!req->rq_set); - LASSERT(!req->rq_receiving_reply); - - set = ptlrpc_prep_set(); - if (!set) { - CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); - return -ENOMEM; - } - - /* for distributed debugging */ - lustre_msg_set_status(req->rq_reqmsg, current->pid); - - /* add a ref for the set (see comment in ptlrpc_set_add_req) */ - ptlrpc_request_addref(req); - ptlrpc_set_add_req(set, req); - rc = ptlrpc_set_wait(set); - ptlrpc_set_destroy(set); - - return rc; -} -EXPORT_SYMBOL(ptlrpc_queue_wait); - -/** - * Callback used for replayed requests reply processing. - * In case of successful reply calls registered request replay callback. - * In case of error restart replay process. - */ -static int ptlrpc_replay_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - void *data, int rc) -{ - struct ptlrpc_replay_async_args *aa = data; - struct obd_import *imp = req->rq_import; - - atomic_dec(&imp->imp_replay_inflight); - - /* - * Note: if it is bulk replay (MDS-MDS replay), then even if - * server got the request, but bulk transfer timeout, let's - * replay the bulk req again - */ - if (!ptlrpc_client_replied(req) || - (req->rq_bulk && - lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) { - DEBUG_REQ(D_ERROR, req, "request replay timed out.\n"); - rc = -ETIMEDOUT; - goto out; - } - - if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR && - (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN || - lustre_msg_get_status(req->rq_repmsg) == -ENODEV)) { - rc = lustre_msg_get_status(req->rq_repmsg); - goto out; - } - - /** VBR: check version failure */ - if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) { - /** replay was failed due to version mismatch */ - DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n"); - spin_lock(&imp->imp_lock); - imp->imp_vbr_failed = 1; - imp->imp_no_lock_replay = 1; - 
spin_unlock(&imp->imp_lock); - lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status); - } else { - /** The transno had better not change over replay. */ - LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) == - lustre_msg_get_transno(req->rq_repmsg) || - lustre_msg_get_transno(req->rq_repmsg) == 0, - "%#llx/%#llx\n", - lustre_msg_get_transno(req->rq_reqmsg), - lustre_msg_get_transno(req->rq_repmsg)); - } - - spin_lock(&imp->imp_lock); - /** if replays by version then gap occur on server, no trust to locks */ - if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY) - imp->imp_no_lock_replay = 1; - imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg); - spin_unlock(&imp->imp_lock); - LASSERT(imp->imp_last_replay_transno); - - /* transaction number shouldn't be bigger than the latest replayed */ - if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) { - DEBUG_REQ(D_ERROR, req, - "Reported transno %llu is bigger than the replayed one: %llu", - req->rq_transno, - lustre_msg_get_transno(req->rq_reqmsg)); - rc = -EINVAL; - goto out; - } - - DEBUG_REQ(D_HA, req, "got rep"); - - /* let the callback do fixups, possibly including in the request */ - if (req->rq_replay_cb) - req->rq_replay_cb(req); - - if (ptlrpc_client_replied(req) && - lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) { - DEBUG_REQ(D_ERROR, req, "status %d, old was %d", - lustre_msg_get_status(req->rq_repmsg), - aa->praa_old_status); - } else { - /* Put it back for re-replay. 
*/ - lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status); - } - - /* - * Errors while replay can set transno to 0, but - * imp_last_replay_transno shouldn't be set to 0 anyway - */ - if (req->rq_transno == 0) - CERROR("Transno is 0 during replay!\n"); - - /* continue with recovery */ - rc = ptlrpc_import_recovery_state_machine(imp); - out: - req->rq_send_state = aa->praa_old_state; - - if (rc != 0) - /* this replay failed, so restart recovery */ - ptlrpc_connect_import(imp); - - return rc; -} - -/** - * Prepares and queues request for replay. - * Adds it to ptlrpcd queue for actual sending. - * Returns 0 on success. - */ -int ptlrpc_replay_req(struct ptlrpc_request *req) -{ - struct ptlrpc_replay_async_args *aa; - - LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY); - - LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); - aa = ptlrpc_req_async_args(req); - memset(aa, 0, sizeof(*aa)); - - /* Prepare request to be resent with ptlrpcd */ - aa->praa_old_state = req->rq_send_state; - req->rq_send_state = LUSTRE_IMP_REPLAY; - req->rq_phase = RQ_PHASE_NEW; - req->rq_next_phase = RQ_PHASE_UNDEFINED; - if (req->rq_repmsg) - aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg); - req->rq_status = 0; - req->rq_interpret_reply = ptlrpc_replay_interpret; - /* Readjust the timeout for current conditions */ - ptlrpc_at_set_req_timeout(req); - - /* - * Tell server the net_latency, so the server can calculate how long - * it should wait for next replay - */ - lustre_msg_set_service_time(req->rq_reqmsg, - ptlrpc_at_get_net_latency(req)); - DEBUG_REQ(D_HA, req, "REPLAY"); - - atomic_inc(&req->rq_import->imp_replay_inflight); - ptlrpc_request_addref(req); /* ptlrpcd needs a ref */ - - ptlrpcd_add_req(req); - return 0; -} - -/** - * Aborts all in-flight request on import \a imp sending and delayed lists - */ -void ptlrpc_abort_inflight(struct obd_import *imp) -{ - struct ptlrpc_request *req, *n; - - /* - * Make sure that no new requests get processed for this 
import. - * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing - * this flag and then putting requests on sending_list or delayed_list. - */ - spin_lock(&imp->imp_lock); - - /* - * XXX locking? Maybe we should remove each request with the list - * locked? Also, how do we know if the requests on the list are - * being freed at this time? - */ - list_for_each_entry_safe(req, n, &imp->imp_sending_list, rq_list) { - DEBUG_REQ(D_RPCTRACE, req, "inflight"); - - spin_lock(&req->rq_lock); - if (req->rq_import_generation < imp->imp_generation) { - req->rq_err = 1; - req->rq_status = -EIO; - ptlrpc_client_wake_req(req); - } - spin_unlock(&req->rq_lock); - } - - list_for_each_entry_safe(req, n, &imp->imp_delayed_list, rq_list) { - DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req"); - - spin_lock(&req->rq_lock); - if (req->rq_import_generation < imp->imp_generation) { - req->rq_err = 1; - req->rq_status = -EIO; - ptlrpc_client_wake_req(req); - } - spin_unlock(&req->rq_lock); - } - - /* - * Last chance to free reqs left on the replay list, but we - * will still leak reqs that haven't committed. - */ - if (imp->imp_replayable) - ptlrpc_free_committed(imp); - - spin_unlock(&imp->imp_lock); -} - -/** - * Abort all uncompleted requests in request set \a set - */ -void ptlrpc_abort_set(struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req, *tmp; - - list_for_each_entry_safe(req, tmp, &set->set_requests, rq_set_chain) { - spin_lock(&req->rq_lock); - if (req->rq_phase != RQ_PHASE_RPC) { - spin_unlock(&req->rq_lock); - continue; - } - - req->rq_err = 1; - req->rq_status = -EINTR; - ptlrpc_client_wake_req(req); - spin_unlock(&req->rq_lock); - } -} - -static __u64 ptlrpc_last_xid; -static spinlock_t ptlrpc_last_xid_lock; - -/** - * Initialize the XID for the node. This is common among all requests on - * this node, and only requires the property that it is monotonically - * increasing. It does not need to be sequential. 
Since this is also used - * as the RDMA match bits, it is important that a single client NOT have - * the same match bits for two different in-flight requests, hence we do - * NOT want to have an XID per target or similar. - * - * To avoid an unlikely collision between match bits after a client reboot - * (which would deliver old data into the wrong RDMA buffer) initialize - * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s. - * If the time is clearly incorrect, we instead use a 62-bit random number. - * In the worst case the random number will overflow 1M RPCs per second in - * 9133 years, or permutations thereof. - */ -#define YEAR_2004 (1ULL << 30) -void ptlrpc_init_xid(void) -{ - time64_t now = ktime_get_real_seconds(); - - spin_lock_init(&ptlrpc_last_xid_lock); - if (now < YEAR_2004) { - get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid)); - ptlrpc_last_xid >>= 2; - ptlrpc_last_xid |= (1ULL << 61); - } else { - ptlrpc_last_xid = (__u64)now << 20; - } - - /* Always need to be aligned to a power-of-two for multi-bulk BRW */ - BUILD_BUG_ON(((PTLRPC_BULK_OPS_COUNT - 1) & PTLRPC_BULK_OPS_COUNT) != 0); - ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK; -} - -/** - * Increase xid and returns resulting new value to the caller. - * - * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting - * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC - * itself uses the last bulk xid needed, so the server can determine the - * the number of bulk transfers from the RPC XID and a bitmask. The starting - * xid must align to a power-of-two value. - * - * This is assumed to be true due to the initial ptlrpc_last_xid - * value also being initialized to a power-of-two value. 
LU-1431 - */ -__u64 ptlrpc_next_xid(void) -{ - __u64 next; - - spin_lock(&ptlrpc_last_xid_lock); - next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT; - ptlrpc_last_xid = next; - spin_unlock(&ptlrpc_last_xid_lock); - - return next; -} - -/** - * If request has a new allocated XID (new request or EINPROGRESS resend), - * use this XID as matchbits of bulk, otherwise allocate a new matchbits for - * request to ensure previous bulk fails and avoid problems with lost replies - * and therefore several transfers landing into the same buffer from different - * sending attempts. - */ -void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req) -{ - struct ptlrpc_bulk_desc *bd = req->rq_bulk; - - LASSERT(bd); - - /* - * Generate new matchbits for all resend requests, including - * resend replay. - */ - if (req->rq_resend) { - u64 old_mbits = req->rq_mbits; - - /* - * First time resend on -EINPROGRESS will generate new xid, - * so we can actually use the rq_xid as rq_mbits in such case, - * however, it's bit hard to distinguish such resend with a - * 'resend for the -EINPROGRESS resend'. To make it simple, - * we opt to generate mbits for all resend cases. - */ - if ((bd->bd_import->imp_connect_data.ocd_connect_flags & - OBD_CONNECT_BULK_MBITS)) { - req->rq_mbits = ptlrpc_next_xid(); - } else { - /* old version transfers rq_xid to peer as matchbits */ - spin_lock(&req->rq_import->imp_lock); - list_del_init(&req->rq_unreplied_list); - ptlrpc_assign_next_xid_nolock(req); - spin_unlock(&req->rq_import->imp_lock); - req->rq_mbits = req->rq_xid; - } - - CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n", - old_mbits, req->rq_mbits); - } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) { - /* Request being sent first time, use xid as matchbits. */ - req->rq_mbits = req->rq_xid; - } else { - /* - * Replay request, xid and matchbits have already been - * correctly assigned. 
- */ - return; - } - - /* - * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so - * that server can infer the number of bulks that were prepared, - * see LU-1431 - */ - req->rq_mbits += DIV_ROUND_UP(bd->bd_iov_count, LNET_MAX_IOV) - 1; -} - -/** - * Get a glimpse at what next xid value might have been. - * Returns possible next xid. - */ -__u64 ptlrpc_sample_next_xid(void) -{ -#if BITS_PER_LONG == 32 - /* need to avoid possible word tearing on 32-bit systems */ - __u64 next; - - spin_lock(&ptlrpc_last_xid_lock); - next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT; - spin_unlock(&ptlrpc_last_xid_lock); - - return next; -#else - /* No need to lock, since returned value is racy anyways */ - return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT; -#endif -} -EXPORT_SYMBOL(ptlrpc_sample_next_xid); - -/** - * Functions for operating ptlrpc workers. - * - * A ptlrpc work is a function which will be running inside ptlrpc context. - * The callback shouldn't sleep otherwise it will block that ptlrpcd thread. - * - * 1. after a work is created, it can be used many times, that is: - * handler = ptlrpcd_alloc_work(); - * ptlrpcd_queue_work(); - * - * queue it again when necessary: - * ptlrpcd_queue_work(); - * ptlrpcd_destroy_work(); - * 2. ptlrpcd_queue_work() can be called by multiple processes meanwhile, but - * it will only be queued once in any time. Also as its name implies, it may - * have delay before it really runs by ptlrpcd thread. 
- */ -struct ptlrpc_work_async_args { - int (*cb)(const struct lu_env *, void *); - void *cbdata; -}; - -static void ptlrpcd_add_work_req(struct ptlrpc_request *req) -{ - /* re-initialize the req */ - req->rq_timeout = obd_timeout; - req->rq_sent = ktime_get_real_seconds(); - req->rq_deadline = req->rq_sent + req->rq_timeout; - req->rq_phase = RQ_PHASE_INTERPRET; - req->rq_next_phase = RQ_PHASE_COMPLETE; - req->rq_xid = ptlrpc_next_xid(); - req->rq_import_generation = req->rq_import->imp_generation; - - ptlrpcd_add_req(req); -} - -static int work_interpreter(const struct lu_env *env, - struct ptlrpc_request *req, void *data, int rc) -{ - struct ptlrpc_work_async_args *arg = data; - - LASSERT(ptlrpcd_check_work(req)); - - rc = arg->cb(env, arg->cbdata); - - list_del_init(&req->rq_set_chain); - req->rq_set = NULL; - - if (atomic_dec_return(&req->rq_refcount) > 1) { - atomic_set(&req->rq_refcount, 2); - ptlrpcd_add_work_req(req); - } - return rc; -} - -static int worker_format; - -static int ptlrpcd_check_work(struct ptlrpc_request *req) -{ - return req->rq_pill.rc_fmt == (void *)&worker_format; -} - -/** - * Create a work for ptlrpc. - */ -void *ptlrpcd_alloc_work(struct obd_import *imp, - int (*cb)(const struct lu_env *, void *), void *cbdata) -{ - struct ptlrpc_request *req = NULL; - struct ptlrpc_work_async_args *args; - - might_sleep(); - - if (!cb) - return ERR_PTR(-EINVAL); - - /* copy some code from deprecated fakereq. 
*/ - req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (!req) { - CERROR("ptlrpc: run out of memory!\n"); - return ERR_PTR(-ENOMEM); - } - - ptlrpc_cli_req_init(req); - - req->rq_send_state = LUSTRE_IMP_FULL; - req->rq_type = PTL_RPC_MSG_REQUEST; - req->rq_import = class_import_get(imp); - req->rq_interpret_reply = work_interpreter; - /* don't want reply */ - req->rq_no_delay = 1; - req->rq_no_resend = 1; - req->rq_pill.rc_fmt = (void *)&worker_format; - - BUILD_BUG_ON(sizeof(*args) > sizeof(req->rq_async_args)); - args = ptlrpc_req_async_args(req); - args->cb = cb; - args->cbdata = cbdata; - - return req; -} -EXPORT_SYMBOL(ptlrpcd_alloc_work); - -void ptlrpcd_destroy_work(void *handler) -{ - struct ptlrpc_request *req = handler; - - if (req) - ptlrpc_req_finished(req); -} -EXPORT_SYMBOL(ptlrpcd_destroy_work); - -int ptlrpcd_queue_work(void *handler) -{ - struct ptlrpc_request *req = handler; - - /* - * Check if the req is already being queued. - * - * Here comes a trick: it lacks a way of checking if a req is being - * processed reliably in ptlrpc. Here I have to use refcount of req - * for this purpose. This is okay because the caller should use this - * req as opaque data. - Jinshan - */ - LASSERT(atomic_read(&req->rq_refcount) > 0); - if (atomic_inc_return(&req->rq_refcount) == 2) - ptlrpcd_add_work_req(req); - return 0; -} -EXPORT_SYMBOL(ptlrpcd_queue_work); diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c deleted file mode 100644 index fb35a89ca6c6..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/connection.c +++ /dev/null @@ -1,192 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_RPC -#include -#include -#include - -#include "ptlrpc_internal.h" - -static struct rhashtable conn_hash; - -/* - * struct lnet_process_id may contain unassigned bytes which might not - * be zero, so we cannot just hash and compare bytes. 
- */ - -static u32 lnet_process_id_hash(const void *data, u32 len, u32 seed) -{ - const struct lnet_process_id *lpi = data; - - seed = hash_32(seed ^ lpi->pid, 32); - seed ^= hash_64(lpi->nid, 32); - return seed; -} - -static int lnet_process_id_cmp(struct rhashtable_compare_arg *arg, - const void *obj) -{ - const struct lnet_process_id *lpi = arg->key; - const struct ptlrpc_connection *con = obj; - - if (lpi->nid == con->c_peer.nid && - lpi->pid == con->c_peer.pid) - return 0; - return -ESRCH; -} - -static const struct rhashtable_params conn_hash_params = { - .key_len = 1, /* actually variable-length */ - .key_offset = offsetof(struct ptlrpc_connection, c_peer), - .head_offset = offsetof(struct ptlrpc_connection, c_hash), - .hashfn = lnet_process_id_hash, - .obj_cmpfn = lnet_process_id_cmp, -}; - -struct ptlrpc_connection * -ptlrpc_connection_get(struct lnet_process_id peer, lnet_nid_t self, - struct obd_uuid *uuid) -{ - struct ptlrpc_connection *conn, *conn2; - - conn = rhashtable_lookup_fast(&conn_hash, &peer, conn_hash_params); - if (conn) { - ptlrpc_connection_addref(conn); - goto out; - } - - conn = kzalloc(sizeof(*conn), GFP_NOFS); - if (!conn) - return NULL; - - conn->c_peer = peer; - conn->c_self = self; - atomic_set(&conn->c_refcount, 1); - if (uuid) - obd_str2uuid(&conn->c_remote_uuid, uuid->uuid); - - /* - * Add the newly created conn to the hash, on key collision we - * lost a racing addition and must destroy our newly allocated - * connection. The object which exists in the hash will be - * returned, otherwise NULL is returned on success. 
- */ - conn2 = rhashtable_lookup_get_insert_fast(&conn_hash, &conn->c_hash, - conn_hash_params); - if (conn2 != NULL) { - /* insertion failed */ - kfree(conn); - if (IS_ERR(conn2)) - return NULL; - conn = conn2; - ptlrpc_connection_addref(conn); - } -out: - CDEBUG(D_INFO, "conn=%p refcount %d to %s\n", - conn, atomic_read(&conn->c_refcount), - libcfs_nid2str(conn->c_peer.nid)); - return conn; -} - -int ptlrpc_connection_put(struct ptlrpc_connection *conn) -{ - int rc = 0; - - if (!conn) - return rc; - - LASSERT(atomic_read(&conn->c_refcount) > 0); - - /* - * We do not remove connection from hashtable and - * do not free it even if last caller released ref, - * as we want to have it cached for the case it is - * needed again. - * - * Deallocating it and later creating new connection - * again would be wastful. This way we also avoid - * expensive locking to protect things from get/put - * race when found cached connection is freed by - * ptlrpc_connection_put(). - * - * It will be freed later in module unload time, - * when ptlrpc_connection_fini()->lh_exit->conn_exit() - * path is called. - */ - if (atomic_dec_return(&conn->c_refcount) == 0) - rc = 1; - - CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n", - conn, atomic_read(&conn->c_refcount), - libcfs_nid2str(conn->c_peer.nid)); - - return rc; -} - -struct ptlrpc_connection * -ptlrpc_connection_addref(struct ptlrpc_connection *conn) -{ - atomic_inc(&conn->c_refcount); - CDEBUG(D_INFO, "conn=%p refcount %d to %s\n", - conn, atomic_read(&conn->c_refcount), - libcfs_nid2str(conn->c_peer.nid)); - - return conn; -} - -static void -conn_exit(void *vconn, void *data) -{ - struct ptlrpc_connection *conn = vconn; - - /* - * Nothing should be left. Connection user put it and - * connection also was deleted from table by this time - * so we should have 0 refs. 
- */ - LASSERTF(atomic_read(&conn->c_refcount) == 0, - "Busy connection with %d refs\n", - atomic_read(&conn->c_refcount)); - kfree(conn); -} - -int ptlrpc_connection_init(void) -{ - return rhashtable_init(&conn_hash, &conn_hash_params); -} - -void ptlrpc_connection_fini(void) -{ - rhashtable_free_and_destroy(&conn_hash, conn_exit, NULL); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/errno.c b/drivers/staging/lustre/lustre/ptlrpc/errno.c deleted file mode 100644 index b904524fc1c6..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/errno.c +++ /dev/null @@ -1,383 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.txt - * - * GPL HEADER END - */ -/* - * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved. - * - * Copyright (c) 2013, Intel Corporation. - */ - -#include -#include -#include -#include - -/* - * The two translation tables below must define a one-to-one mapping between - * host and network errnos. - * - * EWOULDBLOCK is equal to EAGAIN on all architectures except for parisc, which - * appears irrelevant. Thus, existing references to EWOULDBLOCK are fine. - * - * EDEADLOCK is equal to EDEADLK on x86 but not on sparc, at least. 
A sparc - * host has no context-free way to determine if a LUSTRE_EDEADLK represents an - * EDEADLK or an EDEADLOCK. Therefore, all existing references to EDEADLOCK - * that need to be transferred on wire have been replaced with EDEADLK. - */ -static int lustre_errno_hton_mapping[] = { - [EPERM] = LUSTRE_EPERM, - [ENOENT] = LUSTRE_ENOENT, - [ESRCH] = LUSTRE_ESRCH, - [EINTR] = LUSTRE_EINTR, - [EIO] = LUSTRE_EIO, - [ENXIO] = LUSTRE_ENXIO, - [E2BIG] = LUSTRE_E2BIG, - [ENOEXEC] = LUSTRE_ENOEXEC, - [EBADF] = LUSTRE_EBADF, - [ECHILD] = LUSTRE_ECHILD, - [EAGAIN] = LUSTRE_EAGAIN, - [ENOMEM] = LUSTRE_ENOMEM, - [EACCES] = LUSTRE_EACCES, - [EFAULT] = LUSTRE_EFAULT, - [ENOTBLK] = LUSTRE_ENOTBLK, - [EBUSY] = LUSTRE_EBUSY, - [EEXIST] = LUSTRE_EEXIST, - [EXDEV] = LUSTRE_EXDEV, - [ENODEV] = LUSTRE_ENODEV, - [ENOTDIR] = LUSTRE_ENOTDIR, - [EISDIR] = LUSTRE_EISDIR, - [EINVAL] = LUSTRE_EINVAL, - [ENFILE] = LUSTRE_ENFILE, - [EMFILE] = LUSTRE_EMFILE, - [ENOTTY] = LUSTRE_ENOTTY, - [ETXTBSY] = LUSTRE_ETXTBSY, - [EFBIG] = LUSTRE_EFBIG, - [ENOSPC] = LUSTRE_ENOSPC, - [ESPIPE] = LUSTRE_ESPIPE, - [EROFS] = LUSTRE_EROFS, - [EMLINK] = LUSTRE_EMLINK, - [EPIPE] = LUSTRE_EPIPE, - [EDOM] = LUSTRE_EDOM, - [ERANGE] = LUSTRE_ERANGE, - [EDEADLK] = LUSTRE_EDEADLK, - [ENAMETOOLONG] = LUSTRE_ENAMETOOLONG, - [ENOLCK] = LUSTRE_ENOLCK, - [ENOSYS] = LUSTRE_ENOSYS, - [ENOTEMPTY] = LUSTRE_ENOTEMPTY, - [ELOOP] = LUSTRE_ELOOP, - [ENOMSG] = LUSTRE_ENOMSG, - [EIDRM] = LUSTRE_EIDRM, - [ECHRNG] = LUSTRE_ECHRNG, - [EL2NSYNC] = LUSTRE_EL2NSYNC, - [EL3HLT] = LUSTRE_EL3HLT, - [EL3RST] = LUSTRE_EL3RST, - [ELNRNG] = LUSTRE_ELNRNG, - [EUNATCH] = LUSTRE_EUNATCH, - [ENOCSI] = LUSTRE_ENOCSI, - [EL2HLT] = LUSTRE_EL2HLT, - [EBADE] = LUSTRE_EBADE, - [EBADR] = LUSTRE_EBADR, - [EXFULL] = LUSTRE_EXFULL, - [ENOANO] = LUSTRE_ENOANO, - [EBADRQC] = LUSTRE_EBADRQC, - [EBADSLT] = LUSTRE_EBADSLT, - [EBFONT] = LUSTRE_EBFONT, - [ENOSTR] = LUSTRE_ENOSTR, - [ENODATA] = LUSTRE_ENODATA, - [ETIME] = LUSTRE_ETIME, - [ENOSR] = LUSTRE_ENOSR, - 
[ENONET] = LUSTRE_ENONET, - [ENOPKG] = LUSTRE_ENOPKG, - [EREMOTE] = LUSTRE_EREMOTE, - [ENOLINK] = LUSTRE_ENOLINK, - [EADV] = LUSTRE_EADV, - [ESRMNT] = LUSTRE_ESRMNT, - [ECOMM] = LUSTRE_ECOMM, - [EPROTO] = LUSTRE_EPROTO, - [EMULTIHOP] = LUSTRE_EMULTIHOP, - [EDOTDOT] = LUSTRE_EDOTDOT, - [EBADMSG] = LUSTRE_EBADMSG, - [EOVERFLOW] = LUSTRE_EOVERFLOW, - [ENOTUNIQ] = LUSTRE_ENOTUNIQ, - [EBADFD] = LUSTRE_EBADFD, - [EREMCHG] = LUSTRE_EREMCHG, - [ELIBACC] = LUSTRE_ELIBACC, - [ELIBBAD] = LUSTRE_ELIBBAD, - [ELIBSCN] = LUSTRE_ELIBSCN, - [ELIBMAX] = LUSTRE_ELIBMAX, - [ELIBEXEC] = LUSTRE_ELIBEXEC, - [EILSEQ] = LUSTRE_EILSEQ, - [ERESTART] = LUSTRE_ERESTART, - [ESTRPIPE] = LUSTRE_ESTRPIPE, - [EUSERS] = LUSTRE_EUSERS, - [ENOTSOCK] = LUSTRE_ENOTSOCK, - [EDESTADDRREQ] = LUSTRE_EDESTADDRREQ, - [EMSGSIZE] = LUSTRE_EMSGSIZE, - [EPROTOTYPE] = LUSTRE_EPROTOTYPE, - [ENOPROTOOPT] = LUSTRE_ENOPROTOOPT, - [EPROTONOSUPPORT] = LUSTRE_EPROTONOSUPPORT, - [ESOCKTNOSUPPORT] = LUSTRE_ESOCKTNOSUPPORT, - [EOPNOTSUPP] = LUSTRE_EOPNOTSUPP, - [EPFNOSUPPORT] = LUSTRE_EPFNOSUPPORT, - [EAFNOSUPPORT] = LUSTRE_EAFNOSUPPORT, - [EADDRINUSE] = LUSTRE_EADDRINUSE, - [EADDRNOTAVAIL] = LUSTRE_EADDRNOTAVAIL, - [ENETDOWN] = LUSTRE_ENETDOWN, - [ENETUNREACH] = LUSTRE_ENETUNREACH, - [ENETRESET] = LUSTRE_ENETRESET, - [ECONNABORTED] = LUSTRE_ECONNABORTED, - [ECONNRESET] = LUSTRE_ECONNRESET, - [ENOBUFS] = LUSTRE_ENOBUFS, - [EISCONN] = LUSTRE_EISCONN, - [ENOTCONN] = LUSTRE_ENOTCONN, - [ESHUTDOWN] = LUSTRE_ESHUTDOWN, - [ETOOMANYREFS] = LUSTRE_ETOOMANYREFS, - [ETIMEDOUT] = LUSTRE_ETIMEDOUT, - [ECONNREFUSED] = LUSTRE_ECONNREFUSED, - [EHOSTDOWN] = LUSTRE_EHOSTDOWN, - [EHOSTUNREACH] = LUSTRE_EHOSTUNREACH, - [EALREADY] = LUSTRE_EALREADY, - [EINPROGRESS] = LUSTRE_EINPROGRESS, - [ESTALE] = LUSTRE_ESTALE, - [EUCLEAN] = LUSTRE_EUCLEAN, - [ENOTNAM] = LUSTRE_ENOTNAM, - [ENAVAIL] = LUSTRE_ENAVAIL, - [EISNAM] = LUSTRE_EISNAM, - [EREMOTEIO] = LUSTRE_EREMOTEIO, - [EDQUOT] = LUSTRE_EDQUOT, - [ENOMEDIUM] = LUSTRE_ENOMEDIUM, - [EMEDIUMTYPE] = 
LUSTRE_EMEDIUMTYPE, - [ECANCELED] = LUSTRE_ECANCELED, - [ENOKEY] = LUSTRE_ENOKEY, - [EKEYEXPIRED] = LUSTRE_EKEYEXPIRED, - [EKEYREVOKED] = LUSTRE_EKEYREVOKED, - [EKEYREJECTED] = LUSTRE_EKEYREJECTED, - [EOWNERDEAD] = LUSTRE_EOWNERDEAD, - [ENOTRECOVERABLE] = LUSTRE_ENOTRECOVERABLE, - [ERESTARTSYS] = LUSTRE_ERESTARTSYS, - [ERESTARTNOINTR] = LUSTRE_ERESTARTNOINTR, - [ERESTARTNOHAND] = LUSTRE_ERESTARTNOHAND, - [ENOIOCTLCMD] = LUSTRE_ENOIOCTLCMD, - [ERESTART_RESTARTBLOCK] = LUSTRE_ERESTART_RESTARTBLOCK, - [EBADHANDLE] = LUSTRE_EBADHANDLE, - [ENOTSYNC] = LUSTRE_ENOTSYNC, - [EBADCOOKIE] = LUSTRE_EBADCOOKIE, - [ENOTSUPP] = LUSTRE_ENOTSUPP, - [ETOOSMALL] = LUSTRE_ETOOSMALL, - [ESERVERFAULT] = LUSTRE_ESERVERFAULT, - [EBADTYPE] = LUSTRE_EBADTYPE, - [EJUKEBOX] = LUSTRE_EJUKEBOX, - [EIOCBQUEUED] = LUSTRE_EIOCBQUEUED, -}; - -static int lustre_errno_ntoh_mapping[] = { - [LUSTRE_EPERM] = EPERM, - [LUSTRE_ENOENT] = ENOENT, - [LUSTRE_ESRCH] = ESRCH, - [LUSTRE_EINTR] = EINTR, - [LUSTRE_EIO] = EIO, - [LUSTRE_ENXIO] = ENXIO, - [LUSTRE_E2BIG] = E2BIG, - [LUSTRE_ENOEXEC] = ENOEXEC, - [LUSTRE_EBADF] = EBADF, - [LUSTRE_ECHILD] = ECHILD, - [LUSTRE_EAGAIN] = EAGAIN, - [LUSTRE_ENOMEM] = ENOMEM, - [LUSTRE_EACCES] = EACCES, - [LUSTRE_EFAULT] = EFAULT, - [LUSTRE_ENOTBLK] = ENOTBLK, - [LUSTRE_EBUSY] = EBUSY, - [LUSTRE_EEXIST] = EEXIST, - [LUSTRE_EXDEV] = EXDEV, - [LUSTRE_ENODEV] = ENODEV, - [LUSTRE_ENOTDIR] = ENOTDIR, - [LUSTRE_EISDIR] = EISDIR, - [LUSTRE_EINVAL] = EINVAL, - [LUSTRE_ENFILE] = ENFILE, - [LUSTRE_EMFILE] = EMFILE, - [LUSTRE_ENOTTY] = ENOTTY, - [LUSTRE_ETXTBSY] = ETXTBSY, - [LUSTRE_EFBIG] = EFBIG, - [LUSTRE_ENOSPC] = ENOSPC, - [LUSTRE_ESPIPE] = ESPIPE, - [LUSTRE_EROFS] = EROFS, - [LUSTRE_EMLINK] = EMLINK, - [LUSTRE_EPIPE] = EPIPE, - [LUSTRE_EDOM] = EDOM, - [LUSTRE_ERANGE] = ERANGE, - [LUSTRE_EDEADLK] = EDEADLK, - [LUSTRE_ENAMETOOLONG] = ENAMETOOLONG, - [LUSTRE_ENOLCK] = ENOLCK, - [LUSTRE_ENOSYS] = ENOSYS, - [LUSTRE_ENOTEMPTY] = ENOTEMPTY, - [LUSTRE_ELOOP] = ELOOP, - [LUSTRE_ENOMSG] = 
ENOMSG, - [LUSTRE_EIDRM] = EIDRM, - [LUSTRE_ECHRNG] = ECHRNG, - [LUSTRE_EL2NSYNC] = EL2NSYNC, - [LUSTRE_EL3HLT] = EL3HLT, - [LUSTRE_EL3RST] = EL3RST, - [LUSTRE_ELNRNG] = ELNRNG, - [LUSTRE_EUNATCH] = EUNATCH, - [LUSTRE_ENOCSI] = ENOCSI, - [LUSTRE_EL2HLT] = EL2HLT, - [LUSTRE_EBADE] = EBADE, - [LUSTRE_EBADR] = EBADR, - [LUSTRE_EXFULL] = EXFULL, - [LUSTRE_ENOANO] = ENOANO, - [LUSTRE_EBADRQC] = EBADRQC, - [LUSTRE_EBADSLT] = EBADSLT, - [LUSTRE_EBFONT] = EBFONT, - [LUSTRE_ENOSTR] = ENOSTR, - [LUSTRE_ENODATA] = ENODATA, - [LUSTRE_ETIME] = ETIME, - [LUSTRE_ENOSR] = ENOSR, - [LUSTRE_ENONET] = ENONET, - [LUSTRE_ENOPKG] = ENOPKG, - [LUSTRE_EREMOTE] = EREMOTE, - [LUSTRE_ENOLINK] = ENOLINK, - [LUSTRE_EADV] = EADV, - [LUSTRE_ESRMNT] = ESRMNT, - [LUSTRE_ECOMM] = ECOMM, - [LUSTRE_EPROTO] = EPROTO, - [LUSTRE_EMULTIHOP] = EMULTIHOP, - [LUSTRE_EDOTDOT] = EDOTDOT, - [LUSTRE_EBADMSG] = EBADMSG, - [LUSTRE_EOVERFLOW] = EOVERFLOW, - [LUSTRE_ENOTUNIQ] = ENOTUNIQ, - [LUSTRE_EBADFD] = EBADFD, - [LUSTRE_EREMCHG] = EREMCHG, - [LUSTRE_ELIBACC] = ELIBACC, - [LUSTRE_ELIBBAD] = ELIBBAD, - [LUSTRE_ELIBSCN] = ELIBSCN, - [LUSTRE_ELIBMAX] = ELIBMAX, - [LUSTRE_ELIBEXEC] = ELIBEXEC, - [LUSTRE_EILSEQ] = EILSEQ, - [LUSTRE_ERESTART] = ERESTART, - [LUSTRE_ESTRPIPE] = ESTRPIPE, - [LUSTRE_EUSERS] = EUSERS, - [LUSTRE_ENOTSOCK] = ENOTSOCK, - [LUSTRE_EDESTADDRREQ] = EDESTADDRREQ, - [LUSTRE_EMSGSIZE] = EMSGSIZE, - [LUSTRE_EPROTOTYPE] = EPROTOTYPE, - [LUSTRE_ENOPROTOOPT] = ENOPROTOOPT, - [LUSTRE_EPROTONOSUPPORT] = EPROTONOSUPPORT, - [LUSTRE_ESOCKTNOSUPPORT] = ESOCKTNOSUPPORT, - [LUSTRE_EOPNOTSUPP] = EOPNOTSUPP, - [LUSTRE_EPFNOSUPPORT] = EPFNOSUPPORT, - [LUSTRE_EAFNOSUPPORT] = EAFNOSUPPORT, - [LUSTRE_EADDRINUSE] = EADDRINUSE, - [LUSTRE_EADDRNOTAVAIL] = EADDRNOTAVAIL, - [LUSTRE_ENETDOWN] = ENETDOWN, - [LUSTRE_ENETUNREACH] = ENETUNREACH, - [LUSTRE_ENETRESET] = ENETRESET, - [LUSTRE_ECONNABORTED] = ECONNABORTED, - [LUSTRE_ECONNRESET] = ECONNRESET, - [LUSTRE_ENOBUFS] = ENOBUFS, - [LUSTRE_EISCONN] = EISCONN, - 
[LUSTRE_ENOTCONN] = ENOTCONN, - [LUSTRE_ESHUTDOWN] = ESHUTDOWN, - [LUSTRE_ETOOMANYREFS] = ETOOMANYREFS, - [LUSTRE_ETIMEDOUT] = ETIMEDOUT, - [LUSTRE_ECONNREFUSED] = ECONNREFUSED, - [LUSTRE_EHOSTDOWN] = EHOSTDOWN, - [LUSTRE_EHOSTUNREACH] = EHOSTUNREACH, - [LUSTRE_EALREADY] = EALREADY, - [LUSTRE_EINPROGRESS] = EINPROGRESS, - [LUSTRE_ESTALE] = ESTALE, - [LUSTRE_EUCLEAN] = EUCLEAN, - [LUSTRE_ENOTNAM] = ENOTNAM, - [LUSTRE_ENAVAIL] = ENAVAIL, - [LUSTRE_EISNAM] = EISNAM, - [LUSTRE_EREMOTEIO] = EREMOTEIO, - [LUSTRE_EDQUOT] = EDQUOT, - [LUSTRE_ENOMEDIUM] = ENOMEDIUM, - [LUSTRE_EMEDIUMTYPE] = EMEDIUMTYPE, - [LUSTRE_ECANCELED] = ECANCELED, - [LUSTRE_ENOKEY] = ENOKEY, - [LUSTRE_EKEYEXPIRED] = EKEYEXPIRED, - [LUSTRE_EKEYREVOKED] = EKEYREVOKED, - [LUSTRE_EKEYREJECTED] = EKEYREJECTED, - [LUSTRE_EOWNERDEAD] = EOWNERDEAD, - [LUSTRE_ENOTRECOVERABLE] = ENOTRECOVERABLE, - [LUSTRE_ERESTARTSYS] = ERESTARTSYS, - [LUSTRE_ERESTARTNOINTR] = ERESTARTNOINTR, - [LUSTRE_ERESTARTNOHAND] = ERESTARTNOHAND, - [LUSTRE_ENOIOCTLCMD] = ENOIOCTLCMD, - [LUSTRE_ERESTART_RESTARTBLOCK] = ERESTART_RESTARTBLOCK, - [LUSTRE_EBADHANDLE] = EBADHANDLE, - [LUSTRE_ENOTSYNC] = ENOTSYNC, - [LUSTRE_EBADCOOKIE] = EBADCOOKIE, - [LUSTRE_ENOTSUPP] = ENOTSUPP, - [LUSTRE_ETOOSMALL] = ETOOSMALL, - [LUSTRE_ESERVERFAULT] = ESERVERFAULT, - [LUSTRE_EBADTYPE] = EBADTYPE, - [LUSTRE_EJUKEBOX] = EJUKEBOX, - [LUSTRE_EIOCBQUEUED] = EIOCBQUEUED, -}; - -unsigned int lustre_errno_hton(unsigned int h) -{ - unsigned int n; - - if (h == 0) { - n = 0; - } else if (h < ARRAY_SIZE(lustre_errno_hton_mapping)) { - n = lustre_errno_hton_mapping[h]; - if (n == 0) - goto generic; - } else { -generic: - /* - * A generic errno is better than the unknown one that could - * mean anything to a different host. 
- */ - n = LUSTRE_EIO; - } - - return n; -} -EXPORT_SYMBOL(lustre_errno_hton); - -unsigned int lustre_errno_ntoh(unsigned int n) -{ - unsigned int h; - - if (n == 0) { - h = 0; - } else if (n < ARRAY_SIZE(lustre_errno_ntoh_mapping)) { - h = lustre_errno_ntoh_mapping[n]; - if (h == 0) - goto generic; - } else { -generic: - /* - * Similar to the situation in lustre_errno_hton(), an unknown - * network errno could coincide with anything. Hence, it is - * better to return a generic errno. - */ - h = EIO; - } - - return h; -} -EXPORT_SYMBOL(lustre_errno_ntoh); diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c deleted file mode 100644 index 130bacc2c891..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/events.c +++ /dev/null @@ -1,585 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -# ifdef __mips64__ -# include -# endif - -#include -#include -#include -#include "ptlrpc_internal.h" - -struct lnet_handle_eq ptlrpc_eq_h; - -/* - * Client's outgoing request callback - */ -void request_out_callback(struct lnet_event *ev) -{ - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_request *req = cbid->cbid_arg; - bool wakeup = false; - - LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK); - LASSERT(ev->unlinked); - - DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status); - - sptlrpc_request_out_callback(req); - - spin_lock(&req->rq_lock); - req->rq_real_sent = ktime_get_real_seconds(); - req->rq_req_unlinked = 1; - /* reply_in_callback happened before request_out_callback? */ - if (req->rq_reply_unlinked) - wakeup = true; - - if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) { - /* Failed send: make it seem like the reply timed out, just - * like failing sends in client.c does currently... - */ - req->rq_net_err = 1; - wakeup = true; - } - - if (wakeup) - ptlrpc_client_wake_req(req); - - spin_unlock(&req->rq_lock); - - ptlrpc_req_finished(req); -} - -/* - * Client's incoming reply callback - */ -void reply_in_callback(struct lnet_event *ev) -{ - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_request *req = cbid->cbid_arg; - - DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status); - - LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK); - LASSERT(ev->md.start == req->rq_repbuf); - LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len); - /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests - * for adaptive timeouts' early reply. 
- */ - LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0); - - spin_lock(&req->rq_lock); - - req->rq_receiving_reply = 0; - req->rq_early = 0; - if (ev->unlinked) - req->rq_reply_unlinked = 1; - - if (ev->status) - goto out_wake; - - if (ev->type == LNET_EVENT_UNLINK) { - LASSERT(ev->unlinked); - DEBUG_REQ(D_NET, req, "unlink"); - goto out_wake; - } - - if (ev->mlength < ev->rlength) { - CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req, - req->rq_replen, ev->rlength, ev->offset); - req->rq_reply_truncated = 1; - req->rq_replied = 1; - req->rq_status = -EOVERFLOW; - req->rq_nob_received = ev->rlength + ev->offset; - goto out_wake; - } - - if ((ev->offset == 0) && - ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) { - /* Early reply */ - DEBUG_REQ(D_ADAPTTO, req, - "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d", - ev->mlength, ev->offset, - req->rq_replen, req->rq_replied, ev->unlinked); - - req->rq_early_count++; /* number received, client side */ - - /* already got the real reply or buffers are already unlinked */ - if (req->rq_replied || req->rq_reply_unlinked == 1) - goto out_wake; - - req->rq_early = 1; - req->rq_reply_off = ev->offset; - req->rq_nob_received = ev->mlength; - /* And we're still receiving */ - req->rq_receiving_reply = 1; - } else { - /* Real reply */ - req->rq_rep_swab_mask = 0; - req->rq_replied = 1; - /* Got reply, no resend required */ - req->rq_resend = 0; - req->rq_reply_off = ev->offset; - req->rq_nob_received = ev->mlength; - /* LNetMDUnlink can't be called under the LNET_LOCK, - * so we must unlink in ptlrpc_unregister_reply - */ - DEBUG_REQ(D_INFO, req, - "reply in flags=%x mlen=%u offset=%d replen=%d", - lustre_msg_get_flags(req->rq_reqmsg), - ev->mlength, ev->offset, req->rq_replen); - } - - req->rq_import->imp_last_reply_time = ktime_get_real_seconds(); - -out_wake: - /* NB don't unlock till after wakeup; req can disappear under us - * since we don't have our own ref - */ - 
ptlrpc_client_wake_req(req); - spin_unlock(&req->rq_lock); -} - -/* - * Client's bulk has been written/read - */ -void client_bulk_callback(struct lnet_event *ev) -{ - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_bulk_desc *desc = cbid->cbid_arg; - struct ptlrpc_request *req; - - LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) && - ev->type == LNET_EVENT_PUT) || - (ptlrpc_is_bulk_get_source(desc->bd_type) && - ev->type == LNET_EVENT_GET) || - ev->type == LNET_EVENT_UNLINK); - LASSERT(ev->unlinked); - - if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE)) - ev->status = -EIO; - - if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, - CFS_FAIL_ONCE)) - ev->status = -EIO; - - CDEBUG((ev->status == 0) ? D_NET : D_ERROR, - "event type %d, status %d, desc %p\n", - ev->type, ev->status, desc); - - spin_lock(&desc->bd_lock); - req = desc->bd_req; - LASSERT(desc->bd_md_count > 0); - desc->bd_md_count--; - - if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) { - desc->bd_nob_transferred += ev->mlength; - desc->bd_sender = ev->sender; - } else { - /* start reconnect and resend if network error hit */ - spin_lock(&req->rq_lock); - req->rq_net_err = 1; - spin_unlock(&req->rq_lock); - } - - if (ev->status != 0) - desc->bd_failure = 1; - - /* NB don't unlock till after wakeup; desc can disappear under us - * otherwise - */ - if (desc->bd_md_count == 0) - ptlrpc_client_wake_req(desc->bd_req); - - spin_unlock(&desc->bd_lock); -} - -/* - * We will have percpt request history list for ptlrpc service in upcoming - * patches because we don't want to be serialized by current per-service - * history operations. So we require history ID can (somehow) show arriving - * order w/o grabbing global lock, and user can sort them in userspace. 
- * - * This is how we generate history ID for ptlrpc_request: - * ---------------------------------------------------- - * | 32 bits | 16 bits | (16 - X)bits | X bits | - * ---------------------------------------------------- - * | seconds | usec / 16 | sequence | CPT id | - * ---------------------------------------------------- - * - * it might not be precise but should be good enough. - */ - -#define REQS_CPT_BITS(svcpt) ((svcpt)->scp_service->srv_cpt_bits) - -#define REQS_SEC_SHIFT 32 -#define REQS_USEC_SHIFT 16 -#define REQS_SEQ_SHIFT(svcpt) REQS_CPT_BITS(svcpt) - -static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req) -{ - __u64 sec = req->rq_arrival_time.tv_sec; - __u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */ - __u64 new_seq; - - /* set sequence ID for request and add it to history list, - * it must be called with hold svcpt::scp_lock - */ - - new_seq = (sec << REQS_SEC_SHIFT) | - (usec << REQS_USEC_SHIFT) | - (svcpt->scp_cpt < 0 ? 
0 : svcpt->scp_cpt); - - if (new_seq > svcpt->scp_hist_seq) { - /* This handles the initial case of scp_hist_seq == 0 or - * we just jumped into a new time window - */ - svcpt->scp_hist_seq = new_seq; - } else { - LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT); - /* NB: increase sequence number in current usec bucket, - * however, it's possible that we used up all bits for - * sequence and jumped into the next usec bucket (future time), - * then we hope there will be less RPCs per bucket at some - * point, and sequence will catch up again - */ - svcpt->scp_hist_seq += (1ULL << REQS_SEQ_SHIFT(svcpt)); - new_seq = svcpt->scp_hist_seq; - } - - req->rq_history_seq = new_seq; - - list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs); -} - -/* - * Server's incoming request callback - */ -void request_in_callback(struct lnet_event *ev) -{ - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg; - struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt; - struct ptlrpc_service *service = svcpt->scp_service; - struct ptlrpc_request *req; - - LASSERT(ev->type == LNET_EVENT_PUT || - ev->type == LNET_EVENT_UNLINK); - LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer); - LASSERT((char *)ev->md.start + ev->offset + ev->mlength <= - rqbd->rqbd_buffer + service->srv_buf_size); - - CDEBUG((ev->status == 0) ? D_NET : D_ERROR, - "event type %d, status %d, service %s\n", - ev->type, ev->status, service->srv_name); - - if (ev->unlinked) { - /* If this is the last request message to fit in the - * request buffer we can use the request object embedded in - * rqbd. Note that if we failed to allocate a request, - * we'd have to re-post the rqbd, which we can't do in this - * context. - */ - req = &rqbd->rqbd_req; - memset(req, 0, sizeof(*req)); - } else { - LASSERT(ev->type == LNET_EVENT_PUT); - if (ev->status != 0) { - /* We moaned above already... 
*/ - return; - } - req = ptlrpc_request_cache_alloc(GFP_ATOMIC); - if (!req) { - CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n", - service->srv_name, - libcfs_id2str(ev->initiator)); - return; - } - } - - ptlrpc_srv_req_init(req); - /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL, - * flags are reset and scalars are zero. We only set the message - * size to non-zero if this was a successful receive. - */ - req->rq_xid = ev->match_bits; - req->rq_reqbuf = ev->md.start + ev->offset; - if (ev->type == LNET_EVENT_PUT && ev->status == 0) - req->rq_reqdata_len = ev->mlength; - ktime_get_real_ts64(&req->rq_arrival_time); - req->rq_peer = ev->initiator; - req->rq_self = ev->target.nid; - req->rq_rqbd = rqbd; - req->rq_phase = RQ_PHASE_NEW; - if (ev->type == LNET_EVENT_PUT) - CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n", - req, req->rq_xid, ev->mlength); - - CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer)); - - spin_lock(&svcpt->scp_lock); - - ptlrpc_req_add_history(svcpt, req); - - if (ev->unlinked) { - svcpt->scp_nrqbds_posted--; - CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n", - svcpt->scp_nrqbds_posted); - - /* Normally, don't complain about 0 buffers posted; LNET won't - * drop incoming reqs since we set the portal lazy - */ - if (test_req_buffer_pressure && - ev->type != LNET_EVENT_UNLINK && - svcpt->scp_nrqbds_posted == 0) - CWARN("All %s request buffers busy\n", - service->srv_name); - - /* req takes over the network's ref on rqbd */ - } else { - /* req takes a ref on rqbd */ - rqbd->rqbd_refcount++; - } - - list_add_tail(&req->rq_list, &svcpt->scp_req_incoming); - svcpt->scp_nreqs_incoming++; - - /* NB everything can disappear under us once the request - * has been queued and we unlock, so do the wake now... 
- */ - wake_up(&svcpt->scp_waitq); - - spin_unlock(&svcpt->scp_lock); -} - -/* - * Server's outgoing reply callback - */ -void reply_out_callback(struct lnet_event *ev) -{ - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - struct ptlrpc_reply_state *rs = cbid->cbid_arg; - struct ptlrpc_service_part *svcpt = rs->rs_svcpt; - - LASSERT(ev->type == LNET_EVENT_SEND || - ev->type == LNET_EVENT_ACK || - ev->type == LNET_EVENT_UNLINK); - - if (!rs->rs_difficult) { - /* 'Easy' replies have no further processing so I drop the - * net's ref on 'rs' - */ - LASSERT(ev->unlinked); - ptlrpc_rs_decref(rs); - return; - } - - LASSERT(rs->rs_on_net); - - if (ev->unlinked) { - /* Last network callback. The net's ref on 'rs' stays put - * until ptlrpc_handle_rs() is done with it - */ - spin_lock(&svcpt->scp_rep_lock); - spin_lock(&rs->rs_lock); - - rs->rs_on_net = 0; - if (!rs->rs_no_ack || - rs->rs_transno <= - rs->rs_export->exp_obd->obd_last_committed || - list_empty(&rs->rs_obd_list)) - ptlrpc_schedule_difficult_reply(rs); - - spin_unlock(&rs->rs_lock); - spin_unlock(&svcpt->scp_rep_lock); - } -} - -static void ptlrpc_master_callback(struct lnet_event *ev) -{ - struct ptlrpc_cb_id *cbid = ev->md.user_ptr; - void (*callback)(struct lnet_event *ev) = cbid->cbid_fn; - - /* Honestly, it's best to find out early. 
*/ - LASSERT(cbid->cbid_arg != LP_POISON); - LASSERT(callback == request_out_callback || - callback == reply_in_callback || - callback == client_bulk_callback || - callback == request_in_callback || - callback == reply_out_callback); - - callback(ev); -} - -int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, - struct lnet_process_id *peer, lnet_nid_t *self) -{ - int best_dist = 0; - __u32 best_order = 0; - int count = 0; - int rc = -ENOENT; - int dist; - __u32 order; - lnet_nid_t dst_nid; - lnet_nid_t src_nid; - - peer->pid = LNET_PID_LUSTRE; - - /* Choose the matching UUID that's closest */ - while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) { - dist = LNetDist(dst_nid, &src_nid, &order); - if (dist < 0) - continue; - - if (dist == 0) { /* local! use loopback LND */ - peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0); - rc = 0; - break; - } - - if (rc < 0 || - dist < best_dist || - (dist == best_dist && order < best_order)) { - best_dist = dist; - best_order = order; - - peer->nid = dst_nid; - *self = src_nid; - rc = 0; - } - } - - CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer)); - return rc; -} - -static void ptlrpc_ni_fini(void) -{ - int rc; - int retries; - - /* Wait for the event queue to become idle since there may still be - * messages in flight with pending events (i.e. 
the fire-and-forget - * messages == client requests and "non-difficult" server - * replies - */ - - for (retries = 0;; retries++) { - rc = LNetEQFree(ptlrpc_eq_h); - switch (rc) { - default: - LBUG(); - - case 0: - LNetNIFini(); - return; - - case -EBUSY: - if (retries != 0) - CWARN("Event queue still busy\n"); - - schedule_timeout_uninterruptible(2 * HZ); - break; - } - } - /* notreached */ -} - -static lnet_pid_t ptl_get_pid(void) -{ - lnet_pid_t pid; - - pid = LNET_PID_LUSTRE; - return pid; -} - -static int ptlrpc_ni_init(void) -{ - int rc; - lnet_pid_t pid; - - pid = ptl_get_pid(); - CDEBUG(D_NET, "My pid is: %x\n", pid); - - /* We're not passing any limits yet... */ - rc = LNetNIInit(pid); - if (rc < 0) { - CDEBUG(D_NET, "Can't init network interface: %d\n", rc); - return rc; - } - - /* CAVEAT EMPTOR: how we process portals events is _radically_ - * different depending on... - */ - /* kernel LNet calls our master callback when there are new event, - * because we are guaranteed to get every event via callback, - * so we just set EQ size to 0 to avoid overhead of serializing - * enqueue/dequeue operations in LNet. 
- */ - rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h); - if (rc == 0) - return 0; - - CERROR("Failed to allocate event queue: %d\n", rc); - LNetNIFini(); - - return rc; -} - -int ptlrpc_init_portals(void) -{ - int rc = ptlrpc_ni_init(); - - if (rc != 0) { - CERROR("network initialisation failed\n"); - return rc; - } - rc = ptlrpcd_addref(); - if (rc == 0) - return 0; - - CERROR("rpcd initialisation failed\n"); - ptlrpc_ni_fini(); - return rc; -} - -void ptlrpc_exit_portals(void) -{ - ptlrpcd_decref(); - ptlrpc_ni_fini(); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c deleted file mode 100644 index 1a0f35dfab97..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/import.c +++ /dev/null @@ -1,1677 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/ptlrpc/import.c - * - * Author: Mike Shaver - */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -struct ptlrpc_connect_async_args { - __u64 pcaa_peer_committed; - int pcaa_initial_connect; -}; - -/** - * Updates import \a imp current state to provided \a state value - * Helper function. Must be called under imp_lock. - */ -static void __import_set_state(struct obd_import *imp, - enum lustre_imp_state state) -{ - switch (state) { - case LUSTRE_IMP_CLOSED: - case LUSTRE_IMP_NEW: - case LUSTRE_IMP_DISCON: - case LUSTRE_IMP_CONNECTING: - break; - case LUSTRE_IMP_REPLAY_WAIT: - imp->imp_replay_state = LUSTRE_IMP_REPLAY_LOCKS; - break; - default: - imp->imp_replay_state = LUSTRE_IMP_REPLAY; - } - - imp->imp_state = state; - imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state; - imp->imp_state_hist[imp->imp_state_hist_idx].ish_time = - ktime_get_real_seconds(); - imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) % - IMP_STATE_HIST_LEN; -} - -/* A CLOSED import should remain so. */ -#define IMPORT_SET_STATE_NOLOCK(imp, state) \ -do { \ - if (imp->imp_state != LUSTRE_IMP_CLOSED) { \ - CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \ - imp, obd2cli_tgt(imp->imp_obd), \ - ptlrpc_import_state_name(imp->imp_state), \ - ptlrpc_import_state_name(state)); \ - __import_set_state(imp, state); \ - } \ -} while (0) - -#define IMPORT_SET_STATE(imp, state) \ -do { \ - spin_lock(&imp->imp_lock); \ - IMPORT_SET_STATE_NOLOCK(imp, state); \ - spin_unlock(&imp->imp_lock); \ -} while (0) - -static int ptlrpc_connect_interpret(const struct lu_env *env, - struct ptlrpc_request *request, - void *data, int rc); -int ptlrpc_import_recovery_state_machine(struct obd_import *imp); - -/* Only this function is allowed to change the import state when it is - * CLOSED. 
I would rather refcount the import and free it after - * disconnection like we do with exports. To do that, the client_obd - * will need to save the peer info somewhere other than in the import, - * though. - */ -int ptlrpc_init_import(struct obd_import *imp) -{ - spin_lock(&imp->imp_lock); - - imp->imp_generation++; - imp->imp_state = LUSTRE_IMP_NEW; - - spin_unlock(&imp->imp_lock); - - return 0; -} -EXPORT_SYMBOL(ptlrpc_init_import); - -#define UUID_STR "_UUID" -static void deuuidify(char *uuid, const char *prefix, char **uuid_start, - int *uuid_len) -{ - *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix)) - ? uuid : uuid + strlen(prefix); - - *uuid_len = strlen(*uuid_start); - - if (*uuid_len < strlen(UUID_STR)) - return; - - if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR), - UUID_STR, strlen(UUID_STR))) - *uuid_len -= strlen(UUID_STR); -} - -/** - * Returns true if import was FULL, false if import was already not - * connected. - * @imp - import to be disconnected - * @conn_cnt - connection count (epoch) of the request that timed out - * and caused the disconnection. In some cases, multiple - * inflight requests can fail to a single target (e.g. OST - * bulk requests) and if one has already caused a reconnection - * (increasing the import->conn_cnt) the older failure should - * not also cause a reconnection. If zero it forces a reconnect. 
- */ -int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt) -{ - int rc = 0; - - spin_lock(&imp->imp_lock); - - if (imp->imp_state == LUSTRE_IMP_FULL && - (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) { - char *target_start; - int target_len; - - deuuidify(obd2cli_tgt(imp->imp_obd), NULL, - &target_start, &target_len); - - if (imp->imp_replayable) { - LCONSOLE_WARN("%s: Connection to %.*s (at %s) was lost; in progress operations using this service will wait for recovery to complete\n", - imp->imp_obd->obd_name, target_len, target_start, - libcfs_nid2str(imp->imp_connection->c_peer.nid)); - } else { - LCONSOLE_ERROR_MSG(0x166, "%s: Connection to %.*s (at %s) was lost; in progress operations using this service will fail\n", - imp->imp_obd->obd_name, - target_len, target_start, - libcfs_nid2str(imp->imp_connection->c_peer.nid)); - } - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON); - spin_unlock(&imp->imp_lock); - - if (obd_dump_on_timeout) - libcfs_debug_dumplog(); - - obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON); - rc = 1; - } else { - spin_unlock(&imp->imp_lock); - CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n", - imp->imp_client->cli_name, imp, - (imp->imp_state == LUSTRE_IMP_FULL && - imp->imp_conn_cnt > conn_cnt) ? - "reconnected" : "not connected", imp->imp_conn_cnt, - conn_cnt, ptlrpc_import_state_name(imp->imp_state)); - } - - return rc; -} - -/* - * This acts as a barrier; all existing requests are rejected, and - * no new requests will be accepted until the import is valid again. 
- */ -void ptlrpc_deactivate_import(struct obd_import *imp) -{ - CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd)); - - spin_lock(&imp->imp_lock); - imp->imp_invalid = 1; - imp->imp_generation++; - spin_unlock(&imp->imp_lock); - - ptlrpc_abort_inflight(imp); - obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE); -} -EXPORT_SYMBOL(ptlrpc_deactivate_import); - -static unsigned int -ptlrpc_inflight_deadline(struct ptlrpc_request *req, time64_t now) -{ - long dl; - - if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) || - (req->rq_phase == RQ_PHASE_BULK) || - (req->rq_phase == RQ_PHASE_NEW))) - return 0; - - if (req->rq_timedout) - return 0; - - if (req->rq_phase == RQ_PHASE_NEW) - dl = req->rq_sent; - else - dl = req->rq_deadline; - - if (dl <= now) - return 0; - - return dl - now; -} - -static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp) -{ - time64_t now = ktime_get_real_seconds(); - struct ptlrpc_request *req, *n; - unsigned int timeout = 0; - - spin_lock(&imp->imp_lock); - list_for_each_entry_safe(req, n, &imp->imp_sending_list, rq_list) - timeout = max(ptlrpc_inflight_deadline(req, now), timeout); - - spin_unlock(&imp->imp_lock); - return timeout; -} - -/** - * This function will invalidate the import, if necessary, then block - * for all the RPC completions, and finally notify the obd to - * invalidate its state (ie cancel locks, clear pending requests, - * etc). - */ -void ptlrpc_invalidate_import(struct obd_import *imp) -{ - struct ptlrpc_request *req, *n; - unsigned int timeout; - int rc; - - atomic_inc(&imp->imp_inval_count); - - if (!imp->imp_invalid || imp->imp_obd->obd_no_recov) - ptlrpc_deactivate_import(imp); - - CFS_FAIL_TIMEOUT(OBD_FAIL_MGS_CONNECT_NET, 3 * cfs_fail_val / 2); - LASSERT(imp->imp_invalid); - - /* Wait forever until inflight == 0. We really can't do it another - * way because in some cases we need to wait for very long reply - * unlink. 
We can't do anything before that because there is really - * no guarantee that some rdma transfer is not in progress right now. - */ - do { - /* Calculate max timeout for waiting on rpcs to error - * out. Use obd_timeout if calculated value is smaller - * than it. - */ - if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { - timeout = ptlrpc_inflight_timeout(imp); - timeout += timeout / 3; - - if (timeout == 0) - timeout = obd_timeout; - } else { - /* decrease the interval to increase race condition */ - timeout = 1; - } - - CDEBUG(D_RPCTRACE, - "Sleeping %d sec for inflight to error out\n", - timeout); - - /* Wait for all requests to error out and call completion - * callbacks. Cap it at obd_timeout -- these should all - * have been locally cancelled by ptlrpc_abort_inflight. - */ - rc = wait_event_idle_timeout(imp->imp_recovery_waitq, - atomic_read(&imp->imp_inflight) == 0, - obd_timeout * HZ); - - if (rc == 0) { - const char *cli_tgt = obd2cli_tgt(imp->imp_obd); - - CERROR("%s: timeout waiting for callback (%d != 0)\n", - cli_tgt, - atomic_read(&imp->imp_inflight)); - - spin_lock(&imp->imp_lock); - if (atomic_read(&imp->imp_inflight) == 0) { - int count = atomic_read(&imp->imp_unregistering); - - /* We know that "unregistering" rpcs only can - * survive in sending or delaying lists (they - * maybe waiting for long reply unlink in - * sluggish nets). Let's check this. If there - * is no inflight and unregistering != 0, this - * is bug. - */ - LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n", - count); - - /* Let's save one loop as soon as inflight have - * dropped to zero. No new inflights possible at - * this point. - */ - rc = 0; - } else { - list_for_each_entry_safe(req, n, - &imp->imp_sending_list, rq_list) { - DEBUG_REQ(D_ERROR, req, - "still on sending list"); - } - list_for_each_entry_safe(req, n, - &imp->imp_delayed_list, rq_list) { - DEBUG_REQ(D_ERROR, req, - "still on delayed list"); - } - - CERROR("%s: Unregistering RPCs found (%d). 
Network is sluggish? Waiting them to error out.\n", - cli_tgt, - atomic_read(&imp-> - imp_unregistering)); - } - spin_unlock(&imp->imp_lock); - } - } while (rc == 0); - - /* - * Let's additionally check that no new rpcs added to import in - * "invalidate" state. - */ - LASSERT(atomic_read(&imp->imp_inflight) == 0); - obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE); - sptlrpc_import_flush_all_ctx(imp); - - atomic_dec(&imp->imp_inval_count); - wake_up_all(&imp->imp_recovery_waitq); -} -EXPORT_SYMBOL(ptlrpc_invalidate_import); - -/* unset imp_invalid */ -void ptlrpc_activate_import(struct obd_import *imp) -{ - struct obd_device *obd = imp->imp_obd; - - spin_lock(&imp->imp_lock); - if (imp->imp_deactive != 0) { - spin_unlock(&imp->imp_lock); - return; - } - - imp->imp_invalid = 0; - spin_unlock(&imp->imp_lock); - obd_import_event(obd, imp, IMP_EVENT_ACTIVE); -} -EXPORT_SYMBOL(ptlrpc_activate_import); - -void ptlrpc_pinger_force(struct obd_import *imp) -{ - CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd), - ptlrpc_import_state_name(imp->imp_state)); - - spin_lock(&imp->imp_lock); - imp->imp_force_verify = 1; - spin_unlock(&imp->imp_lock); - - if (imp->imp_state != LUSTRE_IMP_CONNECTING) - ptlrpc_pinger_wake_up(); -} -EXPORT_SYMBOL(ptlrpc_pinger_force); - -void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt) -{ - LASSERT(!imp->imp_dlm_fake); - - if (ptlrpc_set_import_discon(imp, conn_cnt)) { - if (!imp->imp_replayable) { - CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid, - imp->imp_obd->obd_name); - ptlrpc_deactivate_import(imp); - } - - ptlrpc_pinger_force(imp); - } -} - -int ptlrpc_reconnect_import(struct obd_import *imp) -{ - int rc; - - ptlrpc_pinger_force(imp); - - CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n", - obd2cli_tgt(imp->imp_obd), obd_timeout); - - rc = wait_event_idle_timeout(imp->imp_recovery_waitq, - 
!ptlrpc_import_in_recovery(imp), - obd_timeout * HZ); - CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd), - ptlrpc_import_state_name(imp->imp_state)); - return rc == 0 ? -ETIMEDOUT : 0; -} -EXPORT_SYMBOL(ptlrpc_reconnect_import); - -/** - * Connection on import \a imp is changed to another one (if more than one is - * present). We typically chose connection that we have not tried to connect to - * the longest - */ -static int import_select_connection(struct obd_import *imp) -{ - struct obd_import_conn *imp_conn = NULL, *conn; - struct obd_export *dlmexp; - char *target_start; - int target_len, tried_all = 1; - - spin_lock(&imp->imp_lock); - - if (list_empty(&imp->imp_conn_list)) { - CERROR("%s: no connections available\n", - imp->imp_obd->obd_name); - spin_unlock(&imp->imp_lock); - return -EINVAL; - } - - list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { - CDEBUG(D_HA, "%s: connect to NID %s last attempt %llu\n", - imp->imp_obd->obd_name, - libcfs_nid2str(conn->oic_conn->c_peer.nid), - conn->oic_last_attempt); - - /* If we have not tried this connection since - * the last successful attempt, go with this one - */ - if ((conn->oic_last_attempt == 0) || - time_before_eq64(conn->oic_last_attempt, - imp->imp_last_success_conn)) { - imp_conn = conn; - tried_all = 0; - break; - } - - /* If all of the connections have already been tried - * since the last successful connection; just choose the - * least recently used - */ - if (!imp_conn) - imp_conn = conn; - else if (time_before64(conn->oic_last_attempt, - imp_conn->oic_last_attempt)) - imp_conn = conn; - } - - /* if not found, simply choose the current one */ - if (!imp_conn || imp->imp_force_reconnect) { - LASSERT(imp->imp_conn_current); - imp_conn = imp->imp_conn_current; - tried_all = 0; - } - LASSERT(imp_conn->oic_conn); - - /* If we've tried everything, and we're back to the beginning of the - * list, increase our timeout and try again. It will be reset when - * we do finally connect. 
(FIXME: really we should wait for all network - * state associated with the last connection attempt to drain before - * trying to reconnect on it.) - */ - if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) { - struct adaptive_timeout *at = &imp->imp_at.iat_net_latency; - - if (at_get(at) < CONNECTION_SWITCH_MAX) { - at_measured(at, at_get(at) + CONNECTION_SWITCH_INC); - if (at_get(at) > CONNECTION_SWITCH_MAX) - at_reset(at, CONNECTION_SWITCH_MAX); - } - LASSERT(imp_conn->oic_last_attempt); - CDEBUG(D_HA, "%s: tried all connections, increasing latency to %ds\n", - imp->imp_obd->obd_name, at_get(at)); - } - - imp_conn->oic_last_attempt = get_jiffies_64(); - - /* switch connection, don't mind if it's same as the current one */ - ptlrpc_connection_put(imp->imp_connection); - imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn); - - dlmexp = class_conn2export(&imp->imp_dlm_handle); - ptlrpc_connection_put(dlmexp->exp_connection); - dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn); - class_export_put(dlmexp); - - if (imp->imp_conn_current != imp_conn) { - if (imp->imp_conn_current) { - deuuidify(obd2cli_tgt(imp->imp_obd), NULL, - &target_start, &target_len); - - CDEBUG(D_HA, "%s: Connection changing to %.*s (at %s)\n", - imp->imp_obd->obd_name, - target_len, target_start, - libcfs_nid2str(imp_conn->oic_conn->c_peer.nid)); - } - - imp->imp_conn_current = imp_conn; - } - - CDEBUG(D_HA, "%s: import %p using connection %s/%s\n", - imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid, - libcfs_nid2str(imp_conn->oic_conn->c_peer.nid)); - - spin_unlock(&imp->imp_lock); - - return 0; -} - -/* - * must be called under imp_lock - */ -static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) -{ - struct ptlrpc_request *req; - - /* The requests in committed_list always have smaller transnos than - * the requests in replay_list - */ - if (!list_empty(&imp->imp_committed_list)) { - req = 
list_first_entry(&imp->imp_committed_list, - struct ptlrpc_request, rq_replay_list); - *transno = req->rq_transno; - if (req->rq_transno == 0) { - DEBUG_REQ(D_ERROR, req, - "zero transno in committed_list"); - LBUG(); - } - return 1; - } - if (!list_empty(&imp->imp_replay_list)) { - req = list_first_entry(&imp->imp_replay_list, - struct ptlrpc_request, rq_replay_list); - *transno = req->rq_transno; - if (req->rq_transno == 0) { - DEBUG_REQ(D_ERROR, req, "zero transno in replay_list"); - LBUG(); - } - return 1; - } - return 0; -} - -/** - * Attempt to (re)connect import \a imp. This includes all preparations, - * initializing CONNECT RPC request and passing it to ptlrpcd for - * actual sending. - * Returns 0 on success or error code. - */ -int ptlrpc_connect_import(struct obd_import *imp) -{ - struct obd_device *obd = imp->imp_obd; - int initial_connect = 0; - int set_transno = 0; - __u64 committed_before_reconnect = 0; - struct ptlrpc_request *request; - char *bufs[] = { NULL, - obd2cli_tgt(imp->imp_obd), - obd->obd_uuid.uuid, - (char *)&imp->imp_dlm_handle, - (char *)&imp->imp_connect_data }; - struct ptlrpc_connect_async_args *aa; - int rc; - - spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_CLOSED) { - spin_unlock(&imp->imp_lock); - CERROR("can't connect to a closed import\n"); - return -EINVAL; - } else if (imp->imp_state == LUSTRE_IMP_FULL) { - spin_unlock(&imp->imp_lock); - CERROR("already connected\n"); - return 0; - } else if (imp->imp_state == LUSTRE_IMP_CONNECTING || - imp->imp_connected) { - spin_unlock(&imp->imp_lock); - CERROR("already connecting\n"); - return -EALREADY; - } - - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING); - - imp->imp_conn_cnt++; - imp->imp_resend_replay = 0; - - if (!lustre_handle_is_used(&imp->imp_remote_handle)) - initial_connect = 1; - else - committed_before_reconnect = imp->imp_peer_committed_transno; - - set_transno = ptlrpc_first_transno(imp, - &imp->imp_connect_data.ocd_transno); - 
spin_unlock(&imp->imp_lock); - - rc = import_select_connection(imp); - if (rc) - goto out; - - rc = sptlrpc_import_sec_adapt(imp, NULL, NULL); - if (rc) - goto out; - - /* Reset connect flags to the originally requested flags, in case - * the server is updated on-the-fly we will get the new features. - */ - imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig; - /* Reset ocd_version each time so the server knows the exact versions */ - imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE; - imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; - imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18; - - rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd, - &obd->obd_uuid, &imp->imp_connect_data, NULL); - if (rc) - goto out; - - request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT); - if (!request) { - rc = -ENOMEM; - goto out; - } - - rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION, - imp->imp_connect_op, bufs, NULL); - if (rc) { - ptlrpc_request_free(request); - goto out; - } - - /* Report the rpc service time to the server so that it knows how long - * to wait for clients to join recovery - */ - lustre_msg_set_service_time(request->rq_reqmsg, - at_timeout2est(request->rq_timeout)); - - /* The amount of time we give the server to process the connect req. - * import_select_connection will increase the net latency on - * repeated reconnect attempts to cover slow networks. 
- * We override/ignore the server rpc completion estimate here, - * which may be large if this is a reconnect attempt - */ - request->rq_timeout = INITIAL_CONNECT_TIMEOUT; - lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout); - - request->rq_no_resend = 1; - request->rq_no_delay = 1; - request->rq_send_state = LUSTRE_IMP_CONNECTING; - /* Allow a slightly larger reply for future growth compatibility */ - req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER, - sizeof(struct obd_connect_data) + - 16 * sizeof(__u64)); - ptlrpc_request_set_replen(request); - request->rq_interpret_reply = ptlrpc_connect_interpret; - - BUILD_BUG_ON(sizeof(*aa) > sizeof(request->rq_async_args)); - aa = ptlrpc_req_async_args(request); - memset(aa, 0, sizeof(*aa)); - - aa->pcaa_peer_committed = committed_before_reconnect; - aa->pcaa_initial_connect = initial_connect; - - if (aa->pcaa_initial_connect) { - spin_lock(&imp->imp_lock); - imp->imp_replayable = 1; - spin_unlock(&imp->imp_lock); - lustre_msg_add_op_flags(request->rq_reqmsg, - MSG_CONNECT_INITIAL); - } - - if (set_transno) - lustre_msg_add_op_flags(request->rq_reqmsg, - MSG_CONNECT_TRANSNO); - - DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)", - request->rq_timeout); - ptlrpcd_add_req(request); - rc = 0; -out: - if (rc != 0) - IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON); - - return rc; -} -EXPORT_SYMBOL(ptlrpc_connect_import); - -static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp) -{ - int force_verify; - - spin_lock(&imp->imp_lock); - force_verify = imp->imp_force_verify != 0; - spin_unlock(&imp->imp_lock); - - if (force_verify) - ptlrpc_pinger_wake_up(); -} - -static int ptlrpc_busy_reconnect(int rc) -{ - return (rc == -EBUSY) || (rc == -EAGAIN); -} - -static int ptlrpc_connect_set_flags(struct obd_import *imp, - struct obd_connect_data *ocd, - u64 old_connect_flags, - struct obd_export *exp, int init_connect) -{ - struct client_obd *cli = &imp->imp_obd->u.cli; - static 
bool warned; - - if ((imp->imp_connect_flags_orig & OBD_CONNECT_IBITS) && - !(ocd->ocd_connect_flags & OBD_CONNECT_IBITS)) { - LCONSOLE_WARN("%s: MDS %s does not support ibits lock, either very old or invalid: requested %#llx, replied %#llx\n", - imp->imp_obd->obd_name, - imp->imp_connection->c_remote_uuid.uuid, - imp->imp_connect_flags_orig, - ocd->ocd_connect_flags); - return -EPROTO; - } - - spin_lock(&imp->imp_lock); - list_del(&imp->imp_conn_current->oic_item); - list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list); - imp->imp_last_success_conn = imp->imp_conn_current->oic_last_attempt; - - spin_unlock(&imp->imp_lock); - - if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && - (ocd->ocd_version > LUSTRE_VERSION_CODE + - LUSTRE_VERSION_OFFSET_WARN || - ocd->ocd_version < LUSTRE_VERSION_CODE - - LUSTRE_VERSION_OFFSET_WARN)) { - /* - * Sigh, some compilers do not like #ifdef in the middle - * of macro arguments - */ - const char *older = "older than client. Consider upgrading server"; - const char *newer = "newer than client. Consider recompiling application"; - - LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n", - obd2cli_tgt(imp->imp_obd), - OBD_OCD_VERSION_MAJOR(ocd->ocd_version), - OBD_OCD_VERSION_MINOR(ocd->ocd_version), - OBD_OCD_VERSION_PATCH(ocd->ocd_version), - OBD_OCD_VERSION_FIX(ocd->ocd_version), - ocd->ocd_version > LUSTRE_VERSION_CODE ? - newer : older, LUSTRE_VERSION_STRING); - warned = true; - } - -#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 53, 0) - /* - * Check if server has LU-1252 fix applied to not always swab - * the IR MNE entries. Do this only once per connection. This - * fixup is version-limited, because we don't want to carry the - * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we - * need interop with unpatched 2.2 servers. For newer servers, - * the client will do MNE swabbing only as needed. 
LU-1644 - */ - if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && - !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) && - OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 && - OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 && - OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 && - !strcmp(imp->imp_obd->obd_type->typ_name, - LUSTRE_MGC_NAME))) - imp->imp_need_mne_swab = 1; - else /* clear if server was upgraded since last connect */ - imp->imp_need_mne_swab = 0; -#endif - - if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) { - /* - * We sent to the server ocd_cksum_types with bits set - * for algorithms we understand. The server masked off - * the checksum types it doesn't support - */ - if (!(ocd->ocd_cksum_types & cksum_types_supported_client())) { - LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n", - obd2cli_tgt(imp->imp_obd), - ocd->ocd_cksum_types, - cksum_types_supported_client()); - cli->cl_checksum = 0; - cli->cl_supp_cksum_types = OBD_CKSUM_ADLER; - } else { - cli->cl_supp_cksum_types = ocd->ocd_cksum_types; - } - } else { - /* - * The server does not support OBD_CONNECT_CKSUM. 
- * Enforce ADLER for backward compatibility - */ - cli->cl_supp_cksum_types = OBD_CKSUM_ADLER; - } - cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types); - - if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) - cli->cl_max_pages_per_rpc = - min(ocd->ocd_brw_size >> PAGE_SHIFT, - cli->cl_max_pages_per_rpc); - else if (imp->imp_connect_op == MDS_CONNECT || - imp->imp_connect_op == MGS_CONNECT) - cli->cl_max_pages_per_rpc = 1; - - LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) && - (cli->cl_max_pages_per_rpc > 0)); - - client_adjust_max_dirty(cli); - - /* - * Update client max modify RPCs in flight with value returned - * by the server - */ - if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) - cli->cl_max_mod_rpcs_in_flight = min( - cli->cl_max_mod_rpcs_in_flight, - ocd->ocd_maxmodrpcs); - else - cli->cl_max_mod_rpcs_in_flight = 1; - - /* - * Reset ns_connect_flags only for initial connect. It might be - * changed in while using FS and if we reset it in reconnect - * this leads to losing user settings done before such as - * disable lru_resize, etc. - */ - if (old_connect_flags != exp_connect_flags(exp) || init_connect) { - CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n", - imp->imp_obd->obd_name, ocd->ocd_connect_flags); - imp->imp_obd->obd_namespace->ns_connect_flags = - ocd->ocd_connect_flags; - imp->imp_obd->obd_namespace->ns_orig_connect_flags = - ocd->ocd_connect_flags; - } - - if (ocd->ocd_connect_flags & OBD_CONNECT_AT) - /* - * We need a per-message support flag, because - * a. we don't know if the incoming connect reply - * supports AT or not (in reply_in_callback) - * until we unpack it. - * b. failovered server means export and flags are gone - * (in ptlrpc_send_reply). 
- * Can only be set when we know AT is supported at - * both ends - */ - imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT; - else - imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; - - imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18; - - return 0; -} - -/** - * Add all replay requests back to unreplied list before start replay, - * so that we can make sure the known replied XID is always increased - * only even if when replaying requests. - */ -static void ptlrpc_prepare_replay(struct obd_import *imp) -{ - struct ptlrpc_request *req; - - if (imp->imp_state != LUSTRE_IMP_REPLAY || - imp->imp_resend_replay) - return; - - /* - * If the server was restart during repaly, the requests may - * have been added to the unreplied list in former replay. - */ - spin_lock(&imp->imp_lock); - - list_for_each_entry(req, &imp->imp_committed_list, rq_replay_list) { - if (list_empty(&req->rq_unreplied_list)) - ptlrpc_add_unreplied(req); - } - - list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list) { - if (list_empty(&req->rq_unreplied_list)) - ptlrpc_add_unreplied(req); - } - - imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp); - spin_unlock(&imp->imp_lock); -} - -/** - * interpret_reply callback for connect RPCs. - * Looks into returned status of connect operation and decides - * what to do with the import - i.e enter recovery, promote it to - * full state for normal operations of disconnect it due to an error. 
- */ -static int ptlrpc_connect_interpret(const struct lu_env *env, - struct ptlrpc_request *request, - void *data, int rc) -{ - struct ptlrpc_connect_async_args *aa = data; - struct obd_import *imp = request->rq_import; - struct lustre_handle old_hdl; - __u64 old_connect_flags; - int msg_flags; - struct obd_connect_data *ocd; - struct obd_export *exp; - int ret; - - spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_CLOSED) { - imp->imp_connect_tried = 1; - spin_unlock(&imp->imp_lock); - return 0; - } - - if (rc) { - /* if this reconnect to busy export - not need select new target - * for connecting - */ - imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc); - spin_unlock(&imp->imp_lock); - ptlrpc_maybe_ping_import_soon(imp); - goto out; - } - - /* - * LU-7558: indicate that we are interpretting connect reply, - * pltrpc_connect_import() will not try to reconnect until - * interpret will finish. - */ - imp->imp_connected = 1; - spin_unlock(&imp->imp_lock); - - LASSERT(imp->imp_conn_current); - - msg_flags = lustre_msg_get_op_flags(request->rq_repmsg); - - ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA, - RCL_SERVER); - /* server replied obd_connect_data is always bigger */ - ocd = req_capsule_server_sized_get(&request->rq_pill, - &RMF_CONNECT_DATA, ret); - - if (!ocd) { - CERROR("%s: no connect data from server\n", - imp->imp_obd->obd_name); - rc = -EPROTO; - goto out; - } - - spin_lock(&imp->imp_lock); - - /* All imports are pingable */ - imp->imp_pingable = 1; - imp->imp_force_reconnect = 0; - imp->imp_force_verify = 0; - - imp->imp_connect_data = *ocd; - - CDEBUG(D_HA, "%s: connect to target with instance %u\n", - imp->imp_obd->obd_name, ocd->ocd_instance); - exp = class_conn2export(&imp->imp_dlm_handle); - - spin_unlock(&imp->imp_lock); - - if (!exp) { - /* This could happen if export is cleaned during the - * connect attempt - */ - CERROR("%s: missing export after connect\n", - imp->imp_obd->obd_name); - rc = -ENODEV; - goto out; 
- } - - /* check that server granted subset of flags we asked for. */ - if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) != - ocd->ocd_connect_flags) { - CERROR("%s: Server didn't grant the asked for subset of flags: asked=%#llx granted=%#llx\n", - imp->imp_obd->obd_name, imp->imp_connect_flags_orig, - ocd->ocd_connect_flags); - rc = -EPROTO; - goto out; - } - - old_connect_flags = exp_connect_flags(exp); - exp->exp_connect_data = *ocd; - imp->imp_obd->obd_self_export->exp_connect_data = *ocd; - - /* - * The net statistics after (re-)connect is not valid anymore, - * because may reflect other routing, etc. - */ - at_init(&imp->imp_at.iat_net_latency, 0, 0); - ptlrpc_at_adj_net_latency(request, - lustre_msg_get_service_time(request->rq_repmsg)); - - /* Import flags should be updated before waking import at FULL state */ - rc = ptlrpc_connect_set_flags(imp, ocd, old_connect_flags, exp, - aa->pcaa_initial_connect); - class_export_put(exp); - if (rc) - goto out; - - obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD); - - if (aa->pcaa_initial_connect) { - spin_lock(&imp->imp_lock); - if (msg_flags & MSG_CONNECT_REPLAYABLE) { - imp->imp_replayable = 1; - spin_unlock(&imp->imp_lock); - CDEBUG(D_HA, "connected to replayable target: %s\n", - obd2cli_tgt(imp->imp_obd)); - } else { - imp->imp_replayable = 0; - spin_unlock(&imp->imp_lock); - } - - /* if applies, adjust the imp->imp_msg_magic here - * according to reply flags - */ - - imp->imp_remote_handle = - *lustre_msg_get_handle(request->rq_repmsg); - - /* Initial connects are allowed for clients with non-random - * uuids when servers are in recovery. Simply signal the - * servers replay is complete and wait in REPLAY_WAIT. 
- */ - if (msg_flags & MSG_CONNECT_RECOVERING) { - CDEBUG(D_HA, "connect to %s during recovery\n", - obd2cli_tgt(imp->imp_obd)); - IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS); - } else { - IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL); - ptlrpc_activate_import(imp); - } - - rc = 0; - goto finish; - } - - /* Determine what recovery state to move the import to. */ - if (msg_flags & MSG_CONNECT_RECONNECT) { - memset(&old_hdl, 0, sizeof(old_hdl)); - if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg), - sizeof(old_hdl))) { - LCONSOLE_WARN("Reconnect to %s (at @%s) failed due bad handle %#llx\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid, - imp->imp_dlm_handle.cookie); - rc = -ENOTCONN; - goto out; - } - - if (memcmp(&imp->imp_remote_handle, - lustre_msg_get_handle(request->rq_repmsg), - sizeof(imp->imp_remote_handle))) { - int level = msg_flags & MSG_CONNECT_RECOVERING ? - D_HA : D_WARNING; - - /* Bug 16611/14775: if server handle have changed, - * that means some sort of disconnection happened. - * If the server is not in recovery, that also means it - * already erased all of our state because of previous - * eviction. 
If it is in recovery - we are safe to - * participate since we can reestablish all of our state - * with server again - */ - if ((msg_flags & MSG_CONNECT_RECOVERING)) { - CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid, - imp->imp_remote_handle.cookie, - lustre_msg_get_handle( - request->rq_repmsg)->cookie); - } else { - LCONSOLE_WARN("Evicted from %s (at %s) after server handle changed from %#llx to %#llx\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection-> \ - c_remote_uuid.uuid, - imp->imp_remote_handle.cookie, - lustre_msg_get_handle( - request->rq_repmsg)->cookie); - } - - imp->imp_remote_handle = - *lustre_msg_get_handle(request->rq_repmsg); - - if (!(msg_flags & MSG_CONNECT_RECOVERING)) { - IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED); - rc = 0; - goto finish; - } - - } else { - CDEBUG(D_HA, "reconnected to %s@%s after partition\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid); - } - - if (imp->imp_invalid) { - CDEBUG(D_HA, "%s: reconnected but import is invalid; marking evicted\n", - imp->imp_obd->obd_name); - IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED); - } else if (msg_flags & MSG_CONNECT_RECOVERING) { - CDEBUG(D_HA, "%s: reconnected to %s during replay\n", - imp->imp_obd->obd_name, - obd2cli_tgt(imp->imp_obd)); - - spin_lock(&imp->imp_lock); - imp->imp_resend_replay = 1; - spin_unlock(&imp->imp_lock); - - IMPORT_SET_STATE(imp, imp->imp_replay_state); - } else { - IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); - } - } else if ((msg_flags & MSG_CONNECT_RECOVERING) && !imp->imp_invalid) { - LASSERT(imp->imp_replayable); - imp->imp_remote_handle = - *lustre_msg_get_handle(request->rq_repmsg); - imp->imp_last_replay_transno = 0; - imp->imp_replay_cursor = &imp->imp_committed_list; - IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY); - } else { - DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)", - 
imp->imp_obd->obd_name, msg_flags); - imp->imp_remote_handle = - *lustre_msg_get_handle(request->rq_repmsg); - IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED); - } - - /* Sanity checks for a reconnected import. */ - if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) - CERROR("imp_replayable flag does not match server after reconnect. We should LBUG right here.\n"); - - if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 && - lustre_msg_get_last_committed(request->rq_repmsg) < - aa->pcaa_peer_committed) - CERROR("%s went back in time (transno %lld was previously committed, server now claims %lld)! See https://bugzilla.lustre.org/show_bug.cgi?id=9646\n", - obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed, - lustre_msg_get_last_committed(request->rq_repmsg)); - -finish: - ptlrpc_prepare_replay(imp); - rc = ptlrpc_import_recovery_state_machine(imp); - if (rc == -ENOTCONN) { - CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid); - ptlrpc_connect_import(imp); - spin_lock(&imp->imp_lock); - imp->imp_connected = 0; - imp->imp_connect_tried = 1; - spin_unlock(&imp->imp_lock); - return 0; - } - -out: - spin_lock(&imp->imp_lock); - imp->imp_connected = 0; - imp->imp_connect_tried = 1; - spin_unlock(&imp->imp_lock); - - if (rc != 0) { - IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON); - if (rc == -EACCES) { - /* - * Give up trying to reconnect - * EACCES means client has no permission for connection - */ - imp->imp_obd->obd_no_recov = 1; - ptlrpc_deactivate_import(imp); - } - - if (rc == -EPROTO) { - struct obd_connect_data *ocd; - - /* reply message might not be ready */ - if (!request->rq_repmsg) - return -EPROTO; - - ocd = req_capsule_server_get(&request->rq_pill, - &RMF_CONNECT_DATA); - if (ocd && - (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && - (ocd->ocd_version != LUSTRE_VERSION_CODE)) { - /* - * Actually servers are only supposed to refuse - * 
connection from liblustre clients, so we - * should never see this from VFS context - */ - LCONSOLE_ERROR_MSG(0x16a, "Server %s version (%d.%d.%d.%d) refused connection from this client with an incompatible version (%s). Client must be recompiled\n", - obd2cli_tgt(imp->imp_obd), - OBD_OCD_VERSION_MAJOR(ocd->ocd_version), - OBD_OCD_VERSION_MINOR(ocd->ocd_version), - OBD_OCD_VERSION_PATCH(ocd->ocd_version), - OBD_OCD_VERSION_FIX(ocd->ocd_version), - LUSTRE_VERSION_STRING); - ptlrpc_deactivate_import(imp); - IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED); - } - return -EPROTO; - } - - ptlrpc_maybe_ping_import_soon(imp); - - CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n", - obd2cli_tgt(imp->imp_obd), - (char *)imp->imp_connection->c_remote_uuid.uuid, rc); - } - - wake_up_all(&imp->imp_recovery_waitq); - return rc; -} - -/** - * interpret callback for "completed replay" RPCs. - * \see signal_completed_replay - */ -static int completed_replay_interpret(const struct lu_env *env, - struct ptlrpc_request *req, - void *data, int rc) -{ - atomic_dec(&req->rq_import->imp_replay_inflight); - if (req->rq_status == 0 && - !req->rq_import->imp_vbr_failed) { - ptlrpc_import_recovery_state_machine(req->rq_import); - } else { - if (req->rq_import->imp_vbr_failed) { - CDEBUG(D_WARNING, - "%s: version recovery fails, reconnecting\n", - req->rq_import->imp_obd->obd_name); - } else { - CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, reconnecting\n", - req->rq_import->imp_obd->obd_name, - req->rq_status); - } - ptlrpc_connect_import(req->rq_import); - } - - return 0; -} - -/** - * Let server know that we have no requests to replay anymore. 
- * Achieved by just sending a PING request - */ -static int signal_completed_replay(struct obd_import *imp) -{ - struct ptlrpc_request *req; - - if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY))) - return 0; - - LASSERT(atomic_read(&imp->imp_replay_inflight) == 0); - atomic_inc(&imp->imp_replay_inflight); - - req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION, - OBD_PING); - if (!req) { - atomic_dec(&imp->imp_replay_inflight); - return -ENOMEM; - } - - ptlrpc_request_set_replen(req); - req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT; - lustre_msg_add_flags(req->rq_reqmsg, - MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE); - if (AT_OFF) - req->rq_timeout *= 3; - req->rq_interpret_reply = completed_replay_interpret; - - ptlrpcd_add_req(req); - return 0; -} - -/** - * In kernel code all import invalidation happens in its own - * separate thread, so that whatever application happened to encounter - * a problem could still be killed or otherwise continue - */ -static int ptlrpc_invalidate_import_thread(void *data) -{ - struct obd_import *imp = data; - - unshare_fs_struct(); - - CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n", - imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid); - - ptlrpc_invalidate_import(imp); - - if (obd_dump_on_eviction) { - CERROR("dump the log upon eviction\n"); - libcfs_debug_dumplog(); - } - - IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); - ptlrpc_import_recovery_state_machine(imp); - - class_import_put(imp); - return 0; -} - -/** - * This is the state machine for client-side recovery on import. - * - * Typically we have two possibly paths. If we came to server and it is not - * in recovery, we just enter IMP_EVICTED state, invalidate our import - * state and reconnect from scratch. - * If we came to server that is in recovery, we enter IMP_REPLAY import state. - * We go through our list of requests to replay and send them to server one by - * one. 
- * After sending all request from the list we change import state to - * IMP_REPLAY_LOCKS and re-request all the locks we believe we have from server - * and also all the locks we don't yet have and wait for server to grant us. - * After that we send a special "replay completed" request and change import - * state to IMP_REPLAY_WAIT. - * Upon receiving reply to that "replay completed" RPC we enter IMP_RECOVER - * state and resend all requests from sending list. - * After that we promote import to FULL state and send all delayed requests - * and import is fully operational after that. - * - */ -int ptlrpc_import_recovery_state_machine(struct obd_import *imp) -{ - int rc = 0; - int inflight; - char *target_start; - int target_len; - - if (imp->imp_state == LUSTRE_IMP_EVICTED) { - deuuidify(obd2cli_tgt(imp->imp_obd), NULL, - &target_start, &target_len); - /* Don't care about MGC eviction */ - if (strcmp(imp->imp_obd->obd_type->typ_name, - LUSTRE_MGC_NAME) != 0) { - LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted by %.*s; in progress operations using this service will fail.\n", - imp->imp_obd->obd_name, target_len, - target_start); - } - CDEBUG(D_HA, "evicted from %s@%s; invalidating\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid); - /* reset vbr_failed flag upon eviction */ - spin_lock(&imp->imp_lock); - imp->imp_vbr_failed = 0; - spin_unlock(&imp->imp_lock); - - { - struct task_struct *task; - /* bug 17802: XXX client_disconnect_export vs connect request - * race. if client is evicted at this time, we start - * invalidate thread without reference to import and import can - * be freed at same time. 
- */ - class_import_get(imp); - task = kthread_run(ptlrpc_invalidate_import_thread, imp, - "ll_imp_inval"); - if (IS_ERR(task)) { - class_import_put(imp); - CERROR("error starting invalidate thread: %d\n", rc); - rc = PTR_ERR(task); - } else { - rc = 0; - } - return rc; - } - } - - if (imp->imp_state == LUSTRE_IMP_REPLAY) { - CDEBUG(D_HA, "replay requested by %s\n", - obd2cli_tgt(imp->imp_obd)); - rc = ptlrpc_replay_next(imp, &inflight); - if (inflight == 0 && - atomic_read(&imp->imp_replay_inflight) == 0) { - IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS); - rc = ldlm_replay_locks(imp); - if (rc) - goto out; - } - rc = 0; - } - - if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) - if (atomic_read(&imp->imp_replay_inflight) == 0) { - IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT); - rc = signal_completed_replay(imp); - if (rc) - goto out; - } - - if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) - if (atomic_read(&imp->imp_replay_inflight) == 0) - IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); - - if (imp->imp_state == LUSTRE_IMP_RECOVER) { - CDEBUG(D_HA, "reconnected to %s@%s\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid); - - rc = ptlrpc_resend(imp); - if (rc) - goto out; - IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL); - ptlrpc_activate_import(imp); - - deuuidify(obd2cli_tgt(imp->imp_obd), NULL, - &target_start, &target_len); - LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n", - imp->imp_obd->obd_name, - target_len, target_start, - libcfs_nid2str(imp->imp_connection->c_peer.nid)); - } - - if (imp->imp_state == LUSTRE_IMP_FULL) { - wake_up_all(&imp->imp_recovery_waitq); - ptlrpc_wake_delayed(imp); - } - -out: - return rc; -} - -int ptlrpc_disconnect_import(struct obd_import *imp, int noclose) -{ - struct ptlrpc_request *req; - int rq_opc, rc = 0; - - if (imp->imp_obd->obd_force) - goto set_state; - - switch (imp->imp_connect_op) { - case OST_CONNECT: - rq_opc = OST_DISCONNECT; - break; - case MDS_CONNECT: - rq_opc = MDS_DISCONNECT; - break; - 
case MGS_CONNECT: - rq_opc = MGS_DISCONNECT; - break; - default: - rc = -EINVAL; - CERROR("%s: don't know how to disconnect from %s (connect_op %d): rc = %d\n", - imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd), - imp->imp_connect_op, rc); - return rc; - } - - if (ptlrpc_import_in_recovery(imp)) { - unsigned long timeout; - - if (AT_OFF) { - if (imp->imp_server_timeout) - timeout = obd_timeout * HZ / 2; - else - timeout = obd_timeout * HZ; - } else { - int idx = import_at_get_index(imp, - imp->imp_client->cli_request_portal); - timeout = at_get(&imp->imp_at.iat_service_estimate[idx]) * HZ; - } - - if (wait_event_idle_timeout(imp->imp_recovery_waitq, - !ptlrpc_import_in_recovery(imp), - max(timeout, 1UL)) == 0) - l_wait_event_abortable( - imp->imp_recovery_waitq, - !ptlrpc_import_in_recovery(imp)); - } - - spin_lock(&imp->imp_lock); - if (imp->imp_state != LUSTRE_IMP_FULL) - goto out; - spin_unlock(&imp->imp_lock); - - req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT, - LUSTRE_OBD_VERSION, rq_opc); - if (req) { - /* We are disconnecting, do not retry a failed DISCONNECT rpc if - * it fails. We can get through the above with a down server - * if the client doesn't know the server is gone yet. - */ - req->rq_no_resend = 1; - - /* We want client umounts to happen quickly, no matter the - * server state... 
- */ - req->rq_timeout = min_t(int, req->rq_timeout, - INITIAL_CONNECT_TIMEOUT); - - IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING); - req->rq_send_state = LUSTRE_IMP_CONNECTING; - ptlrpc_request_set_replen(req); - rc = ptlrpc_queue_wait(req); - ptlrpc_req_finished(req); - } - -set_state: - spin_lock(&imp->imp_lock); -out: - if (noclose) - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON); - else - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED); - memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle)); - spin_unlock(&imp->imp_lock); - - if (rc == -ETIMEDOUT || rc == -ENOTCONN || rc == -ESHUTDOWN) - rc = 0; - - return rc; -} -EXPORT_SYMBOL(ptlrpc_disconnect_import); - -/* Adaptive Timeout utils */ -extern unsigned int at_min, at_max, at_history; - -/* - *Update at_current with the specified value (bounded by at_min and at_max), - * as well as the AT history "bins". - * - Bin into timeslices using AT_BINS bins. - * - This gives us a max of the last at_history seconds without the storage, - * but still smoothing out a return to normalcy from a slow response. - * - (E.g. remember the maximum latency in each minute of the last 4 minutes.) 
- */ -int at_measured(struct adaptive_timeout *at, unsigned int val) -{ - unsigned int old = at->at_current; - time64_t now = ktime_get_real_seconds(); - long binlimit = max_t(long, at_history / AT_BINS, 1); - - LASSERT(at); - CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n", - val, at, (long)(now - at->at_binstart), at->at_current, - at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]); - - if (val == 0) - /* 0's don't count, because we never want our timeout to - * drop to 0, and because 0 could mean an error - */ - return 0; - - spin_lock(&at->at_lock); - - if (unlikely(at->at_binstart == 0)) { - /* Special case to remove default from history */ - at->at_current = val; - at->at_worst_ever = val; - at->at_worst_time = now; - at->at_hist[0] = val; - at->at_binstart = now; - } else if (now - at->at_binstart < binlimit) { - /* in bin 0 */ - at->at_hist[0] = max(val, at->at_hist[0]); - at->at_current = max(val, at->at_current); - } else { - int i, shift; - unsigned int maxv = val; - /* move bins over */ - shift = (u32)(now - at->at_binstart) / binlimit; - LASSERT(shift > 0); - for (i = AT_BINS - 1; i >= 0; i--) { - if (i >= shift) { - at->at_hist[i] = at->at_hist[i - shift]; - maxv = max(maxv, at->at_hist[i]); - } else { - at->at_hist[i] = 0; - } - } - at->at_hist[0] = val; - at->at_current = maxv; - at->at_binstart += shift * binlimit; - } - - if (at->at_current > at->at_worst_ever) { - at->at_worst_ever = at->at_current; - at->at_worst_time = now; - } - - if (at->at_flags & AT_FLG_NOHIST) - /* Only keep last reported val; keeping the rest of the history - * for debugfs only - */ - at->at_current = val; - - if (at_max > 0) - at->at_current = min(at->at_current, at_max); - at->at_current = max(at->at_current, at_min); - - if (at->at_current != old) - CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d (val=%u) hist %u %u %u %u\n", - at, - old, at->at_current, at->at_current - old, val, - at->at_hist[0], at->at_hist[1], at->at_hist[2], - 
at->at_hist[3]); - - /* if we changed, report the old value */ - old = (at->at_current != old) ? old : 0; - - spin_unlock(&at->at_lock); - return old; -} - -/* Find the imp_at index for a given portal; assign if space available */ -int import_at_get_index(struct obd_import *imp, int portal) -{ - struct imp_at *at = &imp->imp_at; - int i; - - for (i = 0; i < IMP_AT_MAX_PORTALS; i++) { - if (at->iat_portal[i] == portal) - return i; - if (at->iat_portal[i] == 0) - /* unused */ - break; - } - - /* Not found in list, add it under a lock */ - spin_lock(&imp->imp_lock); - - /* Check unused under lock */ - for (; i < IMP_AT_MAX_PORTALS; i++) { - if (at->iat_portal[i] == portal) - goto out; - if (at->iat_portal[i] == 0) - /* unused */ - break; - } - - /* Not enough portals? */ - LASSERT(i < IMP_AT_MAX_PORTALS); - - at->iat_portal[i] = portal; -out: - spin_unlock(&imp->imp_lock); - return i; -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c deleted file mode 100644 index 417d4a151433..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/layout.c +++ /dev/null @@ -1,2232 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/layout.c - * - * Lustre Metadata Target (mdt) request handler - * - * Author: Nikita Danilov - */ -/* - * This file contains the "capsule/pill" abstraction layered above PTLRPC. - * - * Every struct ptlrpc_request contains a "pill", which points to a description - * of the format that the request conforms to. - */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include - -#include - -#include -#include -#include -#include -#include -#include - -/* struct ptlrpc_request, lustre_msg* */ -#include -#include - -/* - * RQFs (see below) refer to two struct req_msg_field arrays describing the - * client request and server reply, respectively. - */ -/* empty set of fields... for suitable definition of emptiness. 
*/ -static const struct req_msg_field *empty[] = { - &RMF_PTLRPC_BODY -}; - -static const struct req_msg_field *mgs_target_info_only[] = { - &RMF_PTLRPC_BODY, - &RMF_MGS_TARGET_INFO -}; - -static const struct req_msg_field *mgs_set_info[] = { - &RMF_PTLRPC_BODY, - &RMF_MGS_SEND_PARAM -}; - -static const struct req_msg_field *mgs_config_read_client[] = { - &RMF_PTLRPC_BODY, - &RMF_MGS_CONFIG_BODY -}; - -static const struct req_msg_field *mgs_config_read_server[] = { - &RMF_PTLRPC_BODY, - &RMF_MGS_CONFIG_RES -}; - -static const struct req_msg_field *log_cancel_client[] = { - &RMF_PTLRPC_BODY, - &RMF_LOGCOOKIES -}; - -static const struct req_msg_field *mdt_body_only[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY -}; - -static const struct req_msg_field *mdt_body_capa[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_CAPA1 -}; - -static const struct req_msg_field *quotactl_only[] = { - &RMF_PTLRPC_BODY, - &RMF_OBD_QUOTACTL -}; - -static const struct req_msg_field *mdt_close_client[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_EPOCH, - &RMF_REC_REINT, - &RMF_CAPA1 -}; - -static const struct req_msg_field *mdt_intent_close_client[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_EPOCH, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_CLOSE_DATA -}; - -static const struct req_msg_field *obd_statfs_server[] = { - &RMF_PTLRPC_BODY, - &RMF_OBD_STATFS -}; - -static const struct req_msg_field *seq_query_client[] = { - &RMF_PTLRPC_BODY, - &RMF_SEQ_OPC, - &RMF_SEQ_RANGE -}; - -static const struct req_msg_field *seq_query_server[] = { - &RMF_PTLRPC_BODY, - &RMF_SEQ_RANGE -}; - -static const struct req_msg_field *fld_query_client[] = { - &RMF_PTLRPC_BODY, - &RMF_FLD_OPC, - &RMF_FLD_MDFLD -}; - -static const struct req_msg_field *fld_query_server[] = { - &RMF_PTLRPC_BODY, - &RMF_FLD_MDFLD -}; - -static const struct req_msg_field *fld_read_client[] = { - &RMF_PTLRPC_BODY, - &RMF_FLD_MDFLD -}; - -static const struct req_msg_field *fld_read_server[] = { - &RMF_PTLRPC_BODY, - &RMF_GENERIC_DATA -}; - -static const struct 
req_msg_field *mds_getattr_name_client[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_CAPA1, - &RMF_NAME -}; - -static const struct req_msg_field *mds_reint_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT -}; - -static const struct req_msg_field *mds_reint_create_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_NAME -}; - -static const struct req_msg_field *mds_reint_create_slave_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_NAME, - &RMF_EADATA, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *mds_reint_create_acl_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_NAME, - &RMF_EADATA, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *mds_reint_create_sym_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_NAME, - &RMF_SYMTGT, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *mds_reint_open_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_CAPA2, - &RMF_NAME, - &RMF_EADATA -}; - -static const struct req_msg_field *mds_reint_open_server[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_MDT_MD, - &RMF_ACL, - &RMF_CAPA1, - &RMF_CAPA2 -}; - -static const struct req_msg_field *mds_reint_unlink_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_NAME, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *mds_reint_link_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_CAPA2, - &RMF_NAME, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *mds_reint_rename_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_CAPA2, - &RMF_NAME, - &RMF_SYMTGT, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *mds_reint_migrate_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_CAPA2, - &RMF_NAME, - &RMF_SYMTGT, - &RMF_DLM_REQ, - &RMF_MDT_EPOCH, - &RMF_CLOSE_DATA -}; - -static const struct req_msg_field *mds_last_unlink_server[] = { - &RMF_PTLRPC_BODY, - 
&RMF_MDT_BODY, - &RMF_MDT_MD, - &RMF_LOGCOOKIES, - &RMF_CAPA1, - &RMF_CAPA2 -}; - -static const struct req_msg_field *mds_reint_setattr_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_MDT_EPOCH, - &RMF_EADATA, - &RMF_LOGCOOKIES, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *mds_reint_setxattr_client[] = { - &RMF_PTLRPC_BODY, - &RMF_REC_REINT, - &RMF_CAPA1, - &RMF_NAME, - &RMF_EADATA, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *mdt_swap_layouts[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_SWAP_LAYOUTS, - &RMF_CAPA1, - &RMF_CAPA2, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *obd_connect_client[] = { - &RMF_PTLRPC_BODY, - &RMF_TGTUUID, - &RMF_CLUUID, - &RMF_CONN, - &RMF_CONNECT_DATA -}; - -static const struct req_msg_field *obd_connect_server[] = { - &RMF_PTLRPC_BODY, - &RMF_CONNECT_DATA -}; - -static const struct req_msg_field *obd_set_info_client[] = { - &RMF_PTLRPC_BODY, - &RMF_SETINFO_KEY, - &RMF_SETINFO_VAL -}; - -static const struct req_msg_field *ost_grant_shrink_client[] = { - &RMF_PTLRPC_BODY, - &RMF_SETINFO_KEY, - &RMF_OST_BODY -}; - -static const struct req_msg_field *mds_getinfo_client[] = { - &RMF_PTLRPC_BODY, - &RMF_GETINFO_KEY, - &RMF_GETINFO_VALLEN -}; - -static const struct req_msg_field *mds_getinfo_server[] = { - &RMF_PTLRPC_BODY, - &RMF_GETINFO_VAL, -}; - -static const struct req_msg_field *ldlm_enqueue_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ -}; - -static const struct req_msg_field *ldlm_enqueue_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP -}; - -static const struct req_msg_field *ldlm_enqueue_lvb_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP, - &RMF_DLM_LVB -}; - -static const struct req_msg_field *ldlm_cp_callback_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_DLM_LVB -}; - -static const struct req_msg_field *ldlm_gl_callback_desc_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_DLM_GL_DESC -}; - -static const struct req_msg_field 
*ldlm_gl_callback_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_LVB -}; - -static const struct req_msg_field *ldlm_intent_basic_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, -}; - -static const struct req_msg_field *ldlm_intent_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_REC_REINT -}; - -static const struct req_msg_field *ldlm_intent_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP, - &RMF_MDT_BODY, - &RMF_MDT_MD, - &RMF_ACL -}; - -static const struct req_msg_field *ldlm_intent_layout_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_LAYOUT_INTENT, - &RMF_EADATA /* for new layout to be set up */ -}; - -static const struct req_msg_field *ldlm_intent_open_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP, - &RMF_MDT_BODY, - &RMF_MDT_MD, - &RMF_ACL, - &RMF_CAPA1, - &RMF_CAPA2 -}; - -static const struct req_msg_field *ldlm_intent_getattr_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_MDT_BODY, /* coincides with mds_getattr_name_client[] */ - &RMF_CAPA1, - &RMF_NAME -}; - -static const struct req_msg_field *ldlm_intent_getattr_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP, - &RMF_MDT_BODY, - &RMF_MDT_MD, - &RMF_ACL, - &RMF_CAPA1 -}; - -static const struct req_msg_field *ldlm_intent_create_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_REC_REINT, /* coincides with mds_reint_create_client[] */ - &RMF_CAPA1, - &RMF_NAME, - &RMF_EADATA -}; - -static const struct req_msg_field *ldlm_intent_open_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_REC_REINT, /* coincides with mds_reint_open_client[] */ - &RMF_CAPA1, - &RMF_CAPA2, - &RMF_NAME, - &RMF_EADATA -}; - -static const struct req_msg_field *ldlm_intent_unlink_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_REC_REINT, /* coincides with mds_reint_unlink_client[] */ - &RMF_CAPA1, - &RMF_NAME -}; - -static const struct req_msg_field 
*ldlm_intent_getxattr_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_MDT_BODY, - &RMF_CAPA1, -}; - -static const struct req_msg_field *ldlm_intent_getxattr_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP, - &RMF_MDT_BODY, - &RMF_MDT_MD, - &RMF_ACL, /* for req_capsule_extend/mdt_intent_policy */ - &RMF_EADATA, - &RMF_EAVALS, - &RMF_EAVALS_LENS -}; - -static const struct req_msg_field *mds_getxattr_client[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_CAPA1, - &RMF_NAME, - &RMF_EADATA -}; - -static const struct req_msg_field *mds_getxattr_server[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_EADATA -}; - -static const struct req_msg_field *mds_getattr_server[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_MDT_MD, - &RMF_ACL, - &RMF_CAPA1, - &RMF_CAPA2 -}; - -static const struct req_msg_field *mds_setattr_server[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_MDT_MD, - &RMF_ACL, - &RMF_CAPA1, - &RMF_CAPA2 -}; - -static const struct req_msg_field *llog_origin_handle_create_client[] = { - &RMF_PTLRPC_BODY, - &RMF_LLOGD_BODY, - &RMF_NAME -}; - -static const struct req_msg_field *llogd_body_only[] = { - &RMF_PTLRPC_BODY, - &RMF_LLOGD_BODY -}; - -static const struct req_msg_field *llog_log_hdr_only[] = { - &RMF_PTLRPC_BODY, - &RMF_LLOG_LOG_HDR -}; - -static const struct req_msg_field *llogd_conn_body_only[] = { - &RMF_PTLRPC_BODY, - &RMF_LLOGD_CONN_BODY -}; - -static const struct req_msg_field *llog_origin_handle_next_block_server[] = { - &RMF_PTLRPC_BODY, - &RMF_LLOGD_BODY, - &RMF_EADATA -}; - -static const struct req_msg_field *ost_body_only[] = { - &RMF_PTLRPC_BODY, - &RMF_OST_BODY -}; - -static const struct req_msg_field *ost_body_capa[] = { - &RMF_PTLRPC_BODY, - &RMF_OST_BODY, - &RMF_CAPA1 -}; - -static const struct req_msg_field *ost_destroy_client[] = { - &RMF_PTLRPC_BODY, - &RMF_OST_BODY, - &RMF_DLM_REQ, - &RMF_CAPA1 -}; - -static const struct req_msg_field *ost_brw_client[] = { - &RMF_PTLRPC_BODY, - &RMF_OST_BODY, - 
&RMF_OBD_IOOBJ, - &RMF_NIOBUF_REMOTE, - &RMF_CAPA1 -}; - -static const struct req_msg_field *ost_brw_read_server[] = { - &RMF_PTLRPC_BODY, - &RMF_OST_BODY -}; - -static const struct req_msg_field *ost_brw_write_server[] = { - &RMF_PTLRPC_BODY, - &RMF_OST_BODY, - &RMF_RCS -}; - -static const struct req_msg_field *ost_get_info_generic_server[] = { - &RMF_PTLRPC_BODY, - &RMF_GENERIC_DATA, -}; - -static const struct req_msg_field *ost_get_info_generic_client[] = { - &RMF_PTLRPC_BODY, - &RMF_GETINFO_KEY -}; - -static const struct req_msg_field *ost_get_last_id_server[] = { - &RMF_PTLRPC_BODY, - &RMF_OBD_ID -}; - -static const struct req_msg_field *ost_get_last_fid_client[] = { - &RMF_PTLRPC_BODY, - &RMF_GETINFO_KEY, - &RMF_FID, -}; - -static const struct req_msg_field *ost_get_last_fid_server[] = { - &RMF_PTLRPC_BODY, - &RMF_FID, -}; - -static const struct req_msg_field *ost_get_fiemap_client[] = { - &RMF_PTLRPC_BODY, - &RMF_FIEMAP_KEY, - &RMF_FIEMAP_VAL -}; - -static const struct req_msg_field *ost_get_fiemap_server[] = { - &RMF_PTLRPC_BODY, - &RMF_FIEMAP_VAL -}; - -static const struct req_msg_field *mdt_hsm_progress[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_MDS_HSM_PROGRESS, -}; - -static const struct req_msg_field *mdt_hsm_ct_register[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_MDS_HSM_ARCHIVE, -}; - -static const struct req_msg_field *mdt_hsm_ct_unregister[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, -}; - -static const struct req_msg_field *mdt_hsm_action_server[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_MDS_HSM_CURRENT_ACTION, -}; - -static const struct req_msg_field *mdt_hsm_state_get_server[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_HSM_USER_STATE, -}; - -static const struct req_msg_field *mdt_hsm_state_set[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_CAPA1, - &RMF_HSM_STATE_SET, -}; - -static const struct req_msg_field *mdt_hsm_request[] = { - &RMF_PTLRPC_BODY, - &RMF_MDT_BODY, - &RMF_MDS_HSM_REQUEST, - &RMF_MDS_HSM_USER_ITEM, - 
&RMF_GENERIC_DATA, -}; - -static struct req_format *req_formats[] = { - &RQF_OBD_PING, - &RQF_OBD_SET_INFO, - &RQF_SEC_CTX, - &RQF_MGS_TARGET_REG, - &RQF_MGS_SET_INFO, - &RQF_MGS_CONFIG_READ, - &RQF_SEQ_QUERY, - &RQF_FLD_QUERY, - &RQF_FLD_READ, - &RQF_MDS_CONNECT, - &RQF_MDS_DISCONNECT, - &RQF_MDS_GET_INFO, - &RQF_MDS_GETSTATUS, - &RQF_MDS_STATFS, - &RQF_MDS_GETATTR, - &RQF_MDS_GETATTR_NAME, - &RQF_MDS_GETXATTR, - &RQF_MDS_SYNC, - &RQF_MDS_CLOSE, - &RQF_MDS_INTENT_CLOSE, - &RQF_MDS_READPAGE, - &RQF_MDS_WRITEPAGE, - &RQF_MDS_REINT, - &RQF_MDS_REINT_CREATE, - &RQF_MDS_REINT_CREATE_ACL, - &RQF_MDS_REINT_CREATE_SLAVE, - &RQF_MDS_REINT_CREATE_SYM, - &RQF_MDS_REINT_OPEN, - &RQF_MDS_REINT_UNLINK, - &RQF_MDS_REINT_LINK, - &RQF_MDS_REINT_RENAME, - &RQF_MDS_REINT_MIGRATE, - &RQF_MDS_REINT_SETATTR, - &RQF_MDS_REINT_SETXATTR, - &RQF_MDS_QUOTACTL, - &RQF_MDS_HSM_PROGRESS, - &RQF_MDS_HSM_CT_REGISTER, - &RQF_MDS_HSM_CT_UNREGISTER, - &RQF_MDS_HSM_STATE_GET, - &RQF_MDS_HSM_STATE_SET, - &RQF_MDS_HSM_ACTION, - &RQF_MDS_HSM_REQUEST, - &RQF_MDS_SWAP_LAYOUTS, - &RQF_OST_CONNECT, - &RQF_OST_DISCONNECT, - &RQF_OST_QUOTACTL, - &RQF_OST_GETATTR, - &RQF_OST_SETATTR, - &RQF_OST_CREATE, - &RQF_OST_PUNCH, - &RQF_OST_SYNC, - &RQF_OST_DESTROY, - &RQF_OST_BRW_READ, - &RQF_OST_BRW_WRITE, - &RQF_OST_STATFS, - &RQF_OST_SET_GRANT_INFO, - &RQF_OST_GET_INFO, - &RQF_OST_GET_INFO_LAST_ID, - &RQF_OST_GET_INFO_LAST_FID, - &RQF_OST_SET_INFO_LAST_FID, - &RQF_OST_GET_INFO_FIEMAP, - &RQF_LDLM_ENQUEUE, - &RQF_LDLM_ENQUEUE_LVB, - &RQF_LDLM_CONVERT, - &RQF_LDLM_CANCEL, - &RQF_LDLM_CALLBACK, - &RQF_LDLM_CP_CALLBACK, - &RQF_LDLM_BL_CALLBACK, - &RQF_LDLM_GL_CALLBACK, - &RQF_LDLM_GL_DESC_CALLBACK, - &RQF_LDLM_INTENT, - &RQF_LDLM_INTENT_BASIC, - &RQF_LDLM_INTENT_LAYOUT, - &RQF_LDLM_INTENT_GETATTR, - &RQF_LDLM_INTENT_OPEN, - &RQF_LDLM_INTENT_CREATE, - &RQF_LDLM_INTENT_UNLINK, - &RQF_LDLM_INTENT_GETXATTR, - &RQF_LOG_CANCEL, - &RQF_LLOG_ORIGIN_HANDLE_CREATE, - &RQF_LLOG_ORIGIN_HANDLE_DESTROY, - 
&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK, - &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK, - &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER, - &RQF_LLOG_ORIGIN_CONNECT, - &RQF_CONNECT, -}; - -struct req_msg_field { - const __u32 rmf_flags; - const char *rmf_name; - /** - * Field length. (-1) means "variable length". If the - * \a RMF_F_STRUCT_ARRAY flag is set the field is also variable-length, - * but the actual size must be a whole multiple of \a rmf_size. - */ - const int rmf_size; - void (*rmf_swabber)(void *); - void (*rmf_dumper)(void *); - int rmf_offset[ARRAY_SIZE(req_formats)][RCL_NR]; -}; - -enum rmf_flags { - /** - * The field is a string, must be NUL-terminated. - */ - RMF_F_STRING = BIT(0), - /** - * The field's buffer size need not match the declared \a rmf_size. - */ - RMF_F_NO_SIZE_CHECK = BIT(1), - /** - * The field's buffer size must be a whole multiple of the declared \a - * rmf_size and the \a rmf_swabber function must work on the declared \a - * rmf_size worth of bytes. - */ - RMF_F_STRUCT_ARRAY = BIT(2) -}; - -struct req_capsule; - -/* - * Request fields. 
- */ -#define DEFINE_MSGF(name, flags, size, swabber, dumper) { \ - .rmf_name = (name), \ - .rmf_flags = (flags), \ - .rmf_size = (size), \ - .rmf_swabber = (void (*)(void *))(swabber), \ - .rmf_dumper = (void (*)(void *))(dumper) \ -} - -struct req_msg_field RMF_GENERIC_DATA = - DEFINE_MSGF("generic_data", 0, - -1, NULL, NULL); -EXPORT_SYMBOL(RMF_GENERIC_DATA); - -struct req_msg_field RMF_MGS_TARGET_INFO = - DEFINE_MSGF("mgs_target_info", 0, - sizeof(struct mgs_target_info), - lustre_swab_mgs_target_info, NULL); -EXPORT_SYMBOL(RMF_MGS_TARGET_INFO); - -struct req_msg_field RMF_MGS_SEND_PARAM = - DEFINE_MSGF("mgs_send_param", 0, - sizeof(struct mgs_send_param), - NULL, NULL); -EXPORT_SYMBOL(RMF_MGS_SEND_PARAM); - -struct req_msg_field RMF_MGS_CONFIG_BODY = - DEFINE_MSGF("mgs_config_read request", 0, - sizeof(struct mgs_config_body), - lustre_swab_mgs_config_body, NULL); -EXPORT_SYMBOL(RMF_MGS_CONFIG_BODY); - -struct req_msg_field RMF_MGS_CONFIG_RES = - DEFINE_MSGF("mgs_config_read reply ", 0, - sizeof(struct mgs_config_res), - lustre_swab_mgs_config_res, NULL); -EXPORT_SYMBOL(RMF_MGS_CONFIG_RES); - -struct req_msg_field RMF_U32 = - DEFINE_MSGF("generic u32", 0, - sizeof(__u32), lustre_swab_generic_32s, NULL); -EXPORT_SYMBOL(RMF_U32); - -struct req_msg_field RMF_SETINFO_VAL = - DEFINE_MSGF("setinfo_val", 0, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_SETINFO_VAL); - -struct req_msg_field RMF_GETINFO_KEY = - DEFINE_MSGF("getinfo_key", 0, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_GETINFO_KEY); - -struct req_msg_field RMF_GETINFO_VALLEN = - DEFINE_MSGF("getinfo_vallen", 0, - sizeof(__u32), lustre_swab_generic_32s, NULL); -EXPORT_SYMBOL(RMF_GETINFO_VALLEN); - -struct req_msg_field RMF_GETINFO_VAL = - DEFINE_MSGF("getinfo_val", 0, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_GETINFO_VAL); - -struct req_msg_field RMF_SEQ_OPC = - DEFINE_MSGF("seq_query_opc", 0, - sizeof(__u32), lustre_swab_generic_32s, NULL); -EXPORT_SYMBOL(RMF_SEQ_OPC); - -struct req_msg_field RMF_SEQ_RANGE = - 
DEFINE_MSGF("seq_query_range", 0, - sizeof(struct lu_seq_range), - lustre_swab_lu_seq_range, NULL); -EXPORT_SYMBOL(RMF_SEQ_RANGE); - -struct req_msg_field RMF_FLD_OPC = - DEFINE_MSGF("fld_query_opc", 0, - sizeof(__u32), lustre_swab_generic_32s, NULL); -EXPORT_SYMBOL(RMF_FLD_OPC); - -struct req_msg_field RMF_FLD_MDFLD = - DEFINE_MSGF("fld_query_mdfld", 0, - sizeof(struct lu_seq_range), - lustre_swab_lu_seq_range, NULL); -EXPORT_SYMBOL(RMF_FLD_MDFLD); - -struct req_msg_field RMF_MDT_BODY = - DEFINE_MSGF("mdt_body", 0, - sizeof(struct mdt_body), lustre_swab_mdt_body, NULL); -EXPORT_SYMBOL(RMF_MDT_BODY); - -struct req_msg_field RMF_OBD_QUOTACTL = - DEFINE_MSGF("obd_quotactl", 0, - sizeof(struct obd_quotactl), - lustre_swab_obd_quotactl, NULL); -EXPORT_SYMBOL(RMF_OBD_QUOTACTL); - -struct req_msg_field RMF_MDT_EPOCH = - DEFINE_MSGF("mdt_ioepoch", 0, - sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL); -EXPORT_SYMBOL(RMF_MDT_EPOCH); - -struct req_msg_field RMF_PTLRPC_BODY = - DEFINE_MSGF("ptlrpc_body", 0, - sizeof(struct ptlrpc_body), lustre_swab_ptlrpc_body, NULL); -EXPORT_SYMBOL(RMF_PTLRPC_BODY); - -struct req_msg_field RMF_CLOSE_DATA = - DEFINE_MSGF("data_version", 0, - sizeof(struct close_data), lustre_swab_close_data, NULL); -EXPORT_SYMBOL(RMF_CLOSE_DATA); - -struct req_msg_field RMF_OBD_STATFS = - DEFINE_MSGF("obd_statfs", 0, - sizeof(struct obd_statfs), lustre_swab_obd_statfs, NULL); -EXPORT_SYMBOL(RMF_OBD_STATFS); - -struct req_msg_field RMF_SETINFO_KEY = - DEFINE_MSGF("setinfo_key", 0, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_SETINFO_KEY); - -struct req_msg_field RMF_NAME = - DEFINE_MSGF("name", RMF_F_STRING, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_NAME); - -struct req_msg_field RMF_SYMTGT = - DEFINE_MSGF("symtgt", RMF_F_STRING, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_SYMTGT); - -struct req_msg_field RMF_TGTUUID = - DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL, - NULL); -EXPORT_SYMBOL(RMF_TGTUUID); - -struct req_msg_field RMF_CLUUID = - 
DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL, - NULL); -EXPORT_SYMBOL(RMF_CLUUID); - -struct req_msg_field RMF_STRING = - DEFINE_MSGF("string", RMF_F_STRING, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_STRING); - -struct req_msg_field RMF_LLOGD_BODY = - DEFINE_MSGF("llogd_body", 0, - sizeof(struct llogd_body), lustre_swab_llogd_body, NULL); -EXPORT_SYMBOL(RMF_LLOGD_BODY); - -struct req_msg_field RMF_LLOG_LOG_HDR = - DEFINE_MSGF("llog_log_hdr", 0, - sizeof(struct llog_log_hdr), lustre_swab_llog_hdr, NULL); -EXPORT_SYMBOL(RMF_LLOG_LOG_HDR); - -struct req_msg_field RMF_LLOGD_CONN_BODY = - DEFINE_MSGF("llogd_conn_body", 0, - sizeof(struct llogd_conn_body), - lustre_swab_llogd_conn_body, NULL); -EXPORT_SYMBOL(RMF_LLOGD_CONN_BODY); - -/* - * connection handle received in MDS_CONNECT request. - * - * No swabbing needed because struct lustre_handle contains only a 64-bit cookie - * that the client does not interpret at all. - */ -struct req_msg_field RMF_CONN = - DEFINE_MSGF("conn", 0, sizeof(struct lustre_handle), NULL, NULL); -EXPORT_SYMBOL(RMF_CONN); - -struct req_msg_field RMF_CONNECT_DATA = - DEFINE_MSGF("cdata", - RMF_F_NO_SIZE_CHECK /* we allow extra space for interop */, - sizeof(struct obd_connect_data), - lustre_swab_connect, NULL); -EXPORT_SYMBOL(RMF_CONNECT_DATA); - -struct req_msg_field RMF_DLM_REQ = - DEFINE_MSGF("dlm_req", RMF_F_NO_SIZE_CHECK /* ldlm_request_bufsize */, - sizeof(struct ldlm_request), - lustre_swab_ldlm_request, NULL); -EXPORT_SYMBOL(RMF_DLM_REQ); - -struct req_msg_field RMF_DLM_REP = - DEFINE_MSGF("dlm_rep", 0, - sizeof(struct ldlm_reply), lustre_swab_ldlm_reply, NULL); -EXPORT_SYMBOL(RMF_DLM_REP); - -struct req_msg_field RMF_LDLM_INTENT = - DEFINE_MSGF("ldlm_intent", 0, - sizeof(struct ldlm_intent), lustre_swab_ldlm_intent, NULL); -EXPORT_SYMBOL(RMF_LDLM_INTENT); - -struct req_msg_field RMF_DLM_LVB = - DEFINE_MSGF("dlm_lvb", 0, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_DLM_LVB); - -struct req_msg_field RMF_DLM_GL_DESC = - 
DEFINE_MSGF("dlm_gl_desc", 0, sizeof(union ldlm_gl_desc), - lustre_swab_gl_desc, NULL); -EXPORT_SYMBOL(RMF_DLM_GL_DESC); - -struct req_msg_field RMF_MDT_MD = - DEFINE_MSGF("mdt_md", RMF_F_NO_SIZE_CHECK, MIN_MD_SIZE, NULL, NULL); -EXPORT_SYMBOL(RMF_MDT_MD); - -struct req_msg_field RMF_REC_REINT = - DEFINE_MSGF("rec_reint", 0, sizeof(struct mdt_rec_reint), - lustre_swab_mdt_rec_reint, NULL); -EXPORT_SYMBOL(RMF_REC_REINT); - -/* FIXME: this length should be defined as a macro */ -struct req_msg_field RMF_EADATA = DEFINE_MSGF("eadata", 0, -1, - NULL, NULL); -EXPORT_SYMBOL(RMF_EADATA); - -struct req_msg_field RMF_EAVALS = DEFINE_MSGF("eavals", 0, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_EAVALS); - -struct req_msg_field RMF_ACL = DEFINE_MSGF("acl", 0, -1, NULL, NULL); -EXPORT_SYMBOL(RMF_ACL); - -/* FIXME: this should be made to use RMF_F_STRUCT_ARRAY */ -struct req_msg_field RMF_LOGCOOKIES = - DEFINE_MSGF("logcookies", RMF_F_NO_SIZE_CHECK /* multiple cookies */, - sizeof(struct llog_cookie), NULL, NULL); -EXPORT_SYMBOL(RMF_LOGCOOKIES); - -struct req_msg_field RMF_CAPA1 = - DEFINE_MSGF("capa", 0, sizeof(struct lustre_capa), - lustre_swab_lustre_capa, NULL); -EXPORT_SYMBOL(RMF_CAPA1); - -struct req_msg_field RMF_CAPA2 = - DEFINE_MSGF("capa", 0, sizeof(struct lustre_capa), - lustre_swab_lustre_capa, NULL); -EXPORT_SYMBOL(RMF_CAPA2); - -struct req_msg_field RMF_LAYOUT_INTENT = - DEFINE_MSGF("layout_intent", 0, - sizeof(struct layout_intent), lustre_swab_layout_intent, - NULL); -EXPORT_SYMBOL(RMF_LAYOUT_INTENT); - -/* - * OST request field. 
- */ -struct req_msg_field RMF_OST_BODY = - DEFINE_MSGF("ost_body", 0, - sizeof(struct ost_body), lustre_swab_ost_body, dump_ost_body); -EXPORT_SYMBOL(RMF_OST_BODY); - -struct req_msg_field RMF_OBD_IOOBJ = - DEFINE_MSGF("obd_ioobj", RMF_F_STRUCT_ARRAY, - sizeof(struct obd_ioobj), lustre_swab_obd_ioobj, dump_ioo); -EXPORT_SYMBOL(RMF_OBD_IOOBJ); - -struct req_msg_field RMF_NIOBUF_REMOTE = - DEFINE_MSGF("niobuf_remote", RMF_F_STRUCT_ARRAY, - sizeof(struct niobuf_remote), lustre_swab_niobuf_remote, - dump_rniobuf); -EXPORT_SYMBOL(RMF_NIOBUF_REMOTE); - -struct req_msg_field RMF_RCS = - DEFINE_MSGF("niobuf_remote", RMF_F_STRUCT_ARRAY, sizeof(__u32), - lustre_swab_generic_32s, dump_rcs); -EXPORT_SYMBOL(RMF_RCS); - -struct req_msg_field RMF_EAVALS_LENS = - DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32), - lustre_swab_generic_32s, NULL); -EXPORT_SYMBOL(RMF_EAVALS_LENS); - -struct req_msg_field RMF_OBD_ID = - DEFINE_MSGF("u64", 0, - sizeof(u64), lustre_swab_ost_last_id, NULL); -EXPORT_SYMBOL(RMF_OBD_ID); - -struct req_msg_field RMF_FID = - DEFINE_MSGF("fid", 0, - sizeof(struct lu_fid), lustre_swab_lu_fid, NULL); -EXPORT_SYMBOL(RMF_FID); - -struct req_msg_field RMF_OST_ID = - DEFINE_MSGF("ost_id", 0, - sizeof(struct ost_id), lustre_swab_ost_id, NULL); -EXPORT_SYMBOL(RMF_OST_ID); - -struct req_msg_field RMF_FIEMAP_KEY = - DEFINE_MSGF("fiemap", 0, sizeof(struct ll_fiemap_info_key), - lustre_swab_fiemap, NULL); -EXPORT_SYMBOL(RMF_FIEMAP_KEY); - -struct req_msg_field RMF_FIEMAP_VAL = - DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL); -EXPORT_SYMBOL(RMF_FIEMAP_VAL); - -struct req_msg_field RMF_HSM_USER_STATE = - DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state), - lustre_swab_hsm_user_state, NULL); -EXPORT_SYMBOL(RMF_HSM_USER_STATE); - -struct req_msg_field RMF_HSM_STATE_SET = - DEFINE_MSGF("hsm_state_set", 0, sizeof(struct hsm_state_set), - lustre_swab_hsm_state_set, NULL); -EXPORT_SYMBOL(RMF_HSM_STATE_SET); - -struct req_msg_field 
RMF_MDS_HSM_PROGRESS = - DEFINE_MSGF("hsm_progress", 0, sizeof(struct hsm_progress_kernel), - lustre_swab_hsm_progress_kernel, NULL); -EXPORT_SYMBOL(RMF_MDS_HSM_PROGRESS); - -struct req_msg_field RMF_MDS_HSM_CURRENT_ACTION = - DEFINE_MSGF("hsm_current_action", 0, sizeof(struct hsm_current_action), - lustre_swab_hsm_current_action, NULL); -EXPORT_SYMBOL(RMF_MDS_HSM_CURRENT_ACTION); - -struct req_msg_field RMF_MDS_HSM_USER_ITEM = - DEFINE_MSGF("hsm_user_item", RMF_F_STRUCT_ARRAY, - sizeof(struct hsm_user_item), lustre_swab_hsm_user_item, - NULL); -EXPORT_SYMBOL(RMF_MDS_HSM_USER_ITEM); - -struct req_msg_field RMF_MDS_HSM_ARCHIVE = - DEFINE_MSGF("hsm_archive", 0, - sizeof(__u32), lustre_swab_generic_32s, NULL); -EXPORT_SYMBOL(RMF_MDS_HSM_ARCHIVE); - -struct req_msg_field RMF_MDS_HSM_REQUEST = - DEFINE_MSGF("hsm_request", 0, sizeof(struct hsm_request), - lustre_swab_hsm_request, NULL); -EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST); - -struct req_msg_field RMF_SWAP_LAYOUTS = - DEFINE_MSGF("swap_layouts", 0, sizeof(struct mdc_swap_layouts), - lustre_swab_swap_layouts, NULL); -EXPORT_SYMBOL(RMF_SWAP_LAYOUTS); -/* - * Request formats. 
- */ - -struct req_format { - const char *rf_name; - size_t rf_idx; - struct { - size_t nr; - const struct req_msg_field **d; - } rf_fields[RCL_NR]; -}; - -#define DEFINE_REQ_FMT(name, client, client_nr, server, server_nr) { \ - .rf_name = name, \ - .rf_fields = { \ - [RCL_CLIENT] = { \ - .nr = client_nr, \ - .d = client \ - }, \ - [RCL_SERVER] = { \ - .nr = server_nr, \ - .d = server \ - } \ - } \ -} - -#define DEFINE_REQ_FMT0(name, client, server) \ -DEFINE_REQ_FMT(name, client, ARRAY_SIZE(client), server, ARRAY_SIZE(server)) - -struct req_format RQF_OBD_PING = - DEFINE_REQ_FMT0("OBD_PING", empty, empty); -EXPORT_SYMBOL(RQF_OBD_PING); - -struct req_format RQF_OBD_SET_INFO = - DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty); -EXPORT_SYMBOL(RQF_OBD_SET_INFO); - -struct req_format RQF_SEC_CTX = - DEFINE_REQ_FMT0("SEC_CTX", empty, empty); -EXPORT_SYMBOL(RQF_SEC_CTX); - -struct req_format RQF_MGS_TARGET_REG = - DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only, - mgs_target_info_only); -EXPORT_SYMBOL(RQF_MGS_TARGET_REG); - -struct req_format RQF_MGS_SET_INFO = - DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info, - mgs_set_info); -EXPORT_SYMBOL(RQF_MGS_SET_INFO); - -struct req_format RQF_MGS_CONFIG_READ = - DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client, - mgs_config_read_server); -EXPORT_SYMBOL(RQF_MGS_CONFIG_READ); - -struct req_format RQF_SEQ_QUERY = - DEFINE_REQ_FMT0("SEQ_QUERY", seq_query_client, seq_query_server); -EXPORT_SYMBOL(RQF_SEQ_QUERY); - -struct req_format RQF_FLD_QUERY = - DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server); -EXPORT_SYMBOL(RQF_FLD_QUERY); - -/* - * The 'fld_read_server' uses 'RMF_GENERIC_DATA' to hold the 'FLD_QUERY' - * RPC reply that is composed of 'struct lu_seq_range_array'. But there - * is not registered swabber function for 'RMF_GENERIC_DATA'. So the RPC - * peers need to handle the RPC reply with fixed little-endian format. 
- * - * In theory, we can define new structure with some swabber registered to - * handle the 'FLD_QUERY' RPC reply result automatically. But from the - * implementation view, it is not easy to be done within current "struct - * req_msg_field" framework. Because the sequence range array in the RPC - * reply is not fixed length, instead, its length depends on 'lu_seq_range' - * count, that is unknown when prepare the RPC buffer. Generally, for such - * flexible length RPC usage, there will be a field in the RPC layout to - * indicate the data length. But for the 'FLD_READ' RPC, we have no way to - * do that unless we add new length filed that will broken the on-wire RPC - * protocol and cause interoperability trouble with old peer. - */ -struct req_format RQF_FLD_READ = - DEFINE_REQ_FMT0("FLD_READ", fld_read_client, fld_read_server); -EXPORT_SYMBOL(RQF_FLD_READ); - -struct req_format RQF_LOG_CANCEL = - DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty); -EXPORT_SYMBOL(RQF_LOG_CANCEL); - -struct req_format RQF_MDS_QUOTACTL = - DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only); -EXPORT_SYMBOL(RQF_MDS_QUOTACTL); - -struct req_format RQF_OST_QUOTACTL = - DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only); -EXPORT_SYMBOL(RQF_OST_QUOTACTL); - -struct req_format RQF_MDS_GETSTATUS = - DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa); -EXPORT_SYMBOL(RQF_MDS_GETSTATUS); - -struct req_format RQF_MDS_STATFS = - DEFINE_REQ_FMT0("MDS_STATFS", empty, obd_statfs_server); -EXPORT_SYMBOL(RQF_MDS_STATFS); - -struct req_format RQF_MDS_SYNC = - DEFINE_REQ_FMT0("MDS_SYNC", mdt_body_capa, mdt_body_only); -EXPORT_SYMBOL(RQF_MDS_SYNC); - -struct req_format RQF_MDS_GETATTR = - DEFINE_REQ_FMT0("MDS_GETATTR", mdt_body_capa, mds_getattr_server); -EXPORT_SYMBOL(RQF_MDS_GETATTR); - -struct req_format RQF_MDS_GETXATTR = - DEFINE_REQ_FMT0("MDS_GETXATTR", - mds_getxattr_client, mds_getxattr_server); -EXPORT_SYMBOL(RQF_MDS_GETXATTR); - -struct req_format 
RQF_MDS_GETATTR_NAME = - DEFINE_REQ_FMT0("MDS_GETATTR_NAME", - mds_getattr_name_client, mds_getattr_server); -EXPORT_SYMBOL(RQF_MDS_GETATTR_NAME); - -struct req_format RQF_MDS_REINT = - DEFINE_REQ_FMT0("MDS_REINT", mds_reint_client, mdt_body_only); -EXPORT_SYMBOL(RQF_MDS_REINT); - -struct req_format RQF_MDS_REINT_CREATE = - DEFINE_REQ_FMT0("MDS_REINT_CREATE", - mds_reint_create_client, mdt_body_capa); -EXPORT_SYMBOL(RQF_MDS_REINT_CREATE); - -struct req_format RQF_MDS_REINT_CREATE_ACL = - DEFINE_REQ_FMT0("MDS_REINT_CREATE_ACL", - mds_reint_create_acl_client, mdt_body_capa); -EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_ACL); - -struct req_format RQF_MDS_REINT_CREATE_SLAVE = - DEFINE_REQ_FMT0("MDS_REINT_CREATE_EA", - mds_reint_create_slave_client, mdt_body_capa); -EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_SLAVE); - -struct req_format RQF_MDS_REINT_CREATE_SYM = - DEFINE_REQ_FMT0("MDS_REINT_CREATE_SYM", - mds_reint_create_sym_client, mdt_body_capa); -EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_SYM); - -struct req_format RQF_MDS_REINT_OPEN = - DEFINE_REQ_FMT0("MDS_REINT_OPEN", - mds_reint_open_client, mds_reint_open_server); -EXPORT_SYMBOL(RQF_MDS_REINT_OPEN); - -struct req_format RQF_MDS_REINT_UNLINK = - DEFINE_REQ_FMT0("MDS_REINT_UNLINK", mds_reint_unlink_client, - mds_last_unlink_server); -EXPORT_SYMBOL(RQF_MDS_REINT_UNLINK); - -struct req_format RQF_MDS_REINT_LINK = - DEFINE_REQ_FMT0("MDS_REINT_LINK", - mds_reint_link_client, mdt_body_only); -EXPORT_SYMBOL(RQF_MDS_REINT_LINK); - -struct req_format RQF_MDS_REINT_RENAME = - DEFINE_REQ_FMT0("MDS_REINT_RENAME", mds_reint_rename_client, - mds_last_unlink_server); -EXPORT_SYMBOL(RQF_MDS_REINT_RENAME); - -struct req_format RQF_MDS_REINT_MIGRATE = - DEFINE_REQ_FMT0("MDS_REINT_MIGRATE", mds_reint_migrate_client, - mds_last_unlink_server); -EXPORT_SYMBOL(RQF_MDS_REINT_MIGRATE); - -struct req_format RQF_MDS_REINT_SETATTR = - DEFINE_REQ_FMT0("MDS_REINT_SETATTR", - mds_reint_setattr_client, mds_setattr_server); -EXPORT_SYMBOL(RQF_MDS_REINT_SETATTR); - 
-struct req_format RQF_MDS_REINT_SETXATTR = - DEFINE_REQ_FMT0("MDS_REINT_SETXATTR", - mds_reint_setxattr_client, mdt_body_only); -EXPORT_SYMBOL(RQF_MDS_REINT_SETXATTR); - -struct req_format RQF_MDS_CONNECT = - DEFINE_REQ_FMT0("MDS_CONNECT", - obd_connect_client, obd_connect_server); -EXPORT_SYMBOL(RQF_MDS_CONNECT); - -struct req_format RQF_MDS_DISCONNECT = - DEFINE_REQ_FMT0("MDS_DISCONNECT", empty, empty); -EXPORT_SYMBOL(RQF_MDS_DISCONNECT); - -struct req_format RQF_MDS_GET_INFO = - DEFINE_REQ_FMT0("MDS_GET_INFO", mds_getinfo_client, - mds_getinfo_server); -EXPORT_SYMBOL(RQF_MDS_GET_INFO); - -struct req_format RQF_LDLM_ENQUEUE = - DEFINE_REQ_FMT0("LDLM_ENQUEUE", - ldlm_enqueue_client, ldlm_enqueue_lvb_server); -EXPORT_SYMBOL(RQF_LDLM_ENQUEUE); - -struct req_format RQF_LDLM_ENQUEUE_LVB = - DEFINE_REQ_FMT0("LDLM_ENQUEUE_LVB", - ldlm_enqueue_client, ldlm_enqueue_lvb_server); -EXPORT_SYMBOL(RQF_LDLM_ENQUEUE_LVB); - -struct req_format RQF_LDLM_CONVERT = - DEFINE_REQ_FMT0("LDLM_CONVERT", - ldlm_enqueue_client, ldlm_enqueue_server); -EXPORT_SYMBOL(RQF_LDLM_CONVERT); - -struct req_format RQF_LDLM_CANCEL = - DEFINE_REQ_FMT0("LDLM_CANCEL", ldlm_enqueue_client, empty); -EXPORT_SYMBOL(RQF_LDLM_CANCEL); - -struct req_format RQF_LDLM_CALLBACK = - DEFINE_REQ_FMT0("LDLM_CALLBACK", ldlm_enqueue_client, empty); -EXPORT_SYMBOL(RQF_LDLM_CALLBACK); - -struct req_format RQF_LDLM_CP_CALLBACK = - DEFINE_REQ_FMT0("LDLM_CP_CALLBACK", ldlm_cp_callback_client, empty); -EXPORT_SYMBOL(RQF_LDLM_CP_CALLBACK); - -struct req_format RQF_LDLM_BL_CALLBACK = - DEFINE_REQ_FMT0("LDLM_BL_CALLBACK", ldlm_enqueue_client, empty); -EXPORT_SYMBOL(RQF_LDLM_BL_CALLBACK); - -struct req_format RQF_LDLM_GL_CALLBACK = - DEFINE_REQ_FMT0("LDLM_GL_CALLBACK", ldlm_enqueue_client, - ldlm_gl_callback_server); -EXPORT_SYMBOL(RQF_LDLM_GL_CALLBACK); - -struct req_format RQF_LDLM_GL_DESC_CALLBACK = - DEFINE_REQ_FMT0("LDLM_GL_CALLBACK", ldlm_gl_callback_desc_client, - ldlm_gl_callback_server); 
-EXPORT_SYMBOL(RQF_LDLM_GL_DESC_CALLBACK); - -struct req_format RQF_LDLM_INTENT_BASIC = - DEFINE_REQ_FMT0("LDLM_INTENT_BASIC", - ldlm_intent_basic_client, ldlm_enqueue_lvb_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_BASIC); - -struct req_format RQF_LDLM_INTENT = - DEFINE_REQ_FMT0("LDLM_INTENT", - ldlm_intent_client, ldlm_intent_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT); - -struct req_format RQF_LDLM_INTENT_LAYOUT = - DEFINE_REQ_FMT0("LDLM_INTENT_LAYOUT ", - ldlm_intent_layout_client, ldlm_enqueue_lvb_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_LAYOUT); - -struct req_format RQF_LDLM_INTENT_GETATTR = - DEFINE_REQ_FMT0("LDLM_INTENT_GETATTR", - ldlm_intent_getattr_client, ldlm_intent_getattr_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_GETATTR); - -struct req_format RQF_LDLM_INTENT_OPEN = - DEFINE_REQ_FMT0("LDLM_INTENT_OPEN", - ldlm_intent_open_client, ldlm_intent_open_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_OPEN); - -struct req_format RQF_LDLM_INTENT_CREATE = - DEFINE_REQ_FMT0("LDLM_INTENT_CREATE", - ldlm_intent_create_client, ldlm_intent_getattr_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_CREATE); - -struct req_format RQF_LDLM_INTENT_UNLINK = - DEFINE_REQ_FMT0("LDLM_INTENT_UNLINK", - ldlm_intent_unlink_client, ldlm_intent_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_UNLINK); - -struct req_format RQF_LDLM_INTENT_GETXATTR = - DEFINE_REQ_FMT0("LDLM_INTENT_GETXATTR", - ldlm_intent_getxattr_client, - ldlm_intent_getxattr_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_GETXATTR); - -struct req_format RQF_MDS_CLOSE = - DEFINE_REQ_FMT0("MDS_CLOSE", - mdt_close_client, mds_last_unlink_server); -EXPORT_SYMBOL(RQF_MDS_CLOSE); - -struct req_format RQF_MDS_INTENT_CLOSE = - DEFINE_REQ_FMT0("MDS_CLOSE", - mdt_intent_close_client, mds_last_unlink_server); -EXPORT_SYMBOL(RQF_MDS_INTENT_CLOSE); - -struct req_format RQF_MDS_READPAGE = - DEFINE_REQ_FMT0("MDS_READPAGE", - mdt_body_capa, mdt_body_only); -EXPORT_SYMBOL(RQF_MDS_READPAGE); - -struct req_format RQF_MDS_HSM_ACTION = - DEFINE_REQ_FMT0("MDS_HSM_ACTION", 
mdt_body_capa, mdt_hsm_action_server); -EXPORT_SYMBOL(RQF_MDS_HSM_ACTION); - -struct req_format RQF_MDS_HSM_PROGRESS = - DEFINE_REQ_FMT0("MDS_HSM_PROGRESS", mdt_hsm_progress, empty); -EXPORT_SYMBOL(RQF_MDS_HSM_PROGRESS); - -struct req_format RQF_MDS_HSM_CT_REGISTER = - DEFINE_REQ_FMT0("MDS_HSM_CT_REGISTER", mdt_hsm_ct_register, empty); -EXPORT_SYMBOL(RQF_MDS_HSM_CT_REGISTER); - -struct req_format RQF_MDS_HSM_CT_UNREGISTER = - DEFINE_REQ_FMT0("MDS_HSM_CT_UNREGISTER", mdt_hsm_ct_unregister, empty); -EXPORT_SYMBOL(RQF_MDS_HSM_CT_UNREGISTER); - -struct req_format RQF_MDS_HSM_STATE_GET = - DEFINE_REQ_FMT0("MDS_HSM_STATE_GET", - mdt_body_capa, mdt_hsm_state_get_server); -EXPORT_SYMBOL(RQF_MDS_HSM_STATE_GET); - -struct req_format RQF_MDS_HSM_STATE_SET = - DEFINE_REQ_FMT0("MDS_HSM_STATE_SET", mdt_hsm_state_set, empty); -EXPORT_SYMBOL(RQF_MDS_HSM_STATE_SET); - -struct req_format RQF_MDS_HSM_REQUEST = - DEFINE_REQ_FMT0("MDS_HSM_REQUEST", mdt_hsm_request, empty); -EXPORT_SYMBOL(RQF_MDS_HSM_REQUEST); - -struct req_format RQF_MDS_SWAP_LAYOUTS = - DEFINE_REQ_FMT0("MDS_SWAP_LAYOUTS", - mdt_swap_layouts, empty); -EXPORT_SYMBOL(RQF_MDS_SWAP_LAYOUTS); - -/* This is for split */ -struct req_format RQF_MDS_WRITEPAGE = - DEFINE_REQ_FMT0("MDS_WRITEPAGE", - mdt_body_capa, mdt_body_only); -EXPORT_SYMBOL(RQF_MDS_WRITEPAGE); - -struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE = - DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_CREATE", - llog_origin_handle_create_client, llogd_body_only); -EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_CREATE); - -struct req_format RQF_LLOG_ORIGIN_HANDLE_DESTROY = - DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_DESTROY", - llogd_body_only, llogd_body_only); -EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_DESTROY); - -struct req_format RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK = - DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_NEXT_BLOCK", - llogd_body_only, llog_origin_handle_next_block_server); -EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK); - -struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK = - 
DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_PREV_BLOCK", - llogd_body_only, llog_origin_handle_next_block_server); -EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK); - -struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER = - DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_READ_HEADER", - llogd_body_only, llog_log_hdr_only); -EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_READ_HEADER); - -struct req_format RQF_LLOG_ORIGIN_CONNECT = - DEFINE_REQ_FMT0("LLOG_ORIGIN_CONNECT", llogd_conn_body_only, empty); -EXPORT_SYMBOL(RQF_LLOG_ORIGIN_CONNECT); - -struct req_format RQF_CONNECT = - DEFINE_REQ_FMT0("CONNECT", obd_connect_client, obd_connect_server); -EXPORT_SYMBOL(RQF_CONNECT); - -struct req_format RQF_OST_CONNECT = - DEFINE_REQ_FMT0("OST_CONNECT", - obd_connect_client, obd_connect_server); -EXPORT_SYMBOL(RQF_OST_CONNECT); - -struct req_format RQF_OST_DISCONNECT = - DEFINE_REQ_FMT0("OST_DISCONNECT", empty, empty); -EXPORT_SYMBOL(RQF_OST_DISCONNECT); - -struct req_format RQF_OST_GETATTR = - DEFINE_REQ_FMT0("OST_GETATTR", ost_body_capa, ost_body_only); -EXPORT_SYMBOL(RQF_OST_GETATTR); - -struct req_format RQF_OST_SETATTR = - DEFINE_REQ_FMT0("OST_SETATTR", ost_body_capa, ost_body_only); -EXPORT_SYMBOL(RQF_OST_SETATTR); - -struct req_format RQF_OST_CREATE = - DEFINE_REQ_FMT0("OST_CREATE", ost_body_only, ost_body_only); -EXPORT_SYMBOL(RQF_OST_CREATE); - -struct req_format RQF_OST_PUNCH = - DEFINE_REQ_FMT0("OST_PUNCH", ost_body_capa, ost_body_only); -EXPORT_SYMBOL(RQF_OST_PUNCH); - -struct req_format RQF_OST_SYNC = - DEFINE_REQ_FMT0("OST_SYNC", ost_body_capa, ost_body_only); -EXPORT_SYMBOL(RQF_OST_SYNC); - -struct req_format RQF_OST_DESTROY = - DEFINE_REQ_FMT0("OST_DESTROY", ost_destroy_client, ost_body_only); -EXPORT_SYMBOL(RQF_OST_DESTROY); - -struct req_format RQF_OST_BRW_READ = - DEFINE_REQ_FMT0("OST_BRW_READ", ost_brw_client, ost_brw_read_server); -EXPORT_SYMBOL(RQF_OST_BRW_READ); - -struct req_format RQF_OST_BRW_WRITE = - DEFINE_REQ_FMT0("OST_BRW_WRITE", ost_brw_client, ost_brw_write_server); 
-EXPORT_SYMBOL(RQF_OST_BRW_WRITE); - -struct req_format RQF_OST_STATFS = - DEFINE_REQ_FMT0("OST_STATFS", empty, obd_statfs_server); -EXPORT_SYMBOL(RQF_OST_STATFS); - -struct req_format RQF_OST_SET_GRANT_INFO = - DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client, - ost_body_only); -EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO); - -struct req_format RQF_OST_GET_INFO = - DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client, - ost_get_info_generic_server); -EXPORT_SYMBOL(RQF_OST_GET_INFO); - -struct req_format RQF_OST_GET_INFO_LAST_ID = - DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client, - ost_get_last_id_server); -EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID); - -struct req_format RQF_OST_GET_INFO_LAST_FID = - DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", ost_get_last_fid_client, - ost_get_last_fid_server); -EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID); - -struct req_format RQF_OST_SET_INFO_LAST_FID = - DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client, - empty); -EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID); - -struct req_format RQF_OST_GET_INFO_FIEMAP = - DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client, - ost_get_fiemap_server); -EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP); - -/* Convenience macro */ -#define FMT_FIELD(fmt, i, j) ((fmt)->rf_fields[(i)].d[(j)]) - -/** - * Initializes the capsule abstraction by computing and setting the \a rf_idx - * field of RQFs and the \a rmf_offset field of RMFs. 
- */ -int req_layout_init(void) -{ - size_t i; - size_t j; - size_t k; - struct req_format *rf = NULL; - - for (i = 0; i < ARRAY_SIZE(req_formats); ++i) { - rf = req_formats[i]; - rf->rf_idx = i; - for (j = 0; j < RCL_NR; ++j) { - LASSERT(rf->rf_fields[j].nr <= REQ_MAX_FIELD_NR); - for (k = 0; k < rf->rf_fields[j].nr; ++k) { - struct req_msg_field *field; - - field = (typeof(field))rf->rf_fields[j].d[k]; - LASSERT(!(field->rmf_flags & RMF_F_STRUCT_ARRAY) - || field->rmf_size > 0); - LASSERT(field->rmf_offset[i][j] == 0); - /* - * k + 1 to detect unused format/field - * combinations. - */ - field->rmf_offset[i][j] = k + 1; - } - } - } - return 0; -} -EXPORT_SYMBOL(req_layout_init); - -void req_layout_fini(void) -{ -} -EXPORT_SYMBOL(req_layout_fini); - -/** - * Initializes the expected sizes of each RMF in a \a pill (\a rc_area) to -1. - * - * Actual/expected field sizes are set elsewhere in functions in this file: - * req_capsule_init(), req_capsule_server_pack(), req_capsule_set_size() and - * req_capsule_msg_size(). The \a rc_area information is used by. - * ptlrpc_request_set_replen(). - */ -static void req_capsule_init_area(struct req_capsule *pill) -{ - size_t i; - - for (i = 0; i < ARRAY_SIZE(pill->rc_area[RCL_CLIENT]); i++) { - pill->rc_area[RCL_CLIENT][i] = -1; - pill->rc_area[RCL_SERVER][i] = -1; - } -} - -/** - * Initialize a pill. - * - * The \a location indicates whether the caller is executing on the client side - * (RCL_CLIENT) or server side (RCL_SERVER).. - */ -void req_capsule_init(struct req_capsule *pill, - struct ptlrpc_request *req, - enum req_location location) -{ - LASSERT(location == RCL_SERVER || location == RCL_CLIENT); - - /* - * Today all capsules are embedded in ptlrpc_request structs, - * but just in case that ever isn't the case, we don't reach - * into req unless req != NULL and pill is the one embedded in - * the req. 
- * - * The req->rq_pill_init flag makes it safe to initialize a pill - * twice, which might happen in the OST paths as a result of the - * high-priority RPC queue getting peeked at before ost_handle() - * handles an OST RPC. - */ - if (req && pill == &req->rq_pill && req->rq_pill_init) - return; - - memset(pill, 0, sizeof(*pill)); - pill->rc_req = req; - pill->rc_loc = location; - req_capsule_init_area(pill); - - if (req && pill == &req->rq_pill) - req->rq_pill_init = 1; -} -EXPORT_SYMBOL(req_capsule_init); - -void req_capsule_fini(struct req_capsule *pill) -{ -} -EXPORT_SYMBOL(req_capsule_fini); - -static int __req_format_is_sane(const struct req_format *fmt) -{ - return fmt->rf_idx < ARRAY_SIZE(req_formats) && - req_formats[fmt->rf_idx] == fmt; -} - -static struct lustre_msg *__req_msg(const struct req_capsule *pill, - enum req_location loc) -{ - struct ptlrpc_request *req; - - req = pill->rc_req; - return loc == RCL_CLIENT ? req->rq_reqmsg : req->rq_repmsg; -} - -/** - * Set the format (\a fmt) of a \a pill; format changes are not allowed here - * (see req_capsule_extend()). - */ -void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt) -{ - LASSERT(!pill->rc_fmt || pill->rc_fmt == fmt); - LASSERT(__req_format_is_sane(fmt)); - - pill->rc_fmt = fmt; -} -EXPORT_SYMBOL(req_capsule_set); - -/** - * Fills in any parts of the \a rc_area of a \a pill that haven't been filled in - * yet. - - * \a rc_area is an array of REQ_MAX_FIELD_NR elements, used to store sizes of - * variable-sized fields. The field sizes come from the declared \a rmf_size - * field of a \a pill's \a rc_fmt's RMF's. 
- */ -size_t req_capsule_filled_sizes(struct req_capsule *pill, - enum req_location loc) -{ - const struct req_format *fmt = pill->rc_fmt; - size_t i; - - for (i = 0; i < fmt->rf_fields[loc].nr; ++i) { - if (pill->rc_area[loc][i] == -1) { - pill->rc_area[loc][i] = - fmt->rf_fields[loc].d[i]->rmf_size; - if (pill->rc_area[loc][i] == -1) { - /* - * Skip the following fields. - * - * If this LASSERT() trips then you're missing a - * call to req_capsule_set_size(). - */ - LASSERT(loc != RCL_SERVER); - break; - } - } - } - return i; -} -EXPORT_SYMBOL(req_capsule_filled_sizes); - -/** - * Capsule equivalent of lustre_pack_request() and lustre_pack_reply(). - * - * This function uses the \a pill's \a rc_area as filled in by - * req_capsule_set_size() or req_capsule_filled_sizes() (the latter is called by - * this function). - */ -int req_capsule_server_pack(struct req_capsule *pill) -{ - const struct req_format *fmt; - int count; - int rc; - - LASSERT(pill->rc_loc == RCL_SERVER); - fmt = pill->rc_fmt; - LASSERT(fmt); - - count = req_capsule_filled_sizes(pill, RCL_SERVER); - rc = lustre_pack_reply(pill->rc_req, count, - pill->rc_area[RCL_SERVER], NULL); - if (rc != 0) { - DEBUG_REQ(D_ERROR, pill->rc_req, - "Cannot pack %d fields in format `%s': ", - count, fmt->rf_name); - } - return rc; -} -EXPORT_SYMBOL(req_capsule_server_pack); - -/** - * Returns the PTLRPC request or reply (\a loc) buffer offset of a \a pill - * corresponding to the given RMF (\a field). - */ -static u32 __req_capsule_offset(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc) -{ - u32 offset; - - offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc]; - LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", pill->rc_fmt->rf_name, - field->rmf_name, offset, loc); - offset--; - - LASSERT(offset < REQ_MAX_FIELD_NR); - return offset; -} - -/** - * Helper for __req_capsule_get(); swabs value / array of values and/or dumps - * them if desired. 
- */ -static -void -swabber_dumper_helper(struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc, - int offset, - void *value, int len, int dump, void (*swabber)(void *)) -{ - void *p; - int i; - int n; - int do_swab; - int inout = loc == RCL_CLIENT; - - swabber = swabber ?: field->rmf_swabber; - - if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) && - swabber && value) - do_swab = 1; - else - do_swab = 0; - - if (!field->rmf_dumper) - dump = 0; - - if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY)) { - if (dump) { - CDEBUG(D_RPCTRACE, "Dump of %sfield %s follows\n", - do_swab ? "unswabbed " : "", field->rmf_name); - field->rmf_dumper(value); - } - if (!do_swab) - return; - swabber(value); - ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset); - if (dump && field->rmf_dumper) { - CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n", - field->rmf_name); - field->rmf_dumper(value); - } - - return; - } - - /* - * We're swabbing an array; swabber() swabs a single array element, so - * swab every element. - */ - LASSERT((len % field->rmf_size) == 0); - for (p = value, i = 0, n = len / field->rmf_size; - i < n; - i++, p += field->rmf_size) { - if (dump) { - CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, element %d follows\n", - do_swab ? "unswabbed " : "", field->rmf_name, i); - field->rmf_dumper(p); - } - if (!do_swab) - continue; - swabber(p); - if (dump) { - CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, element %d follows\n", - field->rmf_name, i); - field->rmf_dumper(value); - } - } - if (do_swab) - ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset); -} - -/** - * Returns the pointer to a PTLRPC request or reply (\a loc) buffer of a \a pill - * corresponding to the given RMF (\a field). - * - * The buffer will be swabbed using the given \a swabber. If \a swabber == NULL - * then the \a rmf_swabber from the RMF will be used. 
Soon there will be no - * calls to __req_capsule_get() with a non-NULL \a swabber; \a swabber will then - * be removed. Fields with the \a RMF_F_STRUCT_ARRAY flag set will have each - * element of the array swabbed. - */ -static void *__req_capsule_get(struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc, - void (*swabber)(void *), - int dump) -{ - const struct req_format *fmt; - struct lustre_msg *msg; - void *value; - u32 len; - u32 offset; - - void *(*getter)(struct lustre_msg *m, u32 n, u32 minlen); - - static const char *rcl_names[RCL_NR] = { - [RCL_CLIENT] = "client", - [RCL_SERVER] = "server" - }; - - fmt = pill->rc_fmt; - LASSERT(fmt); - LASSERT(fmt != LP_POISON); - LASSERT(__req_format_is_sane(fmt)); - - offset = __req_capsule_offset(pill, field, loc); - - msg = __req_msg(pill, loc); - LASSERT(msg); - - getter = (field->rmf_flags & RMF_F_STRING) ? - (typeof(getter))lustre_msg_string : lustre_msg_buf; - - if (field->rmf_flags & (RMF_F_STRUCT_ARRAY | RMF_F_NO_SIZE_CHECK)) { - /* - * We've already asserted that field->rmf_size > 0 in - * req_layout_init(). - */ - len = lustre_msg_buflen(msg, offset); - if (!(field->rmf_flags & RMF_F_NO_SIZE_CHECK) && - (len % field->rmf_size)) { - CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n", - field->rmf_name, len, field->rmf_size, loc); - return NULL; - } - } else if (pill->rc_area[loc][offset] != -1) { - len = pill->rc_area[loc][offset]; - } else { - len = max_t(typeof(field->rmf_size), field->rmf_size, 0); - } - value = getter(msg, offset, len); - - if (!value) { - DEBUG_REQ(D_ERROR, pill->rc_req, - "Wrong buffer for field `%s' (%u of %u) in format `%s': %u vs. 
%u (%s)\n", - field->rmf_name, offset, lustre_msg_bufcount(msg), - fmt->rf_name, lustre_msg_buflen(msg, offset), len, - rcl_names[loc]); - } else { - swabber_dumper_helper(pill, field, loc, offset, value, len, - dump, swabber); - } - - return value; -} - -/** - * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC request - * buffer corresponding to the given RMF (\a field) of a \a pill. - */ -void *req_capsule_client_get(struct req_capsule *pill, - const struct req_msg_field *field) -{ - return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0); -} -EXPORT_SYMBOL(req_capsule_client_get); - -/** - * Same as req_capsule_client_get(), but with a \a swabber argument. - * - * Currently unused; will be removed when req_capsule_server_swab_get() is - * unused too. - */ -void *req_capsule_client_swab_get(struct req_capsule *pill, - const struct req_msg_field *field, - void *swabber) -{ - return __req_capsule_get(pill, field, RCL_CLIENT, swabber, 0); -} -EXPORT_SYMBOL(req_capsule_client_swab_get); - -/** - * Utility that combines req_capsule_set_size() and req_capsule_client_get(). - * - * First the \a pill's request \a field's size is set (\a rc_area) using - * req_capsule_set_size() with the given \a len. Then the actual buffer is - * returned. - */ -void *req_capsule_client_sized_get(struct req_capsule *pill, - const struct req_msg_field *field, - u32 len) -{ - req_capsule_set_size(pill, field, RCL_CLIENT, len); - return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0); -} -EXPORT_SYMBOL(req_capsule_client_sized_get); - -/** - * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC reply - * buffer corresponding to the given RMF (\a field) of a \a pill. - */ -void *req_capsule_server_get(struct req_capsule *pill, - const struct req_msg_field *field) -{ - return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0); -} -EXPORT_SYMBOL(req_capsule_server_get); - -/** - * Same as req_capsule_server_get(), but with a \a swabber argument. 
- * - * Ideally all swabbing should be done pursuant to RMF definitions, with no - * swabbing done outside this capsule abstraction. - */ -void *req_capsule_server_swab_get(struct req_capsule *pill, - const struct req_msg_field *field, - void *swabber) -{ - return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0); -} -EXPORT_SYMBOL(req_capsule_server_swab_get); - -/** - * Utility that combines req_capsule_set_size() and req_capsule_server_get(). - * - * First the \a pill's request \a field's size is set (\a rc_area) using - * req_capsule_set_size() with the given \a len. Then the actual buffer is - * returned. - */ -void *req_capsule_server_sized_get(struct req_capsule *pill, - const struct req_msg_field *field, - u32 len) -{ - req_capsule_set_size(pill, field, RCL_SERVER, len); - return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0); -} -EXPORT_SYMBOL(req_capsule_server_sized_get); - -void *req_capsule_server_sized_swab_get(struct req_capsule *pill, - const struct req_msg_field *field, - u32 len, void *swabber) -{ - req_capsule_set_size(pill, field, RCL_SERVER, len); - return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0); -} -EXPORT_SYMBOL(req_capsule_server_sized_swab_get); - -/** - * Set the size of the PTLRPC request/reply (\a loc) buffer for the given \a - * field of the given \a pill. - * - * This function must be used when constructing variable sized fields of a - * request or reply. 
- */ -void req_capsule_set_size(struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc, u32 size) -{ - LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT); - - if ((size != (u32)field->rmf_size) && - (field->rmf_size != -1) && - !(field->rmf_flags & RMF_F_NO_SIZE_CHECK) && - (size > 0)) { - u32 rmf_size = (u32)field->rmf_size; - - if ((field->rmf_flags & RMF_F_STRUCT_ARRAY) && - (size % rmf_size != 0)) { - CERROR("%s: array field size mismatch %u %% %u != 0 (%d)\n", - field->rmf_name, size, rmf_size, loc); - LBUG(); - } else if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY) && - size < rmf_size) { - CERROR("%s: field size mismatch %u != %u (%d)\n", - field->rmf_name, size, rmf_size, loc); - LBUG(); - } - } - - pill->rc_area[loc][__req_capsule_offset(pill, field, loc)] = size; -} -EXPORT_SYMBOL(req_capsule_set_size); - -/** - * Return the actual PTLRPC buffer length of a request or reply (\a loc) - * for the given \a pill's given \a field. - * - * NB: this function doesn't correspond with req_capsule_set_size(), which - * actually sets the size in pill.rc_area[loc][offset], but this function - * returns the message buflen[offset], maybe we should use another name. - */ -u32 req_capsule_get_size(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc) -{ - LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT); - - return lustre_msg_buflen(__req_msg(pill, loc), - __req_capsule_offset(pill, field, loc)); -} -EXPORT_SYMBOL(req_capsule_get_size); - -/** - * Wrapper around lustre_msg_size() that returns the PTLRPC size needed for the - * given \a pill's request or reply (\a loc) given the field size recorded in - * the \a pill's rc_area. - * - * See also req_capsule_set_size(). 
- */ -u32 req_capsule_msg_size(struct req_capsule *pill, enum req_location loc) -{ - return lustre_msg_size(pill->rc_req->rq_import->imp_msg_magic, - pill->rc_fmt->rf_fields[loc].nr, - pill->rc_area[loc]); -} - -/** - * While req_capsule_msg_size() computes the size of a PTLRPC request or reply - * (\a loc) given a \a pill's \a rc_area, this function computes the size of a - * PTLRPC request or reply given only an RQF (\a fmt). - * - * This function should not be used for formats which contain variable size - * fields. - */ -u32 req_capsule_fmt_size(__u32 magic, const struct req_format *fmt, - enum req_location loc) -{ - size_t i = 0; - u32 size; - - /* - * This function should probably LASSERT() that fmt has no fields with - * RMF_F_STRUCT_ARRAY in rmf_flags, since we can't know here how many - * elements in the array there will ultimately be, but then, we could - * assume that there will be at least one element, and that's just what - * we do. - */ - size = lustre_msg_hdr_size(magic, fmt->rf_fields[loc].nr); - if (!size) - return size; - - for (; i < fmt->rf_fields[loc].nr; ++i) - if (fmt->rf_fields[loc].d[i]->rmf_size != -1) - size += cfs_size_round(fmt->rf_fields[loc].d[i]-> - rmf_size); - return size; -} - -/** - * Changes the format of an RPC. - * - * The pill must already have been initialized, which means that it already has - * a request format. The new format \a fmt must be an extension of the pill's - * old format. Specifically: the new format must have as many request and reply - * fields as the old one, and all fields shared by the old and new format must - * be at least as large in the new format. - * - * The new format's fields may be of different "type" than the old format, but - * only for fields that are "opaque" blobs: fields which have a) have no - * \a rmf_swabber, b) \a rmf_flags == 0 or RMF_F_NO_SIZE_CHECK, and c) \a - * rmf_size == -1 or \a rmf_flags == RMF_F_NO_SIZE_CHECK. 
For example, - * OBD_SET_INFO has a key field and an opaque value field that gets interpreted - * according to the key field. When the value, according to the key, contains a - * structure (or array thereof) to be swabbed, the format should be changed to - * one where the value field has \a rmf_size/rmf_flags/rmf_swabber set - * accordingly. - */ -void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt) -{ - int i; - size_t j; - - const struct req_format *old; - - LASSERT(pill->rc_fmt); - LASSERT(__req_format_is_sane(fmt)); - - old = pill->rc_fmt; - /* - * Sanity checking... - */ - for (i = 0; i < RCL_NR; ++i) { - LASSERT(fmt->rf_fields[i].nr >= old->rf_fields[i].nr); - for (j = 0; j < old->rf_fields[i].nr - 1; ++j) { - const struct req_msg_field *ofield = FMT_FIELD(old, i, j); - - /* "opaque" fields can be transmogrified */ - if (!ofield->rmf_swabber && - (ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 && - (ofield->rmf_size == -1 || - ofield->rmf_flags == RMF_F_NO_SIZE_CHECK)) - continue; - LASSERT(FMT_FIELD(fmt, i, j) == FMT_FIELD(old, i, j)); - } - /* - * Last field in old format can be shorter than in new. - */ - LASSERT(FMT_FIELD(fmt, i, j)->rmf_size >= - FMT_FIELD(old, i, j)->rmf_size); - } - - pill->rc_fmt = fmt; -} -EXPORT_SYMBOL(req_capsule_extend); - -/** - * This function returns a non-zero value if the given \a field is present in - * the format (\a rc_fmt) of \a pill's PTLRPC request or reply (\a loc), else it - * returns 0. - */ -int req_capsule_has_field(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc) -{ - LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT); - - return field->rmf_offset[pill->rc_fmt->rf_idx][loc]; -} -EXPORT_SYMBOL(req_capsule_has_field); - -/** - * Returns a non-zero value if the given \a field is present in the given \a - * pill's PTLRPC request or reply (\a loc), else it returns 0. 
- */ -static int req_capsule_field_present(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc) -{ - u32 offset; - - LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT); - LASSERT(req_capsule_has_field(pill, field, loc)); - - offset = __req_capsule_offset(pill, field, loc); - return lustre_msg_bufcount(__req_msg(pill, loc)) > offset; -} - -/** - * This function shrinks the size of the _buffer_ of the \a pill's PTLRPC - * request or reply (\a loc). - * - * This is not the opposite of req_capsule_extend(). - */ -void req_capsule_shrink(struct req_capsule *pill, - const struct req_msg_field *field, - u32 newlen, enum req_location loc) -{ - const struct req_format *fmt; - struct lustre_msg *msg; - u32 len; - int offset; - - fmt = pill->rc_fmt; - LASSERT(fmt); - LASSERT(__req_format_is_sane(fmt)); - LASSERT(req_capsule_has_field(pill, field, loc)); - LASSERT(req_capsule_field_present(pill, field, loc)); - - offset = __req_capsule_offset(pill, field, loc); - - msg = __req_msg(pill, loc); - len = lustre_msg_buflen(msg, offset); - LASSERTF(newlen <= len, "%s:%s, oldlen=%u, newlen=%u\n", - fmt->rf_name, field->rmf_name, len, newlen); - - if (loc == RCL_CLIENT) - pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen, - 1); - else - pill->rc_req->rq_replen = lustre_shrink_msg(msg, offset, newlen, - 1); -} -EXPORT_SYMBOL(req_capsule_shrink); diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c deleted file mode 100644 index 946d538121de..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c +++ /dev/null @@ -1,338 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/llog_client.c - * - * remote api for llog - client side - * - * Author: Andreas Dilger - */ - -#define DEBUG_SUBSYSTEM S_LOG - -#include -#include -#include -#include - -#define LLOG_CLIENT_ENTRY(ctxt, imp) do { \ - mutex_lock(&ctxt->loc_mutex); \ - if (ctxt->loc_imp) { \ - imp = class_import_get(ctxt->loc_imp); \ - } else { \ - CERROR("ctxt->loc_imp == NULL for context idx %d." \ - "Unable to complete MDS/OSS recovery," \ - "but I'll try again next time. Not fatal.\n", \ - ctxt->loc_idx); \ - imp = NULL; \ - mutex_unlock(&ctxt->loc_mutex); \ - return (-EINVAL); \ - } \ - mutex_unlock(&ctxt->loc_mutex); \ -} while (0) - -#define LLOG_CLIENT_EXIT(ctxt, imp) do { \ - mutex_lock(&ctxt->loc_mutex); \ - if (ctxt->loc_imp != imp) \ - CWARN("loc_imp has changed from %p to %p\n", \ - ctxt->loc_imp, imp); \ - class_import_put(imp); \ - mutex_unlock(&ctxt->loc_mutex); \ -} while (0) - -/* This is a callback from the llog_* functions. - * Assumes caller has already pushed us into the kernel context. 
- */ -static int llog_client_open(const struct lu_env *env, - struct llog_handle *lgh, struct llog_logid *logid, - char *name, enum llog_open_param open_param) -{ - struct obd_import *imp; - struct llogd_body *body; - struct llog_ctxt *ctxt = lgh->lgh_ctxt; - struct ptlrpc_request *req = NULL; - int rc; - - LLOG_CLIENT_ENTRY(ctxt, imp); - - /* client cannot create llog */ - LASSERTF(open_param != LLOG_OPEN_NEW, "%#x\n", open_param); - LASSERT(lgh); - - req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE); - if (!req) { - rc = -ENOMEM; - goto out; - } - - if (name) - req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, - strlen(name) + 1); - - rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION, - LLOG_ORIGIN_HANDLE_CREATE); - if (rc) { - ptlrpc_request_free(req); - req = NULL; - goto out; - } - ptlrpc_request_set_replen(req); - - body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (logid) - body->lgd_logid = *logid; - body->lgd_ctxt_idx = ctxt->loc_idx - 1; - - if (name) { - char *tmp; - - tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME, - strlen(name) + 1); - LASSERT(tmp); - strcpy(tmp, name); - } - - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (!body) { - rc = -EFAULT; - goto out; - } - - lgh->lgh_id = body->lgd_logid; - lgh->lgh_ctxt = ctxt; -out: - LLOG_CLIENT_EXIT(ctxt, imp); - ptlrpc_req_finished(req); - return rc; -} - -static int llog_client_next_block(const struct lu_env *env, - struct llog_handle *loghandle, - int *cur_idx, int next_idx, - __u64 *cur_offset, void *buf, int len) -{ - struct obd_import *imp; - struct ptlrpc_request *req = NULL; - struct llogd_body *body; - void *ptr; - int rc; - - LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp); - req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK, - LUSTRE_LOG_VERSION, - LLOG_ORIGIN_HANDLE_NEXT_BLOCK); - if (!req) { - rc = -ENOMEM; - goto err_exit; - } - - body = 
req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); - body->lgd_logid = loghandle->lgh_id; - body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1; - body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags; - body->lgd_index = next_idx; - body->lgd_saved_index = *cur_idx; - body->lgd_len = len; - body->lgd_cur_offset = *cur_offset; - - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len); - ptlrpc_request_set_replen(req); - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (!body) { - rc = -EFAULT; - goto out; - } - - /* The log records are swabbed as they are processed */ - ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); - if (!ptr) { - rc = -EFAULT; - goto out; - } - - *cur_idx = body->lgd_saved_index; - *cur_offset = body->lgd_cur_offset; - - memcpy(buf, ptr, len); -out: - ptlrpc_req_finished(req); -err_exit: - LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp); - return rc; -} - -static int llog_client_prev_block(const struct lu_env *env, - struct llog_handle *loghandle, - int prev_idx, void *buf, int len) -{ - struct obd_import *imp; - struct ptlrpc_request *req = NULL; - struct llogd_body *body; - void *ptr; - int rc; - - LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp); - req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK, - LUSTRE_LOG_VERSION, - LLOG_ORIGIN_HANDLE_PREV_BLOCK); - if (!req) { - rc = -ENOMEM; - goto err_exit; - } - - body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); - body->lgd_logid = loghandle->lgh_id; - body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1; - body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags; - body->lgd_index = prev_idx; - body->lgd_len = len; - - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len); - ptlrpc_request_set_replen(req); - - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (!body) { - rc = -EFAULT; - goto 
out; - } - - ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); - if (!ptr) { - rc = -EFAULT; - goto out; - } - - memcpy(buf, ptr, len); -out: - ptlrpc_req_finished(req); -err_exit: - LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp); - return rc; -} - -static int llog_client_read_header(const struct lu_env *env, - struct llog_handle *handle) -{ - struct obd_import *imp; - struct ptlrpc_request *req = NULL; - struct llogd_body *body; - struct llog_log_hdr *hdr; - struct llog_rec_hdr *llh_hdr; - int rc; - - LLOG_CLIENT_ENTRY(handle->lgh_ctxt, imp); - req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER, - LUSTRE_LOG_VERSION, - LLOG_ORIGIN_HANDLE_READ_HEADER); - if (!req) { - rc = -ENOMEM; - goto err_exit; - } - - body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); - body->lgd_logid = handle->lgh_id; - body->lgd_ctxt_idx = handle->lgh_ctxt->loc_idx - 1; - body->lgd_llh_flags = handle->lgh_hdr->llh_flags; - - ptlrpc_request_set_replen(req); - rc = ptlrpc_queue_wait(req); - if (rc) - goto out; - - hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR); - if (!hdr) { - rc = -EFAULT; - goto out; - } - - if (handle->lgh_hdr_size < hdr->llh_hdr.lrh_len) { - rc = -EFAULT; - goto out; - } - - memcpy(handle->lgh_hdr, hdr, hdr->llh_hdr.lrh_len); - handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index; - - /* sanity checks */ - llh_hdr = &handle->lgh_hdr->llh_hdr; - if (llh_hdr->lrh_type != LLOG_HDR_MAGIC) { - CERROR("bad log header magic: %#x (expecting %#x)\n", - llh_hdr->lrh_type, LLOG_HDR_MAGIC); - rc = -EIO; - } else if (llh_hdr->lrh_len != - LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len || - (llh_hdr->lrh_len & (llh_hdr->lrh_len - 1)) || - llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE || - llh_hdr->lrh_len > handle->lgh_hdr_size) { - CERROR("incorrectly sized log header: %#x (expecting %#x) (power of two > 8192)\n", - llh_hdr->lrh_len, - LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len); - CERROR("you may need to re-run lconf --write_conf.\n"); 
- rc = -EIO; - } -out: - ptlrpc_req_finished(req); -err_exit: - LLOG_CLIENT_EXIT(handle->lgh_ctxt, imp); - return rc; -} - -static int llog_client_close(const struct lu_env *env, - struct llog_handle *handle) -{ - /* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because - * the servers all close the file at the end of every - * other LLOG_ RPC. - */ - return 0; -} - -struct llog_operations llog_client_ops = { - .lop_next_block = llog_client_next_block, - .lop_prev_block = llog_client_prev_block, - .lop_read_header = llog_client_read_header, - .lop_open = llog_client_open, - .lop_close = llog_client_close, -}; -EXPORT_SYMBOL(llog_client_ops); diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c deleted file mode 100644 index b871d9e40a9e..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c +++ /dev/null @@ -1,67 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/llog_net.c - * - * OST<->MDS recovery logging infrastructure. - * - * Invariants in implementation: - * - we do not share logs among different OST<->MDS connections, so that - * if an OST or MDS fails it need only look at log(s) relevant to itself - * - * Author: Andreas Dilger - */ - -#define DEBUG_SUBSYSTEM S_LOG - -#include -#include -#include - -int llog_initiator_connect(struct llog_ctxt *ctxt) -{ - struct obd_import *new_imp; - - LASSERT(ctxt); - new_imp = ctxt->loc_obd->u.cli.cl_import; - LASSERTF(!ctxt->loc_imp || ctxt->loc_imp == new_imp, - "%p - %p\n", ctxt->loc_imp, new_imp); - mutex_lock(&ctxt->loc_mutex); - if (ctxt->loc_imp != new_imp) { - if (ctxt->loc_imp) - class_import_put(ctxt->loc_imp); - ctxt->loc_imp = class_import_get(new_imp); - } - mutex_unlock(&ctxt->loc_mutex); - return 0; -} -EXPORT_SYMBOL(llog_initiator_connect); diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c deleted file mode 100644 index 0b638837f88b..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c +++ /dev/null @@ -1,1316 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#define DEBUG_SUBSYSTEM S_CLASS - -#include -#include -#include -#include -#include -#include -#include "ptlrpc_internal.h" - -static struct ll_rpc_opcode { - __u32 opcode; - const char *opname; -} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = { - { OST_REPLY, "ost_reply" }, - { OST_GETATTR, "ost_getattr" }, - { OST_SETATTR, "ost_setattr" }, - { OST_READ, "ost_read" }, - { OST_WRITE, "ost_write" }, - { OST_CREATE, "ost_create" }, - { OST_DESTROY, "ost_destroy" }, - { OST_GET_INFO, "ost_get_info" }, - { OST_CONNECT, "ost_connect" }, - { OST_DISCONNECT, "ost_disconnect" }, - { OST_PUNCH, "ost_punch" }, - { OST_OPEN, "ost_open" }, - { OST_CLOSE, "ost_close" }, - { OST_STATFS, "ost_statfs" }, - { 14, NULL }, /* formerly OST_SAN_READ */ - { 15, NULL }, /* formerly OST_SAN_WRITE */ - { OST_SYNC, "ost_sync" }, - { OST_SET_INFO, "ost_set_info" }, - { OST_QUOTACHECK, "ost_quotacheck" }, - { OST_QUOTACTL, "ost_quotactl" }, - { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" }, - { MDS_GETATTR, "mds_getattr" }, - { MDS_GETATTR_NAME, "mds_getattr_lock" }, - { MDS_CLOSE, "mds_close" }, - { MDS_REINT, "mds_reint" }, - { MDS_READPAGE, "mds_readpage" }, - { MDS_CONNECT, "mds_connect" }, - { MDS_DISCONNECT, "mds_disconnect" }, - { MDS_GETSTATUS, "mds_getstatus" }, - { MDS_STATFS, "mds_statfs" }, - { MDS_PIN, "mds_pin" }, - { MDS_UNPIN, "mds_unpin" }, - { MDS_SYNC, "mds_sync" }, - { MDS_DONE_WRITING, "mds_done_writing" }, - { MDS_SET_INFO, "mds_set_info" }, - { MDS_QUOTACHECK, "mds_quotacheck" }, - 
{ MDS_QUOTACTL, "mds_quotactl" }, - { MDS_GETXATTR, "mds_getxattr" }, - { MDS_SETXATTR, "mds_setxattr" }, - { MDS_WRITEPAGE, "mds_writepage" }, - { MDS_IS_SUBDIR, "mds_is_subdir" }, - { MDS_GET_INFO, "mds_get_info" }, - { MDS_HSM_STATE_GET, "mds_hsm_state_get" }, - { MDS_HSM_STATE_SET, "mds_hsm_state_set" }, - { MDS_HSM_ACTION, "mds_hsm_action" }, - { MDS_HSM_PROGRESS, "mds_hsm_progress" }, - { MDS_HSM_REQUEST, "mds_hsm_request" }, - { MDS_HSM_CT_REGISTER, "mds_hsm_ct_register" }, - { MDS_HSM_CT_UNREGISTER, "mds_hsm_ct_unregister" }, - { MDS_SWAP_LAYOUTS, "mds_swap_layouts" }, - { LDLM_ENQUEUE, "ldlm_enqueue" }, - { LDLM_CONVERT, "ldlm_convert" }, - { LDLM_CANCEL, "ldlm_cancel" }, - { LDLM_BL_CALLBACK, "ldlm_bl_callback" }, - { LDLM_CP_CALLBACK, "ldlm_cp_callback" }, - { LDLM_GL_CALLBACK, "ldlm_gl_callback" }, - { LDLM_SET_INFO, "ldlm_set_info" }, - { MGS_CONNECT, "mgs_connect" }, - { MGS_DISCONNECT, "mgs_disconnect" }, - { MGS_EXCEPTION, "mgs_exception" }, - { MGS_TARGET_REG, "mgs_target_reg" }, - { MGS_TARGET_DEL, "mgs_target_del" }, - { MGS_SET_INFO, "mgs_set_info" }, - { MGS_CONFIG_READ, "mgs_config_read" }, - { OBD_PING, "obd_ping" }, - { OBD_LOG_CANCEL, "llog_cancel" }, - { OBD_QC_CALLBACK, "obd_quota_callback" }, - { OBD_IDX_READ, "dt_index_read" }, - { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_open" }, - { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" }, - { LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" }, - { LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" }, - { LLOG_ORIGIN_HANDLE_CLOSE, "llog_origin_handle_close" }, - { LLOG_ORIGIN_CONNECT, "llog_origin_connect" }, - { LLOG_CATINFO, "llog_catinfo" }, - { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" }, - { LLOG_ORIGIN_HANDLE_DESTROY, "llog_origin_handle_destroy" }, - { QUOTA_DQACQ, "quota_acquire" }, - { QUOTA_DQREL, "quota_release" }, - { SEQ_QUERY, "seq_query" }, - { SEC_CTX_INIT, "sec_ctx_init" }, - { SEC_CTX_INIT_CONT, 
"sec_ctx_init_cont" }, - { SEC_CTX_FINI, "sec_ctx_fini" }, - { FLD_QUERY, "fld_query" }, - { FLD_READ, "fld_read" }, -}; - -static struct ll_eopcode { - __u32 opcode; - const char *opname; -} ll_eopcode_table[EXTRA_LAST_OPC] = { - { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" }, - { LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" }, - { LDLM_EXTENT_ENQUEUE, "ldlm_extent_enqueue" }, - { LDLM_FLOCK_ENQUEUE, "ldlm_flock_enqueue" }, - { LDLM_IBITS_ENQUEUE, "ldlm_ibits_enqueue" }, - { MDS_REINT_SETATTR, "mds_reint_setattr" }, - { MDS_REINT_CREATE, "mds_reint_create" }, - { MDS_REINT_LINK, "mds_reint_link" }, - { MDS_REINT_UNLINK, "mds_reint_unlink" }, - { MDS_REINT_RENAME, "mds_reint_rename" }, - { MDS_REINT_OPEN, "mds_reint_open" }, - { MDS_REINT_SETXATTR, "mds_reint_setxattr" }, - { BRW_READ_BYTES, "read_bytes" }, - { BRW_WRITE_BYTES, "write_bytes" }, -}; - -const char *ll_opcode2str(__u32 opcode) -{ - /* When one of the assertions below fail, chances are that: - * 1) A new opcode was added in include/lustre/lustre_idl.h, - * but is missing from the table above. - * or 2) The opcode space was renumbered or rearranged, - * and the opcode_offset() function in - * ptlrpc_internal.h needs to be modified. 
- */ - __u32 offset = opcode_offset(opcode); - - LASSERTF(offset < LUSTRE_MAX_OPCODES, - "offset %u >= LUSTRE_MAX_OPCODES %u\n", - offset, LUSTRE_MAX_OPCODES); - LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode, - "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n", - offset, ll_rpc_opcode_table[offset].opcode, opcode); - return ll_rpc_opcode_table[offset].opname; -} - -static const char *ll_eopcode2str(__u32 opcode) -{ - LASSERT(ll_eopcode_table[opcode].opcode == opcode); - return ll_eopcode_table[opcode].opname; -} - -static void -ptlrpc_ldebugfs_register(struct dentry *root, char *dir, - char *name, - struct dentry **debugfs_root_ret, - struct lprocfs_stats **stats_ret) -{ - struct dentry *svc_debugfs_entry; - struct lprocfs_stats *svc_stats; - int i; - unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX | - LPROCFS_CNTR_STDDEV; - - LASSERT(!*debugfs_root_ret); - LASSERT(!*stats_ret); - - svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES, - 0); - if (!svc_stats) - return; - - if (dir) - svc_debugfs_entry = debugfs_create_dir(dir, root); - else - svc_debugfs_entry = root; - - lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR, - svc_counter_config, "req_waittime", "usec"); - lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR, - svc_counter_config, "req_qdepth", "reqs"); - lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR, - svc_counter_config, "req_active", "reqs"); - lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT, - svc_counter_config, "req_timeout", "sec"); - lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR, - svc_counter_config, "reqbuf_avail", "bufs"); - for (i = 0; i < EXTRA_LAST_OPC; i++) { - char *units; - - switch (i) { - case BRW_WRITE_BYTES: - case BRW_READ_BYTES: - units = "bytes"; - break; - default: - units = "reqs"; - break; - } - lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i, - svc_counter_config, - ll_eopcode2str(i), units); - } - for (i = 0; i < LUSTRE_MAX_OPCODES; i++) { - __u32 opcode = 
ll_rpc_opcode_table[i].opcode; - - lprocfs_counter_init(svc_stats, - EXTRA_MAX_OPCODES + i, svc_counter_config, - ll_opcode2str(opcode), "usec"); - } - - debugfs_create_file("stats", 0644, svc_debugfs_entry, svc_stats, - &lprocfs_stats_seq_fops); - if (dir) - *debugfs_root_ret = svc_debugfs_entry; - *stats_ret = svc_stats; -} - -static int -ptlrpc_lprocfs_req_history_len_seq_show(struct seq_file *m, void *v) -{ - struct ptlrpc_service *svc = m->private; - struct ptlrpc_service_part *svcpt; - int total = 0; - int i; - - ptlrpc_service_for_each_part(svcpt, i, svc) - total += svcpt->scp_hist_nrqbds; - - seq_printf(m, "%d\n", total); - return 0; -} - -LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_req_history_len); - -static int -ptlrpc_lprocfs_req_history_max_seq_show(struct seq_file *m, void *n) -{ - struct ptlrpc_service *svc = m->private; - struct ptlrpc_service_part *svcpt; - int total = 0; - int i; - - ptlrpc_service_for_each_part(svcpt, i, svc) - total += svc->srv_hist_nrqbds_cpt_max; - - seq_printf(m, "%d\n", total); - return 0; -} - -static ssize_t -ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private; - int bufpages; - int val; - int rc; - - rc = lprocfs_write_helper(buffer, count, &val); - if (rc < 0) - return rc; - - if (val < 0) - return -ERANGE; - - /* This sanity check is more of an insanity check; we can still - * hose a kernel by allowing the request history to grow too - * far. 
- */ - bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT; - if (val > totalram_pages / (2 * bufpages)) - return -ERANGE; - - spin_lock(&svc->srv_lock); - - if (val == 0) - svc->srv_hist_nrqbds_cpt_max = 0; - else - svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts)); - - spin_unlock(&svc->srv_lock); - - return count; -} - -LPROC_SEQ_FOPS(ptlrpc_lprocfs_req_history_max); - -static ssize_t threads_min_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service, - srv_kobj); - - return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_init * svc->srv_ncpts); -} - -static ssize_t threads_min_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service, - srv_kobj); - unsigned long val; - int rc = kstrtoul(buffer, 10, &val); - - if (rc < 0) - return rc; - - if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT) - return -ERANGE; - - spin_lock(&svc->srv_lock); - if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) { - spin_unlock(&svc->srv_lock); - return -ERANGE; - } - - svc->srv_nthrs_cpt_init = val / svc->srv_ncpts; - - spin_unlock(&svc->srv_lock); - - return count; -} -LUSTRE_RW_ATTR(threads_min); - -static ssize_t threads_started_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service, - srv_kobj); - struct ptlrpc_service_part *svcpt; - int total = 0; - int i; - - ptlrpc_service_for_each_part(svcpt, i, svc) - total += svcpt->scp_nthrs_running; - - return sprintf(buf, "%d\n", total); -} -LUSTRE_RO_ATTR(threads_started); - -static ssize_t threads_max_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service, - srv_kobj); - - return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_limit * svc->srv_ncpts); -} - -static ssize_t 
threads_max_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service, - srv_kobj); - unsigned long val; - int rc = kstrtoul(buffer, 10, &val); - - if (rc < 0) - return rc; - - if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT) - return -ERANGE; - - spin_lock(&svc->srv_lock); - if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) { - spin_unlock(&svc->srv_lock); - return -ERANGE; - } - - svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts; - - spin_unlock(&svc->srv_lock); - - return count; -} -LUSTRE_RW_ATTR(threads_max); - -/** - * \addtogoup nrs - * @{ - */ - -/** - * Translates \e ptlrpc_nrs_pol_state values to human-readable strings. - * - * \param[in] state The policy state - */ -static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state) -{ - switch (state) { - default: - LBUG(); - case NRS_POL_STATE_INVALID: - return "invalid"; - case NRS_POL_STATE_STOPPED: - return "stopped"; - case NRS_POL_STATE_STOPPING: - return "stopping"; - case NRS_POL_STATE_STARTING: - return "starting"; - case NRS_POL_STATE_STARTED: - return "started"; - } -} - -/** - * Obtains status information for \a policy. - * - * Information is copied in \a info. - * - * \param[in] policy The policy - * \param[out] info Holds returned status information - */ -static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_pol_info *info) -{ - assert_spin_locked(&policy->pol_nrs->nrs_lock); - - memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX); - - info->pi_fallback = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK); - info->pi_state = policy->pol_state; - /** - * XXX: These are accessed without holding - * ptlrpc_service_part::scp_req_lock. - */ - info->pi_req_queued = policy->pol_req_queued; - info->pi_req_started = policy->pol_req_started; -} - -/** - * Reads and prints policy status information for all policies of a PTLRPC - * service. 
- */ -static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n) -{ - struct ptlrpc_service *svc = m->private; - struct ptlrpc_service_part *svcpt; - struct ptlrpc_nrs *nrs; - struct ptlrpc_nrs_policy *policy; - struct ptlrpc_nrs_pol_info *infos; - struct ptlrpc_nrs_pol_info tmp; - unsigned int num_pols; - unsigned int pol_idx = 0; - bool hp = false; - int i; - int rc = 0; - - /** - * Serialize NRS core lprocfs operations with policy registration/ - * unregistration. - */ - mutex_lock(&nrs_core.nrs_mutex); - - /** - * Use the first service partition's regular NRS head in order to obtain - * the number of policies registered with NRS heads of this service. All - * service partitions will have the same number of policies. - */ - nrs = nrs_svcpt2nrs(svc->srv_parts[0], false); - - spin_lock(&nrs->nrs_lock); - num_pols = svc->srv_parts[0]->scp_nrs_reg.nrs_num_pols; - spin_unlock(&nrs->nrs_lock); - - infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS); - if (!infos) { - rc = -ENOMEM; - goto unlock; - } -again: - - ptlrpc_service_for_each_part(svcpt, i, svc) { - nrs = nrs_svcpt2nrs(svcpt, hp); - spin_lock(&nrs->nrs_lock); - - pol_idx = 0; - - list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) { - LASSERT(pol_idx < num_pols); - - nrs_policy_get_info_locked(policy, &tmp); - /** - * Copy values when handling the first service - * partition. - */ - if (i == 0) { - memcpy(infos[pol_idx].pi_name, tmp.pi_name, - NRS_POL_NAME_MAX); - memcpy(&infos[pol_idx].pi_state, &tmp.pi_state, - sizeof(tmp.pi_state)); - infos[pol_idx].pi_fallback = tmp.pi_fallback; - /** - * For the rest of the service partitions - * sanity-check the values we get. - */ - } else { - LASSERT(strncmp(infos[pol_idx].pi_name, - tmp.pi_name, - NRS_POL_NAME_MAX) == 0); - /** - * Not asserting ptlrpc_nrs_pol_info::pi_state, - * because it may be different between - * instances of the same policy in different - * service partitions. 
- */ - LASSERT(infos[pol_idx].pi_fallback == - tmp.pi_fallback); - } - - infos[pol_idx].pi_req_queued += tmp.pi_req_queued; - infos[pol_idx].pi_req_started += tmp.pi_req_started; - - pol_idx++; - } - spin_unlock(&nrs->nrs_lock); - } - - /** - * Policy status information output is in YAML format. - * For example: - * - * regular_requests: - * - name: fifo - * state: started - * fallback: yes - * queued: 0 - * active: 0 - * - * - name: crrn - * state: started - * fallback: no - * queued: 2015 - * active: 384 - * - * high_priority_requests: - * - name: fifo - * state: started - * fallback: yes - * queued: 0 - * active: 2 - * - * - name: crrn - * state: stopped - * fallback: no - * queued: 0 - * active: 0 - */ - seq_printf(m, "%s\n", - !hp ? "\nregular_requests:" : "high_priority_requests:"); - - for (pol_idx = 0; pol_idx < num_pols; pol_idx++) { - seq_printf(m, " - name: %s\n" - " state: %s\n" - " fallback: %s\n" - " queued: %-20d\n" - " active: %-20d\n\n", - infos[pol_idx].pi_name, - nrs_state2str(infos[pol_idx].pi_state), - infos[pol_idx].pi_fallback ? "yes" : "no", - (int)infos[pol_idx].pi_req_queued, - (int)infos[pol_idx].pi_req_started); - } - - if (!hp && nrs_svc_has_hp(svc)) { - memset(infos, 0, num_pols * sizeof(*infos)); - - /** - * Redo the processing for the service's HP NRS heads' policies. - */ - hp = true; - goto again; - } - - kfree(infos); -unlock: - mutex_unlock(&nrs_core.nrs_mutex); - - return rc; -} - -/** - * The longest valid command string is the maximum policy name size, plus the - * length of the " reg" substring - */ -#define LPROCFS_NRS_WR_MAX_CMD (NRS_POL_NAME_MAX + sizeof(" reg") - 1) - -/** - * Starts and stops a given policy on a PTLRPC service. - * - * Commands consist of the policy name, followed by an optional [reg|hp] token; - * if the optional token is omitted, the operation is performed on both the - * regular and high-priority (if the service has one) NRS head. 
- */ -static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) -{ - struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private; - enum ptlrpc_nrs_queue_type queue = PTLRPC_NRS_QUEUE_BOTH; - char *cmd; - char *cmd_copy = NULL; - char *token; - int rc = 0; - - if (count >= LPROCFS_NRS_WR_MAX_CMD) - return -EINVAL; - - cmd = kzalloc(LPROCFS_NRS_WR_MAX_CMD, GFP_NOFS); - if (!cmd) - return -ENOMEM; - /** - * strsep() modifies its argument, so keep a copy - */ - cmd_copy = cmd; - - if (copy_from_user(cmd, buffer, count)) { - rc = -EFAULT; - goto out; - } - - cmd[count] = '\0'; - - token = strsep(&cmd, " "); - - if (strlen(token) > NRS_POL_NAME_MAX - 1) { - rc = -EINVAL; - goto out; - } - - /** - * No [reg|hp] token has been specified - */ - if (!cmd) - goto default_queue; - - /** - * The second token is either NULL, or an optional [reg|hp] string - */ - if (strcmp(cmd, "reg") == 0) { - queue = PTLRPC_NRS_QUEUE_REG; - } else if (strcmp(cmd, "hp") == 0) { - queue = PTLRPC_NRS_QUEUE_HP; - } else { - rc = -EINVAL; - goto out; - } - -default_queue: - - if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) { - rc = -ENODEV; - goto out; - } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) { - queue = PTLRPC_NRS_QUEUE_REG; - } - - /** - * Serialize NRS core lprocfs operations with policy registration/ - * unregistration. - */ - mutex_lock(&nrs_core.nrs_mutex); - - rc = ptlrpc_nrs_policy_control(svc, queue, token, PTLRPC_NRS_CTL_START, - false, NULL); - - mutex_unlock(&nrs_core.nrs_mutex); -out: - kfree(cmd_copy); - - return rc < 0 ? 
rc : count; -} - -LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs); - -/** @} nrs */ - -struct ptlrpc_srh_iterator { - int srhi_idx; - __u64 srhi_seq; - struct ptlrpc_request *srhi_req; -}; - -static int -ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt, - struct ptlrpc_srh_iterator *srhi, - __u64 seq) -{ - struct list_head *e; - struct ptlrpc_request *req; - - if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled && - srhi->srhi_seq <= seq) { - /* If srhi_req was set previously, hasn't been culled and - * we're searching for a seq on or after it (i.e. more - * recent), search from it onwards. - * Since the service history is LRU (i.e. culled reqs will - * be near the head), we shouldn't have to do long - * re-scans - */ - LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq, - "%s:%d: seek seq %llu, request seq %llu\n", - svcpt->scp_service->srv_name, svcpt->scp_cpt, - srhi->srhi_seq, srhi->srhi_req->rq_history_seq); - LASSERTF(!list_empty(&svcpt->scp_hist_reqs), - "%s:%d: seek offset %llu, request seq %llu, last culled %llu\n", - svcpt->scp_service->srv_name, svcpt->scp_cpt, - seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled); - e = &srhi->srhi_req->rq_history_list; - } else { - /* search from start */ - e = svcpt->scp_hist_reqs.next; - } - - while (e != &svcpt->scp_hist_reqs) { - req = list_entry(e, struct ptlrpc_request, rq_history_list); - - if (req->rq_history_seq >= seq) { - srhi->srhi_seq = req->rq_history_seq; - srhi->srhi_req = req; - return 0; - } - e = e->next; - } - - return -ENOENT; -} - -/* - * ptlrpc history sequence is used as "position" of seq_file, in some case, - * seq_read() will increase "position" to indicate reading the next - * element, however, low bits of history sequence are reserved for CPT id - * (check the details from comments before ptlrpc_req_add_history), which - * means seq_read() might change CPT id of history sequence and never - * finish reading of requests on a CPT. 
To make it work, we have to shift - * CPT id to high bits and timestamp to low bits, so seq_read() will only - * increase timestamp which can correctly indicate the next position. - */ - -/* convert seq_file pos to cpt */ -#define PTLRPC_REQ_POS2CPT(svc, pos) \ - ((svc)->srv_cpt_bits == 0 ? 0 : \ - (__u64)(pos) >> (64 - (svc)->srv_cpt_bits)) - -/* make up seq_file pos from cpt */ -#define PTLRPC_REQ_CPT2POS(svc, cpt) \ - ((svc)->srv_cpt_bits == 0 ? 0 : \ - (cpt) << (64 - (svc)->srv_cpt_bits)) - -/* convert sequence to position */ -#define PTLRPC_REQ_SEQ2POS(svc, seq) \ - ((svc)->srv_cpt_bits == 0 ? (seq) : \ - ((seq) >> (svc)->srv_cpt_bits) | \ - ((seq) << (64 - (svc)->srv_cpt_bits))) - -/* convert position to sequence */ -#define PTLRPC_REQ_POS2SEQ(svc, pos) \ - ((svc)->srv_cpt_bits == 0 ? (pos) : \ - ((__u64)(pos) << (svc)->srv_cpt_bits) | \ - ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits))) - -static void * -ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos) -{ - struct ptlrpc_service *svc = s->private; - struct ptlrpc_service_part *svcpt; - struct ptlrpc_srh_iterator *srhi; - unsigned int cpt; - int rc; - int i; - - if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */ - CWARN("Failed to read request history because size of loff_t %d can't match size of u64\n", - (int)sizeof(loff_t)); - return NULL; - } - - srhi = kzalloc(sizeof(*srhi), GFP_NOFS); - if (!srhi) - return NULL; - - srhi->srhi_seq = 0; - srhi->srhi_req = NULL; - - cpt = PTLRPC_REQ_POS2CPT(svc, *pos); - - ptlrpc_service_for_each_part(svcpt, i, svc) { - if (i < cpt) /* skip */ - continue; - if (i > cpt) /* make up the lowest position for this CPT */ - *pos = PTLRPC_REQ_CPT2POS(svc, i); - - spin_lock(&svcpt->scp_lock); - rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, - PTLRPC_REQ_POS2SEQ(svc, *pos)); - spin_unlock(&svcpt->scp_lock); - if (rc == 0) { - *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq); - srhi->srhi_idx = i; - return srhi; - } - } - - kfree(srhi); - return NULL; 
-} - -static void -ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter) -{ - struct ptlrpc_srh_iterator *srhi = iter; - - kfree(srhi); -} - -static void * -ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s, - void *iter, loff_t *pos) -{ - struct ptlrpc_service *svc = s->private; - struct ptlrpc_srh_iterator *srhi = iter; - struct ptlrpc_service_part *svcpt; - __u64 seq; - int rc; - int i; - - for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) { - svcpt = svc->srv_parts[i]; - - if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */ - srhi->srhi_req = NULL; - seq = 0; - srhi->srhi_seq = 0; - } else { /* the next sequence */ - seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits); - } - - spin_lock(&svcpt->scp_lock); - rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq); - spin_unlock(&svcpt->scp_lock); - if (rc == 0) { - *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq); - srhi->srhi_idx = i; - return srhi; - } - } - - kfree(srhi); - return NULL; -} - -static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) -{ - struct ptlrpc_service *svc = s->private; - struct ptlrpc_srh_iterator *srhi = iter; - struct ptlrpc_service_part *svcpt; - struct ptlrpc_request *req; - int rc; - - LASSERT(srhi->srhi_idx < svc->srv_ncpts); - - svcpt = svc->srv_parts[srhi->srhi_idx]; - - spin_lock(&svcpt->scp_lock); - - rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq); - - if (rc == 0) { - struct timespec64 arrival, sent, arrivaldiff; - char nidstr[LNET_NIDSTR_SIZE]; - - req = srhi->srhi_req; - - libcfs_nid2str_r(req->rq_self, nidstr, sizeof(nidstr)); - arrival.tv_sec = req->rq_arrival_time.tv_sec; - arrival.tv_nsec = req->rq_arrival_time.tv_nsec; - sent.tv_sec = req->rq_sent; - sent.tv_nsec = 0; - arrivaldiff = timespec64_sub(sent, arrival); - - /* Print common req fields. - * CAVEAT EMPTOR: we're racing with the service handler - * here. 
The request could contain any old crap, so you - * must be just as careful as the service's request - * parser. Currently I only print stuff here I know is OK - * to look at coz it was set up in request_in_callback()!!! - */ - seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld.%06lld:%lld.%06llds(%+lld.0s) ", - req->rq_history_seq, nidstr, - libcfs_id2str(req->rq_peer), req->rq_xid, - req->rq_reqlen, ptlrpc_rqphase2str(req), - (s64)req->rq_arrival_time.tv_sec, - (s64)req->rq_arrival_time.tv_nsec / NSEC_PER_USEC, - (s64)arrivaldiff.tv_sec, - (s64)(arrivaldiff.tv_nsec / NSEC_PER_USEC), - (s64)(req->rq_sent - req->rq_deadline)); - if (!svc->srv_ops.so_req_printer) - seq_putc(s, '\n'); - else - svc->srv_ops.so_req_printer(s, srhi->srhi_req); - } - - spin_unlock(&svcpt->scp_lock); - return rc; -} - -static int -ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file) -{ - static const struct seq_operations sops = { - .start = ptlrpc_lprocfs_svc_req_history_start, - .stop = ptlrpc_lprocfs_svc_req_history_stop, - .next = ptlrpc_lprocfs_svc_req_history_next, - .show = ptlrpc_lprocfs_svc_req_history_show, - }; - struct seq_file *seqf; - int rc; - - rc = seq_open(file, &sops); - if (rc) - return rc; - - seqf = file->private_data; - seqf->private = inode->i_private; - return 0; -} - -/* See also lprocfs_rd_timeouts */ -static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n) -{ - struct ptlrpc_service *svc = m->private; - struct ptlrpc_service_part *svcpt; - struct dhms ts; - time64_t worstt; - unsigned int cur; - unsigned int worst; - int i; - - if (AT_OFF) { - seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n", - obd_timeout); - return 0; - } - - ptlrpc_service_for_each_part(svcpt, i, svc) { - cur = at_get(&svcpt->scp_at_estimate); - worst = svcpt->scp_at_estimate.at_worst_ever; - worstt = svcpt->scp_at_estimate.at_worst_time; - s2dhms(&ts, ktime_get_real_seconds() - worstt); - - seq_printf(m, "%10s : cur %3u worst %3u (at %lld, " - 
DHMS_FMT " ago) ", "service", - cur, worst, (s64)worstt, DHMS_VARS(&ts)); - - lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate); - } - - return 0; -} - -LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_timeouts); - -static ssize_t high_priority_ratio_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service, - srv_kobj); - return sprintf(buf, "%d\n", svc->srv_hpreq_ratio); -} - -static ssize_t high_priority_ratio_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) -{ - struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service, - srv_kobj); - int rc; - int val; - - rc = kstrtoint(buffer, 10, &val); - if (rc < 0) - return rc; - - if (val < 0) - return -ERANGE; - - spin_lock(&svc->srv_lock); - svc->srv_hpreq_ratio = val; - spin_unlock(&svc->srv_lock); - - return count; -} -LUSTRE_RW_ATTR(high_priority_ratio); - -static struct attribute *ptlrpc_svc_attrs[] = { - &lustre_attr_threads_min.attr, - &lustre_attr_threads_started.attr, - &lustre_attr_threads_max.attr, - &lustre_attr_high_priority_ratio.attr, - NULL, -}; - -static void ptlrpc_sysfs_svc_release(struct kobject *kobj) -{ - struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service, - srv_kobj); - - complete(&svc->srv_kobj_unregister); -} - -static struct kobj_type ptlrpc_svc_ktype = { - .default_attrs = ptlrpc_svc_attrs, - .sysfs_ops = &lustre_sysfs_ops, - .release = ptlrpc_sysfs_svc_release, -}; - -void ptlrpc_sysfs_unregister_service(struct ptlrpc_service *svc) -{ - /* Let's see if we had a chance at initialization first */ - if (svc->srv_kobj.kset) { - kobject_put(&svc->srv_kobj); - wait_for_completion(&svc->srv_kobj_unregister); - } -} - -int ptlrpc_sysfs_register_service(struct kset *parent, - struct ptlrpc_service *svc) -{ - int rc; - - svc->srv_kobj.kset = parent; - init_completion(&svc->srv_kobj_unregister); - rc = kobject_init_and_add(&svc->srv_kobj, &ptlrpc_svc_ktype, NULL, 
- "%s", svc->srv_name); - - return rc; -} - -void ptlrpc_ldebugfs_register_service(struct dentry *entry, - struct ptlrpc_service *svc) -{ - struct lprocfs_vars lproc_vars[] = { - {.name = "req_buffer_history_len", - .fops = &ptlrpc_lprocfs_req_history_len_fops, - .data = svc}, - {.name = "req_buffer_history_max", - .fops = &ptlrpc_lprocfs_req_history_max_fops, - .data = svc}, - {.name = "timeouts", - .fops = &ptlrpc_lprocfs_timeouts_fops, - .data = svc}, - {.name = "nrs_policies", - .fops = &ptlrpc_lprocfs_nrs_fops, - .data = svc}, - {NULL} - }; - static const struct file_operations req_history_fops = { - .owner = THIS_MODULE, - .open = ptlrpc_lprocfs_svc_req_history_open, - .read = seq_read, - .llseek = seq_lseek, - .release = lprocfs_seq_release, - }; - - ptlrpc_ldebugfs_register(entry, svc->srv_name, - "stats", &svc->srv_debugfs_entry, - &svc->srv_stats); - - if (IS_ERR_OR_NULL(svc->srv_debugfs_entry)) - return; - - ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL); - - debugfs_create_file("req_history", 0400, svc->srv_debugfs_entry, svc, - &req_history_fops); -} - -void ptlrpc_lprocfs_register_obd(struct obd_device *obddev) -{ - ptlrpc_ldebugfs_register(obddev->obd_debugfs_entry, NULL, "stats", - &obddev->obd_svc_debugfs_entry, - &obddev->obd_svc_stats); -} -EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd); - -void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount) -{ - struct lprocfs_stats *svc_stats; - __u32 op = lustre_msg_get_opc(req->rq_reqmsg); - int opc = opcode_offset(op); - - svc_stats = req->rq_import->imp_obd->obd_svc_stats; - if (!svc_stats || opc <= 0) - return; - LASSERT(opc < LUSTRE_MAX_OPCODES); - if (!(op == LDLM_ENQUEUE || op == MDS_REINT)) - lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount); -} - -void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) -{ - struct lprocfs_stats *svc_stats; - int idx; - - if (!req->rq_import) - return; - svc_stats = req->rq_import->imp_obd->obd_svc_stats; - if 
(!svc_stats) - return; - idx = lustre_msg_get_opc(req->rq_reqmsg); - switch (idx) { - case OST_READ: - idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR; - break; - case OST_WRITE: - idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR; - break; - default: - LASSERTF(0, "unsupported opcode %u\n", idx); - break; - } - - lprocfs_counter_add(svc_stats, idx, bytes); -} -EXPORT_SYMBOL(ptlrpc_lprocfs_brw); - -void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc) -{ - debugfs_remove_recursive(svc->srv_debugfs_entry); - - if (svc->srv_stats) - lprocfs_free_stats(&svc->srv_stats); -} - -void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) -{ - debugfs_remove_recursive(obd->obd_svc_debugfs_entry); - - if (obd->obd_svc_stats) - lprocfs_free_stats(&obd->obd_svc_stats); -} -EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd); - -#undef BUFLEN - -int lprocfs_wr_ping(struct file *file, const char __user *buffer, - size_t count, loff_t *off) -{ - struct obd_device *obd = ((struct seq_file *)file->private_data)->private; - struct ptlrpc_request *req; - int rc; - - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - req = ptlrpc_prep_ping(obd->u.cli.cl_import); - up_read(&obd->u.cli.cl_sem); - if (!req) - return -ENOMEM; - - req->rq_send_state = LUSTRE_IMP_FULL; - - rc = ptlrpc_queue_wait(req); - - ptlrpc_req_finished(req); - if (rc >= 0) - return count; - return rc; -} -EXPORT_SYMBOL(lprocfs_wr_ping); - -/* Write the connection UUID to this file to attempt to connect to that node. - * The connection UUID is a node's primary NID. For example, - * "echo connection=192.168.0.1@tcp0::instance > .../import". 
- */ -int lprocfs_wr_import(struct file *file, const char __user *buffer, - size_t count, loff_t *off) -{ - struct obd_device *obd = ((struct seq_file *)file->private_data)->private; - struct obd_import *imp = obd->u.cli.cl_import; - char *kbuf = NULL; - char *uuid; - char *ptr; - int do_reconn = 1; - const char prefix[] = "connection="; - const int prefix_len = sizeof(prefix) - 1; - - if (count > PAGE_SIZE - 1 || count <= prefix_len) - return -EINVAL; - - kbuf = kzalloc(count + 1, GFP_NOFS); - if (!kbuf) - return -ENOMEM; - - if (copy_from_user(kbuf, buffer, count)) { - count = -EFAULT; - goto out; - } - - kbuf[count] = 0; - - /* only support connection=uuid::instance now */ - if (strncmp(prefix, kbuf, prefix_len) != 0) { - count = -EINVAL; - goto out; - } - - uuid = kbuf + prefix_len; - ptr = strstr(uuid, "::"); - if (ptr) { - __u32 inst; - char *endptr; - - *ptr = 0; - do_reconn = 0; - ptr += strlen("::"); - inst = simple_strtoul(ptr, &endptr, 10); - if (*endptr) { - CERROR("config: wrong instance # %s\n", ptr); - } else if (inst != imp->imp_connect_data.ocd_instance) { - CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted target(%u/%u), reconnecting...\n", - imp->imp_obd->obd_name, - imp->imp_connect_data.ocd_instance, inst); - do_reconn = 1; - } else { - CDEBUG(D_INFO, "IR: %s has already been connecting to new target(%u)\n", - imp->imp_obd->obd_name, inst); - } - } - - if (do_reconn) - ptlrpc_recover_import(imp, uuid, 1); - -out: - kfree(kbuf); - return count; -} -EXPORT_SYMBOL(lprocfs_wr_import); - -int lprocfs_rd_pinger_recov(struct seq_file *m, void *n) -{ - struct obd_device *obd = m->private; - struct obd_import *imp = obd->u.cli.cl_import; - int rc; - - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - seq_printf(m, "%d\n", !imp->imp_no_pinger_recover); - up_read(&obd->u.cli.cl_sem); - - return 0; -} -EXPORT_SYMBOL(lprocfs_rd_pinger_recov); - -int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer, - size_t count, loff_t *off) 
-{ - struct obd_device *obd = ((struct seq_file *)file->private_data)->private; - struct client_obd *cli = &obd->u.cli; - struct obd_import *imp = cli->cl_import; - int rc, val; - - rc = lprocfs_write_helper(buffer, count, &val); - if (rc < 0) - return rc; - - if (val != 0 && val != 1) - return -ERANGE; - - rc = lprocfs_climp_check(obd); - if (rc) - return rc; - - spin_lock(&imp->imp_lock); - imp->imp_no_pinger_recover = !val; - spin_unlock(&imp->imp_lock); - up_read(&obd->u.cli.cl_sem); - - return count; -} -EXPORT_SYMBOL(lprocfs_wr_pinger_recov); diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c deleted file mode 100644 index 2897afb8806c..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c +++ /dev/null @@ -1,771 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_RPC -#include -#include -#include -#include -#include -#include -#include "ptlrpc_internal.h" - -/** - * Helper function. Sends \a len bytes from \a base at offset \a offset - * over \a conn connection to portal \a portal. - * Returns 0 on success or error code. - */ -static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len, - enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid, - struct ptlrpc_connection *conn, int portal, __u64 xid, - unsigned int offset) -{ - int rc; - struct lnet_md md; - - LASSERT(portal != 0); - CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer)); - md.start = base; - md.length = len; - md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1; - md.options = PTLRPC_MD_OPTIONS; - md.user_ptr = cbid; - md.eq_handle = ptlrpc_eq_h; - - if (unlikely(ack == LNET_ACK_REQ && - OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, - OBD_FAIL_ONCE))) { - /* don't ask for the ack to simulate failing client */ - ack = LNET_NOACK_REQ; - } - - rc = LNetMDBind(md, LNET_UNLINK, mdh); - if (unlikely(rc != 0)) { - CERROR("LNetMDBind failed: %d\n", rc); - LASSERT(rc == -ENOMEM); - return -ENOMEM; - } - - CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n", - len, portal, xid, offset); - - rc = LNetPut(conn->c_self, *mdh, ack, - conn->c_peer, portal, xid, offset, 0); - if (unlikely(rc != 0)) { - int rc2; - /* We're going to get an UNLINK event when I unlink below, - * which will complete just like any other failed send, so - * I fall through and return success here! - */ - CERROR("LNetPut(%s, %d, %lld) failed: %d\n", - libcfs_id2str(conn->c_peer), portal, xid, rc); - rc2 = LNetMDUnlink(*mdh); - LASSERTF(rc2 == 0, "rc2 = %d\n", rc2); - } - - return 0; -} - -static void mdunlink_iterate_helper(struct lnet_handle_md *bd_mds, int count) -{ - int i; - - for (i = 0; i < count; i++) - LNetMDUnlink(bd_mds[i]); -} - -/** - * Register bulk at the sender for later transfer. - * Returns 0 on success or error code. 
- */ -static int ptlrpc_register_bulk(struct ptlrpc_request *req) -{ - struct ptlrpc_bulk_desc *desc = req->rq_bulk; - struct lnet_process_id peer; - int rc = 0; - int rc2; - int posted_md; - int total_md; - u64 mbits; - struct lnet_handle_me me_h; - struct lnet_md md; - - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET)) - return 0; - - /* NB no locking required until desc is on the network */ - LASSERT(desc->bd_nob > 0); - LASSERT(desc->bd_md_count == 0); - LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT); - LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); - LASSERT(desc->bd_req); - LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type)); - - /* cleanup the state of the bulk for it will be reused */ - if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY) - desc->bd_nob_transferred = 0; - else - LASSERT(desc->bd_nob_transferred == 0); - - desc->bd_failure = 0; - - peer = desc->bd_import->imp_connection->c_peer; - - LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback); - LASSERT(desc->bd_cbid.cbid_arg == desc); - - total_md = DIV_ROUND_UP(desc->bd_iov_count, LNET_MAX_IOV); - /* rq_mbits is matchbits of the final bulk */ - mbits = req->rq_mbits - total_md + 1; - - LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK), - "first mbits = x%llu, last mbits = x%llu\n", - mbits, req->rq_mbits); - LASSERTF(!(desc->bd_registered && - req->rq_send_state != LUSTRE_IMP_REPLAY) || - mbits != desc->bd_last_mbits, - "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n", - desc->bd_registered, mbits, desc->bd_last_mbits); - - desc->bd_registered = 1; - desc->bd_last_mbits = mbits; - desc->bd_md_count = total_md; - md.user_ptr = &desc->bd_cbid; - md.eq_handle = ptlrpc_eq_h; - md.threshold = 1; /* PUT or GET */ - - for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) { - md.options = PTLRPC_MD_OPTIONS | - (ptlrpc_is_bulk_op_get(desc->bd_type) ? 
- LNET_MD_OP_GET : LNET_MD_OP_PUT); - ptlrpc_fill_bulk_md(&md, desc, posted_md); - - rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0, - LNET_UNLINK, LNET_INS_AFTER, &me_h); - if (rc != 0) { - CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n", - desc->bd_import->imp_obd->obd_name, mbits, - posted_md, rc); - break; - } - - /* About to let the network at it... */ - rc = LNetMDAttach(me_h, md, LNET_UNLINK, - &desc->bd_mds[posted_md]); - if (rc != 0) { - CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n", - desc->bd_import->imp_obd->obd_name, mbits, - posted_md, rc); - rc2 = LNetMEUnlink(me_h); - LASSERT(rc2 == 0); - break; - } - } - - if (rc != 0) { - LASSERT(rc == -ENOMEM); - spin_lock(&desc->bd_lock); - desc->bd_md_count -= total_md - posted_md; - spin_unlock(&desc->bd_lock); - LASSERT(desc->bd_md_count >= 0); - mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); - req->rq_status = -ENOMEM; - return -ENOMEM; - } - - spin_lock(&desc->bd_lock); - /* Holler if peer manages to touch buffers before he knows the mbits */ - if (desc->bd_md_count != total_md) - CWARN("%s: Peer %s touched %d buffers while I registered\n", - desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer), - total_md - desc->bd_md_count); - spin_unlock(&desc->bd_lock); - - CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n", - desc->bd_md_count, - ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink", - desc->bd_iov_count, desc->bd_nob, - desc->bd_last_mbits, req->rq_mbits, desc->bd_portal); - - return 0; -} - -/** - * Disconnect a bulk desc from the network. Idempotent. Not - * thread-safe (i.e. only interlocks with completion callback). - * Returns 1 on success or 0 if network unregistration failed for whatever - * reason. 
- */ -int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async) -{ - struct ptlrpc_bulk_desc *desc = req->rq_bulk; - wait_queue_head_t *wq; - int rc; - - LASSERT(!in_interrupt()); /* might sleep */ - - /* Let's setup deadline for reply unlink. */ - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) && - async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0) - req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK; - - if (ptlrpc_client_bulk_active(req) == 0) /* completed or */ - return 1; /* never registered */ - - LASSERT(desc->bd_req == req); /* bd_req NULL until registered */ - - /* the unlink ensures the callback happens ASAP and is the last - * one. If it fails, it must be because completion just happened, - * but we must still wait_event() in this case to give liblustre - * a chance to run client_bulk_callback() - */ - mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); - - if (ptlrpc_client_bulk_active(req) == 0) /* completed or */ - return 1; /* never registered */ - - /* Move to "Unregistering" phase as bulk was not unlinked yet. */ - ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK); - - /* Do not wait for unlink to finish. 
*/ - if (async) - return 0; - - if (req->rq_set) - wq = &req->rq_set->set_waitq; - else - wq = &req->rq_reply_waitq; - - for (;;) { - /* Network access will complete in finite time but the HUGE - * timeout lets us CWARN for visibility of sluggish LNDs - */ - int cnt = 0; - while (cnt < LONG_UNLINK && - (rc = wait_event_idle_timeout(*wq, - !ptlrpc_client_bulk_active(req), - HZ)) == 0) - cnt += 1; - if (rc > 0) { - ptlrpc_rqphase_move(req, req->rq_next_phase); - return 1; - } - - DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p", - desc); - } - return 0; -} - -static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags) -{ - struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; - struct ptlrpc_service *svc = svcpt->scp_service; - int service_time = max_t(int, ktime_get_real_seconds() - - req->rq_arrival_time.tv_sec, 1); - - if (!(flags & PTLRPC_REPLY_EARLY) && - (req->rq_type != PTL_RPC_MSG_ERR) && req->rq_reqmsg && - !(lustre_msg_get_flags(req->rq_reqmsg) & - (MSG_RESENT | MSG_REPLAY | - MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) { - /* early replies, errors and recovery requests don't count - * toward our service time estimate - */ - int oldse = at_measured(&svcpt->scp_at_estimate, service_time); - - if (oldse != 0) { - DEBUG_REQ(D_ADAPTTO, req, - "svc %s changed estimate from %d to %d", - svc->srv_name, oldse, - at_get(&svcpt->scp_at_estimate)); - } - } - /* Report actual service time for client latency calc */ - lustre_msg_set_service_time(req->rq_repmsg, service_time); - /* Report service time estimate for future client reqs, but report 0 - * (to be ignored by client) if it's a error reply during recovery. 
- * (bz15815) - */ - if (req->rq_type == PTL_RPC_MSG_ERR && !req->rq_export) - lustre_msg_set_timeout(req->rq_repmsg, 0); - else - lustre_msg_set_timeout(req->rq_repmsg, - at_get(&svcpt->scp_at_estimate)); - - if (req->rq_reqmsg && - !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) { - CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x req_flags=%#x magic=%x/%x len=%d\n", - flags, lustre_msg_get_flags(req->rq_reqmsg), - lustre_msg_get_magic(req->rq_reqmsg), - lustre_msg_get_magic(req->rq_repmsg), req->rq_replen); - } -} - -/** - * Send request reply from request \a req reply buffer. - * \a flags defines reply types - * Returns 0 on success or error code - */ -int ptlrpc_send_reply(struct ptlrpc_request *req, int flags) -{ - struct ptlrpc_reply_state *rs = req->rq_reply_state; - struct ptlrpc_connection *conn; - int rc; - - /* We must already have a reply buffer (only ptlrpc_error() may be - * called without one). The reply generated by sptlrpc layer (e.g. - * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must - * have a request buffer which is either the actual (swabbed) incoming - * request, or a saved copy if this is a req saved in - * target_queue_final_reply(). 
- */ - LASSERT(req->rq_no_reply == 0); - LASSERT(req->rq_reqbuf); - LASSERT(rs); - LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult); - LASSERT(req->rq_repmsg); - LASSERT(req->rq_repmsg == rs->rs_msg); - LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback); - LASSERT(rs->rs_cb_id.cbid_arg == rs); - - /* There may be no rq_export during failover */ - - if (unlikely(req->rq_export && req->rq_export->exp_obd && - req->rq_export->exp_obd->obd_fail)) { - /* Failed obd's only send ENODEV */ - req->rq_type = PTL_RPC_MSG_ERR; - req->rq_status = -ENODEV; - CDEBUG(D_HA, "sending ENODEV from failed obd %d\n", - req->rq_export->exp_obd->obd_minor); - } - - /* In order to keep interoperability with the client (< 2.3) which - * doesn't have pb_jobid in ptlrpc_body, We have to shrink the - * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the - * reply buffer on client will be overflow. - * - * XXX Remove this whenever we drop the interoperability with - * such client. - */ - req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0, - sizeof(struct ptlrpc_body_v2), 1); - - if (req->rq_type != PTL_RPC_MSG_ERR) - req->rq_type = PTL_RPC_MSG_REPLY; - - lustre_msg_set_type(req->rq_repmsg, req->rq_type); - lustre_msg_set_status(req->rq_repmsg, - ptlrpc_status_hton(req->rq_status)); - lustre_msg_set_opc(req->rq_repmsg, - req->rq_reqmsg ? 
- lustre_msg_get_opc(req->rq_reqmsg) : 0); - - target_pack_pool_reply(req); - - ptlrpc_at_set_reply(req, flags); - - if (!req->rq_export || !req->rq_export->exp_connection) - conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL); - else - conn = ptlrpc_connection_addref(req->rq_export->exp_connection); - - if (unlikely(!conn)) { - CERROR("not replying on NULL connection\n"); /* bug 9635 */ - return -ENOTCONN; - } - ptlrpc_rs_addref(rs); /* +1 ref for the network */ - - rc = sptlrpc_svc_wrap_reply(req); - if (unlikely(rc)) - goto out; - - req->rq_sent = ktime_get_real_seconds(); - - rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len, - (rs->rs_difficult && !rs->rs_no_ack) ? - LNET_ACK_REQ : LNET_NOACK_REQ, - &rs->rs_cb_id, conn, - ptlrpc_req2svc(req)->srv_rep_portal, - req->rq_xid, req->rq_reply_off); -out: - if (unlikely(rc != 0)) - ptlrpc_req_drop_rs(req); - ptlrpc_connection_put(conn); - return rc; -} - -int ptlrpc_reply(struct ptlrpc_request *req) -{ - if (req->rq_no_reply) - return 0; - return ptlrpc_send_reply(req, 0); -} - -/** - * For request \a req send an error reply back. Create empty - * reply buffers if necessary. - */ -int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult) -{ - int rc; - - if (req->rq_no_reply) - return 0; - - if (!req->rq_repmsg) { - rc = lustre_pack_reply(req, 1, NULL, NULL); - if (rc) - return rc; - } - - if (req->rq_status != -ENOSPC && req->rq_status != -EACCES && - req->rq_status != -EPERM && req->rq_status != -ENOENT && - req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT) - req->rq_type = PTL_RPC_MSG_ERR; - - rc = ptlrpc_send_reply(req, may_be_difficult); - return rc; -} - -int ptlrpc_error(struct ptlrpc_request *req) -{ - return ptlrpc_send_error(req, 0); -} - -/** - * Send request \a request. - * if \a noreply is set, don't expect any reply back and don't set up - * reply buffers. - * Returns 0 on success or error code. 
- */ -int ptl_send_rpc(struct ptlrpc_request *request, int noreply) -{ - int rc; - int rc2; - unsigned int mpflag = 0; - struct ptlrpc_connection *connection; - struct lnet_handle_me reply_me_h; - struct lnet_md reply_md; - struct obd_import *imp = request->rq_import; - struct obd_device *obd = imp->imp_obd; - - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC)) - return 0; - - LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST); - LASSERT(request->rq_wait_ctx == 0); - - /* If this is a re-transmit, we're required to have disengaged - * cleanly from the previous attempt - */ - LASSERT(!request->rq_receiving_reply); - LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) && - (imp->imp_state == LUSTRE_IMP_FULL))); - - if (unlikely(obd && obd->obd_fail)) { - CDEBUG(D_HA, "muting rpc for failed imp obd %s\n", - obd->obd_name); - /* this prevents us from waiting in ptlrpc_queue_wait */ - spin_lock(&request->rq_lock); - request->rq_err = 1; - spin_unlock(&request->rq_lock); - request->rq_status = -ENODEV; - return -ENODEV; - } - - connection = imp->imp_connection; - - lustre_msg_set_handle(request->rq_reqmsg, - &imp->imp_remote_handle); - lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST); - lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt); - lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags); - - /* - * If it's the first time to resend the request for EINPROGRESS, - * we need to allocate a new XID (see after_reply()), it's different - * from the resend for reply timeout. 
- */ - if (request->rq_nr_resend && list_empty(&request->rq_unreplied_list)) { - __u64 min_xid = 0; - /* - * resend for EINPROGRESS, allocate new xid to avoid reply - * reconstruction - */ - spin_lock(&imp->imp_lock); - ptlrpc_assign_next_xid_nolock(request); - min_xid = ptlrpc_known_replied_xid(imp); - spin_unlock(&imp->imp_lock); - - lustre_msg_set_last_xid(request->rq_reqmsg, min_xid); - DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for resend on EINPROGRESS"); - } - - if (request->rq_bulk) { - ptlrpc_set_bulk_mbits(request); - lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits); - } - - if (list_empty(&request->rq_unreplied_list) || - request->rq_xid <= imp->imp_known_replied_xid) { - DEBUG_REQ(D_ERROR, request, - "xid: %llu, replied: %llu, list_empty:%d\n", - request->rq_xid, imp->imp_known_replied_xid, - list_empty(&request->rq_unreplied_list)); - LBUG(); - } - - /** - * For enabled AT all request should have AT_SUPPORT in the - * FULL import state when OBD_CONNECT_AT is set - */ - LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL || - (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) || - !(imp->imp_connect_data.ocd_connect_flags & - OBD_CONNECT_AT)); - - if (request->rq_resend) - lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT); - - if (request->rq_memalloc) - mpflag = memalloc_noreclaim_save(); - - rc = sptlrpc_cli_wrap_request(request); - if (rc) { - /* - * set rq_sent so that this request is treated - * as a delayed send in the upper layers - */ - if (rc == -ENOMEM) - request->rq_sent = ktime_get_seconds(); - goto out; - } - - /* bulk register should be done after wrap_request() */ - if (request->rq_bulk) { - rc = ptlrpc_register_bulk(request); - if (rc != 0) - goto out; - } - - if (!noreply) { - LASSERT(request->rq_replen != 0); - if (!request->rq_repbuf) { - LASSERT(!request->rq_repdata); - LASSERT(!request->rq_repmsg); - rc = sptlrpc_cli_alloc_repbuf(request, - request->rq_replen); - if (rc) { - /* this prevents us from looping in - * 
ptlrpc_queue_wait - */ - spin_lock(&request->rq_lock); - request->rq_err = 1; - spin_unlock(&request->rq_lock); - request->rq_status = rc; - goto cleanup_bulk; - } - } else { - request->rq_repdata = NULL; - request->rq_repmsg = NULL; - } - - rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/ - connection->c_peer, request->rq_xid, 0, - LNET_UNLINK, LNET_INS_AFTER, &reply_me_h); - if (rc != 0) { - CERROR("LNetMEAttach failed: %d\n", rc); - LASSERT(rc == -ENOMEM); - rc = -ENOMEM; - goto cleanup_bulk; - } - } - - spin_lock(&request->rq_lock); - /* We are responsible for unlinking the reply buffer */ - request->rq_reply_unlinked = noreply; - request->rq_receiving_reply = !noreply; - /* Clear any flags that may be present from previous sends. */ - request->rq_req_unlinked = 0; - request->rq_replied = 0; - request->rq_err = 0; - request->rq_timedout = 0; - request->rq_net_err = 0; - request->rq_resend = 0; - request->rq_restart = 0; - request->rq_reply_truncated = 0; - spin_unlock(&request->rq_lock); - - if (!noreply) { - reply_md.start = request->rq_repbuf; - reply_md.length = request->rq_repbuf_len; - /* Allow multiple early replies */ - reply_md.threshold = LNET_MD_THRESH_INF; - /* Manage remote for early replies */ - reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | - LNET_MD_MANAGE_REMOTE | - LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */ - reply_md.user_ptr = &request->rq_reply_cbid; - reply_md.eq_handle = ptlrpc_eq_h; - - /* We must see the unlink callback to set rq_reply_unlinked, - * so we can't auto-unlink - */ - rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN, - &request->rq_reply_md_h); - if (rc != 0) { - CERROR("LNetMDAttach failed: %d\n", rc); - LASSERT(rc == -ENOMEM); - spin_lock(&request->rq_lock); - /* ...but the MD attach didn't succeed... 
*/ - request->rq_receiving_reply = 0; - spin_unlock(&request->rq_lock); - rc = -ENOMEM; - goto cleanup_me; - } - - CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu, portal %u\n", - request->rq_repbuf_len, request->rq_xid, - request->rq_reply_portal); - } - - /* add references on request for request_out_callback */ - ptlrpc_request_addref(request); - if (obd && obd->obd_svc_stats) - lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR, - atomic_read(&imp->imp_inflight)); - - OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5); - - ktime_get_real_ts64(&request->rq_sent_tv); - request->rq_sent = ktime_get_real_seconds(); - /* We give the server rq_timeout secs to process the req, and - * add the network latency for our local timeout. - */ - request->rq_deadline = request->rq_sent + request->rq_timeout + - ptlrpc_at_get_net_latency(request); - - ptlrpc_pinger_sending_on_import(imp); - - DEBUG_REQ(D_INFO, request, "send flg=%x", - lustre_msg_get_flags(request->rq_reqmsg)); - rc = ptl_send_buf(&request->rq_req_md_h, - request->rq_reqbuf, request->rq_reqdata_len, - LNET_NOACK_REQ, &request->rq_req_cbid, - connection, - request->rq_request_portal, - request->rq_xid, 0); - if (likely(rc == 0)) - goto out; - - request->rq_req_unlinked = 1; - ptlrpc_req_finished(request); - if (noreply) - goto out; - - cleanup_me: - /* MEUnlink is safe; the PUT didn't even get off the ground, and - * nobody apart from the PUT's target has the right nid+XID to - * access the reply buffer. - */ - rc2 = LNetMEUnlink(reply_me_h); - LASSERT(rc2 == 0); - /* UNLINKED callback called synchronously */ - LASSERT(!request->rq_receiving_reply); - - cleanup_bulk: - /* We do sync unlink here as there was no real transfer here so - * the chance to have long unlink to sluggish net is smaller here. 
- */ - ptlrpc_unregister_bulk(request, 0); - out: - if (request->rq_memalloc) - memalloc_noreclaim_restore(mpflag); - return rc; -} -EXPORT_SYMBOL(ptl_send_rpc); - -/** - * Register request buffer descriptor for request receiving. - */ -int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd) -{ - struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service; - static struct lnet_process_id match_id = {LNET_NID_ANY, LNET_PID_ANY}; - int rc; - struct lnet_md md; - struct lnet_handle_me me_h; - - CDEBUG(D_NET, "LNetMEAttach: portal %d\n", - service->srv_req_portal); - - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD)) - return -ENOMEM; - - /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL, - * which means buffer can only be attached on local CPT, and LND - * threads can find it by grabbing a local lock - */ - rc = LNetMEAttach(service->srv_req_portal, - match_id, 0, ~0, LNET_UNLINK, - rqbd->rqbd_svcpt->scp_cpt >= 0 ? - LNET_INS_LOCAL : LNET_INS_AFTER, &me_h); - if (rc != 0) { - CERROR("LNetMEAttach failed: %d\n", rc); - return -ENOMEM; - } - - LASSERT(rqbd->rqbd_refcount == 0); - rqbd->rqbd_refcount = 1; - - md.start = rqbd->rqbd_buffer; - md.length = service->srv_buf_size; - md.max_size = service->srv_max_req_size; - md.threshold = LNET_MD_THRESH_INF; - md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE; - md.user_ptr = &rqbd->rqbd_cbid; - md.eq_handle = ptlrpc_eq_h; - - rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h); - if (rc == 0) - return 0; - - CERROR("LNetMDAttach failed: %d;\n", rc); - LASSERT(rc == -ENOMEM); - rc = LNetMEUnlink(me_h); - LASSERT(rc == 0); - rqbd->rqbd_refcount = 0; - - return -ENOMEM; -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c deleted file mode 100644 index e09b86529c5d..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c +++ /dev/null @@ -1,1613 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO 
NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. A copy is - * included in the COPYING file that accompanied this code. - - * GPL HEADER END - */ -/* - * Copyright (c) 2011 Intel Corporation - * - * Copyright 2012 Xyratex Technology Limited - */ -/* - * lustre/ptlrpc/nrs.c - * - * Network Request Scheduler (NRS) - * - * Allows to reorder the handling of RPCs at servers. - * - * Author: Liang Zhen - * Author: Nikitas Angelinas - */ -/** - * \addtogoup nrs - * @{ - */ - -#define DEBUG_SUBSYSTEM S_RPC -#include -#include -#include -#include -#include -#include -#include "ptlrpc_internal.h" - -/** - * NRS core object. - */ -struct nrs_core nrs_core; - -static int nrs_policy_init(struct ptlrpc_nrs_policy *policy) -{ - return policy->pol_desc->pd_ops->op_policy_init ? - policy->pol_desc->pd_ops->op_policy_init(policy) : 0; -} - -static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy) -{ - LASSERT(policy->pol_ref == 0); - LASSERT(policy->pol_req_queued == 0); - - if (policy->pol_desc->pd_ops->op_policy_fini) - policy->pol_desc->pd_ops->op_policy_fini(policy); -} - -static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy, - enum ptlrpc_nrs_ctl opc, void *arg) -{ - /** - * The policy may be stopped, but the lprocfs files and - * ptlrpc_nrs_policy instances remain present until unregistration time. - * Do not perform the ctl operation if the policy is stopped, as - * policy->pol_private will be NULL in such a case. 
- */ - if (policy->pol_state == NRS_POL_STATE_STOPPED) - return -ENODEV; - - return policy->pol_desc->pd_ops->op_policy_ctl ? - policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) : - -ENOSYS; -} - -static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy) -{ - if (policy->pol_desc->pd_ops->op_policy_stop) - policy->pol_desc->pd_ops->op_policy_stop(policy); - - LASSERT(list_empty(&policy->pol_list_queued)); - LASSERT(policy->pol_req_queued == 0 && - policy->pol_req_started == 0); - - policy->pol_private = NULL; - - policy->pol_state = NRS_POL_STATE_STOPPED; - - if (atomic_dec_and_test(&policy->pol_desc->pd_refs)) - module_put(policy->pol_desc->pd_owner); -} - -static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy) -{ - struct ptlrpc_nrs *nrs = policy->pol_nrs; - - if (nrs->nrs_policy_fallback == policy && !nrs->nrs_stopping) - return -EPERM; - - if (policy->pol_state == NRS_POL_STATE_STARTING) - return -EAGAIN; - - /* In progress or already stopped */ - if (policy->pol_state != NRS_POL_STATE_STARTED) - return 0; - - policy->pol_state = NRS_POL_STATE_STOPPING; - - /* Immediately make it invisible */ - if (nrs->nrs_policy_primary == policy) { - nrs->nrs_policy_primary = NULL; - - } else { - LASSERT(nrs->nrs_policy_fallback == policy); - nrs->nrs_policy_fallback = NULL; - } - - /* I have the only refcount */ - if (policy->pol_ref == 1) - nrs_policy_stop0(policy); - - return 0; -} - -/** - * Transitions the \a nrs NRS head's primary policy to - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING and if the policy has no - * pending usage references, to ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED. 
- * - * \param[in] nrs the NRS head to carry out this operation on - */ -static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs) -{ - struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary; - - if (!tmp) - return; - - nrs->nrs_policy_primary = NULL; - - LASSERT(tmp->pol_state == NRS_POL_STATE_STARTED); - tmp->pol_state = NRS_POL_STATE_STOPPING; - - if (tmp->pol_ref == 0) - nrs_policy_stop0(tmp); -} - -/** - * Transitions a policy across the ptlrpc_nrs_pol_state range of values, in - * response to an lprocfs command to start a policy. - * - * If a primary policy different to the current one is specified, this function - * will transition the new policy to the - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTING and then to - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED, and will then transition - * the old primary policy (if there is one) to - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, and if there are no outstanding - * references on the policy to ptlrpc_nrs_pol_stae::NRS_POL_STATE_STOPPED. - * - * If the fallback policy is specified, this is taken to indicate an instruction - * to stop the current primary policy, without substituting it with another - * primary policy, so the primary policy (if any) is transitioned to - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, and if there are no outstanding - * references on the policy to ptlrpc_nrs_pol_stae::NRS_POL_STATE_STOPPED. In - * this case, the fallback policy is only left active in the NRS head. - */ -static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy) -{ - struct ptlrpc_nrs *nrs = policy->pol_nrs; - int rc = 0; - - /** - * Don't allow multiple starting which is too complex, and has no real - * benefit. 
- */ - if (nrs->nrs_policy_starting) - return -EAGAIN; - - LASSERT(policy->pol_state != NRS_POL_STATE_STARTING); - - if (policy->pol_state == NRS_POL_STATE_STOPPING) - return -EAGAIN; - - if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) { - /** - * This is for cases in which the user sets the policy to the - * fallback policy (currently fifo for all services); i.e. the - * user is resetting the policy to the default; so we stop the - * primary policy, if any. - */ - if (policy == nrs->nrs_policy_fallback) { - nrs_policy_stop_primary(nrs); - return 0; - } - - /** - * If we reach here, we must be setting up the fallback policy - * at service startup time, and only a single policy with the - * nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can - * register with NRS core. - */ - LASSERT(!nrs->nrs_policy_fallback); - } else { - /** - * Shouldn't start primary policy if w/o fallback policy. - */ - if (!nrs->nrs_policy_fallback) - return -EPERM; - - if (policy->pol_state == NRS_POL_STATE_STARTED) - return 0; - } - - /** - * Increase the module usage count for policies registering from other - * modules. 
- */ - if (atomic_inc_return(&policy->pol_desc->pd_refs) == 1 && - !try_module_get(policy->pol_desc->pd_owner)) { - atomic_dec(&policy->pol_desc->pd_refs); - CERROR("NRS: cannot get module for policy %s; is it alive?\n", - policy->pol_desc->pd_name); - return -ENODEV; - } - - /** - * Serialize policy starting across the NRS head - */ - nrs->nrs_policy_starting = 1; - - policy->pol_state = NRS_POL_STATE_STARTING; - - if (policy->pol_desc->pd_ops->op_policy_start) { - spin_unlock(&nrs->nrs_lock); - - rc = policy->pol_desc->pd_ops->op_policy_start(policy); - - spin_lock(&nrs->nrs_lock); - if (rc != 0) { - if (atomic_dec_and_test(&policy->pol_desc->pd_refs)) - module_put(policy->pol_desc->pd_owner); - - policy->pol_state = NRS_POL_STATE_STOPPED; - goto out; - } - } - - policy->pol_state = NRS_POL_STATE_STARTED; - - if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) { - /** - * This path is only used at PTLRPC service setup time. - */ - nrs->nrs_policy_fallback = policy; - } else { - /* - * Try to stop the current primary policy if there is one. - */ - nrs_policy_stop_primary(nrs); - - /** - * And set the newly-started policy as the primary one. - */ - nrs->nrs_policy_primary = policy; - } - -out: - nrs->nrs_policy_starting = 0; - - return rc; -} - -/** - * Increases the policy's usage reference count. - */ -static inline void nrs_policy_get_locked(struct ptlrpc_nrs_policy *policy) -{ - policy->pol_ref++; -} - -/** - * Decreases the policy's usage reference count, and stops the policy in case it - * was already stopping and have no more outstanding usage references (which - * indicates it has no more queued or started requests, and can be safely - * stopped). 
- */ -static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy) -{ - LASSERT(policy->pol_ref > 0); - - policy->pol_ref--; - if (unlikely(policy->pol_ref == 0 && - policy->pol_state == NRS_POL_STATE_STOPPING)) - nrs_policy_stop0(policy); -} - -static void nrs_policy_put(struct ptlrpc_nrs_policy *policy) -{ - spin_lock(&policy->pol_nrs->nrs_lock); - nrs_policy_put_locked(policy); - spin_unlock(&policy->pol_nrs->nrs_lock); -} - -/** - * Find and return a policy by name. - */ -static struct ptlrpc_nrs_policy *nrs_policy_find_locked(struct ptlrpc_nrs *nrs, - char *name) -{ - struct ptlrpc_nrs_policy *tmp; - - list_for_each_entry(tmp, &nrs->nrs_policy_list, pol_list) { - if (strncmp(tmp->pol_desc->pd_name, name, - NRS_POL_NAME_MAX) == 0) { - nrs_policy_get_locked(tmp); - return tmp; - } - } - return NULL; -} - -/** - * Release references for the resource hierarchy moving upwards towards the - * policy instance resource. - */ -static void nrs_resource_put(struct ptlrpc_nrs_resource *res) -{ - struct ptlrpc_nrs_policy *policy = res->res_policy; - - if (policy->pol_desc->pd_ops->op_res_put) { - struct ptlrpc_nrs_resource *parent; - - for (; res; res = parent) { - parent = res->res_parent; - policy->pol_desc->pd_ops->op_res_put(policy, res); - } - } -} - -/** - * Obtains references for each resource in the resource hierarchy for request - * \a nrq if it is to be handled by \a policy. - * - * \param[in] policy the policy - * \param[in] nrq the request - * \param[in] moving_req denotes whether this is a call to the function by - * ldlm_lock_reorder_req(), in order to move \a nrq to - * the high-priority NRS head; we should not sleep when - * set. 
- * - * \retval NULL resource hierarchy references not obtained - * \retval valid-pointer the bottom level of the resource hierarchy - * - * \see ptlrpc_nrs_pol_ops::op_res_get() - */ -static -struct ptlrpc_nrs_resource *nrs_resource_get(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq, - bool moving_req) -{ - /** - * Set to NULL to traverse the resource hierarchy from the top. - */ - struct ptlrpc_nrs_resource *res = NULL; - struct ptlrpc_nrs_resource *tmp = NULL; - int rc; - - while (1) { - rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res, - &tmp, moving_req); - if (rc < 0) { - if (res) - nrs_resource_put(res); - return NULL; - } - - tmp->res_parent = res; - tmp->res_policy = policy; - res = tmp; - tmp = NULL; - /** - * Return once we have obtained a reference to the bottom level - * of the resource hierarchy. - */ - if (rc > 0) - return res; - } -} - -/** - * Obtains resources for the resource hierarchies and policy references for - * the fallback and current primary policy (if any), that will later be used - * to handle request \a nrq. - * - * \param[in] nrs the NRS head instance that will be handling request \a nrq. - * \param[in] nrq the request that is being handled. - * \param[out] resp the array where references to the resource hierarchy are - * stored. - * \param[in] moving_req is set when obtaining resources while moving a - * request from a policy on the regular NRS head to a - * policy on the HP NRS head (via - * ldlm_lock_reorder_req()). It signifies that - * allocations to get resources should be atomic; for - * a full explanation, see comment in - * ptlrpc_nrs_pol_ops::op_res_get(). - */ -static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, - struct ptlrpc_nrs_request *nrq, - struct ptlrpc_nrs_resource **resp, - bool moving_req) -{ - struct ptlrpc_nrs_policy *primary = NULL; - struct ptlrpc_nrs_policy *fallback = NULL; - - memset(resp, 0, sizeof(resp[0]) * NRS_RES_MAX); - - /** - * Obtain policy references. 
- */ - spin_lock(&nrs->nrs_lock); - - fallback = nrs->nrs_policy_fallback; - nrs_policy_get_locked(fallback); - - primary = nrs->nrs_policy_primary; - if (primary) - nrs_policy_get_locked(primary); - - spin_unlock(&nrs->nrs_lock); - - /** - * Obtain resource hierarchy references. - */ - resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req); - LASSERT(resp[NRS_RES_FALLBACK]); - - if (primary) { - resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq, - moving_req); - /** - * A primary policy may exist which may not wish to serve a - * particular request for different reasons; release the - * reference on the policy as it will not be used for this - * request. - */ - if (!resp[NRS_RES_PRIMARY]) - nrs_policy_put(primary); - } -} - -/** - * Releases references to resource hierarchies and policies, because they are no - * longer required; used when request handling has been completed, or the - * request is moving to the high priority NRS head. - * - * \param resp the resource hierarchy that is being released - * - * \see ptlrpc_nrs_req_finalize() - */ -static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp) -{ - struct ptlrpc_nrs_policy *pols[NRS_RES_MAX]; - int i; - - for (i = 0; i < NRS_RES_MAX; i++) { - if (resp[i]) { - pols[i] = resp[i]->res_policy; - nrs_resource_put(resp[i]); - resp[i] = NULL; - } else { - pols[i] = NULL; - } - } - - for (i = 0; i < NRS_RES_MAX; i++) { - if (pols[i]) - nrs_policy_put(pols[i]); - } -} - -/** - * Obtains an NRS request from \a policy for handling or examination; the - * request should be removed in the 'handling' case. - * - * Calling into this function implies we already know the policy has a request - * waiting to be handled. - * - * \param[in] policy the policy from which a request - * \param[in] peek when set, signifies that we just want to examine the - * request, and not handle it, so the request is not removed - * from the policy. 
- * \param[in] force when set, it will force a policy to return a request if it - * has one pending - * - * \retval the NRS request to be handled - */ -static inline -struct ptlrpc_nrs_request *nrs_request_get(struct ptlrpc_nrs_policy *policy, - bool peek, bool force) -{ - struct ptlrpc_nrs_request *nrq; - - LASSERT(policy->pol_req_queued > 0); - - nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force); - - LASSERT(ergo(nrq, nrs_request_policy(nrq) == policy)); - - return nrq; -} - -/** - * Enqueues request \a nrq for later handling, via one one the policies for - * which resources where earlier obtained via nrs_resource_get_safe(). The - * function attempts to enqueue the request first on the primary policy - * (if any), since this is the preferred choice. - * - * \param nrq the request being enqueued - * - * \see nrs_resource_get_safe() - */ -static inline void nrs_request_enqueue(struct ptlrpc_nrs_request *nrq) -{ - struct ptlrpc_nrs_policy *policy; - int rc; - int i; - - /** - * Try in descending order, because the primary policy (if any) is - * the preferred choice. - */ - for (i = NRS_RES_MAX - 1; i >= 0; i--) { - if (!nrq->nr_res_ptrs[i]) - continue; - - nrq->nr_res_idx = i; - policy = nrq->nr_res_ptrs[i]->res_policy; - - rc = policy->pol_desc->pd_ops->op_req_enqueue(policy, nrq); - if (rc == 0) { - policy->pol_nrs->nrs_req_queued++; - policy->pol_req_queued++; - return; - } - } - /** - * Should never get here, as at least the primary policy's - * ptlrpc_nrs_pol_ops::op_req_enqueue() implementation should always - * succeed. - */ - LBUG(); -} - -/** - * Called when a request has been handled - * - * \param[in] nrs the request that has been handled; can be used for - * job/resource control. 
- * - * \see ptlrpc_nrs_req_stop_nolock() - */ -static inline void nrs_request_stop(struct ptlrpc_nrs_request *nrq) -{ - struct ptlrpc_nrs_policy *policy = nrs_request_policy(nrq); - - if (policy->pol_desc->pd_ops->op_req_stop) - policy->pol_desc->pd_ops->op_req_stop(policy, nrq); - - LASSERT(policy->pol_nrs->nrs_req_started > 0); - LASSERT(policy->pol_req_started > 0); - - policy->pol_nrs->nrs_req_started--; - policy->pol_req_started--; -} - -/** - * Handler for operations that can be carried out on policies. - * - * Handles opcodes that are common to all policy types within NRS core, and - * passes any unknown opcodes to the policy-specific control function. - * - * \param[in] nrs the NRS head this policy belongs to. - * \param[in] name the human-readable policy name; should be the same as - * ptlrpc_nrs_pol_desc::pd_name. - * \param[in] opc the opcode of the operation being carried out. - * \param[in,out] arg can be used to pass information in and out between when - * carrying an operation; usually data that is private to - * the policy at some level, or generic policy status - * information. - * - * \retval -ve error condition - * \retval 0 operation was carried out successfully - */ -static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name, - enum ptlrpc_nrs_ctl opc, void *arg) -{ - struct ptlrpc_nrs_policy *policy; - int rc = 0; - - spin_lock(&nrs->nrs_lock); - - policy = nrs_policy_find_locked(nrs, name); - if (!policy) { - rc = -ENOENT; - goto out; - } - - if (policy->pol_state != NRS_POL_STATE_STARTED && - policy->pol_state != NRS_POL_STATE_STOPPED) { - rc = -EAGAIN; - goto out; - } - - switch (opc) { - /** - * Unknown opcode, pass it down to the policy-specific control - * function for handling. 
- */ - default: - rc = nrs_policy_ctl_locked(policy, opc, arg); - break; - - /** - * Start \e policy - */ - case PTLRPC_NRS_CTL_START: - rc = nrs_policy_start_locked(policy); - break; - } -out: - if (policy) - nrs_policy_put_locked(policy); - - spin_unlock(&nrs->nrs_lock); - - return rc; -} - -/** - * Unregisters a policy by name. - * - * \param[in] nrs the NRS head this policy belongs to. - * \param[in] name the human-readable policy name; should be the same as - * ptlrpc_nrs_pol_desc::pd_name - * - * \retval -ve error - * \retval 0 success - */ -static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name) -{ - struct ptlrpc_nrs_policy *policy = NULL; - - spin_lock(&nrs->nrs_lock); - - policy = nrs_policy_find_locked(nrs, name); - if (!policy) { - spin_unlock(&nrs->nrs_lock); - - CERROR("Can't find NRS policy %s\n", name); - return -ENOENT; - } - - if (policy->pol_ref > 1) { - CERROR("Policy %s is busy with %d references\n", name, - (int)policy->pol_ref); - nrs_policy_put_locked(policy); - - spin_unlock(&nrs->nrs_lock); - return -EBUSY; - } - - LASSERT(policy->pol_req_queued == 0); - LASSERT(policy->pol_req_started == 0); - - if (policy->pol_state != NRS_POL_STATE_STOPPED) { - nrs_policy_stop_locked(policy); - LASSERT(policy->pol_state == NRS_POL_STATE_STOPPED); - } - - list_del(&policy->pol_list); - nrs->nrs_num_pols--; - - nrs_policy_put_locked(policy); - - spin_unlock(&nrs->nrs_lock); - - nrs_policy_fini(policy); - - LASSERT(!policy->pol_private); - kfree(policy); - - return 0; -} - -/** - * Register a policy from \policy descriptor \a desc with NRS head \a nrs. - * - * \param[in] nrs the NRS head on which the policy will be registered. - * \param[in] desc the policy descriptor from which the information will be - * obtained to register the policy. 
- * - * \retval -ve error - * \retval 0 success - */ -static int nrs_policy_register(struct ptlrpc_nrs *nrs, - struct ptlrpc_nrs_pol_desc *desc) -{ - struct ptlrpc_nrs_policy *policy; - struct ptlrpc_nrs_policy *tmp; - struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt; - int rc; - - LASSERT(desc->pd_ops->op_res_get); - LASSERT(desc->pd_ops->op_req_get); - LASSERT(desc->pd_ops->op_req_enqueue); - LASSERT(desc->pd_ops->op_req_dequeue); - LASSERT(desc->pd_compat); - - policy = kzalloc_node(sizeof(*policy), GFP_NOFS, - cfs_cpt_spread_node(svcpt->scp_service->srv_cptable, - svcpt->scp_cpt)); - if (!policy) - return -ENOMEM; - - policy->pol_nrs = nrs; - policy->pol_desc = desc; - policy->pol_state = NRS_POL_STATE_STOPPED; - policy->pol_flags = desc->pd_flags; - - INIT_LIST_HEAD(&policy->pol_list); - INIT_LIST_HEAD(&policy->pol_list_queued); - - rc = nrs_policy_init(policy); - if (rc != 0) { - kfree(policy); - return rc; - } - - spin_lock(&nrs->nrs_lock); - - tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name); - if (tmp) { - CERROR("NRS policy %s has been registered, can't register it for %s\n", - policy->pol_desc->pd_name, - svcpt->scp_service->srv_name); - nrs_policy_put_locked(tmp); - - spin_unlock(&nrs->nrs_lock); - nrs_policy_fini(policy); - kfree(policy); - - return -EEXIST; - } - - list_add_tail(&policy->pol_list, &nrs->nrs_policy_list); - nrs->nrs_num_pols++; - - if (policy->pol_flags & PTLRPC_NRS_FL_REG_START) - rc = nrs_policy_start_locked(policy); - - spin_unlock(&nrs->nrs_lock); - - if (rc != 0) - (void)nrs_policy_unregister(nrs, policy->pol_desc->pd_name); - - return rc; -} - -/** - * Enqueue request \a req using one of the policies its resources are referring - * to. - * - * \param[in] req the request to enqueue. 
- */ -static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req) -{ - struct ptlrpc_nrs_policy *policy; - - LASSERT(req->rq_nrq.nr_initialized); - LASSERT(!req->rq_nrq.nr_enqueued); - - nrs_request_enqueue(&req->rq_nrq); - req->rq_nrq.nr_enqueued = 1; - - policy = nrs_request_policy(&req->rq_nrq); - /** - * Add the policy to the NRS head's list of policies with enqueued - * requests, if it has not been added there. - */ - if (unlikely(list_empty(&policy->pol_list_queued))) - list_add_tail(&policy->pol_list_queued, - &policy->pol_nrs->nrs_policy_queued); -} - -/** - * Enqueue a request on the high priority NRS head. - * - * \param req the request to enqueue. - */ -static void ptlrpc_nrs_hpreq_add_nolock(struct ptlrpc_request *req) -{ - int opc = lustre_msg_get_opc(req->rq_reqmsg); - - spin_lock(&req->rq_lock); - req->rq_hp = 1; - ptlrpc_nrs_req_add_nolock(req); - if (opc != OBD_PING) - DEBUG_REQ(D_NET, req, "high priority req"); - spin_unlock(&req->rq_lock); -} - -/** - * Returns a boolean predicate indicating whether the policy described by - * \a desc is adequate for use with service \a svc. - * - * \param[in] svc the service - * \param[in] desc the policy descriptor - * - * \retval false the policy is not compatible with the service - * \retval true the policy is compatible with the service - */ -static inline bool nrs_policy_compatible(const struct ptlrpc_service *svc, - const struct ptlrpc_nrs_pol_desc *desc) -{ - return desc->pd_compat(svc, desc); -} - -/** - * Registers all compatible policies in nrs_core.nrs_policies, for NRS head - * \a nrs. 
- * - * \param[in] nrs the NRS head - * - * \retval -ve error - * \retval 0 success - * - * \pre mutex_is_locked(&nrs_core.nrs_mutex) - * - * \see ptlrpc_service_nrs_setup() - */ -static int nrs_register_policies_locked(struct ptlrpc_nrs *nrs) -{ - struct ptlrpc_nrs_pol_desc *desc; - /* for convenience */ - struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt; - struct ptlrpc_service *svc = svcpt->scp_service; - int rc = -EINVAL; - - LASSERT(mutex_is_locked(&nrs_core.nrs_mutex)); - - list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) { - if (nrs_policy_compatible(svc, desc)) { - rc = nrs_policy_register(nrs, desc); - if (rc != 0) { - CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n", - desc->pd_name, svcpt->scp_cpt, - svc->srv_name, rc); - /** - * Fail registration if any of the policies' - * registration fails. - */ - break; - } - } - } - - return rc; -} - -/** - * Initializes NRS head \a nrs of service partition \a svcpt, and registers all - * compatible policies in NRS core, with the NRS head. 
- * - * \param[in] nrs the NRS head - * \param[in] svcpt the PTLRPC service partition to setup - * - * \retval -ve error - * \retval 0 success - * - * \pre mutex_is_locked(&nrs_core.nrs_mutex) - */ -static int nrs_svcpt_setup_locked0(struct ptlrpc_nrs *nrs, - struct ptlrpc_service_part *svcpt) -{ - enum ptlrpc_nrs_queue_type queue; - - LASSERT(mutex_is_locked(&nrs_core.nrs_mutex)); - - if (nrs == &svcpt->scp_nrs_reg) - queue = PTLRPC_NRS_QUEUE_REG; - else if (nrs == svcpt->scp_nrs_hp) - queue = PTLRPC_NRS_QUEUE_HP; - else - LBUG(); - - nrs->nrs_svcpt = svcpt; - nrs->nrs_queue_type = queue; - spin_lock_init(&nrs->nrs_lock); - INIT_LIST_HEAD(&nrs->nrs_policy_list); - INIT_LIST_HEAD(&nrs->nrs_policy_queued); - - return nrs_register_policies_locked(nrs); -} - -/** - * Allocates a regular and optionally a high-priority NRS head (if the service - * handles high-priority RPCs), and then registers all available compatible - * policies on those NRS heads. - * - * \param[in,out] svcpt the PTLRPC service partition to setup - * - * \pre mutex_is_locked(&nrs_core.nrs_mutex) - */ -static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_nrs *nrs; - int rc; - - LASSERT(mutex_is_locked(&nrs_core.nrs_mutex)); - - /** - * Initialize the regular NRS head. - */ - nrs = nrs_svcpt2nrs(svcpt, false); - rc = nrs_svcpt_setup_locked0(nrs, svcpt); - if (rc < 0) - goto out; - - /** - * Optionally allocate a high-priority NRS head. - */ - if (!svcpt->scp_service->srv_ops.so_hpreq_handler) - goto out; - - svcpt->scp_nrs_hp = - kzalloc_node(sizeof(*svcpt->scp_nrs_hp), GFP_NOFS, - cfs_cpt_spread_node(svcpt->scp_service->srv_cptable, - svcpt->scp_cpt)); - if (!svcpt->scp_nrs_hp) { - rc = -ENOMEM; - goto out; - } - - nrs = nrs_svcpt2nrs(svcpt, true); - rc = nrs_svcpt_setup_locked0(nrs, svcpt); - -out: - return rc; -} - -/** - * Unregisters all policies on all available NRS heads in a service partition; - * called at PTLRPC service unregistration time. 
- * - * \param[in] svcpt the PTLRPC service partition - * - * \pre mutex_is_locked(&nrs_core.nrs_mutex) - */ -static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_nrs *nrs; - struct ptlrpc_nrs_policy *policy; - struct ptlrpc_nrs_policy *tmp; - int rc; - bool hp = false; - - LASSERT(mutex_is_locked(&nrs_core.nrs_mutex)); - -again: - /* scp_nrs_hp could be NULL due to short of memory. */ - nrs = hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg; - /* check the nrs_svcpt to see if nrs is initialized. */ - if (!nrs || !nrs->nrs_svcpt) - return; - nrs->nrs_stopping = 1; - - list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) { - rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name); - LASSERT(rc == 0); - } - - /** - * If the service partition has an HP NRS head, clean that up as well. - */ - if (!hp && nrs_svcpt_has_hp(svcpt)) { - hp = true; - goto again; - } - - if (hp) - kfree(nrs); -} - -/** - * Returns the descriptor for a policy as identified by by \a name. - * - * \param[in] name the policy name - * - * \retval the policy descriptor - * \retval NULL - */ -static struct ptlrpc_nrs_pol_desc *nrs_policy_find_desc_locked(const char *name) -{ - struct ptlrpc_nrs_pol_desc *tmp; - - list_for_each_entry(tmp, &nrs_core.nrs_policies, pd_list) { - if (strncmp(tmp->pd_name, name, NRS_POL_NAME_MAX) == 0) - return tmp; - } - return NULL; -} - -/** - * Removes the policy from all supported NRS heads of all partitions of all - * PTLRPC services. 
- * - * \param[in] desc the policy descriptor to unregister - * - * \retval -ve error - * \retval 0 successfully unregistered policy on all supported NRS heads - * - * \pre mutex_is_locked(&nrs_core.nrs_mutex) - * \pre mutex_is_locked(&ptlrpc_all_services_mutex) - */ -static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc) -{ - struct ptlrpc_nrs *nrs; - struct ptlrpc_service *svc; - struct ptlrpc_service_part *svcpt; - int i; - int rc = 0; - - LASSERT(mutex_is_locked(&nrs_core.nrs_mutex)); - LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex)); - - list_for_each_entry(svc, &ptlrpc_all_services, srv_list) { - if (!nrs_policy_compatible(svc, desc) || - unlikely(svc->srv_is_stopping)) - continue; - - ptlrpc_service_for_each_part(svcpt, i, svc) { - bool hp = false; - -again: - nrs = nrs_svcpt2nrs(svcpt, hp); - rc = nrs_policy_unregister(nrs, desc->pd_name); - /** - * Ignore -ENOENT as the policy may not have registered - * successfully on all service partitions. - */ - if (rc == -ENOENT) { - rc = 0; - } else if (rc != 0) { - CERROR("Failed to unregister NRS policy %s for partition %d of service %s: %d\n", - desc->pd_name, svcpt->scp_cpt, - svcpt->scp_service->srv_name, rc); - return rc; - } - - if (!hp && nrs_svc_has_hp(svc)) { - hp = true; - goto again; - } - } - - if (desc->pd_ops->op_lprocfs_fini) - desc->pd_ops->op_lprocfs_fini(svc); - } - - return rc; -} - -/** - * Registers a new policy with NRS core. - * - * The function will only succeed if policy registration with all compatible - * service partitions (if any) is successful. - * - * N.B. This function should be called either at ptlrpc module initialization - * time when registering a policy that ships with NRS core, or in a - * module's init() function for policies registering from other modules. 
- * - * \param[in] conf configuration information for the new policy to register - * - * \retval -ve error - * \retval 0 success - */ -static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) -{ - struct ptlrpc_service *svc; - struct ptlrpc_nrs_pol_desc *desc; - size_t len; - int rc = 0; - - LASSERT(conf->nc_ops); - LASSERT(conf->nc_compat); - LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one, - conf->nc_compat_svc_name)); - LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0, - conf->nc_owner)); - - conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0'; - - /** - * External policies are not allowed to start immediately upon - * registration, as there is a relatively higher chance that their - * registration might fail. In such a case, some policy instances may - * already have requests queued wen unregistration needs to happen as - * part o cleanup; since there is currently no way to drain requests - * from a policy unless the service is unregistering, we just disallow - * this. - */ - if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) && - (conf->nc_flags & (PTLRPC_NRS_FL_FALLBACK | - PTLRPC_NRS_FL_REG_START))) { - CERROR("NRS: failing to register policy %s. 
Please check policy flags; external policies cannot act as fallback policies, or be started immediately upon registration without interaction with lprocfs\n", - conf->nc_name); - return -EINVAL; - } - - mutex_lock(&nrs_core.nrs_mutex); - - if (nrs_policy_find_desc_locked(conf->nc_name)) { - CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n", - conf->nc_name); - rc = -EEXIST; - goto fail; - } - - desc = kzalloc(sizeof(*desc), GFP_NOFS); - if (!desc) { - rc = -ENOMEM; - goto fail; - } - - len = strlcpy(desc->pd_name, conf->nc_name, sizeof(desc->pd_name)); - if (len >= sizeof(desc->pd_name)) { - kfree(desc); - rc = -E2BIG; - goto fail; - } - desc->pd_ops = conf->nc_ops; - desc->pd_compat = conf->nc_compat; - desc->pd_compat_svc_name = conf->nc_compat_svc_name; - if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0) - desc->pd_owner = conf->nc_owner; - desc->pd_flags = conf->nc_flags; - atomic_set(&desc->pd_refs, 0); - - /** - * For policies that are held in the same module as NRS (currently - * ptlrpc), do not register the policy with all compatible services, - * as the services will not have started at this point, since we are - * calling from ptlrpc module initialization code. In such cases each - * service will register all compatible policies later, via - * ptlrpc_service_nrs_setup(). 
- */ - if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) == 0) - goto internal; - - /** - * Register the new policy on all compatible services - */ - mutex_lock(&ptlrpc_all_services_mutex); - - list_for_each_entry(svc, &ptlrpc_all_services, srv_list) { - struct ptlrpc_service_part *svcpt; - int i; - int rc2; - - if (!nrs_policy_compatible(svc, desc) || - unlikely(svc->srv_is_stopping)) - continue; - - ptlrpc_service_for_each_part(svcpt, i, svc) { - struct ptlrpc_nrs *nrs; - bool hp = false; -again: - nrs = nrs_svcpt2nrs(svcpt, hp); - rc = nrs_policy_register(nrs, desc); - if (rc != 0) { - CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n", - desc->pd_name, svcpt->scp_cpt, - svcpt->scp_service->srv_name, rc); - - rc2 = nrs_policy_unregister_locked(desc); - /** - * Should not fail at this point - */ - LASSERT(rc2 == 0); - mutex_unlock(&ptlrpc_all_services_mutex); - kfree(desc); - goto fail; - } - - if (!hp && nrs_svc_has_hp(svc)) { - hp = true; - goto again; - } - } - - /** - * No need to take a reference to other modules here, as we - * will be calling from the module's init() function. - */ - if (desc->pd_ops->op_lprocfs_init) { - rc = desc->pd_ops->op_lprocfs_init(svc); - if (rc != 0) { - rc2 = nrs_policy_unregister_locked(desc); - /** - * Should not fail at this point - */ - LASSERT(rc2 == 0); - mutex_unlock(&ptlrpc_all_services_mutex); - kfree(desc); - goto fail; - } - } - } - - mutex_unlock(&ptlrpc_all_services_mutex); -internal: - list_add_tail(&desc->pd_list, &nrs_core.nrs_policies); -fail: - mutex_unlock(&nrs_core.nrs_mutex); - - return rc; -} - -/** - * Setup NRS heads on all service partitions of service \a svc, and register - * all compatible policies on those NRS heads. - * - * To be called from within ptl - * \param[in] svc the service to setup - * - * \retval -ve error, the calling logic should eventually call - * ptlrpc_service_nrs_cleanup() to undo any work performed - * by this function. 
- * - * \see ptlrpc_register_service() - * \see ptlrpc_service_nrs_cleanup() - */ -int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc) -{ - struct ptlrpc_service_part *svcpt; - const struct ptlrpc_nrs_pol_desc *desc; - int i; - int rc = 0; - - mutex_lock(&nrs_core.nrs_mutex); - - /** - * Initialize NRS heads on all service CPTs. - */ - ptlrpc_service_for_each_part(svcpt, i, svc) { - rc = nrs_svcpt_setup_locked(svcpt); - if (rc != 0) - goto failed; - } - - /** - * Set up lprocfs interfaces for all supported policies for the - * service. - */ - list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) { - if (!nrs_policy_compatible(svc, desc)) - continue; - - if (desc->pd_ops->op_lprocfs_init) { - rc = desc->pd_ops->op_lprocfs_init(svc); - if (rc != 0) - goto failed; - } - } - -failed: - - mutex_unlock(&nrs_core.nrs_mutex); - - return rc; -} - -/** - * Unregisters all policies on all service partitions of service \a svc. - * - * \param[in] svc the PTLRPC service to unregister - */ -void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc) -{ - struct ptlrpc_service_part *svcpt; - const struct ptlrpc_nrs_pol_desc *desc; - int i; - - mutex_lock(&nrs_core.nrs_mutex); - - /** - * Clean up NRS heads on all service partitions - */ - ptlrpc_service_for_each_part(svcpt, i, svc) - nrs_svcpt_cleanup_locked(svcpt); - - /** - * Clean up lprocfs interfaces for all supported policies for the - * service. - */ - list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) { - if (!nrs_policy_compatible(svc, desc)) - continue; - - if (desc->pd_ops->op_lprocfs_fini) - desc->pd_ops->op_lprocfs_fini(svc); - } - - mutex_unlock(&nrs_core.nrs_mutex); -} - -/** - * Obtains NRS head resources for request \a req. - * - * These could be either on the regular or HP NRS head of \a svcpt; resources - * taken on the regular head can later be swapped for HP head resources by - * ldlm_lock_reorder_req(). 
- * - * \param[in] svcpt the service partition - * \param[in] req the request - * \param[in] hp which NRS head of \a svcpt to use - */ -void ptlrpc_nrs_req_initialize(struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req, bool hp) -{ - struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp); - - memset(&req->rq_nrq, 0, sizeof(req->rq_nrq)); - nrs_resource_get_safe(nrs, &req->rq_nrq, req->rq_nrq.nr_res_ptrs, - false); - - /** - * It is fine to access \e nr_initialized without locking as there is - * no contention at this early stage. - */ - req->rq_nrq.nr_initialized = 1; -} - -/** - * Releases resources for a request; is called after the request has been - * handled. - * - * \param[in] req the request - * - * \see ptlrpc_server_finish_request() - */ -void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req) -{ - if (req->rq_nrq.nr_initialized) { - nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs); - /* no protection on bit nr_initialized because no - * contention at this late stage - */ - req->rq_nrq.nr_finalized = 1; - } -} - -void ptlrpc_nrs_req_stop_nolock(struct ptlrpc_request *req) -{ - if (req->rq_nrq.nr_started) - nrs_request_stop(&req->rq_nrq); -} - -/** - * Enqueues request \a req on either the regular or high-priority NRS head - * of service partition \a svcpt. - * - * \param[in] svcpt the service partition - * \param[in] req the request to be enqueued - * \param[in] hp whether to enqueue the request on the regular or - * high-priority NRS head. 
- */ -void ptlrpc_nrs_req_add(struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req, bool hp) -{ - spin_lock(&svcpt->scp_req_lock); - - if (hp) - ptlrpc_nrs_hpreq_add_nolock(req); - else - ptlrpc_nrs_req_add_nolock(req); - - spin_unlock(&svcpt->scp_req_lock); -} - -static void nrs_request_removed(struct ptlrpc_nrs_policy *policy) -{ - LASSERT(policy->pol_nrs->nrs_req_queued > 0); - LASSERT(policy->pol_req_queued > 0); - - policy->pol_nrs->nrs_req_queued--; - policy->pol_req_queued--; - - /** - * If the policy has no more requests queued, remove it from - * ptlrpc_nrs::nrs_policy_queued. - */ - if (unlikely(policy->pol_req_queued == 0)) { - list_del_init(&policy->pol_list_queued); - - /** - * If there are other policies with queued requests, move the - * current policy to the end so that we can round robin over - * all policies and drain the requests. - */ - } else if (policy->pol_req_queued != policy->pol_nrs->nrs_req_queued) { - LASSERT(policy->pol_req_queued < - policy->pol_nrs->nrs_req_queued); - - list_move_tail(&policy->pol_list_queued, - &policy->pol_nrs->nrs_policy_queued); - } -} - -/** - * Obtains a request for handling from an NRS head of service partition - * \a svcpt. - * - * \param[in] svcpt the service partition - * \param[in] hp whether to obtain a request from the regular or - * high-priority NRS head. - * \param[in] peek when set, signifies that we just want to examine the - * request, and not handle it, so the request is not removed - * from the policy. 
- * \param[in] force when set, it will force a policy to return a request if it - * has one pending - * - * \retval the request to be handled - * \retval NULL the head has no requests to serve - */ -struct ptlrpc_request * -ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp, - bool peek, bool force) -{ - struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp); - struct ptlrpc_nrs_policy *policy; - struct ptlrpc_nrs_request *nrq; - - /** - * Always try to drain requests from all NRS polices even if they are - * inactive, because the user can change policy status at runtime. - */ - list_for_each_entry(policy, &nrs->nrs_policy_queued, pol_list_queued) { - nrq = nrs_request_get(policy, peek, force); - if (nrq) { - if (likely(!peek)) { - nrq->nr_started = 1; - - policy->pol_req_started++; - policy->pol_nrs->nrs_req_started++; - - nrs_request_removed(policy); - } - - return container_of(nrq, struct ptlrpc_request, rq_nrq); - } - } - - return NULL; -} - -/** - * Returns whether there are any requests currently enqueued on any of the - * policies of service partition's \a svcpt NRS head specified by \a hp. Should - * be called while holding ptlrpc_service_part::scp_req_lock to get a reliable - * result. - * - * \param[in] svcpt the service partition to enquire. - * \param[in] hp whether the regular or high-priority NRS head is to be - * enquired. - * - * \retval false the indicated NRS head has no enqueued requests. - * \retval true the indicated NRS head has some enqueued requests. - */ -bool ptlrpc_nrs_req_pending_nolock(struct ptlrpc_service_part *svcpt, bool hp) -{ - struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp); - - return nrs->nrs_req_queued > 0; -}; - -/** - * Carries out a control operation \a opc on the policy identified by the - * human-readable \a name, on either all partitions, or only on the first - * partition of service \a svc. - * - * \param[in] svc the service the policy belongs to. 
- * \param[in] queue whether to carry out the command on the policy which - * belongs to the regular, high-priority, or both NRS - * heads of service partitions of \a svc. - * \param[in] name the policy to act upon, by human-readable name - * \param[in] opc the opcode of the operation to carry out - * \param[in] single when set, the operation will only be carried out on the - * NRS heads of the first service partition of \a svc. - * This is useful for some policies which e.g. share - * identical values on the same parameters of different - * service partitions; when reading these parameters via - * lprocfs, these policies may just want to obtain and - * print out the values from the first service partition. - * Storing these values centrally elsewhere then could be - * another solution for this. - * \param[in,out] arg can be used as a generic in/out buffer between control - * operations and the user environment. - * - *\retval -ve error condition - *\retval 0 operation was carried out successfully - */ -int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc, - enum ptlrpc_nrs_queue_type queue, char *name, - enum ptlrpc_nrs_ctl opc, bool single, void *arg) -{ - struct ptlrpc_service_part *svcpt; - int i; - int rc = 0; - - LASSERT(opc != PTLRPC_NRS_CTL_INVALID); - - if ((queue & PTLRPC_NRS_QUEUE_BOTH) == 0) - return -EINVAL; - - ptlrpc_service_for_each_part(svcpt, i, svc) { - if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) { - rc = nrs_policy_ctl(nrs_svcpt2nrs(svcpt, false), name, - opc, arg); - if (rc != 0 || (queue == PTLRPC_NRS_QUEUE_REG && - single)) - goto out; - } - - if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) { - /** - * XXX: We could optionally check for - * nrs_svc_has_hp(svc) here, and return an error if it - * is false. Right now we rely on the policies' lprocfs - * handlers that call the present function to make this - * check; if they fail to do so, they might hit the - * assertion inside nrs_svcpt2nrs() below. 
- */ - rc = nrs_policy_ctl(nrs_svcpt2nrs(svcpt, true), name, - opc, arg); - if (rc != 0 || single) - goto out; - } - } -out: - return rc; -} - -/** - * Adds all policies that ship with the ptlrpc module, to NRS core's list of - * policies \e nrs_core.nrs_policies. - * - * \retval 0 all policies have been registered successfully - * \retval -ve error - */ -int ptlrpc_nrs_init(void) -{ - int rc; - - mutex_init(&nrs_core.nrs_mutex); - INIT_LIST_HEAD(&nrs_core.nrs_policies); - - rc = ptlrpc_nrs_policy_register(&nrs_conf_fifo); - if (rc != 0) - goto fail; - - return rc; -fail: - /** - * Since no PTLRPC services have been started at this point, all we need - * to do for cleanup is to free the descriptors. - */ - ptlrpc_nrs_fini(); - - return rc; -} - -/** - * Removes all policy descriptors from nrs_core::nrs_policies, and frees the - * policy descriptors. - * - * Since all PTLRPC services are stopped at this point, there are no more - * instances of any policies, because each service will have stopped its policy - * instances in ptlrpc_service_nrs_cleanup(), so we just need to free the - * descriptors here. - */ -void ptlrpc_nrs_fini(void) -{ - struct ptlrpc_nrs_pol_desc *desc; - struct ptlrpc_nrs_pol_desc *tmp; - - list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, pd_list) { - list_del_init(&desc->pd_list); - kfree(desc); - } -} - -/** @} nrs */ diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c deleted file mode 100644 index ff630d94dd26..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c +++ /dev/null @@ -1,270 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. A copy is - * included in the COPYING file that accompanied this code. - - * GPL HEADER END - */ -/* - * Copyright (c) 2011 Intel Corporation - * - * Copyright 2012 Xyratex Technology Limited - */ -/* - * lustre/ptlrpc/nrs_fifo.c - * - * Network Request Scheduler (NRS) FIFO policy - * - * Handles RPCs in a FIFO manner, as received from the network. This policy is - * a logical wrapper around previous, non-NRS functionality. It is used as the - * default and fallback policy for all types of RPCs on all PTLRPC service - * partitions, for both regular and high-priority NRS heads. Default here means - * the policy is the one enabled at PTLRPC service partition startup time, and - * fallback means the policy is used to handle RPCs that are not handled - * successfully or are not handled at all by any primary policy that may be - * enabled on a given NRS head. - * - * Author: Liang Zhen - * Author: Nikitas Angelinas - */ -/** - * \addtogoup nrs - * @{ - */ - -#define DEBUG_SUBSYSTEM S_RPC -#include -#include -#include -#include -#include "ptlrpc_internal.h" - -/** - * \name fifo - * - * The FIFO policy is a logical wrapper around previous, non-NRS functionality. - * It schedules RPCs in the same order as they are queued from LNet. - * - * @{ - */ - -#define NRS_POL_NAME_FIFO "fifo" - -/** - * Is called before the policy transitions into - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED; allocates and initializes a - * policy-specific private data structure. 
- * - * \param[in] policy The policy to start - * - * \retval -ENOMEM OOM error - * \retval 0 success - * - * \see nrs_policy_register() - * \see nrs_policy_ctl() - */ -static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy) -{ - struct nrs_fifo_head *head; - - head = kzalloc_node(sizeof(*head), GFP_NOFS, - cfs_cpt_spread_node(nrs_pol2cptab(policy), - nrs_pol2cptid(policy))); - if (!head) - return -ENOMEM; - - INIT_LIST_HEAD(&head->fh_list); - policy->pol_private = head; - return 0; -} - -/** - * Is called before the policy transitions into - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED; deallocates the policy-specific - * private data structure. - * - * \param[in] policy The policy to stop - * - * \see nrs_policy_stop0() - */ -static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy) -{ - struct nrs_fifo_head *head = policy->pol_private; - - LASSERT(head); - LASSERT(list_empty(&head->fh_list)); - - kfree(head); -} - -/** - * Is called for obtaining a FIFO policy resource. - * - * \param[in] policy The policy on which the request is being asked for - * \param[in] nrq The request for which resources are being taken - * \param[in] parent Parent resource, unused in this policy - * \param[out] resp Resources references are placed in this array - * \param[in] moving_req Signifies limited caller context; unused in this - * policy - * - * \retval 1 The FIFO policy only has a one-level resource hierarchy, as since - * it implements a simple scheduling algorithm in which request - * priority is determined on the request arrival order, it does not - * need to maintain a set of resources that would otherwise be used - * to calculate a request's priority. 
- * - * \see nrs_resource_get_safe() - */ -static int nrs_fifo_res_get(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq, - const struct ptlrpc_nrs_resource *parent, - struct ptlrpc_nrs_resource **resp, bool moving_req) -{ - /** - * Just return the resource embedded inside nrs_fifo_head, and end this - * resource hierarchy reference request. - */ - *resp = &((struct nrs_fifo_head *)policy->pol_private)->fh_res; - return 1; -} - -/** - * Called when getting a request from the FIFO policy for handling, or just - * peeking; removes the request from the policy when it is to be handled. - * - * \param[in] policy The policy - * \param[in] peek When set, signifies that we just want to examine the - * request, and not handle it, so the request is not removed - * from the policy. - * \param[in] force Force the policy to return a request; unused in this - * policy - * - * \retval The request to be handled; this is the next request in the FIFO - * queue - * - * \see ptlrpc_nrs_req_get_nolock() - * \see nrs_request_get() - */ -static -struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy, - bool peek, bool force) -{ - struct nrs_fifo_head *head = policy->pol_private; - struct ptlrpc_nrs_request *nrq; - - nrq = unlikely(list_empty(&head->fh_list)) ? 
NULL : - list_entry(head->fh_list.next, struct ptlrpc_nrs_request, - nr_u.fifo.fr_list); - - if (likely(!peek && nrq)) { - struct ptlrpc_request *req = container_of(nrq, - struct ptlrpc_request, - rq_nrq); - - list_del_init(&nrq->nr_u.fifo.fr_list); - - CDEBUG(D_RPCTRACE, "NRS start %s request from %s, seq: %llu\n", - policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer), - nrq->nr_u.fifo.fr_sequence); - } - - return nrq; -} - -/** - * Adds request \a nrq to \a policy's list of queued requests - * - * \param[in] policy The policy - * \param[in] nrq The request to add - * - * \retval 0 success; nrs_request_enqueue() assumes this function will always - * succeed - */ -static int nrs_fifo_req_add(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq) -{ - struct nrs_fifo_head *head; - - head = container_of(nrs_request_resource(nrq), struct nrs_fifo_head, - fh_res); - /** - * Only used for debugging - */ - nrq->nr_u.fifo.fr_sequence = head->fh_sequence++; - list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list); - - return 0; -} - -/** - * Removes request \a nrq from \a policy's list of queued requests. - * - * \param[in] policy The policy - * \param[in] nrq The request to remove - */ -static void nrs_fifo_req_del(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq) -{ - LASSERT(!list_empty(&nrq->nr_u.fifo.fr_list)); - list_del_init(&nrq->nr_u.fifo.fr_list); -} - -/** - * Prints a debug statement right before the request \a nrq stops being - * handled. 
- * - * \param[in] policy The policy handling the request - * \param[in] nrq The request being handled - * - * \see ptlrpc_server_finish_request() - * \see ptlrpc_nrs_req_stop_nolock() - */ -static void nrs_fifo_req_stop(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq) -{ - struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request, - rq_nrq); - - CDEBUG(D_RPCTRACE, "NRS stop %s request from %s, seq: %llu\n", - policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer), - nrq->nr_u.fifo.fr_sequence); -} - -/** - * FIFO policy operations - */ -static const struct ptlrpc_nrs_pol_ops nrs_fifo_ops = { - .op_policy_start = nrs_fifo_start, - .op_policy_stop = nrs_fifo_stop, - .op_res_get = nrs_fifo_res_get, - .op_req_get = nrs_fifo_req_get, - .op_req_enqueue = nrs_fifo_req_add, - .op_req_dequeue = nrs_fifo_req_del, - .op_req_stop = nrs_fifo_req_stop, -}; - -/** - * FIFO policy configuration - */ -struct ptlrpc_nrs_pol_conf nrs_conf_fifo = { - .nc_name = NRS_POL_NAME_FIFO, - .nc_ops = &nrs_fifo_ops, - .nc_compat = nrs_policy_compat_all, - .nc_flags = PTLRPC_NRS_FL_FALLBACK | - PTLRPC_NRS_FL_REG_START -}; - -/** @} fifo */ - -/** @} nrs */ diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c deleted file mode 100644 index 6ac9bb570663..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c +++ /dev/null @@ -1,2311 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/pack_generic.c - * - * (Un)packing of OST requests - * - * Author: Peter J. Braam - * Author: Phil Schwan - * Author: Eric Barton - */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include - -#include -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -static inline u32 lustre_msg_hdr_size_v2(u32 count) -{ - return cfs_size_round(offsetof(struct lustre_msg_v2, - lm_buflens[count])); -} - -u32 lustre_msg_hdr_size(__u32 magic, u32 count) -{ - switch (magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_msg_hdr_size_v2(count); - default: - LASSERTF(0, "incorrect message magic: %08x\n", magic); - return 0; - } -} - -void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, - u32 index) -{ - if (inout) - lustre_set_req_swabbed(req, index); - else - lustre_set_rep_swabbed(req, index); -} - -int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, - u32 index) -{ - if (inout) - return (ptlrpc_req_need_swab(req) && - !lustre_req_swabbed(req, index)); - else - return (ptlrpc_rep_need_swab(req) && - !lustre_rep_swabbed(req, index)); -} - -/* early reply size */ -u32 lustre_msg_early_size(void) -{ - static u32 size; - - if (!size) { - /* Always reply old ptlrpc_body_v2 to keep interoperability - * with the old client (< 2.3) which doesn't have pb_jobid - * in the ptlrpc_body. 
- * - * XXX Remove this whenever we drop interoperability with such - * client. - */ - __u32 pblen = sizeof(struct ptlrpc_body_v2); - - size = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen); - } - return size; -} -EXPORT_SYMBOL(lustre_msg_early_size); - -u32 lustre_msg_size_v2(int count, __u32 *lengths) -{ - u32 size; - int i; - - size = lustre_msg_hdr_size_v2(count); - for (i = 0; i < count; i++) - size += cfs_size_round(lengths[i]); - - return size; -} -EXPORT_SYMBOL(lustre_msg_size_v2); - -/* This returns the size of the buffer that is required to hold a lustre_msg - * with the given sub-buffer lengths. - * NOTE: this should only be used for NEW requests, and should always be - * in the form of a v2 request. If this is a connection to a v1 - * target then the first buffer will be stripped because the ptlrpc - * data is part of the lustre_msg_v1 header. b=14043 - */ -u32 lustre_msg_size(__u32 magic, int count, __u32 *lens) -{ - __u32 size[] = { sizeof(struct ptlrpc_body) }; - - if (!lens) { - LASSERT(count == 1); - lens = size; - } - - LASSERT(count > 0); - LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2)); - - switch (magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_msg_size_v2(count, lens); - default: - LASSERTF(0, "incorrect message magic: %08x\n", magic); - return 0; - } -} - -/* This is used to determine the size of a buffer that was already packed - * and will correctly handle the different message formats. 
- */ -u32 lustre_packed_msg_size(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens); - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} - -void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens, - char **bufs) -{ - char *ptr; - int i; - - msg->lm_bufcount = count; - /* XXX: lm_secflvr uninitialized here */ - msg->lm_magic = LUSTRE_MSG_MAGIC_V2; - - for (i = 0; i < count; i++) - msg->lm_buflens[i] = lens[i]; - - if (!bufs) - return; - - ptr = (char *)msg + lustre_msg_hdr_size_v2(count); - for (i = 0; i < count; i++) { - char *tmp = bufs[i]; - - if (tmp) - memcpy(ptr, tmp, lens[i]); - ptr += cfs_size_round(lens[i]); - } -} -EXPORT_SYMBOL(lustre_init_msg_v2); - -static int lustre_pack_request_v2(struct ptlrpc_request *req, - int count, __u32 *lens, char **bufs) -{ - int reqlen, rc; - - reqlen = lustre_msg_size_v2(count, lens); - - rc = sptlrpc_cli_alloc_reqbuf(req, reqlen); - if (rc) - return rc; - - req->rq_reqlen = reqlen; - - lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs); - lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION); - return 0; -} - -int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count, - __u32 *lens, char **bufs) -{ - __u32 size[] = { sizeof(struct ptlrpc_body) }; - - if (!lens) { - LASSERT(count == 1); - lens = size; - } - - LASSERT(count > 0); - LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body)); - - /* only use new format, we don't need to be compatible with 1.4 */ - return lustre_pack_request_v2(req, count, lens, bufs); -} - -#if RS_DEBUG -LIST_HEAD(ptlrpc_rs_debug_lru); -spinlock_t ptlrpc_rs_debug_lock; - -#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \ -do { \ - spin_lock(&ptlrpc_rs_debug_lock); \ - list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \ - spin_unlock(&ptlrpc_rs_debug_lock); \ -} while (0) - -#define PTLRPC_RS_DEBUG_LRU_DEL(rs) \ -do { \ 
- spin_lock(&ptlrpc_rs_debug_lock); \ - list_del(&(rs)->rs_debug_list); \ - spin_unlock(&ptlrpc_rs_debug_lock); \ -} while (0) -#else -# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while (0) -# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while (0) -#endif - -struct ptlrpc_reply_state * -lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_reply_state *rs = NULL; - - spin_lock(&svcpt->scp_rep_lock); - - /* See if we have anything in a pool, and wait if nothing */ - while (list_empty(&svcpt->scp_rep_idle)) { - int rc; - - spin_unlock(&svcpt->scp_rep_lock); - /* If we cannot get anything for some long time, we better - * bail out instead of waiting infinitely - */ - rc = wait_event_idle_timeout(svcpt->scp_rep_waitq, - !list_empty(&svcpt->scp_rep_idle), - 10 * HZ); - if (rc == 0) - goto out; - spin_lock(&svcpt->scp_rep_lock); - } - - rs = list_entry(svcpt->scp_rep_idle.next, - struct ptlrpc_reply_state, rs_list); - list_del(&rs->rs_list); - - spin_unlock(&svcpt->scp_rep_lock); - - memset(rs, 0, svcpt->scp_service->srv_max_reply_size); - rs->rs_size = svcpt->scp_service->srv_max_reply_size; - rs->rs_svcpt = svcpt; - rs->rs_prealloc = 1; -out: - return rs; -} - -void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs) -{ - struct ptlrpc_service_part *svcpt = rs->rs_svcpt; - - spin_lock(&svcpt->scp_rep_lock); - list_add(&rs->rs_list, &svcpt->scp_rep_idle); - spin_unlock(&svcpt->scp_rep_lock); - wake_up(&svcpt->scp_rep_waitq); -} - -int lustre_pack_reply_v2(struct ptlrpc_request *req, int count, - __u32 *lens, char **bufs, int flags) -{ - struct ptlrpc_reply_state *rs; - int msg_len, rc; - - LASSERT(!req->rq_reply_state); - - if ((flags & LPRFL_EARLY_REPLY) == 0) { - spin_lock(&req->rq_lock); - req->rq_packed_final = 1; - spin_unlock(&req->rq_lock); - } - - msg_len = lustre_msg_size_v2(count, lens); - rc = sptlrpc_svc_alloc_rs(req, msg_len); - if (rc) - return rc; - - rs = req->rq_reply_state; - atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */ 
- rs->rs_cb_id.cbid_fn = reply_out_callback; - rs->rs_cb_id.cbid_arg = rs; - rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt; - INIT_LIST_HEAD(&rs->rs_exp_list); - INIT_LIST_HEAD(&rs->rs_obd_list); - INIT_LIST_HEAD(&rs->rs_list); - spin_lock_init(&rs->rs_lock); - - req->rq_replen = msg_len; - req->rq_reply_state = rs; - req->rq_repmsg = rs->rs_msg; - - lustre_init_msg_v2(rs->rs_msg, count, lens, bufs); - lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION); - - PTLRPC_RS_DEBUG_LRU_ADD(rs); - - return 0; -} -EXPORT_SYMBOL(lustre_pack_reply_v2); - -int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens, - char **bufs, int flags) -{ - int rc = 0; - __u32 size[] = { sizeof(struct ptlrpc_body) }; - - if (!lens) { - LASSERT(count == 1); - lens = size; - } - - LASSERT(count > 0); - LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body)); - - switch (req->rq_reqmsg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - rc = lustre_pack_reply_v2(req, count, lens, bufs, flags); - break; - default: - LASSERTF(0, "incorrect message magic: %08x\n", - req->rq_reqmsg->lm_magic); - rc = -EINVAL; - } - if (rc != 0) - CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc, - lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens)); - return rc; -} - -int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens, - char **bufs) -{ - return lustre_pack_reply_flags(req, count, lens, bufs, 0); -} -EXPORT_SYMBOL(lustre_pack_reply); - -void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, u32 n, u32 min_size) -{ - u32 i, offset, buflen, bufcount; - - bufcount = m->lm_bufcount; - if (unlikely(n >= bufcount)) { - CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n", - m, n, bufcount); - return NULL; - } - - buflen = m->lm_buflens[n]; - if (unlikely(buflen < min_size)) { - CERROR("msg %p buffer[%d] size %d too small (required %d, opc=%d)\n", - m, n, buflen, min_size, - n == MSG_PTLRPC_BODY_OFF ? 
-1 : lustre_msg_get_opc(m)); - return NULL; - } - - offset = lustre_msg_hdr_size_v2(bufcount); - for (i = 0; i < n; i++) - offset += cfs_size_round(m->lm_buflens[i]); - - return (char *)m + offset; -} - -void *lustre_msg_buf(struct lustre_msg *m, u32 n, u32 min_size) -{ - switch (m->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_msg_buf_v2(m, n, min_size); - default: - LASSERTF(0, "incorrect message magic: %08x (msg:%p)\n", - m->lm_magic, m); - return NULL; - } -} -EXPORT_SYMBOL(lustre_msg_buf); - -static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, u32 segment, - unsigned int newlen, int move_data) -{ - char *tail = NULL, *newpos; - int tail_len = 0, n; - - LASSERT(msg); - LASSERT(msg->lm_bufcount > segment); - LASSERT(msg->lm_buflens[segment] >= newlen); - - if (msg->lm_buflens[segment] == newlen) - goto out; - - if (move_data && msg->lm_bufcount > segment + 1) { - tail = lustre_msg_buf_v2(msg, segment + 1, 0); - for (n = segment + 1; n < msg->lm_bufcount; n++) - tail_len += cfs_size_round(msg->lm_buflens[n]); - } - - msg->lm_buflens[segment] = newlen; - - if (tail && tail_len) { - newpos = lustre_msg_buf_v2(msg, segment + 1, 0); - LASSERT(newpos <= tail); - if (newpos != tail) - memmove(newpos, tail, tail_len); - } -out: - return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens); -} - -/* - * for @msg, shrink @segment to size @newlen. if @move_data is non-zero, - * we also move data forward from @segment + 1. - * - * if @newlen == 0, we remove the segment completely, but we still keep the - * totally bufcount the same to save possible data moving. this will leave a - * unused segment with size 0 at the tail, but that's ok. - * - * return new msg size after shrinking. - * - * CAUTION: - * + if any buffers higher than @segment has been filled in, must call shrink - * with non-zero @move_data. - * + caller should NOT keep pointers to msg buffers which higher than @segment - * after call shrink. 
- */ -int lustre_shrink_msg(struct lustre_msg *msg, int segment, - unsigned int newlen, int move_data) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_shrink_msg_v2(msg, segment, newlen, move_data); - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } - return 0; -} -EXPORT_SYMBOL(lustre_shrink_msg); - -void lustre_free_reply_state(struct ptlrpc_reply_state *rs) -{ - PTLRPC_RS_DEBUG_LRU_DEL(rs); - - LASSERT(atomic_read(&rs->rs_refcount) == 0); - LASSERT(!rs->rs_difficult || rs->rs_handled); - LASSERT(!rs->rs_on_net); - LASSERT(!rs->rs_scheduled); - LASSERT(!rs->rs_export); - LASSERT(rs->rs_nlocks == 0); - LASSERT(list_empty(&rs->rs_exp_list)); - LASSERT(list_empty(&rs->rs_obd_list)); - - sptlrpc_svc_free_rs(rs); -} - -static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len) -{ - int swabbed, required_len, i; - - /* Now we know the sender speaks my language. */ - required_len = lustre_msg_hdr_size_v2(0); - if (len < required_len) { - /* can't even look inside the message */ - CERROR("message length %d too small for lustre_msg\n", len); - return -EINVAL; - } - - swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED); - - if (swabbed) { - __swab32s(&m->lm_magic); - __swab32s(&m->lm_bufcount); - __swab32s(&m->lm_secflvr); - __swab32s(&m->lm_repsize); - __swab32s(&m->lm_cksum); - __swab32s(&m->lm_flags); - BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_2) == 0); - BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_3) == 0); - } - - required_len = lustre_msg_hdr_size_v2(m->lm_bufcount); - if (len < required_len) { - /* didn't receive all the buffer lengths */ - CERROR("message length %d too small for %d buflens\n", - len, m->lm_bufcount); - return -EINVAL; - } - - for (i = 0; i < m->lm_bufcount; i++) { - if (swabbed) - __swab32s(&m->lm_buflens[i]); - required_len += cfs_size_round(m->lm_buflens[i]); - } - - if (len < required_len) { - CERROR("len: %d, required_len %d\n", len, required_len); - CERROR("bufcount: %d\n", 
m->lm_bufcount); - for (i = 0; i < m->lm_bufcount; i++) - CERROR("buffer %d length %d\n", i, m->lm_buflens[i]); - return -EINVAL; - } - - return swabbed; -} - -int __lustre_unpack_msg(struct lustre_msg *m, int len) -{ - int required_len, rc; - - /* We can provide a slightly better error log, if we check the - * message magic and version first. In the future, struct - * lustre_msg may grow, and we'd like to log a version mismatch, - * rather than a short message. - * - */ - required_len = offsetof(struct lustre_msg, lm_magic) + - sizeof(m->lm_magic); - if (len < required_len) { - /* can't even look inside the message */ - CERROR("message length %d too small for magic/version check\n", - len); - return -EINVAL; - } - - rc = lustre_unpack_msg_v2(m, len); - - return rc; -} -EXPORT_SYMBOL(__lustre_unpack_msg); - -int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len) -{ - int rc; - - rc = __lustre_unpack_msg(req->rq_reqmsg, len); - if (rc == 1) { - lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); - rc = 0; - } - return rc; -} - -int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len) -{ - int rc; - - rc = __lustre_unpack_msg(req->rq_repmsg, len); - if (rc == 1) { - lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); - rc = 0; - } - return rc; -} - -static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req, - const int inout, int offset) -{ - struct ptlrpc_body *pb; - struct lustre_msg_v2 *m = inout ? 
req->rq_reqmsg : req->rq_repmsg; - - pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2)); - if (!pb) { - CERROR("error unpacking ptlrpc body\n"); - return -EFAULT; - } - if (ptlrpc_buf_need_swab(req, inout, offset)) { - lustre_swab_ptlrpc_body(pb); - ptlrpc_buf_set_swabbed(req, inout, offset); - } - - if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) { - CERROR("wrong lustre_msg version %08x\n", pb->pb_version); - return -EINVAL; - } - - if (!inout) - pb->pb_status = ptlrpc_status_ntoh(pb->pb_status); - - return 0; -} - -int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset) -{ - switch (req->rq_reqmsg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_unpack_ptlrpc_body_v2(req, 1, offset); - default: - CERROR("bad lustre msg magic: %08x\n", - req->rq_reqmsg->lm_magic); - return -EINVAL; - } -} - -int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset) -{ - switch (req->rq_repmsg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_unpack_ptlrpc_body_v2(req, 0, offset); - default: - CERROR("bad lustre msg magic: %08x\n", - req->rq_repmsg->lm_magic); - return -EINVAL; - } -} - -static inline u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, u32 n) -{ - if (n >= m->lm_bufcount) - return 0; - - return m->lm_buflens[n]; -} - -/** - * lustre_msg_buflen - return the length of buffer \a n in message \a m - * \param m lustre_msg (request or reply) to look at - * \param n message index (base 0) - * - * returns zero for non-existent message indices - */ -u32 lustre_msg_buflen(struct lustre_msg *m, u32 n) -{ - switch (m->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_msg_buflen_v2(m, n); - default: - CERROR("incorrect message magic: %08x\n", m->lm_magic); - return 0; - } -} -EXPORT_SYMBOL(lustre_msg_buflen); - -/* NB return the bufcount for lustre_msg_v2 format, so if message is packed - * in V1 format, the result is one bigger. (add struct ptlrpc_body). 
- */ -u32 lustre_msg_bufcount(struct lustre_msg *m) -{ - switch (m->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return m->lm_bufcount; - default: - CERROR("incorrect message magic: %08x\n", m->lm_magic); - return 0; - } -} - -char *lustre_msg_string(struct lustre_msg *m, u32 index, u32 max_len) -{ - /* max_len == 0 means the string should fill the buffer */ - char *str; - u32 slen, blen; - - switch (m->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - str = lustre_msg_buf_v2(m, index, 0); - blen = lustre_msg_buflen_v2(m, index); - break; - default: - LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic); - } - - if (!str) { - CERROR("can't unpack string in msg %p buffer[%d]\n", m, index); - return NULL; - } - - slen = strnlen(str, blen); - - if (slen == blen) { /* not NULL terminated */ - CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n", - m, index, blen); - return NULL; - } - - if (max_len == 0) { - if (slen != blen - 1) { - CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n", - m, index, blen, slen); - return NULL; - } - } else if (slen > max_len) { - CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n", - m, index, blen, slen, max_len); - return NULL; - } - - return str; -} - -/* Wrap up the normal fixed length cases */ -static inline void *__lustre_swab_buf(struct lustre_msg *msg, u32 index, - u32 min_size, void *swabber) -{ - void *ptr = NULL; - - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - ptr = lustre_msg_buf_v2(msg, index, min_size); - break; - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - } - - if (ptr && swabber) - ((void (*)(void *))swabber)(ptr); - - return ptr; -} - -static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg) -{ - return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF, - sizeof(struct ptlrpc_body_v2)); -} - -__u32 lustre_msghdr_get_flags(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - 
case LUSTRE_MSG_MAGIC_V2: - /* already in host endian */ - return msg->lm_flags; - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} -EXPORT_SYMBOL(lustre_msghdr_get_flags); - -void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - msg->lm_flags = flags; - return; - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -__u32 lustre_msg_get_flags(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (pb) - return pb->pb_flags; - - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - } - /* fall through */ - default: - /* flags might be printed in debug code while message - * uninitialized - */ - return 0; - } -} -EXPORT_SYMBOL(lustre_msg_get_flags); - -void lustre_msg_add_flags(struct lustre_msg *msg, u32 flags) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_flags |= flags; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} -EXPORT_SYMBOL(lustre_msg_add_flags); - -void lustre_msg_set_flags(struct lustre_msg *msg, u32 flags) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_flags = flags; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_clear_flags(struct lustre_msg *msg, u32 flags) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_flags &= ~(flags & MSG_GEN_FLAG_MASK); - return; - } - default: - LASSERTF(0, "incorrect message 
magic: %08x\n", msg->lm_magic); - } -} -EXPORT_SYMBOL(lustre_msg_clear_flags); - -__u32 lustre_msg_get_op_flags(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (pb) - return pb->pb_op_flags; - - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - } - /* fall through */ - default: - return 0; - } -} - -void lustre_msg_add_op_flags(struct lustre_msg *msg, u32 flags) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_op_flags |= flags; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} -EXPORT_SYMBOL(lustre_msg_add_op_flags); - -struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return NULL; - } - return &pb->pb_handle; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return NULL; - } -} - -__u32 lustre_msg_get_type(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return PTL_RPC_MSG_ERR; - } - return pb->pb_type; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return PTL_RPC_MSG_ERR; - } -} -EXPORT_SYMBOL(lustre_msg_get_type); - -void lustre_msg_add_version(struct lustre_msg *msg, u32 version) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_version |= version; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); 
- } -} - -__u32 lustre_msg_get_opc(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return 0; - } - return pb->pb_opc; - } - default: - CERROR("incorrect message magic: %08x (msg:%p)\n", - msg->lm_magic, msg); - return 0; - } -} -EXPORT_SYMBOL(lustre_msg_get_opc); - -__u16 lustre_msg_get_tag(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return 0; - } - return pb->pb_tag; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} -EXPORT_SYMBOL(lustre_msg_get_tag); - -__u64 lustre_msg_get_last_committed(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return 0; - } - return pb->pb_last_committed; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} -EXPORT_SYMBOL(lustre_msg_get_last_committed); - -__u64 *lustre_msg_get_versions(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return NULL; - } - return pb->pb_pre_versions; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return NULL; - } -} -EXPORT_SYMBOL(lustre_msg_get_versions); - -__u64 lustre_msg_get_transno(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return 0; - } - return pb->pb_transno; - } - default: - 
CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} -EXPORT_SYMBOL(lustre_msg_get_transno); - -int lustre_msg_get_status(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (pb) - return pb->pb_status; - - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - } - /* fall through */ - default: - /* status might be printed in debug code while message - * uninitialized - */ - return -EINVAL; - } -} -EXPORT_SYMBOL(lustre_msg_get_status); - -__u64 lustre_msg_get_slv(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return -EINVAL; - } - return pb->pb_slv; - } - default: - CERROR("invalid msg magic %08x\n", msg->lm_magic); - return -EINVAL; - } -} - -void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return; - } - pb->pb_slv = slv; - return; - } - default: - CERROR("invalid msg magic %x\n", msg->lm_magic); - return; - } -} - -__u32 lustre_msg_get_limit(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return -EINVAL; - } - return pb->pb_limit; - } - default: - CERROR("invalid msg magic %x\n", msg->lm_magic); - return -EINVAL; - } -} - -void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return; - } - pb->pb_limit = limit; - return; - } - default: - 
CERROR("invalid msg magic %08x\n", msg->lm_magic); - return; - } -} - -__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return 0; - } - return pb->pb_conn_cnt; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} -EXPORT_SYMBOL(lustre_msg_get_conn_cnt); - -__u32 lustre_msg_get_magic(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return msg->lm_magic; - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} - -__u32 lustre_msg_get_timeout(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return 0; - } - return pb->pb_timeout; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return -EPROTO; - } -} - -__u32 lustre_msg_get_service_time(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - if (!pb) { - CERROR("invalid msg %p: no ptlrpc body!\n", msg); - return 0; - } - return pb->pb_service_time; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} - -__u32 lustre_msg_get_cksum(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return msg->lm_cksum; - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} - -__u32 lustre_msg_calc_cksum(struct lustre_msg *msg) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - __u32 crc; - unsigned int hsize = 4; - - cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb, - 
lustre_msg_buflen(msg, - MSG_PTLRPC_BODY_OFF), - NULL, 0, (unsigned char *)&crc, &hsize); - return crc; - } - default: - CERROR("incorrect message magic: %08x\n", msg->lm_magic); - return 0; - } -} - -void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_handle = *handle; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_set_type(struct lustre_msg *msg, __u32 type) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_type = type; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_opc = opc; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_last_xid = last_xid; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_tag = tag; - return; - } - default: - LASSERTF(0, "incorrect message magic: 
%08x\n", msg->lm_magic); - } -} -EXPORT_SYMBOL(lustre_msg_set_tag); - -void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_pre_versions[0] = versions[0]; - pb->pb_pre_versions[1] = versions[1]; - pb->pb_pre_versions[2] = versions[2]; - pb->pb_pre_versions[3] = versions[3]; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} -EXPORT_SYMBOL(lustre_msg_set_versions); - -void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_transno = transno; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} -EXPORT_SYMBOL(lustre_msg_set_transno); - -void lustre_msg_set_status(struct lustre_msg *msg, __u32 status) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_status = status; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} -EXPORT_SYMBOL(lustre_msg_set_status); - -void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_conn_cnt = conn_cnt; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - 
LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_timeout = timeout; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_service_time = service_time; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - __u32 opc = lustre_msg_get_opc(msg); - struct ptlrpc_body *pb; - - /* Don't set jobid for ldlm ast RPCs, they've been shrunk. - * See the comment in ptlrpc_request_pack(). - */ - if (!opc || opc == LDLM_BL_CALLBACK || - opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK) - return; - - pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF, - sizeof(struct ptlrpc_body)); - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - - if (jobid) - memcpy(pb->pb_jobid, jobid, LUSTRE_JOBID_SIZE); - else if (pb->pb_jobid[0] == '\0') - lustre_get_jobid(pb->pb_jobid); - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} -EXPORT_SYMBOL(lustre_msg_set_jobid); - -void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - msg->lm_cksum = cksum; - return; - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} - -void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits) -{ - switch (msg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: { - struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg); - - LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - pb->pb_mbits = mbits; - return; - } - default: - LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic); - } -} 
- -void ptlrpc_request_set_replen(struct ptlrpc_request *req) -{ - int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER); - - req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count, - req->rq_pill.rc_area[RCL_SERVER]); - if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2) - req->rq_reqmsg->lm_repsize = req->rq_replen; -} -EXPORT_SYMBOL(ptlrpc_request_set_replen); - -/** - * Send a remote set_info_async. - * - * This may go from client to server or server to client. - */ -int do_set_info_async(struct obd_import *imp, - int opcode, int version, - u32 keylen, void *key, - u32 vallen, void *val, - struct ptlrpc_request_set *set) -{ - struct ptlrpc_request *req; - char *tmp; - int rc; - - req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO); - if (!req) - return -ENOMEM; - - req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, - RCL_CLIENT, keylen); - req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL, - RCL_CLIENT, vallen); - rc = ptlrpc_request_pack(req, version, opcode); - if (rc) { - ptlrpc_request_free(req); - return rc; - } - - tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); - memcpy(tmp, key, keylen); - tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL); - memcpy(tmp, val, vallen); - - ptlrpc_request_set_replen(req); - - if (set) { - ptlrpc_set_add_req(set, req); - ptlrpc_check_set(NULL, set); - } else { - rc = ptlrpc_queue_wait(req); - ptlrpc_req_finished(req); - } - - return rc; -} -EXPORT_SYMBOL(do_set_info_async); - -/* byte flipping routines for all wire types declared in - * lustre_idl.h implemented here. 
- */ -void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) -{ - __swab32s(&b->pb_type); - __swab32s(&b->pb_version); - __swab32s(&b->pb_opc); - __swab32s(&b->pb_status); - __swab64s(&b->pb_last_xid); - __swab16s(&b->pb_tag); - __swab64s(&b->pb_last_committed); - __swab64s(&b->pb_transno); - __swab32s(&b->pb_flags); - __swab32s(&b->pb_op_flags); - __swab32s(&b->pb_conn_cnt); - __swab32s(&b->pb_timeout); - __swab32s(&b->pb_service_time); - __swab32s(&b->pb_limit); - __swab64s(&b->pb_slv); - __swab64s(&b->pb_pre_versions[0]); - __swab64s(&b->pb_pre_versions[1]); - __swab64s(&b->pb_pre_versions[2]); - __swab64s(&b->pb_pre_versions[3]); - __swab64s(&b->pb_mbits); - BUILD_BUG_ON(offsetof(typeof(*b), pb_padding0) == 0); - BUILD_BUG_ON(offsetof(typeof(*b), pb_padding1) == 0); - BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_0) == 0); - BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_1) == 0); - BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_2) == 0); - /* While we need to maintain compatibility between - * clients and servers without ptlrpc_body_v2 (< 2.3) - * do not swab any fields beyond pb_jobid, as we are - * using this swab function for both ptlrpc_body - * and ptlrpc_body_v2. - */ - BUILD_BUG_ON(offsetof(typeof(*b), pb_jobid) == 0); -} - -void lustre_swab_connect(struct obd_connect_data *ocd) -{ - __swab64s(&ocd->ocd_connect_flags); - __swab32s(&ocd->ocd_version); - __swab32s(&ocd->ocd_grant); - __swab64s(&ocd->ocd_ibits_known); - __swab32s(&ocd->ocd_index); - __swab32s(&ocd->ocd_brw_size); - /* ocd_blocksize and ocd_inodespace don't need to be swabbed because - * they are 8-byte values - */ - __swab16s(&ocd->ocd_grant_extent); - __swab32s(&ocd->ocd_unused); - __swab64s(&ocd->ocd_transno); - __swab32s(&ocd->ocd_group); - __swab32s(&ocd->ocd_cksum_types); - __swab32s(&ocd->ocd_instance); - /* Fields after ocd_cksum_types are only accessible by the receiver - * if the corresponding flag in ocd_connect_flags is set. 
Accessing - * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. - */ - if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE) - __swab32s(&ocd->ocd_max_easize); - if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES) - __swab64s(&ocd->ocd_maxbytes); - if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) - __swab16s(&ocd->ocd_maxmodrpcs); - BUILD_BUG_ON(!offsetof(typeof(*ocd), padding0)); - BUILD_BUG_ON(offsetof(typeof(*ocd), padding1) == 0); - if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2) - __swab64s(&ocd->ocd_connect_flags2); - BUILD_BUG_ON(offsetof(typeof(*ocd), padding3) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), padding4) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), padding5) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), padding6) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), padding7) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), padding8) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), padding9) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), paddingA) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), paddingB) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), paddingC) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), paddingD) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), paddingE) == 0); - BUILD_BUG_ON(offsetof(typeof(*ocd), paddingF) == 0); -} - -static void lustre_swab_obdo(struct obdo *o) -{ - __swab64s(&o->o_valid); - lustre_swab_ost_id(&o->o_oi); - __swab64s(&o->o_parent_seq); - __swab64s(&o->o_size); - __swab64s(&o->o_mtime); - __swab64s(&o->o_atime); - __swab64s(&o->o_ctime); - __swab64s(&o->o_blocks); - __swab64s(&o->o_grant); - __swab32s(&o->o_blksize); - __swab32s(&o->o_mode); - __swab32s(&o->o_uid); - __swab32s(&o->o_gid); - __swab32s(&o->o_flags); - __swab32s(&o->o_nlink); - __swab32s(&o->o_parent_oid); - __swab32s(&o->o_misc); - __swab64s(&o->o_ioepoch); - __swab32s(&o->o_stripe_idx); - __swab32s(&o->o_parent_ver); - /* o_handle is opaque */ - /* o_lcookie is swabbed elsewhere */ - 
__swab32s(&o->o_uid_h); - __swab32s(&o->o_gid_h); - __swab64s(&o->o_data_version); - BUILD_BUG_ON(offsetof(typeof(*o), o_padding_4) == 0); - BUILD_BUG_ON(offsetof(typeof(*o), o_padding_5) == 0); - BUILD_BUG_ON(offsetof(typeof(*o), o_padding_6) == 0); -} - -void lustre_swab_obd_statfs(struct obd_statfs *os) -{ - __swab64s(&os->os_type); - __swab64s(&os->os_blocks); - __swab64s(&os->os_bfree); - __swab64s(&os->os_bavail); - __swab64s(&os->os_files); - __swab64s(&os->os_ffree); - /* no need to swab os_fsid */ - __swab32s(&os->os_bsize); - __swab32s(&os->os_namelen); - __swab64s(&os->os_maxbytes); - __swab32s(&os->os_state); - BUILD_BUG_ON(offsetof(typeof(*os), os_fprecreated) == 0); - BUILD_BUG_ON(offsetof(typeof(*os), os_spare2) == 0); - BUILD_BUG_ON(offsetof(typeof(*os), os_spare3) == 0); - BUILD_BUG_ON(offsetof(typeof(*os), os_spare4) == 0); - BUILD_BUG_ON(offsetof(typeof(*os), os_spare5) == 0); - BUILD_BUG_ON(offsetof(typeof(*os), os_spare6) == 0); - BUILD_BUG_ON(offsetof(typeof(*os), os_spare7) == 0); - BUILD_BUG_ON(offsetof(typeof(*os), os_spare8) == 0); - BUILD_BUG_ON(offsetof(typeof(*os), os_spare9) == 0); -} - -void lustre_swab_obd_ioobj(struct obd_ioobj *ioo) -{ - lustre_swab_ost_id(&ioo->ioo_oid); - __swab32s(&ioo->ioo_max_brw); - __swab32s(&ioo->ioo_bufcnt); -} - -void lustre_swab_niobuf_remote(struct niobuf_remote *nbr) -{ - __swab64s(&nbr->rnb_offset); - __swab32s(&nbr->rnb_len); - __swab32s(&nbr->rnb_flags); -} - -void lustre_swab_ost_body(struct ost_body *b) -{ - lustre_swab_obdo(&b->oa); -} - -void lustre_swab_ost_last_id(u64 *id) -{ - __swab64s(id); -} - -void lustre_swab_generic_32s(__u32 *val) -{ - __swab32s(val); -} - -void lustre_swab_gl_desc(union ldlm_gl_desc *desc) -{ - lustre_swab_lu_fid(&desc->lquota_desc.gl_id.qid_fid); - __swab64s(&desc->lquota_desc.gl_flags); - __swab64s(&desc->lquota_desc.gl_ver); - __swab64s(&desc->lquota_desc.gl_hardlimit); - __swab64s(&desc->lquota_desc.gl_softlimit); - __swab64s(&desc->lquota_desc.gl_time); - 
BUILD_BUG_ON(offsetof(typeof(desc->lquota_desc), gl_pad2) == 0); -} - -void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb) -{ - __swab64s(&lvb->lvb_size); - __swab64s(&lvb->lvb_mtime); - __swab64s(&lvb->lvb_atime); - __swab64s(&lvb->lvb_ctime); - __swab64s(&lvb->lvb_blocks); -} -EXPORT_SYMBOL(lustre_swab_ost_lvb_v1); - -void lustre_swab_ost_lvb(struct ost_lvb *lvb) -{ - __swab64s(&lvb->lvb_size); - __swab64s(&lvb->lvb_mtime); - __swab64s(&lvb->lvb_atime); - __swab64s(&lvb->lvb_ctime); - __swab64s(&lvb->lvb_blocks); - __swab32s(&lvb->lvb_mtime_ns); - __swab32s(&lvb->lvb_atime_ns); - __swab32s(&lvb->lvb_ctime_ns); - __swab32s(&lvb->lvb_padding); -} -EXPORT_SYMBOL(lustre_swab_ost_lvb); - -void lustre_swab_lquota_lvb(struct lquota_lvb *lvb) -{ - __swab64s(&lvb->lvb_flags); - __swab64s(&lvb->lvb_id_may_rel); - __swab64s(&lvb->lvb_id_rel); - __swab64s(&lvb->lvb_id_qunit); - __swab64s(&lvb->lvb_pad1); -} -EXPORT_SYMBOL(lustre_swab_lquota_lvb); - -void lustre_swab_mdt_body(struct mdt_body *b) -{ - lustre_swab_lu_fid(&b->mbo_fid1); - lustre_swab_lu_fid(&b->mbo_fid2); - /* handle is opaque */ - __swab64s(&b->mbo_valid); - __swab64s(&b->mbo_size); - __swab64s(&b->mbo_mtime); - __swab64s(&b->mbo_atime); - __swab64s(&b->mbo_ctime); - __swab64s(&b->mbo_blocks); - __swab64s(&b->mbo_ioepoch); - __swab64s(&b->mbo_t_state); - __swab32s(&b->mbo_fsuid); - __swab32s(&b->mbo_fsgid); - __swab32s(&b->mbo_capability); - __swab32s(&b->mbo_mode); - __swab32s(&b->mbo_uid); - __swab32s(&b->mbo_gid); - __swab32s(&b->mbo_flags); - __swab32s(&b->mbo_rdev); - __swab32s(&b->mbo_nlink); - BUILD_BUG_ON(offsetof(typeof(*b), mbo_unused2) == 0); - __swab32s(&b->mbo_suppgid); - __swab32s(&b->mbo_eadatasize); - __swab32s(&b->mbo_aclsize); - __swab32s(&b->mbo_max_mdsize); - BUILD_BUG_ON(!offsetof(typeof(*b), mbo_unused3)); - __swab32s(&b->mbo_uid_h); - __swab32s(&b->mbo_gid_h); - BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_5) == 0); -} - -void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b) -{ - /* handle 
is opaque */ - /* mio_handle is opaque */ - BUILD_BUG_ON(!offsetof(typeof(*b), mio_unused1)); - BUILD_BUG_ON(!offsetof(typeof(*b), mio_unused2)); - BUILD_BUG_ON(!offsetof(typeof(*b), mio_padding)); -} - -void lustre_swab_mgs_target_info(struct mgs_target_info *mti) -{ - int i; - - __swab32s(&mti->mti_lustre_ver); - __swab32s(&mti->mti_stripe_index); - __swab32s(&mti->mti_config_ver); - __swab32s(&mti->mti_flags); - __swab32s(&mti->mti_instance); - __swab32s(&mti->mti_nid_count); - BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64)); - for (i = 0; i < MTI_NIDS_MAX; i++) - __swab64s(&mti->mti_nids[i]); -} - -void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry) -{ - __u8 i; - - __swab64s(&entry->mne_version); - __swab32s(&entry->mne_instance); - __swab32s(&entry->mne_index); - __swab32s(&entry->mne_length); - - /* mne_nid_(count|type) must be one byte size because we're gonna - * access it w/o swapping. */ - BUILD_BUG_ON(sizeof(entry->mne_nid_count) != sizeof(__u8)); - BUILD_BUG_ON(sizeof(entry->mne_nid_type) != sizeof(__u8)); - - /* remove this assertion if ipv6 is supported. 
*/ - LASSERT(entry->mne_nid_type == 0); - for (i = 0; i < entry->mne_nid_count; i++) { - BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64)); - __swab64s(&entry->u.nids[i]); - } -} -EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry); - -void lustre_swab_mgs_config_body(struct mgs_config_body *body) -{ - __swab64s(&body->mcb_offset); - __swab32s(&body->mcb_units); - __swab16s(&body->mcb_type); -} - -void lustre_swab_mgs_config_res(struct mgs_config_res *body) -{ - __swab64s(&body->mcr_offset); - __swab64s(&body->mcr_size); -} - -static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i) -{ - __swab64s(&i->dqi_bgrace); - __swab64s(&i->dqi_igrace); - __swab32s(&i->dqi_flags); - __swab32s(&i->dqi_valid); -} - -static void lustre_swab_obd_dqblk(struct obd_dqblk *b) -{ - __swab64s(&b->dqb_ihardlimit); - __swab64s(&b->dqb_isoftlimit); - __swab64s(&b->dqb_curinodes); - __swab64s(&b->dqb_bhardlimit); - __swab64s(&b->dqb_bsoftlimit); - __swab64s(&b->dqb_curspace); - __swab64s(&b->dqb_btime); - __swab64s(&b->dqb_itime); - __swab32s(&b->dqb_valid); - BUILD_BUG_ON(offsetof(typeof(*b), dqb_padding) == 0); -} - -void lustre_swab_obd_quotactl(struct obd_quotactl *q) -{ - __swab32s(&q->qc_cmd); - __swab32s(&q->qc_type); - __swab32s(&q->qc_id); - __swab32s(&q->qc_stat); - lustre_swab_obd_dqinfo(&q->qc_dqinfo); - lustre_swab_obd_dqblk(&q->qc_dqblk); -} - -void lustre_swab_fid2path(struct getinfo_fid2path *gf) -{ - lustre_swab_lu_fid(&gf->gf_fid); - __swab64s(&gf->gf_recno); - __swab32s(&gf->gf_linkno); - __swab32s(&gf->gf_pathlen); -} -EXPORT_SYMBOL(lustre_swab_fid2path); - -static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent) -{ - __swab64s(&fm_extent->fe_logical); - __swab64s(&fm_extent->fe_physical); - __swab64s(&fm_extent->fe_length); - __swab32s(&fm_extent->fe_flags); - __swab32s(&fm_extent->fe_device); -} - -void lustre_swab_fiemap(struct fiemap *fiemap) -{ - __u32 i; - - __swab64s(&fiemap->fm_start); - __swab64s(&fiemap->fm_length); - __swab32s(&fiemap->fm_flags); - 
__swab32s(&fiemap->fm_mapped_extents); - __swab32s(&fiemap->fm_extent_count); - __swab32s(&fiemap->fm_reserved); - - for (i = 0; i < fiemap->fm_mapped_extents; i++) - lustre_swab_fiemap_extent(&fiemap->fm_extents[i]); -} - -void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr) -{ - __swab32s(&rr->rr_opcode); - __swab32s(&rr->rr_cap); - __swab32s(&rr->rr_fsuid); - /* rr_fsuid_h is unused */ - __swab32s(&rr->rr_fsgid); - /* rr_fsgid_h is unused */ - __swab32s(&rr->rr_suppgid1); - /* rr_suppgid1_h is unused */ - __swab32s(&rr->rr_suppgid2); - /* rr_suppgid2_h is unused */ - lustre_swab_lu_fid(&rr->rr_fid1); - lustre_swab_lu_fid(&rr->rr_fid2); - __swab64s(&rr->rr_mtime); - __swab64s(&rr->rr_atime); - __swab64s(&rr->rr_ctime); - __swab64s(&rr->rr_size); - __swab64s(&rr->rr_blocks); - __swab32s(&rr->rr_bias); - __swab32s(&rr->rr_mode); - __swab32s(&rr->rr_flags); - __swab32s(&rr->rr_flags_h); - __swab32s(&rr->rr_umask); - - BUILD_BUG_ON(offsetof(typeof(*rr), rr_padding_4) == 0); -}; - -void lustre_swab_lov_desc(struct lov_desc *ld) -{ - __swab32s(&ld->ld_tgt_count); - __swab32s(&ld->ld_active_tgt_count); - __swab32s(&ld->ld_default_stripe_count); - __swab32s(&ld->ld_pattern); - __swab64s(&ld->ld_default_stripe_size); - __swab64s(&ld->ld_default_stripe_offset); - __swab32s(&ld->ld_qos_maxage); - /* uuid endian insensitive */ -} -EXPORT_SYMBOL(lustre_swab_lov_desc); - -/* This structure is always in little-endian */ -static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1) -{ - int i; - - __swab32s(&lmm1->lmv_magic); - __swab32s(&lmm1->lmv_stripe_count); - __swab32s(&lmm1->lmv_master_mdt_index); - __swab32s(&lmm1->lmv_hash_type); - __swab32s(&lmm1->lmv_layout_version); - for (i = 0; i < lmm1->lmv_stripe_count; i++) - lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]); -} - -void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm) -{ - switch (lmm->lmv_magic) { - case LMV_MAGIC_V1: - lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1); - break; - default: - break; - } -} 
-EXPORT_SYMBOL(lustre_swab_lmv_mds_md); - -void lustre_swab_lmv_user_md(struct lmv_user_md *lum) -{ - __swab32s(&lum->lum_magic); - __swab32s(&lum->lum_stripe_count); - __swab32s(&lum->lum_stripe_offset); - __swab32s(&lum->lum_hash_type); - __swab32s(&lum->lum_type); - BUILD_BUG_ON(!offsetof(typeof(*lum), lum_padding1)); -} -EXPORT_SYMBOL(lustre_swab_lmv_user_md); - -static void lustre_swab_lmm_oi(struct ost_id *oi) -{ - __swab64s(&oi->oi.oi_id); - __swab64s(&oi->oi.oi_seq); -} - -static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum) -{ - __swab32s(&lum->lmm_magic); - __swab32s(&lum->lmm_pattern); - lustre_swab_lmm_oi(&lum->lmm_oi); - __swab32s(&lum->lmm_stripe_size); - __swab16s(&lum->lmm_stripe_count); - __swab16s(&lum->lmm_stripe_offset); -} - -void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum) -{ - CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n"); - lustre_swab_lov_user_md_common(lum); -} -EXPORT_SYMBOL(lustre_swab_lov_user_md_v1); - -void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum) -{ - CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n"); - lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum); - /* lmm_pool_name nothing to do with char */ -} -EXPORT_SYMBOL(lustre_swab_lov_user_md_v3); - -void lustre_swab_lov_mds_md(struct lov_mds_md *lmm) -{ - CDEBUG(D_IOCTL, "swabbing lov_mds_md\n"); - __swab32s(&lmm->lmm_magic); - __swab32s(&lmm->lmm_pattern); - lustre_swab_lmm_oi(&lmm->lmm_oi); - __swab32s(&lmm->lmm_stripe_size); - __swab16s(&lmm->lmm_stripe_count); - __swab16s(&lmm->lmm_layout_gen); -} -EXPORT_SYMBOL(lustre_swab_lov_mds_md); - -void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, - int stripe_count) -{ - int i; - - for (i = 0; i < stripe_count; i++) { - lustre_swab_ost_id(&lod[i].l_ost_oi); - __swab32s(&lod[i].l_ost_gen); - __swab32s(&lod[i].l_ost_idx); - } -} -EXPORT_SYMBOL(lustre_swab_lov_user_md_objects); - -static void lustre_swab_ldlm_res_id(struct ldlm_res_id *id) -{ - int i; - - for (i = 0; i < 
RES_NAME_SIZE; i++) - __swab64s(&id->name[i]); -} - -static void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d) -{ - /* the lock data is a union and the first two fields are always an - * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock - * data the same way. - */ - __swab64s(&d->l_extent.start); - __swab64s(&d->l_extent.end); - __swab64s(&d->l_extent.gid); - __swab64s(&d->l_flock.lfw_owner); - __swab32s(&d->l_flock.lfw_pid); -} - -void lustre_swab_ldlm_intent(struct ldlm_intent *i) -{ - __swab64s(&i->opc); -} - -static void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r) -{ - __swab32s(&r->lr_type); - BUILD_BUG_ON(offsetof(typeof(*r), lr_padding) == 0); - lustre_swab_ldlm_res_id(&r->lr_name); -} - -static void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l) -{ - lustre_swab_ldlm_resource_desc(&l->l_resource); - __swab32s(&l->l_req_mode); - __swab32s(&l->l_granted_mode); - lustre_swab_ldlm_policy_data(&l->l_policy_data); -} - -void lustre_swab_ldlm_request(struct ldlm_request *rq) -{ - __swab32s(&rq->lock_flags); - lustre_swab_ldlm_lock_desc(&rq->lock_desc); - __swab32s(&rq->lock_count); - /* lock_handle[] opaque */ -} - -void lustre_swab_ldlm_reply(struct ldlm_reply *r) -{ - __swab32s(&r->lock_flags); - BUILD_BUG_ON(offsetof(typeof(*r), lock_padding) == 0); - lustre_swab_ldlm_lock_desc(&r->lock_desc); - /* lock_handle opaque */ - __swab64s(&r->lock_policy_res1); - __swab64s(&r->lock_policy_res2); -} - -/* Dump functions */ -void dump_ioo(struct obd_ioobj *ioo) -{ - CDEBUG(D_RPCTRACE, - "obd_ioobj: ioo_oid=" DOSTID ", ioo_max_brw=%#x, ioo_bufct=%d\n", - POSTID(&ioo->ioo_oid), ioo->ioo_max_brw, - ioo->ioo_bufcnt); -} - -void dump_rniobuf(struct niobuf_remote *nb) -{ - CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n", - nb->rnb_offset, nb->rnb_len, nb->rnb_flags); -} - -static void dump_obdo(struct obdo *oa) -{ - __u32 valid = oa->o_valid; - - CDEBUG(D_RPCTRACE, "obdo: o_valid = %08x\n", valid); - if 
(valid & OBD_MD_FLID) - CDEBUG(D_RPCTRACE, "obdo: id = " DOSTID "\n", POSTID(&oa->o_oi)); - if (valid & OBD_MD_FLFID) - CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n", - oa->o_parent_seq); - if (valid & OBD_MD_FLSIZE) - CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size); - if (valid & OBD_MD_FLMTIME) - CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime); - if (valid & OBD_MD_FLATIME) - CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime); - if (valid & OBD_MD_FLCTIME) - CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime); - if (valid & OBD_MD_FLBLOCKS) /* allocation of space */ - CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks); - if (valid & OBD_MD_FLGRANT) - CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant); - if (valid & OBD_MD_FLBLKSZ) - CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize); - if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE)) - CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n", - oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) | - (valid & OBD_MD_FLMODE ? 
~S_IFMT : 0))); - if (valid & OBD_MD_FLUID) - CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid); - if (valid & OBD_MD_FLUID) - CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h); - if (valid & OBD_MD_FLGID) - CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid); - if (valid & OBD_MD_FLGID) - CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h); - if (valid & OBD_MD_FLFLAGS) - CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags); - if (valid & OBD_MD_FLNLINK) - CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink); - else if (valid & OBD_MD_FLCKSUM) - CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n", - oa->o_nlink); - if (valid & OBD_MD_FLGENER) - CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n", - oa->o_parent_oid); - if (valid & OBD_MD_FLEPOCH) - CDEBUG(D_RPCTRACE, "obdo: o_ioepoch = %lld\n", - oa->o_ioepoch); - if (valid & OBD_MD_FLFID) { - CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n", - oa->o_stripe_idx); - CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n", - oa->o_parent_ver); - } - if (valid & OBD_MD_FLHANDLE) - CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n", - oa->o_handle.cookie); -} - -void dump_ost_body(struct ost_body *ob) -{ - dump_obdo(&ob->oa); -} - -void dump_rcs(__u32 *rc) -{ - CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc); -} - -static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req) -{ - LASSERT(req->rq_reqmsg); - - switch (req->rq_reqmsg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF); - default: - CERROR("bad lustre msg magic: %#08X\n", - req->rq_reqmsg->lm_magic); - } - return 0; -} - -static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req) -{ - LASSERT(req->rq_repmsg); - - switch (req->rq_repmsg->lm_magic) { - case LUSTRE_MSG_MAGIC_V2: - return lustre_rep_swabbed(req, MSG_PTLRPC_BODY_OFF); - default: - /* uninitialized yet */ - return 0; - } -} - -void _debug_req(struct ptlrpc_request *req, - struct libcfs_debug_msg_data *msgdata, - const char *fmt, ...) 
-{ - int req_ok = req->rq_reqmsg != NULL; - int rep_ok = req->rq_repmsg != NULL; - lnet_nid_t nid = LNET_NID_ANY; - va_list args; - - if (ptlrpc_req_need_swab(req)) { - req_ok = req_ok && req_ptlrpc_body_swabbed(req); - rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req); - } - - if (req->rq_import && req->rq_import->imp_connection) - nid = req->rq_import->imp_connection->c_peer.nid; - else if (req->rq_export && req->rq_export->exp_connection) - nid = req->rq_export->exp_connection->c_peer.nid; - - va_start(args, fmt); - libcfs_debug_vmsg2(msgdata, fmt, args, - " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n", - req, req->rq_xid, req->rq_transno, - req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0, - req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1, - req->rq_import ? - req->rq_import->imp_obd->obd_name : - req->rq_export ? - req->rq_export->exp_client_uuid.uuid : - "", - libcfs_nid2str(nid), - req->rq_request_portal, req->rq_reply_portal, - req->rq_reqlen, req->rq_replen, - req->rq_early_count, (s64)req->rq_timedout, - (s64)req->rq_deadline, - atomic_read(&req->rq_refcount), - DEBUG_REQ_FLAGS(req), - req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1, - rep_ok ? lustre_msg_get_flags(req->rq_repmsg) : -1, - req->rq_status, - rep_ok ? 
lustre_msg_get_status(req->rq_repmsg) : -1); - va_end(args); -} -EXPORT_SYMBOL(_debug_req); - -void lustre_swab_lustre_capa(struct lustre_capa *c) -{ - lustre_swab_lu_fid(&c->lc_fid); - __swab64s(&c->lc_opc); - __swab64s(&c->lc_uid); - __swab64s(&c->lc_gid); - __swab32s(&c->lc_flags); - __swab32s(&c->lc_keyid); - __swab32s(&c->lc_timeout); - __swab32s(&c->lc_expiry); -} - -void lustre_swab_hsm_user_state(struct hsm_user_state *state) -{ - __swab32s(&state->hus_states); - __swab32s(&state->hus_archive_id); -} - -void lustre_swab_hsm_state_set(struct hsm_state_set *hss) -{ - __swab32s(&hss->hss_valid); - __swab64s(&hss->hss_setmask); - __swab64s(&hss->hss_clearmask); - __swab32s(&hss->hss_archive_id); -} -EXPORT_SYMBOL(lustre_swab_hsm_state_set); - -static void lustre_swab_hsm_extent(struct hsm_extent *extent) -{ - __swab64s(&extent->offset); - __swab64s(&extent->length); -} - -void lustre_swab_hsm_current_action(struct hsm_current_action *action) -{ - __swab32s(&action->hca_state); - __swab32s(&action->hca_action); - lustre_swab_hsm_extent(&action->hca_location); -} - -void lustre_swab_hsm_user_item(struct hsm_user_item *hui) -{ - lustre_swab_lu_fid(&hui->hui_fid); - lustre_swab_hsm_extent(&hui->hui_extent); -} - -void lustre_swab_layout_intent(struct layout_intent *li) -{ - __swab32s(&li->li_opc); - __swab32s(&li->li_flags); - __swab64s(&li->li_start); - __swab64s(&li->li_end); -} - -void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk) -{ - lustre_swab_lu_fid(&hpk->hpk_fid); - __swab64s(&hpk->hpk_cookie); - __swab64s(&hpk->hpk_extent.offset); - __swab64s(&hpk->hpk_extent.length); - __swab16s(&hpk->hpk_flags); - __swab16s(&hpk->hpk_errval); -} - -void lustre_swab_hsm_request(struct hsm_request *hr) -{ - __swab32s(&hr->hr_action); - __swab32s(&hr->hr_archive_id); - __swab64s(&hr->hr_flags); - __swab32s(&hr->hr_itemcount); - __swab32s(&hr->hr_data_len); -} - -void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl) -{ - __swab64s(&msl->msl_flags); 
-} -EXPORT_SYMBOL(lustre_swab_swap_layouts); - -void lustre_swab_close_data(struct close_data *cd) -{ - lustre_swab_lu_fid(&cd->cd_fid); - __swab64s(&cd->cd_data_version); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c deleted file mode 100644 index 2466868afb9c..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/pers.c +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2014, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc, - int mdidx) -{ - int offset = mdidx * LNET_MAX_IOV; - - BUILD_BUG_ON(PTLRPC_MAX_BRW_PAGES >= LI_POISON); - - LASSERT(mdidx < desc->bd_md_max_brw); - LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); - LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | - LNET_MD_PHYS))); - - md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV); - md->length = min_t(unsigned int, LNET_MAX_IOV, md->length); - - if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) { - md->options |= LNET_MD_KIOV; - if (GET_ENC_KIOV(desc)) - md->start = &BD_GET_ENC_KIOV(desc, offset); - else - md->start = &BD_GET_KIOV(desc, offset); - } else { - md->options |= LNET_MD_IOVEC; - if (GET_ENC_KVEC(desc)) - md->start = &BD_GET_ENC_KVEC(desc, offset); - else - md->start = &BD_GET_KVEC(desc, offset); - } -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c deleted file mode 100644 index b3297b5ce395..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c +++ /dev/null @@ -1,474 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/pinger.c - * - * Portal-RPC reconnection and replay operations, for use in recovery. - */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include -#include "ptlrpc_internal.h" - -struct mutex pinger_mutex; -static LIST_HEAD(pinger_imports); -static struct list_head timeout_list = LIST_HEAD_INIT(timeout_list); - -struct ptlrpc_request * -ptlrpc_prep_ping(struct obd_import *imp) -{ - struct ptlrpc_request *req; - - req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, - LUSTRE_OBD_VERSION, OBD_PING); - if (req) { - ptlrpc_request_set_replen(req); - req->rq_no_resend = 1; - req->rq_no_delay = 1; - } - return req; -} - -int ptlrpc_obd_ping(struct obd_device *obd) -{ - int rc; - struct ptlrpc_request *req; - - req = ptlrpc_prep_ping(obd->u.cli.cl_import); - if (!req) - return -ENOMEM; - - req->rq_send_state = LUSTRE_IMP_FULL; - - rc = ptlrpc_queue_wait(req); - - ptlrpc_req_finished(req); - - return rc; -} -EXPORT_SYMBOL(ptlrpc_obd_ping); - -static int ptlrpc_ping(struct obd_import *imp) -{ - struct ptlrpc_request *req; - - req = ptlrpc_prep_ping(imp); - if (!req) { - CERROR("OOM trying to ping %s->%s\n", - imp->imp_obd->obd_uuid.uuid, - obd2cli_tgt(imp->imp_obd)); - return -ENOMEM; - } - - DEBUG_REQ(D_INFO, req, "pinging %s->%s", - imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); - ptlrpcd_add_req(req); - - return 0; -} - -static void ptlrpc_update_next_ping(struct obd_import *imp, int soon) -{ - int time = soon ? 
PING_INTERVAL_SHORT : PING_INTERVAL; - - if (imp->imp_state == LUSTRE_IMP_DISCON) { - int dtime = max_t(int, CONNECTION_SWITCH_MIN, - AT_OFF ? 0 : - at_get(&imp->imp_at.iat_net_latency)); - time = min(time, dtime); - } - imp->imp_next_ping = jiffies + time * HZ; -} - -static inline int imp_is_deactive(struct obd_import *imp) -{ - return (imp->imp_deactive || - OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_IMP_DEACTIVE)); -} - -static inline int ptlrpc_next_reconnect(struct obd_import *imp) -{ - if (imp->imp_server_timeout) - return jiffies + obd_timeout / 2 * HZ; - else - return jiffies + obd_timeout * HZ; -} - -static long pinger_check_timeout(unsigned long time) -{ - struct timeout_item *item; - unsigned long timeout = PING_INTERVAL; - - /* The timeout list is a increase order sorted list */ - mutex_lock(&pinger_mutex); - list_for_each_entry(item, &timeout_list, ti_chain) { - int ti_timeout = item->ti_timeout; - - if (timeout > ti_timeout) - timeout = ti_timeout; - break; - } - mutex_unlock(&pinger_mutex); - - return time + timeout * HZ - jiffies; -} - -static bool ir_up; - -void ptlrpc_pinger_ir_up(void) -{ - CDEBUG(D_HA, "IR up\n"); - ir_up = true; -} -EXPORT_SYMBOL(ptlrpc_pinger_ir_up); - -void ptlrpc_pinger_ir_down(void) -{ - CDEBUG(D_HA, "IR down\n"); - ir_up = false; -} -EXPORT_SYMBOL(ptlrpc_pinger_ir_down); - -static void ptlrpc_pinger_process_import(struct obd_import *imp, - unsigned long this_ping) -{ - int level; - int force; - int force_next; - int suppress; - - spin_lock(&imp->imp_lock); - - level = imp->imp_state; - force = imp->imp_force_verify; - force_next = imp->imp_force_next_verify; - /* - * This will be used below only if the import is "FULL". 
- */ - suppress = ir_up && OCD_HAS_FLAG(&imp->imp_connect_data, PINGLESS); - - imp->imp_force_verify = 0; - - if (time_after_eq(imp->imp_next_ping - 5, this_ping) && - !force) { - spin_unlock(&imp->imp_lock); - return; - } - - imp->imp_force_next_verify = 0; - - spin_unlock(&imp->imp_lock); - - CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA, "%s->%s: level %s/%u force %u force_next %u deactive %u pingable %u suppress %u\n", - imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd), - ptlrpc_import_state_name(level), level, force, force_next, - imp->imp_deactive, imp->imp_pingable, suppress); - - if (level == LUSTRE_IMP_DISCON && !imp_is_deactive(imp)) { - /* wait for a while before trying recovery again */ - imp->imp_next_ping = ptlrpc_next_reconnect(imp); - if (!imp->imp_no_pinger_recover) - ptlrpc_initiate_recovery(imp); - } else if (level != LUSTRE_IMP_FULL || - imp->imp_obd->obd_no_recov || - imp_is_deactive(imp)) { - CDEBUG(D_HA, "%s->%s: not pinging (in recovery or recovery disabled: %s)\n", - imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd), - ptlrpc_import_state_name(level)); - if (force) { - spin_lock(&imp->imp_lock); - imp->imp_force_verify = 1; - spin_unlock(&imp->imp_lock); - } - } else if ((imp->imp_pingable && !suppress) || force_next || force) { - ptlrpc_ping(imp); - } -} - -static struct workqueue_struct *pinger_wq; -static void ptlrpc_pinger_main(struct work_struct *ws); -static DECLARE_DELAYED_WORK(ping_work, ptlrpc_pinger_main); - -static void ptlrpc_pinger_main(struct work_struct *ws) -{ - unsigned long this_ping = jiffies; - long time_to_next_wake; - struct timeout_item *item; - struct obd_import *imp; - - do { - mutex_lock(&pinger_mutex); - list_for_each_entry(item, &timeout_list, ti_chain) { - item->ti_cb(item, item->ti_cb_data); - } - list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) { - ptlrpc_pinger_process_import(imp, this_ping); - /* obd_timeout might have changed */ - if (imp->imp_pingable && imp->imp_next_ping && - 
time_after(imp->imp_next_ping, - this_ping + PING_INTERVAL * HZ)) - ptlrpc_update_next_ping(imp, 0); - } - mutex_unlock(&pinger_mutex); - - /* Wait until the next ping time, or until we're stopped. */ - time_to_next_wake = pinger_check_timeout(this_ping); - /* The ping sent by ptlrpc_send_rpc may get sent out - * say .01 second after this. - * ptlrpc_pinger_sending_on_import will then set the - * next ping time to next_ping + .01 sec, which means - * we will SKIP the next ping at next_ping, and the - * ping will get sent 2 timeouts from now! Beware. - */ - CDEBUG(D_INFO, "next wakeup in %ld (%ld)\n", - time_to_next_wake, - this_ping + PING_INTERVAL * HZ); - } while (time_to_next_wake <= 0); - - queue_delayed_work(pinger_wq, &ping_work, - round_jiffies_up_relative(time_to_next_wake)); -} - -int ptlrpc_start_pinger(void) -{ - if (pinger_wq) - return -EALREADY; - - pinger_wq = alloc_workqueue("ptlrpc_pinger", WQ_MEM_RECLAIM, 1); - if (!pinger_wq) { - CERROR("cannot start pinger workqueue\n"); - return -ENOMEM; - } - - queue_delayed_work(pinger_wq, &ping_work, 0); - return 0; -} - -static int ptlrpc_pinger_remove_timeouts(void); - -int ptlrpc_stop_pinger(void) -{ - int rc = 0; - - if (!pinger_wq) - return -EALREADY; - - ptlrpc_pinger_remove_timeouts(); - cancel_delayed_work_sync(&ping_work); - destroy_workqueue(pinger_wq); - pinger_wq = NULL; - - return rc; -} - -void ptlrpc_pinger_sending_on_import(struct obd_import *imp) -{ - ptlrpc_update_next_ping(imp, 0); -} - -void ptlrpc_pinger_commit_expected(struct obd_import *imp) -{ - ptlrpc_update_next_ping(imp, 1); - assert_spin_locked(&imp->imp_lock); - /* - * Avoid reading stale imp_connect_data. When not sure if pings are - * expected or not on next connection, we assume they are not and force - * one anyway to guarantee the chance of updating - * imp_peer_committed_transno. 
- */ - if (imp->imp_state != LUSTRE_IMP_FULL || - OCD_HAS_FLAG(&imp->imp_connect_data, PINGLESS)) - imp->imp_force_next_verify = 1; -} - -int ptlrpc_pinger_add_import(struct obd_import *imp) -{ - if (!list_empty(&imp->imp_pinger_chain)) - return -EALREADY; - - mutex_lock(&pinger_mutex); - CDEBUG(D_HA, "adding pingable import %s->%s\n", - imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); - /* if we add to pinger we want recovery on this import */ - imp->imp_obd->obd_no_recov = 0; - ptlrpc_update_next_ping(imp, 0); - /* XXX sort, blah blah */ - list_add_tail(&imp->imp_pinger_chain, &pinger_imports); - class_import_get(imp); - - ptlrpc_pinger_wake_up(); - mutex_unlock(&pinger_mutex); - - return 0; -} -EXPORT_SYMBOL(ptlrpc_pinger_add_import); - -int ptlrpc_pinger_del_import(struct obd_import *imp) -{ - if (list_empty(&imp->imp_pinger_chain)) - return -ENOENT; - - mutex_lock(&pinger_mutex); - list_del_init(&imp->imp_pinger_chain); - CDEBUG(D_HA, "removing pingable import %s->%s\n", - imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); - /* if we remove from pinger we don't want recovery on this import */ - imp->imp_obd->obd_no_recov = 1; - class_import_put(imp); - mutex_unlock(&pinger_mutex); - return 0; -} -EXPORT_SYMBOL(ptlrpc_pinger_del_import); - -/** - * Register a timeout callback to the pinger list, and the callback will - * be called when timeout happens. - */ -static struct timeout_item *ptlrpc_new_timeout(int time, - enum timeout_event event, - timeout_cb_t cb, void *data) -{ - struct timeout_item *ti; - - ti = kzalloc(sizeof(*ti), GFP_NOFS); - if (!ti) - return NULL; - - INIT_LIST_HEAD(&ti->ti_obd_list); - INIT_LIST_HEAD(&ti->ti_chain); - ti->ti_timeout = time; - ti->ti_event = event; - ti->ti_cb = cb; - ti->ti_cb_data = data; - - return ti; -} - -/** - * Register timeout event on the pinger thread. - * Note: the timeout list is an sorted list with increased timeout value. 
- */ -static struct timeout_item* -ptlrpc_pinger_register_timeout(int time, enum timeout_event event, - timeout_cb_t cb, void *data) -{ - struct timeout_item *item, *tmp; - - LASSERT(mutex_is_locked(&pinger_mutex)); - - list_for_each_entry(item, &timeout_list, ti_chain) - if (item->ti_event == event) - goto out; - - item = ptlrpc_new_timeout(time, event, cb, data); - if (item) { - list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) { - if (tmp->ti_timeout < time) { - list_add(&item->ti_chain, &tmp->ti_chain); - goto out; - } - } - list_add(&item->ti_chain, &timeout_list); - } -out: - return item; -} - -/* Add a client_obd to the timeout event list, when timeout(@time) - * happens, the callback(@cb) will be called. - */ -int ptlrpc_add_timeout_client(int time, enum timeout_event event, - timeout_cb_t cb, void *data, - struct list_head *obd_list) -{ - struct timeout_item *ti; - - mutex_lock(&pinger_mutex); - ti = ptlrpc_pinger_register_timeout(time, event, cb, data); - if (!ti) { - mutex_unlock(&pinger_mutex); - return -EINVAL; - } - list_add(obd_list, &ti->ti_obd_list); - mutex_unlock(&pinger_mutex); - return 0; -} -EXPORT_SYMBOL(ptlrpc_add_timeout_client); - -int ptlrpc_del_timeout_client(struct list_head *obd_list, - enum timeout_event event) -{ - struct timeout_item *ti = NULL, *item; - - if (list_empty(obd_list)) - return 0; - mutex_lock(&pinger_mutex); - list_del_init(obd_list); - /** - * If there are no obd attached to the timeout event - * list, remove this timeout event from the pinger - */ - list_for_each_entry(item, &timeout_list, ti_chain) { - if (item->ti_event == event) { - ti = item; - break; - } - } - if (list_empty(&ti->ti_obd_list)) { - list_del(&ti->ti_chain); - kfree(ti); - } - mutex_unlock(&pinger_mutex); - return 0; -} -EXPORT_SYMBOL(ptlrpc_del_timeout_client); - -static int ptlrpc_pinger_remove_timeouts(void) -{ - struct timeout_item *item, *tmp; - - mutex_lock(&pinger_mutex); - list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) 
{ - LASSERT(list_empty(&item->ti_obd_list)); - list_del(&item->ti_chain); - kfree(item); - } - mutex_unlock(&pinger_mutex); - return 0; -} - -void ptlrpc_pinger_wake_up(void) -{ - mod_delayed_work(pinger_wq, &ping_work, 0); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h deleted file mode 100644 index 134b74234519..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h +++ /dev/null @@ -1,371 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -/* Intramodule declarations for ptlrpc. 
*/ - -#ifndef PTLRPC_INTERNAL_H -#define PTLRPC_INTERNAL_H - -#include "../ldlm/ldlm_internal.h" - -struct ldlm_namespace; -struct obd_import; -struct ldlm_res_id; -struct ptlrpc_request_set; -extern int test_req_buffer_pressure; -extern struct mutex ptlrpc_all_services_mutex; -extern struct list_head ptlrpc_all_services; - -extern struct mutex ptlrpcd_mutex; -extern struct mutex pinger_mutex; - -int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait); -/* ptlrpcd.c */ -int ptlrpcd_start(struct ptlrpcd_ctl *pc); - -/* client.c */ -void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, - unsigned int service_time); -struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags, - unsigned int max_brw, - enum ptlrpc_bulk_op_type type, - unsigned int portal, - const struct ptlrpc_bulk_frag_ops *ops); -int ptlrpc_request_cache_init(void); -void ptlrpc_request_cache_fini(void); -struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags); -void ptlrpc_request_cache_free(struct ptlrpc_request *req); -void ptlrpc_init_xid(void); -void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, - struct ptlrpc_request *req); -void ptlrpc_expired_set(struct ptlrpc_request_set *set); -int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set); -void ptlrpc_resend_req(struct ptlrpc_request *request); -void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req); -void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req); -__u64 ptlrpc_known_replied_xid(struct obd_import *imp); -void ptlrpc_add_unreplied(struct ptlrpc_request *req); - -/* events.c */ -int ptlrpc_init_portals(void); -void ptlrpc_exit_portals(void); - -void ptlrpc_request_handle_notconn(struct ptlrpc_request *req); -void lustre_assert_wire_constants(void); -int ptlrpc_import_in_recovery(struct obd_import *imp); -int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt); -int ptlrpc_replay_next(struct obd_import *imp, int *inflight); -void ptlrpc_initiate_recovery(struct obd_import *imp); - -int 
lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset); -int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset); - -int ptlrpc_sysfs_register_service(struct kset *parent, - struct ptlrpc_service *svc); -void ptlrpc_sysfs_unregister_service(struct ptlrpc_service *svc); - -void ptlrpc_ldebugfs_register_service(struct dentry *debugfs_entry, - struct ptlrpc_service *svc); -void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc); -void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount); - -/* NRS */ - -/** - * NRS core object. - * - * Holds NRS core fields. - */ -struct nrs_core { - /** - * Protects nrs_core::nrs_policies, serializes external policy - * registration/unregistration, and NRS core lprocfs operations. - */ - struct mutex nrs_mutex; - /** - * List of all policy descriptors registered with NRS core; protected - * by nrs_core::nrs_mutex. - */ - struct list_head nrs_policies; - -}; - -extern struct nrs_core nrs_core; - -int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc); -void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc); - -void ptlrpc_nrs_req_initialize(struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req, bool hp); -void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req); -void ptlrpc_nrs_req_stop_nolock(struct ptlrpc_request *req); -void ptlrpc_nrs_req_add(struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req, bool hp); - -struct ptlrpc_request * -ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp, - bool peek, bool force); - -static inline struct ptlrpc_request * -ptlrpc_nrs_req_get_nolock(struct ptlrpc_service_part *svcpt, bool hp, - bool force) -{ - return ptlrpc_nrs_req_get_nolock0(svcpt, hp, false, force); -} - -bool ptlrpc_nrs_req_pending_nolock(struct ptlrpc_service_part *svcpt, bool hp); - -int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc, - enum ptlrpc_nrs_queue_type queue, char *name, - enum ptlrpc_nrs_ctl opc, bool 
single, void *arg); - -int ptlrpc_nrs_init(void); -void ptlrpc_nrs_fini(void); - -static inline bool nrs_svcpt_has_hp(const struct ptlrpc_service_part *svcpt) -{ - return svcpt->scp_nrs_hp != NULL; -} - -static inline bool nrs_svc_has_hp(const struct ptlrpc_service *svc) -{ - /** - * If the first service partition has an HP NRS head, all service - * partitions will. - */ - return nrs_svcpt_has_hp(svc->srv_parts[0]); -} - -static inline -struct ptlrpc_nrs *nrs_svcpt2nrs(struct ptlrpc_service_part *svcpt, bool hp) -{ - LASSERT(ergo(hp, nrs_svcpt_has_hp(svcpt))); - return hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg; -} - -static inline int nrs_pol2cptid(const struct ptlrpc_nrs_policy *policy) -{ - return policy->pol_nrs->nrs_svcpt->scp_cpt; -} - -static inline -struct ptlrpc_service *nrs_pol2svc(struct ptlrpc_nrs_policy *policy) -{ - return policy->pol_nrs->nrs_svcpt->scp_service; -} - -static inline -struct ptlrpc_service_part *nrs_pol2svcpt(struct ptlrpc_nrs_policy *policy) -{ - return policy->pol_nrs->nrs_svcpt; -} - -static inline -struct cfs_cpt_table *nrs_pol2cptab(struct ptlrpc_nrs_policy *policy) -{ - return nrs_pol2svc(policy)->srv_cptable; -} - -static inline struct ptlrpc_nrs_resource * -nrs_request_resource(struct ptlrpc_nrs_request *nrq) -{ - LASSERT(nrq->nr_initialized); - LASSERT(!nrq->nr_finalized); - - return nrq->nr_res_ptrs[nrq->nr_res_idx]; -} - -static inline -struct ptlrpc_nrs_policy *nrs_request_policy(struct ptlrpc_nrs_request *nrq) -{ - return nrs_request_resource(nrq)->res_policy; -} - -#define NRS_LPROCFS_QUANTUM_NAME_REG "reg_quantum:" -#define NRS_LPROCFS_QUANTUM_NAME_HP "hp_quantum:" - -/** - * the maximum size of nrs_crrn_client::cc_quantum and nrs_orr_data::od_quantum. - */ -#define LPROCFS_NRS_QUANTUM_MAX 65535 - -/** - * Max valid command string is the size of the labels, plus "65535" twice, plus - * a separating space character. 
- */ -#define LPROCFS_NRS_WR_QUANTUM_MAX_CMD \ - sizeof(NRS_LPROCFS_QUANTUM_NAME_REG __stringify(LPROCFS_NRS_QUANTUM_MAX) " " \ - NRS_LPROCFS_QUANTUM_NAME_HP __stringify(LPROCFS_NRS_QUANTUM_MAX)) - -/* ptlrpc/nrs_fifo.c */ -extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo; - -/* recovd_thread.c */ - -int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink); - -/* pers.c */ -void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc, - int mdcnt); - -/* pack_generic.c */ -struct ptlrpc_reply_state * -lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt); -void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs); - -/* pinger.c */ -int ptlrpc_start_pinger(void); -int ptlrpc_stop_pinger(void); -void ptlrpc_pinger_sending_on_import(struct obd_import *imp); -void ptlrpc_pinger_commit_expected(struct obd_import *imp); -void ptlrpc_pinger_wake_up(void); - -/* sec_null.c */ -int sptlrpc_null_init(void); -void sptlrpc_null_fini(void); - -/* sec_plain.c */ -int sptlrpc_plain_init(void); -void sptlrpc_plain_fini(void); - -/* sec_bulk.c */ -int sptlrpc_enc_pool_init(void); -void sptlrpc_enc_pool_fini(void); -int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v); - -/* sec_lproc.c */ -void sptlrpc_lproc_init(void); -void sptlrpc_lproc_fini(void); - -/* sec_gc.c */ -int sptlrpc_gc_init(void); -void sptlrpc_gc_fini(void); - -/* sec_config.c */ -void sptlrpc_conf_choose_flavor(enum lustre_sec_part from, - enum lustre_sec_part to, - struct obd_uuid *target, - lnet_nid_t nid, - struct sptlrpc_flavor *sf); -int sptlrpc_conf_init(void); -void sptlrpc_conf_fini(void); - -/* sec.c */ -int sptlrpc_init(void); -void sptlrpc_fini(void); - -static inline bool ptlrpc_recoverable_error(int rc) -{ - return (rc == -ENOTCONN || rc == -ENODEV); -} - -static inline int tgt_mod_init(void) -{ - return 0; -} - -static inline void tgt_mod_exit(void) -{ - return; -} - -static inline void ptlrpc_reqset_put(struct ptlrpc_request_set *set) -{ - if 
(atomic_dec_and_test(&set->set_refcount)) - kfree(set); -} - -/** initialise ptlrpc common fields */ -static inline void ptlrpc_req_comm_init(struct ptlrpc_request *req) -{ - spin_lock_init(&req->rq_lock); - atomic_set(&req->rq_refcount, 1); - INIT_LIST_HEAD(&req->rq_list); - INIT_LIST_HEAD(&req->rq_replay_list); -} - -/** initialise client side ptlrpc request */ -static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req) -{ - struct ptlrpc_cli_req *cr = &req->rq_cli; - - ptlrpc_req_comm_init(req); - - req->rq_receiving_reply = 0; - req->rq_req_unlinked = 1; - req->rq_reply_unlinked = 1; - - req->rq_receiving_reply = 0; - req->rq_req_unlinked = 1; - req->rq_reply_unlinked = 1; - - INIT_LIST_HEAD(&cr->cr_set_chain); - INIT_LIST_HEAD(&cr->cr_ctx_chain); - INIT_LIST_HEAD(&cr->cr_unreplied_list); - init_waitqueue_head(&cr->cr_reply_waitq); - init_waitqueue_head(&cr->cr_set_waitq); -} - -/** initialise server side ptlrpc request */ -static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req) -{ - struct ptlrpc_srv_req *sr = &req->rq_srv; - - ptlrpc_req_comm_init(req); - req->rq_srv_req = 1; - INIT_LIST_HEAD(&sr->sr_exp_list); - INIT_LIST_HEAD(&sr->sr_timed_list); - INIT_LIST_HEAD(&sr->sr_hist_list); -} - -static inline bool ptlrpc_req_is_connect(struct ptlrpc_request *req) -{ - if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CONNECT || - lustre_msg_get_opc(req->rq_reqmsg) == OST_CONNECT || - lustre_msg_get_opc(req->rq_reqmsg) == MGS_CONNECT) - return true; - else - return false; -} - -static inline bool ptlrpc_req_is_disconnect(struct ptlrpc_request *req) -{ - if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_DISCONNECT || - lustre_msg_get_opc(req->rq_reqmsg) == OST_DISCONNECT || - lustre_msg_get_opc(req->rq_reqmsg) == MGS_DISCONNECT) - return true; - else - return false; -} - -#endif /* PTLRPC_INTERNAL_H */ diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c deleted file mode 100644 index 
5c32b657b3b5..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c +++ /dev/null @@ -1,186 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -extern spinlock_t ptlrpc_last_xid_lock; -#if RS_DEBUG -extern spinlock_t ptlrpc_rs_debug_lock; -#endif - -DEFINE_MUTEX(ptlrpc_startup); -static int ptlrpc_active = 0; - -int ptlrpc_inc_ref(void) -{ - int rc = 0; - - mutex_lock(&ptlrpc_startup); - if (ptlrpc_active++ == 0) { - ptlrpc_put_connection_superhack = ptlrpc_connection_put; - - rc = ptlrpc_init_portals(); - if (!rc) { - rc= ptlrpc_start_pinger(); - if (rc) - ptlrpc_exit_portals(); - } - if (rc) - ptlrpc_active--; - } - mutex_unlock(&ptlrpc_startup); - return rc; -} -EXPORT_SYMBOL(ptlrpc_inc_ref); - -void ptlrpc_dec_ref(void) -{ - mutex_lock(&ptlrpc_startup); - if (--ptlrpc_active == 0) { - ptlrpc_stop_pinger(); - ptlrpc_exit_portals(); - } - mutex_unlock(&ptlrpc_startup); -} -EXPORT_SYMBOL(ptlrpc_dec_ref); - -static int __init ptlrpc_init(void) -{ - int rc, cleanup_phase = 0; - - lustre_assert_wire_constants(); -#if RS_DEBUG - spin_lock_init(&ptlrpc_rs_debug_lock); -#endif - mutex_init(&ptlrpc_all_services_mutex); - mutex_init(&pinger_mutex); - mutex_init(&ptlrpcd_mutex); - ptlrpc_init_xid(); - - rc = libcfs_setup(); - if (rc) - return rc; - - rc = req_layout_init(); - if (rc) - return rc; - - rc = ptlrpc_hr_init(); - if (rc) - return rc; - - cleanup_phase = 1; - rc = ptlrpc_request_cache_init(); - if (rc) - goto cleanup; - - cleanup_phase = 3; - - rc = ptlrpc_connection_init(); - if (rc) - goto cleanup; - - cleanup_phase = 5; - rc = ldlm_init(); - if (rc) - goto cleanup; - - cleanup_phase = 6; - rc = sptlrpc_init(); - if (rc) - goto cleanup; - - cleanup_phase = 7; - rc = ptlrpc_nrs_init(); - if (rc) - goto cleanup; - - cleanup_phase = 8; - rc = tgt_mod_init(); - if (rc) - goto cleanup; - return 0; - -cleanup: - switch (cleanup_phase) { - case 8: - ptlrpc_nrs_fini(); - /* Fall through */ - case 7: - sptlrpc_fini(); - /* Fall through */ - case 6: - ldlm_exit(); - /* Fall through */ - case 5: - 
ptlrpc_connection_fini(); - /* Fall through */ - case 3: - ptlrpc_request_cache_fini(); - /* Fall through */ - case 1: - ptlrpc_hr_fini(); - req_layout_fini(); - /* Fall through */ - default: - ; - } - - return rc; -} - -static void __exit ptlrpc_exit(void) -{ - tgt_mod_exit(); - ptlrpc_nrs_fini(); - sptlrpc_fini(); - ldlm_exit(); - ptlrpc_request_cache_fini(); - ptlrpc_hr_fini(); - ptlrpc_connection_fini(); -} - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Lustre Request Processor and Lock Management"); -MODULE_VERSION(LUSTRE_VERSION_STRING); -MODULE_LICENSE("GPL"); - -module_init(ptlrpc_init); -module_exit(ptlrpc_exit); diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c deleted file mode 100644 index 531005411edf..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c +++ /dev/null @@ -1,914 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/ptlrpcd.c - */ - -/** \defgroup ptlrpcd PortalRPC daemon - * - * ptlrpcd is a special thread with its own set where other user might add - * requests when they don't want to wait for their completion. - * PtlRPCD will take care of sending such requests and then processing their - * replies and calling completion callbacks as necessary. - * The callbacks are called directly from ptlrpcd context. - * It is important to never significantly block (esp. on RPCs!) within such - * completion handler or a deadlock might occur where ptlrpcd enters some - * callback that attempts to send another RPC and wait for it to return, - * during which time ptlrpcd is completely blocked, so e.g. if import - * fails, recovery cannot progress because connection requests are also - * sent by ptlrpcd. - * - * @{ - */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include -#include -#include -#include -#include -#include -#include /* for obd_zombie */ -#include /* for OBD_FAIL_CHECK */ -#include /* cl_env_{get,put}() */ -#include - -#include "ptlrpc_internal.h" - -/* One of these per CPT. */ -struct ptlrpcd { - int pd_size; - int pd_index; - int pd_cpt; - int pd_cursor; - int pd_nthreads; - int pd_groupsize; - struct ptlrpcd_ctl pd_threads[0]; -}; - -/* - * max_ptlrpcds is obsolete, but retained to ensure that the kernel - * module will load on a system where it has been tuned. - * A value other than 0 implies it was tuned, in which case the value - * is used to derive a setting for ptlrpcd_per_cpt_max. - */ -static int max_ptlrpcds; -module_param(max_ptlrpcds, int, 0644); -MODULE_PARM_DESC(max_ptlrpcds, - "Max ptlrpcd thread count to be started (obsolete)."); - -/* - * ptlrpcd_bind_policy is obsolete, but retained to ensure that - * the kernel module will load on a system where it has been tuned. 
- * A value other than 0 implies it was tuned, in which case the value - * is used to derive a setting for ptlrpcd_partner_group_size. - */ -static int ptlrpcd_bind_policy; -module_param(ptlrpcd_bind_policy, int, 0644); -MODULE_PARM_DESC(ptlrpcd_bind_policy, - "Ptlrpcd threads binding mode (obsolete)."); - -/* - * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run - * in a CPT. - */ -static int ptlrpcd_per_cpt_max; -module_param(ptlrpcd_per_cpt_max, int, 0644); -MODULE_PARM_DESC(ptlrpcd_per_cpt_max, - "Max ptlrpcd thread count to be started per CPT."); - -/* - * ptlrpcd_partner_group_size: The desired number of threads in each - * ptlrpcd partner thread group. Default is 2, corresponding to the - * old PDB_POLICY_PAIR. A negative value makes all ptlrpcd threads in - * a CPT partners of each other. - */ -static int ptlrpcd_partner_group_size; -module_param(ptlrpcd_partner_group_size, int, 0644); -MODULE_PARM_DESC(ptlrpcd_partner_group_size, - "Number of ptlrpcd threads in a partner group."); - -/* - * ptlrpcd_cpts: A CPT string describing the CPU partitions that - * ptlrpcd threads should run on. Used to make ptlrpcd threads run on - * a subset of all CPTs. - * - * ptlrpcd_cpts=2 - * ptlrpcd_cpts=[2] - * run ptlrpcd threads only on CPT 2. - * - * ptlrpcd_cpts=0-3 - * ptlrpcd_cpts=[0-3] - * run ptlrpcd threads on CPTs 0, 1, 2, and 3. - * - * ptlrpcd_cpts=[0-3,5,7] - * run ptlrpcd threads on CPTS 0, 1, 2, 3, 5, and 7. - */ -static char *ptlrpcd_cpts; -module_param(ptlrpcd_cpts, charp, 0644); -MODULE_PARM_DESC(ptlrpcd_cpts, - "CPU partitions ptlrpcd threads should run in"); - -/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */ -static int *ptlrpcds_cpt_idx; - -/* ptlrpcds_num is the number of entries in the ptlrpcds array. */ -static int ptlrpcds_num; -static struct ptlrpcd **ptlrpcds; - -/* - * In addition to the regular thread pool above, there is a single - * global recovery thread. 
Recovery isn't critical for performance, - * and doesn't block, but must always be able to proceed, and it is - * possible that all normal ptlrpcd threads are blocked. Hence the - * need for a dedicated thread. - */ -static struct ptlrpcd_ctl ptlrpcd_rcv; - -struct mutex ptlrpcd_mutex; -static int ptlrpcd_users; - -void ptlrpcd_wake(struct ptlrpc_request *req) -{ - struct ptlrpc_request_set *set = req->rq_set; - - wake_up(&set->set_waitq); -} -EXPORT_SYMBOL(ptlrpcd_wake); - -static struct ptlrpcd_ctl * -ptlrpcd_select_pc(struct ptlrpc_request *req) -{ - struct ptlrpcd *pd; - int cpt; - int idx; - - if (req && req->rq_send_state != LUSTRE_IMP_FULL) - return &ptlrpcd_rcv; - - cpt = cfs_cpt_current(cfs_cpt_tab, 1); - if (!ptlrpcds_cpt_idx) - idx = cpt; - else - idx = ptlrpcds_cpt_idx[cpt]; - pd = ptlrpcds[idx]; - - /* We do not care whether it is strict load balance. */ - idx = pd->pd_cursor; - if (++idx == pd->pd_nthreads) - idx = 0; - pd->pd_cursor = idx; - - return &pd->pd_threads[idx]; -} - -/** - * Return transferred RPCs count. - */ -static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des, - struct ptlrpc_request_set *src) -{ - struct ptlrpc_request *req, *tmp; - int rc = 0; - - spin_lock(&src->set_new_req_lock); - if (likely(!list_empty(&src->set_new_requests))) { - list_for_each_entry_safe(req, tmp, &src->set_new_requests, rq_set_chain) - req->rq_set = des; - - list_splice_init(&src->set_new_requests, &des->set_requests); - rc = atomic_read(&src->set_new_count); - atomic_add(rc, &des->set_remaining); - atomic_set(&src->set_new_count, 0); - } - spin_unlock(&src->set_new_req_lock); - return rc; -} - -/** - * Requests that are added to the ptlrpcd queue are sent via - * ptlrpcd_check->ptlrpc_check_set(). 
- */ -void ptlrpcd_add_req(struct ptlrpc_request *req) -{ - struct ptlrpcd_ctl *pc; - - if (req->rq_reqmsg) - lustre_msg_set_jobid(req->rq_reqmsg, NULL); - - spin_lock(&req->rq_lock); - if (req->rq_invalid_rqset) { - req->rq_invalid_rqset = 0; - spin_unlock(&req->rq_lock); - if (wait_event_idle_timeout(req->rq_set_waitq, - !req->rq_set, - 5 * HZ) == 0) - wait_event_idle(req->rq_set_waitq, - !req->rq_set); - } else if (req->rq_set) { - /* If we have a valid "rq_set", just reuse it to avoid double - * linked. - */ - LASSERT(req->rq_phase == RQ_PHASE_NEW); - LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY); - - /* ptlrpc_check_set will decrease the count */ - atomic_inc(&req->rq_set->set_remaining); - spin_unlock(&req->rq_lock); - wake_up(&req->rq_set->set_waitq); - return; - } else { - spin_unlock(&req->rq_lock); - } - - pc = ptlrpcd_select_pc(req); - - DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]", - req, pc->pc_name, pc->pc_index); - - ptlrpc_set_add_new_req(pc, req); -} -EXPORT_SYMBOL(ptlrpcd_add_req); - -static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set) -{ - atomic_inc(&set->set_refcount); -} - -/** - * Check if there is more work to do on ptlrpcd set. - * Returns 1 if yes. - */ -static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) -{ - struct ptlrpc_request *req, *tmp; - struct ptlrpc_request_set *set = pc->pc_set; - int rc = 0; - int rc2; - - if (atomic_read(&set->set_new_count)) { - spin_lock(&set->set_new_req_lock); - if (likely(!list_empty(&set->set_new_requests))) { - list_splice_init(&set->set_new_requests, - &set->set_requests); - atomic_add(atomic_read(&set->set_new_count), - &set->set_remaining); - atomic_set(&set->set_new_count, 0); - /* - * Need to calculate its timeout. - */ - rc = 1; - } - spin_unlock(&set->set_new_req_lock); - } - - /* We should call lu_env_refill() before handling new requests to make - * sure that env key the requests depending on really exists. 
- */ - rc2 = lu_env_refill(env); - if (rc2 != 0) { - /* - * XXX This is very awkward situation, because - * execution can neither continue (request - * interpreters assume that env is set up), nor repeat - * the loop (as this potentially results in a tight - * loop of -ENOMEM's). - * - * Fortunately, refill only ever does something when - * new modules are loaded, i.e., early during boot up. - */ - CERROR("Failure to refill session: %d\n", rc2); - return rc; - } - - if (atomic_read(&set->set_remaining)) - rc |= ptlrpc_check_set(env, set); - - /* NB: ptlrpc_check_set has already moved completed request at the - * head of seq::set_requests - */ - list_for_each_entry_safe(req, tmp, &set->set_requests, rq_set_chain) { - if (req->rq_phase != RQ_PHASE_COMPLETE) - break; - - list_del_init(&req->rq_set_chain); - req->rq_set = NULL; - ptlrpc_req_finished(req); - } - - if (rc == 0) { - /* - * If new requests have been added, make sure to wake up. - */ - rc = atomic_read(&set->set_new_count); - - /* If we have nothing to do, check whether we can take some - * work from our partner threads. - */ - if (rc == 0 && pc->pc_npartners > 0) { - struct ptlrpcd_ctl *partner; - struct ptlrpc_request_set *ps; - int first = pc->pc_cursor; - - do { - partner = pc->pc_partners[pc->pc_cursor++]; - if (pc->pc_cursor >= pc->pc_npartners) - pc->pc_cursor = 0; - if (!partner) - continue; - - spin_lock(&partner->pc_lock); - ps = partner->pc_set; - if (!ps) { - spin_unlock(&partner->pc_lock); - continue; - } - - ptlrpc_reqset_get(ps); - spin_unlock(&partner->pc_lock); - - if (atomic_read(&ps->set_new_count)) { - rc = ptlrpcd_steal_rqset(set, ps); - if (rc > 0) - CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n", - rc, partner->pc_index, - pc->pc_index); - } - ptlrpc_reqset_put(ps); - } while (rc == 0 && pc->pc_cursor != first); - } - } - - return rc; -} - -/** - * Main ptlrpcd thread. 
- * ptlrpc's code paths like to execute in process context, so we have this - * thread which spins on a set which contains the rpcs and sends them. - * - */ -static int ptlrpcd(void *arg) -{ - struct ptlrpcd_ctl *pc = arg; - struct ptlrpc_request_set *set; - struct lu_context ses = { 0 }; - struct lu_env env = { .le_ses = &ses }; - int rc = 0; - int exit = 0; - - unshare_fs_struct(); - if (cfs_cpt_bind(cfs_cpt_tab, pc->pc_cpt) != 0) - CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt); - - /* - * Allocate the request set after the thread has been bound - * above. This is safe because no requests will be queued - * until all ptlrpcd threads have confirmed that they have - * successfully started. - */ - set = ptlrpc_prep_set(); - if (!set) { - rc = -ENOMEM; - goto failed; - } - spin_lock(&pc->pc_lock); - pc->pc_set = set; - spin_unlock(&pc->pc_lock); - /* - * XXX So far only "client" ptlrpcd uses an environment. In - * the future, ptlrpcd thread (or a thread-set) has to given - * an argument, describing its "scope". - */ - rc = lu_context_init(&env.le_ctx, - LCT_CL_THREAD | LCT_REMEMBER | LCT_NOREF); - if (rc == 0) { - rc = lu_context_init(env.le_ses, - LCT_SESSION | LCT_REMEMBER | LCT_NOREF); - if (rc != 0) - lu_context_fini(&env.le_ctx); - } - - if (rc != 0) - goto failed; - - complete(&pc->pc_starting); - - /* - * This mainloop strongly resembles ptlrpc_set_wait() except that our - * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when - * there are requests in the set. New requests come in on the set's - * new_req_list and ptlrpcd_check() moves them into the set. - */ - do { - int timeout; - - timeout = ptlrpc_set_next_timeout(set); - - lu_context_enter(&env.le_ctx); - lu_context_enter(env.le_ses); - if (wait_event_idle_timeout(set->set_waitq, - ptlrpcd_check(&env, pc), - (timeout ? 
timeout : 1) * HZ) == 0) - ptlrpc_expired_set(set); - - lu_context_exit(&env.le_ctx); - lu_context_exit(env.le_ses); - - /* - * Abort inflight rpcs for forced stop case. - */ - if (test_bit(LIOD_STOP, &pc->pc_flags)) { - if (test_bit(LIOD_FORCE, &pc->pc_flags)) - ptlrpc_abort_set(set); - exit++; - } - - /* - * Let's make one more loop to make sure that ptlrpcd_check() - * copied all raced new rpcs into the set so we can kill them. - */ - } while (exit < 2); - - /* - * Wait for inflight requests to drain. - */ - if (!list_empty(&set->set_requests)) - ptlrpc_set_wait(set); - lu_context_fini(&env.le_ctx); - lu_context_fini(env.le_ses); - - complete(&pc->pc_finishing); - - return 0; -failed: - pc->pc_error = rc; - complete(&pc->pc_starting); - return rc; -} - -static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt) -{ - pc->pc_index = index; - pc->pc_cpt = cpt; - init_completion(&pc->pc_starting); - init_completion(&pc->pc_finishing); - spin_lock_init(&pc->pc_lock); - - if (index < 0) { - /* Recovery thread. */ - snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv"); - } else { - /* Regular thread. */ - snprintf(pc->pc_name, sizeof(pc->pc_name), - "ptlrpcd_%02d_%02d", cpt, index); - } -} - -/* XXX: We want multiple CPU cores to share the async RPC load. So we - * start many ptlrpcd threads. We also want to reduce the ptlrpcd - * overhead caused by data transfer cross-CPU cores. So we bind - * all ptlrpcd threads to a CPT, in the expectation that CPTs - * will be defined in a way that matches these boundaries. Within - * a CPT a ptlrpcd thread can be scheduled on any available core. - * - * Each ptlrpcd thread has its own request queue. This can cause - * response delay if the thread is already busy. To help with - * this we define partner threads: these are other threads bound - * to the same CPT which will check for work in each other's - * request queues if they have no work to do. 
- * - * The desired number of partner threads can be tuned by setting - * ptlrpcd_partner_group_size. The default is to create pairs of - * partner threads. - */ -static int ptlrpcd_partners(struct ptlrpcd *pd, int index) -{ - struct ptlrpcd_ctl *pc; - struct ptlrpcd_ctl **ppc; - int first; - int i; - int rc = 0; - int size; - - LASSERT(index >= 0 && index < pd->pd_nthreads); - pc = &pd->pd_threads[index]; - pc->pc_npartners = pd->pd_groupsize - 1; - - if (pc->pc_npartners <= 0) - goto out; - - size = sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners; - pc->pc_partners = kzalloc_node(size, GFP_NOFS, - cfs_cpt_spread_node(cfs_cpt_tab, - pc->pc_cpt)); - if (!pc->pc_partners) { - pc->pc_npartners = 0; - rc = -ENOMEM; - goto out; - } - - first = index - index % pd->pd_groupsize; - ppc = pc->pc_partners; - for (i = first; i < first + pd->pd_groupsize; i++) { - if (i != index) - *ppc++ = &pd->pd_threads[i]; - } -out: - return rc; -} - -int ptlrpcd_start(struct ptlrpcd_ctl *pc) -{ - struct task_struct *task; - int rc = 0; - - /* - * Do not allow start second thread for one pc. 
- */ - if (test_and_set_bit(LIOD_START, &pc->pc_flags)) { - CWARN("Starting second thread (%s) for same pc %p\n", - pc->pc_name, pc); - return 0; - } - - task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - goto out_set; - } - - wait_for_completion(&pc->pc_starting); - rc = pc->pc_error; - if (rc != 0) - goto out_set; - - return 0; - -out_set: - if (pc->pc_set) { - struct ptlrpc_request_set *set = pc->pc_set; - - spin_lock(&pc->pc_lock); - pc->pc_set = NULL; - spin_unlock(&pc->pc_lock); - ptlrpc_set_destroy(set); - } - clear_bit(LIOD_START, &pc->pc_flags); - return rc; -} - -void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force) -{ - if (!test_bit(LIOD_START, &pc->pc_flags)) { - CWARN("Thread for pc %p was not started\n", pc); - return; - } - - set_bit(LIOD_STOP, &pc->pc_flags); - if (force) - set_bit(LIOD_FORCE, &pc->pc_flags); - wake_up(&pc->pc_set->set_waitq); -} - -void ptlrpcd_free(struct ptlrpcd_ctl *pc) -{ - struct ptlrpc_request_set *set = pc->pc_set; - - if (!test_bit(LIOD_START, &pc->pc_flags)) { - CWARN("Thread for pc %p was not started\n", pc); - goto out; - } - - wait_for_completion(&pc->pc_finishing); - - spin_lock(&pc->pc_lock); - pc->pc_set = NULL; - spin_unlock(&pc->pc_lock); - ptlrpc_set_destroy(set); - - clear_bit(LIOD_START, &pc->pc_flags); - clear_bit(LIOD_STOP, &pc->pc_flags); - clear_bit(LIOD_FORCE, &pc->pc_flags); - -out: - if (pc->pc_npartners > 0) { - LASSERT(pc->pc_partners); - - kfree(pc->pc_partners); - pc->pc_partners = NULL; - } - pc->pc_npartners = 0; - pc->pc_error = 0; -} - -static void ptlrpcd_fini(void) -{ - int i; - int j; - - if (ptlrpcds) { - for (i = 0; i < ptlrpcds_num; i++) { - if (!ptlrpcds[i]) - break; - for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++) - ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0); - for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++) - ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]); - kfree(ptlrpcds[i]); - ptlrpcds[i] = NULL; - } - kfree(ptlrpcds); - } - ptlrpcds_num = 0; 
- - ptlrpcd_stop(&ptlrpcd_rcv, 0); - ptlrpcd_free(&ptlrpcd_rcv); - - kfree(ptlrpcds_cpt_idx); - ptlrpcds_cpt_idx = NULL; -} - -static int ptlrpcd_init(void) -{ - int nthreads; - int groupsize; - int size; - int i; - int j; - int rc = 0; - struct cfs_cpt_table *cptable; - __u32 *cpts = NULL; - int ncpts; - int cpt; - struct ptlrpcd *pd; - - /* - * Determine the CPTs that ptlrpcd threads will run on. - */ - cptable = cfs_cpt_tab; - ncpts = cfs_cpt_number(cptable); - if (ptlrpcd_cpts) { - struct cfs_expr_list *el; - - size = ncpts * sizeof(ptlrpcds_cpt_idx[0]); - ptlrpcds_cpt_idx = kzalloc(size, GFP_KERNEL); - if (!ptlrpcds_cpt_idx) { - rc = -ENOMEM; - goto out; - } - - rc = cfs_expr_list_parse(ptlrpcd_cpts, - strlen(ptlrpcd_cpts), - 0, ncpts - 1, &el); - - if (rc != 0) { - CERROR("ptlrpcd_cpts: invalid CPT pattern string: %s", - ptlrpcd_cpts); - rc = -EINVAL; - goto out; - } - - rc = cfs_expr_list_values(el, ncpts, &cpts); - cfs_expr_list_free(el); - if (rc <= 0) { - CERROR("ptlrpcd_cpts: failed to parse CPT array %s: %d\n", - ptlrpcd_cpts, rc); - if (rc == 0) - rc = -EINVAL; - goto out; - } - - /* - * Create the cpt-to-index map. When there is no match - * in the cpt table, pick a cpt at random. This could - * be changed to take the topology of the system into - * account. - */ - for (cpt = 0; cpt < ncpts; cpt++) { - for (i = 0; i < rc; i++) - if (cpts[i] == cpt) - break; - if (i >= rc) - i = cpt % rc; - ptlrpcds_cpt_idx[cpt] = i; - } - - cfs_expr_list_values_free(cpts, rc); - ncpts = rc; - } - ptlrpcds_num = ncpts; - - size = ncpts * sizeof(ptlrpcds[0]); - ptlrpcds = kzalloc(size, GFP_KERNEL); - if (!ptlrpcds) { - rc = -ENOMEM; - goto out; - } - - /* - * The max_ptlrpcds parameter is obsolete, but do something - * sane if it has been tuned, and complain if - * ptlrpcd_per_cpt_max has also been tuned. 
- */ - if (max_ptlrpcds != 0) { - CWARN("max_ptlrpcds is obsolete.\n"); - if (ptlrpcd_per_cpt_max == 0) { - ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts; - /* Round up if there is a remainder. */ - if (max_ptlrpcds % ncpts != 0) - ptlrpcd_per_cpt_max++; - CWARN("Setting ptlrpcd_per_cpt_max = %d\n", - ptlrpcd_per_cpt_max); - } else { - CWARN("ptlrpd_per_cpt_max is also set!\n"); - } - } - - /* - * The ptlrpcd_bind_policy parameter is obsolete, but do - * something sane if it has been tuned, and complain if - * ptlrpcd_partner_group_size is also tuned. - */ - if (ptlrpcd_bind_policy != 0) { - CWARN("ptlrpcd_bind_policy is obsolete.\n"); - if (ptlrpcd_partner_group_size == 0) { - switch (ptlrpcd_bind_policy) { - case 1: /* PDB_POLICY_NONE */ - case 2: /* PDB_POLICY_FULL */ - ptlrpcd_partner_group_size = 1; - break; - case 3: /* PDB_POLICY_PAIR */ - ptlrpcd_partner_group_size = 2; - break; - case 4: /* PDB_POLICY_NEIGHBOR */ -#ifdef CONFIG_NUMA - ptlrpcd_partner_group_size = -1; /* CPT */ -#else - ptlrpcd_partner_group_size = 3; /* Triplets */ -#endif - break; - default: /* Illegal value, use the default. */ - ptlrpcd_partner_group_size = 2; - break; - } - CWARN("Setting ptlrpcd_partner_group_size = %d\n", - ptlrpcd_partner_group_size); - } else { - CWARN("ptlrpcd_partner_group_size is also set!\n"); - } - } - - if (ptlrpcd_partner_group_size == 0) - ptlrpcd_partner_group_size = 2; - else if (ptlrpcd_partner_group_size < 0) - ptlrpcd_partner_group_size = -1; - else if (ptlrpcd_per_cpt_max > 0 && - ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max) - ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max; - - /* - * Start the recovery thread first. 
- */ - set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags); - ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY); - rc = ptlrpcd_start(&ptlrpcd_rcv); - if (rc < 0) - goto out; - - for (i = 0; i < ncpts; i++) { - if (!cpts) - cpt = i; - else - cpt = cpts[i]; - - nthreads = cfs_cpt_weight(cptable, cpt); - if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads) - nthreads = ptlrpcd_per_cpt_max; - if (nthreads < 2) - nthreads = 2; - - if (ptlrpcd_partner_group_size <= 0) { - groupsize = nthreads; - } else if (nthreads <= ptlrpcd_partner_group_size) { - groupsize = nthreads; - } else { - groupsize = ptlrpcd_partner_group_size; - if (nthreads % groupsize != 0) - nthreads += groupsize - (nthreads % groupsize); - } - - size = offsetof(struct ptlrpcd, pd_threads[nthreads]); - pd = kzalloc_node(size, GFP_NOFS, - cfs_cpt_spread_node(cfs_cpt_tab, cpt)); - if (!pd) { - rc = -ENOMEM; - goto out; - } - pd->pd_size = size; - pd->pd_index = i; - pd->pd_cpt = cpt; - pd->pd_cursor = 0; - pd->pd_nthreads = nthreads; - pd->pd_groupsize = groupsize; - ptlrpcds[i] = pd; - - /* - * The ptlrpcd threads in a partner group can access - * each other's struct ptlrpcd_ctl, so these must be - * initialized before any thread is started. - */ - for (j = 0; j < nthreads; j++) { - ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt); - rc = ptlrpcd_partners(pd, j); - if (rc < 0) - goto out; - } - - /* XXX: We start nthreads ptlrpc daemons. - * Each of them can process any non-recovery - * async RPC to improve overall async RPC - * efficiency. - * - * But there are some issues with async I/O RPCs - * and async non-I/O RPCs processed in the same - * set under some cases. The ptlrpcd may be - * blocked by some async I/O RPC(s), then will - * cause other async non-I/O RPC(s) can not be - * processed in time. - * - * Maybe we should distinguish blocked async RPCs - * from non-blocked async RPCs, and process them - * in different ptlrpcd sets to avoid unnecessary - * dependency. 
But how to distribute async RPCs - * load among all the ptlrpc daemons becomes - * another trouble. - */ - for (j = 0; j < nthreads; j++) { - rc = ptlrpcd_start(&pd->pd_threads[j]); - if (rc < 0) - goto out; - } - } -out: - if (rc != 0) - ptlrpcd_fini(); - - return rc; -} - -int ptlrpcd_addref(void) -{ - int rc = 0; - - mutex_lock(&ptlrpcd_mutex); - if (++ptlrpcd_users == 1) { - rc = ptlrpcd_init(); - if (rc < 0) - ptlrpcd_users--; - } - mutex_unlock(&ptlrpcd_mutex); - return rc; -} -EXPORT_SYMBOL(ptlrpcd_addref); - -void ptlrpcd_decref(void) -{ - mutex_lock(&ptlrpcd_mutex); - if (--ptlrpcd_users == 0) - ptlrpcd_fini(); - mutex_unlock(&ptlrpcd_mutex); -} -EXPORT_SYMBOL(ptlrpcd_decref); -/** @} ptlrpcd */ diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c deleted file mode 100644 index 2ea0a7ff87dd..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/recover.c +++ /dev/null @@ -1,374 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/recover.c - * - * Author: Mike Shaver - */ - -#define DEBUG_SUBSYSTEM S_RPC -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -/** - * Start recovery on disconnected import. - * This is done by just attempting a connect - */ -void ptlrpc_initiate_recovery(struct obd_import *imp) -{ - CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd)); - ptlrpc_connect_import(imp); -} - -/** - * Identify what request from replay list needs to be replayed next - * (based on what we have already replayed) and send it to server. - */ -int ptlrpc_replay_next(struct obd_import *imp, int *inflight) -{ - int rc = 0; - struct ptlrpc_request *req = NULL, *pos; - __u64 last_transno; - - *inflight = 0; - - /* It might have committed some after we last spoke, so make sure we - * get rid of them now. 
- */ - spin_lock(&imp->imp_lock); - imp->imp_last_transno_checked = 0; - ptlrpc_free_committed(imp); - last_transno = imp->imp_last_replay_transno; - - CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n", - imp, obd2cli_tgt(imp->imp_obd), - imp->imp_peer_committed_transno, last_transno); - - /* Replay all the committed open requests on committed_list first */ - if (!list_empty(&imp->imp_committed_list)) { - req = list_last_entry(&imp->imp_committed_list, - struct ptlrpc_request, rq_replay_list); - - /* The last request on committed_list hasn't been replayed */ - if (req->rq_transno > last_transno) { - if (!imp->imp_resend_replay || - imp->imp_replay_cursor == &imp->imp_committed_list) - imp->imp_replay_cursor = imp->imp_replay_cursor->next; - - while (imp->imp_replay_cursor != - &imp->imp_committed_list) { - req = list_entry(imp->imp_replay_cursor, - struct ptlrpc_request, - rq_replay_list); - if (req->rq_transno > last_transno) - break; - - req = NULL; - LASSERT(!list_empty(imp->imp_replay_cursor)); - imp->imp_replay_cursor = - imp->imp_replay_cursor->next; - } - } else { - /* All requests on committed_list have been replayed */ - imp->imp_replay_cursor = &imp->imp_committed_list; - req = NULL; - } - } - - /* All the requests in committed list have been replayed, let's replay - * the imp_replay_list - */ - if (!req) { - struct ptlrpc_request *tmp; - list_for_each_entry_safe(tmp, pos, &imp->imp_replay_list, - rq_replay_list) { - if (tmp->rq_transno > last_transno) { - req = tmp; - break; - } - } - } - - /* If need to resend the last sent transno (because a reconnect - * has occurred), then stop on the matching req and send it again. - * If, however, the last sent transno has been committed then we - * continue replay from the next request. - */ - if (req && imp->imp_resend_replay) - lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT); - - /* The resend replay request may have been removed from the - * unreplied list. 
- */ - if (req && imp->imp_resend_replay && - list_empty(&req->rq_unreplied_list)) { - ptlrpc_add_unreplied(req); - imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp); - } - - imp->imp_resend_replay = 0; - spin_unlock(&imp->imp_lock); - - if (req) { - /* The request should have been added back in unreplied list - * by ptlrpc_prepare_replay(). - */ - LASSERT(!list_empty(&req->rq_unreplied_list)); - - rc = ptlrpc_replay_req(req); - if (rc) { - CERROR("recovery replay error %d for req %llu\n", - rc, req->rq_xid); - return rc; - } - *inflight = 1; - } - return rc; -} - -/** - * Schedule resending of request on sending_list. This is done after - * we completed replaying of requests and locks. - */ -int ptlrpc_resend(struct obd_import *imp) -{ - struct ptlrpc_request *req, *next; - - /* As long as we're in recovery, nothing should be added to the sending - * list, so we don't need to hold the lock during this iteration and - * resend process. - */ - /* Well... what if lctl recover is called twice at the same time? - */ - spin_lock(&imp->imp_lock); - if (imp->imp_state != LUSTRE_IMP_RECOVER) { - spin_unlock(&imp->imp_lock); - return -1; - } - - list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { - LASSERTF((long)req > PAGE_SIZE && req != LP_POISON, - "req %p bad\n", req); - LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); - - /* - * If the request is allowed to be sent during replay and it - * is not timeout yet, then it does not need to be resent. 
- */ - if (!ptlrpc_no_resend(req) && - (req->rq_timedout || !req->rq_allow_replay)) - ptlrpc_resend_req(req); - } - spin_unlock(&imp->imp_lock); - - OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT, 2); - return 0; -} - -/** - * Go through all requests in delayed list and wake their threads - * for resending - */ -void ptlrpc_wake_delayed(struct obd_import *imp) -{ - struct ptlrpc_request *req, *pos; - - spin_lock(&imp->imp_lock); - list_for_each_entry_safe(req, pos, &imp->imp_delayed_list, rq_list) { - DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set); - ptlrpc_client_wake_req(req); - } - spin_unlock(&imp->imp_lock); -} - -void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req) -{ - struct obd_import *imp = failed_req->rq_import; - - CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n", - imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid); - - if (ptlrpc_set_import_discon(imp, - lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) { - if (!imp->imp_replayable) { - CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid, - imp->imp_obd->obd_name); - ptlrpc_deactivate_import(imp); - } - /* to control recovery via lctl {disable|enable}_recovery */ - if (imp->imp_deactive == 0) - ptlrpc_connect_import(imp); - } - - /* Wait for recovery to complete and resend. If evicted, then - * this request will be errored out later. - */ - spin_lock(&failed_req->rq_lock); - if (!failed_req->rq_no_resend) - failed_req->rq_resend = 1; - spin_unlock(&failed_req->rq_lock); -} - -/** - * Administratively active/deactive a client. 
- * This should only be called by the ioctl interface, currently - * - the lctl deactivate and activate commands - * - echo 0/1 >> /sys/fs/lustre/osc/XXX/active - * - client umount -f (ll_umount_begin) - */ -int ptlrpc_set_import_active(struct obd_import *imp, int active) -{ - struct obd_device *obd = imp->imp_obd; - int rc = 0; - - LASSERT(obd); - - /* When deactivating, mark import invalid, and abort in-flight - * requests. - */ - if (!active) { - LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n", - obd2cli_tgt(imp->imp_obd)); - - /* set before invalidate to avoid messages about imp_inval - * set without imp_deactive in ptlrpc_import_delay_req - */ - spin_lock(&imp->imp_lock); - imp->imp_deactive = 1; - spin_unlock(&imp->imp_lock); - - obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE); - - ptlrpc_invalidate_import(imp); - } - - /* When activating, mark import valid, and attempt recovery */ - if (active) { - CDEBUG(D_HA, "setting import %s VALID\n", - obd2cli_tgt(imp->imp_obd)); - - spin_lock(&imp->imp_lock); - imp->imp_deactive = 0; - spin_unlock(&imp->imp_lock); - obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE); - - rc = ptlrpc_recover_import(imp, NULL, 0); - } - - return rc; -} -EXPORT_SYMBOL(ptlrpc_set_import_active); - -/* Attempt to reconnect an import */ -int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async) -{ - int rc = 0; - - spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive || - atomic_read(&imp->imp_inval_count)) - rc = -EINVAL; - spin_unlock(&imp->imp_lock); - if (rc) - goto out; - - /* force import to be disconnected. 
*/ - ptlrpc_set_import_discon(imp, 0); - - if (new_uuid) { - struct obd_uuid uuid; - - /* intruct import to use new uuid */ - obd_str2uuid(&uuid, new_uuid); - rc = import_set_conn_priority(imp, &uuid); - if (rc) - goto out; - } - - /* Check if reconnect is already in progress */ - spin_lock(&imp->imp_lock); - if (imp->imp_state != LUSTRE_IMP_DISCON) { - imp->imp_force_verify = 1; - rc = -EALREADY; - } - spin_unlock(&imp->imp_lock); - if (rc) - goto out; - - rc = ptlrpc_connect_import(imp); - if (rc) - goto out; - - if (!async) { - CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n", - obd2cli_tgt(imp->imp_obd), obd_timeout); - - rc = wait_event_idle_timeout(imp->imp_recovery_waitq, - !ptlrpc_import_in_recovery(imp), - obd_timeout * HZ); - CDEBUG(D_HA, "%s: recovery finished\n", - obd2cli_tgt(imp->imp_obd)); - rc = rc ? 0 : -ETIMEDOUT; - } - -out: - return rc; -} -EXPORT_SYMBOL(ptlrpc_recover_import); - -int ptlrpc_import_in_recovery(struct obd_import *imp) -{ - int in_recovery = 1; - - spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_FULL || - imp->imp_state == LUSTRE_IMP_CLOSED || - imp->imp_state == LUSTRE_IMP_DISCON || - imp->imp_obd->obd_no_recov) - in_recovery = 0; - spin_unlock(&imp->imp_lock); - - return in_recovery; -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c deleted file mode 100644 index e193f3346e6f..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ /dev/null @@ -1,2379 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/sec.c - * - * Author: Eric Mei - */ - -#define DEBUG_SUBSYSTEM S_SEC - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -/*********************************************** - * policy registers * - ***********************************************/ - -static rwlock_t policy_lock; -static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = { - NULL, -}; - -int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy) -{ - __u16 number = policy->sp_policy; - - LASSERT(policy->sp_name); - LASSERT(policy->sp_cops); - LASSERT(policy->sp_sops); - - if (number >= SPTLRPC_POLICY_MAX) - return -EINVAL; - - write_lock(&policy_lock); - if (unlikely(policies[number])) { - write_unlock(&policy_lock); - return -EALREADY; - } - policies[number] = policy; - write_unlock(&policy_lock); - - CDEBUG(D_SEC, "%s: registered\n", policy->sp_name); - return 0; -} -EXPORT_SYMBOL(sptlrpc_register_policy); - -int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy) -{ - __u16 number = policy->sp_policy; - - LASSERT(number < SPTLRPC_POLICY_MAX); - - 
write_lock(&policy_lock); - if (unlikely(!policies[number])) { - write_unlock(&policy_lock); - CERROR("%s: already unregistered\n", policy->sp_name); - return -EINVAL; - } - - LASSERT(policies[number] == policy); - policies[number] = NULL; - write_unlock(&policy_lock); - - CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name); - return 0; -} -EXPORT_SYMBOL(sptlrpc_unregister_policy); - -static -struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor) -{ - static DEFINE_MUTEX(load_mutex); - static atomic_t loaded = ATOMIC_INIT(0); - struct ptlrpc_sec_policy *policy; - __u16 number = SPTLRPC_FLVR_POLICY(flavor); - __u16 flag = 0; - - if (number >= SPTLRPC_POLICY_MAX) - return NULL; - - while (1) { - read_lock(&policy_lock); - policy = policies[number]; - if (policy && !try_module_get(policy->sp_owner)) - policy = NULL; - if (!policy) - flag = atomic_read(&loaded); - read_unlock(&policy_lock); - - if (policy || flag != 0 || - number != SPTLRPC_POLICY_GSS) - break; - - /* try to load gss module, once */ - mutex_lock(&load_mutex); - if (atomic_read(&loaded) == 0) { - if (request_module("ptlrpc_gss") == 0) - CDEBUG(D_SEC, - "module ptlrpc_gss loaded on demand\n"); - else - CERROR("Unable to load module ptlrpc_gss\n"); - - atomic_set(&loaded, 1); - } - mutex_unlock(&load_mutex); - } - - return policy; -} - -__u32 sptlrpc_name2flavor_base(const char *name) -{ - if (!strcmp(name, "null")) - return SPTLRPC_FLVR_NULL; - if (!strcmp(name, "plain")) - return SPTLRPC_FLVR_PLAIN; - if (!strcmp(name, "krb5n")) - return SPTLRPC_FLVR_KRB5N; - if (!strcmp(name, "krb5a")) - return SPTLRPC_FLVR_KRB5A; - if (!strcmp(name, "krb5i")) - return SPTLRPC_FLVR_KRB5I; - if (!strcmp(name, "krb5p")) - return SPTLRPC_FLVR_KRB5P; - - return SPTLRPC_FLVR_INVALID; -} -EXPORT_SYMBOL(sptlrpc_name2flavor_base); - -const char *sptlrpc_flavor2name_base(__u32 flvr) -{ - __u32 base = SPTLRPC_FLVR_BASE(flvr); - - if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) - return "null"; - else if (base == 
SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN)) - return "plain"; - else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N)) - return "krb5n"; - else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A)) - return "krb5a"; - else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I)) - return "krb5i"; - else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P)) - return "krb5p"; - - CERROR("invalid wire flavor 0x%x\n", flvr); - return "invalid"; -} -EXPORT_SYMBOL(sptlrpc_flavor2name_base); - -char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf, - char *buf, int bufsize) -{ - if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) - snprintf(buf, bufsize, "hash:%s", - sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg)); - else - snprintf(buf, bufsize, "%s", - sptlrpc_flavor2name_base(sf->sf_rpc)); - - buf[bufsize - 1] = '\0'; - return buf; -} -EXPORT_SYMBOL(sptlrpc_flavor2name_bulk); - -char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize) -{ - strlcpy(buf, sptlrpc_flavor2name_base(sf->sf_rpc), bufsize); - - /* - * currently we don't support customized bulk specification for - * flavors other than plain - */ - if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) { - char bspec[16]; - - bspec[0] = '-'; - sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1); - strlcat(buf, bspec, bufsize); - } - - return buf; -} -EXPORT_SYMBOL(sptlrpc_flavor2name); - -static char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize) -{ - buf[0] = '\0'; - - if (flags & PTLRPC_SEC_FL_REVERSE) - strlcat(buf, "reverse,", bufsize); - if (flags & PTLRPC_SEC_FL_ROOTONLY) - strlcat(buf, "rootonly,", bufsize); - if (flags & PTLRPC_SEC_FL_UDESC) - strlcat(buf, "udesc,", bufsize); - if (flags & PTLRPC_SEC_FL_BULK) - strlcat(buf, "bulk,", bufsize); - if (buf[0] == '\0') - strlcat(buf, "-,", bufsize); - - return buf; -} - -/************************************************** - * client context APIs * - **************************************************/ - -static -struct 
ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec) -{ - struct vfs_cred vcred; - int create = 1, remove_dead = 1; - - LASSERT(sec); - LASSERT(sec->ps_policy->sp_cops->lookup_ctx); - - if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE | - PTLRPC_SEC_FL_ROOTONLY)) { - vcred.vc_uid = 0; - vcred.vc_gid = 0; - if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) { - create = 0; - remove_dead = 0; - } - } else { - vcred.vc_uid = from_kuid(&init_user_ns, current_uid()); - vcred.vc_gid = from_kgid(&init_user_ns, current_gid()); - } - - return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, - create, remove_dead); -} - -struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx) -{ - atomic_inc(&ctx->cc_refcount); - return ctx; -} -EXPORT_SYMBOL(sptlrpc_cli_ctx_get); - -void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync) -{ - struct ptlrpc_sec *sec = ctx->cc_sec; - - LASSERT(sec); - LASSERT_ATOMIC_POS(&ctx->cc_refcount); - - if (!atomic_dec_and_test(&ctx->cc_refcount)) - return; - - sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync); -} -EXPORT_SYMBOL(sptlrpc_cli_ctx_put); - -static int import_sec_check_expire(struct obd_import *imp) -{ - int adapt = 0; - - spin_lock(&imp->imp_lock); - if (imp->imp_sec_expire && - imp->imp_sec_expire < ktime_get_real_seconds()) { - adapt = 1; - imp->imp_sec_expire = 0; - } - spin_unlock(&imp->imp_lock); - - if (!adapt) - return 0; - - CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n"); - return sptlrpc_import_sec_adapt(imp, NULL, NULL); -} - -/** - * Get and validate the client side ptlrpc security facilities from - * \a imp. There is a race condition on client reconnect when the import is - * being destroyed while there are outstanding client bound requests. In - * this case do not output any error messages if import secuity is not - * found. 
- * - * \param[in] imp obd import associated with client - * \param[out] sec client side ptlrpc security - * - * \retval 0 if security retrieved successfully - * \retval -ve errno if there was a problem - */ -static int import_sec_validate_get(struct obd_import *imp, - struct ptlrpc_sec **sec) -{ - int rc; - - if (unlikely(imp->imp_sec_expire)) { - rc = import_sec_check_expire(imp); - if (rc) - return rc; - } - - *sec = sptlrpc_import_sec_ref(imp); - if (!*sec) { - CERROR("import %p (%s) with no sec\n", - imp, ptlrpc_import_state_name(imp->imp_state)); - return -EACCES; - } - - if (unlikely((*sec)->ps_dying)) { - CERROR("attempt to use dying sec %p\n", sec); - sptlrpc_sec_put(*sec); - return -EACCES; - } - - return 0; -} - -/** - * Given a \a req, find or allocate a appropriate context for it. - * \pre req->rq_cli_ctx == NULL. - * - * \retval 0 succeed, and req->rq_cli_ctx is set. - * \retval -ev error number, and req->rq_cli_ctx == NULL. - */ -int sptlrpc_req_get_ctx(struct ptlrpc_request *req) -{ - struct obd_import *imp = req->rq_import; - struct ptlrpc_sec *sec; - int rc; - - LASSERT(!req->rq_cli_ctx); - LASSERT(imp); - - rc = import_sec_validate_get(imp, &sec); - if (rc) - return rc; - - req->rq_cli_ctx = get_my_ctx(sec); - - sptlrpc_sec_put(sec); - - if (!req->rq_cli_ctx) { - CERROR("req %p: fail to get context\n", req); - return -ECONNREFUSED; - } - - return 0; -} - -/** - * Drop the context for \a req. - * \pre req->rq_cli_ctx != NULL. - * \post req->rq_cli_ctx == NULL. - * - * If \a sync == 0, this function should return quickly without sleep; - * otherwise it might trigger and wait for the whole process of sending - * an context-destroying rpc to server. - */ -void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync) -{ - LASSERT(req); - LASSERT(req->rq_cli_ctx); - - /* request might be asked to release earlier while still - * in the context waiting list. 
- */ - if (!list_empty(&req->rq_ctx_chain)) { - spin_lock(&req->rq_cli_ctx->cc_lock); - list_del_init(&req->rq_ctx_chain); - spin_unlock(&req->rq_cli_ctx->cc_lock); - } - - sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync); - req->rq_cli_ctx = NULL; -} - -static -int sptlrpc_req_ctx_switch(struct ptlrpc_request *req, - struct ptlrpc_cli_ctx *oldctx, - struct ptlrpc_cli_ctx *newctx) -{ - struct sptlrpc_flavor old_flvr; - char *reqmsg = NULL; /* to workaround old gcc */ - int reqmsg_size; - int rc = 0; - - LASSERT(req->rq_reqmsg); - LASSERT(req->rq_reqlen); - LASSERT(req->rq_replen); - - CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n", - req, - oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec), - newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec), - oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name, - newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name); - - /* save flavor */ - old_flvr = req->rq_flvr; - - /* save request message */ - reqmsg_size = req->rq_reqlen; - if (reqmsg_size != 0) { - reqmsg = kvzalloc(reqmsg_size, GFP_NOFS); - if (!reqmsg) - return -ENOMEM; - memcpy(reqmsg, req->rq_reqmsg, reqmsg_size); - } - - /* release old req/rep buf */ - req->rq_cli_ctx = oldctx; - sptlrpc_cli_free_reqbuf(req); - sptlrpc_cli_free_repbuf(req); - req->rq_cli_ctx = newctx; - - /* recalculate the flavor */ - sptlrpc_req_set_flavor(req, 0); - - /* alloc new request buffer - * we don't need to alloc reply buffer here, leave it to the - * rest procedure of ptlrpc - */ - if (reqmsg_size != 0) { - rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size); - if (!rc) { - LASSERT(req->rq_reqmsg); - memcpy(req->rq_reqmsg, reqmsg, reqmsg_size); - } else { - CWARN("failed to alloc reqbuf: %d\n", rc); - req->rq_flvr = old_flvr; - } - - kvfree(reqmsg); - } - return rc; -} - -/** - * If current context of \a req is dead somehow, e.g. we just switched flavor - * thus marked original contexts dead, we'll find a new context for it. 
if - * no switch is needed, \a req will end up with the same context. - * - * \note a request must have a context, to keep other parts of code happy. - * In any case of failure during the switching, we must restore the old one. - */ -static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req) -{ - struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx; - struct ptlrpc_cli_ctx *newctx; - int rc; - - LASSERT(oldctx); - - sptlrpc_cli_ctx_get(oldctx); - sptlrpc_req_put_ctx(req, 0); - - rc = sptlrpc_req_get_ctx(req); - if (unlikely(rc)) { - LASSERT(!req->rq_cli_ctx); - - /* restore old ctx */ - req->rq_cli_ctx = oldctx; - return rc; - } - - newctx = req->rq_cli_ctx; - LASSERT(newctx); - - if (unlikely(newctx == oldctx && - test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) { - /* - * still get the old dead ctx, usually means system too busy - */ - CDEBUG(D_SEC, - "ctx (%p, fl %lx) doesn't switch, relax a little bit\n", - newctx, newctx->cc_flags); - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC)); - } else if (unlikely(!test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags))) { - /* - * new ctx not up to date yet - */ - CDEBUG(D_SEC, - "ctx (%p, fl %lx) doesn't switch, not up to date yet\n", - newctx, newctx->cc_flags); - } else { - /* - * it's possible newctx == oldctx if we're switching - * subflavor with the same sec. 
- */ - rc = sptlrpc_req_ctx_switch(req, oldctx, newctx); - if (rc) { - /* restore old ctx */ - sptlrpc_req_put_ctx(req, 0); - req->rq_cli_ctx = oldctx; - return rc; - } - - LASSERT(req->rq_cli_ctx == newctx); - } - - sptlrpc_cli_ctx_put(oldctx, 1); - return 0; -} - -static -int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx) -{ - if (cli_ctx_is_refreshed(ctx)) - return 1; - return 0; -} - -static -int ctx_refresh_timeout(struct ptlrpc_request *req) -{ - int rc; - - /* conn_cnt is needed in expire_one_request */ - lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt); - - rc = ptlrpc_expire_one_request(req, 1); - /* if we started recovery, we should mark this ctx dead; otherwise - * in case of lgssd died nobody would retire this ctx, following - * connecting will still find the same ctx thus cause deadlock. - * there's an assumption that expire time of the request should be - * later than the context refresh expire time. - */ - if (rc == 0) - req->rq_cli_ctx->cc_ops->force_die(req->rq_cli_ctx, 0); - return rc; -} - -static -void ctx_refresh_interrupt(struct ptlrpc_request *req) -{ - spin_lock(&req->rq_lock); - req->rq_intr = 1; - spin_unlock(&req->rq_lock); -} - -static -void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx) -{ - spin_lock(&ctx->cc_lock); - if (!list_empty(&req->rq_ctx_chain)) - list_del_init(&req->rq_ctx_chain); - spin_unlock(&ctx->cc_lock); -} - -/** - * To refresh the context of \req, if it's not up-to-date. - * \param timeout - * - < 0: don't wait - * - = 0: wait until success or fatal error occur - * - > 0: timeout value (in seconds) - * - * The status of the context could be subject to be changed by other threads - * at any time. We allow this race, but once we return with 0, the caller will - * suppose it's uptodated and keep using it until the owning rpc is done. - * - * \retval 0 only if the context is uptodated. - * \retval -ev error number. 
- */ -int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout) -{ - struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; - struct ptlrpc_sec *sec; - int rc; - - LASSERT(ctx); - - if (req->rq_ctx_init || req->rq_ctx_fini) - return 0; - - /* - * during the process a request's context might change type even - * (e.g. from gss ctx to null ctx), so each loop we need to re-check - * everything - */ -again: - rc = import_sec_validate_get(req->rq_import, &sec); - if (rc) - return rc; - - if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) { - CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n", - req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc); - req_off_ctx_list(req, ctx); - sptlrpc_req_replace_dead_ctx(req); - ctx = req->rq_cli_ctx; - } - sptlrpc_sec_put(sec); - - if (cli_ctx_is_eternal(ctx)) - return 0; - - if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) { - LASSERT(ctx->cc_ops->refresh); - ctx->cc_ops->refresh(ctx); - } - LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0); - - LASSERT(ctx->cc_ops->validate); - if (ctx->cc_ops->validate(ctx) == 0) { - req_off_ctx_list(req, ctx); - return 0; - } - - if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) { - spin_lock(&req->rq_lock); - req->rq_err = 1; - spin_unlock(&req->rq_lock); - req_off_ctx_list(req, ctx); - return -EPERM; - } - - /* - * There's a subtle issue for resending RPCs, suppose following - * situation: - * 1. the request was sent to server. - * 2. recovery was kicked start, after finished the request was - * marked as resent. - * 3. resend the request. - * 4. old reply from server received, we accept and verify the reply. - * this has to be success, otherwise the error will be aware - * by application. - * 5. new reply from server received, dropped by LNet. - * - * Note the xid of old & new request is the same. We can't simply - * change xid for the resent request because the server replies on - * it for reply reconstruction. 
- * - * Commonly the original context should be uptodate because we - * have a expiry nice time; server will keep its context because - * we at least hold a ref of old context which prevent context - * destroying RPC being sent. So server still can accept the request - * and finish the RPC. But if that's not the case: - * 1. If server side context has been trimmed, a NO_CONTEXT will - * be returned, gss_cli_ctx_verify/unseal will switch to new - * context by force. - * 2. Current context never be refreshed, then we are fine: we - * never really send request with old context before. - */ - if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) && - unlikely(req->rq_reqmsg) && - lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) { - req_off_ctx_list(req, ctx); - return 0; - } - - if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) { - req_off_ctx_list(req, ctx); - /* - * don't switch ctx if import was deactivated - */ - if (req->rq_import->imp_deactive) { - spin_lock(&req->rq_lock); - req->rq_err = 1; - spin_unlock(&req->rq_lock); - return -EINTR; - } - - rc = sptlrpc_req_replace_dead_ctx(req); - if (rc) { - LASSERT(ctx == req->rq_cli_ctx); - CERROR("req %p: failed to replace dead ctx %p: %d\n", - req, ctx, rc); - spin_lock(&req->rq_lock); - req->rq_err = 1; - spin_unlock(&req->rq_lock); - return rc; - } - - ctx = req->rq_cli_ctx; - goto again; - } - - /* - * Now we're sure this context is during upcall, add myself into - * waiting list - */ - spin_lock(&ctx->cc_lock); - if (list_empty(&req->rq_ctx_chain)) - list_add(&req->rq_ctx_chain, &ctx->cc_req_list); - spin_unlock(&ctx->cc_lock); - - if (timeout < 0) - return -EWOULDBLOCK; - - /* Clear any flags that may be present from previous sends */ - LASSERT(req->rq_receiving_reply == 0); - spin_lock(&req->rq_lock); - req->rq_err = 0; - req->rq_timedout = 0; - req->rq_resend = 0; - req->rq_restart = 0; - spin_unlock(&req->rq_lock); - - rc = wait_event_idle_timeout(req->rq_reply_waitq, - ctx_check_refresh(ctx), - 
timeout * HZ); - if (rc == 0 && ctx_refresh_timeout(req) == 0) { - /* Keep waiting, but enable some signals */ - rc = l_wait_event_abortable(req->rq_reply_waitq, - ctx_check_refresh(ctx)); - if (rc == 0) - rc = 1; - } - - if (rc > 0) - /* condition is true */ - rc = 0; - else if (rc == 0) - /* Timed out */ - rc = -ETIMEDOUT; - else { - /* Aborted by signal */ - rc = -EINTR; - ctx_refresh_interrupt(req); - } - - /* - * following cases could lead us here: - * - successfully refreshed; - * - interrupted; - * - timedout, and we don't want recover from the failure; - * - timedout, and waked up upon recovery finished; - * - someone else mark this ctx dead by force; - * - someone invalidate the req and call ptlrpc_client_wake_req(), - * e.g. ptlrpc_abort_inflight(); - */ - if (!cli_ctx_is_refreshed(ctx)) { - /* timed out or interrupted */ - req_off_ctx_list(req, ctx); - - LASSERT(rc != 0); - return rc; - } - - goto again; -} - -/** - * Initialize flavor settings for \a req, according to \a opcode. 
- * - * \note this could be called in two situations: - * - new request from ptlrpc_pre_req(), with proper @opcode - * - old request which changed ctx in the middle, with @opcode == 0 - */ -void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode) -{ - struct ptlrpc_sec *sec; - - LASSERT(req->rq_import); - LASSERT(req->rq_cli_ctx); - LASSERT(req->rq_cli_ctx->cc_sec); - LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0); - - /* special security flags according to opcode */ - switch (opcode) { - case OST_READ: - case MDS_READPAGE: - case MGS_CONFIG_READ: - case OBD_IDX_READ: - req->rq_bulk_read = 1; - break; - case OST_WRITE: - case MDS_WRITEPAGE: - req->rq_bulk_write = 1; - break; - case SEC_CTX_INIT: - req->rq_ctx_init = 1; - break; - case SEC_CTX_FINI: - req->rq_ctx_fini = 1; - break; - case 0: - /* init/fini rpc won't be resend, so can't be here */ - LASSERT(req->rq_ctx_init == 0); - LASSERT(req->rq_ctx_fini == 0); - - /* cleanup flags, which should be recalculated */ - req->rq_pack_udesc = 0; - req->rq_pack_bulk = 0; - break; - } - - sec = req->rq_cli_ctx->cc_sec; - - spin_lock(&sec->ps_lock); - req->rq_flvr = sec->ps_flvr; - spin_unlock(&sec->ps_lock); - - /* force SVC_NULL for context initiation rpc, SVC_INTG for context - * destruction rpc - */ - if (unlikely(req->rq_ctx_init)) - flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL); - else if (unlikely(req->rq_ctx_fini)) - flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG); - - /* user descriptor flag, null security can't do it anyway */ - if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) && - (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL)) - req->rq_pack_udesc = 1; - - /* bulk security flag */ - if ((req->rq_bulk_read || req->rq_bulk_write) && - sptlrpc_flavor_has_bulk(&req->rq_flvr)) - req->rq_pack_bulk = 1; -} - -void sptlrpc_request_out_callback(struct ptlrpc_request *req) -{ - if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV) - return; - - LASSERT(req->rq_clrbuf); - if 
(req->rq_pool || !req->rq_reqbuf) - return; - - kvfree(req->rq_reqbuf); - req->rq_reqbuf = NULL; - req->rq_reqbuf_len = 0; -} - -/** - * Given an import \a imp, check whether current user has a valid context - * or not. We may create a new context and try to refresh it, and try - * repeatedly try in case of non-fatal errors. Return 0 means success. - */ -int sptlrpc_import_check_ctx(struct obd_import *imp) -{ - struct ptlrpc_sec *sec; - struct ptlrpc_cli_ctx *ctx; - struct ptlrpc_request *req = NULL; - int rc; - - might_sleep(); - - sec = sptlrpc_import_sec_ref(imp); - ctx = get_my_ctx(sec); - sptlrpc_sec_put(sec); - - if (!ctx) - return -ENOMEM; - - if (cli_ctx_is_eternal(ctx) || - ctx->cc_ops->validate(ctx) == 0) { - sptlrpc_cli_ctx_put(ctx, 1); - return 0; - } - - if (cli_ctx_is_error(ctx)) { - sptlrpc_cli_ctx_put(ctx, 1); - return -EACCES; - } - - req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (!req) - return -ENOMEM; - - ptlrpc_cli_req_init(req); - atomic_set(&req->rq_refcount, 10000); - - req->rq_import = imp; - req->rq_flvr = sec->ps_flvr; - req->rq_cli_ctx = ctx; - - rc = sptlrpc_req_refresh_ctx(req, 0); - LASSERT(list_empty(&req->rq_ctx_chain)); - sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1); - ptlrpc_request_cache_free(req); - - return rc; -} - -/** - * Used by ptlrpc client, to perform the pre-defined security transformation - * upon the request message of \a req. After this function called, - * req->rq_reqmsg is still accessible as clear text. - */ -int sptlrpc_cli_wrap_request(struct ptlrpc_request *req) -{ - struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; - int rc = 0; - - LASSERT(ctx); - LASSERT(ctx->cc_sec); - LASSERT(req->rq_reqbuf || req->rq_clrbuf); - - /* we wrap bulk request here because now we can be sure - * the context is uptodate. 
- */ - if (req->rq_bulk) { - rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk); - if (rc) - return rc; - } - - switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) { - case SPTLRPC_SVC_NULL: - case SPTLRPC_SVC_AUTH: - case SPTLRPC_SVC_INTG: - LASSERT(ctx->cc_ops->sign); - rc = ctx->cc_ops->sign(ctx, req); - break; - case SPTLRPC_SVC_PRIV: - LASSERT(ctx->cc_ops->seal); - rc = ctx->cc_ops->seal(ctx, req); - break; - default: - LBUG(); - } - - if (rc == 0) { - LASSERT(req->rq_reqdata_len); - LASSERT(req->rq_reqdata_len % 8 == 0); - LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len); - } - - return rc; -} - -static int do_cli_unwrap_reply(struct ptlrpc_request *req) -{ - struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; - int rc; - - LASSERT(ctx); - LASSERT(ctx->cc_sec); - LASSERT(req->rq_repbuf); - LASSERT(req->rq_repdata); - LASSERT(!req->rq_repmsg); - - req->rq_rep_swab_mask = 0; - - rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len); - switch (rc) { - case 1: - lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); - case 0: - break; - default: - CERROR("failed unpack reply: x%llu\n", req->rq_xid); - return -EPROTO; - } - - if (req->rq_repdata_len < sizeof(struct lustre_msg)) { - CERROR("replied data length %d too small\n", - req->rq_repdata_len); - return -EPROTO; - } - - if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) != - SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) { - CERROR("reply policy %u doesn't match request policy %u\n", - SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr), - SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)); - return -EPROTO; - } - - switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) { - case SPTLRPC_SVC_NULL: - case SPTLRPC_SVC_AUTH: - case SPTLRPC_SVC_INTG: - LASSERT(ctx->cc_ops->verify); - rc = ctx->cc_ops->verify(ctx, req); - break; - case SPTLRPC_SVC_PRIV: - LASSERT(ctx->cc_ops->unseal); - rc = ctx->cc_ops->unseal(ctx, req); - break; - default: - LBUG(); - } - LASSERT(rc || req->rq_repmsg || req->rq_resend); - - if 
(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL && - !req->rq_ctx_init) - req->rq_rep_swab_mask = 0; - return rc; -} - -/** - * Used by ptlrpc client, to perform security transformation upon the reply - * message of \a req. After return successfully, req->rq_repmsg points to - * the reply message in clear text. - * - * \pre the reply buffer should have been un-posted from LNet, so nothing is - * going to change. - */ -int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req) -{ - LASSERT(req->rq_repbuf); - LASSERT(!req->rq_repdata); - LASSERT(!req->rq_repmsg); - LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len); - - if (req->rq_reply_off == 0 && - (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) { - CERROR("real reply with offset 0\n"); - return -EPROTO; - } - - if (req->rq_reply_off % 8 != 0) { - CERROR("reply at odd offset %u\n", req->rq_reply_off); - return -EPROTO; - } - - req->rq_repdata = (struct lustre_msg *) - (req->rq_repbuf + req->rq_reply_off); - req->rq_repdata_len = req->rq_nob_received; - - return do_cli_unwrap_reply(req); -} - -/** - * Used by ptlrpc client, to perform security transformation upon the early - * reply message of \a req. We expect the rq_reply_off is 0, and - * rq_nob_received is the early reply size. - * - * Because the receive buffer might be still posted, the reply data might be - * changed at any time, no matter we're holding rq_lock or not. For this reason - * we allocate a separate ptlrpc_request and reply buffer for early reply - * processing. - * - * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request. - * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned - * \a *req_ret to release it. - * \retval -ev error number, and \a req_ret will not be set. 
- */ -int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, - struct ptlrpc_request **req_ret) -{ - struct ptlrpc_request *early_req; - char *early_buf; - int early_bufsz, early_size; - int rc; - - early_req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (!early_req) - return -ENOMEM; - - ptlrpc_cli_req_init(early_req); - - early_size = req->rq_nob_received; - early_bufsz = size_roundup_power2(early_size); - early_buf = kvzalloc(early_bufsz, GFP_NOFS); - if (!early_buf) { - rc = -ENOMEM; - goto err_req; - } - - /* sanity checkings and copy data out, do it inside spinlock */ - spin_lock(&req->rq_lock); - - if (req->rq_replied) { - spin_unlock(&req->rq_lock); - rc = -EALREADY; - goto err_buf; - } - - LASSERT(req->rq_repbuf); - LASSERT(!req->rq_repdata); - LASSERT(!req->rq_repmsg); - - if (req->rq_reply_off != 0) { - CERROR("early reply with offset %u\n", req->rq_reply_off); - spin_unlock(&req->rq_lock); - rc = -EPROTO; - goto err_buf; - } - - if (req->rq_nob_received != early_size) { - /* even another early arrived the size should be the same */ - CERROR("data size has changed from %u to %u\n", - early_size, req->rq_nob_received); - spin_unlock(&req->rq_lock); - rc = -EINVAL; - goto err_buf; - } - - if (req->rq_nob_received < sizeof(struct lustre_msg)) { - CERROR("early reply length %d too small\n", - req->rq_nob_received); - spin_unlock(&req->rq_lock); - rc = -EALREADY; - goto err_buf; - } - - memcpy(early_buf, req->rq_repbuf, early_size); - spin_unlock(&req->rq_lock); - - early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx); - early_req->rq_flvr = req->rq_flvr; - early_req->rq_repbuf = early_buf; - early_req->rq_repbuf_len = early_bufsz; - early_req->rq_repdata = (struct lustre_msg *)early_buf; - early_req->rq_repdata_len = early_size; - early_req->rq_early = 1; - early_req->rq_reqmsg = req->rq_reqmsg; - - rc = do_cli_unwrap_reply(early_req); - if (rc) { - DEBUG_REQ(D_ADAPTTO, early_req, - "error %d unwrap early reply", rc); - goto err_ctx; - } - - 
LASSERT(early_req->rq_repmsg); - *req_ret = early_req; - return 0; - -err_ctx: - sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1); -err_buf: - kvfree(early_buf); -err_req: - ptlrpc_request_cache_free(early_req); - return rc; -} - -/** - * Used by ptlrpc client, to release a processed early reply \a early_req. - * - * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply(). - */ -void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req) -{ - LASSERT(early_req->rq_repbuf); - LASSERT(early_req->rq_repdata); - LASSERT(early_req->rq_repmsg); - - sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1); - kvfree(early_req->rq_repbuf); - ptlrpc_request_cache_free(early_req); -} - -/************************************************** - * sec ID * - **************************************************/ - -/* - * "fixed" sec (e.g. null) use sec_id < 0 - */ -static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1); - -int sptlrpc_get_next_secid(void) -{ - return atomic_inc_return(&sptlrpc_sec_id); -} -EXPORT_SYMBOL(sptlrpc_get_next_secid); - -/************************************************** - * client side high-level security APIs * - **************************************************/ - -static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid, - int grace, int force) -{ - struct ptlrpc_sec_policy *policy = sec->ps_policy; - - LASSERT(policy->sp_cops); - LASSERT(policy->sp_cops->flush_ctx_cache); - - return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force); -} - -static void sec_cop_destroy_sec(struct ptlrpc_sec *sec) -{ - struct ptlrpc_sec_policy *policy = sec->ps_policy; - - LASSERT_ATOMIC_ZERO(&sec->ps_refcount); - LASSERT_ATOMIC_ZERO(&sec->ps_nctx); - LASSERT(policy->sp_cops->destroy_sec); - - CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec); - - policy->sp_cops->destroy_sec(sec); - sptlrpc_policy_put(policy); -} - -static void sptlrpc_sec_kill(struct ptlrpc_sec *sec) -{ - LASSERT_ATOMIC_POS(&sec->ps_refcount); - - if 
(sec->ps_policy->sp_cops->kill_sec) { - sec->ps_policy->sp_cops->kill_sec(sec); - - sec_cop_flush_ctx_cache(sec, -1, 1, 1); - } -} - -static struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec) -{ - if (sec) - atomic_inc(&sec->ps_refcount); - - return sec; -} - -void sptlrpc_sec_put(struct ptlrpc_sec *sec) -{ - if (sec) { - LASSERT_ATOMIC_POS(&sec->ps_refcount); - - if (atomic_dec_and_test(&sec->ps_refcount)) { - sptlrpc_gc_del_sec(sec); - sec_cop_destroy_sec(sec); - } - } -} -EXPORT_SYMBOL(sptlrpc_sec_put); - -/* - * policy module is responsible for taking reference of import - */ -static -struct ptlrpc_sec *sptlrpc_sec_create(struct obd_import *imp, - struct ptlrpc_svc_ctx *svc_ctx, - struct sptlrpc_flavor *sf, - enum lustre_sec_part sp) -{ - struct ptlrpc_sec_policy *policy; - struct ptlrpc_sec *sec; - char str[32]; - - if (svc_ctx) { - LASSERT(imp->imp_dlm_fake == 1); - - CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n", - imp->imp_obd->obd_type->typ_name, - imp->imp_obd->obd_name, - sptlrpc_flavor2name(sf, str, sizeof(str))); - - policy = sptlrpc_policy_get(svc_ctx->sc_policy); - sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY; - } else { - LASSERT(imp->imp_dlm_fake == 0); - - CDEBUG(D_SEC, "%s %s: select security flavor %s\n", - imp->imp_obd->obd_type->typ_name, - imp->imp_obd->obd_name, - sptlrpc_flavor2name(sf, str, sizeof(str))); - - policy = sptlrpc_wireflavor2policy(sf->sf_rpc); - if (!policy) { - CERROR("invalid flavor 0x%x\n", sf->sf_rpc); - return NULL; - } - } - - sec = policy->sp_cops->create_sec(imp, svc_ctx, sf); - if (sec) { - atomic_inc(&sec->ps_refcount); - - sec->ps_part = sp; - - if (sec->ps_gc_interval && policy->sp_cops->gc_ctx) - sptlrpc_gc_add_sec(sec); - } else { - sptlrpc_policy_put(policy); - } - - return sec; -} - -struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp) -{ - struct ptlrpc_sec *sec; - - spin_lock(&imp->imp_lock); - sec = sptlrpc_sec_get(imp->imp_sec); - spin_unlock(&imp->imp_lock); - 
- return sec; -} -EXPORT_SYMBOL(sptlrpc_import_sec_ref); - -static void sptlrpc_import_sec_install(struct obd_import *imp, - struct ptlrpc_sec *sec) -{ - struct ptlrpc_sec *old_sec; - - LASSERT_ATOMIC_POS(&sec->ps_refcount); - - spin_lock(&imp->imp_lock); - old_sec = imp->imp_sec; - imp->imp_sec = sec; - spin_unlock(&imp->imp_lock); - - if (old_sec) { - sptlrpc_sec_kill(old_sec); - - /* balance the ref taken by this import */ - sptlrpc_sec_put(old_sec); - } -} - -static inline -int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2) -{ - return (memcmp(sf1, sf2, sizeof(*sf1)) == 0); -} - -static inline -void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src) -{ - *dst = *src; -} - -static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp, - struct ptlrpc_sec *sec, - struct sptlrpc_flavor *sf) -{ - char str1[32], str2[32]; - - if (sec->ps_flvr.sf_flags != sf->sf_flags) - CDEBUG(D_SEC, "changing sec flags: %s -> %s\n", - sptlrpc_secflags2str(sec->ps_flvr.sf_flags, - str1, sizeof(str1)), - sptlrpc_secflags2str(sf->sf_flags, - str2, sizeof(str2))); - - spin_lock(&sec->ps_lock); - flavor_copy(&sec->ps_flvr, sf); - spin_unlock(&sec->ps_lock); -} - -/** - * To get an appropriate ptlrpc_sec for the \a imp, according to the current - * configuration. Upon called, imp->imp_sec may or may not be NULL. - * - * - regular import: \a svc_ctx should be NULL and \a flvr is ignored; - * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request. 
- */ -int sptlrpc_import_sec_adapt(struct obd_import *imp, - struct ptlrpc_svc_ctx *svc_ctx, - struct sptlrpc_flavor *flvr) -{ - struct ptlrpc_connection *conn; - struct sptlrpc_flavor sf; - struct ptlrpc_sec *sec, *newsec; - enum lustre_sec_part sp; - char str[24]; - int rc = 0; - - might_sleep(); - - if (!imp) - return 0; - - conn = imp->imp_connection; - - if (!svc_ctx) { - struct client_obd *cliobd = &imp->imp_obd->u.cli; - /* - * normal import, determine flavor from rule set, except - * for mgc the flavor is predetermined. - */ - if (cliobd->cl_sp_me == LUSTRE_SP_MGC) - sf = cliobd->cl_flvr_mgc; - else - sptlrpc_conf_choose_flavor(cliobd->cl_sp_me, - cliobd->cl_sp_to, - &cliobd->cl_target_uuid, - conn->c_self, &sf); - - sp = imp->imp_obd->u.cli.cl_sp_me; - } else { - /* reverse import, determine flavor from incoming request */ - sf = *flvr; - - if (sf.sf_rpc != SPTLRPC_FLVR_NULL) - sf.sf_flags = PTLRPC_SEC_FL_REVERSE | - PTLRPC_SEC_FL_ROOTONLY; - - sp = sptlrpc_target_sec_part(imp->imp_obd); - } - - sec = sptlrpc_import_sec_ref(imp); - if (sec) { - char str2[24]; - - if (flavor_equal(&sf, &sec->ps_flvr)) - goto out; - - CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n", - imp->imp_obd->obd_name, - obd_uuid2str(&conn->c_remote_uuid), - sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)), - sptlrpc_flavor2name(&sf, str2, sizeof(str2))); - - if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) == - SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) && - SPTLRPC_FLVR_MECH(sf.sf_rpc) == - SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) { - sptlrpc_import_sec_adapt_inplace(imp, sec, &sf); - goto out; - } - } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) != - SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) { - CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n", - imp->imp_obd->obd_name, - obd_uuid2str(&conn->c_remote_uuid), - LNET_NIDNET(conn->c_self), - sptlrpc_flavor2name(&sf, str, sizeof(str))); - } - - mutex_lock(&imp->imp_sec_mutex); - - newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp); - if 
(newsec) { - sptlrpc_import_sec_install(imp, newsec); - } else { - CERROR("import %s->%s: failed to create new sec\n", - imp->imp_obd->obd_name, - obd_uuid2str(&conn->c_remote_uuid)); - rc = -EPERM; - } - - mutex_unlock(&imp->imp_sec_mutex); -out: - sptlrpc_sec_put(sec); - return rc; -} - -void sptlrpc_import_sec_put(struct obd_import *imp) -{ - if (imp->imp_sec) { - sptlrpc_sec_kill(imp->imp_sec); - - sptlrpc_sec_put(imp->imp_sec); - imp->imp_sec = NULL; - } -} - -static void import_flush_ctx_common(struct obd_import *imp, - uid_t uid, int grace, int force) -{ - struct ptlrpc_sec *sec; - - if (!imp) - return; - - sec = sptlrpc_import_sec_ref(imp); - if (!sec) - return; - - sec_cop_flush_ctx_cache(sec, uid, grace, force); - sptlrpc_sec_put(sec); -} - -void sptlrpc_import_flush_my_ctx(struct obd_import *imp) -{ - import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()), - 1, 1); -} -EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx); - -void sptlrpc_import_flush_all_ctx(struct obd_import *imp) -{ - import_flush_ctx_common(imp, -1, 1, 1); -} -EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx); - -/** - * Used by ptlrpc client to allocate request buffer of \a req. Upon return - * successfully, req->rq_reqmsg points to a buffer with size \a msgsize. - */ -int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize) -{ - struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; - struct ptlrpc_sec_policy *policy; - int rc; - - LASSERT(ctx); - LASSERT(ctx->cc_sec); - LASSERT(ctx->cc_sec->ps_policy); - LASSERT(!req->rq_reqmsg); - LASSERT_ATOMIC_POS(&ctx->cc_refcount); - - policy = ctx->cc_sec->ps_policy; - rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize); - if (!rc) { - LASSERT(req->rq_reqmsg); - LASSERT(req->rq_reqbuf || req->rq_clrbuf); - - /* zeroing preallocated buffer */ - if (req->rq_pool) - memset(req->rq_reqmsg, 0, msgsize); - } - - return rc; -} - -/** - * Used by ptlrpc client to free request buffer of \a req. 
After this - * req->rq_reqmsg is set to NULL and should not be accessed anymore. - */ -void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req) -{ - struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; - struct ptlrpc_sec_policy *policy; - - LASSERT(ctx); - LASSERT(ctx->cc_sec); - LASSERT(ctx->cc_sec->ps_policy); - LASSERT_ATOMIC_POS(&ctx->cc_refcount); - - if (!req->rq_reqbuf && !req->rq_clrbuf) - return; - - policy = ctx->cc_sec->ps_policy; - policy->sp_cops->free_reqbuf(ctx->cc_sec, req); - req->rq_reqmsg = NULL; -} - -/* - * NOTE caller must guarantee the buffer size is enough for the enlargement - */ -void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg, - int segment, int newsize) -{ - void *src, *dst; - int oldsize, oldmsg_size, movesize; - - LASSERT(segment < msg->lm_bufcount); - LASSERT(msg->lm_buflens[segment] <= newsize); - - if (msg->lm_buflens[segment] == newsize) - return; - - /* nothing to do if we are enlarging the last segment */ - if (segment == msg->lm_bufcount - 1) { - msg->lm_buflens[segment] = newsize; - return; - } - - oldsize = msg->lm_buflens[segment]; - - src = lustre_msg_buf(msg, segment + 1, 0); - msg->lm_buflens[segment] = newsize; - dst = lustre_msg_buf(msg, segment + 1, 0); - msg->lm_buflens[segment] = oldsize; - - /* move from segment + 1 to end segment */ - LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2); - oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens); - movesize = oldmsg_size - ((unsigned long)src - (unsigned long)msg); - LASSERT(movesize >= 0); - - if (movesize) - memmove(dst, src, movesize); - - /* note we don't clear the ares where old data live, not secret */ - - /* finally set new segment size */ - msg->lm_buflens[segment] = newsize; -} -EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace); - -/** - * Used by ptlrpc client to enlarge the \a segment of request message pointed - * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be - * preserved after the enlargement. 
this must be called after original request - * buffer being allocated. - * - * \note after this be called, rq_reqmsg and rq_reqlen might have been changed, - * so caller should refresh its local pointers if needed. - */ -int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req, - int segment, int newsize) -{ - struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; - struct ptlrpc_sec_cops *cops; - struct lustre_msg *msg = req->rq_reqmsg; - - LASSERT(ctx); - LASSERT(msg); - LASSERT(msg->lm_bufcount > segment); - LASSERT(msg->lm_buflens[segment] <= newsize); - - if (msg->lm_buflens[segment] == newsize) - return 0; - - cops = ctx->cc_sec->ps_policy->sp_cops; - LASSERT(cops->enlarge_reqbuf); - return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize); -} -EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf); - -/** - * Used by ptlrpc client to allocate reply buffer of \a req. - * - * \note After this, req->rq_repmsg is still not accessible. - */ -int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize) -{ - struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; - struct ptlrpc_sec_policy *policy; - - LASSERT(ctx); - LASSERT(ctx->cc_sec); - LASSERT(ctx->cc_sec->ps_policy); - - if (req->rq_repbuf) - return 0; - - policy = ctx->cc_sec->ps_policy; - return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize); -} - -/** - * Used by ptlrpc client to free reply buffer of \a req. After this - * req->rq_repmsg is set to NULL and should not be accessed anymore. 
- */ -void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req) -{ - struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; - struct ptlrpc_sec_policy *policy; - - LASSERT(ctx); - LASSERT(ctx->cc_sec); - LASSERT(ctx->cc_sec->ps_policy); - LASSERT_ATOMIC_POS(&ctx->cc_refcount); - - if (!req->rq_repbuf) - return; - LASSERT(req->rq_repbuf_len); - - policy = ctx->cc_sec->ps_policy; - policy->sp_cops->free_repbuf(ctx->cc_sec, req); - req->rq_repmsg = NULL; -} - -static int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx) -{ - struct ptlrpc_sec_policy *policy = ctx->sc_policy; - - if (!policy->sp_sops->install_rctx) - return 0; - return policy->sp_sops->install_rctx(imp, ctx); -} - -/**************************************** - * server side security * - ****************************************/ - -static int flavor_allowed(struct sptlrpc_flavor *exp, - struct ptlrpc_request *req) -{ - struct sptlrpc_flavor *flvr = &req->rq_flvr; - - if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc) - return 1; - - if ((req->rq_ctx_init || req->rq_ctx_fini) && - SPTLRPC_FLVR_POLICY(exp->sf_rpc) == - SPTLRPC_FLVR_POLICY(flvr->sf_rpc) && - SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc)) - return 1; - - return 0; -} - -#define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10) - -/** - * Given an export \a exp, check whether the flavor of incoming \a req - * is allowed by the export \a exp. Main logic is about taking care of - * changing configurations. Return 0 means success. - */ -int sptlrpc_target_export_check(struct obd_export *exp, - struct ptlrpc_request *req) -{ - struct sptlrpc_flavor flavor; - - if (!exp) - return 0; - - /* client side export has no imp_reverse, skip - * FIXME maybe we should check flavor this as well??? 
- */ - if (!exp->exp_imp_reverse) - return 0; - - /* don't care about ctx fini rpc */ - if (req->rq_ctx_fini) - return 0; - - spin_lock(&exp->exp_lock); - - /* if flavor just changed (exp->exp_flvr_changed != 0), we wait for - * the first req with the new flavor, then treat it as current flavor, - * adapt reverse sec according to it. - * note the first rpc with new flavor might not be with root ctx, in - * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. - */ - if (unlikely(exp->exp_flvr_changed) && - flavor_allowed(&exp->exp_flvr_old[1], req)) { - /* make the new flavor as "current", and old ones as - * about-to-expire - */ - CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp, - exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc); - flavor = exp->exp_flvr_old[1]; - exp->exp_flvr_old[1] = exp->exp_flvr_old[0]; - exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0]; - exp->exp_flvr_old[0] = exp->exp_flvr; - exp->exp_flvr_expire[0] = ktime_get_real_seconds() + - EXP_FLVR_UPDATE_EXPIRE; - exp->exp_flvr = flavor; - - /* flavor change finished */ - exp->exp_flvr_changed = 0; - LASSERT(exp->exp_flvr_adapt == 1); - - /* if it's gss, we only interested in root ctx init */ - if (req->rq_auth_gss && - !(req->rq_ctx_init && - (req->rq_auth_usr_root || req->rq_auth_usr_mdt || - req->rq_auth_usr_ost))) { - spin_unlock(&exp->exp_lock); - CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n", - req->rq_auth_gss, req->rq_ctx_init, - req->rq_auth_usr_root, req->rq_auth_usr_mdt, - req->rq_auth_usr_ost); - return 0; - } - - exp->exp_flvr_adapt = 0; - spin_unlock(&exp->exp_lock); - - return sptlrpc_import_sec_adapt(exp->exp_imp_reverse, - req->rq_svc_ctx, &flavor); - } - - /* if it equals to the current flavor, we accept it, but need to - * dealing with reverse sec/ctx - */ - if (likely(flavor_allowed(&exp->exp_flvr, req))) { - /* most cases should return here, we only interested in - * gss root ctx init - */ - if (!req->rq_auth_gss || !req->rq_ctx_init || - 
(!req->rq_auth_usr_root && !req->rq_auth_usr_mdt && - !req->rq_auth_usr_ost)) { - spin_unlock(&exp->exp_lock); - return 0; - } - - /* if flavor just changed, we should not proceed, just leave - * it and current flavor will be discovered and replaced - * shortly, and let _this_ rpc pass through - */ - if (exp->exp_flvr_changed) { - LASSERT(exp->exp_flvr_adapt); - spin_unlock(&exp->exp_lock); - return 0; - } - - if (exp->exp_flvr_adapt) { - exp->exp_flvr_adapt = 0; - CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n", - exp, exp->exp_flvr.sf_rpc, - exp->exp_flvr_old[0].sf_rpc, - exp->exp_flvr_old[1].sf_rpc); - flavor = exp->exp_flvr; - spin_unlock(&exp->exp_lock); - - return sptlrpc_import_sec_adapt(exp->exp_imp_reverse, - req->rq_svc_ctx, - &flavor); - } else { - CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n", - exp, exp->exp_flvr.sf_rpc, - exp->exp_flvr_old[0].sf_rpc, - exp->exp_flvr_old[1].sf_rpc); - spin_unlock(&exp->exp_lock); - - return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse, - req->rq_svc_ctx); - } - } - - if (exp->exp_flvr_expire[0]) { - if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) { - if (flavor_allowed(&exp->exp_flvr_old[0], req)) { - CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (%lld)\n", exp, - exp->exp_flvr.sf_rpc, - exp->exp_flvr_old[0].sf_rpc, - exp->exp_flvr_old[1].sf_rpc, - (s64)(exp->exp_flvr_expire[0] - - ktime_get_real_seconds())); - spin_unlock(&exp->exp_lock); - return 0; - } - } else { - CDEBUG(D_SEC, "mark middle expired\n"); - exp->exp_flvr_expire[0] = 0; - } - CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp, - exp->exp_flvr.sf_rpc, - exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc, - req->rq_flvr.sf_rpc); - } - - /* now it doesn't match the current flavor, the only chance we can - * accept it is match the old flavors which is not expired. 
- */ - if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) { - if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) { - if (flavor_allowed(&exp->exp_flvr_old[1], req)) { - CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n", - exp, - exp->exp_flvr.sf_rpc, - exp->exp_flvr_old[0].sf_rpc, - exp->exp_flvr_old[1].sf_rpc, - (s64)(exp->exp_flvr_expire[1] - - ktime_get_real_seconds())); - spin_unlock(&exp->exp_lock); - return 0; - } - } else { - CDEBUG(D_SEC, "mark oldest expired\n"); - exp->exp_flvr_expire[1] = 0; - } - CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n", - exp, exp->exp_flvr.sf_rpc, - exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc, - req->rq_flvr.sf_rpc); - } else { - CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n", - exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc, - exp->exp_flvr_old[1].sf_rpc); - } - - spin_unlock(&exp->exp_lock); - - CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n", - exp, exp->exp_obd->obd_name, - req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini, - req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost, - req->rq_flvr.sf_rpc, - exp->exp_flvr.sf_rpc, - exp->exp_flvr_old[0].sf_rpc, - exp->exp_flvr_expire[0] ? - (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0, - exp->exp_flvr_old[1].sf_rpc, - exp->exp_flvr_expire[1] ? 
- (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0); - return -EACCES; -} -EXPORT_SYMBOL(sptlrpc_target_export_check); - -static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc) -{ - /* peer's claim is unreliable unless gss is being used */ - if (!req->rq_auth_gss || svc_rc == SECSVC_DROP) - return svc_rc; - - switch (req->rq_sp_from) { - case LUSTRE_SP_CLI: - if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) { - DEBUG_REQ(D_ERROR, req, "faked source CLI"); - svc_rc = SECSVC_DROP; - } - break; - case LUSTRE_SP_MDT: - if (!req->rq_auth_usr_mdt) { - DEBUG_REQ(D_ERROR, req, "faked source MDT"); - svc_rc = SECSVC_DROP; - } - break; - case LUSTRE_SP_OST: - if (!req->rq_auth_usr_ost) { - DEBUG_REQ(D_ERROR, req, "faked source OST"); - svc_rc = SECSVC_DROP; - } - break; - case LUSTRE_SP_MGS: - case LUSTRE_SP_MGC: - if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt && - !req->rq_auth_usr_ost) { - DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS"); - svc_rc = SECSVC_DROP; - } - break; - case LUSTRE_SP_ANY: - default: - DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from); - svc_rc = SECSVC_DROP; - } - - return svc_rc; -} - -/** - * Used by ptlrpc server, to perform transformation upon request message of - * incoming \a req. This must be the first thing to do with a incoming - * request in ptlrpc layer. - * - * \retval SECSVC_OK success, and req->rq_reqmsg point to request message in - * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set. - * \retval SECSVC_COMPLETE success, the request has been fully processed, and - * reply message has been prepared. - * \retval SECSVC_DROP failed, this request should be dropped. 
- */ -int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req) -{ - struct ptlrpc_sec_policy *policy; - struct lustre_msg *msg = req->rq_reqbuf; - int rc; - - LASSERT(msg); - LASSERT(!req->rq_reqmsg); - LASSERT(!req->rq_repmsg); - LASSERT(!req->rq_svc_ctx); - - req->rq_req_swab_mask = 0; - - rc = __lustre_unpack_msg(msg, req->rq_reqdata_len); - switch (rc) { - case 1: - lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); - case 0: - break; - default: - CERROR("error unpacking request from %s x%llu\n", - libcfs_id2str(req->rq_peer), req->rq_xid); - return SECSVC_DROP; - } - - req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr); - req->rq_sp_from = LUSTRE_SP_ANY; - req->rq_auth_uid = -1; - req->rq_auth_mapped_uid = -1; - - policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc); - if (!policy) { - CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc); - return SECSVC_DROP; - } - - LASSERT(policy->sp_sops->accept); - rc = policy->sp_sops->accept(req); - sptlrpc_policy_put(policy); - LASSERT(req->rq_reqmsg || rc != SECSVC_OK); - LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP); - - /* - * if it's not null flavor (which means embedded packing msg), - * reset the swab mask for the coming inner msg unpacking. - */ - if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) - req->rq_req_swab_mask = 0; - - /* sanity check for the request source */ - rc = sptlrpc_svc_check_from(req, rc); - return rc; -} - -/** - * Used by ptlrpc server, to allocate reply buffer for \a req. If succeed, - * req->rq_reply_state is set, and req->rq_reply_state->rs_msg point to - * a buffer of \a msglen size. 
- */ -int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen) -{ - struct ptlrpc_sec_policy *policy; - struct ptlrpc_reply_state *rs; - int rc; - - LASSERT(req->rq_svc_ctx); - LASSERT(req->rq_svc_ctx->sc_policy); - - policy = req->rq_svc_ctx->sc_policy; - LASSERT(policy->sp_sops->alloc_rs); - - rc = policy->sp_sops->alloc_rs(req, msglen); - if (unlikely(rc == -ENOMEM)) { - struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; - - if (svcpt->scp_service->srv_max_reply_size < - msglen + sizeof(struct ptlrpc_reply_state)) { - /* Just return failure if the size is too big */ - CERROR("size of message is too big (%zd), %d allowed\n", - msglen + sizeof(struct ptlrpc_reply_state), - svcpt->scp_service->srv_max_reply_size); - return -ENOMEM; - } - - /* failed alloc, try emergency pool */ - rs = lustre_get_emerg_rs(svcpt); - if (!rs) - return -ENOMEM; - - req->rq_reply_state = rs; - rc = policy->sp_sops->alloc_rs(req, msglen); - if (rc) { - lustre_put_emerg_rs(rs); - req->rq_reply_state = NULL; - } - } - - LASSERT(rc != 0 || - (req->rq_reply_state && req->rq_reply_state->rs_msg)); - - return rc; -} - -/** - * Used by ptlrpc server, to perform transformation upon reply message. - * - * \post req->rq_reply_off is set to appropriate server-controlled reply offset. - * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible. - */ -int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req) -{ - struct ptlrpc_sec_policy *policy; - int rc; - - LASSERT(req->rq_svc_ctx); - LASSERT(req->rq_svc_ctx->sc_policy); - - policy = req->rq_svc_ctx->sc_policy; - LASSERT(policy->sp_sops->authorize); - - rc = policy->sp_sops->authorize(req); - LASSERT(rc || req->rq_reply_state->rs_repdata_len); - - return rc; -} - -/** - * Used by ptlrpc server, to free reply_state. 
- */ -void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs) -{ - struct ptlrpc_sec_policy *policy; - unsigned int prealloc; - - LASSERT(rs->rs_svc_ctx); - LASSERT(rs->rs_svc_ctx->sc_policy); - - policy = rs->rs_svc_ctx->sc_policy; - LASSERT(policy->sp_sops->free_rs); - - prealloc = rs->rs_prealloc; - policy->sp_sops->free_rs(rs); - - if (prealloc) - lustre_put_emerg_rs(rs); -} - -void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req) -{ - struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; - - if (ctx) - atomic_inc(&ctx->sc_refcount); -} - -void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req) -{ - struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; - - if (!ctx) - return; - - LASSERT_ATOMIC_POS(&ctx->sc_refcount); - if (atomic_dec_and_test(&ctx->sc_refcount)) { - if (ctx->sc_policy->sp_sops->free_ctx) - ctx->sc_policy->sp_sops->free_ctx(ctx); - } - req->rq_svc_ctx = NULL; -} - -/**************************************** - * bulk security * - ****************************************/ - -/** - * Perform transformation upon bulk data pointed by \a desc. This is called - * before transforming the request message. - */ -int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc) -{ - struct ptlrpc_cli_ctx *ctx; - - LASSERT(req->rq_bulk_read || req->rq_bulk_write); - - if (!req->rq_pack_bulk) - return 0; - - ctx = req->rq_cli_ctx; - if (ctx->cc_ops->wrap_bulk) - return ctx->cc_ops->wrap_bulk(ctx, req, desc); - return 0; -} -EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk); - -/** - * This is called after unwrap the reply message. - * return nob of actual plain text size received, or error code. 
- */ -int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc, - int nob) -{ - struct ptlrpc_cli_ctx *ctx; - int rc; - - LASSERT(req->rq_bulk_read && !req->rq_bulk_write); - - if (!req->rq_pack_bulk) - return desc->bd_nob_transferred; - - ctx = req->rq_cli_ctx; - if (ctx->cc_ops->unwrap_bulk) { - rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc); - if (rc < 0) - return rc; - } - return desc->bd_nob_transferred; -} -EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read); - -/** - * This is called after unwrap the reply message. - * return 0 for success or error code. - */ -int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc) -{ - struct ptlrpc_cli_ctx *ctx; - int rc; - - LASSERT(!req->rq_bulk_read && req->rq_bulk_write); - - if (!req->rq_pack_bulk) - return 0; - - ctx = req->rq_cli_ctx; - if (ctx->cc_ops->unwrap_bulk) { - rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc); - if (rc < 0) - return rc; - } - - /* - * if everything is going right, nob should equals to nob_transferred. - * in case of privacy mode, nob_transferred needs to be adjusted. 
- */ - if (desc->bd_nob != desc->bd_nob_transferred) { - CERROR("nob %d doesn't match transferred nob %d\n", - desc->bd_nob, desc->bd_nob_transferred); - return -EPROTO; - } - - return 0; -} -EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write); - -/**************************************** - * user descriptor helpers * - ****************************************/ - -int sptlrpc_current_user_desc_size(void) -{ - int ngroups; - - ngroups = current_ngroups; - - if (ngroups > LUSTRE_MAX_GROUPS) - ngroups = LUSTRE_MAX_GROUPS; - return sptlrpc_user_desc_size(ngroups); -} -EXPORT_SYMBOL(sptlrpc_current_user_desc_size); - -int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset) -{ - struct ptlrpc_user_desc *pud; - - pud = lustre_msg_buf(msg, offset, 0); - - if (!pud) - return -EINVAL; - - pud->pud_uid = from_kuid(&init_user_ns, current_uid()); - pud->pud_gid = from_kgid(&init_user_ns, current_gid()); - pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid()); - pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid()); - pud->pud_cap = current_cap().cap[0]; - pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4; - - task_lock(current); - if (pud->pud_ngroups > current_ngroups) - pud->pud_ngroups = current_ngroups; - memcpy(pud->pud_groups, current_cred()->group_info->gid, - pud->pud_ngroups * sizeof(__u32)); - task_unlock(current); - - return 0; -} -EXPORT_SYMBOL(sptlrpc_pack_user_desc); - -int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed) -{ - struct ptlrpc_user_desc *pud; - int i; - - pud = lustre_msg_buf(msg, offset, sizeof(*pud)); - if (!pud) - return -EINVAL; - - if (swabbed) { - __swab32s(&pud->pud_uid); - __swab32s(&pud->pud_gid); - __swab32s(&pud->pud_fsuid); - __swab32s(&pud->pud_fsgid); - __swab32s(&pud->pud_cap); - __swab32s(&pud->pud_ngroups); - } - - if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) { - CERROR("%u groups is too large\n", pud->pud_ngroups); - return -EINVAL; - } - - if (sizeof(*pud) + pud->pud_ngroups * 
sizeof(__u32) > - msg->lm_buflens[offset]) { - CERROR("%u groups are claimed but bufsize only %u\n", - pud->pud_ngroups, msg->lm_buflens[offset]); - return -EINVAL; - } - - if (swabbed) { - for (i = 0; i < pud->pud_ngroups; i++) - __swab32s(&pud->pud_groups[i]); - } - - return 0; -} -EXPORT_SYMBOL(sptlrpc_unpack_user_desc); - -/**************************************** - * misc helpers * - ****************************************/ - -const char *sec2target_str(struct ptlrpc_sec *sec) -{ - if (!sec || !sec->ps_import || !sec->ps_import->imp_obd) - return "*"; - if (sec_is_reverse(sec)) - return "c"; - return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid); -} -EXPORT_SYMBOL(sec2target_str); - -/* - * return true if the bulk data is protected - */ -bool sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr) -{ - switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) { - case SPTLRPC_BULK_SVC_INTG: - case SPTLRPC_BULK_SVC_PRIV: - return true; - default: - return false; - } -} -EXPORT_SYMBOL(sptlrpc_flavor_has_bulk); - -/**************************************** - * crypto API helper/alloc blkciper * - ****************************************/ - -/**************************************** - * initialize/finalize * - ****************************************/ - -int sptlrpc_init(void) -{ - int rc; - - rwlock_init(&policy_lock); - - rc = sptlrpc_gc_init(); - if (rc) - goto out; - - rc = sptlrpc_conf_init(); - if (rc) - goto out_gc; - - rc = sptlrpc_enc_pool_init(); - if (rc) - goto out_conf; - - rc = sptlrpc_null_init(); - if (rc) - goto out_pool; - - rc = sptlrpc_plain_init(); - if (rc) - goto out_null; - - sptlrpc_lproc_init(); - - return 0; - -out_null: - sptlrpc_null_fini(); -out_pool: - sptlrpc_enc_pool_fini(); -out_conf: - sptlrpc_conf_fini(); -out_gc: - sptlrpc_gc_fini(); -out: - return rc; -} - -void sptlrpc_fini(void) -{ - sptlrpc_lproc_fini(); - sptlrpc_plain_fini(); - sptlrpc_null_fini(); - sptlrpc_enc_pool_fini(); - sptlrpc_conf_fini(); - sptlrpc_gc_fini(); -} 
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c deleted file mode 100644 index 625b9520d78f..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c +++ /dev/null @@ -1,572 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/ptlrpc/sec_bulk.c - * - * Author: Eric Mei - */ - -#define DEBUG_SUBSYSTEM S_SEC - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -/**************************************** - * bulk encryption page pools * - ****************************************/ - -#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *)) -#define PAGES_PER_POOL (POINTERS_PER_PAGE) - -#define IDLE_IDX_MAX (100) -#define IDLE_IDX_WEIGHT (3) - -#define CACHE_QUIESCENT_PERIOD (20) - -static struct ptlrpc_enc_page_pool { - /* - * constants - */ - unsigned long epp_max_pages; /* maximum pages can hold, const */ - unsigned int epp_max_pools; /* number of pools, const */ - - /* - * wait queue in case of not enough free pages. - */ - wait_queue_head_t epp_waitq; /* waiting threads */ - unsigned int epp_waitqlen; /* wait queue length */ - unsigned long epp_pages_short; /* # of pages wanted of in-q users */ - unsigned int epp_growing:1; /* during adding pages */ - - /* - * indicating how idle the pools are, from 0 to MAX_IDLE_IDX - * this is counted based on each time when getting pages from - * the pools, not based on time. which means in case that system - * is idled for a while but the idle_idx might still be low if no - * activities happened in the pools. 
- */ - unsigned long epp_idle_idx; - - /* last shrink time due to mem tight */ - time64_t epp_last_shrink; - time64_t epp_last_access; - - /* - * in-pool pages bookkeeping - */ - spinlock_t epp_lock; /* protect following fields */ - unsigned long epp_total_pages; /* total pages in pools */ - unsigned long epp_free_pages; /* current pages available */ - - /* - * statistics - */ - unsigned long epp_st_max_pages; /* # of pages ever reached */ - unsigned int epp_st_grows; /* # of grows */ - unsigned int epp_st_grow_fails; /* # of add pages failures */ - unsigned int epp_st_shrinks; /* # of shrinks */ - unsigned long epp_st_access; /* # of access */ - unsigned long epp_st_missings; /* # of cache missing */ - unsigned long epp_st_lowfree; /* lowest free pages reached */ - unsigned int epp_st_max_wqlen; /* highest waitqueue length */ - unsigned long epp_st_max_wait; /* in jiffies */ - unsigned long epp_st_outofmem; /* # of out of mem requests */ - /* - * pointers to pools - */ - struct page ***epp_pools; -} page_pools; - -/* - * /sys/kernel/debug/lustre/sptlrpc/encrypt_page_pools - */ -int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v) -{ - spin_lock(&page_pools.epp_lock); - - seq_printf(m, - "physical pages: %lu\n" - "pages per pool: %lu\n" - "max pages: %lu\n" - "max pools: %u\n" - "total pages: %lu\n" - "total free: %lu\n" - "idle index: %lu/100\n" - "last shrink: %lds\n" - "last access: %lds\n" - "max pages reached: %lu\n" - "grows: %u\n" - "grows failure: %u\n" - "shrinks: %u\n" - "cache access: %lu\n" - "cache missing: %lu\n" - "low free mark: %lu\n" - "max waitqueue depth: %u\n" - "max wait time: %ld/%lu\n" - "out of mem: %lu\n", - totalram_pages, - PAGES_PER_POOL, - page_pools.epp_max_pages, - page_pools.epp_max_pools, - page_pools.epp_total_pages, - page_pools.epp_free_pages, - page_pools.epp_idle_idx, - (long)(ktime_get_seconds() - page_pools.epp_last_shrink), - (long)(ktime_get_seconds() - page_pools.epp_last_access), - page_pools.epp_st_max_pages, 
- page_pools.epp_st_grows, - page_pools.epp_st_grow_fails, - page_pools.epp_st_shrinks, - page_pools.epp_st_access, - page_pools.epp_st_missings, - page_pools.epp_st_lowfree, - page_pools.epp_st_max_wqlen, - page_pools.epp_st_max_wait, - msecs_to_jiffies(MSEC_PER_SEC), - page_pools.epp_st_outofmem); - - spin_unlock(&page_pools.epp_lock); - - return 0; -} - -static void enc_pools_release_free_pages(long npages) -{ - int p_idx, g_idx; - int p_idx_max1, p_idx_max2; - - LASSERT(npages > 0); - LASSERT(npages <= page_pools.epp_free_pages); - LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages); - - /* max pool index before the release */ - p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL; - - page_pools.epp_free_pages -= npages; - page_pools.epp_total_pages -= npages; - - /* max pool index after the release */ - p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 : - ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL); - - p_idx = page_pools.epp_free_pages / PAGES_PER_POOL; - g_idx = page_pools.epp_free_pages % PAGES_PER_POOL; - LASSERT(page_pools.epp_pools[p_idx]); - - while (npages--) { - LASSERT(page_pools.epp_pools[p_idx]); - LASSERT(page_pools.epp_pools[p_idx][g_idx]); - - __free_page(page_pools.epp_pools[p_idx][g_idx]); - page_pools.epp_pools[p_idx][g_idx] = NULL; - - if (++g_idx == PAGES_PER_POOL) { - p_idx++; - g_idx = 0; - } - } - - /* free unused pools */ - while (p_idx_max1 < p_idx_max2) { - LASSERT(page_pools.epp_pools[p_idx_max2]); - kfree(page_pools.epp_pools[p_idx_max2]); - page_pools.epp_pools[p_idx_max2] = NULL; - p_idx_max2--; - } -} - -/* - * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool. - */ -static unsigned long enc_pools_shrink_count(struct shrinker *s, - struct shrink_control *sc) -{ - /* - * if no pool access for a long time, we consider it's fully idle. - * a little race here is fine. 
- */ - if (unlikely(ktime_get_seconds() - page_pools.epp_last_access > - CACHE_QUIESCENT_PERIOD)) { - spin_lock(&page_pools.epp_lock); - page_pools.epp_idle_idx = IDLE_IDX_MAX; - spin_unlock(&page_pools.epp_lock); - } - - LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX); - return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) * - (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX; -} - -/* - * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool. - */ -static unsigned long enc_pools_shrink_scan(struct shrinker *s, - struct shrink_control *sc) -{ - spin_lock(&page_pools.epp_lock); - sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan, - page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES); - if (sc->nr_to_scan > 0) { - enc_pools_release_free_pages(sc->nr_to_scan); - CDEBUG(D_SEC, "released %ld pages, %ld left\n", - (long)sc->nr_to_scan, page_pools.epp_free_pages); - - page_pools.epp_st_shrinks++; - page_pools.epp_last_shrink = ktime_get_seconds(); - } - spin_unlock(&page_pools.epp_lock); - - /* - * if no pool access for a long time, we consider it's fully idle. - * a little race here is fine. - */ - if (unlikely(ktime_get_seconds() - page_pools.epp_last_access > - CACHE_QUIESCENT_PERIOD)) { - spin_lock(&page_pools.epp_lock); - page_pools.epp_idle_idx = IDLE_IDX_MAX; - spin_unlock(&page_pools.epp_lock); - } - - LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX); - return sc->nr_to_scan; -} - -static inline -int npages_to_npools(unsigned long npages) -{ - return (int)DIV_ROUND_UP(npages, PAGES_PER_POOL); -} - -/* - * return how many pages cleaned up. 
- */ -static unsigned long enc_pools_cleanup(struct page ***pools, int npools) -{ - unsigned long cleaned = 0; - int i, j; - - for (i = 0; i < npools; i++) { - if (pools[i]) { - for (j = 0; j < PAGES_PER_POOL; j++) { - if (pools[i][j]) { - __free_page(pools[i][j]); - cleaned++; - } - } - kfree(pools[i]); - pools[i] = NULL; - } - } - - return cleaned; -} - -static inline void enc_pools_wakeup(void) -{ - assert_spin_locked(&page_pools.epp_lock); - - if (unlikely(page_pools.epp_waitqlen)) { - LASSERT(waitqueue_active(&page_pools.epp_waitq)); - wake_up_all(&page_pools.epp_waitq); - } -} - -/* - * Export the number of free pages in the pool - */ -int get_free_pages_in_pool(void) -{ - return page_pools.epp_free_pages; -} - -/* - * Let outside world know if enc_pool full capacity is reached - */ -int pool_is_at_full_capacity(void) -{ - return (page_pools.epp_total_pages == page_pools.epp_max_pages); -} - -void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) -{ - int p_idx, g_idx; - int i; - - LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); - - if (!GET_ENC_KIOV(desc)) - return; - - LASSERT(desc->bd_iov_count > 0); - - spin_lock(&page_pools.epp_lock); - - p_idx = page_pools.epp_free_pages / PAGES_PER_POOL; - g_idx = page_pools.epp_free_pages % PAGES_PER_POOL; - - LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <= - page_pools.epp_total_pages); - LASSERT(page_pools.epp_pools[p_idx]); - - for (i = 0; i < desc->bd_iov_count; i++) { - LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page); - LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]); - LASSERT(!page_pools.epp_pools[p_idx][g_idx]); - - page_pools.epp_pools[p_idx][g_idx] = - BD_GET_ENC_KIOV(desc, i).bv_page; - - if (++g_idx == PAGES_PER_POOL) { - p_idx++; - g_idx = 0; - } - } - - page_pools.epp_free_pages += desc->bd_iov_count; - - enc_pools_wakeup(); - - spin_unlock(&page_pools.epp_lock); - - kfree(GET_ENC_KIOV(desc)); - GET_ENC_KIOV(desc) = NULL; -} - -static inline void enc_pools_alloc(void) -{ - 
LASSERT(page_pools.epp_max_pools); - page_pools.epp_pools = - kvzalloc(page_pools.epp_max_pools * - sizeof(*page_pools.epp_pools), - GFP_KERNEL); -} - -static inline void enc_pools_free(void) -{ - LASSERT(page_pools.epp_max_pools); - LASSERT(page_pools.epp_pools); - - kvfree(page_pools.epp_pools); -} - -static struct shrinker pools_shrinker = { - .count_objects = enc_pools_shrink_count, - .scan_objects = enc_pools_shrink_scan, - .seeks = DEFAULT_SEEKS, -}; - -int sptlrpc_enc_pool_init(void) -{ - int rc; - - /* - * maximum capacity is 1/8 of total physical memory. - * is the 1/8 a good number? - */ - page_pools.epp_max_pages = totalram_pages / 8; - page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages); - - init_waitqueue_head(&page_pools.epp_waitq); - page_pools.epp_waitqlen = 0; - page_pools.epp_pages_short = 0; - - page_pools.epp_growing = 0; - - page_pools.epp_idle_idx = 0; - page_pools.epp_last_shrink = ktime_get_seconds(); - page_pools.epp_last_access = ktime_get_seconds(); - - spin_lock_init(&page_pools.epp_lock); - page_pools.epp_total_pages = 0; - page_pools.epp_free_pages = 0; - - page_pools.epp_st_max_pages = 0; - page_pools.epp_st_grows = 0; - page_pools.epp_st_grow_fails = 0; - page_pools.epp_st_shrinks = 0; - page_pools.epp_st_access = 0; - page_pools.epp_st_missings = 0; - page_pools.epp_st_lowfree = 0; - page_pools.epp_st_max_wqlen = 0; - page_pools.epp_st_max_wait = 0; - page_pools.epp_st_outofmem = 0; - - enc_pools_alloc(); - if (!page_pools.epp_pools) - return -ENOMEM; - - rc = register_shrinker(&pools_shrinker); - if (rc) - enc_pools_free(); - - return rc; -} - -void sptlrpc_enc_pool_fini(void) -{ - unsigned long cleaned, npools; - - LASSERT(page_pools.epp_pools); - LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages); - - unregister_shrinker(&pools_shrinker); - - npools = npages_to_npools(page_pools.epp_total_pages); - cleaned = enc_pools_cleanup(page_pools.epp_pools, npools); - LASSERT(cleaned == 
page_pools.epp_total_pages); - - enc_pools_free(); - - if (page_pools.epp_st_access > 0) { - CDEBUG(D_SEC, - "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld, out of mem %lu\n", - page_pools.epp_st_max_pages, page_pools.epp_st_grows, - page_pools.epp_st_grow_fails, - page_pools.epp_st_shrinks, page_pools.epp_st_access, - page_pools.epp_st_missings, page_pools.epp_st_max_wqlen, - page_pools.epp_st_max_wait, - msecs_to_jiffies(MSEC_PER_SEC), - page_pools.epp_st_outofmem); - } -} - -static int cfs_hash_alg_id[] = { - [BULK_HASH_ALG_NULL] = CFS_HASH_ALG_NULL, - [BULK_HASH_ALG_ADLER32] = CFS_HASH_ALG_ADLER32, - [BULK_HASH_ALG_CRC32] = CFS_HASH_ALG_CRC32, - [BULK_HASH_ALG_MD5] = CFS_HASH_ALG_MD5, - [BULK_HASH_ALG_SHA1] = CFS_HASH_ALG_SHA1, - [BULK_HASH_ALG_SHA256] = CFS_HASH_ALG_SHA256, - [BULK_HASH_ALG_SHA384] = CFS_HASH_ALG_SHA384, - [BULK_HASH_ALG_SHA512] = CFS_HASH_ALG_SHA512, -}; - -const char *sptlrpc_get_hash_name(__u8 hash_alg) -{ - return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]); -} - -__u8 sptlrpc_get_hash_alg(const char *algname) -{ - return cfs_crypto_hash_alg(algname); -} - -int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed) -{ - struct ptlrpc_bulk_sec_desc *bsd; - int size = msg->lm_buflens[offset]; - - bsd = lustre_msg_buf(msg, offset, sizeof(*bsd)); - if (!bsd) { - CERROR("Invalid bulk sec desc: size %d\n", size); - return -EINVAL; - } - - if (swabbed) - __swab32s(&bsd->bsd_nob); - - if (unlikely(bsd->bsd_version != 0)) { - CERROR("Unexpected version %u\n", bsd->bsd_version); - return -EPROTO; - } - - if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) { - CERROR("Invalid type %u\n", bsd->bsd_type); - return -EPROTO; - } - - /* FIXME more sanity check here */ - - if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL && - bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG && - bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) { - CERROR("Invalid svc %u\n", bsd->bsd_svc); - return -EPROTO; - } - - 
return 0; -} -EXPORT_SYMBOL(bulk_sec_desc_unpack); - -int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg, - void *buf, int buflen) -{ - struct ahash_request *hdesc; - int hashsize; - unsigned int bufsize; - int i, err; - - LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX); - LASSERT(buflen >= 4); - - hdesc = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0); - if (IS_ERR(hdesc)) { - CERROR("Unable to initialize checksum hash %s\n", - cfs_crypto_hash_name(cfs_hash_alg_id[alg])); - return PTR_ERR(hdesc); - } - - hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]); - - for (i = 0; i < desc->bd_iov_count; i++) { - cfs_crypto_hash_update_page(hdesc, - BD_GET_KIOV(desc, i).bv_page, - BD_GET_KIOV(desc, i).bv_offset & - ~PAGE_MASK, - BD_GET_KIOV(desc, i).bv_len); - } - - if (hashsize > buflen) { - unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX]; - - bufsize = sizeof(hashbuf); - LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n", - bufsize, hashsize); - err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize); - memcpy(buf, hashbuf, buflen); - } else { - bufsize = buflen; - err = cfs_crypto_hash_final(hdesc, buf, &bufsize); - } - - return err; -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c deleted file mode 100644 index 2389f9a8f534..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c +++ /dev/null @@ -1,850 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_SEC - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd) -{ - const char *type = obd->obd_type->typ_name; - - if (!strcmp(type, LUSTRE_MDT_NAME)) - return LUSTRE_SP_MDT; - if (!strcmp(type, LUSTRE_OST_NAME)) - return LUSTRE_SP_OST; - if (!strcmp(type, LUSTRE_MGS_NAME)) - return LUSTRE_SP_MGS; - - CERROR("unknown target %p(%s)\n", obd, type); - return LUSTRE_SP_ANY; -} - -/**************************************** - * user supplied flavor string parsing * - ****************************************/ - -/* - * format: [-] - */ -int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr) -{ - char buf[32]; - char *bulk, *alg; - - memset(flvr, 0, sizeof(*flvr)); - - if (!str || str[0] == '\0') { - flvr->sf_rpc = SPTLRPC_FLVR_INVALID; - return 0; - } - - strlcpy(buf, str, sizeof(buf)); - - bulk = strchr(buf, '-'); - if (bulk) - *bulk++ = '\0'; - - flvr->sf_rpc = sptlrpc_name2flavor_base(buf); - if (flvr->sf_rpc == SPTLRPC_FLVR_INVALID) - goto err_out; - - /* - * currently only base flavor "plain" can have bulk specification. 
- */ - if (flvr->sf_rpc == SPTLRPC_FLVR_PLAIN) { - flvr->u_bulk.hash.hash_alg = BULK_HASH_ALG_ADLER32; - if (bulk) { - /* - * format: plain-hash: - */ - alg = strchr(bulk, ':'); - if (!alg) - goto err_out; - *alg++ = '\0'; - - if (strcmp(bulk, "hash")) - goto err_out; - - flvr->u_bulk.hash.hash_alg = sptlrpc_get_hash_alg(alg); - if (flvr->u_bulk.hash.hash_alg >= BULK_HASH_ALG_MAX) - goto err_out; - } - - if (flvr->u_bulk.hash.hash_alg == BULK_HASH_ALG_NULL) - flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_NULL); - else - flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_INTG); - } else { - if (bulk) - goto err_out; - } - - flvr->sf_flags = 0; - return 0; - -err_out: - CERROR("invalid flavor string: %s\n", str); - return -EINVAL; -} -EXPORT_SYMBOL(sptlrpc_parse_flavor); - -/**************************************** - * configure rules * - ****************************************/ - -static void get_default_flavor(struct sptlrpc_flavor *sf) -{ - memset(sf, 0, sizeof(*sf)); - - sf->sf_rpc = SPTLRPC_FLVR_NULL; - sf->sf_flags = 0; -} - -static void sptlrpc_rule_init(struct sptlrpc_rule *rule) -{ - rule->sr_netid = LNET_NIDNET(LNET_NID_ANY); - rule->sr_from = LUSTRE_SP_ANY; - rule->sr_to = LUSTRE_SP_ANY; - rule->sr_padding = 0; - - get_default_flavor(&rule->sr_flvr); -} - -/* - * format: network[.direction]=flavor - */ -static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule) -{ - char *flavor, *dir; - int rc; - - sptlrpc_rule_init(rule); - - flavor = strchr(param, '='); - if (!flavor) { - CERROR("invalid param, no '='\n"); - return -EINVAL; - } - *flavor++ = '\0'; - - dir = strchr(param, '.'); - if (dir) - *dir++ = '\0'; - - /* 1.1 network */ - if (strcmp(param, "default")) { - rule->sr_netid = libcfs_str2net(param); - if (rule->sr_netid == LNET_NIDNET(LNET_NID_ANY)) { - CERROR("invalid network name: %s\n", param); - return -EINVAL; - } - } - - /* 1.2 direction */ - if (dir) { - if (!strcmp(dir, "mdt2ost")) { - rule->sr_from = LUSTRE_SP_MDT; - rule->sr_to 
= LUSTRE_SP_OST; - } else if (!strcmp(dir, "mdt2mdt")) { - rule->sr_from = LUSTRE_SP_MDT; - rule->sr_to = LUSTRE_SP_MDT; - } else if (!strcmp(dir, "cli2ost")) { - rule->sr_from = LUSTRE_SP_CLI; - rule->sr_to = LUSTRE_SP_OST; - } else if (!strcmp(dir, "cli2mdt")) { - rule->sr_from = LUSTRE_SP_CLI; - rule->sr_to = LUSTRE_SP_MDT; - } else { - CERROR("invalid rule dir segment: %s\n", dir); - return -EINVAL; - } - } - - /* 2.1 flavor */ - rc = sptlrpc_parse_flavor(flavor, &rule->sr_flvr); - if (rc) - return -EINVAL; - - return 0; -} - -static void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset) -{ - LASSERT(rset->srs_nslot || - (rset->srs_nrule == 0 && !rset->srs_rules)); - - if (rset->srs_nslot) { - kfree(rset->srs_rules); - sptlrpc_rule_set_init(rset); - } -} - -/* - * return 0 if the rule set could accommodate one more rule. - */ -static int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset) -{ - struct sptlrpc_rule *rules; - int nslot; - - might_sleep(); - - if (rset->srs_nrule < rset->srs_nslot) - return 0; - - nslot = rset->srs_nslot + 8; - - /* better use realloc() if available */ - rules = kcalloc(nslot, sizeof(*rset->srs_rules), GFP_NOFS); - if (!rules) - return -ENOMEM; - - if (rset->srs_nrule) { - LASSERT(rset->srs_nslot && rset->srs_rules); - memcpy(rules, rset->srs_rules, - rset->srs_nrule * sizeof(*rset->srs_rules)); - - kfree(rset->srs_rules); - } - - rset->srs_rules = rules; - rset->srs_nslot = nslot; - return 0; -} - -static inline int rule_spec_dir(struct sptlrpc_rule *rule) -{ - return (rule->sr_from != LUSTRE_SP_ANY || - rule->sr_to != LUSTRE_SP_ANY); -} - -static inline int rule_spec_net(struct sptlrpc_rule *rule) -{ - return (rule->sr_netid != LNET_NIDNET(LNET_NID_ANY)); -} - -static inline int rule_match_dir(struct sptlrpc_rule *r1, - struct sptlrpc_rule *r2) -{ - return (r1->sr_from == r2->sr_from && r1->sr_to == r2->sr_to); -} - -static inline int rule_match_net(struct sptlrpc_rule *r1, - struct sptlrpc_rule *r2) -{ - return 
(r1->sr_netid == r2->sr_netid); -} - -/* - * merge @rule into @rset. - * the @rset slots might be expanded. - */ -static int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *rset, - struct sptlrpc_rule *rule) -{ - struct sptlrpc_rule *p = rset->srs_rules; - int spec_dir, spec_net; - int rc, n, match = 0; - - might_sleep(); - - spec_net = rule_spec_net(rule); - spec_dir = rule_spec_dir(rule); - - for (n = 0; n < rset->srs_nrule; n++) { - p = &rset->srs_rules[n]; - - /* test network match, if failed: - * - spec rule: skip rules which is also spec rule match, until - * we hit a wild rule, which means no more chance - * - wild rule: skip until reach the one which is also wild - * and matches - */ - if (!rule_match_net(p, rule)) { - if (spec_net) { - if (rule_spec_net(p)) - continue; - else - break; - } else { - continue; - } - } - - /* test dir match, same logic as net matching */ - if (!rule_match_dir(p, rule)) { - if (spec_dir) { - if (rule_spec_dir(p)) - continue; - else - break; - } else { - continue; - } - } - - /* find a match */ - match = 1; - break; - } - - if (match) { - LASSERT(n >= 0 && n < rset->srs_nrule); - - if (rule->sr_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) { - /* remove this rule */ - if (n < rset->srs_nrule - 1) - memmove(&rset->srs_rules[n], - &rset->srs_rules[n + 1], - (rset->srs_nrule - n - 1) * - sizeof(*rule)); - rset->srs_nrule--; - } else { - /* override the rule */ - memcpy(&rset->srs_rules[n], rule, sizeof(*rule)); - } - } else { - LASSERT(n >= 0 && n <= rset->srs_nrule); - - if (rule->sr_flvr.sf_rpc != SPTLRPC_FLVR_INVALID) { - rc = sptlrpc_rule_set_expand(rset); - if (rc) - return rc; - - if (n < rset->srs_nrule) - memmove(&rset->srs_rules[n + 1], - &rset->srs_rules[n], - (rset->srs_nrule - n) * sizeof(*rule)); - memcpy(&rset->srs_rules[n], rule, sizeof(*rule)); - rset->srs_nrule++; - } else { - CDEBUG(D_CONFIG, "ignore the unmatched deletion\n"); - } - } - - return 0; -} - -/** - * given from/to/nid, determine a matching flavor in ruleset. 
- * return 1 if a match found, otherwise return 0. - */ -static int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset, - enum lustre_sec_part from, - enum lustre_sec_part to, - lnet_nid_t nid, - struct sptlrpc_flavor *sf) -{ - struct sptlrpc_rule *r; - int n; - - for (n = 0; n < rset->srs_nrule; n++) { - r = &rset->srs_rules[n]; - - if (LNET_NIDNET(nid) != LNET_NIDNET(LNET_NID_ANY) && - r->sr_netid != LNET_NIDNET(LNET_NID_ANY) && - LNET_NIDNET(nid) != r->sr_netid) - continue; - - if (from != LUSTRE_SP_ANY && r->sr_from != LUSTRE_SP_ANY && - from != r->sr_from) - continue; - - if (to != LUSTRE_SP_ANY && r->sr_to != LUSTRE_SP_ANY && - to != r->sr_to) - continue; - - *sf = r->sr_flvr; - return 1; - } - - return 0; -} - -/********************************** - * sptlrpc configuration support * - **********************************/ - -struct sptlrpc_conf_tgt { - struct list_head sct_list; - char sct_name[MAX_OBD_NAME]; - struct sptlrpc_rule_set sct_rset; -}; - -struct sptlrpc_conf { - struct list_head sc_list; - char sc_fsname[MTI_NAME_MAXLEN]; - unsigned int sc_modified; /* modified during updating */ - unsigned int sc_updated:1, /* updated copy from MGS */ - sc_local:1; /* local copy from target */ - struct sptlrpc_rule_set sc_rset; /* fs general rules */ - struct list_head sc_tgts; /* target-specific rules */ -}; - -static struct mutex sptlrpc_conf_lock; -static LIST_HEAD(sptlrpc_confs); - -static inline int is_hex(char c) -{ - return ((c >= '0' && c <= '9') || - (c >= 'a' && c <= 'f')); -} - -static void target2fsname(const char *tgt, char *fsname, int buflen) -{ - const char *ptr; - int len; - - ptr = strrchr(tgt, '-'); - if (ptr) { - if ((strncmp(ptr, "-MDT", 4) != 0 && - strncmp(ptr, "-OST", 4) != 0) || - !is_hex(ptr[4]) || !is_hex(ptr[5]) || - !is_hex(ptr[6]) || !is_hex(ptr[7])) - ptr = NULL; - } - - /* if we didn't find the pattern, treat the whole string as fsname */ - if (!ptr) - len = strlen(tgt); - else - len = ptr - tgt; - - len = min(len, buflen - 1); - 
memcpy(fsname, tgt, len); - fsname[len] = '\0'; -} - -static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf) -{ - struct sptlrpc_conf_tgt *conf_tgt, *conf_tgt_next; - - sptlrpc_rule_set_free(&conf->sc_rset); - - list_for_each_entry_safe(conf_tgt, conf_tgt_next, - &conf->sc_tgts, sct_list) { - sptlrpc_rule_set_free(&conf_tgt->sct_rset); - list_del(&conf_tgt->sct_list); - kfree(conf_tgt); - } - LASSERT(list_empty(&conf->sc_tgts)); - - conf->sc_updated = 0; - conf->sc_local = 0; -} - -static void sptlrpc_conf_free(struct sptlrpc_conf *conf) -{ - CDEBUG(D_SEC, "free sptlrpc conf %s\n", conf->sc_fsname); - - sptlrpc_conf_free_rsets(conf); - list_del(&conf->sc_list); - kfree(conf); -} - -static -struct sptlrpc_conf_tgt *sptlrpc_conf_get_tgt(struct sptlrpc_conf *conf, - const char *name, - int create) -{ - struct sptlrpc_conf_tgt *conf_tgt; - - list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) { - if (strcmp(conf_tgt->sct_name, name) == 0) - return conf_tgt; - } - - if (!create) - return NULL; - - conf_tgt = kzalloc(sizeof(*conf_tgt), GFP_NOFS); - if (conf_tgt) { - strlcpy(conf_tgt->sct_name, name, sizeof(conf_tgt->sct_name)); - sptlrpc_rule_set_init(&conf_tgt->sct_rset); - list_add(&conf_tgt->sct_list, &conf->sc_tgts); - } - - return conf_tgt; -} - -static -struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname, - int create) -{ - struct sptlrpc_conf *conf; - size_t len; - - list_for_each_entry(conf, &sptlrpc_confs, sc_list) { - if (strcmp(conf->sc_fsname, fsname) == 0) - return conf; - } - - if (!create) - return NULL; - - conf = kzalloc(sizeof(*conf), GFP_NOFS); - if (!conf) - return NULL; - - len = strlcpy(conf->sc_fsname, fsname, sizeof(conf->sc_fsname)); - if (len >= sizeof(conf->sc_fsname)) { - kfree(conf); - return NULL; - } - sptlrpc_rule_set_init(&conf->sc_rset); - INIT_LIST_HEAD(&conf->sc_tgts); - list_add(&conf->sc_list, &sptlrpc_confs); - - CDEBUG(D_SEC, "create sptlrpc conf %s\n", conf->sc_fsname); - return conf; -} - -/** - * caller must 
hold conf_lock already. - */ -static int sptlrpc_conf_merge_rule(struct sptlrpc_conf *conf, - const char *target, - struct sptlrpc_rule *rule) -{ - struct sptlrpc_conf_tgt *conf_tgt; - struct sptlrpc_rule_set *rule_set; - - /* fsname == target means general rules for the whole fs */ - if (strcmp(conf->sc_fsname, target) == 0) { - rule_set = &conf->sc_rset; - } else { - conf_tgt = sptlrpc_conf_get_tgt(conf, target, 1); - if (conf_tgt) { - rule_set = &conf_tgt->sct_rset; - } else { - CERROR("out of memory, can't merge rule!\n"); - return -ENOMEM; - } - } - - return sptlrpc_rule_set_merge(rule_set, rule); -} - -/** - * process one LCFG_SPTLRPC_CONF record. if \a conf is NULL, we - * find one through the target name in the record inside conf_lock; - * otherwise means caller already hold conf_lock. - */ -static int __sptlrpc_process_config(struct lustre_cfg *lcfg, - struct sptlrpc_conf *conf) -{ - char *target, *param; - char fsname[MTI_NAME_MAXLEN]; - struct sptlrpc_rule rule; - int rc; - - target = lustre_cfg_string(lcfg, 1); - if (!target) { - CERROR("missing target name\n"); - return -EINVAL; - } - - param = lustre_cfg_string(lcfg, 2); - if (!param) { - CERROR("missing parameter\n"); - return -EINVAL; - } - - CDEBUG(D_SEC, "processing rule: %s.%s\n", target, param); - - /* parse rule to make sure the format is correct */ - if (strncmp(param, PARAM_SRPC_FLVR, sizeof(PARAM_SRPC_FLVR) - 1) != 0) { - CERROR("Invalid sptlrpc parameter: %s\n", param); - return -EINVAL; - } - param += sizeof(PARAM_SRPC_FLVR) - 1; - - rc = sptlrpc_parse_rule(param, &rule); - if (rc) - return -EINVAL; - - if (!conf) { - target2fsname(target, fsname, sizeof(fsname)); - - mutex_lock(&sptlrpc_conf_lock); - conf = sptlrpc_conf_get(fsname, 0); - if (!conf) { - CERROR("can't find conf\n"); - rc = -ENOMEM; - } else { - rc = sptlrpc_conf_merge_rule(conf, target, &rule); - } - mutex_unlock(&sptlrpc_conf_lock); - } else { - LASSERT(mutex_is_locked(&sptlrpc_conf_lock)); - rc = 
sptlrpc_conf_merge_rule(conf, target, &rule); - } - - if (rc == 0) - conf->sc_modified++; - - return rc; -} - -int sptlrpc_process_config(struct lustre_cfg *lcfg) -{ - return __sptlrpc_process_config(lcfg, NULL); -} -EXPORT_SYMBOL(sptlrpc_process_config); - -static int logname2fsname(const char *logname, char *buf, int buflen) -{ - char *ptr; - int len; - - ptr = strrchr(logname, '-'); - if (!ptr || strcmp(ptr, "-sptlrpc")) { - CERROR("%s is not a sptlrpc config log\n", logname); - return -EINVAL; - } - - len = min((int)(ptr - logname), buflen - 1); - - memcpy(buf, logname, len); - buf[len] = '\0'; - return 0; -} - -void sptlrpc_conf_log_update_begin(const char *logname) -{ - struct sptlrpc_conf *conf; - char fsname[16]; - - if (logname2fsname(logname, fsname, sizeof(fsname))) - return; - - mutex_lock(&sptlrpc_conf_lock); - - conf = sptlrpc_conf_get(fsname, 0); - if (conf) { - if (conf->sc_local) { - LASSERT(conf->sc_updated == 0); - sptlrpc_conf_free_rsets(conf); - } - conf->sc_modified = 0; - } - - mutex_unlock(&sptlrpc_conf_lock); -} -EXPORT_SYMBOL(sptlrpc_conf_log_update_begin); - -/** - * mark a config log has been updated - */ -void sptlrpc_conf_log_update_end(const char *logname) -{ - struct sptlrpc_conf *conf; - char fsname[16]; - - if (logname2fsname(logname, fsname, sizeof(fsname))) - return; - - mutex_lock(&sptlrpc_conf_lock); - - conf = sptlrpc_conf_get(fsname, 0); - if (conf) { - /* - * if original state is not updated, make sure the - * modified counter > 0 to enforce updating local copy. 
- */ - if (conf->sc_updated == 0) - conf->sc_modified++; - - conf->sc_updated = 1; - } - - mutex_unlock(&sptlrpc_conf_lock); -} -EXPORT_SYMBOL(sptlrpc_conf_log_update_end); - -void sptlrpc_conf_log_start(const char *logname) -{ - char fsname[16]; - - if (logname2fsname(logname, fsname, sizeof(fsname))) - return; - - mutex_lock(&sptlrpc_conf_lock); - sptlrpc_conf_get(fsname, 1); - mutex_unlock(&sptlrpc_conf_lock); -} -EXPORT_SYMBOL(sptlrpc_conf_log_start); - -void sptlrpc_conf_log_stop(const char *logname) -{ - struct sptlrpc_conf *conf; - char fsname[16]; - - if (logname2fsname(logname, fsname, sizeof(fsname))) - return; - - mutex_lock(&sptlrpc_conf_lock); - conf = sptlrpc_conf_get(fsname, 0); - if (conf) - sptlrpc_conf_free(conf); - mutex_unlock(&sptlrpc_conf_lock); -} -EXPORT_SYMBOL(sptlrpc_conf_log_stop); - -static inline void flavor_set_flags(struct sptlrpc_flavor *sf, - enum lustre_sec_part from, - enum lustre_sec_part to, - unsigned int fl_udesc) -{ - /* - * null flavor doesn't need to set any flavor, and in fact - * we'd better not do that because everybody share a single sec. 
- */ - if (sf->sf_rpc == SPTLRPC_FLVR_NULL) - return; - - if (from == LUSTRE_SP_MDT) { - /* MDT->MDT; MDT->OST */ - sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY; - } else if (from == LUSTRE_SP_CLI && to == LUSTRE_SP_OST) { - /* CLI->OST */ - sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_BULK; - } else if (from == LUSTRE_SP_CLI && to == LUSTRE_SP_MDT) { - /* CLI->MDT */ - if (fl_udesc && sf->sf_rpc != SPTLRPC_FLVR_NULL) - sf->sf_flags |= PTLRPC_SEC_FL_UDESC; - } -} - -void sptlrpc_conf_choose_flavor(enum lustre_sec_part from, - enum lustre_sec_part to, - struct obd_uuid *target, - lnet_nid_t nid, - struct sptlrpc_flavor *sf) -{ - struct sptlrpc_conf *conf; - struct sptlrpc_conf_tgt *conf_tgt; - char name[MTI_NAME_MAXLEN]; - int len, rc = 0; - - target2fsname(target->uuid, name, sizeof(name)); - - mutex_lock(&sptlrpc_conf_lock); - - conf = sptlrpc_conf_get(name, 0); - if (!conf) - goto out; - - /* convert uuid name (supposed end with _UUID) to target name */ - len = strlen(target->uuid); - LASSERT(len > 5); - memcpy(name, target->uuid, len - 5); - name[len - 5] = '\0'; - - conf_tgt = sptlrpc_conf_get_tgt(conf, name, 0); - if (conf_tgt) { - rc = sptlrpc_rule_set_choose(&conf_tgt->sct_rset, - from, to, nid, sf); - if (rc) - goto out; - } - - rc = sptlrpc_rule_set_choose(&conf->sc_rset, from, to, nid, sf); -out: - mutex_unlock(&sptlrpc_conf_lock); - - if (rc == 0) - get_default_flavor(sf); - - flavor_set_flags(sf, from, to, 1); -} - -#define SEC_ADAPT_DELAY (10) - -/** - * called by client devices, notify the sptlrpc config has changed and - * do import_sec_adapt later. 
- */ -void sptlrpc_conf_client_adapt(struct obd_device *obd) -{ - struct obd_import *imp; - - LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 || - strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0); - CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid); - - /* serialize with connect/disconnect import */ - down_read_nested(&obd->u.cli.cl_sem, OBD_CLI_SEM_MDCOSC); - - imp = obd->u.cli.cl_import; - if (imp) { - spin_lock(&imp->imp_lock); - if (imp->imp_sec) - imp->imp_sec_expire = ktime_get_real_seconds() + - SEC_ADAPT_DELAY; - spin_unlock(&imp->imp_lock); - } - - up_read(&obd->u.cli.cl_sem); -} -EXPORT_SYMBOL(sptlrpc_conf_client_adapt); - -int sptlrpc_conf_init(void) -{ - mutex_init(&sptlrpc_conf_lock); - return 0; -} - -void sptlrpc_conf_fini(void) -{ - struct sptlrpc_conf *conf, *conf_next; - - mutex_lock(&sptlrpc_conf_lock); - list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) { - sptlrpc_conf_free(conf); - } - LASSERT(list_empty(&sptlrpc_confs)); - mutex_unlock(&sptlrpc_conf_lock); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c deleted file mode 100644 index 2c8bad7b7877..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/sec_gc.c - * - * Author: Eric Mei - */ - -#define DEBUG_SUBSYSTEM S_SEC - -#include - -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -#define SEC_GC_INTERVAL (30 * 60) - -static struct mutex sec_gc_mutex; -static LIST_HEAD(sec_gc_list); -static spinlock_t sec_gc_list_lock; - -static LIST_HEAD(sec_gc_ctx_list); -static spinlock_t sec_gc_ctx_list_lock; - -static atomic_t sec_gc_wait_del = ATOMIC_INIT(0); - -void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec) -{ - LASSERT(sec->ps_policy->sp_cops->gc_ctx); - LASSERT(sec->ps_gc_interval > 0); - LASSERT(list_empty(&sec->ps_gc_list)); - - sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval; - - spin_lock(&sec_gc_list_lock); - list_add_tail(&sec->ps_gc_list, &sec_gc_list); - spin_unlock(&sec_gc_list_lock); - - CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name); -} - -void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec) -{ - if (list_empty(&sec->ps_gc_list)) - return; - - might_sleep(); - - /* signal before list_del to make iteration in gc thread safe */ - atomic_inc(&sec_gc_wait_del); - - spin_lock(&sec_gc_list_lock); - list_del_init(&sec->ps_gc_list); - spin_unlock(&sec_gc_list_lock); - - /* barrier */ - mutex_lock(&sec_gc_mutex); - mutex_unlock(&sec_gc_mutex); - - atomic_dec(&sec_gc_wait_del); - - CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name); -} - -static void sec_process_ctx_list(void) -{ - struct ptlrpc_cli_ctx *ctx; - - 
spin_lock(&sec_gc_ctx_list_lock); - - while (!list_empty(&sec_gc_ctx_list)) { - ctx = list_entry(sec_gc_ctx_list.next, - struct ptlrpc_cli_ctx, cc_gc_chain); - list_del_init(&ctx->cc_gc_chain); - spin_unlock(&sec_gc_ctx_list_lock); - - LASSERT(ctx->cc_sec); - LASSERT(atomic_read(&ctx->cc_refcount) == 1); - CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n", - ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec)); - sptlrpc_cli_ctx_put(ctx, 1); - - spin_lock(&sec_gc_ctx_list_lock); - } - - spin_unlock(&sec_gc_ctx_list_lock); -} - -static void sec_do_gc(struct ptlrpc_sec *sec) -{ - LASSERT(sec->ps_policy->sp_cops->gc_ctx); - - if (unlikely(sec->ps_gc_next == 0)) { - CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n", - sec, sec->ps_policy->sp_name); - return; - } - - CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name); - - if (sec->ps_gc_next > ktime_get_real_seconds()) - return; - - sec->ps_policy->sp_cops->gc_ctx(sec); - sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval; -} - -static void sec_gc_main(struct work_struct *ws); -static DECLARE_DELAYED_WORK(sec_gc_work, sec_gc_main); - -static void sec_gc_main(struct work_struct *ws) -{ - struct ptlrpc_sec *sec; - - sec_process_ctx_list(); -again: - /* go through sec list do gc. - * FIXME here we iterate through the whole list each time which - * is not optimal. we perhaps want to use balanced binary tree - * to trace each sec as order of expiry time. - * another issue here is we wakeup as fixed interval instead of - * according to each sec's expiry time - */ - mutex_lock(&sec_gc_mutex); - list_for_each_entry(sec, &sec_gc_list, ps_gc_list) { - /* if someone is waiting to be deleted, let it - * proceed as soon as possible. 
- */ - if (atomic_read(&sec_gc_wait_del)) { - CDEBUG(D_SEC, "deletion pending, start over\n"); - mutex_unlock(&sec_gc_mutex); - goto again; - } - - sec_do_gc(sec); - } - mutex_unlock(&sec_gc_mutex); - - /* check ctx list again before sleep */ - sec_process_ctx_list(); - schedule_delayed_work(&sec_gc_work, SEC_GC_INTERVAL * HZ); -} - -int sptlrpc_gc_init(void) -{ - mutex_init(&sec_gc_mutex); - spin_lock_init(&sec_gc_list_lock); - spin_lock_init(&sec_gc_ctx_list_lock); - - schedule_delayed_work(&sec_gc_work, 0); - return 0; -} - -void sptlrpc_gc_fini(void) -{ - cancel_delayed_work_sync(&sec_gc_work); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c deleted file mode 100644 index 2bb75ebd5d98..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * lustre/ptlrpc/sec_lproc.c - * - * Author: Eric Mei - */ - -#define DEBUG_SUBSYSTEM S_SEC - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -static char *sec_flags2str(unsigned long flags, char *buf, int bufsize) -{ - buf[0] = '\0'; - - if (flags & PTLRPC_SEC_FL_REVERSE) - strlcat(buf, "reverse,", bufsize); - if (flags & PTLRPC_SEC_FL_ROOTONLY) - strlcat(buf, "rootonly,", bufsize); - if (flags & PTLRPC_SEC_FL_UDESC) - strlcat(buf, "udesc,", bufsize); - if (flags & PTLRPC_SEC_FL_BULK) - strlcat(buf, "bulk,", bufsize); - if (buf[0] == '\0') - strlcat(buf, "-,", bufsize); - - return buf; -} - -static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v) -{ - struct obd_device *dev = seq->private; - struct client_obd *cli = &dev->u.cli; - struct ptlrpc_sec *sec = NULL; - char str[32]; - - LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 || - strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 || - strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0); - - if (cli->cl_import) - sec = sptlrpc_import_sec_ref(cli->cl_import); - if (!sec) - goto out; - - sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)); - - seq_printf(seq, "rpc flavor: %s\n", - sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc)); - seq_printf(seq, "bulk flavor: %s\n", - sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str))); - seq_printf(seq, "flags: %s\n", - sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str))); - seq_printf(seq, "id: %d\n", sec->ps_id); - seq_printf(seq, "refcount: %d\n", - atomic_read(&sec->ps_refcount)); - seq_printf(seq, "nctx: %d\n", atomic_read(&sec->ps_nctx)); - seq_printf(seq, "gc internal %ld\n", sec->ps_gc_interval); - seq_printf(seq, "gc next %lld\n", - sec->ps_gc_interval ? 
- (s64)(sec->ps_gc_next - ktime_get_real_seconds()) : 0ll); - - sptlrpc_sec_put(sec); -out: - return 0; -} - -LPROC_SEQ_FOPS_RO(sptlrpc_info_lprocfs); - -static int sptlrpc_ctxs_lprocfs_seq_show(struct seq_file *seq, void *v) -{ - struct obd_device *dev = seq->private; - struct client_obd *cli = &dev->u.cli; - struct ptlrpc_sec *sec = NULL; - - LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 || - strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 || - strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0); - - if (cli->cl_import) - sec = sptlrpc_import_sec_ref(cli->cl_import); - if (!sec) - goto out; - - if (sec->ps_policy->sp_cops->display) - sec->ps_policy->sp_cops->display(sec, seq); - - sptlrpc_sec_put(sec); -out: - return 0; -} - -LPROC_SEQ_FOPS_RO(sptlrpc_ctxs_lprocfs); - -int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev) -{ - if (strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) != 0 && - strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) != 0 && - strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) != 0) { - CERROR("can't register lproc for obd type %s\n", - dev->obd_type->typ_name); - return -EINVAL; - } - - debugfs_create_file("srpc_info", 0444, dev->obd_debugfs_entry, dev, - &sptlrpc_info_lprocfs_fops); - debugfs_create_file("srpc_contexts", 0444, dev->obd_debugfs_entry, dev, - &sptlrpc_ctxs_lprocfs_fops); - - return 0; -} -EXPORT_SYMBOL(sptlrpc_lprocfs_cliobd_attach); - -LPROC_SEQ_FOPS_RO(sptlrpc_proc_enc_pool); -static struct lprocfs_vars sptlrpc_lprocfs_vars[] = { - { "encrypt_page_pools", &sptlrpc_proc_enc_pool_fops }, - { NULL } -}; - -static struct dentry *sptlrpc_debugfs_dir; - -void sptlrpc_lproc_init(void) -{ - sptlrpc_debugfs_dir = debugfs_create_dir("sptlrpc", debugfs_lustre_root); - ldebugfs_add_vars(sptlrpc_debugfs_dir, sptlrpc_lprocfs_vars, NULL); -} - -void sptlrpc_lproc_fini(void) -{ - debugfs_remove_recursive(sptlrpc_debugfs_dir); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c 
b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c deleted file mode 100644 index ecc387d1b9b4..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c +++ /dev/null @@ -1,459 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/sec_null.c - * - * Author: Eric Mei - */ - -#define DEBUG_SUBSYSTEM S_SEC - -#include -#include -#include -#include -#include - -#include "ptlrpc_internal.h" - -static struct ptlrpc_sec_policy null_policy; -static struct ptlrpc_sec null_sec; -static struct ptlrpc_cli_ctx null_cli_ctx; -static struct ptlrpc_svc_ctx null_svc_ctx; - -/* - * we can temporarily use the topmost 8-bits of lm_secflvr to identify - * the source sec part. 
- */ -static inline -void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp) -{ - msg->lm_secflvr |= (((__u32)sp) & 0xFF) << 24; -} - -static inline -enum lustre_sec_part null_decode_sec_part(struct lustre_msg *msg) -{ - return (msg->lm_secflvr >> 24) & 0xFF; -} - -static int null_ctx_refresh(struct ptlrpc_cli_ctx *ctx) -{ - /* should never reach here */ - LBUG(); - return 0; -} - -static -int null_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) -{ - req->rq_reqbuf->lm_secflvr = SPTLRPC_FLVR_NULL; - - if (!req->rq_import->imp_dlm_fake) { - struct obd_device *obd = req->rq_import->imp_obd; - - null_encode_sec_part(req->rq_reqbuf, - obd->u.cli.cl_sp_me); - } - req->rq_reqdata_len = req->rq_reqlen; - return 0; -} - -static -int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) -{ - __u32 cksums, cksumc; - - LASSERT(req->rq_repdata); - - req->rq_repmsg = req->rq_repdata; - req->rq_replen = req->rq_repdata_len; - - if (req->rq_early) { - cksums = lustre_msg_get_cksum(req->rq_repdata); - cksumc = lustre_msg_calc_cksum(req->rq_repmsg); - if (cksumc != cksums) { - CDEBUG(D_SEC, - "early reply checksum mismatch: %08x != %08x\n", - cksumc, cksums); - return -EINVAL; - } - } - - return 0; -} - -static -struct ptlrpc_sec *null_create_sec(struct obd_import *imp, - struct ptlrpc_svc_ctx *svc_ctx, - struct sptlrpc_flavor *sf) -{ - LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_NULL); - - /* general layer has take a module reference for us, because we never - * really destroy the sec, simply release the reference here. 
- */ - sptlrpc_policy_put(&null_policy); - return &null_sec; -} - -static -void null_destroy_sec(struct ptlrpc_sec *sec) -{ - LASSERT(sec == &null_sec); -} - -static -struct ptlrpc_cli_ctx *null_lookup_ctx(struct ptlrpc_sec *sec, - struct vfs_cred *vcred, - int create, int remove_dead) -{ - atomic_inc(&null_cli_ctx.cc_refcount); - return &null_cli_ctx; -} - -static -int null_flush_ctx_cache(struct ptlrpc_sec *sec, - uid_t uid, - int grace, int force) -{ - return 0; -} - -static -int null_alloc_reqbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int msgsize) -{ - if (!req->rq_reqbuf) { - int alloc_size = size_roundup_power2(msgsize); - - LASSERT(!req->rq_pool); - req->rq_reqbuf = kvzalloc(alloc_size, GFP_NOFS); - if (!req->rq_reqbuf) - return -ENOMEM; - - req->rq_reqbuf_len = alloc_size; - } else { - LASSERT(req->rq_pool); - LASSERT(req->rq_reqbuf_len >= msgsize); - memset(req->rq_reqbuf, 0, msgsize); - } - - req->rq_reqmsg = req->rq_reqbuf; - return 0; -} - -static -void null_free_reqbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req) -{ - if (!req->rq_pool) { - LASSERTF(req->rq_reqmsg == req->rq_reqbuf, - "req %p: reqmsg %p is not reqbuf %p in null sec\n", - req, req->rq_reqmsg, req->rq_reqbuf); - LASSERTF(req->rq_reqbuf_len >= req->rq_reqlen, - "req %p: reqlen %d should smaller than buflen %d\n", - req, req->rq_reqlen, req->rq_reqbuf_len); - - kvfree(req->rq_reqbuf); - req->rq_reqbuf = NULL; - req->rq_reqbuf_len = 0; - } -} - -static -int null_alloc_repbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int msgsize) -{ - /* add space for early replied */ - msgsize += lustre_msg_early_size(); - - msgsize = size_roundup_power2(msgsize); - - req->rq_repbuf = kvzalloc(msgsize, GFP_NOFS); - if (!req->rq_repbuf) - return -ENOMEM; - - req->rq_repbuf_len = msgsize; - return 0; -} - -static -void null_free_repbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req) -{ - LASSERT(req->rq_repbuf); - - kvfree(req->rq_repbuf); - req->rq_repbuf = NULL; 
- req->rq_repbuf_len = 0; -} - -static -int null_enlarge_reqbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int segment, int newsize) -{ - struct lustre_msg *newbuf; - struct lustre_msg *oldbuf = req->rq_reqmsg; - int oldsize, newmsg_size, alloc_size; - - LASSERT(req->rq_reqbuf); - LASSERT(req->rq_reqbuf == req->rq_reqmsg); - LASSERT(req->rq_reqbuf_len >= req->rq_reqlen); - LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf)); - - /* compute new message size */ - oldsize = req->rq_reqbuf->lm_buflens[segment]; - req->rq_reqbuf->lm_buflens[segment] = newsize; - newmsg_size = lustre_packed_msg_size(oldbuf); - req->rq_reqbuf->lm_buflens[segment] = oldsize; - - /* request from pool should always have enough buffer */ - LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size); - - if (req->rq_reqbuf_len < newmsg_size) { - alloc_size = size_roundup_power2(newmsg_size); - - newbuf = kvzalloc(alloc_size, GFP_NOFS); - if (!newbuf) - return -ENOMEM; - - /* Must lock this, so that otherwise unprotected change of - * rq_reqmsg is not racing with parallel processing of - * imp_replay_list traversing threads. 
See LU-3333 - * This is a bandaid at best, we really need to deal with this - * in request enlarging code before unpacking that's already - * there - */ - if (req->rq_import) - spin_lock(&req->rq_import->imp_lock); - memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen); - - kvfree(req->rq_reqbuf); - req->rq_reqbuf = newbuf; - req->rq_reqmsg = newbuf; - req->rq_reqbuf_len = alloc_size; - - if (req->rq_import) - spin_unlock(&req->rq_import->imp_lock); - } - - _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize); - req->rq_reqlen = newmsg_size; - - return 0; -} - -static struct ptlrpc_svc_ctx null_svc_ctx = { - .sc_refcount = ATOMIC_INIT(1), - .sc_policy = &null_policy, -}; - -static -int null_accept(struct ptlrpc_request *req) -{ - LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == - SPTLRPC_POLICY_NULL); - - if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) { - CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc); - return SECSVC_DROP; - } - - req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf); - - req->rq_reqmsg = req->rq_reqbuf; - req->rq_reqlen = req->rq_reqdata_len; - - req->rq_svc_ctx = &null_svc_ctx; - atomic_inc(&req->rq_svc_ctx->sc_refcount); - - return SECSVC_OK; -} - -static -int null_alloc_rs(struct ptlrpc_request *req, int msgsize) -{ - struct ptlrpc_reply_state *rs; - int rs_size = sizeof(*rs) + msgsize; - - LASSERT(msgsize % 8 == 0); - - rs = req->rq_reply_state; - - if (rs) { - /* pre-allocated */ - LASSERT(rs->rs_size >= rs_size); - } else { - rs = kvzalloc(rs_size, GFP_NOFS); - if (!rs) - return -ENOMEM; - - rs->rs_size = rs_size; - } - - rs->rs_svc_ctx = req->rq_svc_ctx; - atomic_inc(&req->rq_svc_ctx->sc_refcount); - - rs->rs_repbuf = (struct lustre_msg *)(rs + 1); - rs->rs_repbuf_len = rs_size - sizeof(*rs); - rs->rs_msg = rs->rs_repbuf; - - req->rq_reply_state = rs; - return 0; -} - -static -void null_free_rs(struct ptlrpc_reply_state *rs) -{ - LASSERT_ATOMIC_GT(&rs->rs_svc_ctx->sc_refcount, 1); - 
atomic_dec(&rs->rs_svc_ctx->sc_refcount); - - if (!rs->rs_prealloc) - kvfree(rs); -} - -static -int null_authorize(struct ptlrpc_request *req) -{ - struct ptlrpc_reply_state *rs = req->rq_reply_state; - - LASSERT(rs); - - rs->rs_repbuf->lm_secflvr = SPTLRPC_FLVR_NULL; - rs->rs_repdata_len = req->rq_replen; - - if (likely(req->rq_packed_final)) { - if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) - req->rq_reply_off = lustre_msg_early_size(); - else - req->rq_reply_off = 0; - } else { - __u32 cksum; - - cksum = lustre_msg_calc_cksum(rs->rs_repbuf); - lustre_msg_set_cksum(rs->rs_repbuf, cksum); - req->rq_reply_off = 0; - } - - return 0; -} - -static struct ptlrpc_ctx_ops null_ctx_ops = { - .refresh = null_ctx_refresh, - .sign = null_ctx_sign, - .verify = null_ctx_verify, -}; - -static struct ptlrpc_sec_cops null_sec_cops = { - .create_sec = null_create_sec, - .destroy_sec = null_destroy_sec, - .lookup_ctx = null_lookup_ctx, - .flush_ctx_cache = null_flush_ctx_cache, - .alloc_reqbuf = null_alloc_reqbuf, - .alloc_repbuf = null_alloc_repbuf, - .free_reqbuf = null_free_reqbuf, - .free_repbuf = null_free_repbuf, - .enlarge_reqbuf = null_enlarge_reqbuf, -}; - -static struct ptlrpc_sec_sops null_sec_sops = { - .accept = null_accept, - .alloc_rs = null_alloc_rs, - .authorize = null_authorize, - .free_rs = null_free_rs, -}; - -static struct ptlrpc_sec_policy null_policy = { - .sp_owner = THIS_MODULE, - .sp_name = "sec.null", - .sp_policy = SPTLRPC_POLICY_NULL, - .sp_cops = &null_sec_cops, - .sp_sops = &null_sec_sops, -}; - -static void null_init_internal(void) -{ - static HLIST_HEAD(__list); - - null_sec.ps_policy = &null_policy; - atomic_set(&null_sec.ps_refcount, 1); /* always busy */ - null_sec.ps_id = -1; - null_sec.ps_import = NULL; - null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL; - null_sec.ps_flvr.sf_flags = 0; - null_sec.ps_part = LUSTRE_SP_ANY; - null_sec.ps_dying = 0; - spin_lock_init(&null_sec.ps_lock); - atomic_set(&null_sec.ps_nctx, 1); /* for 
"null_cli_ctx" */ - INIT_LIST_HEAD(&null_sec.ps_gc_list); - null_sec.ps_gc_interval = 0; - null_sec.ps_gc_next = 0; - - hlist_add_head(&null_cli_ctx.cc_cache, &__list); - atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */ - null_cli_ctx.cc_sec = &null_sec; - null_cli_ctx.cc_ops = &null_ctx_ops; - null_cli_ctx.cc_expire = 0; - null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL | - PTLRPC_CTX_UPTODATE; - null_cli_ctx.cc_vcred.vc_uid = 0; - spin_lock_init(&null_cli_ctx.cc_lock); - INIT_LIST_HEAD(&null_cli_ctx.cc_req_list); - INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain); -} - -int sptlrpc_null_init(void) -{ - int rc; - - null_init_internal(); - - rc = sptlrpc_register_policy(&null_policy); - if (rc) - CERROR("failed to register %s: %d\n", null_policy.sp_name, rc); - - return rc; -} - -void sptlrpc_null_fini(void) -{ - int rc; - - rc = sptlrpc_unregister_policy(&null_policy); - if (rc) - CERROR("failed to unregister %s: %d\n", - null_policy.sp_name, rc); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c deleted file mode 100644 index ec3d9af76b17..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c +++ /dev/null @@ -1,1023 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * lustre/ptlrpc/sec_plain.c - * - * Author: Eric Mei - */ - -#define DEBUG_SUBSYSTEM S_SEC - -#include -#include -#include -#include -#include -#include "ptlrpc_internal.h" - -struct plain_sec { - struct ptlrpc_sec pls_base; - rwlock_t pls_lock; - struct ptlrpc_cli_ctx *pls_ctx; -}; - -static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec) -{ - return container_of(sec, struct plain_sec, pls_base); -} - -static struct ptlrpc_sec_policy plain_policy; -static struct ptlrpc_ctx_ops plain_ctx_ops; -static struct ptlrpc_svc_ctx plain_svc_ctx; - -static unsigned int plain_at_offset; - -/* - * for simplicity, plain policy rpc use fixed layout. 
- */ -#define PLAIN_PACK_SEGMENTS (4) - -#define PLAIN_PACK_HDR_OFF (0) -#define PLAIN_PACK_MSG_OFF (1) -#define PLAIN_PACK_USER_OFF (2) -#define PLAIN_PACK_BULK_OFF (3) - -#define PLAIN_FL_USER (0x01) -#define PLAIN_FL_BULK (0x02) - -struct plain_header { - __u8 ph_ver; /* 0 */ - __u8 ph_flags; - __u8 ph_sp; /* source */ - __u8 ph_bulk_hash_alg; /* complete flavor desc */ - __u8 ph_pad[4]; -}; - -struct plain_bulk_token { - __u8 pbt_hash[8]; -}; - -#define PLAIN_BSD_SIZE \ - (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token)) - -/**************************************** - * bulk checksum helpers * - ****************************************/ - -static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed) -{ - struct ptlrpc_bulk_sec_desc *bsd; - - if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed)) - return -EPROTO; - - bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE); - if (!bsd) { - CERROR("bulk sec desc has short size %d\n", - lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF)); - return -EPROTO; - } - - if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL && - bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) { - CERROR("invalid bulk svc %u\n", bsd->bsd_svc); - return -EPROTO; - } - - return 0; -} - -static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc, - __u8 hash_alg, - struct plain_bulk_token *token) -{ - if (hash_alg == BULK_HASH_ALG_NULL) - return 0; - - memset(token->pbt_hash, 0, sizeof(token->pbt_hash)); - return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash, - sizeof(token->pbt_hash)); -} - -static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc, - __u8 hash_alg, - struct plain_bulk_token *tokenr) -{ - struct plain_bulk_token tokenv; - int rc; - - if (hash_alg == BULK_HASH_ALG_NULL) - return 0; - - memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash)); - rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash, - sizeof(tokenv.pbt_hash)); - if (rc) - return rc; - - if (memcmp(tokenr->pbt_hash, 
tokenv.pbt_hash, sizeof(tokenr->pbt_hash))) - return -EACCES; - return 0; -} - -static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc) -{ - char *ptr; - unsigned int off, i; - - LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); - - for (i = 0; i < desc->bd_iov_count; i++) { - if (!BD_GET_KIOV(desc, i).bv_len) - continue; - - ptr = kmap(BD_GET_KIOV(desc, i).bv_page); - off = BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK; - ptr[off] ^= 0x1; - kunmap(BD_GET_KIOV(desc, i).bv_page); - return; - } -} - -/**************************************** - * cli_ctx apis * - ****************************************/ - -static -int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx) -{ - /* should never reach here */ - LBUG(); - return 0; -} - -static -int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx) -{ - return 0; -} - -static -int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) -{ - struct lustre_msg *msg = req->rq_reqbuf; - struct plain_header *phdr; - - msg->lm_secflvr = req->rq_flvr.sf_rpc; - - phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0); - phdr->ph_ver = 0; - phdr->ph_flags = 0; - phdr->ph_sp = ctx->cc_sec->ps_part; - phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg; - - if (req->rq_pack_udesc) - phdr->ph_flags |= PLAIN_FL_USER; - if (req->rq_pack_bulk) - phdr->ph_flags |= PLAIN_FL_BULK; - - req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount, - msg->lm_buflens); - return 0; -} - -static -int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) -{ - struct lustre_msg *msg = req->rq_repdata; - struct plain_header *phdr; - __u32 cksum; - int swabbed; - - if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) { - CERROR("unexpected reply buf count %u\n", msg->lm_bufcount); - return -EPROTO; - } - - swabbed = ptlrpc_rep_need_swab(req); - - phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); - if (!phdr) { - CERROR("missing plain header\n"); - return -EPROTO; - } - - if (phdr->ph_ver != 0) { - CERROR("Invalid header 
version\n"); - return -EPROTO; - } - - /* expect no user desc in reply */ - if (phdr->ph_flags & PLAIN_FL_USER) { - CERROR("Unexpected udesc flag in reply\n"); - return -EPROTO; - } - - if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) { - CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg, - req->rq_flvr.u_bulk.hash.hash_alg); - return -EPROTO; - } - - if (unlikely(req->rq_early)) { - unsigned int hsize = 4; - - cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, - lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, - 0), - lustre_msg_buflen(msg, - PLAIN_PACK_MSG_OFF), - NULL, 0, (unsigned char *)&cksum, - &hsize); - if (cksum != msg->lm_cksum) { - CDEBUG(D_SEC, - "early reply checksum mismatch: %08x != %08x\n", - cpu_to_le32(cksum), msg->lm_cksum); - return -EINVAL; - } - } else { - /* whether we sent with bulk or not, we expect the same - * in reply, except for early reply - */ - if (!req->rq_early && - !equi(req->rq_pack_bulk == 1, - phdr->ph_flags & PLAIN_FL_BULK)) { - CERROR("%s bulk checksum in reply\n", - req->rq_pack_bulk ? 
"Missing" : "Unexpected"); - return -EPROTO; - } - - if (phdr->ph_flags & PLAIN_FL_BULK) { - if (plain_unpack_bsd(msg, swabbed)) - return -EPROTO; - } - } - - req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0); - req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF); - return 0; -} - -static -int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc) -{ - struct ptlrpc_bulk_sec_desc *bsd; - struct plain_bulk_token *token; - int rc; - - LASSERT(req->rq_pack_bulk); - LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS); - - bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0); - token = (struct plain_bulk_token *)bsd->bsd_data; - - bsd->bsd_version = 0; - bsd->bsd_flags = 0; - bsd->bsd_type = SPTLRPC_BULK_DEFAULT; - bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc); - - if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL) - return 0; - - if (req->rq_bulk_read) - return 0; - - rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, - token); - if (rc) { - CERROR("bulk write: failed to compute checksum: %d\n", rc); - } else { - /* - * for sending we only compute the wrong checksum instead - * of corrupting the data so it is still correct on a redo - */ - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) && - req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL) - token->pbt_hash[0] ^= 0x1; - } - - return rc; -} - -static -int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc) -{ - struct ptlrpc_bulk_sec_desc *bsdv; - struct plain_bulk_token *tokenv; - int rc; - int i, nob; - - LASSERT(req->rq_pack_bulk); - LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS); - LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS); - - bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0); - tokenv = (struct plain_bulk_token *)bsdv->bsd_data; - - if (req->rq_bulk_write) { - if (bsdv->bsd_flags & 
BSD_FL_ERR) - return -EIO; - return 0; - } - - /* fix the actual data size */ - for (i = 0, nob = 0; i < desc->bd_iov_count; i++) { - struct bio_vec bv_desc = BD_GET_KIOV(desc, i); - - if (bv_desc.bv_len + nob > desc->bd_nob_transferred) - bv_desc.bv_len = desc->bd_nob_transferred - nob; - nob += bv_desc.bv_len; - } - - rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, - tokenv); - if (rc) - CERROR("bulk read: client verify failed: %d\n", rc); - - return rc; -} - -/**************************************** - * sec apis * - ****************************************/ - -static -struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec) -{ - struct ptlrpc_cli_ctx *ctx, *ctx_new; - - ctx_new = kzalloc(sizeof(*ctx_new), GFP_NOFS); - - write_lock(&plsec->pls_lock); - - ctx = plsec->pls_ctx; - if (ctx) { - atomic_inc(&ctx->cc_refcount); - - kfree(ctx_new); - } else if (ctx_new) { - ctx = ctx_new; - - atomic_set(&ctx->cc_refcount, 1); /* for cache */ - ctx->cc_sec = &plsec->pls_base; - ctx->cc_ops = &plain_ctx_ops; - ctx->cc_expire = 0; - ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE; - ctx->cc_vcred.vc_uid = 0; - spin_lock_init(&ctx->cc_lock); - INIT_LIST_HEAD(&ctx->cc_req_list); - INIT_LIST_HEAD(&ctx->cc_gc_chain); - - plsec->pls_ctx = ctx; - atomic_inc(&plsec->pls_base.ps_nctx); - atomic_inc(&plsec->pls_base.ps_refcount); - - atomic_inc(&ctx->cc_refcount); /* for caller */ - } - - write_unlock(&plsec->pls_lock); - - return ctx; -} - -static -void plain_destroy_sec(struct ptlrpc_sec *sec) -{ - struct plain_sec *plsec = sec2plsec(sec); - - LASSERT(sec->ps_policy == &plain_policy); - LASSERT(sec->ps_import); - LASSERT(atomic_read(&sec->ps_refcount) == 0); - LASSERT(atomic_read(&sec->ps_nctx) == 0); - LASSERT(!plsec->pls_ctx); - - class_import_put(sec->ps_import); - - kfree(plsec); -} - -static -void plain_kill_sec(struct ptlrpc_sec *sec) -{ - sec->ps_dying = 1; -} - -static -struct ptlrpc_sec *plain_create_sec(struct obd_import *imp, - 
struct ptlrpc_svc_ctx *svc_ctx, - struct sptlrpc_flavor *sf) -{ - struct plain_sec *plsec; - struct ptlrpc_sec *sec; - struct ptlrpc_cli_ctx *ctx; - - LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN); - - plsec = kzalloc(sizeof(*plsec), GFP_NOFS); - if (!plsec) - return NULL; - - /* - * initialize plain_sec - */ - rwlock_init(&plsec->pls_lock); - plsec->pls_ctx = NULL; - - sec = &plsec->pls_base; - sec->ps_policy = &plain_policy; - atomic_set(&sec->ps_refcount, 0); - atomic_set(&sec->ps_nctx, 0); - sec->ps_id = sptlrpc_get_next_secid(); - sec->ps_import = class_import_get(imp); - sec->ps_flvr = *sf; - spin_lock_init(&sec->ps_lock); - INIT_LIST_HEAD(&sec->ps_gc_list); - sec->ps_gc_interval = 0; - sec->ps_gc_next = 0; - - /* install ctx immediately if this is a reverse sec */ - if (svc_ctx) { - ctx = plain_sec_install_ctx(plsec); - if (!ctx) { - plain_destroy_sec(sec); - return NULL; - } - sptlrpc_cli_ctx_put(ctx, 1); - } - - return sec; -} - -static -struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec, - struct vfs_cred *vcred, - int create, int remove_dead) -{ - struct plain_sec *plsec = sec2plsec(sec); - struct ptlrpc_cli_ctx *ctx; - - read_lock(&plsec->pls_lock); - ctx = plsec->pls_ctx; - if (ctx) - atomic_inc(&ctx->cc_refcount); - read_unlock(&plsec->pls_lock); - - if (unlikely(!ctx)) - ctx = plain_sec_install_ctx(plsec); - - return ctx; -} - -static -void plain_release_ctx(struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx, int sync) -{ - LASSERT(atomic_read(&sec->ps_refcount) > 0); - LASSERT(atomic_read(&sec->ps_nctx) > 0); - LASSERT(atomic_read(&ctx->cc_refcount) == 0); - LASSERT(ctx->cc_sec == sec); - - kfree(ctx); - - atomic_dec(&sec->ps_nctx); - sptlrpc_sec_put(sec); -} - -static -int plain_flush_ctx_cache(struct ptlrpc_sec *sec, - uid_t uid, int grace, int force) -{ - struct plain_sec *plsec = sec2plsec(sec); - struct ptlrpc_cli_ctx *ctx; - - /* do nothing unless caller want to flush for 'all' */ - if (uid != -1) - return 0; - 
- write_lock(&plsec->pls_lock); - ctx = plsec->pls_ctx; - plsec->pls_ctx = NULL; - write_unlock(&plsec->pls_lock); - - if (ctx) - sptlrpc_cli_ctx_put(ctx, 1); - return 0; -} - -static -int plain_alloc_reqbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int msgsize) -{ - __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, }; - int alloc_len; - - buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header); - buflens[PLAIN_PACK_MSG_OFF] = msgsize; - - if (req->rq_pack_udesc) - buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size(); - - if (req->rq_pack_bulk) { - LASSERT(req->rq_bulk_read || req->rq_bulk_write); - buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE; - } - - alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens); - - if (!req->rq_reqbuf) { - LASSERT(!req->rq_pool); - - alloc_len = size_roundup_power2(alloc_len); - req->rq_reqbuf = kvzalloc(alloc_len, GFP_NOFS); - if (!req->rq_reqbuf) - return -ENOMEM; - - req->rq_reqbuf_len = alloc_len; - } else { - LASSERT(req->rq_pool); - LASSERT(req->rq_reqbuf_len >= alloc_len); - memset(req->rq_reqbuf, 0, alloc_len); - } - - lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL); - req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0); - - if (req->rq_pack_udesc) { - int rc = sptlrpc_pack_user_desc(req->rq_reqbuf, - PLAIN_PACK_USER_OFF); - if (rc < 0) - return rc; - } - - return 0; -} - -static -void plain_free_reqbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req) -{ - if (!req->rq_pool) { - kvfree(req->rq_reqbuf); - req->rq_reqbuf = NULL; - req->rq_reqbuf_len = 0; - } -} - -static -int plain_alloc_repbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int msgsize) -{ - __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, }; - int alloc_len; - - buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header); - buflens[PLAIN_PACK_MSG_OFF] = msgsize; - - if (req->rq_pack_bulk) { - LASSERT(req->rq_bulk_read || req->rq_bulk_write); - buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE; 
- } - - alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens); - - /* add space for early reply */ - alloc_len += plain_at_offset; - - alloc_len = size_roundup_power2(alloc_len); - - req->rq_repbuf = kvzalloc(alloc_len, GFP_NOFS); - if (!req->rq_repbuf) - return -ENOMEM; - - req->rq_repbuf_len = alloc_len; - return 0; -} - -static -void plain_free_repbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req) -{ - kvfree(req->rq_repbuf); - req->rq_repbuf = NULL; - req->rq_repbuf_len = 0; -} - -static -int plain_enlarge_reqbuf(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int segment, int newsize) -{ - struct lustre_msg *newbuf; - int oldsize; - int newmsg_size, newbuf_size; - - LASSERT(req->rq_reqbuf); - LASSERT(req->rq_reqbuf_len >= req->rq_reqlen); - LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) == - req->rq_reqmsg); - - /* compute new embedded msg size. */ - oldsize = req->rq_reqmsg->lm_buflens[segment]; - req->rq_reqmsg->lm_buflens[segment] = newsize; - newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount, - req->rq_reqmsg->lm_buflens); - req->rq_reqmsg->lm_buflens[segment] = oldsize; - - /* compute new wrapper msg size. */ - oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF]; - req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size; - newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount, - req->rq_reqbuf->lm_buflens); - req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize; - - /* request from pool should always have enough buffer */ - LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size); - - if (req->rq_reqbuf_len < newbuf_size) { - newbuf_size = size_roundup_power2(newbuf_size); - - newbuf = kvzalloc(newbuf_size, GFP_NOFS); - if (!newbuf) - return -ENOMEM; - - /* Must lock this, so that otherwise unprotected change of - * rq_reqmsg is not racing with parallel processing of - * imp_replay_list traversing threads. 
See LU-3333 - * This is a bandaid at best, we really need to deal with this - * in request enlarging code before unpacking that's already - * there - */ - if (req->rq_import) - spin_lock(&req->rq_import->imp_lock); - - memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len); - - kvfree(req->rq_reqbuf); - req->rq_reqbuf = newbuf; - req->rq_reqbuf_len = newbuf_size; - req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, - PLAIN_PACK_MSG_OFF, 0); - - if (req->rq_import) - spin_unlock(&req->rq_import->imp_lock); - } - - _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, - newmsg_size); - _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize); - - req->rq_reqlen = newmsg_size; - return 0; -} - -/**************************************** - * service apis * - ****************************************/ - -static struct ptlrpc_svc_ctx plain_svc_ctx = { - .sc_refcount = ATOMIC_INIT(1), - .sc_policy = &plain_policy, -}; - -static -int plain_accept(struct ptlrpc_request *req) -{ - struct lustre_msg *msg = req->rq_reqbuf; - struct plain_header *phdr; - int swabbed; - - LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == - SPTLRPC_POLICY_PLAIN); - - if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) != - SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) || - SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) != - SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) { - CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc); - return SECSVC_DROP; - } - - if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) { - CERROR("unexpected request buf count %u\n", msg->lm_bufcount); - return SECSVC_DROP; - } - - swabbed = ptlrpc_req_need_swab(req); - - phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); - if (!phdr) { - CERROR("missing plain header\n"); - return -EPROTO; - } - - if (phdr->ph_ver != 0) { - CERROR("Invalid header version\n"); - return -EPROTO; - } - - if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) { - CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg); - return -EPROTO; - } - - 
req->rq_sp_from = phdr->ph_sp; - req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg; - - if (phdr->ph_flags & PLAIN_FL_USER) { - if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF, - swabbed)) { - CERROR("Mal-formed user descriptor\n"); - return SECSVC_DROP; - } - - req->rq_pack_udesc = 1; - req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0); - } - - if (phdr->ph_flags & PLAIN_FL_BULK) { - if (plain_unpack_bsd(msg, swabbed)) - return SECSVC_DROP; - - req->rq_pack_bulk = 1; - } - - req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0); - req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF]; - - req->rq_svc_ctx = &plain_svc_ctx; - atomic_inc(&req->rq_svc_ctx->sc_refcount); - - return SECSVC_OK; -} - -static -int plain_alloc_rs(struct ptlrpc_request *req, int msgsize) -{ - struct ptlrpc_reply_state *rs; - __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, }; - int rs_size = sizeof(*rs); - - LASSERT(msgsize % 8 == 0); - - buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header); - buflens[PLAIN_PACK_MSG_OFF] = msgsize; - - if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) - buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE; - - rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens); - - rs = req->rq_reply_state; - - if (rs) { - /* pre-allocated */ - LASSERT(rs->rs_size >= rs_size); - } else { - rs = kvzalloc(rs_size, GFP_NOFS); - if (!rs) - return -ENOMEM; - - rs->rs_size = rs_size; - } - - rs->rs_svc_ctx = req->rq_svc_ctx; - atomic_inc(&req->rq_svc_ctx->sc_refcount); - rs->rs_repbuf = (struct lustre_msg *)(rs + 1); - rs->rs_repbuf_len = rs_size - sizeof(*rs); - - lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL); - rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0); - - req->rq_reply_state = rs; - return 0; -} - -static -void plain_free_rs(struct ptlrpc_reply_state *rs) -{ - LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1); - atomic_dec(&rs->rs_svc_ctx->sc_refcount); - - if 
(!rs->rs_prealloc) - kvfree(rs); -} - -static -int plain_authorize(struct ptlrpc_request *req) -{ - struct ptlrpc_reply_state *rs = req->rq_reply_state; - struct lustre_msg_v2 *msg = rs->rs_repbuf; - struct plain_header *phdr; - int len; - - LASSERT(rs); - LASSERT(msg); - - if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF]) - len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF, - req->rq_replen, 1); - else - len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens); - - msg->lm_secflvr = req->rq_flvr.sf_rpc; - - phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0); - phdr->ph_ver = 0; - phdr->ph_flags = 0; - phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg; - - if (req->rq_pack_bulk) - phdr->ph_flags |= PLAIN_FL_BULK; - - rs->rs_repdata_len = len; - - if (likely(req->rq_packed_final)) { - if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) - req->rq_reply_off = plain_at_offset; - else - req->rq_reply_off = 0; - } else { - unsigned int hsize = 4; - - cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, - lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, - 0), - lustre_msg_buflen(msg, - PLAIN_PACK_MSG_OFF), - NULL, 0, (unsigned char *)&msg->lm_cksum, - &hsize); - req->rq_reply_off = 0; - } - - return 0; -} - -static -int plain_svc_unwrap_bulk(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc) -{ - struct ptlrpc_reply_state *rs = req->rq_reply_state; - struct ptlrpc_bulk_sec_desc *bsdr, *bsdv; - struct plain_bulk_token *tokenr; - int rc; - - LASSERT(req->rq_bulk_write); - LASSERT(req->rq_pack_bulk); - - bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0); - tokenr = (struct plain_bulk_token *)bsdr->bsd_data; - bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0); - - bsdv->bsd_version = 0; - bsdv->bsd_type = SPTLRPC_BULK_DEFAULT; - bsdv->bsd_svc = bsdr->bsd_svc; - bsdv->bsd_flags = 0; - - if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL) - return 0; - - rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, - tokenr); - if 
(rc) { - bsdv->bsd_flags |= BSD_FL_ERR; - CERROR("bulk write: server verify failed: %d\n", rc); - } - - return rc; -} - -static -int plain_svc_wrap_bulk(struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc) -{ - struct ptlrpc_reply_state *rs = req->rq_reply_state; - struct ptlrpc_bulk_sec_desc *bsdr, *bsdv; - struct plain_bulk_token *tokenv; - int rc; - - LASSERT(req->rq_bulk_read); - LASSERT(req->rq_pack_bulk); - - bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0); - bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0); - tokenv = (struct plain_bulk_token *)bsdv->bsd_data; - - bsdv->bsd_version = 0; - bsdv->bsd_type = SPTLRPC_BULK_DEFAULT; - bsdv->bsd_svc = bsdr->bsd_svc; - bsdv->bsd_flags = 0; - - if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL) - return 0; - - rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, - tokenv); - if (rc) { - CERROR("bulk read: server failed to compute checksum: %d\n", - rc); - } else { - if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) - corrupt_bulk_data(desc); - } - - return rc; -} - -static struct ptlrpc_ctx_ops plain_ctx_ops = { - .refresh = plain_ctx_refresh, - .validate = plain_ctx_validate, - .sign = plain_ctx_sign, - .verify = plain_ctx_verify, - .wrap_bulk = plain_cli_wrap_bulk, - .unwrap_bulk = plain_cli_unwrap_bulk, -}; - -static struct ptlrpc_sec_cops plain_sec_cops = { - .create_sec = plain_create_sec, - .destroy_sec = plain_destroy_sec, - .kill_sec = plain_kill_sec, - .lookup_ctx = plain_lookup_ctx, - .release_ctx = plain_release_ctx, - .flush_ctx_cache = plain_flush_ctx_cache, - .alloc_reqbuf = plain_alloc_reqbuf, - .free_reqbuf = plain_free_reqbuf, - .alloc_repbuf = plain_alloc_repbuf, - .free_repbuf = plain_free_repbuf, - .enlarge_reqbuf = plain_enlarge_reqbuf, -}; - -static struct ptlrpc_sec_sops plain_sec_sops = { - .accept = plain_accept, - .alloc_rs = plain_alloc_rs, - .authorize = plain_authorize, - .free_rs = plain_free_rs, - .unwrap_bulk = plain_svc_unwrap_bulk, - 
.wrap_bulk = plain_svc_wrap_bulk, -}; - -static struct ptlrpc_sec_policy plain_policy = { - .sp_owner = THIS_MODULE, - .sp_name = "plain", - .sp_policy = SPTLRPC_POLICY_PLAIN, - .sp_cops = &plain_sec_cops, - .sp_sops = &plain_sec_sops, -}; - -int sptlrpc_plain_init(void) -{ - __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, }; - int rc; - - buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size(); - plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens); - - rc = sptlrpc_register_policy(&plain_policy); - if (rc) - CERROR("failed to register: %d\n", rc); - - return rc; -} - -void sptlrpc_plain_fini(void) -{ - int rc; - - rc = sptlrpc_unregister_policy(&plain_policy); - if (rc) - CERROR("cannot unregister: %d\n", rc); -} diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c deleted file mode 100644 index 3fd8c746f460..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ /dev/null @@ -1,2807 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. 
- * - * Copyright (c) 2010, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include -#include -#include -#include -#include -#include "ptlrpc_internal.h" -#include -#include - -/* The following are visible and mutable through /sys/module/ptlrpc */ -int test_req_buffer_pressure; -module_param(test_req_buffer_pressure, int, 0444); -MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools"); -module_param(at_min, int, 0644); -MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)"); -module_param(at_max, int, 0644); -MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)"); -module_param(at_history, int, 0644); -MODULE_PARM_DESC(at_history, - "Adaptive timeouts remember the slowest event that took place within this period (sec)"); -module_param(at_early_margin, int, 0644); -MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply"); -module_param(at_extra, int, 0644); -MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply"); - -/* forward ref */ -static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt); -static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req); -static void ptlrpc_at_remove_timed(struct ptlrpc_request *req); - -/** Holds a list of all PTLRPC services */ -LIST_HEAD(ptlrpc_all_services); -/** Used to protect the \e ptlrpc_all_services list */ -struct mutex ptlrpc_all_services_mutex; - -static struct ptlrpc_request_buffer_desc * -ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_service *svc = svcpt->scp_service; - struct ptlrpc_request_buffer_desc *rqbd; - - rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS, - cfs_cpt_spread_node(svc->srv_cptable, - svcpt->scp_cpt)); - if (!rqbd) - return NULL; - - rqbd->rqbd_svcpt = svcpt; - rqbd->rqbd_refcount = 0; - 
rqbd->rqbd_cbid.cbid_fn = request_in_callback; - rqbd->rqbd_cbid.cbid_arg = rqbd; - INIT_LIST_HEAD(&rqbd->rqbd_reqs); - rqbd->rqbd_buffer = kvzalloc_node(svc->srv_buf_size, GFP_KERNEL, - cfs_cpt_spread_node(svc->srv_cptable, - svcpt->scp_cpt)); - - if (!rqbd->rqbd_buffer) { - kfree(rqbd); - return NULL; - } - - spin_lock(&svcpt->scp_lock); - list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); - svcpt->scp_nrqbds_total++; - spin_unlock(&svcpt->scp_lock); - - return rqbd; -} - -static void -ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd) -{ - struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt; - - LASSERT(rqbd->rqbd_refcount == 0); - LASSERT(list_empty(&rqbd->rqbd_reqs)); - - spin_lock(&svcpt->scp_lock); - list_del(&rqbd->rqbd_list); - svcpt->scp_nrqbds_total--; - spin_unlock(&svcpt->scp_lock); - - kvfree(rqbd->rqbd_buffer); - kfree(rqbd); -} - -static int -ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post) -{ - struct ptlrpc_service *svc = svcpt->scp_service; - struct ptlrpc_request_buffer_desc *rqbd; - int rc = 0; - int i; - - if (svcpt->scp_rqbd_allocating) - goto try_post; - - spin_lock(&svcpt->scp_lock); - /* check again with lock */ - if (svcpt->scp_rqbd_allocating) { - /* NB: we might allow more than one thread in the future */ - LASSERT(svcpt->scp_rqbd_allocating == 1); - spin_unlock(&svcpt->scp_lock); - goto try_post; - } - - svcpt->scp_rqbd_allocating++; - spin_unlock(&svcpt->scp_lock); - - for (i = 0; i < svc->srv_nbuf_per_group; i++) { - /* NB: another thread might have recycled enough rqbds, we - * need to make sure it wouldn't over-allocate, see LU-1212. 
- */ - if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group) - break; - - rqbd = ptlrpc_alloc_rqbd(svcpt); - - if (!rqbd) { - CERROR("%s: Can't allocate request buffer\n", - svc->srv_name); - rc = -ENOMEM; - break; - } - } - - spin_lock(&svcpt->scp_lock); - - LASSERT(svcpt->scp_rqbd_allocating == 1); - svcpt->scp_rqbd_allocating--; - - spin_unlock(&svcpt->scp_lock); - - CDEBUG(D_RPCTRACE, - "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n", - svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted, - svcpt->scp_nrqbds_total, rc); - - try_post: - if (post && rc == 0) - rc = ptlrpc_server_post_idle_rqbds(svcpt); - - return rc; -} - -struct ptlrpc_hr_partition; - -struct ptlrpc_hr_thread { - int hrt_id; /* thread ID */ - spinlock_t hrt_lock; - wait_queue_head_t hrt_waitq; - struct list_head hrt_queue; /* RS queue */ - struct ptlrpc_hr_partition *hrt_partition; -}; - -struct ptlrpc_hr_partition { - /* # of started threads */ - atomic_t hrp_nstarted; - /* # of stopped threads */ - atomic_t hrp_nstopped; - /* cpu partition id */ - int hrp_cpt; - /* round-robin rotor for choosing thread */ - int hrp_rotor; - /* total number of threads on this partition */ - int hrp_nthrs; - /* threads table */ - struct ptlrpc_hr_thread *hrp_thrs; -}; - -#define HRT_RUNNING 0 -#define HRT_STOPPING 1 - -struct ptlrpc_hr_service { - /* CPU partition table, it's just cfs_cpt_tab for now */ - struct cfs_cpt_table *hr_cpt_table; - /** controller sleep waitq */ - wait_queue_head_t hr_waitq; - unsigned int hr_stopping; - /** roundrobin rotor for non-affinity service */ - unsigned int hr_rotor; - /* partition data */ - struct ptlrpc_hr_partition **hr_partitions; -}; - -/** reply handling service. */ -static struct ptlrpc_hr_service ptlrpc_hr; - -/** - * Choose an hr thread to dispatch requests to. 
- */ -static struct ptlrpc_hr_thread * -ptlrpc_hr_select(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_hr_partition *hrp; - unsigned int rotor; - - if (svcpt->scp_cpt >= 0 && - svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) { - /* directly match partition */ - hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt]; - - } else { - rotor = ptlrpc_hr.hr_rotor++; - rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table); - - hrp = ptlrpc_hr.hr_partitions[rotor]; - } - - rotor = hrp->hrp_rotor++; - return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs]; -} - -/** - * Put reply state into a queue for processing because we received - * ACK from the client - */ -void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs) -{ - struct ptlrpc_hr_thread *hrt; - - LASSERT(list_empty(&rs->rs_list)); - - hrt = ptlrpc_hr_select(rs->rs_svcpt); - - spin_lock(&hrt->hrt_lock); - list_add_tail(&rs->rs_list, &hrt->hrt_queue); - spin_unlock(&hrt->hrt_lock); - - wake_up(&hrt->hrt_waitq); -} - -void -ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs) -{ - assert_spin_locked(&rs->rs_svcpt->scp_rep_lock); - assert_spin_locked(&rs->rs_lock); - LASSERT(rs->rs_difficult); - rs->rs_scheduled_ever = 1; /* flag any notification attempt */ - - if (rs->rs_scheduled) { /* being set up or already notified */ - return; - } - - rs->rs_scheduled = 1; - list_del_init(&rs->rs_list); - ptlrpc_dispatch_difficult_reply(rs); -} -EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply); - -static int -ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_request_buffer_desc *rqbd; - int rc; - int posted = 0; - - for (;;) { - spin_lock(&svcpt->scp_lock); - - if (list_empty(&svcpt->scp_rqbd_idle)) { - spin_unlock(&svcpt->scp_lock); - return posted; - } - - rqbd = list_entry(svcpt->scp_rqbd_idle.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); - list_del(&rqbd->rqbd_list); - - /* assume we will post successfully */ - svcpt->scp_nrqbds_posted++; - list_add(&rqbd->rqbd_list, 
&svcpt->scp_rqbd_posted); - - spin_unlock(&svcpt->scp_lock); - - rc = ptlrpc_register_rqbd(rqbd); - if (rc != 0) - break; - - posted = 1; - } - - spin_lock(&svcpt->scp_lock); - - svcpt->scp_nrqbds_posted--; - list_del(&rqbd->rqbd_list); - list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); - - /* Don't complain if no request buffers are posted right now; LNET - * won't drop requests because we set the portal lazy! - */ - - spin_unlock(&svcpt->scp_lock); - - return -1; -} - -static void ptlrpc_at_timer(struct timer_list *t) -{ - struct ptlrpc_service_part *svcpt; - - svcpt = from_timer(svcpt, t, scp_at_timer); - - svcpt->scp_at_check = 1; - svcpt->scp_at_checktime = jiffies; - wake_up(&svcpt->scp_waitq); -} - -static void -ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, - struct ptlrpc_service_conf *conf) -{ - struct ptlrpc_service_thr_conf *tc = &conf->psc_thr; - unsigned int init; - unsigned int total; - unsigned int nthrs; - int weight; - - /* - * Common code for estimating & validating threads number. - * CPT affinity service could have percpt thread-pool instead - * of a global thread-pool, which means user might not always - * get the threads number they give it in conf::tc_nthrs_user - * even they did set. It's because we need to validate threads - * number for each CPT to guarantee each pool will have enough - * threads to keep the service healthy. 
- */ - init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL); - init = max_t(int, init, tc->tc_nthrs_init); - - /* NB: please see comments in lustre_lnet.h for definition - * details of these members - */ - LASSERT(tc->tc_nthrs_max != 0); - - if (tc->tc_nthrs_user != 0) { - /* In case there is a reason to test a service with many - * threads, we give a less strict check here, it can - * be up to 8 * nthrs_max - */ - total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user); - nthrs = total / svc->srv_ncpts; - init = max(init, nthrs); - goto out; - } - - total = tc->tc_nthrs_max; - if (tc->tc_nthrs_base == 0) { - /* don't care about base threads number per partition, - * this is most for non-affinity service - */ - nthrs = total / svc->srv_ncpts; - goto out; - } - - nthrs = tc->tc_nthrs_base; - if (svc->srv_ncpts == 1) { - int i; - - /* NB: Increase the base number if it's single partition - * and total number of cores/HTs is larger or equal to 4. - * result will always < 2 * nthrs_base - */ - weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY); - for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */ - (tc->tc_nthrs_base >> i) != 0; i++) - nthrs += tc->tc_nthrs_base >> i; - } - - if (tc->tc_thr_factor != 0) { - int factor = tc->tc_thr_factor; - const int fade = 4; - - /* - * User wants to increase number of threads with for - * each CPU core/HT, most likely the factor is larger then - * one thread/core because service threads are supposed to - * be blocked by lock or wait for IO. - */ - /* - * Amdahl's law says that adding processors wouldn't give - * a linear increasing of parallelism, so it's nonsense to - * have too many threads no matter how many cores/HTs - * there are. 
- */ - /* weight is # of HTs */ - if (cpumask_weight(topology_sibling_cpumask(0)) > 1) { - /* depress thread factor for hyper-thread */ - factor = factor - (factor >> 1) + (factor >> 3); - } - - weight = cfs_cpt_weight(svc->srv_cptable, 0); - LASSERT(weight > 0); - - for (; factor > 0 && weight > 0; factor--, weight -= fade) - nthrs += min(weight, fade) * factor; - } - - if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) { - nthrs = max(tc->tc_nthrs_base, - tc->tc_nthrs_max / svc->srv_ncpts); - } - out: - nthrs = max(nthrs, tc->tc_nthrs_init); - svc->srv_nthrs_cpt_limit = nthrs; - svc->srv_nthrs_cpt_init = init; - - if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) { - CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n", - svc->srv_name, nthrs * svc->srv_ncpts, - tc->tc_nthrs_max); - } -} - -/** - * Initialize percpt data for a service - */ -static int -ptlrpc_service_part_init(struct ptlrpc_service *svc, - struct ptlrpc_service_part *svcpt, int cpt) -{ - struct ptlrpc_at_array *array; - int size; - int index; - int rc; - - svcpt->scp_cpt = cpt; - INIT_LIST_HEAD(&svcpt->scp_threads); - - /* rqbd and incoming request queue */ - spin_lock_init(&svcpt->scp_lock); - INIT_LIST_HEAD(&svcpt->scp_rqbd_idle); - INIT_LIST_HEAD(&svcpt->scp_rqbd_posted); - INIT_LIST_HEAD(&svcpt->scp_req_incoming); - init_waitqueue_head(&svcpt->scp_waitq); - /* history request & rqbd list */ - INIT_LIST_HEAD(&svcpt->scp_hist_reqs); - INIT_LIST_HEAD(&svcpt->scp_hist_rqbds); - - /* active requests and hp requests */ - spin_lock_init(&svcpt->scp_req_lock); - - /* reply states */ - spin_lock_init(&svcpt->scp_rep_lock); - INIT_LIST_HEAD(&svcpt->scp_rep_active); - INIT_LIST_HEAD(&svcpt->scp_rep_idle); - init_waitqueue_head(&svcpt->scp_rep_waitq); - atomic_set(&svcpt->scp_nreps_difficult, 0); - - /* adaptive timeout */ - spin_lock_init(&svcpt->scp_at_lock); - array = &svcpt->scp_at_array; - - size = at_est2timeout(at_max); - array->paa_size = size; - 
array->paa_count = 0; - array->paa_deadline = -1; - - /* allocate memory for scp_at_array (ptlrpc_at_array) */ - array->paa_reqs_array = - kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS, - cfs_cpt_spread_node(svc->srv_cptable, cpt)); - if (!array->paa_reqs_array) - return -ENOMEM; - - for (index = 0; index < size; index++) - INIT_LIST_HEAD(&array->paa_reqs_array[index]); - - array->paa_reqs_count = - kzalloc_node(sizeof(__u32) * size, GFP_NOFS, - cfs_cpt_spread_node(svc->srv_cptable, cpt)); - if (!array->paa_reqs_count) - goto free_reqs_array; - - timer_setup(&svcpt->scp_at_timer, ptlrpc_at_timer, 0); - - /* At SOW, service time should be quick; 10s seems generous. If client - * timeout is less than this, we'll be sending an early reply. - */ - at_init(&svcpt->scp_at_estimate, 10, 0); - - /* assign this before call ptlrpc_grow_req_bufs */ - svcpt->scp_service = svc; - /* Now allocate the request buffers, but don't post them now */ - rc = ptlrpc_grow_req_bufs(svcpt, 0); - /* We shouldn't be under memory pressure at startup, so - * fail if we can't allocate all our buffers at this time. - */ - if (rc != 0) - goto free_reqs_count; - - return 0; - -free_reqs_count: - kfree(array->paa_reqs_count); - array->paa_reqs_count = NULL; -free_reqs_array: - kfree(array->paa_reqs_array); - array->paa_reqs_array = NULL; - - return -ENOMEM; -} - -/** - * Initialize service on a given portal. - * This includes starting serving threads , allocating and posting rqbds and - * so on. 
- */ -struct ptlrpc_service * -ptlrpc_register_service(struct ptlrpc_service_conf *conf, - struct kset *parent, - struct dentry *debugfs_entry) -{ - struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt; - struct ptlrpc_service *service; - struct ptlrpc_service_part *svcpt; - struct cfs_cpt_table *cptable; - __u32 *cpts = NULL; - int ncpts; - int cpt; - int rc; - int i; - - LASSERT(conf->psc_buf.bc_nbufs > 0); - LASSERT(conf->psc_buf.bc_buf_size >= - conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD); - LASSERT(conf->psc_thr.tc_ctx_tags != 0); - - cptable = cconf->cc_cptable; - if (!cptable) - cptable = cfs_cpt_tab; - - if (!conf->psc_thr.tc_cpu_affinity) { - ncpts = 1; - } else { - ncpts = cfs_cpt_number(cptable); - if (cconf->cc_pattern) { - struct cfs_expr_list *el; - - rc = cfs_expr_list_parse(cconf->cc_pattern, - strlen(cconf->cc_pattern), - 0, ncpts - 1, &el); - if (rc != 0) { - CERROR("%s: invalid CPT pattern string: %s", - conf->psc_name, cconf->cc_pattern); - return ERR_PTR(-EINVAL); - } - - rc = cfs_expr_list_values(el, ncpts, &cpts); - cfs_expr_list_free(el); - if (rc <= 0) { - CERROR("%s: failed to parse CPT array %s: %d\n", - conf->psc_name, cconf->cc_pattern, rc); - kfree(cpts); - return ERR_PTR(rc < 0 ? rc : -EINVAL); - } - ncpts = rc; - } - } - - service = kzalloc(offsetof(struct ptlrpc_service, srv_parts[ncpts]), - GFP_NOFS); - if (!service) { - kfree(cpts); - return ERR_PTR(-ENOMEM); - } - - service->srv_cptable = cptable; - service->srv_cpts = cpts; - service->srv_ncpts = ncpts; - - service->srv_cpt_bits = 0; /* it's zero already, easy to read... 
*/ - while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable)) - service->srv_cpt_bits++; - - /* public members */ - spin_lock_init(&service->srv_lock); - service->srv_name = conf->psc_name; - service->srv_watchdog_factor = conf->psc_watchdog_factor; - INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */ - - /* buffer configuration */ - service->srv_nbuf_per_group = test_req_buffer_pressure ? - 1 : conf->psc_buf.bc_nbufs; - service->srv_max_req_size = conf->psc_buf.bc_req_max_size + - SPTLRPC_MAX_PAYLOAD; - service->srv_buf_size = conf->psc_buf.bc_buf_size; - service->srv_rep_portal = conf->psc_buf.bc_rep_portal; - service->srv_req_portal = conf->psc_buf.bc_req_portal; - - /* Increase max reply size to next power of two */ - service->srv_max_reply_size = 1; - while (service->srv_max_reply_size < - conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD) - service->srv_max_reply_size <<= 1; - - service->srv_thread_name = conf->psc_thr.tc_thr_name; - service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags; - service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO; - service->srv_ops = conf->psc_ops; - - for (i = 0; i < ncpts; i++) { - if (!conf->psc_thr.tc_cpu_affinity) - cpt = CFS_CPT_ANY; - else - cpt = cpts ? 
cpts[i] : i; - - svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS, - cfs_cpt_spread_node(cptable, cpt)); - if (!svcpt) { - rc = -ENOMEM; - goto failed; - } - - service->srv_parts[i] = svcpt; - rc = ptlrpc_service_part_init(service, svcpt, cpt); - if (rc != 0) - goto failed; - } - - ptlrpc_server_nthreads_check(service, conf); - - rc = LNetSetLazyPortal(service->srv_req_portal); - LASSERT(rc == 0); - - mutex_lock(&ptlrpc_all_services_mutex); - list_add(&service->srv_list, &ptlrpc_all_services); - mutex_unlock(&ptlrpc_all_services_mutex); - - if (parent) { - rc = ptlrpc_sysfs_register_service(parent, service); - if (rc) - goto failed; - } - - if (!IS_ERR_OR_NULL(debugfs_entry)) - ptlrpc_ldebugfs_register_service(debugfs_entry, service); - - rc = ptlrpc_service_nrs_setup(service); - if (rc != 0) - goto failed; - - CDEBUG(D_NET, "%s: Started, listening on portal %d\n", - service->srv_name, service->srv_req_portal); - - rc = ptlrpc_start_threads(service); - if (rc != 0) { - CERROR("Failed to start threads for service %s: %d\n", - service->srv_name, rc); - goto failed; - } - - return service; -failed: - ptlrpc_unregister_service(service); - return ERR_PTR(rc); -} -EXPORT_SYMBOL(ptlrpc_register_service); - -/** - * to actually free the request, must be called without holding svc_lock. - * note it's caller's responsibility to unlink req->rq_list. - */ -static void ptlrpc_server_free_request(struct ptlrpc_request *req) -{ - LASSERT(atomic_read(&req->rq_refcount) == 0); - LASSERT(list_empty(&req->rq_timed_list)); - - /* DEBUG_REQ() assumes the reply state of a request with a valid - * ref will not be destroyed until that reference is dropped. - */ - ptlrpc_req_drop_rs(req); - - sptlrpc_svc_ctx_decref(req); - - if (req != &req->rq_rqbd->rqbd_req) { - /* NB request buffers use an embedded - * req if the incoming req unlinked the - * MD; this isn't one of them! - */ - ptlrpc_request_cache_free(req); - } -} - -/** - * drop a reference count of the request. 
if it reaches 0, we either - * put it into history list, or free it immediately. - */ -static void ptlrpc_server_drop_request(struct ptlrpc_request *req) -{ - struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd; - struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt; - struct ptlrpc_service *svc = svcpt->scp_service; - int refcount; - - if (!atomic_dec_and_test(&req->rq_refcount)) - return; - - if (req->rq_at_linked) { - spin_lock(&svcpt->scp_at_lock); - /* recheck with lock, in case it's unlinked by - * ptlrpc_at_check_timed() - */ - if (likely(req->rq_at_linked)) - ptlrpc_at_remove_timed(req); - spin_unlock(&svcpt->scp_at_lock); - } - - LASSERT(list_empty(&req->rq_timed_list)); - - /* finalize request */ - if (req->rq_export) { - class_export_put(req->rq_export); - req->rq_export = NULL; - } - - spin_lock(&svcpt->scp_lock); - - list_add(&req->rq_list, &rqbd->rqbd_reqs); - - refcount = --(rqbd->rqbd_refcount); - if (refcount == 0) { - /* request buffer is now idle: add to history */ - list_del(&rqbd->rqbd_list); - - list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds); - svcpt->scp_hist_nrqbds++; - - /* cull some history? 
- * I expect only about 1 or 2 rqbds need to be recycled here - */ - while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) { - rqbd = list_entry(svcpt->scp_hist_rqbds.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); - - list_del(&rqbd->rqbd_list); - svcpt->scp_hist_nrqbds--; - - /* remove rqbd's reqs from svc's req history while - * I've got the service lock - */ - list_for_each_entry(req, &rqbd->rqbd_reqs, rq_list) { - /* Track the highest culled req seq */ - if (req->rq_history_seq > - svcpt->scp_hist_seq_culled) { - svcpt->scp_hist_seq_culled = - req->rq_history_seq; - } - list_del(&req->rq_history_list); - } - - spin_unlock(&svcpt->scp_lock); - - while ((req = list_first_entry_or_null( - &rqbd->rqbd_reqs, - struct ptlrpc_request, rq_list))) { - list_del(&req->rq_list); - ptlrpc_server_free_request(req); - } - - spin_lock(&svcpt->scp_lock); - /* - * now all reqs including the embedded req has been - * disposed, schedule request buffer for re-use. - */ - LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == - 0); - list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); - } - - spin_unlock(&svcpt->scp_lock); - } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) { - /* If we are low on memory, we are not interested in history */ - list_del(&req->rq_list); - list_del_init(&req->rq_history_list); - - /* Track the highest culled req seq */ - if (req->rq_history_seq > svcpt->scp_hist_seq_culled) - svcpt->scp_hist_seq_culled = req->rq_history_seq; - - spin_unlock(&svcpt->scp_lock); - - ptlrpc_server_free_request(req); - } else { - spin_unlock(&svcpt->scp_lock); - } -} - -/** - * to finish a request: stop sending more early replies, and release - * the request. 
- */ -static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req) -{ - ptlrpc_server_hpreq_fini(req); - - if (req->rq_session.lc_thread) { - lu_context_exit(&req->rq_session); - lu_context_fini(&req->rq_session); - } - - ptlrpc_server_drop_request(req); -} - -/** - * to finish a active request: stop sending more early replies, and release - * the request. should be called after we finished handling the request. - */ -static void ptlrpc_server_finish_active_request( - struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req) -{ - spin_lock(&svcpt->scp_req_lock); - ptlrpc_nrs_req_stop_nolock(req); - svcpt->scp_nreqs_active--; - if (req->rq_hp) - svcpt->scp_nhreqs_active--; - spin_unlock(&svcpt->scp_req_lock); - - ptlrpc_nrs_req_finalize(req); - - if (req->rq_export) - class_export_rpc_dec(req->rq_export); - - ptlrpc_server_finish_request(svcpt, req); -} - -/** - * Sanity check request \a req. - * Return 0 if all is ok, error code otherwise. - */ -static int ptlrpc_check_req(struct ptlrpc_request *req) -{ - struct obd_device *obd = req->rq_export->exp_obd; - int rc = 0; - - if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) < - req->rq_export->exp_conn_cnt)) { - DEBUG_REQ(D_RPCTRACE, req, - "DROPPING req from old connection %d < %d", - lustre_msg_get_conn_cnt(req->rq_reqmsg), - req->rq_export->exp_conn_cnt); - return -EEXIST; - } - if (unlikely(!obd || obd->obd_fail)) { - /* - * Failing over, don't handle any more reqs, send - * error response instead. - */ - CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n", - req, obd ? 
obd->obd_name : "unknown"); - rc = -ENODEV; - } else if (lustre_msg_get_flags(req->rq_reqmsg) & - (MSG_REPLAY | MSG_REQ_REPLAY_DONE)) { - DEBUG_REQ(D_ERROR, req, "Invalid replay without recovery"); - class_fail_export(req->rq_export); - rc = -ENODEV; - } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0) { - DEBUG_REQ(D_ERROR, req, - "Invalid req with transno %llu without recovery", - lustre_msg_get_transno(req->rq_reqmsg)); - class_fail_export(req->rq_export); - rc = -ENODEV; - } - - if (unlikely(rc < 0)) { - req->rq_status = rc; - ptlrpc_error(req); - } - return rc; -} - -static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_at_array *array = &svcpt->scp_at_array; - __s32 next; - - if (array->paa_count == 0) { - del_timer(&svcpt->scp_at_timer); - return; - } - - /* Set timer for closest deadline */ - next = (__s32)(array->paa_deadline - ktime_get_real_seconds() - - at_early_margin); - if (next <= 0) { - ptlrpc_at_timer(&svcpt->scp_at_timer); - } else { - mod_timer(&svcpt->scp_at_timer, jiffies + next * HZ); - CDEBUG(D_INFO, "armed %s at %+ds\n", - svcpt->scp_service->srv_name, next); - } -} - -/* Add rpc to early reply check list */ -static int ptlrpc_at_add_timed(struct ptlrpc_request *req) -{ - struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; - struct ptlrpc_at_array *array = &svcpt->scp_at_array; - struct ptlrpc_request *rq = NULL; - __u32 index; - - if (AT_OFF) - return 0; - - if (req->rq_no_reply) - return 0; - - if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0) - return -ENOSYS; - - spin_lock(&svcpt->scp_at_lock); - LASSERT(list_empty(&req->rq_timed_list)); - - div_u64_rem(req->rq_deadline, array->paa_size, &index); - if (array->paa_reqs_count[index] > 0) { - /* latest rpcs will have the latest deadlines in the list, - * so search backward. 
- */ - list_for_each_entry_reverse(rq, &array->paa_reqs_array[index], - rq_timed_list) { - if (req->rq_deadline >= rq->rq_deadline) { - list_add(&req->rq_timed_list, - &rq->rq_timed_list); - break; - } - } - } - - /* Add the request at the head of the list */ - if (list_empty(&req->rq_timed_list)) - list_add(&req->rq_timed_list, &array->paa_reqs_array[index]); - - spin_lock(&req->rq_lock); - req->rq_at_linked = 1; - spin_unlock(&req->rq_lock); - req->rq_at_index = index; - array->paa_reqs_count[index]++; - array->paa_count++; - if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) { - array->paa_deadline = req->rq_deadline; - ptlrpc_at_set_timer(svcpt); - } - spin_unlock(&svcpt->scp_at_lock); - - return 0; -} - -static void -ptlrpc_at_remove_timed(struct ptlrpc_request *req) -{ - struct ptlrpc_at_array *array; - - array = &req->rq_rqbd->rqbd_svcpt->scp_at_array; - - /* NB: must call with hold svcpt::scp_at_lock */ - LASSERT(!list_empty(&req->rq_timed_list)); - list_del_init(&req->rq_timed_list); - - spin_lock(&req->rq_lock); - req->rq_at_linked = 0; - spin_unlock(&req->rq_lock); - - array->paa_reqs_count[req->rq_at_index]--; - array->paa_count--; -} - -/* - * Attempt to extend the request deadline by sending an early reply to the - * client. - */ -static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) -{ - struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; - struct ptlrpc_request *reqcopy; - struct lustre_msg *reqmsg; - long olddl = req->rq_deadline - ktime_get_real_seconds(); - time64_t newdl; - int rc; - - /* deadline is when the client expects us to reply, margin is the - * difference between clients' and servers' expectations - */ - DEBUG_REQ(D_ADAPTTO, req, - "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d", - AT_OFF ? 
"AT off - not " : "", - olddl, olddl - at_get(&svcpt->scp_at_estimate), - at_get(&svcpt->scp_at_estimate), at_extra); - - if (AT_OFF) - return 0; - - if (olddl < 0) { - DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?", - olddl, at_early_margin); - - /* Return an error so we're not re-added to the timed list. */ - return -ETIMEDOUT; - } - - if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) { - DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, but no AT support"); - return -ENOSYS; - } - - /* - * We want to extend the request deadline by at_extra seconds, - * so we set our service estimate to reflect how much time has - * passed since this request arrived plus an additional - * at_extra seconds. The client will calculate the new deadline - * based on this service estimate (plus some additional time to - * account for network latency). See ptlrpc_at_recv_early_reply - */ - at_measured(&svcpt->scp_at_estimate, at_extra + - ktime_get_real_seconds() - req->rq_arrival_time.tv_sec); - newdl = req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate); - - /* Check to see if we've actually increased the deadline - - * we may be past adaptive_max - */ - if (req->rq_deadline >= newdl) { - DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n", - olddl, newdl - ktime_get_real_seconds()); - return -ETIMEDOUT; - } - - reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS); - if (!reqcopy) - return -ENOMEM; - reqmsg = kvzalloc(req->rq_reqlen, GFP_NOFS); - if (!reqmsg) { - rc = -ENOMEM; - goto out_free; - } - - *reqcopy = *req; - reqcopy->rq_reply_state = NULL; - reqcopy->rq_rep_swab_mask = 0; - reqcopy->rq_pack_bulk = 0; - reqcopy->rq_pack_udesc = 0; - reqcopy->rq_packed_final = 0; - sptlrpc_svc_ctx_addref(reqcopy); - /* We only need the reqmsg for the magic */ - reqcopy->rq_reqmsg = reqmsg; - memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen); - - 
LASSERT(atomic_read(&req->rq_refcount)); - /** if it is last refcount then early reply isn't needed */ - if (atomic_read(&req->rq_refcount) == 1) { - DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, abort sending early reply\n"); - rc = -EINVAL; - goto out; - } - - /* Connection ref */ - reqcopy->rq_export = class_conn2export( - lustre_msg_get_handle(reqcopy->rq_reqmsg)); - if (!reqcopy->rq_export) { - rc = -ENODEV; - goto out; - } - - /* RPC ref */ - class_export_rpc_inc(reqcopy->rq_export); - if (reqcopy->rq_export->exp_obd && - reqcopy->rq_export->exp_obd->obd_fail) { - rc = -ENODEV; - goto out_put; - } - - rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY); - if (rc) - goto out_put; - - rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY); - - if (!rc) { - /* Adjust our own deadline to what we told the client */ - req->rq_deadline = newdl; - req->rq_early_count++; /* number sent, server side */ - } else { - DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc); - } - - /* Free the (early) reply state from lustre_pack_reply. 
- * (ptlrpc_send_reply takes it's own rs ref, so this is safe here) - */ - ptlrpc_req_drop_rs(reqcopy); - -out_put: - class_export_rpc_dec(reqcopy->rq_export); - class_export_put(reqcopy->rq_export); -out: - sptlrpc_svc_ctx_decref(reqcopy); - kvfree(reqmsg); -out_free: - ptlrpc_request_cache_free(reqcopy); - return rc; -} - -/* Send early replies to everybody expiring within at_early_margin - * asking for at_extra time - */ -static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_at_array *array = &svcpt->scp_at_array; - struct ptlrpc_request *rq, *n; - struct list_head work_list; - __u32 index, count; - time64_t deadline; - time64_t now = ktime_get_real_seconds(); - long delay; - int first, counter = 0; - - spin_lock(&svcpt->scp_at_lock); - if (svcpt->scp_at_check == 0) { - spin_unlock(&svcpt->scp_at_lock); - return; - } - delay = jiffies - svcpt->scp_at_checktime; - svcpt->scp_at_check = 0; - - if (array->paa_count == 0) { - spin_unlock(&svcpt->scp_at_lock); - return; - } - - /* The timer went off, but maybe the nearest rpc already completed. */ - first = array->paa_deadline - now; - if (first > at_early_margin) { - /* We've still got plenty of time. Reset the timer. */ - ptlrpc_at_set_timer(svcpt); - spin_unlock(&svcpt->scp_at_lock); - return; - } - - /* We're close to a timeout, and we don't know how much longer the - * server will take. Send early replies to everyone expiring soon. 
- */ - INIT_LIST_HEAD(&work_list); - deadline = -1; - div_u64_rem(array->paa_deadline, array->paa_size, &index); - count = array->paa_count; - while (count > 0) { - count -= array->paa_reqs_count[index]; - list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index], - rq_timed_list) { - if (rq->rq_deadline > now + at_early_margin) { - /* update the earliest deadline */ - if (deadline == -1 || - rq->rq_deadline < deadline) - deadline = rq->rq_deadline; - break; - } - - ptlrpc_at_remove_timed(rq); - /** - * ptlrpc_server_drop_request() may drop - * refcount to 0 already. Let's check this and - * don't add entry to work_list - */ - if (likely(atomic_inc_not_zero(&rq->rq_refcount))) - list_add(&rq->rq_timed_list, &work_list); - counter++; - } - - if (++index >= array->paa_size) - index = 0; - } - array->paa_deadline = deadline; - /* we have a new earliest deadline, restart the timer */ - ptlrpc_at_set_timer(svcpt); - - spin_unlock(&svcpt->scp_at_lock); - - CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early replies\n", - first, at_extra, counter); - if (first < 0) { - /* We're already past request deadlines before we even get a - * chance to send early replies - */ - LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n", - svcpt->scp_service->srv_name); - CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n", - counter, svcpt->scp_nreqs_incoming, - svcpt->scp_nreqs_active, - at_get(&svcpt->scp_at_estimate), delay); - } - - /* we took additional refcount so entries can't be deleted from list, no - * locking is needed - */ - while (!list_empty(&work_list)) { - rq = list_entry(work_list.next, struct ptlrpc_request, - rq_timed_list); - list_del_init(&rq->rq_timed_list); - - if (ptlrpc_at_send_early_reply(rq) == 0) - ptlrpc_at_add_timed(rq); - - ptlrpc_server_drop_request(rq); - } -} - -/** - * Put the request to the export list if the request may become - * a high priority one. 
- */ -static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req) -{ - int rc = 0; - - if (svcpt->scp_service->srv_ops.so_hpreq_handler) { - rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req); - if (rc < 0) - return rc; - LASSERT(rc == 0); - } - if (req->rq_export && req->rq_ops) { - /* Perform request specific check. We should do this check - * before the request is added into exp_hp_rpcs list otherwise - * it may hit swab race at LU-1044. - */ - if (req->rq_ops->hpreq_check) { - rc = req->rq_ops->hpreq_check(req); - if (rc == -ESTALE) { - req->rq_status = rc; - ptlrpc_error(req); - } - /** can only return error, - * 0 for normal request, - * or 1 for high priority request - */ - LASSERT(rc <= 1); - } - - spin_lock_bh(&req->rq_export->exp_rpc_lock); - list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs); - spin_unlock_bh(&req->rq_export->exp_rpc_lock); - } - - ptlrpc_nrs_req_initialize(svcpt, req, rc); - - return rc; -} - -/** Remove the request from the export list. */ -static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req) -{ - if (req->rq_export && req->rq_ops) { - /* refresh lock timeout again so that client has more - * room to send lock cancel RPC. 
- */ - if (req->rq_ops->hpreq_fini) - req->rq_ops->hpreq_fini(req); - - spin_lock_bh(&req->rq_export->exp_rpc_lock); - list_del_init(&req->rq_exp_list); - spin_unlock_bh(&req->rq_export->exp_rpc_lock); - } -} - -static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt, - struct ptlrpc_request *req) -{ - int rc; - - rc = ptlrpc_server_hpreq_init(svcpt, req); - if (rc < 0) - return rc; - - ptlrpc_nrs_req_add(svcpt, req, !!rc); - - return 0; -} - -/** - * Allow to handle high priority request - * User can call it w/o any lock but need to hold - * ptlrpc_service_part::scp_req_lock to get reliable result - */ -static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt, - bool force) -{ - int running = svcpt->scp_nthrs_running; - - if (!nrs_svcpt_has_hp(svcpt)) - return false; - - if (force) - return true; - - if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL && - CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) { - /* leave just 1 thread for normal RPCs */ - running = PTLRPC_NTHRS_INIT; - if (svcpt->scp_service->srv_ops.so_hpreq_handler) - running += 1; - } - - if (svcpt->scp_nreqs_active >= running - 1) - return false; - - if (svcpt->scp_nhreqs_active == 0) - return true; - - return !ptlrpc_nrs_req_pending_nolock(svcpt, false) || - svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio; -} - -static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt, - bool force) -{ - return ptlrpc_server_allow_high(svcpt, force) && - ptlrpc_nrs_req_pending_nolock(svcpt, true); -} - -/** - * Only allow normal priority requests on a service that has a high-priority - * queue if forced (i.e. cleanup), if there are other high priority requests - * already being processed (i.e. those threads can service more high-priority - * requests), or if there are enough idle threads that a later thread can do - * a high priority request. 
- * User can call it w/o any lock but need to hold - * ptlrpc_service_part::scp_req_lock to get reliable result - */ -static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt, - bool force) -{ - int running = svcpt->scp_nthrs_running; - - if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL && - CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) { - /* leave just 1 thread for normal RPCs */ - running = PTLRPC_NTHRS_INIT; - if (svcpt->scp_service->srv_ops.so_hpreq_handler) - running += 1; - } - - if (force || - svcpt->scp_nreqs_active < running - 2) - return true; - - if (svcpt->scp_nreqs_active >= running - 1) - return false; - - return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt); -} - -static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt, - bool force) -{ - return ptlrpc_server_allow_normal(svcpt, force) && - ptlrpc_nrs_req_pending_nolock(svcpt, false); -} - -/** - * Returns true if there are requests available in incoming - * request queue for processing and it is allowed to fetch them. - * User can call it w/o any lock but need to hold ptlrpc_service::scp_req_lock - * to get reliable result - * \see ptlrpc_server_allow_normal - * \see ptlrpc_server_allow high - */ -static inline bool -ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force) -{ - return ptlrpc_server_high_pending(svcpt, force) || - ptlrpc_server_normal_pending(svcpt, force); -} - -/** - * Fetch a request for processing from queue of unprocessed requests. - * Favors high-priority requests. - * Returns a pointer to fetched request. 
- */ -static struct ptlrpc_request * -ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force) -{ - struct ptlrpc_request *req = NULL; - - spin_lock(&svcpt->scp_req_lock); - - if (ptlrpc_server_high_pending(svcpt, force)) { - req = ptlrpc_nrs_req_get_nolock(svcpt, true, force); - if (req) { - svcpt->scp_hreq_count++; - goto got_request; - } - } - - if (ptlrpc_server_normal_pending(svcpt, force)) { - req = ptlrpc_nrs_req_get_nolock(svcpt, false, force); - if (req) { - svcpt->scp_hreq_count = 0; - goto got_request; - } - } - - spin_unlock(&svcpt->scp_req_lock); - return NULL; - -got_request: - svcpt->scp_nreqs_active++; - if (req->rq_hp) - svcpt->scp_nhreqs_active++; - - spin_unlock(&svcpt->scp_req_lock); - - if (likely(req->rq_export)) - class_export_rpc_inc(req->rq_export); - - return req; -} - -/** - * Handle freshly incoming reqs, add to timed early reply list, - * pass on to regular request queue. - * All incoming requests pass through here before getting into - * ptlrpc_server_handle_req later on. 
- */ -static int -ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt, - struct ptlrpc_thread *thread) -{ - struct ptlrpc_service *svc = svcpt->scp_service; - struct ptlrpc_request *req; - __u32 deadline; - int rc; - - spin_lock(&svcpt->scp_lock); - if (list_empty(&svcpt->scp_req_incoming)) { - spin_unlock(&svcpt->scp_lock); - return 0; - } - - req = list_entry(svcpt->scp_req_incoming.next, - struct ptlrpc_request, rq_list); - list_del_init(&req->rq_list); - svcpt->scp_nreqs_incoming--; - /* Consider this still a "queued" request as far as stats are - * concerned - */ - spin_unlock(&svcpt->scp_lock); - - /* go through security check/transform */ - rc = sptlrpc_svc_unwrap_request(req); - switch (rc) { - case SECSVC_OK: - break; - case SECSVC_COMPLETE: - target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET); - goto err_req; - case SECSVC_DROP: - goto err_req; - default: - LBUG(); - } - - /* - * for null-flavored rpc, msg has been unpacked by sptlrpc, although - * redo it wouldn't be harmful. 
- */ - if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) { - rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen); - if (rc != 0) { - CERROR("error unpacking request: ptl %d from %s x%llu\n", - svc->srv_req_portal, libcfs_id2str(req->rq_peer), - req->rq_xid); - goto err_req; - } - } - - rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF); - if (rc) { - CERROR("error unpacking ptlrpc body: ptl %d from %s x%llu\n", - svc->srv_req_portal, libcfs_id2str(req->rq_peer), - req->rq_xid); - goto err_req; - } - - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) && - lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) { - CERROR("drop incoming rpc opc %u, x%llu\n", - cfs_fail_val, req->rq_xid); - goto err_req; - } - - rc = -EINVAL; - if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) { - CERROR("wrong packet type received (type=%u) from %s\n", - lustre_msg_get_type(req->rq_reqmsg), - libcfs_id2str(req->rq_peer)); - goto err_req; - } - - switch (lustre_msg_get_opc(req->rq_reqmsg)) { - case MDS_WRITEPAGE: - case OST_WRITE: - req->rq_bulk_write = 1; - break; - case MDS_READPAGE: - case OST_READ: - case MGS_CONFIG_READ: - req->rq_bulk_read = 1; - break; - } - - CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid); - - req->rq_export = class_conn2export( - lustre_msg_get_handle(req->rq_reqmsg)); - if (req->rq_export) { - rc = ptlrpc_check_req(req); - if (rc == 0) { - rc = sptlrpc_target_export_check(req->rq_export, req); - if (rc) - DEBUG_REQ(D_ERROR, req, "DROPPING req with illegal security flavor,"); - } - - if (rc) - goto err_req; - } - - /* req_in handling should/must be fast */ - if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5) - DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds", - (s64)(ktime_get_real_seconds() - - req->rq_arrival_time.tv_sec)); - - /* Set rpc server deadline and add it to the timed list */ - deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) & - MSGHDR_AT_SUPPORT) ? 
- /* The max time the client expects us to take */ - lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout; - req->rq_deadline = req->rq_arrival_time.tv_sec + deadline; - if (unlikely(deadline == 0)) { - DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout"); - goto err_req; - } - - req->rq_svc_thread = thread; - if (thread) { - /* initialize request session, it is needed for request - * processing by target - */ - rc = lu_context_init(&req->rq_session, - LCT_SERVER_SESSION | LCT_NOREF); - if (rc) { - CERROR("%s: failure to initialize session: rc = %d\n", - thread->t_name, rc); - goto err_req; - } - req->rq_session.lc_thread = thread; - lu_context_enter(&req->rq_session); - req->rq_svc_thread->t_env->le_ses = &req->rq_session; - } - - ptlrpc_at_add_timed(req); - - /* Move it over to the request processing queue */ - rc = ptlrpc_server_request_add(svcpt, req); - if (rc) - goto err_req; - - wake_up(&svcpt->scp_waitq); - return 1; - -err_req: - ptlrpc_server_finish_request(svcpt, req); - - return 1; -} - -/** - * Main incoming request handling logic. - * Calls handler function from service to do actual processing. 
- */ -static int -ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, - struct ptlrpc_thread *thread) -{ - struct ptlrpc_service *svc = svcpt->scp_service; - struct ptlrpc_request *request; - struct timespec64 work_start; - struct timespec64 work_end; - struct timespec64 timediff; - struct timespec64 arrived; - unsigned long timediff_usecs; - unsigned long arrived_usecs; - int fail_opc = 0; - - request = ptlrpc_server_request_get(svcpt, false); - if (!request) - return 0; - - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT)) - fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT; - else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT)) - fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT; - - if (unlikely(fail_opc)) { - if (request->rq_export && request->rq_ops) - OBD_FAIL_TIMEOUT(fail_opc, 4); - } - - ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET); - - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG)) - libcfs_debug_dumplog(); - - ktime_get_real_ts64(&work_start); - timediff = timespec64_sub(work_start, request->rq_arrival_time); - timediff_usecs = timediff.tv_sec * USEC_PER_SEC + - timediff.tv_nsec / NSEC_PER_USEC; - if (likely(svc->srv_stats)) { - lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR, - timediff_usecs); - lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR, - svcpt->scp_nreqs_incoming); - lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR, - svcpt->scp_nreqs_active); - lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT, - at_get(&svcpt->scp_at_estimate)); - } - - if (likely(request->rq_export)) { - if (unlikely(ptlrpc_check_req(request))) - goto put_conn; - } - - /* Discard requests queued for longer than the deadline. - * The deadline is increased if we send an early reply. 
- */ - if (ktime_get_real_seconds() > request->rq_deadline) { - DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n", - libcfs_id2str(request->rq_peer), - request->rq_deadline - - request->rq_arrival_time.tv_sec, - ktime_get_real_seconds() - request->rq_deadline); - goto put_conn; - } - - CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d\n", - current->comm, - (request->rq_export ? - (char *)request->rq_export->exp_client_uuid.uuid : "0"), - (request->rq_export ? - atomic_read(&request->rq_export->exp_refcount) : -99), - lustre_msg_get_status(request->rq_reqmsg), request->rq_xid, - libcfs_id2str(request->rq_peer), - lustre_msg_get_opc(request->rq_reqmsg)); - - if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING) - CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val); - - CDEBUG(D_NET, "got req %llu\n", request->rq_xid); - - /* re-assign request and sesson thread to the current one */ - request->rq_svc_thread = thread; - if (thread) { - LASSERT(request->rq_session.lc_thread); - request->rq_session.lc_thread = thread; - request->rq_session.lc_cookie = 0x55; - thread->t_env->le_ses = &request->rq_session; - } - svc->srv_ops.so_req_handler(request); - - ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE); - -put_conn: - if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) { - DEBUG_REQ(D_WARNING, request, - "Request took longer than estimated (%lld:%llds); " - "client may timeout.", - (s64)request->rq_deadline - - request->rq_arrival_time.tv_sec, - (s64)ktime_get_real_seconds() - request->rq_deadline); - } - - ktime_get_real_ts64(&work_end); - timediff = timespec64_sub(work_end, work_start); - timediff_usecs = timediff.tv_sec * USEC_PER_SEC + - timediff.tv_nsec / NSEC_PER_USEC; - arrived = timespec64_sub(work_end, request->rq_arrival_time); - arrived_usecs = arrived.tv_sec * USEC_PER_SEC + - arrived.tv_nsec / NSEC_PER_USEC; - CDEBUG(D_RPCTRACE, "Handled RPC 
pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n", - current->comm, - (request->rq_export ? - (char *)request->rq_export->exp_client_uuid.uuid : "0"), - (request->rq_export ? - atomic_read(&request->rq_export->exp_refcount) : -99), - lustre_msg_get_status(request->rq_reqmsg), - request->rq_xid, - libcfs_id2str(request->rq_peer), - lustre_msg_get_opc(request->rq_reqmsg), - timediff_usecs, - arrived_usecs, - (request->rq_repmsg ? - lustre_msg_get_transno(request->rq_repmsg) : - request->rq_transno), - request->rq_status, - (request->rq_repmsg ? - lustre_msg_get_status(request->rq_repmsg) : -999)); - if (likely(svc->srv_stats && request->rq_reqmsg)) { - __u32 op = lustre_msg_get_opc(request->rq_reqmsg); - int opc = opcode_offset(op); - - if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) { - LASSERT(opc < LUSTRE_MAX_OPCODES); - lprocfs_counter_add(svc->srv_stats, - opc + EXTRA_MAX_OPCODES, - timediff_usecs); - } - } - if (unlikely(request->rq_early_count)) { - DEBUG_REQ(D_ADAPTTO, request, - "sent %d early replies before finishing in %llds", - request->rq_early_count, - (s64)work_end.tv_sec - - request->rq_arrival_time.tv_sec); - } - - ptlrpc_server_finish_active_request(svcpt, request); - - return 1; -} - -/** - * An internal function to process a single reply state object. 
- */ -static int -ptlrpc_handle_rs(struct ptlrpc_reply_state *rs) -{ - struct ptlrpc_service_part *svcpt = rs->rs_svcpt; - struct ptlrpc_service *svc = svcpt->scp_service; - struct obd_export *exp; - int nlocks; - int been_handled; - - exp = rs->rs_export; - - LASSERT(rs->rs_difficult); - LASSERT(rs->rs_scheduled); - LASSERT(list_empty(&rs->rs_list)); - - spin_lock(&exp->exp_lock); - /* Noop if removed already */ - list_del_init(&rs->rs_exp_list); - spin_unlock(&exp->exp_lock); - - /* The disk commit callback holds exp_uncommitted_replies_lock while it - * iterates over newly committed replies, removing them from - * exp_uncommitted_replies. It then drops this lock and schedules the - * replies it found for handling here. - * - * We can avoid contention for exp_uncommitted_replies_lock between the - * HRT threads and further commit callbacks by checking rs_committed - * which is set in the commit callback while it holds both - * rs_lock and exp_uncommitted_reples. - * - * If we see rs_committed clear, the commit callback _may_ not have - * handled this reply yet and we race with it to grab - * exp_uncommitted_replies_lock before removing the reply from - * exp_uncommitted_replies. Note that if we lose the race and the - * reply has already been removed, list_del_init() is a noop. - * - * If we see rs_committed set, we know the commit callback is handling, - * or has handled this reply since store reordering might allow us to - * see rs_committed set out of sequence. But since this is done - * holding rs_lock, we can be sure it has all completed once we hold - * rs_lock, which we do right next. - */ - if (!rs->rs_committed) { - spin_lock(&exp->exp_uncommitted_replies_lock); - list_del_init(&rs->rs_obd_list); - spin_unlock(&exp->exp_uncommitted_replies_lock); - } - - spin_lock(&rs->rs_lock); - - been_handled = rs->rs_handled; - rs->rs_handled = 1; - - nlocks = rs->rs_nlocks; /* atomic "steal", but */ - rs->rs_nlocks = 0; /* locks still on rs_locks! 
*/ - - if (nlocks == 0 && !been_handled) { - /* If we see this, we should already have seen the warning - * in mds_steal_ack_locks() - */ - CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n", - rs, - rs->rs_xid, rs->rs_transno, rs->rs_opc, - libcfs_nid2str(exp->exp_connection->c_peer.nid)); - } - - if ((!been_handled && rs->rs_on_net) || nlocks > 0) { - spin_unlock(&rs->rs_lock); - - if (!been_handled && rs->rs_on_net) { - LNetMDUnlink(rs->rs_md_h); - /* Ignore return code; we're racing with completion */ - } - - while (nlocks-- > 0) - ldlm_lock_decref(&rs->rs_locks[nlocks], - rs->rs_modes[nlocks]); - - spin_lock(&rs->rs_lock); - } - - rs->rs_scheduled = 0; - - if (!rs->rs_on_net) { - /* Off the net */ - spin_unlock(&rs->rs_lock); - - class_export_put(exp); - rs->rs_export = NULL; - ptlrpc_rs_decref(rs); - if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) && - svc->srv_is_stopping) - wake_up_all(&svcpt->scp_waitq); - return 1; - } - - /* still on the net; callback will schedule */ - spin_unlock(&rs->rs_lock); - return 1; -} - -static void -ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt) -{ - int avail = svcpt->scp_nrqbds_posted; - int low_water = test_req_buffer_pressure ? 0 : - svcpt->scp_service->srv_nbuf_per_group / 2; - - /* NB I'm not locking; just looking. */ - - /* CAVEAT EMPTOR: We might be allocating buffers here because we've - * allowed the request history to grow out of control. We could put a - * sanity check on that here and cull some history if we need the - * space. 
- */ - - if (avail <= low_water) - ptlrpc_grow_req_bufs(svcpt, 1); - - if (svcpt->scp_service->srv_stats) { - lprocfs_counter_add(svcpt->scp_service->srv_stats, - PTLRPC_REQBUF_AVAIL_CNTR, avail); - } -} - -static inline int -ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt) -{ - return svcpt->scp_nreqs_active < - svcpt->scp_nthrs_running - 1 - - (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL); -} - -/** - * allowed to create more threads - * user can call it w/o any lock but need to hold - * ptlrpc_service_part::scp_lock to get reliable result - */ -static inline int -ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt) -{ - return svcpt->scp_nthrs_running + - svcpt->scp_nthrs_starting < - svcpt->scp_service->srv_nthrs_cpt_limit; -} - -/** - * too many requests and allowed to create more threads - */ -static inline int -ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt) -{ - return !ptlrpc_threads_enough(svcpt) && - ptlrpc_threads_increasable(svcpt); -} - -static inline int -ptlrpc_thread_stopping(struct ptlrpc_thread *thread) -{ - return thread_is_stopping(thread) || - thread->t_svcpt->scp_service->srv_is_stopping; -} - -static inline int -ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt) -{ - return !list_empty(&svcpt->scp_rqbd_idle) && - svcpt->scp_rqbd_timeout == 0; -} - -static inline int -ptlrpc_at_check(struct ptlrpc_service_part *svcpt) -{ - return svcpt->scp_at_check; -} - -/** - * requests wait on preprocessing - * user can call it w/o any lock but need to hold - * ptlrpc_service_part::scp_lock to get reliable result - */ -static inline int -ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt) -{ - return !list_empty(&svcpt->scp_req_incoming); -} - -/* We perfer lifo queuing, but kernel doesn't provide that yet. 
*/ -#ifndef wait_event_idle_exclusive_lifo -#define wait_event_idle_exclusive_lifo wait_event_idle_exclusive -#define wait_event_idle_exclusive_lifo_timeout wait_event_idle_exclusive_timeout -#endif - -static __attribute__((__noinline__)) int -ptlrpc_wait_event(struct ptlrpc_service_part *svcpt, - struct ptlrpc_thread *thread) -{ - /* Don't exit while there are replies to be handled */ - - /* XXX: Add this back when libcfs watchdog is merged upstream - lc_watchdog_disable(thread->t_watchdog); - */ - - cond_resched(); - - if (svcpt->scp_rqbd_timeout == 0) - wait_event_idle_exclusive_lifo( - svcpt->scp_waitq, - ptlrpc_thread_stopping(thread) || - ptlrpc_server_request_incoming(svcpt) || - ptlrpc_server_request_pending(svcpt, - false) || - ptlrpc_rqbd_pending(svcpt) || - ptlrpc_at_check(svcpt)); - else if (0 == wait_event_idle_exclusive_lifo_timeout( - svcpt->scp_waitq, - ptlrpc_thread_stopping(thread) || - ptlrpc_server_request_incoming(svcpt) || - ptlrpc_server_request_pending(svcpt, - false) || - ptlrpc_rqbd_pending(svcpt) || - ptlrpc_at_check(svcpt), - svcpt->scp_rqbd_timeout)) - svcpt->scp_rqbd_timeout = 0; - - if (ptlrpc_thread_stopping(thread)) - return -EINTR; - - /* - lc_watchdog_touch(thread->t_watchdog, - ptlrpc_server_get_timeout(svcpt)); - */ - return 0; -} - -/** - * Main thread body for service threads. - * Waits in a loop waiting for new requests to process to appear. - * Every time an incoming requests is added to its queue, a waitq - * is woken up and one of the threads will handle it. 
- */ -static int ptlrpc_main(void *arg) -{ - struct ptlrpc_thread *thread = arg; - struct ptlrpc_service_part *svcpt = thread->t_svcpt; - struct ptlrpc_service *svc = svcpt->scp_service; - struct ptlrpc_reply_state *rs; - struct group_info *ginfo = NULL; - struct lu_env *env; - int counter = 0, rc = 0; - - thread->t_pid = current->pid; - unshare_fs_struct(); - - /* NB: we will call cfs_cpt_bind() for all threads, because we - * might want to run lustre server only on a subset of system CPUs, - * in that case ->scp_cpt is CFS_CPT_ANY - */ - rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt); - if (rc != 0) { - CWARN("%s: failed to bind %s on CPT %d\n", - svc->srv_name, thread->t_name, svcpt->scp_cpt); - } - - ginfo = groups_alloc(0); - if (!ginfo) { - rc = -ENOMEM; - goto out; - } - - set_current_groups(ginfo); - put_group_info(ginfo); - - if (svc->srv_ops.so_thr_init) { - rc = svc->srv_ops.so_thr_init(thread); - if (rc) - goto out; - } - - env = kzalloc(sizeof(*env), GFP_KERNEL); - if (!env) { - rc = -ENOMEM; - goto out_srv_fini; - } - - rc = lu_context_init(&env->le_ctx, - svc->srv_ctx_tags | LCT_REMEMBER | LCT_NOREF); - if (rc) - goto out_srv_fini; - - thread->t_env = env; - env->le_ctx.lc_thread = thread; - env->le_ctx.lc_cookie = 0x6; - - while (!list_empty(&svcpt->scp_rqbd_idle)) { - rc = ptlrpc_server_post_idle_rqbds(svcpt); - if (rc >= 0) - continue; - - CERROR("Failed to post rqbd for %s on CPT %d: %d\n", - svc->srv_name, svcpt->scp_cpt, rc); - goto out_srv_fini; - } - - /* Alloc reply state structure for this one */ - rs = kvzalloc(svc->srv_max_reply_size, GFP_KERNEL); - if (!rs) { - rc = -ENOMEM; - goto out_srv_fini; - } - - spin_lock(&svcpt->scp_lock); - - LASSERT(thread_is_starting(thread)); - thread_clear_flags(thread, SVC_STARTING); - - LASSERT(svcpt->scp_nthrs_starting == 1); - svcpt->scp_nthrs_starting--; - - /* SVC_STOPPING may already be set here if someone else is trying - * to stop the service while this new thread has been dynamically - * 
forked. We still set SVC_RUNNING to let our creator know that - * we are now running, however we will exit as soon as possible - */ - thread_add_flags(thread, SVC_RUNNING); - svcpt->scp_nthrs_running++; - spin_unlock(&svcpt->scp_lock); - - /* wake up our creator in case he's still waiting. */ - wake_up(&thread->t_ctl_waitq); - - /* - thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt), - NULL, NULL); - */ - - spin_lock(&svcpt->scp_rep_lock); - list_add(&rs->rs_list, &svcpt->scp_rep_idle); - wake_up(&svcpt->scp_rep_waitq); - spin_unlock(&svcpt->scp_rep_lock); - - CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id, - svcpt->scp_nthrs_running); - - /* XXX maintain a list of all managed devices: insert here */ - while (!ptlrpc_thread_stopping(thread)) { - if (ptlrpc_wait_event(svcpt, thread)) - break; - - ptlrpc_check_rqbd_pool(svcpt); - - if (ptlrpc_threads_need_create(svcpt)) { - /* Ignore return code - we tried... */ - ptlrpc_start_thread(svcpt, 0); - } - - /* Process all incoming reqs before handling any */ - if (ptlrpc_server_request_incoming(svcpt)) { - lu_context_enter(&env->le_ctx); - env->le_ses = NULL; - ptlrpc_server_handle_req_in(svcpt, thread); - lu_context_exit(&env->le_ctx); - - /* but limit ourselves in case of flood */ - if (counter++ < 100) - continue; - counter = 0; - } - - if (ptlrpc_at_check(svcpt)) - ptlrpc_at_check_timed(svcpt); - - if (ptlrpc_server_request_pending(svcpt, false)) { - lu_context_enter(&env->le_ctx); - ptlrpc_server_handle_request(svcpt, thread); - lu_context_exit(&env->le_ctx); - } - - if (ptlrpc_rqbd_pending(svcpt) && - ptlrpc_server_post_idle_rqbds(svcpt) < 0) { - /* I just failed to repost request buffers. 
- * Wait for a timeout (unless something else - * happens) before I try again - */ - svcpt->scp_rqbd_timeout = HZ / 10; - CDEBUG(D_RPCTRACE, "Posted buffers: %d\n", - svcpt->scp_nrqbds_posted); - } - } - - /* - lc_watchdog_delete(thread->t_watchdog); - thread->t_watchdog = NULL; - */ - -out_srv_fini: - /* - * deconstruct service specific state created by ptlrpc_start_thread() - */ - if (svc->srv_ops.so_thr_done) - svc->srv_ops.so_thr_done(thread); - - if (env) { - lu_context_fini(&env->le_ctx); - kfree(env); - } -out: - CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n", - thread, thread->t_pid, thread->t_id, rc); - - spin_lock(&svcpt->scp_lock); - if (thread_test_and_clear_flags(thread, SVC_STARTING)) - svcpt->scp_nthrs_starting--; - - if (thread_test_and_clear_flags(thread, SVC_RUNNING)) { - /* must know immediately */ - svcpt->scp_nthrs_running--; - } - - thread->t_id = rc; - thread_add_flags(thread, SVC_STOPPED); - - wake_up(&thread->t_ctl_waitq); - spin_unlock(&svcpt->scp_lock); - - return rc; -} - -static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt, - struct list_head *replies) -{ - int result; - - spin_lock(&hrt->hrt_lock); - - list_splice_init(&hrt->hrt_queue, replies); - result = ptlrpc_hr.hr_stopping || !list_empty(replies); - - spin_unlock(&hrt->hrt_lock); - return result; -} - -/** - * Main body of "handle reply" function. 
- * It processes acked reply states - */ -static int ptlrpc_hr_main(void *arg) -{ - struct ptlrpc_hr_thread *hrt = arg; - struct ptlrpc_hr_partition *hrp = hrt->hrt_partition; - LIST_HEAD(replies); - char threadname[20]; - int rc; - - snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d", - hrp->hrp_cpt, hrt->hrt_id); - unshare_fs_struct(); - - rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt); - if (rc != 0) { - CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n", - threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc); - } - - atomic_inc(&hrp->hrp_nstarted); - wake_up(&ptlrpc_hr.hr_waitq); - - while (!ptlrpc_hr.hr_stopping) { - wait_event_idle(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies)); - - while (!list_empty(&replies)) { - struct ptlrpc_reply_state *rs; - - rs = list_entry(replies.prev, struct ptlrpc_reply_state, - rs_list); - list_del_init(&rs->rs_list); - ptlrpc_handle_rs(rs); - } - } - - atomic_inc(&hrp->hrp_nstopped); - wake_up(&ptlrpc_hr.hr_waitq); - - return 0; -} - -static void ptlrpc_stop_hr_threads(void) -{ - struct ptlrpc_hr_partition *hrp; - int i; - int j; - - ptlrpc_hr.hr_stopping = 1; - - cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - if (!hrp->hrp_thrs) - continue; /* uninitialized */ - for (j = 0; j < hrp->hrp_nthrs; j++) - wake_up_all(&hrp->hrp_thrs[j].hrt_waitq); - } - - cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - if (!hrp->hrp_thrs) - continue; /* uninitialized */ - wait_event(ptlrpc_hr.hr_waitq, - atomic_read(&hrp->hrp_nstopped) == - atomic_read(&hrp->hrp_nstarted)); - } -} - -static int ptlrpc_start_hr_threads(void) -{ - struct ptlrpc_hr_partition *hrp; - int i; - int j; - - cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - int rc = 0; - - for (j = 0; j < hrp->hrp_nthrs; j++) { - struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j]; - struct task_struct *task; - - task = kthread_run(ptlrpc_hr_main, - &hrp->hrp_thrs[j], - "ptlrpc_hr%02d_%03d", - hrp->hrp_cpt, hrt->hrt_id); - if 
(IS_ERR(task)) { - rc = PTR_ERR(task); - break; - } - } - wait_event(ptlrpc_hr.hr_waitq, - atomic_read(&hrp->hrp_nstarted) == j); - - if (rc < 0) { - CERROR("cannot start reply handler thread %d:%d: rc = %d\n", - i, j, rc); - ptlrpc_stop_hr_threads(); - return rc; - } - } - return 0; -} - -static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_thread *thread; - LIST_HEAD(zombie); - - CDEBUG(D_INFO, "Stopping threads for service %s\n", - svcpt->scp_service->srv_name); - - spin_lock(&svcpt->scp_lock); - /* let the thread know that we would like it to stop asap */ - list_for_each_entry(thread, &svcpt->scp_threads, t_link) { - CDEBUG(D_INFO, "Stopping thread %s #%u\n", - svcpt->scp_service->srv_thread_name, thread->t_id); - thread_add_flags(thread, SVC_STOPPING); - } - - wake_up_all(&svcpt->scp_waitq); - - while (!list_empty(&svcpt->scp_threads)) { - thread = list_entry(svcpt->scp_threads.next, - struct ptlrpc_thread, t_link); - if (thread_is_stopped(thread)) { - list_del(&thread->t_link); - list_add(&thread->t_link, &zombie); - continue; - } - spin_unlock(&svcpt->scp_lock); - - CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n", - svcpt->scp_service->srv_thread_name, thread->t_id); - wait_event_idle(thread->t_ctl_waitq, - thread_is_stopped(thread)); - - spin_lock(&svcpt->scp_lock); - } - - spin_unlock(&svcpt->scp_lock); - - while (!list_empty(&zombie)) { - thread = list_entry(zombie.next, - struct ptlrpc_thread, t_link); - list_del(&thread->t_link); - kfree(thread); - } -} - -/** - * Stops all threads of a particular service \a svc - */ -static void ptlrpc_stop_all_threads(struct ptlrpc_service *svc) -{ - struct ptlrpc_service_part *svcpt; - int i; - - ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service) - ptlrpc_svcpt_stop_threads(svcpt); - } -} - -int ptlrpc_start_threads(struct ptlrpc_service *svc) -{ - int rc = 0; - int i; - int j; - - /* We require 2 threads min, see note in ptlrpc_server_handle_request 
*/ - LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT); - - for (i = 0; i < svc->srv_ncpts; i++) { - for (j = 0; j < svc->srv_nthrs_cpt_init; j++) { - rc = ptlrpc_start_thread(svc->srv_parts[i], 1); - if (rc == 0) - continue; - - if (rc != -EMFILE) - goto failed; - /* We have enough threads, don't start more. b=15759 */ - break; - } - } - - return 0; - failed: - CERROR("cannot start %s thread #%d_%d: rc %d\n", - svc->srv_thread_name, i, j, rc); - ptlrpc_stop_all_threads(svc); - return rc; -} - -int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) -{ - struct ptlrpc_thread *thread; - struct ptlrpc_service *svc; - struct task_struct *task; - int rc; - - svc = svcpt->scp_service; - - CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n", - svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running, - svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit); - - again: - if (unlikely(svc->srv_is_stopping)) - return -ESRCH; - - if (!ptlrpc_threads_increasable(svcpt) || - (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) && - svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1)) - return -EMFILE; - - thread = kzalloc_node(sizeof(*thread), GFP_NOFS, - cfs_cpt_spread_node(svc->srv_cptable, - svcpt->scp_cpt)); - if (!thread) - return -ENOMEM; - init_waitqueue_head(&thread->t_ctl_waitq); - - spin_lock(&svcpt->scp_lock); - if (!ptlrpc_threads_increasable(svcpt)) { - spin_unlock(&svcpt->scp_lock); - kfree(thread); - return -EMFILE; - } - - if (svcpt->scp_nthrs_starting != 0) { - /* serialize starting because some modules (obdfilter) - * might require unique and contiguous t_id - */ - LASSERT(svcpt->scp_nthrs_starting == 1); - spin_unlock(&svcpt->scp_lock); - kfree(thread); - if (wait) { - CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n", - svc->srv_thread_name, svcpt->scp_thr_nextid); - schedule(); - goto again; - } - - CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n", - svc->srv_thread_name, svcpt->scp_thr_nextid); - return -EAGAIN; - } - - 
svcpt->scp_nthrs_starting++; - thread->t_id = svcpt->scp_thr_nextid++; - thread_add_flags(thread, SVC_STARTING); - thread->t_svcpt = svcpt; - - list_add(&thread->t_link, &svcpt->scp_threads); - spin_unlock(&svcpt->scp_lock); - - if (svcpt->scp_cpt >= 0) { - snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d", - svc->srv_thread_name, svcpt->scp_cpt, thread->t_id); - } else { - snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d", - svc->srv_thread_name, thread->t_id); - } - - CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); - task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name); - if (IS_ERR(task)) { - rc = PTR_ERR(task); - CERROR("cannot start thread '%s': rc = %d\n", - thread->t_name, rc); - spin_lock(&svcpt->scp_lock); - --svcpt->scp_nthrs_starting; - if (thread_is_stopping(thread)) { - /* this ptlrpc_thread is being handled - * by ptlrpc_svcpt_stop_threads now - */ - thread_add_flags(thread, SVC_STOPPED); - wake_up(&thread->t_ctl_waitq); - spin_unlock(&svcpt->scp_lock); - } else { - list_del(&thread->t_link); - spin_unlock(&svcpt->scp_lock); - kfree(thread); - } - return rc; - } - - if (!wait) - return 0; - - wait_event_idle(thread->t_ctl_waitq, - thread_is_running(thread) || thread_is_stopped(thread)); - - rc = thread_is_stopped(thread) ? 
thread->t_id : 0; - return rc; -} - -int ptlrpc_hr_init(void) -{ - struct ptlrpc_hr_partition *hrp; - struct ptlrpc_hr_thread *hrt; - int rc; - int i; - int j; - int weight; - - memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr)); - ptlrpc_hr.hr_cpt_table = cfs_cpt_tab; - - ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table, - sizeof(*hrp)); - if (!ptlrpc_hr.hr_partitions) - return -ENOMEM; - - init_waitqueue_head(&ptlrpc_hr.hr_waitq); - - weight = cpumask_weight(topology_sibling_cpumask(0)); - - cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - hrp->hrp_cpt = i; - - atomic_set(&hrp->hrp_nstarted, 0); - atomic_set(&hrp->hrp_nstopped, 0); - - hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i); - hrp->hrp_nthrs /= weight; - if (hrp->hrp_nthrs == 0) - hrp->hrp_nthrs = 1; - - hrp->hrp_thrs = - kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS, - cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table, - i)); - if (!hrp->hrp_thrs) { - rc = -ENOMEM; - goto out; - } - - for (j = 0; j < hrp->hrp_nthrs; j++) { - hrt = &hrp->hrp_thrs[j]; - - hrt->hrt_id = j; - hrt->hrt_partition = hrp; - init_waitqueue_head(&hrt->hrt_waitq); - spin_lock_init(&hrt->hrt_lock); - INIT_LIST_HEAD(&hrt->hrt_queue); - } - } - - rc = ptlrpc_start_hr_threads(); -out: - if (rc != 0) - ptlrpc_hr_fini(); - return rc; -} - -void ptlrpc_hr_fini(void) -{ - struct ptlrpc_hr_partition *hrp; - int i; - - if (!ptlrpc_hr.hr_partitions) - return; - - ptlrpc_stop_hr_threads(); - - cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - kfree(hrp->hrp_thrs); - } - - cfs_percpt_free(ptlrpc_hr.hr_partitions); - ptlrpc_hr.hr_partitions = NULL; -} - -/** - * Wait until all already scheduled replies are processed. 
- */ -static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt) -{ - while (1) { - int rc; - - rc = wait_event_idle_timeout( - svcpt->scp_waitq, - atomic_read(&svcpt->scp_nreps_difficult) == 0, - 10 * HZ); - if (rc > 0) - break; - CWARN("Unexpectedly long timeout %s %p\n", - svcpt->scp_service->srv_name, svcpt->scp_service); - } -} - -static void -ptlrpc_service_del_atimer(struct ptlrpc_service *svc) -{ - struct ptlrpc_service_part *svcpt; - int i; - - /* early disarm AT timer... */ - ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service) - del_timer(&svcpt->scp_at_timer); - } -} - -static void -ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) -{ - struct ptlrpc_service_part *svcpt; - struct ptlrpc_request_buffer_desc *rqbd; - int cnt; - int rc; - int i; - - /* All history will be culled when the next request buffer is - * freed in ptlrpc_service_purge_all() - */ - svc->srv_hist_nrqbds_cpt_max = 0; - - rc = LNetClearLazyPortal(svc->srv_req_portal); - LASSERT(rc == 0); - - ptlrpc_service_for_each_part(svcpt, i, svc) { - if (!svcpt->scp_service) - break; - - /* Unlink all the request buffers. 
This forces a 'final' - * event with its 'unlink' flag set for each posted rqbd - */ - list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted, - rqbd_list) { - rc = LNetMDUnlink(rqbd->rqbd_md_h); - LASSERT(rc == 0 || rc == -ENOENT); - } - } - - ptlrpc_service_for_each_part(svcpt, i, svc) { - if (!svcpt->scp_service) - break; - - /* Wait for the network to release any buffers - * it's currently filling - */ - spin_lock(&svcpt->scp_lock); - while (svcpt->scp_nrqbds_posted != 0) { - spin_unlock(&svcpt->scp_lock); - /* Network access will complete in finite time but - * the HUGE timeout lets us CWARN for visibility - * of sluggish LNDs - */ - cnt = 0; - while (cnt < LONG_UNLINK && - (rc = wait_event_idle_timeout(svcpt->scp_waitq, - svcpt->scp_nrqbds_posted == 0, - HZ)) == 0) - cnt++; - if (rc == 0) { - CWARN("Service %s waiting for request buffers\n", - svcpt->scp_service->srv_name); - } - spin_lock(&svcpt->scp_lock); - } - spin_unlock(&svcpt->scp_lock); - } -} - -static void -ptlrpc_service_purge_all(struct ptlrpc_service *svc) -{ - struct ptlrpc_service_part *svcpt; - struct ptlrpc_request_buffer_desc *rqbd; - struct ptlrpc_request *req; - struct ptlrpc_reply_state *rs; - int i; - - ptlrpc_service_for_each_part(svcpt, i, svc) { - if (!svcpt->scp_service) - break; - - spin_lock(&svcpt->scp_rep_lock); - while (!list_empty(&svcpt->scp_rep_active)) { - rs = list_entry(svcpt->scp_rep_active.next, - struct ptlrpc_reply_state, rs_list); - spin_lock(&rs->rs_lock); - ptlrpc_schedule_difficult_reply(rs); - spin_unlock(&rs->rs_lock); - } - spin_unlock(&svcpt->scp_rep_lock); - - /* purge the request queue. 
NB No new replies (rqbds - * all unlinked) and no service threads, so I'm the only - * thread noodling the request queue now - */ - while (!list_empty(&svcpt->scp_req_incoming)) { - req = list_entry(svcpt->scp_req_incoming.next, - struct ptlrpc_request, rq_list); - - list_del(&req->rq_list); - svcpt->scp_nreqs_incoming--; - ptlrpc_server_finish_request(svcpt, req); - } - - while (ptlrpc_server_request_pending(svcpt, true)) { - req = ptlrpc_server_request_get(svcpt, true); - ptlrpc_server_finish_active_request(svcpt, req); - } - - LASSERT(list_empty(&svcpt->scp_rqbd_posted)); - LASSERT(svcpt->scp_nreqs_incoming == 0); - LASSERT(svcpt->scp_nreqs_active == 0); - /* history should have been culled by - * ptlrpc_server_finish_request - */ - LASSERT(svcpt->scp_hist_nrqbds == 0); - - /* Now free all the request buffers since nothing - * references them any more... - */ - - while (!list_empty(&svcpt->scp_rqbd_idle)) { - rqbd = list_entry(svcpt->scp_rqbd_idle.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); - ptlrpc_free_rqbd(rqbd); - } - ptlrpc_wait_replies(svcpt); - - while (!list_empty(&svcpt->scp_rep_idle)) { - rs = list_entry(svcpt->scp_rep_idle.next, - struct ptlrpc_reply_state, - rs_list); - list_del(&rs->rs_list); - kvfree(rs); - } - } -} - -static void -ptlrpc_service_free(struct ptlrpc_service *svc) -{ - struct ptlrpc_service_part *svcpt; - struct ptlrpc_at_array *array; - int i; - - ptlrpc_service_for_each_part(svcpt, i, svc) { - if (!svcpt->scp_service) - break; - - /* In case somebody rearmed this in the meantime */ - del_timer(&svcpt->scp_at_timer); - array = &svcpt->scp_at_array; - - kfree(array->paa_reqs_array); - array->paa_reqs_array = NULL; - kfree(array->paa_reqs_count); - array->paa_reqs_count = NULL; - } - - ptlrpc_service_for_each_part(svcpt, i, svc) - kfree(svcpt); - - if (svc->srv_cpts) - cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts); - - kfree(svc); -} - -int ptlrpc_unregister_service(struct ptlrpc_service *service) -{ - 
CDEBUG(D_NET, "%s: tearing down\n", service->srv_name); - - service->srv_is_stopping = 1; - - mutex_lock(&ptlrpc_all_services_mutex); - list_del_init(&service->srv_list); - mutex_unlock(&ptlrpc_all_services_mutex); - - ptlrpc_service_del_atimer(service); - ptlrpc_stop_all_threads(service); - - ptlrpc_service_unlink_rqbd(service); - ptlrpc_service_purge_all(service); - ptlrpc_service_nrs_cleanup(service); - - ptlrpc_lprocfs_unregister_service(service); - ptlrpc_sysfs_unregister_service(service); - - ptlrpc_service_free(service); - - return 0; -} -EXPORT_SYMBOL(ptlrpc_unregister_service); diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c deleted file mode 100644 index f9394c3e1ee2..000000000000 --- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c +++ /dev/null @@ -1,4210 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. 
- */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_RPC - -#include -#include - -#include -#include -#include -#include -#include "ptlrpc_internal.h" - -void lustre_assert_wire_constants(void) -{ - /* Wire protocol assertions generated by 'wirecheck' - * (make -C lustre/utils newwiretest) - * running on Linux centos6-bis 2.6.32-358.0.1.el6-head - * #3 SMP Wed Apr 17 17:37:43 CEST 2013 - * with gcc version 4.4.6 20110731 (Red Hat 4.4.6-3) (GCC) - */ - - /* Constants... */ - LASSERTF(PTL_RPC_MSG_REQUEST == 4711, "found %lld\n", - (long long)PTL_RPC_MSG_REQUEST); - LASSERTF(PTL_RPC_MSG_ERR == 4712, "found %lld\n", - (long long)PTL_RPC_MSG_ERR); - LASSERTF(PTL_RPC_MSG_REPLY == 4713, "found %lld\n", - (long long)PTL_RPC_MSG_REPLY); - LASSERTF(MDS_DIR_END_OFF == 0xfffffffffffffffeULL, "found 0x%.16llxULL\n", - MDS_DIR_END_OFF); - LASSERTF(DEAD_HANDLE_MAGIC == 0xdeadbeefcafebabeULL, "found 0x%.16llxULL\n", - DEAD_HANDLE_MAGIC); - BUILD_BUG_ON(MTI_NAME_MAXLEN != 64); - LASSERTF(OST_REPLY == 0, "found %lld\n", - (long long)OST_REPLY); - LASSERTF(OST_GETATTR == 1, "found %lld\n", - (long long)OST_GETATTR); - LASSERTF(OST_SETATTR == 2, "found %lld\n", - (long long)OST_SETATTR); - LASSERTF(OST_READ == 3, "found %lld\n", - (long long)OST_READ); - LASSERTF(OST_WRITE == 4, "found %lld\n", - (long long)OST_WRITE); - LASSERTF(OST_CREATE == 5, "found %lld\n", - (long long)OST_CREATE); - LASSERTF(OST_DESTROY == 6, "found %lld\n", - (long long)OST_DESTROY); - LASSERTF(OST_GET_INFO == 7, "found %lld\n", - (long long)OST_GET_INFO); - LASSERTF(OST_CONNECT == 8, "found %lld\n", - (long long)OST_CONNECT); - LASSERTF(OST_DISCONNECT == 9, "found %lld\n", - (long long)OST_DISCONNECT); - LASSERTF(OST_PUNCH == 10, "found %lld\n", - (long long)OST_PUNCH); - LASSERTF(OST_OPEN == 11, "found %lld\n", - (long long)OST_OPEN); - LASSERTF(OST_CLOSE == 12, "found %lld\n", - (long long)OST_CLOSE); - 
LASSERTF(OST_STATFS == 13, "found %lld\n", - (long long)OST_STATFS); - LASSERTF(OST_SYNC == 16, "found %lld\n", - (long long)OST_SYNC); - LASSERTF(OST_SET_INFO == 17, "found %lld\n", - (long long)OST_SET_INFO); - LASSERTF(OST_QUOTACHECK == 18, "found %lld\n", - (long long)OST_QUOTACHECK); - LASSERTF(OST_QUOTACTL == 19, "found %lld\n", - (long long)OST_QUOTACTL); - LASSERTF(OST_QUOTA_ADJUST_QUNIT == 20, "found %lld\n", - (long long)OST_QUOTA_ADJUST_QUNIT); - LASSERTF(OST_LAST_OPC == 21, "found %lld\n", - (long long)OST_LAST_OPC); - LASSERTF(OBD_OBJECT_EOF == 0xffffffffffffffffULL, "found 0x%.16llxULL\n", - OBD_OBJECT_EOF); - LASSERTF(OST_MIN_PRECREATE == 32, "found %lld\n", - (long long)OST_MIN_PRECREATE); - LASSERTF(OST_MAX_PRECREATE == 20000, "found %lld\n", - (long long)OST_MAX_PRECREATE); - LASSERTF(OST_LVB_ERR_INIT == 0xffbadbad80000000ULL, "found 0x%.16llxULL\n", - OST_LVB_ERR_INIT); - LASSERTF(OST_LVB_ERR_MASK == 0xffbadbad00000000ULL, "found 0x%.16llxULL\n", - OST_LVB_ERR_MASK); - LASSERTF(MDS_FIRST_OPC == 33, "found %lld\n", - (long long)MDS_FIRST_OPC); - LASSERTF(MDS_GETATTR == 33, "found %lld\n", - (long long)MDS_GETATTR); - LASSERTF(MDS_GETATTR_NAME == 34, "found %lld\n", - (long long)MDS_GETATTR_NAME); - LASSERTF(MDS_CLOSE == 35, "found %lld\n", - (long long)MDS_CLOSE); - LASSERTF(MDS_REINT == 36, "found %lld\n", - (long long)MDS_REINT); - LASSERTF(MDS_READPAGE == 37, "found %lld\n", - (long long)MDS_READPAGE); - LASSERTF(MDS_CONNECT == 38, "found %lld\n", - (long long)MDS_CONNECT); - LASSERTF(MDS_DISCONNECT == 39, "found %lld\n", - (long long)MDS_DISCONNECT); - LASSERTF(MDS_GETSTATUS == 40, "found %lld\n", - (long long)MDS_GETSTATUS); - LASSERTF(MDS_STATFS == 41, "found %lld\n", - (long long)MDS_STATFS); - LASSERTF(MDS_PIN == 42, "found %lld\n", - (long long)MDS_PIN); - LASSERTF(MDS_UNPIN == 43, "found %lld\n", - (long long)MDS_UNPIN); - LASSERTF(MDS_SYNC == 44, "found %lld\n", - (long long)MDS_SYNC); - LASSERTF(MDS_DONE_WRITING == 45, "found %lld\n", 
- (long long)MDS_DONE_WRITING); - LASSERTF(MDS_SET_INFO == 46, "found %lld\n", - (long long)MDS_SET_INFO); - LASSERTF(MDS_QUOTACHECK == 47, "found %lld\n", - (long long)MDS_QUOTACHECK); - LASSERTF(MDS_QUOTACTL == 48, "found %lld\n", - (long long)MDS_QUOTACTL); - LASSERTF(MDS_GETXATTR == 49, "found %lld\n", - (long long)MDS_GETXATTR); - LASSERTF(MDS_SETXATTR == 50, "found %lld\n", - (long long)MDS_SETXATTR); - LASSERTF(MDS_WRITEPAGE == 51, "found %lld\n", - (long long)MDS_WRITEPAGE); - LASSERTF(MDS_IS_SUBDIR == 52, "found %lld\n", - (long long)MDS_IS_SUBDIR); - LASSERTF(MDS_GET_INFO == 53, "found %lld\n", - (long long)MDS_GET_INFO); - LASSERTF(MDS_HSM_STATE_GET == 54, "found %lld\n", - (long long)MDS_HSM_STATE_GET); - LASSERTF(MDS_HSM_STATE_SET == 55, "found %lld\n", - (long long)MDS_HSM_STATE_SET); - LASSERTF(MDS_HSM_ACTION == 56, "found %lld\n", - (long long)MDS_HSM_ACTION); - LASSERTF(MDS_HSM_PROGRESS == 57, "found %lld\n", - (long long)MDS_HSM_PROGRESS); - LASSERTF(MDS_HSM_REQUEST == 58, "found %lld\n", - (long long)MDS_HSM_REQUEST); - LASSERTF(MDS_HSM_CT_REGISTER == 59, "found %lld\n", - (long long)MDS_HSM_CT_REGISTER); - LASSERTF(MDS_HSM_CT_UNREGISTER == 60, "found %lld\n", - (long long)MDS_HSM_CT_UNREGISTER); - LASSERTF(MDS_SWAP_LAYOUTS == 61, "found %lld\n", - (long long)MDS_SWAP_LAYOUTS); - LASSERTF(MDS_LAST_OPC == 62, "found %lld\n", - (long long)MDS_LAST_OPC); - LASSERTF(REINT_SETATTR == 1, "found %lld\n", - (long long)REINT_SETATTR); - LASSERTF(REINT_CREATE == 2, "found %lld\n", - (long long)REINT_CREATE); - LASSERTF(REINT_LINK == 3, "found %lld\n", - (long long)REINT_LINK); - LASSERTF(REINT_UNLINK == 4, "found %lld\n", - (long long)REINT_UNLINK); - LASSERTF(REINT_RENAME == 5, "found %lld\n", - (long long)REINT_RENAME); - LASSERTF(REINT_OPEN == 6, "found %lld\n", - (long long)REINT_OPEN); - LASSERTF(REINT_SETXATTR == 7, "found %lld\n", - (long long)REINT_SETXATTR); - LASSERTF(REINT_RMENTRY == 8, "found %lld\n", - (long long)REINT_RMENTRY); - 
LASSERTF(REINT_MIGRATE == 9, "found %lld\n", - (long long)REINT_MIGRATE); - LASSERTF(REINT_MAX == 10, "found %lld\n", - (long long)REINT_MAX); - LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)DISP_IT_EXECD); - LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned int)DISP_LOOKUP_EXECD); - LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned int)DISP_LOOKUP_NEG); - LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n", - (unsigned int)DISP_LOOKUP_POS); - LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n", - (unsigned int)DISP_OPEN_CREATE); - LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n", - (unsigned int)DISP_OPEN_OPEN); - LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n", - (unsigned int)DISP_ENQ_COMPLETE); - LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n", - (unsigned int)DISP_ENQ_OPEN_REF); - LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n", - (unsigned int)DISP_ENQ_CREATE_REF); - LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n", - (unsigned int)DISP_OPEN_LOCK); - LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n", - (long long)MDS_STATUS_CONN); - LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n", - (long long)MDS_STATUS_LOV); - LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_MODE); - LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_UID); - LASSERTF(MDS_ATTR_GID == 0x0000000000000004ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_GID); - LASSERTF(MDS_ATTR_SIZE == 0x0000000000000008ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_SIZE); - LASSERTF(MDS_ATTR_ATIME == 0x0000000000000010ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_ATIME); - LASSERTF(MDS_ATTR_MTIME == 0x0000000000000020ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_MTIME); - LASSERTF(MDS_ATTR_CTIME == 0x0000000000000040ULL, "found 0x%.16llxULL\n", - 
(long long)MDS_ATTR_CTIME); - LASSERTF(MDS_ATTR_ATIME_SET == 0x0000000000000080ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_ATIME_SET); - LASSERTF(MDS_ATTR_MTIME_SET == 0x0000000000000100ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_MTIME_SET); - LASSERTF(MDS_ATTR_FORCE == 0x0000000000000200ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_FORCE); - LASSERTF(MDS_ATTR_ATTR_FLAG == 0x0000000000000400ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_ATTR_FLAG); - LASSERTF(MDS_ATTR_KILL_SUID == 0x0000000000000800ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_KILL_SUID); - LASSERTF(MDS_ATTR_KILL_SGID == 0x0000000000001000ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_KILL_SGID); - LASSERTF(MDS_ATTR_CTIME_SET == 0x0000000000002000ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_CTIME_SET); - LASSERTF(MDS_ATTR_FROM_OPEN == 0x0000000000004000ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_FROM_OPEN); - LASSERTF(MDS_ATTR_BLOCKS == 0x0000000000008000ULL, "found 0x%.16llxULL\n", - (long long)MDS_ATTR_BLOCKS); - LASSERTF(FLD_QUERY == 900, "found %lld\n", - (long long)FLD_QUERY); - LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n", - (long long)FLD_FIRST_OPC); - LASSERTF(FLD_READ == 901, "found %lld\n", - (long long)FLD_READ); - LASSERTF(FLD_LAST_OPC == 902, "found %lld\n", - (long long)FLD_LAST_OPC); - LASSERTF(SEQ_QUERY == 700, "found %lld\n", - (long long)SEQ_QUERY); - LASSERTF(SEQ_FIRST_OPC == 700, "found %lld\n", - (long long)SEQ_FIRST_OPC); - LASSERTF(SEQ_LAST_OPC == 701, "found %lld\n", - (long long)SEQ_LAST_OPC); - LASSERTF(SEQ_ALLOC_SUPER == 0, "found %lld\n", - (long long)SEQ_ALLOC_SUPER); - LASSERTF(SEQ_ALLOC_META == 1, "found %lld\n", - (long long)SEQ_ALLOC_META); - LASSERTF(LDLM_ENQUEUE == 101, "found %lld\n", - (long long)LDLM_ENQUEUE); - LASSERTF(LDLM_CONVERT == 102, "found %lld\n", - (long long)LDLM_CONVERT); - LASSERTF(LDLM_CANCEL == 103, "found %lld\n", - (long long)LDLM_CANCEL); - LASSERTF(LDLM_BL_CALLBACK == 104, "found %lld\n", - (long 
long)LDLM_BL_CALLBACK); - LASSERTF(LDLM_CP_CALLBACK == 105, "found %lld\n", - (long long)LDLM_CP_CALLBACK); - LASSERTF(LDLM_GL_CALLBACK == 106, "found %lld\n", - (long long)LDLM_GL_CALLBACK); - LASSERTF(LDLM_SET_INFO == 107, "found %lld\n", - (long long)LDLM_SET_INFO); - LASSERTF(LDLM_LAST_OPC == 108, "found %lld\n", - (long long)LDLM_LAST_OPC); - LASSERTF(LCK_MINMODE == 0, "found %lld\n", - (long long)LCK_MINMODE); - LASSERTF(LCK_EX == 1, "found %lld\n", - (long long)LCK_EX); - LASSERTF(LCK_PW == 2, "found %lld\n", - (long long)LCK_PW); - LASSERTF(LCK_PR == 4, "found %lld\n", - (long long)LCK_PR); - LASSERTF(LCK_CW == 8, "found %lld\n", - (long long)LCK_CW); - LASSERTF(LCK_CR == 16, "found %lld\n", - (long long)LCK_CR); - LASSERTF(LCK_NL == 32, "found %lld\n", - (long long)LCK_NL); - LASSERTF(LCK_GROUP == 64, "found %lld\n", - (long long)LCK_GROUP); - LASSERTF(LCK_COS == 128, "found %lld\n", - (long long)LCK_COS); - LASSERTF(LCK_MAXMODE == 129, "found %lld\n", - (long long)LCK_MAXMODE); - LASSERTF(LCK_MODE_NUM == 8, "found %lld\n", - (long long)LCK_MODE_NUM); - BUILD_BUG_ON(LDLM_PLAIN != 10); - BUILD_BUG_ON(LDLM_EXTENT != 11); - BUILD_BUG_ON(LDLM_FLOCK != 12); - BUILD_BUG_ON(LDLM_IBITS != 13); - BUILD_BUG_ON(LDLM_MAX_TYPE != 14); - BUILD_BUG_ON(LUSTRE_RES_ID_SEQ_OFF != 0); - BUILD_BUG_ON(LUSTRE_RES_ID_VER_OID_OFF != 1); - BUILD_BUG_ON(LUSTRE_RES_ID_QUOTA_SEQ_OFF != 2); - BUILD_BUG_ON(LUSTRE_RES_ID_QUOTA_VER_OID_OFF != 3); - BUILD_BUG_ON(LUSTRE_RES_ID_HSH_OFF != 3); - LASSERTF(OBD_PING == 400, "found %lld\n", - (long long)OBD_PING); - LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n", - (long long)OBD_LOG_CANCEL); - LASSERTF(OBD_QC_CALLBACK == 402, "found %lld\n", - (long long)OBD_QC_CALLBACK); - LASSERTF(OBD_IDX_READ == 403, "found %lld\n", - (long long)OBD_IDX_READ); - LASSERTF(OBD_LAST_OPC == 404, "found %lld\n", - (long long)OBD_LAST_OPC); - LASSERTF(QUOTA_DQACQ == 601, "found %lld\n", - (long long)QUOTA_DQACQ); - LASSERTF(QUOTA_DQREL == 602, "found %lld\n", - 
(long long)QUOTA_DQREL); - LASSERTF(QUOTA_LAST_OPC == 603, "found %lld\n", - (long long)QUOTA_LAST_OPC); - LASSERTF(MGS_CONNECT == 250, "found %lld\n", - (long long)MGS_CONNECT); - LASSERTF(MGS_DISCONNECT == 251, "found %lld\n", - (long long)MGS_DISCONNECT); - LASSERTF(MGS_EXCEPTION == 252, "found %lld\n", - (long long)MGS_EXCEPTION); - LASSERTF(MGS_TARGET_REG == 253, "found %lld\n", - (long long)MGS_TARGET_REG); - LASSERTF(MGS_TARGET_DEL == 254, "found %lld\n", - (long long)MGS_TARGET_DEL); - LASSERTF(MGS_SET_INFO == 255, "found %lld\n", - (long long)MGS_SET_INFO); - LASSERTF(MGS_LAST_OPC == 257, "found %lld\n", - (long long)MGS_LAST_OPC); - LASSERTF(SEC_CTX_INIT == 801, "found %lld\n", - (long long)SEC_CTX_INIT); - LASSERTF(SEC_CTX_INIT_CONT == 802, "found %lld\n", - (long long)SEC_CTX_INIT_CONT); - LASSERTF(SEC_CTX_FINI == 803, "found %lld\n", - (long long)SEC_CTX_FINI); - LASSERTF(SEC_LAST_OPC == 804, "found %lld\n", - (long long)SEC_LAST_OPC); - /* Sizes and Offsets */ - - /* Checks for struct obd_uuid */ - LASSERTF((int)sizeof(struct obd_uuid) == 40, "found %lld\n", - (long long)(int)sizeof(struct obd_uuid)); - - /* Checks for struct lu_seq_range */ - LASSERTF((int)sizeof(struct lu_seq_range) == 24, "found %lld\n", - (long long)(int)sizeof(struct lu_seq_range)); - LASSERTF((int)offsetof(struct lu_seq_range, lsr_start) == 0, "found %lld\n", - (long long)(int)offsetof(struct lu_seq_range, lsr_start)); - LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_start)); - LASSERTF((int)offsetof(struct lu_seq_range, lsr_end) == 8, "found %lld\n", - (long long)(int)offsetof(struct lu_seq_range, lsr_end)); - LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_end) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_end)); - LASSERTF((int)offsetof(struct lu_seq_range, lsr_index) == 16, "found %lld\n", - (long long)(int)offsetof(struct lu_seq_range, 
lsr_index)); - LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_index)); - LASSERTF((int)offsetof(struct lu_seq_range, lsr_flags) == 20, "found %lld\n", - (long long)(int)offsetof(struct lu_seq_range, lsr_flags)); - LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_flags)); - LASSERTF(LU_SEQ_RANGE_MDT == 0, "found %lld\n", - (long long)LU_SEQ_RANGE_MDT); - LASSERTF(LU_SEQ_RANGE_OST == 1, "found %lld\n", - (long long)LU_SEQ_RANGE_OST); - - /* Checks for struct lustre_mdt_attrs */ - LASSERTF((int)sizeof(struct lustre_mdt_attrs) == 24, "found %lld\n", - (long long)(int)sizeof(struct lustre_mdt_attrs)); - LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_compat) == 0, "found %lld\n", - (long long)(int)offsetof(struct lustre_mdt_attrs, lma_compat)); - LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_compat) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_compat)); - LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_incompat) == 4, "found %lld\n", - (long long)(int)offsetof(struct lustre_mdt_attrs, lma_incompat)); - LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_incompat) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_incompat)); - LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_self_fid) == 8, "found %lld\n", - (long long)(int)offsetof(struct lustre_mdt_attrs, lma_self_fid)); - LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid)); - LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)LMAI_RELEASED); - LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)LMAC_HSM); - LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned 
int)LMAC_NOT_IN_OI); - LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n", - (unsigned int)LMAC_FID_ON_OST); - - /* Checks for struct ost_id */ - LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n", - (long long)(int)sizeof(struct ost_id)); - LASSERTF((int)offsetof(struct ost_id, oi) == 0, "found %lld\n", - (long long)(int)offsetof(struct ost_id, oi)); - LASSERTF((int)sizeof(((struct ost_id *)0)->oi) == 16, "found %lld\n", - (long long)(int)sizeof(((struct ost_id *)0)->oi)); - LASSERTF(LUSTRE_FID_INIT_OID == 1, "found %lld\n", - (long long)LUSTRE_FID_INIT_OID); - LASSERTF(FID_SEQ_OST_MDT0 == 0, "found %lld\n", - (long long)FID_SEQ_OST_MDT0); - LASSERTF(FID_SEQ_LLOG == 1, "found %lld\n", - (long long)FID_SEQ_LLOG); - LASSERTF(FID_SEQ_ECHO == 2, "found %lld\n", - (long long)FID_SEQ_ECHO); - LASSERTF(FID_SEQ_OST_MDT1 == 3, "found %lld\n", - (long long)FID_SEQ_OST_MDT1); - LASSERTF(FID_SEQ_OST_MAX == 9, "found %lld\n", - (long long)FID_SEQ_OST_MAX); - LASSERTF(FID_SEQ_RSVD == 11, "found %lld\n", - (long long)FID_SEQ_RSVD); - LASSERTF(FID_SEQ_IGIF == 12, "found %lld\n", - (long long)FID_SEQ_IGIF); - LASSERTF(FID_SEQ_IGIF_MAX == 0x00000000ffffffffULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_IGIF_MAX); - LASSERTF(FID_SEQ_IDIF == 0x0000000100000000ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_IDIF); - LASSERTF(FID_SEQ_IDIF_MAX == 0x00000001ffffffffULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_IDIF_MAX); - LASSERTF(FID_SEQ_START == 0x0000000200000000ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_START); - LASSERTF(FID_SEQ_LOCAL_FILE == 0x0000000200000001ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_LOCAL_FILE); - LASSERTF(FID_SEQ_DOT_LUSTRE == 0x0000000200000002ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_DOT_LUSTRE); - LASSERTF(FID_SEQ_SPECIAL == 0x0000000200000004ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_SPECIAL); - LASSERTF(FID_SEQ_QUOTA == 0x0000000200000005ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_QUOTA); - 
LASSERTF(FID_SEQ_QUOTA_GLB == 0x0000000200000006ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_QUOTA_GLB); - LASSERTF(FID_SEQ_ROOT == 0x0000000200000007ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_ROOT); - LASSERTF(FID_SEQ_NORMAL == 0x0000000200000400ULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_NORMAL); - LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n", - (long long)FID_SEQ_LOV_DEFAULT); - LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)FID_OID_SPECIAL_BFL); - LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)FID_OID_DOT_LUSTRE); - LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned int)FID_OID_DOT_LUSTRE_OBF); - - /* Checks for struct lu_dirent */ - LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n", - (long long)(int)sizeof(struct lu_dirent)); - LASSERTF((int)offsetof(struct lu_dirent, lde_fid) == 0, "found %lld\n", - (long long)(int)offsetof(struct lu_dirent, lde_fid)); - LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirent *)0)->lde_fid)); - LASSERTF((int)offsetof(struct lu_dirent, lde_hash) == 16, "found %lld\n", - (long long)(int)offsetof(struct lu_dirent, lde_hash)); - LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_hash) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirent *)0)->lde_hash)); - LASSERTF((int)offsetof(struct lu_dirent, lde_reclen) == 24, "found %lld\n", - (long long)(int)offsetof(struct lu_dirent, lde_reclen)); - LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_reclen) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirent *)0)->lde_reclen)); - LASSERTF((int)offsetof(struct lu_dirent, lde_namelen) == 26, "found %lld\n", - (long long)(int)offsetof(struct lu_dirent, lde_namelen)); - LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_namelen) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirent 
*)0)->lde_namelen)); - LASSERTF((int)offsetof(struct lu_dirent, lde_attrs) == 28, "found %lld\n", - (long long)(int)offsetof(struct lu_dirent, lde_attrs)); - LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_attrs) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirent *)0)->lde_attrs)); - LASSERTF((int)offsetof(struct lu_dirent, lde_name[0]) == 32, "found %lld\n", - (long long)(int)offsetof(struct lu_dirent, lde_name[0])); - LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0])); - LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)LUDA_FID); - LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned int)LUDA_TYPE); - LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned int)LUDA_64BITHASH); - - /* Checks for struct luda_type */ - LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n", - (long long)(int)sizeof(struct luda_type)); - LASSERTF((int)offsetof(struct luda_type, lt_type) == 0, "found %lld\n", - (long long)(int)offsetof(struct luda_type, lt_type)); - LASSERTF((int)sizeof(((struct luda_type *)0)->lt_type) == 2, "found %lld\n", - (long long)(int)sizeof(((struct luda_type *)0)->lt_type)); - - /* Checks for struct lu_dirpage */ - LASSERTF((int)sizeof(struct lu_dirpage) == 24, "found %lld\n", - (long long)(int)sizeof(struct lu_dirpage)); - LASSERTF((int)offsetof(struct lu_dirpage, ldp_hash_start) == 0, "found %lld\n", - (long long)(int)offsetof(struct lu_dirpage, ldp_hash_start)); - LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_hash_start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_hash_start)); - LASSERTF((int)offsetof(struct lu_dirpage, ldp_hash_end) == 8, "found %lld\n", - (long long)(int)offsetof(struct lu_dirpage, ldp_hash_end)); - LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_hash_end) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirpage 
*)0)->ldp_hash_end)); - LASSERTF((int)offsetof(struct lu_dirpage, ldp_flags) == 16, "found %lld\n", - (long long)(int)offsetof(struct lu_dirpage, ldp_flags)); - LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_flags)); - LASSERTF((int)offsetof(struct lu_dirpage, ldp_pad0) == 20, "found %lld\n", - (long long)(int)offsetof(struct lu_dirpage, ldp_pad0)); - LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_pad0) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_pad0)); - LASSERTF((int)offsetof(struct lu_dirpage, ldp_entries[0]) == 24, "found %lld\n", - (long long)(int)offsetof(struct lu_dirpage, ldp_entries[0])); - LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_entries[0]) == 32, "found %lld\n", - (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_entries[0])); - LASSERTF(LDF_EMPTY == 1, "found %lld\n", - (long long)LDF_EMPTY); - LASSERTF(LDF_COLLIDE == 2, "found %lld\n", - (long long)LDF_COLLIDE); - LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n", - (long long)LU_PAGE_SIZE); - - /* Checks for struct lustre_handle */ - LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n", - (long long)(int)sizeof(struct lustre_handle)); - LASSERTF((int)offsetof(struct lustre_handle, cookie) == 0, "found %lld\n", - (long long)(int)offsetof(struct lustre_handle, cookie)); - LASSERTF((int)sizeof(((struct lustre_handle *)0)->cookie) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lustre_handle *)0)->cookie)); - - /* Checks for struct lustre_msg_v2 */ - LASSERTF((int)sizeof(struct lustre_msg_v2) == 32, "found %lld\n", - (long long)(int)sizeof(struct lustre_msg_v2)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_bufcount) == 0, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_bufcount)); - LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_bufcount) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 
*)0)->lm_bufcount)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_secflvr) == 4, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_secflvr)); - LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_secflvr) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_secflvr)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_magic) == 8, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_magic)); - LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_magic)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_repsize) == 12, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_repsize)); - LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_repsize) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_repsize)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_cksum) == 16, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_cksum)); - LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_cksum) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_cksum)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_flags) == 20, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_flags)); - LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_flags)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_padding_2) == 24, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_padding_2)); - LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_2)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_padding_3) == 28, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_padding_3)); - LASSERTF((int)sizeof(((struct lustre_msg_v2 
*)0)->lm_padding_3) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_3)); - LASSERTF((int)offsetof(struct lustre_msg_v2, lm_buflens[0]) == 32, "found %lld\n", - (long long)(int)offsetof(struct lustre_msg_v2, lm_buflens[0])); - LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0])); - LASSERTF(LUSTRE_MSG_MAGIC_V2 == 0x0BD00BD3, "found 0x%.8x\n", - LUSTRE_MSG_MAGIC_V2); - LASSERTF(LUSTRE_MSG_MAGIC_V2_SWABBED == 0xD30BD00B, "found 0x%.8x\n", - LUSTRE_MSG_MAGIC_V2_SWABBED); - - /* Checks for struct ptlrpc_body */ - LASSERTF((int)sizeof(struct ptlrpc_body_v3) == 184, "found %lld\n", - (long long)(int)sizeof(struct ptlrpc_body_v3)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_handle) == 0, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_handle)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_type) == 8, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_type)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_version) == 12, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_version)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_opc) == 16, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_opc)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc)); - LASSERTF((int)offsetof(struct 
ptlrpc_body_v3, pb_status) == 20, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_status)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_xid) == 24, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == 32, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_tag)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == 2, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == 34, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding0)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == 2, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == 36, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding1)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_transno) == 48, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_transno)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 
*)0)->pb_transno) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_flags) == 56, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_flags)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_op_flags) == 60, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_op_flags)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt) == 64, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_timeout) == 68, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_timeout)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_service_time) == 72, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_service_time)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_limit) == 76, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_limit)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_slv) == 
80, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_slv)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv)); - BUILD_BUG_ON(PTLRPC_NUM_VERSIONS != 4); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_pre_versions) == 88, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == 120, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_mbits)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == 128, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_0)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == 136, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_1)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == 144, "found %lld\n", - (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_2)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2)); - BUILD_BUG_ON(LUSTRE_JOBID_SIZE != 32); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n", - (long 
long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid) == 32, "found %lld\n", - (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_handle) == (int)offsetof(struct ptlrpc_body_v2, pb_handle), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_handle), (int)offsetof(struct ptlrpc_body_v2, pb_handle)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_handle), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_handle)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_type) == (int)offsetof(struct ptlrpc_body_v2, pb_type), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_type), (int)offsetof(struct ptlrpc_body_v2, pb_type)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_type), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_type)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_version) == (int)offsetof(struct ptlrpc_body_v2, pb_version), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_version), (int)offsetof(struct ptlrpc_body_v2, pb_version)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_version), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_version)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_opc) == (int)offsetof(struct ptlrpc_body_v2, pb_opc), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_opc), (int)offsetof(struct ptlrpc_body_v2, pb_opc)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_opc), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 
*)0)->pb_opc), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_opc)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_status) == (int)offsetof(struct ptlrpc_body_v2, pb_status), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_status), (int)offsetof(struct ptlrpc_body_v2, pb_status)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_status), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_status)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_xid) == (int)offsetof(struct ptlrpc_body_v2, pb_last_xid), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == (int)offsetof(struct ptlrpc_body_v2, pb_tag), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_tag), (int)offsetof(struct ptlrpc_body_v2, pb_tag)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding0), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_padding0), (int)offsetof(struct ptlrpc_body_v2, pb_padding0)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0)); - LASSERTF((int)offsetof(struct 
ptlrpc_body_v3, pb_padding1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding1), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_padding1), (int)offsetof(struct ptlrpc_body_v2, pb_padding1)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_transno) == (int)offsetof(struct ptlrpc_body_v2, pb_transno), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_transno), (int)offsetof(struct ptlrpc_body_v2, pb_transno)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_transno), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_transno)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_flags) == (int)offsetof(struct ptlrpc_body_v2, pb_flags), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_flags), (int)offsetof(struct ptlrpc_body_v2, pb_flags)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_flags), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_flags)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_op_flags) == 
(int)offsetof(struct ptlrpc_body_v2, pb_op_flags), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_op_flags), (int)offsetof(struct ptlrpc_body_v2, pb_op_flags)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_op_flags), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_op_flags)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt) == (int)offsetof(struct ptlrpc_body_v2, pb_conn_cnt), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt), (int)offsetof(struct ptlrpc_body_v2, pb_conn_cnt)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_conn_cnt), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_conn_cnt)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_timeout) == (int)offsetof(struct ptlrpc_body_v2, pb_timeout), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_timeout), (int)offsetof(struct ptlrpc_body_v2, pb_timeout)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_timeout), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_timeout)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_service_time) == (int)offsetof(struct ptlrpc_body_v2, pb_service_time), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_service_time), (int)offsetof(struct ptlrpc_body_v2, pb_service_time)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_service_time), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_service_time)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_limit) == (int)offsetof(struct 
ptlrpc_body_v2, pb_limit), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_limit), (int)offsetof(struct ptlrpc_body_v2, pb_limit)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_limit), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_limit)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_slv) == (int)offsetof(struct ptlrpc_body_v2, pb_slv), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_slv), (int)offsetof(struct ptlrpc_body_v2, pb_slv)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_slv), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_slv)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_pre_versions) == (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == (int)offsetof(struct ptlrpc_body_v2, pb_mbits), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_mbits), (int)offsetof(struct ptlrpc_body_v2, pb_mbits)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, 
pb_padding64_0), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_padding64_1), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1)); - LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2), "%d != %d\n", - (int)offsetof(struct ptlrpc_body_v3, pb_padding64_2), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2)); - LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2), "%d != %d\n", - (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2)); - LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n", - (long long)MSG_PTLRPC_BODY_OFF); - LASSERTF(REQ_REC_OFF == 1, "found %lld\n", - (long long)REQ_REC_OFF); - LASSERTF(REPLY_REC_OFF == 1, "found %lld\n", - (long long)REPLY_REC_OFF); - LASSERTF(DLM_LOCKREQ_OFF == 1, "found %lld\n", - (long long)DLM_LOCKREQ_OFF); - LASSERTF(DLM_REQ_REC_OFF == 2, "found %lld\n", - (long long)DLM_REQ_REC_OFF); - LASSERTF(DLM_INTENT_IT_OFF == 2, "found %lld\n", - (long long)DLM_INTENT_IT_OFF); - LASSERTF(DLM_INTENT_REC_OFF == 3, "found %lld\n", - (long long)DLM_INTENT_REC_OFF); - LASSERTF(DLM_LOCKREPLY_OFF == 1, "found %lld\n", - (long 
long)DLM_LOCKREPLY_OFF); - LASSERTF(DLM_REPLY_REC_OFF == 2, "found %lld\n", - (long long)DLM_REPLY_REC_OFF); - LASSERTF(MSG_PTLRPC_HEADER_OFF == 31, "found %lld\n", - (long long)MSG_PTLRPC_HEADER_OFF); - LASSERTF(PTLRPC_MSG_VERSION == 0x00000003, "found 0x%.8x\n", - PTLRPC_MSG_VERSION); - LASSERTF(LUSTRE_VERSION_MASK == 0xffff0000, "found 0x%.8x\n", - LUSTRE_VERSION_MASK); - LASSERTF(LUSTRE_OBD_VERSION == 0x00010000, "found 0x%.8x\n", - LUSTRE_OBD_VERSION); - LASSERTF(LUSTRE_MDS_VERSION == 0x00020000, "found 0x%.8x\n", - LUSTRE_MDS_VERSION); - LASSERTF(LUSTRE_OST_VERSION == 0x00030000, "found 0x%.8x\n", - LUSTRE_OST_VERSION); - LASSERTF(LUSTRE_DLM_VERSION == 0x00040000, "found 0x%.8x\n", - LUSTRE_DLM_VERSION); - LASSERTF(LUSTRE_LOG_VERSION == 0x00050000, "found 0x%.8x\n", - LUSTRE_LOG_VERSION); - LASSERTF(LUSTRE_MGS_VERSION == 0x00060000, "found 0x%.8x\n", - LUSTRE_MGS_VERSION); - LASSERTF(MSGHDR_AT_SUPPORT == 1, "found %lld\n", - (long long)MSGHDR_AT_SUPPORT); - LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n", - (long long)MSGHDR_CKSUM_INCOMPAT18); - LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n", - (unsigned int)MSG_OP_FLAG_MASK); - LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n", - (long long)MSG_OP_FLAG_SHIFT); - LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n", - (unsigned int)MSG_GEN_FLAG_MASK); - LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)MSG_LAST_REPLAY); - LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned int)MSG_RESENT); - LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned int)MSG_REPLAY); - LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n", - (unsigned int)MSG_DELAY_REPLAY); - LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n", - (unsigned int)MSG_VERSION_REPLAY); - LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n", - (unsigned int)MSG_REQ_REPLAY_DONE); - LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 
0x%.8xUL\n", - (unsigned int)MSG_LOCK_REPLAY_DONE); - LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)MSG_CONNECT_RECOVERING); - LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned int)MSG_CONNECT_RECONNECT); - LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned int)MSG_CONNECT_REPLAYABLE); - LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n", - (unsigned int)MSG_CONNECT_LIBCLIENT); - LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n", - (unsigned int)MSG_CONNECT_INITIAL); - LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n", - (unsigned int)MSG_CONNECT_ASYNC); - LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n", - (unsigned int)MSG_CONNECT_NEXT_VER); - LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n", - (unsigned int)MSG_CONNECT_TRANSNO); - - /* Checks for struct obd_connect_data */ - LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n", - (long long)(int)sizeof(struct obd_connect_data)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags) == 0, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_version) == 8, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_version)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_version) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_version)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_grant) == 12, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_grant)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_grant) == 4, "found %lld\n", - (long 
long)(int)sizeof(((struct obd_connect_data *)0)->ocd_grant)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_index) == 16, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_index)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_index)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_brw_size) == 20, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_brw_size)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_brw_size) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_brw_size)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_ibits_known) == 24, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_ibits_known)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_ibits_known) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_ibits_known)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_blocksize) == 32, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_blocksize)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_blocksize) == 1, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_blocksize)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_inodespace) == 33, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_inodespace)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_inodespace) == 1, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_inodespace)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_grant_extent) == 34, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_grant_extent)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_grant_extent) == 2, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data 
*)0)->ocd_grant_extent)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_unused) == 36, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_unused)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_unused) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_unused)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_transno) == 40, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_transno)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_transno) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_transno)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_group) == 48, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_group)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_group) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_group)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_cksum_types) == 52, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_cksum_types)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_cksum_types) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_cksum_types)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_max_easize) == 56, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_max_easize)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_max_easize) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_max_easize)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_instance) == 60, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_instance)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_instance) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_instance)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxbytes) 
== 64, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxmodrpcs) == 72, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_maxmodrpcs)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs) == 2, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs)); - LASSERTF((int)offsetof(struct obd_connect_data, padding0) == 74, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding0)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding0) == 2, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding0)); - LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 76, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding1)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding1)); - LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags2) == 80, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags2)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2)); - LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding3)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding3)); - LASSERTF((int)offsetof(struct obd_connect_data, padding4) == 96, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding4)); - 
LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding4) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding4)); - LASSERTF((int)offsetof(struct obd_connect_data, padding5) == 104, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding5)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding5) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding5)); - LASSERTF((int)offsetof(struct obd_connect_data, padding6) == 112, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding6)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding6) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding6)); - LASSERTF((int)offsetof(struct obd_connect_data, padding7) == 120, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding7)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding7) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding7)); - LASSERTF((int)offsetof(struct obd_connect_data, padding8) == 128, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding8)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding8) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding8)); - LASSERTF((int)offsetof(struct obd_connect_data, padding9) == 136, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, padding9)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding9) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->padding9)); - LASSERTF((int)offsetof(struct obd_connect_data, paddingA) == 144, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, paddingA)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingA) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingA)); - 
LASSERTF((int)offsetof(struct obd_connect_data, paddingB) == 152, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, paddingB)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingB) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingB)); - LASSERTF((int)offsetof(struct obd_connect_data, paddingC) == 160, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, paddingC)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingC) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingC)); - LASSERTF((int)offsetof(struct obd_connect_data, paddingD) == 168, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, paddingD)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingD) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingD)); - LASSERTF((int)offsetof(struct obd_connect_data, paddingE) == 176, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, paddingE)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingE) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingE)); - LASSERTF((int)offsetof(struct obd_connect_data, paddingF) == 184, "found %lld\n", - (long long)(int)offsetof(struct obd_connect_data, paddingF)); - LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingF) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingF)); - LASSERTF(OBD_CONNECT_RDONLY == 0x1ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_RDONLY); - LASSERTF(OBD_CONNECT_INDEX == 0x2ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_INDEX); - LASSERTF(OBD_CONNECT_MDS == 0x4ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_MDS); - LASSERTF(OBD_CONNECT_GRANT == 0x8ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_GRANT); - LASSERTF(OBD_CONNECT_SRVLOCK == 0x10ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_SRVLOCK); - LASSERTF(OBD_CONNECT_VERSION == 0x20ULL, 
"found 0x%.16llxULL\n", - OBD_CONNECT_VERSION); - LASSERTF(OBD_CONNECT_REQPORTAL == 0x40ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_REQPORTAL); - LASSERTF(OBD_CONNECT_ACL == 0x80ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_ACL); - LASSERTF(OBD_CONNECT_XATTR == 0x100ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_XATTR); - LASSERTF(OBD_CONNECT_LARGE_ACL == 0x200ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_LARGE_ACL); - LASSERTF(OBD_CONNECT_TRUNCLOCK == 0x400ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_TRUNCLOCK); - LASSERTF(OBD_CONNECT_TRANSNO == 0x800ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_TRANSNO); - LASSERTF(OBD_CONNECT_IBITS == 0x1000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_IBITS); - LASSERTF(OBD_CONNECT_JOIN == 0x2000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_JOIN); - LASSERTF(OBD_CONNECT_ATTRFID == 0x4000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_ATTRFID); - LASSERTF(OBD_CONNECT_NODEVOH == 0x8000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_NODEVOH); - LASSERTF(OBD_CONNECT_RMT_CLIENT == 0x10000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_RMT_CLIENT); - LASSERTF(OBD_CONNECT_RMT_CLIENT_FORCE == 0x20000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_RMT_CLIENT_FORCE); - LASSERTF(OBD_CONNECT_BRW_SIZE == 0x40000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_BRW_SIZE); - LASSERTF(OBD_CONNECT_QUOTA64 == 0x80000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_QUOTA64); - LASSERTF(OBD_CONNECT_MDS_CAPA == 0x100000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_MDS_CAPA); - LASSERTF(OBD_CONNECT_OSS_CAPA == 0x200000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_OSS_CAPA); - LASSERTF(OBD_CONNECT_CANCELSET == 0x400000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_CANCELSET); - LASSERTF(OBD_CONNECT_SOM == 0x800000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_SOM); - LASSERTF(OBD_CONNECT_AT == 0x1000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_AT); - LASSERTF(OBD_CONNECT_LRU_RESIZE == 0x2000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_LRU_RESIZE); - LASSERTF(OBD_CONNECT_MDS_MDS == 0x4000000ULL, "found 
0x%.16llxULL\n", - OBD_CONNECT_MDS_MDS); - LASSERTF(OBD_CONNECT_REAL == 0x8000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_REAL); - LASSERTF(OBD_CONNECT_CHANGE_QS == 0x10000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_CHANGE_QS); - LASSERTF(OBD_CONNECT_CKSUM == 0x20000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_CKSUM); - LASSERTF(OBD_CONNECT_FID == 0x40000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_FID); - LASSERTF(OBD_CONNECT_VBR == 0x80000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_VBR); - LASSERTF(OBD_CONNECT_LOV_V3 == 0x100000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_LOV_V3); - LASSERTF(OBD_CONNECT_GRANT_SHRINK == 0x200000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_GRANT_SHRINK); - LASSERTF(OBD_CONNECT_SKIP_ORPHAN == 0x400000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_SKIP_ORPHAN); - LASSERTF(OBD_CONNECT_MAX_EASIZE == 0x800000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_MAX_EASIZE); - LASSERTF(OBD_CONNECT_FULL20 == 0x1000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_FULL20); - LASSERTF(OBD_CONNECT_LAYOUTLOCK == 0x2000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_LAYOUTLOCK); - LASSERTF(OBD_CONNECT_64BITHASH == 0x4000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_64BITHASH); - LASSERTF(OBD_CONNECT_MAXBYTES == 0x8000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_MAXBYTES); - LASSERTF(OBD_CONNECT_IMP_RECOV == 0x10000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_IMP_RECOV); - LASSERTF(OBD_CONNECT_JOBSTATS == 0x20000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_JOBSTATS); - LASSERTF(OBD_CONNECT_UMASK == 0x40000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_UMASK); - LASSERTF(OBD_CONNECT_EINPROGRESS == 0x80000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_EINPROGRESS); - LASSERTF(OBD_CONNECT_GRANT_PARAM == 0x100000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_GRANT_PARAM); - LASSERTF(OBD_CONNECT_FLOCK_OWNER == 0x200000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_FLOCK_OWNER); - LASSERTF(OBD_CONNECT_LVB_TYPE == 
0x400000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_LVB_TYPE); - LASSERTF(OBD_CONNECT_NANOSEC_TIME == 0x800000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_NANOSEC_TIME); - LASSERTF(OBD_CONNECT_LIGHTWEIGHT == 0x1000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_LIGHTWEIGHT); - LASSERTF(OBD_CONNECT_SHORTIO == 0x2000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_SHORTIO); - LASSERTF(OBD_CONNECT_PINGLESS == 0x4000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_PINGLESS); - LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL, - "found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD); - LASSERTF(OBD_CONNECT_OPEN_BY_FID == 0x20000000000000ULL, - "found 0x%.16llxULL\n", OBD_CONNECT_OPEN_BY_FID); - LASSERTF(OBD_CONNECT_LFSCK == 0x40000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_LFSCK); - LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_UNLINK_CLOSE); - LASSERTF(OBD_CONNECT_MULTIMODRPCS == 0x200000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_MULTIMODRPCS); - LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_DIR_STRIPE); - LASSERTF(OBD_CONNECT_SUBTREE == 0x800000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_SUBTREE); - LASSERTF(OBD_CONNECT_LOCK_AHEAD == 0x1000000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_LOCK_AHEAD); - LASSERTF(OBD_CONNECT_OBDOPACK == 0x4000000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_OBDOPACK); - LASSERTF(OBD_CONNECT_FLAGS2 == 0x8000000000000000ULL, "found 0x%.16llxULL\n", - OBD_CONNECT_FLAGS2); - LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)OBD_CKSUM_CRC32); - LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned int)OBD_CKSUM_ADLER); - LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n", - (unsigned int)OBD_CKSUM_CRC32C); - - /* Checks for struct obdo */ - LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n", - (long long)(int)sizeof(struct 
obdo)); - LASSERTF((int)offsetof(struct obdo, o_valid) == 0, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_valid)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_valid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_valid)); - LASSERTF((int)offsetof(struct obdo, o_oi) == 8, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_oi)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_oi) == 16, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_oi)); - LASSERTF((int)offsetof(struct obdo, o_parent_seq) == 24, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_parent_seq)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_seq) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_parent_seq)); - LASSERTF((int)offsetof(struct obdo, o_size) == 32, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_size)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_size) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_size)); - LASSERTF((int)offsetof(struct obdo, o_mtime) == 40, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_mtime)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_mtime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_mtime)); - LASSERTF((int)offsetof(struct obdo, o_atime) == 48, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_atime)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_atime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_atime)); - LASSERTF((int)offsetof(struct obdo, o_ctime) == 56, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_ctime)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_ctime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_ctime)); - LASSERTF((int)offsetof(struct obdo, o_blocks) == 64, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_blocks)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_blocks) == 8, "found %lld\n", - (long long)(int)sizeof(((struct 
obdo *)0)->o_blocks)); - LASSERTF((int)offsetof(struct obdo, o_grant) == 72, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_grant)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_grant) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_grant)); - LASSERTF((int)offsetof(struct obdo, o_blksize) == 80, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_blksize)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_blksize) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_blksize)); - LASSERTF((int)offsetof(struct obdo, o_mode) == 84, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_mode)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_mode)); - LASSERTF((int)offsetof(struct obdo, o_uid) == 88, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_uid)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_uid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_uid)); - LASSERTF((int)offsetof(struct obdo, o_gid) == 92, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_gid)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_gid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_gid)); - LASSERTF((int)offsetof(struct obdo, o_flags) == 96, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_flags)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_flags)); - LASSERTF((int)offsetof(struct obdo, o_nlink) == 100, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_nlink)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_nlink) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_nlink)); - LASSERTF((int)offsetof(struct obdo, o_parent_oid) == 104, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_parent_oid)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_oid) == 4, "found %lld\n", - (long 
long)(int)sizeof(((struct obdo *)0)->o_parent_oid)); - LASSERTF((int)offsetof(struct obdo, o_misc) == 108, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_misc)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_misc) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_misc)); - LASSERTF((int)offsetof(struct obdo, o_ioepoch) == 112, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_ioepoch)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_ioepoch) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_ioepoch)); - LASSERTF((int)offsetof(struct obdo, o_stripe_idx) == 120, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_stripe_idx)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_stripe_idx) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_stripe_idx)); - LASSERTF((int)offsetof(struct obdo, o_parent_ver) == 124, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_parent_ver)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_ver) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_parent_ver)); - LASSERTF((int)offsetof(struct obdo, o_handle) == 128, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_handle)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_handle) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_handle)); - LASSERTF((int)offsetof(struct obdo, o_lcookie) == 136, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_lcookie)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_lcookie) == 32, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_lcookie)); - LASSERTF((int)offsetof(struct obdo, o_uid_h) == 168, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_uid_h)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_uid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_uid_h)); - LASSERTF((int)offsetof(struct obdo, o_gid_h) == 172, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_gid_h)); - 
LASSERTF((int)sizeof(((struct obdo *)0)->o_gid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_gid_h)); - LASSERTF((int)offsetof(struct obdo, o_data_version) == 176, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_data_version)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_data_version) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_data_version)); - LASSERTF((int)offsetof(struct obdo, o_padding_4) == 184, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_padding_4)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_4) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_padding_4)); - LASSERTF((int)offsetof(struct obdo, o_padding_5) == 192, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_padding_5)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_5) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_padding_5)); - LASSERTF((int)offsetof(struct obdo, o_padding_6) == 200, "found %lld\n", - (long long)(int)offsetof(struct obdo, o_padding_6)); - LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_6) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obdo *)0)->o_padding_6)); - LASSERTF(OBD_MD_FLID == (0x00000001ULL), "found 0x%.16llxULL\n", - OBD_MD_FLID); - LASSERTF(OBD_MD_FLATIME == (0x00000002ULL), "found 0x%.16llxULL\n", - OBD_MD_FLATIME); - LASSERTF(OBD_MD_FLMTIME == (0x00000004ULL), "found 0x%.16llxULL\n", - OBD_MD_FLMTIME); - LASSERTF(OBD_MD_FLCTIME == (0x00000008ULL), "found 0x%.16llxULL\n", - OBD_MD_FLCTIME); - LASSERTF(OBD_MD_FLSIZE == (0x00000010ULL), "found 0x%.16llxULL\n", - OBD_MD_FLSIZE); - LASSERTF(OBD_MD_FLBLOCKS == (0x00000020ULL), "found 0x%.16llxULL\n", - OBD_MD_FLBLOCKS); - LASSERTF(OBD_MD_FLBLKSZ == (0x00000040ULL), "found 0x%.16llxULL\n", - OBD_MD_FLBLKSZ); - LASSERTF(OBD_MD_FLMODE == (0x00000080ULL), "found 0x%.16llxULL\n", - OBD_MD_FLMODE); - LASSERTF(OBD_MD_FLTYPE == (0x00000100ULL), "found 0x%.16llxULL\n", - 
OBD_MD_FLTYPE); - LASSERTF(OBD_MD_FLUID == (0x00000200ULL), "found 0x%.16llxULL\n", - OBD_MD_FLUID); - LASSERTF(OBD_MD_FLGID == (0x00000400ULL), "found 0x%.16llxULL\n", - OBD_MD_FLGID); - LASSERTF(OBD_MD_FLFLAGS == (0x00000800ULL), "found 0x%.16llxULL\n", - OBD_MD_FLFLAGS); - LASSERTF(OBD_MD_FLNLINK == (0x00002000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLNLINK); - LASSERTF(OBD_MD_FLGENER == (0x00004000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLGENER); - LASSERTF(OBD_MD_FLRDEV == (0x00010000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLRDEV); - LASSERTF(OBD_MD_FLEASIZE == (0x00020000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLEASIZE); - LASSERTF(OBD_MD_LINKNAME == (0x00040000ULL), "found 0x%.16llxULL\n", - OBD_MD_LINKNAME); - LASSERTF(OBD_MD_FLHANDLE == (0x00080000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLHANDLE); - LASSERTF(OBD_MD_FLCKSUM == (0x00100000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLCKSUM); - LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLQOS); - LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLGROUP); - LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLFID); - LASSERTF(OBD_MD_FLEPOCH == (0x04000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLEPOCH); - LASSERTF(OBD_MD_FLGRANT == (0x08000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLGRANT); - LASSERTF(OBD_MD_FLDIREA == (0x10000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLDIREA); - LASSERTF(OBD_MD_FLUSRQUOTA == (0x20000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLUSRQUOTA); - LASSERTF(OBD_MD_FLGRPQUOTA == (0x40000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLGRPQUOTA); - LASSERTF(OBD_MD_FLMODEASIZE == (0x80000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLMODEASIZE); - LASSERTF(OBD_MD_MDS == (0x0000000100000000ULL), "found 0x%.16llxULL\n", - OBD_MD_MDS); - LASSERTF(OBD_MD_REINT == (0x0000000200000000ULL), "found 0x%.16llxULL\n", - OBD_MD_REINT); - LASSERTF(OBD_MD_MEA == (0x0000000400000000ULL), "found 0x%.16llxULL\n", - OBD_MD_MEA); - 
LASSERTF(OBD_MD_TSTATE == (0x0000000800000000ULL), - "found 0x%.16llxULL\n", OBD_MD_TSTATE); - LASSERTF(OBD_MD_FLXATTR == (0x0000001000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLXATTR); - LASSERTF(OBD_MD_FLXATTRLS == (0x0000002000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLXATTRLS); - LASSERTF(OBD_MD_FLXATTRRM == (0x0000004000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLXATTRRM); - LASSERTF(OBD_MD_FLACL == (0x0000008000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLACL); - LASSERTF(OBD_MD_FLMDSCAPA == (0x0000020000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLMDSCAPA); - LASSERTF(OBD_MD_FLOSSCAPA == (0x0000040000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLOSSCAPA); - LASSERTF(OBD_MD_FLCKSPLIT == (0x0000080000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLCKSPLIT); - LASSERTF(OBD_MD_FLCROSSREF == (0x0000100000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLCROSSREF); - LASSERTF(OBD_MD_FLGETATTRLOCK == (0x0000200000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLGETATTRLOCK); - LASSERTF(OBD_MD_FLDATAVERSION == (0x0010000000000000ULL), "found 0x%.16llxULL\n", - OBD_MD_FLDATAVERSION); - BUILD_BUG_ON(OBD_FL_INLINEDATA != 0x00000001); - BUILD_BUG_ON(OBD_FL_OBDMDEXISTS != 0x00000002); - BUILD_BUG_ON(OBD_FL_DELORPHAN != 0x00000004); - BUILD_BUG_ON(OBD_FL_NORPC != 0x00000008); - BUILD_BUG_ON(OBD_FL_IDONLY != 0x00000010); - BUILD_BUG_ON(OBD_FL_RECREATE_OBJS != 0x00000020); - BUILD_BUG_ON(OBD_FL_DEBUG_CHECK != 0x00000040); - BUILD_BUG_ON(OBD_FL_NO_USRQUOTA != 0x00000100); - BUILD_BUG_ON(OBD_FL_NO_GRPQUOTA != 0x00000200); - BUILD_BUG_ON(OBD_FL_CREATE_CROW != 0x00000400); - BUILD_BUG_ON(OBD_FL_SRVLOCK != 0x00000800); - BUILD_BUG_ON(OBD_FL_CKSUM_CRC32 != 0x00001000); - BUILD_BUG_ON(OBD_FL_CKSUM_ADLER != 0x00002000); - BUILD_BUG_ON(OBD_FL_CKSUM_CRC32C != 0x00004000); - BUILD_BUG_ON(OBD_FL_CKSUM_RSVD2 != 0x00008000); - BUILD_BUG_ON(OBD_FL_CKSUM_RSVD3 != 0x00010000); - BUILD_BUG_ON(OBD_FL_SHRINK_GRANT != 0x00020000); - BUILD_BUG_ON(OBD_FL_MMAP != 0x00040000); - 
BUILD_BUG_ON(OBD_FL_RECOV_RESEND != 0x00080000); - BUILD_BUG_ON(OBD_FL_NOSPC_BLK != 0x00100000); - BUILD_BUG_ON(OBD_FL_LOCAL_MASK != 0xf0000000); - - /* Checks for struct lov_ost_data_v1 */ - LASSERTF((int)sizeof(struct lov_ost_data_v1) == 24, "found %lld\n", - (long long)(int)sizeof(struct lov_ost_data_v1)); - LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_oi) == 0, "found %lld\n", - (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_oi)); - LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_oi) == 16, "found %lld\n", - (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_oi)); - LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_gen) == 16, "found %lld\n", - (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_gen)); - LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_gen) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_gen)); - LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_idx) == 20, "found %lld\n", - (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_idx)); - LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_idx) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_idx)); - - /* Checks for struct lov_mds_md_v1 */ - LASSERTF((int)sizeof(struct lov_mds_md_v1) == 32, "found %lld\n", - (long long)(int)sizeof(struct lov_mds_md_v1)); - LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v1, lmm_magic)); - LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_magic)); - LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_pattern) == 4, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v1, lmm_pattern)); - LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_pattern) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_pattern)); - 
LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_oi) == 8, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v1, lmm_oi)); - LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_oi) == 16, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_oi)); - LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_stripe_size) == 24, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v1, lmm_stripe_size)); - LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_size) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_size)); - LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_stripe_count) == 28, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v1, lmm_stripe_count)); - LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_count) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_count)); - LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_layout_gen) == 30, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v1, lmm_layout_gen)); - LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_layout_gen) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_layout_gen)); - LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_objects[0]) == 32, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v1, lmm_objects[0])); - LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]) == 24, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0])); - BUILD_BUG_ON(LOV_MAGIC_V1 != (0x0BD10000 | 0x0BD0)); - - /* Checks for struct lov_mds_md_v3 */ - LASSERTF((int)sizeof(struct lov_mds_md_v3) == 48, "found %lld\n", - (long long)(int)sizeof(struct lov_mds_md_v3)); - LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v3, lmm_magic)); - LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_magic) == 4, "found %lld\n", - 
(long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_magic)); - LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pattern) == 4, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v3, lmm_pattern)); - LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pattern) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pattern)); - LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_oi) == 8, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v3, lmm_oi)); - LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_oi) == 16, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_oi)); - LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_stripe_size) == 24, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v3, lmm_stripe_size)); - LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_size) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_size)); - LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_stripe_count) == 28, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v3, lmm_stripe_count)); - LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_count) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_count)); - LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_layout_gen) == 30, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v3, lmm_layout_gen)); - LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen)); - BUILD_BUG_ON(LOV_MAXPOOLNAME != 15); - LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]) == 48, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16])); - LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16]) == 1, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16])); - 
LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_objects[0]) == 48, "found %lld\n", - (long long)(int)offsetof(struct lov_mds_md_v3, lmm_objects[0])); - LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]) == 24, "found %lld\n", - (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0])); - BUILD_BUG_ON(LOV_MAGIC_V3 != (0x0BD30000 | 0x0BD0)); - LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)LOV_PATTERN_RAID0); - LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned int)LOV_PATTERN_RAID1); - LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n", - (unsigned int)LOV_PATTERN_FIRST); - LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n", - (unsigned int)LOV_PATTERN_CMOBD); - - /* Checks for struct lmv_mds_md_v1 */ - LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n", - (long long)(int)sizeof(struct lmv_mds_md_v1)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_magic)); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_magic)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_stripe_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_stripe_count)); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_count)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_master_mdt_index) == 8, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_master_mdt_index)); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_master_mdt_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_master_mdt_index)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_hash_type) == 12, "found %lld\n", - (long 
long)(int)offsetof(struct lmv_mds_md_v1, lmv_hash_type)); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_hash_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_hash_type)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_layout_version) == 16, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_layout_version)); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_layout_version) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_layout_version)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding1) == 20, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding1)); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding1)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding2) == 24, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding2)); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding2)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding3) == 32, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding3)); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding3)); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_pool_name[16]) == 56, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_pool_name[16])); - LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_pool_name[16]) == 1, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_pool_name[16])); - LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_stripe_fids[0]) == 56, "found %lld\n", - (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_stripe_fids[0])); - LASSERTF((int)sizeof(((struct 
lmv_mds_md_v1 *)0)->lmv_stripe_fids[0]) == 16, "found %lld\n", - (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_fids[0])); - BUILD_BUG_ON(LMV_MAGIC_V1 != 0x0CD20CD0); - BUILD_BUG_ON(LMV_MAGIC_STRIPE != 0x0CD40CD0); - BUILD_BUG_ON(LMV_HASH_TYPE_MASK != 0x0000ffff); - BUILD_BUG_ON(LMV_HASH_FLAG_MIGRATION != 0x80000000); - BUILD_BUG_ON(LMV_HASH_FLAG_DEAD != 0x40000000); - - /* Checks for struct obd_statfs */ - LASSERTF((int)sizeof(struct obd_statfs) == 144, "found %lld\n", - (long long)(int)sizeof(struct obd_statfs)); - LASSERTF((int)offsetof(struct obd_statfs, os_type) == 0, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_type)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_type) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_type)); - LASSERTF((int)offsetof(struct obd_statfs, os_blocks) == 8, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_blocks)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_blocks) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_blocks)); - LASSERTF((int)offsetof(struct obd_statfs, os_bfree) == 16, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_bfree)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bfree) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_bfree)); - LASSERTF((int)offsetof(struct obd_statfs, os_bavail) == 24, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_bavail)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bavail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_bavail)); - LASSERTF((int)offsetof(struct obd_statfs, os_ffree) == 40, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_ffree)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_ffree) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_ffree)); - LASSERTF((int)offsetof(struct obd_statfs, os_fsid) == 48, 
"found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_fsid)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_fsid) == 40, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_fsid)); - LASSERTF((int)offsetof(struct obd_statfs, os_bsize) == 88, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_bsize)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bsize) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_bsize)); - LASSERTF((int)offsetof(struct obd_statfs, os_namelen) == 92, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_namelen)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_namelen) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_namelen)); - LASSERTF((int)offsetof(struct obd_statfs, os_state) == 104, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_state)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_state) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_state)); - LASSERTF((int)offsetof(struct obd_statfs, os_fprecreated) == 108, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_fprecreated)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_fprecreated) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_fprecreated)); - LASSERTF((int)offsetof(struct obd_statfs, os_spare2) == 112, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_spare2)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare2)); - LASSERTF((int)offsetof(struct obd_statfs, os_spare3) == 116, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_spare3)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare3) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare3)); - LASSERTF((int)offsetof(struct obd_statfs, os_spare4) == 120, 
"found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_spare4)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare4) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare4)); - LASSERTF((int)offsetof(struct obd_statfs, os_spare5) == 124, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_spare5)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare5) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare5)); - LASSERTF((int)offsetof(struct obd_statfs, os_spare6) == 128, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_spare6)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare6) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare6)); - LASSERTF((int)offsetof(struct obd_statfs, os_spare7) == 132, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_spare7)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare7) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare7)); - LASSERTF((int)offsetof(struct obd_statfs, os_spare8) == 136, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_spare8)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare8) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare8)); - LASSERTF((int)offsetof(struct obd_statfs, os_spare9) == 140, "found %lld\n", - (long long)(int)offsetof(struct obd_statfs, os_spare9)); - LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare9) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare9)); - - /* Checks for struct obd_ioobj */ - LASSERTF((int)sizeof(struct obd_ioobj) == 24, "found %lld\n", - (long long)(int)sizeof(struct obd_ioobj)); - LASSERTF((int)offsetof(struct obd_ioobj, ioo_oid) == 0, "found %lld\n", - (long long)(int)offsetof(struct obd_ioobj, ioo_oid)); - LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_oid) == 16, "found %lld\n", - 
(long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_oid)); - LASSERTF((int)offsetof(struct obd_ioobj, ioo_max_brw) == 16, "found %lld\n", - (long long)(int)offsetof(struct obd_ioobj, ioo_max_brw)); - LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_max_brw) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_max_brw)); - LASSERTF((int)offsetof(struct obd_ioobj, ioo_bufcnt) == 20, "found %lld\n", - (long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt)); - LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt)); - LASSERTF(IOOBJ_MAX_BRW_BITS == 16, "found %lld\n", - (long long)IOOBJ_MAX_BRW_BITS); - - /* Checks for union lquota_id */ - LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n", - (long long)(int)sizeof(union lquota_id)); - - /* Checks for struct obd_quotactl */ - LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n", - (long long)(int)sizeof(struct obd_quotactl)); - LASSERTF((int)offsetof(struct obd_quotactl, qc_cmd) == 0, "found %lld\n", - (long long)(int)offsetof(struct obd_quotactl, qc_cmd)); - LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_cmd) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_cmd)); - LASSERTF((int)offsetof(struct obd_quotactl, qc_type) == 4, "found %lld\n", - (long long)(int)offsetof(struct obd_quotactl, qc_type)); - LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_type)); - LASSERTF((int)offsetof(struct obd_quotactl, qc_id) == 8, "found %lld\n", - (long long)(int)offsetof(struct obd_quotactl, qc_id)); - LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_id)); - LASSERTF((int)offsetof(struct obd_quotactl, qc_stat) == 12, "found %lld\n", - (long long)(int)offsetof(struct obd_quotactl, qc_stat)); - 
LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_stat) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_stat)); - LASSERTF((int)offsetof(struct obd_quotactl, qc_dqinfo) == 16, "found %lld\n", - (long long)(int)offsetof(struct obd_quotactl, qc_dqinfo)); - LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_dqinfo) == 24, "found %lld\n", - (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_dqinfo)); - LASSERTF((int)offsetof(struct obd_quotactl, qc_dqblk) == 40, "found %lld\n", - (long long)(int)offsetof(struct obd_quotactl, qc_dqblk)); - LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_dqblk) == 72, "found %lld\n", - (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_dqblk)); - - /* Checks for struct obd_dqinfo */ - LASSERTF((int)sizeof(struct obd_dqinfo) == 24, "found %lld\n", - (long long)(int)sizeof(struct obd_dqinfo)); - LASSERTF((int)offsetof(struct obd_dqinfo, dqi_bgrace) == 0, "found %lld\n", - (long long)(int)offsetof(struct obd_dqinfo, dqi_bgrace)); - LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_bgrace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_bgrace)); - LASSERTF((int)offsetof(struct obd_dqinfo, dqi_igrace) == 8, "found %lld\n", - (long long)(int)offsetof(struct obd_dqinfo, dqi_igrace)); - LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_igrace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_igrace)); - LASSERTF((int)offsetof(struct obd_dqinfo, dqi_flags) == 16, "found %lld\n", - (long long)(int)offsetof(struct obd_dqinfo, dqi_flags)); - LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_flags)); - LASSERTF((int)offsetof(struct obd_dqinfo, dqi_valid) == 20, "found %lld\n", - (long long)(int)offsetof(struct obd_dqinfo, dqi_valid)); - LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_valid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqinfo 
*)0)->dqi_valid)); - - /* Checks for struct obd_dqblk */ - LASSERTF((int)sizeof(struct obd_dqblk) == 72, "found %lld\n", - (long long)(int)sizeof(struct obd_dqblk)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_bhardlimit) == 0, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_bhardlimit)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_bhardlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_bhardlimit)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_bsoftlimit) == 8, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_bsoftlimit)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_bsoftlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_bsoftlimit)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_curspace) == 16, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_curspace)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_curspace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_curspace)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_ihardlimit) == 24, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_ihardlimit)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_ihardlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_ihardlimit)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_isoftlimit) == 32, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_isoftlimit)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_isoftlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_isoftlimit)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_curinodes) == 40, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_curinodes)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_curinodes) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_curinodes)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_btime) 
== 48, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_btime)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_btime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_btime)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_itime) == 56, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_itime)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_itime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_itime)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_valid) == 64, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_valid)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_valid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_valid)); - LASSERTF((int)offsetof(struct obd_dqblk, dqb_padding) == 68, "found %lld\n", - (long long)(int)offsetof(struct obd_dqblk, dqb_padding)); - LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_padding)); - LASSERTF(Q_QUOTACHECK == 0x800100, "found 0x%.8x\n", - Q_QUOTACHECK); - LASSERTF(Q_INITQUOTA == 0x800101, "found 0x%.8x\n", - Q_INITQUOTA); - LASSERTF(Q_GETOINFO == 0x800102, "found 0x%.8x\n", - Q_GETOINFO); - LASSERTF(Q_GETOQUOTA == 0x800103, "found 0x%.8x\n", - Q_GETOQUOTA); - LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n", - Q_FINVALIDATE); - - /* Checks for struct niobuf_remote */ - LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n", - (long long)(int)sizeof(struct niobuf_remote)); - LASSERTF((int)offsetof(struct niobuf_remote, rnb_offset) == 0, "found %lld\n", - (long long)(int)offsetof(struct niobuf_remote, rnb_offset)); - LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_offset) == 8, "found %lld\n", - (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_offset)); - LASSERTF((int)offsetof(struct niobuf_remote, rnb_len) == 8, "found %lld\n", - (long long)(int)offsetof(struct 
niobuf_remote, rnb_len)); - LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_len) == 4, "found %lld\n", - (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_len)); - LASSERTF((int)offsetof(struct niobuf_remote, rnb_flags) == 12, "found %lld\n", - (long long)(int)offsetof(struct niobuf_remote, rnb_flags)); - LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_flags)); - LASSERTF(OBD_BRW_READ == 0x01, "found 0x%.8x\n", - OBD_BRW_READ); - LASSERTF(OBD_BRW_WRITE == 0x02, "found 0x%.8x\n", - OBD_BRW_WRITE); - LASSERTF(OBD_BRW_SYNC == 0x08, "found 0x%.8x\n", - OBD_BRW_SYNC); - LASSERTF(OBD_BRW_CHECK == 0x10, "found 0x%.8x\n", - OBD_BRW_CHECK); - LASSERTF(OBD_BRW_FROM_GRANT == 0x20, "found 0x%.8x\n", - OBD_BRW_FROM_GRANT); - LASSERTF(OBD_BRW_GRANTED == 0x40, "found 0x%.8x\n", - OBD_BRW_GRANTED); - LASSERTF(OBD_BRW_NOCACHE == 0x80, "found 0x%.8x\n", - OBD_BRW_NOCACHE); - LASSERTF(OBD_BRW_NOQUOTA == 0x100, "found 0x%.8x\n", - OBD_BRW_NOQUOTA); - LASSERTF(OBD_BRW_SRVLOCK == 0x200, "found 0x%.8x\n", - OBD_BRW_SRVLOCK); - LASSERTF(OBD_BRW_ASYNC == 0x400, "found 0x%.8x\n", - OBD_BRW_ASYNC); - LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n", - OBD_BRW_MEMALLOC); - LASSERTF(OBD_BRW_OVER_USRQUOTA == 0x1000, "found 0x%.8x\n", - OBD_BRW_OVER_USRQUOTA); - LASSERTF(OBD_BRW_OVER_GRPQUOTA == 0x2000, "found 0x%.8x\n", - OBD_BRW_OVER_GRPQUOTA); - LASSERTF(OBD_BRW_SOFT_SYNC == 0x4000, "found 0x%.8x\n", - OBD_BRW_SOFT_SYNC); - - /* Checks for struct ost_body */ - LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n", - (long long)(int)sizeof(struct ost_body)); - LASSERTF((int)offsetof(struct ost_body, oa) == 0, "found %lld\n", - (long long)(int)offsetof(struct ost_body, oa)); - LASSERTF((int)sizeof(((struct ost_body *)0)->oa) == 208, "found %lld\n", - (long long)(int)sizeof(((struct ost_body *)0)->oa)); - - /* Checks for struct ll_fid */ - LASSERTF((int)sizeof(struct ll_fid) == 16, 
"found %lld\n", - (long long)(int)sizeof(struct ll_fid)); - LASSERTF((int)offsetof(struct ll_fid, id) == 0, "found %lld\n", - (long long)(int)offsetof(struct ll_fid, id)); - LASSERTF((int)sizeof(((struct ll_fid *)0)->id) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ll_fid *)0)->id)); - LASSERTF((int)offsetof(struct ll_fid, generation) == 8, "found %lld\n", - (long long)(int)offsetof(struct ll_fid, generation)); - LASSERTF((int)sizeof(((struct ll_fid *)0)->generation) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ll_fid *)0)->generation)); - LASSERTF((int)offsetof(struct ll_fid, f_type) == 12, "found %lld\n", - (long long)(int)offsetof(struct ll_fid, f_type)); - LASSERTF((int)sizeof(((struct ll_fid *)0)->f_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ll_fid *)0)->f_type)); - - /* Checks for struct mdt_body */ - LASSERTF((int)sizeof(struct mdt_body) == 216, "found %lld\n", - (long long)(int)sizeof(struct mdt_body)); - LASSERTF((int)offsetof(struct mdt_body, mbo_fid1) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_fid1)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fid1) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fid1)); - LASSERTF((int)offsetof(struct mdt_body, mbo_fid2) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_fid2)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fid2) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fid2)); - LASSERTF((int)offsetof(struct mdt_body, mbo_handle) == 32, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_handle)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_handle) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_handle)); - LASSERTF((int)offsetof(struct mdt_body, mbo_valid) == 40, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_valid)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_valid) == 8, "found 
%lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_valid)); - LASSERTF((int)offsetof(struct mdt_body, mbo_size) == 48, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_size)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_size) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_size)); - LASSERTF((int)offsetof(struct mdt_body, mbo_mtime) == 56, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_mtime)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_mtime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_mtime)); - LASSERTF((int)offsetof(struct mdt_body, mbo_atime) == 64, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_atime)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_atime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_atime)); - LASSERTF((int)offsetof(struct mdt_body, mbo_ctime) == 72, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_ctime)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_ctime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_ctime)); - LASSERTF((int)offsetof(struct mdt_body, mbo_blocks) == 80, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_blocks)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_blocks) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_blocks)); - LASSERTF((int)offsetof(struct mdt_body, mbo_t_state) == 96, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_t_state)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_t_state) == 8, - "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_t_state)); - LASSERTF((int)offsetof(struct mdt_body, mbo_fsuid) == 104, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_fsuid)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fsuid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body 
*)0)->mbo_fsuid)); - LASSERTF((int)offsetof(struct mdt_body, mbo_fsgid) == 108, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_fsgid)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fsgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fsgid)); - LASSERTF((int)offsetof(struct mdt_body, mbo_capability) == 112, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_capability)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_capability) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_capability)); - LASSERTF((int)offsetof(struct mdt_body, mbo_mode) == 116, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_mode)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_mode)); - LASSERTF((int)offsetof(struct mdt_body, mbo_uid) == 120, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_uid)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_uid)); - LASSERTF((int)offsetof(struct mdt_body, mbo_gid) == 124, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_gid)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_gid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_gid)); - LASSERTF((int)offsetof(struct mdt_body, mbo_flags) == 128, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_flags)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_flags)); - LASSERTF((int)offsetof(struct mdt_body, mbo_rdev) == 132, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_rdev)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_rdev) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_rdev)); - LASSERTF((int)offsetof(struct mdt_body, mbo_nlink) 
== 136, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_nlink)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_nlink) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_nlink)); - LASSERTF((int)offsetof(struct mdt_body, mbo_unused2) == 140, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_unused2)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused2)); - LASSERTF((int)offsetof(struct mdt_body, mbo_suppgid) == 144, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_suppgid)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_suppgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_suppgid)); - LASSERTF((int)offsetof(struct mdt_body, mbo_eadatasize) == 148, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_eadatasize)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_eadatasize) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_eadatasize)); - LASSERTF((int)offsetof(struct mdt_body, mbo_aclsize) == 152, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_aclsize)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_aclsize) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_aclsize)); - LASSERTF((int)offsetof(struct mdt_body, mbo_max_mdsize) == 156, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_max_mdsize)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize)); - LASSERTF((int)offsetof(struct mdt_body, mbo_unused3) == 160, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_unused3)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused3) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused3)); - LASSERTF((int)offsetof(struct mdt_body, 
mbo_uid_h) == 164, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_uid_h)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_uid_h)); - LASSERTF((int)offsetof(struct mdt_body, mbo_gid_h) == 168, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_gid_h)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_gid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_gid_h)); - LASSERTF((int)offsetof(struct mdt_body, mbo_padding_5) == 172, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_padding_5)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_5) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_5)); - LASSERTF((int)offsetof(struct mdt_body, mbo_padding_6) == 176, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_padding_6)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_6) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_6)); - LASSERTF((int)offsetof(struct mdt_body, mbo_padding_7) == 184, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_padding_7)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_7) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_7)); - LASSERTF((int)offsetof(struct mdt_body, mbo_padding_8) == 192, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_padding_8)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_8) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_8)); - LASSERTF((int)offsetof(struct mdt_body, mbo_padding_9) == 200, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_padding_9)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_9) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_9)); - 
LASSERTF((int)offsetof(struct mdt_body, mbo_padding_10) == 208, "found %lld\n", - (long long)(int)offsetof(struct mdt_body, mbo_padding_10)); - LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_10) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_10)); - LASSERTF(MDS_FMODE_CLOSED == 000000000000UL, "found 0%.11oUL\n", - MDS_FMODE_CLOSED); - LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n", - MDS_FMODE_EXEC); - LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n", - MDS_OPEN_CREATED); - LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n", - MDS_OPEN_CROSS); - LASSERTF(MDS_OPEN_CREAT == 000000000100UL, "found 0%.11oUL\n", - MDS_OPEN_CREAT); - LASSERTF(MDS_OPEN_EXCL == 000000000200UL, "found 0%.11oUL\n", - MDS_OPEN_EXCL); - LASSERTF(MDS_OPEN_TRUNC == 000000001000UL, "found 0%.11oUL\n", - MDS_OPEN_TRUNC); - LASSERTF(MDS_OPEN_APPEND == 000000002000UL, "found 0%.11oUL\n", - MDS_OPEN_APPEND); - LASSERTF(MDS_OPEN_SYNC == 000000010000UL, "found 0%.11oUL\n", - MDS_OPEN_SYNC); - LASSERTF(MDS_OPEN_DIRECTORY == 000000200000UL, "found 0%.11oUL\n", - MDS_OPEN_DIRECTORY); - LASSERTF(MDS_OPEN_BY_FID == 000040000000UL, "found 0%.11oUL\n", - MDS_OPEN_BY_FID); - LASSERTF(MDS_OPEN_DELAY_CREATE == 000100000000UL, "found 0%.11oUL\n", - MDS_OPEN_DELAY_CREATE); - LASSERTF(MDS_OPEN_OWNEROVERRIDE == 000200000000UL, "found 0%.11oUL\n", - MDS_OPEN_OWNEROVERRIDE); - LASSERTF(MDS_OPEN_JOIN_FILE == 000400000000UL, "found 0%.11oUL\n", - MDS_OPEN_JOIN_FILE); - LASSERTF(MDS_OPEN_LOCK == 004000000000UL, "found 0%.11oUL\n", - MDS_OPEN_LOCK); - LASSERTF(MDS_OPEN_HAS_EA == 010000000000UL, "found 0%.11oUL\n", - MDS_OPEN_HAS_EA); - LASSERTF(MDS_OPEN_HAS_OBJS == 020000000000UL, "found 0%.11oUL\n", - MDS_OPEN_HAS_OBJS); - LASSERTF(MDS_OPEN_NORESTORE == 00000000000100000000000ULL, "found 0%.22lloULL\n", - (long long)MDS_OPEN_NORESTORE); - LASSERTF(MDS_OPEN_NEWSTRIPE == 00000000000200000000000ULL, "found 0%.22lloULL\n", - (long 
long)MDS_OPEN_NEWSTRIPE); - LASSERTF(MDS_OPEN_VOLATILE == 00000000000400000000000ULL, "found 0%.22lloULL\n", - (long long)MDS_OPEN_VOLATILE); - LASSERTF(LUSTRE_SYNC_FL == 0x00000008, "found 0x%.8x\n", - LUSTRE_SYNC_FL); - LASSERTF(LUSTRE_IMMUTABLE_FL == 0x00000010, "found 0x%.8x\n", - LUSTRE_IMMUTABLE_FL); - LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n", - LUSTRE_APPEND_FL); - LASSERTF(LUSTRE_NODUMP_FL == 0x00000040, "found 0x%.8x\n", - LUSTRE_NODUMP_FL); - LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n", - LUSTRE_NOATIME_FL); - LASSERTF(LUSTRE_INDEX_FL == 0x00001000, "found 0x%.8x\n", - LUSTRE_INDEX_FL); - LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n", - LUSTRE_DIRSYNC_FL); - LASSERTF(LUSTRE_TOPDIR_FL == 0x00020000, "found 0x%.8x\n", - LUSTRE_TOPDIR_FL); - LASSERTF(LUSTRE_DIRECTIO_FL == 0x00100000, "found 0x%.8x\n", - LUSTRE_DIRECTIO_FL); - LASSERTF(LUSTRE_INLINE_DATA_FL == 0x10000000, "found 0x%.8x\n", - LUSTRE_INLINE_DATA_FL); - LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n", - MDS_INODELOCK_LOOKUP); - LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n", - MDS_INODELOCK_UPDATE); - LASSERTF(MDS_INODELOCK_OPEN == 0x000004, "found 0x%.8x\n", - MDS_INODELOCK_OPEN); - LASSERTF(MDS_INODELOCK_LAYOUT == 0x000008, "found 0x%.8x\n", - MDS_INODELOCK_LAYOUT); - - /* Checks for struct mdt_ioepoch */ - LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n", - (long long)(int)sizeof(struct mdt_ioepoch)); - LASSERTF((int)offsetof(struct mdt_ioepoch, mio_handle) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_ioepoch, mio_handle)); - LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_handle) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_handle)); - LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused1) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_ioepoch, mio_unused1)); - LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1) == 8, "found %lld\n", - 
(long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1)); - LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused2) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_ioepoch, mio_unused2)); - LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2)); - LASSERTF((int)offsetof(struct mdt_ioepoch, mio_padding) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_ioepoch, mio_padding)); - LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_padding)); - - /* Checks for struct mdt_rec_setattr */ - LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n", - (long long)(int)sizeof(struct mdt_rec_setattr)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_opcode) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_opcode)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_opcode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_opcode)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_cap) == 4, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_cap)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_cap) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_cap)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsuid) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsuid)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsuid_h) == 12, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsuid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr 
*)0)->sa_fsuid_h)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsgid) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsgid)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsgid_h) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsgid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid_h)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_suppgid) == 24, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_suppgid)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_suppgid_h) == 28, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_suppgid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid_h)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_1) == 32, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_1)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_1_h) == 36, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_1_h)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1_h)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fid) == 40, "found %lld\n", - (long long)(int)offsetof(struct 
mdt_rec_setattr, sa_fid)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fid)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_valid) == 56, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_valid)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_valid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_valid)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_uid) == 64, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_uid)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_uid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_uid)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_gid) == 68, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_gid)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_gid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_gid)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_size) == 72, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_size)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_size) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_size)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_blocks) == 80, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_blocks)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_blocks) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_blocks)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_mtime) == 88, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_mtime)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_mtime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_mtime)); - LASSERTF((int)offsetof(struct 
mdt_rec_setattr, sa_atime) == 96, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_atime)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_atime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_atime)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_ctime) == 104, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_ctime)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_ctime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_ctime)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_attr_flags) == 112, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_attr_flags)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_attr_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_attr_flags)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_mode) == 116, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_mode)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_mode)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_bias) == 120, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_bias)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_bias) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_bias)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_3) == 124, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_3)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_3) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_3)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_4) == 128, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_4)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr 
*)0)->sa_padding_4) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_4)); - LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_5) == 132, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_5)); - LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_5) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_5)); - - /* Checks for struct mdt_rec_create */ - LASSERTF((int)sizeof(struct mdt_rec_create) == 136, "found %lld\n", - (long long)(int)sizeof(struct mdt_rec_create)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_opcode) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_opcode)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_opcode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_opcode)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_cap) == 4, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_cap)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_cap) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_cap)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsuid) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_fsuid)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsuid_h) == 12, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_fsuid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid_h)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsgid) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_fsgid)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid) == 4, "found 
%lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsgid_h) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_fsgid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid_h)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid1) == 24, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid1)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid1_h) == 28, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid1_h)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1_h)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid2) == 32, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid2)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid2_h) == 36, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid2_h)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2_h)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_fid1) == 40, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_fid1)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fid1) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fid1)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_fid2) == 56, "found %lld\n", - 
(long long)(int)offsetof(struct mdt_rec_create, cr_fid2)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fid2) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fid2)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_old_handle) == 72, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_old_handle)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_old_handle) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_old_handle)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_time) == 80, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_time)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_time)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_rdev) == 88, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_rdev)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_rdev) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_rdev)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_ioepoch) == 96, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_ioepoch)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_ioepoch) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_ioepoch)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_padding_1) == 104, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_padding_1)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_padding_1) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_padding_1)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_mode) == 112, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_mode)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create 
*)0)->cr_mode)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_bias) == 116, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_bias)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_bias) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_bias)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_flags_l) == 120, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_flags_l)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_flags_l) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_flags_l)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_flags_h) == 124, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_flags_h)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_flags_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_flags_h)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_umask) == 128, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_umask)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_umask) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_umask)); - LASSERTF((int)offsetof(struct mdt_rec_create, cr_padding_4) == 132, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_create, cr_padding_4)); - LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_padding_4) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_padding_4)); - - /* Checks for struct mdt_rec_link */ - LASSERTF((int)sizeof(struct mdt_rec_link) == 136, "found %lld\n", - (long long)(int)sizeof(struct mdt_rec_link)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_opcode) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_opcode)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_opcode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_opcode)); - 
LASSERTF((int)offsetof(struct mdt_rec_link, lk_cap) == 4, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_cap)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_cap) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_cap)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsuid) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_fsuid)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsuid_h) == 12, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_fsuid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid_h)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsgid) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_fsgid)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsgid_h) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_fsgid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid_h)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid1) == 24, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid1)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid1_h) == 28, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid1_h)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1_h) == 4, "found %lld\n", - (long 
long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1_h)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid2) == 32, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid2)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid2_h) == 36, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid2_h)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2_h)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_fid1) == 40, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_fid1)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fid1) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fid1)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_fid2) == 56, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_fid2)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fid2) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fid2)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_time) == 72, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_time)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_time)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_1) == 80, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_1)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_1) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_1)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_2) == 88, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_2)); - 
LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_2)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_3) == 96, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_3)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_3)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_4) == 104, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_4)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_4) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_4)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_bias) == 112, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_bias)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_bias) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_bias)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_5) == 116, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_5)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_5) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_5)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_6) == 120, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_6)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_6) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_6)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_7) == 124, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_7)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_7) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_7)); - LASSERTF((int)offsetof(struct 
mdt_rec_link, lk_padding_8) == 128, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_8)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_8) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_8)); - LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_9) == 132, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_link, lk_padding_9)); - LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_9) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_9)); - - /* Checks for struct mdt_rec_unlink */ - LASSERTF((int)sizeof(struct mdt_rec_unlink) == 136, "found %lld\n", - (long long)(int)sizeof(struct mdt_rec_unlink)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_opcode) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_opcode)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_opcode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_opcode)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_cap) == 4, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_cap)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_cap) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_cap)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsuid) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsuid)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsuid_h) == 12, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsuid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid_h)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsgid) == 16, "found 
%lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsgid)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsgid_h) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsgid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid_h)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid1) == 24, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid1)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid1_h) == 28, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid1_h)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1_h)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid2) == 32, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid2)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid2_h) == 36, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid2_h)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2_h)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fid1) == 40, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_fid1)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid1) == 16, "found %lld\n", - 
(long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid1)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fid2) == 56, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_fid2)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid2) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid2)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_time) == 72, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_time)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_time)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_2) == 80, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_2)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_2)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_3) == 88, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_3)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_3)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_4) == 96, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_4)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_4) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_4)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_5) == 104, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_5)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_5) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_5)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_bias) == 112, "found %lld\n", - (long 
long)(int)offsetof(struct mdt_rec_unlink, ul_bias)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_bias) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_bias)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_mode) == 116, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_mode)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_mode)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_6) == 120, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_6)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_6) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_6)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_7) == 124, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_7)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_7) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_7)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_8) == 128, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_8)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_8) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_8)); - LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_9) == 132, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_9)); - LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_9) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_9)); - - /* Checks for struct mdt_rec_rename */ - LASSERTF((int)sizeof(struct mdt_rec_rename) == 136, "found %lld\n", - (long long)(int)sizeof(struct mdt_rec_rename)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_opcode) == 0, "found %lld\n", - (long 
long)(int)offsetof(struct mdt_rec_rename, rn_opcode)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_opcode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_opcode)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_cap) == 4, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_cap)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_cap) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_cap)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsuid) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_fsuid)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsuid_h) == 12, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_fsuid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid_h)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsgid) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_fsgid)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsgid_h) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_fsgid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid_h)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid1) == 24, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid1)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1)); - 
LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid1_h) == 28, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid1_h)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1_h)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid2) == 32, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid2)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid2_h) == 36, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid2_h)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2_h)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fid1) == 40, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_fid1)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fid1) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fid1)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fid2) == 56, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_fid2)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fid2) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fid2)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_time) == 72, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_time)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_time)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_1) == 80, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_1)); - LASSERTF((int)sizeof(((struct 
mdt_rec_rename *)0)->rn_padding_1) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_1)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_2) == 88, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_2)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_2)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_3) == 96, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_3)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_3)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_4) == 104, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_4)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_4) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_4)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_bias) == 112, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_bias)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_bias) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_bias)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_mode) == 116, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_mode)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_mode)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_5) == 120, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_5)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_5) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_5)); - LASSERTF((int)offsetof(struct 
mdt_rec_rename, rn_padding_6) == 124, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_6)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_6) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_6)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_7) == 128, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_7)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_7) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_7)); - LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_8) == 132, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_8)); - LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_8) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_8)); - - /* Checks for struct mdt_rec_setxattr */ - LASSERTF((int)sizeof(struct mdt_rec_setxattr) == 136, "found %lld\n", - (long long)(int)sizeof(struct mdt_rec_setxattr)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_opcode) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_opcode)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_opcode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_opcode)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_cap) == 4, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_cap)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_cap) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_cap)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsuid) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsuid)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid)); - 
LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsuid_h) == 12, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsuid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid_h)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsgid) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsgid)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsgid_h) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsgid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid_h)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid1) == 24, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid1)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid1_h) == 28, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid1_h)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1_h)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid2) == 32, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid2)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid2_h) == 36, "found %lld\n", - (long long)(int)offsetof(struct 
mdt_rec_setxattr, sx_suppgid2_h)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2_h)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fid) == 40, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fid)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fid)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_1) == 56, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_1)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_1) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_1)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_2) == 64, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_2)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_2)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_3) == 68, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_3)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_3) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_3)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_valid) == 72, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_valid)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_valid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_valid)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_time) == 80, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_time)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_time) == 8, "found %lld\n", - (long 
long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_time)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_5) == 88, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_5)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_5) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_5)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_6) == 96, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_6)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_6) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_6)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_7) == 104, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_7)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_7) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_7)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_size) == 112, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_size)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_size) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_size)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_flags) == 116, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_flags)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_flags)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_8) == 120, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_8)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_8) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_8)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, 
sx_padding_9) == 124, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_9)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_9) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_9)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_10) == 128, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_10)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_10) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_10)); - LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_11) == 132, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_11)); - LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_11) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_11)); - - /* Checks for struct mdt_rec_reint */ - LASSERTF((int)sizeof(struct mdt_rec_reint) == 136, "found %lld\n", - (long long)(int)sizeof(struct mdt_rec_reint)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_opcode) == 0, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_opcode)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_opcode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_opcode)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_cap) == 4, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_cap)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_cap) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_cap)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsuid) == 8, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_fsuid)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid)); - LASSERTF((int)offsetof(struct mdt_rec_reint, 
rr_fsuid_h) == 12, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_fsuid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid_h)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsgid) == 16, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_fsgid)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsgid_h) == 20, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_fsgid_h)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid_h)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid1) == 24, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid1)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid1_h) == 28, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid1_h)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1_h)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid2) == 32, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid2)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid2_h) == 36, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid2_h)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2_h) == 4, "found %lld\n", 
- (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2_h)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fid1) == 40, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_fid1)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fid1) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fid1)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fid2) == 56, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_fid2)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fid2) == 16, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fid2)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_mtime) == 72, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_mtime)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_mtime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_mtime)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_atime) == 80, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_atime)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_atime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_atime)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_ctime) == 88, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_ctime)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_ctime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_ctime)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_size) == 96, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_size)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_size) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_size)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_blocks) == 104, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_blocks)); - LASSERTF((int)sizeof(((struct mdt_rec_reint 
*)0)->rr_blocks) == 8, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_blocks)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_bias) == 112, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_bias)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_bias) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_bias)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_mode) == 116, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_mode)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_mode)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_flags) == 120, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_flags)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_flags)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_flags_h) == 124, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_flags_h)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_flags_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_flags_h)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_umask) == 128, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_umask)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_umask) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_umask)); - LASSERTF((int)offsetof(struct mdt_rec_reint, rr_padding_4) == 132, "found %lld\n", - (long long)(int)offsetof(struct mdt_rec_reint, rr_padding_4)); - LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_padding_4) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_padding_4)); - - /* Checks for struct lmv_desc */ - LASSERTF((int)sizeof(struct lmv_desc) == 88, "found %lld\n", - (long 
long)(int)sizeof(struct lmv_desc)); - LASSERTF((int)offsetof(struct lmv_desc, ld_tgt_count) == 0, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_tgt_count)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_tgt_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_tgt_count)); - LASSERTF((int)offsetof(struct lmv_desc, ld_active_tgt_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_active_tgt_count)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_active_tgt_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_active_tgt_count)); - LASSERTF((int)offsetof(struct lmv_desc, ld_default_stripe_count) == 8, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_default_stripe_count)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_default_stripe_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_default_stripe_count)); - LASSERTF((int)offsetof(struct lmv_desc, ld_pattern) == 12, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_pattern)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_pattern) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_pattern)); - LASSERTF((int)offsetof(struct lmv_desc, ld_default_hash_size) == 16, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_default_hash_size)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_default_hash_size) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_default_hash_size)); - LASSERTF((int)offsetof(struct lmv_desc, ld_padding_1) == 24, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_padding_1)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_1) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_1)); - LASSERTF((int)offsetof(struct lmv_desc, ld_padding_2) == 32, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_padding_2)); - 
LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_2)); - LASSERTF((int)offsetof(struct lmv_desc, ld_qos_maxage) == 36, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_qos_maxage)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_qos_maxage) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_qos_maxage)); - LASSERTF((int)offsetof(struct lmv_desc, ld_padding_3) == 40, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_padding_3)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_3) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_3)); - LASSERTF((int)offsetof(struct lmv_desc, ld_padding_4) == 44, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_padding_4)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_4) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_4)); - LASSERTF((int)offsetof(struct lmv_desc, ld_uuid) == 48, "found %lld\n", - (long long)(int)offsetof(struct lmv_desc, ld_uuid)); - LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_uuid) == 40, "found %lld\n", - (long long)(int)sizeof(((struct lmv_desc *)0)->ld_uuid)); - - /* Checks for struct lov_desc */ - LASSERTF((int)sizeof(struct lov_desc) == 88, "found %lld\n", - (long long)(int)sizeof(struct lov_desc)); - LASSERTF((int)offsetof(struct lov_desc, ld_tgt_count) == 0, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_tgt_count)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_tgt_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_tgt_count)); - LASSERTF((int)offsetof(struct lov_desc, ld_active_tgt_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_active_tgt_count)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_active_tgt_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc 
*)0)->ld_active_tgt_count)); - LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_count) == 8, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_default_stripe_count)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_count)); - LASSERTF((int)offsetof(struct lov_desc, ld_pattern) == 12, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_pattern)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_pattern) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_pattern)); - LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_size) == 16, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_default_stripe_size)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_size) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_size)); - LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_offset) == 24, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_default_stripe_offset)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_offset) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_offset)); - LASSERTF((int)offsetof(struct lov_desc, ld_padding_0) == 32, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_padding_0)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_0) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_0)); - LASSERTF((int)offsetof(struct lov_desc, ld_qos_maxage) == 36, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_qos_maxage)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_qos_maxage) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_qos_maxage)); - LASSERTF((int)offsetof(struct lov_desc, ld_padding_1) == 40, "found %lld\n", - (long long)(int)offsetof(struct 
lov_desc, ld_padding_1)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_1)); - LASSERTF((int)offsetof(struct lov_desc, ld_padding_2) == 44, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_padding_2)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_2)); - LASSERTF((int)offsetof(struct lov_desc, ld_uuid) == 48, "found %lld\n", - (long long)(int)offsetof(struct lov_desc, ld_uuid)); - LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_uuid) == 40, "found %lld\n", - (long long)(int)sizeof(((struct lov_desc *)0)->ld_uuid)); - BUILD_BUG_ON(LOV_DESC_MAGIC != 0xB0CCDE5C); - - /* Checks for struct ldlm_res_id */ - LASSERTF((int)sizeof(struct ldlm_res_id) == 32, "found %lld\n", - (long long)(int)sizeof(struct ldlm_res_id)); - BUILD_BUG_ON(RES_NAME_SIZE != 4); - LASSERTF((int)offsetof(struct ldlm_res_id, name[4]) == 32, "found %lld\n", - (long long)(int)offsetof(struct ldlm_res_id, name[4])); - LASSERTF((int)sizeof(((struct ldlm_res_id *)0)->name[4]) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_res_id *)0)->name[4])); - - /* Checks for struct ldlm_extent */ - LASSERTF((int)sizeof(struct ldlm_extent) == 24, "found %lld\n", - (long long)(int)sizeof(struct ldlm_extent)); - LASSERTF((int)offsetof(struct ldlm_extent, start) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_extent, start)); - LASSERTF((int)sizeof(((struct ldlm_extent *)0)->start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_extent *)0)->start)); - LASSERTF((int)offsetof(struct ldlm_extent, end) == 8, "found %lld\n", - (long long)(int)offsetof(struct ldlm_extent, end)); - LASSERTF((int)sizeof(((struct ldlm_extent *)0)->end) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_extent *)0)->end)); - LASSERTF((int)offsetof(struct ldlm_extent, gid) == 16, "found 
%lld\n", - (long long)(int)offsetof(struct ldlm_extent, gid)); - LASSERTF((int)sizeof(((struct ldlm_extent *)0)->gid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_extent *)0)->gid)); - - /* Checks for struct ldlm_inodebits */ - LASSERTF((int)sizeof(struct ldlm_inodebits) == 8, "found %lld\n", - (long long)(int)sizeof(struct ldlm_inodebits)); - LASSERTF((int)offsetof(struct ldlm_inodebits, bits) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_inodebits, bits)); - LASSERTF((int)sizeof(((struct ldlm_inodebits *)0)->bits) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_inodebits *)0)->bits)); - - /* Checks for struct ldlm_flock_wire */ - LASSERTF((int)sizeof(struct ldlm_flock_wire) == 32, "found %lld\n", - (long long)(int)sizeof(struct ldlm_flock_wire)); - LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_start) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_flock_wire, lfw_start)); - LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_start)); - LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_end) == 8, "found %lld\n", - (long long)(int)offsetof(struct ldlm_flock_wire, lfw_end)); - LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_end) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_end)); - LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_owner) == 16, "found %lld\n", - (long long)(int)offsetof(struct ldlm_flock_wire, lfw_owner)); - LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_owner) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_owner)); - LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_padding) == 24, "found %lld\n", - (long long)(int)offsetof(struct ldlm_flock_wire, lfw_padding)); - LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_flock_wire 
*)0)->lfw_padding)); - LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_pid) == 28, "found %lld\n", - (long long)(int)offsetof(struct ldlm_flock_wire, lfw_pid)); - LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_pid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_pid)); - - /* Checks for struct ldlm_intent */ - LASSERTF((int)sizeof(struct ldlm_intent) == 8, "found %lld\n", - (long long)(int)sizeof(struct ldlm_intent)); - LASSERTF((int)offsetof(struct ldlm_intent, opc) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_intent, opc)); - LASSERTF((int)sizeof(((struct ldlm_intent *)0)->opc) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_intent *)0)->opc)); - - /* Checks for struct ldlm_resource_desc */ - LASSERTF((int)sizeof(struct ldlm_resource_desc) == 40, "found %lld\n", - (long long)(int)sizeof(struct ldlm_resource_desc)); - LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_type) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_resource_desc, lr_type)); - LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_type)); - LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_padding) == 4, "found %lld\n", - (long long)(int)offsetof(struct ldlm_resource_desc, lr_padding)); - LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding)); - LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_name) == 8, "found %lld\n", - (long long)(int)offsetof(struct ldlm_resource_desc, lr_name)); - LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_name) == 32, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_name)); - - /* Checks for struct ldlm_lock_desc */ - LASSERTF((int)sizeof(struct ldlm_lock_desc) == 80, "found %lld\n", - (long long)(int)sizeof(struct ldlm_lock_desc)); - 
LASSERTF((int)offsetof(struct ldlm_lock_desc, l_resource) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_lock_desc, l_resource)); - LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_resource) == 40, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_resource)); - LASSERTF((int)offsetof(struct ldlm_lock_desc, l_req_mode) == 40, "found %lld\n", - (long long)(int)offsetof(struct ldlm_lock_desc, l_req_mode)); - LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_req_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_req_mode)); - LASSERTF((int)offsetof(struct ldlm_lock_desc, l_granted_mode) == 44, "found %lld\n", - (long long)(int)offsetof(struct ldlm_lock_desc, l_granted_mode)); - LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_granted_mode) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_granted_mode)); - LASSERTF((int)offsetof(struct ldlm_lock_desc, l_policy_data) == 48, "found %lld\n", - (long long)(int)offsetof(struct ldlm_lock_desc, l_policy_data)); - LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_policy_data) == 32, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_policy_data)); - - /* Checks for struct ldlm_request */ - LASSERTF((int)sizeof(struct ldlm_request) == 104, "found %lld\n", - (long long)(int)sizeof(struct ldlm_request)); - LASSERTF((int)offsetof(struct ldlm_request, lock_flags) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_request, lock_flags)); - LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_request *)0)->lock_flags)); - LASSERTF((int)offsetof(struct ldlm_request, lock_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct ldlm_request, lock_count)); - LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_request *)0)->lock_count)); - 
LASSERTF((int)offsetof(struct ldlm_request, lock_desc) == 8, "found %lld\n", - (long long)(int)offsetof(struct ldlm_request, lock_desc)); - LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_desc) == 80, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_request *)0)->lock_desc)); - LASSERTF((int)offsetof(struct ldlm_request, lock_handle) == 88, "found %lld\n", - (long long)(int)offsetof(struct ldlm_request, lock_handle)); - LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_handle) == 16, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_request *)0)->lock_handle)); - - /* Checks for struct ldlm_reply */ - LASSERTF((int)sizeof(struct ldlm_reply) == 112, "found %lld\n", - (long long)(int)sizeof(struct ldlm_reply)); - LASSERTF((int)offsetof(struct ldlm_reply, lock_flags) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_reply, lock_flags)); - LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_flags)); - LASSERTF((int)offsetof(struct ldlm_reply, lock_padding) == 4, "found %lld\n", - (long long)(int)offsetof(struct ldlm_reply, lock_padding)); - LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_padding)); - LASSERTF((int)offsetof(struct ldlm_reply, lock_desc) == 8, "found %lld\n", - (long long)(int)offsetof(struct ldlm_reply, lock_desc)); - LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_desc) == 80, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_desc)); - LASSERTF((int)offsetof(struct ldlm_reply, lock_handle) == 88, "found %lld\n", - (long long)(int)offsetof(struct ldlm_reply, lock_handle)); - LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_handle) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_handle)); - LASSERTF((int)offsetof(struct ldlm_reply, lock_policy_res1) == 96, "found %lld\n", - (long 
long)(int)offsetof(struct ldlm_reply, lock_policy_res1)); - LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_policy_res1) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_policy_res1)); - LASSERTF((int)offsetof(struct ldlm_reply, lock_policy_res2) == 104, "found %lld\n", - (long long)(int)offsetof(struct ldlm_reply, lock_policy_res2)); - LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_policy_res2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_policy_res2)); - - /* Checks for struct ost_lvb_v1 */ - LASSERTF((int)sizeof(struct ost_lvb_v1) == 40, "found %lld\n", - (long long)(int)sizeof(struct ost_lvb_v1)); - LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_size) == 0, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb_v1, lvb_size)); - LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_size) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_size)); - LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_mtime) == 8, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb_v1, lvb_mtime)); - LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_mtime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_mtime)); - LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_atime) == 16, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb_v1, lvb_atime)); - LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_atime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_atime)); - LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_ctime) == 24, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb_v1, lvb_ctime)); - LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_ctime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_ctime)); - LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_blocks) == 32, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb_v1, lvb_blocks)); - LASSERTF((int)sizeof(((struct ost_lvb_v1 
*)0)->lvb_blocks) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_blocks)); - - /* Checks for struct ost_lvb */ - LASSERTF((int)sizeof(struct ost_lvb) == 56, "found %lld\n", - (long long)(int)sizeof(struct ost_lvb)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_size) == 0, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb, lvb_size)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_size) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_size)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_mtime) == 8, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb, lvb_mtime)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_mtime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_mtime)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_atime) == 16, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb, lvb_atime)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_atime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_atime)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_ctime) == 24, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb, lvb_ctime)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_ctime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_ctime)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_blocks) == 32, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb, lvb_blocks)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_blocks) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_blocks)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_mtime_ns) == 40, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb, lvb_mtime_ns)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_mtime_ns) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_mtime_ns)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_atime_ns) == 44, "found %lld\n", - (long long)(int)offsetof(struct 
ost_lvb, lvb_atime_ns)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_atime_ns) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_atime_ns)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_ctime_ns) == 48, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb, lvb_ctime_ns)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_ctime_ns) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_ctime_ns)); - LASSERTF((int)offsetof(struct ost_lvb, lvb_padding) == 52, "found %lld\n", - (long long)(int)offsetof(struct ost_lvb, lvb_padding)); - LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_padding)); - - /* Checks for struct lquota_lvb */ - LASSERTF((int)sizeof(struct lquota_lvb) == 40, "found %lld\n", - (long long)(int)sizeof(struct lquota_lvb)); - LASSERTF((int)offsetof(struct lquota_lvb, lvb_flags) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_lvb, lvb_flags)); - LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_flags) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_flags)); - LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_may_rel) == 8, "found %lld\n", - (long long)(int)offsetof(struct lquota_lvb, lvb_id_may_rel)); - LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_may_rel) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_may_rel)); - LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_rel) == 16, "found %lld\n", - (long long)(int)offsetof(struct lquota_lvb, lvb_id_rel)); - LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_rel) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_rel)); - LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_qunit) == 24, "found %lld\n", - (long long)(int)offsetof(struct lquota_lvb, lvb_id_qunit)); - LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_qunit) == 8, "found %lld\n", - (long 
long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_qunit)); - LASSERTF((int)offsetof(struct lquota_lvb, lvb_pad1) == 32, "found %lld\n", - (long long)(int)offsetof(struct lquota_lvb, lvb_pad1)); - LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_pad1) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_pad1)); - LASSERTF(LQUOTA_FL_EDQUOT == 1, "found %lld\n", - (long long)LQUOTA_FL_EDQUOT); - - /* Checks for struct ldlm_gl_lquota_desc */ - LASSERTF((int)sizeof(struct ldlm_gl_lquota_desc) == 64, "found %lld\n", - (long long)(int)sizeof(struct ldlm_gl_lquota_desc)); - LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_id) == 0, "found %lld\n", - (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_id)); - LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_id) == 16, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_id)); - LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_flags) == 16, "found %lld\n", - (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_flags)); - LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_flags) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_flags)); - LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_ver) == 24, "found %lld\n", - (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_ver)); - LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_ver) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_ver)); - LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_hardlimit) == 32, "found %lld\n", - (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_hardlimit)); - LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_hardlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_hardlimit)); - LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_softlimit) == 40, "found %lld\n", - (long long)(int)offsetof(struct ldlm_gl_lquota_desc, 
gl_softlimit)); - LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_softlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_softlimit)); - LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_time) == 48, "found %lld\n", - (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_time)); - LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_time)); - LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_pad2) == 56, "found %lld\n", - (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_pad2)); - LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_pad2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_pad2)); - - /* Checks for struct mgs_send_param */ - LASSERTF((int)sizeof(struct mgs_send_param) == 1024, "found %lld\n", - (long long)(int)sizeof(struct mgs_send_param)); - BUILD_BUG_ON(MGS_PARAM_MAXLEN != 1024); - LASSERTF((int)offsetof(struct mgs_send_param, mgs_param[1024]) == 1024, "found %lld\n", - (long long)(int)offsetof(struct mgs_send_param, mgs_param[1024])); - LASSERTF((int)sizeof(((struct mgs_send_param *)0)->mgs_param[1024]) == 1, "found %lld\n", - (long long)(int)sizeof(((struct mgs_send_param *)0)->mgs_param[1024])); - - /* Checks for struct cfg_marker */ - LASSERTF((int)sizeof(struct cfg_marker) == 160, "found %lld\n", - (long long)(int)sizeof(struct cfg_marker)); - LASSERTF((int)offsetof(struct cfg_marker, cm_step) == 0, "found %lld\n", - (long long)(int)offsetof(struct cfg_marker, cm_step)); - LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_step) == 4, "found %lld\n", - (long long)(int)sizeof(((struct cfg_marker *)0)->cm_step)); - LASSERTF((int)offsetof(struct cfg_marker, cm_flags) == 4, "found %lld\n", - (long long)(int)offsetof(struct cfg_marker, cm_flags)); - LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_flags) == 4, "found %lld\n", - (long 
long)(int)sizeof(((struct cfg_marker *)0)->cm_flags)); - LASSERTF((int)offsetof(struct cfg_marker, cm_vers) == 8, "found %lld\n", - (long long)(int)offsetof(struct cfg_marker, cm_vers)); - LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_vers) == 4, "found %lld\n", - (long long)(int)sizeof(((struct cfg_marker *)0)->cm_vers)); - LASSERTF((int)offsetof(struct cfg_marker, cm_padding) == 12, "found %lld\n", - (long long)(int)offsetof(struct cfg_marker, cm_padding)); - LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct cfg_marker *)0)->cm_padding)); - LASSERTF((int)offsetof(struct cfg_marker, cm_createtime) == 16, "found %lld\n", - (long long)(int)offsetof(struct cfg_marker, cm_createtime)); - LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_createtime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct cfg_marker *)0)->cm_createtime)); - LASSERTF((int)offsetof(struct cfg_marker, cm_canceltime) == 24, "found %lld\n", - (long long)(int)offsetof(struct cfg_marker, cm_canceltime)); - LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_canceltime) == 8, "found %lld\n", - (long long)(int)sizeof(((struct cfg_marker *)0)->cm_canceltime)); - LASSERTF((int)offsetof(struct cfg_marker, cm_tgtname) == 32, "found %lld\n", - (long long)(int)offsetof(struct cfg_marker, cm_tgtname)); - LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_tgtname) == 64, "found %lld\n", - (long long)(int)sizeof(((struct cfg_marker *)0)->cm_tgtname)); - LASSERTF((int)offsetof(struct cfg_marker, cm_comment) == 96, "found %lld\n", - (long long)(int)offsetof(struct cfg_marker, cm_comment)); - LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_comment) == 64, "found %lld\n", - (long long)(int)sizeof(((struct cfg_marker *)0)->cm_comment)); - - /* Checks for struct llog_logid */ - LASSERTF((int)sizeof(struct llog_logid) == 20, "found %lld\n", - (long long)(int)sizeof(struct llog_logid)); - LASSERTF((int)offsetof(struct llog_logid, lgl_oi) == 0, 
"found %lld\n", - (long long)(int)offsetof(struct llog_logid, lgl_oi)); - LASSERTF((int)sizeof(((struct llog_logid *)0)->lgl_oi) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_logid *)0)->lgl_oi)); - LASSERTF((int)offsetof(struct llog_logid, lgl_ogen) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_logid, lgl_ogen)); - LASSERTF((int)sizeof(((struct llog_logid *)0)->lgl_ogen) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_logid *)0)->lgl_ogen)); - BUILD_BUG_ON(OST_SZ_REC != 274730752); - BUILD_BUG_ON(MDS_UNLINK_REC != 274801668); - BUILD_BUG_ON(MDS_UNLINK64_REC != 275325956); - BUILD_BUG_ON(MDS_SETATTR64_REC != 275325953); - BUILD_BUG_ON(OBD_CFG_REC != 274857984); - BUILD_BUG_ON(LLOG_GEN_REC != 274989056); - BUILD_BUG_ON(CHANGELOG_REC != 275120128); - BUILD_BUG_ON(CHANGELOG_USER_REC != 275185664); - BUILD_BUG_ON(LLOG_HDR_MAGIC != 275010873); - BUILD_BUG_ON(LLOG_LOGID_MAGIC != 275010875); - - /* Checks for struct llog_catid */ - LASSERTF((int)sizeof(struct llog_catid) == 32, "found %lld\n", - (long long)(int)sizeof(struct llog_catid)); - LASSERTF((int)offsetof(struct llog_catid, lci_logid) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_catid, lci_logid)); - LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_logid) == 20, "found %lld\n", - (long long)(int)sizeof(((struct llog_catid *)0)->lci_logid)); - LASSERTF((int)offsetof(struct llog_catid, lci_padding1) == 20, "found %lld\n", - (long long)(int)offsetof(struct llog_catid, lci_padding1)); - LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding1)); - LASSERTF((int)offsetof(struct llog_catid, lci_padding2) == 24, "found %lld\n", - (long long)(int)offsetof(struct llog_catid, lci_padding2)); - LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding2)); - LASSERTF((int)offsetof(struct 
llog_catid, lci_padding3) == 28, "found %lld\n", - (long long)(int)offsetof(struct llog_catid, lci_padding3)); - LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding3) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding3)); - - /* Checks for struct llog_rec_hdr */ - LASSERTF((int)sizeof(struct llog_rec_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(struct llog_rec_hdr)); - LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_len) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_rec_hdr, lrh_len)); - LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_len) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_len)); - LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_index) == 4, "found %lld\n", - (long long)(int)offsetof(struct llog_rec_hdr, lrh_index)); - LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_index)); - LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_type) == 8, "found %lld\n", - (long long)(int)offsetof(struct llog_rec_hdr, lrh_type)); - LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_type)); - LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_id) == 12, "found %lld\n", - (long long)(int)offsetof(struct llog_rec_hdr, lrh_id)); - LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_id)); - - /* Checks for struct llog_rec_tail */ - LASSERTF((int)sizeof(struct llog_rec_tail) == 8, "found %lld\n", - (long long)(int)sizeof(struct llog_rec_tail)); - LASSERTF((int)offsetof(struct llog_rec_tail, lrt_len) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_rec_tail, lrt_len)); - LASSERTF((int)sizeof(((struct llog_rec_tail *)0)->lrt_len) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_rec_tail *)0)->lrt_len)); 
- LASSERTF((int)offsetof(struct llog_rec_tail, lrt_index) == 4, "found %lld\n", - (long long)(int)offsetof(struct llog_rec_tail, lrt_index)); - LASSERTF((int)sizeof(((struct llog_rec_tail *)0)->lrt_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_rec_tail *)0)->lrt_index)); - - /* Checks for struct llog_logid_rec */ - LASSERTF((int)sizeof(struct llog_logid_rec) == 64, "found %lld\n", - (long long)(int)sizeof(struct llog_logid_rec)); - LASSERTF((int)offsetof(struct llog_logid_rec, lid_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_logid_rec, lid_hdr)); - LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_hdr)); - LASSERTF((int)offsetof(struct llog_logid_rec, lid_id) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_logid_rec, lid_id)); - LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_id) == 20, "found %lld\n", - (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_id)); - LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding1) == 36, "found %lld\n", - (long long)(int)offsetof(struct llog_logid_rec, lid_padding1)); - LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding1)); - LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding2) == 40, "found %lld\n", - (long long)(int)offsetof(struct llog_logid_rec, lid_padding2)); - LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding2)); - LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding3) == 48, "found %lld\n", - (long long)(int)offsetof(struct llog_logid_rec, lid_padding3)); - LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding3)); - LASSERTF((int)offsetof(struct 
llog_logid_rec, lid_tail) == 56, "found %lld\n", - (long long)(int)offsetof(struct llog_logid_rec, lid_tail)); - LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_tail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_tail)); - - /* Checks for struct llog_unlink_rec */ - LASSERTF((int)sizeof(struct llog_unlink_rec) == 40, "found %lld\n", - (long long)(int)sizeof(struct llog_unlink_rec)); - LASSERTF((int)offsetof(struct llog_unlink_rec, lur_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink_rec, lur_hdr)); - LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_hdr)); - LASSERTF((int)offsetof(struct llog_unlink_rec, lur_oid) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink_rec, lur_oid)); - LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_oid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_oid)); - LASSERTF((int)offsetof(struct llog_unlink_rec, lur_oseq) == 24, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink_rec, lur_oseq)); - LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_oseq) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_oseq)); - LASSERTF((int)offsetof(struct llog_unlink_rec, lur_count) == 28, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink_rec, lur_count)); - LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_count)); - LASSERTF((int)offsetof(struct llog_unlink_rec, lur_tail) == 32, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink_rec, lur_tail)); - LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_tail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_tail)); - /* Checks for struct llog_unlink64_rec */ - LASSERTF((int)sizeof(struct 
llog_unlink64_rec) == 64, "found %lld\n", - (long long)(int)sizeof(struct llog_unlink64_rec)); - LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink64_rec, lur_hdr)); - LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_hdr)); - LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_fid) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink64_rec, lur_fid)); - LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_fid)); - LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_count) == 32, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink64_rec, lur_count)); - LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_count)); - LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_tail) == 56, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink64_rec, lur_tail)); - LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_tail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_tail)); - LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding1) == 36, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding1)); - LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding1)); - LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding2) == 40, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding2)); - LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding2)); - LASSERTF((int)offsetof(struct 
llog_unlink64_rec, lur_padding3) == 48, "found %lld\n", - (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding3)); - LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding3)); - - /* Checks for struct llog_setattr64_rec */ - LASSERTF((int)sizeof(struct llog_setattr64_rec) == 64, "found %lld\n", - (long long)(int)sizeof(struct llog_setattr64_rec)); - LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_setattr64_rec, lsr_hdr)); - LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_hdr)); - LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_oi) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_setattr64_rec, lsr_oi)); - LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_oi) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_oi)); - LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_uid) == 32, "found %lld\n", - (long long)(int)offsetof(struct llog_setattr64_rec, lsr_uid)); - LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid)); - LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_uid_h) == 36, "found %lld\n", - (long long)(int)offsetof(struct llog_setattr64_rec, lsr_uid_h)); - LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid_h)); - LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_gid) == 40, "found %lld\n", - (long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid)); - LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_setattr64_rec 
*)0)->lsr_gid)); - LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_gid_h) == 44, "found %lld\n", - (long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid_h)); - LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h)); - LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_valid) == 48, "found %lld\n", - (long long)(int)offsetof(struct llog_setattr64_rec, lsr_valid)); - LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_valid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_valid)); - LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_tail) == 56, "found %lld\n", - (long long)(int)offsetof(struct llog_setattr64_rec, lsr_tail)); - LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail)); - - /* Checks for struct llog_size_change_rec */ - LASSERTF((int)sizeof(struct llog_size_change_rec) == 64, "found %lld\n", - (long long)(int)sizeof(struct llog_size_change_rec)); - LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_size_change_rec, lsc_hdr)); - LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_hdr)); - LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_fid) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_size_change_rec, lsc_fid)); - LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_fid)); - LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_ioepoch) == 32, "found %lld\n", - (long long)(int)offsetof(struct llog_size_change_rec, lsc_ioepoch)); - LASSERTF((int)sizeof(((struct llog_size_change_rec 
*)0)->lsc_ioepoch) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_ioepoch)); - LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding1) == 36, "found %lld\n", - (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding1)); - LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding1)); - LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding2) == 40, "found %lld\n", - (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding2)); - LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding2)); - LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding3) == 48, "found %lld\n", - (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding3)); - LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding3)); - LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_tail) == 56, "found %lld\n", - (long long)(int)offsetof(struct llog_size_change_rec, lsc_tail)); - LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_tail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_tail)); - - /* Checks for struct changelog_rec */ - LASSERTF((int)sizeof(struct changelog_rec) == 64, "found %lld\n", - (long long)(int)sizeof(struct changelog_rec)); - LASSERTF((int)offsetof(struct changelog_rec, cr_namelen) == 0, "found %lld\n", - (long long)(int)offsetof(struct changelog_rec, cr_namelen)); - LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_namelen) == 2, "found %lld\n", - (long long)(int)sizeof(((struct changelog_rec *)0)->cr_namelen)); - LASSERTF((int)offsetof(struct changelog_rec, cr_flags) == 2, "found %lld\n", - (long 
long)(int)offsetof(struct changelog_rec, cr_flags)); - LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_flags) == 2, "found %lld\n", - (long long)(int)sizeof(((struct changelog_rec *)0)->cr_flags)); - LASSERTF((int)offsetof(struct changelog_rec, cr_type) == 4, "found %lld\n", - (long long)(int)offsetof(struct changelog_rec, cr_type)); - LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct changelog_rec *)0)->cr_type)); - LASSERTF((int)offsetof(struct changelog_rec, cr_index) == 8, "found %lld\n", - (long long)(int)offsetof(struct changelog_rec, cr_index)); - LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_index) == 8, "found %lld\n", - (long long)(int)sizeof(((struct changelog_rec *)0)->cr_index)); - LASSERTF((int)offsetof(struct changelog_rec, cr_prev) == 16, "found %lld\n", - (long long)(int)offsetof(struct changelog_rec, cr_prev)); - LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_prev) == 8, "found %lld\n", - (long long)(int)sizeof(((struct changelog_rec *)0)->cr_prev)); - LASSERTF((int)offsetof(struct changelog_rec, cr_time) == 24, "found %lld\n", - (long long)(int)offsetof(struct changelog_rec, cr_time)); - LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct changelog_rec *)0)->cr_time)); - LASSERTF((int)offsetof(struct changelog_rec, cr_tfid) == 32, "found %lld\n", - (long long)(int)offsetof(struct changelog_rec, cr_tfid)); - LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_tfid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct changelog_rec *)0)->cr_tfid)); - LASSERTF((int)offsetof(struct changelog_rec, cr_pfid) == 48, "found %lld\n", - (long long)(int)offsetof(struct changelog_rec, cr_pfid)); - LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_pfid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct changelog_rec *)0)->cr_pfid)); - - /* Checks for struct changelog_setinfo */ - 
LASSERTF((int)sizeof(struct changelog_setinfo) == 12, "found %lld\n", - (long long)(int)sizeof(struct changelog_setinfo)); - LASSERTF((int)offsetof(struct changelog_setinfo, cs_recno) == 0, "found %lld\n", - (long long)(int)offsetof(struct changelog_setinfo, cs_recno)); - LASSERTF((int)sizeof(((struct changelog_setinfo *)0)->cs_recno) == 8, "found %lld\n", - (long long)(int)sizeof(((struct changelog_setinfo *)0)->cs_recno)); - LASSERTF((int)offsetof(struct changelog_setinfo, cs_id) == 8, "found %lld\n", - (long long)(int)offsetof(struct changelog_setinfo, cs_id)); - LASSERTF((int)sizeof(((struct changelog_setinfo *)0)->cs_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct changelog_setinfo *)0)->cs_id)); - - /* Checks for struct llog_changelog_rec */ - LASSERTF((int)sizeof(struct llog_changelog_rec) == 88, "found %lld\n", - (long long)(int)sizeof(struct llog_changelog_rec)); - LASSERTF((int)offsetof(struct llog_changelog_rec, cr_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_changelog_rec, cr_hdr)); - LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_hdr)); - LASSERTF((int)offsetof(struct llog_changelog_rec, cr) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_changelog_rec, cr)); - LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr) == 64, "found %lld\n", - (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr)); - LASSERTF((int)offsetof(struct llog_changelog_rec, cr_do_not_use) == 80, "found %lld\n", - (long long)(int)offsetof(struct llog_changelog_rec, cr_do_not_use)); - LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_do_not_use) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_do_not_use)); - - /* Checks for struct llog_changelog_user_rec */ - LASSERTF((int)sizeof(struct llog_changelog_user_rec) == 40, "found %lld\n", - (long long)(int)sizeof(struct 
llog_changelog_user_rec)); - LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_changelog_user_rec, cur_hdr)); - LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_hdr)); - LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_id) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_changelog_user_rec, cur_id)); - LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_id)); - LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_padding) == 20, "found %lld\n", - (long long)(int)offsetof(struct llog_changelog_user_rec, cur_padding)); - LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_padding)); - LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_endrec) == 24, "found %lld\n", - (long long)(int)offsetof(struct llog_changelog_user_rec, cur_endrec)); - LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_endrec) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_endrec)); - LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_tail) == 32, "found %lld\n", - (long long)(int)offsetof(struct llog_changelog_user_rec, cur_tail)); - LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_tail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_tail)); - - /* Checks for struct llog_gen */ - LASSERTF((int)sizeof(struct llog_gen) == 16, "found %lld\n", - (long long)(int)sizeof(struct llog_gen)); - LASSERTF((int)offsetof(struct llog_gen, mnt_cnt) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_gen, mnt_cnt)); - LASSERTF((int)sizeof(((struct 
llog_gen *)0)->mnt_cnt) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_gen *)0)->mnt_cnt)); - LASSERTF((int)offsetof(struct llog_gen, conn_cnt) == 8, "found %lld\n", - (long long)(int)offsetof(struct llog_gen, conn_cnt)); - LASSERTF((int)sizeof(((struct llog_gen *)0)->conn_cnt) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_gen *)0)->conn_cnt)); - - /* Checks for struct llog_gen_rec */ - LASSERTF((int)sizeof(struct llog_gen_rec) == 64, "found %lld\n", - (long long)(int)sizeof(struct llog_gen_rec)); - LASSERTF((int)offsetof(struct llog_gen_rec, lgr_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_gen_rec, lgr_hdr)); - LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_hdr)); - LASSERTF((int)offsetof(struct llog_gen_rec, lgr_gen) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_gen_rec, lgr_gen)); - LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_gen) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_gen)); - LASSERTF((int)offsetof(struct llog_gen_rec, lgr_tail) == 56, "found %lld\n", - (long long)(int)offsetof(struct llog_gen_rec, lgr_tail)); - LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_tail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_tail)); - - /* Checks for struct llog_log_hdr */ - LASSERTF((int)sizeof(struct llog_log_hdr) == 8192, "found %lld\n", - (long long)(int)sizeof(struct llog_log_hdr)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_hdr) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_hdr)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_hdr) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_hdr)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_timestamp) == 16, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_timestamp)); - 
LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_timestamp) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_timestamp)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_count) == 24, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_count)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_count)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_bitmap_offset) == 28, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_bitmap_offset)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap_offset) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap_offset)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_size) == 32, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_size)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_size) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_size)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_flags) == 36, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_flags)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_flags)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_cat_idx) == 40, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_cat_idx)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_cat_idx) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_cat_idx)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_tgtuuid) == 44, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_tgtuuid)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_tgtuuid) == 40, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_tgtuuid)); - LASSERTF((int)offsetof(struct llog_log_hdr, 
llh_reserved) == 84, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_reserved)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_reserved) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_reserved)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_bitmap) == 88, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_bitmap)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap) == 8096, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap)); - LASSERTF((int)offsetof(struct llog_log_hdr, llh_tail) == 8184, "found %lld\n", - (long long)(int)offsetof(struct llog_log_hdr, llh_tail)); - LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_tail) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_tail)); - - /* Checks for struct llog_cookie */ - LASSERTF((int)sizeof(struct llog_cookie) == 32, "found %lld\n", - (long long)(int)sizeof(struct llog_cookie)); - LASSERTF((int)offsetof(struct llog_cookie, lgc_lgl) == 0, "found %lld\n", - (long long)(int)offsetof(struct llog_cookie, lgc_lgl)); - LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_lgl) == 20, "found %lld\n", - (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_lgl)); - LASSERTF((int)offsetof(struct llog_cookie, lgc_subsys) == 20, "found %lld\n", - (long long)(int)offsetof(struct llog_cookie, lgc_subsys)); - LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_subsys) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_subsys)); - LASSERTF((int)offsetof(struct llog_cookie, lgc_index) == 24, "found %lld\n", - (long long)(int)offsetof(struct llog_cookie, lgc_index)); - LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_index)); - LASSERTF((int)offsetof(struct llog_cookie, lgc_padding) == 28, "found %lld\n", - (long long)(int)offsetof(struct llog_cookie, lgc_padding)); - 
LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_padding)); - - /* Checks for struct llogd_body */ - LASSERTF((int)sizeof(struct llogd_body) == 48, "found %lld\n", - (long long)(int)sizeof(struct llogd_body)); - LASSERTF((int)offsetof(struct llogd_body, lgd_logid) == 0, "found %lld\n", - (long long)(int)offsetof(struct llogd_body, lgd_logid)); - LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_logid) == 20, "found %lld\n", - (long long)(int)sizeof(((struct llogd_body *)0)->lgd_logid)); - LASSERTF((int)offsetof(struct llogd_body, lgd_ctxt_idx) == 20, "found %lld\n", - (long long)(int)offsetof(struct llogd_body, lgd_ctxt_idx)); - LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_ctxt_idx) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llogd_body *)0)->lgd_ctxt_idx)); - LASSERTF((int)offsetof(struct llogd_body, lgd_llh_flags) == 24, "found %lld\n", - (long long)(int)offsetof(struct llogd_body, lgd_llh_flags)); - LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_llh_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llogd_body *)0)->lgd_llh_flags)); - LASSERTF((int)offsetof(struct llogd_body, lgd_index) == 28, "found %lld\n", - (long long)(int)offsetof(struct llogd_body, lgd_index)); - LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llogd_body *)0)->lgd_index)); - LASSERTF((int)offsetof(struct llogd_body, lgd_saved_index) == 32, "found %lld\n", - (long long)(int)offsetof(struct llogd_body, lgd_saved_index)); - LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_saved_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llogd_body *)0)->lgd_saved_index)); - LASSERTF((int)offsetof(struct llogd_body, lgd_len) == 36, "found %lld\n", - (long long)(int)offsetof(struct llogd_body, lgd_len)); - LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_len) == 4, "found %lld\n", - (long 
long)(int)sizeof(((struct llogd_body *)0)->lgd_len)); - LASSERTF((int)offsetof(struct llogd_body, lgd_cur_offset) == 40, "found %lld\n", - (long long)(int)offsetof(struct llogd_body, lgd_cur_offset)); - LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_cur_offset) == 8, "found %lld\n", - (long long)(int)sizeof(((struct llogd_body *)0)->lgd_cur_offset)); - BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_CREATE != 501); - BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_NEXT_BLOCK != 502); - BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_READ_HEADER != 503); - BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_WRITE_REC != 504); - BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_CLOSE != 505); - BUILD_BUG_ON(LLOG_ORIGIN_CONNECT != 506); - BUILD_BUG_ON(LLOG_CATINFO != 507); - BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_PREV_BLOCK != 508); - BUILD_BUG_ON(LLOG_ORIGIN_HANDLE_DESTROY != 509); - BUILD_BUG_ON(LLOG_FIRST_OPC != 501); - BUILD_BUG_ON(LLOG_LAST_OPC != 510); - BUILD_BUG_ON(LLOG_CONFIG_ORIG_CTXT != 0); - BUILD_BUG_ON(LLOG_CONFIG_REPL_CTXT != 1); - BUILD_BUG_ON(LLOG_MDS_OST_ORIG_CTXT != 2); - BUILD_BUG_ON(LLOG_MDS_OST_REPL_CTXT != 3); - BUILD_BUG_ON(LLOG_SIZE_ORIG_CTXT != 4); - BUILD_BUG_ON(LLOG_SIZE_REPL_CTXT != 5); - BUILD_BUG_ON(LLOG_TEST_ORIG_CTXT != 8); - BUILD_BUG_ON(LLOG_TEST_REPL_CTXT != 9); - BUILD_BUG_ON(LLOG_CHANGELOG_ORIG_CTXT != 12); - BUILD_BUG_ON(LLOG_CHANGELOG_REPL_CTXT != 13); - BUILD_BUG_ON(LLOG_CHANGELOG_USER_ORIG_CTXT != 14); - BUILD_BUG_ON(LLOG_AGENT_ORIG_CTXT != 15); - BUILD_BUG_ON(LLOG_MAX_CTXTS != 16); - - /* Checks for struct llogd_conn_body */ - LASSERTF((int)sizeof(struct llogd_conn_body) == 40, "found %lld\n", - (long long)(int)sizeof(struct llogd_conn_body)); - LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_gen) == 0, "found %lld\n", - (long long)(int)offsetof(struct llogd_conn_body, lgdc_gen)); - LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_gen) == 16, "found %lld\n", - (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_gen)); - LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_logid) == 16, "found 
%lld\n", - (long long)(int)offsetof(struct llogd_conn_body, lgdc_logid)); - LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_logid) == 20, "found %lld\n", - (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_logid)); - LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_ctxt_idx) == 36, "found %lld\n", - (long long)(int)offsetof(struct llogd_conn_body, lgdc_ctxt_idx)); - LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n", - (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx)); - - /* Checks for struct fiemap_info_key */ - LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n", - (long long)(int)sizeof(struct ll_fiemap_info_key)); - LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_name[8]) == 8, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_name[8])); - LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]) == 1, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8])); - LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_oa) == 8, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_oa)); - LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa) == 208, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa)); - LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_fiemap) == 216, "found %lld\n", - (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_fiemap)); - LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap) == 32, "found %lld\n", - (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap)); - - /* Checks for struct mgs_target_info */ - LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n", - (long long)(int)sizeof(struct mgs_target_info)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_lustre_ver) == 0, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, 
mti_lustre_ver)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_lustre_ver) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_lustre_ver)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_stripe_index) == 4, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_stripe_index)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_stripe_index) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_stripe_index)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_config_ver) == 8, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_config_ver)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_config_ver) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_config_ver)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_flags) == 12, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_flags)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_flags)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_nid_count) == 16, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_nid_count)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_nid_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_nid_count)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_instance) == 20, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_instance)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_instance) == 4, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_instance)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_fsname) == 24, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_fsname)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_fsname) == 64, 
"found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_fsname)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_svname) == 88, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_svname)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_svname) == 64, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_svname)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_uuid) == 152, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_uuid)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_uuid) == 40, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_uuid)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_nids) == 192, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_nids)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_nids) == 256, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_nids)); - LASSERTF((int)offsetof(struct mgs_target_info, mti_params) == 448, "found %lld\n", - (long long)(int)offsetof(struct mgs_target_info, mti_params)); - LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_params) == 4096, "found %lld\n", - (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_params)); - - /* Checks for struct lustre_capa */ - LASSERTF((int)sizeof(struct lustre_capa) == 120, "found %lld\n", - (long long)(int)sizeof(struct lustre_capa)); - LASSERTF((int)offsetof(struct lustre_capa, lc_fid) == 0, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_fid)); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa *)0)->lc_fid)); - LASSERTF((int)offsetof(struct lustre_capa, lc_opc) == 16, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_opc)); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_opc) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa 
*)0)->lc_opc)); - LASSERTF((int)offsetof(struct lustre_capa, lc_uid) == 24, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_uid)); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_uid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa *)0)->lc_uid)); - LASSERTF((int)offsetof(struct lustre_capa, lc_gid) == 32, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_gid)); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_gid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa *)0)->lc_gid)); - LASSERTF((int)offsetof(struct lustre_capa, lc_flags) == 40, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_flags)); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa *)0)->lc_flags)); - LASSERTF((int)offsetof(struct lustre_capa, lc_keyid) == 44, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_keyid)); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_keyid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa *)0)->lc_keyid)); - LASSERTF((int)offsetof(struct lustre_capa, lc_timeout) == 48, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_timeout)); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_timeout) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa *)0)->lc_timeout)); - LASSERTF((int)offsetof(struct lustre_capa, lc_expiry) == 52, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_expiry)); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_expiry) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa *)0)->lc_expiry)); - BUILD_BUG_ON(CAPA_HMAC_MAX_LEN != 64); - LASSERTF((int)offsetof(struct lustre_capa, lc_hmac[64]) == 120, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa, lc_hmac[64])); - LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_hmac[64]) == 1, "found %lld\n", - (long 
long)(int)sizeof(((struct lustre_capa *)0)->lc_hmac[64])); - - /* Checks for struct lustre_capa_key */ - LASSERTF((int)sizeof(struct lustre_capa_key) == 72, "found %lld\n", - (long long)(int)sizeof(struct lustre_capa_key)); - LASSERTF((int)offsetof(struct lustre_capa_key, lk_seq) == 0, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa_key, lk_seq)); - LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_seq) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_seq)); - LASSERTF((int)offsetof(struct lustre_capa_key, lk_keyid) == 8, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa_key, lk_keyid)); - LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_keyid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_keyid)); - LASSERTF((int)offsetof(struct lustre_capa_key, lk_padding) == 12, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa_key, lk_padding)); - LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_padding)); - BUILD_BUG_ON(CAPA_HMAC_KEY_MAX_LEN != 56); - LASSERTF((int)offsetof(struct lustre_capa_key, lk_key[56]) == 72, "found %lld\n", - (long long)(int)offsetof(struct lustre_capa_key, lk_key[56])); - LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_key[56]) == 1, "found %lld\n", - (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_key[56])); - - /* Checks for struct getinfo_fid2path */ - LASSERTF((int)sizeof(struct getinfo_fid2path) == 32, "found %lld\n", - (long long)(int)sizeof(struct getinfo_fid2path)); - LASSERTF((int)offsetof(struct getinfo_fid2path, gf_fid) == 0, "found %lld\n", - (long long)(int)offsetof(struct getinfo_fid2path, gf_fid)); - LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_fid)); - LASSERTF((int)offsetof(struct getinfo_fid2path, gf_recno) == 
16, "found %lld\n", - (long long)(int)offsetof(struct getinfo_fid2path, gf_recno)); - LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_recno) == 8, "found %lld\n", - (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_recno)); - LASSERTF((int)offsetof(struct getinfo_fid2path, gf_linkno) == 24, "found %lld\n", - (long long)(int)offsetof(struct getinfo_fid2path, gf_linkno)); - LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_linkno) == 4, "found %lld\n", - (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_linkno)); - LASSERTF((int)offsetof(struct getinfo_fid2path, gf_pathlen) == 28, "found %lld\n", - (long long)(int)offsetof(struct getinfo_fid2path, gf_pathlen)); - LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_pathlen) == 4, "found %lld\n", - (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_pathlen)); - LASSERTF((int)offsetof(struct getinfo_fid2path, gf_path[0]) == 32, "found %lld\n", - (long long)(int)offsetof(struct getinfo_fid2path, gf_path[0])); - LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n", - (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0])); - - /* Checks for struct fiemap */ - LASSERTF((int)sizeof(struct fiemap) == 32, "found %lld\n", - (long long)(int)sizeof(struct fiemap)); - LASSERTF((int)offsetof(struct fiemap, fm_start) == 0, "found %lld\n", - (long long)(int)offsetof(struct fiemap, fm_start)); - LASSERTF((int)sizeof(((struct fiemap *)0)->fm_start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct fiemap *)0)->fm_start)); - LASSERTF((int)offsetof(struct fiemap, fm_length) == 8, "found %lld\n", - (long long)(int)offsetof(struct fiemap, fm_length)); - LASSERTF((int)sizeof(((struct fiemap *)0)->fm_length) == 8, "found %lld\n", - (long long)(int)sizeof(((struct fiemap *)0)->fm_length)); - LASSERTF((int)offsetof(struct fiemap, fm_flags) == 16, "found %lld\n", - (long long)(int)offsetof(struct fiemap, fm_flags)); - LASSERTF((int)sizeof(((struct 
fiemap *)0)->fm_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct fiemap *)0)->fm_flags)); - LASSERTF((int)offsetof(struct fiemap, fm_mapped_extents) == 20, "found %lld\n", - (long long)(int)offsetof(struct fiemap, fm_mapped_extents)); - LASSERTF((int)sizeof(((struct fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n", - (long long)(int)sizeof(((struct fiemap *)0)->fm_mapped_extents)); - LASSERTF((int)offsetof(struct fiemap, fm_extent_count) == 24, "found %lld\n", - (long long)(int)offsetof(struct fiemap, fm_extent_count)); - LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extent_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct fiemap *)0)->fm_extent_count)); - LASSERTF((int)offsetof(struct fiemap, fm_reserved) == 28, "found %lld\n", - (long long)(int)offsetof(struct fiemap, fm_reserved)); - LASSERTF((int)sizeof(((struct fiemap *)0)->fm_reserved) == 4, "found %lld\n", - (long long)(int)sizeof(((struct fiemap *)0)->fm_reserved)); - LASSERTF((int)offsetof(struct fiemap, fm_extents) == 32, "found %lld\n", - (long long)(int)offsetof(struct fiemap, fm_extents)); - LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extents) == 0, "found %lld\n", - (long long)(int)sizeof(((struct fiemap *)0)->fm_extents)); - BUILD_BUG_ON(FIEMAP_FLAG_SYNC != 0x00000001); - BUILD_BUG_ON(FIEMAP_FLAG_XATTR != 0x00000002); - BUILD_BUG_ON(FIEMAP_FLAG_DEVICE_ORDER != 0x40000000); - - /* Checks for struct fiemap_extent */ - LASSERTF((int)sizeof(struct fiemap_extent) == 56, "found %lld\n", - (long long)(int)sizeof(struct fiemap_extent)); - LASSERTF((int)offsetof(struct fiemap_extent, fe_logical) == 0, "found %lld\n", - (long long)(int)offsetof(struct fiemap_extent, fe_logical)); - LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_logical) == 8, "found %lld\n", - (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_logical)); - LASSERTF((int)offsetof(struct fiemap_extent, fe_physical) == 8, "found %lld\n", - (long long)(int)offsetof(struct fiemap_extent, fe_physical)); - 
LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_physical) == 8, "found %lld\n", - (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_physical)); - LASSERTF((int)offsetof(struct fiemap_extent, fe_length) == 16, "found %lld\n", - (long long)(int)offsetof(struct fiemap_extent, fe_length)); - LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_length) == 8, "found %lld\n", - (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_length)); - LASSERTF((int)offsetof(struct fiemap_extent, fe_flags) == 40, "found %lld\n", - (long long)(int)offsetof(struct fiemap_extent, fe_flags)); - LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_flags)); - LASSERTF((int)offsetof(struct fiemap_extent, fe_reserved[0]) == 44, "found %lld\n", - (long long)(int)offsetof(struct fiemap_extent, fe_reserved[0])); - LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]) == 4, "found %lld\n", - (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0])); - BUILD_BUG_ON(FIEMAP_EXTENT_LAST != 0x00000001); - BUILD_BUG_ON(FIEMAP_EXTENT_UNKNOWN != 0x00000002); - BUILD_BUG_ON(FIEMAP_EXTENT_DELALLOC != 0x00000004); - BUILD_BUG_ON(FIEMAP_EXTENT_ENCODED != 0x00000008); - BUILD_BUG_ON(FIEMAP_EXTENT_DATA_ENCRYPTED != 0x00000080); - BUILD_BUG_ON(FIEMAP_EXTENT_NOT_ALIGNED != 0x00000100); - BUILD_BUG_ON(FIEMAP_EXTENT_DATA_INLINE != 0x00000200); - BUILD_BUG_ON(FIEMAP_EXTENT_DATA_TAIL != 0x00000400); - BUILD_BUG_ON(FIEMAP_EXTENT_UNWRITTEN != 0x00000800); - BUILD_BUG_ON(FIEMAP_EXTENT_MERGED != 0x00001000); - BUILD_BUG_ON(FIEMAP_EXTENT_NO_DIRECT != 0x40000000); - BUILD_BUG_ON(FIEMAP_EXTENT_NET != 0x80000000); - - /* Checks for type posix_acl_xattr_entry */ - LASSERTF((int)sizeof(struct posix_acl_xattr_entry) == 8, "found %lld\n", - (long long)(int)sizeof(struct posix_acl_xattr_entry)); - LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_tag) == 0, "found %lld\n", - (long 
long)(int)offsetof(struct posix_acl_xattr_entry, e_tag)); - LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_tag) == 2, "found %lld\n", - (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_tag)); - LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_perm) == 2, "found %lld\n", - (long long)(int)offsetof(struct posix_acl_xattr_entry, e_perm)); - LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_perm) == 2, "found %lld\n", - (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_perm)); - LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_id) == 4, "found %lld\n", - (long long)(int)offsetof(struct posix_acl_xattr_entry, e_id)); - LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_id)); - - /* Checks for type posix_acl_xattr_header */ - LASSERTF((int)sizeof(struct posix_acl_xattr_header) == 4, "found %lld\n", - (long long)(int)sizeof(struct posix_acl_xattr_header)); - LASSERTF((int)offsetof(struct posix_acl_xattr_header, a_version) == 0, "found %lld\n", - (long long)(int)offsetof(struct posix_acl_xattr_header, a_version)); - LASSERTF((int)sizeof(((struct posix_acl_xattr_header *)0)->a_version) == 4, "found %lld\n", - (long long)(int)sizeof(((struct posix_acl_xattr_header *)0)->a_version)); - - /* Checks for struct link_ea_header */ - LASSERTF((int)sizeof(struct link_ea_header) == 24, "found %lld\n", - (long long)(int)sizeof(struct link_ea_header)); - LASSERTF((int)offsetof(struct link_ea_header, leh_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct link_ea_header, leh_magic)); - LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_header *)0)->leh_magic)); - LASSERTF((int)offsetof(struct link_ea_header, leh_reccount) == 4, "found %lld\n", - (long long)(int)offsetof(struct link_ea_header, leh_reccount)); - LASSERTF((int)sizeof(((struct 
link_ea_header *)0)->leh_reccount) == 4, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_header *)0)->leh_reccount)); - LASSERTF((int)offsetof(struct link_ea_header, leh_len) == 8, "found %lld\n", - (long long)(int)offsetof(struct link_ea_header, leh_len)); - LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_len) == 8, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_header *)0)->leh_len)); - LASSERTF((int)offsetof(struct link_ea_header, leh_overflow_time) == 16, "found %lld\n", - (long long)(int)offsetof(struct link_ea_header, leh_overflow_time)); - LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_overflow_time) == 4, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_header *)0)->leh_overflow_time)); - LASSERTF((int)offsetof(struct link_ea_header, leh_padding) == 20, "found %lld\n", - (long long)(int)offsetof(struct link_ea_header, leh_padding)); - LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_header *)0)->leh_padding)); - BUILD_BUG_ON(LINK_EA_MAGIC != 0x11EAF1DFUL); - - /* Checks for struct link_ea_entry */ - LASSERTF((int)sizeof(struct link_ea_entry) == 18, "found %lld\n", - (long long)(int)sizeof(struct link_ea_entry)); - LASSERTF((int)offsetof(struct link_ea_entry, lee_reclen) == 0, "found %lld\n", - (long long)(int)offsetof(struct link_ea_entry, lee_reclen)); - LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_reclen) == 2, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_reclen)); - LASSERTF((int)offsetof(struct link_ea_entry, lee_parent_fid) == 2, "found %lld\n", - (long long)(int)offsetof(struct link_ea_entry, lee_parent_fid)); - LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_parent_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_parent_fid)); - LASSERTF((int)offsetof(struct link_ea_entry, lee_name) == 18, "found %lld\n", - (long long)(int)offsetof(struct 
link_ea_entry, lee_name)); - LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_name) == 0, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_name)); - - /* Checks for struct layout_intent */ - LASSERTF((int)sizeof(struct layout_intent) == 24, "found %lld\n", - (long long)(int)sizeof(struct layout_intent)); - LASSERTF((int)offsetof(struct layout_intent, li_opc) == 0, "found %lld\n", - (long long)(int)offsetof(struct layout_intent, li_opc)); - LASSERTF((int)sizeof(((struct layout_intent *)0)->li_opc) == 4, "found %lld\n", - (long long)(int)sizeof(((struct layout_intent *)0)->li_opc)); - LASSERTF((int)offsetof(struct layout_intent, li_flags) == 4, "found %lld\n", - (long long)(int)offsetof(struct layout_intent, li_flags)); - LASSERTF((int)sizeof(((struct layout_intent *)0)->li_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct layout_intent *)0)->li_flags)); - LASSERTF((int)offsetof(struct layout_intent, li_start) == 8, "found %lld\n", - (long long)(int)offsetof(struct layout_intent, li_start)); - LASSERTF((int)sizeof(((struct layout_intent *)0)->li_start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct layout_intent *)0)->li_start)); - LASSERTF((int)offsetof(struct layout_intent, li_end) == 16, "found %lld\n", - (long long)(int)offsetof(struct layout_intent, li_end)); - LASSERTF((int)sizeof(((struct layout_intent *)0)->li_end) == 8, "found %lld\n", - (long long)(int)sizeof(((struct layout_intent *)0)->li_end)); - LASSERTF(LAYOUT_INTENT_ACCESS == 0, "found %lld\n", - (long long)LAYOUT_INTENT_ACCESS); - LASSERTF(LAYOUT_INTENT_READ == 1, "found %lld\n", - (long long)LAYOUT_INTENT_READ); - LASSERTF(LAYOUT_INTENT_WRITE == 2, "found %lld\n", - (long long)LAYOUT_INTENT_WRITE); - LASSERTF(LAYOUT_INTENT_GLIMPSE == 3, "found %lld\n", - (long long)LAYOUT_INTENT_GLIMPSE); - LASSERTF(LAYOUT_INTENT_TRUNC == 4, "found %lld\n", - (long long)LAYOUT_INTENT_TRUNC); - LASSERTF(LAYOUT_INTENT_RELEASE == 5, "found %lld\n", - (long 
long)LAYOUT_INTENT_RELEASE); - LASSERTF(LAYOUT_INTENT_RESTORE == 6, "found %lld\n", - (long long)LAYOUT_INTENT_RESTORE); - - /* Checks for struct hsm_action_item */ - LASSERTF((int)sizeof(struct hsm_action_item) == 72, "found %lld\n", - (long long)(int)sizeof(struct hsm_action_item)); - LASSERTF((int)offsetof(struct hsm_action_item, hai_len) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_item, hai_len)); - LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_len) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_len)); - LASSERTF((int)offsetof(struct hsm_action_item, hai_action) == 4, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_item, hai_action)); - LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_action) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_action)); - LASSERTF((int)offsetof(struct hsm_action_item, hai_fid) == 8, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_item, hai_fid)); - LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_fid)); - LASSERTF((int)offsetof(struct hsm_action_item, hai_dfid) == 24, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_item, hai_dfid)); - LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_dfid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_dfid)); - LASSERTF((int)offsetof(struct hsm_action_item, hai_extent) == 40, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_item, hai_extent)); - LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_extent) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_extent)); - LASSERTF((int)offsetof(struct hsm_action_item, hai_cookie) == 56, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_item, hai_cookie)); - LASSERTF((int)sizeof(((struct hsm_action_item 
*)0)->hai_cookie) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_cookie)); - LASSERTF((int)offsetof(struct hsm_action_item, hai_gid) == 64, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_item, hai_gid)); - LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_gid) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_gid)); - LASSERTF((int)offsetof(struct hsm_action_item, hai_data) == 72, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_item, hai_data)); - LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_data) == 0, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_data)); - - /* Checks for struct hsm_action_list */ - LASSERTF((int)sizeof(struct hsm_action_list) == 32, "found %lld\n", - (long long)(int)sizeof(struct hsm_action_list)); - LASSERTF((int)offsetof(struct hsm_action_list, hal_version) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_list, hal_version)); - LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_version) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_version)); - LASSERTF((int)offsetof(struct hsm_action_list, hal_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_list, hal_count)); - LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_count)); - LASSERTF((int)offsetof(struct hsm_action_list, hal_compound_id) == 8, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_list, hal_compound_id)); - LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_compound_id) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_compound_id)); - LASSERTF((int)offsetof(struct hsm_action_list, hal_flags) == 16, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_list, hal_flags)); - LASSERTF((int)sizeof(((struct 
hsm_action_list *)0)->hal_flags) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_flags)); - LASSERTF((int)offsetof(struct hsm_action_list, hal_archive_id) == 24, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_list, hal_archive_id)); - LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_archive_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_archive_id)); - LASSERTF((int)offsetof(struct hsm_action_list, padding1) == 28, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_list, padding1)); - LASSERTF((int)sizeof(((struct hsm_action_list *)0)->padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_list *)0)->padding1)); - LASSERTF((int)offsetof(struct hsm_action_list, hal_fsname) == 32, "found %lld\n", - (long long)(int)offsetof(struct hsm_action_list, hal_fsname)); - LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_fsname) == 0, "found %lld\n", - (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_fsname)); - - /* Checks for struct hsm_progress */ - LASSERTF((int)sizeof(struct hsm_progress) == 48, "found %lld\n", - (long long)(int)sizeof(struct hsm_progress)); - LASSERTF((int)offsetof(struct hsm_progress, hp_fid) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress, hp_fid)); - LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress *)0)->hp_fid)); - LASSERTF((int)offsetof(struct hsm_progress, hp_cookie) == 16, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress, hp_cookie)); - LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_cookie) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress *)0)->hp_cookie)); - LASSERTF((int)offsetof(struct hsm_progress, hp_extent) == 24, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress, hp_extent)); - LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_extent) == 16, "found 
%lld\n", - (long long)(int)sizeof(((struct hsm_progress *)0)->hp_extent)); - LASSERTF((int)offsetof(struct hsm_progress, hp_flags) == 40, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress, hp_flags)); - LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_flags) == 2, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress *)0)->hp_flags)); - LASSERTF((int)offsetof(struct hsm_progress, hp_errval) == 42, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress, hp_errval)); - LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_errval) == 2, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress *)0)->hp_errval)); - LASSERTF((int)offsetof(struct hsm_progress, padding) == 44, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress, padding)); - LASSERTF((int)sizeof(((struct hsm_progress *)0)->padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress *)0)->padding)); - LASSERTF(HP_FLAG_COMPLETED == 0x01, "found 0x%.8x\n", - HP_FLAG_COMPLETED); - LASSERTF(HP_FLAG_RETRY == 0x02, "found 0x%.8x\n", - HP_FLAG_RETRY); - - LASSERTF((int)offsetof(struct hsm_copy, hc_data_version) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_copy, hc_data_version)); - LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_data_version) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_copy *)0)->hc_data_version)); - LASSERTF((int)offsetof(struct hsm_copy, hc_flags) == 8, "found %lld\n", - (long long)(int)offsetof(struct hsm_copy, hc_flags)); - LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_flags) == 2, "found %lld\n", - (long long)(int)sizeof(((struct hsm_copy *)0)->hc_flags)); - LASSERTF((int)offsetof(struct hsm_copy, hc_errval) == 10, "found %lld\n", - (long long)(int)offsetof(struct hsm_copy, hc_errval)); - LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_errval) == 2, "found %lld\n", - (long long)(int)sizeof(((struct hsm_copy *)0)->hc_errval)); - LASSERTF((int)offsetof(struct hsm_copy, padding) == 12, "found 
%lld\n", - (long long)(int)offsetof(struct hsm_copy, padding)); - LASSERTF((int)sizeof(((struct hsm_copy *)0)->padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_copy *)0)->padding)); - LASSERTF((int)offsetof(struct hsm_copy, hc_hai) == 16, "found %lld\n", - (long long)(int)offsetof(struct hsm_copy, hc_hai)); - LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_hai) == 72, "found %lld\n", - (long long)(int)sizeof(((struct hsm_copy *)0)->hc_hai)); - - /* Checks for struct hsm_progress_kernel */ - LASSERTF((int)sizeof(struct hsm_progress_kernel) == 64, "found %lld\n", - (long long)(int)sizeof(struct hsm_progress_kernel)); - LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_fid) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress_kernel, hpk_fid)); - LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_fid)); - LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_cookie) == 16, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress_kernel, hpk_cookie)); - LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_cookie) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_cookie)); - LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_extent) == 24, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress_kernel, hpk_extent)); - LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_extent) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_extent)); - LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_flags) == 40, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress_kernel, hpk_flags)); - LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_flags) == 2, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_flags)); - LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_errval) == 42, "found 
%lld\n", - (long long)(int)offsetof(struct hsm_progress_kernel, hpk_errval)); - LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_errval) == 2, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_errval)); - LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_padding1) == 44, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress_kernel, hpk_padding1)); - LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding1)); - LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_data_version) == 48, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress_kernel, hpk_data_version)); - LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_data_version) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_data_version)); - LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_padding2) == 56, "found %lld\n", - (long long)(int)offsetof(struct hsm_progress_kernel, hpk_padding2)); - LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding2)); - - /* Checks for struct hsm_user_item */ - LASSERTF((int)sizeof(struct hsm_user_item) == 32, "found %lld\n", - (long long)(int)sizeof(struct hsm_user_item)); - LASSERTF((int)offsetof(struct hsm_user_item, hui_fid) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_item, hui_fid)); - LASSERTF((int)sizeof(((struct hsm_user_item *)0)->hui_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_item *)0)->hui_fid)); - LASSERTF((int)offsetof(struct hsm_user_item, hui_extent) == 16, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_item, hui_extent)); - LASSERTF((int)sizeof(((struct hsm_user_item *)0)->hui_extent) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_item *)0)->hui_extent)); 
- - /* Checks for struct hsm_user_state */ - LASSERTF((int)sizeof(struct hsm_user_state) == 32, "found %lld\n", - (long long)(int)sizeof(struct hsm_user_state)); - LASSERTF((int)offsetof(struct hsm_user_state, hus_states) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_state, hus_states)); - LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_states) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_states)); - LASSERTF((int)offsetof(struct hsm_user_state, hus_archive_id) == 4, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_state, hus_archive_id)); - LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_archive_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_archive_id)); - LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_state) == 8, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_state)); - LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_state) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_state)); - LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_action) == 12, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_action)); - LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_action) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_action)); - LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_location) == 16, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_location)); - LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_location) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_location)); - - /* Checks for struct hsm_state_set */ - LASSERTF((int)sizeof(struct hsm_state_set) == 24, "found %lld\n", - (long long)(int)sizeof(struct hsm_state_set)); - 
LASSERTF((int)offsetof(struct hsm_state_set, hss_valid) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_state_set, hss_valid)); - LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_valid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_valid)); - LASSERTF((int)offsetof(struct hsm_state_set, hss_archive_id) == 4, "found %lld\n", - (long long)(int)offsetof(struct hsm_state_set, hss_archive_id)); - LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_archive_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_archive_id)); - LASSERTF((int)offsetof(struct hsm_state_set, hss_setmask) == 8, "found %lld\n", - (long long)(int)offsetof(struct hsm_state_set, hss_setmask)); - LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_setmask) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_setmask)); - LASSERTF((int)offsetof(struct hsm_state_set, hss_clearmask) == 16, "found %lld\n", - (long long)(int)offsetof(struct hsm_state_set, hss_clearmask)); - LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_clearmask) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_clearmask)); - - /* Checks for struct hsm_current_action */ - LASSERTF((int)sizeof(struct hsm_current_action) == 24, "found %lld\n", - (long long)(int)sizeof(struct hsm_current_action)); - LASSERTF((int)offsetof(struct hsm_current_action, hca_state) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_current_action, hca_state)); - LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_state) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_state)); - LASSERTF((int)offsetof(struct hsm_current_action, hca_action) == 4, "found %lld\n", - (long long)(int)offsetof(struct hsm_current_action, hca_action)); - LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_action) == 4, "found %lld\n", - (long long)(int)sizeof(((struct 
hsm_current_action *)0)->hca_action)); - LASSERTF((int)offsetof(struct hsm_current_action, hca_location) == 8, "found %lld\n", - (long long)(int)offsetof(struct hsm_current_action, hca_location)); - LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_location) == 16, "found %lld\n", - (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_location)); - - /* Checks for struct hsm_request */ - LASSERTF((int)sizeof(struct hsm_request) == 24, "found %lld\n", - (long long)(int)sizeof(struct hsm_request)); - LASSERTF((int)offsetof(struct hsm_request, hr_action) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_request, hr_action)); - LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_action) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_request *)0)->hr_action)); - LASSERTF((int)offsetof(struct hsm_request, hr_archive_id) == 4, "found %lld\n", - (long long)(int)offsetof(struct hsm_request, hr_archive_id)); - LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_archive_id) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_request *)0)->hr_archive_id)); - LASSERTF((int)offsetof(struct hsm_request, hr_flags) == 8, "found %lld\n", - (long long)(int)offsetof(struct hsm_request, hr_flags)); - LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_flags) == 8, "found %lld\n", - (long long)(int)sizeof(((struct hsm_request *)0)->hr_flags)); - LASSERTF((int)offsetof(struct hsm_request, hr_itemcount) == 16, "found %lld\n", - (long long)(int)offsetof(struct hsm_request, hr_itemcount)); - LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_itemcount) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_request *)0)->hr_itemcount)); - LASSERTF((int)offsetof(struct hsm_request, hr_data_len) == 20, "found %lld\n", - (long long)(int)offsetof(struct hsm_request, hr_data_len)); - LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n", - (long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len)); - 
LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n", - (unsigned int)HSM_FORCE_ACTION); - LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n", - (unsigned int)HSM_GHOST_COPY); - - /* Checks for struct hsm_user_request */ - LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n", - (long long)(int)sizeof(struct hsm_user_request)); - LASSERTF((int)offsetof(struct hsm_user_request, hur_request) == 0, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_request, hur_request)); - LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_request) == 24, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_request *)0)->hur_request)); - LASSERTF((int)offsetof(struct hsm_user_request, hur_user_item) == 24, "found %lld\n", - (long long)(int)offsetof(struct hsm_user_request, hur_user_item)); - LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_user_item) == 0, "found %lld\n", - (long long)(int)sizeof(((struct hsm_user_request *)0)->hur_user_item)); - - /* Checks for struct hsm_user_import */ - LASSERTF(sizeof(struct hsm_user_import) == 48, "found %lld\n", - (long long)sizeof(struct hsm_user_import)); - LASSERTF(offsetof(struct hsm_user_import, hui_size) == 0, - "found %lld\n", - (long long)offsetof(struct hsm_user_import, hui_size)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_size) == 8, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_size)); - LASSERTF(offsetof(struct hsm_user_import, hui_uid) == 32, - "found %lld\n", - (long long)offsetof(struct hsm_user_import, hui_uid)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_uid) == 4, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_uid)); - LASSERTF(offsetof(struct hsm_user_import, hui_gid) == 36, - "found %lld\n", - (long long)offsetof(struct hsm_user_import, hui_gid)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_gid) == 4, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_gid)); - 
LASSERTF(offsetof(struct hsm_user_import, hui_mode) == 40, - "found %lld\n", - (long long)offsetof(struct hsm_user_import, hui_mode)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mode) == 4, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_mode)); - LASSERTF(offsetof(struct hsm_user_import, hui_atime) == 8, - "found %lld\n", - (long long)offsetof(struct hsm_user_import, hui_atime)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime) == 8, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_atime)); - LASSERTF(offsetof(struct hsm_user_import, hui_atime_ns) == 24, - "found %lld\n", - (long long)(int)offsetof(struct hsm_user_import, hui_atime_ns)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_atime_ns) == 4, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_atime_ns)); - LASSERTF(offsetof(struct hsm_user_import, hui_mtime) == 16, - "found %lld\n", - (long long)offsetof(struct hsm_user_import, hui_mtime)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime) == 8, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime)); - LASSERTF(offsetof(struct hsm_user_import, hui_mtime_ns) == 28, - "found %lld\n", - (long long)offsetof(struct hsm_user_import, hui_mtime_ns)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_mtime_ns) == 4, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_mtime_ns)); - LASSERTF(offsetof(struct hsm_user_import, hui_archive_id) == 44, - "found %lld\n", - (long long)offsetof(struct hsm_user_import, hui_archive_id)); - LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4, - "found %lld\n", - (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id)); -} diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre deleted file mode 100644 index 8691c6543a9c..000000000000 --- a/drivers/staging/lustre/sysfs-fs-lustre +++ /dev/null @@ -1,654 +0,0 @@ 
-What: /sys/fs/lustre/version -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows current running lustre version. - -What: /sys/fs/lustre/pinger -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows if the lustre module has pinger support. - "on" means yes and "off" means no. - -What: /sys/fs/lustre/health_check -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows whenever current system state believed to be "healthy", - "NOT HEALTHY", or "LBUG" whenever lustre has experienced - an internal assertion failure - -What: /sys/fs/lustre/jobid_name -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Currently running job "name" for this node to be transferred - to Lustre servers for purposes of QoS and statistics gathering. - Writing into this file will change the name, reading outputs - currently set value. - -What: /sys/fs/lustre/jobid_var -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Control file for lustre "jobstats" functionality, write new - value from the list below to change the mode: - disable - disable job name reporting to the servers (default) - procname_uid - form the job name as the current running - command name and pid with a dot in between - e.g. dd.1253 - nodelocal - use jobid_name value from above. - -What: /sys/fs/lustre/timeout -Date: June 2015 -Contact: "Oleg Drokin" -Description: - Controls "lustre timeout" variable, also known as obd_timeout - in some old manual. In the past obd_timeout was of paramount - importance as the timeout value used everywhere and where - other timeouts were derived from. These days it's much less - important as network timeouts are mostly determined by - AT (adaptive timeouts). - Unit: seconds, default: 100 - -What: /sys/fs/lustre/max_dirty_mb -Date: June 2015 -Contact: "Oleg Drokin" -Description: - Controls total number of dirty cache (in megabytes) allowed - across all mounted lustre filesystems. 
- Since writeout of dirty pages in Lustre is somewhat expensive, - when you allow to many dirty pages, this might lead to - performance degradations as kernel tries to desperately - find some pages to free/writeout. - Default 1/2 RAM. Min value 4, max value 9/10 of RAM. - -What: /sys/fs/lustre/debug_peer_on_timeout -Date: June 2015 -Contact: "Oleg Drokin" -Description: - Control if lnet debug information should be printed when - an RPC timeout occurs. - 0 disabled (default) - 1 enabled - -What: /sys/fs/lustre/dump_on_timeout -Date: June 2015 -Contact: "Oleg Drokin" -Description: - Controls if Lustre debug log should be dumped when an RPC - timeout occurs. This is useful if yout debug buffer typically - rolls over by the time you notice RPC timeouts. - -What: /sys/fs/lustre/dump_on_eviction -Date: June 2015 -Contact: "Oleg Drokin" -Description: - Controls if Lustre debug log should be dumped when an this - client is evicted from one of the servers. - This is useful if yout debug buffer typically rolls over - by the time you notice the eviction event. - -What: /sys/fs/lustre/at_min -Date: July 2015 -Contact: "Oleg Drokin" -Description: - Controls minimum adaptive timeout in seconds. If you encounter - a case where clients timeout due to server-reported processing - time being too short, you might consider increasing this value. - One common case of this if the underlying network has - unpredictable long delays. - Default: 0 - -What: /sys/fs/lustre/at_max -Date: July 2015 -Contact: "Oleg Drokin" -Description: - Controls maximum adaptive timeout in seconds. If at_max timeout - is reached for an RPC, the RPC will time out. - Some genuinuely slow network hardware might warrant increasing - this value. - Setting this value to 0 disables Adaptive Timeouts - functionality and old-style obd_timeout value is then used. 
- Default: 600 - -What: /sys/fs/lustre/at_extra -Date: July 2015 -Contact: "Oleg Drokin" -Description: - Controls how much extra time to request for unfinished requests - in processing in seconds. Normally a server-side parameter, it - is also used on the client for responses to various LDLM ASTs - that are handled with a special server thread on the client. - This is a way for the servers to ask the clients not to time - out the request that reached current servicing time estimate - yet and give it some more time. - Default: 30 - -What: /sys/fs/lustre/at_early_margin -Date: July 2015 -Contact: "Oleg Drokin" -Description: - Controls when to send the early reply for requests that are - about to timeout as an offset to the estimated service time in - seconds.. - Default: 5 - -What: /sys/fs/lustre/at_history -Date: July 2015 -Contact: "Oleg Drokin" -Description: - Controls for how many seconds to remember slowest events - encountered by adaptive timeouts code. - Default: 600 - -What: /sys/fs/lustre/llite/-/blocksize -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Biggest blocksize on object storage server for this filesystem. - -What: /sys/fs/lustre/llite/-/kbytestotal -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows total number of kilobytes of space on this filesystem - -What: /sys/fs/lustre/llite/-/kbytesfree -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows total number of free kilobytes of space on this filesystem - -What: /sys/fs/lustre/llite/-/kbytesavail -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows total number of free kilobytes of space on this filesystem - actually available for use (taking into account per-client - grants and filesystem reservations). - -What: /sys/fs/lustre/llite/-/filestotal -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows total number of inodes on the filesystem. 
- -What: /sys/fs/lustre/llite/-/filesfree -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows estimated number of free inodes on the filesystem - -What: /sys/fs/lustre/llite/-/client_type -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows whenever this filesystem considers this client to be - compute cluster-local or remote. Remote clients have - additional uid/gid convrting logic applied. - -What: /sys/fs/lustre/llite/-/fstype -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows filesystem type of the filesystem - -What: /sys/fs/lustre/llite/-/uuid -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows this filesystem superblock uuid - -What: /sys/fs/lustre/llite/-/max_read_ahead_mb -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Sets maximum number of megabytes in system memory to be - given to read-ahead cache. - -What: /sys/fs/lustre/llite/-/max_read_ahead_per_file_mb -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Sets maximum number of megabytes to read-ahead for a single file - -What: /sys/fs/lustre/llite/-/max_read_ahead_whole_mb -Date: May 2015 -Contact: "Oleg Drokin" -Description: - For small reads, how many megabytes to actually request from - the server as initial read-ahead. - -What: /sys/fs/lustre/llite/-/checksum_pages -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Enables or disables per-page checksum at llite layer, before - the pages are actually given to lower level for network transfer - -What: /sys/fs/lustre/llite/-/stats_track_pid -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Limit Lustre vfs operations gathering to just a single pid. - 0 to track everything. - -What: /sys/fs/lustre/llite/-/stats_track_ppid -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Limit Lustre vfs operations gathering to just a single ppid. - 0 to track everything. 
- -What: /sys/fs/lustre/llite/-/stats_track_gid -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Limit Lustre vfs operations gathering to just a single gid. - 0 to track everything. - -What: /sys/fs/lustre/llite/-/statahead_max -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls maximum number of statahead requests to send when - sequential readdir+stat pattern is detected. - -What: /sys/fs/lustre/llite/-/statahead_agl -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls if AGL (async glimpse ahead - obtain object information - from OSTs in parallel with MDS during statahead) should be - enabled or disabled. - 0 to disable, 1 to enable. - -What: /sys/fs/lustre/llite/-/lazystatfs -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls statfs(2) behaviour in the face of down servers. - If 0, always wait for all servers to come online, - if 1, ignote inactive servers. - -What: /sys/fs/lustre/llite/-/max_easize -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows maximum number of bytes file striping data could be - in current configuration of storage. - -What: /sys/fs/lustre/llite/-/default_easize -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows maximum observed file striping data seen by this - filesystem client instance. - -What: /sys/fs/lustre/llite/-/xattr_cache -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls extended attributes client-side cache. - 1 to enable, 0 to disable. - -What: /sys/fs/lustre/llite/-/unstable_stats -Date: Apr 2016 -Contact: "Oleg Drokin" -Description: - Shows number of pages that were sent and acknowledged by - server but were not yet committed and therefore still - pinned in client memory even though no longer dirty. 
- -What: /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls if client should replay unused locks during recovery - If a client tends to have a lot of unused locks in LRU, - recovery times might become prolonged. - 1 - just locally cancel unused locks (default) - 0 - replay unused locks. - -What: /sys/fs/lustre/ldlm/namespaces//resource_count -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Displays number of lock resources (objects on which individual - locks are taken) currently allocated in this namespace. - -What: /sys/fs/lustre/ldlm/namespaces//lock_count -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Displays number or locks allocated in this namespace. - -What: /sys/fs/lustre/ldlm/namespaces//lru_size -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls and displays LRU size limit for unused locks for this - namespace. - 0 - LRU size is unlimited, controlled by server resources - positive number - number of locks to allow in lock LRU list - -What: /sys/fs/lustre/ldlm/namespaces//lock_unused_count -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Display number of locks currently sitting in the LRU list - of this namespace - -What: /sys/fs/lustre/ldlm/namespaces//lru_max_age -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Maximum number of milliseconds a lock could sit in LRU list - before client would voluntarily cancel it as unused. - -What: /sys/fs/lustre/ldlm/namespaces//early_lock_cancel -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls "early lock cancellation" feature on this namespace - if supported by the server. - When enabled, tries to preemtively cancel locks that would be - cancelled by verious operations and bundle the cancellation - requests in the same RPC as the main operation, which results - in significant speedups due to reduced lock-pingpong RPCs. 
- 0 - disabled - 1 - enabled (default) - -What: /sys/fs/lustre/ldlm/namespaces//pool/granted -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Displays number of granted locks in this namespace - -What: /sys/fs/lustre/ldlm/namespaces//pool/grant_rate -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of granted locks in this namespace during last - time interval - -What: /sys/fs/lustre/ldlm/namespaces//pool/cancel_rate -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of lock cancellations in this namespace during - last time interval - -What: /sys/fs/lustre/ldlm/namespaces//pool/grant_speed -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Calculated speed of lock granting (grant_rate - cancel_rate) - in this namespace - -What: /sys/fs/lustre/ldlm/namespaces//pool/grant_plan -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Estimated number of locks to be granted in the next time - interval in this namespace - -What: /sys/fs/lustre/ldlm/namespaces//pool/limit -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls number of allowed locks in this pool. - When lru_size is 0, this is the actual limit then. - -What: /sys/fs/lustre/ldlm/namespaces//pool/lock_volume_factor -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Multiplier for all lock volume calculations above. - Default is 1. Increase to make the client to more agressively - clean it's lock LRU list for this namespace. - -What: /sys/fs/lustre/ldlm/namespaces//pool/server_lock_volume -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Calculated server lock volume. - -What: /sys/fs/lustre/ldlm/namespaces//pool/recalc_period -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls length of time between recalculation of above - values (in seconds). - -What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_min -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls minimum number of ldlm callback threads to start. 
- -What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_max -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls maximum number of ldlm callback threads to start. - -What: /sys/fs/lustre/ldlm/services/ldlm_cbd/threads_started -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows actual number of ldlm callback threads running. - -What: /sys/fs/lustre/ldlm/services/ldlm_cbd/high_priority_ratio -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls what percentage of ldlm callback threads is dedicated - to "high priority" incoming requests. - -What: /sys/fs/lustre/{obdtype}/{connection_name}/blocksize -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Blocksize on backend filesystem for service behind this obd - device (or biggest blocksize for compound devices like lov - and lmv) - -What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytestotal -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Total number of kilobytes of space on backend filesystem - for service behind this obd (or total amount for compound - devices like lov lmv) - -What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytesfree -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of free kilobytes on backend filesystem for service - behind this obd (or total amount for compound devices - like lov lmv) - -What: /sys/fs/lustre/{obdtype}/{connection_name}/kbytesavail -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of kilobytes of free space on backend filesystem - for service behind this obd (or total amount for compound - devices like lov lmv) that is actually available for use - (taking into account per-client and filesystem reservations). - -What: /sys/fs/lustre/{obdtype}/{connection_name}/filestotal -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of inodes on backend filesystem for service behind this - obd. 
- -What: /sys/fs/lustre/{obdtype}/{connection_name}/filesfree -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of free inodes on backend filesystem for service - behind this obd. - -What: /sys/fs/lustre/mdc/{connection_name}/max_pages_per_rpc -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Maximum number of readdir pages to fit into a single readdir - RPC. - -What: /sys/fs/lustre/{mdc,osc}/{connection_name}/max_rpcs_in_flight -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Maximum number of parallel RPCs on the wire to allow on - this connection. Increasing this number would help on higher - latency links, but has a chance of overloading a server - if you have too many clients like this. - Default: 8 - -What: /sys/fs/lustre/osc/{connection_name}/max_pages_per_rpc -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Maximum number of pages to fit into a single RPC. - Typically bigger RPCs allow for better performance. - Default: however many pages to form 1M of data (256 pages - for 4K page sized platforms) - -What: /sys/fs/lustre/osc/{connection_name}/active -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls accessibility of this connection. If set to 0, - fail all accesses immediately. - -What: /sys/fs/lustre/osc/{connection_name}/checksums -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls whenever to checksum bulk RPC data over the wire - to this target. - 1: enable (default) ; 0: disable - -What: /sys/fs/lustre/osc/{connection_name}/contention_seconds -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls for how long to consider a file contended once - indicated as such by the server. - When a file is considered contended, all operations switch to - synchronous lockless mode to avoid cache and lock pingpong. 
- -What: /sys/fs/lustre/osc/{connection_name}/cur_dirty_bytes -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Displays how many dirty bytes is presently in the cache for this - target. - -What: /sys/fs/lustre/osc/{connection_name}/cur_grant_bytes -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows how many bytes we have as a "dirty cache" grant from the - server. Writing a value smaller than shown allows to release - some grant back to the server. - Dirty cache grant is a way Lustre ensures that cached successful - writes on client do not end up discarded by the server due to - lack of space later on. - -What: /sys/fs/lustre/osc/{connection_name}/cur_lost_grant_bytes -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Shows how many granted bytes were released to the server due - to lack of write activity on this client. - -What: /sys/fs/lustre/osc/{connection_name}/grant_shrink_interval -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of seconds with no write activity for this target - to start releasing dirty grant back to the server. - -What: /sys/fs/lustre/osc/{connection_name}/destroys_in_flight -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of DESTROY RPCs currently in flight to this target. - -What: /sys/fs/lustre/osc/{connection_name}/lockless_truncate -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls whether lockless truncate RPCs are allowed to this - target. - Lockless truncate causes server to perform the locking which - is beneficial if the truncate is not followed by a write - immediately. - 1: enable ; 0: disable (default) - -What: /sys/fs/lustre/osc/{connection_name}/max_dirty_mb -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls how much dirty data this client can accumulate - for this target. This is orthogonal to dirty grant and is - a hard limit even if the server would allow a bigger dirty - cache. 
- While allowing higher dirty cache is beneficial for write - performance, flushing write cache takes longer and as such - the node might be more prone to OOMs. - Having this value set too low might result in not being able - to sent too many parallel WRITE RPCs. - Default: 32 - -What: /sys/fs/lustre/osc/{connection_name}/resend_count -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Controls how many times to try and resend RPCs to this target - that failed with "recoverable" status, such as EAGAIN, - ENOMEM. - -What: /sys/fs/lustre/lov/{connection_name}/numobd -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of OSC targets managed by this LOV instance. - -What: /sys/fs/lustre/lov/{connection_name}/activeobd -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of OSC targets managed by this LOV instance that are - actually active. - -What: /sys/fs/lustre/lmv/{connection_name}/numobd -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of MDC targets managed by this LMV instance. - -What: /sys/fs/lustre/lmv/{connection_name}/activeobd -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Number of MDC targets managed by this LMV instance that are - actually active. - -What: /sys/fs/lustre/lmv/{connection_name}/placement -Date: May 2015 -Contact: "Oleg Drokin" -Description: - Determines policy of inode placement in case of multiple - metadata servers: - CHAR - based on a hash of the file name used at creation time - (Default) - NID - based on a hash of creating client network id. 
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c index ffe8179f5d41..073fe7537f6c 100644 --- a/scripts/selinux/mdp/mdp.c +++ b/scripts/selinux/mdp/mdp.c @@ -124,7 +124,6 @@ int main(int argc, char *argv[]) fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n"); fprintf(fout, "fs_use_xattr jffs2 user_u:base_r:base_t;\n"); fprintf(fout, "fs_use_xattr gfs2 user_u:base_r:base_t;\n"); - fprintf(fout, "fs_use_xattr lustre user_u:base_r:base_t;\n"); fprintf(fout, "fs_use_task eventpollfs user_u:base_r:base_t;\n"); fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n"); -- cgit v1.2.3 From d7822b1e24f2df5df98c76f0e94a5416349ff759 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Sat, 2 Jun 2018 08:43:54 -0400 Subject: rseq: Introduce restartable sequences system call Expose a new system call allowing each thread to register one userspace memory area to be used as an ABI between kernel and user-space for two purposes: user-space restartable sequences and quick access to read the current CPU number value from user-space. * Restartable sequences (per-cpu atomics) Restartables sequences allow user-space to perform update operations on per-cpu data without requiring heavy-weight atomic operations. The restartable critical sections (percpu atomics) work has been started by Paul Turner and Andrew Hunter. It lets the kernel handle restart of critical sections. [1] [2] The re-implementation proposed here brings a few simplifications to the ABI which facilitates porting to other architectures and speeds up the user-space fast path. Here are benchmarks of various rseq use-cases. Test hardware: arm32: ARMv7 Processor rev 4 (v7l) "Cubietruck", 2-core x86-64: Intel E5-2630 v3@2.40GHz, 16-core, hyperthreading The following benchmarks were all performed on a single thread. 
* Per-CPU statistic counter increment getcpu+atomic (ns/op) rseq (ns/op) speedup arm32: 344.0 31.4 11.0 x86-64: 15.3 2.0 7.7 * LTTng-UST: write event 32-bit header, 32-bit payload into tracer per-cpu buffer getcpu+atomic (ns/op) rseq (ns/op) speedup arm32: 2502.0 2250.0 1.1 x86-64: 117.4 98.0 1.2 * liburcu percpu: lock-unlock pair, dereference, read/compare word getcpu+atomic (ns/op) rseq (ns/op) speedup arm32: 751.0 128.5 5.8 x86-64: 53.4 28.6 1.9 * jemalloc memory allocator adapted to use rseq Using rseq with per-cpu memory pools in jemalloc at Facebook (based on rseq 2016 implementation): The production workload response-time has 1-2% gain avg. latency, and the P99 overall latency drops by 2-3%. * Reading the current CPU number Speeding up reading the current CPU number on which the caller thread is running is done by keeping the current CPU number up do date within the cpu_id field of the memory area registered by the thread. This is done by making scheduler preemption set the TIF_NOTIFY_RESUME flag on the current thread. Upon return to user-space, a notify-resume handler updates the current CPU value within the registered user-space memory area. User-space can then read the current CPU number directly from memory. Keeping the current cpu id in a memory area shared between kernel and user-space is an improvement over current mechanisms available to read the current CPU number, which has the following benefits over alternative approaches: - 35x speedup on ARM vs system call through glibc - 20x speedup on x86 compared to calling glibc, which calls vdso executing a "lsl" instruction, - 14x speedup on x86 compared to inlined "lsl" instruction, - Unlike vdso approaches, this cpu_id value can be read from an inline assembly, which makes it a useful building block for restartable sequences. - The approach of reading the cpu id through memory mapping shared between kernel and user-space is portable (e.g. ARM), which is not the case for the lsl-based x86 vdso. 
On x86, yet another possible approach would be to use the gs segment selector to point to user-space per-cpu data. This approach performs similarly to the cpu id cache, but it has two disadvantages: it is not portable, and it is incompatible with existing applications already using the gs segment selector for other purposes. Benchmarking various approaches for reading the current CPU number: ARMv7 Processor rev 4 (v7l) Machine model: Cubietruck - Baseline (empty loop): 8.4 ns - Read CPU from rseq cpu_id: 16.7 ns - Read CPU from rseq cpu_id (lazy register): 19.8 ns - glibc 2.19-0ubuntu6.6 getcpu: 301.8 ns - getcpu system call: 234.9 ns x86-64 Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz: - Baseline (empty loop): 0.8 ns - Read CPU from rseq cpu_id: 0.8 ns - Read CPU from rseq cpu_id (lazy register): 0.8 ns - Read using gs segment selector: 0.8 ns - "lsl" inline assembly: 13.0 ns - glibc 2.19-0ubuntu6 getcpu: 16.6 ns - getcpu system call: 53.9 ns - Speed (benchmark taken on v8 of patchset) Running 10 runs of hackbench -l 100000 seems to indicate, contrary to expectations, that enabling CONFIG_RSEQ slightly accelerates the scheduler: Configuration: 2 sockets * 8-core Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz (directly on hardware, hyperthreading disabled in BIOS, energy saving disabled in BIOS, turboboost disabled in BIOS, cpuidle.off=1 kernel parameter), with a Linux v4.6 defconfig+localyesconfig, restartable sequences series applied. * CONFIG_RSEQ=n avg.: 41.37 s std.dev.: 0.36 s * CONFIG_RSEQ=y avg.: 40.46 s std.dev.: 0.33 s - Size On x86-64, between CONFIG_RSEQ=n/y, the text size increase of vmlinux is 567 bytes, and the data size increase of vmlinux is 5696 bytes. 
[1] https://lwn.net/Articles/650333/ [2] http://www.linuxplumbersconf.org/2013/ocw/system/presentations/1695/original/LPC%20-%20PerCpu%20Atomics.pdf Signed-off-by: Mathieu Desnoyers Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Cc: Joel Fernandes Cc: Catalin Marinas Cc: Dave Watson Cc: Will Deacon Cc: Andi Kleen Cc: "H . Peter Anvin" Cc: Chris Lameter Cc: Russell King Cc: Andrew Hunter Cc: Michael Kerrisk Cc: "Paul E . McKenney" Cc: Paul Turner Cc: Boqun Feng Cc: Josh Triplett Cc: Steven Rostedt Cc: Ben Maurer Cc: Alexander Viro Cc: linux-api@vger.kernel.org Cc: Andy Lutomirski Cc: Andrew Morton Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20151027235635.16059.11630.stgit@pjt-glaptop.roam.corp.google.com Link: http://lkml.kernel.org/r/20150624222609.6116.86035.stgit@kitami.mtv.corp.google.com Link: https://lkml.kernel.org/r/20180602124408.8430-3-mathieu.desnoyers@efficios.com --- MAINTAINERS | 11 ++ arch/Kconfig | 7 + fs/exec.c | 1 + include/linux/sched.h | 134 +++++++++++++++++ include/linux/syscalls.h | 4 +- include/trace/events/rseq.h | 57 +++++++ include/uapi/linux/rseq.h | 133 +++++++++++++++++ init/Kconfig | 23 +++ kernel/Makefile | 1 + kernel/fork.c | 2 + kernel/rseq.c | 357 ++++++++++++++++++++++++++++++++++++++++++++ kernel/sched/core.c | 2 + kernel/sys_ni.c | 3 + 13 files changed, 734 insertions(+), 1 deletion(-) create mode 100644 include/trace/events/rseq.h create mode 100644 include/uapi/linux/rseq.h create mode 100644 kernel/rseq.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aa635837a6af..a384243d911b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11976,6 +11976,17 @@ F: include/dt-bindings/reset/ F: include/linux/reset.h F: include/linux/reset-controller.h +RESTARTABLE SEQUENCES SUPPORT +M: Mathieu Desnoyers +M: Peter Zijlstra +M: "Paul E. 
McKenney" +M: Boqun Feng +L: linux-kernel@vger.kernel.org +S: Supported +F: kernel/rseq.c +F: include/uapi/linux/rseq.h +F: include/trace/events/rseq.h + RFKILL M: Johannes Berg L: linux-wireless@vger.kernel.org diff --git a/arch/Kconfig b/arch/Kconfig index b695a3e3e922..095ba99968c1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -272,6 +272,13 @@ config HAVE_REGS_AND_STACK_ACCESS_API declared in asm/ptrace.h For example the kprobes-based event tracer needs this API. +config HAVE_RSEQ + bool + depends on HAVE_REGS_AND_STACK_ACCESS_API + help + This symbol should be selected by an architecture if it + supports an implementation of restartable sequences. + config HAVE_CLK bool help diff --git a/fs/exec.c b/fs/exec.c index 183059c427b9..2c3911612b22 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1822,6 +1822,7 @@ static int do_execveat_common(int fd, struct filename *filename, current->fs->in_exec = 0; current->in_execve = 0; membarrier_execve(current); + rseq_execve(current); acct_update_integrals(current); task_numa_free(current); free_bprm(bprm); diff --git a/include/linux/sched.h b/include/linux/sched.h index 14e4f9c12337..3aa4fcb74e76 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -27,6 +27,7 @@ #include #include #include +#include /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; @@ -1047,6 +1048,17 @@ struct task_struct { unsigned long numa_pages_migrated; #endif /* CONFIG_NUMA_BALANCING */ +#ifdef CONFIG_RSEQ + struct rseq __user *rseq; + u32 rseq_len; + u32 rseq_sig; + /* + * RmW on rseq_event_mask must be performed atomically + * with respect to preemption. 
+ */ + unsigned long rseq_event_mask; +#endif + struct tlbflush_unmap_batch tlb_ubc; struct rcu_head rcu; @@ -1757,4 +1769,126 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask); #define TASK_SIZE_OF(tsk) TASK_SIZE #endif +#ifdef CONFIG_RSEQ + +/* + * Map the event mask on the user-space ABI enum rseq_cs_flags + * for direct mask checks. + */ +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, + RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, + RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, +}; + +enum rseq_event_mask { + RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), + RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), + RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), +}; + +static inline void rseq_set_notify_resume(struct task_struct *t) +{ + if (t->rseq) + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); +} + +void __rseq_handle_notify_resume(struct pt_regs *regs); + +static inline void rseq_handle_notify_resume(struct pt_regs *regs) +{ + if (current->rseq) + __rseq_handle_notify_resume(regs); +} + +static inline void rseq_signal_deliver(struct pt_regs *regs) +{ + preempt_disable(); + __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); + preempt_enable(); + rseq_handle_notify_resume(regs); +} + +/* rseq_preempt() requires preemption to be disabled. */ +static inline void rseq_preempt(struct task_struct *t) +{ + __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); + rseq_set_notify_resume(t); +} + +/* rseq_migrate() requires preemption to be disabled. */ +static inline void rseq_migrate(struct task_struct *t) +{ + __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); + rseq_set_notify_resume(t); +} + +/* + * If parent process has a registered restartable sequences area, the + * child inherits. Only applies when forking a process, not a thread. 
In + * case a parent fork() in the middle of a restartable sequence, set the + * resume notifier to force the child to retry. + */ +static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) +{ + if (clone_flags & CLONE_THREAD) { + t->rseq = NULL; + t->rseq_len = 0; + t->rseq_sig = 0; + t->rseq_event_mask = 0; + } else { + t->rseq = current->rseq; + t->rseq_len = current->rseq_len; + t->rseq_sig = current->rseq_sig; + t->rseq_event_mask = current->rseq_event_mask; + rseq_preempt(t); + } +} + +static inline void rseq_execve(struct task_struct *t) +{ + t->rseq = NULL; + t->rseq_len = 0; + t->rseq_sig = 0; + t->rseq_event_mask = 0; +} + +#else + +static inline void rseq_set_notify_resume(struct task_struct *t) +{ +} +static inline void rseq_handle_notify_resume(struct pt_regs *regs) +{ +} +static inline void rseq_signal_deliver(struct pt_regs *regs) +{ +} +static inline void rseq_preempt(struct task_struct *t) +{ +} +static inline void rseq_migrate(struct task_struct *t) +{ +} +static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) +{ +} +static inline void rseq_execve(struct task_struct *t) +{ +} + +#endif + +#ifdef CONFIG_DEBUG_RSEQ + +void rseq_syscall(struct pt_regs *regs); + +#else + +static inline void rseq_syscall(struct pt_regs *regs) +{ +} + +#endif + #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 390e814fdc8d..73810808cdf2 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -66,6 +66,7 @@ struct old_linux_dirent; struct perf_event_attr; struct file_handle; struct sigaltstack; +struct rseq; union bpf_attr; #include @@ -897,7 +898,8 @@ asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); asmlinkage long sys_pkey_free(int pkey); asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, unsigned mask, struct statx __user *buffer); - +asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len, + int flags, uint32_t 
sig); /* * Architecture-specific system calls diff --git a/include/trace/events/rseq.h b/include/trace/events/rseq.h new file mode 100644 index 000000000000..a04a64bc1a00 --- /dev/null +++ b/include/trace/events/rseq.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rseq + +#if !defined(_TRACE_RSEQ_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RSEQ_H + +#include +#include + +TRACE_EVENT(rseq_update, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __field(s32, cpu_id) + ), + + TP_fast_assign( + __entry->cpu_id = raw_smp_processor_id(); + ), + + TP_printk("cpu_id=%d", __entry->cpu_id) +); + +TRACE_EVENT(rseq_ip_fixup, + + TP_PROTO(unsigned long regs_ip, unsigned long start_ip, + unsigned long post_commit_offset, unsigned long abort_ip), + + TP_ARGS(regs_ip, start_ip, post_commit_offset, abort_ip), + + TP_STRUCT__entry( + __field(unsigned long, regs_ip) + __field(unsigned long, start_ip) + __field(unsigned long, post_commit_offset) + __field(unsigned long, abort_ip) + ), + + TP_fast_assign( + __entry->regs_ip = regs_ip; + __entry->start_ip = start_ip; + __entry->post_commit_offset = post_commit_offset; + __entry->abort_ip = abort_ip; + ), + + TP_printk("regs_ip=0x%lx start_ip=0x%lx post_commit_offset=%lu abort_ip=0x%lx", + __entry->regs_ip, __entry->start_ip, + __entry->post_commit_offset, __entry->abort_ip) +); + +#endif /* _TRACE_SOCK_H */ + +/* This part must be outside protection */ +#include diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h new file mode 100644 index 000000000000..d620fa43756c --- /dev/null +++ b/include/uapi/linux/rseq.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_RSEQ_H +#define _UAPI_LINUX_RSEQ_H + +/* + * linux/rseq.h + * + * Restartable sequences system call API + * + * Copyright (c) 2015-2018 Mathieu Desnoyers + */ + +#ifdef __KERNEL__ +# include +#else +# include +#endif + 
+#include + +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = -1, + RSEQ_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = (1 << 0), +}; + +enum rseq_cs_flags_bit { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = + (1U << RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT), + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = + (1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT), + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = + (1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT), +}; + +/* + * struct rseq_cs is aligned on 4 * 8 bytes to ensure it is always + * contained within a single cache-line. It is usually declared as + * link-time constant data. + */ +struct rseq_cs { + /* Version of this structure. */ + __u32 version; + /* enum rseq_cs_flags */ + __u32 flags; + LINUX_FIELD_u32_u64(start_ip); + /* Offset from start_ip. */ + LINUX_FIELD_u32_u64(post_commit_offset); + LINUX_FIELD_u32_u64(abort_ip); +} __attribute__((aligned(4 * sizeof(__u64)))); + +/* + * struct rseq is aligned on 4 * 8 bytes to ensure it is always + * contained within a single cache-line. + * + * A single struct rseq per thread is allowed. + */ +struct rseq { + /* + * Restartable sequences cpu_id_start field. Updated by the + * kernel, and read by user-space with single-copy atomicity + * semantics. Aligned on 32-bit. Always contains a value in the + * range of possible CPUs, although the value may not be the + * actual current CPU (e.g. if rseq is not initialized). This + * CPU number value should always be compared against the value + * of the cpu_id field before performing a rseq commit or + * returning a value read from a data structure indexed using + * the cpu_id_start value. + */ + __u32 cpu_id_start; + /* + * Restartable sequences cpu_id field. Updated by the kernel, + * and read by user-space with single-copy atomicity semantics. 
+ * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and + * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the + * former means "rseq uninitialized", and latter means "rseq + * initialization failed". This value is meant to be read within + * rseq critical sections and compared with the cpu_id_start + * value previously read, before performing the commit instruction, + * or read and compared with the cpu_id_start value before returning + * a value loaded from a data structure indexed using the + * cpu_id_start value. + */ + __u32 cpu_id; + /* + * Restartable sequences rseq_cs field. + * + * Contains NULL when no critical section is active for the current + * thread, or holds a pointer to the currently active struct rseq_cs. + * + * Updated by user-space, which sets the address of the currently + * active rseq_cs at the beginning of assembly instruction sequence + * block, and set to NULL by the kernel when it restarts an assembly + * instruction sequence block, as well as when the kernel detects that + * it is preempting or delivering a signal outside of the range + * targeted by the rseq_cs. Also needs to be set to NULL by user-space + * before reclaiming memory that contains the targeted struct rseq_cs. + * + * Read and set by the kernel with single-copy atomicity semantics. + * Set by user-space with single-copy atomicity semantics. Aligned + * on 64-bit. + */ + LINUX_FIELD_u32_u64(rseq_cs); + /* + * - RSEQ_DISABLE flag: + * + * Fallback fast-track flag for single-stepping. + * Set by user-space if lack of progress is detected. + * Cleared by user-space after rseq finish. + * Read by the kernel. + * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT + * Inhibit instruction sequence block restart and event + * counter increment on preemption for this thread. + * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL + * Inhibit instruction sequence block restart and event + * counter increment on signal delivery for this thread. 
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE + * Inhibit instruction sequence block restart and event + * counter increment on migration for this thread. + */ + __u32 flags; +} __attribute__((aligned(4 * sizeof(__u64)))); + +#endif /* _UAPI_LINUX_RSEQ_H */ diff --git a/init/Kconfig b/init/Kconfig index 18b151f0ddc1..33ec06fddaaa 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1417,6 +1417,29 @@ config ARCH_HAS_MEMBARRIER_CALLBACKS config ARCH_HAS_MEMBARRIER_SYNC_CORE bool +config RSEQ + bool "Enable rseq() system call" if EXPERT + default y + depends on HAVE_RSEQ + select MEMBARRIER + help + Enable the restartable sequences system call. It provides a + user-space cache for the current CPU number value, which + speeds up getting the current CPU number from user-space, + as well as an ABI to speed up user-space operations on + per-CPU data. + + If unsure, say Y. + +config DEBUG_RSEQ + default n + bool "Enabled debugging of rseq() system call" if EXPERT + depends on RSEQ && DEBUG_KERNEL + help + Enable extra debugging checks for the rseq system call. + + If unsure, say N. 
+ config EMBEDDED bool "Embedded system" option allnoconfig_y diff --git a/kernel/Makefile b/kernel/Makefile index f85ae5dfa474..7085c841c413 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -113,6 +113,7 @@ obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o obj-$(CONFIG_TORTURE_TEST) += torture.o obj-$(CONFIG_HAS_IOMEM) += memremap.o +obj-$(CONFIG_RSEQ) += rseq.o $(obj)/configs.o: $(obj)/config_data.h diff --git a/kernel/fork.c b/kernel/fork.c index a5d21c42acfc..70992bfeba81 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1899,6 +1899,8 @@ static __latent_entropy struct task_struct *copy_process( */ copy_seccomp(p); + rseq_fork(p, clone_flags); + /* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the diff --git a/kernel/rseq.c b/kernel/rseq.c new file mode 100644 index 000000000000..ae306f90c514 --- /dev/null +++ b/kernel/rseq.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Restartable sequences system call + * + * Copyright (C) 2015, Google, Inc., + * Paul Turner and Andrew Hunter + * Copyright (C) 2015-2018, EfficiOS Inc., + * Mathieu Desnoyers + */ + +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#define RSEQ_CS_PREEMPT_MIGRATE_FLAGS (RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE | \ + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT) + +/* + * + * Restartable sequences are a lightweight interface that allows + * user-level code to be executed atomically relative to scheduler + * preemption and signal delivery. Typically used for implementing + * per-cpu operations. + * + * It allows user-space to perform update operations on per-cpu data + * without requiring heavy-weight atomic operations. 
+ * + * Detailed algorithm of rseq user-space assembly sequences: + * + * init(rseq_cs) + * cpu = TLS->rseq::cpu_id_start + * [1] TLS->rseq::rseq_cs = rseq_cs + * [start_ip] ---------------------------- + * [2] if (cpu != TLS->rseq::cpu_id) + * goto abort_ip; + * [3] + * [post_commit_ip] ---------------------------- + * + * The address of jump target abort_ip must be outside the critical + * region, i.e.: + * + * [abort_ip] < [start_ip] || [abort_ip] >= [post_commit_ip] + * + * Steps [2]-[3] (inclusive) need to be a sequence of instructions in + * userspace that can handle being interrupted between any of those + * instructions, and then resumed to the abort_ip. + * + * 1. Userspace stores the address of the struct rseq_cs assembly + * block descriptor into the rseq_cs field of the registered + * struct rseq TLS area. This update is performed through a single + * store within the inline assembly instruction sequence. + * [start_ip] + * + * 2. Userspace tests to check whether the current cpu_id field match + * the cpu number loaded before start_ip, branching to abort_ip + * in case of a mismatch. + * + * If the sequence is preempted or interrupted by a signal + * at or after start_ip and before post_commit_ip, then the kernel + * clears TLS->__rseq_abi::rseq_cs, and sets the user-space return + * ip to abort_ip before returning to user-space, so the preempted + * execution resumes at abort_ip. + * + * 3. Userspace critical section final instruction before + * post_commit_ip is the commit. The critical section is + * self-terminating. + * [post_commit_ip] + * + * 4. + * + * On failure at [2], or if interrupted by preempt or signal delivery + * between [1] and [3]: + * + * [abort_ip] + * F1. 
+ */ + +static int rseq_update_cpu_id(struct task_struct *t) +{ + u32 cpu_id = raw_smp_processor_id(); + + if (__put_user(cpu_id, &t->rseq->cpu_id_start)) + return -EFAULT; + if (__put_user(cpu_id, &t->rseq->cpu_id)) + return -EFAULT; + trace_rseq_update(t); + return 0; +} + +static int rseq_reset_rseq_cpu_id(struct task_struct *t) +{ + u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED; + + /* + * Reset cpu_id_start to its initial state (0). + */ + if (__put_user(cpu_id_start, &t->rseq->cpu_id_start)) + return -EFAULT; + /* + * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming + * in after unregistration can figure out that rseq needs to be + * registered again. + */ + if (__put_user(cpu_id, &t->rseq->cpu_id)) + return -EFAULT; + return 0; +} + +static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) +{ + struct rseq_cs __user *urseq_cs; + unsigned long ptr; + u32 __user *usig; + u32 sig; + int ret; + + ret = __get_user(ptr, &t->rseq->rseq_cs); + if (ret) + return ret; + if (!ptr) { + memset(rseq_cs, 0, sizeof(*rseq_cs)); + return 0; + } + urseq_cs = (struct rseq_cs __user *)ptr; + if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs))) + return -EFAULT; + if (rseq_cs->version > 0) + return -EINVAL; + + /* Ensure that abort_ip is not in the critical section. */ + if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset) + return -EINVAL; + + usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32)); + ret = get_user(sig, usig); + if (ret) + return ret; + + if (current->rseq_sig != sig) { + printk_ratelimited(KERN_WARNING + "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n", + sig, current->rseq_sig, current->pid, usig); + return -EPERM; + } + return 0; +} + +static int rseq_need_restart(struct task_struct *t, u32 cs_flags) +{ + u32 flags, event_mask; + int ret; + + /* Get thread flags. 
*/ + ret = __get_user(flags, &t->rseq->flags); + if (ret) + return ret; + + /* Take critical section flags into account. */ + flags |= cs_flags; + + /* + * Restart on signal can only be inhibited when restart on + * preempt and restart on migrate are inhibited too. Otherwise, + * a preempted signal handler could fail to restart the prior + * execution context on sigreturn. + */ + if (unlikely((flags & RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL) && + (flags & RSEQ_CS_PREEMPT_MIGRATE_FLAGS) != + RSEQ_CS_PREEMPT_MIGRATE_FLAGS)) + return -EINVAL; + + /* + * Load and clear event mask atomically with respect to + * scheduler preemption. + */ + preempt_disable(); + event_mask = t->rseq_event_mask; + t->rseq_event_mask = 0; + preempt_enable(); + + return !!(event_mask & ~flags); +} + +static int clear_rseq_cs(struct task_struct *t) +{ + /* + * The rseq_cs field is set to NULL on preemption or signal + * delivery on top of rseq assembly block, as well as on top + * of code outside of the rseq assembly block. This performs + * a lazy clear of the rseq_cs field. + * + * Set rseq_cs to NULL with single-copy atomicity. + */ + return __put_user(0UL, &t->rseq->rseq_cs); +} + +/* + * Unsigned comparison will be true when ip >= start_ip, and when + * ip < start_ip + post_commit_offset. + */ +static bool in_rseq_cs(unsigned long ip, struct rseq_cs *rseq_cs) +{ + return ip - rseq_cs->start_ip < rseq_cs->post_commit_offset; +} + +static int rseq_ip_fixup(struct pt_regs *regs) +{ + unsigned long ip = instruction_pointer(regs); + struct task_struct *t = current; + struct rseq_cs rseq_cs; + int ret; + + ret = rseq_get_rseq_cs(t, &rseq_cs); + if (ret) + return ret; + + /* + * Handle potentially not being within a critical section. + * If not nested over a rseq critical section, restart is useless. + * Clear the rseq_cs pointer and return. 
+ */ + if (!in_rseq_cs(ip, &rseq_cs)) + return clear_rseq_cs(t); + ret = rseq_need_restart(t, rseq_cs.flags); + if (ret <= 0) + return ret; + ret = clear_rseq_cs(t); + if (ret) + return ret; + trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset, + rseq_cs.abort_ip); + instruction_pointer_set(regs, (unsigned long)rseq_cs.abort_ip); + return 0; +} + +/* + * This resume handler must always be executed between any of: + * - preemption, + * - signal delivery, + * and return to user-space. + * + * This is how we can ensure that the entire rseq critical section, + * consisting of both the C part and the assembly instruction sequence, + * will issue the commit instruction only if executed atomically with + * respect to other threads scheduled on the same CPU, and with respect + * to signal handlers. + */ +void __rseq_handle_notify_resume(struct pt_regs *regs) +{ + struct task_struct *t = current; + int ret; + + if (unlikely(t->flags & PF_EXITING)) + return; + if (unlikely(!access_ok(VERIFY_WRITE, t->rseq, sizeof(*t->rseq)))) + goto error; + ret = rseq_ip_fixup(regs); + if (unlikely(ret < 0)) + goto error; + if (unlikely(rseq_update_cpu_id(t))) + goto error; + return; + +error: + force_sig(SIGSEGV, t); +} + +#ifdef CONFIG_DEBUG_RSEQ + +/* + * Terminate the process if a syscall is issued within a restartable + * sequence. + */ +void rseq_syscall(struct pt_regs *regs) +{ + unsigned long ip = instruction_pointer(regs); + struct task_struct *t = current; + struct rseq_cs rseq_cs; + + if (!t->rseq) + return; + if (!access_ok(VERIFY_READ, t->rseq, sizeof(*t->rseq)) || + rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs)) + force_sig(SIGSEGV, t); +} + +#endif + +/* + * sys_rseq - setup restartable sequences for caller thread. + */ +SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, + int, flags, u32, sig) +{ + int ret; + + if (flags & RSEQ_FLAG_UNREGISTER) { + /* Unregister rseq for current thread. 
*/ + if (current->rseq != rseq || !current->rseq) + return -EINVAL; + if (current->rseq_len != rseq_len) + return -EINVAL; + if (current->rseq_sig != sig) + return -EPERM; + ret = rseq_reset_rseq_cpu_id(current); + if (ret) + return ret; + current->rseq = NULL; + current->rseq_len = 0; + current->rseq_sig = 0; + return 0; + } + + if (unlikely(flags)) + return -EINVAL; + + if (current->rseq) { + /* + * If rseq is already registered, check whether + * the provided address differs from the prior + * one. + */ + if (current->rseq != rseq || current->rseq_len != rseq_len) + return -EINVAL; + if (current->rseq_sig != sig) + return -EPERM; + /* Already registered. */ + return -EBUSY; + } + + /* + * If there was no rseq previously registered, + * ensure the provided rseq is properly aligned and valid. + */ + if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) || + rseq_len != sizeof(*rseq)) + return -EINVAL; + if (!access_ok(VERIFY_WRITE, rseq, rseq_len)) + return -EFAULT; + current->rseq = rseq; + current->rseq_len = rseq_len; + current->rseq_sig = sig; + /* + * If rseq was previously inactive, and has just been + * registered, ensure the cpu_id_start and cpu_id fields + * are updated before returning to user-space. 
+ */ + rseq_set_notify_resume(current); + + return 0; +} diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e9866f86f304..a98d54cd5535 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1191,6 +1191,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) if (p->sched_class->migrate_task_rq) p->sched_class->migrate_task_rq(p); p->se.nr_migrations++; + rseq_migrate(p); perf_event_task_migrate(p); } @@ -2634,6 +2635,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, { sched_info_switch(rq, prev, next); perf_event_task_sched_out(prev, next); + rseq_preempt(prev); fire_sched_out_preempt_notifiers(prev, next); prepare_task(next); prepare_arch_switch(next); diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 183169c2a75b..86f832d6ff6f 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -432,3 +432,6 @@ COND_SYSCALL(setresgid16); COND_SYSCALL(setresuid16); COND_SYSCALL(setreuid16); COND_SYSCALL(setuid16); + +/* restartable sequence */ +COND_SYSCALL(rseq); -- cgit v1.2.3 From ccba8b64452b8dbf2c9670de026d00f519bb5da0 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Sat, 2 Jun 2018 08:44:08 -0400 Subject: rseq/selftests: Provide Makefile, scripts, gitignore A run_param_test.sh script runs many variants of the parametrizable tests. Wire up the rseq Makefile, add directory entry into MAINTAINERS file. Signed-off-by: Mathieu Desnoyers Signed-off-by: Thomas Gleixner Cc: Joel Fernandes Cc: Peter Zijlstra Cc: Catalin Marinas Cc: Dave Watson Cc: Will Deacon Cc: Shuah Khan Cc: Andi Kleen Cc: linux-kselftest@vger.kernel.org Cc: "H . Peter Anvin" Cc: Chris Lameter Cc: Russell King Cc: Andrew Hunter Cc: Michael Kerrisk Cc: "Paul E . 
McKenney" Cc: Paul Turner Cc: Boqun Feng Cc: Josh Triplett Cc: Steven Rostedt Cc: Ben Maurer Cc: linux-api@vger.kernel.org Cc: Andy Lutomirski Cc: Andrew Morton Cc: Linus Torvalds Link: https://lkml.kernel.org/r/20180602124408.8430-17-mathieu.desnoyers@efficios.com --- MAINTAINERS | 1 + tools/testing/selftests/Makefile | 1 + tools/testing/selftests/rseq/.gitignore | 6 ++ tools/testing/selftests/rseq/Makefile | 30 ++++++ tools/testing/selftests/rseq/run_param_test.sh | 121 +++++++++++++++++++++++++ 5 files changed, 159 insertions(+) create mode 100644 tools/testing/selftests/rseq/.gitignore create mode 100644 tools/testing/selftests/rseq/Makefile create mode 100644 tools/testing/selftests/rseq/run_param_test.sh (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a384243d911b..e743b9dab0c1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11986,6 +11986,7 @@ S: Supported F: kernel/rseq.c F: include/uapi/linux/rseq.h F: include/trace/events/rseq.h +F: tools/testing/selftests/rseq/ RFKILL M: Johannes Berg diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 32aafa92074c..593fb44c9cd4 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -28,6 +28,7 @@ TARGETS += powerpc TARGETS += proc TARGETS += pstore TARGETS += ptrace +TARGETS += rseq TARGETS += seccomp TARGETS += sigaltstack TARGETS += size diff --git a/tools/testing/selftests/rseq/.gitignore b/tools/testing/selftests/rseq/.gitignore new file mode 100644 index 000000000000..cc610da7e369 --- /dev/null +++ b/tools/testing/selftests/rseq/.gitignore @@ -0,0 +1,6 @@ +basic_percpu_ops_test +basic_test +basic_rseq_op_test +param_test +param_test_benchmark +param_test_compare_twice diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile new file mode 100644 index 000000000000..c30c52e1d0d2 --- /dev/null +++ b/tools/testing/selftests/rseq/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0+ OR 
MIT +CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ +LDLIBS += -lpthread + +# Own dependencies because we only want to build against 1st prerequisite, but +# still track changes to header files and depend on shared object. +OVERRIDE_TARGETS = 1 + +TEST_GEN_PROGS = basic_test basic_percpu_ops_test param_test \ + param_test_benchmark param_test_compare_twice + +TEST_GEN_PROGS_EXTENDED = librseq.so + +TEST_PROGS = run_param_test.sh + +include ../lib.mk + +$(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h + $(CC) $(CFLAGS) -shared -fPIC $< $(LDLIBS) -o $@ + +$(OUTPUT)/%: %.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h + $(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@ + +$(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ + rseq.h rseq-*.h + $(CC) $(CFLAGS) -DBENCHMARK $< $(LDLIBS) -lrseq -o $@ + +$(OUTPUT)/param_test_compare_twice: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ + rseq.h rseq-*.h + $(CC) $(CFLAGS) -DRSEQ_COMPARE_TWICE $< $(LDLIBS) -lrseq -o $@ diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh new file mode 100644 index 000000000000..3acd6d75ff9f --- /dev/null +++ b/tools/testing/selftests/rseq/run_param_test.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ or MIT + +EXTRA_ARGS=${@} + +OLDIFS="$IFS" +IFS=$'\n' +TEST_LIST=( + "-T s" + "-T l" + "-T b" + "-T b -M" + "-T m" + "-T m -M" + "-T i" +) + +TEST_NAME=( + "spinlock" + "list" + "buffer" + "buffer with barrier" + "memcpy" + "memcpy with barrier" + "increment" +) +IFS="$OLDIFS" + +REPS=1000 +SLOW_REPS=100 + +function do_tests() +{ + local i=0 + while [ "$i" -lt "${#TEST_LIST[@]}" ]; do + echo "Running test ${TEST_NAME[$i]}" + ./param_test ${TEST_LIST[$i]} -r ${REPS} ${@} ${EXTRA_ARGS} || exit 1 + echo "Running compare-twice test ${TEST_NAME[$i]}" + ./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} ${@} ${EXTRA_ARGS} || exit 1 + let "i++" + done +} + +echo "Default parameters" +do_tests 
+ +echo "Loop injection: 10000 loops" + +OLDIFS="$IFS" +IFS=$'\n' +INJECT_LIST=( + "1" + "2" + "3" + "4" + "5" + "6" + "7" + "8" + "9" +) +IFS="$OLDIFS" + +NR_LOOPS=10000 + +i=0 +while [ "$i" -lt "${#INJECT_LIST[@]}" ]; do + echo "Injecting at <${INJECT_LIST[$i]}>" + do_tests -${INJECT_LIST[i]} ${NR_LOOPS} + let "i++" +done +NR_LOOPS= + +function inject_blocking() +{ + OLDIFS="$IFS" + IFS=$'\n' + INJECT_LIST=( + "7" + "8" + "9" + ) + IFS="$OLDIFS" + + NR_LOOPS=-1 + + i=0 + while [ "$i" -lt "${#INJECT_LIST[@]}" ]; do + echo "Injecting at <${INJECT_LIST[$i]}>" + do_tests -${INJECT_LIST[i]} -1 ${@} + let "i++" + done + NR_LOOPS= +} + +echo "Yield injection (25%)" +inject_blocking -m 4 -y + +echo "Yield injection (50%)" +inject_blocking -m 2 -y + +echo "Yield injection (100%)" +inject_blocking -m 1 -y + +echo "Kill injection (25%)" +inject_blocking -m 4 -k + +echo "Kill injection (50%)" +inject_blocking -m 2 -k + +echo "Kill injection (100%)" +inject_blocking -m 1 -k + +echo "Sleep injection (1ms, 25%)" +inject_blocking -m 4 -s 1 + +echo "Sleep injection (1ms, 50%)" +inject_blocking -m 2 -s 1 + +echo "Sleep injection (1ms, 100%)" +inject_blocking -m 1 -s 1 -- cgit v1.2.3 From 0ab88bacc41df707750d8edf8ce7377d05403790 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Thu, 7 Jun 2018 17:11:42 -0700 Subject: autofs: update MAINTAINERS entry for autofs Update the autofs entry in MAINTAINERS to reflect the rename of autofs4 to autofs. 
Link: http://lkml.kernel.org/r/152626709611.28589.456596640024354223.stgit@pluto.themaw.net Signed-off-by: Ian Kent Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dc241b04d1bd..fff6439819c8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7727,11 +7727,11 @@ W: https://linuxtv.org S: Maintained F: drivers/media/radio/radio-keene* -KERNEL AUTOMOUNTER v4 (AUTOFS4) +KERNEL AUTOMOUNTER M: Ian Kent L: autofs@vger.kernel.org S: Maintained -F: fs/autofs4/ +F: fs/autofs/ KERNEL BUILD + files below scripts/ (unless maintained elsewhere) M: Masahiro Yamada -- cgit v1.2.3 From 6e0832fa432ec99c94caee733c8f5851cf85560b Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Thu, 31 May 2018 09:12:37 +0800 Subject: PCI: Collect all native drivers under drivers/pci/controller/ Native PCI drivers for root complex devices were originally all in drivers/pci/host/. Some of these devices can also be operated in endpoint mode. Drivers for endpoint mode didn't seem to fit in the "host" directory, so we put both the root complex and endpoint drivers in per-device directories, e.g., drivers/pci/dwc/, drivers/pci/cadence/, etc. These per-device directories contain trivial Kconfig and Makefiles and clutter drivers/pci/. Make a new drivers/pci/controllers/ directory and collect all the device-specific drivers there. No functional change intended. 
Link: https://lkml.kernel.org/r/1520304202-232891-1-git-send-email-shawn.lin@rock-chips.com Signed-off-by: Shawn Lin [bhelgaas: changelog] Signed-off-by: Bjorn Helgaas --- MAINTAINERS | 70 +- drivers/pci/Kconfig | 4 +- drivers/pci/Makefile | 6 +- drivers/pci/cadence/Kconfig | 27 - drivers/pci/cadence/Makefile | 4 - drivers/pci/cadence/pcie-cadence-ep.c | 549 ----- drivers/pci/cadence/pcie-cadence-host.c | 336 --- drivers/pci/cadence/pcie-cadence.c | 126 - drivers/pci/cadence/pcie-cadence.h | 311 --- drivers/pci/controller/Kconfig | 275 +++ drivers/pci/controller/Makefile | 49 + drivers/pci/controller/dwc/Kconfig | 197 ++ drivers/pci/controller/dwc/Makefile | 30 + drivers/pci/controller/dwc/pci-dra7xx.c | 846 +++++++ drivers/pci/controller/dwc/pci-exynos.c | 539 +++++ drivers/pci/controller/dwc/pci-imx6.c | 871 +++++++ drivers/pci/controller/dwc/pci-keystone-dw.c | 484 ++++ drivers/pci/controller/dwc/pci-keystone.c | 457 ++++ drivers/pci/controller/dwc/pci-keystone.h | 57 + drivers/pci/controller/dwc/pci-layerscape.c | 341 +++ drivers/pci/controller/dwc/pcie-armada8k.c | 282 +++ drivers/pci/controller/dwc/pcie-artpec6.c | 618 +++++ drivers/pci/controller/dwc/pcie-designware-ep.c | 422 ++++ drivers/pci/controller/dwc/pcie-designware-host.c | 722 ++++++ drivers/pci/controller/dwc/pcie-designware-plat.c | 259 ++ drivers/pci/controller/dwc/pcie-designware.c | 394 +++ drivers/pci/controller/dwc/pcie-designware.h | 387 +++ drivers/pci/controller/dwc/pcie-hisi.c | 398 +++ drivers/pci/controller/dwc/pcie-histb.c | 472 ++++ drivers/pci/controller/dwc/pcie-kirin.c | 515 ++++ drivers/pci/controller/dwc/pcie-qcom.c | 1299 ++++++++++ drivers/pci/controller/dwc/pcie-spear13xx.c | 314 +++ drivers/pci/controller/pci-aardvark.c | 978 ++++++++ drivers/pci/controller/pci-ftpci100.c | 619 +++++ drivers/pci/controller/pci-host-common.c | 118 + drivers/pci/controller/pci-host-generic.c | 100 + drivers/pci/controller/pci-hyperv.c | 2694 +++++++++++++++++++++ 
drivers/pci/controller/pci-mvebu.c | 1313 ++++++++++ drivers/pci/controller/pci-rcar-gen2.c | 428 ++++ drivers/pci/controller/pci-tegra.c | 2531 +++++++++++++++++++ drivers/pci/controller/pci-thunder-ecam.c | 380 +++ drivers/pci/controller/pci-thunder-pem.c | 473 ++++ drivers/pci/controller/pci-v3-semi.c | 963 ++++++++ drivers/pci/controller/pci-versatile.c | 239 ++ drivers/pci/controller/pci-xgene-msi.c | 543 +++++ drivers/pci/controller/pci-xgene.c | 689 ++++++ drivers/pci/controller/pcie-altera-msi.c | 291 +++ drivers/pci/controller/pcie-altera.c | 645 +++++ drivers/pci/controller/pcie-cadence-ep.c | 549 +++++ drivers/pci/controller/pcie-cadence-host.c | 336 +++ drivers/pci/controller/pcie-cadence.c | 126 + drivers/pci/controller/pcie-cadence.h | 311 +++ drivers/pci/controller/pcie-iproc-bcma.c | 112 + drivers/pci/controller/pcie-iproc-msi.c | 671 +++++ drivers/pci/controller/pcie-iproc-platform.c | 157 ++ drivers/pci/controller/pcie-iproc.c | 1432 +++++++++++ drivers/pci/controller/pcie-iproc.h | 119 + drivers/pci/controller/pcie-mediatek.c | 1218 ++++++++++ drivers/pci/controller/pcie-mobiveil.c | 866 +++++++ drivers/pci/controller/pcie-rcar.c | 1222 ++++++++++ drivers/pci/controller/pcie-rockchip-ep.c | 642 +++++ drivers/pci/controller/pcie-rockchip-host.c | 1142 +++++++++ drivers/pci/controller/pcie-rockchip.c | 424 ++++ drivers/pci/controller/pcie-rockchip.h | 338 +++ drivers/pci/controller/pcie-tango.c | 341 +++ drivers/pci/controller/pcie-xilinx-nwl.c | 917 +++++++ drivers/pci/controller/pcie-xilinx.c | 702 ++++++ drivers/pci/controller/vmd.c | 870 +++++++ drivers/pci/dwc/Kconfig | 197 -- drivers/pci/dwc/Makefile | 30 - drivers/pci/dwc/pci-dra7xx.c | 846 ------- drivers/pci/dwc/pci-exynos.c | 539 ----- drivers/pci/dwc/pci-imx6.c | 871 ------- drivers/pci/dwc/pci-keystone-dw.c | 484 ---- drivers/pci/dwc/pci-keystone.c | 457 ---- drivers/pci/dwc/pci-keystone.h | 57 - drivers/pci/dwc/pci-layerscape.c | 341 --- drivers/pci/dwc/pcie-armada8k.c | 282 --- 
drivers/pci/dwc/pcie-artpec6.c | 618 ----- drivers/pci/dwc/pcie-designware-ep.c | 422 ---- drivers/pci/dwc/pcie-designware-host.c | 722 ------ drivers/pci/dwc/pcie-designware-plat.c | 259 -- drivers/pci/dwc/pcie-designware.c | 394 --- drivers/pci/dwc/pcie-designware.h | 387 --- drivers/pci/dwc/pcie-hisi.c | 398 --- drivers/pci/dwc/pcie-histb.c | 472 ---- drivers/pci/dwc/pcie-kirin.c | 515 ---- drivers/pci/dwc/pcie-qcom.c | 1299 ---------- drivers/pci/dwc/pcie-spear13xx.c | 314 --- drivers/pci/host/Kconfig | 246 -- drivers/pci/host/Makefile | 43 - drivers/pci/host/pci-aardvark.c | 978 -------- drivers/pci/host/pci-ftpci100.c | 619 ----- drivers/pci/host/pci-host-common.c | 118 - drivers/pci/host/pci-host-generic.c | 100 - drivers/pci/host/pci-hyperv.c | 2694 --------------------- drivers/pci/host/pci-mvebu.c | 1313 ---------- drivers/pci/host/pci-rcar-gen2.c | 428 ---- drivers/pci/host/pci-tegra.c | 2531 ------------------- drivers/pci/host/pci-thunder-ecam.c | 380 --- drivers/pci/host/pci-thunder-pem.c | 473 ---- drivers/pci/host/pci-v3-semi.c | 963 -------- drivers/pci/host/pci-versatile.c | 239 -- drivers/pci/host/pci-xgene-msi.c | 543 ----- drivers/pci/host/pci-xgene.c | 689 ------ drivers/pci/host/pcie-altera-msi.c | 291 --- drivers/pci/host/pcie-altera.c | 645 ----- drivers/pci/host/pcie-iproc-bcma.c | 112 - drivers/pci/host/pcie-iproc-msi.c | 671 ----- drivers/pci/host/pcie-iproc-platform.c | 157 -- drivers/pci/host/pcie-iproc.c | 1432 ----------- drivers/pci/host/pcie-iproc.h | 119 - drivers/pci/host/pcie-mediatek.c | 1218 ---------- drivers/pci/host/pcie-mobiveil.c | 866 ------- drivers/pci/host/pcie-rcar.c | 1222 ---------- drivers/pci/host/pcie-rockchip-ep.c | 642 ----- drivers/pci/host/pcie-rockchip-host.c | 1142 --------- drivers/pci/host/pcie-rockchip.c | 424 ---- drivers/pci/host/pcie-rockchip.h | 338 --- drivers/pci/host/pcie-tango.c | 341 --- drivers/pci/host/pcie-xilinx-nwl.c | 917 ------- drivers/pci/host/pcie-xilinx.c | 702 ------ 
drivers/pci/host/vmd.c | 870 ------- 123 files changed, 35763 insertions(+), 35767 deletions(-) delete mode 100644 drivers/pci/cadence/Kconfig delete mode 100644 drivers/pci/cadence/Makefile delete mode 100644 drivers/pci/cadence/pcie-cadence-ep.c delete mode 100644 drivers/pci/cadence/pcie-cadence-host.c delete mode 100644 drivers/pci/cadence/pcie-cadence.c delete mode 100644 drivers/pci/cadence/pcie-cadence.h create mode 100644 drivers/pci/controller/Kconfig create mode 100644 drivers/pci/controller/Makefile create mode 100644 drivers/pci/controller/dwc/Kconfig create mode 100644 drivers/pci/controller/dwc/Makefile create mode 100644 drivers/pci/controller/dwc/pci-dra7xx.c create mode 100644 drivers/pci/controller/dwc/pci-exynos.c create mode 100644 drivers/pci/controller/dwc/pci-imx6.c create mode 100644 drivers/pci/controller/dwc/pci-keystone-dw.c create mode 100644 drivers/pci/controller/dwc/pci-keystone.c create mode 100644 drivers/pci/controller/dwc/pci-keystone.h create mode 100644 drivers/pci/controller/dwc/pci-layerscape.c create mode 100644 drivers/pci/controller/dwc/pcie-armada8k.c create mode 100644 drivers/pci/controller/dwc/pcie-artpec6.c create mode 100644 drivers/pci/controller/dwc/pcie-designware-ep.c create mode 100644 drivers/pci/controller/dwc/pcie-designware-host.c create mode 100644 drivers/pci/controller/dwc/pcie-designware-plat.c create mode 100644 drivers/pci/controller/dwc/pcie-designware.c create mode 100644 drivers/pci/controller/dwc/pcie-designware.h create mode 100644 drivers/pci/controller/dwc/pcie-hisi.c create mode 100644 drivers/pci/controller/dwc/pcie-histb.c create mode 100644 drivers/pci/controller/dwc/pcie-kirin.c create mode 100644 drivers/pci/controller/dwc/pcie-qcom.c create mode 100644 drivers/pci/controller/dwc/pcie-spear13xx.c create mode 100644 drivers/pci/controller/pci-aardvark.c create mode 100644 drivers/pci/controller/pci-ftpci100.c create mode 100644 drivers/pci/controller/pci-host-common.c create mode 100644 
drivers/pci/controller/pci-host-generic.c create mode 100644 drivers/pci/controller/pci-hyperv.c create mode 100644 drivers/pci/controller/pci-mvebu.c create mode 100644 drivers/pci/controller/pci-rcar-gen2.c create mode 100644 drivers/pci/controller/pci-tegra.c create mode 100644 drivers/pci/controller/pci-thunder-ecam.c create mode 100644 drivers/pci/controller/pci-thunder-pem.c create mode 100644 drivers/pci/controller/pci-v3-semi.c create mode 100644 drivers/pci/controller/pci-versatile.c create mode 100644 drivers/pci/controller/pci-xgene-msi.c create mode 100644 drivers/pci/controller/pci-xgene.c create mode 100644 drivers/pci/controller/pcie-altera-msi.c create mode 100644 drivers/pci/controller/pcie-altera.c create mode 100644 drivers/pci/controller/pcie-cadence-ep.c create mode 100644 drivers/pci/controller/pcie-cadence-host.c create mode 100644 drivers/pci/controller/pcie-cadence.c create mode 100644 drivers/pci/controller/pcie-cadence.h create mode 100644 drivers/pci/controller/pcie-iproc-bcma.c create mode 100644 drivers/pci/controller/pcie-iproc-msi.c create mode 100644 drivers/pci/controller/pcie-iproc-platform.c create mode 100644 drivers/pci/controller/pcie-iproc.c create mode 100644 drivers/pci/controller/pcie-iproc.h create mode 100644 drivers/pci/controller/pcie-mediatek.c create mode 100644 drivers/pci/controller/pcie-mobiveil.c create mode 100644 drivers/pci/controller/pcie-rcar.c create mode 100644 drivers/pci/controller/pcie-rockchip-ep.c create mode 100644 drivers/pci/controller/pcie-rockchip-host.c create mode 100644 drivers/pci/controller/pcie-rockchip.c create mode 100644 drivers/pci/controller/pcie-rockchip.h create mode 100644 drivers/pci/controller/pcie-tango.c create mode 100644 drivers/pci/controller/pcie-xilinx-nwl.c create mode 100644 drivers/pci/controller/pcie-xilinx.c create mode 100644 drivers/pci/controller/vmd.c delete mode 100644 drivers/pci/dwc/Kconfig delete mode 100644 drivers/pci/dwc/Makefile delete mode 100644 
drivers/pci/dwc/pci-dra7xx.c delete mode 100644 drivers/pci/dwc/pci-exynos.c delete mode 100644 drivers/pci/dwc/pci-imx6.c delete mode 100644 drivers/pci/dwc/pci-keystone-dw.c delete mode 100644 drivers/pci/dwc/pci-keystone.c delete mode 100644 drivers/pci/dwc/pci-keystone.h delete mode 100644 drivers/pci/dwc/pci-layerscape.c delete mode 100644 drivers/pci/dwc/pcie-armada8k.c delete mode 100644 drivers/pci/dwc/pcie-artpec6.c delete mode 100644 drivers/pci/dwc/pcie-designware-ep.c delete mode 100644 drivers/pci/dwc/pcie-designware-host.c delete mode 100644 drivers/pci/dwc/pcie-designware-plat.c delete mode 100644 drivers/pci/dwc/pcie-designware.c delete mode 100644 drivers/pci/dwc/pcie-designware.h delete mode 100644 drivers/pci/dwc/pcie-hisi.c delete mode 100644 drivers/pci/dwc/pcie-histb.c delete mode 100644 drivers/pci/dwc/pcie-kirin.c delete mode 100644 drivers/pci/dwc/pcie-qcom.c delete mode 100644 drivers/pci/dwc/pcie-spear13xx.c delete mode 100644 drivers/pci/host/Kconfig delete mode 100644 drivers/pci/host/Makefile delete mode 100644 drivers/pci/host/pci-aardvark.c delete mode 100644 drivers/pci/host/pci-ftpci100.c delete mode 100644 drivers/pci/host/pci-host-common.c delete mode 100644 drivers/pci/host/pci-host-generic.c delete mode 100644 drivers/pci/host/pci-hyperv.c delete mode 100644 drivers/pci/host/pci-mvebu.c delete mode 100644 drivers/pci/host/pci-rcar-gen2.c delete mode 100644 drivers/pci/host/pci-tegra.c delete mode 100644 drivers/pci/host/pci-thunder-ecam.c delete mode 100644 drivers/pci/host/pci-thunder-pem.c delete mode 100644 drivers/pci/host/pci-v3-semi.c delete mode 100644 drivers/pci/host/pci-versatile.c delete mode 100644 drivers/pci/host/pci-xgene-msi.c delete mode 100644 drivers/pci/host/pci-xgene.c delete mode 100644 drivers/pci/host/pcie-altera-msi.c delete mode 100644 drivers/pci/host/pcie-altera.c delete mode 100644 drivers/pci/host/pcie-iproc-bcma.c delete mode 100644 drivers/pci/host/pcie-iproc-msi.c delete mode 100644 
drivers/pci/host/pcie-iproc-platform.c delete mode 100644 drivers/pci/host/pcie-iproc.c delete mode 100644 drivers/pci/host/pcie-iproc.h delete mode 100644 drivers/pci/host/pcie-mediatek.c delete mode 100644 drivers/pci/host/pcie-mobiveil.c delete mode 100644 drivers/pci/host/pcie-rcar.c delete mode 100644 drivers/pci/host/pcie-rockchip-ep.c delete mode 100644 drivers/pci/host/pcie-rockchip-host.c delete mode 100644 drivers/pci/host/pcie-rockchip.c delete mode 100644 drivers/pci/host/pcie-rockchip.h delete mode 100644 drivers/pci/host/pcie-tango.c delete mode 100644 drivers/pci/host/pcie-xilinx-nwl.c delete mode 100644 drivers/pci/host/pcie-xilinx.c delete mode 100644 drivers/pci/host/vmd.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b4a564213cdf..9b55923b4668 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6607,7 +6607,7 @@ F: arch/x86/hyperv F: drivers/hid/hid-hyperv.c F: drivers/hv/ F: drivers/input/serio/hyperv-keyboard.c -F: drivers/pci/host/pci-hyperv.c +F: drivers/pci/controller/pci-hyperv.c F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/uio/uio_hv_generic.c @@ -9489,7 +9489,7 @@ M: Subrahmanya Lingappa L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt -F: drivers/pci/host/pcie-mobiveil.c +F: drivers/pci/controller/pcie-mobiveil.c MODULE SUPPORT M: Jessica Yu @@ -10791,7 +10791,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/aardvark-pci.txt -F: drivers/pci/host/pci-aardvark.c +F: drivers/pci/controller/pci-aardvark.c PCI DRIVER FOR ALTERA PCIE IP M: Ley Foon Tan @@ -10799,7 +10799,7 @@ L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/altera-pcie.txt -F: drivers/pci/host/pcie-altera.c +F: drivers/pci/controller/pcie-altera.c PCI DRIVER FOR APPLIEDMICRO XGENE 
M: Tanmay Inamdar @@ -10807,7 +10807,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/xgene-pci.txt -F: drivers/pci/host/pci-xgene.c +F: drivers/pci/controller/pci-xgene.c PCI DRIVER FOR ARM VERSATILE PLATFORM M: Rob Herring @@ -10815,7 +10815,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/versatile.txt -F: drivers/pci/host/pci-versatile.c +F: drivers/pci/controller/pci-versatile.c PCI DRIVER FOR ARMADA 8K M: Thomas Petazzoni @@ -10823,14 +10823,14 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/pci-armada8k.txt -F: drivers/pci/dwc/pcie-armada8k.c +F: drivers/pci/controller/dwc/pcie-armada8k.c PCI DRIVER FOR CADENCE PCIE IP M: Alan Douglas L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/cdns,*.txt -F: drivers/pci/cadence/pcie-cadence* +F: drivers/pci/controller/pcie-cadence* PCI DRIVER FOR FREESCALE LAYERSCAPE M: Minghuan Lian @@ -10840,16 +10840,16 @@ L: linuxppc-dev@lists.ozlabs.org L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained -F: drivers/pci/dwc/*layerscape* +F: drivers/pci/controller/dwc/*layerscape* PCI DRIVER FOR GENERIC OF HOSTS M: Will Deacon L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: Documentation/devicetree/bindings/pci/host-generic-pci.txt -F: drivers/pci/host/pci-host-common.c -F: drivers/pci/host/pci-host-generic.c +F: Documentation/devicetree/bindings/pci/controller-generic-pci.txt +F: drivers/pci/controller/pci-host-common.c +F: drivers/pci/controller/pci-host-generic.c PCI DRIVER FOR IMX6 M: Richard Zhu @@ -10858,14 +10858,14 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: 
Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt -F: drivers/pci/dwc/*imx6* +F: drivers/pci/controller/dwc/*imx6* PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD) M: Keith Busch M: Jonathan Derrick L: linux-pci@vger.kernel.org S: Supported -F: drivers/pci/host/vmd.c +F: drivers/pci/controller/vmd.c PCI DRIVER FOR MICROSEMI SWITCHTEC M: Kurt Schwemmer @@ -10885,7 +10885,7 @@ M: Jason Cooper L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: drivers/pci/host/*mvebu* +F: drivers/pci/controller/*mvebu* PCI DRIVER FOR NVIDIA TEGRA M: Thierry Reding @@ -10893,14 +10893,14 @@ L: linux-tegra@vger.kernel.org L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt -F: drivers/pci/host/pci-tegra.c +F: drivers/pci/controller/pci-tegra.c PCI DRIVER FOR RENESAS R-CAR M: Simon Horman L: linux-pci@vger.kernel.org L: linux-renesas-soc@vger.kernel.org S: Maintained -F: drivers/pci/host/*rcar* +F: drivers/pci/controller/*rcar* PCI DRIVER FOR SAMSUNG EXYNOS M: Jingoo Han @@ -10908,7 +10908,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained -F: drivers/pci/dwc/pci-exynos.c +F: drivers/pci/controller/dwc/pci-exynos.c PCI DRIVER FOR SYNOPSYS DESIGNWARE M: Jingoo Han @@ -10916,7 +10916,7 @@ M: Joao Pinto L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/designware-pcie.txt -F: drivers/pci/dwc/*designware* +F: drivers/pci/controller/dwc/*designware* PCI DRIVER FOR TI DRA7XX M: Kishon Vijay Abraham I @@ -10924,14 +10924,14 @@ L: linux-omap@vger.kernel.org L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/ti-pci.txt -F: drivers/pci/dwc/pci-dra7xx.c +F: drivers/pci/controller/dwc/pci-dra7xx.c PCI DRIVER FOR TI KEYSTONE M: Murali Karicheri L: 
linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: drivers/pci/dwc/*keystone* +F: drivers/pci/controller/dwc/*keystone* PCI ENDPOINT SUBSYSTEM M: Kishon Vijay Abraham I @@ -10964,7 +10964,7 @@ L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt -F: drivers/pci/host/pcie-altera-msi.c +F: drivers/pci/controller/pcie-altera-msi.c PCI MSI DRIVER FOR APPLIEDMICRO XGENE M: Duc Dang @@ -10972,7 +10972,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt -F: drivers/pci/host/pci-xgene-msi.c +F: drivers/pci/controller/pci-xgene-msi.c PCI SUBSYSTEM M: Bjorn Helgaas @@ -10998,9 +10998,7 @@ L: linux-pci@vger.kernel.org Q: http://patchwork.ozlabs.org/project/linux-pci/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/ S: Supported -F: drivers/pci/cadence/ -F: drivers/pci/host/ -F: drivers/pci/dwc/ +F: drivers/pci/controller/ PCIE DRIVER FOR AXIS ARTPEC M: Jesper Nilsson @@ -11008,7 +11006,7 @@ L: linux-arm-kernel@axis.com L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/axis,artpec* -F: drivers/pci/dwc/*artpec* +F: drivers/pci/controller/dwc/*artpec* PCIE DRIVER FOR CAVIUM THUNDERX M: David Daney @@ -11016,14 +11014,14 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/pci/pci-thunder-* -F: drivers/pci/host/pci-thunder-* +F: drivers/pci/controller/pci-thunder-* PCIE DRIVER FOR HISILICON M: Zhou Wang L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt -F: drivers/pci/dwc/pcie-hisi.c +F: drivers/pci/controller/dwc/pcie-hisi.c PCIE DRIVER FOR HISILICON KIRIN M: Xiaowei Song @@ -11031,7 +11029,7 @@ M: 
Binghui Wang L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/pcie-kirin.txt -F: drivers/pci/dwc/pcie-kirin.c +F: drivers/pci/controller/dwc/pcie-kirin.c PCIE DRIVER FOR HISILICON STB M: Jianguo Sun @@ -11039,7 +11037,7 @@ M: Shawn Guo L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt -F: drivers/pci/dwc/pcie-histb.c +F: drivers/pci/controller/dwc/pcie-histb.c PCIE DRIVER FOR MEDIATEK M: Ryder Lee @@ -11047,14 +11045,14 @@ L: linux-pci@vger.kernel.org L: linux-mediatek@lists.infradead.org S: Supported F: Documentation/devicetree/bindings/pci/mediatek* -F: drivers/pci/host/*mediatek* +F: drivers/pci/controller/*mediatek* PCIE DRIVER FOR QUALCOMM MSM M: Stanimir Varbanov L: linux-pci@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained -F: drivers/pci/dwc/*qcom* +F: drivers/pci/controller/dwc/*qcom* PCIE DRIVER FOR ROCKCHIP M: Shawn Lin @@ -11062,20 +11060,20 @@ L: linux-pci@vger.kernel.org L: linux-rockchip@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/rockchip-pcie* -F: drivers/pci/host/pcie-rockchip* +F: drivers/pci/controller/pcie-rockchip* PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC M: Linus Walleij L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt -F: drivers/pci/host/pci-v3-semi.c +F: drivers/pci/controller/pci-v3-semi.c PCIE DRIVER FOR ST SPEAR13XX M: Pratyush Anand L: linux-pci@vger.kernel.org S: Maintained -F: drivers/pci/dwc/*spear* +F: drivers/pci/controller/dwc/*spear* PCMCIA SUBSYSTEM M: Dominik Brodowski diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index b2f07635e94d..56ff8f6d31fc 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -145,8 +145,6 @@ config PCI_HYPERV PCI devices from a PCI backend to support PCI driver domains. 
source "drivers/pci/hotplug/Kconfig" -source "drivers/pci/cadence/Kconfig" -source "drivers/pci/dwc/Kconfig" -source "drivers/pci/host/Kconfig" +source "drivers/pci/controller/Kconfig" source "drivers/pci/endpoint/Kconfig" source "drivers/pci/switch/Kconfig" diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 84c9eef6b1c3..535201984b8b 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -28,14 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o obj-$(CONFIG_PCI_ECAM) += ecam.o obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o -obj-y += host/ +obj-y += controller/ obj-y += switch/ # Endpoint library must be initialized before its users obj-$(CONFIG_PCI_ENDPOINT) += endpoint/ -obj-$(CONFIG_PCIE_CADENCE) += cadence/ -# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW -obj-y += dwc/ - ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG diff --git a/drivers/pci/cadence/Kconfig b/drivers/pci/cadence/Kconfig deleted file mode 100644 index e6824cb56c16..000000000000 --- a/drivers/pci/cadence/Kconfig +++ /dev/null @@ -1,27 +0,0 @@ -menu "Cadence PCIe controllers support" - -config PCIE_CADENCE - bool - -config PCIE_CADENCE_HOST - bool "Cadence PCIe host controller" - depends on OF - depends on PCI - select IRQ_DOMAIN - select PCIE_CADENCE - help - Say Y here if you want to support the Cadence PCIe controller in host - mode. This PCIe controller may be embedded into many different vendors - SoCs. - -config PCIE_CADENCE_EP - bool "Cadence PCIe endpoint controller" - depends on OF - depends on PCI_ENDPOINT - select PCIE_CADENCE - help - Say Y here if you want to support the Cadence PCIe controller in - endpoint mode. This PCIe controller may be embedded into many - different vendors SoCs. 
- -endmenu diff --git a/drivers/pci/cadence/Makefile b/drivers/pci/cadence/Makefile deleted file mode 100644 index 719392b97998..000000000000 --- a/drivers/pci/cadence/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o -obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o -obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o diff --git a/drivers/pci/cadence/pcie-cadence-ep.c b/drivers/pci/cadence/pcie-cadence-ep.c deleted file mode 100644 index 3d8283e450a9..000000000000 --- a/drivers/pci/cadence/pcie-cadence-ep.c +++ /dev/null @@ -1,549 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2017 Cadence -// Cadence PCIe endpoint controller driver. -// Author: Cyrille Pitchen - -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-cadence.h" - -#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */ -#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1 -#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3 - -/** - * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver - * @pcie: Cadence PCIe controller - * @max_regions: maximum number of regions supported by hardware - * @ob_region_map: bitmask of mapped outbound regions - * @ob_addr: base addresses in the AXI bus where the outbound regions start - * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ - * dedicated outbound regions is mapped. - * @irq_cpu_addr: base address in the CPU space where a write access triggers - * the sending of a memory write (MSI) / normal message (legacy - * IRQ) TLP through the PCIe bus. - * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ - * dedicated outbound region. - * @irq_pci_fn: the latest PCI function that has updated the mapping of - * the MSI/legacy IRQ dedicated outbound region. - * @irq_pending: bitmask of asserted legacy IRQs. 
- */ -struct cdns_pcie_ep { - struct cdns_pcie pcie; - u32 max_regions; - unsigned long ob_region_map; - phys_addr_t *ob_addr; - phys_addr_t irq_phys_addr; - void __iomem *irq_cpu_addr; - u64 irq_pci_addr; - u8 irq_pci_fn; - u8 irq_pending; -}; - -static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, - struct pci_epf_header *hdr) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - struct cdns_pcie *pcie = &ep->pcie; - - cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid); - cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid); - cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code); - cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE, - hdr->subclass_code | hdr->baseclass_code << 8); - cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE, - hdr->cache_line_size); - cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id); - cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin); - - /* - * Vendor ID can only be modified from function 0, all other functions - * use the same vendor ID as function 0. - */ - if (fn == 0) { - /* Update the vendor IDs. */ - u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) | - CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id); - - cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); - } - - return 0; -} - -static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, - struct pci_epf_bar *epf_bar) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - struct cdns_pcie *pcie = &ep->pcie; - dma_addr_t bar_phys = epf_bar->phys_addr; - enum pci_barno bar = epf_bar->barno; - int flags = epf_bar->flags; - u32 addr0, addr1, reg, cfg, b, aperture, ctrl; - u64 sz; - - /* BAR size is 2^(aperture + 7) */ - sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE); - /* - * roundup_pow_of_two() returns an unsigned long, which is not suited - * for 64bit values. - */ - sz = 1ULL << fls64(sz - 1); - aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... 
*/ - - if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS; - } else { - bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH); - bool is_64bits = sz > SZ_2G; - - if (is_64bits && (bar & 1)) - return -EINVAL; - - if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) - epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; - - if (is_64bits && is_prefetch) - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS; - else if (is_prefetch) - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS; - else if (is_64bits) - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS; - else - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS; - } - - addr0 = lower_32_bits(bar_phys); - addr1 = upper_32_bits(bar_phys); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), - addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), - addr1); - - if (bar < BAR_4) { - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn); - b = bar; - } else { - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn); - b = bar - BAR_4; - } - - cfg = cdns_pcie_readl(pcie, reg); - cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); - cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); - cdns_pcie_writel(pcie, reg, cfg); - - return 0; -} - -static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, - struct pci_epf_bar *epf_bar) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - struct cdns_pcie *pcie = &ep->pcie; - enum pci_barno bar = epf_bar->barno; - u32 reg, cfg, b, ctrl; - - if (bar < BAR_4) { - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn); - b = bar; - } else { - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn); - b = bar - BAR_4; - } - - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; - cfg = cdns_pcie_readl(pcie, reg); - cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); - 
cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); - cdns_pcie_writel(pcie, reg, cfg); - - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); -} - -static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, - u64 pci_addr, size_t size) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - struct cdns_pcie *pcie = &ep->pcie; - u32 r; - - r = find_first_zero_bit(&ep->ob_region_map, - sizeof(ep->ob_region_map) * BITS_PER_LONG); - if (r >= ep->max_regions - 1) { - dev_err(&epc->dev, "no free outbound region\n"); - return -EINVAL; - } - - cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size); - - set_bit(r, &ep->ob_region_map); - ep->ob_addr[r] = addr; - - return 0; -} - -static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, - phys_addr_t addr) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - struct cdns_pcie *pcie = &ep->pcie; - u32 r; - - for (r = 0; r < ep->max_regions - 1; r++) - if (ep->ob_addr[r] == addr) - break; - - if (r == ep->max_regions - 1) - return; - - cdns_pcie_reset_outbound_region(pcie, r); - - ep->ob_addr[r] = 0; - clear_bit(r, &ep->ob_region_map); -} - -static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - struct cdns_pcie *pcie = &ep->pcie; - u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; - u16 flags; - - /* - * Set the Multiple Message Capable bitfield into the Message Control - * register. 
- */ - flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); - flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1); - flags |= PCI_MSI_FLAGS_64BIT; - flags &= ~PCI_MSI_FLAGS_MASKBIT; - cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags); - - return 0; -} - -static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - struct cdns_pcie *pcie = &ep->pcie; - u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; - u16 flags, mmc, mme; - - /* Validate that the MSI feature is actually enabled. */ - flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); - if (!(flags & PCI_MSI_FLAGS_ENABLE)) - return -EINVAL; - - /* - * Get the Multiple Message Enable bitfield from the Message Control - * register. - */ - mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1; - mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; - - return mme; -} - -static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, - u8 intx, bool is_asserted) -{ - struct cdns_pcie *pcie = &ep->pcie; - u32 r = ep->max_regions - 1; - u32 offset; - u16 status; - u8 msg_code; - - intx &= 3; - - /* Set the outbound region if needed. */ - if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || - ep->irq_pci_fn != fn)) { - /* Last region was reserved for IRQ writes. 
*/ - cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r, - ep->irq_phys_addr); - ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; - ep->irq_pci_fn = fn; - } - - if (is_asserted) { - ep->irq_pending |= BIT(intx); - msg_code = MSG_CODE_ASSERT_INTA + intx; - } else { - ep->irq_pending &= ~BIT(intx); - msg_code = MSG_CODE_DEASSERT_INTA + intx; - } - - status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS); - if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { - status ^= PCI_STATUS_INTERRUPT; - cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status); - } - - offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | - CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | - CDNS_PCIE_MSG_NO_DATA; - writel(0, ep->irq_cpu_addr + offset); -} - -static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx) -{ - u16 cmd; - - cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND); - if (cmd & PCI_COMMAND_INTX_DISABLE) - return -EINVAL; - - cdns_pcie_ep_assert_intx(ep, fn, intx, true); - /* - * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq() - * from drivers/pci/dwc/pci-dra7xx.c - */ - mdelay(1); - cdns_pcie_ep_assert_intx(ep, fn, intx, false); - return 0; -} - -static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, - u8 interrupt_num) -{ - struct cdns_pcie *pcie = &ep->pcie; - u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; - u16 flags, mme, data, data_mask; - u8 msi_count; - u64 pci_addr, pci_addr_mask = 0xff; - - /* Check whether the MSI feature has been enabled by the PCI host. */ - flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); - if (!(flags & PCI_MSI_FLAGS_ENABLE)) - return -EINVAL; - - /* Get the number of enabled MSIs */ - mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; - msi_count = 1 << mme; - if (!interrupt_num || interrupt_num > msi_count) - return -EINVAL; - - /* Compute the data value to be written. 
*/ - data_mask = msi_count - 1; - data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64); - data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); - - /* Get the PCI address where to write the data into. */ - pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI); - pci_addr <<= 32; - pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO); - pci_addr &= GENMASK_ULL(63, 2); - - /* Set the outbound region if needed. */ - if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || - ep->irq_pci_fn != fn)) { - /* Last region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1, - false, - ep->irq_phys_addr, - pci_addr & ~pci_addr_mask, - pci_addr_mask + 1); - ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); - ep->irq_pci_fn = fn; - } - writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); - - return 0; -} - -static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, - enum pci_epc_irq_type type, u8 interrupt_num) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - - switch (type) { - case PCI_EPC_IRQ_LEGACY: - return cdns_pcie_ep_send_legacy_irq(ep, fn, 0); - - case PCI_EPC_IRQ_MSI: - return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); - - default: - break; - } - - return -EINVAL; -} - -static int cdns_pcie_ep_start(struct pci_epc *epc) -{ - struct cdns_pcie_ep *ep = epc_get_drvdata(epc); - struct cdns_pcie *pcie = &ep->pcie; - struct pci_epf *epf; - u32 cfg; - - /* - * BIT(0) is hardwired to 1, hence function 0 is always enabled - * and can't be disabled anyway. - */ - cfg = BIT(0); - list_for_each_entry(epf, &epc->pci_epf, list) - cfg |= BIT(epf->func_no); - cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); - - /* - * The PCIe links are automatically established by the controller - * once for all at powerup: the software can neither start nor stop - * those links later at runtime. 
- * - * Then we only have to notify the EP core that our links are already - * established. However we don't call directly pci_epc_linkup() because - * we've already locked the epc->lock. - */ - list_for_each_entry(epf, &epc->pci_epf, list) - pci_epf_linkup(epf); - - return 0; -} - -static const struct pci_epc_ops cdns_pcie_epc_ops = { - .write_header = cdns_pcie_ep_write_header, - .set_bar = cdns_pcie_ep_set_bar, - .clear_bar = cdns_pcie_ep_clear_bar, - .map_addr = cdns_pcie_ep_map_addr, - .unmap_addr = cdns_pcie_ep_unmap_addr, - .set_msi = cdns_pcie_ep_set_msi, - .get_msi = cdns_pcie_ep_get_msi, - .raise_irq = cdns_pcie_ep_raise_irq, - .start = cdns_pcie_ep_start, -}; - -static const struct of_device_id cdns_pcie_ep_of_match[] = { - { .compatible = "cdns,cdns-pcie-ep" }, - - { }, -}; - -static int cdns_pcie_ep_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - struct cdns_pcie_ep *ep; - struct cdns_pcie *pcie; - struct pci_epc *epc; - struct resource *res; - int ret; - - ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); - if (!ep) - return -ENOMEM; - - pcie = &ep->pcie; - pcie->is_rc = false; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); - pcie->reg_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->reg_base)) { - dev_err(dev, "missing \"reg\"\n"); - return PTR_ERR(pcie->reg_base); - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); - if (!res) { - dev_err(dev, "missing \"mem\"\n"); - return -EINVAL; - } - pcie->mem_res = res; - - ret = of_property_read_u32(np, "cdns,max-outbound-regions", - &ep->max_regions); - if (ret < 0) { - dev_err(dev, "missing \"cdns,max-outbound-regions\"\n"); - return ret; - } - ep->ob_addr = devm_kzalloc(dev, ep->max_regions * sizeof(*ep->ob_addr), - GFP_KERNEL); - if (!ep->ob_addr) - return -ENOMEM; - - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync() 
failed\n"); - goto err_get_sync; - } - - /* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */ - cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0)); - - epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops); - if (IS_ERR(epc)) { - dev_err(dev, "failed to create epc device\n"); - ret = PTR_ERR(epc); - goto err_init; - } - - epc_set_drvdata(epc, ep); - - if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) - epc->max_functions = 1; - - ret = pci_epc_mem_init(epc, pcie->mem_res->start, - resource_size(pcie->mem_res)); - if (ret < 0) { - dev_err(dev, "failed to initialize the memory space\n"); - goto err_init; - } - - ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, - SZ_128K); - if (!ep->irq_cpu_addr) { - dev_err(dev, "failed to reserve memory space for MSI\n"); - ret = -ENOMEM; - goto free_epc_mem; - } - ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; - - return 0; - - free_epc_mem: - pci_epc_mem_exit(epc); - - err_init: - pm_runtime_put_sync(dev); - - err_get_sync: - pm_runtime_disable(dev); - - return ret; -} - -static void cdns_pcie_ep_shutdown(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - int ret; - - ret = pm_runtime_put_sync(dev); - if (ret < 0) - dev_dbg(dev, "pm_runtime_put_sync failed\n"); - - pm_runtime_disable(dev); - - /* The PCIe controller can't be disabled. */ -} - -static struct platform_driver cdns_pcie_ep_driver = { - .driver = { - .name = "cdns-pcie-ep", - .of_match_table = cdns_pcie_ep_of_match, - }, - .probe = cdns_pcie_ep_probe, - .shutdown = cdns_pcie_ep_shutdown, -}; -builtin_platform_driver(cdns_pcie_ep_driver); diff --git a/drivers/pci/cadence/pcie-cadence-host.c b/drivers/pci/cadence/pcie-cadence-host.c deleted file mode 100644 index a4ebbd37b553..000000000000 --- a/drivers/pci/cadence/pcie-cadence-host.c +++ /dev/null @@ -1,336 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2017 Cadence -// Cadence PCIe host controller driver. 
-// Author: Cyrille Pitchen - -#include -#include -#include -#include -#include - -#include "pcie-cadence.h" - -/** - * struct cdns_pcie_rc - private data for this PCIe Root Complex driver - * @pcie: Cadence PCIe controller - * @dev: pointer to PCIe device - * @cfg_res: start/end offsets in the physical system memory to map PCI - * configuration space accesses - * @bus_range: first/last buses behind the PCIe host controller - * @cfg_base: IO mapped window to access the PCI configuration space of a - * single function at a time - * @max_regions: maximum number of regions supported by the hardware - * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address - * translation (nbits sets into the "no BAR match" register) - * @vendor_id: PCI vendor ID - * @device_id: PCI device ID - */ -struct cdns_pcie_rc { - struct cdns_pcie pcie; - struct device *dev; - struct resource *cfg_res; - struct resource *bus_range; - void __iomem *cfg_base; - u32 max_regions; - u32 no_bar_nbits; - u16 vendor_id; - u16 device_id; -}; - -static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, - int where) -{ - struct pci_host_bridge *bridge = pci_find_host_bridge(bus); - struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); - struct cdns_pcie *pcie = &rc->pcie; - unsigned int busn = bus->number; - u32 addr0, desc0; - - if (busn == rc->bus_range->start) { - /* - * Only the root port (devfn == 0) is connected to this bus. - * All other PCI devices are behind some bridge hence on another - * bus. - */ - if (devfn) - return NULL; - - return pcie->reg_base + (where & 0xfff); - } - - /* Update Output registers for AXI region 0. */ - addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) | - CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) | - CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0); - - /* Configuration Type 0 or Type 1 access. 
*/ - desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | - CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); - /* - * The bus number was already set once for all in desc1 by - * cdns_pcie_host_init_address_translation(). - */ - if (busn == rc->bus_range->start + 1) - desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; - else - desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1; - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0); - - return rc->cfg_base + (where & 0xfff); -} - -static struct pci_ops cdns_pcie_host_ops = { - .map_bus = cdns_pci_map_bus, - .read = pci_generic_config_read, - .write = pci_generic_config_write, -}; - -static const struct of_device_id cdns_pcie_host_of_match[] = { - { .compatible = "cdns,cdns-pcie-host" }, - - { }, -}; - -static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) -{ - struct cdns_pcie *pcie = &rc->pcie; - u32 value, ctrl; - - /* - * Set the root complex BAR configuration register: - * - disable both BAR0 and BAR1. - * - enable Prefetchable Memory Base and Limit registers in type 1 - * config space (64 bits). - * - enable IO Base and Limit registers in type 1 config - * space (32 bits). 
- */ - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; - value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) | - CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) | - CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE | - CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS | - CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE | - CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS; - cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); - - /* Set root port configuration space */ - if (rc->vendor_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); - if (rc->device_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); - - cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0); - cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0); - cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); - - return 0; -} - -static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) -{ - struct cdns_pcie *pcie = &rc->pcie; - struct resource *cfg_res = rc->cfg_res; - struct resource *mem_res = pcie->mem_res; - struct resource *bus_range = rc->bus_range; - struct device *dev = rc->dev; - struct device_node *np = dev->of_node; - struct of_pci_range_parser parser; - struct of_pci_range range; - u32 addr0, addr1, desc1; - u64 cpu_addr; - int r, err; - - /* - * Reserve region 0 for PCI configure space accesses: - * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by - * cdns_pci_map_bus(), other region registers are set here once for all. - */ - addr1 = 0; /* Should be programmed to zero. 
*/ - desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); - - cpu_addr = cfg_res->start - mem_res->start; - addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | - (lower_32_bits(cpu_addr) & GENMASK(31, 8)); - addr1 = upper_32_bits(cpu_addr); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1); - - err = of_pci_range_parser_init(&parser, np); - if (err) - return err; - - r = 1; - for_each_of_pci_range(&parser, &range) { - bool is_io; - - if (r >= rc->max_regions) - break; - - if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM) - is_io = false; - else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO) - is_io = true; - else - continue; - - cdns_pcie_set_outbound_region(pcie, 0, r, is_io, - range.cpu_addr, - range.pci_addr, - range.size); - r++; - } - - /* - * Set Root Port no BAR match Inbound Translation registers: - * needed for MSI and DMA. - * Root Port BAR0 and BAR1 are disabled, hence no need to set their - * inbound translation registers. 
- */ - addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits); - addr1 = 0; - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1); - - return 0; -} - -static int cdns_pcie_host_init(struct device *dev, - struct list_head *resources, - struct cdns_pcie_rc *rc) -{ - struct resource *bus_range = NULL; - int err; - - /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range); - if (err) - return err; - - rc->bus_range = bus_range; - rc->pcie.bus = bus_range->start; - - err = cdns_pcie_host_init_root_port(rc); - if (err) - goto err_out; - - err = cdns_pcie_host_init_address_translation(rc); - if (err) - goto err_out; - - return 0; - - err_out: - pci_free_resource_list(resources); - return err; -} - -static int cdns_pcie_host_probe(struct platform_device *pdev) -{ - const char *type; - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - struct pci_host_bridge *bridge; - struct list_head resources; - struct cdns_pcie_rc *rc; - struct cdns_pcie *pcie; - struct resource *res; - int ret; - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); - if (!bridge) - return -ENOMEM; - - rc = pci_host_bridge_priv(bridge); - rc->dev = dev; - - pcie = &rc->pcie; - pcie->is_rc = true; - - rc->max_regions = 32; - of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions); - - rc->no_bar_nbits = 32; - of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits); - - rc->vendor_id = 0xffff; - of_property_read_u16(np, "vendor-id", &rc->vendor_id); - - rc->device_id = 0xffff; - of_property_read_u16(np, "device-id", &rc->device_id); - - type = of_get_property(np, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); - pcie->reg_base = 
devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->reg_base)) { - dev_err(dev, "missing \"reg\"\n"); - return PTR_ERR(pcie->reg_base); - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); - rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(rc->cfg_base)) { - dev_err(dev, "missing \"cfg\"\n"); - return PTR_ERR(rc->cfg_base); - } - rc->cfg_res = res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); - if (!res) { - dev_err(dev, "missing \"mem\"\n"); - return -EINVAL; - } - pcie->mem_res = res; - - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync() failed\n"); - goto err_get_sync; - } - - ret = cdns_pcie_host_init(dev, &resources, rc); - if (ret) - goto err_init; - - list_splice_init(&resources, &bridge->windows); - bridge->dev.parent = dev; - bridge->busnr = pcie->bus; - bridge->ops = &cdns_pcie_host_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_host_probe(bridge); - if (ret < 0) - goto err_host_probe; - - return 0; - - err_host_probe: - pci_free_resource_list(&resources); - - err_init: - pm_runtime_put_sync(dev); - - err_get_sync: - pm_runtime_disable(dev); - - return ret; -} - -static struct platform_driver cdns_pcie_host_driver = { - .driver = { - .name = "cdns-pcie-host", - .of_match_table = cdns_pcie_host_of_match, - }, - .probe = cdns_pcie_host_probe, -}; -builtin_platform_driver(cdns_pcie_host_driver); diff --git a/drivers/pci/cadence/pcie-cadence.c b/drivers/pci/cadence/pcie-cadence.c deleted file mode 100644 index 138d113eb45d..000000000000 --- a/drivers/pci/cadence/pcie-cadence.c +++ /dev/null @@ -1,126 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2017 Cadence -// Cadence PCIe controller driver. 
-// Author: Cyrille Pitchen - -#include - -#include "pcie-cadence.h" - -void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, - u32 r, bool is_io, - u64 cpu_addr, u64 pci_addr, size_t size) -{ - /* - * roundup_pow_of_two() returns an unsigned long, which is not suited - * for 64bit values. - */ - u64 sz = 1ULL << fls64(size - 1); - int nbits = ilog2(sz); - u32 addr0, addr1, desc0, desc1; - - if (nbits < 8) - nbits = 8; - - /* Set the PCI address */ - addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) | - (lower_32_bits(pci_addr) & GENMASK(31, 8)); - addr1 = upper_32_bits(pci_addr); - - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1); - - /* Set the PCIe header descriptor */ - if (is_io) - desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO; - else - desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM; - desc1 = 0; - - /* - * Whatever Bit [23] is set or not inside DESC0 register of the outbound - * PCIe descriptor, the PCI function number must be set into - * Bits [26:24] of DESC0 anyway. - * - * In Root Complex mode, the function number is always 0 but in Endpoint - * mode, the PCIe controller may support more than one function. This - * function number needs to be set properly into the outbound PCIe - * descriptor. - * - * Besides, setting Bit [23] is mandatory when in Root Complex mode: - * then the driver must provide the bus, resp. device, number in - * Bits [7:0] of DESC1, resp. Bits[31:27] of DESC0. Like the function - * number, the device number is always 0 in Root Complex mode. - * - * However when in Endpoint mode, we can clear Bit [23] of DESC0, hence - * the PCIe controller will use the captured values for the bus and - * device numbers. - */ - if (pcie->is_rc) { - /* The device and function numbers are always 0. 
*/ - desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | - CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); - desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); - } else { - /* - * Use captured values for bus and device numbers but still - * need to set the function number. - */ - desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); - } - - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); - - /* Set the CPU address */ - cpu_addr -= pcie->mem_res->start; - addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | - (lower_32_bits(cpu_addr) & GENMASK(31, 8)); - addr1 = upper_32_bits(cpu_addr); - - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); -} - -void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, - u32 r, u64 cpu_addr) -{ - u32 addr0, addr1, desc0, desc1; - - desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG; - desc1 = 0; - - /* See cdns_pcie_set_outbound_region() comments above. 
*/ - if (pcie->is_rc) { - desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | - CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); - desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); - } else { - desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); - } - - /* Set the CPU address */ - cpu_addr -= pcie->mem_res->start; - addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | - (lower_32_bits(cpu_addr) & GENMASK(31, 8)); - addr1 = upper_32_bits(cpu_addr); - - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); -} - -void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r) -{ - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); - - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0); - - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0); -} diff --git a/drivers/pci/cadence/pcie-cadence.h b/drivers/pci/cadence/pcie-cadence.h deleted file mode 100644 index 4bb27333b05c..000000000000 --- a/drivers/pci/cadence/pcie-cadence.h +++ /dev/null @@ -1,311 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2017 Cadence -// Cadence PCIe controller driver. 
-// Author: Cyrille Pitchen - -#ifndef _PCIE_CADENCE_H -#define _PCIE_CADENCE_H - -#include -#include - -/* - * Local Management Registers - */ -#define CDNS_PCIE_LM_BASE 0x00100000 - -/* Vendor ID Register */ -#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044) -#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0) -#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0 -#define CDNS_PCIE_LM_ID_VENDOR(vid) \ - (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK) -#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16) -#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16 -#define CDNS_PCIE_LM_ID_SUBSYS(sub) \ - (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK) - -/* Root Port Requestor ID Register */ -#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228) -#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0) -#define CDNS_PCIE_LM_RP_RID_SHIFT 0 -#define CDNS_PCIE_LM_RP_RID_(rid) \ - (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK) - -/* Endpoint Bus and Device Number Register */ -#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c) -#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0) -#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0 -#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8) -#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8 - -/* Endpoint Function f BAR b Configuration Registers */ -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \ - (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \ - (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ - (GENMASK(4, 0) << ((b) * 8)) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ - (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ - (GENMASK(7, 5) << ((b) * 8)) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ - (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) - -/* Endpoint Function Configuration Register */ 
-#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0) - -/* Root Complex BAR Configuration Register */ -#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \ - (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \ - (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \ - (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \ - (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK) -#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17) -#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0 -#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18) -#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19) -#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0 -#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20) -#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31) - -/* BAR control values applicable to both Endpoint Function and Root Complex */ -#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 - - -/* - * Endpoint Function Registers (PCI configuration space for endpoint functions) - */ -#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) - -#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90 - -/* - * Root Port Registers (PCI configuration space for the root port function) - */ -#define CDNS_PCIE_RP_BASE 0x00200000 - - -/* - * Address 
Translation Registers - */ -#define CDNS_PCIE_AT_BASE 0x00400000 - -/* Region r Outbound AXI to PCIe Address Translation Register 0 */ -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ - (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \ - (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ - (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ - (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) - -/* Region r Outbound AXI to PCIe Address Translation Register 1 */ -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ - (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) - -/* Region r Outbound PCIe Descriptor Register 0 */ -#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \ - (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0) -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2 -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6 -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd -/* Bit 23 MUST be set in RC mode. 
*/ -#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) -#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) -#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ - (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) - -/* Region r Outbound PCIe Descriptor Register 1 */ -#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \ - (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020) -#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0) -#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \ - ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK) - -/* Region r AXI Region Base Address Register 0 */ -#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ - (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) -#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0) -#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \ - (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK) - -/* Region r AXI Region Base Address Register 1 */ -#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ - (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020) - -/* Root Port BAR Inbound PCIe to AXI Address Translation Register */ -#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \ - (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008) -#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0) -#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \ - (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK) -#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \ - (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008) - -enum cdns_pcie_rp_bar { - RP_BAR0, - RP_BAR1, - RP_NO_BAR -}; - -/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ -#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ - (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) -#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ - (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) - -/* Normal/Vendor specific message access: offset inside some outbound region */ -#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK 
GENMASK(7, 5) -#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \ - (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK) -#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) -#define CDNS_PCIE_NORMAL_MSG_CODE(code) \ - (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) -#define CDNS_PCIE_MSG_NO_DATA BIT(16) - -enum cdns_pcie_msg_code { - MSG_CODE_ASSERT_INTA = 0x20, - MSG_CODE_ASSERT_INTB = 0x21, - MSG_CODE_ASSERT_INTC = 0x22, - MSG_CODE_ASSERT_INTD = 0x23, - MSG_CODE_DEASSERT_INTA = 0x24, - MSG_CODE_DEASSERT_INTB = 0x25, - MSG_CODE_DEASSERT_INTC = 0x26, - MSG_CODE_DEASSERT_INTD = 0x27, -}; - -enum cdns_pcie_msg_routing { - /* Route to Root Complex */ - MSG_ROUTING_TO_RC, - - /* Use Address Routing */ - MSG_ROUTING_BY_ADDR, - - /* Use ID Routing */ - MSG_ROUTING_BY_ID, - - /* Route as Broadcast Message from Root Complex */ - MSG_ROUTING_BCAST, - - /* Local message; terminate at receiver (INTx messages) */ - MSG_ROUTING_LOCAL, - - /* Gather & route to Root Complex (PME_TO_Ack message) */ - MSG_ROUTING_GATHER, -}; - -/** - * struct cdns_pcie - private data for Cadence PCIe controller drivers - * @reg_base: IO mapped register base - * @mem_res: start/end offsets in the physical system memory to map PCI accesses - * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint. 
- * @bus: In Root Complex mode, the bus number - */ -struct cdns_pcie { - void __iomem *reg_base; - struct resource *mem_res; - bool is_rc; - u8 bus; -}; - -/* Register access */ -static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) -{ - writeb(value, pcie->reg_base + reg); -} - -static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value) -{ - writew(value, pcie->reg_base + reg); -} - -static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) -{ - writel(value, pcie->reg_base + reg); -} - -static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) -{ - return readl(pcie->reg_base + reg); -} - -/* Root Port register access */ -static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie, - u32 reg, u8 value) -{ - writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); -} - -static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie, - u32 reg, u16 value) -{ - writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); -} - -/* Endpoint Function register access */ -static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn, - u32 reg, u8 value) -{ - writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - -static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn, - u32 reg, u16 value) -{ - writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - -static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, - u32 reg, u16 value) -{ - writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - -static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg) -{ - return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - -static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg) -{ - return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - -static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) -{ - return readl(pcie->reg_base + 
CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - -void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, - u32 r, bool is_io, - u64 cpu_addr, u64 pci_addr, size_t size); - -void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, - u32 r, u64 cpu_addr); - -void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r); - -#endif /* _PCIE_CADENCE_H */ diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig new file mode 100644 index 000000000000..18fa09b3ac8f --- /dev/null +++ b/drivers/pci/controller/Kconfig @@ -0,0 +1,275 @@ +# SPDX-License-Identifier: GPL-2.0 + +menu "PCI controller drivers" + depends on PCI + +config PCI_MVEBU + bool "Marvell EBU PCIe controller" + depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST + depends on MVEBU_MBUS + depends on ARM + depends on OF + +config PCI_AARDVARK + bool "Aardvark PCIe controller" + depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST + depends on OF + depends on PCI_MSI_IRQ_DOMAIN + help + Add support for Aardvark 64bit PCIe Host Controller. This + controller is part of the South Bridge of the Marvel Armada + 3700 SoC. + +menu "Cadence PCIe controllers support" + +config PCIE_CADENCE + bool + +config PCIE_CADENCE_HOST + bool "Cadence PCIe host controller" + depends on OF + depends on PCI + select IRQ_DOMAIN + select PCIE_CADENCE + help + Say Y here if you want to support the Cadence PCIe controller in host + mode. This PCIe controller may be embedded into many different vendors + SoCs. + +config PCIE_CADENCE_EP + bool "Cadence PCIe endpoint controller" + depends on OF + depends on PCI_ENDPOINT + select PCIE_CADENCE + help + Say Y here if you want to support the Cadence PCIe controller in + endpoint mode. This PCIe controller may be embedded into many + different vendors SoCs. 
+ +endmenu + +config PCIE_XILINX_NWL + bool "NWL PCIe Core" + depends on ARCH_ZYNQMP || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + help + Say 'Y' here if you want kernel support for Xilinx + NWL PCIe controller. The controller can act as Root Port + or End Point. The current option selection will only + support root port enabling. + +config PCI_FTPCI100 + bool "Faraday Technology FTPCI100 PCI controller" + depends on OF + default ARCH_GEMINI + +config PCI_TEGRA + bool "NVIDIA Tegra PCIe controller" + depends on ARCH_TEGRA || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + help + Say Y here if you want support for the PCIe host controller found + on NVIDIA Tegra SoCs. + +config PCI_RCAR_GEN2 + bool "Renesas R-Car Gen2 Internal PCI controller" + depends on ARCH_RENESAS || COMPILE_TEST + depends on ARM + help + Say Y here if you want internal PCI support on R-Car Gen2 SoC. + There are 3 internal PCI controllers available with a single + built-in EHCI/OHCI host controller present on each one. + +config PCIE_RCAR + bool "Renesas R-Car PCIe controller" + depends on ARCH_RENESAS || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + help + Say Y here if you want PCIe controller support on R-Car SoCs. + +config PCI_HOST_COMMON + bool + select PCI_ECAM + +config PCI_HOST_GENERIC + bool "Generic PCI host controller" + depends on OF + select PCI_HOST_COMMON + select IRQ_DOMAIN + select PCI_DOMAINS + help + Say Y here if you want to support a simple generic PCI host + controller, such as the one emulated by kvmtool. + +config PCIE_XILINX + bool "Xilinx AXI PCIe host bridge support" + depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST + help + Say 'Y' here if you want kernel to support the Xilinx AXI PCIe + Host Bridge driver. + +config PCI_XGENE + bool "X-Gene PCIe controller" + depends on ARM64 || COMPILE_TEST + depends on OF || (ACPI && PCI_QUIRKS) + help + Say Y here if you want internal PCI support on APM X-Gene SoC. 
+ There are 5 internal PCIe ports available. Each port is GEN3 capable + and have varied lanes from x1 to x8. + +config PCI_XGENE_MSI + bool "X-Gene v1 PCIe MSI feature" + depends on PCI_XGENE + depends on PCI_MSI_IRQ_DOMAIN + default y + help + Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. + This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. + +config PCI_V3_SEMI + bool "V3 Semiconductor PCI controller" + depends on OF + depends on ARM || COMPILE_TEST + default ARCH_INTEGRATOR_AP + +config PCI_VERSATILE + bool "ARM Versatile PB PCI controller" + depends on ARCH_VERSATILE + +config PCIE_IPROC + tristate + select PCI_DOMAINS + help + This enables the iProc PCIe core controller support for Broadcom's + iProc family of SoCs. An appropriate bus interface driver needs + to be enabled to select this. + +config PCIE_IPROC_PLATFORM + tristate "Broadcom iProc PCIe platform bus driver" + depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST) + depends on OF + select PCIE_IPROC + default ARCH_BCM_IPROC + help + Say Y here if you want to use the Broadcom iProc PCIe controller + through the generic platform bus interface + +config PCIE_IPROC_BCMA + tristate "Broadcom iProc PCIe BCMA bus driver" + depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST) + select PCIE_IPROC + select BCMA + default ARCH_BCM_5301X + help + Say Y here if you want to use the Broadcom iProc PCIe controller + through the BCMA bus interface + +config PCIE_IPROC_MSI + bool "Broadcom iProc PCIe MSI support" + depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA + depends on PCI_MSI_IRQ_DOMAIN + default ARCH_BCM_IPROC + help + Say Y here if you want to enable MSI support for Broadcom's iProc + PCIe controller + +config PCIE_ALTERA + bool "Altera PCIe controller" + depends on ARM || NIOS2 || COMPILE_TEST + select PCI_DOMAINS + help + Say Y here if you want to enable PCIe controller support on Altera + FPGA. 
+ +config PCIE_ALTERA_MSI + bool "Altera PCIe MSI feature" + depends on PCIE_ALTERA + depends on PCI_MSI_IRQ_DOMAIN + help + Say Y here if you want PCIe MSI support for the Altera FPGA. + This MSI driver supports Altera MSI to GIC controller IP. + +config PCI_HOST_THUNDER_PEM + bool "Cavium Thunder PCIe controller to off-chip devices" + depends on ARM64 || COMPILE_TEST + depends on OF || (ACPI && PCI_QUIRKS) + select PCI_HOST_COMMON + help + Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs. + +config PCI_HOST_THUNDER_ECAM + bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon" + depends on ARM64 || COMPILE_TEST + depends on OF || (ACPI && PCI_QUIRKS) + select PCI_HOST_COMMON + help + Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. + +config PCIE_ROCKCHIP + bool + depends on PCI + +config PCIE_ROCKCHIP_HOST + tristate "Rockchip PCIe host controller" + depends on ARCH_ROCKCHIP || COMPILE_TEST + depends on OF + depends on PCI_MSI_IRQ_DOMAIN + select MFD_SYSCON + select PCIE_ROCKCHIP + help + Say Y here if you want internal PCI support on Rockchip SoC. + There is 1 internal PCIe port available to support GEN2 with + 4 slots. + +config PCIE_ROCKCHIP_EP + bool "Rockchip PCIe endpoint controller" + depends on ARCH_ROCKCHIP || COMPILE_TEST + depends on OF + depends on PCI_ENDPOINT + select MFD_SYSCON + select PCIE_ROCKCHIP + help + Say Y here if you want to support Rockchip PCIe controller in + endpoint mode on Rockchip SoC. There is 1 internal PCIe port + available to support GEN2 with 4 slots. + +config PCIE_MEDIATEK + bool "MediaTek PCIe controller" + depends on ARCH_MEDIATEK || COMPILE_TEST + depends on OF + depends on PCI_MSI_IRQ_DOMAIN + help + Say Y here if you want to enable PCIe controller support on + MediaTek SoCs. 
+ +config PCIE_TANGO_SMP8759 + bool "Tango SMP8759 PCIe controller (DANGEROUS)" + depends on ARCH_TANGO && PCI_MSI && OF + depends on BROKEN + select PCI_HOST_COMMON + help + Say Y here to enable PCIe controller support for Sigma Designs + Tango SMP8759-based systems. + + Note: The SMP8759 controller multiplexes PCI config and MMIO + accesses, and Linux doesn't provide a way to serialize them. + This can lead to data corruption if drivers perform concurrent + config and MMIO accesses. + +config VMD + depends on PCI_MSI && X86_64 && SRCU + tristate "Intel Volume Management Device Driver" + ---help--- + Adds support for the Intel Volume Management Device (VMD). VMD is a + secondary PCI host bridge that allows PCI Express root ports, + and devices attached to them, to be removed from the default + PCI domain and placed within the VMD domain. This provides + more bus resources than are otherwise possible with a + single domain. If you know your system provides one of these and + has devices attached to it, say Y; if you are not sure, say N. + + To compile this driver as a module, choose M here: the + module will be called vmd. 
+ +source "drivers/pci/controller/dwc/Kconfig" +endmenu diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile new file mode 100644 index 000000000000..24322b92f200 --- /dev/null +++ b/drivers/pci/controller/Makefile @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o +obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o +obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o +obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o +obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o +obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o +obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o +obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o +obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o +obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o +obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o +obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o +obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o +obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o +obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o +obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o +obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o +obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o +obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o +obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o +obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o +obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o +obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o +obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o +obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o +obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o +obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o +obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o +obj-$(CONFIG_VMD) += vmd.o +# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW +obj-y += dwc/ + + +# The following drivers are for devices that use the generic ACPI +# pci_root.c driver but don't support standard ECAM config access. 
+# They contain MCFG quirks to replace the generic ECAM accessors with +# device-specific ones that are shared with the DT driver. + +# The ACPI driver is generic and should not require driver-specific +# config options to be enabled, so we always build these drivers on +# ARM64 and use internal ifdefs to only build the pieces we need +# depending on whether ACPI, the DT driver, or both are enabled. + +ifdef CONFIG_PCI +obj-$(CONFIG_ARM64) += pci-thunder-ecam.o +obj-$(CONFIG_ARM64) += pci-thunder-pem.o +obj-$(CONFIG_ARM64) += pci-xgene.o +endif diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig new file mode 100644 index 000000000000..16f52c626b4b --- /dev/null +++ b/drivers/pci/controller/dwc/Kconfig @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: GPL-2.0 + +menu "DesignWare PCI Core Support" + depends on PCI + +config PCIE_DW + bool + +config PCIE_DW_HOST + bool + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW + +config PCIE_DW_EP + bool + depends on PCI_ENDPOINT + select PCIE_DW + +config PCI_DRA7XX + bool + +config PCI_DRA7XX_HOST + bool "TI DRA7xx PCIe controller Host Mode" + depends on SOC_DRA7XX || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + depends on OF && HAS_IOMEM && TI_PIPE3 + select PCIE_DW_HOST + select PCI_DRA7XX + default y + help + Enables support for the PCIe controller in the DRA7xx SoC to work in + host mode. There are two instances of PCIe controller in DRA7xx. + This controller can work either as EP or RC. In order to enable + host-specific features PCI_DRA7XX_HOST must be selected and in order + to enable device-specific features PCI_DRA7XX_EP must be selected. + This uses the DesignWare core. + +config PCI_DRA7XX_EP + bool "TI DRA7xx PCIe controller Endpoint Mode" + depends on SOC_DRA7XX || COMPILE_TEST + depends on PCI_ENDPOINT + depends on OF && HAS_IOMEM && TI_PIPE3 + select PCIE_DW_EP + select PCI_DRA7XX + help + Enables support for the PCIe controller in the DRA7xx SoC to work in + endpoint mode. 
There are two instances of PCIe controller in DRA7xx. + This controller can work either as EP or RC. In order to enable + host-specific features PCI_DRA7XX_HOST must be selected and in order + to enable device-specific features PCI_DRA7XX_EP must be selected. + This uses the DesignWare core. + +config PCIE_DW_PLAT + bool + +config PCIE_DW_PLAT_HOST + bool "Platform bus based DesignWare PCIe Controller - Host mode" + depends on PCI && PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + select PCIE_DW_PLAT + default y + help + Enables support for the PCIe controller in the Designware IP to + work in host mode. There are two instances of PCIe controller in + Designware IP. + This controller can work either as EP or RC. In order to enable + host-specific features PCIE_DW_PLAT_HOST must be selected and in + order to enable device-specific features PCI_DW_PLAT_EP must be + selected. + +config PCIE_DW_PLAT_EP + bool "Platform bus based DesignWare PCIe Controller - Endpoint mode" + depends on PCI && PCI_MSI_IRQ_DOMAIN + depends on PCI_ENDPOINT + select PCIE_DW_EP + select PCIE_DW_PLAT + help + Enables support for the PCIe controller in the Designware IP to + work in endpoint mode. There are two instances of PCIe controller + in Designware IP. + This controller can work either as EP or RC. In order to enable + host-specific features PCIE_DW_PLAT_HOST must be selected and in + order to enable device-specific features PCI_DW_PLAT_EP must be + selected. 
+ +config PCI_EXYNOS + bool "Samsung Exynos PCIe controller" + depends on SOC_EXYNOS5440 || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + +config PCI_IMX6 + bool "Freescale i.MX6 PCIe controller" + depends on SOC_IMX6Q || (ARM && COMPILE_TEST) + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + +config PCIE_SPEAR13XX + bool "STMicroelectronics SPEAr PCIe controller" + depends on ARCH_SPEAR13XX || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here if you want PCIe support on SPEAr13XX SoCs. + +config PCI_KEYSTONE + bool "TI Keystone PCIe controller" + depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST) + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here if you want to enable PCI controller support on Keystone + SoCs. The PCI controller on Keystone is based on DesignWare hardware + and therefore the driver re-uses the DesignWare core functions to + implement the driver. + +config PCI_LAYERSCAPE + bool "Freescale Layerscape PCIe controller" + depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) + depends on PCI_MSI_IRQ_DOMAIN + select MFD_SYSCON + select PCIE_DW_HOST + help + Say Y here if you want PCIe controller support on Layerscape SoCs. + +config PCI_HISI + depends on OF && (ARM64 || COMPILE_TEST) + bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers" + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + select PCI_HOST_COMMON + help + Say Y here if you want PCIe controller support on HiSilicon + Hip05 and Hip06 SoCs + +config PCIE_QCOM + bool "Qualcomm PCIe controller" + depends on OF && (ARCH_QCOM || COMPILE_TEST) + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here to enable PCIe controller support on Qualcomm SoCs. The + PCIe controller uses the DesignWare core plus Qualcomm-specific + hardware wrappers. 
+ +config PCIE_ARMADA_8K + bool "Marvell Armada-8K PCIe controller" + depends on ARCH_MVEBU || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here if you want to enable PCIe controller support on + Armada-8K SoCs. The PCIe controller on Armada-8K is based on + DesignWare hardware and therefore the driver re-uses the + DesignWare core functions to implement the driver. + +config PCIE_ARTPEC6 + bool + +config PCIE_ARTPEC6_HOST + bool "Axis ARTPEC-6 PCIe controller Host Mode" + depends on MACH_ARTPEC6 || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + select PCIE_ARTPEC6 + help + Enables support for the PCIe controller in the ARTPEC-6 SoC to work in + host mode. This uses the DesignWare core. + +config PCIE_ARTPEC6_EP + bool "Axis ARTPEC-6 PCIe controller Endpoint Mode" + depends on MACH_ARTPEC6 || COMPILE_TEST + depends on PCI_ENDPOINT + select PCIE_DW_EP + select PCIE_ARTPEC6 + help + Enables support for the PCIe controller in the ARTPEC-6 SoC to work in + endpoint mode. This uses the DesignWare core. + +config PCIE_KIRIN + depends on OF && (ARM64 || COMPILE_TEST) + bool "HiSilicon Kirin series SoCs PCIe controllers" + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here if you want PCIe controller support + on HiSilicon Kirin series SoCs. 
+ +config PCIE_HISI_STB + bool "HiSilicon STB SoCs PCIe controllers" + depends on ARCH_HISI || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + select PCIE_DW_HOST + help + Say Y here if you want PCIe controller support on HiSilicon STB SoCs + +endmenu diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile new file mode 100644 index 000000000000..5d2ce72c7a52 --- /dev/null +++ b/drivers/pci/controller/dwc/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PCIE_DW) += pcie-designware.o +obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o +obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o +obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o +obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o +obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o +obj-$(CONFIG_PCI_IMX6) += pci-imx6.o +obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o +obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o +obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o +obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o +obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o +obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o +obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o +obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o + +# The following drivers are for devices that use the generic ACPI +# pci_root.c driver but don't support standard ECAM config access. +# They contain MCFG quirks to replace the generic ECAM accessors with +# device-specific ones that are shared with the DT driver. + +# The ACPI driver is generic and should not require driver-specific +# config options to be enabled, so we always build these drivers on +# ARM64 and use internal ifdefs to only build the pieces we need +# depending on whether ACPI, the DT driver, or both are enabled. 
+ +ifdef CONFIG_PCI +obj-$(CONFIG_ARM64) += pcie-hisi.o +endif diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c new file mode 100644 index 000000000000..cfaeef81d868 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs + * + * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: Kishon Vijay Abraham I + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../../pci.h" +#include "pcie-designware.h" + +/* PCIe controller wrapper DRA7XX configuration registers */ + +#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024 +#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028 +#define ERR_SYS BIT(0) +#define ERR_FATAL BIT(1) +#define ERR_NONFATAL BIT(2) +#define ERR_COR BIT(3) +#define ERR_AXI BIT(4) +#define ERR_ECRC BIT(5) +#define PME_TURN_OFF BIT(8) +#define PME_TO_ACK BIT(9) +#define PM_PME BIT(10) +#define LINK_REQ_RST BIT(11) +#define LINK_UP_EVT BIT(12) +#define CFG_BME_EVT BIT(13) +#define CFG_MSE_EVT BIT(14) +#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \ + ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \ + LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) + +#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 +#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 +#define INTA BIT(0) +#define INTB BIT(1) +#define INTC BIT(2) +#define INTD BIT(3) +#define MSI BIT(4) +#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) + +#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 +#define DEVICE_TYPE_EP 0x0 +#define DEVICE_TYPE_LEG_EP 0x1 +#define DEVICE_TYPE_RC 0x4 + +#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 +#define LTSSM_EN 0x1 + +#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C 
+#define LINK_UP BIT(16) +#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF + +#define EXP_CAP_ID_OFFSET 0x70 + +#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 +#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 + +#define PCIECTRL_TI_CONF_MSI_XMT 0x012c +#define MSI_REQ_GRANT BIT(0) +#define MSI_VECTOR_SHIFT 7 + +struct dra7xx_pcie { + struct dw_pcie *pci; + void __iomem *base; /* DT ti_conf */ + int phy_count; /* DT phy-names count */ + struct phy **phy; + int link_gen; + struct irq_domain *irq_domain; + enum dw_pcie_device_mode mode; +}; + +struct dra7xx_pcie_of_data { + enum dw_pcie_device_mode mode; +}; + +#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) + +static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) +{ + return readl(pcie->base + offset); +} + +static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->base + offset); +} + +static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) +{ + return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; +} + +static int dra7xx_pcie_link_up(struct dw_pcie *pci) +{ + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); + + return !!(reg & LINK_UP); +} + +static void dra7xx_pcie_stop_link(struct dw_pcie *pci) +{ + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + u32 reg; + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); + reg &= ~LTSSM_EN; + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); +} + +static int dra7xx_pcie_establish_link(struct dw_pcie *pci) +{ + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + struct device *dev = pci->dev; + u32 reg; + u32 exp_cap_off = EXP_CAP_ID_OFFSET; + + if (dw_pcie_link_up(pci)) { + dev_err(dev, "link is already up\n"); + return 0; + } + + if (dra7xx->link_gen == 1) { + dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, + 4, ®); + if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { + reg &= 
~((u32)PCI_EXP_LNKCAP_SLS); + reg |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_write(pci->dbi_base + exp_cap_off + + PCI_EXP_LNKCAP, 4, reg); + } + + dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, + 2, ®); + if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { + reg &= ~((u32)PCI_EXP_LNKCAP_SLS); + reg |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_write(pci->dbi_base + exp_cap_off + + PCI_EXP_LNKCTL2, 2, reg); + } + } + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); + reg |= LTSSM_EN; + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); + + return 0; +} + +static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) +{ + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, + LEG_EP_INTERRUPTS | MSI); + + dra7xx_pcie_writel(dra7xx, + PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, + MSI | LEG_EP_INTERRUPTS); +} + +static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) +{ + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, + INTERRUPTS); + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, + INTERRUPTS); +} + +static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) +{ + dra7xx_pcie_enable_wrapper_interrupts(dra7xx); + dra7xx_pcie_enable_msi_interrupts(dra7xx); +} + +static int dra7xx_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + + dw_pcie_setup_rc(pp); + + dra7xx_pcie_establish_link(pci); + dw_pcie_wait_for_link(pci); + dw_pcie_msi_init(pp); + dra7xx_pcie_enable_interrupts(dra7xx); + + return 0; +} + +static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { + .host_init = dra7xx_pcie_host_init, +}; + +static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static 
const struct irq_domain_ops intx_domain_ops = { + .map = dra7xx_pcie_intx_map, + .xlate = pci_irqd_intx_xlate, +}; + +static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node = of_get_next_child(node, NULL); + + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, pp); + if (!dra7xx->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return -ENODEV; + } + + return 0; +} + +static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) +{ + struct dra7xx_pcie *dra7xx = arg; + struct dw_pcie *pci = dra7xx->pci; + struct pcie_port *pp = &pci->pp; + unsigned long reg; + u32 virq, bit; + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); + + switch (reg) { + case MSI: + dw_handle_msi_irq(pp); + break; + case INTA: + case INTB: + case INTC: + case INTD: + for_each_set_bit(bit, ®, PCI_NUM_INTX) { + virq = irq_find_mapping(dra7xx->irq_domain, bit); + if (virq) + generic_handle_irq(virq); + } + break; + } + + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); + + return IRQ_HANDLED; +} + +static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) +{ + struct dra7xx_pcie *dra7xx = arg; + struct dw_pcie *pci = dra7xx->pci; + struct device *dev = pci->dev; + struct dw_pcie_ep *ep = &pci->ep; + u32 reg; + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); + + if (reg & ERR_SYS) + dev_dbg(dev, "System Error\n"); + + if (reg & ERR_FATAL) + dev_dbg(dev, "Fatal Error\n"); + + if (reg & ERR_NONFATAL) + dev_dbg(dev, "Non Fatal Error\n"); + + if (reg & ERR_COR) + dev_dbg(dev, "Correctable Error\n"); + + if (reg & ERR_AXI) + dev_dbg(dev, "AXI tag lookup 
fatal Error\n"); + + if (reg & ERR_ECRC) + dev_dbg(dev, "ECRC Error\n"); + + if (reg & PME_TURN_OFF) + dev_dbg(dev, + "Power Management Event Turn-Off message received\n"); + + if (reg & PME_TO_ACK) + dev_dbg(dev, + "Power Management Turn-Off Ack message received\n"); + + if (reg & PM_PME) + dev_dbg(dev, "PM Power Management Event message received\n"); + + if (reg & LINK_REQ_RST) + dev_dbg(dev, "Link Request Reset\n"); + + if (reg & LINK_UP_EVT) { + if (dra7xx->mode == DW_PCIE_EP_TYPE) + dw_pcie_ep_linkup(ep); + dev_dbg(dev, "Link-up state change\n"); + } + + if (reg & CFG_BME_EVT) + dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); + + if (reg & CFG_MSE_EVT) + dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); + + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); + + return IRQ_HANDLED; +} + +static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + enum pci_barno bar; + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); + + dra7xx_pcie_enable_wrapper_interrupts(dra7xx); +} + +static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) +{ + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); + mdelay(1); + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); +} + +static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, + u8 interrupt_num) +{ + u32 reg; + + reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; + reg |= MSI_REQ_GRANT; + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); +} + +static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, u8 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + dra7xx_pcie_raise_legacy_irq(dra7xx); + break; + case PCI_EPC_IRQ_MSI: + dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); + 
break; + default: + dev_err(pci->dev, "UNKNOWN IRQ type\n"); + } + + return 0; +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .ep_init = dra7xx_pcie_ep_init, + .raise_irq = dra7xx_pcie_raise_irq, +}; + +static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, + struct platform_device *pdev) +{ + int ret; + struct dw_pcie_ep *ep; + struct resource *res; + struct device *dev = &pdev->dev; + struct dw_pcie *pci = dra7xx->pci; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); + pci->dbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); + pci->dbi_base2 = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "failed to initialize endpoint\n"); + return ret; + } + + return 0; +} + +static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, + struct platform_device *pdev) +{ + int ret; + struct dw_pcie *pci = dra7xx->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + struct resource *res; + + pp->irq = platform_get_irq(pdev, 1); + if (pp->irq < 0) { + dev_err(dev, "missing IRQ resource\n"); + return pp->irq; + } + + ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, + IRQF_SHARED | IRQF_NO_THREAD, + "dra7-pcie-msi", dra7xx); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + ret = dra7xx_pcie_init_irq_domain(pp); + if (ret < 0) + return ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); + pci->dbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return 
PTR_ERR(pci->dbi_base); + + pp->ops = &dra7xx_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, + .start_link = dra7xx_pcie_establish_link, + .stop_link = dra7xx_pcie_stop_link, + .link_up = dra7xx_pcie_link_up, +}; + +static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) +{ + int phy_count = dra7xx->phy_count; + + while (phy_count--) { + phy_power_off(dra7xx->phy[phy_count]); + phy_exit(dra7xx->phy[phy_count]); + } +} + +static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) +{ + int phy_count = dra7xx->phy_count; + int ret; + int i; + + for (i = 0; i < phy_count; i++) { + ret = phy_init(dra7xx->phy[i]); + if (ret < 0) + goto err_phy; + + ret = phy_power_on(dra7xx->phy[i]); + if (ret < 0) { + phy_exit(dra7xx->phy[i]); + goto err_phy; + } + } + + return 0; + +err_phy: + while (--i >= 0) { + phy_power_off(dra7xx->phy[i]); + phy_exit(dra7xx->phy[i]); + } + + return ret; +} + +static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { + .mode = DW_PCIE_EP_TYPE, +}; + +static const struct of_device_id of_dra7xx_pcie_match[] = { + { + .compatible = "ti,dra7-pcie", + .data = &dra7xx_pcie_rc_of_data, + }, + { + .compatible = "ti,dra7-pcie-ep", + .data = &dra7xx_pcie_ep_of_data, + }, + {}, +}; + +/* + * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 + * @dra7xx: the dra7xx device where the workaround should be applied + * + * Access to the PCIe slave port that are not 32-bit aligned will result + * in incorrect mapping to TLP Address and Byte enable fields. Therefore, + * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or + * 0x3. + * + * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. 
+ */ +static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev) +{ + int ret; + struct device_node *np = dev->of_node; + struct of_phandle_args args; + struct regmap *regmap; + + regmap = syscon_regmap_lookup_by_phandle(np, + "ti,syscon-unaligned-access"); + if (IS_ERR(regmap)) { + dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); + return -EINVAL; + } + + ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", + 2, 0, &args); + if (ret) { + dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); + return ret; + } + + ret = regmap_update_bits(regmap, args.args[0], args.args[1], + args.args[1]); + if (ret) + dev_err(dev, "failed to enable unaligned access\n"); + + of_node_put(args.np); + + return ret; +} + +static int __init dra7xx_pcie_probe(struct platform_device *pdev) +{ + u32 reg; + int ret; + int irq; + int i; + int phy_count; + struct phy **phy; + struct device_link **link; + void __iomem *base; + struct resource *res; + struct dw_pcie *pci; + struct dra7xx_pcie *dra7xx; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + char name[10]; + struct gpio_desc *reset; + const struct of_device_id *match; + const struct dra7xx_pcie_of_data *data; + enum dw_pcie_device_mode mode; + + match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); + if (!match) + return -EINVAL; + + data = (struct dra7xx_pcie_of_data *)match->data; + mode = (enum dw_pcie_device_mode)data->mode; + + dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); + if (!dra7xx) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "missing IRQ resource: %d\n", irq); + return irq; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); + base = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!base) + return -ENOMEM; + + phy_count = 
of_property_count_strings(np, "phy-names"); + if (phy_count < 0) { + dev_err(dev, "unable to find the strings\n"); + return phy_count; + } + + phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); + if (!phy) + return -ENOMEM; + + link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); + if (!link) + return -ENOMEM; + + for (i = 0; i < phy_count; i++) { + snprintf(name, sizeof(name), "pcie-phy%d", i); + phy[i] = devm_phy_get(dev, name); + if (IS_ERR(phy[i])) + return PTR_ERR(phy[i]); + + link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); + if (!link[i]) { + ret = -EINVAL; + goto err_link; + } + } + + dra7xx->base = base; + dra7xx->phy = phy; + dra7xx->pci = pci; + dra7xx->phy_count = phy_count; + + ret = dra7xx_pcie_enable_phy(dra7xx); + if (ret) { + dev_err(dev, "failed to enable phy\n"); + return ret; + } + + platform_set_drvdata(pdev, dra7xx); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } + + reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); + if (IS_ERR(reset)) { + ret = PTR_ERR(reset); + dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); + goto err_gpio; + } + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); + reg &= ~LTSSM_EN; + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); + + dra7xx->link_gen = of_pci_get_max_link_speed(np); + if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) + dra7xx->link_gen = 2; + + switch (mode) { + case DW_PCIE_RC_TYPE: + if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { + ret = -ENODEV; + goto err_gpio; + } + + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, + DEVICE_TYPE_RC); + ret = dra7xx_add_pcie_port(dra7xx, pdev); + if (ret < 0) + goto err_gpio; + break; + case DW_PCIE_EP_TYPE: + if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { + ret = -ENODEV; + goto err_gpio; + } + + dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, + 
DEVICE_TYPE_EP); + + ret = dra7xx_pcie_ep_unaligned_memaccess(dev); + if (ret) + goto err_gpio; + + ret = dra7xx_add_pcie_ep(dra7xx, pdev); + if (ret < 0) + goto err_gpio; + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + } + dra7xx->mode = mode; + + ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, + IRQF_SHARED, "dra7xx-pcie-main", dra7xx); + if (ret) { + dev_err(dev, "failed to request irq\n"); + goto err_gpio; + } + + return 0; + +err_gpio: + pm_runtime_put(dev); + +err_get_sync: + pm_runtime_disable(dev); + dra7xx_pcie_disable_phy(dra7xx); + +err_link: + while (--i >= 0) + device_link_del(link[i]); + + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int dra7xx_pcie_suspend(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + struct dw_pcie *pci = dra7xx->pci; + u32 val; + + if (dra7xx->mode != DW_PCIE_RC_TYPE) + return 0; + + /* clear MSE */ + val = dw_pcie_readl_dbi(pci, PCI_COMMAND); + val &= ~PCI_COMMAND_MEMORY; + dw_pcie_writel_dbi(pci, PCI_COMMAND, val); + + return 0; +} + +static int dra7xx_pcie_resume(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + struct dw_pcie *pci = dra7xx->pci; + u32 val; + + if (dra7xx->mode != DW_PCIE_RC_TYPE) + return 0; + + /* set MSE */ + val = dw_pcie_readl_dbi(pci, PCI_COMMAND); + val |= PCI_COMMAND_MEMORY; + dw_pcie_writel_dbi(pci, PCI_COMMAND, val); + + return 0; +} + +static int dra7xx_pcie_suspend_noirq(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + + dra7xx_pcie_disable_phy(dra7xx); + + return 0; +} + +static int dra7xx_pcie_resume_noirq(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + int ret; + + ret = dra7xx_pcie_enable_phy(dra7xx); + if (ret) { + dev_err(dev, "failed to enable phy\n"); + return ret; + } + + return 0; +} +#endif + +static void dra7xx_pcie_shutdown(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dra7xx_pcie *dra7xx = 
dev_get_drvdata(dev); + int ret; + + dra7xx_pcie_stop_link(dra7xx->pci); + + ret = pm_runtime_put_sync(dev); + if (ret < 0) + dev_dbg(dev, "pm_runtime_put_sync failed\n"); + + pm_runtime_disable(dev); + dra7xx_pcie_disable_phy(dra7xx); +} + +static const struct dev_pm_ops dra7xx_pcie_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, + dra7xx_pcie_resume_noirq) +}; + +static struct platform_driver dra7xx_pcie_driver = { + .driver = { + .name = "dra7-pcie", + .of_match_table = of_dra7xx_pcie_match, + .suppress_bind_attrs = true, + .pm = &dra7xx_pcie_pm_ops, + }, + .shutdown = dra7xx_pcie_shutdown, +}; +builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c new file mode 100644 index 000000000000..4cc1e5df8c79 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -0,0 +1,539 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Samsung EXYNOS SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Author: Jingoo Han + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +#define to_exynos_pcie(x) dev_get_drvdata((x)->dev) + +/* PCIe ELBI registers */ +#define PCIE_IRQ_PULSE 0x000 +#define IRQ_INTA_ASSERT BIT(0) +#define IRQ_INTB_ASSERT BIT(2) +#define IRQ_INTC_ASSERT BIT(4) +#define IRQ_INTD_ASSERT BIT(6) +#define PCIE_IRQ_LEVEL 0x004 +#define PCIE_IRQ_SPECIAL 0x008 +#define PCIE_IRQ_EN_PULSE 0x00c +#define PCIE_IRQ_EN_LEVEL 0x010 +#define IRQ_MSI_ENABLE BIT(2) +#define PCIE_IRQ_EN_SPECIAL 0x014 +#define PCIE_PWR_RESET 0x018 +#define PCIE_CORE_RESET 0x01c +#define PCIE_CORE_RESET_ENABLE BIT(0) +#define PCIE_STICKY_RESET 0x020 +#define PCIE_NONSTICKY_RESET 0x024 +#define PCIE_APP_INIT_RESET 0x028 +#define PCIE_APP_LTSSM_ENABLE 0x02c +#define PCIE_ELBI_RDLH_LINKUP 0x064 +#define PCIE_ELBI_LTSSM_ENABLE 0x1 +#define PCIE_ELBI_SLV_AWMISC 0x11c +#define PCIE_ELBI_SLV_ARMISC 0x120 +#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) + +struct exynos_pcie_mem_res { + void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */ +}; + +struct exynos_pcie_clk_res { + struct clk *clk; + struct clk *bus_clk; +}; + +struct exynos_pcie { + struct dw_pcie *pci; + struct exynos_pcie_mem_res *mem_res; + struct exynos_pcie_clk_res *clk_res; + const struct exynos_pcie_ops *ops; + int reset_gpio; + + struct phy *phy; +}; + +struct exynos_pcie_ops { + int (*get_mem_resources)(struct platform_device *pdev, + struct exynos_pcie *ep); + int (*get_clk_resources)(struct exynos_pcie *ep); + int (*init_clk_resources)(struct exynos_pcie *ep); + void (*deinit_clk_resources)(struct exynos_pcie *ep); +}; + +static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, + struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct device *dev = pci->dev; + struct resource *res; + + ep->mem_res = devm_kzalloc(dev, 
sizeof(*ep->mem_res), GFP_KERNEL); + if (!ep->mem_res) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ep->mem_res->elbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ep->mem_res->elbi_base)) + return PTR_ERR(ep->mem_res->elbi_base); + + return 0; +} + +static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct device *dev = pci->dev; + + ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL); + if (!ep->clk_res) + return -ENOMEM; + + ep->clk_res->clk = devm_clk_get(dev, "pcie"); + if (IS_ERR(ep->clk_res->clk)) { + dev_err(dev, "Failed to get pcie rc clock\n"); + return PTR_ERR(ep->clk_res->clk); + } + + ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus"); + if (IS_ERR(ep->clk_res->bus_clk)) { + dev_err(dev, "Failed to get pcie bus clock\n"); + return PTR_ERR(ep->clk_res->bus_clk); + } + + return 0; +} + +static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct device *dev = pci->dev; + int ret; + + ret = clk_prepare_enable(ep->clk_res->clk); + if (ret) { + dev_err(dev, "cannot enable pcie rc clock"); + return ret; + } + + ret = clk_prepare_enable(ep->clk_res->bus_clk); + if (ret) { + dev_err(dev, "cannot enable pcie bus clock"); + goto err_bus_clk; + } + + return 0; + +err_bus_clk: + clk_disable_unprepare(ep->clk_res->clk); + + return ret; +} + +static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep) +{ + clk_disable_unprepare(ep->clk_res->bus_clk); + clk_disable_unprepare(ep->clk_res->clk); +} + +static const struct exynos_pcie_ops exynos5440_pcie_ops = { + .get_mem_resources = exynos5440_pcie_get_mem_resources, + .get_clk_resources = exynos5440_pcie_get_clk_resources, + .init_clk_resources = exynos5440_pcie_init_clk_resources, + .deinit_clk_resources = exynos5440_pcie_deinit_clk_resources, +}; + +static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg) +{ + writel(val, base 
+ reg); +} + +static u32 exynos_pcie_readl(void __iomem *base, u32 reg) +{ + return readl(base + reg); +} + +static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC); + if (on) + val |= PCIE_ELBI_SLV_DBI_ENABLE; + else + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC); +} + +static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC); + if (on) + val |= PCIE_ELBI_SLV_DBI_ENABLE; + else + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC); +} + +static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET); + val &= ~PCIE_CORE_RESET_ENABLE; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET); +} + +static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET); + val |= PCIE_CORE_RESET_ENABLE; + + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET); + exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET); +} + +static void exynos_pcie_assert_reset(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct device *dev = pci->dev; + + if (ep->reset_gpio >= 0) + devm_gpio_request_one(dev, ep->reset_gpio, + 
GPIOF_OUT_INIT_HIGH, "RESET"); +} + +static int exynos_pcie_establish_link(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + + if (dw_pcie_link_up(pci)) { + dev_err(dev, "Link already up\n"); + return 0; + } + + exynos_pcie_assert_core_reset(ep); + + phy_reset(ep->phy); + + exynos_pcie_writel(ep->mem_res->elbi_base, 1, + PCIE_PWR_RESET); + + phy_power_on(ep->phy); + phy_init(ep->phy); + + exynos_pcie_deassert_core_reset(ep); + dw_pcie_setup_rc(pp); + exynos_pcie_assert_reset(ep); + + /* assert LTSSM enable */ + exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE, + PCIE_APP_LTSSM_ENABLE); + + /* check if the link is up or not */ + if (!dw_pcie_wait_for_link(pci)) + return 0; + + phy_power_off(ep->phy); + return -ETIMEDOUT; +} + +static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep) +{ + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE); + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE); +} + +static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep) +{ + u32 val; + + /* enable INTX interrupt */ + val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | + IRQ_INTC_ASSERT | IRQ_INTD_ASSERT; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE); +} + +static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) +{ + struct exynos_pcie *ep = arg; + + exynos_pcie_clear_irq_pulse(ep); + return IRQ_HANDLED; +} + +static void exynos_pcie_msi_init(struct exynos_pcie *ep) +{ + struct dw_pcie *pci = ep->pci; + struct pcie_port *pp = &pci->pp; + u32 val; + + dw_pcie_msi_init(pp); + + /* enable MSI interrupt */ + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL); + val |= IRQ_MSI_ENABLE; + exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL); +} + +static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep) +{ + exynos_pcie_enable_irq_pulse(ep); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + 
exynos_pcie_msi_init(ep); +} + +static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + struct exynos_pcie *ep = to_exynos_pcie(pci); + u32 val; + + exynos_pcie_sideband_dbi_r_mode(ep, true); + dw_pcie_read(base + reg, size, &val); + exynos_pcie_sideband_dbi_r_mode(ep, false); + return val; +} + +static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + struct exynos_pcie *ep = to_exynos_pcie(pci); + + exynos_pcie_sideband_dbi_w_mode(ep, true); + dw_pcie_write(base + reg, size, val); + exynos_pcie_sideband_dbi_w_mode(ep, false); +} + +static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct exynos_pcie *ep = to_exynos_pcie(pci); + int ret; + + exynos_pcie_sideband_dbi_r_mode(ep, true); + ret = dw_pcie_read(pci->dbi_base + where, size, val); + exynos_pcie_sideband_dbi_r_mode(ep, false); + return ret; +} + +static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, + u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct exynos_pcie *ep = to_exynos_pcie(pci); + int ret; + + exynos_pcie_sideband_dbi_w_mode(ep, true); + ret = dw_pcie_write(pci->dbi_base + where, size, val); + exynos_pcie_sideband_dbi_w_mode(ep, false); + return ret; +} + +static int exynos_pcie_link_up(struct dw_pcie *pci) +{ + struct exynos_pcie *ep = to_exynos_pcie(pci); + u32 val; + + val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP); + if (val == PCIE_ELBI_LTSSM_ENABLE) + return 1; + + return 0; +} + +static int exynos_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct exynos_pcie *ep = to_exynos_pcie(pci); + + exynos_pcie_establish_link(ep); + exynos_pcie_enable_interrupts(ep); + + return 0; +} + +static const struct dw_pcie_host_ops exynos_pcie_host_ops = { + .rd_own_conf = exynos_pcie_rd_own_conf, + 
.wr_own_conf = exynos_pcie_wr_own_conf, + .host_init = exynos_pcie_host_init, +}; + +static int __init exynos_add_pcie_port(struct exynos_pcie *ep, + struct platform_device *pdev) +{ + struct dw_pcie *pci = ep->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + pp->irq = platform_get_irq(pdev, 1); + if (pp->irq < 0) { + dev_err(dev, "failed to get irq\n"); + return pp->irq; + } + ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, + IRQF_SHARED, "exynos-pcie", ep); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq(pdev, 0); + if (pp->msi_irq < 0) { + dev_err(dev, "failed to get msi irq\n"); + return pp->msi_irq; + } + } + + pp->root_bus_nr = -1; + pp->ops = &exynos_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .read_dbi = exynos_pcie_read_dbi, + .write_dbi = exynos_pcie_write_dbi, + .link_up = exynos_pcie_link_up, +}; + +static int __init exynos_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct exynos_pcie *ep; + struct device_node *np = dev->of_node; + int ret; + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + ep->pci = pci; + ep->ops = (const struct exynos_pcie_ops *) + of_device_get_match_data(dev); + + ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); + + ep->phy = devm_of_phy_get(dev, np, NULL); + if (IS_ERR(ep->phy)) { + if (PTR_ERR(ep->phy) == -EPROBE_DEFER) + return PTR_ERR(ep->phy); + + ep->phy = NULL; + } + + if (ep->ops && ep->ops->get_mem_resources) { + ret = ep->ops->get_mem_resources(pdev, ep); + if (ret) + return ret; + } + + if 
(ep->ops && ep->ops->get_clk_resources && + ep->ops->init_clk_resources) { + ret = ep->ops->get_clk_resources(ep); + if (ret) + return ret; + ret = ep->ops->init_clk_resources(ep); + if (ret) + return ret; + } + + platform_set_drvdata(pdev, ep); + + ret = exynos_add_pcie_port(ep, pdev); + if (ret < 0) + goto fail_probe; + + return 0; + +fail_probe: + phy_exit(ep->phy); + + if (ep->ops && ep->ops->deinit_clk_resources) + ep->ops->deinit_clk_resources(ep); + return ret; +} + +static int __exit exynos_pcie_remove(struct platform_device *pdev) +{ + struct exynos_pcie *ep = platform_get_drvdata(pdev); + + if (ep->ops && ep->ops->deinit_clk_resources) + ep->ops->deinit_clk_resources(ep); + + return 0; +} + +static const struct of_device_id exynos_pcie_of_match[] = { + { + .compatible = "samsung,exynos5440-pcie", + .data = &exynos5440_pcie_ops + }, + {}, +}; + +static struct platform_driver exynos_pcie_driver = { + .remove = __exit_p(exynos_pcie_remove), + .driver = { + .name = "exynos-pcie", + .of_match_table = exynos_pcie_of_match, + }, +}; + +/* Exynos PCIe driver does not allow module unload */ + +static int __init exynos_pcie_init(void) +{ + return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); +} +subsys_initcall(exynos_pcie_init); diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c new file mode 100644 index 000000000000..80f604602783 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -0,0 +1,871 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Freescale i.MX6 SoCs + * + * Copyright (C) 2013 Kosagi + * http://www.kosagi.com + * + * Author: Sean Cross + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +#define to_imx6_pcie(x) dev_get_drvdata((x)->dev) + +enum imx6_pcie_variants { + IMX6Q, + 
IMX6SX, + IMX6QP, + IMX7D, +}; + +struct imx6_pcie { + struct dw_pcie *pci; + int reset_gpio; + bool gpio_active_high; + struct clk *pcie_bus; + struct clk *pcie_phy; + struct clk *pcie_inbound_axi; + struct clk *pcie; + struct regmap *iomuxc_gpr; + struct reset_control *pciephy_reset; + struct reset_control *apps_reset; + enum imx6_pcie_variants variant; + u32 tx_deemph_gen1; + u32 tx_deemph_gen2_3p5db; + u32 tx_deemph_gen2_6db; + u32 tx_swing_full; + u32 tx_swing_low; + int link_gen; + struct regulator *vpcie; +}; + +/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ +#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 +#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50 +#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 + +/* PCIe Root Complex registers (memory-mapped) */ +#define PCIE_RC_LCR 0x7c +#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 +#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 +#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf + +#define PCIE_RC_LCSR 0x80 + +/* PCIe Port Logic registers (memory-mapped) */ +#define PL_OFFSET 0x700 +#define PCIE_PL_PFLR (PL_OFFSET + 0x08) +#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) +#define PCIE_PL_PFLR_FORCE_LINK (1 << 15) +#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) +#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) +#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29) +#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4) + +#define PCIE_PHY_CTRL (PL_OFFSET + 0x114) +#define PCIE_PHY_CTRL_DATA_LOC 0 +#define PCIE_PHY_CTRL_CAP_ADR_LOC 16 +#define PCIE_PHY_CTRL_CAP_DAT_LOC 17 +#define PCIE_PHY_CTRL_WR_LOC 18 +#define PCIE_PHY_CTRL_RD_LOC 19 + +#define PCIE_PHY_STAT (PL_OFFSET + 0x110) +#define PCIE_PHY_STAT_ACK_LOC 16 + +#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C +#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) + +/* PHY registers (not memory-mapped) */ +#define PCIE_PHY_RX_ASIC_OUT 0x100D +#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) + +#define PHY_RX_OVRD_IN_LO 0x1005 +#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) +#define 
PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) + +static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) +{ + struct dw_pcie *pci = imx6_pcie->pci; + u32 val; + u32 max_iterations = 10; + u32 wait_counter = 0; + + do { + val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); + val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; + wait_counter++; + + if (val == exp_val) + return 0; + + udelay(1); + } while (wait_counter < max_iterations); + + return -ETIMEDOUT; +} + +static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) +{ + struct dw_pcie *pci = imx6_pcie->pci; + u32 val; + int ret; + + val = addr << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); + + val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); + + ret = pcie_phy_poll_ack(imx6_pcie, 1); + if (ret) + return ret; + + val = addr << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); + + return pcie_phy_poll_ack(imx6_pcie, 0); +} + +/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ +static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) +{ + struct dw_pcie *pci = imx6_pcie->pci; + u32 val, phy_ctl; + int ret; + + ret = pcie_phy_wait_ack(imx6_pcie, addr); + if (ret) + return ret; + + /* assert Read signal */ + phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); + + ret = pcie_phy_poll_ack(imx6_pcie, 1); + if (ret) + return ret; + + val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); + *data = val & 0xffff; + + /* deassert Read signal */ + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); + + return pcie_phy_poll_ack(imx6_pcie, 0); +} + +static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) +{ + struct dw_pcie *pci = imx6_pcie->pci; + u32 var; + int ret; + + /* write addr */ + /* cap addr */ + ret = pcie_phy_wait_ack(imx6_pcie, addr); + if (ret) + return ret; + + var = data << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 
var); + + /* capture data */ + var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + ret = pcie_phy_poll_ack(imx6_pcie, 1); + if (ret) + return ret; + + /* deassert cap data */ + var = data << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + /* wait for ack de-assertion */ + ret = pcie_phy_poll_ack(imx6_pcie, 0); + if (ret) + return ret; + + /* assert wr signal */ + var = 0x1 << PCIE_PHY_CTRL_WR_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + /* wait for ack */ + ret = pcie_phy_poll_ack(imx6_pcie, 1); + if (ret) + return ret; + + /* deassert wr signal */ + var = data << PCIE_PHY_CTRL_DATA_LOC; + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); + + /* wait for ack de-assertion */ + ret = pcie_phy_poll_ack(imx6_pcie, 0); + if (ret) + return ret; + + dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0); + + return 0; +} + +static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) +{ + u32 tmp; + + pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); + tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | + PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); + + usleep_range(2000, 3000); + + pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); + tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | + PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); +} + +/* Added for PCI abort handling */ +static int imx6q_pcie_abort_handler(unsigned long addr, + unsigned int fsr, struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); + unsigned long instr = *(unsigned long *)pc; + int reg = (instr >> 12) & 15; + + /* + * If the instruction being executed was a read, + * make it look like it read all-ones. 
+ */ + if ((instr & 0x0c100000) == 0x04100000) { + unsigned long val; + + if (instr & 0x00400000) + val = 255; + else + val = -1; + + regs->uregs[reg] = val; + regs->ARM_pc += 4; + return 0; + } + + if ((instr & 0x0e100090) == 0x00100090) { + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + return 0; + } + + return 1; +} + +static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) +{ + struct device *dev = imx6_pcie->pci->dev; + + switch (imx6_pcie->variant) { + case IMX7D: + reset_control_assert(imx6_pcie->pciephy_reset); + reset_control_assert(imx6_pcie->apps_reset); + break; + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN); + /* Force PCIe PHY reset */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, + IMX6SX_GPR5_PCIE_BTNRST_RESET, + IMX6SX_GPR5_PCIE_BTNRST_RESET); + break; + case IMX6QP: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_SW_RST, + IMX6Q_GPR1_PCIE_SW_RST); + break; + case IMX6Q: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); + break; + } + + if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { + int ret = regulator_disable(imx6_pcie->vpcie); + + if (ret) + dev_err(dev, "failed to disable vpcie regulator: %d\n", + ret); + } +} + +static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + int ret = 0; + + switch (imx6_pcie->variant) { + case IMX6SX: + ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); + if (ret) { + dev_err(dev, "unable to enable pcie_axi clock\n"); + break; + } + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); + break; + case IMX6QP: /* FALLTHROUGH */ + case IMX6Q: + /* power up core phy and enable ref 
clock */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); + /* + * the async reset input need ref clock to sync internally, + * when the ref clock comes after reset, internal synced + * reset time is too short, cannot meet the requirement. + * add one ~10us delay here. + */ + udelay(10); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); + break; + case IMX7D: + break; + } + + return ret; +} + +static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) +{ + u32 val; + unsigned int retries; + struct device *dev = imx6_pcie->pci->dev; + + for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) { + regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val); + + if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED) + return; + + usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN, + PHY_PLL_LOCK_WAIT_USLEEP_MAX); + } + + dev_err(dev, "PCIe PLL lock timeout\n"); +} + +static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + int ret; + + if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) { + ret = regulator_enable(imx6_pcie->vpcie); + if (ret) { + dev_err(dev, "failed to enable vpcie regulator: %d\n", + ret); + return; + } + } + + ret = clk_prepare_enable(imx6_pcie->pcie_phy); + if (ret) { + dev_err(dev, "unable to enable pcie_phy clock\n"); + goto err_pcie_phy; + } + + ret = clk_prepare_enable(imx6_pcie->pcie_bus); + if (ret) { + dev_err(dev, "unable to enable pcie_bus clock\n"); + goto err_pcie_bus; + } + + ret = clk_prepare_enable(imx6_pcie->pcie); + if (ret) { + dev_err(dev, "unable to enable pcie clock\n"); + goto err_pcie; + } + + ret = imx6_pcie_enable_ref_clk(imx6_pcie); + if (ret) { + dev_err(dev, "unable to enable pcie ref clock\n"); + goto err_ref_clk; + } + + /* allow the clocks to stabilize */ + usleep_range(200, 500); + + /* Some boards don't have PCIe reset 
GPIO. */ + if (gpio_is_valid(imx6_pcie->reset_gpio)) { + gpio_set_value_cansleep(imx6_pcie->reset_gpio, + imx6_pcie->gpio_active_high); + msleep(100); + gpio_set_value_cansleep(imx6_pcie->reset_gpio, + !imx6_pcie->gpio_active_high); + } + + switch (imx6_pcie->variant) { + case IMX7D: + reset_control_deassert(imx6_pcie->pciephy_reset); + imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); + break; + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, + IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); + break; + case IMX6QP: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_SW_RST, 0); + + usleep_range(200, 500); + break; + case IMX6Q: /* Nothing to do */ + break; + } + + return; + +err_ref_clk: + clk_disable_unprepare(imx6_pcie->pcie); +err_pcie: + clk_disable_unprepare(imx6_pcie->pcie_bus); +err_pcie_bus: + clk_disable_unprepare(imx6_pcie->pcie_phy); +err_pcie_phy: + if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { + ret = regulator_disable(imx6_pcie->vpcie); + if (ret) + dev_err(dev, "failed to disable vpcie regulator: %d\n", + ret); + } +} + +static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) +{ + switch (imx6_pcie->variant) { + case IMX7D: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); + break; + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_RX_EQ_MASK, + IMX6SX_GPR12_PCIE_RX_EQ_2); + /* FALLTHROUGH */ + default: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); + + /* configure constant input signal to the pcie ctrl and phy */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_LOS_LEVEL, 9 << 4); + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN1, + imx6_pcie->tx_deemph_gen1 << 0); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, + imx6_pcie->tx_deemph_gen2_3p5db << 6); + 
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, + imx6_pcie->tx_deemph_gen2_6db << 12); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_SWING_FULL, + imx6_pcie->tx_swing_full << 18); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + IMX6Q_GPR8_TX_SWING_LOW, + imx6_pcie->tx_swing_low << 25); + break; + } + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); +} + +static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + + /* check if the link is up or not */ + if (!dw_pcie_wait_for_link(pci)) + return 0; + + dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", + dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), + dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); + return -ETIMEDOUT; +} + +static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + u32 tmp; + unsigned int retries; + + for (retries = 0; retries < 200; retries++) { + tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + /* Test if the speed change finished. */ + if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) + return 0; + usleep_range(100, 1000); + } + + dev_err(dev, "Speed change timeout\n"); + return -EINVAL; +} + +static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct device *dev = pci->dev; + u32 tmp; + int ret; + + /* + * Force Gen1 operation when starting the link. In case the link is + * started in Gen2 mode, there is a possibility the devices on the + * bus will not be detected at all. This happens with PCIe switches. + */ + tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); + tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; + tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1; + dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); + + /* Start LTSSM. 
*/ + if (imx6_pcie->variant == IMX7D) + reset_control_deassert(imx6_pcie->apps_reset); + else + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); + + ret = imx6_pcie_wait_for_link(imx6_pcie); + if (ret) + goto err_reset_phy; + + if (imx6_pcie->link_gen == 2) { + /* Allow Gen2 mode after the link is up. */ + tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); + tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; + tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; + dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); + + /* + * Start Directed Speed Change so the best possible + * speed both link partners support can be negotiated. + */ + tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + tmp |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); + + if (imx6_pcie->variant != IMX7D) { + /* + * On i.MX7, DIRECT_SPEED_CHANGE behaves differently + * from i.MX6 family when no link speed transition + * occurs and we go Gen1 -> yep, Gen1. The difference + * is that, in such case, it will not be cleared by HW + * which will cause the following code to report false + * failure. + */ + + ret = imx6_pcie_wait_for_speed_change(imx6_pcie); + if (ret) { + dev_err(dev, "Failed to bring link up!\n"); + goto err_reset_phy; + } + } + + /* Make sure link training is finished as well! 
*/ + ret = imx6_pcie_wait_for_link(imx6_pcie); + if (ret) { + dev_err(dev, "Failed to bring link up!\n"); + goto err_reset_phy; + } + } else { + dev_info(dev, "Link: Gen2 disabled\n"); + } + + tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR); + dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); + return 0; + +err_reset_phy: + dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", + dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), + dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); + imx6_pcie_reset_phy(imx6_pcie); + return ret; +} + +static int imx6_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + + imx6_pcie_assert_core_reset(imx6_pcie); + imx6_pcie_init_phy(imx6_pcie); + imx6_pcie_deassert_core_reset(imx6_pcie); + dw_pcie_setup_rc(pp); + imx6_pcie_establish_link(imx6_pcie); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + return 0; +} + +static int imx6_pcie_link_up(struct dw_pcie *pci) +{ + return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) & + PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; +} + +static const struct dw_pcie_host_ops imx6_pcie_host_ops = { + .host_init = imx6_pcie_host_init, +}; + +static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = imx6_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq <= 0) { + dev_err(dev, "failed to get MSI irq\n"); + return -ENODEV; + } + } + + pp->root_bus_nr = -1; + pp->ops = &imx6_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = imx6_pcie_link_up, +}; + +static int imx6_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct 
imx6_pcie *imx6_pcie; + struct resource *dbi_base; + struct device_node *node = dev->of_node; + int ret; + + imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); + if (!imx6_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + imx6_pcie->pci = pci; + imx6_pcie->variant = + (enum imx6_pcie_variants)of_device_get_match_data(dev); + + dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pci->dbi_base = devm_ioremap_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + /* Fetch GPIOs */ + imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0); + imx6_pcie->gpio_active_high = of_property_read_bool(node, + "reset-gpio-active-high"); + if (gpio_is_valid(imx6_pcie->reset_gpio)) { + ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio, + imx6_pcie->gpio_active_high ? + GPIOF_OUT_INIT_HIGH : + GPIOF_OUT_INIT_LOW, + "PCIe reset"); + if (ret) { + dev_err(dev, "unable to get reset gpio\n"); + return ret; + } + } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) { + return imx6_pcie->reset_gpio; + } + + /* Fetch clocks */ + imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); + if (IS_ERR(imx6_pcie->pcie_phy)) { + dev_err(dev, "pcie_phy clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_phy); + } + + imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); + if (IS_ERR(imx6_pcie->pcie_bus)) { + dev_err(dev, "pcie_bus clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_bus); + } + + imx6_pcie->pcie = devm_clk_get(dev, "pcie"); + if (IS_ERR(imx6_pcie->pcie)) { + dev_err(dev, "pcie clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie); + } + + switch (imx6_pcie->variant) { + case IMX6SX: + imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, + "pcie_inbound_axi"); + if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { + dev_err(dev, "pcie_inbound_axi clock missing or 
invalid\n"); + return PTR_ERR(imx6_pcie->pcie_inbound_axi); + } + break; + case IMX7D: + imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, + "pciephy"); + if (IS_ERR(imx6_pcie->pciephy_reset)) { + dev_err(dev, "Failed to get PCIEPHY reset control\n"); + return PTR_ERR(imx6_pcie->pciephy_reset); + } + + imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, + "apps"); + if (IS_ERR(imx6_pcie->apps_reset)) { + dev_err(dev, "Failed to get PCIE APPS reset control\n"); + return PTR_ERR(imx6_pcie->apps_reset); + } + break; + default: + break; + } + + /* Grab GPR config register range */ + imx6_pcie->iomuxc_gpr = + syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); + if (IS_ERR(imx6_pcie->iomuxc_gpr)) { + dev_err(dev, "unable to find iomuxc registers\n"); + return PTR_ERR(imx6_pcie->iomuxc_gpr); + } + + /* Grab PCIe PHY Tx Settings */ + if (of_property_read_u32(node, "fsl,tx-deemph-gen1", + &imx6_pcie->tx_deemph_gen1)) + imx6_pcie->tx_deemph_gen1 = 0; + + if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", + &imx6_pcie->tx_deemph_gen2_3p5db)) + imx6_pcie->tx_deemph_gen2_3p5db = 0; + + if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", + &imx6_pcie->tx_deemph_gen2_6db)) + imx6_pcie->tx_deemph_gen2_6db = 20; + + if (of_property_read_u32(node, "fsl,tx-swing-full", + &imx6_pcie->tx_swing_full)) + imx6_pcie->tx_swing_full = 127; + + if (of_property_read_u32(node, "fsl,tx-swing-low", + &imx6_pcie->tx_swing_low)) + imx6_pcie->tx_swing_low = 127; + + /* Limit link speed */ + ret = of_property_read_u32(node, "fsl,max-link-speed", + &imx6_pcie->link_gen); + if (ret) + imx6_pcie->link_gen = 1; + + imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); + if (IS_ERR(imx6_pcie->vpcie)) { + if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) + return -EPROBE_DEFER; + imx6_pcie->vpcie = NULL; + } + + platform_set_drvdata(pdev, imx6_pcie); + + ret = imx6_add_pcie_port(imx6_pcie, pdev); + if (ret < 0) + return ret; + + return 0; +} + 
+static void imx6_pcie_shutdown(struct platform_device *pdev) +{ + struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev); + + /* bring down link, so bootloader gets clean state in case of reboot */ + imx6_pcie_assert_core_reset(imx6_pcie); +} + +static const struct of_device_id imx6_pcie_of_match[] = { + { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, + { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, + { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, + { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, }, + {}, +}; + +static struct platform_driver imx6_pcie_driver = { + .driver = { + .name = "imx6q-pcie", + .of_match_table = imx6_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = imx6_pcie_probe, + .shutdown = imx6_pcie_shutdown, +}; + +static int __init imx6_pcie_init(void) +{ + /* + * Since probe() can be deferred we need to make sure that + * hook_fault_code is not called after __init memory is freed + * by kernel and since imx6q_pcie_abort_handler() is a no-op, + * we can install the handler here without risking it + * accessing some uninitialized driver state. + */ + hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, + "external abort on non-linefetch"); + + return platform_driver_register(&imx6_pcie_driver); +} +device_initcall(imx6_pcie_init); diff --git a/drivers/pci/controller/dwc/pci-keystone-dw.c b/drivers/pci/controller/dwc/pci-keystone-dw.c new file mode 100644 index 000000000000..0682213328e9 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone-dw.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DesignWare application register space functions for Keystone PCI controller + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
+ * http://www.ti.com + * + * Author: Murali Karicheri + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" +#include "pci-keystone.h" + +/* Application register defines */ +#define LTSSM_EN_VAL 1 +#define LTSSM_STATE_MASK 0x1f +#define LTSSM_STATE_L0 0x11 +#define DBI_CS2_EN_VAL 0x20 +#define OB_XLAT_EN_VAL 2 + +/* Application registers */ +#define CMD_STATUS 0x004 +#define CFG_SETUP 0x008 +#define OB_SIZE 0x030 +#define CFG_PCIM_WIN_SZ_IDX 3 +#define CFG_PCIM_WIN_CNT 32 +#define SPACE0_REMOTE_CFG_OFFSET 0x1000 +#define OB_OFFSET_INDEX(n) (0x200 + (8 * n)) +#define OB_OFFSET_HI(n) (0x204 + (8 * n)) + +/* IRQ register defines */ +#define IRQ_EOI 0x050 +#define IRQ_STATUS 0x184 +#define IRQ_ENABLE_SET 0x188 +#define IRQ_ENABLE_CLR 0x18c + +#define MSI_IRQ 0x054 +#define MSI0_IRQ_STATUS 0x104 +#define MSI0_IRQ_ENABLE_SET 0x108 +#define MSI0_IRQ_ENABLE_CLR 0x10c +#define IRQ_STATUS 0x184 +#define MSI_IRQ_OFFSET 4 + +/* Error IRQ bits */ +#define ERR_AER BIT(5) /* ECRC error */ +#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ +#define ERR_CORR BIT(3) /* Correctable error */ +#define ERR_NONFATAL BIT(2) /* Non-fatal error */ +#define ERR_FATAL BIT(1) /* Fatal error */ +#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */ +#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ + ERR_NONFATAL | ERR_FATAL | ERR_SYS) +#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI) +#define ERR_IRQ_STATUS_RAW 0x1c0 +#define ERR_IRQ_STATUS 0x1c4 +#define ERR_IRQ_ENABLE_SET 0x1c8 +#define ERR_IRQ_ENABLE_CLR 0x1cc + +/* Config space registers */ +#define DEBUG0 0x728 + +#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) + +static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, + u32 *bit_pos) +{ + *reg_offset = offset % 8; + *bit_pos = offset >> 3; +} + +phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie 
*ks_pcie = to_keystone_pcie(pci); + + return ks_pcie->app.start + MSI_IRQ; +} + +static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset) +{ + return readl(ks_pcie->va_app_base + offset); +} + +static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val) +{ + writel(val, ks_pcie->va_app_base + offset); +} + +void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + u32 pending, vector; + int src, virq; + + pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); + + /* + * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit + * shows 1, 9, 17, 25 and so forth + */ + for (src = 0; src < 4; src++) { + if (BIT(src) & pending) { + vector = offset + (src << 3); + virq = irq_linear_revmap(pp->irq_domain, vector); + dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", + src, vector, virq); + generic_handle_irq(virq); + } + } +} + +void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp) +{ + u32 reg_offset, bit_pos; + struct keystone_pcie *ks_pcie; + struct dw_pcie *pci; + + pci = to_dw_pcie_from_pp(pp); + ks_pcie = to_keystone_pcie(pci); + update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); + + ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), + BIT(bit_pos)); + ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); +} + +void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) +{ + u32 reg_offset, bit_pos; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); + ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), + BIT(bit_pos)); +} + +void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) +{ + u32 reg_offset, bit_pos; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + 
update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); + ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), + BIT(bit_pos)); +} + +int ks_dw_pcie_msi_host_init(struct pcie_port *pp) +{ + return dw_pcie_allocate_domains(pp); +} + +void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) +{ + int i; + + for (i = 0; i < PCI_NUM_INTX; i++) + ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); +} + +void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct device *dev = pci->dev; + u32 pending; + int virq; + + pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); + + if (BIT(0) & pending) { + virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); + dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq); + generic_handle_irq(virq); + } + + /* EOI the INTx interrupt */ + ks_dw_app_writel(ks_pcie, IRQ_EOI, offset); +} + +void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) +{ + ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); +} + +irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) +{ + u32 status; + + status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; + if (!status) + return IRQ_NONE; + + if (status & ERR_FATAL_IRQ) + dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n", + status); + + /* Ack the IRQ; status bits are RW1C */ + ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status); + return IRQ_HANDLED; +} + +static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) +{ +} + +static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d) +{ +} + +static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d) +{ +} + +static struct irq_chip ks_dw_pcie_legacy_irq_chip = { + .name = "Keystone-PCI-Legacy-IRQ", + .irq_ack = ks_dw_pcie_ack_legacy_irq, + .irq_mask = ks_dw_pcie_mask_legacy_irq, + .irq_unmask = ks_dw_pcie_unmask_legacy_irq, +}; + +static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain 
*d, + unsigned int irq, irq_hw_number_t hw_irq) +{ + irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, d->host_data); + + return 0; +} + +static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = { + .map = ks_dw_pcie_init_legacy_irq_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +/** + * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask + * registers + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. + */ +static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) +{ + u32 val; + + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val); + + do { + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + } while (!(val & DBI_CS2_EN_VAL)); +} + +/** + * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. 
+ */ +static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) +{ + u32 val; + + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val); + + do { + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + } while (val & DBI_CS2_EN_VAL); +} + +void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + u32 start = pp->mem->start, end = pp->mem->end; + int i, tr_size; + u32 val; + + /* Disable BARs for inbound access */ + ks_dw_pcie_set_dbi_mode(ks_pcie); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); + ks_dw_pcie_clear_dbi_mode(ks_pcie); + + /* Set outbound translation size per window division */ + ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7); + + tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M; + + /* Using Direct 1:1 mapping of RC <-> PCI memory space */ + for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) { + ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1); + ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0); + start += tr_size; + } + + /* Enable OB translation */ + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val); +} + +/** + * ks_pcie_cfg_setup() - Set up configuration space address for a device + * + * @ks_pcie: ptr to keystone_pcie structure + * @bus: Bus number the device is residing on + * @devfn: device, function number info + * + * Forms and returns the address of configuration space mapped in PCIESS + * address space 0. Also configures CFG_SETUP for remote configuration space + * access. + * + * The address space has two regions to access configuration - local and remote. + * We access local region for bus 0 (as RC is attached on bus 0) and remote + * region for others with TYPE 1 access when bus > 1. 
As for device on bus = 1, + * we will do TYPE 0 access as it will be on our secondary bus (logical). + * CFG_SETUP is needed only for remote configuration access. + */ +static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus, + unsigned int devfn) +{ + u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn); + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + u32 regval; + + if (bus == 0) + return pci->dbi_base; + + regval = (bus << 16) | (device << 8) | function; + + /* + * Since Bus#1 will be a virtual bus, we need to have TYPE0 + * access only. + * TYPE 1 + */ + if (bus != 1) + regval |= BIT(24); + + ks_dw_app_writel(ks_pcie, CFG_SETUP, regval); + return pp->va_cfg0_base; +} + +int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u8 bus_num = bus->number; + void __iomem *addr; + + addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); + + return dw_pcie_read(addr + where, size, val); +} + +int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u8 bus_num = bus->number; + void __iomem *addr; + + addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); + + return dw_pcie_write(addr + where, size, val); +} + +/** + * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization + * + * This sets BAR0 to enable inbound access for MSI_IRQ register + */ +void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + /* Configure and set up BAR0 */ + ks_dw_pcie_set_dbi_mode(ks_pcie); + + /* Enable BAR0 */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); + 
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); + + ks_dw_pcie_clear_dbi_mode(ks_pcie); + + /* + * For BAR0, just setting bus address for inbound writes (MSI) should + * be sufficient. Use physical address to avoid any conflicts. + */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); +} + +/** + * ks_dw_pcie_link_up() - Check if link up + */ +int ks_dw_pcie_link_up(struct dw_pcie *pci) +{ + u32 val; + + val = dw_pcie_readl_dbi(pci, DEBUG0); + return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0; +} + +void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) +{ + u32 val; + + /* Disable Link training */ + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + val &= ~LTSSM_EN_VAL; + ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); + + /* Initiate Link Training */ + val = ks_dw_app_readl(ks_pcie, CMD_STATUS); + ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); +} + +/** + * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware + * + * Ioremap the register resources, initialize legacy irq domain + * and call dw_pcie_v3_65_host_init() API to initialize the Keystone + * PCI host controller. + */ +int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, + struct device_node *msi_intc_np) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + /* Index 0 is the config reg. space address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + /* + * We set these same and is used in pcie rd/wr_other_conf + * functions + */ + pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; + pp->va_cfg1_base = pp->va_cfg0_base; + + /* Index 1 is the application reg. 
space address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ks_pcie->va_app_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ks_pcie->va_app_base)) + return PTR_ERR(ks_pcie->va_app_base); + + ks_pcie->app = *res; + + /* Create legacy IRQ domain */ + ks_pcie->legacy_irq_domain = + irq_domain_add_linear(ks_pcie->legacy_intc_np, + PCI_NUM_INTX, + &ks_dw_pcie_legacy_irq_domain_ops, + NULL); + if (!ks_pcie->legacy_irq_domain) { + dev_err(dev, "Failed to add irq domain for legacy irqs\n"); + return -EINVAL; + } + + return dw_pcie_host_init(pp); +} diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c new file mode 100644 index 000000000000..3722a5f31e5e --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Texas Instruments Keystone SoCs + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. + * http://www.ti.com + * + * Author: Murali Karicheri + * Implementation based on pci-exynos.c and pcie-designware.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" +#include "pci-keystone.h" + +#define DRIVER_NAME "keystone-pcie" + +/* DEV_STAT_CTRL */ +#define PCIE_CAP_BASE 0x70 + +/* PCIE controller device IDs */ +#define PCIE_RC_K2HK 0xb008 +#define PCIE_RC_K2E 0xb009 +#define PCIE_RC_K2L 0xb00a + +#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) + +static void quirk_limit_mrrs(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + struct pci_dev *bridge = bus->self; + static const struct pci_device_id rc_pci_devids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, 
PCIE_RC_K2L), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { 0, }, + }; + + if (pci_is_root_bus(bus)) + return; + + /* look for the host bridge */ + while (!pci_is_root_bus(bus)) { + bridge = bus->self; + bus = bus->parent; + } + + if (bridge) { + /* + * Keystone PCI controller has a h/w limitation of + * 256 bytes maximum read request size. It can't handle + * anything higher than this. So force this limit on + * all downstream devices. + */ + if (pci_match_id(rc_pci_devids, bridge)) { + if (pcie_get_readrq(dev) > 256) { + dev_info(&dev->dev, "limiting MRRS to 256\n"); + pcie_set_readrq(dev, 256); + } + } + } +} +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); + +static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + unsigned int retries; + + dw_pcie_setup_rc(pp); + + if (dw_pcie_link_up(pci)) { + dev_info(dev, "Link already up\n"); + return 0; + } + + /* check if the link is up or not */ + for (retries = 0; retries < 5; retries++) { + ks_dw_pcie_initiate_link_train(ks_pcie); + if (!dw_pcie_wait_for_link(pci)) + return 0; + } + + dev_err(dev, "phy link never came up\n"); + return -ETIMEDOUT; +} + +static void ks_pcie_msi_irq_handler(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); + u32 offset = irq - ks_pcie->msi_host_irqs[0]; + struct dw_pcie *pci = ks_pcie->pci; + struct device *dev = pci->dev; + struct irq_chip *chip = irq_desc_get_chip(desc); + + dev_dbg(dev, "%s, irq %d\n", __func__, irq); + + /* + * The chained irq handler installation would have replaced normal + * interrupt driver handler so we need to take care of mask/unmask and + * ack operation. 
+ */ + chained_irq_enter(chip, desc); + ks_dw_pcie_handle_msi_irq(ks_pcie, offset); + chained_irq_exit(chip, desc); +} + +/** + * ks_pcie_legacy_irq_handler() - Handle legacy interrupt + * @irq: IRQ line for legacy interrupts + * @desc: Pointer to irq descriptor + * + * Traverse through pending legacy interrupts and invoke handler for each. Also + * takes care of interrupt controller level mask/ack operation. + */ +static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); + struct dw_pcie *pci = ks_pcie->pci; + struct device *dev = pci->dev; + u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; + struct irq_chip *chip = irq_desc_get_chip(desc); + + dev_dbg(dev, ": Handling legacy irq %d\n", irq); + + /* + * The chained irq handler installation would have replaced normal + * interrupt driver handler so we need to take care of mask/unmask and + * ack operation. + */ + chained_irq_enter(chip, desc); + ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset); + chained_irq_exit(chip, desc); +} + +static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, + char *controller, int *num_irqs) +{ + int temp, max_host_irqs, legacy = 1, *host_irqs; + struct device *dev = ks_pcie->pci->dev; + struct device_node *np_pcie = dev->of_node, **np_temp; + + if (!strcmp(controller, "msi-interrupt-controller")) + legacy = 0; + + if (legacy) { + np_temp = &ks_pcie->legacy_intc_np; + max_host_irqs = PCI_NUM_INTX; + host_irqs = &ks_pcie->legacy_host_irqs[0]; + } else { + np_temp = &ks_pcie->msi_intc_np; + max_host_irqs = MAX_MSI_HOST_IRQS; + host_irqs = &ks_pcie->msi_host_irqs[0]; + } + + /* interrupt controller is in a child node */ + *np_temp = of_get_child_by_name(np_pcie, controller); + if (!(*np_temp)) { + dev_err(dev, "Node for %s is absent\n", controller); + return -EINVAL; + } + + temp = of_irq_count(*np_temp); + if (!temp) { + dev_err(dev, "No IRQ 
entries in %s\n", controller); + of_node_put(*np_temp); + return -EINVAL; + } + + if (temp > max_host_irqs) + dev_warn(dev, "Too many %s interrupts defined %u\n", + (legacy ? "legacy" : "MSI"), temp); + + /* + * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to + * 7 (MSI) + */ + for (temp = 0; temp < max_host_irqs; temp++) { + host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); + if (!host_irqs[temp]) + break; + } + + of_node_put(*np_temp); + + if (temp) { + *num_irqs = temp; + return 0; + } + + return -EINVAL; +} + +static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) +{ + int i; + + /* Legacy IRQ */ + for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { + irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], + ks_pcie_legacy_irq_handler, + ks_pcie); + } + ks_dw_pcie_enable_legacy_irqs(ks_pcie); + + /* MSI IRQ */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { + irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], + ks_pcie_msi_irq_handler, + ks_pcie); + } + } + + if (ks_pcie->error_irq > 0) + ks_dw_pcie_enable_error_irq(ks_pcie); +} + +/* + * When a PCI device does not exist during config cycles, keystone host gets a + * bus error instead of returning 0xffffffff. This handler always returns 0 + * for this kind of faults. 
+ */ +static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + unsigned long instr = *(unsigned long *) instruction_pointer(regs); + + if ((instr & 0x0e100090) == 0x00100090) { + int reg = (instr >> 12) & 15; + + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + } + + return 0; +} + +static int __init ks_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u32 val; + + ks_pcie_establish_link(ks_pcie); + ks_dw_pcie_setup_rc_app_regs(ks_pcie); + ks_pcie_setup_interrupts(ks_pcie); + writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), + pci->dbi_base + PCI_IO_BASE); + + /* update the Vendor ID */ + writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID); + + /* update the DEV_STAT_CTRL to publish right mrrs */ + val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); + val &= ~PCI_EXP_DEVCTL_READRQ; + /* set the mrrs to 256 bytes */ + val |= BIT(12); + writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); + + /* + * PCIe access errors that result into OCP errors are caught by ARM as + * "External aborts" + */ + hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, + "Asynchronous external abort"); + + return 0; +} + +static const struct dw_pcie_host_ops keystone_pcie_host_ops = { + .rd_other_conf = ks_dw_pcie_rd_other_conf, + .wr_other_conf = ks_dw_pcie_wr_other_conf, + .host_init = ks_pcie_host_init, + .msi_set_irq = ks_dw_pcie_msi_set_irq, + .msi_clear_irq = ks_dw_pcie_msi_clear_irq, + .get_msi_addr = ks_dw_pcie_get_msi_addr, + .msi_host_init = ks_dw_pcie_msi_host_init, + .msi_irq_ack = ks_dw_pcie_msi_irq_ack, + .scan_bus = ks_dw_pcie_v3_65_scan_bus, +}; + +static irqreturn_t pcie_err_irq_handler(int irq, void *priv) +{ + struct keystone_pcie *ks_pcie = priv; + + return ks_dw_pcie_handle_error_irq(ks_pcie); +} + +static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, + struct platform_device *pdev) +{ + 
struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + ret = ks_pcie_get_irq_controller_info(ks_pcie, + "legacy-interrupt-controller", + &ks_pcie->num_legacy_host_irqs); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + ret = ks_pcie_get_irq_controller_info(ks_pcie, + "msi-interrupt-controller", + &ks_pcie->num_msi_host_irqs); + if (ret) + return ret; + } + + /* + * Index 0 is the platform interrupt for error interrupt + * from RC. This is optional. + */ + ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); + if (ks_pcie->error_irq <= 0) + dev_info(dev, "no error IRQ defined\n"); + else { + ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler, + IRQF_SHARED, "pcie-error-irq", ks_pcie); + if (ret < 0) { + dev_err(dev, "failed to request error IRQ %d\n", + ks_pcie->error_irq); + return ret; + } + } + + pp->root_bus_nr = -1; + pp->ops = &keystone_pcie_host_ops; + ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct of_device_id ks_pcie_of_match[] = { + { + .type = "pci", + .compatible = "ti,keystone-pcie", + }, + { }, +}; + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = ks_dw_pcie_link_up, +}; + +static int __exit ks_pcie_remove(struct platform_device *pdev) +{ + struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); + + clk_disable_unprepare(ks_pcie->clk); + + return 0; +} + +static int __init ks_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct keystone_pcie *ks_pcie; + struct resource *res; + void __iomem *reg_p; + struct phy *phy; + int ret; + + ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); + if (!ks_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + 
ks_pcie->pci = pci; + + /* initialize SerDes Phy if present */ + phy = devm_phy_get(dev, "pcie-phy"); + if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) + return PTR_ERR(phy); + + if (!IS_ERR_OR_NULL(phy)) { + ret = phy_init(phy); + if (ret < 0) + return ret; + } + + /* index 2 is to read PCI DEVICE_ID */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + reg_p = devm_ioremap_resource(dev, res); + if (IS_ERR(reg_p)) + return PTR_ERR(reg_p); + ks_pcie->device_id = readl(reg_p) >> 16; + devm_iounmap(dev, reg_p); + devm_release_mem_region(dev, res->start, resource_size(res)); + + ks_pcie->np = dev->of_node; + platform_set_drvdata(pdev, ks_pcie); + ks_pcie->clk = devm_clk_get(dev, "pcie"); + if (IS_ERR(ks_pcie->clk)) { + dev_err(dev, "Failed to get pcie rc clock\n"); + return PTR_ERR(ks_pcie->clk); + } + ret = clk_prepare_enable(ks_pcie->clk); + if (ret) + return ret; + + platform_set_drvdata(pdev, ks_pcie); + + ret = ks_add_pcie_port(ks_pcie, pdev); + if (ret < 0) + goto fail_clk; + + return 0; +fail_clk: + clk_disable_unprepare(ks_pcie->clk); + + return ret; +} + +static struct platform_driver ks_pcie_driver __refdata = { + .probe = ks_pcie_probe, + .remove = __exit_p(ks_pcie_remove), + .driver = { + .name = "keystone-pcie", + .of_match_table = of_match_ptr(ks_pcie_of_match), + }, +}; +builtin_platform_driver(ks_pcie_driver); diff --git a/drivers/pci/controller/dwc/pci-keystone.h b/drivers/pci/controller/dwc/pci-keystone.h new file mode 100644 index 000000000000..8a13da391543 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Keystone PCI Controller's common includes + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
+ * http://www.ti.com + * + * Author: Murali Karicheri + */ + +#define MAX_MSI_HOST_IRQS 8 + +struct keystone_pcie { + struct dw_pcie *pci; + struct clk *clk; + /* PCI Device ID */ + u32 device_id; + int num_legacy_host_irqs; + int legacy_host_irqs[PCI_NUM_INTX]; + struct device_node *legacy_intc_np; + + int num_msi_host_irqs; + int msi_host_irqs[MAX_MSI_HOST_IRQS]; + struct device_node *msi_intc_np; + struct irq_domain *legacy_irq_domain; + struct device_node *np; + + int error_irq; + + /* Application register space */ + void __iomem *va_app_base; /* DT 1st resource */ + struct resource app; +}; + +/* Keystone DW specific MSI controller APIs/definitions */ +void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); +phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); + +/* Keystone specific PCI controller APIs */ +void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); +void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); +void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie); +irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie); +int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, + struct device_node *msi_intc_np); +int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); +int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val); +void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie); +void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie); +void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp); +void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); +void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); +void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); +int ks_dw_pcie_msi_host_init(struct pcie_port *pp); +int ks_dw_pcie_link_up(struct dw_pcie *pci); diff --git 
a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c new file mode 100644 index 000000000000..3724d3ef7008 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Freescale Layerscape SoCs + * + * Copyright (C) 2014 Freescale Semiconductor. + * + * Author: Minghuan Lian + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +/* PEX1/2 Misc Ports Status Register */ +#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) +#define LTSSM_STATE_SHIFT 20 +#define LTSSM_STATE_MASK 0x3f +#define LTSSM_PCIE_L0 0x11 /* L0 state */ + +/* PEX Internal Configuration Registers */ +#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ +#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ +#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ + +#define PCIE_IATU_NUM 6 + +struct ls_pcie_drvdata { + u32 lut_offset; + u32 ltssm_shift; + u32 lut_dbg; + const struct dw_pcie_host_ops *ops; + const struct dw_pcie_ops *dw_pcie_ops; +}; + +struct ls_pcie { + struct dw_pcie *pci; + void __iomem *lut; + struct regmap *scfg; + const struct ls_pcie_drvdata *drvdata; + int index; +}; + +#define to_ls_pcie(x) dev_get_drvdata((x)->dev) + +static bool ls_pcie_is_bridge(struct ls_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + u32 header_type; + + header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE); + header_type &= 0x7f; + + return header_type == PCI_HEADER_TYPE_BRIDGE; +} + +/* Clear multi-function bit */ +static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + + iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE); +} + +/* Drop MSG TLP except for Vendor MSG */ +static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) +{ + u32 val; 
+ struct dw_pcie *pci = pcie->pci; + + val = ioread32(pci->dbi_base + PCIE_STRFMR1); + val &= 0xDFFFFFFF; + iowrite32(val, pci->dbi_base + PCIE_STRFMR1); +} + +static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) +{ + int i; + + for (i = 0; i < PCIE_IATU_NUM; i++) + dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i); +} + +static int ls1021_pcie_link_up(struct dw_pcie *pci) +{ + u32 state; + struct ls_pcie *pcie = to_ls_pcie(pci); + + if (!pcie->scfg) + return 0; + + regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); + state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; + + if (state < LTSSM_PCIE_L0) + return 0; + + return 1; +} + +static int ls_pcie_link_up(struct dw_pcie *pci) +{ + struct ls_pcie *pcie = to_ls_pcie(pci); + u32 state; + + state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> + pcie->drvdata->ltssm_shift) & + LTSSM_STATE_MASK; + + if (state < LTSSM_PCIE_L0) + return 0; + + return 1; +} + +/* Forward error response of outbound non-posted requests */ +static void ls_pcie_fix_error_response(struct ls_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + + iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR); +} + +static int ls_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + + /* + * Disable outbound windows configured by the bootloader to avoid + * one transaction hitting multiple outbound windows. + * dw_pcie_setup_rc() will reconfigure the outbound windows. 
+ */ + ls_pcie_disable_outbound_atus(pcie); + ls_pcie_fix_error_response(pcie); + + dw_pcie_dbi_ro_wr_en(pci); + ls_pcie_clear_multifunction(pcie); + dw_pcie_dbi_ro_wr_dis(pci); + + ls_pcie_drop_msg_tlp(pcie); + + dw_pcie_setup_rc(pp); + + return 0; +} + +static int ls1021_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct ls_pcie *pcie = to_ls_pcie(pci); + struct device *dev = pci->dev; + u32 index[2]; + int ret; + + pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, + "fsl,pcie-scfg"); + if (IS_ERR(pcie->scfg)) { + ret = PTR_ERR(pcie->scfg); + dev_err(dev, "No syscfg phandle specified\n"); + pcie->scfg = NULL; + return ret; + } + + if (of_property_read_u32_array(dev->of_node, + "fsl,pcie-scfg", index, 2)) { + pcie->scfg = NULL; + return -EINVAL; + } + pcie->index = index[1]; + + return ls_pcie_host_init(pp); +} + +static int ls_pcie_msi_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + struct device_node *msi_node; + + /* + * The MSI domain is set by the generic of_msi_configure(). This + * .msi_host_init() function keeps us from doing the default MSI + * domain setup in dw_pcie_host_init() and also enforces the + * requirement that "msi-parent" exists. 
+ */ + msi_node = of_parse_phandle(np, "msi-parent", 0); + if (!msi_node) { + dev_err(dev, "failed to find msi-parent\n"); + return -EINVAL; + } + + return 0; +} + +static const struct dw_pcie_host_ops ls1021_pcie_host_ops = { + .host_init = ls1021_pcie_host_init, + .msi_host_init = ls_pcie_msi_host_init, +}; + +static const struct dw_pcie_host_ops ls_pcie_host_ops = { + .host_init = ls_pcie_host_init, + .msi_host_init = ls_pcie_msi_host_init, +}; + +static const struct dw_pcie_ops dw_ls1021_pcie_ops = { + .link_up = ls1021_pcie_link_up, +}; + +static const struct dw_pcie_ops dw_ls_pcie_ops = { + .link_up = ls_pcie_link_up, +}; + +static struct ls_pcie_drvdata ls1021_drvdata = { + .ops = &ls1021_pcie_host_ops, + .dw_pcie_ops = &dw_ls1021_pcie_ops, +}; + +static struct ls_pcie_drvdata ls1043_drvdata = { + .lut_offset = 0x10000, + .ltssm_shift = 24, + .lut_dbg = 0x7fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + +static struct ls_pcie_drvdata ls1046_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 24, + .lut_dbg = 0x407fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + +static struct ls_pcie_drvdata ls2080_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 0, + .lut_dbg = 0x7fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + +static struct ls_pcie_drvdata ls2088_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 0, + .lut_dbg = 0x407fc, + .ops = &ls_pcie_host_ops, + .dw_pcie_ops = &dw_ls_pcie_ops, +}; + +static const struct of_device_id ls_pcie_of_match[] = { + { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata }, + { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, + { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, + { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, + { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, + { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, + { .compatible = "fsl,ls2088a-pcie", .data = 
&ls2088_drvdata }, + { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata }, + { }, +}; + +static int __init ls_add_pcie_port(struct ls_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + int ret; + + pp->ops = pcie->drvdata->ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int __init ls_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct ls_pcie *pcie; + struct resource *dbi_base; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pcie->drvdata = of_device_get_match_data(dev); + + pci->dev = dev; + pci->ops = pcie->drvdata->dw_pcie_ops; + + pcie->pci = pci; + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset; + + if (!ls_pcie_is_bridge(pcie)) + return -ENODEV; + + platform_set_drvdata(pdev, pcie); + + ret = ls_add_pcie_port(pcie); + if (ret < 0) + return ret; + + return 0; +} + +static struct platform_driver ls_pcie_driver = { + .driver = { + .name = "layerscape-pcie", + .of_match_table = ls_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c new file mode 100644 index 000000000000..072fd7ecc29f --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Marvell Armada-8K SoCs + * + * Armada-8K PCIe Glue Layer Source Code + * + * Copyright (C) 
2016 Marvell Technology Group Ltd. + * + * Author: Yehuda Yitshak + * Author: Shadi Ammouri + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +struct armada8k_pcie { + struct dw_pcie *pci; + struct clk *clk; + struct clk *clk_reg; +}; + +#define PCIE_VENDOR_REGS_OFFSET 0x8000 + +#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0) +#define PCIE_APP_LTSSM_EN BIT(2) +#define PCIE_DEVICE_TYPE_SHIFT 4 +#define PCIE_DEVICE_TYPE_MASK 0xF +#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */ + +#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8) +#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1) +#define PCIE_GLB_STS_PHY_LINK_UP BIT(9) + +#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C) +#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20) +#define PCIE_INT_A_ASSERT_MASK BIT(9) +#define PCIE_INT_B_ASSERT_MASK BIT(10) +#define PCIE_INT_C_ASSERT_MASK BIT(11) +#define PCIE_INT_D_ASSERT_MASK BIT(12) + +#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50) +#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54) +#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C) +#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60) +/* + * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write + * allocate + */ +#define ARCACHE_DEFAULT_VALUE 0x3511 +#define AWCACHE_DEFAULT_VALUE 0x5311 + +#define DOMAIN_OUTER_SHAREABLE 0x2 +#define AX_USER_DOMAIN_MASK 0x3 +#define AX_USER_DOMAIN_SHIFT 4 + +#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev) + +static int armada8k_pcie_link_up(struct dw_pcie *pci) +{ + u32 reg; + u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; + + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG); + + if ((reg & mask) == mask) + return 1; + + dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg); + return 0; +} + +static void 
armada8k_pcie_establish_link(struct armada8k_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + u32 reg; + + if (!dw_pcie_link_up(pci)) { + /* Disable LTSSM state machine to enable configuration */ + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); + reg &= ~(PCIE_APP_LTSSM_EN); + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); + } + + /* Set the device to root complex mode */ + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); + reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); + reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); + + /* Set the PCIe master AxCache attributes */ + dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE); + dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE); + + /* Set the PCIe master AxDomain attributes */ + reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG); + reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); + reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; + dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg); + + reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG); + reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); + reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; + dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg); + + /* Enable INT A-D interrupts */ + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG); + reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | + PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg); + + if (!dw_pcie_link_up(pci)) { + /* Configuration done. 
Start LTSSM */ + reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); + reg |= PCIE_APP_LTSSM_EN; + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); + } + + /* Wait until the link becomes active again */ + if (dw_pcie_wait_for_link(pci)) + dev_err(pci->dev, "Link not up after reconfiguration\n"); +} + +static int armada8k_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct armada8k_pcie *pcie = to_armada8k_pcie(pci); + + dw_pcie_setup_rc(pp); + armada8k_pcie_establish_link(pcie); + + return 0; +} + +static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) +{ + struct armada8k_pcie *pcie = arg; + struct dw_pcie *pci = pcie->pci; + u32 val; + + /* + * Interrupts are directly handled by the device driver of the + * PCI device. However, they are also latched into the PCIe + * controller, so we simply discard them. + */ + val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG); + dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val); + + return IRQ_HANDLED; +} + +static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { + .host_init = armada8k_pcie_host_init, +}; + +static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + pp->root_bus_nr = -1; + pp->ops = &armada8k_pcie_host_ops; + + pp->irq = platform_get_irq(pdev, 0); + if (pp->irq < 0) { + dev_err(dev, "failed to get irq for port\n"); + return pp->irq; + } + + ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, + IRQF_SHARED, "armada8k-pcie", pcie); + if (ret) { + dev_err(dev, "failed to request irq %d\n", pp->irq); + return ret; + } + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host: %d\n", ret); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = armada8k_pcie_link_up, +}; + +static int 
armada8k_pcie_probe(struct platform_device *pdev) +{ + struct dw_pcie *pci; + struct armada8k_pcie *pcie; + struct device *dev = &pdev->dev; + struct resource *base; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + pcie->pci = pci; + + pcie->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pcie->clk)) + return PTR_ERR(pcie->clk); + + ret = clk_prepare_enable(pcie->clk); + if (ret) + return ret; + + pcie->clk_reg = devm_clk_get(dev, "reg"); + if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) { + ret = -EPROBE_DEFER; + goto fail; + } + if (!IS_ERR(pcie->clk_reg)) { + ret = clk_prepare_enable(pcie->clk_reg); + if (ret) + goto fail_clkreg; + } + + /* Get the dw-pcie unit configuration/control registers base. */ + base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); + if (IS_ERR(pci->dbi_base)) { + dev_err(dev, "couldn't remap regs base %p\n", base); + ret = PTR_ERR(pci->dbi_base); + goto fail_clkreg; + } + + platform_set_drvdata(pdev, pcie); + + ret = armada8k_add_pcie_port(pcie, pdev); + if (ret) + goto fail_clkreg; + + return 0; + +fail_clkreg: + clk_disable_unprepare(pcie->clk_reg); +fail: + clk_disable_unprepare(pcie->clk); + + return ret; +} + +static const struct of_device_id armada8k_pcie_of_match[] = { + { .compatible = "marvell,armada8k-pcie", }, + {}, +}; + +static struct platform_driver armada8k_pcie_driver = { + .probe = armada8k_pcie_probe, + .driver = { + .name = "armada8k-pcie", + .of_match_table = of_match_ptr(armada8k_pcie_of_match), + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(armada8k_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c new file mode 100644 index 000000000000..321b56cfd5d0 --- /dev/null +++ 
b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -0,0 +1,618 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Axis ARTPEC-6 SoC + * + * Author: Niklas Cassel + * + * Based on work done by Phil Edworthy + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev) + +enum artpec_pcie_variants { + ARTPEC6, + ARTPEC7, +}; + +struct artpec6_pcie { + struct dw_pcie *pci; + struct regmap *regmap; /* DT axis,syscon-pcie */ + void __iomem *phy_base; /* DT phy */ + enum artpec_pcie_variants variant; + enum dw_pcie_device_mode mode; +}; + +struct artpec_pcie_of_data { + enum artpec_pcie_variants variant; + enum dw_pcie_device_mode mode; +}; + +static const struct of_device_id artpec6_pcie_of_match[]; + +/* PCIe Port Logic registers (memory-mapped) */ +#define PL_OFFSET 0x700 + +#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc) +#define ACK_N_FTS_MASK GENMASK(15, 8) +#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK) + +#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0) +#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK) + +/* ARTPEC-6 specific registers */ +#define PCIECFG 0x18 +#define PCIECFG_DBG_OEN BIT(24) +#define PCIECFG_CORE_RESET_REQ BIT(21) +#define PCIECFG_LTSSM_ENABLE BIT(20) +#define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16) +#define PCIECFG_CLKREQ_B BIT(11) +#define PCIECFG_REFCLK_ENABLE BIT(10) +#define PCIECFG_PLL_ENABLE BIT(9) +#define PCIECFG_PCLK_ENABLE BIT(8) +#define PCIECFG_RISRCREN BIT(4) +#define PCIECFG_MODE_TX_DRV_EN BIT(3) +#define PCIECFG_CISRREN BIT(2) +#define PCIECFG_MACRO_ENABLE BIT(0) +/* ARTPEC-7 specific fields */ +#define PCIECFG_REFCLKSEL BIT(23) +#define PCIECFG_NOC_RESET BIT(3) + +#define PCIESTAT 0x1c +/* ARTPEC-7 specific fields */ +#define PCIESTAT_EXTREFCLK BIT(3) + +#define NOCCFG 0x40 +#define NOCCFG_ENABLE_CLK_PCIE BIT(4) +#define 
NOCCFG_POWER_PCIE_IDLEACK BIT(3) +#define NOCCFG_POWER_PCIE_IDLE BIT(2) +#define NOCCFG_POWER_PCIE_IDLEREQ BIT(1) + +#define PHY_STATUS 0x118 +#define PHY_COSPLLLOCK BIT(0) + +#define PHY_TX_ASIC_OUT 0x4040 +#define PHY_TX_ASIC_OUT_TX_ACK BIT(0) + +#define PHY_RX_ASIC_OUT 0x405c +#define PHY_RX_ASIC_OUT_ACK BIT(0) + +static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset) +{ + u32 val; + + regmap_read(artpec6_pcie->regmap, offset, &val); + return val; +} + +static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val) +{ + regmap_write(artpec6_pcie->regmap, offset, val); +} + +static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) +{ + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + struct pcie_port *pp = &pci->pp; + struct dw_pcie_ep *ep = &pci->ep; + + switch (artpec6_pcie->mode) { + case DW_PCIE_RC_TYPE: + return pci_addr - pp->cfg0_base; + case DW_PCIE_EP_TYPE: + return pci_addr - ep->phys_base; + default: + dev_err(pci->dev, "UNKNOWN device type\n"); + } + return pci_addr; +} + +static int artpec6_pcie_establish_link(struct dw_pcie *pci) +{ + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val |= PCIECFG_LTSSM_ENABLE; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + + return 0; +} + +static void artpec6_pcie_stop_link(struct dw_pcie *pci) +{ + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val &= ~PCIECFG_LTSSM_ENABLE; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup, + .start_link = artpec6_pcie_establish_link, + .stop_link = artpec6_pcie_stop_link, +}; + +static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + struct device *dev = pci->dev; + u32 val; + 
unsigned int retries; + + retries = 50; + do { + usleep_range(1000, 2000); + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + retries--; + } while (retries && + (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); + if (!retries) + dev_err(dev, "PCIe clock manager did not leave idle state\n"); + + retries = 50; + do { + usleep_range(1000, 2000); + val = readl(artpec6_pcie->phy_base + PHY_STATUS); + retries--; + } while (retries && !(val & PHY_COSPLLLOCK)); + if (!retries) + dev_err(dev, "PHY PLL did not lock\n"); +} + +static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + struct device *dev = pci->dev; + u32 val; + u16 phy_status_tx, phy_status_rx; + unsigned int retries; + + retries = 50; + do { + usleep_range(1000, 2000); + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + retries--; + } while (retries && + (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); + if (!retries) + dev_err(dev, "PCIe clock manager did not leave idle state\n"); + + retries = 50; + do { + usleep_range(1000, 2000); + phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT); + phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT); + retries--; + } while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) || + (phy_status_rx & PHY_RX_ASIC_OUT_ACK))); + if (!retries) + dev_err(dev, "PHY did not enter Pn state\n"); +} + +static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie) +{ + switch (artpec6_pcie->variant) { + case ARTPEC6: + artpec6_pcie_wait_for_phy_a6(artpec6_pcie); + break; + case ARTPEC7: + artpec6_pcie_wait_for_phy_a7(artpec6_pcie); + break; + } +} + +static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie) +{ + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ + PCIECFG_MODE_TX_DRV_EN | + PCIECFG_CISRREN | /* Reference clock term. 
100 Ohm */ + PCIECFG_MACRO_ENABLE; + val |= PCIECFG_REFCLK_ENABLE; + val &= ~PCIECFG_DBG_OEN; + val &= ~PCIECFG_CLKREQ_B; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + usleep_range(5000, 6000); + + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + val |= NOCCFG_ENABLE_CLK_PCIE; + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); + usleep_range(20, 30); + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + usleep_range(6000, 7000); + + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + val &= ~NOCCFG_POWER_PCIE_IDLEREQ; + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); +} + +static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + u32 val; + bool extrefclk; + + /* Check if external reference clock is connected */ + val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT); + extrefclk = !!(val & PCIESTAT_EXTREFCLK); + dev_dbg(pci->dev, "Using reference clock: %s\n", + extrefclk ? "external" : "internal"); + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val |= PCIECFG_RISRCREN | /* Receiver term. 
50 Ohm */ + PCIECFG_PCLK_ENABLE; + if (extrefclk) + val |= PCIECFG_REFCLKSEL; + else + val &= ~PCIECFG_REFCLKSEL; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + usleep_range(10, 20); + + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + val |= NOCCFG_ENABLE_CLK_PCIE; + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); + usleep_range(20, 30); + + val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); + val &= ~NOCCFG_POWER_PCIE_IDLEREQ; + artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); +} + +static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie) +{ + switch (artpec6_pcie->variant) { + case ARTPEC6: + artpec6_pcie_init_phy_a6(artpec6_pcie); + break; + case ARTPEC7: + artpec6_pcie_init_phy_a7(artpec6_pcie); + break; + } +} + +static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + u32 val; + + if (artpec6_pcie->variant != ARTPEC7) + return; + + /* + * Increase the N_FTS (Number of Fast Training Sequences) + * to be transmitted when transitioning from L0s to L0. + */ + val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF); + val &= ~ACK_N_FTS_MASK; + val |= ACK_N_FTS(180); + dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val); + + /* + * Set the Number of Fast Training Sequences that the core + * advertises as its N_FTS during Gen2 or Gen3 link training. 
+ */ + val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + val &= ~FAST_TRAINING_SEQ_MASK; + val |= FAST_TRAINING_SEQ(180); + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); +} + +static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie) +{ + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + switch (artpec6_pcie->variant) { + case ARTPEC6: + val |= PCIECFG_CORE_RESET_REQ; + break; + case ARTPEC7: + val &= ~PCIECFG_NOC_RESET; + break; + } + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); +} + +static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie) +{ + u32 val; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + switch (artpec6_pcie->variant) { + case ARTPEC6: + val &= ~PCIECFG_CORE_RESET_REQ; + break; + case ARTPEC7: + val |= PCIECFG_NOC_RESET; + break; + } + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + usleep_range(100, 200); +} + +static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + struct pcie_port *pp = &pci->pp; + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); +} + +static int artpec6_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + + artpec6_pcie_assert_core_reset(artpec6_pcie); + artpec6_pcie_init_phy(artpec6_pcie); + artpec6_pcie_deassert_core_reset(artpec6_pcie); + artpec6_pcie_wait_for_phy(artpec6_pcie); + artpec6_pcie_set_nfts(artpec6_pcie); + dw_pcie_setup_rc(pp); + artpec6_pcie_establish_link(pci); + dw_pcie_wait_for_link(pci); + artpec6_pcie_enable_interrupts(artpec6_pcie); + + return 0; +} + +static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { + .host_init = artpec6_pcie_host_init, +}; + +static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = artpec6_pcie->pci; + struct pcie_port *pp = &pci->pp; 
+ struct device *dev = pci->dev; + int ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq < 0) { + dev_err(dev, "failed to get MSI irq\n"); + return pp->msi_irq; + } + } + + pp->root_bus_nr = -1; + pp->ops = &artpec6_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); + enum pci_barno bar; + + artpec6_pcie_assert_core_reset(artpec6_pcie); + artpec6_pcie_init_phy(artpec6_pcie); + artpec6_pcie_deassert_core_reset(artpec6_pcie); + artpec6_pcie_wait_for_phy(artpec6_pcie); + artpec6_pcie_set_nfts(artpec6_pcie); + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); +} + +static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, u8 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); + return -EINVAL; + case PCI_EPC_IRQ_MSI: + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); + default: + dev_err(pci->dev, "UNKNOWN IRQ type\n"); + } + + return 0; +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .ep_init = artpec6_pcie_ep_init, + .raise_irq = artpec6_pcie_raise_irq, +}; + +static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie, + struct platform_device *pdev) +{ + int ret; + struct dw_pcie_ep *ep; + struct resource *res; + struct device *dev = &pdev->dev; + struct dw_pcie *pci = artpec6_pcie->pci; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); + pci->dbi_base2 = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "failed to initialize endpoint\n"); + return ret; + } + + return 0; +} + +static int artpec6_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct artpec6_pcie *artpec6_pcie; + struct resource *dbi_base; + struct resource *phy_base; + int ret; + const struct of_device_id *match; + const struct artpec_pcie_of_data *data; + enum artpec_pcie_variants variant; + enum dw_pcie_device_mode mode; + + match = of_match_device(artpec6_pcie_of_match, dev); + if (!match) + return -EINVAL; + + data = (struct artpec_pcie_of_data *)match->data; + variant = (enum artpec_pcie_variants)data->variant; + mode = (enum dw_pcie_device_mode)data->mode; + + artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL); + if (!artpec6_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + artpec6_pcie->pci = pci; + artpec6_pcie->variant = variant; + artpec6_pcie->mode = mode; + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_ioremap_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); + if (IS_ERR(artpec6_pcie->phy_base)) + return PTR_ERR(artpec6_pcie->phy_base); + + artpec6_pcie->regmap = + syscon_regmap_lookup_by_phandle(dev->of_node, + "axis,syscon-pcie"); + if (IS_ERR(artpec6_pcie->regmap)) + return PTR_ERR(artpec6_pcie->regmap); + + platform_set_drvdata(pdev, artpec6_pcie); + + switch (artpec6_pcie->mode) { + case DW_PCIE_RC_TYPE: + if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST)) + return 
-ENODEV; + + ret = artpec6_add_pcie_port(artpec6_pcie, pdev); + if (ret < 0) + return ret; + break; + case DW_PCIE_EP_TYPE: { + u32 val; + + if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP)) + return -ENODEV; + + val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); + val &= ~PCIECFG_DEVICE_TYPE_MASK; + artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); + ret = artpec6_add_pcie_ep(artpec6_pcie, pdev); + if (ret < 0) + return ret; + break; + } + default: + dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode); + } + + return 0; +} + +static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = { + .variant = ARTPEC6, + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = { + .variant = ARTPEC6, + .mode = DW_PCIE_EP_TYPE, +}; + +static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = { + .variant = ARTPEC7, + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = { + .variant = ARTPEC7, + .mode = DW_PCIE_EP_TYPE, +}; + +static const struct of_device_id artpec6_pcie_of_match[] = { + { + .compatible = "axis,artpec6-pcie", + .data = &artpec6_pcie_rc_of_data, + }, + { + .compatible = "axis,artpec6-pcie-ep", + .data = &artpec6_pcie_ep_of_data, + }, + { + .compatible = "axis,artpec7-pcie", + .data = &artpec7_pcie_rc_of_data, + }, + { + .compatible = "axis,artpec7-pcie-ep", + .data = &artpec7_pcie_ep_of_data, + }, + {}, +}; + +static struct platform_driver artpec6_pcie_driver = { + .probe = artpec6_pcie_probe, + .driver = { + .name = "artpec6-pcie", + .of_match_table = artpec6_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(artpec6_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c new file mode 100644 index 000000000000..1eec4415a77f --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -0,0 +1,422 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Synopsys 
DesignWare PCIe Endpoint controller driver + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I + */ + +#include + +#include "pcie-designware.h" +#include +#include + +void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) +{ + struct pci_epc *epc = ep->epc; + + pci_epc_linkup(epc); +} + +static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar, + int flags) +{ + u32 reg; + + reg = PCI_BASE_ADDRESS_0 + (4 * bar); + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writel_dbi2(pci, reg, 0x0); + dw_pcie_writel_dbi(pci, reg, 0x0); + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dw_pcie_writel_dbi2(pci, reg + 4, 0x0); + dw_pcie_writel_dbi(pci, reg + 4, 0x0); + } + dw_pcie_dbi_ro_wr_dis(pci); +} + +void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) +{ + __dw_pcie_ep_reset_bar(pci, bar, 0); +} + +static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, + struct pci_epf_header *hdr) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid); + dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid); + dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid); + dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code); + dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, + hdr->subclass_code | hdr->baseclass_code << 8); + dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE, + hdr->cache_line_size); + dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID, + hdr->subsys_vendor_id); + dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id); + dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN, + hdr->interrupt_pin); + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar, + dma_addr_t cpu_addr, + enum dw_pcie_as_type as_type) +{ + int ret; + u32 free_win; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + free_win = find_first_zero_bit(ep->ib_window_map, 
ep->num_ib_windows); + if (free_win >= ep->num_ib_windows) { + dev_err(pci->dev, "No free inbound window\n"); + return -EINVAL; + } + + ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr, + as_type); + if (ret < 0) { + dev_err(pci->dev, "Failed to program IB window\n"); + return ret; + } + + ep->bar_to_atu[bar] = free_win; + set_bit(free_win, ep->ib_window_map); + + return 0; +} + +static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr, + u64 pci_addr, size_t size) +{ + u32 free_win; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows); + if (free_win >= ep->num_ob_windows) { + dev_err(pci->dev, "No free outbound window\n"); + return -EINVAL; + } + + dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM, + phys_addr, pci_addr, size); + + set_bit(free_win, ep->ob_window_map); + ep->outbound_addr[free_win] = phys_addr; + + return 0; +} + +static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar = epf_bar->barno; + u32 atu_index = ep->bar_to_atu[bar]; + + __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags); + + dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); + clear_bit(atu_index, ep->ib_window_map); +} + +static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar) +{ + int ret; + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar = epf_bar->barno; + size_t size = epf_bar->size; + int flags = epf_bar->flags; + enum dw_pcie_as_type as_type; + u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); + + if (!(flags & PCI_BASE_ADDRESS_SPACE)) + as_type = DW_PCIE_AS_MEM; + else + as_type = DW_PCIE_AS_IO; + + ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type); + if (ret) + return ret; + + 
dw_pcie_dbi_ro_wr_en(pci); + + dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1)); + dw_pcie_writel_dbi(pci, reg, flags); + + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1)); + dw_pcie_writel_dbi(pci, reg + 4, 0); + } + + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, + u32 *atu_index) +{ + u32 index; + + for (index = 0; index < ep->num_ob_windows; index++) { + if (ep->outbound_addr[index] != addr) + continue; + *atu_index = index; + return 0; + } + + return -EINVAL; +} + +static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, + phys_addr_t addr) +{ + int ret; + u32 atu_index; + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + ret = dw_pcie_find_index(ep, addr, &atu_index); + if (ret < 0) + return; + + dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); + clear_bit(atu_index, ep->ob_window_map); +} + +static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, + phys_addr_t addr, + u64 pci_addr, size_t size) +{ + int ret; + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size); + if (ret) { + dev_err(pci->dev, "Failed to enable address\n"); + return ret; + } + + return 0; +} + +static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no) +{ + int val; + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); + if (!(val & MSI_CAP_MSI_EN_MASK)) + return -EINVAL; + + val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; + return val; +} + +static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int) +{ + int val; + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + val = dw_pcie_readw_dbi(pci, 
MSI_MESSAGE_CONTROL); + val &= ~MSI_CAP_MMC_MASK; + val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK; + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val); + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, + enum pci_epc_irq_type type, u8 interrupt_num) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + + if (!ep->ops->raise_irq) + return -EINVAL; + + return ep->ops->raise_irq(ep, func_no, type, interrupt_num); +} + +static void dw_pcie_ep_stop(struct pci_epc *epc) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + if (!pci->ops->stop_link) + return; + + pci->ops->stop_link(pci); +} + +static int dw_pcie_ep_start(struct pci_epc *epc) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + if (!pci->ops->start_link) + return -EINVAL; + + return pci->ops->start_link(pci); +} + +static const struct pci_epc_ops epc_ops = { + .write_header = dw_pcie_ep_write_header, + .set_bar = dw_pcie_ep_set_bar, + .clear_bar = dw_pcie_ep_clear_bar, + .map_addr = dw_pcie_ep_map_addr, + .unmap_addr = dw_pcie_ep_unmap_addr, + .set_msi = dw_pcie_ep_set_msi, + .get_msi = dw_pcie_ep_get_msi, + .raise_irq = dw_pcie_ep_raise_irq, + .start = dw_pcie_ep_start, + .stop = dw_pcie_ep_stop, +}; + +int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, + u8 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epc *epc = ep->epc; + u16 msg_ctrl, msg_data; + u32 msg_addr_lower, msg_addr_upper; + u64 msg_addr; + bool has_upper; + int ret; + + /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. 
*/ + msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); + has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); + msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32); + if (has_upper) { + msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32); + msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64); + } else { + msg_addr_upper = 0; + msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32); + } + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, + epc->mem->page_size); + if (ret) + return ret; + + writel(msg_data | (interrupt_num - 1), ep->msi_mem); + + dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); + + return 0; +} + +void dw_pcie_ep_exit(struct dw_pcie_ep *ep) +{ + struct pci_epc *epc = ep->epc; + + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, + epc->mem->page_size); + + pci_epc_mem_exit(epc); +} + +int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ + int ret; + void *addr; + struct pci_epc *epc; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + + if (!pci->dbi_base || !pci->dbi_base2) { + dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); + return -EINVAL; + } + + ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); + if (ret < 0) { + dev_err(dev, "Unable to read *num-ib-windows* property\n"); + return ret; + } + if (ep->num_ib_windows > MAX_IATU_IN) { + dev_err(dev, "Invalid *num-ib-windows*\n"); + return -EINVAL; + } + + ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); + if (ret < 0) { + dev_err(dev, "Unable to read *num-ob-windows* property\n"); + return ret; + } + if (ep->num_ob_windows > MAX_IATU_OUT) { + dev_err(dev, "Invalid *num-ob-windows*\n"); + return -EINVAL; + } + + ep->ib_window_map = devm_kzalloc(dev, sizeof(long) * + BITS_TO_LONGS(ep->num_ib_windows), + GFP_KERNEL); + if (!ep->ib_window_map) + return -ENOMEM; + + 
ep->ob_window_map = devm_kzalloc(dev, sizeof(long) * + BITS_TO_LONGS(ep->num_ob_windows), + GFP_KERNEL); + if (!ep->ob_window_map) + return -ENOMEM; + + addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows, + GFP_KERNEL); + if (!addr) + return -ENOMEM; + ep->outbound_addr = addr; + + if (ep->ops->ep_init) + ep->ops->ep_init(ep); + + epc = devm_pci_epc_create(dev, &epc_ops); + if (IS_ERR(epc)) { + dev_err(dev, "Failed to create epc device\n"); + return PTR_ERR(epc); + } + + ret = of_property_read_u8(np, "max-functions", &epc->max_functions); + if (ret < 0) + epc->max_functions = 1; + + ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, + ep->page_size); + if (ret < 0) { + dev_err(dev, "Failed to initialize address space\n"); + return ret; + } + + ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, + epc->mem->page_size); + if (!ep->msi_mem) { + dev_err(dev, "Failed to reserve memory for MSI\n"); + return -ENOMEM; + } + + epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER; + EPC_FEATURE_SET_BAR(epc->features, BAR_0); + + ep->epc = epc; + epc_set_drvdata(epc, ep); + dw_pcie_setup(pci); + + return 0; +} diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c new file mode 100644 index 000000000000..781aa03aeede --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -0,0 +1,722 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe host controller driver + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Author: Jingoo Han + */ + +#include +#include +#include +#include +#include +#include + +#include "../../pci.h" +#include "pcie-designware.h" + +static struct pci_ops dw_pcie_ops; + +static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci; + + if (pp->ops->rd_own_conf) + return pp->ops->rd_own_conf(pp, where, size, val); + + pci = to_dw_pcie_from_pp(pp); + return dw_pcie_read(pci->dbi_base + where, size, val); +} + +static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, + u32 val) +{ + struct dw_pcie *pci; + + if (pp->ops->wr_own_conf) + return pp->ops->wr_own_conf(pp, where, size, val); + + pci = to_dw_pcie_from_pp(pp); + return dw_pcie_write(pci->dbi_base + where, size, val); +} + +static void dw_msi_ack_irq(struct irq_data *d) +{ + irq_chip_ack_parent(d); +} + +static void dw_msi_mask_irq(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void dw_msi_unmask_irq(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip dw_pcie_msi_irq_chip = { + .name = "PCI-MSI", + .irq_ack = dw_msi_ack_irq, + .irq_mask = dw_msi_mask_irq, + .irq_unmask = dw_msi_unmask_irq, +}; + +static struct msi_domain_info dw_pcie_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), + .chip = &dw_pcie_msi_irq_chip, +}; + +/* MSI int handler */ +irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) +{ + int i, pos, irq; + u32 val, num_ctrls; + irqreturn_t ret = IRQ_NONE; + + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + for (i = 0; i < num_ctrls; i++) { + dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + + (i * MSI_REG_CTRL_BLOCK_SIZE), + 4, &val); + if (!val) + continue; + + ret = IRQ_HANDLED; + pos = 0; + while ((pos = find_next_bit((unsigned long *) &val, + MAX_MSI_IRQS_PER_CTRL, + pos)) != MAX_MSI_IRQS_PER_CTRL) { 
+ irq = irq_find_mapping(pp->irq_domain, + (i * MAX_MSI_IRQS_PER_CTRL) + + pos); + generic_handle_irq(irq); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + + (i * MSI_REG_CTRL_BLOCK_SIZE), + 4, 1 << pos); + pos++; + } + } + + return ret; +} + +/* Chained MSI interrupt service routine */ +static void dw_chained_msi_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct pcie_port *pp; + + chained_irq_enter(chip, desc); + + pp = irq_desc_get_handler_data(desc); + dw_handle_msi_irq(pp); + + chained_irq_exit(chip, desc); +} + +static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + u64 msi_target; + + if (pp->ops->get_msi_addr) + msi_target = pp->ops->get_msi_addr(pp); + else + msi_target = (u64)pp->msi_data; + + msg->address_lo = lower_32_bits(msi_target); + msg->address_hi = upper_32_bits(msi_target); + + if (pp->ops->get_msi_data) + msg->data = pp->ops->get_msi_data(pp, data->hwirq); + else + msg->data = data->hwirq; + + dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)data->hwirq, msg->address_hi, msg->address_lo); +} + +static int dw_pci_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static void dw_pci_bottom_mask(struct irq_data *data) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + unsigned int res, bit, ctrl; + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + if (pp->ops->msi_clear_irq) { + pp->ops->msi_clear_irq(pp, data->hwirq); + } else { + ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; + + pp->irq_status[ctrl] &= ~(1 << bit); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, + pp->irq_status[ctrl]); + } + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static void 
dw_pci_bottom_unmask(struct irq_data *data) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + unsigned int res, bit, ctrl; + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + if (pp->ops->msi_set_irq) { + pp->ops->msi_set_irq(pp, data->hwirq); + } else { + ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; + + pp->irq_status[ctrl] |= 1 << bit; + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, + pp->irq_status[ctrl]); + } + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static void dw_pci_bottom_ack(struct irq_data *d) +{ + struct msi_desc *msi = irq_data_get_msi_desc(d); + struct pcie_port *pp; + + pp = msi_desc_to_pci_sysdata(msi); + + if (pp->ops->msi_irq_ack) + pp->ops->msi_irq_ack(d->hwirq, pp); +} + +static struct irq_chip dw_pci_msi_bottom_irq_chip = { + .name = "DWPCI-MSI", + .irq_ack = dw_pci_bottom_ack, + .irq_compose_msi_msg = dw_pci_setup_msi_msg, + .irq_set_affinity = dw_pci_msi_set_affinity, + .irq_mask = dw_pci_bottom_mask, + .irq_unmask = dw_pci_bottom_unmask, +}; + +static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct pcie_port *pp = domain->host_data; + unsigned long flags; + u32 i; + int bit; + + raw_spin_lock_irqsave(&pp->lock, flags); + + bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors, + order_base_2(nr_irqs)); + + raw_spin_unlock_irqrestore(&pp->lock, flags); + + if (bit < 0) + return -ENOSPC; + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, bit + i, + &dw_pci_msi_bottom_irq_chip, + pp, handle_edge_irq, + NULL, NULL); + + return 0; +} + +static void dw_pcie_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *data = irq_domain_get_irq_data(domain, virq); + struct pcie_port *pp = irq_data_get_irq_chip_data(data); + unsigned long flags; + + 
raw_spin_lock_irqsave(&pp->lock, flags); + + bitmap_release_region(pp->msi_irq_in_use, data->hwirq, + order_base_2(nr_irqs)); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static const struct irq_domain_ops dw_pcie_msi_domain_ops = { + .alloc = dw_pcie_irq_domain_alloc, + .free = dw_pcie_irq_domain_free, +}; + +int dw_pcie_allocate_domains(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); + + pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, + &dw_pcie_msi_domain_ops, pp); + if (!pp->irq_domain) { + dev_err(pci->dev, "Failed to create IRQ domain\n"); + return -ENOMEM; + } + + pp->msi_domain = pci_msi_create_irq_domain(fwnode, + &dw_pcie_msi_domain_info, + pp->irq_domain); + if (!pp->msi_domain) { + dev_err(pci->dev, "Failed to create MSI domain\n"); + irq_domain_remove(pp->irq_domain); + return -ENOMEM; + } + + return 0; +} + +void dw_pcie_free_msi(struct pcie_port *pp) +{ + irq_set_chained_handler(pp->msi_irq, NULL); + irq_set_handler_data(pp->msi_irq, NULL); + + irq_domain_remove(pp->msi_domain); + irq_domain_remove(pp->irq_domain); +} + +void dw_pcie_msi_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct page *page; + u64 msi_target; + + page = alloc_page(GFP_KERNEL); + pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, pp->msi_data)) { + dev_err(dev, "Failed to map MSI data\n"); + __free_page(page); + return; + } + msi_target = (u64)pp->msi_data; + + /* Program the msi_data */ + dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, + lower_32_bits(msi_target)); + dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, + upper_32_bits(msi_target)); +} + +int dw_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + struct platform_device 
*pdev = to_platform_device(dev); + struct resource_entry *win, *tmp; + struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; + struct resource *cfg_res; + int ret; + + raw_spin_lock_init(&pci->pp.lock); + + cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); + if (cfg_res) { + pp->cfg0_size = resource_size(cfg_res) >> 1; + pp->cfg1_size = resource_size(cfg_res) >> 1; + pp->cfg0_base = cfg_res->start; + pp->cfg1_base = cfg_res->start + pp->cfg0_size; + } else if (!pp->va_cfg0_base) { + dev_err(dev, "Missing *config* reg space\n"); + } + + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return -ENOMEM; + + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, + &bridge->windows, &pp->io_base); + if (ret) + return ret; + + ret = devm_request_pci_bus_resources(dev, &bridge->windows); + if (ret) + goto error; + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { + switch (resource_type(win->res)) { + case IORESOURCE_IO: + ret = pci_remap_iospace(win->res, pp->io_base); + if (ret) { + dev_warn(dev, "Error %d: failed to map resource %pR\n", + ret, win->res); + resource_list_destroy_entry(win); + } else { + pp->io = win->res; + pp->io->name = "I/O"; + pp->io_size = resource_size(pp->io); + pp->io_bus_addr = pp->io->start - win->offset; + } + break; + case IORESOURCE_MEM: + pp->mem = win->res; + pp->mem->name = "MEM"; + pp->mem_size = resource_size(pp->mem); + pp->mem_bus_addr = pp->mem->start - win->offset; + break; + case 0: + pp->cfg = win->res; + pp->cfg0_size = resource_size(pp->cfg) >> 1; + pp->cfg1_size = resource_size(pp->cfg) >> 1; + pp->cfg0_base = pp->cfg->start; + pp->cfg1_base = pp->cfg->start + pp->cfg0_size; + break; + case IORESOURCE_BUS: + pp->busn = win->res; + break; + } + } + + if (!pci->dbi_base) { + pci->dbi_base = devm_pci_remap_cfgspace(dev, + pp->cfg->start, + resource_size(pp->cfg)); + if (!pci->dbi_base) { + dev_err(dev, "Error with ioremap\n"); + ret 
= -ENOMEM; + goto error; + } + } + + pp->mem_base = pp->mem->start; + + if (!pp->va_cfg0_base) { + pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, + pp->cfg0_base, pp->cfg0_size); + if (!pp->va_cfg0_base) { + dev_err(dev, "Error with ioremap in function\n"); + ret = -ENOMEM; + goto error; + } + } + + if (!pp->va_cfg1_base) { + pp->va_cfg1_base = devm_pci_remap_cfgspace(dev, + pp->cfg1_base, + pp->cfg1_size); + if (!pp->va_cfg1_base) { + dev_err(dev, "Error with ioremap\n"); + ret = -ENOMEM; + goto error; + } + } + + ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport); + if (ret) + pci->num_viewport = 2; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + /* + * If a specific SoC driver needs to change the + * default number of vectors, it needs to implement + * the set_num_vectors callback. + */ + if (!pp->ops->set_num_vectors) { + pp->num_vectors = MSI_DEF_NUM_VECTORS; + } else { + pp->ops->set_num_vectors(pp); + + if (pp->num_vectors > MAX_MSI_IRQS || + pp->num_vectors == 0) { + dev_err(dev, + "Invalid number of vectors\n"); + goto error; + } + } + + if (!pp->ops->msi_host_init) { + ret = dw_pcie_allocate_domains(pp); + if (ret) + goto error; + + if (pp->msi_irq) + irq_set_chained_handler_and_data(pp->msi_irq, + dw_chained_msi_isr, + pp); + } else { + ret = pp->ops->msi_host_init(pp); + if (ret < 0) + goto error; + } + } + + if (pp->ops->host_init) { + ret = pp->ops->host_init(pp); + if (ret) + goto error; + } + + pp->root_bus_nr = pp->busn->start; + + bridge->dev.parent = dev; + bridge->sysdata = pp; + bridge->busnr = pp->root_bus_nr; + bridge->ops = &dw_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) + goto error; + + bus = bridge->bus; + + if (pp->ops->scan_bus) + pp->ops->scan_bus(pp); + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + 
pci_bus_add_devices(bus); + return 0; + +error: + pci_free_host_bridge(bridge); + return ret; +} + +static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val) +{ + int ret, type; + u32 busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + if (pp->ops->rd_other_conf) + return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val); + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); + + if (bus->parent->number == pp->root_bus_nr) { + type = PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; + } else { + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; + } + + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_read(va_cfg_base + where, size, val); + if (pci->num_viewport <= 2) + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + + return ret; +} + +static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 val) +{ + int ret, type; + u32 busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + if (pp->ops->wr_other_conf) + return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val); + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); + + if (bus->parent->number == pp->root_bus_nr) { + type = PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; + } else { + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; + } + + dw_pcie_prog_outbound_atu(pci, 
PCIE_ATU_REGION_INDEX1, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_write(va_cfg_base + where, size, val); + if (pci->num_viewport <= 2) + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + + return ret; +} + +static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, + int dev) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + /* If there is no link, then there is no device */ + if (bus->number != pp->root_bus_nr) { + if (!dw_pcie_link_up(pci)) + return 0; + } + + /* Access only one slot on each root port */ + if (bus->number == pp->root_bus_nr && dev > 0) + return 0; + + return 1; +} + +static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct pcie_port *pp = bus->sysdata; + + if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (bus->number == pp->root_bus_nr) + return dw_pcie_rd_own_conf(pp, where, size, val); + + return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); +} + +static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct pcie_port *pp = bus->sysdata; + + if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (bus->number == pp->root_bus_nr) + return dw_pcie_wr_own_conf(pp, where, size, val); + + return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); +} + +static struct pci_ops dw_pcie_ops = { + .read = dw_pcie_rd_conf, + .write = dw_pcie_wr_conf, +}; + +static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) +{ + u32 val; + + val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); + if (val == 0xffffffff) + return 1; + + return 0; +} + +void dw_pcie_setup_rc(struct pcie_port *pp) +{ + u32 val, ctrl, num_ctrls; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + dw_pcie_setup(pci); + + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + 
+ /* Initialize IRQ Status array */ + for (ctrl = 0; ctrl < num_ctrls; ctrl++) + dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + 4, &pp->irq_status[ctrl]); + + /* Setup RC BARs */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); + + /* Setup interrupt pins */ + dw_pcie_dbi_ro_wr_en(pci); + val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); + val &= 0xffff00ff; + val |= 0x00000100; + dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); + dw_pcie_dbi_ro_wr_dis(pci); + + /* Setup bus numbers */ + val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); + val &= 0xff000000; + val |= 0x00ff0100; + dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); + + /* Setup command register */ + val = dw_pcie_readl_dbi(pci, PCI_COMMAND); + val &= 0xffff0000; + val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SERR; + dw_pcie_writel_dbi(pci, PCI_COMMAND, val); + + /* + * If the platform provides ->rd_other_conf, it means the platform + * uses its own address translation component rather than ATU, so + * we should not program the ATU here. + */ + if (!pp->ops->rd_other_conf) { + /* Get iATU unroll support */ + pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); + dev_dbg(pci->dev, "iATU unroll: %s\n", + pci->iatu_unroll_enabled ? 
"enabled" : "disabled"); + + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_MEM, pp->mem_base, + pp->mem_bus_addr, pp->mem_size); + if (pci->num_viewport > 2) + dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + } + + dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); + + /* Enable write permission for the DBI read-only register */ + dw_pcie_dbi_ro_wr_en(pci); + /* Program correct class for RC */ + dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); + /* Better disable write permission right after the update */ + dw_pcie_dbi_ro_wr_dis(pci); + + dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); + val |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); +} diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c new file mode 100644 index 000000000000..5937fed4c938 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe RC driver for Synopsys DesignWare Core + * + * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) + * + * Authors: Joao Pinto + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +struct dw_plat_pcie { + struct dw_pcie *pci; + struct regmap *regmap; + enum dw_pcie_device_mode mode; +}; + +struct dw_plat_pcie_of_data { + enum dw_pcie_device_mode mode; +}; + +static const struct of_device_id dw_plat_pcie_of_match[]; + +static int dw_plat_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + dw_pcie_setup_rc(pp); + dw_pcie_wait_for_link(pci); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + return 0; +} + +static void dw_plat_set_num_vectors(struct pcie_port *pp) +{ + pp->num_vectors = MAX_MSI_IRQS; +} + +static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { + .host_init = dw_plat_pcie_host_init, + .set_num_vectors = dw_plat_set_num_vectors, +}; + +static int dw_plat_pcie_establish_link(struct dw_pcie *pci) +{ + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .start_link = dw_plat_pcie_establish_link, +}; + +static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar; + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); +} + +static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, + u8 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); + return -EINVAL; + case PCI_EPC_IRQ_MSI: + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); + default: + dev_err(pci->dev, "UNKNOWN IRQ type\n"); + } + + return 0; +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .ep_init = dw_plat_pcie_ep_init, + .raise_irq = dw_plat_pcie_ep_raise_irq, +}; + +static int dw_plat_add_pcie_port(struct 
dw_plat_pcie *dw_plat_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = dw_plat_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + pp->irq = platform_get_irq(pdev, 1); + if (pp->irq < 0) + return pp->irq; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq(pdev, 0); + if (pp->msi_irq < 0) + return pp->msi_irq; + } + + pp->root_bus_nr = -1; + pp->ops = &dw_plat_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "Failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie, + struct platform_device *pdev) +{ + int ret; + struct dw_pcie_ep *ep; + struct resource *res; + struct device *dev = &pdev->dev; + struct dw_pcie *pci = dw_plat_pcie->pci; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); + pci->dbi_base2 = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "Failed to initialize endpoint\n"); + return ret; + } + return 0; +} + +static int dw_plat_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_plat_pcie *dw_plat_pcie; + struct dw_pcie *pci; + struct resource *res; /* Resource from DT */ + int ret; + const struct of_device_id *match; + const struct dw_plat_pcie_of_data *data; + enum dw_pcie_device_mode mode; + + match = of_match_device(dw_plat_pcie_of_match, dev); + if (!match) + return -EINVAL; + + data = (struct dw_plat_pcie_of_data *)match->data; + mode = (enum dw_pcie_device_mode)data->mode; + + dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL); + if (!dw_plat_pcie) + return -ENOMEM; + + 
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + dw_plat_pcie->pci = pci; + dw_plat_pcie->mode = mode; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + pci->dbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + platform_set_drvdata(pdev, dw_plat_pcie); + + switch (dw_plat_pcie->mode) { + case DW_PCIE_RC_TYPE: + if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST)) + return -ENODEV; + + ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev); + if (ret < 0) + return ret; + break; + case DW_PCIE_EP_TYPE: + if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP)) + return -ENODEV; + + ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev); + if (ret < 0) + return ret; + break; + default: + dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode); + } + + return 0; +} + +static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = { + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = { + .mode = DW_PCIE_EP_TYPE, +}; + +static const struct of_device_id dw_plat_pcie_of_match[] = { + { + .compatible = "snps,dw-pcie", + .data = &dw_plat_pcie_rc_of_data, + }, + { + .compatible = "snps,dw-pcie-ep", + .data = &dw_plat_pcie_ep_of_data, + }, + {}, +}; + +static struct platform_driver dw_plat_pcie_driver = { + .driver = { + .name = "dw-pcie", + .of_match_table = dw_plat_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = dw_plat_pcie_probe, +}; +builtin_platform_driver(dw_plat_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c new file mode 100644 index 000000000000..778c4f76a884 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe host controller driver + * + * 
Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Jingoo Han + */ + +#include +#include +#include + +#include "pcie-designware.h" + +/* PCIe Port Logic registers */ +#define PLR_OFFSET 0x700 +#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) +#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4) +#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29) + +int dw_pcie_read(void __iomem *addr, int size, u32 *val) +{ + if ((uintptr_t)addr & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (size == 4) { + *val = readl(addr); + } else if (size == 2) { + *val = readw(addr); + } else if (size == 1) { + *val = readb(addr); + } else { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; +} + +int dw_pcie_write(void __iomem *addr, int size, u32 val) +{ + if ((uintptr_t)addr & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (size == 4) + writel(val, addr); + else if (size == 2) + writew(val, addr); + else if (size == 1) + writeb(val, addr); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size) +{ + int ret; + u32 val; + + if (pci->ops->read_dbi) + return pci->ops->read_dbi(pci, base, reg, size); + + ret = dw_pcie_read(base + reg, size, &val); + if (ret) + dev_err(pci->dev, "Read DBI address failed\n"); + + return val; +} + +void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val) +{ + int ret; + + if (pci->ops->write_dbi) { + pci->ops->write_dbi(pci, base, reg, size, val); + return; + } + + ret = dw_pcie_write(base + reg, size, val); + if (ret) + dev_err(pci->dev, "Write DBI address failed\n"); +} + +static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + + return dw_pcie_readl_dbi(pci, offset + reg); +} + +static void 
dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, + u32 val) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + + dw_pcie_writel_dbi(pci, offset + reg, val); +} + +static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, + int type, u64 cpu_addr, + u64 pci_addr, u32 size) +{ + u32 retries, val; + + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, + lower_32_bits(cpu_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, + upper_32_bits(cpu_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT, + lower_32_bits(cpu_addr + size - 1)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, + lower_32_bits(pci_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, + upper_32_bits(pci_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, + type); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_ENABLE); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. 
+ */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_ob_unroll(pci, index, + PCIE_ATU_UNR_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return; + + usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + } + dev_err(pci->dev, "Outbound iATU is not being enabled\n"); +} + +void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, + u64 cpu_addr, u64 pci_addr, u32 size) +{ + u32 retries, val; + + if (pci->ops->cpu_addr_fixup) + cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); + + if (pci->iatu_unroll_enabled) { + dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr, + pci_addr, size); + return; + } + + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, + PCIE_ATU_REGION_OUTBOUND | index); + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, + lower_32_bits(cpu_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, + upper_32_bits(cpu_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, + lower_32_bits(cpu_addr + size - 1)); + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, + lower_32_bits(pci_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, + upper_32_bits(pci_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. 
+ */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); + if (val & PCIE_ATU_ENABLE) + return; + + usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + } + dev_err(pci->dev, "Outbound iATU is not being enabled\n"); +} + +static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) +{ + u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); + + return dw_pcie_readl_dbi(pci, offset + reg); +} + +static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, + u32 val) +{ + u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); + + dw_pcie_writel_dbi(pci, offset + reg, val); +} + +static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, + int bar, u64 cpu_addr, + enum dw_pcie_as_type as_type) +{ + int type; + u32 retries, val; + + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, + lower_32_bits(cpu_addr)); + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, + upper_32_bits(cpu_addr)); + + switch (as_type) { + case DW_PCIE_AS_MEM: + type = PCIE_ATU_TYPE_MEM; + break; + case DW_PCIE_AS_IO: + type = PCIE_ATU_TYPE_IO; + break; + default: + return -EINVAL; + } + + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type); + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_ENABLE | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. 
+ */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_ib_unroll(pci, index, + PCIE_ATU_UNR_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return 0; + + usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + } + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); + + return -EBUSY; +} + +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, + u64 cpu_addr, enum dw_pcie_as_type as_type) +{ + int type; + u32 retries, val; + + if (pci->iatu_unroll_enabled) + return dw_pcie_prog_inbound_atu_unroll(pci, index, bar, + cpu_addr, as_type); + + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | + index); + dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); + dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); + + switch (as_type) { + case DW_PCIE_AS_MEM: + type = PCIE_ATU_TYPE_MEM; + break; + case DW_PCIE_AS_IO: + type = PCIE_ATU_TYPE_IO; + break; + default: + return -EINVAL; + } + + dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE + | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. 
+ */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); + if (val & PCIE_ATU_ENABLE) + return 0; + + usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + } + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); + + return -EBUSY; +} + +void dw_pcie_disable_atu(struct dw_pcie *pci, int index, + enum dw_pcie_region_type type) +{ + int region; + + switch (type) { + case DW_PCIE_REGION_INBOUND: + region = PCIE_ATU_REGION_INBOUND; + break; + case DW_PCIE_REGION_OUTBOUND: + region = PCIE_ATU_REGION_OUTBOUND; + break; + default: + return; + } + + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); +} + +int dw_pcie_wait_for_link(struct dw_pcie *pci) +{ + int retries; + + /* Check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (dw_pcie_link_up(pci)) { + dev_info(pci->dev, "Link up\n"); + return 0; + } + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + dev_err(pci->dev, "Phy link never came up\n"); + + return -ETIMEDOUT; +} + +int dw_pcie_link_up(struct dw_pcie *pci) +{ + u32 val; + + if (pci->ops->link_up) + return pci->ops->link_up(pci); + + val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1); + return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) && + (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))); +} + +void dw_pcie_setup(struct dw_pcie *pci) +{ + int ret; + u32 val; + u32 lanes; + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + + ret = of_property_read_u32(np, "num-lanes", &lanes); + if (ret) + lanes = 0; + + /* Set the number of lanes */ + val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); + val &= ~PORT_LINK_MODE_MASK; + switch (lanes) { + case 1: + val |= PORT_LINK_MODE_1_LANES; + break; + case 2: + val |= PORT_LINK_MODE_2_LANES; + break; + case 4: + val |= PORT_LINK_MODE_4_LANES; + break; + case 8: + val |= PORT_LINK_MODE_8_LANES; + break; + 
default: + dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes); + return; + } + dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); + + /* Set link width speed control register */ + val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + val &= ~PORT_LOGIC_LINK_WIDTH_MASK; + switch (lanes) { + case 1: + val |= PORT_LOGIC_LINK_WIDTH_1_LANES; + break; + case 2: + val |= PORT_LOGIC_LINK_WIDTH_2_LANES; + break; + case 4: + val |= PORT_LOGIC_LINK_WIDTH_4_LANES; + break; + case 8: + val |= PORT_LOGIC_LINK_WIDTH_8_LANES; + break; + } + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); +} diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h new file mode 100644 index 000000000000..bee4e2535a61 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Synopsys DesignWare PCIe host controller driver + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Author: Jingoo Han + */ + +#ifndef _PCIE_DESIGNWARE_H +#define _PCIE_DESIGNWARE_H + +#include +#include +#include +#include + +#include +#include + +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +/* Parameters for the waiting for iATU enabled routine */ +#define LINK_WAIT_MAX_IATU_RETRIES 5 +#define LINK_WAIT_IATU_MIN 9000 +#define LINK_WAIT_IATU_MAX 10000 + +/* Synopsys-specific PCIe configuration registers */ +#define PCIE_PORT_LINK_CONTROL 0x710 +#define PORT_LINK_MODE_MASK (0x3f << 16) +#define PORT_LINK_MODE_1_LANES (0x1 << 16) +#define PORT_LINK_MODE_2_LANES (0x3 << 16) +#define PORT_LINK_MODE_4_LANES (0x7 << 16) +#define PORT_LINK_MODE_8_LANES (0xf << 16) + +#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C +#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) +#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) +#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) +#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) +#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) +#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) + +#define PCIE_MSI_ADDR_LO 0x820 +#define PCIE_MSI_ADDR_HI 0x824 +#define PCIE_MSI_INTR0_ENABLE 0x828 +#define PCIE_MSI_INTR0_MASK 0x82C +#define PCIE_MSI_INTR0_STATUS 0x830 + +#define PCIE_ATU_VIEWPORT 0x900 +#define PCIE_ATU_REGION_INBOUND (0x1 << 31) +#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) +#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) +#define PCIE_ATU_CR1 0x904 +#define PCIE_ATU_TYPE_MEM (0x0 << 0) +#define PCIE_ATU_TYPE_IO (0x2 << 0) +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) +#define PCIE_ATU_CR2 0x908 +#define PCIE_ATU_ENABLE (0x1 << 31) +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) +#define PCIE_ATU_LOWER_BASE 0x90C +#define PCIE_ATU_UPPER_BASE 0x910 +#define PCIE_ATU_LIMIT 0x914 +#define 
PCIE_ATU_LOWER_TARGET 0x918 +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) +#define PCIE_ATU_UPPER_TARGET 0x91C + +#define PCIE_MISC_CONTROL_1_OFF 0x8BC +#define PCIE_DBI_RO_WR_EN (0x1 << 0) + +/* + * iATU Unroll-specific register definitions + * From 4.80 core version the address translation will be made by unroll + */ +#define PCIE_ATU_UNR_REGION_CTRL1 0x00 +#define PCIE_ATU_UNR_REGION_CTRL2 0x04 +#define PCIE_ATU_UNR_LOWER_BASE 0x08 +#define PCIE_ATU_UNR_UPPER_BASE 0x0C +#define PCIE_ATU_UNR_LIMIT 0x10 +#define PCIE_ATU_UNR_LOWER_TARGET 0x14 +#define PCIE_ATU_UNR_UPPER_TARGET 0x18 + +/* Register address builder */ +#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ + ((0x3 << 20) | ((region) << 9)) + +#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ + ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) + +#define MSI_MESSAGE_CONTROL 0x52 +#define MSI_CAP_MMC_SHIFT 1 +#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT) +#define MSI_CAP_MME_SHIFT 4 +#define MSI_CAP_MSI_EN_MASK 0x1 +#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) +#define MSI_MESSAGE_ADDR_L32 0x54 +#define MSI_MESSAGE_ADDR_U32 0x58 +#define MSI_MESSAGE_DATA_32 0x58 +#define MSI_MESSAGE_DATA_64 0x5C + +#define MAX_MSI_IRQS 256 +#define MAX_MSI_IRQS_PER_CTRL 32 +#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) +#define MSI_REG_CTRL_BLOCK_SIZE 12 +#define MSI_DEF_NUM_VECTORS 32 + +/* Maximum number of inbound/outbound iATUs */ +#define MAX_IATU_IN 256 +#define MAX_IATU_OUT 256 + +struct pcie_port; +struct dw_pcie; +struct dw_pcie_ep; + +enum dw_pcie_region_type { + DW_PCIE_REGION_UNKNOWN, + DW_PCIE_REGION_INBOUND, + DW_PCIE_REGION_OUTBOUND, +}; + +enum dw_pcie_device_mode { + DW_PCIE_UNKNOWN_TYPE, + DW_PCIE_EP_TYPE, + DW_PCIE_LEG_EP_TYPE, + DW_PCIE_RC_TYPE, +}; + +struct dw_pcie_host_ops { + int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); + int (*wr_own_conf)(struct pcie_port *pp, 
int where, int size, u32 val); + int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val); + int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); + int (*host_init)(struct pcie_port *pp); + void (*msi_set_irq)(struct pcie_port *pp, int irq); + void (*msi_clear_irq)(struct pcie_port *pp, int irq); + phys_addr_t (*get_msi_addr)(struct pcie_port *pp); + u32 (*get_msi_data)(struct pcie_port *pp, int pos); + void (*scan_bus)(struct pcie_port *pp); + void (*set_num_vectors)(struct pcie_port *pp); + int (*msi_host_init)(struct pcie_port *pp); + void (*msi_irq_ack)(int irq, struct pcie_port *pp); +}; + +struct pcie_port { + u8 root_bus_nr; + u64 cfg0_base; + void __iomem *va_cfg0_base; + u32 cfg0_size; + u64 cfg1_base; + void __iomem *va_cfg1_base; + u32 cfg1_size; + resource_size_t io_base; + phys_addr_t io_bus_addr; + u32 io_size; + u64 mem_base; + phys_addr_t mem_bus_addr; + u32 mem_size; + struct resource *cfg; + struct resource *io; + struct resource *mem; + struct resource *busn; + int irq; + const struct dw_pcie_host_ops *ops; + int msi_irq; + struct irq_domain *irq_domain; + struct irq_domain *msi_domain; + dma_addr_t msi_data; + u32 num_vectors; + u32 irq_status[MAX_MSI_CTRLS]; + raw_spinlock_t lock; + DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); +}; + +enum dw_pcie_as_type { + DW_PCIE_AS_UNKNOWN, + DW_PCIE_AS_MEM, + DW_PCIE_AS_IO, +}; + +struct dw_pcie_ep_ops { + void (*ep_init)(struct dw_pcie_ep *ep); + int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, u8 interrupt_num); +}; + +struct dw_pcie_ep { + struct pci_epc *epc; + struct dw_pcie_ep_ops *ops; + phys_addr_t phys_base; + size_t addr_size; + size_t page_size; + u8 bar_to_atu[6]; + phys_addr_t *outbound_addr; + unsigned long *ib_window_map; + unsigned long *ob_window_map; + u32 num_ib_windows; + u32 num_ob_windows; + void __iomem *msi_mem; + phys_addr_t 
msi_mem_phys; +}; + +struct dw_pcie_ops { + u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr); + u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, + size_t size); + void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, + size_t size, u32 val); + int (*link_up)(struct dw_pcie *pcie); + int (*start_link)(struct dw_pcie *pcie); + void (*stop_link)(struct dw_pcie *pcie); +}; + +struct dw_pcie { + struct device *dev; + void __iomem *dbi_base; + void __iomem *dbi_base2; + u32 num_viewport; + u8 iatu_unroll_enabled; + struct pcie_port pp; + struct dw_pcie_ep ep; + const struct dw_pcie_ops *ops; +}; + +#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) + +#define to_dw_pcie_from_ep(endpoint) \ + container_of((endpoint), struct dw_pcie, ep) + +int dw_pcie_read(void __iomem *addr, int size, u32 *val); +int dw_pcie_write(void __iomem *addr, int size, u32 val); + +u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size); +void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, + size_t size, u32 val); +int dw_pcie_link_up(struct dw_pcie *pci); +int dw_pcie_wait_for_link(struct dw_pcie *pci); +void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, + int type, u64 cpu_addr, u64 pci_addr, + u32 size); +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, + u64 cpu_addr, enum dw_pcie_as_type as_type); +void dw_pcie_disable_atu(struct dw_pcie *pci, int index, + enum dw_pcie_region_type type); +void dw_pcie_setup(struct dw_pcie *pci); + +static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val); +} + +static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4); +} + +static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val); 
+} + +static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2); +} + +static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val); +} + +static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1); +} + +static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) +{ + __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); +} + +static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) +{ + return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); +} + +static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) +{ + u32 reg; + u32 val; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = dw_pcie_readl_dbi(pci, reg); + val |= PCIE_DBI_RO_WR_EN; + dw_pcie_writel_dbi(pci, reg, val); +} + +static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci) +{ + u32 reg; + u32 val; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = dw_pcie_readl_dbi(pci, reg); + val &= ~PCIE_DBI_RO_WR_EN; + dw_pcie_writel_dbi(pci, reg, val); +} + +#ifdef CONFIG_PCIE_DW_HOST +irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); +void dw_pcie_msi_init(struct pcie_port *pp); +void dw_pcie_free_msi(struct pcie_port *pp); +void dw_pcie_setup_rc(struct pcie_port *pp); +int dw_pcie_host_init(struct pcie_port *pp); +int dw_pcie_allocate_domains(struct pcie_port *pp); +#else +static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) +{ + return IRQ_NONE; +} + +static inline void dw_pcie_msi_init(struct pcie_port *pp) +{ +} + +static inline void dw_pcie_free_msi(struct pcie_port *pp) +{ +} + +static inline void dw_pcie_setup_rc(struct pcie_port *pp) +{ +} + +static inline int dw_pcie_host_init(struct pcie_port *pp) +{ + return 0; +} + +static inline int dw_pcie_allocate_domains(struct pcie_port *pp) +{ + return 0; +} +#endif + +#ifdef CONFIG_PCIE_DW_EP +void 
dw_pcie_ep_linkup(struct dw_pcie_ep *ep); +int dw_pcie_ep_init(struct dw_pcie_ep *ep); +void dw_pcie_ep_exit(struct dw_pcie_ep *ep); +int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, + u8 interrupt_num); +void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); +#else +static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) +{ +} + +static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ + return 0; +} + +static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) +{ +} + +static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, + u8 interrupt_num) +{ + return 0; +} + +static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) +{ +} +#endif +#endif /* _PCIE_DESIGNWARE_H */ diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c new file mode 100644 index 000000000000..6d9e1b2b8f7b --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-hisi.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for HiSilicon SoCs + * + * Copyright (C) 2015 HiSilicon Co., Ltd. 
http://www.hisilicon.com + * + * Authors: Zhou Wang + * Dacai Zhu + * Gabriele Paoloni + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../pci.h" + +#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) + +static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct pci_config_window *cfg = bus->sysdata; + int dev = PCI_SLOT(devfn); + + if (bus->number == cfg->busr.start) { + /* access only one slot on each root port */ + if (dev > 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return pci_generic_config_read32(bus, devfn, where, + size, val); + } + + return pci_generic_config_read(bus, devfn, where, size, val); +} + +static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct pci_config_window *cfg = bus->sysdata; + int dev = PCI_SLOT(devfn); + + if (bus->number == cfg->busr.start) { + /* access only one slot on each root port */ + if (dev > 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return pci_generic_config_write32(bus, devfn, where, + size, val); + } + + return pci_generic_config_write(bus, devfn, where, size, val); +} + +static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct pci_config_window *cfg = bus->sysdata; + void __iomem *reg_base = cfg->priv; + + if (bus->number == cfg->busr.start) + return reg_base + where; + else + return pci_ecam_map_bus(bus, devfn, where); +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) + +static int hisi_pcie_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + struct acpi_pci_root *root = acpi_driver_data(adev); + struct resource *res; + void __iomem *reg_base; + int ret; + + /* + * Retrieve RC base and size from a HISI0081 device with _UID + * matching our segment. 
+ */ + res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res); + if (ret) { + dev_err(dev, "can't get rc base address\n"); + return -ENOMEM; + } + + reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); + if (!reg_base) + return -ENOMEM; + + cfg->priv = reg_base; + return 0; +} + +struct pci_ecam_ops hisi_pcie_ops = { + .bus_shift = 20, + .init = hisi_pcie_init, + .pci_ops = { + .map_bus = hisi_pcie_map_bus, + .read = hisi_pcie_rd_conf, + .write = hisi_pcie_wr_conf, + } +}; + +#endif + +#ifdef CONFIG_PCI_HISI + +#include "pcie-designware.h" + +#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 +#define PCIE_HIP06_CTRL_OFF 0x1000 +#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c) +#define PCIE_LTSSM_LINKUP_STATE 0x11 +#define PCIE_LTSSM_STATE_MASK 0x3F + +#define to_hisi_pcie(x) dev_get_drvdata((x)->dev) + +struct hisi_pcie; + +struct pcie_soc_ops { + int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie); +}; + +struct hisi_pcie { + struct dw_pcie *pci; + struct regmap *subctrl; + u32 port_id; + const struct pcie_soc_ops *soc_ops; +}; + +/* HipXX PCIe host only supports 32-bit config access */ +static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, + u32 *val) +{ + u32 reg; + u32 reg_val; + void *walker = ®_val; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + walker += (where & 0x3); + reg = where & ~0x3; + reg_val = dw_pcie_readl_dbi(pci, reg); + + if (size == 1) + *val = *(u8 __force *) walker; + else if (size == 2) + *val = *(u16 __force *) walker; + else if (size == 4) + *val = reg_val; + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +/* HipXX PCIe host only supports 32-bit config access */ +static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, + u32 val) +{ + u32 reg_val; + u32 reg; + void *walker = ®_val; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + walker += (where & 
0x3); + reg = where & ~0x3; + if (size == 4) + dw_pcie_writel_dbi(pci, reg, val); + else if (size == 2) { + reg_val = dw_pcie_readl_dbi(pci, reg); + *(u16 __force *) walker = val; + dw_pcie_writel_dbi(pci, reg, reg_val); + } else if (size == 1) { + reg_val = dw_pcie_readl_dbi(pci, reg); + *(u8 __force *) walker = val; + dw_pcie_writel_dbi(pci, reg, reg_val); + } else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) +{ + u32 val; + + regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + + 0x100 * hisi_pcie->port_id, &val); + + return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); +} + +static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) +{ + struct dw_pcie *pci = hisi_pcie->pci; + u32 val; + + val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4); + + return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); +} + +static int hisi_pcie_link_up(struct dw_pcie *pci) +{ + struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci); + + return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); +} + +static const struct dw_pcie_host_ops hisi_pcie_host_ops = { + .rd_own_conf = hisi_pcie_cfg_read, + .wr_own_conf = hisi_pcie_cfg_write, +}; + +static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = hisi_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + u32 port_id; + + if (of_property_read_u32(dev->of_node, "port-id", &port_id)) { + dev_err(dev, "failed to read port-id\n"); + return -EINVAL; + } + if (port_id > 3) { + dev_err(dev, "Invalid port-id: %d\n", port_id); + return -EINVAL; + } + hisi_pcie->port_id = port_id; + + pp->ops = &hisi_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = hisi_pcie_link_up, +}; 
+ +static int hisi_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct hisi_pcie *hisi_pcie; + struct resource *reg; + int ret; + + hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL); + if (!hisi_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + hisi_pcie->pci = pci; + + hisi_pcie->soc_ops = of_device_get_match_data(dev); + + hisi_pcie->subctrl = + syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); + if (IS_ERR(hisi_pcie->subctrl)) { + dev_err(dev, "cannot get subctrl base\n"); + return PTR_ERR(hisi_pcie->subctrl); + } + + reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + platform_set_drvdata(pdev, hisi_pcie); + + ret = hisi_add_pcie_port(hisi_pcie, pdev); + if (ret) + return ret; + + return 0; +} + +static struct pcie_soc_ops hip05_ops = { + &hisi_pcie_link_up_hip05 +}; + +static struct pcie_soc_ops hip06_ops = { + &hisi_pcie_link_up_hip06 +}; + +static const struct of_device_id hisi_pcie_of_match[] = { + { + .compatible = "hisilicon,hip05-pcie", + .data = (void *) &hip05_ops, + }, + { + .compatible = "hisilicon,hip06-pcie", + .data = (void *) &hip06_ops, + }, + {}, +}; + +static struct platform_driver hisi_pcie_driver = { + .probe = hisi_pcie_probe, + .driver = { + .name = "hisi-pcie", + .of_match_table = hisi_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(hisi_pcie_driver); + +static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pci_ecam_ops *ops; + + ops = (struct pci_ecam_ops *)of_device_get_match_data(dev); + return pci_host_common_probe(pdev, ops); +} + +static int hisi_pcie_platform_init(struct pci_config_window *cfg) +{ + struct 
device *dev = cfg->parent; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + void __iomem *reg_base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(dev, "missing \"reg[1]\"property\n"); + return -EINVAL; + } + + reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); + if (!reg_base) + return -ENOMEM; + + cfg->priv = reg_base; + return 0; +} + +struct pci_ecam_ops hisi_pcie_platform_ops = { + .bus_shift = 20, + .init = hisi_pcie_platform_init, + .pci_ops = { + .map_bus = hisi_pcie_map_bus, + .read = hisi_pcie_rd_conf, + .write = hisi_pcie_wr_conf, + } +}; + +static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = { + { + .compatible = "hisilicon,hip06-pcie-ecam", + .data = (void *) &hisi_pcie_platform_ops, + }, + { + .compatible = "hisilicon,hip07-pcie-ecam", + .data = (void *) &hisi_pcie_platform_ops, + }, + {}, +}; + +static struct platform_driver hisi_pcie_almost_ecam_driver = { + .probe = hisi_pcie_almost_ecam_probe, + .driver = { + .name = "hisi-pcie-almost-ecam", + .of_match_table = hisi_pcie_almost_ecam_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(hisi_pcie_almost_ecam_driver); + +#endif +#endif diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c new file mode 100644 index 000000000000..3611d6ce9a92 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for HiSilicon STB SoCs + * + * Copyright (C) 2016-2017 HiSilicon Co., Ltd. 
http://www.hisilicon.com + * + * Authors: Ruqiang Ju + * Jianguo Sun + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +#define to_histb_pcie(x) dev_get_drvdata((x)->dev) + +#define PCIE_SYS_CTRL0 0x0000 +#define PCIE_SYS_CTRL1 0x0004 +#define PCIE_SYS_CTRL7 0x001C +#define PCIE_SYS_CTRL13 0x0034 +#define PCIE_SYS_CTRL15 0x003C +#define PCIE_SYS_CTRL16 0x0040 +#define PCIE_SYS_CTRL17 0x0044 + +#define PCIE_SYS_STAT0 0x0100 +#define PCIE_SYS_STAT4 0x0110 + +#define PCIE_RDLH_LINK_UP BIT(5) +#define PCIE_XMLH_LINK_UP BIT(15) +#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) +#define PCIE_APP_LTSSM_ENABLE BIT(11) + +#define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28) +#define PCIE_WM_EP 0 +#define PCIE_WM_LEGACY BIT(1) +#define PCIE_WM_RC BIT(30) + +#define PCIE_LTSSM_STATE_MASK GENMASK(5, 0) +#define PCIE_LTSSM_STATE_ACTIVE 0x11 + +struct histb_pcie { + struct dw_pcie *pci; + struct clk *aux_clk; + struct clk *pipe_clk; + struct clk *sys_clk; + struct clk *bus_clk; + struct phy *phy; + struct reset_control *soft_reset; + struct reset_control *sys_reset; + struct reset_control *bus_reset; + void __iomem *ctrl; + int reset_gpio; + struct regulator *vpcie; +}; + +static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg) +{ + return readl(histb_pcie->ctrl + reg); +} + +static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val) +{ + writel(val, histb_pcie->ctrl + reg); +} + +static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct histb_pcie *hipcie = to_histb_pcie(pci); + u32 val; + + val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); + if (enable) + val |= PCIE_ELBI_SLV_DBI_ENABLE; + else + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val); +} + +static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable) +{ + struct dw_pcie *pci = 
to_dw_pcie_from_pp(pp); + struct histb_pcie *hipcie = to_histb_pcie(pci); + u32 val; + + val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1); + if (enable) + val |= PCIE_ELBI_SLV_DBI_ENABLE; + else + val &= ~PCIE_ELBI_SLV_DBI_ENABLE; + histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val); +} + +static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + u32 val; + + histb_pcie_dbi_r_mode(&pci->pp, true); + dw_pcie_read(base + reg, size, &val); + histb_pcie_dbi_r_mode(&pci->pp, false); + + return val; +} + +static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + histb_pcie_dbi_w_mode(&pci->pp, true); + dw_pcie_write(base + reg, size, val); + histb_pcie_dbi_w_mode(&pci->pp, false); +} + +static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where, + int size, u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + int ret; + + histb_pcie_dbi_r_mode(pp, true); + ret = dw_pcie_read(pci->dbi_base + where, size, val); + histb_pcie_dbi_r_mode(pp, false); + + return ret; +} + +static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where, + int size, u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + int ret; + + histb_pcie_dbi_w_mode(pp, true); + ret = dw_pcie_write(pci->dbi_base + where, size, val); + histb_pcie_dbi_w_mode(pp, false); + + return ret; +} + +static int histb_pcie_link_up(struct dw_pcie *pci) +{ + struct histb_pcie *hipcie = to_histb_pcie(pci); + u32 regval; + u32 status; + + regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0); + status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4); + status &= PCIE_LTSSM_STATE_MASK; + if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) && + (status == PCIE_LTSSM_STATE_ACTIVE)) + return 1; + + return 0; +} + +static int histb_pcie_establish_link(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct histb_pcie *hipcie = to_histb_pcie(pci); + u32 regval; + + if 
(dw_pcie_link_up(pci)) { + dev_info(pci->dev, "Link already up\n"); + return 0; + } + + /* PCIe RC work mode */ + regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); + regval &= ~PCIE_DEVICE_TYPE_MASK; + regval |= PCIE_WM_RC; + histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval); + + /* setup root complex */ + dw_pcie_setup_rc(pp); + + /* assert LTSSM enable */ + regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7); + regval |= PCIE_APP_LTSSM_ENABLE; + histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval); + + return dw_pcie_wait_for_link(pci); +} + +static int histb_pcie_host_init(struct pcie_port *pp) +{ + histb_pcie_establish_link(pp); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + return 0; +} + +static struct dw_pcie_host_ops histb_pcie_host_ops = { + .rd_own_conf = histb_pcie_rd_own_conf, + .wr_own_conf = histb_pcie_wr_own_conf, + .host_init = histb_pcie_host_init, +}; + +static void histb_pcie_host_disable(struct histb_pcie *hipcie) +{ + reset_control_assert(hipcie->soft_reset); + reset_control_assert(hipcie->sys_reset); + reset_control_assert(hipcie->bus_reset); + + clk_disable_unprepare(hipcie->aux_clk); + clk_disable_unprepare(hipcie->pipe_clk); + clk_disable_unprepare(hipcie->sys_clk); + clk_disable_unprepare(hipcie->bus_clk); + + if (gpio_is_valid(hipcie->reset_gpio)) + gpio_set_value_cansleep(hipcie->reset_gpio, 0); + + if (hipcie->vpcie) + regulator_disable(hipcie->vpcie); +} + +static int histb_pcie_host_enable(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct histb_pcie *hipcie = to_histb_pcie(pci); + struct device *dev = pci->dev; + int ret; + + /* power on PCIe device if have */ + if (hipcie->vpcie) { + ret = regulator_enable(hipcie->vpcie); + if (ret) { + dev_err(dev, "failed to enable regulator: %d\n", ret); + return ret; + } + } + + if (gpio_is_valid(hipcie->reset_gpio)) + gpio_set_value_cansleep(hipcie->reset_gpio, 1); + + ret = clk_prepare_enable(hipcie->bus_clk); + if (ret) { + dev_err(dev, "cannot 
prepare/enable bus clk\n"); + goto err_bus_clk; + } + + ret = clk_prepare_enable(hipcie->sys_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable sys clk\n"); + goto err_sys_clk; + } + + ret = clk_prepare_enable(hipcie->pipe_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable pipe clk\n"); + goto err_pipe_clk; + } + + ret = clk_prepare_enable(hipcie->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clk\n"); + goto err_aux_clk; + } + + reset_control_assert(hipcie->soft_reset); + reset_control_deassert(hipcie->soft_reset); + + reset_control_assert(hipcie->sys_reset); + reset_control_deassert(hipcie->sys_reset); + + reset_control_assert(hipcie->bus_reset); + reset_control_deassert(hipcie->bus_reset); + + return 0; + +err_aux_clk: + clk_disable_unprepare(hipcie->pipe_clk); +err_pipe_clk: + clk_disable_unprepare(hipcie->sys_clk); +err_sys_clk: + clk_disable_unprepare(hipcie->bus_clk); +err_bus_clk: + if (hipcie->vpcie) + regulator_disable(hipcie->vpcie); + + return ret; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .read_dbi = histb_pcie_read_dbi, + .write_dbi = histb_pcie_write_dbi, + .link_up = histb_pcie_link_up, +}; + +static int histb_pcie_probe(struct platform_device *pdev) +{ + struct histb_pcie *hipcie; + struct dw_pcie *pci; + struct pcie_port *pp; + struct resource *res; + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + enum of_gpio_flags of_flags; + unsigned long flag = GPIOF_DIR_OUT; + int ret; + + hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL); + if (!hipcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + hipcie->pci = pci; + pp = &pci->pp; + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); + hipcie->ctrl = devm_ioremap_resource(dev, res); + if (IS_ERR(hipcie->ctrl)) { + dev_err(dev, "cannot get control reg base\n"); + return PTR_ERR(hipcie->ctrl); + } + + 
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi"); + pci->dbi_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pci->dbi_base)) { + dev_err(dev, "cannot get rc-dbi base\n"); + return PTR_ERR(pci->dbi_base); + } + + hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); + if (IS_ERR(hipcie->vpcie)) { + if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER) + return -EPROBE_DEFER; + hipcie->vpcie = NULL; + } + + hipcie->reset_gpio = of_get_named_gpio_flags(np, + "reset-gpios", 0, &of_flags); + if (of_flags & OF_GPIO_ACTIVE_LOW) + flag |= GPIOF_ACTIVE_LOW; + if (gpio_is_valid(hipcie->reset_gpio)) { + ret = devm_gpio_request_one(dev, hipcie->reset_gpio, + flag, "PCIe device power control"); + if (ret) { + dev_err(dev, "unable to request gpio\n"); + return ret; + } + } + + hipcie->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(hipcie->aux_clk)) { + dev_err(dev, "Failed to get PCIe aux clk\n"); + return PTR_ERR(hipcie->aux_clk); + } + + hipcie->pipe_clk = devm_clk_get(dev, "pipe"); + if (IS_ERR(hipcie->pipe_clk)) { + dev_err(dev, "Failed to get PCIe pipe clk\n"); + return PTR_ERR(hipcie->pipe_clk); + } + + hipcie->sys_clk = devm_clk_get(dev, "sys"); + if (IS_ERR(hipcie->sys_clk)) { + dev_err(dev, "Failed to get PCIEe sys clk\n"); + return PTR_ERR(hipcie->sys_clk); + } + + hipcie->bus_clk = devm_clk_get(dev, "bus"); + if (IS_ERR(hipcie->bus_clk)) { + dev_err(dev, "Failed to get PCIe bus clk\n"); + return PTR_ERR(hipcie->bus_clk); + } + + hipcie->soft_reset = devm_reset_control_get(dev, "soft"); + if (IS_ERR(hipcie->soft_reset)) { + dev_err(dev, "couldn't get soft reset\n"); + return PTR_ERR(hipcie->soft_reset); + } + + hipcie->sys_reset = devm_reset_control_get(dev, "sys"); + if (IS_ERR(hipcie->sys_reset)) { + dev_err(dev, "couldn't get sys reset\n"); + return PTR_ERR(hipcie->sys_reset); + } + + hipcie->bus_reset = devm_reset_control_get(dev, "bus"); + if (IS_ERR(hipcie->bus_reset)) { + dev_err(dev, "couldn't get bus reset\n"); + return 
PTR_ERR(hipcie->bus_reset); + } + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq < 0) { + dev_err(dev, "Failed to get MSI IRQ\n"); + return pp->msi_irq; + } + } + + hipcie->phy = devm_phy_get(dev, "phy"); + if (IS_ERR(hipcie->phy)) { + dev_info(dev, "no pcie-phy found\n"); + hipcie->phy = NULL; + /* fall through here! + * if no pcie-phy found, phy init + * should be done under boot! + */ + } else { + phy_init(hipcie->phy); + } + + pp->root_bus_nr = -1; + pp->ops = &histb_pcie_host_ops; + + platform_set_drvdata(pdev, hipcie); + + ret = histb_pcie_host_enable(pp); + if (ret) { + dev_err(dev, "failed to enable host\n"); + return ret; + } + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int histb_pcie_remove(struct platform_device *pdev) +{ + struct histb_pcie *hipcie = platform_get_drvdata(pdev); + + histb_pcie_host_disable(hipcie); + + if (hipcie->phy) + phy_exit(hipcie->phy); + + return 0; +} + +static const struct of_device_id histb_pcie_of_match[] = { + { .compatible = "hisilicon,hi3798cv200-pcie", }, + {}, +}; +MODULE_DEVICE_TABLE(of, histb_pcie_of_match); + +static struct platform_driver histb_pcie_platform_driver = { + .probe = histb_pcie_probe, + .remove = histb_pcie_remove, + .driver = { + .name = "histb-pcie", + .of_match_table = histb_pcie_of_match, + }, +}; +module_platform_driver(histb_pcie_platform_driver); + +MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c new file mode 100644 index 000000000000..d2970a009eb5 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Kirin Phone SoCs + * + * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. 
+ * http://www.huawei.com + * + * Author: Xiaowei Song + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pcie-designware.h" + +#define to_kirin_pcie(x) dev_get_drvdata((x)->dev) + +#define REF_CLK_FREQ 100000000 + +/* PCIe ELBI registers */ +#define SOC_PCIECTRL_CTRL0_ADDR 0x000 +#define SOC_PCIECTRL_CTRL1_ADDR 0x004 +#define SOC_PCIEPHY_CTRL2_ADDR 0x008 +#define SOC_PCIEPHY_CTRL3_ADDR 0x00c +#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) + +/* info located in APB */ +#define PCIE_APP_LTSSM_ENABLE 0x01c +#define PCIE_APB_PHY_CTRL0 0x0 +#define PCIE_APB_PHY_CTRL1 0x4 +#define PCIE_APB_PHY_STATUS0 0x400 +#define PCIE_LINKUP_ENABLE (0x8020) +#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) +#define PIPE_CLK_STABLE (0x1 << 19) +#define PHY_REF_PAD_BIT (0x1 << 8) +#define PHY_PWR_DOWN_BIT (0x1 << 22) +#define PHY_RST_ACK_BIT (0x1 << 16) + +/* info located in sysctrl */ +#define SCTRL_PCIE_CMOS_OFFSET 0x60 +#define SCTRL_PCIE_CMOS_BIT 0x10 +#define SCTRL_PCIE_ISO_OFFSET 0x44 +#define SCTRL_PCIE_ISO_BIT 0x30 +#define SCTRL_PCIE_HPCLK_OFFSET 0x190 +#define SCTRL_PCIE_HPCLK_BIT 0x184000 +#define SCTRL_PCIE_OE_OFFSET 0x14a +#define PCIE_DEBOUNCE_PARAM 0xF0F400 +#define PCIE_OE_BYPASS (0x3 << 28) + +/* peri_crg ctrl */ +#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 +#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 + +/* Time for delay */ +#define REF_2_PERST_MIN 20000 +#define REF_2_PERST_MAX 25000 +#define PERST_2_ACCESS_MIN 10000 +#define PERST_2_ACCESS_MAX 12000 +#define LINK_WAIT_MIN 900 +#define LINK_WAIT_MAX 1000 +#define PIPE_CLK_WAIT_MIN 550 +#define PIPE_CLK_WAIT_MAX 600 +#define TIME_CMOS_MIN 100 +#define TIME_CMOS_MAX 105 +#define TIME_PHY_PD_MIN 10 +#define TIME_PHY_PD_MAX 11 + +struct kirin_pcie { + struct dw_pcie *pci; + void __iomem *apb_base; + void __iomem *phy_base; + struct regmap *crgctrl; + struct regmap *sysctrl; + struct clk *apb_sys_clk; + struct 
clk *apb_phy_clk; + struct clk *phy_ref_clk; + struct clk *pcie_aclk; + struct clk *pcie_aux_clk; + int gpio_id_reset; +}; + +/* Registers in PCIeCTRL */ +static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie, + u32 val, u32 reg) +{ + writel(val, kirin_pcie->apb_base + reg); +} + +static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg) +{ + return readl(kirin_pcie->apb_base + reg); +} + +/* Registers in PCIePHY */ +static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie, + u32 val, u32 reg) +{ + writel(val, kirin_pcie->phy_base + reg); +} + +static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg) +{ + return readl(kirin_pcie->phy_base + reg); +} + +static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); + if (IS_ERR(kirin_pcie->phy_ref_clk)) + return PTR_ERR(kirin_pcie->phy_ref_clk); + + kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux"); + if (IS_ERR(kirin_pcie->pcie_aux_clk)) + return PTR_ERR(kirin_pcie->pcie_aux_clk); + + kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); + if (IS_ERR(kirin_pcie->apb_phy_clk)) + return PTR_ERR(kirin_pcie->apb_phy_clk); + + kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); + if (IS_ERR(kirin_pcie->apb_sys_clk)) + return PTR_ERR(kirin_pcie->apb_sys_clk); + + kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk"); + if (IS_ERR(kirin_pcie->pcie_aclk)) + return PTR_ERR(kirin_pcie->pcie_aclk); + + return 0; +} + +static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *apb; + struct resource *phy; + struct resource *dbi; + + apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); + kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); + if (IS_ERR(kirin_pcie->apb_base)) + return 
PTR_ERR(kirin_pcie->apb_base); + + phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); + if (IS_ERR(kirin_pcie->phy_base)) + return PTR_ERR(kirin_pcie->phy_base); + + dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); + if (IS_ERR(kirin_pcie->pci->dbi_base)) + return PTR_ERR(kirin_pcie->pci->dbi_base); + + kirin_pcie->crgctrl = + syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl"); + if (IS_ERR(kirin_pcie->crgctrl)) + return PTR_ERR(kirin_pcie->crgctrl); + + kirin_pcie->sysctrl = + syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl"); + if (IS_ERR(kirin_pcie->sysctrl)) + return PTR_ERR(kirin_pcie->sysctrl); + + return 0; +} + +static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie) +{ + struct device *dev = kirin_pcie->pci->dev; + u32 reg_val; + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); + reg_val &= ~PHY_REF_PAD_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0); + reg_val &= ~PHY_PWR_DOWN_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0); + usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX); + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); + reg_val &= ~PHY_RST_ACK_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); + + usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX); + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); + if (reg_val & PIPE_CLK_STABLE) { + dev_err(dev, "PIPE clk is not stable\n"); + return -EINVAL; + } + + return 0; +} + +static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie) +{ + u32 val; + + regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val); + val |= PCIE_DEBOUNCE_PARAM; + val &= ~PCIE_OE_BYPASS; + regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val); +} + +static int 
kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable) +{ + int ret = 0; + + if (!enable) + goto close_clk; + + ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ); + if (ret) + return ret; + + ret = clk_prepare_enable(kirin_pcie->phy_ref_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(kirin_pcie->apb_sys_clk); + if (ret) + goto apb_sys_fail; + + ret = clk_prepare_enable(kirin_pcie->apb_phy_clk); + if (ret) + goto apb_phy_fail; + + ret = clk_prepare_enable(kirin_pcie->pcie_aclk); + if (ret) + goto aclk_fail; + + ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk); + if (ret) + goto aux_clk_fail; + + return 0; + +close_clk: + clk_disable_unprepare(kirin_pcie->pcie_aux_clk); +aux_clk_fail: + clk_disable_unprepare(kirin_pcie->pcie_aclk); +aclk_fail: + clk_disable_unprepare(kirin_pcie->apb_phy_clk); +apb_phy_fail: + clk_disable_unprepare(kirin_pcie->apb_sys_clk); +apb_sys_fail: + clk_disable_unprepare(kirin_pcie->phy_ref_clk); + + return ret; +} + +static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie) +{ + int ret; + + /* Power supply for Host */ + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); + usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); + kirin_pcie_oe_enable(kirin_pcie); + + ret = kirin_pcie_clk_ctrl(kirin_pcie, true); + if (ret) + return ret; + + /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); + regmap_write(kirin_pcie->crgctrl, + CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); + + ret = kirin_pcie_phy_init(kirin_pcie); + if (ret) + goto close_clk; + + /* perst assert Endpoint */ + if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) { + usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); + ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1); + if (ret) + goto close_clk; + usleep_range(PERST_2_ACCESS_MIN, 
PERST_2_ACCESS_MAX); + + return 0; + } + +close_clk: + kirin_pcie_clk_ctrl(kirin_pcie, false); + return ret; +} + +static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, + bool on) +{ + u32 val; + + val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR); + if (on) + val = val | PCIE_ELBI_SLV_DBI_ENABLE; + else + val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; + + kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR); +} + +static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, + bool on) +{ + u32 val; + + val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR); + if (on) + val = val | PCIE_ELBI_SLV_DBI_ENABLE; + else + val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; + + kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR); +} + +static int kirin_pcie_rd_own_conf(struct pcie_port *pp, + int where, int size, u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + int ret; + + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); + ret = dw_pcie_read(pci->dbi_base + where, size, val); + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); + + return ret; +} + +static int kirin_pcie_wr_own_conf(struct pcie_port *pp, + int where, int size, u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + int ret; + + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); + ret = dw_pcie_write(pci->dbi_base + where, size, val); + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); + + return ret; +} + +static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + u32 ret; + + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); + dw_pcie_read(base + reg, size, &ret); + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); + + return ret; +} + +static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, 
size_t size, u32 val) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); + dw_pcie_write(base + reg, size, val); + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); +} + +static int kirin_pcie_link_up(struct dw_pcie *pci) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); + + if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) + return 1; + + return 0; +} + +static int kirin_pcie_establish_link(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + struct device *dev = kirin_pcie->pci->dev; + int count = 0; + + if (kirin_pcie_link_up(pci)) + return 0; + + dw_pcie_setup_rc(pp); + + /* assert LTSSM enable */ + kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT, + PCIE_APP_LTSSM_ENABLE); + + /* check if the link is up or not */ + while (!kirin_pcie_link_up(pci)) { + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); + count++; + if (count == 1000) { + dev_err(dev, "Link Fail\n"); + return -EINVAL; + } + } + + return 0; +} + +static int kirin_pcie_host_init(struct pcie_port *pp) +{ + kirin_pcie_establish_link(pp); + + return 0; +} + +static struct dw_pcie_ops kirin_dw_pcie_ops = { + .read_dbi = kirin_pcie_read_dbi, + .write_dbi = kirin_pcie_write_dbi, + .link_up = kirin_pcie_link_up, +}; + +static const struct dw_pcie_host_ops kirin_pcie_host_ops = { + .rd_own_conf = kirin_pcie_rd_own_conf, + .wr_own_conf = kirin_pcie_wr_own_conf, + .host_init = kirin_pcie_host_init, +}; + +static int __init kirin_add_pcie_port(struct dw_pcie *pci, + struct platform_device *pdev) +{ + pci->pp.ops = &kirin_pcie_host_ops; + + return dw_pcie_host_init(&pci->pp); +} + +static int kirin_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct kirin_pcie *kirin_pcie; + struct dw_pcie *pci; + int ret; + + if (!dev->of_node) { + dev_err(dev, "NULL node\n"); 
+ return -EINVAL; + } + + kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); + if (!kirin_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &kirin_dw_pcie_ops; + kirin_pcie->pci = pci; + + ret = kirin_pcie_get_clk(kirin_pcie, pdev); + if (ret) + return ret; + + ret = kirin_pcie_get_resource(kirin_pcie, pdev); + if (ret) + return ret; + + kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, + "reset-gpios", 0); + if (kirin_pcie->gpio_id_reset < 0) + return -ENODEV; + + ret = kirin_pcie_power_on(kirin_pcie); + if (ret) + return ret; + + platform_set_drvdata(pdev, kirin_pcie); + + return kirin_add_pcie_port(pci, pdev); +} + +static const struct of_device_id kirin_pcie_match[] = { + { .compatible = "hisilicon,kirin960-pcie" }, + {}, +}; + +static struct platform_driver kirin_pcie_driver = { + .probe = kirin_pcie_probe, + .driver = { + .name = "kirin-pcie", + .of_match_table = kirin_pcie_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(kirin_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c new file mode 100644 index 000000000000..a1d0198081a6 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -0,0 +1,1299 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Qualcomm PCIe root complex driver + * + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Copyright 2015 Linaro Limited. 
+ * + * Author: Stanimir Varbanov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +#define PCIE20_PARF_SYS_CTRL 0x00 +#define MST_WAKEUP_EN BIT(13) +#define SLV_WAKEUP_EN BIT(12) +#define MSTR_ACLK_CGC_DIS BIT(10) +#define SLV_ACLK_CGC_DIS BIT(9) +#define CORE_CLK_CGC_DIS BIT(6) +#define AUX_PWR_DET BIT(4) +#define L23_CLK_RMV_DIS BIT(2) +#define L1_CLK_RMV_DIS BIT(1) + +#define PCIE20_COMMAND_STATUS 0x04 +#define CMD_BME_VAL 0x4 +#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 +#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 + +#define PCIE20_PARF_PHY_CTRL 0x40 +#define PCIE20_PARF_PHY_REFCLK 0x4C +#define PCIE20_PARF_DBI_BASE_ADDR 0x168 +#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C +#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 +#define PCIE20_PARF_LTSSM 0x1B0 +#define PCIE20_PARF_SID_OFFSET 0x234 +#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C + +#define PCIE20_ELBI_SYS_CTRL 0x04 +#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) + +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 +#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 +#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c +#define CFG_BRIDGE_SB_INIT BIT(0) + +#define PCIE20_CAP 0x70 +#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) +#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) +#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) +#define PCIE_CAP_LINK1_VAL 0x2FD7F + +#define PCIE20_PARF_Q2A_FLUSH 0x1AC + +#define PCIE20_MISC_CONTROL_1_REG 0x8BC +#define DBI_RO_WR_EN 1 + +#define PERST_DELAY_US 1000 + +#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 +#define SLV_ADDR_SPACE_SZ 0x10000000 + +#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 +struct qcom_pcie_resources_2_1_0 { + struct clk *iface_clk; + 
struct clk *core_clk; + struct clk *phy_clk; + struct reset_control *pci_reset; + struct reset_control *axi_reset; + struct reset_control *ahb_reset; + struct reset_control *por_reset; + struct reset_control *phy_reset; + struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; +}; + +struct qcom_pcie_resources_1_0_0 { + struct clk *iface; + struct clk *aux; + struct clk *master_bus; + struct clk *slave_bus; + struct reset_control *core; + struct regulator *vdda; +}; + +#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2 +struct qcom_pcie_resources_2_3_2 { + struct clk *aux_clk; + struct clk *master_clk; + struct clk *slave_clk; + struct clk *cfg_clk; + struct clk *pipe_clk; + struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; +}; + +struct qcom_pcie_resources_2_4_0 { + struct clk *aux_clk; + struct clk *master_clk; + struct clk *slave_clk; + struct reset_control *axi_m_reset; + struct reset_control *axi_s_reset; + struct reset_control *pipe_reset; + struct reset_control *axi_m_vmid_reset; + struct reset_control *axi_s_xpu_reset; + struct reset_control *parf_reset; + struct reset_control *phy_reset; + struct reset_control *axi_m_sticky_reset; + struct reset_control *pipe_sticky_reset; + struct reset_control *pwr_reset; + struct reset_control *ahb_reset; + struct reset_control *phy_ahb_reset; +}; + +struct qcom_pcie_resources_2_3_3 { + struct clk *iface; + struct clk *axi_m_clk; + struct clk *axi_s_clk; + struct clk *ahb_clk; + struct clk *aux_clk; + struct reset_control *rst[7]; +}; + +union qcom_pcie_resources { + struct qcom_pcie_resources_1_0_0 v1_0_0; + struct qcom_pcie_resources_2_1_0 v2_1_0; + struct qcom_pcie_resources_2_3_2 v2_3_2; + struct qcom_pcie_resources_2_3_3 v2_3_3; + struct qcom_pcie_resources_2_4_0 v2_4_0; +}; + +struct qcom_pcie; + +struct qcom_pcie_ops { + int (*get_resources)(struct qcom_pcie *pcie); + int (*init)(struct qcom_pcie *pcie); + int (*post_init)(struct qcom_pcie *pcie); + void (*deinit)(struct qcom_pcie *pcie); + void 
(*post_deinit)(struct qcom_pcie *pcie); + void (*ltssm_enable)(struct qcom_pcie *pcie); +}; + +struct qcom_pcie { + struct dw_pcie *pci; + void __iomem *parf; /* DT parf */ + void __iomem *elbi; /* DT elbi */ + union qcom_pcie_resources res; + struct phy *phy; + struct gpio_desc *reset; + const struct qcom_pcie_ops *ops; +}; + +#define to_qcom_pcie(x) dev_get_drvdata((x)->dev) + +static void qcom_ep_reset_assert(struct qcom_pcie *pcie) +{ + gpiod_set_value_cansleep(pcie->reset, 1); + usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); +} + +static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) +{ + gpiod_set_value_cansleep(pcie->reset, 0); + usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); +} + +static int qcom_pcie_establish_link(struct qcom_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + + if (dw_pcie_link_up(pci)) + return 0; + + /* Enable Link Training state machine */ + if (pcie->ops->ltssm_enable) + pcie->ops->ltssm_enable(pcie); + + return dw_pcie_wait_for_link(pci); +} + +static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); + val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; + writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); +} + +static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + res->supplies[0].supply = "vdda"; + res->supplies[1].supply = "vdda_phy"; + res->supplies[2].supply = "vdda_refclk"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), + res->supplies); + if (ret) + return ret; + + res->iface_clk = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface_clk)) + return PTR_ERR(res->iface_clk); + + res->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(res->core_clk)) + return PTR_ERR(res->core_clk); + + res->phy_clk = devm_clk_get(dev, "phy"); + if (IS_ERR(res->phy_clk)) + 
return PTR_ERR(res->phy_clk); + + res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); + if (IS_ERR(res->pci_reset)) + return PTR_ERR(res->pci_reset); + + res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); + if (IS_ERR(res->axi_reset)) + return PTR_ERR(res->axi_reset); + + res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); + if (IS_ERR(res->ahb_reset)) + return PTR_ERR(res->ahb_reset); + + res->por_reset = devm_reset_control_get_exclusive(dev, "por"); + if (IS_ERR(res->por_reset)) + return PTR_ERR(res->por_reset); + + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + return PTR_ERR_OR_ZERO(res->phy_reset); +} + +static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + + reset_control_assert(res->pci_reset); + reset_control_assert(res->axi_reset); + reset_control_assert(res->ahb_reset); + reset_control_assert(res->por_reset); + reset_control_assert(res->pci_reset); + clk_disable_unprepare(res->iface_clk); + clk_disable_unprepare(res->core_clk); + clk_disable_unprepare(res->phy_clk); + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); +} + +static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + u32 val; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); + if (ret < 0) { + dev_err(dev, "cannot enable regulators\n"); + return ret; + } + + ret = reset_control_assert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot assert ahb reset\n"); + goto err_assert_ahb; + } + + ret = clk_prepare_enable(res->iface_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); + goto err_assert_ahb; + } + + ret = clk_prepare_enable(res->phy_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable phy clock\n"); + goto err_clk_phy; + } + + ret = 
clk_prepare_enable(res->core_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_core; + } + + ret = reset_control_deassert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot deassert ahb reset\n"); + goto err_deassert_ahb; + } + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + /* enable external reference clock */ + val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); + val |= BIT(16); + writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); + + ret = reset_control_deassert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot deassert phy reset\n"); + return ret; + } + + ret = reset_control_deassert(res->pci_reset); + if (ret) { + dev_err(dev, "cannot deassert pci reset\n"); + return ret; + } + + ret = reset_control_deassert(res->por_reset); + if (ret) { + dev_err(dev, "cannot deassert por reset\n"); + return ret; + } + + ret = reset_control_deassert(res->axi_reset); + if (ret) { + dev_err(dev, "cannot deassert axi reset\n"); + return ret; + } + + /* wait for clock acquisition */ + usleep_range(1000, 1500); + + + /* Set the Max TLP size to 2K, instead of using default of 4K */ + writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, + pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); + writel(CFG_BRIDGE_SB_INIT, + pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); + + return 0; + +err_deassert_ahb: + clk_disable_unprepare(res->core_clk); +err_clk_core: + clk_disable_unprepare(res->phy_clk); +err_clk_phy: + clk_disable_unprepare(res->iface_clk); +err_assert_ahb: + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + + return ret; +} + +static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + + res->vdda = devm_regulator_get(dev, "vdda"); + if (IS_ERR(res->vdda)) + return 
PTR_ERR(res->vdda); + + res->iface = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface)) + return PTR_ERR(res->iface); + + res->aux = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux)) + return PTR_ERR(res->aux); + + res->master_bus = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_bus)) + return PTR_ERR(res->master_bus); + + res->slave_bus = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_bus)) + return PTR_ERR(res->slave_bus); + + res->core = devm_reset_control_get_exclusive(dev, "core"); + return PTR_ERR_OR_ZERO(res->core); +} + +static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; + + reset_control_assert(res->core); + clk_disable_unprepare(res->slave_bus); + clk_disable_unprepare(res->master_bus); + clk_disable_unprepare(res->iface); + clk_disable_unprepare(res->aux); + regulator_disable(res->vdda); +} + +static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + ret = reset_control_deassert(res->core); + if (ret) { + dev_err(dev, "cannot deassert core reset\n"); + return ret; + } + + ret = clk_prepare_enable(res->aux); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_res; + } + + ret = clk_prepare_enable(res->iface); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); + goto err_aux; + } + + ret = clk_prepare_enable(res->master_bus); + if (ret) { + dev_err(dev, "cannot prepare/enable master_bus clock\n"); + goto err_iface; + } + + ret = clk_prepare_enable(res->slave_bus); + if (ret) { + dev_err(dev, "cannot prepare/enable slave_bus clock\n"); + goto err_master; + } + + ret = regulator_enable(res->vdda); + if (ret) { + dev_err(dev, "cannot enable vdda regulator\n"); + goto err_slave; + } + + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + if 
(IS_ENABLED(CONFIG_PCI_MSI)) { + u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + } + + return 0; +err_slave: + clk_disable_unprepare(res->slave_bus); +err_master: + clk_disable_unprepare(res->master_bus); +err_iface: + clk_disable_unprepare(res->iface); +err_aux: + clk_disable_unprepare(res->aux); +err_res: + reset_control_assert(res->core); + + return ret; +} + +static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->parf + PCIE20_PARF_LTSSM); + val |= BIT(8); + writel(val, pcie->parf + PCIE20_PARF_LTSSM); +} + +static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + res->supplies[0].supply = "vdda"; + res->supplies[1].supply = "vddpe-3v3"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), + res->supplies); + if (ret) + return ret; + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->cfg_clk = devm_clk_get(dev, "cfg"); + if (IS_ERR(res->cfg_clk)) + return PTR_ERR(res->cfg_clk); + + res->master_clk = devm_clk_get(dev, "bus_master"); + if (IS_ERR(res->master_clk)) + return PTR_ERR(res->master_clk); + + res->slave_clk = devm_clk_get(dev, "bus_slave"); + if (IS_ERR(res->slave_clk)) + return PTR_ERR(res->slave_clk); + + res->pipe_clk = devm_clk_get(dev, "pipe"); + return PTR_ERR_OR_ZERO(res->pipe_clk); +} + +static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + + clk_disable_unprepare(res->slave_clk); + clk_disable_unprepare(res->master_clk); + clk_disable_unprepare(res->cfg_clk); + clk_disable_unprepare(res->aux_clk); + + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); +} + +static 
void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + + clk_disable_unprepare(res->pipe_clk); +} + +static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + u32 val; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); + if (ret < 0) { + dev_err(dev, "cannot enable regulators\n"); + return ret; + } + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_aux_clk; + } + + ret = clk_prepare_enable(res->cfg_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable cfg clock\n"); + goto err_cfg_clk; + } + + ret = clk_prepare_enable(res->master_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable master clock\n"); + goto err_master_clk; + } + + ret = clk_prepare_enable(res->slave_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable slave clock\n"); + goto err_slave_clk; + } + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + /* MAC PHY_POWERDOWN MUX DISABLE */ + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); + val &= ~BIT(29); + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + val |= BIT(4); + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + + return 0; + +err_slave_clk: + clk_disable_unprepare(res->master_clk); +err_master_clk: + clk_disable_unprepare(res->cfg_clk); +err_cfg_clk: + clk_disable_unprepare(res->aux_clk); + +err_aux_clk: + 
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + + return ret; +} + +static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + ret = clk_prepare_enable(res->pipe_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable pipe clock\n"); + return ret; + } + + return 0; +} + +static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->master_clk = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_clk)) + return PTR_ERR(res->master_clk); + + res->slave_clk = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_clk)) + return PTR_ERR(res->slave_clk); + + res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); + if (IS_ERR(res->axi_m_reset)) + return PTR_ERR(res->axi_m_reset); + + res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); + if (IS_ERR(res->axi_s_reset)) + return PTR_ERR(res->axi_s_reset); + + res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); + if (IS_ERR(res->pipe_reset)) + return PTR_ERR(res->pipe_reset); + + res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, + "axi_m_vmid"); + if (IS_ERR(res->axi_m_vmid_reset)) + return PTR_ERR(res->axi_m_vmid_reset); + + res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, + "axi_s_xpu"); + if (IS_ERR(res->axi_s_xpu_reset)) + return PTR_ERR(res->axi_s_xpu_reset); + + res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); + if (IS_ERR(res->parf_reset)) + return PTR_ERR(res->parf_reset); + + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + + 
res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, + "axi_m_sticky"); + if (IS_ERR(res->axi_m_sticky_reset)) + return PTR_ERR(res->axi_m_sticky_reset); + + res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, + "pipe_sticky"); + if (IS_ERR(res->pipe_sticky_reset)) + return PTR_ERR(res->pipe_sticky_reset); + + res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); + if (IS_ERR(res->pwr_reset)) + return PTR_ERR(res->pwr_reset); + + res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); + if (IS_ERR(res->ahb_reset)) + return PTR_ERR(res->ahb_reset); + + res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); + if (IS_ERR(res->phy_ahb_reset)) + return PTR_ERR(res->phy_ahb_reset); + + return 0; +} + +static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; + + reset_control_assert(res->axi_m_reset); + reset_control_assert(res->axi_s_reset); + reset_control_assert(res->pipe_reset); + reset_control_assert(res->pipe_sticky_reset); + reset_control_assert(res->phy_reset); + reset_control_assert(res->phy_ahb_reset); + reset_control_assert(res->axi_m_sticky_reset); + reset_control_assert(res->pwr_reset); + reset_control_assert(res->ahb_reset); + clk_disable_unprepare(res->aux_clk); + clk_disable_unprepare(res->master_clk); + clk_disable_unprepare(res->slave_clk); +} + +static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + u32 val; + int ret; + + ret = reset_control_assert(res->axi_m_reset); + if (ret) { + dev_err(dev, "cannot assert axi master reset\n"); + return ret; + } + + ret = reset_control_assert(res->axi_s_reset); + if (ret) { + dev_err(dev, "cannot assert axi slave reset\n"); + return ret; + } + + usleep_range(10000, 12000); + + ret = reset_control_assert(res->pipe_reset); + if (ret) { + dev_err(dev, "cannot assert 
pipe reset\n"); + return ret; + } + + ret = reset_control_assert(res->pipe_sticky_reset); + if (ret) { + dev_err(dev, "cannot assert pipe sticky reset\n"); + return ret; + } + + ret = reset_control_assert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot assert phy reset\n"); + return ret; + } + + ret = reset_control_assert(res->phy_ahb_reset); + if (ret) { + dev_err(dev, "cannot assert phy ahb reset\n"); + return ret; + } + + usleep_range(10000, 12000); + + ret = reset_control_assert(res->axi_m_sticky_reset); + if (ret) { + dev_err(dev, "cannot assert axi master sticky reset\n"); + return ret; + } + + ret = reset_control_assert(res->pwr_reset); + if (ret) { + dev_err(dev, "cannot assert power reset\n"); + return ret; + } + + ret = reset_control_assert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot assert ahb reset\n"); + return ret; + } + + usleep_range(10000, 12000); + + ret = reset_control_deassert(res->phy_ahb_reset); + if (ret) { + dev_err(dev, "cannot deassert phy ahb reset\n"); + return ret; + } + + ret = reset_control_deassert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot deassert phy reset\n"); + goto err_rst_phy; + } + + ret = reset_control_deassert(res->pipe_reset); + if (ret) { + dev_err(dev, "cannot deassert pipe reset\n"); + goto err_rst_pipe; + } + + ret = reset_control_deassert(res->pipe_sticky_reset); + if (ret) { + dev_err(dev, "cannot deassert pipe sticky reset\n"); + goto err_rst_pipe_sticky; + } + + usleep_range(10000, 12000); + + ret = reset_control_deassert(res->axi_m_reset); + if (ret) { + dev_err(dev, "cannot deassert axi master reset\n"); + goto err_rst_axi_m; + } + + ret = reset_control_deassert(res->axi_m_sticky_reset); + if (ret) { + dev_err(dev, "cannot deassert axi master sticky reset\n"); + goto err_rst_axi_m_sticky; + } + + ret = reset_control_deassert(res->axi_s_reset); + if (ret) { + dev_err(dev, "cannot deassert axi slave reset\n"); + goto err_rst_axi_s; + } + + ret = reset_control_deassert(res->pwr_reset); + if 
(ret) { + dev_err(dev, "cannot deassert power reset\n"); + goto err_rst_pwr; + } + + ret = reset_control_deassert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot deassert ahb reset\n"); + goto err_rst_ahb; + } + + usleep_range(10000, 12000); + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); + goto err_clk_aux; + } + + ret = clk_prepare_enable(res->master_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_axi_m; + } + + ret = clk_prepare_enable(res->slave_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable phy clock\n"); + goto err_clk_axi_s; + } + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + /* MAC PHY_POWERDOWN MUX DISABLE */ + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); + val &= ~BIT(29); + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + val |= BIT(4); + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + + return 0; + +err_clk_axi_s: + clk_disable_unprepare(res->master_clk); +err_clk_axi_m: + clk_disable_unprepare(res->aux_clk); +err_clk_aux: + reset_control_assert(res->ahb_reset); +err_rst_ahb: + reset_control_assert(res->pwr_reset); +err_rst_pwr: + reset_control_assert(res->axi_s_reset); +err_rst_axi_s: + reset_control_assert(res->axi_m_sticky_reset); +err_rst_axi_m_sticky: + reset_control_assert(res->axi_m_reset); +err_rst_axi_m: + reset_control_assert(res->pipe_sticky_reset); +err_rst_pipe_sticky: + reset_control_assert(res->pipe_reset); +err_rst_pipe: + reset_control_assert(res->phy_reset); +err_rst_phy: + 
reset_control_assert(res->phy_ahb_reset); + return ret; +} + +static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int i; + const char *rst_names[] = { "axi_m", "axi_s", "pipe", + "axi_m_sticky", "sticky", + "ahb", "sleep", }; + + res->iface = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface)) + return PTR_ERR(res->iface); + + res->axi_m_clk = devm_clk_get(dev, "axi_m"); + if (IS_ERR(res->axi_m_clk)) + return PTR_ERR(res->axi_m_clk); + + res->axi_s_clk = devm_clk_get(dev, "axi_s"); + if (IS_ERR(res->axi_s_clk)) + return PTR_ERR(res->axi_s_clk); + + res->ahb_clk = devm_clk_get(dev, "ahb"); + if (IS_ERR(res->ahb_clk)) + return PTR_ERR(res->ahb_clk); + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + for (i = 0; i < ARRAY_SIZE(rst_names); i++) { + res->rst[i] = devm_reset_control_get(dev, rst_names[i]); + if (IS_ERR(res->rst[i])) + return PTR_ERR(res->rst[i]); + } + + return 0; +} + +static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; + + clk_disable_unprepare(res->iface); + clk_disable_unprepare(res->axi_m_clk); + clk_disable_unprepare(res->axi_s_clk); + clk_disable_unprepare(res->ahb_clk); + clk_disable_unprepare(res->aux_clk); +} + +static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int i, ret; + u32 val; + + for (i = 0; i < ARRAY_SIZE(res->rst); i++) { + ret = reset_control_assert(res->rst[i]); + if (ret) { + dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); + return ret; + } + } + + usleep_range(2000, 2500); + + for (i = 0; i < ARRAY_SIZE(res->rst); i++) { + ret = reset_control_deassert(res->rst[i]); + if (ret) { + dev_err(dev, "reset #%d deassert 
failed (%d)\n", i, + ret); + return ret; + } + } + + /* + * Don't have a way to see if the reset has completed. + * Wait for some time. + */ + usleep_range(2000, 2500); + + ret = clk_prepare_enable(res->iface); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_iface; + } + + ret = clk_prepare_enable(res->axi_m_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_axi_m; + } + + ret = clk_prepare_enable(res->axi_s_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable axi slave clock\n"); + goto err_clk_axi_s; + } + + ret = clk_prepare_enable(res->ahb_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable ahb clock\n"); + goto err_clk_ahb; + } + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_clk_aux; + } + + writel(SLV_ADDR_SPACE_SZ, + pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); + + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS + | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | + AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, + pcie->parf + PCIE20_PARF_SYS_CTRL); + writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); + + writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); + writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); + writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); + + val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); + val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; + writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); + + writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + + PCIE20_DEVICE_CONTROL2_STATUS2); + + return 0; + +err_clk_aux: + clk_disable_unprepare(res->ahb_clk); +err_clk_ahb: + clk_disable_unprepare(res->axi_s_clk); +err_clk_axi_s: + clk_disable_unprepare(res->axi_m_clk); 
+err_clk_axi_m: + clk_disable_unprepare(res->iface); +err_clk_iface: + /* + * Not checking for failure, will anyway return + * the original failure in 'ret'. + */ + for (i = 0; i < ARRAY_SIZE(res->rst); i++) + reset_control_assert(res->rst[i]); + + return ret; +} + +static int qcom_pcie_link_up(struct dw_pcie *pci) +{ + u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); + + return !!(val & PCI_EXP_LNKSTA_DLLLA); +} + +static int qcom_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct qcom_pcie *pcie = to_qcom_pcie(pci); + int ret; + + pm_runtime_get_sync(pci->dev); + qcom_ep_reset_assert(pcie); + + ret = pcie->ops->init(pcie); + if (ret) + return ret; + + ret = phy_power_on(pcie->phy); + if (ret) + goto err_deinit; + + if (pcie->ops->post_init) { + ret = pcie->ops->post_init(pcie); + if (ret) + goto err_disable_phy; + } + + dw_pcie_setup_rc(pp); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + + qcom_ep_reset_deassert(pcie); + + ret = qcom_pcie_establish_link(pcie); + if (ret) + goto err; + + return 0; +err: + qcom_ep_reset_assert(pcie); + if (pcie->ops->post_deinit) + pcie->ops->post_deinit(pcie); +err_disable_phy: + phy_power_off(pcie->phy); +err_deinit: + pcie->ops->deinit(pcie); + pm_runtime_put(pci->dev); + + return ret; +} + +static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + /* the device class is not reported correctly from the register */ + if (where == PCI_CLASS_REVISION && size == 4) { + *val = readl(pci->dbi_base + PCI_CLASS_REVISION); + *val &= 0xff; /* keep revision id */ + *val |= PCI_CLASS_BRIDGE_PCI << 16; + return PCIBIOS_SUCCESSFUL; + } + + return dw_pcie_read(pci->dbi_base + where, size, val); +} + +static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { + .host_init = qcom_pcie_host_init, + .rd_own_conf = qcom_pcie_rd_own_conf, +}; + +/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ 
+static const struct qcom_pcie_ops ops_2_1_0 = { + .get_resources = qcom_pcie_get_resources_2_1_0, + .init = qcom_pcie_init_2_1_0, + .deinit = qcom_pcie_deinit_2_1_0, + .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, +}; + +/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ +static const struct qcom_pcie_ops ops_1_0_0 = { + .get_resources = qcom_pcie_get_resources_1_0_0, + .init = qcom_pcie_init_1_0_0, + .deinit = qcom_pcie_deinit_1_0_0, + .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, +}; + +/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ +static const struct qcom_pcie_ops ops_2_3_2 = { + .get_resources = qcom_pcie_get_resources_2_3_2, + .init = qcom_pcie_init_2_3_2, + .post_init = qcom_pcie_post_init_2_3_2, + .deinit = qcom_pcie_deinit_2_3_2, + .post_deinit = qcom_pcie_post_deinit_2_3_2, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ +static const struct qcom_pcie_ops ops_2_4_0 = { + .get_resources = qcom_pcie_get_resources_2_4_0, + .init = qcom_pcie_init_2_4_0, + .deinit = qcom_pcie_deinit_2_4_0, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ +static const struct qcom_pcie_ops ops_2_3_3 = { + .get_resources = qcom_pcie_get_resources_2_3_3, + .init = qcom_pcie_init_2_3_3, + .deinit = qcom_pcie_deinit_2_3_3, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = qcom_pcie_link_up, +}; + +static int qcom_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct pcie_port *pp; + struct dw_pcie *pci; + struct qcom_pcie *pcie; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pm_runtime_enable(dev); + pci->dev = dev; + pci->ops = &dw_pcie_ops; + pp = &pci->pp; + + pcie->pci = pci; + + pcie->ops = 
of_device_get_match_data(dev); + + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); + if (IS_ERR(pcie->reset)) + return PTR_ERR(pcie->reset); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); + pcie->parf = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->parf)) + return PTR_ERR(pcie->parf); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); + pcie->elbi = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->elbi)) + return PTR_ERR(pcie->elbi); + + pcie->phy = devm_phy_optional_get(dev, "pciephy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + + ret = pcie->ops->get_resources(pcie); + if (ret) + return ret; + + pp->root_bus_nr = -1; + pp->ops = &qcom_pcie_dw_ops; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq < 0) + return pp->msi_irq; + } + + ret = phy_init(pcie->phy); + if (ret) { + pm_runtime_disable(&pdev->dev); + return ret; + } + + platform_set_drvdata(pdev, pcie); + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "cannot initialize host\n"); + pm_runtime_disable(&pdev->dev); + return ret; + } + + return 0; +} + +static const struct of_device_id qcom_pcie_match[] = { + { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, + { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, + { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, + { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 }, + { } +}; + +static struct platform_driver qcom_pcie_driver = { + .probe = qcom_pcie_probe, + .driver = { + .name = "qcom-pcie", + .suppress_bind_attrs = true, + .of_match_table = qcom_pcie_match, + }, +}; 
+builtin_platform_driver(qcom_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c new file mode 100644 index 000000000000..ecb58f7b7566 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs + * + * SPEAr13xx PCIe Glue Layer Source Code + * + * Copyright (C) 2010-2014 ST Microelectronics + * Pratyush Anand + * Mohit Kumar + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +struct spear13xx_pcie { + struct dw_pcie *pci; + void __iomem *app_base; + struct phy *phy; + struct clk *clk; + bool is_gen1; +}; + +struct pcie_app_reg { + u32 app_ctrl_0; /* cr0 */ + u32 app_ctrl_1; /* cr1 */ + u32 app_status_0; /* cr2 */ + u32 app_status_1; /* cr3 */ + u32 msg_status; /* cr4 */ + u32 msg_payload; /* cr5 */ + u32 int_sts; /* cr6 */ + u32 int_clr; /* cr7 */ + u32 int_mask; /* cr8 */ + u32 mst_bmisc; /* cr9 */ + u32 phy_ctrl; /* cr10 */ + u32 phy_status; /* cr11 */ + u32 cxpl_debug_info_0; /* cr12 */ + u32 cxpl_debug_info_1; /* cr13 */ + u32 ven_msg_ctrl_0; /* cr14 */ + u32 ven_msg_ctrl_1; /* cr15 */ + u32 ven_msg_data_0; /* cr16 */ + u32 ven_msg_data_1; /* cr17 */ + u32 ven_msi_0; /* cr18 */ + u32 ven_msi_1; /* cr19 */ + u32 mst_rmisc; /* cr20 */ +}; + +/* CR0 ID */ +#define APP_LTSSM_ENABLE_ID 3 +#define DEVICE_TYPE_RC (4 << 25) +#define MISCTRL_EN_ID 30 +#define REG_TRANSLATION_ENABLE 31 + +/* CR3 ID */ +#define XMLH_LINK_UP (1 << 6) + +/* CR6 */ +#define MSI_CTRL_INT (1 << 26) + +#define EXP_CAP_ID_OFFSET 0x70 + +#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev) + +static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie) +{ + struct dw_pcie *pci = spear13xx_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + u32 
val; + u32 exp_cap_off = EXP_CAP_ID_OFFSET; + + if (dw_pcie_link_up(pci)) { + dev_err(pci->dev, "link already up\n"); + return 0; + } + + dw_pcie_setup_rc(pp); + + /* + * this controller support only 128 bytes read size, however its + * default value in capability register is 512 bytes. So force + * it to 128 here. + */ + dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val); + val &= ~PCI_EXP_DEVCTL_READRQ; + dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val); + + dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A); + dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80); + + /* + * if is_gen1 is set then handle it, so that some buggy card + * also works + */ + if (spear13xx_pcie->is_gen1) { + dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, + 4, &val); + if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { + val &= ~((u32)PCI_EXP_LNKCAP_SLS); + val |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_write(pci->dbi_base + exp_cap_off + + PCI_EXP_LNKCAP, 4, val); + } + + dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, + 2, &val); + if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { + val &= ~((u32)PCI_EXP_LNKCAP_SLS); + val |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_write(pci->dbi_base + exp_cap_off + + PCI_EXP_LNKCTL2, 2, val); + } + } + + /* enable ltssm */ + writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID) + | (1 << APP_LTSSM_ENABLE_ID) + | ((u32)1 << REG_TRANSLATION_ENABLE), + &app_reg->app_ctrl_0); + + return dw_pcie_wait_for_link(pci); +} + +static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) +{ + struct spear13xx_pcie *spear13xx_pcie = arg; + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + struct dw_pcie *pci = spear13xx_pcie->pci; + struct pcie_port *pp = &pci->pp; + unsigned int status; + + status = readl(&app_reg->int_sts); + + if (status & MSI_CTRL_INT) { + BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI)); + dw_handle_msi_irq(pp); + } + + writel(status, &app_reg->int_clr); + + return 
IRQ_HANDLED; +} + +static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie) +{ + struct dw_pcie *pci = spear13xx_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + + /* Enable MSI interrupt */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + dw_pcie_msi_init(pp); + writel(readl(&app_reg->int_mask) | + MSI_CTRL_INT, &app_reg->int_mask); + } +} + +static int spear13xx_pcie_link_up(struct dw_pcie *pci) +{ + struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + + if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) + return 1; + + return 0; +} + +static int spear13xx_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); + + spear13xx_pcie_establish_link(spear13xx_pcie); + spear13xx_pcie_enable_interrupts(spear13xx_pcie); + + return 0; +} + +static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { + .host_init = spear13xx_pcie_host_init, +}; + +static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = spear13xx_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = &pdev->dev; + int ret; + + pp->irq = platform_get_irq(pdev, 0); + if (pp->irq < 0) { + dev_err(dev, "failed to get irq\n"); + return pp->irq; + } + ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, + IRQF_SHARED | IRQF_NO_THREAD, + "spear1340-pcie", spear13xx_pcie); + if (ret) { + dev_err(dev, "failed to request irq %d\n", pp->irq); + return ret; + } + + pp->root_bus_nr = -1; + pp->ops = &spear13xx_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .link_up = spear13xx_pcie_link_up, +}; + +static int 
spear13xx_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct spear13xx_pcie *spear13xx_pcie; + struct device_node *np = dev->of_node; + struct resource *dbi_base; + int ret; + + spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL); + if (!spear13xx_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &dw_pcie_ops; + + spear13xx_pcie->pci = pci; + + spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); + if (IS_ERR(spear13xx_pcie->phy)) { + ret = PTR_ERR(spear13xx_pcie->phy); + if (ret == -EPROBE_DEFER) + dev_info(dev, "probe deferred\n"); + else + dev_err(dev, "couldn't get pcie-phy\n"); + return ret; + } + + phy_init(spear13xx_pcie->phy); + + spear13xx_pcie->clk = devm_clk_get(dev, NULL); + if (IS_ERR(spear13xx_pcie->clk)) { + dev_err(dev, "couldn't get clk for pcie\n"); + return PTR_ERR(spear13xx_pcie->clk); + } + ret = clk_prepare_enable(spear13xx_pcie->clk); + if (ret) { + dev_err(dev, "couldn't enable clk for pcie\n"); + return ret; + } + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) { + dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); + ret = PTR_ERR(pci->dbi_base); + goto fail_clk; + } + spear13xx_pcie->app_base = pci->dbi_base + 0x2000; + + if (of_property_read_bool(np, "st,pcie-is-gen1")) + spear13xx_pcie->is_gen1 = true; + + platform_set_drvdata(pdev, spear13xx_pcie); + + ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev); + if (ret < 0) + goto fail_clk; + + return 0; + +fail_clk: + clk_disable_unprepare(spear13xx_pcie->clk); + + return ret; +} + +static const struct of_device_id spear13xx_pcie_of_match[] = { + { .compatible = "st,spear1340-pcie", }, + {}, +}; + +static struct platform_driver spear13xx_pcie_driver = { + .probe = spear13xx_pcie_probe, + .driver = { + 
.name = "spear-pcie", + .of_match_table = of_match_ptr(spear13xx_pcie_of_match), + .suppress_bind_attrs = true, + }, +}; + +builtin_platform_driver(spear13xx_pcie_driver); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c new file mode 100644 index 000000000000..d3172d5d3d35 --- /dev/null +++ b/drivers/pci/controller/pci-aardvark.c @@ -0,0 +1,978 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the Aardvark PCIe controller, used on Marvell Armada + * 3700. + * + * Copyright (C) 2016 Marvell + * + * Author: Hezi Shahmoon + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +/* PCIe core registers */ +#define PCIE_CORE_CMD_STATUS_REG 0x4 +#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0) +#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1) +#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2) +#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8 +#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4) +#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 +#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) +#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 +#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2 +#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 +#define PCIE_CORE_LINK_L0S_ENTRY BIT(0) +#define PCIE_CORE_LINK_TRAINING BIT(5) +#define PCIE_CORE_LINK_WIDTH_SHIFT 20 +#define PCIE_CORE_ERR_CAPCTL_REG 0x118 +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) + +/* PIO registers base address and register offsets */ +#define PIO_BASE_ADDR 0x4000 +#define PIO_CTRL (PIO_BASE_ADDR + 0x0) +#define PIO_CTRL_TYPE_MASK GENMASK(3, 0) +#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24) +#define PIO_STAT (PIO_BASE_ADDR + 0x4) +#define PIO_COMPLETION_STATUS_SHIFT 7 +#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7) +#define 
PIO_COMPLETION_STATUS_OK 0 +#define PIO_COMPLETION_STATUS_UR 1 +#define PIO_COMPLETION_STATUS_CRS 2 +#define PIO_COMPLETION_STATUS_CA 4 +#define PIO_NON_POSTED_REQ BIT(0) +#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8) +#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc) +#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10) +#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14) +#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18) +#define PIO_START (PIO_BASE_ADDR + 0x1c) +#define PIO_ISR (PIO_BASE_ADDR + 0x20) +#define PIO_ISRM (PIO_BASE_ADDR + 0x24) + +/* Aardvark Control registers */ +#define CONTROL_BASE_ADDR 0x4800 +#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0) +#define PCIE_GEN_SEL_MSK 0x3 +#define PCIE_GEN_SEL_SHIFT 0x0 +#define SPEED_GEN_1 0 +#define SPEED_GEN_2 1 +#define SPEED_GEN_3 2 +#define IS_RC_MSK 1 +#define IS_RC_SHIFT 2 +#define LANE_CNT_MSK 0x18 +#define LANE_CNT_SHIFT 0x3 +#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT) +#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT) +#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT) +#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT) +#define LINK_TRAINING_EN BIT(6) +#define LEGACY_INTA BIT(28) +#define LEGACY_INTB BIT(29) +#define LEGACY_INTC BIT(30) +#define LEGACY_INTD BIT(31) +#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4) +#define HOT_RESET_GEN BIT(0) +#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8) +#define PCIE_CORE_CTRL2_RESERVED 0x7 +#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4) +#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5) +#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6) +#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10) +#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40) +#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) +#define PCIE_ISR0_MSI_INT_PENDING BIT(24) +#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) +#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) +#define PCIE_ISR0_ALL_MASK GENMASK(26, 0) +#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48) +#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) +#define PCIE_ISR1_POWER_STATE_CHANGE 
BIT(4) +#define PCIE_ISR1_FLUSH BIT(5) +#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val)) +#define PCIE_ISR1_ALL_MASK GENMASK(11, 4) +#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) +#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) +#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) +#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) +#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) + +/* PCIe window configuration */ +#define OB_WIN_BASE_ADDR 0x4c00 +#define OB_WIN_BLOCK_SIZE 0x20 +#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \ + OB_WIN_BLOCK_SIZE * (win) + \ + (offset)) +#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00) +#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04) +#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08) +#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c) +#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10) +#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14) +#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18) + +/* PCIe window types */ +#define OB_PCIE_MEM 0x0 +#define OB_PCIE_IO 0x4 + +/* LMI registers base address and register offsets */ +#define LMI_BASE_ADDR 0x6000 +#define CFG_REG (LMI_BASE_ADDR + 0x0) +#define LTSSM_SHIFT 24 +#define LTSSM_MASK 0x3f +#define LTSSM_L0 0x10 +#define RC_BAR_CONFIG 0x300 + +/* PCIe core controller registers */ +#define CTRL_CORE_BASE_ADDR 0x18000 +#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0) +#define CTRL_MODE_SHIFT 0x0 +#define CTRL_MODE_MASK 0x1 +#define PCIE_CORE_MODE_DIRECT 0x0 +#define PCIE_CORE_MODE_COMMAND 0x1 + +/* PCIe Central Interrupts Registers */ +#define CENTRAL_INT_BASE_ADDR 0x1b000 +#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0) +#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4) +#define PCIE_IRQ_CMDQ_INT BIT(0) +#define PCIE_IRQ_MSI_STATUS_INT BIT(1) +#define PCIE_IRQ_CMD_SENT_DONE BIT(3) +#define PCIE_IRQ_DMA_INT BIT(4) +#define PCIE_IRQ_IB_DXFERDONE BIT(5) +#define 
PCIE_IRQ_OB_DXFERDONE BIT(6) +#define PCIE_IRQ_OB_RXFERDONE BIT(7) +#define PCIE_IRQ_COMPQ_INT BIT(12) +#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13) +#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14) +#define PCIE_IRQ_CORE_INT BIT(16) +#define PCIE_IRQ_CORE_INT_PIO BIT(17) +#define PCIE_IRQ_DPMU_INT BIT(18) +#define PCIE_IRQ_PCIE_MIS_INT BIT(19) +#define PCIE_IRQ_MSI_INT1_DET BIT(20) +#define PCIE_IRQ_MSI_INT2_DET BIT(21) +#define PCIE_IRQ_RC_DBELL_DET BIT(22) +#define PCIE_IRQ_EP_STATUS BIT(23) +#define PCIE_IRQ_ALL_MASK 0xfff0fb +#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT + +/* Transaction types */ +#define PCIE_CONFIG_RD_TYPE0 0x8 +#define PCIE_CONFIG_RD_TYPE1 0x9 +#define PCIE_CONFIG_WR_TYPE0 0xa +#define PCIE_CONFIG_WR_TYPE1 0xb + +#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) +#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) +#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) +#define PCIE_CONF_REG(reg) ((reg) & 0xffc) +#define PCIE_CONF_ADDR(bus, devfn, where) \ + (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ + PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where)) + +#define PIO_TIMEOUT_MS 1 + +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +#define MSI_IRQ_NUM 32 + +struct advk_pcie { + struct platform_device *pdev; + void __iomem *base; + struct list_head resources; + struct irq_domain *irq_domain; + struct irq_chip irq_chip; + struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; + struct irq_chip msi_bottom_irq_chip; + struct irq_chip msi_irq_chip; + struct msi_domain_info msi_domain_info; + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); + struct mutex msi_used_lock; + u16 msi_msg; + int root_bus_nr; +}; + +static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg) +{ + writel(val, pcie->base + reg); +} + +static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg) +{ + return readl(pcie->base + reg); +} + +static int advk_pcie_link_up(struct advk_pcie *pcie) 
+{ + u32 val, ltssm_state; + + val = advk_readl(pcie, CFG_REG); + ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK; + return ltssm_state >= LTSSM_L0; +} + +static int advk_pcie_wait_for_link(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (advk_pcie_link_up(pcie)) { + dev_info(dev, "link up\n"); + return 0; + } + + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + dev_err(dev, "link never came up\n"); + return -ETIMEDOUT; +} + +/* + * Set PCIe address window register which could be used for memory + * mapping. + */ +static void advk_pcie_set_ob_win(struct advk_pcie *pcie, + u32 win_num, u32 match_ms, + u32 match_ls, u32 mask_ms, + u32 mask_ls, u32 remap_ms, + u32 remap_ls, u32 action) +{ + advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num)); + advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num)); + advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num)); + advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num)); + advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num)); + advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num)); + advk_writel(pcie, action, OB_WIN_ACTIONS(win_num)); + advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num)); +} + +static void advk_pcie_setup_hw(struct advk_pcie *pcie) +{ + u32 reg; + int i; + + /* Point PCIe unit MBUS decode windows to DRAM space */ + for (i = 0; i < 8; i++) + advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0); + + /* Set to Direct mode */ + reg = advk_readl(pcie, CTRL_CONFIG_REG); + reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT); + reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT); + advk_writel(pcie, reg, CTRL_CONFIG_REG); + + /* Set PCI global control register to RC mode */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg |= (IS_RC_MSK << IS_RC_SHIFT); + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Set Advanced Error Capabilities 
and Control PF0 register */ + reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX | + PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN | + PCIE_CORE_ERR_CAPCTL_ECRC_CHCK | + PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV; + advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG); + + /* Set PCIe Device Control and Status 1 PF0 register */ + reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | + (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | + PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | + (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ << + PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT); + advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); + + /* Program PCIe Control 2 to disable strict ordering */ + reg = PCIE_CORE_CTRL2_RESERVED | + PCIE_CORE_CTRL2_TD_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Set GEN2 */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg &= ~PCIE_GEN_SEL_MSK; + reg |= SPEED_GEN_2; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Set lane X1 */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg &= ~LANE_CNT_MSK; + reg |= LANE_COUNT_1; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Enable link training */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg |= LINK_TRAINING_EN; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Enable MSI */ + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + reg |= PCIE_CORE_CTRL2_MSI_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Clear all interrupts */ + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + + /* Disable All ISR0/1 Sources */ + reg = PCIE_ISR0_ALL_MASK; + reg &= ~PCIE_ISR0_MSI_INT_PENDING; + advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); + + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); + + /* Unmask all MSI's */ + advk_writel(pcie, 0, PCIE_MSI_MASK_REG); + + /* Enable summary interrupt for GIC SPI source */ + reg = PCIE_IRQ_ALL_MASK & 
(~PCIE_IRQ_ENABLE_INTS_MASK); + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG); + + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Bypass the address window mapping for PIO */ + reg = advk_readl(pcie, PIO_CTRL); + reg |= PIO_CTRL_ADDR_WIN_DISABLE; + advk_writel(pcie, reg, PIO_CTRL); + + /* Start link training */ + reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG); + reg |= PCIE_CORE_LINK_TRAINING; + advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); + + advk_pcie_wait_for_link(pcie); + + reg = PCIE_CORE_LINK_L0S_ENTRY | + (1 << PCIE_CORE_LINK_WIDTH_SHIFT); + advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); + + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); + reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | + PCIE_CORE_CMD_IO_ACCESS_EN | + PCIE_CORE_CMD_MEM_IO_REQ_EN; + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG); +} + +static void advk_pcie_check_pio_status(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + u32 reg; + unsigned int status; + char *strcomp_status, *str_posted; + + reg = advk_readl(pcie, PIO_STAT); + status = (reg & PIO_COMPLETION_STATUS_MASK) >> + PIO_COMPLETION_STATUS_SHIFT; + + if (!status) + return; + + switch (status) { + case PIO_COMPLETION_STATUS_UR: + strcomp_status = "UR"; + break; + case PIO_COMPLETION_STATUS_CRS: + strcomp_status = "CRS"; + break; + case PIO_COMPLETION_STATUS_CA: + strcomp_status = "CA"; + break; + default: + strcomp_status = "Unknown"; + break; + } + + if (reg & PIO_NON_POSTED_REQ) + str_posted = "Non-posted"; + else + str_posted = "Posted"; + + dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n", + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS)); +} + +static int advk_pcie_wait_pio(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS); + + while (time_before(jiffies, timeout)) { + u32 start, 
isr; + + start = advk_readl(pcie, PIO_START); + isr = advk_readl(pcie, PIO_ISR); + if (!start && isr) + return 0; + } + + dev_err(dev, "config read/write timed out\n"); + return -ETIMEDOUT; +} + +static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 *val) +{ + struct advk_pcie *pcie = bus->sysdata; + u32 reg; + int ret; + + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* Start PIO */ + advk_writel(pcie, 0, PIO_START); + advk_writel(pcie, 1, PIO_ISR); + + /* Program the control register */ + reg = advk_readl(pcie, PIO_CTRL); + reg &= ~PIO_CTRL_TYPE_MASK; + if (bus->number == pcie->root_bus_nr) + reg |= PCIE_CONFIG_RD_TYPE0; + else + reg |= PCIE_CONFIG_RD_TYPE1; + advk_writel(pcie, reg, PIO_CTRL); + + /* Program the address registers */ + reg = PCIE_CONF_ADDR(bus->number, devfn, where); + advk_writel(pcie, reg, PIO_ADDR_LS); + advk_writel(pcie, 0, PIO_ADDR_MS); + + /* Program the data strobe */ + advk_writel(pcie, 0xf, PIO_WR_DATA_STRB); + + /* Start the transfer */ + advk_writel(pcie, 1, PIO_START); + + ret = advk_pcie_wait_pio(pcie); + if (ret < 0) + return PCIBIOS_SET_FAILED; + + advk_pcie_check_pio_status(pcie); + + /* Get the read result */ + *val = advk_readl(pcie, PIO_RD_DATA); + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + + return PCIBIOS_SUCCESSFUL; +} + +static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct advk_pcie *pcie = bus->sysdata; + u32 reg; + u32 data_strobe = 0x0; + int offset; + int ret; + + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (where % size) + return PCIBIOS_SET_FAILED; + + /* Start PIO */ + advk_writel(pcie, 0, PIO_START); + advk_writel(pcie, 1, PIO_ISR); + + /* Program the control register */ + reg = advk_readl(pcie, 
PIO_CTRL); + reg &= ~PIO_CTRL_TYPE_MASK; + if (bus->number == pcie->root_bus_nr) + reg |= PCIE_CONFIG_WR_TYPE0; + else + reg |= PCIE_CONFIG_WR_TYPE1; + advk_writel(pcie, reg, PIO_CTRL); + + /* Program the address registers */ + reg = PCIE_CONF_ADDR(bus->number, devfn, where); + advk_writel(pcie, reg, PIO_ADDR_LS); + advk_writel(pcie, 0, PIO_ADDR_MS); + + /* Calculate the write strobe */ + offset = where & 0x3; + reg = val << (8 * offset); + data_strobe = GENMASK(size - 1, 0) << offset; + + /* Program the data register */ + advk_writel(pcie, reg, PIO_WR_DATA); + + /* Program the data strobe */ + advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB); + + /* Start the transfer */ + advk_writel(pcie, 1, PIO_START); + + ret = advk_pcie_wait_pio(pcie); + if (ret < 0) + return PCIBIOS_SET_FAILED; + + advk_pcie_check_pio_status(pcie); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops advk_pcie_ops = { + .read = advk_pcie_rd_conf, + .write = advk_pcie_wr_conf, +}; + +static void advk_msi_irq_compose_msi_msg(struct irq_data *data, + struct msi_msg *msg) +{ + struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); + phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg); + + msg->address_lo = lower_32_bits(msi_msg); + msg->address_hi = upper_32_bits(msi_msg); + msg->data = data->irq; +} + +static int advk_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static int advk_msi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct advk_pcie *pcie = domain->host_data; + int hwirq, i; + + mutex_lock(&pcie->msi_used_lock); + hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, + 0, nr_irqs, 0); + if (hwirq >= MSI_IRQ_NUM) { + mutex_unlock(&pcie->msi_used_lock); + return -ENOSPC; + } + + bitmap_set(pcie->msi_used, hwirq, nr_irqs); + mutex_unlock(&pcie->msi_used_lock); + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + 
i, + &pcie->msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + + return hwirq; +} + +static void advk_msi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct advk_pcie *pcie = domain->host_data; + + mutex_lock(&pcie->msi_used_lock); + bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); + mutex_unlock(&pcie->msi_used_lock); +} + +static const struct irq_domain_ops advk_msi_domain_ops = { + .alloc = advk_msi_irq_domain_alloc, + .free = advk_msi_irq_domain_free, +}; + +static void advk_pcie_irq_mask(struct irq_data *d) +{ + struct advk_pcie *pcie = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 mask; + + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + mask |= PCIE_ISR1_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); +} + +static void advk_pcie_irq_unmask(struct irq_data *d) +{ + struct advk_pcie *pcie = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 mask; + + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); +} + +static int advk_pcie_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hwirq) +{ + struct advk_pcie *pcie = h->host_data; + + advk_pcie_irq_mask(irq_get_irq_data(virq)); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &pcie->irq_chip, + handle_level_irq); + irq_set_chip_data(virq, pcie); + + return 0; +} + +static const struct irq_domain_ops advk_pcie_irq_domain_ops = { + .map = advk_pcie_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + struct irq_chip *bottom_ic, *msi_ic; + struct msi_domain_info *msi_di; + phys_addr_t msi_msg_phys; + + mutex_init(&pcie->msi_used_lock); + + 
bottom_ic = &pcie->msi_bottom_irq_chip; + + bottom_ic->name = "MSI"; + bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg; + bottom_ic->irq_set_affinity = advk_msi_set_affinity; + + msi_ic = &pcie->msi_irq_chip; + msi_ic->name = "advk-MSI"; + + msi_di = &pcie->msi_domain_info; + msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI; + msi_di->chip = msi_ic; + + msi_msg_phys = virt_to_phys(&pcie->msi_msg); + + advk_writel(pcie, lower_32_bits(msi_msg_phys), + PCIE_MSI_ADDR_LOW_REG); + advk_writel(pcie, upper_32_bits(msi_msg_phys), + PCIE_MSI_ADDR_HIGH_REG); + + pcie->msi_inner_domain = + irq_domain_add_linear(NULL, MSI_IRQ_NUM, + &advk_msi_domain_ops, pcie); + if (!pcie->msi_inner_domain) + return -ENOMEM; + + pcie->msi_domain = + pci_msi_create_irq_domain(of_node_to_fwnode(node), + msi_di, pcie->msi_inner_domain); + if (!pcie->msi_domain) { + irq_domain_remove(pcie->msi_inner_domain); + return -ENOMEM; + } + + return 0; +} + +static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) +{ + irq_domain_remove(pcie->msi_domain); + irq_domain_remove(pcie->msi_inner_domain); +} + +static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + struct irq_chip *irq_chip; + + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + irq_chip = &pcie->irq_chip; + + irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", + dev_name(dev)); + if (!irq_chip->name) { + of_node_put(pcie_intc_node); + return -ENOMEM; + } + + irq_chip->irq_mask = advk_pcie_irq_mask; + irq_chip->irq_mask_ack = advk_pcie_irq_mask; + irq_chip->irq_unmask = advk_pcie_irq_unmask; + + pcie->irq_domain = + irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &advk_pcie_irq_domain_ops, pcie); + if (!pcie->irq_domain) { + dev_err(dev, 
"Failed to get a INTx IRQ domain\n"); + of_node_put(pcie_intc_node); + return -ENOMEM; + } + + return 0; +} + +static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) +{ + irq_domain_remove(pcie->irq_domain); +} + +static void advk_pcie_handle_msi(struct advk_pcie *pcie) +{ + u32 msi_val, msi_mask, msi_status, msi_idx; + u16 msi_data; + + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); + msi_status = msi_val & ~msi_mask; + + for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { + if (!(BIT(msi_idx) & msi_status)) + continue; + + advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); + msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF; + generic_handle_irq(msi_data); + } + + advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, + PCIE_ISR0_REG); +} + +static void advk_pcie_handle_int(struct advk_pcie *pcie) +{ + u32 isr0_val, isr0_mask, isr0_status; + u32 isr1_val, isr1_mask, isr1_status; + int i, virq; + + isr0_val = advk_readl(pcie, PCIE_ISR0_REG); + isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK); + + isr1_val = advk_readl(pcie, PCIE_ISR1_REG); + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); + + if (!isr0_status && !isr1_status) { + advk_writel(pcie, isr0_val, PCIE_ISR0_REG); + advk_writel(pcie, isr1_val, PCIE_ISR1_REG); + return; + } + + /* Process MSI interrupts */ + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) + advk_pcie_handle_msi(pcie); + + /* Process legacy interrupts */ + for (i = 0; i < PCI_NUM_INTX; i++) { + if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i))) + continue; + + advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i), + PCIE_ISR1_REG); + + virq = irq_find_mapping(pcie->irq_domain, i); + generic_handle_irq(virq); + } +} + +static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) +{ + struct advk_pcie *pcie = arg; + u32 status; + + status = advk_readl(pcie, 
HOST_CTRL_INT_STATUS_REG); + if (!(status & PCIE_IRQ_CORE_INT)) + return IRQ_NONE; + + advk_pcie_handle_int(pcie); + + /* Clear interrupt */ + advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); + + return IRQ_HANDLED; +} + +static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) +{ + int err, res_valid = 0; + struct device *dev = &pcie->pdev->dev; + struct resource_entry *win, *tmp; + resource_size_t iobase; + + INIT_LIST_HEAD(&pcie->resources); + + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, + &pcie->resources, &iobase); + if (err) + return err; + + err = devm_request_pci_bus_resources(dev, &pcie->resources); + if (err) + goto out_release_res; + + resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { + struct resource *res = win->res; + + switch (resource_type(res)) { + case IORESOURCE_IO: + advk_pcie_set_ob_win(pcie, 1, + upper_32_bits(res->start), + lower_32_bits(res->start), + 0, 0xF8000000, 0, + lower_32_bits(res->start), + OB_PCIE_IO); + err = pci_remap_iospace(res, iobase); + if (err) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, res); + resource_list_destroy_entry(win); + } + break; + case IORESOURCE_MEM: + advk_pcie_set_ob_win(pcie, 0, + upper_32_bits(res->start), + lower_32_bits(res->start), + 0x0, 0xF8000000, 0, + lower_32_bits(res->start), + (2 << 20) | OB_PCIE_MEM); + res_valid |= !(res->flags & IORESOURCE_PREFETCH); + break; + case IORESOURCE_BUS: + pcie->root_bus_nr = res->start; + break; + } + } + + if (!res_valid) { + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; + goto out_release_res; + } + + return 0; + +out_release_res: + pci_free_resource_list(&pcie->resources); + return err; +} + +static int advk_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct advk_pcie *pcie; + struct resource *res; + struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; + int ret, irq; + + bridge = 
devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); + if (!bridge) + return -ENOMEM; + + pcie = pci_host_bridge_priv(bridge); + pcie->pdev = pdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pcie->base = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(dev, irq, advk_pcie_irq_handler, + IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", + pcie); + if (ret) { + dev_err(dev, "Failed to register interrupt\n"); + return ret; + } + + ret = advk_pcie_parse_request_of_pci_ranges(pcie); + if (ret) { + dev_err(dev, "Failed to parse resources\n"); + return ret; + } + + advk_pcie_setup_hw(pcie); + + ret = advk_pcie_init_irq_domain(pcie); + if (ret) { + dev_err(dev, "Failed to initialize irq\n"); + return ret; + } + + ret = advk_pcie_init_msi_irq_domain(pcie); + if (ret) { + dev_err(dev, "Failed to initialize irq\n"); + advk_pcie_remove_irq_domain(pcie); + return ret; + } + + list_splice_init(&pcie->resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = 0; + bridge->ops = &advk_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) { + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + return ret; + } + + bus = bridge->bus; + + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + return 0; +} + +static const struct of_device_id advk_pcie_of_match_table[] = { + { .compatible = "marvell,armada-3700-pcie", }, + {}, +}; + +static struct platform_driver advk_pcie_driver = { + .driver = { + .name = "advk-pcie", + .of_match_table = advk_pcie_of_match_table, + /* Driver unloading/unbinding currently not supported */ + .suppress_bind_attrs = true, + }, + .probe = advk_pcie_probe, +}; 
+builtin_platform_driver(advk_pcie_driver); diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c new file mode 100644 index 000000000000..a1ebe9ed441f --- /dev/null +++ b/drivers/pci/controller/pci-ftpci100.c @@ -0,0 +1,619 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for Faraday Technology FTPC100 PCI Controller + * + * Copyright (C) 2017 Linus Walleij + * + * Based on the out-of-tree OpenWRT patch for Cortina Gemini: + * Copyright (C) 2009 Janos Laube + * Copyright (C) 2009 Paulius Zaleckas + * Based on SL2312 PCI controller code + * Storlink (C) 2003 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +/* + * Special configuration registers directly in the first few words + * in I/O space. + */ +#define PCI_IOSIZE 0x00 +#define PCI_PROT 0x04 /* AHB protection */ +#define PCI_CTRL 0x08 /* PCI control signal */ +#define PCI_SOFTRST 0x10 /* Soft reset counter and response error enable */ +#define PCI_CONFIG 0x28 /* PCI configuration command register */ +#define PCI_DATA 0x2C + +#define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */ +#define FARADAY_PCI_PMC 0x40 /* Power management control */ +#define FARADAY_PCI_PMCSR 0x44 /* Power management status */ +#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ +#define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */ +#define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */ +#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ +#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ + +#define PCI_STATUS_66MHZ_CAPABLE BIT(21) + +/* Bits 31..28 gives INTD..INTA status */ +#define PCI_CTRL2_INTSTS_SHIFT 28 +#define PCI_CTRL2_INTMASK_CMDERR BIT(27) +#define PCI_CTRL2_INTMASK_PARERR BIT(26) +/* Bits 25..22 masks INTD..INTA */ +#define PCI_CTRL2_INTMASK_SHIFT 22 +#define 
PCI_CTRL2_INTMASK_MABRT_RX BIT(21) +#define PCI_CTRL2_INTMASK_TABRT_RX BIT(20) +#define PCI_CTRL2_INTMASK_TABRT_TX BIT(19) +#define PCI_CTRL2_INTMASK_RETRY4 BIT(18) +#define PCI_CTRL2_INTMASK_SERR_RX BIT(17) +#define PCI_CTRL2_INTMASK_PERR_RX BIT(16) +/* Bit 15 reserved */ +#define PCI_CTRL2_MSTPRI_REQ6 BIT(14) +#define PCI_CTRL2_MSTPRI_REQ5 BIT(13) +#define PCI_CTRL2_MSTPRI_REQ4 BIT(12) +#define PCI_CTRL2_MSTPRI_REQ3 BIT(11) +#define PCI_CTRL2_MSTPRI_REQ2 BIT(10) +#define PCI_CTRL2_MSTPRI_REQ1 BIT(9) +#define PCI_CTRL2_MSTPRI_REQ0 BIT(8) +/* Bits 7..4 reserved */ +/* Bits 3..0 TRDYW */ + +/* + * Memory configs: + * Bit 31..20 defines the PCI side memory base + * Bit 19..16 (4 bits) defines the size per below + */ +#define FARADAY_PCI_MEMBASE_MASK 0xfff00000 +#define FARADAY_PCI_MEMSIZE_1MB 0x0 +#define FARADAY_PCI_MEMSIZE_2MB 0x1 +#define FARADAY_PCI_MEMSIZE_4MB 0x2 +#define FARADAY_PCI_MEMSIZE_8MB 0x3 +#define FARADAY_PCI_MEMSIZE_16MB 0x4 +#define FARADAY_PCI_MEMSIZE_32MB 0x5 +#define FARADAY_PCI_MEMSIZE_64MB 0x6 +#define FARADAY_PCI_MEMSIZE_128MB 0x7 +#define FARADAY_PCI_MEMSIZE_256MB 0x8 +#define FARADAY_PCI_MEMSIZE_512MB 0x9 +#define FARADAY_PCI_MEMSIZE_1GB 0xa +#define FARADAY_PCI_MEMSIZE_2GB 0xb +#define FARADAY_PCI_MEMSIZE_SHIFT 16 + +/* + * The DMA base is set to 0x0 for all memory segments, it reflects the + * fact that the memory of the host system starts at 0x0. + */ +#define FARADAY_PCI_DMA_MEM1_BASE 0x00000000 +#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000 +#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000 + +/* Defines for PCI configuration command register */ +#define PCI_CONF_ENABLE BIT(31) +#define PCI_CONF_WHERE(r) ((r) & 0xFC) +#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16) +#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11) +#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8) + +/** + * struct faraday_pci_variant - encodes IP block differences + * @cascaded_irq: this host has cascaded IRQs from an interrupt controller + * embedded in the host bridge. 
+ */ +struct faraday_pci_variant { + bool cascaded_irq; +}; + +struct faraday_pci { + struct device *dev; + void __iomem *base; + struct irq_domain *irqdomain; + struct pci_bus *bus; + struct clk *bus_clk; +}; + +static int faraday_res_to_memcfg(resource_size_t mem_base, + resource_size_t mem_size, u32 *val) +{ + u32 outval; + + switch (mem_size) { + case SZ_1M: + outval = FARADAY_PCI_MEMSIZE_1MB; + break; + case SZ_2M: + outval = FARADAY_PCI_MEMSIZE_2MB; + break; + case SZ_4M: + outval = FARADAY_PCI_MEMSIZE_4MB; + break; + case SZ_8M: + outval = FARADAY_PCI_MEMSIZE_8MB; + break; + case SZ_16M: + outval = FARADAY_PCI_MEMSIZE_16MB; + break; + case SZ_32M: + outval = FARADAY_PCI_MEMSIZE_32MB; + break; + case SZ_64M: + outval = FARADAY_PCI_MEMSIZE_64MB; + break; + case SZ_128M: + outval = FARADAY_PCI_MEMSIZE_128MB; + break; + case SZ_256M: + outval = FARADAY_PCI_MEMSIZE_256MB; + break; + case SZ_512M: + outval = FARADAY_PCI_MEMSIZE_512MB; + break; + case SZ_1G: + outval = FARADAY_PCI_MEMSIZE_1GB; + break; + case SZ_2G: + outval = FARADAY_PCI_MEMSIZE_2GB; + break; + default: + return -EINVAL; + } + outval <<= FARADAY_PCI_MEMSIZE_SHIFT; + + /* This is probably not good */ + if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK)) + pr_warn("truncated PCI memory base\n"); + /* Translate to bridge side address space */ + outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK); + pr_debug("Translated pci base @%pap, size %pap to config %08x\n", + &mem_base, &mem_size, outval); + + *val = outval; + return 0; +} + +static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number, + unsigned int fn, int config, int size, + u32 *value) +{ + writel(PCI_CONF_BUS(bus_number) | + PCI_CONF_DEVICE(PCI_SLOT(fn)) | + PCI_CONF_FUNCTION(PCI_FUNC(fn)) | + PCI_CONF_WHERE(config) | + PCI_CONF_ENABLE, + p->base + PCI_CONFIG); + + *value = readl(p->base + PCI_DATA); + + if (size == 1) + *value = (*value >> (8 * (config & 3))) & 0xFF; + else if (size == 2) + *value = (*value >> (8 * (config & 3))) & 
0xFFFF; + + return PCIBIOS_SUCCESSFUL; +} + +static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, + int config, int size, u32 *value) +{ + struct faraday_pci *p = bus->sysdata; + + dev_dbg(&bus->dev, + "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", + PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); + + return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value); +} + +static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number, + unsigned int fn, int config, int size, + u32 value) +{ + int ret = PCIBIOS_SUCCESSFUL; + + writel(PCI_CONF_BUS(bus_number) | + PCI_CONF_DEVICE(PCI_SLOT(fn)) | + PCI_CONF_FUNCTION(PCI_FUNC(fn)) | + PCI_CONF_WHERE(config) | + PCI_CONF_ENABLE, + p->base + PCI_CONFIG); + + switch (size) { + case 4: + writel(value, p->base + PCI_DATA); + break; + case 2: + writew(value, p->base + PCI_DATA + (config & 3)); + break; + case 1: + writeb(value, p->base + PCI_DATA + (config & 3)); + break; + default: + ret = PCIBIOS_BAD_REGISTER_NUMBER; + } + + return ret; +} + +static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, + int config, int size, u32 value) +{ + struct faraday_pci *p = bus->sysdata; + + dev_dbg(&bus->dev, + "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", + PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); + + return faraday_raw_pci_write_config(p, bus->number, fn, config, size, + value); +} + +static struct pci_ops faraday_pci_ops = { + .read = faraday_pci_read_config, + .write = faraday_pci_write_config, +}; + +static void faraday_pci_ack_irq(struct irq_data *d) +{ + struct faraday_pci *p = irq_data_get_irq_chip_data(d); + unsigned int reg; + + faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); + reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); + reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); + faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); +} + +static void faraday_pci_mask_irq(struct irq_data 
*d) +{ + struct faraday_pci *p = irq_data_get_irq_chip_data(d); + unsigned int reg; + + faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); + reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) + | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); + faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); +} + +static void faraday_pci_unmask_irq(struct irq_data *d) +{ + struct faraday_pci *p = irq_data_get_irq_chip_data(d); + unsigned int reg; + + faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); + reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); + reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT); + faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); +} + +static void faraday_pci_irq_handler(struct irq_desc *desc) +{ + struct faraday_pci *p = irq_desc_get_handler_data(desc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + unsigned int irq_stat, reg, i; + + faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); + irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; + + chained_irq_enter(irqchip, desc); + + for (i = 0; i < 4; i++) { + if ((irq_stat & BIT(i)) == 0) + continue; + generic_handle_irq(irq_find_mapping(p->irqdomain, i)); + } + + chained_irq_exit(irqchip, desc); +} + +static struct irq_chip faraday_pci_irq_chip = { + .name = "PCI", + .irq_ack = faraday_pci_ack_irq, + .irq_mask = faraday_pci_mask_irq, + .irq_unmask = faraday_pci_unmask_irq, +}; + +static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops faraday_pci_irqdomain_ops = { + .map = faraday_pci_irq_map, +}; + +static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) +{ + struct device_node *intc = of_get_next_child(p->dev->of_node, NULL); + int irq; + int i; + + if (!intc) { + dev_err(p->dev, "missing child interrupt-controller 
node\n"); + return -EINVAL; + } + + /* All PCI IRQs cascade off this one */ + irq = of_irq_get(intc, 0); + if (irq <= 0) { + dev_err(p->dev, "failed to get parent IRQ\n"); + return irq ?: -EINVAL; + } + + p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, + &faraday_pci_irqdomain_ops, p); + if (!p->irqdomain) { + dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); + return -EINVAL; + } + + irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p); + + for (i = 0; i < 4; i++) + irq_create_mapping(p->irqdomain, i); + + return 0; +} + +static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p, + struct device_node *np) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = p->dev; + u32 confreg[3] = { + FARADAY_PCI_MEM1_BASE_SIZE, + FARADAY_PCI_MEM2_BASE_SIZE, + FARADAY_PCI_MEM3_BASE_SIZE, + }; + int i = 0; + u32 val; + + if (of_pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* + * Get the dma-ranges from the device tree + */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.pci_addr + range.size - 1; + int ret; + + ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val); + if (ret) { + dev_err(dev, + "DMA range %d: illegal MEM resource size\n", i); + return -EINVAL; + } + + dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", + i + 1, range.pci_addr, end, val); + if (i <= 2) { + faraday_raw_pci_write_config(p, 0, 0, confreg[i], + 4, val); + } else { + dev_err(dev, "ignore extraneous dma-range %d\n", i); + break; + } + + i++; + } + + return 0; +} + +static int faraday_pci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct faraday_pci_variant *variant = + of_device_get_match_data(dev); + struct resource *regs; + resource_size_t io_base; + struct resource_entry *win; + struct faraday_pci *p; + struct resource *mem; + struct resource *io; + struct pci_host_bridge 
*host; + struct clk *clk; + unsigned char max_bus_speed = PCI_SPEED_33MHz; + unsigned char cur_bus_speed = PCI_SPEED_33MHz; + int ret; + u32 val; + LIST_HEAD(res); + + host = devm_pci_alloc_host_bridge(dev, sizeof(*p)); + if (!host) + return -ENOMEM; + + host->dev.parent = dev; + host->ops = &faraday_pci_ops; + host->busnr = 0; + host->msi = NULL; + host->map_irq = of_irq_parse_and_map_pci; + host->swizzle_irq = pci_common_swizzle; + p = pci_host_bridge_priv(host); + host->sysdata = p; + p->dev = dev; + + /* Retrieve and enable optional clocks */ + clk = devm_clk_get(dev, "PCLK"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "could not prepare PCLK\n"); + return ret; + } + p->bus_clk = devm_clk_get(dev, "PCICLK"); + if (IS_ERR(p->bus_clk)) + return PTR_ERR(p->bus_clk); + ret = clk_prepare_enable(p->bus_clk); + if (ret) { + dev_err(dev, "could not prepare PCICLK\n"); + return ret; + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + p->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(p->base)) + return PTR_ERR(p->base); + + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, + &res, &io_base); + if (ret) + return ret; + + ret = devm_request_pci_bus_resources(dev, &res); + if (ret) + return ret; + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry(win, &res) { + switch (resource_type(win->res)) { + case IORESOURCE_IO: + io = win->res; + io->name = "Gemini PCI I/O"; + if (!faraday_res_to_memcfg(io->start - win->offset, + resource_size(io), &val)) { + /* setup I/O space size */ + writel(val, p->base + PCI_IOSIZE); + } else { + dev_err(dev, "illegal IO mem size\n"); + return -EINVAL; + } + ret = pci_remap_iospace(io, io_base); + if (ret) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + ret, io); + continue; + } + break; + case IORESOURCE_MEM: + mem = win->res; + mem->name = "Gemini PCI MEM"; + break; + case IORESOURCE_BUS: + break; + default: + break; 
+ } + } + + /* Setup hostbridge */ + val = readl(p->base + PCI_CTRL); + val |= PCI_COMMAND_IO; + val |= PCI_COMMAND_MEMORY; + val |= PCI_COMMAND_MASTER; + writel(val, p->base + PCI_CTRL); + /* Mask and clear all interrupts */ + faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); + if (variant->cascaded_irq) { + ret = faraday_pci_setup_cascaded_irq(p); + if (ret) { + dev_err(dev, "failed to setup cascaded IRQ\n"); + return ret; + } + } + + /* Check bus clock if we can gear up to 66 MHz */ + if (!IS_ERR(p->bus_clk)) { + unsigned long rate; + u32 val; + + faraday_raw_pci_read_config(p, 0, 0, + FARADAY_PCI_STATUS_CMD, 4, &val); + rate = clk_get_rate(p->bus_clk); + + if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) { + dev_info(dev, "33MHz bus is 66MHz capable\n"); + max_bus_speed = PCI_SPEED_66MHz; + ret = clk_set_rate(p->bus_clk, 66000000); + if (ret) + dev_err(dev, "failed to set bus clock\n"); + } else { + dev_info(dev, "33MHz only bus\n"); + max_bus_speed = PCI_SPEED_33MHz; + } + + /* Bumping the clock may fail so read back the rate */ + rate = clk_get_rate(p->bus_clk); + if (rate == 33000000) + cur_bus_speed = PCI_SPEED_33MHz; + if (rate == 66000000) + cur_bus_speed = PCI_SPEED_66MHz; + } + + ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); + if (ret) + return ret; + + list_splice_init(&res, &host->windows); + ret = pci_scan_root_bus_bridge(host); + if (ret) { + dev_err(dev, "failed to scan host: %d\n", ret); + return ret; + } + p->bus = host->bus; + p->bus->max_bus_speed = max_bus_speed; + p->bus->cur_bus_speed = cur_bus_speed; + + pci_bus_assign_resources(p->bus); + pci_bus_add_devices(p->bus); + pci_free_resource_list(&res); + + return 0; +} + +/* + * We encode bridge variants here, we have at least two so it doesn't + * hurt to have infrastructure to encompass future variants as well. 
+ */ +static const struct faraday_pci_variant faraday_regular = { + .cascaded_irq = true, +}; + +static const struct faraday_pci_variant faraday_dual = { + .cascaded_irq = false, +}; + +static const struct of_device_id faraday_pci_of_match[] = { + { + .compatible = "faraday,ftpci100", + .data = &faraday_regular, + }, + { + .compatible = "faraday,ftpci100-dual", + .data = &faraday_dual, + }, + {}, +}; + +static struct platform_driver faraday_pci_driver = { + .driver = { + .name = "ftpci100", + .of_match_table = of_match_ptr(faraday_pci_of_match), + .suppress_bind_attrs = true, + }, + .probe = faraday_pci_probe, +}; +builtin_platform_driver(faraday_pci_driver); diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c new file mode 100644 index 000000000000..d8f10451f273 --- /dev/null +++ b/drivers/pci/controller/pci-host-common.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic PCI host driver common code + * + * Copyright (C) 2014 ARM Limited + * + * Author: Will Deacon + */ + +#include +#include +#include +#include +#include + +static void gen_pci_unmap_cfg(void *ptr) +{ + pci_ecam_free((struct pci_config_window *)ptr); +} + +static struct pci_config_window *gen_pci_init(struct device *dev, + struct list_head *resources, struct pci_ecam_ops *ops) +{ + int err; + struct resource cfgres; + struct resource *bus_range = NULL; + struct pci_config_window *cfg; + + /* Parse our PCI ranges and request their resources */ + err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range); + if (err) + return ERR_PTR(err); + + err = of_address_to_resource(dev->of_node, 0, &cfgres); + if (err) { + dev_err(dev, "missing \"reg\" property\n"); + goto err_out; + } + + cfg = pci_ecam_create(dev, &cfgres, bus_range, ops); + if (IS_ERR(cfg)) { + err = PTR_ERR(cfg); + goto err_out; + } + + err = devm_add_action(dev, gen_pci_unmap_cfg, cfg); + if (err) { + gen_pci_unmap_cfg(cfg); + goto err_out; + } + return cfg; + 
+err_out: + pci_free_resource_list(resources); + return ERR_PTR(err); +} + +int pci_host_common_probe(struct platform_device *pdev, + struct pci_ecam_ops *ops) +{ + const char *type; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct pci_host_bridge *bridge; + struct pci_config_window *cfg; + struct list_head resources; + int ret; + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) + return -ENOMEM; + + type = of_get_property(np, "device_type", NULL); + if (!type || strcmp(type, "pci")) { + dev_err(dev, "invalid \"device_type\" %s\n", type); + return -EINVAL; + } + + of_pci_check_probe_only(); + + /* Parse and map our Configuration Space windows */ + cfg = gen_pci_init(dev, &resources, ops); + if (IS_ERR(cfg)) + return PTR_ERR(cfg); + + /* Do not reassign resources if probe only */ + if (!pci_has_flag(PCI_PROBE_ONLY)) + pci_add_flags(PCI_REASSIGN_ALL_BUS); + + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = cfg; + bridge->busnr = cfg->busr.start; + bridge->ops = &ops->pci_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_host_probe(bridge); + if (ret < 0) { + pci_free_resource_list(&resources); + return ret; + } + + platform_set_drvdata(pdev, bridge->bus); + return 0; +} + +int pci_host_common_remove(struct platform_device *pdev) +{ + struct pci_bus *bus = platform_get_drvdata(pdev); + + pci_lock_rescan_remove(); + pci_stop_root_bus(bus); + pci_remove_root_bus(bus); + pci_unlock_rescan_remove(); + + return 0; +} diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c new file mode 100644 index 000000000000..dea3ec7592a2 --- /dev/null +++ b/drivers/pci/controller/pci-host-generic.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Simple, generic PCI host controller driver targetting firmware-initialised + * systems and virtual machines (e.g. 
the PCI emulation provided by kvmtool). + * + * Copyright (C) 2014 ARM Limited + * + * Author: Will Deacon + */ + +#include +#include +#include +#include +#include +#include + +static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = { + .bus_shift = 16, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; + +static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_config_window *cfg = bus->sysdata; + + /* + * The Synopsys DesignWare PCIe controller in ECAM mode will not filter + * type 0 config TLPs sent to devices 1 and up on its downstream port, + * resulting in devices appearing multiple times on bus 0 unless we + * filter out those accesses here. + */ + if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0) + return false; + + return true; +} + +static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + if (!pci_dw_valid_device(bus, devfn)) + return NULL; + + return pci_ecam_map_bus(bus, devfn, where); +} + +static struct pci_ecam_ops pci_dw_ecam_bus_ops = { + .bus_shift = 20, + .pci_ops = { + .map_bus = pci_dw_ecam_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; + +static const struct of_device_id gen_pci_of_match[] = { + { .compatible = "pci-host-cam-generic", + .data = &gen_pci_cfg_cam_bus_ops }, + + { .compatible = "pci-host-ecam-generic", + .data = &pci_generic_ecam_ops }, + + { .compatible = "marvell,armada8k-pcie-ecam", + .data = &pci_dw_ecam_bus_ops }, + + { .compatible = "socionext,synquacer-pcie-ecam", + .data = &pci_dw_ecam_bus_ops }, + + { .compatible = "snps,dw-pcie-ecam", + .data = &pci_dw_ecam_bus_ops }, + + { }, +}; + +static int gen_pci_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + struct pci_ecam_ops *ops; + + of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node); + ops = (struct pci_ecam_ops *)of_id->data; + + return 
pci_host_common_probe(pdev, ops); +} + +static struct platform_driver gen_pci_driver = { + .driver = { + .name = "pci-host-generic", + .of_match_table = gen_pci_of_match, + .suppress_bind_attrs = true, + }, + .probe = gen_pci_probe, + .remove = pci_host_common_remove, +}; +builtin_platform_driver(gen_pci_driver); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c new file mode 100644 index 000000000000..6cc5036ac83c --- /dev/null +++ b/drivers/pci/controller/pci-hyperv.c @@ -0,0 +1,2694 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Microsoft Corporation. + * + * Author: + * Jake Oshins + * + * This driver acts as a paravirtual front-end for PCI Express root buses. + * When a PCI Express function (either an entire device or an SR-IOV + * Virtual Function) is being passed through to the VM, this driver exposes + * a new bus to the guest VM. This is modeled as a root PCI bus because + * no bridges are being exposed to the VM. In fact, with a "Generation 2" + * VM within Hyper-V, there may seem to be no PCI bus at all in the VM + * until a device as been exposed using this driver. + * + * Each root PCI bus has its own PCI domain, which is called "Segment" in + * the PCI Firmware Specifications. Thus while each device passed through + * to the VM using this front-end will appear at "device 0", the domain will + * be unique. Typically, each bus will have one PCI function on it, though + * this driver does support more than one. + * + * In order to map the interrupts from the device through to the guest VM, + * this driver also implements an IRQ Domain, which handles interrupts (either + * MSI or MSI-X) associated with the functions on the bus. As interrupts are + * set up, torn down, or reaffined, this driver communicates with the + * underlying hypervisor to adjust the mappings in the I/O MMU so that each + * interrupt will be delivered to the correct virtual processor at the right + * vector. 
This driver does not support level-triggered (line-based) + * interrupts, and will report that the Interrupt Line register in the + * function's configuration space is zero. + * + * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V + * facilities. For instance, the configuration space of a function exposed + * by Hyper-V is mapped into a single page of memory space, and the + * read and write handlers for config space must be aware of this mechanism. + * Similarly, device setup and teardown involves messages sent to and from + * the PCI back-end driver in Hyper-V. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Protocol versions. The low word is the minor version, the high word the + * major version. + */ + +#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor))) +#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16) +#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff) + +enum pci_protocol_version_t { + PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */ + PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */ +}; + +#define CPU_AFFINITY_ALL -1ULL + +/* + * Supported protocol versions in the order of probing - highest go + * first. + */ +static enum pci_protocol_version_t pci_protocol_versions[] = { + PCI_PROTOCOL_VERSION_1_2, + PCI_PROTOCOL_VERSION_1_1, +}; + +/* + * Protocol version negotiated by hv_pci_protocol_negotiation(). 
+ */ +static enum pci_protocol_version_t pci_protocol_version; + +#define PCI_CONFIG_MMIO_LENGTH 0x2000 +#define CFG_PAGE_OFFSET 0x1000 +#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) + +#define MAX_SUPPORTED_MSI_MESSAGES 0x400 + +#define STATUS_REVISION_MISMATCH 0xC0000059 + +/* + * Message Types + */ + +enum pci_message_type { + /* + * Version 1.1 + */ + PCI_MESSAGE_BASE = 0x42490000, + PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0, + PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1, + PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4, + PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5, + PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6, + PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7, + PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8, + PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9, + PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA, + PCI_EJECT = PCI_MESSAGE_BASE + 0xB, + PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC, + PCI_REENABLE = PCI_MESSAGE_BASE + 0xD, + PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE, + PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF, + PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10, + PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11, + PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12, + PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, + PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, + PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, + PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, + PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, + PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ + PCI_MESSAGE_MAXIMUM +}; + +/* + * Structures defining the virtual PCI Express protocol. + */ + +union pci_version { + struct { + u16 minor_version; + u16 major_version; + } parts; + u32 version; +} __packed; + +/* + * Function numbers are 8-bits wide on Express, as interpreted through ARI, + * which is all this driver does. 
This representation is the one used in + * Windows, which is what is expected when sending this back and forth with + * the Hyper-V parent partition. + */ +union win_slot_encoding { + struct { + u32 dev:5; + u32 func:3; + u32 reserved:24; + } bits; + u32 slot; +} __packed; + +/* + * Pretty much as defined in the PCI Specifications. + */ +struct pci_function_description { + u16 v_id; /* vendor ID */ + u16 d_id; /* device ID */ + u8 rev; + u8 prog_intf; + u8 subclass; + u8 base_class; + u32 subsystem_id; + union win_slot_encoding win_slot; + u32 ser; /* serial number */ +} __packed; + +/** + * struct hv_msi_desc + * @vector: IDT entry + * @delivery_mode: As defined in Intel's Programmer's + * Reference Manual, Volume 3, Chapter 8. + * @vector_count: Number of contiguous entries in the + * Interrupt Descriptor Table that are + * occupied by this Message-Signaled + * Interrupt. For "MSI", as first defined + * in PCI 2.2, this can be between 1 and + * 32. For "MSI-X," as first defined in PCI + * 3.0, this must be 1, as each MSI-X table + * entry would have its own descriptor. + * @reserved: Empty space + * @cpu_mask: All the target virtual processors. + */ +struct hv_msi_desc { + u8 vector; + u8 delivery_mode; + u16 vector_count; + u32 reserved; + u64 cpu_mask; +} __packed; + +/** + * struct hv_msi_desc2 - 1.2 version of hv_msi_desc + * @vector: IDT entry + * @delivery_mode: As defined in Intel's Programmer's + * Reference Manual, Volume 3, Chapter 8. + * @vector_count: Number of contiguous entries in the + * Interrupt Descriptor Table that are + * occupied by this Message-Signaled + * Interrupt. For "MSI", as first defined + * in PCI 2.2, this can be between 1 and + * 32. For "MSI-X," as first defined in PCI + * 3.0, this must be 1, as each MSI-X table + * entry would have its own descriptor. + * @processor_count: number of bits enabled in array. + * @processor_array: All the target virtual processors. 
+ */ +struct hv_msi_desc2 { + u8 vector; + u8 delivery_mode; + u16 vector_count; + u16 processor_count; + u16 processor_array[32]; +} __packed; + +/** + * struct tran_int_desc + * @reserved: unused, padding + * @vector_count: same as in hv_msi_desc + * @data: This is the "data payload" value that is + * written by the device when it generates + * a message-signaled interrupt, either MSI + * or MSI-X. + * @address: This is the address to which the data + * payload is written on interrupt + * generation. + */ +struct tran_int_desc { + u16 reserved; + u16 vector_count; + u32 data; + u64 address; +} __packed; + +/* + * A generic message format for virtual PCI. + * Specific message formats are defined later in the file. + */ + +struct pci_message { + u32 type; +} __packed; + +struct pci_child_message { + struct pci_message message_type; + union win_slot_encoding wslot; +} __packed; + +struct pci_incoming_message { + struct vmpacket_descriptor hdr; + struct pci_message message_type; +} __packed; + +struct pci_response { + struct vmpacket_descriptor hdr; + s32 status; /* negative values are failures */ +} __packed; + +struct pci_packet { + void (*completion_func)(void *context, struct pci_response *resp, + int resp_packet_size); + void *compl_ctxt; + + struct pci_message message[0]; +}; + +/* + * Specific message types supporting the PCI protocol. + */ + +/* + * Version negotiation message. Sent from the guest to the host. + * The guest is free to try different versions until the host + * accepts the version. + * + * pci_version: The protocol version requested. + * is_last_attempt: If TRUE, this is the last version guest will request. + * reservedz: Reserved field, set to zero. + */ + +struct pci_version_request { + struct pci_message message_type; + u32 protocol_version; +} __packed; + +/* + * Bus D0 Entry. This is sent from the guest to the host when the virtual + * bus (PCI Express port) is ready for action. 
+ */ + +struct pci_bus_d0_entry { + struct pci_message message_type; + u32 reserved; + u64 mmio_base; +} __packed; + +struct pci_bus_relations { + struct pci_incoming_message incoming; + u32 device_count; + struct pci_function_description func[0]; +} __packed; + +struct pci_q_res_req_response { + struct vmpacket_descriptor hdr; + s32 status; /* negative values are failures */ + u32 probed_bar[6]; +} __packed; + +struct pci_set_power { + struct pci_message message_type; + union win_slot_encoding wslot; + u32 power_state; /* In Windows terms */ + u32 reserved; +} __packed; + +struct pci_set_power_response { + struct vmpacket_descriptor hdr; + s32 status; /* negative values are failures */ + union win_slot_encoding wslot; + u32 resultant_state; /* In Windows terms */ + u32 reserved; +} __packed; + +struct pci_resources_assigned { + struct pci_message message_type; + union win_slot_encoding wslot; + u8 memory_range[0x14][6]; /* not used here */ + u32 msi_descriptors; + u32 reserved[4]; +} __packed; + +struct pci_resources_assigned2 { + struct pci_message message_type; + union win_slot_encoding wslot; + u8 memory_range[0x14][6]; /* not used here */ + u32 msi_descriptor_count; + u8 reserved[70]; +} __packed; + +struct pci_create_interrupt { + struct pci_message message_type; + union win_slot_encoding wslot; + struct hv_msi_desc int_desc; +} __packed; + +struct pci_create_int_response { + struct pci_response response; + u32 reserved; + struct tran_int_desc int_desc; +} __packed; + +struct pci_create_interrupt2 { + struct pci_message message_type; + union win_slot_encoding wslot; + struct hv_msi_desc2 int_desc; +} __packed; + +struct pci_delete_interrupt { + struct pci_message message_type; + union win_slot_encoding wslot; + struct tran_int_desc int_desc; +} __packed; + +struct pci_dev_incoming { + struct pci_incoming_message incoming; + union win_slot_encoding wslot; +} __packed; + +struct pci_eject_response { + struct pci_message message_type; + union win_slot_encoding 
wslot; + u32 status; +} __packed; + +static int pci_ring_size = (4 * PAGE_SIZE); + +/* + * Definitions or interrupt steering hypercall. + */ +#define HV_PARTITION_ID_SELF ((u64)-1) +#define HVCALL_RETARGET_INTERRUPT 0x7e + +struct hv_interrupt_entry { + u32 source; /* 1 for MSI(-X) */ + u32 reserved1; + u32 address; + u32 data; +}; + +#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */ + +struct hv_vp_set { + u64 format; /* 0 (HvGenericSetSparse4k) */ + u64 valid_banks; + u64 masks[HV_VP_SET_BANK_COUNT_MAX]; +}; + +/* + * flags for hv_device_interrupt_target.flags + */ +#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 +#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 + +struct hv_device_interrupt_target { + u32 vector; + u32 flags; + union { + u64 vp_mask; + struct hv_vp_set vp_set; + }; +}; + +struct retarget_msi_interrupt { + u64 partition_id; /* use "self" */ + u64 device_id; + struct hv_interrupt_entry int_entry; + u64 reserved2; + struct hv_device_interrupt_target int_target; +} __packed; + +/* + * Driver specific state. 
+ */ + +enum hv_pcibus_state { + hv_pcibus_init = 0, + hv_pcibus_probed, + hv_pcibus_installed, + hv_pcibus_removed, + hv_pcibus_maximum +}; + +struct hv_pcibus_device { + struct pci_sysdata sysdata; + enum hv_pcibus_state state; + refcount_t remove_lock; + struct hv_device *hdev; + resource_size_t low_mmio_space; + resource_size_t high_mmio_space; + struct resource *mem_config; + struct resource *low_mmio_res; + struct resource *high_mmio_res; + struct completion *survey_event; + struct completion remove_event; + struct pci_bus *pci_bus; + spinlock_t config_lock; /* Avoid two threads writing index page */ + spinlock_t device_list_lock; /* Protect lists below */ + void __iomem *cfg_addr; + + struct list_head resources_for_children; + + struct list_head children; + struct list_head dr_list; + + struct msi_domain_info msi_info; + struct msi_controller msi_chip; + struct irq_domain *irq_domain; + + /* hypercall arg, must not cross page boundary */ + struct retarget_msi_interrupt retarget_msi_interrupt_params; + + spinlock_t retarget_msi_interrupt_lock; + + struct workqueue_struct *wq; +}; + +/* + * Tracks "Device Relations" messages from the host, which must be both + * processed in order and deferred so that they don't run in the context + * of the incoming packet callback. 
+ */ +struct hv_dr_work { + struct work_struct wrk; + struct hv_pcibus_device *bus; +}; + +struct hv_dr_state { + struct list_head list_entry; + u32 device_count; + struct pci_function_description func[0]; +}; + +enum hv_pcichild_state { + hv_pcichild_init = 0, + hv_pcichild_requirements, + hv_pcichild_resourced, + hv_pcichild_ejecting, + hv_pcichild_maximum +}; + +struct hv_pci_dev { + /* List protected by pci_rescan_remove_lock */ + struct list_head list_entry; + refcount_t refs; + enum hv_pcichild_state state; + struct pci_function_description desc; + bool reported_missing; + struct hv_pcibus_device *hbus; + struct work_struct wrk; + + /* + * What would be observed if one wrote 0xFFFFFFFF to a BAR and then + * read it back, for each of the BAR offsets within config space. + */ + u32 probed_bar[6]; +}; + +struct hv_pci_compl { + struct completion host_event; + s32 completion_status; +}; + +static void hv_pci_onchannelcallback(void *context); + +/** + * hv_pci_generic_compl() - Invoked for a completion packet + * @context: Set up by the sender of the packet. + * @resp: The response packet + * @resp_packet_size: Size in bytes of the packet + * + * This function is used to trigger an event and report status + * for any message for which the completion packet contains a + * status and nothing else. 
+ */ +static void hv_pci_generic_compl(void *context, struct pci_response *resp, + int resp_packet_size) +{ + struct hv_pci_compl *comp_pkt = context; + + if (resp_packet_size >= offsetofend(struct pci_response, status)) + comp_pkt->completion_status = resp->status; + else + comp_pkt->completion_status = -1; + + complete(&comp_pkt->host_event); +} + +static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, + u32 wslot); + +static void get_pcichild(struct hv_pci_dev *hpdev) +{ + refcount_inc(&hpdev->refs); +} + +static void put_pcichild(struct hv_pci_dev *hpdev) +{ + if (refcount_dec_and_test(&hpdev->refs)) + kfree(hpdev); +} + +static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); +static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); + +/* + * There is no good way to get notified from vmbus_onoffer_rescind(), + * so let's use polling here, since this is not a hot path. + */ +static int wait_for_response(struct hv_device *hdev, + struct completion *comp) +{ + while (true) { + if (hdev->channel->rescind) { + dev_warn_once(&hdev->device, "The device is gone.\n"); + return -ENODEV; + } + + if (wait_for_completion_timeout(comp, HZ / 10)) + break; + } + + return 0; +} + +/** + * devfn_to_wslot() - Convert from Linux PCI slot to Windows + * @devfn: The Linux representation of PCI slot + * + * Windows uses a slightly different representation of PCI slot. + * + * Return: The Windows representation + */ +static u32 devfn_to_wslot(int devfn) +{ + union win_slot_encoding wslot; + + wslot.slot = 0; + wslot.bits.dev = PCI_SLOT(devfn); + wslot.bits.func = PCI_FUNC(devfn); + + return wslot.slot; +} + +/** + * wslot_to_devfn() - Convert from Windows PCI slot to Linux + * @wslot: The Windows representation of PCI slot + * + * Windows uses a slightly different representation of PCI slot. 
+ * + * Return: The Linux representation + */ +static int wslot_to_devfn(u32 wslot) +{ + union win_slot_encoding slot_no; + + slot_no.slot = wslot; + return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func); +} + +/* + * PCI Configuration Space for these root PCI buses is implemented as a pair + * of pages in memory-mapped I/O space. Writing to the first page chooses + * the PCI function being written or read. Once the first page has been + * written to, the following page maps in the entire configuration space of + * the function. + */ + +/** + * _hv_pcifront_read_config() - Internal PCI config read + * @hpdev: The PCI driver's representation of the device + * @where: Offset within config space + * @size: Size of the transfer + * @val: Pointer to the buffer receiving the data + */ +static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, + int size, u32 *val) +{ + unsigned long flags; + void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where; + + /* + * If the attempt is to read the IDs or the ROM BAR, simulate that. + */ + if (where + size <= PCI_COMMAND) { + memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size); + } else if (where >= PCI_CLASS_REVISION && where + size <= + PCI_CACHE_LINE_SIZE) { + memcpy(val, ((u8 *)&hpdev->desc.rev) + where - + PCI_CLASS_REVISION, size); + } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <= + PCI_ROM_ADDRESS) { + memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where - + PCI_SUBSYSTEM_VENDOR_ID, size); + } else if (where >= PCI_ROM_ADDRESS && where + size <= + PCI_CAPABILITY_LIST) { + /* ROM BARs are unimplemented */ + *val = 0; + } else if (where >= PCI_INTERRUPT_LINE && where + size <= + PCI_INTERRUPT_PIN) { + /* + * Interrupt Line and Interrupt PIN are hard-wired to zero + * because this front-end only supports message-signaled + * interrupts. 
+ */ + *val = 0; + } else if (where + size <= CFG_PAGE_SIZE) { + spin_lock_irqsave(&hpdev->hbus->config_lock, flags); + /* Choose the function to be read. (See comment above) */ + writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); + /* Make sure the function was chosen before we start reading. */ + mb(); + /* Read from that function's config space. */ + switch (size) { + case 1: + *val = readb(addr); + break; + case 2: + *val = readw(addr); + break; + default: + *val = readl(addr); + break; + } + /* + * Make sure the read was done before we release the spinlock + * allowing consecutive reads/writes. + */ + mb(); + spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); + } else { + dev_err(&hpdev->hbus->hdev->device, + "Attempt to read beyond a function's config space.\n"); + } +} + +static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev) +{ + u16 ret; + unsigned long flags; + void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + + PCI_VENDOR_ID; + + spin_lock_irqsave(&hpdev->hbus->config_lock, flags); + + /* Choose the function to be read. (See comment above) */ + writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); + /* Make sure the function was chosen before we start reading. */ + mb(); + /* Read from that function's config space. */ + ret = readw(addr); + /* + * mb() is not required here, because the spin_unlock_irqrestore() + * is a barrier. 
+ */ + + spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); + + return ret; +} + +/** + * _hv_pcifront_write_config() - Internal PCI config write + * @hpdev: The PCI driver's representation of the device + * @where: Offset within config space + * @size: Size of the transfer + * @val: The data being transferred + */ +static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, + int size, u32 val) +{ + unsigned long flags; + void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where; + + if (where >= PCI_SUBSYSTEM_VENDOR_ID && + where + size <= PCI_CAPABILITY_LIST) { + /* SSIDs and ROM BARs are read-only */ + } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) { + spin_lock_irqsave(&hpdev->hbus->config_lock, flags); + /* Choose the function to be written. (See comment above) */ + writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); + /* Make sure the function was chosen before we start writing. */ + wmb(); + /* Write to that function's config space. */ + switch (size) { + case 1: + writeb(val, addr); + break; + case 2: + writew(val, addr); + break; + default: + writel(val, addr); + break; + } + /* + * Make sure the write was done before we release the spinlock + * allowing consecutive reads/writes. 
+ */ + mb(); + spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); + } else { + dev_err(&hpdev->hbus->hdev->device, + "Attempt to write beyond a function's config space.\n"); + } +} + +/** + * hv_pcifront_read_config() - Read configuration space + * @bus: PCI Bus structure + * @devfn: Device/function + * @where: Offset from base + * @size: Byte/word/dword + * @val: Value to be read + * + * Return: PCIBIOS_SUCCESSFUL on success + * PCIBIOS_DEVICE_NOT_FOUND on failure + */ +static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct hv_pcibus_device *hbus = + container_of(bus->sysdata, struct hv_pcibus_device, sysdata); + struct hv_pci_dev *hpdev; + + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); + if (!hpdev) + return PCIBIOS_DEVICE_NOT_FOUND; + + _hv_pcifront_read_config(hpdev, where, size, val); + + put_pcichild(hpdev); + return PCIBIOS_SUCCESSFUL; +} + +/** + * hv_pcifront_write_config() - Write configuration space + * @bus: PCI Bus structure + * @devfn: Device/function + * @where: Offset from base + * @size: Byte/word/dword + * @val: Value to be written to device + * + * Return: PCIBIOS_SUCCESSFUL on success + * PCIBIOS_DEVICE_NOT_FOUND on failure + */ +static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct hv_pcibus_device *hbus = + container_of(bus->sysdata, struct hv_pcibus_device, sysdata); + struct hv_pci_dev *hpdev; + + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); + if (!hpdev) + return PCIBIOS_DEVICE_NOT_FOUND; + + _hv_pcifront_write_config(hpdev, where, size, val); + + put_pcichild(hpdev); + return PCIBIOS_SUCCESSFUL; +} + +/* PCIe operations */ +static struct pci_ops hv_pcifront_ops = { + .read = hv_pcifront_read_config, + .write = hv_pcifront_write_config, +}; + +/* Interrupt management hooks */ +static void hv_int_desc_free(struct hv_pci_dev *hpdev, + struct tran_int_desc *int_desc) +{ + struct 
pci_delete_interrupt *int_pkt; + struct { + struct pci_packet pkt; + u8 buffer[sizeof(struct pci_delete_interrupt)]; + } ctxt; + + memset(&ctxt, 0, sizeof(ctxt)); + int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; + int_pkt->message_type.type = + PCI_DELETE_INTERRUPT_MESSAGE; + int_pkt->wslot.slot = hpdev->desc.win_slot.slot; + int_pkt->int_desc = *int_desc; + vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt), + (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); + kfree(int_desc); +} + +/** + * hv_msi_free() - Free the MSI. + * @domain: The interrupt domain pointer + * @info: Extra MSI-related context + * @irq: Identifies the IRQ. + * + * The Hyper-V parent partition and hypervisor are tracking the + * messages that are in use, keeping the interrupt redirection + * table up to date. This callback sends a message that frees + * the IRT entry and related tracking nonsense. + */ +static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, + unsigned int irq) +{ + struct hv_pcibus_device *hbus; + struct hv_pci_dev *hpdev; + struct pci_dev *pdev; + struct tran_int_desc *int_desc; + struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq); + struct msi_desc *msi = irq_data_get_msi_desc(irq_data); + + pdev = msi_desc_to_pci_dev(msi); + hbus = info->data; + int_desc = irq_data_get_irq_chip_data(irq_data); + if (!int_desc) + return; + + irq_data->chip_data = NULL; + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); + if (!hpdev) { + kfree(int_desc); + return; + } + + hv_int_desc_free(hpdev, int_desc); + put_pcichild(hpdev); +} + +static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest, + bool force) +{ + struct irq_data *parent = data->parent_data; + + return parent->chip->irq_set_affinity(parent, dest, force); +} + +static void hv_irq_mask(struct irq_data *data) +{ + pci_msi_mask_irq(data); +} + +/** + * hv_irq_unmask() - "Unmask" the IRQ by setting its current + * affinity. 
+ * @data: Describes the IRQ + * + * Build new a destination for the MSI and make a hypercall to + * update the Interrupt Redirection Table. "Device Logical ID" + * is built out of this PCI bus's instance GUID and the function + * number of the device. + */ +static void hv_irq_unmask(struct irq_data *data) +{ + struct msi_desc *msi_desc = irq_data_get_msi_desc(data); + struct irq_cfg *cfg = irqd_cfg(data); + struct retarget_msi_interrupt *params; + struct hv_pcibus_device *hbus; + struct cpumask *dest; + struct pci_bus *pbus; + struct pci_dev *pdev; + unsigned long flags; + u32 var_size = 0; + int cpu_vmbus; + int cpu; + u64 res; + + dest = irq_data_get_effective_affinity_mask(data); + pdev = msi_desc_to_pci_dev(msi_desc); + pbus = pdev->bus; + hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); + + spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); + + params = &hbus->retarget_msi_interrupt_params; + memset(params, 0, sizeof(*params)); + params->partition_id = HV_PARTITION_ID_SELF; + params->int_entry.source = 1; /* MSI(-X) */ + params->int_entry.address = msi_desc->msg.address_lo; + params->int_entry.data = msi_desc->msg.data; + params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | + (hbus->hdev->dev_instance.b[4] << 16) | + (hbus->hdev->dev_instance.b[7] << 8) | + (hbus->hdev->dev_instance.b[6] & 0xf8) | + PCI_FUNC(pdev->devfn); + params->int_target.vector = cfg->vector; + + /* + * Honoring apic->irq_delivery_mode set to dest_Fixed by + * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a + * spurious interrupt storm. Not doing so does not seem to have a + * negative effect (yet?). + */ + + if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) { + /* + * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the + * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides + * with >64 VP support. + * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED + * is not sufficient for this hypercall. 
+ */ + params->int_target.flags |= + HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; + params->int_target.vp_set.valid_banks = + (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1; + + /* + * var-sized hypercall, var-size starts after vp_mask (thus + * vp_set.format does not count, but vp_set.valid_banks does). + */ + var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; + + for_each_cpu_and(cpu, dest, cpu_online_mask) { + cpu_vmbus = hv_cpu_number_to_vp_number(cpu); + + if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { + dev_err(&hbus->hdev->device, + "too high CPU %d", cpu_vmbus); + res = 1; + goto exit_unlock; + } + + params->int_target.vp_set.masks[cpu_vmbus / 64] |= + (1ULL << (cpu_vmbus & 63)); + } + } else { + for_each_cpu_and(cpu, dest, cpu_online_mask) { + params->int_target.vp_mask |= + (1ULL << hv_cpu_number_to_vp_number(cpu)); + } + } + + res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), + params, NULL); + +exit_unlock: + spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); + + if (res) { + dev_err(&hbus->hdev->device, + "%s() failed: %#llx", __func__, res); + return; + } + + pci_msi_unmask_irq(data); +} + +struct compose_comp_ctxt { + struct hv_pci_compl comp_pkt; + struct tran_int_desc int_desc; +}; + +static void hv_pci_compose_compl(void *context, struct pci_response *resp, + int resp_packet_size) +{ + struct compose_comp_ctxt *comp_pkt = context; + struct pci_create_int_response *int_resp = + (struct pci_create_int_response *)resp; + + comp_pkt->comp_pkt.completion_status = resp->status; + comp_pkt->int_desc = int_resp->int_desc; + complete(&comp_pkt->comp_pkt.host_event); +} + +static u32 hv_compose_msi_req_v1( + struct pci_create_interrupt *int_pkt, struct cpumask *affinity, + u32 slot, u8 vector) +{ + int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; + int_pkt->wslot.slot = slot; + int_pkt->int_desc.vector = vector; + int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.delivery_mode = dest_Fixed; + + /* + * Create MSI w/ dummy vCPU 
set, overwritten by subsequent retarget in + * hv_irq_unmask(). + */ + int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; + + return sizeof(*int_pkt); +} + +static u32 hv_compose_msi_req_v2( + struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, + u32 slot, u8 vector) +{ + int cpu; + + int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; + int_pkt->wslot.slot = slot; + int_pkt->int_desc.vector = vector; + int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.delivery_mode = dest_Fixed; + + /* + * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten + * by subsequent retarget in hv_irq_unmask(). + */ + cpu = cpumask_first_and(affinity, cpu_online_mask); + int_pkt->int_desc.processor_array[0] = + hv_cpu_number_to_vp_number(cpu); + int_pkt->int_desc.processor_count = 1; + + return sizeof(*int_pkt); +} + +/** + * hv_compose_msi_msg() - Supplies a valid MSI address/data + * @data: Everything about this MSI + * @msg: Buffer that is filled in by this function + * + * This function unpacks the IRQ looking for target CPU set, IDT + * vector and mode and sends a message to the parent partition + * asking for a mapping for that tuple in this partition. The + * response supplies a data value and address to which that data + * should be written to trigger that interrupt. 
+ */ +static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct irq_cfg *cfg = irqd_cfg(data); + struct hv_pcibus_device *hbus; + struct hv_pci_dev *hpdev; + struct pci_bus *pbus; + struct pci_dev *pdev; + struct cpumask *dest; + struct compose_comp_ctxt comp; + struct tran_int_desc *int_desc; + struct { + struct pci_packet pci_pkt; + union { + struct pci_create_interrupt v1; + struct pci_create_interrupt2 v2; + } int_pkts; + } __packed ctxt; + + u32 size; + int ret; + + pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + dest = irq_data_get_effective_affinity_mask(data); + pbus = pdev->bus; + hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); + if (!hpdev) + goto return_null_message; + + /* Free any previous message that might have already been composed. */ + if (data->chip_data) { + int_desc = data->chip_data; + data->chip_data = NULL; + hv_int_desc_free(hpdev, int_desc); + } + + int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); + if (!int_desc) + goto drop_reference; + + memset(&ctxt, 0, sizeof(ctxt)); + init_completion(&comp.comp_pkt.host_event); + ctxt.pci_pkt.completion_func = hv_pci_compose_compl; + ctxt.pci_pkt.compl_ctxt = ∁ + + switch (pci_protocol_version) { + case PCI_PROTOCOL_VERSION_1_1: + size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, + dest, + hpdev->desc.win_slot.slot, + cfg->vector); + break; + + case PCI_PROTOCOL_VERSION_1_2: + size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, + dest, + hpdev->desc.win_slot.slot, + cfg->vector); + break; + + default: + /* As we only negotiate protocol versions known to this driver, + * this path should never hit. However, this is it not a hot + * path so we print a message to aid future updates. 
+ */ + dev_err(&hbus->hdev->device, + "Unexpected vPCI protocol, update driver."); + goto free_int_desc; + } + + ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts, + size, (unsigned long)&ctxt.pci_pkt, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) { + dev_err(&hbus->hdev->device, + "Sending request for interrupt failed: 0x%x", + comp.comp_pkt.completion_status); + goto free_int_desc; + } + + /* + * Since this function is called with IRQ locks held, can't + * do normal wait for completion; instead poll. + */ + while (!try_wait_for_completion(&comp.comp_pkt.host_event)) { + /* 0xFFFF means an invalid PCI VENDOR ID. */ + if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) { + dev_err_once(&hbus->hdev->device, + "the device has gone\n"); + goto free_int_desc; + } + + /* + * When the higher level interrupt code calls us with + * interrupt disabled, we must poll the channel by calling + * the channel callback directly when channel->target_cpu is + * the current CPU. When the higher level interrupt code + * calls us with interrupt enabled, let's add the + * local_bh_disable()/enable() to avoid race. + */ + local_bh_disable(); + + if (hbus->hdev->channel->target_cpu == smp_processor_id()) + hv_pci_onchannelcallback(hbus); + + local_bh_enable(); + + if (hpdev->state == hv_pcichild_ejecting) { + dev_err_once(&hbus->hdev->device, + "the device is being ejected\n"); + goto free_int_desc; + } + + udelay(100); + } + + if (comp.comp_pkt.completion_status < 0) { + dev_err(&hbus->hdev->device, + "Request for interrupt failed: 0x%x", + comp.comp_pkt.completion_status); + goto free_int_desc; + } + + /* + * Record the assignment so that this can be unwound later. Using + * irq_set_chip_data() here would be appropriate, but the lock it takes + * is already held. + */ + *int_desc = comp.int_desc; + data->chip_data = int_desc; + + /* Pass up the result. 
*/ + msg->address_hi = comp.int_desc.address >> 32; + msg->address_lo = comp.int_desc.address & 0xffffffff; + msg->data = comp.int_desc.data; + + put_pcichild(hpdev); + return; + +free_int_desc: + kfree(int_desc); +drop_reference: + put_pcichild(hpdev); +return_null_message: + msg->address_hi = 0; + msg->address_lo = 0; + msg->data = 0; +} + +/* HW Interrupt Chip Descriptor */ +static struct irq_chip hv_msi_irq_chip = { + .name = "Hyper-V PCIe MSI", + .irq_compose_msi_msg = hv_compose_msi_msg, + .irq_set_affinity = hv_set_affinity, + .irq_ack = irq_chip_ack_parent, + .irq_mask = hv_irq_mask, + .irq_unmask = hv_irq_unmask, +}; + +static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info, + msi_alloc_info_t *arg) +{ + return arg->msi_hwirq; +} + +static struct msi_domain_ops hv_msi_ops = { + .get_hwirq = hv_msi_domain_ops_get_hwirq, + .msi_prepare = pci_msi_prepare, + .set_desc = pci_msi_set_desc, + .msi_free = hv_msi_free, +}; + +/** + * hv_pcie_init_irq_domain() - Initialize IRQ domain + * @hbus: The root PCI bus + * + * This function creates an IRQ domain which will be used for + * interrupts from devices that have been passed through. These + * devices only support MSI and MSI-X, not line-based interrupts + * or simulations of line-based interrupts through PCIe's + * fabric-layer messages. Because interrupts are remapped, we + * can support multi-message MSI here. 
+ * + * Return: '0' on success and error value on failure + */ +static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus) +{ + hbus->msi_info.chip = &hv_msi_irq_chip; + hbus->msi_info.ops = &hv_msi_ops; + hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | + MSI_FLAG_PCI_MSIX); + hbus->msi_info.handler = handle_edge_irq; + hbus->msi_info.handler_name = "edge"; + hbus->msi_info.data = hbus; + hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode, + &hbus->msi_info, + x86_vector_domain); + if (!hbus->irq_domain) { + dev_err(&hbus->hdev->device, + "Failed to build an MSI IRQ domain\n"); + return -ENODEV; + } + + return 0; +} + +/** + * get_bar_size() - Get the address space consumed by a BAR + * @bar_val: Value that a BAR returned after -1 was written + * to it. + * + * This function returns the size of the BAR, rounded up to 1 + * page. It has to be rounded up because the hypervisor's page + * table entry that maps the BAR into the VM can't specify an + * offset within a page. The invariant is that the hypervisor + * must place any BARs of smaller than page length at the + * beginning of a page. + * + * Return: Size in bytes of the consumed MMIO space. + */ +static u64 get_bar_size(u64 bar_val) +{ + return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)), + PAGE_SIZE); +} + +/** + * survey_child_resources() - Total all MMIO requirements + * @hbus: Root PCI bus, as understood by this driver + */ +static void survey_child_resources(struct hv_pcibus_device *hbus) +{ + struct hv_pci_dev *hpdev; + resource_size_t bar_size = 0; + unsigned long flags; + struct completion *event; + u64 bar_val; + int i; + + /* If nobody is waiting on the answer, don't compute it. */ + event = xchg(&hbus->survey_event, NULL); + if (!event) + return; + + /* If the answer has already been computed, go with it. 
*/ + if (hbus->low_mmio_space || hbus->high_mmio_space) { + complete(event); + return; + } + + spin_lock_irqsave(&hbus->device_list_lock, flags); + + /* + * Due to an interesting quirk of the PCI spec, all memory regions + * for a child device are a power of 2 in size and aligned in memory, + * so it's sufficient to just add them up without tracking alignment. + */ + list_for_each_entry(hpdev, &hbus->children, list_entry) { + for (i = 0; i < 6; i++) { + if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO) + dev_err(&hbus->hdev->device, + "There's an I/O BAR in this list!\n"); + + if (hpdev->probed_bar[i] != 0) { + /* + * A probed BAR has all the upper bits set that + * can be changed. + */ + + bar_val = hpdev->probed_bar[i]; + if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) + bar_val |= + ((u64)hpdev->probed_bar[++i] << 32); + else + bar_val |= 0xffffffff00000000ULL; + + bar_size = get_bar_size(bar_val); + + if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) + hbus->high_mmio_space += bar_size; + else + hbus->low_mmio_space += bar_size; + } + } + } + + spin_unlock_irqrestore(&hbus->device_list_lock, flags); + complete(event); +} + +/** + * prepopulate_bars() - Fill in BARs with defaults + * @hbus: Root PCI bus, as understood by this driver + * + * The core PCI driver code seems much, much happier if the BARs + * for a device have values upon first scan. So fill them in. + * The algorithm below works down from large sizes to small, + * attempting to pack the assignments optimally. The assumption, + * enforced in other parts of the code, is that the beginning of + * the memory-mapped I/O space will be aligned on the largest + * BAR size. 
+ */ +static void prepopulate_bars(struct hv_pcibus_device *hbus) +{ + resource_size_t high_size = 0; + resource_size_t low_size = 0; + resource_size_t high_base = 0; + resource_size_t low_base = 0; + resource_size_t bar_size; + struct hv_pci_dev *hpdev; + unsigned long flags; + u64 bar_val; + u32 command; + bool high; + int i; + + if (hbus->low_mmio_space) { + low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); + low_base = hbus->low_mmio_res->start; + } + + if (hbus->high_mmio_space) { + high_size = 1ULL << + (63 - __builtin_clzll(hbus->high_mmio_space)); + high_base = hbus->high_mmio_res->start; + } + + spin_lock_irqsave(&hbus->device_list_lock, flags); + + /* Pick addresses for the BARs. */ + do { + list_for_each_entry(hpdev, &hbus->children, list_entry) { + for (i = 0; i < 6; i++) { + bar_val = hpdev->probed_bar[i]; + if (bar_val == 0) + continue; + high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64; + if (high) { + bar_val |= + ((u64)hpdev->probed_bar[i + 1] + << 32); + } else { + bar_val |= 0xffffffffULL << 32; + } + bar_size = get_bar_size(bar_val); + if (high) { + if (high_size != bar_size) { + i++; + continue; + } + _hv_pcifront_write_config(hpdev, + PCI_BASE_ADDRESS_0 + (4 * i), + 4, + (u32)(high_base & 0xffffff00)); + i++; + _hv_pcifront_write_config(hpdev, + PCI_BASE_ADDRESS_0 + (4 * i), + 4, (u32)(high_base >> 32)); + high_base += bar_size; + } else { + if (low_size != bar_size) + continue; + _hv_pcifront_write_config(hpdev, + PCI_BASE_ADDRESS_0 + (4 * i), + 4, + (u32)(low_base & 0xffffff00)); + low_base += bar_size; + } + } + if (high_size <= 1 && low_size <= 1) { + /* Set the memory enable bit. 
*/ + _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, + &command); + command |= PCI_COMMAND_MEMORY; + _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, + command); + break; + } + } + + high_size >>= 1; + low_size >>= 1; + } while (high_size || low_size); + + spin_unlock_irqrestore(&hbus->device_list_lock, flags); +} + +/** + * create_root_hv_pci_bus() - Expose a new root PCI bus + * @hbus: Root PCI bus, as understood by this driver + * + * Return: 0 on success, -errno on failure + */ +static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) +{ + /* Register the device */ + hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device, + 0, /* bus number is always zero */ + &hv_pcifront_ops, + &hbus->sysdata, + &hbus->resources_for_children); + if (!hbus->pci_bus) + return -ENODEV; + + hbus->pci_bus->msi = &hbus->msi_chip; + hbus->pci_bus->msi->dev = &hbus->hdev->device; + + pci_lock_rescan_remove(); + pci_scan_child_bus(hbus->pci_bus); + pci_bus_assign_resources(hbus->pci_bus); + pci_bus_add_devices(hbus->pci_bus); + pci_unlock_rescan_remove(); + hbus->state = hv_pcibus_installed; + return 0; +} + +struct q_res_req_compl { + struct completion host_event; + struct hv_pci_dev *hpdev; +}; + +/** + * q_resource_requirements() - Query Resource Requirements + * @context: The completion context. + * @resp: The response that came from the host. + * @resp_packet_size: The size in bytes of resp. + * + * This function is invoked on completion of a Query Resource + * Requirements packet. 
+ */ +static void q_resource_requirements(void *context, struct pci_response *resp, + int resp_packet_size) +{ + struct q_res_req_compl *completion = context; + struct pci_q_res_req_response *q_res_req = + (struct pci_q_res_req_response *)resp; + int i; + + if (resp->status < 0) { + dev_err(&completion->hpdev->hbus->hdev->device, + "query resource requirements failed: %x\n", + resp->status); + } else { + for (i = 0; i < 6; i++) { + completion->hpdev->probed_bar[i] = + q_res_req->probed_bar[i]; + } + } + + complete(&completion->host_event); +} + +/** + * new_pcichild_device() - Create a new child device + * @hbus: The internal struct tracking this root PCI bus. + * @desc: The information supplied so far from the host + * about the device. + * + * This function creates the tracking structure for a new child + * device and kicks off the process of figuring out what it is. + * + * Return: Pointer to the new tracking struct + */ +static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, + struct pci_function_description *desc) +{ + struct hv_pci_dev *hpdev; + struct pci_child_message *res_req; + struct q_res_req_compl comp_pkt; + struct { + struct pci_packet init_packet; + u8 buffer[sizeof(struct pci_child_message)]; + } pkt; + unsigned long flags; + int ret; + + hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC); + if (!hpdev) + return NULL; + + hpdev->hbus = hbus; + + memset(&pkt, 0, sizeof(pkt)); + init_completion(&comp_pkt.host_event); + comp_pkt.hpdev = hpdev; + pkt.init_packet.compl_ctxt = &comp_pkt; + pkt.init_packet.completion_func = q_resource_requirements; + res_req = (struct pci_child_message *)&pkt.init_packet.message; + res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; + res_req->wslot.slot = desc->win_slot.slot; + + ret = vmbus_sendpacket(hbus->hdev->channel, res_req, + sizeof(struct pci_child_message), + (unsigned long)&pkt.init_packet, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) + goto error; + 
+ if (wait_for_response(hbus->hdev, &comp_pkt.host_event)) + goto error; + + hpdev->desc = *desc; + refcount_set(&hpdev->refs, 1); + get_pcichild(hpdev); + spin_lock_irqsave(&hbus->device_list_lock, flags); + + list_add_tail(&hpdev->list_entry, &hbus->children); + spin_unlock_irqrestore(&hbus->device_list_lock, flags); + return hpdev; + +error: + kfree(hpdev); + return NULL; +} + +/** + * get_pcichild_wslot() - Find device from slot + * @hbus: Root PCI bus, as understood by this driver + * @wslot: Location on the bus + * + * This function looks up a PCI device and returns the internal + * representation of it. It acquires a reference on it, so that + * the device won't be deleted while somebody is using it. The + * caller is responsible for calling put_pcichild() to release + * this reference. + * + * Return: Internal representation of a PCI device + */ +static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, + u32 wslot) +{ + unsigned long flags; + struct hv_pci_dev *iter, *hpdev = NULL; + + spin_lock_irqsave(&hbus->device_list_lock, flags); + list_for_each_entry(iter, &hbus->children, list_entry) { + if (iter->desc.win_slot.slot == wslot) { + hpdev = iter; + get_pcichild(hpdev); + break; + } + } + spin_unlock_irqrestore(&hbus->device_list_lock, flags); + + return hpdev; +} + +/** + * pci_devices_present_work() - Handle new list of child devices + * @work: Work struct embedded in struct hv_dr_work + * + * "Bus Relations" is the Windows term for "children of this + * bus." The terminology is preserved here for people trying to + * debug the interaction between Hyper-V and Linux. This + * function is called when the parent partition reports a list + * of functions that should be observed under this PCI Express + * port (bus). + * + * This function updates the list, and must tolerate being + * called multiple times with the same information. 
The typical + * number of child devices is one, with very atypical cases + * involving three or four, so the algorithms used here can be + * simple and inefficient. + * + * It must also treat the omission of a previously observed device as + * notification that the device no longer exists. + * + * Note that this function is serialized with hv_eject_device_work(), + * because both are pushed to the ordered workqueue hbus->wq. + */ +static void pci_devices_present_work(struct work_struct *work) +{ + u32 child_no; + bool found; + struct pci_function_description *new_desc; + struct hv_pci_dev *hpdev; + struct hv_pcibus_device *hbus; + struct list_head removed; + struct hv_dr_work *dr_wrk; + struct hv_dr_state *dr = NULL; + unsigned long flags; + + dr_wrk = container_of(work, struct hv_dr_work, wrk); + hbus = dr_wrk->bus; + kfree(dr_wrk); + + INIT_LIST_HEAD(&removed); + + /* Pull this off the queue and process it if it was the last one. */ + spin_lock_irqsave(&hbus->device_list_lock, flags); + while (!list_empty(&hbus->dr_list)) { + dr = list_first_entry(&hbus->dr_list, struct hv_dr_state, + list_entry); + list_del(&dr->list_entry); + + /* Throw this away if the list still has stuff in it. */ + if (!list_empty(&hbus->dr_list)) { + kfree(dr); + continue; + } + } + spin_unlock_irqrestore(&hbus->device_list_lock, flags); + + if (!dr) { + put_hvpcibus(hbus); + return; + } + + /* First, mark all existing children as reported missing. */ + spin_lock_irqsave(&hbus->device_list_lock, flags); + list_for_each_entry(hpdev, &hbus->children, list_entry) { + hpdev->reported_missing = true; + } + spin_unlock_irqrestore(&hbus->device_list_lock, flags); + + /* Next, add back any reported devices. 
*/ + for (child_no = 0; child_no < dr->device_count; child_no++) { + found = false; + new_desc = &dr->func[child_no]; + + spin_lock_irqsave(&hbus->device_list_lock, flags); + list_for_each_entry(hpdev, &hbus->children, list_entry) { + if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) && + (hpdev->desc.v_id == new_desc->v_id) && + (hpdev->desc.d_id == new_desc->d_id) && + (hpdev->desc.ser == new_desc->ser)) { + hpdev->reported_missing = false; + found = true; + } + } + spin_unlock_irqrestore(&hbus->device_list_lock, flags); + + if (!found) { + hpdev = new_pcichild_device(hbus, new_desc); + if (!hpdev) + dev_err(&hbus->hdev->device, + "couldn't record a child device.\n"); + } + } + + /* Move missing children to a list on the stack. */ + spin_lock_irqsave(&hbus->device_list_lock, flags); + do { + found = false; + list_for_each_entry(hpdev, &hbus->children, list_entry) { + if (hpdev->reported_missing) { + found = true; + put_pcichild(hpdev); + list_move_tail(&hpdev->list_entry, &removed); + break; + } + } + } while (found); + spin_unlock_irqrestore(&hbus->device_list_lock, flags); + + /* Delete everything that should no longer exist. */ + while (!list_empty(&removed)) { + hpdev = list_first_entry(&removed, struct hv_pci_dev, + list_entry); + list_del(&hpdev->list_entry); + put_pcichild(hpdev); + } + + switch (hbus->state) { + case hv_pcibus_installed: + /* + * Tell the core to rescan bus + * because there may have been changes. + */ + pci_lock_rescan_remove(); + pci_scan_child_bus(hbus->pci_bus); + pci_unlock_rescan_remove(); + break; + + case hv_pcibus_init: + case hv_pcibus_probed: + survey_child_resources(hbus); + break; + + default: + break; + } + + put_hvpcibus(hbus); + kfree(dr); +} + +/** + * hv_pci_devices_present() - Handles list of new children + * @hbus: Root PCI bus, as understood by this driver + * @relations: Packet from host listing children + * + * This function is invoked whenever a new list of devices for + * this bus appears. 
+ */ +static void hv_pci_devices_present(struct hv_pcibus_device *hbus, + struct pci_bus_relations *relations) +{ + struct hv_dr_state *dr; + struct hv_dr_work *dr_wrk; + unsigned long flags; + bool pending_dr; + + dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT); + if (!dr_wrk) + return; + + dr = kzalloc(offsetof(struct hv_dr_state, func) + + (sizeof(struct pci_function_description) * + (relations->device_count)), GFP_NOWAIT); + if (!dr) { + kfree(dr_wrk); + return; + } + + INIT_WORK(&dr_wrk->wrk, pci_devices_present_work); + dr_wrk->bus = hbus; + dr->device_count = relations->device_count; + if (dr->device_count != 0) { + memcpy(dr->func, relations->func, + sizeof(struct pci_function_description) * + dr->device_count); + } + + spin_lock_irqsave(&hbus->device_list_lock, flags); + /* + * If pending_dr is true, we have already queued a work, + * which will see the new dr. Otherwise, we need to + * queue a new work. + */ + pending_dr = !list_empty(&hbus->dr_list); + list_add_tail(&dr->list_entry, &hbus->dr_list); + spin_unlock_irqrestore(&hbus->device_list_lock, flags); + + if (pending_dr) { + kfree(dr_wrk); + } else { + get_hvpcibus(hbus); + queue_work(hbus->wq, &dr_wrk->wrk); + } +} + +/** + * hv_eject_device_work() - Asynchronously handles ejection + * @work: Work struct embedded in internal device struct + * + * This function handles ejecting a device. Windows will + * attempt to gracefully eject a device, waiting 60 seconds to + * hear back from the guest OS that this completed successfully. + * If this timer expires, the device will be forcibly removed. 
+ */ +static void hv_eject_device_work(struct work_struct *work) +{ + struct pci_eject_response *ejct_pkt; + struct hv_pci_dev *hpdev; + struct pci_dev *pdev; + unsigned long flags; + int wslot; + struct { + struct pci_packet pkt; + u8 buffer[sizeof(struct pci_eject_response)]; + } ctxt; + + hpdev = container_of(work, struct hv_pci_dev, wrk); + + WARN_ON(hpdev->state != hv_pcichild_ejecting); + + /* + * Ejection can come before or after the PCI bus has been set up, so + * attempt to find it and tear down the bus state, if it exists. This + * must be done without constructs like pci_domain_nr(hbus->pci_bus) + * because hbus->pci_bus may not exist yet. + */ + wslot = wslot_to_devfn(hpdev->desc.win_slot.slot); + pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, + wslot); + if (pdev) { + pci_lock_rescan_remove(); + pci_stop_and_remove_bus_device(pdev); + pci_dev_put(pdev); + pci_unlock_rescan_remove(); + } + + spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); + list_del(&hpdev->list_entry); + spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); + + memset(&ctxt, 0, sizeof(ctxt)); + ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; + ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; + ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; + vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt, + sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, + VM_PKT_DATA_INBAND, 0); + + put_pcichild(hpdev); + put_pcichild(hpdev); + put_hvpcibus(hpdev->hbus); +} + +/** + * hv_pci_eject_device() - Handles device ejection + * @hpdev: Internal device tracking struct + * + * This function is invoked when an ejection packet arrives. It + * just schedules work so that we don't re-enter the packet + * delivery code handling the ejection. 
+ */ +static void hv_pci_eject_device(struct hv_pci_dev *hpdev) +{ + hpdev->state = hv_pcichild_ejecting; + get_pcichild(hpdev); + INIT_WORK(&hpdev->wrk, hv_eject_device_work); + get_hvpcibus(hpdev->hbus); + queue_work(hpdev->hbus->wq, &hpdev->wrk); +} + +/** + * hv_pci_onchannelcallback() - Handles incoming packets + * @context: Internal bus tracking struct + * + * This function is invoked whenever the host sends a packet to + * this channel (which is private to this root PCI bus). + */ +static void hv_pci_onchannelcallback(void *context) +{ + const int packet_size = 0x100; + int ret; + struct hv_pcibus_device *hbus = context; + u32 bytes_recvd; + u64 req_id; + struct vmpacket_descriptor *desc; + unsigned char *buffer; + int bufferlen = packet_size; + struct pci_packet *comp_packet; + struct pci_response *response; + struct pci_incoming_message *new_message; + struct pci_bus_relations *bus_rel; + struct pci_dev_incoming *dev_message; + struct hv_pci_dev *hpdev; + + buffer = kmalloc(bufferlen, GFP_ATOMIC); + if (!buffer) + return; + + while (1) { + ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer, + bufferlen, &bytes_recvd, &req_id); + + if (ret == -ENOBUFS) { + kfree(buffer); + /* Handle large packet */ + bufferlen = bytes_recvd; + buffer = kmalloc(bytes_recvd, GFP_ATOMIC); + if (!buffer) + return; + continue; + } + + /* Zero length indicates there are no more packets. */ + if (ret || !bytes_recvd) + break; + + /* + * All incoming packets must be at least as large as a + * response. + */ + if (bytes_recvd <= sizeof(struct pci_response)) + continue; + desc = (struct vmpacket_descriptor *)buffer; + + switch (desc->type) { + case VM_PKT_COMP: + + /* + * The host is trusted, and thus it's safe to interpret + * this transaction ID as a pointer. 
+ */ + comp_packet = (struct pci_packet *)req_id; + response = (struct pci_response *)buffer; + comp_packet->completion_func(comp_packet->compl_ctxt, + response, + bytes_recvd); + break; + + case VM_PKT_DATA_INBAND: + + new_message = (struct pci_incoming_message *)buffer; + switch (new_message->message_type.type) { + case PCI_BUS_RELATIONS: + + bus_rel = (struct pci_bus_relations *)buffer; + if (bytes_recvd < + offsetof(struct pci_bus_relations, func) + + (sizeof(struct pci_function_description) * + (bus_rel->device_count))) { + dev_err(&hbus->hdev->device, + "bus relations too small\n"); + break; + } + + hv_pci_devices_present(hbus, bus_rel); + break; + + case PCI_EJECT: + + dev_message = (struct pci_dev_incoming *)buffer; + hpdev = get_pcichild_wslot(hbus, + dev_message->wslot.slot); + if (hpdev) { + hv_pci_eject_device(hpdev); + put_pcichild(hpdev); + } + break; + + default: + dev_warn(&hbus->hdev->device, + "Unimplemented protocol message %x\n", + new_message->message_type.type); + break; + } + break; + + default: + dev_err(&hbus->hdev->device, + "unhandled packet type %d, tid %llx len %d\n", + desc->type, req_id, bytes_recvd); + break; + } + } + + kfree(buffer); +} + +/** + * hv_pci_protocol_negotiation() - Set up protocol + * @hdev: VMBus's tracking struct for this root PCI bus + * + * This driver is intended to support running on Windows 10 + * (server) and later versions. It will not run on earlier + * versions, as they assume that many of the operations which + * Linux needs accomplished with a spinlock held were done via + * asynchronous messaging via VMBus. Windows 10 increases the + * surface area of PCI emulation so that these actions can take + * place by suspending a virtual processor for their duration. + * + * This function negotiates the channel protocol version, + * failing if the host doesn't support the necessary protocol + * level. 
+ */ +static int hv_pci_protocol_negotiation(struct hv_device *hdev) +{ + struct pci_version_request *version_req; + struct hv_pci_compl comp_pkt; + struct pci_packet *pkt; + int ret; + int i; + + /* + * Initiate the handshake with the host and negotiate + * a version that the host can support. We start with the + * highest version number and go down if the host cannot + * support it. + */ + pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + init_completion(&comp_pkt.host_event); + pkt->completion_func = hv_pci_generic_compl; + pkt->compl_ctxt = &comp_pkt; + version_req = (struct pci_version_request *)&pkt->message; + version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; + + for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) { + version_req->protocol_version = pci_protocol_versions[i]; + ret = vmbus_sendpacket(hdev->channel, version_req, + sizeof(struct pci_version_request), + (unsigned long)pkt, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); + + if (ret) { + dev_err(&hdev->device, + "PCI Pass-through VSP failed to request version: %d", + ret); + goto exit; + } + + if (comp_pkt.completion_status >= 0) { + pci_protocol_version = pci_protocol_versions[i]; + dev_info(&hdev->device, + "PCI VMBus probing: Using version %#x\n", + pci_protocol_version); + goto exit; + } + + if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { + dev_err(&hdev->device, + "PCI Pass-through VSP failed version request: %#x", + comp_pkt.completion_status); + ret = -EPROTO; + goto exit; + } + + reinit_completion(&comp_pkt.host_event); + } + + dev_err(&hdev->device, + "PCI pass-through VSP failed to find supported version"); + ret = -EPROTO; + +exit: + kfree(pkt); + return ret; +} + +/** + * hv_pci_free_bridge_windows() - Release memory regions for the + * bus + * @hbus: Root PCI bus, as understood by this driver + */ +static void 
hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus) +{ + /* + * Set the resources back to the way they looked when they + * were allocated by setting IORESOURCE_BUSY again. + */ + + if (hbus->low_mmio_space && hbus->low_mmio_res) { + hbus->low_mmio_res->flags |= IORESOURCE_BUSY; + vmbus_free_mmio(hbus->low_mmio_res->start, + resource_size(hbus->low_mmio_res)); + } + + if (hbus->high_mmio_space && hbus->high_mmio_res) { + hbus->high_mmio_res->flags |= IORESOURCE_BUSY; + vmbus_free_mmio(hbus->high_mmio_res->start, + resource_size(hbus->high_mmio_res)); + } +} + +/** + * hv_pci_allocate_bridge_windows() - Allocate memory regions + * for the bus + * @hbus: Root PCI bus, as understood by this driver + * + * This function calls vmbus_allocate_mmio(), which is itself a + * bit of a compromise. Ideally, we might change the pnp layer + * in the kernel such that it comprehends either PCI devices + * which are "grandchildren of ACPI," with some intermediate bus + * node (in this case, VMBus) or change it such that it + * understands VMBus. The pnp layer, however, has been declared + * deprecated, and not subject to change. + * + * The workaround, implemented here, is to ask VMBus to allocate + * MMIO space for this bus. VMBus itself knows which ranges are + * appropriate by looking at its own ACPI objects. Then, after + * these ranges are claimed, they're modified to look like they + * would have looked if the ACPI and pnp code had allocated + * bridge windows. These descriptors have to exist in this form + * in order to satisfy the code which will get invoked when the + * endpoint PCI function driver calls request_mem_region() or + * request_mem_region_exclusive(). 
+ * + * Return: 0 on success, -errno on failure + */ +static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus) +{ + resource_size_t align; + int ret; + + if (hbus->low_mmio_space) { + align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); + ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0, + (u64)(u32)0xffffffff, + hbus->low_mmio_space, + align, false); + if (ret) { + dev_err(&hbus->hdev->device, + "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n", + hbus->low_mmio_space); + return ret; + } + + /* Modify this resource to become a bridge window. */ + hbus->low_mmio_res->flags |= IORESOURCE_WINDOW; + hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY; + pci_add_resource(&hbus->resources_for_children, + hbus->low_mmio_res); + } + + if (hbus->high_mmio_space) { + align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space)); + ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev, + 0x100000000, -1, + hbus->high_mmio_space, align, + false); + if (ret) { + dev_err(&hbus->hdev->device, + "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n", + hbus->high_mmio_space); + goto release_low_mmio; + } + + /* Modify this resource to become a bridge window. */ + hbus->high_mmio_res->flags |= IORESOURCE_WINDOW; + hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY; + pci_add_resource(&hbus->resources_for_children, + hbus->high_mmio_res); + } + + return 0; + +release_low_mmio: + if (hbus->low_mmio_res) { + vmbus_free_mmio(hbus->low_mmio_res->start, + resource_size(hbus->low_mmio_res)); + } + + return ret; +} + +/** + * hv_allocate_config_window() - Find MMIO space for PCI Config + * @hbus: Root PCI bus, as understood by this driver + * + * This function claims memory-mapped I/O space for accessing + * configuration space for the functions on this bus. 
+ * + * Return: 0 on success, -errno on failure + */ +static int hv_allocate_config_window(struct hv_pcibus_device *hbus) +{ + int ret; + + /* + * Set up a region of MMIO space to use for accessing configuration + * space. + */ + ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1, + PCI_CONFIG_MMIO_LENGTH, 0x1000, false); + if (ret) + return ret; + + /* + * vmbus_allocate_mmio() gets used for allocating both device endpoint + * resource claims (those which cannot be overlapped) and the ranges + * which are valid for the children of this bus, which are intended + * to be overlapped by those children. Set the flag on this claim + * meaning that this region can't be overlapped. + */ + + hbus->mem_config->flags |= IORESOURCE_BUSY; + + return 0; +} + +static void hv_free_config_window(struct hv_pcibus_device *hbus) +{ + vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); +} + +/** + * hv_pci_enter_d0() - Bring the "bus" into the D0 power state + * @hdev: VMBus's tracking struct for this root PCI bus + * + * Return: 0 on success, -errno on failure + */ +static int hv_pci_enter_d0(struct hv_device *hdev) +{ + struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); + struct pci_bus_d0_entry *d0_entry; + struct hv_pci_compl comp_pkt; + struct pci_packet *pkt; + int ret; + + /* + * Tell the host that the bus is ready to use, and moved into the + * powered-on state. This includes telling the host which region + * of memory-mapped I/O space has been chosen for configuration space + * access. 
+ */ + pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + init_completion(&comp_pkt.host_event); + pkt->completion_func = hv_pci_generic_compl; + pkt->compl_ctxt = &comp_pkt; + d0_entry = (struct pci_bus_d0_entry *)&pkt->message; + d0_entry->message_type.type = PCI_BUS_D0ENTRY; + d0_entry->mmio_base = hbus->mem_config->start; + + ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry), + (unsigned long)pkt, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); + + if (ret) + goto exit; + + if (comp_pkt.completion_status < 0) { + dev_err(&hdev->device, + "PCI Pass-through VSP failed D0 Entry with status %x\n", + comp_pkt.completion_status); + ret = -EPROTO; + goto exit; + } + + ret = 0; + +exit: + kfree(pkt); + return ret; +} + +/** + * hv_pci_query_relations() - Ask host to send list of child + * devices + * @hdev: VMBus's tracking struct for this root PCI bus + * + * Return: 0 on success, -errno on failure + */ +static int hv_pci_query_relations(struct hv_device *hdev) +{ + struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); + struct pci_message message; + struct completion comp; + int ret; + + /* Ask the host to send along the list of child devices */ + init_completion(&comp); + if (cmpxchg(&hbus->survey_event, NULL, &comp)) + return -ENOTEMPTY; + + memset(&message, 0, sizeof(message)); + message.type = PCI_QUERY_BUS_RELATIONS; + + ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message), + 0, VM_PKT_DATA_INBAND, 0); + if (!ret) + ret = wait_for_response(hdev, &comp); + + return ret; +} + +/** + * hv_send_resources_allocated() - Report local resource choices + * @hdev: VMBus's tracking struct for this root PCI bus + * + * The host OS is expecting to be sent a request as a message + * which contains all the resources that the device will use. 
+ * The response contains those same resources, "translated" + * which is to say, the values which should be used by the + * hardware, when it delivers an interrupt. (MMIO resources are + * used in local terms.) This is nice for Windows, and lines up + * with the FDO/PDO split, which doesn't exist in Linux. Linux + * is deeply expecting to scan an emulated PCI configuration + * space. So this message is sent here only to drive the state + * machine on the host forward. + * + * Return: 0 on success, -errno on failure + */ +static int hv_send_resources_allocated(struct hv_device *hdev) +{ + struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); + struct pci_resources_assigned *res_assigned; + struct pci_resources_assigned2 *res_assigned2; + struct hv_pci_compl comp_pkt; + struct hv_pci_dev *hpdev; + struct pci_packet *pkt; + size_t size_res; + u32 wslot; + int ret; + + size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) + ? sizeof(*res_assigned) : sizeof(*res_assigned2); + + pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + ret = 0; + + for (wslot = 0; wslot < 256; wslot++) { + hpdev = get_pcichild_wslot(hbus, wslot); + if (!hpdev) + continue; + + memset(pkt, 0, sizeof(*pkt) + size_res); + init_completion(&comp_pkt.host_event); + pkt->completion_func = hv_pci_generic_compl; + pkt->compl_ctxt = &comp_pkt; + + if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) { + res_assigned = + (struct pci_resources_assigned *)&pkt->message; + res_assigned->message_type.type = + PCI_RESOURCES_ASSIGNED; + res_assigned->wslot.slot = hpdev->desc.win_slot.slot; + } else { + res_assigned2 = + (struct pci_resources_assigned2 *)&pkt->message; + res_assigned2->message_type.type = + PCI_RESOURCES_ASSIGNED2; + res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; + } + put_pcichild(hpdev); + + ret = vmbus_sendpacket(hdev->channel, &pkt->message, + size_res, (unsigned long)pkt, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 
+ if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); + if (ret) + break; + + if (comp_pkt.completion_status < 0) { + ret = -EPROTO; + dev_err(&hdev->device, + "resource allocated returned 0x%x", + comp_pkt.completion_status); + break; + } + } + + kfree(pkt); + return ret; +} + +/** + * hv_send_resources_released() - Report local resources + * released + * @hdev: VMBus's tracking struct for this root PCI bus + * + * Return: 0 on success, -errno on failure + */ +static int hv_send_resources_released(struct hv_device *hdev) +{ + struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); + struct pci_child_message pkt; + struct hv_pci_dev *hpdev; + u32 wslot; + int ret; + + for (wslot = 0; wslot < 256; wslot++) { + hpdev = get_pcichild_wslot(hbus, wslot); + if (!hpdev) + continue; + + memset(&pkt, 0, sizeof(pkt)); + pkt.message_type.type = PCI_RESOURCES_RELEASED; + pkt.wslot.slot = hpdev->desc.win_slot.slot; + + put_pcichild(hpdev); + + ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0, + VM_PKT_DATA_INBAND, 0); + if (ret) + return ret; + } + + return 0; +} + +static void get_hvpcibus(struct hv_pcibus_device *hbus) +{ + refcount_inc(&hbus->remove_lock); +} + +static void put_hvpcibus(struct hv_pcibus_device *hbus) +{ + if (refcount_dec_and_test(&hbus->remove_lock)) + complete(&hbus->remove_event); +} + +/** + * hv_pci_probe() - New VMBus channel probe, for a root PCI bus + * @hdev: VMBus's tracking struct for this root PCI bus + * @dev_id: Identifies the device itself + * + * Return: 0 on success, -errno on failure + */ +static int hv_pci_probe(struct hv_device *hdev, + const struct hv_vmbus_device_id *dev_id) +{ + struct hv_pcibus_device *hbus; + int ret; + + /* + * hv_pcibus_device contains the hypercall arguments for retargeting in + * hv_irq_unmask(). Those must not cross a page boundary. 
+ */ + BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE); + + hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL); + if (!hbus) + return -ENOMEM; + hbus->state = hv_pcibus_init; + + /* + * The PCI bus "domain" is what is called "segment" in ACPI and + * other specs. Pull it from the instance ID, to get something + * unique. Bytes 8 and 9 are what is used in Windows guests, so + * do the same thing for consistency. Note that, since this code + * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee + * that (1) the only domain in use for something that looks like + * a physical PCI bus (which is actually emulated by the + * hypervisor) is domain 0 and (2) there will be no overlap + * between domains derived from these instance IDs in the same + * VM. + */ + hbus->sysdata.domain = hdev->dev_instance.b[9] | + hdev->dev_instance.b[8] << 8; + + hbus->hdev = hdev; + refcount_set(&hbus->remove_lock, 1); + INIT_LIST_HEAD(&hbus->children); + INIT_LIST_HEAD(&hbus->dr_list); + INIT_LIST_HEAD(&hbus->resources_for_children); + spin_lock_init(&hbus->config_lock); + spin_lock_init(&hbus->device_list_lock); + spin_lock_init(&hbus->retarget_msi_interrupt_lock); + init_completion(&hbus->remove_event); + hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0, + hbus->sysdata.domain); + if (!hbus->wq) { + ret = -ENOMEM; + goto free_bus; + } + + ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, + hv_pci_onchannelcallback, hbus); + if (ret) + goto destroy_wq; + + hv_set_drvdata(hdev, hbus); + + ret = hv_pci_protocol_negotiation(hdev); + if (ret) + goto close; + + ret = hv_allocate_config_window(hbus); + if (ret) + goto close; + + hbus->cfg_addr = ioremap(hbus->mem_config->start, + PCI_CONFIG_MMIO_LENGTH); + if (!hbus->cfg_addr) { + dev_err(&hdev->device, + "Unable to map a virtual address for config space\n"); + ret = -ENOMEM; + goto free_config; + } + + hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus); + if (!hbus->sysdata.fwnode) { + ret = -ENOMEM; + goto 
unmap; + } + + ret = hv_pcie_init_irq_domain(hbus); + if (ret) + goto free_fwnode; + + ret = hv_pci_query_relations(hdev); + if (ret) + goto free_irq_domain; + + ret = hv_pci_enter_d0(hdev); + if (ret) + goto free_irq_domain; + + ret = hv_pci_allocate_bridge_windows(hbus); + if (ret) + goto free_irq_domain; + + ret = hv_send_resources_allocated(hdev); + if (ret) + goto free_windows; + + prepopulate_bars(hbus); + + hbus->state = hv_pcibus_probed; + + ret = create_root_hv_pci_bus(hbus); + if (ret) + goto free_windows; + + return 0; + +free_windows: + hv_pci_free_bridge_windows(hbus); +free_irq_domain: + irq_domain_remove(hbus->irq_domain); +free_fwnode: + irq_domain_free_fwnode(hbus->sysdata.fwnode); +unmap: + iounmap(hbus->cfg_addr); +free_config: + hv_free_config_window(hbus); +close: + vmbus_close(hdev->channel); +destroy_wq: + destroy_workqueue(hbus->wq); +free_bus: + free_page((unsigned long)hbus); + return ret; +} + +static void hv_pci_bus_exit(struct hv_device *hdev) +{ + struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); + struct { + struct pci_packet teardown_packet; + u8 buffer[sizeof(struct pci_message)]; + } pkt; + struct pci_bus_relations relations; + struct hv_pci_compl comp_pkt; + int ret; + + /* + * After the host sends the RESCIND_CHANNEL message, it doesn't + * access the per-channel ringbuffer any longer. + */ + if (hdev->channel->rescind) + return; + + /* Delete any children which might still exist. 
*/ + memset(&relations, 0, sizeof(relations)); + hv_pci_devices_present(hbus, &relations); + + ret = hv_send_resources_released(hdev); + if (ret) + dev_err(&hdev->device, + "Couldn't send resources released packet(s)\n"); + + memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet)); + init_completion(&comp_pkt.host_event); + pkt.teardown_packet.completion_func = hv_pci_generic_compl; + pkt.teardown_packet.compl_ctxt = &comp_pkt; + pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; + + ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message, + sizeof(struct pci_message), + (unsigned long)&pkt.teardown_packet, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ); +} + +/** + * hv_pci_remove() - Remove routine for this VMBus channel + * @hdev: VMBus's tracking struct for this root PCI bus + * + * Return: 0 on success, -errno on failure + */ +static int hv_pci_remove(struct hv_device *hdev) +{ + struct hv_pcibus_device *hbus; + + hbus = hv_get_drvdata(hdev); + if (hbus->state == hv_pcibus_installed) { + /* Remove the bus from PCI's point of view. 
*/ + pci_lock_rescan_remove(); + pci_stop_root_bus(hbus->pci_bus); + pci_remove_root_bus(hbus->pci_bus); + pci_unlock_rescan_remove(); + hbus->state = hv_pcibus_removed; + } + + hv_pci_bus_exit(hdev); + + vmbus_close(hdev->channel); + + iounmap(hbus->cfg_addr); + hv_free_config_window(hbus); + pci_free_resource_list(&hbus->resources_for_children); + hv_pci_free_bridge_windows(hbus); + irq_domain_remove(hbus->irq_domain); + irq_domain_free_fwnode(hbus->sysdata.fwnode); + put_hvpcibus(hbus); + wait_for_completion(&hbus->remove_event); + destroy_workqueue(hbus->wq); + free_page((unsigned long)hbus); + return 0; +} + +static const struct hv_vmbus_device_id hv_pci_id_table[] = { + /* PCI Pass-through Class ID */ + /* 44C4F61D-4444-4400-9D52-802E27EDE19F */ + { HV_PCIE_GUID, }, + { }, +}; + +MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table); + +static struct hv_driver hv_pci_drv = { + .name = "hv_pci", + .id_table = hv_pci_id_table, + .probe = hv_pci_probe, + .remove = hv_pci_remove, +}; + +static void __exit exit_hv_pci_drv(void) +{ + vmbus_driver_unregister(&hv_pci_drv); +} + +static int __init init_hv_pci_drv(void) +{ + return vmbus_driver_register(&hv_pci_drv); +} + +module_init(init_hv_pci_drv); +module_exit(exit_hv_pci_drv); + +MODULE_DESCRIPTION("Hyper-V PCI"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c new file mode 100644 index 000000000000..23e270839e6a --- /dev/null +++ b/drivers/pci/controller/pci-mvebu.c @@ -0,0 +1,1313 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe driver for Marvell Armada 370 and Armada XP SoCs + * + * Author: Thomas Petazzoni + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +/* + * PCIe unit register offsets. 
+ */ +#define PCIE_DEV_ID_OFF 0x0000 +#define PCIE_CMD_OFF 0x0004 +#define PCIE_DEV_REV_OFF 0x0008 +#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) +#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) +#define PCIE_CAP_PCIEXP 0x0060 +#define PCIE_HEADER_LOG_4_OFF 0x0128 +#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) +#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) +#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) +#define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4)) +#define PCIE_WIN5_CTRL_OFF 0x1880 +#define PCIE_WIN5_BASE_OFF 0x1884 +#define PCIE_WIN5_REMAP_OFF 0x188c +#define PCIE_CONF_ADDR_OFF 0x18f8 +#define PCIE_CONF_ADDR_EN 0x80000000 +#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc)) +#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16) +#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11) +#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8) +#define PCIE_CONF_ADDR(bus, devfn, where) \ + (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ + PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \ + PCIE_CONF_ADDR_EN) +#define PCIE_CONF_DATA_OFF 0x18fc +#define PCIE_MASK_OFF 0x1910 +#define PCIE_MASK_ENABLE_INTS 0x0f000000 +#define PCIE_CTRL_OFF 0x1a00 +#define PCIE_CTRL_X1_MODE 0x0001 +#define PCIE_STAT_OFF 0x1a04 +#define PCIE_STAT_BUS 0xff00 +#define PCIE_STAT_DEV 0x1f0000 +#define PCIE_STAT_LINK_DOWN BIT(0) +#define PCIE_RC_RTSTA 0x1a14 +#define PCIE_DEBUG_CTRL 0x1a60 +#define PCIE_DEBUG_SOFT_RESET BIT(20) + +enum { + PCISWCAP = PCI_BRIDGE_CONTROL + 2, + PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID, + PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP, + PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL, + PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP, + PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL, + PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP, + PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL, + PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL, + PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA, + PCISWCAP_EXP_DEVCAP2 = PCISWCAP + 
PCI_EXP_DEVCAP2, + PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2, + PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2, + PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2, + PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2, + PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2, +}; + +/* PCI configuration space of a PCI-to-PCI bridge */ +struct mvebu_sw_pci_bridge { + u16 vendor; + u16 device; + u16 command; + u16 status; + u16 class; + u8 interface; + u8 revision; + u8 bist; + u8 header_type; + u8 latency_timer; + u8 cache_line_size; + u32 bar[2]; + u8 primary_bus; + u8 secondary_bus; + u8 subordinate_bus; + u8 secondary_latency_timer; + u8 iobase; + u8 iolimit; + u16 secondary_status; + u16 membase; + u16 memlimit; + u16 iobaseupper; + u16 iolimitupper; + u32 romaddr; + u8 intline; + u8 intpin; + u16 bridgectrl; + + /* PCI express capability */ + u32 pcie_sltcap; + u16 pcie_devctl; + u16 pcie_rtctl; +}; + +struct mvebu_pcie_port; + +/* Structure representing all PCIe interfaces */ +struct mvebu_pcie { + struct platform_device *pdev; + struct mvebu_pcie_port *ports; + struct msi_controller *msi; + struct resource io; + struct resource realio; + struct resource mem; + struct resource busn; + int nports; +}; + +struct mvebu_pcie_window { + phys_addr_t base; + phys_addr_t remap; + size_t size; +}; + +/* Structure representing one PCIe interface */ +struct mvebu_pcie_port { + char *name; + void __iomem *base; + u32 port; + u32 lane; + int devfn; + unsigned int mem_target; + unsigned int mem_attr; + unsigned int io_target; + unsigned int io_attr; + struct clk *clk; + struct gpio_desc *reset_gpio; + char *reset_name; + struct mvebu_sw_pci_bridge bridge; + struct device_node *dn; + struct mvebu_pcie *pcie; + struct mvebu_pcie_window memwin; + struct mvebu_pcie_window iowin; + u32 saved_pcie_stat; +}; + +static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) +{ + writel(val, port->base + reg); +} + +static inline u32 mvebu_readl(struct 
mvebu_pcie_port *port, u32 reg) +{ + return readl(port->base + reg); +} + +static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port) +{ + return port->io_target != -1 && port->io_attr != -1; +} + +static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) +{ + return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); +} + +static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) +{ + u32 stat; + + stat = mvebu_readl(port, PCIE_STAT_OFF); + stat &= ~PCIE_STAT_BUS; + stat |= nr << 8; + mvebu_writel(port, stat, PCIE_STAT_OFF); +} + +static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) +{ + u32 stat; + + stat = mvebu_readl(port, PCIE_STAT_OFF); + stat &= ~PCIE_STAT_DEV; + stat |= nr << 16; + mvebu_writel(port, stat, PCIE_STAT_OFF); +} + +/* + * Setup PCIE BARs and Address Decode Wins: + * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks + * WIN[0-3] -> DRAM bank[0-3] + */ +static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) +{ + const struct mbus_dram_target_info *dram; + u32 size; + int i; + + dram = mv_mbus_dram_info(); + + /* First, disable and clear BARs and windows. */ + for (i = 1; i < 3; i++) { + mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i)); + mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i)); + mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i)); + } + + for (i = 0; i < 5; i++) { + mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i)); + mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i)); + mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); + } + + mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF); + mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF); + mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF); + + /* Setup windows for DDR banks. Count total DDR size on the fly. 
*/ + size = 0; + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + mvebu_writel(port, cs->base & 0xffff0000, + PCIE_WIN04_BASE_OFF(i)); + mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); + mvebu_writel(port, + ((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + PCIE_WIN04_CTRL_OFF(i)); + + size += cs->size; + } + + /* Round up 'size' to the nearest power of two. */ + if ((size & (size - 1)) != 0) + size = 1 << fls(size); + + /* Setup BAR[1] to all DRAM banks. */ + mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1)); + mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); + mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, + PCIE_BAR_CTRL_OFF(1)); +} + +static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) +{ + u32 cmd, mask; + + /* Point PCIe unit MBUS decode windows to DRAM space. */ + mvebu_pcie_setup_wins(port); + + /* Master + slave enable. */ + cmd = mvebu_readl(port, PCIE_CMD_OFF); + cmd |= PCI_COMMAND_IO; + cmd |= PCI_COMMAND_MEMORY; + cmd |= PCI_COMMAND_MASTER; + mvebu_writel(port, cmd, PCIE_CMD_OFF); + + /* Enable interrupt lines A-D. 
*/ + mask = mvebu_readl(port, PCIE_MASK_OFF); + mask |= PCIE_MASK_ENABLE_INTS; + mvebu_writel(port, mask, PCIE_MASK_OFF); +} + +static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, + struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val) +{ + void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; + + mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), + PCIE_CONF_ADDR_OFF); + + switch (size) { + case 1: + *val = readb_relaxed(conf_data + (where & 3)); + break; + case 2: + *val = readw_relaxed(conf_data + (where & 2)); + break; + case 4: + *val = readl_relaxed(conf_data); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, + struct pci_bus *bus, + u32 devfn, int where, int size, u32 val) +{ + void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; + + mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), + PCIE_CONF_ADDR_OFF); + + switch (size) { + case 1: + writeb(val, conf_data + (where & 3)); + break; + case 2: + writew(val, conf_data + (where & 2)); + break; + case 4: + writel(val, conf_data); + break; + default: + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + return PCIBIOS_SUCCESSFUL; +} + +/* + * Remove windows, starting from the largest ones to the smallest + * ones. + */ +static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, + phys_addr_t base, size_t size) +{ + while (size) { + size_t sz = 1 << (fls(size) - 1); + + mvebu_mbus_del_window(base, sz); + base += sz; + size -= sz; + } +} + +/* + * MBus windows can only have a power of two size, but PCI BARs do not + * have this constraint. Therefore, we have to split the PCI BAR into + * areas each having a power of two size. We start from the largest + * one (i.e highest order bit set in the size). 
+ */ +static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, + unsigned int target, unsigned int attribute, + phys_addr_t base, size_t size, + phys_addr_t remap) +{ + size_t size_mapped = 0; + + while (size) { + size_t sz = 1 << (fls(size) - 1); + int ret; + + ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, + sz, remap); + if (ret) { + phys_addr_t end = base + sz - 1; + + dev_err(&port->pcie->pdev->dev, + "Could not create MBus window at [mem %pa-%pa]: %d\n", + &base, &end, ret); + mvebu_pcie_del_windows(port, base - size_mapped, + size_mapped); + return; + } + + size -= sz; + size_mapped += sz; + base += sz; + if (remap != MVEBU_MBUS_NO_REMAP) + remap += sz; + } +} + +static void mvebu_pcie_set_window(struct mvebu_pcie_port *port, + unsigned int target, unsigned int attribute, + const struct mvebu_pcie_window *desired, + struct mvebu_pcie_window *cur) +{ + if (desired->base == cur->base && desired->remap == cur->remap && + desired->size == cur->size) + return; + + if (cur->size != 0) { + mvebu_pcie_del_windows(port, cur->base, cur->size); + cur->size = 0; + cur->base = 0; + + /* + * If something tries to change the window while it is enabled + * the change will not be done atomically. That would be + * difficult to do in the general case. + */ + } + + if (desired->size == 0) + return; + + mvebu_pcie_add_windows(port, target, attribute, desired->base, + desired->size, desired->remap); + *cur = *desired; +} + +static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) +{ + struct mvebu_pcie_window desired = {}; + + /* Are the new iobase/iolimit values invalid? 
*/ + if (port->bridge.iolimit < port->bridge.iobase || + port->bridge.iolimitupper < port->bridge.iobaseupper || + !(port->bridge.command & PCI_COMMAND_IO)) { + mvebu_pcie_set_window(port, port->io_target, port->io_attr, + &desired, &port->iowin); + return; + } + + if (!mvebu_has_ioport(port)) { + dev_WARN(&port->pcie->pdev->dev, + "Attempt to set IO when IO is disabled\n"); + return; + } + + /* + * We read the PCI-to-PCI bridge emulated registers, and + * calculate the base address and size of the address decoding + * window to setup, according to the PCI-to-PCI bridge + * specifications. iobase is the bus address, port->iowin_base + * is the CPU address. + */ + desired.remap = ((port->bridge.iobase & 0xF0) << 8) | + (port->bridge.iobaseupper << 16); + desired.base = port->pcie->io.start + desired.remap; + desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | + (port->bridge.iolimitupper << 16)) - + desired.remap) + + 1; + + mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, + &port->iowin); +} + +static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) +{ + struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP}; + + /* Are the new membase/memlimit values invalid? */ + if (port->bridge.memlimit < port->bridge.membase || + !(port->bridge.command & PCI_COMMAND_MEMORY)) { + mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, + &desired, &port->memwin); + return; + } + + /* + * We read the PCI-to-PCI bridge emulated registers, and + * calculate the base address and size of the address decoding + * window to setup, according to the PCI-to-PCI bridge + * specifications. 
+ */ + desired.base = ((port->bridge.membase & 0xFFF0) << 16); + desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - + desired.base + 1; + + mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, + &port->memwin); +} + +/* + * Initialize the configuration space of the PCI-to-PCI bridge + * associated with the given PCIe interface. + */ +static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port) +{ + struct mvebu_sw_pci_bridge *bridge = &port->bridge; + + memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge)); + + bridge->class = PCI_CLASS_BRIDGE_PCI; + bridge->vendor = PCI_VENDOR_ID_MARVELL; + bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; + bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; + bridge->header_type = PCI_HEADER_TYPE_BRIDGE; + bridge->cache_line_size = 0x10; + + /* We support 32 bits I/O addressing */ + bridge->iobase = PCI_IO_RANGE_TYPE_32; + bridge->iolimit = PCI_IO_RANGE_TYPE_32; + + /* Add capabilities */ + bridge->status = PCI_STATUS_CAP_LIST; +} + +/* + * Read the configuration space of the PCI-to-PCI bridge associated to + * the given PCIe interface. + */ +static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, + unsigned int where, int size, u32 *value) +{ + struct mvebu_sw_pci_bridge *bridge = &port->bridge; + + switch (where & ~3) { + case PCI_VENDOR_ID: + *value = bridge->device << 16 | bridge->vendor; + break; + + case PCI_COMMAND: + *value = bridge->command | bridge->status << 16; + break; + + case PCI_CLASS_REVISION: + *value = bridge->class << 16 | bridge->interface << 8 | + bridge->revision; + break; + + case PCI_CACHE_LINE_SIZE: + *value = bridge->bist << 24 | bridge->header_type << 16 | + bridge->latency_timer << 8 | bridge->cache_line_size; + break; + + case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_1: + *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4]; + break; + + case PCI_PRIMARY_BUS: + *value = (bridge->secondary_latency_timer << 24 | + bridge->subordinate_bus << 16 | + bridge->secondary_bus << 8 | + bridge->primary_bus); + break; + + case PCI_IO_BASE: + if (!mvebu_has_ioport(port)) + *value = bridge->secondary_status << 16; + else + *value = (bridge->secondary_status << 16 | + bridge->iolimit << 8 | + bridge->iobase); + break; + + case PCI_MEMORY_BASE: + *value = (bridge->memlimit << 16 | bridge->membase); + break; + + case PCI_PREF_MEMORY_BASE: + *value = 0; + break; + + case PCI_IO_BASE_UPPER16: + *value = (bridge->iolimitupper << 16 | bridge->iobaseupper); + break; + + case PCI_CAPABILITY_LIST: + *value = PCISWCAP; + break; + + case PCI_ROM_ADDRESS1: + *value = 0; + break; + + case PCI_INTERRUPT_LINE: + /* LINE PIN MIN_GNT MAX_LAT */ + *value = 0; + break; + + case PCISWCAP_EXP_LIST_ID: + /* Set PCIe v2, root port, slot support */ + *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | + PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP; + break; + + case PCISWCAP_EXP_DEVCAP: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP); + break; + + case PCISWCAP_EXP_DEVCTL: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) & + ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); + *value |= bridge->pcie_devctl; + break; + + case PCISWCAP_EXP_LNKCAP: + /* + * PCIe requires the clock power management capability to be + * hard-wired to zero for downstream ports + */ + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & + ~PCI_EXP_LNKCAP_CLKPM; + break; + + case PCISWCAP_EXP_LNKCTL: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); + break; + + case PCISWCAP_EXP_SLTCAP: + *value = bridge->pcie_sltcap; + break; + + case PCISWCAP_EXP_SLTCTL: + *value = PCI_EXP_SLTSTA_PDS << 16; + break; + + case PCISWCAP_EXP_RTCTL: + *value = bridge->pcie_rtctl; + break; + + case 
PCISWCAP_EXP_RTSTA: + *value = mvebu_readl(port, PCIE_RC_RTSTA); + break; + + /* PCIe requires the v2 fields to be hard-wired to zero */ + case PCISWCAP_EXP_DEVCAP2: + case PCISWCAP_EXP_DEVCTL2: + case PCISWCAP_EXP_LNKCAP2: + case PCISWCAP_EXP_LNKCTL2: + case PCISWCAP_EXP_SLTCAP2: + case PCISWCAP_EXP_SLTCTL2: + default: + /* + * PCI defines configuration read accesses to reserved or + * unimplemented registers to read as zero and complete + * normally. + */ + *value = 0; + return PCIBIOS_SUCCESSFUL; + } + + if (size == 2) + *value = (*value >> (8 * (where & 3))) & 0xffff; + else if (size == 1) + *value = (*value >> (8 * (where & 3))) & 0xff; + + return PCIBIOS_SUCCESSFUL; +} + +/* Write to the PCI-to-PCI bridge configuration space */ +static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, + unsigned int where, int size, u32 value) +{ + struct mvebu_sw_pci_bridge *bridge = &port->bridge; + u32 mask, reg; + int err; + + if (size == 4) + mask = 0x0; + else if (size == 2) + mask = ~(0xffff << ((where & 3) * 8)); + else if (size == 1) + mask = ~(0xff << ((where & 3) * 8)); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, ®); + if (err) + return err; + + value = (reg & mask) | value << ((where & 3) * 8); + + switch (where & ~3) { + case PCI_COMMAND: + { + u32 old = bridge->command; + + if (!mvebu_has_ioport(port)) + value &= ~PCI_COMMAND_IO; + + bridge->command = value & 0xffff; + if ((old ^ bridge->command) & PCI_COMMAND_IO) + mvebu_pcie_handle_iobase_change(port); + if ((old ^ bridge->command) & PCI_COMMAND_MEMORY) + mvebu_pcie_handle_membase_change(port); + break; + } + + case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_1: + bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value; + break; + + case PCI_IO_BASE: + /* + * We also keep bit 1 set, it is a read-only bit that + * indicates we support 32 bits addressing for the + * I/O + */ + bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32; + bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32; + mvebu_pcie_handle_iobase_change(port); + break; + + case PCI_MEMORY_BASE: + bridge->membase = value & 0xffff; + bridge->memlimit = value >> 16; + mvebu_pcie_handle_membase_change(port); + break; + + case PCI_IO_BASE_UPPER16: + bridge->iobaseupper = value & 0xffff; + bridge->iolimitupper = value >> 16; + mvebu_pcie_handle_iobase_change(port); + break; + + case PCI_PRIMARY_BUS: + bridge->primary_bus = value & 0xff; + bridge->secondary_bus = (value >> 8) & 0xff; + bridge->subordinate_bus = (value >> 16) & 0xff; + bridge->secondary_latency_timer = (value >> 24) & 0xff; + mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus); + break; + + case PCISWCAP_EXP_DEVCTL: + /* + * Armada370 data says these bits must always + * be zero when in root complex mode. + */ + value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); + + /* + * If the mask is 0xffff0000, then we only want to write + * the device control register, rather than clearing the + * RW1C bits in the device status register. Mask out the + * status register bits. + */ + if (mask == 0xffff0000) + value &= 0xffff; + + mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); + break; + + case PCISWCAP_EXP_LNKCTL: + /* + * If we don't support CLKREQ, we must ensure that the + * CLKREQ enable bit always reads zero. Since we haven't + * had this capability, and it's dependent on board wiring, + * disable it for the time being. 
+ */ + value &= ~PCI_EXP_LNKCTL_CLKREQ_EN; + + /* + * If the mask is 0xffff0000, then we only want to write + * the link control register, rather than clearing the + * RW1C bits in the link status register. Mask out the + * RW1C status register bits. + */ + if (mask == 0xffff0000) + value &= ~((PCI_EXP_LNKSTA_LABS | + PCI_EXP_LNKSTA_LBMS) << 16); + + mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); + break; + + case PCISWCAP_EXP_RTSTA: + mvebu_writel(port, value, PCIE_RC_RTSTA); + break; + + default: + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) +{ + return sys->private_data; +} + +static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, + struct pci_bus *bus, + int devfn) +{ + int i; + + for (i = 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = &pcie->ports[i]; + + if (bus->number == 0 && port->devfn == devfn) + return port; + if (bus->number != 0 && + bus->number >= port->bridge.secondary_bus && + bus->number <= port->bridge.subordinate_bus) + return port; + } + + return NULL; +} + +/* PCI configuration space write function */ +static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); + struct mvebu_pcie_port *port; + int ret; + + port = mvebu_pcie_find_port(pcie, bus, devfn); + if (!port) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Access the emulated PCI-to-PCI bridge */ + if (bus->number == 0) + return mvebu_sw_pci_bridge_write(port, where, size, val); + + if (!mvebu_pcie_link_up(port)) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Access the real PCIe interface */ + ret = mvebu_pcie_hw_wr_conf(port, bus, devfn, + where, size, val); + + return ret; +} + +/* PCI configuration space read function */ +static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); + struct 
mvebu_pcie_port *port; + int ret; + + port = mvebu_pcie_find_port(pcie, bus, devfn); + if (!port) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* Access the emulated PCI-to-PCI bridge */ + if (bus->number == 0) + return mvebu_sw_pci_bridge_read(port, where, size, val); + + if (!mvebu_pcie_link_up(port)) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* Access the real PCIe interface */ + ret = mvebu_pcie_hw_rd_conf(port, bus, devfn, + where, size, val); + + return ret; +} + +static struct pci_ops mvebu_pcie_ops = { + .read = mvebu_pcie_rd_conf, + .write = mvebu_pcie_wr_conf, +}; + +static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) +{ + struct mvebu_pcie *pcie = sys_to_pcie(sys); + int err, i; + + pcie->mem.name = "PCI MEM"; + pcie->realio.name = "PCI I/O"; + + if (resource_size(&pcie->realio) != 0) + pci_add_resource_offset(&sys->resources, &pcie->realio, + sys->io_offset); + + pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); + pci_add_resource(&sys->resources, &pcie->busn); + + err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources); + if (err) + return 0; + + for (i = 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = &pcie->ports[i]; + + if (!port->base) + continue; + mvebu_pcie_setup_hw(port); + } + + return 1; +} + +static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align) +{ + if (dev->bus->number != 0) + return start; + + /* + * On the PCI-to-PCI bridge side, the I/O windows must have at + * least a 64 KB size and the memory windows must have at + * least a 1 MB size. Moreover, MBus windows need to have a + * base address aligned on their size, and their size must be + * a power of two. This means that if the BAR doesn't have a + * power of two size, several MBus windows will actually be + * created. 
We need to ensure that the biggest MBus window + * (which will be the first one) is aligned on its size, which + * explains the rounddown_pow_of_two() being done here. + */ + if (res->flags & IORESOURCE_IO) + return round_up(start, max_t(resource_size_t, SZ_64K, + rounddown_pow_of_two(size))); + else if (res->flags & IORESOURCE_MEM) + return round_up(start, max_t(resource_size_t, SZ_1M, + rounddown_pow_of_two(size))); + else + return start; +} + +static void mvebu_pcie_enable(struct mvebu_pcie *pcie) +{ + struct hw_pci hw; + + memset(&hw, 0, sizeof(hw)); + +#ifdef CONFIG_PCI_MSI + hw.msi_ctrl = pcie->msi; +#endif + + hw.nr_controllers = 1; + hw.private_data = (void **)&pcie; + hw.setup = mvebu_pcie_setup; + hw.map_irq = of_irq_parse_and_map_pci; + hw.ops = &mvebu_pcie_ops; + hw.align_resource = mvebu_pcie_align_resource; + + pci_common_init_dev(&pcie->pdev->dev, &hw); +} + +/* + * Looks up the list of register addresses encoded into the reg = + * <...> property for one that matches the given port/lane. Once + * found, maps it. 
+ */ +static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, + struct device_node *np, + struct mvebu_pcie_port *port) +{ + struct resource regs; + int ret = 0; + + ret = of_address_to_resource(np, 0, ®s); + if (ret) + return ERR_PTR(ret); + + return devm_ioremap_resource(&pdev->dev, ®s); +} + +#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03) +#define DT_TYPE_IO 0x1 +#define DT_TYPE_MEM32 0x2 +#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF) +#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF) + +static int mvebu_get_tgt_attr(struct device_node *np, int devfn, + unsigned long type, + unsigned int *tgt, + unsigned int *attr) +{ + const int na = 3, ns = 2; + const __be32 *range; + int rlen, nranges, rangesz, pna, i; + + *tgt = -1; + *attr = -1; + + range = of_get_property(np, "ranges", &rlen); + if (!range) + return -EINVAL; + + pna = of_n_addr_cells(np); + rangesz = pna + na + ns; + nranges = rlen / sizeof(__be32) / rangesz; + + for (i = 0; i < nranges; i++, range += rangesz) { + u32 flags = of_read_number(range, 1); + u32 slot = of_read_number(range + 1, 1); + u64 cpuaddr = of_read_number(range + na, pna); + unsigned long rtype; + + if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO) + rtype = IORESOURCE_IO; + else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32) + rtype = IORESOURCE_MEM; + else + continue; + + if (slot == PCI_SLOT(devfn) && type == rtype) { + *tgt = DT_CPUADDR_TO_TARGET(cpuaddr); + *attr = DT_CPUADDR_TO_ATTR(cpuaddr); + return 0; + } + } + + return -ENOENT; +} + +#ifdef CONFIG_PM_SLEEP +static int mvebu_pcie_suspend(struct device *dev) +{ + struct mvebu_pcie *pcie; + int i; + + pcie = dev_get_drvdata(dev); + for (i = 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = pcie->ports + i; + port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF); + } + + return 0; +} + +static int mvebu_pcie_resume(struct device *dev) +{ + struct mvebu_pcie *pcie; + int i; + + pcie = dev_get_drvdata(dev); + for (i 
= 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = pcie->ports + i; + mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF); + mvebu_pcie_setup_hw(port); + } + + return 0; +} +#endif + +static void mvebu_pcie_port_clk_put(void *data) +{ + struct mvebu_pcie_port *port = data; + + clk_put(port->clk); +} + +static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, + struct mvebu_pcie_port *port, struct device_node *child) +{ + struct device *dev = &pcie->pdev->dev; + enum of_gpio_flags flags; + int reset_gpio, ret; + + port->pcie = pcie; + + if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) { + dev_warn(dev, "ignoring %pOF, missing pcie-port property\n", + child); + goto skip; + } + + if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane)) + port->lane = 0; + + port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port, + port->lane); + if (!port->name) { + ret = -ENOMEM; + goto err; + } + + port->devfn = of_pci_get_devfn(child); + if (port->devfn < 0) + goto skip; + + ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM, + &port->mem_target, &port->mem_attr); + if (ret < 0) { + dev_err(dev, "%s: cannot get tgt/attr for mem window\n", + port->name); + goto skip; + } + + if (resource_size(&pcie->io) != 0) { + mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO, + &port->io_target, &port->io_attr); + } else { + port->io_target = -1; + port->io_attr = -1; + } + + reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags); + if (reset_gpio == -EPROBE_DEFER) { + ret = reset_gpio; + goto err; + } + + if (gpio_is_valid(reset_gpio)) { + unsigned long gpio_flags; + + port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset", + port->name); + if (!port->reset_name) { + ret = -ENOMEM; + goto err; + } + + if (flags & OF_GPIO_ACTIVE_LOW) { + dev_info(dev, "%pOF: reset gpio is active low\n", + child); + gpio_flags = GPIOF_ACTIVE_LOW | + GPIOF_OUT_INIT_LOW; + } else { + gpio_flags = 
GPIOF_OUT_INIT_HIGH; + } + + ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags, + port->reset_name); + if (ret) { + if (ret == -EPROBE_DEFER) + goto err; + goto skip; + } + + port->reset_gpio = gpio_to_desc(reset_gpio); + } + + port->clk = of_clk_get_by_name(child, NULL); + if (IS_ERR(port->clk)) { + dev_err(dev, "%s: cannot get clock\n", port->name); + goto skip; + } + + ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port); + if (ret < 0) { + clk_put(port->clk); + goto err; + } + + return 1; + +skip: + ret = 0; + + /* In the case of skipping, we need to free these */ + devm_kfree(dev, port->reset_name); + port->reset_name = NULL; + devm_kfree(dev, port->name); + port->name = NULL; + +err: + return ret; +} + +/* + * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs + * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications + * of the PCI Express Card Electromechanical Specification, 1.1. + */ +static int mvebu_pcie_powerup(struct mvebu_pcie_port *port) +{ + int ret; + + ret = clk_prepare_enable(port->clk); + if (ret < 0) + return ret; + + if (port->reset_gpio) { + u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000; + + of_property_read_u32(port->dn, "reset-delay-us", + &reset_udelay); + + udelay(100); + + gpiod_set_value_cansleep(port->reset_gpio, 0); + msleep(reset_udelay / 1000); + } + + return 0; +} + +/* + * Power down a PCIe port. Strictly, PCIe requires us to place the card + * in D3hot state before asserting PERST#. 
+ */ +static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) +{ + gpiod_set_value_cansleep(port->reset_gpio, 1); + + clk_disable_unprepare(port->clk); +} + +static int mvebu_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mvebu_pcie *pcie; + struct device_node *np = dev->of_node; + struct device_node *child; + int num, i, ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->pdev = pdev; + platform_set_drvdata(pdev, pcie); + + /* Get the PCIe memory and I/O aperture */ + mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); + if (resource_size(&pcie->mem) == 0) { + dev_err(dev, "invalid memory aperture size\n"); + return -EINVAL; + } + + mvebu_mbus_get_pcie_io_aperture(&pcie->io); + + if (resource_size(&pcie->io) != 0) { + pcie->realio.flags = pcie->io.flags; + pcie->realio.start = PCIBIOS_MIN_IO; + pcie->realio.end = min_t(resource_size_t, + IO_SPACE_LIMIT, + resource_size(&pcie->io)); + } else + pcie->realio = pcie->io; + + /* Get the bus range */ + ret = of_pci_parse_bus_range(np, &pcie->busn); + if (ret) { + dev_err(dev, "failed to parse bus-range property: %d\n", ret); + return ret; + } + + num = of_get_available_child_count(np); + + pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL); + if (!pcie->ports) + return -ENOMEM; + + i = 0; + for_each_available_child_of_node(np, child) { + struct mvebu_pcie_port *port = &pcie->ports[i]; + + ret = mvebu_pcie_parse_port(pcie, port, child); + if (ret < 0) { + of_node_put(child); + return ret; + } else if (ret == 0) { + continue; + } + + port->dn = child; + i++; + } + pcie->nports = i; + + for (i = 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = &pcie->ports[i]; + + child = port->dn; + if (!child) + continue; + + ret = mvebu_pcie_powerup(port); + if (ret < 0) + continue; + + port->base = mvebu_pcie_map_registers(pdev, child, port); + if (IS_ERR(port->base)) { + dev_err(dev, "%s: cannot map 
registers\n", port->name); + port->base = NULL; + mvebu_pcie_powerdown(port); + continue; + } + + mvebu_pcie_set_local_dev_nr(port, 1); + mvebu_sw_pci_bridge_init(port); + } + + pcie->nports = i; + + for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K) + pci_ioremap_io(i, pcie->io.start + i); + + mvebu_pcie_enable(pcie); + + platform_set_drvdata(pdev, pcie); + + return 0; +} + +static const struct of_device_id mvebu_pcie_of_match_table[] = { + { .compatible = "marvell,armada-xp-pcie", }, + { .compatible = "marvell,armada-370-pcie", }, + { .compatible = "marvell,dove-pcie", }, + { .compatible = "marvell,kirkwood-pcie", }, + {}, +}; + +static const struct dev_pm_ops mvebu_pcie_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) +}; + +static struct platform_driver mvebu_pcie_driver = { + .driver = { + .name = "mvebu-pcie", + .of_match_table = mvebu_pcie_of_match_table, + /* driver unloading/unbinding currently not supported */ + .suppress_bind_attrs = true, + .pm = &mvebu_pcie_pm_ops, + }, + .probe = mvebu_pcie_probe, +}; +builtin_platform_driver(mvebu_pcie_driver); diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c new file mode 100644 index 000000000000..326171cb1a97 --- /dev/null +++ b/drivers/pci/controller/pci-rcar-gen2.c @@ -0,0 +1,428 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pci-rcar-gen2: internal PCI bus support + * + * Copyright (C) 2013 Renesas Solutions Corp. + * Copyright (C) 2013 Cogent Embedded, Inc. 
+ * + * Author: Valentine Barshak + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +/* AHB-PCI Bridge PCI communication registers */ +#define RCAR_AHBPCI_PCICOM_OFFSET 0x800 + +#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00) +#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04) +#define RCAR_PCIAHB_PREFETCH0 0x0 +#define RCAR_PCIAHB_PREFETCH4 0x1 +#define RCAR_PCIAHB_PREFETCH8 0x2 +#define RCAR_PCIAHB_PREFETCH16 0x3 + +#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10) +#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14) +#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1) +#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1) +#define RCAR_AHBPCI_WIN1_HOST (1 << 30) +#define RCAR_AHBPCI_WIN1_DEVICE (1 << 31) + +#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20) +#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24) +#define RCAR_PCI_INT_SIGTABORT (1 << 0) +#define RCAR_PCI_INT_SIGRETABORT (1 << 1) +#define RCAR_PCI_INT_REMABORT (1 << 2) +#define RCAR_PCI_INT_PERR (1 << 3) +#define RCAR_PCI_INT_SIGSERR (1 << 4) +#define RCAR_PCI_INT_RESERR (1 << 5) +#define RCAR_PCI_INT_WIN1ERR (1 << 12) +#define RCAR_PCI_INT_WIN2ERR (1 << 13) +#define RCAR_PCI_INT_A (1 << 16) +#define RCAR_PCI_INT_B (1 << 17) +#define RCAR_PCI_INT_PME (1 << 19) +#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \ + RCAR_PCI_INT_SIGRETABORT | \ + RCAR_PCI_INT_REMABORT | \ + RCAR_PCI_INT_PERR | \ + RCAR_PCI_INT_SIGSERR | \ + RCAR_PCI_INT_RESERR | \ + RCAR_PCI_INT_WIN1ERR | \ + RCAR_PCI_INT_WIN2ERR) + +#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30) +#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0) +#define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1) +#define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2) +#define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7) +#define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17) +#define RCAR_AHB_BUS_MODE 
(RCAR_AHB_BUS_MMODE_HTRANS | \ + RCAR_AHB_BUS_MMODE_BYTE_BURST | \ + RCAR_AHB_BUS_MMODE_WR_INCR | \ + RCAR_AHB_BUS_MMODE_HBUS_REQ | \ + RCAR_AHB_BUS_SMODE_READYCTR) + +#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34) +#define RCAR_USBCTR_USBH_RST (1 << 0) +#define RCAR_USBCTR_PCICLK_MASK (1 << 1) +#define RCAR_USBCTR_PLL_RST (1 << 2) +#define RCAR_USBCTR_DIRPD (1 << 8) +#define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9) +#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10) +#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10) +#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10) +#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10) +#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10) + +#define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40) +#define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0) +#define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1) +#define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12) + +#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48) + +struct rcar_pci_priv { + struct device *dev; + void __iomem *reg; + struct resource mem_res; + struct resource *cfg_res; + unsigned busnr; + int irq; + unsigned long window_size; + unsigned long window_addr; + unsigned long window_pci; +}; + +/* PCI configuration space operations */ +static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct pci_sys_data *sys = bus->sysdata; + struct rcar_pci_priv *priv = sys->private_data; + int slot, val; + + if (sys->busnr != bus->number || PCI_FUNC(devfn)) + return NULL; + + /* Only one EHCI/OHCI device built-in */ + slot = PCI_SLOT(devfn); + if (slot > 2) + return NULL; + + /* bridge logic only has registers to 0x40 */ + if (slot == 0x0 && where >= 0x40) + return NULL; + + val = slot ? 
RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG : + RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG; + + iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG); + return priv->reg + (slot >> 1) * 0x100 + where; +} + +/* PCI interrupt mapping */ +static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_sys_data *sys = dev->bus->sysdata; + struct rcar_pci_priv *priv = sys->private_data; + int irq; + + irq = of_irq_parse_and_map_pci(dev, slot, pin); + if (!irq) + irq = priv->irq; + + return irq; +} + +#ifdef CONFIG_PCI_DEBUG +/* if debug enabled, then attach an error handler irq to the bridge */ + +static irqreturn_t rcar_pci_err_irq(int irq, void *pw) +{ + struct rcar_pci_priv *priv = pw; + struct device *dev = priv->dev; + u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG); + + if (status & RCAR_PCI_INT_ALLERRORS) { + dev_err(dev, "error irq: status %08x\n", status); + + /* clear the error(s) */ + iowrite32(status & RCAR_PCI_INT_ALLERRORS, + priv->reg + RCAR_PCI_INT_STATUS_REG); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) +{ + struct device *dev = priv->dev; + int ret; + u32 val; + + ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq, + IRQF_SHARED, "error irq", priv); + if (ret) { + dev_err(dev, "cannot claim IRQ for error handling\n"); + return; + } + + val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG); + val |= RCAR_PCI_INT_ALLERRORS; + iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG); +} +#else +static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { } +#endif + +/* PCI host controller setup */ +static int rcar_pci_setup(int nr, struct pci_sys_data *sys) +{ + struct rcar_pci_priv *priv = sys->private_data; + struct device *dev = priv->dev; + void __iomem *reg = priv->reg; + u32 val; + int ret; + + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + + val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); + dev_info(dev, "PCI: bus%u 
revision %x\n", sys->busnr, val); + + /* Disable Direct Power Down State and assert reset */ + val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; + val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST; + iowrite32(val, reg + RCAR_USBCTR_REG); + udelay(4); + + /* De-assert reset and reset PCIAHB window1 size */ + val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK | + RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST); + + /* Setup PCIAHB window1 size */ + switch (priv->window_size) { + case SZ_2G: + val |= RCAR_USBCTR_PCIAHB_WIN1_2G; + break; + case SZ_1G: + val |= RCAR_USBCTR_PCIAHB_WIN1_1G; + break; + case SZ_512M: + val |= RCAR_USBCTR_PCIAHB_WIN1_512M; + break; + default: + pr_warn("unknown window size %ld - defaulting to 256M\n", + priv->window_size); + priv->window_size = SZ_256M; + /* fall-through */ + case SZ_256M: + val |= RCAR_USBCTR_PCIAHB_WIN1_256M; + break; + } + iowrite32(val, reg + RCAR_USBCTR_REG); + + /* Configure AHB master and slave modes */ + iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG); + + /* Configure PCI arbiter */ + val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG); + val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 | + RCAR_PCI_ARBITER_PCIBP_MODE; + iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG); + + /* PCI-AHB mapping */ + iowrite32(priv->window_addr | RCAR_PCIAHB_PREFETCH16, + reg + RCAR_PCIAHB_WIN1_CTR_REG); + + /* AHB-PCI mapping: OHCI/EHCI registers */ + val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM; + iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG); + + /* Enable AHB-PCI bridge PCI configuration access */ + iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG, + reg + RCAR_AHBPCI_WIN1_CTR_REG); + /* Set PCI-AHB Window1 address */ + iowrite32(priv->window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, + reg + PCI_BASE_ADDRESS_1); + /* Set AHB-PCI bridge PCI communication area address */ + val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; + iowrite32(val, reg + PCI_BASE_ADDRESS_0); + + val = 
ioread32(reg + PCI_COMMAND); + val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; + iowrite32(val, reg + PCI_COMMAND); + + /* Enable PCI interrupts */ + iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME, + reg + RCAR_PCI_INT_ENABLE_REG); + + if (priv->irq > 0) + rcar_pci_setup_errirq(priv); + + /* Add PCI resources */ + pci_add_resource(&sys->resources, &priv->mem_res); + ret = devm_request_pci_bus_resources(dev, &sys->resources); + if (ret < 0) + return ret; + + /* Setup bus number based on platform device id / of bus-range */ + sys->busnr = priv->busnr; + return 1; +} + +static struct pci_ops rcar_pci_ops = { + .map_bus = rcar_pci_cfg_base, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, + struct device_node *np) +{ + struct device *dev = pci->dev; + struct of_pci_range range; + struct of_pci_range_parser parser; + int index = 0; + + /* Failure to parse is ok as we fall back to defaults */ + if (of_pci_dma_range_parser_init(&parser, np)) + return 0; + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + /* Hardware only allows one inbound 32-bit range */ + if (index) + return -EINVAL; + + pci->window_addr = (unsigned long)range.cpu_addr; + pci->window_pci = (unsigned long)range.pci_addr; + pci->window_size = (unsigned long)range.size; + + /* Catch HW limitations */ + if (!(range.flags & IORESOURCE_PREFETCH)) { + dev_err(dev, "window must be prefetchable\n"); + return -EINVAL; + } + if (pci->window_addr) { + u32 lowaddr = 1 << (ffs(pci->window_addr) - 1); + + if (lowaddr < pci->window_size) { + dev_err(dev, "invalid window size/addr\n"); + return -EINVAL; + } + } + index++; + } + + return 0; +} + +static int rcar_pci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *cfg_res, *mem_res; + struct rcar_pci_priv *priv; + void __iomem *reg; + struct 
hw_pci hw; + void *hw_private[1]; + + cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg = devm_ioremap_resource(dev, cfg_res); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!mem_res || !mem_res->start) + return -ENODEV; + + if (mem_res->start & 0xFFFF) + return -EINVAL; + + priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->mem_res = *mem_res; + priv->cfg_res = cfg_res; + + priv->irq = platform_get_irq(pdev, 0); + priv->reg = reg; + priv->dev = dev; + + if (priv->irq < 0) { + dev_err(dev, "no valid irq found\n"); + return priv->irq; + } + + /* default window addr and size if not specified in DT */ + priv->window_addr = 0x40000000; + priv->window_pci = 0x40000000; + priv->window_size = SZ_1G; + + if (dev->of_node) { + struct resource busnr; + int ret; + + ret = of_pci_parse_bus_range(dev->of_node, &busnr); + if (ret < 0) { + dev_err(dev, "failed to parse bus-range\n"); + return ret; + } + + priv->busnr = busnr.start; + if (busnr.end != busnr.start) + dev_warn(dev, "only one bus number supported\n"); + + ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node); + if (ret < 0) { + dev_err(dev, "failed to parse dma-range\n"); + return ret; + } + } else { + priv->busnr = pdev->id; + } + + hw_private[0] = priv; + memset(&hw, 0, sizeof(hw)); + hw.nr_controllers = ARRAY_SIZE(hw_private); + hw.io_optional = 1; + hw.private_data = hw_private; + hw.map_irq = rcar_pci_map_irq; + hw.ops = &rcar_pci_ops; + hw.setup = rcar_pci_setup; + pci_common_init_dev(dev, &hw); + return 0; +} + +static const struct of_device_id rcar_pci_of_match[] = { + { .compatible = "renesas,pci-r8a7790", }, + { .compatible = "renesas,pci-r8a7791", }, + { .compatible = "renesas,pci-r8a7794", }, + { .compatible = "renesas,pci-rcar-gen2", }, + { }, +}; + +static struct platform_driver rcar_pci_driver = { + .driver = { + .name = "pci-rcar-gen2", + .suppress_bind_attrs = true, 
+ .of_match_table = rcar_pci_of_match, + }, + .probe = rcar_pci_probe, +}; +builtin_platform_driver(rcar_pci_driver); diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c new file mode 100644 index 000000000000..f4f53d092e00 --- /dev/null +++ b/drivers/pci/controller/pci-tegra.c @@ -0,0 +1,2531 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe host controller driver for Tegra SoCs + * + * Copyright (c) 2010, CompuLab, Ltd. + * Author: Mike Rapoport + * + * Based on NVIDIA PCIe driver + * Copyright (c) 2008-2009, NVIDIA Corporation. + * + * Bits taken from arch/arm/mach-dove/pcie.c + * + * Author: Thierry Reding + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../pci.h" + +#define INT_PCI_MSI_NR (8 * 32) + +/* register definitions */ + +#define AFI_AXI_BAR0_SZ 0x00 +#define AFI_AXI_BAR1_SZ 0x04 +#define AFI_AXI_BAR2_SZ 0x08 +#define AFI_AXI_BAR3_SZ 0x0c +#define AFI_AXI_BAR4_SZ 0x10 +#define AFI_AXI_BAR5_SZ 0x14 + +#define AFI_AXI_BAR0_START 0x18 +#define AFI_AXI_BAR1_START 0x1c +#define AFI_AXI_BAR2_START 0x20 +#define AFI_AXI_BAR3_START 0x24 +#define AFI_AXI_BAR4_START 0x28 +#define AFI_AXI_BAR5_START 0x2c + +#define AFI_FPCI_BAR0 0x30 +#define AFI_FPCI_BAR1 0x34 +#define AFI_FPCI_BAR2 0x38 +#define AFI_FPCI_BAR3 0x3c +#define AFI_FPCI_BAR4 0x40 +#define AFI_FPCI_BAR5 0x44 + +#define AFI_CACHE_BAR0_SZ 0x48 +#define AFI_CACHE_BAR0_ST 0x4c +#define AFI_CACHE_BAR1_SZ 0x50 +#define AFI_CACHE_BAR1_ST 0x54 + +#define AFI_MSI_BAR_SZ 0x60 +#define AFI_MSI_FPCI_BAR_ST 0x64 +#define AFI_MSI_AXI_BAR_ST 0x68 + +#define AFI_MSI_VEC0 0x6c +#define AFI_MSI_VEC1 0x70 +#define AFI_MSI_VEC2 0x74 +#define AFI_MSI_VEC3 0x78 +#define AFI_MSI_VEC4 0x7c +#define AFI_MSI_VEC5 0x80 +#define AFI_MSI_VEC6 0x84 +#define AFI_MSI_VEC7 0x88 
+ +#define AFI_MSI_EN_VEC0 0x8c +#define AFI_MSI_EN_VEC1 0x90 +#define AFI_MSI_EN_VEC2 0x94 +#define AFI_MSI_EN_VEC3 0x98 +#define AFI_MSI_EN_VEC4 0x9c +#define AFI_MSI_EN_VEC5 0xa0 +#define AFI_MSI_EN_VEC6 0xa4 +#define AFI_MSI_EN_VEC7 0xa8 + +#define AFI_CONFIGURATION 0xac +#define AFI_CONFIGURATION_EN_FPCI (1 << 0) + +#define AFI_FPCI_ERROR_MASKS 0xb0 + +#define AFI_INTR_MASK 0xb4 +#define AFI_INTR_MASK_INT_MASK (1 << 0) +#define AFI_INTR_MASK_MSI_MASK (1 << 8) + +#define AFI_INTR_CODE 0xb8 +#define AFI_INTR_CODE_MASK 0xf +#define AFI_INTR_INI_SLAVE_ERROR 1 +#define AFI_INTR_INI_DECODE_ERROR 2 +#define AFI_INTR_TARGET_ABORT 3 +#define AFI_INTR_MASTER_ABORT 4 +#define AFI_INTR_INVALID_WRITE 5 +#define AFI_INTR_LEGACY 6 +#define AFI_INTR_FPCI_DECODE_ERROR 7 +#define AFI_INTR_AXI_DECODE_ERROR 8 +#define AFI_INTR_FPCI_TIMEOUT 9 +#define AFI_INTR_PE_PRSNT_SENSE 10 +#define AFI_INTR_PE_CLKREQ_SENSE 11 +#define AFI_INTR_CLKCLAMP_SENSE 12 +#define AFI_INTR_RDY4PD_SENSE 13 +#define AFI_INTR_P2P_ERROR 14 + +#define AFI_INTR_SIGNATURE 0xbc +#define AFI_UPPER_FPCI_ADDRESS 0xc0 +#define AFI_SM_INTR_ENABLE 0xc4 +#define AFI_SM_INTR_INTA_ASSERT (1 << 0) +#define AFI_SM_INTR_INTB_ASSERT (1 << 1) +#define AFI_SM_INTR_INTC_ASSERT (1 << 2) +#define AFI_SM_INTR_INTD_ASSERT (1 << 3) +#define AFI_SM_INTR_INTA_DEASSERT (1 << 4) +#define AFI_SM_INTR_INTB_DEASSERT (1 << 5) +#define AFI_SM_INTR_INTC_DEASSERT (1 << 6) +#define AFI_SM_INTR_INTD_DEASSERT (1 << 7) + +#define AFI_AFI_INTR_ENABLE 0xc8 +#define AFI_INTR_EN_INI_SLVERR (1 << 0) +#define AFI_INTR_EN_INI_DECERR (1 << 1) +#define AFI_INTR_EN_TGT_SLVERR (1 << 2) +#define AFI_INTR_EN_TGT_DECERR (1 << 3) +#define AFI_INTR_EN_TGT_WRERR (1 << 4) +#define AFI_INTR_EN_DFPCI_DECERR (1 << 5) +#define AFI_INTR_EN_AXI_DECERR (1 << 6) +#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7) +#define AFI_INTR_EN_PRSNT_SENSE (1 << 8) + +#define AFI_PCIE_PME 0xf0 + +#define AFI_PCIE_CONFIG 0x0f8 +#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1)) +#define 
AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) +#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20) + +#define AFI_FUSE 0x104 +#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) + +#define AFI_PEX0_CTRL 0x110 +#define AFI_PEX1_CTRL 0x118 +#define AFI_PEX2_CTRL 0x128 +#define AFI_PEX_CTRL_RST (1 << 0) +#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) +#define AFI_PEX_CTRL_REFCLK_EN (1 << 3) +#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4) + +#define AFI_PLLE_CONTROL 0x160 +#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9) +#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1) + +#define AFI_PEXBIAS_CTRL_0 0x168 + +#define RP_VEND_XP 0x00000f00 +#define RP_VEND_XP_DL_UP (1 << 30) + +#define RP_VEND_CTL2 0x00000fa8 +#define RP_VEND_CTL2_PCA_ENABLE (1 << 7) + +#define RP_PRIV_MISC 0x00000fe0 +#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) +#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) + +#define RP_LINK_CONTROL_STATUS 0x00000090 +#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 +#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 + +#define PADS_CTL_SEL 0x0000009c + +#define PADS_CTL 0x000000a0 +#define PADS_CTL_IDDQ_1L (1 << 0) +#define PADS_CTL_TX_DATA_EN_1L (1 << 6) +#define PADS_CTL_RX_DATA_EN_1L (1 << 10) + +#define PADS_PLL_CTL_TEGRA20 0x000000b8 +#define PADS_PLL_CTL_TEGRA30 0x000000b4 +#define PADS_PLL_CTL_RST_B4SM (1 << 1) +#define 
PADS_PLL_CTL_LOCKDET (1 << 8) +#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) +#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16) +#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16) +#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16) +#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20) +#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20) +#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20) +#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22) + +#define PADS_REFCLK_CFG0 0x000000c8 +#define PADS_REFCLK_CFG1 0x000000cc +#define PADS_REFCLK_BIAS 0x000000d0 + +/* + * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit + * entries, one entry per PCIe port. These field definitions and desired + * values aren't in the TRM, but do come from NVIDIA. + */ +#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */ +#define PADS_REFCLK_CFG_E_TERM_SHIFT 7 +#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */ +#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ + +#define PME_ACK_TIMEOUT 10000 + +struct tegra_msi { + struct msi_controller chip; + DECLARE_BITMAP(used, INT_PCI_MSI_NR); + struct irq_domain *domain; + unsigned long pages; + struct mutex lock; + u64 phys; + int irq; +}; + +/* used to differentiate between Tegra SoC generations */ +struct tegra_pcie_port_soc { + struct { + u8 turnoff_bit; + u8 ack_bit; + } pme; +}; + +struct tegra_pcie_soc { + unsigned int num_ports; + const struct tegra_pcie_port_soc *ports; + unsigned int msi_base_shift; + u32 pads_pll_ctl; + u32 tx_ref_sel; + u32 pads_refclk_cfg0; + u32 pads_refclk_cfg1; + bool has_pex_clkreq_en; + bool has_pex_bias_ctrl; + bool has_intr_prsnt_sense; + bool has_cml_clk; + bool has_gen2; + bool force_pca_enable; + bool program_uphy; +}; + +static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) +{ + return container_of(chip, struct tegra_msi, chip); +} + +struct tegra_pcie { + struct device *dev; + + void __iomem *pads; + void __iomem *afi; + void __iomem *cfg; + int irq; + + struct resource cs; + struct resource io; + struct 
resource pio; + struct resource mem; + struct resource prefetch; + struct resource busn; + + struct { + resource_size_t mem; + resource_size_t io; + } offset; + + struct clk *pex_clk; + struct clk *afi_clk; + struct clk *pll_e; + struct clk *cml_clk; + + struct reset_control *pex_rst; + struct reset_control *afi_rst; + struct reset_control *pcie_xrst; + + bool legacy_phy; + struct phy *phy; + + struct tegra_msi msi; + + struct list_head ports; + u32 xbar_config; + + struct regulator_bulk_data *supplies; + unsigned int num_supplies; + + const struct tegra_pcie_soc *soc; + struct dentry *debugfs; +}; + +struct tegra_pcie_port { + struct tegra_pcie *pcie; + struct device_node *np; + struct list_head list; + struct resource regs; + void __iomem *base; + unsigned int index; + unsigned int lanes; + + struct phy **phys; +}; + +struct tegra_pcie_bus { + struct list_head list; + unsigned int nr; +}; + +static inline void afi_writel(struct tegra_pcie *pcie, u32 value, + unsigned long offset) +{ + writel(value, pcie->afi + offset); +} + +static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset) +{ + return readl(pcie->afi + offset); +} + +static inline void pads_writel(struct tegra_pcie *pcie, u32 value, + unsigned long offset) +{ + writel(value, pcie->pads + offset); +} + +static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset) +{ + return readl(pcie->pads + offset); +} + +/* + * The configuration space mapping on Tegra is somewhat similar to the ECAM + * defined by PCIe. However it deviates a bit in how the 4 bits for extended + * register accesses are mapped: + * + * [27:24] extended register number + * [23:16] bus number + * [15:11] device number + * [10: 8] function number + * [ 7: 0] register number + * + * Mapping the whole extended configuration space would require 256 MiB of + * virtual address space, only a small part of which will actually be used. 
+ * + * To work around this, a 4 KiB region is used to generate the required + * configuration transaction with relevant B:D:F and register offset values. + * This is achieved by dynamically programming base address and size of + * AFI_AXI_BAR used for end point config space mapping to make sure that the + * address (access to which generates correct config transaction) falls in + * this 4 KiB region. + */ +static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn, + unsigned int where) +{ + return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) | + (PCI_FUNC(devfn) << 8) | (where & 0xff); +} + +static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, + int where) +{ + struct tegra_pcie *pcie = bus->sysdata; + void __iomem *addr = NULL; + + if (bus->number == 0) { + unsigned int slot = PCI_SLOT(devfn); + struct tegra_pcie_port *port; + + list_for_each_entry(port, &pcie->ports, list) { + if (port->index + 1 == slot) { + addr = port->base + (where & ~3); + break; + } + } + } else { + unsigned int offset; + u32 base; + + offset = tegra_pcie_conf_offset(bus->number, devfn, where); + + /* move 4 KiB window to offset within the FPCI region */ + base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8); + afi_writel(pcie, base, AFI_FPCI_BAR0); + + /* move to correct offset within the 4 KiB page */ + addr = pcie->cfg + (offset & (SZ_4K - 1)); + } + + return addr; +} + +static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + if (bus->number == 0) + return pci_generic_config_read32(bus, devfn, where, size, + value); + + return pci_generic_config_read(bus, devfn, where, size, value); +} + +static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + if (bus->number == 0) + return pci_generic_config_write32(bus, devfn, where, size, + value); + + return pci_generic_config_write(bus, devfn, where, size, value); +} + 
static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

/*
 * Return the AFI control register for a given root port, or 0 for an
 * out-of-range port index.
 */
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = AFI_PEX2_CTRL;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal: assert (clear RST bit), wait, then deassert */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}

/* Release a port's register window and drop it from the port list. */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);

/*
 * Register the I/O, memory, prefetchable and bus-number windows with the
 * host bridge and claim them. On failure the window list is freed again.
 */
static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct device *dev = pcie->dev;
	int err;

	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
	pci_add_resource(windows, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0) {
		pci_free_resource_list(windows);
		return err;
	}

	/* NOTE(review): pci_remap_iospace() return value is ignored — confirm */
	pci_remap_iospace(&pcie->pio, pcie->io.start);

	return 0;
}

/* Reverse of tegra_pcie_request_resources(). */
static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;

	pci_unmap_iospace(&pcie->pio);
	pci_free_resource_list(windows);
}

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;	/* fall back to the controller IRQ */

	return irq;
}

/*
 * Controller interrupt handler: decodes the AFI error code, logs it and, for
 * abort/decode errors, also logs the faulting FPCI address. Legacy INTx
 * codes are not handled here (IRQ_NONE lets a chained handler take them).
 */
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);	/* acknowledge the interrupt */

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	/* codes beyond the table are reported as "Unknown" */
	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, " FPCI address: %10llx\n", address);
		else
			dev_err(dev, " FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/* NOTE(review): AFI_MSI_BAR_SZ is written twice here — confirm intent */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

/* Poll for PADS PLL lock detect; @timeout is in milliseconds. */
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

/*
 * Bring up the built-in (legacy) PHY: select the internal pads, cycle the
 * PLL through reset, wait for lock and enable TX/RX data.
 *
 * Returns 0 on success or -ETIMEDOUT if the PLL fails to lock.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

/* Reverse of tegra_pcie_phy_enable() for the built-in (legacy) PHY. */
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

/* Power on the per-lane PHYs of one port; stops at the first failure. */
static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}
/* Power off the per-lane PHYs of one port; stops at the first failure. */
static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

/*
 * Power on all PHYs: either the single legacy PHY (external or built-in),
 * or every per-port, per-lane PHY, followed by reference-clock driver
 * configuration in the PADS block.
 */
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}

/* Reverse of tegra_pcie_phy_power_on(). */
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

/*
 * Program the AFI for operation: PLL power-down control, crossbar/port
 * configuration, Gen2 fuse, PHY power-up (when the SoC requires it), and
 * finally take the interface out of reset and enable error interrupts.
 * MSIs remain masked here; tegra_pcie_enable_msi() unmasks them later.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* re-enable only the ports that are actually present */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phy_power_on(pcie);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY(s): %d\n", err);
			return err;
		}
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}

/* Put the interface back into reset and power off the PHYs. */
static void tegra_pcie_disable_controller(struct tegra_pcie *pcie)
{
	int err;

	reset_control_assert(pcie->pcie_xrst);

	if (pcie->soc->program_uphy) {
		err = tegra_pcie_phy_power_off(pcie);
		if (err < 0)
			dev_err(pcie->dev, "failed to power off PHY(s): %d\n",
				err);
	}
}

/* Assert resets, stop clocks, power-gate the partition, drop regulators. */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);
	clk_disable_unprepare(pcie->pex_clk);

	/* with a PM domain, power gating is handled by the domain instead */
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

/*
 * Power up the controller: regulators, PEX clock + reset (via the power
 * gate sequence when no PM domain is attached), then the AFI, CML and PLLE
 * clocks in order.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (dev->pm_domain) {
		err = clk_prepare_enable(pcie->pex_clk);
		if (err) {
			dev_err(dev, "failed to enable PEX clock: %d\n", err);
			return err;
		}
		reset_control_deassert(pcie->pex_rst);
	} else {
		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
							pcie->pex_clk,
							pcie->pex_rst);
		if (err) {
			dev_err(dev, "powerup sequence failed: %d\n", err);
			return err;
		}
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}

/* Look up all clocks the controller needs (CML only on SoCs that have it). */
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

/* Look up the three reset controls ("pex", "afi", "pcie_x"). */
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

/* Acquire and init the single "pcie" PHY used by the legacy DT binding. */
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

/*
 * Get the PHY named "<consumer>-<index>" from @np; a missing PHY (-ENODEV)
 * is mapped to NULL so callers can treat it as optional.
 */
static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_get(dev, np, name);
	kfree(name);

	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}

/* Acquire and init one PHY per lane for a port (new-style DT binding). */
static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	/*
	 * NOTE(review): arguments look swapped relative to the
	 * devm_kcalloc(dev, n, size, flags) convention; the product is the
	 * same, but confirm and normalize.
	 */
	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

/*
 * Acquire all PHYs: the legacy single-PHY path is taken for pre-Gen2 SoCs
 * or when the device node carries a top-level "phys" property; otherwise
 * per-port, per-lane PHYs are used.
 */
static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

/* Tear down all PHYs acquired by tegra_pcie_phys_get(). */
static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}

/*
 * Acquire all platform resources: clocks, resets, PHYs, the "pads" and
 * "afi" register windows, the (4 KiB-constrained) config space window and
 * the controller interrupt. PHYs are released again on any later failure.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto phys_put;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);
	return err;
}

/* Release the IRQ and PHYs; devm handles the register windows. */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

/*
 * Broadcast PME_Turn_Off for one port and wait (poll) for the PME ack bit,
 * then clear the turn-off request again.
 */
static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

/*
 * Allocate the lowest free MSI vector number, or -ENOSPC when all
 * INT_PCI_MSI_NR vectors are in use. Protected by chip->lock.
 */
static int tegra_msi_alloc(struct tegra_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

/* Release an MSI vector number previously handed out by tegra_msi_alloc(). */
static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
{
	struct device *dev = chip->chip.dev;

	mutex_lock(&chip->lock);

	if (!test_bit(irq, chip->used))
		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
	else
		clear_bit(irq, chip->used);

	mutex_unlock(&chip->lock);
}

/*
 * MSI interrupt handler: scans the eight 32-bit AFI MSI vector registers,
 * acknowledges each pending bit and dispatches the mapped Linux IRQ.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct device *dev = pcie->dev;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * msi_controller .setup_irq: allocate a hardware vector, map it into the
 * IRQ domain and program the message (address = msi->phys, data = hwirq).
 */
static int tegra_msi_setup_irq(struct msi_controller *chip,
			       struct pci_dev *pdev, struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		tegra_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = lower_32_bits(msi->phys);
	msg.address_hi = upper_32_bits(msi->phys);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

/* msi_controller .teardown_irq: undo tegra_msi_setup_irq(). */
static void tegra_msi_teardown_irq(struct msi_controller *chip,
				   unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	irq_dispose_mapping(irq);
	tegra_msi_free(msi, hwirq);
}

static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* irq_domain .map: attach the MSI irq_chip to a newly mapped vector. */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};

/*
 * Set up the MSI controller: IRQ domain, the "msi" interrupt, and one page
 * whose physical address serves as the MSI target for endpoints.
 */
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = tegra_msi_setup_irq;
	msi->chip.teardown_irq = tegra_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto err;
	}

	msi->irq = err;

	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
			  tegra_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup AFI/FPCI range */
	/* NOTE(review): __get_free_pages() result is not checked for NULL */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	msi->phys = virt_to_phys((void *)msi->pages);
	host->msi = &msi->chip;

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}

/* Program the MSI target address into the AFI and unmask all vectors. */
static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_msi *msi = &pcie->msi;
	u32 reg;

	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);
}

/* Reverse of tegra_pcie_msi_setup(). */
static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);
}

/* Mask the MSI interrupt and disable every vector in the AFI. */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	return 0;
}

/*
 * Translate the per-port lane configuration (encoded as one nibble/byte per
 * port in @lanes, as parsed from the DT) into the SoC-specific crossbar
 * configuration value. Returns -EINVAL for unsupported combinations, except
 * on Tegra186 where an unknown value falls back to the 2x1,1x1,1x1 default.
 */
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		switch (lanes) {
		case 0x010004:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
			return 0;

		case 0x010102:
			dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;

		case 0x010101:
			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
			return 0;

		default:
			dev_info(dev, "wrong configuration updated in DT, "
				 "switching to default 2x1, 1x1, 1x1 "
				 "configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		switch (lanes) {
		case 0x0000104:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			dev_info(dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Check whether a given set of supplies is available in a device tree node.
 * This is used to check whether the new or the legacy device tree bindings
 * should be used.
 */
static bool of_regulator_bulk_available(struct device_node *np,
					struct regulator_bulk_data *supplies,
					unsigned int num_supplies)
{
	char property[32];
	unsigned int i;

	for (i = 0; i < num_supplies; i++) {
		snprintf(property, 32, "%s-supply", supplies[i].supply);

		if (of_find_property(np, property, NULL) == NULL)
			return false;
	}

	return true;
}

/*
 * Old versions of the device tree binding for this device used a set of power
 * supplies that didn't match the hardware inputs. This happened to work for a
 * number of cases but is not future proof. However to preserve backwards-
 * compatibility with old device trees, this function will try to use the old
 * set of supplies.
 */
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
		pcie->num_supplies = 3;
	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
		pcie->num_supplies = 2;

	if (pcie->num_supplies == 0) {
		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
		return -ENODEV;
	}

	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
				      sizeof(*pcie->supplies),
				      GFP_KERNEL);
	if (!pcie->supplies)
		return -ENOMEM;

	pcie->supplies[0].supply = "pex-clk";
	pcie->supplies[1].supply = "vdd";

	if (pcie->num_supplies > 2)
		pcie->supplies[2].supply = "avdd";

	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}

/*
 * Obtains the list of regulators required for a particular generation of the
 * IP block.
 *
 * This would've been nice to do simply by providing static tables for use
 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
 * and either seems to be optional depending on which ports are being used.
+ */ +static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) +{ + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; + unsigned int i = 0; + + if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { + pcie->num_supplies = 4; + + pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[i++].supply = "dvdd-pex"; + pcie->supplies[i++].supply = "hvdd-pex-pll"; + pcie->supplies[i++].supply = "hvdd-pex"; + pcie->supplies[i++].supply = "vddio-pexctl-aud"; + } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) { + pcie->num_supplies = 6; + + pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[i++].supply = "avdd-pll-uerefe"; + pcie->supplies[i++].supply = "hvddio-pex"; + pcie->supplies[i++].supply = "dvddio-pex"; + pcie->supplies[i++].supply = "dvdd-pex-pll"; + pcie->supplies[i++].supply = "hvdd-pex-pll-e"; + pcie->supplies[i++].supply = "vddio-pex-ctl"; + } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { + pcie->num_supplies = 7; + + pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[i++].supply = "avddio-pex"; + pcie->supplies[i++].supply = "dvddio-pex"; + pcie->supplies[i++].supply = "avdd-pex-pll"; + pcie->supplies[i++].supply = "hvdd-pex"; + pcie->supplies[i++].supply = "hvdd-pex-pll-e"; + pcie->supplies[i++].supply = "vddio-pex-ctl"; + pcie->supplies[i++].supply = "avdd-pll-erefe"; + } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { + bool need_pexa = false, need_pexb = false; + + /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */ + if (lane_mask & 0x0f) + need_pexa = true; + + /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */ + if (lane_mask & 0x30) + need_pexb = 
true; + + pcie->num_supplies = 4 + (need_pexa ? 2 : 0) + + (need_pexb ? 2 : 0); + + pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[i++].supply = "avdd-pex-pll"; + pcie->supplies[i++].supply = "hvdd-pex"; + pcie->supplies[i++].supply = "vddio-pex-ctl"; + pcie->supplies[i++].supply = "avdd-plle"; + + if (need_pexa) { + pcie->supplies[i++].supply = "avdd-pexa"; + pcie->supplies[i++].supply = "vdd-pexa"; + } + + if (need_pexb) { + pcie->supplies[i++].supply = "avdd-pexb"; + pcie->supplies[i++].supply = "vdd-pexb"; + } + } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { + pcie->num_supplies = 5; + + pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[0].supply = "avdd-pex"; + pcie->supplies[1].supply = "vdd-pex"; + pcie->supplies[2].supply = "avdd-pex-pll"; + pcie->supplies[3].supply = "avdd-plle"; + pcie->supplies[4].supply = "vddio-pex-clk"; + } + + if (of_regulator_bulk_available(dev->of_node, pcie->supplies, + pcie->num_supplies)) + return devm_regulator_bulk_get(dev, pcie->num_supplies, + pcie->supplies); + + /* + * If not all regulators are available for this new scheme, assume + * that the device tree complies with an older version of the device + * tree binding. 
+ */ + dev_info(dev, "using legacy DT binding for power supplies\n"); + + devm_kfree(dev, pcie->supplies); + pcie->num_supplies = 0; + + return tegra_pcie_get_legacy_regulators(pcie); +} + +static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node, *port; + const struct tegra_pcie_soc *soc = pcie->soc; + struct of_pci_range_parser parser; + struct of_pci_range range; + u32 lanes = 0, mask = 0; + unsigned int lane = 0; + struct resource res; + int err; + + if (of_pci_range_parser_init(&parser, np)) { + dev_err(dev, "missing \"ranges\" property\n"); + return -EINVAL; + } + + for_each_of_pci_range(&parser, &range) { + err = of_pci_range_to_resource(&range, np, &res); + if (err < 0) + return err; + + switch (res.flags & IORESOURCE_TYPE_BITS) { + case IORESOURCE_IO: + /* Track the bus -> CPU I/O mapping offset. */ + pcie->offset.io = res.start - range.pci_addr; + + memcpy(&pcie->pio, &res, sizeof(res)); + pcie->pio.name = np->full_name; + + /* + * The Tegra PCIe host bridge uses this to program the + * mapping of the I/O space to the physical address, + * so we override the .start and .end fields here that + * of_pci_range_to_resource() converted to I/O space. + * We also set the IORESOURCE_MEM type to clarify that + * the resource is in the physical memory space. + */ + pcie->io.start = range.cpu_addr; + pcie->io.end = range.cpu_addr + range.size - 1; + pcie->io.flags = IORESOURCE_MEM; + pcie->io.name = "I/O"; + + memcpy(&res, &pcie->io, sizeof(res)); + break; + + case IORESOURCE_MEM: + /* + * Track the bus -> CPU memory mapping offset. This + * assumes that the prefetchable and non-prefetchable + * regions will be the last of type IORESOURCE_MEM in + * the ranges property. 
+ * */ + pcie->offset.mem = res.start - range.pci_addr; + + if (res.flags & IORESOURCE_PREFETCH) { + memcpy(&pcie->prefetch, &res, sizeof(res)); + pcie->prefetch.name = "prefetchable"; + } else { + memcpy(&pcie->mem, &res, sizeof(res)); + pcie->mem.name = "non-prefetchable"; + } + break; + } + } + + err = of_pci_parse_bus_range(np, &pcie->busn); + if (err < 0) { + dev_err(dev, "failed to parse ranges property: %d\n", err); + pcie->busn.name = np->name; + pcie->busn.start = 0; + pcie->busn.end = 0xff; + pcie->busn.flags = IORESOURCE_BUS; + } + + /* parse root ports */ + for_each_child_of_node(np, port) { + struct tegra_pcie_port *rp; + unsigned int index; + u32 value; + + err = of_pci_get_devfn(port); + if (err < 0) { + dev_err(dev, "failed to parse address: %d\n", err); + return err; + } + + index = PCI_SLOT(err); + + if (index < 1 || index > soc->num_ports) { + dev_err(dev, "invalid port number: %d\n", index); + return -EINVAL; + } + + index--; + + err = of_property_read_u32(port, "nvidia,num-lanes", &value); + if (err < 0) { + dev_err(dev, "failed to parse # of lanes: %d\n", + err); + return err; + } + + if (value > 16) { + dev_err(dev, "invalid # of lanes: %u\n", value); + return -EINVAL; + } + + lanes |= value << (index << 3); + + if (!of_device_is_available(port)) { + lane += value; + continue; + } + + mask |= ((1 << value) - 1) << lane; + lane += value; + + rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); + if (!rp) + return -ENOMEM; + + err = of_address_to_resource(port, 0, &rp->regs); + if (err < 0) { + dev_err(dev, "failed to parse address: %d\n", err); + return err; + } + + INIT_LIST_HEAD(&rp->list); + rp->index = index; + rp->lanes = value; + rp->pcie = pcie; + rp->np = port; + + rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); + if (IS_ERR(rp->base)) + return PTR_ERR(rp->base); + + list_add_tail(&rp->list, &pcie->ports); + } + + err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config); + if (err < 0) { + dev_err(dev, "invalid lane 
configuration\n"); + return err; + } + + err = tegra_pcie_get_regulators(pcie, mask); + if (err < 0) + return err; + + return 0; +} + +/* + * FIXME: If there are no PCIe cards attached, then calling this function + * can result in the increase of the bootup time as there are big timeout + * loops. + */ +#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */ +static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port) +{ + struct device *dev = port->pcie->dev; + unsigned int retries = 3; + unsigned long value; + + /* override presence detection */ + value = readl(port->base + RP_PRIV_MISC); + value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT; + value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT; + writel(value, port->base + RP_PRIV_MISC); + + do { + unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT; + + do { + value = readl(port->base + RP_VEND_XP); + + if (value & RP_VEND_XP_DL_UP) + break; + + usleep_range(1000, 2000); + } while (--timeout); + + if (!timeout) { + dev_err(dev, "link %u down, retrying\n", port->index); + goto retry; + } + + timeout = TEGRA_PCIE_LINKUP_TIMEOUT; + + do { + value = readl(port->base + RP_LINK_CONTROL_STATUS); + + if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) + return true; + + usleep_range(1000, 2000); + } while (--timeout); + +retry: + tegra_pcie_port_reset(port); + } while (--retries); + + return false; +} + +static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct tegra_pcie_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) { + dev_info(dev, "probing port %u, using %u lanes\n", + port->index, port->lanes); + + tegra_pcie_port_enable(port); + + if (tegra_pcie_port_check_link(port)) + continue; + + dev_info(dev, "link %u down, ignoring\n", port->index); + + tegra_pcie_port_disable(port); + tegra_pcie_port_free(port); + } +} + +static void tegra_pcie_disable_ports(struct tegra_pcie *pcie) +{ + struct tegra_pcie_port *port, *tmp; + + 
list_for_each_entry_safe(port, tmp, &pcie->ports, list) + tegra_pcie_port_disable(port); +} + +static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = { + { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, + { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, +}; + +static const struct tegra_pcie_soc tegra20_pcie = { + .num_ports = 2, + .ports = tegra20_pcie_ports, + .msi_base_shift = 0, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, + .pads_refclk_cfg0 = 0xfa5cfa5c, + .has_pex_clkreq_en = false, + .has_pex_bias_ctrl = false, + .has_intr_prsnt_sense = false, + .has_cml_clk = false, + .has_gen2 = false, + .force_pca_enable = false, + .program_uphy = true, +}; + +static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = { + { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, + { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, + { .pme.turnoff_bit = 16, .pme.ack_bit = 18 }, +}; + +static const struct tegra_pcie_soc tegra30_pcie = { + .num_ports = 3, + .ports = tegra30_pcie_ports, + .msi_base_shift = 8, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0xfa5cfa5c, + .pads_refclk_cfg1 = 0xfa5cfa5c, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_intr_prsnt_sense = true, + .has_cml_clk = true, + .has_gen2 = false, + .force_pca_enable = false, + .program_uphy = true, +}; + +static const struct tegra_pcie_soc tegra124_pcie = { + .num_ports = 2, + .ports = tegra20_pcie_ports, + .msi_base_shift = 8, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0x44ac44ac, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_intr_prsnt_sense = true, + .has_cml_clk = true, + .has_gen2 = true, + .force_pca_enable = false, + .program_uphy = true, +}; + +static const struct tegra_pcie_soc tegra210_pcie = { + .num_ports = 2, + .ports = tegra20_pcie_ports, + .msi_base_shift = 8, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, + 
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0x90b890b8, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_intr_prsnt_sense = true, + .has_cml_clk = true, + .has_gen2 = true, + .force_pca_enable = true, + .program_uphy = true, +}; + +static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = { + { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, + { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, + { .pme.turnoff_bit = 12, .pme.ack_bit = 14 }, +}; + +static const struct tegra_pcie_soc tegra186_pcie = { + .num_ports = 3, + .ports = tegra186_pcie_ports, + .msi_base_shift = 8, + .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, + .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, + .pads_refclk_cfg0 = 0x80b880b8, + .pads_refclk_cfg1 = 0x000480b8, + .has_pex_clkreq_en = true, + .has_pex_bias_ctrl = true, + .has_intr_prsnt_sense = true, + .has_cml_clk = false, + .has_gen2 = true, + .force_pca_enable = false, + .program_uphy = false, +}; + +static const struct of_device_id tegra_pcie_of_match[] = { + { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie }, + { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie }, + { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie }, + { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie }, + { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie }, + { }, +}; + +static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos) +{ + struct tegra_pcie *pcie = s->private; + + if (list_empty(&pcie->ports)) + return NULL; + + seq_printf(s, "Index Status\n"); + + return seq_list_start(&pcie->ports, *pos); +} + +static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct tegra_pcie *pcie = s->private; + + return seq_list_next(v, &pcie->ports, pos); +} + +static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v) +{ +} + +static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v) +{ + bool up = false, active = false; + struct 
tegra_pcie_port *port; + unsigned int value; + + port = list_entry(v, struct tegra_pcie_port, list); + + value = readl(port->base + RP_VEND_XP); + + if (value & RP_VEND_XP_DL_UP) + up = true; + + value = readl(port->base + RP_LINK_CONTROL_STATUS); + + if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) + active = true; + + seq_printf(s, "%2u ", port->index); + + if (up) + seq_printf(s, "up"); + + if (active) { + if (up) + seq_printf(s, ", "); + + seq_printf(s, "active"); + } + + seq_printf(s, "\n"); + return 0; +} + +static const struct seq_operations tegra_pcie_ports_seq_ops = { + .start = tegra_pcie_ports_seq_start, + .next = tegra_pcie_ports_seq_next, + .stop = tegra_pcie_ports_seq_stop, + .show = tegra_pcie_ports_seq_show, +}; + +static int tegra_pcie_ports_open(struct inode *inode, struct file *file) +{ + struct tegra_pcie *pcie = inode->i_private; + struct seq_file *s; + int err; + + err = seq_open(file, &tegra_pcie_ports_seq_ops); + if (err) + return err; + + s = file->private_data; + s->private = pcie; + + return 0; +} + +static const struct file_operations tegra_pcie_ports_ops = { + .owner = THIS_MODULE, + .open = tegra_pcie_ports_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie) +{ + debugfs_remove_recursive(pcie->debugfs); + pcie->debugfs = NULL; +} + +static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie) +{ + struct dentry *file; + + pcie->debugfs = debugfs_create_dir("pcie", NULL); + if (!pcie->debugfs) + return -ENOMEM; + + file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, + pcie, &tegra_pcie_ports_ops); + if (!file) + goto remove; + + return 0; + +remove: + tegra_pcie_debugfs_exit(pcie); + return -ENOMEM; +} + +static int tegra_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pci_host_bridge *host; + struct tegra_pcie *pcie; + struct pci_bus *child; + int err; + + host = 
devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!host) + return -ENOMEM; + + pcie = pci_host_bridge_priv(host); + host->sysdata = pcie; + platform_set_drvdata(pdev, pcie); + + pcie->soc = of_device_get_match_data(dev); + INIT_LIST_HEAD(&pcie->ports); + pcie->dev = dev; + + err = tegra_pcie_parse_dt(pcie); + if (err < 0) + return err; + + err = tegra_pcie_get_resources(pcie); + if (err < 0) { + dev_err(dev, "failed to request resources: %d\n", err); + return err; + } + + err = tegra_pcie_msi_setup(pcie); + if (err < 0) { + dev_err(dev, "failed to enable MSI support: %d\n", err); + goto put_resources; + } + + pm_runtime_enable(pcie->dev); + err = pm_runtime_get_sync(pcie->dev); + if (err) { + dev_err(dev, "fail to enable pcie controller: %d\n", err); + goto teardown_msi; + } + + err = tegra_pcie_request_resources(pcie); + if (err) + goto pm_runtime_put; + + host->busnr = pcie->busn.start; + host->dev.parent = &pdev->dev; + host->ops = &tegra_pcie_ops; + host->map_irq = tegra_pcie_map_irq; + host->swizzle_irq = pci_common_swizzle; + + err = pci_scan_root_bus_bridge(host); + if (err < 0) { + dev_err(dev, "failed to register host: %d\n", err); + goto free_resources; + } + + pci_bus_size_bridges(host->bus); + pci_bus_assign_resources(host->bus); + + list_for_each_entry(child, &host->bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(host->bus); + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + err = tegra_pcie_debugfs_init(pcie); + if (err < 0) + dev_err(dev, "failed to setup debugfs: %d\n", err); + } + + return 0; + +free_resources: + tegra_pcie_free_resources(pcie); +pm_runtime_put: + pm_runtime_put_sync(pcie->dev); + pm_runtime_disable(pcie->dev); +teardown_msi: + tegra_pcie_msi_teardown(pcie); +put_resources: + tegra_pcie_put_resources(pcie); + return err; +} + +static int tegra_pcie_remove(struct platform_device *pdev) +{ + struct tegra_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *host = 
pci_host_bridge_from_priv(pcie); + struct tegra_pcie_port *port, *tmp; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_pcie_debugfs_exit(pcie); + + pci_stop_root_bus(host->bus); + pci_remove_root_bus(host->bus); + tegra_pcie_free_resources(pcie); + pm_runtime_put_sync(pcie->dev); + pm_runtime_disable(pcie->dev); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + tegra_pcie_msi_teardown(pcie); + + tegra_pcie_put_resources(pcie); + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + tegra_pcie_port_free(port); + + return 0; +} + +static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev) +{ + struct tegra_pcie *pcie = dev_get_drvdata(dev); + struct tegra_pcie_port *port; + + list_for_each_entry(port, &pcie->ports, list) + tegra_pcie_pme_turnoff(port); + + tegra_pcie_disable_ports(pcie); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + tegra_pcie_disable_msi(pcie); + + tegra_pcie_disable_controller(pcie); + tegra_pcie_power_off(pcie); + + return 0; +} + +static int __maybe_unused tegra_pcie_pm_resume(struct device *dev) +{ + struct tegra_pcie *pcie = dev_get_drvdata(dev); + int err; + + err = tegra_pcie_power_on(pcie); + if (err) { + dev_err(dev, "tegra pcie power on fail: %d\n", err); + return err; + } + err = tegra_pcie_enable_controller(pcie); + if (err) { + dev_err(dev, "tegra pcie controller enable fail: %d\n", err); + goto poweroff; + } + tegra_pcie_setup_translations(pcie); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + tegra_pcie_enable_msi(pcie); + + tegra_pcie_enable_ports(pcie); + + return 0; + +poweroff: + tegra_pcie_power_off(pcie); + + return err; +} + +static const struct dev_pm_ops tegra_pcie_pm_ops = { + SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, + tegra_pcie_pm_resume) +}; + +static struct platform_driver tegra_pcie_driver = { + .driver = { + .name = "tegra-pcie", + .of_match_table = tegra_pcie_of_match, + .suppress_bind_attrs = true, + .pm = &tegra_pcie_pm_ops, + }, + .probe = 
tegra_pcie_probe, + .remove = tegra_pcie_remove, +}; +module_platform_driver(tegra_pcie_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c new file mode 100644 index 000000000000..32d1d7b81ef4 --- /dev/null +++ b/drivers/pci/controller/pci-thunder-ecam.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015, 2016 Cavium, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) + +static void set_val(u32 v, int where, int size, u32 *val) +{ + int shift = (where & 3) * 8; + + pr_debug("set_val %04x: %08x\n", (unsigned)(where & ~3), v); + v >>= shift; + if (size == 1) + v &= 0xff; + else if (size == 2) + v &= 0xffff; + *val = v; +} + +static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val) +{ + void __iomem *addr; + u32 v; + + /* Entries are 16-byte aligned; bits[2,3] select word in entry */ + int where_a = where & 0xc; + + if (where_a == 0) { + set_val(e0, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0x4) { + addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + v = readl(addr); + v &= ~0xf; + v |= 2; /* EA entry-1. Base-L */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0x8) { + u32 barl_orig; + u32 barl_rb; + + addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + barl_orig = readl(addr + 0); + writel(0xffffffff, addr + 0); + barl_rb = readl(addr + 0); + writel(barl_orig, addr + 0); + /* zeros in unsettable bits */ + v = ~barl_rb & ~3; + v |= 0xc; /* EA entry-2. 
Offset-L */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xc) { + addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */ + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + v = readl(addr); /* EA entry-3. Base-H */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + return PCIBIOS_DEVICE_NOT_FOUND; +} + +static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_config_window *cfg = bus->sysdata; + int where_a = where & ~3; + void __iomem *addr; + u32 node_bits; + u32 v; + + /* EA Base[63:32] may be missing some bits ... */ + switch (where_a) { + case 0xa8: + case 0xbc: + case 0xd0: + case 0xe4: + break; + default: + return pci_generic_config_read(bus, devfn, where, size, val); + } + + addr = bus->ops->map_bus(bus, devfn, where_a); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + v = readl(addr); + + /* + * Bit 44 of the 64-bit Base must match the same bit in + * the config space access window. Since we are working with + * the high-order 32 bits, shift everything down by 32 bits. 
+ */ + node_bits = (cfg->res.start >> 32) & (1 << 12); + + v |= node_bits; + set_val(v, where, size, val); + + return PCIBIOS_SUCCESSFUL; +} + +static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 v; + u32 vendor_device; + u32 class_rev; + void __iomem *addr; + int cfg_type; + int where_a = where & ~3; + + addr = bus->ops->map_bus(bus, devfn, 0xc); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + v = readl(addr); + + /* Check for non type-00 header */ + cfg_type = (v >> 16) & 0x7f; + + addr = bus->ops->map_bus(bus, devfn, 8); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + class_rev = readl(addr); + if (class_rev == 0xffffffff) + goto no_emulation; + + if ((class_rev & 0xff) >= 8) { + /* Pass-2 handling */ + if (cfg_type) + goto no_emulation; + return thunder_ecam_p2_config_read(bus, devfn, where, + size, val); + } + + /* + * All BARs have fixed addresses specified by the EA + * capability; they must return zero on read. 
+ */ + if (cfg_type == 0 && + ((where >= 0x10 && where < 0x2c) || + (where >= 0x1a4 && where < 0x1bc))) { + /* BAR or SR-IOV BAR */ + *val = 0; + return PCIBIOS_SUCCESSFUL; + } + + addr = bus->ops->map_bus(bus, devfn, 0); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + vendor_device = readl(addr); + if (vendor_device == 0xffffffff) + goto no_emulation; + + pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n", + vendor_device & 0xffff, vendor_device >> 16, class_rev, + (unsigned) where, devfn); + + /* Check for non type-00 header */ + if (cfg_type == 0) { + bool has_msix; + bool is_nic = (vendor_device == 0xa01e177d); + bool is_tns = (vendor_device == 0xa01f177d); + + addr = bus->ops->map_bus(bus, devfn, 0x70); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + /* E_CAP */ + v = readl(addr); + has_msix = (v & 0xff00) != 0; + + if (!has_msix && where_a == 0x70) { + v |= 0xbc00; /* next capability is EA at 0xbc */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xb0) { + addr = bus->ops->map_bus(bus, devfn, where_a); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + v = readl(addr); + if (v & 0xff00) + pr_err("Bad MSIX cap header: %08x\n", v); + v |= 0xbc00; /* next capability is EA at 0xbc */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xbc) { + if (is_nic) + v = 0x40014; /* EA last in chain, 4 entries */ + else if (is_tns) + v = 0x30014; /* EA last in chain, 3 entries */ + else if (has_msix) + v = 0x20014; /* EA last in chain, 2 entries */ + else + v = 0x10014; /* EA last in chain, 1 entry */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a >= 0xc0 && where_a < 0xd0) + /* EA entry-0. PP=0, BAR0 Size:3 */ + return handle_ea_bar(0x80ff0003, + 0x10, bus, devfn, where, + size, val); + if (where_a >= 0xd0 && where_a < 0xe0 && has_msix) + /* EA entry-1. 
PP=0, BAR4 Size:3 */ + return handle_ea_bar(0x80ff0043, + 0x20, bus, devfn, where, + size, val); + if (where_a >= 0xe0 && where_a < 0xf0 && is_tns) + /* EA entry-2. PP=0, BAR2, Size:3 */ + return handle_ea_bar(0x80ff0023, + 0x18, bus, devfn, where, + size, val); + if (where_a >= 0xe0 && where_a < 0xf0 && is_nic) + /* EA entry-2. PP=4, VF_BAR0 (9), Size:3 */ + return handle_ea_bar(0x80ff0493, + 0x1a4, bus, devfn, where, + size, val); + if (where_a >= 0xf0 && where_a < 0x100 && is_nic) + /* EA entry-3. PP=4, VF_BAR4 (d), Size:3 */ + return handle_ea_bar(0x80ff04d3, + 0x1b4, bus, devfn, where, + size, val); + } else if (cfg_type == 1) { + bool is_rsl_bridge = devfn == 0x08; + bool is_rad_bridge = devfn == 0xa0; + bool is_zip_bridge = devfn == 0xa8; + bool is_dfa_bridge = devfn == 0xb0; + bool is_nic_bridge = devfn == 0x10; + + if (where_a == 0x70) { + addr = bus->ops->map_bus(bus, devfn, where_a); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + v = readl(addr); + if (v & 0xff00) + pr_err("Bad PCIe cap header: %08x\n", v); + v |= 0xbc00; /* next capability is EA at 0xbc */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xbc) { + if (is_nic_bridge) + v = 0x10014; /* EA last in chain, 1 entry */ + else + v = 0x00014; /* EA last in chain, no entries */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xc0) { + if (is_rsl_bridge || is_nic_bridge) + v = 0x0101; /* subordinate:secondary = 1:1 */ + else if (is_rad_bridge) + v = 0x0202; /* subordinate:secondary = 2:2 */ + else if (is_zip_bridge) + v = 0x0303; /* subordinate:secondary = 3:3 */ + else if (is_dfa_bridge) + v = 0x0404; /* subordinate:secondary = 4:4 */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xc4 && is_nic_bridge) { + /* Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4 */ + v = 0x80ff0564; + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xc8 && 
is_nic_bridge) { + v = 0x00000002; /* Base-L 64-bit */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xcc && is_nic_bridge) { + v = 0xfffffffe; /* MaxOffset-L 64-bit */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xd0 && is_nic_bridge) { + v = 0x00008430; /* NIC Base-H */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + if (where_a == 0xd4 && is_nic_bridge) { + v = 0x0000000f; /* MaxOffset-H */ + set_val(v, where, size, val); + return PCIBIOS_SUCCESSFUL; + } + } +no_emulation: + return pci_generic_config_read(bus, devfn, where, size, val); +} + +static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + /* + * All BARs have fixed addresses; ignore BAR writes so they + * don't get corrupted. + */ + if ((where >= 0x10 && where < 0x2c) || + (where >= 0x1a4 && where < 0x1bc)) + /* BAR or SR-IOV BAR */ + return PCIBIOS_SUCCESSFUL; + + return pci_generic_config_write(bus, devfn, where, size, val); +} + +struct pci_ecam_ops pci_thunder_ecam_ops = { + .bus_shift = 20, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = thunder_ecam_config_read, + .write = thunder_ecam_config_write, + } +}; + +#ifdef CONFIG_PCI_HOST_THUNDER_ECAM + +static const struct of_device_id thunder_ecam_of_match[] = { + { .compatible = "cavium,pci-host-thunder-ecam" }, + { }, +}; + +static int thunder_ecam_probe(struct platform_device *pdev) +{ + return pci_host_common_probe(pdev, &pci_thunder_ecam_ops); +} + +static struct platform_driver thunder_ecam_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = thunder_ecam_of_match, + .suppress_bind_attrs = true, + }, + .probe = thunder_ecam_probe, +}; +builtin_platform_driver(thunder_ecam_driver); + +#endif +#endif diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c new file mode 100644 index 000000000000..f127ce8bd4ef --- /dev/null +++ 
b/drivers/pci/controller/pci-thunder-pem.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015 - 2016 Cavium, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../pci.h" + +#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) + +#define PEM_CFG_WR 0x28 +#define PEM_CFG_RD 0x30 + +struct thunder_pem_pci { + u32 ea_entry[3]; + void __iomem *pem_reg_base; +}; + +static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u64 read_val, tmp_val; + struct pci_config_window *cfg = bus->sysdata; + struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; + + if (devfn != 0 || where >= 2048) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* + * 32-bit accesses only. Write the address to the low order + * bits of PEM_CFG_RD, then trigger the read by reading back. + * The config data lands in the upper 32-bits of PEM_CFG_RD. + */ + read_val = where & ~3ull; + writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD); + read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); + read_val >>= 32; + + /* + * The config space contains some garbage, fix it up. Also + * synthesize an EA capability for the BAR used by MSI-X. + */ + switch (where & ~3) { + case 0x40: + read_val &= 0xffff00ff; + read_val |= 0x00007000; /* Skip MSI CAP */ + break; + case 0x70: /* Express Cap */ + /* + * Change PME interrupt to vector 2 on T88 where it + * reads as 0, else leave it alone. + */ + if (!(read_val & (0x1f << 25))) + read_val |= (2u << 25); + break; + case 0xb0: /* MSI-X Cap */ + /* TableSize=2 or 4, Next Cap is EA */ + read_val &= 0xc00000ff; + /* + * If Express Cap(0x70) raw PME vector reads as 0 we are on + * T88 and TableSize is reported as 4, else TableSize + * is 2. 
+ */ + writeq(0x70, pem_pci->pem_reg_base + PEM_CFG_RD); + tmp_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); + tmp_val >>= 32; + if (!(tmp_val & (0x1f << 25))) + read_val |= 0x0003bc00; + else + read_val |= 0x0001bc00; + break; + case 0xb4: + /* Table offset=0, BIR=0 */ + read_val = 0x00000000; + break; + case 0xb8: + /* BPA offset=0xf0000, BIR=0 */ + read_val = 0x000f0000; + break; + case 0xbc: + /* EA, 1 entry, no next Cap */ + read_val = 0x00010014; + break; + case 0xc0: + /* DW2 for type-1 */ + read_val = 0x00000000; + break; + case 0xc4: + /* Entry BEI=0, PP=0x00, SP=0xff, ES=3 */ + read_val = 0x80ff0003; + break; + case 0xc8: + read_val = pem_pci->ea_entry[0]; + break; + case 0xcc: + read_val = pem_pci->ea_entry[1]; + break; + case 0xd0: + read_val = pem_pci->ea_entry[2]; + break; + default: + break; + } + read_val >>= (8 * (where & 3)); + switch (size) { + case 1: + read_val &= 0xff; + break; + case 2: + read_val &= 0xffff; + break; + default: + break; + } + *val = read_val; + return PCIBIOS_SUCCESSFUL; +} + +static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_config_window *cfg = bus->sysdata; + + if (bus->number < cfg->busr.start || + bus->number > cfg->busr.end) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * The first device on the bus is the PEM PCIe bridge. + * Special case its config access. + */ + if (bus->number == cfg->busr.start) + return thunder_pem_bridge_read(bus, devfn, where, size, val); + + return pci_generic_config_read(bus, devfn, where, size, val); +} + +/* + * Some of the w1c_bits below also include read-only or non-writable + * reserved bits, this makes the code simpler and is OK as the bits + * are not affected by writing zeros to them. 
+ */ +static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned) +{ + u32 w1c_bits = 0; + + switch (where_aligned) { + case 0x04: /* Command/Status */ + case 0x1c: /* Base and I/O Limit/Secondary Status */ + w1c_bits = 0xff000000; + break; + case 0x44: /* Power Management Control and Status */ + w1c_bits = 0xfffffe00; + break; + case 0x78: /* Device Control/Device Status */ + case 0x80: /* Link Control/Link Status */ + case 0x88: /* Slot Control/Slot Status */ + case 0x90: /* Root Status */ + case 0xa0: /* Link Control 2 Registers/Link Status 2 */ + w1c_bits = 0xffff0000; + break; + case 0x104: /* Uncorrectable Error Status */ + case 0x110: /* Correctable Error Status */ + case 0x130: /* Error Status */ + case 0x160: /* Link Control 4 */ + w1c_bits = 0xffffffff; + break; + default: + break; + } + return w1c_bits; +} + +/* Some bits must be written to one so they appear to be read-only. */ +static u32 thunder_pem_bridge_w1_bits(u64 where_aligned) +{ + u32 w1_bits; + + switch (where_aligned) { + case 0x1c: /* I/O Base / I/O Limit, Secondary Status */ + /* Force 32-bit I/O addressing. */ + w1_bits = 0x0101; + break; + case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */ + /* Force 64-bit addressing */ + w1_bits = 0x00010001; + break; + default: + w1_bits = 0; + break; + } + return w1_bits; +} + +static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_config_window *cfg = bus->sysdata; + struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; + u64 write_val, read_val; + u64 where_aligned = where & ~3ull; + u32 mask = 0; + + + if (devfn != 0 || where >= 2048) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * 32-bit accesses only. If the write is for a size smaller + * than 32-bits, we must first read the 32-bit value and merge + * in the desired bits and then write the whole 32-bits back + * out. 
+ */ + switch (size) { + case 1: + writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); + read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); + read_val >>= 32; + mask = ~(0xff << (8 * (where & 3))); + read_val &= mask; + val = (val & 0xff) << (8 * (where & 3)); + val |= (u32)read_val; + break; + case 2: + writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); + read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); + read_val >>= 32; + mask = ~(0xffff << (8 * (where & 3))); + read_val &= mask; + val = (val & 0xffff) << (8 * (where & 3)); + val |= (u32)read_val; + break; + default: + break; + } + + /* + * By expanding the write width to 32 bits, we may + * inadvertently hit some W1C bits that were not intended to + * be written. Calculate the mask that must be applied to the + * data to be written to avoid these cases. + */ + if (mask) { + u32 w1c_bits = thunder_pem_bridge_w1c_bits(where); + + if (w1c_bits) { + mask &= w1c_bits; + val &= ~mask; + } + } + + /* + * Some bits must be read-only with value of one. Since the + * access method allows these to be cleared if a zero is + * written, force them to one before writing. + */ + val |= thunder_pem_bridge_w1_bits(where_aligned); + + /* + * Low order bits are the config address, the high order 32 + * bits are the data to be written. + */ + write_val = (((u64)val) << 32) | where_aligned; + writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR); + return PCIBIOS_SUCCESSFUL; +} + +static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_config_window *cfg = bus->sysdata; + + if (bus->number < cfg->busr.start || + bus->number > cfg->busr.end) + return PCIBIOS_DEVICE_NOT_FOUND; + /* + * The first device on the bus is the PEM PCIe bridge. + * Special case its config access. 
+ */ + if (bus->number == cfg->busr.start) + return thunder_pem_bridge_write(bus, devfn, where, size, val); + + + return pci_generic_config_write(bus, devfn, where, size, val); +} + +static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg, + struct resource *res_pem) +{ + struct thunder_pem_pci *pem_pci; + resource_size_t bar4_start; + + pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL); + if (!pem_pci) + return -ENOMEM; + + pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000); + if (!pem_pci->pem_reg_base) + return -ENOMEM; + + /* + * The MSI-X BAR for the PEM and AER interrupts is located at + * a fixed offset from the PEM register base. Generate a + * fragment of the synthesized Enhanced Allocation capability + * structure here for the BAR. + */ + bar4_start = res_pem->start + 0xf00000; + pem_pci->ea_entry[0] = (u32)bar4_start | 2; + pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u; + pem_pci->ea_entry[2] = (u32)(bar4_start >> 32); + + cfg->priv = pem_pci; + return 0; +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) + +#define PEM_RES_BASE 0x87e0c0000000UL +#define PEM_NODE_MASK GENMASK(45, 44) +#define PEM_INDX_MASK GENMASK(26, 24) +#define PEM_MIN_DOM_IN_NODE 4 +#define PEM_MAX_DOM_IN_NODE 10 + +static void thunder_pem_reserve_range(struct device *dev, int seg, + struct resource *r) +{ + resource_size_t start = r->start, end = r->end; + struct resource *res; + const char *regionid; + + regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg); + if (!regionid) + return; + + res = request_mem_region(start, end - start + 1, regionid); + if (res) + res->flags &= ~IORESOURCE_BUSY; + else + kfree(regionid); + + dev_info(dev, "%pR %s reserved\n", r, + res ? 
"has been" : "could not be"); +} + +static void thunder_pem_legacy_fw(struct acpi_pci_root *root, + struct resource *res_pem) +{ + int node = acpi_get_node(root->device->handle); + int index; + + if (node == NUMA_NO_NODE) + node = 0; + + index = root->segment - PEM_MIN_DOM_IN_NODE; + index -= node * PEM_MAX_DOM_IN_NODE; + res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | + FIELD_PREP(PEM_INDX_MASK, index); + res_pem->flags = IORESOURCE_MEM; +} + +static int thunder_pem_acpi_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + struct acpi_pci_root *root = acpi_driver_data(adev); + struct resource *res_pem; + int ret; + + res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL); + if (!res_pem) + return -ENOMEM; + + ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem); + + /* + * If we fail to gather resources it means that we run with old + * FW where we need to calculate PEM-specific resources manually. + */ + if (ret) { + thunder_pem_legacy_fw(root, res_pem); + /* + * Reserve 64K size PEM specific resources. The full 16M range + * size is required for thunder_pem_init() call. + */ + res_pem->end = res_pem->start + SZ_64K - 1; + thunder_pem_reserve_range(dev, root->segment, res_pem); + res_pem->end = res_pem->start + SZ_16M - 1; + + /* Reserve PCI configuration space as well. 
*/ + thunder_pem_reserve_range(dev, root->segment, &cfg->res); + } + + return thunder_pem_init(dev, cfg, res_pem); +} + +struct pci_ecam_ops thunder_pem_ecam_ops = { + .bus_shift = 24, + .init = thunder_pem_acpi_init, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = thunder_pem_config_read, + .write = thunder_pem_config_write, + } +}; + +#endif + +#ifdef CONFIG_PCI_HOST_THUNDER_PEM + +static int thunder_pem_platform_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res_pem; + + if (!dev->of_node) + return -EINVAL; + + /* + * The second register range is the PEM bridge to the PCIe + * bus. It has a different config access method than those + * devices behind the bridge. + */ + res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res_pem) { + dev_err(dev, "missing \"reg[1]\"property\n"); + return -EINVAL; + } + + return thunder_pem_init(dev, cfg, res_pem); +} + +static struct pci_ecam_ops pci_thunder_pem_ops = { + .bus_shift = 24, + .init = thunder_pem_platform_init, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = thunder_pem_config_read, + .write = thunder_pem_config_write, + } +}; + +static const struct of_device_id thunder_pem_of_match[] = { + { .compatible = "cavium,pci-host-thunder-pem" }, + { }, +}; + +static int thunder_pem_probe(struct platform_device *pdev) +{ + return pci_host_common_probe(pdev, &pci_thunder_pem_ops); +} + +static struct platform_driver thunder_pem_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = thunder_pem_of_match, + .suppress_bind_attrs = true, + }, + .probe = thunder_pem_probe, +}; +builtin_platform_driver(thunder_pem_driver); + +#endif +#endif diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c new file mode 100644 index 000000000000..68b8bfbdb867 --- /dev/null +++ b/drivers/pci/controller/pci-v3-semi.c @@ -0,0 +1,963 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* + * Support for V3 Semiconductor PCI Local Bus to PCI Bridge + * Copyright (C) 2017 Linus Walleij + * + * Based on the code from arch/arm/mach-integrator/pci_v3.c + * Copyright (C) 1999 ARM Limited + * Copyright (C) 2000-2001 Deep Blue Solutions Ltd + * + * Contributors to the old driver include: + * Russell King + * David A. Rusling (uHAL, ARM Firmware suite) + * Rob Herring + * Liviu Dudau + * Grant Likely + * Arnd Bergmann + * Bjorn Helgaas + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +#define V3_PCI_VENDOR 0x00000000 +#define V3_PCI_DEVICE 0x00000002 +#define V3_PCI_CMD 0x00000004 +#define V3_PCI_STAT 0x00000006 +#define V3_PCI_CC_REV 0x00000008 +#define V3_PCI_HDR_CFG 0x0000000C +#define V3_PCI_IO_BASE 0x00000010 +#define V3_PCI_BASE0 0x00000014 +#define V3_PCI_BASE1 0x00000018 +#define V3_PCI_SUB_VENDOR 0x0000002C +#define V3_PCI_SUB_ID 0x0000002E +#define V3_PCI_ROM 0x00000030 +#define V3_PCI_BPARAM 0x0000003C +#define V3_PCI_MAP0 0x00000040 +#define V3_PCI_MAP1 0x00000044 +#define V3_PCI_INT_STAT 0x00000048 +#define V3_PCI_INT_CFG 0x0000004C +#define V3_LB_BASE0 0x00000054 +#define V3_LB_BASE1 0x00000058 +#define V3_LB_MAP0 0x0000005E +#define V3_LB_MAP1 0x00000062 +#define V3_LB_BASE2 0x00000064 +#define V3_LB_MAP2 0x00000066 +#define V3_LB_SIZE 0x00000068 +#define V3_LB_IO_BASE 0x0000006E +#define V3_FIFO_CFG 0x00000070 +#define V3_FIFO_PRIORITY 0x00000072 +#define V3_FIFO_STAT 0x00000074 +#define V3_LB_ISTAT 0x00000076 +#define V3_LB_IMASK 0x00000077 +#define V3_SYSTEM 0x00000078 +#define V3_LB_CFG 0x0000007A +#define V3_PCI_CFG 0x0000007C +#define V3_DMA_PCI_ADR0 0x00000080 +#define V3_DMA_PCI_ADR1 0x00000090 +#define V3_DMA_LOCAL_ADR0 0x00000084 +#define V3_DMA_LOCAL_ADR1 0x00000094 +#define V3_DMA_LENGTH0 0x00000088 +#define V3_DMA_LENGTH1 0x00000098 +#define V3_DMA_CSR0 0x0000008B +#define 
V3_DMA_CSR1 0x0000009B +#define V3_DMA_CTLB_ADR0 0x0000008C +#define V3_DMA_CTLB_ADR1 0x0000009C +#define V3_DMA_DELAY 0x000000E0 +#define V3_MAIL_DATA 0x000000C0 +#define V3_PCI_MAIL_IEWR 0x000000D0 +#define V3_PCI_MAIL_IERD 0x000000D2 +#define V3_LB_MAIL_IEWR 0x000000D4 +#define V3_LB_MAIL_IERD 0x000000D6 +#define V3_MAIL_WR_STAT 0x000000D8 +#define V3_MAIL_RD_STAT 0x000000DA +#define V3_QBA_MAP 0x000000DC + +/* PCI STATUS bits */ +#define V3_PCI_STAT_PAR_ERR BIT(15) +#define V3_PCI_STAT_SYS_ERR BIT(14) +#define V3_PCI_STAT_M_ABORT_ERR BIT(13) +#define V3_PCI_STAT_T_ABORT_ERR BIT(12) + +/* LB ISTAT bits */ +#define V3_LB_ISTAT_MAILBOX BIT(7) +#define V3_LB_ISTAT_PCI_RD BIT(6) +#define V3_LB_ISTAT_PCI_WR BIT(5) +#define V3_LB_ISTAT_PCI_INT BIT(4) +#define V3_LB_ISTAT_PCI_PERR BIT(3) +#define V3_LB_ISTAT_I2O_QWR BIT(2) +#define V3_LB_ISTAT_DMA1 BIT(1) +#define V3_LB_ISTAT_DMA0 BIT(0) + +/* PCI COMMAND bits */ +#define V3_COMMAND_M_FBB_EN BIT(9) +#define V3_COMMAND_M_SERR_EN BIT(8) +#define V3_COMMAND_M_PAR_EN BIT(6) +#define V3_COMMAND_M_MASTER_EN BIT(2) +#define V3_COMMAND_M_MEM_EN BIT(1) +#define V3_COMMAND_M_IO_EN BIT(0) + +/* SYSTEM bits */ +#define V3_SYSTEM_M_RST_OUT BIT(15) +#define V3_SYSTEM_M_LOCK BIT(14) +#define V3_SYSTEM_UNLOCK 0xa05f + +/* PCI CFG bits */ +#define V3_PCI_CFG_M_I2O_EN BIT(15) +#define V3_PCI_CFG_M_IO_REG_DIS BIT(14) +#define V3_PCI_CFG_M_IO_DIS BIT(13) +#define V3_PCI_CFG_M_EN3V BIT(12) +#define V3_PCI_CFG_M_RETRY_EN BIT(10) +#define V3_PCI_CFG_M_AD_LOW1 BIT(9) +#define V3_PCI_CFG_M_AD_LOW0 BIT(8) +/* + * This is the value applied to C/BE[3:1], with bit 0 always held 0 + * during DMA access. 
+ */ +#define V3_PCI_CFG_M_RTYPE_SHIFT 5 +#define V3_PCI_CFG_M_WTYPE_SHIFT 1 +#define V3_PCI_CFG_TYPE_DEFAULT 0x3 + +/* PCI BASE bits (PCI -> Local Bus) */ +#define V3_PCI_BASE_M_ADR_BASE 0xFFF00000U +#define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00U +#define V3_PCI_BASE_M_PREFETCH BIT(3) +#define V3_PCI_BASE_M_TYPE (3 << 1) +#define V3_PCI_BASE_M_IO BIT(0) + +/* PCI MAP bits (PCI -> Local bus) */ +#define V3_PCI_MAP_M_MAP_ADR 0xFFF00000U +#define V3_PCI_MAP_M_RD_POST_INH BIT(15) +#define V3_PCI_MAP_M_ROM_SIZE (3 << 10) +#define V3_PCI_MAP_M_SWAP (3 << 8) +#define V3_PCI_MAP_M_ADR_SIZE 0x000000F0U +#define V3_PCI_MAP_M_REG_EN BIT(1) +#define V3_PCI_MAP_M_ENABLE BIT(0) + +/* LB_BASE0,1 bits (Local bus -> PCI) */ +#define V3_LB_BASE_ADR_BASE 0xfff00000U +#define V3_LB_BASE_SWAP (3 << 8) +#define V3_LB_BASE_ADR_SIZE (15 << 4) +#define V3_LB_BASE_PREFETCH BIT(3) +#define V3_LB_BASE_ENABLE BIT(0) + +#define V3_LB_BASE_ADR_SIZE_1MB (0 << 4) +#define V3_LB_BASE_ADR_SIZE_2MB (1 << 4) +#define V3_LB_BASE_ADR_SIZE_4MB (2 << 4) +#define V3_LB_BASE_ADR_SIZE_8MB (3 << 4) +#define V3_LB_BASE_ADR_SIZE_16MB (4 << 4) +#define V3_LB_BASE_ADR_SIZE_32MB (5 << 4) +#define V3_LB_BASE_ADR_SIZE_64MB (6 << 4) +#define V3_LB_BASE_ADR_SIZE_128MB (7 << 4) +#define V3_LB_BASE_ADR_SIZE_256MB (8 << 4) +#define V3_LB_BASE_ADR_SIZE_512MB (9 << 4) +#define V3_LB_BASE_ADR_SIZE_1GB (10 << 4) +#define V3_LB_BASE_ADR_SIZE_2GB (11 << 4) + +#define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE) + +/* LB_MAP0,1 bits (Local bus -> PCI) */ +#define V3_LB_MAP_MAP_ADR 0xfff0U +#define V3_LB_MAP_TYPE (7 << 1) +#define V3_LB_MAP_AD_LOW_EN BIT(0) + +#define V3_LB_MAP_TYPE_IACK (0 << 1) +#define V3_LB_MAP_TYPE_IO (1 << 1) +#define V3_LB_MAP_TYPE_MEM (3 << 1) +#define V3_LB_MAP_TYPE_CONFIG (5 << 1) +#define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1) + +#define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR) + +/* LB_BASE2 bits (Local bus -> PCI IO) */ +#define V3_LB_BASE2_ADR_BASE 0xff00U +#define 
V3_LB_BASE2_SWAP_AUTO (3 << 6) +#define V3_LB_BASE2_ENABLE BIT(0) + +#define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE) + +/* LB_MAP2 bits (Local bus -> PCI IO) */ +#define V3_LB_MAP2_MAP_ADR 0xff00U + +#define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR) + +/* FIFO priority bits */ +#define V3_FIFO_PRIO_LOCAL BIT(12) +#define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB BIT(10) +#define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 BIT(11) +#define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY (BIT(10)|BIT(11)) +#define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB BIT(8) +#define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 BIT(9) +#define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY (BIT(8)|BIT(9)) +#define V3_FIFO_PRIO_PCI BIT(4) +#define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB BIT(2) +#define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 BIT(3) +#define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY (BIT(2)|BIT(3)) +#define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB BIT(0) +#define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1 BIT(1) +#define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY (BIT(0)|BIT(1)) + +/* Local bus configuration bits */ +#define V3_LB_CFG_LB_TO_64_CYCLES 0x0000 +#define V3_LB_CFG_LB_TO_256_CYCLES BIT(13) +#define V3_LB_CFG_LB_TO_512_CYCLES BIT(14) +#define V3_LB_CFG_LB_TO_1024_CYCLES (BIT(13)|BIT(14)) +#define V3_LB_CFG_LB_RST BIT(12) +#define V3_LB_CFG_LB_PPC_RDY BIT(11) +#define V3_LB_CFG_LB_LB_INT BIT(10) +#define V3_LB_CFG_LB_ERR_EN BIT(9) +#define V3_LB_CFG_LB_RDY_EN BIT(8) +#define V3_LB_CFG_LB_BE_IMODE BIT(7) +#define V3_LB_CFG_LB_BE_OMODE BIT(6) +#define V3_LB_CFG_LB_ENDIAN BIT(5) +#define V3_LB_CFG_LB_PARK_EN BIT(4) +#define V3_LB_CFG_LB_FBB_DIS BIT(2) + +/* ARM Integrator-specific extended control registers */ +#define INTEGRATOR_SC_PCI_OFFSET 0x18 +#define INTEGRATOR_SC_PCI_ENABLE BIT(0) +#define INTEGRATOR_SC_PCI_INTCLR BIT(1) +#define INTEGRATOR_SC_LBFADDR_OFFSET 0x20 +#define INTEGRATOR_SC_LBFCODE_OFFSET 0x24 + +struct v3_pci { + struct device *dev; + void __iomem *base; + void __iomem *config_base; + struct pci_bus *bus; + u32 config_mem; + u32 io_mem; + u32 
non_pre_mem; + u32 pre_mem; + phys_addr_t io_bus_addr; + phys_addr_t non_pre_bus_addr; + phys_addr_t pre_bus_addr; + struct regmap *map; +}; + +/* + * The V3 PCI interface chip in Integrator provides several windows from + * local bus memory into the PCI memory areas. Unfortunately, there + * are not really enough windows for our usage, therefore we reuse + * one of the windows for access to PCI configuration space. On the + * Integrator/AP, the memory map is as follows: + * + * Local Bus Memory Usage + * + * 40000000 - 4FFFFFFF PCI memory. 256M non-prefetchable + * 50000000 - 5FFFFFFF PCI memory. 256M prefetchable + * 60000000 - 60FFFFFF PCI IO. 16M + * 61000000 - 61FFFFFF PCI Configuration. 16M + * + * There are three V3 windows, each described by a pair of V3 registers. + * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2. + * Base0 and Base1 can be used for any type of PCI memory access. Base2 + * can be used either for PCI I/O or for I20 accesses. By default, uHAL + * uses this only for PCI IO space. + * + * Normally these spaces are mapped using the following base registers: + * + * Usage Local Bus Memory Base/Map registers used + * + * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0 + * Mem 50000000 - 5FFFFFFF LB_BASE1/LB_MAP1 + * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2 + * Cfg 61000000 - 61FFFFFF + * + * This means that I20 and PCI configuration space accesses will fail. + * When PCI configuration accesses are needed (via the uHAL PCI + * configuration space primitives) we must remap the spaces as follows: + * + * Usage Local Bus Memory Base/Map registers used + * + * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0 + * Mem 50000000 - 5FFFFFFF LB_BASE0/LB_MAP0 + * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2 + * Cfg 61000000 - 61FFFFFF LB_BASE1/LB_MAP1 + * + * To make this work, the code depends on overlapping windows working. 
+ * The V3 chip translates an address by checking its range within + * each of the BASE/MAP pairs in turn (in ascending register number + * order). It will use the first matching pair. So, for example, + * if the same address is mapped by both LB_BASE0/LB_MAP0 and + * LB_BASE1/LB_MAP1, the V3 will use the translation from + * LB_BASE0/LB_MAP0. + * + * To allow PCI Configuration space access, the code enlarges the + * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M. This occludes + * the windows currently mapped by LB_BASE1/LB_MAP1 so that it can + * be remapped for use by configuration cycles. + * + * At the end of the PCI Configuration space accesses, + * LB_BASE1/LB_MAP1 is reset to map PCI Memory. Finally the window + * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to + * reveal the now restored LB_BASE1/LB_MAP1 window. + * + * NOTE: We do not set up I2O mapping. I suspect that this is only + * for an intelligent (target) device. Using I2O disables most of + * the mappings into PCI memory. 
+ */ +static void __iomem *v3_map_bus(struct pci_bus *bus, + unsigned int devfn, int offset) +{ + struct v3_pci *v3 = bus->sysdata; + unsigned int address, mapaddress, busnr; + + busnr = bus->number; + if (busnr == 0) { + int slot = PCI_SLOT(devfn); + + /* + * local bus segment so need a type 0 config cycle + * + * build the PCI configuration "address" with one-hot in + * A31-A11 + * + * mapaddress: + * 3:1 = config cycle (101) + * 0 = PCI A1 & A0 are 0 (0) + */ + address = PCI_FUNC(devfn) << 8; + mapaddress = V3_LB_MAP_TYPE_CONFIG; + + if (slot > 12) + /* + * high order bits are handled by the MAP register + */ + mapaddress |= BIT(slot - 5); + else + /* + * low order bits handled directly in the address + */ + address |= BIT(slot + 11); + } else { + /* + * not the local bus segment so need a type 1 config cycle + * + * address: + * 23:16 = bus number + * 15:11 = slot number (7:3 of devfn) + * 10:8 = func number (2:0 of devfn) + * + * mapaddress: + * 3:1 = config cycle (101) + * 0 = PCI A1 & A0 from host bus (1) + */ + mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN; + address = (busnr << 16) | (devfn << 8); + } + + /* + * Set up base0 to see all 512Mbytes of memory space (not + * prefetchable), this frees up base1 for re-use by + * configuration memory + */ + writel(v3_addr_to_lb_base(v3->non_pre_mem) | + V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE, + v3->base + V3_LB_BASE0); + + /* + * Set up base1/map1 to point into configuration space. + * The config mem is always 16MB. 
+ */ + writel(v3_addr_to_lb_base(v3->config_mem) | + V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE, + v3->base + V3_LB_BASE1); + writew(mapaddress, v3->base + V3_LB_MAP1); + + return v3->config_base + address + offset; +} + +static void v3_unmap_bus(struct v3_pci *v3) +{ + /* + * Reassign base1 for use by prefetchable PCI memory + */ + writel(v3_addr_to_lb_base(v3->pre_mem) | + V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH | + V3_LB_BASE_ENABLE, + v3->base + V3_LB_BASE1); + writew(v3_addr_to_lb_map(v3->pre_bus_addr) | + V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */ + v3->base + V3_LB_MAP1); + + /* + * And shrink base0 back to a 256M window (NOTE: MAP0 already correct) + */ + writel(v3_addr_to_lb_base(v3->non_pre_mem) | + V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE, + v3->base + V3_LB_BASE0); +} + +static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn, + int config, int size, u32 *value) +{ + struct v3_pci *v3 = bus->sysdata; + int ret; + + dev_dbg(&bus->dev, + "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", + PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); + ret = pci_generic_config_read(bus, fn, config, size, value); + v3_unmap_bus(v3); + return ret; +} + +static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn, + int config, int size, u32 value) +{ + struct v3_pci *v3 = bus->sysdata; + int ret; + + dev_dbg(&bus->dev, + "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", + PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); + ret = pci_generic_config_write(bus, fn, config, size, value); + v3_unmap_bus(v3); + return ret; +} + +static struct pci_ops v3_pci_ops = { + .map_bus = v3_map_bus, + .read = v3_pci_read_config, + .write = v3_pci_write_config, +}; + +static irqreturn_t v3_irq(int irq, void *data) +{ + struct v3_pci *v3 = data; + struct device *dev = v3->dev; + u32 status; + + status = readw(v3->base + V3_PCI_STAT); + if (status & V3_PCI_STAT_PAR_ERR) + dev_err(dev, "parity error 
interrupt\n"); + if (status & V3_PCI_STAT_SYS_ERR) + dev_err(dev, "system error interrupt\n"); + if (status & V3_PCI_STAT_M_ABORT_ERR) + dev_err(dev, "master abort error interrupt\n"); + if (status & V3_PCI_STAT_T_ABORT_ERR) + dev_err(dev, "target abort error interrupt\n"); + writew(status, v3->base + V3_PCI_STAT); + + status = readb(v3->base + V3_LB_ISTAT); + if (status & V3_LB_ISTAT_MAILBOX) + dev_info(dev, "PCI mailbox interrupt\n"); + if (status & V3_LB_ISTAT_PCI_RD) + dev_err(dev, "PCI target LB->PCI READ abort interrupt\n"); + if (status & V3_LB_ISTAT_PCI_WR) + dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n"); + if (status & V3_LB_ISTAT_PCI_INT) + dev_info(dev, "PCI pin interrupt\n"); + if (status & V3_LB_ISTAT_PCI_PERR) + dev_err(dev, "PCI parity error interrupt\n"); + if (status & V3_LB_ISTAT_I2O_QWR) + dev_info(dev, "I2O inbound post queue interrupt\n"); + if (status & V3_LB_ISTAT_DMA1) + dev_info(dev, "DMA channel 1 interrupt\n"); + if (status & V3_LB_ISTAT_DMA0) + dev_info(dev, "DMA channel 0 interrupt\n"); + /* Clear all possible interrupts on the local bus */ + writeb(0, v3->base + V3_LB_ISTAT); + if (v3->map) + regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, + INTEGRATOR_SC_PCI_ENABLE | + INTEGRATOR_SC_PCI_INTCLR); + + return IRQ_HANDLED; +} + +static int v3_integrator_init(struct v3_pci *v3) +{ + unsigned int val; + + v3->map = + syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon"); + if (IS_ERR(v3->map)) { + dev_err(v3->dev, "no syscon\n"); + return -ENODEV; + } + + regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val); + /* Take the PCI bridge out of reset, clear IRQs */ + regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, + INTEGRATOR_SC_PCI_ENABLE | + INTEGRATOR_SC_PCI_INTCLR); + + if (!(val & INTEGRATOR_SC_PCI_ENABLE)) { + /* If we were in reset we need to sleep a bit */ + msleep(230); + + /* Set the physical base for the controller itself */ + writel(0x6200, v3->base + V3_LB_IO_BASE); + + /* Wait for the mailbox to settle after 
reset */ + do { + writeb(0xaa, v3->base + V3_MAIL_DATA); + writeb(0x55, v3->base + V3_MAIL_DATA + 4); + } while (readb(v3->base + V3_MAIL_DATA) != 0xaa && + readb(v3->base + V3_MAIL_DATA) != 0x55); + } + + dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n"); + + return 0; +} + +static int v3_pci_setup_resource(struct v3_pci *v3, + resource_size_t io_base, + struct pci_host_bridge *host, + struct resource_entry *win) +{ + struct device *dev = v3->dev; + struct resource *mem; + struct resource *io; + int ret; + + switch (resource_type(win->res)) { + case IORESOURCE_IO: + io = win->res; + io->name = "V3 PCI I/O"; + v3->io_mem = io_base; + v3->io_bus_addr = io->start - win->offset; + dev_dbg(dev, "I/O window %pR, bus addr %pap\n", + io, &v3->io_bus_addr); + ret = pci_remap_iospace(io, io_base); + if (ret) { + dev_warn(dev, + "error %d: failed to map resource %pR\n", + ret, io); + return ret; + } + /* Setup window 2 - PCI I/O */ + writel(v3_addr_to_lb_base2(v3->io_mem) | + V3_LB_BASE2_ENABLE, + v3->base + V3_LB_BASE2); + writew(v3_addr_to_lb_map2(v3->io_bus_addr), + v3->base + V3_LB_MAP2); + break; + case IORESOURCE_MEM: + mem = win->res; + if (mem->flags & IORESOURCE_PREFETCH) { + mem->name = "V3 PCI PRE-MEM"; + v3->pre_mem = mem->start; + v3->pre_bus_addr = mem->start - win->offset; + dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n", + mem, &v3->pre_bus_addr); + if (resource_size(mem) != SZ_256M) { + dev_err(dev, "prefetchable memory range is not 256MB\n"); + return -EINVAL; + } + if (v3->non_pre_mem && + (mem->start != v3->non_pre_mem + SZ_256M)) { + dev_err(dev, + "prefetchable memory is not adjacent to non-prefetchable memory\n"); + return -EINVAL; + } + /* Setup window 1 - PCI prefetchable memory */ + writel(v3_addr_to_lb_base(v3->pre_mem) | + V3_LB_BASE_ADR_SIZE_256MB | + V3_LB_BASE_PREFETCH | + V3_LB_BASE_ENABLE, + v3->base + V3_LB_BASE1); + writew(v3_addr_to_lb_map(v3->pre_bus_addr) | + V3_LB_MAP_TYPE_MEM, /* Was 
V3_LB_MAP_TYPE_MEM_MULTIPLE */ + v3->base + V3_LB_MAP1); + } else { + mem->name = "V3 PCI NON-PRE-MEM"; + v3->non_pre_mem = mem->start; + v3->non_pre_bus_addr = mem->start - win->offset; + dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n", + mem, &v3->non_pre_bus_addr); + if (resource_size(mem) != SZ_256M) { + dev_err(dev, + "non-prefetchable memory range is not 256MB\n"); + return -EINVAL; + } + /* Setup window 0 - PCI non-prefetchable memory */ + writel(v3_addr_to_lb_base(v3->non_pre_mem) | + V3_LB_BASE_ADR_SIZE_256MB | + V3_LB_BASE_ENABLE, + v3->base + V3_LB_BASE0); + writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) | + V3_LB_MAP_TYPE_MEM, + v3->base + V3_LB_MAP0); + } + break; + case IORESOURCE_BUS: + dev_dbg(dev, "BUS %pR\n", win->res); + host->busnr = win->res->start; + break; + default: + dev_info(dev, "Unknown resource type %lu\n", + resource_type(win->res)); + break; + } + + return 0; +} + +static int v3_get_dma_range_config(struct v3_pci *v3, + struct of_pci_range *range, + u32 *pci_base, u32 *pci_map) +{ + struct device *dev = v3->dev; + u64 cpu_end = range->cpu_addr + range->size - 1; + u64 pci_end = range->pci_addr + range->size - 1; + u32 val; + + if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) { + dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n"); + return -EINVAL; + } + val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE; + *pci_base = val; + + if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) { + dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n"); + return -EINVAL; + } + val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR; + + switch (range->size) { + case SZ_1M: + val |= V3_LB_BASE_ADR_SIZE_1MB; + break; + case SZ_2M: + val |= V3_LB_BASE_ADR_SIZE_2MB; + break; + case SZ_4M: + val |= V3_LB_BASE_ADR_SIZE_4MB; + break; + case SZ_8M: + val |= V3_LB_BASE_ADR_SIZE_8MB; + break; + case SZ_16M: + val |= V3_LB_BASE_ADR_SIZE_16MB; + break; + case SZ_32M: + val |= V3_LB_BASE_ADR_SIZE_32MB; + break; + case SZ_64M: + val |= 
V3_LB_BASE_ADR_SIZE_64MB; + break; + case SZ_128M: + val |= V3_LB_BASE_ADR_SIZE_128MB; + break; + case SZ_256M: + val |= V3_LB_BASE_ADR_SIZE_256MB; + break; + case SZ_512M: + val |= V3_LB_BASE_ADR_SIZE_512MB; + break; + case SZ_1G: + val |= V3_LB_BASE_ADR_SIZE_1GB; + break; + case SZ_2G: + val |= V3_LB_BASE_ADR_SIZE_2GB; + break; + default: + dev_err(v3->dev, "illegal dma memory chunk size\n"); + return -EINVAL; + break; + } + val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE; + *pci_map = val; + + dev_dbg(dev, + "DMA MEM CPU: 0x%016llx -> 0x%016llx => " + "PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n", + range->cpu_addr, cpu_end, + range->pci_addr, pci_end, + *pci_base, *pci_map); + + return 0; +} + +static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3, + struct device_node *np) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = v3->dev; + int i = 0; + + if (of_pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* + * Get the dma-ranges from the device tree + */ + for_each_of_pci_range(&parser, &range) { + int ret; + u32 pci_base, pci_map; + + ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map); + if (ret) + return ret; + + if (i == 0) { + writel(pci_base, v3->base + V3_PCI_BASE0); + writel(pci_map, v3->base + V3_PCI_MAP0); + } else if (i == 1) { + writel(pci_base, v3->base + V3_PCI_BASE1); + writel(pci_map, v3->base + V3_PCI_MAP1); + } else { + dev_err(dev, "too many ranges, only two supported\n"); + dev_err(dev, "range %d ignored\n", i); + } + i++; + } + return 0; +} + +static int v3_pci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + resource_size_t io_base; + struct resource *regs; + struct resource_entry *win; + struct v3_pci *v3; + struct pci_host_bridge *host; + struct clk *clk; + u16 val; + int irq; + int ret; + LIST_HEAD(res); + + host = 
pci_alloc_host_bridge(sizeof(*v3)); + if (!host) + return -ENOMEM; + + host->dev.parent = dev; + host->ops = &v3_pci_ops; + host->busnr = 0; + host->msi = NULL; + host->map_irq = of_irq_parse_and_map_pci; + host->swizzle_irq = pci_common_swizzle; + v3 = pci_host_bridge_priv(host); + host->sysdata = v3; + v3->dev = dev; + + /* Get and enable host clock */ + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "clock not found\n"); + return PTR_ERR(clk); + } + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "unable to enable clock\n"); + return ret; + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + v3->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(v3->base)) + return PTR_ERR(v3->base); + /* + * The hardware has a register with the physical base address + * of the V3 controller itself, verify that this is the same + * as the physical memory we've remapped it from. + */ + if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16)) + dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n", + readl(v3->base + V3_LB_IO_BASE), regs); + + /* Configuration space is 16MB directly mapped */ + regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (resource_size(regs) != SZ_16M) { + dev_err(dev, "config mem is not 16MB!\n"); + return -EINVAL; + } + v3->config_mem = regs->start; + v3->config_base = devm_ioremap_resource(dev, regs); + if (IS_ERR(v3->config_base)) + return PTR_ERR(v3->config_base); + + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, + &io_base); + if (ret) + return ret; + + ret = devm_request_pci_bus_resources(dev, &res); + if (ret) + return ret; + + /* Get and request error IRQ resource */ + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "unable to obtain PCIv3 error IRQ\n"); + return -ENODEV; + } + ret = devm_request_irq(dev, irq, v3_irq, 0, + "PCIv3 error", v3); + if (ret < 0) { + dev_err(dev, + "unable to request PCIv3 error IRQ %d (%d)\n", + irq, ret); + return ret; + } + 
+ /* + * Unlock V3 registers, but only if they were previously locked. + */ + if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK) + writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM); + + /* Disable all slave access while we set up the windows */ + val = readw(v3->base + V3_PCI_CMD); + val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + writew(val, v3->base + V3_PCI_CMD); + + /* Put the PCI bus into reset */ + val = readw(v3->base + V3_SYSTEM); + val &= ~V3_SYSTEM_M_RST_OUT; + writew(val, v3->base + V3_SYSTEM); + + /* Retry until we're ready */ + val = readw(v3->base + V3_PCI_CFG); + val |= V3_PCI_CFG_M_RETRY_EN; + writew(val, v3->base + V3_PCI_CFG); + + /* Set up the local bus protocol */ + val = readw(v3->base + V3_LB_CFG); + val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */ + val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */ + val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */ + val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */ + writew(val, v3->base + V3_LB_CFG); + + /* Enable the PCI bus master */ + val = readw(v3->base + V3_PCI_CMD); + val |= PCI_COMMAND_MASTER; + writew(val, v3->base + V3_PCI_CMD); + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry(win, &res) { + ret = v3_pci_setup_resource(v3, io_base, host, win); + if (ret) { + dev_err(dev, "error setting up resources\n"); + return ret; + } + } + ret = v3_pci_parse_map_dma_ranges(v3, np); + if (ret) + return ret; + + /* + * Disable PCI to host IO cycles, enable I/O buffers @3.3V, + * set AD_LOW0 to 1 if one of the LB_MAP registers choose + * to use this (should be unused). 
+ */ + writel(0x00000000, v3->base + V3_PCI_IO_BASE); + val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS | + V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0; + /* + * DMA read and write from PCI bus commands types + */ + val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT; + val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT; + writew(val, v3->base + V3_PCI_CFG); + + /* + * Set the V3 FIFO such that writes have higher priority than + * reads, and local bus write causes local bus read fifo flush + * on aperture 1. Same for PCI. + */ + writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 | + V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 | + V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 | + V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1, + v3->base + V3_FIFO_PRIORITY); + + + /* + * Clear any error interrupts, and enable parity and write error + * interrupts + */ + writeb(0, v3->base + V3_LB_ISTAT); + val = readw(v3->base + V3_LB_CFG); + val |= V3_LB_CFG_LB_LB_INT; + writew(val, v3->base + V3_LB_CFG); + writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR, + v3->base + V3_LB_IMASK); + + /* Special Integrator initialization */ + if (of_device_is_compatible(np, "arm,integrator-ap-pci")) { + ret = v3_integrator_init(v3); + if (ret) + return ret; + } + + /* Post-init: enable PCI memory and invalidate (master already on) */ + val = readw(v3->base + V3_PCI_CMD); + val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE; + writew(val, v3->base + V3_PCI_CMD); + + /* Clear pending interrupts */ + writeb(0, v3->base + V3_LB_ISTAT); + /* Read or write errors and parity errors cause interrupts */ + writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR, + v3->base + V3_LB_IMASK); + + /* Take the PCI bus out of reset so devices can initialize */ + val = readw(v3->base + V3_SYSTEM); + val |= V3_SYSTEM_M_RST_OUT; + writew(val, v3->base + V3_SYSTEM); + + /* + * Re-lock the system register. 
+ */ + val = readw(v3->base + V3_SYSTEM); + val |= V3_SYSTEM_M_LOCK; + writew(val, v3->base + V3_SYSTEM); + + list_splice_init(&res, &host->windows); + ret = pci_scan_root_bus_bridge(host); + if (ret) { + dev_err(dev, "failed to register host: %d\n", ret); + return ret; + } + v3->bus = host->bus; + + pci_bus_assign_resources(v3->bus); + pci_bus_add_devices(v3->bus); + + return 0; +} + +static const struct of_device_id v3_pci_of_match[] = { + { + .compatible = "v3,v360epc-pci", + }, + {}, +}; + +static struct platform_driver v3_pci_driver = { + .driver = { + .name = "pci-v3-semi", + .of_match_table = of_match_ptr(v3_pci_of_match), + .suppress_bind_attrs = true, + }, + .probe = v3_pci_probe, +}; +builtin_platform_driver(v3_pci_driver); diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c new file mode 100644 index 000000000000..994f32061b32 --- /dev/null +++ b/drivers/pci/controller/pci-versatile.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2004 Koninklijke Philips Electronics NV + * + * Conversion to platform driver and DT: + * Copyright 2014 Linaro Ltd. 
+ * + * 14/04/2005 Initial version, colin.king@philips.com + */ +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +static void __iomem *versatile_pci_base; +static void __iomem *versatile_cfg_base[2]; + +#define PCI_IMAP(m) (versatile_pci_base + ((m) * 4)) +#define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4)) +#define PCI_SELFID (versatile_pci_base + 0xc) + +#define VP_PCI_DEVICE_ID 0x030010ee +#define VP_PCI_CLASS_ID 0x0b400000 + +static u32 pci_slot_ignore; + +static int __init versatile_pci_slot_ignore(char *str) +{ + int retval; + int slot; + + while ((retval = get_option(&str, &slot))) { + if ((slot < 0) || (slot > 31)) + pr_err("Illegal slot value: %d\n", slot); + else + pci_slot_ignore |= (1 << slot); + } + return 1; +} +__setup("pci_slot_ignore=", versatile_pci_slot_ignore); + + +static void __iomem *versatile_map_bus(struct pci_bus *bus, + unsigned int devfn, int offset) +{ + unsigned int busnr = bus->number; + + if (pci_slot_ignore & (1 << PCI_SLOT(devfn))) + return NULL; + + return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset); +} + +static struct pci_ops pci_versatile_ops = { + .map_bus = versatile_map_bus, + .read = pci_generic_config_read32, + .write = pci_generic_config_write, +}; + +static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, + struct list_head *res) +{ + int err, mem = 1, res_valid = 0; + resource_size_t iobase; + struct resource_entry *win, *tmp; + + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase); + if (err) + return err; + + err = devm_request_pci_bus_resources(dev, res); + if (err) + goto out_release_res; + + resource_list_for_each_entry_safe(win, tmp, res) { + struct resource *res = win->res; + + switch (resource_type(res)) { + case IORESOURCE_IO: + err = pci_remap_iospace(res, iobase); + if (err) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, res); + resource_list_destroy_entry(win); + } + break; + 
case IORESOURCE_MEM: + res_valid |= !(res->flags & IORESOURCE_PREFETCH); + + writel(res->start >> 28, PCI_IMAP(mem)); + writel(PHYS_OFFSET >> 28, PCI_SMAP(mem)); + mem++; + + break; + } + } + + if (res_valid) + return 0; + + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; + +out_release_res: + pci_free_resource_list(res); + return err; +} + +static int versatile_pci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + int ret, i, myslot = -1; + u32 val; + void __iomem *local_pci_cfg_base; + struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; + LIST_HEAD(pci_res); + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + versatile_pci_base = devm_ioremap_resource(dev, res); + if (IS_ERR(versatile_pci_base)) + return PTR_ERR(versatile_pci_base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + versatile_cfg_base[0] = devm_ioremap_resource(dev, res); + if (IS_ERR(versatile_cfg_base[0])) + return PTR_ERR(versatile_cfg_base[0]); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(versatile_cfg_base[1])) + return PTR_ERR(versatile_cfg_base[1]); + + ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res); + if (ret) + return ret; + + /* + * We need to discover the PCI core first to configure itself + * before the main PCI probing is performed + */ + for (i = 0; i < 32; i++) { + if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) && + (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) { + myslot = i; + break; + } + } + if (myslot == -1) { + dev_err(dev, "Cannot find PCI core!\n"); + return -EIO; + } + /* + * Do not to map Versatile FPGA PCI device into memory space + */ + pci_slot_ignore |= (1 << myslot); + + dev_info(dev, "PCI core found 
(slot %d)\n", myslot); + + writel(myslot, PCI_SELFID); + local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); + + val = readl(local_pci_cfg_base + PCI_COMMAND); + val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE; + writel(val, local_pci_cfg_base + PCI_COMMAND); + + /* + * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM + */ + writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0); + writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1); + writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2); + + /* + * For many years the kernel and QEMU were symbiotically buggy + * in that they both assumed the same broken IRQ mapping. + * QEMU therefore attempts to auto-detect old broken kernels + * so that they still work on newer QEMU as they did on old + * QEMU. Since we now use the correct (ie matching-hardware) + * IRQ mapping we write a definitely different value to a + * PCI_INTERRUPT_LINE register to tell QEMU that we expect + * real hardware behaviour and it need not be backwards + * compatible for us. This write is harmless on real hardware. 
+ */ + writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE); + + pci_add_flags(PCI_ENABLE_PROC_DOMAINS); + pci_add_flags(PCI_REASSIGN_ALL_BUS); + + list_splice_init(&pci_res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = NULL; + bridge->busnr = 0; + bridge->ops = &pci_versatile_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) + return ret; + + bus = bridge->bus; + + pci_assign_unassigned_bus_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + pci_bus_add_devices(bus); + + return 0; +} + +static const struct of_device_id versatile_pci_of_match[] = { + { .compatible = "arm,versatile-pci", }, + { }, +}; +MODULE_DEVICE_TABLE(of, versatile_pci_of_match); + +static struct platform_driver versatile_pci_driver = { + .driver = { + .name = "versatile-pci", + .of_match_table = versatile_pci_of_match, + .suppress_bind_attrs = true, + }, + .probe = versatile_pci_probe, +}; +module_platform_driver(versatile_pci_driver); + +MODULE_DESCRIPTION("Versatile PCI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c new file mode 100644 index 000000000000..f4c02da84e59 --- /dev/null +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -0,0 +1,543 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * APM X-Gene MSI Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Tanmay Inamdar + * Duc Dang + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MSI_IR0 0x000000 +#define MSI_INT0 0x800000 +#define IDX_PER_GROUP 8 +#define IRQS_PER_IDX 16 +#define NR_HW_IRQS 16 +#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) + +struct xgene_msi_group { + struct xgene_msi *msi; + int gic_irq; + u32 msi_grp; +}; + +struct xgene_msi { + struct device_node *node; + 
struct irq_domain *inner_domain; + struct irq_domain *msi_domain; + u64 msi_addr; + void __iomem *msi_regs; + unsigned long *bitmap; + struct mutex bitmap_lock; + struct xgene_msi_group *msi_groups; + int num_cpus; +}; + +/* Global data */ +static struct xgene_msi xgene_msi_ctrl; + +static struct irq_chip xgene_msi_top_irq_chip = { + .name = "X-Gene1 MSI", + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info xgene_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &xgene_msi_top_irq_chip, +}; + +/* + * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where + * n is group number (0..F), x is index of registers in each group (0..7) + * The register layout is as follows: + * MSI0IR0 base_addr + * MSI0IR1 base_addr + 0x10000 + * ... ... + * MSI0IR6 base_addr + 0x60000 + * MSI0IR7 base_addr + 0x70000 + * MSI1IR0 base_addr + 0x80000 + * MSI1IR1 base_addr + 0x90000 + * ... ... + * MSI1IR7 base_addr + 0xF0000 + * MSI2IR0 base_addr + 0x100000 + * ... ... + * MSIFIR0 base_addr + 0x780000 + * MSIFIR1 base_addr + 0x790000 + * ... ... + * MSIFIR7 base_addr + 0x7F0000 + * MSIINT0 base_addr + 0x800000 + * MSIINT1 base_addr + 0x810000 + * ... ... + * MSIINTF base_addr + 0x8F0000 + * + * Each index register supports 16 MSI vectors (0..15) to generate interrupt. + * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination + * registers. + * + * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate + * the MSI pending status caused by 1 of its 8 index registers. 
+ */ + +/* MSInIRx read helper */ +static u32 xgene_msi_ir_read(struct xgene_msi *msi, + u32 msi_grp, u32 msir_idx) +{ + return readl_relaxed(msi->msi_regs + MSI_IR0 + + (msi_grp << 19) + (msir_idx << 16)); +} + +/* MSIINTn read helper */ +static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) +{ + return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); +} + +/* + * With 2048 MSI vectors supported, the MSI message can be constructed using + * following scheme: + * - Divide into 8 256-vector groups + * Group 0: 0-255 + * Group 1: 256-511 + * Group 2: 512-767 + * ... + * Group 7: 1792-2047 + * - Each 256-vector group is divided into 16 16-vector groups + * As an example: 16 16-vector groups for 256-vector group 0-255 is + * Group 0: 0-15 + * Group 1: 16-32 + * ... + * Group 15: 240-255 + * - The termination address of MSI vector in 256-vector group n and 16-vector + * group x is the address of MSIxIRn + * - The data for MSI vector in 16-vector group x is x + */ +static u32 hwirq_to_reg_set(unsigned long hwirq) +{ + return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); +} + +static u32 hwirq_to_group(unsigned long hwirq) +{ + return (hwirq % NR_HW_IRQS); +} + +static u32 hwirq_to_msi_data(unsigned long hwirq) +{ + return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); +} + +static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct xgene_msi *msi = irq_data_get_irq_chip_data(data); + u32 reg_set = hwirq_to_reg_set(data->hwirq); + u32 group = hwirq_to_group(data->hwirq); + u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); + + msg->address_hi = upper_32_bits(target_addr); + msg->address_lo = lower_32_bits(target_addr); + msg->data = hwirq_to_msi_data(data->hwirq); +} + +/* + * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain + * the expected behaviour of .set_affinity for each MSI interrupt, the 16 + * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs + * for each core). 
The MSI vector is moved fom 1 MSI GIC IRQ to another + * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a + * consequence, the total MSI vectors that X-Gene v1 supports will be + * reduced to 256 (2048/8) vectors. + */ +static int hwirq_to_cpu(unsigned long hwirq) +{ + return (hwirq % xgene_msi_ctrl.num_cpus); +} + +static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) +{ + return (hwirq - hwirq_to_cpu(hwirq)); +} + +static int xgene_msi_set_affinity(struct irq_data *irqdata, + const struct cpumask *mask, bool force) +{ + int target_cpu = cpumask_first(mask); + int curr_cpu; + + curr_cpu = hwirq_to_cpu(irqdata->hwirq); + if (curr_cpu == target_cpu) + return IRQ_SET_MASK_OK_DONE; + + /* Update MSI number to target the new CPU */ + irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; + + return IRQ_SET_MASK_OK; +} + +static struct irq_chip xgene_msi_bottom_irq_chip = { + .name = "MSI", + .irq_set_affinity = xgene_msi_set_affinity, + .irq_compose_msi_msg = xgene_compose_msi_msg, +}; + +static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct xgene_msi *msi = domain->host_data; + int msi_irq; + + mutex_lock(&msi->bitmap_lock); + + msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, + msi->num_cpus, 0); + if (msi_irq < NR_MSI_VEC) + bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); + else + msi_irq = -ENOSPC; + + mutex_unlock(&msi->bitmap_lock); + + if (msi_irq < 0) + return msi_irq; + + irq_domain_set_info(domain, virq, msi_irq, + &xgene_msi_bottom_irq_chip, domain->host_data, + handle_simple_irq, NULL, NULL); + + return 0; +} + +static void xgene_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct xgene_msi *msi = irq_data_get_irq_chip_data(d); + u32 hwirq; + + mutex_lock(&msi->bitmap_lock); + + hwirq = 
hwirq_to_canonical_hwirq(d->hwirq); + bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); + + mutex_unlock(&msi->bitmap_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = xgene_irq_domain_alloc, + .free = xgene_irq_domain_free, +}; + +static int xgene_allocate_domains(struct xgene_msi *msi) +{ + msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC, + &msi_domain_ops, msi); + if (!msi->inner_domain) + return -ENOMEM; + + msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node), + &xgene_msi_domain_info, + msi->inner_domain); + + if (!msi->msi_domain) { + irq_domain_remove(msi->inner_domain); + return -ENOMEM; + } + + return 0; +} + +static void xgene_free_domains(struct xgene_msi *msi) +{ + if (msi->msi_domain) + irq_domain_remove(msi->msi_domain); + if (msi->inner_domain) + irq_domain_remove(msi->inner_domain); +} + +static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) +{ + int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); + + xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); + if (!xgene_msi->bitmap) + return -ENOMEM; + + mutex_init(&xgene_msi->bitmap_lock); + + xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, + sizeof(struct xgene_msi_group), + GFP_KERNEL); + if (!xgene_msi->msi_groups) + return -ENOMEM; + + return 0; +} + +static void xgene_msi_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct xgene_msi_group *msi_groups; + struct xgene_msi *xgene_msi; + unsigned int virq; + int msir_index, msir_val, hw_irq; + u32 intr_index, grp_select, msi_grp; + + chained_irq_enter(chip, desc); + + msi_groups = irq_desc_get_handler_data(desc); + xgene_msi = msi_groups->msi; + msi_grp = msi_groups->msi_grp; + + /* + * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt + * If bit x of this register is set (x is 0..7), one or more interupts + * corresponding to MSInIRx is set. 
+ */ + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); + while (grp_select) { + msir_index = ffs(grp_select) - 1; + /* + * Calculate MSInIRx address to read to check for interrupts + * (refer to termination address and data assignment + * described in xgene_compose_msi_msg() ) + */ + msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); + while (msir_val) { + intr_index = ffs(msir_val) - 1; + /* + * Calculate MSI vector number (refer to the termination + * address and data assignment described in + * xgene_compose_msi_msg function) + */ + hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * + NR_HW_IRQS) + msi_grp; + /* + * As we have multiple hw_irq that maps to single MSI, + * always look up the virq using the hw_irq as seen from + * CPU0 + */ + hw_irq = hwirq_to_canonical_hwirq(hw_irq); + virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq); + WARN_ON(!virq); + if (virq != 0) + generic_handle_irq(virq); + msir_val &= ~(1 << intr_index); + } + grp_select &= ~(1 << msir_index); + + if (!grp_select) { + /* + * We handled all interrupts happened in this group, + * resample this group MSI_INTx register in case + * something else has been made pending in the meantime + */ + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); + } + } + + chained_irq_exit(chip, desc); +} + +static enum cpuhp_state pci_xgene_online; + +static int xgene_msi_remove(struct platform_device *pdev) +{ + struct xgene_msi *msi = platform_get_drvdata(pdev); + + if (pci_xgene_online) + cpuhp_remove_state(pci_xgene_online); + cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD); + + kfree(msi->msi_groups); + + kfree(msi->bitmap); + msi->bitmap = NULL; + + xgene_free_domains(msi); + + return 0; +} + +static int xgene_msi_hwirq_alloc(unsigned int cpu) +{ + struct xgene_msi *msi = &xgene_msi_ctrl; + struct xgene_msi_group *msi_group; + cpumask_var_t mask; + int i; + int err; + + for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { + msi_group = &msi->msi_groups[i]; + if (!msi_group->gic_irq) + 
continue; + + irq_set_chained_handler(msi_group->gic_irq, + xgene_msi_isr); + err = irq_set_handler_data(msi_group->gic_irq, msi_group); + if (err) { + pr_err("failed to register GIC IRQ handler\n"); + return -EINVAL; + } + /* + * Statically allocate MSI GIC IRQs to each CPU core. + * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated + * to each core. + */ + if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + err = irq_set_affinity(msi_group->gic_irq, mask); + if (err) + pr_err("failed to set affinity for GIC IRQ"); + free_cpumask_var(mask); + } else { + pr_err("failed to alloc CPU mask for affinity\n"); + err = -EINVAL; + } + + if (err) { + irq_set_chained_handler_and_data(msi_group->gic_irq, + NULL, NULL); + return err; + } + } + + return 0; +} + +static int xgene_msi_hwirq_free(unsigned int cpu) +{ + struct xgene_msi *msi = &xgene_msi_ctrl; + struct xgene_msi_group *msi_group; + int i; + + for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { + msi_group = &msi->msi_groups[i]; + if (!msi_group->gic_irq) + continue; + + irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, + NULL); + } + return 0; +} + +static const struct of_device_id xgene_msi_match_table[] = { + {.compatible = "apm,xgene1-msi"}, + {}, +}; + +static int xgene_msi_probe(struct platform_device *pdev) +{ + struct resource *res; + int rc, irq_index; + struct xgene_msi *xgene_msi; + int virt_msir; + u32 msi_val, msi_idx; + + xgene_msi = &xgene_msi_ctrl; + + platform_set_drvdata(pdev, xgene_msi); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xgene_msi->msi_regs)) { + dev_err(&pdev->dev, "no reg space\n"); + rc = PTR_ERR(xgene_msi->msi_regs); + goto error; + } + xgene_msi->msi_addr = res->start; + xgene_msi->node = pdev->dev.of_node; + xgene_msi->num_cpus = num_possible_cpus(); + + rc = xgene_msi_init_allocator(xgene_msi); + if (rc) { + dev_err(&pdev->dev, "Error 
allocating MSI bitmap\n"); + goto error; + } + + rc = xgene_allocate_domains(xgene_msi); + if (rc) { + dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); + goto error; + } + + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { + virt_msir = platform_get_irq(pdev, irq_index); + if (virt_msir < 0) { + dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", + irq_index); + rc = virt_msir; + goto error; + } + xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; + xgene_msi->msi_groups[irq_index].msi_grp = irq_index; + xgene_msi->msi_groups[irq_index].msi = xgene_msi; + } + + /* + * MSInIRx registers are read-to-clear; before registering + * interrupt handlers, read all of them to clear spurious + * interrupts that may occur before the driver is probed. + */ + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { + for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) + msi_val = xgene_msi_ir_read(xgene_msi, irq_index, + msi_idx); + /* Read MSIINTn to confirm */ + msi_val = xgene_msi_int_read(xgene_msi, irq_index); + if (msi_val) { + dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); + rc = -EINVAL; + goto error; + } + } + + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", + xgene_msi_hwirq_alloc, NULL); + if (rc < 0) + goto err_cpuhp; + pci_xgene_online = rc; + rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, + xgene_msi_hwirq_free); + if (rc) + goto err_cpuhp; + + dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); + + return 0; + +err_cpuhp: + dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); +error: + xgene_msi_remove(pdev); + return rc; +} + +static struct platform_driver xgene_msi_driver = { + .driver = { + .name = "xgene-msi", + .of_match_table = xgene_msi_match_table, + }, + .probe = xgene_msi_probe, + .remove = xgene_msi_remove, +}; + +static int __init xgene_pcie_msi_init(void) +{ + return platform_driver_register(&xgene_msi_driver); +} +subsys_initcall(xgene_pcie_msi_init); diff --git 
a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c new file mode 100644 index 000000000000..d854d67e873c --- /dev/null +++ b/drivers/pci/controller/pci-xgene.c @@ -0,0 +1,689 @@ +// SPDX-License-Identifier: GPL-2.0+ +/** + * APM X-Gene PCIe Driver + * + * Copyright (c) 2014 Applied Micro Circuits Corporation. + * + * Author: Tanmay Inamdar . + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +#define PCIECORE_CTLANDSTATUS 0x50 +#define PIM1_1L 0x80 +#define IBAR2 0x98 +#define IR2MSK 0x9c +#define PIM2_1L 0xa0 +#define IBAR3L 0xb4 +#define IR3MSKL 0xbc +#define PIM3_1L 0xc4 +#define OMR1BARL 0x100 +#define OMR2BARL 0x118 +#define OMR3BARL 0x130 +#define CFGBARL 0x154 +#define CFGBARH 0x158 +#define CFGCTL 0x15c +#define RTDID 0x160 +#define BRIDGE_CFG_0 0x2000 +#define BRIDGE_CFG_4 0x2010 +#define BRIDGE_STATUS_0 0x2600 + +#define LINK_UP_MASK 0x00000100 +#define AXI_EP_CFG_ACCESS 0x10000 +#define EN_COHERENCY 0xF0000000 +#define EN_REG 0x00000001 +#define OB_LO_IO 0x00000002 +#define XGENE_PCIE_VENDORID 0x10E8 +#define XGENE_PCIE_DEVICEID 0xE004 +#define SZ_1T (SZ_1G*1024ULL) +#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) + +#define XGENE_V1_PCI_EXP_CAP 0x40 + +/* PCIe IP version */ +#define XGENE_PCIE_IP_VER_UNKN 0 +#define XGENE_PCIE_IP_VER_1 1 +#define XGENE_PCIE_IP_VER_2 2 + +#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) +struct xgene_pcie_port { + struct device_node *node; + struct device *dev; + struct clk *clk; + void __iomem *csr_base; + void __iomem *cfg_base; + unsigned long cfg_addr; + bool link_up; + u32 version; +}; + +static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg) +{ + return readl(port->csr_base + reg); +} + +static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val) +{ + writel(val, port->csr_base + reg); 
+} + +static inline u32 pcie_bar_low_val(u32 addr, u32 flags) +{ + return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; +} + +static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus) +{ + struct pci_config_window *cfg; + + if (acpi_disabled) + return (struct xgene_pcie_port *)(bus->sysdata); + + cfg = bus->sysdata; + return (struct xgene_pcie_port *)(cfg->priv); +} + +/* + * When the address bit [17:16] is 2'b01, the Configuration access will be + * treated as Type 1 and it will be forwarded to external PCIe device. + */ +static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) +{ + struct xgene_pcie_port *port = pcie_bus_to_port(bus); + + if (bus->number >= (bus->primary + 1)) + return port->cfg_base + AXI_EP_CFG_ACCESS; + + return port->cfg_base; +} + +/* + * For Configuration request, RTDID register is used as Bus Number, + * Device Number and Function number of the header fields. + */ +static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) +{ + struct xgene_pcie_port *port = pcie_bus_to_port(bus); + unsigned int b, d, f; + u32 rtdid_val = 0; + + b = bus->number; + d = PCI_SLOT(devfn); + f = PCI_FUNC(devfn); + + if (!pci_is_root_bus(bus)) + rtdid_val = (b << 8) | (d << 3) | f; + + xgene_pcie_writel(port, RTDID, rtdid_val); + /* read the register back to ensure flush */ + xgene_pcie_readl(port, RTDID); +} + +/* + * X-Gene PCIe port uses BAR0-BAR1 of RC's configuration space as + * the translation from PCI bus to native BUS. Entire DDR region + * is mapped into PCIe space using these registers, so it can be + * reached by DMA from EP devices. The BAR0/1 of bridge should be + * hidden during enumeration to avoid the sizing and resource allocation + * by PCIe core. 
+ */ +static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) +{ + if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) || + (offset == PCI_BASE_ADDRESS_1))) + return true; + + return false; +} + +static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int offset) +{ + if ((pci_is_root_bus(bus) && devfn != 0) || + xgene_pcie_hide_rc_bars(bus, offset)) + return NULL; + + xgene_pcie_set_rtdid_reg(bus, devfn); + return xgene_pcie_get_cfg_base(bus) + offset; +} + +static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct xgene_pcie_port *port = pcie_bus_to_port(bus); + + if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != + PCIBIOS_SUCCESSFUL) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * The v1 controller has a bug in its Configuration Request + * Retry Status (CRS) logic: when CRS is enabled and we read the + * Vendor and Device ID of a non-existent device, the controller + * fabricates return data of 0xFFFF0001 ("device exists but is not + * ready") instead of 0xFFFFFFFF ("device does not exist"). This + * causes the PCI core to retry the read until it times out. + * Avoid this by not claiming to support CRS. 
+ */ + if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && + ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL)) + *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} +#endif + +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) +static int xgene_get_csr_resource(struct acpi_device *adev, + struct resource *res) +{ + struct device *dev = &adev->dev; + struct resource_entry *entry; + struct list_head list; + unsigned long flags; + int ret; + + INIT_LIST_HEAD(&list); + flags = IORESOURCE_MEM; + ret = acpi_dev_get_resources(adev, &list, + acpi_dev_filter_resource_type_cb, + (void *) flags); + if (ret < 0) { + dev_err(dev, "failed to parse _CRS method, error code %d\n", + ret); + return ret; + } + + if (ret == 0) { + dev_err(dev, "no IO and memory resources present in _CRS\n"); + return -EINVAL; + } + + entry = list_first_entry(&list, struct resource_entry, node); + *res = *entry->res; + acpi_dev_free_resource_list(&list); + return 0; +} + +static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion) +{ + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + struct xgene_pcie_port *port; + struct resource csr; + int ret; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + ret = xgene_get_csr_resource(adev, &csr); + if (ret) { + dev_err(dev, "can't get CSR resource\n"); + return ret; + } + port->csr_base = devm_pci_remap_cfg_resource(dev, &csr); + if (IS_ERR(port->csr_base)) + return PTR_ERR(port->csr_base); + + port->cfg_base = cfg->win; + port->version = ipversion; + + cfg->priv = port; + return 0; +} + +static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg) +{ + return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1); +} + +struct pci_ecam_ops xgene_v1_pcie_ecam_ops = { + .bus_shift = 16, + .init = xgene_v1_pcie_ecam_init, + .pci_ops = { + 
.map_bus = xgene_pcie_map_bus, + .read = xgene_pcie_config_read32, + .write = pci_generic_config_write, + } +}; + +static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg) +{ + return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2); +} + +struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { + .bus_shift = 16, + .init = xgene_v2_pcie_ecam_init, + .pci_ops = { + .map_bus = xgene_pcie_map_bus, + .read = xgene_pcie_config_read32, + .write = pci_generic_config_write, + } +}; +#endif + +#if defined(CONFIG_PCI_XGENE) +static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr, + u32 flags, u64 size) +{ + u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags; + u32 val32 = 0; + u32 val; + + val32 = xgene_pcie_readl(port, addr); + val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16); + xgene_pcie_writel(port, addr, val); + + val32 = xgene_pcie_readl(port, addr + 0x04); + val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16); + xgene_pcie_writel(port, addr + 0x04, val); + + val32 = xgene_pcie_readl(port, addr + 0x04); + val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16); + xgene_pcie_writel(port, addr + 0x04, val); + + val32 = xgene_pcie_readl(port, addr + 0x08); + val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16); + xgene_pcie_writel(port, addr + 0x08, val); + + return mask; +} + +static void xgene_pcie_linkup(struct xgene_pcie_port *port, + u32 *lanes, u32 *speed) +{ + u32 val32; + + port->link_up = false; + val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS); + if (val32 & LINK_UP_MASK) { + port->link_up = true; + *speed = PIPE_PHY_RATE_RD(val32); + val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0); + *lanes = val32 >> 26; + } +} + +static int xgene_pcie_init_port(struct xgene_pcie_port *port) +{ + struct device *dev = port->dev; + int rc; + + port->clk = clk_get(dev, NULL); + if (IS_ERR(port->clk)) { + dev_err(dev, "clock not available\n"); + return -ENODEV; + } + + rc = clk_prepare_enable(port->clk); + if (rc) { + dev_err(dev, 
"clock enable failed\n"); + return rc; + } + + return 0; +} + +static int xgene_pcie_map_reg(struct xgene_pcie_port *port, + struct platform_device *pdev) +{ + struct device *dev = port->dev; + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); + port->csr_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(port->csr_base)) + return PTR_ERR(port->csr_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + port->cfg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(port->cfg_base)) + return PTR_ERR(port->cfg_base); + port->cfg_addr = res->start; + + return 0; +} + +static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, + struct resource *res, u32 offset, + u64 cpu_addr, u64 pci_addr) +{ + struct device *dev = port->dev; + resource_size_t size = resource_size(res); + u64 restype = resource_type(res); + u64 mask = 0; + u32 min_size; + u32 flag = EN_REG; + + if (restype == IORESOURCE_MEM) { + min_size = SZ_128M; + } else { + min_size = 128; + flag |= OB_LO_IO; + } + + if (size >= min_size) + mask = ~(size - 1) | flag; + else + dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n", + (u64)size, min_size); + + xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr)); + xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr)); + xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask)); + xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask)); + xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr)); + xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr)); +} + +static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port) +{ + u64 addr = port->cfg_addr; + + xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr)); + xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr)); + xgene_pcie_writel(port, CFGCTL, EN_REG); +} + +static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, + struct list_head *res, + resource_size_t io_base) +{ + struct 
resource_entry *window; + struct device *dev = port->dev; + int ret; + + resource_list_for_each_entry(window, res) { + struct resource *res = window->res; + u64 restype = resource_type(res); + + dev_dbg(dev, "%pR\n", res); + + switch (restype) { + case IORESOURCE_IO: + xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base, + res->start - window->offset); + ret = pci_remap_iospace(res, io_base); + if (ret < 0) + return ret; + break; + case IORESOURCE_MEM: + if (res->flags & IORESOURCE_PREFETCH) + xgene_pcie_setup_ob_reg(port, res, OMR2BARL, + res->start, + res->start - + window->offset); + else + xgene_pcie_setup_ob_reg(port, res, OMR1BARL, + res->start, + res->start - + window->offset); + break; + case IORESOURCE_BUS: + break; + default: + dev_err(dev, "invalid resource %pR\n", res); + return -EINVAL; + } + } + xgene_pcie_setup_cfg_reg(port); + return 0; +} + +static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg, + u64 pim, u64 size) +{ + xgene_pcie_writel(port, pim_reg, lower_32_bits(pim)); + xgene_pcie_writel(port, pim_reg + 0x04, + upper_32_bits(pim) | EN_COHERENCY); + xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size)); + xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size)); +} + +/* + * X-Gene PCIe support maximum 3 inbound memory regions + * This function helps to select a region based on size of region + */ +static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) +{ + if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) { + *ib_reg_mask |= (1 << 1); + return 1; + } + + if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { + *ib_reg_mask |= (1 << 0); + return 0; + } + + if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) { + *ib_reg_mask |= (1 << 2); + return 2; + } + + return -EINVAL; +} + +static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, + struct of_pci_range *range, u8 *ib_reg_mask) +{ + void __iomem *cfg_base = port->cfg_base; + struct device *dev = 
port->dev; + void *bar_addr; + u32 pim_reg; + u64 cpu_addr = range->cpu_addr; + u64 pci_addr = range->pci_addr; + u64 size = range->size; + u64 mask = ~(size - 1) | EN_REG; + u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; + u32 bar_low; + int region; + + region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); + if (region < 0) { + dev_warn(dev, "invalid pcie dma-range config\n"); + return; + } + + if (range->flags & IORESOURCE_PREFETCH) + flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; + + bar_low = pcie_bar_low_val((u32)cpu_addr, flags); + switch (region) { + case 0: + xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size); + bar_addr = cfg_base + PCI_BASE_ADDRESS_0; + writel(bar_low, bar_addr); + writel(upper_32_bits(cpu_addr), bar_addr + 0x4); + pim_reg = PIM1_1L; + break; + case 1: + xgene_pcie_writel(port, IBAR2, bar_low); + xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask)); + pim_reg = PIM2_1L; + break; + case 2: + xgene_pcie_writel(port, IBAR3L, bar_low); + xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr)); + xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask)); + xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask)); + pim_reg = PIM3_1L; + break; + } + + xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1)); +} + +static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) +{ + struct device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; + u8 ib_reg_mask = 0; + + if (of_pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + + dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); + } + return 0; +} + +/* clear BAR configuration which was done by firmware */ 
+static void xgene_pcie_clear_config(struct xgene_pcie_port *port) +{ + int i; + + for (i = PIM1_1L; i <= CFGCTL; i += 4) + xgene_pcie_writel(port, i, 0); +} + +static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res, + resource_size_t io_base) +{ + struct device *dev = port->dev; + u32 val, lanes = 0, speed = 0; + int ret; + + xgene_pcie_clear_config(port); + + /* setup the vendor and device IDs correctly */ + val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID; + xgene_pcie_writel(port, BRIDGE_CFG_0, val); + + ret = xgene_pcie_map_ranges(port, res, io_base); + if (ret) + return ret; + + ret = xgene_pcie_parse_map_dma_ranges(port); + if (ret) + return ret; + + xgene_pcie_linkup(port, &lanes, &speed); + if (!port->link_up) + dev_info(dev, "(rc) link down\n"); + else + dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1); + return 0; +} + +static struct pci_ops xgene_pcie_ops = { + .map_bus = xgene_pcie_map_bus, + .read = xgene_pcie_config_read32, + .write = pci_generic_config_write32, +}; + +static int xgene_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *dn = dev->of_node; + struct xgene_pcie_port *port; + resource_size_t iobase = 0; + struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; + int ret; + LIST_HEAD(res); + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); + if (!bridge) + return -ENOMEM; + + port = pci_host_bridge_priv(bridge); + + port->node = of_node_get(dn); + port->dev = dev; + + port->version = XGENE_PCIE_IP_VER_UNKN; + if (of_device_is_compatible(port->node, "apm,xgene-pcie")) + port->version = XGENE_PCIE_IP_VER_1; + + ret = xgene_pcie_map_reg(port, pdev); + if (ret) + return ret; + + ret = xgene_pcie_init_port(port); + if (ret) + return ret; + + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, + &iobase); + if (ret) + return ret; + + ret = devm_request_pci_bus_resources(dev, &res); + if (ret) + goto error; + + ret = 
xgene_pcie_setup(port, &res, iobase); + if (ret) + goto error; + + list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = port; + bridge->busnr = 0; + bridge->ops = &xgene_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) + goto error; + + bus = bridge->bus; + + pci_assign_unassigned_bus_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + pci_bus_add_devices(bus); + return 0; + +error: + pci_free_resource_list(&res); + return ret; +} + +static const struct of_device_id xgene_pcie_match_table[] = { + {.compatible = "apm,xgene-pcie",}, + {}, +}; + +static struct platform_driver xgene_pcie_driver = { + .driver = { + .name = "xgene-pcie", + .of_match_table = of_match_ptr(xgene_pcie_match_table), + .suppress_bind_attrs = true, + }, + .probe = xgene_pcie_probe, +}; +builtin_platform_driver(xgene_pcie_driver); +#endif diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c new file mode 100644 index 000000000000..025ef7d9a046 --- /dev/null +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Altera PCIe MSI support + * + * Author: Ley Foon Tan + * + * Copyright Altera Corporation (C) 2013-2015. 
All rights reserved + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MSI_STATUS 0x0 +#define MSI_ERROR 0x4 +#define MSI_INTMASK 0x8 + +#define MAX_MSI_VECTORS 32 + +struct altera_msi { + DECLARE_BITMAP(used, MAX_MSI_VECTORS); + struct mutex lock; /* protect "used" bitmap */ + struct platform_device *pdev; + struct irq_domain *msi_domain; + struct irq_domain *inner_domain; + void __iomem *csr_base; + void __iomem *vector_base; + phys_addr_t vector_phy; + u32 num_of_vectors; + int irq; +}; + +static inline void msi_writel(struct altera_msi *msi, const u32 value, + const u32 reg) +{ + writel_relaxed(value, msi->csr_base + reg); +} + +static inline u32 msi_readl(struct altera_msi *msi, const u32 reg) +{ + return readl_relaxed(msi->csr_base + reg); +} + +static void altera_msi_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct altera_msi *msi; + unsigned long status; + u32 bit; + u32 virq; + + chained_irq_enter(chip, desc); + msi = irq_desc_get_handler_data(desc); + + while ((status = msi_readl(msi, MSI_STATUS)) != 0) { + for_each_set_bit(bit, &status, msi->num_of_vectors) { + /* Dummy read from vector to clear the interrupt */ + readl_relaxed(msi->vector_base + (bit * sizeof(u32))); + + virq = irq_find_mapping(msi->inner_domain, bit); + if (virq) + generic_handle_irq(virq); + else + dev_err(&msi->pdev->dev, "unexpected MSI\n"); + } + } + + chained_irq_exit(chip, desc); +} + +static struct irq_chip altera_msi_irq_chip = { + .name = "Altera PCIe MSI", + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info altera_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &altera_msi_irq_chip, +}; + +static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct altera_msi *msi = irq_data_get_irq_chip_data(data); + phys_addr_t addr 
= msi->vector_phy + (data->hwirq * sizeof(u32)); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = data->hwirq; + + dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)data->hwirq, msg->address_hi, msg->address_lo); +} + +static int altera_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static struct irq_chip altera_msi_bottom_irq_chip = { + .name = "Altera MSI", + .irq_compose_msi_msg = altera_compose_msi_msg, + .irq_set_affinity = altera_msi_set_affinity, +}; + +static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct altera_msi *msi = domain->host_data; + unsigned long bit; + u32 mask; + + WARN_ON(nr_irqs != 1); + mutex_lock(&msi->lock); + + bit = find_first_zero_bit(msi->used, msi->num_of_vectors); + if (bit >= msi->num_of_vectors) { + mutex_unlock(&msi->lock); + return -ENOSPC; + } + + set_bit(bit, msi->used); + + mutex_unlock(&msi->lock); + + irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + + mask = msi_readl(msi, MSI_INTMASK); + mask |= 1 << bit; + msi_writel(msi, mask, MSI_INTMASK); + + return 0; +} + +static void altera_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct altera_msi *msi = irq_data_get_irq_chip_data(d); + u32 mask; + + mutex_lock(&msi->lock); + + if (!test_bit(d->hwirq, msi->used)) { + dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n", + d->hwirq); + } else { + __clear_bit(d->hwirq, msi->used); + mask = msi_readl(msi, MSI_INTMASK); + mask &= ~(1 << d->hwirq); + msi_writel(msi, mask, MSI_INTMASK); + } + + mutex_unlock(&msi->lock); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = altera_irq_domain_alloc, + .free = 
altera_irq_domain_free, +}; + +static int altera_allocate_domains(struct altera_msi *msi) +{ + struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node); + + msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, + &msi_domain_ops, msi); + if (!msi->inner_domain) { + dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + msi->msi_domain = pci_msi_create_irq_domain(fwnode, + &altera_msi_domain_info, msi->inner_domain); + if (!msi->msi_domain) { + dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); + irq_domain_remove(msi->inner_domain); + return -ENOMEM; + } + + return 0; +} + +static void altera_free_domains(struct altera_msi *msi) +{ + irq_domain_remove(msi->msi_domain); + irq_domain_remove(msi->inner_domain); +} + +static int altera_msi_remove(struct platform_device *pdev) +{ + struct altera_msi *msi = platform_get_drvdata(pdev); + + msi_writel(msi, 0, MSI_INTMASK); + irq_set_chained_handler(msi->irq, NULL); + irq_set_handler_data(msi->irq, NULL); + + altera_free_domains(msi); + + platform_set_drvdata(pdev, NULL); + return 0; +} + +static int altera_msi_probe(struct platform_device *pdev) +{ + struct altera_msi *msi; + struct device_node *np = pdev->dev.of_node; + struct resource *res; + int ret; + + msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi), + GFP_KERNEL); + if (!msi) + return -ENOMEM; + + mutex_init(&msi->lock); + msi->pdev = pdev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); + msi->csr_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(msi->csr_base)) { + dev_err(&pdev->dev, "failed to map csr memory\n"); + return PTR_ERR(msi->csr_base); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "vector_slave"); + msi->vector_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(msi->vector_base)) { + dev_err(&pdev->dev, "failed to map vector_slave memory\n"); + return PTR_ERR(msi->vector_base); + } + + msi->vector_phy = res->start; + 
+ if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) { + dev_err(&pdev->dev, "failed to parse the number of vectors\n"); + return -EINVAL; + } + + ret = altera_allocate_domains(msi); + if (ret) + return ret; + + msi->irq = platform_get_irq(pdev, 0); + if (msi->irq < 0) { + dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq); + ret = msi->irq; + goto err; + } + + irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi); + platform_set_drvdata(pdev, msi); + + return 0; + +err: + altera_msi_remove(pdev); + return ret; +} + +static const struct of_device_id altera_msi_of_match[] = { + { .compatible = "altr,msi-1.0", NULL }, + { }, +}; + +static struct platform_driver altera_msi_driver = { + .driver = { + .name = "altera-msi", + .of_match_table = altera_msi_of_match, + }, + .probe = altera_msi_probe, + .remove = altera_msi_remove, +}; + +static int __init altera_msi_init(void) +{ + return platform_driver_register(&altera_msi_driver); +} +subsys_initcall(altera_msi_init); diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c new file mode 100644 index 000000000000..7d05e51205b3 --- /dev/null +++ b/drivers/pci/controller/pcie-altera.c @@ -0,0 +1,645 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright Altera Corporation (C) 2013-2015. 
All rights reserved + * + * Author: Ley Foon Tan + * Description: Altera PCIe host controller driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +#define RP_TX_REG0 0x2000 +#define RP_TX_REG1 0x2004 +#define RP_TX_CNTRL 0x2008 +#define RP_TX_EOP 0x2 +#define RP_TX_SOP 0x1 +#define RP_RXCPL_STATUS 0x2010 +#define RP_RXCPL_EOP 0x2 +#define RP_RXCPL_SOP 0x1 +#define RP_RXCPL_REG0 0x2014 +#define RP_RXCPL_REG1 0x2018 +#define P2A_INT_STATUS 0x3060 +#define P2A_INT_STS_ALL 0xf +#define P2A_INT_ENABLE 0x3070 +#define P2A_INT_ENA_ALL 0xf +#define RP_LTSSM 0x3c64 +#define RP_LTSSM_MASK 0x1f +#define LTSSM_L0 0xf + +#define PCIE_CAP_OFFSET 0x80 +/* TLP configuration type 0 and 1 */ +#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ +#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ +#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */ +#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */ +#define TLP_PAYLOAD_SIZE 0x01 +#define TLP_READ_TAG 0x1d +#define TLP_WRITE_TAG 0x10 +#define RP_DEVFN 0 +#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) +#define TLP_CFGRD_DW0(pcie, bus) \ + ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ + : TLP_FMTTYPE_CFGRD1) << 24) | \ + TLP_PAYLOAD_SIZE) +#define TLP_CFGWR_DW0(pcie, bus) \ + ((((bus == pcie->root_bus_nr) ? 
TLP_FMTTYPE_CFGWR0 \ + : TLP_FMTTYPE_CFGWR1) << 24) | \ + TLP_PAYLOAD_SIZE) +#define TLP_CFG_DW1(pcie, tag, be) \ + (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) +#define TLP_CFG_DW2(bus, devfn, offset) \ + (((bus) << 24) | ((devfn) << 16) | (offset)) +#define TLP_COMP_STATUS(s) (((s) >> 13) & 7) +#define TLP_HDR_SIZE 3 +#define TLP_LOOP 500 + +#define LINK_UP_TIMEOUT HZ +#define LINK_RETRAIN_TIMEOUT HZ + +#define DWORD_MASK 3 + +struct altera_pcie { + struct platform_device *pdev; + void __iomem *cra_base; /* DT Cra */ + int irq; + u8 root_bus_nr; + struct irq_domain *irq_domain; + struct resource bus_range; + struct list_head resources; +}; + +struct tlp_rp_regpair_t { + u32 ctrl; + u32 reg0; + u32 reg1; +}; + +static inline void cra_writel(struct altera_pcie *pcie, const u32 value, + const u32 reg) +{ + writel_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) +{ + return readl_relaxed(pcie->cra_base + reg); +} + +static bool altera_pcie_link_up(struct altera_pcie *pcie) +{ + return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); +} + +/* + * Altera PCIe port uses BAR0 of RC's configuration space as the translation + * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space + * using these registers, so it can be reached by DMA from EP devices. + * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt + * from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge + * should be hidden during enumeration to avoid the sizing and resource + * allocation by PCIe core. 
+ */ +static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, + int offset) +{ + if (pci_is_root_bus(bus) && (devfn == 0) && + (offset == PCI_BASE_ADDRESS_0)) + return true; + + return false; +} + +static void tlp_write_tx(struct altera_pcie *pcie, + struct tlp_rp_regpair_t *tlp_rp_regdata) +{ + cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0); + cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1); + cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); +} + +static bool altera_pcie_valid_device(struct altera_pcie *pcie, + struct pci_bus *bus, int dev) +{ + /* If there is no link, then there is no device */ + if (bus->number != pcie->root_bus_nr) { + if (!altera_pcie_link_up(pcie)) + return false; + } + + /* access only one slot on each root port */ + if (bus->number == pcie->root_bus_nr && dev > 0) + return false; + + return true; +} + +static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) +{ + int i; + bool sop = false; + u32 ctrl; + u32 reg0, reg1; + u32 comp_status = 1; + + /* + * Minimum 2 loops to read TLP headers and 1 loop to read data + * payload. 
+ */ + for (i = 0; i < TLP_LOOP; i++) { + ctrl = cra_readl(pcie, RP_RXCPL_STATUS); + if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { + reg0 = cra_readl(pcie, RP_RXCPL_REG0); + reg1 = cra_readl(pcie, RP_RXCPL_REG1); + + if (ctrl & RP_RXCPL_SOP) { + sop = true; + comp_status = TLP_COMP_STATUS(reg1); + } + + if (ctrl & RP_RXCPL_EOP) { + if (comp_status) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (value) + *value = reg0; + + return PCIBIOS_SUCCESSFUL; + } + } + udelay(5); + } + + return PCIBIOS_DEVICE_NOT_FOUND; +} + +static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, + u32 data, bool align) +{ + struct tlp_rp_regpair_t tlp_rp_regdata; + + tlp_rp_regdata.reg0 = headers[0]; + tlp_rp_regdata.reg1 = headers[1]; + tlp_rp_regdata.ctrl = RP_TX_SOP; + tlp_write_tx(pcie, &tlp_rp_regdata); + + if (align) { + tlp_rp_regdata.reg0 = headers[2]; + tlp_rp_regdata.reg1 = 0; + tlp_rp_regdata.ctrl = 0; + tlp_write_tx(pcie, &tlp_rp_regdata); + + tlp_rp_regdata.reg0 = data; + tlp_rp_regdata.reg1 = 0; + } else { + tlp_rp_regdata.reg0 = headers[2]; + tlp_rp_regdata.reg1 = data; + } + + tlp_rp_regdata.ctrl = RP_TX_EOP; + tlp_write_tx(pcie, &tlp_rp_regdata); +} + +static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, + int where, u8 byte_en, u32 *value) +{ + u32 headers[TLP_HDR_SIZE]; + + headers[0] = TLP_CFGRD_DW0(pcie, bus); + headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); + headers[2] = TLP_CFG_DW2(bus, devfn, where); + + tlp_write_packet(pcie, headers, 0, false); + + return tlp_read_packet(pcie, value); +} + +static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, + int where, u8 byte_en, u32 value) +{ + u32 headers[TLP_HDR_SIZE]; + int ret; + + headers[0] = TLP_CFGWR_DW0(pcie, bus); + headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); + headers[2] = TLP_CFG_DW2(bus, devfn, where); + + /* check alignment to Qword */ + if ((where & 0x7) == 0) + tlp_write_packet(pcie, headers, value, true); + else + 
tlp_write_packet(pcie, headers, value, false); + + ret = tlp_read_packet(pcie, NULL); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + /* + * Monitor changes to PCI_PRIMARY_BUS register on root port + * and update local copy of root bus number accordingly. + */ + if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS)) + pcie->root_bus_nr = (u8)(value); + + return PCIBIOS_SUCCESSFUL; +} + +static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, + u32 *value) +{ + int ret; + u32 data; + u8 byte_en; + + switch (size) { + case 1: + byte_en = 1 << (where & 3); + break; + case 2: + byte_en = 3 << (where & 3); + break; + default: + byte_en = 0xf; + break; + } + + ret = tlp_cfg_dword_read(pcie, busno, devfn, + (where & ~DWORD_MASK), byte_en, &data); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + switch (size) { + case 1: + *value = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *value = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *value = data; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, + u32 value) +{ + u32 data32; + u32 shift = 8 * (where & 3); + u8 byte_en; + + switch (size) { + case 1: + data32 = (value & 0xff) << shift; + byte_en = 1 << (where & 3); + break; + case 2: + data32 = (value & 0xffff) << shift; + byte_en = 3 << (where & 3); + break; + default: + data32 = value; + byte_en = 0xf; + break; + } + + return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK), + byte_en, data32); +} + +static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + struct altera_pcie *pcie = bus->sysdata; + + if (altera_pcie_hide_rc_bar(bus, devfn, where)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) { + *value = 0xffffffff; + return 
PCIBIOS_DEVICE_NOT_FOUND; + } + + return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size, + value); +} + +static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + struct altera_pcie *pcie = bus->sysdata; + + if (altera_pcie_hide_rc_bar(bus, devfn, where)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) + return PCIBIOS_DEVICE_NOT_FOUND; + + return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size, + value); +} + +static struct pci_ops altera_pcie_ops = { + .read = altera_pcie_cfg_read, + .write = altera_pcie_cfg_write, +}; + +static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int offset, u16 *value) +{ + u32 data; + int ret; + + ret = _altera_pcie_cfg_read(pcie, busno, devfn, + PCIE_CAP_OFFSET + offset, sizeof(*value), + &data); + *value = data; + return ret; +} + +static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int offset, u16 value) +{ + return _altera_pcie_cfg_write(pcie, busno, devfn, + PCIE_CAP_OFFSET + offset, sizeof(value), + value); +} + +static void altera_wait_link_retrain(struct altera_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + u16 reg16; + unsigned long start_jiffies; + + /* Wait for link training end. 
*/ + start_jiffies = jiffies; + for (;;) { + altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, + PCI_EXP_LNKSTA, ®16); + if (!(reg16 & PCI_EXP_LNKSTA_LT)) + break; + + if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) { + dev_err(dev, "link retrain timeout\n"); + break; + } + udelay(100); + } + + /* Wait for link is up */ + start_jiffies = jiffies; + for (;;) { + if (altera_pcie_link_up(pcie)) + break; + + if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { + dev_err(dev, "link up timeout\n"); + break; + } + udelay(100); + } +} + +static void altera_pcie_retrain(struct altera_pcie *pcie) +{ + u16 linkcap, linkstat, linkctl; + + if (!altera_pcie_link_up(pcie)) + return; + + /* + * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but + * current speed is 2.5 GB/s. + */ + altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP, + &linkcap); + if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) + return; + + altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA, + &linkstat); + if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { + altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, + PCI_EXP_LNKCTL, &linkctl); + linkctl |= PCI_EXP_LNKCTL_RL; + altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, + PCI_EXP_LNKCTL, linkctl); + + altera_wait_link_retrain(pcie); + } +} + +static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = altera_pcie_intx_map, + .xlate = pci_irqd_intx_xlate, +}; + +static void altera_pcie_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct altera_pcie *pcie; + struct device *dev; + unsigned long status; + u32 bit; + u32 virq; + + chained_irq_enter(chip, desc); + pcie = 
irq_desc_get_handler_data(desc); + dev = &pcie->pdev->dev; + + while ((status = cra_readl(pcie, P2A_INT_STATUS) + & P2A_INT_STS_ALL) != 0) { + for_each_set_bit(bit, &status, PCI_NUM_INTX) { + /* clear interrupts */ + cra_writel(pcie, 1 << bit, P2A_INT_STATUS); + + virq = irq_find_mapping(pcie->irq_domain, bit); + if (virq) + generic_handle_irq(virq); + else + dev_err(dev, "unexpected IRQ, INT%d\n", bit); + } + } + + chained_irq_exit(chip, desc); +} + +static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie) +{ + int err, res_valid = 0; + struct device *dev = &pcie->pdev->dev; + struct resource_entry *win; + + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, + &pcie->resources, NULL); + if (err) + return err; + + err = devm_request_pci_bus_resources(dev, &pcie->resources); + if (err) + goto out_release_res; + + resource_list_for_each_entry(win, &pcie->resources) { + struct resource *res = win->res; + + if (resource_type(res) == IORESOURCE_MEM) + res_valid |= !(res->flags & IORESOURCE_PREFETCH); + } + + if (res_valid) + return 0; + + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; + +out_release_res: + pci_free_resource_list(&pcie->resources); + return err; +} + +static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + + /* Setup INTx */ + pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX, + &intx_domain_ops, pcie); + if (!pcie->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return -ENOMEM; + } + + return 0; +} + +static int altera_pcie_parse_dt(struct altera_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct platform_device *pdev = pcie->pdev; + struct resource *cra; + + cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); + pcie->cra_base = devm_ioremap_resource(dev, cra); + if (IS_ERR(pcie->cra_base)) + return PTR_ERR(pcie->cra_base); + + /* setup IRQ */ 
+ pcie->irq = platform_get_irq(pdev, 0); + if (pcie->irq < 0) { + dev_err(dev, "failed to get IRQ: %d\n", pcie->irq); + return pcie->irq; + } + + irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); + return 0; +} + +static void altera_pcie_host_init(struct altera_pcie *pcie) +{ + altera_pcie_retrain(pcie); +} + +static int altera_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct altera_pcie *pcie; + struct pci_bus *bus; + struct pci_bus *child; + struct pci_host_bridge *bridge; + int ret; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) + return -ENOMEM; + + pcie = pci_host_bridge_priv(bridge); + pcie->pdev = pdev; + + ret = altera_pcie_parse_dt(pcie); + if (ret) { + dev_err(dev, "Parsing DT failed\n"); + return ret; + } + + INIT_LIST_HEAD(&pcie->resources); + + ret = altera_pcie_parse_request_of_pci_ranges(pcie); + if (ret) { + dev_err(dev, "Failed add resources\n"); + return ret; + } + + ret = altera_pcie_init_irq_domain(pcie); + if (ret) { + dev_err(dev, "Failed creating IRQ Domain\n"); + return ret; + } + + /* clear all interrupts */ + cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); + /* enable all interrupts */ + cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); + altera_pcie_host_init(pcie); + + list_splice_init(&pcie->resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = pcie->root_bus_nr; + bridge->ops = &altera_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) + return ret; + + bus = bridge->bus; + + pci_assign_unassigned_bus_resources(bus); + + /* Configure PCI Express setting. 
*/ + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + return ret; +} + +static const struct of_device_id altera_pcie_of_match[] = { + { .compatible = "altr,pcie-root-port-1.0", }, + {}, +}; + +static struct platform_driver altera_pcie_driver = { + .probe = altera_pcie_probe, + .driver = { + .name = "altera-pcie", + .of_match_table = altera_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; + +builtin_platform_driver(altera_pcie_driver); diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c new file mode 100644 index 000000000000..3d8283e450a9 --- /dev/null +++ b/drivers/pci/controller/pcie-cadence-ep.c @@ -0,0 +1,549 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Cadence +// Cadence PCIe endpoint controller driver. +// Author: Cyrille Pitchen + +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-cadence.h" + +#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */ +#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1 +#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3 + +/** + * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver + * @pcie: Cadence PCIe controller + * @max_regions: maximum number of regions supported by hardware + * @ob_region_map: bitmask of mapped outbound regions + * @ob_addr: base addresses in the AXI bus where the outbound regions start + * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ + * dedicated outbound regions is mapped. + * @irq_cpu_addr: base address in the CPU space where a write access triggers + * the sending of a memory write (MSI) / normal message (legacy + * IRQ) TLP through the PCIe bus. + * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ + * dedicated outbound region. + * @irq_pci_fn: the latest PCI function that has updated the mapping of + * the MSI/legacy IRQ dedicated outbound region. 
+ * @irq_pending: bitmask of asserted legacy IRQs. + */ +struct cdns_pcie_ep { + struct cdns_pcie pcie; + u32 max_regions; + unsigned long ob_region_map; + phys_addr_t *ob_addr; + phys_addr_t irq_phys_addr; + void __iomem *irq_cpu_addr; + u64 irq_pci_addr; + u8 irq_pci_fn; + u8 irq_pending; +}; + +static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, + struct pci_epf_header *hdr) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + + cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid); + cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid); + cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code); + cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE, + hdr->subclass_code | hdr->baseclass_code << 8); + cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE, + hdr->cache_line_size); + cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id); + cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin); + + /* + * Vendor ID can only be modified from function 0, all other functions + * use the same vendor ID as function 0. + */ + if (fn == 0) { + /* Update the vendor IDs. */ + u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) | + CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id); + + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); + } + + return 0; +} + +static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, + struct pci_epf_bar *epf_bar) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + dma_addr_t bar_phys = epf_bar->phys_addr; + enum pci_barno bar = epf_bar->barno; + int flags = epf_bar->flags; + u32 addr0, addr1, reg, cfg, b, aperture, ctrl; + u64 sz; + + /* BAR size is 2^(aperture + 7) */ + sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE); + /* + * roundup_pow_of_two() returns an unsigned long, which is not suited + * for 64bit values. 
+ */ + sz = 1ULL << fls64(sz - 1); + aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */ + + if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS; + } else { + bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH); + bool is_64bits = sz > SZ_2G; + + if (is_64bits && (bar & 1)) + return -EINVAL; + + if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) + epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; + + if (is_64bits && is_prefetch) + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS; + else if (is_prefetch) + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS; + else if (is_64bits) + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS; + else + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS; + } + + addr0 = lower_32_bits(bar_phys); + addr1 = upper_32_bits(bar_phys); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), + addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), + addr1); + + if (bar < BAR_4) { + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn); + b = bar; + } else { + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn); + b = bar - BAR_4; + } + + cfg = cdns_pcie_readl(pcie, reg); + cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); + cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); + cdns_pcie_writel(pcie, reg, cfg); + + return 0; +} + +static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, + struct pci_epf_bar *epf_bar) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + enum pci_barno bar = epf_bar->barno; + u32 reg, cfg, b, ctrl; + + if (bar < BAR_4) { + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn); + b = bar; + } else { + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn); + b = bar - BAR_4; + } + + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; + cfg = cdns_pcie_readl(pcie, reg); + cfg &= 
~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); + cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); + cdns_pcie_writel(pcie, reg, cfg); + + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); +} + +static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, + u64 pci_addr, size_t size) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 r; + + r = find_first_zero_bit(&ep->ob_region_map, + sizeof(ep->ob_region_map) * BITS_PER_LONG); + if (r >= ep->max_regions - 1) { + dev_err(&epc->dev, "no free outbound region\n"); + return -EINVAL; + } + + cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size); + + set_bit(r, &ep->ob_region_map); + ep->ob_addr[r] = addr; + + return 0; +} + +static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, + phys_addr_t addr) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 r; + + for (r = 0; r < ep->max_regions - 1; r++) + if (ep->ob_addr[r] == addr) + break; + + if (r == ep->max_regions - 1) + return; + + cdns_pcie_reset_outbound_region(pcie, r); + + ep->ob_addr[r] = 0; + clear_bit(r, &ep->ob_region_map); +} + +static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; + u16 flags; + + /* + * Set the Multiple Message Capable bitfield into the Message Control + * register. 
+ */ + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); + flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1); + flags |= PCI_MSI_FLAGS_64BIT; + flags &= ~PCI_MSI_FLAGS_MASKBIT; + cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags); + + return 0; +} + +static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; + u16 flags, mmc, mme; + + /* Validate that the MSI feature is actually enabled. */ + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); + if (!(flags & PCI_MSI_FLAGS_ENABLE)) + return -EINVAL; + + /* + * Get the Multiple Message Enable bitfield from the Message Control + * register. + */ + mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1; + mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; + + return mme; +} + +static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, + u8 intx, bool is_asserted) +{ + struct cdns_pcie *pcie = &ep->pcie; + u32 r = ep->max_regions - 1; + u32 offset; + u16 status; + u8 msg_code; + + intx &= 3; + + /* Set the outbound region if needed. */ + if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || + ep->irq_pci_fn != fn)) { + /* Last region was reserved for IRQ writes. 
*/ + cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r, + ep->irq_phys_addr); + ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; + ep->irq_pci_fn = fn; + } + + if (is_asserted) { + ep->irq_pending |= BIT(intx); + msg_code = MSG_CODE_ASSERT_INTA + intx; + } else { + ep->irq_pending &= ~BIT(intx); + msg_code = MSG_CODE_DEASSERT_INTA + intx; + } + + status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS); + if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { + status ^= PCI_STATUS_INTERRUPT; + cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status); + } + + offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | + CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | + CDNS_PCIE_MSG_NO_DATA; + writel(0, ep->irq_cpu_addr + offset); +} + +static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx) +{ + u16 cmd; + + cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND); + if (cmd & PCI_COMMAND_INTX_DISABLE) + return -EINVAL; + + cdns_pcie_ep_assert_intx(ep, fn, intx, true); + /* + * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq() + * from drivers/pci/dwc/pci-dra7xx.c + */ + mdelay(1); + cdns_pcie_ep_assert_intx(ep, fn, intx, false); + return 0; +} + +static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, + u8 interrupt_num) +{ + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; + u16 flags, mme, data, data_mask; + u8 msi_count; + u64 pci_addr, pci_addr_mask = 0xff; + + /* Check whether the MSI feature has been enabled by the PCI host. */ + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); + if (!(flags & PCI_MSI_FLAGS_ENABLE)) + return -EINVAL; + + /* Get the number of enabled MSIs */ + mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; + msi_count = 1 << mme; + if (!interrupt_num || interrupt_num > msi_count) + return -EINVAL; + + /* Compute the data value to be written. 
*/ + data_mask = msi_count - 1; + data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64); + data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); + + /* Get the PCI address where to write the data into. */ + pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI); + pci_addr <<= 32; + pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO); + pci_addr &= GENMASK_ULL(63, 2); + + /* Set the outbound region if needed. */ + if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || + ep->irq_pci_fn != fn)) { + /* Last region was reserved for IRQ writes. */ + cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1, + false, + ep->irq_phys_addr, + pci_addr & ~pci_addr_mask, + pci_addr_mask + 1); + ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); + ep->irq_pci_fn = fn; + } + writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); + + return 0; +} + +static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, + enum pci_epc_irq_type type, u8 interrupt_num) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return cdns_pcie_ep_send_legacy_irq(ep, fn, 0); + + case PCI_EPC_IRQ_MSI: + return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); + + default: + break; + } + + return -EINVAL; +} + +static int cdns_pcie_ep_start(struct pci_epc *epc) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + struct pci_epf *epf; + u32 cfg; + + /* + * BIT(0) is hardwired to 1, hence function 0 is always enabled + * and can't be disabled anyway. + */ + cfg = BIT(0); + list_for_each_entry(epf, &epc->pci_epf, list) + cfg |= BIT(epf->func_no); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); + + /* + * The PCIe links are automatically established by the controller + * once for all at powerup: the software can neither start nor stop + * those links later at runtime. 
+ * + * Then we only have to notify the EP core that our links are already + * established. However we don't call directly pci_epc_linkup() because + * we've already locked the epc->lock. + */ + list_for_each_entry(epf, &epc->pci_epf, list) + pci_epf_linkup(epf); + + return 0; +} + +static const struct pci_epc_ops cdns_pcie_epc_ops = { + .write_header = cdns_pcie_ep_write_header, + .set_bar = cdns_pcie_ep_set_bar, + .clear_bar = cdns_pcie_ep_clear_bar, + .map_addr = cdns_pcie_ep_map_addr, + .unmap_addr = cdns_pcie_ep_unmap_addr, + .set_msi = cdns_pcie_ep_set_msi, + .get_msi = cdns_pcie_ep_get_msi, + .raise_irq = cdns_pcie_ep_raise_irq, + .start = cdns_pcie_ep_start, +}; + +static const struct of_device_id cdns_pcie_ep_of_match[] = { + { .compatible = "cdns,cdns-pcie-ep" }, + + { }, +}; + +static int cdns_pcie_ep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct cdns_pcie_ep *ep; + struct cdns_pcie *pcie; + struct pci_epc *epc; + struct resource *res; + int ret; + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + pcie = &ep->pcie; + pcie->is_rc = false; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); + pcie->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->reg_base)) { + dev_err(dev, "missing \"reg\"\n"); + return PTR_ERR(pcie->reg_base); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); + if (!res) { + dev_err(dev, "missing \"mem\"\n"); + return -EINVAL; + } + pcie->mem_res = res; + + ret = of_property_read_u32(np, "cdns,max-outbound-regions", + &ep->max_regions); + if (ret < 0) { + dev_err(dev, "missing \"cdns,max-outbound-regions\"\n"); + return ret; + } + ep->ob_addr = devm_kzalloc(dev, ep->max_regions * sizeof(*ep->ob_addr), + GFP_KERNEL); + if (!ep->ob_addr) + return -ENOMEM; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() 
failed\n"); + goto err_get_sync; + } + + /* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */ + cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0)); + + epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops); + if (IS_ERR(epc)) { + dev_err(dev, "failed to create epc device\n"); + ret = PTR_ERR(epc); + goto err_init; + } + + epc_set_drvdata(epc, ep); + + if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) + epc->max_functions = 1; + + ret = pci_epc_mem_init(epc, pcie->mem_res->start, + resource_size(pcie->mem_res)); + if (ret < 0) { + dev_err(dev, "failed to initialize the memory space\n"); + goto err_init; + } + + ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, + SZ_128K); + if (!ep->irq_cpu_addr) { + dev_err(dev, "failed to reserve memory space for MSI\n"); + ret = -ENOMEM; + goto free_epc_mem; + } + ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; + + return 0; + + free_epc_mem: + pci_epc_mem_exit(epc); + + err_init: + pm_runtime_put_sync(dev); + + err_get_sync: + pm_runtime_disable(dev); + + return ret; +} + +static void cdns_pcie_ep_shutdown(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + ret = pm_runtime_put_sync(dev); + if (ret < 0) + dev_dbg(dev, "pm_runtime_put_sync failed\n"); + + pm_runtime_disable(dev); + + /* The PCIe controller can't be disabled. */ +} + +static struct platform_driver cdns_pcie_ep_driver = { + .driver = { + .name = "cdns-pcie-ep", + .of_match_table = cdns_pcie_ep_of_match, + }, + .probe = cdns_pcie_ep_probe, + .shutdown = cdns_pcie_ep_shutdown, +}; +builtin_platform_driver(cdns_pcie_ep_driver); diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c new file mode 100644 index 000000000000..a4ebbd37b553 --- /dev/null +++ b/drivers/pci/controller/pcie-cadence-host.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Cadence +// Cadence PCIe host controller driver. 
+// Author: Cyrille Pitchen + +#include +#include +#include +#include +#include + +#include "pcie-cadence.h" + +/** + * struct cdns_pcie_rc - private data for this PCIe Root Complex driver + * @pcie: Cadence PCIe controller + * @dev: pointer to PCIe device + * @cfg_res: start/end offsets in the physical system memory to map PCI + * configuration space accesses + * @bus_range: first/last buses behind the PCIe host controller + * @cfg_base: IO mapped window to access the PCI configuration space of a + * single function at a time + * @max_regions: maximum number of regions supported by the hardware + * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address + * translation (nbits sets into the "no BAR match" register) + * @vendor_id: PCI vendor ID + * @device_id: PCI device ID + */ +struct cdns_pcie_rc { + struct cdns_pcie pcie; + struct device *dev; + struct resource *cfg_res; + struct resource *bus_range; + void __iomem *cfg_base; + u32 max_regions; + u32 no_bar_nbits; + u16 vendor_id; + u16 device_id; +}; + +static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct pci_host_bridge *bridge = pci_find_host_bridge(bus); + struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); + struct cdns_pcie *pcie = &rc->pcie; + unsigned int busn = bus->number; + u32 addr0, desc0; + + if (busn == rc->bus_range->start) { + /* + * Only the root port (devfn == 0) is connected to this bus. + * All other PCI devices are behind some bridge hence on another + * bus. + */ + if (devfn) + return NULL; + + return pcie->reg_base + (where & 0xfff); + } + + /* Update Output registers for AXI region 0. */ + addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) | + CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) | + CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0); + + /* Configuration Type 0 or Type 1 access. 
*/ + desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | + CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); + /* + * The bus number was already set once for all in desc1 by + * cdns_pcie_host_init_address_translation(). + */ + if (busn == rc->bus_range->start + 1) + desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; + else + desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1; + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0); + + return rc->cfg_base + (where & 0xfff); +} + +static struct pci_ops cdns_pcie_host_ops = { + .map_bus = cdns_pci_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static const struct of_device_id cdns_pcie_host_of_match[] = { + { .compatible = "cdns,cdns-pcie-host" }, + + { }, +}; + +static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + u32 value, ctrl; + + /* + * Set the root complex BAR configuration register: + * - disable both BAR0 and BAR1. + * - enable Prefetchable Memory Base and Limit registers in type 1 + * config space (64 bits). + * - enable IO Base and Limit registers in type 1 config + * space (32 bits). 
+ */ + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; + value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) | + CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) | + CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE | + CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS | + CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE | + CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS; + cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); + + /* Set root port configuration space */ + if (rc->vendor_id != 0xffff) + cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + if (rc->device_id != 0xffff) + cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); + + cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0); + cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0); + cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); + + return 0; +} + +static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct resource *cfg_res = rc->cfg_res; + struct resource *mem_res = pcie->mem_res; + struct resource *bus_range = rc->bus_range; + struct device *dev = rc->dev; + struct device_node *np = dev->of_node; + struct of_pci_range_parser parser; + struct of_pci_range range; + u32 addr0, addr1, desc1; + u64 cpu_addr; + int r, err; + + /* + * Reserve region 0 for PCI configure space accesses: + * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by + * cdns_pci_map_bus(), other region registers are set here once for all. + */ + addr1 = 0; /* Should be programmed to zero. 
*/ + desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); + + cpu_addr = cfg_res->start - mem_res->start; + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1); + + err = of_pci_range_parser_init(&parser, np); + if (err) + return err; + + r = 1; + for_each_of_pci_range(&parser, &range) { + bool is_io; + + if (r >= rc->max_regions) + break; + + if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM) + is_io = false; + else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO) + is_io = true; + else + continue; + + cdns_pcie_set_outbound_region(pcie, 0, r, is_io, + range.cpu_addr, + range.pci_addr, + range.size); + r++; + } + + /* + * Set Root Port no BAR match Inbound Translation registers: + * needed for MSI and DMA. + * Root Port BAR0 and BAR1 are disabled, hence no need to set their + * inbound translation registers. 
+ */ + addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits); + addr1 = 0; + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1); + + return 0; +} + +static int cdns_pcie_host_init(struct device *dev, + struct list_head *resources, + struct cdns_pcie_rc *rc) +{ + struct resource *bus_range = NULL; + int err; + + /* Parse our PCI ranges and request their resources */ + err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range); + if (err) + return err; + + rc->bus_range = bus_range; + rc->pcie.bus = bus_range->start; + + err = cdns_pcie_host_init_root_port(rc); + if (err) + goto err_out; + + err = cdns_pcie_host_init_address_translation(rc); + if (err) + goto err_out; + + return 0; + + err_out: + pci_free_resource_list(resources); + return err; +} + +static int cdns_pcie_host_probe(struct platform_device *pdev) +{ + const char *type; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct pci_host_bridge *bridge; + struct list_head resources; + struct cdns_pcie_rc *rc; + struct cdns_pcie *pcie; + struct resource *res; + int ret; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); + if (!bridge) + return -ENOMEM; + + rc = pci_host_bridge_priv(bridge); + rc->dev = dev; + + pcie = &rc->pcie; + pcie->is_rc = true; + + rc->max_regions = 32; + of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions); + + rc->no_bar_nbits = 32; + of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits); + + rc->vendor_id = 0xffff; + of_property_read_u16(np, "vendor-id", &rc->vendor_id); + + rc->device_id = 0xffff; + of_property_read_u16(np, "device-id", &rc->device_id); + + type = of_get_property(np, "device_type", NULL); + if (!type || strcmp(type, "pci")) { + dev_err(dev, "invalid \"device_type\" %s\n", type); + return -EINVAL; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); + pcie->reg_base = 
devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->reg_base)) { + dev_err(dev, "missing \"reg\"\n"); + return PTR_ERR(pcie->reg_base); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(rc->cfg_base)) { + dev_err(dev, "missing \"cfg\"\n"); + return PTR_ERR(rc->cfg_base); + } + rc->cfg_res = res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); + if (!res) { + dev_err(dev, "missing \"mem\"\n"); + return -EINVAL; + } + pcie->mem_res = res; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() failed\n"); + goto err_get_sync; + } + + ret = cdns_pcie_host_init(dev, &resources, rc); + if (ret) + goto err_init; + + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->busnr = pcie->bus; + bridge->ops = &cdns_pcie_host_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_host_probe(bridge); + if (ret < 0) + goto err_host_probe; + + return 0; + + err_host_probe: + pci_free_resource_list(&resources); + + err_init: + pm_runtime_put_sync(dev); + + err_get_sync: + pm_runtime_disable(dev); + + return ret; +} + +static struct platform_driver cdns_pcie_host_driver = { + .driver = { + .name = "cdns-pcie-host", + .of_match_table = cdns_pcie_host_of_match, + }, + .probe = cdns_pcie_host_probe, +}; +builtin_platform_driver(cdns_pcie_host_driver); diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c new file mode 100644 index 000000000000..138d113eb45d --- /dev/null +++ b/drivers/pci/controller/pcie-cadence.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Cadence +// Cadence PCIe controller driver. 
+// Author: Cyrille Pitchen + +#include + +#include "pcie-cadence.h" + +void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, + u32 r, bool is_io, + u64 cpu_addr, u64 pci_addr, size_t size) +{ + /* + * roundup_pow_of_two() returns an unsigned long, which is not suited + * for 64bit values. + */ + u64 sz = 1ULL << fls64(size - 1); + int nbits = ilog2(sz); + u32 addr0, addr1, desc0, desc1; + + if (nbits < 8) + nbits = 8; + + /* Set the PCI address */ + addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) | + (lower_32_bits(pci_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(pci_addr); + + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1); + + /* Set the PCIe header descriptor */ + if (is_io) + desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO; + else + desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM; + desc1 = 0; + + /* + * Whatever Bit [23] is set or not inside DESC0 register of the outbound + * PCIe descriptor, the PCI function number must be set into + * Bits [26:24] of DESC0 anyway. + * + * In Root Complex mode, the function number is always 0 but in Endpoint + * mode, the PCIe controller may support more than one function. This + * function number needs to be set properly into the outbound PCIe + * descriptor. + * + * Besides, setting Bit [23] is mandatory when in Root Complex mode: + * then the driver must provide the bus, resp. device, number in + * Bits [7:0] of DESC1, resp. Bits[31:27] of DESC0. Like the function + * number, the device number is always 0 in Root Complex mode. + * + * However when in Endpoint mode, we can clear Bit [23] of DESC0, hence + * the PCIe controller will use the captured values for the bus and + * device numbers. + */ + if (pcie->is_rc) { + /* The device and function numbers are always 0. 
*/ + desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | + CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); + desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); + } else { + /* + * Use captured values for bus and device numbers but still + * need to set the function number. + */ + desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); + } + + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); + + /* Set the CPU address */ + cpu_addr -= pcie->mem_res->start; + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); +} + +void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, + u32 r, u64 cpu_addr) +{ + u32 addr0, addr1, desc0, desc1; + + desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG; + desc1 = 0; + + /* See cdns_pcie_set_outbound_region() comments above. 
*/ + if (pcie->is_rc) { + desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | + CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); + desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); + } else { + desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); + } + + /* Set the CPU address */ + cpu_addr -= pcie->mem_res->start; + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); +} + +void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r) +{ + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); + + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0); + + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0); +} diff --git a/drivers/pci/controller/pcie-cadence.h b/drivers/pci/controller/pcie-cadence.h new file mode 100644 index 000000000000..4bb27333b05c --- /dev/null +++ b/drivers/pci/controller/pcie-cadence.h @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Cadence +// Cadence PCIe controller driver. 
+// Author: Cyrille Pitchen + +#ifndef _PCIE_CADENCE_H +#define _PCIE_CADENCE_H + +#include +#include + +/* + * Local Management Registers + */ +#define CDNS_PCIE_LM_BASE 0x00100000 + +/* Vendor ID Register */ +#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044) +#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0) +#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0 +#define CDNS_PCIE_LM_ID_VENDOR(vid) \ + (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK) +#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16) +#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16 +#define CDNS_PCIE_LM_ID_SUBSYS(sub) \ + (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK) + +/* Root Port Requestor ID Register */ +#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228) +#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0) +#define CDNS_PCIE_LM_RP_RID_SHIFT 0 +#define CDNS_PCIE_LM_RP_RID_(rid) \ + (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK) + +/* Endpoint Bus and Device Number Register */ +#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c) +#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0) +#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0 +#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8) +#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8 + +/* Endpoint Function f BAR b Configuration Registers */ +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \ + (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \ + (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ + (GENMASK(4, 0) << ((b) * 8)) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ + (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ + (GENMASK(7, 5) << ((b) * 8)) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ + (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) + +/* Endpoint Function Configuration Register */ 
+#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0) + +/* Root Complex BAR Configuration Register */ +#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \ + (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \ + (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \ + (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \ + (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK) +#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17) +#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0 +#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18) +#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19) +#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0 +#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20) +#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31) + +/* BAR control values applicable to both Endpoint Function and Root Complex */ +#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 + + +/* + * Endpoint Function Registers (PCI configuration space for endpoint functions) + */ +#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) + +#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90 + +/* + * Root Port Registers (PCI configuration space for the root port function) + */ +#define CDNS_PCIE_RP_BASE 0x00200000 + + +/* + * Address 
Translation Registers + */ +#define CDNS_PCIE_AT_BASE 0x00400000 + +/* Region r Outbound AXI to PCIe Address Translation Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ + (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ + (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ + (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) + +/* Region r Outbound AXI to PCIe Address Translation Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ + (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) + +/* Region r Outbound PCIe Descriptor Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \ + (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0) +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2 +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6 +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd +/* Bit 23 MUST be set in RC mode. 
*/ +#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) +#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) +#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ + (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) + +/* Region r Outbound PCIe Descriptor Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \ + (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020) +#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0) +#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \ + ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK) + +/* Region r AXI Region Base Address Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ + (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0) +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK) + +/* Region r AXI Region Base Address Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ + (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020) + +/* Root Port BAR Inbound PCIe to AXI Address Translation Register */ +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \ + (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008) +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0) +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK) +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \ + (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008) + +enum cdns_pcie_rp_bar { + RP_BAR0, + RP_BAR1, + RP_NO_BAR +}; + +/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ +#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ + (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) +#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ + (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) + +/* Normal/Vendor specific message access: offset inside some outbound region */ +#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK 
GENMASK(7, 5) +#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \ + (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK) +#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) +#define CDNS_PCIE_NORMAL_MSG_CODE(code) \ + (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) +#define CDNS_PCIE_MSG_NO_DATA BIT(16) + +enum cdns_pcie_msg_code { + MSG_CODE_ASSERT_INTA = 0x20, + MSG_CODE_ASSERT_INTB = 0x21, + MSG_CODE_ASSERT_INTC = 0x22, + MSG_CODE_ASSERT_INTD = 0x23, + MSG_CODE_DEASSERT_INTA = 0x24, + MSG_CODE_DEASSERT_INTB = 0x25, + MSG_CODE_DEASSERT_INTC = 0x26, + MSG_CODE_DEASSERT_INTD = 0x27, +}; + +enum cdns_pcie_msg_routing { + /* Route to Root Complex */ + MSG_ROUTING_TO_RC, + + /* Use Address Routing */ + MSG_ROUTING_BY_ADDR, + + /* Use ID Routing */ + MSG_ROUTING_BY_ID, + + /* Route as Broadcast Message from Root Complex */ + MSG_ROUTING_BCAST, + + /* Local message; terminate at receiver (INTx messages) */ + MSG_ROUTING_LOCAL, + + /* Gather & route to Root Complex (PME_TO_Ack message) */ + MSG_ROUTING_GATHER, +}; + +/** + * struct cdns_pcie - private data for Cadence PCIe controller drivers + * @reg_base: IO mapped register base + * @mem_res: start/end offsets in the physical system memory to map PCI accesses + * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint. 
+ * @bus: In Root Complex mode, the bus number + */ +struct cdns_pcie { + void __iomem *reg_base; + struct resource *mem_res; + bool is_rc; + u8 bus; +}; + +/* Register access */ +static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) +{ + writeb(value, pcie->reg_base + reg); +} + +static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value) +{ + writew(value, pcie->reg_base + reg); +} + +static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) +{ + writel(value, pcie->reg_base + reg); +} + +static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) +{ + return readl(pcie->reg_base + reg); +} + +/* Root Port register access */ +static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie, + u32 reg, u8 value) +{ + writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); +} + +static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie, + u32 reg, u16 value) +{ + writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); +} + +/* Endpoint Function register access */ +static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn, + u32 reg, u8 value) +{ + writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn, + u32 reg, u16 value) +{ + writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, + u32 reg, u16 value) +{ + writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg) +{ + return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg) +{ + return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) +{ + return readl(pcie->reg_base + 
CDNS_PCIE_EP_FUNC_BASE(fn) + reg); +} + +void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, + u32 r, bool is_io, + u64 cpu_addr, u64 pci_addr, size_t size); + +void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, + u32 r, u64 cpu_addr); + +void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r); + +#endif /* _PCIE_CADENCE_H */ diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c new file mode 100644 index 000000000000..aa55b064f64d --- /dev/null +++ b/drivers/pci/controller/pcie-iproc-bcma.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015 Broadcom Corporation + * Copyright (C) 2015 Hauke Mehrtens + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-iproc.h" + + +/* NS: CLASS field is R/O, and set to wrong 0x200 value */ +static void bcma_pcie2_fixup_class(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_PCI << 8; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); + +static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct iproc_pcie *pcie = dev->sysdata; + struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); + + return bcma_core_irq(bdev, 5); +} + +static int iproc_pcie_bcma_probe(struct bcma_device *bdev) +{ + struct device *dev = &bdev->dev; + struct iproc_pcie *pcie; + LIST_HEAD(resources); + struct pci_host_bridge *bridge; + int ret; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) + return -ENOMEM; + + pcie = pci_host_bridge_priv(bridge); + + pcie->dev = dev; + + pcie->type = IPROC_PCIE_PAXB_BCMA; + pcie->base = bdev->io_addr; + if (!pcie->base) { + dev_err(dev, "no controller registers\n"); + return -ENOMEM; + } + + pcie->base_addr = bdev->addr; + + pcie->mem.start = bdev->addr_s[0]; 
+ pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1; + pcie->mem.name = "PCIe MEM space"; + pcie->mem.flags = IORESOURCE_MEM; + pci_add_resource(&resources, &pcie->mem); + + pcie->map_irq = iproc_pcie_bcma_map_irq; + + ret = iproc_pcie_setup(pcie, &resources); + if (ret) { + dev_err(dev, "PCIe controller setup failed\n"); + pci_free_resource_list(&resources); + return ret; + } + + bcma_set_drvdata(bdev, pcie); + return 0; +} + +static void iproc_pcie_bcma_remove(struct bcma_device *bdev) +{ + struct iproc_pcie *pcie = bcma_get_drvdata(bdev); + + iproc_pcie_remove(pcie); +} + +static const struct bcma_device_id iproc_pcie_bcma_table[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), + {}, +}; +MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); + +static struct bcma_driver iproc_pcie_bcma_driver = { + .name = KBUILD_MODNAME, + .id_table = iproc_pcie_bcma_table, + .probe = iproc_pcie_bcma_probe, + .remove = iproc_pcie_bcma_remove, +}; + +static int __init iproc_pcie_bcma_init(void) +{ + return bcma_driver_register(&iproc_pcie_bcma_driver); +} +module_init(iproc_pcie_bcma_init); + +static void __exit iproc_pcie_bcma_exit(void) +{ + bcma_driver_unregister(&iproc_pcie_bcma_driver); +} +module_exit(iproc_pcie_bcma_exit); + +MODULE_AUTHOR("Hauke Mehrtens"); +MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c new file mode 100644 index 000000000000..9deb56989d72 --- /dev/null +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -0,0 +1,671 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015 Broadcom Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-iproc.h" + +#define IPROC_MSI_INTR_EN_SHIFT 11 +#define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT) +#define IPROC_MSI_INT_N_EVENT_SHIFT 1 +#define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT) 
+#define IPROC_MSI_EQ_EN_SHIFT 0 +#define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT) + +#define IPROC_MSI_EQ_MASK 0x3f + +/* Max number of GIC interrupts */ +#define NR_HW_IRQS 6 + +/* Number of entries in each event queue */ +#define EQ_LEN 64 + +/* Size of each event queue memory region */ +#define EQ_MEM_REGION_SIZE SZ_4K + +/* Size of each MSI address region */ +#define MSI_MEM_REGION_SIZE SZ_4K + +enum iproc_msi_reg { + IPROC_MSI_EQ_PAGE = 0, + IPROC_MSI_EQ_PAGE_UPPER, + IPROC_MSI_PAGE, + IPROC_MSI_PAGE_UPPER, + IPROC_MSI_CTRL, + IPROC_MSI_EQ_HEAD, + IPROC_MSI_EQ_TAIL, + IPROC_MSI_INTS_EN, + IPROC_MSI_REG_SIZE, +}; + +struct iproc_msi; + +/** + * iProc MSI group + * + * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI + * event queue. + * + * @msi: pointer to iProc MSI data + * @gic_irq: GIC interrupt + * @eq: Event queue number + */ +struct iproc_msi_grp { + struct iproc_msi *msi; + int gic_irq; + unsigned int eq; +}; + +/** + * iProc event queue based MSI + * + * Only meant to be used on platforms without MSI support integrated into the + * GIC. 
+ * + * @pcie: pointer to iProc PCIe data + * @reg_offsets: MSI register offsets + * @grps: MSI groups + * @nr_irqs: number of total interrupts connected to GIC + * @nr_cpus: number of toal CPUs + * @has_inten_reg: indicates the MSI interrupt enable register needs to be + * set explicitly (required for some legacy platforms) + * @bitmap: MSI vector bitmap + * @bitmap_lock: lock to protect access to the MSI bitmap + * @nr_msi_vecs: total number of MSI vectors + * @inner_domain: inner IRQ domain + * @msi_domain: MSI IRQ domain + * @nr_eq_region: required number of 4K aligned memory region for MSI event + * queues + * @nr_msi_region: required number of 4K aligned address region for MSI posted + * writes + * @eq_cpu: pointer to allocated memory region for MSI event queues + * @eq_dma: DMA address of MSI event queues + * @msi_addr: MSI address + */ +struct iproc_msi { + struct iproc_pcie *pcie; + const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE]; + struct iproc_msi_grp *grps; + int nr_irqs; + int nr_cpus; + bool has_inten_reg; + unsigned long *bitmap; + struct mutex bitmap_lock; + unsigned int nr_msi_vecs; + struct irq_domain *inner_domain; + struct irq_domain *msi_domain; + unsigned int nr_eq_region; + unsigned int nr_msi_region; + void *eq_cpu; + dma_addr_t eq_dma; + phys_addr_t msi_addr; +}; + +static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { + { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 }, +}; + +static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { + { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 }, + { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 }, + { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 }, + 
{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c }, +}; + +static inline u32 iproc_msi_read_reg(struct iproc_msi *msi, + enum iproc_msi_reg reg, + unsigned int eq) +{ + struct iproc_pcie *pcie = msi->pcie; + + return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]); +} + +static inline void iproc_msi_write_reg(struct iproc_msi *msi, + enum iproc_msi_reg reg, + int eq, u32 val) +{ + struct iproc_pcie *pcie = msi->pcie; + + writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]); +} + +static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq) +{ + return (hwirq % msi->nr_irqs); +} + +static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi, + unsigned long hwirq) +{ + if (msi->nr_msi_region > 1) + return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE; + else + return hwirq_to_group(msi, hwirq) * sizeof(u32); +} + +static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq) +{ + if (msi->nr_eq_region > 1) + return eq * EQ_MEM_REGION_SIZE; + else + return eq * EQ_LEN * sizeof(u32); +} + +static struct irq_chip iproc_msi_irq_chip = { + .name = "iProc-MSI", +}; + +static struct msi_domain_info iproc_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .chip = &iproc_msi_irq_chip, +}; + +/* + * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a + * dedicated event queue. Each MSI group can support up to 64 MSI vectors. + * + * The number of MSI groups varies between different iProc SoCs. The total + * number of CPU cores also varies. To support MSI IRQ affinity, we + * distribute GIC interrupts across all available CPUs. MSI vector is moved + * from one GIC interrupt to another to steer to the target CPU. 
+ * + * Assuming: + * - the number of MSI groups is M + * - the number of CPU cores is N + * - M is always a multiple of N + * + * Total number of raw MSI vectors = M * 64 + * Total number of supported MSI vectors = (M * 64) / N + */ +static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq) +{ + return (hwirq % msi->nr_cpus); +} + +static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi, + unsigned long hwirq) +{ + return (hwirq - hwirq_to_cpu(msi, hwirq)); +} + +static int iproc_msi_irq_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + struct iproc_msi *msi = irq_data_get_irq_chip_data(data); + int target_cpu = cpumask_first(mask); + int curr_cpu; + + curr_cpu = hwirq_to_cpu(msi, data->hwirq); + if (curr_cpu == target_cpu) + return IRQ_SET_MASK_OK_DONE; + + /* steer MSI to the target CPU */ + data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; + + return IRQ_SET_MASK_OK; +} + +static void iproc_msi_irq_compose_msi_msg(struct irq_data *data, + struct msi_msg *msg) +{ + struct iproc_msi *msi = irq_data_get_irq_chip_data(data); + dma_addr_t addr; + + addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq); + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = data->hwirq << 5; +} + +static struct irq_chip iproc_msi_bottom_irq_chip = { + .name = "MSI", + .irq_set_affinity = iproc_msi_irq_set_affinity, + .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg, +}; + +static int iproc_msi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct iproc_msi *msi = domain->host_data; + int hwirq, i; + + mutex_lock(&msi->bitmap_lock); + + /* Allocate 'nr_cpus' number of MSI vectors each time */ + hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0, + msi->nr_cpus, 0); + if (hwirq < msi->nr_msi_vecs) { + bitmap_set(msi->bitmap, hwirq, msi->nr_cpus); + } else { + 
mutex_unlock(&msi->bitmap_lock); + return -ENOSPC; + } + + mutex_unlock(&msi->bitmap_lock); + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, + &iproc_msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + } + + return hwirq; +} + +static void iproc_msi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *data = irq_domain_get_irq_data(domain, virq); + struct iproc_msi *msi = irq_data_get_irq_chip_data(data); + unsigned int hwirq; + + mutex_lock(&msi->bitmap_lock); + + hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq); + bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus); + + mutex_unlock(&msi->bitmap_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = iproc_msi_irq_domain_alloc, + .free = iproc_msi_irq_domain_free, +}; + +static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head) +{ + u32 *msg, hwirq; + unsigned int offs; + + offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32); + msg = (u32 *)(msi->eq_cpu + offs); + hwirq = readl(msg); + hwirq = (hwirq >> 5) + (hwirq & 0x1f); + + /* + * Since we have multiple hwirq mapped to a single MSI vector, + * now we need to derive the hwirq at CPU0. It can then be used to + * mapped back to virq. + */ + return hwirq_to_canonical_hwirq(msi, hwirq); +} + +static void iproc_msi_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct iproc_msi_grp *grp; + struct iproc_msi *msi; + u32 eq, head, tail, nr_events; + unsigned long hwirq; + int virq; + + chained_irq_enter(chip, desc); + + grp = irq_desc_get_handler_data(desc); + msi = grp->msi; + eq = grp->eq; + + /* + * iProc MSI event queue is tracked by head and tail pointers. Head + * pointer indicates the next entry (MSI data) to be consumed by SW in + * the queue and needs to be updated by SW. 
iProc MSI core uses the + * tail pointer as the next data insertion point. + * + * Entries between head and tail pointers contain valid MSI data. MSI + * data is guaranteed to be in the event queue memory before the tail + * pointer is updated by the iProc MSI core. + */ + head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD, + eq) & IPROC_MSI_EQ_MASK; + do { + tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL, + eq) & IPROC_MSI_EQ_MASK; + + /* + * Figure out total number of events (MSI data) to be + * processed. + */ + nr_events = (tail < head) ? + (EQ_LEN - (head - tail)) : (tail - head); + if (!nr_events) + break; + + /* process all outstanding events */ + while (nr_events--) { + hwirq = decode_msi_hwirq(msi, eq, head); + virq = irq_find_mapping(msi->inner_domain, hwirq); + generic_handle_irq(virq); + + head++; + head %= EQ_LEN; + } + + /* + * Now all outstanding events have been processed. Update the + * head pointer. + */ + iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head); + + /* + * Now go read the tail pointer again to see if there are new + * oustanding events that came in during the above window. 
+ */ + } while (true); + + chained_irq_exit(chip, desc); +} + +static void iproc_msi_enable(struct iproc_msi *msi) +{ + int i, eq; + u32 val; + + /* Program memory region for each event queue */ + for (i = 0; i < msi->nr_eq_region; i++) { + dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE); + + iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i, + lower_32_bits(addr)); + iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i, + upper_32_bits(addr)); + } + + /* Program address region for MSI posted writes */ + for (i = 0; i < msi->nr_msi_region; i++) { + phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE); + + iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i, + lower_32_bits(addr)); + iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i, + upper_32_bits(addr)); + } + + for (eq = 0; eq < msi->nr_irqs; eq++) { + /* Enable MSI event queue */ + val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | + IPROC_MSI_EQ_EN; + iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); + + /* + * Some legacy platforms require the MSI interrupt enable + * register to be set explicitly. 
+ */ + if (msi->has_inten_reg) { + val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); + val |= BIT(eq); + iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); + } + } +} + +static void iproc_msi_disable(struct iproc_msi *msi) +{ + u32 eq, val; + + for (eq = 0; eq < msi->nr_irqs; eq++) { + if (msi->has_inten_reg) { + val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); + val &= ~BIT(eq); + iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); + } + + val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq); + val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | + IPROC_MSI_EQ_EN); + iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); + } +} + +static int iproc_msi_alloc_domains(struct device_node *node, + struct iproc_msi *msi) +{ + msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs, + &msi_domain_ops, msi); + if (!msi->inner_domain) + return -ENOMEM; + + msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), + &iproc_msi_domain_info, + msi->inner_domain); + if (!msi->msi_domain) { + irq_domain_remove(msi->inner_domain); + return -ENOMEM; + } + + return 0; +} + +static void iproc_msi_free_domains(struct iproc_msi *msi) +{ + if (msi->msi_domain) + irq_domain_remove(msi->msi_domain); + + if (msi->inner_domain) + irq_domain_remove(msi->inner_domain); +} + +static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu) +{ + int i; + + for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { + irq_set_chained_handler_and_data(msi->grps[i].gic_irq, + NULL, NULL); + } +} + +static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu) +{ + int i, ret; + cpumask_var_t mask; + struct iproc_pcie *pcie = msi->pcie; + + for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { + irq_set_chained_handler_and_data(msi->grps[i].gic_irq, + iproc_msi_handler, + &msi->grps[i]); + /* Dedicate GIC interrupt to each CPU core */ + if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + ret = 
irq_set_affinity(msi->grps[i].gic_irq, mask); + if (ret) + dev_err(pcie->dev, + "failed to set affinity for IRQ%d\n", + msi->grps[i].gic_irq); + free_cpumask_var(mask); + } else { + dev_err(pcie->dev, "failed to alloc CPU mask\n"); + ret = -EINVAL; + } + + if (ret) { + /* Free all configured/unconfigured IRQs */ + iproc_msi_irq_free(msi, cpu); + return ret; + } + } + + return 0; +} + +int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) +{ + struct iproc_msi *msi; + int i, ret; + unsigned int cpu; + + if (!of_device_is_compatible(node, "brcm,iproc-msi")) + return -ENODEV; + + if (!of_find_property(node, "msi-controller", NULL)) + return -ENODEV; + + if (pcie->msi) + return -EBUSY; + + msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL); + if (!msi) + return -ENOMEM; + + msi->pcie = pcie; + pcie->msi = msi; + msi->msi_addr = pcie->base_addr; + mutex_init(&msi->bitmap_lock); + msi->nr_cpus = num_possible_cpus(); + + msi->nr_irqs = of_irq_count(node); + if (!msi->nr_irqs) { + dev_err(pcie->dev, "found no MSI GIC interrupt\n"); + return -ENODEV; + } + + if (msi->nr_irqs > NR_HW_IRQS) { + dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n", + msi->nr_irqs); + msi->nr_irqs = NR_HW_IRQS; + } + + if (msi->nr_irqs < msi->nr_cpus) { + dev_err(pcie->dev, + "not enough GIC interrupts for MSI affinity\n"); + return -EINVAL; + } + + if (msi->nr_irqs % msi->nr_cpus != 0) { + msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus; + dev_warn(pcie->dev, "Reducing number of interrupts to %d\n", + msi->nr_irqs); + } + + switch (pcie->type) { + case IPROC_PCIE_PAXB_BCMA: + case IPROC_PCIE_PAXB: + msi->reg_offsets = iproc_msi_reg_paxb; + msi->nr_eq_region = 1; + msi->nr_msi_region = 1; + break; + case IPROC_PCIE_PAXC: + msi->reg_offsets = iproc_msi_reg_paxc; + msi->nr_eq_region = msi->nr_irqs; + msi->nr_msi_region = msi->nr_irqs; + break; + default: + dev_err(pcie->dev, "incompatible iProc PCIe interface\n"); + return -EINVAL; + } + + if (of_find_property(node, 
"brcm,pcie-msi-inten", NULL)) + msi->has_inten_reg = true; + + msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN; + msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs), + sizeof(*msi->bitmap), GFP_KERNEL); + if (!msi->bitmap) + return -ENOMEM; + + msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps), + GFP_KERNEL); + if (!msi->grps) + return -ENOMEM; + + for (i = 0; i < msi->nr_irqs; i++) { + unsigned int irq = irq_of_parse_and_map(node, i); + + if (!irq) { + dev_err(pcie->dev, "unable to parse/map interrupt\n"); + ret = -ENODEV; + goto free_irqs; + } + msi->grps[i].gic_irq = irq; + msi->grps[i].msi = msi; + msi->grps[i].eq = i; + } + + /* Reserve memory for event queue and make sure memories are zeroed */ + msi->eq_cpu = dma_zalloc_coherent(pcie->dev, + msi->nr_eq_region * EQ_MEM_REGION_SIZE, + &msi->eq_dma, GFP_KERNEL); + if (!msi->eq_cpu) { + ret = -ENOMEM; + goto free_irqs; + } + + ret = iproc_msi_alloc_domains(node, msi); + if (ret) { + dev_err(pcie->dev, "failed to create MSI domains\n"); + goto free_eq_dma; + } + + for_each_online_cpu(cpu) { + ret = iproc_msi_irq_setup(msi, cpu); + if (ret) + goto free_msi_irq; + } + + iproc_msi_enable(msi); + + return 0; + +free_msi_irq: + for_each_online_cpu(cpu) + iproc_msi_irq_free(msi, cpu); + iproc_msi_free_domains(msi); + +free_eq_dma: + dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, + msi->eq_cpu, msi->eq_dma); + +free_irqs: + for (i = 0; i < msi->nr_irqs; i++) { + if (msi->grps[i].gic_irq) + irq_dispose_mapping(msi->grps[i].gic_irq); + } + pcie->msi = NULL; + return ret; +} +EXPORT_SYMBOL(iproc_msi_init); + +void iproc_msi_exit(struct iproc_pcie *pcie) +{ + struct iproc_msi *msi = pcie->msi; + unsigned int i, cpu; + + if (!msi) + return; + + iproc_msi_disable(msi); + + for_each_online_cpu(cpu) + iproc_msi_irq_free(msi, cpu); + + iproc_msi_free_domains(msi); + + dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, + msi->eq_cpu, msi->eq_dma); + + for (i = 
0; i < msi->nr_irqs; i++) { + if (msi->grps[i].gic_irq) + irq_dispose_mapping(msi->grps[i].gic_irq); + } +} +EXPORT_SYMBOL(iproc_msi_exit); diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c new file mode 100644 index 000000000000..f30f5f3fb5c1 --- /dev/null +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015 Broadcom Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" +#include "pcie-iproc.h" + +static const struct of_device_id iproc_pcie_of_match_table[] = { + { + .compatible = "brcm,iproc-pcie", + .data = (int *)IPROC_PCIE_PAXB, + }, { + .compatible = "brcm,iproc-pcie-paxb-v2", + .data = (int *)IPROC_PCIE_PAXB_V2, + }, { + .compatible = "brcm,iproc-pcie-paxc", + .data = (int *)IPROC_PCIE_PAXC, + }, { + .compatible = "brcm,iproc-pcie-paxc-v2", + .data = (int *)IPROC_PCIE_PAXC_V2, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table); + +static int iproc_pcie_pltfm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct iproc_pcie *pcie; + struct device_node *np = dev->of_node; + struct resource reg; + resource_size_t iobase = 0; + LIST_HEAD(resources); + struct pci_host_bridge *bridge; + int ret; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) + return -ENOMEM; + + pcie = pci_host_bridge_priv(bridge); + + pcie->dev = dev; + pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); + + ret = of_address_to_resource(np, 0, ®); + if (ret < 0) { + dev_err(dev, "unable to obtain controller resources\n"); + return ret; + } + + pcie->base = devm_pci_remap_cfgspace(dev, reg.start, + resource_size(®)); + if (!pcie->base) { + dev_err(dev, "unable to map controller registers\n"); + return -ENOMEM; + } + pcie->base_addr = reg.start; + + if 
(of_property_read_bool(np, "brcm,pcie-ob")) { + u32 val; + + ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset", + &val); + if (ret) { + dev_err(dev, + "missing brcm,pcie-ob-axi-offset property\n"); + return ret; + } + pcie->ob.axi_offset = val; + pcie->need_ob_cfg = true; + } + + /* + * DT nodes are not used by all platforms that use the iProc PCIe + * core driver. For platforms that require explict inbound mapping + * configuration, "dma-ranges" would have been present in DT + */ + pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); + + /* PHY use is optional */ + pcie->phy = devm_phy_get(dev, "pcie-phy"); + if (IS_ERR(pcie->phy)) { + if (PTR_ERR(pcie->phy) == -EPROBE_DEFER) + return -EPROBE_DEFER; + pcie->phy = NULL; + } + + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources, + &iobase); + if (ret) { + dev_err(dev, "unable to get PCI host bridge resources\n"); + return ret; + } + + /* PAXC doesn't support legacy IRQs, skip mapping */ + switch (pcie->type) { + case IPROC_PCIE_PAXC: + case IPROC_PCIE_PAXC_V2: + break; + default: + pcie->map_irq = of_irq_parse_and_map_pci; + } + + ret = iproc_pcie_setup(pcie, &resources); + if (ret) { + dev_err(dev, "PCIe controller setup failed\n"); + pci_free_resource_list(&resources); + return ret; + } + + platform_set_drvdata(pdev, pcie); + return 0; +} + +static int iproc_pcie_pltfm_remove(struct platform_device *pdev) +{ + struct iproc_pcie *pcie = platform_get_drvdata(pdev); + + return iproc_pcie_remove(pcie); +} + +static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev) +{ + struct iproc_pcie *pcie = platform_get_drvdata(pdev); + + iproc_pcie_shutdown(pcie); +} + +static struct platform_driver iproc_pcie_pltfm_driver = { + .driver = { + .name = "iproc-pcie", + .of_match_table = of_match_ptr(iproc_pcie_of_match_table), + }, + .probe = iproc_pcie_pltfm_probe, + .remove = iproc_pcie_pltfm_remove, + .shutdown = iproc_pcie_pltfm_shutdown, +}; 
+module_platform_driver(iproc_pcie_pltfm_driver); + +MODULE_AUTHOR("Ray Jui "); +MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c new file mode 100644 index 000000000000..3c76c5fa4f32 --- /dev/null +++ b/drivers/pci/controller/pcie-iproc.c @@ -0,0 +1,1432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2014 Hauke Mehrtens + * Copyright (C) 2015 Broadcom Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-iproc.h" + +#define EP_PERST_SOURCE_SELECT_SHIFT 2 +#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) +#define EP_MODE_SURVIVE_PERST_SHIFT 1 +#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) +#define RC_PCIE_RST_OUTPUT_SHIFT 0 +#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) +#define PAXC_RESET_MASK 0x7f + +#define GIC_V3_CFG_SHIFT 0 +#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT) + +#define MSI_ENABLE_CFG_SHIFT 0 +#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT) + +#define CFG_IND_ADDR_MASK 0x00001ffc + +#define CFG_ADDR_BUS_NUM_SHIFT 20 +#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000 +#define CFG_ADDR_DEV_NUM_SHIFT 15 +#define CFG_ADDR_DEV_NUM_MASK 0x000f8000 +#define CFG_ADDR_FUNC_NUM_SHIFT 12 +#define CFG_ADDR_FUNC_NUM_MASK 0x00007000 +#define CFG_ADDR_REG_NUM_SHIFT 2 +#define CFG_ADDR_REG_NUM_MASK 0x00000ffc +#define CFG_ADDR_CFG_TYPE_SHIFT 0 +#define CFG_ADDR_CFG_TYPE_MASK 0x00000003 + +#define SYS_RC_INTX_MASK 0xf + +#define PCIE_PHYLINKUP_SHIFT 3 +#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) +#define PCIE_DL_ACTIVE_SHIFT 2 +#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) + +#define APB_ERR_EN_SHIFT 0 +#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) + +#define CFG_RETRY_STATUS 0xffff0001 +#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ + 
+/* derive the enum index of the outbound/inbound mapping registers */ +#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) + +/* + * Maximum number of outbound mapping window sizes that can be supported by any + * OARR/OMAP mapping pair + */ +#define MAX_NUM_OB_WINDOW_SIZES 4 + +#define OARR_VALID_SHIFT 0 +#define OARR_VALID BIT(OARR_VALID_SHIFT) +#define OARR_SIZE_CFG_SHIFT 1 + +/* + * Maximum number of inbound mapping region sizes that can be supported by an + * IARR + */ +#define MAX_NUM_IB_REGION_SIZES 9 + +#define IMAP_VALID_SHIFT 0 +#define IMAP_VALID BIT(IMAP_VALID_SHIFT) + +#define IPROC_PCI_EXP_CAP 0xac + +#define IPROC_PCIE_REG_INVALID 0xffff + +/** + * iProc PCIe outbound mapping controller specific parameters + * + * @window_sizes: list of supported outbound mapping window sizes in MB + * @nr_sizes: number of supported outbound mapping window sizes + */ +struct iproc_pcie_ob_map { + resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES]; + unsigned int nr_sizes; +}; + +static const struct iproc_pcie_ob_map paxb_ob_map[] = { + { + /* OARR0/OMAP0 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, + { + /* OARR1/OMAP1 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, +}; + +static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = { + { + /* OARR0/OMAP0 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, + { + /* OARR1/OMAP1 */ + .window_sizes = { 128, 256 }, + .nr_sizes = 2, + }, + { + /* OARR2/OMAP2 */ + .window_sizes = { 128, 256, 512, 1024 }, + .nr_sizes = 4, + }, + { + /* OARR3/OMAP3 */ + .window_sizes = { 128, 256, 512, 1024 }, + .nr_sizes = 4, + }, +}; + +/** + * iProc PCIe inbound mapping type + */ +enum iproc_pcie_ib_map_type { + /* for DDR memory */ + IPROC_PCIE_IB_MAP_MEM = 0, + + /* for device I/O memory */ + IPROC_PCIE_IB_MAP_IO, + + /* invalid or unused */ + IPROC_PCIE_IB_MAP_INVALID +}; + +/** + * iProc PCIe inbound mapping controller specific parameters + * + * @type: inbound mapping region type + * @size_unit: 
inbound mapping region size unit, could be SZ_1K, SZ_1M, or + * SZ_1G + * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or + * GB, depedning on the size unit + * @nr_sizes: number of supported inbound mapping region sizes + * @nr_windows: number of supported inbound mapping windows for the region + * @imap_addr_offset: register offset between the upper and lower 32-bit + * IMAP address registers + * @imap_window_offset: register offset between each IMAP window + */ +struct iproc_pcie_ib_map { + enum iproc_pcie_ib_map_type type; + unsigned int size_unit; + resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES]; + unsigned int nr_sizes; + unsigned int nr_windows; + u16 imap_addr_offset; + u16 imap_window_offset; +}; + +static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = { + { + /* IARR0/IMAP0 */ + .type = IPROC_PCIE_IB_MAP_IO, + .size_unit = SZ_1K, + .region_sizes = { 32 }, + .nr_sizes = 1, + .nr_windows = 8, + .imap_addr_offset = 0x40, + .imap_window_offset = 0x4, + }, + { + /* IARR1/IMAP1 (currently unused) */ + .type = IPROC_PCIE_IB_MAP_INVALID, + }, + { + /* IARR2/IMAP2 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1M, + .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192, + 16384 }, + .nr_sizes = 9, + .nr_windows = 1, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, + { + /* IARR3/IMAP3 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1G, + .region_sizes = { 1, 2, 4, 8, 16, 32 }, + .nr_sizes = 6, + .nr_windows = 8, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, + { + /* IARR4/IMAP4 */ + .type = IPROC_PCIE_IB_MAP_MEM, + .size_unit = SZ_1G, + .region_sizes = { 32, 64, 128, 256, 512 }, + .nr_sizes = 5, + .nr_windows = 8, + .imap_addr_offset = 0x4, + .imap_window_offset = 0x8, + }, +}; + +/* + * iProc PCIe host registers + */ +enum iproc_pcie_reg { + /* clock/reset signal control */ + IPROC_PCIE_CLK_CTRL = 0, + + /* + * To allow MSI to be steered to an external MSI controller (e.g., ARM + 
* GICv3 ITS) + */ + IPROC_PCIE_MSI_GIC_MODE, + + /* + * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the + * window where the MSI posted writes are written, for the writes to be + * interpreted as MSI writes. + */ + IPROC_PCIE_MSI_BASE_ADDR, + IPROC_PCIE_MSI_WINDOW_SIZE, + + /* + * To hold the address of the register where the MSI writes are + * programed. When ARM GICv3 ITS is used, this should be programmed + * with the address of the GITS_TRANSLATER register. + */ + IPROC_PCIE_MSI_ADDR_LO, + IPROC_PCIE_MSI_ADDR_HI, + + /* enable MSI */ + IPROC_PCIE_MSI_EN_CFG, + + /* allow access to root complex configuration space */ + IPROC_PCIE_CFG_IND_ADDR, + IPROC_PCIE_CFG_IND_DATA, + + /* allow access to device configuration space */ + IPROC_PCIE_CFG_ADDR, + IPROC_PCIE_CFG_DATA, + + /* enable INTx */ + IPROC_PCIE_INTX_EN, + + /* outbound address mapping */ + IPROC_PCIE_OARR0, + IPROC_PCIE_OMAP0, + IPROC_PCIE_OARR1, + IPROC_PCIE_OMAP1, + IPROC_PCIE_OARR2, + IPROC_PCIE_OMAP2, + IPROC_PCIE_OARR3, + IPROC_PCIE_OMAP3, + + /* inbound address mapping */ + IPROC_PCIE_IARR0, + IPROC_PCIE_IMAP0, + IPROC_PCIE_IARR1, + IPROC_PCIE_IMAP1, + IPROC_PCIE_IARR2, + IPROC_PCIE_IMAP2, + IPROC_PCIE_IARR3, + IPROC_PCIE_IMAP3, + IPROC_PCIE_IARR4, + IPROC_PCIE_IMAP4, + + /* link status */ + IPROC_PCIE_LINK_STATUS, + + /* enable APB error for unsupported requests */ + IPROC_PCIE_APB_ERR_EN, + + /* total number of core registers */ + IPROC_PCIE_MAX_NUM_REG, +}; + +/* iProc PCIe PAXB BCMA registers */ +static const u16 iproc_pcie_reg_paxb_bcma[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, +}; + +/* iProc PCIe PAXB registers */ +static const u16 iproc_pcie_reg_paxb[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + 
[IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_OARR0] = 0xd20, + [IPROC_PCIE_OMAP0] = 0xd40, + [IPROC_PCIE_OARR1] = 0xd28, + [IPROC_PCIE_OMAP1] = 0xd48, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_APB_ERR_EN] = 0xf40, +}; + +/* iProc PCIe PAXB v2 registers */ +static const u16 iproc_pcie_reg_paxb_v2[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_OARR0] = 0xd20, + [IPROC_PCIE_OMAP0] = 0xd40, + [IPROC_PCIE_OARR1] = 0xd28, + [IPROC_PCIE_OMAP1] = 0xd48, + [IPROC_PCIE_OARR2] = 0xd60, + [IPROC_PCIE_OMAP2] = 0xd68, + [IPROC_PCIE_OARR3] = 0xdf0, + [IPROC_PCIE_OMAP3] = 0xdf8, + [IPROC_PCIE_IARR0] = 0xd00, + [IPROC_PCIE_IMAP0] = 0xc00, + [IPROC_PCIE_IARR2] = 0xd10, + [IPROC_PCIE_IMAP2] = 0xcc0, + [IPROC_PCIE_IARR3] = 0xe00, + [IPROC_PCIE_IMAP3] = 0xe08, + [IPROC_PCIE_IARR4] = 0xe68, + [IPROC_PCIE_IMAP4] = 0xe70, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, + [IPROC_PCIE_APB_ERR_EN] = 0xf40, +}; + +/* iProc PCIe PAXC v1 registers */ +static const u16 iproc_pcie_reg_paxc[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, +}; + +/* iProc PCIe PAXC v2 registers */ +static const u16 iproc_pcie_reg_paxc_v2[] = { + [IPROC_PCIE_MSI_GIC_MODE] = 0x050, + [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, + [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, + [IPROC_PCIE_MSI_ADDR_LO] = 0x07c, + [IPROC_PCIE_MSI_ADDR_HI] = 0x080, + [IPROC_PCIE_MSI_EN_CFG] = 0x09c, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, +}; + +static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) +{ + struct iproc_pcie *pcie = bus->sysdata; + return pcie; +} + +static inline bool 
iproc_pcie_reg_is_invalid(u16 reg_offset) +{ + return !!(reg_offset == IPROC_PCIE_REG_INVALID); +} + +static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg) +{ + return pcie->reg_offsets[reg]; +} + +static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg) +{ + u16 offset = iproc_pcie_reg_offset(pcie, reg); + + if (iproc_pcie_reg_is_invalid(offset)) + return 0; + + return readl(pcie->base + offset); +} + +static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg, u32 val) +{ + u16 offset = iproc_pcie_reg_offset(pcie, reg); + + if (iproc_pcie_reg_is_invalid(offset)) + return; + + writel(val, pcie->base + offset); +} + +/** + * APB error forwarding can be disabled during access of configuration + * registers of the endpoint device, to prevent unsupported requests + * (typically seen during enumeration with multi-function devices) from + * triggering a system exception. + */ +static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, + bool disable) +{ + struct iproc_pcie *pcie = iproc_data(bus); + u32 val; + + if (bus->number && pcie->has_apb_err_disable) { + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN); + if (disable) + val &= ~APB_ERR_EN; + else + val |= APB_ERR_EN; + iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val); + } +} + +static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie, + unsigned int busno, + unsigned int slot, + unsigned int fn, + int where) +{ + u16 offset; + u32 val; + + /* EP device access */ + val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | + (slot << CFG_ADDR_DEV_NUM_SHIFT) | + (fn << CFG_ADDR_FUNC_NUM_SHIFT) | + (where & CFG_ADDR_REG_NUM_MASK) | + (1 & CFG_ADDR_CFG_TYPE_MASK); + + iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val); + offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA); + + if (iproc_pcie_reg_is_invalid(offset)) + return NULL; + + return (pcie->base + offset); +} + +static unsigned int 
iproc_pcie_cfg_retry(void __iomem *cfg_data_p) +{ + int timeout = CFG_RETRY_STATUS_TIMEOUT_US; + unsigned int data; + + /* + * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only + * affects config reads of the Vendor ID. For config writes or any + * other config reads, the Root may automatically reissue the + * configuration request again as a new request. + * + * For config reads, this hardware returns CFG_RETRY_STATUS data + * when it receives a CRS completion, regardless of the address of + * the read or the CRS Software Visibility Enable bit. As a + * partial workaround for this, we retry in software any read that + * returns CFG_RETRY_STATUS. + * + * Note that a non-Vendor ID config register may have a value of + * CFG_RETRY_STATUS. If we read that, we can't distinguish it from + * a CRS completion, so we will incorrectly retry the read and + * eventually return the wrong data (0xffffffff). + */ + data = readl(cfg_data_p); + while (data == CFG_RETRY_STATUS && timeout--) { + udelay(1); + data = readl(cfg_data_p); + } + + if (data == CFG_RETRY_STATUS) + data = 0xffffffff; + + return data; +} + +static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct iproc_pcie *pcie = iproc_data(bus); + unsigned int slot = PCI_SLOT(devfn); + unsigned int fn = PCI_FUNC(devfn); + unsigned int busno = bus->number; + void __iomem *cfg_data_p; + unsigned int data; + int ret; + + /* root complex access */ + if (busno == 0) { + ret = pci_generic_config_read32(bus, devfn, where, size, val); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + /* Don't advertise CRS SV support */ + if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL) + *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + return PCIBIOS_SUCCESSFUL; + } + + cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); + + if (!cfg_data_p) + return PCIBIOS_DEVICE_NOT_FOUND; + + data = iproc_pcie_cfg_retry(cfg_data_p); + + *val = data; + if (size <= 2) 
+ *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * Note access to the configuration registers are protected at the higher layer + * by 'pci_lock' in drivers/pci/access.c + */ +static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, + int busno, unsigned int devfn, + int where) +{ + unsigned slot = PCI_SLOT(devfn); + unsigned fn = PCI_FUNC(devfn); + u16 offset; + + /* root complex access */ + if (busno == 0) { + if (slot > 0 || fn > 0) + return NULL; + + iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, + where & CFG_IND_ADDR_MASK); + offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); + if (iproc_pcie_reg_is_invalid(offset)) + return NULL; + else + return (pcie->base + offset); + } + + /* + * PAXC is connected to an internally emulated EP within the SoC. It + * allows only one device. + */ + if (pcie->ep_is_internal) + if (slot > 0) + return NULL; + + return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); +} + +static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus, + unsigned int devfn, + int where) +{ + return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn, + where); +} + +static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, + unsigned int devfn, int where, + int size, u32 *val) +{ + void __iomem *addr; + + addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + *val = readl(addr); + + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} + +static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, + unsigned int devfn, int where, + int size, u32 val) +{ + void __iomem *addr; + u32 mask, tmp; + + addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); + if (!addr) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (size == 4) { + writel(val, addr); + return PCIBIOS_SUCCESSFUL; + } + + mask = ~(((1 << 
(size * 8)) - 1) << ((where & 0x3) * 8)); + tmp = readl(addr) & mask; + tmp |= val << ((where & 0x3) * 8); + writel(tmp, addr); + + return PCIBIOS_SUCCESSFUL; +} + +static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + int ret; + struct iproc_pcie *pcie = iproc_data(bus); + + iproc_pcie_apb_err_disable(bus, true); + if (pcie->type == IPROC_PCIE_PAXB_V2) + ret = iproc_pcie_config_read(bus, devfn, where, size, val); + else + ret = pci_generic_config_read32(bus, devfn, where, size, val); + iproc_pcie_apb_err_disable(bus, false); + + return ret; +} + +static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + int ret; + + iproc_pcie_apb_err_disable(bus, true); + ret = pci_generic_config_write32(bus, devfn, where, size, val); + iproc_pcie_apb_err_disable(bus, false); + + return ret; +} + +static struct pci_ops iproc_pcie_ops = { + .map_bus = iproc_pcie_bus_map_cfg_bus, + .read = iproc_pcie_config_read32, + .write = iproc_pcie_config_write32, +}; + +static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert) +{ + u32 val; + + /* + * PAXC and the internal emulated endpoint device downstream should not + * be reset. If firmware has been loaded on the endpoint device at an + * earlier boot stage, reset here causes issues. 
+ */ + if (pcie->ep_is_internal) + return; + + if (assert) { + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); + val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & + ~RC_PCIE_RST_OUTPUT; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); + udelay(250); + } else { + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); + val |= RC_PCIE_RST_OUTPUT; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); + msleep(100); + } +} + +int iproc_pcie_shutdown(struct iproc_pcie *pcie) +{ + iproc_pcie_perst_ctrl(pcie, true); + msleep(500); + + return 0; +} +EXPORT_SYMBOL_GPL(iproc_pcie_shutdown); + +static int iproc_pcie_check_link(struct iproc_pcie *pcie) +{ + struct device *dev = pcie->dev; + u32 hdr_type, link_ctrl, link_status, class, val; + bool link_is_active = false; + + /* + * PAXC connects to emulated endpoint devices directly and does not + * have a Serdes. Therefore skip the link detection logic here. + */ + if (pcie->ep_is_internal) + return 0; + + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); + if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) { + dev_err(dev, "PHY or data link is INACTIVE!\n"); + return -ENODEV; + } + + /* make sure we are not in EP mode */ + iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); + if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { + dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); + return -EFAULT; + } + + /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */ +#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c +#define PCI_CLASS_BRIDGE_MASK 0xffff00 +#define PCI_CLASS_BRIDGE_SHIFT 8 + iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, + 4, &class); + class &= ~PCI_CLASS_BRIDGE_MASK; + class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); + iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, + 4, class); + + /* check link status to see if link is active */ + iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, + 2, &link_status); + if 
(link_status & PCI_EXP_LNKSTA_NLW) + link_is_active = true; + + if (!link_is_active) { + /* try GEN 1 link speed */ +#define PCI_TARGET_LINK_SPEED_MASK 0xf +#define PCI_TARGET_LINK_SPEED_GEN2 0x2 +#define PCI_TARGET_LINK_SPEED_GEN1 0x1 + iproc_pci_raw_config_read32(pcie, 0, + IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, + 4, &link_ctrl); + if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == + PCI_TARGET_LINK_SPEED_GEN2) { + link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; + link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; + iproc_pci_raw_config_write32(pcie, 0, + IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, + 4, link_ctrl); + msleep(100); + + iproc_pci_raw_config_read32(pcie, 0, + IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, + 2, &link_status); + if (link_status & PCI_EXP_LNKSTA_NLW) + link_is_active = true; + } + } + + dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN"); + + return link_is_active ? 0 : -ENODEV; +} + +static void iproc_pcie_enable(struct iproc_pcie *pcie) +{ + iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK); +} + +static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie, + int window_idx) +{ + u32 val; + + val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); + + return !!(val & OARR_VALID); +} + +static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx, + int size_idx, u64 axi_addr, u64 pci_addr) +{ + struct device *dev = pcie->dev; + u16 oarr_offset, omap_offset; + + /* + * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based + * on window index. + */ + oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0, + window_idx)); + omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0, + window_idx)); + if (iproc_pcie_reg_is_invalid(oarr_offset) || + iproc_pcie_reg_is_invalid(omap_offset)) + return -EINVAL; + + /* + * Program the OARR registers. The upper 32-bit OARR register is + * always right after the lower 32-bit OARR register. 
+ */ + writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) | + OARR_VALID, pcie->base + oarr_offset); + writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4); + + /* now program the OMAP registers */ + writel(lower_32_bits(pci_addr), pcie->base + omap_offset); + writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4); + + dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n", + window_idx, oarr_offset, &axi_addr, &pci_addr); + dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n", + readl(pcie->base + oarr_offset), + readl(pcie->base + oarr_offset + 4)); + dev_info(dev, "omap lo 0x%x omap hi 0x%x\n", + readl(pcie->base + omap_offset), + readl(pcie->base + omap_offset + 4)); + + return 0; +} + +/** + * Some iProc SoCs require the SW to configure the outbound address mapping + * + * Outbound address translation: + * + * iproc_pcie_address = axi_address - axi_offset + * OARR = iproc_pcie_address + * OMAP = pci_addr + * + * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address + */ +static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, + u64 pci_addr, resource_size_t size) +{ + struct iproc_pcie_ob *ob = &pcie->ob; + struct device *dev = pcie->dev; + int ret = -EINVAL, window_idx, size_idx; + + if (axi_addr < ob->axi_offset) { + dev_err(dev, "axi address %pap less than offset %pap\n", + &axi_addr, &ob->axi_offset); + return -EINVAL; + } + + /* + * Translate the AXI address to the internal address used by the iProc + * PCIe core before programming the OARR + */ + axi_addr -= ob->axi_offset; + + /* iterate through all OARR/OMAP mapping windows */ + for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { + const struct iproc_pcie_ob_map *ob_map = + &pcie->ob_map[window_idx]; + + /* + * If current outbound window is already in use, move on to the + * next one. 
+ */ + if (iproc_pcie_ob_is_valid(pcie, window_idx)) + continue; + + /* + * Iterate through all supported window sizes within the + * OARR/OMAP pair to find a match. Go through the window sizes + * in a descending order. + */ + for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0; + size_idx--) { + resource_size_t window_size = + ob_map->window_sizes[size_idx] * SZ_1M; + + if (size < window_size) + continue; + + if (!IS_ALIGNED(axi_addr, window_size) || + !IS_ALIGNED(pci_addr, window_size)) { + dev_err(dev, + "axi %pap or pci %pap not aligned\n", + &axi_addr, &pci_addr); + return -EINVAL; + } + + /* + * Match found! Program both OARR and OMAP and mark + * them as a valid entry. + */ + ret = iproc_pcie_ob_write(pcie, window_idx, size_idx, + axi_addr, pci_addr); + if (ret) + goto err_ob; + + size -= window_size; + if (size == 0) + return 0; + + /* + * If we are here, we are done with the current window, + * but not yet finished all mappings. Need to move on + * to the next window. + */ + axi_addr += window_size; + pci_addr += window_size; + break; + } + } + +err_ob: + dev_err(dev, "unable to configure outbound mapping\n"); + dev_err(dev, + "axi %pap, axi offset %pap, pci %pap, res size %pap\n", + &axi_addr, &ob->axi_offset, &pci_addr, &size); + + return ret; +} + +static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, + struct list_head *resources) +{ + struct device *dev = pcie->dev; + struct resource_entry *window; + int ret; + + resource_list_for_each_entry(window, resources) { + struct resource *res = window->res; + u64 res_type = resource_type(res); + + switch (res_type) { + case IORESOURCE_IO: + case IORESOURCE_BUS: + break; + case IORESOURCE_MEM: + ret = iproc_pcie_setup_ob(pcie, res->start, + res->start - window->offset, + resource_size(res)); + if (ret) + return ret; + break; + default: + dev_err(dev, "invalid resource %pR\n", res); + return -EINVAL; + } + } + + return 0; +} + +static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie, + int 
region_idx) +{ + const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; + u32 val; + + val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); + + return !!(val & (BIT(ib_map->nr_sizes) - 1)); +} + +static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map, + enum iproc_pcie_ib_map_type type) +{ + return !!(ib_map->type == type); +} + +static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, + int size_idx, int nr_windows, u64 axi_addr, + u64 pci_addr, resource_size_t size) +{ + struct device *dev = pcie->dev; + const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; + u16 iarr_offset, imap_offset; + u32 val; + int window_idx; + + iarr_offset = iproc_pcie_reg_offset(pcie, + MAP_REG(IPROC_PCIE_IARR0, region_idx)); + imap_offset = iproc_pcie_reg_offset(pcie, + MAP_REG(IPROC_PCIE_IMAP0, region_idx)); + if (iproc_pcie_reg_is_invalid(iarr_offset) || + iproc_pcie_reg_is_invalid(imap_offset)) + return -EINVAL; + + dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n", + region_idx, iarr_offset, &axi_addr, &pci_addr); + + /* + * Program the IARR registers. The upper 32-bit IARR register is + * always right after the lower 32-bit IARR register. + */ + writel(lower_32_bits(pci_addr) | BIT(size_idx), + pcie->base + iarr_offset); + writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4); + + dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n", + readl(pcie->base + iarr_offset), + readl(pcie->base + iarr_offset + 4)); + + /* + * Now program the IMAP registers. Each IARR region may have one or + * more IMAP windows. 
+ */ + size >>= ilog2(nr_windows); + for (window_idx = 0; window_idx < nr_windows; window_idx++) { + val = readl(pcie->base + imap_offset); + val |= lower_32_bits(axi_addr) | IMAP_VALID; + writel(val, pcie->base + imap_offset); + writel(upper_32_bits(axi_addr), + pcie->base + imap_offset + ib_map->imap_addr_offset); + + dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n", + window_idx, readl(pcie->base + imap_offset), + readl(pcie->base + imap_offset + + ib_map->imap_addr_offset)); + + imap_offset += ib_map->imap_window_offset; + axi_addr += size; + } + + return 0; +} + +static int iproc_pcie_setup_ib(struct iproc_pcie *pcie, + struct of_pci_range *range, + enum iproc_pcie_ib_map_type type) +{ + struct device *dev = pcie->dev; + struct iproc_pcie_ib *ib = &pcie->ib; + int ret; + unsigned int region_idx, size_idx; + u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr; + resource_size_t size = range->size; + + /* iterate through all IARR mapping regions */ + for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { + const struct iproc_pcie_ib_map *ib_map = + &pcie->ib_map[region_idx]; + + /* + * If current inbound region is already in use or not a + * compatible type, move on to the next. + */ + if (iproc_pcie_ib_is_in_use(pcie, region_idx) || + !iproc_pcie_ib_check_type(ib_map, type)) + continue; + + /* iterate through all supported region sizes to find a match */ + for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) { + resource_size_t region_size = + ib_map->region_sizes[size_idx] * ib_map->size_unit; + + if (size != region_size) + continue; + + if (!IS_ALIGNED(axi_addr, region_size) || + !IS_ALIGNED(pci_addr, region_size)) { + dev_err(dev, + "axi %pap or pci %pap not aligned\n", + &axi_addr, &pci_addr); + return -EINVAL; + } + + /* Match found! Program IARR and all IMAP windows. 
*/ + ret = iproc_pcie_ib_write(pcie, region_idx, size_idx, + ib_map->nr_windows, axi_addr, + pci_addr, size); + if (ret) + goto err_ib; + else + return 0; + + } + } + ret = -EINVAL; + +err_ib: + dev_err(dev, "unable to configure inbound mapping\n"); + dev_err(dev, "axi %pap, pci %pap, res size %pap\n", + &axi_addr, &pci_addr, &size); + + return ret; +} + +static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + int ret; + + /* Get the dma-ranges from DT */ + ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node); + if (ret) + return ret; + + for_each_of_pci_range(&parser, &range) { + /* Each range entry corresponds to an inbound mapping region */ + ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM); + if (ret) + return ret; + } + + return 0; +} + +static int iproce_pcie_get_msi(struct iproc_pcie *pcie, + struct device_node *msi_node, + u64 *msi_addr) +{ + struct device *dev = pcie->dev; + int ret; + struct resource res; + + /* + * Check if 'msi-map' points to ARM GICv3 ITS, which is the only + * supported external MSI controller that requires steering. 
+ */ + if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) { + dev_err(dev, "unable to find compatible MSI controller\n"); + return -ENODEV; + } + + /* derive GITS_TRANSLATER address from GICv3 */ + ret = of_address_to_resource(msi_node, 0, &res); + if (ret < 0) { + dev_err(dev, "unable to obtain MSI controller resources\n"); + return ret; + } + + *msi_addr = res.start + GITS_TRANSLATER; + return 0; +} + +static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) +{ + int ret; + struct of_pci_range range; + + memset(&range, 0, sizeof(range)); + range.size = SZ_32K; + range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1); + + ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO); + return ret; +} + +static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) +{ + u32 val; + + /* + * Program bits [43:13] of address of GITS_TRANSLATER register into + * bits [30:0] of the MSI base address register. In fact, in all iProc + * based SoCs, all I/O register bases are well below the 32-bit + * boundary, so we can safely assume bits [43:32] are always zeros. + */ + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR, + (u32)(msi_addr >> 13)); + + /* use a default 8K window size */ + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0); + + /* steering MSI to GICv3 ITS */ + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE); + val |= GIC_V3_CFG; + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val); + + /* + * Program bits [43:2] of address of GITS_TRANSLATER register into the + * iProc MSI address registers. 
+ */ + msi_addr >>= 2; + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI, + upper_32_bits(msi_addr)); + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO, + lower_32_bits(msi_addr)); + + /* enable MSI */ + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG); + val |= MSI_ENABLE_CFG; + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val); +} + +static int iproc_pcie_msi_steer(struct iproc_pcie *pcie, + struct device_node *msi_node) +{ + struct device *dev = pcie->dev; + int ret; + u64 msi_addr; + + ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr); + if (ret < 0) { + dev_err(dev, "msi steering failed\n"); + return ret; + } + + switch (pcie->type) { + case IPROC_PCIE_PAXB_V2: + ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr); + if (ret) + return ret; + break; + case IPROC_PCIE_PAXC_V2: + iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) +{ + struct device_node *msi_node; + int ret; + + /* + * Either the "msi-parent" or the "msi-map" phandle needs to exist + * for us to obtain the MSI node. + */ + + msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0); + if (!msi_node) { + const __be32 *msi_map = NULL; + int len; + u32 phandle; + + msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len); + if (!msi_map) + return -ENODEV; + + phandle = be32_to_cpup(msi_map + 1); + msi_node = of_find_node_by_phandle(phandle); + if (!msi_node) + return -ENODEV; + } + + /* + * Certain revisions of the iProc PCIe controller require additional + * configurations to steer the MSI writes towards an external MSI + * controller. 
+ */ + if (pcie->need_msi_steer) { + ret = iproc_pcie_msi_steer(pcie, msi_node); + if (ret) + return ret; + } + + /* + * If another MSI controller is being used, the call below should fail + * but that is okay + */ + return iproc_msi_init(pcie, msi_node); +} + +static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) +{ + iproc_msi_exit(pcie); +} + +static int iproc_pcie_rev_init(struct iproc_pcie *pcie) +{ + struct device *dev = pcie->dev; + unsigned int reg_idx; + const u16 *regs; + + switch (pcie->type) { + case IPROC_PCIE_PAXB_BCMA: + regs = iproc_pcie_reg_paxb_bcma; + break; + case IPROC_PCIE_PAXB: + regs = iproc_pcie_reg_paxb; + pcie->has_apb_err_disable = true; + if (pcie->need_ob_cfg) { + pcie->ob_map = paxb_ob_map; + pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map); + } + break; + case IPROC_PCIE_PAXB_V2: + regs = iproc_pcie_reg_paxb_v2; + pcie->has_apb_err_disable = true; + if (pcie->need_ob_cfg) { + pcie->ob_map = paxb_v2_ob_map; + pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map); + } + pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map); + pcie->ib_map = paxb_v2_ib_map; + pcie->need_msi_steer = true; + dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n", + CFG_RETRY_STATUS); + break; + case IPROC_PCIE_PAXC: + regs = iproc_pcie_reg_paxc; + pcie->ep_is_internal = true; + break; + case IPROC_PCIE_PAXC_V2: + regs = iproc_pcie_reg_paxc_v2; + pcie->ep_is_internal = true; + pcie->need_msi_steer = true; + break; + default: + dev_err(dev, "incompatible iProc PCIe interface\n"); + return -EINVAL; + } + + pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG, + sizeof(*pcie->reg_offsets), + GFP_KERNEL); + if (!pcie->reg_offsets) + return -ENOMEM; + + /* go through the register table and populate all valid registers */ + pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ? + IPROC_PCIE_REG_INVALID : regs[0]; + for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++) + pcie->reg_offsets[reg_idx] = regs[reg_idx] ? 
+ regs[reg_idx] : IPROC_PCIE_REG_INVALID; + + return 0; +} + +int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) +{ + struct device *dev; + int ret; + struct pci_bus *child; + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + + dev = pcie->dev; + + ret = iproc_pcie_rev_init(pcie); + if (ret) { + dev_err(dev, "unable to initialize controller parameters\n"); + return ret; + } + + ret = devm_request_pci_bus_resources(dev, res); + if (ret) + return ret; + + ret = phy_init(pcie->phy); + if (ret) { + dev_err(dev, "unable to initialize PCIe PHY\n"); + return ret; + } + + ret = phy_power_on(pcie->phy); + if (ret) { + dev_err(dev, "unable to power on PCIe PHY\n"); + goto err_exit_phy; + } + + iproc_pcie_perst_ctrl(pcie, true); + iproc_pcie_perst_ctrl(pcie, false); + + if (pcie->need_ob_cfg) { + ret = iproc_pcie_map_ranges(pcie, res); + if (ret) { + dev_err(dev, "map failed\n"); + goto err_power_off_phy; + } + } + + if (pcie->need_ib_cfg) { + ret = iproc_pcie_map_dma_ranges(pcie); + if (ret && ret != -ENOENT) + goto err_power_off_phy; + } + + ret = iproc_pcie_check_link(pcie); + if (ret) { + dev_err(dev, "no PCIe EP device detected\n"); + goto err_power_off_phy; + } + + iproc_pcie_enable(pcie); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + if (iproc_pcie_msi_enable(pcie)) + dev_info(dev, "not using iProc MSI\n"); + + list_splice_init(res, &host->windows); + host->busnr = 0; + host->dev.parent = dev; + host->ops = &iproc_pcie_ops; + host->sysdata = pcie; + host->map_irq = pcie->map_irq; + host->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(host); + if (ret < 0) { + dev_err(dev, "failed to scan host: %d\n", ret); + goto err_power_off_phy; + } + + pci_assign_unassigned_bus_resources(host->bus); + + pcie->root_bus = host->bus; + + list_for_each_entry(child, &host->bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(host->bus); + + return 0; + +err_power_off_phy: + phy_power_off(pcie->phy); 
+err_exit_phy: + phy_exit(pcie->phy); + return ret; +} +EXPORT_SYMBOL(iproc_pcie_setup); + +int iproc_pcie_remove(struct iproc_pcie *pcie) +{ + pci_stop_root_bus(pcie->root_bus); + pci_remove_root_bus(pcie->root_bus); + + iproc_pcie_msi_disable(pcie); + + phy_power_off(pcie->phy); + phy_exit(pcie->phy); + + return 0; +} +EXPORT_SYMBOL(iproc_pcie_remove); + +MODULE_AUTHOR("Ray Jui "); +MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h new file mode 100644 index 000000000000..814b600b383a --- /dev/null +++ b/drivers/pci/controller/pcie-iproc.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2014-2015 Broadcom Corporation + */ + +#ifndef _PCIE_IPROC_H +#define _PCIE_IPROC_H + +/** + * iProc PCIe interface type + * + * PAXB is the wrapper used in root complex that can be connected to an + * external endpoint device. + * + * PAXC is the wrapper used in root complex dedicated for internal emulated + * endpoint devices. 
+ */ +enum iproc_pcie_type { + IPROC_PCIE_PAXB_BCMA = 0, + IPROC_PCIE_PAXB, + IPROC_PCIE_PAXB_V2, + IPROC_PCIE_PAXC, + IPROC_PCIE_PAXC_V2, +}; + +/** + * iProc PCIe outbound mapping + * @axi_offset: offset from the AXI address to the internal address used by + * the iProc PCIe core + * @nr_windows: total number of supported outbound mapping windows + */ +struct iproc_pcie_ob { + resource_size_t axi_offset; + unsigned int nr_windows; +}; + +/** + * iProc PCIe inbound mapping + * @nr_regions: total number of supported inbound mapping regions + */ +struct iproc_pcie_ib { + unsigned int nr_regions; +}; + +struct iproc_pcie_ob_map; +struct iproc_pcie_ib_map; +struct iproc_msi; + +/** + * iProc PCIe device + * + * @dev: pointer to device data structure + * @type: iProc PCIe interface type + * @reg_offsets: register offsets + * @base: PCIe host controller I/O register base + * @base_addr: PCIe host controller register base physical address + * @root_bus: pointer to root bus + * @phy: optional PHY device that controls the Serdes + * @map_irq: function callback to map interrupts + * @ep_is_internal: indicates an internal emulated endpoint device is connected + * @has_apb_err_disable: indicates the controller can be configured to prevent + * unsupported request from being forwarded as an APB bus error + * + * @need_ob_cfg: indicates SW needs to configure the outbound mapping window + * @ob: outbound mapping related parameters + * @ob_map: outbound mapping related parameters specific to the controller + * + * @need_ib_cfg: indicates SW needs to configure the inbound mapping window + * @ib: inbound mapping related parameters + * @ib_map: outbound mapping region related parameters + * + * @need_msi_steer: indicates additional configuration of the iProc PCIe + * controller is required to steer MSI writes to external interrupt controller + * @msi: MSI data + */ +struct iproc_pcie { + struct device *dev; + enum iproc_pcie_type type; + u16 *reg_offsets; + void __iomem *base; + 
phys_addr_t base_addr; + struct resource mem; + struct pci_bus *root_bus; + struct phy *phy; + int (*map_irq)(const struct pci_dev *, u8, u8); + bool ep_is_internal; + bool has_apb_err_disable; + + bool need_ob_cfg; + struct iproc_pcie_ob ob; + const struct iproc_pcie_ob_map *ob_map; + + bool need_ib_cfg; + struct iproc_pcie_ib ib; + const struct iproc_pcie_ib_map *ib_map; + + bool need_msi_steer; + struct iproc_msi *msi; +}; + +int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); +int iproc_pcie_remove(struct iproc_pcie *pcie); +int iproc_pcie_shutdown(struct iproc_pcie *pcie); + +#ifdef CONFIG_PCIE_IPROC_MSI +int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node); +void iproc_msi_exit(struct iproc_pcie *pcie); +#else +static inline int iproc_msi_init(struct iproc_pcie *pcie, + struct device_node *node) +{ + return -ENODEV; +} +static inline void iproc_msi_exit(struct iproc_pcie *pcie) +{ +} +#endif + +#endif /* _PCIE_IPROC_H */ diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c new file mode 100644 index 000000000000..0baabe30858f --- /dev/null +++ b/drivers/pci/controller/pcie-mediatek.c @@ -0,0 +1,1218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek PCIe host controller driver. + * + * Copyright (c) 2017 MediaTek Inc. 
+ * Author: Ryder Lee + * Honghui Zhang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +/* PCIe shared registers */ +#define PCIE_SYS_CFG 0x00 +#define PCIE_INT_ENABLE 0x0c +#define PCIE_CFG_ADDR 0x20 +#define PCIE_CFG_DATA 0x24 + +/* PCIe per port registers */ +#define PCIE_BAR0_SETUP 0x10 +#define PCIE_CLASS 0x34 +#define PCIE_LINK_STATUS 0x50 + +#define PCIE_PORT_INT_EN(x) BIT(20 + (x)) +#define PCIE_PORT_PERST(x) BIT(1 + (x)) +#define PCIE_PORT_LINKUP BIT(0) +#define PCIE_BAR_MAP_MAX GENMASK(31, 16) + +#define PCIE_BAR_ENABLE BIT(0) +#define PCIE_REVISION_ID BIT(0) +#define PCIE_CLASS_CODE (0x60400 << 8) +#define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \ + ((((regn) >> 8) & GENMASK(3, 0)) << 24)) +#define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8)) +#define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11)) +#define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16)) +#define PCIE_CONF_ADDR(regn, fun, dev, bus) \ + (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \ + PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus)) + +/* MediaTek specific configuration registers */ +#define PCIE_FTS_NUM 0x70c +#define PCIE_FTS_NUM_MASK GENMASK(15, 8) +#define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8) + +#define PCIE_FC_CREDIT 0x73c +#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) +#define PCIE_FC_CREDIT_VAL(x) ((x) << 16) + +/* PCIe V2 share registers */ +#define PCIE_SYS_CFG_V2 0x0 +#define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8) +#define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8) + +/* PCIe V2 per-port registers */ +#define PCIE_MSI_VECTOR 0x0c0 + +#define PCIE_CONF_VEND_ID 0x100 +#define PCIE_CONF_CLASS_ID 0x106 + +#define PCIE_INT_MASK 0x420 +#define INTX_MASK GENMASK(19, 16) +#define INTX_SHIFT 16 +#define PCIE_INT_STATUS 0x424 +#define MSI_STATUS BIT(23) +#define PCIE_IMSI_STATUS 0x42c +#define PCIE_IMSI_ADDR 0x430 
+#define MSI_MASK BIT(23) +#define MTK_MSI_IRQS_NUM 32 + +#define PCIE_AHB_TRANS_BASE0_L 0x438 +#define PCIE_AHB_TRANS_BASE0_H 0x43c +#define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0)) +#define PCIE_AXI_WINDOW0 0x448 +#define WIN_ENABLE BIT(7) + +/* PCIe V2 configuration transaction header */ +#define PCIE_CFG_HEADER0 0x460 +#define PCIE_CFG_HEADER1 0x464 +#define PCIE_CFG_HEADER2 0x468 +#define PCIE_CFG_WDATA 0x470 +#define PCIE_APP_TLP_REQ 0x488 +#define PCIE_CFG_RDATA 0x48c +#define APP_CFG_REQ BIT(0) +#define APP_CPL_STATUS GENMASK(7, 5) + +#define CFG_WRRD_TYPE_0 4 +#define CFG_WR_FMT 2 +#define CFG_RD_FMT 0 + +#define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0)) +#define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24)) +#define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29)) +#define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2)) +#define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16)) +#define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19)) +#define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24)) +#define CFG_HEADER_DW0(type, fmt) \ + (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt)) +#define CFG_HEADER_DW1(where, size) \ + (GENMASK(((size) - 1), 0) << ((where) & 0x3)) +#define CFG_HEADER_DW2(regn, fun, dev, bus) \ + (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \ + CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus)) + +#define PCIE_RST_CTRL 0x510 +#define PCIE_PHY_RSTB BIT(0) +#define PCIE_PIPE_SRSTB BIT(1) +#define PCIE_MAC_SRSTB BIT(2) +#define PCIE_CRSTB BIT(3) +#define PCIE_PERSTB BIT(8) +#define PCIE_LINKDOWN_RST_EN GENMASK(15, 13) +#define PCIE_LINK_STATUS_V2 0x804 +#define PCIE_PORT_LINKUP_V2 BIT(10) + +struct mtk_pcie_port; + +/** + * struct mtk_pcie_soc - differentiate between host generations + * @need_fix_class_id: whether this host's class ID needed to be fixed or not + * @ops: pointer to configuration access functions + * @startup: pointer to controller setting functions + * @setup_irq: pointer to initialize IRQ functions + */ +struct 
mtk_pcie_soc { + bool need_fix_class_id; + struct pci_ops *ops; + int (*startup)(struct mtk_pcie_port *port); + int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); +}; + +/** + * struct mtk_pcie_port - PCIe port information + * @base: IO mapped register base + * @list: port list + * @pcie: pointer to PCIe host info + * @reset: pointer to port reset control + * @sys_ck: pointer to transaction/data link layer clock + * @ahb_ck: pointer to AHB slave interface operating clock for CSR access + * and RC initiated MMIO access + * @axi_ck: pointer to application layer MMIO channel operating clock + * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock + * when pcie_mac_ck/pcie_pipe_ck is turned off + * @obff_ck: pointer to OBFF functional block operating clock + * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock + * @phy: pointer to PHY control block + * @lane: lane count + * @slot: port slot + * @irq_domain: legacy INTx IRQ domain + * @inner_domain: inner IRQ domain + * @msi_domain: MSI IRQ domain + * @lock: protect the msi_irq_in_use bitmap + * @msi_irq_in_use: bit map for assigned MSI IRQ + */ +struct mtk_pcie_port { + void __iomem *base; + struct list_head list; + struct mtk_pcie *pcie; + struct reset_control *reset; + struct clk *sys_ck; + struct clk *ahb_ck; + struct clk *axi_ck; + struct clk *aux_ck; + struct clk *obff_ck; + struct clk *pipe_ck; + struct phy *phy; + u32 lane; + u32 slot; + struct irq_domain *irq_domain; + struct irq_domain *inner_domain; + struct irq_domain *msi_domain; + struct mutex lock; + DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); +}; + +/** + * struct mtk_pcie - PCIe host information + * @dev: pointer to PCIe device + * @base: IO mapped register base + * @free_ck: free-run reference clock + * @io: IO resource + * @pio: PIO resource + * @mem: non-prefetchable memory resource + * @busn: bus range + * @offset: IO / Memory offset + * @ports: pointer to PCIe port information + * @soc: pointer to 
SoC-dependent operations + */ +struct mtk_pcie { + struct device *dev; + void __iomem *base; + struct clk *free_ck; + + struct resource io; + struct resource pio; + struct resource mem; + struct resource busn; + struct { + resource_size_t mem; + resource_size_t io; + } offset; + struct list_head ports; + const struct mtk_pcie_soc *soc; +}; + +static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + + clk_disable_unprepare(pcie->free_ck); + + if (dev->pm_domain) { + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + } +} + +static void mtk_pcie_port_free(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct device *dev = pcie->dev; + + devm_iounmap(dev, port->base); + list_del(&port->list); + devm_kfree(dev, port); +} + +static void mtk_pcie_put_resources(struct mtk_pcie *pcie) +{ + struct mtk_pcie_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) { + phy_power_off(port->phy); + phy_exit(port->phy); + clk_disable_unprepare(port->pipe_ck); + clk_disable_unprepare(port->obff_ck); + clk_disable_unprepare(port->axi_ck); + clk_disable_unprepare(port->aux_ck); + clk_disable_unprepare(port->ahb_ck); + clk_disable_unprepare(port->sys_ck); + mtk_pcie_port_free(port); + } + + mtk_pcie_subsys_powerdown(pcie); +} + +static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port) +{ + u32 val; + int err; + + err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val, + !(val & APP_CFG_REQ), 10, + 100 * USEC_PER_MSEC); + if (err) + return PCIBIOS_SET_FAILED; + + if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS) + return PCIBIOS_SET_FAILED; + + return PCIBIOS_SUCCESSFUL; +} + +static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, + int where, int size, u32 *val) +{ + u32 tmp; + + /* Write PCIe configuration transaction header for Cfgrd */ + writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT), + port->base + PCIE_CFG_HEADER0); + 
writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); + writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), + port->base + PCIE_CFG_HEADER2); + + /* Trigger h/w to transmit Cfgrd TLP */ + tmp = readl(port->base + PCIE_APP_TLP_REQ); + tmp |= APP_CFG_REQ; + writel(tmp, port->base + PCIE_APP_TLP_REQ); + + /* Check completion status */ + if (mtk_pcie_check_cfg_cpld(port)) + return PCIBIOS_SET_FAILED; + + /* Read cpld payload of Cfgrd */ + *val = readl(port->base + PCIE_CFG_RDATA); + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + + return PCIBIOS_SUCCESSFUL; +} + +static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, + int where, int size, u32 val) +{ + /* Write PCIe configuration transaction header for Cfgwr */ + writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT), + port->base + PCIE_CFG_HEADER0); + writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); + writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), + port->base + PCIE_CFG_HEADER2); + + /* Write Cfgwr data */ + val = val << 8 * (where & 3); + writel(val, port->base + PCIE_CFG_WDATA); + + /* Trigger h/w to transmit Cfgwr TLP */ + val = readl(port->base + PCIE_APP_TLP_REQ); + val |= APP_CFG_REQ; + writel(val, port->base + PCIE_APP_TLP_REQ); + + /* Check completion status */ + return mtk_pcie_check_cfg_cpld(port); +} + +static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, + unsigned int devfn) +{ + struct mtk_pcie *pcie = bus->sysdata; + struct mtk_pcie_port *port; + + list_for_each_entry(port, &pcie->ports, list) + if (port->slot == PCI_SLOT(devfn)) + return port; + + return NULL; +} + +static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct mtk_pcie_port *port; + u32 bn = bus->number; + int ret; + + port = mtk_pcie_find_port(bus, devfn); + if (!port) { + *val = ~0; + 
return PCIBIOS_DEVICE_NOT_FOUND; + } + + ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); + if (ret) + *val = ~0; + + return ret; +} + +static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct mtk_pcie_port *port; + u32 bn = bus->number; + + port = mtk_pcie_find_port(bus, devfn); + if (!port) + return PCIBIOS_DEVICE_NOT_FOUND; + + return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val); +} + +static struct pci_ops mtk_pcie_ops_v2 = { + .read = mtk_pcie_config_read, + .write = mtk_pcie_config_write, +}; + +static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct resource *mem = &pcie->mem; + const struct mtk_pcie_soc *soc = port->pcie->soc; + u32 val; + size_t size; + int err; + + /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ + if (pcie->base) { + val = readl(pcie->base + PCIE_SYS_CFG_V2); + val |= PCIE_CSR_LTSSM_EN(port->slot) | + PCIE_CSR_ASPM_L1_EN(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG_V2); + } + + /* Assert all reset signals */ + writel(0, port->base + PCIE_RST_CTRL); + + /* + * Enable PCIe link down reset, if link status changed from link up to + * link down, this will reset MAC control registers and configuration + * space. 
+ */ + writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); + + /* De-assert PHY, PE, PIPE, MAC and configuration reset */ + val = readl(port->base + PCIE_RST_CTRL); + val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | + PCIE_MAC_SRSTB | PCIE_CRSTB; + writel(val, port->base + PCIE_RST_CTRL); + + /* Set up vendor ID and class code */ + if (soc->need_fix_class_id) { + val = PCI_VENDOR_ID_MEDIATEK; + writew(val, port->base + PCIE_CONF_VEND_ID); + + val = PCI_CLASS_BRIDGE_HOST; + writew(val, port->base + PCIE_CONF_CLASS_ID); + } + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, + !!(val & PCIE_PORT_LINKUP_V2), 20, + 100 * USEC_PER_MSEC); + if (err) + return -ETIMEDOUT; + + /* Set INTx mask */ + val = readl(port->base + PCIE_INT_MASK); + val &= ~INTX_MASK; + writel(val, port->base + PCIE_INT_MASK); + + /* Set AHB to PCIe translation windows */ + size = mem->end - mem->start; + val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); + + val = upper_32_bits(mem->start); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); + + /* Set PCIe to AXI translation memory space.*/ + val = fls(0xffffffff) | WIN_ENABLE; + writel(val, port->base + PCIE_AXI_WINDOW0); + + return 0; +} + +static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); + phys_addr_t addr; + + /* MT2712/MT7622 only support 32-bit MSI addresses */ + addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); + msg->address_hi = 0; + msg->address_lo = lower_32_bits(addr); + + msg->data = data->hwirq; + + dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)data->hwirq, msg->address_hi, msg->address_lo); +} + +static int mtk_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static void mtk_msi_ack_irq(struct 
irq_data *data) +{ + struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); + u32 hwirq = data->hwirq; + + writel(1 << hwirq, port->base + PCIE_IMSI_STATUS); +} + +static struct irq_chip mtk_msi_bottom_irq_chip = { + .name = "MTK MSI", + .irq_compose_msi_msg = mtk_compose_msi_msg, + .irq_set_affinity = mtk_msi_set_affinity, + .irq_ack = mtk_msi_ack_irq, +}; + +static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct mtk_pcie_port *port = domain->host_data; + unsigned long bit; + + WARN_ON(nr_irqs != 1); + mutex_lock(&port->lock); + + bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM); + if (bit >= MTK_MSI_IRQS_NUM) { + mutex_unlock(&port->lock); + return -ENOSPC; + } + + __set_bit(bit, port->msi_irq_in_use); + + mutex_unlock(&port->lock); + + irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip, + domain->host_data, handle_edge_irq, + NULL, NULL); + + return 0; +} + +static void mtk_pcie_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d); + + mutex_lock(&port->lock); + + if (!test_bit(d->hwirq, port->msi_irq_in_use)) + dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n", + d->hwirq); + else + __clear_bit(d->hwirq, port->msi_irq_in_use); + + mutex_unlock(&port->lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = mtk_pcie_irq_domain_alloc, + .free = mtk_pcie_irq_domain_free, +}; + +static struct irq_chip mtk_msi_irq_chip = { + .name = "MTK PCIe MSI", + .irq_ack = irq_chip_ack_parent, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info mtk_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = 
&mtk_msi_irq_chip, +}; + +static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port) +{ + struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node); + + mutex_init(&port->lock); + + port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM, + &msi_domain_ops, port); + if (!port->inner_domain) { + dev_err(port->pcie->dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info, + port->inner_domain); + if (!port->msi_domain) { + dev_err(port->pcie->dev, "failed to create MSI domain\n"); + irq_domain_remove(port->inner_domain); + return -ENOMEM; + } + + return 0; +} + +static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) +{ + u32 val; + phys_addr_t msg_addr; + + msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); + val = lower_32_bits(msg_addr); + writel(val, port->base + PCIE_IMSI_ADDR); + + val = readl(port->base + PCIE_INT_MASK); + val &= ~MSI_MASK; + writel(val, port->base + PCIE_INT_MASK); +} + +static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = mtk_pcie_intx_map, +}; + +static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, + struct device_node *node) +{ + struct device *dev = port->pcie->dev; + struct device_node *pcie_intc_node; + int ret; + + /* Setup INTx */ + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "no PCIe Intc node found\n"); + return -ENODEV; + } + + port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, port); + if (!port->irq_domain) { + dev_err(dev, "failed to get INTx IRQ domain\n"); + return -ENODEV; + } + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + ret = mtk_pcie_allocate_msi_domains(port); + 
if (ret) + return ret; + + mtk_pcie_enable_msi(port); + } + + return 0; +} + +static void mtk_pcie_intr_handler(struct irq_desc *desc) +{ + struct mtk_pcie_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + unsigned long status; + u32 virq; + u32 bit = INTX_SHIFT; + + chained_irq_enter(irqchip, desc); + + status = readl(port->base + PCIE_INT_STATUS); + if (status & INTX_MASK) { + for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) { + /* Clear the INTx */ + writel(1 << bit, port->base + PCIE_INT_STATUS); + virq = irq_find_mapping(port->irq_domain, + bit - INTX_SHIFT); + generic_handle_irq(virq); + } + } + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (status & MSI_STATUS){ + unsigned long imsi_status; + + while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { + for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) { + virq = irq_find_mapping(port->inner_domain, bit); + generic_handle_irq(virq); + } + } + /* Clear MSI interrupt status */ + writel(MSI_STATUS, port->base + PCIE_INT_STATUS); + } + } + + chained_irq_exit(irqchip, desc); + + return; +} + +static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, + struct device_node *node) +{ + struct mtk_pcie *pcie = port->pcie; + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + int err, irq; + + err = mtk_pcie_init_irq_domain(port, node); + if (err) { + dev_err(dev, "failed to init PCIe IRQ domain\n"); + return err; + } + + irq = platform_get_irq(pdev, port->slot); + irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port); + + return 0; +} + +static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct mtk_pcie *pcie = bus->sysdata; + + writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn), + bus->number), pcie->base + PCIE_CFG_ADDR); + + return pcie->base + PCIE_CFG_DATA + (where & 3); +} + +static struct pci_ops mtk_pcie_ops = { + .map_bus = 
mtk_pcie_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static int mtk_pcie_startup_port(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + u32 func = PCI_FUNC(port->slot << 3); + u32 slot = PCI_SLOT(port->slot << 3); + u32 val; + int err; + + /* assert port PERST_N */ + val = readl(pcie->base + PCIE_SYS_CFG); + val |= PCIE_PORT_PERST(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG); + + /* de-assert port PERST_N */ + val = readl(pcie->base + PCIE_SYS_CFG); + val &= ~PCIE_PORT_PERST(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG); + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val, + !!(val & PCIE_PORT_LINKUP), 20, + 100 * USEC_PER_MSEC); + if (err) + return -ETIMEDOUT; + + /* enable interrupt */ + val = readl(pcie->base + PCIE_INT_ENABLE); + val |= PCIE_PORT_INT_EN(port->slot); + writel(val, pcie->base + PCIE_INT_ENABLE); + + /* map to all DDR region. We need to set it before cfg operation. 
*/ + writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, + port->base + PCIE_BAR0_SETUP); + + /* configure class code and revision ID */ + writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); + + /* configure FC credit */ + writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + val = readl(pcie->base + PCIE_CFG_DATA); + val &= ~PCIE_FC_CREDIT_MASK; + val |= PCIE_FC_CREDIT_VAL(0x806c); + writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + writel(val, pcie->base + PCIE_CFG_DATA); + + /* configure RC FTS number to 250 when it leaves L0s */ + writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + val = readl(pcie->base + PCIE_CFG_DATA); + val &= ~PCIE_FTS_NUM_MASK; + val |= PCIE_FTS_NUM_L0(0x50); + writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + writel(val, pcie->base + PCIE_CFG_DATA); + + return 0; +} + +static void mtk_pcie_enable_port(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct device *dev = pcie->dev; + int err; + + err = clk_prepare_enable(port->sys_ck); + if (err) { + dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot); + goto err_sys_clk; + } + + err = clk_prepare_enable(port->ahb_ck); + if (err) { + dev_err(dev, "failed to enable ahb_ck%d\n", port->slot); + goto err_ahb_clk; + } + + err = clk_prepare_enable(port->aux_ck); + if (err) { + dev_err(dev, "failed to enable aux_ck%d\n", port->slot); + goto err_aux_clk; + } + + err = clk_prepare_enable(port->axi_ck); + if (err) { + dev_err(dev, "failed to enable axi_ck%d\n", port->slot); + goto err_axi_clk; + } + + err = clk_prepare_enable(port->obff_ck); + if (err) { + dev_err(dev, "failed to enable obff_ck%d\n", port->slot); + goto err_obff_clk; + } + + err = clk_prepare_enable(port->pipe_ck); + if (err) { + dev_err(dev, "failed to enable pipe_ck%d\n", port->slot); + goto err_pipe_clk; + } + + reset_control_assert(port->reset); + 
reset_control_deassert(port->reset); + + err = phy_init(port->phy); + if (err) { + dev_err(dev, "failed to initialize port%d phy\n", port->slot); + goto err_phy_init; + } + + err = phy_power_on(port->phy); + if (err) { + dev_err(dev, "failed to power on port%d phy\n", port->slot); + goto err_phy_on; + } + + if (!pcie->soc->startup(port)) + return; + + dev_info(dev, "Port%d link down\n", port->slot); + + phy_power_off(port->phy); +err_phy_on: + phy_exit(port->phy); +err_phy_init: + clk_disable_unprepare(port->pipe_ck); +err_pipe_clk: + clk_disable_unprepare(port->obff_ck); +err_obff_clk: + clk_disable_unprepare(port->axi_ck); +err_axi_clk: + clk_disable_unprepare(port->aux_ck); +err_aux_clk: + clk_disable_unprepare(port->ahb_ck); +err_ahb_clk: + clk_disable_unprepare(port->sys_ck); +err_sys_clk: + mtk_pcie_port_free(port); +} + +static int mtk_pcie_parse_port(struct mtk_pcie *pcie, + struct device_node *node, + int slot) +{ + struct mtk_pcie_port *port; + struct resource *regs; + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + char name[10]; + int err; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + err = of_property_read_u32(node, "num-lanes", &port->lane); + if (err) { + dev_err(dev, "missing num-lanes property\n"); + return err; + } + + snprintf(name, sizeof(name), "port%d", slot); + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + port->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(port->base)) { + dev_err(dev, "failed to map port%d base\n", slot); + return PTR_ERR(port->base); + } + + snprintf(name, sizeof(name), "sys_ck%d", slot); + port->sys_ck = devm_clk_get(dev, name); + if (IS_ERR(port->sys_ck)) { + dev_err(dev, "failed to get sys_ck%d clock\n", slot); + return PTR_ERR(port->sys_ck); + } + + /* sys_ck might be divided into the following parts in some chips */ + snprintf(name, sizeof(name), "ahb_ck%d", slot); + port->ahb_ck = devm_clk_get(dev, 
name); + if (IS_ERR(port->ahb_ck)) { + if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->ahb_ck = NULL; + } + + snprintf(name, sizeof(name), "axi_ck%d", slot); + port->axi_ck = devm_clk_get(dev, name); + if (IS_ERR(port->axi_ck)) { + if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->axi_ck = NULL; + } + + snprintf(name, sizeof(name), "aux_ck%d", slot); + port->aux_ck = devm_clk_get(dev, name); + if (IS_ERR(port->aux_ck)) { + if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->aux_ck = NULL; + } + + snprintf(name, sizeof(name), "obff_ck%d", slot); + port->obff_ck = devm_clk_get(dev, name); + if (IS_ERR(port->obff_ck)) { + if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->obff_ck = NULL; + } + + snprintf(name, sizeof(name), "pipe_ck%d", slot); + port->pipe_ck = devm_clk_get(dev, name); + if (IS_ERR(port->pipe_ck)) { + if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + port->pipe_ck = NULL; + } + + snprintf(name, sizeof(name), "pcie-rst%d", slot); + port->reset = devm_reset_control_get_optional_exclusive(dev, name); + if (PTR_ERR(port->reset) == -EPROBE_DEFER) + return PTR_ERR(port->reset); + + /* some platforms may use default PHY setting */ + snprintf(name, sizeof(name), "pcie-phy%d", slot); + port->phy = devm_phy_optional_get(dev, name); + if (IS_ERR(port->phy)) + return PTR_ERR(port->phy); + + port->slot = slot; + port->pcie = pcie; + + if (pcie->soc->setup_irq) { + err = pcie->soc->setup_irq(port, node); + if (err) + return err; + } + + INIT_LIST_HEAD(&port->list); + list_add_tail(&port->list, &pcie->ports); + + return 0; +} + +static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *regs; + int err; + + /* get shared registers, which are optional */ + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys"); 
+ if (regs) { + pcie->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(pcie->base)) { + dev_err(dev, "failed to map shared register\n"); + return PTR_ERR(pcie->base); + } + } + + pcie->free_ck = devm_clk_get(dev, "free_ck"); + if (IS_ERR(pcie->free_ck)) { + if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + pcie->free_ck = NULL; + } + + if (dev->pm_domain) { + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + } + + /* enable top level clock */ + err = clk_prepare_enable(pcie->free_ck); + if (err) { + dev_err(dev, "failed to enable free_ck\n"); + goto err_free_ck; + } + + return 0; + +err_free_ck: + if (dev->pm_domain) { + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + } + + return err; +} + +static int mtk_pcie_setup(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node, *child; + struct of_pci_range_parser parser; + struct of_pci_range range; + struct resource res; + struct mtk_pcie_port *port, *tmp; + int err; + + if (of_pci_range_parser_init(&parser, node)) { + dev_err(dev, "missing \"ranges\" property\n"); + return -EINVAL; + } + + for_each_of_pci_range(&parser, &range) { + err = of_pci_range_to_resource(&range, node, &res); + if (err < 0) + return err; + + switch (res.flags & IORESOURCE_TYPE_BITS) { + case IORESOURCE_IO: + pcie->offset.io = res.start - range.pci_addr; + + memcpy(&pcie->pio, &res, sizeof(res)); + pcie->pio.name = node->full_name; + + pcie->io.start = range.cpu_addr; + pcie->io.end = range.cpu_addr + range.size - 1; + pcie->io.flags = IORESOURCE_MEM; + pcie->io.name = "I/O"; + + memcpy(&res, &pcie->io, sizeof(res)); + break; + + case IORESOURCE_MEM: + pcie->offset.mem = res.start - range.pci_addr; + + memcpy(&pcie->mem, &res, sizeof(res)); + pcie->mem.name = "non-prefetchable"; + break; + } + } + + err = of_pci_parse_bus_range(node, &pcie->busn); + if (err < 0) { + dev_err(dev, "failed to parse bus ranges property: %d\n", err); + pcie->busn.name = node->name; 
+ pcie->busn.start = 0; + pcie->busn.end = 0xff; + pcie->busn.flags = IORESOURCE_BUS; + } + + for_each_available_child_of_node(node, child) { + int slot; + + err = of_pci_get_devfn(child); + if (err < 0) { + dev_err(dev, "failed to parse devfn: %d\n", err); + return err; + } + + slot = PCI_SLOT(err); + + err = mtk_pcie_parse_port(pcie, child, slot); + if (err) + return err; + } + + err = mtk_pcie_subsys_powerup(pcie); + if (err) + return err; + + /* enable each port, and then check link status */ + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + mtk_pcie_enable_port(port); + + /* power down PCIe subsys if slots are all empty (link down) */ + if (list_empty(&pcie->ports)) + mtk_pcie_subsys_powerdown(pcie); + + return 0; +} + +static int mtk_pcie_request_resources(struct mtk_pcie *pcie) +{ + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + struct list_head *windows = &host->windows; + struct device *dev = pcie->dev; + int err; + + pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); + pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); + pci_add_resource(windows, &pcie->busn); + + err = devm_request_pci_bus_resources(dev, windows); + if (err < 0) + return err; + + pci_remap_iospace(&pcie->pio, pcie->io.start); + + return 0; +} + +static int mtk_pcie_register_host(struct pci_host_bridge *host) +{ + struct mtk_pcie *pcie = pci_host_bridge_priv(host); + struct pci_bus *child; + int err; + + host->busnr = pcie->busn.start; + host->dev.parent = pcie->dev; + host->ops = pcie->soc->ops; + host->map_irq = of_irq_parse_and_map_pci; + host->swizzle_irq = pci_common_swizzle; + host->sysdata = pcie; + + err = pci_scan_root_bus_bridge(host); + if (err < 0) + return err; + + pci_bus_size_bridges(host->bus); + pci_bus_assign_resources(host->bus); + + list_for_each_entry(child, &host->bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(host->bus); + + return 0; +} + +static int mtk_pcie_probe(struct 
platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_pcie *pcie; + struct pci_host_bridge *host; + int err; + + host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!host) + return -ENOMEM; + + pcie = pci_host_bridge_priv(host); + + pcie->dev = dev; + pcie->soc = of_device_get_match_data(dev); + platform_set_drvdata(pdev, pcie); + INIT_LIST_HEAD(&pcie->ports); + + err = mtk_pcie_setup(pcie); + if (err) + return err; + + err = mtk_pcie_request_resources(pcie); + if (err) + goto put_resources; + + err = mtk_pcie_register_host(host); + if (err) + goto put_resources; + + return 0; + +put_resources: + if (!list_empty(&pcie->ports)) + mtk_pcie_put_resources(pcie); + + return err; +} + +static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { + .ops = &mtk_pcie_ops, + .startup = mtk_pcie_startup_port, +}; + +static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = { + .ops = &mtk_pcie_ops_v2, + .startup = mtk_pcie_startup_port_v2, + .setup_irq = mtk_pcie_setup_irq, +}; + +static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = { + .need_fix_class_id = true, + .ops = &mtk_pcie_ops_v2, + .startup = mtk_pcie_startup_port_v2, + .setup_irq = mtk_pcie_setup_irq, +}; + +static const struct of_device_id mtk_pcie_ids[] = { + { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, + { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, + { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 }, + { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 }, + {}, +}; + +static struct platform_driver mtk_pcie_driver = { + .probe = mtk_pcie_probe, + .driver = { + .name = "mtk-pcie", + .of_match_table = mtk_pcie_ids, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(mtk_pcie_driver); diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c new file mode 100644 index 000000000000..4d6c20e47bed --- /dev/null +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -0,0 
+1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for Mobiveil PCIe Host controller + * + * Copyright (c) 2018 Mobiveil Inc. + * Author: Subrahmanya Lingappa + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* register offsets and bit positions */ + +/* + * translation tables are grouped into windows, each window registers are + * grouped into blocks of 4 or 16 registers each + */ +#define PAB_REG_BLOCK_SIZE 16 +#define PAB_EXT_REG_BLOCK_SIZE 4 + +#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE)) +#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) + +#define LTSSM_STATUS 0x0404 +#define LTSSM_STATUS_L0_MASK 0x3f +#define LTSSM_STATUS_L0 0x2d + +#define PAB_CTRL 0x0808 +#define AMBA_PIO_ENABLE_SHIFT 0 +#define PEX_PIO_ENABLE_SHIFT 1 +#define PAGE_SEL_SHIFT 13 +#define PAGE_SEL_MASK 0x3f +#define PAGE_LO_MASK 0x3ff +#define PAGE_SEL_EN 0xc00 +#define PAGE_SEL_OFFSET_SHIFT 10 + +#define PAB_AXI_PIO_CTRL 0x0840 +#define APIO_EN_MASK 0xf + +#define PAB_PEX_PIO_CTRL 0x08c0 +#define PIO_ENABLE_SHIFT 0 + +#define PAB_INTP_AMBA_MISC_ENB 0x0b0c +#define PAB_INTP_AMBA_MISC_STAT 0x0b1c +#define PAB_INTP_INTX_MASK 0x01e0 +#define PAB_INTP_MSI_MASK 0x8 + +#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win) +#define WIN_ENABLE_SHIFT 0 +#define WIN_TYPE_SHIFT 1 + +#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win) + +#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win) +#define AXI_WINDOW_ALIGN_MASK 3 + +#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win) +#define PAB_BUS_SHIFT 24 +#define PAB_DEVICE_SHIFT 19 +#define PAB_FUNCTION_SHIFT 16 + +#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win) +#define PAB_INTP_AXI_PIO_CLASS 0x474 + +#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win) +#define AMAP_CTRL_EN_SHIFT 0 +#define 
AMAP_CTRL_TYPE_SHIFT 1 + +#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) +#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) +#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) +#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) + +/* starting offset of INTX bits in status register */ +#define PAB_INTX_START 5 + +/* supported number of MSI interrupts */ +#define PCI_NUM_MSI 16 + +/* MSI registers */ +#define MSI_BASE_LO_OFFSET 0x04 +#define MSI_BASE_HI_OFFSET 0x08 +#define MSI_SIZE_OFFSET 0x0c +#define MSI_ENABLE_OFFSET 0x14 +#define MSI_STATUS_OFFSET 0x18 +#define MSI_DATA_OFFSET 0x20 +#define MSI_ADDR_L_OFFSET 0x24 +#define MSI_ADDR_H_OFFSET 0x28 + +/* outbound and inbound window definitions */ +#define WIN_NUM_0 0 +#define WIN_NUM_1 1 +#define CFG_WINDOW_TYPE 0 +#define IO_WINDOW_TYPE 1 +#define MEM_WINDOW_TYPE 2 +#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024) +#define MAX_PIO_WINDOWS 8 + +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_MIN 90000 +#define LINK_WAIT_MAX 100000 + +struct mobiveil_msi { /* MSI information */ + struct mutex lock; /* protect bitmap variable */ + struct irq_domain *msi_domain; + struct irq_domain *dev_domain; + phys_addr_t msi_pages_phys; + int num_of_vectors; + DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI); +}; + +struct mobiveil_pcie { + struct platform_device *pdev; + struct list_head resources; + void __iomem *config_axi_slave_base; /* endpoint config base */ + void __iomem *csr_axi_slave_base; /* root port config base */ + void __iomem *apb_csr_base; /* MSI register base */ + void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */ + struct irq_domain *intx_domain; + raw_spinlock_t intx_mask_lock; + int irq; + int apio_wins; + int ppio_wins; + int ob_wins_configured; /* configured outbound windows */ + int ib_wins_configured; /* configured inbound windows */ + struct resource *ob_io_res; + char root_bus_nr; + struct 
mobiveil_msi msi; +}; + +static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value, + const u32 reg) +{ + writel_relaxed(value, pcie->csr_axi_slave_base + reg); +} + +static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg) +{ + return readl_relaxed(pcie->csr_axi_slave_base + reg); +} + +static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie) +{ + return (csr_readl(pcie, LTSSM_STATUS) & + LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0; +} + +static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct mobiveil_pcie *pcie = bus->sysdata; + + /* Only one device down on each root port */ + if ((bus->number == pcie->root_bus_nr) && (devfn > 0)) + return false; + + /* + * Do not read more than one device on the bus directly + * attached to RC + */ + if ((bus->primary == pcie->root_bus_nr) && (devfn > 0)) + return false; + + return true; +} + +/* + * mobiveil_pcie_map_bus - routine to get the configuration base of either + * root port or endpoint + */ +static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct mobiveil_pcie *pcie = bus->sysdata; + + if (!mobiveil_pcie_valid_device(bus, devfn)) + return NULL; + + if (bus->number == pcie->root_bus_nr) { + /* RC config access */ + return pcie->csr_axi_slave_base + where; + } + + /* + * EP config access (in Config/APIO space) + * Program PEX Address base (31..16 bits) with appropriate value + * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register. 
+ * Relies on pci_lock serialization + */ + csr_writel(pcie, bus->number << PAB_BUS_SHIFT | + PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | + PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT, + PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); + return pcie->config_axi_slave_base + where; +} + +static struct pci_ops mobiveil_pcie_ops = { + .map_bus = mobiveil_pcie_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static void mobiveil_pcie_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc); + struct device *dev = &pcie->pdev->dev; + struct mobiveil_msi *msi = &pcie->msi; + u32 msi_data, msi_addr_lo, msi_addr_hi; + u32 intr_status, msi_status; + unsigned long shifted_status; + u32 bit, virq, val, mask; + + /* + * The core provides a single interrupt for both INTx/MSI messages. + * So we'll read both INTx and MSI status + */ + + chained_irq_enter(chip, desc); + + /* read INTx status */ + val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); + mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); + intr_status = val & mask; + + /* Handle INTx */ + if (intr_status & PAB_INTP_INTX_MASK) { + shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >> + PAB_INTX_START; + do { + for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) { + virq = irq_find_mapping(pcie->intx_domain, + bit + 1); + if (virq) + generic_handle_irq(virq); + else + dev_err_ratelimited(dev, + "unexpected IRQ, INT%d\n", bit); + + /* clear interrupt */ + csr_writel(pcie, + shifted_status << PAB_INTX_START, + PAB_INTP_AMBA_MISC_STAT); + } + } while ((shifted_status >> PAB_INTX_START) != 0); + } + + /* read extra MSI status register */ + msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET); + + /* handle MSI interrupts */ + while (msi_status & 1) { + msi_data = readl_relaxed(pcie->apb_csr_base + + MSI_DATA_OFFSET); + + /* + * MSI_STATUS_OFFSET register gets updated to zero + * once we pop not only the MSI 
data but also address + * from MSI hardware FIFO. So keeping these following + * two dummy reads. + */ + msi_addr_lo = readl_relaxed(pcie->apb_csr_base + + MSI_ADDR_L_OFFSET); + msi_addr_hi = readl_relaxed(pcie->apb_csr_base + + MSI_ADDR_H_OFFSET); + dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n", + msi_data, msi_addr_hi, msi_addr_lo); + + virq = irq_find_mapping(msi->dev_domain, msi_data); + if (virq) + generic_handle_irq(virq); + + msi_status = readl_relaxed(pcie->apb_csr_base + + MSI_STATUS_OFFSET); + } + + /* Clear the interrupt status */ + csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT); + chained_irq_exit(chip, desc); +} + +static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct platform_device *pdev = pcie->pdev; + struct device_node *node = dev->of_node; + struct resource *res; + const char *type; + + type = of_get_property(node, "device_type", NULL); + if (!type || strcmp(type, "pci")) { + dev_err(dev, "invalid \"device_type\" %s\n", type); + return -EINVAL; + } + + /* map config resource */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "config_axi_slave"); + pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pcie->config_axi_slave_base)) + return PTR_ERR(pcie->config_axi_slave_base); + pcie->ob_io_res = res; + + /* map csr resource */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "csr_axi_slave"); + pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pcie->csr_axi_slave_base)) + return PTR_ERR(pcie->csr_axi_slave_base); + pcie->pcie_reg_base = res->start; + + /* map MSI config resource */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr"); + pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pcie->apb_csr_base)) + return PTR_ERR(pcie->apb_csr_base); + + /* read the number of windows requested */ + if (of_property_read_u32(node, "apio-wins", 
&pcie->apio_wins)) + pcie->apio_wins = MAX_PIO_WINDOWS; + + if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins)) + pcie->ppio_wins = MAX_PIO_WINDOWS; + + pcie->irq = platform_get_irq(pdev, 0); + if (pcie->irq <= 0) { + dev_err(dev, "failed to map IRQ: %d\n", pcie->irq); + return -ENODEV; + } + + irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie); + + return 0; +} + +/* + * select_paged_register - routine to access paged register of root complex + * + * registers of RC are paged, for this scheme to work + * extracted higher 6 bits of the offset will be written to pg_sel + * field of PAB_CTRL register and rest of the lower 10 bits enabled with + * PAGE_SEL_EN are used as offset of the register. + */ +static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset) +{ + int pab_ctrl_dw, pg_sel; + + /* clear pg_sel field */ + pab_ctrl_dw = csr_readl(pcie, PAB_CTRL); + pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT)); + + /* set pg_sel field */ + pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK; + pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT)); + csr_writel(pcie, pab_ctrl_dw, PAB_CTRL); +} + +static void write_paged_register(struct mobiveil_pcie *pcie, + u32 val, u32 offset) +{ + u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; + + select_paged_register(pcie, offset); + csr_writel(pcie, val, off); +} + +static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset) +{ + u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; + + select_paged_register(pcie, offset); + return csr_readl(pcie, off); +} + +static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, + int pci_addr, u32 type, u64 size) +{ + int pio_ctrl_val; + int amap_ctrl_dw; + u64 size64 = ~(size - 1); + + if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) { + dev_err(&pcie->pdev->dev, + "ERROR: max inbound windows reached !\n"); + return; + } + + pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL); + csr_writel(pcie, + pio_ctrl_val | 
(1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL); + amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num)); + amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT)); + amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT)); + + write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64), + PAB_PEX_AMAP_CTRL(win_num)); + + write_paged_register(pcie, upper_32_bits(size64), + PAB_EXT_PEX_AMAP_SIZEN(win_num)); + + write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); + write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num)); + write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num)); +} + +/* + * routine to program the outbound windows + */ +static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, + u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size) +{ + + u32 value, type; + u64 size64 = ~(size - 1); + + if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) { + dev_err(&pcie->pdev->dev, + "ERROR: max outbound windows reached !\n"); + return; + } + + /* + * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit + * to 4 KB in PAB_AXI_AMAP_CTRL register + */ + type = config_io_bit; + value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); + csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | + lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num)); + + write_paged_register(pcie, upper_32_bits(size64), + PAB_EXT_AXI_AMAP_SIZE(win_num)); + + /* + * program AXI window base with appropriate value in + * PAB_AXI_AMAP_AXI_WIN0 register + */ + value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num)); + csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK), + PAB_AXI_AMAP_AXI_WIN(win_num)); + + value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num)); + + csr_writel(pcie, lower_32_bits(pci_addr), + PAB_AXI_AMAP_PEX_WIN_L(win_num)); + csr_writel(pcie, upper_32_bits(pci_addr), + PAB_AXI_AMAP_PEX_WIN_H(win_num)); + + pcie->ob_wins_configured++; +} + +static int 
mobiveil_bringup_link(struct mobiveil_pcie *pcie) +{ + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (mobiveil_pcie_link_up(pcie)) + return 0; + + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); + } + dev_err(&pcie->pdev->dev, "link never came up\n"); + return -ETIMEDOUT; +} + +static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie) +{ + phys_addr_t msg_addr = pcie->pcie_reg_base; + struct mobiveil_msi *msi = &pcie->msi; + + pcie->msi.num_of_vectors = PCI_NUM_MSI; + msi->msi_pages_phys = (phys_addr_t)msg_addr; + + writel_relaxed(lower_32_bits(msg_addr), + pcie->apb_csr_base + MSI_BASE_LO_OFFSET); + writel_relaxed(upper_32_bits(msg_addr), + pcie->apb_csr_base + MSI_BASE_HI_OFFSET); + writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET); + writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET); +} + +static int mobiveil_host_init(struct mobiveil_pcie *pcie) +{ + u32 value, pab_ctrl, type = 0; + int err; + struct resource_entry *win, *tmp; + + err = mobiveil_bringup_link(pcie); + if (err) { + dev_info(&pcie->pdev->dev, "link bring-up failed\n"); + return err; + } + + /* + * program Bus Master Enable Bit in Command Register in PAB Config + * Space + */ + value = csr_readl(pcie, PCI_COMMAND); + csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER, PCI_COMMAND); + + /* + * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL + * register + */ + pab_ctrl = csr_readl(pcie, PAB_CTRL); + csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) | + (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL); + + csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), + PAB_INTP_AMBA_MISC_ENB); + + /* + * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in + * PAB_AXI_PIO_CTRL Register + */ + value = csr_readl(pcie, PAB_AXI_PIO_CTRL); + csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL); + + /* + * we'll program one outbound window for 
config reads and + * another default inbound window for all the upstream traffic + * rest of the outbound windows will be configured according to + * the "ranges" field defined in device tree + */ + + /* config outbound translation window */ + program_ob_windows(pcie, pcie->ob_wins_configured, + pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE, + resource_size(pcie->ob_io_res)); + + /* memory inbound translation window */ + program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { + type = 0; + if (resource_type(win->res) == IORESOURCE_MEM) + type = MEM_WINDOW_TYPE; + if (resource_type(win->res) == IORESOURCE_IO) + type = IO_WINDOW_TYPE; + if (type) { + /* configure outbound translation window */ + program_ob_windows(pcie, pcie->ob_wins_configured, + win->res->start, 0, type, + resource_size(win->res)); + } + } + + /* setup MSI hardware registers */ + mobiveil_pcie_enable_msi(pcie); + + return err; +} + +static void mobiveil_mask_intx_irq(struct irq_data *data) +{ + struct irq_desc *desc = irq_to_desc(data->irq); + struct mobiveil_pcie *pcie; + unsigned long flags; + u32 mask, shifted_val; + + pcie = irq_desc_get_chip_data(desc); + mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); + raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); + shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); + csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB); + raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); +} + +static void mobiveil_unmask_intx_irq(struct irq_data *data) +{ + struct irq_desc *desc = irq_to_desc(data->irq); + struct mobiveil_pcie *pcie; + unsigned long flags; + u32 shifted_val, mask; + + pcie = irq_desc_get_chip_data(desc); + mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); + raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); + shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); + csr_writel(pcie, (shifted_val | 
mask), PAB_INTP_AMBA_MISC_ENB); + raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); +} + +static struct irq_chip intx_irq_chip = { + .name = "mobiveil_pcie:intx", + .irq_enable = mobiveil_unmask_intx_irq, + .irq_disable = mobiveil_mask_intx_irq, + .irq_mask = mobiveil_mask_intx_irq, + .irq_unmask = mobiveil_unmask_intx_irq, +}; + +/* routine to setup the INTx related data */ +static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + return 0; +} + +/* INTx domain operations structure */ +static const struct irq_domain_ops intx_domain_ops = { + .map = mobiveil_pcie_intx_map, +}; + +static struct irq_chip mobiveil_msi_irq_chip = { + .name = "Mobiveil PCIe MSI", + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info mobiveil_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), + .chip = &mobiveil_msi_irq_chip, +}; + +static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data); + phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int)); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = data->hwirq; + + dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)data->hwirq, msg->address_hi, msg->address_lo); +} + +static int mobiveil_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static struct irq_chip mobiveil_msi_bottom_irq_chip = { + .name = "Mobiveil MSI", + .irq_compose_msi_msg = mobiveil_compose_msi_msg, + .irq_set_affinity = mobiveil_msi_set_affinity, +}; + +static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, + unsigned int 
virq, unsigned int nr_irqs, void *args) +{ + struct mobiveil_pcie *pcie = domain->host_data; + struct mobiveil_msi *msi = &pcie->msi; + unsigned long bit; + + WARN_ON(nr_irqs != 1); + mutex_lock(&msi->lock); + + bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors); + if (bit >= msi->num_of_vectors) { + mutex_unlock(&msi->lock); + return -ENOSPC; + } + + set_bit(bit, msi->msi_irq_in_use); + + mutex_unlock(&msi->lock); + + irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip, + domain->host_data, handle_level_irq, + NULL, NULL); + return 0; +} + +static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d); + struct mobiveil_msi *msi = &pcie->msi; + + mutex_lock(&msi->lock); + + if (!test_bit(d->hwirq, msi->msi_irq_in_use)) { + dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n", + d->hwirq); + } else { + __clear_bit(d->hwirq, msi->msi_irq_in_use); + } + + mutex_unlock(&msi->lock); +} +static const struct irq_domain_ops msi_domain_ops = { + .alloc = mobiveil_irq_msi_domain_alloc, + .free = mobiveil_irq_msi_domain_free, +}; + +static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); + struct mobiveil_msi *msi = &pcie->msi; + + mutex_init(&pcie->msi.lock); + msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, + &msi_domain_ops, pcie); + if (!msi->dev_domain) { + dev_err(dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + msi->msi_domain = pci_msi_create_irq_domain(fwnode, + &mobiveil_msi_domain_info, msi->dev_domain); + if (!msi->msi_domain) { + dev_err(dev, "failed to create MSI domain\n"); + irq_domain_remove(msi->dev_domain); + return -ENOMEM; + } + return 0; +} + +static int 
mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + int ret; + + /* setup INTx */ + pcie->intx_domain = irq_domain_add_linear(node, + PCI_NUM_INTX, &intx_domain_ops, pcie); + + if (!pcie->intx_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return -ENODEV; + } + + raw_spin_lock_init(&pcie->intx_mask_lock); + + /* setup MSI */ + ret = mobiveil_allocate_msi_domains(pcie); + if (ret) + return ret; + + return 0; +} + +static int mobiveil_pcie_probe(struct platform_device *pdev) +{ + struct mobiveil_pcie *pcie; + struct pci_bus *bus; + struct pci_bus *child; + struct pci_host_bridge *bridge; + struct device *dev = &pdev->dev; + resource_size_t iobase; + int ret; + + /* allocate the PCIe port */ + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) + return -ENODEV; + + pcie = pci_host_bridge_priv(bridge); + if (!pcie) + return -ENOMEM; + + pcie->pdev = pdev; + + ret = mobiveil_pcie_parse_dt(pcie); + if (ret) { + dev_err(dev, "Parsing DT failed, ret: %x\n", ret); + return ret; + } + + INIT_LIST_HEAD(&pcie->resources); + + /* parse the host bridge base addresses from the device tree file */ + ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, + &pcie->resources, &iobase); + if (ret) { + dev_err(dev, "Getting bridge resources failed\n"); + return -ENOMEM; + } + + /* + * configure all inbound and outbound windows and prepare the RC for + * config access + */ + ret = mobiveil_host_init(pcie); + if (ret) { + dev_err(dev, "Failed to initialize host\n"); + goto error; + } + + /* fixup for PCIe class register */ + csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS); + + /* initialize the IRQ domains */ + ret = mobiveil_pcie_init_irq_domain(pcie); + if (ret) { + dev_err(dev, "Failed creating IRQ Domain\n"); + goto error; + } + + ret = devm_request_pci_bus_resources(dev, &pcie->resources); + if (ret) + goto error; + + /* Initialize bridge */ + 
list_splice_init(&pcie->resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = pcie->root_bus_nr; + bridge->ops = &mobiveil_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + /* setup the kernel resources for the newly added PCIe root bus */ + ret = pci_scan_root_bus_bridge(bridge); + if (ret) + goto error; + + bus = bridge->bus; + + pci_assign_unassigned_bus_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + pci_bus_add_devices(bus); + + return 0; +error: + pci_free_resource_list(&pcie->resources); + return ret; +} + +static const struct of_device_id mobiveil_pcie_of_match[] = { + {.compatible = "mbvl,gpex40-pcie",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match); + +static struct platform_driver mobiveil_pcie_driver = { + .probe = mobiveil_pcie_probe, + .driver = { + .name = "mobiveil-pcie", + .of_match_table = mobiveil_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; + +builtin_platform_driver(mobiveil_pcie_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Mobiveil PCIe host controller driver"); +MODULE_AUTHOR("Subrahmanya Lingappa "); diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c new file mode 100644 index 000000000000..874d75c9ee4a --- /dev/null +++ b/drivers/pci/controller/pcie-rcar.c @@ -0,0 +1,1222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe driver for Renesas R-Car SoCs + * Copyright (C) 2014 Renesas Electronics Europe Ltd + * + * Based on: + * arch/sh/drivers/pci/pcie-sh7786.c + * arch/sh/drivers/pci/ops-sh7786.c + * Copyright (C) 2009 - 2011 Paul Mundt + * + * Author: Phil Edworthy + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +#define PCIECAR 0x000010 +#define 
PCIECCTLR 0x000018 +#define CONFIG_SEND_ENABLE BIT(31) +#define TYPE0 (0 << 8) +#define TYPE1 BIT(8) +#define PCIECDR 0x000020 +#define PCIEMSR 0x000028 +#define PCIEINTXR 0x000400 +#define PCIEPHYSR 0x0007f0 +#define PHYRDY BIT(0) +#define PCIEMSITXR 0x000840 + +/* Transfer control */ +#define PCIETCTLR 0x02000 +#define CFINIT 1 +#define PCIETSTR 0x02004 +#define DATA_LINK_ACTIVE 1 +#define PCIEERRFR 0x02020 +#define UNSUPPORTED_REQUEST BIT(4) +#define PCIEMSIFR 0x02044 +#define PCIEMSIALR 0x02048 +#define MSIFE 1 +#define PCIEMSIAUR 0x0204c +#define PCIEMSIIER 0x02050 + +/* root port address */ +#define PCIEPRAR(x) (0x02080 + ((x) * 0x4)) + +/* local address reg & mask */ +#define PCIELAR(x) (0x02200 + ((x) * 0x20)) +#define PCIELAMR(x) (0x02208 + ((x) * 0x20)) +#define LAM_PREFETCH BIT(3) +#define LAM_64BIT BIT(2) +#define LAR_ENABLE BIT(1) + +/* PCIe address reg & mask */ +#define PCIEPALR(x) (0x03400 + ((x) * 0x20)) +#define PCIEPAUR(x) (0x03404 + ((x) * 0x20)) +#define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) +#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) +#define PAR_ENABLE BIT(31) +#define IO_SPACE BIT(8) + +/* Configuration */ +#define PCICONF(x) (0x010000 + ((x) * 0x4)) +#define PMCAP(x) (0x010040 + ((x) * 0x4)) +#define EXPCAP(x) (0x010070 + ((x) * 0x4)) +#define VCCAP(x) (0x010100 + ((x) * 0x4)) + +/* link layer */ +#define IDSETR1 0x011004 +#define TLCTLR 0x011048 +#define MACSR 0x011054 +#define SPCHGFIN BIT(4) +#define SPCHGFAIL BIT(6) +#define SPCHGSUC BIT(7) +#define LINK_SPEED (0xf << 16) +#define LINK_SPEED_2_5GTS (1 << 16) +#define LINK_SPEED_5_0GTS (2 << 16) +#define MACCTLR 0x011058 +#define SPEED_CHANGE BIT(24) +#define SCRAMBLE_DISABLE BIT(27) +#define MACS2R 0x011078 +#define MACCGSPSETR 0x011084 +#define SPCNGRSN BIT(31) + +/* R-Car H1 PHY */ +#define H1_PCIEPHYADRR 0x04000c +#define WRITE_CMD BIT(16) +#define PHY_ACK BIT(24) +#define RATE_POS 12 +#define LANE_POS 8 +#define ADR_POS 0 +#define H1_PCIEPHYDOUTR 0x040014 + +/* R-Car Gen2 PHY */ 
+#define GEN2_PCIEPHYADDR 0x780 +#define GEN2_PCIEPHYDATA 0x784 +#define GEN2_PCIEPHYCTRL 0x78c + +#define INT_PCI_MSI_NR 32 + +#define RCONF(x) (PCICONF(0) + (x)) +#define RPMCAP(x) (PMCAP(0) + (x)) +#define REXPCAP(x) (EXPCAP(0) + (x)) +#define RVCCAP(x) (VCCAP(0) + (x)) + +#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) +#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) +#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) + +#define RCAR_PCI_MAX_RESOURCES 4 +#define MAX_NR_INBOUND_MAPS 6 + +struct rcar_msi { + DECLARE_BITMAP(used, INT_PCI_MSI_NR); + struct irq_domain *domain; + struct msi_controller chip; + unsigned long pages; + struct mutex lock; + int irq1; + int irq2; +}; + +static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) +{ + return container_of(chip, struct rcar_msi, chip); +} + +/* Structure representing the PCIe interface */ +struct rcar_pcie { + struct device *dev; + struct phy *phy; + void __iomem *base; + struct list_head resources; + int root_bus_nr; + struct clk *bus_clk; + struct rcar_msi msi; +}; + +static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val, + unsigned long reg) +{ + writel(val, pcie->base + reg); +} + +static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie, + unsigned long reg) +{ + return readl(pcie->base + reg); +} + +enum { + RCAR_PCI_ACCESS_READ, + RCAR_PCI_ACCESS_WRITE, +}; + +static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) +{ + int shift = 8 * (where & 3); + u32 val = rcar_pci_read_reg(pcie, where & ~3); + + val &= ~(mask << shift); + val |= data << shift; + rcar_pci_write_reg(pcie, val, where & ~3); +} + +static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) +{ + int shift = 8 * (where & 3); + u32 val = rcar_pci_read_reg(pcie, where & ~3); + + return val >> shift; +} + +/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ +static int rcar_pcie_config_access(struct rcar_pcie *pcie, + unsigned char access_type, struct pci_bus *bus, + 
unsigned int devfn, int where, u32 *data) +{ + int dev, func, reg, index; + + dev = PCI_SLOT(devfn); + func = PCI_FUNC(devfn); + reg = where & ~3; + index = reg / 4; + + /* + * While each channel has its own memory-mapped extended config + * space, it's generally only accessible when in endpoint mode. + * When in root complex mode, the controller is unable to target + * itself with either type 0 or type 1 accesses, and indeed, any + * controller initiated target transfer to its own config space + * result in a completer abort. + * + * Each channel effectively only supports a single device, but as + * the same channel <-> device access works for any PCI_SLOT() + * value, we cheat a bit here and bind the controller's config + * space to devfn 0 in order to enable self-enumeration. In this + * case the regular ECAR/ECDR path is sidelined and the mangled + * config access itself is initiated as an internal bus transaction. + */ + if (pci_is_root_bus(bus)) { + if (dev != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (access_type == RCAR_PCI_ACCESS_READ) { + *data = rcar_pci_read_reg(pcie, PCICONF(index)); + } else { + /* Keep an eye out for changes to the root bus number */ + if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) + pcie->root_bus_nr = *data & 0xff; + + rcar_pci_write_reg(pcie, *data, PCICONF(index)); + } + + return PCIBIOS_SUCCESSFUL; + } + + if (pcie->root_bus_nr < 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Clear errors */ + rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); + + /* Set the PIO address */ + rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | + PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); + + /* Enable the configuration access */ + if (bus->parent->number == pcie->root_bus_nr) + rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); + else + rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); + + /* Check for errors */ + if (rcar_pci_read_reg(pcie, PCIEERRFR) & 
UNSUPPORTED_REQUEST) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Check for master and target aborts */ + if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & + (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (access_type == RCAR_PCI_ACCESS_READ) + *data = rcar_pci_read_reg(pcie, PCIECDR); + else + rcar_pci_write_reg(pcie, *data, PCIECDR); + + /* Disable the configuration access */ + rcar_pci_write_reg(pcie, 0, PCIECCTLR); + + return PCIBIOS_SUCCESSFUL; +} + +static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct rcar_pcie *pcie = bus->sysdata; + int ret; + + ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, + bus, devfn, where, val); + if (ret != PCIBIOS_SUCCESSFUL) { + *val = 0xffffffff; + return ret; + } + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 2))) & 0xffff; + + dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", + bus->number, devfn, where, size, (unsigned long)*val); + + return ret; +} + +/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ +static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct rcar_pcie *pcie = bus->sysdata; + int shift, ret; + u32 data; + + ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, + bus, devfn, where, &data); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", + bus->number, devfn, where, size, (unsigned long)val); + + if (size == 1) { + shift = 8 * (where & 3); + data &= ~(0xff << shift); + data |= ((val & 0xff) << shift); + } else if (size == 2) { + shift = 8 * (where & 2); + data &= ~(0xffff << shift); + data |= ((val & 0xffff) << shift); + } else + data = val; + + ret = rcar_pcie_config_access(pcie, 
RCAR_PCI_ACCESS_WRITE, + bus, devfn, where, &data); + + return ret; +} + +static struct pci_ops rcar_pcie_ops = { + .read = rcar_pcie_read_conf, + .write = rcar_pcie_write_conf, +}; + +static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, + struct resource *res) +{ + /* Setup PCIe address space mappings for each resource */ + resource_size_t size; + resource_size_t res_start; + u32 mask; + + rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); + + /* + * The PAMR mask is calculated in units of 128Bytes, which + * keeps things pretty simple. + */ + size = resource_size(res); + mask = (roundup_pow_of_two(size) / SZ_128) - 1; + rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); + + if (res->flags & IORESOURCE_IO) + res_start = pci_pio_to_address(res->start); + else + res_start = res->start; + + rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); + rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, + PCIEPALR(win)); + + /* First resource is for IO */ + mask = PAR_ENABLE; + if (res->flags & IORESOURCE_IO) + mask |= IO_SPACE; + + rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); +} + +static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) +{ + struct resource_entry *win; + int i = 0; + + /* Setup PCI resources */ + resource_list_for_each_entry(win, &pci->resources) { + struct resource *res = win->res; + + if (!res->flags) + continue; + + switch (resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: + rcar_pcie_setup_window(i, pci, res); + i++; + break; + case IORESOURCE_BUS: + pci->root_bus_nr = res->start; + break; + default: + continue; + } + + pci_add_resource(resource, res); + } + + return 1; +} + +static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) +{ + struct device *dev = pcie->dev; + unsigned int timeout = 1000; + u32 macsr; + + if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS) + return; + + if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) { + 
dev_err(dev, "Speed change already in progress\n"); + return; + } + + macsr = rcar_pci_read_reg(pcie, MACSR); + if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS) + goto done; + + /* Set target link speed to 5.0 GT/s */ + rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS, + PCI_EXP_LNKSTA_CLS_5_0GB); + + /* Set speed change reason as intentional factor */ + rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0); + + /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */ + if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL)) + rcar_pci_write_reg(pcie, macsr, MACSR); + + /* Start link speed change */ + rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE); + + while (timeout--) { + macsr = rcar_pci_read_reg(pcie, MACSR); + if (macsr & SPCHGFIN) { + /* Clear the interrupt bits */ + rcar_pci_write_reg(pcie, macsr, MACSR); + + if (macsr & SPCHGFAIL) + dev_err(dev, "Speed change failed\n"); + + goto done; + } + + msleep(1); + } + + dev_err(dev, "Speed change timed out\n"); + +done: + dev_info(dev, "Current link speed is %s GT/s\n", + (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? 
"5" : "2.5"); +} + +static int rcar_pcie_enable(struct rcar_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + struct pci_bus *bus, *child; + int ret; + + /* Try setting 5 GT/s link speed */ + rcar_pcie_force_speedup(pcie); + + rcar_pcie_setup(&bridge->windows, pcie); + + pci_add_flags(PCI_REASSIGN_ALL_BUS); + + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = pcie->root_bus_nr; + bridge->ops = &rcar_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + if (IS_ENABLED(CONFIG_PCI_MSI)) + bridge->msi = &pcie->msi.chip; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) + return ret; + + bus = bridge->bus; + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + + return 0; +} + +static int phy_wait_for_ack(struct rcar_pcie *pcie) +{ + struct device *dev = pcie->dev; + unsigned int timeout = 100; + + while (timeout--) { + if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) + return 0; + + udelay(100); + } + + dev_err(dev, "Access to PCIe phy timed out\n"); + + return -ETIMEDOUT; +} + +static void phy_write_reg(struct rcar_pcie *pcie, + unsigned int rate, unsigned int addr, + unsigned int lane, unsigned int data) +{ + unsigned long phyaddr; + + phyaddr = WRITE_CMD | + ((rate & 1) << RATE_POS) | + ((lane & 0xf) << LANE_POS) | + ((addr & 0xff) << ADR_POS); + + /* Set write data */ + rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); + rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); + + /* Ignore errors as they will be dealt with if the data link is down */ + phy_wait_for_ack(pcie); + + /* Clear command */ + rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); + rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR); + + /* Ignore errors as they will be dealt with if the data link is down */ + 
phy_wait_for_ack(pcie); +} + +static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie) +{ + unsigned int timeout = 10; + + while (timeout--) { + if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY) + return 0; + + msleep(5); + } + + return -ETIMEDOUT; +} + +static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) +{ + unsigned int timeout = 10000; + + while (timeout--) { + if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) + return 0; + + udelay(5); + cpu_relax(); + } + + return -ETIMEDOUT; +} + +static int rcar_pcie_hw_init(struct rcar_pcie *pcie) +{ + int err; + + /* Begin initialization */ + rcar_pci_write_reg(pcie, 0, PCIETCTLR); + + /* Set mode */ + rcar_pci_write_reg(pcie, 1, PCIEMSR); + + err = rcar_pcie_wait_for_phyrdy(pcie); + if (err) + return err; + + /* + * Initial header for port config space is type 1, set the device + * class to match. Hardware takes care of propagating the IDSETR + * settings, so there is no need to bother with a quirk. + */ + rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); + + /* + * Setup Secondary Bus Number & Subordinate Bus Number, even though + * they aren't used, to avoid bridge being detected as broken. + */ + rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); + rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); + + /* Initialize default capabilities. */ + rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); + rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), + PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); + rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, + PCI_HEADER_TYPE_BRIDGE); + + /* Enable data link layer active state reporting */ + rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC, + PCI_EXP_LNKCAP_DLLLARC); + + /* Write out the physical slot number = 0 */ + rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); + + /* Set the completion timer timeout to the maximum 50ms. 
*/ + rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); + + /* Terminate list of capabilities (Next Capability Offset=0) */ + rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); + + /* Enable MSI */ + if (IS_ENABLED(CONFIG_PCI_MSI)) + rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); + + /* Finish initialization - establish a PCI Express link */ + rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); + + /* This will timeout if we don't have a link. */ + err = rcar_pcie_wait_for_dl(pcie); + if (err) + return err; + + /* Enable INTx interrupts */ + rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); + + wmb(); + + return 0; +} + +static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie) +{ + /* Initialize the phy */ + phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); + phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); + phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); + phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); + phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); + phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); + phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); + phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); + phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); + phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); + phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); + phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); + + phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); + phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); + phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); + + return 0; +} + +static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie) +{ + /* + * These settings come from the R-Car Series, 2nd Generation User's + * Manual, section 50.3.1 (2) Initialization of the physical layer. 
+ */ + rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR); + rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA); + rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); + rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); + + rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR); + /* The following value is for DC connection, no termination resistor */ + rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA); + rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); + rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); + + return 0; +} + +static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie) +{ + int err; + + err = phy_init(pcie->phy); + if (err) + return err; + + return phy_power_on(pcie->phy); +} + +static int rcar_msi_alloc(struct rcar_msi *chip) +{ + int msi; + + mutex_lock(&chip->lock); + + msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); + if (msi < INT_PCI_MSI_NR) + set_bit(msi, chip->used); + else + msi = -ENOSPC; + + mutex_unlock(&chip->lock); + + return msi; +} + +static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs) +{ + int msi; + + mutex_lock(&chip->lock); + msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR, + order_base_2(no_irqs)); + mutex_unlock(&chip->lock); + + return msi; +} + +static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) +{ + mutex_lock(&chip->lock); + clear_bit(irq, chip->used); + mutex_unlock(&chip->lock); +} + +static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) +{ + struct rcar_pcie *pcie = data; + struct rcar_msi *msi = &pcie->msi; + struct device *dev = pcie->dev; + unsigned long reg; + + reg = rcar_pci_read_reg(pcie, PCIEMSIFR); + + /* MSI & INTx share an interrupt - we only handle MSI here */ + if (!reg) + return IRQ_NONE; + + while (reg) { + unsigned int index = find_first_bit(®, 32); + unsigned int irq; + + /* clear the interrupt */ + rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); + + irq = irq_find_mapping(msi->domain, index); + if (irq) { + if 
(test_bit(index, msi->used)) + generic_handle_irq(irq); + else + dev_info(dev, "unhandled MSI\n"); + } else { + /* Unknown MSI, just clear it */ + dev_dbg(dev, "unexpected MSI\n"); + } + + /* see if there's any more pending in this vector */ + reg = rcar_pci_read_reg(pcie, PCIEMSIFR); + } + + return IRQ_HANDLED; +} + +static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, + struct msi_desc *desc) +{ + struct rcar_msi *msi = to_rcar_msi(chip); + struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); + struct msi_msg msg; + unsigned int irq; + int hwirq; + + hwirq = rcar_msi_alloc(msi); + if (hwirq < 0) + return hwirq; + + irq = irq_find_mapping(msi->domain, hwirq); + if (!irq) { + rcar_msi_free(msi, hwirq); + return -EINVAL; + } + + irq_set_msi_desc(irq, desc); + + msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; + msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); + msg.data = hwirq; + + pci_write_msi_msg(irq, &msg); + + return 0; +} + +static int rcar_msi_setup_irqs(struct msi_controller *chip, + struct pci_dev *pdev, int nvec, int type) +{ + struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); + struct rcar_msi *msi = to_rcar_msi(chip); + struct msi_desc *desc; + struct msi_msg msg; + unsigned int irq; + int hwirq; + int i; + + /* MSI-X interrupts are not supported */ + if (type == PCI_CAP_ID_MSIX) + return -EINVAL; + + WARN_ON(!list_is_singular(&pdev->dev.msi_list)); + desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list); + + hwirq = rcar_msi_alloc_region(msi, nvec); + if (hwirq < 0) + return -ENOSPC; + + irq = irq_find_mapping(msi->domain, hwirq); + if (!irq) + return -ENOSPC; + + for (i = 0; i < nvec; i++) { + /* + * irq_create_mapping() called from rcar_pcie_probe() pre- + * allocates descs, so there is no need to allocate descs here. + * We can therefore assume that if irq_find_mapping() above + * returns non-zero, then the descs are also successfully + * allocated. 
+ */ + if (irq_set_msi_desc_off(irq, i, desc)) { + /* TODO: clear */ + return -EINVAL; + } + } + + desc->nvec_used = nvec; + desc->msi_attrib.multiple = order_base_2(nvec); + + msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; + msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); + msg.data = hwirq; + + pci_write_msi_msg(irq, &msg); + + return 0; +} + +static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) +{ + struct rcar_msi *msi = to_rcar_msi(chip); + struct irq_data *d = irq_get_irq_data(irq); + + rcar_msi_free(msi, d->hwirq); +} + +static struct irq_chip rcar_msi_irq_chip = { + .name = "R-Car PCIe MSI", + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops msi_domain_ops = { + .map = rcar_msi_map, +}; + +static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie) +{ + struct rcar_msi *msi = &pcie->msi; + int i, irq; + + for (i = 0; i < INT_PCI_MSI_NR; i++) { + irq = irq_find_mapping(msi->domain, i); + if (irq > 0) + irq_dispose_mapping(irq); + } + + irq_domain_remove(msi->domain); +} + +static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct rcar_msi *msi = &pcie->msi; + unsigned long base; + int err, i; + + mutex_init(&msi->lock); + + msi->chip.dev = dev; + msi->chip.setup_irq = rcar_msi_setup_irq; + msi->chip.setup_irqs = rcar_msi_setup_irqs; + msi->chip.teardown_irq = rcar_msi_teardown_irq; + + msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, + &msi_domain_ops, &msi->chip); + if (!msi->domain) { + dev_err(dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + for (i = 0; i < INT_PCI_MSI_NR; i++) 
+ irq_create_mapping(msi->domain, i); + + /* Two irqs are for MSI, but they are also used for non-MSI irqs */ + err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq, + IRQF_SHARED | IRQF_NO_THREAD, + rcar_msi_irq_chip.name, pcie); + if (err < 0) { + dev_err(dev, "failed to request IRQ: %d\n", err); + goto err; + } + + err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq, + IRQF_SHARED | IRQF_NO_THREAD, + rcar_msi_irq_chip.name, pcie); + if (err < 0) { + dev_err(dev, "failed to request IRQ: %d\n", err); + goto err; + } + + /* setup MSI data target */ + msi->pages = __get_free_pages(GFP_KERNEL, 0); + base = virt_to_phys((void *)msi->pages); + + rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); + rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); + + /* enable all MSI interrupts */ + rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); + + return 0; + +err: + rcar_pcie_unmap_msi(pcie); + return err; +} + +static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie) +{ + struct rcar_msi *msi = &pcie->msi; + + /* Disable all MSI interrupts */ + rcar_pci_write_reg(pcie, 0, PCIEMSIIER); + + /* Disable address decoding of the MSI interrupt, MSIFE */ + rcar_pci_write_reg(pcie, 0, PCIEMSIALR); + + free_pages(msi->pages, 0); + + rcar_pcie_unmap_msi(pcie); +} + +static int rcar_pcie_get_resources(struct rcar_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct resource res; + int err, i; + + pcie->phy = devm_phy_optional_get(dev, "pcie"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + + err = of_address_to_resource(dev->of_node, 0, &res); + if (err) + return err; + + pcie->base = devm_ioremap_resource(dev, &res); + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); + + pcie->bus_clk = devm_clk_get(dev, "pcie_bus"); + if (IS_ERR(pcie->bus_clk)) { + dev_err(dev, "cannot get pcie bus clock\n"); + return PTR_ERR(pcie->bus_clk); + } + + i = irq_of_parse_and_map(dev->of_node, 0); + if (!i) { + dev_err(dev, "cannot get platform resources for msi interrupt\n"); + err = 
-ENOENT; + goto err_irq1; + } + pcie->msi.irq1 = i; + + i = irq_of_parse_and_map(dev->of_node, 1); + if (!i) { + dev_err(dev, "cannot get platform resources for msi interrupt\n"); + err = -ENOENT; + goto err_irq2; + } + pcie->msi.irq2 = i; + + return 0; + +err_irq2: + irq_dispose_mapping(pcie->msi.irq1); +err_irq1: + return err; +} + +static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, + struct of_pci_range *range, + int *index) +{ + u64 restype = range->flags; + u64 cpu_addr = range->cpu_addr; + u64 cpu_end = range->cpu_addr + range->size; + u64 pci_addr = range->pci_addr; + u32 flags = LAM_64BIT | LAR_ENABLE; + u64 mask; + u64 size; + int idx = *index; + + if (restype & IORESOURCE_PREFETCH) + flags |= LAM_PREFETCH; + + /* + * If the size of the range is larger than the alignment of the start + * address, we have to use multiple entries to perform the mapping. + */ + if (cpu_addr > 0) { + unsigned long nr_zeros = __ffs64(cpu_addr); + u64 alignment = 1ULL << nr_zeros; + + size = min(range->size, alignment); + } else { + size = range->size; + } + /* Hardware supports max 4GiB inbound region */ + size = min(size, 1ULL << 32); + + mask = roundup_pow_of_two(size) - 1; + mask &= ~0xf; + + while (cpu_addr < cpu_end) { + /* + * Set up 64-bit inbound regions as the range parser doesn't + * distinguish between 32 and 64-bit types. 
+ */ + rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), + PCIEPRAR(idx)); + rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); + rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, + PCIELAMR(idx)); + + rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), + PCIEPRAR(idx + 1)); + rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), + PCIELAR(idx + 1)); + rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1)); + + pci_addr += size; + cpu_addr += size; + idx += 2; + + if (idx > MAX_NR_INBOUND_MAPS) { + dev_err(pcie->dev, "Failed to map inbound regions!\n"); + return -EINVAL; + } + } + *index = idx; + + return 0; +} + +static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, + struct device_node *np) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + int index = 0; + int err; + + if (of_pci_dma_range_parser_init(&parser, np)) + return -EINVAL; + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + + dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + + err = rcar_pcie_inbound_ranges(pcie, &range, &index); + if (err) + return err; + } + + return 0; +} + +static const struct of_device_id rcar_pcie_of_match[] = { + { .compatible = "renesas,pcie-r8a7779", + .data = rcar_pcie_phy_init_h1 }, + { .compatible = "renesas,pcie-r8a7790", + .data = rcar_pcie_phy_init_gen2 }, + { .compatible = "renesas,pcie-r8a7791", + .data = rcar_pcie_phy_init_gen2 }, + { .compatible = "renesas,pcie-rcar-gen2", + .data = rcar_pcie_phy_init_gen2 }, + { .compatible = "renesas,pcie-r8a7795", + .data = rcar_pcie_phy_init_gen3 }, + { .compatible = "renesas,pcie-rcar-gen3", + .data = rcar_pcie_phy_init_gen3 }, + {}, +}; + +static int rcar_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rcar_pcie *pcie; + unsigned int data; + int err; + int (*phy_init_fn)(struct rcar_pcie *); + struct 
pci_host_bridge *bridge; + + bridge = pci_alloc_host_bridge(sizeof(*pcie)); + if (!bridge) + return -ENOMEM; + + pcie = pci_host_bridge_priv(bridge); + + pcie->dev = dev; + + err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL); + if (err) + goto err_free_bridge; + + pm_runtime_enable(pcie->dev); + err = pm_runtime_get_sync(pcie->dev); + if (err < 0) { + dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); + goto err_pm_disable; + } + + err = rcar_pcie_get_resources(pcie); + if (err < 0) { + dev_err(dev, "failed to request resources: %d\n", err); + goto err_pm_put; + } + + err = clk_prepare_enable(pcie->bus_clk); + if (err) { + dev_err(dev, "failed to enable bus clock: %d\n", err); + goto err_unmap_msi_irqs; + } + + err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); + if (err) + goto err_clk_disable; + + phy_init_fn = of_device_get_match_data(dev); + err = phy_init_fn(pcie); + if (err) { + dev_err(dev, "failed to init PCIe PHY\n"); + goto err_clk_disable; + } + + /* Failure to get a link might just be that no cards are inserted */ + if (rcar_pcie_hw_init(pcie)) { + dev_info(dev, "PCIe link down\n"); + err = -ENODEV; + goto err_clk_disable; + } + + data = rcar_pci_read_reg(pcie, MACSR); + dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + err = rcar_pcie_enable_msi(pcie); + if (err < 0) { + dev_err(dev, + "failed to enable MSI support: %d\n", + err); + goto err_clk_disable; + } + } + + err = rcar_pcie_enable(pcie); + if (err) + goto err_msi_teardown; + + return 0; + +err_msi_teardown: + if (IS_ENABLED(CONFIG_PCI_MSI)) + rcar_pcie_teardown_msi(pcie); + +err_clk_disable: + clk_disable_unprepare(pcie->bus_clk); + +err_unmap_msi_irqs: + irq_dispose_mapping(pcie->msi.irq2); + irq_dispose_mapping(pcie->msi.irq1); + +err_pm_put: + pm_runtime_put(dev); + +err_pm_disable: + pm_runtime_disable(dev); + pci_free_resource_list(&pcie->resources); + +err_free_bridge: + pci_free_host_bridge(bridge); + + return err; 
+} + +static struct platform_driver rcar_pcie_driver = { + .driver = { + .name = "rcar-pcie", + .of_match_table = rcar_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = rcar_pcie_probe, +}; +builtin_platform_driver(rcar_pcie_driver); diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c new file mode 100644 index 000000000000..fc267a49a932 --- /dev/null +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -0,0 +1,642 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip AXI PCIe endpoint controller driver + * + * Copyright (c) 2018 Rockchip, Inc. + * + * Author: Shawn Lin + * Simon Xue + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-rockchip.h" + +/** + * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver + * @rockchip: Rockchip PCIe controller + * @max_regions: maximum number of regions supported by hardware + * @ob_region_map: bitmask of mapped outbound regions + * @ob_addr: base addresses in the AXI bus where the outbound regions start + * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ + * dedicated outbound regions is mapped. + * @irq_cpu_addr: base address in the CPU space where a write access triggers + * the sending of a memory write (MSI) / normal message (legacy + * IRQ) TLP through the PCIe bus. + * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ + * dedicated outbound region. + * @irq_pci_fn: the latest PCI function that has updated the mapping of + * the MSI/legacy IRQ dedicated outbound region. + * @irq_pending: bitmask of asserted legacy IRQs. 
+ */ +struct rockchip_pcie_ep { + struct rockchip_pcie rockchip; + struct pci_epc *epc; + u32 max_regions; + unsigned long ob_region_map; + phys_addr_t *ob_addr; + phys_addr_t irq_phys_addr; + void __iomem *irq_cpu_addr; + u64 irq_pci_addr; + u8 irq_pci_fn; + u8 irq_pending; +}; + +static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, + u32 region) +{ + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region)); + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region)); + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region)); + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region)); + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region)); + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region)); +} + +static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, + u32 r, u32 type, u64 cpu_addr, + u64 pci_addr, size_t size) +{ + u64 sz = 1ULL << fls64(size - 1); + int num_pass_bits = ilog2(sz); + u32 addr0, addr1, desc0, desc1; + bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG); + + /* The minimal region size is 1MB */ + if (num_pass_bits < 8) + num_pass_bits = 8; + + cpu_addr -= rockchip->mem_res->start; + addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) & + PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | + (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); + addr1 = upper_32_bits(is_nor_msg ? 
cpu_addr : pci_addr); + desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type; + desc1 = 0; + + if (is_nor_msg) { + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); + rockchip_pcie_write(rockchip, desc0, + ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); + rockchip_pcie_write(rockchip, desc1, + ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); + } else { + /* PCI bus address region */ + rockchip_pcie_write(rockchip, addr0, + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); + rockchip_pcie_write(rockchip, addr1, + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); + rockchip_pcie_write(rockchip, desc0, + ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); + rockchip_pcie_write(rockchip, desc1, + ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); + + addr0 = + ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | + (lower_32_bits(cpu_addr) & + PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); + addr1 = upper_32_bits(cpu_addr); + } + + /* CPU bus address region */ + rockchip_pcie_write(rockchip, addr0, + ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r)); + rockchip_pcie_write(rockchip, addr1, + ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r)); +} + +static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, + struct pci_epf_header *hdr) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + + /* All functions share the same vendor ID with function 0 */ + if (fn == 0) { + u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) | + (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16; + + rockchip_pcie_write(rockchip, vid_regs, + PCIE_CORE_CONFIG_VENDOR); + } + + rockchip_pcie_write(rockchip, hdr->deviceid << 16, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID); + + rockchip_pcie_write(rockchip, + hdr->revid | + hdr->progif_code << 8 | + hdr->subclass_code << 16 | + hdr->baseclass_code << 24, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID); + rockchip_pcie_write(rockchip, hdr->cache_line_size, 
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + PCI_CACHE_LINE_SIZE); + rockchip_pcie_write(rockchip, hdr->subsys_id << 16, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + PCI_SUBSYSTEM_VENDOR_ID); + rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + PCI_INTERRUPT_LINE); + + return 0; +} + +static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, + struct pci_epf_bar *epf_bar) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + dma_addr_t bar_phys = epf_bar->phys_addr; + enum pci_barno bar = epf_bar->barno; + int flags = epf_bar->flags; + u32 addr0, addr1, reg, cfg, b, aperture, ctrl; + u64 sz; + + /* BAR size is 2^(aperture + 7) */ + sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE); + + /* + * roundup_pow_of_two() returns an unsigned long, which is not suited + * for 64bit values. + */ + sz = 1ULL << fls64(sz - 1); + aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */ + + if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { + ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS; + } else { + bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH); + bool is_64bits = sz > SZ_2G; + + if (is_64bits && (bar & 1)) + return -EINVAL; + + if (is_64bits && is_prefetch) + ctrl = + ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS; + else if (is_prefetch) + ctrl = + ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS; + else if (is_64bits) + ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS; + else + ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS; + } + + if (bar < BAR_4) { + reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn); + b = bar; + } else { + reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn); + b = bar - BAR_4; + } + + addr0 = lower_32_bits(bar_phys); + addr1 = upper_32_bits(bar_phys); + + cfg = rockchip_pcie_read(rockchip, reg); + cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | + ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); + 
cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | + ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); + + rockchip_pcie_write(rockchip, cfg, reg); + rockchip_pcie_write(rockchip, addr0, + ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar)); + rockchip_pcie_write(rockchip, addr1, + ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); + + return 0; +} + +static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, + struct pci_epf_bar *epf_bar) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + u32 reg, cfg, b, ctrl; + enum pci_barno bar = epf_bar->barno; + + if (bar < BAR_4) { + reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn); + b = bar; + } else { + reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn); + b = bar - BAR_4; + } + + ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED; + cfg = rockchip_pcie_read(rockchip, reg); + cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | + ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); + cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); + + rockchip_pcie_write(rockchip, cfg, reg); + rockchip_pcie_write(rockchip, 0x0, + ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar)); + rockchip_pcie_write(rockchip, 0x0, + ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); +} + +static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, + phys_addr_t addr, u64 pci_addr, + size_t size) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *pcie = &ep->rockchip; + u32 r; + + r = find_first_zero_bit(&ep->ob_region_map, + sizeof(ep->ob_region_map) * BITS_PER_LONG); + /* + * Region 0 is reserved for configuration space and shouldn't + * be used elsewhere per TRM, so leave it out. 
+ */ + if (r >= ep->max_regions - 1) { + dev_err(&epc->dev, "no free outbound region\n"); + return -EINVAL; + } + + rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr, + pci_addr, size); + + set_bit(r, &ep->ob_region_map); + ep->ob_addr[r] = addr; + + return 0; +} + +static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, + phys_addr_t addr) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + u32 r; + + for (r = 0; r < ep->max_regions - 1; r++) + if (ep->ob_addr[r] == addr) + break; + + /* + * Region 0 is reserved for configuration space and shouldn't + * be used elsewhere per TRM, so leave it out. + */ + if (r == ep->max_regions - 1) + return; + + rockchip_pcie_clear_ep_ob_atu(rockchip, r); + + ep->ob_addr[r] = 0; + clear_bit(r, &ep->ob_region_map); +} + +static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, + u8 multi_msg_cap) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + u16 flags; + + flags = rockchip_pcie_read(rockchip, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); + flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK; + flags |= + ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) | + PCI_MSI_FLAGS_64BIT; + flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP; + rockchip_pcie_write(rockchip, flags, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); + return 0; +} + +static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + u16 flags; + + flags = rockchip_pcie_read(rockchip, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); + if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) + return -EINVAL; + + return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> + ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); +} + +static void 
rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, + u8 intx, bool is_asserted) +{ + struct rockchip_pcie *rockchip = &ep->rockchip; + u32 r = ep->max_regions - 1; + u32 offset; + u16 status; + u8 msg_code; + + if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR || + ep->irq_pci_fn != fn)) { + rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r, + AXI_WRAPPER_NOR_MSG, + ep->irq_phys_addr, 0, 0); + ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR; + ep->irq_pci_fn = fn; + } + + intx &= 3; + if (is_asserted) { + ep->irq_pending |= BIT(intx); + msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx; + } else { + ep->irq_pending &= ~BIT(intx); + msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx; + } + + status = rockchip_pcie_read(rockchip, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_CMD_STATUS); + status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS; + + if ((status != 0) ^ (ep->irq_pending != 0)) { + status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS; + rockchip_pcie_write(rockchip, status, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_CMD_STATUS); + } + + offset = + ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) | + ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA; + writel(0, ep->irq_cpu_addr + offset); +} + +static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn, + u8 intx) +{ + u16 cmd; + + cmd = rockchip_pcie_read(&ep->rockchip, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_CMD_STATUS); + + if (cmd & PCI_COMMAND_INTX_DISABLE) + return -EINVAL; + + /* + * Should add some delay between toggling INTx per TRM vaguely saying + * it depends on some cycles of the AHB bus clock to function it. So + * add sufficient 1ms here. 
+ */ + rockchip_pcie_ep_assert_intx(ep, fn, intx, true); + mdelay(1); + rockchip_pcie_ep_assert_intx(ep, fn, intx, false); + return 0; +} + +static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, + u8 interrupt_num) +{ + struct rockchip_pcie *rockchip = &ep->rockchip; + u16 flags, mme, data, data_mask; + u8 msi_count; + u64 pci_addr, pci_addr_mask = 0xff; + + /* Check MSI enable bit */ + flags = rockchip_pcie_read(&ep->rockchip, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); + if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) + return -EINVAL; + + /* Get MSI numbers from MME */ + mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> + ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); + msi_count = 1 << mme; + if (!interrupt_num || interrupt_num > msi_count) + return -EINVAL; + + /* Set MSI private data */ + data_mask = msi_count - 1; + data = rockchip_pcie_read(rockchip, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_MSI_CTRL_REG + + PCI_MSI_DATA_64); + data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); + + /* Get MSI PCI address */ + pci_addr = rockchip_pcie_read(rockchip, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_MSI_CTRL_REG + + PCI_MSI_ADDRESS_HI); + pci_addr <<= 32; + pci_addr |= rockchip_pcie_read(rockchip, + ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + + ROCKCHIP_PCIE_EP_MSI_CTRL_REG + + PCI_MSI_ADDRESS_LO); + pci_addr &= GENMASK_ULL(63, 2); + + /* Set the outbound region if needed. 
*/ + if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || + ep->irq_pci_fn != fn)) { + rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1, + AXI_WRAPPER_MEM_WRITE, + ep->irq_phys_addr, + pci_addr & ~pci_addr_mask, + pci_addr_mask + 1); + ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); + ep->irq_pci_fn = fn; + } + + writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); + return 0; +} + +static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, + enum pci_epc_irq_type type, + u8 interrupt_num) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0); + case PCI_EPC_IRQ_MSI: + return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num); + default: + return -EINVAL; + } +} + +static int rockchip_pcie_ep_start(struct pci_epc *epc) +{ + struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); + struct rockchip_pcie *rockchip = &ep->rockchip; + struct pci_epf *epf; + u32 cfg; + + cfg = BIT(0); + list_for_each_entry(epf, &epc->pci_epf, list) + cfg |= BIT(epf->func_no); + + rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG); + + list_for_each_entry(epf, &epc->pci_epf, list) + pci_epf_linkup(epf); + + return 0; +} + +static const struct pci_epc_ops rockchip_pcie_epc_ops = { + .write_header = rockchip_pcie_ep_write_header, + .set_bar = rockchip_pcie_ep_set_bar, + .clear_bar = rockchip_pcie_ep_clear_bar, + .map_addr = rockchip_pcie_ep_map_addr, + .unmap_addr = rockchip_pcie_ep_unmap_addr, + .set_msi = rockchip_pcie_ep_set_msi, + .get_msi = rockchip_pcie_ep_get_msi, + .raise_irq = rockchip_pcie_ep_raise_irq, + .start = rockchip_pcie_ep_start, +}; + +static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip, + struct rockchip_pcie_ep *ep) +{ + struct device *dev = rockchip->dev; + int err; + + err = rockchip_pcie_parse_dt(rockchip); + if (err) + return err; + + err = rockchip_pcie_get_phys(rockchip); + if (err) + return err; + + 
err = of_property_read_u32(dev->of_node, + "rockchip,max-outbound-regions", + &ep->max_regions); + if (err < 0 || ep->max_regions > MAX_REGION_LIMIT) + ep->max_regions = MAX_REGION_LIMIT; + + err = of_property_read_u8(dev->of_node, "max-functions", + &ep->epc->max_functions); + if (err < 0) + ep->epc->max_functions = 1; + + return 0; +} + +static const struct of_device_id rockchip_pcie_ep_of_match[] = { + { .compatible = "rockchip,rk3399-pcie-ep"}, + {}, +}; + +static int rockchip_pcie_ep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_pcie_ep *ep; + struct rockchip_pcie *rockchip; + struct pci_epc *epc; + size_t max_regions; + int err; + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + rockchip = &ep->rockchip; + rockchip->is_rc = false; + rockchip->dev = dev; + + epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops); + if (IS_ERR(epc)) { + dev_err(dev, "failed to create epc device\n"); + return PTR_ERR(epc); + } + + ep->epc = epc; + epc_set_drvdata(epc, ep); + + err = rockchip_pcie_parse_ep_dt(rockchip, ep); + if (err) + return err; + + err = rockchip_pcie_enable_clocks(rockchip); + if (err) + return err; + + err = rockchip_pcie_init_port(rockchip); + if (err) + goto err_disable_clocks; + + /* Establish the link automatically */ + rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, + PCIE_CLIENT_CONFIG); + + max_regions = ep->max_regions; + ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr), + GFP_KERNEL); + + if (!ep->ob_addr) { + err = -ENOMEM; + goto err_uninit_port; + } + + /* Only enable function 0 by default */ + rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG); + + err = pci_epc_mem_init(epc, rockchip->mem_res->start, + resource_size(rockchip->mem_res)); + if (err < 0) { + dev_err(dev, "failed to initialize the memory space\n"); + goto err_uninit_port; + } + + ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, + SZ_128K); + 
if (!ep->irq_cpu_addr) { + dev_err(dev, "failed to reserve memory space for MSI\n"); + err = -ENOMEM; + goto err_epc_mem_exit; + } + + ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR; + + return 0; +err_epc_mem_exit: + pci_epc_mem_exit(epc); +err_uninit_port: + rockchip_pcie_deinit_phys(rockchip); +err_disable_clocks: + rockchip_pcie_disable_clocks(rockchip); + return err; +} + +static struct platform_driver rockchip_pcie_ep_driver = { + .driver = { + .name = "rockchip-pcie-ep", + .of_match_table = rockchip_pcie_ep_of_match, + }, + .probe = rockchip_pcie_ep_probe, +}; + +builtin_platform_driver(rockchip_pcie_ep_driver); diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c new file mode 100644 index 000000000000..1372d270764f --- /dev/null +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -0,0 +1,1142 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip AXI PCIe host controller driver + * + * Copyright (c) 2016 Rockchip, Inc. + * + * Author: Shawn Lin + * Wenrui Li + * + * Bits taken from Synopsys DesignWare Host controller driver and + * ARM PCI Host generic driver. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" +#include "pcie-rockchip.h" + +static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) +{ + u32 status; + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); +} + +static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) +{ + u32 status; + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); +} + +static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) +{ + u32 val; + + /* Update Tx credit maximum update interval */ + val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1); + val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK; + val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */ + rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1); +} + +static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, + struct pci_bus *bus, int dev) +{ + /* access only one slot on each root port */ + if (bus->number == rockchip->root_bus_nr && dev > 0) + return 0; + + /* + * do not read more than one device on the bus directly attached + * to RC's downstream side. + */ + if (bus->primary == rockchip->root_bus_nr && dev > 0) + return 0; + + return 1; +} + +static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip) +{ + u32 val; + u8 map; + + if (rockchip->legacy_phy) + return GENMASK(MAX_LANE_NUM - 1, 0); + + val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP); + map = val & PCIE_CORE_LANE_MAP_MASK; + + /* The link may be using a reverse-indexed mapping. 
*/ + if (val & PCIE_CORE_LANE_MAP_REVERSE) + map = bitrev8(map) >> 4; + + return map; +} + +static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, + int where, int size, u32 *val) +{ + void __iomem *addr; + + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (size == 4) { + *val = readl(addr); + } else if (size == 2) { + *val = readw(addr); + } else if (size == 1) { + *val = readb(addr); + } else { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, + int where, int size, u32 val) +{ + u32 mask, tmp, offset; + void __iomem *addr; + + offset = where & ~0x3; + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; + + if (size == 4) { + writel(val, addr); + return PCIBIOS_SUCCESSFUL; + } + + mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); + + /* + * N.B. This read/modify/write isn't safe in general because it can + * corrupt RW1C bits in adjacent registers. But the hardware + * doesn't support smaller writes. 
+ */ + tmp = readl(addr) & mask; + tmp |= val << ((where & 0x3) * 8); + writel(tmp, addr); + + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, + struct pci_bus *bus, u32 devfn, + int where, int size, u32 *val) +{ + u32 busdev; + + busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + + if (!IS_ALIGNED(busdev, size)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (bus->parent->number == rockchip->root_bus_nr) + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + else + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE1_CFG); + + if (size == 4) { + *val = readl(rockchip->reg_base + busdev); + } else if (size == 2) { + *val = readw(rockchip->reg_base + busdev); + } else if (size == 1) { + *val = readb(rockchip->reg_base + busdev); + } else { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, + struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + u32 busdev; + + busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); + if (!IS_ALIGNED(busdev, size)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (bus->parent->number == rockchip->root_bus_nr) + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + else + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE1_CFG); + + if (size == 4) + writel(val, rockchip->reg_base + busdev); + else if (size == 2) + writew(val, rockchip->reg_base + busdev); + else if (size == 1) + writeb(val, rockchip->reg_base + busdev); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) +{ + struct rockchip_pcie *rockchip = bus->sysdata; + + if (!rockchip_pcie_valid_device(rockchip, bus, 
PCI_SLOT(devfn))) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (bus->number == rockchip->root_bus_nr) + return rockchip_pcie_rd_own_conf(rockchip, where, size, val); + + return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, + val); +} + +static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct rockchip_pcie *rockchip = bus->sysdata; + + if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (bus->number == rockchip->root_bus_nr) + return rockchip_pcie_wr_own_conf(rockchip, where, size, val); + + return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, + val); +} + +static struct pci_ops rockchip_pcie_ops = { + .read = rockchip_pcie_rd_conf, + .write = rockchip_pcie_wr_conf, +}; + +static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) +{ + int curr; + u32 status, scale, power; + + if (IS_ERR(rockchip->vpcie3v3)) + return; + + /* + * Set RC's captured slot power limit and scale if + * vpcie3v3 available. The default values are both zero + * which means the software should set these two according + * to the actual power supply. 
+ */ + curr = regulator_get_current_limit(rockchip->vpcie3v3); + if (curr <= 0) + return; + + scale = 3; /* 0.001x */ + curr = curr / 1000; /* convert to mA */ + power = (curr * 3300) / 1000; /* milliwatt */ + while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { + if (!scale) { + dev_warn(rockchip->dev, "invalid power supply\n"); + return; + } + scale--; + power = power / 10; + } + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); + status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | + (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); +} + +/** + * rockchip_pcie_host_init_port - Initialize hardware + * @rockchip: PCIe port information + */ +static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err, i = MAX_LANE_NUM; + u32 status; + + gpiod_set_value_cansleep(rockchip->ep_gpio, 0); + + err = rockchip_pcie_init_port(rockchip); + if (err) + return err; + + /* Fix the transmitted FTS count desired to exit from L0s. 
*/ + status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); + status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | + (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); + rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); + + rockchip_pcie_set_power_limit(rockchip); + + /* Set RC's clock architecture as common clock */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKSTA_SLC << 16; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + /* Set RC's RCB to 128 */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKCTL_RCB; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + /* Enable Gen1 training */ + rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, + PCIE_CLIENT_CONFIG); + + gpiod_set_value_cansleep(rockchip->ep_gpio, 1); + + /* 500ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, + status, PCIE_LINK_UP(status), 20, + 500 * USEC_PER_MSEC); + if (err) { + dev_err(dev, "PCIe link training gen1 timeout!\n"); + goto err_power_off_phy; + } + + if (rockchip->link_gen == 2) { + /* + * Enable retrain for gen2. This should be configured only after + * gen1 finished. 
+ */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status |= PCI_EXP_LNKCTL_RL; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + + err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, + status, PCIE_LINK_IS_GEN2(status), 20, + 500 * USEC_PER_MSEC); + if (err) + dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); + } + + /* Check the final link width from negotiated lane counter from MGMT */ + status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); + status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> + PCIE_CORE_PL_CONF_LANE_SHIFT); + dev_dbg(dev, "current link width is x%d\n", status); + + /* Power off unused lane(s) */ + rockchip->lanes_map = rockchip_pcie_lane_map(rockchip); + for (i = 0; i < MAX_LANE_NUM; i++) { + if (!(rockchip->lanes_map & BIT(i))) { + dev_dbg(dev, "idling lane %d\n", i); + phy_power_off(rockchip->phys[i]); + } + } + + rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, + PCIE_CORE_CONFIG_VENDOR); + rockchip_pcie_write(rockchip, + PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, + PCIE_RC_CONFIG_RID_CCR); + + /* Clear THP cap's next cap pointer to remove L1 substate cap */ + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP); + status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP); + + /* Clear L0s from RC's link cap */ + if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); + status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); + } + + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); + status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; + status |= PCIE_RC_CONFIG_DCSR_MPS_256; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); + + return 0; +err_power_off_phy: + while (i--) + phy_power_off(rockchip->phys[i]); + i = MAX_LANE_NUM; + while (i--) + phy_exit(rockchip->phys[i]); + return 
err; +} + +static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) +{ + struct rockchip_pcie *rockchip = arg; + struct device *dev = rockchip->dev; + u32 reg; + u32 sub_reg; + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + if (reg & PCIE_CLIENT_INT_LOCAL) { + dev_dbg(dev, "local interrupt received\n"); + sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS); + if (sub_reg & PCIE_CORE_INT_PRFPE) + dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n"); + + if (sub_reg & PCIE_CORE_INT_CRFPE) + dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n"); + + if (sub_reg & PCIE_CORE_INT_RRPE) + dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n"); + + if (sub_reg & PCIE_CORE_INT_PRFO) + dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n"); + + if (sub_reg & PCIE_CORE_INT_CRFO) + dev_dbg(dev, "overflow occurred in the completion receive FIFO\n"); + + if (sub_reg & PCIE_CORE_INT_RT) + dev_dbg(dev, "replay timer timed out\n"); + + if (sub_reg & PCIE_CORE_INT_RTR) + dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n"); + + if (sub_reg & PCIE_CORE_INT_PE) + dev_dbg(dev, "phy error detected on receive side\n"); + + if (sub_reg & PCIE_CORE_INT_MTR) + dev_dbg(dev, "malformed TLP received from the link\n"); + + if (sub_reg & PCIE_CORE_INT_UCR) + dev_dbg(dev, "malformed TLP received from the link\n"); + + if (sub_reg & PCIE_CORE_INT_FCE) + dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); + + if (sub_reg & PCIE_CORE_INT_CT) + dev_dbg(dev, "a request timed out waiting for completion\n"); + + if (sub_reg & PCIE_CORE_INT_UTC) + dev_dbg(dev, "unmapped TC error\n"); + + if (sub_reg & PCIE_CORE_INT_MMVC) + dev_dbg(dev, "MSI mask register changes\n"); + + rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS); + } else if (reg & PCIE_CLIENT_INT_PHY) { + dev_dbg(dev, "phy link 
changes\n"); + rockchip_pcie_update_txcredit_mui(rockchip); + rockchip_pcie_clr_bw_int(rockchip); + } + + rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL, + PCIE_CLIENT_INT_STATUS); + + return IRQ_HANDLED; +} + +static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) +{ + struct rockchip_pcie *rockchip = arg; + struct device *dev = rockchip->dev; + u32 reg; + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + if (reg & PCIE_CLIENT_INT_LEGACY_DONE) + dev_dbg(dev, "legacy done interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_MSG) + dev_dbg(dev, "message done interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_HOT_RST) + dev_dbg(dev, "hot reset interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_DPA) + dev_dbg(dev, "dpa interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_FATAL_ERR) + dev_dbg(dev, "fatal error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_NFATAL_ERR) + dev_dbg(dev, "no fatal error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_CORR_ERR) + dev_dbg(dev, "correctable error interrupt received\n"); + + if (reg & PCIE_CLIENT_INT_PHY) + dev_dbg(dev, "phy interrupt received\n"); + + rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE | + PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST | + PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR | + PCIE_CLIENT_INT_NFATAL_ERR | + PCIE_CLIENT_INT_CORR_ERR | + PCIE_CLIENT_INT_PHY), + PCIE_CLIENT_INT_STATUS); + + return IRQ_HANDLED; +} + +static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); + struct device *dev = rockchip->dev; + u32 reg; + u32 hwirq; + u32 virq; + + chained_irq_enter(chip, desc); + + reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); + reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT; + + while (reg) { + hwirq = ffs(reg) - 1; + reg &= ~BIT(hwirq); + + virq = 
irq_find_mapping(rockchip->irq_domain, hwirq); + if (virq) + generic_handle_irq(virq); + else + dev_err(dev, "unexpected IRQ, INT%d\n", hwirq); + } + + chained_irq_exit(chip, desc); +} + +static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) +{ + int irq, err; + struct device *dev = rockchip->dev; + struct platform_device *pdev = to_platform_device(dev); + + irq = platform_get_irq_byname(pdev, "sys"); + if (irq < 0) { + dev_err(dev, "missing sys IRQ resource\n"); + return irq; + } + + err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, + IRQF_SHARED, "pcie-sys", rockchip); + if (err) { + dev_err(dev, "failed to request PCIe subsystem IRQ\n"); + return err; + } + + irq = platform_get_irq_byname(pdev, "legacy"); + if (irq < 0) { + dev_err(dev, "missing legacy IRQ resource\n"); + return irq; + } + + irq_set_chained_handler_and_data(irq, + rockchip_pcie_legacy_int_handler, + rockchip); + + irq = platform_get_irq_byname(pdev, "client"); + if (irq < 0) { + dev_err(dev, "missing client IRQ resource\n"); + return irq; + } + + err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, + IRQF_SHARED, "pcie-client", rockchip); + if (err) { + dev_err(dev, "failed to request PCIe client IRQ\n"); + return err; + } + + return 0; +} + +/** + * rockchip_pcie_parse_host_dt - Parse Device Tree + * @rockchip: PCIe port information + * + * Return: '0' on success and error value on failure + */ +static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err; + + err = rockchip_pcie_parse_dt(rockchip); + if (err) + return err; + + err = rockchip_pcie_setup_irq(rockchip); + if (err) + return err; + + rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); + if (IS_ERR(rockchip->vpcie12v)) { + if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie12v regulator found\n"); + } + + rockchip->vpcie3v3 = devm_regulator_get_optional(dev, 
"vpcie3v3"); + if (IS_ERR(rockchip->vpcie3v3)) { + if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie3v3 regulator found\n"); + } + + rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); + if (IS_ERR(rockchip->vpcie1v8)) { + if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie1v8 regulator found\n"); + } + + rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); + if (IS_ERR(rockchip->vpcie0v9)) { + if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no vpcie0v9 regulator found\n"); + } + + return 0; +} + +static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err; + + if (!IS_ERR(rockchip->vpcie12v)) { + err = regulator_enable(rockchip->vpcie12v); + if (err) { + dev_err(dev, "fail to enable vpcie12v regulator\n"); + goto err_out; + } + } + + if (!IS_ERR(rockchip->vpcie3v3)) { + err = regulator_enable(rockchip->vpcie3v3); + if (err) { + dev_err(dev, "fail to enable vpcie3v3 regulator\n"); + goto err_disable_12v; + } + } + + if (!IS_ERR(rockchip->vpcie1v8)) { + err = regulator_enable(rockchip->vpcie1v8); + if (err) { + dev_err(dev, "fail to enable vpcie1v8 regulator\n"); + goto err_disable_3v3; + } + } + + if (!IS_ERR(rockchip->vpcie0v9)) { + err = regulator_enable(rockchip->vpcie0v9); + if (err) { + dev_err(dev, "fail to enable vpcie0v9 regulator\n"); + goto err_disable_1v8; + } + } + + return 0; + +err_disable_1v8: + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); +err_disable_3v3: + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); +err_disable_12v: + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); +err_out: + return err; +} + +static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip) +{ + rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) & + 
(~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK); + rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT), + PCIE_CORE_INT_MASK); + + rockchip_pcie_enable_bw_int(rockchip); +} + +static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = rockchip_pcie_intx_map, +}; + +static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + struct device_node *intc = of_get_next_child(dev->of_node, NULL); + + if (!intc) { + dev_err(dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + + rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, + &intx_domain_ops, rockchip); + if (!rockchip->irq_domain) { + dev_err(dev, "failed to get a INTx IRQ domain\n"); + return -EINVAL; + } + + return 0; +} + +static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip, + int region_no, int type, u8 num_pass_bits, + u32 lower_addr, u32 upper_addr) +{ + u32 ob_addr_0; + u32 ob_addr_1; + u32 ob_desc_0; + u32 aw_offset; + + if (region_no >= MAX_AXI_WRAPPER_REGION_NUM) + return -EINVAL; + if (num_pass_bits + 1 < 8) + return -EINVAL; + if (num_pass_bits > 63) + return -EINVAL; + if (region_no == 0) { + if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits)) + return -EINVAL; + } + if (region_no != 0) { + if (AXI_REGION_SIZE < (2ULL << num_pass_bits)) + return -EINVAL; + } + + aw_offset = (region_no << OB_REG_SIZE_SHIFT); + + ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS; + ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR; + ob_addr_1 = upper_addr; + ob_desc_0 = (1 << 23 | type); + + rockchip_pcie_write(rockchip, ob_addr_0, + PCIE_CORE_OB_REGION_ADDR0 + aw_offset); + rockchip_pcie_write(rockchip, ob_addr_1, + PCIE_CORE_OB_REGION_ADDR1 + aw_offset); + 
rockchip_pcie_write(rockchip, ob_desc_0, + PCIE_CORE_OB_REGION_DESC0 + aw_offset); + rockchip_pcie_write(rockchip, 0, + PCIE_CORE_OB_REGION_DESC1 + aw_offset); + + return 0; +} + +static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, + int region_no, u8 num_pass_bits, + u32 lower_addr, u32 upper_addr) +{ + u32 ib_addr_0; + u32 ib_addr_1; + u32 aw_offset; + + if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM) + return -EINVAL; + if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED) + return -EINVAL; + if (num_pass_bits > 63) + return -EINVAL; + + aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT); + + ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS; + ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR; + ib_addr_1 = upper_addr; + + rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset); + rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset); + + return 0; +} + +static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int offset; + int err; + int reg_no; + + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + + for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { + err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, + AXI_WRAPPER_MEM_WRITE, + 20 - 1, + rockchip->mem_bus_addr + + (reg_no << 20), + 0); + if (err) { + dev_err(dev, "program RC mem outbound ATU failed\n"); + return err; + } + } + + err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); + if (err) { + dev_err(dev, "program RC mem inbound ATU failed\n"); + return err; + } + + offset = rockchip->mem_size >> 20; + for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) { + err = rockchip_pcie_prog_ob_atu(rockchip, + reg_no + 1 + offset, + AXI_WRAPPER_IO_WRITE, + 20 - 1, + rockchip->io_bus_addr + + (reg_no << 20), + 0); + if (err) { + dev_err(dev, "program RC io outbound ATU failed\n"); + return err; + } + } + + /* assign message 
regions */ + rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset, + AXI_WRAPPER_NOR_MSG, + 20 - 1, 0, 0); + + rockchip->msg_bus_addr = rockchip->mem_bus_addr + + ((reg_no + offset) << 20); + return err; +} + +static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip) +{ + u32 value; + int err; + + /* send PME_TURN_OFF message */ + writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF); + + /* read LTSSM and wait for falling into L2 link state */ + err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0, + value, PCIE_LINK_IS_L2(value), 20, + jiffies_to_usecs(5 * HZ)); + if (err) { + dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n"); + return err; + } + + return 0; +} + +static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) +{ + struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + int ret; + + /* disable core and cli int since we don't need to ack PME_ACK */ + rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) | + PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK); + rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK); + + ret = rockchip_pcie_wait_l2(rockchip); + if (ret) { + rockchip_pcie_enable_interrupts(rockchip); + return ret; + } + + rockchip_pcie_deinit_phys(rockchip); + + rockchip_pcie_disable_clocks(rockchip); + + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + + return ret; +} + +static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) +{ + struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + int err; + + if (!IS_ERR(rockchip->vpcie0v9)) { + err = regulator_enable(rockchip->vpcie0v9); + if (err) { + dev_err(dev, "fail to enable vpcie0v9 regulator\n"); + return err; + } + } + + err = rockchip_pcie_enable_clocks(rockchip); + if (err) + goto err_disable_0v9; + + err = rockchip_pcie_host_init_port(rockchip); + if (err) + goto err_pcie_resume; + + err = rockchip_pcie_cfg_atu(rockchip); + if (err) + goto err_err_deinit_port; + + /* Need this 
to enter L1 again */ + rockchip_pcie_update_txcredit_mui(rockchip); + rockchip_pcie_enable_interrupts(rockchip); + + return 0; + +err_err_deinit_port: + rockchip_pcie_deinit_phys(rockchip); +err_pcie_resume: + rockchip_pcie_disable_clocks(rockchip); +err_disable_0v9: + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + return err; +} + +static int rockchip_pcie_probe(struct platform_device *pdev) +{ + struct rockchip_pcie *rockchip; + struct device *dev = &pdev->dev; + struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; + struct resource_entry *win; + resource_size_t io_base; + struct resource *mem; + struct resource *io; + int err; + + LIST_HEAD(res); + + if (!dev->of_node) + return -ENODEV; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip)); + if (!bridge) + return -ENOMEM; + + rockchip = pci_host_bridge_priv(bridge); + + platform_set_drvdata(pdev, rockchip); + rockchip->dev = dev; + rockchip->is_rc = true; + + err = rockchip_pcie_parse_host_dt(rockchip); + if (err) + return err; + + err = rockchip_pcie_enable_clocks(rockchip); + if (err) + return err; + + err = rockchip_pcie_set_vpcie(rockchip); + if (err) { + dev_err(dev, "failed to set vpcie regulator\n"); + goto err_set_vpcie; + } + + err = rockchip_pcie_host_init_port(rockchip); + if (err) + goto err_vpcie; + + rockchip_pcie_enable_interrupts(rockchip); + + err = rockchip_pcie_init_irq_domain(rockchip); + if (err < 0) + goto err_deinit_port; + + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, + &res, &io_base); + if (err) + goto err_remove_irq_domain; + + err = devm_request_pci_bus_resources(dev, &res); + if (err) + goto err_free_res; + + /* Get the I/O and memory ranges from DT */ + resource_list_for_each_entry(win, &res) { + switch (resource_type(win->res)) { + case IORESOURCE_IO: + io = win->res; + io->name = "I/O"; + rockchip->io_size = resource_size(io); + rockchip->io_bus_addr = io->start - win->offset; + err = pci_remap_iospace(io, io_base); + 
if (err) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, io); + continue; + } + rockchip->io = io; + break; + case IORESOURCE_MEM: + mem = win->res; + mem->name = "MEM"; + rockchip->mem_size = resource_size(mem); + rockchip->mem_bus_addr = mem->start - win->offset; + break; + case IORESOURCE_BUS: + rockchip->root_bus_nr = win->res->start; + break; + default: + continue; + } + } + + err = rockchip_pcie_cfg_atu(rockchip); + if (err) + goto err_unmap_iospace; + + rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); + if (!rockchip->msg_region) { + err = -ENOMEM; + goto err_unmap_iospace; + } + + list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = rockchip; + bridge->busnr = 0; + bridge->ops = &rockchip_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + err = pci_scan_root_bus_bridge(bridge); + if (err < 0) + goto err_unmap_iospace; + + bus = bridge->bus; + + rockchip->root_bus = bus; + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + return 0; + +err_unmap_iospace: + pci_unmap_iospace(rockchip->io); +err_free_res: + pci_free_resource_list(&res); +err_remove_irq_domain: + irq_domain_remove(rockchip->irq_domain); +err_deinit_port: + rockchip_pcie_deinit_phys(rockchip); +err_vpcie: + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); +err_set_vpcie: + rockchip_pcie_disable_clocks(rockchip); + return err; +} + +static int rockchip_pcie_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + 
+ pci_stop_root_bus(rockchip->root_bus); + pci_remove_root_bus(rockchip->root_bus); + pci_unmap_iospace(rockchip->io); + irq_domain_remove(rockchip->irq_domain); + + rockchip_pcie_deinit_phys(rockchip); + + rockchip_pcie_disable_clocks(rockchip); + + if (!IS_ERR(rockchip->vpcie12v)) + regulator_disable(rockchip->vpcie12v); + if (!IS_ERR(rockchip->vpcie3v3)) + regulator_disable(rockchip->vpcie3v3); + if (!IS_ERR(rockchip->vpcie1v8)) + regulator_disable(rockchip->vpcie1v8); + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + + return 0; +} + +static const struct dev_pm_ops rockchip_pcie_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, + rockchip_pcie_resume_noirq) +}; + +static const struct of_device_id rockchip_pcie_of_match[] = { + { .compatible = "rockchip,rk3399-pcie", }, + {} +}; +MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match); + +static struct platform_driver rockchip_pcie_driver = { + .driver = { + .name = "rockchip-pcie", + .of_match_table = rockchip_pcie_of_match, + .pm = &rockchip_pcie_pm_ops, + }, + .probe = rockchip_pcie_probe, + .remove = rockchip_pcie_remove, +}; +module_platform_driver(rockchip_pcie_driver); + +MODULE_AUTHOR("Rockchip Inc"); +MODULE_DESCRIPTION("Rockchip AXI PCIe driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c new file mode 100644 index 000000000000..c53d1322a3d6 --- /dev/null +++ b/drivers/pci/controller/pcie-rockchip.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip AXI PCIe host controller driver + * + * Copyright (c) 2016 Rockchip, Inc. + * + * Author: Shawn Lin + * Wenrui Li + * + * Bits taken from Synopsys DesignWare Host controller driver and + * ARM PCI Host generic driver. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/of_pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include "../pci.h" +#include "pcie-rockchip.h" + +int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + struct platform_device *pdev = to_platform_device(dev); + struct device_node *node = dev->of_node; + struct resource *regs; + int err; + + if (rockchip->is_rc) { + regs = platform_get_resource_byname(pdev, + IORESOURCE_MEM, + "axi-base"); + rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs); + if (IS_ERR(rockchip->reg_base)) + return PTR_ERR(rockchip->reg_base); + } else { + rockchip->mem_res = + platform_get_resource_byname(pdev, IORESOURCE_MEM, + "mem-base"); + if (!rockchip->mem_res) + return -EINVAL; + } + + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "apb-base"); + rockchip->apb_base = devm_ioremap_resource(dev, regs); + if (IS_ERR(rockchip->apb_base)) + return PTR_ERR(rockchip->apb_base); + + err = rockchip_pcie_get_phys(rockchip); + if (err) + return err; + + rockchip->lanes = 1; + err = of_property_read_u32(node, "num-lanes", &rockchip->lanes); + if (!err && (rockchip->lanes == 0 || + rockchip->lanes == 3 || + rockchip->lanes > 4)) { + dev_warn(dev, "invalid num-lanes, default to use one lane\n"); + rockchip->lanes = 1; + } + + rockchip->link_gen = of_pci_get_max_link_speed(node); + if (rockchip->link_gen < 0 || rockchip->link_gen > 2) + rockchip->link_gen = 2; + + rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core"); + if (IS_ERR(rockchip->core_rst)) { + if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) + dev_err(dev, "missing core reset property in node\n"); + return PTR_ERR(rockchip->core_rst); + } + + rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt"); + if (IS_ERR(rockchip->mgmt_rst)) { + if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) + dev_err(dev, "missing mgmt reset property in node\n"); + return PTR_ERR(rockchip->mgmt_rst); + } + + rockchip->mgmt_sticky_rst = 
devm_reset_control_get_exclusive(dev, + "mgmt-sticky"); + if (IS_ERR(rockchip->mgmt_sticky_rst)) { + if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) + dev_err(dev, "missing mgmt-sticky reset property in node\n"); + return PTR_ERR(rockchip->mgmt_sticky_rst); + } + + rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe"); + if (IS_ERR(rockchip->pipe_rst)) { + if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pipe reset property in node\n"); + return PTR_ERR(rockchip->pipe_rst); + } + + rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm"); + if (IS_ERR(rockchip->pm_rst)) { + if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pm reset property in node\n"); + return PTR_ERR(rockchip->pm_rst); + } + + rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk"); + if (IS_ERR(rockchip->pclk_rst)) { + if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pclk reset property in node\n"); + return PTR_ERR(rockchip->pclk_rst); + } + + rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk"); + if (IS_ERR(rockchip->aclk_rst)) { + if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) + dev_err(dev, "missing aclk reset property in node\n"); + return PTR_ERR(rockchip->aclk_rst); + } + + if (rockchip->is_rc) { + rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); + if (IS_ERR(rockchip->ep_gpio)) { + dev_err(dev, "missing ep-gpios property in node\n"); + return PTR_ERR(rockchip->ep_gpio); + } + } + + rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); + if (IS_ERR(rockchip->aclk_pcie)) { + dev_err(dev, "aclk clock not found\n"); + return PTR_ERR(rockchip->aclk_pcie); + } + + rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf"); + if (IS_ERR(rockchip->aclk_perf_pcie)) { + dev_err(dev, "aclk_perf clock not found\n"); + return PTR_ERR(rockchip->aclk_perf_pcie); + } + + rockchip->hclk_pcie = devm_clk_get(dev, "hclk"); + if (IS_ERR(rockchip->hclk_pcie)) { + 
dev_err(dev, "hclk clock not found\n"); + return PTR_ERR(rockchip->hclk_pcie); + } + + rockchip->clk_pcie_pm = devm_clk_get(dev, "pm"); + if (IS_ERR(rockchip->clk_pcie_pm)) { + dev_err(dev, "pm clock not found\n"); + return PTR_ERR(rockchip->clk_pcie_pm); + } + + return 0; +} +EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt); + +int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err, i; + u32 regs; + + err = reset_control_assert(rockchip->aclk_rst); + if (err) { + dev_err(dev, "assert aclk_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pclk_rst); + if (err) { + dev_err(dev, "assert pclk_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pm_rst); + if (err) { + dev_err(dev, "assert pm_rst err %d\n", err); + return err; + } + + for (i = 0; i < MAX_LANE_NUM; i++) { + err = phy_init(rockchip->phys[i]); + if (err) { + dev_err(dev, "init phy%d err %d\n", i, err); + goto err_exit_phy; + } + } + + err = reset_control_assert(rockchip->core_rst); + if (err) { + dev_err(dev, "assert core_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_assert(rockchip->mgmt_rst); + if (err) { + dev_err(dev, "assert mgmt_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_assert(rockchip->mgmt_sticky_rst); + if (err) { + dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_assert(rockchip->pipe_rst); + if (err) { + dev_err(dev, "assert pipe_rst err %d\n", err); + goto err_exit_phy; + } + + udelay(10); + + err = reset_control_deassert(rockchip->pm_rst); + if (err) { + dev_err(dev, "deassert pm_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_deassert(rockchip->aclk_rst); + if (err) { + dev_err(dev, "deassert aclk_rst err %d\n", err); + goto err_exit_phy; + } + + err = reset_control_deassert(rockchip->pclk_rst); + if (err) { + dev_err(dev, "deassert pclk_rst err %d\n", err); + 
goto err_exit_phy; + } + + if (rockchip->link_gen == 2) + rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2, + PCIE_CLIENT_CONFIG); + else + rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, + PCIE_CLIENT_CONFIG); + + regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE | + PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes); + + if (rockchip->is_rc) + regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC; + else + regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP; + + rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG); + + for (i = 0; i < MAX_LANE_NUM; i++) { + err = phy_power_on(rockchip->phys[i]); + if (err) { + dev_err(dev, "power on phy%d err %d\n", i, err); + goto err_power_off_phy; + } + } + + /* + * Please don't reorder the deassert sequence of the following + * four reset pins. + */ + err = reset_control_deassert(rockchip->mgmt_sticky_rst); + if (err) { + dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); + goto err_power_off_phy; + } + + err = reset_control_deassert(rockchip->core_rst); + if (err) { + dev_err(dev, "deassert core_rst err %d\n", err); + goto err_power_off_phy; + } + + err = reset_control_deassert(rockchip->mgmt_rst); + if (err) { + dev_err(dev, "deassert mgmt_rst err %d\n", err); + goto err_power_off_phy; + } + + err = reset_control_deassert(rockchip->pipe_rst); + if (err) { + dev_err(dev, "deassert pipe_rst err %d\n", err); + goto err_power_off_phy; + } + + return 0; +err_power_off_phy: + while (i--) + phy_power_off(rockchip->phys[i]); + i = MAX_LANE_NUM; +err_exit_phy: + while (i--) + phy_exit(rockchip->phys[i]); + return err; +} +EXPORT_SYMBOL_GPL(rockchip_pcie_init_port); + +int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + struct phy *phy; + char *name; + u32 i; + + phy = devm_phy_get(dev, "pcie-phy"); + if (!IS_ERR(phy)) { + rockchip->legacy_phy = true; + rockchip->phys[0] = phy; + dev_warn(dev, "legacy phy model is deprecated!\n"); + return 0; + } + + if 
(PTR_ERR(phy) == -EPROBE_DEFER) + return PTR_ERR(phy); + + dev_dbg(dev, "missing legacy phy; search for per-lane PHY\n"); + + for (i = 0; i < MAX_LANE_NUM; i++) { + name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i); + if (!name) + return -ENOMEM; + + phy = devm_of_phy_get(dev, dev->of_node, name); + kfree(name); + + if (IS_ERR(phy)) { + if (PTR_ERR(phy) != -EPROBE_DEFER) + dev_err(dev, "missing phy for lane %d: %ld\n", + i, PTR_ERR(phy)); + return PTR_ERR(phy); + } + + rockchip->phys[i] = phy; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys); + +void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip) +{ + int i; + + for (i = 0; i < MAX_LANE_NUM; i++) { + /* inactive lanes are already powered off */ + if (rockchip->lanes_map & BIT(i)) + phy_power_off(rockchip->phys[i]); + phy_exit(rockchip->phys[i]); + } +} +EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys); + +int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->dev; + int err; + + err = clk_prepare_enable(rockchip->aclk_pcie); + if (err) { + dev_err(dev, "unable to enable aclk_pcie clock\n"); + return err; + } + + err = clk_prepare_enable(rockchip->aclk_perf_pcie); + if (err) { + dev_err(dev, "unable to enable aclk_perf_pcie clock\n"); + goto err_aclk_perf_pcie; + } + + err = clk_prepare_enable(rockchip->hclk_pcie); + if (err) { + dev_err(dev, "unable to enable hclk_pcie clock\n"); + goto err_hclk_pcie; + } + + err = clk_prepare_enable(rockchip->clk_pcie_pm); + if (err) { + dev_err(dev, "unable to enable clk_pcie_pm clock\n"); + goto err_clk_pcie_pm; + } + + return 0; + +err_clk_pcie_pm: + clk_disable_unprepare(rockchip->hclk_pcie); +err_hclk_pcie: + clk_disable_unprepare(rockchip->aclk_perf_pcie); +err_aclk_perf_pcie: + clk_disable_unprepare(rockchip->aclk_pcie); + return err; +} +EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks); + +void rockchip_pcie_disable_clocks(void *data) +{ + struct rockchip_pcie *rockchip = data; + + 
clk_disable_unprepare(rockchip->clk_pcie_pm); + clk_disable_unprepare(rockchip->hclk_pcie); + clk_disable_unprepare(rockchip->aclk_perf_pcie); + clk_disable_unprepare(rockchip->aclk_pcie); +} +EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks); + +void rockchip_pcie_cfg_configuration_accesses( + struct rockchip_pcie *rockchip, u32 type) +{ + u32 ob_desc_0; + + /* Configuration Accesses for region 0 */ + rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); + + rockchip_pcie_write(rockchip, + (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), + PCIE_CORE_OB_REGION_ADDR0); + rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, + PCIE_CORE_OB_REGION_ADDR1); + ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); + ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); + ob_desc_0 |= (type | (0x1 << 23)); + rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); + rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); +} +EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses); diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h new file mode 100644 index 000000000000..8e87a059ce73 --- /dev/null +++ b/drivers/pci/controller/pcie-rockchip.h @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Rockchip AXI PCIe controller driver + * + * Copyright (c) 2018 Rockchip, Inc. + * + * Author: Shawn Lin <shawn.lin@rock-chips.com> + * + */ + +#ifndef _PCIE_ROCKCHIP_H +#define _PCIE_ROCKCHIP_H + +#include <linux/kernel.h> +#include <linux/pci.h> + +/* + * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16 + * bits. This allows atomic updates of the register without locking. 
+ */ +#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) +#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) + +#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4) +#define MAX_LANE_NUM 4 +#define MAX_REGION_LIMIT 32 +#define MIN_EP_APERTURE 28 + +#define PCIE_CLIENT_BASE 0x0 +#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) +#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001) +#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0) +#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002) +#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008) +#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x)) +#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) +#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0) +#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0) +#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080) +#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c) +#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0) +#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18 +#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19 +#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48) +#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000 +#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000 +#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c) +#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50) +#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5) +#define PCIE_CLIENT_INTR_SHIFT 5 +#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15) +#define PCIE_CLIENT_INT_MSG BIT(14) +#define PCIE_CLIENT_INT_HOT_RST BIT(13) +#define PCIE_CLIENT_INT_DPA BIT(12) +#define PCIE_CLIENT_INT_FATAL_ERR BIT(11) +#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10) +#define PCIE_CLIENT_INT_CORR_ERR BIT(9) +#define PCIE_CLIENT_INT_INTD BIT(8) +#define PCIE_CLIENT_INT_INTC BIT(7) +#define PCIE_CLIENT_INT_INTB BIT(6) +#define PCIE_CLIENT_INT_INTA BIT(5) +#define PCIE_CLIENT_INT_LOCAL BIT(4) +#define PCIE_CLIENT_INT_UDMA BIT(3) +#define PCIE_CLIENT_INT_PHY BIT(2) +#define 
PCIE_CLIENT_INT_HOT_PLUG BIT(1) +#define PCIE_CLIENT_INT_PWR_STCG BIT(0) + +#define PCIE_CLIENT_INT_LEGACY \ + (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \ + PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD) + +#define PCIE_CLIENT_INT_CLI \ + (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \ + PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \ + PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \ + PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \ + PCIE_CLIENT_INT_PHY) + +#define PCIE_CORE_CTRL_MGMT_BASE 0x900000 +#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000) +#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008 +#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018 +#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006 +#define PCIE_CORE_PL_CONF_LANE_SHIFT 1 +#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004) +#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8) +#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8 +#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff +#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020) +#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000 +#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16 +#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \ + (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT) +#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200) +#define PCIE_CORE_LANE_MAP_MASK 0x0000000f +#define PCIE_CORE_LANE_MAP_REVERSE BIT(16) +#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c) +#define PCIE_CORE_INT_PRFPE BIT(0) +#define PCIE_CORE_INT_CRFPE BIT(1) +#define PCIE_CORE_INT_RRPE BIT(2) +#define PCIE_CORE_INT_PRFO BIT(3) +#define PCIE_CORE_INT_CRFO BIT(4) +#define PCIE_CORE_INT_RT BIT(5) +#define PCIE_CORE_INT_RTR BIT(6) +#define PCIE_CORE_INT_PE BIT(7) +#define PCIE_CORE_INT_MTR BIT(8) +#define PCIE_CORE_INT_UCR BIT(9) +#define PCIE_CORE_INT_FCE BIT(10) +#define PCIE_CORE_INT_CT BIT(11) +#define PCIE_CORE_INT_UTC BIT(18) +#define PCIE_CORE_INT_MMVC BIT(19) +#define PCIE_CORE_CONFIG_VENDOR 
(PCIE_CORE_CTRL_MGMT_BASE + 0x44) +#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210) +#define PCIE_CORE_PHY_FUNC_CFG (PCIE_CORE_CTRL_MGMT_BASE + 0x2c0) +#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300) +#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED 0x0 +#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS 0x1 +#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS 0x4 +#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 +#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS 0x6 +#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 + +#define PCIE_CORE_INT \ + (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \ + PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \ + PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \ + PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \ + PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \ + PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ + PCIE_CORE_INT_MMVC) + +#define PCIE_RC_RP_ATS_BASE 0x400000 +#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 +#define PCIE_RC_CONFIG_BASE 0xa00000 +#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) +#define PCIE_RC_CONFIG_SCC_SHIFT 16 +#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) +#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 +#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff +#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 +#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) +#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) +#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) +#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) +#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) +#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) +#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) +#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) +#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) + +#define PCIE_CORE_AXI_CONF_BASE 0xc00000 +#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0) +#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f +#define 
PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00 +#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4) +#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8) +#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc) + +#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800 +#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0) +#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f +#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00 +#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4) + +/* Size of one AXI Region (not Region 0) */ +#define AXI_REGION_SIZE BIT(20) +/* Size of Region 0, equal to sum of sizes of other regions */ +#define AXI_REGION_0_SIZE (32 * (0x1 << 20)) +#define OB_REG_SIZE_SHIFT 5 +#define IB_ROOT_PORT_REG_SIZE_SHIFT 3 +#define AXI_WRAPPER_IO_WRITE 0x6 +#define AXI_WRAPPER_MEM_WRITE 0x2 +#define AXI_WRAPPER_TYPE0_CFG 0xa +#define AXI_WRAPPER_TYPE1_CFG 0xb +#define AXI_WRAPPER_NOR_MSG 0xc + +#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 +#define MIN_AXI_ADDR_BITS_PASSED 8 +#define PCIE_RC_SEND_PME_OFF 0x11960 +#define ROCKCHIP_VENDOR_ID 0x1d87 +#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20) +#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15) +#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12) +#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0) +#define PCIE_ECAM_ADDR(bus, dev, func, reg) \ + (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \ + PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg)) +#define PCIE_LINK_IS_L2(x) \ + (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2) +#define PCIE_LINK_UP(x) \ + (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP) +#define PCIE_LINK_IS_GEN2(x) \ + (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G) + +#define RC_REGION_0_ADDR_TRANS_H 0x00000000 +#define RC_REGION_0_ADDR_TRANS_L 0x00000000 +#define RC_REGION_0_PASS_BITS (25 - 1) +#define RC_REGION_0_TYPE_MASK GENMASK(3, 0) +#define MAX_AXI_WRAPPER_REGION_NUM 33 + +#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0 
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1 +#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2 +#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3 +#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4 +#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5 +#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20 +#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21 +#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22 +#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23 +#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24 +#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25 +#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26 +#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27 +#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5) +#define ROCKCHIP_PCIE_MSG_ROUTING(route) \ + (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK) +#define ROCKCHIP_PCIE_MSG_CODE_MASK GENMASK(15, 8) +#define ROCKCHIP_PCIE_MSG_CODE(code) \ + (((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK) +#define ROCKCHIP_PCIE_MSG_NO_DATA BIT(16) + +#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4 +#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19) +#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90 +#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17 +#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17) +#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20 +#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20) +#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16) +#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24) +#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1 +#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3 +#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) +#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ + (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) +#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ + (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) +#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ + (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) +#define 
ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) +#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ + (((devfn) << 12) & \ + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) +#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) +#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ + (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) +#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ + (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) +#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) +#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) +#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ + (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) +#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \ + (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) +#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \ + (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020) +#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ + (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) +#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ + (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020) + +#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \ + (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008) +#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \ + (PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008) +#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ + (GENMASK(4, 0) << ((b) * 8)) +#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ + (((a) << ((b) * 8)) & \ + ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) +#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ + (GENMASK(7, 5) << ((b) * 8)) +#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ + (((c) << ((b) * 8 + 5)) & \ + ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) + +struct rockchip_pcie { + void __iomem *reg_base; /* DT axi-base */ + void __iomem *apb_base; /* DT apb-base */ + 
bool legacy_phy; + struct phy *phys[MAX_LANE_NUM]; + struct reset_control *core_rst; + struct reset_control *mgmt_rst; + struct reset_control *mgmt_sticky_rst; + struct reset_control *pipe_rst; + struct reset_control *pm_rst; + struct reset_control *aclk_rst; + struct reset_control *pclk_rst; + struct clk *aclk_pcie; + struct clk *aclk_perf_pcie; + struct clk *hclk_pcie; + struct clk *clk_pcie_pm; + struct regulator *vpcie12v; /* 12V power supply */ + struct regulator *vpcie3v3; /* 3.3V power supply */ + struct regulator *vpcie1v8; /* 1.8V power supply */ + struct regulator *vpcie0v9; /* 0.9V power supply */ + struct gpio_desc *ep_gpio; + u32 lanes; + u8 lanes_map; + u8 root_bus_nr; + int link_gen; + struct device *dev; + struct irq_domain *irq_domain; + int offset; + struct pci_bus *root_bus; + struct resource *io; + phys_addr_t io_bus_addr; + u32 io_size; + void __iomem *msg_region; + u32 mem_size; + phys_addr_t msg_bus_addr; + phys_addr_t mem_bus_addr; + bool is_rc; + struct resource *mem_res; +}; + +static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg) +{ + return readl(rockchip->apb_base + reg); +} + +static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val, + u32 reg) +{ + writel(val, rockchip->apb_base + reg); +} + +int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip); +int rockchip_pcie_init_port(struct rockchip_pcie *rockchip); +int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip); +void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip); +int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip); +void rockchip_pcie_disable_clocks(void *data); +void rockchip_pcie_cfg_configuration_accesses( + struct rockchip_pcie *rockchip, u32 type); + +#endif /* _PCIE_ROCKCHIP_H */ diff --git a/drivers/pci/controller/pcie-tango.c b/drivers/pci/controller/pcie-tango.c new file mode 100644 index 000000000000..21a208da3f59 --- /dev/null +++ b/drivers/pci/controller/pcie-tango.c @@ -0,0 +1,341 @@ +// 
SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +#define MSI_MAX 256 + +#define SMP8759_MUX 0x48 +#define SMP8759_TEST_OUT 0x74 +#define SMP8759_DOORBELL 0x7c +#define SMP8759_STATUS 0x80 +#define SMP8759_ENABLE 0xa0 + +struct tango_pcie { + DECLARE_BITMAP(used_msi, MSI_MAX); + u64 msi_doorbell; + spinlock_t used_msi_lock; + void __iomem *base; + struct irq_domain *dom; +}; + +static void tango_msi_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct tango_pcie *pcie = irq_desc_get_handler_data(desc); + unsigned long status, base, virq, idx, pos = 0; + + chained_irq_enter(chip, desc); + spin_lock(&pcie->used_msi_lock); + + while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) { + base = round_down(pos, 32); + status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8); + for_each_set_bit(idx, &status, 32) { + virq = irq_find_mapping(pcie->dom, base + idx); + generic_handle_irq(virq); + } + pos = base + 32; + } + + spin_unlock(&pcie->used_msi_lock); + chained_irq_exit(chip, desc); +} + +static void tango_ack(struct irq_data *d) +{ + struct tango_pcie *pcie = d->chip_data; + u32 offset = (d->hwirq / 32) * 4; + u32 bit = BIT(d->hwirq % 32); + + writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset); +} + +static void update_msi_enable(struct irq_data *d, bool unmask) +{ + unsigned long flags; + struct tango_pcie *pcie = d->chip_data; + u32 offset = (d->hwirq / 32) * 4; + u32 bit = BIT(d->hwirq % 32); + u32 val; + + spin_lock_irqsave(&pcie->used_msi_lock, flags); + val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset); + val = unmask ? 
val | bit : val & ~bit; + writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset); + spin_unlock_irqrestore(&pcie->used_msi_lock, flags); +} + +static void tango_mask(struct irq_data *d) +{ + update_msi_enable(d, false); +} + +static void tango_unmask(struct irq_data *d) +{ + update_msi_enable(d, true); +} + +static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask, + bool force) +{ + return -EINVAL; +} + +static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct tango_pcie *pcie = d->chip_data; + msg->address_lo = lower_32_bits(pcie->msi_doorbell); + msg->address_hi = upper_32_bits(pcie->msi_doorbell); + msg->data = d->hwirq; +} + +static struct irq_chip tango_chip = { + .irq_ack = tango_ack, + .irq_mask = tango_mask, + .irq_unmask = tango_unmask, + .irq_set_affinity = tango_set_affinity, + .irq_compose_msi_msg = tango_compose_msi_msg, +}; + +static void msi_ack(struct irq_data *d) +{ + irq_chip_ack_parent(d); +} + +static void msi_mask(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void msi_unmask(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip msi_chip = { + .name = "MSI", + .irq_ack = msi_ack, + .irq_mask = msi_mask, + .irq_unmask = msi_unmask, +}; + +static struct msi_domain_info msi_dom_info = { + .flags = MSI_FLAG_PCI_MSIX + | MSI_FLAG_USE_DEF_DOM_OPS + | MSI_FLAG_USE_DEF_CHIP_OPS, + .chip = &msi_chip, +}; + +static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct tango_pcie *pcie = dom->host_data; + unsigned long flags; + int pos; + + spin_lock_irqsave(&pcie->used_msi_lock, flags); + pos = find_first_zero_bit(pcie->used_msi, MSI_MAX); + if (pos >= MSI_MAX) { + spin_unlock_irqrestore(&pcie->used_msi_lock, flags); + return -ENOSPC; + } + __set_bit(pos, pcie->used_msi); + spin_unlock_irqrestore(&pcie->used_msi_lock, flags); + 
irq_domain_set_info(dom, virq, pos, &tango_chip, + pcie, handle_edge_irq, NULL, NULL); + + return 0; +} + +static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq, + unsigned int nr_irqs) +{ + unsigned long flags; + struct irq_data *d = irq_domain_get_irq_data(dom, virq); + struct tango_pcie *pcie = d->chip_data; + + spin_lock_irqsave(&pcie->used_msi_lock, flags); + __clear_bit(d->hwirq, pcie->used_msi); + spin_unlock_irqrestore(&pcie->used_msi_lock, flags); +} + +static const struct irq_domain_ops dom_ops = { + .alloc = tango_irq_domain_alloc, + .free = tango_irq_domain_free, +}; + +static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_config_window *cfg = bus->sysdata; + struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); + int ret; + + /* Reads in configuration space outside devfn 0 return garbage */ + if (devfn != 0) + return PCIBIOS_FUNC_NOT_SUPPORTED; + + /* + * PCI config and MMIO accesses are muxed. Linux doesn't have a + * mutual exclusion mechanism for config vs. MMIO accesses, so + * concurrent accesses may cause corruption. 
+ */ + writel_relaxed(1, pcie->base + SMP8759_MUX); + ret = pci_generic_config_read(bus, devfn, where, size, val); + writel_relaxed(0, pcie->base + SMP8759_MUX); + + return ret; +} + +static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_config_window *cfg = bus->sysdata; + struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); + int ret; + + writel_relaxed(1, pcie->base + SMP8759_MUX); + ret = pci_generic_config_write(bus, devfn, where, size, val); + writel_relaxed(0, pcie->base + SMP8759_MUX); + + return ret; +} + +static struct pci_ecam_ops smp8759_ecam_ops = { + .bus_shift = 20, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = smp8759_config_read, + .write = smp8759_config_write, + } +}; + +static int tango_pcie_link_up(struct tango_pcie *pcie) +{ + void __iomem *test_out = pcie->base + SMP8759_TEST_OUT; + int i; + + writel_relaxed(16, test_out); + for (i = 0; i < 10; ++i) { + u32 ltssm_state = readl_relaxed(test_out) >> 8; + if ((ltssm_state & 0x1f) == 0xf) /* L0 */ + return 1; + usleep_range(3000, 4000); + } + + return 0; +} + +static int tango_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tango_pcie *pcie; + struct resource *res; + struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); + struct irq_domain *msi_dom, *irq_dom; + struct of_pci_range_parser parser; + struct of_pci_range range; + int virq, offset; + + dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n"); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + pcie->base = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); + + platform_set_drvdata(pdev, pcie); + + if (!tango_pcie_link_up(pcie)) + return -ENODEV; + + if (of_pci_dma_range_parser_init(&parser, dev->of_node) 
< 0) + return -ENOENT; + + if (of_pci_range_parser_one(&parser, &range) == NULL) + return -ENOENT; + + range.pci_addr += range.size; + pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL; + + for (offset = 0; offset < MSI_MAX / 8; offset += 4) + writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset); + + virq = platform_get_irq(pdev, 1); + if (virq <= 0) { + dev_err(dev, "Failed to map IRQ\n"); + return -ENXIO; + } + + irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie); + if (!irq_dom) { + dev_err(dev, "Failed to create IRQ domain\n"); + return -ENOMEM; + } + + msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom); + if (!msi_dom) { + dev_err(dev, "Failed to create MSI domain\n"); + irq_domain_remove(irq_dom); + return -ENOMEM; + } + + pcie->dom = irq_dom; + spin_lock_init(&pcie->used_msi_lock); + irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie); + + return pci_host_common_probe(pdev, &smp8759_ecam_ops); +} + +static const struct of_device_id tango_pcie_ids[] = { + { .compatible = "sigma,smp8759-pcie" }, + { }, +}; + +static struct platform_driver tango_pcie_driver = { + .probe = tango_pcie_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = tango_pcie_ids, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(tango_pcie_driver); + +/* + * The root complex advertises the wrong device class. + * Header Type 1 is for PCI-to-PCI bridges. + */ +static void tango_fixup_class(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_PCI << 8; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class); + +/* + * The root complex exposes a "fake" BAR, which is used to filter + * bus-to-system accesses. Only accesses within the range defined by this + * BAR are forwarded to the host, others are ignored. 
+ * + * By default, the DMA framework expects an identity mapping, and DRAM0 is + * mapped at 0x80000000. + */ +static void tango_fixup_bar(struct pci_dev *dev) +{ + dev->non_compliant_bars = true; + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c new file mode 100644 index 000000000000..6a4bbb5b3de0 --- /dev/null +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -0,0 +1,917 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe host controller driver for NWL PCIe Bridge + * Based on pcie-xilinx.c, pci-tegra.c + * + * (C) Copyright 2014 - 2015, Xilinx, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +/* Bridge core config registers */ +#define BRCFG_PCIE_RX0 0x00000000 +#define BRCFG_INTERRUPT 0x00000010 +#define BRCFG_PCIE_RX_MSG_FILTER 0x00000020 + +/* Egress - Bridge translation registers */ +#define E_BREG_CAPABILITIES 0x00000200 +#define E_BREG_CONTROL 0x00000208 +#define E_BREG_BASE_LO 0x00000210 +#define E_BREG_BASE_HI 0x00000214 +#define E_ECAM_CAPABILITIES 0x00000220 +#define E_ECAM_CONTROL 0x00000228 +#define E_ECAM_BASE_LO 0x00000230 +#define E_ECAM_BASE_HI 0x00000234 + +/* Ingress - address translations */ +#define I_MSII_CAPABILITIES 0x00000300 +#define I_MSII_CONTROL 0x00000308 +#define I_MSII_BASE_LO 0x00000310 +#define I_MSII_BASE_HI 0x00000314 + +#define I_ISUB_CONTROL 0x000003E8 +#define SET_ISUB_CONTROL BIT(0) +/* Rxed msg fifo - Interrupt status registers */ +#define MSGF_MISC_STATUS 0x00000400 +#define MSGF_MISC_MASK 0x00000404 +#define MSGF_LEG_STATUS 0x00000420 +#define MSGF_LEG_MASK 0x00000424 +#define MSGF_MSI_STATUS_LO 0x00000440 +#define MSGF_MSI_STATUS_HI 
0x00000444 +#define MSGF_MSI_MASK_LO 0x00000448 +#define MSGF_MSI_MASK_HI 0x0000044C + +/* Msg filter mask bits */ +#define CFG_ENABLE_PM_MSG_FWD BIT(1) +#define CFG_ENABLE_INT_MSG_FWD BIT(2) +#define CFG_ENABLE_ERR_MSG_FWD BIT(3) +#define CFG_ENABLE_MSG_FILTER_MASK (CFG_ENABLE_PM_MSG_FWD | \ + CFG_ENABLE_INT_MSG_FWD | \ + CFG_ENABLE_ERR_MSG_FWD) + +/* Misc interrupt status mask bits */ +#define MSGF_MISC_SR_RXMSG_AVAIL BIT(0) +#define MSGF_MISC_SR_RXMSG_OVER BIT(1) +#define MSGF_MISC_SR_SLAVE_ERR BIT(4) +#define MSGF_MISC_SR_MASTER_ERR BIT(5) +#define MSGF_MISC_SR_I_ADDR_ERR BIT(6) +#define MSGF_MISC_SR_E_ADDR_ERR BIT(7) +#define MSGF_MISC_SR_FATAL_AER BIT(16) +#define MSGF_MISC_SR_NON_FATAL_AER BIT(17) +#define MSGF_MISC_SR_CORR_AER BIT(18) +#define MSGF_MISC_SR_UR_DETECT BIT(20) +#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) +#define MSGF_MISC_SR_FATAL_DEV BIT(23) +#define MSGF_MISC_SR_LINK_DOWN BIT(24) +#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) +#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) + +#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ + MSGF_MISC_SR_RXMSG_OVER | \ + MSGF_MISC_SR_SLAVE_ERR | \ + MSGF_MISC_SR_MASTER_ERR | \ + MSGF_MISC_SR_I_ADDR_ERR | \ + MSGF_MISC_SR_E_ADDR_ERR | \ + MSGF_MISC_SR_FATAL_AER | \ + MSGF_MISC_SR_NON_FATAL_AER | \ + MSGF_MISC_SR_CORR_AER | \ + MSGF_MISC_SR_UR_DETECT | \ + MSGF_MISC_SR_NON_FATAL_DEV | \ + MSGF_MISC_SR_FATAL_DEV | \ + MSGF_MISC_SR_LINK_DOWN | \ + MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ + MSGF_MSIC_SR_LINK_BWIDTH) + +/* Legacy interrupt status mask bits */ +#define MSGF_LEG_SR_INTA BIT(0) +#define MSGF_LEG_SR_INTB BIT(1) +#define MSGF_LEG_SR_INTC BIT(2) +#define MSGF_LEG_SR_INTD BIT(3) +#define MSGF_LEG_SR_MASKALL (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \ + MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD) + +/* MSI interrupt status mask bits */ +#define MSGF_MSI_SR_LO_MASK GENMASK(31, 0) +#define MSGF_MSI_SR_HI_MASK GENMASK(31, 0) + +#define MSII_PRESENT BIT(0) +#define MSII_ENABLE BIT(0) +#define MSII_STATUS_ENABLE BIT(15) + 
+/* Bridge config interrupt mask */ +#define BRCFG_INTERRUPT_MASK BIT(0) +#define BREG_PRESENT BIT(0) +#define BREG_ENABLE BIT(0) +#define BREG_ENABLE_FORCE BIT(1) + +/* E_ECAM status mask bits */ +#define E_ECAM_PRESENT BIT(0) +#define E_ECAM_CR_ENABLE BIT(0) +#define E_ECAM_SIZE_LOC GENMASK(20, 16) +#define E_ECAM_SIZE_SHIFT 16 +#define ECAM_BUS_LOC_SHIFT 20 +#define ECAM_DEV_LOC_SHIFT 12 +#define NWL_ECAM_VALUE_DEFAULT 12 + +#define CFG_DMA_REG_BAR GENMASK(2, 0) + +#define INT_PCI_MSI_NR (2 * 32) + +/* Readin the PS_LINKUP */ +#define PS_LINKUP_OFFSET 0x00000238 +#define PCIE_PHY_LINKUP_BIT BIT(0) +#define PHY_RDY_LINKUP_BIT BIT(1) + +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +struct nwl_msi { /* MSI information */ + struct irq_domain *msi_domain; + unsigned long *bitmap; + struct irq_domain *dev_domain; + struct mutex lock; /* protect bitmap variable */ + int irq_msi0; + int irq_msi1; +}; + +struct nwl_pcie { + struct device *dev; + void __iomem *breg_base; + void __iomem *pcireg_base; + void __iomem *ecam_base; + phys_addr_t phys_breg_base; /* Physical Bridge Register Base */ + phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */ + phys_addr_t phys_ecam_base; /* Physical Configuration Base */ + u32 breg_size; + u32 pcie_reg_size; + u32 ecam_size; + int irq_intx; + int irq_misc; + u32 ecam_value; + u8 last_busno; + u8 root_busno; + struct nwl_msi msi; + struct irq_domain *legacy_irq_domain; + raw_spinlock_t leg_mask_lock; +}; + +static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) +{ + return readl(pcie->breg_base + off); +} + +static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off) +{ + writel(val, pcie->breg_base + off); +} + +static bool nwl_pcie_link_up(struct nwl_pcie *pcie) +{ + if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT) + return true; + return false; +} + 
+static bool nwl_phy_link_up(struct nwl_pcie *pcie) +{ + if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT) + return true; + return false; +} + +static int nwl_wait_for_link(struct nwl_pcie *pcie) +{ + struct device *dev = pcie->dev; + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (nwl_phy_link_up(pcie)) + return 0; + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + dev_err(dev, "PHY link never came up\n"); + return -ETIMEDOUT; +} + +static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct nwl_pcie *pcie = bus->sysdata; + + /* Check link before accessing downstream ports */ + if (bus->number != pcie->root_busno) { + if (!nwl_pcie_link_up(pcie)) + return false; + } + + /* Only one device down on each root port */ + if (bus->number == pcie->root_busno && devfn > 0) + return false; + + return true; +} + +/** + * nwl_pcie_map_bus - Get configuration base + * + * @bus: Bus structure of current bus + * @devfn: Device/function + * @where: Offset from base + * + * Return: Base address of the configuration space needed to be + * accessed. 
+ */ +static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct nwl_pcie *pcie = bus->sysdata; + int relbus; + + if (!nwl_pcie_valid_device(bus, devfn)) + return NULL; + + relbus = (bus->number << ECAM_BUS_LOC_SHIFT) | + (devfn << ECAM_DEV_LOC_SHIFT); + + return pcie->ecam_base + relbus + where; +} + +/* PCIe operations */ +static struct pci_ops nwl_pcie_ops = { + .map_bus = nwl_pcie_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) +{ + struct nwl_pcie *pcie = data; + struct device *dev = pcie->dev; + u32 misc_stat; + + /* Checking for misc interrupts */ + misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & + MSGF_MISC_SR_MASKALL; + if (!misc_stat) + return IRQ_NONE; + + if (misc_stat & MSGF_MISC_SR_RXMSG_OVER) + dev_err(dev, "Received Message FIFO Overflow\n"); + + if (misc_stat & MSGF_MISC_SR_SLAVE_ERR) + dev_err(dev, "Slave error\n"); + + if (misc_stat & MSGF_MISC_SR_MASTER_ERR) + dev_err(dev, "Master error\n"); + + if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR) + dev_err(dev, "In Misc Ingress address translation error\n"); + + if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR) + dev_err(dev, "In Misc Egress address translation error\n"); + + if (misc_stat & MSGF_MISC_SR_FATAL_AER) + dev_err(dev, "Fatal Error in AER Capability\n"); + + if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER) + dev_err(dev, "Non-Fatal Error in AER Capability\n"); + + if (misc_stat & MSGF_MISC_SR_CORR_AER) + dev_err(dev, "Correctable Error in AER Capability\n"); + + if (misc_stat & MSGF_MISC_SR_UR_DETECT) + dev_err(dev, "Unsupported request Detected\n"); + + if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV) + dev_err(dev, "Non-Fatal Error Detected\n"); + + if (misc_stat & MSGF_MISC_SR_FATAL_DEV) + dev_err(dev, "Fatal Error Detected\n"); + + if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) + dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n"); + + if 
(misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) + dev_info(dev, "Link Bandwidth Management Status bit set\n"); + + /* Clear misc interrupt status */ + nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS); + + return IRQ_HANDLED; +} + +static void nwl_pcie_leg_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct nwl_pcie *pcie; + unsigned long status; + u32 bit; + u32 virq; + + chained_irq_enter(chip, desc); + pcie = irq_desc_get_handler_data(desc); + + while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & + MSGF_LEG_SR_MASKALL) != 0) { + for_each_set_bit(bit, &status, PCI_NUM_INTX) { + virq = irq_find_mapping(pcie->legacy_irq_domain, bit); + if (virq) + generic_handle_irq(virq); + } + } + + chained_irq_exit(chip, desc); +} + +static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg) +{ + struct nwl_msi *msi; + unsigned long status; + u32 bit; + u32 virq; + + msi = &pcie->msi; + + while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) { + for_each_set_bit(bit, &status, 32) { + nwl_bridge_writel(pcie, 1 << bit, status_reg); + virq = irq_find_mapping(msi->dev_domain, bit); + if (virq) + generic_handle_irq(virq); + } + } +} + +static void nwl_pcie_msi_handler_high(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct nwl_pcie *pcie = irq_desc_get_handler_data(desc); + + chained_irq_enter(chip, desc); + nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI); + chained_irq_exit(chip, desc); +} + +static void nwl_pcie_msi_handler_low(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct nwl_pcie *pcie = irq_desc_get_handler_data(desc); + + chained_irq_enter(chip, desc); + nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO); + chained_irq_exit(chip, desc); +} + +static void nwl_mask_leg_irq(struct irq_data *data) +{ + struct irq_desc *desc = irq_to_desc(data->irq); + struct nwl_pcie *pcie; + unsigned long flags; + u32 mask; + u32 val; + + pcie = 
irq_desc_get_chip_data(desc); + mask = 1 << (data->hwirq - 1); + raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); + val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); + nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); + raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); +} + +static void nwl_unmask_leg_irq(struct irq_data *data) +{ + struct irq_desc *desc = irq_to_desc(data->irq); + struct nwl_pcie *pcie; + unsigned long flags; + u32 mask; + u32 val; + + pcie = irq_desc_get_chip_data(desc); + mask = 1 << (data->hwirq - 1); + raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); + val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); + nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); + raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); +} + +static struct irq_chip nwl_leg_irq_chip = { + .name = "nwl_pcie:legacy", + .irq_enable = nwl_unmask_leg_irq, + .irq_disable = nwl_mask_leg_irq, + .irq_mask = nwl_mask_leg_irq, + .irq_unmask = nwl_unmask_leg_irq, +}; + +static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + + return 0; +} + +static const struct irq_domain_ops legacy_domain_ops = { + .map = nwl_legacy_map, + .xlate = pci_irqd_intx_xlate, +}; + +#ifdef CONFIG_PCI_MSI +static struct irq_chip nwl_msi_irq_chip = { + .name = "nwl_pcie:msi", + .irq_enable = unmask_msi_irq, + .irq_disable = mask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, + +}; + +static struct msi_domain_info nwl_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI), + .chip = &nwl_msi_irq_chip, +}; +#endif + +static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); + phys_addr_t msi_addr = pcie->phys_pcie_reg_base; + + 
msg->address_lo = lower_32_bits(msi_addr); + msg->address_hi = upper_32_bits(msi_addr); + msg->data = data->hwirq; +} + +static int nwl_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static struct irq_chip nwl_irq_chip = { + .name = "Xilinx MSI", + .irq_compose_msi_msg = nwl_compose_msi_msg, + .irq_set_affinity = nwl_msi_set_affinity, +}; + +static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct nwl_pcie *pcie = domain->host_data; + struct nwl_msi *msi = &pcie->msi; + int bit; + int i; + + mutex_lock(&msi->lock); + bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0, + nr_irqs, 0); + if (bit >= INT_PCI_MSI_NR) { + mutex_unlock(&msi->lock); + return -ENOSPC; + } + + bitmap_set(msi->bitmap, bit, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + } + mutex_unlock(&msi->lock); + return 0; +} + +static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *data = irq_domain_get_irq_data(domain, virq); + struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); + struct nwl_msi *msi = &pcie->msi; + + mutex_lock(&msi->lock); + bitmap_clear(msi->bitmap, data->hwirq, nr_irqs); + mutex_unlock(&msi->lock); +} + +static const struct irq_domain_ops dev_msi_domain_ops = { + .alloc = nwl_irq_domain_alloc, + .free = nwl_irq_domain_free, +}; + +static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) +{ +#ifdef CONFIG_PCI_MSI + struct device *dev = pcie->dev; + struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); + struct nwl_msi *msi = &pcie->msi; + + msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR, + &dev_msi_domain_ops, pcie); + if (!msi->dev_domain) { + dev_err(dev, "failed to create dev IRQ domain\n"); + return -ENOMEM; + } + 
msi->msi_domain = pci_msi_create_irq_domain(fwnode, + &nwl_msi_domain_info, + msi->dev_domain); + if (!msi->msi_domain) { + dev_err(dev, "failed to create msi IRQ domain\n"); + irq_domain_remove(msi->dev_domain); + return -ENOMEM; + } +#endif + return 0; +} + +static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node; + struct device_node *legacy_intc_node; + + legacy_intc_node = of_get_next_child(node, NULL); + if (!legacy_intc_node) { + dev_err(dev, "No legacy intc node found\n"); + return -EINVAL; + } + + pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node, + PCI_NUM_INTX, + &legacy_domain_ops, + pcie); + + if (!pcie->legacy_irq_domain) { + dev_err(dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + raw_spin_lock_init(&pcie->leg_mask_lock); + nwl_pcie_init_msi_irq_domain(pcie); + return 0; +} + +static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + struct nwl_msi *msi = &pcie->msi; + unsigned long base; + int ret; + int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long); + + mutex_init(&msi->lock); + + msi->bitmap = kzalloc(size, GFP_KERNEL); + if (!msi->bitmap) + return -ENOMEM; + + /* Get msi_1 IRQ number */ + msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1"); + if (msi->irq_msi1 < 0) { + dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1); + ret = -EINVAL; + goto err; + } + + irq_set_chained_handler_and_data(msi->irq_msi1, + nwl_pcie_msi_handler_high, pcie); + + /* Get msi_0 IRQ number */ + msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0"); + if (msi->irq_msi0 < 0) { + dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0); + ret = -EINVAL; + goto err; + } + + irq_set_chained_handler_and_data(msi->irq_msi0, + nwl_pcie_msi_handler_low, pcie); + + /* Check for msii_present bit */ + ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT; + if 
(!ret) { + dev_err(dev, "MSI not present\n"); + ret = -EIO; + goto err; + } + + /* Enable MSII */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) | + MSII_ENABLE, I_MSII_CONTROL); + + /* Enable MSII status */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) | + MSII_STATUS_ENABLE, I_MSII_CONTROL); + + /* setup AFI/FPCI range */ + base = pcie->phys_pcie_reg_base; + nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO); + nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI); + + /* + * For high range MSI interrupts: disable, clear any pending, + * and enable + */ + nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI); + + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) & + MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI); + + nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI); + + /* + * For low range MSI interrupts: disable, clear any pending, + * and enable + */ + nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO); + + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) & + MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO); + + nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO); + + return 0; +err: + kfree(msi->bitmap); + msi->bitmap = NULL; + return ret; +} + +static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + u32 breg_val, ecam_val, first_busno = 0; + int err; + + breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT; + if (!breg_val) { + dev_err(dev, "BREG is not present\n"); + return breg_val; + } + + /* Write bridge_off to breg base */ + nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base), + E_BREG_BASE_LO); + nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base), + E_BREG_BASE_HI); + + /* Enable BREG */ + nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE, + E_BREG_CONTROL); + + /* Disable DMA channel registers */ + nwl_bridge_writel(pcie, 
nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) | + CFG_DMA_REG_BAR, BRCFG_PCIE_RX0); + + /* Enable Ingress subtractive decode translation */ + nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL); + + /* Enable msg filtering details */ + nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK, + BRCFG_PCIE_RX_MSG_FILTER); + + err = nwl_wait_for_link(pcie); + if (err) + return err; + + ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT; + if (!ecam_val) { + dev_err(dev, "ECAM is not present\n"); + return ecam_val; + } + + /* Enable ECAM */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | + E_ECAM_CR_ENABLE, E_ECAM_CONTROL); + + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | + (pcie->ecam_value << E_ECAM_SIZE_SHIFT), + E_ECAM_CONTROL); + + nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base), + E_ECAM_BASE_LO); + nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base), + E_ECAM_BASE_HI); + + /* Get bus range */ + ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL); + pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT; + /* Write primary, secondary and subordinate bus numbers */ + ecam_val = first_busno; + ecam_val |= (first_busno + 1) << 8; + ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT); + writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS)); + + if (nwl_pcie_link_up(pcie)) + dev_info(dev, "Link is UP\n"); + else + dev_info(dev, "Link is DOWN\n"); + + /* Get misc IRQ number */ + pcie->irq_misc = platform_get_irq_byname(pdev, "misc"); + if (pcie->irq_misc < 0) { + dev_err(dev, "failed to get misc IRQ %d\n", + pcie->irq_misc); + return -EINVAL; + } + + err = devm_request_irq(dev, pcie->irq_misc, + nwl_pcie_misc_handler, IRQF_SHARED, + "nwl_pcie:misc", pcie); + if (err) { + dev_err(dev, "fail to register misc IRQ#%d\n", + pcie->irq_misc); + return err; + } + + /* Disable all misc interrupts */ + nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); + + /* Clear pending 
misc interrupts */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & + MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS); + + /* Enable all misc interrupts */ + nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); + + + /* Disable all legacy interrupts */ + nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); + + /* Clear pending legacy interrupts */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & + MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS); + + /* Enable all legacy interrupts */ + nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); + + /* Enable the bridge config interrupt */ + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) | + BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT); + + return 0; +} + +static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, + struct platform_device *pdev) +{ + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node; + struct resource *res; + const char *type; + + /* Check for device type */ + type = of_get_property(node, "device_type", NULL); + if (!type || strcmp(type, "pci")) { + dev_err(dev, "invalid \"device_type\" %s\n", type); + return -EINVAL; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); + pcie->breg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->breg_base)) + return PTR_ERR(pcie->breg_base); + pcie->phys_breg_base = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg"); + pcie->pcireg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->pcireg_base)) + return PTR_ERR(pcie->pcireg_base); + pcie->phys_pcie_reg_base = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pcie->ecam_base)) + return PTR_ERR(pcie->ecam_base); + pcie->phys_ecam_base = res->start; + + /* Get intx IRQ number */ + pcie->irq_intx = platform_get_irq_byname(pdev, "intx"); + if (pcie->irq_intx < 0) { + 
dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx); + return pcie->irq_intx; + } + + irq_set_chained_handler_and_data(pcie->irq_intx, + nwl_pcie_leg_handler, pcie); + + return 0; +} + +static const struct of_device_id nwl_pcie_of_match[] = { + { .compatible = "xlnx,nwl-pcie-2.11", }, + {} +}; + +static int nwl_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct nwl_pcie *pcie; + struct pci_bus *bus; + struct pci_bus *child; + struct pci_host_bridge *bridge; + int err; + resource_size_t iobase = 0; + LIST_HEAD(res); + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) + return -ENODEV; + + pcie = pci_host_bridge_priv(bridge); + + pcie->dev = dev; + pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; + + err = nwl_pcie_parse_dt(pcie, pdev); + if (err) { + dev_err(dev, "Parsing DT failed\n"); + return err; + } + + err = nwl_pcie_bridge_init(pcie); + if (err) { + dev_err(dev, "HW Initialization failed\n"); + return err; + } + + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, + &iobase); + if (err) { + dev_err(dev, "Getting bridge resources failed\n"); + return err; + } + + err = devm_request_pci_bus_resources(dev, &res); + if (err) + goto error; + + err = nwl_pcie_init_irq_domain(pcie); + if (err) { + dev_err(dev, "Failed creating IRQ Domain\n"); + goto error; + } + + list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = pcie->root_busno; + bridge->ops = &nwl_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + err = nwl_pcie_enable_msi(pcie); + if (err < 0) { + dev_err(dev, "failed to enable MSI support: %d\n", err); + goto error; + } + } + + err = pci_scan_root_bus_bridge(bridge); + if (err) + goto error; + + bus = bridge->bus; + + pci_assign_unassigned_bus_resources(bus); + list_for_each_entry(child, &bus->children, node) + 
pcie_bus_configure_settings(child); + pci_bus_add_devices(bus); + return 0; + +error: + pci_free_resource_list(&res); + return err; +} + +static struct platform_driver nwl_pcie_driver = { + .driver = { + .name = "nwl-pcie", + .suppress_bind_attrs = true, + .of_match_table = nwl_pcie_of_match, + }, + .probe = nwl_pcie_probe, +}; +builtin_platform_driver(nwl_pcie_driver); diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c new file mode 100644 index 000000000000..b110a3a814e3 --- /dev/null +++ b/drivers/pci/controller/pcie-xilinx.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe host controller driver for Xilinx AXI PCIe Bridge + * + * Copyright (c) 2012 - 2014 Xilinx, Inc. + * + * Based on the Tegra PCIe driver + * + * Bits taken from Synopsys DesignWare Host controller driver and + * ARM PCI Host generic driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../pci.h" + +/* Register definitions */ +#define XILINX_PCIE_REG_BIR 0x00000130 +#define XILINX_PCIE_REG_IDR 0x00000138 +#define XILINX_PCIE_REG_IMR 0x0000013c +#define XILINX_PCIE_REG_PSCR 0x00000144 +#define XILINX_PCIE_REG_RPSC 0x00000148 +#define XILINX_PCIE_REG_MSIBASE1 0x0000014c +#define XILINX_PCIE_REG_MSIBASE2 0x00000150 +#define XILINX_PCIE_REG_RPEFR 0x00000154 +#define XILINX_PCIE_REG_RPIFR1 0x00000158 +#define XILINX_PCIE_REG_RPIFR2 0x0000015c + +/* Interrupt registers definitions */ +#define XILINX_PCIE_INTR_LINK_DOWN BIT(0) +#define XILINX_PCIE_INTR_ECRC_ERR BIT(1) +#define XILINX_PCIE_INTR_STR_ERR BIT(2) +#define XILINX_PCIE_INTR_HOT_RESET BIT(3) +#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8) +#define XILINX_PCIE_INTR_CORRECTABLE BIT(9) +#define XILINX_PCIE_INTR_NONFATAL BIT(10) +#define XILINX_PCIE_INTR_FATAL BIT(11) +#define XILINX_PCIE_INTR_INTX BIT(16) +#define XILINX_PCIE_INTR_MSI BIT(17) +#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20) +#define 
XILINX_PCIE_INTR_SLV_UNEXP BIT(21) +#define XILINX_PCIE_INTR_SLV_COMPL BIT(22) +#define XILINX_PCIE_INTR_SLV_ERRP BIT(23) +#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24) +#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25) +#define XILINX_PCIE_INTR_MST_DECERR BIT(26) +#define XILINX_PCIE_INTR_MST_SLVERR BIT(27) +#define XILINX_PCIE_INTR_MST_ERRP BIT(28) +#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED +#define XILINX_PCIE_IMR_ENABLE_MASK 0x1FF30F0D +#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF + +/* Root Port Error FIFO Read Register definitions */ +#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18) +#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0) +#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF + +/* Root Port Interrupt FIFO Read Register 1 definitions */ +#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31) +#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30) +#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27) +#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF +#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27 + +/* Bridge Info Register definitions */ +#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16) +#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16 + +/* Root Port Interrupt FIFO Read Register 2 definitions */ +#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0) + +/* Root Port Status/control Register definitions */ +#define XILINX_PCIE_REG_RPSC_BEN BIT(0) + +/* Phy Status/Control Register definitions */ +#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11) + +/* ECAM definitions */ +#define ECAM_BUS_NUM_SHIFT 20 +#define ECAM_DEV_NUM_SHIFT 12 + +/* Number of MSI IRQs */ +#define XILINX_NUM_MSI_IRQS 128 + +/** + * struct xilinx_pcie_port - PCIe port information + * @reg_base: IO Mapped Register Base + * @irq: Interrupt number + * @msi_pages: MSI pages + * @root_busno: Root Bus number + * @dev: Device pointer + * @msi_domain: MSI IRQ domain pointer + * @leg_domain: Legacy IRQ domain pointer + * @resources: Bus Resources + */ +struct xilinx_pcie_port { + void __iomem *reg_base; + u32 irq; + unsigned long msi_pages; + u8 
root_busno; + struct device *dev; + struct irq_domain *msi_domain; + struct irq_domain *leg_domain; + struct list_head resources; +}; + +static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS); + +static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg) +{ + return readl(port->reg_base + reg); +} + +static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg) +{ + writel(val, port->reg_base + reg); +} + +static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port) +{ + return (pcie_read(port, XILINX_PCIE_REG_PSCR) & + XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0; +} + +/** + * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts + * @port: PCIe port information + */ +static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) +{ + struct device *dev = port->dev; + unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR); + + if (val & XILINX_PCIE_RPEFR_ERR_VALID) { + dev_dbg(dev, "Requester ID %lu\n", + val & XILINX_PCIE_RPEFR_REQ_ID); + pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, + XILINX_PCIE_REG_RPEFR); + } +} + +/** + * xilinx_pcie_valid_device - Check if a valid device is present on bus + * @bus: PCI Bus structure + * @devfn: device/function + * + * Return: 'true' on success and 'false' if invalid device is found + */ +static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct xilinx_pcie_port *port = bus->sysdata; + + /* Check if link is up when trying to access downstream ports */ + if (bus->number != port->root_busno) + if (!xilinx_pcie_link_up(port)) + return false; + + /* Only one device down on each root port */ + if (bus->number == port->root_busno && devfn > 0) + return false; + + return true; +} + +/** + * xilinx_pcie_map_bus - Get configuration base + * @bus: PCI Bus structure + * @devfn: Device/function + * @where: Offset from base + * + * Return: Base address of the configuration space needed to be + * accessed. 
+ */ +static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct xilinx_pcie_port *port = bus->sysdata; + int relbus; + + if (!xilinx_pcie_valid_device(bus, devfn)) + return NULL; + + relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | + (devfn << ECAM_DEV_NUM_SHIFT); + + return port->reg_base + relbus + where; +} + +/* PCIe operations */ +static struct pci_ops xilinx_pcie_ops = { + .map_bus = xilinx_pcie_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +/* MSI functions */ + +/** + * xilinx_pcie_destroy_msi - Free MSI number + * @irq: IRQ to be freed + */ +static void xilinx_pcie_destroy_msi(unsigned int irq) +{ + struct msi_desc *msi; + struct xilinx_pcie_port *port; + struct irq_data *d = irq_get_irq_data(irq); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + if (!test_bit(hwirq, msi_irq_in_use)) { + msi = irq_get_msi_desc(irq); + port = msi_desc_to_pci_sysdata(msi); + dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); + } else { + clear_bit(hwirq, msi_irq_in_use); + } +} + +/** + * xilinx_pcie_assign_msi - Allocate MSI number + * + * Return: A valid IRQ on success and error value on failure. 
+ */ +static int xilinx_pcie_assign_msi(void) +{ + int pos; + + pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS); + if (pos < XILINX_NUM_MSI_IRQS) + set_bit(pos, msi_irq_in_use); + else + return -ENOSPC; + + return pos; +} + +/** + * xilinx_msi_teardown_irq - Destroy the MSI + * @chip: MSI Chip descriptor + * @irq: MSI IRQ to destroy + */ +static void xilinx_msi_teardown_irq(struct msi_controller *chip, + unsigned int irq) +{ + xilinx_pcie_destroy_msi(irq); + irq_dispose_mapping(irq); +} + +/** + * xilinx_pcie_msi_setup_irq - Setup MSI request + * @chip: MSI chip pointer + * @pdev: PCIe device pointer + * @desc: MSI descriptor pointer + * + * Return: '0' on success and error value on failure + */ +static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, + struct pci_dev *pdev, + struct msi_desc *desc) +{ + struct xilinx_pcie_port *port = pdev->bus->sysdata; + unsigned int irq; + int hwirq; + struct msi_msg msg; + phys_addr_t msg_addr; + + hwirq = xilinx_pcie_assign_msi(); + if (hwirq < 0) + return hwirq; + + irq = irq_create_mapping(port->msi_domain, hwirq); + if (!irq) + return -EINVAL; + + irq_set_msi_desc(irq, desc); + + msg_addr = virt_to_phys((void *)port->msi_pages); + + msg.address_hi = 0; + msg.address_lo = msg_addr; + msg.data = irq; + + pci_write_msi_msg(irq, &msg); + + return 0; +} + +/* MSI Chip Descriptor */ +static struct msi_controller xilinx_pcie_msi_chip = { + .setup_irq = xilinx_pcie_msi_setup_irq, + .teardown_irq = xilinx_msi_teardown_irq, +}; + +/* HW Interrupt Chip Descriptor */ +static struct irq_chip xilinx_msi_irq_chip = { + .name = "Xilinx PCIe MSI", + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +/** + * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid + * @domain: IRQ domain + * @irq: Virtual IRQ number + * @hwirq: HW interrupt number + * + * Return: Always returns 0. 
+ */ +static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +/* IRQ Domain operations */ +static const struct irq_domain_ops msi_domain_ops = { + .map = xilinx_pcie_msi_map, +}; + +/** + * xilinx_pcie_enable_msi - Enable MSI support + * @port: PCIe port information + */ +static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) +{ + phys_addr_t msg_addr; + + port->msi_pages = __get_free_pages(GFP_KERNEL, 0); + msg_addr = virt_to_phys((void *)port->msi_pages); + pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); + pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); +} + +/* INTx Functions */ + +/** + * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid + * @domain: IRQ domain + * @irq: Virtual IRQ number + * @hwirq: HW interrupt number + * + * Return: Always returns 0. + */ +static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +/* INTx IRQ Domain operations */ +static const struct irq_domain_ops intx_domain_ops = { + .map = xilinx_pcie_intx_map, + .xlate = pci_irqd_intx_xlate, +}; + +/* PCIe HW Functions */ + +/** + * xilinx_pcie_intr_handler - Interrupt Service Handler + * @irq: IRQ number + * @data: PCIe port information + * + * Return: IRQ_HANDLED on success and IRQ_NONE on failure + */ +static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) +{ + struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data; + struct device *dev = port->dev; + u32 val, mask, status; + + /* Read interrupt decode and mask registers */ + val = pcie_read(port, XILINX_PCIE_REG_IDR); + mask = pcie_read(port, XILINX_PCIE_REG_IMR); + + status = val & mask; + if (!status) + return 
IRQ_NONE; + + if (status & XILINX_PCIE_INTR_LINK_DOWN) + dev_warn(dev, "Link Down\n"); + + if (status & XILINX_PCIE_INTR_ECRC_ERR) + dev_warn(dev, "ECRC failed\n"); + + if (status & XILINX_PCIE_INTR_STR_ERR) + dev_warn(dev, "Streaming error\n"); + + if (status & XILINX_PCIE_INTR_HOT_RESET) + dev_info(dev, "Hot reset\n"); + + if (status & XILINX_PCIE_INTR_CFG_TIMEOUT) + dev_warn(dev, "ECAM access timeout\n"); + + if (status & XILINX_PCIE_INTR_CORRECTABLE) { + dev_warn(dev, "Correctable error message\n"); + xilinx_pcie_clear_err_interrupts(port); + } + + if (status & XILINX_PCIE_INTR_NONFATAL) { + dev_warn(dev, "Non fatal error message\n"); + xilinx_pcie_clear_err_interrupts(port); + } + + if (status & XILINX_PCIE_INTR_FATAL) { + dev_warn(dev, "Fatal error message\n"); + xilinx_pcie_clear_err_interrupts(port); + } + + if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) { + val = pcie_read(port, XILINX_PCIE_REG_RPIFR1); + + /* Check whether interrupt valid */ + if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) { + dev_warn(dev, "RP Intr FIFO1 read error\n"); + goto error; + } + + /* Decode the IRQ number */ + if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { + val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) & + XILINX_PCIE_RPIFR2_MSG_DATA; + } else { + val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> + XILINX_PCIE_RPIFR1_INTR_SHIFT; + val = irq_find_mapping(port->leg_domain, val); + } + + /* Clear interrupt FIFO register 1 */ + pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, + XILINX_PCIE_REG_RPIFR1); + + /* Handle the interrupt */ + if (IS_ENABLED(CONFIG_PCI_MSI) || + !(val & XILINX_PCIE_RPIFR1_MSI_INTR)) + generic_handle_irq(val); + } + + if (status & XILINX_PCIE_INTR_SLV_UNSUPP) + dev_warn(dev, "Slave unsupported request\n"); + + if (status & XILINX_PCIE_INTR_SLV_UNEXP) + dev_warn(dev, "Slave unexpected completion\n"); + + if (status & XILINX_PCIE_INTR_SLV_COMPL) + dev_warn(dev, "Slave completion timeout\n"); + + if (status & XILINX_PCIE_INTR_SLV_ERRP) + dev_warn(dev, "Slave 
Error Poison\n"); + + if (status & XILINX_PCIE_INTR_SLV_CMPABT) + dev_warn(dev, "Slave Completer Abort\n"); + + if (status & XILINX_PCIE_INTR_SLV_ILLBUR) + dev_warn(dev, "Slave Illegal Burst\n"); + + if (status & XILINX_PCIE_INTR_MST_DECERR) + dev_warn(dev, "Master decode error\n"); + + if (status & XILINX_PCIE_INTR_MST_SLVERR) + dev_warn(dev, "Master slave error\n"); + + if (status & XILINX_PCIE_INTR_MST_ERRP) + dev_warn(dev, "Master error poison\n"); + +error: + /* Clear the Interrupt Decode register */ + pcie_write(port, status, XILINX_PCIE_REG_IDR); + + return IRQ_HANDLED; +} + +/** + * xilinx_pcie_init_irq_domain - Initialize IRQ domain + * @port: PCIe port information + * + * Return: '0' on success and error value on failure + */ +static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) +{ + struct device *dev = port->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + + /* Setup INTx */ + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, + port); + if (!port->leg_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return -ENODEV; + } + + /* Setup MSI */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + port->msi_domain = irq_domain_add_linear(node, + XILINX_NUM_MSI_IRQS, + &msi_domain_ops, + &xilinx_pcie_msi_chip); + if (!port->msi_domain) { + dev_err(dev, "Failed to get a MSI IRQ domain\n"); + return -ENODEV; + } + + xilinx_pcie_enable_msi(port); + } + + return 0; +} + +/** + * xilinx_pcie_init_port - Initialize hardware + * @port: PCIe port information + */ +static void xilinx_pcie_init_port(struct xilinx_pcie_port *port) +{ + struct device *dev = port->dev; + + if (xilinx_pcie_link_up(port)) + dev_info(dev, "PCIe Link is UP\n"); + else + dev_info(dev, "PCIe Link is DOWN\n"); + + /* Disable all interrupts */ + 
pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK, + XILINX_PCIE_REG_IMR); + + /* Clear pending interrupts */ + pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) & + XILINX_PCIE_IMR_ALL_MASK, + XILINX_PCIE_REG_IDR); + + /* Enable all interrupts we handle */ + pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR); + + /* Enable the Bridge enable bit */ + pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) | + XILINX_PCIE_REG_RPSC_BEN, + XILINX_PCIE_REG_RPSC); +} + +/** + * xilinx_pcie_parse_dt - Parse Device tree + * @port: PCIe port information + * + * Return: '0' on success and error value on failure + */ +static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) +{ + struct device *dev = port->dev; + struct device_node *node = dev->of_node; + struct resource regs; + const char *type; + int err; + + type = of_get_property(node, "device_type", NULL); + if (!type || strcmp(type, "pci")) { + dev_err(dev, "invalid \"device_type\" %s\n", type); + return -EINVAL; + } + + err = of_address_to_resource(node, 0, ®s); + if (err) { + dev_err(dev, "missing \"reg\" property\n"); + return err; + } + + port->reg_base = devm_pci_remap_cfg_resource(dev, ®s); + if (IS_ERR(port->reg_base)) + return PTR_ERR(port->reg_base); + + port->irq = irq_of_parse_and_map(node, 0); + err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler, + IRQF_SHARED | IRQF_NO_THREAD, + "xilinx-pcie", port); + if (err) { + dev_err(dev, "unable to request irq %d\n", port->irq); + return err; + } + + return 0; +} + +/** + * xilinx_pcie_probe - Probe function + * @pdev: Platform device pointer + * + * Return: '0' on success and error value on failure + */ +static int xilinx_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct xilinx_pcie_port *port; + struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; + int err; + resource_size_t iobase = 0; + LIST_HEAD(res); + + if (!dev->of_node) + return -ENODEV; + + bridge = 
devm_pci_alloc_host_bridge(dev, sizeof(*port)); + if (!bridge) + return -ENODEV; + + port = pci_host_bridge_priv(bridge); + + port->dev = dev; + + err = xilinx_pcie_parse_dt(port); + if (err) { + dev_err(dev, "Parsing DT failed\n"); + return err; + } + + xilinx_pcie_init_port(port); + + err = xilinx_pcie_init_irq_domain(port); + if (err) { + dev_err(dev, "Failed creating IRQ Domain\n"); + return err; + } + + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, + &iobase); + if (err) { + dev_err(dev, "Getting bridge resources failed\n"); + return err; + } + + err = devm_request_pci_bus_resources(dev, &res); + if (err) + goto error; + + + list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = port; + bridge->busnr = 0; + bridge->ops = &xilinx_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + +#ifdef CONFIG_PCI_MSI + xilinx_pcie_msi_chip.dev = dev; + bridge->msi = &xilinx_pcie_msi_chip; +#endif + err = pci_scan_root_bus_bridge(bridge); + if (err < 0) + goto error; + + bus = bridge->bus; + + pci_assign_unassigned_bus_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + pci_bus_add_devices(bus); + return 0; + +error: + pci_free_resource_list(&res); + return err; +} + +static const struct of_device_id xilinx_pcie_of_match[] = { + { .compatible = "xlnx,axi-pcie-host-1.00.a", }, + {} +}; + +static struct platform_driver xilinx_pcie_driver = { + .driver = { + .name = "xilinx-pcie", + .of_match_table = xilinx_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = xilinx_pcie_probe, +}; +builtin_platform_driver(xilinx_pcie_driver); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c new file mode 100644 index 000000000000..942b64fc7f1f --- /dev/null +++ b/drivers/pci/controller/vmd.c @@ -0,0 +1,870 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Volume Management Device driver + * Copyright (c) 
2015, Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define VMD_CFGBAR 0 +#define VMD_MEMBAR1 2 +#define VMD_MEMBAR2 4 + +#define PCI_REG_VMCAP 0x40 +#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1) +#define PCI_REG_VMCONFIG 0x44 +#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3) +#define PCI_REG_VMLOCK 0x70 +#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) + +enum vmd_features { + /* + * Device may contain registers which hint the physical location of the + * membars, in order to allow proper address translation during + * resource assignment to enable guest virtualization + */ + VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), + + /* + * Device may provide root port configuration information which limits + * bus numbering + */ + VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), +}; + +/* + * Lock for manipulating VMD IRQ lists. + */ +static DEFINE_RAW_SPINLOCK(list_lock); + +/** + * struct vmd_irq - private data to map driver IRQ to the VMD shared vector + * @node: list item for parent traversal. + * @irq: back pointer to parent. + * @enabled: true if driver enabled IRQ + * @virq: the virtual IRQ value provided to the requesting driver. + * + * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to + * a VMD IRQ using this structure. + */ +struct vmd_irq { + struct list_head node; + struct vmd_irq_list *irq; + bool enabled; + unsigned int virq; +}; + +/** + * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector + * @irq_list: the list of irq's the VMD one demuxes to. + * @srcu: SRCU struct for local synchronization. + * @count: number of child IRQs assigned to this vector; used to track + * sharing. 
+ */ +struct vmd_irq_list { + struct list_head irq_list; + struct srcu_struct srcu; + unsigned int count; +}; + +struct vmd_dev { + struct pci_dev *dev; + + spinlock_t cfg_lock; + char __iomem *cfgbar; + + int msix_count; + struct vmd_irq_list *irqs; + + struct pci_sysdata sysdata; + struct resource resources[3]; + struct irq_domain *irq_domain; + struct pci_bus *bus; + +#ifdef CONFIG_X86_DEV_DMA_OPS + struct dma_map_ops dma_ops; + struct dma_domain dma_domain; +#endif +}; + +static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) +{ + return container_of(bus->sysdata, struct vmd_dev, sysdata); +} + +static inline unsigned int index_from_irqs(struct vmd_dev *vmd, + struct vmd_irq_list *irqs) +{ + return irqs - vmd->irqs; +} + +/* + * Drivers managing a device in a VMD domain allocate their own IRQs as before, + * but the MSI entry for the hardware it's driving will be programmed with a + * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its + * domain into one of its own, and the VMD driver de-muxes these for the + * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations + * and irq_chip to set this up. + */ +static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct vmd_irq *vmdirq = data->chip_data; + struct vmd_irq_list *irq = vmdirq->irq; + struct vmd_dev *vmd = irq_data_get_irq_handler_data(data); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO | + MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq)); + msg->data = 0; +} + +/* + * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. 
+ */ +static void vmd_irq_enable(struct irq_data *data) +{ + struct vmd_irq *vmdirq = data->chip_data; + unsigned long flags; + + raw_spin_lock_irqsave(&list_lock, flags); + WARN_ON(vmdirq->enabled); + list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); + vmdirq->enabled = true; + raw_spin_unlock_irqrestore(&list_lock, flags); + + data->chip->irq_unmask(data); +} + +static void vmd_irq_disable(struct irq_data *data) +{ + struct vmd_irq *vmdirq = data->chip_data; + unsigned long flags; + + data->chip->irq_mask(data); + + raw_spin_lock_irqsave(&list_lock, flags); + if (vmdirq->enabled) { + list_del_rcu(&vmdirq->node); + vmdirq->enabled = false; + } + raw_spin_unlock_irqrestore(&list_lock, flags); +} + +/* + * XXX: Stubbed until we develop acceptable way to not create conflicts with + * other devices sharing the same vector. + */ +static int vmd_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, bool force) +{ + return -EINVAL; +} + +static struct irq_chip vmd_msi_controller = { + .name = "VMD-MSI", + .irq_enable = vmd_irq_enable, + .irq_disable = vmd_irq_disable, + .irq_compose_msi_msg = vmd_compose_msi_msg, + .irq_set_affinity = vmd_irq_set_affinity, +}; + +static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, + msi_alloc_info_t *arg) +{ + return 0; +} + +/* + * XXX: We can be even smarter selecting the best IRQ once we solve the + * affinity problem. 
+ */ +static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) +{ + int i, best = 1; + unsigned long flags; + + if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1) + return &vmd->irqs[0]; + + raw_spin_lock_irqsave(&list_lock, flags); + for (i = 1; i < vmd->msix_count; i++) + if (vmd->irqs[i].count < vmd->irqs[best].count) + best = i; + vmd->irqs[best].count++; + raw_spin_unlock_irqrestore(&list_lock, flags); + + return &vmd->irqs[best]; +} + +static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg) +{ + struct msi_desc *desc = arg->desc; + struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); + struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); + unsigned int index, vector; + + if (!vmdirq) + return -ENOMEM; + + INIT_LIST_HEAD(&vmdirq->node); + vmdirq->irq = vmd_next_irq(vmd, desc); + vmdirq->virq = virq; + index = index_from_irqs(vmd, vmdirq->irq); + vector = pci_irq_vector(vmd->dev, index); + + irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, + handle_untracked_irq, vmd, NULL); + return 0; +} + +static void vmd_msi_free(struct irq_domain *domain, + struct msi_domain_info *info, unsigned int virq) +{ + struct vmd_irq *vmdirq = irq_get_chip_data(virq); + unsigned long flags; + + synchronize_srcu(&vmdirq->irq->srcu); + + /* XXX: Potential optimization to rebalance */ + raw_spin_lock_irqsave(&list_lock, flags); + vmdirq->irq->count--; + raw_spin_unlock_irqrestore(&list_lock, flags); + + kfree(vmdirq); +} + +static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = vmd_from_bus(pdev->bus); + + if (nvec > vmd->msix_count) + return vmd->msix_count; + + memset(arg, 0, sizeof(*arg)); + return 0; +} + +static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + 
arg->desc = desc; +} + +static struct msi_domain_ops vmd_msi_domain_ops = { + .get_hwirq = vmd_get_hwirq, + .msi_init = vmd_msi_init, + .msi_free = vmd_msi_free, + .msi_prepare = vmd_msi_prepare, + .set_desc = vmd_set_desc, +}; + +static struct msi_domain_info vmd_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .ops = &vmd_msi_domain_ops, + .chip = &vmd_msi_controller, +}; + +#ifdef CONFIG_X86_DEV_DMA_OPS +/* + * VMD replaces the requester ID with its own. DMA mappings for devices in a + * VMD domain need to be mapped for the VMD, not the device requiring + * the mapping. + */ +static struct device *to_vmd_dev(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = vmd_from_bus(pdev->bus); + + return &vmd->dev->dev; +} + +static const struct dma_map_ops *vmd_dma_ops(struct device *dev) +{ + return get_dma_ops(to_vmd_dev(dev)); +} + +static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, + gfp_t flag, unsigned long attrs) +{ + return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, + attrs); +} + +static void vmd_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t addr, unsigned long attrs) +{ + return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, + attrs); +} + +static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t addr, size_t size, + unsigned long attrs) +{ + return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, + size, attrs); +} + +static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t addr, size_t size, + unsigned long attrs) +{ + return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, + addr, size, attrs); +} + +static dma_addr_t vmd_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return 
vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, + dir, attrs); +} + +static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); +} + +static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); +} + +static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); +} + +static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); +} + +static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size, + dir); +} + +static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); +} + +static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); +} + +static int vmd_mapping_error(struct device *dev, dma_addr_t addr) +{ + return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr); +} + +static int vmd_dma_supported(struct device *dev, u64 mask) +{ + return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); +} + +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK +static u64 vmd_get_required_mask(struct device *dev) +{ + return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); +} +#endif + +static void 
vmd_teardown_dma_ops(struct vmd_dev *vmd) +{ + struct dma_domain *domain = &vmd->dma_domain; + + if (get_dma_ops(&vmd->dev->dev)) + del_dma_domain(domain); +} + +#define ASSIGN_VMD_DMA_OPS(source, dest, fn) \ + do { \ + if (source->fn) \ + dest->fn = vmd_##fn; \ + } while (0) + +static void vmd_setup_dma_ops(struct vmd_dev *vmd) +{ + const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev); + struct dma_map_ops *dest = &vmd->dma_ops; + struct dma_domain *domain = &vmd->dma_domain; + + domain->domain_nr = vmd->sysdata.domain; + domain->dma_ops = dest; + + if (!source) + return; + ASSIGN_VMD_DMA_OPS(source, dest, alloc); + ASSIGN_VMD_DMA_OPS(source, dest, free); + ASSIGN_VMD_DMA_OPS(source, dest, mmap); + ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable); + ASSIGN_VMD_DMA_OPS(source, dest, map_page); + ASSIGN_VMD_DMA_OPS(source, dest, unmap_page); + ASSIGN_VMD_DMA_OPS(source, dest, map_sg); + ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg); + ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu); + ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); + ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); + ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); + ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); + ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK + ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); +#endif + add_dma_domain(domain); +} +#undef ASSIGN_VMD_DMA_OPS +#else +static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {} +static void vmd_setup_dma_ops(struct vmd_dev *vmd) {} +#endif + +static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, + unsigned int devfn, int reg, int len) +{ + char __iomem *addr = vmd->cfgbar + + (bus->number << 20) + (devfn << 12) + reg; + + if ((addr - vmd->cfgbar) + len >= + resource_size(&vmd->dev->resource[VMD_CFGBAR])) + return NULL; + + return addr; +} + +/* + * CPU may deadlock if config space is not serialized on some versions of this + * hardware, so all 
config space access is done under a spinlock. + */ +static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, + int len, u32 *value) +{ + struct vmd_dev *vmd = vmd_from_bus(bus); + char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); + unsigned long flags; + int ret = 0; + + if (!addr) + return -EFAULT; + + spin_lock_irqsave(&vmd->cfg_lock, flags); + switch (len) { + case 1: + *value = readb(addr); + break; + case 2: + *value = readw(addr); + break; + case 4: + *value = readl(addr); + break; + default: + ret = -EINVAL; + break; + } + spin_unlock_irqrestore(&vmd->cfg_lock, flags); + return ret; +} + +/* + * VMD h/w converts non-posted config writes to posted memory writes. The + * read-back in this function forces the completion so it returns only after + * the config space was written, as expected. + */ +static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, + int len, u32 value) +{ + struct vmd_dev *vmd = vmd_from_bus(bus); + char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); + unsigned long flags; + int ret = 0; + + if (!addr) + return -EFAULT; + + spin_lock_irqsave(&vmd->cfg_lock, flags); + switch (len) { + case 1: + writeb(value, addr); + readb(addr); + break; + case 2: + writew(value, addr); + readw(addr); + break; + case 4: + writel(value, addr); + readl(addr); + break; + default: + ret = -EINVAL; + break; + } + spin_unlock_irqrestore(&vmd->cfg_lock, flags); + return ret; +} + +static struct pci_ops vmd_ops = { + .read = vmd_pci_read, + .write = vmd_pci_write, +}; + +static void vmd_attach_resources(struct vmd_dev *vmd) +{ + vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1]; + vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2]; +} + +static void vmd_detach_resources(struct vmd_dev *vmd) +{ + vmd->dev->resource[VMD_MEMBAR1].child = NULL; + vmd->dev->resource[VMD_MEMBAR2].child = NULL; +} + +/* + * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. 
+ * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower + * 16 bits are the PCI Segment Group (domain) number. Other bits are + * currently reserved. + */ +static int vmd_find_free_domain(void) +{ + int domain = 0xffff; + struct pci_bus *bus = NULL; + + while ((bus = pci_find_next_bus(bus)) != NULL) + domain = max_t(int, domain, pci_domain_nr(bus)); + return domain + 1; +} + +static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) +{ + struct pci_sysdata *sd = &vmd->sysdata; + struct fwnode_handle *fn; + struct resource *res; + u32 upper_bits; + unsigned long flags; + LIST_HEAD(resources); + resource_size_t offset[2] = {0}; + resource_size_t membar2_offset = 0x2000, busn_start = 0; + + /* + * Shadow registers may exist in certain VMD device ids which allow + * guests to correctly assign host physical addresses to the root ports + * and child devices. These registers will either return the host value + * or 0, depending on an enable bit in the VMD device. 
+ */ + if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) { + u32 vmlock; + int ret; + + membar2_offset = 0x2018; + ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); + if (ret || vmlock == ~0) + return -ENODEV; + + if (MB2_SHADOW_EN(vmlock)) { + void __iomem *membar2; + + membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0); + if (!membar2) + return -ENOMEM; + offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - + readq(membar2 + 0x2008); + offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - + readq(membar2 + 0x2010); + pci_iounmap(vmd->dev, membar2); + } + } + + /* + * Certain VMD devices may have a root port configuration option which + * limits the bus range to between 0-127 or 128-255 + */ + if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) { + u32 vmcap, vmconfig; + + pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap); + pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); + if (BUS_RESTRICT_CAP(vmcap) && + (BUS_RESTRICT_CFG(vmconfig) == 0x1)) + busn_start = 128; + } + + res = &vmd->dev->resource[VMD_CFGBAR]; + vmd->resources[0] = (struct resource) { + .name = "VMD CFGBAR", + .start = busn_start, + .end = busn_start + (resource_size(res) >> 20) - 1, + .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, + }; + + /* + * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can + * put 32-bit resources in the window. + * + * There's no hardware reason why a 64-bit window *couldn't* + * contain a 32-bit resource, but pbus_size_mem() computes the + * bridge window size assuming a 64-bit window will contain no + * 32-bit resources. __pci_assign_resource() enforces that + * artificial restriction to make sure everything will fit. + * + * The only way we could use a 64-bit non-prefechable MEMBAR is + * if its address is <4GB so that we can convert it to a 32-bit + * resource. To be visible to the host OS, all VMD endpoints must + * be initially configured by platform BIOS, which includes setting + * up these resources. 
We can assume the device is configured + * according to the platform needs. + */ + res = &vmd->dev->resource[VMD_MEMBAR1]; + upper_bits = upper_32_bits(res->end); + flags = res->flags & ~IORESOURCE_SIZEALIGN; + if (!upper_bits) + flags &= ~IORESOURCE_MEM_64; + vmd->resources[1] = (struct resource) { + .name = "VMD MEMBAR1", + .start = res->start, + .end = res->end, + .flags = flags, + .parent = res, + }; + + res = &vmd->dev->resource[VMD_MEMBAR2]; + upper_bits = upper_32_bits(res->end); + flags = res->flags & ~IORESOURCE_SIZEALIGN; + if (!upper_bits) + flags &= ~IORESOURCE_MEM_64; + vmd->resources[2] = (struct resource) { + .name = "VMD MEMBAR2", + .start = res->start + membar2_offset, + .end = res->end, + .flags = flags, + .parent = res, + }; + + sd->vmd_domain = true; + sd->domain = vmd_find_free_domain(); + if (sd->domain < 0) + return sd->domain; + + sd->node = pcibus_to_node(vmd->dev->bus); + + fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); + if (!fn) + return -ENODEV; + + vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, + x86_vector_domain); + irq_domain_free_fwnode(fn); + if (!vmd->irq_domain) + return -ENODEV; + + pci_add_resource(&resources, &vmd->resources[0]); + pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); + pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); + + vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops, + sd, &resources); + if (!vmd->bus) { + pci_free_resource_list(&resources); + irq_domain_remove(vmd->irq_domain); + return -ENODEV; + } + + vmd_attach_resources(vmd); + vmd_setup_dma_ops(vmd); + dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); + pci_rescan_bus(vmd->bus); + + WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, + "domain"), "Can't create symlink to domain\n"); + return 0; +} + +static irqreturn_t vmd_irq(int irq, void *data) +{ + struct vmd_irq_list *irqs = data; + struct vmd_irq *vmdirq; + int idx; + + idx = 
srcu_read_lock(&irqs->srcu); + list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) + generic_handle_irq(vmdirq->virq); + srcu_read_unlock(&irqs->srcu, idx); + + return IRQ_HANDLED; +} + +static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct vmd_dev *vmd; + int i, err; + + if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) + return -ENOMEM; + + vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL); + if (!vmd) + return -ENOMEM; + + vmd->dev = dev; + err = pcim_enable_device(dev); + if (err < 0) + return err; + + vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0); + if (!vmd->cfgbar) + return -ENOMEM; + + pci_set_master(dev); + if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) + return -ENODEV; + + vmd->msix_count = pci_msix_vec_count(dev); + if (vmd->msix_count < 0) + return -ENODEV; + + vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, + PCI_IRQ_MSIX); + if (vmd->msix_count < 0) + return vmd->msix_count; + + vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), + GFP_KERNEL); + if (!vmd->irqs) + return -ENOMEM; + + for (i = 0; i < vmd->msix_count; i++) { + err = init_srcu_struct(&vmd->irqs[i].srcu); + if (err) + return err; + + INIT_LIST_HEAD(&vmd->irqs[i].irq_list); + err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), + vmd_irq, IRQF_NO_THREAD, + "vmd", &vmd->irqs[i]); + if (err) + return err; + } + + spin_lock_init(&vmd->cfg_lock); + pci_set_drvdata(dev, vmd); + err = vmd_enable_domain(vmd, (unsigned long) id->driver_data); + if (err) + return err; + + dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n", + vmd->sysdata.domain); + return 0; +} + +static void vmd_cleanup_srcu(struct vmd_dev *vmd) +{ + int i; + + for (i = 0; i < vmd->msix_count; i++) + cleanup_srcu_struct(&vmd->irqs[i].srcu); +} + +static void vmd_remove(struct pci_dev *dev) +{ + struct vmd_dev *vmd = pci_get_drvdata(dev); + + 
vmd_detach_resources(vmd); + sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); + pci_stop_root_bus(vmd->bus); + pci_remove_root_bus(vmd->bus); + vmd_cleanup_srcu(vmd); + vmd_teardown_dma_ops(vmd); + irq_domain_remove(vmd->irq_domain); +} + +#ifdef CONFIG_PM_SLEEP +static int vmd_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = pci_get_drvdata(pdev); + int i; + + for (i = 0; i < vmd->msix_count; i++) + devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); + + pci_save_state(pdev); + return 0; +} + +static int vmd_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct vmd_dev *vmd = pci_get_drvdata(pdev); + int err, i; + + for (i = 0; i < vmd->msix_count; i++) { + err = devm_request_irq(dev, pci_irq_vector(pdev, i), + vmd_irq, IRQF_NO_THREAD, + "vmd", &vmd->irqs[i]); + if (err) + return err; + } + + pci_restore_state(pdev); + return 0; +} +#endif +static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); + +static const struct pci_device_id vmd_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | + VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, vmd_ids); + +static struct pci_driver vmd_drv = { + .name = "vmd", + .id_table = vmd_ids, + .probe = vmd_probe, + .remove = vmd_remove, + .driver = { + .pm = &vmd_dev_pm_ops, + }, +}; +module_pci_driver(vmd_drv); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.6"); diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig deleted file mode 100644 index 16f52c626b4b..000000000000 --- a/drivers/pci/dwc/Kconfig +++ /dev/null @@ -1,197 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -menu "DesignWare PCI Core Support" - depends on PCI - -config PCIE_DW - bool - -config PCIE_DW_HOST - bool - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW - -config 
PCIE_DW_EP - bool - depends on PCI_ENDPOINT - select PCIE_DW - -config PCI_DRA7XX - bool - -config PCI_DRA7XX_HOST - bool "TI DRA7xx PCIe controller Host Mode" - depends on SOC_DRA7XX || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - depends on OF && HAS_IOMEM && TI_PIPE3 - select PCIE_DW_HOST - select PCI_DRA7XX - default y - help - Enables support for the PCIe controller in the DRA7xx SoC to work in - host mode. There are two instances of PCIe controller in DRA7xx. - This controller can work either as EP or RC. In order to enable - host-specific features PCI_DRA7XX_HOST must be selected and in order - to enable device-specific features PCI_DRA7XX_EP must be selected. - This uses the DesignWare core. - -config PCI_DRA7XX_EP - bool "TI DRA7xx PCIe controller Endpoint Mode" - depends on SOC_DRA7XX || COMPILE_TEST - depends on PCI_ENDPOINT - depends on OF && HAS_IOMEM && TI_PIPE3 - select PCIE_DW_EP - select PCI_DRA7XX - help - Enables support for the PCIe controller in the DRA7xx SoC to work in - endpoint mode. There are two instances of PCIe controller in DRA7xx. - This controller can work either as EP or RC. In order to enable - host-specific features PCI_DRA7XX_HOST must be selected and in order - to enable device-specific features PCI_DRA7XX_EP must be selected. - This uses the DesignWare core. - -config PCIE_DW_PLAT - bool - -config PCIE_DW_PLAT_HOST - bool "Platform bus based DesignWare PCIe Controller - Host mode" - depends on PCI && PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - select PCIE_DW_PLAT - default y - help - Enables support for the PCIe controller in the Designware IP to - work in host mode. There are two instances of PCIe controller in - Designware IP. - This controller can work either as EP or RC. In order to enable - host-specific features PCIE_DW_PLAT_HOST must be selected and in - order to enable device-specific features PCI_DW_PLAT_EP must be - selected. 
- -config PCIE_DW_PLAT_EP - bool "Platform bus based DesignWare PCIe Controller - Endpoint mode" - depends on PCI && PCI_MSI_IRQ_DOMAIN - depends on PCI_ENDPOINT - select PCIE_DW_EP - select PCIE_DW_PLAT - help - Enables support for the PCIe controller in the Designware IP to - work in endpoint mode. There are two instances of PCIe controller - in Designware IP. - This controller can work either as EP or RC. In order to enable - host-specific features PCIE_DW_PLAT_HOST must be selected and in - order to enable device-specific features PCI_DW_PLAT_EP must be - selected. - -config PCI_EXYNOS - bool "Samsung Exynos PCIe controller" - depends on SOC_EXYNOS5440 || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - -config PCI_IMX6 - bool "Freescale i.MX6 PCIe controller" - depends on SOC_IMX6Q || (ARM && COMPILE_TEST) - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - -config PCIE_SPEAR13XX - bool "STMicroelectronics SPEAr PCIe controller" - depends on ARCH_SPEAR13XX || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - help - Say Y here if you want PCIe support on SPEAr13XX SoCs. - -config PCI_KEYSTONE - bool "TI Keystone PCIe controller" - depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST) - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - help - Say Y here if you want to enable PCI controller support on Keystone - SoCs. The PCI controller on Keystone is based on DesignWare hardware - and therefore the driver re-uses the DesignWare core functions to - implement the driver. - -config PCI_LAYERSCAPE - bool "Freescale Layerscape PCIe controller" - depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) - depends on PCI_MSI_IRQ_DOMAIN - select MFD_SYSCON - select PCIE_DW_HOST - help - Say Y here if you want PCIe controller support on Layerscape SoCs. 
- -config PCI_HISI - depends on OF && (ARM64 || COMPILE_TEST) - bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers" - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - select PCI_HOST_COMMON - help - Say Y here if you want PCIe controller support on HiSilicon - Hip05 and Hip06 SoCs - -config PCIE_QCOM - bool "Qualcomm PCIe controller" - depends on OF && (ARCH_QCOM || COMPILE_TEST) - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - help - Say Y here to enable PCIe controller support on Qualcomm SoCs. The - PCIe controller uses the DesignWare core plus Qualcomm-specific - hardware wrappers. - -config PCIE_ARMADA_8K - bool "Marvell Armada-8K PCIe controller" - depends on ARCH_MVEBU || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - help - Say Y here if you want to enable PCIe controller support on - Armada-8K SoCs. The PCIe controller on Armada-8K is based on - DesignWare hardware and therefore the driver re-uses the - DesignWare core functions to implement the driver. - -config PCIE_ARTPEC6 - bool - -config PCIE_ARTPEC6_HOST - bool "Axis ARTPEC-6 PCIe controller Host Mode" - depends on MACH_ARTPEC6 || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - select PCIE_ARTPEC6 - help - Enables support for the PCIe controller in the ARTPEC-6 SoC to work in - host mode. This uses the DesignWare core. - -config PCIE_ARTPEC6_EP - bool "Axis ARTPEC-6 PCIe controller Endpoint Mode" - depends on MACH_ARTPEC6 || COMPILE_TEST - depends on PCI_ENDPOINT - select PCIE_DW_EP - select PCIE_ARTPEC6 - help - Enables support for the PCIe controller in the ARTPEC-6 SoC to work in - endpoint mode. This uses the DesignWare core. - -config PCIE_KIRIN - depends on OF && (ARM64 || COMPILE_TEST) - bool "HiSilicon Kirin series SoCs PCIe controllers" - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - help - Say Y here if you want PCIe controller support - on HiSilicon Kirin series SoCs. 
- -config PCIE_HISI_STB - bool "HiSilicon STB SoCs PCIe controllers" - depends on ARCH_HISI || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - select PCIE_DW_HOST - help - Say Y here if you want PCIe controller support on HiSilicon STB SoCs - -endmenu diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile deleted file mode 100644 index 5d2ce72c7a52..000000000000 --- a/drivers/pci/dwc/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PCIE_DW) += pcie-designware.o -obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o -obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o -obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o -obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o -obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o -obj-$(CONFIG_PCI_IMX6) += pci-imx6.o -obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o -obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o -obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o -obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o -obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o -obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o -obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o -obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o - -# The following drivers are for devices that use the generic ACPI -# pci_root.c driver but don't support standard ECAM config access. -# They contain MCFG quirks to replace the generic ECAM accessors with -# device-specific ones that are shared with the DT driver. - -# The ACPI driver is generic and should not require driver-specific -# config options to be enabled, so we always build these drivers on -# ARM64 and use internal ifdefs to only build the pieces we need -# depending on whether ACPI, the DT driver, or both are enabled. 
- -ifdef CONFIG_PCI -obj-$(CONFIG_ARM64) += pcie-hisi.o -endif diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c deleted file mode 100644 index f688204e50c5..000000000000 --- a/drivers/pci/dwc/pci-dra7xx.c +++ /dev/null @@ -1,846 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs - * - * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com - * - * Authors: Kishon Vijay Abraham I - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" -#include "pcie-designware.h" - -/* PCIe controller wrapper DRA7XX configuration registers */ - -#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024 -#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028 -#define ERR_SYS BIT(0) -#define ERR_FATAL BIT(1) -#define ERR_NONFATAL BIT(2) -#define ERR_COR BIT(3) -#define ERR_AXI BIT(4) -#define ERR_ECRC BIT(5) -#define PME_TURN_OFF BIT(8) -#define PME_TO_ACK BIT(9) -#define PM_PME BIT(10) -#define LINK_REQ_RST BIT(11) -#define LINK_UP_EVT BIT(12) -#define CFG_BME_EVT BIT(13) -#define CFG_MSE_EVT BIT(14) -#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \ - ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \ - LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) - -#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 -#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 -#define INTA BIT(0) -#define INTB BIT(1) -#define INTC BIT(2) -#define INTD BIT(3) -#define MSI BIT(4) -#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) - -#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 -#define DEVICE_TYPE_EP 0x0 -#define DEVICE_TYPE_LEG_EP 0x1 -#define DEVICE_TYPE_RC 0x4 - -#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 -#define LTSSM_EN 0x1 - -#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C -#define LINK_UP BIT(16) 
-#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF - -#define EXP_CAP_ID_OFFSET 0x70 - -#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 -#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 - -#define PCIECTRL_TI_CONF_MSI_XMT 0x012c -#define MSI_REQ_GRANT BIT(0) -#define MSI_VECTOR_SHIFT 7 - -struct dra7xx_pcie { - struct dw_pcie *pci; - void __iomem *base; /* DT ti_conf */ - int phy_count; /* DT phy-names count */ - struct phy **phy; - int link_gen; - struct irq_domain *irq_domain; - enum dw_pcie_device_mode mode; -}; - -struct dra7xx_pcie_of_data { - enum dw_pcie_device_mode mode; -}; - -#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) - -static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) -{ - return readl(pcie->base + offset); -} - -static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, - u32 value) -{ - writel(value, pcie->base + offset); -} - -static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) -{ - return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; -} - -static int dra7xx_pcie_link_up(struct dw_pcie *pci) -{ - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); - - return !!(reg & LINK_UP); -} - -static void dra7xx_pcie_stop_link(struct dw_pcie *pci) -{ - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - u32 reg; - - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); - reg &= ~LTSSM_EN; - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); -} - -static int dra7xx_pcie_establish_link(struct dw_pcie *pci) -{ - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - struct device *dev = pci->dev; - u32 reg; - u32 exp_cap_off = EXP_CAP_ID_OFFSET; - - if (dw_pcie_link_up(pci)) { - dev_err(dev, "link is already up\n"); - return 0; - } - - if (dra7xx->link_gen == 1) { - dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, - 4, ®); - if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { - reg &= ~((u32)PCI_EXP_LNKCAP_SLS); 
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB; - dw_pcie_write(pci->dbi_base + exp_cap_off + - PCI_EXP_LNKCAP, 4, reg); - } - - dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, - 2, ®); - if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { - reg &= ~((u32)PCI_EXP_LNKCAP_SLS); - reg |= PCI_EXP_LNKCAP_SLS_2_5GB; - dw_pcie_write(pci->dbi_base + exp_cap_off + - PCI_EXP_LNKCTL2, 2, reg); - } - } - - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); - reg |= LTSSM_EN; - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); - - return 0; -} - -static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) -{ - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, - LEG_EP_INTERRUPTS | MSI); - - dra7xx_pcie_writel(dra7xx, - PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, - MSI | LEG_EP_INTERRUPTS); -} - -static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) -{ - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, - INTERRUPTS); - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, - INTERRUPTS); -} - -static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) -{ - dra7xx_pcie_enable_wrapper_interrupts(dra7xx); - dra7xx_pcie_enable_msi_interrupts(dra7xx); -} - -static int dra7xx_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - - dw_pcie_setup_rc(pp); - - dra7xx_pcie_establish_link(pci); - dw_pcie_wait_for_link(pci); - dw_pcie_msi_init(pp); - dra7xx_pcie_enable_interrupts(dra7xx); - - return 0; -} - -static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { - .host_init = dra7xx_pcie_host_init, -}; - -static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - - return 0; -} - -static const struct irq_domain_ops 
intx_domain_ops = { - .map = dra7xx_pcie_intx_map, - .xlate = pci_irqd_intx_xlate, -}; - -static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct device *dev = pci->dev; - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - struct device_node *node = dev->of_node; - struct device_node *pcie_intc_node = of_get_next_child(node, NULL); - - if (!pcie_intc_node) { - dev_err(dev, "No PCIe Intc node found\n"); - return -ENODEV; - } - - dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, pp); - if (!dra7xx->irq_domain) { - dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return -ENODEV; - } - - return 0; -} - -static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) -{ - struct dra7xx_pcie *dra7xx = arg; - struct dw_pcie *pci = dra7xx->pci; - struct pcie_port *pp = &pci->pp; - unsigned long reg; - u32 virq, bit; - - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); - - switch (reg) { - case MSI: - dw_handle_msi_irq(pp); - break; - case INTA: - case INTB: - case INTC: - case INTD: - for_each_set_bit(bit, ®, PCI_NUM_INTX) { - virq = irq_find_mapping(dra7xx->irq_domain, bit); - if (virq) - generic_handle_irq(virq); - } - break; - } - - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); - - return IRQ_HANDLED; -} - -static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) -{ - struct dra7xx_pcie *dra7xx = arg; - struct dw_pcie *pci = dra7xx->pci; - struct device *dev = pci->dev; - struct dw_pcie_ep *ep = &pci->ep; - u32 reg; - - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); - - if (reg & ERR_SYS) - dev_dbg(dev, "System Error\n"); - - if (reg & ERR_FATAL) - dev_dbg(dev, "Fatal Error\n"); - - if (reg & ERR_NONFATAL) - dev_dbg(dev, "Non Fatal Error\n"); - - if (reg & ERR_COR) - dev_dbg(dev, "Correctable Error\n"); - - if (reg & ERR_AXI) - dev_dbg(dev, "AXI tag lookup fatal Error\n"); - - if (reg 
& ERR_ECRC) - dev_dbg(dev, "ECRC Error\n"); - - if (reg & PME_TURN_OFF) - dev_dbg(dev, - "Power Management Event Turn-Off message received\n"); - - if (reg & PME_TO_ACK) - dev_dbg(dev, - "Power Management Turn-Off Ack message received\n"); - - if (reg & PM_PME) - dev_dbg(dev, "PM Power Management Event message received\n"); - - if (reg & LINK_REQ_RST) - dev_dbg(dev, "Link Request Reset\n"); - - if (reg & LINK_UP_EVT) { - if (dra7xx->mode == DW_PCIE_EP_TYPE) - dw_pcie_ep_linkup(ep); - dev_dbg(dev, "Link-up state change\n"); - } - - if (reg & CFG_BME_EVT) - dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); - - if (reg & CFG_MSE_EVT) - dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); - - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); - - return IRQ_HANDLED; -} - -static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) -{ - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - enum pci_barno bar; - - for (bar = BAR_0; bar <= BAR_5; bar++) - dw_pcie_ep_reset_bar(pci, bar); - - dra7xx_pcie_enable_wrapper_interrupts(dra7xx); -} - -static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) -{ - dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); - mdelay(1); - dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); -} - -static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, - u8 interrupt_num) -{ - u32 reg; - - reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; - reg |= MSI_REQ_GRANT; - dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); -} - -static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u8 interrupt_num) -{ - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - - switch (type) { - case PCI_EPC_IRQ_LEGACY: - dra7xx_pcie_raise_legacy_irq(dra7xx); - break; - case PCI_EPC_IRQ_MSI: - dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); - break; - default: - 
dev_err(pci->dev, "UNKNOWN IRQ type\n"); - } - - return 0; -} - -static struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = dra7xx_pcie_ep_init, - .raise_irq = dra7xx_pcie_raise_irq, -}; - -static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, - struct platform_device *pdev) -{ - int ret; - struct dw_pcie_ep *ep; - struct resource *res; - struct device *dev = &pdev->dev; - struct dw_pcie *pci = dra7xx->pci; - - ep = &pci->ep; - ep->ops = &pcie_ep_ops; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); - pci->dbi_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->dbi_base2)) - return PTR_ERR(pci->dbi_base2); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); - if (!res) - return -EINVAL; - - ep->phys_base = res->start; - ep->addr_size = resource_size(res); - - ret = dw_pcie_ep_init(ep); - if (ret) { - dev_err(dev, "failed to initialize endpoint\n"); - return ret; - } - - return 0; -} - -static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, - struct platform_device *pdev) -{ - int ret; - struct dw_pcie *pci = dra7xx->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - struct resource *res; - - pp->irq = platform_get_irq(pdev, 1); - if (pp->irq < 0) { - dev_err(dev, "missing IRQ resource\n"); - return pp->irq; - } - - ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, - IRQF_SHARED | IRQF_NO_THREAD, - "dra7-pcie-msi", dra7xx); - if (ret) { - dev_err(dev, "failed to request irq\n"); - return ret; - } - - ret = dra7xx_pcie_init_irq_domain(pp); - if (ret < 0) - return ret; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); - pci->dbi_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - pp->ops = 
&dra7xx_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, - .start_link = dra7xx_pcie_establish_link, - .stop_link = dra7xx_pcie_stop_link, - .link_up = dra7xx_pcie_link_up, -}; - -static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) -{ - int phy_count = dra7xx->phy_count; - - while (phy_count--) { - phy_power_off(dra7xx->phy[phy_count]); - phy_exit(dra7xx->phy[phy_count]); - } -} - -static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) -{ - int phy_count = dra7xx->phy_count; - int ret; - int i; - - for (i = 0; i < phy_count; i++) { - ret = phy_init(dra7xx->phy[i]); - if (ret < 0) - goto err_phy; - - ret = phy_power_on(dra7xx->phy[i]); - if (ret < 0) { - phy_exit(dra7xx->phy[i]); - goto err_phy; - } - } - - return 0; - -err_phy: - while (--i >= 0) { - phy_power_off(dra7xx->phy[i]); - phy_exit(dra7xx->phy[i]); - } - - return ret; -} - -static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { - .mode = DW_PCIE_RC_TYPE, -}; - -static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { - .mode = DW_PCIE_EP_TYPE, -}; - -static const struct of_device_id of_dra7xx_pcie_match[] = { - { - .compatible = "ti,dra7-pcie", - .data = &dra7xx_pcie_rc_of_data, - }, - { - .compatible = "ti,dra7-pcie-ep", - .data = &dra7xx_pcie_ep_of_data, - }, - {}, -}; - -/* - * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 - * @dra7xx: the dra7xx device where the workaround should be applied - * - * Access to the PCIe slave port that are not 32-bit aligned will result - * in incorrect mapping to TLP Address and Byte enable fields. Therefore, - * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or - * 0x3. - * - * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. 
- */ -static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev) -{ - int ret; - struct device_node *np = dev->of_node; - struct of_phandle_args args; - struct regmap *regmap; - - regmap = syscon_regmap_lookup_by_phandle(np, - "ti,syscon-unaligned-access"); - if (IS_ERR(regmap)) { - dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); - return -EINVAL; - } - - ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", - 2, 0, &args); - if (ret) { - dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); - return ret; - } - - ret = regmap_update_bits(regmap, args.args[0], args.args[1], - args.args[1]); - if (ret) - dev_err(dev, "failed to enable unaligned access\n"); - - of_node_put(args.np); - - return ret; -} - -static int __init dra7xx_pcie_probe(struct platform_device *pdev) -{ - u32 reg; - int ret; - int irq; - int i; - int phy_count; - struct phy **phy; - struct device_link **link; - void __iomem *base; - struct resource *res; - struct dw_pcie *pci; - struct dra7xx_pcie *dra7xx; - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - char name[10]; - struct gpio_desc *reset; - const struct of_device_id *match; - const struct dra7xx_pcie_of_data *data; - enum dw_pcie_device_mode mode; - - match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); - if (!match) - return -EINVAL; - - data = (struct dra7xx_pcie_of_data *)match->data; - mode = (enum dw_pcie_device_mode)data->mode; - - dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); - if (!dra7xx) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "missing IRQ resource: %d\n", irq); - return irq; - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); - base = devm_ioremap_nocache(dev, res->start, resource_size(res)); - if (!base) - return -ENOMEM; - - phy_count = 
of_property_count_strings(np, "phy-names"); - if (phy_count < 0) { - dev_err(dev, "unable to find the strings\n"); - return phy_count; - } - - phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); - if (!phy) - return -ENOMEM; - - link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); - if (!link) - return -ENOMEM; - - for (i = 0; i < phy_count; i++) { - snprintf(name, sizeof(name), "pcie-phy%d", i); - phy[i] = devm_phy_get(dev, name); - if (IS_ERR(phy[i])) - return PTR_ERR(phy[i]); - - link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); - if (!link[i]) { - ret = -EINVAL; - goto err_link; - } - } - - dra7xx->base = base; - dra7xx->phy = phy; - dra7xx->pci = pci; - dra7xx->phy_count = phy_count; - - ret = dra7xx_pcie_enable_phy(dra7xx); - if (ret) { - dev_err(dev, "failed to enable phy\n"); - return ret; - } - - platform_set_drvdata(pdev, dra7xx); - - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync failed\n"); - goto err_get_sync; - } - - reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); - if (IS_ERR(reset)) { - ret = PTR_ERR(reset); - dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); - goto err_gpio; - } - - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); - reg &= ~LTSSM_EN; - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); - - dra7xx->link_gen = of_pci_get_max_link_speed(np); - if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) - dra7xx->link_gen = 2; - - switch (mode) { - case DW_PCIE_RC_TYPE: - if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { - ret = -ENODEV; - goto err_gpio; - } - - dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, - DEVICE_TYPE_RC); - ret = dra7xx_add_pcie_port(dra7xx, pdev); - if (ret < 0) - goto err_gpio; - break; - case DW_PCIE_EP_TYPE: - if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { - ret = -ENODEV; - goto err_gpio; - } - - dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, - 
DEVICE_TYPE_EP); - - ret = dra7xx_pcie_ep_unaligned_memaccess(dev); - if (ret) - goto err_gpio; - - ret = dra7xx_add_pcie_ep(dra7xx, pdev); - if (ret < 0) - goto err_gpio; - break; - default: - dev_err(dev, "INVALID device type %d\n", mode); - } - dra7xx->mode = mode; - - ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, - IRQF_SHARED, "dra7xx-pcie-main", dra7xx); - if (ret) { - dev_err(dev, "failed to request irq\n"); - goto err_gpio; - } - - return 0; - -err_gpio: - pm_runtime_put(dev); - -err_get_sync: - pm_runtime_disable(dev); - dra7xx_pcie_disable_phy(dra7xx); - -err_link: - while (--i >= 0) - device_link_del(link[i]); - - return ret; -} - -#ifdef CONFIG_PM_SLEEP -static int dra7xx_pcie_suspend(struct device *dev) -{ - struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); - struct dw_pcie *pci = dra7xx->pci; - u32 val; - - if (dra7xx->mode != DW_PCIE_RC_TYPE) - return 0; - - /* clear MSE */ - val = dw_pcie_readl_dbi(pci, PCI_COMMAND); - val &= ~PCI_COMMAND_MEMORY; - dw_pcie_writel_dbi(pci, PCI_COMMAND, val); - - return 0; -} - -static int dra7xx_pcie_resume(struct device *dev) -{ - struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); - struct dw_pcie *pci = dra7xx->pci; - u32 val; - - if (dra7xx->mode != DW_PCIE_RC_TYPE) - return 0; - - /* set MSE */ - val = dw_pcie_readl_dbi(pci, PCI_COMMAND); - val |= PCI_COMMAND_MEMORY; - dw_pcie_writel_dbi(pci, PCI_COMMAND, val); - - return 0; -} - -static int dra7xx_pcie_suspend_noirq(struct device *dev) -{ - struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); - - dra7xx_pcie_disable_phy(dra7xx); - - return 0; -} - -static int dra7xx_pcie_resume_noirq(struct device *dev) -{ - struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); - int ret; - - ret = dra7xx_pcie_enable_phy(dra7xx); - if (ret) { - dev_err(dev, "failed to enable phy\n"); - return ret; - } - - return 0; -} -#endif - -static void dra7xx_pcie_shutdown(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dra7xx_pcie *dra7xx = 
dev_get_drvdata(dev); - int ret; - - dra7xx_pcie_stop_link(dra7xx->pci); - - ret = pm_runtime_put_sync(dev); - if (ret < 0) - dev_dbg(dev, "pm_runtime_put_sync failed\n"); - - pm_runtime_disable(dev); - dra7xx_pcie_disable_phy(dra7xx); -} - -static const struct dev_pm_ops dra7xx_pcie_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, - dra7xx_pcie_resume_noirq) -}; - -static struct platform_driver dra7xx_pcie_driver = { - .driver = { - .name = "dra7-pcie", - .of_match_table = of_dra7xx_pcie_match, - .suppress_bind_attrs = true, - .pm = &dra7xx_pcie_pm_ops, - }, - .shutdown = dra7xx_pcie_shutdown, -}; -builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c deleted file mode 100644 index 4cc1e5df8c79..000000000000 --- a/drivers/pci/dwc/pci-exynos.c +++ /dev/null @@ -1,539 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for Samsung EXYNOS SoCs - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
- * http://www.samsung.com - * - * Author: Jingoo Han - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -#define to_exynos_pcie(x) dev_get_drvdata((x)->dev) - -/* PCIe ELBI registers */ -#define PCIE_IRQ_PULSE 0x000 -#define IRQ_INTA_ASSERT BIT(0) -#define IRQ_INTB_ASSERT BIT(2) -#define IRQ_INTC_ASSERT BIT(4) -#define IRQ_INTD_ASSERT BIT(6) -#define PCIE_IRQ_LEVEL 0x004 -#define PCIE_IRQ_SPECIAL 0x008 -#define PCIE_IRQ_EN_PULSE 0x00c -#define PCIE_IRQ_EN_LEVEL 0x010 -#define IRQ_MSI_ENABLE BIT(2) -#define PCIE_IRQ_EN_SPECIAL 0x014 -#define PCIE_PWR_RESET 0x018 -#define PCIE_CORE_RESET 0x01c -#define PCIE_CORE_RESET_ENABLE BIT(0) -#define PCIE_STICKY_RESET 0x020 -#define PCIE_NONSTICKY_RESET 0x024 -#define PCIE_APP_INIT_RESET 0x028 -#define PCIE_APP_LTSSM_ENABLE 0x02c -#define PCIE_ELBI_RDLH_LINKUP 0x064 -#define PCIE_ELBI_LTSSM_ENABLE 0x1 -#define PCIE_ELBI_SLV_AWMISC 0x11c -#define PCIE_ELBI_SLV_ARMISC 0x120 -#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) - -struct exynos_pcie_mem_res { - void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */ -}; - -struct exynos_pcie_clk_res { - struct clk *clk; - struct clk *bus_clk; -}; - -struct exynos_pcie { - struct dw_pcie *pci; - struct exynos_pcie_mem_res *mem_res; - struct exynos_pcie_clk_res *clk_res; - const struct exynos_pcie_ops *ops; - int reset_gpio; - - struct phy *phy; -}; - -struct exynos_pcie_ops { - int (*get_mem_resources)(struct platform_device *pdev, - struct exynos_pcie *ep); - int (*get_clk_resources)(struct exynos_pcie *ep); - int (*init_clk_resources)(struct exynos_pcie *ep); - void (*deinit_clk_resources)(struct exynos_pcie *ep); -}; - -static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, - struct exynos_pcie *ep) -{ - struct dw_pcie *pci = ep->pci; - struct device *dev = pci->dev; - struct resource *res; - - ep->mem_res = devm_kzalloc(dev, 
sizeof(*ep->mem_res), GFP_KERNEL); - if (!ep->mem_res) - return -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ep->mem_res->elbi_base = devm_ioremap_resource(dev, res); - if (IS_ERR(ep->mem_res->elbi_base)) - return PTR_ERR(ep->mem_res->elbi_base); - - return 0; -} - -static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep) -{ - struct dw_pcie *pci = ep->pci; - struct device *dev = pci->dev; - - ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL); - if (!ep->clk_res) - return -ENOMEM; - - ep->clk_res->clk = devm_clk_get(dev, "pcie"); - if (IS_ERR(ep->clk_res->clk)) { - dev_err(dev, "Failed to get pcie rc clock\n"); - return PTR_ERR(ep->clk_res->clk); - } - - ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus"); - if (IS_ERR(ep->clk_res->bus_clk)) { - dev_err(dev, "Failed to get pcie bus clock\n"); - return PTR_ERR(ep->clk_res->bus_clk); - } - - return 0; -} - -static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep) -{ - struct dw_pcie *pci = ep->pci; - struct device *dev = pci->dev; - int ret; - - ret = clk_prepare_enable(ep->clk_res->clk); - if (ret) { - dev_err(dev, "cannot enable pcie rc clock"); - return ret; - } - - ret = clk_prepare_enable(ep->clk_res->bus_clk); - if (ret) { - dev_err(dev, "cannot enable pcie bus clock"); - goto err_bus_clk; - } - - return 0; - -err_bus_clk: - clk_disable_unprepare(ep->clk_res->clk); - - return ret; -} - -static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep) -{ - clk_disable_unprepare(ep->clk_res->bus_clk); - clk_disable_unprepare(ep->clk_res->clk); -} - -static const struct exynos_pcie_ops exynos5440_pcie_ops = { - .get_mem_resources = exynos5440_pcie_get_mem_resources, - .get_clk_resources = exynos5440_pcie_get_clk_resources, - .init_clk_resources = exynos5440_pcie_init_clk_resources, - .deinit_clk_resources = exynos5440_pcie_deinit_clk_resources, -}; - -static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg) -{ - writel(val, base 
+ reg); -} - -static u32 exynos_pcie_readl(void __iomem *base, u32 reg) -{ - return readl(base + reg); -} - -static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on) -{ - u32 val; - - val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC); - if (on) - val |= PCIE_ELBI_SLV_DBI_ENABLE; - else - val &= ~PCIE_ELBI_SLV_DBI_ENABLE; - exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC); -} - -static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on) -{ - u32 val; - - val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC); - if (on) - val |= PCIE_ELBI_SLV_DBI_ENABLE; - else - val &= ~PCIE_ELBI_SLV_DBI_ENABLE; - exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC); -} - -static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep) -{ - u32 val; - - val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET); - val &= ~PCIE_CORE_RESET_ENABLE; - exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET); - exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET); - exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET); - exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET); -} - -static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep) -{ - u32 val; - - val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET); - val |= PCIE_CORE_RESET_ENABLE; - - exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET); - exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET); - exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET); - exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET); - exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET); -} - -static void exynos_pcie_assert_reset(struct exynos_pcie *ep) -{ - struct dw_pcie *pci = ep->pci; - struct device *dev = pci->dev; - - if (ep->reset_gpio >= 0) - devm_gpio_request_one(dev, ep->reset_gpio, - 
GPIOF_OUT_INIT_HIGH, "RESET"); -} - -static int exynos_pcie_establish_link(struct exynos_pcie *ep) -{ - struct dw_pcie *pci = ep->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - - if (dw_pcie_link_up(pci)) { - dev_err(dev, "Link already up\n"); - return 0; - } - - exynos_pcie_assert_core_reset(ep); - - phy_reset(ep->phy); - - exynos_pcie_writel(ep->mem_res->elbi_base, 1, - PCIE_PWR_RESET); - - phy_power_on(ep->phy); - phy_init(ep->phy); - - exynos_pcie_deassert_core_reset(ep); - dw_pcie_setup_rc(pp); - exynos_pcie_assert_reset(ep); - - /* assert LTSSM enable */ - exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE, - PCIE_APP_LTSSM_ENABLE); - - /* check if the link is up or not */ - if (!dw_pcie_wait_for_link(pci)) - return 0; - - phy_power_off(ep->phy); - return -ETIMEDOUT; -} - -static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep) -{ - u32 val; - - val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE); - exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE); -} - -static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep) -{ - u32 val; - - /* enable INTX interrupt */ - val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | - IRQ_INTC_ASSERT | IRQ_INTD_ASSERT; - exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE); -} - -static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) -{ - struct exynos_pcie *ep = arg; - - exynos_pcie_clear_irq_pulse(ep); - return IRQ_HANDLED; -} - -static void exynos_pcie_msi_init(struct exynos_pcie *ep) -{ - struct dw_pcie *pci = ep->pci; - struct pcie_port *pp = &pci->pp; - u32 val; - - dw_pcie_msi_init(pp); - - /* enable MSI interrupt */ - val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL); - val |= IRQ_MSI_ENABLE; - exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL); -} - -static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep) -{ - exynos_pcie_enable_irq_pulse(ep); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - 
exynos_pcie_msi_init(ep); -} - -static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, - u32 reg, size_t size) -{ - struct exynos_pcie *ep = to_exynos_pcie(pci); - u32 val; - - exynos_pcie_sideband_dbi_r_mode(ep, true); - dw_pcie_read(base + reg, size, &val); - exynos_pcie_sideband_dbi_r_mode(ep, false); - return val; -} - -static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, - u32 reg, size_t size, u32 val) -{ - struct exynos_pcie *ep = to_exynos_pcie(pci); - - exynos_pcie_sideband_dbi_w_mode(ep, true); - dw_pcie_write(base + reg, size, val); - exynos_pcie_sideband_dbi_w_mode(ep, false); -} - -static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, - u32 *val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct exynos_pcie *ep = to_exynos_pcie(pci); - int ret; - - exynos_pcie_sideband_dbi_r_mode(ep, true); - ret = dw_pcie_read(pci->dbi_base + where, size, val); - exynos_pcie_sideband_dbi_r_mode(ep, false); - return ret; -} - -static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, - u32 val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct exynos_pcie *ep = to_exynos_pcie(pci); - int ret; - - exynos_pcie_sideband_dbi_w_mode(ep, true); - ret = dw_pcie_write(pci->dbi_base + where, size, val); - exynos_pcie_sideband_dbi_w_mode(ep, false); - return ret; -} - -static int exynos_pcie_link_up(struct dw_pcie *pci) -{ - struct exynos_pcie *ep = to_exynos_pcie(pci); - u32 val; - - val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP); - if (val == PCIE_ELBI_LTSSM_ENABLE) - return 1; - - return 0; -} - -static int exynos_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct exynos_pcie *ep = to_exynos_pcie(pci); - - exynos_pcie_establish_link(ep); - exynos_pcie_enable_interrupts(ep); - - return 0; -} - -static const struct dw_pcie_host_ops exynos_pcie_host_ops = { - .rd_own_conf = exynos_pcie_rd_own_conf, - 
.wr_own_conf = exynos_pcie_wr_own_conf, - .host_init = exynos_pcie_host_init, -}; - -static int __init exynos_add_pcie_port(struct exynos_pcie *ep, - struct platform_device *pdev) -{ - struct dw_pcie *pci = ep->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - - pp->irq = platform_get_irq(pdev, 1); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq\n"); - return pp->irq; - } - ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, - IRQF_SHARED, "exynos-pcie", ep); - if (ret) { - dev_err(dev, "failed to request irq\n"); - return ret; - } - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get msi irq\n"); - return pp->msi_irq; - } - } - - pp->root_bus_nr = -1; - pp->ops = &exynos_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .read_dbi = exynos_pcie_read_dbi, - .write_dbi = exynos_pcie_write_dbi, - .link_up = exynos_pcie_link_up, -}; - -static int __init exynos_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct exynos_pcie *ep; - struct device_node *np = dev->of_node; - int ret; - - ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); - if (!ep) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - ep->pci = pci; - ep->ops = (const struct exynos_pcie_ops *) - of_device_get_match_data(dev); - - ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); - - ep->phy = devm_of_phy_get(dev, np, NULL); - if (IS_ERR(ep->phy)) { - if (PTR_ERR(ep->phy) == -EPROBE_DEFER) - return PTR_ERR(ep->phy); - - ep->phy = NULL; - } - - if (ep->ops && ep->ops->get_mem_resources) { - ret = ep->ops->get_mem_resources(pdev, ep); - if (ret) - return ret; - } - - if 
(ep->ops && ep->ops->get_clk_resources && - ep->ops->init_clk_resources) { - ret = ep->ops->get_clk_resources(ep); - if (ret) - return ret; - ret = ep->ops->init_clk_resources(ep); - if (ret) - return ret; - } - - platform_set_drvdata(pdev, ep); - - ret = exynos_add_pcie_port(ep, pdev); - if (ret < 0) - goto fail_probe; - - return 0; - -fail_probe: - phy_exit(ep->phy); - - if (ep->ops && ep->ops->deinit_clk_resources) - ep->ops->deinit_clk_resources(ep); - return ret; -} - -static int __exit exynos_pcie_remove(struct platform_device *pdev) -{ - struct exynos_pcie *ep = platform_get_drvdata(pdev); - - if (ep->ops && ep->ops->deinit_clk_resources) - ep->ops->deinit_clk_resources(ep); - - return 0; -} - -static const struct of_device_id exynos_pcie_of_match[] = { - { - .compatible = "samsung,exynos5440-pcie", - .data = &exynos5440_pcie_ops - }, - {}, -}; - -static struct platform_driver exynos_pcie_driver = { - .remove = __exit_p(exynos_pcie_remove), - .driver = { - .name = "exynos-pcie", - .of_match_table = exynos_pcie_of_match, - }, -}; - -/* Exynos PCIe driver does not allow module unload */ - -static int __init exynos_pcie_init(void) -{ - return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); -} -subsys_initcall(exynos_pcie_init); diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c deleted file mode 100644 index 80f604602783..000000000000 --- a/drivers/pci/dwc/pci-imx6.c +++ /dev/null @@ -1,871 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for Freescale i.MX6 SoCs - * - * Copyright (C) 2013 Kosagi - * http://www.kosagi.com - * - * Author: Sean Cross - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -#define to_imx6_pcie(x) dev_get_drvdata((x)->dev) - -enum imx6_pcie_variants { - IMX6Q, - IMX6SX, - IMX6QP, - IMX7D, -}; - 
-struct imx6_pcie { - struct dw_pcie *pci; - int reset_gpio; - bool gpio_active_high; - struct clk *pcie_bus; - struct clk *pcie_phy; - struct clk *pcie_inbound_axi; - struct clk *pcie; - struct regmap *iomuxc_gpr; - struct reset_control *pciephy_reset; - struct reset_control *apps_reset; - enum imx6_pcie_variants variant; - u32 tx_deemph_gen1; - u32 tx_deemph_gen2_3p5db; - u32 tx_deemph_gen2_6db; - u32 tx_swing_full; - u32 tx_swing_low; - int link_gen; - struct regulator *vpcie; -}; - -/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ -#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 -#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50 -#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 - -/* PCIe Root Complex registers (memory-mapped) */ -#define PCIE_RC_LCR 0x7c -#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 -#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 -#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf - -#define PCIE_RC_LCSR 0x80 - -/* PCIe Port Logic registers (memory-mapped) */ -#define PL_OFFSET 0x700 -#define PCIE_PL_PFLR (PL_OFFSET + 0x08) -#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) -#define PCIE_PL_PFLR_FORCE_LINK (1 << 15) -#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) -#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) -#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29) -#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4) - -#define PCIE_PHY_CTRL (PL_OFFSET + 0x114) -#define PCIE_PHY_CTRL_DATA_LOC 0 -#define PCIE_PHY_CTRL_CAP_ADR_LOC 16 -#define PCIE_PHY_CTRL_CAP_DAT_LOC 17 -#define PCIE_PHY_CTRL_WR_LOC 18 -#define PCIE_PHY_CTRL_RD_LOC 19 - -#define PCIE_PHY_STAT (PL_OFFSET + 0x110) -#define PCIE_PHY_STAT_ACK_LOC 16 - -#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C -#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) - -/* PHY registers (not memory-mapped) */ -#define PCIE_PHY_RX_ASIC_OUT 0x100D -#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) - -#define PHY_RX_OVRD_IN_LO 0x1005 -#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) -#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) - -static 
int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) -{ - struct dw_pcie *pci = imx6_pcie->pci; - u32 val; - u32 max_iterations = 10; - u32 wait_counter = 0; - - do { - val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); - val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; - wait_counter++; - - if (val == exp_val) - return 0; - - udelay(1); - } while (wait_counter < max_iterations); - - return -ETIMEDOUT; -} - -static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) -{ - struct dw_pcie *pci = imx6_pcie->pci; - u32 val; - int ret; - - val = addr << PCIE_PHY_CTRL_DATA_LOC; - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); - - val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); - - ret = pcie_phy_poll_ack(imx6_pcie, 1); - if (ret) - return ret; - - val = addr << PCIE_PHY_CTRL_DATA_LOC; - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); - - return pcie_phy_poll_ack(imx6_pcie, 0); -} - -/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ -static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) -{ - struct dw_pcie *pci = imx6_pcie->pci; - u32 val, phy_ctl; - int ret; - - ret = pcie_phy_wait_ack(imx6_pcie, addr); - if (ret) - return ret; - - /* assert Read signal */ - phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); - - ret = pcie_phy_poll_ack(imx6_pcie, 1); - if (ret) - return ret; - - val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); - *data = val & 0xffff; - - /* deassert Read signal */ - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); - - return pcie_phy_poll_ack(imx6_pcie, 0); -} - -static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) -{ - struct dw_pcie *pci = imx6_pcie->pci; - u32 var; - int ret; - - /* write addr */ - /* cap addr */ - ret = pcie_phy_wait_ack(imx6_pcie, addr); - if (ret) - return ret; - - var = data << PCIE_PHY_CTRL_DATA_LOC; - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); - - /* capture data */ - var |= (0x1 << 
PCIE_PHY_CTRL_CAP_DAT_LOC); - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); - - ret = pcie_phy_poll_ack(imx6_pcie, 1); - if (ret) - return ret; - - /* deassert cap data */ - var = data << PCIE_PHY_CTRL_DATA_LOC; - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); - - /* wait for ack de-assertion */ - ret = pcie_phy_poll_ack(imx6_pcie, 0); - if (ret) - return ret; - - /* assert wr signal */ - var = 0x1 << PCIE_PHY_CTRL_WR_LOC; - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); - - /* wait for ack */ - ret = pcie_phy_poll_ack(imx6_pcie, 1); - if (ret) - return ret; - - /* deassert wr signal */ - var = data << PCIE_PHY_CTRL_DATA_LOC; - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); - - /* wait for ack de-assertion */ - ret = pcie_phy_poll_ack(imx6_pcie, 0); - if (ret) - return ret; - - dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0); - - return 0; -} - -static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) -{ - u32 tmp; - - pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); - tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | - PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); - - usleep_range(2000, 3000); - - pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); - tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | - PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); -} - -/* Added for PCI abort handling */ -static int imx6q_pcie_abort_handler(unsigned long addr, - unsigned int fsr, struct pt_regs *regs) -{ - unsigned long pc = instruction_pointer(regs); - unsigned long instr = *(unsigned long *)pc; - int reg = (instr >> 12) & 15; - - /* - * If the instruction being executed was a read, - * make it look like it read all-ones. 
- */ - if ((instr & 0x0c100000) == 0x04100000) { - unsigned long val; - - if (instr & 0x00400000) - val = 255; - else - val = -1; - - regs->uregs[reg] = val; - regs->ARM_pc += 4; - return 0; - } - - if ((instr & 0x0e100090) == 0x00100090) { - regs->uregs[reg] = -1; - regs->ARM_pc += 4; - return 0; - } - - return 1; -} - -static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) -{ - struct device *dev = imx6_pcie->pci->dev; - - switch (imx6_pcie->variant) { - case IMX7D: - reset_control_assert(imx6_pcie->pciephy_reset); - reset_control_assert(imx6_pcie->apps_reset); - break; - case IMX6SX: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_TEST_POWERDOWN, - IMX6SX_GPR12_PCIE_TEST_POWERDOWN); - /* Force PCIe PHY reset */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, - IMX6SX_GPR5_PCIE_BTNRST_RESET, - IMX6SX_GPR5_PCIE_BTNRST_RESET); - break; - case IMX6QP: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_SW_RST, - IMX6Q_GPR1_PCIE_SW_RST); - break; - case IMX6Q: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); - break; - } - - if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { - int ret = regulator_disable(imx6_pcie->vpcie); - - if (ret) - dev_err(dev, "failed to disable vpcie regulator: %d\n", - ret); - } -} - -static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) -{ - struct dw_pcie *pci = imx6_pcie->pci; - struct device *dev = pci->dev; - int ret = 0; - - switch (imx6_pcie->variant) { - case IMX6SX: - ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); - if (ret) { - dev_err(dev, "unable to enable pcie_axi clock\n"); - break; - } - - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); - break; - case IMX6QP: /* FALLTHROUGH */ - case IMX6Q: - /* power up core phy and enable ref 
clock */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); - /* - * the async reset input need ref clock to sync internally, - * when the ref clock comes after reset, internal synced - * reset time is too short, cannot meet the requirement. - * add one ~10us delay here. - */ - udelay(10); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); - break; - case IMX7D: - break; - } - - return ret; -} - -static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) -{ - u32 val; - unsigned int retries; - struct device *dev = imx6_pcie->pci->dev; - - for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) { - regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val); - - if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED) - return; - - usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN, - PHY_PLL_LOCK_WAIT_USLEEP_MAX); - } - - dev_err(dev, "PCIe PLL lock timeout\n"); -} - -static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) -{ - struct dw_pcie *pci = imx6_pcie->pci; - struct device *dev = pci->dev; - int ret; - - if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) { - ret = regulator_enable(imx6_pcie->vpcie); - if (ret) { - dev_err(dev, "failed to enable vpcie regulator: %d\n", - ret); - return; - } - } - - ret = clk_prepare_enable(imx6_pcie->pcie_phy); - if (ret) { - dev_err(dev, "unable to enable pcie_phy clock\n"); - goto err_pcie_phy; - } - - ret = clk_prepare_enable(imx6_pcie->pcie_bus); - if (ret) { - dev_err(dev, "unable to enable pcie_bus clock\n"); - goto err_pcie_bus; - } - - ret = clk_prepare_enable(imx6_pcie->pcie); - if (ret) { - dev_err(dev, "unable to enable pcie clock\n"); - goto err_pcie; - } - - ret = imx6_pcie_enable_ref_clk(imx6_pcie); - if (ret) { - dev_err(dev, "unable to enable pcie ref clock\n"); - goto err_ref_clk; - } - - /* allow the clocks to stabilize */ - usleep_range(200, 500); - - /* Some boards don't have PCIe reset 
GPIO. */ - if (gpio_is_valid(imx6_pcie->reset_gpio)) { - gpio_set_value_cansleep(imx6_pcie->reset_gpio, - imx6_pcie->gpio_active_high); - msleep(100); - gpio_set_value_cansleep(imx6_pcie->reset_gpio, - !imx6_pcie->gpio_active_high); - } - - switch (imx6_pcie->variant) { - case IMX7D: - reset_control_deassert(imx6_pcie->pciephy_reset); - imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); - break; - case IMX6SX: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, - IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); - break; - case IMX6QP: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_SW_RST, 0); - - usleep_range(200, 500); - break; - case IMX6Q: /* Nothing to do */ - break; - } - - return; - -err_ref_clk: - clk_disable_unprepare(imx6_pcie->pcie); -err_pcie: - clk_disable_unprepare(imx6_pcie->pcie_bus); -err_pcie_bus: - clk_disable_unprepare(imx6_pcie->pcie_phy); -err_pcie_phy: - if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { - ret = regulator_disable(imx6_pcie->vpcie); - if (ret) - dev_err(dev, "failed to disable vpcie regulator: %d\n", - ret); - } -} - -static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) -{ - switch (imx6_pcie->variant) { - case IMX7D: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); - break; - case IMX6SX: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_RX_EQ_MASK, - IMX6SX_GPR12_PCIE_RX_EQ_2); - /* FALLTHROUGH */ - default: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); - - /* configure constant input signal to the pcie ctrl and phy */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_LOS_LEVEL, 9 << 4); - - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_DEEMPH_GEN1, - imx6_pcie->tx_deemph_gen1 << 0); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, - imx6_pcie->tx_deemph_gen2_3p5db << 6); - 
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, - imx6_pcie->tx_deemph_gen2_6db << 12); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_SWING_FULL, - imx6_pcie->tx_swing_full << 18); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, - IMX6Q_GPR8_TX_SWING_LOW, - imx6_pcie->tx_swing_low << 25); - break; - } - - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); -} - -static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) -{ - struct dw_pcie *pci = imx6_pcie->pci; - struct device *dev = pci->dev; - - /* check if the link is up or not */ - if (!dw_pcie_wait_for_link(pci)) - return 0; - - dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", - dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), - dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); - return -ETIMEDOUT; -} - -static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) -{ - struct dw_pcie *pci = imx6_pcie->pci; - struct device *dev = pci->dev; - u32 tmp; - unsigned int retries; - - for (retries = 0; retries < 200; retries++) { - tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); - /* Test if the speed change finished. */ - if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) - return 0; - usleep_range(100, 1000); - } - - dev_err(dev, "Speed change timeout\n"); - return -EINVAL; -} - -static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) -{ - struct dw_pcie *pci = imx6_pcie->pci; - struct device *dev = pci->dev; - u32 tmp; - int ret; - - /* - * Force Gen1 operation when starting the link. In case the link is - * started in Gen2 mode, there is a possibility the devices on the - * bus will not be detected at all. This happens with PCIe switches. - */ - tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); - tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; - tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1; - dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); - - /* Start LTSSM. 
*/ - if (imx6_pcie->variant == IMX7D) - reset_control_deassert(imx6_pcie->apps_reset); - else - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); - - ret = imx6_pcie_wait_for_link(imx6_pcie); - if (ret) - goto err_reset_phy; - - if (imx6_pcie->link_gen == 2) { - /* Allow Gen2 mode after the link is up. */ - tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); - tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; - tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; - dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); - - /* - * Start Directed Speed Change so the best possible - * speed both link partners support can be negotiated. - */ - tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); - tmp |= PORT_LOGIC_SPEED_CHANGE; - dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); - - if (imx6_pcie->variant != IMX7D) { - /* - * On i.MX7, DIRECT_SPEED_CHANGE behaves differently - * from i.MX6 family when no link speed transition - * occurs and we go Gen1 -> yep, Gen1. The difference - * is that, in such case, it will not be cleared by HW - * which will cause the following code to report false - * failure. - */ - - ret = imx6_pcie_wait_for_speed_change(imx6_pcie); - if (ret) { - dev_err(dev, "Failed to bring link up!\n"); - goto err_reset_phy; - } - } - - /* Make sure link training is finished as well! 
*/ - ret = imx6_pcie_wait_for_link(imx6_pcie); - if (ret) { - dev_err(dev, "Failed to bring link up!\n"); - goto err_reset_phy; - } - } else { - dev_info(dev, "Link: Gen2 disabled\n"); - } - - tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR); - dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); - return 0; - -err_reset_phy: - dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", - dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), - dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); - imx6_pcie_reset_phy(imx6_pcie); - return ret; -} - -static int imx6_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); - - imx6_pcie_assert_core_reset(imx6_pcie); - imx6_pcie_init_phy(imx6_pcie); - imx6_pcie_deassert_core_reset(imx6_pcie); - dw_pcie_setup_rc(pp); - imx6_pcie_establish_link(imx6_pcie); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - dw_pcie_msi_init(pp); - - return 0; -} - -static int imx6_pcie_link_up(struct dw_pcie *pci) -{ - return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) & - PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; -} - -static const struct dw_pcie_host_ops imx6_pcie_host_ops = { - .host_init = imx6_pcie_host_init, -}; - -static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = imx6_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq <= 0) { - dev_err(dev, "failed to get MSI irq\n"); - return -ENODEV; - } - } - - pp->root_bus_nr = -1; - pp->ops = &imx6_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = imx6_pcie_link_up, -}; - -static int imx6_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct 
imx6_pcie *imx6_pcie; - struct resource *dbi_base; - struct device_node *node = dev->of_node; - int ret; - - imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); - if (!imx6_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - imx6_pcie->pci = pci; - imx6_pcie->variant = - (enum imx6_pcie_variants)of_device_get_match_data(dev); - - dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pci->dbi_base = devm_ioremap_resource(dev, dbi_base); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - /* Fetch GPIOs */ - imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0); - imx6_pcie->gpio_active_high = of_property_read_bool(node, - "reset-gpio-active-high"); - if (gpio_is_valid(imx6_pcie->reset_gpio)) { - ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio, - imx6_pcie->gpio_active_high ? - GPIOF_OUT_INIT_HIGH : - GPIOF_OUT_INIT_LOW, - "PCIe reset"); - if (ret) { - dev_err(dev, "unable to get reset gpio\n"); - return ret; - } - } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) { - return imx6_pcie->reset_gpio; - } - - /* Fetch clocks */ - imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); - if (IS_ERR(imx6_pcie->pcie_phy)) { - dev_err(dev, "pcie_phy clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->pcie_phy); - } - - imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); - if (IS_ERR(imx6_pcie->pcie_bus)) { - dev_err(dev, "pcie_bus clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->pcie_bus); - } - - imx6_pcie->pcie = devm_clk_get(dev, "pcie"); - if (IS_ERR(imx6_pcie->pcie)) { - dev_err(dev, "pcie clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->pcie); - } - - switch (imx6_pcie->variant) { - case IMX6SX: - imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, - "pcie_inbound_axi"); - if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { - dev_err(dev, "pcie_inbound_axi clock missing or 
invalid\n"); - return PTR_ERR(imx6_pcie->pcie_inbound_axi); - } - break; - case IMX7D: - imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, - "pciephy"); - if (IS_ERR(imx6_pcie->pciephy_reset)) { - dev_err(dev, "Failed to get PCIEPHY reset control\n"); - return PTR_ERR(imx6_pcie->pciephy_reset); - } - - imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, - "apps"); - if (IS_ERR(imx6_pcie->apps_reset)) { - dev_err(dev, "Failed to get PCIE APPS reset control\n"); - return PTR_ERR(imx6_pcie->apps_reset); - } - break; - default: - break; - } - - /* Grab GPR config register range */ - imx6_pcie->iomuxc_gpr = - syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); - if (IS_ERR(imx6_pcie->iomuxc_gpr)) { - dev_err(dev, "unable to find iomuxc registers\n"); - return PTR_ERR(imx6_pcie->iomuxc_gpr); - } - - /* Grab PCIe PHY Tx Settings */ - if (of_property_read_u32(node, "fsl,tx-deemph-gen1", - &imx6_pcie->tx_deemph_gen1)) - imx6_pcie->tx_deemph_gen1 = 0; - - if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", - &imx6_pcie->tx_deemph_gen2_3p5db)) - imx6_pcie->tx_deemph_gen2_3p5db = 0; - - if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", - &imx6_pcie->tx_deemph_gen2_6db)) - imx6_pcie->tx_deemph_gen2_6db = 20; - - if (of_property_read_u32(node, "fsl,tx-swing-full", - &imx6_pcie->tx_swing_full)) - imx6_pcie->tx_swing_full = 127; - - if (of_property_read_u32(node, "fsl,tx-swing-low", - &imx6_pcie->tx_swing_low)) - imx6_pcie->tx_swing_low = 127; - - /* Limit link speed */ - ret = of_property_read_u32(node, "fsl,max-link-speed", - &imx6_pcie->link_gen); - if (ret) - imx6_pcie->link_gen = 1; - - imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); - if (IS_ERR(imx6_pcie->vpcie)) { - if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) - return -EPROBE_DEFER; - imx6_pcie->vpcie = NULL; - } - - platform_set_drvdata(pdev, imx6_pcie); - - ret = imx6_add_pcie_port(imx6_pcie, pdev); - if (ret < 0) - return ret; - - return 0; -} - 
-static void imx6_pcie_shutdown(struct platform_device *pdev) -{ - struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev); - - /* bring down link, so bootloader gets clean state in case of reboot */ - imx6_pcie_assert_core_reset(imx6_pcie); -} - -static const struct of_device_id imx6_pcie_of_match[] = { - { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, - { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, - { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, - { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, }, - {}, -}; - -static struct platform_driver imx6_pcie_driver = { - .driver = { - .name = "imx6q-pcie", - .of_match_table = imx6_pcie_of_match, - .suppress_bind_attrs = true, - }, - .probe = imx6_pcie_probe, - .shutdown = imx6_pcie_shutdown, -}; - -static int __init imx6_pcie_init(void) -{ - /* - * Since probe() can be deferred we need to make sure that - * hook_fault_code is not called after __init memory is freed - * by kernel and since imx6q_pcie_abort_handler() is a no-op, - * we can install the handler here without risking it - * accessing some uninitialized driver state. - */ - hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, - "external abort on non-linefetch"); - - return platform_driver_register(&imx6_pcie_driver); -} -device_initcall(imx6_pcie_init); diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c deleted file mode 100644 index 0682213328e9..000000000000 --- a/drivers/pci/dwc/pci-keystone-dw.c +++ /dev/null @@ -1,484 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DesignWare application register space functions for Keystone PCI controller - * - * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
- * http://www.ti.com - * - * Author: Murali Karicheri - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" -#include "pci-keystone.h" - -/* Application register defines */ -#define LTSSM_EN_VAL 1 -#define LTSSM_STATE_MASK 0x1f -#define LTSSM_STATE_L0 0x11 -#define DBI_CS2_EN_VAL 0x20 -#define OB_XLAT_EN_VAL 2 - -/* Application registers */ -#define CMD_STATUS 0x004 -#define CFG_SETUP 0x008 -#define OB_SIZE 0x030 -#define CFG_PCIM_WIN_SZ_IDX 3 -#define CFG_PCIM_WIN_CNT 32 -#define SPACE0_REMOTE_CFG_OFFSET 0x1000 -#define OB_OFFSET_INDEX(n) (0x200 + (8 * n)) -#define OB_OFFSET_HI(n) (0x204 + (8 * n)) - -/* IRQ register defines */ -#define IRQ_EOI 0x050 -#define IRQ_STATUS 0x184 -#define IRQ_ENABLE_SET 0x188 -#define IRQ_ENABLE_CLR 0x18c - -#define MSI_IRQ 0x054 -#define MSI0_IRQ_STATUS 0x104 -#define MSI0_IRQ_ENABLE_SET 0x108 -#define MSI0_IRQ_ENABLE_CLR 0x10c -#define IRQ_STATUS 0x184 -#define MSI_IRQ_OFFSET 4 - -/* Error IRQ bits */ -#define ERR_AER BIT(5) /* ECRC error */ -#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ -#define ERR_CORR BIT(3) /* Correctable error */ -#define ERR_NONFATAL BIT(2) /* Non-fatal error */ -#define ERR_FATAL BIT(1) /* Fatal error */ -#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */ -#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ - ERR_NONFATAL | ERR_FATAL | ERR_SYS) -#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI) -#define ERR_IRQ_STATUS_RAW 0x1c0 -#define ERR_IRQ_STATUS 0x1c4 -#define ERR_IRQ_ENABLE_SET 0x1c8 -#define ERR_IRQ_ENABLE_CLR 0x1cc - -/* Config space registers */ -#define DEBUG0 0x728 - -#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) - -static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, - u32 *bit_pos) -{ - *reg_offset = offset % 8; - *bit_pos = offset >> 3; -} - -phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie 
*ks_pcie = to_keystone_pcie(pci); - - return ks_pcie->app.start + MSI_IRQ; -} - -static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset) -{ - return readl(ks_pcie->va_app_base + offset); -} - -static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val) -{ - writel(val, ks_pcie->va_app_base + offset); -} - -void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - u32 pending, vector; - int src, virq; - - pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); - - /* - * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit - * shows 1, 9, 17, 25 and so forth - */ - for (src = 0; src < 4; src++) { - if (BIT(src) & pending) { - vector = offset + (src << 3); - virq = irq_linear_revmap(pp->irq_domain, vector); - dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", - src, vector, virq); - generic_handle_irq(virq); - } - } -} - -void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp) -{ - u32 reg_offset, bit_pos; - struct keystone_pcie *ks_pcie; - struct dw_pcie *pci; - - pci = to_dw_pcie_from_pp(pp); - ks_pcie = to_keystone_pcie(pci); - update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - - ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), - BIT(bit_pos)); - ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); -} - -void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) -{ - u32 reg_offset, bit_pos; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), - BIT(bit_pos)); -} - -void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) -{ - u32 reg_offset, bit_pos; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - 
update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), - BIT(bit_pos)); -} - -int ks_dw_pcie_msi_host_init(struct pcie_port *pp) -{ - return dw_pcie_allocate_domains(pp); -} - -void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) -{ - int i; - - for (i = 0; i < PCI_NUM_INTX; i++) - ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); -} - -void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct device *dev = pci->dev; - u32 pending; - int virq; - - pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); - - if (BIT(0) & pending) { - virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); - dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq); - generic_handle_irq(virq); - } - - /* EOI the INTx interrupt */ - ks_dw_app_writel(ks_pcie, IRQ_EOI, offset); -} - -void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) -{ - ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); -} - -irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) -{ - u32 status; - - status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; - if (!status) - return IRQ_NONE; - - if (status & ERR_FATAL_IRQ) - dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n", - status); - - /* Ack the IRQ; status bits are RW1C */ - ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status); - return IRQ_HANDLED; -} - -static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) -{ -} - -static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d) -{ -} - -static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d) -{ -} - -static struct irq_chip ks_dw_pcie_legacy_irq_chip = { - .name = "Keystone-PCI-Legacy-IRQ", - .irq_ack = ks_dw_pcie_ack_legacy_irq, - .irq_mask = ks_dw_pcie_mask_legacy_irq, - .irq_unmask = ks_dw_pcie_unmask_legacy_irq, -}; - -static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain 
*d, - unsigned int irq, irq_hw_number_t hw_irq) -{ - irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, - handle_level_irq); - irq_set_chip_data(irq, d->host_data); - - return 0; -} - -static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = { - .map = ks_dw_pcie_init_legacy_irq_map, - .xlate = irq_domain_xlate_onetwocell, -}; - -/** - * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask - * registers - * - * Since modification of dbi_cs2 involves different clock domain, read the - * status back to ensure the transition is complete. - */ -static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) -{ - u32 val; - - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val); - - do { - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - } while (!(val & DBI_CS2_EN_VAL)); -} - -/** - * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode - * - * Since modification of dbi_cs2 involves different clock domain, read the - * status back to ensure the transition is complete. 
- */ -static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) -{ - u32 val; - - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val); - - do { - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - } while (val & DBI_CS2_EN_VAL); -} - -void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - u32 start = pp->mem->start, end = pp->mem->end; - int i, tr_size; - u32 val; - - /* Disable BARs for inbound access */ - ks_dw_pcie_set_dbi_mode(ks_pcie); - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); - ks_dw_pcie_clear_dbi_mode(ks_pcie); - - /* Set outbound translation size per window division */ - ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7); - - tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M; - - /* Using Direct 1:1 mapping of RC <-> PCI memory space */ - for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) { - ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1); - ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0); - start += tr_size; - } - - /* Enable OB translation */ - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val); -} - -/** - * ks_pcie_cfg_setup() - Set up configuration space address for a device - * - * @ks_pcie: ptr to keystone_pcie structure - * @bus: Bus number the device is residing on - * @devfn: device, function number info - * - * Forms and returns the address of configuration space mapped in PCIESS - * address space 0. Also configures CFG_SETUP for remote configuration space - * access. - * - * The address space has two regions to access configuration - local and remote. - * We access local region for bus 0 (as RC is attached on bus 0) and remote - * region for others with TYPE 1 access when bus > 1. 
As for device on bus = 1, - * we will do TYPE 0 access as it will be on our secondary bus (logical). - * CFG_SETUP is needed only for remote configuration access. - */ -static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus, - unsigned int devfn) -{ - u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn); - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - u32 regval; - - if (bus == 0) - return pci->dbi_base; - - regval = (bus << 16) | (device << 8) | function; - - /* - * Since Bus#1 will be a virtual bus, we need to have TYPE0 - * access only. - * TYPE 1 - */ - if (bus != 1) - regval |= BIT(24); - - ks_dw_app_writel(ks_pcie, CFG_SETUP, regval); - return pp->va_cfg0_base; -} - -int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 *val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - u8 bus_num = bus->number; - void __iomem *addr; - - addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - - return dw_pcie_read(addr + where, size, val); -} - -int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - u8 bus_num = bus->number; - void __iomem *addr; - - addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - - return dw_pcie_write(addr + where, size, val); -} - -/** - * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization - * - * This sets BAR0 to enable inbound access for MSI_IRQ register - */ -void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - /* Configure and set up BAR0 */ - ks_dw_pcie_set_dbi_mode(ks_pcie); - - /* Enable BAR0 */ - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); - 
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); - - ks_dw_pcie_clear_dbi_mode(ks_pcie); - - /* - * For BAR0, just setting bus address for inbound writes (MSI) should - * be sufficient. Use physical address to avoid any conflicts. - */ - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); -} - -/** - * ks_dw_pcie_link_up() - Check if link up - */ -int ks_dw_pcie_link_up(struct dw_pcie *pci) -{ - u32 val; - - val = dw_pcie_readl_dbi(pci, DEBUG0); - return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0; -} - -void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) -{ - u32 val; - - /* Disable Link training */ - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - val &= ~LTSSM_EN_VAL; - ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); - - /* Initiate Link Training */ - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); -} - -/** - * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware - * - * Ioremap the register resources, initialize legacy irq domain - * and call dw_pcie_v3_65_host_init() API to initialize the Keystone - * PCI host controller. - */ -int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, - struct device_node *msi_intc_np) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - struct platform_device *pdev = to_platform_device(dev); - struct resource *res; - - /* Index 0 is the config reg. space address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - /* - * We set these same and is used in pcie rd/wr_other_conf - * functions - */ - pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; - pp->va_cfg1_base = pp->va_cfg0_base; - - /* Index 1 is the application reg. 
space address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - ks_pcie->va_app_base = devm_ioremap_resource(dev, res); - if (IS_ERR(ks_pcie->va_app_base)) - return PTR_ERR(ks_pcie->va_app_base); - - ks_pcie->app = *res; - - /* Create legacy IRQ domain */ - ks_pcie->legacy_irq_domain = - irq_domain_add_linear(ks_pcie->legacy_intc_np, - PCI_NUM_INTX, - &ks_dw_pcie_legacy_irq_domain_ops, - NULL); - if (!ks_pcie->legacy_irq_domain) { - dev_err(dev, "Failed to add irq domain for legacy irqs\n"); - return -EINVAL; - } - - return dw_pcie_host_init(pp); -} diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c deleted file mode 100644 index 3722a5f31e5e..000000000000 --- a/drivers/pci/dwc/pci-keystone.c +++ /dev/null @@ -1,457 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for Texas Instruments Keystone SoCs - * - * Copyright (C) 2013-2014 Texas Instruments., Ltd. - * http://www.ti.com - * - * Author: Murali Karicheri - * Implementation based on pci-exynos.c and pcie-designware.c - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" -#include "pci-keystone.h" - -#define DRIVER_NAME "keystone-pcie" - -/* DEV_STAT_CTRL */ -#define PCIE_CAP_BASE 0x70 - -/* PCIE controller device IDs */ -#define PCIE_RC_K2HK 0xb008 -#define PCIE_RC_K2E 0xb009 -#define PCIE_RC_K2L 0xb00a - -#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) - -static void quirk_limit_mrrs(struct pci_dev *dev) -{ - struct pci_bus *bus = dev->bus; - struct pci_dev *bridge = bus->self; - static const struct pci_device_id rc_pci_devids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), - .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, - { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E), - .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, - { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), - .class = 
PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, - { 0, }, - }; - - if (pci_is_root_bus(bus)) - return; - - /* look for the host bridge */ - while (!pci_is_root_bus(bus)) { - bridge = bus->self; - bus = bus->parent; - } - - if (bridge) { - /* - * Keystone PCI controller has a h/w limitation of - * 256 bytes maximum read request size. It can't handle - * anything higher than this. So force this limit on - * all downstream devices. - */ - if (pci_match_id(rc_pci_devids, bridge)) { - if (pcie_get_readrq(dev) > 256) { - dev_info(&dev->dev, "limiting MRRS to 256\n"); - pcie_set_readrq(dev, 256); - } - } - } -} -DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); - -static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - unsigned int retries; - - dw_pcie_setup_rc(pp); - - if (dw_pcie_link_up(pci)) { - dev_info(dev, "Link already up\n"); - return 0; - } - - /* check if the link is up or not */ - for (retries = 0; retries < 5; retries++) { - ks_dw_pcie_initiate_link_train(ks_pcie); - if (!dw_pcie_wait_for_link(pci)) - return 0; - } - - dev_err(dev, "phy link never came up\n"); - return -ETIMEDOUT; -} - -static void ks_pcie_msi_irq_handler(struct irq_desc *desc) -{ - unsigned int irq = irq_desc_get_irq(desc); - struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); - u32 offset = irq - ks_pcie->msi_host_irqs[0]; - struct dw_pcie *pci = ks_pcie->pci; - struct device *dev = pci->dev; - struct irq_chip *chip = irq_desc_get_chip(desc); - - dev_dbg(dev, "%s, irq %d\n", __func__, irq); - - /* - * The chained irq handler installation would have replaced normal - * interrupt driver handler so we need to take care of mask/unmask and - * ack operation. 
- */ - chained_irq_enter(chip, desc); - ks_dw_pcie_handle_msi_irq(ks_pcie, offset); - chained_irq_exit(chip, desc); -} - -/** - * ks_pcie_legacy_irq_handler() - Handle legacy interrupt - * @irq: IRQ line for legacy interrupts - * @desc: Pointer to irq descriptor - * - * Traverse through pending legacy interrupts and invoke handler for each. Also - * takes care of interrupt controller level mask/ack operation. - */ -static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) -{ - unsigned int irq = irq_desc_get_irq(desc); - struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); - struct dw_pcie *pci = ks_pcie->pci; - struct device *dev = pci->dev; - u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; - struct irq_chip *chip = irq_desc_get_chip(desc); - - dev_dbg(dev, ": Handling legacy irq %d\n", irq); - - /* - * The chained irq handler installation would have replaced normal - * interrupt driver handler so we need to take care of mask/unmask and - * ack operation. - */ - chained_irq_enter(chip, desc); - ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset); - chained_irq_exit(chip, desc); -} - -static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, - char *controller, int *num_irqs) -{ - int temp, max_host_irqs, legacy = 1, *host_irqs; - struct device *dev = ks_pcie->pci->dev; - struct device_node *np_pcie = dev->of_node, **np_temp; - - if (!strcmp(controller, "msi-interrupt-controller")) - legacy = 0; - - if (legacy) { - np_temp = &ks_pcie->legacy_intc_np; - max_host_irqs = PCI_NUM_INTX; - host_irqs = &ks_pcie->legacy_host_irqs[0]; - } else { - np_temp = &ks_pcie->msi_intc_np; - max_host_irqs = MAX_MSI_HOST_IRQS; - host_irqs = &ks_pcie->msi_host_irqs[0]; - } - - /* interrupt controller is in a child node */ - *np_temp = of_get_child_by_name(np_pcie, controller); - if (!(*np_temp)) { - dev_err(dev, "Node for %s is absent\n", controller); - return -EINVAL; - } - - temp = of_irq_count(*np_temp); - if (!temp) { - dev_err(dev, "No IRQ 
entries in %s\n", controller); - of_node_put(*np_temp); - return -EINVAL; - } - - if (temp > max_host_irqs) - dev_warn(dev, "Too many %s interrupts defined %u\n", - (legacy ? "legacy" : "MSI"), temp); - - /* - * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to - * 7 (MSI) - */ - for (temp = 0; temp < max_host_irqs; temp++) { - host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); - if (!host_irqs[temp]) - break; - } - - of_node_put(*np_temp); - - if (temp) { - *num_irqs = temp; - return 0; - } - - return -EINVAL; -} - -static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) -{ - int i; - - /* Legacy IRQ */ - for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { - irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], - ks_pcie_legacy_irq_handler, - ks_pcie); - } - ks_dw_pcie_enable_legacy_irqs(ks_pcie); - - /* MSI IRQ */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { - irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], - ks_pcie_msi_irq_handler, - ks_pcie); - } - } - - if (ks_pcie->error_irq > 0) - ks_dw_pcie_enable_error_irq(ks_pcie); -} - -/* - * When a PCI device does not exist during config cycles, keystone host gets a - * bus error instead of returning 0xffffffff. This handler always returns 0 - * for this kind of faults. 
- */ -static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, - struct pt_regs *regs) -{ - unsigned long instr = *(unsigned long *) instruction_pointer(regs); - - if ((instr & 0x0e100090) == 0x00100090) { - int reg = (instr >> 12) & 15; - - regs->uregs[reg] = -1; - regs->ARM_pc += 4; - } - - return 0; -} - -static int __init ks_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - u32 val; - - ks_pcie_establish_link(ks_pcie); - ks_dw_pcie_setup_rc_app_regs(ks_pcie); - ks_pcie_setup_interrupts(ks_pcie); - writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), - pci->dbi_base + PCI_IO_BASE); - - /* update the Vendor ID */ - writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID); - - /* update the DEV_STAT_CTRL to publish right mrrs */ - val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); - val &= ~PCI_EXP_DEVCTL_READRQ; - /* set the mrrs to 256 bytes */ - val |= BIT(12); - writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); - - /* - * PCIe access errors that result into OCP errors are caught by ARM as - * "External aborts" - */ - hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, - "Asynchronous external abort"); - - return 0; -} - -static const struct dw_pcie_host_ops keystone_pcie_host_ops = { - .rd_other_conf = ks_dw_pcie_rd_other_conf, - .wr_other_conf = ks_dw_pcie_wr_other_conf, - .host_init = ks_pcie_host_init, - .msi_set_irq = ks_dw_pcie_msi_set_irq, - .msi_clear_irq = ks_dw_pcie_msi_clear_irq, - .get_msi_addr = ks_dw_pcie_get_msi_addr, - .msi_host_init = ks_dw_pcie_msi_host_init, - .msi_irq_ack = ks_dw_pcie_msi_irq_ack, - .scan_bus = ks_dw_pcie_v3_65_scan_bus, -}; - -static irqreturn_t pcie_err_irq_handler(int irq, void *priv) -{ - struct keystone_pcie *ks_pcie = priv; - - return ks_dw_pcie_handle_error_irq(ks_pcie); -} - -static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, - struct platform_device *pdev) -{ - 
struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - - ret = ks_pcie_get_irq_controller_info(ks_pcie, - "legacy-interrupt-controller", - &ks_pcie->num_legacy_host_irqs); - if (ret) - return ret; - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - ret = ks_pcie_get_irq_controller_info(ks_pcie, - "msi-interrupt-controller", - &ks_pcie->num_msi_host_irqs); - if (ret) - return ret; - } - - /* - * Index 0 is the platform interrupt for error interrupt - * from RC. This is optional. - */ - ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); - if (ks_pcie->error_irq <= 0) - dev_info(dev, "no error IRQ defined\n"); - else { - ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler, - IRQF_SHARED, "pcie-error-irq", ks_pcie); - if (ret < 0) { - dev_err(dev, "failed to request error IRQ %d\n", - ks_pcie->error_irq); - return ret; - } - } - - pp->root_bus_nr = -1; - pp->ops = &keystone_pcie_host_ops; - ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct of_device_id ks_pcie_of_match[] = { - { - .type = "pci", - .compatible = "ti,keystone-pcie", - }, - { }, -}; - -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = ks_dw_pcie_link_up, -}; - -static int __exit ks_pcie_remove(struct platform_device *pdev) -{ - struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); - - clk_disable_unprepare(ks_pcie->clk); - - return 0; -} - -static int __init ks_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct keystone_pcie *ks_pcie; - struct resource *res; - void __iomem *reg_p; - struct phy *phy; - int ret; - - ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); - if (!ks_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - 
ks_pcie->pci = pci; - - /* initialize SerDes Phy if present */ - phy = devm_phy_get(dev, "pcie-phy"); - if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) - return PTR_ERR(phy); - - if (!IS_ERR_OR_NULL(phy)) { - ret = phy_init(phy); - if (ret < 0) - return ret; - } - - /* index 2 is to read PCI DEVICE_ID */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - reg_p = devm_ioremap_resource(dev, res); - if (IS_ERR(reg_p)) - return PTR_ERR(reg_p); - ks_pcie->device_id = readl(reg_p) >> 16; - devm_iounmap(dev, reg_p); - devm_release_mem_region(dev, res->start, resource_size(res)); - - ks_pcie->np = dev->of_node; - platform_set_drvdata(pdev, ks_pcie); - ks_pcie->clk = devm_clk_get(dev, "pcie"); - if (IS_ERR(ks_pcie->clk)) { - dev_err(dev, "Failed to get pcie rc clock\n"); - return PTR_ERR(ks_pcie->clk); - } - ret = clk_prepare_enable(ks_pcie->clk); - if (ret) - return ret; - - platform_set_drvdata(pdev, ks_pcie); - - ret = ks_add_pcie_port(ks_pcie, pdev); - if (ret < 0) - goto fail_clk; - - return 0; -fail_clk: - clk_disable_unprepare(ks_pcie->clk); - - return ret; -} - -static struct platform_driver ks_pcie_driver __refdata = { - .probe = ks_pcie_probe, - .remove = __exit_p(ks_pcie_remove), - .driver = { - .name = "keystone-pcie", - .of_match_table = of_match_ptr(ks_pcie_of_match), - }, -}; -builtin_platform_driver(ks_pcie_driver); diff --git a/drivers/pci/dwc/pci-keystone.h b/drivers/pci/dwc/pci-keystone.h deleted file mode 100644 index 8a13da391543..000000000000 --- a/drivers/pci/dwc/pci-keystone.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Keystone PCI Controller's common includes - * - * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
- * http://www.ti.com - * - * Author: Murali Karicheri - */ - -#define MAX_MSI_HOST_IRQS 8 - -struct keystone_pcie { - struct dw_pcie *pci; - struct clk *clk; - /* PCI Device ID */ - u32 device_id; - int num_legacy_host_irqs; - int legacy_host_irqs[PCI_NUM_INTX]; - struct device_node *legacy_intc_np; - - int num_msi_host_irqs; - int msi_host_irqs[MAX_MSI_HOST_IRQS]; - struct device_node *msi_intc_np; - struct irq_domain *legacy_irq_domain; - struct device_node *np; - - int error_irq; - - /* Application register space */ - void __iomem *va_app_base; /* DT 1st resource */ - struct resource app; -}; - -/* Keystone DW specific MSI controller APIs/definitions */ -void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); -phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); - -/* Keystone specific PCI controller APIs */ -void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); -void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); -void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie); -irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie); -int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, - struct device_node *msi_intc_np); -int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 val); -int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 *val); -void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie); -void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie); -void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp); -void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); -void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); -void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); -int ks_dw_pcie_msi_host_init(struct pcie_port *pp); -int ks_dw_pcie_link_up(struct dw_pcie *pci); diff --git a/drivers/pci/dwc/pci-layerscape.c 
b/drivers/pci/dwc/pci-layerscape.c deleted file mode 100644 index 3724d3ef7008..000000000000 --- a/drivers/pci/dwc/pci-layerscape.c +++ /dev/null @@ -1,341 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for Freescale Layerscape SoCs - * - * Copyright (C) 2014 Freescale Semiconductor. - * - * Author: Minghuan Lian - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -/* PEX1/2 Misc Ports Status Register */ -#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) -#define LTSSM_STATE_SHIFT 20 -#define LTSSM_STATE_MASK 0x3f -#define LTSSM_PCIE_L0 0x11 /* L0 state */ - -/* PEX Internal Configuration Registers */ -#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ -#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ -#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ - -#define PCIE_IATU_NUM 6 - -struct ls_pcie_drvdata { - u32 lut_offset; - u32 ltssm_shift; - u32 lut_dbg; - const struct dw_pcie_host_ops *ops; - const struct dw_pcie_ops *dw_pcie_ops; -}; - -struct ls_pcie { - struct dw_pcie *pci; - void __iomem *lut; - struct regmap *scfg; - const struct ls_pcie_drvdata *drvdata; - int index; -}; - -#define to_ls_pcie(x) dev_get_drvdata((x)->dev) - -static bool ls_pcie_is_bridge(struct ls_pcie *pcie) -{ - struct dw_pcie *pci = pcie->pci; - u32 header_type; - - header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE); - header_type &= 0x7f; - - return header_type == PCI_HEADER_TYPE_BRIDGE; -} - -/* Clear multi-function bit */ -static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) -{ - struct dw_pcie *pci = pcie->pci; - - iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE); -} - -/* Drop MSG TLP except for Vendor MSG */ -static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) -{ - u32 val; - struct dw_pcie *pci = pcie->pci; - - val = 
ioread32(pci->dbi_base + PCIE_STRFMR1); - val &= 0xDFFFFFFF; - iowrite32(val, pci->dbi_base + PCIE_STRFMR1); -} - -static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) -{ - int i; - - for (i = 0; i < PCIE_IATU_NUM; i++) - dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i); -} - -static int ls1021_pcie_link_up(struct dw_pcie *pci) -{ - u32 state; - struct ls_pcie *pcie = to_ls_pcie(pci); - - if (!pcie->scfg) - return 0; - - regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); - state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; - - if (state < LTSSM_PCIE_L0) - return 0; - - return 1; -} - -static int ls_pcie_link_up(struct dw_pcie *pci) -{ - struct ls_pcie *pcie = to_ls_pcie(pci); - u32 state; - - state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> - pcie->drvdata->ltssm_shift) & - LTSSM_STATE_MASK; - - if (state < LTSSM_PCIE_L0) - return 0; - - return 1; -} - -/* Forward error response of outbound non-posted requests */ -static void ls_pcie_fix_error_response(struct ls_pcie *pcie) -{ - struct dw_pcie *pci = pcie->pci; - - iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR); -} - -static int ls_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct ls_pcie *pcie = to_ls_pcie(pci); - - /* - * Disable outbound windows configured by the bootloader to avoid - * one transaction hitting multiple outbound windows. - * dw_pcie_setup_rc() will reconfigure the outbound windows. 
- */ - ls_pcie_disable_outbound_atus(pcie); - ls_pcie_fix_error_response(pcie); - - dw_pcie_dbi_ro_wr_en(pci); - ls_pcie_clear_multifunction(pcie); - dw_pcie_dbi_ro_wr_dis(pci); - - ls_pcie_drop_msg_tlp(pcie); - - dw_pcie_setup_rc(pp); - - return 0; -} - -static int ls1021_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct ls_pcie *pcie = to_ls_pcie(pci); - struct device *dev = pci->dev; - u32 index[2]; - int ret; - - pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, - "fsl,pcie-scfg"); - if (IS_ERR(pcie->scfg)) { - ret = PTR_ERR(pcie->scfg); - dev_err(dev, "No syscfg phandle specified\n"); - pcie->scfg = NULL; - return ret; - } - - if (of_property_read_u32_array(dev->of_node, - "fsl,pcie-scfg", index, 2)) { - pcie->scfg = NULL; - return -EINVAL; - } - pcie->index = index[1]; - - return ls_pcie_host_init(pp); -} - -static int ls_pcie_msi_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct device *dev = pci->dev; - struct device_node *np = dev->of_node; - struct device_node *msi_node; - - /* - * The MSI domain is set by the generic of_msi_configure(). This - * .msi_host_init() function keeps us from doing the default MSI - * domain setup in dw_pcie_host_init() and also enforces the - * requirement that "msi-parent" exists. 
- */ - msi_node = of_parse_phandle(np, "msi-parent", 0); - if (!msi_node) { - dev_err(dev, "failed to find msi-parent\n"); - return -EINVAL; - } - - return 0; -} - -static const struct dw_pcie_host_ops ls1021_pcie_host_ops = { - .host_init = ls1021_pcie_host_init, - .msi_host_init = ls_pcie_msi_host_init, -}; - -static const struct dw_pcie_host_ops ls_pcie_host_ops = { - .host_init = ls_pcie_host_init, - .msi_host_init = ls_pcie_msi_host_init, -}; - -static const struct dw_pcie_ops dw_ls1021_pcie_ops = { - .link_up = ls1021_pcie_link_up, -}; - -static const struct dw_pcie_ops dw_ls_pcie_ops = { - .link_up = ls_pcie_link_up, -}; - -static struct ls_pcie_drvdata ls1021_drvdata = { - .ops = &ls1021_pcie_host_ops, - .dw_pcie_ops = &dw_ls1021_pcie_ops, -}; - -static struct ls_pcie_drvdata ls1043_drvdata = { - .lut_offset = 0x10000, - .ltssm_shift = 24, - .lut_dbg = 0x7fc, - .ops = &ls_pcie_host_ops, - .dw_pcie_ops = &dw_ls_pcie_ops, -}; - -static struct ls_pcie_drvdata ls1046_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 24, - .lut_dbg = 0x407fc, - .ops = &ls_pcie_host_ops, - .dw_pcie_ops = &dw_ls_pcie_ops, -}; - -static struct ls_pcie_drvdata ls2080_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 0, - .lut_dbg = 0x7fc, - .ops = &ls_pcie_host_ops, - .dw_pcie_ops = &dw_ls_pcie_ops, -}; - -static struct ls_pcie_drvdata ls2088_drvdata = { - .lut_offset = 0x80000, - .ltssm_shift = 0, - .lut_dbg = 0x407fc, - .ops = &ls_pcie_host_ops, - .dw_pcie_ops = &dw_ls_pcie_ops, -}; - -static const struct of_device_id ls_pcie_of_match[] = { - { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata }, - { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, - { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, - { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, - { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, - { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, - { .compatible = "fsl,ls2088a-pcie", .data = 
&ls2088_drvdata }, - { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata }, - { }, -}; - -static int __init ls_add_pcie_port(struct ls_pcie *pcie) -{ - struct dw_pcie *pci = pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - int ret; - - pp->ops = pcie->drvdata->ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static int __init ls_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct ls_pcie *pcie; - struct resource *dbi_base; - int ret; - - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pcie->drvdata = of_device_get_match_data(dev); - - pci->dev = dev; - pci->ops = pcie->drvdata->dw_pcie_ops; - - pcie->pci = pci; - - dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset; - - if (!ls_pcie_is_bridge(pcie)) - return -ENODEV; - - platform_set_drvdata(pdev, pcie); - - ret = ls_add_pcie_port(pcie); - if (ret < 0) - return ret; - - return 0; -} - -static struct platform_driver ls_pcie_driver = { - .driver = { - .name = "layerscape-pcie", - .of_match_table = ls_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c deleted file mode 100644 index 072fd7ecc29f..000000000000 --- a/drivers/pci/dwc/pcie-armada8k.c +++ /dev/null @@ -1,282 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for Marvell Armada-8K SoCs - * - * Armada-8K PCIe Glue Layer Source Code - * - * Copyright (C) 2016 Marvell Technology Group 
Ltd. - * - * Author: Yehuda Yitshak - * Author: Shadi Ammouri - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -struct armada8k_pcie { - struct dw_pcie *pci; - struct clk *clk; - struct clk *clk_reg; -}; - -#define PCIE_VENDOR_REGS_OFFSET 0x8000 - -#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0) -#define PCIE_APP_LTSSM_EN BIT(2) -#define PCIE_DEVICE_TYPE_SHIFT 4 -#define PCIE_DEVICE_TYPE_MASK 0xF -#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */ - -#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8) -#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1) -#define PCIE_GLB_STS_PHY_LINK_UP BIT(9) - -#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C) -#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20) -#define PCIE_INT_A_ASSERT_MASK BIT(9) -#define PCIE_INT_B_ASSERT_MASK BIT(10) -#define PCIE_INT_C_ASSERT_MASK BIT(11) -#define PCIE_INT_D_ASSERT_MASK BIT(12) - -#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50) -#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54) -#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C) -#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60) -/* - * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write - * allocate - */ -#define ARCACHE_DEFAULT_VALUE 0x3511 -#define AWCACHE_DEFAULT_VALUE 0x5311 - -#define DOMAIN_OUTER_SHAREABLE 0x2 -#define AX_USER_DOMAIN_MASK 0x3 -#define AX_USER_DOMAIN_SHIFT 4 - -#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev) - -static int armada8k_pcie_link_up(struct dw_pcie *pci) -{ - u32 reg; - u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; - - reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG); - - if ((reg & mask) == mask) - return 1; - - dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg); - return 0; -} - -static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie) -{ - 
struct dw_pcie *pci = pcie->pci; - u32 reg; - - if (!dw_pcie_link_up(pci)) { - /* Disable LTSSM state machine to enable configuration */ - reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); - reg &= ~(PCIE_APP_LTSSM_EN); - dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); - } - - /* Set the device to root complex mode */ - reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); - reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); - reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; - dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); - - /* Set the PCIe master AxCache attributes */ - dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE); - dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE); - - /* Set the PCIe master AxDomain attributes */ - reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG); - reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); - reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; - dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg); - - reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG); - reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); - reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; - dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg); - - /* Enable INT A-D interrupts */ - reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG); - reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | - PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; - dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg); - - if (!dw_pcie_link_up(pci)) { - /* Configuration done. 
Start LTSSM */ - reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); - reg |= PCIE_APP_LTSSM_EN; - dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); - } - - /* Wait until the link becomes active again */ - if (dw_pcie_wait_for_link(pci)) - dev_err(pci->dev, "Link not up after reconfiguration\n"); -} - -static int armada8k_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct armada8k_pcie *pcie = to_armada8k_pcie(pci); - - dw_pcie_setup_rc(pp); - armada8k_pcie_establish_link(pcie); - - return 0; -} - -static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) -{ - struct armada8k_pcie *pcie = arg; - struct dw_pcie *pci = pcie->pci; - u32 val; - - /* - * Interrupts are directly handled by the device driver of the - * PCI device. However, they are also latched into the PCIe - * controller, so we simply discard them. - */ - val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG); - dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val); - - return IRQ_HANDLED; -} - -static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { - .host_init = armada8k_pcie_host_init, -}; - -static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - - pp->root_bus_nr = -1; - pp->ops = &armada8k_pcie_host_ops; - - pp->irq = platform_get_irq(pdev, 0); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq for port\n"); - return pp->irq; - } - - ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, - IRQF_SHARED, "armada8k-pcie", pcie); - if (ret) { - dev_err(dev, "failed to request irq %d\n", pp->irq); - return ret; - } - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host: %d\n", ret); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = armada8k_pcie_link_up, -}; - -static int 
armada8k_pcie_probe(struct platform_device *pdev) -{ - struct dw_pcie *pci; - struct armada8k_pcie *pcie; - struct device *dev = &pdev->dev; - struct resource *base; - int ret; - - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - pcie->pci = pci; - - pcie->clk = devm_clk_get(dev, NULL); - if (IS_ERR(pcie->clk)) - return PTR_ERR(pcie->clk); - - ret = clk_prepare_enable(pcie->clk); - if (ret) - return ret; - - pcie->clk_reg = devm_clk_get(dev, "reg"); - if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) { - ret = -EPROBE_DEFER; - goto fail; - } - if (!IS_ERR(pcie->clk_reg)) { - ret = clk_prepare_enable(pcie->clk_reg); - if (ret) - goto fail_clkreg; - } - - /* Get the dw-pcie unit configuration/control registers base. */ - base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); - if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap regs base %p\n", base); - ret = PTR_ERR(pci->dbi_base); - goto fail_clkreg; - } - - platform_set_drvdata(pdev, pcie); - - ret = armada8k_add_pcie_port(pcie, pdev); - if (ret) - goto fail_clkreg; - - return 0; - -fail_clkreg: - clk_disable_unprepare(pcie->clk_reg); -fail: - clk_disable_unprepare(pcie->clk); - - return ret; -} - -static const struct of_device_id armada8k_pcie_of_match[] = { - { .compatible = "marvell,armada8k-pcie", }, - {}, -}; - -static struct platform_driver armada8k_pcie_driver = { - .probe = armada8k_pcie_probe, - .driver = { - .name = "armada8k-pcie", - .of_match_table = of_match_ptr(armada8k_pcie_of_match), - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(armada8k_pcie_driver); diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c deleted file mode 100644 index 321b56cfd5d0..000000000000 --- a/drivers/pci/dwc/pcie-artpec6.c +++ /dev/null @@ 
-1,618 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for Axis ARTPEC-6 SoC - * - * Author: Niklas Cassel - * - * Based on work done by Phil Edworthy - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev) - -enum artpec_pcie_variants { - ARTPEC6, - ARTPEC7, -}; - -struct artpec6_pcie { - struct dw_pcie *pci; - struct regmap *regmap; /* DT axis,syscon-pcie */ - void __iomem *phy_base; /* DT phy */ - enum artpec_pcie_variants variant; - enum dw_pcie_device_mode mode; -}; - -struct artpec_pcie_of_data { - enum artpec_pcie_variants variant; - enum dw_pcie_device_mode mode; -}; - -static const struct of_device_id artpec6_pcie_of_match[]; - -/* PCIe Port Logic registers (memory-mapped) */ -#define PL_OFFSET 0x700 - -#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc) -#define ACK_N_FTS_MASK GENMASK(15, 8) -#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK) - -#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0) -#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK) - -/* ARTPEC-6 specific registers */ -#define PCIECFG 0x18 -#define PCIECFG_DBG_OEN BIT(24) -#define PCIECFG_CORE_RESET_REQ BIT(21) -#define PCIECFG_LTSSM_ENABLE BIT(20) -#define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16) -#define PCIECFG_CLKREQ_B BIT(11) -#define PCIECFG_REFCLK_ENABLE BIT(10) -#define PCIECFG_PLL_ENABLE BIT(9) -#define PCIECFG_PCLK_ENABLE BIT(8) -#define PCIECFG_RISRCREN BIT(4) -#define PCIECFG_MODE_TX_DRV_EN BIT(3) -#define PCIECFG_CISRREN BIT(2) -#define PCIECFG_MACRO_ENABLE BIT(0) -/* ARTPEC-7 specific fields */ -#define PCIECFG_REFCLKSEL BIT(23) -#define PCIECFG_NOC_RESET BIT(3) - -#define PCIESTAT 0x1c -/* ARTPEC-7 specific fields */ -#define PCIESTAT_EXTREFCLK BIT(3) - -#define NOCCFG 0x40 -#define NOCCFG_ENABLE_CLK_PCIE BIT(4) -#define NOCCFG_POWER_PCIE_IDLEACK BIT(3) -#define 
NOCCFG_POWER_PCIE_IDLE BIT(2) -#define NOCCFG_POWER_PCIE_IDLEREQ BIT(1) - -#define PHY_STATUS 0x118 -#define PHY_COSPLLLOCK BIT(0) - -#define PHY_TX_ASIC_OUT 0x4040 -#define PHY_TX_ASIC_OUT_TX_ACK BIT(0) - -#define PHY_RX_ASIC_OUT 0x405c -#define PHY_RX_ASIC_OUT_ACK BIT(0) - -static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset) -{ - u32 val; - - regmap_read(artpec6_pcie->regmap, offset, &val); - return val; -} - -static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val) -{ - regmap_write(artpec6_pcie->regmap, offset, val); -} - -static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) -{ - struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); - struct pcie_port *pp = &pci->pp; - struct dw_pcie_ep *ep = &pci->ep; - - switch (artpec6_pcie->mode) { - case DW_PCIE_RC_TYPE: - return pci_addr - pp->cfg0_base; - case DW_PCIE_EP_TYPE: - return pci_addr - ep->phys_base; - default: - dev_err(pci->dev, "UNKNOWN device type\n"); - } - return pci_addr; -} - -static int artpec6_pcie_establish_link(struct dw_pcie *pci) -{ - struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); - u32 val; - - val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); - val |= PCIECFG_LTSSM_ENABLE; - artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); - - return 0; -} - -static void artpec6_pcie_stop_link(struct dw_pcie *pci) -{ - struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); - u32 val; - - val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); - val &= ~PCIECFG_LTSSM_ENABLE; - artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup, - .start_link = artpec6_pcie_establish_link, - .stop_link = artpec6_pcie_stop_link, -}; - -static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie) -{ - struct dw_pcie *pci = artpec6_pcie->pci; - struct device *dev = pci->dev; - u32 val; - unsigned int retries; - - retries = 50; - do { 
- usleep_range(1000, 2000); - val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); - retries--; - } while (retries && - (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); - if (!retries) - dev_err(dev, "PCIe clock manager did not leave idle state\n"); - - retries = 50; - do { - usleep_range(1000, 2000); - val = readl(artpec6_pcie->phy_base + PHY_STATUS); - retries--; - } while (retries && !(val & PHY_COSPLLLOCK)); - if (!retries) - dev_err(dev, "PHY PLL did not lock\n"); -} - -static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie) -{ - struct dw_pcie *pci = artpec6_pcie->pci; - struct device *dev = pci->dev; - u32 val; - u16 phy_status_tx, phy_status_rx; - unsigned int retries; - - retries = 50; - do { - usleep_range(1000, 2000); - val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); - retries--; - } while (retries && - (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); - if (!retries) - dev_err(dev, "PCIe clock manager did not leave idle state\n"); - - retries = 50; - do { - usleep_range(1000, 2000); - phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT); - phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT); - retries--; - } while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) || - (phy_status_rx & PHY_RX_ASIC_OUT_ACK))); - if (!retries) - dev_err(dev, "PHY did not enter Pn state\n"); -} - -static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie) -{ - switch (artpec6_pcie->variant) { - case ARTPEC6: - artpec6_pcie_wait_for_phy_a6(artpec6_pcie); - break; - case ARTPEC7: - artpec6_pcie_wait_for_phy_a7(artpec6_pcie); - break; - } -} - -static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie) -{ - u32 val; - - val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); - val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ - PCIECFG_MODE_TX_DRV_EN | - PCIECFG_CISRREN | /* Reference clock term. 
100 Ohm */ - PCIECFG_MACRO_ENABLE; - val |= PCIECFG_REFCLK_ENABLE; - val &= ~PCIECFG_DBG_OEN; - val &= ~PCIECFG_CLKREQ_B; - artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); - usleep_range(5000, 6000); - - val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); - val |= NOCCFG_ENABLE_CLK_PCIE; - artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); - usleep_range(20, 30); - - val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); - val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE; - artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); - usleep_range(6000, 7000); - - val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); - val &= ~NOCCFG_POWER_PCIE_IDLEREQ; - artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); -} - -static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie) -{ - struct dw_pcie *pci = artpec6_pcie->pci; - u32 val; - bool extrefclk; - - /* Check if external reference clock is connected */ - val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT); - extrefclk = !!(val & PCIESTAT_EXTREFCLK); - dev_dbg(pci->dev, "Using reference clock: %s\n", - extrefclk ? "external" : "internal"); - - val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); - val |= PCIECFG_RISRCREN | /* Receiver term. 
50 Ohm */ - PCIECFG_PCLK_ENABLE; - if (extrefclk) - val |= PCIECFG_REFCLKSEL; - else - val &= ~PCIECFG_REFCLKSEL; - artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); - usleep_range(10, 20); - - val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); - val |= NOCCFG_ENABLE_CLK_PCIE; - artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); - usleep_range(20, 30); - - val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); - val &= ~NOCCFG_POWER_PCIE_IDLEREQ; - artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); -} - -static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie) -{ - switch (artpec6_pcie->variant) { - case ARTPEC6: - artpec6_pcie_init_phy_a6(artpec6_pcie); - break; - case ARTPEC7: - artpec6_pcie_init_phy_a7(artpec6_pcie); - break; - } -} - -static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie) -{ - struct dw_pcie *pci = artpec6_pcie->pci; - u32 val; - - if (artpec6_pcie->variant != ARTPEC7) - return; - - /* - * Increase the N_FTS (Number of Fast Training Sequences) - * to be transmitted when transitioning from L0s to L0. - */ - val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF); - val &= ~ACK_N_FTS_MASK; - val |= ACK_N_FTS(180); - dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val); - - /* - * Set the Number of Fast Training Sequences that the core - * advertises as its N_FTS during Gen2 or Gen3 link training. 
- */ - val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); - val &= ~FAST_TRAINING_SEQ_MASK; - val |= FAST_TRAINING_SEQ(180); - dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); -} - -static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie) -{ - u32 val; - - val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); - switch (artpec6_pcie->variant) { - case ARTPEC6: - val |= PCIECFG_CORE_RESET_REQ; - break; - case ARTPEC7: - val &= ~PCIECFG_NOC_RESET; - break; - } - artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); -} - -static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie) -{ - u32 val; - - val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); - switch (artpec6_pcie->variant) { - case ARTPEC6: - val &= ~PCIECFG_CORE_RESET_REQ; - break; - case ARTPEC7: - val |= PCIECFG_NOC_RESET; - break; - } - artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); - usleep_range(100, 200); -} - -static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) -{ - struct dw_pcie *pci = artpec6_pcie->pci; - struct pcie_port *pp = &pci->pp; - - if (IS_ENABLED(CONFIG_PCI_MSI)) - dw_pcie_msi_init(pp); -} - -static int artpec6_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); - - artpec6_pcie_assert_core_reset(artpec6_pcie); - artpec6_pcie_init_phy(artpec6_pcie); - artpec6_pcie_deassert_core_reset(artpec6_pcie); - artpec6_pcie_wait_for_phy(artpec6_pcie); - artpec6_pcie_set_nfts(artpec6_pcie); - dw_pcie_setup_rc(pp); - artpec6_pcie_establish_link(pci); - dw_pcie_wait_for_link(pci); - artpec6_pcie_enable_interrupts(artpec6_pcie); - - return 0; -} - -static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { - .host_init = artpec6_pcie_host_init, -}; - -static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = artpec6_pcie->pci; - struct pcie_port *pp = &pci->pp; 
- struct device *dev = pci->dev; - int ret; - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI irq\n"); - return pp->msi_irq; - } - } - - pp->root_bus_nr = -1; - pp->ops = &artpec6_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) -{ - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); - enum pci_barno bar; - - artpec6_pcie_assert_core_reset(artpec6_pcie); - artpec6_pcie_init_phy(artpec6_pcie); - artpec6_pcie_deassert_core_reset(artpec6_pcie); - artpec6_pcie_wait_for_phy(artpec6_pcie); - artpec6_pcie_set_nfts(artpec6_pcie); - - for (bar = BAR_0; bar <= BAR_5; bar++) - dw_pcie_ep_reset_bar(pci, bar); -} - -static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u8 interrupt_num) -{ - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - switch (type) { - case PCI_EPC_IRQ_LEGACY: - dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); - return -EINVAL; - case PCI_EPC_IRQ_MSI: - return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - default: - dev_err(pci->dev, "UNKNOWN IRQ type\n"); - } - - return 0; -} - -static struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = artpec6_pcie_ep_init, - .raise_irq = artpec6_pcie_raise_irq, -}; - -static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie, - struct platform_device *pdev) -{ - int ret; - struct dw_pcie_ep *ep; - struct resource *res; - struct device *dev = &pdev->dev; - struct dw_pcie *pci = artpec6_pcie->pci; - - ep = &pci->ep; - ep->ops = &pcie_ep_ops; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->dbi_base2)) - return PTR_ERR(pci->dbi_base2); - - res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); - if (!res) - return -EINVAL; - - ep->phys_base = res->start; - ep->addr_size = resource_size(res); - - ret = dw_pcie_ep_init(ep); - if (ret) { - dev_err(dev, "failed to initialize endpoint\n"); - return ret; - } - - return 0; -} - -static int artpec6_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct artpec6_pcie *artpec6_pcie; - struct resource *dbi_base; - struct resource *phy_base; - int ret; - const struct of_device_id *match; - const struct artpec_pcie_of_data *data; - enum artpec_pcie_variants variant; - enum dw_pcie_device_mode mode; - - match = of_match_device(artpec6_pcie_of_match, dev); - if (!match) - return -EINVAL; - - data = (struct artpec_pcie_of_data *)match->data; - variant = (enum artpec_pcie_variants)data->variant; - mode = (enum dw_pcie_device_mode)data->mode; - - artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL); - if (!artpec6_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - artpec6_pcie->pci = pci; - artpec6_pcie->variant = variant; - artpec6_pcie->mode = mode; - - dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, dbi_base); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); - if (IS_ERR(artpec6_pcie->phy_base)) - return PTR_ERR(artpec6_pcie->phy_base); - - artpec6_pcie->regmap = - syscon_regmap_lookup_by_phandle(dev->of_node, - "axis,syscon-pcie"); - if (IS_ERR(artpec6_pcie->regmap)) - return PTR_ERR(artpec6_pcie->regmap); - - platform_set_drvdata(pdev, artpec6_pcie); - - switch (artpec6_pcie->mode) { - case DW_PCIE_RC_TYPE: - if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST)) - return 
-ENODEV; - - ret = artpec6_add_pcie_port(artpec6_pcie, pdev); - if (ret < 0) - return ret; - break; - case DW_PCIE_EP_TYPE: { - u32 val; - - if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP)) - return -ENODEV; - - val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); - val &= ~PCIECFG_DEVICE_TYPE_MASK; - artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); - ret = artpec6_add_pcie_ep(artpec6_pcie, pdev); - if (ret < 0) - return ret; - break; - } - default: - dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode); - } - - return 0; -} - -static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = { - .variant = ARTPEC6, - .mode = DW_PCIE_RC_TYPE, -}; - -static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = { - .variant = ARTPEC6, - .mode = DW_PCIE_EP_TYPE, -}; - -static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = { - .variant = ARTPEC7, - .mode = DW_PCIE_RC_TYPE, -}; - -static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = { - .variant = ARTPEC7, - .mode = DW_PCIE_EP_TYPE, -}; - -static const struct of_device_id artpec6_pcie_of_match[] = { - { - .compatible = "axis,artpec6-pcie", - .data = &artpec6_pcie_rc_of_data, - }, - { - .compatible = "axis,artpec6-pcie-ep", - .data = &artpec6_pcie_ep_of_data, - }, - { - .compatible = "axis,artpec7-pcie", - .data = &artpec7_pcie_rc_of_data, - }, - { - .compatible = "axis,artpec7-pcie-ep", - .data = &artpec7_pcie_ep_of_data, - }, - {}, -}; - -static struct platform_driver artpec6_pcie_driver = { - .probe = artpec6_pcie_probe, - .driver = { - .name = "artpec6-pcie", - .of_match_table = artpec6_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(artpec6_pcie_driver); diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c deleted file mode 100644 index 1eec4415a77f..000000000000 --- a/drivers/pci/dwc/pcie-designware-ep.c +++ /dev/null @@ -1,422 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/** - * Synopsys DesignWare PCIe Endpoint 
controller driver - * - * Copyright (C) 2017 Texas Instruments - * Author: Kishon Vijay Abraham I - */ - -#include - -#include "pcie-designware.h" -#include -#include - -void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) -{ - struct pci_epc *epc = ep->epc; - - pci_epc_linkup(epc); -} - -static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar, - int flags) -{ - u32 reg; - - reg = PCI_BASE_ADDRESS_0 + (4 * bar); - dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writel_dbi2(pci, reg, 0x0); - dw_pcie_writel_dbi(pci, reg, 0x0); - if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { - dw_pcie_writel_dbi2(pci, reg + 4, 0x0); - dw_pcie_writel_dbi(pci, reg + 4, 0x0); - } - dw_pcie_dbi_ro_wr_dis(pci); -} - -void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) -{ - __dw_pcie_ep_reset_bar(pci, bar, 0); -} - -static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, - struct pci_epf_header *hdr) -{ - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid); - dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid); - dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid); - dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code); - dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, - hdr->subclass_code | hdr->baseclass_code << 8); - dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE, - hdr->cache_line_size); - dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID, - hdr->subsys_vendor_id); - dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id); - dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN, - hdr->interrupt_pin); - dw_pcie_dbi_ro_wr_dis(pci); - - return 0; -} - -static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar, - dma_addr_t cpu_addr, - enum dw_pcie_as_type as_type) -{ - int ret; - u32 free_win; - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows); - if (free_win >= 
ep->num_ib_windows) { - dev_err(pci->dev, "No free inbound window\n"); - return -EINVAL; - } - - ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr, - as_type); - if (ret < 0) { - dev_err(pci->dev, "Failed to program IB window\n"); - return ret; - } - - ep->bar_to_atu[bar] = free_win; - set_bit(free_win, ep->ib_window_map); - - return 0; -} - -static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr, - u64 pci_addr, size_t size) -{ - u32 free_win; - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows); - if (free_win >= ep->num_ob_windows) { - dev_err(pci->dev, "No free outbound window\n"); - return -EINVAL; - } - - dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM, - phys_addr, pci_addr, size); - - set_bit(free_win, ep->ob_window_map); - ep->outbound_addr[free_win] = phys_addr; - - return 0; -} - -static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, - struct pci_epf_bar *epf_bar) -{ - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - enum pci_barno bar = epf_bar->barno; - u32 atu_index = ep->bar_to_atu[bar]; - - __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags); - - dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); - clear_bit(atu_index, ep->ib_window_map); -} - -static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, - struct pci_epf_bar *epf_bar) -{ - int ret; - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - enum pci_barno bar = epf_bar->barno; - size_t size = epf_bar->size; - int flags = epf_bar->flags; - enum dw_pcie_as_type as_type; - u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); - - if (!(flags & PCI_BASE_ADDRESS_SPACE)) - as_type = DW_PCIE_AS_MEM; - else - as_type = DW_PCIE_AS_IO; - - ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type); - if (ret) - return ret; - - dw_pcie_dbi_ro_wr_en(pci); - - 
dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1)); - dw_pcie_writel_dbi(pci, reg, flags); - - if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { - dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1)); - dw_pcie_writel_dbi(pci, reg + 4, 0); - } - - dw_pcie_dbi_ro_wr_dis(pci); - - return 0; -} - -static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, - u32 *atu_index) -{ - u32 index; - - for (index = 0; index < ep->num_ob_windows; index++) { - if (ep->outbound_addr[index] != addr) - continue; - *atu_index = index; - return 0; - } - - return -EINVAL; -} - -static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, - phys_addr_t addr) -{ - int ret; - u32 atu_index; - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - ret = dw_pcie_find_index(ep, addr, &atu_index); - if (ret < 0) - return; - - dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); - clear_bit(atu_index, ep->ob_window_map); -} - -static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, - phys_addr_t addr, - u64 pci_addr, size_t size) -{ - int ret; - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size); - if (ret) { - dev_err(pci->dev, "Failed to enable address\n"); - return ret; - } - - return 0; -} - -static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no) -{ - int val; - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); - if (!(val & MSI_CAP_MSI_EN_MASK)) - return -EINVAL; - - val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; - return val; -} - -static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int) -{ - int val; - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); - val &= 
~MSI_CAP_MMC_MASK; - val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK; - dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val); - dw_pcie_dbi_ro_wr_dis(pci); - - return 0; -} - -static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, - enum pci_epc_irq_type type, u8 interrupt_num) -{ - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - - if (!ep->ops->raise_irq) - return -EINVAL; - - return ep->ops->raise_irq(ep, func_no, type, interrupt_num); -} - -static void dw_pcie_ep_stop(struct pci_epc *epc) -{ - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - if (!pci->ops->stop_link) - return; - - pci->ops->stop_link(pci); -} - -static int dw_pcie_ep_start(struct pci_epc *epc) -{ - struct dw_pcie_ep *ep = epc_get_drvdata(epc); - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - if (!pci->ops->start_link) - return -EINVAL; - - return pci->ops->start_link(pci); -} - -static const struct pci_epc_ops epc_ops = { - .write_header = dw_pcie_ep_write_header, - .set_bar = dw_pcie_ep_set_bar, - .clear_bar = dw_pcie_ep_clear_bar, - .map_addr = dw_pcie_ep_map_addr, - .unmap_addr = dw_pcie_ep_unmap_addr, - .set_msi = dw_pcie_ep_set_msi, - .get_msi = dw_pcie_ep_get_msi, - .raise_irq = dw_pcie_ep_raise_irq, - .start = dw_pcie_ep_start, - .stop = dw_pcie_ep_stop, -}; - -int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, - u8 interrupt_num) -{ - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct pci_epc *epc = ep->epc; - u16 msg_ctrl, msg_data; - u32 msg_addr_lower, msg_addr_upper; - u64 msg_addr; - bool has_upper; - int ret; - - /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. 
*/ - msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); - has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); - msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32); - if (has_upper) { - msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32); - msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64); - } else { - msg_addr_upper = 0; - msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32); - } - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; - ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, - epc->mem->page_size); - if (ret) - return ret; - - writel(msg_data | (interrupt_num - 1), ep->msi_mem); - - dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); - - return 0; -} - -void dw_pcie_ep_exit(struct dw_pcie_ep *ep) -{ - struct pci_epc *epc = ep->epc; - - pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, - epc->mem->page_size); - - pci_epc_mem_exit(epc); -} - -int dw_pcie_ep_init(struct dw_pcie_ep *ep) -{ - int ret; - void *addr; - struct pci_epc *epc; - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct device *dev = pci->dev; - struct device_node *np = dev->of_node; - - if (!pci->dbi_base || !pci->dbi_base2) { - dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); - return -EINVAL; - } - - ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); - if (ret < 0) { - dev_err(dev, "Unable to read *num-ib-windows* property\n"); - return ret; - } - if (ep->num_ib_windows > MAX_IATU_IN) { - dev_err(dev, "Invalid *num-ib-windows*\n"); - return -EINVAL; - } - - ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); - if (ret < 0) { - dev_err(dev, "Unable to read *num-ob-windows* property\n"); - return ret; - } - if (ep->num_ob_windows > MAX_IATU_OUT) { - dev_err(dev, "Invalid *num-ob-windows*\n"); - return -EINVAL; - } - - ep->ib_window_map = devm_kzalloc(dev, sizeof(long) * - BITS_TO_LONGS(ep->num_ib_windows), - GFP_KERNEL); - if (!ep->ib_window_map) - return -ENOMEM; - - 
ep->ob_window_map = devm_kzalloc(dev, sizeof(long) * - BITS_TO_LONGS(ep->num_ob_windows), - GFP_KERNEL); - if (!ep->ob_window_map) - return -ENOMEM; - - addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows, - GFP_KERNEL); - if (!addr) - return -ENOMEM; - ep->outbound_addr = addr; - - if (ep->ops->ep_init) - ep->ops->ep_init(ep); - - epc = devm_pci_epc_create(dev, &epc_ops); - if (IS_ERR(epc)) { - dev_err(dev, "Failed to create epc device\n"); - return PTR_ERR(epc); - } - - ret = of_property_read_u8(np, "max-functions", &epc->max_functions); - if (ret < 0) - epc->max_functions = 1; - - ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, - ep->page_size); - if (ret < 0) { - dev_err(dev, "Failed to initialize address space\n"); - return ret; - } - - ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, - epc->mem->page_size); - if (!ep->msi_mem) { - dev_err(dev, "Failed to reserve memory for MSI\n"); - return -ENOMEM; - } - - epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER; - EPC_FEATURE_SET_BAR(epc->features, BAR_0); - - ep->epc = epc; - epc_set_drvdata(epc, ep); - dw_pcie_setup(pci); - - return 0; -} diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c deleted file mode 100644 index cba1432e395d..000000000000 --- a/drivers/pci/dwc/pcie-designware-host.c +++ /dev/null @@ -1,722 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Synopsys DesignWare PCIe host controller driver - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
- * http://www.samsung.com - * - * Author: Jingoo Han - */ - -#include -#include -#include -#include -#include -#include - -#include "../pci.h" -#include "pcie-designware.h" - -static struct pci_ops dw_pcie_ops; - -static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, - u32 *val) -{ - struct dw_pcie *pci; - - if (pp->ops->rd_own_conf) - return pp->ops->rd_own_conf(pp, where, size, val); - - pci = to_dw_pcie_from_pp(pp); - return dw_pcie_read(pci->dbi_base + where, size, val); -} - -static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, - u32 val) -{ - struct dw_pcie *pci; - - if (pp->ops->wr_own_conf) - return pp->ops->wr_own_conf(pp, where, size, val); - - pci = to_dw_pcie_from_pp(pp); - return dw_pcie_write(pci->dbi_base + where, size, val); -} - -static void dw_msi_ack_irq(struct irq_data *d) -{ - irq_chip_ack_parent(d); -} - -static void dw_msi_mask_irq(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void dw_msi_unmask_irq(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip dw_pcie_msi_irq_chip = { - .name = "PCI-MSI", - .irq_ack = dw_msi_ack_irq, - .irq_mask = dw_msi_mask_irq, - .irq_unmask = dw_msi_unmask_irq, -}; - -static struct msi_domain_info dw_pcie_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), - .chip = &dw_pcie_msi_irq_chip, -}; - -/* MSI int handler */ -irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) -{ - int i, pos, irq; - u32 val, num_ctrls; - irqreturn_t ret = IRQ_NONE; - - num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; - - for (i = 0; i < num_ctrls; i++) { - dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + - (i * MSI_REG_CTRL_BLOCK_SIZE), - 4, &val); - if (!val) - continue; - - ret = IRQ_HANDLED; - pos = 0; - while ((pos = find_next_bit((unsigned long *) &val, - MAX_MSI_IRQS_PER_CTRL, - pos)) != MAX_MSI_IRQS_PER_CTRL) { - 
irq = irq_find_mapping(pp->irq_domain, - (i * MAX_MSI_IRQS_PER_CTRL) + - pos); - generic_handle_irq(irq); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + - (i * MSI_REG_CTRL_BLOCK_SIZE), - 4, 1 << pos); - pos++; - } - } - - return ret; -} - -/* Chained MSI interrupt service routine */ -static void dw_chained_msi_isr(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct pcie_port *pp; - - chained_irq_enter(chip, desc); - - pp = irq_desc_get_handler_data(desc); - dw_handle_msi_irq(pp); - - chained_irq_exit(chip, desc); -} - -static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) -{ - struct pcie_port *pp = irq_data_get_irq_chip_data(data); - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - u64 msi_target; - - if (pp->ops->get_msi_addr) - msi_target = pp->ops->get_msi_addr(pp); - else - msi_target = (u64)pp->msi_data; - - msg->address_lo = lower_32_bits(msi_target); - msg->address_hi = upper_32_bits(msi_target); - - if (pp->ops->get_msi_data) - msg->data = pp->ops->get_msi_data(pp, data->hwirq); - else - msg->data = data->hwirq; - - dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", - (int)data->hwirq, msg->address_hi, msg->address_lo); -} - -static int dw_pci_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - -static void dw_pci_bottom_mask(struct irq_data *data) -{ - struct pcie_port *pp = irq_data_get_irq_chip_data(data); - unsigned int res, bit, ctrl; - unsigned long flags; - - raw_spin_lock_irqsave(&pp->lock, flags); - - if (pp->ops->msi_clear_irq) { - pp->ops->msi_clear_irq(pp, data->hwirq); - } else { - ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; - res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; - bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; - - pp->irq_status[ctrl] &= ~(1 << bit); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, - pp->irq_status[ctrl]); - } - - raw_spin_unlock_irqrestore(&pp->lock, flags); -} - -static void 
dw_pci_bottom_unmask(struct irq_data *data) -{ - struct pcie_port *pp = irq_data_get_irq_chip_data(data); - unsigned int res, bit, ctrl; - unsigned long flags; - - raw_spin_lock_irqsave(&pp->lock, flags); - - if (pp->ops->msi_set_irq) { - pp->ops->msi_set_irq(pp, data->hwirq); - } else { - ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; - res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; - bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; - - pp->irq_status[ctrl] |= 1 << bit; - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, - pp->irq_status[ctrl]); - } - - raw_spin_unlock_irqrestore(&pp->lock, flags); -} - -static void dw_pci_bottom_ack(struct irq_data *d) -{ - struct msi_desc *msi = irq_data_get_msi_desc(d); - struct pcie_port *pp; - - pp = msi_desc_to_pci_sysdata(msi); - - if (pp->ops->msi_irq_ack) - pp->ops->msi_irq_ack(d->hwirq, pp); -} - -static struct irq_chip dw_pci_msi_bottom_irq_chip = { - .name = "DWPCI-MSI", - .irq_ack = dw_pci_bottom_ack, - .irq_compose_msi_msg = dw_pci_setup_msi_msg, - .irq_set_affinity = dw_pci_msi_set_affinity, - .irq_mask = dw_pci_bottom_mask, - .irq_unmask = dw_pci_bottom_unmask, -}; - -static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs, - void *args) -{ - struct pcie_port *pp = domain->host_data; - unsigned long flags; - u32 i; - int bit; - - raw_spin_lock_irqsave(&pp->lock, flags); - - bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors, - order_base_2(nr_irqs)); - - raw_spin_unlock_irqrestore(&pp->lock, flags); - - if (bit < 0) - return -ENOSPC; - - for (i = 0; i < nr_irqs; i++) - irq_domain_set_info(domain, virq + i, bit + i, - &dw_pci_msi_bottom_irq_chip, - pp, handle_edge_irq, - NULL, NULL); - - return 0; -} - -static void dw_pcie_irq_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) -{ - struct irq_data *data = irq_domain_get_irq_data(domain, virq); - struct pcie_port *pp = irq_data_get_irq_chip_data(data); - unsigned long flags; - - 
raw_spin_lock_irqsave(&pp->lock, flags); - - bitmap_release_region(pp->msi_irq_in_use, data->hwirq, - order_base_2(nr_irqs)); - - raw_spin_unlock_irqrestore(&pp->lock, flags); -} - -static const struct irq_domain_ops dw_pcie_msi_domain_ops = { - .alloc = dw_pcie_irq_domain_alloc, - .free = dw_pcie_irq_domain_free, -}; - -int dw_pcie_allocate_domains(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); - - pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, - &dw_pcie_msi_domain_ops, pp); - if (!pp->irq_domain) { - dev_err(pci->dev, "Failed to create IRQ domain\n"); - return -ENOMEM; - } - - pp->msi_domain = pci_msi_create_irq_domain(fwnode, - &dw_pcie_msi_domain_info, - pp->irq_domain); - if (!pp->msi_domain) { - dev_err(pci->dev, "Failed to create MSI domain\n"); - irq_domain_remove(pp->irq_domain); - return -ENOMEM; - } - - return 0; -} - -void dw_pcie_free_msi(struct pcie_port *pp) -{ - irq_set_chained_handler(pp->msi_irq, NULL); - irq_set_handler_data(pp->msi_irq, NULL); - - irq_domain_remove(pp->msi_domain); - irq_domain_remove(pp->irq_domain); -} - -void dw_pcie_msi_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct device *dev = pci->dev; - struct page *page; - u64 msi_target; - - page = alloc_page(GFP_KERNEL); - pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, pp->msi_data)) { - dev_err(dev, "Failed to map MSI data\n"); - __free_page(page); - return; - } - msi_target = (u64)pp->msi_data; - - /* Program the msi_data */ - dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, - lower_32_bits(msi_target)); - dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, - upper_32_bits(msi_target)); -} - -int dw_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct device *dev = pci->dev; - struct device_node *np = dev->of_node; - struct platform_device 
*pdev = to_platform_device(dev); - struct resource_entry *win, *tmp; - struct pci_bus *bus, *child; - struct pci_host_bridge *bridge; - struct resource *cfg_res; - int ret; - - raw_spin_lock_init(&pci->pp.lock); - - cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); - if (cfg_res) { - pp->cfg0_size = resource_size(cfg_res) >> 1; - pp->cfg1_size = resource_size(cfg_res) >> 1; - pp->cfg0_base = cfg_res->start; - pp->cfg1_base = cfg_res->start + pp->cfg0_size; - } else if (!pp->va_cfg0_base) { - dev_err(dev, "Missing *config* reg space\n"); - } - - bridge = pci_alloc_host_bridge(0); - if (!bridge) - return -ENOMEM; - - ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, - &bridge->windows, &pp->io_base); - if (ret) - return ret; - - ret = devm_request_pci_bus_resources(dev, &bridge->windows); - if (ret) - goto error; - - /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { - switch (resource_type(win->res)) { - case IORESOURCE_IO: - ret = pci_remap_iospace(win->res, pp->io_base); - if (ret) { - dev_warn(dev, "Error %d: failed to map resource %pR\n", - ret, win->res); - resource_list_destroy_entry(win); - } else { - pp->io = win->res; - pp->io->name = "I/O"; - pp->io_size = resource_size(pp->io); - pp->io_bus_addr = pp->io->start - win->offset; - } - break; - case IORESOURCE_MEM: - pp->mem = win->res; - pp->mem->name = "MEM"; - pp->mem_size = resource_size(pp->mem); - pp->mem_bus_addr = pp->mem->start - win->offset; - break; - case 0: - pp->cfg = win->res; - pp->cfg0_size = resource_size(pp->cfg) >> 1; - pp->cfg1_size = resource_size(pp->cfg) >> 1; - pp->cfg0_base = pp->cfg->start; - pp->cfg1_base = pp->cfg->start + pp->cfg0_size; - break; - case IORESOURCE_BUS: - pp->busn = win->res; - break; - } - } - - if (!pci->dbi_base) { - pci->dbi_base = devm_pci_remap_cfgspace(dev, - pp->cfg->start, - resource_size(pp->cfg)); - if (!pci->dbi_base) { - dev_err(dev, "Error with ioremap\n"); - ret 
= -ENOMEM; - goto error; - } - } - - pp->mem_base = pp->mem->start; - - if (!pp->va_cfg0_base) { - pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, - pp->cfg0_base, pp->cfg0_size); - if (!pp->va_cfg0_base) { - dev_err(dev, "Error with ioremap in function\n"); - ret = -ENOMEM; - goto error; - } - } - - if (!pp->va_cfg1_base) { - pp->va_cfg1_base = devm_pci_remap_cfgspace(dev, - pp->cfg1_base, - pp->cfg1_size); - if (!pp->va_cfg1_base) { - dev_err(dev, "Error with ioremap\n"); - ret = -ENOMEM; - goto error; - } - } - - ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport); - if (ret) - pci->num_viewport = 2; - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - /* - * If a specific SoC driver needs to change the - * default number of vectors, it needs to implement - * the set_num_vectors callback. - */ - if (!pp->ops->set_num_vectors) { - pp->num_vectors = MSI_DEF_NUM_VECTORS; - } else { - pp->ops->set_num_vectors(pp); - - if (pp->num_vectors > MAX_MSI_IRQS || - pp->num_vectors == 0) { - dev_err(dev, - "Invalid number of vectors\n"); - goto error; - } - } - - if (!pp->ops->msi_host_init) { - ret = dw_pcie_allocate_domains(pp); - if (ret) - goto error; - - if (pp->msi_irq) - irq_set_chained_handler_and_data(pp->msi_irq, - dw_chained_msi_isr, - pp); - } else { - ret = pp->ops->msi_host_init(pp); - if (ret < 0) - goto error; - } - } - - if (pp->ops->host_init) { - ret = pp->ops->host_init(pp); - if (ret) - goto error; - } - - pp->root_bus_nr = pp->busn->start; - - bridge->dev.parent = dev; - bridge->sysdata = pp; - bridge->busnr = pp->root_bus_nr; - bridge->ops = &dw_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret) - goto error; - - bus = bridge->bus; - - if (pp->ops->scan_bus) - pp->ops->scan_bus(pp); - - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - 
pci_bus_add_devices(bus); - return 0; - -error: - pci_free_host_bridge(bridge); - return ret; -} - -static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val) -{ - int ret, type; - u32 busdev, cfg_size; - u64 cpu_addr; - void __iomem *va_cfg_base; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - if (pp->ops->rd_other_conf) - return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val); - - busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | - PCIE_ATU_FUNC(PCI_FUNC(devfn)); - - if (bus->parent->number == pp->root_bus_nr) { - type = PCIE_ATU_TYPE_CFG0; - cpu_addr = pp->cfg0_base; - cfg_size = pp->cfg0_size; - va_cfg_base = pp->va_cfg0_base; - } else { - type = PCIE_ATU_TYPE_CFG1; - cpu_addr = pp->cfg1_base; - cfg_size = pp->cfg1_size; - va_cfg_base = pp->va_cfg1_base; - } - - dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, - type, cpu_addr, - busdev, cfg_size); - ret = dw_pcie_read(va_cfg_base + where, size, val); - if (pci->num_viewport <= 2) - dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, - PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); - - return ret; -} - -static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - u32 devfn, int where, int size, u32 val) -{ - int ret, type; - u32 busdev, cfg_size; - u64 cpu_addr; - void __iomem *va_cfg_base; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - if (pp->ops->wr_other_conf) - return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val); - - busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | - PCIE_ATU_FUNC(PCI_FUNC(devfn)); - - if (bus->parent->number == pp->root_bus_nr) { - type = PCIE_ATU_TYPE_CFG0; - cpu_addr = pp->cfg0_base; - cfg_size = pp->cfg0_size; - va_cfg_base = pp->va_cfg0_base; - } else { - type = PCIE_ATU_TYPE_CFG1; - cpu_addr = pp->cfg1_base; - cfg_size = pp->cfg1_size; - va_cfg_base = pp->va_cfg1_base; - } - - dw_pcie_prog_outbound_atu(pci, 
PCIE_ATU_REGION_INDEX1, - type, cpu_addr, - busdev, cfg_size); - ret = dw_pcie_write(va_cfg_base + where, size, val); - if (pci->num_viewport <= 2) - dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, - PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); - - return ret; -} - -static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, - int dev) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - /* If there is no link, then there is no device */ - if (bus->number != pp->root_bus_nr) { - if (!dw_pcie_link_up(pci)) - return 0; - } - - /* Access only one slot on each root port */ - if (bus->number == pp->root_bus_nr && dev > 0) - return 0; - - return 1; -} - -static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - int size, u32 *val) -{ - struct pcie_port *pp = bus->sysdata; - - if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) { - *val = 0xffffffff; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - if (bus->number == pp->root_bus_nr) - return dw_pcie_rd_own_conf(pp, where, size, val); - - return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); -} - -static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) -{ - struct pcie_port *pp = bus->sysdata; - - if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == pp->root_bus_nr) - return dw_pcie_wr_own_conf(pp, where, size, val); - - return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); -} - -static struct pci_ops dw_pcie_ops = { - .read = dw_pcie_rd_conf, - .write = dw_pcie_wr_conf, -}; - -static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) -{ - u32 val; - - val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); - if (val == 0xffffffff) - return 1; - - return 0; -} - -void dw_pcie_setup_rc(struct pcie_port *pp) -{ - u32 val, ctrl, num_ctrls; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - dw_pcie_setup(pci); - - num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; - 
- /* Initialize IRQ Status array */ - for (ctrl = 0; ctrl < num_ctrls; ctrl++) - dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + - (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - 4, &pp->irq_status[ctrl]); - - /* Setup RC BARs */ - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); - - /* Setup interrupt pins */ - dw_pcie_dbi_ro_wr_en(pci); - val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); - val &= 0xffff00ff; - val |= 0x00000100; - dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); - dw_pcie_dbi_ro_wr_dis(pci); - - /* Setup bus numbers */ - val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); - val &= 0xff000000; - val |= 0x00ff0100; - dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); - - /* Setup command register */ - val = dw_pcie_readl_dbi(pci, PCI_COMMAND); - val &= 0xffff0000; - val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_MASTER | PCI_COMMAND_SERR; - dw_pcie_writel_dbi(pci, PCI_COMMAND, val); - - /* - * If the platform provides ->rd_other_conf, it means the platform - * uses its own address translation component rather than ATU, so - * we should not program the ATU here. - */ - if (!pp->ops->rd_other_conf) { - /* Get iATU unroll support */ - pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); - dev_dbg(pci->dev, "iATU unroll: %s\n", - pci->iatu_unroll_enabled ? 
"enabled" : "disabled"); - - dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, - PCIE_ATU_TYPE_MEM, pp->mem_base, - pp->mem_bus_addr, pp->mem_size); - if (pci->num_viewport > 2) - dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2, - PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); - } - - dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); - - /* Enable write permission for the DBI read-only register */ - dw_pcie_dbi_ro_wr_en(pci); - /* Program correct class for RC */ - dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); - /* Better disable write permission right after the update */ - dw_pcie_dbi_ro_wr_dis(pci); - - dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); - val |= PORT_LOGIC_SPEED_CHANGE; - dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); -} diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c deleted file mode 100644 index 5937fed4c938..000000000000 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ /dev/null @@ -1,259 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe RC driver for Synopsys DesignWare Core - * - * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) - * - * Authors: Joao Pinto - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -struct dw_plat_pcie { - struct dw_pcie *pci; - struct regmap *regmap; - enum dw_pcie_device_mode mode; -}; - -struct dw_plat_pcie_of_data { - enum dw_pcie_device_mode mode; -}; - -static const struct of_device_id dw_plat_pcie_of_match[]; - -static int dw_plat_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - dw_pcie_setup_rc(pp); - dw_pcie_wait_for_link(pci); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - dw_pcie_msi_init(pp); - - return 0; -} - -static void dw_plat_set_num_vectors(struct pcie_port *pp) -{ - pp->num_vectors = MAX_MSI_IRQS; -} - -static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { - .host_init = dw_plat_pcie_host_init, - .set_num_vectors = dw_plat_set_num_vectors, -}; - -static int dw_plat_pcie_establish_link(struct dw_pcie *pci) -{ - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .start_link = dw_plat_pcie_establish_link, -}; - -static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) -{ - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - enum pci_barno bar; - - for (bar = BAR_0; bar <= BAR_5; bar++) - dw_pcie_ep_reset_bar(pci, bar); -} - -static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, - u8 interrupt_num) -{ - struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - - switch (type) { - case PCI_EPC_IRQ_LEGACY: - dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); - return -EINVAL; - case PCI_EPC_IRQ_MSI: - return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); - default: - dev_err(pci->dev, "UNKNOWN IRQ type\n"); - } - - return 0; -} - -static struct dw_pcie_ep_ops pcie_ep_ops = { - .ep_init = dw_plat_pcie_ep_init, - .raise_irq = dw_plat_pcie_ep_raise_irq, -}; - -static int dw_plat_add_pcie_port(struct 
dw_plat_pcie *dw_plat_pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = dw_plat_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - - pp->irq = platform_get_irq(pdev, 1); - if (pp->irq < 0) - return pp->irq; - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) - return pp->msi_irq; - } - - pp->root_bus_nr = -1; - pp->ops = &dw_plat_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "Failed to initialize host\n"); - return ret; - } - - return 0; -} - -static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie, - struct platform_device *pdev) -{ - int ret; - struct dw_pcie_ep *ep; - struct resource *res; - struct device *dev = &pdev->dev; - struct dw_pcie *pci = dw_plat_pcie->pci; - - ep = &pci->ep; - ep->ops = &pcie_ep_ops; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->dbi_base2)) - return PTR_ERR(pci->dbi_base2); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); - if (!res) - return -EINVAL; - - ep->phys_base = res->start; - ep->addr_size = resource_size(res); - - ret = dw_pcie_ep_init(ep); - if (ret) { - dev_err(dev, "Failed to initialize endpoint\n"); - return ret; - } - return 0; -} - -static int dw_plat_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_plat_pcie *dw_plat_pcie; - struct dw_pcie *pci; - struct resource *res; /* Resource from DT */ - int ret; - const struct of_device_id *match; - const struct dw_plat_pcie_of_data *data; - enum dw_pcie_device_mode mode; - - match = of_match_device(dw_plat_pcie_of_match, dev); - if (!match) - return -EINVAL; - - data = (struct dw_plat_pcie_of_data *)match->data; - mode = (enum dw_pcie_device_mode)data->mode; - - dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL); - if (!dw_plat_pcie) - return -ENOMEM; - - 
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - dw_plat_pcie->pci = pci; - dw_plat_pcie->mode = mode; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - if (!res) - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - pci->dbi_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - platform_set_drvdata(pdev, dw_plat_pcie); - - switch (dw_plat_pcie->mode) { - case DW_PCIE_RC_TYPE: - if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST)) - return -ENODEV; - - ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev); - if (ret < 0) - return ret; - break; - case DW_PCIE_EP_TYPE: - if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP)) - return -ENODEV; - - ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev); - if (ret < 0) - return ret; - break; - default: - dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode); - } - - return 0; -} - -static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = { - .mode = DW_PCIE_RC_TYPE, -}; - -static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = { - .mode = DW_PCIE_EP_TYPE, -}; - -static const struct of_device_id dw_plat_pcie_of_match[] = { - { - .compatible = "snps,dw-pcie", - .data = &dw_plat_pcie_rc_of_data, - }, - { - .compatible = "snps,dw-pcie-ep", - .data = &dw_plat_pcie_ep_of_data, - }, - {}, -}; - -static struct platform_driver dw_plat_pcie_driver = { - .driver = { - .name = "dw-pcie", - .of_match_table = dw_plat_pcie_of_match, - .suppress_bind_attrs = true, - }, - .probe = dw_plat_pcie_probe, -}; -builtin_platform_driver(dw_plat_pcie_driver); diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c deleted file mode 100644 index 778c4f76a884..000000000000 --- a/drivers/pci/dwc/pcie-designware.c +++ /dev/null @@ -1,394 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Synopsys DesignWare PCIe host controller driver - * - * Copyright (C) 2013 Samsung 
Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Jingoo Han - */ - -#include -#include -#include - -#include "pcie-designware.h" - -/* PCIe Port Logic registers */ -#define PLR_OFFSET 0x700 -#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) -#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4) -#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29) - -int dw_pcie_read(void __iomem *addr, int size, u32 *val) -{ - if ((uintptr_t)addr & (size - 1)) { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - if (size == 4) { - *val = readl(addr); - } else if (size == 2) { - *val = readw(addr); - } else if (size == 1) { - *val = readb(addr); - } else { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - return PCIBIOS_SUCCESSFUL; -} - -int dw_pcie_write(void __iomem *addr, int size, u32 val) -{ - if ((uintptr_t)addr & (size - 1)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - if (size == 4) - writel(val, addr); - else if (size == 2) - writew(val, addr); - else if (size == 1) - writeb(val, addr); - else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size) -{ - int ret; - u32 val; - - if (pci->ops->read_dbi) - return pci->ops->read_dbi(pci, base, reg, size); - - ret = dw_pcie_read(base + reg, size, &val); - if (ret) - dev_err(pci->dev, "Read DBI address failed\n"); - - return val; -} - -void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size, u32 val) -{ - int ret; - - if (pci->ops->write_dbi) { - pci->ops->write_dbi(pci, base, reg, size, val); - return; - } - - ret = dw_pcie_write(base + reg, size, val); - if (ret) - dev_err(pci->dev, "Write DBI address failed\n"); -} - -static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) -{ - u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); - - return dw_pcie_readl_dbi(pci, offset + reg); -} - -static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, 
u32 reg, - u32 val) -{ - u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); - - dw_pcie_writel_dbi(pci, offset + reg, val); -} - -static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, - int type, u64 cpu_addr, - u64 pci_addr, u32 size) -{ - u32 retries, val; - - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, - lower_32_bits(cpu_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, - upper_32_bits(cpu_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT, - lower_32_bits(cpu_addr + size - 1)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, - lower_32_bits(pci_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, - upper_32_bits(pci_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, - type); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, - PCIE_ATU_ENABLE); - - /* - * Make sure ATU enable takes effect before any subsequent config - * and I/O accesses. 
- */ - for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { - val = dw_pcie_readl_ob_unroll(pci, index, - PCIE_ATU_UNR_REGION_CTRL2); - if (val & PCIE_ATU_ENABLE) - return; - - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); - } - dev_err(pci->dev, "Outbound iATU is not being enabled\n"); -} - -void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, - u64 cpu_addr, u64 pci_addr, u32 size) -{ - u32 retries, val; - - if (pci->ops->cpu_addr_fixup) - cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); - - if (pci->iatu_unroll_enabled) { - dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr, - pci_addr, size); - return; - } - - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, - PCIE_ATU_REGION_OUTBOUND | index); - dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, - lower_32_bits(cpu_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, - upper_32_bits(cpu_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, - lower_32_bits(cpu_addr + size - 1)); - dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, - lower_32_bits(pci_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, - upper_32_bits(pci_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); - - /* - * Make sure ATU enable takes effect before any subsequent config - * and I/O accesses. 
- */ - for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { - val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); - if (val & PCIE_ATU_ENABLE) - return; - - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); - } - dev_err(pci->dev, "Outbound iATU is not being enabled\n"); -} - -static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) -{ - u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); - - return dw_pcie_readl_dbi(pci, offset + reg); -} - -static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, - u32 val) -{ - u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); - - dw_pcie_writel_dbi(pci, offset + reg, val); -} - -static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, - int bar, u64 cpu_addr, - enum dw_pcie_as_type as_type) -{ - int type; - u32 retries, val; - - dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, - lower_32_bits(cpu_addr)); - dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, - upper_32_bits(cpu_addr)); - - switch (as_type) { - case DW_PCIE_AS_MEM: - type = PCIE_ATU_TYPE_MEM; - break; - case DW_PCIE_AS_IO: - type = PCIE_ATU_TYPE_IO; - break; - default: - return -EINVAL; - } - - dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type); - dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, - PCIE_ATU_ENABLE | - PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); - - /* - * Make sure ATU enable takes effect before any subsequent config - * and I/O accesses. 
- */ - for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { - val = dw_pcie_readl_ib_unroll(pci, index, - PCIE_ATU_UNR_REGION_CTRL2); - if (val & PCIE_ATU_ENABLE) - return 0; - - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); - } - dev_err(pci->dev, "Inbound iATU is not being enabled\n"); - - return -EBUSY; -} - -int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, - u64 cpu_addr, enum dw_pcie_as_type as_type) -{ - int type; - u32 retries, val; - - if (pci->iatu_unroll_enabled) - return dw_pcie_prog_inbound_atu_unroll(pci, index, bar, - cpu_addr, as_type); - - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | - index); - dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); - - switch (as_type) { - case DW_PCIE_AS_MEM: - type = PCIE_ATU_TYPE_MEM; - break; - case DW_PCIE_AS_IO: - type = PCIE_ATU_TYPE_IO; - break; - default: - return -EINVAL; - } - - dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE - | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); - - /* - * Make sure ATU enable takes effect before any subsequent config - * and I/O accesses. 
- */ - for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { - val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); - if (val & PCIE_ATU_ENABLE) - return 0; - - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); - } - dev_err(pci->dev, "Inbound iATU is not being enabled\n"); - - return -EBUSY; -} - -void dw_pcie_disable_atu(struct dw_pcie *pci, int index, - enum dw_pcie_region_type type) -{ - int region; - - switch (type) { - case DW_PCIE_REGION_INBOUND: - region = PCIE_ATU_REGION_INBOUND; - break; - case DW_PCIE_REGION_OUTBOUND: - region = PCIE_ATU_REGION_OUTBOUND; - break; - default: - return; - } - - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); -} - -int dw_pcie_wait_for_link(struct dw_pcie *pci) -{ - int retries; - - /* Check if the link is up or not */ - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { - if (dw_pcie_link_up(pci)) { - dev_info(pci->dev, "Link up\n"); - return 0; - } - usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); - } - - dev_err(pci->dev, "Phy link never came up\n"); - - return -ETIMEDOUT; -} - -int dw_pcie_link_up(struct dw_pcie *pci) -{ - u32 val; - - if (pci->ops->link_up) - return pci->ops->link_up(pci); - - val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1); - return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) && - (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))); -} - -void dw_pcie_setup(struct dw_pcie *pci) -{ - int ret; - u32 val; - u32 lanes; - struct device *dev = pci->dev; - struct device_node *np = dev->of_node; - - ret = of_property_read_u32(np, "num-lanes", &lanes); - if (ret) - lanes = 0; - - /* Set the number of lanes */ - val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); - val &= ~PORT_LINK_MODE_MASK; - switch (lanes) { - case 1: - val |= PORT_LINK_MODE_1_LANES; - break; - case 2: - val |= PORT_LINK_MODE_2_LANES; - break; - case 4: - val |= PORT_LINK_MODE_4_LANES; - break; - case 8: - val |= PORT_LINK_MODE_8_LANES; - break; - 
default: - dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes); - return; - } - dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); - - /* Set link width speed control register */ - val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); - val &= ~PORT_LOGIC_LINK_WIDTH_MASK; - switch (lanes) { - case 1: - val |= PORT_LOGIC_LINK_WIDTH_1_LANES; - break; - case 2: - val |= PORT_LOGIC_LINK_WIDTH_2_LANES; - break; - case 4: - val |= PORT_LOGIC_LINK_WIDTH_4_LANES; - break; - case 8: - val |= PORT_LOGIC_LINK_WIDTH_8_LANES; - break; - } - dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); -} diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h deleted file mode 100644 index bee4e2535a61..000000000000 --- a/drivers/pci/dwc/pcie-designware.h +++ /dev/null @@ -1,387 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Synopsys DesignWare PCIe host controller driver - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Jingoo Han - */ - -#ifndef _PCIE_DESIGNWARE_H -#define _PCIE_DESIGNWARE_H - -#include -#include -#include -#include - -#include -#include - -/* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_USLEEP_MIN 90000 -#define LINK_WAIT_USLEEP_MAX 100000 - -/* Parameters for the waiting for iATU enabled routine */ -#define LINK_WAIT_MAX_IATU_RETRIES 5 -#define LINK_WAIT_IATU_MIN 9000 -#define LINK_WAIT_IATU_MAX 10000 - -/* Synopsys-specific PCIe configuration registers */ -#define PCIE_PORT_LINK_CONTROL 0x710 -#define PORT_LINK_MODE_MASK (0x3f << 16) -#define PORT_LINK_MODE_1_LANES (0x1 << 16) -#define PORT_LINK_MODE_2_LANES (0x3 << 16) -#define PORT_LINK_MODE_4_LANES (0x7 << 16) -#define PORT_LINK_MODE_8_LANES (0xf << 16) - -#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C -#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) -#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) -#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) 
-#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) -#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) -#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) - -#define PCIE_MSI_ADDR_LO 0x820 -#define PCIE_MSI_ADDR_HI 0x824 -#define PCIE_MSI_INTR0_ENABLE 0x828 -#define PCIE_MSI_INTR0_MASK 0x82C -#define PCIE_MSI_INTR0_STATUS 0x830 - -#define PCIE_ATU_VIEWPORT 0x900 -#define PCIE_ATU_REGION_INBOUND (0x1 << 31) -#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) -#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) -#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) -#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) -#define PCIE_ATU_CR1 0x904 -#define PCIE_ATU_TYPE_MEM (0x0 << 0) -#define PCIE_ATU_TYPE_IO (0x2 << 0) -#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) -#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) -#define PCIE_ATU_CR2 0x908 -#define PCIE_ATU_ENABLE (0x1 << 31) -#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) -#define PCIE_ATU_LOWER_BASE 0x90C -#define PCIE_ATU_UPPER_BASE 0x910 -#define PCIE_ATU_LIMIT 0x914 -#define PCIE_ATU_LOWER_TARGET 0x918 -#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) -#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) -#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) -#define PCIE_ATU_UPPER_TARGET 0x91C - -#define PCIE_MISC_CONTROL_1_OFF 0x8BC -#define PCIE_DBI_RO_WR_EN (0x1 << 0) - -/* - * iATU Unroll-specific register definitions - * From 4.80 core version the address translation will be made by unroll - */ -#define PCIE_ATU_UNR_REGION_CTRL1 0x00 -#define PCIE_ATU_UNR_REGION_CTRL2 0x04 -#define PCIE_ATU_UNR_LOWER_BASE 0x08 -#define PCIE_ATU_UNR_UPPER_BASE 0x0C -#define PCIE_ATU_UNR_LIMIT 0x10 -#define PCIE_ATU_UNR_LOWER_TARGET 0x14 -#define PCIE_ATU_UNR_UPPER_TARGET 0x18 - -/* Register address builder */ -#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ - ((0x3 << 20) | ((region) << 9)) - -#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ - ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) - -#define MSI_MESSAGE_CONTROL 0x52 -#define MSI_CAP_MMC_SHIFT 1 -#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT) 
-#define MSI_CAP_MME_SHIFT 4 -#define MSI_CAP_MSI_EN_MASK 0x1 -#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) -#define MSI_MESSAGE_ADDR_L32 0x54 -#define MSI_MESSAGE_ADDR_U32 0x58 -#define MSI_MESSAGE_DATA_32 0x58 -#define MSI_MESSAGE_DATA_64 0x5C - -#define MAX_MSI_IRQS 256 -#define MAX_MSI_IRQS_PER_CTRL 32 -#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) -#define MSI_REG_CTRL_BLOCK_SIZE 12 -#define MSI_DEF_NUM_VECTORS 32 - -/* Maximum number of inbound/outbound iATUs */ -#define MAX_IATU_IN 256 -#define MAX_IATU_OUT 256 - -struct pcie_port; -struct dw_pcie; -struct dw_pcie_ep; - -enum dw_pcie_region_type { - DW_PCIE_REGION_UNKNOWN, - DW_PCIE_REGION_INBOUND, - DW_PCIE_REGION_OUTBOUND, -}; - -enum dw_pcie_device_mode { - DW_PCIE_UNKNOWN_TYPE, - DW_PCIE_EP_TYPE, - DW_PCIE_LEG_EP_TYPE, - DW_PCIE_RC_TYPE, -}; - -struct dw_pcie_host_ops { - int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); - int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val); - int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 *val); - int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 val); - int (*host_init)(struct pcie_port *pp); - void (*msi_set_irq)(struct pcie_port *pp, int irq); - void (*msi_clear_irq)(struct pcie_port *pp, int irq); - phys_addr_t (*get_msi_addr)(struct pcie_port *pp); - u32 (*get_msi_data)(struct pcie_port *pp, int pos); - void (*scan_bus)(struct pcie_port *pp); - void (*set_num_vectors)(struct pcie_port *pp); - int (*msi_host_init)(struct pcie_port *pp); - void (*msi_irq_ack)(int irq, struct pcie_port *pp); -}; - -struct pcie_port { - u8 root_bus_nr; - u64 cfg0_base; - void __iomem *va_cfg0_base; - u32 cfg0_size; - u64 cfg1_base; - void __iomem *va_cfg1_base; - u32 cfg1_size; - resource_size_t io_base; - phys_addr_t io_bus_addr; - u32 io_size; - u64 mem_base; - phys_addr_t mem_bus_addr; - u32 
mem_size; - struct resource *cfg; - struct resource *io; - struct resource *mem; - struct resource *busn; - int irq; - const struct dw_pcie_host_ops *ops; - int msi_irq; - struct irq_domain *irq_domain; - struct irq_domain *msi_domain; - dma_addr_t msi_data; - u32 num_vectors; - u32 irq_status[MAX_MSI_CTRLS]; - raw_spinlock_t lock; - DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); -}; - -enum dw_pcie_as_type { - DW_PCIE_AS_UNKNOWN, - DW_PCIE_AS_MEM, - DW_PCIE_AS_IO, -}; - -struct dw_pcie_ep_ops { - void (*ep_init)(struct dw_pcie_ep *ep); - int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u8 interrupt_num); -}; - -struct dw_pcie_ep { - struct pci_epc *epc; - struct dw_pcie_ep_ops *ops; - phys_addr_t phys_base; - size_t addr_size; - size_t page_size; - u8 bar_to_atu[6]; - phys_addr_t *outbound_addr; - unsigned long *ib_window_map; - unsigned long *ob_window_map; - u32 num_ib_windows; - u32 num_ob_windows; - void __iomem *msi_mem; - phys_addr_t msi_mem_phys; -}; - -struct dw_pcie_ops { - u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr); - u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, - size_t size); - void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, - size_t size, u32 val); - int (*link_up)(struct dw_pcie *pcie); - int (*start_link)(struct dw_pcie *pcie); - void (*stop_link)(struct dw_pcie *pcie); -}; - -struct dw_pcie { - struct device *dev; - void __iomem *dbi_base; - void __iomem *dbi_base2; - u32 num_viewport; - u8 iatu_unroll_enabled; - struct pcie_port pp; - struct dw_pcie_ep ep; - const struct dw_pcie_ops *ops; -}; - -#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) - -#define to_dw_pcie_from_ep(endpoint) \ - container_of((endpoint), struct dw_pcie, ep) - -int dw_pcie_read(void __iomem *addr, int size, u32 *val); -int dw_pcie_write(void __iomem *addr, int size, u32 val); - -u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t 
size); -void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, - size_t size, u32 val); -int dw_pcie_link_up(struct dw_pcie *pci); -int dw_pcie_wait_for_link(struct dw_pcie *pci); -void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, - int type, u64 cpu_addr, u64 pci_addr, - u32 size); -int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, - u64 cpu_addr, enum dw_pcie_as_type as_type); -void dw_pcie_disable_atu(struct dw_pcie *pci, int index, - enum dw_pcie_region_type type); -void dw_pcie_setup(struct dw_pcie *pci); - -static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) -{ - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val); -} - -static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) -{ - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4); -} - -static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val) -{ - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val); -} - -static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg) -{ - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2); -} - -static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val) -{ - __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val); -} - -static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) -{ - return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1); -} - -static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) -{ - __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); -} - -static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) -{ - return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); -} - -static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) -{ - u32 reg; - u32 val; - - reg = PCIE_MISC_CONTROL_1_OFF; - val = dw_pcie_readl_dbi(pci, reg); - val |= PCIE_DBI_RO_WR_EN; - dw_pcie_writel_dbi(pci, reg, val); -} - -static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci) -{ - u32 reg; - u32 
val; - - reg = PCIE_MISC_CONTROL_1_OFF; - val = dw_pcie_readl_dbi(pci, reg); - val &= ~PCIE_DBI_RO_WR_EN; - dw_pcie_writel_dbi(pci, reg, val); -} - -#ifdef CONFIG_PCIE_DW_HOST -irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); -void dw_pcie_msi_init(struct pcie_port *pp); -void dw_pcie_free_msi(struct pcie_port *pp); -void dw_pcie_setup_rc(struct pcie_port *pp); -int dw_pcie_host_init(struct pcie_port *pp); -int dw_pcie_allocate_domains(struct pcie_port *pp); -#else -static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) -{ - return IRQ_NONE; -} - -static inline void dw_pcie_msi_init(struct pcie_port *pp) -{ -} - -static inline void dw_pcie_free_msi(struct pcie_port *pp) -{ -} - -static inline void dw_pcie_setup_rc(struct pcie_port *pp) -{ -} - -static inline int dw_pcie_host_init(struct pcie_port *pp) -{ - return 0; -} - -static inline int dw_pcie_allocate_domains(struct pcie_port *pp) -{ - return 0; -} -#endif - -#ifdef CONFIG_PCIE_DW_EP -void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); -int dw_pcie_ep_init(struct dw_pcie_ep *ep); -void dw_pcie_ep_exit(struct dw_pcie_ep *ep); -int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, - u8 interrupt_num); -void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); -#else -static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) -{ -} - -static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) -{ - return 0; -} - -static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) -{ -} - -static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, - u8 interrupt_num) -{ - return 0; -} - -static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) -{ -} -#endif -#endif /* _PCIE_DESIGNWARE_H */ diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c deleted file mode 100644 index 2658aaebb993..000000000000 --- a/drivers/pci/dwc/pcie-hisi.c +++ /dev/null @@ -1,398 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver 
for HiSilicon SoCs - * - * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com - * - * Authors: Zhou Wang - * Dacai Zhu - * Gabriele Paoloni - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../pci.h" - -#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) - -static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - int size, u32 *val) -{ - struct pci_config_window *cfg = bus->sysdata; - int dev = PCI_SLOT(devfn); - - if (bus->number == cfg->busr.start) { - /* access only one slot on each root port */ - if (dev > 0) - return PCIBIOS_DEVICE_NOT_FOUND; - else - return pci_generic_config_read32(bus, devfn, where, - size, val); - } - - return pci_generic_config_read(bus, devfn, where, size, val); -} - -static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) -{ - struct pci_config_window *cfg = bus->sysdata; - int dev = PCI_SLOT(devfn); - - if (bus->number == cfg->busr.start) { - /* access only one slot on each root port */ - if (dev > 0) - return PCIBIOS_DEVICE_NOT_FOUND; - else - return pci_generic_config_write32(bus, devfn, where, - size, val); - } - - return pci_generic_config_write(bus, devfn, where, size, val); -} - -static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, - int where) -{ - struct pci_config_window *cfg = bus->sysdata; - void __iomem *reg_base = cfg->priv; - - if (bus->number == cfg->busr.start) - return reg_base + where; - else - return pci_ecam_map_bus(bus, devfn, where); -} - -#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) - -static int hisi_pcie_init(struct pci_config_window *cfg) -{ - struct device *dev = cfg->parent; - struct acpi_device *adev = to_acpi_device(dev); - struct acpi_pci_root *root = acpi_driver_data(adev); - struct resource *res; - void __iomem *reg_base; - int ret; - - /* - * Retrieve RC base and size from a HISI0081 device 
with _UID - * matching our segment. - */ - res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); - if (!res) - return -ENOMEM; - - ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res); - if (ret) { - dev_err(dev, "can't get rc base address\n"); - return -ENOMEM; - } - - reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); - if (!reg_base) - return -ENOMEM; - - cfg->priv = reg_base; - return 0; -} - -struct pci_ecam_ops hisi_pcie_ops = { - .bus_shift = 20, - .init = hisi_pcie_init, - .pci_ops = { - .map_bus = hisi_pcie_map_bus, - .read = hisi_pcie_rd_conf, - .write = hisi_pcie_wr_conf, - } -}; - -#endif - -#ifdef CONFIG_PCI_HISI - -#include "pcie-designware.h" - -#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 -#define PCIE_HIP06_CTRL_OFF 0x1000 -#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c) -#define PCIE_LTSSM_LINKUP_STATE 0x11 -#define PCIE_LTSSM_STATE_MASK 0x3F - -#define to_hisi_pcie(x) dev_get_drvdata((x)->dev) - -struct hisi_pcie; - -struct pcie_soc_ops { - int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie); -}; - -struct hisi_pcie { - struct dw_pcie *pci; - struct regmap *subctrl; - u32 port_id; - const struct pcie_soc_ops *soc_ops; -}; - -/* HipXX PCIe host only supports 32-bit config access */ -static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, - u32 *val) -{ - u32 reg; - u32 reg_val; - void *walker = ®_val; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - walker += (where & 0x3); - reg = where & ~0x3; - reg_val = dw_pcie_readl_dbi(pci, reg); - - if (size == 1) - *val = *(u8 __force *) walker; - else if (size == 2) - *val = *(u16 __force *) walker; - else if (size == 4) - *val = reg_val; - else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -/* HipXX PCIe host only supports 32-bit config access */ -static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, - u32 val) -{ - u32 reg_val; - u32 reg; - void *walker = ®_val; - struct dw_pcie *pci = 
to_dw_pcie_from_pp(pp); - - walker += (where & 0x3); - reg = where & ~0x3; - if (size == 4) - dw_pcie_writel_dbi(pci, reg, val); - else if (size == 2) { - reg_val = dw_pcie_readl_dbi(pci, reg); - *(u16 __force *) walker = val; - dw_pcie_writel_dbi(pci, reg, reg_val); - } else if (size == 1) { - reg_val = dw_pcie_readl_dbi(pci, reg); - *(u8 __force *) walker = val; - dw_pcie_writel_dbi(pci, reg, reg_val); - } else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) -{ - u32 val; - - regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + - 0x100 * hisi_pcie->port_id, &val); - - return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); -} - -static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) -{ - struct dw_pcie *pci = hisi_pcie->pci; - u32 val; - - val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4); - - return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); -} - -static int hisi_pcie_link_up(struct dw_pcie *pci) -{ - struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci); - - return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); -} - -static const struct dw_pcie_host_ops hisi_pcie_host_ops = { - .rd_own_conf = hisi_pcie_cfg_read, - .wr_own_conf = hisi_pcie_cfg_write, -}; - -static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = hisi_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - u32 port_id; - - if (of_property_read_u32(dev->of_node, "port-id", &port_id)) { - dev_err(dev, "failed to read port-id\n"); - return -EINVAL; - } - if (port_id > 3) { - dev_err(dev, "Invalid port-id: %d\n", port_id); - return -EINVAL; - } - hisi_pcie->port_id = port_id; - - pp->ops = &hisi_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops 
dw_pcie_ops = { - .link_up = hisi_pcie_link_up, -}; - -static int hisi_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct hisi_pcie *hisi_pcie; - struct resource *reg; - int ret; - - hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL); - if (!hisi_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - hisi_pcie->pci = pci; - - hisi_pcie->soc_ops = of_device_get_match_data(dev); - - hisi_pcie->subctrl = - syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); - if (IS_ERR(hisi_pcie->subctrl)) { - dev_err(dev, "cannot get subctrl base\n"); - return PTR_ERR(hisi_pcie->subctrl); - } - - reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - platform_set_drvdata(pdev, hisi_pcie); - - ret = hisi_add_pcie_port(hisi_pcie, pdev); - if (ret) - return ret; - - return 0; -} - -static struct pcie_soc_ops hip05_ops = { - &hisi_pcie_link_up_hip05 -}; - -static struct pcie_soc_ops hip06_ops = { - &hisi_pcie_link_up_hip06 -}; - -static const struct of_device_id hisi_pcie_of_match[] = { - { - .compatible = "hisilicon,hip05-pcie", - .data = (void *) &hip05_ops, - }, - { - .compatible = "hisilicon,hip06-pcie", - .data = (void *) &hip06_ops, - }, - {}, -}; - -static struct platform_driver hisi_pcie_driver = { - .probe = hisi_pcie_probe, - .driver = { - .name = "hisi-pcie", - .of_match_table = hisi_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(hisi_pcie_driver); - -static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct pci_ecam_ops *ops; - - ops = (struct pci_ecam_ops *)of_device_get_match_data(dev); - return pci_host_common_probe(pdev, ops); -} - -static int 
hisi_pcie_platform_init(struct pci_config_window *cfg) -{ - struct device *dev = cfg->parent; - struct platform_device *pdev = to_platform_device(dev); - struct resource *res; - void __iomem *reg_base; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(dev, "missing \"reg[1]\"property\n"); - return -EINVAL; - } - - reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); - if (!reg_base) - return -ENOMEM; - - cfg->priv = reg_base; - return 0; -} - -struct pci_ecam_ops hisi_pcie_platform_ops = { - .bus_shift = 20, - .init = hisi_pcie_platform_init, - .pci_ops = { - .map_bus = hisi_pcie_map_bus, - .read = hisi_pcie_rd_conf, - .write = hisi_pcie_wr_conf, - } -}; - -static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = { - { - .compatible = "hisilicon,hip06-pcie-ecam", - .data = (void *) &hisi_pcie_platform_ops, - }, - { - .compatible = "hisilicon,hip07-pcie-ecam", - .data = (void *) &hisi_pcie_platform_ops, - }, - {}, -}; - -static struct platform_driver hisi_pcie_almost_ecam_driver = { - .probe = hisi_pcie_almost_ecam_probe, - .driver = { - .name = "hisi-pcie-almost-ecam", - .of_match_table = hisi_pcie_almost_ecam_of_match, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(hisi_pcie_almost_ecam_driver); - -#endif -#endif diff --git a/drivers/pci/dwc/pcie-histb.c b/drivers/pci/dwc/pcie-histb.c deleted file mode 100644 index 3611d6ce9a92..000000000000 --- a/drivers/pci/dwc/pcie-histb.c +++ /dev/null @@ -1,472 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for HiSilicon STB SoCs - * - * Copyright (C) 2016-2017 HiSilicon Co., Ltd. 
http://www.hisilicon.com - * - * Authors: Ruqiang Ju - * Jianguo Sun - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -#define to_histb_pcie(x) dev_get_drvdata((x)->dev) - -#define PCIE_SYS_CTRL0 0x0000 -#define PCIE_SYS_CTRL1 0x0004 -#define PCIE_SYS_CTRL7 0x001C -#define PCIE_SYS_CTRL13 0x0034 -#define PCIE_SYS_CTRL15 0x003C -#define PCIE_SYS_CTRL16 0x0040 -#define PCIE_SYS_CTRL17 0x0044 - -#define PCIE_SYS_STAT0 0x0100 -#define PCIE_SYS_STAT4 0x0110 - -#define PCIE_RDLH_LINK_UP BIT(5) -#define PCIE_XMLH_LINK_UP BIT(15) -#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) -#define PCIE_APP_LTSSM_ENABLE BIT(11) - -#define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28) -#define PCIE_WM_EP 0 -#define PCIE_WM_LEGACY BIT(1) -#define PCIE_WM_RC BIT(30) - -#define PCIE_LTSSM_STATE_MASK GENMASK(5, 0) -#define PCIE_LTSSM_STATE_ACTIVE 0x11 - -struct histb_pcie { - struct dw_pcie *pci; - struct clk *aux_clk; - struct clk *pipe_clk; - struct clk *sys_clk; - struct clk *bus_clk; - struct phy *phy; - struct reset_control *soft_reset; - struct reset_control *sys_reset; - struct reset_control *bus_reset; - void __iomem *ctrl; - int reset_gpio; - struct regulator *vpcie; -}; - -static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg) -{ - return readl(histb_pcie->ctrl + reg); -} - -static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val) -{ - writel(val, histb_pcie->ctrl + reg); -} - -static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct histb_pcie *hipcie = to_histb_pcie(pci); - u32 val; - - val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); - if (enable) - val |= PCIE_ELBI_SLV_DBI_ENABLE; - else - val &= ~PCIE_ELBI_SLV_DBI_ENABLE; - histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val); -} - -static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable) -{ - struct dw_pcie *pci = 
to_dw_pcie_from_pp(pp); - struct histb_pcie *hipcie = to_histb_pcie(pci); - u32 val; - - val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1); - if (enable) - val |= PCIE_ELBI_SLV_DBI_ENABLE; - else - val &= ~PCIE_ELBI_SLV_DBI_ENABLE; - histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val); -} - -static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, - u32 reg, size_t size) -{ - u32 val; - - histb_pcie_dbi_r_mode(&pci->pp, true); - dw_pcie_read(base + reg, size, &val); - histb_pcie_dbi_r_mode(&pci->pp, false); - - return val; -} - -static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, - u32 reg, size_t size, u32 val) -{ - histb_pcie_dbi_w_mode(&pci->pp, true); - dw_pcie_write(base + reg, size, val); - histb_pcie_dbi_w_mode(&pci->pp, false); -} - -static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where, - int size, u32 *val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - int ret; - - histb_pcie_dbi_r_mode(pp, true); - ret = dw_pcie_read(pci->dbi_base + where, size, val); - histb_pcie_dbi_r_mode(pp, false); - - return ret; -} - -static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where, - int size, u32 val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - int ret; - - histb_pcie_dbi_w_mode(pp, true); - ret = dw_pcie_write(pci->dbi_base + where, size, val); - histb_pcie_dbi_w_mode(pp, false); - - return ret; -} - -static int histb_pcie_link_up(struct dw_pcie *pci) -{ - struct histb_pcie *hipcie = to_histb_pcie(pci); - u32 regval; - u32 status; - - regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0); - status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4); - status &= PCIE_LTSSM_STATE_MASK; - if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) && - (status == PCIE_LTSSM_STATE_ACTIVE)) - return 1; - - return 0; -} - -static int histb_pcie_establish_link(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct histb_pcie *hipcie = to_histb_pcie(pci); - u32 regval; - - if 
(dw_pcie_link_up(pci)) { - dev_info(pci->dev, "Link already up\n"); - return 0; - } - - /* PCIe RC work mode */ - regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); - regval &= ~PCIE_DEVICE_TYPE_MASK; - regval |= PCIE_WM_RC; - histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval); - - /* setup root complex */ - dw_pcie_setup_rc(pp); - - /* assert LTSSM enable */ - regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7); - regval |= PCIE_APP_LTSSM_ENABLE; - histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval); - - return dw_pcie_wait_for_link(pci); -} - -static int histb_pcie_host_init(struct pcie_port *pp) -{ - histb_pcie_establish_link(pp); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - dw_pcie_msi_init(pp); - - return 0; -} - -static struct dw_pcie_host_ops histb_pcie_host_ops = { - .rd_own_conf = histb_pcie_rd_own_conf, - .wr_own_conf = histb_pcie_wr_own_conf, - .host_init = histb_pcie_host_init, -}; - -static void histb_pcie_host_disable(struct histb_pcie *hipcie) -{ - reset_control_assert(hipcie->soft_reset); - reset_control_assert(hipcie->sys_reset); - reset_control_assert(hipcie->bus_reset); - - clk_disable_unprepare(hipcie->aux_clk); - clk_disable_unprepare(hipcie->pipe_clk); - clk_disable_unprepare(hipcie->sys_clk); - clk_disable_unprepare(hipcie->bus_clk); - - if (gpio_is_valid(hipcie->reset_gpio)) - gpio_set_value_cansleep(hipcie->reset_gpio, 0); - - if (hipcie->vpcie) - regulator_disable(hipcie->vpcie); -} - -static int histb_pcie_host_enable(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct histb_pcie *hipcie = to_histb_pcie(pci); - struct device *dev = pci->dev; - int ret; - - /* power on PCIe device if have */ - if (hipcie->vpcie) { - ret = regulator_enable(hipcie->vpcie); - if (ret) { - dev_err(dev, "failed to enable regulator: %d\n", ret); - return ret; - } - } - - if (gpio_is_valid(hipcie->reset_gpio)) - gpio_set_value_cansleep(hipcie->reset_gpio, 1); - - ret = clk_prepare_enable(hipcie->bus_clk); - if (ret) { - dev_err(dev, "cannot 
prepare/enable bus clk\n"); - goto err_bus_clk; - } - - ret = clk_prepare_enable(hipcie->sys_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable sys clk\n"); - goto err_sys_clk; - } - - ret = clk_prepare_enable(hipcie->pipe_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable pipe clk\n"); - goto err_pipe_clk; - } - - ret = clk_prepare_enable(hipcie->aux_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable aux clk\n"); - goto err_aux_clk; - } - - reset_control_assert(hipcie->soft_reset); - reset_control_deassert(hipcie->soft_reset); - - reset_control_assert(hipcie->sys_reset); - reset_control_deassert(hipcie->sys_reset); - - reset_control_assert(hipcie->bus_reset); - reset_control_deassert(hipcie->bus_reset); - - return 0; - -err_aux_clk: - clk_disable_unprepare(hipcie->pipe_clk); -err_pipe_clk: - clk_disable_unprepare(hipcie->sys_clk); -err_sys_clk: - clk_disable_unprepare(hipcie->bus_clk); -err_bus_clk: - if (hipcie->vpcie) - regulator_disable(hipcie->vpcie); - - return ret; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .read_dbi = histb_pcie_read_dbi, - .write_dbi = histb_pcie_write_dbi, - .link_up = histb_pcie_link_up, -}; - -static int histb_pcie_probe(struct platform_device *pdev) -{ - struct histb_pcie *hipcie; - struct dw_pcie *pci; - struct pcie_port *pp; - struct resource *res; - struct device_node *np = pdev->dev.of_node; - struct device *dev = &pdev->dev; - enum of_gpio_flags of_flags; - unsigned long flag = GPIOF_DIR_OUT; - int ret; - - hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL); - if (!hipcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - hipcie->pci = pci; - pp = &pci->pp; - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); - hipcie->ctrl = devm_ioremap_resource(dev, res); - if (IS_ERR(hipcie->ctrl)) { - dev_err(dev, "cannot get control reg base\n"); - return PTR_ERR(hipcie->ctrl); - } - - 
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi"); - pci->dbi_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "cannot get rc-dbi base\n"); - return PTR_ERR(pci->dbi_base); - } - - hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); - if (IS_ERR(hipcie->vpcie)) { - if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER) - return -EPROBE_DEFER; - hipcie->vpcie = NULL; - } - - hipcie->reset_gpio = of_get_named_gpio_flags(np, - "reset-gpios", 0, &of_flags); - if (of_flags & OF_GPIO_ACTIVE_LOW) - flag |= GPIOF_ACTIVE_LOW; - if (gpio_is_valid(hipcie->reset_gpio)) { - ret = devm_gpio_request_one(dev, hipcie->reset_gpio, - flag, "PCIe device power control"); - if (ret) { - dev_err(dev, "unable to request gpio\n"); - return ret; - } - } - - hipcie->aux_clk = devm_clk_get(dev, "aux"); - if (IS_ERR(hipcie->aux_clk)) { - dev_err(dev, "Failed to get PCIe aux clk\n"); - return PTR_ERR(hipcie->aux_clk); - } - - hipcie->pipe_clk = devm_clk_get(dev, "pipe"); - if (IS_ERR(hipcie->pipe_clk)) { - dev_err(dev, "Failed to get PCIe pipe clk\n"); - return PTR_ERR(hipcie->pipe_clk); - } - - hipcie->sys_clk = devm_clk_get(dev, "sys"); - if (IS_ERR(hipcie->sys_clk)) { - dev_err(dev, "Failed to get PCIEe sys clk\n"); - return PTR_ERR(hipcie->sys_clk); - } - - hipcie->bus_clk = devm_clk_get(dev, "bus"); - if (IS_ERR(hipcie->bus_clk)) { - dev_err(dev, "Failed to get PCIe bus clk\n"); - return PTR_ERR(hipcie->bus_clk); - } - - hipcie->soft_reset = devm_reset_control_get(dev, "soft"); - if (IS_ERR(hipcie->soft_reset)) { - dev_err(dev, "couldn't get soft reset\n"); - return PTR_ERR(hipcie->soft_reset); - } - - hipcie->sys_reset = devm_reset_control_get(dev, "sys"); - if (IS_ERR(hipcie->sys_reset)) { - dev_err(dev, "couldn't get sys reset\n"); - return PTR_ERR(hipcie->sys_reset); - } - - hipcie->bus_reset = devm_reset_control_get(dev, "bus"); - if (IS_ERR(hipcie->bus_reset)) { - dev_err(dev, "couldn't get bus reset\n"); - return 
PTR_ERR(hipcie->bus_reset); - } - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "Failed to get MSI IRQ\n"); - return pp->msi_irq; - } - } - - hipcie->phy = devm_phy_get(dev, "phy"); - if (IS_ERR(hipcie->phy)) { - dev_info(dev, "no pcie-phy found\n"); - hipcie->phy = NULL; - /* fall through here! - * if no pcie-phy found, phy init - * should be done under boot! - */ - } else { - phy_init(hipcie->phy); - } - - pp->root_bus_nr = -1; - pp->ops = &histb_pcie_host_ops; - - platform_set_drvdata(pdev, hipcie); - - ret = histb_pcie_host_enable(pp); - if (ret) { - dev_err(dev, "failed to enable host\n"); - return ret; - } - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static int histb_pcie_remove(struct platform_device *pdev) -{ - struct histb_pcie *hipcie = platform_get_drvdata(pdev); - - histb_pcie_host_disable(hipcie); - - if (hipcie->phy) - phy_exit(hipcie->phy); - - return 0; -} - -static const struct of_device_id histb_pcie_of_match[] = { - { .compatible = "hisilicon,hi3798cv200-pcie", }, - {}, -}; -MODULE_DEVICE_TABLE(of, histb_pcie_of_match); - -static struct platform_driver histb_pcie_platform_driver = { - .probe = histb_pcie_probe, - .remove = histb_pcie_remove, - .driver = { - .name = "histb-pcie", - .of_match_table = histb_pcie_of_match, - }, -}; -module_platform_driver(histb_pcie_platform_driver); - -MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c deleted file mode 100644 index d2970a009eb5..000000000000 --- a/drivers/pci/dwc/pcie-kirin.c +++ /dev/null @@ -1,515 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for Kirin Phone SoCs - * - * Copyright (C) 2017 Hilisicon Electronics Co., Ltd. 
- * http://www.huawei.com - * - * Author: Xiaowei Song - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "pcie-designware.h" - -#define to_kirin_pcie(x) dev_get_drvdata((x)->dev) - -#define REF_CLK_FREQ 100000000 - -/* PCIe ELBI registers */ -#define SOC_PCIECTRL_CTRL0_ADDR 0x000 -#define SOC_PCIECTRL_CTRL1_ADDR 0x004 -#define SOC_PCIEPHY_CTRL2_ADDR 0x008 -#define SOC_PCIEPHY_CTRL3_ADDR 0x00c -#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) - -/* info located in APB */ -#define PCIE_APP_LTSSM_ENABLE 0x01c -#define PCIE_APB_PHY_CTRL0 0x0 -#define PCIE_APB_PHY_CTRL1 0x4 -#define PCIE_APB_PHY_STATUS0 0x400 -#define PCIE_LINKUP_ENABLE (0x8020) -#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) -#define PIPE_CLK_STABLE (0x1 << 19) -#define PHY_REF_PAD_BIT (0x1 << 8) -#define PHY_PWR_DOWN_BIT (0x1 << 22) -#define PHY_RST_ACK_BIT (0x1 << 16) - -/* info located in sysctrl */ -#define SCTRL_PCIE_CMOS_OFFSET 0x60 -#define SCTRL_PCIE_CMOS_BIT 0x10 -#define SCTRL_PCIE_ISO_OFFSET 0x44 -#define SCTRL_PCIE_ISO_BIT 0x30 -#define SCTRL_PCIE_HPCLK_OFFSET 0x190 -#define SCTRL_PCIE_HPCLK_BIT 0x184000 -#define SCTRL_PCIE_OE_OFFSET 0x14a -#define PCIE_DEBOUNCE_PARAM 0xF0F400 -#define PCIE_OE_BYPASS (0x3 << 28) - -/* peri_crg ctrl */ -#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 -#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 - -/* Time for delay */ -#define REF_2_PERST_MIN 20000 -#define REF_2_PERST_MAX 25000 -#define PERST_2_ACCESS_MIN 10000 -#define PERST_2_ACCESS_MAX 12000 -#define LINK_WAIT_MIN 900 -#define LINK_WAIT_MAX 1000 -#define PIPE_CLK_WAIT_MIN 550 -#define PIPE_CLK_WAIT_MAX 600 -#define TIME_CMOS_MIN 100 -#define TIME_CMOS_MAX 105 -#define TIME_PHY_PD_MIN 10 -#define TIME_PHY_PD_MAX 11 - -struct kirin_pcie { - struct dw_pcie *pci; - void __iomem *apb_base; - void __iomem *phy_base; - struct regmap *crgctrl; - struct regmap *sysctrl; - struct clk *apb_sys_clk; - struct 
clk *apb_phy_clk; - struct clk *phy_ref_clk; - struct clk *pcie_aclk; - struct clk *pcie_aux_clk; - int gpio_id_reset; -}; - -/* Registers in PCIeCTRL */ -static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie, - u32 val, u32 reg) -{ - writel(val, kirin_pcie->apb_base + reg); -} - -static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg) -{ - return readl(kirin_pcie->apb_base + reg); -} - -/* Registers in PCIePHY */ -static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie, - u32 val, u32 reg) -{ - writel(val, kirin_pcie->phy_base + reg); -} - -static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg) -{ - return readl(kirin_pcie->phy_base + reg); -} - -static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, - struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - - kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); - if (IS_ERR(kirin_pcie->phy_ref_clk)) - return PTR_ERR(kirin_pcie->phy_ref_clk); - - kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux"); - if (IS_ERR(kirin_pcie->pcie_aux_clk)) - return PTR_ERR(kirin_pcie->pcie_aux_clk); - - kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); - if (IS_ERR(kirin_pcie->apb_phy_clk)) - return PTR_ERR(kirin_pcie->apb_phy_clk); - - kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); - if (IS_ERR(kirin_pcie->apb_sys_clk)) - return PTR_ERR(kirin_pcie->apb_sys_clk); - - kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk"); - if (IS_ERR(kirin_pcie->pcie_aclk)) - return PTR_ERR(kirin_pcie->pcie_aclk); - - return 0; -} - -static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, - struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct resource *apb; - struct resource *phy; - struct resource *dbi; - - apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); - kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); - if (IS_ERR(kirin_pcie->apb_base)) - return 
PTR_ERR(kirin_pcie->apb_base); - - phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); - if (IS_ERR(kirin_pcie->phy_base)) - return PTR_ERR(kirin_pcie->phy_base); - - dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); - if (IS_ERR(kirin_pcie->pci->dbi_base)) - return PTR_ERR(kirin_pcie->pci->dbi_base); - - kirin_pcie->crgctrl = - syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl"); - if (IS_ERR(kirin_pcie->crgctrl)) - return PTR_ERR(kirin_pcie->crgctrl); - - kirin_pcie->sysctrl = - syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl"); - if (IS_ERR(kirin_pcie->sysctrl)) - return PTR_ERR(kirin_pcie->sysctrl); - - return 0; -} - -static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie) -{ - struct device *dev = kirin_pcie->pci->dev; - u32 reg_val; - - reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); - reg_val &= ~PHY_REF_PAD_BIT; - kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); - - reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0); - reg_val &= ~PHY_PWR_DOWN_BIT; - kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0); - usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX); - - reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); - reg_val &= ~PHY_RST_ACK_BIT; - kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); - - usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX); - reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); - if (reg_val & PIPE_CLK_STABLE) { - dev_err(dev, "PIPE clk is not stable\n"); - return -EINVAL; - } - - return 0; -} - -static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie) -{ - u32 val; - - regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val); - val |= PCIE_DEBOUNCE_PARAM; - val &= ~PCIE_OE_BYPASS; - regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val); -} - -static int 
kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable) -{ - int ret = 0; - - if (!enable) - goto close_clk; - - ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ); - if (ret) - return ret; - - ret = clk_prepare_enable(kirin_pcie->phy_ref_clk); - if (ret) - return ret; - - ret = clk_prepare_enable(kirin_pcie->apb_sys_clk); - if (ret) - goto apb_sys_fail; - - ret = clk_prepare_enable(kirin_pcie->apb_phy_clk); - if (ret) - goto apb_phy_fail; - - ret = clk_prepare_enable(kirin_pcie->pcie_aclk); - if (ret) - goto aclk_fail; - - ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk); - if (ret) - goto aux_clk_fail; - - return 0; - -close_clk: - clk_disable_unprepare(kirin_pcie->pcie_aux_clk); -aux_clk_fail: - clk_disable_unprepare(kirin_pcie->pcie_aclk); -aclk_fail: - clk_disable_unprepare(kirin_pcie->apb_phy_clk); -apb_phy_fail: - clk_disable_unprepare(kirin_pcie->apb_sys_clk); -apb_sys_fail: - clk_disable_unprepare(kirin_pcie->phy_ref_clk); - - return ret; -} - -static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie) -{ - int ret; - - /* Power supply for Host */ - regmap_write(kirin_pcie->sysctrl, - SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); - usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); - kirin_pcie_oe_enable(kirin_pcie); - - ret = kirin_pcie_clk_ctrl(kirin_pcie, true); - if (ret) - return ret; - - /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ - regmap_write(kirin_pcie->sysctrl, - SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); - regmap_write(kirin_pcie->crgctrl, - CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); - regmap_write(kirin_pcie->sysctrl, - SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); - - ret = kirin_pcie_phy_init(kirin_pcie); - if (ret) - goto close_clk; - - /* perst assert Endpoint */ - if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) { - usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); - ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1); - if (ret) - goto close_clk; - usleep_range(PERST_2_ACCESS_MIN, 
PERST_2_ACCESS_MAX); - - return 0; - } - -close_clk: - kirin_pcie_clk_ctrl(kirin_pcie, false); - return ret; -} - -static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, - bool on) -{ - u32 val; - - val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR); - if (on) - val = val | PCIE_ELBI_SLV_DBI_ENABLE; - else - val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; - - kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR); -} - -static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, - bool on) -{ - u32 val; - - val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR); - if (on) - val = val | PCIE_ELBI_SLV_DBI_ENABLE; - else - val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; - - kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR); -} - -static int kirin_pcie_rd_own_conf(struct pcie_port *pp, - int where, int size, u32 *val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); - int ret; - - kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); - ret = dw_pcie_read(pci->dbi_base + where, size, val); - kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); - - return ret; -} - -static int kirin_pcie_wr_own_conf(struct pcie_port *pp, - int where, int size, u32 val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); - int ret; - - kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); - ret = dw_pcie_write(pci->dbi_base + where, size, val); - kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); - - return ret; -} - -static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, - u32 reg, size_t size) -{ - struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); - u32 ret; - - kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); - dw_pcie_read(base + reg, size, &ret); - kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); - - return ret; -} - -static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, - u32 reg, 
size_t size, u32 val) -{ - struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); - - kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); - dw_pcie_write(base + reg, size, val); - kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); -} - -static int kirin_pcie_link_up(struct dw_pcie *pci) -{ - struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); - u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); - - if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) - return 1; - - return 0; -} - -static int kirin_pcie_establish_link(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); - struct device *dev = kirin_pcie->pci->dev; - int count = 0; - - if (kirin_pcie_link_up(pci)) - return 0; - - dw_pcie_setup_rc(pp); - - /* assert LTSSM enable */ - kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT, - PCIE_APP_LTSSM_ENABLE); - - /* check if the link is up or not */ - while (!kirin_pcie_link_up(pci)) { - usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); - count++; - if (count == 1000) { - dev_err(dev, "Link Fail\n"); - return -EINVAL; - } - } - - return 0; -} - -static int kirin_pcie_host_init(struct pcie_port *pp) -{ - kirin_pcie_establish_link(pp); - - return 0; -} - -static struct dw_pcie_ops kirin_dw_pcie_ops = { - .read_dbi = kirin_pcie_read_dbi, - .write_dbi = kirin_pcie_write_dbi, - .link_up = kirin_pcie_link_up, -}; - -static const struct dw_pcie_host_ops kirin_pcie_host_ops = { - .rd_own_conf = kirin_pcie_rd_own_conf, - .wr_own_conf = kirin_pcie_wr_own_conf, - .host_init = kirin_pcie_host_init, -}; - -static int __init kirin_add_pcie_port(struct dw_pcie *pci, - struct platform_device *pdev) -{ - pci->pp.ops = &kirin_pcie_host_ops; - - return dw_pcie_host_init(&pci->pp); -} - -static int kirin_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct kirin_pcie *kirin_pcie; - struct dw_pcie *pci; - int ret; - - if (!dev->of_node) { - dev_err(dev, "NULL node\n"); 
- return -EINVAL; - } - - kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); - if (!kirin_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &kirin_dw_pcie_ops; - kirin_pcie->pci = pci; - - ret = kirin_pcie_get_clk(kirin_pcie, pdev); - if (ret) - return ret; - - ret = kirin_pcie_get_resource(kirin_pcie, pdev); - if (ret) - return ret; - - kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, - "reset-gpios", 0); - if (kirin_pcie->gpio_id_reset < 0) - return -ENODEV; - - ret = kirin_pcie_power_on(kirin_pcie); - if (ret) - return ret; - - platform_set_drvdata(pdev, kirin_pcie); - - return kirin_add_pcie_port(pci, pdev); -} - -static const struct of_device_id kirin_pcie_match[] = { - { .compatible = "hisilicon,kirin960-pcie" }, - {}, -}; - -static struct platform_driver kirin_pcie_driver = { - .probe = kirin_pcie_probe, - .driver = { - .name = "kirin-pcie", - .of_match_table = kirin_pcie_match, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(kirin_pcie_driver); diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c deleted file mode 100644 index a1d0198081a6..000000000000 --- a/drivers/pci/dwc/pcie-qcom.c +++ /dev/null @@ -1,1299 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Qualcomm PCIe root complex driver - * - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. - * Copyright 2015 Linaro Limited. 
- * - * Author: Stanimir Varbanov - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -#define PCIE20_PARF_SYS_CTRL 0x00 -#define MST_WAKEUP_EN BIT(13) -#define SLV_WAKEUP_EN BIT(12) -#define MSTR_ACLK_CGC_DIS BIT(10) -#define SLV_ACLK_CGC_DIS BIT(9) -#define CORE_CLK_CGC_DIS BIT(6) -#define AUX_PWR_DET BIT(4) -#define L23_CLK_RMV_DIS BIT(2) -#define L1_CLK_RMV_DIS BIT(1) - -#define PCIE20_COMMAND_STATUS 0x04 -#define CMD_BME_VAL 0x4 -#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 -#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 - -#define PCIE20_PARF_PHY_CTRL 0x40 -#define PCIE20_PARF_PHY_REFCLK 0x4C -#define PCIE20_PARF_DBI_BASE_ADDR 0x168 -#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C -#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 -#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 -#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 -#define PCIE20_PARF_LTSSM 0x1B0 -#define PCIE20_PARF_SID_OFFSET 0x234 -#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C - -#define PCIE20_ELBI_SYS_CTRL 0x04 -#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) - -#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 -#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 -#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 -#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c -#define CFG_BRIDGE_SB_INIT BIT(0) - -#define PCIE20_CAP 0x70 -#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) -#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) -#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) -#define PCIE_CAP_LINK1_VAL 0x2FD7F - -#define PCIE20_PARF_Q2A_FLUSH 0x1AC - -#define PCIE20_MISC_CONTROL_1_REG 0x8BC -#define DBI_RO_WR_EN 1 - -#define PERST_DELAY_US 1000 - -#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 -#define SLV_ADDR_SPACE_SZ 0x10000000 - -#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 -struct qcom_pcie_resources_2_1_0 { - struct clk *iface_clk; - 
struct clk *core_clk; - struct clk *phy_clk; - struct reset_control *pci_reset; - struct reset_control *axi_reset; - struct reset_control *ahb_reset; - struct reset_control *por_reset; - struct reset_control *phy_reset; - struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; -}; - -struct qcom_pcie_resources_1_0_0 { - struct clk *iface; - struct clk *aux; - struct clk *master_bus; - struct clk *slave_bus; - struct reset_control *core; - struct regulator *vdda; -}; - -#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2 -struct qcom_pcie_resources_2_3_2 { - struct clk *aux_clk; - struct clk *master_clk; - struct clk *slave_clk; - struct clk *cfg_clk; - struct clk *pipe_clk; - struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; -}; - -struct qcom_pcie_resources_2_4_0 { - struct clk *aux_clk; - struct clk *master_clk; - struct clk *slave_clk; - struct reset_control *axi_m_reset; - struct reset_control *axi_s_reset; - struct reset_control *pipe_reset; - struct reset_control *axi_m_vmid_reset; - struct reset_control *axi_s_xpu_reset; - struct reset_control *parf_reset; - struct reset_control *phy_reset; - struct reset_control *axi_m_sticky_reset; - struct reset_control *pipe_sticky_reset; - struct reset_control *pwr_reset; - struct reset_control *ahb_reset; - struct reset_control *phy_ahb_reset; -}; - -struct qcom_pcie_resources_2_3_3 { - struct clk *iface; - struct clk *axi_m_clk; - struct clk *axi_s_clk; - struct clk *ahb_clk; - struct clk *aux_clk; - struct reset_control *rst[7]; -}; - -union qcom_pcie_resources { - struct qcom_pcie_resources_1_0_0 v1_0_0; - struct qcom_pcie_resources_2_1_0 v2_1_0; - struct qcom_pcie_resources_2_3_2 v2_3_2; - struct qcom_pcie_resources_2_3_3 v2_3_3; - struct qcom_pcie_resources_2_4_0 v2_4_0; -}; - -struct qcom_pcie; - -struct qcom_pcie_ops { - int (*get_resources)(struct qcom_pcie *pcie); - int (*init)(struct qcom_pcie *pcie); - int (*post_init)(struct qcom_pcie *pcie); - void (*deinit)(struct qcom_pcie *pcie); - void 
(*post_deinit)(struct qcom_pcie *pcie); - void (*ltssm_enable)(struct qcom_pcie *pcie); -}; - -struct qcom_pcie { - struct dw_pcie *pci; - void __iomem *parf; /* DT parf */ - void __iomem *elbi; /* DT elbi */ - union qcom_pcie_resources res; - struct phy *phy; - struct gpio_desc *reset; - const struct qcom_pcie_ops *ops; -}; - -#define to_qcom_pcie(x) dev_get_drvdata((x)->dev) - -static void qcom_ep_reset_assert(struct qcom_pcie *pcie) -{ - gpiod_set_value_cansleep(pcie->reset, 1); - usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); -} - -static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) -{ - gpiod_set_value_cansleep(pcie->reset, 0); - usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); -} - -static int qcom_pcie_establish_link(struct qcom_pcie *pcie) -{ - struct dw_pcie *pci = pcie->pci; - - if (dw_pcie_link_up(pci)) - return 0; - - /* Enable Link Training state machine */ - if (pcie->ops->ltssm_enable) - pcie->ops->ltssm_enable(pcie); - - return dw_pcie_wait_for_link(pci); -} - -static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) -{ - u32 val; - - /* enable link training */ - val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); - val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; - writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); -} - -static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - int ret; - - res->supplies[0].supply = "vdda"; - res->supplies[1].supply = "vdda_phy"; - res->supplies[2].supply = "vdda_refclk"; - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), - res->supplies); - if (ret) - return ret; - - res->iface_clk = devm_clk_get(dev, "iface"); - if (IS_ERR(res->iface_clk)) - return PTR_ERR(res->iface_clk); - - res->core_clk = devm_clk_get(dev, "core"); - if (IS_ERR(res->core_clk)) - return PTR_ERR(res->core_clk); - - res->phy_clk = devm_clk_get(dev, "phy"); - if (IS_ERR(res->phy_clk)) - 
return PTR_ERR(res->phy_clk); - - res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); - if (IS_ERR(res->pci_reset)) - return PTR_ERR(res->pci_reset); - - res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); - if (IS_ERR(res->axi_reset)) - return PTR_ERR(res->axi_reset); - - res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); - if (IS_ERR(res->ahb_reset)) - return PTR_ERR(res->ahb_reset); - - res->por_reset = devm_reset_control_get_exclusive(dev, "por"); - if (IS_ERR(res->por_reset)) - return PTR_ERR(res->por_reset); - - res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); - return PTR_ERR_OR_ZERO(res->phy_reset); -} - -static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; - - reset_control_assert(res->pci_reset); - reset_control_assert(res->axi_reset); - reset_control_assert(res->ahb_reset); - reset_control_assert(res->por_reset); - reset_control_assert(res->pci_reset); - clk_disable_unprepare(res->iface_clk); - clk_disable_unprepare(res->core_clk); - clk_disable_unprepare(res->phy_clk); - regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); -} - -static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - u32 val; - int ret; - - ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); - if (ret < 0) { - dev_err(dev, "cannot enable regulators\n"); - return ret; - } - - ret = reset_control_assert(res->ahb_reset); - if (ret) { - dev_err(dev, "cannot assert ahb reset\n"); - goto err_assert_ahb; - } - - ret = clk_prepare_enable(res->iface_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable iface clock\n"); - goto err_assert_ahb; - } - - ret = clk_prepare_enable(res->phy_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_phy; - } - - ret = 
clk_prepare_enable(res->core_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable core clock\n"); - goto err_clk_core; - } - - ret = reset_control_deassert(res->ahb_reset); - if (ret) { - dev_err(dev, "cannot deassert ahb reset\n"); - goto err_deassert_ahb; - } - - /* enable PCIe clocks and resets */ - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); - val &= ~BIT(0); - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); - - /* enable external reference clock */ - val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val |= BIT(16); - writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); - - ret = reset_control_deassert(res->phy_reset); - if (ret) { - dev_err(dev, "cannot deassert phy reset\n"); - return ret; - } - - ret = reset_control_deassert(res->pci_reset); - if (ret) { - dev_err(dev, "cannot deassert pci reset\n"); - return ret; - } - - ret = reset_control_deassert(res->por_reset); - if (ret) { - dev_err(dev, "cannot deassert por reset\n"); - return ret; - } - - ret = reset_control_deassert(res->axi_reset); - if (ret) { - dev_err(dev, "cannot deassert axi reset\n"); - return ret; - } - - /* wait for clock acquisition */ - usleep_range(1000, 1500); - - - /* Set the Max TLP size to 2K, instead of using default of 4K */ - writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, - pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); - writel(CFG_BRIDGE_SB_INIT, - pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); - - return 0; - -err_deassert_ahb: - clk_disable_unprepare(res->core_clk); -err_clk_core: - clk_disable_unprepare(res->phy_clk); -err_clk_phy: - clk_disable_unprepare(res->iface_clk); -err_assert_ahb: - regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); - - return ret; -} - -static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - - res->vdda = devm_regulator_get(dev, "vdda"); - if (IS_ERR(res->vdda)) - return 
PTR_ERR(res->vdda); - - res->iface = devm_clk_get(dev, "iface"); - if (IS_ERR(res->iface)) - return PTR_ERR(res->iface); - - res->aux = devm_clk_get(dev, "aux"); - if (IS_ERR(res->aux)) - return PTR_ERR(res->aux); - - res->master_bus = devm_clk_get(dev, "master_bus"); - if (IS_ERR(res->master_bus)) - return PTR_ERR(res->master_bus); - - res->slave_bus = devm_clk_get(dev, "slave_bus"); - if (IS_ERR(res->slave_bus)) - return PTR_ERR(res->slave_bus); - - res->core = devm_reset_control_get_exclusive(dev, "core"); - return PTR_ERR_OR_ZERO(res->core); -} - -static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; - - reset_control_assert(res->core); - clk_disable_unprepare(res->slave_bus); - clk_disable_unprepare(res->master_bus); - clk_disable_unprepare(res->iface); - clk_disable_unprepare(res->aux); - regulator_disable(res->vdda); -} - -static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - int ret; - - ret = reset_control_deassert(res->core); - if (ret) { - dev_err(dev, "cannot deassert core reset\n"); - return ret; - } - - ret = clk_prepare_enable(res->aux); - if (ret) { - dev_err(dev, "cannot prepare/enable aux clock\n"); - goto err_res; - } - - ret = clk_prepare_enable(res->iface); - if (ret) { - dev_err(dev, "cannot prepare/enable iface clock\n"); - goto err_aux; - } - - ret = clk_prepare_enable(res->master_bus); - if (ret) { - dev_err(dev, "cannot prepare/enable master_bus clock\n"); - goto err_iface; - } - - ret = clk_prepare_enable(res->slave_bus); - if (ret) { - dev_err(dev, "cannot prepare/enable slave_bus clock\n"); - goto err_master; - } - - ret = regulator_enable(res->vdda); - if (ret) { - dev_err(dev, "cannot enable vdda regulator\n"); - goto err_slave; - } - - /* change DBI base address */ - writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); - - if 
(IS_ENABLED(CONFIG_PCI_MSI)) { - u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); - - val |= BIT(31); - writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); - } - - return 0; -err_slave: - clk_disable_unprepare(res->slave_bus); -err_master: - clk_disable_unprepare(res->master_bus); -err_iface: - clk_disable_unprepare(res->iface); -err_aux: - clk_disable_unprepare(res->aux); -err_res: - reset_control_assert(res->core); - - return ret; -} - -static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) -{ - u32 val; - - /* enable link training */ - val = readl(pcie->parf + PCIE20_PARF_LTSSM); - val |= BIT(8); - writel(val, pcie->parf + PCIE20_PARF_LTSSM); -} - -static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - int ret; - - res->supplies[0].supply = "vdda"; - res->supplies[1].supply = "vddpe-3v3"; - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), - res->supplies); - if (ret) - return ret; - - res->aux_clk = devm_clk_get(dev, "aux"); - if (IS_ERR(res->aux_clk)) - return PTR_ERR(res->aux_clk); - - res->cfg_clk = devm_clk_get(dev, "cfg"); - if (IS_ERR(res->cfg_clk)) - return PTR_ERR(res->cfg_clk); - - res->master_clk = devm_clk_get(dev, "bus_master"); - if (IS_ERR(res->master_clk)) - return PTR_ERR(res->master_clk); - - res->slave_clk = devm_clk_get(dev, "bus_slave"); - if (IS_ERR(res->slave_clk)) - return PTR_ERR(res->slave_clk); - - res->pipe_clk = devm_clk_get(dev, "pipe"); - return PTR_ERR_OR_ZERO(res->pipe_clk); -} - -static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - - clk_disable_unprepare(res->slave_clk); - clk_disable_unprepare(res->master_clk); - clk_disable_unprepare(res->cfg_clk); - clk_disable_unprepare(res->aux_clk); - - regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); -} - -static 
void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - - clk_disable_unprepare(res->pipe_clk); -} - -static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - u32 val; - int ret; - - ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); - if (ret < 0) { - dev_err(dev, "cannot enable regulators\n"); - return ret; - } - - ret = clk_prepare_enable(res->aux_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable aux clock\n"); - goto err_aux_clk; - } - - ret = clk_prepare_enable(res->cfg_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable cfg clock\n"); - goto err_cfg_clk; - } - - ret = clk_prepare_enable(res->master_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable master clock\n"); - goto err_master_clk; - } - - ret = clk_prepare_enable(res->slave_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable slave clock\n"); - goto err_slave_clk; - } - - /* enable PCIe clocks and resets */ - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); - val &= ~BIT(0); - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); - - /* change DBI base address */ - writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); - - /* MAC PHY_POWERDOWN MUX DISABLE */ - val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); - val &= ~BIT(29); - writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); - - val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); - val |= BIT(4); - writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); - - val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); - val |= BIT(31); - writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); - - return 0; - -err_slave_clk: - clk_disable_unprepare(res->master_clk); -err_master_clk: - clk_disable_unprepare(res->cfg_clk); -err_cfg_clk: - clk_disable_unprepare(res->aux_clk); - -err_aux_clk: - 
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); - - return ret; -} - -static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - int ret; - - ret = clk_prepare_enable(res->pipe_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable pipe clock\n"); - return ret; - } - - return 0; -} - -static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - - res->aux_clk = devm_clk_get(dev, "aux"); - if (IS_ERR(res->aux_clk)) - return PTR_ERR(res->aux_clk); - - res->master_clk = devm_clk_get(dev, "master_bus"); - if (IS_ERR(res->master_clk)) - return PTR_ERR(res->master_clk); - - res->slave_clk = devm_clk_get(dev, "slave_bus"); - if (IS_ERR(res->slave_clk)) - return PTR_ERR(res->slave_clk); - - res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); - if (IS_ERR(res->axi_m_reset)) - return PTR_ERR(res->axi_m_reset); - - res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); - if (IS_ERR(res->axi_s_reset)) - return PTR_ERR(res->axi_s_reset); - - res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); - if (IS_ERR(res->pipe_reset)) - return PTR_ERR(res->pipe_reset); - - res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, - "axi_m_vmid"); - if (IS_ERR(res->axi_m_vmid_reset)) - return PTR_ERR(res->axi_m_vmid_reset); - - res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, - "axi_s_xpu"); - if (IS_ERR(res->axi_s_xpu_reset)) - return PTR_ERR(res->axi_s_xpu_reset); - - res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); - if (IS_ERR(res->parf_reset)) - return PTR_ERR(res->parf_reset); - - res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); - if (IS_ERR(res->phy_reset)) - return PTR_ERR(res->phy_reset); - - 
res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, - "axi_m_sticky"); - if (IS_ERR(res->axi_m_sticky_reset)) - return PTR_ERR(res->axi_m_sticky_reset); - - res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, - "pipe_sticky"); - if (IS_ERR(res->pipe_sticky_reset)) - return PTR_ERR(res->pipe_sticky_reset); - - res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); - if (IS_ERR(res->pwr_reset)) - return PTR_ERR(res->pwr_reset); - - res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); - if (IS_ERR(res->ahb_reset)) - return PTR_ERR(res->ahb_reset); - - res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); - if (IS_ERR(res->phy_ahb_reset)) - return PTR_ERR(res->phy_ahb_reset); - - return 0; -} - -static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; - - reset_control_assert(res->axi_m_reset); - reset_control_assert(res->axi_s_reset); - reset_control_assert(res->pipe_reset); - reset_control_assert(res->pipe_sticky_reset); - reset_control_assert(res->phy_reset); - reset_control_assert(res->phy_ahb_reset); - reset_control_assert(res->axi_m_sticky_reset); - reset_control_assert(res->pwr_reset); - reset_control_assert(res->ahb_reset); - clk_disable_unprepare(res->aux_clk); - clk_disable_unprepare(res->master_clk); - clk_disable_unprepare(res->slave_clk); -} - -static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - u32 val; - int ret; - - ret = reset_control_assert(res->axi_m_reset); - if (ret) { - dev_err(dev, "cannot assert axi master reset\n"); - return ret; - } - - ret = reset_control_assert(res->axi_s_reset); - if (ret) { - dev_err(dev, "cannot assert axi slave reset\n"); - return ret; - } - - usleep_range(10000, 12000); - - ret = reset_control_assert(res->pipe_reset); - if (ret) { - dev_err(dev, "cannot assert 
pipe reset\n"); - return ret; - } - - ret = reset_control_assert(res->pipe_sticky_reset); - if (ret) { - dev_err(dev, "cannot assert pipe sticky reset\n"); - return ret; - } - - ret = reset_control_assert(res->phy_reset); - if (ret) { - dev_err(dev, "cannot assert phy reset\n"); - return ret; - } - - ret = reset_control_assert(res->phy_ahb_reset); - if (ret) { - dev_err(dev, "cannot assert phy ahb reset\n"); - return ret; - } - - usleep_range(10000, 12000); - - ret = reset_control_assert(res->axi_m_sticky_reset); - if (ret) { - dev_err(dev, "cannot assert axi master sticky reset\n"); - return ret; - } - - ret = reset_control_assert(res->pwr_reset); - if (ret) { - dev_err(dev, "cannot assert power reset\n"); - return ret; - } - - ret = reset_control_assert(res->ahb_reset); - if (ret) { - dev_err(dev, "cannot assert ahb reset\n"); - return ret; - } - - usleep_range(10000, 12000); - - ret = reset_control_deassert(res->phy_ahb_reset); - if (ret) { - dev_err(dev, "cannot deassert phy ahb reset\n"); - return ret; - } - - ret = reset_control_deassert(res->phy_reset); - if (ret) { - dev_err(dev, "cannot deassert phy reset\n"); - goto err_rst_phy; - } - - ret = reset_control_deassert(res->pipe_reset); - if (ret) { - dev_err(dev, "cannot deassert pipe reset\n"); - goto err_rst_pipe; - } - - ret = reset_control_deassert(res->pipe_sticky_reset); - if (ret) { - dev_err(dev, "cannot deassert pipe sticky reset\n"); - goto err_rst_pipe_sticky; - } - - usleep_range(10000, 12000); - - ret = reset_control_deassert(res->axi_m_reset); - if (ret) { - dev_err(dev, "cannot deassert axi master reset\n"); - goto err_rst_axi_m; - } - - ret = reset_control_deassert(res->axi_m_sticky_reset); - if (ret) { - dev_err(dev, "cannot deassert axi master sticky reset\n"); - goto err_rst_axi_m_sticky; - } - - ret = reset_control_deassert(res->axi_s_reset); - if (ret) { - dev_err(dev, "cannot deassert axi slave reset\n"); - goto err_rst_axi_s; - } - - ret = reset_control_deassert(res->pwr_reset); - if 
(ret) { - dev_err(dev, "cannot deassert power reset\n"); - goto err_rst_pwr; - } - - ret = reset_control_deassert(res->ahb_reset); - if (ret) { - dev_err(dev, "cannot deassert ahb reset\n"); - goto err_rst_ahb; - } - - usleep_range(10000, 12000); - - ret = clk_prepare_enable(res->aux_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable iface clock\n"); - goto err_clk_aux; - } - - ret = clk_prepare_enable(res->master_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable core clock\n"); - goto err_clk_axi_m; - } - - ret = clk_prepare_enable(res->slave_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_axi_s; - } - - /* enable PCIe clocks and resets */ - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); - val &= ~BIT(0); - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); - - /* change DBI base address */ - writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); - - /* MAC PHY_POWERDOWN MUX DISABLE */ - val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); - val &= ~BIT(29); - writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); - - val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); - val |= BIT(4); - writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); - - val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); - val |= BIT(31); - writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); - - return 0; - -err_clk_axi_s: - clk_disable_unprepare(res->master_clk); -err_clk_axi_m: - clk_disable_unprepare(res->aux_clk); -err_clk_aux: - reset_control_assert(res->ahb_reset); -err_rst_ahb: - reset_control_assert(res->pwr_reset); -err_rst_pwr: - reset_control_assert(res->axi_s_reset); -err_rst_axi_s: - reset_control_assert(res->axi_m_sticky_reset); -err_rst_axi_m_sticky: - reset_control_assert(res->axi_m_reset); -err_rst_axi_m: - reset_control_assert(res->pipe_sticky_reset); -err_rst_pipe_sticky: - reset_control_assert(res->pipe_reset); -err_rst_pipe: - reset_control_assert(res->phy_reset); -err_rst_phy: - 
reset_control_assert(res->phy_ahb_reset); - return ret; -} - -static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - int i; - const char *rst_names[] = { "axi_m", "axi_s", "pipe", - "axi_m_sticky", "sticky", - "ahb", "sleep", }; - - res->iface = devm_clk_get(dev, "iface"); - if (IS_ERR(res->iface)) - return PTR_ERR(res->iface); - - res->axi_m_clk = devm_clk_get(dev, "axi_m"); - if (IS_ERR(res->axi_m_clk)) - return PTR_ERR(res->axi_m_clk); - - res->axi_s_clk = devm_clk_get(dev, "axi_s"); - if (IS_ERR(res->axi_s_clk)) - return PTR_ERR(res->axi_s_clk); - - res->ahb_clk = devm_clk_get(dev, "ahb"); - if (IS_ERR(res->ahb_clk)) - return PTR_ERR(res->ahb_clk); - - res->aux_clk = devm_clk_get(dev, "aux"); - if (IS_ERR(res->aux_clk)) - return PTR_ERR(res->aux_clk); - - for (i = 0; i < ARRAY_SIZE(rst_names); i++) { - res->rst[i] = devm_reset_control_get(dev, rst_names[i]); - if (IS_ERR(res->rst[i])) - return PTR_ERR(res->rst[i]); - } - - return 0; -} - -static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; - - clk_disable_unprepare(res->iface); - clk_disable_unprepare(res->axi_m_clk); - clk_disable_unprepare(res->axi_s_clk); - clk_disable_unprepare(res->ahb_clk); - clk_disable_unprepare(res->aux_clk); -} - -static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - int i, ret; - u32 val; - - for (i = 0; i < ARRAY_SIZE(res->rst); i++) { - ret = reset_control_assert(res->rst[i]); - if (ret) { - dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); - return ret; - } - } - - usleep_range(2000, 2500); - - for (i = 0; i < ARRAY_SIZE(res->rst); i++) { - ret = reset_control_deassert(res->rst[i]); - if (ret) { - dev_err(dev, "reset #%d deassert 
failed (%d)\n", i, - ret); - return ret; - } - } - - /* - * Don't have a way to see if the reset has completed. - * Wait for some time. - */ - usleep_range(2000, 2500); - - ret = clk_prepare_enable(res->iface); - if (ret) { - dev_err(dev, "cannot prepare/enable core clock\n"); - goto err_clk_iface; - } - - ret = clk_prepare_enable(res->axi_m_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable core clock\n"); - goto err_clk_axi_m; - } - - ret = clk_prepare_enable(res->axi_s_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable axi slave clock\n"); - goto err_clk_axi_s; - } - - ret = clk_prepare_enable(res->ahb_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable ahb clock\n"); - goto err_clk_ahb; - } - - ret = clk_prepare_enable(res->aux_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable aux clock\n"); - goto err_clk_aux; - } - - writel(SLV_ADDR_SPACE_SZ, - pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); - - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); - val &= ~BIT(0); - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); - - writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); - - writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS - | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | - AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, - pcie->parf + PCIE20_PARF_SYS_CTRL); - writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); - - writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); - writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); - writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); - - val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); - val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; - writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); - - writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + - PCIE20_DEVICE_CONTROL2_STATUS2); - - return 0; - -err_clk_aux: - clk_disable_unprepare(res->ahb_clk); -err_clk_ahb: - clk_disable_unprepare(res->axi_s_clk); -err_clk_axi_s: - clk_disable_unprepare(res->axi_m_clk); 
-err_clk_axi_m: - clk_disable_unprepare(res->iface); -err_clk_iface: - /* - * Not checking for failure, will anyway return - * the original failure in 'ret'. - */ - for (i = 0; i < ARRAY_SIZE(res->rst); i++) - reset_control_assert(res->rst[i]); - - return ret; -} - -static int qcom_pcie_link_up(struct dw_pcie *pci) -{ - u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); - - return !!(val & PCI_EXP_LNKSTA_DLLLA); -} - -static int qcom_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct qcom_pcie *pcie = to_qcom_pcie(pci); - int ret; - - pm_runtime_get_sync(pci->dev); - qcom_ep_reset_assert(pcie); - - ret = pcie->ops->init(pcie); - if (ret) - return ret; - - ret = phy_power_on(pcie->phy); - if (ret) - goto err_deinit; - - if (pcie->ops->post_init) { - ret = pcie->ops->post_init(pcie); - if (ret) - goto err_disable_phy; - } - - dw_pcie_setup_rc(pp); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - dw_pcie_msi_init(pp); - - qcom_ep_reset_deassert(pcie); - - ret = qcom_pcie_establish_link(pcie); - if (ret) - goto err; - - return 0; -err: - qcom_ep_reset_assert(pcie); - if (pcie->ops->post_deinit) - pcie->ops->post_deinit(pcie); -err_disable_phy: - phy_power_off(pcie->phy); -err_deinit: - pcie->ops->deinit(pcie); - pm_runtime_put(pci->dev); - - return ret; -} - -static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, - u32 *val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - /* the device class is not reported correctly from the register */ - if (where == PCI_CLASS_REVISION && size == 4) { - *val = readl(pci->dbi_base + PCI_CLASS_REVISION); - *val &= 0xff; /* keep revision id */ - *val |= PCI_CLASS_BRIDGE_PCI << 16; - return PCIBIOS_SUCCESSFUL; - } - - return dw_pcie_read(pci->dbi_base + where, size, val); -} - -static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { - .host_init = qcom_pcie_host_init, - .rd_own_conf = qcom_pcie_rd_own_conf, -}; - -/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ 
-static const struct qcom_pcie_ops ops_2_1_0 = { - .get_resources = qcom_pcie_get_resources_2_1_0, - .init = qcom_pcie_init_2_1_0, - .deinit = qcom_pcie_deinit_2_1_0, - .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, -}; - -/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ -static const struct qcom_pcie_ops ops_1_0_0 = { - .get_resources = qcom_pcie_get_resources_1_0_0, - .init = qcom_pcie_init_1_0_0, - .deinit = qcom_pcie_deinit_1_0_0, - .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, -}; - -/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ -static const struct qcom_pcie_ops ops_2_3_2 = { - .get_resources = qcom_pcie_get_resources_2_3_2, - .init = qcom_pcie_init_2_3_2, - .post_init = qcom_pcie_post_init_2_3_2, - .deinit = qcom_pcie_deinit_2_3_2, - .post_deinit = qcom_pcie_post_deinit_2_3_2, - .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, -}; - -/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ -static const struct qcom_pcie_ops ops_2_4_0 = { - .get_resources = qcom_pcie_get_resources_2_4_0, - .init = qcom_pcie_init_2_4_0, - .deinit = qcom_pcie_deinit_2_4_0, - .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, -}; - -/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ -static const struct qcom_pcie_ops ops_2_3_3 = { - .get_resources = qcom_pcie_get_resources_2_3_3, - .init = qcom_pcie_init_2_3_3, - .deinit = qcom_pcie_deinit_2_3_3, - .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, -}; - -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = qcom_pcie_link_up, -}; - -static int qcom_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct resource *res; - struct pcie_port *pp; - struct dw_pcie *pci; - struct qcom_pcie *pcie; - int ret; - - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pm_runtime_enable(dev); - pci->dev = dev; - pci->ops = &dw_pcie_ops; - pp = &pci->pp; - - pcie->pci = pci; - - pcie->ops = 
of_device_get_match_data(dev); - - pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); - if (IS_ERR(pcie->reset)) - return PTR_ERR(pcie->reset); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); - pcie->parf = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->parf)) - return PTR_ERR(pcie->parf); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); - pcie->elbi = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->elbi)) - return PTR_ERR(pcie->elbi); - - pcie->phy = devm_phy_optional_get(dev, "pciephy"); - if (IS_ERR(pcie->phy)) - return PTR_ERR(pcie->phy); - - ret = pcie->ops->get_resources(pcie); - if (ret) - return ret; - - pp->root_bus_nr = -1; - pp->ops = &qcom_pcie_dw_ops; - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) - return pp->msi_irq; - } - - ret = phy_init(pcie->phy); - if (ret) { - pm_runtime_disable(&pdev->dev); - return ret; - } - - platform_set_drvdata(pdev, pcie); - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "cannot initialize host\n"); - pm_runtime_disable(&pdev->dev); - return ret; - } - - return 0; -} - -static const struct of_device_id qcom_pcie_match[] = { - { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, - { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, - { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, - { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, - { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, - { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 }, - { } -}; - -static struct platform_driver qcom_pcie_driver = { - .probe = qcom_pcie_probe, - .driver = { - .name = "qcom-pcie", - .suppress_bind_attrs = true, - .of_match_table = qcom_pcie_match, - }, -}; 
-builtin_platform_driver(qcom_pcie_driver); diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c deleted file mode 100644 index ecb58f7b7566..000000000000 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ /dev/null @@ -1,314 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs - * - * SPEAr13xx PCIe Glue Layer Source Code - * - * Copyright (C) 2010-2014 ST Microelectronics - * Pratyush Anand - * Mohit Kumar - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-designware.h" - -struct spear13xx_pcie { - struct dw_pcie *pci; - void __iomem *app_base; - struct phy *phy; - struct clk *clk; - bool is_gen1; -}; - -struct pcie_app_reg { - u32 app_ctrl_0; /* cr0 */ - u32 app_ctrl_1; /* cr1 */ - u32 app_status_0; /* cr2 */ - u32 app_status_1; /* cr3 */ - u32 msg_status; /* cr4 */ - u32 msg_payload; /* cr5 */ - u32 int_sts; /* cr6 */ - u32 int_clr; /* cr7 */ - u32 int_mask; /* cr8 */ - u32 mst_bmisc; /* cr9 */ - u32 phy_ctrl; /* cr10 */ - u32 phy_status; /* cr11 */ - u32 cxpl_debug_info_0; /* cr12 */ - u32 cxpl_debug_info_1; /* cr13 */ - u32 ven_msg_ctrl_0; /* cr14 */ - u32 ven_msg_ctrl_1; /* cr15 */ - u32 ven_msg_data_0; /* cr16 */ - u32 ven_msg_data_1; /* cr17 */ - u32 ven_msi_0; /* cr18 */ - u32 ven_msi_1; /* cr19 */ - u32 mst_rmisc; /* cr20 */ -}; - -/* CR0 ID */ -#define APP_LTSSM_ENABLE_ID 3 -#define DEVICE_TYPE_RC (4 << 25) -#define MISCTRL_EN_ID 30 -#define REG_TRANSLATION_ENABLE 31 - -/* CR3 ID */ -#define XMLH_LINK_UP (1 << 6) - -/* CR6 */ -#define MSI_CTRL_INT (1 << 26) - -#define EXP_CAP_ID_OFFSET 0x70 - -#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev) - -static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie) -{ - struct dw_pcie *pci = spear13xx_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; - u32 val; - u32 exp_cap_off = 
EXP_CAP_ID_OFFSET; - - if (dw_pcie_link_up(pci)) { - dev_err(pci->dev, "link already up\n"); - return 0; - } - - dw_pcie_setup_rc(pp); - - /* - * this controller support only 128 bytes read size, however its - * default value in capability register is 512 bytes. So force - * it to 128 here. - */ - dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val); - val &= ~PCI_EXP_DEVCTL_READRQ; - dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val); - - dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A); - dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80); - - /* - * if is_gen1 is set then handle it, so that some buggy card - * also works - */ - if (spear13xx_pcie->is_gen1) { - dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, - 4, &val); - if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { - val &= ~((u32)PCI_EXP_LNKCAP_SLS); - val |= PCI_EXP_LNKCAP_SLS_2_5GB; - dw_pcie_write(pci->dbi_base + exp_cap_off + - PCI_EXP_LNKCAP, 4, val); - } - - dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, - 2, &val); - if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { - val &= ~((u32)PCI_EXP_LNKCAP_SLS); - val |= PCI_EXP_LNKCAP_SLS_2_5GB; - dw_pcie_write(pci->dbi_base + exp_cap_off + - PCI_EXP_LNKCTL2, 2, val); - } - } - - /* enable ltssm */ - writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID) - | (1 << APP_LTSSM_ENABLE_ID) - | ((u32)1 << REG_TRANSLATION_ENABLE), - &app_reg->app_ctrl_0); - - return dw_pcie_wait_for_link(pci); -} - -static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) -{ - struct spear13xx_pcie *spear13xx_pcie = arg; - struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; - struct dw_pcie *pci = spear13xx_pcie->pci; - struct pcie_port *pp = &pci->pp; - unsigned int status; - - status = readl(&app_reg->int_sts); - - if (status & MSI_CTRL_INT) { - BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI)); - dw_handle_msi_irq(pp); - } - - writel(status, &app_reg->int_clr); - - return IRQ_HANDLED; -} - -static 
void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie) -{ - struct dw_pcie *pci = spear13xx_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; - - /* Enable MSI interrupt */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - dw_pcie_msi_init(pp); - writel(readl(&app_reg->int_mask) | - MSI_CTRL_INT, &app_reg->int_mask); - } -} - -static int spear13xx_pcie_link_up(struct dw_pcie *pci) -{ - struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); - struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; - - if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) - return 1; - - return 0; -} - -static int spear13xx_pcie_host_init(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); - - spear13xx_pcie_establish_link(spear13xx_pcie); - spear13xx_pcie_enable_interrupts(spear13xx_pcie); - - return 0; -} - -static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { - .host_init = spear13xx_pcie_host_init, -}; - -static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = spear13xx_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - - pp->irq = platform_get_irq(pdev, 0); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq\n"); - return pp->irq; - } - ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, - IRQF_SHARED | IRQF_NO_THREAD, - "spear1340-pcie", spear13xx_pcie); - if (ret) { - dev_err(dev, "failed to request irq %d\n", pp->irq); - return ret; - } - - pp->root_bus_nr = -1; - pp->ops = &spear13xx_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = spear13xx_pcie_link_up, -}; - -static int spear13xx_pcie_probe(struct platform_device 
*pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct spear13xx_pcie *spear13xx_pcie; - struct device_node *np = dev->of_node; - struct resource *dbi_base; - int ret; - - spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL); - if (!spear13xx_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - spear13xx_pcie->pci = pci; - - spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); - if (IS_ERR(spear13xx_pcie->phy)) { - ret = PTR_ERR(spear13xx_pcie->phy); - if (ret == -EPROBE_DEFER) - dev_info(dev, "probe deferred\n"); - else - dev_err(dev, "couldn't get pcie-phy\n"); - return ret; - } - - phy_init(spear13xx_pcie->phy); - - spear13xx_pcie->clk = devm_clk_get(dev, NULL); - if (IS_ERR(spear13xx_pcie->clk)) { - dev_err(dev, "couldn't get clk for pcie\n"); - return PTR_ERR(spear13xx_pcie->clk); - } - ret = clk_prepare_enable(spear13xx_pcie->clk); - if (ret) { - dev_err(dev, "couldn't enable clk for pcie\n"); - return ret; - } - - dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); - if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); - ret = PTR_ERR(pci->dbi_base); - goto fail_clk; - } - spear13xx_pcie->app_base = pci->dbi_base + 0x2000; - - if (of_property_read_bool(np, "st,pcie-is-gen1")) - spear13xx_pcie->is_gen1 = true; - - platform_set_drvdata(pdev, spear13xx_pcie); - - ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev); - if (ret < 0) - goto fail_clk; - - return 0; - -fail_clk: - clk_disable_unprepare(spear13xx_pcie->clk); - - return ret; -} - -static const struct of_device_id spear13xx_pcie_of_match[] = { - { .compatible = "st,spear1340-pcie", }, - {}, -}; - -static struct platform_driver spear13xx_pcie_driver = { - .probe = spear13xx_pcie_probe, - .driver = { - .name = "spear-pcie", - .of_match_table = 
of_match_ptr(spear13xx_pcie_of_match), - .suppress_bind_attrs = true, - }, -}; - -builtin_platform_driver(spear13xx_pcie_driver); diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig deleted file mode 100644 index a96e23bda664..000000000000 --- a/drivers/pci/host/Kconfig +++ /dev/null @@ -1,246 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -menu "PCI host controller drivers" - depends on PCI - -config PCI_MVEBU - bool "Marvell EBU PCIe controller" - depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST - depends on MVEBU_MBUS - depends on ARM - depends on OF - -config PCI_AARDVARK - bool "Aardvark PCIe controller" - depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST - depends on OF - depends on PCI_MSI_IRQ_DOMAIN - help - Add support for Aardvark 64bit PCIe Host Controller. This - controller is part of the South Bridge of the Marvel Armada - 3700 SoC. - -config PCIE_XILINX_NWL - bool "NWL PCIe Core" - depends on ARCH_ZYNQMP || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - help - Say 'Y' here if you want kernel support for Xilinx - NWL PCIe controller. The controller can act as Root Port - or End Point. The current option selection will only - support root port enabling. - -config PCI_FTPCI100 - bool "Faraday Technology FTPCI100 PCI controller" - depends on OF - default ARCH_GEMINI - -config PCI_TEGRA - bool "NVIDIA Tegra PCIe controller" - depends on ARCH_TEGRA || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - help - Say Y here if you want support for the PCIe host controller found - on NVIDIA Tegra SoCs. - -config PCI_RCAR_GEN2 - bool "Renesas R-Car Gen2 Internal PCI controller" - depends on ARCH_RENESAS || COMPILE_TEST - depends on ARM - help - Say Y here if you want internal PCI support on R-Car Gen2 SoC. - There are 3 internal PCI controllers available with a single - built-in EHCI/OHCI host controller present on each one. 
- -config PCIE_RCAR - bool "Renesas R-Car PCIe controller" - depends on ARCH_RENESAS || COMPILE_TEST - depends on PCI_MSI_IRQ_DOMAIN - help - Say Y here if you want PCIe controller support on R-Car SoCs. - -config PCI_HOST_COMMON - bool - select PCI_ECAM - -config PCI_HOST_GENERIC - bool "Generic PCI host controller" - depends on OF - select PCI_HOST_COMMON - select IRQ_DOMAIN - select PCI_DOMAINS - help - Say Y here if you want to support a simple generic PCI host - controller, such as the one emulated by kvmtool. - -config PCIE_XILINX - bool "Xilinx AXI PCIe host bridge support" - depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST - help - Say 'Y' here if you want kernel to support the Xilinx AXI PCIe - Host Bridge driver. - -config PCI_XGENE - bool "X-Gene PCIe controller" - depends on ARM64 || COMPILE_TEST - depends on OF || (ACPI && PCI_QUIRKS) - help - Say Y here if you want internal PCI support on APM X-Gene SoC. - There are 5 internal PCIe ports available. Each port is GEN3 capable - and have varied lanes from x1 to x8. - -config PCI_XGENE_MSI - bool "X-Gene v1 PCIe MSI feature" - depends on PCI_XGENE - depends on PCI_MSI_IRQ_DOMAIN - default y - help - Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. - This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. - -config PCI_V3_SEMI - bool "V3 Semiconductor PCI controller" - depends on OF - depends on ARM || COMPILE_TEST - default ARCH_INTEGRATOR_AP - -config PCI_VERSATILE - bool "ARM Versatile PB PCI controller" - depends on ARCH_VERSATILE - -config PCIE_IPROC - tristate - select PCI_DOMAINS - help - This enables the iProc PCIe core controller support for Broadcom's - iProc family of SoCs. An appropriate bus interface driver needs - to be enabled to select this. 
- -config PCIE_IPROC_PLATFORM - tristate "Broadcom iProc PCIe platform bus driver" - depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST) - depends on OF - select PCIE_IPROC - default ARCH_BCM_IPROC - help - Say Y here if you want to use the Broadcom iProc PCIe controller - through the generic platform bus interface - -config PCIE_IPROC_BCMA - tristate "Broadcom iProc PCIe BCMA bus driver" - depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST) - select PCIE_IPROC - select BCMA - default ARCH_BCM_5301X - help - Say Y here if you want to use the Broadcom iProc PCIe controller - through the BCMA bus interface - -config PCIE_IPROC_MSI - bool "Broadcom iProc PCIe MSI support" - depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA - depends on PCI_MSI_IRQ_DOMAIN - default ARCH_BCM_IPROC - help - Say Y here if you want to enable MSI support for Broadcom's iProc - PCIe controller - -config PCIE_ALTERA - bool "Altera PCIe controller" - depends on ARM || NIOS2 || COMPILE_TEST - select PCI_DOMAINS - help - Say Y here if you want to enable PCIe controller support on Altera - FPGA. - -config PCIE_ALTERA_MSI - bool "Altera PCIe MSI feature" - depends on PCIE_ALTERA - depends on PCI_MSI_IRQ_DOMAIN - help - Say Y here if you want PCIe MSI support for the Altera FPGA. - This MSI driver supports Altera MSI to GIC controller IP. - -config PCI_HOST_THUNDER_PEM - bool "Cavium Thunder PCIe controller to off-chip devices" - depends on ARM64 || COMPILE_TEST - depends on OF || (ACPI && PCI_QUIRKS) - select PCI_HOST_COMMON - help - Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs. - -config PCI_HOST_THUNDER_ECAM - bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon" - depends on ARM64 || COMPILE_TEST - depends on OF || (ACPI && PCI_QUIRKS) - select PCI_HOST_COMMON - help - Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. 
- -config PCIE_ROCKCHIP - bool - depends on PCI - -config PCIE_ROCKCHIP_HOST - tristate "Rockchip PCIe host controller" - depends on ARCH_ROCKCHIP || COMPILE_TEST - depends on OF - depends on PCI_MSI_IRQ_DOMAIN - select MFD_SYSCON - select PCIE_ROCKCHIP - help - Say Y here if you want internal PCI support on Rockchip SoC. - There is 1 internal PCIe port available to support GEN2 with - 4 slots. - -config PCIE_ROCKCHIP_EP - bool "Rockchip PCIe endpoint controller" - depends on ARCH_ROCKCHIP || COMPILE_TEST - depends on OF - depends on PCI_ENDPOINT - select MFD_SYSCON - select PCIE_ROCKCHIP - help - Say Y here if you want to support Rockchip PCIe controller in - endpoint mode on Rockchip SoC. There is 1 internal PCIe port - available to support GEN2 with 4 slots. - -config PCIE_MEDIATEK - bool "MediaTek PCIe controller" - depends on ARCH_MEDIATEK || COMPILE_TEST - depends on OF - depends on PCI_MSI_IRQ_DOMAIN - help - Say Y here if you want to enable PCIe controller support on - MediaTek SoCs. - -config PCIE_TANGO_SMP8759 - bool "Tango SMP8759 PCIe controller (DANGEROUS)" - depends on ARCH_TANGO && PCI_MSI && OF - depends on BROKEN - select PCI_HOST_COMMON - help - Say Y here to enable PCIe controller support for Sigma Designs - Tango SMP8759-based systems. - - Note: The SMP8759 controller multiplexes PCI config and MMIO - accesses, and Linux doesn't provide a way to serialize them. - This can lead to data corruption if drivers perform concurrent - config and MMIO accesses. - -config VMD - depends on PCI_MSI && X86_64 && SRCU - tristate "Intel Volume Management Device Driver" - ---help--- - Adds support for the Intel Volume Management Device (VMD). VMD is a - secondary PCI host bridge that allows PCI Express root ports, - and devices attached to them, to be removed from the default - PCI domain and placed within the VMD domain. This provides - more bus resources than are otherwise possible with a - single domain. 
If you know your system provides one of these and - has devices attached to it, say Y; if you are not sure, say N. - - To compile this driver as a module, choose M here: the - module will be called vmd. - -endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile deleted file mode 100644 index 11d21b026d37..000000000000 --- a/drivers/pci/host/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o -obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o -obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o -obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o -obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o -obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o -obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o -obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o -obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o -obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o -obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o -obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o -obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o -obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o -obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o -obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o -obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o -obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o -obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o -obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o -obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o -obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o -obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o -obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o -obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o -obj-$(CONFIG_VMD) += vmd.o - -# The following drivers are for devices that use the generic ACPI -# pci_root.c driver but don't support standard ECAM config access. -# They contain MCFG quirks to replace the generic ECAM accessors with -# device-specific ones that are shared with the DT driver. 
- -# The ACPI driver is generic and should not require driver-specific -# config options to be enabled, so we always build these drivers on -# ARM64 and use internal ifdefs to only build the pieces we need -# depending on whether ACPI, the DT driver, or both are enabled. - -ifdef CONFIG_PCI -obj-$(CONFIG_ARM64) += pci-thunder-ecam.o -obj-$(CONFIG_ARM64) += pci-thunder-pem.o -obj-$(CONFIG_ARM64) += pci-xgene.o -endif diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c deleted file mode 100644 index d3172d5d3d35..000000000000 --- a/drivers/pci/host/pci-aardvark.c +++ /dev/null @@ -1,978 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Driver for the Aardvark PCIe controller, used on Marvell Armada - * 3700. - * - * Copyright (C) 2016 Marvell - * - * Author: Hezi Shahmoon - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -/* PCIe core registers */ -#define PCIE_CORE_CMD_STATUS_REG 0x4 -#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0) -#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1) -#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2) -#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8 -#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4) -#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 -#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) -#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 -#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2 -#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 -#define PCIE_CORE_LINK_L0S_ENTRY BIT(0) -#define PCIE_CORE_LINK_TRAINING BIT(5) -#define PCIE_CORE_LINK_WIDTH_SHIFT 20 -#define PCIE_CORE_ERR_CAPCTL_REG 0x118 -#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) -#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) -#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) -#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) - -/* PIO registers base address and register offsets */ -#define PIO_BASE_ADDR 0x4000 -#define PIO_CTRL (PIO_BASE_ADDR + 0x0) 
-#define PIO_CTRL_TYPE_MASK GENMASK(3, 0) -#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24) -#define PIO_STAT (PIO_BASE_ADDR + 0x4) -#define PIO_COMPLETION_STATUS_SHIFT 7 -#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7) -#define PIO_COMPLETION_STATUS_OK 0 -#define PIO_COMPLETION_STATUS_UR 1 -#define PIO_COMPLETION_STATUS_CRS 2 -#define PIO_COMPLETION_STATUS_CA 4 -#define PIO_NON_POSTED_REQ BIT(0) -#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8) -#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc) -#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10) -#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14) -#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18) -#define PIO_START (PIO_BASE_ADDR + 0x1c) -#define PIO_ISR (PIO_BASE_ADDR + 0x20) -#define PIO_ISRM (PIO_BASE_ADDR + 0x24) - -/* Aardvark Control registers */ -#define CONTROL_BASE_ADDR 0x4800 -#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0) -#define PCIE_GEN_SEL_MSK 0x3 -#define PCIE_GEN_SEL_SHIFT 0x0 -#define SPEED_GEN_1 0 -#define SPEED_GEN_2 1 -#define SPEED_GEN_3 2 -#define IS_RC_MSK 1 -#define IS_RC_SHIFT 2 -#define LANE_CNT_MSK 0x18 -#define LANE_CNT_SHIFT 0x3 -#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT) -#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT) -#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT) -#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT) -#define LINK_TRAINING_EN BIT(6) -#define LEGACY_INTA BIT(28) -#define LEGACY_INTB BIT(29) -#define LEGACY_INTC BIT(30) -#define LEGACY_INTD BIT(31) -#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4) -#define HOT_RESET_GEN BIT(0) -#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8) -#define PCIE_CORE_CTRL2_RESERVED 0x7 -#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4) -#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5) -#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6) -#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10) -#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40) -#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) -#define PCIE_ISR0_MSI_INT_PENDING BIT(24) -#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) -#define 
PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) -#define PCIE_ISR0_ALL_MASK GENMASK(26, 0) -#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48) -#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) -#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) -#define PCIE_ISR1_FLUSH BIT(5) -#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val)) -#define PCIE_ISR1_ALL_MASK GENMASK(11, 4) -#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) -#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) -#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) -#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) -#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) - -/* PCIe window configuration */ -#define OB_WIN_BASE_ADDR 0x4c00 -#define OB_WIN_BLOCK_SIZE 0x20 -#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \ - OB_WIN_BLOCK_SIZE * (win) + \ - (offset)) -#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00) -#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04) -#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08) -#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c) -#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10) -#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14) -#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18) - -/* PCIe window types */ -#define OB_PCIE_MEM 0x0 -#define OB_PCIE_IO 0x4 - -/* LMI registers base address and register offsets */ -#define LMI_BASE_ADDR 0x6000 -#define CFG_REG (LMI_BASE_ADDR + 0x0) -#define LTSSM_SHIFT 24 -#define LTSSM_MASK 0x3f -#define LTSSM_L0 0x10 -#define RC_BAR_CONFIG 0x300 - -/* PCIe core controller registers */ -#define CTRL_CORE_BASE_ADDR 0x18000 -#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0) -#define CTRL_MODE_SHIFT 0x0 -#define CTRL_MODE_MASK 0x1 -#define PCIE_CORE_MODE_DIRECT 0x0 -#define PCIE_CORE_MODE_COMMAND 0x1 - -/* PCIe Central Interrupts Registers */ -#define CENTRAL_INT_BASE_ADDR 0x1b000 -#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0) -#define HOST_CTRL_INT_MASK_REG 
(CENTRAL_INT_BASE_ADDR + 0x4) -#define PCIE_IRQ_CMDQ_INT BIT(0) -#define PCIE_IRQ_MSI_STATUS_INT BIT(1) -#define PCIE_IRQ_CMD_SENT_DONE BIT(3) -#define PCIE_IRQ_DMA_INT BIT(4) -#define PCIE_IRQ_IB_DXFERDONE BIT(5) -#define PCIE_IRQ_OB_DXFERDONE BIT(6) -#define PCIE_IRQ_OB_RXFERDONE BIT(7) -#define PCIE_IRQ_COMPQ_INT BIT(12) -#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13) -#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14) -#define PCIE_IRQ_CORE_INT BIT(16) -#define PCIE_IRQ_CORE_INT_PIO BIT(17) -#define PCIE_IRQ_DPMU_INT BIT(18) -#define PCIE_IRQ_PCIE_MIS_INT BIT(19) -#define PCIE_IRQ_MSI_INT1_DET BIT(20) -#define PCIE_IRQ_MSI_INT2_DET BIT(21) -#define PCIE_IRQ_RC_DBELL_DET BIT(22) -#define PCIE_IRQ_EP_STATUS BIT(23) -#define PCIE_IRQ_ALL_MASK 0xfff0fb -#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT - -/* Transaction types */ -#define PCIE_CONFIG_RD_TYPE0 0x8 -#define PCIE_CONFIG_RD_TYPE1 0x9 -#define PCIE_CONFIG_WR_TYPE0 0xa -#define PCIE_CONFIG_WR_TYPE1 0xb - -#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) -#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) -#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) -#define PCIE_CONF_REG(reg) ((reg) & 0xffc) -#define PCIE_CONF_ADDR(bus, devfn, where) \ - (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ - PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where)) - -#define PIO_TIMEOUT_MS 1 - -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_USLEEP_MIN 90000 -#define LINK_WAIT_USLEEP_MAX 100000 - -#define MSI_IRQ_NUM 32 - -struct advk_pcie { - struct platform_device *pdev; - void __iomem *base; - struct list_head resources; - struct irq_domain *irq_domain; - struct irq_chip irq_chip; - struct irq_domain *msi_domain; - struct irq_domain *msi_inner_domain; - struct irq_chip msi_bottom_irq_chip; - struct irq_chip msi_irq_chip; - struct msi_domain_info msi_domain_info; - DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); - struct mutex msi_used_lock; - u16 msi_msg; - int root_bus_nr; -}; - -static inline void advk_writel(struct advk_pcie 
*pcie, u32 val, u64 reg) -{ - writel(val, pcie->base + reg); -} - -static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg) -{ - return readl(pcie->base + reg); -} - -static int advk_pcie_link_up(struct advk_pcie *pcie) -{ - u32 val, ltssm_state; - - val = advk_readl(pcie, CFG_REG); - ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK; - return ltssm_state >= LTSSM_L0; -} - -static int advk_pcie_wait_for_link(struct advk_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - int retries; - - /* check if the link is up or not */ - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { - if (advk_pcie_link_up(pcie)) { - dev_info(dev, "link up\n"); - return 0; - } - - usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); - } - - dev_err(dev, "link never came up\n"); - return -ETIMEDOUT; -} - -/* - * Set PCIe address window register which could be used for memory - * mapping. - */ -static void advk_pcie_set_ob_win(struct advk_pcie *pcie, - u32 win_num, u32 match_ms, - u32 match_ls, u32 mask_ms, - u32 mask_ls, u32 remap_ms, - u32 remap_ls, u32 action) -{ - advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num)); - advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num)); - advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num)); - advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num)); - advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num)); - advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num)); - advk_writel(pcie, action, OB_WIN_ACTIONS(win_num)); - advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num)); -} - -static void advk_pcie_setup_hw(struct advk_pcie *pcie) -{ - u32 reg; - int i; - - /* Point PCIe unit MBUS decode windows to DRAM space */ - for (i = 0; i < 8; i++) - advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0); - - /* Set to Direct mode */ - reg = advk_readl(pcie, CTRL_CONFIG_REG); - reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT); - reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT); - advk_writel(pcie, reg, CTRL_CONFIG_REG); 
- - /* Set PCI global control register to RC mode */ - reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); - reg |= (IS_RC_MSK << IS_RC_SHIFT); - advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); - - /* Set Advanced Error Capabilities and Control PF0 register */ - reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX | - PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN | - PCIE_CORE_ERR_CAPCTL_ECRC_CHCK | - PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV; - advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG); - - /* Set PCIe Device Control and Status 1 PF0 register */ - reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | - (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | - PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | - (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ << - PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT); - advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); - - /* Program PCIe Control 2 to disable strict ordering */ - reg = PCIE_CORE_CTRL2_RESERVED | - PCIE_CORE_CTRL2_TD_ENABLE; - advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); - - /* Set GEN2 */ - reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); - reg &= ~PCIE_GEN_SEL_MSK; - reg |= SPEED_GEN_2; - advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); - - /* Set lane X1 */ - reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); - reg &= ~LANE_CNT_MSK; - reg |= LANE_COUNT_1; - advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); - - /* Enable link training */ - reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); - reg |= LINK_TRAINING_EN; - advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); - - /* Enable MSI */ - reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); - reg |= PCIE_CORE_CTRL2_MSI_ENABLE; - advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); - - /* Clear all interrupts */ - advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); - advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); - advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); - - /* Disable All ISR0/1 Sources */ - reg = PCIE_ISR0_ALL_MASK; - reg &= ~PCIE_ISR0_MSI_INT_PENDING; - advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); - - 
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); - - /* Unmask all MSI's */ - advk_writel(pcie, 0, PCIE_MSI_MASK_REG); - - /* Enable summary interrupt for GIC SPI source */ - reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); - advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG); - - reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); - reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE; - advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); - - /* Bypass the address window mapping for PIO */ - reg = advk_readl(pcie, PIO_CTRL); - reg |= PIO_CTRL_ADDR_WIN_DISABLE; - advk_writel(pcie, reg, PIO_CTRL); - - /* Start link training */ - reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG); - reg |= PCIE_CORE_LINK_TRAINING; - advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); - - advk_pcie_wait_for_link(pcie); - - reg = PCIE_CORE_LINK_L0S_ENTRY | - (1 << PCIE_CORE_LINK_WIDTH_SHIFT); - advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); - - reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); - reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | - PCIE_CORE_CMD_IO_ACCESS_EN | - PCIE_CORE_CMD_MEM_IO_REQ_EN; - advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG); -} - -static void advk_pcie_check_pio_status(struct advk_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - u32 reg; - unsigned int status; - char *strcomp_status, *str_posted; - - reg = advk_readl(pcie, PIO_STAT); - status = (reg & PIO_COMPLETION_STATUS_MASK) >> - PIO_COMPLETION_STATUS_SHIFT; - - if (!status) - return; - - switch (status) { - case PIO_COMPLETION_STATUS_UR: - strcomp_status = "UR"; - break; - case PIO_COMPLETION_STATUS_CRS: - strcomp_status = "CRS"; - break; - case PIO_COMPLETION_STATUS_CA: - strcomp_status = "CA"; - break; - default: - strcomp_status = "Unknown"; - break; - } - - if (reg & PIO_NON_POSTED_REQ) - str_posted = "Non-posted"; - else - str_posted = "Posted"; - - dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n", - str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS)); -} - -static int 
advk_pcie_wait_pio(struct advk_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - unsigned long timeout; - - timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS); - - while (time_before(jiffies, timeout)) { - u32 start, isr; - - start = advk_readl(pcie, PIO_START); - isr = advk_readl(pcie, PIO_ISR); - if (!start && isr) - return 0; - } - - dev_err(dev, "config read/write timed out\n"); - return -ETIMEDOUT; -} - -static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 *val) -{ - struct advk_pcie *pcie = bus->sysdata; - u32 reg; - int ret; - - if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) { - *val = 0xffffffff; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - /* Start PIO */ - advk_writel(pcie, 0, PIO_START); - advk_writel(pcie, 1, PIO_ISR); - - /* Program the control register */ - reg = advk_readl(pcie, PIO_CTRL); - reg &= ~PIO_CTRL_TYPE_MASK; - if (bus->number == pcie->root_bus_nr) - reg |= PCIE_CONFIG_RD_TYPE0; - else - reg |= PCIE_CONFIG_RD_TYPE1; - advk_writel(pcie, reg, PIO_CTRL); - - /* Program the address registers */ - reg = PCIE_CONF_ADDR(bus->number, devfn, where); - advk_writel(pcie, reg, PIO_ADDR_LS); - advk_writel(pcie, 0, PIO_ADDR_MS); - - /* Program the data strobe */ - advk_writel(pcie, 0xf, PIO_WR_DATA_STRB); - - /* Start the transfer */ - advk_writel(pcie, 1, PIO_START); - - ret = advk_pcie_wait_pio(pcie); - if (ret < 0) - return PCIBIOS_SET_FAILED; - - advk_pcie_check_pio_status(pcie); - - /* Get the read result */ - *val = advk_readl(pcie, PIO_RD_DATA); - if (size == 1) - *val = (*val >> (8 * (where & 3))) & 0xff; - else if (size == 2) - *val = (*val >> (8 * (where & 3))) & 0xffff; - - return PCIBIOS_SUCCESSFUL; -} - -static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) -{ - struct advk_pcie *pcie = bus->sysdata; - u32 reg; - u32 data_strobe = 0x0; - int offset; - int ret; - - if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) - return 
PCIBIOS_DEVICE_NOT_FOUND; - - if (where % size) - return PCIBIOS_SET_FAILED; - - /* Start PIO */ - advk_writel(pcie, 0, PIO_START); - advk_writel(pcie, 1, PIO_ISR); - - /* Program the control register */ - reg = advk_readl(pcie, PIO_CTRL); - reg &= ~PIO_CTRL_TYPE_MASK; - if (bus->number == pcie->root_bus_nr) - reg |= PCIE_CONFIG_WR_TYPE0; - else - reg |= PCIE_CONFIG_WR_TYPE1; - advk_writel(pcie, reg, PIO_CTRL); - - /* Program the address registers */ - reg = PCIE_CONF_ADDR(bus->number, devfn, where); - advk_writel(pcie, reg, PIO_ADDR_LS); - advk_writel(pcie, 0, PIO_ADDR_MS); - - /* Calculate the write strobe */ - offset = where & 0x3; - reg = val << (8 * offset); - data_strobe = GENMASK(size - 1, 0) << offset; - - /* Program the data register */ - advk_writel(pcie, reg, PIO_WR_DATA); - - /* Program the data strobe */ - advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB); - - /* Start the transfer */ - advk_writel(pcie, 1, PIO_START); - - ret = advk_pcie_wait_pio(pcie); - if (ret < 0) - return PCIBIOS_SET_FAILED; - - advk_pcie_check_pio_status(pcie); - - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops advk_pcie_ops = { - .read = advk_pcie_rd_conf, - .write = advk_pcie_wr_conf, -}; - -static void advk_msi_irq_compose_msi_msg(struct irq_data *data, - struct msi_msg *msg) -{ - struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); - phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg); - - msg->address_lo = lower_32_bits(msi_msg); - msg->address_hi = upper_32_bits(msi_msg); - msg->data = data->irq; -} - -static int advk_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - -static int advk_msi_irq_domain_alloc(struct irq_domain *domain, - unsigned int virq, - unsigned int nr_irqs, void *args) -{ - struct advk_pcie *pcie = domain->host_data; - int hwirq, i; - - mutex_lock(&pcie->msi_used_lock); - hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, - 0, nr_irqs, 0); - if (hwirq >= MSI_IRQ_NUM) { 
- mutex_unlock(&pcie->msi_used_lock); - return -ENOSPC; - } - - bitmap_set(pcie->msi_used, hwirq, nr_irqs); - mutex_unlock(&pcie->msi_used_lock); - - for (i = 0; i < nr_irqs; i++) - irq_domain_set_info(domain, virq + i, hwirq + i, - &pcie->msi_bottom_irq_chip, - domain->host_data, handle_simple_irq, - NULL, NULL); - - return hwirq; -} - -static void advk_msi_irq_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) -{ - struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct advk_pcie *pcie = domain->host_data; - - mutex_lock(&pcie->msi_used_lock); - bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); - mutex_unlock(&pcie->msi_used_lock); -} - -static const struct irq_domain_ops advk_msi_domain_ops = { - .alloc = advk_msi_irq_domain_alloc, - .free = advk_msi_irq_domain_free, -}; - -static void advk_pcie_irq_mask(struct irq_data *d) -{ - struct advk_pcie *pcie = d->domain->host_data; - irq_hw_number_t hwirq = irqd_to_hwirq(d); - u32 mask; - - mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); - mask |= PCIE_ISR1_INTX_ASSERT(hwirq); - advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); -} - -static void advk_pcie_irq_unmask(struct irq_data *d) -{ - struct advk_pcie *pcie = d->domain->host_data; - irq_hw_number_t hwirq = irqd_to_hwirq(d); - u32 mask; - - mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); - mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq); - advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); -} - -static int advk_pcie_irq_map(struct irq_domain *h, - unsigned int virq, irq_hw_number_t hwirq) -{ - struct advk_pcie *pcie = h->host_data; - - advk_pcie_irq_mask(irq_get_irq_data(virq)); - irq_set_status_flags(virq, IRQ_LEVEL); - irq_set_chip_and_handler(virq, &pcie->irq_chip, - handle_level_irq); - irq_set_chip_data(virq, pcie); - - return 0; -} - -static const struct irq_domain_ops advk_pcie_irq_domain_ops = { - .map = advk_pcie_irq_map, - .xlate = irq_domain_xlate_onecell, -}; - -static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) -{ - 
struct device *dev = &pcie->pdev->dev; - struct device_node *node = dev->of_node; - struct irq_chip *bottom_ic, *msi_ic; - struct msi_domain_info *msi_di; - phys_addr_t msi_msg_phys; - - mutex_init(&pcie->msi_used_lock); - - bottom_ic = &pcie->msi_bottom_irq_chip; - - bottom_ic->name = "MSI"; - bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg; - bottom_ic->irq_set_affinity = advk_msi_set_affinity; - - msi_ic = &pcie->msi_irq_chip; - msi_ic->name = "advk-MSI"; - - msi_di = &pcie->msi_domain_info; - msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI; - msi_di->chip = msi_ic; - - msi_msg_phys = virt_to_phys(&pcie->msi_msg); - - advk_writel(pcie, lower_32_bits(msi_msg_phys), - PCIE_MSI_ADDR_LOW_REG); - advk_writel(pcie, upper_32_bits(msi_msg_phys), - PCIE_MSI_ADDR_HIGH_REG); - - pcie->msi_inner_domain = - irq_domain_add_linear(NULL, MSI_IRQ_NUM, - &advk_msi_domain_ops, pcie); - if (!pcie->msi_inner_domain) - return -ENOMEM; - - pcie->msi_domain = - pci_msi_create_irq_domain(of_node_to_fwnode(node), - msi_di, pcie->msi_inner_domain); - if (!pcie->msi_domain) { - irq_domain_remove(pcie->msi_inner_domain); - return -ENOMEM; - } - - return 0; -} - -static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) -{ - irq_domain_remove(pcie->msi_domain); - irq_domain_remove(pcie->msi_inner_domain); -} - -static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - struct device_node *node = dev->of_node; - struct device_node *pcie_intc_node; - struct irq_chip *irq_chip; - - pcie_intc_node = of_get_next_child(node, NULL); - if (!pcie_intc_node) { - dev_err(dev, "No PCIe Intc node found\n"); - return -ENODEV; - } - - irq_chip = &pcie->irq_chip; - - irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", - dev_name(dev)); - if (!irq_chip->name) { - of_node_put(pcie_intc_node); - return -ENOMEM; - } - - irq_chip->irq_mask = advk_pcie_irq_mask; - irq_chip->irq_mask_ack = 
advk_pcie_irq_mask; - irq_chip->irq_unmask = advk_pcie_irq_unmask; - - pcie->irq_domain = - irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &advk_pcie_irq_domain_ops, pcie); - if (!pcie->irq_domain) { - dev_err(dev, "Failed to get a INTx IRQ domain\n"); - of_node_put(pcie_intc_node); - return -ENOMEM; - } - - return 0; -} - -static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) -{ - irq_domain_remove(pcie->irq_domain); -} - -static void advk_pcie_handle_msi(struct advk_pcie *pcie) -{ - u32 msi_val, msi_mask, msi_status, msi_idx; - u16 msi_data; - - msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); - msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); - msi_status = msi_val & ~msi_mask; - - for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { - if (!(BIT(msi_idx) & msi_status)) - continue; - - advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); - msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF; - generic_handle_irq(msi_data); - } - - advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, - PCIE_ISR0_REG); -} - -static void advk_pcie_handle_int(struct advk_pcie *pcie) -{ - u32 isr0_val, isr0_mask, isr0_status; - u32 isr1_val, isr1_mask, isr1_status; - int i, virq; - - isr0_val = advk_readl(pcie, PCIE_ISR0_REG); - isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK); - - isr1_val = advk_readl(pcie, PCIE_ISR1_REG); - isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); - isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); - - if (!isr0_status && !isr1_status) { - advk_writel(pcie, isr0_val, PCIE_ISR0_REG); - advk_writel(pcie, isr1_val, PCIE_ISR1_REG); - return; - } - - /* Process MSI interrupts */ - if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) - advk_pcie_handle_msi(pcie); - - /* Process legacy interrupts */ - for (i = 0; i < PCI_NUM_INTX; i++) { - if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i))) - continue; - - advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i), - PCIE_ISR1_REG); - - virq = 
irq_find_mapping(pcie->irq_domain, i); - generic_handle_irq(virq); - } -} - -static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) -{ - struct advk_pcie *pcie = arg; - u32 status; - - status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); - if (!(status & PCIE_IRQ_CORE_INT)) - return IRQ_NONE; - - advk_pcie_handle_int(pcie); - - /* Clear interrupt */ - advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); - - return IRQ_HANDLED; -} - -static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) -{ - int err, res_valid = 0; - struct device *dev = &pcie->pdev->dev; - struct resource_entry *win, *tmp; - resource_size_t iobase; - - INIT_LIST_HEAD(&pcie->resources); - - err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, - &pcie->resources, &iobase); - if (err) - return err; - - err = devm_request_pci_bus_resources(dev, &pcie->resources); - if (err) - goto out_release_res; - - resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { - struct resource *res = win->res; - - switch (resource_type(res)) { - case IORESOURCE_IO: - advk_pcie_set_ob_win(pcie, 1, - upper_32_bits(res->start), - lower_32_bits(res->start), - 0, 0xF8000000, 0, - lower_32_bits(res->start), - OB_PCIE_IO); - err = pci_remap_iospace(res, iobase); - if (err) { - dev_warn(dev, "error %d: failed to map resource %pR\n", - err, res); - resource_list_destroy_entry(win); - } - break; - case IORESOURCE_MEM: - advk_pcie_set_ob_win(pcie, 0, - upper_32_bits(res->start), - lower_32_bits(res->start), - 0x0, 0xF8000000, 0, - lower_32_bits(res->start), - (2 << 20) | OB_PCIE_MEM); - res_valid |= !(res->flags & IORESOURCE_PREFETCH); - break; - case IORESOURCE_BUS: - pcie->root_bus_nr = res->start; - break; - } - } - - if (!res_valid) { - dev_err(dev, "non-prefetchable memory resource required\n"); - err = -EINVAL; - goto out_release_res; - } - - return 0; - -out_release_res: - pci_free_resource_list(&pcie->resources); - return err; -} - -static int advk_pcie_probe(struct 
platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct advk_pcie *pcie; - struct resource *res; - struct pci_bus *bus, *child; - struct pci_host_bridge *bridge; - int ret, irq; - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); - if (!bridge) - return -ENOMEM; - - pcie = pci_host_bridge_priv(bridge); - pcie->pdev = pdev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pcie->base = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->base)) - return PTR_ERR(pcie->base); - - irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(dev, irq, advk_pcie_irq_handler, - IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", - pcie); - if (ret) { - dev_err(dev, "Failed to register interrupt\n"); - return ret; - } - - ret = advk_pcie_parse_request_of_pci_ranges(pcie); - if (ret) { - dev_err(dev, "Failed to parse resources\n"); - return ret; - } - - advk_pcie_setup_hw(pcie); - - ret = advk_pcie_init_irq_domain(pcie); - if (ret) { - dev_err(dev, "Failed to initialize irq\n"); - return ret; - } - - ret = advk_pcie_init_msi_irq_domain(pcie); - if (ret) { - dev_err(dev, "Failed to initialize irq\n"); - advk_pcie_remove_irq_domain(pcie); - return ret; - } - - list_splice_init(&pcie->resources, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = pcie; - bridge->busnr = 0; - bridge->ops = &advk_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) { - advk_pcie_remove_msi_irq_domain(pcie); - advk_pcie_remove_irq_domain(pcie); - return ret; - } - - bus = bridge->bus; - - pci_bus_assign_resources(bus); - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - return 0; -} - -static const struct of_device_id advk_pcie_of_match_table[] = { - { .compatible = "marvell,armada-3700-pcie", }, - {}, -}; - -static struct platform_driver advk_pcie_driver = { - .driver = { - 
.name = "advk-pcie", - .of_match_table = advk_pcie_of_match_table, - /* Driver unloading/unbinding currently not supported */ - .suppress_bind_attrs = true, - }, - .probe = advk_pcie_probe, -}; -builtin_platform_driver(advk_pcie_driver); diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c deleted file mode 100644 index a1ebe9ed441f..000000000000 --- a/drivers/pci/host/pci-ftpci100.c +++ /dev/null @@ -1,619 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Support for Faraday Technology FTPC100 PCI Controller - * - * Copyright (C) 2017 Linus Walleij - * - * Based on the out-of-tree OpenWRT patch for Cortina Gemini: - * Copyright (C) 2009 Janos Laube - * Copyright (C) 2009 Paulius Zaleckas - * Based on SL2312 PCI controller code - * Storlink (C) 2003 - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -/* - * Special configuration registers directly in the first few words - * in I/O space. 
- */ -#define PCI_IOSIZE 0x00 -#define PCI_PROT 0x04 /* AHB protection */ -#define PCI_CTRL 0x08 /* PCI control signal */ -#define PCI_SOFTRST 0x10 /* Soft reset counter and response error enable */ -#define PCI_CONFIG 0x28 /* PCI configuration command register */ -#define PCI_DATA 0x2C - -#define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */ -#define FARADAY_PCI_PMC 0x40 /* Power management control */ -#define FARADAY_PCI_PMCSR 0x44 /* Power management status */ -#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ -#define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */ -#define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */ -#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ -#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ - -#define PCI_STATUS_66MHZ_CAPABLE BIT(21) - -/* Bits 31..28 gives INTD..INTA status */ -#define PCI_CTRL2_INTSTS_SHIFT 28 -#define PCI_CTRL2_INTMASK_CMDERR BIT(27) -#define PCI_CTRL2_INTMASK_PARERR BIT(26) -/* Bits 25..22 masks INTD..INTA */ -#define PCI_CTRL2_INTMASK_SHIFT 22 -#define PCI_CTRL2_INTMASK_MABRT_RX BIT(21) -#define PCI_CTRL2_INTMASK_TABRT_RX BIT(20) -#define PCI_CTRL2_INTMASK_TABRT_TX BIT(19) -#define PCI_CTRL2_INTMASK_RETRY4 BIT(18) -#define PCI_CTRL2_INTMASK_SERR_RX BIT(17) -#define PCI_CTRL2_INTMASK_PERR_RX BIT(16) -/* Bit 15 reserved */ -#define PCI_CTRL2_MSTPRI_REQ6 BIT(14) -#define PCI_CTRL2_MSTPRI_REQ5 BIT(13) -#define PCI_CTRL2_MSTPRI_REQ4 BIT(12) -#define PCI_CTRL2_MSTPRI_REQ3 BIT(11) -#define PCI_CTRL2_MSTPRI_REQ2 BIT(10) -#define PCI_CTRL2_MSTPRI_REQ1 BIT(9) -#define PCI_CTRL2_MSTPRI_REQ0 BIT(8) -/* Bits 7..4 reserved */ -/* Bits 3..0 TRDYW */ - -/* - * Memory configs: - * Bit 31..20 defines the PCI side memory base - * Bit 19..16 (4 bits) defines the size per below - */ -#define FARADAY_PCI_MEMBASE_MASK 0xfff00000 -#define FARADAY_PCI_MEMSIZE_1MB 0x0 -#define FARADAY_PCI_MEMSIZE_2MB 0x1 -#define FARADAY_PCI_MEMSIZE_4MB 0x2 -#define 
FARADAY_PCI_MEMSIZE_8MB 0x3 -#define FARADAY_PCI_MEMSIZE_16MB 0x4 -#define FARADAY_PCI_MEMSIZE_32MB 0x5 -#define FARADAY_PCI_MEMSIZE_64MB 0x6 -#define FARADAY_PCI_MEMSIZE_128MB 0x7 -#define FARADAY_PCI_MEMSIZE_256MB 0x8 -#define FARADAY_PCI_MEMSIZE_512MB 0x9 -#define FARADAY_PCI_MEMSIZE_1GB 0xa -#define FARADAY_PCI_MEMSIZE_2GB 0xb -#define FARADAY_PCI_MEMSIZE_SHIFT 16 - -/* - * The DMA base is set to 0x0 for all memory segments, it reflects the - * fact that the memory of the host system starts at 0x0. - */ -#define FARADAY_PCI_DMA_MEM1_BASE 0x00000000 -#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000 -#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000 - -/* Defines for PCI configuration command register */ -#define PCI_CONF_ENABLE BIT(31) -#define PCI_CONF_WHERE(r) ((r) & 0xFC) -#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16) -#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11) -#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8) - -/** - * struct faraday_pci_variant - encodes IP block differences - * @cascaded_irq: this host has cascaded IRQs from an interrupt controller - * embedded in the host bridge. 
- */ -struct faraday_pci_variant { - bool cascaded_irq; -}; - -struct faraday_pci { - struct device *dev; - void __iomem *base; - struct irq_domain *irqdomain; - struct pci_bus *bus; - struct clk *bus_clk; -}; - -static int faraday_res_to_memcfg(resource_size_t mem_base, - resource_size_t mem_size, u32 *val) -{ - u32 outval; - - switch (mem_size) { - case SZ_1M: - outval = FARADAY_PCI_MEMSIZE_1MB; - break; - case SZ_2M: - outval = FARADAY_PCI_MEMSIZE_2MB; - break; - case SZ_4M: - outval = FARADAY_PCI_MEMSIZE_4MB; - break; - case SZ_8M: - outval = FARADAY_PCI_MEMSIZE_8MB; - break; - case SZ_16M: - outval = FARADAY_PCI_MEMSIZE_16MB; - break; - case SZ_32M: - outval = FARADAY_PCI_MEMSIZE_32MB; - break; - case SZ_64M: - outval = FARADAY_PCI_MEMSIZE_64MB; - break; - case SZ_128M: - outval = FARADAY_PCI_MEMSIZE_128MB; - break; - case SZ_256M: - outval = FARADAY_PCI_MEMSIZE_256MB; - break; - case SZ_512M: - outval = FARADAY_PCI_MEMSIZE_512MB; - break; - case SZ_1G: - outval = FARADAY_PCI_MEMSIZE_1GB; - break; - case SZ_2G: - outval = FARADAY_PCI_MEMSIZE_2GB; - break; - default: - return -EINVAL; - } - outval <<= FARADAY_PCI_MEMSIZE_SHIFT; - - /* This is probably not good */ - if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK)) - pr_warn("truncated PCI memory base\n"); - /* Translate to bridge side address space */ - outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK); - pr_debug("Translated pci base @%pap, size %pap to config %08x\n", - &mem_base, &mem_size, outval); - - *val = outval; - return 0; -} - -static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number, - unsigned int fn, int config, int size, - u32 *value) -{ - writel(PCI_CONF_BUS(bus_number) | - PCI_CONF_DEVICE(PCI_SLOT(fn)) | - PCI_CONF_FUNCTION(PCI_FUNC(fn)) | - PCI_CONF_WHERE(config) | - PCI_CONF_ENABLE, - p->base + PCI_CONFIG); - - *value = readl(p->base + PCI_DATA); - - if (size == 1) - *value = (*value >> (8 * (config & 3))) & 0xFF; - else if (size == 2) - *value = (*value >> (8 * (config & 3))) & 
0xFFFF; - - return PCIBIOS_SUCCESSFUL; -} - -static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, - int config, int size, u32 *value) -{ - struct faraday_pci *p = bus->sysdata; - - dev_dbg(&bus->dev, - "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", - PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); - - return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value); -} - -static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number, - unsigned int fn, int config, int size, - u32 value) -{ - int ret = PCIBIOS_SUCCESSFUL; - - writel(PCI_CONF_BUS(bus_number) | - PCI_CONF_DEVICE(PCI_SLOT(fn)) | - PCI_CONF_FUNCTION(PCI_FUNC(fn)) | - PCI_CONF_WHERE(config) | - PCI_CONF_ENABLE, - p->base + PCI_CONFIG); - - switch (size) { - case 4: - writel(value, p->base + PCI_DATA); - break; - case 2: - writew(value, p->base + PCI_DATA + (config & 3)); - break; - case 1: - writeb(value, p->base + PCI_DATA + (config & 3)); - break; - default: - ret = PCIBIOS_BAD_REGISTER_NUMBER; - } - - return ret; -} - -static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, - int config, int size, u32 value) -{ - struct faraday_pci *p = bus->sysdata; - - dev_dbg(&bus->dev, - "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", - PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); - - return faraday_raw_pci_write_config(p, bus->number, fn, config, size, - value); -} - -static struct pci_ops faraday_pci_ops = { - .read = faraday_pci_read_config, - .write = faraday_pci_write_config, -}; - -static void faraday_pci_ack_irq(struct irq_data *d) -{ - struct faraday_pci *p = irq_data_get_irq_chip_data(d); - unsigned int reg; - - faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); - reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); - reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); - faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); -} - -static void faraday_pci_mask_irq(struct irq_data 
*d) -{ - struct faraday_pci *p = irq_data_get_irq_chip_data(d); - unsigned int reg; - - faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); - reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) - | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); - faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); -} - -static void faraday_pci_unmask_irq(struct irq_data *d) -{ - struct faraday_pci *p = irq_data_get_irq_chip_data(d); - unsigned int reg; - - faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); - reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); - reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT); - faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); -} - -static void faraday_pci_irq_handler(struct irq_desc *desc) -{ - struct faraday_pci *p = irq_desc_get_handler_data(desc); - struct irq_chip *irqchip = irq_desc_get_chip(desc); - unsigned int irq_stat, reg, i; - - faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); - irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; - - chained_irq_enter(irqchip, desc); - - for (i = 0; i < 4; i++) { - if ((irq_stat & BIT(i)) == 0) - continue; - generic_handle_irq(irq_find_mapping(p->irqdomain, i)); - } - - chained_irq_exit(irqchip, desc); -} - -static struct irq_chip faraday_pci_irq_chip = { - .name = "PCI", - .irq_ack = faraday_pci_ack_irq, - .irq_mask = faraday_pci_mask_irq, - .irq_unmask = faraday_pci_unmask_irq, -}; - -static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq); - irq_set_chip_data(irq, domain->host_data); - - return 0; -} - -static const struct irq_domain_ops faraday_pci_irqdomain_ops = { - .map = faraday_pci_irq_map, -}; - -static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) -{ - struct device_node *intc = of_get_next_child(p->dev->of_node, NULL); - int irq; - int i; - - if (!intc) { - dev_err(p->dev, "missing child interrupt-controller 
node\n"); - return -EINVAL; - } - - /* All PCI IRQs cascade off this one */ - irq = of_irq_get(intc, 0); - if (irq <= 0) { - dev_err(p->dev, "failed to get parent IRQ\n"); - return irq ?: -EINVAL; - } - - p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &faraday_pci_irqdomain_ops, p); - if (!p->irqdomain) { - dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); - return -EINVAL; - } - - irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p); - - for (i = 0; i < 4; i++) - irq_create_mapping(p->irqdomain, i); - - return 0; -} - -static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p, - struct device_node *np) -{ - struct of_pci_range range; - struct of_pci_range_parser parser; - struct device *dev = p->dev; - u32 confreg[3] = { - FARADAY_PCI_MEM1_BASE_SIZE, - FARADAY_PCI_MEM2_BASE_SIZE, - FARADAY_PCI_MEM3_BASE_SIZE, - }; - int i = 0; - u32 val; - - if (of_pci_dma_range_parser_init(&parser, np)) { - dev_err(dev, "missing dma-ranges property\n"); - return -EINVAL; - } - - /* - * Get the dma-ranges from the device tree - */ - for_each_of_pci_range(&parser, &range) { - u64 end = range.pci_addr + range.size - 1; - int ret; - - ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val); - if (ret) { - dev_err(dev, - "DMA range %d: illegal MEM resource size\n", i); - return -EINVAL; - } - - dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", - i + 1, range.pci_addr, end, val); - if (i <= 2) { - faraday_raw_pci_write_config(p, 0, 0, confreg[i], - 4, val); - } else { - dev_err(dev, "ignore extraneous dma-range %d\n", i); - break; - } - - i++; - } - - return 0; -} - -static int faraday_pci_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - const struct faraday_pci_variant *variant = - of_device_get_match_data(dev); - struct resource *regs; - resource_size_t io_base; - struct resource_entry *win; - struct faraday_pci *p; - struct resource *mem; - struct resource *io; - struct pci_host_bridge 
*host; - struct clk *clk; - unsigned char max_bus_speed = PCI_SPEED_33MHz; - unsigned char cur_bus_speed = PCI_SPEED_33MHz; - int ret; - u32 val; - LIST_HEAD(res); - - host = devm_pci_alloc_host_bridge(dev, sizeof(*p)); - if (!host) - return -ENOMEM; - - host->dev.parent = dev; - host->ops = &faraday_pci_ops; - host->busnr = 0; - host->msi = NULL; - host->map_irq = of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; - p = pci_host_bridge_priv(host); - host->sysdata = p; - p->dev = dev; - - /* Retrieve and enable optional clocks */ - clk = devm_clk_get(dev, "PCLK"); - if (IS_ERR(clk)) - return PTR_ERR(clk); - ret = clk_prepare_enable(clk); - if (ret) { - dev_err(dev, "could not prepare PCLK\n"); - return ret; - } - p->bus_clk = devm_clk_get(dev, "PCICLK"); - if (IS_ERR(p->bus_clk)) - return PTR_ERR(p->bus_clk); - ret = clk_prepare_enable(p->bus_clk); - if (ret) { - dev_err(dev, "could not prepare PCICLK\n"); - return ret; - } - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - p->base = devm_ioremap_resource(dev, regs); - if (IS_ERR(p->base)) - return PTR_ERR(p->base); - - ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, - &res, &io_base); - if (ret) - return ret; - - ret = devm_request_pci_bus_resources(dev, &res); - if (ret) - return ret; - - /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry(win, &res) { - switch (resource_type(win->res)) { - case IORESOURCE_IO: - io = win->res; - io->name = "Gemini PCI I/O"; - if (!faraday_res_to_memcfg(io->start - win->offset, - resource_size(io), &val)) { - /* setup I/O space size */ - writel(val, p->base + PCI_IOSIZE); - } else { - dev_err(dev, "illegal IO mem size\n"); - return -EINVAL; - } - ret = pci_remap_iospace(io, io_base); - if (ret) { - dev_warn(dev, "error %d: failed to map resource %pR\n", - ret, io); - continue; - } - break; - case IORESOURCE_MEM: - mem = win->res; - mem->name = "Gemini PCI MEM"; - break; - case IORESOURCE_BUS: - break; - default: - break; 
- } - } - - /* Setup hostbridge */ - val = readl(p->base + PCI_CTRL); - val |= PCI_COMMAND_IO; - val |= PCI_COMMAND_MEMORY; - val |= PCI_COMMAND_MASTER; - writel(val, p->base + PCI_CTRL); - /* Mask and clear all interrupts */ - faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); - if (variant->cascaded_irq) { - ret = faraday_pci_setup_cascaded_irq(p); - if (ret) { - dev_err(dev, "failed to setup cascaded IRQ\n"); - return ret; - } - } - - /* Check bus clock if we can gear up to 66 MHz */ - if (!IS_ERR(p->bus_clk)) { - unsigned long rate; - u32 val; - - faraday_raw_pci_read_config(p, 0, 0, - FARADAY_PCI_STATUS_CMD, 4, &val); - rate = clk_get_rate(p->bus_clk); - - if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) { - dev_info(dev, "33MHz bus is 66MHz capable\n"); - max_bus_speed = PCI_SPEED_66MHz; - ret = clk_set_rate(p->bus_clk, 66000000); - if (ret) - dev_err(dev, "failed to set bus clock\n"); - } else { - dev_info(dev, "33MHz only bus\n"); - max_bus_speed = PCI_SPEED_33MHz; - } - - /* Bumping the clock may fail so read back the rate */ - rate = clk_get_rate(p->bus_clk); - if (rate == 33000000) - cur_bus_speed = PCI_SPEED_33MHz; - if (rate == 66000000) - cur_bus_speed = PCI_SPEED_66MHz; - } - - ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); - if (ret) - return ret; - - list_splice_init(&res, &host->windows); - ret = pci_scan_root_bus_bridge(host); - if (ret) { - dev_err(dev, "failed to scan host: %d\n", ret); - return ret; - } - p->bus = host->bus; - p->bus->max_bus_speed = max_bus_speed; - p->bus->cur_bus_speed = cur_bus_speed; - - pci_bus_assign_resources(p->bus); - pci_bus_add_devices(p->bus); - pci_free_resource_list(&res); - - return 0; -} - -/* - * We encode bridge variants here, we have at least two so it doesn't - * hurt to have infrastructure to encompass future variants as well. 
- */ -static const struct faraday_pci_variant faraday_regular = { - .cascaded_irq = true, -}; - -static const struct faraday_pci_variant faraday_dual = { - .cascaded_irq = false, -}; - -static const struct of_device_id faraday_pci_of_match[] = { - { - .compatible = "faraday,ftpci100", - .data = &faraday_regular, - }, - { - .compatible = "faraday,ftpci100-dual", - .data = &faraday_dual, - }, - {}, -}; - -static struct platform_driver faraday_pci_driver = { - .driver = { - .name = "ftpci100", - .of_match_table = of_match_ptr(faraday_pci_of_match), - .suppress_bind_attrs = true, - }, - .probe = faraday_pci_probe, -}; -builtin_platform_driver(faraday_pci_driver); diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c deleted file mode 100644 index d8f10451f273..000000000000 --- a/drivers/pci/host/pci-host-common.c +++ /dev/null @@ -1,118 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Generic PCI host driver common code - * - * Copyright (C) 2014 ARM Limited - * - * Author: Will Deacon - */ - -#include -#include -#include -#include -#include - -static void gen_pci_unmap_cfg(void *ptr) -{ - pci_ecam_free((struct pci_config_window *)ptr); -} - -static struct pci_config_window *gen_pci_init(struct device *dev, - struct list_head *resources, struct pci_ecam_ops *ops) -{ - int err; - struct resource cfgres; - struct resource *bus_range = NULL; - struct pci_config_window *cfg; - - /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range); - if (err) - return ERR_PTR(err); - - err = of_address_to_resource(dev->of_node, 0, &cfgres); - if (err) { - dev_err(dev, "missing \"reg\" property\n"); - goto err_out; - } - - cfg = pci_ecam_create(dev, &cfgres, bus_range, ops); - if (IS_ERR(cfg)) { - err = PTR_ERR(cfg); - goto err_out; - } - - err = devm_add_action(dev, gen_pci_unmap_cfg, cfg); - if (err) { - gen_pci_unmap_cfg(cfg); - goto err_out; - } - return cfg; - -err_out: - 
pci_free_resource_list(resources); - return ERR_PTR(err); -} - -int pci_host_common_probe(struct platform_device *pdev, - struct pci_ecam_ops *ops) -{ - const char *type; - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - struct pci_host_bridge *bridge; - struct pci_config_window *cfg; - struct list_head resources; - int ret; - - bridge = devm_pci_alloc_host_bridge(dev, 0); - if (!bridge) - return -ENOMEM; - - type = of_get_property(np, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } - - of_pci_check_probe_only(); - - /* Parse and map our Configuration Space windows */ - cfg = gen_pci_init(dev, &resources, ops); - if (IS_ERR(cfg)) - return PTR_ERR(cfg); - - /* Do not reassign resources if probe only */ - if (!pci_has_flag(PCI_PROBE_ONLY)) - pci_add_flags(PCI_REASSIGN_ALL_BUS); - - list_splice_init(&resources, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = cfg; - bridge->busnr = cfg->busr.start; - bridge->ops = &ops->pci_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_host_probe(bridge); - if (ret < 0) { - pci_free_resource_list(&resources); - return ret; - } - - platform_set_drvdata(pdev, bridge->bus); - return 0; -} - -int pci_host_common_remove(struct platform_device *pdev) -{ - struct pci_bus *bus = platform_get_drvdata(pdev); - - pci_lock_rescan_remove(); - pci_stop_root_bus(bus); - pci_remove_root_bus(bus); - pci_unlock_rescan_remove(); - - return 0; -} diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c deleted file mode 100644 index dea3ec7592a2..000000000000 --- a/drivers/pci/host/pci-host-generic.c +++ /dev/null @@ -1,100 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Simple, generic PCI host controller driver targetting firmware-initialised - * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). 
- * - * Copyright (C) 2014 ARM Limited - * - * Author: Will Deacon - */ - -#include -#include -#include -#include -#include -#include - -static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = { - .bus_shift = 16, - .pci_ops = { - .map_bus = pci_ecam_map_bus, - .read = pci_generic_config_read, - .write = pci_generic_config_write, - } -}; - -static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn) -{ - struct pci_config_window *cfg = bus->sysdata; - - /* - * The Synopsys DesignWare PCIe controller in ECAM mode will not filter - * type 0 config TLPs sent to devices 1 and up on its downstream port, - * resulting in devices appearing multiple times on bus 0 unless we - * filter out those accesses here. - */ - if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0) - return false; - - return true; -} - -static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus, - unsigned int devfn, int where) -{ - if (!pci_dw_valid_device(bus, devfn)) - return NULL; - - return pci_ecam_map_bus(bus, devfn, where); -} - -static struct pci_ecam_ops pci_dw_ecam_bus_ops = { - .bus_shift = 20, - .pci_ops = { - .map_bus = pci_dw_ecam_map_bus, - .read = pci_generic_config_read, - .write = pci_generic_config_write, - } -}; - -static const struct of_device_id gen_pci_of_match[] = { - { .compatible = "pci-host-cam-generic", - .data = &gen_pci_cfg_cam_bus_ops }, - - { .compatible = "pci-host-ecam-generic", - .data = &pci_generic_ecam_ops }, - - { .compatible = "marvell,armada8k-pcie-ecam", - .data = &pci_dw_ecam_bus_ops }, - - { .compatible = "socionext,synquacer-pcie-ecam", - .data = &pci_dw_ecam_bus_ops }, - - { .compatible = "snps,dw-pcie-ecam", - .data = &pci_dw_ecam_bus_ops }, - - { }, -}; - -static int gen_pci_probe(struct platform_device *pdev) -{ - const struct of_device_id *of_id; - struct pci_ecam_ops *ops; - - of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node); - ops = (struct pci_ecam_ops *)of_id->data; - - return pci_host_common_probe(pdev, ops); -} - 
-static struct platform_driver gen_pci_driver = { - .driver = { - .name = "pci-host-generic", - .of_match_table = gen_pci_of_match, - .suppress_bind_attrs = true, - }, - .probe = gen_pci_probe, - .remove = pci_host_common_remove, -}; -builtin_platform_driver(gen_pci_driver); diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c deleted file mode 100644 index 6cc5036ac83c..000000000000 --- a/drivers/pci/host/pci-hyperv.c +++ /dev/null @@ -1,2694 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) Microsoft Corporation. - * - * Author: - * Jake Oshins - * - * This driver acts as a paravirtual front-end for PCI Express root buses. - * When a PCI Express function (either an entire device or an SR-IOV - * Virtual Function) is being passed through to the VM, this driver exposes - * a new bus to the guest VM. This is modeled as a root PCI bus because - * no bridges are being exposed to the VM. In fact, with a "Generation 2" - * VM within Hyper-V, there may seem to be no PCI bus at all in the VM - * until a device as been exposed using this driver. - * - * Each root PCI bus has its own PCI domain, which is called "Segment" in - * the PCI Firmware Specifications. Thus while each device passed through - * to the VM using this front-end will appear at "device 0", the domain will - * be unique. Typically, each bus will have one PCI function on it, though - * this driver does support more than one. - * - * In order to map the interrupts from the device through to the guest VM, - * this driver also implements an IRQ Domain, which handles interrupts (either - * MSI or MSI-X) associated with the functions on the bus. As interrupts are - * set up, torn down, or reaffined, this driver communicates with the - * underlying hypervisor to adjust the mappings in the I/O MMU so that each - * interrupt will be delivered to the correct virtual processor at the right - * vector. 
This driver does not support level-triggered (line-based) - * interrupts, and will report that the Interrupt Line register in the - * function's configuration space is zero. - * - * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V - * facilities. For instance, the configuration space of a function exposed - * by Hyper-V is mapped into a single page of memory space, and the - * read and write handlers for config space must be aware of this mechanism. - * Similarly, device setup and teardown involves messages sent to and from - * the PCI back-end driver in Hyper-V. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Protocol versions. The low word is the minor version, the high word the - * major version. - */ - -#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor))) -#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16) -#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff) - -enum pci_protocol_version_t { - PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */ - PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */ -}; - -#define CPU_AFFINITY_ALL -1ULL - -/* - * Supported protocol versions in the order of probing - highest go - * first. - */ -static enum pci_protocol_version_t pci_protocol_versions[] = { - PCI_PROTOCOL_VERSION_1_2, - PCI_PROTOCOL_VERSION_1_1, -}; - -/* - * Protocol version negotiated by hv_pci_protocol_negotiation(). 
- */ -static enum pci_protocol_version_t pci_protocol_version; - -#define PCI_CONFIG_MMIO_LENGTH 0x2000 -#define CFG_PAGE_OFFSET 0x1000 -#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) - -#define MAX_SUPPORTED_MSI_MESSAGES 0x400 - -#define STATUS_REVISION_MISMATCH 0xC0000059 - -/* - * Message Types - */ - -enum pci_message_type { - /* - * Version 1.1 - */ - PCI_MESSAGE_BASE = 0x42490000, - PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0, - PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1, - PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4, - PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5, - PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6, - PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7, - PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8, - PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9, - PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA, - PCI_EJECT = PCI_MESSAGE_BASE + 0xB, - PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC, - PCI_REENABLE = PCI_MESSAGE_BASE + 0xD, - PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE, - PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF, - PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10, - PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11, - PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12, - PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, - PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, - PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, - PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, - PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, - PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ - PCI_MESSAGE_MAXIMUM -}; - -/* - * Structures defining the virtual PCI Express protocol. - */ - -union pci_version { - struct { - u16 minor_version; - u16 major_version; - } parts; - u32 version; -} __packed; - -/* - * Function numbers are 8-bits wide on Express, as interpreted through ARI, - * which is all this driver does. 
This representation is the one used in - * Windows, which is what is expected when sending this back and forth with - * the Hyper-V parent partition. - */ -union win_slot_encoding { - struct { - u32 dev:5; - u32 func:3; - u32 reserved:24; - } bits; - u32 slot; -} __packed; - -/* - * Pretty much as defined in the PCI Specifications. - */ -struct pci_function_description { - u16 v_id; /* vendor ID */ - u16 d_id; /* device ID */ - u8 rev; - u8 prog_intf; - u8 subclass; - u8 base_class; - u32 subsystem_id; - union win_slot_encoding win_slot; - u32 ser; /* serial number */ -} __packed; - -/** - * struct hv_msi_desc - * @vector: IDT entry - * @delivery_mode: As defined in Intel's Programmer's - * Reference Manual, Volume 3, Chapter 8. - * @vector_count: Number of contiguous entries in the - * Interrupt Descriptor Table that are - * occupied by this Message-Signaled - * Interrupt. For "MSI", as first defined - * in PCI 2.2, this can be between 1 and - * 32. For "MSI-X," as first defined in PCI - * 3.0, this must be 1, as each MSI-X table - * entry would have its own descriptor. - * @reserved: Empty space - * @cpu_mask: All the target virtual processors. - */ -struct hv_msi_desc { - u8 vector; - u8 delivery_mode; - u16 vector_count; - u32 reserved; - u64 cpu_mask; -} __packed; - -/** - * struct hv_msi_desc2 - 1.2 version of hv_msi_desc - * @vector: IDT entry - * @delivery_mode: As defined in Intel's Programmer's - * Reference Manual, Volume 3, Chapter 8. - * @vector_count: Number of contiguous entries in the - * Interrupt Descriptor Table that are - * occupied by this Message-Signaled - * Interrupt. For "MSI", as first defined - * in PCI 2.2, this can be between 1 and - * 32. For "MSI-X," as first defined in PCI - * 3.0, this must be 1, as each MSI-X table - * entry would have its own descriptor. - * @processor_count: number of bits enabled in array. - * @processor_array: All the target virtual processors. 
- */ -struct hv_msi_desc2 { - u8 vector; - u8 delivery_mode; - u16 vector_count; - u16 processor_count; - u16 processor_array[32]; -} __packed; - -/** - * struct tran_int_desc - * @reserved: unused, padding - * @vector_count: same as in hv_msi_desc - * @data: This is the "data payload" value that is - * written by the device when it generates - * a message-signaled interrupt, either MSI - * or MSI-X. - * @address: This is the address to which the data - * payload is written on interrupt - * generation. - */ -struct tran_int_desc { - u16 reserved; - u16 vector_count; - u32 data; - u64 address; -} __packed; - -/* - * A generic message format for virtual PCI. - * Specific message formats are defined later in the file. - */ - -struct pci_message { - u32 type; -} __packed; - -struct pci_child_message { - struct pci_message message_type; - union win_slot_encoding wslot; -} __packed; - -struct pci_incoming_message { - struct vmpacket_descriptor hdr; - struct pci_message message_type; -} __packed; - -struct pci_response { - struct vmpacket_descriptor hdr; - s32 status; /* negative values are failures */ -} __packed; - -struct pci_packet { - void (*completion_func)(void *context, struct pci_response *resp, - int resp_packet_size); - void *compl_ctxt; - - struct pci_message message[0]; -}; - -/* - * Specific message types supporting the PCI protocol. - */ - -/* - * Version negotiation message. Sent from the guest to the host. - * The guest is free to try different versions until the host - * accepts the version. - * - * pci_version: The protocol version requested. - * is_last_attempt: If TRUE, this is the last version guest will request. - * reservedz: Reserved field, set to zero. - */ - -struct pci_version_request { - struct pci_message message_type; - u32 protocol_version; -} __packed; - -/* - * Bus D0 Entry. This is sent from the guest to the host when the virtual - * bus (PCI Express port) is ready for action. 
- */ - -struct pci_bus_d0_entry { - struct pci_message message_type; - u32 reserved; - u64 mmio_base; -} __packed; - -struct pci_bus_relations { - struct pci_incoming_message incoming; - u32 device_count; - struct pci_function_description func[0]; -} __packed; - -struct pci_q_res_req_response { - struct vmpacket_descriptor hdr; - s32 status; /* negative values are failures */ - u32 probed_bar[6]; -} __packed; - -struct pci_set_power { - struct pci_message message_type; - union win_slot_encoding wslot; - u32 power_state; /* In Windows terms */ - u32 reserved; -} __packed; - -struct pci_set_power_response { - struct vmpacket_descriptor hdr; - s32 status; /* negative values are failures */ - union win_slot_encoding wslot; - u32 resultant_state; /* In Windows terms */ - u32 reserved; -} __packed; - -struct pci_resources_assigned { - struct pci_message message_type; - union win_slot_encoding wslot; - u8 memory_range[0x14][6]; /* not used here */ - u32 msi_descriptors; - u32 reserved[4]; -} __packed; - -struct pci_resources_assigned2 { - struct pci_message message_type; - union win_slot_encoding wslot; - u8 memory_range[0x14][6]; /* not used here */ - u32 msi_descriptor_count; - u8 reserved[70]; -} __packed; - -struct pci_create_interrupt { - struct pci_message message_type; - union win_slot_encoding wslot; - struct hv_msi_desc int_desc; -} __packed; - -struct pci_create_int_response { - struct pci_response response; - u32 reserved; - struct tran_int_desc int_desc; -} __packed; - -struct pci_create_interrupt2 { - struct pci_message message_type; - union win_slot_encoding wslot; - struct hv_msi_desc2 int_desc; -} __packed; - -struct pci_delete_interrupt { - struct pci_message message_type; - union win_slot_encoding wslot; - struct tran_int_desc int_desc; -} __packed; - -struct pci_dev_incoming { - struct pci_incoming_message incoming; - union win_slot_encoding wslot; -} __packed; - -struct pci_eject_response { - struct pci_message message_type; - union win_slot_encoding 
wslot; - u32 status; -} __packed; - -static int pci_ring_size = (4 * PAGE_SIZE); - -/* - * Definitions or interrupt steering hypercall. - */ -#define HV_PARTITION_ID_SELF ((u64)-1) -#define HVCALL_RETARGET_INTERRUPT 0x7e - -struct hv_interrupt_entry { - u32 source; /* 1 for MSI(-X) */ - u32 reserved1; - u32 address; - u32 data; -}; - -#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */ - -struct hv_vp_set { - u64 format; /* 0 (HvGenericSetSparse4k) */ - u64 valid_banks; - u64 masks[HV_VP_SET_BANK_COUNT_MAX]; -}; - -/* - * flags for hv_device_interrupt_target.flags - */ -#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 -#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 - -struct hv_device_interrupt_target { - u32 vector; - u32 flags; - union { - u64 vp_mask; - struct hv_vp_set vp_set; - }; -}; - -struct retarget_msi_interrupt { - u64 partition_id; /* use "self" */ - u64 device_id; - struct hv_interrupt_entry int_entry; - u64 reserved2; - struct hv_device_interrupt_target int_target; -} __packed; - -/* - * Driver specific state. 
- */ - -enum hv_pcibus_state { - hv_pcibus_init = 0, - hv_pcibus_probed, - hv_pcibus_installed, - hv_pcibus_removed, - hv_pcibus_maximum -}; - -struct hv_pcibus_device { - struct pci_sysdata sysdata; - enum hv_pcibus_state state; - refcount_t remove_lock; - struct hv_device *hdev; - resource_size_t low_mmio_space; - resource_size_t high_mmio_space; - struct resource *mem_config; - struct resource *low_mmio_res; - struct resource *high_mmio_res; - struct completion *survey_event; - struct completion remove_event; - struct pci_bus *pci_bus; - spinlock_t config_lock; /* Avoid two threads writing index page */ - spinlock_t device_list_lock; /* Protect lists below */ - void __iomem *cfg_addr; - - struct list_head resources_for_children; - - struct list_head children; - struct list_head dr_list; - - struct msi_domain_info msi_info; - struct msi_controller msi_chip; - struct irq_domain *irq_domain; - - /* hypercall arg, must not cross page boundary */ - struct retarget_msi_interrupt retarget_msi_interrupt_params; - - spinlock_t retarget_msi_interrupt_lock; - - struct workqueue_struct *wq; -}; - -/* - * Tracks "Device Relations" messages from the host, which must be both - * processed in order and deferred so that they don't run in the context - * of the incoming packet callback. 
- */ -struct hv_dr_work { - struct work_struct wrk; - struct hv_pcibus_device *bus; -}; - -struct hv_dr_state { - struct list_head list_entry; - u32 device_count; - struct pci_function_description func[0]; -}; - -enum hv_pcichild_state { - hv_pcichild_init = 0, - hv_pcichild_requirements, - hv_pcichild_resourced, - hv_pcichild_ejecting, - hv_pcichild_maximum -}; - -struct hv_pci_dev { - /* List protected by pci_rescan_remove_lock */ - struct list_head list_entry; - refcount_t refs; - enum hv_pcichild_state state; - struct pci_function_description desc; - bool reported_missing; - struct hv_pcibus_device *hbus; - struct work_struct wrk; - - /* - * What would be observed if one wrote 0xFFFFFFFF to a BAR and then - * read it back, for each of the BAR offsets within config space. - */ - u32 probed_bar[6]; -}; - -struct hv_pci_compl { - struct completion host_event; - s32 completion_status; -}; - -static void hv_pci_onchannelcallback(void *context); - -/** - * hv_pci_generic_compl() - Invoked for a completion packet - * @context: Set up by the sender of the packet. - * @resp: The response packet - * @resp_packet_size: Size in bytes of the packet - * - * This function is used to trigger an event and report status - * for any message for which the completion packet contains a - * status and nothing else. 
- */ -static void hv_pci_generic_compl(void *context, struct pci_response *resp, - int resp_packet_size) -{ - struct hv_pci_compl *comp_pkt = context; - - if (resp_packet_size >= offsetofend(struct pci_response, status)) - comp_pkt->completion_status = resp->status; - else - comp_pkt->completion_status = -1; - - complete(&comp_pkt->host_event); -} - -static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, - u32 wslot); - -static void get_pcichild(struct hv_pci_dev *hpdev) -{ - refcount_inc(&hpdev->refs); -} - -static void put_pcichild(struct hv_pci_dev *hpdev) -{ - if (refcount_dec_and_test(&hpdev->refs)) - kfree(hpdev); -} - -static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); -static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); - -/* - * There is no good way to get notified from vmbus_onoffer_rescind(), - * so let's use polling here, since this is not a hot path. - */ -static int wait_for_response(struct hv_device *hdev, - struct completion *comp) -{ - while (true) { - if (hdev->channel->rescind) { - dev_warn_once(&hdev->device, "The device is gone.\n"); - return -ENODEV; - } - - if (wait_for_completion_timeout(comp, HZ / 10)) - break; - } - - return 0; -} - -/** - * devfn_to_wslot() - Convert from Linux PCI slot to Windows - * @devfn: The Linux representation of PCI slot - * - * Windows uses a slightly different representation of PCI slot. - * - * Return: The Windows representation - */ -static u32 devfn_to_wslot(int devfn) -{ - union win_slot_encoding wslot; - - wslot.slot = 0; - wslot.bits.dev = PCI_SLOT(devfn); - wslot.bits.func = PCI_FUNC(devfn); - - return wslot.slot; -} - -/** - * wslot_to_devfn() - Convert from Windows PCI slot to Linux - * @wslot: The Windows representation of PCI slot - * - * Windows uses a slightly different representation of PCI slot. 
- * - * Return: The Linux representation - */ -static int wslot_to_devfn(u32 wslot) -{ - union win_slot_encoding slot_no; - - slot_no.slot = wslot; - return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func); -} - -/* - * PCI Configuration Space for these root PCI buses is implemented as a pair - * of pages in memory-mapped I/O space. Writing to the first page chooses - * the PCI function being written or read. Once the first page has been - * written to, the following page maps in the entire configuration space of - * the function. - */ - -/** - * _hv_pcifront_read_config() - Internal PCI config read - * @hpdev: The PCI driver's representation of the device - * @where: Offset within config space - * @size: Size of the transfer - * @val: Pointer to the buffer receiving the data - */ -static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, - int size, u32 *val) -{ - unsigned long flags; - void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where; - - /* - * If the attempt is to read the IDs or the ROM BAR, simulate that. - */ - if (where + size <= PCI_COMMAND) { - memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size); - } else if (where >= PCI_CLASS_REVISION && where + size <= - PCI_CACHE_LINE_SIZE) { - memcpy(val, ((u8 *)&hpdev->desc.rev) + where - - PCI_CLASS_REVISION, size); - } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <= - PCI_ROM_ADDRESS) { - memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where - - PCI_SUBSYSTEM_VENDOR_ID, size); - } else if (where >= PCI_ROM_ADDRESS && where + size <= - PCI_CAPABILITY_LIST) { - /* ROM BARs are unimplemented */ - *val = 0; - } else if (where >= PCI_INTERRUPT_LINE && where + size <= - PCI_INTERRUPT_PIN) { - /* - * Interrupt Line and Interrupt PIN are hard-wired to zero - * because this front-end only supports message-signaled - * interrupts. 
- */ - *val = 0; - } else if (where + size <= CFG_PAGE_SIZE) { - spin_lock_irqsave(&hpdev->hbus->config_lock, flags); - /* Choose the function to be read. (See comment above) */ - writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); - /* Make sure the function was chosen before we start reading. */ - mb(); - /* Read from that function's config space. */ - switch (size) { - case 1: - *val = readb(addr); - break; - case 2: - *val = readw(addr); - break; - default: - *val = readl(addr); - break; - } - /* - * Make sure the read was done before we release the spinlock - * allowing consecutive reads/writes. - */ - mb(); - spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); - } else { - dev_err(&hpdev->hbus->hdev->device, - "Attempt to read beyond a function's config space.\n"); - } -} - -static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev) -{ - u16 ret; - unsigned long flags; - void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + - PCI_VENDOR_ID; - - spin_lock_irqsave(&hpdev->hbus->config_lock, flags); - - /* Choose the function to be read. (See comment above) */ - writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); - /* Make sure the function was chosen before we start reading. */ - mb(); - /* Read from that function's config space. */ - ret = readw(addr); - /* - * mb() is not required here, because the spin_unlock_irqrestore() - * is a barrier. 
- */ - - spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); - - return ret; -} - -/** - * _hv_pcifront_write_config() - Internal PCI config write - * @hpdev: The PCI driver's representation of the device - * @where: Offset within config space - * @size: Size of the transfer - * @val: The data being transferred - */ -static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, - int size, u32 val) -{ - unsigned long flags; - void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where; - - if (where >= PCI_SUBSYSTEM_VENDOR_ID && - where + size <= PCI_CAPABILITY_LIST) { - /* SSIDs and ROM BARs are read-only */ - } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) { - spin_lock_irqsave(&hpdev->hbus->config_lock, flags); - /* Choose the function to be written. (See comment above) */ - writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); - /* Make sure the function was chosen before we start writing. */ - wmb(); - /* Write to that function's config space. */ - switch (size) { - case 1: - writeb(val, addr); - break; - case 2: - writew(val, addr); - break; - default: - writel(val, addr); - break; - } - /* - * Make sure the write was done before we release the spinlock - * allowing consecutive reads/writes. 
- */ - mb(); - spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); - } else { - dev_err(&hpdev->hbus->hdev->device, - "Attempt to write beyond a function's config space.\n"); - } -} - -/** - * hv_pcifront_read_config() - Read configuration space - * @bus: PCI Bus structure - * @devfn: Device/function - * @where: Offset from base - * @size: Byte/word/dword - * @val: Value to be read - * - * Return: PCIBIOS_SUCCESSFUL on success - * PCIBIOS_DEVICE_NOT_FOUND on failure - */ -static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - struct hv_pcibus_device *hbus = - container_of(bus->sysdata, struct hv_pcibus_device, sysdata); - struct hv_pci_dev *hpdev; - - hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); - if (!hpdev) - return PCIBIOS_DEVICE_NOT_FOUND; - - _hv_pcifront_read_config(hpdev, where, size, val); - - put_pcichild(hpdev); - return PCIBIOS_SUCCESSFUL; -} - -/** - * hv_pcifront_write_config() - Write configuration space - * @bus: PCI Bus structure - * @devfn: Device/function - * @where: Offset from base - * @size: Byte/word/dword - * @val: Value to be written to device - * - * Return: PCIBIOS_SUCCESSFUL on success - * PCIBIOS_DEVICE_NOT_FOUND on failure - */ -static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - struct hv_pcibus_device *hbus = - container_of(bus->sysdata, struct hv_pcibus_device, sysdata); - struct hv_pci_dev *hpdev; - - hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); - if (!hpdev) - return PCIBIOS_DEVICE_NOT_FOUND; - - _hv_pcifront_write_config(hpdev, where, size, val); - - put_pcichild(hpdev); - return PCIBIOS_SUCCESSFUL; -} - -/* PCIe operations */ -static struct pci_ops hv_pcifront_ops = { - .read = hv_pcifront_read_config, - .write = hv_pcifront_write_config, -}; - -/* Interrupt management hooks */ -static void hv_int_desc_free(struct hv_pci_dev *hpdev, - struct tran_int_desc *int_desc) -{ - struct 
pci_delete_interrupt *int_pkt; - struct { - struct pci_packet pkt; - u8 buffer[sizeof(struct pci_delete_interrupt)]; - } ctxt; - - memset(&ctxt, 0, sizeof(ctxt)); - int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; - int_pkt->message_type.type = - PCI_DELETE_INTERRUPT_MESSAGE; - int_pkt->wslot.slot = hpdev->desc.win_slot.slot; - int_pkt->int_desc = *int_desc; - vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt), - (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); - kfree(int_desc); -} - -/** - * hv_msi_free() - Free the MSI. - * @domain: The interrupt domain pointer - * @info: Extra MSI-related context - * @irq: Identifies the IRQ. - * - * The Hyper-V parent partition and hypervisor are tracking the - * messages that are in use, keeping the interrupt redirection - * table up to date. This callback sends a message that frees - * the IRT entry and related tracking nonsense. - */ -static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, - unsigned int irq) -{ - struct hv_pcibus_device *hbus; - struct hv_pci_dev *hpdev; - struct pci_dev *pdev; - struct tran_int_desc *int_desc; - struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq); - struct msi_desc *msi = irq_data_get_msi_desc(irq_data); - - pdev = msi_desc_to_pci_dev(msi); - hbus = info->data; - int_desc = irq_data_get_irq_chip_data(irq_data); - if (!int_desc) - return; - - irq_data->chip_data = NULL; - hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); - if (!hpdev) { - kfree(int_desc); - return; - } - - hv_int_desc_free(hpdev, int_desc); - put_pcichild(hpdev); -} - -static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest, - bool force) -{ - struct irq_data *parent = data->parent_data; - - return parent->chip->irq_set_affinity(parent, dest, force); -} - -static void hv_irq_mask(struct irq_data *data) -{ - pci_msi_mask_irq(data); -} - -/** - * hv_irq_unmask() - "Unmask" the IRQ by setting its current - * affinity. 
- * @data: Describes the IRQ - * - * Build new a destination for the MSI and make a hypercall to - * update the Interrupt Redirection Table. "Device Logical ID" - * is built out of this PCI bus's instance GUID and the function - * number of the device. - */ -static void hv_irq_unmask(struct irq_data *data) -{ - struct msi_desc *msi_desc = irq_data_get_msi_desc(data); - struct irq_cfg *cfg = irqd_cfg(data); - struct retarget_msi_interrupt *params; - struct hv_pcibus_device *hbus; - struct cpumask *dest; - struct pci_bus *pbus; - struct pci_dev *pdev; - unsigned long flags; - u32 var_size = 0; - int cpu_vmbus; - int cpu; - u64 res; - - dest = irq_data_get_effective_affinity_mask(data); - pdev = msi_desc_to_pci_dev(msi_desc); - pbus = pdev->bus; - hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); - - spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); - - params = &hbus->retarget_msi_interrupt_params; - memset(params, 0, sizeof(*params)); - params->partition_id = HV_PARTITION_ID_SELF; - params->int_entry.source = 1; /* MSI(-X) */ - params->int_entry.address = msi_desc->msg.address_lo; - params->int_entry.data = msi_desc->msg.data; - params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | - (hbus->hdev->dev_instance.b[4] << 16) | - (hbus->hdev->dev_instance.b[7] << 8) | - (hbus->hdev->dev_instance.b[6] & 0xf8) | - PCI_FUNC(pdev->devfn); - params->int_target.vector = cfg->vector; - - /* - * Honoring apic->irq_delivery_mode set to dest_Fixed by - * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a - * spurious interrupt storm. Not doing so does not seem to have a - * negative effect (yet?). - */ - - if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) { - /* - * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the - * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides - * with >64 VP support. - * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED - * is not sufficient for this hypercall. 
- */ - params->int_target.flags |= - HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; - params->int_target.vp_set.valid_banks = - (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1; - - /* - * var-sized hypercall, var-size starts after vp_mask (thus - * vp_set.format does not count, but vp_set.valid_banks does). - */ - var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; - - for_each_cpu_and(cpu, dest, cpu_online_mask) { - cpu_vmbus = hv_cpu_number_to_vp_number(cpu); - - if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { - dev_err(&hbus->hdev->device, - "too high CPU %d", cpu_vmbus); - res = 1; - goto exit_unlock; - } - - params->int_target.vp_set.masks[cpu_vmbus / 64] |= - (1ULL << (cpu_vmbus & 63)); - } - } else { - for_each_cpu_and(cpu, dest, cpu_online_mask) { - params->int_target.vp_mask |= - (1ULL << hv_cpu_number_to_vp_number(cpu)); - } - } - - res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), - params, NULL); - -exit_unlock: - spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); - - if (res) { - dev_err(&hbus->hdev->device, - "%s() failed: %#llx", __func__, res); - return; - } - - pci_msi_unmask_irq(data); -} - -struct compose_comp_ctxt { - struct hv_pci_compl comp_pkt; - struct tran_int_desc int_desc; -}; - -static void hv_pci_compose_compl(void *context, struct pci_response *resp, - int resp_packet_size) -{ - struct compose_comp_ctxt *comp_pkt = context; - struct pci_create_int_response *int_resp = - (struct pci_create_int_response *)resp; - - comp_pkt->comp_pkt.completion_status = resp->status; - comp_pkt->int_desc = int_resp->int_desc; - complete(&comp_pkt->comp_pkt.host_event); -} - -static u32 hv_compose_msi_req_v1( - struct pci_create_interrupt *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) -{ - int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; - int_pkt->wslot.slot = slot; - int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = dest_Fixed; - - /* - * Create MSI w/ dummy vCPU 
set, overwritten by subsequent retarget in - * hv_irq_unmask(). - */ - int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; - - return sizeof(*int_pkt); -} - -static u32 hv_compose_msi_req_v2( - struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) -{ - int cpu; - - int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; - int_pkt->wslot.slot = slot; - int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = dest_Fixed; - - /* - * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten - * by subsequent retarget in hv_irq_unmask(). - */ - cpu = cpumask_first_and(affinity, cpu_online_mask); - int_pkt->int_desc.processor_array[0] = - hv_cpu_number_to_vp_number(cpu); - int_pkt->int_desc.processor_count = 1; - - return sizeof(*int_pkt); -} - -/** - * hv_compose_msi_msg() - Supplies a valid MSI address/data - * @data: Everything about this MSI - * @msg: Buffer that is filled in by this function - * - * This function unpacks the IRQ looking for target CPU set, IDT - * vector and mode and sends a message to the parent partition - * asking for a mapping for that tuple in this partition. The - * response supplies a data value and address to which that data - * should be written to trigger that interrupt. 
- */ -static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -{ - struct irq_cfg *cfg = irqd_cfg(data); - struct hv_pcibus_device *hbus; - struct hv_pci_dev *hpdev; - struct pci_bus *pbus; - struct pci_dev *pdev; - struct cpumask *dest; - struct compose_comp_ctxt comp; - struct tran_int_desc *int_desc; - struct { - struct pci_packet pci_pkt; - union { - struct pci_create_interrupt v1; - struct pci_create_interrupt2 v2; - } int_pkts; - } __packed ctxt; - - u32 size; - int ret; - - pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); - dest = irq_data_get_effective_affinity_mask(data); - pbus = pdev->bus; - hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); - hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); - if (!hpdev) - goto return_null_message; - - /* Free any previous message that might have already been composed. */ - if (data->chip_data) { - int_desc = data->chip_data; - data->chip_data = NULL; - hv_int_desc_free(hpdev, int_desc); - } - - int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); - if (!int_desc) - goto drop_reference; - - memset(&ctxt, 0, sizeof(ctxt)); - init_completion(&comp.comp_pkt.host_event); - ctxt.pci_pkt.completion_func = hv_pci_compose_compl; - ctxt.pci_pkt.compl_ctxt = ∁ - - switch (pci_protocol_version) { - case PCI_PROTOCOL_VERSION_1_1: - size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, - dest, - hpdev->desc.win_slot.slot, - cfg->vector); - break; - - case PCI_PROTOCOL_VERSION_1_2: - size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, - dest, - hpdev->desc.win_slot.slot, - cfg->vector); - break; - - default: - /* As we only negotiate protocol versions known to this driver, - * this path should never hit. However, this is it not a hot - * path so we print a message to aid future updates. 
- */ - dev_err(&hbus->hdev->device, - "Unexpected vPCI protocol, update driver."); - goto free_int_desc; - } - - ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts, - size, (unsigned long)&ctxt.pci_pkt, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) { - dev_err(&hbus->hdev->device, - "Sending request for interrupt failed: 0x%x", - comp.comp_pkt.completion_status); - goto free_int_desc; - } - - /* - * Since this function is called with IRQ locks held, can't - * do normal wait for completion; instead poll. - */ - while (!try_wait_for_completion(&comp.comp_pkt.host_event)) { - /* 0xFFFF means an invalid PCI VENDOR ID. */ - if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) { - dev_err_once(&hbus->hdev->device, - "the device has gone\n"); - goto free_int_desc; - } - - /* - * When the higher level interrupt code calls us with - * interrupt disabled, we must poll the channel by calling - * the channel callback directly when channel->target_cpu is - * the current CPU. When the higher level interrupt code - * calls us with interrupt enabled, let's add the - * local_bh_disable()/enable() to avoid race. - */ - local_bh_disable(); - - if (hbus->hdev->channel->target_cpu == smp_processor_id()) - hv_pci_onchannelcallback(hbus); - - local_bh_enable(); - - if (hpdev->state == hv_pcichild_ejecting) { - dev_err_once(&hbus->hdev->device, - "the device is being ejected\n"); - goto free_int_desc; - } - - udelay(100); - } - - if (comp.comp_pkt.completion_status < 0) { - dev_err(&hbus->hdev->device, - "Request for interrupt failed: 0x%x", - comp.comp_pkt.completion_status); - goto free_int_desc; - } - - /* - * Record the assignment so that this can be unwound later. Using - * irq_set_chip_data() here would be appropriate, but the lock it takes - * is already held. - */ - *int_desc = comp.int_desc; - data->chip_data = int_desc; - - /* Pass up the result. 
*/ - msg->address_hi = comp.int_desc.address >> 32; - msg->address_lo = comp.int_desc.address & 0xffffffff; - msg->data = comp.int_desc.data; - - put_pcichild(hpdev); - return; - -free_int_desc: - kfree(int_desc); -drop_reference: - put_pcichild(hpdev); -return_null_message: - msg->address_hi = 0; - msg->address_lo = 0; - msg->data = 0; -} - -/* HW Interrupt Chip Descriptor */ -static struct irq_chip hv_msi_irq_chip = { - .name = "Hyper-V PCIe MSI", - .irq_compose_msi_msg = hv_compose_msi_msg, - .irq_set_affinity = hv_set_affinity, - .irq_ack = irq_chip_ack_parent, - .irq_mask = hv_irq_mask, - .irq_unmask = hv_irq_unmask, -}; - -static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info, - msi_alloc_info_t *arg) -{ - return arg->msi_hwirq; -} - -static struct msi_domain_ops hv_msi_ops = { - .get_hwirq = hv_msi_domain_ops_get_hwirq, - .msi_prepare = pci_msi_prepare, - .set_desc = pci_msi_set_desc, - .msi_free = hv_msi_free, -}; - -/** - * hv_pcie_init_irq_domain() - Initialize IRQ domain - * @hbus: The root PCI bus - * - * This function creates an IRQ domain which will be used for - * interrupts from devices that have been passed through. These - * devices only support MSI and MSI-X, not line-based interrupts - * or simulations of line-based interrupts through PCIe's - * fabric-layer messages. Because interrupts are remapped, we - * can support multi-message MSI here. 
- * - * Return: '0' on success and error value on failure - */ -static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus) -{ - hbus->msi_info.chip = &hv_msi_irq_chip; - hbus->msi_info.ops = &hv_msi_ops; - hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS | - MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | - MSI_FLAG_PCI_MSIX); - hbus->msi_info.handler = handle_edge_irq; - hbus->msi_info.handler_name = "edge"; - hbus->msi_info.data = hbus; - hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode, - &hbus->msi_info, - x86_vector_domain); - if (!hbus->irq_domain) { - dev_err(&hbus->hdev->device, - "Failed to build an MSI IRQ domain\n"); - return -ENODEV; - } - - return 0; -} - -/** - * get_bar_size() - Get the address space consumed by a BAR - * @bar_val: Value that a BAR returned after -1 was written - * to it. - * - * This function returns the size of the BAR, rounded up to 1 - * page. It has to be rounded up because the hypervisor's page - * table entry that maps the BAR into the VM can't specify an - * offset within a page. The invariant is that the hypervisor - * must place any BARs of smaller than page length at the - * beginning of a page. - * - * Return: Size in bytes of the consumed MMIO space. - */ -static u64 get_bar_size(u64 bar_val) -{ - return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)), - PAGE_SIZE); -} - -/** - * survey_child_resources() - Total all MMIO requirements - * @hbus: Root PCI bus, as understood by this driver - */ -static void survey_child_resources(struct hv_pcibus_device *hbus) -{ - struct hv_pci_dev *hpdev; - resource_size_t bar_size = 0; - unsigned long flags; - struct completion *event; - u64 bar_val; - int i; - - /* If nobody is waiting on the answer, don't compute it. */ - event = xchg(&hbus->survey_event, NULL); - if (!event) - return; - - /* If the answer has already been computed, go with it. 
*/ - if (hbus->low_mmio_space || hbus->high_mmio_space) { - complete(event); - return; - } - - spin_lock_irqsave(&hbus->device_list_lock, flags); - - /* - * Due to an interesting quirk of the PCI spec, all memory regions - * for a child device are a power of 2 in size and aligned in memory, - * so it's sufficient to just add them up without tracking alignment. - */ - list_for_each_entry(hpdev, &hbus->children, list_entry) { - for (i = 0; i < 6; i++) { - if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO) - dev_err(&hbus->hdev->device, - "There's an I/O BAR in this list!\n"); - - if (hpdev->probed_bar[i] != 0) { - /* - * A probed BAR has all the upper bits set that - * can be changed. - */ - - bar_val = hpdev->probed_bar[i]; - if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) - bar_val |= - ((u64)hpdev->probed_bar[++i] << 32); - else - bar_val |= 0xffffffff00000000ULL; - - bar_size = get_bar_size(bar_val); - - if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) - hbus->high_mmio_space += bar_size; - else - hbus->low_mmio_space += bar_size; - } - } - } - - spin_unlock_irqrestore(&hbus->device_list_lock, flags); - complete(event); -} - -/** - * prepopulate_bars() - Fill in BARs with defaults - * @hbus: Root PCI bus, as understood by this driver - * - * The core PCI driver code seems much, much happier if the BARs - * for a device have values upon first scan. So fill them in. - * The algorithm below works down from large sizes to small, - * attempting to pack the assignments optimally. The assumption, - * enforced in other parts of the code, is that the beginning of - * the memory-mapped I/O space will be aligned on the largest - * BAR size. 
- */ -static void prepopulate_bars(struct hv_pcibus_device *hbus) -{ - resource_size_t high_size = 0; - resource_size_t low_size = 0; - resource_size_t high_base = 0; - resource_size_t low_base = 0; - resource_size_t bar_size; - struct hv_pci_dev *hpdev; - unsigned long flags; - u64 bar_val; - u32 command; - bool high; - int i; - - if (hbus->low_mmio_space) { - low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); - low_base = hbus->low_mmio_res->start; - } - - if (hbus->high_mmio_space) { - high_size = 1ULL << - (63 - __builtin_clzll(hbus->high_mmio_space)); - high_base = hbus->high_mmio_res->start; - } - - spin_lock_irqsave(&hbus->device_list_lock, flags); - - /* Pick addresses for the BARs. */ - do { - list_for_each_entry(hpdev, &hbus->children, list_entry) { - for (i = 0; i < 6; i++) { - bar_val = hpdev->probed_bar[i]; - if (bar_val == 0) - continue; - high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64; - if (high) { - bar_val |= - ((u64)hpdev->probed_bar[i + 1] - << 32); - } else { - bar_val |= 0xffffffffULL << 32; - } - bar_size = get_bar_size(bar_val); - if (high) { - if (high_size != bar_size) { - i++; - continue; - } - _hv_pcifront_write_config(hpdev, - PCI_BASE_ADDRESS_0 + (4 * i), - 4, - (u32)(high_base & 0xffffff00)); - i++; - _hv_pcifront_write_config(hpdev, - PCI_BASE_ADDRESS_0 + (4 * i), - 4, (u32)(high_base >> 32)); - high_base += bar_size; - } else { - if (low_size != bar_size) - continue; - _hv_pcifront_write_config(hpdev, - PCI_BASE_ADDRESS_0 + (4 * i), - 4, - (u32)(low_base & 0xffffff00)); - low_base += bar_size; - } - } - if (high_size <= 1 && low_size <= 1) { - /* Set the memory enable bit. 
*/ - _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, - &command); - command |= PCI_COMMAND_MEMORY; - _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, - command); - break; - } - } - - high_size >>= 1; - low_size >>= 1; - } while (high_size || low_size); - - spin_unlock_irqrestore(&hbus->device_list_lock, flags); -} - -/** - * create_root_hv_pci_bus() - Expose a new root PCI bus - * @hbus: Root PCI bus, as understood by this driver - * - * Return: 0 on success, -errno on failure - */ -static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) -{ - /* Register the device */ - hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device, - 0, /* bus number is always zero */ - &hv_pcifront_ops, - &hbus->sysdata, - &hbus->resources_for_children); - if (!hbus->pci_bus) - return -ENODEV; - - hbus->pci_bus->msi = &hbus->msi_chip; - hbus->pci_bus->msi->dev = &hbus->hdev->device; - - pci_lock_rescan_remove(); - pci_scan_child_bus(hbus->pci_bus); - pci_bus_assign_resources(hbus->pci_bus); - pci_bus_add_devices(hbus->pci_bus); - pci_unlock_rescan_remove(); - hbus->state = hv_pcibus_installed; - return 0; -} - -struct q_res_req_compl { - struct completion host_event; - struct hv_pci_dev *hpdev; -}; - -/** - * q_resource_requirements() - Query Resource Requirements - * @context: The completion context. - * @resp: The response that came from the host. - * @resp_packet_size: The size in bytes of resp. - * - * This function is invoked on completion of a Query Resource - * Requirements packet. 
- */ -static void q_resource_requirements(void *context, struct pci_response *resp, - int resp_packet_size) -{ - struct q_res_req_compl *completion = context; - struct pci_q_res_req_response *q_res_req = - (struct pci_q_res_req_response *)resp; - int i; - - if (resp->status < 0) { - dev_err(&completion->hpdev->hbus->hdev->device, - "query resource requirements failed: %x\n", - resp->status); - } else { - for (i = 0; i < 6; i++) { - completion->hpdev->probed_bar[i] = - q_res_req->probed_bar[i]; - } - } - - complete(&completion->host_event); -} - -/** - * new_pcichild_device() - Create a new child device - * @hbus: The internal struct tracking this root PCI bus. - * @desc: The information supplied so far from the host - * about the device. - * - * This function creates the tracking structure for a new child - * device and kicks off the process of figuring out what it is. - * - * Return: Pointer to the new tracking struct - */ -static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, - struct pci_function_description *desc) -{ - struct hv_pci_dev *hpdev; - struct pci_child_message *res_req; - struct q_res_req_compl comp_pkt; - struct { - struct pci_packet init_packet; - u8 buffer[sizeof(struct pci_child_message)]; - } pkt; - unsigned long flags; - int ret; - - hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC); - if (!hpdev) - return NULL; - - hpdev->hbus = hbus; - - memset(&pkt, 0, sizeof(pkt)); - init_completion(&comp_pkt.host_event); - comp_pkt.hpdev = hpdev; - pkt.init_packet.compl_ctxt = &comp_pkt; - pkt.init_packet.completion_func = q_resource_requirements; - res_req = (struct pci_child_message *)&pkt.init_packet.message; - res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; - res_req->wslot.slot = desc->win_slot.slot; - - ret = vmbus_sendpacket(hbus->hdev->channel, res_req, - sizeof(struct pci_child_message), - (unsigned long)&pkt.init_packet, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) - goto error; - 
- if (wait_for_response(hbus->hdev, &comp_pkt.host_event)) - goto error; - - hpdev->desc = *desc; - refcount_set(&hpdev->refs, 1); - get_pcichild(hpdev); - spin_lock_irqsave(&hbus->device_list_lock, flags); - - list_add_tail(&hpdev->list_entry, &hbus->children); - spin_unlock_irqrestore(&hbus->device_list_lock, flags); - return hpdev; - -error: - kfree(hpdev); - return NULL; -} - -/** - * get_pcichild_wslot() - Find device from slot - * @hbus: Root PCI bus, as understood by this driver - * @wslot: Location on the bus - * - * This function looks up a PCI device and returns the internal - * representation of it. It acquires a reference on it, so that - * the device won't be deleted while somebody is using it. The - * caller is responsible for calling put_pcichild() to release - * this reference. - * - * Return: Internal representation of a PCI device - */ -static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, - u32 wslot) -{ - unsigned long flags; - struct hv_pci_dev *iter, *hpdev = NULL; - - spin_lock_irqsave(&hbus->device_list_lock, flags); - list_for_each_entry(iter, &hbus->children, list_entry) { - if (iter->desc.win_slot.slot == wslot) { - hpdev = iter; - get_pcichild(hpdev); - break; - } - } - spin_unlock_irqrestore(&hbus->device_list_lock, flags); - - return hpdev; -} - -/** - * pci_devices_present_work() - Handle new list of child devices - * @work: Work struct embedded in struct hv_dr_work - * - * "Bus Relations" is the Windows term for "children of this - * bus." The terminology is preserved here for people trying to - * debug the interaction between Hyper-V and Linux. This - * function is called when the parent partition reports a list - * of functions that should be observed under this PCI Express - * port (bus). - * - * This function updates the list, and must tolerate being - * called multiple times with the same information. 
The typical - * number of child devices is one, with very atypical cases - * involving three or four, so the algorithms used here can be - * simple and inefficient. - * - * It must also treat the omission of a previously observed device as - * notification that the device no longer exists. - * - * Note that this function is serialized with hv_eject_device_work(), - * because both are pushed to the ordered workqueue hbus->wq. - */ -static void pci_devices_present_work(struct work_struct *work) -{ - u32 child_no; - bool found; - struct pci_function_description *new_desc; - struct hv_pci_dev *hpdev; - struct hv_pcibus_device *hbus; - struct list_head removed; - struct hv_dr_work *dr_wrk; - struct hv_dr_state *dr = NULL; - unsigned long flags; - - dr_wrk = container_of(work, struct hv_dr_work, wrk); - hbus = dr_wrk->bus; - kfree(dr_wrk); - - INIT_LIST_HEAD(&removed); - - /* Pull this off the queue and process it if it was the last one. */ - spin_lock_irqsave(&hbus->device_list_lock, flags); - while (!list_empty(&hbus->dr_list)) { - dr = list_first_entry(&hbus->dr_list, struct hv_dr_state, - list_entry); - list_del(&dr->list_entry); - - /* Throw this away if the list still has stuff in it. */ - if (!list_empty(&hbus->dr_list)) { - kfree(dr); - continue; - } - } - spin_unlock_irqrestore(&hbus->device_list_lock, flags); - - if (!dr) { - put_hvpcibus(hbus); - return; - } - - /* First, mark all existing children as reported missing. */ - spin_lock_irqsave(&hbus->device_list_lock, flags); - list_for_each_entry(hpdev, &hbus->children, list_entry) { - hpdev->reported_missing = true; - } - spin_unlock_irqrestore(&hbus->device_list_lock, flags); - - /* Next, add back any reported devices. 
*/ - for (child_no = 0; child_no < dr->device_count; child_no++) { - found = false; - new_desc = &dr->func[child_no]; - - spin_lock_irqsave(&hbus->device_list_lock, flags); - list_for_each_entry(hpdev, &hbus->children, list_entry) { - if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) && - (hpdev->desc.v_id == new_desc->v_id) && - (hpdev->desc.d_id == new_desc->d_id) && - (hpdev->desc.ser == new_desc->ser)) { - hpdev->reported_missing = false; - found = true; - } - } - spin_unlock_irqrestore(&hbus->device_list_lock, flags); - - if (!found) { - hpdev = new_pcichild_device(hbus, new_desc); - if (!hpdev) - dev_err(&hbus->hdev->device, - "couldn't record a child device.\n"); - } - } - - /* Move missing children to a list on the stack. */ - spin_lock_irqsave(&hbus->device_list_lock, flags); - do { - found = false; - list_for_each_entry(hpdev, &hbus->children, list_entry) { - if (hpdev->reported_missing) { - found = true; - put_pcichild(hpdev); - list_move_tail(&hpdev->list_entry, &removed); - break; - } - } - } while (found); - spin_unlock_irqrestore(&hbus->device_list_lock, flags); - - /* Delete everything that should no longer exist. */ - while (!list_empty(&removed)) { - hpdev = list_first_entry(&removed, struct hv_pci_dev, - list_entry); - list_del(&hpdev->list_entry); - put_pcichild(hpdev); - } - - switch (hbus->state) { - case hv_pcibus_installed: - /* - * Tell the core to rescan bus - * because there may have been changes. - */ - pci_lock_rescan_remove(); - pci_scan_child_bus(hbus->pci_bus); - pci_unlock_rescan_remove(); - break; - - case hv_pcibus_init: - case hv_pcibus_probed: - survey_child_resources(hbus); - break; - - default: - break; - } - - put_hvpcibus(hbus); - kfree(dr); -} - -/** - * hv_pci_devices_present() - Handles list of new children - * @hbus: Root PCI bus, as understood by this driver - * @relations: Packet from host listing children - * - * This function is invoked whenever a new list of devices for - * this bus appears. 
- */ -static void hv_pci_devices_present(struct hv_pcibus_device *hbus, - struct pci_bus_relations *relations) -{ - struct hv_dr_state *dr; - struct hv_dr_work *dr_wrk; - unsigned long flags; - bool pending_dr; - - dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT); - if (!dr_wrk) - return; - - dr = kzalloc(offsetof(struct hv_dr_state, func) + - (sizeof(struct pci_function_description) * - (relations->device_count)), GFP_NOWAIT); - if (!dr) { - kfree(dr_wrk); - return; - } - - INIT_WORK(&dr_wrk->wrk, pci_devices_present_work); - dr_wrk->bus = hbus; - dr->device_count = relations->device_count; - if (dr->device_count != 0) { - memcpy(dr->func, relations->func, - sizeof(struct pci_function_description) * - dr->device_count); - } - - spin_lock_irqsave(&hbus->device_list_lock, flags); - /* - * If pending_dr is true, we have already queued a work, - * which will see the new dr. Otherwise, we need to - * queue a new work. - */ - pending_dr = !list_empty(&hbus->dr_list); - list_add_tail(&dr->list_entry, &hbus->dr_list); - spin_unlock_irqrestore(&hbus->device_list_lock, flags); - - if (pending_dr) { - kfree(dr_wrk); - } else { - get_hvpcibus(hbus); - queue_work(hbus->wq, &dr_wrk->wrk); - } -} - -/** - * hv_eject_device_work() - Asynchronously handles ejection - * @work: Work struct embedded in internal device struct - * - * This function handles ejecting a device. Windows will - * attempt to gracefully eject a device, waiting 60 seconds to - * hear back from the guest OS that this completed successfully. - * If this timer expires, the device will be forcibly removed. 
- */ -static void hv_eject_device_work(struct work_struct *work) -{ - struct pci_eject_response *ejct_pkt; - struct hv_pci_dev *hpdev; - struct pci_dev *pdev; - unsigned long flags; - int wslot; - struct { - struct pci_packet pkt; - u8 buffer[sizeof(struct pci_eject_response)]; - } ctxt; - - hpdev = container_of(work, struct hv_pci_dev, wrk); - - WARN_ON(hpdev->state != hv_pcichild_ejecting); - - /* - * Ejection can come before or after the PCI bus has been set up, so - * attempt to find it and tear down the bus state, if it exists. This - * must be done without constructs like pci_domain_nr(hbus->pci_bus) - * because hbus->pci_bus may not exist yet. - */ - wslot = wslot_to_devfn(hpdev->desc.win_slot.slot); - pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, - wslot); - if (pdev) { - pci_lock_rescan_remove(); - pci_stop_and_remove_bus_device(pdev); - pci_dev_put(pdev); - pci_unlock_rescan_remove(); - } - - spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); - list_del(&hpdev->list_entry); - spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); - - memset(&ctxt, 0, sizeof(ctxt)); - ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; - ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; - ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; - vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt, - sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, - VM_PKT_DATA_INBAND, 0); - - put_pcichild(hpdev); - put_pcichild(hpdev); - put_hvpcibus(hpdev->hbus); -} - -/** - * hv_pci_eject_device() - Handles device ejection - * @hpdev: Internal device tracking struct - * - * This function is invoked when an ejection packet arrives. It - * just schedules work so that we don't re-enter the packet - * delivery code handling the ejection. 
- */ -static void hv_pci_eject_device(struct hv_pci_dev *hpdev) -{ - hpdev->state = hv_pcichild_ejecting; - get_pcichild(hpdev); - INIT_WORK(&hpdev->wrk, hv_eject_device_work); - get_hvpcibus(hpdev->hbus); - queue_work(hpdev->hbus->wq, &hpdev->wrk); -} - -/** - * hv_pci_onchannelcallback() - Handles incoming packets - * @context: Internal bus tracking struct - * - * This function is invoked whenever the host sends a packet to - * this channel (which is private to this root PCI bus). - */ -static void hv_pci_onchannelcallback(void *context) -{ - const int packet_size = 0x100; - int ret; - struct hv_pcibus_device *hbus = context; - u32 bytes_recvd; - u64 req_id; - struct vmpacket_descriptor *desc; - unsigned char *buffer; - int bufferlen = packet_size; - struct pci_packet *comp_packet; - struct pci_response *response; - struct pci_incoming_message *new_message; - struct pci_bus_relations *bus_rel; - struct pci_dev_incoming *dev_message; - struct hv_pci_dev *hpdev; - - buffer = kmalloc(bufferlen, GFP_ATOMIC); - if (!buffer) - return; - - while (1) { - ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer, - bufferlen, &bytes_recvd, &req_id); - - if (ret == -ENOBUFS) { - kfree(buffer); - /* Handle large packet */ - bufferlen = bytes_recvd; - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - if (!buffer) - return; - continue; - } - - /* Zero length indicates there are no more packets. */ - if (ret || !bytes_recvd) - break; - - /* - * All incoming packets must be at least as large as a - * response. - */ - if (bytes_recvd <= sizeof(struct pci_response)) - continue; - desc = (struct vmpacket_descriptor *)buffer; - - switch (desc->type) { - case VM_PKT_COMP: - - /* - * The host is trusted, and thus it's safe to interpret - * this transaction ID as a pointer. 
- */ - comp_packet = (struct pci_packet *)req_id; - response = (struct pci_response *)buffer; - comp_packet->completion_func(comp_packet->compl_ctxt, - response, - bytes_recvd); - break; - - case VM_PKT_DATA_INBAND: - - new_message = (struct pci_incoming_message *)buffer; - switch (new_message->message_type.type) { - case PCI_BUS_RELATIONS: - - bus_rel = (struct pci_bus_relations *)buffer; - if (bytes_recvd < - offsetof(struct pci_bus_relations, func) + - (sizeof(struct pci_function_description) * - (bus_rel->device_count))) { - dev_err(&hbus->hdev->device, - "bus relations too small\n"); - break; - } - - hv_pci_devices_present(hbus, bus_rel); - break; - - case PCI_EJECT: - - dev_message = (struct pci_dev_incoming *)buffer; - hpdev = get_pcichild_wslot(hbus, - dev_message->wslot.slot); - if (hpdev) { - hv_pci_eject_device(hpdev); - put_pcichild(hpdev); - } - break; - - default: - dev_warn(&hbus->hdev->device, - "Unimplemented protocol message %x\n", - new_message->message_type.type); - break; - } - break; - - default: - dev_err(&hbus->hdev->device, - "unhandled packet type %d, tid %llx len %d\n", - desc->type, req_id, bytes_recvd); - break; - } - } - - kfree(buffer); -} - -/** - * hv_pci_protocol_negotiation() - Set up protocol - * @hdev: VMBus's tracking struct for this root PCI bus - * - * This driver is intended to support running on Windows 10 - * (server) and later versions. It will not run on earlier - * versions, as they assume that many of the operations which - * Linux needs accomplished with a spinlock held were done via - * asynchronous messaging via VMBus. Windows 10 increases the - * surface area of PCI emulation so that these actions can take - * place by suspending a virtual processor for their duration. - * - * This function negotiates the channel protocol version, - * failing if the host doesn't support the necessary protocol - * level. 
- */ -static int hv_pci_protocol_negotiation(struct hv_device *hdev) -{ - struct pci_version_request *version_req; - struct hv_pci_compl comp_pkt; - struct pci_packet *pkt; - int ret; - int i; - - /* - * Initiate the handshake with the host and negotiate - * a version that the host can support. We start with the - * highest version number and go down if the host cannot - * support it. - */ - pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL); - if (!pkt) - return -ENOMEM; - - init_completion(&comp_pkt.host_event); - pkt->completion_func = hv_pci_generic_compl; - pkt->compl_ctxt = &comp_pkt; - version_req = (struct pci_version_request *)&pkt->message; - version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; - - for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) { - version_req->protocol_version = pci_protocol_versions[i]; - ret = vmbus_sendpacket(hdev->channel, version_req, - sizeof(struct pci_version_request), - (unsigned long)pkt, VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (!ret) - ret = wait_for_response(hdev, &comp_pkt.host_event); - - if (ret) { - dev_err(&hdev->device, - "PCI Pass-through VSP failed to request version: %d", - ret); - goto exit; - } - - if (comp_pkt.completion_status >= 0) { - pci_protocol_version = pci_protocol_versions[i]; - dev_info(&hdev->device, - "PCI VMBus probing: Using version %#x\n", - pci_protocol_version); - goto exit; - } - - if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { - dev_err(&hdev->device, - "PCI Pass-through VSP failed version request: %#x", - comp_pkt.completion_status); - ret = -EPROTO; - goto exit; - } - - reinit_completion(&comp_pkt.host_event); - } - - dev_err(&hdev->device, - "PCI pass-through VSP failed to find supported version"); - ret = -EPROTO; - -exit: - kfree(pkt); - return ret; -} - -/** - * hv_pci_free_bridge_windows() - Release memory regions for the - * bus - * @hbus: Root PCI bus, as understood by this driver - */ -static void 
hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus) -{ - /* - * Set the resources back to the way they looked when they - * were allocated by setting IORESOURCE_BUSY again. - */ - - if (hbus->low_mmio_space && hbus->low_mmio_res) { - hbus->low_mmio_res->flags |= IORESOURCE_BUSY; - vmbus_free_mmio(hbus->low_mmio_res->start, - resource_size(hbus->low_mmio_res)); - } - - if (hbus->high_mmio_space && hbus->high_mmio_res) { - hbus->high_mmio_res->flags |= IORESOURCE_BUSY; - vmbus_free_mmio(hbus->high_mmio_res->start, - resource_size(hbus->high_mmio_res)); - } -} - -/** - * hv_pci_allocate_bridge_windows() - Allocate memory regions - * for the bus - * @hbus: Root PCI bus, as understood by this driver - * - * This function calls vmbus_allocate_mmio(), which is itself a - * bit of a compromise. Ideally, we might change the pnp layer - * in the kernel such that it comprehends either PCI devices - * which are "grandchildren of ACPI," with some intermediate bus - * node (in this case, VMBus) or change it such that it - * understands VMBus. The pnp layer, however, has been declared - * deprecated, and not subject to change. - * - * The workaround, implemented here, is to ask VMBus to allocate - * MMIO space for this bus. VMBus itself knows which ranges are - * appropriate by looking at its own ACPI objects. Then, after - * these ranges are claimed, they're modified to look like they - * would have looked if the ACPI and pnp code had allocated - * bridge windows. These descriptors have to exist in this form - * in order to satisfy the code which will get invoked when the - * endpoint PCI function driver calls request_mem_region() or - * request_mem_region_exclusive(). 
- * - * Return: 0 on success, -errno on failure - */ -static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus) -{ - resource_size_t align; - int ret; - - if (hbus->low_mmio_space) { - align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); - ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0, - (u64)(u32)0xffffffff, - hbus->low_mmio_space, - align, false); - if (ret) { - dev_err(&hbus->hdev->device, - "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n", - hbus->low_mmio_space); - return ret; - } - - /* Modify this resource to become a bridge window. */ - hbus->low_mmio_res->flags |= IORESOURCE_WINDOW; - hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY; - pci_add_resource(&hbus->resources_for_children, - hbus->low_mmio_res); - } - - if (hbus->high_mmio_space) { - align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space)); - ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev, - 0x100000000, -1, - hbus->high_mmio_space, align, - false); - if (ret) { - dev_err(&hbus->hdev->device, - "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n", - hbus->high_mmio_space); - goto release_low_mmio; - } - - /* Modify this resource to become a bridge window. */ - hbus->high_mmio_res->flags |= IORESOURCE_WINDOW; - hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY; - pci_add_resource(&hbus->resources_for_children, - hbus->high_mmio_res); - } - - return 0; - -release_low_mmio: - if (hbus->low_mmio_res) { - vmbus_free_mmio(hbus->low_mmio_res->start, - resource_size(hbus->low_mmio_res)); - } - - return ret; -} - -/** - * hv_allocate_config_window() - Find MMIO space for PCI Config - * @hbus: Root PCI bus, as understood by this driver - * - * This function claims memory-mapped I/O space for accessing - * configuration space for the functions on this bus. 
- * - * Return: 0 on success, -errno on failure - */ -static int hv_allocate_config_window(struct hv_pcibus_device *hbus) -{ - int ret; - - /* - * Set up a region of MMIO space to use for accessing configuration - * space. - */ - ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1, - PCI_CONFIG_MMIO_LENGTH, 0x1000, false); - if (ret) - return ret; - - /* - * vmbus_allocate_mmio() gets used for allocating both device endpoint - * resource claims (those which cannot be overlapped) and the ranges - * which are valid for the children of this bus, which are intended - * to be overlapped by those children. Set the flag on this claim - * meaning that this region can't be overlapped. - */ - - hbus->mem_config->flags |= IORESOURCE_BUSY; - - return 0; -} - -static void hv_free_config_window(struct hv_pcibus_device *hbus) -{ - vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); -} - -/** - * hv_pci_enter_d0() - Bring the "bus" into the D0 power state - * @hdev: VMBus's tracking struct for this root PCI bus - * - * Return: 0 on success, -errno on failure - */ -static int hv_pci_enter_d0(struct hv_device *hdev) -{ - struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); - struct pci_bus_d0_entry *d0_entry; - struct hv_pci_compl comp_pkt; - struct pci_packet *pkt; - int ret; - - /* - * Tell the host that the bus is ready to use, and moved into the - * powered-on state. This includes telling the host which region - * of memory-mapped I/O space has been chosen for configuration space - * access. 
- */ - pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL); - if (!pkt) - return -ENOMEM; - - init_completion(&comp_pkt.host_event); - pkt->completion_func = hv_pci_generic_compl; - pkt->compl_ctxt = &comp_pkt; - d0_entry = (struct pci_bus_d0_entry *)&pkt->message; - d0_entry->message_type.type = PCI_BUS_D0ENTRY; - d0_entry->mmio_base = hbus->mem_config->start; - - ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry), - (unsigned long)pkt, VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (!ret) - ret = wait_for_response(hdev, &comp_pkt.host_event); - - if (ret) - goto exit; - - if (comp_pkt.completion_status < 0) { - dev_err(&hdev->device, - "PCI Pass-through VSP failed D0 Entry with status %x\n", - comp_pkt.completion_status); - ret = -EPROTO; - goto exit; - } - - ret = 0; - -exit: - kfree(pkt); - return ret; -} - -/** - * hv_pci_query_relations() - Ask host to send list of child - * devices - * @hdev: VMBus's tracking struct for this root PCI bus - * - * Return: 0 on success, -errno on failure - */ -static int hv_pci_query_relations(struct hv_device *hdev) -{ - struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); - struct pci_message message; - struct completion comp; - int ret; - - /* Ask the host to send along the list of child devices */ - init_completion(&comp); - if (cmpxchg(&hbus->survey_event, NULL, &comp)) - return -ENOTEMPTY; - - memset(&message, 0, sizeof(message)); - message.type = PCI_QUERY_BUS_RELATIONS; - - ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message), - 0, VM_PKT_DATA_INBAND, 0); - if (!ret) - ret = wait_for_response(hdev, &comp); - - return ret; -} - -/** - * hv_send_resources_allocated() - Report local resource choices - * @hdev: VMBus's tracking struct for this root PCI bus - * - * The host OS is expecting to be sent a request as a message - * which contains all the resources that the device will use. 
- * The response contains those same resources, "translated" - * which is to say, the values which should be used by the - * hardware, when it delivers an interrupt. (MMIO resources are - * used in local terms.) This is nice for Windows, and lines up - * with the FDO/PDO split, which doesn't exist in Linux. Linux - * is deeply expecting to scan an emulated PCI configuration - * space. So this message is sent here only to drive the state - * machine on the host forward. - * - * Return: 0 on success, -errno on failure - */ -static int hv_send_resources_allocated(struct hv_device *hdev) -{ - struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); - struct pci_resources_assigned *res_assigned; - struct pci_resources_assigned2 *res_assigned2; - struct hv_pci_compl comp_pkt; - struct hv_pci_dev *hpdev; - struct pci_packet *pkt; - size_t size_res; - u32 wslot; - int ret; - - size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) - ? sizeof(*res_assigned) : sizeof(*res_assigned2); - - pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL); - if (!pkt) - return -ENOMEM; - - ret = 0; - - for (wslot = 0; wslot < 256; wslot++) { - hpdev = get_pcichild_wslot(hbus, wslot); - if (!hpdev) - continue; - - memset(pkt, 0, sizeof(*pkt) + size_res); - init_completion(&comp_pkt.host_event); - pkt->completion_func = hv_pci_generic_compl; - pkt->compl_ctxt = &comp_pkt; - - if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) { - res_assigned = - (struct pci_resources_assigned *)&pkt->message; - res_assigned->message_type.type = - PCI_RESOURCES_ASSIGNED; - res_assigned->wslot.slot = hpdev->desc.win_slot.slot; - } else { - res_assigned2 = - (struct pci_resources_assigned2 *)&pkt->message; - res_assigned2->message_type.type = - PCI_RESOURCES_ASSIGNED2; - res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; - } - put_pcichild(hpdev); - - ret = vmbus_sendpacket(hdev->channel, &pkt->message, - size_res, (unsigned long)pkt, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 
- if (!ret) - ret = wait_for_response(hdev, &comp_pkt.host_event); - if (ret) - break; - - if (comp_pkt.completion_status < 0) { - ret = -EPROTO; - dev_err(&hdev->device, - "resource allocated returned 0x%x", - comp_pkt.completion_status); - break; - } - } - - kfree(pkt); - return ret; -} - -/** - * hv_send_resources_released() - Report local resources - * released - * @hdev: VMBus's tracking struct for this root PCI bus - * - * Return: 0 on success, -errno on failure - */ -static int hv_send_resources_released(struct hv_device *hdev) -{ - struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); - struct pci_child_message pkt; - struct hv_pci_dev *hpdev; - u32 wslot; - int ret; - - for (wslot = 0; wslot < 256; wslot++) { - hpdev = get_pcichild_wslot(hbus, wslot); - if (!hpdev) - continue; - - memset(&pkt, 0, sizeof(pkt)); - pkt.message_type.type = PCI_RESOURCES_RELEASED; - pkt.wslot.slot = hpdev->desc.win_slot.slot; - - put_pcichild(hpdev); - - ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0, - VM_PKT_DATA_INBAND, 0); - if (ret) - return ret; - } - - return 0; -} - -static void get_hvpcibus(struct hv_pcibus_device *hbus) -{ - refcount_inc(&hbus->remove_lock); -} - -static void put_hvpcibus(struct hv_pcibus_device *hbus) -{ - if (refcount_dec_and_test(&hbus->remove_lock)) - complete(&hbus->remove_event); -} - -/** - * hv_pci_probe() - New VMBus channel probe, for a root PCI bus - * @hdev: VMBus's tracking struct for this root PCI bus - * @dev_id: Identifies the device itself - * - * Return: 0 on success, -errno on failure - */ -static int hv_pci_probe(struct hv_device *hdev, - const struct hv_vmbus_device_id *dev_id) -{ - struct hv_pcibus_device *hbus; - int ret; - - /* - * hv_pcibus_device contains the hypercall arguments for retargeting in - * hv_irq_unmask(). Those must not cross a page boundary. 
- */ - BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE); - - hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL); - if (!hbus) - return -ENOMEM; - hbus->state = hv_pcibus_init; - - /* - * The PCI bus "domain" is what is called "segment" in ACPI and - * other specs. Pull it from the instance ID, to get something - * unique. Bytes 8 and 9 are what is used in Windows guests, so - * do the same thing for consistency. Note that, since this code - * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee - * that (1) the only domain in use for something that looks like - * a physical PCI bus (which is actually emulated by the - * hypervisor) is domain 0 and (2) there will be no overlap - * between domains derived from these instance IDs in the same - * VM. - */ - hbus->sysdata.domain = hdev->dev_instance.b[9] | - hdev->dev_instance.b[8] << 8; - - hbus->hdev = hdev; - refcount_set(&hbus->remove_lock, 1); - INIT_LIST_HEAD(&hbus->children); - INIT_LIST_HEAD(&hbus->dr_list); - INIT_LIST_HEAD(&hbus->resources_for_children); - spin_lock_init(&hbus->config_lock); - spin_lock_init(&hbus->device_list_lock); - spin_lock_init(&hbus->retarget_msi_interrupt_lock); - init_completion(&hbus->remove_event); - hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0, - hbus->sysdata.domain); - if (!hbus->wq) { - ret = -ENOMEM; - goto free_bus; - } - - ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, - hv_pci_onchannelcallback, hbus); - if (ret) - goto destroy_wq; - - hv_set_drvdata(hdev, hbus); - - ret = hv_pci_protocol_negotiation(hdev); - if (ret) - goto close; - - ret = hv_allocate_config_window(hbus); - if (ret) - goto close; - - hbus->cfg_addr = ioremap(hbus->mem_config->start, - PCI_CONFIG_MMIO_LENGTH); - if (!hbus->cfg_addr) { - dev_err(&hdev->device, - "Unable to map a virtual address for config space\n"); - ret = -ENOMEM; - goto free_config; - } - - hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus); - if (!hbus->sysdata.fwnode) { - ret = -ENOMEM; - goto 
unmap; - } - - ret = hv_pcie_init_irq_domain(hbus); - if (ret) - goto free_fwnode; - - ret = hv_pci_query_relations(hdev); - if (ret) - goto free_irq_domain; - - ret = hv_pci_enter_d0(hdev); - if (ret) - goto free_irq_domain; - - ret = hv_pci_allocate_bridge_windows(hbus); - if (ret) - goto free_irq_domain; - - ret = hv_send_resources_allocated(hdev); - if (ret) - goto free_windows; - - prepopulate_bars(hbus); - - hbus->state = hv_pcibus_probed; - - ret = create_root_hv_pci_bus(hbus); - if (ret) - goto free_windows; - - return 0; - -free_windows: - hv_pci_free_bridge_windows(hbus); -free_irq_domain: - irq_domain_remove(hbus->irq_domain); -free_fwnode: - irq_domain_free_fwnode(hbus->sysdata.fwnode); -unmap: - iounmap(hbus->cfg_addr); -free_config: - hv_free_config_window(hbus); -close: - vmbus_close(hdev->channel); -destroy_wq: - destroy_workqueue(hbus->wq); -free_bus: - free_page((unsigned long)hbus); - return ret; -} - -static void hv_pci_bus_exit(struct hv_device *hdev) -{ - struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); - struct { - struct pci_packet teardown_packet; - u8 buffer[sizeof(struct pci_message)]; - } pkt; - struct pci_bus_relations relations; - struct hv_pci_compl comp_pkt; - int ret; - - /* - * After the host sends the RESCIND_CHANNEL message, it doesn't - * access the per-channel ringbuffer any longer. - */ - if (hdev->channel->rescind) - return; - - /* Delete any children which might still exist. 
*/ - memset(&relations, 0, sizeof(relations)); - hv_pci_devices_present(hbus, &relations); - - ret = hv_send_resources_released(hdev); - if (ret) - dev_err(&hdev->device, - "Couldn't send resources released packet(s)\n"); - - memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet)); - init_completion(&comp_pkt.host_event); - pkt.teardown_packet.completion_func = hv_pci_generic_compl; - pkt.teardown_packet.compl_ctxt = &comp_pkt; - pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; - - ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message, - sizeof(struct pci_message), - (unsigned long)&pkt.teardown_packet, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (!ret) - wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ); -} - -/** - * hv_pci_remove() - Remove routine for this VMBus channel - * @hdev: VMBus's tracking struct for this root PCI bus - * - * Return: 0 on success, -errno on failure - */ -static int hv_pci_remove(struct hv_device *hdev) -{ - struct hv_pcibus_device *hbus; - - hbus = hv_get_drvdata(hdev); - if (hbus->state == hv_pcibus_installed) { - /* Remove the bus from PCI's point of view. 
*/ - pci_lock_rescan_remove(); - pci_stop_root_bus(hbus->pci_bus); - pci_remove_root_bus(hbus->pci_bus); - pci_unlock_rescan_remove(); - hbus->state = hv_pcibus_removed; - } - - hv_pci_bus_exit(hdev); - - vmbus_close(hdev->channel); - - iounmap(hbus->cfg_addr); - hv_free_config_window(hbus); - pci_free_resource_list(&hbus->resources_for_children); - hv_pci_free_bridge_windows(hbus); - irq_domain_remove(hbus->irq_domain); - irq_domain_free_fwnode(hbus->sysdata.fwnode); - put_hvpcibus(hbus); - wait_for_completion(&hbus->remove_event); - destroy_workqueue(hbus->wq); - free_page((unsigned long)hbus); - return 0; -} - -static const struct hv_vmbus_device_id hv_pci_id_table[] = { - /* PCI Pass-through Class ID */ - /* 44C4F61D-4444-4400-9D52-802E27EDE19F */ - { HV_PCIE_GUID, }, - { }, -}; - -MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table); - -static struct hv_driver hv_pci_drv = { - .name = "hv_pci", - .id_table = hv_pci_id_table, - .probe = hv_pci_probe, - .remove = hv_pci_remove, -}; - -static void __exit exit_hv_pci_drv(void) -{ - vmbus_driver_unregister(&hv_pci_drv); -} - -static int __init init_hv_pci_drv(void) -{ - return vmbus_driver_register(&hv_pci_drv); -} - -module_init(init_hv_pci_drv); -module_exit(exit_hv_pci_drv); - -MODULE_DESCRIPTION("Hyper-V PCI"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c deleted file mode 100644 index 23e270839e6a..000000000000 --- a/drivers/pci/host/pci-mvebu.c +++ /dev/null @@ -1,1313 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe driver for Marvell Armada 370 and Armada XP SoCs - * - * Author: Thomas Petazzoni - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -/* - * PCIe unit register offsets. 
- */ -#define PCIE_DEV_ID_OFF 0x0000 -#define PCIE_CMD_OFF 0x0004 -#define PCIE_DEV_REV_OFF 0x0008 -#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) -#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) -#define PCIE_CAP_PCIEXP 0x0060 -#define PCIE_HEADER_LOG_4_OFF 0x0128 -#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) -#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) -#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) -#define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4)) -#define PCIE_WIN5_CTRL_OFF 0x1880 -#define PCIE_WIN5_BASE_OFF 0x1884 -#define PCIE_WIN5_REMAP_OFF 0x188c -#define PCIE_CONF_ADDR_OFF 0x18f8 -#define PCIE_CONF_ADDR_EN 0x80000000 -#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc)) -#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16) -#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11) -#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8) -#define PCIE_CONF_ADDR(bus, devfn, where) \ - (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ - PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \ - PCIE_CONF_ADDR_EN) -#define PCIE_CONF_DATA_OFF 0x18fc -#define PCIE_MASK_OFF 0x1910 -#define PCIE_MASK_ENABLE_INTS 0x0f000000 -#define PCIE_CTRL_OFF 0x1a00 -#define PCIE_CTRL_X1_MODE 0x0001 -#define PCIE_STAT_OFF 0x1a04 -#define PCIE_STAT_BUS 0xff00 -#define PCIE_STAT_DEV 0x1f0000 -#define PCIE_STAT_LINK_DOWN BIT(0) -#define PCIE_RC_RTSTA 0x1a14 -#define PCIE_DEBUG_CTRL 0x1a60 -#define PCIE_DEBUG_SOFT_RESET BIT(20) - -enum { - PCISWCAP = PCI_BRIDGE_CONTROL + 2, - PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID, - PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP, - PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL, - PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP, - PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL, - PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP, - PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL, - PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL, - PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA, - PCISWCAP_EXP_DEVCAP2 = PCISWCAP + 
PCI_EXP_DEVCAP2, - PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2, - PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2, - PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2, - PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2, - PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2, -}; - -/* PCI configuration space of a PCI-to-PCI bridge */ -struct mvebu_sw_pci_bridge { - u16 vendor; - u16 device; - u16 command; - u16 status; - u16 class; - u8 interface; - u8 revision; - u8 bist; - u8 header_type; - u8 latency_timer; - u8 cache_line_size; - u32 bar[2]; - u8 primary_bus; - u8 secondary_bus; - u8 subordinate_bus; - u8 secondary_latency_timer; - u8 iobase; - u8 iolimit; - u16 secondary_status; - u16 membase; - u16 memlimit; - u16 iobaseupper; - u16 iolimitupper; - u32 romaddr; - u8 intline; - u8 intpin; - u16 bridgectrl; - - /* PCI express capability */ - u32 pcie_sltcap; - u16 pcie_devctl; - u16 pcie_rtctl; -}; - -struct mvebu_pcie_port; - -/* Structure representing all PCIe interfaces */ -struct mvebu_pcie { - struct platform_device *pdev; - struct mvebu_pcie_port *ports; - struct msi_controller *msi; - struct resource io; - struct resource realio; - struct resource mem; - struct resource busn; - int nports; -}; - -struct mvebu_pcie_window { - phys_addr_t base; - phys_addr_t remap; - size_t size; -}; - -/* Structure representing one PCIe interface */ -struct mvebu_pcie_port { - char *name; - void __iomem *base; - u32 port; - u32 lane; - int devfn; - unsigned int mem_target; - unsigned int mem_attr; - unsigned int io_target; - unsigned int io_attr; - struct clk *clk; - struct gpio_desc *reset_gpio; - char *reset_name; - struct mvebu_sw_pci_bridge bridge; - struct device_node *dn; - struct mvebu_pcie *pcie; - struct mvebu_pcie_window memwin; - struct mvebu_pcie_window iowin; - u32 saved_pcie_stat; -}; - -static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) -{ - writel(val, port->base + reg); -} - -static inline u32 mvebu_readl(struct 
mvebu_pcie_port *port, u32 reg) -{ - return readl(port->base + reg); -} - -static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port) -{ - return port->io_target != -1 && port->io_attr != -1; -} - -static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) -{ - return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); -} - -static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) -{ - u32 stat; - - stat = mvebu_readl(port, PCIE_STAT_OFF); - stat &= ~PCIE_STAT_BUS; - stat |= nr << 8; - mvebu_writel(port, stat, PCIE_STAT_OFF); -} - -static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) -{ - u32 stat; - - stat = mvebu_readl(port, PCIE_STAT_OFF); - stat &= ~PCIE_STAT_DEV; - stat |= nr << 16; - mvebu_writel(port, stat, PCIE_STAT_OFF); -} - -/* - * Setup PCIE BARs and Address Decode Wins: - * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks - * WIN[0-3] -> DRAM bank[0-3] - */ -static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) -{ - const struct mbus_dram_target_info *dram; - u32 size; - int i; - - dram = mv_mbus_dram_info(); - - /* First, disable and clear BARs and windows. */ - for (i = 1; i < 3; i++) { - mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i)); - mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i)); - mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i)); - } - - for (i = 0; i < 5; i++) { - mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i)); - mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i)); - mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); - } - - mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF); - mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF); - mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF); - - /* Setup windows for DDR banks. Count total DDR size on the fly. 
*/ - size = 0; - for (i = 0; i < dram->num_cs; i++) { - const struct mbus_dram_window *cs = dram->cs + i; - - mvebu_writel(port, cs->base & 0xffff0000, - PCIE_WIN04_BASE_OFF(i)); - mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); - mvebu_writel(port, - ((cs->size - 1) & 0xffff0000) | - (cs->mbus_attr << 8) | - (dram->mbus_dram_target_id << 4) | 1, - PCIE_WIN04_CTRL_OFF(i)); - - size += cs->size; - } - - /* Round up 'size' to the nearest power of two. */ - if ((size & (size - 1)) != 0) - size = 1 << fls(size); - - /* Setup BAR[1] to all DRAM banks. */ - mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1)); - mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); - mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, - PCIE_BAR_CTRL_OFF(1)); -} - -static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) -{ - u32 cmd, mask; - - /* Point PCIe unit MBUS decode windows to DRAM space. */ - mvebu_pcie_setup_wins(port); - - /* Master + slave enable. */ - cmd = mvebu_readl(port, PCIE_CMD_OFF); - cmd |= PCI_COMMAND_IO; - cmd |= PCI_COMMAND_MEMORY; - cmd |= PCI_COMMAND_MASTER; - mvebu_writel(port, cmd, PCIE_CMD_OFF); - - /* Enable interrupt lines A-D. 
*/ - mask = mvebu_readl(port, PCIE_MASK_OFF); - mask |= PCIE_MASK_ENABLE_INTS; - mvebu_writel(port, mask, PCIE_MASK_OFF); -} - -static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, - struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val) -{ - void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; - - mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), - PCIE_CONF_ADDR_OFF); - - switch (size) { - case 1: - *val = readb_relaxed(conf_data + (where & 3)); - break; - case 2: - *val = readw_relaxed(conf_data + (where & 2)); - break; - case 4: - *val = readl_relaxed(conf_data); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, - struct pci_bus *bus, - u32 devfn, int where, int size, u32 val) -{ - void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; - - mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), - PCIE_CONF_ADDR_OFF); - - switch (size) { - case 1: - writeb(val, conf_data + (where & 3)); - break; - case 2: - writew(val, conf_data + (where & 2)); - break; - case 4: - writel(val, conf_data); - break; - default: - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - return PCIBIOS_SUCCESSFUL; -} - -/* - * Remove windows, starting from the largest ones to the smallest - * ones. - */ -static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, - phys_addr_t base, size_t size) -{ - while (size) { - size_t sz = 1 << (fls(size) - 1); - - mvebu_mbus_del_window(base, sz); - base += sz; - size -= sz; - } -} - -/* - * MBus windows can only have a power of two size, but PCI BARs do not - * have this constraint. Therefore, we have to split the PCI BAR into - * areas each having a power of two size. We start from the largest - * one (i.e highest order bit set in the size). 
- */ -static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, - unsigned int target, unsigned int attribute, - phys_addr_t base, size_t size, - phys_addr_t remap) -{ - size_t size_mapped = 0; - - while (size) { - size_t sz = 1 << (fls(size) - 1); - int ret; - - ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, - sz, remap); - if (ret) { - phys_addr_t end = base + sz - 1; - - dev_err(&port->pcie->pdev->dev, - "Could not create MBus window at [mem %pa-%pa]: %d\n", - &base, &end, ret); - mvebu_pcie_del_windows(port, base - size_mapped, - size_mapped); - return; - } - - size -= sz; - size_mapped += sz; - base += sz; - if (remap != MVEBU_MBUS_NO_REMAP) - remap += sz; - } -} - -static void mvebu_pcie_set_window(struct mvebu_pcie_port *port, - unsigned int target, unsigned int attribute, - const struct mvebu_pcie_window *desired, - struct mvebu_pcie_window *cur) -{ - if (desired->base == cur->base && desired->remap == cur->remap && - desired->size == cur->size) - return; - - if (cur->size != 0) { - mvebu_pcie_del_windows(port, cur->base, cur->size); - cur->size = 0; - cur->base = 0; - - /* - * If something tries to change the window while it is enabled - * the change will not be done atomically. That would be - * difficult to do in the general case. - */ - } - - if (desired->size == 0) - return; - - mvebu_pcie_add_windows(port, target, attribute, desired->base, - desired->size, desired->remap); - *cur = *desired; -} - -static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) -{ - struct mvebu_pcie_window desired = {}; - - /* Are the new iobase/iolimit values invalid? 
*/ - if (port->bridge.iolimit < port->bridge.iobase || - port->bridge.iolimitupper < port->bridge.iobaseupper || - !(port->bridge.command & PCI_COMMAND_IO)) { - mvebu_pcie_set_window(port, port->io_target, port->io_attr, - &desired, &port->iowin); - return; - } - - if (!mvebu_has_ioport(port)) { - dev_WARN(&port->pcie->pdev->dev, - "Attempt to set IO when IO is disabled\n"); - return; - } - - /* - * We read the PCI-to-PCI bridge emulated registers, and - * calculate the base address and size of the address decoding - * window to setup, according to the PCI-to-PCI bridge - * specifications. iobase is the bus address, port->iowin_base - * is the CPU address. - */ - desired.remap = ((port->bridge.iobase & 0xF0) << 8) | - (port->bridge.iobaseupper << 16); - desired.base = port->pcie->io.start + desired.remap; - desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | - (port->bridge.iolimitupper << 16)) - - desired.remap) + - 1; - - mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, - &port->iowin); -} - -static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) -{ - struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP}; - - /* Are the new membase/memlimit values invalid? */ - if (port->bridge.memlimit < port->bridge.membase || - !(port->bridge.command & PCI_COMMAND_MEMORY)) { - mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, - &desired, &port->memwin); - return; - } - - /* - * We read the PCI-to-PCI bridge emulated registers, and - * calculate the base address and size of the address decoding - * window to setup, according to the PCI-to-PCI bridge - * specifications. 
- */ - desired.base = ((port->bridge.membase & 0xFFF0) << 16); - desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - - desired.base + 1; - - mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, - &port->memwin); -} - -/* - * Initialize the configuration space of the PCI-to-PCI bridge - * associated with the given PCIe interface. - */ -static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port) -{ - struct mvebu_sw_pci_bridge *bridge = &port->bridge; - - memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge)); - - bridge->class = PCI_CLASS_BRIDGE_PCI; - bridge->vendor = PCI_VENDOR_ID_MARVELL; - bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; - bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; - bridge->header_type = PCI_HEADER_TYPE_BRIDGE; - bridge->cache_line_size = 0x10; - - /* We support 32 bits I/O addressing */ - bridge->iobase = PCI_IO_RANGE_TYPE_32; - bridge->iolimit = PCI_IO_RANGE_TYPE_32; - - /* Add capabilities */ - bridge->status = PCI_STATUS_CAP_LIST; -} - -/* - * Read the configuration space of the PCI-to-PCI bridge associated to - * the given PCIe interface. - */ -static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, - unsigned int where, int size, u32 *value) -{ - struct mvebu_sw_pci_bridge *bridge = &port->bridge; - - switch (where & ~3) { - case PCI_VENDOR_ID: - *value = bridge->device << 16 | bridge->vendor; - break; - - case PCI_COMMAND: - *value = bridge->command | bridge->status << 16; - break; - - case PCI_CLASS_REVISION: - *value = bridge->class << 16 | bridge->interface << 8 | - bridge->revision; - break; - - case PCI_CACHE_LINE_SIZE: - *value = bridge->bist << 24 | bridge->header_type << 16 | - bridge->latency_timer << 8 | bridge->cache_line_size; - break; - - case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_1: - *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4]; - break; - - case PCI_PRIMARY_BUS: - *value = (bridge->secondary_latency_timer << 24 | - bridge->subordinate_bus << 16 | - bridge->secondary_bus << 8 | - bridge->primary_bus); - break; - - case PCI_IO_BASE: - if (!mvebu_has_ioport(port)) - *value = bridge->secondary_status << 16; - else - *value = (bridge->secondary_status << 16 | - bridge->iolimit << 8 | - bridge->iobase); - break; - - case PCI_MEMORY_BASE: - *value = (bridge->memlimit << 16 | bridge->membase); - break; - - case PCI_PREF_MEMORY_BASE: - *value = 0; - break; - - case PCI_IO_BASE_UPPER16: - *value = (bridge->iolimitupper << 16 | bridge->iobaseupper); - break; - - case PCI_CAPABILITY_LIST: - *value = PCISWCAP; - break; - - case PCI_ROM_ADDRESS1: - *value = 0; - break; - - case PCI_INTERRUPT_LINE: - /* LINE PIN MIN_GNT MAX_LAT */ - *value = 0; - break; - - case PCISWCAP_EXP_LIST_ID: - /* Set PCIe v2, root port, slot support */ - *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | - PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP; - break; - - case PCISWCAP_EXP_DEVCAP: - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP); - break; - - case PCISWCAP_EXP_DEVCTL: - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) & - ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); - *value |= bridge->pcie_devctl; - break; - - case PCISWCAP_EXP_LNKCAP: - /* - * PCIe requires the clock power management capability to be - * hard-wired to zero for downstream ports - */ - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & - ~PCI_EXP_LNKCAP_CLKPM; - break; - - case PCISWCAP_EXP_LNKCTL: - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); - break; - - case PCISWCAP_EXP_SLTCAP: - *value = bridge->pcie_sltcap; - break; - - case PCISWCAP_EXP_SLTCTL: - *value = PCI_EXP_SLTSTA_PDS << 16; - break; - - case PCISWCAP_EXP_RTCTL: - *value = bridge->pcie_rtctl; - break; - - case 
PCISWCAP_EXP_RTSTA: - *value = mvebu_readl(port, PCIE_RC_RTSTA); - break; - - /* PCIe requires the v2 fields to be hard-wired to zero */ - case PCISWCAP_EXP_DEVCAP2: - case PCISWCAP_EXP_DEVCTL2: - case PCISWCAP_EXP_LNKCAP2: - case PCISWCAP_EXP_LNKCTL2: - case PCISWCAP_EXP_SLTCAP2: - case PCISWCAP_EXP_SLTCTL2: - default: - /* - * PCI defines configuration read accesses to reserved or - * unimplemented registers to read as zero and complete - * normally. - */ - *value = 0; - return PCIBIOS_SUCCESSFUL; - } - - if (size == 2) - *value = (*value >> (8 * (where & 3))) & 0xffff; - else if (size == 1) - *value = (*value >> (8 * (where & 3))) & 0xff; - - return PCIBIOS_SUCCESSFUL; -} - -/* Write to the PCI-to-PCI bridge configuration space */ -static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, - unsigned int where, int size, u32 value) -{ - struct mvebu_sw_pci_bridge *bridge = &port->bridge; - u32 mask, reg; - int err; - - if (size == 4) - mask = 0x0; - else if (size == 2) - mask = ~(0xffff << ((where & 3) * 8)); - else if (size == 1) - mask = ~(0xff << ((where & 3) * 8)); - else - return PCIBIOS_BAD_REGISTER_NUMBER; - - err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, ®); - if (err) - return err; - - value = (reg & mask) | value << ((where & 3) * 8); - - switch (where & ~3) { - case PCI_COMMAND: - { - u32 old = bridge->command; - - if (!mvebu_has_ioport(port)) - value &= ~PCI_COMMAND_IO; - - bridge->command = value & 0xffff; - if ((old ^ bridge->command) & PCI_COMMAND_IO) - mvebu_pcie_handle_iobase_change(port); - if ((old ^ bridge->command) & PCI_COMMAND_MEMORY) - mvebu_pcie_handle_membase_change(port); - break; - } - - case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_1: - bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value; - break; - - case PCI_IO_BASE: - /* - * We also keep bit 1 set, it is a read-only bit that - * indicates we support 32 bits addressing for the - * I/O - */ - bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32; - bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32; - mvebu_pcie_handle_iobase_change(port); - break; - - case PCI_MEMORY_BASE: - bridge->membase = value & 0xffff; - bridge->memlimit = value >> 16; - mvebu_pcie_handle_membase_change(port); - break; - - case PCI_IO_BASE_UPPER16: - bridge->iobaseupper = value & 0xffff; - bridge->iolimitupper = value >> 16; - mvebu_pcie_handle_iobase_change(port); - break; - - case PCI_PRIMARY_BUS: - bridge->primary_bus = value & 0xff; - bridge->secondary_bus = (value >> 8) & 0xff; - bridge->subordinate_bus = (value >> 16) & 0xff; - bridge->secondary_latency_timer = (value >> 24) & 0xff; - mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus); - break; - - case PCISWCAP_EXP_DEVCTL: - /* - * Armada370 data says these bits must always - * be zero when in root complex mode. - */ - value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); - - /* - * If the mask is 0xffff0000, then we only want to write - * the device control register, rather than clearing the - * RW1C bits in the device status register. Mask out the - * status register bits. - */ - if (mask == 0xffff0000) - value &= 0xffff; - - mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); - break; - - case PCISWCAP_EXP_LNKCTL: - /* - * If we don't support CLKREQ, we must ensure that the - * CLKREQ enable bit always reads zero. Since we haven't - * had this capability, and it's dependent on board wiring, - * disable it for the time being. 
- */ - value &= ~PCI_EXP_LNKCTL_CLKREQ_EN; - - /* - * If the mask is 0xffff0000, then we only want to write - * the link control register, rather than clearing the - * RW1C bits in the link status register. Mask out the - * RW1C status register bits. - */ - if (mask == 0xffff0000) - value &= ~((PCI_EXP_LNKSTA_LABS | - PCI_EXP_LNKSTA_LBMS) << 16); - - mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); - break; - - case PCISWCAP_EXP_RTSTA: - mvebu_writel(port, value, PCIE_RC_RTSTA); - break; - - default: - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) -{ - return sys->private_data; -} - -static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, - struct pci_bus *bus, - int devfn) -{ - int i; - - for (i = 0; i < pcie->nports; i++) { - struct mvebu_pcie_port *port = &pcie->ports[i]; - - if (bus->number == 0 && port->devfn == devfn) - return port; - if (bus->number != 0 && - bus->number >= port->bridge.secondary_bus && - bus->number <= port->bridge.subordinate_bus) - return port; - } - - return NULL; -} - -/* PCI configuration space write function */ -static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) -{ - struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); - struct mvebu_pcie_port *port; - int ret; - - port = mvebu_pcie_find_port(pcie, bus, devfn); - if (!port) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* Access the emulated PCI-to-PCI bridge */ - if (bus->number == 0) - return mvebu_sw_pci_bridge_write(port, where, size, val); - - if (!mvebu_pcie_link_up(port)) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* Access the real PCIe interface */ - ret = mvebu_pcie_hw_wr_conf(port, bus, devfn, - where, size, val); - - return ret; -} - -/* PCI configuration space read function */ -static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - int size, u32 *val) -{ - struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); - struct 
mvebu_pcie_port *port; - int ret; - - port = mvebu_pcie_find_port(pcie, bus, devfn); - if (!port) { - *val = 0xffffffff; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - /* Access the emulated PCI-to-PCI bridge */ - if (bus->number == 0) - return mvebu_sw_pci_bridge_read(port, where, size, val); - - if (!mvebu_pcie_link_up(port)) { - *val = 0xffffffff; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - /* Access the real PCIe interface */ - ret = mvebu_pcie_hw_rd_conf(port, bus, devfn, - where, size, val); - - return ret; -} - -static struct pci_ops mvebu_pcie_ops = { - .read = mvebu_pcie_rd_conf, - .write = mvebu_pcie_wr_conf, -}; - -static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) -{ - struct mvebu_pcie *pcie = sys_to_pcie(sys); - int err, i; - - pcie->mem.name = "PCI MEM"; - pcie->realio.name = "PCI I/O"; - - if (resource_size(&pcie->realio) != 0) - pci_add_resource_offset(&sys->resources, &pcie->realio, - sys->io_offset); - - pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); - pci_add_resource(&sys->resources, &pcie->busn); - - err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources); - if (err) - return 0; - - for (i = 0; i < pcie->nports; i++) { - struct mvebu_pcie_port *port = &pcie->ports[i]; - - if (!port->base) - continue; - mvebu_pcie_setup_hw(port); - } - - return 1; -} - -static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, - const struct resource *res, - resource_size_t start, - resource_size_t size, - resource_size_t align) -{ - if (dev->bus->number != 0) - return start; - - /* - * On the PCI-to-PCI bridge side, the I/O windows must have at - * least a 64 KB size and the memory windows must have at - * least a 1 MB size. Moreover, MBus windows need to have a - * base address aligned on their size, and their size must be - * a power of two. This means that if the BAR doesn't have a - * power of two size, several MBus windows will actually be - * created. 
We need to ensure that the biggest MBus window - * (which will be the first one) is aligned on its size, which - * explains the rounddown_pow_of_two() being done here. - */ - if (res->flags & IORESOURCE_IO) - return round_up(start, max_t(resource_size_t, SZ_64K, - rounddown_pow_of_two(size))); - else if (res->flags & IORESOURCE_MEM) - return round_up(start, max_t(resource_size_t, SZ_1M, - rounddown_pow_of_two(size))); - else - return start; -} - -static void mvebu_pcie_enable(struct mvebu_pcie *pcie) -{ - struct hw_pci hw; - - memset(&hw, 0, sizeof(hw)); - -#ifdef CONFIG_PCI_MSI - hw.msi_ctrl = pcie->msi; -#endif - - hw.nr_controllers = 1; - hw.private_data = (void **)&pcie; - hw.setup = mvebu_pcie_setup; - hw.map_irq = of_irq_parse_and_map_pci; - hw.ops = &mvebu_pcie_ops; - hw.align_resource = mvebu_pcie_align_resource; - - pci_common_init_dev(&pcie->pdev->dev, &hw); -} - -/* - * Looks up the list of register addresses encoded into the reg = - * <...> property for one that matches the given port/lane. Once - * found, maps it. 
- */ -static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, - struct device_node *np, - struct mvebu_pcie_port *port) -{ - struct resource regs; - int ret = 0; - - ret = of_address_to_resource(np, 0, ®s); - if (ret) - return ERR_PTR(ret); - - return devm_ioremap_resource(&pdev->dev, ®s); -} - -#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03) -#define DT_TYPE_IO 0x1 -#define DT_TYPE_MEM32 0x2 -#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF) -#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF) - -static int mvebu_get_tgt_attr(struct device_node *np, int devfn, - unsigned long type, - unsigned int *tgt, - unsigned int *attr) -{ - const int na = 3, ns = 2; - const __be32 *range; - int rlen, nranges, rangesz, pna, i; - - *tgt = -1; - *attr = -1; - - range = of_get_property(np, "ranges", &rlen); - if (!range) - return -EINVAL; - - pna = of_n_addr_cells(np); - rangesz = pna + na + ns; - nranges = rlen / sizeof(__be32) / rangesz; - - for (i = 0; i < nranges; i++, range += rangesz) { - u32 flags = of_read_number(range, 1); - u32 slot = of_read_number(range + 1, 1); - u64 cpuaddr = of_read_number(range + na, pna); - unsigned long rtype; - - if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO) - rtype = IORESOURCE_IO; - else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32) - rtype = IORESOURCE_MEM; - else - continue; - - if (slot == PCI_SLOT(devfn) && type == rtype) { - *tgt = DT_CPUADDR_TO_TARGET(cpuaddr); - *attr = DT_CPUADDR_TO_ATTR(cpuaddr); - return 0; - } - } - - return -ENOENT; -} - -#ifdef CONFIG_PM_SLEEP -static int mvebu_pcie_suspend(struct device *dev) -{ - struct mvebu_pcie *pcie; - int i; - - pcie = dev_get_drvdata(dev); - for (i = 0; i < pcie->nports; i++) { - struct mvebu_pcie_port *port = pcie->ports + i; - port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF); - } - - return 0; -} - -static int mvebu_pcie_resume(struct device *dev) -{ - struct mvebu_pcie *pcie; - int i; - - pcie = dev_get_drvdata(dev); - for (i 
= 0; i < pcie->nports; i++) { - struct mvebu_pcie_port *port = pcie->ports + i; - mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF); - mvebu_pcie_setup_hw(port); - } - - return 0; -} -#endif - -static void mvebu_pcie_port_clk_put(void *data) -{ - struct mvebu_pcie_port *port = data; - - clk_put(port->clk); -} - -static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, - struct mvebu_pcie_port *port, struct device_node *child) -{ - struct device *dev = &pcie->pdev->dev; - enum of_gpio_flags flags; - int reset_gpio, ret; - - port->pcie = pcie; - - if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) { - dev_warn(dev, "ignoring %pOF, missing pcie-port property\n", - child); - goto skip; - } - - if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane)) - port->lane = 0; - - port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port, - port->lane); - if (!port->name) { - ret = -ENOMEM; - goto err; - } - - port->devfn = of_pci_get_devfn(child); - if (port->devfn < 0) - goto skip; - - ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM, - &port->mem_target, &port->mem_attr); - if (ret < 0) { - dev_err(dev, "%s: cannot get tgt/attr for mem window\n", - port->name); - goto skip; - } - - if (resource_size(&pcie->io) != 0) { - mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO, - &port->io_target, &port->io_attr); - } else { - port->io_target = -1; - port->io_attr = -1; - } - - reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags); - if (reset_gpio == -EPROBE_DEFER) { - ret = reset_gpio; - goto err; - } - - if (gpio_is_valid(reset_gpio)) { - unsigned long gpio_flags; - - port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset", - port->name); - if (!port->reset_name) { - ret = -ENOMEM; - goto err; - } - - if (flags & OF_GPIO_ACTIVE_LOW) { - dev_info(dev, "%pOF: reset gpio is active low\n", - child); - gpio_flags = GPIOF_ACTIVE_LOW | - GPIOF_OUT_INIT_LOW; - } else { - gpio_flags = 
GPIOF_OUT_INIT_HIGH; - } - - ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags, - port->reset_name); - if (ret) { - if (ret == -EPROBE_DEFER) - goto err; - goto skip; - } - - port->reset_gpio = gpio_to_desc(reset_gpio); - } - - port->clk = of_clk_get_by_name(child, NULL); - if (IS_ERR(port->clk)) { - dev_err(dev, "%s: cannot get clock\n", port->name); - goto skip; - } - - ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port); - if (ret < 0) { - clk_put(port->clk); - goto err; - } - - return 1; - -skip: - ret = 0; - - /* In the case of skipping, we need to free these */ - devm_kfree(dev, port->reset_name); - port->reset_name = NULL; - devm_kfree(dev, port->name); - port->name = NULL; - -err: - return ret; -} - -/* - * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs - * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications - * of the PCI Express Card Electromechanical Specification, 1.1. - */ -static int mvebu_pcie_powerup(struct mvebu_pcie_port *port) -{ - int ret; - - ret = clk_prepare_enable(port->clk); - if (ret < 0) - return ret; - - if (port->reset_gpio) { - u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000; - - of_property_read_u32(port->dn, "reset-delay-us", - &reset_udelay); - - udelay(100); - - gpiod_set_value_cansleep(port->reset_gpio, 0); - msleep(reset_udelay / 1000); - } - - return 0; -} - -/* - * Power down a PCIe port. Strictly, PCIe requires us to place the card - * in D3hot state before asserting PERST#. 
- */ -static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) -{ - gpiod_set_value_cansleep(port->reset_gpio, 1); - - clk_disable_unprepare(port->clk); -} - -static int mvebu_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct mvebu_pcie *pcie; - struct device_node *np = dev->of_node; - struct device_node *child; - int num, i, ret; - - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; - - pcie->pdev = pdev; - platform_set_drvdata(pdev, pcie); - - /* Get the PCIe memory and I/O aperture */ - mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); - if (resource_size(&pcie->mem) == 0) { - dev_err(dev, "invalid memory aperture size\n"); - return -EINVAL; - } - - mvebu_mbus_get_pcie_io_aperture(&pcie->io); - - if (resource_size(&pcie->io) != 0) { - pcie->realio.flags = pcie->io.flags; - pcie->realio.start = PCIBIOS_MIN_IO; - pcie->realio.end = min_t(resource_size_t, - IO_SPACE_LIMIT, - resource_size(&pcie->io)); - } else - pcie->realio = pcie->io; - - /* Get the bus range */ - ret = of_pci_parse_bus_range(np, &pcie->busn); - if (ret) { - dev_err(dev, "failed to parse bus-range property: %d\n", ret); - return ret; - } - - num = of_get_available_child_count(np); - - pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL); - if (!pcie->ports) - return -ENOMEM; - - i = 0; - for_each_available_child_of_node(np, child) { - struct mvebu_pcie_port *port = &pcie->ports[i]; - - ret = mvebu_pcie_parse_port(pcie, port, child); - if (ret < 0) { - of_node_put(child); - return ret; - } else if (ret == 0) { - continue; - } - - port->dn = child; - i++; - } - pcie->nports = i; - - for (i = 0; i < pcie->nports; i++) { - struct mvebu_pcie_port *port = &pcie->ports[i]; - - child = port->dn; - if (!child) - continue; - - ret = mvebu_pcie_powerup(port); - if (ret < 0) - continue; - - port->base = mvebu_pcie_map_registers(pdev, child, port); - if (IS_ERR(port->base)) { - dev_err(dev, "%s: cannot map 
registers\n", port->name); - port->base = NULL; - mvebu_pcie_powerdown(port); - continue; - } - - mvebu_pcie_set_local_dev_nr(port, 1); - mvebu_sw_pci_bridge_init(port); - } - - pcie->nports = i; - - for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K) - pci_ioremap_io(i, pcie->io.start + i); - - mvebu_pcie_enable(pcie); - - platform_set_drvdata(pdev, pcie); - - return 0; -} - -static const struct of_device_id mvebu_pcie_of_match_table[] = { - { .compatible = "marvell,armada-xp-pcie", }, - { .compatible = "marvell,armada-370-pcie", }, - { .compatible = "marvell,dove-pcie", }, - { .compatible = "marvell,kirkwood-pcie", }, - {}, -}; - -static const struct dev_pm_ops mvebu_pcie_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) -}; - -static struct platform_driver mvebu_pcie_driver = { - .driver = { - .name = "mvebu-pcie", - .of_match_table = mvebu_pcie_of_match_table, - /* driver unloading/unbinding currently not supported */ - .suppress_bind_attrs = true, - .pm = &mvebu_pcie_pm_ops, - }, - .probe = mvebu_pcie_probe, -}; -builtin_platform_driver(mvebu_pcie_driver); diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c deleted file mode 100644 index 326171cb1a97..000000000000 --- a/drivers/pci/host/pci-rcar-gen2.c +++ /dev/null @@ -1,428 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * pci-rcar-gen2: internal PCI bus support - * - * Copyright (C) 2013 Renesas Solutions Corp. - * Copyright (C) 2013 Cogent Embedded, Inc. 
- * - * Author: Valentine Barshak - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -/* AHB-PCI Bridge PCI communication registers */ -#define RCAR_AHBPCI_PCICOM_OFFSET 0x800 - -#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00) -#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04) -#define RCAR_PCIAHB_PREFETCH0 0x0 -#define RCAR_PCIAHB_PREFETCH4 0x1 -#define RCAR_PCIAHB_PREFETCH8 0x2 -#define RCAR_PCIAHB_PREFETCH16 0x3 - -#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10) -#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14) -#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1) -#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1) -#define RCAR_AHBPCI_WIN1_HOST (1 << 30) -#define RCAR_AHBPCI_WIN1_DEVICE (1 << 31) - -#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20) -#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24) -#define RCAR_PCI_INT_SIGTABORT (1 << 0) -#define RCAR_PCI_INT_SIGRETABORT (1 << 1) -#define RCAR_PCI_INT_REMABORT (1 << 2) -#define RCAR_PCI_INT_PERR (1 << 3) -#define RCAR_PCI_INT_SIGSERR (1 << 4) -#define RCAR_PCI_INT_RESERR (1 << 5) -#define RCAR_PCI_INT_WIN1ERR (1 << 12) -#define RCAR_PCI_INT_WIN2ERR (1 << 13) -#define RCAR_PCI_INT_A (1 << 16) -#define RCAR_PCI_INT_B (1 << 17) -#define RCAR_PCI_INT_PME (1 << 19) -#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \ - RCAR_PCI_INT_SIGRETABORT | \ - RCAR_PCI_INT_REMABORT | \ - RCAR_PCI_INT_PERR | \ - RCAR_PCI_INT_SIGSERR | \ - RCAR_PCI_INT_RESERR | \ - RCAR_PCI_INT_WIN1ERR | \ - RCAR_PCI_INT_WIN2ERR) - -#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30) -#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0) -#define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1) -#define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2) -#define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7) -#define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17) -#define RCAR_AHB_BUS_MODE 
(RCAR_AHB_BUS_MMODE_HTRANS | \ - RCAR_AHB_BUS_MMODE_BYTE_BURST | \ - RCAR_AHB_BUS_MMODE_WR_INCR | \ - RCAR_AHB_BUS_MMODE_HBUS_REQ | \ - RCAR_AHB_BUS_SMODE_READYCTR) - -#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34) -#define RCAR_USBCTR_USBH_RST (1 << 0) -#define RCAR_USBCTR_PCICLK_MASK (1 << 1) -#define RCAR_USBCTR_PLL_RST (1 << 2) -#define RCAR_USBCTR_DIRPD (1 << 8) -#define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9) -#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10) -#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10) -#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10) -#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10) -#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10) - -#define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40) -#define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0) -#define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1) -#define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12) - -#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48) - -struct rcar_pci_priv { - struct device *dev; - void __iomem *reg; - struct resource mem_res; - struct resource *cfg_res; - unsigned busnr; - int irq; - unsigned long window_size; - unsigned long window_addr; - unsigned long window_pci; -}; - -/* PCI configuration space operations */ -static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, - int where) -{ - struct pci_sys_data *sys = bus->sysdata; - struct rcar_pci_priv *priv = sys->private_data; - int slot, val; - - if (sys->busnr != bus->number || PCI_FUNC(devfn)) - return NULL; - - /* Only one EHCI/OHCI device built-in */ - slot = PCI_SLOT(devfn); - if (slot > 2) - return NULL; - - /* bridge logic only has registers to 0x40 */ - if (slot == 0x0 && where >= 0x40) - return NULL; - - val = slot ? 
RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG : - RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG; - - iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG); - return priv->reg + (slot >> 1) * 0x100 + where; -} - -/* PCI interrupt mapping */ -static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - struct pci_sys_data *sys = dev->bus->sysdata; - struct rcar_pci_priv *priv = sys->private_data; - int irq; - - irq = of_irq_parse_and_map_pci(dev, slot, pin); - if (!irq) - irq = priv->irq; - - return irq; -} - -#ifdef CONFIG_PCI_DEBUG -/* if debug enabled, then attach an error handler irq to the bridge */ - -static irqreturn_t rcar_pci_err_irq(int irq, void *pw) -{ - struct rcar_pci_priv *priv = pw; - struct device *dev = priv->dev; - u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG); - - if (status & RCAR_PCI_INT_ALLERRORS) { - dev_err(dev, "error irq: status %08x\n", status); - - /* clear the error(s) */ - iowrite32(status & RCAR_PCI_INT_ALLERRORS, - priv->reg + RCAR_PCI_INT_STATUS_REG); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - -static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) -{ - struct device *dev = priv->dev; - int ret; - u32 val; - - ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq, - IRQF_SHARED, "error irq", priv); - if (ret) { - dev_err(dev, "cannot claim IRQ for error handling\n"); - return; - } - - val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG); - val |= RCAR_PCI_INT_ALLERRORS; - iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG); -} -#else -static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { } -#endif - -/* PCI host controller setup */ -static int rcar_pci_setup(int nr, struct pci_sys_data *sys) -{ - struct rcar_pci_priv *priv = sys->private_data; - struct device *dev = priv->dev; - void __iomem *reg = priv->reg; - u32 val; - int ret; - - pm_runtime_enable(dev); - pm_runtime_get_sync(dev); - - val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); - dev_info(dev, "PCI: bus%u 
revision %x\n", sys->busnr, val); - - /* Disable Direct Power Down State and assert reset */ - val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; - val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST; - iowrite32(val, reg + RCAR_USBCTR_REG); - udelay(4); - - /* De-assert reset and reset PCIAHB window1 size */ - val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK | - RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST); - - /* Setup PCIAHB window1 size */ - switch (priv->window_size) { - case SZ_2G: - val |= RCAR_USBCTR_PCIAHB_WIN1_2G; - break; - case SZ_1G: - val |= RCAR_USBCTR_PCIAHB_WIN1_1G; - break; - case SZ_512M: - val |= RCAR_USBCTR_PCIAHB_WIN1_512M; - break; - default: - pr_warn("unknown window size %ld - defaulting to 256M\n", - priv->window_size); - priv->window_size = SZ_256M; - /* fall-through */ - case SZ_256M: - val |= RCAR_USBCTR_PCIAHB_WIN1_256M; - break; - } - iowrite32(val, reg + RCAR_USBCTR_REG); - - /* Configure AHB master and slave modes */ - iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG); - - /* Configure PCI arbiter */ - val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG); - val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 | - RCAR_PCI_ARBITER_PCIBP_MODE; - iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG); - - /* PCI-AHB mapping */ - iowrite32(priv->window_addr | RCAR_PCIAHB_PREFETCH16, - reg + RCAR_PCIAHB_WIN1_CTR_REG); - - /* AHB-PCI mapping: OHCI/EHCI registers */ - val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM; - iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG); - - /* Enable AHB-PCI bridge PCI configuration access */ - iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG, - reg + RCAR_AHBPCI_WIN1_CTR_REG); - /* Set PCI-AHB Window1 address */ - iowrite32(priv->window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, - reg + PCI_BASE_ADDRESS_1); - /* Set AHB-PCI bridge PCI communication area address */ - val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; - iowrite32(val, reg + PCI_BASE_ADDRESS_0); - - val = 
ioread32(reg + PCI_COMMAND); - val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | - PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; - iowrite32(val, reg + PCI_COMMAND); - - /* Enable PCI interrupts */ - iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME, - reg + RCAR_PCI_INT_ENABLE_REG); - - if (priv->irq > 0) - rcar_pci_setup_errirq(priv); - - /* Add PCI resources */ - pci_add_resource(&sys->resources, &priv->mem_res); - ret = devm_request_pci_bus_resources(dev, &sys->resources); - if (ret < 0) - return ret; - - /* Setup bus number based on platform device id / of bus-range */ - sys->busnr = priv->busnr; - return 1; -} - -static struct pci_ops rcar_pci_ops = { - .map_bus = rcar_pci_cfg_base, - .read = pci_generic_config_read, - .write = pci_generic_config_write, -}; - -static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, - struct device_node *np) -{ - struct device *dev = pci->dev; - struct of_pci_range range; - struct of_pci_range_parser parser; - int index = 0; - - /* Failure to parse is ok as we fall back to defaults */ - if (of_pci_dma_range_parser_init(&parser, np)) - return 0; - - /* Get the dma-ranges from DT */ - for_each_of_pci_range(&parser, &range) { - /* Hardware only allows one inbound 32-bit range */ - if (index) - return -EINVAL; - - pci->window_addr = (unsigned long)range.cpu_addr; - pci->window_pci = (unsigned long)range.pci_addr; - pci->window_size = (unsigned long)range.size; - - /* Catch HW limitations */ - if (!(range.flags & IORESOURCE_PREFETCH)) { - dev_err(dev, "window must be prefetchable\n"); - return -EINVAL; - } - if (pci->window_addr) { - u32 lowaddr = 1 << (ffs(pci->window_addr) - 1); - - if (lowaddr < pci->window_size) { - dev_err(dev, "invalid window size/addr\n"); - return -EINVAL; - } - } - index++; - } - - return 0; -} - -static int rcar_pci_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct resource *cfg_res, *mem_res; - struct rcar_pci_priv *priv; - void __iomem *reg; - struct 
hw_pci hw; - void *hw_private[1]; - - cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg = devm_ioremap_resource(dev, cfg_res); - if (IS_ERR(reg)) - return PTR_ERR(reg); - - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!mem_res || !mem_res->start) - return -ENODEV; - - if (mem_res->start & 0xFFFF) - return -EINVAL; - - priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->mem_res = *mem_res; - priv->cfg_res = cfg_res; - - priv->irq = platform_get_irq(pdev, 0); - priv->reg = reg; - priv->dev = dev; - - if (priv->irq < 0) { - dev_err(dev, "no valid irq found\n"); - return priv->irq; - } - - /* default window addr and size if not specified in DT */ - priv->window_addr = 0x40000000; - priv->window_pci = 0x40000000; - priv->window_size = SZ_1G; - - if (dev->of_node) { - struct resource busnr; - int ret; - - ret = of_pci_parse_bus_range(dev->of_node, &busnr); - if (ret < 0) { - dev_err(dev, "failed to parse bus-range\n"); - return ret; - } - - priv->busnr = busnr.start; - if (busnr.end != busnr.start) - dev_warn(dev, "only one bus number supported\n"); - - ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node); - if (ret < 0) { - dev_err(dev, "failed to parse dma-range\n"); - return ret; - } - } else { - priv->busnr = pdev->id; - } - - hw_private[0] = priv; - memset(&hw, 0, sizeof(hw)); - hw.nr_controllers = ARRAY_SIZE(hw_private); - hw.io_optional = 1; - hw.private_data = hw_private; - hw.map_irq = rcar_pci_map_irq; - hw.ops = &rcar_pci_ops; - hw.setup = rcar_pci_setup; - pci_common_init_dev(dev, &hw); - return 0; -} - -static const struct of_device_id rcar_pci_of_match[] = { - { .compatible = "renesas,pci-r8a7790", }, - { .compatible = "renesas,pci-r8a7791", }, - { .compatible = "renesas,pci-r8a7794", }, - { .compatible = "renesas,pci-rcar-gen2", }, - { }, -}; - -static struct platform_driver rcar_pci_driver = { - .driver = { - .name = "pci-rcar-gen2", - .suppress_bind_attrs = true, 
- .of_match_table = rcar_pci_of_match, - }, - .probe = rcar_pci_probe, -}; -builtin_platform_driver(rcar_pci_driver); diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c deleted file mode 100644 index f4f53d092e00..000000000000 --- a/drivers/pci/host/pci-tegra.c +++ /dev/null @@ -1,2531 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * PCIe host controller driver for Tegra SoCs - * - * Copyright (c) 2010, CompuLab, Ltd. - * Author: Mike Rapoport - * - * Based on NVIDIA PCIe driver - * Copyright (c) 2008-2009, NVIDIA Corporation. - * - * Bits taken from arch/arm/mach-dove/pcie.c - * - * Author: Thierry Reding - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "../pci.h" - -#define INT_PCI_MSI_NR (8 * 32) - -/* register definitions */ - -#define AFI_AXI_BAR0_SZ 0x00 -#define AFI_AXI_BAR1_SZ 0x04 -#define AFI_AXI_BAR2_SZ 0x08 -#define AFI_AXI_BAR3_SZ 0x0c -#define AFI_AXI_BAR4_SZ 0x10 -#define AFI_AXI_BAR5_SZ 0x14 - -#define AFI_AXI_BAR0_START 0x18 -#define AFI_AXI_BAR1_START 0x1c -#define AFI_AXI_BAR2_START 0x20 -#define AFI_AXI_BAR3_START 0x24 -#define AFI_AXI_BAR4_START 0x28 -#define AFI_AXI_BAR5_START 0x2c - -#define AFI_FPCI_BAR0 0x30 -#define AFI_FPCI_BAR1 0x34 -#define AFI_FPCI_BAR2 0x38 -#define AFI_FPCI_BAR3 0x3c -#define AFI_FPCI_BAR4 0x40 -#define AFI_FPCI_BAR5 0x44 - -#define AFI_CACHE_BAR0_SZ 0x48 -#define AFI_CACHE_BAR0_ST 0x4c -#define AFI_CACHE_BAR1_SZ 0x50 -#define AFI_CACHE_BAR1_ST 0x54 - -#define AFI_MSI_BAR_SZ 0x60 -#define AFI_MSI_FPCI_BAR_ST 0x64 -#define AFI_MSI_AXI_BAR_ST 0x68 - -#define AFI_MSI_VEC0 0x6c -#define AFI_MSI_VEC1 0x70 -#define AFI_MSI_VEC2 0x74 -#define AFI_MSI_VEC3 0x78 -#define AFI_MSI_VEC4 0x7c -#define AFI_MSI_VEC5 0x80 -#define AFI_MSI_VEC6 0x84 -#define AFI_MSI_VEC7 0x88 - -#define 
AFI_MSI_EN_VEC0 0x8c -#define AFI_MSI_EN_VEC1 0x90 -#define AFI_MSI_EN_VEC2 0x94 -#define AFI_MSI_EN_VEC3 0x98 -#define AFI_MSI_EN_VEC4 0x9c -#define AFI_MSI_EN_VEC5 0xa0 -#define AFI_MSI_EN_VEC6 0xa4 -#define AFI_MSI_EN_VEC7 0xa8 - -#define AFI_CONFIGURATION 0xac -#define AFI_CONFIGURATION_EN_FPCI (1 << 0) - -#define AFI_FPCI_ERROR_MASKS 0xb0 - -#define AFI_INTR_MASK 0xb4 -#define AFI_INTR_MASK_INT_MASK (1 << 0) -#define AFI_INTR_MASK_MSI_MASK (1 << 8) - -#define AFI_INTR_CODE 0xb8 -#define AFI_INTR_CODE_MASK 0xf -#define AFI_INTR_INI_SLAVE_ERROR 1 -#define AFI_INTR_INI_DECODE_ERROR 2 -#define AFI_INTR_TARGET_ABORT 3 -#define AFI_INTR_MASTER_ABORT 4 -#define AFI_INTR_INVALID_WRITE 5 -#define AFI_INTR_LEGACY 6 -#define AFI_INTR_FPCI_DECODE_ERROR 7 -#define AFI_INTR_AXI_DECODE_ERROR 8 -#define AFI_INTR_FPCI_TIMEOUT 9 -#define AFI_INTR_PE_PRSNT_SENSE 10 -#define AFI_INTR_PE_CLKREQ_SENSE 11 -#define AFI_INTR_CLKCLAMP_SENSE 12 -#define AFI_INTR_RDY4PD_SENSE 13 -#define AFI_INTR_P2P_ERROR 14 - -#define AFI_INTR_SIGNATURE 0xbc -#define AFI_UPPER_FPCI_ADDRESS 0xc0 -#define AFI_SM_INTR_ENABLE 0xc4 -#define AFI_SM_INTR_INTA_ASSERT (1 << 0) -#define AFI_SM_INTR_INTB_ASSERT (1 << 1) -#define AFI_SM_INTR_INTC_ASSERT (1 << 2) -#define AFI_SM_INTR_INTD_ASSERT (1 << 3) -#define AFI_SM_INTR_INTA_DEASSERT (1 << 4) -#define AFI_SM_INTR_INTB_DEASSERT (1 << 5) -#define AFI_SM_INTR_INTC_DEASSERT (1 << 6) -#define AFI_SM_INTR_INTD_DEASSERT (1 << 7) - -#define AFI_AFI_INTR_ENABLE 0xc8 -#define AFI_INTR_EN_INI_SLVERR (1 << 0) -#define AFI_INTR_EN_INI_DECERR (1 << 1) -#define AFI_INTR_EN_TGT_SLVERR (1 << 2) -#define AFI_INTR_EN_TGT_DECERR (1 << 3) -#define AFI_INTR_EN_TGT_WRERR (1 << 4) -#define AFI_INTR_EN_DFPCI_DECERR (1 << 5) -#define AFI_INTR_EN_AXI_DECERR (1 << 6) -#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7) -#define AFI_INTR_EN_PRSNT_SENSE (1 << 8) - -#define AFI_PCIE_PME 0xf0 - -#define AFI_PCIE_CONFIG 0x0f8 -#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1)) -#define 
AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) -#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20) - -#define AFI_FUSE 0x104 -#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) - -#define AFI_PEX0_CTRL 0x110 -#define AFI_PEX1_CTRL 0x118 -#define AFI_PEX2_CTRL 0x128 -#define AFI_PEX_CTRL_RST (1 << 0) -#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) -#define AFI_PEX_CTRL_REFCLK_EN (1 << 3) -#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4) - -#define AFI_PLLE_CONTROL 0x160 -#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9) -#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1) - -#define AFI_PEXBIAS_CTRL_0 0x168 - -#define RP_VEND_XP 0x00000f00 -#define RP_VEND_XP_DL_UP (1 << 30) - -#define RP_VEND_CTL2 0x00000fa8 -#define RP_VEND_CTL2_PCA_ENABLE (1 << 7) - -#define RP_PRIV_MISC 0x00000fe0 -#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) -#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) - -#define RP_LINK_CONTROL_STATUS 0x00000090 -#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 -#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 - -#define PADS_CTL_SEL 0x0000009c - -#define PADS_CTL 0x000000a0 -#define PADS_CTL_IDDQ_1L (1 << 0) -#define PADS_CTL_TX_DATA_EN_1L (1 << 6) -#define PADS_CTL_RX_DATA_EN_1L (1 << 10) - -#define PADS_PLL_CTL_TEGRA20 0x000000b8 -#define PADS_PLL_CTL_TEGRA30 0x000000b4 -#define PADS_PLL_CTL_RST_B4SM (1 << 1) -#define 
PADS_PLL_CTL_LOCKDET (1 << 8) -#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) -#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16) -#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16) -#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16) -#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20) -#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20) -#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20) -#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22) - -#define PADS_REFCLK_CFG0 0x000000c8 -#define PADS_REFCLK_CFG1 0x000000cc -#define PADS_REFCLK_BIAS 0x000000d0 - -/* - * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit - * entries, one entry per PCIe port. These field definitions and desired - * values aren't in the TRM, but do come from NVIDIA. - */ -#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */ -#define PADS_REFCLK_CFG_E_TERM_SHIFT 7 -#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */ -#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ - -#define PME_ACK_TIMEOUT 10000 - -struct tegra_msi { - struct msi_controller chip; - DECLARE_BITMAP(used, INT_PCI_MSI_NR); - struct irq_domain *domain; - unsigned long pages; - struct mutex lock; - u64 phys; - int irq; -}; - -/* used to differentiate between Tegra SoC generations */ -struct tegra_pcie_port_soc { - struct { - u8 turnoff_bit; - u8 ack_bit; - } pme; -}; - -struct tegra_pcie_soc { - unsigned int num_ports; - const struct tegra_pcie_port_soc *ports; - unsigned int msi_base_shift; - u32 pads_pll_ctl; - u32 tx_ref_sel; - u32 pads_refclk_cfg0; - u32 pads_refclk_cfg1; - bool has_pex_clkreq_en; - bool has_pex_bias_ctrl; - bool has_intr_prsnt_sense; - bool has_cml_clk; - bool has_gen2; - bool force_pca_enable; - bool program_uphy; -}; - -static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) -{ - return container_of(chip, struct tegra_msi, chip); -} - -struct tegra_pcie { - struct device *dev; - - void __iomem *pads; - void __iomem *afi; - void __iomem *cfg; - int irq; - - struct resource cs; - struct resource io; - struct 
resource pio; - struct resource mem; - struct resource prefetch; - struct resource busn; - - struct { - resource_size_t mem; - resource_size_t io; - } offset; - - struct clk *pex_clk; - struct clk *afi_clk; - struct clk *pll_e; - struct clk *cml_clk; - - struct reset_control *pex_rst; - struct reset_control *afi_rst; - struct reset_control *pcie_xrst; - - bool legacy_phy; - struct phy *phy; - - struct tegra_msi msi; - - struct list_head ports; - u32 xbar_config; - - struct regulator_bulk_data *supplies; - unsigned int num_supplies; - - const struct tegra_pcie_soc *soc; - struct dentry *debugfs; -}; - -struct tegra_pcie_port { - struct tegra_pcie *pcie; - struct device_node *np; - struct list_head list; - struct resource regs; - void __iomem *base; - unsigned int index; - unsigned int lanes; - - struct phy **phys; -}; - -struct tegra_pcie_bus { - struct list_head list; - unsigned int nr; -}; - -static inline void afi_writel(struct tegra_pcie *pcie, u32 value, - unsigned long offset) -{ - writel(value, pcie->afi + offset); -} - -static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset) -{ - return readl(pcie->afi + offset); -} - -static inline void pads_writel(struct tegra_pcie *pcie, u32 value, - unsigned long offset) -{ - writel(value, pcie->pads + offset); -} - -static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset) -{ - return readl(pcie->pads + offset); -} - -/* - * The configuration space mapping on Tegra is somewhat similar to the ECAM - * defined by PCIe. However it deviates a bit in how the 4 bits for extended - * register accesses are mapped: - * - * [27:24] extended register number - * [23:16] bus number - * [15:11] device number - * [10: 8] function number - * [ 7: 0] register number - * - * Mapping the whole extended configuration space would require 256 MiB of - * virtual address space, only a small part of which will actually be used. 
- * - * To work around this, a 4 KiB region is used to generate the required - * configuration transaction with relevant B:D:F and register offset values. - * This is achieved by dynamically programming base address and size of - * AFI_AXI_BAR used for end point config space mapping to make sure that the - * address (access to which generates correct config transaction) falls in - * this 4 KiB region. - */ -static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn, - unsigned int where) -{ - return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) | - (PCI_FUNC(devfn) << 8) | (where & 0xff); -} - -static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, - unsigned int devfn, - int where) -{ - struct tegra_pcie *pcie = bus->sysdata; - void __iomem *addr = NULL; - - if (bus->number == 0) { - unsigned int slot = PCI_SLOT(devfn); - struct tegra_pcie_port *port; - - list_for_each_entry(port, &pcie->ports, list) { - if (port->index + 1 == slot) { - addr = port->base + (where & ~3); - break; - } - } - } else { - unsigned int offset; - u32 base; - - offset = tegra_pcie_conf_offset(bus->number, devfn, where); - - /* move 4 KiB window to offset within the FPCI region */ - base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8); - afi_writel(pcie, base, AFI_FPCI_BAR0); - - /* move to correct offset within the 4 KiB page */ - addr = pcie->cfg + (offset & (SZ_4K - 1)); - } - - return addr; -} - -static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *value) -{ - if (bus->number == 0) - return pci_generic_config_read32(bus, devfn, where, size, - value); - - return pci_generic_config_read(bus, devfn, where, size, value); -} - -static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 value) -{ - if (bus->number == 0) - return pci_generic_config_write32(bus, devfn, where, size, - value); - - return pci_generic_config_write(bus, devfn, where, size, value); -} - 
-static struct pci_ops tegra_pcie_ops = { - .map_bus = tegra_pcie_map_bus, - .read = tegra_pcie_config_read, - .write = tegra_pcie_config_write, -}; - -static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) -{ - unsigned long ret = 0; - - switch (port->index) { - case 0: - ret = AFI_PEX0_CTRL; - break; - - case 1: - ret = AFI_PEX1_CTRL; - break; - - case 2: - ret = AFI_PEX2_CTRL; - break; - } - - return ret; -} - -static void tegra_pcie_port_reset(struct tegra_pcie_port *port) -{ - unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); - unsigned long value; - - /* pulse reset signal */ - value = afi_readl(port->pcie, ctrl); - value &= ~AFI_PEX_CTRL_RST; - afi_writel(port->pcie, value, ctrl); - - usleep_range(1000, 2000); - - value = afi_readl(port->pcie, ctrl); - value |= AFI_PEX_CTRL_RST; - afi_writel(port->pcie, value, ctrl); -} - -static void tegra_pcie_port_enable(struct tegra_pcie_port *port) -{ - unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); - const struct tegra_pcie_soc *soc = port->pcie->soc; - unsigned long value; - - /* enable reference clock */ - value = afi_readl(port->pcie, ctrl); - value |= AFI_PEX_CTRL_REFCLK_EN; - - if (soc->has_pex_clkreq_en) - value |= AFI_PEX_CTRL_CLKREQ_EN; - - value |= AFI_PEX_CTRL_OVERRIDE_EN; - - afi_writel(port->pcie, value, ctrl); - - tegra_pcie_port_reset(port); - - if (soc->force_pca_enable) { - value = readl(port->base + RP_VEND_CTL2); - value |= RP_VEND_CTL2_PCA_ENABLE; - writel(value, port->base + RP_VEND_CTL2); - } -} - -static void tegra_pcie_port_disable(struct tegra_pcie_port *port) -{ - unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); - const struct tegra_pcie_soc *soc = port->pcie->soc; - unsigned long value; - - /* assert port reset */ - value = afi_readl(port->pcie, ctrl); - value &= ~AFI_PEX_CTRL_RST; - afi_writel(port->pcie, value, ctrl); - - /* disable reference clock */ - value = afi_readl(port->pcie, ctrl); - - if (soc->has_pex_clkreq_en) - value &= 
~AFI_PEX_CTRL_CLKREQ_EN; - - value &= ~AFI_PEX_CTRL_REFCLK_EN; - afi_writel(port->pcie, value, ctrl); -} - -static void tegra_pcie_port_free(struct tegra_pcie_port *port) -{ - struct tegra_pcie *pcie = port->pcie; - struct device *dev = pcie->dev; - - devm_iounmap(dev, port->base); - devm_release_mem_region(dev, port->regs.start, - resource_size(&port->regs)); - list_del(&port->list); - devm_kfree(dev, port); -} - -/* Tegra PCIE root complex wrongly reports device class */ -static void tegra_pcie_fixup_class(struct pci_dev *dev) -{ - dev->class = PCI_CLASS_BRIDGE_PCI << 8; -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class); - -/* Tegra PCIE requires relaxed ordering */ -static void tegra_pcie_relax_enable(struct pci_dev *dev) -{ - pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); -} -DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); - -static int tegra_pcie_request_resources(struct tegra_pcie *pcie) -{ - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = &host->windows; - struct device *dev = pcie->dev; - int err; - - pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); - pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); - pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem); - pci_add_resource(windows, &pcie->busn); - - err = devm_request_pci_bus_resources(dev, windows); - if (err < 0) { - pci_free_resource_list(windows); - return err; - } - - pci_remap_iospace(&pcie->pio, pcie->io.start); - - return 0; -} - -static void tegra_pcie_free_resources(struct tegra_pcie *pcie) -{ - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = 
&host->windows; - - pci_unmap_iospace(&pcie->pio); - pci_free_resource_list(windows); -} - -static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) -{ - struct tegra_pcie *pcie = pdev->bus->sysdata; - int irq; - - tegra_cpuidle_pcie_irqs_in_use(); - - irq = of_irq_parse_and_map_pci(pdev, slot, pin); - if (!irq) - irq = pcie->irq; - - return irq; -} - -static irqreturn_t tegra_pcie_isr(int irq, void *arg) -{ - const char *err_msg[] = { - "Unknown", - "AXI slave error", - "AXI decode error", - "Target abort", - "Master abort", - "Invalid write", - "Legacy interrupt", - "Response decoding error", - "AXI response decoding error", - "Transaction timeout", - "Slot present pin change", - "Slot clock request change", - "TMS clock ramp change", - "TMS ready for power down", - "Peer2Peer error", - }; - struct tegra_pcie *pcie = arg; - struct device *dev = pcie->dev; - u32 code, signature; - - code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK; - signature = afi_readl(pcie, AFI_INTR_SIGNATURE); - afi_writel(pcie, 0, AFI_INTR_CODE); - - if (code == AFI_INTR_LEGACY) - return IRQ_NONE; - - if (code >= ARRAY_SIZE(err_msg)) - code = 0; - - /* - * do not pollute kernel log with master abort reports since they - * happen a lot during enumeration - */ - if (code == AFI_INTR_MASTER_ABORT) - dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature); - else - dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature); - - if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT || - code == AFI_INTR_FPCI_DECODE_ERROR) { - u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff; - u64 address = (u64)fpci << 32 | (signature & 0xfffffffc); - - if (code == AFI_INTR_MASTER_ABORT) - dev_dbg(dev, " FPCI address: %10llx\n", address); - else - dev_err(dev, " FPCI address: %10llx\n", address); - } - - return IRQ_HANDLED; -} - -/* - * FPCI map is as follows: - * - 0xfdfc000000: I/O space - * - 0xfdfe000000: type 0 configuration space - * - 
0xfdff000000: type 1 configuration space - * - 0xfe00000000: type 0 extended configuration space - * - 0xfe10000000: type 1 extended configuration space - */ -static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) -{ - u32 fpci_bar, size, axi_address; - - /* Bar 0: type 1 extended configuration space */ - size = resource_size(&pcie->cs); - afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START); - afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ); - - /* Bar 1: downstream IO bar */ - fpci_bar = 0xfdfc0000; - size = resource_size(&pcie->io); - axi_address = pcie->io.start; - afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); - afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); - afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); - - /* Bar 2: prefetchable memory BAR */ - fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1; - size = resource_size(&pcie->prefetch); - axi_address = pcie->prefetch.start; - afi_writel(pcie, axi_address, AFI_AXI_BAR2_START); - afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ); - afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2); - - /* Bar 3: non prefetchable memory BAR */ - fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1; - size = resource_size(&pcie->mem); - axi_address = pcie->mem.start; - afi_writel(pcie, axi_address, AFI_AXI_BAR3_START); - afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ); - afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3); - - /* NULL out the remaining BARs as they are not used */ - afi_writel(pcie, 0, AFI_AXI_BAR4_START); - afi_writel(pcie, 0, AFI_AXI_BAR4_SZ); - afi_writel(pcie, 0, AFI_FPCI_BAR4); - - afi_writel(pcie, 0, AFI_AXI_BAR5_START); - afi_writel(pcie, 0, AFI_AXI_BAR5_SZ); - afi_writel(pcie, 0, AFI_FPCI_BAR5); - - /* map all upstream transactions as uncached */ - afi_writel(pcie, 0, AFI_CACHE_BAR0_ST); - afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ); - afi_writel(pcie, 0, AFI_CACHE_BAR1_ST); - afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ); - - /* MSI translations are setup only when needed */ - afi_writel(pcie, 0, 
AFI_MSI_FPCI_BAR_ST); - afi_writel(pcie, 0, AFI_MSI_BAR_SZ); - afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST); - afi_writel(pcie, 0, AFI_MSI_BAR_SZ); -} - -static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout) -{ - const struct tegra_pcie_soc *soc = pcie->soc; - u32 value; - - timeout = jiffies + msecs_to_jiffies(timeout); - - while (time_before(jiffies, timeout)) { - value = pads_readl(pcie, soc->pads_pll_ctl); - if (value & PADS_PLL_CTL_LOCKDET) - return 0; - } - - return -ETIMEDOUT; -} - -static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - const struct tegra_pcie_soc *soc = pcie->soc; - u32 value; - int err; - - /* initialize internal PHY, enable up to 16 PCIE lanes */ - pads_writel(pcie, 0x0, PADS_CTL_SEL); - - /* override IDDQ to 1 on all 4 lanes */ - value = pads_readl(pcie, PADS_CTL); - value |= PADS_CTL_IDDQ_1L; - pads_writel(pcie, value, PADS_CTL); - - /* - * Set up PHY PLL inputs select PLLE output as refclock, - * set TX ref sel to div10 (not div5). 
- */ - value = pads_readl(pcie, soc->pads_pll_ctl); - value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK); - value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel; - pads_writel(pcie, value, soc->pads_pll_ctl); - - /* reset PLL */ - value = pads_readl(pcie, soc->pads_pll_ctl); - value &= ~PADS_PLL_CTL_RST_B4SM; - pads_writel(pcie, value, soc->pads_pll_ctl); - - usleep_range(20, 100); - - /* take PLL out of reset */ - value = pads_readl(pcie, soc->pads_pll_ctl); - value |= PADS_PLL_CTL_RST_B4SM; - pads_writel(pcie, value, soc->pads_pll_ctl); - - /* wait for the PLL to lock */ - err = tegra_pcie_pll_wait(pcie, 500); - if (err < 0) { - dev_err(dev, "PLL failed to lock: %d\n", err); - return err; - } - - /* turn off IDDQ override */ - value = pads_readl(pcie, PADS_CTL); - value &= ~PADS_CTL_IDDQ_1L; - pads_writel(pcie, value, PADS_CTL); - - /* enable TX/RX data */ - value = pads_readl(pcie, PADS_CTL); - value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L; - pads_writel(pcie, value, PADS_CTL); - - return 0; -} - -static int tegra_pcie_phy_disable(struct tegra_pcie *pcie) -{ - const struct tegra_pcie_soc *soc = pcie->soc; - u32 value; - - /* disable TX/RX data */ - value = pads_readl(pcie, PADS_CTL); - value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L); - pads_writel(pcie, value, PADS_CTL); - - /* override IDDQ */ - value = pads_readl(pcie, PADS_CTL); - value |= PADS_CTL_IDDQ_1L; - pads_writel(pcie, value, PADS_CTL); - - /* reset PLL */ - value = pads_readl(pcie, soc->pads_pll_ctl); - value &= ~PADS_PLL_CTL_RST_B4SM; - pads_writel(pcie, value, soc->pads_pll_ctl); - - usleep_range(20, 100); - - return 0; -} - -static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port) -{ - struct device *dev = port->pcie->dev; - unsigned int i; - int err; - - for (i = 0; i < port->lanes; i++) { - err = phy_power_on(port->phys[i]); - if (err < 0) { - dev_err(dev, "failed to power on PHY#%u: %d\n", i, err); - return err; - } - } - - return 0; -} 
- -static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) -{ - struct device *dev = port->pcie->dev; - unsigned int i; - int err; - - for (i = 0; i < port->lanes; i++) { - err = phy_power_off(port->phys[i]); - if (err < 0) { - dev_err(dev, "failed to power off PHY#%u: %d\n", i, - err); - return err; - } - } - - return 0; -} - -static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - const struct tegra_pcie_soc *soc = pcie->soc; - struct tegra_pcie_port *port; - int err; - - if (pcie->legacy_phy) { - if (pcie->phy) - err = phy_power_on(pcie->phy); - else - err = tegra_pcie_phy_enable(pcie); - - if (err < 0) - dev_err(dev, "failed to power on PHY: %d\n", err); - - return err; - } - - list_for_each_entry(port, &pcie->ports, list) { - err = tegra_pcie_port_phy_power_on(port); - if (err < 0) { - dev_err(dev, - "failed to power on PCIe port %u PHY: %d\n", - port->index, err); - return err; - } - } - - /* Configure the reference clock driver */ - pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); - - if (soc->num_ports > 2) - pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); - - return 0; -} - -static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct tegra_pcie_port *port; - int err; - - if (pcie->legacy_phy) { - if (pcie->phy) - err = phy_power_off(pcie->phy); - else - err = tegra_pcie_phy_disable(pcie); - - if (err < 0) - dev_err(dev, "failed to power off PHY: %d\n", err); - - return err; - } - - list_for_each_entry(port, &pcie->ports, list) { - err = tegra_pcie_port_phy_power_off(port); - if (err < 0) { - dev_err(dev, - "failed to power off PCIe port %u PHY: %d\n", - port->index, err); - return err; - } - } - - return 0; -} - -static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - const struct tegra_pcie_soc *soc = pcie->soc; - struct tegra_pcie_port *port; - unsigned long value; - int err; - - 
/* enable PLL power down */ - if (pcie->phy) { - value = afi_readl(pcie, AFI_PLLE_CONTROL); - value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL; - value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN; - afi_writel(pcie, value, AFI_PLLE_CONTROL); - } - - /* power down PCIe slot clock bias pad */ - if (soc->has_pex_bias_ctrl) - afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0); - - /* configure mode and disable all ports */ - value = afi_readl(pcie, AFI_PCIE_CONFIG); - value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK; - value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config; - - list_for_each_entry(port, &pcie->ports, list) - value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); - - afi_writel(pcie, value, AFI_PCIE_CONFIG); - - if (soc->has_gen2) { - value = afi_readl(pcie, AFI_FUSE); - value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; - afi_writel(pcie, value, AFI_FUSE); - } else { - value = afi_readl(pcie, AFI_FUSE); - value |= AFI_FUSE_PCIE_T0_GEN2_DIS; - afi_writel(pcie, value, AFI_FUSE); - } - - if (soc->program_uphy) { - err = tegra_pcie_phy_power_on(pcie); - if (err < 0) { - dev_err(dev, "failed to power on PHY(s): %d\n", err); - return err; - } - } - - /* take the PCIe interface module out of reset */ - reset_control_deassert(pcie->pcie_xrst); - - /* finally enable PCIe */ - value = afi_readl(pcie, AFI_CONFIGURATION); - value |= AFI_CONFIGURATION_EN_FPCI; - afi_writel(pcie, value, AFI_CONFIGURATION); - - value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR | - AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR | - AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR; - - if (soc->has_intr_prsnt_sense) - value |= AFI_INTR_EN_PRSNT_SENSE; - - afi_writel(pcie, value, AFI_AFI_INTR_ENABLE); - afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE); - - /* don't enable MSI for now, only when needed */ - afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK); - - /* disable all exceptions */ - afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS); - - return 0; -} - -static void 
tegra_pcie_disable_controller(struct tegra_pcie *pcie) -{ - int err; - - reset_control_assert(pcie->pcie_xrst); - - if (pcie->soc->program_uphy) { - err = tegra_pcie_phy_power_off(pcie); - if (err < 0) - dev_err(pcie->dev, "failed to power off PHY(s): %d\n", - err); - } -} - -static void tegra_pcie_power_off(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - const struct tegra_pcie_soc *soc = pcie->soc; - int err; - - reset_control_assert(pcie->afi_rst); - reset_control_assert(pcie->pex_rst); - - clk_disable_unprepare(pcie->pll_e); - if (soc->has_cml_clk) - clk_disable_unprepare(pcie->cml_clk); - clk_disable_unprepare(pcie->afi_clk); - clk_disable_unprepare(pcie->pex_clk); - - if (!dev->pm_domain) - tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); - - err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies); - if (err < 0) - dev_warn(dev, "failed to disable regulators: %d\n", err); -} - -static int tegra_pcie_power_on(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - const struct tegra_pcie_soc *soc = pcie->soc; - int err; - - reset_control_assert(pcie->pcie_xrst); - reset_control_assert(pcie->afi_rst); - reset_control_assert(pcie->pex_rst); - - if (!dev->pm_domain) - tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); - - /* enable regulators */ - err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies); - if (err < 0) - dev_err(dev, "failed to enable regulators: %d\n", err); - - if (dev->pm_domain) { - err = clk_prepare_enable(pcie->pex_clk); - if (err) { - dev_err(dev, "failed to enable PEX clock: %d\n", err); - return err; - } - reset_control_deassert(pcie->pex_rst); - } else { - err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, - pcie->pex_clk, - pcie->pex_rst); - if (err) { - dev_err(dev, "powerup sequence failed: %d\n", err); - return err; - } - } - - reset_control_deassert(pcie->afi_rst); - - err = clk_prepare_enable(pcie->afi_clk); - if (err < 0) { - dev_err(dev, "failed to enable AFI clock: %d\n", err); - 
return err; - } - - if (soc->has_cml_clk) { - err = clk_prepare_enable(pcie->cml_clk); - if (err < 0) { - dev_err(dev, "failed to enable CML clock: %d\n", err); - return err; - } - } - - err = clk_prepare_enable(pcie->pll_e); - if (err < 0) { - dev_err(dev, "failed to enable PLLE clock: %d\n", err); - return err; - } - - return 0; -} - -static int tegra_pcie_clocks_get(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - const struct tegra_pcie_soc *soc = pcie->soc; - - pcie->pex_clk = devm_clk_get(dev, "pex"); - if (IS_ERR(pcie->pex_clk)) - return PTR_ERR(pcie->pex_clk); - - pcie->afi_clk = devm_clk_get(dev, "afi"); - if (IS_ERR(pcie->afi_clk)) - return PTR_ERR(pcie->afi_clk); - - pcie->pll_e = devm_clk_get(dev, "pll_e"); - if (IS_ERR(pcie->pll_e)) - return PTR_ERR(pcie->pll_e); - - if (soc->has_cml_clk) { - pcie->cml_clk = devm_clk_get(dev, "cml"); - if (IS_ERR(pcie->cml_clk)) - return PTR_ERR(pcie->cml_clk); - } - - return 0; -} - -static int tegra_pcie_resets_get(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - - pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex"); - if (IS_ERR(pcie->pex_rst)) - return PTR_ERR(pcie->pex_rst); - - pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi"); - if (IS_ERR(pcie->afi_rst)) - return PTR_ERR(pcie->afi_rst); - - pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x"); - if (IS_ERR(pcie->pcie_xrst)) - return PTR_ERR(pcie->pcie_xrst); - - return 0; -} - -static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - int err; - - pcie->phy = devm_phy_optional_get(dev, "pcie"); - if (IS_ERR(pcie->phy)) { - err = PTR_ERR(pcie->phy); - dev_err(dev, "failed to get PHY: %d\n", err); - return err; - } - - err = phy_init(pcie->phy); - if (err < 0) { - dev_err(dev, "failed to initialize PHY: %d\n", err); - return err; - } - - pcie->legacy_phy = true; - - return 0; -} - -static struct phy *devm_of_phy_optional_get_index(struct device *dev, - 
struct device_node *np, - const char *consumer, - unsigned int index) -{ - struct phy *phy; - char *name; - - name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index); - if (!name) - return ERR_PTR(-ENOMEM); - - phy = devm_of_phy_get(dev, np, name); - kfree(name); - - if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV) - phy = NULL; - - return phy; -} - -static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port) -{ - struct device *dev = port->pcie->dev; - struct phy *phy; - unsigned int i; - int err; - - port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL); - if (!port->phys) - return -ENOMEM; - - for (i = 0; i < port->lanes; i++) { - phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i); - if (IS_ERR(phy)) { - dev_err(dev, "failed to get PHY#%u: %ld\n", i, - PTR_ERR(phy)); - return PTR_ERR(phy); - } - - err = phy_init(phy); - if (err < 0) { - dev_err(dev, "failed to initialize PHY#%u: %d\n", i, - err); - return err; - } - - port->phys[i] = phy; - } - - return 0; -} - -static int tegra_pcie_phys_get(struct tegra_pcie *pcie) -{ - const struct tegra_pcie_soc *soc = pcie->soc; - struct device_node *np = pcie->dev->of_node; - struct tegra_pcie_port *port; - int err; - - if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL) - return tegra_pcie_phys_get_legacy(pcie); - - list_for_each_entry(port, &pcie->ports, list) { - err = tegra_pcie_port_get_phys(port); - if (err < 0) - return err; - } - - return 0; -} - -static void tegra_pcie_phys_put(struct tegra_pcie *pcie) -{ - struct tegra_pcie_port *port; - struct device *dev = pcie->dev; - int err, i; - - if (pcie->legacy_phy) { - err = phy_exit(pcie->phy); - if (err < 0) - dev_err(dev, "failed to teardown PHY: %d\n", err); - return; - } - - list_for_each_entry(port, &pcie->ports, list) { - for (i = 0; i < port->lanes; i++) { - err = phy_exit(port->phys[i]); - if (err < 0) - dev_err(dev, "failed to teardown PHY#%u: %d\n", - i, err); - } - } -} - - -static int tegra_pcie_get_resources(struct 
tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct platform_device *pdev = to_platform_device(dev); - struct resource *pads, *afi, *res; - const struct tegra_pcie_soc *soc = pcie->soc; - int err; - - err = tegra_pcie_clocks_get(pcie); - if (err) { - dev_err(dev, "failed to get clocks: %d\n", err); - return err; - } - - err = tegra_pcie_resets_get(pcie); - if (err) { - dev_err(dev, "failed to get resets: %d\n", err); - return err; - } - - if (soc->program_uphy) { - err = tegra_pcie_phys_get(pcie); - if (err < 0) { - dev_err(dev, "failed to get PHYs: %d\n", err); - return err; - } - } - - pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads"); - pcie->pads = devm_ioremap_resource(dev, pads); - if (IS_ERR(pcie->pads)) { - err = PTR_ERR(pcie->pads); - goto phys_put; - } - - afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi"); - pcie->afi = devm_ioremap_resource(dev, afi); - if (IS_ERR(pcie->afi)) { - err = PTR_ERR(pcie->afi); - goto phys_put; - } - - /* request configuration space, but remap later, on demand */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs"); - if (!res) { - err = -EADDRNOTAVAIL; - goto phys_put; - } - - pcie->cs = *res; - - /* constrain configuration space to 4 KiB */ - pcie->cs.end = pcie->cs.start + SZ_4K - 1; - - pcie->cfg = devm_ioremap_resource(dev, &pcie->cs); - if (IS_ERR(pcie->cfg)) { - err = PTR_ERR(pcie->cfg); - goto phys_put; - } - - /* request interrupt */ - err = platform_get_irq_byname(pdev, "intr"); - if (err < 0) { - dev_err(dev, "failed to get IRQ: %d\n", err); - goto phys_put; - } - - pcie->irq = err; - - err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie); - if (err) { - dev_err(dev, "failed to register IRQ: %d\n", err); - goto phys_put; - } - - return 0; - -phys_put: - if (soc->program_uphy) - tegra_pcie_phys_put(pcie); - return err; -} - -static int tegra_pcie_put_resources(struct tegra_pcie *pcie) -{ - const struct tegra_pcie_soc *soc = pcie->soc; - - if 
(pcie->irq > 0) - free_irq(pcie->irq, pcie); - - if (soc->program_uphy) - tegra_pcie_phys_put(pcie); - - return 0; -} - -static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port) -{ - struct tegra_pcie *pcie = port->pcie; - const struct tegra_pcie_soc *soc = pcie->soc; - int err; - u32 val; - u8 ack_bit; - - val = afi_readl(pcie, AFI_PCIE_PME); - val |= (0x1 << soc->ports[port->index].pme.turnoff_bit); - afi_writel(pcie, val, AFI_PCIE_PME); - - ack_bit = soc->ports[port->index].pme.ack_bit; - err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val, - val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT); - if (err) - dev_err(pcie->dev, "PME Ack is not received on port: %d\n", - port->index); - - usleep_range(10000, 11000); - - val = afi_readl(pcie, AFI_PCIE_PME); - val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit); - afi_writel(pcie, val, AFI_PCIE_PME); -} - -static int tegra_msi_alloc(struct tegra_msi *chip) -{ - int msi; - - mutex_lock(&chip->lock); - - msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); - if (msi < INT_PCI_MSI_NR) - set_bit(msi, chip->used); - else - msi = -ENOSPC; - - mutex_unlock(&chip->lock); - - return msi; -} - -static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq) -{ - struct device *dev = chip->chip.dev; - - mutex_lock(&chip->lock); - - if (!test_bit(irq, chip->used)) - dev_err(dev, "trying to free unused MSI#%lu\n", irq); - else - clear_bit(irq, chip->used); - - mutex_unlock(&chip->lock); -} - -static irqreturn_t tegra_pcie_msi_irq(int irq, void *data) -{ - struct tegra_pcie *pcie = data; - struct device *dev = pcie->dev; - struct tegra_msi *msi = &pcie->msi; - unsigned int i, processed = 0; - - for (i = 0; i < 8; i++) { - unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4); - - while (reg) { - unsigned int offset = find_first_bit(®, 32); - unsigned int index = i * 32 + offset; - unsigned int irq; - - /* clear the interrupt */ - afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4); - - irq = 
irq_find_mapping(msi->domain, index); - if (irq) { - if (test_bit(index, msi->used)) - generic_handle_irq(irq); - else - dev_info(dev, "unhandled MSI\n"); - } else { - /* - * that's weird who triggered this? - * just clear it - */ - dev_info(dev, "unexpected MSI\n"); - } - - /* see if there's any more pending in this vector */ - reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4); - - processed++; - } - } - - return processed > 0 ? IRQ_HANDLED : IRQ_NONE; -} - -static int tegra_msi_setup_irq(struct msi_controller *chip, - struct pci_dev *pdev, struct msi_desc *desc) -{ - struct tegra_msi *msi = to_tegra_msi(chip); - struct msi_msg msg; - unsigned int irq; - int hwirq; - - hwirq = tegra_msi_alloc(msi); - if (hwirq < 0) - return hwirq; - - irq = irq_create_mapping(msi->domain, hwirq); - if (!irq) { - tegra_msi_free(msi, hwirq); - return -EINVAL; - } - - irq_set_msi_desc(irq, desc); - - msg.address_lo = lower_32_bits(msi->phys); - msg.address_hi = upper_32_bits(msi->phys); - msg.data = hwirq; - - pci_write_msi_msg(irq, &msg); - - return 0; -} - -static void tegra_msi_teardown_irq(struct msi_controller *chip, - unsigned int irq) -{ - struct tegra_msi *msi = to_tegra_msi(chip); - struct irq_data *d = irq_get_irq_data(irq); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - - irq_dispose_mapping(irq); - tegra_msi_free(msi, hwirq); -} - -static struct irq_chip tegra_msi_irq_chip = { - .name = "Tegra PCIe MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - - tegra_cpuidle_pcie_irqs_in_use(); - - return 0; -} - -static const struct irq_domain_ops msi_domain_ops = { - .map = tegra_msi_map, -}; - -static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) -{ - struct 
pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct platform_device *pdev = to_platform_device(pcie->dev); - struct tegra_msi *msi = &pcie->msi; - struct device *dev = pcie->dev; - int err; - - mutex_init(&msi->lock); - - msi->chip.dev = dev; - msi->chip.setup_irq = tegra_msi_setup_irq; - msi->chip.teardown_irq = tegra_msi_teardown_irq; - - msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, - &msi_domain_ops, &msi->chip); - if (!msi->domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - err = platform_get_irq_byname(pdev, "msi"); - if (err < 0) { - dev_err(dev, "failed to get IRQ: %d\n", err); - goto err; - } - - msi->irq = err; - - err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD, - tegra_msi_irq_chip.name, pcie); - if (err < 0) { - dev_err(dev, "failed to request IRQ: %d\n", err); - goto err; - } - - /* setup AFI/FPCI range */ - msi->pages = __get_free_pages(GFP_KERNEL, 0); - msi->phys = virt_to_phys((void *)msi->pages); - host->msi = &msi->chip; - - return 0; - -err: - irq_domain_remove(msi->domain); - return err; -} - -static void tegra_pcie_enable_msi(struct tegra_pcie *pcie) -{ - const struct tegra_pcie_soc *soc = pcie->soc; - struct tegra_msi *msi = &pcie->msi; - u32 reg; - - afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); - afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); - /* this register is in 4K increments */ - afi_writel(pcie, 1, AFI_MSI_BAR_SZ); - - /* enable all MSI vectors */ - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7); - - /* and unmask the MSI interrupt */ - reg = afi_readl(pcie, AFI_INTR_MASK); - reg |= 
AFI_INTR_MASK_MSI_MASK; - afi_writel(pcie, reg, AFI_INTR_MASK); -} - -static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie) -{ - struct tegra_msi *msi = &pcie->msi; - unsigned int i, irq; - - free_pages(msi->pages, 0); - - if (msi->irq > 0) - free_irq(msi->irq, pcie); - - for (i = 0; i < INT_PCI_MSI_NR; i++) { - irq = irq_find_mapping(msi->domain, i); - if (irq > 0) - irq_dispose_mapping(irq); - } - - irq_domain_remove(msi->domain); -} - -static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) -{ - u32 value; - - /* mask the MSI interrupt */ - value = afi_readl(pcie, AFI_INTR_MASK); - value &= ~AFI_INTR_MASK_MSI_MASK; - afi_writel(pcie, value, AFI_INTR_MASK); - - /* disable all MSI vectors */ - afi_writel(pcie, 0, AFI_MSI_EN_VEC0); - afi_writel(pcie, 0, AFI_MSI_EN_VEC1); - afi_writel(pcie, 0, AFI_MSI_EN_VEC2); - afi_writel(pcie, 0, AFI_MSI_EN_VEC3); - afi_writel(pcie, 0, AFI_MSI_EN_VEC4); - afi_writel(pcie, 0, AFI_MSI_EN_VEC5); - afi_writel(pcie, 0, AFI_MSI_EN_VEC6); - afi_writel(pcie, 0, AFI_MSI_EN_VEC7); - - return 0; -} - -static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes, - u32 *xbar) -{ - struct device *dev = pcie->dev; - struct device_node *np = dev->of_node; - - if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { - switch (lanes) { - case 0x010004: - dev_info(dev, "4x1, 1x1 configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401; - return 0; - - case 0x010102: - dev_info(dev, "2x1, 1X1, 1x1 configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211; - return 0; - - case 0x010101: - dev_info(dev, "1x1, 1x1, 1x1 configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111; - return 0; - - default: - dev_info(dev, "wrong configuration updated in DT, " - "switching to default 2x1, 1x1, 1x1 " - "configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211; - return 0; - } - } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") || - of_device_is_compatible(np, 
"nvidia,tegra210-pcie")) { - switch (lanes) { - case 0x0000104: - dev_info(dev, "4x1, 1x1 configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1; - return 0; - - case 0x0000102: - dev_info(dev, "2x1, 1x1 configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1; - return 0; - } - } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { - switch (lanes) { - case 0x00000204: - dev_info(dev, "4x1, 2x1 configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420; - return 0; - - case 0x00020202: - dev_info(dev, "2x3 configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222; - return 0; - - case 0x00010104: - dev_info(dev, "4x1, 1x2 configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411; - return 0; - } - } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { - switch (lanes) { - case 0x00000004: - dev_info(dev, "single-mode configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE; - return 0; - - case 0x00000202: - dev_info(dev, "dual-mode configuration\n"); - *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL; - return 0; - } - } - - return -EINVAL; -} - -/* - * Check whether a given set of supplies is available in a device tree node. - * This is used to check whether the new or the legacy device tree bindings - * should be used. - */ -static bool of_regulator_bulk_available(struct device_node *np, - struct regulator_bulk_data *supplies, - unsigned int num_supplies) -{ - char property[32]; - unsigned int i; - - for (i = 0; i < num_supplies; i++) { - snprintf(property, 32, "%s-supply", supplies[i].supply); - - if (of_find_property(np, property, NULL) == NULL) - return false; - } - - return true; -} - -/* - * Old versions of the device tree binding for this device used a set of power - * supplies that didn't match the hardware inputs. This happened to work for a - * number of cases but is not future proof. 
However to preserve backwards- - * compatibility with old device trees, this function will try to use the old - * set of supplies. - */ -static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct device_node *np = dev->of_node; - - if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) - pcie->num_supplies = 3; - else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) - pcie->num_supplies = 2; - - if (pcie->num_supplies == 0) { - dev_err(dev, "device %pOF not supported in legacy mode\n", np); - return -ENODEV; - } - - pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, - sizeof(*pcie->supplies), - GFP_KERNEL); - if (!pcie->supplies) - return -ENOMEM; - - pcie->supplies[0].supply = "pex-clk"; - pcie->supplies[1].supply = "vdd"; - - if (pcie->num_supplies > 2) - pcie->supplies[2].supply = "avdd"; - - return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies); -} - -/* - * Obtains the list of regulators required for a particular generation of the - * IP block. - * - * This would've been nice to do simply by providing static tables for use - * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky - * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB) - * and either seems to be optional depending on which ports are being used. 
- */ -static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) -{ - struct device *dev = pcie->dev; - struct device_node *np = dev->of_node; - unsigned int i = 0; - - if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { - pcie->num_supplies = 4; - - pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, - sizeof(*pcie->supplies), - GFP_KERNEL); - if (!pcie->supplies) - return -ENOMEM; - - pcie->supplies[i++].supply = "dvdd-pex"; - pcie->supplies[i++].supply = "hvdd-pex-pll"; - pcie->supplies[i++].supply = "hvdd-pex"; - pcie->supplies[i++].supply = "vddio-pexctl-aud"; - } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) { - pcie->num_supplies = 6; - - pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, - sizeof(*pcie->supplies), - GFP_KERNEL); - if (!pcie->supplies) - return -ENOMEM; - - pcie->supplies[i++].supply = "avdd-pll-uerefe"; - pcie->supplies[i++].supply = "hvddio-pex"; - pcie->supplies[i++].supply = "dvddio-pex"; - pcie->supplies[i++].supply = "dvdd-pex-pll"; - pcie->supplies[i++].supply = "hvdd-pex-pll-e"; - pcie->supplies[i++].supply = "vddio-pex-ctl"; - } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { - pcie->num_supplies = 7; - - pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, - sizeof(*pcie->supplies), - GFP_KERNEL); - if (!pcie->supplies) - return -ENOMEM; - - pcie->supplies[i++].supply = "avddio-pex"; - pcie->supplies[i++].supply = "dvddio-pex"; - pcie->supplies[i++].supply = "avdd-pex-pll"; - pcie->supplies[i++].supply = "hvdd-pex"; - pcie->supplies[i++].supply = "hvdd-pex-pll-e"; - pcie->supplies[i++].supply = "vddio-pex-ctl"; - pcie->supplies[i++].supply = "avdd-pll-erefe"; - } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { - bool need_pexa = false, need_pexb = false; - - /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */ - if (lane_mask & 0x0f) - need_pexa = true; - - /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */ - if (lane_mask & 0x30) - need_pexb = 
true; - - pcie->num_supplies = 4 + (need_pexa ? 2 : 0) + - (need_pexb ? 2 : 0); - - pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, - sizeof(*pcie->supplies), - GFP_KERNEL); - if (!pcie->supplies) - return -ENOMEM; - - pcie->supplies[i++].supply = "avdd-pex-pll"; - pcie->supplies[i++].supply = "hvdd-pex"; - pcie->supplies[i++].supply = "vddio-pex-ctl"; - pcie->supplies[i++].supply = "avdd-plle"; - - if (need_pexa) { - pcie->supplies[i++].supply = "avdd-pexa"; - pcie->supplies[i++].supply = "vdd-pexa"; - } - - if (need_pexb) { - pcie->supplies[i++].supply = "avdd-pexb"; - pcie->supplies[i++].supply = "vdd-pexb"; - } - } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { - pcie->num_supplies = 5; - - pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, - sizeof(*pcie->supplies), - GFP_KERNEL); - if (!pcie->supplies) - return -ENOMEM; - - pcie->supplies[0].supply = "avdd-pex"; - pcie->supplies[1].supply = "vdd-pex"; - pcie->supplies[2].supply = "avdd-pex-pll"; - pcie->supplies[3].supply = "avdd-plle"; - pcie->supplies[4].supply = "vddio-pex-clk"; - } - - if (of_regulator_bulk_available(dev->of_node, pcie->supplies, - pcie->num_supplies)) - return devm_regulator_bulk_get(dev, pcie->num_supplies, - pcie->supplies); - - /* - * If not all regulators are available for this new scheme, assume - * that the device tree complies with an older version of the device - * tree binding. 
- */ - dev_info(dev, "using legacy DT binding for power supplies\n"); - - devm_kfree(dev, pcie->supplies); - pcie->num_supplies = 0; - - return tegra_pcie_get_legacy_regulators(pcie); -} - -static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct device_node *np = dev->of_node, *port; - const struct tegra_pcie_soc *soc = pcie->soc; - struct of_pci_range_parser parser; - struct of_pci_range range; - u32 lanes = 0, mask = 0; - unsigned int lane = 0; - struct resource res; - int err; - - if (of_pci_range_parser_init(&parser, np)) { - dev_err(dev, "missing \"ranges\" property\n"); - return -EINVAL; - } - - for_each_of_pci_range(&parser, &range) { - err = of_pci_range_to_resource(&range, np, &res); - if (err < 0) - return err; - - switch (res.flags & IORESOURCE_TYPE_BITS) { - case IORESOURCE_IO: - /* Track the bus -> CPU I/O mapping offset. */ - pcie->offset.io = res.start - range.pci_addr; - - memcpy(&pcie->pio, &res, sizeof(res)); - pcie->pio.name = np->full_name; - - /* - * The Tegra PCIe host bridge uses this to program the - * mapping of the I/O space to the physical address, - * so we override the .start and .end fields here that - * of_pci_range_to_resource() converted to I/O space. - * We also set the IORESOURCE_MEM type to clarify that - * the resource is in the physical memory space. - */ - pcie->io.start = range.cpu_addr; - pcie->io.end = range.cpu_addr + range.size - 1; - pcie->io.flags = IORESOURCE_MEM; - pcie->io.name = "I/O"; - - memcpy(&res, &pcie->io, sizeof(res)); - break; - - case IORESOURCE_MEM: - /* - * Track the bus -> CPU memory mapping offset. This - * assumes that the prefetchable and non-prefetchable - * regions will be the last of type IORESOURCE_MEM in - * the ranges property. 
- * */ - pcie->offset.mem = res.start - range.pci_addr; - - if (res.flags & IORESOURCE_PREFETCH) { - memcpy(&pcie->prefetch, &res, sizeof(res)); - pcie->prefetch.name = "prefetchable"; - } else { - memcpy(&pcie->mem, &res, sizeof(res)); - pcie->mem.name = "non-prefetchable"; - } - break; - } - } - - err = of_pci_parse_bus_range(np, &pcie->busn); - if (err < 0) { - dev_err(dev, "failed to parse ranges property: %d\n", err); - pcie->busn.name = np->name; - pcie->busn.start = 0; - pcie->busn.end = 0xff; - pcie->busn.flags = IORESOURCE_BUS; - } - - /* parse root ports */ - for_each_child_of_node(np, port) { - struct tegra_pcie_port *rp; - unsigned int index; - u32 value; - - err = of_pci_get_devfn(port); - if (err < 0) { - dev_err(dev, "failed to parse address: %d\n", err); - return err; - } - - index = PCI_SLOT(err); - - if (index < 1 || index > soc->num_ports) { - dev_err(dev, "invalid port number: %d\n", index); - return -EINVAL; - } - - index--; - - err = of_property_read_u32(port, "nvidia,num-lanes", &value); - if (err < 0) { - dev_err(dev, "failed to parse # of lanes: %d\n", - err); - return err; - } - - if (value > 16) { - dev_err(dev, "invalid # of lanes: %u\n", value); - return -EINVAL; - } - - lanes |= value << (index << 3); - - if (!of_device_is_available(port)) { - lane += value; - continue; - } - - mask |= ((1 << value) - 1) << lane; - lane += value; - - rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); - if (!rp) - return -ENOMEM; - - err = of_address_to_resource(port, 0, &rp->regs); - if (err < 0) { - dev_err(dev, "failed to parse address: %d\n", err); - return err; - } - - INIT_LIST_HEAD(&rp->list); - rp->index = index; - rp->lanes = value; - rp->pcie = pcie; - rp->np = port; - - rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); - if (IS_ERR(rp->base)) - return PTR_ERR(rp->base); - - list_add_tail(&rp->list, &pcie->ports); - } - - err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config); - if (err < 0) { - dev_err(dev, "invalid lane 
configuration\n"); - return err; - } - - err = tegra_pcie_get_regulators(pcie, mask); - if (err < 0) - return err; - - return 0; -} - -/* - * FIXME: If there are no PCIe cards attached, then calling this function - * can result in the increase of the bootup time as there are big timeout - * loops. - */ -#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */ -static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port) -{ - struct device *dev = port->pcie->dev; - unsigned int retries = 3; - unsigned long value; - - /* override presence detection */ - value = readl(port->base + RP_PRIV_MISC); - value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT; - value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT; - writel(value, port->base + RP_PRIV_MISC); - - do { - unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT; - - do { - value = readl(port->base + RP_VEND_XP); - - if (value & RP_VEND_XP_DL_UP) - break; - - usleep_range(1000, 2000); - } while (--timeout); - - if (!timeout) { - dev_err(dev, "link %u down, retrying\n", port->index); - goto retry; - } - - timeout = TEGRA_PCIE_LINKUP_TIMEOUT; - - do { - value = readl(port->base + RP_LINK_CONTROL_STATUS); - - if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) - return true; - - usleep_range(1000, 2000); - } while (--timeout); - -retry: - tegra_pcie_port_reset(port); - } while (--retries); - - return false; -} - -static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct tegra_pcie_port *port, *tmp; - - list_for_each_entry_safe(port, tmp, &pcie->ports, list) { - dev_info(dev, "probing port %u, using %u lanes\n", - port->index, port->lanes); - - tegra_pcie_port_enable(port); - - if (tegra_pcie_port_check_link(port)) - continue; - - dev_info(dev, "link %u down, ignoring\n", port->index); - - tegra_pcie_port_disable(port); - tegra_pcie_port_free(port); - } -} - -static void tegra_pcie_disable_ports(struct tegra_pcie *pcie) -{ - struct tegra_pcie_port *port, *tmp; - - 
list_for_each_entry_safe(port, tmp, &pcie->ports, list) - tegra_pcie_port_disable(port); -} - -static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = { - { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, - { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, -}; - -static const struct tegra_pcie_soc tegra20_pcie = { - .num_ports = 2, - .ports = tegra20_pcie_ports, - .msi_base_shift = 0, - .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, - .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, - .pads_refclk_cfg0 = 0xfa5cfa5c, - .has_pex_clkreq_en = false, - .has_pex_bias_ctrl = false, - .has_intr_prsnt_sense = false, - .has_cml_clk = false, - .has_gen2 = false, - .force_pca_enable = false, - .program_uphy = true, -}; - -static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = { - { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, - { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, - { .pme.turnoff_bit = 16, .pme.ack_bit = 18 }, -}; - -static const struct tegra_pcie_soc tegra30_pcie = { - .num_ports = 3, - .ports = tegra30_pcie_ports, - .msi_base_shift = 8, - .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, - .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, - .pads_refclk_cfg0 = 0xfa5cfa5c, - .pads_refclk_cfg1 = 0xfa5cfa5c, - .has_pex_clkreq_en = true, - .has_pex_bias_ctrl = true, - .has_intr_prsnt_sense = true, - .has_cml_clk = true, - .has_gen2 = false, - .force_pca_enable = false, - .program_uphy = true, -}; - -static const struct tegra_pcie_soc tegra124_pcie = { - .num_ports = 2, - .ports = tegra20_pcie_ports, - .msi_base_shift = 8, - .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, - .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, - .pads_refclk_cfg0 = 0x44ac44ac, - .has_pex_clkreq_en = true, - .has_pex_bias_ctrl = true, - .has_intr_prsnt_sense = true, - .has_cml_clk = true, - .has_gen2 = true, - .force_pca_enable = false, - .program_uphy = true, -}; - -static const struct tegra_pcie_soc tegra210_pcie = { - .num_ports = 2, - .ports = tegra20_pcie_ports, - .msi_base_shift = 8, - .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, - 
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, - .pads_refclk_cfg0 = 0x90b890b8, - .has_pex_clkreq_en = true, - .has_pex_bias_ctrl = true, - .has_intr_prsnt_sense = true, - .has_cml_clk = true, - .has_gen2 = true, - .force_pca_enable = true, - .program_uphy = true, -}; - -static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = { - { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, - { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, - { .pme.turnoff_bit = 12, .pme.ack_bit = 14 }, -}; - -static const struct tegra_pcie_soc tegra186_pcie = { - .num_ports = 3, - .ports = tegra186_pcie_ports, - .msi_base_shift = 8, - .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, - .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, - .pads_refclk_cfg0 = 0x80b880b8, - .pads_refclk_cfg1 = 0x000480b8, - .has_pex_clkreq_en = true, - .has_pex_bias_ctrl = true, - .has_intr_prsnt_sense = true, - .has_cml_clk = false, - .has_gen2 = true, - .force_pca_enable = false, - .program_uphy = false, -}; - -static const struct of_device_id tegra_pcie_of_match[] = { - { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie }, - { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie }, - { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie }, - { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie }, - { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie }, - { }, -}; - -static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos) -{ - struct tegra_pcie *pcie = s->private; - - if (list_empty(&pcie->ports)) - return NULL; - - seq_printf(s, "Index Status\n"); - - return seq_list_start(&pcie->ports, *pos); -} - -static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos) -{ - struct tegra_pcie *pcie = s->private; - - return seq_list_next(v, &pcie->ports, pos); -} - -static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v) -{ -} - -static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v) -{ - bool up = false, active = false; - struct 
tegra_pcie_port *port; - unsigned int value; - - port = list_entry(v, struct tegra_pcie_port, list); - - value = readl(port->base + RP_VEND_XP); - - if (value & RP_VEND_XP_DL_UP) - up = true; - - value = readl(port->base + RP_LINK_CONTROL_STATUS); - - if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) - active = true; - - seq_printf(s, "%2u ", port->index); - - if (up) - seq_printf(s, "up"); - - if (active) { - if (up) - seq_printf(s, ", "); - - seq_printf(s, "active"); - } - - seq_printf(s, "\n"); - return 0; -} - -static const struct seq_operations tegra_pcie_ports_seq_ops = { - .start = tegra_pcie_ports_seq_start, - .next = tegra_pcie_ports_seq_next, - .stop = tegra_pcie_ports_seq_stop, - .show = tegra_pcie_ports_seq_show, -}; - -static int tegra_pcie_ports_open(struct inode *inode, struct file *file) -{ - struct tegra_pcie *pcie = inode->i_private; - struct seq_file *s; - int err; - - err = seq_open(file, &tegra_pcie_ports_seq_ops); - if (err) - return err; - - s = file->private_data; - s->private = pcie; - - return 0; -} - -static const struct file_operations tegra_pcie_ports_ops = { - .owner = THIS_MODULE, - .open = tegra_pcie_ports_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie) -{ - debugfs_remove_recursive(pcie->debugfs); - pcie->debugfs = NULL; -} - -static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie) -{ - struct dentry *file; - - pcie->debugfs = debugfs_create_dir("pcie", NULL); - if (!pcie->debugfs) - return -ENOMEM; - - file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, - pcie, &tegra_pcie_ports_ops); - if (!file) - goto remove; - - return 0; - -remove: - tegra_pcie_debugfs_exit(pcie); - return -ENOMEM; -} - -static int tegra_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct pci_host_bridge *host; - struct tegra_pcie *pcie; - struct pci_bus *child; - int err; - - host = 
devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); - if (!host) - return -ENOMEM; - - pcie = pci_host_bridge_priv(host); - host->sysdata = pcie; - platform_set_drvdata(pdev, pcie); - - pcie->soc = of_device_get_match_data(dev); - INIT_LIST_HEAD(&pcie->ports); - pcie->dev = dev; - - err = tegra_pcie_parse_dt(pcie); - if (err < 0) - return err; - - err = tegra_pcie_get_resources(pcie); - if (err < 0) { - dev_err(dev, "failed to request resources: %d\n", err); - return err; - } - - err = tegra_pcie_msi_setup(pcie); - if (err < 0) { - dev_err(dev, "failed to enable MSI support: %d\n", err); - goto put_resources; - } - - pm_runtime_enable(pcie->dev); - err = pm_runtime_get_sync(pcie->dev); - if (err) { - dev_err(dev, "fail to enable pcie controller: %d\n", err); - goto teardown_msi; - } - - err = tegra_pcie_request_resources(pcie); - if (err) - goto pm_runtime_put; - - host->busnr = pcie->busn.start; - host->dev.parent = &pdev->dev; - host->ops = &tegra_pcie_ops; - host->map_irq = tegra_pcie_map_irq; - host->swizzle_irq = pci_common_swizzle; - - err = pci_scan_root_bus_bridge(host); - if (err < 0) { - dev_err(dev, "failed to register host: %d\n", err); - goto free_resources; - } - - pci_bus_size_bridges(host->bus); - pci_bus_assign_resources(host->bus); - - list_for_each_entry(child, &host->bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(host->bus); - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_pcie_debugfs_init(pcie); - if (err < 0) - dev_err(dev, "failed to setup debugfs: %d\n", err); - } - - return 0; - -free_resources: - tegra_pcie_free_resources(pcie); -pm_runtime_put: - pm_runtime_put_sync(pcie->dev); - pm_runtime_disable(pcie->dev); -teardown_msi: - tegra_pcie_msi_teardown(pcie); -put_resources: - tegra_pcie_put_resources(pcie); - return err; -} - -static int tegra_pcie_remove(struct platform_device *pdev) -{ - struct tegra_pcie *pcie = platform_get_drvdata(pdev); - struct pci_host_bridge *host = 
pci_host_bridge_from_priv(pcie); - struct tegra_pcie_port *port, *tmp; - - if (IS_ENABLED(CONFIG_DEBUG_FS)) - tegra_pcie_debugfs_exit(pcie); - - pci_stop_root_bus(host->bus); - pci_remove_root_bus(host->bus); - tegra_pcie_free_resources(pcie); - pm_runtime_put_sync(pcie->dev); - pm_runtime_disable(pcie->dev); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - tegra_pcie_msi_teardown(pcie); - - tegra_pcie_put_resources(pcie); - - list_for_each_entry_safe(port, tmp, &pcie->ports, list) - tegra_pcie_port_free(port); - - return 0; -} - -static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev) -{ - struct tegra_pcie *pcie = dev_get_drvdata(dev); - struct tegra_pcie_port *port; - - list_for_each_entry(port, &pcie->ports, list) - tegra_pcie_pme_turnoff(port); - - tegra_pcie_disable_ports(pcie); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - tegra_pcie_disable_msi(pcie); - - tegra_pcie_disable_controller(pcie); - tegra_pcie_power_off(pcie); - - return 0; -} - -static int __maybe_unused tegra_pcie_pm_resume(struct device *dev) -{ - struct tegra_pcie *pcie = dev_get_drvdata(dev); - int err; - - err = tegra_pcie_power_on(pcie); - if (err) { - dev_err(dev, "tegra pcie power on fail: %d\n", err); - return err; - } - err = tegra_pcie_enable_controller(pcie); - if (err) { - dev_err(dev, "tegra pcie controller enable fail: %d\n", err); - goto poweroff; - } - tegra_pcie_setup_translations(pcie); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - tegra_pcie_enable_msi(pcie); - - tegra_pcie_enable_ports(pcie); - - return 0; - -poweroff: - tegra_pcie_power_off(pcie); - - return err; -} - -static const struct dev_pm_ops tegra_pcie_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL) - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, - tegra_pcie_pm_resume) -}; - -static struct platform_driver tegra_pcie_driver = { - .driver = { - .name = "tegra-pcie", - .of_match_table = tegra_pcie_of_match, - .suppress_bind_attrs = true, - .pm = &tegra_pcie_pm_ops, - }, - .probe = 
tegra_pcie_probe, - .remove = tegra_pcie_remove, -}; -module_platform_driver(tegra_pcie_driver); -MODULE_LICENSE("GPL"); diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c deleted file mode 100644 index 32d1d7b81ef4..000000000000 --- a/drivers/pci/host/pci-thunder-ecam.c +++ /dev/null @@ -1,380 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2015, 2016 Cavium, Inc. - */ - -#include -#include -#include -#include -#include -#include -#include - -#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) - -static void set_val(u32 v, int where, int size, u32 *val) -{ - int shift = (where & 3) * 8; - - pr_debug("set_val %04x: %08x\n", (unsigned)(where & ~3), v); - v >>= shift; - if (size == 1) - v &= 0xff; - else if (size == 2) - v &= 0xffff; - *val = v; -} - -static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 *val) -{ - void __iomem *addr; - u32 v; - - /* Entries are 16-byte aligned; bits[2,3] select word in entry */ - int where_a = where & 0xc; - - if (where_a == 0) { - set_val(e0, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0x4) { - addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - v = readl(addr); - v &= ~0xf; - v |= 2; /* EA entry-1. Base-L */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0x8) { - u32 barl_orig; - u32 barl_rb; - - addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - barl_orig = readl(addr + 0); - writel(0xffffffff, addr + 0); - barl_rb = readl(addr + 0); - writel(barl_orig, addr + 0); - /* zeros in unsettable bits */ - v = ~barl_rb & ~3; - v |= 0xc; /* EA entry-2. 
Offset-L */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xc) { - addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */ - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - v = readl(addr); /* EA entry-3. Base-H */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - return PCIBIOS_DEVICE_NOT_FOUND; -} - -static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - struct pci_config_window *cfg = bus->sysdata; - int where_a = where & ~3; - void __iomem *addr; - u32 node_bits; - u32 v; - - /* EA Base[63:32] may be missing some bits ... */ - switch (where_a) { - case 0xa8: - case 0xbc: - case 0xd0: - case 0xe4: - break; - default: - return pci_generic_config_read(bus, devfn, where, size, val); - } - - addr = bus->ops->map_bus(bus, devfn, where_a); - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - v = readl(addr); - - /* - * Bit 44 of the 64-bit Base must match the same bit in - * the config space access window. Since we are working with - * the high-order 32 bits, shift everything down by 32 bits. 
- */ - node_bits = (cfg->res.start >> 32) & (1 << 12); - - v |= node_bits; - set_val(v, where, size, val); - - return PCIBIOS_SUCCESSFUL; -} - -static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - u32 v; - u32 vendor_device; - u32 class_rev; - void __iomem *addr; - int cfg_type; - int where_a = where & ~3; - - addr = bus->ops->map_bus(bus, devfn, 0xc); - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - v = readl(addr); - - /* Check for non type-00 header */ - cfg_type = (v >> 16) & 0x7f; - - addr = bus->ops->map_bus(bus, devfn, 8); - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - class_rev = readl(addr); - if (class_rev == 0xffffffff) - goto no_emulation; - - if ((class_rev & 0xff) >= 8) { - /* Pass-2 handling */ - if (cfg_type) - goto no_emulation; - return thunder_ecam_p2_config_read(bus, devfn, where, - size, val); - } - - /* - * All BARs have fixed addresses specified by the EA - * capability; they must return zero on read. 
- */ - if (cfg_type == 0 && - ((where >= 0x10 && where < 0x2c) || - (where >= 0x1a4 && where < 0x1bc))) { - /* BAR or SR-IOV BAR */ - *val = 0; - return PCIBIOS_SUCCESSFUL; - } - - addr = bus->ops->map_bus(bus, devfn, 0); - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - vendor_device = readl(addr); - if (vendor_device == 0xffffffff) - goto no_emulation; - - pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n", - vendor_device & 0xffff, vendor_device >> 16, class_rev, - (unsigned) where, devfn); - - /* Check for non type-00 header */ - if (cfg_type == 0) { - bool has_msix; - bool is_nic = (vendor_device == 0xa01e177d); - bool is_tns = (vendor_device == 0xa01f177d); - - addr = bus->ops->map_bus(bus, devfn, 0x70); - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - /* E_CAP */ - v = readl(addr); - has_msix = (v & 0xff00) != 0; - - if (!has_msix && where_a == 0x70) { - v |= 0xbc00; /* next capability is EA at 0xbc */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xb0) { - addr = bus->ops->map_bus(bus, devfn, where_a); - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - v = readl(addr); - if (v & 0xff00) - pr_err("Bad MSIX cap header: %08x\n", v); - v |= 0xbc00; /* next capability is EA at 0xbc */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xbc) { - if (is_nic) - v = 0x40014; /* EA last in chain, 4 entries */ - else if (is_tns) - v = 0x30014; /* EA last in chain, 3 entries */ - else if (has_msix) - v = 0x20014; /* EA last in chain, 2 entries */ - else - v = 0x10014; /* EA last in chain, 1 entry */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a >= 0xc0 && where_a < 0xd0) - /* EA entry-0. PP=0, BAR0 Size:3 */ - return handle_ea_bar(0x80ff0003, - 0x10, bus, devfn, where, - size, val); - if (where_a >= 0xd0 && where_a < 0xe0 && has_msix) - /* EA entry-1. 
PP=0, BAR4 Size:3 */ - return handle_ea_bar(0x80ff0043, - 0x20, bus, devfn, where, - size, val); - if (where_a >= 0xe0 && where_a < 0xf0 && is_tns) - /* EA entry-2. PP=0, BAR2, Size:3 */ - return handle_ea_bar(0x80ff0023, - 0x18, bus, devfn, where, - size, val); - if (where_a >= 0xe0 && where_a < 0xf0 && is_nic) - /* EA entry-2. PP=4, VF_BAR0 (9), Size:3 */ - return handle_ea_bar(0x80ff0493, - 0x1a4, bus, devfn, where, - size, val); - if (where_a >= 0xf0 && where_a < 0x100 && is_nic) - /* EA entry-3. PP=4, VF_BAR4 (d), Size:3 */ - return handle_ea_bar(0x80ff04d3, - 0x1b4, bus, devfn, where, - size, val); - } else if (cfg_type == 1) { - bool is_rsl_bridge = devfn == 0x08; - bool is_rad_bridge = devfn == 0xa0; - bool is_zip_bridge = devfn == 0xa8; - bool is_dfa_bridge = devfn == 0xb0; - bool is_nic_bridge = devfn == 0x10; - - if (where_a == 0x70) { - addr = bus->ops->map_bus(bus, devfn, where_a); - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - v = readl(addr); - if (v & 0xff00) - pr_err("Bad PCIe cap header: %08x\n", v); - v |= 0xbc00; /* next capability is EA at 0xbc */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xbc) { - if (is_nic_bridge) - v = 0x10014; /* EA last in chain, 1 entry */ - else - v = 0x00014; /* EA last in chain, no entries */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xc0) { - if (is_rsl_bridge || is_nic_bridge) - v = 0x0101; /* subordinate:secondary = 1:1 */ - else if (is_rad_bridge) - v = 0x0202; /* subordinate:secondary = 2:2 */ - else if (is_zip_bridge) - v = 0x0303; /* subordinate:secondary = 3:3 */ - else if (is_dfa_bridge) - v = 0x0404; /* subordinate:secondary = 4:4 */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xc4 && is_nic_bridge) { - /* Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4 */ - v = 0x80ff0564; - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xc8 && 
is_nic_bridge) { - v = 0x00000002; /* Base-L 64-bit */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xcc && is_nic_bridge) { - v = 0xfffffffe; /* MaxOffset-L 64-bit */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xd0 && is_nic_bridge) { - v = 0x00008430; /* NIC Base-H */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - if (where_a == 0xd4 && is_nic_bridge) { - v = 0x0000000f; /* MaxOffset-H */ - set_val(v, where, size, val); - return PCIBIOS_SUCCESSFUL; - } - } -no_emulation: - return pci_generic_config_read(bus, devfn, where, size, val); -} - -static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - /* - * All BARs have fixed addresses; ignore BAR writes so they - * don't get corrupted. - */ - if ((where >= 0x10 && where < 0x2c) || - (where >= 0x1a4 && where < 0x1bc)) - /* BAR or SR-IOV BAR */ - return PCIBIOS_SUCCESSFUL; - - return pci_generic_config_write(bus, devfn, where, size, val); -} - -struct pci_ecam_ops pci_thunder_ecam_ops = { - .bus_shift = 20, - .pci_ops = { - .map_bus = pci_ecam_map_bus, - .read = thunder_ecam_config_read, - .write = thunder_ecam_config_write, - } -}; - -#ifdef CONFIG_PCI_HOST_THUNDER_ECAM - -static const struct of_device_id thunder_ecam_of_match[] = { - { .compatible = "cavium,pci-host-thunder-ecam" }, - { }, -}; - -static int thunder_ecam_probe(struct platform_device *pdev) -{ - return pci_host_common_probe(pdev, &pci_thunder_ecam_ops); -} - -static struct platform_driver thunder_ecam_driver = { - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = thunder_ecam_of_match, - .suppress_bind_attrs = true, - }, - .probe = thunder_ecam_probe, -}; -builtin_platform_driver(thunder_ecam_driver); - -#endif -#endif diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c deleted file mode 100644 index f127ce8bd4ef..000000000000 --- 
a/drivers/pci/host/pci-thunder-pem.c +++ /dev/null @@ -1,473 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2015 - 2016 Cavium, Inc. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "../pci.h" - -#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) - -#define PEM_CFG_WR 0x28 -#define PEM_CFG_RD 0x30 - -struct thunder_pem_pci { - u32 ea_entry[3]; - void __iomem *pem_reg_base; -}; - -static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - u64 read_val, tmp_val; - struct pci_config_window *cfg = bus->sysdata; - struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; - - if (devfn != 0 || where >= 2048) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - /* - * 32-bit accesses only. Write the address to the low order - * bits of PEM_CFG_RD, then trigger the read by reading back. - * The config data lands in the upper 32-bits of PEM_CFG_RD. - */ - read_val = where & ~3ull; - writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD); - read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); - read_val >>= 32; - - /* - * The config space contains some garbage, fix it up. Also - * synthesize an EA capability for the BAR used by MSI-X. - */ - switch (where & ~3) { - case 0x40: - read_val &= 0xffff00ff; - read_val |= 0x00007000; /* Skip MSI CAP */ - break; - case 0x70: /* Express Cap */ - /* - * Change PME interrupt to vector 2 on T88 where it - * reads as 0, else leave it alone. - */ - if (!(read_val & (0x1f << 25))) - read_val |= (2u << 25); - break; - case 0xb0: /* MSI-X Cap */ - /* TableSize=2 or 4, Next Cap is EA */ - read_val &= 0xc00000ff; - /* - * If Express Cap(0x70) raw PME vector reads as 0 we are on - * T88 and TableSize is reported as 4, else TableSize - * is 2. 
- */ - writeq(0x70, pem_pci->pem_reg_base + PEM_CFG_RD); - tmp_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); - tmp_val >>= 32; - if (!(tmp_val & (0x1f << 25))) - read_val |= 0x0003bc00; - else - read_val |= 0x0001bc00; - break; - case 0xb4: - /* Table offset=0, BIR=0 */ - read_val = 0x00000000; - break; - case 0xb8: - /* BPA offset=0xf0000, BIR=0 */ - read_val = 0x000f0000; - break; - case 0xbc: - /* EA, 1 entry, no next Cap */ - read_val = 0x00010014; - break; - case 0xc0: - /* DW2 for type-1 */ - read_val = 0x00000000; - break; - case 0xc4: - /* Entry BEI=0, PP=0x00, SP=0xff, ES=3 */ - read_val = 0x80ff0003; - break; - case 0xc8: - read_val = pem_pci->ea_entry[0]; - break; - case 0xcc: - read_val = pem_pci->ea_entry[1]; - break; - case 0xd0: - read_val = pem_pci->ea_entry[2]; - break; - default: - break; - } - read_val >>= (8 * (where & 3)); - switch (size) { - case 1: - read_val &= 0xff; - break; - case 2: - read_val &= 0xffff; - break; - default: - break; - } - *val = read_val; - return PCIBIOS_SUCCESSFUL; -} - -static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - struct pci_config_window *cfg = bus->sysdata; - - if (bus->number < cfg->busr.start || - bus->number > cfg->busr.end) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* - * The first device on the bus is the PEM PCIe bridge. - * Special case its config access. - */ - if (bus->number == cfg->busr.start) - return thunder_pem_bridge_read(bus, devfn, where, size, val); - - return pci_generic_config_read(bus, devfn, where, size, val); -} - -/* - * Some of the w1c_bits below also include read-only or non-writable - * reserved bits, this makes the code simpler and is OK as the bits - * are not affected by writing zeros to them. 
- */ -static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned) -{ - u32 w1c_bits = 0; - - switch (where_aligned) { - case 0x04: /* Command/Status */ - case 0x1c: /* Base and I/O Limit/Secondary Status */ - w1c_bits = 0xff000000; - break; - case 0x44: /* Power Management Control and Status */ - w1c_bits = 0xfffffe00; - break; - case 0x78: /* Device Control/Device Status */ - case 0x80: /* Link Control/Link Status */ - case 0x88: /* Slot Control/Slot Status */ - case 0x90: /* Root Status */ - case 0xa0: /* Link Control 2 Registers/Link Status 2 */ - w1c_bits = 0xffff0000; - break; - case 0x104: /* Uncorrectable Error Status */ - case 0x110: /* Correctable Error Status */ - case 0x130: /* Error Status */ - case 0x160: /* Link Control 4 */ - w1c_bits = 0xffffffff; - break; - default: - break; - } - return w1c_bits; -} - -/* Some bits must be written to one so they appear to be read-only. */ -static u32 thunder_pem_bridge_w1_bits(u64 where_aligned) -{ - u32 w1_bits; - - switch (where_aligned) { - case 0x1c: /* I/O Base / I/O Limit, Secondary Status */ - /* Force 32-bit I/O addressing. */ - w1_bits = 0x0101; - break; - case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */ - /* Force 64-bit addressing */ - w1_bits = 0x00010001; - break; - default: - w1_bits = 0; - break; - } - return w1_bits; -} - -static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - struct pci_config_window *cfg = bus->sysdata; - struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; - u64 write_val, read_val; - u64 where_aligned = where & ~3ull; - u32 mask = 0; - - - if (devfn != 0 || where >= 2048) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* - * 32-bit accesses only. If the write is for a size smaller - * than 32-bits, we must first read the 32-bit value and merge - * in the desired bits and then write the whole 32-bits back - * out. 
- */ - switch (size) { - case 1: - writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); - read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); - read_val >>= 32; - mask = ~(0xff << (8 * (where & 3))); - read_val &= mask; - val = (val & 0xff) << (8 * (where & 3)); - val |= (u32)read_val; - break; - case 2: - writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); - read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); - read_val >>= 32; - mask = ~(0xffff << (8 * (where & 3))); - read_val &= mask; - val = (val & 0xffff) << (8 * (where & 3)); - val |= (u32)read_val; - break; - default: - break; - } - - /* - * By expanding the write width to 32 bits, we may - * inadvertently hit some W1C bits that were not intended to - * be written. Calculate the mask that must be applied to the - * data to be written to avoid these cases. - */ - if (mask) { - u32 w1c_bits = thunder_pem_bridge_w1c_bits(where); - - if (w1c_bits) { - mask &= w1c_bits; - val &= ~mask; - } - } - - /* - * Some bits must be read-only with value of one. Since the - * access method allows these to be cleared if a zero is - * written, force them to one before writing. - */ - val |= thunder_pem_bridge_w1_bits(where_aligned); - - /* - * Low order bits are the config address, the high order 32 - * bits are the data to be written. - */ - write_val = (((u64)val) << 32) | where_aligned; - writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR); - return PCIBIOS_SUCCESSFUL; -} - -static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - struct pci_config_window *cfg = bus->sysdata; - - if (bus->number < cfg->busr.start || - bus->number > cfg->busr.end) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * The first device on the bus is the PEM PCIe bridge. - * Special case its config access. 
- */ - if (bus->number == cfg->busr.start) - return thunder_pem_bridge_write(bus, devfn, where, size, val); - - - return pci_generic_config_write(bus, devfn, where, size, val); -} - -static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg, - struct resource *res_pem) -{ - struct thunder_pem_pci *pem_pci; - resource_size_t bar4_start; - - pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL); - if (!pem_pci) - return -ENOMEM; - - pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000); - if (!pem_pci->pem_reg_base) - return -ENOMEM; - - /* - * The MSI-X BAR for the PEM and AER interrupts is located at - * a fixed offset from the PEM register base. Generate a - * fragment of the synthesized Enhanced Allocation capability - * structure here for the BAR. - */ - bar4_start = res_pem->start + 0xf00000; - pem_pci->ea_entry[0] = (u32)bar4_start | 2; - pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u; - pem_pci->ea_entry[2] = (u32)(bar4_start >> 32); - - cfg->priv = pem_pci; - return 0; -} - -#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) - -#define PEM_RES_BASE 0x87e0c0000000UL -#define PEM_NODE_MASK GENMASK(45, 44) -#define PEM_INDX_MASK GENMASK(26, 24) -#define PEM_MIN_DOM_IN_NODE 4 -#define PEM_MAX_DOM_IN_NODE 10 - -static void thunder_pem_reserve_range(struct device *dev, int seg, - struct resource *r) -{ - resource_size_t start = r->start, end = r->end; - struct resource *res; - const char *regionid; - - regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg); - if (!regionid) - return; - - res = request_mem_region(start, end - start + 1, regionid); - if (res) - res->flags &= ~IORESOURCE_BUSY; - else - kfree(regionid); - - dev_info(dev, "%pR %s reserved\n", r, - res ? 
"has been" : "could not be"); -} - -static void thunder_pem_legacy_fw(struct acpi_pci_root *root, - struct resource *res_pem) -{ - int node = acpi_get_node(root->device->handle); - int index; - - if (node == NUMA_NO_NODE) - node = 0; - - index = root->segment - PEM_MIN_DOM_IN_NODE; - index -= node * PEM_MAX_DOM_IN_NODE; - res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | - FIELD_PREP(PEM_INDX_MASK, index); - res_pem->flags = IORESOURCE_MEM; -} - -static int thunder_pem_acpi_init(struct pci_config_window *cfg) -{ - struct device *dev = cfg->parent; - struct acpi_device *adev = to_acpi_device(dev); - struct acpi_pci_root *root = acpi_driver_data(adev); - struct resource *res_pem; - int ret; - - res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL); - if (!res_pem) - return -ENOMEM; - - ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem); - - /* - * If we fail to gather resources it means that we run with old - * FW where we need to calculate PEM-specific resources manually. - */ - if (ret) { - thunder_pem_legacy_fw(root, res_pem); - /* - * Reserve 64K size PEM specific resources. The full 16M range - * size is required for thunder_pem_init() call. - */ - res_pem->end = res_pem->start + SZ_64K - 1; - thunder_pem_reserve_range(dev, root->segment, res_pem); - res_pem->end = res_pem->start + SZ_16M - 1; - - /* Reserve PCI configuration space as well. 
*/ - thunder_pem_reserve_range(dev, root->segment, &cfg->res); - } - - return thunder_pem_init(dev, cfg, res_pem); -} - -struct pci_ecam_ops thunder_pem_ecam_ops = { - .bus_shift = 24, - .init = thunder_pem_acpi_init, - .pci_ops = { - .map_bus = pci_ecam_map_bus, - .read = thunder_pem_config_read, - .write = thunder_pem_config_write, - } -}; - -#endif - -#ifdef CONFIG_PCI_HOST_THUNDER_PEM - -static int thunder_pem_platform_init(struct pci_config_window *cfg) -{ - struct device *dev = cfg->parent; - struct platform_device *pdev = to_platform_device(dev); - struct resource *res_pem; - - if (!dev->of_node) - return -EINVAL; - - /* - * The second register range is the PEM bridge to the PCIe - * bus. It has a different config access method than those - * devices behind the bridge. - */ - res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res_pem) { - dev_err(dev, "missing \"reg[1]\"property\n"); - return -EINVAL; - } - - return thunder_pem_init(dev, cfg, res_pem); -} - -static struct pci_ecam_ops pci_thunder_pem_ops = { - .bus_shift = 24, - .init = thunder_pem_platform_init, - .pci_ops = { - .map_bus = pci_ecam_map_bus, - .read = thunder_pem_config_read, - .write = thunder_pem_config_write, - } -}; - -static const struct of_device_id thunder_pem_of_match[] = { - { .compatible = "cavium,pci-host-thunder-pem" }, - { }, -}; - -static int thunder_pem_probe(struct platform_device *pdev) -{ - return pci_host_common_probe(pdev, &pci_thunder_pem_ops); -} - -static struct platform_driver thunder_pem_driver = { - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = thunder_pem_of_match, - .suppress_bind_attrs = true, - }, - .probe = thunder_pem_probe, -}; -builtin_platform_driver(thunder_pem_driver); - -#endif -#endif diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c deleted file mode 100644 index 68b8bfbdb867..000000000000 --- a/drivers/pci/host/pci-v3-semi.c +++ /dev/null @@ -1,963 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - 
* Support for V3 Semiconductor PCI Local Bus to PCI Bridge - * Copyright (C) 2017 Linus Walleij - * - * Based on the code from arch/arm/mach-integrator/pci_v3.c - * Copyright (C) 1999 ARM Limited - * Copyright (C) 2000-2001 Deep Blue Solutions Ltd - * - * Contributors to the old driver include: - * Russell King - * David A. Rusling (uHAL, ARM Firmware suite) - * Rob Herring - * Liviu Dudau - * Grant Likely - * Arnd Bergmann - * Bjorn Helgaas - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -#define V3_PCI_VENDOR 0x00000000 -#define V3_PCI_DEVICE 0x00000002 -#define V3_PCI_CMD 0x00000004 -#define V3_PCI_STAT 0x00000006 -#define V3_PCI_CC_REV 0x00000008 -#define V3_PCI_HDR_CFG 0x0000000C -#define V3_PCI_IO_BASE 0x00000010 -#define V3_PCI_BASE0 0x00000014 -#define V3_PCI_BASE1 0x00000018 -#define V3_PCI_SUB_VENDOR 0x0000002C -#define V3_PCI_SUB_ID 0x0000002E -#define V3_PCI_ROM 0x00000030 -#define V3_PCI_BPARAM 0x0000003C -#define V3_PCI_MAP0 0x00000040 -#define V3_PCI_MAP1 0x00000044 -#define V3_PCI_INT_STAT 0x00000048 -#define V3_PCI_INT_CFG 0x0000004C -#define V3_LB_BASE0 0x00000054 -#define V3_LB_BASE1 0x00000058 -#define V3_LB_MAP0 0x0000005E -#define V3_LB_MAP1 0x00000062 -#define V3_LB_BASE2 0x00000064 -#define V3_LB_MAP2 0x00000066 -#define V3_LB_SIZE 0x00000068 -#define V3_LB_IO_BASE 0x0000006E -#define V3_FIFO_CFG 0x00000070 -#define V3_FIFO_PRIORITY 0x00000072 -#define V3_FIFO_STAT 0x00000074 -#define V3_LB_ISTAT 0x00000076 -#define V3_LB_IMASK 0x00000077 -#define V3_SYSTEM 0x00000078 -#define V3_LB_CFG 0x0000007A -#define V3_PCI_CFG 0x0000007C -#define V3_DMA_PCI_ADR0 0x00000080 -#define V3_DMA_PCI_ADR1 0x00000090 -#define V3_DMA_LOCAL_ADR0 0x00000084 -#define V3_DMA_LOCAL_ADR1 0x00000094 -#define V3_DMA_LENGTH0 0x00000088 -#define V3_DMA_LENGTH1 0x00000098 -#define V3_DMA_CSR0 0x0000008B -#define V3_DMA_CSR1 
0x0000009B -#define V3_DMA_CTLB_ADR0 0x0000008C -#define V3_DMA_CTLB_ADR1 0x0000009C -#define V3_DMA_DELAY 0x000000E0 -#define V3_MAIL_DATA 0x000000C0 -#define V3_PCI_MAIL_IEWR 0x000000D0 -#define V3_PCI_MAIL_IERD 0x000000D2 -#define V3_LB_MAIL_IEWR 0x000000D4 -#define V3_LB_MAIL_IERD 0x000000D6 -#define V3_MAIL_WR_STAT 0x000000D8 -#define V3_MAIL_RD_STAT 0x000000DA -#define V3_QBA_MAP 0x000000DC - -/* PCI STATUS bits */ -#define V3_PCI_STAT_PAR_ERR BIT(15) -#define V3_PCI_STAT_SYS_ERR BIT(14) -#define V3_PCI_STAT_M_ABORT_ERR BIT(13) -#define V3_PCI_STAT_T_ABORT_ERR BIT(12) - -/* LB ISTAT bits */ -#define V3_LB_ISTAT_MAILBOX BIT(7) -#define V3_LB_ISTAT_PCI_RD BIT(6) -#define V3_LB_ISTAT_PCI_WR BIT(5) -#define V3_LB_ISTAT_PCI_INT BIT(4) -#define V3_LB_ISTAT_PCI_PERR BIT(3) -#define V3_LB_ISTAT_I2O_QWR BIT(2) -#define V3_LB_ISTAT_DMA1 BIT(1) -#define V3_LB_ISTAT_DMA0 BIT(0) - -/* PCI COMMAND bits */ -#define V3_COMMAND_M_FBB_EN BIT(9) -#define V3_COMMAND_M_SERR_EN BIT(8) -#define V3_COMMAND_M_PAR_EN BIT(6) -#define V3_COMMAND_M_MASTER_EN BIT(2) -#define V3_COMMAND_M_MEM_EN BIT(1) -#define V3_COMMAND_M_IO_EN BIT(0) - -/* SYSTEM bits */ -#define V3_SYSTEM_M_RST_OUT BIT(15) -#define V3_SYSTEM_M_LOCK BIT(14) -#define V3_SYSTEM_UNLOCK 0xa05f - -/* PCI CFG bits */ -#define V3_PCI_CFG_M_I2O_EN BIT(15) -#define V3_PCI_CFG_M_IO_REG_DIS BIT(14) -#define V3_PCI_CFG_M_IO_DIS BIT(13) -#define V3_PCI_CFG_M_EN3V BIT(12) -#define V3_PCI_CFG_M_RETRY_EN BIT(10) -#define V3_PCI_CFG_M_AD_LOW1 BIT(9) -#define V3_PCI_CFG_M_AD_LOW0 BIT(8) -/* - * This is the value applied to C/BE[3:1], with bit 0 always held 0 - * during DMA access. 
- */ -#define V3_PCI_CFG_M_RTYPE_SHIFT 5 -#define V3_PCI_CFG_M_WTYPE_SHIFT 1 -#define V3_PCI_CFG_TYPE_DEFAULT 0x3 - -/* PCI BASE bits (PCI -> Local Bus) */ -#define V3_PCI_BASE_M_ADR_BASE 0xFFF00000U -#define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00U -#define V3_PCI_BASE_M_PREFETCH BIT(3) -#define V3_PCI_BASE_M_TYPE (3 << 1) -#define V3_PCI_BASE_M_IO BIT(0) - -/* PCI MAP bits (PCI -> Local bus) */ -#define V3_PCI_MAP_M_MAP_ADR 0xFFF00000U -#define V3_PCI_MAP_M_RD_POST_INH BIT(15) -#define V3_PCI_MAP_M_ROM_SIZE (3 << 10) -#define V3_PCI_MAP_M_SWAP (3 << 8) -#define V3_PCI_MAP_M_ADR_SIZE 0x000000F0U -#define V3_PCI_MAP_M_REG_EN BIT(1) -#define V3_PCI_MAP_M_ENABLE BIT(0) - -/* LB_BASE0,1 bits (Local bus -> PCI) */ -#define V3_LB_BASE_ADR_BASE 0xfff00000U -#define V3_LB_BASE_SWAP (3 << 8) -#define V3_LB_BASE_ADR_SIZE (15 << 4) -#define V3_LB_BASE_PREFETCH BIT(3) -#define V3_LB_BASE_ENABLE BIT(0) - -#define V3_LB_BASE_ADR_SIZE_1MB (0 << 4) -#define V3_LB_BASE_ADR_SIZE_2MB (1 << 4) -#define V3_LB_BASE_ADR_SIZE_4MB (2 << 4) -#define V3_LB_BASE_ADR_SIZE_8MB (3 << 4) -#define V3_LB_BASE_ADR_SIZE_16MB (4 << 4) -#define V3_LB_BASE_ADR_SIZE_32MB (5 << 4) -#define V3_LB_BASE_ADR_SIZE_64MB (6 << 4) -#define V3_LB_BASE_ADR_SIZE_128MB (7 << 4) -#define V3_LB_BASE_ADR_SIZE_256MB (8 << 4) -#define V3_LB_BASE_ADR_SIZE_512MB (9 << 4) -#define V3_LB_BASE_ADR_SIZE_1GB (10 << 4) -#define V3_LB_BASE_ADR_SIZE_2GB (11 << 4) - -#define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE) - -/* LB_MAP0,1 bits (Local bus -> PCI) */ -#define V3_LB_MAP_MAP_ADR 0xfff0U -#define V3_LB_MAP_TYPE (7 << 1) -#define V3_LB_MAP_AD_LOW_EN BIT(0) - -#define V3_LB_MAP_TYPE_IACK (0 << 1) -#define V3_LB_MAP_TYPE_IO (1 << 1) -#define V3_LB_MAP_TYPE_MEM (3 << 1) -#define V3_LB_MAP_TYPE_CONFIG (5 << 1) -#define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1) - -#define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR) - -/* LB_BASE2 bits (Local bus -> PCI IO) */ -#define V3_LB_BASE2_ADR_BASE 0xff00U -#define 
V3_LB_BASE2_SWAP_AUTO (3 << 6) -#define V3_LB_BASE2_ENABLE BIT(0) - -#define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE) - -/* LB_MAP2 bits (Local bus -> PCI IO) */ -#define V3_LB_MAP2_MAP_ADR 0xff00U - -#define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR) - -/* FIFO priority bits */ -#define V3_FIFO_PRIO_LOCAL BIT(12) -#define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB BIT(10) -#define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 BIT(11) -#define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY (BIT(10)|BIT(11)) -#define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB BIT(8) -#define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 BIT(9) -#define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY (BIT(8)|BIT(9)) -#define V3_FIFO_PRIO_PCI BIT(4) -#define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB BIT(2) -#define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 BIT(3) -#define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY (BIT(2)|BIT(3)) -#define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB BIT(0) -#define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1 BIT(1) -#define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY (BIT(0)|BIT(1)) - -/* Local bus configuration bits */ -#define V3_LB_CFG_LB_TO_64_CYCLES 0x0000 -#define V3_LB_CFG_LB_TO_256_CYCLES BIT(13) -#define V3_LB_CFG_LB_TO_512_CYCLES BIT(14) -#define V3_LB_CFG_LB_TO_1024_CYCLES (BIT(13)|BIT(14)) -#define V3_LB_CFG_LB_RST BIT(12) -#define V3_LB_CFG_LB_PPC_RDY BIT(11) -#define V3_LB_CFG_LB_LB_INT BIT(10) -#define V3_LB_CFG_LB_ERR_EN BIT(9) -#define V3_LB_CFG_LB_RDY_EN BIT(8) -#define V3_LB_CFG_LB_BE_IMODE BIT(7) -#define V3_LB_CFG_LB_BE_OMODE BIT(6) -#define V3_LB_CFG_LB_ENDIAN BIT(5) -#define V3_LB_CFG_LB_PARK_EN BIT(4) -#define V3_LB_CFG_LB_FBB_DIS BIT(2) - -/* ARM Integrator-specific extended control registers */ -#define INTEGRATOR_SC_PCI_OFFSET 0x18 -#define INTEGRATOR_SC_PCI_ENABLE BIT(0) -#define INTEGRATOR_SC_PCI_INTCLR BIT(1) -#define INTEGRATOR_SC_LBFADDR_OFFSET 0x20 -#define INTEGRATOR_SC_LBFCODE_OFFSET 0x24 - -struct v3_pci { - struct device *dev; - void __iomem *base; - void __iomem *config_base; - struct pci_bus *bus; - u32 config_mem; - u32 io_mem; - u32 
non_pre_mem; - u32 pre_mem; - phys_addr_t io_bus_addr; - phys_addr_t non_pre_bus_addr; - phys_addr_t pre_bus_addr; - struct regmap *map; -}; - -/* - * The V3 PCI interface chip in Integrator provides several windows from - * local bus memory into the PCI memory areas. Unfortunately, there - * are not really enough windows for our usage, therefore we reuse - * one of the windows for access to PCI configuration space. On the - * Integrator/AP, the memory map is as follows: - * - * Local Bus Memory Usage - * - * 40000000 - 4FFFFFFF PCI memory. 256M non-prefetchable - * 50000000 - 5FFFFFFF PCI memory. 256M prefetchable - * 60000000 - 60FFFFFF PCI IO. 16M - * 61000000 - 61FFFFFF PCI Configuration. 16M - * - * There are three V3 windows, each described by a pair of V3 registers. - * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2. - * Base0 and Base1 can be used for any type of PCI memory access. Base2 - * can be used either for PCI I/O or for I20 accesses. By default, uHAL - * uses this only for PCI IO space. - * - * Normally these spaces are mapped using the following base registers: - * - * Usage Local Bus Memory Base/Map registers used - * - * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0 - * Mem 50000000 - 5FFFFFFF LB_BASE1/LB_MAP1 - * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2 - * Cfg 61000000 - 61FFFFFF - * - * This means that I20 and PCI configuration space accesses will fail. - * When PCI configuration accesses are needed (via the uHAL PCI - * configuration space primitives) we must remap the spaces as follows: - * - * Usage Local Bus Memory Base/Map registers used - * - * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0 - * Mem 50000000 - 5FFFFFFF LB_BASE0/LB_MAP0 - * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2 - * Cfg 61000000 - 61FFFFFF LB_BASE1/LB_MAP1 - * - * To make this work, the code depends on overlapping windows working. 
- * The V3 chip translates an address by checking its range within - * each of the BASE/MAP pairs in turn (in ascending register number - * order). It will use the first matching pair. So, for example, - * if the same address is mapped by both LB_BASE0/LB_MAP0 and - * LB_BASE1/LB_MAP1, the V3 will use the translation from - * LB_BASE0/LB_MAP0. - * - * To allow PCI Configuration space access, the code enlarges the - * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M. This occludes - * the windows currently mapped by LB_BASE1/LB_MAP1 so that it can - * be remapped for use by configuration cycles. - * - * At the end of the PCI Configuration space accesses, - * LB_BASE1/LB_MAP1 is reset to map PCI Memory. Finally the window - * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to - * reveal the now restored LB_BASE1/LB_MAP1 window. - * - * NOTE: We do not set up I2O mapping. I suspect that this is only - * for an intelligent (target) device. Using I2O disables most of - * the mappings into PCI memory. 
- */ -static void __iomem *v3_map_bus(struct pci_bus *bus, - unsigned int devfn, int offset) -{ - struct v3_pci *v3 = bus->sysdata; - unsigned int address, mapaddress, busnr; - - busnr = bus->number; - if (busnr == 0) { - int slot = PCI_SLOT(devfn); - - /* - * local bus segment so need a type 0 config cycle - * - * build the PCI configuration "address" with one-hot in - * A31-A11 - * - * mapaddress: - * 3:1 = config cycle (101) - * 0 = PCI A1 & A0 are 0 (0) - */ - address = PCI_FUNC(devfn) << 8; - mapaddress = V3_LB_MAP_TYPE_CONFIG; - - if (slot > 12) - /* - * high order bits are handled by the MAP register - */ - mapaddress |= BIT(slot - 5); - else - /* - * low order bits handled directly in the address - */ - address |= BIT(slot + 11); - } else { - /* - * not the local bus segment so need a type 1 config cycle - * - * address: - * 23:16 = bus number - * 15:11 = slot number (7:3 of devfn) - * 10:8 = func number (2:0 of devfn) - * - * mapaddress: - * 3:1 = config cycle (101) - * 0 = PCI A1 & A0 from host bus (1) - */ - mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN; - address = (busnr << 16) | (devfn << 8); - } - - /* - * Set up base0 to see all 512Mbytes of memory space (not - * prefetchable), this frees up base1 for re-use by - * configuration memory - */ - writel(v3_addr_to_lb_base(v3->non_pre_mem) | - V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE, - v3->base + V3_LB_BASE0); - - /* - * Set up base1/map1 to point into configuration space. - * The config mem is always 16MB. 
- */ - writel(v3_addr_to_lb_base(v3->config_mem) | - V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE, - v3->base + V3_LB_BASE1); - writew(mapaddress, v3->base + V3_LB_MAP1); - - return v3->config_base + address + offset; -} - -static void v3_unmap_bus(struct v3_pci *v3) -{ - /* - * Reassign base1 for use by prefetchable PCI memory - */ - writel(v3_addr_to_lb_base(v3->pre_mem) | - V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH | - V3_LB_BASE_ENABLE, - v3->base + V3_LB_BASE1); - writew(v3_addr_to_lb_map(v3->pre_bus_addr) | - V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */ - v3->base + V3_LB_MAP1); - - /* - * And shrink base0 back to a 256M window (NOTE: MAP0 already correct) - */ - writel(v3_addr_to_lb_base(v3->non_pre_mem) | - V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE, - v3->base + V3_LB_BASE0); -} - -static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn, - int config, int size, u32 *value) -{ - struct v3_pci *v3 = bus->sysdata; - int ret; - - dev_dbg(&bus->dev, - "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", - PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); - ret = pci_generic_config_read(bus, fn, config, size, value); - v3_unmap_bus(v3); - return ret; -} - -static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn, - int config, int size, u32 value) -{ - struct v3_pci *v3 = bus->sysdata; - int ret; - - dev_dbg(&bus->dev, - "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", - PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); - ret = pci_generic_config_write(bus, fn, config, size, value); - v3_unmap_bus(v3); - return ret; -} - -static struct pci_ops v3_pci_ops = { - .map_bus = v3_map_bus, - .read = v3_pci_read_config, - .write = v3_pci_write_config, -}; - -static irqreturn_t v3_irq(int irq, void *data) -{ - struct v3_pci *v3 = data; - struct device *dev = v3->dev; - u32 status; - - status = readw(v3->base + V3_PCI_STAT); - if (status & V3_PCI_STAT_PAR_ERR) - dev_err(dev, "parity error 
interrupt\n"); - if (status & V3_PCI_STAT_SYS_ERR) - dev_err(dev, "system error interrupt\n"); - if (status & V3_PCI_STAT_M_ABORT_ERR) - dev_err(dev, "master abort error interrupt\n"); - if (status & V3_PCI_STAT_T_ABORT_ERR) - dev_err(dev, "target abort error interrupt\n"); - writew(status, v3->base + V3_PCI_STAT); - - status = readb(v3->base + V3_LB_ISTAT); - if (status & V3_LB_ISTAT_MAILBOX) - dev_info(dev, "PCI mailbox interrupt\n"); - if (status & V3_LB_ISTAT_PCI_RD) - dev_err(dev, "PCI target LB->PCI READ abort interrupt\n"); - if (status & V3_LB_ISTAT_PCI_WR) - dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n"); - if (status & V3_LB_ISTAT_PCI_INT) - dev_info(dev, "PCI pin interrupt\n"); - if (status & V3_LB_ISTAT_PCI_PERR) - dev_err(dev, "PCI parity error interrupt\n"); - if (status & V3_LB_ISTAT_I2O_QWR) - dev_info(dev, "I2O inbound post queue interrupt\n"); - if (status & V3_LB_ISTAT_DMA1) - dev_info(dev, "DMA channel 1 interrupt\n"); - if (status & V3_LB_ISTAT_DMA0) - dev_info(dev, "DMA channel 0 interrupt\n"); - /* Clear all possible interrupts on the local bus */ - writeb(0, v3->base + V3_LB_ISTAT); - if (v3->map) - regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, - INTEGRATOR_SC_PCI_ENABLE | - INTEGRATOR_SC_PCI_INTCLR); - - return IRQ_HANDLED; -} - -static int v3_integrator_init(struct v3_pci *v3) -{ - unsigned int val; - - v3->map = - syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon"); - if (IS_ERR(v3->map)) { - dev_err(v3->dev, "no syscon\n"); - return -ENODEV; - } - - regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val); - /* Take the PCI bridge out of reset, clear IRQs */ - regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, - INTEGRATOR_SC_PCI_ENABLE | - INTEGRATOR_SC_PCI_INTCLR); - - if (!(val & INTEGRATOR_SC_PCI_ENABLE)) { - /* If we were in reset we need to sleep a bit */ - msleep(230); - - /* Set the physical base for the controller itself */ - writel(0x6200, v3->base + V3_LB_IO_BASE); - - /* Wait for the mailbox to settle after 
reset */ - do { - writeb(0xaa, v3->base + V3_MAIL_DATA); - writeb(0x55, v3->base + V3_MAIL_DATA + 4); - } while (readb(v3->base + V3_MAIL_DATA) != 0xaa && - readb(v3->base + V3_MAIL_DATA) != 0x55); - } - - dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n"); - - return 0; -} - -static int v3_pci_setup_resource(struct v3_pci *v3, - resource_size_t io_base, - struct pci_host_bridge *host, - struct resource_entry *win) -{ - struct device *dev = v3->dev; - struct resource *mem; - struct resource *io; - int ret; - - switch (resource_type(win->res)) { - case IORESOURCE_IO: - io = win->res; - io->name = "V3 PCI I/O"; - v3->io_mem = io_base; - v3->io_bus_addr = io->start - win->offset; - dev_dbg(dev, "I/O window %pR, bus addr %pap\n", - io, &v3->io_bus_addr); - ret = pci_remap_iospace(io, io_base); - if (ret) { - dev_warn(dev, - "error %d: failed to map resource %pR\n", - ret, io); - return ret; - } - /* Setup window 2 - PCI I/O */ - writel(v3_addr_to_lb_base2(v3->io_mem) | - V3_LB_BASE2_ENABLE, - v3->base + V3_LB_BASE2); - writew(v3_addr_to_lb_map2(v3->io_bus_addr), - v3->base + V3_LB_MAP2); - break; - case IORESOURCE_MEM: - mem = win->res; - if (mem->flags & IORESOURCE_PREFETCH) { - mem->name = "V3 PCI PRE-MEM"; - v3->pre_mem = mem->start; - v3->pre_bus_addr = mem->start - win->offset; - dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n", - mem, &v3->pre_bus_addr); - if (resource_size(mem) != SZ_256M) { - dev_err(dev, "prefetchable memory range is not 256MB\n"); - return -EINVAL; - } - if (v3->non_pre_mem && - (mem->start != v3->non_pre_mem + SZ_256M)) { - dev_err(dev, - "prefetchable memory is not adjacent to non-prefetchable memory\n"); - return -EINVAL; - } - /* Setup window 1 - PCI prefetchable memory */ - writel(v3_addr_to_lb_base(v3->pre_mem) | - V3_LB_BASE_ADR_SIZE_256MB | - V3_LB_BASE_PREFETCH | - V3_LB_BASE_ENABLE, - v3->base + V3_LB_BASE1); - writew(v3_addr_to_lb_map(v3->pre_bus_addr) | - V3_LB_MAP_TYPE_MEM, /* Was 
V3_LB_MAP_TYPE_MEM_MULTIPLE */ - v3->base + V3_LB_MAP1); - } else { - mem->name = "V3 PCI NON-PRE-MEM"; - v3->non_pre_mem = mem->start; - v3->non_pre_bus_addr = mem->start - win->offset; - dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n", - mem, &v3->non_pre_bus_addr); - if (resource_size(mem) != SZ_256M) { - dev_err(dev, - "non-prefetchable memory range is not 256MB\n"); - return -EINVAL; - } - /* Setup window 0 - PCI non-prefetchable memory */ - writel(v3_addr_to_lb_base(v3->non_pre_mem) | - V3_LB_BASE_ADR_SIZE_256MB | - V3_LB_BASE_ENABLE, - v3->base + V3_LB_BASE0); - writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) | - V3_LB_MAP_TYPE_MEM, - v3->base + V3_LB_MAP0); - } - break; - case IORESOURCE_BUS: - dev_dbg(dev, "BUS %pR\n", win->res); - host->busnr = win->res->start; - break; - default: - dev_info(dev, "Unknown resource type %lu\n", - resource_type(win->res)); - break; - } - - return 0; -} - -static int v3_get_dma_range_config(struct v3_pci *v3, - struct of_pci_range *range, - u32 *pci_base, u32 *pci_map) -{ - struct device *dev = v3->dev; - u64 cpu_end = range->cpu_addr + range->size - 1; - u64 pci_end = range->pci_addr + range->size - 1; - u32 val; - - if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) { - dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n"); - return -EINVAL; - } - val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE; - *pci_base = val; - - if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) { - dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n"); - return -EINVAL; - } - val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR; - - switch (range->size) { - case SZ_1M: - val |= V3_LB_BASE_ADR_SIZE_1MB; - break; - case SZ_2M: - val |= V3_LB_BASE_ADR_SIZE_2MB; - break; - case SZ_4M: - val |= V3_LB_BASE_ADR_SIZE_4MB; - break; - case SZ_8M: - val |= V3_LB_BASE_ADR_SIZE_8MB; - break; - case SZ_16M: - val |= V3_LB_BASE_ADR_SIZE_16MB; - break; - case SZ_32M: - val |= V3_LB_BASE_ADR_SIZE_32MB; - break; - case SZ_64M: - val |= 
V3_LB_BASE_ADR_SIZE_64MB; - break; - case SZ_128M: - val |= V3_LB_BASE_ADR_SIZE_128MB; - break; - case SZ_256M: - val |= V3_LB_BASE_ADR_SIZE_256MB; - break; - case SZ_512M: - val |= V3_LB_BASE_ADR_SIZE_512MB; - break; - case SZ_1G: - val |= V3_LB_BASE_ADR_SIZE_1GB; - break; - case SZ_2G: - val |= V3_LB_BASE_ADR_SIZE_2GB; - break; - default: - dev_err(v3->dev, "illegal dma memory chunk size\n"); - return -EINVAL; - break; - } - val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE; - *pci_map = val; - - dev_dbg(dev, - "DMA MEM CPU: 0x%016llx -> 0x%016llx => " - "PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n", - range->cpu_addr, cpu_end, - range->pci_addr, pci_end, - *pci_base, *pci_map); - - return 0; -} - -static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3, - struct device_node *np) -{ - struct of_pci_range range; - struct of_pci_range_parser parser; - struct device *dev = v3->dev; - int i = 0; - - if (of_pci_dma_range_parser_init(&parser, np)) { - dev_err(dev, "missing dma-ranges property\n"); - return -EINVAL; - } - - /* - * Get the dma-ranges from the device tree - */ - for_each_of_pci_range(&parser, &range) { - int ret; - u32 pci_base, pci_map; - - ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map); - if (ret) - return ret; - - if (i == 0) { - writel(pci_base, v3->base + V3_PCI_BASE0); - writel(pci_map, v3->base + V3_PCI_MAP0); - } else if (i == 1) { - writel(pci_base, v3->base + V3_PCI_BASE1); - writel(pci_map, v3->base + V3_PCI_MAP1); - } else { - dev_err(dev, "too many ranges, only two supported\n"); - dev_err(dev, "range %d ignored\n", i); - } - i++; - } - return 0; -} - -static int v3_pci_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - resource_size_t io_base; - struct resource *regs; - struct resource_entry *win; - struct v3_pci *v3; - struct pci_host_bridge *host; - struct clk *clk; - u16 val; - int irq; - int ret; - LIST_HEAD(res); - - host = 
pci_alloc_host_bridge(sizeof(*v3)); - if (!host) - return -ENOMEM; - - host->dev.parent = dev; - host->ops = &v3_pci_ops; - host->busnr = 0; - host->msi = NULL; - host->map_irq = of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; - v3 = pci_host_bridge_priv(host); - host->sysdata = v3; - v3->dev = dev; - - /* Get and enable host clock */ - clk = devm_clk_get(dev, NULL); - if (IS_ERR(clk)) { - dev_err(dev, "clock not found\n"); - return PTR_ERR(clk); - } - ret = clk_prepare_enable(clk); - if (ret) { - dev_err(dev, "unable to enable clock\n"); - return ret; - } - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - v3->base = devm_ioremap_resource(dev, regs); - if (IS_ERR(v3->base)) - return PTR_ERR(v3->base); - /* - * The hardware has a register with the physical base address - * of the V3 controller itself, verify that this is the same - * as the physical memory we've remapped it from. - */ - if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16)) - dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n", - readl(v3->base + V3_LB_IO_BASE), regs); - - /* Configuration space is 16MB directly mapped */ - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (resource_size(regs) != SZ_16M) { - dev_err(dev, "config mem is not 16MB!\n"); - return -EINVAL; - } - v3->config_mem = regs->start; - v3->config_base = devm_ioremap_resource(dev, regs); - if (IS_ERR(v3->config_base)) - return PTR_ERR(v3->config_base); - - ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, - &io_base); - if (ret) - return ret; - - ret = devm_request_pci_bus_resources(dev, &res); - if (ret) - return ret; - - /* Get and request error IRQ resource */ - irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(dev, "unable to obtain PCIv3 error IRQ\n"); - return -ENODEV; - } - ret = devm_request_irq(dev, irq, v3_irq, 0, - "PCIv3 error", v3); - if (ret < 0) { - dev_err(dev, - "unable to request PCIv3 error IRQ %d (%d)\n", - irq, ret); - return ret; - } - 
- /* - * Unlock V3 registers, but only if they were previously locked. - */ - if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK) - writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM); - - /* Disable all slave access while we set up the windows */ - val = readw(v3->base + V3_PCI_CMD); - val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); - writew(val, v3->base + V3_PCI_CMD); - - /* Put the PCI bus into reset */ - val = readw(v3->base + V3_SYSTEM); - val &= ~V3_SYSTEM_M_RST_OUT; - writew(val, v3->base + V3_SYSTEM); - - /* Retry until we're ready */ - val = readw(v3->base + V3_PCI_CFG); - val |= V3_PCI_CFG_M_RETRY_EN; - writew(val, v3->base + V3_PCI_CFG); - - /* Set up the local bus protocol */ - val = readw(v3->base + V3_LB_CFG); - val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */ - val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */ - val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */ - val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */ - writew(val, v3->base + V3_LB_CFG); - - /* Enable the PCI bus master */ - val = readw(v3->base + V3_PCI_CMD); - val |= PCI_COMMAND_MASTER; - writew(val, v3->base + V3_PCI_CMD); - - /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry(win, &res) { - ret = v3_pci_setup_resource(v3, io_base, host, win); - if (ret) { - dev_err(dev, "error setting up resources\n"); - return ret; - } - } - ret = v3_pci_parse_map_dma_ranges(v3, np); - if (ret) - return ret; - - /* - * Disable PCI to host IO cycles, enable I/O buffers @3.3V, - * set AD_LOW0 to 1 if one of the LB_MAP registers choose - * to use this (should be unused). 
- */ - writel(0x00000000, v3->base + V3_PCI_IO_BASE); - val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS | - V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0; - /* - * DMA read and write from PCI bus commands types - */ - val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT; - val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT; - writew(val, v3->base + V3_PCI_CFG); - - /* - * Set the V3 FIFO such that writes have higher priority than - * reads, and local bus write causes local bus read fifo flush - * on aperture 1. Same for PCI. - */ - writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 | - V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 | - V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 | - V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1, - v3->base + V3_FIFO_PRIORITY); - - - /* - * Clear any error interrupts, and enable parity and write error - * interrupts - */ - writeb(0, v3->base + V3_LB_ISTAT); - val = readw(v3->base + V3_LB_CFG); - val |= V3_LB_CFG_LB_LB_INT; - writew(val, v3->base + V3_LB_CFG); - writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR, - v3->base + V3_LB_IMASK); - - /* Special Integrator initialization */ - if (of_device_is_compatible(np, "arm,integrator-ap-pci")) { - ret = v3_integrator_init(v3); - if (ret) - return ret; - } - - /* Post-init: enable PCI memory and invalidate (master already on) */ - val = readw(v3->base + V3_PCI_CMD); - val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE; - writew(val, v3->base + V3_PCI_CMD); - - /* Clear pending interrupts */ - writeb(0, v3->base + V3_LB_ISTAT); - /* Read or write errors and parity errors cause interrupts */ - writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR, - v3->base + V3_LB_IMASK); - - /* Take the PCI bus out of reset so devices can initialize */ - val = readw(v3->base + V3_SYSTEM); - val |= V3_SYSTEM_M_RST_OUT; - writew(val, v3->base + V3_SYSTEM); - - /* - * Re-lock the system register. 
- */ - val = readw(v3->base + V3_SYSTEM); - val |= V3_SYSTEM_M_LOCK; - writew(val, v3->base + V3_SYSTEM); - - list_splice_init(&res, &host->windows); - ret = pci_scan_root_bus_bridge(host); - if (ret) { - dev_err(dev, "failed to register host: %d\n", ret); - return ret; - } - v3->bus = host->bus; - - pci_bus_assign_resources(v3->bus); - pci_bus_add_devices(v3->bus); - - return 0; -} - -static const struct of_device_id v3_pci_of_match[] = { - { - .compatible = "v3,v360epc-pci", - }, - {}, -}; - -static struct platform_driver v3_pci_driver = { - .driver = { - .name = "pci-v3-semi", - .of_match_table = of_match_ptr(v3_pci_of_match), - .suppress_bind_attrs = true, - }, - .probe = v3_pci_probe, -}; -builtin_platform_driver(v3_pci_driver); diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c deleted file mode 100644 index 994f32061b32..000000000000 --- a/drivers/pci/host/pci-versatile.c +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2004 Koninklijke Philips Electronics NV - * - * Conversion to platform driver and DT: - * Copyright 2014 Linaro Ltd. 
- * - * 14/04/2005 Initial version, colin.king@philips.com - */ -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -static void __iomem *versatile_pci_base; -static void __iomem *versatile_cfg_base[2]; - -#define PCI_IMAP(m) (versatile_pci_base + ((m) * 4)) -#define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4)) -#define PCI_SELFID (versatile_pci_base + 0xc) - -#define VP_PCI_DEVICE_ID 0x030010ee -#define VP_PCI_CLASS_ID 0x0b400000 - -static u32 pci_slot_ignore; - -static int __init versatile_pci_slot_ignore(char *str) -{ - int retval; - int slot; - - while ((retval = get_option(&str, &slot))) { - if ((slot < 0) || (slot > 31)) - pr_err("Illegal slot value: %d\n", slot); - else - pci_slot_ignore |= (1 << slot); - } - return 1; -} -__setup("pci_slot_ignore=", versatile_pci_slot_ignore); - - -static void __iomem *versatile_map_bus(struct pci_bus *bus, - unsigned int devfn, int offset) -{ - unsigned int busnr = bus->number; - - if (pci_slot_ignore & (1 << PCI_SLOT(devfn))) - return NULL; - - return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset); -} - -static struct pci_ops pci_versatile_ops = { - .map_bus = versatile_map_bus, - .read = pci_generic_config_read32, - .write = pci_generic_config_write, -}; - -static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *res) -{ - int err, mem = 1, res_valid = 0; - resource_size_t iobase; - struct resource_entry *win, *tmp; - - err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase); - if (err) - return err; - - err = devm_request_pci_bus_resources(dev, res); - if (err) - goto out_release_res; - - resource_list_for_each_entry_safe(win, tmp, res) { - struct resource *res = win->res; - - switch (resource_type(res)) { - case IORESOURCE_IO: - err = pci_remap_iospace(res, iobase); - if (err) { - dev_warn(dev, "error %d: failed to map resource %pR\n", - err, res); - resource_list_destroy_entry(win); - } - break; - 
case IORESOURCE_MEM: - res_valid |= !(res->flags & IORESOURCE_PREFETCH); - - writel(res->start >> 28, PCI_IMAP(mem)); - writel(PHYS_OFFSET >> 28, PCI_SMAP(mem)); - mem++; - - break; - } - } - - if (res_valid) - return 0; - - dev_err(dev, "non-prefetchable memory resource required\n"); - err = -EINVAL; - -out_release_res: - pci_free_resource_list(res); - return err; -} - -static int versatile_pci_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct resource *res; - int ret, i, myslot = -1; - u32 val; - void __iomem *local_pci_cfg_base; - struct pci_bus *bus, *child; - struct pci_host_bridge *bridge; - LIST_HEAD(pci_res); - - bridge = devm_pci_alloc_host_bridge(dev, 0); - if (!bridge) - return -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - versatile_pci_base = devm_ioremap_resource(dev, res); - if (IS_ERR(versatile_pci_base)) - return PTR_ERR(versatile_pci_base); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - versatile_cfg_base[0] = devm_ioremap_resource(dev, res); - if (IS_ERR(versatile_cfg_base[0])) - return PTR_ERR(versatile_cfg_base[0]); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(versatile_cfg_base[1])) - return PTR_ERR(versatile_cfg_base[1]); - - ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res); - if (ret) - return ret; - - /* - * We need to discover the PCI core first to configure itself - * before the main PCI probing is performed - */ - for (i = 0; i < 32; i++) { - if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) && - (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) { - myslot = i; - break; - } - } - if (myslot == -1) { - dev_err(dev, "Cannot find PCI core!\n"); - return -EIO; - } - /* - * Do not to map Versatile FPGA PCI device into memory space - */ - pci_slot_ignore |= (1 << myslot); - - dev_info(dev, "PCI core found 
(slot %d)\n", myslot); - - writel(myslot, PCI_SELFID); - local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); - - val = readl(local_pci_cfg_base + PCI_COMMAND); - val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE; - writel(val, local_pci_cfg_base + PCI_COMMAND); - - /* - * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM - */ - writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0); - writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1); - writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2); - - /* - * For many years the kernel and QEMU were symbiotically buggy - * in that they both assumed the same broken IRQ mapping. - * QEMU therefore attempts to auto-detect old broken kernels - * so that they still work on newer QEMU as they did on old - * QEMU. Since we now use the correct (ie matching-hardware) - * IRQ mapping we write a definitely different value to a - * PCI_INTERRUPT_LINE register to tell QEMU that we expect - * real hardware behaviour and it need not be backwards - * compatible for us. This write is harmless on real hardware. 
- */ - writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE); - - pci_add_flags(PCI_ENABLE_PROC_DOMAINS); - pci_add_flags(PCI_REASSIGN_ALL_BUS); - - list_splice_init(&pci_res, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = NULL; - bridge->busnr = 0; - bridge->ops = &pci_versatile_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - - return 0; -} - -static const struct of_device_id versatile_pci_of_match[] = { - { .compatible = "arm,versatile-pci", }, - { }, -}; -MODULE_DEVICE_TABLE(of, versatile_pci_of_match); - -static struct platform_driver versatile_pci_driver = { - .driver = { - .name = "versatile-pci", - .of_match_table = versatile_pci_of_match, - .suppress_bind_attrs = true, - }, - .probe = versatile_pci_probe, -}; -module_platform_driver(versatile_pci_driver); - -MODULE_DESCRIPTION("Versatile PCI driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c deleted file mode 100644 index f4c02da84e59..000000000000 --- a/drivers/pci/host/pci-xgene-msi.c +++ /dev/null @@ -1,543 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * APM X-Gene MSI Driver - * - * Copyright (c) 2014, Applied Micro Circuits Corporation - * Author: Tanmay Inamdar - * Duc Dang - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MSI_IR0 0x000000 -#define MSI_INT0 0x800000 -#define IDX_PER_GROUP 8 -#define IRQS_PER_IDX 16 -#define NR_HW_IRQS 16 -#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) - -struct xgene_msi_group { - struct xgene_msi *msi; - int gic_irq; - u32 msi_grp; -}; - -struct xgene_msi { - struct device_node *node; - struct 
irq_domain *inner_domain; - struct irq_domain *msi_domain; - u64 msi_addr; - void __iomem *msi_regs; - unsigned long *bitmap; - struct mutex bitmap_lock; - struct xgene_msi_group *msi_groups; - int num_cpus; -}; - -/* Global data */ -static struct xgene_msi xgene_msi_ctrl; - -static struct irq_chip xgene_msi_top_irq_chip = { - .name = "X-Gene1 MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info xgene_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), - .chip = &xgene_msi_top_irq_chip, -}; - -/* - * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where - * n is group number (0..F), x is index of registers in each group (0..7) - * The register layout is as follows: - * MSI0IR0 base_addr - * MSI0IR1 base_addr + 0x10000 - * ... ... - * MSI0IR6 base_addr + 0x60000 - * MSI0IR7 base_addr + 0x70000 - * MSI1IR0 base_addr + 0x80000 - * MSI1IR1 base_addr + 0x90000 - * ... ... - * MSI1IR7 base_addr + 0xF0000 - * MSI2IR0 base_addr + 0x100000 - * ... ... - * MSIFIR0 base_addr + 0x780000 - * MSIFIR1 base_addr + 0x790000 - * ... ... - * MSIFIR7 base_addr + 0x7F0000 - * MSIINT0 base_addr + 0x800000 - * MSIINT1 base_addr + 0x810000 - * ... ... - * MSIINTF base_addr + 0x8F0000 - * - * Each index register supports 16 MSI vectors (0..15) to generate interrupt. - * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination - * registers. - * - * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate - * the MSI pending status caused by 1 of its 8 index registers. 
- */ - -/* MSInIRx read helper */ -static u32 xgene_msi_ir_read(struct xgene_msi *msi, - u32 msi_grp, u32 msir_idx) -{ - return readl_relaxed(msi->msi_regs + MSI_IR0 + - (msi_grp << 19) + (msir_idx << 16)); -} - -/* MSIINTn read helper */ -static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) -{ - return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); -} - -/* - * With 2048 MSI vectors supported, the MSI message can be constructed using - * following scheme: - * - Divide into 8 256-vector groups - * Group 0: 0-255 - * Group 1: 256-511 - * Group 2: 512-767 - * ... - * Group 7: 1792-2047 - * - Each 256-vector group is divided into 16 16-vector groups - * As an example: 16 16-vector groups for 256-vector group 0-255 is - * Group 0: 0-15 - * Group 1: 16-32 - * ... - * Group 15: 240-255 - * - The termination address of MSI vector in 256-vector group n and 16-vector - * group x is the address of MSIxIRn - * - The data for MSI vector in 16-vector group x is x - */ -static u32 hwirq_to_reg_set(unsigned long hwirq) -{ - return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); -} - -static u32 hwirq_to_group(unsigned long hwirq) -{ - return (hwirq % NR_HW_IRQS); -} - -static u32 hwirq_to_msi_data(unsigned long hwirq) -{ - return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); -} - -static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -{ - struct xgene_msi *msi = irq_data_get_irq_chip_data(data); - u32 reg_set = hwirq_to_reg_set(data->hwirq); - u32 group = hwirq_to_group(data->hwirq); - u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); - - msg->address_hi = upper_32_bits(target_addr); - msg->address_lo = lower_32_bits(target_addr); - msg->data = hwirq_to_msi_data(data->hwirq); -} - -/* - * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain - * the expected behaviour of .set_affinity for each MSI interrupt, the 16 - * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs - * for each core). 
The MSI vector is moved fom 1 MSI GIC IRQ to another - * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a - * consequence, the total MSI vectors that X-Gene v1 supports will be - * reduced to 256 (2048/8) vectors. - */ -static int hwirq_to_cpu(unsigned long hwirq) -{ - return (hwirq % xgene_msi_ctrl.num_cpus); -} - -static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) -{ - return (hwirq - hwirq_to_cpu(hwirq)); -} - -static int xgene_msi_set_affinity(struct irq_data *irqdata, - const struct cpumask *mask, bool force) -{ - int target_cpu = cpumask_first(mask); - int curr_cpu; - - curr_cpu = hwirq_to_cpu(irqdata->hwirq); - if (curr_cpu == target_cpu) - return IRQ_SET_MASK_OK_DONE; - - /* Update MSI number to target the new CPU */ - irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; - - return IRQ_SET_MASK_OK; -} - -static struct irq_chip xgene_msi_bottom_irq_chip = { - .name = "MSI", - .irq_set_affinity = xgene_msi_set_affinity, - .irq_compose_msi_msg = xgene_compose_msi_msg, -}; - -static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *args) -{ - struct xgene_msi *msi = domain->host_data; - int msi_irq; - - mutex_lock(&msi->bitmap_lock); - - msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, - msi->num_cpus, 0); - if (msi_irq < NR_MSI_VEC) - bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); - else - msi_irq = -ENOSPC; - - mutex_unlock(&msi->bitmap_lock); - - if (msi_irq < 0) - return msi_irq; - - irq_domain_set_info(domain, virq, msi_irq, - &xgene_msi_bottom_irq_chip, domain->host_data, - handle_simple_irq, NULL, NULL); - - return 0; -} - -static void xgene_irq_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) -{ - struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct xgene_msi *msi = irq_data_get_irq_chip_data(d); - u32 hwirq; - - mutex_lock(&msi->bitmap_lock); - - hwirq = 
hwirq_to_canonical_hwirq(d->hwirq); - bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); - - mutex_unlock(&msi->bitmap_lock); - - irq_domain_free_irqs_parent(domain, virq, nr_irqs); -} - -static const struct irq_domain_ops msi_domain_ops = { - .alloc = xgene_irq_domain_alloc, - .free = xgene_irq_domain_free, -}; - -static int xgene_allocate_domains(struct xgene_msi *msi) -{ - msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC, - &msi_domain_ops, msi); - if (!msi->inner_domain) - return -ENOMEM; - - msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node), - &xgene_msi_domain_info, - msi->inner_domain); - - if (!msi->msi_domain) { - irq_domain_remove(msi->inner_domain); - return -ENOMEM; - } - - return 0; -} - -static void xgene_free_domains(struct xgene_msi *msi) -{ - if (msi->msi_domain) - irq_domain_remove(msi->msi_domain); - if (msi->inner_domain) - irq_domain_remove(msi->inner_domain); -} - -static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) -{ - int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); - - xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); - if (!xgene_msi->bitmap) - return -ENOMEM; - - mutex_init(&xgene_msi->bitmap_lock); - - xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, - sizeof(struct xgene_msi_group), - GFP_KERNEL); - if (!xgene_msi->msi_groups) - return -ENOMEM; - - return 0; -} - -static void xgene_msi_isr(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct xgene_msi_group *msi_groups; - struct xgene_msi *xgene_msi; - unsigned int virq; - int msir_index, msir_val, hw_irq; - u32 intr_index, grp_select, msi_grp; - - chained_irq_enter(chip, desc); - - msi_groups = irq_desc_get_handler_data(desc); - xgene_msi = msi_groups->msi; - msi_grp = msi_groups->msi_grp; - - /* - * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt - * If bit x of this register is set (x is 0..7), one or more interupts - * corresponding to MSInIRx is set. 
- */ - grp_select = xgene_msi_int_read(xgene_msi, msi_grp); - while (grp_select) { - msir_index = ffs(grp_select) - 1; - /* - * Calculate MSInIRx address to read to check for interrupts - * (refer to termination address and data assignment - * described in xgene_compose_msi_msg() ) - */ - msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); - while (msir_val) { - intr_index = ffs(msir_val) - 1; - /* - * Calculate MSI vector number (refer to the termination - * address and data assignment described in - * xgene_compose_msi_msg function) - */ - hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * - NR_HW_IRQS) + msi_grp; - /* - * As we have multiple hw_irq that maps to single MSI, - * always look up the virq using the hw_irq as seen from - * CPU0 - */ - hw_irq = hwirq_to_canonical_hwirq(hw_irq); - virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq); - WARN_ON(!virq); - if (virq != 0) - generic_handle_irq(virq); - msir_val &= ~(1 << intr_index); - } - grp_select &= ~(1 << msir_index); - - if (!grp_select) { - /* - * We handled all interrupts happened in this group, - * resample this group MSI_INTx register in case - * something else has been made pending in the meantime - */ - grp_select = xgene_msi_int_read(xgene_msi, msi_grp); - } - } - - chained_irq_exit(chip, desc); -} - -static enum cpuhp_state pci_xgene_online; - -static int xgene_msi_remove(struct platform_device *pdev) -{ - struct xgene_msi *msi = platform_get_drvdata(pdev); - - if (pci_xgene_online) - cpuhp_remove_state(pci_xgene_online); - cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD); - - kfree(msi->msi_groups); - - kfree(msi->bitmap); - msi->bitmap = NULL; - - xgene_free_domains(msi); - - return 0; -} - -static int xgene_msi_hwirq_alloc(unsigned int cpu) -{ - struct xgene_msi *msi = &xgene_msi_ctrl; - struct xgene_msi_group *msi_group; - cpumask_var_t mask; - int i; - int err; - - for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { - msi_group = &msi->msi_groups[i]; - if (!msi_group->gic_irq) - 
continue; - - irq_set_chained_handler(msi_group->gic_irq, - xgene_msi_isr); - err = irq_set_handler_data(msi_group->gic_irq, msi_group); - if (err) { - pr_err("failed to register GIC IRQ handler\n"); - return -EINVAL; - } - /* - * Statically allocate MSI GIC IRQs to each CPU core. - * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated - * to each core. - */ - if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - cpumask_clear(mask); - cpumask_set_cpu(cpu, mask); - err = irq_set_affinity(msi_group->gic_irq, mask); - if (err) - pr_err("failed to set affinity for GIC IRQ"); - free_cpumask_var(mask); - } else { - pr_err("failed to alloc CPU mask for affinity\n"); - err = -EINVAL; - } - - if (err) { - irq_set_chained_handler_and_data(msi_group->gic_irq, - NULL, NULL); - return err; - } - } - - return 0; -} - -static int xgene_msi_hwirq_free(unsigned int cpu) -{ - struct xgene_msi *msi = &xgene_msi_ctrl; - struct xgene_msi_group *msi_group; - int i; - - for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { - msi_group = &msi->msi_groups[i]; - if (!msi_group->gic_irq) - continue; - - irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, - NULL); - } - return 0; -} - -static const struct of_device_id xgene_msi_match_table[] = { - {.compatible = "apm,xgene1-msi"}, - {}, -}; - -static int xgene_msi_probe(struct platform_device *pdev) -{ - struct resource *res; - int rc, irq_index; - struct xgene_msi *xgene_msi; - int virt_msir; - u32 msi_val, msi_idx; - - xgene_msi = &xgene_msi_ctrl; - - platform_set_drvdata(pdev, xgene_msi); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(xgene_msi->msi_regs)) { - dev_err(&pdev->dev, "no reg space\n"); - rc = PTR_ERR(xgene_msi->msi_regs); - goto error; - } - xgene_msi->msi_addr = res->start; - xgene_msi->node = pdev->dev.of_node; - xgene_msi->num_cpus = num_possible_cpus(); - - rc = xgene_msi_init_allocator(xgene_msi); - if (rc) { - dev_err(&pdev->dev, "Error 
allocating MSI bitmap\n"); - goto error; - } - - rc = xgene_allocate_domains(xgene_msi); - if (rc) { - dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); - goto error; - } - - for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { - virt_msir = platform_get_irq(pdev, irq_index); - if (virt_msir < 0) { - dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", - irq_index); - rc = virt_msir; - goto error; - } - xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; - xgene_msi->msi_groups[irq_index].msi_grp = irq_index; - xgene_msi->msi_groups[irq_index].msi = xgene_msi; - } - - /* - * MSInIRx registers are read-to-clear; before registering - * interrupt handlers, read all of them to clear spurious - * interrupts that may occur before the driver is probed. - */ - for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { - for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) - msi_val = xgene_msi_ir_read(xgene_msi, irq_index, - msi_idx); - /* Read MSIINTn to confirm */ - msi_val = xgene_msi_int_read(xgene_msi, irq_index); - if (msi_val) { - dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); - rc = -EINVAL; - goto error; - } - } - - rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", - xgene_msi_hwirq_alloc, NULL); - if (rc < 0) - goto err_cpuhp; - pci_xgene_online = rc; - rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, - xgene_msi_hwirq_free); - if (rc) - goto err_cpuhp; - - dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); - - return 0; - -err_cpuhp: - dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); -error: - xgene_msi_remove(pdev); - return rc; -} - -static struct platform_driver xgene_msi_driver = { - .driver = { - .name = "xgene-msi", - .of_match_table = xgene_msi_match_table, - }, - .probe = xgene_msi_probe, - .remove = xgene_msi_remove, -}; - -static int __init xgene_pcie_msi_init(void) -{ - return platform_driver_register(&xgene_msi_driver); -} -subsys_initcall(xgene_pcie_msi_init); diff --git 
a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c deleted file mode 100644 index d854d67e873c..000000000000 --- a/drivers/pci/host/pci-xgene.c +++ /dev/null @@ -1,689 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/** - * APM X-Gene PCIe Driver - * - * Copyright (c) 2014 Applied Micro Circuits Corporation. - * - * Author: Tanmay Inamdar . - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -#define PCIECORE_CTLANDSTATUS 0x50 -#define PIM1_1L 0x80 -#define IBAR2 0x98 -#define IR2MSK 0x9c -#define PIM2_1L 0xa0 -#define IBAR3L 0xb4 -#define IR3MSKL 0xbc -#define PIM3_1L 0xc4 -#define OMR1BARL 0x100 -#define OMR2BARL 0x118 -#define OMR3BARL 0x130 -#define CFGBARL 0x154 -#define CFGBARH 0x158 -#define CFGCTL 0x15c -#define RTDID 0x160 -#define BRIDGE_CFG_0 0x2000 -#define BRIDGE_CFG_4 0x2010 -#define BRIDGE_STATUS_0 0x2600 - -#define LINK_UP_MASK 0x00000100 -#define AXI_EP_CFG_ACCESS 0x10000 -#define EN_COHERENCY 0xF0000000 -#define EN_REG 0x00000001 -#define OB_LO_IO 0x00000002 -#define XGENE_PCIE_VENDORID 0x10E8 -#define XGENE_PCIE_DEVICEID 0xE004 -#define SZ_1T (SZ_1G*1024ULL) -#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) - -#define XGENE_V1_PCI_EXP_CAP 0x40 - -/* PCIe IP version */ -#define XGENE_PCIE_IP_VER_UNKN 0 -#define XGENE_PCIE_IP_VER_1 1 -#define XGENE_PCIE_IP_VER_2 2 - -#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) -struct xgene_pcie_port { - struct device_node *node; - struct device *dev; - struct clk *clk; - void __iomem *csr_base; - void __iomem *cfg_base; - unsigned long cfg_addr; - bool link_up; - u32 version; -}; - -static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg) -{ - return readl(port->csr_base + reg); -} - -static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val) -{ - writel(val, port->csr_base + reg); -} - -static 
inline u32 pcie_bar_low_val(u32 addr, u32 flags) -{ - return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; -} - -static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus) -{ - struct pci_config_window *cfg; - - if (acpi_disabled) - return (struct xgene_pcie_port *)(bus->sysdata); - - cfg = bus->sysdata; - return (struct xgene_pcie_port *)(cfg->priv); -} - -/* - * When the address bit [17:16] is 2'b01, the Configuration access will be - * treated as Type 1 and it will be forwarded to external PCIe device. - */ -static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) -{ - struct xgene_pcie_port *port = pcie_bus_to_port(bus); - - if (bus->number >= (bus->primary + 1)) - return port->cfg_base + AXI_EP_CFG_ACCESS; - - return port->cfg_base; -} - -/* - * For Configuration request, RTDID register is used as Bus Number, - * Device Number and Function number of the header fields. - */ -static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) -{ - struct xgene_pcie_port *port = pcie_bus_to_port(bus); - unsigned int b, d, f; - u32 rtdid_val = 0; - - b = bus->number; - d = PCI_SLOT(devfn); - f = PCI_FUNC(devfn); - - if (!pci_is_root_bus(bus)) - rtdid_val = (b << 8) | (d << 3) | f; - - xgene_pcie_writel(port, RTDID, rtdid_val); - /* read the register back to ensure flush */ - xgene_pcie_readl(port, RTDID); -} - -/* - * X-Gene PCIe port uses BAR0-BAR1 of RC's configuration space as - * the translation from PCI bus to native BUS. Entire DDR region - * is mapped into PCIe space using these registers, so it can be - * reached by DMA from EP devices. The BAR0/1 of bridge should be - * hidden during enumeration to avoid the sizing and resource allocation - * by PCIe core. 
- */ -static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) -{ - if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) || - (offset == PCI_BASE_ADDRESS_1))) - return true; - - return false; -} - -static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, - int offset) -{ - if ((pci_is_root_bus(bus) && devfn != 0) || - xgene_pcie_hide_rc_bars(bus, offset)) - return NULL; - - xgene_pcie_set_rtdid_reg(bus, devfn); - return xgene_pcie_get_cfg_base(bus) + offset; -} - -static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - struct xgene_pcie_port *port = pcie_bus_to_port(bus); - - if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != - PCIBIOS_SUCCESSFUL) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* - * The v1 controller has a bug in its Configuration Request - * Retry Status (CRS) logic: when CRS is enabled and we read the - * Vendor and Device ID of a non-existent device, the controller - * fabricates return data of 0xFFFF0001 ("device exists but is not - * ready") instead of 0xFFFFFFFF ("device does not exist"). This - * causes the PCI core to retry the read until it times out. - * Avoid this by not claiming to support CRS. 
- */ - if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && - ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL)) - *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); - - if (size <= 2) - *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); - - return PCIBIOS_SUCCESSFUL; -} -#endif - -#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) -static int xgene_get_csr_resource(struct acpi_device *adev, - struct resource *res) -{ - struct device *dev = &adev->dev; - struct resource_entry *entry; - struct list_head list; - unsigned long flags; - int ret; - - INIT_LIST_HEAD(&list); - flags = IORESOURCE_MEM; - ret = acpi_dev_get_resources(adev, &list, - acpi_dev_filter_resource_type_cb, - (void *) flags); - if (ret < 0) { - dev_err(dev, "failed to parse _CRS method, error code %d\n", - ret); - return ret; - } - - if (ret == 0) { - dev_err(dev, "no IO and memory resources present in _CRS\n"); - return -EINVAL; - } - - entry = list_first_entry(&list, struct resource_entry, node); - *res = *entry->res; - acpi_dev_free_resource_list(&list); - return 0; -} - -static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion) -{ - struct device *dev = cfg->parent; - struct acpi_device *adev = to_acpi_device(dev); - struct xgene_pcie_port *port; - struct resource csr; - int ret; - - port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); - if (!port) - return -ENOMEM; - - ret = xgene_get_csr_resource(adev, &csr); - if (ret) { - dev_err(dev, "can't get CSR resource\n"); - return ret; - } - port->csr_base = devm_pci_remap_cfg_resource(dev, &csr); - if (IS_ERR(port->csr_base)) - return PTR_ERR(port->csr_base); - - port->cfg_base = cfg->win; - port->version = ipversion; - - cfg->priv = port; - return 0; -} - -static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg) -{ - return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1); -} - -struct pci_ecam_ops xgene_v1_pcie_ecam_ops = { - .bus_shift = 16, - .init = xgene_v1_pcie_ecam_init, - .pci_ops = { - 
.map_bus = xgene_pcie_map_bus, - .read = xgene_pcie_config_read32, - .write = pci_generic_config_write, - } -}; - -static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg) -{ - return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2); -} - -struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { - .bus_shift = 16, - .init = xgene_v2_pcie_ecam_init, - .pci_ops = { - .map_bus = xgene_pcie_map_bus, - .read = xgene_pcie_config_read32, - .write = pci_generic_config_write, - } -}; -#endif - -#if defined(CONFIG_PCI_XGENE) -static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr, - u32 flags, u64 size) -{ - u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags; - u32 val32 = 0; - u32 val; - - val32 = xgene_pcie_readl(port, addr); - val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16); - xgene_pcie_writel(port, addr, val); - - val32 = xgene_pcie_readl(port, addr + 0x04); - val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16); - xgene_pcie_writel(port, addr + 0x04, val); - - val32 = xgene_pcie_readl(port, addr + 0x04); - val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16); - xgene_pcie_writel(port, addr + 0x04, val); - - val32 = xgene_pcie_readl(port, addr + 0x08); - val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16); - xgene_pcie_writel(port, addr + 0x08, val); - - return mask; -} - -static void xgene_pcie_linkup(struct xgene_pcie_port *port, - u32 *lanes, u32 *speed) -{ - u32 val32; - - port->link_up = false; - val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS); - if (val32 & LINK_UP_MASK) { - port->link_up = true; - *speed = PIPE_PHY_RATE_RD(val32); - val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0); - *lanes = val32 >> 26; - } -} - -static int xgene_pcie_init_port(struct xgene_pcie_port *port) -{ - struct device *dev = port->dev; - int rc; - - port->clk = clk_get(dev, NULL); - if (IS_ERR(port->clk)) { - dev_err(dev, "clock not available\n"); - return -ENODEV; - } - - rc = clk_prepare_enable(port->clk); - if (rc) { - dev_err(dev, 
"clock enable failed\n"); - return rc; - } - - return 0; -} - -static int xgene_pcie_map_reg(struct xgene_pcie_port *port, - struct platform_device *pdev) -{ - struct device *dev = port->dev; - struct resource *res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); - port->csr_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(port->csr_base)) - return PTR_ERR(port->csr_base); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); - port->cfg_base = devm_ioremap_resource(dev, res); - if (IS_ERR(port->cfg_base)) - return PTR_ERR(port->cfg_base); - port->cfg_addr = res->start; - - return 0; -} - -static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, - struct resource *res, u32 offset, - u64 cpu_addr, u64 pci_addr) -{ - struct device *dev = port->dev; - resource_size_t size = resource_size(res); - u64 restype = resource_type(res); - u64 mask = 0; - u32 min_size; - u32 flag = EN_REG; - - if (restype == IORESOURCE_MEM) { - min_size = SZ_128M; - } else { - min_size = 128; - flag |= OB_LO_IO; - } - - if (size >= min_size) - mask = ~(size - 1) | flag; - else - dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n", - (u64)size, min_size); - - xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr)); - xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr)); - xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask)); - xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask)); - xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr)); - xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr)); -} - -static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port) -{ - u64 addr = port->cfg_addr; - - xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr)); - xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr)); - xgene_pcie_writel(port, CFGCTL, EN_REG); -} - -static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, - struct list_head *res, - resource_size_t io_base) -{ - struct 
resource_entry *window; - struct device *dev = port->dev; - int ret; - - resource_list_for_each_entry(window, res) { - struct resource *res = window->res; - u64 restype = resource_type(res); - - dev_dbg(dev, "%pR\n", res); - - switch (restype) { - case IORESOURCE_IO: - xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base, - res->start - window->offset); - ret = pci_remap_iospace(res, io_base); - if (ret < 0) - return ret; - break; - case IORESOURCE_MEM: - if (res->flags & IORESOURCE_PREFETCH) - xgene_pcie_setup_ob_reg(port, res, OMR2BARL, - res->start, - res->start - - window->offset); - else - xgene_pcie_setup_ob_reg(port, res, OMR1BARL, - res->start, - res->start - - window->offset); - break; - case IORESOURCE_BUS: - break; - default: - dev_err(dev, "invalid resource %pR\n", res); - return -EINVAL; - } - } - xgene_pcie_setup_cfg_reg(port); - return 0; -} - -static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg, - u64 pim, u64 size) -{ - xgene_pcie_writel(port, pim_reg, lower_32_bits(pim)); - xgene_pcie_writel(port, pim_reg + 0x04, - upper_32_bits(pim) | EN_COHERENCY); - xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size)); - xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size)); -} - -/* - * X-Gene PCIe support maximum 3 inbound memory regions - * This function helps to select a region based on size of region - */ -static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) -{ - if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) { - *ib_reg_mask |= (1 << 1); - return 1; - } - - if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { - *ib_reg_mask |= (1 << 0); - return 0; - } - - if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) { - *ib_reg_mask |= (1 << 2); - return 2; - } - - return -EINVAL; -} - -static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, - struct of_pci_range *range, u8 *ib_reg_mask) -{ - void __iomem *cfg_base = port->cfg_base; - struct device *dev = 
port->dev; - void *bar_addr; - u32 pim_reg; - u64 cpu_addr = range->cpu_addr; - u64 pci_addr = range->pci_addr; - u64 size = range->size; - u64 mask = ~(size - 1) | EN_REG; - u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; - u32 bar_low; - int region; - - region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); - if (region < 0) { - dev_warn(dev, "invalid pcie dma-range config\n"); - return; - } - - if (range->flags & IORESOURCE_PREFETCH) - flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; - - bar_low = pcie_bar_low_val((u32)cpu_addr, flags); - switch (region) { - case 0: - xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size); - bar_addr = cfg_base + PCI_BASE_ADDRESS_0; - writel(bar_low, bar_addr); - writel(upper_32_bits(cpu_addr), bar_addr + 0x4); - pim_reg = PIM1_1L; - break; - case 1: - xgene_pcie_writel(port, IBAR2, bar_low); - xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask)); - pim_reg = PIM2_1L; - break; - case 2: - xgene_pcie_writel(port, IBAR3L, bar_low); - xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr)); - xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask)); - xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask)); - pim_reg = PIM3_1L; - break; - } - - xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1)); -} - -static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) -{ - struct device_node *np = port->node; - struct of_pci_range range; - struct of_pci_range_parser parser; - struct device *dev = port->dev; - u8 ib_reg_mask = 0; - - if (of_pci_dma_range_parser_init(&parser, np)) { - dev_err(dev, "missing dma-ranges property\n"); - return -EINVAL; - } - - /* Get the dma-ranges from DT */ - for_each_of_pci_range(&parser, &range) { - u64 end = range.cpu_addr + range.size - 1; - - dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", - range.flags, range.cpu_addr, end, range.pci_addr); - xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); - } - return 0; -} - -/* clear BAR configuration which was done by firmware */ 
-static void xgene_pcie_clear_config(struct xgene_pcie_port *port) -{ - int i; - - for (i = PIM1_1L; i <= CFGCTL; i += 4) - xgene_pcie_writel(port, i, 0); -} - -static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res, - resource_size_t io_base) -{ - struct device *dev = port->dev; - u32 val, lanes = 0, speed = 0; - int ret; - - xgene_pcie_clear_config(port); - - /* setup the vendor and device IDs correctly */ - val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID; - xgene_pcie_writel(port, BRIDGE_CFG_0, val); - - ret = xgene_pcie_map_ranges(port, res, io_base); - if (ret) - return ret; - - ret = xgene_pcie_parse_map_dma_ranges(port); - if (ret) - return ret; - - xgene_pcie_linkup(port, &lanes, &speed); - if (!port->link_up) - dev_info(dev, "(rc) link down\n"); - else - dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1); - return 0; -} - -static struct pci_ops xgene_pcie_ops = { - .map_bus = xgene_pcie_map_bus, - .read = xgene_pcie_config_read32, - .write = pci_generic_config_write32, -}; - -static int xgene_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *dn = dev->of_node; - struct xgene_pcie_port *port; - resource_size_t iobase = 0; - struct pci_bus *bus, *child; - struct pci_host_bridge *bridge; - int ret; - LIST_HEAD(res); - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); - if (!bridge) - return -ENOMEM; - - port = pci_host_bridge_priv(bridge); - - port->node = of_node_get(dn); - port->dev = dev; - - port->version = XGENE_PCIE_IP_VER_UNKN; - if (of_device_is_compatible(port->node, "apm,xgene-pcie")) - port->version = XGENE_PCIE_IP_VER_1; - - ret = xgene_pcie_map_reg(port, pdev); - if (ret) - return ret; - - ret = xgene_pcie_init_port(port); - if (ret) - return ret; - - ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, - &iobase); - if (ret) - return ret; - - ret = devm_request_pci_bus_resources(dev, &res); - if (ret) - goto error; - - ret = 
xgene_pcie_setup(port, &res, iobase); - if (ret) - goto error; - - list_splice_init(&res, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = port; - bridge->busnr = 0; - bridge->ops = &xgene_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - goto error; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; - -error: - pci_free_resource_list(&res); - return ret; -} - -static const struct of_device_id xgene_pcie_match_table[] = { - {.compatible = "apm,xgene-pcie",}, - {}, -}; - -static struct platform_driver xgene_pcie_driver = { - .driver = { - .name = "xgene-pcie", - .of_match_table = of_match_ptr(xgene_pcie_match_table), - .suppress_bind_attrs = true, - }, - .probe = xgene_pcie_probe, -}; -builtin_platform_driver(xgene_pcie_driver); -#endif diff --git a/drivers/pci/host/pcie-altera-msi.c b/drivers/pci/host/pcie-altera-msi.c deleted file mode 100644 index 025ef7d9a046..000000000000 --- a/drivers/pci/host/pcie-altera-msi.c +++ /dev/null @@ -1,291 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Altera PCIe MSI support - * - * Author: Ley Foon Tan - * - * Copyright Altera Corporation (C) 2013-2015. 
All rights reserved - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MSI_STATUS 0x0 -#define MSI_ERROR 0x4 -#define MSI_INTMASK 0x8 - -#define MAX_MSI_VECTORS 32 - -struct altera_msi { - DECLARE_BITMAP(used, MAX_MSI_VECTORS); - struct mutex lock; /* protect "used" bitmap */ - struct platform_device *pdev; - struct irq_domain *msi_domain; - struct irq_domain *inner_domain; - void __iomem *csr_base; - void __iomem *vector_base; - phys_addr_t vector_phy; - u32 num_of_vectors; - int irq; -}; - -static inline void msi_writel(struct altera_msi *msi, const u32 value, - const u32 reg) -{ - writel_relaxed(value, msi->csr_base + reg); -} - -static inline u32 msi_readl(struct altera_msi *msi, const u32 reg) -{ - return readl_relaxed(msi->csr_base + reg); -} - -static void altera_msi_isr(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct altera_msi *msi; - unsigned long status; - u32 bit; - u32 virq; - - chained_irq_enter(chip, desc); - msi = irq_desc_get_handler_data(desc); - - while ((status = msi_readl(msi, MSI_STATUS)) != 0) { - for_each_set_bit(bit, &status, msi->num_of_vectors) { - /* Dummy read from vector to clear the interrupt */ - readl_relaxed(msi->vector_base + (bit * sizeof(u32))); - - virq = irq_find_mapping(msi->inner_domain, bit); - if (virq) - generic_handle_irq(virq); - else - dev_err(&msi->pdev->dev, "unexpected MSI\n"); - } - } - - chained_irq_exit(chip, desc); -} - -static struct irq_chip altera_msi_irq_chip = { - .name = "Altera PCIe MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info altera_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), - .chip = &altera_msi_irq_chip, -}; - -static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -{ - struct altera_msi *msi = irq_data_get_irq_chip_data(data); - phys_addr_t addr 
= msi->vector_phy + (data->hwirq * sizeof(u32)); - - msg->address_lo = lower_32_bits(addr); - msg->address_hi = upper_32_bits(addr); - msg->data = data->hwirq; - - dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", - (int)data->hwirq, msg->address_hi, msg->address_lo); -} - -static int altera_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - -static struct irq_chip altera_msi_bottom_irq_chip = { - .name = "Altera MSI", - .irq_compose_msi_msg = altera_compose_msi_msg, - .irq_set_affinity = altera_msi_set_affinity, -}; - -static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *args) -{ - struct altera_msi *msi = domain->host_data; - unsigned long bit; - u32 mask; - - WARN_ON(nr_irqs != 1); - mutex_lock(&msi->lock); - - bit = find_first_zero_bit(msi->used, msi->num_of_vectors); - if (bit >= msi->num_of_vectors) { - mutex_unlock(&msi->lock); - return -ENOSPC; - } - - set_bit(bit, msi->used); - - mutex_unlock(&msi->lock); - - irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip, - domain->host_data, handle_simple_irq, - NULL, NULL); - - mask = msi_readl(msi, MSI_INTMASK); - mask |= 1 << bit; - msi_writel(msi, mask, MSI_INTMASK); - - return 0; -} - -static void altera_irq_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) -{ - struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct altera_msi *msi = irq_data_get_irq_chip_data(d); - u32 mask; - - mutex_lock(&msi->lock); - - if (!test_bit(d->hwirq, msi->used)) { - dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n", - d->hwirq); - } else { - __clear_bit(d->hwirq, msi->used); - mask = msi_readl(msi, MSI_INTMASK); - mask &= ~(1 << d->hwirq); - msi_writel(msi, mask, MSI_INTMASK); - } - - mutex_unlock(&msi->lock); -} - -static const struct irq_domain_ops msi_domain_ops = { - .alloc = altera_irq_domain_alloc, - .free = 
altera_irq_domain_free, -}; - -static int altera_allocate_domains(struct altera_msi *msi) -{ - struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node); - - msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, - &msi_domain_ops, msi); - if (!msi->inner_domain) { - dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &altera_msi_domain_info, msi->inner_domain); - if (!msi->msi_domain) { - dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->inner_domain); - return -ENOMEM; - } - - return 0; -} - -static void altera_free_domains(struct altera_msi *msi) -{ - irq_domain_remove(msi->msi_domain); - irq_domain_remove(msi->inner_domain); -} - -static int altera_msi_remove(struct platform_device *pdev) -{ - struct altera_msi *msi = platform_get_drvdata(pdev); - - msi_writel(msi, 0, MSI_INTMASK); - irq_set_chained_handler(msi->irq, NULL); - irq_set_handler_data(msi->irq, NULL); - - altera_free_domains(msi); - - platform_set_drvdata(pdev, NULL); - return 0; -} - -static int altera_msi_probe(struct platform_device *pdev) -{ - struct altera_msi *msi; - struct device_node *np = pdev->dev.of_node; - struct resource *res; - int ret; - - msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi), - GFP_KERNEL); - if (!msi) - return -ENOMEM; - - mutex_init(&msi->lock); - msi->pdev = pdev; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); - msi->csr_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(msi->csr_base)) { - dev_err(&pdev->dev, "failed to map csr memory\n"); - return PTR_ERR(msi->csr_base); - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "vector_slave"); - msi->vector_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(msi->vector_base)) { - dev_err(&pdev->dev, "failed to map vector_slave memory\n"); - return PTR_ERR(msi->vector_base); - } - - msi->vector_phy = res->start; - 
- if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) { - dev_err(&pdev->dev, "failed to parse the number of vectors\n"); - return -EINVAL; - } - - ret = altera_allocate_domains(msi); - if (ret) - return ret; - - msi->irq = platform_get_irq(pdev, 0); - if (msi->irq < 0) { - dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq); - ret = msi->irq; - goto err; - } - - irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi); - platform_set_drvdata(pdev, msi); - - return 0; - -err: - altera_msi_remove(pdev); - return ret; -} - -static const struct of_device_id altera_msi_of_match[] = { - { .compatible = "altr,msi-1.0", NULL }, - { }, -}; - -static struct platform_driver altera_msi_driver = { - .driver = { - .name = "altera-msi", - .of_match_table = altera_msi_of_match, - }, - .probe = altera_msi_probe, - .remove = altera_msi_remove, -}; - -static int __init altera_msi_init(void) -{ - return platform_driver_register(&altera_msi_driver); -} -subsys_initcall(altera_msi_init); diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c deleted file mode 100644 index 7d05e51205b3..000000000000 --- a/drivers/pci/host/pcie-altera.c +++ /dev/null @@ -1,645 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright Altera Corporation (C) 2013-2015. 
All rights reserved - * - * Author: Ley Foon Tan - * Description: Altera PCIe host controller driver - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -#define RP_TX_REG0 0x2000 -#define RP_TX_REG1 0x2004 -#define RP_TX_CNTRL 0x2008 -#define RP_TX_EOP 0x2 -#define RP_TX_SOP 0x1 -#define RP_RXCPL_STATUS 0x2010 -#define RP_RXCPL_EOP 0x2 -#define RP_RXCPL_SOP 0x1 -#define RP_RXCPL_REG0 0x2014 -#define RP_RXCPL_REG1 0x2018 -#define P2A_INT_STATUS 0x3060 -#define P2A_INT_STS_ALL 0xf -#define P2A_INT_ENABLE 0x3070 -#define P2A_INT_ENA_ALL 0xf -#define RP_LTSSM 0x3c64 -#define RP_LTSSM_MASK 0x1f -#define LTSSM_L0 0xf - -#define PCIE_CAP_OFFSET 0x80 -/* TLP configuration type 0 and 1 */ -#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ -#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ -#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */ -#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */ -#define TLP_PAYLOAD_SIZE 0x01 -#define TLP_READ_TAG 0x1d -#define TLP_WRITE_TAG 0x10 -#define RP_DEVFN 0 -#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) -#define TLP_CFGRD_DW0(pcie, bus) \ - ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ - : TLP_FMTTYPE_CFGRD1) << 24) | \ - TLP_PAYLOAD_SIZE) -#define TLP_CFGWR_DW0(pcie, bus) \ - ((((bus == pcie->root_bus_nr) ? 
TLP_FMTTYPE_CFGWR0 \ - : TLP_FMTTYPE_CFGWR1) << 24) | \ - TLP_PAYLOAD_SIZE) -#define TLP_CFG_DW1(pcie, tag, be) \ - (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) -#define TLP_CFG_DW2(bus, devfn, offset) \ - (((bus) << 24) | ((devfn) << 16) | (offset)) -#define TLP_COMP_STATUS(s) (((s) >> 13) & 7) -#define TLP_HDR_SIZE 3 -#define TLP_LOOP 500 - -#define LINK_UP_TIMEOUT HZ -#define LINK_RETRAIN_TIMEOUT HZ - -#define DWORD_MASK 3 - -struct altera_pcie { - struct platform_device *pdev; - void __iomem *cra_base; /* DT Cra */ - int irq; - u8 root_bus_nr; - struct irq_domain *irq_domain; - struct resource bus_range; - struct list_head resources; -}; - -struct tlp_rp_regpair_t { - u32 ctrl; - u32 reg0; - u32 reg1; -}; - -static inline void cra_writel(struct altera_pcie *pcie, const u32 value, - const u32 reg) -{ - writel_relaxed(value, pcie->cra_base + reg); -} - -static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) -{ - return readl_relaxed(pcie->cra_base + reg); -} - -static bool altera_pcie_link_up(struct altera_pcie *pcie) -{ - return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); -} - -/* - * Altera PCIe port uses BAR0 of RC's configuration space as the translation - * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space - * using these registers, so it can be reached by DMA from EP devices. - * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt - * from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge - * should be hidden during enumeration to avoid the sizing and resource - * allocation by PCIe core. 
- */ -static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, - int offset) -{ - if (pci_is_root_bus(bus) && (devfn == 0) && - (offset == PCI_BASE_ADDRESS_0)) - return true; - - return false; -} - -static void tlp_write_tx(struct altera_pcie *pcie, - struct tlp_rp_regpair_t *tlp_rp_regdata) -{ - cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0); - cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1); - cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); -} - -static bool altera_pcie_valid_device(struct altera_pcie *pcie, - struct pci_bus *bus, int dev) -{ - /* If there is no link, then there is no device */ - if (bus->number != pcie->root_bus_nr) { - if (!altera_pcie_link_up(pcie)) - return false; - } - - /* access only one slot on each root port */ - if (bus->number == pcie->root_bus_nr && dev > 0) - return false; - - return true; -} - -static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) -{ - int i; - bool sop = false; - u32 ctrl; - u32 reg0, reg1; - u32 comp_status = 1; - - /* - * Minimum 2 loops to read TLP headers and 1 loop to read data - * payload. 
- */ - for (i = 0; i < TLP_LOOP; i++) { - ctrl = cra_readl(pcie, RP_RXCPL_STATUS); - if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { - reg0 = cra_readl(pcie, RP_RXCPL_REG0); - reg1 = cra_readl(pcie, RP_RXCPL_REG1); - - if (ctrl & RP_RXCPL_SOP) { - sop = true; - comp_status = TLP_COMP_STATUS(reg1); - } - - if (ctrl & RP_RXCPL_EOP) { - if (comp_status) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (value) - *value = reg0; - - return PCIBIOS_SUCCESSFUL; - } - } - udelay(5); - } - - return PCIBIOS_DEVICE_NOT_FOUND; -} - -static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, - u32 data, bool align) -{ - struct tlp_rp_regpair_t tlp_rp_regdata; - - tlp_rp_regdata.reg0 = headers[0]; - tlp_rp_regdata.reg1 = headers[1]; - tlp_rp_regdata.ctrl = RP_TX_SOP; - tlp_write_tx(pcie, &tlp_rp_regdata); - - if (align) { - tlp_rp_regdata.reg0 = headers[2]; - tlp_rp_regdata.reg1 = 0; - tlp_rp_regdata.ctrl = 0; - tlp_write_tx(pcie, &tlp_rp_regdata); - - tlp_rp_regdata.reg0 = data; - tlp_rp_regdata.reg1 = 0; - } else { - tlp_rp_regdata.reg0 = headers[2]; - tlp_rp_regdata.reg1 = data; - } - - tlp_rp_regdata.ctrl = RP_TX_EOP; - tlp_write_tx(pcie, &tlp_rp_regdata); -} - -static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, - int where, u8 byte_en, u32 *value) -{ - u32 headers[TLP_HDR_SIZE]; - - headers[0] = TLP_CFGRD_DW0(pcie, bus); - headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); - headers[2] = TLP_CFG_DW2(bus, devfn, where); - - tlp_write_packet(pcie, headers, 0, false); - - return tlp_read_packet(pcie, value); -} - -static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, - int where, u8 byte_en, u32 value) -{ - u32 headers[TLP_HDR_SIZE]; - int ret; - - headers[0] = TLP_CFGWR_DW0(pcie, bus); - headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); - headers[2] = TLP_CFG_DW2(bus, devfn, where); - - /* check alignment to Qword */ - if ((where & 0x7) == 0) - tlp_write_packet(pcie, headers, value, true); - else - 
tlp_write_packet(pcie, headers, value, false); - - ret = tlp_read_packet(pcie, NULL); - if (ret != PCIBIOS_SUCCESSFUL) - return ret; - - /* - * Monitor changes to PCI_PRIMARY_BUS register on root port - * and update local copy of root bus number accordingly. - */ - if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS)) - pcie->root_bus_nr = (u8)(value); - - return PCIBIOS_SUCCESSFUL; -} - -static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, - unsigned int devfn, int where, int size, - u32 *value) -{ - int ret; - u32 data; - u8 byte_en; - - switch (size) { - case 1: - byte_en = 1 << (where & 3); - break; - case 2: - byte_en = 3 << (where & 3); - break; - default: - byte_en = 0xf; - break; - } - - ret = tlp_cfg_dword_read(pcie, busno, devfn, - (where & ~DWORD_MASK), byte_en, &data); - if (ret != PCIBIOS_SUCCESSFUL) - return ret; - - switch (size) { - case 1: - *value = (data >> (8 * (where & 0x3))) & 0xff; - break; - case 2: - *value = (data >> (8 * (where & 0x2))) & 0xffff; - break; - default: - *value = data; - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, - unsigned int devfn, int where, int size, - u32 value) -{ - u32 data32; - u32 shift = 8 * (where & 3); - u8 byte_en; - - switch (size) { - case 1: - data32 = (value & 0xff) << shift; - byte_en = 1 << (where & 3); - break; - case 2: - data32 = (value & 0xffff) << shift; - byte_en = 3 << (where & 3); - break; - default: - data32 = value; - byte_en = 0xf; - break; - } - - return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK), - byte_en, data32); -} - -static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *value) -{ - struct altera_pcie *pcie = bus->sysdata; - - if (altera_pcie_hide_rc_bar(bus, devfn, where)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) { - *value = 0xffffffff; - return 
PCIBIOS_DEVICE_NOT_FOUND; - } - - return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size, - value); -} - -static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 value) -{ - struct altera_pcie *pcie = bus->sysdata; - - if (altera_pcie_hide_rc_bar(bus, devfn, where)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) - return PCIBIOS_DEVICE_NOT_FOUND; - - return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size, - value); -} - -static struct pci_ops altera_pcie_ops = { - .read = altera_pcie_cfg_read, - .write = altera_pcie_cfg_write, -}; - -static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno, - unsigned int devfn, int offset, u16 *value) -{ - u32 data; - int ret; - - ret = _altera_pcie_cfg_read(pcie, busno, devfn, - PCIE_CAP_OFFSET + offset, sizeof(*value), - &data); - *value = data; - return ret; -} - -static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, - unsigned int devfn, int offset, u16 value) -{ - return _altera_pcie_cfg_write(pcie, busno, devfn, - PCIE_CAP_OFFSET + offset, sizeof(value), - value); -} - -static void altera_wait_link_retrain(struct altera_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - u16 reg16; - unsigned long start_jiffies; - - /* Wait for link training end. 
*/ - start_jiffies = jiffies; - for (;;) { - altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, - PCI_EXP_LNKSTA, ®16); - if (!(reg16 & PCI_EXP_LNKSTA_LT)) - break; - - if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) { - dev_err(dev, "link retrain timeout\n"); - break; - } - udelay(100); - } - - /* Wait for link is up */ - start_jiffies = jiffies; - for (;;) { - if (altera_pcie_link_up(pcie)) - break; - - if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { - dev_err(dev, "link up timeout\n"); - break; - } - udelay(100); - } -} - -static void altera_pcie_retrain(struct altera_pcie *pcie) -{ - u16 linkcap, linkstat, linkctl; - - if (!altera_pcie_link_up(pcie)) - return; - - /* - * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but - * current speed is 2.5 GB/s. - */ - altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP, - &linkcap); - if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) - return; - - altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA, - &linkstat); - if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { - altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, - PCI_EXP_LNKCTL, &linkctl); - linkctl |= PCI_EXP_LNKCTL_RL; - altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, - PCI_EXP_LNKCTL, linkctl); - - altera_wait_link_retrain(pcie); - } -} - -static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - return 0; -} - -static const struct irq_domain_ops intx_domain_ops = { - .map = altera_pcie_intx_map, - .xlate = pci_irqd_intx_xlate, -}; - -static void altera_pcie_isr(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct altera_pcie *pcie; - struct device *dev; - unsigned long status; - u32 bit; - u32 virq; - - chained_irq_enter(chip, desc); - pcie = 
irq_desc_get_handler_data(desc); - dev = &pcie->pdev->dev; - - while ((status = cra_readl(pcie, P2A_INT_STATUS) - & P2A_INT_STS_ALL) != 0) { - for_each_set_bit(bit, &status, PCI_NUM_INTX) { - /* clear interrupts */ - cra_writel(pcie, 1 << bit, P2A_INT_STATUS); - - virq = irq_find_mapping(pcie->irq_domain, bit); - if (virq) - generic_handle_irq(virq); - else - dev_err(dev, "unexpected IRQ, INT%d\n", bit); - } - } - - chained_irq_exit(chip, desc); -} - -static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie) -{ - int err, res_valid = 0; - struct device *dev = &pcie->pdev->dev; - struct resource_entry *win; - - err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, - &pcie->resources, NULL); - if (err) - return err; - - err = devm_request_pci_bus_resources(dev, &pcie->resources); - if (err) - goto out_release_res; - - resource_list_for_each_entry(win, &pcie->resources) { - struct resource *res = win->res; - - if (resource_type(res) == IORESOURCE_MEM) - res_valid |= !(res->flags & IORESOURCE_PREFETCH); - } - - if (res_valid) - return 0; - - dev_err(dev, "non-prefetchable memory resource required\n"); - err = -EINVAL; - -out_release_res: - pci_free_resource_list(&pcie->resources); - return err; -} - -static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - struct device_node *node = dev->of_node; - - /* Setup INTx */ - pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX, - &intx_domain_ops, pcie); - if (!pcie->irq_domain) { - dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return -ENOMEM; - } - - return 0; -} - -static int altera_pcie_parse_dt(struct altera_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - struct platform_device *pdev = pcie->pdev; - struct resource *cra; - - cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); - pcie->cra_base = devm_ioremap_resource(dev, cra); - if (IS_ERR(pcie->cra_base)) - return PTR_ERR(pcie->cra_base); - - /* setup IRQ */ 
- pcie->irq = platform_get_irq(pdev, 0); - if (pcie->irq < 0) { - dev_err(dev, "failed to get IRQ: %d\n", pcie->irq); - return pcie->irq; - } - - irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); - return 0; -} - -static void altera_pcie_host_init(struct altera_pcie *pcie) -{ - altera_pcie_retrain(pcie); -} - -static int altera_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct altera_pcie *pcie; - struct pci_bus *bus; - struct pci_bus *child; - struct pci_host_bridge *bridge; - int ret; - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); - if (!bridge) - return -ENOMEM; - - pcie = pci_host_bridge_priv(bridge); - pcie->pdev = pdev; - - ret = altera_pcie_parse_dt(pcie); - if (ret) { - dev_err(dev, "Parsing DT failed\n"); - return ret; - } - - INIT_LIST_HEAD(&pcie->resources); - - ret = altera_pcie_parse_request_of_pci_ranges(pcie); - if (ret) { - dev_err(dev, "Failed add resources\n"); - return ret; - } - - ret = altera_pcie_init_irq_domain(pcie); - if (ret) { - dev_err(dev, "Failed creating IRQ Domain\n"); - return ret; - } - - /* clear all interrupts */ - cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); - /* enable all interrupts */ - cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); - altera_pcie_host_init(pcie); - - list_splice_init(&pcie->resources, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = pcie; - bridge->busnr = pcie->root_bus_nr; - bridge->ops = &altera_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - - /* Configure PCI Express setting. 
*/ - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - return ret; -} - -static const struct of_device_id altera_pcie_of_match[] = { - { .compatible = "altr,pcie-root-port-1.0", }, - {}, -}; - -static struct platform_driver altera_pcie_driver = { - .probe = altera_pcie_probe, - .driver = { - .name = "altera-pcie", - .of_match_table = altera_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; - -builtin_platform_driver(altera_pcie_driver); diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c deleted file mode 100644 index aa55b064f64d..000000000000 --- a/drivers/pci/host/pcie-iproc-bcma.c +++ /dev/null @@ -1,112 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2015 Broadcom Corporation - * Copyright (C) 2015 Hauke Mehrtens - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-iproc.h" - - -/* NS: CLASS field is R/O, and set to wrong 0x200 value */ -static void bcma_pcie2_fixup_class(struct pci_dev *dev) -{ - dev->class = PCI_CLASS_BRIDGE_PCI << 8; -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); - -static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - struct iproc_pcie *pcie = dev->sysdata; - struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); - - return bcma_core_irq(bdev, 5); -} - -static int iproc_pcie_bcma_probe(struct bcma_device *bdev) -{ - struct device *dev = &bdev->dev; - struct iproc_pcie *pcie; - LIST_HEAD(resources); - struct pci_host_bridge *bridge; - int ret; - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); - if (!bridge) - return -ENOMEM; - - pcie = pci_host_bridge_priv(bridge); - - pcie->dev = dev; - - pcie->type = IPROC_PCIE_PAXB_BCMA; - pcie->base = bdev->io_addr; - if (!pcie->base) { - dev_err(dev, "no 
controller registers\n"); - return -ENOMEM; - } - - pcie->base_addr = bdev->addr; - - pcie->mem.start = bdev->addr_s[0]; - pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1; - pcie->mem.name = "PCIe MEM space"; - pcie->mem.flags = IORESOURCE_MEM; - pci_add_resource(&resources, &pcie->mem); - - pcie->map_irq = iproc_pcie_bcma_map_irq; - - ret = iproc_pcie_setup(pcie, &resources); - if (ret) { - dev_err(dev, "PCIe controller setup failed\n"); - pci_free_resource_list(&resources); - return ret; - } - - bcma_set_drvdata(bdev, pcie); - return 0; -} - -static void iproc_pcie_bcma_remove(struct bcma_device *bdev) -{ - struct iproc_pcie *pcie = bcma_get_drvdata(bdev); - - iproc_pcie_remove(pcie); -} - -static const struct bcma_device_id iproc_pcie_bcma_table[] = { - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), - {}, -}; -MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); - -static struct bcma_driver iproc_pcie_bcma_driver = { - .name = KBUILD_MODNAME, - .id_table = iproc_pcie_bcma_table, - .probe = iproc_pcie_bcma_probe, - .remove = iproc_pcie_bcma_remove, -}; - -static int __init iproc_pcie_bcma_init(void) -{ - return bcma_driver_register(&iproc_pcie_bcma_driver); -} -module_init(iproc_pcie_bcma_init); - -static void __exit iproc_pcie_bcma_exit(void) -{ - bcma_driver_unregister(&iproc_pcie_bcma_driver); -} -module_exit(iproc_pcie_bcma_exit); - -MODULE_AUTHOR("Hauke Mehrtens"); -MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c deleted file mode 100644 index 9deb56989d72..000000000000 --- a/drivers/pci/host/pcie-iproc-msi.c +++ /dev/null @@ -1,671 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2015 Broadcom Corporation - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-iproc.h" - -#define IPROC_MSI_INTR_EN_SHIFT 11 -#define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT) 
-#define IPROC_MSI_INT_N_EVENT_SHIFT 1 -#define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT) -#define IPROC_MSI_EQ_EN_SHIFT 0 -#define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT) - -#define IPROC_MSI_EQ_MASK 0x3f - -/* Max number of GIC interrupts */ -#define NR_HW_IRQS 6 - -/* Number of entries in each event queue */ -#define EQ_LEN 64 - -/* Size of each event queue memory region */ -#define EQ_MEM_REGION_SIZE SZ_4K - -/* Size of each MSI address region */ -#define MSI_MEM_REGION_SIZE SZ_4K - -enum iproc_msi_reg { - IPROC_MSI_EQ_PAGE = 0, - IPROC_MSI_EQ_PAGE_UPPER, - IPROC_MSI_PAGE, - IPROC_MSI_PAGE_UPPER, - IPROC_MSI_CTRL, - IPROC_MSI_EQ_HEAD, - IPROC_MSI_EQ_TAIL, - IPROC_MSI_INTS_EN, - IPROC_MSI_REG_SIZE, -}; - -struct iproc_msi; - -/** - * iProc MSI group - * - * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI - * event queue. - * - * @msi: pointer to iProc MSI data - * @gic_irq: GIC interrupt - * @eq: Event queue number - */ -struct iproc_msi_grp { - struct iproc_msi *msi; - int gic_irq; - unsigned int eq; -}; - -/** - * iProc event queue based MSI - * - * Only meant to be used on platforms without MSI support integrated into the - * GIC. 
- * - * @pcie: pointer to iProc PCIe data - * @reg_offsets: MSI register offsets - * @grps: MSI groups - * @nr_irqs: number of total interrupts connected to GIC - * @nr_cpus: number of toal CPUs - * @has_inten_reg: indicates the MSI interrupt enable register needs to be - * set explicitly (required for some legacy platforms) - * @bitmap: MSI vector bitmap - * @bitmap_lock: lock to protect access to the MSI bitmap - * @nr_msi_vecs: total number of MSI vectors - * @inner_domain: inner IRQ domain - * @msi_domain: MSI IRQ domain - * @nr_eq_region: required number of 4K aligned memory region for MSI event - * queues - * @nr_msi_region: required number of 4K aligned address region for MSI posted - * writes - * @eq_cpu: pointer to allocated memory region for MSI event queues - * @eq_dma: DMA address of MSI event queues - * @msi_addr: MSI address - */ -struct iproc_msi { - struct iproc_pcie *pcie; - const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE]; - struct iproc_msi_grp *grps; - int nr_irqs; - int nr_cpus; - bool has_inten_reg; - unsigned long *bitmap; - struct mutex bitmap_lock; - unsigned int nr_msi_vecs; - struct irq_domain *inner_domain; - struct irq_domain *msi_domain; - unsigned int nr_eq_region; - unsigned int nr_msi_region; - void *eq_cpu; - dma_addr_t eq_dma; - phys_addr_t msi_addr; -}; - -static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { - { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 }, - { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 }, - { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 }, - { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 }, - { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 }, - { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 }, -}; - -static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { - { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 }, - { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 }, - { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 }, - 
{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c }, -}; - -static inline u32 iproc_msi_read_reg(struct iproc_msi *msi, - enum iproc_msi_reg reg, - unsigned int eq) -{ - struct iproc_pcie *pcie = msi->pcie; - - return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]); -} - -static inline void iproc_msi_write_reg(struct iproc_msi *msi, - enum iproc_msi_reg reg, - int eq, u32 val) -{ - struct iproc_pcie *pcie = msi->pcie; - - writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]); -} - -static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq) -{ - return (hwirq % msi->nr_irqs); -} - -static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi, - unsigned long hwirq) -{ - if (msi->nr_msi_region > 1) - return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE; - else - return hwirq_to_group(msi, hwirq) * sizeof(u32); -} - -static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq) -{ - if (msi->nr_eq_region > 1) - return eq * EQ_MEM_REGION_SIZE; - else - return eq * EQ_LEN * sizeof(u32); -} - -static struct irq_chip iproc_msi_irq_chip = { - .name = "iProc-MSI", -}; - -static struct msi_domain_info iproc_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, - .chip = &iproc_msi_irq_chip, -}; - -/* - * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a - * dedicated event queue. Each MSI group can support up to 64 MSI vectors. - * - * The number of MSI groups varies between different iProc SoCs. The total - * number of CPU cores also varies. To support MSI IRQ affinity, we - * distribute GIC interrupts across all available CPUs. MSI vector is moved - * from one GIC interrupt to another to steer to the target CPU. 
- * - * Assuming: - * - the number of MSI groups is M - * - the number of CPU cores is N - * - M is always a multiple of N - * - * Total number of raw MSI vectors = M * 64 - * Total number of supported MSI vectors = (M * 64) / N - */ -static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq) -{ - return (hwirq % msi->nr_cpus); -} - -static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi, - unsigned long hwirq) -{ - return (hwirq - hwirq_to_cpu(msi, hwirq)); -} - -static int iproc_msi_irq_set_affinity(struct irq_data *data, - const struct cpumask *mask, bool force) -{ - struct iproc_msi *msi = irq_data_get_irq_chip_data(data); - int target_cpu = cpumask_first(mask); - int curr_cpu; - - curr_cpu = hwirq_to_cpu(msi, data->hwirq); - if (curr_cpu == target_cpu) - return IRQ_SET_MASK_OK_DONE; - - /* steer MSI to the target CPU */ - data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; - - return IRQ_SET_MASK_OK; -} - -static void iproc_msi_irq_compose_msi_msg(struct irq_data *data, - struct msi_msg *msg) -{ - struct iproc_msi *msi = irq_data_get_irq_chip_data(data); - dma_addr_t addr; - - addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq); - msg->address_lo = lower_32_bits(addr); - msg->address_hi = upper_32_bits(addr); - msg->data = data->hwirq << 5; -} - -static struct irq_chip iproc_msi_bottom_irq_chip = { - .name = "MSI", - .irq_set_affinity = iproc_msi_irq_set_affinity, - .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg, -}; - -static int iproc_msi_irq_domain_alloc(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs, - void *args) -{ - struct iproc_msi *msi = domain->host_data; - int hwirq, i; - - mutex_lock(&msi->bitmap_lock); - - /* Allocate 'nr_cpus' number of MSI vectors each time */ - hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0, - msi->nr_cpus, 0); - if (hwirq < msi->nr_msi_vecs) { - bitmap_set(msi->bitmap, hwirq, msi->nr_cpus); - } else { - 
mutex_unlock(&msi->bitmap_lock); - return -ENOSPC; - } - - mutex_unlock(&msi->bitmap_lock); - - for (i = 0; i < nr_irqs; i++) { - irq_domain_set_info(domain, virq + i, hwirq + i, - &iproc_msi_bottom_irq_chip, - domain->host_data, handle_simple_irq, - NULL, NULL); - } - - return hwirq; -} - -static void iproc_msi_irq_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) -{ - struct irq_data *data = irq_domain_get_irq_data(domain, virq); - struct iproc_msi *msi = irq_data_get_irq_chip_data(data); - unsigned int hwirq; - - mutex_lock(&msi->bitmap_lock); - - hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq); - bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus); - - mutex_unlock(&msi->bitmap_lock); - - irq_domain_free_irqs_parent(domain, virq, nr_irqs); -} - -static const struct irq_domain_ops msi_domain_ops = { - .alloc = iproc_msi_irq_domain_alloc, - .free = iproc_msi_irq_domain_free, -}; - -static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head) -{ - u32 *msg, hwirq; - unsigned int offs; - - offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32); - msg = (u32 *)(msi->eq_cpu + offs); - hwirq = readl(msg); - hwirq = (hwirq >> 5) + (hwirq & 0x1f); - - /* - * Since we have multiple hwirq mapped to a single MSI vector, - * now we need to derive the hwirq at CPU0. It can then be used to - * mapped back to virq. - */ - return hwirq_to_canonical_hwirq(msi, hwirq); -} - -static void iproc_msi_handler(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct iproc_msi_grp *grp; - struct iproc_msi *msi; - u32 eq, head, tail, nr_events; - unsigned long hwirq; - int virq; - - chained_irq_enter(chip, desc); - - grp = irq_desc_get_handler_data(desc); - msi = grp->msi; - eq = grp->eq; - - /* - * iProc MSI event queue is tracked by head and tail pointers. Head - * pointer indicates the next entry (MSI data) to be consumed by SW in - * the queue and needs to be updated by SW. 
iProc MSI core uses the - * tail pointer as the next data insertion point. - * - * Entries between head and tail pointers contain valid MSI data. MSI - * data is guaranteed to be in the event queue memory before the tail - * pointer is updated by the iProc MSI core. - */ - head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD, - eq) & IPROC_MSI_EQ_MASK; - do { - tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL, - eq) & IPROC_MSI_EQ_MASK; - - /* - * Figure out total number of events (MSI data) to be - * processed. - */ - nr_events = (tail < head) ? - (EQ_LEN - (head - tail)) : (tail - head); - if (!nr_events) - break; - - /* process all outstanding events */ - while (nr_events--) { - hwirq = decode_msi_hwirq(msi, eq, head); - virq = irq_find_mapping(msi->inner_domain, hwirq); - generic_handle_irq(virq); - - head++; - head %= EQ_LEN; - } - - /* - * Now all outstanding events have been processed. Update the - * head pointer. - */ - iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head); - - /* - * Now go read the tail pointer again to see if there are new - * oustanding events that came in during the above window. 
- */ - } while (true); - - chained_irq_exit(chip, desc); -} - -static void iproc_msi_enable(struct iproc_msi *msi) -{ - int i, eq; - u32 val; - - /* Program memory region for each event queue */ - for (i = 0; i < msi->nr_eq_region; i++) { - dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE); - - iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i, - lower_32_bits(addr)); - iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i, - upper_32_bits(addr)); - } - - /* Program address region for MSI posted writes */ - for (i = 0; i < msi->nr_msi_region; i++) { - phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE); - - iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i, - lower_32_bits(addr)); - iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i, - upper_32_bits(addr)); - } - - for (eq = 0; eq < msi->nr_irqs; eq++) { - /* Enable MSI event queue */ - val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | - IPROC_MSI_EQ_EN; - iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); - - /* - * Some legacy platforms require the MSI interrupt enable - * register to be set explicitly. 
- */ - if (msi->has_inten_reg) { - val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); - val |= BIT(eq); - iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); - } - } -} - -static void iproc_msi_disable(struct iproc_msi *msi) -{ - u32 eq, val; - - for (eq = 0; eq < msi->nr_irqs; eq++) { - if (msi->has_inten_reg) { - val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); - val &= ~BIT(eq); - iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); - } - - val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq); - val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | - IPROC_MSI_EQ_EN); - iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); - } -} - -static int iproc_msi_alloc_domains(struct device_node *node, - struct iproc_msi *msi) -{ - msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs, - &msi_domain_ops, msi); - if (!msi->inner_domain) - return -ENOMEM; - - msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), - &iproc_msi_domain_info, - msi->inner_domain); - if (!msi->msi_domain) { - irq_domain_remove(msi->inner_domain); - return -ENOMEM; - } - - return 0; -} - -static void iproc_msi_free_domains(struct iproc_msi *msi) -{ - if (msi->msi_domain) - irq_domain_remove(msi->msi_domain); - - if (msi->inner_domain) - irq_domain_remove(msi->inner_domain); -} - -static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu) -{ - int i; - - for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { - irq_set_chained_handler_and_data(msi->grps[i].gic_irq, - NULL, NULL); - } -} - -static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu) -{ - int i, ret; - cpumask_var_t mask; - struct iproc_pcie *pcie = msi->pcie; - - for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { - irq_set_chained_handler_and_data(msi->grps[i].gic_irq, - iproc_msi_handler, - &msi->grps[i]); - /* Dedicate GIC interrupt to each CPU core */ - if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - cpumask_clear(mask); - cpumask_set_cpu(cpu, mask); - ret = 
irq_set_affinity(msi->grps[i].gic_irq, mask); - if (ret) - dev_err(pcie->dev, - "failed to set affinity for IRQ%d\n", - msi->grps[i].gic_irq); - free_cpumask_var(mask); - } else { - dev_err(pcie->dev, "failed to alloc CPU mask\n"); - ret = -EINVAL; - } - - if (ret) { - /* Free all configured/unconfigured IRQs */ - iproc_msi_irq_free(msi, cpu); - return ret; - } - } - - return 0; -} - -int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) -{ - struct iproc_msi *msi; - int i, ret; - unsigned int cpu; - - if (!of_device_is_compatible(node, "brcm,iproc-msi")) - return -ENODEV; - - if (!of_find_property(node, "msi-controller", NULL)) - return -ENODEV; - - if (pcie->msi) - return -EBUSY; - - msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL); - if (!msi) - return -ENOMEM; - - msi->pcie = pcie; - pcie->msi = msi; - msi->msi_addr = pcie->base_addr; - mutex_init(&msi->bitmap_lock); - msi->nr_cpus = num_possible_cpus(); - - msi->nr_irqs = of_irq_count(node); - if (!msi->nr_irqs) { - dev_err(pcie->dev, "found no MSI GIC interrupt\n"); - return -ENODEV; - } - - if (msi->nr_irqs > NR_HW_IRQS) { - dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n", - msi->nr_irqs); - msi->nr_irqs = NR_HW_IRQS; - } - - if (msi->nr_irqs < msi->nr_cpus) { - dev_err(pcie->dev, - "not enough GIC interrupts for MSI affinity\n"); - return -EINVAL; - } - - if (msi->nr_irqs % msi->nr_cpus != 0) { - msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus; - dev_warn(pcie->dev, "Reducing number of interrupts to %d\n", - msi->nr_irqs); - } - - switch (pcie->type) { - case IPROC_PCIE_PAXB_BCMA: - case IPROC_PCIE_PAXB: - msi->reg_offsets = iproc_msi_reg_paxb; - msi->nr_eq_region = 1; - msi->nr_msi_region = 1; - break; - case IPROC_PCIE_PAXC: - msi->reg_offsets = iproc_msi_reg_paxc; - msi->nr_eq_region = msi->nr_irqs; - msi->nr_msi_region = msi->nr_irqs; - break; - default: - dev_err(pcie->dev, "incompatible iProc PCIe interface\n"); - return -EINVAL; - } - - if (of_find_property(node, 
"brcm,pcie-msi-inten", NULL)) - msi->has_inten_reg = true; - - msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN; - msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs), - sizeof(*msi->bitmap), GFP_KERNEL); - if (!msi->bitmap) - return -ENOMEM; - - msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps), - GFP_KERNEL); - if (!msi->grps) - return -ENOMEM; - - for (i = 0; i < msi->nr_irqs; i++) { - unsigned int irq = irq_of_parse_and_map(node, i); - - if (!irq) { - dev_err(pcie->dev, "unable to parse/map interrupt\n"); - ret = -ENODEV; - goto free_irqs; - } - msi->grps[i].gic_irq = irq; - msi->grps[i].msi = msi; - msi->grps[i].eq = i; - } - - /* Reserve memory for event queue and make sure memories are zeroed */ - msi->eq_cpu = dma_zalloc_coherent(pcie->dev, - msi->nr_eq_region * EQ_MEM_REGION_SIZE, - &msi->eq_dma, GFP_KERNEL); - if (!msi->eq_cpu) { - ret = -ENOMEM; - goto free_irqs; - } - - ret = iproc_msi_alloc_domains(node, msi); - if (ret) { - dev_err(pcie->dev, "failed to create MSI domains\n"); - goto free_eq_dma; - } - - for_each_online_cpu(cpu) { - ret = iproc_msi_irq_setup(msi, cpu); - if (ret) - goto free_msi_irq; - } - - iproc_msi_enable(msi); - - return 0; - -free_msi_irq: - for_each_online_cpu(cpu) - iproc_msi_irq_free(msi, cpu); - iproc_msi_free_domains(msi); - -free_eq_dma: - dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, - msi->eq_cpu, msi->eq_dma); - -free_irqs: - for (i = 0; i < msi->nr_irqs; i++) { - if (msi->grps[i].gic_irq) - irq_dispose_mapping(msi->grps[i].gic_irq); - } - pcie->msi = NULL; - return ret; -} -EXPORT_SYMBOL(iproc_msi_init); - -void iproc_msi_exit(struct iproc_pcie *pcie) -{ - struct iproc_msi *msi = pcie->msi; - unsigned int i, cpu; - - if (!msi) - return; - - iproc_msi_disable(msi); - - for_each_online_cpu(cpu) - iproc_msi_irq_free(msi, cpu); - - iproc_msi_free_domains(msi); - - dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, - msi->eq_cpu, msi->eq_dma); - - for (i = 
0; i < msi->nr_irqs; i++) { - if (msi->grps[i].gic_irq) - irq_dispose_mapping(msi->grps[i].gic_irq); - } -} -EXPORT_SYMBOL(iproc_msi_exit); diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c deleted file mode 100644 index f30f5f3fb5c1..000000000000 --- a/drivers/pci/host/pcie-iproc-platform.c +++ /dev/null @@ -1,157 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2015 Broadcom Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" -#include "pcie-iproc.h" - -static const struct of_device_id iproc_pcie_of_match_table[] = { - { - .compatible = "brcm,iproc-pcie", - .data = (int *)IPROC_PCIE_PAXB, - }, { - .compatible = "brcm,iproc-pcie-paxb-v2", - .data = (int *)IPROC_PCIE_PAXB_V2, - }, { - .compatible = "brcm,iproc-pcie-paxc", - .data = (int *)IPROC_PCIE_PAXC, - }, { - .compatible = "brcm,iproc-pcie-paxc-v2", - .data = (int *)IPROC_PCIE_PAXC_V2, - }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table); - -static int iproc_pcie_pltfm_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct iproc_pcie *pcie; - struct device_node *np = dev->of_node; - struct resource reg; - resource_size_t iobase = 0; - LIST_HEAD(resources); - struct pci_host_bridge *bridge; - int ret; - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); - if (!bridge) - return -ENOMEM; - - pcie = pci_host_bridge_priv(bridge); - - pcie->dev = dev; - pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); - - ret = of_address_to_resource(np, 0, ®); - if (ret < 0) { - dev_err(dev, "unable to obtain controller resources\n"); - return ret; - } - - pcie->base = devm_pci_remap_cfgspace(dev, reg.start, - resource_size(®)); - if (!pcie->base) { - dev_err(dev, "unable to map controller registers\n"); - return -ENOMEM; - } - pcie->base_addr = reg.start; - - if 
(of_property_read_bool(np, "brcm,pcie-ob")) { - u32 val; - - ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset", - &val); - if (ret) { - dev_err(dev, - "missing brcm,pcie-ob-axi-offset property\n"); - return ret; - } - pcie->ob.axi_offset = val; - pcie->need_ob_cfg = true; - } - - /* - * DT nodes are not used by all platforms that use the iProc PCIe - * core driver. For platforms that require explict inbound mapping - * configuration, "dma-ranges" would have been present in DT - */ - pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); - - /* PHY use is optional */ - pcie->phy = devm_phy_get(dev, "pcie-phy"); - if (IS_ERR(pcie->phy)) { - if (PTR_ERR(pcie->phy) == -EPROBE_DEFER) - return -EPROBE_DEFER; - pcie->phy = NULL; - } - - ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources, - &iobase); - if (ret) { - dev_err(dev, "unable to get PCI host bridge resources\n"); - return ret; - } - - /* PAXC doesn't support legacy IRQs, skip mapping */ - switch (pcie->type) { - case IPROC_PCIE_PAXC: - case IPROC_PCIE_PAXC_V2: - break; - default: - pcie->map_irq = of_irq_parse_and_map_pci; - } - - ret = iproc_pcie_setup(pcie, &resources); - if (ret) { - dev_err(dev, "PCIe controller setup failed\n"); - pci_free_resource_list(&resources); - return ret; - } - - platform_set_drvdata(pdev, pcie); - return 0; -} - -static int iproc_pcie_pltfm_remove(struct platform_device *pdev) -{ - struct iproc_pcie *pcie = platform_get_drvdata(pdev); - - return iproc_pcie_remove(pcie); -} - -static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev) -{ - struct iproc_pcie *pcie = platform_get_drvdata(pdev); - - iproc_pcie_shutdown(pcie); -} - -static struct platform_driver iproc_pcie_pltfm_driver = { - .driver = { - .name = "iproc-pcie", - .of_match_table = of_match_ptr(iproc_pcie_of_match_table), - }, - .probe = iproc_pcie_pltfm_probe, - .remove = iproc_pcie_pltfm_remove, - .shutdown = iproc_pcie_pltfm_shutdown, -}; 
-module_platform_driver(iproc_pcie_pltfm_driver); - -MODULE_AUTHOR("Ray Jui "); -MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c deleted file mode 100644 index 3c76c5fa4f32..000000000000 --- a/drivers/pci/host/pcie-iproc.c +++ /dev/null @@ -1,1432 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2014 Hauke Mehrtens - * Copyright (C) 2015 Broadcom Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-iproc.h" - -#define EP_PERST_SOURCE_SELECT_SHIFT 2 -#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) -#define EP_MODE_SURVIVE_PERST_SHIFT 1 -#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) -#define RC_PCIE_RST_OUTPUT_SHIFT 0 -#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) -#define PAXC_RESET_MASK 0x7f - -#define GIC_V3_CFG_SHIFT 0 -#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT) - -#define MSI_ENABLE_CFG_SHIFT 0 -#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT) - -#define CFG_IND_ADDR_MASK 0x00001ffc - -#define CFG_ADDR_BUS_NUM_SHIFT 20 -#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000 -#define CFG_ADDR_DEV_NUM_SHIFT 15 -#define CFG_ADDR_DEV_NUM_MASK 0x000f8000 -#define CFG_ADDR_FUNC_NUM_SHIFT 12 -#define CFG_ADDR_FUNC_NUM_MASK 0x00007000 -#define CFG_ADDR_REG_NUM_SHIFT 2 -#define CFG_ADDR_REG_NUM_MASK 0x00000ffc -#define CFG_ADDR_CFG_TYPE_SHIFT 0 -#define CFG_ADDR_CFG_TYPE_MASK 0x00000003 - -#define SYS_RC_INTX_MASK 0xf - -#define PCIE_PHYLINKUP_SHIFT 3 -#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) -#define PCIE_DL_ACTIVE_SHIFT 2 -#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) - -#define APB_ERR_EN_SHIFT 0 -#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) - -#define CFG_RETRY_STATUS 0xffff0001 -#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ - -/* derive the 
enum index of the outbound/inbound mapping registers */ -#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) - -/* - * Maximum number of outbound mapping window sizes that can be supported by any - * OARR/OMAP mapping pair - */ -#define MAX_NUM_OB_WINDOW_SIZES 4 - -#define OARR_VALID_SHIFT 0 -#define OARR_VALID BIT(OARR_VALID_SHIFT) -#define OARR_SIZE_CFG_SHIFT 1 - -/* - * Maximum number of inbound mapping region sizes that can be supported by an - * IARR - */ -#define MAX_NUM_IB_REGION_SIZES 9 - -#define IMAP_VALID_SHIFT 0 -#define IMAP_VALID BIT(IMAP_VALID_SHIFT) - -#define IPROC_PCI_EXP_CAP 0xac - -#define IPROC_PCIE_REG_INVALID 0xffff - -/** - * iProc PCIe outbound mapping controller specific parameters - * - * @window_sizes: list of supported outbound mapping window sizes in MB - * @nr_sizes: number of supported outbound mapping window sizes - */ -struct iproc_pcie_ob_map { - resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES]; - unsigned int nr_sizes; -}; - -static const struct iproc_pcie_ob_map paxb_ob_map[] = { - { - /* OARR0/OMAP0 */ - .window_sizes = { 128, 256 }, - .nr_sizes = 2, - }, - { - /* OARR1/OMAP1 */ - .window_sizes = { 128, 256 }, - .nr_sizes = 2, - }, -}; - -static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = { - { - /* OARR0/OMAP0 */ - .window_sizes = { 128, 256 }, - .nr_sizes = 2, - }, - { - /* OARR1/OMAP1 */ - .window_sizes = { 128, 256 }, - .nr_sizes = 2, - }, - { - /* OARR2/OMAP2 */ - .window_sizes = { 128, 256, 512, 1024 }, - .nr_sizes = 4, - }, - { - /* OARR3/OMAP3 */ - .window_sizes = { 128, 256, 512, 1024 }, - .nr_sizes = 4, - }, -}; - -/** - * iProc PCIe inbound mapping type - */ -enum iproc_pcie_ib_map_type { - /* for DDR memory */ - IPROC_PCIE_IB_MAP_MEM = 0, - - /* for device I/O memory */ - IPROC_PCIE_IB_MAP_IO, - - /* invalid or unused */ - IPROC_PCIE_IB_MAP_INVALID -}; - -/** - * iProc PCIe inbound mapping controller specific parameters - * - * @type: inbound mapping region type - * @size_unit: inbound mapping 
region size unit, could be SZ_1K, SZ_1M, or - * SZ_1G - * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or - * GB, depedning on the size unit - * @nr_sizes: number of supported inbound mapping region sizes - * @nr_windows: number of supported inbound mapping windows for the region - * @imap_addr_offset: register offset between the upper and lower 32-bit - * IMAP address registers - * @imap_window_offset: register offset between each IMAP window - */ -struct iproc_pcie_ib_map { - enum iproc_pcie_ib_map_type type; - unsigned int size_unit; - resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES]; - unsigned int nr_sizes; - unsigned int nr_windows; - u16 imap_addr_offset; - u16 imap_window_offset; -}; - -static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = { - { - /* IARR0/IMAP0 */ - .type = IPROC_PCIE_IB_MAP_IO, - .size_unit = SZ_1K, - .region_sizes = { 32 }, - .nr_sizes = 1, - .nr_windows = 8, - .imap_addr_offset = 0x40, - .imap_window_offset = 0x4, - }, - { - /* IARR1/IMAP1 (currently unused) */ - .type = IPROC_PCIE_IB_MAP_INVALID, - }, - { - /* IARR2/IMAP2 */ - .type = IPROC_PCIE_IB_MAP_MEM, - .size_unit = SZ_1M, - .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192, - 16384 }, - .nr_sizes = 9, - .nr_windows = 1, - .imap_addr_offset = 0x4, - .imap_window_offset = 0x8, - }, - { - /* IARR3/IMAP3 */ - .type = IPROC_PCIE_IB_MAP_MEM, - .size_unit = SZ_1G, - .region_sizes = { 1, 2, 4, 8, 16, 32 }, - .nr_sizes = 6, - .nr_windows = 8, - .imap_addr_offset = 0x4, - .imap_window_offset = 0x8, - }, - { - /* IARR4/IMAP4 */ - .type = IPROC_PCIE_IB_MAP_MEM, - .size_unit = SZ_1G, - .region_sizes = { 32, 64, 128, 256, 512 }, - .nr_sizes = 5, - .nr_windows = 8, - .imap_addr_offset = 0x4, - .imap_window_offset = 0x8, - }, -}; - -/* - * iProc PCIe host registers - */ -enum iproc_pcie_reg { - /* clock/reset signal control */ - IPROC_PCIE_CLK_CTRL = 0, - - /* - * To allow MSI to be steered to an external MSI controller (e.g., ARM - * GICv3 ITS) - 
*/ - IPROC_PCIE_MSI_GIC_MODE, - - /* - * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the - * window where the MSI posted writes are written, for the writes to be - * interpreted as MSI writes. - */ - IPROC_PCIE_MSI_BASE_ADDR, - IPROC_PCIE_MSI_WINDOW_SIZE, - - /* - * To hold the address of the register where the MSI writes are - * programed. When ARM GICv3 ITS is used, this should be programmed - * with the address of the GITS_TRANSLATER register. - */ - IPROC_PCIE_MSI_ADDR_LO, - IPROC_PCIE_MSI_ADDR_HI, - - /* enable MSI */ - IPROC_PCIE_MSI_EN_CFG, - - /* allow access to root complex configuration space */ - IPROC_PCIE_CFG_IND_ADDR, - IPROC_PCIE_CFG_IND_DATA, - - /* allow access to device configuration space */ - IPROC_PCIE_CFG_ADDR, - IPROC_PCIE_CFG_DATA, - - /* enable INTx */ - IPROC_PCIE_INTX_EN, - - /* outbound address mapping */ - IPROC_PCIE_OARR0, - IPROC_PCIE_OMAP0, - IPROC_PCIE_OARR1, - IPROC_PCIE_OMAP1, - IPROC_PCIE_OARR2, - IPROC_PCIE_OMAP2, - IPROC_PCIE_OARR3, - IPROC_PCIE_OMAP3, - - /* inbound address mapping */ - IPROC_PCIE_IARR0, - IPROC_PCIE_IMAP0, - IPROC_PCIE_IARR1, - IPROC_PCIE_IMAP1, - IPROC_PCIE_IARR2, - IPROC_PCIE_IMAP2, - IPROC_PCIE_IARR3, - IPROC_PCIE_IMAP3, - IPROC_PCIE_IARR4, - IPROC_PCIE_IMAP4, - - /* link status */ - IPROC_PCIE_LINK_STATUS, - - /* enable APB error for unsupported requests */ - IPROC_PCIE_APB_ERR_EN, - - /* total number of core registers */ - IPROC_PCIE_MAX_NUM_REG, -}; - -/* iProc PCIe PAXB BCMA registers */ -static const u16 iproc_pcie_reg_paxb_bcma[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x120, - [IPROC_PCIE_CFG_IND_DATA] = 0x124, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, - [IPROC_PCIE_INTX_EN] = 0x330, - [IPROC_PCIE_LINK_STATUS] = 0xf0c, -}; - -/* iProc PCIe PAXB registers */ -static const u16 iproc_pcie_reg_paxb[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x120, - [IPROC_PCIE_CFG_IND_DATA] = 0x124, - [IPROC_PCIE_CFG_ADDR] = 
0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, - [IPROC_PCIE_INTX_EN] = 0x330, - [IPROC_PCIE_OARR0] = 0xd20, - [IPROC_PCIE_OMAP0] = 0xd40, - [IPROC_PCIE_OARR1] = 0xd28, - [IPROC_PCIE_OMAP1] = 0xd48, - [IPROC_PCIE_LINK_STATUS] = 0xf0c, - [IPROC_PCIE_APB_ERR_EN] = 0xf40, -}; - -/* iProc PCIe PAXB v2 registers */ -static const u16 iproc_pcie_reg_paxb_v2[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x120, - [IPROC_PCIE_CFG_IND_DATA] = 0x124, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, - [IPROC_PCIE_INTX_EN] = 0x330, - [IPROC_PCIE_OARR0] = 0xd20, - [IPROC_PCIE_OMAP0] = 0xd40, - [IPROC_PCIE_OARR1] = 0xd28, - [IPROC_PCIE_OMAP1] = 0xd48, - [IPROC_PCIE_OARR2] = 0xd60, - [IPROC_PCIE_OMAP2] = 0xd68, - [IPROC_PCIE_OARR3] = 0xdf0, - [IPROC_PCIE_OMAP3] = 0xdf8, - [IPROC_PCIE_IARR0] = 0xd00, - [IPROC_PCIE_IMAP0] = 0xc00, - [IPROC_PCIE_IARR2] = 0xd10, - [IPROC_PCIE_IMAP2] = 0xcc0, - [IPROC_PCIE_IARR3] = 0xe00, - [IPROC_PCIE_IMAP3] = 0xe08, - [IPROC_PCIE_IARR4] = 0xe68, - [IPROC_PCIE_IMAP4] = 0xe70, - [IPROC_PCIE_LINK_STATUS] = 0xf0c, - [IPROC_PCIE_APB_ERR_EN] = 0xf40, -}; - -/* iProc PCIe PAXC v1 registers */ -static const u16 iproc_pcie_reg_paxc[] = { - [IPROC_PCIE_CLK_CTRL] = 0x000, - [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, - [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, -}; - -/* iProc PCIe PAXC v2 registers */ -static const u16 iproc_pcie_reg_paxc_v2[] = { - [IPROC_PCIE_MSI_GIC_MODE] = 0x050, - [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, - [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, - [IPROC_PCIE_MSI_ADDR_LO] = 0x07c, - [IPROC_PCIE_MSI_ADDR_HI] = 0x080, - [IPROC_PCIE_MSI_EN_CFG] = 0x09c, - [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, - [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, - [IPROC_PCIE_CFG_ADDR] = 0x1f8, - [IPROC_PCIE_CFG_DATA] = 0x1fc, -}; - -static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) -{ - struct iproc_pcie *pcie = bus->sysdata; - return pcie; -} - -static inline bool 
iproc_pcie_reg_is_invalid(u16 reg_offset) -{ - return !!(reg_offset == IPROC_PCIE_REG_INVALID); -} - -static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie, - enum iproc_pcie_reg reg) -{ - return pcie->reg_offsets[reg]; -} - -static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie, - enum iproc_pcie_reg reg) -{ - u16 offset = iproc_pcie_reg_offset(pcie, reg); - - if (iproc_pcie_reg_is_invalid(offset)) - return 0; - - return readl(pcie->base + offset); -} - -static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, - enum iproc_pcie_reg reg, u32 val) -{ - u16 offset = iproc_pcie_reg_offset(pcie, reg); - - if (iproc_pcie_reg_is_invalid(offset)) - return; - - writel(val, pcie->base + offset); -} - -/** - * APB error forwarding can be disabled during access of configuration - * registers of the endpoint device, to prevent unsupported requests - * (typically seen during enumeration with multi-function devices) from - * triggering a system exception. - */ -static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, - bool disable) -{ - struct iproc_pcie *pcie = iproc_data(bus); - u32 val; - - if (bus->number && pcie->has_apb_err_disable) { - val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN); - if (disable) - val &= ~APB_ERR_EN; - else - val |= APB_ERR_EN; - iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val); - } -} - -static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie, - unsigned int busno, - unsigned int slot, - unsigned int fn, - int where) -{ - u16 offset; - u32 val; - - /* EP device access */ - val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | - (slot << CFG_ADDR_DEV_NUM_SHIFT) | - (fn << CFG_ADDR_FUNC_NUM_SHIFT) | - (where & CFG_ADDR_REG_NUM_MASK) | - (1 & CFG_ADDR_CFG_TYPE_MASK); - - iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val); - offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA); - - if (iproc_pcie_reg_is_invalid(offset)) - return NULL; - - return (pcie->base + offset); -} - -static unsigned int 
iproc_pcie_cfg_retry(void __iomem *cfg_data_p) -{ - int timeout = CFG_RETRY_STATUS_TIMEOUT_US; - unsigned int data; - - /* - * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only - * affects config reads of the Vendor ID. For config writes or any - * other config reads, the Root may automatically reissue the - * configuration request again as a new request. - * - * For config reads, this hardware returns CFG_RETRY_STATUS data - * when it receives a CRS completion, regardless of the address of - * the read or the CRS Software Visibility Enable bit. As a - * partial workaround for this, we retry in software any read that - * returns CFG_RETRY_STATUS. - * - * Note that a non-Vendor ID config register may have a value of - * CFG_RETRY_STATUS. If we read that, we can't distinguish it from - * a CRS completion, so we will incorrectly retry the read and - * eventually return the wrong data (0xffffffff). - */ - data = readl(cfg_data_p); - while (data == CFG_RETRY_STATUS && timeout--) { - udelay(1); - data = readl(cfg_data_p); - } - - if (data == CFG_RETRY_STATUS) - data = 0xffffffff; - - return data; -} - -static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - struct iproc_pcie *pcie = iproc_data(bus); - unsigned int slot = PCI_SLOT(devfn); - unsigned int fn = PCI_FUNC(devfn); - unsigned int busno = bus->number; - void __iomem *cfg_data_p; - unsigned int data; - int ret; - - /* root complex access */ - if (busno == 0) { - ret = pci_generic_config_read32(bus, devfn, where, size, val); - if (ret != PCIBIOS_SUCCESSFUL) - return ret; - - /* Don't advertise CRS SV support */ - if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL) - *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); - return PCIBIOS_SUCCESSFUL; - } - - cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); - - if (!cfg_data_p) - return PCIBIOS_DEVICE_NOT_FOUND; - - data = iproc_pcie_cfg_retry(cfg_data_p); - - *val = data; - if (size <= 2) 
- *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); - - return PCIBIOS_SUCCESSFUL; -} - -/** - * Note access to the configuration registers are protected at the higher layer - * by 'pci_lock' in drivers/pci/access.c - */ -static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, - int busno, unsigned int devfn, - int where) -{ - unsigned slot = PCI_SLOT(devfn); - unsigned fn = PCI_FUNC(devfn); - u16 offset; - - /* root complex access */ - if (busno == 0) { - if (slot > 0 || fn > 0) - return NULL; - - iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, - where & CFG_IND_ADDR_MASK); - offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); - if (iproc_pcie_reg_is_invalid(offset)) - return NULL; - else - return (pcie->base + offset); - } - - /* - * PAXC is connected to an internally emulated EP within the SoC. It - * allows only one device. - */ - if (pcie->ep_is_internal) - if (slot > 0) - return NULL; - - return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); -} - -static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus, - unsigned int devfn, - int where) -{ - return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn, - where); -} - -static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, - unsigned int devfn, int where, - int size, u32 *val) -{ - void __iomem *addr; - - addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); - if (!addr) { - *val = ~0; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - *val = readl(addr); - - if (size <= 2) - *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); - - return PCIBIOS_SUCCESSFUL; -} - -static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, - unsigned int devfn, int where, - int size, u32 val) -{ - void __iomem *addr; - u32 mask, tmp; - - addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (size == 4) { - writel(val, addr); - return PCIBIOS_SUCCESSFUL; - } - - mask = ~(((1 << 
(size * 8)) - 1) << ((where & 0x3) * 8)); - tmp = readl(addr) & mask; - tmp |= val << ((where & 0x3) * 8); - writel(tmp, addr); - - return PCIBIOS_SUCCESSFUL; -} - -static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - int ret; - struct iproc_pcie *pcie = iproc_data(bus); - - iproc_pcie_apb_err_disable(bus, true); - if (pcie->type == IPROC_PCIE_PAXB_V2) - ret = iproc_pcie_config_read(bus, devfn, where, size, val); - else - ret = pci_generic_config_read32(bus, devfn, where, size, val); - iproc_pcie_apb_err_disable(bus, false); - - return ret; -} - -static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - int ret; - - iproc_pcie_apb_err_disable(bus, true); - ret = pci_generic_config_write32(bus, devfn, where, size, val); - iproc_pcie_apb_err_disable(bus, false); - - return ret; -} - -static struct pci_ops iproc_pcie_ops = { - .map_bus = iproc_pcie_bus_map_cfg_bus, - .read = iproc_pcie_config_read32, - .write = iproc_pcie_config_write32, -}; - -static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert) -{ - u32 val; - - /* - * PAXC and the internal emulated endpoint device downstream should not - * be reset. If firmware has been loaded on the endpoint device at an - * earlier boot stage, reset here causes issues. 
- */ - if (pcie->ep_is_internal) - return; - - if (assert) { - val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); - val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & - ~RC_PCIE_RST_OUTPUT; - iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); - udelay(250); - } else { - val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); - val |= RC_PCIE_RST_OUTPUT; - iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); - msleep(100); - } -} - -int iproc_pcie_shutdown(struct iproc_pcie *pcie) -{ - iproc_pcie_perst_ctrl(pcie, true); - msleep(500); - - return 0; -} -EXPORT_SYMBOL_GPL(iproc_pcie_shutdown); - -static int iproc_pcie_check_link(struct iproc_pcie *pcie) -{ - struct device *dev = pcie->dev; - u32 hdr_type, link_ctrl, link_status, class, val; - bool link_is_active = false; - - /* - * PAXC connects to emulated endpoint devices directly and does not - * have a Serdes. Therefore skip the link detection logic here. - */ - if (pcie->ep_is_internal) - return 0; - - val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); - if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) { - dev_err(dev, "PHY or data link is INACTIVE!\n"); - return -ENODEV; - } - - /* make sure we are not in EP mode */ - iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); - if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { - dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); - return -EFAULT; - } - - /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */ -#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c -#define PCI_CLASS_BRIDGE_MASK 0xffff00 -#define PCI_CLASS_BRIDGE_SHIFT 8 - iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, - 4, &class); - class &= ~PCI_CLASS_BRIDGE_MASK; - class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); - iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, - 4, class); - - /* check link status to see if link is active */ - iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, - 2, &link_status); - if 
(link_status & PCI_EXP_LNKSTA_NLW) - link_is_active = true; - - if (!link_is_active) { - /* try GEN 1 link speed */ -#define PCI_TARGET_LINK_SPEED_MASK 0xf -#define PCI_TARGET_LINK_SPEED_GEN2 0x2 -#define PCI_TARGET_LINK_SPEED_GEN1 0x1 - iproc_pci_raw_config_read32(pcie, 0, - IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, - 4, &link_ctrl); - if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == - PCI_TARGET_LINK_SPEED_GEN2) { - link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; - link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; - iproc_pci_raw_config_write32(pcie, 0, - IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, - 4, link_ctrl); - msleep(100); - - iproc_pci_raw_config_read32(pcie, 0, - IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, - 2, &link_status); - if (link_status & PCI_EXP_LNKSTA_NLW) - link_is_active = true; - } - } - - dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN"); - - return link_is_active ? 0 : -ENODEV; -} - -static void iproc_pcie_enable(struct iproc_pcie *pcie) -{ - iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK); -} - -static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie, - int window_idx) -{ - u32 val; - - val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); - - return !!(val & OARR_VALID); -} - -static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx, - int size_idx, u64 axi_addr, u64 pci_addr) -{ - struct device *dev = pcie->dev; - u16 oarr_offset, omap_offset; - - /* - * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based - * on window index. - */ - oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0, - window_idx)); - omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0, - window_idx)); - if (iproc_pcie_reg_is_invalid(oarr_offset) || - iproc_pcie_reg_is_invalid(omap_offset)) - return -EINVAL; - - /* - * Program the OARR registers. The upper 32-bit OARR register is - * always right after the lower 32-bit OARR register. 
- */ - writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) | - OARR_VALID, pcie->base + oarr_offset); - writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4); - - /* now program the OMAP registers */ - writel(lower_32_bits(pci_addr), pcie->base + omap_offset); - writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4); - - dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n", - window_idx, oarr_offset, &axi_addr, &pci_addr); - dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n", - readl(pcie->base + oarr_offset), - readl(pcie->base + oarr_offset + 4)); - dev_info(dev, "omap lo 0x%x omap hi 0x%x\n", - readl(pcie->base + omap_offset), - readl(pcie->base + omap_offset + 4)); - - return 0; -} - -/** - * Some iProc SoCs require the SW to configure the outbound address mapping - * - * Outbound address translation: - * - * iproc_pcie_address = axi_address - axi_offset - * OARR = iproc_pcie_address - * OMAP = pci_addr - * - * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address - */ -static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, - u64 pci_addr, resource_size_t size) -{ - struct iproc_pcie_ob *ob = &pcie->ob; - struct device *dev = pcie->dev; - int ret = -EINVAL, window_idx, size_idx; - - if (axi_addr < ob->axi_offset) { - dev_err(dev, "axi address %pap less than offset %pap\n", - &axi_addr, &ob->axi_offset); - return -EINVAL; - } - - /* - * Translate the AXI address to the internal address used by the iProc - * PCIe core before programming the OARR - */ - axi_addr -= ob->axi_offset; - - /* iterate through all OARR/OMAP mapping windows */ - for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { - const struct iproc_pcie_ob_map *ob_map = - &pcie->ob_map[window_idx]; - - /* - * If current outbound window is already in use, move on to the - * next one. 
- */ - if (iproc_pcie_ob_is_valid(pcie, window_idx)) - continue; - - /* - * Iterate through all supported window sizes within the - * OARR/OMAP pair to find a match. Go through the window sizes - * in a descending order. - */ - for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0; - size_idx--) { - resource_size_t window_size = - ob_map->window_sizes[size_idx] * SZ_1M; - - if (size < window_size) - continue; - - if (!IS_ALIGNED(axi_addr, window_size) || - !IS_ALIGNED(pci_addr, window_size)) { - dev_err(dev, - "axi %pap or pci %pap not aligned\n", - &axi_addr, &pci_addr); - return -EINVAL; - } - - /* - * Match found! Program both OARR and OMAP and mark - * them as a valid entry. - */ - ret = iproc_pcie_ob_write(pcie, window_idx, size_idx, - axi_addr, pci_addr); - if (ret) - goto err_ob; - - size -= window_size; - if (size == 0) - return 0; - - /* - * If we are here, we are done with the current window, - * but not yet finished all mappings. Need to move on - * to the next window. - */ - axi_addr += window_size; - pci_addr += window_size; - break; - } - } - -err_ob: - dev_err(dev, "unable to configure outbound mapping\n"); - dev_err(dev, - "axi %pap, axi offset %pap, pci %pap, res size %pap\n", - &axi_addr, &ob->axi_offset, &pci_addr, &size); - - return ret; -} - -static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, - struct list_head *resources) -{ - struct device *dev = pcie->dev; - struct resource_entry *window; - int ret; - - resource_list_for_each_entry(window, resources) { - struct resource *res = window->res; - u64 res_type = resource_type(res); - - switch (res_type) { - case IORESOURCE_IO: - case IORESOURCE_BUS: - break; - case IORESOURCE_MEM: - ret = iproc_pcie_setup_ob(pcie, res->start, - res->start - window->offset, - resource_size(res)); - if (ret) - return ret; - break; - default: - dev_err(dev, "invalid resource %pR\n", res); - return -EINVAL; - } - } - - return 0; -} - -static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie, - int 
region_idx) -{ - const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; - u32 val; - - val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); - - return !!(val & (BIT(ib_map->nr_sizes) - 1)); -} - -static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map, - enum iproc_pcie_ib_map_type type) -{ - return !!(ib_map->type == type); -} - -static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, - int size_idx, int nr_windows, u64 axi_addr, - u64 pci_addr, resource_size_t size) -{ - struct device *dev = pcie->dev; - const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; - u16 iarr_offset, imap_offset; - u32 val; - int window_idx; - - iarr_offset = iproc_pcie_reg_offset(pcie, - MAP_REG(IPROC_PCIE_IARR0, region_idx)); - imap_offset = iproc_pcie_reg_offset(pcie, - MAP_REG(IPROC_PCIE_IMAP0, region_idx)); - if (iproc_pcie_reg_is_invalid(iarr_offset) || - iproc_pcie_reg_is_invalid(imap_offset)) - return -EINVAL; - - dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n", - region_idx, iarr_offset, &axi_addr, &pci_addr); - - /* - * Program the IARR registers. The upper 32-bit IARR register is - * always right after the lower 32-bit IARR register. - */ - writel(lower_32_bits(pci_addr) | BIT(size_idx), - pcie->base + iarr_offset); - writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4); - - dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n", - readl(pcie->base + iarr_offset), - readl(pcie->base + iarr_offset + 4)); - - /* - * Now program the IMAP registers. Each IARR region may have one or - * more IMAP windows. 
- */ - size >>= ilog2(nr_windows); - for (window_idx = 0; window_idx < nr_windows; window_idx++) { - val = readl(pcie->base + imap_offset); - val |= lower_32_bits(axi_addr) | IMAP_VALID; - writel(val, pcie->base + imap_offset); - writel(upper_32_bits(axi_addr), - pcie->base + imap_offset + ib_map->imap_addr_offset); - - dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n", - window_idx, readl(pcie->base + imap_offset), - readl(pcie->base + imap_offset + - ib_map->imap_addr_offset)); - - imap_offset += ib_map->imap_window_offset; - axi_addr += size; - } - - return 0; -} - -static int iproc_pcie_setup_ib(struct iproc_pcie *pcie, - struct of_pci_range *range, - enum iproc_pcie_ib_map_type type) -{ - struct device *dev = pcie->dev; - struct iproc_pcie_ib *ib = &pcie->ib; - int ret; - unsigned int region_idx, size_idx; - u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr; - resource_size_t size = range->size; - - /* iterate through all IARR mapping regions */ - for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { - const struct iproc_pcie_ib_map *ib_map = - &pcie->ib_map[region_idx]; - - /* - * If current inbound region is already in use or not a - * compatible type, move on to the next. - */ - if (iproc_pcie_ib_is_in_use(pcie, region_idx) || - !iproc_pcie_ib_check_type(ib_map, type)) - continue; - - /* iterate through all supported region sizes to find a match */ - for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) { - resource_size_t region_size = - ib_map->region_sizes[size_idx] * ib_map->size_unit; - - if (size != region_size) - continue; - - if (!IS_ALIGNED(axi_addr, region_size) || - !IS_ALIGNED(pci_addr, region_size)) { - dev_err(dev, - "axi %pap or pci %pap not aligned\n", - &axi_addr, &pci_addr); - return -EINVAL; - } - - /* Match found! Program IARR and all IMAP windows. 
*/ - ret = iproc_pcie_ib_write(pcie, region_idx, size_idx, - ib_map->nr_windows, axi_addr, - pci_addr, size); - if (ret) - goto err_ib; - else - return 0; - - } - } - ret = -EINVAL; - -err_ib: - dev_err(dev, "unable to configure inbound mapping\n"); - dev_err(dev, "axi %pap, pci %pap, res size %pap\n", - &axi_addr, &pci_addr, &size); - - return ret; -} - -static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) -{ - struct of_pci_range range; - struct of_pci_range_parser parser; - int ret; - - /* Get the dma-ranges from DT */ - ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node); - if (ret) - return ret; - - for_each_of_pci_range(&parser, &range) { - /* Each range entry corresponds to an inbound mapping region */ - ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM); - if (ret) - return ret; - } - - return 0; -} - -static int iproce_pcie_get_msi(struct iproc_pcie *pcie, - struct device_node *msi_node, - u64 *msi_addr) -{ - struct device *dev = pcie->dev; - int ret; - struct resource res; - - /* - * Check if 'msi-map' points to ARM GICv3 ITS, which is the only - * supported external MSI controller that requires steering. 
- */ - if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) { - dev_err(dev, "unable to find compatible MSI controller\n"); - return -ENODEV; - } - - /* derive GITS_TRANSLATER address from GICv3 */ - ret = of_address_to_resource(msi_node, 0, &res); - if (ret < 0) { - dev_err(dev, "unable to obtain MSI controller resources\n"); - return ret; - } - - *msi_addr = res.start + GITS_TRANSLATER; - return 0; -} - -static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) -{ - int ret; - struct of_pci_range range; - - memset(&range, 0, sizeof(range)); - range.size = SZ_32K; - range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1); - - ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO); - return ret; -} - -static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) -{ - u32 val; - - /* - * Program bits [43:13] of address of GITS_TRANSLATER register into - * bits [30:0] of the MSI base address register. In fact, in all iProc - * based SoCs, all I/O register bases are well below the 32-bit - * boundary, so we can safely assume bits [43:32] are always zeros. - */ - iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR, - (u32)(msi_addr >> 13)); - - /* use a default 8K window size */ - iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0); - - /* steering MSI to GICv3 ITS */ - val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE); - val |= GIC_V3_CFG; - iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val); - - /* - * Program bits [43:2] of address of GITS_TRANSLATER register into the - * iProc MSI address registers. 
- */ - msi_addr >>= 2; - iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI, - upper_32_bits(msi_addr)); - iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO, - lower_32_bits(msi_addr)); - - /* enable MSI */ - val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG); - val |= MSI_ENABLE_CFG; - iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val); -} - -static int iproc_pcie_msi_steer(struct iproc_pcie *pcie, - struct device_node *msi_node) -{ - struct device *dev = pcie->dev; - int ret; - u64 msi_addr; - - ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr); - if (ret < 0) { - dev_err(dev, "msi steering failed\n"); - return ret; - } - - switch (pcie->type) { - case IPROC_PCIE_PAXB_V2: - ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr); - if (ret) - return ret; - break; - case IPROC_PCIE_PAXC_V2: - iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr); - break; - default: - return -EINVAL; - } - - return 0; -} - -static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) -{ - struct device_node *msi_node; - int ret; - - /* - * Either the "msi-parent" or the "msi-map" phandle needs to exist - * for us to obtain the MSI node. - */ - - msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0); - if (!msi_node) { - const __be32 *msi_map = NULL; - int len; - u32 phandle; - - msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len); - if (!msi_map) - return -ENODEV; - - phandle = be32_to_cpup(msi_map + 1); - msi_node = of_find_node_by_phandle(phandle); - if (!msi_node) - return -ENODEV; - } - - /* - * Certain revisions of the iProc PCIe controller require additional - * configurations to steer the MSI writes towards an external MSI - * controller. 
- */ - if (pcie->need_msi_steer) { - ret = iproc_pcie_msi_steer(pcie, msi_node); - if (ret) - return ret; - } - - /* - * If another MSI controller is being used, the call below should fail - * but that is okay - */ - return iproc_msi_init(pcie, msi_node); -} - -static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) -{ - iproc_msi_exit(pcie); -} - -static int iproc_pcie_rev_init(struct iproc_pcie *pcie) -{ - struct device *dev = pcie->dev; - unsigned int reg_idx; - const u16 *regs; - - switch (pcie->type) { - case IPROC_PCIE_PAXB_BCMA: - regs = iproc_pcie_reg_paxb_bcma; - break; - case IPROC_PCIE_PAXB: - regs = iproc_pcie_reg_paxb; - pcie->has_apb_err_disable = true; - if (pcie->need_ob_cfg) { - pcie->ob_map = paxb_ob_map; - pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map); - } - break; - case IPROC_PCIE_PAXB_V2: - regs = iproc_pcie_reg_paxb_v2; - pcie->has_apb_err_disable = true; - if (pcie->need_ob_cfg) { - pcie->ob_map = paxb_v2_ob_map; - pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map); - } - pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map); - pcie->ib_map = paxb_v2_ib_map; - pcie->need_msi_steer = true; - dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n", - CFG_RETRY_STATUS); - break; - case IPROC_PCIE_PAXC: - regs = iproc_pcie_reg_paxc; - pcie->ep_is_internal = true; - break; - case IPROC_PCIE_PAXC_V2: - regs = iproc_pcie_reg_paxc_v2; - pcie->ep_is_internal = true; - pcie->need_msi_steer = true; - break; - default: - dev_err(dev, "incompatible iProc PCIe interface\n"); - return -EINVAL; - } - - pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG, - sizeof(*pcie->reg_offsets), - GFP_KERNEL); - if (!pcie->reg_offsets) - return -ENOMEM; - - /* go through the register table and populate all valid registers */ - pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ? - IPROC_PCIE_REG_INVALID : regs[0]; - for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++) - pcie->reg_offsets[reg_idx] = regs[reg_idx] ? 
- regs[reg_idx] : IPROC_PCIE_REG_INVALID; - - return 0; -} - -int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) -{ - struct device *dev; - int ret; - struct pci_bus *child; - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - - dev = pcie->dev; - - ret = iproc_pcie_rev_init(pcie); - if (ret) { - dev_err(dev, "unable to initialize controller parameters\n"); - return ret; - } - - ret = devm_request_pci_bus_resources(dev, res); - if (ret) - return ret; - - ret = phy_init(pcie->phy); - if (ret) { - dev_err(dev, "unable to initialize PCIe PHY\n"); - return ret; - } - - ret = phy_power_on(pcie->phy); - if (ret) { - dev_err(dev, "unable to power on PCIe PHY\n"); - goto err_exit_phy; - } - - iproc_pcie_perst_ctrl(pcie, true); - iproc_pcie_perst_ctrl(pcie, false); - - if (pcie->need_ob_cfg) { - ret = iproc_pcie_map_ranges(pcie, res); - if (ret) { - dev_err(dev, "map failed\n"); - goto err_power_off_phy; - } - } - - if (pcie->need_ib_cfg) { - ret = iproc_pcie_map_dma_ranges(pcie); - if (ret && ret != -ENOENT) - goto err_power_off_phy; - } - - ret = iproc_pcie_check_link(pcie); - if (ret) { - dev_err(dev, "no PCIe EP device detected\n"); - goto err_power_off_phy; - } - - iproc_pcie_enable(pcie); - - if (IS_ENABLED(CONFIG_PCI_MSI)) - if (iproc_pcie_msi_enable(pcie)) - dev_info(dev, "not using iProc MSI\n"); - - list_splice_init(res, &host->windows); - host->busnr = 0; - host->dev.parent = dev; - host->ops = &iproc_pcie_ops; - host->sysdata = pcie; - host->map_irq = pcie->map_irq; - host->swizzle_irq = pci_common_swizzle; - - ret = pci_scan_root_bus_bridge(host); - if (ret < 0) { - dev_err(dev, "failed to scan host: %d\n", ret); - goto err_power_off_phy; - } - - pci_assign_unassigned_bus_resources(host->bus); - - pcie->root_bus = host->bus; - - list_for_each_entry(child, &host->bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(host->bus); - - return 0; - -err_power_off_phy: - phy_power_off(pcie->phy); 
-err_exit_phy: - phy_exit(pcie->phy); - return ret; -} -EXPORT_SYMBOL(iproc_pcie_setup); - -int iproc_pcie_remove(struct iproc_pcie *pcie) -{ - pci_stop_root_bus(pcie->root_bus); - pci_remove_root_bus(pcie->root_bus); - - iproc_pcie_msi_disable(pcie); - - phy_power_off(pcie->phy); - phy_exit(pcie->phy); - - return 0; -} -EXPORT_SYMBOL(iproc_pcie_remove); - -MODULE_AUTHOR("Ray Jui "); -MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h deleted file mode 100644 index 814b600b383a..000000000000 --- a/drivers/pci/host/pcie-iproc.h +++ /dev/null @@ -1,119 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2014-2015 Broadcom Corporation - */ - -#ifndef _PCIE_IPROC_H -#define _PCIE_IPROC_H - -/** - * iProc PCIe interface type - * - * PAXB is the wrapper used in root complex that can be connected to an - * external endpoint device. - * - * PAXC is the wrapper used in root complex dedicated for internal emulated - * endpoint devices. 
- */ -enum iproc_pcie_type { - IPROC_PCIE_PAXB_BCMA = 0, - IPROC_PCIE_PAXB, - IPROC_PCIE_PAXB_V2, - IPROC_PCIE_PAXC, - IPROC_PCIE_PAXC_V2, -}; - -/** - * iProc PCIe outbound mapping - * @axi_offset: offset from the AXI address to the internal address used by - * the iProc PCIe core - * @nr_windows: total number of supported outbound mapping windows - */ -struct iproc_pcie_ob { - resource_size_t axi_offset; - unsigned int nr_windows; -}; - -/** - * iProc PCIe inbound mapping - * @nr_regions: total number of supported inbound mapping regions - */ -struct iproc_pcie_ib { - unsigned int nr_regions; -}; - -struct iproc_pcie_ob_map; -struct iproc_pcie_ib_map; -struct iproc_msi; - -/** - * iProc PCIe device - * - * @dev: pointer to device data structure - * @type: iProc PCIe interface type - * @reg_offsets: register offsets - * @base: PCIe host controller I/O register base - * @base_addr: PCIe host controller register base physical address - * @root_bus: pointer to root bus - * @phy: optional PHY device that controls the Serdes - * @map_irq: function callback to map interrupts - * @ep_is_internal: indicates an internal emulated endpoint device is connected - * @has_apb_err_disable: indicates the controller can be configured to prevent - * unsupported request from being forwarded as an APB bus error - * - * @need_ob_cfg: indicates SW needs to configure the outbound mapping window - * @ob: outbound mapping related parameters - * @ob_map: outbound mapping related parameters specific to the controller - * - * @need_ib_cfg: indicates SW needs to configure the inbound mapping window - * @ib: inbound mapping related parameters - * @ib_map: outbound mapping region related parameters - * - * @need_msi_steer: indicates additional configuration of the iProc PCIe - * controller is required to steer MSI writes to external interrupt controller - * @msi: MSI data - */ -struct iproc_pcie { - struct device *dev; - enum iproc_pcie_type type; - u16 *reg_offsets; - void __iomem *base; - 
phys_addr_t base_addr; - struct resource mem; - struct pci_bus *root_bus; - struct phy *phy; - int (*map_irq)(const struct pci_dev *, u8, u8); - bool ep_is_internal; - bool has_apb_err_disable; - - bool need_ob_cfg; - struct iproc_pcie_ob ob; - const struct iproc_pcie_ob_map *ob_map; - - bool need_ib_cfg; - struct iproc_pcie_ib ib; - const struct iproc_pcie_ib_map *ib_map; - - bool need_msi_steer; - struct iproc_msi *msi; -}; - -int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); -int iproc_pcie_remove(struct iproc_pcie *pcie); -int iproc_pcie_shutdown(struct iproc_pcie *pcie); - -#ifdef CONFIG_PCIE_IPROC_MSI -int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node); -void iproc_msi_exit(struct iproc_pcie *pcie); -#else -static inline int iproc_msi_init(struct iproc_pcie *pcie, - struct device_node *node) -{ - return -ENODEV; -} -static inline void iproc_msi_exit(struct iproc_pcie *pcie) -{ -} -#endif - -#endif /* _PCIE_IPROC_H */ diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c deleted file mode 100644 index 0baabe30858f..000000000000 --- a/drivers/pci/host/pcie-mediatek.c +++ /dev/null @@ -1,1218 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * MediaTek PCIe host controller driver. - * - * Copyright (c) 2017 MediaTek Inc. 
- * Author: Ryder Lee - * Honghui Zhang - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -/* PCIe shared registers */ -#define PCIE_SYS_CFG 0x00 -#define PCIE_INT_ENABLE 0x0c -#define PCIE_CFG_ADDR 0x20 -#define PCIE_CFG_DATA 0x24 - -/* PCIe per port registers */ -#define PCIE_BAR0_SETUP 0x10 -#define PCIE_CLASS 0x34 -#define PCIE_LINK_STATUS 0x50 - -#define PCIE_PORT_INT_EN(x) BIT(20 + (x)) -#define PCIE_PORT_PERST(x) BIT(1 + (x)) -#define PCIE_PORT_LINKUP BIT(0) -#define PCIE_BAR_MAP_MAX GENMASK(31, 16) - -#define PCIE_BAR_ENABLE BIT(0) -#define PCIE_REVISION_ID BIT(0) -#define PCIE_CLASS_CODE (0x60400 << 8) -#define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \ - ((((regn) >> 8) & GENMASK(3, 0)) << 24)) -#define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8)) -#define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11)) -#define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16)) -#define PCIE_CONF_ADDR(regn, fun, dev, bus) \ - (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \ - PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus)) - -/* MediaTek specific configuration registers */ -#define PCIE_FTS_NUM 0x70c -#define PCIE_FTS_NUM_MASK GENMASK(15, 8) -#define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8) - -#define PCIE_FC_CREDIT 0x73c -#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) -#define PCIE_FC_CREDIT_VAL(x) ((x) << 16) - -/* PCIe V2 share registers */ -#define PCIE_SYS_CFG_V2 0x0 -#define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8) -#define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8) - -/* PCIe V2 per-port registers */ -#define PCIE_MSI_VECTOR 0x0c0 - -#define PCIE_CONF_VEND_ID 0x100 -#define PCIE_CONF_CLASS_ID 0x106 - -#define PCIE_INT_MASK 0x420 -#define INTX_MASK GENMASK(19, 16) -#define INTX_SHIFT 16 -#define PCIE_INT_STATUS 0x424 -#define MSI_STATUS BIT(23) -#define PCIE_IMSI_STATUS 0x42c -#define PCIE_IMSI_ADDR 0x430 
-#define MSI_MASK BIT(23) -#define MTK_MSI_IRQS_NUM 32 - -#define PCIE_AHB_TRANS_BASE0_L 0x438 -#define PCIE_AHB_TRANS_BASE0_H 0x43c -#define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0)) -#define PCIE_AXI_WINDOW0 0x448 -#define WIN_ENABLE BIT(7) - -/* PCIe V2 configuration transaction header */ -#define PCIE_CFG_HEADER0 0x460 -#define PCIE_CFG_HEADER1 0x464 -#define PCIE_CFG_HEADER2 0x468 -#define PCIE_CFG_WDATA 0x470 -#define PCIE_APP_TLP_REQ 0x488 -#define PCIE_CFG_RDATA 0x48c -#define APP_CFG_REQ BIT(0) -#define APP_CPL_STATUS GENMASK(7, 5) - -#define CFG_WRRD_TYPE_0 4 -#define CFG_WR_FMT 2 -#define CFG_RD_FMT 0 - -#define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0)) -#define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24)) -#define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29)) -#define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2)) -#define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16)) -#define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19)) -#define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24)) -#define CFG_HEADER_DW0(type, fmt) \ - (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt)) -#define CFG_HEADER_DW1(where, size) \ - (GENMASK(((size) - 1), 0) << ((where) & 0x3)) -#define CFG_HEADER_DW2(regn, fun, dev, bus) \ - (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \ - CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus)) - -#define PCIE_RST_CTRL 0x510 -#define PCIE_PHY_RSTB BIT(0) -#define PCIE_PIPE_SRSTB BIT(1) -#define PCIE_MAC_SRSTB BIT(2) -#define PCIE_CRSTB BIT(3) -#define PCIE_PERSTB BIT(8) -#define PCIE_LINKDOWN_RST_EN GENMASK(15, 13) -#define PCIE_LINK_STATUS_V2 0x804 -#define PCIE_PORT_LINKUP_V2 BIT(10) - -struct mtk_pcie_port; - -/** - * struct mtk_pcie_soc - differentiate between host generations - * @need_fix_class_id: whether this host's class ID needed to be fixed or not - * @ops: pointer to configuration access functions - * @startup: pointer to controller setting functions - * @setup_irq: pointer to initialize IRQ functions - */ -struct 
mtk_pcie_soc { - bool need_fix_class_id; - struct pci_ops *ops; - int (*startup)(struct mtk_pcie_port *port); - int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); -}; - -/** - * struct mtk_pcie_port - PCIe port information - * @base: IO mapped register base - * @list: port list - * @pcie: pointer to PCIe host info - * @reset: pointer to port reset control - * @sys_ck: pointer to transaction/data link layer clock - * @ahb_ck: pointer to AHB slave interface operating clock for CSR access - * and RC initiated MMIO access - * @axi_ck: pointer to application layer MMIO channel operating clock - * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock - * when pcie_mac_ck/pcie_pipe_ck is turned off - * @obff_ck: pointer to OBFF functional block operating clock - * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock - * @phy: pointer to PHY control block - * @lane: lane count - * @slot: port slot - * @irq_domain: legacy INTx IRQ domain - * @inner_domain: inner IRQ domain - * @msi_domain: MSI IRQ domain - * @lock: protect the msi_irq_in_use bitmap - * @msi_irq_in_use: bit map for assigned MSI IRQ - */ -struct mtk_pcie_port { - void __iomem *base; - struct list_head list; - struct mtk_pcie *pcie; - struct reset_control *reset; - struct clk *sys_ck; - struct clk *ahb_ck; - struct clk *axi_ck; - struct clk *aux_ck; - struct clk *obff_ck; - struct clk *pipe_ck; - struct phy *phy; - u32 lane; - u32 slot; - struct irq_domain *irq_domain; - struct irq_domain *inner_domain; - struct irq_domain *msi_domain; - struct mutex lock; - DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); -}; - -/** - * struct mtk_pcie - PCIe host information - * @dev: pointer to PCIe device - * @base: IO mapped register base - * @free_ck: free-run reference clock - * @io: IO resource - * @pio: PIO resource - * @mem: non-prefetchable memory resource - * @busn: bus range - * @offset: IO / Memory offset - * @ports: pointer to PCIe port information - * @soc: pointer to 
SoC-dependent operations - */ -struct mtk_pcie { - struct device *dev; - void __iomem *base; - struct clk *free_ck; - - struct resource io; - struct resource pio; - struct resource mem; - struct resource busn; - struct { - resource_size_t mem; - resource_size_t io; - } offset; - struct list_head ports; - const struct mtk_pcie_soc *soc; -}; - -static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) -{ - struct device *dev = pcie->dev; - - clk_disable_unprepare(pcie->free_ck); - - if (dev->pm_domain) { - pm_runtime_put_sync(dev); - pm_runtime_disable(dev); - } -} - -static void mtk_pcie_port_free(struct mtk_pcie_port *port) -{ - struct mtk_pcie *pcie = port->pcie; - struct device *dev = pcie->dev; - - devm_iounmap(dev, port->base); - list_del(&port->list); - devm_kfree(dev, port); -} - -static void mtk_pcie_put_resources(struct mtk_pcie *pcie) -{ - struct mtk_pcie_port *port, *tmp; - - list_for_each_entry_safe(port, tmp, &pcie->ports, list) { - phy_power_off(port->phy); - phy_exit(port->phy); - clk_disable_unprepare(port->pipe_ck); - clk_disable_unprepare(port->obff_ck); - clk_disable_unprepare(port->axi_ck); - clk_disable_unprepare(port->aux_ck); - clk_disable_unprepare(port->ahb_ck); - clk_disable_unprepare(port->sys_ck); - mtk_pcie_port_free(port); - } - - mtk_pcie_subsys_powerdown(pcie); -} - -static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port) -{ - u32 val; - int err; - - err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val, - !(val & APP_CFG_REQ), 10, - 100 * USEC_PER_MSEC); - if (err) - return PCIBIOS_SET_FAILED; - - if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS) - return PCIBIOS_SET_FAILED; - - return PCIBIOS_SUCCESSFUL; -} - -static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, - int where, int size, u32 *val) -{ - u32 tmp; - - /* Write PCIe configuration transaction header for Cfgrd */ - writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT), - port->base + PCIE_CFG_HEADER0); - 
writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); - writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), - port->base + PCIE_CFG_HEADER2); - - /* Trigger h/w to transmit Cfgrd TLP */ - tmp = readl(port->base + PCIE_APP_TLP_REQ); - tmp |= APP_CFG_REQ; - writel(tmp, port->base + PCIE_APP_TLP_REQ); - - /* Check completion status */ - if (mtk_pcie_check_cfg_cpld(port)) - return PCIBIOS_SET_FAILED; - - /* Read cpld payload of Cfgrd */ - *val = readl(port->base + PCIE_CFG_RDATA); - - if (size == 1) - *val = (*val >> (8 * (where & 3))) & 0xff; - else if (size == 2) - *val = (*val >> (8 * (where & 3))) & 0xffff; - - return PCIBIOS_SUCCESSFUL; -} - -static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, - int where, int size, u32 val) -{ - /* Write PCIe configuration transaction header for Cfgwr */ - writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT), - port->base + PCIE_CFG_HEADER0); - writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); - writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), - port->base + PCIE_CFG_HEADER2); - - /* Write Cfgwr data */ - val = val << 8 * (where & 3); - writel(val, port->base + PCIE_CFG_WDATA); - - /* Trigger h/w to transmit Cfgwr TLP */ - val = readl(port->base + PCIE_APP_TLP_REQ); - val |= APP_CFG_REQ; - writel(val, port->base + PCIE_APP_TLP_REQ); - - /* Check completion status */ - return mtk_pcie_check_cfg_cpld(port); -} - -static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, - unsigned int devfn) -{ - struct mtk_pcie *pcie = bus->sysdata; - struct mtk_pcie_port *port; - - list_for_each_entry(port, &pcie->ports, list) - if (port->slot == PCI_SLOT(devfn)) - return port; - - return NULL; -} - -static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - struct mtk_pcie_port *port; - u32 bn = bus->number; - int ret; - - port = mtk_pcie_find_port(bus, devfn); - if (!port) { - *val = ~0; - 
return PCIBIOS_DEVICE_NOT_FOUND; - } - - ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); - if (ret) - *val = ~0; - - return ret; -} - -static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - struct mtk_pcie_port *port; - u32 bn = bus->number; - - port = mtk_pcie_find_port(bus, devfn); - if (!port) - return PCIBIOS_DEVICE_NOT_FOUND; - - return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val); -} - -static struct pci_ops mtk_pcie_ops_v2 = { - .read = mtk_pcie_config_read, - .write = mtk_pcie_config_write, -}; - -static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) -{ - struct mtk_pcie *pcie = port->pcie; - struct resource *mem = &pcie->mem; - const struct mtk_pcie_soc *soc = port->pcie->soc; - u32 val; - size_t size; - int err; - - /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ - if (pcie->base) { - val = readl(pcie->base + PCIE_SYS_CFG_V2); - val |= PCIE_CSR_LTSSM_EN(port->slot) | - PCIE_CSR_ASPM_L1_EN(port->slot); - writel(val, pcie->base + PCIE_SYS_CFG_V2); - } - - /* Assert all reset signals */ - writel(0, port->base + PCIE_RST_CTRL); - - /* - * Enable PCIe link down reset, if link status changed from link up to - * link down, this will reset MAC control registers and configuration - * space. 
- */ - writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); - - /* De-assert PHY, PE, PIPE, MAC and configuration reset */ - val = readl(port->base + PCIE_RST_CTRL); - val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | - PCIE_MAC_SRSTB | PCIE_CRSTB; - writel(val, port->base + PCIE_RST_CTRL); - - /* Set up vendor ID and class code */ - if (soc->need_fix_class_id) { - val = PCI_VENDOR_ID_MEDIATEK; - writew(val, port->base + PCIE_CONF_VEND_ID); - - val = PCI_CLASS_BRIDGE_HOST; - writew(val, port->base + PCIE_CONF_CLASS_ID); - } - - /* 100ms timeout value should be enough for Gen1/2 training */ - err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, - !!(val & PCIE_PORT_LINKUP_V2), 20, - 100 * USEC_PER_MSEC); - if (err) - return -ETIMEDOUT; - - /* Set INTx mask */ - val = readl(port->base + PCIE_INT_MASK); - val &= ~INTX_MASK; - writel(val, port->base + PCIE_INT_MASK); - - /* Set AHB to PCIe translation windows */ - size = mem->end - mem->start; - val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); - writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); - - val = upper_32_bits(mem->start); - writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); - - /* Set PCIe to AXI translation memory space.*/ - val = fls(0xffffffff) | WIN_ENABLE; - writel(val, port->base + PCIE_AXI_WINDOW0); - - return 0; -} - -static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -{ - struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); - phys_addr_t addr; - - /* MT2712/MT7622 only support 32-bit MSI addresses */ - addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); - msg->address_hi = 0; - msg->address_lo = lower_32_bits(addr); - - msg->data = data->hwirq; - - dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n", - (int)data->hwirq, msg->address_hi, msg->address_lo); -} - -static int mtk_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - -static void mtk_msi_ack_irq(struct 
irq_data *data) -{ - struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); - u32 hwirq = data->hwirq; - - writel(1 << hwirq, port->base + PCIE_IMSI_STATUS); -} - -static struct irq_chip mtk_msi_bottom_irq_chip = { - .name = "MTK MSI", - .irq_compose_msi_msg = mtk_compose_msi_msg, - .irq_set_affinity = mtk_msi_set_affinity, - .irq_ack = mtk_msi_ack_irq, -}; - -static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *args) -{ - struct mtk_pcie_port *port = domain->host_data; - unsigned long bit; - - WARN_ON(nr_irqs != 1); - mutex_lock(&port->lock); - - bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM); - if (bit >= MTK_MSI_IRQS_NUM) { - mutex_unlock(&port->lock); - return -ENOSPC; - } - - __set_bit(bit, port->msi_irq_in_use); - - mutex_unlock(&port->lock); - - irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip, - domain->host_data, handle_edge_irq, - NULL, NULL); - - return 0; -} - -static void mtk_pcie_irq_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) -{ - struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d); - - mutex_lock(&port->lock); - - if (!test_bit(d->hwirq, port->msi_irq_in_use)) - dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n", - d->hwirq); - else - __clear_bit(d->hwirq, port->msi_irq_in_use); - - mutex_unlock(&port->lock); - - irq_domain_free_irqs_parent(domain, virq, nr_irqs); -} - -static const struct irq_domain_ops msi_domain_ops = { - .alloc = mtk_pcie_irq_domain_alloc, - .free = mtk_pcie_irq_domain_free, -}; - -static struct irq_chip mtk_msi_irq_chip = { - .name = "MTK PCIe MSI", - .irq_ack = irq_chip_ack_parent, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info mtk_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), - .chip = 
&mtk_msi_irq_chip, -}; - -static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port) -{ - struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node); - - mutex_init(&port->lock); - - port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM, - &msi_domain_ops, port); - if (!port->inner_domain) { - dev_err(port->pcie->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info, - port->inner_domain); - if (!port->msi_domain) { - dev_err(port->pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(port->inner_domain); - return -ENOMEM; - } - - return 0; -} - -static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) -{ - u32 val; - phys_addr_t msg_addr; - - msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); - val = lower_32_bits(msg_addr); - writel(val, port->base + PCIE_IMSI_ADDR); - - val = readl(port->base + PCIE_INT_MASK); - val &= ~MSI_MASK; - writel(val, port->base + PCIE_INT_MASK); -} - -static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - - return 0; -} - -static const struct irq_domain_ops intx_domain_ops = { - .map = mtk_pcie_intx_map, -}; - -static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, - struct device_node *node) -{ - struct device *dev = port->pcie->dev; - struct device_node *pcie_intc_node; - int ret; - - /* Setup INTx */ - pcie_intc_node = of_get_next_child(node, NULL); - if (!pcie_intc_node) { - dev_err(dev, "no PCIe Intc node found\n"); - return -ENODEV; - } - - port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, port); - if (!port->irq_domain) { - dev_err(dev, "failed to get INTx IRQ domain\n"); - return -ENODEV; - } - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - ret = mtk_pcie_allocate_msi_domains(port); - 
if (ret) - return ret; - - mtk_pcie_enable_msi(port); - } - - return 0; -} - -static void mtk_pcie_intr_handler(struct irq_desc *desc) -{ - struct mtk_pcie_port *port = irq_desc_get_handler_data(desc); - struct irq_chip *irqchip = irq_desc_get_chip(desc); - unsigned long status; - u32 virq; - u32 bit = INTX_SHIFT; - - chained_irq_enter(irqchip, desc); - - status = readl(port->base + PCIE_INT_STATUS); - if (status & INTX_MASK) { - for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) { - /* Clear the INTx */ - writel(1 << bit, port->base + PCIE_INT_STATUS); - virq = irq_find_mapping(port->irq_domain, - bit - INTX_SHIFT); - generic_handle_irq(virq); - } - } - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - if (status & MSI_STATUS){ - unsigned long imsi_status; - - while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { - for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) { - virq = irq_find_mapping(port->inner_domain, bit); - generic_handle_irq(virq); - } - } - /* Clear MSI interrupt status */ - writel(MSI_STATUS, port->base + PCIE_INT_STATUS); - } - } - - chained_irq_exit(irqchip, desc); - - return; -} - -static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, - struct device_node *node) -{ - struct mtk_pcie *pcie = port->pcie; - struct device *dev = pcie->dev; - struct platform_device *pdev = to_platform_device(dev); - int err, irq; - - err = mtk_pcie_init_irq_domain(port, node); - if (err) { - dev_err(dev, "failed to init PCIe IRQ domain\n"); - return err; - } - - irq = platform_get_irq(pdev, port->slot); - irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port); - - return 0; -} - -static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, - unsigned int devfn, int where) -{ - struct mtk_pcie *pcie = bus->sysdata; - - writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn), - bus->number), pcie->base + PCIE_CFG_ADDR); - - return pcie->base + PCIE_CFG_DATA + (where & 3); -} - -static struct pci_ops mtk_pcie_ops = { - .map_bus = 
mtk_pcie_map_bus, - .read = pci_generic_config_read, - .write = pci_generic_config_write, -}; - -static int mtk_pcie_startup_port(struct mtk_pcie_port *port) -{ - struct mtk_pcie *pcie = port->pcie; - u32 func = PCI_FUNC(port->slot << 3); - u32 slot = PCI_SLOT(port->slot << 3); - u32 val; - int err; - - /* assert port PERST_N */ - val = readl(pcie->base + PCIE_SYS_CFG); - val |= PCIE_PORT_PERST(port->slot); - writel(val, pcie->base + PCIE_SYS_CFG); - - /* de-assert port PERST_N */ - val = readl(pcie->base + PCIE_SYS_CFG); - val &= ~PCIE_PORT_PERST(port->slot); - writel(val, pcie->base + PCIE_SYS_CFG); - - /* 100ms timeout value should be enough for Gen1/2 training */ - err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val, - !!(val & PCIE_PORT_LINKUP), 20, - 100 * USEC_PER_MSEC); - if (err) - return -ETIMEDOUT; - - /* enable interrupt */ - val = readl(pcie->base + PCIE_INT_ENABLE); - val |= PCIE_PORT_INT_EN(port->slot); - writel(val, pcie->base + PCIE_INT_ENABLE); - - /* map to all DDR region. We need to set it before cfg operation. 
*/ - writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, - port->base + PCIE_BAR0_SETUP); - - /* configure class code and revision ID */ - writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); - - /* configure FC credit */ - writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), - pcie->base + PCIE_CFG_ADDR); - val = readl(pcie->base + PCIE_CFG_DATA); - val &= ~PCIE_FC_CREDIT_MASK; - val |= PCIE_FC_CREDIT_VAL(0x806c); - writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), - pcie->base + PCIE_CFG_ADDR); - writel(val, pcie->base + PCIE_CFG_DATA); - - /* configure RC FTS number to 250 when it leaves L0s */ - writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), - pcie->base + PCIE_CFG_ADDR); - val = readl(pcie->base + PCIE_CFG_DATA); - val &= ~PCIE_FTS_NUM_MASK; - val |= PCIE_FTS_NUM_L0(0x50); - writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), - pcie->base + PCIE_CFG_ADDR); - writel(val, pcie->base + PCIE_CFG_DATA); - - return 0; -} - -static void mtk_pcie_enable_port(struct mtk_pcie_port *port) -{ - struct mtk_pcie *pcie = port->pcie; - struct device *dev = pcie->dev; - int err; - - err = clk_prepare_enable(port->sys_ck); - if (err) { - dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot); - goto err_sys_clk; - } - - err = clk_prepare_enable(port->ahb_ck); - if (err) { - dev_err(dev, "failed to enable ahb_ck%d\n", port->slot); - goto err_ahb_clk; - } - - err = clk_prepare_enable(port->aux_ck); - if (err) { - dev_err(dev, "failed to enable aux_ck%d\n", port->slot); - goto err_aux_clk; - } - - err = clk_prepare_enable(port->axi_ck); - if (err) { - dev_err(dev, "failed to enable axi_ck%d\n", port->slot); - goto err_axi_clk; - } - - err = clk_prepare_enable(port->obff_ck); - if (err) { - dev_err(dev, "failed to enable obff_ck%d\n", port->slot); - goto err_obff_clk; - } - - err = clk_prepare_enable(port->pipe_ck); - if (err) { - dev_err(dev, "failed to enable pipe_ck%d\n", port->slot); - goto err_pipe_clk; - } - - reset_control_assert(port->reset); - 
reset_control_deassert(port->reset); - - err = phy_init(port->phy); - if (err) { - dev_err(dev, "failed to initialize port%d phy\n", port->slot); - goto err_phy_init; - } - - err = phy_power_on(port->phy); - if (err) { - dev_err(dev, "failed to power on port%d phy\n", port->slot); - goto err_phy_on; - } - - if (!pcie->soc->startup(port)) - return; - - dev_info(dev, "Port%d link down\n", port->slot); - - phy_power_off(port->phy); -err_phy_on: - phy_exit(port->phy); -err_phy_init: - clk_disable_unprepare(port->pipe_ck); -err_pipe_clk: - clk_disable_unprepare(port->obff_ck); -err_obff_clk: - clk_disable_unprepare(port->axi_ck); -err_axi_clk: - clk_disable_unprepare(port->aux_ck); -err_aux_clk: - clk_disable_unprepare(port->ahb_ck); -err_ahb_clk: - clk_disable_unprepare(port->sys_ck); -err_sys_clk: - mtk_pcie_port_free(port); -} - -static int mtk_pcie_parse_port(struct mtk_pcie *pcie, - struct device_node *node, - int slot) -{ - struct mtk_pcie_port *port; - struct resource *regs; - struct device *dev = pcie->dev; - struct platform_device *pdev = to_platform_device(dev); - char name[10]; - int err; - - port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); - if (!port) - return -ENOMEM; - - err = of_property_read_u32(node, "num-lanes", &port->lane); - if (err) { - dev_err(dev, "missing num-lanes property\n"); - return err; - } - - snprintf(name, sizeof(name), "port%d", slot); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); - port->base = devm_ioremap_resource(dev, regs); - if (IS_ERR(port->base)) { - dev_err(dev, "failed to map port%d base\n", slot); - return PTR_ERR(port->base); - } - - snprintf(name, sizeof(name), "sys_ck%d", slot); - port->sys_ck = devm_clk_get(dev, name); - if (IS_ERR(port->sys_ck)) { - dev_err(dev, "failed to get sys_ck%d clock\n", slot); - return PTR_ERR(port->sys_ck); - } - - /* sys_ck might be divided into the following parts in some chips */ - snprintf(name, sizeof(name), "ahb_ck%d", slot); - port->ahb_ck = devm_clk_get(dev, 
name); - if (IS_ERR(port->ahb_ck)) { - if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->ahb_ck = NULL; - } - - snprintf(name, sizeof(name), "axi_ck%d", slot); - port->axi_ck = devm_clk_get(dev, name); - if (IS_ERR(port->axi_ck)) { - if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->axi_ck = NULL; - } - - snprintf(name, sizeof(name), "aux_ck%d", slot); - port->aux_ck = devm_clk_get(dev, name); - if (IS_ERR(port->aux_ck)) { - if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->aux_ck = NULL; - } - - snprintf(name, sizeof(name), "obff_ck%d", slot); - port->obff_ck = devm_clk_get(dev, name); - if (IS_ERR(port->obff_ck)) { - if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->obff_ck = NULL; - } - - snprintf(name, sizeof(name), "pipe_ck%d", slot); - port->pipe_ck = devm_clk_get(dev, name); - if (IS_ERR(port->pipe_ck)) { - if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - port->pipe_ck = NULL; - } - - snprintf(name, sizeof(name), "pcie-rst%d", slot); - port->reset = devm_reset_control_get_optional_exclusive(dev, name); - if (PTR_ERR(port->reset) == -EPROBE_DEFER) - return PTR_ERR(port->reset); - - /* some platforms may use default PHY setting */ - snprintf(name, sizeof(name), "pcie-phy%d", slot); - port->phy = devm_phy_optional_get(dev, name); - if (IS_ERR(port->phy)) - return PTR_ERR(port->phy); - - port->slot = slot; - port->pcie = pcie; - - if (pcie->soc->setup_irq) { - err = pcie->soc->setup_irq(port, node); - if (err) - return err; - } - - INIT_LIST_HEAD(&port->list); - list_add_tail(&port->list, &pcie->ports); - - return 0; -} - -static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct platform_device *pdev = to_platform_device(dev); - struct resource *regs; - int err; - - /* get shared registers, which are optional */ - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys"); 
- if (regs) { - pcie->base = devm_ioremap_resource(dev, regs); - if (IS_ERR(pcie->base)) { - dev_err(dev, "failed to map shared register\n"); - return PTR_ERR(pcie->base); - } - } - - pcie->free_ck = devm_clk_get(dev, "free_ck"); - if (IS_ERR(pcie->free_ck)) { - if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - pcie->free_ck = NULL; - } - - if (dev->pm_domain) { - pm_runtime_enable(dev); - pm_runtime_get_sync(dev); - } - - /* enable top level clock */ - err = clk_prepare_enable(pcie->free_ck); - if (err) { - dev_err(dev, "failed to enable free_ck\n"); - goto err_free_ck; - } - - return 0; - -err_free_ck: - if (dev->pm_domain) { - pm_runtime_put_sync(dev); - pm_runtime_disable(dev); - } - - return err; -} - -static int mtk_pcie_setup(struct mtk_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct device_node *node = dev->of_node, *child; - struct of_pci_range_parser parser; - struct of_pci_range range; - struct resource res; - struct mtk_pcie_port *port, *tmp; - int err; - - if (of_pci_range_parser_init(&parser, node)) { - dev_err(dev, "missing \"ranges\" property\n"); - return -EINVAL; - } - - for_each_of_pci_range(&parser, &range) { - err = of_pci_range_to_resource(&range, node, &res); - if (err < 0) - return err; - - switch (res.flags & IORESOURCE_TYPE_BITS) { - case IORESOURCE_IO: - pcie->offset.io = res.start - range.pci_addr; - - memcpy(&pcie->pio, &res, sizeof(res)); - pcie->pio.name = node->full_name; - - pcie->io.start = range.cpu_addr; - pcie->io.end = range.cpu_addr + range.size - 1; - pcie->io.flags = IORESOURCE_MEM; - pcie->io.name = "I/O"; - - memcpy(&res, &pcie->io, sizeof(res)); - break; - - case IORESOURCE_MEM: - pcie->offset.mem = res.start - range.pci_addr; - - memcpy(&pcie->mem, &res, sizeof(res)); - pcie->mem.name = "non-prefetchable"; - break; - } - } - - err = of_pci_parse_bus_range(node, &pcie->busn); - if (err < 0) { - dev_err(dev, "failed to parse bus ranges property: %d\n", err); - pcie->busn.name = node->name; 
- pcie->busn.start = 0; - pcie->busn.end = 0xff; - pcie->busn.flags = IORESOURCE_BUS; - } - - for_each_available_child_of_node(node, child) { - int slot; - - err = of_pci_get_devfn(child); - if (err < 0) { - dev_err(dev, "failed to parse devfn: %d\n", err); - return err; - } - - slot = PCI_SLOT(err); - - err = mtk_pcie_parse_port(pcie, child, slot); - if (err) - return err; - } - - err = mtk_pcie_subsys_powerup(pcie); - if (err) - return err; - - /* enable each port, and then check link status */ - list_for_each_entry_safe(port, tmp, &pcie->ports, list) - mtk_pcie_enable_port(port); - - /* power down PCIe subsys if slots are all empty (link down) */ - if (list_empty(&pcie->ports)) - mtk_pcie_subsys_powerdown(pcie); - - return 0; -} - -static int mtk_pcie_request_resources(struct mtk_pcie *pcie) -{ - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = &host->windows; - struct device *dev = pcie->dev; - int err; - - pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); - pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); - pci_add_resource(windows, &pcie->busn); - - err = devm_request_pci_bus_resources(dev, windows); - if (err < 0) - return err; - - pci_remap_iospace(&pcie->pio, pcie->io.start); - - return 0; -} - -static int mtk_pcie_register_host(struct pci_host_bridge *host) -{ - struct mtk_pcie *pcie = pci_host_bridge_priv(host); - struct pci_bus *child; - int err; - - host->busnr = pcie->busn.start; - host->dev.parent = pcie->dev; - host->ops = pcie->soc->ops; - host->map_irq = of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; - host->sysdata = pcie; - - err = pci_scan_root_bus_bridge(host); - if (err < 0) - return err; - - pci_bus_size_bridges(host->bus); - pci_bus_assign_resources(host->bus); - - list_for_each_entry(child, &host->bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(host->bus); - - return 0; -} - -static int mtk_pcie_probe(struct 
platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct mtk_pcie *pcie; - struct pci_host_bridge *host; - int err; - - host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); - if (!host) - return -ENOMEM; - - pcie = pci_host_bridge_priv(host); - - pcie->dev = dev; - pcie->soc = of_device_get_match_data(dev); - platform_set_drvdata(pdev, pcie); - INIT_LIST_HEAD(&pcie->ports); - - err = mtk_pcie_setup(pcie); - if (err) - return err; - - err = mtk_pcie_request_resources(pcie); - if (err) - goto put_resources; - - err = mtk_pcie_register_host(host); - if (err) - goto put_resources; - - return 0; - -put_resources: - if (!list_empty(&pcie->ports)) - mtk_pcie_put_resources(pcie); - - return err; -} - -static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { - .ops = &mtk_pcie_ops, - .startup = mtk_pcie_startup_port, -}; - -static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = { - .ops = &mtk_pcie_ops_v2, - .startup = mtk_pcie_startup_port_v2, - .setup_irq = mtk_pcie_setup_irq, -}; - -static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = { - .need_fix_class_id = true, - .ops = &mtk_pcie_ops_v2, - .startup = mtk_pcie_startup_port_v2, - .setup_irq = mtk_pcie_setup_irq, -}; - -static const struct of_device_id mtk_pcie_ids[] = { - { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, - { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, - { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 }, - { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 }, - {}, -}; - -static struct platform_driver mtk_pcie_driver = { - .probe = mtk_pcie_probe, - .driver = { - .name = "mtk-pcie", - .of_match_table = mtk_pcie_ids, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(mtk_pcie_driver); diff --git a/drivers/pci/host/pcie-mobiveil.c b/drivers/pci/host/pcie-mobiveil.c deleted file mode 100644 index 4d6c20e47bed..000000000000 --- a/drivers/pci/host/pcie-mobiveil.c +++ /dev/null @@ -1,866 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0 -/* - * PCIe host controller driver for Mobiveil PCIe Host controller - * - * Copyright (c) 2018 Mobiveil Inc. - * Author: Subrahmanya Lingappa - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* register offsets and bit positions */ - -/* - * translation tables are grouped into windows, each window registers are - * grouped into blocks of 4 or 16 registers each - */ -#define PAB_REG_BLOCK_SIZE 16 -#define PAB_EXT_REG_BLOCK_SIZE 4 - -#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE)) -#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) - -#define LTSSM_STATUS 0x0404 -#define LTSSM_STATUS_L0_MASK 0x3f -#define LTSSM_STATUS_L0 0x2d - -#define PAB_CTRL 0x0808 -#define AMBA_PIO_ENABLE_SHIFT 0 -#define PEX_PIO_ENABLE_SHIFT 1 -#define PAGE_SEL_SHIFT 13 -#define PAGE_SEL_MASK 0x3f -#define PAGE_LO_MASK 0x3ff -#define PAGE_SEL_EN 0xc00 -#define PAGE_SEL_OFFSET_SHIFT 10 - -#define PAB_AXI_PIO_CTRL 0x0840 -#define APIO_EN_MASK 0xf - -#define PAB_PEX_PIO_CTRL 0x08c0 -#define PIO_ENABLE_SHIFT 0 - -#define PAB_INTP_AMBA_MISC_ENB 0x0b0c -#define PAB_INTP_AMBA_MISC_STAT 0x0b1c -#define PAB_INTP_INTX_MASK 0x01e0 -#define PAB_INTP_MSI_MASK 0x8 - -#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win) -#define WIN_ENABLE_SHIFT 0 -#define WIN_TYPE_SHIFT 1 - -#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win) - -#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win) -#define AXI_WINDOW_ALIGN_MASK 3 - -#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win) -#define PAB_BUS_SHIFT 24 -#define PAB_DEVICE_SHIFT 19 -#define PAB_FUNCTION_SHIFT 16 - -#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win) -#define PAB_INTP_AXI_PIO_CLASS 0x474 - -#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win) -#define AMAP_CTRL_EN_SHIFT 0 -#define AMAP_CTRL_TYPE_SHIFT 
1 - -#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) -#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) -#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) -#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) - -/* starting offset of INTX bits in status register */ -#define PAB_INTX_START 5 - -/* supported number of MSI interrupts */ -#define PCI_NUM_MSI 16 - -/* MSI registers */ -#define MSI_BASE_LO_OFFSET 0x04 -#define MSI_BASE_HI_OFFSET 0x08 -#define MSI_SIZE_OFFSET 0x0c -#define MSI_ENABLE_OFFSET 0x14 -#define MSI_STATUS_OFFSET 0x18 -#define MSI_DATA_OFFSET 0x20 -#define MSI_ADDR_L_OFFSET 0x24 -#define MSI_ADDR_H_OFFSET 0x28 - -/* outbound and inbound window definitions */ -#define WIN_NUM_0 0 -#define WIN_NUM_1 1 -#define CFG_WINDOW_TYPE 0 -#define IO_WINDOW_TYPE 1 -#define MEM_WINDOW_TYPE 2 -#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024) -#define MAX_PIO_WINDOWS 8 - -/* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_MIN 90000 -#define LINK_WAIT_MAX 100000 - -struct mobiveil_msi { /* MSI information */ - struct mutex lock; /* protect bitmap variable */ - struct irq_domain *msi_domain; - struct irq_domain *dev_domain; - phys_addr_t msi_pages_phys; - int num_of_vectors; - DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI); -}; - -struct mobiveil_pcie { - struct platform_device *pdev; - struct list_head resources; - void __iomem *config_axi_slave_base; /* endpoint config base */ - void __iomem *csr_axi_slave_base; /* root port config base */ - void __iomem *apb_csr_base; /* MSI register base */ - void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */ - struct irq_domain *intx_domain; - raw_spinlock_t intx_mask_lock; - int irq; - int apio_wins; - int ppio_wins; - int ob_wins_configured; /* configured outbound windows */ - int ib_wins_configured; /* configured inbound windows */ - struct resource *ob_io_res; - char root_bus_nr; - struct mobiveil_msi msi; -}; 
- -static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value, - const u32 reg) -{ - writel_relaxed(value, pcie->csr_axi_slave_base + reg); -} - -static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg) -{ - return readl_relaxed(pcie->csr_axi_slave_base + reg); -} - -static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie) -{ - return (csr_readl(pcie, LTSSM_STATUS) & - LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0; -} - -static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) -{ - struct mobiveil_pcie *pcie = bus->sysdata; - - /* Only one device down on each root port */ - if ((bus->number == pcie->root_bus_nr) && (devfn > 0)) - return false; - - /* - * Do not read more than one device on the bus directly - * attached to RC - */ - if ((bus->primary == pcie->root_bus_nr) && (devfn > 0)) - return false; - - return true; -} - -/* - * mobiveil_pcie_map_bus - routine to get the configuration base of either - * root port or endpoint - */ -static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, - unsigned int devfn, int where) -{ - struct mobiveil_pcie *pcie = bus->sysdata; - - if (!mobiveil_pcie_valid_device(bus, devfn)) - return NULL; - - if (bus->number == pcie->root_bus_nr) { - /* RC config access */ - return pcie->csr_axi_slave_base + where; - } - - /* - * EP config access (in Config/APIO space) - * Program PEX Address base (31..16 bits) with appropriate value - * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register. 
- * Relies on pci_lock serialization - */ - csr_writel(pcie, bus->number << PAB_BUS_SHIFT | - PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | - PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT, - PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); - return pcie->config_axi_slave_base + where; -} - -static struct pci_ops mobiveil_pcie_ops = { - .map_bus = mobiveil_pcie_map_bus, - .read = pci_generic_config_read, - .write = pci_generic_config_write, -}; - -static void mobiveil_pcie_isr(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc); - struct device *dev = &pcie->pdev->dev; - struct mobiveil_msi *msi = &pcie->msi; - u32 msi_data, msi_addr_lo, msi_addr_hi; - u32 intr_status, msi_status; - unsigned long shifted_status; - u32 bit, virq, val, mask; - - /* - * The core provides a single interrupt for both INTx/MSI messages. - * So we'll read both INTx and MSI status - */ - - chained_irq_enter(chip, desc); - - /* read INTx status */ - val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); - mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); - intr_status = val & mask; - - /* Handle INTx */ - if (intr_status & PAB_INTP_INTX_MASK) { - shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >> - PAB_INTX_START; - do { - for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) { - virq = irq_find_mapping(pcie->intx_domain, - bit + 1); - if (virq) - generic_handle_irq(virq); - else - dev_err_ratelimited(dev, - "unexpected IRQ, INT%d\n", bit); - - /* clear interrupt */ - csr_writel(pcie, - shifted_status << PAB_INTX_START, - PAB_INTP_AMBA_MISC_STAT); - } - } while ((shifted_status >> PAB_INTX_START) != 0); - } - - /* read extra MSI status register */ - msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET); - - /* handle MSI interrupts */ - while (msi_status & 1) { - msi_data = readl_relaxed(pcie->apb_csr_base - + MSI_DATA_OFFSET); - - /* - * MSI_STATUS_OFFSET register gets updated to zero - * once we pop not only the MSI 
data but also address - * from MSI hardware FIFO. So keeping these following - * two dummy reads. - */ - msi_addr_lo = readl_relaxed(pcie->apb_csr_base + - MSI_ADDR_L_OFFSET); - msi_addr_hi = readl_relaxed(pcie->apb_csr_base + - MSI_ADDR_H_OFFSET); - dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n", - msi_data, msi_addr_hi, msi_addr_lo); - - virq = irq_find_mapping(msi->dev_domain, msi_data); - if (virq) - generic_handle_irq(virq); - - msi_status = readl_relaxed(pcie->apb_csr_base + - MSI_STATUS_OFFSET); - } - - /* Clear the interrupt status */ - csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT); - chained_irq_exit(chip, desc); -} - -static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - struct platform_device *pdev = pcie->pdev; - struct device_node *node = dev->of_node; - struct resource *res; - const char *type; - - type = of_get_property(node, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } - - /* map config resource */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "config_axi_slave"); - pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pcie->config_axi_slave_base)) - return PTR_ERR(pcie->config_axi_slave_base); - pcie->ob_io_res = res; - - /* map csr resource */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "csr_axi_slave"); - pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pcie->csr_axi_slave_base)) - return PTR_ERR(pcie->csr_axi_slave_base); - pcie->pcie_reg_base = res->start; - - /* map MSI config resource */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr"); - pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pcie->apb_csr_base)) - return PTR_ERR(pcie->apb_csr_base); - - /* read the number of windows requested */ - if (of_property_read_u32(node, "apio-wins", 
&pcie->apio_wins)) - pcie->apio_wins = MAX_PIO_WINDOWS; - - if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins)) - pcie->ppio_wins = MAX_PIO_WINDOWS; - - pcie->irq = platform_get_irq(pdev, 0); - if (pcie->irq <= 0) { - dev_err(dev, "failed to map IRQ: %d\n", pcie->irq); - return -ENODEV; - } - - irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie); - - return 0; -} - -/* - * select_paged_register - routine to access paged register of root complex - * - * registers of RC are paged, for this scheme to work - * extracted higher 6 bits of the offset will be written to pg_sel - * field of PAB_CTRL register and rest of the lower 10 bits enabled with - * PAGE_SEL_EN are used as offset of the register. - */ -static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset) -{ - int pab_ctrl_dw, pg_sel; - - /* clear pg_sel field */ - pab_ctrl_dw = csr_readl(pcie, PAB_CTRL); - pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT)); - - /* set pg_sel field */ - pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK; - pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT)); - csr_writel(pcie, pab_ctrl_dw, PAB_CTRL); -} - -static void write_paged_register(struct mobiveil_pcie *pcie, - u32 val, u32 offset) -{ - u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; - - select_paged_register(pcie, offset); - csr_writel(pcie, val, off); -} - -static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset) -{ - u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; - - select_paged_register(pcie, offset); - return csr_readl(pcie, off); -} - -static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, - int pci_addr, u32 type, u64 size) -{ - int pio_ctrl_val; - int amap_ctrl_dw; - u64 size64 = ~(size - 1); - - if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) { - dev_err(&pcie->pdev->dev, - "ERROR: max inbound windows reached !\n"); - return; - } - - pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL); - csr_writel(pcie, - pio_ctrl_val | 
(1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL); - amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num)); - amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT)); - amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT)); - - write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64), - PAB_PEX_AMAP_CTRL(win_num)); - - write_paged_register(pcie, upper_32_bits(size64), - PAB_EXT_PEX_AMAP_SIZEN(win_num)); - - write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); - write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num)); - write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num)); -} - -/* - * routine to program the outbound windows - */ -static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, - u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size) -{ - - u32 value, type; - u64 size64 = ~(size - 1); - - if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) { - dev_err(&pcie->pdev->dev, - "ERROR: max outbound windows reached !\n"); - return; - } - - /* - * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit - * to 4 KB in PAB_AXI_AMAP_CTRL register - */ - type = config_io_bit; - value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); - csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | - lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num)); - - write_paged_register(pcie, upper_32_bits(size64), - PAB_EXT_AXI_AMAP_SIZE(win_num)); - - /* - * program AXI window base with appropriate value in - * PAB_AXI_AMAP_AXI_WIN0 register - */ - value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num)); - csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK), - PAB_AXI_AMAP_AXI_WIN(win_num)); - - value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num)); - - csr_writel(pcie, lower_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_L(win_num)); - csr_writel(pcie, upper_32_bits(pci_addr), - PAB_AXI_AMAP_PEX_WIN_H(win_num)); - - pcie->ob_wins_configured++; -} - -static int 
mobiveil_bringup_link(struct mobiveil_pcie *pcie) -{ - int retries; - - /* check if the link is up or not */ - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { - if (mobiveil_pcie_link_up(pcie)) - return 0; - - usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); - } - dev_err(&pcie->pdev->dev, "link never came up\n"); - return -ETIMEDOUT; -} - -static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie) -{ - phys_addr_t msg_addr = pcie->pcie_reg_base; - struct mobiveil_msi *msi = &pcie->msi; - - pcie->msi.num_of_vectors = PCI_NUM_MSI; - msi->msi_pages_phys = (phys_addr_t)msg_addr; - - writel_relaxed(lower_32_bits(msg_addr), - pcie->apb_csr_base + MSI_BASE_LO_OFFSET); - writel_relaxed(upper_32_bits(msg_addr), - pcie->apb_csr_base + MSI_BASE_HI_OFFSET); - writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET); - writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET); -} - -static int mobiveil_host_init(struct mobiveil_pcie *pcie) -{ - u32 value, pab_ctrl, type = 0; - int err; - struct resource_entry *win, *tmp; - - err = mobiveil_bringup_link(pcie); - if (err) { - dev_info(&pcie->pdev->dev, "link bring-up failed\n"); - return err; - } - - /* - * program Bus Master Enable Bit in Command Register in PAB Config - * Space - */ - value = csr_readl(pcie, PCI_COMMAND); - csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_MASTER, PCI_COMMAND); - - /* - * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL - * register - */ - pab_ctrl = csr_readl(pcie, PAB_CTRL); - csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) | - (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL); - - csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), - PAB_INTP_AMBA_MISC_ENB); - - /* - * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in - * PAB_AXI_PIO_CTRL Register - */ - value = csr_readl(pcie, PAB_AXI_PIO_CTRL); - csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL); - - /* - * we'll program one outbound window for 
config reads and - * another default inbound window for all the upstream traffic - * rest of the outbound windows will be configured according to - * the "ranges" field defined in device tree - */ - - /* config outbound translation window */ - program_ob_windows(pcie, pcie->ob_wins_configured, - pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE, - resource_size(pcie->ob_io_res)); - - /* memory inbound translation window */ - program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); - - /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { - type = 0; - if (resource_type(win->res) == IORESOURCE_MEM) - type = MEM_WINDOW_TYPE; - if (resource_type(win->res) == IORESOURCE_IO) - type = IO_WINDOW_TYPE; - if (type) { - /* configure outbound translation window */ - program_ob_windows(pcie, pcie->ob_wins_configured, - win->res->start, 0, type, - resource_size(win->res)); - } - } - - /* setup MSI hardware registers */ - mobiveil_pcie_enable_msi(pcie); - - return err; -} - -static void mobiveil_mask_intx_irq(struct irq_data *data) -{ - struct irq_desc *desc = irq_to_desc(data->irq); - struct mobiveil_pcie *pcie; - unsigned long flags; - u32 mask, shifted_val; - - pcie = irq_desc_get_chip_data(desc); - mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); - raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); - csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB); - raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); -} - -static void mobiveil_unmask_intx_irq(struct irq_data *data) -{ - struct irq_desc *desc = irq_to_desc(data->irq); - struct mobiveil_pcie *pcie; - unsigned long flags; - u32 shifted_val, mask; - - pcie = irq_desc_get_chip_data(desc); - mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); - raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); - shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); - csr_writel(pcie, (shifted_val | 
mask), PAB_INTP_AMBA_MISC_ENB); - raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); -} - -static struct irq_chip intx_irq_chip = { - .name = "mobiveil_pcie:intx", - .irq_enable = mobiveil_unmask_intx_irq, - .irq_disable = mobiveil_mask_intx_irq, - .irq_mask = mobiveil_mask_intx_irq, - .irq_unmask = mobiveil_unmask_intx_irq, -}; - -/* routine to setup the INTx related data */ -static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq); - irq_set_chip_data(irq, domain->host_data); - return 0; -} - -/* INTx domain operations structure */ -static const struct irq_domain_ops intx_domain_ops = { - .map = mobiveil_pcie_intx_map, -}; - -static struct irq_chip mobiveil_msi_irq_chip = { - .name = "Mobiveil PCIe MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info mobiveil_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), - .chip = &mobiveil_msi_irq_chip, -}; - -static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -{ - struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data); - phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int)); - - msg->address_lo = lower_32_bits(addr); - msg->address_hi = upper_32_bits(addr); - msg->data = data->hwirq; - - dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", - (int)data->hwirq, msg->address_hi, msg->address_lo); -} - -static int mobiveil_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - -static struct irq_chip mobiveil_msi_bottom_irq_chip = { - .name = "Mobiveil MSI", - .irq_compose_msi_msg = mobiveil_compose_msi_msg, - .irq_set_affinity = mobiveil_msi_set_affinity, -}; - -static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, - unsigned int 
virq, unsigned int nr_irqs, void *args) -{ - struct mobiveil_pcie *pcie = domain->host_data; - struct mobiveil_msi *msi = &pcie->msi; - unsigned long bit; - - WARN_ON(nr_irqs != 1); - mutex_lock(&msi->lock); - - bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors); - if (bit >= msi->num_of_vectors) { - mutex_unlock(&msi->lock); - return -ENOSPC; - } - - set_bit(bit, msi->msi_irq_in_use); - - mutex_unlock(&msi->lock); - - irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip, - domain->host_data, handle_level_irq, - NULL, NULL); - return 0; -} - -static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) -{ - struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d); - struct mobiveil_msi *msi = &pcie->msi; - - mutex_lock(&msi->lock); - - if (!test_bit(d->hwirq, msi->msi_irq_in_use)) { - dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n", - d->hwirq); - } else { - __clear_bit(d->hwirq, msi->msi_irq_in_use); - } - - mutex_unlock(&msi->lock); -} -static const struct irq_domain_ops msi_domain_ops = { - .alloc = mobiveil_irq_msi_domain_alloc, - .free = mobiveil_irq_msi_domain_free, -}; - -static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); - struct mobiveil_msi *msi = &pcie->msi; - - mutex_init(&pcie->msi.lock); - msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, - &msi_domain_ops, pcie); - if (!msi->dev_domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &mobiveil_msi_domain_info, msi->dev_domain); - if (!msi->msi_domain) { - dev_err(dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->dev_domain); - return -ENOMEM; - } - return 0; -} - -static int 
mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - struct device_node *node = dev->of_node; - int ret; - - /* setup INTx */ - pcie->intx_domain = irq_domain_add_linear(node, - PCI_NUM_INTX, &intx_domain_ops, pcie); - - if (!pcie->intx_domain) { - dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return -ENODEV; - } - - raw_spin_lock_init(&pcie->intx_mask_lock); - - /* setup MSI */ - ret = mobiveil_allocate_msi_domains(pcie); - if (ret) - return ret; - - return 0; -} - -static int mobiveil_pcie_probe(struct platform_device *pdev) -{ - struct mobiveil_pcie *pcie; - struct pci_bus *bus; - struct pci_bus *child; - struct pci_host_bridge *bridge; - struct device *dev = &pdev->dev; - resource_size_t iobase; - int ret; - - /* allocate the PCIe port */ - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); - if (!bridge) - return -ENODEV; - - pcie = pci_host_bridge_priv(bridge); - if (!pcie) - return -ENOMEM; - - pcie->pdev = pdev; - - ret = mobiveil_pcie_parse_dt(pcie); - if (ret) { - dev_err(dev, "Parsing DT failed, ret: %x\n", ret); - return ret; - } - - INIT_LIST_HEAD(&pcie->resources); - - /* parse the host bridge base addresses from the device tree file */ - ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, - &pcie->resources, &iobase); - if (ret) { - dev_err(dev, "Getting bridge resources failed\n"); - return -ENOMEM; - } - - /* - * configure all inbound and outbound windows and prepare the RC for - * config access - */ - ret = mobiveil_host_init(pcie); - if (ret) { - dev_err(dev, "Failed to initialize host\n"); - goto error; - } - - /* fixup for PCIe class register */ - csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS); - - /* initialize the IRQ domains */ - ret = mobiveil_pcie_init_irq_domain(pcie); - if (ret) { - dev_err(dev, "Failed creating IRQ Domain\n"); - goto error; - } - - ret = devm_request_pci_bus_resources(dev, &pcie->resources); - if (ret) - goto error; - - /* Initialize bridge */ - 
list_splice_init(&pcie->resources, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = pcie; - bridge->busnr = pcie->root_bus_nr; - bridge->ops = &mobiveil_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - /* setup the kernel resources for the newly added PCIe root bus */ - ret = pci_scan_root_bus_bridge(bridge); - if (ret) - goto error; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - - return 0; -error: - pci_free_resource_list(&pcie->resources); - return ret; -} - -static const struct of_device_id mobiveil_pcie_of_match[] = { - {.compatible = "mbvl,gpex40-pcie",}, - {}, -}; - -MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match); - -static struct platform_driver mobiveil_pcie_driver = { - .probe = mobiveil_pcie_probe, - .driver = { - .name = "mobiveil-pcie", - .of_match_table = mobiveil_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; - -builtin_platform_driver(mobiveil_pcie_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Mobiveil PCIe host controller driver"); -MODULE_AUTHOR("Subrahmanya Lingappa "); diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c deleted file mode 100644 index 874d75c9ee4a..000000000000 --- a/drivers/pci/host/pcie-rcar.c +++ /dev/null @@ -1,1222 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PCIe driver for Renesas R-Car SoCs - * Copyright (C) 2014 Renesas Electronics Europe Ltd - * - * Based on: - * arch/sh/drivers/pci/pcie-sh7786.c - * arch/sh/drivers/pci/ops-sh7786.c - * Copyright (C) 2009 - 2011 Paul Mundt - * - * Author: Phil Edworthy - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -#define PCIECAR 0x000010 -#define PCIECCTLR 
0x000018 -#define CONFIG_SEND_ENABLE BIT(31) -#define TYPE0 (0 << 8) -#define TYPE1 BIT(8) -#define PCIECDR 0x000020 -#define PCIEMSR 0x000028 -#define PCIEINTXR 0x000400 -#define PCIEPHYSR 0x0007f0 -#define PHYRDY BIT(0) -#define PCIEMSITXR 0x000840 - -/* Transfer control */ -#define PCIETCTLR 0x02000 -#define CFINIT 1 -#define PCIETSTR 0x02004 -#define DATA_LINK_ACTIVE 1 -#define PCIEERRFR 0x02020 -#define UNSUPPORTED_REQUEST BIT(4) -#define PCIEMSIFR 0x02044 -#define PCIEMSIALR 0x02048 -#define MSIFE 1 -#define PCIEMSIAUR 0x0204c -#define PCIEMSIIER 0x02050 - -/* root port address */ -#define PCIEPRAR(x) (0x02080 + ((x) * 0x4)) - -/* local address reg & mask */ -#define PCIELAR(x) (0x02200 + ((x) * 0x20)) -#define PCIELAMR(x) (0x02208 + ((x) * 0x20)) -#define LAM_PREFETCH BIT(3) -#define LAM_64BIT BIT(2) -#define LAR_ENABLE BIT(1) - -/* PCIe address reg & mask */ -#define PCIEPALR(x) (0x03400 + ((x) * 0x20)) -#define PCIEPAUR(x) (0x03404 + ((x) * 0x20)) -#define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) -#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) -#define PAR_ENABLE BIT(31) -#define IO_SPACE BIT(8) - -/* Configuration */ -#define PCICONF(x) (0x010000 + ((x) * 0x4)) -#define PMCAP(x) (0x010040 + ((x) * 0x4)) -#define EXPCAP(x) (0x010070 + ((x) * 0x4)) -#define VCCAP(x) (0x010100 + ((x) * 0x4)) - -/* link layer */ -#define IDSETR1 0x011004 -#define TLCTLR 0x011048 -#define MACSR 0x011054 -#define SPCHGFIN BIT(4) -#define SPCHGFAIL BIT(6) -#define SPCHGSUC BIT(7) -#define LINK_SPEED (0xf << 16) -#define LINK_SPEED_2_5GTS (1 << 16) -#define LINK_SPEED_5_0GTS (2 << 16) -#define MACCTLR 0x011058 -#define SPEED_CHANGE BIT(24) -#define SCRAMBLE_DISABLE BIT(27) -#define MACS2R 0x011078 -#define MACCGSPSETR 0x011084 -#define SPCNGRSN BIT(31) - -/* R-Car H1 PHY */ -#define H1_PCIEPHYADRR 0x04000c -#define WRITE_CMD BIT(16) -#define PHY_ACK BIT(24) -#define RATE_POS 12 -#define LANE_POS 8 -#define ADR_POS 0 -#define H1_PCIEPHYDOUTR 0x040014 - -/* R-Car Gen2 PHY */ -#define 
GEN2_PCIEPHYADDR 0x780 -#define GEN2_PCIEPHYDATA 0x784 -#define GEN2_PCIEPHYCTRL 0x78c - -#define INT_PCI_MSI_NR 32 - -#define RCONF(x) (PCICONF(0) + (x)) -#define RPMCAP(x) (PMCAP(0) + (x)) -#define REXPCAP(x) (EXPCAP(0) + (x)) -#define RVCCAP(x) (VCCAP(0) + (x)) - -#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) -#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) -#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) - -#define RCAR_PCI_MAX_RESOURCES 4 -#define MAX_NR_INBOUND_MAPS 6 - -struct rcar_msi { - DECLARE_BITMAP(used, INT_PCI_MSI_NR); - struct irq_domain *domain; - struct msi_controller chip; - unsigned long pages; - struct mutex lock; - int irq1; - int irq2; -}; - -static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) -{ - return container_of(chip, struct rcar_msi, chip); -} - -/* Structure representing the PCIe interface */ -struct rcar_pcie { - struct device *dev; - struct phy *phy; - void __iomem *base; - struct list_head resources; - int root_bus_nr; - struct clk *bus_clk; - struct rcar_msi msi; -}; - -static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val, - unsigned long reg) -{ - writel(val, pcie->base + reg); -} - -static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie, - unsigned long reg) -{ - return readl(pcie->base + reg); -} - -enum { - RCAR_PCI_ACCESS_READ, - RCAR_PCI_ACCESS_WRITE, -}; - -static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) -{ - int shift = 8 * (where & 3); - u32 val = rcar_pci_read_reg(pcie, where & ~3); - - val &= ~(mask << shift); - val |= data << shift; - rcar_pci_write_reg(pcie, val, where & ~3); -} - -static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) -{ - int shift = 8 * (where & 3); - u32 val = rcar_pci_read_reg(pcie, where & ~3); - - return val >> shift; -} - -/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ -static int rcar_pcie_config_access(struct rcar_pcie *pcie, - unsigned char access_type, struct pci_bus *bus, - unsigned int 
devfn, int where, u32 *data) -{ - int dev, func, reg, index; - - dev = PCI_SLOT(devfn); - func = PCI_FUNC(devfn); - reg = where & ~3; - index = reg / 4; - - /* - * While each channel has its own memory-mapped extended config - * space, it's generally only accessible when in endpoint mode. - * When in root complex mode, the controller is unable to target - * itself with either type 0 or type 1 accesses, and indeed, any - * controller initiated target transfer to its own config space - * result in a completer abort. - * - * Each channel effectively only supports a single device, but as - * the same channel <-> device access works for any PCI_SLOT() - * value, we cheat a bit here and bind the controller's config - * space to devfn 0 in order to enable self-enumeration. In this - * case the regular ECAR/ECDR path is sidelined and the mangled - * config access itself is initiated as an internal bus transaction. - */ - if (pci_is_root_bus(bus)) { - if (dev != 0) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (access_type == RCAR_PCI_ACCESS_READ) { - *data = rcar_pci_read_reg(pcie, PCICONF(index)); - } else { - /* Keep an eye out for changes to the root bus number */ - if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) - pcie->root_bus_nr = *data & 0xff; - - rcar_pci_write_reg(pcie, *data, PCICONF(index)); - } - - return PCIBIOS_SUCCESSFUL; - } - - if (pcie->root_bus_nr < 0) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* Clear errors */ - rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); - - /* Set the PIO address */ - rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | - PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); - - /* Enable the configuration access */ - if (bus->parent->number == pcie->root_bus_nr) - rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); - else - rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); - - /* Check for errors */ - if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) - return 
PCIBIOS_DEVICE_NOT_FOUND; - - /* Check for master and target aborts */ - if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & - (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (access_type == RCAR_PCI_ACCESS_READ) - *data = rcar_pci_read_reg(pcie, PCIECDR); - else - rcar_pci_write_reg(pcie, *data, PCIECDR); - - /* Disable the configuration access */ - rcar_pci_write_reg(pcie, 0, PCIECCTLR); - - return PCIBIOS_SUCCESSFUL; -} - -static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - struct rcar_pcie *pcie = bus->sysdata; - int ret; - - ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, - bus, devfn, where, val); - if (ret != PCIBIOS_SUCCESSFUL) { - *val = 0xffffffff; - return ret; - } - - if (size == 1) - *val = (*val >> (8 * (where & 3))) & 0xff; - else if (size == 2) - *val = (*val >> (8 * (where & 2))) & 0xffff; - - dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", - bus->number, devfn, where, size, (unsigned long)*val); - - return ret; -} - -/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ -static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - struct rcar_pcie *pcie = bus->sysdata; - int shift, ret; - u32 data; - - ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, - bus, devfn, where, &data); - if (ret != PCIBIOS_SUCCESSFUL) - return ret; - - dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", - bus->number, devfn, where, size, (unsigned long)val); - - if (size == 1) { - shift = 8 * (where & 3); - data &= ~(0xff << shift); - data |= ((val & 0xff) << shift); - } else if (size == 2) { - shift = 8 * (where & 2); - data &= ~(0xffff << shift); - data |= ((val & 0xffff) << shift); - } else - data = val; - - ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE, - bus, devfn, where, 
&data); - - return ret; -} - -static struct pci_ops rcar_pcie_ops = { - .read = rcar_pcie_read_conf, - .write = rcar_pcie_write_conf, -}; - -static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, - struct resource *res) -{ - /* Setup PCIe address space mappings for each resource */ - resource_size_t size; - resource_size_t res_start; - u32 mask; - - rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); - - /* - * The PAMR mask is calculated in units of 128Bytes, which - * keeps things pretty simple. - */ - size = resource_size(res); - mask = (roundup_pow_of_two(size) / SZ_128) - 1; - rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); - - if (res->flags & IORESOURCE_IO) - res_start = pci_pio_to_address(res->start); - else - res_start = res->start; - - rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); - rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, - PCIEPALR(win)); - - /* First resource is for IO */ - mask = PAR_ENABLE; - if (res->flags & IORESOURCE_IO) - mask |= IO_SPACE; - - rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); -} - -static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) -{ - struct resource_entry *win; - int i = 0; - - /* Setup PCI resources */ - resource_list_for_each_entry(win, &pci->resources) { - struct resource *res = win->res; - - if (!res->flags) - continue; - - switch (resource_type(res)) { - case IORESOURCE_IO: - case IORESOURCE_MEM: - rcar_pcie_setup_window(i, pci, res); - i++; - break; - case IORESOURCE_BUS: - pci->root_bus_nr = res->start; - break; - default: - continue; - } - - pci_add_resource(resource, res); - } - - return 1; -} - -static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) -{ - struct device *dev = pcie->dev; - unsigned int timeout = 1000; - u32 macsr; - - if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS) - return; - - if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) { - dev_err(dev, "Speed change already in progress\n"); - 
return; - } - - macsr = rcar_pci_read_reg(pcie, MACSR); - if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS) - goto done; - - /* Set target link speed to 5.0 GT/s */ - rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS, - PCI_EXP_LNKSTA_CLS_5_0GB); - - /* Set speed change reason as intentional factor */ - rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0); - - /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */ - if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL)) - rcar_pci_write_reg(pcie, macsr, MACSR); - - /* Start link speed change */ - rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE); - - while (timeout--) { - macsr = rcar_pci_read_reg(pcie, MACSR); - if (macsr & SPCHGFIN) { - /* Clear the interrupt bits */ - rcar_pci_write_reg(pcie, macsr, MACSR); - - if (macsr & SPCHGFAIL) - dev_err(dev, "Speed change failed\n"); - - goto done; - } - - msleep(1); - } - - dev_err(dev, "Speed change timed out\n"); - -done: - dev_info(dev, "Current link speed is %s GT/s\n", - (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5"); -} - -static int rcar_pcie_enable(struct rcar_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); - struct pci_bus *bus, *child; - int ret; - - /* Try setting 5 GT/s link speed */ - rcar_pcie_force_speedup(pcie); - - rcar_pcie_setup(&bridge->windows, pcie); - - pci_add_flags(PCI_REASSIGN_ALL_BUS); - - bridge->dev.parent = dev; - bridge->sysdata = pcie; - bridge->busnr = pcie->root_bus_nr; - bridge->ops = &rcar_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - if (IS_ENABLED(CONFIG_PCI_MSI)) - bridge->msi = &pcie->msi.chip; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return ret; - - bus = bridge->bus; - - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - - return 0; -} - -static int phy_wait_for_ack(struct 
rcar_pcie *pcie) -{ - struct device *dev = pcie->dev; - unsigned int timeout = 100; - - while (timeout--) { - if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) - return 0; - - udelay(100); - } - - dev_err(dev, "Access to PCIe phy timed out\n"); - - return -ETIMEDOUT; -} - -static void phy_write_reg(struct rcar_pcie *pcie, - unsigned int rate, unsigned int addr, - unsigned int lane, unsigned int data) -{ - unsigned long phyaddr; - - phyaddr = WRITE_CMD | - ((rate & 1) << RATE_POS) | - ((lane & 0xf) << LANE_POS) | - ((addr & 0xff) << ADR_POS); - - /* Set write data */ - rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); - rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); - - /* Ignore errors as they will be dealt with if the data link is down */ - phy_wait_for_ack(pcie); - - /* Clear command */ - rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); - rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR); - - /* Ignore errors as they will be dealt with if the data link is down */ - phy_wait_for_ack(pcie); -} - -static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie) -{ - unsigned int timeout = 10; - - while (timeout--) { - if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY) - return 0; - - msleep(5); - } - - return -ETIMEDOUT; -} - -static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) -{ - unsigned int timeout = 10000; - - while (timeout--) { - if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) - return 0; - - udelay(5); - cpu_relax(); - } - - return -ETIMEDOUT; -} - -static int rcar_pcie_hw_init(struct rcar_pcie *pcie) -{ - int err; - - /* Begin initialization */ - rcar_pci_write_reg(pcie, 0, PCIETCTLR); - - /* Set mode */ - rcar_pci_write_reg(pcie, 1, PCIEMSR); - - err = rcar_pcie_wait_for_phyrdy(pcie); - if (err) - return err; - - /* - * Initial header for port config space is type 1, set the device - * class to match. Hardware takes care of propagating the IDSETR - * settings, so there is no need to bother with a quirk. 
- */ - rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); - - /* - * Setup Secondary Bus Number & Subordinate Bus Number, even though - * they aren't used, to avoid bridge being detected as broken. - */ - rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); - rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); - - /* Initialize default capabilities. */ - rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); - rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), - PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); - rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, - PCI_HEADER_TYPE_BRIDGE); - - /* Enable data link layer active state reporting */ - rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC, - PCI_EXP_LNKCAP_DLLLARC); - - /* Write out the physical slot number = 0 */ - rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); - - /* Set the completion timer timeout to the maximum 50ms. */ - rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); - - /* Terminate list of capabilities (Next Capability Offset=0) */ - rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); - - /* Enable MSI */ - if (IS_ENABLED(CONFIG_PCI_MSI)) - rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); - - /* Finish initialization - establish a PCI Express link */ - rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); - - /* This will timeout if we don't have a link. 
*/ - err = rcar_pcie_wait_for_dl(pcie); - if (err) - return err; - - /* Enable INTx interrupts */ - rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); - - wmb(); - - return 0; -} - -static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie) -{ - /* Initialize the phy */ - phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); - phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); - phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); - phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); - phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); - phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); - phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); - phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); - phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); - phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); - phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); - phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); - - phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); - phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); - phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); - - return 0; -} - -static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie) -{ - /* - * These settings come from the R-Car Series, 2nd Generation User's - * Manual, section 50.3.1 (2) Initialization of the physical layer. 
- */ - rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR); - rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA); - rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); - rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); - - rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR); - /* The following value is for DC connection, no termination resistor */ - rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA); - rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); - rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); - - return 0; -} - -static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie) -{ - int err; - - err = phy_init(pcie->phy); - if (err) - return err; - - return phy_power_on(pcie->phy); -} - -static int rcar_msi_alloc(struct rcar_msi *chip) -{ - int msi; - - mutex_lock(&chip->lock); - - msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); - if (msi < INT_PCI_MSI_NR) - set_bit(msi, chip->used); - else - msi = -ENOSPC; - - mutex_unlock(&chip->lock); - - return msi; -} - -static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs) -{ - int msi; - - mutex_lock(&chip->lock); - msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR, - order_base_2(no_irqs)); - mutex_unlock(&chip->lock); - - return msi; -} - -static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) -{ - mutex_lock(&chip->lock); - clear_bit(irq, chip->used); - mutex_unlock(&chip->lock); -} - -static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) -{ - struct rcar_pcie *pcie = data; - struct rcar_msi *msi = &pcie->msi; - struct device *dev = pcie->dev; - unsigned long reg; - - reg = rcar_pci_read_reg(pcie, PCIEMSIFR); - - /* MSI & INTx share an interrupt - we only handle MSI here */ - if (!reg) - return IRQ_NONE; - - while (reg) { - unsigned int index = find_first_bit(®, 32); - unsigned int irq; - - /* clear the interrupt */ - rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); - - irq = irq_find_mapping(msi->domain, index); - if (irq) { - if 
(test_bit(index, msi->used)) - generic_handle_irq(irq); - else - dev_info(dev, "unhandled MSI\n"); - } else { - /* Unknown MSI, just clear it */ - dev_dbg(dev, "unexpected MSI\n"); - } - - /* see if there's any more pending in this vector */ - reg = rcar_pci_read_reg(pcie, PCIEMSIFR); - } - - return IRQ_HANDLED; -} - -static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, - struct msi_desc *desc) -{ - struct rcar_msi *msi = to_rcar_msi(chip); - struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); - struct msi_msg msg; - unsigned int irq; - int hwirq; - - hwirq = rcar_msi_alloc(msi); - if (hwirq < 0) - return hwirq; - - irq = irq_find_mapping(msi->domain, hwirq); - if (!irq) { - rcar_msi_free(msi, hwirq); - return -EINVAL; - } - - irq_set_msi_desc(irq, desc); - - msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; - msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); - msg.data = hwirq; - - pci_write_msi_msg(irq, &msg); - - return 0; -} - -static int rcar_msi_setup_irqs(struct msi_controller *chip, - struct pci_dev *pdev, int nvec, int type) -{ - struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); - struct rcar_msi *msi = to_rcar_msi(chip); - struct msi_desc *desc; - struct msi_msg msg; - unsigned int irq; - int hwirq; - int i; - - /* MSI-X interrupts are not supported */ - if (type == PCI_CAP_ID_MSIX) - return -EINVAL; - - WARN_ON(!list_is_singular(&pdev->dev.msi_list)); - desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list); - - hwirq = rcar_msi_alloc_region(msi, nvec); - if (hwirq < 0) - return -ENOSPC; - - irq = irq_find_mapping(msi->domain, hwirq); - if (!irq) - return -ENOSPC; - - for (i = 0; i < nvec; i++) { - /* - * irq_create_mapping() called from rcar_pcie_probe() pre- - * allocates descs, so there is no need to allocate descs here. - * We can therefore assume that if irq_find_mapping() above - * returns non-zero, then the descs are also successfully - * allocated. 
- */ - if (irq_set_msi_desc_off(irq, i, desc)) { - /* TODO: clear */ - return -EINVAL; - } - } - - desc->nvec_used = nvec; - desc->msi_attrib.multiple = order_base_2(nvec); - - msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; - msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); - msg.data = hwirq; - - pci_write_msi_msg(irq, &msg); - - return 0; -} - -static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) -{ - struct rcar_msi *msi = to_rcar_msi(chip); - struct irq_data *d = irq_get_irq_data(irq); - - rcar_msi_free(msi, d->hwirq); -} - -static struct irq_chip rcar_msi_irq_chip = { - .name = "R-Car PCIe MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - - return 0; -} - -static const struct irq_domain_ops msi_domain_ops = { - .map = rcar_msi_map, -}; - -static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie) -{ - struct rcar_msi *msi = &pcie->msi; - int i, irq; - - for (i = 0; i < INT_PCI_MSI_NR; i++) { - irq = irq_find_mapping(msi->domain, i); - if (irq > 0) - irq_dispose_mapping(irq); - } - - irq_domain_remove(msi->domain); -} - -static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct rcar_msi *msi = &pcie->msi; - unsigned long base; - int err, i; - - mutex_init(&msi->lock); - - msi->chip.dev = dev; - msi->chip.setup_irq = rcar_msi_setup_irq; - msi->chip.setup_irqs = rcar_msi_setup_irqs; - msi->chip.teardown_irq = rcar_msi_teardown_irq; - - msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, - &msi_domain_ops, &msi->chip); - if (!msi->domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - for (i = 0; i < INT_PCI_MSI_NR; i++) 
- irq_create_mapping(msi->domain, i); - - /* Two irqs are for MSI, but they are also used for non-MSI irqs */ - err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq, - IRQF_SHARED | IRQF_NO_THREAD, - rcar_msi_irq_chip.name, pcie); - if (err < 0) { - dev_err(dev, "failed to request IRQ: %d\n", err); - goto err; - } - - err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq, - IRQF_SHARED | IRQF_NO_THREAD, - rcar_msi_irq_chip.name, pcie); - if (err < 0) { - dev_err(dev, "failed to request IRQ: %d\n", err); - goto err; - } - - /* setup MSI data target */ - msi->pages = __get_free_pages(GFP_KERNEL, 0); - base = virt_to_phys((void *)msi->pages); - - rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); - rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); - - /* enable all MSI interrupts */ - rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); - - return 0; - -err: - rcar_pcie_unmap_msi(pcie); - return err; -} - -static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie) -{ - struct rcar_msi *msi = &pcie->msi; - - /* Disable all MSI interrupts */ - rcar_pci_write_reg(pcie, 0, PCIEMSIIER); - - /* Disable address decoding of the MSI interrupt, MSIFE */ - rcar_pci_write_reg(pcie, 0, PCIEMSIALR); - - free_pages(msi->pages, 0); - - rcar_pcie_unmap_msi(pcie); -} - -static int rcar_pcie_get_resources(struct rcar_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct resource res; - int err, i; - - pcie->phy = devm_phy_optional_get(dev, "pcie"); - if (IS_ERR(pcie->phy)) - return PTR_ERR(pcie->phy); - - err = of_address_to_resource(dev->of_node, 0, &res); - if (err) - return err; - - pcie->base = devm_ioremap_resource(dev, &res); - if (IS_ERR(pcie->base)) - return PTR_ERR(pcie->base); - - pcie->bus_clk = devm_clk_get(dev, "pcie_bus"); - if (IS_ERR(pcie->bus_clk)) { - dev_err(dev, "cannot get pcie bus clock\n"); - return PTR_ERR(pcie->bus_clk); - } - - i = irq_of_parse_and_map(dev->of_node, 0); - if (!i) { - dev_err(dev, "cannot get platform resources for msi interrupt\n"); - err = 
-ENOENT; - goto err_irq1; - } - pcie->msi.irq1 = i; - - i = irq_of_parse_and_map(dev->of_node, 1); - if (!i) { - dev_err(dev, "cannot get platform resources for msi interrupt\n"); - err = -ENOENT; - goto err_irq2; - } - pcie->msi.irq2 = i; - - return 0; - -err_irq2: - irq_dispose_mapping(pcie->msi.irq1); -err_irq1: - return err; -} - -static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, - struct of_pci_range *range, - int *index) -{ - u64 restype = range->flags; - u64 cpu_addr = range->cpu_addr; - u64 cpu_end = range->cpu_addr + range->size; - u64 pci_addr = range->pci_addr; - u32 flags = LAM_64BIT | LAR_ENABLE; - u64 mask; - u64 size; - int idx = *index; - - if (restype & IORESOURCE_PREFETCH) - flags |= LAM_PREFETCH; - - /* - * If the size of the range is larger than the alignment of the start - * address, we have to use multiple entries to perform the mapping. - */ - if (cpu_addr > 0) { - unsigned long nr_zeros = __ffs64(cpu_addr); - u64 alignment = 1ULL << nr_zeros; - - size = min(range->size, alignment); - } else { - size = range->size; - } - /* Hardware supports max 4GiB inbound region */ - size = min(size, 1ULL << 32); - - mask = roundup_pow_of_two(size) - 1; - mask &= ~0xf; - - while (cpu_addr < cpu_end) { - /* - * Set up 64-bit inbound regions as the range parser doesn't - * distinguish between 32 and 64-bit types. 
- */ - rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), - PCIEPRAR(idx)); - rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); - rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, - PCIELAMR(idx)); - - rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), - PCIEPRAR(idx + 1)); - rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), - PCIELAR(idx + 1)); - rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1)); - - pci_addr += size; - cpu_addr += size; - idx += 2; - - if (idx > MAX_NR_INBOUND_MAPS) { - dev_err(pcie->dev, "Failed to map inbound regions!\n"); - return -EINVAL; - } - } - *index = idx; - - return 0; -} - -static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, - struct device_node *np) -{ - struct of_pci_range range; - struct of_pci_range_parser parser; - int index = 0; - int err; - - if (of_pci_dma_range_parser_init(&parser, np)) - return -EINVAL; - - /* Get the dma-ranges from DT */ - for_each_of_pci_range(&parser, &range) { - u64 end = range.cpu_addr + range.size - 1; - - dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", - range.flags, range.cpu_addr, end, range.pci_addr); - - err = rcar_pcie_inbound_ranges(pcie, &range, &index); - if (err) - return err; - } - - return 0; -} - -static const struct of_device_id rcar_pcie_of_match[] = { - { .compatible = "renesas,pcie-r8a7779", - .data = rcar_pcie_phy_init_h1 }, - { .compatible = "renesas,pcie-r8a7790", - .data = rcar_pcie_phy_init_gen2 }, - { .compatible = "renesas,pcie-r8a7791", - .data = rcar_pcie_phy_init_gen2 }, - { .compatible = "renesas,pcie-rcar-gen2", - .data = rcar_pcie_phy_init_gen2 }, - { .compatible = "renesas,pcie-r8a7795", - .data = rcar_pcie_phy_init_gen3 }, - { .compatible = "renesas,pcie-rcar-gen3", - .data = rcar_pcie_phy_init_gen3 }, - {}, -}; - -static int rcar_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct rcar_pcie *pcie; - unsigned int data; - int err; - int (*phy_init_fn)(struct rcar_pcie *); - struct 
pci_host_bridge *bridge; - - bridge = pci_alloc_host_bridge(sizeof(*pcie)); - if (!bridge) - return -ENOMEM; - - pcie = pci_host_bridge_priv(bridge); - - pcie->dev = dev; - - err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL); - if (err) - goto err_free_bridge; - - pm_runtime_enable(pcie->dev); - err = pm_runtime_get_sync(pcie->dev); - if (err < 0) { - dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); - goto err_pm_disable; - } - - err = rcar_pcie_get_resources(pcie); - if (err < 0) { - dev_err(dev, "failed to request resources: %d\n", err); - goto err_pm_put; - } - - err = clk_prepare_enable(pcie->bus_clk); - if (err) { - dev_err(dev, "failed to enable bus clock: %d\n", err); - goto err_unmap_msi_irqs; - } - - err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); - if (err) - goto err_clk_disable; - - phy_init_fn = of_device_get_match_data(dev); - err = phy_init_fn(pcie); - if (err) { - dev_err(dev, "failed to init PCIe PHY\n"); - goto err_clk_disable; - } - - /* Failure to get a link might just be that no cards are inserted */ - if (rcar_pcie_hw_init(pcie)) { - dev_info(dev, "PCIe link down\n"); - err = -ENODEV; - goto err_clk_disable; - } - - data = rcar_pci_read_reg(pcie, MACSR); - dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - err = rcar_pcie_enable_msi(pcie); - if (err < 0) { - dev_err(dev, - "failed to enable MSI support: %d\n", - err); - goto err_clk_disable; - } - } - - err = rcar_pcie_enable(pcie); - if (err) - goto err_msi_teardown; - - return 0; - -err_msi_teardown: - if (IS_ENABLED(CONFIG_PCI_MSI)) - rcar_pcie_teardown_msi(pcie); - -err_clk_disable: - clk_disable_unprepare(pcie->bus_clk); - -err_unmap_msi_irqs: - irq_dispose_mapping(pcie->msi.irq2); - irq_dispose_mapping(pcie->msi.irq1); - -err_pm_put: - pm_runtime_put(dev); - -err_pm_disable: - pm_runtime_disable(dev); - pci_free_resource_list(&pcie->resources); - -err_free_bridge: - pci_free_host_bridge(bridge); - - return err; 
-} - -static struct platform_driver rcar_pcie_driver = { - .driver = { - .name = "rcar-pcie", - .of_match_table = rcar_pcie_of_match, - .suppress_bind_attrs = true, - }, - .probe = rcar_pcie_probe, -}; -builtin_platform_driver(rcar_pcie_driver); diff --git a/drivers/pci/host/pcie-rockchip-ep.c b/drivers/pci/host/pcie-rockchip-ep.c deleted file mode 100644 index fc267a49a932..000000000000 --- a/drivers/pci/host/pcie-rockchip-ep.c +++ /dev/null @@ -1,642 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Rockchip AXI PCIe endpoint controller driver - * - * Copyright (c) 2018 Rockchip, Inc. - * - * Author: Shawn Lin - * Simon Xue - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie-rockchip.h" - -/** - * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver - * @rockchip: Rockchip PCIe controller - * @max_regions: maximum number of regions supported by hardware - * @ob_region_map: bitmask of mapped outbound regions - * @ob_addr: base addresses in the AXI bus where the outbound regions start - * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ - * dedicated outbound regions is mapped. - * @irq_cpu_addr: base address in the CPU space where a write access triggers - * the sending of a memory write (MSI) / normal message (legacy - * IRQ) TLP through the PCIe bus. - * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ - * dedicated outbound region. - * @irq_pci_fn: the latest PCI function that has updated the mapping of - * the MSI/legacy IRQ dedicated outbound region. - * @irq_pending: bitmask of asserted legacy IRQs. 
- */ -struct rockchip_pcie_ep { - struct rockchip_pcie rockchip; - struct pci_epc *epc; - u32 max_regions; - unsigned long ob_region_map; - phys_addr_t *ob_addr; - phys_addr_t irq_phys_addr; - void __iomem *irq_cpu_addr; - u64 irq_pci_addr; - u8 irq_pci_fn; - u8 irq_pending; -}; - -static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, - u32 region) -{ - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region)); -} - -static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, - u32 r, u32 type, u64 cpu_addr, - u64 pci_addr, size_t size) -{ - u64 sz = 1ULL << fls64(size - 1); - int num_pass_bits = ilog2(sz); - u32 addr0, addr1, desc0, desc1; - bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG); - - /* The minimal region size is 1MB */ - if (num_pass_bits < 8) - num_pass_bits = 8; - - cpu_addr -= rockchip->mem_res->start; - addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) & - PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | - (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); - addr1 = upper_32_bits(is_nor_msg ? 
cpu_addr : pci_addr); - desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type; - desc1 = 0; - - if (is_nor_msg) { - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); - rockchip_pcie_write(rockchip, desc0, - ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); - rockchip_pcie_write(rockchip, desc1, - ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); - } else { - /* PCI bus address region */ - rockchip_pcie_write(rockchip, addr0, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); - rockchip_pcie_write(rockchip, addr1, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); - rockchip_pcie_write(rockchip, desc0, - ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); - rockchip_pcie_write(rockchip, desc1, - ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); - - addr0 = - ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | - (lower_32_bits(cpu_addr) & - PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); - addr1 = upper_32_bits(cpu_addr); - } - - /* CPU bus address region */ - rockchip_pcie_write(rockchip, addr0, - ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r)); - rockchip_pcie_write(rockchip, addr1, - ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r)); -} - -static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, - struct pci_epf_header *hdr) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - struct rockchip_pcie *rockchip = &ep->rockchip; - - /* All functions share the same vendor ID with function 0 */ - if (fn == 0) { - u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) | - (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16; - - rockchip_pcie_write(rockchip, vid_regs, - PCIE_CORE_CONFIG_VENDOR); - } - - rockchip_pcie_write(rockchip, hdr->deviceid << 16, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID); - - rockchip_pcie_write(rockchip, - hdr->revid | - hdr->progif_code << 8 | - hdr->subclass_code << 16 | - hdr->baseclass_code << 24, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID); - rockchip_pcie_write(rockchip, hdr->cache_line_size, 
- ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - PCI_CACHE_LINE_SIZE); - rockchip_pcie_write(rockchip, hdr->subsys_id << 16, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - PCI_SUBSYSTEM_VENDOR_ID); - rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - PCI_INTERRUPT_LINE); - - return 0; -} - -static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, - struct pci_epf_bar *epf_bar) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - struct rockchip_pcie *rockchip = &ep->rockchip; - dma_addr_t bar_phys = epf_bar->phys_addr; - enum pci_barno bar = epf_bar->barno; - int flags = epf_bar->flags; - u32 addr0, addr1, reg, cfg, b, aperture, ctrl; - u64 sz; - - /* BAR size is 2^(aperture + 7) */ - sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE); - - /* - * roundup_pow_of_two() returns an unsigned long, which is not suited - * for 64bit values. - */ - sz = 1ULL << fls64(sz - 1); - aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */ - - if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { - ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS; - } else { - bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH); - bool is_64bits = sz > SZ_2G; - - if (is_64bits && (bar & 1)) - return -EINVAL; - - if (is_64bits && is_prefetch) - ctrl = - ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS; - else if (is_prefetch) - ctrl = - ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS; - else if (is_64bits) - ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS; - else - ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS; - } - - if (bar < BAR_4) { - reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn); - b = bar; - } else { - reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn); - b = bar - BAR_4; - } - - addr0 = lower_32_bits(bar_phys); - addr1 = upper_32_bits(bar_phys); - - cfg = rockchip_pcie_read(rockchip, reg); - cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | - ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); - 
cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | - ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); - - rockchip_pcie_write(rockchip, cfg, reg); - rockchip_pcie_write(rockchip, addr0, - ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar)); - rockchip_pcie_write(rockchip, addr1, - ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); - - return 0; -} - -static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, - struct pci_epf_bar *epf_bar) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - struct rockchip_pcie *rockchip = &ep->rockchip; - u32 reg, cfg, b, ctrl; - enum pci_barno bar = epf_bar->barno; - - if (bar < BAR_4) { - reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn); - b = bar; - } else { - reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn); - b = bar - BAR_4; - } - - ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED; - cfg = rockchip_pcie_read(rockchip, reg); - cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | - ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); - cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); - - rockchip_pcie_write(rockchip, cfg, reg); - rockchip_pcie_write(rockchip, 0x0, - ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar)); - rockchip_pcie_write(rockchip, 0x0, - ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); -} - -static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, - phys_addr_t addr, u64 pci_addr, - size_t size) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - struct rockchip_pcie *pcie = &ep->rockchip; - u32 r; - - r = find_first_zero_bit(&ep->ob_region_map, - sizeof(ep->ob_region_map) * BITS_PER_LONG); - /* - * Region 0 is reserved for configuration space and shouldn't - * be used elsewhere per TRM, so leave it out. 
- */ - if (r >= ep->max_regions - 1) { - dev_err(&epc->dev, "no free outbound region\n"); - return -EINVAL; - } - - rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr, - pci_addr, size); - - set_bit(r, &ep->ob_region_map); - ep->ob_addr[r] = addr; - - return 0; -} - -static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, - phys_addr_t addr) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - struct rockchip_pcie *rockchip = &ep->rockchip; - u32 r; - - for (r = 0; r < ep->max_regions - 1; r++) - if (ep->ob_addr[r] == addr) - break; - - /* - * Region 0 is reserved for configuration space and shouldn't - * be used elsewhere per TRM, so leave it out. - */ - if (r == ep->max_regions - 1) - return; - - rockchip_pcie_clear_ep_ob_atu(rockchip, r); - - ep->ob_addr[r] = 0; - clear_bit(r, &ep->ob_region_map); -} - -static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, - u8 multi_msg_cap) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - struct rockchip_pcie *rockchip = &ep->rockchip; - u16 flags; - - flags = rockchip_pcie_read(rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_MSI_CTRL_REG); - flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK; - flags |= - ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) | - PCI_MSI_FLAGS_64BIT; - flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP; - rockchip_pcie_write(rockchip, flags, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_MSI_CTRL_REG); - return 0; -} - -static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - struct rockchip_pcie *rockchip = &ep->rockchip; - u16 flags; - - flags = rockchip_pcie_read(rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_MSI_CTRL_REG); - if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) - return -EINVAL; - - return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> - ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); -} - -static void 
rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, - u8 intx, bool is_asserted) -{ - struct rockchip_pcie *rockchip = &ep->rockchip; - u32 r = ep->max_regions - 1; - u32 offset; - u16 status; - u8 msg_code; - - if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR || - ep->irq_pci_fn != fn)) { - rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r, - AXI_WRAPPER_NOR_MSG, - ep->irq_phys_addr, 0, 0); - ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR; - ep->irq_pci_fn = fn; - } - - intx &= 3; - if (is_asserted) { - ep->irq_pending |= BIT(intx); - msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx; - } else { - ep->irq_pending &= ~BIT(intx); - msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx; - } - - status = rockchip_pcie_read(rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_CMD_STATUS); - status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS; - - if ((status != 0) ^ (ep->irq_pending != 0)) { - status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS; - rockchip_pcie_write(rockchip, status, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_CMD_STATUS); - } - - offset = - ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) | - ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA; - writel(0, ep->irq_cpu_addr + offset); -} - -static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn, - u8 intx) -{ - u16 cmd; - - cmd = rockchip_pcie_read(&ep->rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_CMD_STATUS); - - if (cmd & PCI_COMMAND_INTX_DISABLE) - return -EINVAL; - - /* - * Should add some delay between toggling INTx per TRM vaguely saying - * it depends on some cycles of the AHB bus clock to function it. So - * add sufficient 1ms here. 
- */ - rockchip_pcie_ep_assert_intx(ep, fn, intx, true); - mdelay(1); - rockchip_pcie_ep_assert_intx(ep, fn, intx, false); - return 0; -} - -static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, - u8 interrupt_num) -{ - struct rockchip_pcie *rockchip = &ep->rockchip; - u16 flags, mme, data, data_mask; - u8 msi_count; - u64 pci_addr, pci_addr_mask = 0xff; - - /* Check MSI enable bit */ - flags = rockchip_pcie_read(&ep->rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_MSI_CTRL_REG); - if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) - return -EINVAL; - - /* Get MSI numbers from MME */ - mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> - ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); - msi_count = 1 << mme; - if (!interrupt_num || interrupt_num > msi_count) - return -EINVAL; - - /* Set MSI private data */ - data_mask = msi_count - 1; - data = rockchip_pcie_read(rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_MSI_CTRL_REG + - PCI_MSI_DATA_64); - data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); - - /* Get MSI PCI address */ - pci_addr = rockchip_pcie_read(rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_MSI_CTRL_REG + - PCI_MSI_ADDRESS_HI); - pci_addr <<= 32; - pci_addr |= rockchip_pcie_read(rockchip, - ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + - ROCKCHIP_PCIE_EP_MSI_CTRL_REG + - PCI_MSI_ADDRESS_LO); - pci_addr &= GENMASK_ULL(63, 2); - - /* Set the outbound region if needed. 
*/ - if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || - ep->irq_pci_fn != fn)) { - rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1, - AXI_WRAPPER_MEM_WRITE, - ep->irq_phys_addr, - pci_addr & ~pci_addr_mask, - pci_addr_mask + 1); - ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); - ep->irq_pci_fn = fn; - } - - writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); - return 0; -} - -static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, - enum pci_epc_irq_type type, - u8 interrupt_num) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - - switch (type) { - case PCI_EPC_IRQ_LEGACY: - return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0); - case PCI_EPC_IRQ_MSI: - return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num); - default: - return -EINVAL; - } -} - -static int rockchip_pcie_ep_start(struct pci_epc *epc) -{ - struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); - struct rockchip_pcie *rockchip = &ep->rockchip; - struct pci_epf *epf; - u32 cfg; - - cfg = BIT(0); - list_for_each_entry(epf, &epc->pci_epf, list) - cfg |= BIT(epf->func_no); - - rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG); - - list_for_each_entry(epf, &epc->pci_epf, list) - pci_epf_linkup(epf); - - return 0; -} - -static const struct pci_epc_ops rockchip_pcie_epc_ops = { - .write_header = rockchip_pcie_ep_write_header, - .set_bar = rockchip_pcie_ep_set_bar, - .clear_bar = rockchip_pcie_ep_clear_bar, - .map_addr = rockchip_pcie_ep_map_addr, - .unmap_addr = rockchip_pcie_ep_unmap_addr, - .set_msi = rockchip_pcie_ep_set_msi, - .get_msi = rockchip_pcie_ep_get_msi, - .raise_irq = rockchip_pcie_ep_raise_irq, - .start = rockchip_pcie_ep_start, -}; - -static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip, - struct rockchip_pcie_ep *ep) -{ - struct device *dev = rockchip->dev; - int err; - - err = rockchip_pcie_parse_dt(rockchip); - if (err) - return err; - - err = rockchip_pcie_get_phys(rockchip); - if (err) - return err; - - 
err = of_property_read_u32(dev->of_node, - "rockchip,max-outbound-regions", - &ep->max_regions); - if (err < 0 || ep->max_regions > MAX_REGION_LIMIT) - ep->max_regions = MAX_REGION_LIMIT; - - err = of_property_read_u8(dev->of_node, "max-functions", - &ep->epc->max_functions); - if (err < 0) - ep->epc->max_functions = 1; - - return 0; -} - -static const struct of_device_id rockchip_pcie_ep_of_match[] = { - { .compatible = "rockchip,rk3399-pcie-ep"}, - {}, -}; - -static int rockchip_pcie_ep_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct rockchip_pcie_ep *ep; - struct rockchip_pcie *rockchip; - struct pci_epc *epc; - size_t max_regions; - int err; - - ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); - if (!ep) - return -ENOMEM; - - rockchip = &ep->rockchip; - rockchip->is_rc = false; - rockchip->dev = dev; - - epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops); - if (IS_ERR(epc)) { - dev_err(dev, "failed to create epc device\n"); - return PTR_ERR(epc); - } - - ep->epc = epc; - epc_set_drvdata(epc, ep); - - err = rockchip_pcie_parse_ep_dt(rockchip, ep); - if (err) - return err; - - err = rockchip_pcie_enable_clocks(rockchip); - if (err) - return err; - - err = rockchip_pcie_init_port(rockchip); - if (err) - goto err_disable_clocks; - - /* Establish the link automatically */ - rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, - PCIE_CLIENT_CONFIG); - - max_regions = ep->max_regions; - ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr), - GFP_KERNEL); - - if (!ep->ob_addr) { - err = -ENOMEM; - goto err_uninit_port; - } - - /* Only enable function 0 by default */ - rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG); - - err = pci_epc_mem_init(epc, rockchip->mem_res->start, - resource_size(rockchip->mem_res)); - if (err < 0) { - dev_err(dev, "failed to initialize the memory space\n"); - goto err_uninit_port; - } - - ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, - SZ_128K); - 
if (!ep->irq_cpu_addr) { - dev_err(dev, "failed to reserve memory space for MSI\n"); - err = -ENOMEM; - goto err_epc_mem_exit; - } - - ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR; - - return 0; -err_epc_mem_exit: - pci_epc_mem_exit(epc); -err_uninit_port: - rockchip_pcie_deinit_phys(rockchip); -err_disable_clocks: - rockchip_pcie_disable_clocks(rockchip); - return err; -} - -static struct platform_driver rockchip_pcie_ep_driver = { - .driver = { - .name = "rockchip-pcie-ep", - .of_match_table = rockchip_pcie_ep_of_match, - }, - .probe = rockchip_pcie_ep_probe, -}; - -builtin_platform_driver(rockchip_pcie_ep_driver); diff --git a/drivers/pci/host/pcie-rockchip-host.c b/drivers/pci/host/pcie-rockchip-host.c deleted file mode 100644 index 1372d270764f..000000000000 --- a/drivers/pci/host/pcie-rockchip-host.c +++ /dev/null @@ -1,1142 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Rockchip AXI PCIe host controller driver - * - * Copyright (c) 2016 Rockchip, Inc. - * - * Author: Shawn Lin - * Wenrui Li - * - * Bits taken from Synopsys DesignWare Host controller driver and - * ARM PCI Host generic driver. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" -#include "pcie-rockchip.h" - -static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) -{ - u32 status; - - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); -} - -static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) -{ - u32 status; - - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); -} - -static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) -{ - u32 val; - - /* Update Tx credit maximum update interval */ - val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1); - val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK; - val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */ - rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1); -} - -static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, - struct pci_bus *bus, int dev) -{ - /* access only one slot on each root port */ - if (bus->number == rockchip->root_bus_nr && dev > 0) - return 0; - - /* - * do not read more than one device on the bus directly attached - * to RC's downstream side. - */ - if (bus->primary == rockchip->root_bus_nr && dev > 0) - return 0; - - return 1; -} - -static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip) -{ - u32 val; - u8 map; - - if (rockchip->legacy_phy) - return GENMASK(MAX_LANE_NUM - 1, 0); - - val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP); - map = val & PCIE_CORE_LANE_MAP_MASK; - - /* The link may be using a reverse-indexed mapping. 
*/ - if (val & PCIE_CORE_LANE_MAP_REVERSE) - map = bitrev8(map) >> 4; - - return map; -} - -static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, - int where, int size, u32 *val) -{ - void __iomem *addr; - - addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; - - if (!IS_ALIGNED((uintptr_t)addr, size)) { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - if (size == 4) { - *val = readl(addr); - } else if (size == 2) { - *val = readw(addr); - } else if (size == 1) { - *val = readb(addr); - } else { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - return PCIBIOS_SUCCESSFUL; -} - -static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, - int where, int size, u32 val) -{ - u32 mask, tmp, offset; - void __iomem *addr; - - offset = where & ~0x3; - addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; - - if (size == 4) { - writel(val, addr); - return PCIBIOS_SUCCESSFUL; - } - - mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); - - /* - * N.B. This read/modify/write isn't safe in general because it can - * corrupt RW1C bits in adjacent registers. But the hardware - * doesn't support smaller writes. 
- */ - tmp = readl(addr) & mask; - tmp |= val << ((where & 0x3) * 8); - writel(tmp, addr); - - return PCIBIOS_SUCCESSFUL; -} - -static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, - struct pci_bus *bus, u32 devfn, - int where, int size, u32 *val) -{ - u32 busdev; - - busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn), where); - - if (!IS_ALIGNED(busdev, size)) { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - - if (bus->parent->number == rockchip->root_bus_nr) - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE0_CFG); - else - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE1_CFG); - - if (size == 4) { - *val = readl(rockchip->reg_base + busdev); - } else if (size == 2) { - *val = readw(rockchip->reg_base + busdev); - } else if (size == 1) { - *val = readb(rockchip->reg_base + busdev); - } else { - *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; - } - return PCIBIOS_SUCCESSFUL; -} - -static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, - struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) -{ - u32 busdev; - - busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn), where); - if (!IS_ALIGNED(busdev, size)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - if (bus->parent->number == rockchip->root_bus_nr) - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE0_CFG); - else - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE1_CFG); - - if (size == 4) - writel(val, rockchip->reg_base + busdev); - else if (size == 2) - writew(val, rockchip->reg_base + busdev); - else if (size == 1) - writeb(val, rockchip->reg_base + busdev); - else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - int size, u32 *val) -{ - struct rockchip_pcie *rockchip = bus->sysdata; - - if (!rockchip_pcie_valid_device(rockchip, bus, 
PCI_SLOT(devfn))) { - *val = 0xffffffff; - return PCIBIOS_DEVICE_NOT_FOUND; - } - - if (bus->number == rockchip->root_bus_nr) - return rockchip_pcie_rd_own_conf(rockchip, where, size, val); - - return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, - val); -} - -static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) -{ - struct rockchip_pcie *rockchip = bus->sysdata; - - if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == rockchip->root_bus_nr) - return rockchip_pcie_wr_own_conf(rockchip, where, size, val); - - return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, - val); -} - -static struct pci_ops rockchip_pcie_ops = { - .read = rockchip_pcie_rd_conf, - .write = rockchip_pcie_wr_conf, -}; - -static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) -{ - int curr; - u32 status, scale, power; - - if (IS_ERR(rockchip->vpcie3v3)) - return; - - /* - * Set RC's captured slot power limit and scale if - * vpcie3v3 available. The default values are both zero - * which means the software should set these two according - * to the actual power supply. 
- */ - curr = regulator_get_current_limit(rockchip->vpcie3v3); - if (curr <= 0) - return; - - scale = 3; /* 0.001x */ - curr = curr / 1000; /* convert to mA */ - power = (curr * 3300) / 1000; /* milliwatt */ - while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { - if (!scale) { - dev_warn(rockchip->dev, "invalid power supply\n"); - return; - } - scale--; - power = power / 10; - } - - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); - status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | - (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); -} - -/** - * rockchip_pcie_host_init_port - Initialize hardware - * @rockchip: PCIe port information - */ -static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int err, i = MAX_LANE_NUM; - u32 status; - - gpiod_set_value_cansleep(rockchip->ep_gpio, 0); - - err = rockchip_pcie_init_port(rockchip); - if (err) - return err; - - /* Fix the transmitted FTS count desired to exit from L0s. 
*/ - status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); - status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | - (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); - rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); - - rockchip_pcie_set_power_limit(rockchip); - - /* Set RC's clock architecture as common clock */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= PCI_EXP_LNKSTA_SLC << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); - - /* Set RC's RCB to 128 */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= PCI_EXP_LNKCTL_RCB; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); - - /* Enable Gen1 training */ - rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, - PCIE_CLIENT_CONFIG); - - gpiod_set_value_cansleep(rockchip->ep_gpio, 1); - - /* 500ms timeout value should be enough for Gen1/2 training */ - err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, - status, PCIE_LINK_UP(status), 20, - 500 * USEC_PER_MSEC); - if (err) { - dev_err(dev, "PCIe link training gen1 timeout!\n"); - goto err_power_off_phy; - } - - if (rockchip->link_gen == 2) { - /* - * Enable retrain for gen2. This should be configured only after - * gen1 finished. 
- */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); - status |= PCI_EXP_LNKCTL_RL; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); - - err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, - status, PCIE_LINK_IS_GEN2(status), 20, - 500 * USEC_PER_MSEC); - if (err) - dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); - } - - /* Check the final link width from negotiated lane counter from MGMT */ - status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); - status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> - PCIE_CORE_PL_CONF_LANE_SHIFT); - dev_dbg(dev, "current link width is x%d\n", status); - - /* Power off unused lane(s) */ - rockchip->lanes_map = rockchip_pcie_lane_map(rockchip); - for (i = 0; i < MAX_LANE_NUM; i++) { - if (!(rockchip->lanes_map & BIT(i))) { - dev_dbg(dev, "idling lane %d\n", i); - phy_power_off(rockchip->phys[i]); - } - } - - rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, - PCIE_CORE_CONFIG_VENDOR); - rockchip_pcie_write(rockchip, - PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, - PCIE_RC_CONFIG_RID_CCR); - - /* Clear THP cap's next cap pointer to remove L1 substate cap */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP); - status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP); - - /* Clear L0s from RC's link cap */ - if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); - status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); - } - - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); - status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; - status |= PCIE_RC_CONFIG_DCSR_MPS_256; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); - - return 0; -err_power_off_phy: - while (i--) - phy_power_off(rockchip->phys[i]); - i = MAX_LANE_NUM; - while (i--) - phy_exit(rockchip->phys[i]); - return 
err; -} - -static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) -{ - struct rockchip_pcie *rockchip = arg; - struct device *dev = rockchip->dev; - u32 reg; - u32 sub_reg; - - reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); - if (reg & PCIE_CLIENT_INT_LOCAL) { - dev_dbg(dev, "local interrupt received\n"); - sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS); - if (sub_reg & PCIE_CORE_INT_PRFPE) - dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n"); - - if (sub_reg & PCIE_CORE_INT_CRFPE) - dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n"); - - if (sub_reg & PCIE_CORE_INT_RRPE) - dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n"); - - if (sub_reg & PCIE_CORE_INT_PRFO) - dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n"); - - if (sub_reg & PCIE_CORE_INT_CRFO) - dev_dbg(dev, "overflow occurred in the completion receive FIFO\n"); - - if (sub_reg & PCIE_CORE_INT_RT) - dev_dbg(dev, "replay timer timed out\n"); - - if (sub_reg & PCIE_CORE_INT_RTR) - dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n"); - - if (sub_reg & PCIE_CORE_INT_PE) - dev_dbg(dev, "phy error detected on receive side\n"); - - if (sub_reg & PCIE_CORE_INT_MTR) - dev_dbg(dev, "malformed TLP received from the link\n"); - - if (sub_reg & PCIE_CORE_INT_UCR) - dev_dbg(dev, "malformed TLP received from the link\n"); - - if (sub_reg & PCIE_CORE_INT_FCE) - dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); - - if (sub_reg & PCIE_CORE_INT_CT) - dev_dbg(dev, "a request timed out waiting for completion\n"); - - if (sub_reg & PCIE_CORE_INT_UTC) - dev_dbg(dev, "unmapped TC error\n"); - - if (sub_reg & PCIE_CORE_INT_MMVC) - dev_dbg(dev, "MSI mask register changes\n"); - - rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS); - } else if (reg & PCIE_CLIENT_INT_PHY) { - dev_dbg(dev, "phy link 
changes\n"); - rockchip_pcie_update_txcredit_mui(rockchip); - rockchip_pcie_clr_bw_int(rockchip); - } - - rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL, - PCIE_CLIENT_INT_STATUS); - - return IRQ_HANDLED; -} - -static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) -{ - struct rockchip_pcie *rockchip = arg; - struct device *dev = rockchip->dev; - u32 reg; - - reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); - if (reg & PCIE_CLIENT_INT_LEGACY_DONE) - dev_dbg(dev, "legacy done interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_MSG) - dev_dbg(dev, "message done interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_HOT_RST) - dev_dbg(dev, "hot reset interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_DPA) - dev_dbg(dev, "dpa interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_FATAL_ERR) - dev_dbg(dev, "fatal error interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_NFATAL_ERR) - dev_dbg(dev, "no fatal error interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_CORR_ERR) - dev_dbg(dev, "correctable error interrupt received\n"); - - if (reg & PCIE_CLIENT_INT_PHY) - dev_dbg(dev, "phy interrupt received\n"); - - rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE | - PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST | - PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR | - PCIE_CLIENT_INT_NFATAL_ERR | - PCIE_CLIENT_INT_CORR_ERR | - PCIE_CLIENT_INT_PHY), - PCIE_CLIENT_INT_STATUS); - - return IRQ_HANDLED; -} - -static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); - struct device *dev = rockchip->dev; - u32 reg; - u32 hwirq; - u32 virq; - - chained_irq_enter(chip, desc); - - reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); - reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT; - - while (reg) { - hwirq = ffs(reg) - 1; - reg &= ~BIT(hwirq); - - virq = 
irq_find_mapping(rockchip->irq_domain, hwirq); - if (virq) - generic_handle_irq(virq); - else - dev_err(dev, "unexpected IRQ, INT%d\n", hwirq); - } - - chained_irq_exit(chip, desc); -} - -static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) -{ - int irq, err; - struct device *dev = rockchip->dev; - struct platform_device *pdev = to_platform_device(dev); - - irq = platform_get_irq_byname(pdev, "sys"); - if (irq < 0) { - dev_err(dev, "missing sys IRQ resource\n"); - return irq; - } - - err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, - IRQF_SHARED, "pcie-sys", rockchip); - if (err) { - dev_err(dev, "failed to request PCIe subsystem IRQ\n"); - return err; - } - - irq = platform_get_irq_byname(pdev, "legacy"); - if (irq < 0) { - dev_err(dev, "missing legacy IRQ resource\n"); - return irq; - } - - irq_set_chained_handler_and_data(irq, - rockchip_pcie_legacy_int_handler, - rockchip); - - irq = platform_get_irq_byname(pdev, "client"); - if (irq < 0) { - dev_err(dev, "missing client IRQ resource\n"); - return irq; - } - - err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, - IRQF_SHARED, "pcie-client", rockchip); - if (err) { - dev_err(dev, "failed to request PCIe client IRQ\n"); - return err; - } - - return 0; -} - -/** - * rockchip_pcie_parse_host_dt - Parse Device Tree - * @rockchip: PCIe port information - * - * Return: '0' on success and error value on failure - */ -static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int err; - - err = rockchip_pcie_parse_dt(rockchip); - if (err) - return err; - - err = rockchip_pcie_setup_irq(rockchip); - if (err) - return err; - - rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); - if (IS_ERR(rockchip->vpcie12v)) { - if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(dev, "no vpcie12v regulator found\n"); - } - - rockchip->vpcie3v3 = devm_regulator_get_optional(dev, 
"vpcie3v3"); - if (IS_ERR(rockchip->vpcie3v3)) { - if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(dev, "no vpcie3v3 regulator found\n"); - } - - rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); - if (IS_ERR(rockchip->vpcie1v8)) { - if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(dev, "no vpcie1v8 regulator found\n"); - } - - rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); - if (IS_ERR(rockchip->vpcie0v9)) { - if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(dev, "no vpcie0v9 regulator found\n"); - } - - return 0; -} - -static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int err; - - if (!IS_ERR(rockchip->vpcie12v)) { - err = regulator_enable(rockchip->vpcie12v); - if (err) { - dev_err(dev, "fail to enable vpcie12v regulator\n"); - goto err_out; - } - } - - if (!IS_ERR(rockchip->vpcie3v3)) { - err = regulator_enable(rockchip->vpcie3v3); - if (err) { - dev_err(dev, "fail to enable vpcie3v3 regulator\n"); - goto err_disable_12v; - } - } - - if (!IS_ERR(rockchip->vpcie1v8)) { - err = regulator_enable(rockchip->vpcie1v8); - if (err) { - dev_err(dev, "fail to enable vpcie1v8 regulator\n"); - goto err_disable_3v3; - } - } - - if (!IS_ERR(rockchip->vpcie0v9)) { - err = regulator_enable(rockchip->vpcie0v9); - if (err) { - dev_err(dev, "fail to enable vpcie0v9 regulator\n"); - goto err_disable_1v8; - } - } - - return 0; - -err_disable_1v8: - if (!IS_ERR(rockchip->vpcie1v8)) - regulator_disable(rockchip->vpcie1v8); -err_disable_3v3: - if (!IS_ERR(rockchip->vpcie3v3)) - regulator_disable(rockchip->vpcie3v3); -err_disable_12v: - if (!IS_ERR(rockchip->vpcie12v)) - regulator_disable(rockchip->vpcie12v); -err_out: - return err; -} - -static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip) -{ - rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) & - 
(~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK); - rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT), - PCIE_CORE_INT_MASK); - - rockchip_pcie_enable_bw_int(rockchip); -} - -static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - - return 0; -} - -static const struct irq_domain_ops intx_domain_ops = { - .map = rockchip_pcie_intx_map, -}; - -static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - struct device_node *intc = of_get_next_child(dev->of_node, NULL); - - if (!intc) { - dev_err(dev, "missing child interrupt-controller node\n"); - return -EINVAL; - } - - rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &intx_domain_ops, rockchip); - if (!rockchip->irq_domain) { - dev_err(dev, "failed to get a INTx IRQ domain\n"); - return -EINVAL; - } - - return 0; -} - -static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip, - int region_no, int type, u8 num_pass_bits, - u32 lower_addr, u32 upper_addr) -{ - u32 ob_addr_0; - u32 ob_addr_1; - u32 ob_desc_0; - u32 aw_offset; - - if (region_no >= MAX_AXI_WRAPPER_REGION_NUM) - return -EINVAL; - if (num_pass_bits + 1 < 8) - return -EINVAL; - if (num_pass_bits > 63) - return -EINVAL; - if (region_no == 0) { - if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits)) - return -EINVAL; - } - if (region_no != 0) { - if (AXI_REGION_SIZE < (2ULL << num_pass_bits)) - return -EINVAL; - } - - aw_offset = (region_no << OB_REG_SIZE_SHIFT); - - ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS; - ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR; - ob_addr_1 = upper_addr; - ob_desc_0 = (1 << 23 | type); - - rockchip_pcie_write(rockchip, ob_addr_0, - PCIE_CORE_OB_REGION_ADDR0 + aw_offset); - rockchip_pcie_write(rockchip, ob_addr_1, - PCIE_CORE_OB_REGION_ADDR1 + aw_offset); - 
rockchip_pcie_write(rockchip, ob_desc_0, - PCIE_CORE_OB_REGION_DESC0 + aw_offset); - rockchip_pcie_write(rockchip, 0, - PCIE_CORE_OB_REGION_DESC1 + aw_offset); - - return 0; -} - -static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, - int region_no, u8 num_pass_bits, - u32 lower_addr, u32 upper_addr) -{ - u32 ib_addr_0; - u32 ib_addr_1; - u32 aw_offset; - - if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM) - return -EINVAL; - if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED) - return -EINVAL; - if (num_pass_bits > 63) - return -EINVAL; - - aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT); - - ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS; - ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR; - ib_addr_1 = upper_addr; - - rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset); - rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset); - - return 0; -} - -static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int offset; - int err; - int reg_no; - - rockchip_pcie_cfg_configuration_accesses(rockchip, - AXI_WRAPPER_TYPE0_CFG); - - for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { - err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, - AXI_WRAPPER_MEM_WRITE, - 20 - 1, - rockchip->mem_bus_addr + - (reg_no << 20), - 0); - if (err) { - dev_err(dev, "program RC mem outbound ATU failed\n"); - return err; - } - } - - err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); - if (err) { - dev_err(dev, "program RC mem inbound ATU failed\n"); - return err; - } - - offset = rockchip->mem_size >> 20; - for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) { - err = rockchip_pcie_prog_ob_atu(rockchip, - reg_no + 1 + offset, - AXI_WRAPPER_IO_WRITE, - 20 - 1, - rockchip->io_bus_addr + - (reg_no << 20), - 0); - if (err) { - dev_err(dev, "program RC io outbound ATU failed\n"); - return err; - } - } - - /* assign message 
regions */ - rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset, - AXI_WRAPPER_NOR_MSG, - 20 - 1, 0, 0); - - rockchip->msg_bus_addr = rockchip->mem_bus_addr + - ((reg_no + offset) << 20); - return err; -} - -static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip) -{ - u32 value; - int err; - - /* send PME_TURN_OFF message */ - writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF); - - /* read LTSSM and wait for falling into L2 link state */ - err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0, - value, PCIE_LINK_IS_L2(value), 20, - jiffies_to_usecs(5 * HZ)); - if (err) { - dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n"); - return err; - } - - return 0; -} - -static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) -{ - struct rockchip_pcie *rockchip = dev_get_drvdata(dev); - int ret; - - /* disable core and cli int since we don't need to ack PME_ACK */ - rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) | - PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK); - rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK); - - ret = rockchip_pcie_wait_l2(rockchip); - if (ret) { - rockchip_pcie_enable_interrupts(rockchip); - return ret; - } - - rockchip_pcie_deinit_phys(rockchip); - - rockchip_pcie_disable_clocks(rockchip); - - if (!IS_ERR(rockchip->vpcie0v9)) - regulator_disable(rockchip->vpcie0v9); - - return ret; -} - -static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) -{ - struct rockchip_pcie *rockchip = dev_get_drvdata(dev); - int err; - - if (!IS_ERR(rockchip->vpcie0v9)) { - err = regulator_enable(rockchip->vpcie0v9); - if (err) { - dev_err(dev, "fail to enable vpcie0v9 regulator\n"); - return err; - } - } - - err = rockchip_pcie_enable_clocks(rockchip); - if (err) - goto err_disable_0v9; - - err = rockchip_pcie_host_init_port(rockchip); - if (err) - goto err_pcie_resume; - - err = rockchip_pcie_cfg_atu(rockchip); - if (err) - goto err_err_deinit_port; - - /* Need this 
to enter L1 again */ - rockchip_pcie_update_txcredit_mui(rockchip); - rockchip_pcie_enable_interrupts(rockchip); - - return 0; - -err_err_deinit_port: - rockchip_pcie_deinit_phys(rockchip); -err_pcie_resume: - rockchip_pcie_disable_clocks(rockchip); -err_disable_0v9: - if (!IS_ERR(rockchip->vpcie0v9)) - regulator_disable(rockchip->vpcie0v9); - return err; -} - -static int rockchip_pcie_probe(struct platform_device *pdev) -{ - struct rockchip_pcie *rockchip; - struct device *dev = &pdev->dev; - struct pci_bus *bus, *child; - struct pci_host_bridge *bridge; - struct resource_entry *win; - resource_size_t io_base; - struct resource *mem; - struct resource *io; - int err; - - LIST_HEAD(res); - - if (!dev->of_node) - return -ENODEV; - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip)); - if (!bridge) - return -ENOMEM; - - rockchip = pci_host_bridge_priv(bridge); - - platform_set_drvdata(pdev, rockchip); - rockchip->dev = dev; - rockchip->is_rc = true; - - err = rockchip_pcie_parse_host_dt(rockchip); - if (err) - return err; - - err = rockchip_pcie_enable_clocks(rockchip); - if (err) - return err; - - err = rockchip_pcie_set_vpcie(rockchip); - if (err) { - dev_err(dev, "failed to set vpcie regulator\n"); - goto err_set_vpcie; - } - - err = rockchip_pcie_host_init_port(rockchip); - if (err) - goto err_vpcie; - - rockchip_pcie_enable_interrupts(rockchip); - - err = rockchip_pcie_init_irq_domain(rockchip); - if (err < 0) - goto err_deinit_port; - - err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, - &res, &io_base); - if (err) - goto err_remove_irq_domain; - - err = devm_request_pci_bus_resources(dev, &res); - if (err) - goto err_free_res; - - /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry(win, &res) { - switch (resource_type(win->res)) { - case IORESOURCE_IO: - io = win->res; - io->name = "I/O"; - rockchip->io_size = resource_size(io); - rockchip->io_bus_addr = io->start - win->offset; - err = pci_remap_iospace(io, io_base); - 
if (err) { - dev_warn(dev, "error %d: failed to map resource %pR\n", - err, io); - continue; - } - rockchip->io = io; - break; - case IORESOURCE_MEM: - mem = win->res; - mem->name = "MEM"; - rockchip->mem_size = resource_size(mem); - rockchip->mem_bus_addr = mem->start - win->offset; - break; - case IORESOURCE_BUS: - rockchip->root_bus_nr = win->res->start; - break; - default: - continue; - } - } - - err = rockchip_pcie_cfg_atu(rockchip); - if (err) - goto err_unmap_iospace; - - rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); - if (!rockchip->msg_region) { - err = -ENOMEM; - goto err_unmap_iospace; - } - - list_splice_init(&res, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = rockchip; - bridge->busnr = 0; - bridge->ops = &rockchip_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - err = pci_scan_root_bus_bridge(bridge); - if (err < 0) - goto err_unmap_iospace; - - bus = bridge->bus; - - rockchip->root_bus = bus; - - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - return 0; - -err_unmap_iospace: - pci_unmap_iospace(rockchip->io); -err_free_res: - pci_free_resource_list(&res); -err_remove_irq_domain: - irq_domain_remove(rockchip->irq_domain); -err_deinit_port: - rockchip_pcie_deinit_phys(rockchip); -err_vpcie: - if (!IS_ERR(rockchip->vpcie12v)) - regulator_disable(rockchip->vpcie12v); - if (!IS_ERR(rockchip->vpcie3v3)) - regulator_disable(rockchip->vpcie3v3); - if (!IS_ERR(rockchip->vpcie1v8)) - regulator_disable(rockchip->vpcie1v8); - if (!IS_ERR(rockchip->vpcie0v9)) - regulator_disable(rockchip->vpcie0v9); -err_set_vpcie: - rockchip_pcie_disable_clocks(rockchip); - return err; -} - -static int rockchip_pcie_remove(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct rockchip_pcie *rockchip = dev_get_drvdata(dev); - 
- pci_stop_root_bus(rockchip->root_bus); - pci_remove_root_bus(rockchip->root_bus); - pci_unmap_iospace(rockchip->io); - irq_domain_remove(rockchip->irq_domain); - - rockchip_pcie_deinit_phys(rockchip); - - rockchip_pcie_disable_clocks(rockchip); - - if (!IS_ERR(rockchip->vpcie12v)) - regulator_disable(rockchip->vpcie12v); - if (!IS_ERR(rockchip->vpcie3v3)) - regulator_disable(rockchip->vpcie3v3); - if (!IS_ERR(rockchip->vpcie1v8)) - regulator_disable(rockchip->vpcie1v8); - if (!IS_ERR(rockchip->vpcie0v9)) - regulator_disable(rockchip->vpcie0v9); - - return 0; -} - -static const struct dev_pm_ops rockchip_pcie_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, - rockchip_pcie_resume_noirq) -}; - -static const struct of_device_id rockchip_pcie_of_match[] = { - { .compatible = "rockchip,rk3399-pcie", }, - {} -}; -MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match); - -static struct platform_driver rockchip_pcie_driver = { - .driver = { - .name = "rockchip-pcie", - .of_match_table = rockchip_pcie_of_match, - .pm = &rockchip_pcie_pm_ops, - }, - .probe = rockchip_pcie_probe, - .remove = rockchip_pcie_remove, -}; -module_platform_driver(rockchip_pcie_driver); - -MODULE_AUTHOR("Rockchip Inc"); -MODULE_DESCRIPTION("Rockchip AXI PCIe driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c deleted file mode 100644 index c53d1322a3d6..000000000000 --- a/drivers/pci/host/pcie-rockchip.c +++ /dev/null @@ -1,424 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Rockchip AXI PCIe host controller driver - * - * Copyright (c) 2016 Rockchip, Inc. - * - * Author: Shawn Lin - * Wenrui Li - * - * Bits taken from Synopsys DesignWare Host controller driver and - * ARM PCI Host generic driver. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" -#include "pcie-rockchip.h" - -int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - struct platform_device *pdev = to_platform_device(dev); - struct device_node *node = dev->of_node; - struct resource *regs; - int err; - - if (rockchip->is_rc) { - regs = platform_get_resource_byname(pdev, - IORESOURCE_MEM, - "axi-base"); - rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs); - if (IS_ERR(rockchip->reg_base)) - return PTR_ERR(rockchip->reg_base); - } else { - rockchip->mem_res = - platform_get_resource_byname(pdev, IORESOURCE_MEM, - "mem-base"); - if (!rockchip->mem_res) - return -EINVAL; - } - - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "apb-base"); - rockchip->apb_base = devm_ioremap_resource(dev, regs); - if (IS_ERR(rockchip->apb_base)) - return PTR_ERR(rockchip->apb_base); - - err = rockchip_pcie_get_phys(rockchip); - if (err) - return err; - - rockchip->lanes = 1; - err = of_property_read_u32(node, "num-lanes", &rockchip->lanes); - if (!err && (rockchip->lanes == 0 || - rockchip->lanes == 3 || - rockchip->lanes > 4)) { - dev_warn(dev, "invalid num-lanes, default to use one lane\n"); - rockchip->lanes = 1; - } - - rockchip->link_gen = of_pci_get_max_link_speed(node); - if (rockchip->link_gen < 0 || rockchip->link_gen > 2) - rockchip->link_gen = 2; - - rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core"); - if (IS_ERR(rockchip->core_rst)) { - if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) - dev_err(dev, "missing core reset property in node\n"); - return PTR_ERR(rockchip->core_rst); - } - - rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt"); - if (IS_ERR(rockchip->mgmt_rst)) { - if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) - dev_err(dev, "missing mgmt reset property in node\n"); - return PTR_ERR(rockchip->mgmt_rst); - } - - rockchip->mgmt_sticky_rst = 
devm_reset_control_get_exclusive(dev, - "mgmt-sticky"); - if (IS_ERR(rockchip->mgmt_sticky_rst)) { - if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) - dev_err(dev, "missing mgmt-sticky reset property in node\n"); - return PTR_ERR(rockchip->mgmt_sticky_rst); - } - - rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe"); - if (IS_ERR(rockchip->pipe_rst)) { - if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pipe reset property in node\n"); - return PTR_ERR(rockchip->pipe_rst); - } - - rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm"); - if (IS_ERR(rockchip->pm_rst)) { - if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pm reset property in node\n"); - return PTR_ERR(rockchip->pm_rst); - } - - rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk"); - if (IS_ERR(rockchip->pclk_rst)) { - if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pclk reset property in node\n"); - return PTR_ERR(rockchip->pclk_rst); - } - - rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk"); - if (IS_ERR(rockchip->aclk_rst)) { - if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) - dev_err(dev, "missing aclk reset property in node\n"); - return PTR_ERR(rockchip->aclk_rst); - } - - if (rockchip->is_rc) { - rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); - if (IS_ERR(rockchip->ep_gpio)) { - dev_err(dev, "missing ep-gpios property in node\n"); - return PTR_ERR(rockchip->ep_gpio); - } - } - - rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); - if (IS_ERR(rockchip->aclk_pcie)) { - dev_err(dev, "aclk clock not found\n"); - return PTR_ERR(rockchip->aclk_pcie); - } - - rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf"); - if (IS_ERR(rockchip->aclk_perf_pcie)) { - dev_err(dev, "aclk_perf clock not found\n"); - return PTR_ERR(rockchip->aclk_perf_pcie); - } - - rockchip->hclk_pcie = devm_clk_get(dev, "hclk"); - if (IS_ERR(rockchip->hclk_pcie)) { - 
dev_err(dev, "hclk clock not found\n"); - return PTR_ERR(rockchip->hclk_pcie); - } - - rockchip->clk_pcie_pm = devm_clk_get(dev, "pm"); - if (IS_ERR(rockchip->clk_pcie_pm)) { - dev_err(dev, "pm clock not found\n"); - return PTR_ERR(rockchip->clk_pcie_pm); - } - - return 0; -} -EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt); - -int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int err, i; - u32 regs; - - err = reset_control_assert(rockchip->aclk_rst); - if (err) { - dev_err(dev, "assert aclk_rst err %d\n", err); - return err; - } - - err = reset_control_assert(rockchip->pclk_rst); - if (err) { - dev_err(dev, "assert pclk_rst err %d\n", err); - return err; - } - - err = reset_control_assert(rockchip->pm_rst); - if (err) { - dev_err(dev, "assert pm_rst err %d\n", err); - return err; - } - - for (i = 0; i < MAX_LANE_NUM; i++) { - err = phy_init(rockchip->phys[i]); - if (err) { - dev_err(dev, "init phy%d err %d\n", i, err); - goto err_exit_phy; - } - } - - err = reset_control_assert(rockchip->core_rst); - if (err) { - dev_err(dev, "assert core_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->mgmt_rst); - if (err) { - dev_err(dev, "assert mgmt_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->mgmt_sticky_rst); - if (err) { - dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->pipe_rst); - if (err) { - dev_err(dev, "assert pipe_rst err %d\n", err); - goto err_exit_phy; - } - - udelay(10); - - err = reset_control_deassert(rockchip->pm_rst); - if (err) { - dev_err(dev, "deassert pm_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_deassert(rockchip->aclk_rst); - if (err) { - dev_err(dev, "deassert aclk_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_deassert(rockchip->pclk_rst); - if (err) { - dev_err(dev, "deassert pclk_rst err %d\n", err); - 
goto err_exit_phy; - } - - if (rockchip->link_gen == 2) - rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2, - PCIE_CLIENT_CONFIG); - else - rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, - PCIE_CLIENT_CONFIG); - - regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE | - PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes); - - if (rockchip->is_rc) - regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC; - else - regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP; - - rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG); - - for (i = 0; i < MAX_LANE_NUM; i++) { - err = phy_power_on(rockchip->phys[i]); - if (err) { - dev_err(dev, "power on phy%d err %d\n", i, err); - goto err_power_off_phy; - } - } - - /* - * Please don't reorder the deassert sequence of the following - * four reset pins. - */ - err = reset_control_deassert(rockchip->mgmt_sticky_rst); - if (err) { - dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->core_rst); - if (err) { - dev_err(dev, "deassert core_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->mgmt_rst); - if (err) { - dev_err(dev, "deassert mgmt_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->pipe_rst); - if (err) { - dev_err(dev, "deassert pipe_rst err %d\n", err); - goto err_power_off_phy; - } - - return 0; -err_power_off_phy: - while (i--) - phy_power_off(rockchip->phys[i]); - i = MAX_LANE_NUM; -err_exit_phy: - while (i--) - phy_exit(rockchip->phys[i]); - return err; -} -EXPORT_SYMBOL_GPL(rockchip_pcie_init_port); - -int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - struct phy *phy; - char *name; - u32 i; - - phy = devm_phy_get(dev, "pcie-phy"); - if (!IS_ERR(phy)) { - rockchip->legacy_phy = true; - rockchip->phys[0] = phy; - dev_warn(dev, "legacy phy model is deprecated!\n"); - return 0; - } - - if 
(PTR_ERR(phy) == -EPROBE_DEFER) - return PTR_ERR(phy); - - dev_dbg(dev, "missing legacy phy; search for per-lane PHY\n"); - - for (i = 0; i < MAX_LANE_NUM; i++) { - name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i); - if (!name) - return -ENOMEM; - - phy = devm_of_phy_get(dev, dev->of_node, name); - kfree(name); - - if (IS_ERR(phy)) { - if (PTR_ERR(phy) != -EPROBE_DEFER) - dev_err(dev, "missing phy for lane %d: %ld\n", - i, PTR_ERR(phy)); - return PTR_ERR(phy); - } - - rockchip->phys[i] = phy; - } - - return 0; -} -EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys); - -void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip) -{ - int i; - - for (i = 0; i < MAX_LANE_NUM; i++) { - /* inactive lanes are already powered off */ - if (rockchip->lanes_map & BIT(i)) - phy_power_off(rockchip->phys[i]); - phy_exit(rockchip->phys[i]); - } -} -EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys); - -int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->dev; - int err; - - err = clk_prepare_enable(rockchip->aclk_pcie); - if (err) { - dev_err(dev, "unable to enable aclk_pcie clock\n"); - return err; - } - - err = clk_prepare_enable(rockchip->aclk_perf_pcie); - if (err) { - dev_err(dev, "unable to enable aclk_perf_pcie clock\n"); - goto err_aclk_perf_pcie; - } - - err = clk_prepare_enable(rockchip->hclk_pcie); - if (err) { - dev_err(dev, "unable to enable hclk_pcie clock\n"); - goto err_hclk_pcie; - } - - err = clk_prepare_enable(rockchip->clk_pcie_pm); - if (err) { - dev_err(dev, "unable to enable clk_pcie_pm clock\n"); - goto err_clk_pcie_pm; - } - - return 0; - -err_clk_pcie_pm: - clk_disable_unprepare(rockchip->hclk_pcie); -err_hclk_pcie: - clk_disable_unprepare(rockchip->aclk_perf_pcie); -err_aclk_perf_pcie: - clk_disable_unprepare(rockchip->aclk_pcie); - return err; -} -EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks); - -void rockchip_pcie_disable_clocks(void *data) -{ - struct rockchip_pcie *rockchip = data; - - 
clk_disable_unprepare(rockchip->clk_pcie_pm); - clk_disable_unprepare(rockchip->hclk_pcie); - clk_disable_unprepare(rockchip->aclk_perf_pcie); - clk_disable_unprepare(rockchip->aclk_pcie); -} -EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks); - -void rockchip_pcie_cfg_configuration_accesses( - struct rockchip_pcie *rockchip, u32 type) -{ - u32 ob_desc_0; - - /* Configuration Accesses for region 0 */ - rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); - - rockchip_pcie_write(rockchip, - (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), - PCIE_CORE_OB_REGION_ADDR0); - rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, - PCIE_CORE_OB_REGION_ADDR1); - ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); - ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); - ob_desc_0 |= (type | (0x1 << 23)); - rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); - rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); -} -EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses); diff --git a/drivers/pci/host/pcie-rockchip.h b/drivers/pci/host/pcie-rockchip.h deleted file mode 100644 index 8e87a059ce73..000000000000 --- a/drivers/pci/host/pcie-rockchip.h +++ /dev/null @@ -1,338 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Rockchip AXI PCIe controller driver - * - * Copyright (c) 2018 Rockchip, Inc. - * - * Author: Shawn Lin - * - */ - -#ifndef _PCIE_ROCKCHIP_H -#define _PCIE_ROCKCHIP_H - -#include -#include - -/* - * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16 - * bits. This allows atomic updates of the register without locking. 
- */ -#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) -#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) - -#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4) -#define MAX_LANE_NUM 4 -#define MAX_REGION_LIMIT 32 -#define MIN_EP_APERTURE 28 - -#define PCIE_CLIENT_BASE 0x0 -#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) -#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001) -#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0) -#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002) -#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008) -#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x)) -#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) -#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0) -#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0) -#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080) -#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c) -#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0) -#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18 -#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19 -#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48) -#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000 -#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000 -#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c) -#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50) -#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5) -#define PCIE_CLIENT_INTR_SHIFT 5 -#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15) -#define PCIE_CLIENT_INT_MSG BIT(14) -#define PCIE_CLIENT_INT_HOT_RST BIT(13) -#define PCIE_CLIENT_INT_DPA BIT(12) -#define PCIE_CLIENT_INT_FATAL_ERR BIT(11) -#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10) -#define PCIE_CLIENT_INT_CORR_ERR BIT(9) -#define PCIE_CLIENT_INT_INTD BIT(8) -#define PCIE_CLIENT_INT_INTC BIT(7) -#define PCIE_CLIENT_INT_INTB BIT(6) -#define PCIE_CLIENT_INT_INTA BIT(5) -#define PCIE_CLIENT_INT_LOCAL BIT(4) -#define PCIE_CLIENT_INT_UDMA BIT(3) -#define PCIE_CLIENT_INT_PHY BIT(2) -#define 
PCIE_CLIENT_INT_HOT_PLUG BIT(1) -#define PCIE_CLIENT_INT_PWR_STCG BIT(0) - -#define PCIE_CLIENT_INT_LEGACY \ - (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \ - PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD) - -#define PCIE_CLIENT_INT_CLI \ - (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \ - PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \ - PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \ - PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \ - PCIE_CLIENT_INT_PHY) - -#define PCIE_CORE_CTRL_MGMT_BASE 0x900000 -#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000) -#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008 -#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018 -#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006 -#define PCIE_CORE_PL_CONF_LANE_SHIFT 1 -#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004) -#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8) -#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8 -#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff -#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020) -#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000 -#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16 -#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \ - (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT) -#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200) -#define PCIE_CORE_LANE_MAP_MASK 0x0000000f -#define PCIE_CORE_LANE_MAP_REVERSE BIT(16) -#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c) -#define PCIE_CORE_INT_PRFPE BIT(0) -#define PCIE_CORE_INT_CRFPE BIT(1) -#define PCIE_CORE_INT_RRPE BIT(2) -#define PCIE_CORE_INT_PRFO BIT(3) -#define PCIE_CORE_INT_CRFO BIT(4) -#define PCIE_CORE_INT_RT BIT(5) -#define PCIE_CORE_INT_RTR BIT(6) -#define PCIE_CORE_INT_PE BIT(7) -#define PCIE_CORE_INT_MTR BIT(8) -#define PCIE_CORE_INT_UCR BIT(9) -#define PCIE_CORE_INT_FCE BIT(10) -#define PCIE_CORE_INT_CT BIT(11) -#define PCIE_CORE_INT_UTC BIT(18) -#define PCIE_CORE_INT_MMVC BIT(19) -#define PCIE_CORE_CONFIG_VENDOR 
(PCIE_CORE_CTRL_MGMT_BASE + 0x44) -#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210) -#define PCIE_CORE_PHY_FUNC_CFG (PCIE_CORE_CTRL_MGMT_BASE + 0x2c0) -#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300) -#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED 0x0 -#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS 0x1 -#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS 0x4 -#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 -#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS 0x6 -#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 - -#define PCIE_CORE_INT \ - (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \ - PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \ - PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \ - PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \ - PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \ - PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ - PCIE_CORE_INT_MMVC) - -#define PCIE_RC_RP_ATS_BASE 0x400000 -#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 -#define PCIE_RC_CONFIG_BASE 0xa00000 -#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) -#define PCIE_RC_CONFIG_SCC_SHIFT 16 -#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) -#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 -#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff -#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 -#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) -#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) -#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) -#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) -#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) -#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) -#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) -#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) -#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) - -#define PCIE_CORE_AXI_CONF_BASE 0xc00000 -#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0) -#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f -#define 
PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00 -#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4) -#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8) -#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc) - -#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800 -#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0) -#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f -#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00 -#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4) - -/* Size of one AXI Region (not Region 0) */ -#define AXI_REGION_SIZE BIT(20) -/* Size of Region 0, equal to sum of sizes of other regions */ -#define AXI_REGION_0_SIZE (32 * (0x1 << 20)) -#define OB_REG_SIZE_SHIFT 5 -#define IB_ROOT_PORT_REG_SIZE_SHIFT 3 -#define AXI_WRAPPER_IO_WRITE 0x6 -#define AXI_WRAPPER_MEM_WRITE 0x2 -#define AXI_WRAPPER_TYPE0_CFG 0xa -#define AXI_WRAPPER_TYPE1_CFG 0xb -#define AXI_WRAPPER_NOR_MSG 0xc - -#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 -#define MIN_AXI_ADDR_BITS_PASSED 8 -#define PCIE_RC_SEND_PME_OFF 0x11960 -#define ROCKCHIP_VENDOR_ID 0x1d87 -#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20) -#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15) -#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12) -#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0) -#define PCIE_ECAM_ADDR(bus, dev, func, reg) \ - (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \ - PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg)) -#define PCIE_LINK_IS_L2(x) \ - (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2) -#define PCIE_LINK_UP(x) \ - (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP) -#define PCIE_LINK_IS_GEN2(x) \ - (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G) - -#define RC_REGION_0_ADDR_TRANS_H 0x00000000 -#define RC_REGION_0_ADDR_TRANS_L 0x00000000 -#define RC_REGION_0_PASS_BITS (25 - 1) -#define RC_REGION_0_TYPE_MASK GENMASK(3, 0) -#define MAX_AXI_WRAPPER_REGION_NUM 33 - -#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0 
-#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1 -#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2 -#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3 -#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4 -#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27 -#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5) -#define ROCKCHIP_PCIE_MSG_ROUTING(route) \ - (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK) -#define ROCKCHIP_PCIE_MSG_CODE_MASK GENMASK(15, 8) -#define ROCKCHIP_PCIE_MSG_CODE(code) \ - (((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK) -#define ROCKCHIP_PCIE_MSG_NO_DATA BIT(16) - -#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4 -#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19) -#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90 -#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17 -#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17) -#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20 -#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20) -#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16) -#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24) -#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1 -#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3 -#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) -#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ - (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) -#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ - (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) -#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ - (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) -#define 
ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) -#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ - (((devfn) << 12) & \ - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) -#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) -#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ - (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) -#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ - (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) -#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) -#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) -#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ - (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) -#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \ - (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) -#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \ - (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020) -#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ - (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) -#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ - (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020) - -#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \ - (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008) -#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \ - (PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008) -#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ - (GENMASK(4, 0) << ((b) * 8)) -#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ - (((a) << ((b) * 8)) & \ - ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) -#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ - (GENMASK(7, 5) << ((b) * 8)) -#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ - (((c) << ((b) * 8 + 5)) & \ - ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) - -struct rockchip_pcie { - void __iomem *reg_base; /* DT axi-base */ - void __iomem *apb_base; /* DT apb-base */ - 
bool legacy_phy; - struct phy *phys[MAX_LANE_NUM]; - struct reset_control *core_rst; - struct reset_control *mgmt_rst; - struct reset_control *mgmt_sticky_rst; - struct reset_control *pipe_rst; - struct reset_control *pm_rst; - struct reset_control *aclk_rst; - struct reset_control *pclk_rst; - struct clk *aclk_pcie; - struct clk *aclk_perf_pcie; - struct clk *hclk_pcie; - struct clk *clk_pcie_pm; - struct regulator *vpcie12v; /* 12V power supply */ - struct regulator *vpcie3v3; /* 3.3V power supply */ - struct regulator *vpcie1v8; /* 1.8V power supply */ - struct regulator *vpcie0v9; /* 0.9V power supply */ - struct gpio_desc *ep_gpio; - u32 lanes; - u8 lanes_map; - u8 root_bus_nr; - int link_gen; - struct device *dev; - struct irq_domain *irq_domain; - int offset; - struct pci_bus *root_bus; - struct resource *io; - phys_addr_t io_bus_addr; - u32 io_size; - void __iomem *msg_region; - u32 mem_size; - phys_addr_t msg_bus_addr; - phys_addr_t mem_bus_addr; - bool is_rc; - struct resource *mem_res; -}; - -static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg) -{ - return readl(rockchip->apb_base + reg); -} - -static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val, - u32 reg) -{ - writel(val, rockchip->apb_base + reg); -} - -int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip); -int rockchip_pcie_init_port(struct rockchip_pcie *rockchip); -int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip); -void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip); -int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip); -void rockchip_pcie_disable_clocks(void *data); -void rockchip_pcie_cfg_configuration_accesses( - struct rockchip_pcie *rockchip, u32 type); - -#endif /* _PCIE_ROCKCHIP_H */ diff --git a/drivers/pci/host/pcie-tango.c b/drivers/pci/host/pcie-tango.c deleted file mode 100644 index 21a208da3f59..000000000000 --- a/drivers/pci/host/pcie-tango.c +++ /dev/null @@ -1,341 +0,0 @@ -// SPDX-License-Identifier: 
GPL-2.0 -#include -#include -#include -#include -#include -#include - -#define MSI_MAX 256 - -#define SMP8759_MUX 0x48 -#define SMP8759_TEST_OUT 0x74 -#define SMP8759_DOORBELL 0x7c -#define SMP8759_STATUS 0x80 -#define SMP8759_ENABLE 0xa0 - -struct tango_pcie { - DECLARE_BITMAP(used_msi, MSI_MAX); - u64 msi_doorbell; - spinlock_t used_msi_lock; - void __iomem *base; - struct irq_domain *dom; -}; - -static void tango_msi_isr(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct tango_pcie *pcie = irq_desc_get_handler_data(desc); - unsigned long status, base, virq, idx, pos = 0; - - chained_irq_enter(chip, desc); - spin_lock(&pcie->used_msi_lock); - - while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) { - base = round_down(pos, 32); - status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8); - for_each_set_bit(idx, &status, 32) { - virq = irq_find_mapping(pcie->dom, base + idx); - generic_handle_irq(virq); - } - pos = base + 32; - } - - spin_unlock(&pcie->used_msi_lock); - chained_irq_exit(chip, desc); -} - -static void tango_ack(struct irq_data *d) -{ - struct tango_pcie *pcie = d->chip_data; - u32 offset = (d->hwirq / 32) * 4; - u32 bit = BIT(d->hwirq % 32); - - writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset); -} - -static void update_msi_enable(struct irq_data *d, bool unmask) -{ - unsigned long flags; - struct tango_pcie *pcie = d->chip_data; - u32 offset = (d->hwirq / 32) * 4; - u32 bit = BIT(d->hwirq % 32); - u32 val; - - spin_lock_irqsave(&pcie->used_msi_lock, flags); - val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset); - val = unmask ? 
val | bit : val & ~bit; - writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset); - spin_unlock_irqrestore(&pcie->used_msi_lock, flags); -} - -static void tango_mask(struct irq_data *d) -{ - update_msi_enable(d, false); -} - -static void tango_unmask(struct irq_data *d) -{ - update_msi_enable(d, true); -} - -static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask, - bool force) -{ - return -EINVAL; -} - -static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) -{ - struct tango_pcie *pcie = d->chip_data; - msg->address_lo = lower_32_bits(pcie->msi_doorbell); - msg->address_hi = upper_32_bits(pcie->msi_doorbell); - msg->data = d->hwirq; -} - -static struct irq_chip tango_chip = { - .irq_ack = tango_ack, - .irq_mask = tango_mask, - .irq_unmask = tango_unmask, - .irq_set_affinity = tango_set_affinity, - .irq_compose_msi_msg = tango_compose_msi_msg, -}; - -static void msi_ack(struct irq_data *d) -{ - irq_chip_ack_parent(d); -} - -static void msi_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void msi_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip msi_chip = { - .name = "MSI", - .irq_ack = msi_ack, - .irq_mask = msi_mask, - .irq_unmask = msi_unmask, -}; - -static struct msi_domain_info msi_dom_info = { - .flags = MSI_FLAG_PCI_MSIX - | MSI_FLAG_USE_DEF_DOM_OPS - | MSI_FLAG_USE_DEF_CHIP_OPS, - .chip = &msi_chip, -}; - -static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq, - unsigned int nr_irqs, void *args) -{ - struct tango_pcie *pcie = dom->host_data; - unsigned long flags; - int pos; - - spin_lock_irqsave(&pcie->used_msi_lock, flags); - pos = find_first_zero_bit(pcie->used_msi, MSI_MAX); - if (pos >= MSI_MAX) { - spin_unlock_irqrestore(&pcie->used_msi_lock, flags); - return -ENOSPC; - } - __set_bit(pos, pcie->used_msi); - spin_unlock_irqrestore(&pcie->used_msi_lock, flags); - 
irq_domain_set_info(dom, virq, pos, &tango_chip, - pcie, handle_edge_irq, NULL, NULL); - - return 0; -} - -static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq, - unsigned int nr_irqs) -{ - unsigned long flags; - struct irq_data *d = irq_domain_get_irq_data(dom, virq); - struct tango_pcie *pcie = d->chip_data; - - spin_lock_irqsave(&pcie->used_msi_lock, flags); - __clear_bit(d->hwirq, pcie->used_msi); - spin_unlock_irqrestore(&pcie->used_msi_lock, flags); -} - -static const struct irq_domain_ops dom_ops = { - .alloc = tango_irq_domain_alloc, - .free = tango_irq_domain_free, -}; - -static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - struct pci_config_window *cfg = bus->sysdata; - struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); - int ret; - - /* Reads in configuration space outside devfn 0 return garbage */ - if (devfn != 0) - return PCIBIOS_FUNC_NOT_SUPPORTED; - - /* - * PCI config and MMIO accesses are muxed. Linux doesn't have a - * mutual exclusion mechanism for config vs. MMIO accesses, so - * concurrent accesses may cause corruption. 
- */ - writel_relaxed(1, pcie->base + SMP8759_MUX); - ret = pci_generic_config_read(bus, devfn, where, size, val); - writel_relaxed(0, pcie->base + SMP8759_MUX); - - return ret; -} - -static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - struct pci_config_window *cfg = bus->sysdata; - struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); - int ret; - - writel_relaxed(1, pcie->base + SMP8759_MUX); - ret = pci_generic_config_write(bus, devfn, where, size, val); - writel_relaxed(0, pcie->base + SMP8759_MUX); - - return ret; -} - -static struct pci_ecam_ops smp8759_ecam_ops = { - .bus_shift = 20, - .pci_ops = { - .map_bus = pci_ecam_map_bus, - .read = smp8759_config_read, - .write = smp8759_config_write, - } -}; - -static int tango_pcie_link_up(struct tango_pcie *pcie) -{ - void __iomem *test_out = pcie->base + SMP8759_TEST_OUT; - int i; - - writel_relaxed(16, test_out); - for (i = 0; i < 10; ++i) { - u32 ltssm_state = readl_relaxed(test_out) >> 8; - if ((ltssm_state & 0x1f) == 0xf) /* L0 */ - return 1; - usleep_range(3000, 4000); - } - - return 0; -} - -static int tango_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct tango_pcie *pcie; - struct resource *res; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); - struct irq_domain *msi_dom, *irq_dom; - struct of_pci_range_parser parser; - struct of_pci_range range; - int virq, offset; - - dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n"); - add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); - - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - pcie->base = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->base)) - return PTR_ERR(pcie->base); - - platform_set_drvdata(pdev, pcie); - - if (!tango_pcie_link_up(pcie)) - return -ENODEV; - - if (of_pci_dma_range_parser_init(&parser, dev->of_node) 
< 0) - return -ENOENT; - - if (of_pci_range_parser_one(&parser, &range) == NULL) - return -ENOENT; - - range.pci_addr += range.size; - pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL; - - for (offset = 0; offset < MSI_MAX / 8; offset += 4) - writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset); - - virq = platform_get_irq(pdev, 1); - if (virq <= 0) { - dev_err(dev, "Failed to map IRQ\n"); - return -ENXIO; - } - - irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie); - if (!irq_dom) { - dev_err(dev, "Failed to create IRQ domain\n"); - return -ENOMEM; - } - - msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom); - if (!msi_dom) { - dev_err(dev, "Failed to create MSI domain\n"); - irq_domain_remove(irq_dom); - return -ENOMEM; - } - - pcie->dom = irq_dom; - spin_lock_init(&pcie->used_msi_lock); - irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie); - - return pci_host_common_probe(pdev, &smp8759_ecam_ops); -} - -static const struct of_device_id tango_pcie_ids[] = { - { .compatible = "sigma,smp8759-pcie" }, - { }, -}; - -static struct platform_driver tango_pcie_driver = { - .probe = tango_pcie_probe, - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = tango_pcie_ids, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(tango_pcie_driver); - -/* - * The root complex advertises the wrong device class. - * Header Type 1 is for PCI-to-PCI bridges. - */ -static void tango_fixup_class(struct pci_dev *dev) -{ - dev->class = PCI_CLASS_BRIDGE_PCI << 8; -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class); - -/* - * The root complex exposes a "fake" BAR, which is used to filter - * bus-to-system accesses. Only accesses within the range defined by this - * BAR are forwarded to the host, others are ignored. 
- * - * By default, the DMA framework expects an identity mapping, and DRAM0 is - * mapped at 0x80000000. - */ -static void tango_fixup_bar(struct pci_dev *dev) -{ - dev->non_compliant_bars = true; - pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000); -} -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar); diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c deleted file mode 100644 index 6a4bbb5b3de0..000000000000 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ /dev/null @@ -1,917 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * PCIe host controller driver for NWL PCIe Bridge - * Based on pcie-xilinx.c, pci-tegra.c - * - * (C) Copyright 2014 - 2015, Xilinx, Inc. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -/* Bridge core config registers */ -#define BRCFG_PCIE_RX0 0x00000000 -#define BRCFG_INTERRUPT 0x00000010 -#define BRCFG_PCIE_RX_MSG_FILTER 0x00000020 - -/* Egress - Bridge translation registers */ -#define E_BREG_CAPABILITIES 0x00000200 -#define E_BREG_CONTROL 0x00000208 -#define E_BREG_BASE_LO 0x00000210 -#define E_BREG_BASE_HI 0x00000214 -#define E_ECAM_CAPABILITIES 0x00000220 -#define E_ECAM_CONTROL 0x00000228 -#define E_ECAM_BASE_LO 0x00000230 -#define E_ECAM_BASE_HI 0x00000234 - -/* Ingress - address translations */ -#define I_MSII_CAPABILITIES 0x00000300 -#define I_MSII_CONTROL 0x00000308 -#define I_MSII_BASE_LO 0x00000310 -#define I_MSII_BASE_HI 0x00000314 - -#define I_ISUB_CONTROL 0x000003E8 -#define SET_ISUB_CONTROL BIT(0) -/* Rxed msg fifo - Interrupt status registers */ -#define MSGF_MISC_STATUS 0x00000400 -#define MSGF_MISC_MASK 0x00000404 -#define MSGF_LEG_STATUS 0x00000420 -#define MSGF_LEG_MASK 0x00000424 -#define MSGF_MSI_STATUS_LO 0x00000440 -#define MSGF_MSI_STATUS_HI 0x00000444 
-#define MSGF_MSI_MASK_LO 0x00000448 -#define MSGF_MSI_MASK_HI 0x0000044C - -/* Msg filter mask bits */ -#define CFG_ENABLE_PM_MSG_FWD BIT(1) -#define CFG_ENABLE_INT_MSG_FWD BIT(2) -#define CFG_ENABLE_ERR_MSG_FWD BIT(3) -#define CFG_ENABLE_MSG_FILTER_MASK (CFG_ENABLE_PM_MSG_FWD | \ - CFG_ENABLE_INT_MSG_FWD | \ - CFG_ENABLE_ERR_MSG_FWD) - -/* Misc interrupt status mask bits */ -#define MSGF_MISC_SR_RXMSG_AVAIL BIT(0) -#define MSGF_MISC_SR_RXMSG_OVER BIT(1) -#define MSGF_MISC_SR_SLAVE_ERR BIT(4) -#define MSGF_MISC_SR_MASTER_ERR BIT(5) -#define MSGF_MISC_SR_I_ADDR_ERR BIT(6) -#define MSGF_MISC_SR_E_ADDR_ERR BIT(7) -#define MSGF_MISC_SR_FATAL_AER BIT(16) -#define MSGF_MISC_SR_NON_FATAL_AER BIT(17) -#define MSGF_MISC_SR_CORR_AER BIT(18) -#define MSGF_MISC_SR_UR_DETECT BIT(20) -#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) -#define MSGF_MISC_SR_FATAL_DEV BIT(23) -#define MSGF_MISC_SR_LINK_DOWN BIT(24) -#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) -#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) - -#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ - MSGF_MISC_SR_RXMSG_OVER | \ - MSGF_MISC_SR_SLAVE_ERR | \ - MSGF_MISC_SR_MASTER_ERR | \ - MSGF_MISC_SR_I_ADDR_ERR | \ - MSGF_MISC_SR_E_ADDR_ERR | \ - MSGF_MISC_SR_FATAL_AER | \ - MSGF_MISC_SR_NON_FATAL_AER | \ - MSGF_MISC_SR_CORR_AER | \ - MSGF_MISC_SR_UR_DETECT | \ - MSGF_MISC_SR_NON_FATAL_DEV | \ - MSGF_MISC_SR_FATAL_DEV | \ - MSGF_MISC_SR_LINK_DOWN | \ - MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ - MSGF_MSIC_SR_LINK_BWIDTH) - -/* Legacy interrupt status mask bits */ -#define MSGF_LEG_SR_INTA BIT(0) -#define MSGF_LEG_SR_INTB BIT(1) -#define MSGF_LEG_SR_INTC BIT(2) -#define MSGF_LEG_SR_INTD BIT(3) -#define MSGF_LEG_SR_MASKALL (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \ - MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD) - -/* MSI interrupt status mask bits */ -#define MSGF_MSI_SR_LO_MASK GENMASK(31, 0) -#define MSGF_MSI_SR_HI_MASK GENMASK(31, 0) - -#define MSII_PRESENT BIT(0) -#define MSII_ENABLE BIT(0) -#define MSII_STATUS_ENABLE BIT(15) - -/* Bridge 
config interrupt mask */ -#define BRCFG_INTERRUPT_MASK BIT(0) -#define BREG_PRESENT BIT(0) -#define BREG_ENABLE BIT(0) -#define BREG_ENABLE_FORCE BIT(1) - -/* E_ECAM status mask bits */ -#define E_ECAM_PRESENT BIT(0) -#define E_ECAM_CR_ENABLE BIT(0) -#define E_ECAM_SIZE_LOC GENMASK(20, 16) -#define E_ECAM_SIZE_SHIFT 16 -#define ECAM_BUS_LOC_SHIFT 20 -#define ECAM_DEV_LOC_SHIFT 12 -#define NWL_ECAM_VALUE_DEFAULT 12 - -#define CFG_DMA_REG_BAR GENMASK(2, 0) - -#define INT_PCI_MSI_NR (2 * 32) - -/* Readin the PS_LINKUP */ -#define PS_LINKUP_OFFSET 0x00000238 -#define PCIE_PHY_LINKUP_BIT BIT(0) -#define PHY_RDY_LINKUP_BIT BIT(1) - -/* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_USLEEP_MIN 90000 -#define LINK_WAIT_USLEEP_MAX 100000 - -struct nwl_msi { /* MSI information */ - struct irq_domain *msi_domain; - unsigned long *bitmap; - struct irq_domain *dev_domain; - struct mutex lock; /* protect bitmap variable */ - int irq_msi0; - int irq_msi1; -}; - -struct nwl_pcie { - struct device *dev; - void __iomem *breg_base; - void __iomem *pcireg_base; - void __iomem *ecam_base; - phys_addr_t phys_breg_base; /* Physical Bridge Register Base */ - phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */ - phys_addr_t phys_ecam_base; /* Physical Configuration Base */ - u32 breg_size; - u32 pcie_reg_size; - u32 ecam_size; - int irq_intx; - int irq_misc; - u32 ecam_value; - u8 last_busno; - u8 root_busno; - struct nwl_msi msi; - struct irq_domain *legacy_irq_domain; - raw_spinlock_t leg_mask_lock; -}; - -static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) -{ - return readl(pcie->breg_base + off); -} - -static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off) -{ - writel(val, pcie->breg_base + off); -} - -static bool nwl_pcie_link_up(struct nwl_pcie *pcie) -{ - if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT) - return true; - return false; -} - -static bool 
nwl_phy_link_up(struct nwl_pcie *pcie) -{ - if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT) - return true; - return false; -} - -static int nwl_wait_for_link(struct nwl_pcie *pcie) -{ - struct device *dev = pcie->dev; - int retries; - - /* check if the link is up or not */ - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { - if (nwl_phy_link_up(pcie)) - return 0; - usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); - } - - dev_err(dev, "PHY link never came up\n"); - return -ETIMEDOUT; -} - -static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) -{ - struct nwl_pcie *pcie = bus->sysdata; - - /* Check link before accessing downstream ports */ - if (bus->number != pcie->root_busno) { - if (!nwl_pcie_link_up(pcie)) - return false; - } - - /* Only one device down on each root port */ - if (bus->number == pcie->root_busno && devfn > 0) - return false; - - return true; -} - -/** - * nwl_pcie_map_bus - Get configuration base - * - * @bus: Bus structure of current bus - * @devfn: Device/function - * @where: Offset from base - * - * Return: Base address of the configuration space needed to be - * accessed. 
- */ -static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, - int where) -{ - struct nwl_pcie *pcie = bus->sysdata; - int relbus; - - if (!nwl_pcie_valid_device(bus, devfn)) - return NULL; - - relbus = (bus->number << ECAM_BUS_LOC_SHIFT) | - (devfn << ECAM_DEV_LOC_SHIFT); - - return pcie->ecam_base + relbus + where; -} - -/* PCIe operations */ -static struct pci_ops nwl_pcie_ops = { - .map_bus = nwl_pcie_map_bus, - .read = pci_generic_config_read, - .write = pci_generic_config_write, -}; - -static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) -{ - struct nwl_pcie *pcie = data; - struct device *dev = pcie->dev; - u32 misc_stat; - - /* Checking for misc interrupts */ - misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & - MSGF_MISC_SR_MASKALL; - if (!misc_stat) - return IRQ_NONE; - - if (misc_stat & MSGF_MISC_SR_RXMSG_OVER) - dev_err(dev, "Received Message FIFO Overflow\n"); - - if (misc_stat & MSGF_MISC_SR_SLAVE_ERR) - dev_err(dev, "Slave error\n"); - - if (misc_stat & MSGF_MISC_SR_MASTER_ERR) - dev_err(dev, "Master error\n"); - - if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR) - dev_err(dev, "In Misc Ingress address translation error\n"); - - if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR) - dev_err(dev, "In Misc Egress address translation error\n"); - - if (misc_stat & MSGF_MISC_SR_FATAL_AER) - dev_err(dev, "Fatal Error in AER Capability\n"); - - if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER) - dev_err(dev, "Non-Fatal Error in AER Capability\n"); - - if (misc_stat & MSGF_MISC_SR_CORR_AER) - dev_err(dev, "Correctable Error in AER Capability\n"); - - if (misc_stat & MSGF_MISC_SR_UR_DETECT) - dev_err(dev, "Unsupported request Detected\n"); - - if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV) - dev_err(dev, "Non-Fatal Error Detected\n"); - - if (misc_stat & MSGF_MISC_SR_FATAL_DEV) - dev_err(dev, "Fatal Error Detected\n"); - - if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) - dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n"); - - if 
(misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) - dev_info(dev, "Link Bandwidth Management Status bit set\n"); - - /* Clear misc interrupt status */ - nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS); - - return IRQ_HANDLED; -} - -static void nwl_pcie_leg_handler(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct nwl_pcie *pcie; - unsigned long status; - u32 bit; - u32 virq; - - chained_irq_enter(chip, desc); - pcie = irq_desc_get_handler_data(desc); - - while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & - MSGF_LEG_SR_MASKALL) != 0) { - for_each_set_bit(bit, &status, PCI_NUM_INTX) { - virq = irq_find_mapping(pcie->legacy_irq_domain, bit); - if (virq) - generic_handle_irq(virq); - } - } - - chained_irq_exit(chip, desc); -} - -static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg) -{ - struct nwl_msi *msi; - unsigned long status; - u32 bit; - u32 virq; - - msi = &pcie->msi; - - while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) { - for_each_set_bit(bit, &status, 32) { - nwl_bridge_writel(pcie, 1 << bit, status_reg); - virq = irq_find_mapping(msi->dev_domain, bit); - if (virq) - generic_handle_irq(virq); - } - } -} - -static void nwl_pcie_msi_handler_high(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct nwl_pcie *pcie = irq_desc_get_handler_data(desc); - - chained_irq_enter(chip, desc); - nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI); - chained_irq_exit(chip, desc); -} - -static void nwl_pcie_msi_handler_low(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct nwl_pcie *pcie = irq_desc_get_handler_data(desc); - - chained_irq_enter(chip, desc); - nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO); - chained_irq_exit(chip, desc); -} - -static void nwl_mask_leg_irq(struct irq_data *data) -{ - struct irq_desc *desc = irq_to_desc(data->irq); - struct nwl_pcie *pcie; - unsigned long flags; - u32 mask; - u32 val; - - pcie = 
irq_desc_get_chip_data(desc); - mask = 1 << (data->hwirq - 1); - raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); - val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); - nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); - raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); -} - -static void nwl_unmask_leg_irq(struct irq_data *data) -{ - struct irq_desc *desc = irq_to_desc(data->irq); - struct nwl_pcie *pcie; - unsigned long flags; - u32 mask; - u32 val; - - pcie = irq_desc_get_chip_data(desc); - mask = 1 << (data->hwirq - 1); - raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); - val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); - nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); - raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); -} - -static struct irq_chip nwl_leg_irq_chip = { - .name = "nwl_pcie:legacy", - .irq_enable = nwl_unmask_leg_irq, - .irq_disable = nwl_mask_leg_irq, - .irq_mask = nwl_mask_leg_irq, - .irq_unmask = nwl_unmask_leg_irq, -}; - -static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq); - irq_set_chip_data(irq, domain->host_data); - irq_set_status_flags(irq, IRQ_LEVEL); - - return 0; -} - -static const struct irq_domain_ops legacy_domain_ops = { - .map = nwl_legacy_map, - .xlate = pci_irqd_intx_xlate, -}; - -#ifdef CONFIG_PCI_MSI -static struct irq_chip nwl_msi_irq_chip = { - .name = "nwl_pcie:msi", - .irq_enable = unmask_msi_irq, - .irq_disable = mask_msi_irq, - .irq_mask = mask_msi_irq, - .irq_unmask = unmask_msi_irq, - -}; - -static struct msi_domain_info nwl_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI), - .chip = &nwl_msi_irq_chip, -}; -#endif - -static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -{ - struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); - phys_addr_t msi_addr = pcie->phys_pcie_reg_base; - - 
msg->address_lo = lower_32_bits(msi_addr); - msg->address_hi = upper_32_bits(msi_addr); - msg->data = data->hwirq; -} - -static int nwl_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - -static struct irq_chip nwl_irq_chip = { - .name = "Xilinx MSI", - .irq_compose_msi_msg = nwl_compose_msi_msg, - .irq_set_affinity = nwl_msi_set_affinity, -}; - -static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *args) -{ - struct nwl_pcie *pcie = domain->host_data; - struct nwl_msi *msi = &pcie->msi; - int bit; - int i; - - mutex_lock(&msi->lock); - bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0, - nr_irqs, 0); - if (bit >= INT_PCI_MSI_NR) { - mutex_unlock(&msi->lock); - return -ENOSPC; - } - - bitmap_set(msi->bitmap, bit, nr_irqs); - - for (i = 0; i < nr_irqs; i++) { - irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip, - domain->host_data, handle_simple_irq, - NULL, NULL); - } - mutex_unlock(&msi->lock); - return 0; -} - -static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs) -{ - struct irq_data *data = irq_domain_get_irq_data(domain, virq); - struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); - struct nwl_msi *msi = &pcie->msi; - - mutex_lock(&msi->lock); - bitmap_clear(msi->bitmap, data->hwirq, nr_irqs); - mutex_unlock(&msi->lock); -} - -static const struct irq_domain_ops dev_msi_domain_ops = { - .alloc = nwl_irq_domain_alloc, - .free = nwl_irq_domain_free, -}; - -static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) -{ -#ifdef CONFIG_PCI_MSI - struct device *dev = pcie->dev; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); - struct nwl_msi *msi = &pcie->msi; - - msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR, - &dev_msi_domain_ops, pcie); - if (!msi->dev_domain) { - dev_err(dev, "failed to create dev IRQ domain\n"); - return -ENOMEM; - } - 
msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &nwl_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) { - dev_err(dev, "failed to create msi IRQ domain\n"); - irq_domain_remove(msi->dev_domain); - return -ENOMEM; - } -#endif - return 0; -} - -static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct device_node *node = dev->of_node; - struct device_node *legacy_intc_node; - - legacy_intc_node = of_get_next_child(node, NULL); - if (!legacy_intc_node) { - dev_err(dev, "No legacy intc node found\n"); - return -EINVAL; - } - - pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node, - PCI_NUM_INTX, - &legacy_domain_ops, - pcie); - - if (!pcie->legacy_irq_domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - raw_spin_lock_init(&pcie->leg_mask_lock); - nwl_pcie_init_msi_irq_domain(pcie); - return 0; -} - -static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct platform_device *pdev = to_platform_device(dev); - struct nwl_msi *msi = &pcie->msi; - unsigned long base; - int ret; - int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long); - - mutex_init(&msi->lock); - - msi->bitmap = kzalloc(size, GFP_KERNEL); - if (!msi->bitmap) - return -ENOMEM; - - /* Get msi_1 IRQ number */ - msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1"); - if (msi->irq_msi1 < 0) { - dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1); - ret = -EINVAL; - goto err; - } - - irq_set_chained_handler_and_data(msi->irq_msi1, - nwl_pcie_msi_handler_high, pcie); - - /* Get msi_0 IRQ number */ - msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0"); - if (msi->irq_msi0 < 0) { - dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0); - ret = -EINVAL; - goto err; - } - - irq_set_chained_handler_and_data(msi->irq_msi0, - nwl_pcie_msi_handler_low, pcie); - - /* Check for msii_present bit */ - ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT; - if 
(!ret) { - dev_err(dev, "MSI not present\n"); - ret = -EIO; - goto err; - } - - /* Enable MSII */ - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) | - MSII_ENABLE, I_MSII_CONTROL); - - /* Enable MSII status */ - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) | - MSII_STATUS_ENABLE, I_MSII_CONTROL); - - /* setup AFI/FPCI range */ - base = pcie->phys_pcie_reg_base; - nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO); - nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI); - - /* - * For high range MSI interrupts: disable, clear any pending, - * and enable - */ - nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI); - - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) & - MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI); - - nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI); - - /* - * For low range MSI interrupts: disable, clear any pending, - * and enable - */ - nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO); - - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) & - MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO); - - nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO); - - return 0; -err: - kfree(msi->bitmap); - msi->bitmap = NULL; - return ret; -} - -static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) -{ - struct device *dev = pcie->dev; - struct platform_device *pdev = to_platform_device(dev); - u32 breg_val, ecam_val, first_busno = 0; - int err; - - breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT; - if (!breg_val) { - dev_err(dev, "BREG is not present\n"); - return breg_val; - } - - /* Write bridge_off to breg base */ - nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base), - E_BREG_BASE_LO); - nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base), - E_BREG_BASE_HI); - - /* Enable BREG */ - nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE, - E_BREG_CONTROL); - - /* Disable DMA channel registers */ - nwl_bridge_writel(pcie, 
nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) | - CFG_DMA_REG_BAR, BRCFG_PCIE_RX0); - - /* Enable Ingress subtractive decode translation */ - nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL); - - /* Enable msg filtering details */ - nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK, - BRCFG_PCIE_RX_MSG_FILTER); - - err = nwl_wait_for_link(pcie); - if (err) - return err; - - ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT; - if (!ecam_val) { - dev_err(dev, "ECAM is not present\n"); - return ecam_val; - } - - /* Enable ECAM */ - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | - E_ECAM_CR_ENABLE, E_ECAM_CONTROL); - - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | - (pcie->ecam_value << E_ECAM_SIZE_SHIFT), - E_ECAM_CONTROL); - - nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base), - E_ECAM_BASE_LO); - nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base), - E_ECAM_BASE_HI); - - /* Get bus range */ - ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL); - pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT; - /* Write primary, secondary and subordinate bus numbers */ - ecam_val = first_busno; - ecam_val |= (first_busno + 1) << 8; - ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT); - writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS)); - - if (nwl_pcie_link_up(pcie)) - dev_info(dev, "Link is UP\n"); - else - dev_info(dev, "Link is DOWN\n"); - - /* Get misc IRQ number */ - pcie->irq_misc = platform_get_irq_byname(pdev, "misc"); - if (pcie->irq_misc < 0) { - dev_err(dev, "failed to get misc IRQ %d\n", - pcie->irq_misc); - return -EINVAL; - } - - err = devm_request_irq(dev, pcie->irq_misc, - nwl_pcie_misc_handler, IRQF_SHARED, - "nwl_pcie:misc", pcie); - if (err) { - dev_err(dev, "fail to register misc IRQ#%d\n", - pcie->irq_misc); - return err; - } - - /* Disable all misc interrupts */ - nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); - - /* Clear pending 
misc interrupts */ - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & - MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS); - - /* Enable all misc interrupts */ - nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); - - - /* Disable all legacy interrupts */ - nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); - - /* Clear pending legacy interrupts */ - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & - MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS); - - /* Enable all legacy interrupts */ - nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); - - /* Enable the bridge config interrupt */ - nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) | - BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT); - - return 0; -} - -static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, - struct platform_device *pdev) -{ - struct device *dev = pcie->dev; - struct device_node *node = dev->of_node; - struct resource *res; - const char *type; - - /* Check for device type */ - type = of_get_property(node, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); - pcie->breg_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->breg_base)) - return PTR_ERR(pcie->breg_base); - pcie->phys_breg_base = res->start; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg"); - pcie->pcireg_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->pcireg_base)) - return PTR_ERR(pcie->pcireg_base); - pcie->phys_pcie_reg_base = res->start; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); - pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pcie->ecam_base)) - return PTR_ERR(pcie->ecam_base); - pcie->phys_ecam_base = res->start; - - /* Get intx IRQ number */ - pcie->irq_intx = platform_get_irq_byname(pdev, "intx"); - if (pcie->irq_intx < 0) { - 
dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx); - return pcie->irq_intx; - } - - irq_set_chained_handler_and_data(pcie->irq_intx, - nwl_pcie_leg_handler, pcie); - - return 0; -} - -static const struct of_device_id nwl_pcie_of_match[] = { - { .compatible = "xlnx,nwl-pcie-2.11", }, - {} -}; - -static int nwl_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct nwl_pcie *pcie; - struct pci_bus *bus; - struct pci_bus *child; - struct pci_host_bridge *bridge; - int err; - resource_size_t iobase = 0; - LIST_HEAD(res); - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); - if (!bridge) - return -ENODEV; - - pcie = pci_host_bridge_priv(bridge); - - pcie->dev = dev; - pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; - - err = nwl_pcie_parse_dt(pcie, pdev); - if (err) { - dev_err(dev, "Parsing DT failed\n"); - return err; - } - - err = nwl_pcie_bridge_init(pcie); - if (err) { - dev_err(dev, "HW Initialization failed\n"); - return err; - } - - err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, - &iobase); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - - err = devm_request_pci_bus_resources(dev, &res); - if (err) - goto error; - - err = nwl_pcie_init_irq_domain(pcie); - if (err) { - dev_err(dev, "Failed creating IRQ Domain\n"); - goto error; - } - - list_splice_init(&res, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = pcie; - bridge->busnr = pcie->root_busno; - bridge->ops = &nwl_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - err = nwl_pcie_enable_msi(pcie); - if (err < 0) { - dev_err(dev, "failed to enable MSI support: %d\n", err); - goto error; - } - } - - err = pci_scan_root_bus_bridge(bridge); - if (err) - goto error; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - 
pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; - -error: - pci_free_resource_list(&res); - return err; -} - -static struct platform_driver nwl_pcie_driver = { - .driver = { - .name = "nwl-pcie", - .suppress_bind_attrs = true, - .of_match_table = nwl_pcie_of_match, - }, - .probe = nwl_pcie_probe, -}; -builtin_platform_driver(nwl_pcie_driver); diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c deleted file mode 100644 index b110a3a814e3..000000000000 --- a/drivers/pci/host/pcie-xilinx.c +++ /dev/null @@ -1,702 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * PCIe host controller driver for Xilinx AXI PCIe Bridge - * - * Copyright (c) 2012 - 2014 Xilinx, Inc. - * - * Based on the Tegra PCIe driver - * - * Bits taken from Synopsys DesignWare Host controller driver and - * ARM PCI Host generic driver. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../pci.h" - -/* Register definitions */ -#define XILINX_PCIE_REG_BIR 0x00000130 -#define XILINX_PCIE_REG_IDR 0x00000138 -#define XILINX_PCIE_REG_IMR 0x0000013c -#define XILINX_PCIE_REG_PSCR 0x00000144 -#define XILINX_PCIE_REG_RPSC 0x00000148 -#define XILINX_PCIE_REG_MSIBASE1 0x0000014c -#define XILINX_PCIE_REG_MSIBASE2 0x00000150 -#define XILINX_PCIE_REG_RPEFR 0x00000154 -#define XILINX_PCIE_REG_RPIFR1 0x00000158 -#define XILINX_PCIE_REG_RPIFR2 0x0000015c - -/* Interrupt registers definitions */ -#define XILINX_PCIE_INTR_LINK_DOWN BIT(0) -#define XILINX_PCIE_INTR_ECRC_ERR BIT(1) -#define XILINX_PCIE_INTR_STR_ERR BIT(2) -#define XILINX_PCIE_INTR_HOT_RESET BIT(3) -#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8) -#define XILINX_PCIE_INTR_CORRECTABLE BIT(9) -#define XILINX_PCIE_INTR_NONFATAL BIT(10) -#define XILINX_PCIE_INTR_FATAL BIT(11) -#define XILINX_PCIE_INTR_INTX BIT(16) -#define XILINX_PCIE_INTR_MSI BIT(17) -#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20) -#define 
XILINX_PCIE_INTR_SLV_UNEXP BIT(21) -#define XILINX_PCIE_INTR_SLV_COMPL BIT(22) -#define XILINX_PCIE_INTR_SLV_ERRP BIT(23) -#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24) -#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25) -#define XILINX_PCIE_INTR_MST_DECERR BIT(26) -#define XILINX_PCIE_INTR_MST_SLVERR BIT(27) -#define XILINX_PCIE_INTR_MST_ERRP BIT(28) -#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED -#define XILINX_PCIE_IMR_ENABLE_MASK 0x1FF30F0D -#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF - -/* Root Port Error FIFO Read Register definitions */ -#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18) -#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0) -#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF - -/* Root Port Interrupt FIFO Read Register 1 definitions */ -#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31) -#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30) -#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27) -#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF -#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27 - -/* Bridge Info Register definitions */ -#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16) -#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16 - -/* Root Port Interrupt FIFO Read Register 2 definitions */ -#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0) - -/* Root Port Status/control Register definitions */ -#define XILINX_PCIE_REG_RPSC_BEN BIT(0) - -/* Phy Status/Control Register definitions */ -#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11) - -/* ECAM definitions */ -#define ECAM_BUS_NUM_SHIFT 20 -#define ECAM_DEV_NUM_SHIFT 12 - -/* Number of MSI IRQs */ -#define XILINX_NUM_MSI_IRQS 128 - -/** - * struct xilinx_pcie_port - PCIe port information - * @reg_base: IO Mapped Register Base - * @irq: Interrupt number - * @msi_pages: MSI pages - * @root_busno: Root Bus number - * @dev: Device pointer - * @msi_domain: MSI IRQ domain pointer - * @leg_domain: Legacy IRQ domain pointer - * @resources: Bus Resources - */ -struct xilinx_pcie_port { - void __iomem *reg_base; - u32 irq; - unsigned long msi_pages; - u8 
root_busno; - struct device *dev; - struct irq_domain *msi_domain; - struct irq_domain *leg_domain; - struct list_head resources; -}; - -static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS); - -static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg) -{ - return readl(port->reg_base + reg); -} - -static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg) -{ - writel(val, port->reg_base + reg); -} - -static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port) -{ - return (pcie_read(port, XILINX_PCIE_REG_PSCR) & - XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0; -} - -/** - * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts - * @port: PCIe port information - */ -static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) -{ - struct device *dev = port->dev; - unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR); - - if (val & XILINX_PCIE_RPEFR_ERR_VALID) { - dev_dbg(dev, "Requester ID %lu\n", - val & XILINX_PCIE_RPEFR_REQ_ID); - pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, - XILINX_PCIE_REG_RPEFR); - } -} - -/** - * xilinx_pcie_valid_device - Check if a valid device is present on bus - * @bus: PCI Bus structure - * @devfn: device/function - * - * Return: 'true' on success and 'false' if invalid device is found - */ -static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) -{ - struct xilinx_pcie_port *port = bus->sysdata; - - /* Check if link is up when trying to access downstream ports */ - if (bus->number != port->root_busno) - if (!xilinx_pcie_link_up(port)) - return false; - - /* Only one device down on each root port */ - if (bus->number == port->root_busno && devfn > 0) - return false; - - return true; -} - -/** - * xilinx_pcie_map_bus - Get configuration base - * @bus: PCI Bus structure - * @devfn: Device/function - * @where: Offset from base - * - * Return: Base address of the configuration space needed to be - * accessed. 
- */ -static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus, - unsigned int devfn, int where) -{ - struct xilinx_pcie_port *port = bus->sysdata; - int relbus; - - if (!xilinx_pcie_valid_device(bus, devfn)) - return NULL; - - relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | - (devfn << ECAM_DEV_NUM_SHIFT); - - return port->reg_base + relbus + where; -} - -/* PCIe operations */ -static struct pci_ops xilinx_pcie_ops = { - .map_bus = xilinx_pcie_map_bus, - .read = pci_generic_config_read, - .write = pci_generic_config_write, -}; - -/* MSI functions */ - -/** - * xilinx_pcie_destroy_msi - Free MSI number - * @irq: IRQ to be freed - */ -static void xilinx_pcie_destroy_msi(unsigned int irq) -{ - struct msi_desc *msi; - struct xilinx_pcie_port *port; - struct irq_data *d = irq_get_irq_data(irq); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - - if (!test_bit(hwirq, msi_irq_in_use)) { - msi = irq_get_msi_desc(irq); - port = msi_desc_to_pci_sysdata(msi); - dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); - } else { - clear_bit(hwirq, msi_irq_in_use); - } -} - -/** - * xilinx_pcie_assign_msi - Allocate MSI number - * - * Return: A valid IRQ on success and error value on failure. 
- */ -static int xilinx_pcie_assign_msi(void) -{ - int pos; - - pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS); - if (pos < XILINX_NUM_MSI_IRQS) - set_bit(pos, msi_irq_in_use); - else - return -ENOSPC; - - return pos; -} - -/** - * xilinx_msi_teardown_irq - Destroy the MSI - * @chip: MSI Chip descriptor - * @irq: MSI IRQ to destroy - */ -static void xilinx_msi_teardown_irq(struct msi_controller *chip, - unsigned int irq) -{ - xilinx_pcie_destroy_msi(irq); - irq_dispose_mapping(irq); -} - -/** - * xilinx_pcie_msi_setup_irq - Setup MSI request - * @chip: MSI chip pointer - * @pdev: PCIe device pointer - * @desc: MSI descriptor pointer - * - * Return: '0' on success and error value on failure - */ -static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, - struct pci_dev *pdev, - struct msi_desc *desc) -{ - struct xilinx_pcie_port *port = pdev->bus->sysdata; - unsigned int irq; - int hwirq; - struct msi_msg msg; - phys_addr_t msg_addr; - - hwirq = xilinx_pcie_assign_msi(); - if (hwirq < 0) - return hwirq; - - irq = irq_create_mapping(port->msi_domain, hwirq); - if (!irq) - return -EINVAL; - - irq_set_msi_desc(irq, desc); - - msg_addr = virt_to_phys((void *)port->msi_pages); - - msg.address_hi = 0; - msg.address_lo = msg_addr; - msg.data = irq; - - pci_write_msi_msg(irq, &msg); - - return 0; -} - -/* MSI Chip Descriptor */ -static struct msi_controller xilinx_pcie_msi_chip = { - .setup_irq = xilinx_pcie_msi_setup_irq, - .teardown_irq = xilinx_msi_teardown_irq, -}; - -/* HW Interrupt Chip Descriptor */ -static struct irq_chip xilinx_msi_irq_chip = { - .name = "Xilinx PCIe MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -/** - * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid - * @domain: IRQ domain - * @irq: Virtual IRQ number - * @hwirq: HW interrupt number - * - * Return: Always returns 0. 
- */ -static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - - return 0; -} - -/* IRQ Domain operations */ -static const struct irq_domain_ops msi_domain_ops = { - .map = xilinx_pcie_msi_map, -}; - -/** - * xilinx_pcie_enable_msi - Enable MSI support - * @port: PCIe port information - */ -static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) -{ - phys_addr_t msg_addr; - - port->msi_pages = __get_free_pages(GFP_KERNEL, 0); - msg_addr = virt_to_phys((void *)port->msi_pages); - pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); - pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); -} - -/* INTx Functions */ - -/** - * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid - * @domain: IRQ domain - * @irq: Virtual IRQ number - * @hwirq: HW interrupt number - * - * Return: Always returns 0. - */ -static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); - - return 0; -} - -/* INTx IRQ Domain operations */ -static const struct irq_domain_ops intx_domain_ops = { - .map = xilinx_pcie_intx_map, - .xlate = pci_irqd_intx_xlate, -}; - -/* PCIe HW Functions */ - -/** - * xilinx_pcie_intr_handler - Interrupt Service Handler - * @irq: IRQ number - * @data: PCIe port information - * - * Return: IRQ_HANDLED on success and IRQ_NONE on failure - */ -static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) -{ - struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data; - struct device *dev = port->dev; - u32 val, mask, status; - - /* Read interrupt decode and mask registers */ - val = pcie_read(port, XILINX_PCIE_REG_IDR); - mask = pcie_read(port, XILINX_PCIE_REG_IMR); - - status = val & mask; - if (!status) - return 
IRQ_NONE; - - if (status & XILINX_PCIE_INTR_LINK_DOWN) - dev_warn(dev, "Link Down\n"); - - if (status & XILINX_PCIE_INTR_ECRC_ERR) - dev_warn(dev, "ECRC failed\n"); - - if (status & XILINX_PCIE_INTR_STR_ERR) - dev_warn(dev, "Streaming error\n"); - - if (status & XILINX_PCIE_INTR_HOT_RESET) - dev_info(dev, "Hot reset\n"); - - if (status & XILINX_PCIE_INTR_CFG_TIMEOUT) - dev_warn(dev, "ECAM access timeout\n"); - - if (status & XILINX_PCIE_INTR_CORRECTABLE) { - dev_warn(dev, "Correctable error message\n"); - xilinx_pcie_clear_err_interrupts(port); - } - - if (status & XILINX_PCIE_INTR_NONFATAL) { - dev_warn(dev, "Non fatal error message\n"); - xilinx_pcie_clear_err_interrupts(port); - } - - if (status & XILINX_PCIE_INTR_FATAL) { - dev_warn(dev, "Fatal error message\n"); - xilinx_pcie_clear_err_interrupts(port); - } - - if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) { - val = pcie_read(port, XILINX_PCIE_REG_RPIFR1); - - /* Check whether interrupt valid */ - if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) { - dev_warn(dev, "RP Intr FIFO1 read error\n"); - goto error; - } - - /* Decode the IRQ number */ - if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { - val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) & - XILINX_PCIE_RPIFR2_MSG_DATA; - } else { - val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> - XILINX_PCIE_RPIFR1_INTR_SHIFT; - val = irq_find_mapping(port->leg_domain, val); - } - - /* Clear interrupt FIFO register 1 */ - pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, - XILINX_PCIE_REG_RPIFR1); - - /* Handle the interrupt */ - if (IS_ENABLED(CONFIG_PCI_MSI) || - !(val & XILINX_PCIE_RPIFR1_MSI_INTR)) - generic_handle_irq(val); - } - - if (status & XILINX_PCIE_INTR_SLV_UNSUPP) - dev_warn(dev, "Slave unsupported request\n"); - - if (status & XILINX_PCIE_INTR_SLV_UNEXP) - dev_warn(dev, "Slave unexpected completion\n"); - - if (status & XILINX_PCIE_INTR_SLV_COMPL) - dev_warn(dev, "Slave completion timeout\n"); - - if (status & XILINX_PCIE_INTR_SLV_ERRP) - dev_warn(dev, "Slave 
Error Poison\n"); - - if (status & XILINX_PCIE_INTR_SLV_CMPABT) - dev_warn(dev, "Slave Completer Abort\n"); - - if (status & XILINX_PCIE_INTR_SLV_ILLBUR) - dev_warn(dev, "Slave Illegal Burst\n"); - - if (status & XILINX_PCIE_INTR_MST_DECERR) - dev_warn(dev, "Master decode error\n"); - - if (status & XILINX_PCIE_INTR_MST_SLVERR) - dev_warn(dev, "Master slave error\n"); - - if (status & XILINX_PCIE_INTR_MST_ERRP) - dev_warn(dev, "Master error poison\n"); - -error: - /* Clear the Interrupt Decode register */ - pcie_write(port, status, XILINX_PCIE_REG_IDR); - - return IRQ_HANDLED; -} - -/** - * xilinx_pcie_init_irq_domain - Initialize IRQ domain - * @port: PCIe port information - * - * Return: '0' on success and error value on failure - */ -static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) -{ - struct device *dev = port->dev; - struct device_node *node = dev->of_node; - struct device_node *pcie_intc_node; - - /* Setup INTx */ - pcie_intc_node = of_get_next_child(node, NULL); - if (!pcie_intc_node) { - dev_err(dev, "No PCIe Intc node found\n"); - return -ENODEV; - } - - port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, - port); - if (!port->leg_domain) { - dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return -ENODEV; - } - - /* Setup MSI */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - port->msi_domain = irq_domain_add_linear(node, - XILINX_NUM_MSI_IRQS, - &msi_domain_ops, - &xilinx_pcie_msi_chip); - if (!port->msi_domain) { - dev_err(dev, "Failed to get a MSI IRQ domain\n"); - return -ENODEV; - } - - xilinx_pcie_enable_msi(port); - } - - return 0; -} - -/** - * xilinx_pcie_init_port - Initialize hardware - * @port: PCIe port information - */ -static void xilinx_pcie_init_port(struct xilinx_pcie_port *port) -{ - struct device *dev = port->dev; - - if (xilinx_pcie_link_up(port)) - dev_info(dev, "PCIe Link is UP\n"); - else - dev_info(dev, "PCIe Link is DOWN\n"); - - /* Disable all interrupts */ - 
pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK, - XILINX_PCIE_REG_IMR); - - /* Clear pending interrupts */ - pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) & - XILINX_PCIE_IMR_ALL_MASK, - XILINX_PCIE_REG_IDR); - - /* Enable all interrupts we handle */ - pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR); - - /* Enable the Bridge enable bit */ - pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) | - XILINX_PCIE_REG_RPSC_BEN, - XILINX_PCIE_REG_RPSC); -} - -/** - * xilinx_pcie_parse_dt - Parse Device tree - * @port: PCIe port information - * - * Return: '0' on success and error value on failure - */ -static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) -{ - struct device *dev = port->dev; - struct device_node *node = dev->of_node; - struct resource regs; - const char *type; - int err; - - type = of_get_property(node, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } - - err = of_address_to_resource(node, 0, ®s); - if (err) { - dev_err(dev, "missing \"reg\" property\n"); - return err; - } - - port->reg_base = devm_pci_remap_cfg_resource(dev, ®s); - if (IS_ERR(port->reg_base)) - return PTR_ERR(port->reg_base); - - port->irq = irq_of_parse_and_map(node, 0); - err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler, - IRQF_SHARED | IRQF_NO_THREAD, - "xilinx-pcie", port); - if (err) { - dev_err(dev, "unable to request irq %d\n", port->irq); - return err; - } - - return 0; -} - -/** - * xilinx_pcie_probe - Probe function - * @pdev: Platform device pointer - * - * Return: '0' on success and error value on failure - */ -static int xilinx_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct xilinx_pcie_port *port; - struct pci_bus *bus, *child; - struct pci_host_bridge *bridge; - int err; - resource_size_t iobase = 0; - LIST_HEAD(res); - - if (!dev->of_node) - return -ENODEV; - - bridge = 
devm_pci_alloc_host_bridge(dev, sizeof(*port)); - if (!bridge) - return -ENODEV; - - port = pci_host_bridge_priv(bridge); - - port->dev = dev; - - err = xilinx_pcie_parse_dt(port); - if (err) { - dev_err(dev, "Parsing DT failed\n"); - return err; - } - - xilinx_pcie_init_port(port); - - err = xilinx_pcie_init_irq_domain(port); - if (err) { - dev_err(dev, "Failed creating IRQ Domain\n"); - return err; - } - - err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, - &iobase); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - - err = devm_request_pci_bus_resources(dev, &res); - if (err) - goto error; - - - list_splice_init(&res, &bridge->windows); - bridge->dev.parent = dev; - bridge->sysdata = port; - bridge->busnr = 0; - bridge->ops = &xilinx_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - -#ifdef CONFIG_PCI_MSI - xilinx_pcie_msi_chip.dev = dev; - bridge->msi = &xilinx_pcie_msi_chip; -#endif - err = pci_scan_root_bus_bridge(bridge); - if (err < 0) - goto error; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; - -error: - pci_free_resource_list(&res); - return err; -} - -static const struct of_device_id xilinx_pcie_of_match[] = { - { .compatible = "xlnx,axi-pcie-host-1.00.a", }, - {} -}; - -static struct platform_driver xilinx_pcie_driver = { - .driver = { - .name = "xilinx-pcie", - .of_match_table = xilinx_pcie_of_match, - .suppress_bind_attrs = true, - }, - .probe = xilinx_pcie_probe, -}; -builtin_platform_driver(xilinx_pcie_driver); diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c deleted file mode 100644 index 942b64fc7f1f..000000000000 --- a/drivers/pci/host/vmd.c +++ /dev/null @@ -1,870 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Volume Management Device driver - * Copyright (c) 2015, Intel 
Corporation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define VMD_CFGBAR 0 -#define VMD_MEMBAR1 2 -#define VMD_MEMBAR2 4 - -#define PCI_REG_VMCAP 0x40 -#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1) -#define PCI_REG_VMCONFIG 0x44 -#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3) -#define PCI_REG_VMLOCK 0x70 -#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) - -enum vmd_features { - /* - * Device may contain registers which hint the physical location of the - * membars, in order to allow proper address translation during - * resource assignment to enable guest virtualization - */ - VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), - - /* - * Device may provide root port configuration information which limits - * bus numbering - */ - VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), -}; - -/* - * Lock for manipulating VMD IRQ lists. - */ -static DEFINE_RAW_SPINLOCK(list_lock); - -/** - * struct vmd_irq - private data to map driver IRQ to the VMD shared vector - * @node: list item for parent traversal. - * @irq: back pointer to parent. - * @enabled: true if driver enabled IRQ - * @virq: the virtual IRQ value provided to the requesting driver. - * - * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to - * a VMD IRQ using this structure. - */ -struct vmd_irq { - struct list_head node; - struct vmd_irq_list *irq; - bool enabled; - unsigned int virq; -}; - -/** - * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector - * @irq_list: the list of irq's the VMD one demuxes to. - * @srcu: SRCU struct for local synchronization. - * @count: number of child IRQs assigned to this vector; used to track - * sharing. 
- */ -struct vmd_irq_list { - struct list_head irq_list; - struct srcu_struct srcu; - unsigned int count; -}; - -struct vmd_dev { - struct pci_dev *dev; - - spinlock_t cfg_lock; - char __iomem *cfgbar; - - int msix_count; - struct vmd_irq_list *irqs; - - struct pci_sysdata sysdata; - struct resource resources[3]; - struct irq_domain *irq_domain; - struct pci_bus *bus; - -#ifdef CONFIG_X86_DEV_DMA_OPS - struct dma_map_ops dma_ops; - struct dma_domain dma_domain; -#endif -}; - -static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) -{ - return container_of(bus->sysdata, struct vmd_dev, sysdata); -} - -static inline unsigned int index_from_irqs(struct vmd_dev *vmd, - struct vmd_irq_list *irqs) -{ - return irqs - vmd->irqs; -} - -/* - * Drivers managing a device in a VMD domain allocate their own IRQs as before, - * but the MSI entry for the hardware it's driving will be programmed with a - * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its - * domain into one of its own, and the VMD driver de-muxes these for the - * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations - * and irq_chip to set this up. - */ -static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -{ - struct vmd_irq *vmdirq = data->chip_data; - struct vmd_irq_list *irq = vmdirq->irq; - struct vmd_dev *vmd = irq_data_get_irq_handler_data(data); - - msg->address_hi = MSI_ADDR_BASE_HI; - msg->address_lo = MSI_ADDR_BASE_LO | - MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq)); - msg->data = 0; -} - -/* - * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. 
- */ -static void vmd_irq_enable(struct irq_data *data) -{ - struct vmd_irq *vmdirq = data->chip_data; - unsigned long flags; - - raw_spin_lock_irqsave(&list_lock, flags); - WARN_ON(vmdirq->enabled); - list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); - vmdirq->enabled = true; - raw_spin_unlock_irqrestore(&list_lock, flags); - - data->chip->irq_unmask(data); -} - -static void vmd_irq_disable(struct irq_data *data) -{ - struct vmd_irq *vmdirq = data->chip_data; - unsigned long flags; - - data->chip->irq_mask(data); - - raw_spin_lock_irqsave(&list_lock, flags); - if (vmdirq->enabled) { - list_del_rcu(&vmdirq->node); - vmdirq->enabled = false; - } - raw_spin_unlock_irqrestore(&list_lock, flags); -} - -/* - * XXX: Stubbed until we develop acceptable way to not create conflicts with - * other devices sharing the same vector. - */ -static int vmd_irq_set_affinity(struct irq_data *data, - const struct cpumask *dest, bool force) -{ - return -EINVAL; -} - -static struct irq_chip vmd_msi_controller = { - .name = "VMD-MSI", - .irq_enable = vmd_irq_enable, - .irq_disable = vmd_irq_disable, - .irq_compose_msi_msg = vmd_compose_msi_msg, - .irq_set_affinity = vmd_irq_set_affinity, -}; - -static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, - msi_alloc_info_t *arg) -{ - return 0; -} - -/* - * XXX: We can be even smarter selecting the best IRQ once we solve the - * affinity problem. 
- */ -static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) -{ - int i, best = 1; - unsigned long flags; - - if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1) - return &vmd->irqs[0]; - - raw_spin_lock_irqsave(&list_lock, flags); - for (i = 1; i < vmd->msix_count; i++) - if (vmd->irqs[i].count < vmd->irqs[best].count) - best = i; - vmd->irqs[best].count++; - raw_spin_unlock_irqrestore(&list_lock, flags); - - return &vmd->irqs[best]; -} - -static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, - unsigned int virq, irq_hw_number_t hwirq, - msi_alloc_info_t *arg) -{ - struct msi_desc *desc = arg->desc; - struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); - struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); - unsigned int index, vector; - - if (!vmdirq) - return -ENOMEM; - - INIT_LIST_HEAD(&vmdirq->node); - vmdirq->irq = vmd_next_irq(vmd, desc); - vmdirq->virq = virq; - index = index_from_irqs(vmd, vmdirq->irq); - vector = pci_irq_vector(vmd->dev, index); - - irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, - handle_untracked_irq, vmd, NULL); - return 0; -} - -static void vmd_msi_free(struct irq_domain *domain, - struct msi_domain_info *info, unsigned int virq) -{ - struct vmd_irq *vmdirq = irq_get_chip_data(virq); - unsigned long flags; - - synchronize_srcu(&vmdirq->irq->srcu); - - /* XXX: Potential optimization to rebalance */ - raw_spin_lock_irqsave(&list_lock, flags); - vmdirq->irq->count--; - raw_spin_unlock_irqrestore(&list_lock, flags); - - kfree(vmdirq); -} - -static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, - int nvec, msi_alloc_info_t *arg) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct vmd_dev *vmd = vmd_from_bus(pdev->bus); - - if (nvec > vmd->msix_count) - return vmd->msix_count; - - memset(arg, 0, sizeof(*arg)); - return 0; -} - -static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) -{ - 
arg->desc = desc; -} - -static struct msi_domain_ops vmd_msi_domain_ops = { - .get_hwirq = vmd_get_hwirq, - .msi_init = vmd_msi_init, - .msi_free = vmd_msi_free, - .msi_prepare = vmd_msi_prepare, - .set_desc = vmd_set_desc, -}; - -static struct msi_domain_info vmd_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX, - .ops = &vmd_msi_domain_ops, - .chip = &vmd_msi_controller, -}; - -#ifdef CONFIG_X86_DEV_DMA_OPS -/* - * VMD replaces the requester ID with its own. DMA mappings for devices in a - * VMD domain need to be mapped for the VMD, not the device requiring - * the mapping. - */ -static struct device *to_vmd_dev(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct vmd_dev *vmd = vmd_from_bus(pdev->bus); - - return &vmd->dev->dev; -} - -static const struct dma_map_ops *vmd_dma_ops(struct device *dev) -{ - return get_dma_ops(to_vmd_dev(dev)); -} - -static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, - gfp_t flag, unsigned long attrs) -{ - return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, - attrs); -} - -static void vmd_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t addr, unsigned long attrs) -{ - return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, - attrs); -} - -static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t addr, size_t size, - unsigned long attrs) -{ - return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, - size, attrs); -} - -static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t addr, size_t size, - unsigned long attrs) -{ - return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, - addr, size, attrs); -} - -static dma_addr_t vmd_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - return 
vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, - dir, attrs); -} - -static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); -} - -static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs) -{ - return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); -} - -static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs) -{ - vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); -} - -static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); -} - -static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size, - dir); -} - -static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); -} - -static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); -} - -static int vmd_mapping_error(struct device *dev, dma_addr_t addr) -{ - return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr); -} - -static int vmd_dma_supported(struct device *dev, u64 mask) -{ - return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); -} - -#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK -static u64 vmd_get_required_mask(struct device *dev) -{ - return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); -} -#endif - -static void 
vmd_teardown_dma_ops(struct vmd_dev *vmd) -{ - struct dma_domain *domain = &vmd->dma_domain; - - if (get_dma_ops(&vmd->dev->dev)) - del_dma_domain(domain); -} - -#define ASSIGN_VMD_DMA_OPS(source, dest, fn) \ - do { \ - if (source->fn) \ - dest->fn = vmd_##fn; \ - } while (0) - -static void vmd_setup_dma_ops(struct vmd_dev *vmd) -{ - const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev); - struct dma_map_ops *dest = &vmd->dma_ops; - struct dma_domain *domain = &vmd->dma_domain; - - domain->domain_nr = vmd->sysdata.domain; - domain->dma_ops = dest; - - if (!source) - return; - ASSIGN_VMD_DMA_OPS(source, dest, alloc); - ASSIGN_VMD_DMA_OPS(source, dest, free); - ASSIGN_VMD_DMA_OPS(source, dest, mmap); - ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable); - ASSIGN_VMD_DMA_OPS(source, dest, map_page); - ASSIGN_VMD_DMA_OPS(source, dest, unmap_page); - ASSIGN_VMD_DMA_OPS(source, dest, map_sg); - ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg); - ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu); - ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); - ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); - ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); - ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); - ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); -#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK - ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); -#endif - add_dma_domain(domain); -} -#undef ASSIGN_VMD_DMA_OPS -#else -static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {} -static void vmd_setup_dma_ops(struct vmd_dev *vmd) {} -#endif - -static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, - unsigned int devfn, int reg, int len) -{ - char __iomem *addr = vmd->cfgbar + - (bus->number << 20) + (devfn << 12) + reg; - - if ((addr - vmd->cfgbar) + len >= - resource_size(&vmd->dev->resource[VMD_CFGBAR])) - return NULL; - - return addr; -} - -/* - * CPU may deadlock if config space is not serialized on some versions of this - * hardware, so all 
config space access is done under a spinlock. - */ -static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, - int len, u32 *value) -{ - struct vmd_dev *vmd = vmd_from_bus(bus); - char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); - unsigned long flags; - int ret = 0; - - if (!addr) - return -EFAULT; - - spin_lock_irqsave(&vmd->cfg_lock, flags); - switch (len) { - case 1: - *value = readb(addr); - break; - case 2: - *value = readw(addr); - break; - case 4: - *value = readl(addr); - break; - default: - ret = -EINVAL; - break; - } - spin_unlock_irqrestore(&vmd->cfg_lock, flags); - return ret; -} - -/* - * VMD h/w converts non-posted config writes to posted memory writes. The - * read-back in this function forces the completion so it returns only after - * the config space was written, as expected. - */ -static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, - int len, u32 value) -{ - struct vmd_dev *vmd = vmd_from_bus(bus); - char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); - unsigned long flags; - int ret = 0; - - if (!addr) - return -EFAULT; - - spin_lock_irqsave(&vmd->cfg_lock, flags); - switch (len) { - case 1: - writeb(value, addr); - readb(addr); - break; - case 2: - writew(value, addr); - readw(addr); - break; - case 4: - writel(value, addr); - readl(addr); - break; - default: - ret = -EINVAL; - break; - } - spin_unlock_irqrestore(&vmd->cfg_lock, flags); - return ret; -} - -static struct pci_ops vmd_ops = { - .read = vmd_pci_read, - .write = vmd_pci_write, -}; - -static void vmd_attach_resources(struct vmd_dev *vmd) -{ - vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1]; - vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2]; -} - -static void vmd_detach_resources(struct vmd_dev *vmd) -{ - vmd->dev->resource[VMD_MEMBAR1].child = NULL; - vmd->dev->resource[VMD_MEMBAR2].child = NULL; -} - -/* - * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. 
- * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower - * 16 bits are the PCI Segment Group (domain) number. Other bits are - * currently reserved. - */ -static int vmd_find_free_domain(void) -{ - int domain = 0xffff; - struct pci_bus *bus = NULL; - - while ((bus = pci_find_next_bus(bus)) != NULL) - domain = max_t(int, domain, pci_domain_nr(bus)); - return domain + 1; -} - -static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) -{ - struct pci_sysdata *sd = &vmd->sysdata; - struct fwnode_handle *fn; - struct resource *res; - u32 upper_bits; - unsigned long flags; - LIST_HEAD(resources); - resource_size_t offset[2] = {0}; - resource_size_t membar2_offset = 0x2000, busn_start = 0; - - /* - * Shadow registers may exist in certain VMD device ids which allow - * guests to correctly assign host physical addresses to the root ports - * and child devices. These registers will either return the host value - * or 0, depending on an enable bit in the VMD device. 
- */ - if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) { - u32 vmlock; - int ret; - - membar2_offset = 0x2018; - ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); - if (ret || vmlock == ~0) - return -ENODEV; - - if (MB2_SHADOW_EN(vmlock)) { - void __iomem *membar2; - - membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0); - if (!membar2) - return -ENOMEM; - offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - - readq(membar2 + 0x2008); - offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - - readq(membar2 + 0x2010); - pci_iounmap(vmd->dev, membar2); - } - } - - /* - * Certain VMD devices may have a root port configuration option which - * limits the bus range to between 0-127 or 128-255 - */ - if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) { - u32 vmcap, vmconfig; - - pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap); - pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); - if (BUS_RESTRICT_CAP(vmcap) && - (BUS_RESTRICT_CFG(vmconfig) == 0x1)) - busn_start = 128; - } - - res = &vmd->dev->resource[VMD_CFGBAR]; - vmd->resources[0] = (struct resource) { - .name = "VMD CFGBAR", - .start = busn_start, - .end = busn_start + (resource_size(res) >> 20) - 1, - .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, - }; - - /* - * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can - * put 32-bit resources in the window. - * - * There's no hardware reason why a 64-bit window *couldn't* - * contain a 32-bit resource, but pbus_size_mem() computes the - * bridge window size assuming a 64-bit window will contain no - * 32-bit resources. __pci_assign_resource() enforces that - * artificial restriction to make sure everything will fit. - * - * The only way we could use a 64-bit non-prefechable MEMBAR is - * if its address is <4GB so that we can convert it to a 32-bit - * resource. To be visible to the host OS, all VMD endpoints must - * be initially configured by platform BIOS, which includes setting - * up these resources. 
We can assume the device is configured - * according to the platform needs. - */ - res = &vmd->dev->resource[VMD_MEMBAR1]; - upper_bits = upper_32_bits(res->end); - flags = res->flags & ~IORESOURCE_SIZEALIGN; - if (!upper_bits) - flags &= ~IORESOURCE_MEM_64; - vmd->resources[1] = (struct resource) { - .name = "VMD MEMBAR1", - .start = res->start, - .end = res->end, - .flags = flags, - .parent = res, - }; - - res = &vmd->dev->resource[VMD_MEMBAR2]; - upper_bits = upper_32_bits(res->end); - flags = res->flags & ~IORESOURCE_SIZEALIGN; - if (!upper_bits) - flags &= ~IORESOURCE_MEM_64; - vmd->resources[2] = (struct resource) { - .name = "VMD MEMBAR2", - .start = res->start + membar2_offset, - .end = res->end, - .flags = flags, - .parent = res, - }; - - sd->vmd_domain = true; - sd->domain = vmd_find_free_domain(); - if (sd->domain < 0) - return sd->domain; - - sd->node = pcibus_to_node(vmd->dev->bus); - - fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); - if (!fn) - return -ENODEV; - - vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, - x86_vector_domain); - irq_domain_free_fwnode(fn); - if (!vmd->irq_domain) - return -ENODEV; - - pci_add_resource(&resources, &vmd->resources[0]); - pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); - pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); - - vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops, - sd, &resources); - if (!vmd->bus) { - pci_free_resource_list(&resources); - irq_domain_remove(vmd->irq_domain); - return -ENODEV; - } - - vmd_attach_resources(vmd); - vmd_setup_dma_ops(vmd); - dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); - pci_rescan_bus(vmd->bus); - - WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, - "domain"), "Can't create symlink to domain\n"); - return 0; -} - -static irqreturn_t vmd_irq(int irq, void *data) -{ - struct vmd_irq_list *irqs = data; - struct vmd_irq *vmdirq; - int idx; - - idx = 
srcu_read_lock(&irqs->srcu); - list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) - generic_handle_irq(vmdirq->virq); - srcu_read_unlock(&irqs->srcu, idx); - - return IRQ_HANDLED; -} - -static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) -{ - struct vmd_dev *vmd; - int i, err; - - if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) - return -ENOMEM; - - vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL); - if (!vmd) - return -ENOMEM; - - vmd->dev = dev; - err = pcim_enable_device(dev); - if (err < 0) - return err; - - vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0); - if (!vmd->cfgbar) - return -ENOMEM; - - pci_set_master(dev); - if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) && - dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) - return -ENODEV; - - vmd->msix_count = pci_msix_vec_count(dev); - if (vmd->msix_count < 0) - return -ENODEV; - - vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, - PCI_IRQ_MSIX); - if (vmd->msix_count < 0) - return vmd->msix_count; - - vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), - GFP_KERNEL); - if (!vmd->irqs) - return -ENOMEM; - - for (i = 0; i < vmd->msix_count; i++) { - err = init_srcu_struct(&vmd->irqs[i].srcu); - if (err) - return err; - - INIT_LIST_HEAD(&vmd->irqs[i].irq_list); - err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), - vmd_irq, IRQF_NO_THREAD, - "vmd", &vmd->irqs[i]); - if (err) - return err; - } - - spin_lock_init(&vmd->cfg_lock); - pci_set_drvdata(dev, vmd); - err = vmd_enable_domain(vmd, (unsigned long) id->driver_data); - if (err) - return err; - - dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n", - vmd->sysdata.domain); - return 0; -} - -static void vmd_cleanup_srcu(struct vmd_dev *vmd) -{ - int i; - - for (i = 0; i < vmd->msix_count; i++) - cleanup_srcu_struct(&vmd->irqs[i].srcu); -} - -static void vmd_remove(struct pci_dev *dev) -{ - struct vmd_dev *vmd = pci_get_drvdata(dev); - - 
vmd_detach_resources(vmd); - sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); - pci_stop_root_bus(vmd->bus); - pci_remove_root_bus(vmd->bus); - vmd_cleanup_srcu(vmd); - vmd_teardown_dma_ops(vmd); - irq_domain_remove(vmd->irq_domain); -} - -#ifdef CONFIG_PM_SLEEP -static int vmd_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct vmd_dev *vmd = pci_get_drvdata(pdev); - int i; - - for (i = 0; i < vmd->msix_count; i++) - devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); - - pci_save_state(pdev); - return 0; -} - -static int vmd_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct vmd_dev *vmd = pci_get_drvdata(pdev); - int err, i; - - for (i = 0; i < vmd->msix_count; i++) { - err = devm_request_irq(dev, pci_irq_vector(pdev, i), - vmd_irq, IRQF_NO_THREAD, - "vmd", &vmd->irqs[i]); - if (err) - return err; - } - - pci_restore_state(pdev); - return 0; -} -#endif -static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); - -static const struct pci_device_id vmd_ids[] = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), - .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | - VMD_FEAT_HAS_BUS_RESTRICTIONS,}, - {0,} -}; -MODULE_DEVICE_TABLE(pci, vmd_ids); - -static struct pci_driver vmd_drv = { - .name = "vmd", - .id_table = vmd_ids, - .probe = vmd_probe, - .remove = vmd_remove, - .driver = { - .pm = &vmd_dev_pm_ops, - }, -}; -module_pci_driver(vmd_drv); - -MODULE_AUTHOR("Intel Corporation"); -MODULE_LICENSE("GPL v2"); -MODULE_VERSION("0.6"); -- cgit v1.2.3 From 3d1d40b6a68a72ccf7e2608e244e754d1bd34746 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Fri, 8 Jun 2018 18:08:11 +0200 Subject: MAINTAINERS: make omapfb orphan omapfb is not maintained by me anymore, so drop my name from the maintainers, and mark omapfb as orphan. 
At some point in the future we should mark omapfb as obsolete, but there are still some features supported by omapfb which are not supported by omapdrm, so we're not there yet. Signed-off-by: Tomi Valkeinen Signed-off-by: Bartlomiej Zolnierkiewicz --- MAINTAINERS | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 92be777d060a..10223303b233 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10144,18 +10144,16 @@ F: arch/arm/boot/dts/*am5* F: arch/arm/boot/dts/*dra7* OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2) -M: Tomi Valkeinen L: linux-omap@vger.kernel.org L: linux-fbdev@vger.kernel.org -S: Maintained +S: Orphan F: drivers/video/fbdev/omap2/ F: Documentation/arm/OMAP/DSS OMAP FRAMEBUFFER SUPPORT -M: Tomi Valkeinen L: linux-fbdev@vger.kernel.org L: linux-omap@vger.kernel.org -S: Maintained +S: Orphan F: drivers/video/fbdev/omap/ OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT -- cgit v1.2.3 From 8e0ab9140cdafaa258bf7716cb82d73086ee3d06 Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Wed, 18 Apr 2018 14:27:21 +0200 Subject: um: Update mailing list address We have a new mailing list, so update the MAINTAINERS file. 
Signed-off-by: Richard Weinberger --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9c125f705f78..6fe9c2166496 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14812,8 +14812,7 @@ F: drivers/media/usb/zr364xx/ USER-MODE LINUX (UML) M: Jeff Dike M: Richard Weinberger -L: user-mode-linux-devel@lists.sourceforge.net -L: user-mode-linux-user@lists.sourceforge.net +L: linux-um@lists.infradead.org W: http://user-mode-linux.sourceforge.net T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git S: Maintained -- cgit v1.2.3 From 9ed8b56b80c11ef7c25230b93f2c486fe6b41c4d Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 4 Jun 2018 17:56:56 +0100 Subject: MAINTAINERS: Add Paul Burton as MIPS co-maintainer I soon won't have access to much MIPS hardware, nor enough time to properly maintain MIPS on my own, so add Paul Burton as a co-maintainer. Also add a link to a new shared git repository on kernel.org for linux-next branches and pull request tags. Signed-off-by: James Hogan Acked-by: Paul Burton Acked-by: Florian Fainelli Cc: Ralf Baechle Cc: Matt Redfearn Cc: Maciej W. Rozycki Cc: Huacai Chen Cc: Aaro Koskinen Cc: John Crispin Cc: Steven J. 
Hill Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/19473/ --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 92be777d060a..bd49f390c692 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9320,10 +9320,12 @@ F: drivers/usb/image/microtek.* MIPS M: Ralf Baechle +M: Paul Burton M: James Hogan L: linux-mips@linux-mips.org W: http://www.linux-mips.org/ T: git git://git.linux-mips.org/pub/scm/ralf/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git Q: http://patchwork.linux-mips.org/project/linux-mips/list/ S: Supported F: Documentation/devicetree/bindings/mips/ -- cgit v1.2.3 From ec15872daa0ac3f5cbe7cb6f1734c493d74301ac Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 8 May 2018 18:54:36 -0300 Subject: docs: fix broken references with multiple hints The script: ./scripts/documentation-file-ref-check --fix Gives multiple hints for broken references on some files. Manually use the one that applies for some files. 
Acked-by: Steven Rostedt (VMware) Acked-by: James Morris Signed-off-by: Mauro Carvalho Chehab Acked-by: Jonathan Corbet --- Documentation/ABI/obsolete/sysfs-gpio | 2 +- Documentation/devicetree/bindings/display/bridge/tda998x.txt | 2 +- Documentation/trace/events.rst | 2 +- Documentation/trace/tracepoint-analysis.rst | 2 +- Documentation/translations/zh_CN/SubmittingDrivers | 2 +- Documentation/translations/zh_CN/gpio.txt | 4 ++-- MAINTAINERS | 2 +- drivers/hid/usbhid/Kconfig | 2 +- drivers/input/Kconfig | 4 ++-- drivers/input/joystick/Kconfig | 4 ++-- drivers/input/joystick/iforce/Kconfig | 4 ++-- drivers/input/serio/Kconfig | 4 ++-- drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt | 2 +- drivers/video/fbdev/skeletonfb.c | 8 ++++---- include/linux/tracepoint.h | 2 +- security/device_cgroup.c | 2 +- 16 files changed, 24 insertions(+), 24 deletions(-) (limited to 'MAINTAINERS') diff --git a/Documentation/ABI/obsolete/sysfs-gpio b/Documentation/ABI/obsolete/sysfs-gpio index 32513dc2eec9..40d41ea1a3f5 100644 --- a/Documentation/ABI/obsolete/sysfs-gpio +++ b/Documentation/ABI/obsolete/sysfs-gpio @@ -11,7 +11,7 @@ Description: Kernel code may export it for complete or partial access. GPIOs are identified as they are inside the kernel, using integers in - the range 0..INT_MAX. See Documentation/gpio/gpio.txt for more information. + the range 0..INT_MAX. See Documentation/gpio for more information. /sys/class/gpio /export ... asks the kernel to export a GPIO to userspace diff --git a/Documentation/devicetree/bindings/display/bridge/tda998x.txt b/Documentation/devicetree/bindings/display/bridge/tda998x.txt index 1a4eaca40d94..f5a02f61dd36 100644 --- a/Documentation/devicetree/bindings/display/bridge/tda998x.txt +++ b/Documentation/devicetree/bindings/display/bridge/tda998x.txt @@ -30,7 +30,7 @@ Optional properties: - nxp,calib-gpios: calibration GPIO, which must correspond with the gpio used for the TDA998x interrupt pin. 
-[1] Documentation/sound/alsa/soc/DAI.txt +[1] Documentation/sound/soc/dai.rst [2] include/dt-bindings/display/tda998x.h Example: diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst index 1afae55dc55c..696dc69b8158 100644 --- a/Documentation/trace/events.rst +++ b/Documentation/trace/events.rst @@ -8,7 +8,7 @@ Event Tracing 1. Introduction =============== -Tracepoints (see Documentation/trace/tracepoints.txt) can be used +Tracepoints (see Documentation/trace/tracepoints.rst) can be used without creating custom kernel modules to register probe functions using the event tracing infrastructure. diff --git a/Documentation/trace/tracepoint-analysis.rst b/Documentation/trace/tracepoint-analysis.rst index a4d3ff2e5efb..bef37abf4ad3 100644 --- a/Documentation/trace/tracepoint-analysis.rst +++ b/Documentation/trace/tracepoint-analysis.rst @@ -6,7 +6,7 @@ Notes on Analysing Behaviour Using Events and Tracepoints 1. Introduction =============== -Tracepoints (see Documentation/trace/tracepoints.txt) can be used without +Tracepoints (see Documentation/trace/tracepoints.rst) can be used without creating custom kernel modules to register probe functions using the event tracing infrastructure. 
diff --git a/Documentation/translations/zh_CN/SubmittingDrivers b/Documentation/translations/zh_CN/SubmittingDrivers index 929385e4b194..15e73562f710 100644 --- a/Documentation/translations/zh_CN/SubmittingDrivers +++ b/Documentation/translations/zh_CN/SubmittingDrivers @@ -107,7 +107,7 @@ Linux 2.6: 程序测试的指导,请参阅 Documentation/power/drivers-testing.txt。有关驱动程序电 源管理问题相对全面的概述,请参阅 - Documentation/power/admin-guide/devices.rst。 + Documentation/driver-api/pm/devices.rst。 管理: 如果一个驱动程序的作者还在进行有效的维护,那么通常除了那 些明显正确且不需要任何检查的补丁以外,其他所有的补丁都会 diff --git a/Documentation/translations/zh_CN/gpio.txt b/Documentation/translations/zh_CN/gpio.txt index 4f8bf30a41dc..4cb1ba8b8fed 100644 --- a/Documentation/translations/zh_CN/gpio.txt +++ b/Documentation/translations/zh_CN/gpio.txt @@ -1,4 +1,4 @@ -Chinese translated version of Documentation/gpio.txt +Chinese translated version of Documentation/gpio If you have any comment or update to the content, please contact the original document maintainer directly. However, if you have a problem @@ -10,7 +10,7 @@ Maintainer: Grant Likely Linus Walleij Chinese maintainer: Fu Wei --------------------------------------------------------------------- -Documentation/gpio.txt 的中文翻译 +Documentation/gpio 的中文翻译 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻 diff --git a/MAINTAINERS b/MAINTAINERS index cb468a535f32..653a2c29ca43 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13312,7 +13312,7 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://alsa-project.org/main/index.php/ASoC S: Supported F: Documentation/devicetree/bindings/sound/ -F: Documentation/sound/alsa/soc/ +F: Documentation/sound/soc/ F: sound/soc/ F: include/sound/soc* diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig index 0108c5991a04..e50d8fe4d36c 100644 --- a/drivers/hid/usbhid/Kconfig +++ b/drivers/hid/usbhid/Kconfig @@ -14,7 +14,7 @@ config USB_HID You can't use this driver and the HIDBP (Boot Protocol) keyboard and mouse 
drivers at the same time. More information is available: - . + . If unsure, say Y. diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig index ff8037798779..c5992cd195a1 100644 --- a/drivers/input/Kconfig +++ b/drivers/input/Kconfig @@ -16,7 +16,7 @@ config INPUT Say N here if you have a headless (no monitor, no keyboard) system. - More information is available: + More information is available: If unsure, say Y. @@ -144,7 +144,7 @@ config INPUT_JOYDEV If unsure, say Y. - More information is available: + More information is available: To compile this driver as a module, choose M here: the module will be called joydev. diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 9591fc04a8ab..32ec4cee6716 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -9,7 +9,7 @@ menuconfig INPUT_JOYSTICK and the list of supported devices will be displayed. This option doesn't affect the kernel. - Please read the file which + Please read the file which contains more information. if INPUT_JOYSTICK @@ -25,7 +25,7 @@ config JOYSTICK_ANALOG Flightstick Pro, ThrustMaster FCS, 6 and 8 button gamepads, or Saitek Cyborg joysticks. - Please read the file which + Please read the file which contains more information. To compile this driver as a module, choose M here: the diff --git a/drivers/input/joystick/iforce/Kconfig b/drivers/input/joystick/iforce/Kconfig index 8fde22a021b3..ab4dbcbcbf50 100644 --- a/drivers/input/joystick/iforce/Kconfig +++ b/drivers/input/joystick/iforce/Kconfig @@ -27,6 +27,6 @@ config JOYSTICK_IFORCE_232 connected to your serial (COM) port. You will need an additional utility called inputattach, see - - and . + + and . 
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index ca4530eb3378..d90d9f1098ff 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig @@ -47,7 +47,7 @@ config SERIO_SERPORT Say Y here if you plan to use an input device (mouse, joystick, tablet, 6dof) that communicates over the RS232 serial (COM) port. - More information is available: + More information is available: If unsure, say Y. @@ -78,7 +78,7 @@ config SERIO_PARKBD Say Y here if you built a simple parallel port adapter to attach an additional AT keyboard, XT keyboard or PS/2 mouse. - More information is available: + More information is available: If unsure, say N. diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt index 0ba6771654f7..72ba9da3d179 100644 --- a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt +++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt @@ -11,7 +11,7 @@ pool management for network interfaces. This document provides an overview the Linux DPIO driver, its subcomponents, and its APIs. -See Documentation/dpaa2/overview.txt for a general overview of DPAA2 +See Documentation/networking/dpaa2/overview.rst for a general overview of DPAA2 and the general DPAA2 driver architecture in Linux. Driver Overview diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c index 7f4e908330bf..812a36cb60c3 100644 --- a/drivers/video/fbdev/skeletonfb.c +++ b/drivers/video/fbdev/skeletonfb.c @@ -836,7 +836,7 @@ static void xxxfb_remove(struct pci_dev *dev) * @dev: PCI device * @msg: the suspend event code. * - * See Documentation/power/admin-guide/devices.rst for more information + * See Documentation/driver-api/pm/devices.rst for more information */ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg) { @@ -851,7 +851,7 @@ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg) * xxxfb_resume - Optional but recommended function. Resume the device. 
* @dev: PCI device * - * See Documentation/power/admin-guide/devices.rst for more information + * See Documentation/driver-api/pm/devices.rst for more information */ static int xxxfb_resume(struct pci_dev *dev) { @@ -915,7 +915,7 @@ static void __exit xxxfb_exit(void) * @dev: platform device * @msg: the suspend event code. * - * See Documentation/power/admin-guide/devices.rst for more information + * See Documentation/driver-api/pm/devices.rst for more information */ static int xxxfb_suspend(struct platform_device *dev, pm_message_t msg) { @@ -930,7 +930,7 @@ static int xxxfb_suspend(struct platform_device *dev, pm_message_t msg) * xxxfb_resume - Optional but recommended function. Resume the device. * @dev: platform device * - * See Documentation/power/admin-guide/devices.rst for more information + * See Documentation/driver-api/pm/devices.rst for more information */ static int xxxfb_resume(struct platform_dev *dev) { diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index c94f466d57ef..19a690b559ca 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -4,7 +4,7 @@ /* * Kernel Tracepoint API. * - * See Documentation/trace/tracepoints.txt. + * See Documentation/trace/tracepoints.rst. * * Copyright (C) 2008-2014 Mathieu Desnoyers * diff --git a/security/device_cgroup.c b/security/device_cgroup.c index c65b39bafdfe..cd97929fac66 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -509,7 +509,7 @@ static inline int may_allow_all(struct dev_cgroup *parent) * This is one of the three key functions for hierarchy implementation. * This function is responsible for re-evaluating all the cgroup's active * exceptions due to a parent's exception change. - * Refer to Documentation/cgroups/devices.txt for more details. + * Refer to Documentation/cgroup-v1/devices.txt for more details. 
*/ static void revalidate_active_exceptions(struct dev_cgroup *devcg) { -- cgit v1.2.3 From 5fb94e9ca333f0fe1d96de06704a79942b3832c3 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 8 May 2018 15:14:57 -0300 Subject: docs: Fix some broken references As we move stuff around, some doc references are broken. Fix some of them via this script: ./scripts/documentation-file-ref-check --fix Manually checked if the produced result is valid, removing a few false-positives. Acked-by: Takashi Iwai Acked-by: Masami Hiramatsu Acked-by: Stephen Boyd Acked-by: Charles Keepax Acked-by: Mathieu Poirier Reviewed-by: Coly Li Signed-off-by: Mauro Carvalho Chehab Acked-by: Jonathan Corbet --- Documentation/admin-guide/kernel-parameters.txt | 4 ++-- .../devicetree/bindings/input/rotary-encoder.txt | 2 +- Documentation/driver-api/gpio/consumer.rst | 2 +- Documentation/kprobes.txt | 4 ++-- Documentation/trace/coresight.txt | 2 +- Documentation/trace/ftrace-uses.rst | 2 +- Documentation/trace/histogram.txt | 2 +- Documentation/trace/intel_th.rst | 2 +- Documentation/trace/tracepoint-analysis.rst | 6 +++--- Documentation/translations/ja_JP/howto.rst | 4 ++-- Documentation/translations/zh_CN/magic-number.txt | 4 ++-- .../translations/zh_CN/video4linux/omap3isp.txt | 4 ++-- MAINTAINERS | 20 ++++++++++---------- arch/Kconfig | 2 +- arch/arm/include/asm/cacheflush.h | 2 +- arch/arm64/include/asm/cacheflush.h | 2 +- arch/microblaze/include/asm/cacheflush.h | 2 +- arch/um/Kconfig.um | 2 +- arch/unicore32/include/asm/cacheflush.h | 2 +- arch/x86/entry/vsyscall/vsyscall_64.c | 2 +- arch/xtensa/include/asm/cacheflush.h | 4 ++-- block/Kconfig | 2 +- certs/Kconfig | 2 +- crypto/asymmetric_keys/asymmetric_type.c | 2 +- crypto/asymmetric_keys/signature.c | 2 +- drivers/char/Kconfig | 2 +- drivers/clk/clk.c | 4 ++-- drivers/clk/ingenic/cgu.h | 2 +- drivers/gpu/vga/Kconfig | 2 +- drivers/gpu/vga/vgaarb.c | 2 +- drivers/input/joystick/Kconfig | 10 +++++----- 
drivers/input/joystick/walkera0701.c | 2 +- drivers/input/misc/Kconfig | 4 ++-- drivers/input/misc/rotary_encoder.c | 2 +- drivers/input/mouse/Kconfig | 6 +++--- drivers/input/mouse/alps.c | 2 +- drivers/input/touchscreen/wm97xx-core.c | 2 +- drivers/lightnvm/pblk-rb.c | 2 +- drivers/md/bcache/Kconfig | 2 +- drivers/md/bcache/btree.c | 2 +- drivers/md/bcache/extents.c | 2 +- drivers/media/dvb-core/dvb_ringbuffer.c | 2 +- drivers/media/pci/meye/Kconfig | 2 +- drivers/media/platform/pxa_camera.c | 4 ++-- .../media/platform/soc_camera/sh_mobile_ceu_camera.c | 2 +- drivers/media/radio/Kconfig | 2 +- drivers/media/radio/si470x/Kconfig | 2 +- drivers/media/usb/dvb-usb-v2/lmedm04.c | 2 +- drivers/media/usb/zr364xx/Kconfig | 2 +- drivers/parport/Kconfig | 6 +++--- drivers/staging/media/bcm2048/TODO | 2 +- include/keys/asymmetric-subtype.h | 2 +- include/keys/asymmetric-type.h | 2 +- include/linux/assoc_array.h | 2 +- include/linux/assoc_array_priv.h | 2 +- include/linux/circ_buf.h | 2 +- include/linux/ftrace.h | 2 +- include/linux/rculist_nulls.h | 2 +- include/uapi/linux/prctl.h | 2 +- include/xen/interface/io/kbdif.h | 2 +- kernel/cgroup/cpuset.c | 2 +- kernel/trace/Kconfig | 16 ++++++++-------- lib/Kconfig | 2 +- security/selinux/hooks.c | 2 +- sound/core/Kconfig | 4 ++-- sound/drivers/Kconfig | 4 ++-- sound/pci/Kconfig | 10 +++++----- tools/include/uapi/linux/prctl.h | 2 +- tools/lib/api/fs/fs.c | 2 +- tools/perf/util/bpf-prologue.c | 2 +- .../pm-graph/config/custom-timeline-functions.cfg | 4 ++-- 71 files changed, 113 insertions(+), 113 deletions(-) (limited to 'MAINTAINERS') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 638342d0a095..6fa3f31ed2a5 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4335,7 +4335,7 @@ [FTRACE] Set and start specified trace events in order to facilitate early boot debugging. 
The event-list is a comma separated list of trace events to enable. See - also Documentation/trace/events.txt + also Documentation/trace/events.rst trace_options=[option-list] [FTRACE] Enable or disable tracer options at boot. @@ -4350,7 +4350,7 @@ trace_options=stacktrace - See also Documentation/trace/ftrace.txt "trace options" + See also Documentation/trace/ftrace.rst "trace options" section. tp_printk[FTRACE] diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.txt b/Documentation/devicetree/bindings/input/rotary-encoder.txt index f99fe5cdeaec..a644408b33b8 100644 --- a/Documentation/devicetree/bindings/input/rotary-encoder.txt +++ b/Documentation/devicetree/bindings/input/rotary-encoder.txt @@ -28,7 +28,7 @@ Deprecated properties: This property is deprecated. Instead, a 'steps-per-period ' value should be used, such as "rotary-encoder,steps-per-period = <2>". -See Documentation/input/rotary-encoder.txt for more information. +See Documentation/input/devices/rotary-encoder.rst for more information. Example: diff --git a/Documentation/driver-api/gpio/consumer.rst b/Documentation/driver-api/gpio/consumer.rst index c71a50d85b50..aa03f389d41d 100644 --- a/Documentation/driver-api/gpio/consumer.rst +++ b/Documentation/driver-api/gpio/consumer.rst @@ -57,7 +57,7 @@ device that displays digits), an additional index argument can be specified:: enum gpiod_flags flags) For a more detailed description of the con_id parameter in the DeviceTree case -see Documentation/gpio/board.txt +see Documentation/driver-api/gpio/board.rst The flags parameter is used to optionally specify a direction and initial value for the GPIO. 
Values can be: diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 22208bf2386d..cb3b0de83fc6 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -724,8 +724,8 @@ migrate your tool to one of the following options: See following documents: - - Documentation/trace/kprobetrace.txt - - Documentation/trace/events.txt + - Documentation/trace/kprobetrace.rst + - Documentation/trace/events.rst - tools/perf/Documentation/perf-probe.txt diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt index 1d74ad0202b6..efbc832146e7 100644 --- a/Documentation/trace/coresight.txt +++ b/Documentation/trace/coresight.txt @@ -426,5 +426,5 @@ root@genericarmv8:~# Details on how to use the generic STM API can be found here [2]. [1]. Documentation/ABI/testing/sysfs-bus-coresight-devices-stm -[2]. Documentation/trace/stm.txt +[2]. Documentation/trace/stm.rst [3]. https://github.com/Linaro/perf-opencsd diff --git a/Documentation/trace/ftrace-uses.rst b/Documentation/trace/ftrace-uses.rst index 00283b6dd101..1fbc69894eed 100644 --- a/Documentation/trace/ftrace-uses.rst +++ b/Documentation/trace/ftrace-uses.rst @@ -199,7 +199,7 @@ If @buf is NULL and reset is set, all functions will be enabled for tracing. The @buf can also be a glob expression to enable all functions that match a specific pattern. -See Filter Commands in :file:`Documentation/trace/ftrace.txt`. +See Filter Commands in :file:`Documentation/trace/ftrace.rst`. To just trace the schedule function: diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.txt index b13771cb12c1..e73bcf9cb5f3 100644 --- a/Documentation/trace/histogram.txt +++ b/Documentation/trace/histogram.txt @@ -7,7 +7,7 @@ Histogram triggers are special event triggers that can be used to aggregate trace event data into histograms. For information on - trace events and event triggers, see Documentation/trace/events.txt. 
+ trace events and event triggers, see Documentation/trace/events.rst. 2. Histogram Trigger Command diff --git a/Documentation/trace/intel_th.rst b/Documentation/trace/intel_th.rst index 990f13265178..19e2d633f3c7 100644 --- a/Documentation/trace/intel_th.rst +++ b/Documentation/trace/intel_th.rst @@ -38,7 +38,7 @@ description is at Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth. STH registers an stm class device, through which it provides interface to userspace and kernelspace software trace sources. See -Documentation/trace/stm.txt for more information on that. +Documentation/trace/stm.rst for more information on that. MSU can be configured to collect trace data into a system memory buffer, which can later on be read from its device nodes via read() or diff --git a/Documentation/trace/tracepoint-analysis.rst b/Documentation/trace/tracepoint-analysis.rst index bef37abf4ad3..716326b9f152 100644 --- a/Documentation/trace/tracepoint-analysis.rst +++ b/Documentation/trace/tracepoint-analysis.rst @@ -55,7 +55,7 @@ simple case of:: 3.1 System-Wide Event Enabling ------------------------------ -See Documentation/trace/events.txt for a proper description on how events +See Documentation/trace/events.rst for a proper description on how events can be enabled system-wide. A short example of enabling all events related to page allocation would look something like:: @@ -112,7 +112,7 @@ at that point. 3.4 Local Event Enabling ------------------------ -Documentation/trace/ftrace.txt describes how to enable events on a per-thread +Documentation/trace/ftrace.rst describes how to enable events on a per-thread basis using set_ftrace_pid. 3.5 Local Event Enablement with PCL @@ -137,7 +137,7 @@ basis using PCL such as follows. 4. Event Filtering ================== -Documentation/trace/ftrace.txt covers in-depth how to filter events in +Documentation/trace/ftrace.rst covers in-depth how to filter events in ftrace. 
Obviously using grep and awk of trace_pipe is an option as well as any script reading trace_pipe. diff --git a/Documentation/translations/ja_JP/howto.rst b/Documentation/translations/ja_JP/howto.rst index 8d7ed0cbbf5f..f3116381c26b 100644 --- a/Documentation/translations/ja_JP/howto.rst +++ b/Documentation/translations/ja_JP/howto.rst @@ -1,5 +1,5 @@ NOTE: -This is a version of Documentation/HOWTO translated into Japanese. +This is a version of Documentation/process/howto.rst translated into Japanese. This document is maintained by Tsugikazu Shibata If you find any difference between this document and the original file or a problem with the translation, please contact the maintainer of this file. @@ -109,7 +109,7 @@ linux-api@vger.kernel.org に送ることを勧めます。 ています。 カーネルに関して初めての人はここからスタートすると良い でしょう。 - :ref:`Documentation/Process/changes.rst ` + :ref:`Documentation/process/changes.rst ` このファイルはカーネルをうまく生成(訳注 build )し、走らせるのに最 小限のレベルで必要な数々のソフトウェアパッケージの一覧を示してい ます。 diff --git a/Documentation/translations/zh_CN/magic-number.txt b/Documentation/translations/zh_CN/magic-number.txt index e9db693c0a23..7159cec04090 100644 --- a/Documentation/translations/zh_CN/magic-number.txt +++ b/Documentation/translations/zh_CN/magic-number.txt @@ -1,4 +1,4 @@ -Chinese translated version of Documentation/magic-number.txt +Chinese translated version of Documentation/process/magic-number.rst If you have any comment or update to the content, please post to LKML directly. However, if you have problem communicating in English you can also ask the @@ -7,7 +7,7 @@ translation is outdated or there is problem with translation. 
Chinese maintainer: Jia Wei Wei --------------------------------------------------------------------- -Documentation/magic-number.txt的中文翻译 +Documentation/process/magic-number.rst的中文翻译 如果想评论或更新本文的内容,请直接发信到LKML。如果你使用英文交流有困难的话,也可 以向中文版维护者求助。如果本翻译更新不及时或者翻译存在问题,请联系中文版维护者。 diff --git a/Documentation/translations/zh_CN/video4linux/omap3isp.txt b/Documentation/translations/zh_CN/video4linux/omap3isp.txt index 67ffbf352ae0..e9f29375aa95 100644 --- a/Documentation/translations/zh_CN/video4linux/omap3isp.txt +++ b/Documentation/translations/zh_CN/video4linux/omap3isp.txt @@ -1,4 +1,4 @@ -Chinese translated version of Documentation/video4linux/omap3isp.txt +Chinese translated version of Documentation/media/v4l-drivers/omap3isp.rst If you have any comment or update to the content, please contact the original document maintainer directly. However, if you have a problem @@ -11,7 +11,7 @@ Maintainer: Laurent Pinchart David Cohen Chinese maintainer: Fu Wei --------------------------------------------------------------------- -Documentation/video4linux/omap3isp.txt 的中文翻译 +Documentation/media/v4l-drivers/omap3isp.rst 的中文翻译 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻 diff --git a/MAINTAINERS b/MAINTAINERS index 653a2c29ca43..09554034be46 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3079,7 +3079,7 @@ M: Clemens Ladisch L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.alsa-project.org/alsa-kernel.git S: Maintained -F: Documentation/sound/alsa/Bt87x.txt +F: Documentation/sound/cards/bt87x.rst F: sound/pci/bt87x.c BT8XXGPIO DRIVER @@ -3375,7 +3375,7 @@ M: David Howells M: David Woodhouse L: keyrings@vger.kernel.org S: Maintained -F: Documentation/module-signing.txt +F: Documentation/admin-guide/module-signing.rst F: certs/ F: scripts/sign-file.c F: scripts/extract-cert.c @@ -6501,7 +6501,7 @@ L: linux-mm@kvack.org S: Maintained F: mm/hmm* F: include/linux/hmm* -F: Documentation/vm/hmm.txt +F: Documentation/vm/hmm.rst HOST AP 
DRIVER M: Jouni Malinen @@ -7401,7 +7401,7 @@ F: drivers/platform/x86/intel-wmi-thunderbolt.c INTEL(R) TRACE HUB M: Alexander Shishkin S: Supported -F: Documentation/trace/intel_th.txt +F: Documentation/trace/intel_th.rst F: drivers/hwtracing/intel_th/ INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) @@ -9665,7 +9665,7 @@ F: include/uapi/linux/mmc/ MULTIPLEXER SUBSYSTEM M: Peter Rosin S: Maintained -F: Documentation/ABI/testing/mux/sysfs-class-mux* +F: Documentation/ABI/testing/sysfs-class-mux* F: Documentation/devicetree/bindings/mux/ F: include/linux/dt-bindings/mux/ F: include/linux/mux/ @@ -10244,7 +10244,7 @@ F: arch/powerpc/include/asm/pnv-ocxl.h F: drivers/misc/ocxl/ F: include/misc/ocxl* F: include/uapi/misc/ocxl.h -F: Documentation/accelerators/ocxl.txt +F: Documentation/accelerators/ocxl.rst OMAP AUDIO SUPPORT M: Peter Ujfalusi @@ -13794,7 +13794,7 @@ SYSTEM TRACE MODULE CLASS M: Alexander Shishkin S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git -F: Documentation/trace/stm.txt +F: Documentation/trace/stm.rst F: drivers/hwtracing/stm/ F: include/linux/stm.h F: include/uapi/linux/stm.h @@ -14471,7 +14471,7 @@ M: Steven Rostedt M: Ingo Molnar T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core S: Maintained -F: Documentation/trace/ftrace.txt +F: Documentation/trace/ftrace.rst F: arch/*/*/*/ftrace.h F: arch/*/kernel/ftrace.c F: include/*/ftrace.h @@ -14940,7 +14940,7 @@ M: Heikki Krogerus L: linux-usb@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-class-typec -F: Documentation/usb/typec.rst +F: Documentation/driver-api/usb/typec.rst F: drivers/usb/typec/ F: include/linux/usb/typec.h @@ -15770,7 +15770,7 @@ YEALINK PHONE DRIVER M: Henk Vergonet L: usbb2k-api-dev@nongnu.org S: Maintained -F: Documentation/input/yealink.rst +F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* Z8530 DRIVER FOR AX.25 diff --git a/arch/Kconfig b/arch/Kconfig index 47b235d43909..1aa59063f1fd 
100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -403,7 +403,7 @@ config SECCOMP_FILTER in terms of Berkeley Packet Filter programs which implement task-defined system call filtering polices. - See Documentation/prctl/seccomp_filter.txt for details. + See Documentation/userspace-api/seccomp_filter.rst for details. preferred-plugin-hostcc := $(if-success,[ $(gcc-version) -ge 40800 ],$(HOSTCXX),$(HOSTCC)) diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 869080bedb89..ec1a5fd0d294 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -35,7 +35,7 @@ * Start addresses are inclusive and end addresses are exclusive; * start addresses should be rounded down, end addresses up. * - * See Documentation/cachetlb.txt for more information. + * See Documentation/core-api/cachetlb.rst for more information. * Please note that the implementation of these, and the required * effects are cache-type (VIVT/VIPT/PIPT) specific. * diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 0094c6653b06..d264a7274811 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -36,7 +36,7 @@ * Start addresses are inclusive and end addresses are exclusive; start * addresses should be rounded down, end addresses up. * - * See Documentation/cachetlb.txt for more information. Please note that + * See Documentation/core-api/cachetlb.rst for more information. Please note that * the implementation assumes non-aliasing VIPT D-cache and (aliasing) * VIPT I-cache. * diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h index ffea82a16d2c..b091de77b15b 100644 --- a/arch/microblaze/include/asm/cacheflush.h +++ b/arch/microblaze/include/asm/cacheflush.h @@ -19,7 +19,7 @@ #include #include -/* Look at Documentation/cachetlb.txt */ +/* Look at Documentation/core-api/cachetlb.rst */ /* * Cache handling functions. 
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um index 3e7f228b22e1..20da5a8ca949 100644 --- a/arch/um/Kconfig.um +++ b/arch/um/Kconfig.um @@ -80,7 +80,7 @@ config MAGIC_SYSRQ On UML, this is accomplished by sending a "sysrq" command with mconsole, followed by the letter for the requested command. - The keys are documented in . Don't say Y + The keys are documented in . Don't say Y unless you really know what this hack does. config KERNEL_STACK_ORDER diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h index 1d9132b66039..1c8b9f13a9e1 100644 --- a/arch/unicore32/include/asm/cacheflush.h +++ b/arch/unicore32/include/asm/cacheflush.h @@ -33,7 +33,7 @@ * Start addresses are inclusive and end addresses are exclusive; * start addresses should be rounded down, end addresses up. * - * See Documentation/cachetlb.txt for more information. + * See Documentation/core-api/cachetlb.rst for more information. * Please note that the implementation of these, and the required * effects are cache-type (VIVT/VIPT/PIPT) specific. * diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 7782cdbcd67d..82ed001e8909 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -201,7 +201,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) /* * Handle seccomp. regs->ip must be the original value. - * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt. + * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst. * * We could optimize the seccomp disabled case, but performance * here doesn't matter. 
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index 397d6a1a4224..a0d50be5a8cb 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -88,7 +88,7 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, * * Pages can get remapped. Because this might change the 'color' of that page, * we have to flush the cache before the PTE is changed. - * (see also Documentation/cachetlb.txt) + * (see also Documentation/core-api/cachetlb.rst) */ #if defined(CONFIG_MMU) && \ @@ -152,7 +152,7 @@ void local_flush_cache_page(struct vm_area_struct *vma, __invalidate_icache_range(start,(end) - (start)); \ } while (0) -/* This is not required, see Documentation/cachetlb.txt */ +/* This is not required, see Documentation/core-api/cachetlb.rst */ #define flush_icache_page(vma,page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) diff --git a/block/Kconfig b/block/Kconfig index 28ec55752b68..eb50fd4977c2 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -114,7 +114,7 @@ config BLK_DEV_THROTTLING one needs to mount and use blkio cgroup controller for creating cgroups and specifying per device IO rate policies. - See Documentation/cgroups/blkio-controller.txt for more information. + See Documentation/cgroup-v1/blkio-controller.txt for more information. 
config BLK_DEV_THROTTLING_LOW bool "Block throttling .low limit interface support (EXPERIMENTAL)" diff --git a/certs/Kconfig b/certs/Kconfig index 5f7663df6e8e..c94e93d8bccf 100644 --- a/certs/Kconfig +++ b/certs/Kconfig @@ -13,7 +13,7 @@ config MODULE_SIG_KEY If this option is unchanged from its default "certs/signing_key.pem", then the kernel will automatically generate the private key and - certificate as described in Documentation/module-signing.txt + certificate as described in Documentation/admin-guide/module-signing.rst config SYSTEM_TRUSTED_KEYRING bool "Provide system-wide ring of trusted keys" diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index 39aecad286fe..26539e9a8bda 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -1,6 +1,6 @@ /* Asymmetric public-key cryptography key type * - * See Documentation/security/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 11b7ba170904..28198314bc39 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -1,6 +1,6 @@ /* Signature verification with an asymmetric key * - * See Documentation/security/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 410c30c42120..212f447938ae 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -81,7 +81,7 @@ config PRINTER corresponding drivers into the kernel. To compile this driver as a module, choose M here and read - . The module will be called lp. + . The module will be called lp. 
If you have several parallel ports, you can specify which ports to use with the "lp" kernel command line option. (Try "man bootparam" diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index a24a6afb50b6..9760b526ca31 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -6,7 +6,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * Standard functionality for the common clock API. See Documentation/clk.txt + * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst */ #include @@ -2747,7 +2747,7 @@ static int __clk_core_init(struct clk_core *core) goto out; } - /* check that clk_ops are sane. See Documentation/clk.txt */ + /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ if (core->ops->set_rate && !((core->ops->round_rate || core->ops->determine_rate) && core->ops->recalc_rate)) { diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h index 542192376ebf..502bcbb61b04 100644 --- a/drivers/clk/ingenic/cgu.h +++ b/drivers/clk/ingenic/cgu.h @@ -194,7 +194,7 @@ struct ingenic_cgu { /** * struct ingenic_clk - private data for a clock - * @hw: see Documentation/clk.txt + * @hw: see Documentation/driver-api/clk.rst * @cgu: a pointer to the CGU data * @idx: the index of this clock in cgu->clock_info */ diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index 29437eabe095..b677e5d524e6 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig @@ -6,7 +6,7 @@ config VGA_ARB Some "legacy" VGA devices implemented on PCI typically have the same hard-decoded addresses as they did on ISA. When multiple PCI devices are accessed at same time they need some kind of coordination. Please - see Documentation/vgaarbiter.txt for more details. Select this to + see Documentation/gpu/vgaarbiter.rst for more details. Select this to enable VGA arbiter. 
config VGA_ARB_MAX_GPUS diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 1c5e74cb9279..c61b04555779 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -1,6 +1,6 @@ /* * vgaarb.c: Implements the VGA arbitration. For details refer to - * Documentation/vgaarbiter.txt + * Documentation/gpu/vgaarbiter.rst * * * (C) Copyright 2005 Benjamin Herrenschmidt diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 32ec4cee6716..d8f9c6e1fc08 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -214,7 +214,7 @@ config JOYSTICK_DB9 gamepad, Sega Saturn gamepad, or a Multisystem -- Atari, Amiga, Commodore, Amstrad CPC joystick connected to your parallel port. For more information on how to use the driver please read - . + . To compile this driver as a module, choose M here: the module will be called db9. @@ -229,7 +229,7 @@ config JOYSTICK_GAMECON Sony PlayStation gamepad or a Multisystem -- Atari, Amiga, Commodore, Amstrad CPC joystick connected to your parallel port. For more information on how to use the driver please read - . + . To compile this driver as a module, choose M here: the module will be called gamecon. @@ -241,7 +241,7 @@ config JOYSTICK_TURBOGRAFX Say Y here if you have the TurboGraFX interface by Steffen Schwenke, and want to use it with Multisystem -- Atari, Amiga, Commodore, Amstrad CPC joystick. For more information on how to use the driver - please read . + please read . To compile this driver as a module, choose M here: the module will be called turbografx. @@ -287,7 +287,7 @@ config JOYSTICK_XPAD and/or "Event interface support" (CONFIG_INPUT_EVDEV) as well. For information about how to connect the X-Box pad to USB, see - . + . To compile this driver as a module, choose M here: the module will be called xpad. 
@@ -313,7 +313,7 @@ config JOYSTICK_WALKERA0701 Say Y or M here if you have a Walkera WK-0701 transmitter which is supplied with a ready to fly Walkera helicopters such as HM36, HM37, HM60 and want to use it via parport as a joystick. More - information is available: + information is available: To compile this driver as a module, choose M here: the module will be called walkera0701. diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index 36a5b93156ed..dce313dc260a 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -3,7 +3,7 @@ * * Copyright (c) 2008 Peter Popovec * - * More about driver: + * More about driver: */ /* diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 572b15fa18c2..c25606e00693 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -411,7 +411,7 @@ config INPUT_YEALINK usb sound driver, so you might want to enable that as well. For information about how to use these additional functions, see - . + . To compile this driver as a module, choose M here: the module will be called yealink. @@ -595,7 +595,7 @@ config INPUT_GPIO_ROTARY_ENCODER depends on GPIOLIB || COMPILE_TEST help Say Y here to add support for rotary encoders connected to GPIO lines. - Check file:Documentation/input/rotary-encoder.txt for more + Check file:Documentation/input/devices/rotary-encoder.rst for more information. To compile this driver as a module, choose M here: the diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c index 6d304381fc30..30ec77ad32c6 100644 --- a/drivers/input/misc/rotary_encoder.c +++ b/drivers/input/misc/rotary_encoder.c @@ -7,7 +7,7 @@ * state machine code inspired by code from Tim Ruetz * * A generic driver for rotary encoders connected to GPIO lines. 
- * See file:Documentation/input/rotary-encoder.txt for more information + * See file:Documentation/input/devices/rotary-encoder.rst for more information * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index f27f23f2d99a..566a1e3aa504 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig @@ -129,7 +129,7 @@ config MOUSE_PS2_ELANTECH This driver exposes some configuration registers via sysfs entries. For further information, - see . + see . If unsure, say N. @@ -228,7 +228,7 @@ config MOUSE_APPLETOUCH scrolling in X11. For further information, see - . + . To compile this driver as a module, choose M here: the module will be called appletouch. @@ -251,7 +251,7 @@ config MOUSE_BCM5974 The interface is currently identical to the appletouch interface, for further information, see - . + . To compile this driver as a module, choose M here: the module will be called bcm5974. 
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index cb5579716dba..0a6f7ca883e7 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -212,7 +212,7 @@ static void alps_set_abs_params_v7(struct alps_data *priv, static void alps_set_abs_params_ss4_v2(struct alps_data *priv, struct input_dev *dev1); -/* Packet formats are described in Documentation/input/alps.txt */ +/* Packet formats are described in Documentation/input/devices/alps.rst */ static bool alps_is_valid_first_byte(struct alps_data *priv, unsigned char data) diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index fd714ee881f7..2566b4d8b342 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -68,7 +68,7 @@ * The default values correspond to Mainstone II in QVGA mode * * Please read - * Documentation/input/input-programming.txt for more details. + * Documentation/input/input-programming.rst for more details. */ static int abs_x[3] = {150, 4000, 5}; diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 00cd1f20a196..55e9442a99e2 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -38,7 +38,7 @@ void pblk_rb_data_free(struct pblk_rb *rb) /* * Initialize ring buffer. The data and metadata buffers must be previously * allocated and their size must be a power of two - * (Documentation/circular-buffers.txt) + * (Documentation/core-api/circular-buffers.rst) */ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base, unsigned int power_size, unsigned int power_seg_sz) diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index 4d200883c505..17bf109c58e9 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -5,7 +5,7 @@ config BCACHE Allows a block device to be used as cache for other devices; uses a btree for indexing and the layout is optimized for SSDs. 
- See Documentation/bcache.txt for details. + See Documentation/admin-guide/bcache.rst for details. config BCACHE_DEBUG bool "Bcache debugging" diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 2a0968c04e21..547c9eedc2f4 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -18,7 +18,7 @@ * as keys are inserted we only sort the pages that have not yet been written. * When garbage collection is run, we resort the entire node. * - * All configuration is done via sysfs; see Documentation/bcache.txt. + * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst. */ #include "bcache.h" diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index c334e6666461..1d096742eb41 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -18,7 +18,7 @@ * as keys are inserted we only sort the pages that have not yet been written. * When garbage collection is run, we resort the entire node. * - * All configuration is done via sysfs; see Documentation/bcache.txt. + * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst. 
*/ #include "bcache.h" diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c index 4330b6fa4af2..d1d471af0636 100644 --- a/drivers/media/dvb-core/dvb_ringbuffer.c +++ b/drivers/media/dvb-core/dvb_ringbuffer.c @@ -55,7 +55,7 @@ int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf) * this pairs with smp_store_release() in dvb_ringbuffer_write(), * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() * - * for memory barriers also see Documentation/circular-buffers.txt + * for memory barriers also see Documentation/core-api/circular-buffers.rst */ return (rbuf->pread == smp_load_acquire(&rbuf->pwrite)); } diff --git a/drivers/media/pci/meye/Kconfig b/drivers/media/pci/meye/Kconfig index 2e60334ffef5..9a50f54231ad 100644 --- a/drivers/media/pci/meye/Kconfig +++ b/drivers/media/pci/meye/Kconfig @@ -5,7 +5,7 @@ config VIDEO_MEYE ---help--- This is the video4linux driver for the Motion Eye camera found in the Vaio Picturebook laptops. Please read the material in - for more information. + for more information. If you say Y or M here, you need to say Y or M to "Sony Laptop Extras" in the misc device section. 
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index 4d5a26b4cdda..d85ffbfb7c1f 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -1021,7 +1021,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev, * - a videobuffer is queued on the pcdev->capture list * * Please check the "DMA hot chaining timeslice issue" in - * Documentation/video4linux/pxa_camera.txt + * Documentation/media/v4l-drivers/pxa_camera.rst * * Context: should only be called within the dma irq handler */ @@ -1443,7 +1443,7 @@ static void pxac_vb2_queue(struct vb2_buffer *vb) /* * Please check the DMA prepared buffer structure in : - * Documentation/video4linux/pxa_camera.txt + * Documentation/media/v4l-drivers/pxa_camera.rst * Please check also in pxa_camera_check_link_miss() to understand why DMA chain * modification while DMA chain is running will work anyway. */ diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 242342fd7ede..9897213f2618 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c @@ -1111,7 +1111,7 @@ static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd) /* * CEU can scale and crop, but we don't want to waste bandwidth and kill the * framerate by always requesting the maximum image from the client. See - * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of + * Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst for a description of * scaling and cropping algorithms and for the meaning of referenced here steps. 
*/ static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd, diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig index 39b04ad924c0..b426d6f9787d 100644 --- a/drivers/media/radio/Kconfig +++ b/drivers/media/radio/Kconfig @@ -272,7 +272,7 @@ config RADIO_RTRACK been reported to be used by these cards. More information is contained in the file - . + . To compile this driver as a module, choose M here: the module will be called radio-aimslab. diff --git a/drivers/media/radio/si470x/Kconfig b/drivers/media/radio/si470x/Kconfig index a21172e413a9..6dbb158cd2a0 100644 --- a/drivers/media/radio/si470x/Kconfig +++ b/drivers/media/radio/si470x/Kconfig @@ -29,7 +29,7 @@ config USB_SI470X Please have a look at the documentation, especially on how to redirect the audio stream from the radio to your sound device: - Documentation/video4linux/si470x.txt + Documentation/media/v4l-drivers/si470x.rst Say Y here if you want to connect this type of radio to your computer's USB port. diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index be26c029546b..39db6dc4b5cd 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -21,7 +21,7 @@ * * LME2510C + M88RS2000 * - * For firmware see Documentation/dvb/lmedm04.txt + * For firmware see Documentation/media/dvb-drivers/lmedm04.rst * * I2C addresses: * 0xd0 - STV0288 - Demodulator diff --git a/drivers/media/usb/zr364xx/Kconfig b/drivers/media/usb/zr364xx/Kconfig index 0f585662881d..ac429bca70e8 100644 --- a/drivers/media/usb/zr364xx/Kconfig +++ b/drivers/media/usb/zr364xx/Kconfig @@ -6,7 +6,7 @@ config USB_ZR364XX ---help--- Say Y here if you want to connect this type of camera to your computer's USB port. - See for more info + See for more info and list of supported cameras. 
To compile this driver as a module, choose M here: the diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index 44333bd8f908..a97f4eada60b 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig @@ -20,7 +20,7 @@ menuconfig PARPORT drive, PLIP link (Parallel Line Internet Protocol is mainly used to create a mini network by connecting the parallel ports of two local machines) etc., then you need to say Y here; please read - and + and . For extensive information about drivers for many devices attaching @@ -33,7 +33,7 @@ menuconfig PARPORT the module will be called parport. If you have more than one parallel port and want to specify which port and IRQ to be used by this driver at module load time, take a - look at . + look at . If unsure, say Y. @@ -71,7 +71,7 @@ config PARPORT_PC_FIFO As well as actually having a FIFO, or DMA capability, the kernel will need to know which IRQ the parallel port has. By default, parallel port interrupts will not be used, and so neither will the - FIFO. See to find out how to + FIFO. See to find out how to specify which IRQ/DMA to use. config PARPORT_PC_SUPERIO diff --git a/drivers/staging/media/bcm2048/TODO b/drivers/staging/media/bcm2048/TODO index 051f85dbe89e..6bee2a2dad68 100644 --- a/drivers/staging/media/bcm2048/TODO +++ b/drivers/staging/media/bcm2048/TODO @@ -3,7 +3,7 @@ TODO: From the initial code review: The main thing you need to do is to implement all the controls using the -control framework (see Documentation/video4linux/v4l2-controls.txt). +control framework (see Documentation/media/kapi/v4l2-controls.rst). Most drivers are by now converted to the control framework, so you will find many examples of how to do this in drivers/media/radio. 
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h index 2480469ce8fb..e0a9c2368872 100644 --- a/include/keys/asymmetric-subtype.h +++ b/include/keys/asymmetric-subtype.h @@ -1,6 +1,6 @@ /* Asymmetric public-key cryptography key subtype * - * See Documentation/security/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index b38240716d41..1cb77cd5135e 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -1,6 +1,6 @@ /* Asymmetric Public-key cryptography key type interface * - * See Documentation/security/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h index a89df3be1686..65e3832f96b2 100644 --- a/include/linux/assoc_array.h +++ b/include/linux/assoc_array.h @@ -1,6 +1,6 @@ /* Generic associative array implementation. * - * See Documentation/assoc_array.txt for information. + * See Documentation/core-api/assoc_array.rst for information. * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h index 711275e6681c..a00a06550c10 100644 --- a/include/linux/assoc_array_priv.h +++ b/include/linux/assoc_array_priv.h @@ -1,6 +1,6 @@ /* Private definitions for the generic associative array implementation. * - * See Documentation/assoc_array.txt for information. + * See Documentation/core-api/assoc_array.rst for information. * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h index 7cf262a421c3..b3233e8202f9 100644 --- a/include/linux/circ_buf.h +++ b/include/linux/circ_buf.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * See Documentation/circular-buffers.txt for more information. + * See Documentation/core-api/circular-buffers.rst for more information. */ #ifndef _LINUX_CIRC_BUF_H diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9c3c9a319e48..8154f4920fcb 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Ftrace header. For implementation details beyond the random comments - * scattered below, see: Documentation/trace/ftrace-design.txt + * scattered below, see: Documentation/trace/ftrace-design.rst */ #ifndef _LINUX_FTRACE_H diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index e4b257ff881b..bc8206a8f30e 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -109,7 +109,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, * * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] - * [1] Documentation/atomic_ops.txt around line 114 + * [1] Documentation/core-api/atomic_ops.rst around line 114 * [2] Documentation/RCU/rculist_nulls.txt around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index db9f15f5db04..c0d7ea0bf5b6 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -170,7 +170,7 @@ struct prctl_mm_map { * asking selinux for a specific new context (e.g. with runcon) will result * in execve returning -EPERM. * - * See Documentation/prctl/no_new_privs.txt for more details. + * See Documentation/userspace-api/no_new_privs.rst for more details. 
*/ #define PR_SET_NO_NEW_PRIVS 38 #define PR_GET_NO_NEW_PRIVS 39 diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h index 2a9510ade701..e2340a4130cf 100644 --- a/include/xen/interface/io/kbdif.h +++ b/include/xen/interface/io/kbdif.h @@ -317,7 +317,7 @@ struct xenkbd_position { * Linux [2] and Windows [3] multi-touch support. * * [1] https://cgit.freedesktop.org/wayland/wayland/tree/protocol/wayland.xml - * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.txt + * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.rst * [3] https://msdn.microsoft.com/en-us/library/jj151564(v=vs.85).aspx * * diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index d8b12e0d39cd..266f10cb7222 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -605,7 +605,7 @@ static inline int nr_cpusets(void) * load balancing domains (sched domains) as specified by that partial * partition. * - * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt + * See "What is sched_load_balance" in Documentation/cgroup-v1/cpusets.txt * for a background explanation of this. 
* * Does not return errors, on the theory that the callers of this diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index dd6c0a2ad969..dcc0166d1997 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -12,22 +12,22 @@ config NOP_TRACER config HAVE_FTRACE_NMI_ENTER bool help - See Documentation/trace/ftrace-design.txt + See Documentation/trace/ftrace-design.rst config HAVE_FUNCTION_TRACER bool help - See Documentation/trace/ftrace-design.txt + See Documentation/trace/ftrace-design.rst config HAVE_FUNCTION_GRAPH_TRACER bool help - See Documentation/trace/ftrace-design.txt + See Documentation/trace/ftrace-design.rst config HAVE_DYNAMIC_FTRACE bool help - See Documentation/trace/ftrace-design.txt + See Documentation/trace/ftrace-design.rst config HAVE_DYNAMIC_FTRACE_WITH_REGS bool @@ -35,12 +35,12 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS config HAVE_FTRACE_MCOUNT_RECORD bool help - See Documentation/trace/ftrace-design.txt + See Documentation/trace/ftrace-design.rst config HAVE_SYSCALL_TRACEPOINTS bool help - See Documentation/trace/ftrace-design.txt + See Documentation/trace/ftrace-design.rst config HAVE_FENTRY bool @@ -448,7 +448,7 @@ config KPROBE_EVENTS help This allows the user to add tracing events (similar to tracepoints) on the fly via the ftrace interface. See - Documentation/trace/kprobetrace.txt for more details. + Documentation/trace/kprobetrace.rst for more details. Those events can be inserted wherever kprobes can probe, and record various register and memory values. @@ -575,7 +575,7 @@ config MMIOTRACE implementation and works via page faults. Tracing is disabled by default and can be enabled at run-time. - See Documentation/trace/mmiotrace.txt. + See Documentation/trace/mmiotrace.rst. If you are not helping to develop drivers, say N. 
config TRACING_MAP diff --git a/lib/Kconfig b/lib/Kconfig index 809fdd155739..e34b04b56057 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -405,7 +405,7 @@ config ASSOCIATIVE_ARRAY See: - Documentation/assoc_array.txt + Documentation/core-api/assoc_array.rst for more information. diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 9a46dc24ac10..2b5ee5fbd652 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4728,7 +4728,7 @@ err_af: } /* This supports connect(2) and SCTP connect services such as sctp_connectx(3) - * and sctp_sendmsg(3) as described in Documentation/security/LSM-sctp.txt + * and sctp_sendmsg(3) as described in Documentation/security/LSM-sctp.rst */ static int selinux_socket_connect_helper(struct socket *sock, struct sockaddr *address, int addrlen) diff --git a/sound/core/Kconfig b/sound/core/Kconfig index 6e937a8146a1..63b3ef9c83f5 100644 --- a/sound/core/Kconfig +++ b/sound/core/Kconfig @@ -48,7 +48,7 @@ config SND_MIXER_OSS depends on SND_OSSEMUL help To enable OSS mixer API emulation (/dev/mixer*), say Y here - and read . + and read . Many programs still use the OSS API, so say Y. @@ -61,7 +61,7 @@ config SND_PCM_OSS select SND_PCM help To enable OSS digital audio (PCM) emulation (/dev/dsp*), say Y - here and read . + here and read . Many programs still use the OSS API, so say Y. diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig index 7144cc36e8ae..648a12da44f9 100644 --- a/sound/drivers/Kconfig +++ b/sound/drivers/Kconfig @@ -153,7 +153,7 @@ config SND_SERIAL_U16550 select SND_RAWMIDI help To include support for MIDI serial port interfaces, say Y here - and read . + and read . This driver works with serial UARTs 16550 and better. This driver accesses the serial port hardware directly, so @@ -223,7 +223,7 @@ config SND_AC97_POWER_SAVE the device frequently. A value of 10 seconds would be a good choice for normal operations. - See Documentation/sound/alsa/powersave.txt for more details. 
+ See Documentation/sound/designs/powersave.rst for more details. config SND_AC97_POWER_SAVE_DEFAULT int "Default time-out for AC97 power-save mode" diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig index d9f3fdb777e4..4105d9f653d9 100644 --- a/sound/pci/Kconfig +++ b/sound/pci/Kconfig @@ -175,7 +175,7 @@ config SND_BT87X help If you want to record audio from TV cards based on Brooktree Bt878/Bt879 chips, say Y here and read - . + . To compile this driver as a module, choose M here: the module will be called snd-bt87x. @@ -210,7 +210,7 @@ config SND_CMIPCI help If you want to use soundcards based on C-Media CMI8338, CMI8738, CMI8768 or CMI8770 chips, say Y here and read - . + . To compile this driver as a module, choose M here: the module will be called snd-cmipci. @@ -472,8 +472,8 @@ config SND_EMU10K1 Audigy and E-mu APS (partially supported) soundcards. The confusing multitude of mixer controls is documented in - and - . + and + . To compile this driver as a module, choose M here: the module will be called snd-emu10k1. @@ -735,7 +735,7 @@ config SND_MIXART select SND_PCM help If you want to use Digigram miXart soundcards, say Y here and - read . + read . To compile this driver as a module, choose M here: the module will be called snd-mixart. diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index db9f15f5db04..c0d7ea0bf5b6 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -170,7 +170,7 @@ struct prctl_mm_map { * asking selinux for a specific new context (e.g. with runcon) will result * in execve returning -EPERM. * - * See Documentation/prctl/no_new_privs.txt for more details. + * See Documentation/userspace-api/no_new_privs.rst for more details. 
*/ #define PR_SET_NO_NEW_PRIVS 38 #define PR_GET_NO_NEW_PRIVS 39 diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c index 6a12bbf39f7b..7aba8243a0e7 100644 --- a/tools/lib/api/fs/fs.c +++ b/tools/lib/api/fs/fs.c @@ -201,7 +201,7 @@ static void mem_toupper(char *f, size_t len) /* * Check for "NAME_PATH" environment variable to override fs location (for - * testing). This matches the recommendation in Documentation/sysfs-rules.txt + * testing). This matches the recommendation in Documentation/admin-guide/sysfs-rules.rst * for SYSFS_PATH. */ static bool fs__env_override(struct fs *fs) diff --git a/tools/perf/util/bpf-prologue.c b/tools/perf/util/bpf-prologue.c index 29347756b0af..77e4891e17b0 100644 --- a/tools/perf/util/bpf-prologue.c +++ b/tools/perf/util/bpf-prologue.c @@ -61,7 +61,7 @@ check_pos(struct bpf_insn_pos *pos) /* * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see - * Documentation/trace/kprobetrace.txt) to size field of BPF_LDX_MEM + * Documentation/trace/kprobetrace.rst) to size field of BPF_LDX_MEM * instruction (BPF_{B,H,W,DW}). */ static int diff --git a/tools/power/pm-graph/config/custom-timeline-functions.cfg b/tools/power/pm-graph/config/custom-timeline-functions.cfg index 4f80ad7d7275..f8fcb06fd68b 100644 --- a/tools/power/pm-graph/config/custom-timeline-functions.cfg +++ b/tools/power/pm-graph/config/custom-timeline-functions.cfg @@ -105,7 +105,7 @@ override-dev-timeline-functions: true # example: [color=#CC00CC] # # arglist: A list of arguments from registers/stack addresses. See URL: -# https://www.kernel.org/doc/Documentation/trace/kprobetrace.txt +# https://www.kernel.org/doc/Documentation/trace/kprobetrace.rst # # example: cpu=%di:s32 # @@ -170,7 +170,7 @@ pm_restore_console: # example: [color=#CC00CC] # # arglist: A list of arguments from registers/stack addresses. 
See URL: -# https://www.kernel.org/doc/Documentation/trace/kprobetrace.txt +# https://www.kernel.org/doc/Documentation/trace/kprobetrace.rst # # example: port=+36(%di):s32 # -- cgit v1.2.3 From 34962fb8070cb5a60b686a5ee11f81f2978836bd Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 8 May 2018 15:14:57 -0300 Subject: docs: Fix more broken references As we move stuff around, some doc references are broken. Fix some of them via this script: ./scripts/documentation-file-ref-check --fix Manually checked that produced results are valid. Acked-by: Matthias Brugger Acked-by: Takashi Iwai Acked-by: Jeff Kirsher Acked-by: Guenter Roeck Acked-by: Miguel Ojeda Signed-off-by: Mauro Carvalho Chehab Acked-by: Jonathan Corbet --- .../devicetree/bindings/clock/st/st,clkgen.txt | 8 ++++---- Documentation/devicetree/bindings/clock/ti/gate.txt | 2 +- .../devicetree/bindings/clock/ti/interface.txt | 2 +- .../devicetree/bindings/cpufreq/cpufreq-mediatek.txt | 2 +- .../devicetree/bindings/devfreq/rk3399_dmc.txt | 2 +- .../devicetree/bindings/gpu/arm,mali-midgard.txt | 2 +- .../devicetree/bindings/gpu/arm,mali-utgard.txt | 2 +- Documentation/devicetree/bindings/mfd/mt6397.txt | 2 +- Documentation/devicetree/bindings/mfd/sun6i-prcm.txt | 2 +- .../devicetree/bindings/mmc/exynos-dw-mshc.txt | 2 +- Documentation/devicetree/bindings/net/dsa/ksz.txt | 2 +- Documentation/devicetree/bindings/net/dsa/mt7530.txt | 2 +- .../devicetree/bindings/power/fsl,imx-gpc.txt | 2 +- .../devicetree/bindings/power/wakeup-source.txt | 2 +- .../devicetree/bindings/usb/rockchip,dwc3.txt | 2 +- Documentation/hwmon/ina2xx | 2 +- Documentation/maintainer/pull-requests.rst | 2 +- Documentation/translations/ko_KR/howto.rst | 2 +- MAINTAINERS | 20 ++++++++++---------- drivers/net/ethernet/intel/Kconfig | 8 ++++---- drivers/soundwire/stream.c | 8 ++++---- fs/Kconfig.binfmt | 2 +- fs/binfmt_misc.c | 2 +- 23 files changed, 41 insertions(+), 41 deletions(-) (limited to 'MAINTAINERS') diff --git 
a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt index 7364953d0d0b..45ac19bfa0a9 100644 --- a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt +++ b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt @@ -31,10 +31,10 @@ This binding uses the common clock binding[1]. Each subnode should use the binding described in [2]..[7] [1] Documentation/devicetree/bindings/clock/clock-bindings.txt -[3] Documentation/devicetree/bindings/clock/st,clkgen-mux.txt -[4] Documentation/devicetree/bindings/clock/st,clkgen-pll.txt -[7] Documentation/devicetree/bindings/clock/st,quadfs.txt -[8] Documentation/devicetree/bindings/clock/st,flexgen.txt +[3] Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt +[4] Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt +[7] Documentation/devicetree/bindings/clock/st/st,quadfs.txt +[8] Documentation/devicetree/bindings/clock/st/st,flexgen.txt Required properties: diff --git a/Documentation/devicetree/bindings/clock/ti/gate.txt b/Documentation/devicetree/bindings/clock/ti/gate.txt index 03f8fdee62a7..56d603c1f716 100644 --- a/Documentation/devicetree/bindings/clock/ti/gate.txt +++ b/Documentation/devicetree/bindings/clock/ti/gate.txt @@ -10,7 +10,7 @@ will be controlled instead and the corresponding hw-ops for that is used. 
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt -[2] Documentation/devicetree/bindings/clock/gate-clock.txt +[2] Documentation/devicetree/bindings/clock/gpio-gate-clock.txt [3] Documentation/devicetree/bindings/clock/ti/clockdomain.txt Required properties: diff --git a/Documentation/devicetree/bindings/clock/ti/interface.txt b/Documentation/devicetree/bindings/clock/ti/interface.txt index 3111a409fea6..3f4704040140 100644 --- a/Documentation/devicetree/bindings/clock/ti/interface.txt +++ b/Documentation/devicetree/bindings/clock/ti/interface.txt @@ -9,7 +9,7 @@ companion clock finding (match corresponding functional gate clock) and hardware autoidle enable / disable. [1] Documentation/devicetree/bindings/clock/clock-bindings.txt -[2] Documentation/devicetree/bindings/clock/gate-clock.txt +[2] Documentation/devicetree/bindings/clock/gpio-gate-clock.txt Required properties: - compatible : shall be one of: diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt index d36f07e0a2bb..0551c78619de 100644 --- a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt @@ -8,7 +8,7 @@ Required properties: "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock source (usually MAINPLL) when the original CPU PLL is under transition and not stable yet. - Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for + Please refer to Documentation/devicetree/bindings/clock/clock-bindings.txt for generic clock consumer properties. - operating-points-v2: Please refer to Documentation/devicetree/bindings/opp/opp.txt for detail. 
diff --git a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt index d6d2833482c9..fc2bcbe26b1e 100644 --- a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt +++ b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt @@ -12,7 +12,7 @@ Required properties: - clocks: Phandles for clock specified in "clock-names" property - clock-names : The name of clock used by the DFI, must be "pclk_ddr_mon"; -- operating-points-v2: Refer to Documentation/devicetree/bindings/power/opp.txt +- operating-points-v2: Refer to Documentation/devicetree/bindings/opp/opp.txt for details. - center-supply: DMC supply node. - status: Marks the node enabled/disabled. diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt index 039219df05c5..18a2cde2e5f3 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt +++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt @@ -34,7 +34,7 @@ Optional properties: - mali-supply : Phandle to regulator for the Mali device. Refer to Documentation/devicetree/bindings/regulator/regulator.txt for details. -- operating-points-v2 : Refer to Documentation/devicetree/bindings/power/opp.txt +- operating-points-v2 : Refer to Documentation/devicetree/bindings/opp/opp.txt for details. 
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt index c1f65d1dac1d..63cd91176a68 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt +++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt @@ -44,7 +44,7 @@ Optional properties: - memory-region: Memory region to allocate from, as defined in - Documentation/devicetree/bindi/reserved-memory/reserved-memory.txt + Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt - mali-supply: Phandle to regulator for the Mali device, as defined in diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt index d1df77f4d655..0ebd08af777d 100644 --- a/Documentation/devicetree/bindings/mfd/mt6397.txt +++ b/Documentation/devicetree/bindings/mfd/mt6397.txt @@ -12,7 +12,7 @@ MT6397/MT6323 is a multifunction device with the following sub modules: It is interfaced to host controller using SPI interface by a proprietary hardware called PMIC wrapper or pwrap. MT6397/MT6323 MFD is a child device of pwrap. See the following for pwarp node definitions: -Documentation/devicetree/bindings/soc/pwrap.txt +Documentation/devicetree/bindings/soc/mediatek/pwrap.txt This document describes the binding for MFD device and its sub module. 
diff --git a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt index dd2c06540485..4d21ffdb0fc1 100644 --- a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt +++ b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt @@ -9,7 +9,7 @@ Required properties: The prcm node may contain several subdevices definitions: - see Documentation/devicetree/clk/sunxi.txt for clock devices - - see Documentation/devicetree/reset/allwinner,sunxi-clock-reset.txt for reset + - see Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt for reset controller devices diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt index a58c173b7ab9..0419a63f73a0 100644 --- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt @@ -62,7 +62,7 @@ Required properties for a slot (Deprecated - Recommend to use one slot per host) rest of the gpios (depending on the bus-width property) are the data lines in no particular order. The format of the gpio specifier depends on the gpio controller. -(Deprecated - Refer to Documentation/devicetree/binding/pinctrl/samsung-pinctrl.txt) +(Deprecated - Refer to Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt) Example: diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt index fd23904ac68e..a700943218ca 100644 --- a/Documentation/devicetree/bindings/net/dsa/ksz.txt +++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt @@ -6,7 +6,7 @@ Required properties: - compatible: For external switch chips, compatible string must be exactly one of: "microchip,ksz9477" -See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional +See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional required and optional properties. 
Examples: diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt index a9bc27b93ee3..aa3527f71fdc 100644 --- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt +++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt @@ -31,7 +31,7 @@ Required properties for the child nodes within ports container: - phy-mode: String, must be either "trgmii" or "rgmii" for port labeled "cpu". -See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional +See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional required, optional properties and how the integrated switch subnodes must be specified. diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt index b31d6bbeee16..726ec2875223 100644 --- a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt +++ b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt @@ -14,7 +14,7 @@ Required properties: datasheet - interrupts: Should contain one interrupt specifier for the GPC interrupt - clocks: Must contain an entry for each entry in clock-names. - See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details. + See Documentation/devicetree/bindings/clock/clock-bindings.txt for details. - clock-names: Must include the following entries: - ipg diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt index 5d254ab13ebf..cfd74659fbed 100644 --- a/Documentation/devicetree/bindings/power/wakeup-source.txt +++ b/Documentation/devicetree/bindings/power/wakeup-source.txt @@ -22,7 +22,7 @@ List of legacy properties and respective binding document 3. "has-tpo" Documentation/devicetree/bindings/rtc/rtc-opal.txt 4. 
"linux,wakeup" Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt Documentation/devicetree/bindings/mfd/tc3589x.txt - Documentation/devicetree/bindings/input/ads7846.txt + Documentation/devicetree/bindings/input/touchscreen/ads7846.txt 5. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt 6. "linux,input-wakeup" Documentation/devicetree/bindings/input/samsung-keypad.txt 7. "nvidia,wakeup-source" Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt index 50a31536e975..252a05c5d976 100644 --- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt +++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt @@ -16,7 +16,7 @@ A child node must exist to represent the core DWC3 IP block. The name of the node is not important. The content of the node is defined in dwc3.txt. Phy documentation is provided in the following places: -Documentation/devicetree/bindings/phy/rockchip,dwc3-usb-phy.txt +Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt Example device nodes: diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index cfd31d94c872..72d16f08e431 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx @@ -53,7 +53,7 @@ bus supply voltage. The shunt value in micro-ohms can be set via platform data or device tree at compile-time or via the shunt_resistor attribute in sysfs at run-time. Please -refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings +refer to the Documentation/devicetree/bindings/hwmon/ina2xx.txt for bindings if the device tree is used. 
Additionally ina226 supports update_interval attribute as described in diff --git a/Documentation/maintainer/pull-requests.rst b/Documentation/maintainer/pull-requests.rst index a19db3458b56..22b271de0304 100644 --- a/Documentation/maintainer/pull-requests.rst +++ b/Documentation/maintainer/pull-requests.rst @@ -41,7 +41,7 @@ named ``char-misc-next``, you would be using the following command:: that will create a signed tag called ``char-misc-4.15-rc1`` based on the last commit in the ``char-misc-next`` branch, and sign it with your gpg key -(see :ref:`Documentation/maintainer/configure_git.rst `). +(see :ref:`Documentation/maintainer/configure-git.rst `). Linus will only accept pull requests based on a signed tag. Other maintainers may differ. diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst index 624654bdcd8a..a8197e072599 100644 --- a/Documentation/translations/ko_KR/howto.rst +++ b/Documentation/translations/ko_KR/howto.rst @@ -160,7 +160,7 @@ mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다. 독특한 행동에 관하여 흔히 있는 오해들과 혼란들을 해소하고 있기 때문이다. - :ref:`Documentation/process/stable_kernel_rules.rst ` + :ref:`Documentation/process/stable-kernel-rules.rst ` 이 문서는 안정적인 커널 배포가 이루어지는 규칙을 설명하고 있으며 여러분들이 이러한 배포들 중 하나에 변경을 하길 원한다면 무엇을 해야 하는지를 설명한다. 
diff --git a/MAINTAINERS b/MAINTAINERS index 09554034be46..5871dd5060f6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4513,7 +4513,7 @@ DRM DRIVER FOR ILITEK ILI9225 PANELS M: David Lechner S: Maintained F: drivers/gpu/drm/tinydrm/ili9225.c -F: Documentation/devicetree/bindings/display/ili9225.txt +F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt DRM DRIVER FOR INTEL I810 VIDEO CARDS S: Orphan / Obsolete @@ -4599,13 +4599,13 @@ DRM DRIVER FOR SITRONIX ST7586 PANELS M: David Lechner S: Maintained F: drivers/gpu/drm/tinydrm/st7586.c -F: Documentation/devicetree/bindings/display/st7586.txt +F: Documentation/devicetree/bindings/display/sitronix,st7586.txt DRM DRIVER FOR SITRONIX ST7735R PANELS M: David Lechner S: Maintained F: drivers/gpu/drm/tinydrm/st7735r.c -F: Documentation/devicetree/bindings/display/st7735r.txt +F: Documentation/devicetree/bindings/display/sitronix,st7735r.txt DRM DRIVER FOR TDFX VIDEO CARDS S: Orphan / Obsolete @@ -4824,7 +4824,7 @@ M: Eric Anholt S: Supported F: drivers/gpu/drm/v3d/ F: include/uapi/drm/v3d_drm.h -F: Documentation/devicetree/bindings/display/brcm,bcm-v3d.txt +F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt T: git git://anongit.freedesktop.org/drm/drm-misc DRM DRIVERS FOR VC4 @@ -5735,7 +5735,7 @@ M: Madalin Bucur L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/fman -F: Documentation/devicetree/bindings/powerpc/fsl/fman.txt +F: Documentation/devicetree/bindings/net/fsl-fman.txt FREESCALE QORIQ PTP CLOCK DRIVER M: Yangbo Lu @@ -7425,7 +7425,7 @@ M: Linus Walleij L: linux-iio@vger.kernel.org S: Maintained F: drivers/iio/gyro/mpu3050* -F: Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt +F: Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt IOC3 ETHERNET DRIVER M: Ralf Baechle @@ -8700,7 +8700,7 @@ M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/max6697 -F: Documentation/devicetree/bindings/i2c/max6697.txt +F: 
Documentation/devicetree/bindings/hwmon/max6697.txt F: drivers/hwmon/max6697.c F: include/linux/platform_data/max6697.h @@ -9080,7 +9080,7 @@ M: Martin Donnelly M: Martyn Welch S: Maintained F: drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c -F: Documentation/devicetree/bindings/video/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt +F: Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt MEGARAID SCSI/SAS DRIVERS M: Kashyap Desai @@ -10728,7 +10728,7 @@ PARALLEL LCD/KEYPAD PANEL DRIVER M: Willy Tarreau M: Ksenija Stanojevic S: Odd Fixes -F: Documentation/misc-devices/lcd-panel-cgram.txt +F: Documentation/auxdisplay/lcd-panel-cgram.txt F: drivers/misc/panel.c PARALLEL PORT SUBSYSTEM @@ -13291,7 +13291,7 @@ M: Vinod Koul L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Supported -F: Documentation/sound/alsa/compress_offload.txt +F: Documentation/sound/designs/compress-offload.rst F: include/sound/compress_driver.h F: include/uapi/sound/compress_* F: sound/core/compress_offload.c diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 14d287bed33c..1ab613eb5796 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -33,7 +33,7 @@ config E100 to identify the adapter. More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called e100. @@ -49,7 +49,7 @@ config E1000 More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called e1000. @@ -94,7 +94,7 @@ config IGB More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called igb. @@ -130,7 +130,7 @@ config IGBVF More specific information on configuring the driver is in - . + . 
To compile this driver as a module, choose M here. The module will be called igbvf. diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 8974a0fcda1b..4b5e250e8615 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -1291,7 +1291,7 @@ restore_params: * * @stream: Soundwire stream * - * Documentation/soundwire/stream.txt explains this API in detail + * Documentation/driver-api/soundwire/stream.rst explains this API in detail */ int sdw_prepare_stream(struct sdw_stream_runtime *stream) { @@ -1348,7 +1348,7 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream) * * @stream: Soundwire stream * - * Documentation/soundwire/stream.txt explains this API in detail + * Documentation/driver-api/soundwire/stream.rst explains this API in detail */ int sdw_enable_stream(struct sdw_stream_runtime *stream) { @@ -1400,7 +1400,7 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream) * * @stream: Soundwire stream * - * Documentation/soundwire/stream.txt explains this API in detail + * Documentation/driver-api/soundwire/stream.rst explains this API in detail */ int sdw_disable_stream(struct sdw_stream_runtime *stream) { @@ -1456,7 +1456,7 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream) * * @stream: Soundwire stream * - * Documentation/soundwire/stream.txt explains this API in detail + * Documentation/driver-api/soundwire/stream.rst explains this API in detail */ int sdw_deprepare_stream(struct sdw_stream_runtime *stream) { diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index 57a27c42b5ac..56df483de619 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -168,7 +168,7 @@ config BINFMT_MISC will automatically feed it to the correct interpreter. You can do other nice things, too. Read the file - to learn how to use this + to learn how to use this feature, for information about how to include Java support. and for information about how to include Mono-based .NET support. 
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 4de191563261..4b5fff31ef27 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -4,7 +4,7 @@ * Copyright (C) 1997 Richard Günther * * binfmt_misc detects binaries via a magic or filename extension and invokes - * a specified wrapper. See Documentation/binfmt_misc.txt for more details. + * a specified wrapper. See Documentation/admin-guide/binfmt-misc.rst for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -- cgit v1.2.3 From 2f635cff0e902a9e70e1039e471645c9167bb398 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Thu, 14 Jun 2018 08:01:00 -0300 Subject: MAINTAINERS: fix location of some display DT bindings Those files got a manufacturer's name prepended and were moved around. Adjust their references accordingly. Also, due those movements, Documentation/devicetree/bindings/video doesn't exist anymore. Cc: David Airlie Cc: David Lechner Cc: Peter Senna Tschudin Cc: Martin Donnelly Cc: Martyn Welch Cc: Stefan Agner Cc: Alison Wang Cc: Eric Anholt Signed-off-by: Mauro Carvalho Chehab Acked-by: Jonathan Corbet --- MAINTAINERS | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 5871dd5060f6..43eee2fb8798 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4638,7 +4638,6 @@ F: drivers/gpu/drm/ F: drivers/gpu/vga/ F: Documentation/devicetree/bindings/display/ F: Documentation/devicetree/bindings/gpu/ -F: Documentation/devicetree/bindings/video/ F: Documentation/gpu/ F: include/drm/ F: include/uapi/drm/ @@ -4683,7 +4682,7 @@ M: Boris Brezillon L: dri-devel@lists.freedesktop.org S: Supported F: drivers/gpu/drm/atmel-hlcdc/ -F: Documentation/devicetree/bindings/drm/atmel/ +F: Documentation/devicetree/bindings/display/atmel/ T: git git://anongit.freedesktop.org/drm/drm-misc DRM DRIVERS FOR BRIDGE CHIPS @@ -4714,7 +4713,7 @@ S: Supported F: drivers/gpu/drm/fsl-dcu/ F: Documentation/devicetree/bindings/display/fsl,dcu.txt F: 
Documentation/devicetree/bindings/display/fsl,tcon.txt -F: Documentation/devicetree/bindings/display/panel/nec,nl4827hc19_05b.txt +F: Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt DRM DRIVERS FOR FREESCALE IMX M: Philipp Zabel -- cgit v1.2.3 From 5efa6f8473cc50299c0bdd68280a472993cc5c32 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Thu, 14 Jun 2018 08:59:37 -0300 Subject: MAINTAINERS: fix location of DT npcm files The specified locations are not right. Fix the wildcard logic to point to the correct directories. Without that, get-maintainer won't get things right: $ ./scripts/get_maintainer.pl --no-git-fallback --no-r --no-n --no-l -f Documentation/devicetree/bindings/arm/cpu-enable-method/nuvoton,npcm750-smp robh+dt@kernel.org (maintainer:OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS) mark.rutland@arm.com (maintainer:OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS) After the patch, it will properly point to NPCM arch maintainers: $ ./scripts/get_maintainer.pl --no-git-fallback --no-r --no-n --no-l -f Documentation/devicetree/bindings/arm/cpu-enable-method/nuvoton,npcm750-smp avifishman70@gmail.com (supporter:ARM/NUVOTON NPCM ARCHITECTURE) tmaimon77@gmail.com (supporter:ARM/NUVOTON NPCM ARCHITECTURE) robh+dt@kernel.org (maintainer:OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS) mark.rutland@arm.com (maintainer:OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS) Cc: Avi Fishman Cc: Tomer Maimon Cc: Patrick Venture Cc: Nancy Yuen Cc: Brendan Higgins Signed-off-by: Mauro Carvalho Chehab Acked-by: Jonathan Corbet --- MAINTAINERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 43eee2fb8798..783ce44b3e7a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1732,7 +1732,8 @@ F: arch/arm/mach-npcm/ F: arch/arm/boot/dts/nuvoton-npcm* F: include/dt-bindings/clock/nuvoton,npcm7xx-clks.h F: drivers/*/*npcm* -F: Documentation/*/*npcm* +F: 
Documentation/devicetree/bindings/*/*npcm* +F: Documentation/devicetree/bindings/*/*/*npcm* ARM/NUVOTON W90X900 ARM ARCHITECTURE M: Wan ZongShun -- cgit v1.2.3 From e5ca4259b8d51b702de736ee6b6355a655c7095d Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Thu, 14 Jun 2018 12:30:30 -0300 Subject: devicetree: fix a series of wrong file references As files got renamed, their references broke. Manually fix a series of broken refs at the DT bindings. Signed-off-by: Mauro Carvalho Chehab Acked-by: Jonathan Corbet --- .../devicetree/bindings/input/rmi4/rmi_2d_sensor.txt | 2 +- Documentation/devicetree/bindings/mfd/sun6i-prcm.txt | 2 +- Documentation/devicetree/bindings/pci/hisilicon-pcie.txt | 2 +- Documentation/devicetree/bindings/pci/kirin-pcie.txt | 2 +- Documentation/devicetree/bindings/pci/pci-keystone.txt | 4 ++-- Documentation/devicetree/bindings/sound/st,stm32-i2s.txt | 2 +- Documentation/devicetree/bindings/sound/st,stm32-sai.txt | 2 +- MAINTAINERS | 12 ++++++------ 8 files changed, 14 insertions(+), 14 deletions(-) (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt b/Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt index f2c30c8b725d..9afffbdf6e28 100644 --- a/Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt +++ b/Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt @@ -12,7 +12,7 @@ Additional documentation for F11 can be found at: http://www.synaptics.com/sites/default/files/511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf Optional Touch Properties: -Description in Documentation/devicetree/bindings/input/touch +Description in Documentation/devicetree/bindings/input/touchscreen - touchscreen-inverted-x - touchscreen-inverted-y - touchscreen-swapped-x-y diff --git a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt index 4d21ffdb0fc1..daa091c2e67b 100644 --- 
a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt +++ b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt @@ -8,7 +8,7 @@ Required properties: - reg: The PRCM registers range The prcm node may contain several subdevices definitions: - - see Documentation/devicetree/clk/sunxi.txt for clock devices + - see Documentation/devicetree/bindings/clock/sunxi.txt for clock devices - see Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt for reset controller devices diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt index 7bf9df047a1e..0dcb87d6554f 100644 --- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt +++ b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt @@ -3,7 +3,7 @@ HiSilicon Hip05 and Hip06 PCIe host bridge DT description HiSilicon PCIe host controller is based on the Synopsys DesignWare PCI core. It shares common functions with the PCIe DesignWare core driver and inherits common properties defined in -Documentation/devicetree/bindings/pci/designware-pci.txt. +Documentation/devicetree/bindings/pci/designware-pcie.txt. Additional properties are described here: diff --git a/Documentation/devicetree/bindings/pci/kirin-pcie.txt b/Documentation/devicetree/bindings/pci/kirin-pcie.txt index 6e217c63123d..6bbe43818ad5 100644 --- a/Documentation/devicetree/bindings/pci/kirin-pcie.txt +++ b/Documentation/devicetree/bindings/pci/kirin-pcie.txt @@ -3,7 +3,7 @@ HiSilicon Kirin SoCs PCIe host DT description Kirin PCIe host controller is based on the Synopsys DesignWare PCI core. It shares common functions with the PCIe DesignWare core driver and inherits common properties defined in -Documentation/devicetree/bindings/pci/designware-pci.txt. +Documentation/devicetree/bindings/pci/designware-pcie.txt. 
Additional properties are described here: diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt index 7e05487544ed..3d4a209b0fd0 100644 --- a/Documentation/devicetree/bindings/pci/pci-keystone.txt +++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt @@ -3,9 +3,9 @@ TI Keystone PCIe interface Keystone PCI host Controller is based on the Synopsys DesignWare PCI hardware version 3.65. It shares common functions with the PCIe DesignWare core driver and inherits common properties defined in -Documentation/devicetree/bindings/pci/designware-pci.txt +Documentation/devicetree/bindings/pci/designware-pcie.txt -Please refer to Documentation/devicetree/bindings/pci/designware-pci.txt +Please refer to Documentation/devicetree/bindings/pci/designware-pcie.txt for the details of DesignWare DT bindings. Additional properties are described here as well as properties that are not applicable. diff --git a/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt b/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt index 4bda52042402..58c341300552 100644 --- a/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt +++ b/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt @@ -18,7 +18,7 @@ Required properties: See Documentation/devicetree/bindings/dma/stm32-dma.txt. - dma-names: Identifier for each DMA request line. Must be "tx" and "rx". 
- pinctrl-names: should contain only value "default" - - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt + - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt Optional properties: - resets: Reference to a reset controller asserting the reset controller diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt index f301cdf0b7e6..3a3fc506e43a 100644 --- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt +++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt @@ -37,7 +37,7 @@ SAI subnodes required properties: "tx": if sai sub-block is configured as playback DAI "rx": if sai sub-block is configured as capture DAI - pinctrl-names: should contain only value "default" - - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt + - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt SAI subnodes Optional properties: - st,sync: specify synchronization mode. 
diff --git a/MAINTAINERS b/MAINTAINERS index 783ce44b3e7a..fd3fc63f2759 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6966,7 +6966,7 @@ IIO MULTIPLEXER M: Peter Rosin L: linux-iio@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/iio/multiplexer/iio-mux.txt +F: Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt F: drivers/iio/multiplexer/iio-mux.c IIO SUBSYSTEM AND DRIVERS @@ -9696,7 +9696,7 @@ MXSFB DRM DRIVER M: Marek Vasut S: Supported F: drivers/gpu/drm/mxsfb/ -F: Documentation/devicetree/bindings/display/mxsfb-drm.txt +F: Documentation/devicetree/bindings/display/mxsfb.txt MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) M: Chris Lee @@ -10885,7 +10885,7 @@ M: Will Deacon L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: Documentation/devicetree/bindings/pci/controller-generic-pci.txt +F: Documentation/devicetree/bindings/pci/host-generic-pci.txt F: drivers/pci/controller/pci-host-common.c F: drivers/pci/controller/pci-host-generic.c @@ -11066,7 +11066,7 @@ M: Xiaowei Song M: Binghui Wang L: linux-pci@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/pci/pcie-kirin.txt +F: Documentation/devicetree/bindings/pci/kirin-pcie.txt F: drivers/pci/controller/dwc/pcie-kirin.c PCIE DRIVER FOR HISILICON STB @@ -12457,7 +12457,7 @@ L: linux-crypto@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained F: drivers/crypto/exynos-rng.c -F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt +F: Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER M: Łukasz Stelmach @@ -13571,7 +13571,7 @@ F: drivers/*/stm32-*timer* F: drivers/pwm/pwm-stm32* F: include/linux/*/stm32-*tim* F: Documentation/ABI/testing/*timer-stm32 -F: Documentation/devicetree/bindings/*/stm32-*timer +F: Documentation/devicetree/bindings/*/stm32-*timer* F: Documentation/devicetree/bindings/pwm/pwm-stm32* 
STMMAC ETHERNET DRIVER -- cgit v1.2.3